path: root/drivers/usb/serial/belkin_sa.c
/*
 * Belkin USB Serial Adapter Driver
 *
 *  Copyright (C) 2000		William Greathouse (wgreathouse@smva.com)
 *  Copyright (C) 2000-2001	Greg Kroah-Hartman (greg@kroah.com)
 *  Copyright (C) 2010		Johan Hovold (jhovold@gmail.com)
 *
 *  This program is largely derived from work by the linux-usb group
 *  and associated source files.  Please see the usb/serial files for
 *  individual credits and copyrights.
 *
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License as published by
 *	the Free Software Foundation; either version 2 of the License, or
 *	(at your option) any later version.
 *
 * See Documentation/usb/usb-serial.txt for more information on using this
 * driver
 *
 * TODO:
 * -- Add true modem control line query capability.  Currently we track the
 *    states reported by the interrupt and the states we request.
 * -- Add support for flush commands
 */
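
/*
 * Illustrative userspace sketch (not part of the driver): the control
 * line state tracked here is exposed through the standard termios
 * ioctls, so a test along the following lines ends up exercising
 * belkin_sa_tiocmget() and belkin_sa_tiocmset().  The device node name
 * is an assumption.
 *
 *	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);
 *	int bits;
 *
 *	ioctl(fd, TIOCMGET, &bits);	- read back DSR/CTS/RI/CD
 *	bits = TIOCM_DTR | TIOCM_RTS;
 *	ioctl(fd, TIOCMBIS, &bits);	- raise DTR and RTS
 */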

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "belkin_sa.h"

#define DRIVER_AUTHOR "William Greathouse <wgreathouse@smva.com>"
#define DRIVER_DESC "USB Belkin Serial converter driver"

/* function prototypes for a Belkin USB Serial Adapter F5U103 */
static int belkin_sa_port_probe(struct usb_serial_port *port);
static int belkin_sa_port_remove(struct usb_serial_port *port);
static int  belkin_sa_open(struct tty_struct *tty,
			struct usb_serial_port *port);
static void belkin_sa_close(struct usb_serial_port *port);
static void belkin_sa_read_int_callback(struct urb *urb);
static void belkin_sa_process_read_urb(struct urb *urb);
static void belkin_sa_set_termios(struct tty_struct *tty,
			struct usb_serial_port *port, struct ktermios *old);
static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state);
static int  belkin_sa_tiocmget(struct tty_struct *tty);
static int  belkin_sa_tiocmset(struct tty_struct *tty,
					unsigned int set, unsigned int clear);


static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) },
	{ USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) },
	{ USB_DEVICE(PERACOM_VID, PERACOM_PID) },
	{ USB_DEVICE(GOHUBS_VID, GOHUBS_PID) },
	{ USB_DEVICE(GOHUBS_VID, HANDYLINK_PID) },
	{ USB_DEVICE(BELKIN_DOCKSTATION_VID, BELKIN_DOCKSTATION_PID) },
	{ }	/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);

/* All of the device info needed for the serial converters */
static struct usb_serial_driver belkin_device = {
	.driver = {
		.owner =	THIS_MODULE,
		.name =		"belkin",
	},
	.description =		"Belkin / Peracom / GoHubs USB Serial Adapter",
	.id_table =		id_table,
	.num_ports =		1,
	.open =			belkin_sa_open,
	.close =		belkin_sa_close,
	.read_int_callback =	belkin_sa_read_int_callback,
	.process_read_urb =	belkin_sa_process_read_urb,
	.set_termios =		belkin_sa_set_termios,
	.break_ctl =		belkin_sa_break_ctl,
	.tiocmget =		belkin_sa_tiocmget,
	.tiocmset =		belkin_sa_tiocmset,
	.port_probe =		belkin_sa_port_probe,
	.port_remove =		belkin_sa_port_remove,
};

static struct usb_serial_driver * const serial_drivers[] = {
	&belkin_device, NULL
};

struct belkin_sa_private {
	spinlock_t		lock;		/* protects the fields below */
	unsigned long		control_state;	/* cached TIOCM_* line state */
	unsigned char		last_lsr;	/* last line status from irq */
	unsigned char		last_msr;	/* last modem status from irq */
	int			bad_flow_control; /* firmware <= 2.06 quirk */
};


/*
 * ***************************************************************************
 * Belkin USB Serial Adapter F5U103 specific driver functions
 * ***************************************************************************
 */

#define WDR_TIMEOUT 5000 /* default urb timeout */

/* assumes that struct usb_serial *serial is available */
#define BSA_USB_CMD(c, v) usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), \
					    (c), BELKIN_SA_SET_REQUEST_TYPE, \
					    (v), 0, NULL, 0, WDR_TIMEOUT)
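
/*
 * For example, BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 1) expands to
 *
 *	usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 *			BELKIN_SA_SET_DTR_REQUEST, BELKIN_SA_SET_REQUEST_TYPE,
 *			1, 0, NULL, 0, WDR_TIMEOUT);
 *
 * i.e. a zero-length control request (vendor type, per belkin_sa.h) with
 * the new state passed in wValue.
 */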

static int belkin_sa_port_probe(struct usb_serial_port *port)
{
	struct usb_device *dev = port->serial->dev;
	struct belkin_sa_private *priv;

	priv = kmalloc(sizeof(struct belkin_sa_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	priv->control_state = 0;
	priv->last_lsr = 0;
	priv->last_msr = 0;
	/* firmware through revision 2.06 has broken input flow control */
	priv->bad_flow_control =
		(le16_to_cpu(dev->descriptor.bcdDevice) <= 0x0206) ? 1 : 0;
	dev_info(&dev->dev, "bcdDevice: %04x, bfc: %d\n",
					le16_to_cpu(dev->descriptor.bcdDevice),
					priv->bad_flow_control);

	usb_set_serial_port_data(port, priv);

	return 0;
}

static int belkin_sa_port_remove(struct usb_serial_port *port)
{
	struct belkin_sa_private *priv;

	priv = usb_get_serial_port_data(port);
	kfree(priv);

	return 0;
}

static int belkin_sa_open(struct tty_struct *tty,
					struct usb_serial_port *port)
{
	int retval;

	retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
	if (retval) {
		dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
		return retval;
	}

	retval = usb_serial_generic_open(tty, port);
	if (retval)
		usb_kill_urb(port->interrupt_in_urb);

	return retval;
}

static void belkin_sa_close(struct usb_serial_port *port)
{
	usb_serial_generic_close(port);
	usb_kill_urb(port->interrupt_in_urb);
}

static void belkin_sa_read_int_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct belkin_sa_private *priv;
	unsigned char *data = urb->transfer_buffer;
	int retval;
	int status = urb->status;
	unsigned long flags;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
			__func__, status);
		goto exit;
	}

	usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);

	/* Handle known interrupt data */
	/* ignore data[0] and data[1] */

	priv = usb_get_serial_port_data(port);
	spin_lock_irqsave(&priv->lock, flags);
	priv->last_msr = data[BELKIN_SA_MSR_INDEX];

	/* Record Control Line states */
	if (priv->last_msr & BELKIN_SA_MSR_DSR)
		priv->control_state |= TIOCM_DSR;
	else
		priv->control_state &= ~TIOCM_DSR;

	if (priv->last_msr & BELKIN_SA_MSR_CTS)
		priv->control_state |= TIOCM_CTS;
	else
		priv->control_state &= ~TIOCM_CTS;

	if (priv->last_msr & BELKIN_SA_MSR_RI)
		priv->control_state |= TIOCM_RI;
	else
		priv->control_state &= ~TIOCM_RI;

	if (priv->last_msr & BELKIN_SA_MSR_CD)
		priv->control_state |= TIOCM_CD;
	else
		priv->control_state &= ~TIOCM_CD;

	priv->last_lsr = data[BELKIN_SA_LSR_INDEX];
	spin_unlock_irqrestore(&priv->lock, flags);
exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&port->dev, "%s - usb_submit_urb failed with "
			"result %d\n", __func__, retval);
}

static void belkin_sa_process_read_urb(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct belkin_sa_private *priv = usb_get_serial_port_data(port);
	struct tty_struct *tty;
	unsigned char *data = urb->transfer_buffer;
	unsigned long flags;
	unsigned char status;
	char tty_flag;

	/* Update line status */
	tty_flag = TTY_NORMAL;

	spin_lock_irqsave(&priv->lock, flags);
	status = priv->last_lsr;
	priv->last_lsr &= ~BELKIN_SA_LSR_ERR;
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!urb->actual_length)
		return;

	tty = tty_port_tty_get(&port->port);
	if (!tty)
		return;

	if (status & BELKIN_SA_LSR_ERR) {
		/* Break takes precedence over parity, which takes precedence
		 * over framing errors. */
		if (status & BELKIN_SA_LSR_BI)
			tty_flag = TTY_BREAK;
		else if (status & BELKIN_SA_LSR_PE)
			tty_flag = TTY_PARITY;
		else if (status & BELKIN_SA_LSR_FE)
			tty_flag = TTY_FRAME;
		dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag);

		/* Overrun is special, not associated with a char. */
		if (status & BELKIN_SA_LSR_OE)
			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
	}

	tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
							urb->actual_length);
	tty_flip_buffer_push(tty);
	tty_kref_put(tty);
}

static void belkin_sa_set_termios(struct tty_struct *tty,
		struct usb_serial_port *port, struct ktermios *old_termios)
{
	struct usb_serial *serial = port->serial;
	struct belkin_sa_private *priv = usb_get_serial_port_data(port);
	unsigned int iflag;
	unsigned int cflag;
	unsigned int old_iflag = 0;
	unsigned int old_cflag = 0;
	__u16 urb_value = 0; /* Will hold the new flags */
	unsigned long flags;
	unsigned long control_state;
	int bad_flow_control;
	speed_t baud;
	struct ktermios *termios = &tty->termios;

	iflag = termios->c_iflag;
	cflag = termios->c_cflag;

	termios->c_cflag &= ~CMSPAR;

	/* get a local copy of the current port settings */
	spin_lock_irqsave(&priv->lock, flags);
	control_state = priv->control_state;
	bad_flow_control = priv->bad_flow_control;
	spin_unlock_irqrestore(&priv->lock, flags);

	old_iflag = old_termios->c_iflag;
	old_cflag = old_termios->c_cflag;

	/* Set the baud rate */
	if ((cflag & CBAUD) != (old_cflag & CBAUD)) {
		/* reassert DTR and (maybe) RTS on transition from B0 */
		if ((old_cflag & CBAUD) == B0) {
			control_state |= (TIOCM_DTR|TIOCM_RTS);
			if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 1) < 0)
				dev_err(&port->dev, "Set DTR error\n");
			/* don't set RTS if using hardware flow control */
			if (!(old_cflag & CRTSCTS))
				if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST,
								1) < 0)
					dev_err(&port->dev, "Set RTS error\n");
		}
	}
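
	/*
	 * The adapter takes its baud rate as a divisor of a 230400 base
	 * clock (see BELKIN_SA_BAUD() in belkin_sa.h).  Illustratively,
	 * 115200 baud maps to divisor 2 and is reported back unchanged,
	 * while any rate above 230400 yields divisor 0, which the clipping
	 * below turns into divisor 1, i.e. 230400 baud.
	 */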

	baud = tty_get_baud_rate(tty);
	if (baud) {
		urb_value = BELKIN_SA_BAUD(baud);
		/* Clip to maximum speed */
		if (urb_value == 0)
			urb_value = 1;
		/* Turn it back into a resulting real baud rate */
		baud = BELKIN_SA_BAUD(urb_value);

		/* Report the actual baud rate back to the caller */
		tty_encode_baud_rate(tty, baud, baud);
		if (BSA_USB_CMD(BELKIN_SA_SET_BAUDRATE_REQUEST, urb_value) < 0)
			dev_err(&port->dev, "Set baudrate error\n");
	} else {
		/* Disable flow control */
		if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST,
						BELKIN_SA_FLOW_NONE) < 0)
			dev_err(&port->dev, "Disable flowcontrol error\n");
		/* Drop RTS and DTR */
		control_state &= ~(TIOCM_DTR | TIOCM_RTS);
		if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 0) < 0)
			dev_err(&port->dev, "DTR LOW error\n");
		if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, 0) < 0)
			dev_err(&port->dev, "RTS LOW error\n");
	}

	/* set the parity */
	if ((cflag ^ old_cflag) & (PARENB | PARODD)) {
		if (cflag & PARENB)
			urb_value = (cflag & PARODD) ?  BELKIN_SA_PARITY_ODD
						: BELKIN_SA_PARITY_EVEN;
		else
			urb_value = BELKIN_SA_PARITY_NONE;
		if (BSA_USB_CMD(BELKIN_SA_SET_PARITY_REQUEST, urb_value) < 0)
			dev_err(&port->dev, "Set parity error\n");
	}

	/* set the number of data bits */
	if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
		switch (cflag & CSIZE) {
		case CS5:
			urb_value = BELKIN_SA_DATA_BITS(5);
			break;
		case CS6:
			urb_value = BELKIN_SA_DATA_BITS(6);
			break;
		case CS7:
			urb_value = BELKIN_SA_DATA_BITS(7);
			break;
		case CS8:
			urb_value = BELKIN_SA_DATA_BITS(8);
			break;
		default:
			dev_dbg(&port->dev,
				"CSIZE was not CS5-CS8, using default of 8\n");
			urb_value = BELKIN_SA_DATA_BITS(8);
			break;
		}
		if (BSA_USB_CMD(BELKIN_SA_SET_DATA_BITS_REQUEST, urb_value) < 0)
			dev_err(&port->dev, "Set data bits error\n");
	}

	/* set the number of stop bits */
	if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) {
		urb_value = (cflag & CSTOPB) ? BELKIN_SA_STOP_BITS(2)
						: BELKIN_SA_STOP_BITS(1);
		if (BSA_USB_CMD(BELKIN_SA_SET_STOP_BITS_REQUEST,
							urb_value) < 0)
			dev_err(&port->dev, "Set stop bits error\n");
	}

	/* Set flow control */
	if (((iflag ^ old_iflag) & (IXOFF | IXON)) ||
		((cflag ^ old_cflag) & CRTSCTS)) {
		urb_value = 0;
		if ((iflag & IXOFF) || (iflag & IXON))
			urb_value |= (BELKIN_SA_FLOW_OXON | BELKIN_SA_FLOW_IXON);
		else
			urb_value &= ~(BELKIN_SA_FLOW_OXON | BELKIN_SA_FLOW_IXON);

		if (cflag & CRTSCTS)
			urb_value |=  (BELKIN_SA_FLOW_OCTS | BELKIN_SA_FLOW_IRTS);
		else
			urb_value &= ~(BELKIN_SA_FLOW_OCTS | BELKIN_SA_FLOW_IRTS);

		if (bad_flow_control)
			urb_value &= ~(BELKIN_SA_FLOW_IRTS);

		if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST, urb_value) < 0)
			dev_err(&port->dev, "Set flow control error\n");
	}
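
	/*
	 * Worked example: with CRTSCTS set on a firmware <= 2.06 adapter
	 * (bad_flow_control), the value sent ends up as BELKIN_SA_FLOW_OCTS
	 * alone: output is still throttled by CTS, but the broken input
	 * (RTS) side of the flow control is left disabled.
	 */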

	/* save off the modified port settings */
	spin_lock_irqsave(&priv->lock, flags);
	priv->control_state = control_state;
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_serial *serial = port->serial;

	if (BSA_USB_CMD(BELKIN_SA_SET_BREAK_REQUEST, break_state ? 1 : 0) < 0)
		dev_err(&port->dev, "Set break_ctl %d\n", break_state);
}

static int belkin_sa_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct belkin_sa_private *priv = usb_get_serial_port_data(port);
	unsigned long control_state;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	control_state = priv->control_state;
	spin_unlock_irqrestore(&priv->lock, flags);

	return control_state;
}

static int belkin_sa_tiocmset(struct tty_struct *tty,
			       unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_serial *serial = port->serial;
	struct belkin_sa_private *priv = usb_get_serial_port_data(port);
	unsigned long control_state;
	unsigned long flags;
	int retval;
	int rts = 0;
	int dtr = 0;

	spin_lock_irqsave(&priv->lock, flags);
	control_state = priv->control_state;

	if (set & TIOCM_RTS) {
		control_state |= TIOCM_RTS;
		rts = 1;
	}
	if (set & TIOCM_DTR) {
		control_state |= TIOCM_DTR;
		dtr = 1;
	}
	if (clear & TIOCM_RTS) {
		control_state &= ~TIOCM_RTS;
		rts = 0;
	}
	if (clear & TIOCM_DTR) {
		control_state &= ~TIOCM_DTR;
		dtr = 0;
	}

	priv->control_state = control_state;
	spin_unlock_irqrestore(&priv->lock, flags);

	retval = BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, rts);
	if (retval < 0) {
		dev_err(&port->dev, "Set RTS error %d\n", retval);
		goto exit;
	}

	retval = BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, dtr);
	if (retval < 0) {
		dev_err(&port->dev, "Set DTR error %d\n", retval);
		goto exit;
	}
exit:
	return retval;
}

module_usb_serial_driver(serial_drivers, id_table);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-rw-r--r--arch/arm/mach-davinci/common.c3
-rw-r--r--arch/arm/mach-davinci/da830.c462
-rw-r--r--arch/arm/mach-davinci/da850.c778
-rw-r--r--arch/arm/mach-davinci/da8xx-dt.c66
-rw-r--r--arch/arm/mach-davinci/davinci.h8
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c43
-rw-r--r--arch/arm/mach-davinci/devices.c1
-rw-r--r--arch/arm/mach-davinci/dm355.c406
-rw-r--r--arch/arm/mach-davinci/dm365.c485
-rw-r--r--arch/arm/mach-davinci/dm644x.c344
-rw-r--r--arch/arm/mach-davinci/dm646x.c372
-rw-r--r--arch/arm/mach-davinci/include/mach/clock.h3
-rw-r--r--arch/arm/mach-davinci/include/mach/common.h11
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h6
-rw-r--r--arch/arm/mach-davinci/pm_domain.c5
-rw-r--r--arch/arm/mach-davinci/psc.c137
-rw-r--r--arch/arm/mach-davinci/psc.h12
-rw-r--r--arch/arm/mach-davinci/time.c22
-rw-r--r--arch/arm/mach-davinci/usb-da8xx.c242
-rw-r--r--arch/arm/mach-exynos/exynos.c2
-rw-r--r--arch/arm/mach-exynos/suspend.c3
-rw-r--r--arch/arm/mach-hisi/hotplug.c41
-rw-r--r--arch/arm/mach-imx/Kconfig33
-rw-r--r--arch/arm/mach-imx/Makefile5
-rw-r--r--arch/arm/mach-imx/common.h3
-rw-r--r--arch/arm/mach-imx/cpu-imx5.c45
-rw-r--r--arch/arm/mach-imx/cpu.c1
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6sl.c7
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6sx.c1
-rw-r--r--arch/arm/mach-imx/gpc.c14
-rw-r--r--arch/arm/mach-imx/imx31-dt.c18
-rw-r--r--arch/arm/mach-imx/mach-imx51.c30
-rw-r--r--arch/arm/mach-imx/mach-imx53.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6sl.c5
-rw-r--r--arch/arm/mach-imx/mach-imx7d-cm4.c18
-rw-r--r--arch/arm/mach-imx/pm-imx6.c33
-rw-r--r--arch/arm/mach-mmp/sram.c1
-rw-r--r--arch/arm/mach-mvebu/platsmp.c49
-rw-r--r--arch/arm/mach-mvebu/pmsu.c6
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq-handler.S5
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq.c113
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq.h42
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c301
-rw-r--r--arch/arm/mach-omap1/board-h2.c2
-rw-r--r--arch/arm/mach-omap1/board-h3.c2
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c2
-rw-r--r--arch/arm/mach-omap1/board-osk.c4
-rw-r--r--arch/arm/mach-omap2/omap-smp.c41
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_reset.c12
-rw-r--r--arch/arm/mach-omap2/pm-asm-offsets.c2
-rw-r--r--arch/arm/mach-omap2/pm-debug.c37
-rw-r--r--arch/arm/mach-omap2/pm33xx-core.c20
-rw-r--r--arch/arm/mach-omap2/sleep33xx.S52
-rw-r--r--arch/arm/mach-omap2/sleep43xx.S110
-rw-r--r--arch/arm/mach-pxa/balloon3.c1
-rw-r--r--arch/arm/mach-pxa/devices.c161
-rw-r--r--arch/arm/mach-pxa/devices.h6
-rw-r--r--arch/arm/mach-pxa/em-x270.c1
-rw-r--r--arch/arm/mach-pxa/hx4700.c4
-rw-r--r--arch/arm/mach-pxa/irq.c4
-rw-r--r--arch/arm/mach-pxa/mioa701.c2
-rw-r--r--arch/arm/mach-pxa/pxa25x.c38
-rw-r--r--arch/arm/mach-pxa/pxa27x.c39
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c41
-rw-r--r--arch/arm/mach-pxa/zylonite.c11
-rw-r--r--arch/arm/mach-rpc/ecard.c5
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/s3c2412.h2
-rw-r--r--arch/arm/mach-shmobile/Kconfig2
-rw-r--r--arch/arm/mach-shmobile/Makefile8
-rw-r--r--arch/arm/mach-shmobile/common.h1
-rw-r--r--arch/arm/mach-shmobile/headsmp-apmu.S7
-rw-r--r--arch/arm/mach-shmobile/platsmp-apmu.c245
-rw-r--r--arch/arm/mach-shmobile/platsmp-apmu.h32
-rw-r--r--arch/arm/mach-shmobile/platsmp.c9
-rw-r--r--arch/arm/mach-shmobile/pm-r8a7779.c41
-rw-r--r--arch/arm/mach-shmobile/pm-rcar-gen2.c25
-rw-r--r--arch/arm/mach-shmobile/r8a7779.h2
-rw-r--r--arch/arm/mach-shmobile/r8a7790.h7
-rw-r--r--arch/arm/mach-shmobile/r8a7791.h7
-rw-r--r--arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c10
-rw-r--r--arch/arm/mach-shmobile/setup-emev2.c10
-rw-r--r--arch/arm/mach-shmobile/setup-r7s72100.c10
-rw-r--r--arch/arm/mach-shmobile/setup-r8a73a4.c11
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7740.c10
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7778.c10
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c10
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7790.c38
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7791.c39
-rw-r--r--arch/arm/mach-shmobile/setup-rcar-gen2.c20
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c10
-rw-r--r--arch/arm/mach-shmobile/smp-emev2.c10
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c78
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7790.c71
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7791.c53
-rw-r--r--arch/arm/mach-shmobile/timer.c8
-rw-r--r--arch/arm/mach-uniphier/Kconfig1
-rw-r--r--arch/arm/mm/Kconfig1
-rw-r--r--arch/arm/mm/dma-mapping.c17
-rw-r--r--arch/arm/mm/fault.c7
-rw-r--r--arch/arm/mm/init.c9
-rw-r--r--arch/arm/mm/nommu.c3
-rw-r--r--arch/arm/mm/tcm.h2
-rw-r--r--arch/arm/net/bpf_jit_32.c1064
-rw-r--r--arch/arm/net/bpf_jit_32.h42
-rw-r--r--arch/arm/plat-omap/counter_32k.c2
-rw-r--r--arch/arm/plat-pxa/ssp.c47
-rw-r--r--arch/arm/plat-samsung/adc.c1
-rw-r--r--arch/arm/probes/kprobes/core.c139
-rw-r--r--arch/arm/probes/kprobes/test-core.c1
-rw-r--r--arch/arm/probes/uprobes/core.c2
-rw-r--r--arch/arm/vfp/Makefile5
-rw-r--r--arch/arm/vfp/vfpmodule.c17
-rw-r--r--arch/arm64/Kconfig49
-rw-r--r--arch/arm64/Kconfig.debug5
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/Makefile11
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi4
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi15
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660.dtsi18
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip07.dtsi284
-rw-r--r--arch/arm64/configs/defconfig5
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-core.S150
-rw-r--r--arch/arm64/crypto/aes-glue.c3
-rw-r--r--arch/arm64/crypto/ghash-ce-core.S319
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c202
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c1
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c2
-rw-r--r--arch/arm64/crypto/sha256-glue.c8
-rw-r--r--arch/arm64/crypto/sha3-ce-glue.c4
-rw-r--r--arch/arm64/crypto/sha512-ce-glue.c2
-rw-r--r--arch/arm64/crypto/sha512-glue.c2
-rw-r--r--arch/arm64/crypto/sm3-ce-glue.c1
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/acpi.h29
-rw-r--r--arch/arm64/include/asm/atomic.h47
-rw-r--r--arch/arm64/include/asm/barrier.h13
-rw-r--r--arch/arm64/include/asm/bitops.h21
-rw-r--r--arch/arm64/include/asm/cache.h4
-rw-r--r--arch/arm64/include/asm/cacheflush.h27
-rw-r--r--arch/arm64/include/asm/cpucaps.h4
-rw-r--r--arch/arm64/include/asm/efi.h3
-rw-r--r--arch/arm64/include/asm/fpsimd.h17
-rw-r--r--arch/arm64/include/asm/hw_breakpoint.h7
-rw-r--r--arch/arm64/include/asm/insn.h2
-rw-r--r--arch/arm64/include/asm/irq.h2
-rw-r--r--arch/arm64/include/asm/kprobes.h1
-rw-r--r--arch/arm64/include/asm/kvm_arm.h1
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h27
-rw-r--r--arch/arm64/include/asm/kvm_host.h28
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h35
-rw-r--r--arch/arm64/include/asm/memory.h7
-rw-r--r--arch/arm64/include/asm/neon.h7
-rw-r--r--arch/arm64/include/asm/numa.h4
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h24
-rw-r--r--arch/arm64/include/asm/processor.h21
-rw-r--r--arch/arm64/include/asm/ptrace.h79
-rw-r--r--arch/arm64/include/asm/sdei.h9
-rw-r--r--arch/arm64/include/asm/simd.h19
-rw-r--r--arch/arm64/include/asm/spinlock.h117
-rw-r--r--arch/arm64/include/asm/spinlock_types.h17
-rw-r--r--arch/arm64/include/asm/stacktrace.h73
-rw-r--r--arch/arm64/include/asm/syscall.h8
-rw-r--r--arch/arm64/include/asm/syscall_wrapper.h80
-rw-r--r--arch/arm64/include/asm/sysreg.h33
-rw-r--r--arch/arm64/include/asm/tlb.h2
-rw-r--r--arch/arm64/include/asm/tlbflush.h7
-rw-r--r--arch/arm64/include/asm/topology.h4
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h28
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h13
-rw-r--r--arch/arm64/kernel/Makefile5
-rw-r--r--arch/arm64/kernel/acpi.c11
-rw-r--r--arch/arm64/kernel/acpi_numa.c88
-rw-r--r--arch/arm64/kernel/alternative.c4
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c32
-rw-r--r--arch/arm64/kernel/cpu-reset.h9
-rw-r--r--arch/arm64/kernel/cpu_errata.c30
-rw-r--r--arch/arm64/kernel/cpufeature.c28
-rw-r--r--arch/arm64/kernel/entry.S160
-rw-r--r--arch/arm64/kernel/entry32.S121
-rw-r--r--arch/arm64/kernel/fpsimd.c19
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c86
-rw-r--r--arch/arm64/kernel/insn.c70
-rw-r--r--arch/arm64/kernel/irq.c10
-rw-r--r--arch/arm64/kernel/jump_label.c2
-rw-r--r--arch/arm64/kernel/machine_kexec.c15
-rw-r--r--arch/arm64/kernel/perf_event.c281
-rw-r--r--arch/arm64/kernel/probes/kprobes.c88
-rw-r--r--arch/arm64/kernel/process.c42
-rw-r--r--arch/arm64/kernel/ptrace.c61
-rw-r--r--arch/arm64/kernel/sdei.c48
-rw-r--r--arch/arm64/kernel/setup.c38
-rw-r--r--arch/arm64/kernel/signal.c8
-rw-r--r--arch/arm64/kernel/signal32.c24
-rw-r--r--arch/arm64/kernel/smp.c44
-rw-r--r--arch/arm64/kernel/stacktrace.c2
-rw-r--r--arch/arm64/kernel/sys.c29
-rw-r--r--arch/arm64/kernel/sys32.c135
-rw-r--r--arch/arm64/kernel/syscall.c139
-rw-r--r--arch/arm64/kernel/topology.c58
-rw-r--r--arch/arm64/kernel/traps.c18
-rw-r--r--arch/arm64/kernel/vdso/note.S3
-rw-r--r--arch/arm64/kvm/guest.c47
-rw-r--r--arch/arm64/kvm/hyp-init.S6
-rw-r--r--arch/arm64/kvm/hyp/Makefile3
-rw-r--r--arch/arm64/kvm/hyp/sysreg-sr.c5
-rw-r--r--arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c2
-rw-r--r--arch/arm64/kvm/inject_fault.c6
-rw-r--r--arch/arm64/kvm/regmap.c22
-rw-r--r--arch/arm64/kvm/reset.c8
-rw-r--r--arch/arm64/kvm/sys_regs.c54
-rw-r--r--arch/arm64/lib/Makefile2
-rw-r--r--arch/arm64/lib/bitops.S76
-rw-r--r--arch/arm64/mm/cache.S4
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/fault.c15
-rw-r--r--arch/arm64/mm/flush.c3
-rw-r--r--arch/arm64/mm/hugetlbpage.c7
-rw-r--r--arch/arm64/mm/init.c10
-rw-r--r--arch/arm64/mm/mmu.c48
-rw-r--r--arch/arm64/mm/numa.c29
-rw-r--r--arch/arm64/mm/ptdump_debugfs.c13
-rw-r--r--arch/c6x/Kconfig41
-rw-r--r--arch/c6x/Kconfig.debug10
-rw-r--r--arch/h8300/Kconfig37
-rw-r--r--arch/h8300/Kconfig.debug1
-rw-r--r--arch/h8300/Makefile4
-rw-r--r--arch/h8300/boot/dts/h8300h_sim.dts2
-rw-r--r--arch/h8300/include/asm/atomic.h19
-rw-r--r--arch/h8300/include/asm/bitops.h14
-rw-r--r--arch/h8300/include/asm/ptrace.h2
-rw-r--r--arch/h8300/kernel/kgdb.c2
-rw-r--r--arch/h8300/kernel/setup.c46
-rw-r--r--arch/h8300/kernel/sim-console.c7
-rw-r--r--arch/hexagon/Kconfig21
-rw-r--r--arch/hexagon/Kconfig.debug1
-rw-r--r--arch/hexagon/include/asm/atomic.h18
-rw-r--r--arch/hexagon/mm/vm_fault.c2
-rw-r--r--arch/ia64/Kconfig26
-rw-r--r--arch/ia64/Kconfig.debug5
-rw-r--r--arch/ia64/hp/common/sba_iommu.c4
-rw-r--r--arch/ia64/include/asm/atomic.h81
-rw-r--r--arch/ia64/include/asm/io.h14
-rw-r--r--arch/ia64/include/asm/kprobes.h2
-rw-r--r--arch/ia64/include/asm/tlb.h7
-rw-r--r--arch/ia64/include/uapi/asm/break.h1
-rw-r--r--arch/ia64/include/uapi/asm/socket.h3
-rw-r--r--arch/ia64/kernel/Makefile2
-rw-r--r--arch/ia64/kernel/asm-offsets.c4
-rw-r--r--arch/ia64/kernel/fsys.S12
-rw-r--r--arch/ia64/kernel/jprobes.S90
-rw-r--r--arch/ia64/kernel/kprobes.c93
-rw-r--r--arch/ia64/kernel/perfmon.c6
-rw-r--r--arch/ia64/kernel/setup.c11
-rw-r--r--arch/ia64/mm/contig.c75
-rw-r--r--arch/ia64/mm/discontig.c134
-rw-r--r--arch/ia64/mm/fault.c2
-rw-r--r--arch/ia64/mm/init.c16
-rw-r--r--arch/m68k/Kconfig36
-rw-r--r--arch/m68k/Kconfig.debug5
-rw-r--r--arch/m68k/apollo/config.c8
-rw-r--r--arch/m68k/atari/config.c5
-rw-r--r--arch/m68k/atari/time.c63
-rw-r--r--arch/m68k/bvme6000/config.c45
-rw-r--r--arch/m68k/coldfire/clk.c29
-rw-r--r--arch/m68k/configs/amiga_defconfig32
-rw-r--r--arch/m68k/configs/apollo_defconfig30
-rw-r--r--arch/m68k/configs/atari_defconfig29
-rw-r--r--arch/m68k/configs/bvme6000_defconfig30
-rw-r--r--arch/m68k/configs/hp300_defconfig30
-rw-r--r--arch/m68k/configs/mac_defconfig32
-rw-r--r--arch/m68k/configs/multi_defconfig34
-rw-r--r--arch/m68k/configs/mvme147_defconfig30
-rw-r--r--arch/m68k/configs/mvme16x_defconfig30
-rw-r--r--arch/m68k/configs/q40_defconfig30
-rw-r--r--arch/m68k/configs/sun3_defconfig28
-rw-r--r--arch/m68k/configs/sun3x_defconfig30
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/atomic.h24
-rw-r--r--arch/m68k/include/asm/bitops.h14
-rw-r--r--arch/m68k/include/asm/dma-mapping.h12
-rw-r--r--arch/m68k/include/asm/dma.h4
-rw-r--r--arch/m68k/include/asm/io.h7
-rw-r--r--arch/m68k/include/asm/io_mm.h42
-rw-r--r--arch/m68k/include/asm/io_no.h12
-rw-r--r--arch/m68k/include/asm/kmap.h9
-rw-r--r--arch/m68k/include/asm/machdep.h1
-rw-r--r--arch/m68k/include/asm/macintosh.h1
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h4
-rw-r--r--arch/m68k/include/asm/page_no.h2
-rw-r--r--arch/m68k/kernel/dma.c68
-rw-r--r--arch/m68k/kernel/setup_mm.c15
-rw-r--r--arch/m68k/kernel/setup_no.c21
-rw-r--r--arch/m68k/mac/config.c23
-rw-r--r--arch/m68k/mac/misc.c134
-rw-r--r--arch/m68k/mm/fault.c4
-rw-r--r--arch/m68k/mm/init.c1
-rw-r--r--arch/m68k/mm/mcfmmu.c13
-rw-r--r--arch/m68k/mm/motorola.c35
-rw-r--r--arch/m68k/mvme147/config.c7
-rw-r--r--arch/m68k/mvme16x/config.c8
-rw-r--r--arch/m68k/q40/config.c30
-rw-r--r--arch/m68k/sun3/config.c4
-rw-r--r--arch/microblaze/Kconfig36
-rw-r--r--arch/microblaze/Kconfig.debug6
-rw-r--r--arch/microblaze/Makefile4
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/dma-mapping.h28
-rw-r--r--arch/microblaze/include/asm/pgtable.h5
-rw-r--r--arch/microblaze/kernel/dma.c144
-rw-r--r--arch/microblaze/kernel/head.S5
-rw-r--r--arch/microblaze/mm/consistent.c54
-rw-r--r--arch/microblaze/mm/fault.c2
-rw-r--r--arch/microblaze/pci/pci-common.c13
-rw-r--r--arch/microblaze/pci/xilinx_pci.c1
-rw-r--r--arch/mips/Kconfig82
-rw-r--r--arch/mips/Kconfig.debug5
-rw-r--r--arch/mips/Makefile22
-rw-r--r--arch/mips/alchemy/board-gpr.c3
-rw-r--r--arch/mips/alchemy/board-mtx1.c3
-rw-r--r--arch/mips/alchemy/board-xxs1500.c3
-rw-r--r--arch/mips/alchemy/devboards/platform.c3
-rw-r--r--arch/mips/ar7/clock.c29
-rw-r--r--arch/mips/ar7/prom.c4
-rw-r--r--arch/mips/ath25/Kconfig1
-rw-r--r--arch/mips/ath25/board.c6
-rw-r--r--arch/mips/ath25/early_printk.c5
-rw-r--r--arch/mips/ath79/clock.c193
-rw-r--r--arch/mips/ath79/common.c10
-rw-r--r--arch/mips/ath79/early_printk.c64
-rw-r--r--arch/mips/ath79/setup.c35
-rw-r--r--arch/mips/bcm47xx/setup.c6
-rw-r--r--arch/mips/bcm63xx/early_printk.c1
-rw-r--r--arch/mips/bmips/dma.c32
-rw-r--r--arch/mips/bmips/setup.c7
-rw-r--r--arch/mips/boot/Makefile52
-rw-r--r--arch/mips/boot/compressed/uart-prom.c3
-rw-r--r--arch/mips/boot/dts/ingenic/jz4780.dtsi19
-rw-r--r--arch/mips/boot/dts/mscc/Makefile2
-rw-r--r--arch/mips/boot/dts/mscc/ocelot.dtsi32
-rw-r--r--arch/mips/boot/dts/mscc/ocelot_pcb123.dts10
-rw-r--r--arch/mips/boot/dts/qca/ar9132.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts3
-rw-r--r--arch/mips/boot/dts/qca/ar9331.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dpt_module.dts5
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts5
-rw-r--r--arch/mips/boot/dts/qca/ar9331_omega.dts5
-rw-r--r--arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts5
-rw-r--r--arch/mips/boot/ecoff.h61
-rw-r--r--arch/mips/boot/elf2ecoff.c31
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-md5.c1
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha1.c1
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha256.c2
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha512.c2
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c191
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c5
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c7
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-spi.c8
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c7
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c4
-rw-r--r--arch/mips/cavium-octeon/setup.c8
-rw-r--r--arch/mips/configs/bcm47xx_defconfig1
-rw-r--r--arch/mips/configs/ci20_defconfig2
-rw-r--r--arch/mips/configs/generic_defconfig3
-rw-r--r--arch/mips/configs/malta_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_guest_defconfig1
-rw-r--r--arch/mips/configs/malta_qemu_32r6_defconfig1
-rw-r--r--arch/mips/configs/maltaaprp_defconfig1
-rw-r--r--arch/mips/configs/maltasmvp_defconfig1
-rw-r--r--arch/mips/configs/maltasmvp_eva_defconfig1
-rw-r--r--arch/mips/configs/maltaup_defconfig1
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig1
-rw-r--r--arch/mips/fw/arc/arc_con.c1
-rw-r--r--arch/mips/fw/arc/promlib.c1
-rw-r--r--arch/mips/fw/sni/sniprom.c1
-rw-r--r--arch/mips/generic/Kconfig12
-rw-r--r--arch/mips/generic/Platform1
-rw-r--r--arch/mips/generic/board-ocelot_pcb123.its.S23
-rw-r--r--arch/mips/generic/init.c14
-rw-r--r--arch/mips/generic/yamon-dt.c4
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/atomic.h367
-rw-r--r--arch/mips/include/asm/bmips.h16
-rw-r--r--arch/mips/include/asm/cpu-features.h176
-rw-r--r--arch/mips/include/asm/cpu.h51
-rw-r--r--arch/mips/include/asm/dma-coherence.h6
-rw-r--r--arch/mips/include/asm/dma-direct.h17
-rw-r--r--arch/mips/include/asm/dma-mapping.h20
-rw-r--r--arch/mips/include/asm/io.h40
-rw-r--r--arch/mips/include/asm/kprobes.h13
-rw-r--r--arch/mips/include/asm/mach-ar7/spaces.h3
-rw-r--r--arch/mips/include/asm/mach-ath25/dma-coherence.h76
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h771
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h34
-rw-r--r--arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h6
-rw-r--r--arch/mips/include/asm/mach-bmips/dma-coherence.h54
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h79
-rw-r--r--arch/mips/include/asm/mach-generic/dma-coherence.h73
-rw-r--r--arch/mips/include/asm/mach-generic/kmalloc.h3
-rw-r--r--arch/mips/include/asm/mach-generic/spaces.h10
-rw-r--r--arch/mips/include/asm/mach-ip27/dma-coherence.h70
-rw-r--r--arch/mips/include/asm/mach-ip32/dma-coherence.h92
-rw-r--r--arch/mips/include/asm/mach-jazz/dma-coherence.h60
-rw-r--r--arch/mips/include/asm/mach-loongson64/dma-coherence.h93
-rw-r--r--arch/mips/include/asm/mach-loongson64/kernel-entry-init.h24
-rw-r--r--arch/mips/include/asm/mach-pic32/spaces.h1
-rw-r--r--arch/mips/include/asm/mipsregs.h32
-rw-r--r--arch/mips/include/asm/mmu_context.h2
-rw-r--r--arch/mips/include/asm/netlogic/xlr/fmn.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-asxx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ciu-defs.h10062
-rw-r--r--arch/mips/include/asm/octeon/cvmx-gmxx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcsx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-spxx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-stxx-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/octeon.h9
-rw-r--r--arch/mips/include/asm/octeon/pci-octeon.h3
-rw-r--r--arch/mips/include/asm/page.h11
-rw-r--r--arch/mips/include/asm/processor.h15
-rw-r--r--arch/mips/include/asm/setup.h2
-rw-r--r--arch/mips/include/asm/sgialib.h1
-rw-r--r--arch/mips/include/asm/sim.h12
-rw-r--r--arch/mips/include/asm/smp.h12
-rw-r--r--arch/mips/include/asm/txx9/generic.h1
-rw-r--r--arch/mips/include/asm/txx9/tx4939.h29
-rw-r--r--arch/mips/include/uapi/asm/socket.h3
-rw-r--r--arch/mips/jazz/jazzdma.c141
-rw-r--r--arch/mips/jazz/setup.c17
-rw-r--r--arch/mips/jz4740/Platform2
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c3
-rw-r--r--arch/mips/kernel/cpu-probe.c3
-rw-r--r--arch/mips/kernel/early_printk.c2
-rw-r--r--arch/mips/kernel/early_printk_8250.c1
-rw-r--r--arch/mips/kernel/genex.S46
-rw-r--r--arch/mips/kernel/idle.c12
-rw-r--r--arch/mips/kernel/kprobes.c70
-rw-r--r--arch/mips/kernel/linux32.c29
-rw-r--r--arch/mips/kernel/process.c123
-rw-r--r--arch/mips/kernel/ptrace.c254
-rw-r--r--arch/mips/kernel/ptrace32.c2
-rw-r--r--arch/mips/kernel/relocate_kernel.S4
-rw-r--r--arch/mips/kernel/setup.c38
-rw-r--r--arch/mips/kernel/signal.c24
-rw-r--r--arch/mips/kernel/signal_n32.c12
-rw-r--r--arch/mips/kernel/signal_o32.c24
-rw-r--r--arch/mips/kernel/traps.c8
-rw-r--r--arch/mips/kernel/uprobes.c2
-rw-r--r--arch/mips/kvm/mips.c4
-rw-r--r--arch/mips/lantiq/early_printk.c1
-rw-r--r--arch/mips/lantiq/prom.c8
-rw-r--r--arch/mips/lantiq/xway/dma.c3
-rw-r--r--arch/mips/lasat/prom.c1
-rw-r--r--arch/mips/lib/memset.S21
-rw-r--r--arch/mips/loongson32/Platform8
-rw-r--r--arch/mips/loongson64/Kconfig5
-rw-r--r--arch/mips/loongson64/common/Makefile6
-rw-r--r--arch/mips/loongson64/common/cs5536/cs5536_ohci.c2
-rw-r--r--arch/mips/loongson64/common/dma-swiotlb.c109
-rw-r--r--arch/mips/loongson64/common/dma.c18
-rw-r--r--arch/mips/loongson64/common/early_printk.c1
-rw-r--r--arch/mips/loongson64/common/env.c3
-rw-r--r--arch/mips/loongson64/loongson-3/Makefile2
-rw-r--r--arch/mips/loongson64/loongson-3/dma.c25
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c3
-rw-r--r--arch/mips/mm/Makefile3
-rw-r--r--arch/mips/mm/c-r4k.c18
-rw-r--r--arch/mips/mm/cache.c4
-rw-r--r--arch/mips/mm/dma-default.c404
-rw-r--r--arch/mips/mm/dma-noncoherent.c208
-rw-r--r--arch/mips/mm/fault.c2
-rw-r--r--arch/mips/mm/ioremap.c37
-rw-r--r--arch/mips/mm/page.c15
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/mm/uasm-micromips.c1
-rw-r--r--arch/mips/mm/uasm-mips.c1
-rw-r--r--arch/mips/mti-malta/Makefile2
-rw-r--r--arch/mips/mti-malta/malta-pm.c96
-rw-r--r--arch/mips/mti-malta/malta-reset.c30
-rw-r--r--arch/mips/mti-malta/malta-setup.c39
-rw-r--r--arch/mips/netlogic/common/earlycons.c1
-rw-r--r--arch/mips/netlogic/xlp/dt.c14
-rw-r--r--arch/mips/paravirt/serial.c5
-rw-r--r--arch/mips/pci/ops-bridge.c4
-rw-r--r--arch/mips/pci/pci-ar2315.c24
-rw-r--r--arch/mips/pci/pci-ar724x.c42
-rw-r--r--arch/mips/pci/pci-ip27.c25
-rw-r--r--arch/mips/pci/pci-octeon.c4
-rw-r--r--arch/mips/pci/pci.c2
-rw-r--r--arch/mips/pci/pcie-octeon.c6
-rw-r--r--arch/mips/pic32/pic32mzda/early_console.c5
-rw-r--r--arch/mips/ralink/early_printk.c7
-rw-r--r--arch/mips/sgi-ip27/ip27-console.c1
-rw-r--r--arch/mips/sgi-ip32/Makefile2
-rw-r--r--arch/mips/sgi-ip32/ip32-dma.c37
-rw-r--r--arch/mips/sibyte/Kconfig1
-rw-r--r--arch/mips/sibyte/common/cfe.c1
-rw-r--r--arch/mips/txx9/generic/setup.c3
-rw-r--r--arch/mips/txx9/generic/setup_tx4938.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c2
-rw-r--r--arch/mips/vdso/Makefile9
-rw-r--r--arch/mips/vdso/genvdso.h51
-rw-r--r--arch/mips/vr41xx/common/pmu.c3
-rw-r--r--arch/nds32/Kconfig33
-rw-r--r--arch/nds32/Kconfig.debug1
-rw-r--r--arch/nds32/Makefile2
-rw-r--r--arch/nds32/include/asm/cacheflush.h9
-rw-r--r--arch/nds32/include/asm/futex.h2
-rw-r--r--arch/nds32/kernel/setup.c3
-rw-r--r--arch/nds32/mm/cacheflush.c100
-rw-r--r--arch/nds32/mm/fault.c2
-rw-r--r--arch/nios2/Kconfig35
-rw-r--r--arch/nios2/Kconfig.debug5
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/nios2/include/asm/dma-mapping.h20
-rw-r--r--arch/nios2/mm/dma-mapping.c139
-rw-r--r--arch/nios2/mm/fault.c2
-rw-r--r--arch/openrisc/Kconfig35
-rw-r--r--arch/openrisc/Kconfig.debug1
-rw-r--r--arch/openrisc/Makefile1
-rw-r--r--arch/openrisc/include/asm/atomic.h4
-rw-r--r--arch/openrisc/include/asm/cmpxchg.h3
-rw-r--r--arch/openrisc/include/asm/irq.h2
-rw-r--r--arch/openrisc/include/asm/pgalloc.h6
-rw-r--r--arch/openrisc/kernel/entry.S8
-rw-r--r--arch/openrisc/kernel/head.S9
-rw-r--r--arch/openrisc/kernel/irq.c7
-rw-r--r--arch/openrisc/kernel/traps.c2
-rw-r--r--arch/openrisc/mm/fault.c2
-rw-r--r--arch/parisc/Kconfig35
-rw-r--r--arch/parisc/Kconfig.debug5
-rw-r--r--arch/parisc/include/asm/assembly.h2
-rw-r--r--arch/parisc/include/asm/atomic.h107
-rw-r--r--arch/parisc/include/asm/barrier.h32
-rw-r--r--arch/parisc/include/asm/dma-mapping.h5
-rw-r--r--arch/parisc/include/asm/elf.h9
-rw-r--r--arch/parisc/include/asm/linkage.h26
-rw-r--r--arch/parisc/include/asm/processor.h6
-rw-r--r--arch/parisc/include/asm/ptrace.h11
-rw-r--r--arch/parisc/include/asm/spinlock.h8
-rw-r--r--arch/parisc/include/asm/traps.h4
-rw-r--r--arch/parisc/include/asm/unwind.h9
-rw-r--r--arch/parisc/include/uapi/asm/errno.h1
-rw-r--r--arch/parisc/include/uapi/asm/socket.h3
-rw-r--r--arch/parisc/kernel/entry.S118
-rw-r--r--arch/parisc/kernel/pacache.S126
-rw-r--r--arch/parisc/kernel/pci-dma.c199
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/processor.c2
-rw-r--r--arch/parisc/kernel/ptrace.c100
-rw-r--r--arch/parisc/kernel/real2.S6
-rw-r--r--arch/parisc/kernel/setup.c8
-rw-r--r--arch/parisc/kernel/stacktrace.c15
-rw-r--r--arch/parisc/kernel/sys_parisc.c5
-rw-r--r--arch/parisc/kernel/syscall.S63
-rw-r--r--arch/parisc/kernel/traps.c39
-rw-r--r--arch/parisc/kernel/unwind.c128
-rw-r--r--arch/parisc/lib/lusercopy.S21
-rw-r--r--arch/parisc/mm/fault.c2
-rw-r--r--arch/parisc/mm/init.c11
-rw-r--r--arch/powerpc/Kconfig35
-rw-r--r--arch/powerpc/Kconfig.debug5
-rw-r--r--arch/powerpc/Makefile50
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/boot/dts/ac14xx.dts20
-rw-r--r--arch/powerpc/boot/dts/fsl/kmcent2.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/t2080rdb.dts4
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240rdb.dts8
-rw-r--r--arch/powerpc/boot/dts/pdm360ng.dts2
-rw-r--r--arch/powerpc/configs/book3s_32.config2
-rw-r--r--arch/powerpc/configs/dpaa.config1
-rw-r--r--arch/powerpc/configs/wii_defconfig1
-rw-r--r--arch/powerpc/crypto/md5-asm.S1
-rw-r--r--arch/powerpc/crypto/md5-glue.c1
-rw-r--r--arch/powerpc/crypto/sha1-powerpc-asm.S1
-rw-r--r--arch/powerpc/crypto/sha1-spe-glue.c1
-rw-r--r--arch/powerpc/crypto/sha1.c1
-rw-r--r--arch/powerpc/crypto/sha256-spe-glue.c2
-rw-r--r--arch/powerpc/include/asm/asm-405.h19
-rw-r--r--arch/powerpc/include/asm/asm-compat.h26
-rw-r--r--arch/powerpc/include/asm/asm-const.h14
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h11
-rw-r--r--arch/powerpc/include/asm/atomic.h70
-rw-r--r--arch/powerpc/include/asm/barrier.h14
-rw-r--r--arch/powerpc/include/asm/bitops.h1
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h7
-rw-r--r--arch/powerpc/include/asm/book3s/32/tlbflush.h25
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-64k.h5
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb.h20
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h15
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgalloc.h30
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h5
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h40
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h2
-rw-r--r--arch/powerpc/include/asm/book3s/tlbflush.h11
-rw-r--r--arch/powerpc/include/asm/cacheflush.h1
-rw-r--r--arch/powerpc/include/asm/cmpxchg.h2
-rw-r--r--arch/powerpc/include/asm/code-patching-asm.h18
-rw-r--r--arch/powerpc/include/asm/code-patching.h3
-rw-r--r--arch/powerpc/include/asm/copro.h4
-rw-r--r--arch/powerpc/include/asm/cpuidle.h13
-rw-r--r--arch/powerpc/include/asm/cputable.h29
-rw-r--r--arch/powerpc/include/asm/cputime.h1
-rw-r--r--arch/powerpc/include/asm/dbell.h2
-rw-r--r--arch/powerpc/include/asm/dcr-native.h1
-rw-r--r--arch/powerpc/include/asm/debug.h1
-rw-r--r--arch/powerpc/include/asm/dt_cpu_ftrs.h2
-rw-r--r--arch/powerpc/include/asm/eeh.h15
-rw-r--r--arch/powerpc/include/asm/exception-64s.h118
-rw-r--r--arch/powerpc/include/asm/fadump.h3
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h2
-rw-r--r--arch/powerpc/include/asm/firmware.h3
-rw-r--r--arch/powerpc/include/asm/fixmap.h1
-rw-r--r--arch/powerpc/include/asm/futex.h2
-rw-r--r--arch/powerpc/include/asm/head-64.h16
-rw-r--r--arch/powerpc/include/asm/highmem.h2
-rw-r--r--arch/powerpc/include/asm/hugetlb.h3
-rw-r--r--arch/powerpc/include/asm/hvcall.h2
-rw-r--r--arch/powerpc/include/asm/hw_breakpoint.h8
-rw-r--r--arch/powerpc/include/asm/hw_irq.h10
-rw-r--r--arch/powerpc/include/asm/iommu.h12
-rw-r--r--arch/powerpc/include/asm/jump_label.h2
-rw-r--r--arch/powerpc/include/asm/kprobes.h12
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h47
-rw-r--r--arch/powerpc/include/asm/kvm_booke_hv_asm.h2
-rw-r--r--arch/powerpc/include/asm/kvm_host.h26
-rw-r--r--arch/powerpc/include/asm/mmu-44x.h10
-rw-r--r--arch/powerpc/include/asm/mmu.h3
-rw-r--r--arch/powerpc/include/asm/mmu_context.h37
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h7
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h19
-rw-r--r--arch/powerpc/include/asm/nohash/tlbflush.h53
-rw-r--r--arch/powerpc/include/asm/opal-api.h4
-rw-r--r--arch/powerpc/include/asm/opal.h5
-rw-r--r--arch/powerpc/include/asm/paca.h8
-rw-r--r--arch/powerpc/include/asm/page.h3
-rw-r--r--arch/powerpc/include/asm/page_64.h2
-rw-r--r--arch/powerpc/include/asm/pkeys.h38
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h7
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h14
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h1
-rw-r--r--arch/powerpc/include/asm/processor.h5
-rw-r--r--arch/powerpc/include/asm/ptrace.h1
-rw-r--r--arch/powerpc/include/asm/reg.h4
-rw-r--r--arch/powerpc/include/asm/reg_a2.h2
-rw-r--r--arch/powerpc/include/asm/reg_fsl_emb.h2
-rw-r--r--arch/powerpc/include/asm/security_features.h7
-rw-r--r--arch/powerpc/include/asm/setup.h6
-rw-r--r--arch/powerpc/include/asm/smp.h1
-rw-r--r--arch/powerpc/include/asm/sparsemem.h13
-rw-r--r--arch/powerpc/include/asm/spinlock.h2
-rw-r--r--arch/powerpc/include/asm/stacktrace.h13
-rw-r--r--arch/powerpc/include/asm/string.h2
-rw-r--r--arch/powerpc/include/asm/synch.h2
-rw-r--r--arch/powerpc/include/asm/thread_info.h3
-rw-r--r--arch/powerpc/include/asm/tlb.h4
-rw-r--r--arch/powerpc/include/asm/tlbflush.h86
-rw-r--r--arch/powerpc/include/asm/uaccess.h14
-rw-r--r--arch/powerpc/include/asm/xive.h1
-rw-r--r--arch/powerpc/kernel/Makefile17
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/cpu_setup_6xx.S1
-rw-r--r--arch/powerpc/kernel/cputable.c38
-rw-r--r--arch/powerpc/kernel/crash.c1
-rw-r--r--arch/powerpc/kernel/dma.c3
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c4
-rw-r--r--arch/powerpc/kernel/eeh.c2
-rw-r--r--arch/powerpc/kernel/entry_32.S12
-rw-r--r--arch/powerpc/kernel/entry_64.S64
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S5
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S48
-rw-r--r--arch/powerpc/kernel/fadump.c126
-rw-r--r--arch/powerpc/kernel/fpu.S2
-rw-r--r--arch/powerpc/kernel/head_32.S1
-rw-r--r--arch/powerpc/kernel/head_40x.S1
-rw-r--r--arch/powerpc/kernel/head_64.S1
-rw-r--r--arch/powerpc/kernel/head_8xx.S8
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S1
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c47
-rw-r--r--arch/powerpc/kernel/idle_6xx.S1
-rw-r--r--arch/powerpc/kernel/idle_book3e.S7
-rw-r--r--arch/powerpc/kernel/idle_book3s.S54
-rw-r--r--arch/powerpc/kernel/idle_e500.S1
-rw-r--r--arch/powerpc/kernel/idle_power4.S1
-rw-r--r--arch/powerpc/kernel/irq.c41
-rw-r--r--arch/powerpc/kernel/kprobes-ftrace.c72
-rw-r--r--arch/powerpc/kernel/kprobes.c92
-rw-r--r--arch/powerpc/kernel/kvm_emul.S1
-rw-r--r--arch/powerpc/kernel/l2cr_6xx.S1
-rw-r--r--arch/powerpc/kernel/machine_kexec.c8
-rw-r--r--arch/powerpc/kernel/machine_kexec_file_64.c28
-rw-r--r--arch/powerpc/kernel/mce_power.c44
-rw-r--r--arch/powerpc/kernel/misc_32.S1
-rw-r--r--arch/powerpc/kernel/misc_64.S1
-rw-r--r--arch/powerpc/kernel/module.c4
-rw-r--r--arch/powerpc/kernel/pci-common.c7
-rw-r--r--arch/powerpc/kernel/ppc_save_regs.S1
-rw-r--r--arch/powerpc/kernel/process.c64
-rw-r--r--arch/powerpc/kernel/prom.c33
-rw-r--r--arch/powerpc/kernel/prom_init.c12
-rw-r--r--arch/powerpc/kernel/security.c141
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/kernel/setup_32.c11
-rw-r--r--arch/powerpc/kernel/setup_64.c1
-rw-r--r--arch/powerpc/kernel/smp.c50
-rw-r--r--arch/powerpc/kernel/swsusp_32.S1
-rw-r--r--arch/powerpc/kernel/swsusp_asm64.S1
-rw-r--r--arch/powerpc/kernel/tm.S5
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64_mprofile.S39
-rw-r--r--arch/powerpc/kernel/traps.c52
-rw-r--r--arch/powerpc/kernel/vdso.c1
-rw-r--r--arch/powerpc/kernel/vdso32/note.S3
-rw-r--r--arch/powerpc/kernel/vector.S1
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S4
-rw-r--r--arch/powerpc/kernel/watchdog.c1
-rw-r--r--arch/powerpc/kvm/book3s.c1
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c15
-rw-r--r--arch/powerpc/kvm/book3s_64_slb.S3
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c19
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c26
-rw-r--r--arch/powerpc/kvm/book3s_hv.c59
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c1
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S18
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S1
-rw-r--r--arch/powerpc/kvm/book3s_pr.c1
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S1
-rw-r--r--arch/powerpc/kvm/book3s_segment.S3
-rw-r--r--arch/powerpc/kvm/book3s_xive.c19
-rw-r--r--arch/powerpc/kvm/book3s_xive_template.c39
-rw-r--r--arch/powerpc/kvm/e500.c1
-rw-r--r--arch/powerpc/kvm/e500mc.c1
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c7
-rw-r--r--arch/powerpc/kvm/powerpc.c31
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/code-patching.c16
-rw-r--r--arch/powerpc/lib/copy_32.S9
-rw-r--r--arch/powerpc/lib/copypage_64.S1
-rw-r--r--arch/powerpc/lib/copypage_power7.S4
-rw-r--r--arch/powerpc/lib/copyuser_64.S587
-rw-r--r--arch/powerpc/lib/copyuser_power7.S21
-rw-r--r--arch/powerpc/lib/feature-fixups-test.S1
-rw-r--r--arch/powerpc/lib/feature-fixups.c35
-rw-r--r--arch/powerpc/lib/hweight_64.S1
-rw-r--r--arch/powerpc/lib/ldstfp.S1
-rw-r--r--arch/powerpc/lib/locks.c1
-rw-r--r--arch/powerpc/lib/memcmp_64.S414
-rw-r--r--arch/powerpc/lib/memcpy_64.S11
-rw-r--r--arch/powerpc/lib/memcpy_power7.S28
-rw-r--r--arch/powerpc/lib/strlen_32.S78
-rw-r--r--arch/powerpc/lib/vmx-helper.c4
-rw-r--r--arch/powerpc/mm/44x_mmu.c2
-rw-r--r--arch/powerpc/mm/Makefile4
-rw-r--r--arch/powerpc/mm/copro_fault.c2
-rw-r--r--arch/powerpc/mm/dump_hashpagetable.c3
-rw-r--r--arch/powerpc/mm/fault.c8
-rw-r--r--arch/powerpc/mm/hash64_4k.c8
-rw-r--r--arch/powerpc/mm/hash64_64k.c15
-rw-r--r--arch/powerpc/mm/hash_low_32.S1
-rw-r--r--arch/powerpc/mm/hash_native_64.c78
-rw-r--r--arch/powerpc/mm/hash_utils_64.c41
-rw-r--r--arch/powerpc/mm/highmem.c2
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c9
-rw-r--r--arch/powerpc/mm/hugetlbpage.c26
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c25
-rw-r--r--arch/powerpc/mm/mmu_context_hash32.c1
-rw-r--r--arch/powerpc/mm/mmu_context_iommu.c37
-rw-r--r--arch/powerpc/mm/mmu_decl.h1
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c39
-rw-r--r--arch/powerpc/mm/pgtable-radix.c79
-rw-r--r--arch/powerpc/mm/pkeys.c141
-rw-r--r--arch/powerpc/mm/slb.c39
-rw-r--r--arch/powerpc/mm/slb_low.S1
-rw-r--r--arch/powerpc/mm/subpage-prot.c1
-rw-r--r--arch/powerpc/mm/tlb-radix.c18
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S1
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S2
-rw-r--r--arch/powerpc/net/Makefile2
-rw-r--r--arch/powerpc/net/bpf_jit32.h1
-rw-r--r--arch/powerpc/net/bpf_jit_asm.S1
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c1
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c30
-rw-r--r--arch/powerpc/perf/core-book3s.c40
-rw-r--r--arch/powerpc/perf/imc-pmu.c108
-rw-r--r--arch/powerpc/perf/isa207-common.c12
-rw-r--r--arch/powerpc/perf/isa207-common.h5
-rw-r--r--arch/powerpc/perf/power9-pmu.c54
-rw-r--r--arch/powerpc/perf/req-gen/_begin.h2
-rw-r--r--arch/powerpc/perf/req-gen/perf.h1
-rw-r--r--arch/powerpc/platforms/4xx/msi.c51
-rw-r--r--arch/powerpc/platforms/52xx/Makefile2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pm.c5
-rw-r--r--arch/powerpc/platforms/85xx/t1042rdb_diu.c4
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype39
-rw-r--r--arch/powerpc/platforms/cell/Makefile2
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/fault.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/sputrace.h1
-rw-r--r--arch/powerpc/platforms/chrp/nvram.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c10
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c6
-rw-r--r--arch/powerpc/platforms/pasemi/gpio_mdio.c2
-rw-r--r--arch/powerpc/platforms/pasemi/idle.c4
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c2
-rw-r--r--arch/powerpc/platforms/pasemi/misc.c4
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c16
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c53
-rw-r--r--arch/powerpc/platforms/powermac/cache.S1
-rw-r--r--arch/powerpc/platforms/powermac/feature.c2
-rw-r--r--arch/powerpc/platforms/powermac/pci.c12
-rw-r--r--arch/powerpc/platforms/powermac/sleep.S1
-rw-r--r--arch/powerpc/platforms/powermac/time.c4
-rw-r--r--arch/powerpc/platforms/powermac/udbg_scc.c4
-rw-r--r--arch/powerpc/platforms/powernv/Makefile2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c12
-rw-r--r--arch/powerpc/platforms/powernv/idle.c245
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c121
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c17
-rw-r--r--arch/powerpc/platforms/powernv/opal-dump.c18
-rw-r--r--arch/powerpc/platforms/powernv/opal-irqchip.c126
-rw-r--r--arch/powerpc/platforms/powernv/opal-kmsg.c30
-rw-r--r--arch/powerpc/platforms/powernv/opal-sensor-groups.c28
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S4
-rw-r--r--arch/powerpc/platforms/powernv/opal.c154
-rw-r--r--arch/powerpc/platforms/powernv/pci-cxl.c199
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda-tce.c399
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c211
-rw-r--r--arch/powerpc/platforms/powernv/pci.c158
-rw-r--r--arch/powerpc/platforms/powernv/pci.h53
-rw-r--r--arch/powerpc/platforms/powernv/setup.c10
-rw-r--r--arch/powerpc/platforms/powernv/smp.c27
-rw-r--r--arch/powerpc/platforms/powernv/vas.h1
-rw-r--r--arch/powerpc/platforms/pseries/Makefile2
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S1
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c51
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c1
-rw-r--r--arch/powerpc/platforms/pseries/ras.c51
-rw-r--r--arch/powerpc/platforms/pseries/setup.c53
-rw-r--r--arch/powerpc/purgatory/Makefile3
-rw-r--r--arch/powerpc/purgatory/trampoline.S10
-rw-r--r--arch/powerpc/sysdev/Makefile2
-rw-r--r--arch/powerpc/sysdev/cpm1.c1
-rw-r--r--arch/powerpc/sysdev/fsl_mpic_err.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c6
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c2
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c15
-rw-r--r--arch/powerpc/sysdev/xive/common.c30
-rw-r--r--arch/powerpc/sysdev/xive/native.c34
-rwxr-xr-xarch/powerpc/tools/checkpatch.sh22
-rw-r--r--arch/powerpc/xmon/spr_access.S1
-rw-r--r--arch/powerpc/xmon/xmon.c42
-rw-r--r--arch/riscv/Kconfig72
-rw-r--r--arch/riscv/Kconfig.debug37
-rw-r--r--arch/riscv/Makefile4
-rw-r--r--arch/riscv/configs/defconfig1
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/include/asm/atomic.h166
-rw-r--r--arch/riscv/include/asm/csr.h1
-rw-r--r--arch/riscv/include/asm/irq.h5
-rw-r--r--arch/riscv/include/asm/perf_event.h1
-rw-r--r--arch/riscv/include/asm/smp.h6
-rw-r--r--arch/riscv/include/asm/unistd.h5
-rw-r--r--arch/riscv/include/asm/vdso.h2
-rw-r--r--arch/riscv/include/uapi/asm/elf.h9
-rw-r--r--arch/riscv/include/uapi/asm/syscalls.h13
-rw-r--r--arch/riscv/kernel/entry.S4
-rw-r--r--arch/riscv/kernel/head.S2
-rw-r--r--arch/riscv/kernel/irq.c55
-rw-r--r--arch/riscv/kernel/module.c26
-rw-r--r--arch/riscv/kernel/perf_event.c1
-rw-r--r--arch/riscv/kernel/ptrace.c2
-rw-r--r--arch/riscv/kernel/setup.c32
-rw-r--r--arch/riscv/kernel/smp.c6
-rw-r--r--arch/riscv/kernel/smpboot.c1
-rw-r--r--arch/riscv/kernel/sys_riscv.c12
-rw-r--r--arch/riscv/kernel/time.c30
-rw-r--r--arch/riscv/kernel/traps.c1
-rw-r--r--arch/riscv/kernel/vdso/Makefile4
-rw-r--r--arch/riscv/lib/Makefile1
-rw-r--r--arch/riscv/lib/tishift.S42
-rw-r--r--arch/riscv/mm/fault.c3
-rw-r--r--arch/riscv/mm/init.c2
-rw-r--r--arch/s390/Kconfig35
-rw-r--r--arch/s390/Kconfig.debug5
-rw-r--r--arch/s390/Makefile58
-rw-r--r--arch/s390/appldata/appldata_base.c122
-rw-r--r--arch/s390/boot/Makefile45
-rw-r--r--arch/s390/boot/als.c (renamed from arch/s390/kernel/als.c)20
-rw-r--r--arch/s390/boot/compressed/.gitignore1
-rw-r--r--arch/s390/boot/compressed/Makefile55
-rw-r--r--arch/s390/boot/compressed/head.S8
-rw-r--r--arch/s390/boot/compressed/misc.c37
-rw-r--r--arch/s390/boot/compressed/vmlinux.lds.S15
-rw-r--r--arch/s390/boot/compressed/vmlinux.scr.lds.S (renamed from arch/s390/boot/compressed/vmlinux.scr)4
-rw-r--r--arch/s390/boot/ebcdic.c2
-rw-r--r--arch/s390/boot/head.S (renamed from arch/s390/kernel/head.S)11
-rw-r--r--arch/s390/boot/head_kdump.S (renamed from arch/s390/kernel/head_kdump.S)0
-rw-r--r--arch/s390/boot/mem.S2
-rw-r--r--arch/s390/boot/sclp_early_core.c2
-rw-r--r--arch/s390/crypto/aes_s390.c1
-rw-r--r--arch/s390/crypto/ghash_s390.c1
-rw-r--r--arch/s390/crypto/sha1_s390.c1
-rw-r--r--arch/s390/crypto/sha256_s390.c2
-rw-r--r--arch/s390/crypto/sha512_s390.c2
-rw-r--r--arch/s390/hypfs/hypfs_diag.c4
-rw-r--r--arch/s390/hypfs/inode.c6
-rw-r--r--arch/s390/include/asm/ap.h284
-rw-r--r--arch/s390/include/asm/atomic.h65
-rw-r--r--arch/s390/include/asm/cpu_mf.h12
-rw-r--r--arch/s390/include/asm/ftrace.h6
-rw-r--r--arch/s390/include/asm/gmap.h10
-rw-r--r--arch/s390/include/asm/hugetlb.h5
-rw-r--r--arch/s390/include/asm/kprobes.h2
-rw-r--r--arch/s390/include/asm/kvm_host.h11
-rw-r--r--arch/s390/include/asm/lowcore.h2
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h1
-rw-r--r--arch/s390/include/asm/nospec-insn.h4
-rw-r--r--arch/s390/include/asm/pci.h5
-rw-r--r--arch/s390/include/asm/pgtable.h13
-rw-r--r--arch/s390/include/asm/purgatory.h6
-rw-r--r--arch/s390/include/asm/qdio.h1
-rw-r--r--arch/s390/include/asm/sections.h2
-rw-r--r--arch/s390/include/asm/setup.h4
-rw-r--r--arch/s390/include/uapi/asm/chsc.h10
-rw-r--r--arch/s390/include/uapi/asm/kvm.h5
-rw-r--r--arch/s390/include/uapi/asm/socket.h3
-rw-r--r--arch/s390/kernel/Makefile24
-rw-r--r--arch/s390/kernel/compat_wrapper.c1
-rw-r--r--arch/s390/kernel/crash_dump.c104
-rw-r--r--arch/s390/kernel/early.c12
-rw-r--r--arch/s390/kernel/entry.S8
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/ftrace.c2
-rw-r--r--arch/s390/kernel/head64.S43
-rw-r--r--arch/s390/kernel/kprobes.c86
-rw-r--r--arch/s390/kernel/mcount.S2
-rw-r--r--arch/s390/kernel/nospec-branch.c12
-rw-r--r--arch/s390/kernel/nospec-sysfs.c2
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c16
-rw-r--r--arch/s390/kernel/perf_regs.c6
-rw-r--r--arch/s390/kernel/setup.c4
-rw-r--r--arch/s390/kernel/signal.c3
-rw-r--r--arch/s390/kernel/syscalls/Makefile6
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/s390/kernel/sysinfo.c4
-rw-r--r--arch/s390/kernel/time.c15
-rw-r--r--arch/s390/kernel/topology.c44
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S13
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c383
-rw-r--r--arch/s390/kvm/priv.c143
-rw-r--r--arch/s390/kvm/vsie.c11
-rw-r--r--arch/s390/lib/mem.S16
-rw-r--r--arch/s390/mm/cmm.c74
-rw-r--r--arch/s390/mm/extmem.c4
-rw-r--r--arch/s390/mm/fault.c15
-rw-r--r--arch/s390/mm/gmap.c454
-rw-r--r--arch/s390/mm/hugetlbpage.c24
-rw-r--r--arch/s390/mm/page-states.c2
-rw-r--r--arch/s390/mm/pageattr.c6
-rw-r--r--arch/s390/mm/pgalloc.c6
-rw-r--r--arch/s390/mm/pgtable.c159
-rw-r--r--arch/s390/net/bpf_jit_comp.c3
-rw-r--r--arch/s390/numa/numa.c16
-rw-r--r--arch/s390/pci/pci_debug.c8
-rw-r--r--arch/s390/purgatory/Makefile12
-rw-r--r--arch/s390/purgatory/head.S45
-rw-r--r--arch/s390/purgatory/purgatory.c9
-rw-r--r--arch/s390/scripts/Makefile.chkbss11
-rw-r--r--arch/s390/tools/gen_facilities.c3
-rw-r--r--arch/s390/tools/gen_opcode_table.c4
-rw-r--r--arch/sh/Kconfig30
-rw-r--r--arch/sh/Kconfig.debug5
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c282
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c217
-rw-r--r--arch/sh/boards/mach-migor/setup.c8
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c120
-rw-r--r--arch/sh/boards/of-generic.c7
-rw-r--r--arch/sh/drivers/pci/pci.c2
-rw-r--r--arch/sh/include/asm/Kbuild1
-rw-r--r--arch/sh/include/asm/atomic.h35
-rw-r--r--arch/sh/include/asm/cacheflush.h7
-rw-r--r--arch/sh/include/asm/cmpxchg-xchg.h3
-rw-r--r--arch/sh/include/asm/dma-mapping.h23
-rw-r--r--arch/sh/include/asm/hw_breakpoint.h8
-rw-r--r--arch/sh/include/asm/kexec.h3
-rw-r--r--arch/sh/include/asm/kprobes.h4
-rw-r--r--arch/sh/kernel/Makefile4
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7723.c2
-rw-r--r--arch/sh/kernel/dma-coherent.c83
-rw-r--r--arch/sh/kernel/dma-nommu.c88
-rw-r--r--arch/sh/kernel/dwarf.c2
-rw-r--r--arch/sh/kernel/hw_breakpoint.c53
-rw-r--r--arch/sh/kernel/kprobes.c72
-rw-r--r--arch/sh/mm/Kconfig2
-rw-r--r--arch/sh/mm/consistent.c87
-rw-r--r--arch/sh/mm/fault.c4
-rw-r--r--arch/sh/mm/init.c10
-rw-r--r--arch/sparc/Kconfig34
-rw-r--r--arch/sparc/Kconfig.debug5
-rw-r--r--arch/sparc/Makefile6
-rw-r--r--arch/sparc/crypto/md5_glue.c1
-rw-r--r--arch/sparc/crypto/sha1_glue.c1
-rw-r--r--arch/sparc/crypto/sha256_glue.c2
-rw-r--r--arch/sparc/crypto/sha512_glue.c2
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/atomic_32.h24
-rw-r--r--arch/sparc/include/asm/atomic_64.h65
-rw-r--r--arch/sparc/include/asm/dma-mapping.h5
-rw-r--r--arch/sparc/include/asm/io_64.h19
-rw-r--r--arch/sparc/include/asm/kprobes.h1
-rw-r--r--arch/sparc/include/asm/msi.h32
-rw-r--r--arch/sparc/include/uapi/asm/socket.h3
-rw-r--r--arch/sparc/kernel/ioport.c193
-rw-r--r--arch/sparc/kernel/kprobes.c65
-rw-r--r--arch/sparc/kernel/time_64.c2
-rw-r--r--arch/sparc/lib/atomic32.c4
-rw-r--r--arch/sparc/mm/fault_32.c3
-rw-r--r--arch/sparc/mm/fault_64.c3
-rw-r--r--arch/sparc/mm/init_32.c127
-rw-r--r--arch/sparc/mm/srmmu.c20
-rw-r--r--arch/um/Kconfig (renamed from arch/um/Kconfig.um)74
-rw-r--r--arch/um/Kconfig.char124
-rw-r--r--arch/um/Kconfig.common60
-rw-r--r--arch/um/Kconfig.debug5
-rw-r--r--arch/um/Kconfig.rest22
-rw-r--r--arch/um/Makefile15
-rw-r--r--arch/um/drivers/Kconfig (renamed from arch/um/Kconfig.net)125
-rw-r--r--arch/um/drivers/Makefile4
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/unicore32/Kconfig28
-rw-r--r--arch/unicore32/Kconfig.debug5
-rw-r--r--arch/unicore32/mm/fault.c9
-rw-r--r--arch/x86/Kconfig33
-rw-r--r--arch/x86/Kconfig.debug5
-rw-r--r--arch/x86/Makefile5
-rw-r--r--arch/x86/Makefile.um2
-rw-r--r--arch/x86/boot/bitops.h3
-rw-r--r--arch/x86/boot/compressed/Makefile8
-rw-r--r--arch/x86/boot/compressed/eboot.c557
-rw-r--r--arch/x86/boot/compressed/eboot.h12
-rw-r--r--arch/x86/boot/compressed/kaslr.c103
-rw-r--r--arch/x86/boot/compressed/pgtable_64.c73
-rw-r--r--arch/x86/boot/string.c5
-rw-r--r--arch/x86/crypto/aegis128-aesni-asm.S3
-rw-r--r--arch/x86/crypto/aegis128-aesni-glue.c12
-rw-r--r--arch/x86/crypto/aegis128l-aesni-asm.S3
-rw-r--r--arch/x86/crypto/aegis128l-aesni-glue.c12
-rw-r--r--arch/x86/crypto/aegis256-aesni-asm.S3
-rw-r--r--arch/x86/crypto/aegis256-aesni-glue.c12
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S8
-rw-r--r--arch/x86/crypto/aesni-intel_avx-x86_64.S4
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c6
-rw-r--r--arch/x86/crypto/morus1280-avx2-asm.S3
-rw-r--r--arch/x86/crypto/morus1280-avx2-glue.c10
-rw-r--r--arch/x86/crypto/morus1280-sse2-asm.S3
-rw-r--r--arch/x86/crypto/morus1280-sse2-glue.c10
-rw-r--r--arch/x86/crypto/morus640-sse2-asm.S3
-rw-r--r--arch/x86/crypto/morus640-sse2-glue.c10
-rw-r--r--arch/x86/crypto/poly1305_glue.c1
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb.c17
-rw-r--r--arch/x86/crypto/sha1_ssse3_asm.S2
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c4
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c18
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S2
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c8
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb.c18
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c6
-rw-r--r--arch/x86/entry/entry_32.S632
-rw-r--r--arch/x86/entry/entry_64.S23
-rw-r--r--arch/x86/entry/vdso/Makefile26
-rw-r--r--arch/x86/entry/vdso/vdso-note.S3
-rw-r--r--arch/x86/entry/vdso/vdso32/note.S3
-rw-r--r--arch/x86/events/amd/ibs.c6
-rw-r--r--arch/x86/events/intel/core.c34
-rw-r--r--arch/x86/events/intel/ds.c82
-rw-r--r--arch/x86/events/intel/lbr.c56
-rw-r--r--arch/x86/events/intel/uncore.h2
-rw-r--r--arch/x86/events/intel/uncore_snbep.c10
-rw-r--r--arch/x86/events/perf_event.h6
-rw-r--r--arch/x86/hyperv/Makefile2
-rw-r--r--arch/x86/hyperv/hv_apic.c62
-rw-r--r--arch/x86/hyperv/hv_init.c36
-rw-r--r--arch/x86/hyperv/mmu.c80
-rw-r--r--arch/x86/hyperv/nested.c56
-rw-r--r--arch/x86/include/asm/Kbuild1
-rw-r--r--arch/x86/include/asm/apic.h9
-rw-r--r--arch/x86/include/asm/apm.h6
-rw-r--r--arch/x86/include/asm/asm.h59
-rw-r--r--arch/x86/include/asm/atomic.h32
-rw-r--r--arch/x86/include/asm/atomic64_32.h61
-rw-r--r--arch/x86/include/asm/atomic64_64.h50
-rw-r--r--arch/x86/include/asm/cmpxchg.h2
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h4
-rw-r--r--arch/x86/include/asm/cpufeatures.h6
-rw-r--r--arch/x86/include/asm/dmi.h2
-rw-r--r--arch/x86/include/asm/export.h5
-rw-r--r--arch/x86/include/asm/hardirq.h26
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h7
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h25
-rw-r--r--arch/x86/include/asm/i8259.h1
-rw-r--r--arch/x86/include/asm/intel-family.h13
-rw-r--r--arch/x86/include/asm/intel-mid.h43
-rw-r--r--arch/x86/include/asm/intel_ds.h3
-rw-r--r--arch/x86/include/asm/irqflags.h4
-rw-r--r--arch/x86/include/asm/kprobes.h5
-rw-r--r--arch/x86/include/asm/kvm_guest.h7
-rw-r--r--arch/x86/include/asm/kvm_host.h62
-rw-r--r--arch/x86/include/asm/kvm_para.h1
-rw-r--r--arch/x86/include/asm/mmu_context.h5
-rw-r--r--arch/x86/include/asm/mshyperv.h57
-rw-r--r--arch/x86/include/asm/msr-index.h7
-rw-r--r--arch/x86/include/asm/nospec-branch.h2
-rw-r--r--arch/x86/include/asm/orc_types.h2
-rw-r--r--arch/x86/include/asm/page_32_types.h9
-rw-r--r--arch/x86/include/asm/pci-direct.h4
-rw-r--r--arch/x86/include/asm/percpu.h7
-rw-r--r--arch/x86/include/asm/pgtable-2level.h26
-rw-r--r--arch/x86/include/asm/pgtable-2level_types.h3
-rw-r--r--arch/x86/include/asm/pgtable-3level.h44
-rw-r--r--arch/x86/include/asm/pgtable-3level_types.h6
-rw-r--r--arch/x86/include/asm/pgtable-invert.h41
-rw-r--r--arch/x86/include/asm/pgtable.h168
-rw-r--r--arch/x86/include/asm/pgtable_32.h2
-rw-r--r--arch/x86/include/asm/pgtable_32_types.h9
-rw-r--r--arch/x86/include/asm/pgtable_64.h127
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h3
-rw-r--r--arch/x86/include/asm/pgtable_types.h28
-rw-r--r--arch/x86/include/asm/processor-flags.h8
-rw-r--r--arch/x86/include/asm/processor.h18
-rw-r--r--arch/x86/include/asm/pti.h3
-rw-r--r--arch/x86/include/asm/qspinlock_paravirt.h2
-rw-r--r--arch/x86/include/asm/refcount.h1
-rw-r--r--arch/x86/include/asm/sections.h1
-rw-r--r--arch/x86/include/asm/set_memory.h1
-rw-r--r--arch/x86/include/asm/switch_to.h16
-rw-r--r--arch/x86/include/asm/text-patching.h1
-rw-r--r--arch/x86/include/asm/tlbflush.h21
-rw-r--r--arch/x86/include/asm/topology.h6
-rw-r--r--arch/x86/include/asm/trace/hyperv.h29
-rw-r--r--arch/x86/include/asm/tsc.h4
-rw-r--r--arch/x86/include/asm/uaccess_64.h7
-rw-r--r--arch/x86/include/asm/unwind_hints.h16
-rw-r--r--arch/x86/include/asm/vmx.h14
-rw-r--r--arch/x86/include/asm/xen/hypercall.h25
-rw-r--r--arch/x86/include/uapi/asm/kvm.h37
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/acpi/cstate.c2
-rw-r--r--arch/x86/kernel/alternative.c7
-rw-r--r--arch/x86/kernel/apic/apic.c23
-rw-r--r--arch/x86/kernel/apic/io_apic.c1
-rw-r--r--arch/x86/kernel/apic/msi.c1
-rw-r--r--arch/x86/kernel/apic/vector.c20
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c4
-rw-r--r--arch/x86/kernel/apm_32.c5
-rw-r--r--arch/x86/kernel/asm-offsets.c5
-rw-r--r--arch/x86/kernel/asm-offsets_32.c10
-rw-r--r--arch/x86/kernel/asm-offsets_64.c2
-rw-r--r--arch/x86/kernel/cpu/Makefile4
-rw-r--r--arch/x86/kernel/cpu/amd.c68
-rw-r--r--arch/x86/kernel/cpu/bugs.c198
-rw-r--r--arch/x86/kernel/cpu/common.c106
-rw-r--r--arch/x86/kernel/cpu/cpu.h3
-rw-r--r--arch/x86/kernel/cpu/intel.c17
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c11
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.h143
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c129
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c1522
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h43
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c812
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c205
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c16
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c6
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c3
-rw-r--r--arch/x86/kernel/cpu/topology.c41
-rw-r--r--arch/x86/kernel/dumpstack.c28
-rw-r--r--arch/x86/kernel/early-quirks.c18
-rw-r--r--arch/x86/kernel/fpu/core.c1
-rw-r--r--arch/x86/kernel/head_32.S20
-rw-r--r--arch/x86/kernel/head_64.S2
-rw-r--r--arch/x86/kernel/hpet.c1
-rw-r--r--arch/x86/kernel/hw_breakpoint.c131
-rw-r--r--arch/x86/kernel/i8259.c1
-rw-r--r--arch/x86/kernel/idt.c1
-rw-r--r--arch/x86/kernel/irq.c1
-rw-r--r--arch/x86/kernel/irq_32.c1
-rw-r--r--arch/x86/kernel/irq_64.c1
-rw-r--r--arch/x86/kernel/irqflags.S26
-rw-r--r--arch/x86/kernel/irqinit.c1
-rw-r--r--arch/x86/kernel/jump_label.c11
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c2
-rw-r--r--arch/x86/kernel/kprobes/common.h10
-rw-r--r--arch/x86/kernel/kprobes/core.c124
-rw-r--r--arch/x86/kernel/kprobes/ftrace.c49
-rw-r--r--arch/x86/kernel/kprobes/opt.c1
-rw-r--r--arch/x86/kernel/kvm.c128
-rw-r--r--arch/x86/kernel/kvmclock.c263
-rw-r--r--arch/x86/kernel/ldt.c137
-rw-r--r--arch/x86/kernel/machine_kexec_32.c5
-rw-r--r--arch/x86/kernel/paravirt.c14
-rw-r--r--arch/x86/kernel/paravirt_patch_64.c2
-rw-r--r--arch/x86/kernel/pci-dma.c5
-rw-r--r--arch/x86/kernel/pci-iommu_table.c2
-rw-r--r--arch/x86/kernel/pcspeaker.c2
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/setup.c21
-rw-r--r--arch/x86/kernel/smp.c1
-rw-r--r--arch/x86/kernel/smpboot.c23
-rw-r--r--arch/x86/kernel/stacktrace.c42
-rw-r--r--arch/x86/kernel/time.c1
-rw-r--r--arch/x86/kernel/tsc.c259
-rw-r--r--arch/x86/kernel/tsc_msr.c96
-rw-r--r--arch/x86/kernel/unwind_orc.c52
-rw-r--r--arch/x86/kernel/vm86_32.c4
-rw-r--r--arch/x86/kernel/vmlinux.lds.S17
-rw-r--r--arch/x86/kernel/x86_init.c2
-rw-r--r--arch/x86/kvm/Kconfig2
-rw-r--r--arch/x86/kvm/cpuid.c3
-rw-r--r--arch/x86/kvm/emulate.c2
-rw-r--r--arch/x86/kvm/hyperv.c27
-rw-r--r--arch/x86/kvm/hyperv.h2
-rw-r--r--arch/x86/kvm/lapic.c42
-rw-r--r--arch/x86/kvm/mmu.c534
-rw-r--r--arch/x86/kvm/mmu.h24
-rw-r--r--arch/x86/kvm/paging_tmpl.h28
-rw-r--r--arch/x86/kvm/svm.c20
-rw-r--r--arch/x86/kvm/vmx.c1654
-rw-r--r--arch/x86/kvm/x86.c155
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/mm/dump_pagetables.c27
-rw-r--r--arch/x86/mm/fault.c7
-rw-r--r--arch/x86/mm/init.c62
-rw-r--r--arch/x86/mm/init_64.c14
-rw-r--r--arch/x86/mm/kmmio.c25
-rw-r--r--arch/x86/mm/mmap.c21
-rw-r--r--arch/x86/mm/numa_emulation.c107
-rw-r--r--arch/x86/mm/pageattr.c27
-rw-r--r--arch/x86/mm/pgtable.c169
-rw-r--r--arch/x86/mm/pti.c262
-rw-r--r--arch/x86/mm/tlb.c224
-rw-r--r--arch/x86/net/bpf_jit_comp32.c8
-rw-r--r--arch/x86/pci/common.c4
-rw-r--r--arch/x86/pci/early.c44
-rw-r--r--arch/x86/platform/efi/efi_64.c103
-rw-r--r--arch/x86/platform/efi/quirks.c14
-rw-r--r--arch/x86/platform/intel-mid/Makefile2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c1
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c23
-rw-r--r--arch/x86/platform/intel-mid/intel_mid_weak_decls.h18
-rw-r--r--arch/x86/platform/intel-mid/mfld.c70
-rw-r--r--arch/x86/platform/intel-mid/mrfld.c105
-rw-r--r--arch/x86/platform/olpc/olpc.c4
-rw-r--r--arch/x86/platform/uv/tlb_uv.c3
-rw-r--r--arch/x86/power/hibernate_64.c36
-rw-r--r--arch/x86/power/hibernate_asm_64.S2
-rw-r--r--arch/x86/purgatory/Makefile5
-rw-r--r--arch/x86/tools/relocs.c1
-rw-r--r--arch/x86/um/Kconfig15
-rw-r--r--arch/x86/um/mem_32.c2
-rw-r--r--arch/x86/um/vdso/.gitignore1
-rw-r--r--arch/x86/um/vdso/Makefile16
-rw-r--r--arch/x86/xen/enlighten.c1
-rw-r--r--arch/x86/xen/enlighten_pv.c77
-rw-r--r--arch/x86/xen/irq.c4
-rw-r--r--arch/x86/xen/mmu_pv.c6
-rw-r--r--arch/x86/xen/multicalls.c6
-rw-r--r--arch/x86/xen/spinlock.c4
-rw-r--r--arch/x86/xen/suspend_pv.c5
-rw-r--r--arch/x86/xen/time.c18
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--arch/xtensa/Kconfig89
-rw-r--r--arch/xtensa/Kconfig.debug5
-rw-r--r--arch/xtensa/boot/Makefile3
-rw-r--r--arch/xtensa/boot/boot-elf/bootstrap.S19
-rw-r--r--arch/xtensa/configs/nommu_kc705_defconfig2
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--arch/xtensa/include/asm/atomic.h98
-rw-r--r--arch/xtensa/include/asm/cacheasm.h69
-rw-r--r--arch/xtensa/include/asm/dma-mapping.h26
-rw-r--r--arch/xtensa/include/asm/hw_breakpoint.h7
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h42
-rw-r--r--arch/xtensa/include/asm/irq.h21
-rw-r--r--arch/xtensa/include/asm/kmem_layout.h6
-rw-r--r--arch/xtensa/include/asm/page.h5
-rw-r--r--arch/xtensa/include/asm/pgtable.h8
-rw-r--r--arch/xtensa/include/asm/platform.h27
-rw-r--r--arch/xtensa/include/asm/processor.h1
-rw-r--r--arch/xtensa/include/asm/vectors.h1
-rw-r--r--arch/xtensa/include/uapi/asm/socket.h3
-rw-r--r--arch/xtensa/kernel/head.S2
-rw-r--r--arch/xtensa/kernel/hw_breakpoint.c40
-rw-r--r--arch/xtensa/kernel/irq.c1
-rw-r--r--arch/xtensa/kernel/pci-dma.c195
-rw-r--r--arch/xtensa/kernel/setup.c10
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S2
-rw-r--r--arch/xtensa/mm/fault.c2
-rw-r--r--arch/xtensa/platforms/iss/include/platform/hardware.h29
-rw-r--r--arch/xtensa/platforms/xt2000/include/platform/hardware.h11
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/hardware.h9
-rw-r--r--arch/xtensa/variants/test_kc705_be/include/variant/core.h575
-rw-r--r--arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h308
-rw-r--r--arch/xtensa/variants/test_kc705_be/include/variant/tie.h182
-rw-r--r--block/Kconfig16
-rw-r--r--block/Makefile4
-rw-r--r--block/bfq-cgroup.c3
-rw-r--r--block/bfq-iosched.c185
-rw-r--r--block/bfq-iosched.h7
-rw-r--r--block/bfq-wf2q.c52
-rw-r--r--block/bio-integrity.c22
-rw-r--r--block/bio.c262
-rw-r--r--block/blk-cgroup.c284
-rw-r--r--block/blk-core.c116
-rw-r--r--block/blk-ioc.c2
-rw-r--r--block/blk-iolatency.c955
-rw-r--r--block/blk-lib.c10
-rw-r--r--block/blk-mq-debugfs-zoned.c24
-rw-r--r--block/blk-mq-debugfs.c24
-rw-r--r--block/blk-mq-debugfs.h9
-rw-r--r--block/blk-mq-pci.c5
-rw-r--r--block/blk-mq-sched.c156
-rw-r--r--block/blk-mq-sched.h5
-rw-r--r--block/blk-mq-tag.c27
-rw-r--r--block/blk-mq.c273
-rw-r--r--block/blk-mq.h13
-rw-r--r--block/blk-rq-qos.c194
-rw-r--r--block/blk-rq-qos.h109
-rw-r--r--block/blk-settings.c6
-rw-r--r--block/blk-stat.c16
-rw-r--r--block/blk-stat.h4
-rw-r--r--block/blk-sysfs.c37
-rw-r--r--block/blk-throttle.c32
-rw-r--r--block/blk-wbt.c429
-rw-r--r--block/blk-wbt.h68
-rw-r--r--block/blk-zoned.c2
-rw-r--r--block/blk.h11
-rw-r--r--block/bounce.c69
-rw-r--r--block/bsg-lib.c5
-rw-r--r--block/bsg.c462
-rw-r--r--block/cfq-iosched.c23
-rw-r--r--block/elevator.c20
-rw-r--r--block/genhd.c29
-rw-r--r--block/partition-generic.c25
-rw-r--r--block/partitions/aix.c13
-rw-r--r--block/partitions/ldm.c3
-rw-r--r--block/t10-pi.c110
-rw-r--r--certs/system_keyring.c3
-rw-r--r--crypto/ablkcipher.c59
-rw-r--r--crypto/aegis128.c1
-rw-r--r--crypto/aegis128l.c3
-rw-r--r--crypto/aegis256.c1
-rw-r--r--crypto/af_alg.c6
-rw-r--r--crypto/api.c2
-rw-r--r--crypto/asymmetric_keys/pkcs7_key_type.c2
-rw-r--r--crypto/async_tx/async_pq.c10
-rw-r--r--crypto/async_tx/raid6test.c4
-rw-r--r--crypto/blkcipher.c55
-rw-r--r--crypto/crypto_null.c1
-rw-r--r--crypto/dh.c66
-rw-r--r--crypto/dh_helper.c43
-rw-r--r--crypto/drbg.c39
-rw-r--r--crypto/ecc.c42
-rw-r--r--crypto/ecc_curve_defs.h22
-rw-r--r--crypto/ghash-generic.c1
-rw-r--r--crypto/lrw.c4
-rw-r--r--crypto/md4.c1
-rw-r--r--crypto/md5.c1
-rw-r--r--crypto/morus1280.c1
-rw-r--r--crypto/morus640.c1
-rw-r--r--crypto/poly1305_generic.c1
-rw-r--r--crypto/rmd128.c1
-rw-r--r--crypto/rmd160.c1
-rw-r--r--crypto/rmd256.c11
-rw-r--r--crypto/rmd320.c13
-rw-r--r--crypto/scatterwalk.c2
-rw-r--r--crypto/sha1_generic.c2
-rw-r--r--crypto/sha256_generic.c4
-rw-r--r--crypto/sha3_generic.c4
-rw-r--r--crypto/sha512_generic.c26
-rw-r--r--crypto/skcipher.c57
-rw-r--r--crypto/sm3_generic.c1
-rw-r--r--crypto/tcrypt.c38
-rw-r--r--crypto/testmgr.c59
-rw-r--r--crypto/testmgr.h233
-rw-r--r--crypto/tgr192.c3
-rw-r--r--crypto/vmac.c444
-rw-r--r--crypto/wp512.c3
-rw-r--r--crypto/xts.c4
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/Kconfig8
-rw-r--r--drivers/acpi/acpi_lpss.c26
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h17
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/dbinput.c10
-rw-r--r--drivers/acpi/acpica/dbmethod.c8
-rw-r--r--drivers/acpi/acpica/dbxface.c10
-rw-r--r--drivers/acpi/acpica/dsfield.c34
-rw-r--r--drivers/acpi/acpica/hwregs.c9
-rw-r--r--drivers/acpi/acpica/hwsleep.c10
-rw-r--r--drivers/acpi/acpica/nsaccess.c20
-rw-r--r--drivers/acpi/acpica/nssearch.c1
-rw-r--r--drivers/acpi/acpica/psloop.c48
-rw-r--r--drivers/acpi/acpica/tbdata.c4
-rw-r--r--drivers/acpi/acpica/utdelete.c7
-rw-r--r--drivers/acpi/acpica/uterror.c6
-rw-r--r--drivers/acpi/acpica/utstrsuppt.c26
-rw-r--r--drivers/acpi/acpica/utstrtoul64.c2
-rw-r--r--drivers/acpi/arm64/iort.c48
-rw-r--r--drivers/acpi/battery.c78
-rw-r--r--drivers/acpi/bus.c40
-rw-r--r--drivers/acpi/button.c17
-rw-r--r--drivers/acpi/ec.c16
-rw-r--r--drivers/acpi/nfit/core.c48
-rw-r--r--drivers/acpi/nfit/nfit.h1
-rw-r--r--drivers/acpi/osi.c8
-rw-r--r--drivers/acpi/pmic/intel_pmic_crc.c109
-rw-r--r--drivers/acpi/pptt.c10
-rw-r--r--drivers/acpi/property.c209
-rw-r--r--drivers/acpi/scan.c23
-rw-r--r--drivers/acpi/sleep.c30
-rw-r--r--drivers/acpi/x86/utils.c22
-rw-r--r--drivers/android/Kconfig2
-rw-r--r--drivers/android/binder.c9
-rw-r--r--drivers/android/binder_alloc.c43
-rw-r--r--drivers/android/binder_trace.h7
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.c60
-rw-r--r--drivers/ata/ahci_mvebu.c2
-rw-r--r--drivers/ata/libahci.c7
-rw-r--r--drivers/ata/libata-core.c6
-rw-r--r--drivers/ata/libata-eh.c41
-rw-r--r--drivers/ata/libata-scsi.c36
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/ata/pata_imx.c1
-rw-r--r--drivers/ata/pata_pxa.c10
-rw-r--r--drivers/ata/pata_samsung_cf.c1
-rw-r--r--drivers/ata/sata_fsl.c9
-rw-r--r--drivers/ata/sata_nv.c3
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/atm/zatm.c4
-rw-r--r--drivers/auxdisplay/arm-charlcd.c6
-rw-r--r--drivers/auxdisplay/charlcd.c5
-rw-r--r--drivers/auxdisplay/hd44780.c1
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/cacheinfo.c24
-rw-r--r--drivers/base/core.c87
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/dd.c142
-rw-r--r--drivers/base/firmware_loader/fallback.c12
-rw-r--r--drivers/base/init.c2
-rw-r--r--drivers/base/memory.c2
-rw-r--r--drivers/base/node.c49
-rw-r--r--drivers/base/power/common.c17
-rw-r--r--drivers/base/power/domain.c42
-rw-r--r--drivers/base/regmap/Kconfig4
-rw-r--r--drivers/base/regmap/Makefile1
-rw-r--r--drivers/base/regmap/internal.h3
-rw-r--r--drivers/base/regmap/regmap-sccb.c128
-rw-r--r--drivers/base/regmap/regmap-slimbus.c23
-rw-r--r--drivers/base/regmap/regmap.c79
-rw-r--r--drivers/bcma/Kconfig3
-rw-r--r--drivers/block/DAC960.c51
-rw-r--r--drivers/block/Kconfig2
-rw-r--r--drivers/block/Makefile5
-rw-r--r--drivers/block/aoe/aoecmd.c1
-rw-r--r--drivers/block/aoe/aoedev.c4
-rw-r--r--drivers/block/brd.c14
-rw-r--r--drivers/block/drbd/drbd_int.h2
-rw-r--r--drivers/block/drbd/drbd_main.c12
-rw-r--r--drivers/block/drbd/drbd_receiver.c6
-rw-r--r--drivers/block/drbd/drbd_req.c4
-rw-r--r--drivers/block/drbd/drbd_worker.c6
-rw-r--r--drivers/block/floppy.c3
-rw-r--r--drivers/block/loop.c4
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c3
-rw-r--r--drivers/block/nbd.c102
-rw-r--r--drivers/block/null_blk.h108
-rw-r--r--drivers/block/null_blk_main.c (renamed from drivers/block/null_blk.c)129
-rw-r--r--drivers/block/null_blk_zoned.c149
-rw-r--r--drivers/block/paride/bpck.c3
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/pktcdvd.c110
-rw-r--r--drivers/block/rbd.c127
-rw-r--r--drivers/block/rsxx/dev.c6
-rw-r--r--drivers/block/skd_main.c16
-rw-r--r--drivers/block/xen-blkfront.c9
-rw-r--r--drivers/block/zram/zram_drv.c41
-rw-r--r--drivers/bluetooth/Kconfig25
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/bfusb.c2
-rw-r--r--drivers/bluetooth/bluecard_cs.c2
-rw-r--r--drivers/bluetooth/bpa10x.c6
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c2
-rw-r--r--drivers/bluetooth/btmtkuart.c629
-rw-r--r--drivers/bluetooth/btqca.c123
-rw-r--r--drivers/bluetooth/btqca.h22
-rw-r--r--drivers/bluetooth/btrtl.c512
-rw-r--r--drivers/bluetooth/btrtl.h53
-rw-r--r--drivers/bluetooth/btusb.c116
-rw-r--r--drivers/bluetooth/hci_h5.c206
-rw-r--r--drivers/bluetooth/hci_intel.c2
-rw-r--r--drivers/bluetooth/hci_qca.c490
-rw-r--r--drivers/bus/imx-weim.c7
-rw-r--r--drivers/cdrom/cdrom.c30
-rw-r--r--drivers/char/Kconfig14
-rw-r--r--drivers/char/agp/alpha-agp.c2
-rw-r--r--drivers/char/agp/amd64-agp.c4
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/hw_random/Kconfig13
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/atmel-rng.c1
-rw-r--r--drivers/char/hw_random/exynos-trng.c1
-rw-r--r--drivers/char/hw_random/imx-rngc.c1
-rw-r--r--drivers/char/hw_random/msm-rng.c183
-rw-r--r--drivers/char/hw_random/powernv-rng.c1
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c6
-rw-r--r--drivers/char/ipmi/kcs_bmc.c31
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c4
-rw-r--r--drivers/char/random.c59
-rw-r--r--drivers/char/rtc.c13
-rw-r--r--drivers/char/tpm/tpm-chip.c68
-rw-r--r--drivers/char/tpm/tpm-interface.c72
-rw-r--r--drivers/char/tpm/tpm.h31
-rw-r--r--drivers/char/tpm/tpm2-cmd.c258
-rw-r--r--drivers/char/tpm/tpm2-space.c12
-rw-r--r--drivers/char/tpm/tpm_crb.c101
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c8
-rw-r--r--drivers/char/tpm/tpm_tis_core.c2
-rw-r--r--drivers/char/tpm/tpm_tis_core.h1
-rw-r--r--drivers/char/tpm/tpm_tis_spi.c9
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c2
-rw-r--r--drivers/char/virtio_console.c60
-rw-r--r--drivers/clk/Kconfig6
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/actions/Kconfig7
-rw-r--r--drivers/clk/actions/Makefile1
-rw-r--r--drivers/clk/actions/owl-s700.c606
-rw-r--r--drivers/clk/at91/Makefile1
-rw-r--r--drivers/clk/at91/clk-i2s-mux.c116
-rw-r--r--drivers/clk/clk-aspeed.c61
-rw-r--r--drivers/clk/clk-cs2000-cp.c5
-rw-r--r--drivers/clk/clk-fixed-factor.c9
-rw-r--r--drivers/clk/clk-max9485.c387
-rw-r--r--drivers/clk/clk-scmi.c5
-rw-r--r--drivers/clk/clk-si514.c38
-rw-r--r--drivers/clk/clk-si544.c38
-rw-r--r--drivers/clk/clk.c221
-rw-r--r--drivers/clk/clkdev.c5
-rw-r--r--drivers/clk/davinci/da8xx-cfgchip.c2
-rw-r--r--drivers/clk/davinci/psc-da830.c3
-rw-r--r--drivers/clk/davinci/psc-da850.c3
-rw-r--r--drivers/clk/davinci/psc-dm365.c3
-rw-r--r--drivers/clk/davinci/psc-dm644x.c3
-rw-r--r--drivers/clk/davinci/psc-dm646x.c3
-rw-r--r--drivers/clk/davinci/psc.h2
-rw-r--r--drivers/clk/imx/clk-imx51-imx53.c44
-rw-r--r--drivers/clk/imx/clk-imx6q.c16
-rw-r--r--drivers/clk/imx/clk-imx6sl.c12
-rw-r--r--drivers/clk/imx/clk-imx6sll.c7
-rw-r--r--drivers/clk/imx/clk-imx6sx.c41
-rw-r--r--drivers/clk/imx/clk-imx6ul.c29
-rw-r--r--drivers/clk/imx/clk-imx7d.c1
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c4
-rw-r--r--drivers/clk/meson/Kconfig28
-rw-r--r--drivers/clk/meson/Makefile4
-rw-r--r--drivers/clk/meson/axg-audio.c845
-rw-r--r--drivers/clk/meson/axg-audio.h127
-rw-r--r--drivers/clk/meson/axg.c244
-rw-r--r--drivers/clk/meson/axg.h8
-rw-r--r--drivers/clk/meson/clk-audio-divider.c110
-rw-r--r--drivers/clk/meson/clk-phase.c63
-rw-r--r--drivers/clk/meson/clk-triphase.c68
-rw-r--r--drivers/clk/meson/clkc-audio.h28
-rw-r--r--drivers/clk/meson/clkc.h11
-rw-r--r--drivers/clk/meson/gxbb.c119
-rw-r--r--drivers/clk/meson/gxbb.h5
-rw-r--r--drivers/clk/meson/sclk-div.c243
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c47
-rw-r--r--drivers/clk/pxa/clk-pxa25x.c6
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c7
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c7
-rw-r--r--drivers/clk/qcom/Kconfig19
-rw-r--r--drivers/clk/qcom/Makefile2
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c10
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h14
-rw-r--r--drivers/clk/qcom/clk-branch.c10
-rw-r--r--drivers/clk/qcom/clk-branch.h14
-rw-r--r--drivers/clk/qcom/clk-rcg.h2
-rw-r--r--drivers/clk/qcom/clk-regmap.c10
-rw-r--r--drivers/clk/qcom/clk-regmap.h14
-rw-r--r--drivers/clk/qcom/clk-rpmh.c329
-rw-r--r--drivers/clk/qcom/common.c10
-rw-r--r--drivers/clk/qcom/common.h15
-rw-r--r--drivers/clk/qcom/dispcc-sdm845.c685
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c3
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c2
-rw-r--r--drivers/clk/qcom/gcc-mdm9615.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c5
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c5
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c3
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c2
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c45
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c3
-rw-r--r--drivers/clk/qcom/videocc-sdm845.c2
-rw-r--r--drivers/clk/renesas/Kconfig6
-rw-r--r--drivers/clk/renesas/Makefile1
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c893
-rw-r--r--drivers/clk/rockchip/Makefile2
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c227
-rw-r--r--drivers/clk/rockchip/clk-px30.c1039
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c3
-rw-r--r--drivers/clk/rockchip/clk.c10
-rw-r--r--drivers/clk/rockchip/clk.h126
-rw-r--r--drivers/clk/samsung/clk-exynos4412-isp.c2
-rw-r--r--drivers/clk/socfpga/clk-s10.c9
-rw-r--r--drivers/clk/sunxi-ng/Makefile39
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c11
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c58
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.h8
-rw-r--r--drivers/clk/tegra/Makefile2
-rw-r--r--drivers/clk/tegra/clk-bpmp.c12
-rw-r--r--drivers/clk/tegra/clk-divider.c35
-rw-r--r--drivers/clk/tegra/clk-emc.c2
-rw-r--r--drivers/clk/tegra/clk-id.h2
-rw-r--r--drivers/clk/tegra/clk-sdmmc-mux.c251
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c11
-rw-r--r--drivers/clk/tegra/clk-tegra124.c3
-rw-r--r--drivers/clk/tegra/clk-tegra210.c14
-rw-r--r--drivers/clk/tegra/clk-utils.c43
-rw-r--r--drivers/clk/tegra/clk.h30
-rw-r--r--drivers/clk/uniphier/clk-uniphier-peri.c9
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c58
-rw-r--r--drivers/clocksource/Kconfig11
-rw-r--r--drivers/clocksource/Makefile3
-rw-r--r--drivers/clocksource/arm_arch_timer.c2
-rw-r--r--drivers/clocksource/mtk_timer.c268
-rw-r--r--drivers/clocksource/riscv_timer.c105
-rw-r--r--drivers/clocksource/tegra20_timer.c4
-rw-r--r--drivers/clocksource/timer-atcpit100.c2
-rw-r--r--drivers/clocksource/timer-keystone.c2
-rw-r--r--drivers/clocksource/timer-mediatek.c328
-rw-r--r--drivers/clocksource/timer-sprd.c50
-rw-r--r--drivers/clocksource/timer-ti-32k.c3
-rw-r--r--drivers/clocksource/zevio-timer.c2
-rw-r--r--drivers/connector/connector.c3
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c163
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c52
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/cpufreq_governor.c12
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c21
-rw-r--r--drivers/cpufreq/intel_pstate.c61
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c13
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c20
-rw-r--r--drivers/cpufreq/qcom-cpufreq-kryo.c6
-rw-r--r--drivers/cpuidle/cpuidle-arm.c3
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c158
-rw-r--r--drivers/cpuidle/governors/menu.c45
-rw-r--r--drivers/crypto/Kconfig15
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c18
-rw-r--r--drivers/crypto/atmel-ecc.c35
-rw-r--r--drivers/crypto/atmel-sha.c4
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c28
-rw-r--r--drivers/crypto/bcm/cipher.c8
-rw-r--r--drivers/crypto/caam/caamhash.c3
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c3
-rw-r--r--drivers/crypto/ccp/psp-dev.c35
-rw-r--r--drivers/crypto/ccp/psp-dev.h19
-rw-r--r--drivers/crypto/ccp/sp-dev.h7
-rw-r--r--drivers/crypto/ccp/sp-pci.c36
-rw-r--r--drivers/crypto/ccree/cc_aead.c16
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c8
-rw-r--r--drivers/crypto/ccree/cc_cipher.c170
-rw-r--r--drivers/crypto/ccree/cc_cipher.h1
-rw-r--r--drivers/crypto/ccree/cc_driver.c4
-rw-r--r--drivers/crypto/ccree/cc_driver.h1
-rw-r--r--drivers/crypto/ccree/cc_hash.c85
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c7
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c2
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_hw.c2
-rw-r--r--drivers/crypto/hisilicon/Kconfig14
-rw-r--r--drivers/crypto/hisilicon/Makefile2
-rw-r--r--drivers/crypto/hisilicon/sec/Makefile3
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c1122
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c1323
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.h428
-rw-r--r--drivers/crypto/inside-secure/safexcel.c474
-rw-r--r--drivers/crypto/inside-secure/safexcel.h201
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c492
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c560
-rw-r--r--drivers/crypto/inside-secure/safexcel_ring.c63
-rw-r--r--drivers/crypto/marvell/hash.c3
-rw-r--r--drivers/crypto/mediatek/mtk-platform.c1
-rw-r--r--drivers/crypto/n2_core.c3
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c31
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c1
-rw-r--r--drivers/crypto/nx/nx-sha256.c1
-rw-r--r--drivers/crypto/nx/nx-sha512.c1
-rw-r--r--drivers/crypto/omap-sham.c36
-rw-r--r--drivers/crypto/padlock-aes.c8
-rw-r--r--drivers/crypto/padlock-sha.c8
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c2
-rw-r--r--drivers/crypto/qce/core.c1
-rw-r--r--drivers/crypto/qce/sha.c3
-rw-r--r--drivers/crypto/qcom-rng.c229
-rw-r--r--drivers/crypto/s5p-sss.c9
-rw-r--r--drivers/crypto/sahara.c10
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c62
-rw-r--r--drivers/crypto/stm32/stm32-hash.c95
-rw-r--r--drivers/crypto/stm32/stm32_crc32.c72
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c20
-rw-r--r--drivers/crypto/talitos.c37
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c1
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c16
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c116
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h25
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c33
-rw-r--r--drivers/crypto/virtio/virtio_crypto_mgr.c81
-rw-r--r--drivers/crypto/vmx/ghash.c2
-rw-r--r--drivers/dax/device.c14
-rw-r--r--drivers/devfreq/devfreq.c16
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c6
-rw-r--r--drivers/devfreq/rk3399_dmc.c102
-rw-r--r--drivers/devfreq/tegra-devfreq.c1
-rw-r--r--drivers/dma-buf/dma-buf.c56
-rw-r--r--drivers/dma-buf/dma-fence-array.c1
-rw-r--r--drivers/dma-buf/dma-fence.c167
-rw-r--r--drivers/dma-buf/reservation.c8
-rw-r--r--drivers/dma-buf/sw_sync.c1
-rw-r--r--drivers/dma/Kconfig9
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/dmaengine.c53
-rw-r--r--drivers/dma/ep93xx_dma.c1
-rw-r--r--drivers/dma/hsu/hsu.c8
-rw-r--r--drivers/dma/idma64.c8
-rw-r--r--drivers/dma/imx-sdma.c578
-rw-r--r--drivers/dma/ioat/dma.c6
-rw-r--r--drivers/dma/k3dma.c2
-rw-r--r--drivers/dma/mic_x100_dma.c8
-rw-r--r--drivers/dma/mv_xor_v2.c16
-rw-r--r--drivers/dma/nbpfaxi.c1
-rw-r--r--drivers/dma/owl-dma.c971
-rw-r--r--drivers/dma/pl330.c14
-rw-r--r--drivers/dma/pxa_dma.c15
-rw-r--r--drivers/dma/s3c24xx-dma.c1
-rw-r--r--drivers/dma/sh/rcar-dmac.c112
-rw-r--r--drivers/dma/ste_dma40.c15
-rw-r--r--drivers/dma/stm32-dma.c4
-rw-r--r--drivers/dma/stm32-mdma.c8
-rw-r--r--drivers/dma/ti/omap-dma.c6
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c22
-rw-r--r--drivers/edac/altera_edac.c3
-rw-r--r--drivers/edac/edac_mc.c1
-rw-r--r--drivers/edac/edac_mc_sysfs.c6
-rw-r--r--drivers/edac/i7core_edac.c22
-rw-r--r--drivers/edac/sb_edac.c17
-rw-r--r--drivers/edac/thunderx_edac.c14
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c1
-rw-r--r--drivers/extcon/extcon-intel-int3496.c2
-rw-r--r--drivers/extcon/extcon-max3355.c1
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c1
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c22
-rw-r--r--drivers/extcon/extcon.c3
-rw-r--r--drivers/firewire/core-cdev.c8
-rw-r--r--drivers/firmware/efi/Kconfig12
-rw-r--r--drivers/firmware/efi/arm-init.c1
-rw-r--r--drivers/firmware/efi/arm-runtime.c18
-rw-r--r--drivers/firmware/efi/cper.c19
-rw-r--r--drivers/firmware/efi/efi.c23
-rw-r--r--drivers/firmware/efi/esrt.c8
-rw-r--r--drivers/firmware/efi/libstub/Makefile8
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c32
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c31
-rw-r--r--drivers/firmware/efi/libstub/efistub.h3
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c202
-rw-r--r--drivers/firmware/google/vpd.c5
-rw-r--r--drivers/firmware/qemu_fw_cfg.c1
-rw-r--r--drivers/fpga/Kconfig68
-rw-r--r--drivers/fpga/Makefile14
-rw-r--r--drivers/fpga/altera-cvp.c6
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c463
-rw-r--r--drivers/fpga/dfl-afu-main.c636
-rw-r--r--drivers/fpga/dfl-afu-region.c166
-rw-r--r--drivers/fpga/dfl-afu.h100
-rw-r--r--drivers/fpga/dfl-fme-br.c114
-rw-r--r--drivers/fpga/dfl-fme-main.c279
-rw-r--r--drivers/fpga/dfl-fme-mgr.c349
-rw-r--r--drivers/fpga/dfl-fme-pr.c479
-rw-r--r--drivers/fpga/dfl-fme-pr.h84
-rw-r--r--drivers/fpga/dfl-fme-region.c89
-rw-r--r--drivers/fpga/dfl-fme.h38
-rw-r--r--drivers/fpga/dfl-pci.c243
-rw-r--r--drivers/fpga/dfl.c1044
-rw-r--r--drivers/fpga/dfl.h410
-rw-r--r--drivers/fpga/fpga-mgr.c28
-rw-r--r--drivers/fpga/fpga-region.c22
-rw-r--r--drivers/fsi/Kconfig32
-rw-r--r--drivers/fsi/Makefile2
-rw-r--r--drivers/fsi/cf-fsi-fw.h157
-rw-r--r--drivers/fsi/fsi-core.c590
-rw-r--r--drivers/fsi/fsi-master-ast-cf.c1440
-rw-r--r--drivers/fsi/fsi-master-gpio.c471
-rw-r--r--drivers/fsi/fsi-master-hub.c5
-rw-r--r--drivers/fsi/fsi-master.h37
-rw-r--r--drivers/fsi/fsi-sbefifo.c1066
-rw-r--r--drivers/fsi/fsi-scom.c558
-rw-r--r--drivers/gnss/Kconfig43
-rw-r--r--drivers/gnss/Makefile16
-rw-r--r--drivers/gnss/core.c420
-rw-r--r--drivers/gnss/serial.c275
-rw-r--r--drivers/gnss/serial.h47
-rw-r--r--drivers/gnss/sirf.c408
-rw-r--r--drivers/gnss/ubx.c153
-rw-r--r--drivers/gpio/Kconfig18
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-aspeed.c426
-rw-r--r--drivers/gpio/gpio-ath79.c2
-rw-r--r--drivers/gpio/gpio-bcm-kona.c6
-rw-r--r--drivers/gpio/gpio-davinci.c70
-rw-r--r--drivers/gpio/gpio-dwapb.c6
-rw-r--r--drivers/gpio/gpio-em.c6
-rw-r--r--drivers/gpio/gpio-it87.c13
-rw-r--r--drivers/gpio/gpio-madera.c206
-rw-r--r--drivers/gpio/gpio-max732x.c12
-rw-r--r--drivers/gpio/gpio-menz127.c4
-rw-r--r--drivers/gpio/gpio-ml-ioh.c3
-rw-r--r--drivers/gpio/gpio-mmio.c108
-rw-r--r--drivers/gpio/gpio-mt7621.c332
-rw-r--r--drivers/gpio/gpio-mxc.c73
-rw-r--r--drivers/gpio/gpio-mxs.c3
-rw-r--r--drivers/gpio/gpio-omap.c88
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pisosr.c22
-rw-r--r--drivers/gpio/gpio-pxa.c42
-rw-r--r--drivers/gpio/gpio-rc5t583.c2
-rw-r--r--drivers/gpio/gpio-rcar.c10
-rw-r--r--drivers/gpio/gpio-rdc321x.c2
-rw-r--r--drivers/gpio/gpio-sa1100.c2
-rw-r--r--drivers/gpio/gpio-sch.c11
-rw-r--r--drivers/gpio/gpio-sch311x.c70
-rw-r--r--drivers/gpio/gpio-spear-spics.c2
-rw-r--r--drivers/gpio/gpio-sta2x11.c41
-rw-r--r--drivers/gpio/gpio-stmpe.c2
-rw-r--r--drivers/gpio/gpio-stp-xway.c18
-rw-r--r--drivers/gpio/gpio-syscon.c33
-rw-r--r--drivers/gpio/gpio-tb10x.c3
-rw-r--r--drivers/gpio/gpio-tegra.c30
-rw-r--r--drivers/gpio/gpio-tegra186.c74
-rw-r--r--drivers/gpio/gpio-timberdale.c2
-rw-r--r--drivers/gpio/gpio-uniphier.c9
-rw-r--r--drivers/gpio/gpio-vr41xx.c8
-rw-r--r--drivers/gpio/gpio-xgene-sb.c6
-rw-r--r--drivers/gpio/gpio-xilinx.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c67
-rw-r--r--drivers/gpio/gpiolib-of.c6
-rw-r--r--drivers/gpio/gpiolib.c82
-rw-r--r--drivers/gpio/gpiolib.h2
-rw-r--r--drivers/gpu/drm/Kconfig21
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ObjectID.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h149
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c184
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c267
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c263
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c403
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c266
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c202
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c242
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c418
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c221
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c185
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c235
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c166
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c400
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c299
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c507
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c54
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_int.h7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h458
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c69
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c57
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h37
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c48
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c122
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c283
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c118
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c22
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c41
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c31
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h40
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c43
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig17
-rw-r--r--drivers/gpu/drm/amd/display/TODO8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c153
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c42
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c722
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h)18
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c49
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c104
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c562
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c329
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/log_helpers.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c405
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c196
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c220
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c215
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c110
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c176
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c141
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c512
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c937
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c724
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h89
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c431
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c362
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c518
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c100
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c128
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c88
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c159
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h180
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_defs.h46
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h16
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h136
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h59
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c147
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/luts_1d.h51
-rw-r--r--drivers/gpu/drm/amd/display/modules/stats/stats.c4
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h46
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h20
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h20
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h37
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h55
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h98
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h50
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h50
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h (renamed from drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h)11
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h33
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h34
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h34
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h34
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h37
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h40
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h15
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c109
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c96
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c56
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c155
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c43
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c57
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c179
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c1137
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h16
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h11
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h23
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c119
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c74
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c108
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c80
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c150
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h32
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c80
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c168
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c220
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c85
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c5
-rw-r--r--drivers/gpu/drm/arc/arcpgu_sim.c2
-rw-r--r--drivers/gpu/drm/arm/Makefile1
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c35
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c76
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.h2
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c10
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c187
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h26
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c297
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h40
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c250
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.h14
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h24
-rw-r--r--drivers/gpu/drm/armada/Makefile2
-rw-r--r--drivers/gpu/drm/armada/armada_510.c24
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1020
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h56
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h14
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c54
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c30
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h6
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c4
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c17
-rw-r--r--drivers/gpu/drm/armada/armada_hw.h16
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c663
-rw-r--r--drivers/gpu/drm/armada/armada_plane.c289
-rw-r--r--drivers/gpu/drm/armada/armada_plane.h15
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c6
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c19
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c16
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.c8
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c6
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c6
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c4
-rw-r--r--drivers/gpu/drm/bridge/lvds-encoder.c4
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c4
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c4
-rw-r--r--drivers/gpu/drm/bridge/panel.c2
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c2
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c4
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c86
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c4
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h10
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c20
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c43
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c14
-rw-r--r--drivers/gpu/drm/drm_atomic.c408
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c122
-rw-r--r--drivers/gpu/drm/drm_client.c406
-rw-r--r--drivers/gpu/drm/drm_connector.c219
-rw-r--r--drivers/gpu/drm/drm_context.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c53
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h28
-rw-r--r--drivers/gpu/drm/drm_debugfs.c11
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c9
-rw-r--r--drivers/gpu/drm/drm_dp_cec.c428
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c33
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c4
-rw-r--r--drivers/gpu/drm/drm_drv.c23
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c29
-rw-r--r--drivers/gpu/drm/drm_edid.c279
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c355
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c359
-rw-r--r--drivers/gpu/drm/drm_file.c306
-rw-r--r--drivers/gpu/drm/drm_fourcc.c42
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c49
-rw-r--r--drivers/gpu/drm/drm_gem.c9
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c2
-rw-r--r--drivers/gpu/drm/drm_global.c2
-rw-r--r--drivers/gpu/drm/drm_internal.h2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c13
-rw-r--r--drivers/gpu/drm/drm_lease.c16
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c91
-rw-r--r--drivers/gpu/drm/drm_mode_config.c5
-rw-r--r--drivers/gpu/drm/drm_mode_object.c3
-rw-r--r--drivers/gpu/drm/drm_modes.c23
-rw-r--r--drivers/gpu/drm/drm_of.c27
-rw-r--r--drivers/gpu/drm/drm_panel.c27
-rw-r--r--drivers/gpu/drm/drm_pci.c58
-rw-r--r--drivers/gpu/drm/drm_plane.c169
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c12
-rw-r--r--drivers/gpu/drm/drm_prime.c34
-rw-r--r--drivers/gpu/drm/drm_print.c111
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c19
-rw-r--r--drivers/gpu/drm/drm_property.c6
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c6
-rw-r--r--drivers/gpu/drm/drm_syncobj.c1
-rw-r--r--drivers/gpu/drm/drm_vm.c10
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c1
-rw-r--r--drivers/gpu/drm/drm_writeback.c353
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c35
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c37
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c9
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c52
-rw-r--r--drivers/gpu/drm/exynos/Makefile2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c119
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h47
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c300
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.h11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c62
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c51
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c120
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c44
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c4
-rw-r--r--drivers/gpu/drm/exynos/regs-gsc.h1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c7
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c62
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h1
-rw-r--r--drivers/gpu/drm/gma500/gem.c2
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c20
-rw-r--r--drivers/gpu/drm/gma500/gtt.h2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h38
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c3
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c3
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c13
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c1
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c5
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c387
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c  2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig  2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug  12
-rw-r--r--  drivers/gpu/drm/i915/Makefile  7
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c  20
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c  22
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c  26
-rw-r--r--  drivers/gpu/drm/i915/dvo_ns2501.c  44
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c  10
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c  16
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c  4
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c  68
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c  68
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c  26
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c  20
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h  13
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c  27
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c  492
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h  11
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c  46
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h  53
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c  462
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c  17
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c  144
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c  12
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h  13
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c  26
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.h  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h  7
-rw-r--r--  drivers/gpu/drm/i915/gvt/page_track.c  5
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c  36
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  215
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h  4
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c  80
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  374
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  177
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  128
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  621
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h  21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c  174
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h  28
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c  11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  115
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  1282
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h  85
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h  27
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c  4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c  25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c  45
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c  31
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c  92
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h  4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  680
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c  9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h  2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c  20
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c  144
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c  68
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h  8
-rw-r--r--  drivers/gpu/drm/i915/i915_pvinfo.h  6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  4089
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c  100
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h  14
-rw-r--r--  drivers/gpu/drm/i915/i915_selftest.h  2
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h  2
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h  148
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h  2
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c  8
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h  6
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c  387
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h  53
-rw-r--r--  drivers/gpu/drm/i915/icl_dsi.c  127
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c  27
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c  7
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c  18
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c  70
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c  129
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c  16
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c  61
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c  55
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c  195
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h  1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  803
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h  50
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  531
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_aux_backlight.c  12
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c  39
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c  17
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c  205
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h  18
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  104
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h  34
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c  15
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c  288
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c  129
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c  9
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.c  160
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h  5
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h  20
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c  70
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.h  26
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c  138
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c  5
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c  17
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c  153
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c  118
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c  8
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.h  6
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c  84
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c  5
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c  979
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h  7
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c  204
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c  31
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.h  1
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c  28
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c  445
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  109
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c  631
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  419
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  139
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c  333
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c  49
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c  311
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c  23
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c  165
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  74
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h  23
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h  14
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c  167
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c  82
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c  47
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c  301
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c  2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c  39
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c  174
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c  70
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h  1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c  34
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c  2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c  33
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c  55
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_wedge_me.h  58
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c  2
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c  10
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c  260
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c  133
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c  2
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c  24
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.c  7
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_dmabuf.c  14
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c  49
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c  20
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c  65
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi.c (renamed from drivers/gpu/drm/i915/intel_dsi.c)  117
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi_pll.c (renamed from drivers/gpu/drm/i915/intel_dsi_pll.c)  98
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c  47
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h  1
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c  14
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c  4
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c  3
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c  4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_cec.c  1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c  3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.c  235
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c  15
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h  10
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c  102
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.h  5
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_fb.c  76
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_fb.h  1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c  7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c  2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c  4
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c  22
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.c  656
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.h  4
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c  378
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.h  3
-rw-r--r--  drivers/gpu/drm/meson/meson_venc_cvbs.c  2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c  4
-rw-r--r--  drivers/gpu/drm/msm/Kconfig  1
-rw-r--r--  drivers/gpu/drm/msm/Makefile  34
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h  57
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h  24
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c  30
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h  193
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c  22
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx.xml.h  483
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c  249
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx.xml.h  4562
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c  1207
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h  162
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h  382
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c  818
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h  60
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c  435
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.h  127
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h  38
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c  49
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c  217
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h  14
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h  497
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c  479
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h  153
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c  637
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h  133
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c  2138
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h  423
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c  2393
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h  103
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c  2500
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h  177
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h  430
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c  905
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c  922
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c  1173
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h  88
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c  155
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h  53
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c  511
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h  804
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h  168
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c  323
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h  139
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c  540
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h  218
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c  1183
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h  257
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c  349
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h  128
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c  261
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h  122
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h  465
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c  250
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h  136
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c  753
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h  424
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c  398
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h  202
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c  368
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h  348
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c  275
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h  128
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h  56
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c  203
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h  57
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c  66
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h  59
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c  1345
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h  290
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c  245
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c  1963
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h  175
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c  249
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h  225
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c  1079
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h  199
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h  1007
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c  384
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h  94
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h  1376
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c  3
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c  2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c  4
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c  7
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c  4
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c  52
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c  12
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c  2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c  154
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c  6
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp_common.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c  3
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h  23
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h  13
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c  56
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h  12
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c  431
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c  135
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c  2
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_connector.c  4
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c  4
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c  7
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c  93
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c  287
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h  103
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c  54
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  33
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c  207
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h  70
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h  29
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dac.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv04.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c  99
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/object.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  40
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h  36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c  18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c  24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c  12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/engine.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c  13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c  21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c  10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c  10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c  21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c  9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c  25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c  16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/core.c  4
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/display.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dpi.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.h  5
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/pll.c  73
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/sdi.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/video-pll.c  1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c  4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_debugfs.c  9
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c  6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h  2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c  109
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.h  2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c  8
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c  286
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.h  3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c  19
-rw-r--r--  drivers/gpu/drm/panel/Kconfig  9
-rw-r--r--  drivers/gpu/drm/panel/Makefile  1
-rw-r--r--  drivers/gpu/drm/panel/panel-ilitek-ili9881c.c  503
-rw-r--r--  drivers/gpu/drm/panel/panel-innolux-p079zca.c  352
-rw-r--r--  drivers/gpu/drm/panel/panel-jdi-lt070me05000.c  1
-rw-r--r--  drivers/gpu/drm/panel/panel-lvds.c  1
-rw-r--r--  drivers/gpu/drm/panel/panel-orisetech-otm8009a.c  58
-rw-r--r--  drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c  1
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c  2
-rw-r--r--  drivers/gpu/drm/panel/panel-seiko-43wvf1g.c  1
-rw-r--r--  drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c  1
-rw-r--r--  drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c  1
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c  298
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7789v.c  1
-rw-r--r--  drivers/gpu/drm/pl111/Makefile  1
-rw-r--r--  drivers/gpu/drm/pl111/pl111_display.c  56
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drm.h  5
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drv.c  61
-rw-r--r--  drivers/gpu/drm/pl111/pl111_nomadik.c  36
-rw-r--r--  drivers/gpu/drm/pl111/pl111_nomadik.h  18
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c  2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c  7
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c  20
-rw-r--r--  drivers/gpu/drm/radeon/cik.c  22
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c  94
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c  22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c  7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c  8
-rw-r--r--  drivers/gpu/drm/radeon/si.c  22
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c  20
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds.c  6
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c  4
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-reg.c  16
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi.c  4
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.c  4
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.c  86
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.h  3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c  99
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h  23
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_lvds.c  8
-rw-r--r--  drivers/gpu/drm/savage/savage_state.c  2
-rw-r--r--  drivers/gpu/drm/scheduler/Makefile  1
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler.c  386
-rw-r--r--  drivers/gpu/drm/scheduler/sched_fence.c  13
-rw-r--r--  drivers/gpu/drm/selftests/drm_mm_selftests.h  2
-rw-r--r--  drivers/gpu/drm/selftests/test-drm_mm.c  71
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c  2
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.c  2
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c  8
-rw-r--r--  drivers/gpu/drm/sti/sti_dvo.c  6
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c  8
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c  2
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c  4
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c  2
-rw-r--r--  drivers/gpu/drm/stm/drv.c  10
-rw-r--r--  drivers/gpu/drm/stm/ltdc.c  21
-rw-r--r--  drivers/gpu/drm/stm/ltdc.h  1
-rw-r--r--  drivers/gpu/drm/sun4i/Kconfig  7
-rw-r--r--  drivers/gpu/drm/sun4i/Makefile  6
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c  12
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_crtc.c  2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c  127
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c  4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_lvds.c  4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.c  4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c  67
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tv.c  2
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_drc.c  1
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c  117
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c  44
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h  8
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c  54
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c  90
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.c  81
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.h  4
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_tcon_top.c  274
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_tcon_top.h  44
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_ui_layer.c  61
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_vi_layer.c  61
-rw-r--r--  drivers/gpu/drm/tegra/dc.c  2
-rw-r--r--  drivers/gpu/drm/tegra/drm.c  2
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c  5
-rw-r--r--  drivers/gpu/drm/tegra/gem.c  14
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c  2
-rw-r--r--  drivers/gpu/drm/tegra/output.c  6
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c  2
-rw-r--r--  drivers/gpu/drm/tegra/sor.c  2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c  2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_external.c  9
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_panel.c  2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c  4
-rw-r--r--  drivers/gpu/drm/tinydrm/Kconfig  11
-rw-r--r--  drivers/gpu/drm/tinydrm/Makefile  1
-rw-r--r--  drivers/gpu/drm/tinydrm/core/tinydrm-core.c  3
-rw-r--r--  drivers/gpu/drm/tinydrm/ili9225.c  1
-rw-r--r--  drivers/gpu/drm/tinydrm/ili9341.c  232
-rw-r--r--  drivers/gpu/drm/tinydrm/mi0283qt.c  1
-rw-r--r--  drivers/gpu/drm/tinydrm/mipi-dbi.c  2
-rw-r--r--  drivers/gpu/drm/tinydrm/st7586.c  1
-rw-r--r--  drivers/gpu/drm/tinydrm/st7735r.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  17
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c  63
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c  62
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  59
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c  25
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c  4
-rw-r--r--  drivers/gpu/drm/udl/udl_dmabuf.c  18
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h  5
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c  28
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c  15
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c  45
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c  7
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c  49
-rw-r--r--  drivers/gpu/drm/v3d/v3d_bo.c  28
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c  11
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h  11
-rw-r--r--  drivers/gpu/drm/v3d/v3d_fence.c  17
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c  13
-rw-r--r--  drivers/gpu/drm/v3d/v3d_irq.c  3
-rw-r--r--  drivers/gpu/drm/v3d/v3d_regs.h  1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c  26
-rw-r--r--  drivers/gpu/drm/vc4/Makefile  1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c  2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c  147
-rw-r--r--  drivers/gpu/drm/vc4/vc4_debugfs.c  1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c  9
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h  10
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c  57
-rw-r--r--  drivers/gpu/drm/vc4/vc4_fence.c  8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c  4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c  11
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c  101
-rw-r--r--  drivers/gpu/drm/vc4/vc4_regs.h  6
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c  477
-rw-r--r--  drivers/gpu/drm/vc4/vc4_vec.c  2
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c  7
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c  34
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h  1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fb.c  8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c  7
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c  4
-rw-r--r--  drivers/gpu/drm/vkms/Makefile  3
-rw-r--r--  drivers/gpu/drm/vkms/vkms_crtc.c  130
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.c  156
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.h  78
-rw-r--r--  drivers/gpu/drm/vkms/vkms_gem.c  179
-rw-r--r--  drivers/gpu/drm/vkms/vkms_output.c  111
-rw-r--r--  drivers/gpu/drm/vkms/vkms_plane.c  57
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig  1
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h  233
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h  86
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h  300
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h  7
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h  1094
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h  334
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_escape.h  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_reg.h  211
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_types.h  6
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h  25
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h  25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_binding.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_binding.h  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_blit.c  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c  1123
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c  18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c  18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c  376
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  72
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h  186
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  122
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c  41
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c  42
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c  29
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  604
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h  80
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c  16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_marker.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c  18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.h  35
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c  28
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c  18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_reg.h  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  709
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c  146
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c  30
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_so.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_so.h  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c  109
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  553
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c (renamed from drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c)  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_va.c  4
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.c  2
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.h  4
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_shbuf.c  2
-rw-r--r--  drivers/gpu/drm/zte/zx_hdmi.c  4
-rw-r--r--  drivers/gpu/drm/zte/zx_plane.c  2
-rw-r--r--  drivers/gpu/drm/zte/zx_tvenc.c  2
-rw-r--r--  drivers/gpu/drm/zte/zx_vga.c  4
-rw-r--r--  drivers/gpu/host1x/dev.c  3
-rw-r--r--  drivers/gpu/host1x/job.c  3
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c  6
-rw-r--r--  drivers/gpu/ipu-v3/ipu-cpmem.c  29
-rw-r--r--  drivers/gpu/ipu-v3/ipu-csi.c  37
-rw-r--r--  drivers/gpu/ipu-v3/ipu-image-convert.c  6
-rw-r--r--  drivers/gpu/ipu-v3/ipu-pre.c  3
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prg.c  3
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c  63
-rw-r--r--  drivers/hid/Kconfig  10
-rw-r--r--  drivers/hid/Makefile  1
-rw-r--r--  drivers/hid/hid-core.c  45
-rw-r--r--  drivers/hid/hid-cougar.c  312
-rw-r--r--  drivers/hid/hid-debug.c  8
-rw-r--r--  drivers/hid/hid-elan.c  235
-rw-r--r--  drivers/hid/hid-hyperv.c  3
-rw-r--r--  drivers/hid/hid-ids.h  5
-rw-r--r--  drivers/hid/hid-input.c  3
-rw-r--r--  drivers/hid/hid-microsoft.c  49
-rw-r--r--  drivers/hid/hid-multitouch.c  989
-rw-r--r--  drivers/hid/hid-ntrig.c  2
-rw-r--r--  drivers/hid/hid-redragon.c  26
-rw-r--r--  drivers/hid/hid-sony.c  164
-rw-r--r--  drivers/hid/hid-wiimote-core.c  14
-rw-r--r--  drivers/hid/hid-wiimote-modules.c  440
-rw-r--r--  drivers/hid/hid-wiimote.h  3
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c  59
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/ipc.c  9
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c  13
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/hbm.c  2
-rw-r--r--  drivers/hid/usbhid/hid-core.c  7
-rw-r--r--  drivers/hid/usbhid/hiddev.c  11
-rw-r--r--  drivers/hid/wacom_sys.c  123
-rw-r--r--  drivers/hid/wacom_wac.c  30
-rw-r--r--  drivers/hsi/controllers/omap_ssi_port.c  1
-rw-r--r--  drivers/hv/channel.c  67
-rw-r--r--  drivers/hv/channel_mgmt.c  10
-rw-r--r--  drivers/hv/hv.c  44
-rw-r--r--  drivers/hv/hv_balloon.c  3
-rw-r--r--  drivers/hv/hv_util.c  3
-rw-r--r--  drivers/hv/ring_buffer.c  65
-rw-r--r--  drivers/hv/vmbus_drv.c  120
-rw-r--r--  drivers/hwmon/Kconfig  22
-rw-r--r--  drivers/hwmon/Makefile  2
-rw-r--r--  drivers/hwmon/adt7475.c  340
-rw-r--r--  drivers/hwmon/emc1403.c  2
-rw-r--r--  drivers/hwmon/hwmon.c  4
-rw-r--r--  drivers/hwmon/ibmpowernv.c  238
-rw-r--r--  drivers/hwmon/iio_hwmon.c  67
-rw-r--r--  drivers/hwmon/k10temp.c  8
-rw-r--r--  drivers/hwmon/max197.c  1
-rw-r--r--  drivers/hwmon/mc13783-adc.c  1
-rw-r--r--  drivers/hwmon/mlxreg-fan.c  489
-rw-r--r--  drivers/hwmon/nct6775.c  6
-rw-r--r--  drivers/hwmon/nct7904.c  68
-rw-r--r--  drivers/hwmon/npcm750-pwm-fan.c  1057
-rw-r--r--  drivers/hwmon/pmbus/Kconfig  2
-rw-r--r--  drivers/hwmon/pmbus/max34440.c  93
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c  223
-rw-r--r--  drivers/hwtracing/coresight/Kconfig  11
-rw-r--r--  drivers/hwtracing/coresight/Makefile  1
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.c  577
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.h  119
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c  12
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm.h  3
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x-sysfs.c  43
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c  4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-sysfs.c  47
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c  31
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h  3
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h  10
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c  45
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c  1074
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c  83
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.h  113
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c  7
-rw-r--r--  drivers/hwtracing/coresight/coresight.c  53
-rw-r--r--  drivers/i2c/busses/Kconfig  42
-rw-r--r--  drivers/i2c/busses/Makefile  3
-rw-r--r--  drivers/i2c/busses/i2c-amd8111.c  1
-rw-r--r--  drivers/i2c/busses/i2c-aspeed.c  40
-rw-r--r--  drivers/i2c/busses/i2c-brcmstb.c  8
-rw-r--r--  drivers/i2c/busses/i2c-cht-wc.c  3
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c  12
-rw-r--r--  drivers/i2c/busses/i2c-designware-baytrail.c  10
-rw-r--r--  drivers/i2c/busses/i2c-designware-common.c  91
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h  24
-rw-r--r--  drivers/i2c/busses/i2c-designware-master.c  223
-rw-r--r--  drivers/i2c/busses/i2c-designware-pcidrv.c  17
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c  68
-rw-r--r--  drivers/i2c/busses/i2c-designware-slave.c  62
-rw-r--r--  drivers/i2c/busses/i2c-exynos5.c  22
-rw-r--r--  drivers/i2c/busses/i2c-fsi.c  752
-rw-r--r--  drivers/i2c/busses/i2c-gpio.c  97
-rw-r--r--  drivers/i2c/busses/i2c-i801.c  4
-rw-r--r--  drivers/i2c/busses/i2c-imx.c  28
-rw-r--r--  drivers/i2c/busses/i2c-mxs.c  8
-rw-r--r--  drivers/i2c/busses/i2c-owl.c  495
-rw-r--r--  drivers/i2c/busses/i2c-pasemi.c  3
-rw-r--r--  drivers/i2c/busses/i2c-pmcmsp.c  17
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c  673
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c  123
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c  4
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c  10
-rw-r--r--  drivers/i2c/busses/i2c-sprd.c  8
-rw-r--r--  drivers/i2c/busses/i2c-stu300.c  14
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c  107
-rw-r--r--  drivers/i2c/busses/i2c-xlp9xx.c  41
-rw-r--r--  drivers/i2c/busses/i2c-xlr.c  11
-rw-r--r--  drivers/i2c/i2c-core-acpi.c  19
-rw-r--r--  drivers/i2c/i2c-core-base.c  82
-rw-r--r--  drivers/i2c/i2c-core-slave.c  8
-rw-r--r--  drivers/i2c/i2c-core-smbus.c  28
-rw-r--r--  drivers/i2c/i2c-mux.c  8
-rw-r--r--  drivers/i2c/muxes/i2c-mux-mlxcpld.c  28
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca9541.c  75
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca954x.c  51
-rw-r--r--  drivers/ide/hpt366.c  5
-rw-r--r--  drivers/ide/ide-cd.c  58
-rw-r--r--  drivers/ide/ide-cd.h  6
-rw-r--r--  drivers/ide/ide-cd_ioctl.c  62
-rw-r--r--  drivers/ide/ide-floppy.c  1
-rw-r--r--  drivers/ide/ide-io.c  4
-rw-r--r--  drivers/ide/ide-probe.c  1
-rw-r--r--  drivers/ide/ide-tape.c  2
-rw-r--r--  drivers/ide/ide-taskfile.c  3
-rw-r--r--  drivers/ide/sis5513.c  1
-rw-r--r--  drivers/iio/accel/Kconfig  4
-rw-r--r--  drivers/iio/accel/adxl345.h  7
-rw-r--r--  drivers/iio/accel/adxl345_core.c  140
-rw-r--r--  drivers/iio/accel/adxl345_i2c.c  7
-rw-r--r--  drivers/iio/accel/adxl345_spi.c  6
-rw-r--r--  drivers/iio/accel/mma8452.c  1
-rw-r--r--  drivers/iio/accel/sca3000.c  1
-rw-r--r--  drivers/iio/accel/st_accel_i2c.c  64
-rw-r--r--  drivers/iio/adc/Kconfig  13
-rw-r--r--  drivers/iio/adc/Makefile  1
-rw-r--r--  drivers/iio/adc/ad_sigma_delta.c  5
-rw-r--r--  drivers/iio/adc/at91-sama5d2_adc.c  783
-rw-r--r--  drivers/iio/adc/hx711.c  39
-rw-r--r--  drivers/iio/adc/ina2xx-adc.c  17
-rw-r--r--  drivers/iio/adc/max1363.c  8
-rw-r--r--  drivers/iio/adc/meson_saradc.c  9
-rw-r--r--  drivers/iio/adc/sc27xx_adc.c  522
-rw-r--r--  drivers/iio/adc/ti-ads7950.c  43
-rw-r--r--  drivers/iio/adc/xilinx-xadc-core.c  44
-rw-r--r--  drivers/iio/adc/xilinx-xadc.h  1
-rw-r--r--  drivers/iio/chemical/Kconfig  23
-rw-r--r--  drivers/iio/chemical/Makefile  3
-rw-r--r--  drivers/iio/chemical/bme680.h  96
-rw-r--r--  drivers/iio/chemical/bme680_core.c  959
-rw-r--r--  drivers/iio/chemical/bme680_i2c.c  85
-rw-r--r--  drivers/iio/chemical/bme680_spi.c  125
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_core.c  3
-rw-r--r--  drivers/iio/counter/104-quad-8.c  87
-rw-r--r--  drivers/iio/dac/Kconfig  10
-rw-r--r--  drivers/iio/dac/Makefile  1
-rw-r--r--  drivers/iio/dac/ad5686.c  7
-rw-r--r--  drivers/iio/dac/ad5686.h  1
-rw-r--r--  drivers/iio/dac/ad5696-i2c.c  1
-rw-r--r--  drivers/iio/dac/ad5758.c  897
-rw-r--r--  drivers/iio/dac/ltc2632.c  5
-rw-r--r--  drivers/iio/dac/ti-dac5571.c  6
-rw-r--r--  drivers/iio/frequency/ad9523.c  68
-rw-r--r--  drivers/iio/imu/adis.c  3
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_core.c  37
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c  5
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h  38
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c  143
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c  16
-rw-r--r--  drivers/iio/industrialio-core.c  29
-rw-r--r--  drivers/iio/light/Kconfig  17
-rw-r--r--  drivers/iio/light/Makefile  1
-rw-r--r--  drivers/iio/light/si1133.c  1071
-rw-r--r--  drivers/iio/light/vcnl4000.c  220
-rw-r--r--  drivers/iio/pressure/st_pressure_i2c.c  3
-rw-r--r--  drivers/iio/proximity/Kconfig  13
-rw-r--r--  drivers/iio/proximity/Makefile  1
-rw-r--r--  drivers/iio/proximity/isl29501.c  1027
-rw-r--r--  drivers/iio/temperature/mlx90614.c  4
-rw-r--r--  drivers/infiniband/Kconfig  2
-rw-r--r--  drivers/infiniband/core/Makefile  5
-rw-r--r--  drivers/infiniband/core/addr.c  16
-rw-r--r--  drivers/infiniband/core/cache.c  724
-rw-r--r--  drivers/infiniband/core/cm.c  147
-rw-r--r--  drivers/infiniband/core/cm_msgs.h  7
-rw-r--r--  drivers/infiniband/core/cma.c  362
-rw-r--r--  drivers/infiniband/core/core_priv.h  4
-rw-r--r--  drivers/infiniband/core/device.c  23
-rw-r--r--  drivers/infiniband/core/mad.c  113
-rw-r--r--  drivers/infiniband/core/mad_priv.h  7
-rw-r--r--  drivers/infiniband/core/multicast.c  40
-rw-r--r--  drivers/infiniband/core/nldev.c  16
-rw-r--r--  drivers/infiniband/core/rdma_core.c  1020
-rw-r--r--  drivers/infiniband/core/rdma_core.h  96
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c  306
-rw-r--r--  drivers/infiniband/core/rw.c  8
-rw-r--r--  drivers/infiniband/core/sa_query.c  138
-rw-r--r--  drivers/infiniband/core/sysfs.c  66
-rw-r--r--  drivers/infiniband/core/ucm.c  15
-rw-r--r--  drivers/infiniband/core/umem.c  62
-rw-r--r--  drivers/infiniband/core/umem_odp.c  33
-rw-r--r--  drivers/infiniband/core/user_mad.c  1
-rw-r--r--  drivers/infiniband/core/uverbs.h  34
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c  684
-rw-r--r--  drivers/infiniband/core/uverbs_ioctl.c  709
-rw-r--r--  drivers/infiniband/core/uverbs_ioctl_merge.c  664
-rw-r--r--  drivers/infiniband/core/uverbs_main.c  232
-rw-r--r--  drivers/infiniband/core/uverbs_marshall.c  2
-rw-r--r--  drivers/infiniband/core/uverbs_std_types.c  200
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_counters.c  108
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_cq.c  154
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_dm.c  61
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_flow_action.c  170
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_mr.c  88
-rw-r--r--  drivers/infiniband/core/uverbs_uapi.c  346
-rw-r--r--  drivers/infiniband/core/verbs.c  523
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c  144
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h  15
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c  12
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c  4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cq.c  64
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c  44
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h  8
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c  32
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c  90
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c  269
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c  20
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c  5
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h  57
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c  2
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c  56
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c  1051
-rw-r--r--  drivers/infiniband/hw/cxgb4/resource.c  51
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h  164
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h  68
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c  205
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.h  30
-rw-r--r--  drivers/infiniband/hw/hfi1/driver.c  63
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c  10
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h  243
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c  44
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.c  11
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c  23
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c  14
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c  6
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.h  24
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c  8
-rw-r--r--  drivers/infiniband/hw/hfi1/ruc.c  14
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c  10
-rw-r--r--  drivers/infiniband/hw/hfi1/uc.c  4
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c  4
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c  18
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs_txreq.c  4
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs_txreq.h  4
-rw-r--r--  drivers/infiniband/hw/hfi1/vnic_main.c  14
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_ah.c  21
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_common.h  9
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_db.c  2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h  45
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c  7
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c  440
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.h  2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c  698
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h  136
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c  15
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_pd.c  2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c  55
-rw-r--r--  drivers/infiniband/hw/i40iw/Kconfig  1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c  26
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hw.c  83
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c  54
-rw-r--r--  drivers/infiniband/hw/mlx4/ah.c  70
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c  29
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c  41
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h  21
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c  421
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c  4
-rw-r--r--  drivers/infiniband/hw/mlx5/Kconfig  2
-rw-r--r--  drivers/infiniband/hw/mlx5/Makefile  2
-rw-r--r--  drivers/infiniband/hw/mlx5/ah.c  11
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.c  27
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.h  2
-rw-r--r--  drivers/infiniband/hw/mlx5/cong.c  9
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c  2
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c  1119
-rw-r--r--  drivers/infiniband/hw/mlx5/flow.c  252
-rw-r--r--  drivers/infiniband/hw/mlx5/gsi.c  8
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c  572
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h  85
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c  34
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c  2
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c  294
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c  22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_av.c  5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h  24
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  7
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  24
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c  8
-rw-r--r--  drivers/infiniband/hw/nes/nes.h  2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c  8
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c  6
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c  74
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c  32
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.h  1
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c  26
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c  1
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  50
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.h  12
-rw-r--r--  drivers/infiniband/hw/qedr/main.c  107
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h  43
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_hsi_rdma.h  11
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_iw_cm.c  12
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_roce_cm.c  37
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_roce_cm.h  8
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c  625
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.h  17
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c  3
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h  5
-rw-r--r--  drivers/infiniband/hw/usnic/Kconfig  2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_fwd.c  4
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_fwd.h  2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c  10
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.h  8
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c  40
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.h  5
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma.h  5
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c  26
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c  52
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c  11
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c  7
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c  6
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h  15
-rw-r--r--  drivers/infiniband/sw/rdmavt/ah.c  4
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c  27
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.h  12
-rw-r--r--  drivers/infiniband/sw/rdmavt/srq.c  2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c  3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_av.c  30
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c  1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h  5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c  67
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_param.h  2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c  31
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c  24
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c  5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c  70
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h  32
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c  81
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c  2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_fs.c  6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c  15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c  444
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  7
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c  23
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c  8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c  261
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c  16
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c  7
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c  14
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c  26
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c  4
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  27
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c  82
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h  4
-rw-r--r--  drivers/input/evbug.c  4
-rw-r--r--  drivers/input/evdev.c  16
-rw-r--r--  drivers/input/gameport/emu10k1-gp.c  4
-rw-r--r--  drivers/input/gameport/lightning.c  4
-rw-r--r--  drivers/input/gameport/ns558.c  4
-rw-r--r--  drivers/input/input.c  16
-rw-r--r--  drivers/input/joystick/a3d.c  4
-rw-r--r--  drivers/input/joystick/adi.c  4
-rw-r--r--  drivers/input/joystick/amijoy.c  4
-rw-r--r--  drivers/input/joystick/analog.c  4
-rw-r--r--  drivers/input/joystick/cobra.c  4
-rw-r--r--  drivers/input/joystick/db9.c  9
-rw-r--r--  drivers/input/joystick/gamecon.c  4
-rw-r--r--  drivers/input/joystick/gf2k.c  4
-rw-r--r--  drivers/input/joystick/grip.c  4
-rw-r--r--  drivers/input/joystick/guillemot.c  4
-rw-r--r--  drivers/input/joystick/iforce/iforce-ff.c  8
-rw-r--r--  drivers/input/joystick/iforce/iforce-main.c  30
-rw-r--r--  drivers/input/joystick/iforce/iforce-packets.c  16
-rw-r--r--  drivers/input/joystick/iforce/iforce-serio.c  4
-rw-r--r--  drivers/input/joystick/iforce/iforce-usb.c  4
-rw-r--r--  drivers/input/joystick/iforce/iforce.h  6
-rw-r--r--  drivers/input/joystick/interact.c  4
-rw-r--r--  drivers/input/joystick/joydump.c  4
-rw-r--r--  drivers/input/joystick/magellan.c  4
-rw-r--r--  drivers/input/joystick/pxrc.c  166
-rw-r--r--  drivers/input/joystick/sidewinder.c  4
-rw-r--r--  drivers/input/joystick/spaceball.c  4
-rw-r--r--  drivers/input/joystick/spaceorb.c  4
-rw-r--r--  drivers/input/joystick/stinger.c  4
-rw-r--r--  drivers/input/joystick/tmdc.c  4
-rw-r--r--  drivers/input/joystick/turbografx.c  4
-rw-r--r--  drivers/input/joystick/warrior.c  4
-rw-r--r--  drivers/input/keyboard/Kconfig  2
-rw-r--r--  drivers/input/keyboard/adp5589-keys.c  1
-rw-r--r--  drivers/input/keyboard/amikbd.c  4
-rw-r--r--  drivers/input/keyboard/atakbd.c  4
-rw-r--r--  drivers/input/keyboard/cros_ec_keyb.c  69
-rw-r--r--  drivers/input/keyboard/gpio_keys.c  8
-rw-r--r--  drivers/input/keyboard/hilkbd.c  4
-rw-r--r--  drivers/input/keyboard/imx_keypad.c  12
-rw-r--r--  drivers/input/keyboard/newtonkbd.c  4
-rw-r--r--  drivers/input/keyboard/snvs_pwrkey.c  15
-rw-r--r--  drivers/input/keyboard/stowaway.c  4
-rw-r--r--  drivers/input/keyboard/sunkbd.c  4
-rw-r--r--  drivers/input/keyboard/xtkbd.c  4
-rw-r--r--  drivers/input/misc/keyspan_remote.c  2
-rw-r--r--  drivers/input/misc/pm8941-pwrkey.c  70
-rw-r--r--  drivers/input/misc/powermate.c  2
-rw-r--r--  drivers/input/misc/xen-kbdfront.c  183
-rw-r--r--  drivers/input/misc/yealink.c  4
-rw-r--r--  drivers/input/mouse/appletouch.c  7
-rw-r--r--  drivers/input/mouse/cyapa_gen5.c  1
-rw-r--r--  drivers/input/mouse/cyapa_gen6.c  1
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c  2
-rw-r--r--  drivers/input/mouse/elan_i2c_smbus.c  2
-rw-r--r--  drivers/input/mouse/elantech.c  2
-rw-r--r--  drivers/input/mouse/inport.c  4
-rw-r--r--  drivers/input/mouse/logibm.c  4
-rw-r--r--  drivers/input/mouse/pc110pad.c  4
-rw-r--r--  drivers/input/mouse/sermouse.c  8
-rw-r--r--  drivers/input/serio/ams_delta_serio.c  198
-rw-r--r--  drivers/input/serio/ct82c710.c  4
-rw-r--r--  drivers/input/serio/hyperv-keyboard.c  3
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h  7
-rw-r--r--  drivers/input/serio/i8042.c  3
-rw-r--r--  drivers/input/serio/q40kbd.c  4
-rw-r--r--  drivers/input/serio/rpckbd.c  4
-rw-r--r--  drivers/input/serio/serio.c  4
-rw-r--r--  drivers/input/tablet/aiptek.c  2
-rw-r--r--  drivers/input/touchscreen/Kconfig  25
-rw-r--r--  drivers/input/touchscreen/Makefile  2
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c  223
-rw-r--r--  drivers/input/touchscreen/bu21029_ts.c  484
-rw-r--r--  drivers/input/touchscreen/eeti_ts.c  37
-rw-r--r--  drivers/input/touchscreen/egalax_ts.c  5
-rw-r--r--  drivers/input/touchscreen/elo.c  1
-rw-r--r--  drivers/input/touchscreen/fsl-imx25-tcq.c  21
-rw-r--r--  drivers/input/touchscreen/gunze.c  4
-rw-r--r--  drivers/input/touchscreen/imx6ul_tsc.c  14
-rw-r--r--  drivers/input/touchscreen/raydium_i2c_ts.c  4
-rw-r--r--  drivers/input/touchscreen/resistive-adc-touch.c  204
-rw-r--r--  drivers/input/touchscreen/rohm_bu21023.c  4
-rw-r--r--  drivers/input/touchscreen/wdt87xx_i2c.c  2
-rw-r--r--  drivers/iommu/Kconfig  1
-rw-r--r--  drivers/iommu/amd_iommu.c  2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c  2
-rw-r--r--  drivers/iommu/arm-smmu-v3.c  2
-rw-r--r--  drivers/iommu/arm-smmu.c  7
-rw-r--r--  drivers/iommu/dma-iommu.c  3
-rw-r--r--  drivers/iommu/exynos-iommu.c  2
-rw-r--r--  drivers/iommu/intel-iommu.c  95
-rw-r--r--  drivers/iommu/intel-svm.c  4
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c  3
-rw-r--r--  drivers/iommu/msm_iommu.c  2
-rw-r--r--  drivers/iommu/of_iommu.c  21
-rw-r--r--  drivers/iommu/qcom_iommu.c  2
-rw-r--r--  drivers/iommu/rockchip-iommu.c  2
-rw-r--r--  drivers/ipack/carriers/tpci200.c  7
-rw-r--r--  drivers/irqchip/Kconfig  28
-rw-r--r--  drivers/irqchip/Makefile  1
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c  3
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-pci-msi.c  16
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-platform-msi.c  2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c  243
-rw-r--r--  drivers/irqchip/irq-gic-v3.c  4
-rw-r--r--  drivers/irqchip/irq-ingenic.c  1
-rw-r--r--  drivers/irqchip/irq-renesas-h8s.c  6
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c  260
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c  1
-rw-r--r--  drivers/irqchip/irq-xtensa-mx.c  2
-rw-r--r--  drivers/irqchip/irq-xtensa-pic.c  2
-rw-r--r--  drivers/isdn/capi/capi.c  5
-rw-r--r--  drivers/isdn/capi/capidrv.c  3
-rw-r--r--  drivers/isdn/gigaset/bas-gigaset.c  29
-rw-r--r--  drivers/isdn/hardware/mISDN/avmfritz.c  1
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcpci.c  3
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcsusb.c  36
-rw-r--r--  drivers/isdn/hardware/mISDN/mISDNinfineon.c  1
-rw-r--r--  drivers/isdn/hardware/mISDN/mISDNisar.c  4
-rw-r--r--  drivers/isdn/hardware/mISDN/netjet.c  2
-rw-r--r--  drivers/isdn/hisax/avm_pci.c  1
-rw-r--r--  drivers/isdn/hisax/callc.c  3
-rw-r--r--  drivers/isdn/hisax/config.c  9
-rw-r--r--  drivers/isdn/hisax/gazel.c  4
-rw-r--r--  drivers/isdn/hisax/hfc_usb.c  10
-rw-r--r--  drivers/isdn/hisax/isar.c  2
-rw-r--r--  drivers/isdn/hisax/l3_1tr6.c  1
-rw-r--r--  drivers/isdn/hisax/l3dss1.c  1
-rw-r--r--  drivers/isdn/hisax/st5481_usb.c  11
-rw-r--r--  drivers/isdn/hysdn/hysdn_boot.c  2
-rw-r--r--  drivers/isdn/i4l/isdn_common.c  8
-rw-r--r--  drivers/isdn/i4l/isdn_tty.c  4
-rw-r--r--  drivers/isdn/i4l/isdn_v110.c  9
-rw-r--r--  drivers/isdn/mISDN/stack.c  1
-rw-r--r--  drivers/leds/Kconfig  5
-rw-r--r--  drivers/leds/led-triggers.c  39
-rw-r--r--  drivers/leds/leds-apu.c  44
-rw-r--r--  drivers/leds/leds-lm3692x.c  181
-rw-r--r--  drivers/leds/leds-lt3593.c  190
-rw-r--r--  drivers/leds/leds-max8997.c  2
-rw-r--r--  drivers/leds/leds-ns2.c  4
-rw-r--r--  drivers/leds/trigger/Kconfig  15
-rw-r--r--  drivers/leds/trigger/ledtrig-activity.c  51
-rw-r--r--  drivers/leds/trigger/ledtrig-backlight.c  64
-rw-r--r--  drivers/leds/trigger/ledtrig-camera.c  3
-rw-r--r--  drivers/leds/trigger/ledtrig-default-on.c  20
-rw-r--r--  drivers/leds/trigger/ledtrig-gpio.c  92
-rw-r--r--  drivers/leds/trigger/ledtrig-heartbeat.c  49
-rw-r--r--  drivers/leds/trigger/ledtrig-netdev.c  101
-rw-r--r--  drivers/leds/trigger/ledtrig-oneshot.c  91
-rw-r--r--  drivers/leds/trigger/ledtrig-timer.c  58
-rw-r--r--  drivers/leds/trigger/ledtrig-transient.c  102
-rw-r--r--  drivers/lightnvm/Kconfig  30
-rw-r--r--  drivers/lightnvm/pblk-cache.c  9
-rw-r--r--  drivers/lightnvm/pblk-core.c  78
-rw-r--r--  drivers/lightnvm/pblk-gc.c  34
-rw-r--r--  drivers/lightnvm/pblk-init.c  98
-rw-r--r--  drivers/lightnvm/pblk-rb.c  24
-rw-r--r--  drivers/lightnvm/pblk-read.c  247
-rw-r--r--  drivers/lightnvm/pblk-recovery.c  47
-rw-r--r--  drivers/lightnvm/pblk-sysfs.c  13
-rw-r--r--  drivers/lightnvm/pblk-write.c  35
-rw-r--r--  drivers/lightnvm/pblk.h  48
-rw-r--r--  drivers/macintosh/Kconfig  19
-rw-r--r--  drivers/macintosh/Makefile  1
-rw-r--r--  drivers/macintosh/adb.c  2
-rw-r--r--  drivers/macintosh/via-pmu.c  346
-rw-r--r--  drivers/macintosh/via-pmu68k.c  850
-rw-r--r--  drivers/mailbox/Kconfig  16
-rw-r--r--  drivers/mailbox/Makefile  4
-rw-r--r--  drivers/mailbox/imx-mailbox.c  358
-rw-r--r--  drivers/mailbox/mailbox-xgene-slimpro.c  6
-rw-r--r--  drivers/mailbox/mtk-cmdq-mailbox.c  571
-rw-r--r--  drivers/mailbox/omap-mailbox.c  31
-rw-r--r--  drivers/mailbox/ti-msgmgr.c  353
-rw-r--r--  drivers/md/bcache/Kconfig  7
-rw-r--r--  drivers/md/bcache/alloc.c  39
-rw-r--r--  drivers/md/bcache/bcache.h  230
-rw-r--r--  drivers/md/bcache/bset.c  205
-rw-r--r--  drivers/md/bcache/bset.h  146
-rw-r--r--  drivers/md/bcache/btree.c  135
-rw-r--r--  drivers/md/bcache/btree.h  88
-rw-r--r--  drivers/md/bcache/closure.c  19
-rw-r--r--  drivers/md/bcache/closure.h  10
-rw-r--r--  drivers/md/bcache/debug.c  40
-rw-r--r--  drivers/md/bcache/debug.h  6
-rw-r--r--  drivers/md/bcache/extents.c  37
-rw-r--r--  drivers/md/bcache/extents.h  6
-rw-r--r--  drivers/md/bcache/io.c  24
-rw-r--r--  drivers/md/bcache/journal.c  28
-rw-r--r--  drivers/md/bcache/journal.h  28
-rw-r--r--  drivers/md/bcache/movinggc.c  14
-rw-r--r--  drivers/md/bcache/request.c  136
-rw-r--r--  drivers/md/bcache/request.h  18
-rw-r--r--  drivers/md/bcache/stats.c  15
-rw-r--r--  drivers/md/bcache/stats.h  15
-rw-r--r--  drivers/md/bcache/super.c  166
-rw-r--r--  drivers/md/bcache/sysfs.c  84
-rw-r--r--  drivers/md/bcache/sysfs.h  6
-rw-r--r--  drivers/md/bcache/util.c  135
-rw-r--r--  drivers/md/bcache/util.h  43
-rw-r--r--  drivers/md/bcache/writeback.c  155
-rw-r--r--  drivers/md/bcache/writeback.h  38
-rw-r--r--  drivers/md/dm-cache-metadata.c  13
-rw-r--r--  drivers/md/dm-cache-target.c  35
-rw-r--r--  drivers/md/dm-crypt.c  66
-rw-r--r--  drivers/md/dm-delay.c  249
-rw-r--r--  drivers/md/dm-integrity.c  501
-rw-r--r--  drivers/md/dm-kcopyd.c  18
-rw-r--r--  drivers/md/dm-raid.c  6
-rw-r--r--  drivers/md/dm-raid1.c  17
-rw-r--r--  drivers/md/dm-snap.c  41
-rw-r--r--  drivers/md/dm-thin.c  31
-rw-r--r--  drivers/md/dm-writecache.c  49
-rw-r--r--  drivers/md/dm-zoned-reclaim.c  6
-rw-r--r--  drivers/md/dm.c  6
-rw-r--r--  drivers/md/md-bitmap.c  305
-rw-r--r--  drivers/md/md-bitmap.h  60
-rw-r--r--  drivers/md/md-cluster.c  65
-rw-r--r--  drivers/md/md.c  81
-rw-r--r--  drivers/md/md.h  1
-rw-r--r--  drivers/md/persistent-data/dm-space-map-common.c  20
-rw-r--r--  drivers/md/raid1.c  35
-rw-r--r--  drivers/md/raid10.c  59
-rw-r--r--  drivers/md/raid5-cache.c  10
-rw-r--r--  drivers/md/raid5.c  56
-rw-r--r--  drivers/media/cec/cec-adap.c  18
-rw-r--r--  drivers/media/cec/cec-api.c  8
-rw-r--r--  drivers/media/cec/cec-notifier.c  11
-rw-r--r--  drivers/media/common/siano/smsdvb-debugfs.c  10
-rw-r--r--  drivers/media/common/siano/smsdvb-main.c  6
-rw-r--r--  drivers/media/common/siano/smsdvb.h  7
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-core.c  5
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-dma-contig.c  3
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-dma-sg.c  3
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-vmalloc.c  3
-rw-r--r--  drivers/media/dvb-core/dvb_ca_en50221.c  2
-rw-r--r--  drivers/media/dvb-core/dvb_frontend.c  84
-rw-r--r--  drivers/media/dvb-core/dvbdev.c  18
-rw-r--r--  drivers/media/dvb-frontends/Kconfig  10
-rw-r--r--  drivers/media/dvb-frontends/Makefile  1
-rw-r--r--  drivers/media/dvb-frontends/af9013.c  15
-rw-r--r--  drivers/media/dvb-frontends/af9033.c  7
-rw-r--r--  drivers/media/dvb-frontends/as102_fe.c  6
-rw-r--r--  drivers/media/dvb-frontends/ascot2e.c  6
-rw-r--r--  drivers/media/dvb-frontends/atbm8830.c  6
-rw-r--r--  drivers/media/dvb-frontends/au8522_dig.c  6
-rw-r--r--  drivers/media/dvb-frontends/bcm3510.c  6
-rw-r--r--  drivers/media/dvb-frontends/cx22700.c  6
-rw-r--r--  drivers/media/dvb-frontends/cx22702.c  6
-rw-r--r--  drivers/media/dvb-frontends/cx24110.c  8
-rw-r--r--  drivers/media/dvb-frontends/cx24113.c  8
-rw-r--r--  drivers/media/dvb-frontends/cx24116.c  8
-rw-r--r--  drivers/media/dvb-frontends/cx24117.c  8
-rw-r--r--  drivers/media/dvb-frontends/cx24120.c  8
-rw-r--r--  drivers/media/dvb-frontends/cx24123.c  8
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_t.c  4
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_t2.c  4
-rw-r--r--  drivers/media/dvb-frontends/cxd2841er.c  9
-rw-r--r--  drivers/media/dvb-frontends/cxd2880/cxd2880_top.c  6
-rw-r--r--  drivers/media/dvb-frontends/dib0070.c  8
-rw-r--r--  drivers/media/dvb-frontends/dib0090.c  12
-rw-r--r--  drivers/media/dvb-frontends/dib3000mb.c  6
-rw-r--r--  drivers/media/dvb-frontends/dib3000mc.c  6
-rw-r--r--  drivers/media/dvb-frontends/dib7000m.c  6
-rw-r--r--  drivers/media/dvb-frontends/dib7000p.c  6
-rw-r--r--  drivers/media/dvb-frontends/dib8000.c  6
-rw-r--r--  drivers/media/dvb-frontends/dib9000.c  6
-rw-r--r--  drivers/media/dvb-frontends/drx39xyj/drxj.c  25
-rw-r--r--  drivers/media/dvb-frontends/drxd_hard.c  13
-rw-r--r--  drivers/media/dvb-frontends/drxk_hard.c  30
-rw-r--r--  drivers/media/dvb-frontends/ds3000.c  8
-rw-r--r--  drivers/media/dvb-frontends/dvb-pll.c  27
-rw-r--r--  drivers/media/dvb-frontends/dvb_dummy_fe.c  24
-rw-r--r--  drivers/media/dvb-frontends/gp8psk-fe.c  6
-rw-r--r--  drivers/media/dvb-frontends/helene.c  105
-rw-r--r--  drivers/media/dvb-frontends/helene.h  3
-rw-r--r--  drivers/media/dvb-frontends/horus3a.c  6
-rw-r--r--  drivers/media/dvb-frontends/itd1000.c  8
-rw-r--r--  drivers/media/dvb-frontends/ix2505v.c  8
-rw-r--r--  drivers/media/dvb-frontends/l64781.c  7
-rw-r--r--  drivers/media/dvb-frontends/lg2160.c  12
-rw-r--r--  drivers/media/dvb-frontends/lgdt3305.c  12
-rw-r--r--  drivers/media/dvb-frontends/lgdt3306a.c  6
-rw-r--r--  drivers/media/dvb-frontends/lgdt330x.c  12
-rw-r--r--  drivers/media/dvb-frontends/lgs8gl5.c  7
-rw-r--r--  drivers/media/dvb-frontends/lgs8gxx.c  6
-rw-r--r--  drivers/media/dvb-frontends/m88ds3103.c  6
-rw-r--r--  drivers/media/dvb-frontends/m88rs2000.c  8
-rw-r--r--  drivers/media/dvb-frontends/mb86a16.c  7
-rw-r--r--  drivers/media/dvb-frontends/mb86a20s.c  6
-rw-r--r--  drivers/media/dvb-frontends/mn88443x.c  802
-rw-r--r--  drivers/media/dvb-frontends/mn88443x.h  27
-rw-r--r--  drivers/media/dvb-frontends/mt312.c  10
-rw-r--r--  drivers/media/dvb-frontends/mt352.c  7
-rw-r--r--  drivers/media/dvb-frontends/mxl5xx.c  6
-rw-r--r--  drivers/media/dvb-frontends/nxt200x.c  6
-rw-r--r--  drivers/media/dvb-frontends/nxt6000.c  6
-rw-r--r--  drivers/media/dvb-frontends/or51132.c  6
-rw-r--r--  drivers/media/dvb-frontends/or51211.c  8
-rw-r--r--  drivers/media/dvb-frontends/rtl2830.c  16
-rw-r--r--  drivers/media/dvb-frontends/rtl2832.c  10
-rw-r--r--  drivers/media/dvb-frontends/rtl2832_sdr.c  6
-rw-r--r--  drivers/media/dvb-frontends/s5h1409.c  6
-rw-r--r--  drivers/media/dvb-frontends/s5h1411.c  6
-rw-r--r--  drivers/media/dvb-frontends/s5h1420.c  8
-rw-r--r--  drivers/media/dvb-frontends/s5h1432.c  6
-rw-r--r--  drivers/media/dvb-frontends/s921.c  7
-rw-r--r--  drivers/media/dvb-frontends/si2165.c  2
-rw-r--r--  drivers/media/dvb-frontends/si21xx.c  7
-rw-r--r--  drivers/media/dvb-frontends/sp8870.c  6
-rw-r--r--  drivers/media/dvb-frontends/sp887x.c  6
-rw-r--r--  drivers/media/dvb-frontends/stb0899_drv.c  6
-rw-r--r--  drivers/media/dvb-frontends/stb6000.c  4
-rw-r--r--  drivers/media/dvb-frontends/stb6100.c  5
-rw-r--r--  drivers/media/dvb-frontends/stv0288.c  7
-rw-r--r--  drivers/media/dvb-frontends/stv0297.c  6
-rw-r--r--  drivers/media/dvb-frontends/stv0299.c  7
-rw-r--r--  drivers/media/dvb-frontends/stv0367.c  20
-rw-r--r--  drivers/media/dvb-frontends/stv0900_core.c  7
-rw-r--r--  drivers/media/dvb-frontends/stv090x.c  6
-rw-r--r--  drivers/media/dvb-frontends/stv0910.c  10
-rw-r--r--  drivers/media/dvb-frontends/stv6110.c  6
-rw-r--r--  drivers/media/dvb-frontends/stv6110x.c  7
-rw-r--r--  drivers/media/dvb-frontends/stv6111.c  5
-rw-r--r--  drivers/media/dvb-frontends/tc90522.c  10
-rw-r--r--  drivers/media/dvb-frontends/tda10021.c  10
-rw-r--r--  drivers/media/dvb-frontends/tda10023.c  6
-rw-r--r--  drivers/media/dvb-frontends/tda10048.c  6
-rw-r--r--  drivers/media/dvb-frontends/tda1004x.c  18
-rw-r--r--  drivers/media/dvb-frontends/tda10071.c  10
-rw-r--r--  drivers/media/dvb-frontends/tda10086.c  6
-rw-r--r--  drivers/media/dvb-frontends/tda18271c2dd.c  7
-rw-r--r--  drivers/media/dvb-frontends/tda665x.c  6
-rw-r--r--  drivers/media/dvb-frontends/tda8083.c  7
-rw-r--r--  drivers/media/dvb-frontends/tda8261.c  9
-rw-r--r--  drivers/media/dvb-frontends/tda826x.c  4
-rw-r--r--  drivers/media/dvb-frontends/ts2020.c  4
-rw-r--r--  drivers/media/dvb-frontends/tua6100.c  6
-rw-r--r--  drivers/media/dvb-frontends/ves1820.c  6
-rw-r--r--  drivers/media/dvb-frontends/ves1x93.c  8
-rw-r--r--  drivers/media/dvb-frontends/zl10036.c  8
-rw-r--r--  drivers/media/dvb-frontends/zl10353.c  7
-rw-r--r--  drivers/media/firewire/firedtv-fe.c  26
-rw-r--r--  drivers/media/i2c/Kconfig  115
-rw-r--r--  drivers/media/i2c/Makefile  5
-rw-r--r--  drivers/media/i2c/ad9389b.c  1
-rw-r--r--  drivers/media/i2c/adv7180.c  32
-rw-r--r--  drivers/media/i2c/adv748x/adv748x-csi2.c  2
-rw-r--r--  drivers/media/i2c/adv7511.c  1
-rw-r--r--  drivers/media/i2c/adv7604.c  8
-rw-r--r--  drivers/media/i2c/adv7842.c  9
-rw-r--r--  drivers/media/i2c/ak7375.c  292
-rw-r--r--  drivers/media/i2c/cx25840/cx25840-core.h  33
-rw-r--r--  drivers/media/i2c/dw9807-vcm.c  329
-rw-r--r--  drivers/media/i2c/et8ek8/et8ek8_driver.c  1
-rw-r--r--  drivers/media/i2c/imx258.c  8
-rw-r--r--  drivers/media/i2c/imx274.c  742
-rw-r--r--  drivers/media/i2c/lm3560.c  3
-rw-r--r--  drivers/media/i2c/mt9m032.c  1
-rw-r--r--  drivers/media/i2c/mt9p031.c  1
-rw-r--r--  drivers/media/i2c/mt9t001.c  1
-rw-r--r--  drivers/media/i2c/mt9v032.c  1
-rw-r--r--  drivers/media/i2c/mt9v111.c  1298
-rw-r--r--  drivers/media/i2c/ov2680.c  1186
-rw-r--r--  drivers/media/i2c/ov5640.c  175
-rw-r--r--  drivers/media/i2c/ov5645.c  13
-rw-r--r--  drivers/media/i2c/ov7670.c  6
-rw-r--r--  drivers/media/i2c/ov772x.c  353
-rw-r--r--  drivers/media/i2c/rj54n1cb0c.c  1437
-rw-r--r--  drivers/media/i2c/smiapp/smiapp-core.c  20
-rw-r--r--  drivers/media/i2c/soc_camera/ov772x.c  2
-rw-r--r--  drivers/media/i2c/tc358743.c  5
-rw-r--r--  drivers/media/i2c/tda1997x.c  2
-rw-r--r--  drivers/media/i2c/tvp514x.c  2
-rw-r--r--  drivers/media/i2c/tvp5150.c  2
-rw-r--r--  drivers/media/i2c/tvp7002.c  2
-rw-r--r--  drivers/media/i2c/video-i2c.c  81
-rw-r--r--  drivers/media/i2c/vs6624.c  4
-rw-r--r--  drivers/media/media-device.c  16
-rw-r--r--  drivers/media/pci/bt8xx/bttv-driver.c  2
-rw-r--r--  drivers/media/pci/bt8xx/dst.c  26
-rw-r--r--  drivers/media/pci/bt8xx/dvb-bt8xx.c  12
-rw-r--r--  drivers/media/pci/cobalt/cobalt-driver.c  2
-rw-r--r--  drivers/media/pci/cx18/cx18-driver.c  2
-rw-r--r--  drivers/media/pci/cx23885/altera-ci.c  2
-rw-r--r--  drivers/media/pci/cx23885/cx23885-cards.c  82
-rw-r--r--  drivers/media/pci/cx23885/cx23885-core.c  2
-rw-r--r--  drivers/media/pci/cx25821/cx25821-audio-upstream.c  679
-rw-r--r--  drivers/media/pci/cx25821/cx25821-audio-upstream.h  58
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-gpio.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c673
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.h135
-rw-r--r--drivers/media/pci/cx25821/cx25821.h12
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c7
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c4
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c20
-rw-r--r--drivers/media/pci/ddbridge/Makefile3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c45
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.c3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.c5
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-max.c18
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-max.h2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-mci.c409
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-mci.h192
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-regs.h8
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-sx8.c488
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h14
-rw-r--r--drivers/media/pci/dm1105/dm1105.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c1
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/pci/mantis/mantis_vp3030.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c5
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c6
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c7
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c11
-rw-r--r--drivers/media/platform/Kconfig20
-rw-r--r--drivers/media/platform/Makefile5
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c27
-rw-r--r--drivers/media/platform/cadence/Kconfig2
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c1
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c1
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c54
-rw-r--r--drivers/media/platform/coda/coda-bit.c123
-rw-r--r--drivers/media/platform/coda/coda-common.c189
-rw-r--r--drivers/media/platform/coda/coda-h264.c319
-rw-r--r--drivers/media/platform/coda/coda.h4
-rw-r--r--drivers/media/platform/coda/coda_regs.h1
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c1
-rw-r--r--drivers/media/platform/cros-ec-cec/Makefile1
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c347
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c1
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c1
-rw-r--r--drivers/media/platform/davinci/vpif_display.c24
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c11
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c6
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c6
-rw-r--r--drivers/media/platform/fsl-viu.c38
-rw-r--r--drivers/media/platform/m2m-deinterlace.c25
-rw-r--r--drivers/media/platform/meson/ao-cec.c2
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c5
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c25
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c23
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c16
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c2
-rw-r--r--drivers/media/platform/mx2_emmaprp.c21
-rw-r--r--drivers/media/platform/omap/Kconfig1
-rw-r--r--drivers/media/platform/omap3isp/isp.c6
-rw-r--r--drivers/media/platform/pxa_camera.c22
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.h123
-rw-r--r--drivers/media/platform/qcom/camss/Makefile (renamed from drivers/media/platform/qcom/camss-8x16/Makefile)4
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-csid.c)471
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-csid.h)17
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c176
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c256
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-csiphy.c)363
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-csiphy.h)37
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-ispif.c)264
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-ispif.h)23
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c1018
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c1140
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-vfe.c)1569
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h186
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-video.c)133
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-video.h)12
-rw-r--r--drivers/media/platform/qcom/camss/camss.c (renamed from drivers/media/platform/qcom/camss-8x16/camss.c)450
-rw-r--r--drivers/media/platform/qcom/camss/camss.h (renamed from drivers/media/platform/qcom/camss-8x16/camss.h)43
-rw-r--r--drivers/media/platform/qcom/venus/Makefile3
-rw-r--r--drivers/media/platform/qcom/venus/core.c107
-rw-r--r--drivers/media/platform/qcom/venus/core.h100
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c568
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h23
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h10
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c62
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h112
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c407
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c278
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.h110
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c108
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h10
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c329
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c10
-rw-r--r--drivers/media/platform/qcom/venus/venc.c227
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c10
-rw-r--r--drivers/media/platform/rcar-fcp.c7
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig1
-rw-r--r--drivers/media/platform/rcar-vin/Makefile1
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c321
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c20
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c63
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c18
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h37
-rw-r--r--drivers/media/platform/rcar_drif.c8
-rw-r--r--drivers/media/platform/rcar_fdp1.c6
-rw-r--r--drivers/media/platform/rcar_jpu.c27
-rw-r--r--drivers/media/platform/renesas-ceu.c91
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c45
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c20
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h2
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c4
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c19
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c7
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c29
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c15
-rw-r--r--drivers/media/platform/sh_veu.c5
-rw-r--r--drivers/media/platform/sh_vou.c5
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c5
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c18
-rw-r--r--drivers/media/platform/sti/hva/hva-v4l2.c1
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c259
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c20
-rw-r--r--drivers/media/platform/vicodec/Kconfig13
-rw-r--r--drivers/media/platform/vicodec/Makefile4
-rw-r--r--drivers/media/platform/vicodec/vicodec-codec.c797
-rw-r--r--drivers/media/platform/vicodec/vicodec-codec.h129
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c1506
-rw-r--r--drivers/media/platform/video-mux.c119
-rw-r--r--drivers/media/platform/vim2m.c42
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c1
-rw-r--r--drivers/media/platform/vimc/vimc-core.c1
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c1
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c1
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c1
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c2
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1.h3
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c433
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h28
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c12
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c20
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h5
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c72
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c6
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c3
-rw-r--r--drivers/media/rc/bpf-lirc.c25
-rw-r--r--drivers/media/rc/rc-ir-raw.c8
-rw-r--r--drivers/media/rc/rc-main.c12
-rw-r--r--drivers/media/tuners/e4000.c6
-rw-r--r--drivers/media/tuners/fc0011.c6
-rw-r--r--drivers/media/tuners/fc0012.c7
-rw-r--r--drivers/media/tuners/fc0013.c7
-rw-r--r--drivers/media/tuners/fc2580.c6
-rw-r--r--drivers/media/tuners/it913x.c6
-rw-r--r--drivers/media/tuners/m88rs6000t.c6
-rw-r--r--drivers/media/tuners/max2165.c8
-rw-r--r--drivers/media/tuners/mc44s803.c8
-rw-r--r--drivers/media/tuners/mt2060.c8
-rw-r--r--drivers/media/tuners/mt2063.c7
-rw-r--r--drivers/media/tuners/mt2131.c8
-rw-r--r--drivers/media/tuners/mt2266.c8
-rw-r--r--drivers/media/tuners/mxl301rf.c4
-rw-r--r--drivers/media/tuners/mxl5005s.c8
-rw-r--r--drivers/media/tuners/mxl5007t.c2
-rw-r--r--drivers/media/tuners/qm1d1b0004.c4
-rw-r--r--drivers/media/tuners/qm1d1c0042.c4
-rw-r--r--drivers/media/tuners/qt1010.c8
-rw-r--r--drivers/media/tuners/qt1010_priv.h14
-rw-r--r--drivers/media/tuners/r820t.c6
-rw-r--r--drivers/media/tuners/si2157.c6
-rw-r--r--drivers/media/tuners/tda18212.c8
-rw-r--r--drivers/media/tuners/tda18218.c8
-rw-r--r--drivers/media/tuners/tda18250.c6
-rw-r--r--drivers/media/tuners/tda18271-common.c8
-rw-r--r--drivers/media/tuners/tda18271-fe.c6
-rw-r--r--drivers/media/tuners/tda827x.c12
-rw-r--r--drivers/media/tuners/tua9001.c6
-rw-r--r--drivers/media/tuners/tuner-simple.c5
-rw-r--r--drivers/media/tuners/tuner-xc2028.c15
-rw-r--r--drivers/media/tuners/xc4000.c16
-rw-r--r--drivers/media/tuners/xc5000.c12
-rw-r--r--drivers/media/usb/au0828/au0828-video.c2
-rw-r--r--drivers/media/usb/cx231xx/Kconfig2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c14
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c10
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c7
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig5
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c492
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/usb_urb.c4
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig6
-rw-r--r--drivers/media/usb/dvb-usb/Makefile3
-rw-r--r--drivers/media/usb/dvb-usb/af9005-fe.c6
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c6
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c1
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c6
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c19
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c11
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c3
-rw-r--r--drivers/media/usb/dvb-usb/usb-urb.c4
-rw-r--r--drivers/media/usb/dvb-usb/vp702x-fe.c7
-rw-r--r--drivers/media/usb/dvb-usb/vp7045-fe.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c39
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c4
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c9
-rw-r--r--drivers/media/usb/go7007/snd-go7007.c11
-rw-r--r--drivers/media/usb/gspca/kinect.c2
-rw-r--r--drivers/media/usb/hackrf/hackrf.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c3
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c5
-rw-r--r--drivers/media/usb/tm6000/tm6000-i2c.c2
-rw-r--r--drivers/media/usb/ttusb-dec/ttusbdecfe.c12
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c5
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c215
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c5
-rw-r--r--drivers/media/usb/uvc/uvc_status.c121
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c4
-rw-r--r--drivers/media/usb/uvc/uvc_video.c62
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h18
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c38
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c128
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c266
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c22
-rw-r--r--drivers/memory/Kconfig6
-rw-r--r--drivers/memory/tegra/tegra186.c1
-rw-r--r--drivers/message/fusion/mptbase.c5
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptfc.c2
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mfd/88pm860x-i2c.c8
-rw-r--r--drivers/mfd/Kconfig83
-rw-r--r--drivers/mfd/Makefile17
-rw-r--r--drivers/mfd/arizona-core.c34
-rw-r--r--drivers/mfd/as3722.c12
-rw-r--r--drivers/mfd/atmel-hlcdc.c1
-rw-r--r--drivers/mfd/axp20x-i2c.c2
-rw-r--r--drivers/mfd/axp20x.c28
-rw-r--r--drivers/mfd/cros_ec_dev.c33
-rw-r--r--drivers/mfd/cs47l35-tables.c1609
-rw-r--r--drivers/mfd/cs47l85-tables.c3009
-rw-r--r--drivers/mfd/cs47l90-tables.c2674
-rw-r--r--drivers/mfd/da9063-core.c44
-rw-r--r--drivers/mfd/da9063-i2c.c239
-rw-r--r--drivers/mfd/da9063-irq.c264
-rw-r--r--drivers/mfd/dln2.c10
-rw-r--r--drivers/mfd/hi655x-pmic.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/kempld-core.c15
-rw-r--r--drivers/mfd/madera-core.c609
-rw-r--r--drivers/mfd/madera-i2c.c140
-rw-r--r--drivers/mfd/madera-spi.c139
-rw-r--r--drivers/mfd/madera.h44
-rw-r--r--drivers/mfd/rave-sp.c119
-rw-r--r--drivers/mfd/rohm-bd718x7.c211
-rw-r--r--drivers/mfd/sec-core.c1
-rw-r--r--drivers/mfd/sm501.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c3
-rw-r--r--drivers/mfd/wm8994-core.c15
-rw-r--r--drivers/misc/aspeed-lpc-snoop.c84
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c12
-rw-r--r--drivers/misc/cxl/Kconfig8
-rw-r--r--drivers/misc/cxl/Makefile2
-rw-r--r--drivers/misc/cxl/api.c154
-rw-r--r--drivers/misc/cxl/base.c83
-rw-r--r--drivers/misc/cxl/context.c3
-rw-r--r--drivers/misc/cxl/cxl.h41
-rw-r--r--drivers/misc/cxl/cxllib.c4
-rw-r--r--drivers/misc/cxl/debugfs.c5
-rw-r--r--drivers/misc/cxl/fault.c4
-rw-r--r--drivers/misc/cxl/guest.c8
-rw-r--r--drivers/misc/cxl/main.c7
-rw-r--r--drivers/misc/cxl/native.c3
-rw-r--r--drivers/misc/cxl/pci.c392
-rw-r--r--drivers/misc/cxl/phb.c44
-rw-r--r--drivers/misc/cxl/vphb.c46
-rw-r--r--drivers/misc/eeprom/at24.c17
-rw-r--r--drivers/misc/eeprom/at25.c4
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c6
-rw-r--r--drivers/misc/eeprom/max6875.c3
-rw-r--r--drivers/misc/genwqe/card_base.h2
-rw-r--r--drivers/misc/genwqe/card_debugfs.c4
-rw-r--r--drivers/misc/genwqe/card_dev.c5
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c27
-rw-r--r--drivers/misc/ibmvmc.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c92
-rw-r--r--drivers/misc/mei/bus.c36
-rw-r--r--drivers/misc/mei/client.c128
-rw-r--r--drivers/misc/mei/client.h2
-rw-r--r--drivers/misc/mei/debugfs.c2
-rw-r--r--drivers/misc/mei/hbm.c37
-rw-r--r--drivers/misc/mei/hw-me.c77
-rw-r--r--drivers/misc/mei/hw-me.h6
-rw-r--r--drivers/misc/mei/hw-txe.c66
-rw-r--r--drivers/misc/mei/hw.h73
-rw-r--r--drivers/misc/mei/interrupt.c22
-rw-r--r--drivers/misc/mei/main.c41
-rw-r--r--drivers/misc/mei/mei_dev.h77
-rw-r--r--drivers/misc/mic/cosm/cosm_main.h5
-rw-r--r--drivers/misc/mic/cosm/cosm_scif_server.c6
-rw-r--r--drivers/misc/mic/cosm_client/cosm_scif_client.c6
-rw-r--r--drivers/misc/mic/scif/scif_api.c21
-rw-r--r--drivers/misc/mic/scif/scif_dma.c7
-rw-r--r--drivers/misc/ocxl/context.c22
-rw-r--r--drivers/misc/ocxl/link.c27
-rw-r--r--drivers/misc/ocxl/sysfs.c5
-rw-r--r--drivers/misc/pci_endpoint_test.c290
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c7
-rw-r--r--drivers/misc/sgi-xp/xpc_channel.c3
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c5
-rw-r--r--drivers/misc/sram.c18
-rw-r--r--drivers/misc/ti-st/Kconfig3
-rw-r--r--drivers/misc/ti-st/st_kim.c7
-rw-r--r--drivers/misc/tsl2550.c2
-rw-r--r--drivers/misc/vmw_balloon.c185
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c9
-rw-r--r--drivers/mmc/core/core.c48
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/mmc.c16
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/sdio.c1
-rw-r--r--drivers/mmc/core/slot-gpio.c2
-rw-r--r--drivers/mmc/host/Kconfig11
-rw-r--r--drivers/mmc/host/Makefile4
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c33
-rw-r--r--drivers/mmc/host/dw_mmc.c7
-rw-r--r--drivers/mmc/host/mmci.c82
-rw-r--r--drivers/mmc/host/mmci.h81
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.c18
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.h5
-rw-r--r--drivers/mmc/host/mxcmmc.c3
-rw-r--r--drivers/mmc/host/pxamci.c236
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c139
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c35
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c17
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c50
-rw-r--r--drivers/mmc/host/sdhci-msm.c510
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c16
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c116
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c91
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c3
-rw-r--r--drivers/mmc/host/sdhci-pci-dwc-mshc.c84
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c129
-rw-r--r--drivers/mmc/host/sdhci-pci.h5
-rw-r--r--drivers/mmc/host/sdhci-tegra.c37
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c1
-rw-r--r--drivers/mmc/host/sdhci.c25
-rw-r--r--drivers/mmc/host/sdhci.h5
-rw-r--r--drivers/mmc/host/sunxi-mmc.c14
-rw-r--r--drivers/mmc/host/tmio_mmc.h6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c52
-rw-r--r--drivers/mtd/Kconfig24
-rw-r--r--drivers/mtd/chips/Kconfig2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c2
-rw-r--r--drivers/mtd/chips/gen_probe.c7
-rw-r--r--drivers/mtd/devices/Kconfig4
-rw-r--r--drivers/mtd/devices/m25p80.c8
-rw-r--r--drivers/mtd/devices/powernv_flash.c1
-rw-r--r--drivers/mtd/devices/sst25l.c5
-rw-r--r--drivers/mtd/lpddr/Kconfig8
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c2
-rw-r--r--drivers/mtd/maps/Kconfig24
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c3
-rw-r--r--drivers/mtd/maps/impa7.c5
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c2
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c5
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c3
-rw-r--r--drivers/mtd/maps/solutionengine.c6
-rw-r--r--drivers/mtd/mtdchar.c10
-rw-r--r--drivers/mtd/mtdcore.c28
-rw-r--r--drivers/mtd/mtdpart.c33
-rw-r--r--drivers/mtd/nand/Kconfig1
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/onenand/generic.c5
-rw-r--r--drivers/mtd/nand/onenand/samsung.c5
-rw-r--r--drivers/mtd/nand/raw/Kconfig157
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c175
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c1
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c67
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c1
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c143
-rw-r--r--drivers/mtd/nand/raw/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c3
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c231
-rw-r--r--drivers/mtd/nand/raw/denali.c216
-rw-r--r--drivers/mtd/nand/raw/denali.h1
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c70
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c1
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/docg4.c89
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c25
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c25
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c183
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c15
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c76
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h11
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c78
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c51
-rw-r--r--drivers/mtd/nand/raw/jz4780_nand.c41
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c61
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c77
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c319
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c79
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c157
-rw-r--r--drivers/mtd/nand/raw/nand_base.c345
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c10
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c23
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c351
-rw-r--r--drivers/mtd/nand/raw/nand_timings.c32
-rw-r--r--drivers/mtd/nand/raw/nandsim.c84
-rw-r--r--drivers/mtd/nand/raw/ndfc.c4
-rw-r--r--drivers/mtd/nand/raw/omap2.c463
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c9
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c4
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c591
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c52
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c36
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c5
-rw-r--r--drivers/mtd/nand/raw/sm_common.c39
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c55
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c44
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c1246
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c44
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c127
-rw-r--r--drivers/mtd/nand/spi/Kconfig7
-rw-r--r--drivers/mtd/nand/spi/Makefile3
-rw-r--r--drivers/mtd/nand/spi/core.c1155
-rw-r--r--drivers/mtd/nand/spi/macronix.c144
-rw-r--r--drivers/mtd/nand/spi/micron.c133
-rw-r--r--drivers/mtd/nand/spi/winbond.c141
-rw-r--r--drivers/mtd/nftlmount.c3
-rw-r--r--drivers/mtd/parsers/parser_trx.c7
-rw-r--r--drivers/mtd/spi-nor/atmel-quadspi.c23
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c26
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c2
-rw-r--r--drivers/mtd/spi-nor/nxp-spifi.c1
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c18
-rw-r--r--drivers/mtd/spi-nor/stm32-quadspi.c6
-rw-r--r--drivers/mux/Kconfig10
-rw-r--r--drivers/mux/Makefile2
-rw-r--r--drivers/mux/adgs1408.c131
-rw-r--r--drivers/net/bonding/bond_main.c17
-rw-r--r--drivers/net/bonding/bond_options.c23
-rw-r--r--drivers/net/bonding/bond_sysfs.c7
-rw-r--r--drivers/net/can/Kconfig6
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/dev.c14
-rw-r--r--drivers/net/can/flexcan.c33
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/m_can/m_can.c18
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c5
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c2
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c39
-rw-r--r--drivers/net/can/sja1000/peak_pci.c2
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/sun4i_can.c2
-rw-r--r--drivers/net/can/usb/Kconfig48
-rw-r--r--drivers/net/can/usb/Makefile7
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2085
-rw-r--r--drivers/net/can/usb/kvaser_usb/Makefile2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h188
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c835
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c2028
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c1358
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/can/usb/ucan.c1613
-rw-r--r--drivers/net/can/xilinx_can.c784
-rw-r--r--drivers/net/dsa/Kconfig24
-rw-r--r--drivers/net/dsa/Makefile3
-rw-r--r--drivers/net/dsa/b53/b53_srab.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c19
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c46
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h2
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c9
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c268
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h47
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h3
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2_avb.c25
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c134
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h14
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c109
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h23
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c101
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h71
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c439
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h24
-rw-r--r--drivers/net/dsa/realtek-smi.c489
-rw-r--r--drivers/net/dsa/realtek-smi.h144
-rw-r--r--drivers/net/dsa/rtl8366.c515
-rw-r--r--drivers/net/dsa/rtl8366rb.c1454
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.c1365
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c1
-rw-r--r--drivers/net/ethernet/8390/mac8390.c20
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile9
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/alteon/acenic.c6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c5
-rw-r--r--drivers/net/ethernet/amd/Kconfig4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c7
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c6
-rw-r--r--drivers/net/ethernet/apple/bmac.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c117
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h24
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c59
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c49
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c57
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c52
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h35
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c69
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c10
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/ethernet/aurora/Kconfig1
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig12
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c227
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h14
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c257
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h34
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h66
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c89
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c121
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c375
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h1214
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c1
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c30
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/cadence/Kconfig6
-rw-r--r--drivers/net/ethernet/cadence/macb.h11
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c126
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c1
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c1
-rw-r--r--drivers/net/ethernet/cavium/Kconfig18
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c47
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c45
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h9
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c26
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c1
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c708
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c186
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c165
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c376
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c89
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h29
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c4
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c80
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c135
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig40
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h57
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c96
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c103
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c39
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c22
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c27
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c5
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c20
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c21
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c18
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c9
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig15
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c30
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c56
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c408
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c115
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h120
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c676
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h48
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c30
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c75
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c26
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c177
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c36
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c19
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c211
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h33
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c347
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c27
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c69
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c13
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c267
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c389
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1
-rw-r--r--drivers/net/ethernet/jme.c4
-rw-r--r--drivers/net/ethernet/lantiq_etop.c10
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c440
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c15
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h134
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c973
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h203
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c703
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c179
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c223
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h75
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c71
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c239
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c220
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c947
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h175
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h276
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h210
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c603
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c293
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c269
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c219
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c98
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c230
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/vxlan.h)43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c190
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c175
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/ib.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h973
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c473
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h260
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c428
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c342
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c239
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c271
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c132
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c536
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c1168
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c285
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h124
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c438
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h228
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c302
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c193
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c463
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c354
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c392
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h36
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c3
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c3
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c723
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.h11
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c284
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h234
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c1159
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h74
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c445
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c2
-rw-r--r--drivers/net/ethernet/neterion/Kconfig23
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c31
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c313
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c54
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h58
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c79
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c92
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c213
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h37
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c48
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c56
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c169
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c8
-rw-r--r--drivers/net/ethernet/ni/nixge.c12
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Makefile2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h40
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c262
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h35
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c19
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c193
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c18
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c15
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c16
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c32
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c11
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c11
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c43
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c13
-rw-r--r--drivers/staging/rtl8188eu/include/HalVerDef.h10
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h12
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h10
-rw-r--r--drivers/staging/rtl8188eu/include/fw.h13
-rw-r--r--drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h (renamed from drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h)10
-rw-r--r--drivers/staging/rtl8188eu/include/hal8188e_phy_reg.h (renamed from drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h)10
-rw-r--r--drivers/staging/rtl8188eu/include/hal8188e_rate_adaptive.h (renamed from drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h)2
-rw-r--r--drivers/staging/rtl8188eu/include/hal_com.h10
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h12
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h25
-rw-r--r--drivers/staging/rtl8188eu/include/mlme_osdep.h10
-rw-r--r--drivers/staging/rtl8188eu/include/mon.h10
-rw-r--r--drivers/staging/rtl8188eu/include/mp_custom_oid.h347
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h12
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h11
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegDefine11N.h166
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h10
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h16
-rw-r--r--drivers/staging/rtl8188eu/include/odm_reg.h10
-rw-r--r--drivers/staging/rtl8188eu/include/odm_rtl8188e.h (renamed from drivers/staging/rtl8188eu/include/odm_RTL8188E.h)10
-rw-r--r--drivers/staging/rtl8188eu/include/odm_types.h10
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h10
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h10
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseq.h11
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseqcmd.h10
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_cmd.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_dm.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h14
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_led.h28
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h11
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_android.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ap.h14
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h12
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_event.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ht.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h74
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_set.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_iol.h13
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h11
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h14
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h1086
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_qos.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_sreset.h11
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h12
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h10
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h10
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h18
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h10
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c72
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c13
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mon.c10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c27
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c38
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c16
-rw-r--r-- drivers/staging/rtl8192e/rtl819x_BAProc.c | 22
-rw-r--r-- drivers/staging/rtl8192e/rtllib_crypt_tkip.c | 56
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/dot11d.c | 60
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/dot11d.h | 59
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211.h | 22
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c | 57
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 86
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c | 1018
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c | 6
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c | 18
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c | 179
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h | 127
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c | 643
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h | 595
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h | 126
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c | 280
-rw-r--r-- drivers/staging/rtl8192u/r8190_rtl8256.c | 44
-rw-r--r-- drivers/staging/rtl8192u/r8190_rtl8256.h | 2
-rw-r--r-- drivers/staging/rtl8192u/r8192U.h | 114
-rw-r--r-- drivers/staging/rtl8192u/r8192U_core.c | 75
-rw-r--r-- drivers/staging/rtl8192u/r8192U_dm.c | 195
-rw-r--r-- drivers/staging/rtl8192u/r8192U_dm.h | 240
-rw-r--r-- drivers/staging/rtl8192u/r819xU_cmdpkt.c | 38
-rw-r--r-- drivers/staging/rtl8192u/r819xU_cmdpkt.h | 33
-rw-r--r-- drivers/staging/rtl8192u/r819xU_phy.c | 276
-rw-r--r-- drivers/staging/rtl8192u/r819xU_phy.h | 71
-rw-r--r-- drivers/staging/rtl8192u/r819xU_phyreg.h | 735
-rw-r--r-- drivers/staging/rtl8712/rtl8712_led.c | 3
-rw-r--r-- drivers/staging/rtl8712/rtl871x_security.c | 5
-rw-r--r-- drivers/staging/rtl8712/wifi.h | 1
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_ap.c | 2
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_btcoex.c | 14
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_efuse.c | 18
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_ieee80211.c | 143
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_mlme_ext.c | 6
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_security.c | 5
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_wlan_util.c | 108
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_xmit.c | 4
-rw-r--r-- drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c | 5
-rw-r--r-- drivers/staging/rtl8723bs/hal/hal_com_phycfg.c | 268
-rw-r--r-- drivers/staging/rtl8723bs/hal/odm.h | 2
-rw-r--r-- drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c | 2
-rw-r--r-- drivers/staging/rtl8723bs/include/hal_com_phycfg.h | 2
-rw-r--r-- drivers/staging/rtl8723bs/include/ieee80211.h | 4
-rw-r--r-- drivers/staging/rtl8723bs/include/rtl8192c_rf.h | 28
-rw-r--r-- drivers/staging/rtl8723bs/include/rtl8723b_rf.h | 1
-rw-r--r-- drivers/staging/rtl8723bs/include/rtw_beamforming.h | 127
-rw-r--r-- drivers/staging/rtl8723bs/include/rtw_br_ext.h | 55
-rw-r--r-- drivers/staging/rtl8723bs/include/wifi.h | 1
-rw-r--r-- drivers/staging/rtl8723bs/include/xmit_osdep.h | 2
-rw-r--r-- drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | 19
-rw-r--r-- drivers/staging/rtl8723bs/os_dep/ioctl_linux.c | 108
-rw-r--r-- drivers/staging/rtl8723bs/os_dep/os_intfs.c | 7
-rw-r--r-- drivers/staging/rtl8723bs/os_dep/sdio_intf.c | 19
-rw-r--r-- drivers/staging/rtl8723bs/os_dep/xmit_linux.c | 5
-rw-r--r-- drivers/staging/rtlwifi/base.c | 109
-rw-r--r-- drivers/staging/rtlwifi/halmac/halmac_api.h | 1
-rw-r--r-- drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h | 17
-rw-r--r-- drivers/staging/rtlwifi/halmac/rtl_halmac.c | 2
-rw-r--r-- drivers/staging/rtlwifi/phydm/phydm.c | 2
-rw-r--r-- drivers/staging/rtlwifi/rtl8822be/fw.c | 2
-rw-r--r-- drivers/staging/rtlwifi/rtl8822be/hw.c | 2
-rw-r--r-- drivers/staging/rtlwifi/rtl8822be/sw.c | 2
-rw-r--r-- drivers/staging/rtlwifi/wifi.h | 1
-rw-r--r-- drivers/staging/rts5208/Makefile | 2
-rw-r--r-- drivers/staging/rts5208/ms.c | 337
-rw-r--r-- drivers/staging/rts5208/rtsx.c | 38
-rw-r--r-- drivers/staging/rts5208/rtsx.h | 25
-rw-r--r-- drivers/staging/rts5208/rtsx_card.c | 29
-rw-r--r-- drivers/staging/rts5208/rtsx_card.h | 1
-rw-r--r-- drivers/staging/rts5208/rtsx_chip.c | 140
-rw-r--r-- drivers/staging/rts5208/rtsx_chip.h | 18
-rw-r--r-- drivers/staging/rts5208/rtsx_scsi.c | 216
-rw-r--r-- drivers/staging/rts5208/rtsx_transport.c | 1
-rw-r--r-- drivers/staging/rts5208/sd.c | 328
-rw-r--r-- drivers/staging/rts5208/spi.c | 71
-rw-r--r-- drivers/staging/rts5208/trace.c | 27
-rw-r--r-- drivers/staging/rts5208/trace.h | 40
-rw-r--r-- drivers/staging/rts5208/xd.c | 115
-rw-r--r-- drivers/staging/skein/Kconfig | 16
-rw-r--r-- drivers/staging/skein/Makefile | 11
-rw-r--r-- drivers/staging/skein/TODO | 8
-rw-r--r-- drivers/staging/skein/skein_api.c | 231
-rw-r--r-- drivers/staging/skein/skein_api.h | 230
-rw-r--r-- drivers/staging/skein/skein_base.c | 870
-rw-r--r-- drivers/staging/skein/skein_base.h | 336
-rw-r--r-- drivers/staging/skein/skein_block.c | 469
-rw-r--r-- drivers/staging/skein/skein_block.h | 347
-rw-r--r-- drivers/staging/skein/skein_generic.c | 214
-rw-r--r-- drivers/staging/skein/skein_iv.h | 187
-rw-r--r-- drivers/staging/skein/threefish_api.c | 78
-rw-r--r-- drivers/staging/skein/threefish_api.h | 171
-rw-r--r-- drivers/staging/skein/threefish_block.c | 8244
-rw-r--r-- drivers/staging/sm750fb/sm750.c | 2
-rw-r--r-- drivers/staging/speakup/kobjects.c | 4
-rw-r--r-- drivers/staging/speakup/speakup_soft.c | 6
-rw-r--r-- drivers/staging/speakup/spk_types.h | 2
-rw-r--r-- drivers/staging/speakup/synth.c | 40
-rw-r--r-- drivers/staging/speakup/varhandlers.c | 5
-rw-r--r-- drivers/staging/typec/Kconfig | 23
-rw-r--r-- drivers/staging/typec/Makefile | 2
-rw-r--r-- drivers/staging/typec/TODO | 5
-rw-r--r-- drivers/staging/vboxvideo/TODO | 1
-rw-r--r-- drivers/staging/vboxvideo/vbox_drv.c | 34
-rw-r--r-- drivers/staging/vboxvideo/vbox_drv.h | 2
-rw-r--r-- drivers/staging/vboxvideo/vbox_main.c | 2
-rw-r--r-- drivers/staging/vboxvideo/vbox_mode.c | 38
-rw-r--r-- drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c | 127
-rw-r--r-- drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c | 31
-rw-r--r-- drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c | 11
-rw-r--r-- drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c | 4
-rw-r--r-- drivers/staging/vt6655/dpc.c | 4
-rw-r--r-- drivers/staging/vt6656/dpc.c | 4
-rw-r--r-- drivers/staging/vt6656/rxtx.c | 5
-rw-r--r-- drivers/staging/wilc1000/Makefile | 3
-rw-r--r-- drivers/staging/wilc1000/TODO | 11
-rw-r--r-- drivers/staging/wilc1000/coreconfigurator.c | 89
-rw-r--r-- drivers/staging/wilc1000/coreconfigurator.h | 25
-rw-r--r-- drivers/staging/wilc1000/host_interface.c | 2401
-rw-r--r-- drivers/staging/wilc1000/host_interface.h | 34
-rw-r--r-- drivers/staging/wilc1000/linux_mon.c | 23
-rw-r--r-- drivers/staging/wilc1000/linux_wlan.c | 177
-rw-r--r-- drivers/staging/wilc1000/microchip,wilc1000,sdio.txt | 32
-rw-r--r-- drivers/staging/wilc1000/microchip,wilc1000,spi.txt | 26
-rw-r--r-- drivers/staging/wilc1000/wilc_debugfs.c | 28
-rw-r--r-- drivers/staging/wilc1000/wilc_sdio.c | 57
-rw-r--r-- drivers/staging/wilc1000/wilc_spi.c | 54
-rw-r--r-- drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 347
-rw-r--r-- drivers/staging/wilc1000/wilc_wfi_cfgoperations.h | 5
-rw-r--r-- drivers/staging/wilc1000/wilc_wfi_netdevice.h | 41
-rw-r--r-- drivers/staging/wilc1000/wilc_wlan.c | 291
-rw-r--r-- drivers/staging/wilc1000/wilc_wlan.h | 21
-rw-r--r-- drivers/staging/wilc1000/wilc_wlan_cfg.c | 38
-rw-r--r-- drivers/staging/wilc1000/wilc_wlan_cfg.h | 16
-rw-r--r-- drivers/staging/wilc1000/wilc_wlan_if.h | 70
-rw-r--r-- drivers/staging/wlan-ng/cfg80211.c | 44
-rw-r--r-- drivers/staging/wlan-ng/hfa384x.h | 2
-rw-r--r-- drivers/staging/wlan-ng/hfa384x_usb.c | 2
-rw-r--r-- drivers/staging/wlan-ng/p80211hdr.h | 30
-rw-r--r-- drivers/staging/wlan-ng/p80211types.h | 27
-rw-r--r-- drivers/staging/wlan-ng/prism2fw.c | 7
-rw-r--r-- drivers/staging/wlan-ng/prism2mgmt.c | 41
-rw-r--r-- drivers/staging/wlan-ng/prism2mib.c | 8
-rw-r--r-- drivers/target/Kconfig | 5
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_target.c | 16
-rw-r--r-- drivers/target/iscsi/iscsi_target_configfs.c | 6
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_tpg.c | 3
-rw-r--r-- drivers/target/iscsi/iscsi_target_util.c | 55
-rw-r--r-- drivers/target/loopback/Kconfig | 1
-rw-r--r-- drivers/target/loopback/tcm_loop.c | 15
-rw-r--r-- drivers/target/sbp/sbp_target.c | 18
-rw-r--r-- drivers/target/target_core_configfs.c | 12
-rw-r--r-- drivers/target/target_core_device.c | 53
-rw-r--r-- drivers/target/target_core_fabric_configfs.c | 5
-rw-r--r-- drivers/target/target_core_internal.h | 2
-rw-r--r-- drivers/target/target_core_pr.c | 15
-rw-r--r-- drivers/target/target_core_sbc.c | 7
-rw-r--r-- drivers/target/target_core_tmr.c | 30
-rw-r--r-- drivers/target/target_core_transport.c | 285
-rw-r--r-- drivers/target/target_core_ua.c | 43
-rw-r--r-- drivers/target/target_core_ua.h | 3
-rw-r--r-- drivers/target/target_core_user.c | 377
-rw-r--r-- drivers/target/target_core_xcopy.c | 5
-rw-r--r-- drivers/target/tcm_fc/tfc_cmd.c | 10
-rw-r--r-- drivers/target/tcm_fc/tfc_conf.c | 5
-rw-r--r-- drivers/target/tcm_fc/tfc_sess.c | 5
-rw-r--r-- drivers/tee/tee_shm.c | 6
-rw-r--r-- drivers/thermal/armada_thermal.c | 534
-rw-r--r-- drivers/thermal/imx_thermal.c | 33
-rw-r--r-- drivers/thermal/qcom/Makefile | 2
-rw-r--r-- drivers/thermal/qcom/tsens-8996.c | 84
-rw-r--r-- drivers/thermal/qcom/tsens-common.c | 29
-rw-r--r-- drivers/thermal/qcom/tsens-v2.c | 77
-rw-r--r-- drivers/thermal/qcom/tsens.c | 3
-rw-r--r-- drivers/thermal/qcom/tsens.h | 8
-rw-r--r-- drivers/thermal/rcar_thermal.c | 2
-rw-r--r-- drivers/thermal/samsung/exynos_tmu.c | 5
-rw-r--r-- drivers/thermal/thermal_hwmon.c | 3
-rw-r--r-- drivers/thermal/ti-soc-thermal/dra752-bandgap.h | 68
-rw-r--r-- drivers/thermal/ti-soc-thermal/dra752-thermal-data.c | 65
-rw-r--r-- drivers/thermal/ti-soc-thermal/omap3-thermal-data.c | 6
-rw-r--r-- drivers/thermal/ti-soc-thermal/omap4-thermal-data.c | 10
-rw-r--r-- drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h | 10
-rw-r--r-- drivers/thermal/ti-soc-thermal/omap5-thermal-data.c | 46
-rw-r--r-- drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h | 41
-rw-r--r-- drivers/thermal/ti-soc-thermal/ti-bandgap.c | 370
-rw-r--r-- drivers/thermal/ti-soc-thermal/ti-bandgap.h | 43
-rw-r--r-- drivers/thunderbolt/domain.c | 59
-rw-r--r-- drivers/thunderbolt/icm.c | 174
-rw-r--r-- drivers/thunderbolt/nhi.c | 46
-rw-r--r-- drivers/thunderbolt/switch.c | 65
-rw-r--r-- drivers/thunderbolt/tb.h | 10
-rw-r--r-- drivers/thunderbolt/tb_msgs.h | 4
-rw-r--r-- drivers/thunderbolt/tb_regs.h | 2
-rw-r--r-- drivers/thunderbolt/xdomain.c | 18
-rw-r--r-- drivers/tty/goldfish.c | 1
-rw-r--r-- drivers/tty/hvc/hvc_console.c | 194
-rw-r--r-- drivers/tty/hvc/hvc_console.h | 1
-rw-r--r-- drivers/tty/hvc/hvc_opal.c | 31
-rw-r--r-- drivers/tty/pty.c | 2
-rw-r--r-- drivers/tty/rocket.c | 2
-rw-r--r-- drivers/tty/serdev/core.c | 48
-rw-r--r-- drivers/tty/serial/8250/8250_core.c | 6
-rw-r--r-- drivers/tty/serial/8250/8250_dw.c | 74
-rw-r--r-- drivers/tty/serial/8250/8250_em.c | 1
-rw-r--r-- drivers/tty/serial/8250/8250_exar.c | 54
-rw-r--r-- drivers/tty/serial/8250/8250_of.c | 6
-rw-r--r-- drivers/tty/serial/8250/8250_omap.c | 1
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 63
-rw-r--r-- drivers/tty/serial/8250/serial_cs.c | 6
-rw-r--r-- drivers/tty/serial/imx.c | 5
-rw-r--r-- drivers/tty/serial/jsm/jsm_tty.c | 3
-rw-r--r-- drivers/tty/serial/max310x.c | 14
-rw-r--r-- drivers/tty/serial/pxa.c | 3
-rw-r--r-- drivers/tty/serial/qcom_geni_serial.c | 261
-rw-r--r-- drivers/tty/serial/sccnxp.c | 1
-rw-r--r-- drivers/tty/serial/serial_core.c | 17
-rw-r--r-- drivers/tty/serial/sh-sci.c | 191
-rw-r--r-- drivers/tty/serial/uartlite.c | 112
-rw-r--r-- drivers/tty/serial/xilinx_uartps.c | 23
-rw-r--r-- drivers/tty/sysrq.c | 2
-rw-r--r-- drivers/tty/tty_audit.c | 2
-rw-r--r-- drivers/tty/tty_baudrate.c | 22
-rw-r--r-- drivers/tty/tty_io.c | 21
-rw-r--r-- drivers/tty/tty_ldsem.c | 82
-rw-r--r-- drivers/tty/vt/keyboard.c | 34
-rw-r--r-- drivers/tty/vt/selection.c | 48
-rw-r--r-- drivers/tty/vt/vc_screen.c | 90
-rw-r--r-- drivers/tty/vt/vt.c | 367
-rw-r--r-- drivers/uio/uio.c | 151
-rw-r--r-- drivers/uio/uio_cif.c | 4
-rw-r--r-- drivers/uio/uio_fsl_elbc_gpcm.c | 1
-rw-r--r-- drivers/uio/uio_hv_generic.c | 4
-rw-r--r-- drivers/uio/uio_netx.c | 3
-rw-r--r-- drivers/uio/uio_pci_generic.c | 3
-rw-r--r-- drivers/uio/uio_pruss.c | 69
-rw-r--r-- drivers/uio/uio_sercos3.c | 1
-rw-r--r-- drivers/usb/chipidea/Kconfig | 9
-rw-r--r-- drivers/usb/chipidea/Makefile | 3
-rw-r--r-- drivers/usb/chipidea/ci.h | 8
-rw-r--r-- drivers/usb/chipidea/ci_hdrc_tegra.c | 4
-rw-r--r-- drivers/usb/chipidea/ulpi.c | 3
-rw-r--r-- drivers/usb/class/cdc-acm.c | 43
-rw-r--r-- drivers/usb/class/cdc-wdm.c | 47
-rw-r--r-- drivers/usb/class/usblp.c | 10
-rw-r--r-- drivers/usb/class/usbtmc.c | 320
-rw-r--r-- drivers/usb/core/devio.c | 5
-rw-r--r-- drivers/usb/core/hub.c | 50
-rw-r--r-- drivers/usb/core/ledtrig-usbport.c | 34
-rw-r--r-- drivers/usb/core/message.c | 9
-rw-r--r-- drivers/usb/core/quirks.c | 4
-rw-r--r-- drivers/usb/dwc2/core.c | 241
-rw-r--r-- drivers/usb/dwc2/core.h | 109
-rw-r--r-- drivers/usb/dwc2/core_intr.c | 118
-rw-r--r-- drivers/usb/dwc2/debugfs.c | 55
-rw-r--r-- drivers/usb/dwc2/gadget.c | 537
-rw-r--r-- drivers/usb/dwc2/hcd.c | 513
-rw-r--r-- drivers/usb/dwc2/hcd.h | 10
-rw-r--r-- drivers/usb/dwc2/hcd_ddma.c | 10
-rw-r--r-- drivers/usb/dwc2/hcd_intr.c | 105
-rw-r--r-- drivers/usb/dwc2/hcd_queue.c | 10
-rw-r--r-- drivers/usb/dwc2/params.c | 24
-rw-r--r-- drivers/usb/dwc2/platform.c | 19
-rw-r--r-- drivers/usb/dwc3/Kconfig | 13
-rw-r--r-- drivers/usb/dwc3/Makefile | 1
-rw-r--r-- drivers/usb/dwc3/core.c | 118
-rw-r--r-- drivers/usb/dwc3/core.h | 17
-rw-r--r-- drivers/usb/dwc3/dwc3-haps.c | 137
-rw-r--r-- drivers/usb/dwc3/dwc3-of-simple.c | 29
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 223
-rw-r--r-- drivers/usb/dwc3/ep0.c | 3
-rw-r--r-- drivers/usb/dwc3/gadget.c | 2
-rw-r--r-- drivers/usb/dwc3/gadget.h | 2
-rw-r--r-- drivers/usb/gadget/composite.c | 1
-rw-r--r-- drivers/usb/gadget/configfs.c | 4
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 2
-rw-r--r-- drivers/usb/gadget/function/f_mass_storage.c | 31
-rw-r--r-- drivers/usb/gadget/function/f_mass_storage.h | 4
-rw-r--r-- drivers/usb/gadget/function/f_tcm.c | 19
-rw-r--r-- drivers/usb/gadget/function/f_uac2.c | 24
-rw-r--r-- drivers/usb/gadget/function/f_uvc.c | 31
-rw-r--r-- drivers/usb/gadget/function/f_uvc.h | 6
-rw-r--r-- drivers/usb/gadget/function/u_audio.c | 88
-rw-r--r-- drivers/usb/gadget/function/u_uvc.h | 5
-rw-r--r-- drivers/usb/gadget/function/uvc.h | 53
-rw-r--r-- drivers/usb/gadget/function/uvc_configfs.c | 12
-rw-r--r-- drivers/usb/gadget/function/uvc_queue.h | 12
-rw-r--r-- drivers/usb/gadget/function/uvc_v4l2.c | 3
-rw-r--r-- drivers/usb/gadget/function/uvc_video.h | 2
-rw-r--r-- drivers/usb/gadget/legacy/tcm_usb_gadget.c | 2
-rw-r--r-- drivers/usb/gadget/legacy/webcam.c | 4
-rw-r--r-- drivers/usb/gadget/udc/Kconfig | 1
-rw-r--r-- drivers/usb/gadget/udc/aspeed-vhub/Kconfig | 1
-rw-r--r-- drivers/usb/gadget/udc/aspeed-vhub/ep0.c | 11
-rw-r--r-- drivers/usb/gadget/udc/aspeed-vhub/epn.c | 14
-rw-r--r-- drivers/usb/gadget/udc/aspeed-vhub/vhub.h | 33
-rw-r--r-- drivers/usb/gadget/udc/core.c | 18
-rw-r--r-- drivers/usb/gadget/udc/fsl_mxc_udc.c | 1
-rw-r--r-- drivers/usb/gadget/udc/r8a66597-udc.c | 6
-rw-r--r-- drivers/usb/gadget/udc/renesas_usb3.c | 84
-rw-r--r-- drivers/usb/host/Kconfig | 8
-rw-r--r-- drivers/usb/host/Makefile | 1
-rw-r--r-- drivers/usb/host/ehci-hcd.c | 1
-rw-r--r-- drivers/usb/host/ehci-hub.c | 8
-rw-r--r-- drivers/usb/host/ehci-npcm7xx.c | 212
-rw-r--r-- drivers/usb/host/ehci-ps3.c | 6
-rw-r--r-- drivers/usb/host/ehci-sched.c | 4
-rw-r--r-- drivers/usb/host/ohci-ps3.c | 6
-rw-r--r-- drivers/usb/host/u132-hcd.c | 2
-rw-r--r-- drivers/usb/host/whci/pzl.c | 2
-rw-r--r-- drivers/usb/host/xhci-dbgcap.c | 14
-rw-r--r-- drivers/usb/host/xhci-hub.c | 11
-rw-r--r-- drivers/usb/host/xhci-mem.c | 2
-rw-r--r-- drivers/usb/host/xhci-plat.c | 1
-rw-r--r-- drivers/usb/host/xhci-rcar.c | 43
-rw-r--r-- drivers/usb/host/xhci.c | 2
-rw-r--r-- drivers/usb/host/xhci.h | 2
-rw-r--r-- drivers/usb/isp1760/isp1760-hcd.c | 2
-rw-r--r-- drivers/usb/misc/adutux.c | 10
-rw-r--r-- drivers/usb/misc/appledisplay.c | 1
-rw-r--r-- drivers/usb/misc/iowarrior.c | 5
-rw-r--r-- drivers/usb/misc/ldusb.c | 7
-rw-r--r-- drivers/usb/misc/legousbtower.c | 5
-rw-r--r-- drivers/usb/misc/sisusbvga/sisusb.c | 4
-rw-r--r-- drivers/usb/misc/usbtest.c | 10
-rw-r--r-- drivers/usb/misc/uss720.c | 2
-rw-r--r-- drivers/usb/misc/yurex.c | 23
-rw-r--r-- drivers/usb/mon/mon_bin.c | 8
-rw-r--r-- drivers/usb/musb/musb_dsps.c | 2
-rw-r--r-- drivers/usb/musb/musb_host.c | 1
-rw-r--r-- drivers/usb/phy/phy-fsl-usb.c | 4
-rw-r--r-- drivers/usb/renesas_usbhs/Kconfig | 1
-rw-r--r-- drivers/usb/renesas_usbhs/mod_gadget.c | 1
-rw-r--r-- drivers/usb/serial/ch341.c | 2
-rw-r--r-- drivers/usb/serial/cp210x.c | 422
-rw-r--r-- drivers/usb/serial/cyberjack.c | 17
-rw-r--r-- drivers/usb/serial/digi_acceleport.c | 35
-rw-r--r-- drivers/usb/serial/io_edgeport.c | 17
-rw-r--r-- drivers/usb/serial/io_ti.c | 5
-rw-r--r-- drivers/usb/serial/ir-usb.c | 2
-rw-r--r-- drivers/usb/serial/iuu_phoenix.c | 5
-rw-r--r-- drivers/usb/serial/keyspan_pda.c | 4
-rw-r--r-- drivers/usb/serial/kl5kusb105.c | 1
-rw-r--r-- drivers/usb/serial/kl5kusb105.h | 3
-rw-r--r-- drivers/usb/serial/kobil_sct.c | 24
-rw-r--r-- drivers/usb/serial/mos7720.c | 14
-rw-r--r-- drivers/usb/serial/mos7840.c | 8
-rw-r--r-- drivers/usb/serial/option.c | 4
-rw-r--r-- drivers/usb/serial/pl2303.c | 2
-rw-r--r-- drivers/usb/serial/pl2303.h | 1
-rw-r--r-- drivers/usb/serial/quatech2.c | 7
-rw-r--r-- drivers/usb/serial/sierra.c | 13
-rw-r--r-- drivers/usb/serial/ssu100.c | 2
-rw-r--r-- drivers/usb/serial/symbolserial.c | 5
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.c | 9
-rw-r--r-- drivers/usb/serial/usb_wwan.c | 5
-rw-r--r-- drivers/usb/typec/Kconfig | 18
-rw-r--r-- drivers/usb/typec/Makefile | 5
-rw-r--r-- drivers/usb/typec/altmodes/Kconfig | 14
-rw-r--r-- drivers/usb/typec/altmodes/Makefile | 2
-rw-r--r-- drivers/usb/typec/altmodes/displayport.c | 580
-rw-r--r-- drivers/usb/typec/bus.c | 402
-rw-r--r-- drivers/usb/typec/bus.h | 38
-rw-r--r-- drivers/usb/typec/class.c | 544
-rw-r--r-- drivers/usb/typec/fusb302/fusb302.c | 12
-rw-r--r-- drivers/usb/typec/mux.c | 6
-rw-r--r-- drivers/usb/typec/mux/pi3usb30532.c | 13
-rw-r--r-- drivers/usb/typec/tcpci.c (renamed from drivers/staging/typec/tcpci.c) | 66
-rw-r--r-- drivers/usb/typec/tcpci.h (renamed from drivers/staging/typec/tcpci.h) | 0
-rw-r--r-- drivers/usb/typec/tcpci_rt1711h.c (renamed from drivers/staging/typec/tcpci_rt1711h.c) | 0
-rw-r--r-- drivers/usb/typec/tcpm.c | 337
-rw-r--r-- drivers/usb/typec/tps6598x.c | 11
-rw-r--r-- drivers/usb/usb-skeleton.c | 10
-rw-r--r-- drivers/usb/usbip/vudc_dev.c | 2
-rw-r--r-- drivers/usb/wusbcore/security.c | 2
-rw-r--r-- drivers/usb/wusbcore/wa-xfer.c | 8
-rw-r--r-- drivers/uwb/hwa-rc.c | 1
-rw-r--r-- drivers/vfio/pci/Kconfig | 12
-rw-r--r-- drivers/vfio/pci/vfio_pci.c | 25
-rw-r--r-- drivers/vfio/vfio_iommu_spapr_tce.c | 73
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 17
-rw-r--r-- drivers/vhost/net.c | 370
-rw-r--r-- drivers/vhost/scsi.c | 16
-rw-r--r-- drivers/vhost/vhost.c | 80
-rw-r--r-- drivers/vhost/vhost.h | 11
-rw-r--r-- drivers/video/backlight/adp8860_bl.c | 1
-rw-r--r-- drivers/video/backlight/pwm_bl.c | 232
-rw-r--r-- drivers/video/console/Kconfig | 11
-rw-r--r-- drivers/video/console/dummycon.c | 69
-rw-r--r-- drivers/video/fbdev/core/fbcon.c | 83
-rw-r--r-- drivers/video/fbdev/efifb.c | 51
-rw-r--r-- drivers/video/fbdev/hyperv_fb.c | 6
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/core.c | 3
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dss_features.c | 3
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 2
-rw-r--r-- drivers/virtio/virtio_balloon.c | 2
-rw-r--r-- drivers/virtio/virtio_pci_common.c | 7
-rw-r--r-- drivers/virtio/virtio_pci_common.h | 2
-rw-r--r-- drivers/vme/bridges/vme_ca91cx42.c | 2
-rw-r--r-- drivers/w1/masters/Kconfig | 2
-rw-r--r-- drivers/w1/masters/ds2482.c | 2
-rw-r--r-- drivers/w1/masters/ds2490.c | 16
-rw-r--r-- drivers/w1/masters/mxc_w1.c | 1
-rw-r--r-- drivers/w1/slaves/Kconfig | 12
-rw-r--r-- drivers/w1/slaves/Makefile | 1
-rw-r--r-- drivers/w1/slaves/w1_ds2760.c | 175
-rw-r--r-- drivers/w1/slaves/w1_ds2760.h | 59
-rw-r--r-- drivers/w1/w1.c | 3
-rw-r--r-- drivers/watchdog/Kconfig | 10
-rw-r--r-- drivers/watchdog/Makefile | 1
-rw-r--r-- drivers/watchdog/coh901327_wdt.c | 1
-rw-r--r-- drivers/watchdog/davinci_wdt.c | 1
-rw-r--r-- drivers/watchdog/imgpdc_wdt.c | 1
-rw-r--r-- drivers/watchdog/imx2_wdt.c | 5
-rw-r--r-- drivers/watchdog/kempld_wdt.c | 5
-rw-r--r-- drivers/watchdog/max63xx_wdt.c | 1
-rw-r--r-- drivers/watchdog/max77620_wdt.c | 1
-rw-r--r-- drivers/watchdog/menz69_wdt.c | 170
-rw-r--r-- drivers/watchdog/moxart_wdt.c | 1
-rw-r--r-- drivers/watchdog/omap_wdt.c | 1
-rw-r--r-- drivers/watchdog/orion_wdt.c | 2
-rw-r--r-- drivers/watchdog/sp805_wdt.c | 63
-rw-r--r-- drivers/watchdog/sprd_wdt.c | 4
-rw-r--r-- drivers/watchdog/stm32_iwdg.c | 116
-rw-r--r-- drivers/watchdog/stmp3xxx_rtc_wdt.c | 5
-rw-r--r-- drivers/watchdog/tangox_wdt.c | 1
-rw-r--r-- drivers/xen/Kconfig | 24
-rw-r--r-- drivers/xen/Makefile | 2
-rw-r--r-- drivers/xen/balloon.c | 75
-rw-r--r-- drivers/xen/biomerge.c | 2
-rw-r--r-- drivers/xen/gntdev-common.h | 94
-rw-r--r-- drivers/xen/gntdev-dmabuf.c | 856
-rw-r--r-- drivers/xen/gntdev-dmabuf.h | 33
-rw-r--r-- drivers/xen/gntdev.c | 264
-rw-r--r-- drivers/xen/grant-table.c | 151
-rw-r--r-- drivers/xen/mem-reservation.c | 118
-rw-r--r-- drivers/xen/xen-balloon.c | 2
-rw-r--r-- drivers/xen/xen-scsiback.c | 17
-rw-r--r-- fs/9p/v9fs.c | 2
-rw-r--r-- fs/9p/vfs_file.c | 2
-rw-r--r-- fs/9p/vfs_inode.c | 7
-rw-r--r-- fs/9p/vfs_inode_dotl.c | 7
-rw-r--r-- fs/9p/xattr.c | 6
-rw-r--r-- fs/Kconfig.binfmt | 5
-rw-r--r-- fs/adfs/inode.c | 13
-rw-r--r-- fs/adfs/super.c | 1
-rw-r--r-- fs/afs/dir.c | 45
-rw-r--r-- fs/afs/dynroot.c | 25
-rw-r--r-- fs/afs/rxrpc.c | 30
-rw-r--r-- fs/aio.c | 237
-rw-r--r-- fs/anon_inodes.c | 30
-rw-r--r-- fs/attr.c | 5
-rw-r--r-- fs/autofs/Makefile | 4
-rw-r--r-- fs/autofs/autofs_i.h | 13
-rw-r--r-- fs/autofs/dev-ioctl.c | 22
-rw-r--r-- fs/autofs/expire.c | 143
-rw-r--r-- fs/autofs/init.c | 2
-rw-r--r-- fs/autofs/inode.c | 1
-rw-r--r-- fs/autofs/root.c | 33
-rw-r--r-- fs/bad_inode.c | 2
-rw-r--r-- fs/binfmt_elf.c | 7
-rw-r--r-- fs/binfmt_misc.c | 2
-rw-r--r-- fs/block_dev.c | 15
-rw-r--r-- fs/btrfs/acl.c | 13
-rw-r--r-- fs/btrfs/backref.c | 6
-rw-r--r-- fs/btrfs/btrfs_inode.h | 2
-rw-r--r-- fs/btrfs/check-integrity.c | 9
-rw-r--r-- fs/btrfs/compression.c | 18
-rw-r--r-- fs/btrfs/ctree.c | 53
-rw-r--r-- fs/btrfs/ctree.h | 94
-rw-r--r-- fs/btrfs/delayed-inode.c | 14
-rw-r--r-- fs/btrfs/delayed-inode.h | 2
-rw-r--r-- fs/btrfs/delayed-ref.c | 43
-rw-r--r-- fs/btrfs/delayed-ref.h | 6
-rw-r--r-- fs/btrfs/dev-replace.c | 29
-rw-r--r-- fs/btrfs/dir-item.c | 4
-rw-r--r-- fs/btrfs/disk-io.c | 113
-rw-r--r-- fs/btrfs/disk-io.h | 5
-rw-r--r-- fs/btrfs/extent-tree.c | 883
-rw-r--r-- fs/btrfs/extent_io.c | 165
-rw-r--r-- fs/btrfs/extent_io.h | 16
-rw-r--r-- fs/btrfs/file-item.c | 4
-rw-r--r-- fs/btrfs/file.c | 128
-rw-r--r-- fs/btrfs/free-space-cache.c | 19
-rw-r--r-- fs/btrfs/free-space-tree.c | 2
-rw-r--r-- fs/btrfs/inode-map.c | 12
-rw-r--r-- fs/btrfs/inode.c | 267
-rw-r--r-- fs/btrfs/ioctl.c | 82
-rw-r--r-- fs/btrfs/ordered-data.c | 138
-rw-r--r-- fs/btrfs/ordered-data.h | 23
-rw-r--r-- fs/btrfs/print-tree.c | 39
-rw-r--r-- fs/btrfs/qgroup.c | 270
-rw-r--r-- fs/btrfs/qgroup.h | 46
-rw-r--r-- fs/btrfs/raid56.c | 109
-rw-r--r-- fs/btrfs/reada.c | 3
-rw-r--r-- fs/btrfs/relocation.c | 216
-rw-r--r-- fs/btrfs/root-tree.c | 22
-rw-r--r-- fs/btrfs/scrub.c | 678
-rw-r--r-- fs/btrfs/send.c | 172
-rw-r--r-- fs/btrfs/struct-funcs.c | 1
-rw-r--r-- fs/btrfs/super.c | 115
-rw-r--r-- fs/btrfs/sysfs.c | 2
-rw-r--r-- fs/btrfs/tests/qgroup-tests.c | 24
-rw-r--r-- fs/btrfs/transaction.c | 11
-rw-r--r-- fs/btrfs/transaction.h | 2
-rw-r--r-- fs/btrfs/tree-checker.c | 115
-rw-r--r-- fs/btrfs/tree-log.c | 270
-rw-r--r-- fs/btrfs/volumes.c | 609
-rw-r--r-- fs/btrfs/volumes.h | 31
-rw-r--r-- fs/buffer.c | 88
-rw-r--r-- fs/cachefiles/bind.c | 3
-rw-r--r-- fs/cachefiles/namei.c | 3
-rw-r--r-- fs/cachefiles/rdwr.c | 17
-rw-r--r-- fs/ceph/acl.c | 30
-rw-r--r-- fs/ceph/addr.c | 74
-rw-r--r-- fs/ceph/cache.c | 11
-rw-r--r-- fs/ceph/caps.c | 138
-rw-r--r-- fs/ceph/dir.c | 20
-rw-r--r-- fs/ceph/file.c | 41
-rw-r--r-- fs/ceph/inode.c | 83
-rw-r--r-- fs/ceph/mds_client.c | 98
-rw-r--r-- fs/ceph/mds_client.h | 14
-rw-r--r-- fs/ceph/quota.c | 2
-rw-r--r-- fs/ceph/snap.c | 6
-rw-r--r-- fs/ceph/super.c | 6
-rw-r--r-- fs/ceph/super.h | 15
-rw-r--r-- fs/ceph/xattr.c | 4
-rw-r--r-- fs/cifs/Kconfig | 59
-rw-r--r-- fs/cifs/cache.c | 6
-rw-r--r-- fs/cifs/cifs_debug.c | 66
-rw-r--r-- fs/cifs/cifsencrypt.c | 12
-rw-r--r-- fs/cifs/cifsfs.c | 33
-rw-r--r-- fs/cifs/cifsfs.h | 3
-rw-r--r-- fs/cifs/cifsglob.h | 57
-rw-r--r-- fs/cifs/cifsproto.h | 11
-rw-r--r-- fs/cifs/cifssmb.c | 22
-rw-r--r-- fs/cifs/connect.c | 109
-rw-r--r-- fs/cifs/dir.c | 7
-rw-r--r-- fs/cifs/fscache.c | 12
-rw-r--r-- fs/cifs/fscache.h | 8
-rw-r--r-- fs/cifs/inode.c | 38
-rw-r--r-- fs/cifs/link.c | 4
-rw-r--r-- fs/cifs/misc.c | 2
-rw-r--r-- fs/cifs/netmisc.c | 19
-rw-r--r-- fs/cifs/smb1ops.c | 5
-rw-r--r-- fs/cifs/smb2file.c | 11
-rw-r--r-- fs/cifs/smb2inode.c | 6
-rw-r--r-- fs/cifs/smb2misc.c | 13
-rw-r--r-- fs/cifs/smb2ops.c | 552
-rw-r--r-- fs/cifs/smb2pdu.c | 589
-rw-r--r-- fs/cifs/smb2pdu.h | 30
-rw-r--r-- fs/cifs/smb2proto.h | 28
-rw-r--r-- fs/cifs/smb2transport.c | 67
-rw-r--r-- fs/cifs/smbdirect.c | 37
-rw-r--r-- fs/cifs/smbdirect.h | 4
-rw-r--r-- fs/cifs/trace.h | 64
-rw-r--r-- fs/cifs/transport.c | 238
-rw-r--r-- fs/cifs/xattr.c | 28
-rw-r--r-- fs/compat_ioctl.c | 40
-rw-r--r-- fs/configfs/dir.c | 11
-rw-r--r-- fs/configfs/item.c | 24
-rw-r--r-- fs/configfs/symlink.c | 2
-rw-r--r-- fs/dax.c | 10
-rw-r--r-- fs/dcache.c | 84
-rw-r--r-- fs/efivarfs/inode.c | 4
-rw-r--r-- fs/eventpoll.c | 118
-rw-r--r-- fs/exec.c | 8
-rw-r--r-- fs/exofs/ore.c | 4
-rw-r--r-- fs/ext2/file.c | 1
-rw-r--r-- fs/ext2/ialloc.c | 3
-rw-r--r-- fs/ext2/inode.c | 2
-rw-r--r-- fs/ext2/namei.c | 9
-rw-r--r-- fs/ext2/super.c | 7
-rw-r--r-- fs/ext4/balloc.c | 30
-rw-r--r-- fs/ext4/ext4.h | 43
-rw-r--r-- fs/ext4/ext4_extents.h | 1
-rw-r--r-- fs/ext4/extents.c | 23
-rw-r--r-- fs/ext4/file.c | 2
-rw-r--r-- fs/ext4/ialloc.c | 30
-rw-r--r-- fs/ext4/inline.c | 58
-rw-r--r-- fs/ext4/inode.c | 93
-rw-r--r-- fs/ext4/mballoc.c | 13
-rw-r--r-- fs/ext4/mmp.c | 13
-rw-r--r-- fs/ext4/move_extent.c | 4
-rw-r--r-- fs/ext4/namei.c | 1
-rw-r--r-- fs/ext4/readpage.c | 5
-rw-r--r-- fs/ext4/super.c | 171
-rw-r--r-- fs/ext4/sysfs.c | 38
-rw-r--r-- fs/ext4/truncate.h | 4
-rw-r--r-- fs/ext4/xattr.c | 42
-rw-r--r-- fs/f2fs/checkpoint.c | 159
-rw-r--r-- fs/f2fs/data.c | 185
-rw-r--r-- fs/f2fs/debug.c | 3
-rw-r--r-- fs/f2fs/dir.c | 3
-rw-r--r-- fs/f2fs/f2fs.h | 193
-rw-r--r-- fs/f2fs/file.c | 216
-rw-r--r-- fs/f2fs/gc.c | 163
-rw-r--r-- fs/f2fs/inline.c | 36
-rw-r--r-- fs/f2fs/inode.c | 154
-rw-r--r-- fs/f2fs/namei.c | 6
-rw-r--r-- fs/f2fs/node.c | 380
-rw-r--r-- fs/f2fs/node.h | 9
-rw-r--r-- fs/f2fs/recovery.c | 31
-rw-r--r-- fs/f2fs/segment.c | 399
-rw-r--r-- fs/f2fs/segment.h | 16
-rw-r--r-- fs/f2fs/super.c | 165
-rw-r--r-- fs/f2fs/sysfs.c | 44
-rw-r--r-- fs/f2fs/xattr.c | 18
-rw-r--r-- fs/fat/cache.c | 19
-rw-r--r-- fs/fat/dir.c | 2
-rw-r--r-- fs/fat/fat.h | 12
-rw-r--r-- fs/fat/fatent.c | 108
-rw-r--r-- fs/fat/file.c | 33
-rw-r--r-- fs/fat/inode.c | 40
-rw-r--r-- fs/fat/misc.c | 13
-rw-r--r-- fs/fat/namei_msdos.c | 17
-rw-r--r-- fs/fat/namei_vfat.c | 20
-rw-r--r-- fs/fcntl.c | 74
-rw-r--r-- fs/file_table.c | 146
-rw-r--r-- fs/fscache/cache.c | 2
-rw-r--r-- fs/fscache/cookie.c | 7
-rw-r--r-- fs/fscache/object.c | 1
-rw-r--r-- fs/fscache/operation.c | 6
-rw-r--r-- fs/fuse/dev.c | 69
-rw-r--r-- fs/fuse/dir.c | 35
-rw-r--r-- fs/fuse/file.c | 4
-rw-r--r-- fs/fuse/fuse_i.h | 5
-rw-r--r-- fs/fuse/inode.c | 45
-rw-r--r-- fs/gfs2/acl.c | 6
-rw-r--r-- fs/gfs2/aops.c | 337
-rw-r--r-- fs/gfs2/aops.h | 19
-rw-r--r-- fs/gfs2/bmap.c | 414
-rw-r--r-- fs/gfs2/dir.c | 4
-rw-r--r-- fs/gfs2/file.c | 161
-rw-r--r-- fs/gfs2/incore.h | 23
-rw-r--r-- fs/gfs2/inode.c | 32
-rw-r--r-- fs/gfs2/lock_dlm.c | 20
-rw-r--r-- fs/gfs2/log.c | 28
-rw-r--r-- fs/gfs2/lops.c | 2
-rw-r--r-- fs/gfs2/meta_io.c | 4
-rw-r--r-- fs/gfs2/recovery.c | 7
-rw-r--r-- fs/gfs2/rgrp.c | 169
-rw-r--r-- fs/gfs2/super.c | 1
-rw-r--r-- fs/gfs2/sys.c | 11
-rw-r--r-- fs/gfs2/trace_gfs2.h | 3
-rw-r--r-- fs/gfs2/trans.h | 6
-rw-r--r-- fs/gfs2/util.c | 38
-rw-r--r-- fs/gfs2/util.h | 10
-rw-r--r-- fs/gfs2/xattr.c | 59
-rw-r--r-- fs/hfs/inode.c | 2
-rw-r--r-- fs/hfsplus/Kconfig | 15
-rw-r--r-- fs/hfsplus/Makefile | 2
-rw-r--r-- fs/hfsplus/acl.h | 28
-rw-r--r-- fs/hfsplus/dir.c | 9
-rw-r--r-- fs/hfsplus/extents.c | 18
-rw-r--r-- fs/hfsplus/hfsplus_fs.h | 1
-rw-r--r-- fs/hfsplus/inode.c | 11
-rw-r--r-- fs/hfsplus/posix_acl.c | 144
-rw-r--r-- fs/hfsplus/super.c | 8
-rw-r--r-- fs/hfsplus/unicode.c | 62
-rw-r--r-- fs/hfsplus/xattr.c | 6
-rw-r--r-- fs/hfsplus/xattr.h | 3
-rw-r--r-- fs/hfsplus/xattr_security.c | 13
-rw-r--r-- fs/hostfs/hostfs.h | 2
-rw-r--r-- fs/hostfs/hostfs_kern.c | 28
-rw-r--r-- fs/hpfs/dir.c | 23
-rw-r--r-- fs/hpfs/hpfs_fn.h | 13
-rw-r--r-- fs/hpfs/namei.c | 12
-rw-r--r-- fs/hugetlbfs/inode.c | 58
-rw-r--r-- fs/inode.c | 105
-rw-r--r-- fs/internal.h | 19
-rw-r--r-- fs/ioctl.c | 1
-rw-r--r-- fs/iomap.c | 782
-rw-r--r-- fs/jbd2/commit.c | 3
-rw-r--r-- fs/jbd2/transaction.c | 9
-rw-r--r-- fs/jffs2/dir.c | 32
-rw-r--r-- fs/jffs2/file.c | 6
-rw-r--r-- fs/jffs2/fs.c | 12
-rw-r--r-- fs/jffs2/os-linux.h | 13
-rw-r--r-- fs/jfs/jfs_dinode.h | 7
-rw-r--r-- fs/jfs/jfs_imap.c | 8
-rw-r--r-- fs/jfs/jfs_incore.h | 3
-rw-r--r-- fs/jfs/jfs_inode.c | 10
-rw-r--r-- fs/jfs/namei.c | 12
-rw-r--r-- fs/jfs/super.c | 5
-rw-r--r-- fs/kernfs/dir.c | 29
-rw-r--r-- fs/kernfs/file.c | 8
-rw-r--r-- fs/kernfs/inode.c | 2
-rw-r--r-- fs/kernfs/kernfs-internal.h | 2
-rw-r--r-- fs/kernfs/symlink.c | 13
-rw-r--r-- fs/locks.c | 42
-rw-r--r-- fs/mpage.c | 122
-rw-r--r-- fs/namei.c | 263
-rw-r--r-- fs/namespace.c | 97
-rw-r--r-- fs/nfs/dir.c | 23
-rw-r--r-- fs/nfs/nfs4_fs.h | 2
-rw-r--r-- fs/nfs/nfs4proc.c | 28
-rw-r--r-- fs/nfsd/vfs.c | 2
-rw-r--r-- fs/nilfs2/file.c | 2
-rw-r--r-- fs/nilfs2/super.c | 2
-rw-r--r-- fs/notify/dnotify/dnotify.c | 8
-rw-r--r-- fs/notify/fanotify/fanotify.c | 15
-rw-r--r-- fs/notify/fanotify/fanotify_user.c | 104
-rw-r--r-- fs/notify/fdinfo.c | 8
-rw-r--r-- fs/notify/fsnotify.h | 16
-rw-r--r-- fs/notify/group.c | 3
-rw-r--r-- fs/notify/inotify/inotify_fsnotify.c | 7
-rw-r--r-- fs/notify/inotify/inotify_user.c | 14
-rw-r--r-- fs/notify/mark.c | 103
-rw-r--r-- fs/ntfs/aops.c | 9
-rw-r--r-- fs/ntfs/compress.c | 28
-rw-r--r-- fs/ntfs/inode.c | 12
-rw-r--r-- fs/ntfs/mft.c | 12
-rw-r--r-- fs/ntfs/time.h | 27
-rw-r--r-- fs/ocfs2/alloc.c | 60
-rw-r--r-- fs/ocfs2/cluster/heartbeat.c | 12
-rw-r--r-- fs/ocfs2/cluster/nodemanager.c | 6
-rw-r--r-- fs/ocfs2/cluster/tcp.c | 2
-rw-r--r-- fs/ocfs2/dlmglue.c | 2
-rw-r--r-- fs/ocfs2/file.c | 17
-rw-r--r-- fs/ocfs2/inode.c | 5
-rw-r--r-- fs/ocfs2/localalloc.c | 9
-rw-r--r-- fs/ocfs2/quota_local.c | 15
-rw-r--r-- fs/open.c | 130
-rw-r--r-- fs/orangefs/file.c | 19
-rw-r--r-- fs/orangefs/inode.c | 3
-rw-r--r-- fs/overlayfs/Kconfig | 19
-rw-r--r-- fs/overlayfs/Makefile | 4
-rw-r--r-- fs/overlayfs/copy_up.c | 190
-rw-r--r-- fs/overlayfs/dir.c | 109
-rw-r--r-- fs/overlayfs/export.c | 3
-rw-r--r-- fs/overlayfs/file.c | 511
-rw-r--r-- fs/overlayfs/inode.c | 175
-rw-r--r-- fs/overlayfs/namei.c | 195
-rw-r--r-- fs/overlayfs/overlayfs.h | 47
-rw-r--r-- fs/overlayfs/ovl_entry.h | 6
-rw-r--r-- fs/overlayfs/readdir.c | 19
-rw-r--r-- fs/overlayfs/super.c | 103
-rw-r--r-- fs/overlayfs/util.c | 252
-rw-r--r-- fs/pipe.c | 43
-rw-r--r-- fs/proc/Kconfig | 1
-rw-r--r-- fs/proc/base.c | 50
-rw-r--r-- fs/proc/generic.c | 4
-rw-r--r-- fs/proc/inode.c | 6
-rw-r--r-- fs/proc/internal.h | 25
-rw-r--r-- fs/proc/kcore.c | 532
-rw-r--r-- fs/proc/meminfo.c | 2
-rw-r--r-- fs/proc/stat.c | 2
-rw-r--r-- fs/proc/task_mmu.c | 318
-rw-r--r-- fs/proc/task_nommu.c | 39
-rw-r--r-- fs/proc/uptime.c | 4
-rw-r--r-- fs/proc/vmcore.c | 2
-rw-r--r-- fs/pstore/Kconfig | 17
-rw-r--r-- fs/pstore/platform.c | 16
-rw-r--r-- fs/read_write.c | 94
-rw-r--r-- fs/reiserfs/item_ops.c | 16
-rw-r--r-- fs/reiserfs/journal.c | 22
-rw-r--r-- fs/reiserfs/prints.c | 141
-rw-r--r-- fs/reiserfs/procfs.c | 11
-rw-r--r-- fs/reiserfs/reiserfs.h | 4
-rw-r--r-- fs/reiserfs/xattr.c | 4
-rw-r--r-- fs/seq_file.c | 54
-rw-r--r-- fs/squashfs/block.c | 2
-rw-r--r-- fs/squashfs/cache.c | 3
-rw-r--r-- fs/squashfs/file.c | 58
-rw-r--r-- fs/squashfs/file_cache.c | 4
-rw-r--r-- fs/squashfs/file_direct.c | 24
-rw-r--r-- fs/squashfs/fragment.c | 17
-rw-r--r-- fs/squashfs/squashfs.h | 3
-rw-r--r-- fs/squashfs/squashfs_fs.h | 6
-rw-r--r-- fs/squashfs/squashfs_fs_sb.h | 1
-rw-r--r-- fs/squashfs/super.c | 5
-rw-r--r-- fs/statfs.c | 14
-rw-r--r-- fs/super.c | 11
-rw-r--r-- fs/sysfs/dir.c | 7
-rw-r--r-- fs/sysfs/file.c | 77
-rw-r--r-- fs/sysfs/group.c | 47
-rw-r--r-- fs/sysfs/sysfs.h | 5
-rw-r--r-- fs/sysv/inode.c | 6
-rw-r--r-- fs/timerfd.c | 14
-rw-r--r-- fs/tracefs/inode.c | 5
-rw-r--r-- fs/udf/ialloc.c | 2
-rw-r--r-- fs/udf/inode.c | 45
-rw-r--r-- fs/udf/namei.c | 12
-rw-r--r-- fs/udf/super.c | 8
-rw-r--r-- fs/udf/udf_i.h | 2
-rw-r--r-- fs/udf/udf_sb.h | 2
-rw-r--r-- fs/udf/udfdecl.h | 4
-rw-r--r-- fs/udf/udftime.c | 6
-rw-r--r-- fs/ufs/balloc.c | 4
-rw-r--r-- fs/ufs/ialloc.c | 5
-rw-r--r-- fs/ufs/namei.c | 9
-rw-r--r-- fs/ufs/super.c | 4
-rw-r--r-- fs/ufs/util.h | 14
-rw-r--r-- fs/userfaultfd.c | 25
-rw-r--r-- fs/xattr.c | 9
-rw-r--r-- fs/xfs/Makefile | 1
-rw-r--r-- fs/xfs/libxfs/xfs_ag_resv.c | 13
-rw-r--r-- fs/xfs/libxfs/xfs_ag_resv.h | 4
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.c | 46
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.h | 1
-rw-r--r-- fs/xfs/libxfs/xfs_attr.c | 120
-rw-r--r-- fs/xfs/libxfs/xfs_attr_leaf.c | 70
-rw-r--r-- fs/xfs/libxfs/xfs_attr_remote.c | 26
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.c | 444
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.h | 48
-rw-r--r-- fs/xfs/libxfs/xfs_bmap_btree.c | 22
-rw-r--r-- fs/xfs/libxfs/xfs_btree.h | 4
-rw-r--r-- fs/xfs/libxfs/xfs_da_btree.c | 37
-rw-r--r-- fs/xfs/libxfs/xfs_da_btree.h | 3
-rw-r--r-- fs/xfs/libxfs/xfs_defer.c | 323
-rw-r--r-- fs/xfs/libxfs/xfs_defer.h | 41
-rw-r--r-- fs/xfs/libxfs/xfs_dir2.c | 74
-rw-r--r-- fs/xfs/libxfs/xfs_dir2.h | 10
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_node.c | 17
-rw-r--r-- fs/xfs/libxfs/xfs_errortag.h | 4
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc.c | 33
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc.h | 1
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc_btree.c | 12
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc_btree.h | 4
-rw-r--r-- fs/xfs/libxfs/xfs_iext_tree.c | 20
-rw-r--r-- fs/xfs/libxfs/xfs_inode_buf.c | 6
-rw-r--r-- fs/xfs/libxfs/xfs_inode_fork.c | 74
-rw-r--r-- fs/xfs/libxfs/xfs_inode_fork.h | 6
-rw-r--r-- fs/xfs/libxfs/xfs_log_format.h | 13
-rw-r--r-- fs/xfs/libxfs/xfs_refcount.c | 107
-rw-r--r-- fs/xfs/libxfs/xfs_refcount.h | 25
-rw-r--r-- fs/xfs/libxfs/xfs_refcount_btree.c | 13
-rw-r--r-- fs/xfs/libxfs/xfs_refcount_btree.h | 7
-rw-r--r-- fs/xfs/libxfs/xfs_rmap.c | 99
-rw-r--r-- fs/xfs/libxfs/xfs_rmap.h | 22
-rw-r--r-- fs/xfs/libxfs/xfs_rmap_btree.c | 5
-rw-r--r-- fs/xfs/libxfs/xfs_rmap_btree.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_sb.c | 278
-rw-r--r-- fs/xfs/libxfs/xfs_shared.h | 12
-rw-r--r-- fs/xfs/libxfs/xfs_types.c | 34
-rw-r--r-- fs/xfs/libxfs/xfs_types.h | 1
-rw-r--r-- fs/xfs/scrub/agheader.c | 518
-rw-r--r-- fs/xfs/scrub/agheader_repair.c | 889
-rw-r--r-- fs/xfs/scrub/alloc.c | 116
-rw-r--r-- fs/xfs/scrub/attr.c | 130
-rw-r--r-- fs/xfs/scrub/bitmap.c | 303
-rw-r--r-- fs/xfs/scrub/bitmap.h | 36
-rw-r--r-- fs/xfs/scrub/bmap.c | 331
-rw-r--r-- fs/xfs/scrub/btree.c | 334
-rw-r--r-- fs/xfs/scrub/btree.h | 45
-rw-r--r-- fs/xfs/scrub/common.c | 394
-rw-r--r-- fs/xfs/scrub/common.h | 115
-rw-r--r-- fs/xfs/scrub/dabtree.c | 170
-rw-r--r-- fs/xfs/scrub/dabtree.h | 33
-rw-r--r-- fs/xfs/scrub/dir.c | 264
-rw-r--r-- fs/xfs/scrub/ialloc.c | 214
-rw-r--r-- fs/xfs/scrub/inode.c | 282
-rw-r--r-- fs/xfs/scrub/parent.c | 132
-rw-r--r-- fs/xfs/scrub/quota.c | 140
-rw-r--r-- fs/xfs/scrub/refcount.c | 194
-rw-r--r-- fs/xfs/scrub/repair.c | 565
-rw-r--r-- fs/xfs/scrub/repair.h | 95
-rw-r--r-- fs/xfs/scrub/rmap.c | 172
-rw-r--r-- fs/xfs/scrub/rtbitmap.c | 78
-rw-r--r-- fs/xfs/scrub/scrub.c | 210
-rw-r--r-- fs/xfs/scrub/scrub.h | 134
-rw-r--r-- fs/xfs/scrub/symlink.c | 28
-rw-r--r-- fs/xfs/scrub/trace.c | 6
-rw-r--r-- fs/xfs/scrub/trace.h | 141
-rw-r--r-- fs/xfs/xfs.h | 1
-rw-r--r-- fs/xfs/xfs_aops.c | 1007
-rw-r--r-- fs/xfs/xfs_aops.h | 4
-rw-r--r-- fs/xfs/xfs_attr_inactive.c | 1
-rw-r--r-- fs/xfs/xfs_attr_list.c | 2
-rw-r--r-- fs/xfs/xfs_bmap_item.c | 25
-rw-r--r-- fs/xfs/xfs_bmap_item.h | 3
-rw-r--r-- fs/xfs/xfs_bmap_util.c | 118
-rw-r--r-- fs/xfs/xfs_buf.c | 212
-rw-r--r-- fs/xfs/xfs_buf.h | 15
-rw-r--r-- fs/xfs/xfs_discard.c | 2
-rw-r--r-- fs/xfs/xfs_dquot.c | 49
-rw-r--r-- fs/xfs/xfs_error.c | 3
-rw-r--r-- fs/xfs/xfs_export.c | 2
-rw-r--r-- fs/xfs/xfs_file.c | 43
-rw-r--r-- fs/xfs/xfs_filestream.c | 8
-rw-r--r-- fs/xfs/xfs_fsmap.c | 4
-rw-r--r-- fs/xfs/xfs_fsops.c | 2
-rw-r--r-- fs/xfs/xfs_icache.c | 16
-rw-r--r-- fs/xfs/xfs_inode.c | 177
-rw-r--r-- fs/xfs/xfs_inode.h | 32
-rw-r--r-- fs/xfs/xfs_inode_item.c | 4
-rw-r--r-- fs/xfs/xfs_iomap.c | 56
-rw-r--r-- fs/xfs/xfs_iomap.h | 2
-rw-r--r-- fs/xfs/xfs_iops.c | 7
-rw-r--r-- fs/xfs/xfs_itable.c | 8
-rw-r--r-- fs/xfs/xfs_log.c | 156
-rw-r--r-- fs/xfs/xfs_log.h | 1
-rw-r--r-- fs/xfs/xfs_log_recover.c | 68
-rw-r--r-- fs/xfs/xfs_mount.c | 101
-rw-r--r-- fs/xfs/xfs_mount.h | 3
-rw-r--r-- fs/xfs/xfs_qm.c | 22
-rw-r--r-- fs/xfs/xfs_qm_syscalls.c | 9
-rw-r--r-- fs/xfs/xfs_quotaops.c | 2
-rw-r--r-- fs/xfs/xfs_refcount_item.c | 28
-rw-r--r-- fs/xfs/xfs_refcount_item.h | 3
-rw-r--r-- fs/xfs/xfs_reflink.c | 173
-rw-r--r-- fs/xfs/xfs_reflink.h | 4
-rw-r--r-- fs/xfs/xfs_rtalloc.c | 20
-rw-r--r-- fs/xfs/xfs_super.c | 38
-rw-r--r-- fs/xfs/xfs_symlink.c | 51
-rw-r--r-- fs/xfs/xfs_trace.h | 78
-rw-r--r-- fs/xfs/xfs_trans.c | 26
-rw-r--r-- fs/xfs/xfs_trans.h | 21
-rw-r--r-- fs/xfs/xfs_trans_bmap.c | 6
-rw-r--r-- fs/xfs/xfs_trans_extfree.c | 2
-rw-r--r-- fs/xfs/xfs_trans_refcount.c | 6
-rw-r--r-- fs/xfs/xfs_trans_rmap.c | 1
-rw-r--r-- include/acpi/acconfig.h | 2
-rw-r--r-- include/acpi/acexcep.h | 6
-rw-r--r-- include/acpi/acpixf.h | 2
-rw-r--r-- include/acpi/ghes.h | 4
-rw-r--r-- include/asm-generic/atomic-instrumented.h | 197
-rw-r--r-- include/asm-generic/atomic.h | 33
-rw-r--r-- include/asm-generic/atomic64.h | 15
-rw-r--r-- include/asm-generic/bitops/atomic.h | 188
-rw-r--r-- include/asm-generic/bitops/lock.h | 68
-rw-r--r-- include/asm-generic/bug.h | 16
-rw-r--r-- include/asm-generic/export.h | 12
-rw-r--r-- include/asm-generic/pgtable.h | 38
-rw-r--r-- include/asm-generic/tlb.h | 18
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 2
-rw-r--r-- include/crypto/dh.h | 4
-rw-r--r-- include/crypto/drbg.h | 3
-rw-r--r-- include/crypto/scatterwalk.h | 15
-rw-r--r-- include/crypto/sha.h | 4
-rw-r--r-- include/crypto/vmac.h | 63
-rw-r--r-- include/drm/drmP.h | 18
-rw-r--r-- include/drm/drm_atomic.h | 14
-rw-r--r-- include/drm/drm_atomic_helper.h | 1
-rw-r--r-- include/drm/drm_audio_component.h | 118
-rw-r--r-- include/drm/drm_bridge.h | 48
-rw-r--r-- include/drm/drm_client.h | 139
-rw-r--r-- include/drm/drm_connector.h | 279
-rw-r--r-- include/drm/drm_crtc.h | 276
-rw-r--r-- include/drm/drm_debugfs_crc.h | 3
-rw-r--r-- include/drm/drm_device.h | 21
-rw-r--r-- include/drm/drm_dp_helper.h | 56
-rw-r--r-- include/drm/drm_drv.h | 29
-rw-r--r-- include/drm/drm_encoder.h | 16
-rw-r--r-- include/drm/drm_fb_cma_helper.h | 6
-rw-r--r-- include/drm/drm_fb_helper.h | 38
-rw-r--r-- include/drm/drm_file.h | 7
-rw-r--r-- include/drm/drm_fourcc.h | 2
-rw-r--r-- include/drm/drm_mm.h | 34
-rw-r--r-- include/drm/drm_mode_config.h | 36
-rw-r--r-- include/drm/drm_modes.h | 2
-rw-r--r-- include/drm/drm_modeset_helper_vtables.h | 17
-rw-r--r-- include/drm/drm_of.h | 8
-rw-r--r-- include/drm/drm_panel.h | 3
-rw-r--r-- include/drm/drm_pci.h | 7
-rw-r--r-- include/drm/drm_plane.h | 197
-rw-r--r-- include/drm/drm_plane_helper.h | 6
-rw-r--r-- include/drm/drm_prime.h | 6
-rw-r--r-- include/drm/drm_print.h | 77
-rw-r--r-- include/drm/drm_property.h | 4
-rw-r--r-- include/drm/drm_vma_manager.h | 1
-rw-r--r-- include/drm/drm_writeback.h | 136
-rw-r--r-- include/drm/gpu_scheduler.h | 174
-rw-r--r-- include/drm/i915_component.h | 85
-rw-r--r-- include/drm/i915_drm.h | 4
-rw-r--r-- include/drm/i915_pciids.h | 37
-rw-r--r-- include/drm/tinydrm/tinydrm.h | 23
-rw-r--r-- include/drm/ttm/ttm_bo_api.h | 25
-rw-r--r-- include/drm/ttm/ttm_set_memory.h | 150
-rw-r--r-- include/dt-bindings/clock/actions,s700-cmu.h | 118
-rw-r--r-- include/dt-bindings/clock/aspeed-clock.h | 2
-rw-r--r-- include/dt-bindings/clock/axg-audio-clkc.h | 94
-rw-r--r-- include/dt-bindings/clock/axg-clkc.h | 4
-rw-r--r-- include/dt-bindings/clock/gxbb-clkc.h | 1
-rw-r--r-- include/dt-bindings/clock/imx6sll-clock.h | 9
-rw-r--r-- include/dt-bindings/clock/imx6ul-clock.h | 46
-rw-r--r-- include/dt-bindings/clock/maxim,max9485.h | 18
-rw-r--r-- include/dt-bindings/clock/px30-cru.h | 389
-rw-r--r-- include/dt-bindings/clock/pxa-clock.h | 3
-rw-r--r-- include/dt-bindings/clock/qcom,dispcc-sdm845.h | 45
-rw-r--r-- include/dt-bindings/clock/qcom,gcc-sdm845.h | 2
-rw-r--r-- include/dt-bindings/clock/r9a06g032-sysctrl.h | 148
-rw-r--r-- include/dt-bindings/clock/rk3399-ddr.h | 56
-rw-r--r-- include/dt-bindings/clock/sun8i-r40-ccu.h | 4
-rw-r--r-- include/dt-bindings/clock/sun8i-tcon-top.h | 11
-rw-r--r-- include/dt-bindings/gce/mt8173-gce.h | 44
-rw-r--r-- include/dt-bindings/iio/adc/at91-sama5d2_adc.h | 16
-rw-r--r-- include/dt-bindings/pinctrl/at91.h | 4
-rw-r--r-- include/dt-bindings/pinctrl/samsung.h | 7
-rw-r--r-- include/dt-bindings/regulator/maxim,max77802.h | 5
-rw-r--r-- include/dt-bindings/regulator/qcom,rpmh-regulator.h | 36
-rw-r--r-- include/dt-bindings/usb/pd.h | 62
-rw-r--r-- include/kvm/arm_vgic.h | 9
-rw-r--r-- include/linux/acpi.h | 25
-rw-r--r-- include/linux/ascii85.h | 38
-rw-r--r-- include/linux/atomic.h | 453
-rw-r--r-- include/linux/audit.h | 5
-rw-r--r-- include/linux/backing-dev-defs.h | 3
-rw-r--r-- include/linux/backing-dev.h | 4
-rw-r--r-- include/linux/backlight.h | 1
-rw-r--r-- include/linux/bio.h | 19
-rw-r--r-- include/linux/bitfield.h | 9
-rw-r--r-- include/linux/bitmap.h | 8
-rw-r--r-- include/linux/bitops.h | 25
-rw-r--r-- include/linux/bits.h | 26
-rw-r--r-- include/linux/blk-cgroup.h | 146
-rw-r--r-- include/linux/blk-mq.h | 18
-rw-r--r-- include/linux/blk_types.h | 27
-rw-r--r-- include/linux/blkdev.h | 66
-rw-r--r-- include/linux/bootmem.h | 17
-rw-r--r-- include/linux/bpf-cgroup.h | 81
-rw-r--r-- include/linux/bpf.h | 87
-rw-r--r-- include/linux/bpf_lirc.h | 5
-rw-r--r-- include/linux/bpf_types.h | 9
-rw-r--r-- include/linux/bpfilter.h | 6
-rw-r--r-- include/linux/brcmphy.h | 1
-rw-r--r-- include/linux/build-salt.h | 20
-rw-r--r-- include/linux/can/dev.h | 7
-rw-r--r-- include/linux/cdrom.h | 3
-rw-r--r-- include/linux/ceph/auth.h | 8
-rw-r--r-- include/linux/ceph/ceph_features.h | 7
-rw-r--r-- include/linux/ceph/decode.h | 18
-rw-r--r-- include/linux/ceph/messenger.h | 8
-rw-r--r-- include/linux/ceph/msgr.h | 2
-rw-r--r-- include/linux/ceph/osd_client.h | 10
-rw-r--r-- include/linux/ceph/pagelist.h | 2
-rw-r--r-- include/linux/cgroup-defs.h | 3
-rw-r--r-- include/linux/cgroup.h | 30
-rw-r--r-- include/linux/clk-provider.h | 26
-rw-r--r-- include/linux/clk.h | 33
-rw-r--r-- include/linux/clk/at91_pmc.h | 15
-rw-r--r-- include/linux/clocksource.h | 3
-rw-r--r-- include/linux/cma.h | 2
-rw-r--r-- include/linux/compat.h | 20
-rw-r--r-- include/linux/compat_time.h | 9
-rw-r--r-- include/linux/compiler-clang.h | 20
-rw-r--r-- include/linux/compiler-gcc.h | 158
-rw-r--r-- include/linux/compiler-intel.h | 13
-rw-r--r-- include/linux/compiler.h | 21
-rw-r--r-- include/linux/compiler_types.h | 257
-rw-r--r-- include/linux/console.h | 5
-rw-r--r-- include/linux/console_struct.h | 5
-rw-r--r-- include/linux/coresight.h | 68
-rw-r--r-- include/linux/cpu.h | 25
-rw-r--r-- include/linux/cpuhotplug.h | 3
-rw-r--r-- include/linux/cpumask.h | 18
-rw-r--r-- include/linux/crash_core.h | 2
-rw-r--r-- include/linux/crc32poly.h | 20
-rw-r--r-- include/linux/crc64.h | 11
-rw-r--r-- include/linux/cred.h | 15
-rw-r--r-- include/linux/crypto.h | 5
-rw-r--r-- include/linux/dcache.h | 18
-rw-r--r-- include/linux/delayacct.h | 2
-rw-r--r-- include/linux/device.h | 135
-rw-r--r-- include/linux/dm-kcopyd.h | 12
-rw-r--r-- include/linux/dma-buf.h | 21
-rw-r--r-- include/linux/dma-contiguous.h | 4
-rw-r--r-- include/linux/dma-direction.h | 6
-rw-r--r-- include/linux/dma-fence.h | 32
-rw-r--r-- include/linux/dma-mapping.h | 9
-rw-r--r-- include/linux/dma-noncoherent.h | 8
-rw-r--r-- include/linux/dma/pxa-dma.h | 9
-rw-r--r-- include/linux/dma/xilinx_dma.h | 2
-rw-r--r-- include/linux/dmaengine.h | 6
-rw-r--r-- include/linux/efi.h | 15
-rw-r--r-- include/linux/etherdevice.h | 3
-rw-r--r-- include/linux/eventfd.h | 1
-rw-r--r-- include/linux/export.h | 57
-rw-r--r-- include/linux/f2fs_fs.h | 5
-rw-r--r-- include/linux/file.h | 8
-rw-r--r-- include/linux/filter.h | 114
-rw-r--r-- include/linux/fpga/fpga-mgr.h | 24
-rw-r--r-- include/linux/fpga/fpga-region.h | 2
-rw-r--r-- include/linux/fs.h | 74
-rw-r--r-- include/linux/fsi-sbefifo.h | 33
-rw-r--r-- include/linux/fsi.h | 12
-rw-r--r-- include/linux/fsl/guts.h | 1
-rw-r--r-- include/linux/fsl/ptp_qoriq.h | 44
-rw-r--r-- include/linux/fsnotify.h | 14
-rw-r--r-- include/linux/fsnotify_backend.h | 52
-rw-r--r-- include/linux/ftrace.h | 19
-rw-r--r-- include/linux/fwnode.h | 2
-rw-r--r-- include/linux/genhd.h | 14
-rw-r--r-- include/linux/gnss.h | 75
-rw-r--r-- include/linux/goldfish.h | 14
-rw-r--r-- include/linux/gpio.h | 2
-rw-r--r-- include/linux/gpio/aspeed.h | 15
-rw-r--r-- include/linux/gpio/consumer.h | 14
-rw-r--r-- include/linux/gpio/driver.h | 3
-rw-r--r-- include/linux/gpio_keys.h | 2
-rw-r--r-- include/linux/hid.h | 20
-rw-r--r-- include/linux/hugetlb.h | 3
-rw-r--r-- include/linux/hwmon.h | 32
-rw-r--r-- include/linux/hwspinlock.h | 37
-rw-r--r-- include/linux/hyperv.h | 33
-rw-r--r-- include/linux/i2c.h | 44
-rw-r--r-- include/linux/idle_inject.h | 29
-rw-r--r-- include/linux/idr.h | 11
-rw-r--r-- include/linux/ieee80211.h | 437
-rw-r--r-- include/linux/if_bridge.h | 4
-rw-r--r-- include/linux/if_team.h | 18
-rw-r--r-- include/linux/igmp.h | 2
-rw-r--r-- include/linux/ima.h | 11
-rw-r--r-- include/linux/inetdevice.h | 1
-rw-r--r-- include/linux/init.h | 44
-rw-r--r-- include/linux/init_task.h | 9
-rw-r--r-- include/linux/inotify.h | 2
-rw-r--r-- include/linux/integrity.h | 13
-rw-r--r-- include/linux/intel-iommu.h | 1
-rw-r--r-- include/linux/iomap.h | 47
-rw-r--r-- include/linux/ipc.h | 2
-rw-r--r-- include/linux/ipc_namespace.h | 5
-rw-r--r-- include/linux/irqchip/arm-gic-v3.h | 13
-rw-r--r-- include/linux/irqchip/arm-gic.h | 11
-rw-r--r-- include/linux/irqflags.h | 15
-rw-r--r-- include/linux/jiffies.h | 5
-rw-r--r-- include/linux/joystick.h | 4
-rw-r--r-- include/linux/jump_label.h | 6
-rw-r--r-- include/linux/kasan.h | 13
-rw-r--r-- include/linux/kcore.h | 2
-rw-r--r-- include/linux/kernel.h | 35
-rw-r--r-- include/linux/kernfs.h | 28
-rw-r--r-- include/linux/kobject.h | 21
-rw-r--r-- include/linux/kprobes.h | 53
-rw-r--r-- include/linux/kthread.h | 1
-rw-r--r-- include/linux/ktime.h | 7
-rw-r--r-- include/linux/kvm_host.h | 28
-rw-r--r-- include/linux/leds.h | 36
-rw-r--r-- include/linux/libata.h | 26
-rw-r--r-- include/linux/list.h | 30
-rw-r--r-- include/linux/list_lru.h | 43
-rw-r--r-- include/linux/lockdep.h | 6
-rw-r--r-- include/linux/lsm_hooks.h | 8
-rw-r--r-- include/linux/mailbox/mtk-cmdq-mailbox.h | 77
-rw-r--r-- include/linux/marvell_phy.h | 2
-rw-r--r-- include/linux/memblock.h | 76
-rw-r--r-- include/linux/memcontrol.h | 105
-rw-r--r-- include/linux/memory_hotplug.h | 1
-rw-r--r-- include/linux/mfd/as3722.h | 3
-rw-r--r-- include/linux/mfd/cros_ec.h | 2
-rw-r--r-- include/linux/mfd/cros_ec_commands.h | 229
-rw-r--r-- include/linux/mfd/da9063/core.h | 15
-rw-r--r-- include/linux/mfd/madera/core.h | 187
-rw-r--r-- include/linux/mfd/madera/pdata.h | 59
-rw-r--r-- include/linux/mfd/madera/registers.h | 3968
-rw-r--r-- include/linux/mfd/rave-sp.h | 1
-rw-r--r-- include/linux/mfd/rohm-bd718x7.h | 332
-rw-r--r-- include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | 3
-rw-r--r-- include/linux/mfd/tmio.h | 3
-rw-r--r-- include/linux/mfd/wm8994/pdata.h | 6
-rw-r--r-- include/linux/mlx4/device.h | 8
-rw-r--r-- include/linux/mlx5/device.h | 24
-rw-r--r-- include/linux/mlx5/driver.h | 33
-rw-r--r-- include/linux/mlx5/eswitch.h | 2
-rw-r--r-- include/linux/mlx5/fs.h | 7
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 193
-rw-r--r-- include/linux/mlx5/mlx5_ifc_fpga.h | 1
-rw-r--r-- include/linux/mlx5/vport.h | 2
-rw-r--r-- include/linux/mm.h | 45
-rw-r--r-- include/linux/mm_types.h | 246
-rw-r--r-- include/linux/mmc/host.h | 10
-rw-r--r-- include/linux/mmc/mmc.h | 2
-rw-r--r-- include/linux/mmu_notifier.h | 33
-rw-r--r-- include/linux/mmzone.h | 57
-rw-r--r-- include/linux/mod_devicetable.h | 15
-rw-r--r-- include/linux/module.h | 4
-rw-r--r-- include/linux/mroute_base.h | 3
-rw-r--r-- include/linux/mtd/mtd.h | 8
-rw-r--r-- include/linux/mtd/rawnand.h | 126
-rw-r--r-- include/linux/mtd/spi-nor.h | 1
-rw-r--r-- include/linux/mtd/spinand.h | 421
-rw-r--r-- include/linux/net.h | 2
-rw-r--r-- include/linux/net_dim.h | 1
-rw-r--r-- include/linux/netdev_features.h | 2
-rw-r--r-- include/linux/netdevice.h | 235
-rw-r--r-- include/linux/netfilter.h | 37
-rw-r--r-- include/linux/netfilter/nfnetlink.h | 1
-rw-r--r-- include/linux/netfilter/nfnetlink_osf.h (renamed from include/linux/netfilter/nf_osf.h) | 23
-rw-r--r-- include/linux/netfilter_bridge.h | 11
-rw-r--r-- include/linux/netfilter_ipv4.h | 11
-rw-r--r-- include/linux/netfilter_ipv6.h | 5
-rw-r--r-- include/linux/netlink.h | 1
-rw-r--r-- include/linux/nmi.h | 10
-rw-r--r-- include/linux/node.h | 12
-rw-r--r-- include/linux/nodemask.h | 2
-rw-r--r-- include/linux/nvme.h | 72
-rw-r--r-- include/linux/of_iommu.h | 4
-rw-r--r-- include/linux/omap-mailbox.h | 5
-rw-r--r-- include/linux/oom.h | 2
-rw-r--r-- include/linux/openvswitch.h | 5
-rw-r--r-- include/linux/overflow.h | 31
-rw-r--r-- include/linux/page_ext.h | 15
-rw-r--r-- include/linux/pci-dma-compat.h | 8
-rw-r--r-- include/linux/pci-epc.h | 16
-rw-r--r-- include/linux/pci-epf.h | 1
-rw-r--r-- include/linux/pci.h | 67
-rw-r--r-- include/linux/pci_hotplug.h | 15
-rw-r--r-- include/linux/pci_ids.h | 2
-rw-r--r-- include/linux/percpu.h | 2
-rw-r--r-- include/linux/percpu_ida.h | 83
-rw-r--r-- include/linux/perf/arm_pmu.h | 11
-rw-r--r-- include/linux/perf_event.h | 3
-rw-r--r-- include/linux/phy.h | 12
-rw-r--r-- include/linux/phylink.h | 1
-rw-r--r-- include/linux/pid.h | 11
-rw-r--r-- include/linux/pinctrl/pinconf.h | 3
-rw-r--r-- include/linux/platform_data/ams-delta-fiq.h (renamed from arch/arm/mach-omap1/include/mach/ams-delta-fiq.h) | 31
-rw-r--r-- include/linux/platform_data/bt-nokia-h4p.h | 38
-rw-r--r-- include/linux/platform_data/gpio-davinci.h | 3
-rw-r--r-- include/linux/platform_data/i2c-hid.h | 7
-rw-r--r-- include/linux/platform_data/jz4740/jz4740_nand.h (renamed from arch/mips/include/asm/mach-jz4740/jz4740_nand.h) | 4
-rw-r--r-- include/linux/platform_data/media/sii9234.h | 24
-rw-r--r-- include/linux/platform_data/mmc-esdhc-imx.h | 2
-rw-r--r-- include/linux/platform_data/mmp_dma.h | 4
-rw-r--r-- include/linux/platform_data/mtd-davinci-aemif.h | 1
-rw-r--r-- include/linux/platform_data/mtd-orion_nand.h | 1
-rw-r--r-- include/linux/platform_data/pm33xx.h | 29
-rw-r--r-- include/linux/platform_data/txx9/ndfmc.h (renamed from arch/mips/include/asm/txx9/ndfmc.h) | 6
-rw-r--r-- include/linux/platform_device.h | 2
-rw-r--r-- include/linux/pm_domain.h | 15
-rw-r--r-- include/linux/posix-timers.h | 4
-rw-r--r-- include/linux/power_supply.h | 1
-rw-r--r-- include/linux/preempt.h | 2
-rw-r--r-- include/linux/printk.h | 10
-rw-r--r-- include/linux/proc_fs.h | 2
-rw-r--r-- include/linux/pti.h | 1
-rw-r--r-- include/linux/ptrace.h | 2
-rw-r--r-- include/linux/pxa2xx_ssp.h | 10
-rw-r--r-- include/linux/qed/qed_eth_if.h | 6
-rw-r--r-- include/linux/qed/qed_if.h | 15
-rw-r--r-- include/linux/random.h | 3
-rw-r--r-- include/linux/rculist.h | 19
-rw-r--r-- include/linux/rcupdate.h | 20
-rw-r--r-- include/linux/rcutiny.h | 2
-rw-r--r-- include/linux/reciprocal_div.h | 68
-rw-r--r-- include/linux/refcount.h | 34
-rw-r--r-- include/linux/regmap.h | 54
-rw-r--r-- include/linux/regulator/driver.h | 4
-rw-r--r-- include/linux/regulator/pfuze100.h | 11
-rw-r--r-- include/linux/remoteproc.h | 19
-rw-r--r-- include/linux/rfkill.h | 20
-rw-r--r-- include/linux/rhashtable-types.h | 137
-rw-r--r-- include/linux/rhashtable.h | 164
-rw-r--r-- include/linux/ring_buffer.h | 3
-rw-r--r-- include/linux/rtc.h | 21
-rw-r--r-- include/linux/rtmutex.h | 7
-rw-r--r-- include/linux/sbitmap.h | 2
-rw-r--r-- include/linux/sched.h | 54
-rw-r--r-- include/linux/sched/mm.h | 37
-rw-r--r-- include/linux/sched/signal.h | 51
-rw-r--r-- include/linux/sched/sysctl.h | 2
-rw-r--r-- include/linux/sched/task.h | 2
-rw-r--r-- include/linux/sched/user.h | 5
-rw-r--r-- include/linux/sched_clock.h | 5
-rw-r--r-- include/linux/sctp.h | 7
-rw-r--r-- include/linux/security.h | 32
-rw-r--r-- include/linux/selection.h | 6
-rw-r--r-- include/linux/serial_8250.h | 3
-rw-r--r-- include/linux/serial_core.h | 7
-rw-r--r-- include/linux/sfp.h | 72
-rw-r--r-- include/linux/shrinker.h | 21
-rw-r--r-- include/linux/signal.h | 10
-rw-r--r-- include/linux/skbuff.h | 29
-rw-r--r-- include/linux/slab.h | 2
-rw-r--r-- include/linux/slimbus.h | 60
-rw-r--r-- include/linux/smpboot.h | 15
-rw-r--r-- include/linux/soc/qcom/mdt_loader.h | 4
-rw-r--r-- include/linux/soc/renesas/rcar-sysc.h | 13
-rw-r--r-- include/linux/soc/samsung/exynos-regs-pmu.h | 8
-rw-r--r-- include/linux/spi/adi_spi3.h | 254
-rw-r--r-- include/linux/spi/spi-mem.h | 18
-rw-r--r-- include/linux/spi/spi_bitbang.h | 5
-rw-r--r-- include/linux/spinlock.h | 70
-rw-r--r-- include/linux/srcu.h | 22
-rw-r--r-- include/linux/ssb/ssb.h | 2
-rw-r--r-- include/linux/stmmac.h | 1
-rw-r--r-- include/linux/suspend.h | 2
-rw-r--r-- include/linux/swait.h | 36
-rw-r--r-- include/linux/swap.h | 13
-rw-r--r-- include/linux/swapfile.h | 2
-rw-r--r-- include/linux/switchtec.h | 4
-rw-r--r-- include/linux/syscalls.h | 22
-rw-r--r-- include/linux/sysfs.h | 20
-rw-r--r-- include/linux/t10-pi.h | 24
-rw-r--r-- include/linux/tcp.h | 18
-rw-r--r-- include/linux/time.h | 4
-rw-r--r-- include/linux/time64.h | 1
-rw-r--r-- include/linux/timekeeping.h | 20
-rw-r--r-- include/linux/torture.h | 4
-rw-r--r-- include/linux/tpm.h | 7
-rw-r--r-- include/linux/tracehook.h | 2
-rw-r--r-- include/linux/tracepoint.h | 59
-rw-r--r-- include/linux/tty_ldisc.h | 4
-rw-r--r-- include/linux/udp.h | 4
-rw-r--r-- include/linux/uio_driver.h | 2
-rw-r--r-- include/linux/uprobes.h | 2
-rw-r--r-- include/linux/usb/audio-v3.h | 19
-rw-r--r-- include/linux/usb/hcd.h | 1
-rw-r--r-- include/linux/usb/pd.h | 1
-rw-r--r-- include/linux/usb/tcpm.h | 11
-rw-r--r-- include/linux/usb/typec.h | 55
-rw-r--r-- include/linux/usb/typec_altmode.h | 160
-rw-r--r-- include/linux/usb/typec_dp.h | 95
-rw-r--r-- include/linux/usb/typec_mux.h | 2
-rw-r--r-- include/linux/verification.h | 6
-rw-r--r-- include/linux/vga_switcheroo.h | 8
-rw-r--r-- include/linux/virtio_config.h | 7
-rw-r--r-- include/linux/vmacache.h | 6
-rw-r--r-- include/linux/w1.h | 2
-rw-r--r-- include/linux/ww_mutex.h | 45
-rw-r--r-- include/media/cec-notifier.h | 27
-rw-r--r-- include/media/cec-pin.h | 4
-rw-r--r-- include/media/cec.h | 12
-rw-r--r-- include/media/dvb_frontend.h | 49
-rw-r--r-- include/media/i2c/lm3560.h | 1
-rw-r--r-- include/media/v4l2-common.h | 2
-rw-r--r-- include/media/v4l2-ctrls.h | 4
-rw-r--r-- include/media/v4l2-ioctl.h | 15
-rw-r--r-- include/media/v4l2-mediabus.h | 2
-rw-r--r-- include/media/v4l2-mem2mem.h | 56
-rw-r--r-- include/media/vsp1.h | 2
-rw-r--r-- include/misc/cxl-base.h | 10
-rw-r--r-- include/misc/cxl.h | 68
-rw-r--r-- include/net/9p/client.h | 11
-rw-r--r-- include/net/act_api.h | 31
-rw-r--r-- include/net/addrconf.h | 1
-rw-r--r-- include/net/af_ieee802154.h | 1
-rw-r--r-- include/net/af_rxrpc.h | 2
-rw-r--r-- include/net/af_vsock.h | 4
-rw-r--r-- include/net/bluetooth/hci.h | 224
-rw-r--r-- include/net/bluetooth/hci_core.h | 34
-rw-r--r-- include/net/bluetooth/mgmt.h | 55
-rw-r--r-- include/net/bond_3ad.h | 2
-rw-r--r-- include/net/bonding.h | 13
-rw-r--r-- include/net/busy_poll.h | 16
-rw-r--r--include/net/cfg80211.h118
-rw-r--r--include/net/dcbnl.h13
-rw-r--r--include/net/devlink.h195
-rw-r--r--include/net/dsa.h3
-rw-r--r--include/net/dst.h14
-rw-r--r--include/net/flow_dissector.h21
-rw-r--r--include/net/gen_stats.h4
-rw-r--r--include/net/ieee80211_radiotap.h123
-rw-r--r--include/net/inet_common.h2
-rw-r--r--include/net/inet_connection_sock.h6
-rw-r--r--include/net/inet_frag.h11
-rw-r--r--include/net/inet_sock.h9
-rw-r--r--include/net/ip.h27
-rw-r--r--include/net/ip6_fib.h5
-rw-r--r--include/net/ip6_route.h6
-rw-r--r--include/net/ip_tunnels.h8
-rw-r--r--include/net/ip_vs.h18
-rw-r--r--include/net/ipv6.h76
-rw-r--r--include/net/ipv6_frag.h104
-rw-r--r--include/net/lag.h17
-rw-r--r--include/net/llc.h5
-rw-r--r--include/net/mac80211.h64
-rw-r--r--include/net/net_namespace.h11
-rw-r--r--include/net/netevent.h1
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h7
-rw-r--r--include/net/netfilter/nf_conntrack_core.h15
-rw-r--r--include/net/netfilter/nf_conntrack_count.h37
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h4
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h84
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h16
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h39
-rw-r--r--include/net/netfilter/nf_flow_table.h2
-rw-r--r--include/net/netfilter/nf_log.h3
-rw-r--r--include/net/netfilter/nf_tables.h11
-rw-r--r--include/net/netfilter/nf_tables_core.h13
-rw-r--r--include/net/netfilter/nf_tproxy.h12
-rw-r--r--include/net/netns/hash.h7
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h2
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/pkt_cls.h30
-rw-r--r--include/net/pkt_sched.h7
-rw-r--r--include/net/sch_generic.h70
-rw-r--r--include/net/scm.h1
-rw-r--r--include/net/sctp/structs.h52
-rw-r--r--include/net/seg6.h2
-rw-r--r--include/net/seg6_hmac.h2
-rw-r--r--include/net/seg6_local.h4
-rw-r--r--include/net/smc.h65
-rw-r--r--include/net/sock.h81
-rw-r--r--include/net/sock_reuseport.h19
-rw-r--r--include/net/tc_act/tc_csum.h1
-rw-r--r--include/net/tc_act/tc_pedit.h1
-rw-r--r--include/net/tc_act/tc_skbedit.h37
-rw-r--r--include/net/tc_act/tc_tunnel_key.h1
-rw-r--r--include/net/tcp.h75
-rw-r--r--include/net/tls.h86
-rw-r--r--include/net/transp_v6.h3
-rw-r--r--include/net/udp.h4
-rw-r--r--include/net/udp_tunnel.h6
-rw-r--r--include/net/xdp.h20
-rw-r--r--include/net/xdp_sock.h4
-rw-r--r--include/net/xfrm.h61
-rw-r--r--include/pcmcia/ds.h10
-rw-r--r--include/rdma/ib.h4
-rw-r--r--include/rdma/ib_addr.h6
-rw-r--r--include/rdma/ib_cache.h83
-rw-r--r--include/rdma/ib_cm.h18
-rw-r--r--include/rdma/ib_mad.h33
-rw-r--r--include/rdma/ib_sa.h49
-rw-r--r--include/rdma/ib_umem_odp.h3
-rw-r--r--include/rdma/ib_verbs.h257
-rw-r--r--include/rdma/opa_addr.h2
-rw-r--r--include/rdma/rdma_cm.h2
-rw-r--r--include/rdma/rdmavt_qp.h30
-rw-r--r--include/rdma/uverbs_ioctl.h627
-rw-r--r--include/rdma/uverbs_named_ioctl.h109
-rw-r--r--include/rdma/uverbs_std_types.h96
-rw-r--r--include/rdma/uverbs_types.h133
-rw-r--r--include/scsi/libsas.h2
-rw-r--r--include/scsi/scsi_cmnd.h13
-rw-r--r--include/scsi/scsi_device.h14
-rw-r--r--include/scsi/scsi_host.h1
-rw-r--r--include/sound/ac97/codec.h8
-rw-r--r--include/sound/ac97/compat.h9
-rw-r--r--include/sound/ac97/controller.h8
-rw-r--r--include/sound/ac97/regs.h20
-rw-r--r--include/sound/ac97_codec.h25
-rw-r--r--include/sound/compress_driver.h21
-rw-r--r--include/sound/dmaengine_pcm.h14
-rw-r--r--include/sound/hda_component.h61
-rw-r--r--include/sound/hda_i915.h37
-rw-r--r--include/sound/hdaudio.h65
-rw-r--r--include/sound/hdaudio_ext.h123
-rw-r--r--include/sound/memalloc.h18
-rw-r--r--include/sound/pcm.h7
-rw-r--r--include/sound/pcm_params.h10
-rw-r--r--include/sound/pxa2xx-lib.h13
-rw-r--r--include/sound/rt5682.h40
-rw-r--r--include/sound/sb16_csp.h2
-rw-r--r--include/sound/seq_midi_event.h6
-rw-r--r--include/sound/seq_virmidi.h3
-rw-r--r--include/sound/sh_fsi.h13
-rw-r--r--include/sound/simple_card.h7
-rw-r--r--include/sound/simple_card_utils.h23
-rw-r--r--include/sound/soc-acpi-intel-match.h19
-rw-r--r--include/sound/soc-acpi.h13
-rw-r--r--include/sound/soc-dai.h15
-rw-r--r--include/sound/soc-dapm.h11
-rw-r--r--include/sound/soc-dpcm.h7
-rw-r--r--include/sound/soc-topology.h37
-rw-r--r--include/sound/soc.h31
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/target/target_core_backend.h6
-rw-r--r--include/target/target_core_base.h16
-rw-r--r--include/target/target_core_fabric.h10
-rw-r--r--include/trace/events/btrfs.h3
-rw-r--r--include/trace/events/clk.h36
-rw-r--r--include/trace/events/fib.h2
-rw-r--r--include/trace/events/filelock.h5
-rw-r--r--include/trace/events/fsi_master_ast_cf.h150
-rw-r--r--include/trace/events/fsi_master_gpio.h102
-rw-r--r--include/trace/events/net.h7
-rw-r--r--include/trace/events/power.h25
-rw-r--r--include/trace/events/preemptirq.h23
-rw-r--r--include/trace/events/rcu.h112
-rw-r--r--include/trace/events/rxrpc.h129
-rw-r--r--include/trace/events/sock.h30
-rw-r--r--include/trace/events/xdp.h5
-rw-r--r--include/uapi/asm-generic/socket.h3
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/amdgpu_drm.h27
-rw-r--r--include/uapi/drm/drm.h9
-rw-r--r--include/uapi/drm/drm_fourcc.h176
-rw-r--r--include/uapi/drm/drm_mode.h8
-rw-r--r--include/uapi/drm/vmwgfx_drm.h166
-rw-r--r--include/uapi/linux/aio_abi.h12
-rw-r--r--include/uapi/linux/audit.h4
-rw-r--r--include/uapi/linux/auto_fs.h8
-rw-r--r--include/uapi/linux/bcache.h12
-rw-r--r--include/uapi/linux/blkzoned.h2
-rw-r--r--include/uapi/linux/bpf.h130
-rw-r--r--include/uapi/linux/btf.h2
-rw-r--r--include/uapi/linux/can.h2
-rw-r--r--include/uapi/linux/cec.h2
-rw-r--r--include/uapi/linux/dcbnl.h3
-rw-r--r--include/uapi/linux/devlink.h42
-rw-r--r--include/uapi/linux/dvb/audio.h37
-rw-r--r--include/uapi/linux/dvb/video.h58
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/errqueue.h4
-rw-r--r--include/uapi/linux/ethtool.h11
-rw-r--r--include/uapi/linux/eventpoll.h8
-rw-r--r--include/uapi/linux/fpga-dfl.h179
-rw-r--r--include/uapi/linux/fsi.h58
-rw-r--r--include/uapi/linux/if_link.h17
-rw-r--r--include/uapi/linux/iio/types.h3
-rw-r--r--include/uapi/linux/ila.h1
-rw-r--r--include/uapi/linux/inotify.h1
-rw-r--r--include/uapi/linux/input.h9
-rw-r--r--include/uapi/linux/ip.h1
-rw-r--r--include/uapi/linux/joystick.h4
-rw-r--r--include/uapi/linux/keyboard.h23
-rw-r--r--include/uapi/linux/kfd_ioctl.h33
-rw-r--r--include/uapi/linux/kvm.h6
-rw-r--r--include/uapi/linux/kvm_para.h2
-rw-r--r--include/uapi/linux/l2tp.h15
-rw-r--r--include/uapi/linux/media-bus-format.h3
-rw-r--r--include/uapi/linux/media.h46
-rw-r--r--include/uapi/linux/mii.h1
-rw-r--r--include/uapi/linux/mroute.h2
-rw-r--r--include/uapi/linux/net_tstamp.h18
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h124
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_osf.h (renamed from include/uapi/linux/netfilter/nf_osf.h)34
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h24
-rw-r--r--include/uapi/linux/netfilter_bridge.h11
-rw-r--r--include/uapi/linux/nl80211.h102
-rw-r--r--include/uapi/linux/openvswitch.h3
-rw-r--r--include/uapi/linux/pci_regs.h4
-rw-r--r--include/uapi/linux/pcitest.h3
-rw-r--r--include/uapi/linux/perf_event.h2
-rw-r--r--include/uapi/linux/pkt_cls.h41
-rw-r--r--include/uapi/linux/pkt_sched.h150
-rw-r--r--include/uapi/linux/pmu.h4
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/linux/rds.h69
-rw-r--r--include/uapi/linux/rseq.h102
-rw-r--r--include/uapi/linux/rtnetlink.h7
-rw-r--r--include/uapi/linux/sctp.h5
-rw-r--r--include/uapi/linux/smc_diag.h25
-rw-r--r--include/uapi/linux/snmp.h3
-rw-r--r--include/uapi/linux/sysctl.h3
-rw-r--r--include/uapi/linux/tc_act/tc_pedit.h9
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h2
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h28
-rw-r--r--include/uapi/linux/tcp.h14
-rw-r--r--include/uapi/linux/time.h7
-rw-r--r--include/uapi/linux/tipc_netlink.h14
-rw-r--r--include/uapi/linux/types_32_64.h50
-rw-r--r--include/uapi/linux/usb/audio.h49
-rw-r--r--include/uapi/linux/usb/g_uvc.h39
-rw-r--r--include/uapi/linux/usb/tmc.h13
-rw-r--r--include/uapi/linux/uvcvideo.h2
-rw-r--r--include/uapi/linux/v4l2-controls.h20
-rw-r--r--include/uapi/linux/v4l2-subdev.h4
-rw-r--r--include/uapi/linux/vhost.h18
-rw-r--r--include/uapi/linux/videodev2.h8
-rw-r--r--include/uapi/linux/xfrm.h5
-rw-r--r--include/uapi/rdma/cxgb4-abi.h32
-rw-r--r--include/uapi/rdma/hns-abi.h1
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h7
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h58
-rw-r--r--include/uapi/rdma/ib_user_verbs.h5
-rw-r--r--include/uapi/rdma/mlx5-abi.h6
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h121
-rw-r--r--include/uapi/rdma/qedr-abi.h17
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h7
-rw-r--r--include/uapi/xen/gntdev.h106
-rw-r--r--include/video/mipi_display.h3
-rw-r--r--include/xen/grant_table.h21
-rw-r--r--include/xen/interface/io/displif.h8
-rw-r--r--include/xen/interface/io/kbdif.h78
-rw-r--r--include/xen/interface/io/sndif.h10
-rw-r--r--include/xen/mem-reservation.h59
-rw-r--r--init/Kconfig75
-rw-r--r--init/do_mounts.c10
-rw-r--r--init/do_mounts_initrd.c10
-rw-r--r--init/do_mounts_md.c10
-rw-r--r--init/do_mounts_rd.c10
-rw-r--r--init/init_task.c13
-rw-r--r--init/initramfs.c10
-rw-r--r--init/main.c47
-rw-r--r--init/version.c3
-rw-r--r--ipc/msg.c39
-rw-r--r--ipc/namespace.c20
-rw-r--r--ipc/sem.c41
-rw-r--r--ipc/shm.c123
-rw-r--r--ipc/util.c215
-rw-r--r--ipc/util.h33
-rw-r--r--kernel/Kconfig.preempt2
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/audit.c7
-rw-r--r--kernel/audit_tree.c19
-rw-r--r--kernel/audit_watch.c41
-rw-r--r--kernel/auditfilter.c17
-rw-r--r--kernel/auditsc.c27
-rw-r--r--kernel/bpf/Makefile4
-rw-r--r--kernel/bpf/arraymap.c28
-rw-r--r--kernel/bpf/btf.c60
-rw-r--r--kernel/bpf/cgroup.c216
-rw-r--r--kernel/bpf/core.c109
-rw-r--r--kernel/bpf/cpumap.c18
-rw-r--r--kernel/bpf/devmap.c23
-rw-r--r--kernel/bpf/hashtab.c42
-rw-r--r--kernel/bpf/helpers.c20
-rw-r--r--kernel/bpf/inode.c11
-rw-r--r--kernel/bpf/local_storage.c379
-rw-r--r--kernel/bpf/lpm_trie.c12
-rw-r--r--kernel/bpf/map_in_map.c3
-rw-r--r--kernel/bpf/offload.c223
-rw-r--r--kernel/bpf/reuseport_array.c363
-rw-r--r--kernel/bpf/sockmap.c409
-rw-r--r--kernel/bpf/stackmap.c1
-rw-r--r--kernel/bpf/syscall.c210
-rw-r--r--kernel/bpf/verifier.c87
-rw-r--r--kernel/bpf/xskmap.c4
-rw-r--r--kernel/cgroup/cgroup.c4
-rw-r--r--kernel/compat.c29
-rw-r--r--kernel/cpu.c297
-rw-r--r--kernel/crash_core.c8
-rw-r--r--kernel/dma/contiguous.c6
-rw-r--r--kernel/dma/direct.c9
-rw-r--r--kernel/dma/noncoherent.c8
-rw-r--r--kernel/dma/swiotlb.c18
-rw-r--r--kernel/events/core.c36
-rw-r--r--kernel/events/hw_breakpoint.c92
-rw-r--r--kernel/events/uprobes.c82
-rw-r--r--kernel/exit.c12
-rw-r--r--kernel/fail_function.c3
-rw-r--r--kernel/fork.c135
-rw-r--r--kernel/freezer.c4
-rw-r--r--kernel/hung_task.c15
-rw-r--r--kernel/irq/Kconfig1
-rw-r--r--kernel/irq/irqdesc.c13
-rw-r--r--kernel/irq/manage.c56
-rw-r--r--kernel/irq/proc.c22
-rw-r--r--kernel/kexec.c8
-rw-r--r--kernel/kprobes.c167
-rw-r--r--kernel/kthread.c42
-rw-r--r--kernel/livepatch/core.c6
-rw-r--r--kernel/livepatch/transition.c7
-rw-r--r--kernel/locking/lockdep.c35
-rw-r--r--kernel/locking/locktorture.c7
-rw-r--r--kernel/locking/mutex.c345
-rw-r--r--kernel/locking/rtmutex.c29
-rw-r--r--kernel/locking/test-ww_mutex.c2
-rw-r--r--kernel/memremap.c32
-rw-r--r--kernel/module-internal.h25
-rw-r--r--kernel/module.c177
-rw-r--r--kernel/module_signing.c12
-rw-r--r--kernel/pid.c42
-rw-r--r--kernel/power/Kconfig1
-rw-r--r--kernel/power/hibernate.c16
-rw-r--r--kernel/power/main.c12
-rw-r--r--kernel/power/suspend.c8
-rw-r--r--kernel/power/swap.c4
-rw-r--r--kernel/power/user.c4
-rw-r--r--kernel/printk/internal.h9
-rw-r--r--kernel/printk/printk.c198
-rw-r--r--kernel/printk/printk_safe.c58
-rw-r--r--kernel/rcu/rcu.h104
-rw-r--r--kernel/rcu/rcuperf.c57
-rw-r--r--kernel/rcu/rcutorture.c462
-rw-r--r--kernel/rcu/srcutiny.c4
-rw-r--r--kernel/rcu/srcutree.c39
-rw-r--r--kernel/rcu/tiny.c4
-rw-r--r--kernel/rcu/tree.c1027
-rw-r--r--kernel/rcu/tree.h71
-rw-r--r--kernel/rcu/tree_exp.h18
-rw-r--r--kernel/rcu/tree_plugin.h188
-rw-r--r--kernel/rcu/update.c45
-rw-r--r--kernel/reboot.c6
-rw-r--r--kernel/rseq.c41
-rw-r--r--kernel/sched/Makefile2
-rw-r--r--kernel/sched/clock.c57
-rw-r--r--kernel/sched/completion.c8
-rw-r--r--kernel/sched/core.c245
-rw-r--r--kernel/sched/cpufreq_schedutil.c103
-rw-r--r--kernel/sched/deadline.c27
-rw-r--r--kernel/sched/debug.c37
-rw-r--r--kernel/sched/fair.c709
-rw-r--r--kernel/sched/idle.c2
-rw-r--r--kernel/sched/pelt.c399
-rw-r--r--kernel/sched/pelt.h72
-rw-r--r--kernel/sched/rt.c33
-rw-r--r--kernel/sched/sched.h98
-rw-r--r--kernel/sched/swait.c32
-rw-r--r--kernel/sched/topology.c2
-rw-r--r--kernel/sched/wait.c57
-rw-r--r--kernel/signal.c268
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/smpboot.c54
-rw-r--r--kernel/softirq.c14
-rw-r--r--kernel/stop_machine.c29
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/sysctl.c27
-rw-r--r--kernel/test_kprobes.c94
-rw-r--r--kernel/time/alarmtimer.c7
-rw-r--r--kernel/time/clockevents.c6
-rw-r--r--kernel/time/clocksource.c149
-rw-r--r--kernel/time/hrtimer.c7
-rw-r--r--kernel/time/itimer.c5
-rw-r--r--kernel/time/ntp.c23
-rw-r--r--kernel/time/ntp_internal.h4
-rw-r--r--kernel/time/posix-cpu-timers.c4
-rw-r--r--kernel/time/posix-stubs.c2
-rw-r--r--kernel/time/posix-timers.c89
-rw-r--r--kernel/time/posix-timers.h2
-rw-r--r--kernel/time/sched_clock.c2
-rw-r--r--kernel/time/tick-broadcast-hrtimer.c2
-rw-r--r--kernel/time/tick-common.c3
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/time.c31
-rw-r--r--kernel/time/timekeeping.c183
-rw-r--r--kernel/time/timekeeping_debug.c2
-rw-r--r--kernel/time/timekeeping_internal.h2
-rw-r--r--kernel/time/timer.c31
-rw-r--r--kernel/torture.c15
-rw-r--r--kernel/trace/Kconfig78
-rw-r--r--kernel/trace/Makefile13
-rw-r--r--kernel/trace/blktrace.c24
-rw-r--r--kernel/trace/bpf_trace.c5
-rw-r--r--kernel/trace/ftrace.c57
-rw-r--r--kernel/trace/preemptirq_delay_test.c72
-rw-r--r--kernel/trace/ring_buffer.c19
-rw-r--r--kernel/trace/ring_buffer_benchmark.c1
-rw-r--r--kernel/trace/trace.c30
-rw-r--r--kernel/trace/trace.h27
-rw-r--r--kernel/trace/trace_benchmark.h2
-rw-r--r--kernel/trace/trace_clock.c1
-rw-r--r--kernel/trace/trace_entries.h2
-rw-r--r--kernel/trace/trace_event_perf.c1
-rw-r--r--kernel/trace/trace_events.c13
-rw-r--r--kernel/trace/trace_events_filter.c35
-rw-r--r--kernel/trace/trace_events_filter_test.h2
-rw-r--r--kernel/trace/trace_events_hist.c15
-rw-r--r--kernel/trace/trace_events_trigger.c45
-rw-r--r--kernel/trace/trace_functions_graph.c5
-rw-r--r--kernel/trace/trace_hwlat.c7
-rw-r--r--kernel/trace/trace_irqsoff.c270
-rw-r--r--kernel/trace/trace_kprobe.c133
-rw-r--r--kernel/trace/trace_kprobe_selftest.c10
-rw-r--r--kernel/trace/trace_kprobe_selftest.h7
-rw-r--r--kernel/trace/trace_output.c6
-rw-r--r--kernel/trace/trace_output.h2
-rw-r--r--kernel/trace/trace_preemptirq.c89
-rw-r--r--kernel/trace/trace_printk.c1
-rw-r--r--kernel/trace/trace_probe.c14
-rw-r--r--kernel/trace/trace_probe.h14
-rw-r--r--kernel/trace/trace_seq.c1
-rw-r--r--kernel/trace/trace_stat.h2
-rw-r--r--kernel/trace/trace_uprobe.c16
-rw-r--r--kernel/trace/tracing_map.c11
-rw-r--r--kernel/trace/tracing_map.h2
-rw-r--r--kernel/tracepoint.c97
-rw-r--r--kernel/user.c11
-rw-r--r--kernel/watchdog.c147
-rw-r--r--kernel/watchdog_hld.c4
-rw-r--r--lib/.gitignore2
-rw-r--r--lib/Kconfig9
-rw-r--r--lib/Kconfig.debug52
-rw-r--r--lib/Kconfig.kasan2
-rw-r--r--lib/Kconfig.ubsan11
-rw-r--r--lib/Makefile15
-rw-r--r--lib/atomic64.c14
-rw-r--r--lib/bch.c23
-rw-r--r--lib/bitmap.c29
-rw-r--r--lib/bucket_locks.c11
-rw-r--r--lib/crc32.c11
-rw-r--r--lib/crc32defs.h14
-rw-r--r--lib/crc64.c56
-rw-r--r--lib/debugobjects.c10
-rw-r--r--lib/decompress_bunzip2.c3
-rw-r--r--lib/devres.c36
-rw-r--r--lib/gen_crc32table.c5
-rw-r--r--lib/gen_crc64table.c68
-rw-r--r--lib/ioremap.c4
-rw-r--r--lib/iov_iter.c77
-rw-r--r--lib/klist.c10
-rw-r--r--lib/kobject.c30
-rw-r--r--lib/locking-selftest.c2
-rw-r--r--lib/mpi/mpi-pow.c3
-rw-r--r--lib/nlattr.c4
-rw-r--r--lib/nmi_backtrace.c3
-rw-r--r--lib/percpu_ida.c370
-rw-r--r--lib/raid6/s390vx.uc34
-rw-r--r--lib/reciprocal_div.c41
-rw-r--r--lib/reed_solomon/reed_solomon.c2
-rw-r--r--lib/refcount.c55
-rw-r--r--lib/rhashtable.c106
-rw-r--r--lib/test_bitfield.c168
-rw-r--r--lib/test_bpf.c20
-rw-r--r--lib/test_debug_virtual.c2
-rw-r--r--lib/test_hexdump.c28
-rw-r--r--lib/test_overflow.c198
-rw-r--r--lib/test_printf.c24
-rw-r--r--lib/test_rhashtable.c8
-rw-r--r--lib/vsprintf.c28
-rw-r--r--lib/xz/xz_crc32.c3
-rw-r--r--mm/Kconfig16
-rw-r--r--mm/Kconfig.debug6
-rw-r--r--mm/backing-dev.c15
-rw-r--r--mm/bootmem.c159
-rw-r--r--mm/cma.c8
-rw-r--r--mm/cma_debug.c2
-rw-r--r--mm/debug.c18
-rw-r--r--mm/fadvise.c8
-rw-r--r--mm/gup.c2
-rw-r--r--mm/hmm.c26
-rw-r--r--mm/huge_memory.c19
-rw-r--r--mm/hugetlb.c47
-rw-r--r--mm/init-mm.c11
-rw-r--r--mm/internal.h12
-rw-r--r--mm/kasan/kasan.c5
-rw-r--r--mm/kasan/kasan_init.c316
-rw-r--r--mm/khugepaged.c60
-rw-r--r--mm/ksm.c13
-rw-r--r--mm/list_lru.c147
-rw-r--r--mm/memblock.c255
-rw-r--r--mm/memcontrol.c599
-rw-r--r--mm/memfd.c2
-rw-r--r--mm/memory-failure.c2
-rw-r--r--mm/memory.c225
-rw-r--r--mm/memory_hotplug.c112
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/mempool.c13
-rw-r--r--mm/migrate.c6
-rw-r--r--mm/mlock.c3
-rw-r--r--mm/mm_init.c9
-rw-r--r--mm/mmap.c80
-rw-r--r--mm/mmu_notifier.c19
-rw-r--r--mm/mprotect.c49
-rw-r--r--mm/nobootmem.c20
-rw-r--r--mm/nommu.c16
-rw-r--r--mm/oom_kill.c239
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_alloc.c218
-rw-r--r--mm/page_ext.c4
-rw-r--r--mm/page_io.c3
-rw-r--r--mm/percpu.c29
-rw-r--r--mm/readahead.c19
-rw-r--r--mm/rmap.c8
-rw-r--r--mm/shmem.c64
-rw-r--r--mm/slab.h6
-rw-r--r--mm/slab_common.c8
-rw-r--r--mm/slub.c3
-rw-r--r--mm/sparse-vmemmap.c61
-rw-r--r--mm/sparse.c290
-rw-r--r--mm/swap_slots.c12
-rw-r--r--mm/swapfile.c270
-rw-r--r--mm/usercopy.c25
-rw-r--r--mm/vmacache.c38
-rw-r--r--mm/vmalloc.c4
-rw-r--r--mm/vmscan.c244
-rw-r--r--mm/workingset.c30
-rw-r--r--mm/zsmalloc.c36
-rw-r--r--mm/zswap.c9
-rw-r--r--net/6lowpan/iphc.c1
-rw-r--r--net/8021q/Makefile1
-rw-r--r--net/8021q/vlan.c15
-rw-r--r--net/9p/client.c122
-rw-r--r--net/9p/mod.c4
-rw-r--r--net/9p/protocol.c2
-rw-r--r--net/9p/trans_fd.c22
-rw-r--r--net/9p/trans_rdma.c12
-rw-r--r--net/9p/trans_virtio.c66
-rw-r--r--net/9p/trans_xen.c3
-rw-r--r--net/9p/util.c1
-rw-r--r--net/Kconfig2
-rw-r--r--net/Makefile4
-rw-r--r--net/atm/common.c2
-rw-r--r--net/atm/mpoa_proc.c6
-rw-r--r--net/atm/pppoatm.c2
-rw-r--r--net/ax25/ax25_addr.c1
-rw-r--r--net/ax25/ax25_ds_in.c1
-rw-r--r--net/ax25/ax25_ds_subr.c1
-rw-r--r--net/ax25/ax25_ip.c1
-rw-r--r--net/ax25/ax25_out.c1
-rw-r--r--net/batman-adv/Kconfig8
-rw-r--r--net/batman-adv/bat_iv_ogm.c4
-rw-r--r--net/batman-adv/bat_iv_ogm.h6
-rw-r--r--net/batman-adv/bat_v.c4
-rw-r--r--net/batman-adv/bat_v_ogm.h6
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c2
-rw-r--r--net/batman-adv/debugfs.c42
-rw-r--r--net/batman-adv/debugfs.h11
-rw-r--r--net/batman-adv/hard-interface.c37
-rw-r--r--net/batman-adv/originator.c17
-rw-r--r--net/batman-adv/translation-table.c7
-rw-r--r--net/batman-adv/types.h7
-rw-r--r--net/bluetooth/af_bluetooth.c2
-rw-r--r--net/bluetooth/hci_conn.c189
-rw-r--r--net/bluetooth/hci_core.c105
-rw-r--r--net/bluetooth/hci_debugfs.c19
-rw-r--r--net/bluetooth/hci_event.c579
-rw-r--r--net/bluetooth/hci_request.c616
-rw-r--r--net/bluetooth/hci_request.h8
-rw-r--r--net/bluetooth/hidp/core.c6
-rw-r--r--net/bluetooth/leds.c6
-rw-r--r--net/bluetooth/mgmt.c402
-rw-r--r--net/bluetooth/sco.c3
-rw-r--r--net/bpf/test_run.c30
-rw-r--r--net/bpfilter/Kconfig3
-rw-r--r--net/bpfilter/Makefile21
-rw-r--r--net/bpfilter/bpfilter_kern.c11
-rw-r--r--net/bpfilter/bpfilter_umh_blob.S7
-rw-r--r--net/bridge/br_forward.c16
-rw-r--r--net/bridge/br_if.c62
-rw-r--r--net/bridge/br_multicast.c12
-rw-r--r--net/bridge/br_netfilter_hooks.c1
-rw-r--r--net/bridge/br_netlink.c30
-rw-r--r--net/bridge/br_private.h5
-rw-r--r--net/bridge/br_sysfs_if.c94
-rw-r--r--net/bridge/netfilter/ebtable_filter.c1
-rw-r--r--net/bridge/netfilter/ebtable_nat.c1
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c3
-rw-r--r--net/caif/caif_dev.c4
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/ceph/Kconfig1
-rw-r--r--net/ceph/Makefile1
-rw-r--r--net/ceph/auth.c16
-rw-r--r--net/ceph/auth_none.c1
-rw-r--r--net/ceph/auth_none.h1
-rw-r--r--net/ceph/auth_x.c239
-rw-r--r--net/ceph/auth_x.h3
-rw-r--r--net/ceph/auth_x_protocol.h7
-rw-r--r--net/ceph/ceph_common.c13
-rw-r--r--net/ceph/cls_lock_client.c4
-rw-r--r--net/ceph/crush/mapper.c4
-rw-r--r--net/ceph/messenger.c113
-rw-r--r--net/ceph/mon_client.c2
-rw-r--r--net/ceph/osd_client.c27
-rw-r--r--net/ceph/pagevec.c1
-rw-r--r--net/compat.c6
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c891
-rw-r--r--net/core/dev_ioctl.c14
-rw-r--r--net/core/devlink.c1322
-rw-r--r--net/core/dst.c1
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/fib_rules.c83
-rw-r--r--net/core/filter.c869
-rw-r--r--net/core/flow_dissector.c65
-rw-r--r--net/core/gen_estimator.c21
-rw-r--r--net/core/gen_stats.c16
-rw-r--r--net/core/lwt_bpf.c4
-rw-r--r--net/core/neighbour.c4
-rw-r--r--net/core/net-sysfs.c159
-rw-r--r--net/core/net_namespace.c28
-rw-r--r--net/core/page_pool.c2
-rw-r--r--net/core/pktgen.c12
-rw-r--r--net/core/rtnetlink.c91
-rw-r--r--net/core/secure_seq.c1
-rw-r--r--net/core/skbuff.c32
-rw-r--r--net/core/sock.c119
-rw-r--r--net/core/sock_diag.c2
-rw-r--r--net/core/sock_reuseport.c92
-rw-r--r--net/core/utils.c2
-rw-r--r--net/core/xdp.c58
-rw-r--r--net/dcb/dcbnl.c97
-rw-r--r--net/dccp/ccids/ccid2.c6
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/decnet/Kconfig1
-rw-r--r--net/decnet/Makefile1
-rw-r--r--net/decnet/TODO5
-rw-r--r--net/decnet/dn_fib.c2
-rw-r--r--net/decnet/dn_nsp_in.c2
-rw-r--r--net/decnet/dn_nsp_out.c1
-rw-r--r--net/decnet/dn_route.c5
-rw-r--r--net/decnet/dn_rules.c2
-rw-r--r--net/decnet/netfilter/Makefile1
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c1
-rw-r--r--net/dns_resolver/dns_key.c29
-rw-r--r--net/dsa/dsa2.c14
-rw-r--r--net/dsa/slave.c12
-rw-r--r--net/dsa/switch.c22
-rw-r--r--net/ethernet/eth.c12
-rw-r--r--net/ieee802154/6lowpan/core.c6
-rw-r--r--net/ieee802154/6lowpan/reassembly.c7
-rw-r--r--net/ieee802154/6lowpan/tx.c21
-rw-r--r--net/ieee802154/core.c1
-rw-r--r--net/ieee802154/nl_policy.c1
-rw-r--r--net/ieee802154/socket.c17
-rw-r--r--net/ipv4/Kconfig4
-rw-r--r--net/ipv4/Makefile2
-rw-r--r--net/ipv4/af_inet.c18
-rw-r--r--net/ipv4/bpfilter/Makefile1
-rw-r--r--net/ipv4/devinet.c11
-rw-r--r--net/ipv4/esp4_offload.c10
-rw-r--r--net/ipv4/fib_frontend.c5
-rw-r--r--net/ipv4/fou.c24
-rw-r--r--net/ipv4/gre_offload.c10
-rw-r--r--net/ipv4/icmp.c9
-rw-r--r--net/ipv4/igmp.c54
-rw-r--r--net/ipv4/inet_connection_sock.c9
-rw-r--r--net/ipv4/inet_fragment.c25
-rw-r--r--net/ipv4/inet_hashtables.c19
-rw-r--r--net/ipv4/ip_forward.c3
-rw-r--r--net/ipv4/ip_fragment.c355
-rw-r--r--net/ipv4/ip_gre.c7
-rw-r--r--net/ipv4/ip_input.c147
-rw-r--r--net/ipv4/ip_output.c24
-rw-r--r--net/ipv4/ip_sockglue.c11
-rw-r--r--net/ipv4/ip_vti.c3
-rw-r--r--net/ipv4/ipmr.c22
-rw-r--r--net/ipv4/ipmr_base.c1
-rw-r--r--net/ipv4/netfilter.c53
-rw-r--r--net/ipv4/netfilter/Kconfig22
-rw-r--r--net/ipv4/netfilter/Makefile6
-rw-r--r--net/ipv4/netfilter/ip_tables.c1
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c472
-rw-r--r--net/ipv4/netfilter/nf_log_ipv4.c8
-rw-r--r--net/ipv4/netfilter/nf_tproxy_ipv4.c18
-rw-r--r--net/ipv4/ping.c16
-rw-r--r--net/ipv4/proc.c3
-rw-r--r--net/ipv4/raw.c11
-rw-r--r--net/ipv4/route.c6
-rw-r--r--net/ipv4/sysctl_net_ipv4.c49
-rw-r--r--net/ipv4/tcp.c91
-rw-r--r--net/ipv4/tcp_bbr.c10
-rw-r--r--net/ipv4/tcp_dctcp.c75
-rw-r--r--net/ipv4/tcp_input.c153
-rw-r--r--net/ipv4/tcp_ipv4.c26
-rw-r--r--net/ipv4/tcp_minisocks.c229
-rw-r--r--net/ipv4/tcp_offload.c17
-rw-r--r--net/ipv4/tcp_output.c55
-rw-r--r--net/ipv4/tcp_rate.c4
-rw-r--r--net/ipv4/tcp_recovery.c2
-rw-r--r--net/ipv4/tcp_timer.c51
-rw-r--r--net/ipv4/tcp_ulp.c4
-rw-r--r--net/ipv4/udp.c20
-rw-r--r--net/ipv4/udp_offload.c15
-rw-r--r--net/ipv6/Kconfig3
-rw-r--r--net/ipv6/addrconf.c57
-rw-r--r--net/ipv6/af_inet6.c8
-rw-r--r--net/ipv6/calipso.c9
-rw-r--r--net/ipv6/datagram.c13
-rw-r--r--net/ipv6/esp6.c4
-rw-r--r--net/ipv6/esp6_offload.c10
-rw-r--r--net/ipv6/exthdrs.c111
-rw-r--r--net/ipv6/icmp.c37
-rw-r--r--net/ipv6/ila/Makefile2
-rw-r--r--net/ipv6/ila/ila.h27
-rw-r--r--net/ipv6/ila/ila_common.c31
-rw-r--r--net/ipv6/ila/ila_main.c121
-rw-r--r--net/ipv6/ila/ila_xlat.c292
-rw-r--r--net/ipv6/inet6_hashtables.c14
-rw-r--r--net/ipv6/ip6_fib.c156
-rw-r--r--net/ipv6/ip6_flowlabel.c3
-rw-r--r--net/ipv6/ip6_gre.c11
-rw-r--r--net/ipv6/ip6_input.c131
-rw-r--r--net/ipv6/ip6_offload.c16
-rw-r--r--net/ipv6/ip6_output.c40
-rw-r--r--net/ipv6/ip6_tunnel.c12
-rw-r--r--net/ipv6/ip6_vti.c27
-rw-r--r--net/ipv6/ip6mr.c1
-rw-r--r--net/ipv6/ipv6_sockglue.c35
-rw-r--r--net/ipv6/mcast.c61
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter.c62
-rw-r--r--net/ipv6/netfilter/Kconfig27
-rw-r--r--net/ipv6/netfilter/Makefile6
-rw-r--r--net/ipv6/netfilter/ip6_tables.c1
-rw-r--r--net/ipv6/netfilter/ip6t_rpfilter.c12
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c460
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c30
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c4
-rw-r--r--net/ipv6/netfilter/nf_log_ipv6.c8
-rw-r--r--net/ipv6/netfilter/nf_tproxy_ipv6.c18
-rw-r--r--net/ipv6/ping.c7
-rw-r--r--net/ipv6/raw.c18
-rw-r--r--net/ipv6/reassembly.c97
-rw-r--r--net/ipv6/route.c55
-rw-r--r--net/ipv6/seg6.c1
-rw-r--r--net/ipv6/seg6_hmac.c3
-rw-r--r--net/ipv6/seg6_iptunnel.c2
-rw-r--r--net/ipv6/seg6_local.c54
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/ipv6/tcpv6_offload.c4
-rw-r--r--net/ipv6/udp.c17
-rw-r--r--net/ipv6/udp_offload.c4
-rw-r--r--net/ipv6/xfrm6_mode_ro.c2
-rw-r--r--net/iucv/af_iucv.c6
-rw-r--r--net/kcm/Kconfig1
-rw-r--r--net/kcm/kcmsock.c1
-rw-r--r--net/key/af_key.c6
-rw-r--r--net/l2tp/l2tp_core.c86
-rw-r--r--net/l2tp/l2tp_core.h73
-rw-r--r--net/l2tp/l2tp_debugfs.c8
-rw-r--r--net/l2tp/l2tp_eth.c32
-rw-r--r--net/l2tp/l2tp_ip.c4
-rw-r--r--net/l2tp/l2tp_ip6.c15
-rw-r--r--net/l2tp/l2tp_netlink.c37
-rw-r--r--net/l2tp/l2tp_ppp.c557
-rw-r--r--net/llc/Kconfig2
-rw-r--r--net/llc/Makefile2
-rw-r--r--net/llc/llc_core.c4
-rw-r--r--net/llc/llc_if.c1
-rw-r--r--net/mac80211/Makefile1
-rw-r--r--net/mac80211/agg-rx.c10
-rw-r--r--net/mac80211/agg-tx.c19
-rw-r--r--net/mac80211/cfg.c9
-rw-r--r--net/mac80211/ethtool.c6
-rw-r--r--net/mac80211/he.c55
-rw-r--r--net/mac80211/ht.c2
-rw-r--r--net/mac80211/ieee80211_i.h47
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/key.c24
-rw-r--r--net/mac80211/led.c20
-rw-r--r--net/mac80211/main.c36
-rw-r--r--net/mac80211/mlme.c312
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/rc80211_minstrel.c1
-rw-r--r--net/mac80211/rx.c134
-rw-r--r--net/mac80211/scan.c56
-rw-r--r--net/mac80211/sta_info.c101
-rw-r--r--net/mac80211/sta_info.h20
-rw-r--r--net/mac80211/trace.h2
-rw-r--r--net/mac80211/tx.c25
-rw-r--r--net/mac80211/util.c162
-rw-r--r--net/mac802154/tx.c15
-rw-r--r--net/mpls/mpls_iptunnel.c2
-rw-r--r--net/netfilter/Kconfig82
-rw-r--r--net/netfilter/Makefile19
-rw-r--r--net/netfilter/core.c15
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c89
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c15
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c19
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c18
-rw-r--r--net/netfilter/nf_conncount.c424
-rw-r--r--net/netfilter/nf_conntrack_broadcast.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c319
-rw-r--r--net/netfilter/nf_conntrack_expect.c3
-rw-r--r--net/netfilter/nf_conntrack_helper.c15
-rw-r--r--net/netfilter/nf_conntrack_l3proto_generic.c66
-rw-r--r--net/netfilter/nf_conntrack_netlink.c124
-rw-r--r--net/netfilter/nf_conntrack_proto.c851
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c52
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c32
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c24
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c (renamed from net/ipv4/netfilter/nf_conntrack_proto_icmp.c)19
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c (renamed from net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c)17
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c46
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c52
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c55
-rw-r--r--net/netfilter/nf_conntrack_standalone.c28
-rw-r--r--net/netfilter/nf_conntrack_timeout.c21
-rw-r--r--net/netfilter/nf_flow_table_core.c13
-rw-r--r--net/netfilter/nf_log.c13
-rw-r--r--net/netfilter/nf_log_common.c5
-rw-r--r--net/netfilter/nf_nat_core.c18
-rw-r--r--net/netfilter/nf_osf.c218
-rw-r--r--net/netfilter/nf_tables_api.c548
-rw-r--r--net/netfilter/nf_tables_core.c16
-rw-r--r--net/netfilter/nf_tables_set_core.c28
-rw-r--r--net/netfilter/nfnetlink.c23
-rw-r--r--net/netfilter/nfnetlink_acct.c29
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c74
-rw-r--r--net/netfilter/nfnetlink_osf.c436
-rw-r--r--net/netfilter/nfnetlink_queue.c3
-rw-r--r--net/netfilter/nft_chain_filter.c18
-rw-r--r--net/netfilter/nft_compat.c13
-rw-r--r--net/netfilter/nft_connlimit.c36
-rw-r--r--net/netfilter/nft_ct.c221
-rw-r--r--net/netfilter/nft_dynset.c4
-rw-r--r--net/netfilter/nft_immediate.c3
-rw-r--r--net/netfilter/nft_lookup.c19
-rw-r--r--net/netfilter/nft_meta.c15
-rw-r--r--net/netfilter/nft_numgen.c4
-rw-r--r--net/netfilter/nft_osf.c104
-rw-r--r--net/netfilter/nft_set_bitmap.c25
-rw-r--r--net/netfilter/nft_set_hash.c38
-rw-r--r--net/netfilter/nft_set_rbtree.c30
-rw-r--r--net/netfilter/nft_socket.c22
-rw-r--r--net/netfilter/nft_tproxy.c318
-rw-r--r--net/netfilter/nft_tunnel.c566
-rw-r--r--net/netfilter/utils.c131
-rw-r--r--net/netfilter/x_tables.c7
-rw-r--r--net/netfilter/xt_AUDIT.c2
-rw-r--r--net/netfilter/xt_CT.c6
-rw-r--r--net/netfilter/xt_TEE.c4
-rw-r--r--net/netfilter/xt_TPROXY.c17
-rw-r--r--net/netfilter/xt_cgroup.c6
-rw-r--r--net/netfilter/xt_connlimit.c4
-rw-r--r--net/netfilter/xt_osf.c149
-rw-r--r--net/netfilter/xt_owner.c2
-rw-r--r--net/netfilter/xt_recent.c3
-rw-r--r--net/netfilter/xt_socket.c8
-rw-r--r--net/netlabel/netlabel_user.c2
-rw-r--r--net/netlink/af_netlink.c12
-rw-r--r--net/nfc/llcp_commands.c9
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/nsh/nsh.c2
-rw-r--r--net/openvswitch/actions.c33
-rw-r--r--net/openvswitch/conntrack.c20
-rw-r--r--net/openvswitch/flow_netlink.c80
-rw-r--r--net/openvswitch/meter.c10
-rw-r--r--net/packet/af_packet.c76
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/qrtr/qrtr.c13
-rw-r--r--net/rds/Kconfig2
-rw-r--r--net/rds/Makefile1
-rw-r--r--net/rds/af_rds.c205
-rw-r--r--net/rds/bind.c138
-rw-r--r--net/rds/cong.c23
-rw-r--r--net/rds/connection.c294
-rw-r--r--net/rds/ib.c138
-rw-r--r--net/rds/ib.h53
-rw-r--r--net/rds/ib_cm.c320
-rw-r--r--net/rds/ib_frmr.c17
-rw-r--r--net/rds/ib_mr.h5
-rw-r--r--net/rds/ib_rdma.c47
-rw-r--r--net/rds/ib_recv.c39
-rw-r--r--net/rds/ib_send.c19
-rw-r--r--net/rds/loop.c63
-rw-r--r--net/rds/loop.h2
-rw-r--r--net/rds/message.c1
-rw-r--r--net/rds/rdma.c19
-rw-r--r--net/rds/rdma_transport.c95
-rw-r--r--net/rds/rdma_transport.h5
-rw-r--r--net/rds/rds.h93
-rw-r--r--net/rds/recv.c78
-rw-r--r--net/rds/send.c128
-rw-r--r--net/rds/tcp.c154
-rw-r--r--net/rds/tcp.h2
-rw-r--r--net/rds/tcp_connect.c68
-rw-r--r--net/rds/tcp_listen.c87
-rw-r--r--net/rds/tcp_recv.c9
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rds/threads.c69
-rw-r--r--net/rds/transport.c16
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/rxrpc/ar-internal.h12
-rw-r--r--net/rxrpc/call_accept.c4
-rw-r--r--net/rxrpc/call_event.c2
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/conn_client.c3
-rw-r--r--net/rxrpc/conn_event.c21
-rw-r--r--net/rxrpc/conn_object.c4
-rw-r--r--net/rxrpc/input.c15
-rw-r--r--net/rxrpc/local_event.c5
-rw-r--r--net/rxrpc/local_object.c2
-rw-r--r--net/rxrpc/net_ns.c6
-rw-r--r--net/rxrpc/output.c44
-rw-r--r--net/rxrpc/peer_event.c156
-rw-r--r--net/rxrpc/peer_object.c10
-rw-r--r--net/rxrpc/proc.c22
-rw-r--r--net/rxrpc/recvmsg.c56
-rw-r--r--net/rxrpc/rxkad.c35
-rw-r--r--net/rxrpc/sysctl.c1
-rw-r--r--net/sched/Kconfig39
-rw-r--r--net/sched/Makefile5
-rw-r--r--net/sched/act_api.c442
-rw-r--r--net/sched/act_bpf.c54
-rw-r--r--net/sched/act_connmark.c36
-rw-r--r--net/sched/act_csum.c80
-rw-r--r--net/sched/act_gact.c47
-rw-r--r--net/sched/act_ife.c106
-rw-r--r--net/sched/act_ipt.c55
-rw-r--r--net/sched/act_mirred.c185
-rw-r--r--net/sched/act_nat.c36
-rw-r--r--net/sched/act_pedit.c142
-rw-r--r--net/sched/act_police.c56
-rw-r--r--net/sched/act_sample.c63
-rw-r--r--net/sched/act_simple.c43
-rw-r--r--net/sched/act_skbedit.c175
-rw-r--r--net/sched/act_skbmod.c73
-rw-r--r--net/sched/act_tunnel_key.c329
-rw-r--r--net/sched/act_vlan.c92
-rw-r--r--net/sched/cls_api.c721
-rw-r--r--net/sched/cls_basic.c1
-rw-r--r--net/sched/cls_bpf.c43
-rw-r--r--net/sched/cls_flower.c647
-rw-r--r--net/sched/cls_matchall.c34
-rw-r--r--net/sched/cls_tcindex.c8
-rw-r--r--net/sched/cls_u32.c111
-rw-r--r--net/sched/sch_api.c11
-rw-r--r--net/sched/sch_cake.c3020
-rw-r--r--net/sched/sch_cbs.c134
-rw-r--r--net/sched/sch_etf.c484
-rw-r--r--net/sched/sch_fq_codel.c25
-rw-r--r--net/sched/sch_htb.c13
-rw-r--r--net/sched/sch_netem.c73
-rw-r--r--net/sched/sch_skbprio.c320
-rw-r--r--net/sctp/Kconfig4
-rw-r--r--net/sctp/associola.c15
-rw-r--r--net/sctp/chunk.c6
-rw-r--r--net/sctp/input.c1
-rw-r--r--net/sctp/ipv6.c20
-rw-r--r--net/sctp/outqueue.c11
-rw-r--r--net/sctp/protocol.c16
-rw-r--r--net/sctp/sm_sideeffect.c1
-rw-r--r--net/sctp/socket.c249
-rw-r--r--net/sctp/stream.c153
-rw-r--r--net/sctp/stream_interleave.c20
-rw-r--r--net/sctp/stream_sched.c13
-rw-r--r--net/sctp/stream_sched_prio.c22
-rw-r--r--net/sctp/stream_sched_rr.c8
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/smc/Makefile2
-rw-r--r--net/smc/af_smc.c457
-rw-r--r--net/smc/smc.h17
-rw-r--r--net/smc/smc_cdc.c116
-rw-r--r--net/smc/smc_cdc.h86
-rw-r--r--net/smc/smc_clc.c200
-rw-r--r--net/smc/smc_clc.h99
-rw-r--r--net/smc/smc_close.c2
-rw-r--r--net/smc/smc_core.c350
-rw-r--r--net/smc/smc_core.h85
-rw-r--r--net/smc/smc_diag.c33
-rw-r--r--net/smc/smc_ib.c171
-rw-r--r--net/smc/smc_ib.h7
-rw-r--r--net/smc/smc_ism.c348
-rw-r--r--net/smc/smc_ism.h48
-rw-r--r--net/smc/smc_llc.c80
-rw-r--r--net/smc/smc_llc.h7
-rw-r--r--net/smc/smc_pnet.c171
-rw-r--r--net/smc/smc_pnet.h19
-rw-r--r--net/smc/smc_rx.c21
-rw-r--r--net/smc/smc_tx.c248
-rw-r--r--net/smc/smc_tx.h6
-rw-r--r--net/smc/smc_wr.c41
-rw-r--r--net/smc/smc_wr.h3
-rw-r--r--net/socket.c56
-rw-r--r--net/strparser/strparser.c47
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c4
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c7
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c2
-rw-r--r--net/sunrpc/xprtrdma/verbs.c5
-rw-r--r--net/tipc/bcast.c2
-rw-r--r--net/tipc/bearer.c2
-rw-r--r--net/tipc/discover.c18
-rw-r--r--net/tipc/group.c41
-rw-r--r--net/tipc/group.h1
-rw-r--r--net/tipc/link.c134
-rw-r--r--net/tipc/link.h2
-rw-r--r--net/tipc/monitor.c3
-rw-r--r--net/tipc/msg.c35
-rw-r--r--net/tipc/name_table.c2
-rw-r--r--net/tipc/net.c15
-rw-r--r--net/tipc/node.c97
-rw-r--r--net/tipc/node.h14
-rw-r--r--net/tipc/socket.c12
-rw-r--r--net/tls/tls_device.c304
-rw-r--r--net/tls/tls_device_fallback.c11
-rw-r--r--net/tls/tls_main.c33
-rw-r--r--net/tls/tls_sw.c347
-rw-r--r--net/unix/af_unix.c11
-rw-r--r--net/vmw_vsock/af_vsock.c15
-rw-r--r--net/vmw_vsock/vmci_transport.c3
-rw-r--r--net/wimax/Makefile2
-rw-r--r--net/wimax/debugfs.c2
-rw-r--r--net/wimax/op-msg.c1
-rw-r--r--net/wimax/stack.c4
-rw-r--r--net/wireless/core.c21
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/lib80211_crypt_tkip.c55
-rw-r--r--net/wireless/nl80211.c254
-rw-r--r--net/wireless/reg.c28
-rw-r--r--net/wireless/sysfs.c4
-rw-r--r--net/wireless/trace.h18
-rw-r--r--net/wireless/util.c87
-rw-r--r--net/wireless/wext-compat.c10
-rw-r--r--net/x25/Kconfig2
-rw-r--r--net/x25/x25_subr.c1
-rw-r--r--net/xdp/xdp_umem.c70
-rw-r--r--net/xdp/xsk.c34
-rw-r--r--net/xdp/xsk_queue.h11
-rw-r--r--net/xfrm/Kconfig9
-rw-r--r--net/xfrm/Makefile1
-rw-r--r--net/xfrm/xfrm_device.c19
-rw-r--r--net/xfrm/xfrm_input.c5
-rw-r--r--net/xfrm/xfrm_interface.c975
-rw-r--r--net/xfrm/xfrm_output.c3
-rw-r--r--net/xfrm/xfrm_policy.c317
-rw-r--r--net/xfrm/xfrm_state.c48
-rw-r--r--net/xfrm/xfrm_user.c113
-rw-r--r--samples/bpf/.gitignore49
-rw-r--r--samples/bpf/Makefile47
-rw-r--r--samples/bpf/bpf_load.c3
-rw-r--r--samples/bpf/hash_func01.h55
-rw-r--r--samples/bpf/parse_varlen.c6
-rw-r--r--samples/bpf/test_cgrp2_attach2.c21
-rw-r--r--samples/bpf/test_cgrp2_sock2.c2
-rw-r--r--samples/bpf/test_overhead_user.c19
-rw-r--r--samples/bpf/trace_event_user.c27
-rwxr-xr-xsamples/bpf/xdp2skb_meta.sh6
-rw-r--r--samples/bpf/xdp_fwd_kern.c8
-rw-r--r--samples/bpf/xdp_fwd_user.c34
-rw-r--r--samples/bpf/xdp_redirect_cpu_kern.c116
-rw-r--r--samples/bpf/xdp_redirect_cpu_user.c11
-rw-r--r--samples/bpf/xdp_rxq_info_kern.c43
-rw-r--r--samples/bpf/xdp_rxq_info_user.c48
-rw-r--r--samples/bpf/xdp_sample_pkts_kern.c66
-rw-r--r--samples/bpf/xdp_sample_pkts_user.c169
-rw-r--r--samples/bpf/xdpsock_user.c45
-rw-r--r--samples/seccomp/Makefile6
-rw-r--r--samples/vfio-mdev/mbochs.c23
-rw-r--r--scripts/.gitignore1
-rw-r--r--scripts/Kbuild.include13
-rw-r--r--scripts/Makefile5
-rw-r--r--scripts/Makefile.build19
-rw-r--r--scripts/Makefile.clean4
-rw-r--r--scripts/Makefile.gcc-plugins37
-rw-r--r--scripts/Makefile.host24
-rw-r--r--scripts/Makefile.lib7
-rw-r--r--scripts/Makefile.modbuiltin8
-rw-r--r--scripts/Makefile.modinst4
-rw-r--r--scripts/Makefile.modpost4
-rw-r--r--scripts/Makefile.modsign3
-rw-r--r--scripts/Makefile.ubsan4
-rw-r--r--scripts/basic/.gitignore1
-rw-r--r--scripts/basic/Makefile1
-rw-r--r--scripts/bin2c.c (renamed from scripts/basic/bin2c.c)0
-rwxr-xr-xscripts/cc-can-link.sh2
-rwxr-xr-xscripts/checkpatch.pl220
-rwxr-xr-xscripts/checkstack.pl11
-rwxr-xr-xscripts/coccicheck5
-rw-r--r--scripts/coccinelle/api/atomic_as_refcounter.cocci129
-rw-r--r--scripts/coccinelle/tests/doubletest.cocci34
-rwxr-xr-xscripts/depmod.sh8
-rwxr-xr-xscripts/documentation-file-ref-check6
-rwxr-xr-xscripts/extract-vmlinux2
-rw-r--r--scripts/gcc-plugins/Kconfig142
-rw-r--r--scripts/gcc-plugins/Makefile5
-rw-r--r--scripts/gcc-plugins/gcc-common.h26
-rwxr-xr-xscripts/get_maintainer.pl56
-rw-r--r--scripts/kconfig/Makefile25
-rw-r--r--scripts/kconfig/check-pkgconfig.sh8
-rw-r--r--scripts/kconfig/conf.c39
-rw-r--r--scripts/kconfig/confdata.c156
-rw-r--r--scripts/kconfig/expr.h3
-rw-r--r--scripts/kconfig/gconf.c5
-rw-r--r--scripts/kconfig/lkc.h2
-rw-r--r--scripts/kconfig/lkc_proto.h4
-rw-r--r--scripts/kconfig/mconf.c10
-rw-r--r--scripts/kconfig/menu.c2
-rw-r--r--scripts/kconfig/nconf.c8
-rw-r--r--scripts/kconfig/qconf.cc3
-rw-r--r--scripts/kconfig/symbol.c13
-rw-r--r--scripts/kconfig/util.c30
-rw-r--r--scripts/kconfig/zconf.y6
-rwxr-xr-xscripts/kernel-doc20
-rw-r--r--scripts/mod/devicetable-offsets.c4
-rw-r--r--scripts/mod/file2alias.c13
-rw-r--r--scripts/mod/modpost.c3
-rwxr-xr-xscripts/package/buildtar12
-rwxr-xr-xscripts/package/mkdebian68
-rwxr-xr-xscripts/spdxcheck.py11
-rw-r--r--scripts/spelling.txt88
-rwxr-xr-xscripts/tags.sh3
-rwxr-xr-xscripts/tracing/draw_functrace.py2
-rwxr-xr-xscripts/ver_linux14
-rw-r--r--security/Kconfig3
-rw-r--r--security/apparmor/lsm.c4
-rw-r--r--security/integrity/digsig_asymmetric.c23
-rw-r--r--security/integrity/evm/Kconfig1
-rw-r--r--security/integrity/evm/evm.h10
-rw-r--r--security/integrity/evm/evm_crypto.c50
-rw-r--r--security/integrity/evm/evm_main.c19
-rw-r--r--security/integrity/evm/evm_secfs.c4
-rw-r--r--security/integrity/iint.c9
-rw-r--r--security/integrity/ima/Kconfig59
-rw-r--r--security/integrity/ima/ima.h7
-rw-r--r--security/integrity/ima/ima_appraise.c4
-rw-r--r--security/integrity/ima/ima_crypto.c4
-rw-r--r--security/integrity/ima/ima_init.c16
-rw-r--r--security/integrity/ima/ima_main.c84
-rw-r--r--security/integrity/ima/ima_policy.c57
-rw-r--r--security/integrity/ima/ima_queue.c4
-rw-r--r--security/integrity/integrity.h15
-rw-r--r--security/integrity/integrity_audit.c6
-rw-r--r--security/keys/dh.c2
-rw-r--r--security/loadpin/loadpin.c6
-rw-r--r--security/security.c41
-rw-r--r--security/selinux/avc.c2
-rw-r--r--security/selinux/hooks.c87
-rw-r--r--security/selinux/netif.c11
-rw-r--r--security/selinux/netlink.c2
-rw-r--r--security/selinux/netnode.c5
-rw-r--r--security/selinux/netport.c5
-rw-r--r--security/selinux/nlmsgtab.c2
-rw-r--r--security/selinux/selinuxfs.c45
-rw-r--r--security/selinux/ss/avtab.c51
-rw-r--r--security/selinux/ss/conditional.c16
-rw-r--r--security/selinux/ss/ebitmap.c15
-rw-r--r--security/selinux/ss/policydb.c91
-rw-r--r--security/selinux/ss/services.c71
-rw-r--r--security/selinux/ss/sidtab.c5
-rw-r--r--security/smack/smack_lsm.c28
-rw-r--r--security/tomoyo/Makefile2
-rw-r--r--security/tomoyo/tomoyo.c2
-rw-r--r--sound/ac97/bus.c26
-rw-r--r--sound/aoa/core/gpio-feature.c4
-rw-r--r--sound/arm/Kconfig5
-rw-r--r--sound/arm/Makefile3
-rw-r--r--sound/arm/pxa2xx-ac97-lib.c12
-rw-r--r--sound/arm/pxa2xx-ac97.c124
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c75
-rw-r--r--sound/arm/pxa2xx-pcm.c129
-rw-r--r--sound/arm/pxa2xx-pcm.h27
-rw-r--r--sound/core/compress_offload.c12
-rw-r--r--sound/core/memalloc.c8
-rw-r--r--sound/core/oss/pcm_oss.c2
-rw-r--r--sound/core/oss/pcm_plugin.c9
-rw-r--r--sound/core/pcm.c7
-rw-r--r--sound/core/pcm_lib.c38
-rw-r--r--sound/core/pcm_local.h2
-rw-r--r--sound/core/pcm_native.c10
-rw-r--r--sound/core/rawmidi.c249
-rw-r--r--sound/core/seq/oss/seq_oss.c2
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_timer.c2
-rw-r--r--sound/core/seq/seq.c33
-rw-r--r--sound/core/seq/seq_clientmgr.c30
-rw-r--r--sound/core/seq/seq_info.c10
-rw-r--r--sound/core/seq/seq_info.h6
-rw-r--r--sound/core/seq/seq_memory.c12
-rw-r--r--sound/core/seq/seq_memory.h6
-rw-r--r--sound/core/seq/seq_midi.c24
-rw-r--r--sound/core/seq/seq_midi_emul.c14
-rw-r--r--sound/core/seq/seq_midi_event.c87
-rw-r--r--sound/core/seq/seq_queue.c12
-rw-r--r--sound/core/seq/seq_queue.h27
-rw-r--r--sound/core/seq/seq_virmidi.c133
-rw-r--r--sound/core/timer.c5
-rw-r--r--sound/drivers/aloop.c1
-rw-r--r--sound/drivers/mpu401/mpu401_uart.c16
-rw-r--r--sound/drivers/opl3/opl3_drums.c2
-rw-r--r--sound/drivers/opl3/opl3_lib.c19
-rw-r--r--sound/drivers/opl3/opl3_midi.c17
-rw-r--r--sound/drivers/opl3/opl3_oss.c8
-rw-r--r--sound/drivers/opl3/opl3_synth.c1
-rw-r--r--sound/drivers/opl3/opl3_voice.h4
-rw-r--r--sound/drivers/opl4/opl4_lib.c12
-rw-r--r--sound/drivers/vx/vx_core.c15
-rw-r--r--sound/drivers/vx/vx_pcm.c2
-rw-r--r--sound/firewire/bebob/bebob_pcm.c1
-rw-r--r--sound/firewire/dice/dice-alesis.c2
-rw-r--r--sound/firewire/dice/dice-pcm.c2
-rw-r--r--sound/firewire/digi00x/digi00x-pcm.c1
-rw-r--r--sound/firewire/fireface/ff-pcm.c1
-rw-r--r--sound/firewire/fireworks/fireworks_pcm.c1
-rw-r--r--sound/firewire/isight.c1
-rw-r--r--sound/firewire/motu/motu-pcm.c2
-rw-r--r--sound/firewire/motu/motu-protocol-v2.c64
-rw-r--r--sound/firewire/motu/motu-protocol-v3.c19
-rw-r--r--sound/firewire/motu/motu.c19
-rw-r--r--sound/firewire/motu/motu.h5
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c2
-rw-r--r--sound/firewire/tascam/tascam-pcm.c1
-rw-r--r--sound/hda/Kconfig7
-rw-r--r--sound/hda/Makefile1
-rw-r--r--sound/hda/ext/hdac_ext_bus.c80
-rw-r--r--sound/hda/ext/hdac_ext_controller.c64
-rw-r--r--sound/hda/ext/hdac_ext_stream.c104
-rw-r--r--sound/hda/hdac_component.c335
-rw-r--r--sound/hda/hdac_device.c2
-rw-r--r--sound/hda/hdac_i915.c353
-rw-r--r--sound/hda/hdac_stream.c4
-rw-r--r--sound/i2c/cs8427.c12
-rw-r--r--sound/i2c/i2c.c13
-rw-r--r--sound/i2c/other/ak4xxx-adda.c12
-rw-r--r--sound/i2c/tea6330t.c16
-rw-r--r--sound/isa/Kconfig2
-rw-r--r--sound/isa/ad1816a/ad1816a_lib.c3
-rw-r--r--sound/isa/es1688/es1688.c2
-rw-r--r--sound/isa/es1688/es1688_lib.c16
-rw-r--r--sound/isa/es18xx.c1
-rw-r--r--sound/isa/galaxy/galaxy.c3
-rw-r--r--sound/isa/gus/gus_io.c2
-rw-r--r--sound/isa/gus/gus_main.c16
-rw-r--r--sound/isa/gus/gus_reset.c2
-rw-r--r--sound/isa/msnd/msnd.c18
-rw-r--r--sound/isa/msnd/msnd.h2
-rw-r--r--sound/isa/msnd/msnd_midi.c2
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c8
-rw-r--r--sound/isa/opti9xx/miro.c5
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c3
-rw-r--r--sound/isa/sb/emu8000_patch.c7
-rw-r--r--sound/isa/sb/emu8000_pcm.c2
-rw-r--r--sound/isa/sb/sb16_csp.c52
-rw-r--r--sound/isa/sb/sb16_main.c25
-rw-r--r--sound/isa/sb/sb8_main.c19
-rw-r--r--sound/isa/sb/sb_common.c16
-rw-r--r--sound/isa/wss/wss_lib.c18
-rw-r--r--sound/mips/sgio2audio.c3
-rw-r--r--sound/pci/ac97/ac97_codec.c16
-rw-r--r--sound/pci/ali5451/ali5451.c5
-rw-r--r--sound/pci/asihpi/asihpi.c24
-rw-r--r--sound/pci/asihpi/hpi6205.c5
-rw-r--r--sound/pci/atiixp.c4
-rw-r--r--sound/pci/atiixp_modem.c4
-rw-r--r--sound/pci/au88x0/au88x0.h2
-rw-r--r--sound/pci/au88x0/au88x0_core.c2
-rw-r--r--sound/pci/bt87x.c4
-rw-r--r--sound/pci/cs46xx/dsp_spos_scb_lib.c3
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c7
-rw-r--r--sound/pci/cs5535audio/cs5535audio.h6
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c4
-rw-r--r--sound/pci/ctxfi/cthw20k1.c6
-rw-r--r--sound/pci/ctxfi/cthw20k2.c12
-rw-r--r--sound/pci/ctxfi/ctmixer.c15
-rw-r--r--sound/pci/echoaudio/echoaudio.c2
-rw-r--r--sound/pci/echoaudio/echoaudio.h2
-rw-r--r--sound/pci/echoaudio/echoaudio_3g.c14
-rw-r--r--sound/pci/echoaudio/echoaudio_dsp.c6
-rw-r--r--sound/pci/echoaudio/echoaudio_dsp.h50
-rw-r--r--sound/pci/echoaudio/echoaudio_gml.c8
-rw-r--r--sound/pci/emu10k1/emu10k1_patch.c7
-rw-r--r--sound/pci/emu10k1/emufx.c24
-rw-r--r--sound/pci/emu10k1/emupcm.c7
-rw-r--r--sound/pci/ens1370.c3
-rw-r--r--sound/pci/hda/dell_wmi_helper.c116
-rw-r--r--sound/pci/hda/hda_codec.c97
-rw-r--r--sound/pci/hda/hda_codec.h6
-rw-r--r--sound/pci/hda/hda_generic.c144
-rw-r--r--sound/pci/hda/hda_generic.h16
-rw-r--r--sound/pci/hda/hda_intel.c18
-rw-r--r--sound/pci/hda/patch_analog.c4
-rw-r--r--sound/pci/hda/patch_ca0132.c282
-rw-r--r--sound/pci/hda/patch_cirrus.c29
-rw-r--r--sound/pci/hda/patch_conexant.c105
-rw-r--r--sound/pci/hda/patch_hdmi.c147
-rw-r--r--sound/pci/hda/patch_realtek.c861
-rw-r--r--sound/pci/hda/patch_sigmatel.c31
-rw-r--r--sound/pci/hda/patch_via.c294
-rw-r--r--sound/pci/hda/thinkpad_helper.c27
-rw-r--r--sound/pci/ice1712/ak4xxx.c12
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c21
-rw-r--r--sound/pci/intel8x0.c6
-rw-r--r--sound/pci/intel8x0m.c6
-rw-r--r--sound/pci/korg1212/korg1212.c4
-rw-r--r--sound/pci/lola/lola.c4
-rw-r--r--sound/pci/lola/lola.h4
-rw-r--r--sound/pci/lola/lola_pcm.c8
-rw-r--r--sound/pci/maestro3.c6
-rw-r--r--sound/pci/mixart/mixart.c1
-rw-r--r--sound/pci/mixart/mixart_core.c6
-rw-r--r--sound/pci/mixart/mixart_hwdep.c42
-rw-r--r--sound/pci/mixart/mixart_hwdep.h8
-rw-r--r--sound/pci/riptide/riptide.c10
-rw-r--r--sound/pci/sonicvibes.c2
-rw-r--r--sound/pci/trident/trident.c2
-rw-r--r--sound/pci/trident/trident.h2
-rw-r--r--sound/pci/trident/trident_main.c2
-rw-r--r--sound/pci/vx222/vx222_ops.c8
-rw-r--r--sound/pci/ymfpci/ymfpci.h78
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c6
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c1
-rw-r--r--sound/pcmcia/vx/vxp_ops.c10
-rw-r--r--sound/ppc/snd_ps3.c5
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/amd/Kconfig1
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c109
-rw-r--r--sound/soc/amd/acp-pcm-dma.c214
-rw-r--r--sound/soc/amd/acp.h13
-rw-r--r--sound/soc/atmel/atmel-i2s.c46
-rw-r--r--sound/soc/codecs/Kconfig25
-rw-r--r--sound/soc/codecs/Makefile8
-rw-r--r--sound/soc/codecs/adau17x1.c1
-rw-r--r--sound/soc/codecs/adav80x.c1
-rw-r--r--sound/soc/codecs/ak4458.c2
-rw-r--r--sound/soc/codecs/ak4554.c17
-rw-r--r--sound/soc/codecs/ak4613.c26
-rw-r--r--sound/soc/codecs/ak4642.c26
-rw-r--r--sound/soc/codecs/ak5558.c4
-rw-r--r--sound/soc/codecs/cs4270.c2
-rw-r--r--sound/soc/codecs/cs47l24.c11
-rw-r--r--sound/soc/codecs/cx20442.c23
-rw-r--r--sound/soc/codecs/da7210.c27
-rw-r--r--sound/soc/codecs/da7213.c4
-rw-r--r--sound/soc/codecs/da7219-aad.c5
-rw-r--r--sound/soc/codecs/da7219.c48
-rw-r--r--sound/soc/codecs/da7219.h8
-rw-r--r--sound/soc/codecs/da9055.c4
-rw-r--r--sound/soc/codecs/es7134.c227
-rw-r--r--sound/soc/codecs/es7241.c322
-rw-r--r--sound/soc/codecs/hdac_hdmi.c495
-rw-r--r--sound/soc/codecs/hdmi-codec.c21
-rw-r--r--sound/soc/codecs/max98373.c1
-rw-r--r--sound/soc/codecs/max9850.c4
-rw-r--r--sound/soc/codecs/nau8540.c3
-rw-r--r--sound/soc/codecs/nau8824.c2
-rw-r--r--sound/soc/codecs/nau8825.c2
-rw-r--r--sound/soc/codecs/pcm1789.c3
-rw-r--r--sound/soc/codecs/pcm186x.c2
-rw-r--r--sound/soc/codecs/rt1305.c15
-rw-r--r--sound/soc/codecs/rt5514-spi.c1
-rw-r--r--sound/soc/codecs/rt5514.c8
-rw-r--r--sound/soc/codecs/rt5631.c12
-rw-r--r--sound/soc/codecs/rt5640.c2
-rw-r--r--sound/soc/codecs/rt5651.c235
-rw-r--r--sound/soc/codecs/rt5651.h8
-rw-r--r--sound/soc/codecs/rt5677.c3
-rw-r--r--sound/soc/codecs/rt5682.c2681
-rw-r--r--sound/soc/codecs/rt5682.h1324
-rw-r--r--sound/soc/codecs/simple-amplifier.c (renamed from sound/soc/codecs/dio2125.c)42
-rw-r--r--sound/soc/codecs/tas571x.c110
-rw-r--r--sound/soc/codecs/tas571x.h16
-rw-r--r--sound/soc/codecs/tda7419.c4
-rw-r--r--sound/soc/codecs/tscs42xx.c37
-rw-r--r--sound/soc/codecs/tscs42xx.h8
-rw-r--r--sound/soc/codecs/twl6040.c2
-rw-r--r--sound/soc/codecs/wm2200.c10
-rw-r--r--sound/soc/codecs/wm5100-tables.c12
-rw-r--r--sound/soc/codecs/wm5102.c10
-rw-r--r--sound/soc/codecs/wm5110.c13
-rw-r--r--sound/soc/codecs/wm8903.c4
-rw-r--r--sound/soc/codecs/wm8904.c1
-rw-r--r--sound/soc/codecs/wm8955.c1
-rw-r--r--sound/soc/codecs/wm8960.c1
-rw-r--r--sound/soc/codecs/wm8961.c1
-rw-r--r--sound/soc/codecs/wm8962.c1
-rw-r--r--sound/soc/codecs/wm8988.c4
-rw-r--r--sound/soc/codecs/wm8990.c4
-rw-r--r--sound/soc/codecs/wm8994.c1
-rw-r--r--sound/soc/codecs/wm8995.c1
-rw-r--r--sound/soc/codecs/wm8996.c9
-rw-r--r--sound/soc/codecs/wm9081.c1
-rw-r--r--sound/soc/codecs/wm_adsp.c216
-rw-r--r--sound/soc/codecs/wm_adsp.h12
-rw-r--r--sound/soc/codecs/wmfw.h1
-rw-r--r--sound/soc/davinci/davinci-i2s.c1
-rw-r--r--sound/soc/davinci/davinci-mcasp.c16
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c20
-rw-r--r--sound/soc/fsl/fsl_asrc.c18
-rw-r--r--sound/soc/fsl/fsl_asrc.h5
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c18
-rw-r--r--sound/soc/fsl/fsl_esai.c1
-rw-r--r--sound/soc/fsl/fsl_spdif.c2
-rw-r--r--sound/soc/fsl/fsl_utils.c18
-rw-r--r--sound/soc/fsl/fsl_utils.h7
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c15
-rw-r--r--sound/soc/generic/audio-graph-card.c41
-rw-r--r--sound/soc/generic/audio-graph-scu-card.c25
-rw-r--r--sound/soc/generic/simple-card-utils.c74
-rw-r--r--sound/soc/generic/simple-card.c106
-rw-r--r--sound/soc/generic/simple-scu-card.c21
-rw-r--r--sound/soc/intel/atom/sst/sst_drv_interface.c29
-rw-r--r--sound/soc/intel/atom/sst/sst_loader.c6
-rw-r--r--sound/soc/intel/boards/Kconfig14
-rw-r--r--sound/soc/intel/boards/Makefile2
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c4
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c20
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c55
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c364
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c643
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c3
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c4
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c4
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c2
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c2
-rw-r--r--sound/soc/intel/boards/skl_rt286.c2
-rw-r--r--sound/soc/intel/common/Makefile6
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-bxt-match.c59
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-byt-match.c40
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cht-match.c56
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c32
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-glk-match.c41
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c16
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c91
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-skl-match.c47
-rw-r--r--sound/soc/intel/common/sst-firmware.c6
-rw-r--r--sound/soc/intel/haswell/sst-haswell-dsp.c53
-rw-r--r--sound/soc/intel/skylake/skl-messages.c50
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c8
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c119
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.c8
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.h2
-rw-r--r--sound/soc/intel/skylake/skl-topology.c28
-rw-r--r--sound/soc/intel/skylake/skl-topology.h11
-rw-r--r--sound/soc/intel/skylake/skl.c360
-rw-r--r--sound/soc/intel/skylake/skl.h7
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.c64
-rw-r--r--sound/soc/mediatek/common/mtk-base-afe.h6
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-common.h1
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c65
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-dai-adda.c20
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-dai-hostless.c16
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-dai-pcm.c19
-rw-r--r--sound/soc/meson/Kconfig65
-rw-r--r--sound/soc/meson/Makefile21
-rw-r--r--sound/soc/meson/axg-card.c671
-rw-r--r--sound/soc/meson/axg-fifo.c341
-rw-r--r--sound/soc/meson/axg-fifo.h80
-rw-r--r--sound/soc/meson/axg-frddr.c141
-rw-r--r--sound/soc/meson/axg-spdifout.c456
-rw-r--r--sound/soc/meson/axg-tdm-formatter.c381
-rw-r--r--sound/soc/meson/axg-tdm-formatter.h39
-rw-r--r--sound/soc/meson/axg-tdm-interface.c542
-rw-r--r--sound/soc/meson/axg-tdm.h78
-rw-r--r--sound/soc/meson/axg-tdmin.c229
-rw-r--r--sound/soc/meson/axg-tdmout.c259
-rw-r--r--sound/soc/meson/axg-toddr.c199
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c2
-rw-r--r--sound/soc/omap/omap-dmic.c2
-rw-r--r--sound/soc/omap/omap-mcpdm.c4
-rw-r--r--sound/soc/pxa/Kconfig6
-rw-r--r--sound/soc/pxa/magician.c106
-rw-r--r--sound/soc/pxa/pxa-ssp.c181
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c47
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c9
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c73
-rw-r--r--sound/soc/pxa/zylonite.c9
-rw-r--r--sound/soc/qcom/Kconfig14
-rw-r--r--sound/soc/qcom/Makefile4
-rw-r--r--sound/soc/qcom/apq8096.c188
-rw-r--r--sound/soc/qcom/common.c112
-rw-r--r--sound/soc/qcom/common.h11
-rw-r--r--sound/soc/qcom/lpass-platform.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6adm.c16
-rw-r--r--sound/soc/qcom/qdsp6/q6afe-dai.c225
-rw-r--r--sound/soc/qcom/qdsp6/q6afe.c43
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c42
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c17
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c71
-rw-r--r--sound/soc/qcom/sdm845.c285
-rw-r--r--sound/soc/rockchip/Makefile3
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c3
-rw-r--r--sound/soc/rockchip/rockchip_pcm.c45
-rw-r--r--sound/soc/rockchip/rockchip_pcm.h14
-rw-r--r--sound/soc/rockchip/rockchip_rt5645.c27
-rw-r--r--sound/soc/samsung/i2s.c1
-rw-r--r--sound/soc/sh/Kconfig1
-rw-r--r--sound/soc/sh/dma-sh7760.c26
-rw-r--r--sound/soc/sh/fsi.c22
-rw-r--r--sound/soc/sh/hac.c20
-rw-r--r--sound/soc/sh/migor.c14
-rw-r--r--sound/soc/sh/rcar/Makefile1
-rw-r--r--sound/soc/sh/rcar/adg.c15
-rw-r--r--sound/soc/sh/rcar/cmd.c19
-rw-r--r--sound/soc/sh/rcar/core.c41
-rw-r--r--sound/soc/sh/rcar/ctu.c15
-rw-r--r--sound/soc/sh/rcar/dma.c17
-rw-r--r--sound/soc/sh/rcar/dvc.c16
-rw-r--r--sound/soc/sh/rcar/gen.c16
-rw-r--r--sound/soc/sh/rcar/mix.c14
-rw-r--r--sound/soc/sh/rcar/rsnd.h17
-rw-r--r--sound/soc/sh/rcar/src.c16
-rw-r--r--sound/soc/sh/rcar/ssi.c59
-rw-r--r--sound/soc/sh/rcar/ssiu.c15
-rw-r--r--sound/soc/sh/sh7760-ac97.c14
-rw-r--r--sound/soc/sh/siu.h26
-rw-r--r--sound/soc/sh/siu_dai.c26
-rw-r--r--sound/soc/sh/siu_pcm.c27
-rw-r--r--sound/soc/sh/ssi.c21
-rw-r--r--sound/soc/sirf/sirf-usp.c7
-rw-r--r--sound/soc/soc-ac97.c29
-rw-r--r--sound/soc/soc-acpi.c20
-rw-r--r--sound/soc/soc-compress.c120
-rw-r--r--sound/soc/soc-core.c200
-rw-r--r--sound/soc/soc-dapm.c51
-rw-r--r--sound/soc/soc-devres.c15
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c32
-rw-r--r--sound/soc/soc-io.c19
-rw-r--r--sound/soc/soc-jack.c19
-rw-r--r--sound/soc/soc-ops.c29
-rw-r--r--sound/soc/soc-pcm.c469
-rw-r--r--sound/soc/soc-topology.c91
-rw-r--r--sound/soc/soc-utils.c24
-rw-r--r--sound/soc/sti/uniperif_player.c6
-rw-r--r--sound/soc/sti/uniperif_reader.c2
-rw-r--r--sound/soc/stm/Kconfig1
-rw-r--r--sound/soc/stm/stm32_adfsdm.c10
-rw-r--r--sound/soc/stm/stm32_sai_sub.c146
-rw-r--r--sound/soc/tegra/tegra20_ac97.c2
-rw-r--r--sound/soc/tegra/tegra30_i2s.h2
-rw-r--r--sound/soc/tegra/tegra_alc5632.c17
-rw-r--r--sound/soc/tegra/tegra_rt5677.c17
-rw-r--r--sound/soc/uniphier/aio-core.c84
-rw-r--r--sound/soc/uniphier/aio-cpu.c5
-rw-r--r--sound/soc/uniphier/aio-ld11.c2
-rw-r--r--sound/soc/uniphier/aio-reg.h1
-rw-r--r--sound/soc/uniphier/aio.h6
-rw-r--r--sound/soc/zte/zx-tdm.c4
-rw-r--r--sound/synth/emux/emux.c17
-rw-r--r--sound/synth/util_mem.c16
-rw-r--r--sound/usb/6fire/pcm.c1
-rw-r--r--sound/usb/Makefile1
-rw-r--r--sound/usb/caiaq/audio.c6
-rw-r--r--sound/usb/card.c9
-rw-r--r--sound/usb/card.h2
-rw-r--r--sound/usb/clock.c24
-rw-r--r--sound/usb/endpoint.c2
-rw-r--r--sound/usb/hiface/pcm.c1
-rw-r--r--sound/usb/line6/toneport.c5
-rw-r--r--sound/usb/midi.c5
-rw-r--r--sound/usb/misc/ua101.c2
-rw-r--r--sound/usb/mixer.c214
-rw-r--r--sound/usb/mixer.h2
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--sound/usb/pcm.c71
-rw-r--r--sound/usb/pcm.h2
-rw-r--r--sound/usb/power.c104
-rw-r--r--sound/usb/power.h19
-rw-r--r--sound/usb/quirks-table.h3
-rw-r--r--sound/usb/quirks.c16
-rw-r--r--sound/usb/stream.c70
-rw-r--r--sound/x86/intel_hdmi_audio.c4
-rw-r--r--sound/xen/xen_snd_front_alsa.c4
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h (renamed from arch/riscv/include/asm/compat.h)15
-rw-r--r--tools/arch/parisc/include/uapi/asm/errno.h1
-rw-r--r--tools/arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h3
-rw-r--r--tools/arch/x86/include/asm/mcsafe_test.h13
-rw-r--r--tools/arch/x86/lib/memcpy_64.S112
-rw-r--r--tools/bpf/.gitignore5
-rw-r--r--tools/bpf/Makefile.helpers59
-rw-r--r--tools/bpf/bpftool/.gitignore2
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile13
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst12
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst33
-rw-r--r--tools/bpf/bpftool/Makefile10
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool134
-rw-r--r--tools/bpf/bpftool/btf_dumper.c251
-rw-r--r--tools/bpf/bpftool/cgroup.c170
-rw-r--r--tools/bpf/bpftool/common.c13
-rw-r--r--tools/bpf/bpftool/main.c4
-rw-r--r--tools/bpf/bpftool/main.h36
-rw-r--r--tools/bpf/bpftool/map.c238
-rw-r--r--tools/bpf/bpftool/prog.c257
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c6
-rw-r--r--tools/build/Build.include6
-rw-r--r--tools/build/Makefile2
-rw-r--r--tools/build/Makefile.feature1
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-reallocarray.c8
-rw-r--r--tools/hv/hv_vss_daemon.c65
-rw-r--r--tools/hv/lsvmbus12
-rw-r--r--tools/iio/iio_event_monitor.c6
-rw-r--r--tools/include/linux/bitmap.h17
-rw-r--r--tools/include/linux/compiler-gcc.h4
-rw-r--r--tools/include/linux/overflow.h278
-rw-r--r--tools/include/tools/libc_compat.h20
-rw-r--r--tools/include/uapi/asm-generic/unistd.h783
-rw-r--r--tools/include/uapi/linux/bpf.h130
-rw-r--r--tools/include/uapi/linux/btf.h2
-rw-r--r--tools/include/uapi/linux/in.h301
-rw-r--r--tools/include/uapi/linux/perf_event.h2
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/Makefile6
-rw-r--r--tools/lib/bpf/bpf.c1
-rw-r--r--tools/lib/bpf/bpf.h1
-rw-r--r--tools/lib/bpf/btf.c82
-rw-r--r--tools/lib/bpf/btf.h14
-rw-r--r--tools/lib/bpf/libbpf.c382
-rw-r--r--tools/lib/bpf/libbpf.h20
-rw-r--r--tools/lib/bpf/libbpf_errno.c74
-rw-r--r--tools/memory-model/Documentation/explanation.txt2
-rw-r--r--tools/memory-model/Documentation/recipes.txt12
-rw-r--r--tools/memory-model/README20
-rw-r--r--tools/memory-model/linux-kernel.bell2
-rw-r--r--tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus (renamed from tools/memory-model/litmus-tests/IRIW+mbonceonces+OnceOnce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus2
-rw-r--r--tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus (renamed from tools/memory-model/litmus-tests/LB+ctrlonceonce+mbonceonce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus (renamed from tools/memory-model/litmus-tests/MP+wmbonceonce+rmbonceonce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/R+fencembonceonces.litmus (renamed from tools/memory-model/litmus-tests/R+mbonceonces.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/README25
-rw-r--r--tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus (renamed from tools/memory-model/litmus-tests/S+wmbonceonce+poacquireonce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/SB+fencembonceonces.litmus (renamed from tools/memory-model/litmus-tests/SB+mbonceonces.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus32
-rw-r--r--tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus (renamed from tools/memory-model/litmus-tests/WRC+pooncerelease+rmbonceonce+Once.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus (renamed from tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus)2
-rwxr-xr-x[-rw-r--r--]tools/memory-model/scripts/checkalllitmus.sh2
-rwxr-xr-x[-rw-r--r--]tools/memory-model/scripts/checklitmus.sh2
-rw-r--r--tools/objtool/Makefile4
-rw-r--r--tools/objtool/arch/x86/include/asm/orc_types.h2
-rw-r--r--tools/objtool/check.c1
-rw-r--r--tools/objtool/check.h2
-rw-r--r--tools/objtool/elf.c47
-rw-r--r--tools/objtool/orc_dump.c3
-rw-r--r--tools/objtool/orc_gen.c2
-rw-r--r--tools/pci/pcitest.c51
-rw-r--r--tools/pci/pcitest.sh15
-rw-r--r--tools/perf/Documentation/perf-list.txt10
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Makefile.config9
-rw-r--r--tools/perf/Makefile.perf10
-rw-r--r--tools/perf/arch/arm64/Makefile21
-rwxr-xr-xtools/perf/arch/arm64/entry/syscalls/mksyscalltbl62
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c10
-rw-r--r--tools/perf/arch/s390/util/kvm-stat.c2
-rw-r--r--tools/perf/arch/x86/util/perf_regs.c2
-rw-r--r--tools/perf/arch/x86/util/pmu.c1
-rw-r--r--tools/perf/arch/x86/util/tsc.c1
-rw-r--r--tools/perf/bench/Build1
-rw-r--r--tools/perf/bench/mem-memcpy-x86-64-asm.S1
-rw-r--r--tools/perf/bench/mem-memcpy-x86-64-lib.c24
-rw-r--r--tools/perf/builtin-c2c.c7
-rw-r--r--tools/perf/builtin-diff.c2
-rw-r--r--tools/perf/builtin-report.c4
-rw-r--r--tools/perf/builtin-stat.c62
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/builtin-trace.c19
-rwxr-xr-xtools/perf/check-headers.sh3
-rw-r--r--tools/perf/include/bpf/bpf.h3
-rw-r--r--tools/perf/jvmti/jvmti_agent.c3
-rw-r--r--tools/perf/perf.h3
-rw-r--r--tools/perf/pmu-events/Build2
-rw-r--r--tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json87
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/extended.json18
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/extended.json56
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/transaction.json7
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/basic.json8
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/extended.json53
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/transaction.json7
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/extended.json24
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/extended.json35
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json7
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py40
-rwxr-xr-xtools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py4
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py11
-rw-r--r--tools/perf/scripts/python/sched-migration.py14
-rw-r--r--tools/perf/tests/builtin-test.c4
-rw-r--r--tools/perf/tests/parse-events.c18
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh69
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh2
-rw-r--r--tools/perf/trace/beauty/Build1
-rw-r--r--tools/perf/trace/beauty/beauty.h3
-rwxr-xr-xtools/perf/trace/beauty/drm_ioctl.sh9
-rwxr-xr-xtools/perf/trace/beauty/kcmp_type.sh2
-rwxr-xr-xtools/perf/trace/beauty/kvm_ioctl.sh4
-rwxr-xr-xtools/perf/trace/beauty/madvise_behavior.sh2
-rwxr-xr-xtools/perf/trace/beauty/perf_ioctl.sh2
-rwxr-xr-xtools/perf/trace/beauty/pkey_alloc_access_rights.sh2
-rwxr-xr-xtools/perf/trace/beauty/sndrv_ctl_ioctl.sh4
-rwxr-xr-xtools/perf/trace/beauty/sndrv_pcm_ioctl.sh4
-rw-r--r--tools/perf/trace/beauty/socket.c28
-rwxr-xr-xtools/perf/trace/beauty/socket_ipproto.sh11
-rwxr-xr-xtools/perf/trace/beauty/vhost_virtio_ioctl.sh6
-rw-r--r--tools/perf/ui/stdio/hist.c8
-rw-r--r--tools/perf/util/bpf-loader.c4
-rw-r--r--tools/perf/util/comm.c16
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c10
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h1
-rw-r--r--tools/perf/util/cs-etm.c71
-rw-r--r--tools/perf/util/evsel.c25
-rw-r--r--tools/perf/util/evsel.h9
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/header.h1
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/llvm-utils.c6
-rw-r--r--tools/perf/util/machine.c79
-rw-r--r--tools/perf/util/metricgroup.c26
-rw-r--r--tools/perf/util/metricgroup.h1
-rw-r--r--tools/perf/util/namespaces.h1
-rw-r--r--tools/perf/util/pmu.c6
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c37
-rw-r--r--tools/perf/util/stat-shadow.c5
-rw-r--r--tools/perf/util/syscalltbl.c4
-rw-r--r--tools/perf/util/unwind-libdw.c2
-rw-r--r--tools/perf/util/unwind-libunwind-local.c2
-rw-r--r--tools/power/x86/turbostat/turbostat.84
-rw-r--r--tools/power/x86/turbostat/turbostat.c120
-rw-r--r--tools/testing/nvdimm/test/nfit.c3
-rw-r--r--tools/testing/selftests/android/ion/ionapp_export.c1
-rw-r--r--tools/testing/selftests/bpf/Makefile17
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h21
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h4
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c6
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.h6
-rw-r--r--tools/testing/selftests/bpf/config1
-rw-r--r--tools/testing/selftests/bpf/socket_cookie_prog.c60
-rwxr-xr-xtools/testing/selftests/bpf/tcp_client.py12
-rwxr-xr-xtools/testing/selftests/bpf/tcp_server.py16
-rw-r--r--tools/testing/selftests/bpf/test_align.c5
-rw-r--r--tools/testing/selftests/bpf/test_btf.c206
-rw-r--r--tools/testing/selftests/bpf/test_btf_haskv.c7
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c131
-rwxr-xr-xtools/testing/selftests/bpf/test_kmod.sh9
-rwxr-xr-xtools/testing/selftests/bpf/test_lirc_mode2.sh9
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_seg6local.sh15
-rw-r--r--tools/testing/selftests/bpf/test_maps.c262
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py232
-rw-r--r--tools/testing/selftests/bpf/test_select_reuseport.c688
-rw-r--r--tools/testing/selftests/bpf/test_select_reuseport_common.h36
-rw-r--r--tools/testing/selftests/bpf/test_select_reuseport_kern.c180
-rwxr-xr-xtools/testing/selftests/bpf/test_skb_cgroup_id.sh62
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c47
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c187
-rw-r--r--tools/testing/selftests/bpf/test_sock.c5
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c42
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c225
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c8
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf.h1
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_kern.c17
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_user.c119
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c240
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c48
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.h4
-rw-r--r--tools/testing/selftests/cgroup/.gitignore1
-rw-r--r--tools/testing/selftests/cgroup/Makefile2
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c8
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h1
-rw-r--r--tools/testing/selftests/cgroup/test_core.c395
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh217
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh197
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh189
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh233
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/router_scale.sh167
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh366
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh119
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh117
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh13
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh55
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh18
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh19
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh134
-rwxr-xr-xtools/testing/selftests/drivers/usb/usbip/usbip_test.sh2
-rw-r--r--tools/testing/selftests/ftrace/config3
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc28
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc30
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/preemptirq/irqsoff_tracer.tc73
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-chardev.c6
-rw-r--r--tools/testing/selftests/kvm/.gitignore2
-rw-r--r--tools/testing/selftests/kvm/Makefile7
-rw-r--r--tools/testing/selftests/kvm/cr4_cpuid_sync_test.c113
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c308
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h49
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h2
-rw-r--r--tools/testing/selftests/kvm/include/vmx.h66
-rw-r--r--tools/testing/selftests/kvm/include/x86.h8
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c152
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util_internal.h7
-rw-r--r--tools/testing/selftests/kvm/lib/vmx.c104
-rw-r--r--tools/testing/selftests/kvm/lib/x86.c274
-rw-r--r--tools/testing/selftests/kvm/set_sregs_test.c2
-rw-r--r--tools/testing/selftests/kvm/state_test.c196
-rw-r--r--tools/testing/selftests/kvm/sync_regs_test.c21
-rw-r--r--tools/testing/selftests/kvm/vmx_tsc_adjust_test.c102
-rw-r--r--tools/testing/selftests/mount/config1
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile2
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/net/fib_tests.sh41
-rw-r--r--tools/testing/selftests/net/forwarding/README2
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_port_isolation.sh151
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh108
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath.sh253
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh291
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh132
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh6
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh126
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh283
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh11
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh285
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_lib.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_nh.sh4
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh21
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_bridge.sh113
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_bridge_vlan.sh132
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_broadcast.sh233
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multipath.sh39
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_chains.sh86
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_shblocks.sh2
-rwxr-xr-xtools/testing/selftests/net/ip6_gre_headroom.sh65
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh128
-rw-r--r--tools/testing/selftests/net/tcp_mmap.c2
-rw-r--r--tools/testing/selftests/net/tls.c692
-rwxr-xr-xtools/testing/selftests/net/udpgso_bench.sh3
-rw-r--r--tools/testing/selftests/powerpc/alignment/.gitignore4
-rw-r--r--tools/testing/selftests/powerpc/alignment/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/alignment/alignment_handler.c107
-rw-r--r--tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c33
-rw-r--r--tools/testing/selftests/powerpc/alignment/copy_paste_unaligned_common.c53
-rw-r--r--tools/testing/selftests/powerpc/alignment/copy_paste_unaligned_common.h26
-rw-r--r--tools/testing/selftests/powerpc/alignment/copy_unaligned.c41
-rw-r--r--tools/testing/selftests/powerpc/alignment/paste_last_unaligned.c43
-rw-r--r--tools/testing/selftests/powerpc/alignment/paste_unaligned.c43
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/futex_bench.c1
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/mmap_bench.c2
-rw-r--r--tools/testing/selftests/powerpc/copyloops/.gitignore17
-rw-r--r--tools/testing/selftests/powerpc/copyloops/Makefile45
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/asm-compat.h (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.)0
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/feature-fixups.h0
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h44
-rw-r--r--tools/testing/selftests/powerpc/copyloops/copy_tofrom_user_reference.S24
-rw-r--r--tools/testing/selftests/powerpc/copyloops/exc_validate.c124
-rw-r--r--tools/testing/selftests/powerpc/copyloops/stubs.S19
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c8
-rw-r--r--tools/testing/selftests/powerpc/harness.c18
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c1
l---------tools/testing/selftests/powerpc/primitives/asm/asm-const.h1
l---------tools/testing/selftests/powerpc/primitives/asm/feature-fixups.h1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/core-pkey.c4
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c4
-rw-r--r--tools/testing/selftests/powerpc/stringloops/Makefile29
-rw-r--r--tools/testing/selftests/powerpc/stringloops/asm/cache.h1
-rw-r--r--tools/testing/selftests/powerpc/stringloops/asm/ppc-opcode.h39
-rw-r--r--tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h25
-rw-r--r--tools/testing/selftests/powerpc/stringloops/memcmp.c99
l---------tools/testing/selftests/powerpc/stringloops/memcmp_32.S1
-rw-r--r--tools/testing/selftests/powerpc/stringloops/string.c21
-rw-r--r--tools/testing/selftests/powerpc/stringloops/strlen.c127
l---------tools/testing/selftests/powerpc/stringloops/strlen_32.S1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-sigreturn.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-tar.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-vmxcopy.c1
-rw-r--r--tools/testing/selftests/powerpc/utils.c17
-rw-r--r--tools/testing/selftests/proc/.gitignore2
-rw-r--r--tools/testing/selftests/proc/Makefile3
-rw-r--r--tools/testing/selftests/proc/proc.h12
-rw-r--r--tools/testing/selftests/proc/self.c39
-rw-r--r--tools/testing/selftests/proc/thread-self.c64
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh26
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh11
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh7
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot4
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh2
-rw-r--r--tools/testing/selftests/rseq/param_test.c44
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64.h594
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h513
-rw-r--r--tools/testing/selftests/rseq/rseq.h28
-rw-r--r--tools/testing/selftests/tc-testing/README16
-rw-r--r--tools/testing/selftests/tc-testing/config48
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/csum.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json3
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/nat.json593
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json26
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json917
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/fw.json1049
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tests.json4
-rw-r--r--tools/testing/selftests/timers/raw_skew.c5
-rw-r--r--tools/testing/selftests/vDSO/Makefile13
-rw-r--r--tools/testing/selftests/vDSO/vdso_test.c7
-rw-r--r--tools/testing/selftests/vm/.gitignore1
-rw-r--r--tools/testing/selftests/vm/Makefile1
-rw-r--r--tools/testing/selftests/vm/map_populate.c113
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests11
-rw-r--r--tools/usb/ffs-test.c19
-rw-r--r--tools/virtio/asm/barrier.h4
-rw-r--r--tools/virtio/linux/kernel.h5
-rw-r--r--tools/vm/page-types.c118
-rw-r--r--virt/kvm/arm/aarch32.c20
-rw-r--r--virt/kvm/arm/arch_timer.c15
-rw-r--r--virt/kvm/arm/arm.c55
-rw-r--r--virt/kvm/arm/mmu.c87
-rw-r--r--virt/kvm/arm/psci.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-debug.c50
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c24
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c27
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c66
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c72
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c56
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.h25
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c10
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c13
-rw-r--r--virt/kvm/arm/vgic/vgic.c19
-rw-r--r--virt/kvm/arm/vgic/vgic.h7
-rw-r--r--virt/kvm/async_pf.c2
-rw-r--r--virt/kvm/eventfd.c17
-rw-r--r--virt/kvm/kvm_main.c54
10291 files changed, 482873 insertions, 199808 deletions
diff --git a/.clang-format b/.clang-format
index faffc0d5af4e..1d5da22e0ba5 100644
--- a/.clang-format
+++ b/.clang-format
@@ -382,7 +382,7 @@ IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
#IndentPPDirectives: None # Unknown to clang-format-5.0
IndentWidth: 8
-IndentWrappedFunctionNames: true
+IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
diff --git a/.mailmap b/.mailmap
index 29ddeb1bf015..2a6f685bf706 100644
--- a/.mailmap
+++ b/.mailmap
@@ -31,6 +31,8 @@ Arnaud Patard <arnaud.patard@rtp-net.org>
Arnd Bergmann <arnd@arndb.de>
Axel Dyks <xl@xlsigned.net>
Axel Lin <axel.lin@gmail.com>
+Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
+Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
Ben Gardner <bgardner@wabtec.com>
Ben M Cahill <ben.m.cahill@intel.com>
Björn Steinbrink <B.Steinbrink@gmx.de>
@@ -81,6 +83,9 @@ Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
Jean Tourrilhes <jt@hpl.hp.com>
Jeff Garzik <jgarzik@pretzel.yyz.us>
+Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
+Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
+Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
diff --git a/CREDITS b/CREDITS
index 989cda91c427..5befd2d714d0 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2571,6 +2571,11 @@ S: Helstorfer Str. 7
S: D-30625 Hannover
S: Germany
+N: Ron Minnich
+E: rminnich@sandia.gov
+E: rminnich@gmail.com
+D: 9p filesystem development
+
N: Corey Minyard
E: minyard@wf-rch.cirr.com
E: minyard@mvista.com
diff --git a/Documentation/ABI/obsolete/sysfs-class-typec b/Documentation/ABI/obsolete/sysfs-class-typec
new file mode 100644
index 000000000000..32623514ee87
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-class-typec
@@ -0,0 +1,48 @@
+These files are deprecated and will be removed. The same files are available
+under /sys/bus/typec (see Documentation/ABI/testing/sysfs-bus-typec).
+
+What: /sys/class/typec/<port|partner|cable>/<dev>/svid
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ The SVID (Standard or Vendor ID) assigned by USB-IF for this
+ alternate mode.
+
+What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Every supported mode will have its own directory. The name of
+ a mode will be "mode<index>" (for example mode1), where <index>
+ is the actual index to the mode VDO returned by Discover Modes
+ USB power delivery command.
+
+What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/description
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows description of the mode. The description is optional for
+ the drivers, just like with the Billboard Devices.
+
+What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/vdo
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows the VDO in hexadecimal returned by Discover Modes command
+ for this mode.
+
+What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/active
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows if the mode is active or not. The attribute can be used
+ for entering/exiting the mode with partners and cable plugs, and
+ with the port alternate modes it can be used for disabling
+ support for specific alternate modes. Entering/exiting modes is
+ supported as synchronous operation so write(2) to the attribute
+ does not return until the enter/exit mode operation has
+ finished. The attribute is notified when the mode is
+ entered/exited so poll(2) on the attribute wakes up.
+ Entering/exiting a mode will also generate uevent KOBJ_CHANGE.
+
+ Valid values: yes, no
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
index 3eaffbb2d468..3fed8fdb873d 100644
--- a/Documentation/ABI/stable/sysfs-bus-vmbus
+++ b/Documentation/ABI/stable/sysfs-bus-vmbus
@@ -42,6 +42,13 @@ Contact: K. Y. Srinivasan <kys@microsoft.com>
Description: The 16 bit vendor ID of the device
Users: tools/hv/lsvmbus and user level RDMA libraries
+What: /sys/bus/vmbus/devices/<UUID>/numa_node
+Date: Jul 2018
+KernelVersion: 4.19
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description: The NUMA node to which the VMBUS device is
+ attached, or -1 if the node is unknown.
+
What: /sys/bus/vmbus/devices/<UUID>/channels/<N>
Date: September. 2017
KernelVersion: 4.14
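
As a usage sketch (not part of the patch), a userspace consumer of the new
numa_node attribute could look like the following C fragment; the all-zero
device UUID in the path is a placeholder, not a real device:

    #include <stdio.h>

    int main(void)
    {
    	/* Placeholder VMBUS device UUID, for illustration only. */
    	const char *path = "/sys/bus/vmbus/devices/"
    		"00000000-0000-0000-0000-000000000000/numa_node";
    	FILE *f = fopen(path, "r");
    	int node;

    	if (!f) {
    		perror(path);
    		return 1;
    	}
    	if (fscanf(f, "%d", &node) == 1)
    		printf("VMBUS device is on NUMA node %d\n", node);
    	fclose(f);
    	return 0;
    }
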
diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill
index e1ba4a104753..80151a409d67 100644
--- a/Documentation/ABI/stable/sysfs-class-rfkill
+++ b/Documentation/ABI/stable/sysfs-class-rfkill
@@ -11,7 +11,7 @@ KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org,
Description: The rfkill class subsystem folder.
Each registered rfkill driver is represented by an rfkillX
- subfolder (X being an integer > 0).
+ subfolder (X being an integer >= 0).
What: /sys/class/rfkill/rfkill[0-9]+/name
@@ -48,8 +48,8 @@ Contact: linux-wireless@vger.kernel.org
Description: Current state of the transmitter.
This file was scheduled to be removed in 2014, but due to its
large number of users it will be sticking around for a bit
- longer. Despite it being marked as stabe, the newer "hard" and
- "soft" interfaces should be preffered, since it is not possible
+ longer. Despite it being marked as stable, the newer "hard" and
+ "soft" interfaces should be preferred, since it is not possible
to express the 'soft and hard block' state of the rfkill driver
through this interface. There will likely be another attempt to
remove it in the future.
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
new file mode 100644
index 000000000000..d9d117d457e1
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
@@ -0,0 +1,78 @@
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
+ asic_health
+
+Date: June 2018
+KernelVersion: 4.19
+Contact: Vadim Pasternak <vadimp@mellanox.com>
+Description: This file shows ASIC health status. The possible values are:
+ 0 - health failed, 2 - health OK, 3 - ASIC in booting state.
+
+ The files are read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
+ cpld1_version
+ cpld2_version
+
+Date: June 2018
+KernelVersion: 4.19
+Contact: Vadim Pasternak <vadimp@mellanox.com>
+Description: These files show which CPLD versions have been burned
+ on the carrier and switch boards.
+
+ The files are read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/select_iio
+Date: June 2018
+KernelVersion: 4.19
+Contact: Vadim Pasternak <vadimp@mellanox.com>
+Description: This file allows iio device selection.
+
+ The select_iio attribute can be written with 0 or 1. It
+ selects which one of the iio devices can be accessed.
+
+ The file is read/write.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/psu1_on
+ /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/psu2_on
+ /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/pwr_cycle
+ /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/pwr_down
+Date: June 2018
+KernelVersion: 4.19
+Contact: Vadim Pasternak <vadimp@mellanox.com>
+Description: These files allow asserting a system power cycle, switching
+ the power supply units on and off, and shutting down the
+ system's main power domain.
+ Expected behavior:
+ When pwr_cycle is written 1, the auxiliary power domain goes
+ down and comes back up after a short period (about 1 second).
+ When psu1_on or psu2_on is written 1, the related unit is
+ disconnected from the power source; when written 0, it is
+ reconnected. If both are written 1, the power supplies' main
+ power domain goes down.
+ When pwr_down is written 1, the system's main power domain
+ goes down.
+
+ The files are write only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
+ reset_aux_pwr_or_ref
+ reset_asic_thermal
+ reset_hotswap_or_halt
+ reset_hotswap_or_wd
+ reset_fw_reset
+ reset_long_pb
+ reset_main_pwr_fail
+ reset_short_pb
+ reset_sw_reset
+Date: June 2018
+KernelVersion: 4.19
+Contact: Vadim Pasternak <vadimp@mellanox.com>
+Description: These files show the system reset cause, as follows: power
+ auxiliary outage or power refresh, ASIC thermal shutdown, halt,
+ hotswap, watchdog, firmware reset, long press power button,
+ short press power button, software reset. A value of 1 in a
+ file means it is the reset cause, 0 otherwise. Only one of the
+ above causes can be 1 at a time, representing only the most
+ recent reset cause.
+
+ The files are read only.
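
A minimal sketch of driving the write-only attributes above from userspace;
the hwmon0 instance name is a hypothetical placeholder (the actual instance
is enumerated at runtime):

    #include <stdio.h>

    /* Write "1" to one mlxreg-io attribute; returns 0 on success. */
    static int mlxreg_write1(const char *attr)
    {
    	char path[256];
    	FILE *f;

    	snprintf(path, sizeof(path),
    		 "/sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon0/%s",
    		 attr);
    	f = fopen(path, "w");
    	if (!f)
    		return -1;
    	fputs("1\n", f);
    	return fclose(f);
    }

    int main(void)
    {
    	/* Power-cycle the auxiliary power domain as described above. */
    	return mlxreg_write1("pwr_cycle") ? 1 : 0;
    }
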
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc
index 1ba0d0fda9c0..9281e2aa38df 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
@@ -263,3 +263,8 @@ Description: Specific streaming header descriptors
is connected
bmInfo - capabilities of this video streaming
interface
+
+What: /sys/class/udc/udc.name/device/gadget/video4linux/video.name/function_name
+Date: May 2018
+KernelVersion: 4.19
+Description: UVC configfs function instance name
diff --git a/Documentation/ABI/testing/ppc-memtrace b/Documentation/ABI/testing/ppc-memtrace
index 2e8b93741270..9606aed33137 100644
--- a/Documentation/ABI/testing/ppc-memtrace
+++ b/Documentation/ABI/testing/ppc-memtrace
@@ -13,10 +13,11 @@ Contact: linuxppc-dev@lists.ozlabs.org
Description: Write an integer containing the size in bytes of the memory
you want removed from each NUMA node to this file - it must be
aligned to the memblock size. This amount of RAM will be removed
- from the kernel mappings and the following debugfs files will be
- created. This can only be successfully done once per boot. Once
- memory is successfully removed from each node, the following
- files are created.
+ from each NUMA node in the kernel mappings. Once memory is
+ successfully removed from each node, the debugfs files listed
+ below are created. This can only be successfully done once per
+ boot. To re-add the memory to the kernel, echo 0 into this
+ file (it will be automatically onlined).
What: /sys/kernel/debug/powerpc/memtrace/<node-id>
Date: Aug 2017
diff --git a/Documentation/ABI/testing/procfs-diskstats b/Documentation/ABI/testing/procfs-diskstats
index f91a973a37fe..abac31d216de 100644
--- a/Documentation/ABI/testing/procfs-diskstats
+++ b/Documentation/ABI/testing/procfs-diskstats
@@ -5,6 +5,7 @@ Description:
The /proc/diskstats file displays the I/O statistics
of block devices. Each line contains the following 14
fields:
+
1 - major number
2 - minor number
3 - device name
@@ -19,4 +20,13 @@ Description:
12 - I/Os currently in progress
13 - time spent doing I/Os (ms)
14 - weighted time spent doing I/Os (ms)
+
+ Kernel 4.18+ appends four more fields for discard
+ tracking, putting the total at 18:
+
+ 15 - discards completed successfully
+ 16 - discards merged
+ 17 - sectors discarded
+ 18 - time spent discarding
+
For more details refer to Documentation/iostats.txt
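
A sketch of parsing /proc/diskstats under the extended format, assuming only
that the fields are whitespace-separated as described above; 14-field lines
from older kernels are simply skipped:

    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/proc/diskstats", "r");
    	char line[512], name[64];
    	unsigned int major, minor;
    	unsigned long long v[15];	/* fields 4..18 */

    	if (!f)
    		return 1;
    	while (fgets(line, sizeof(line), f)) {
    		int n = sscanf(line,
    			"%u %u %63s %llu %llu %llu %llu %llu %llu %llu"
    			" %llu %llu %llu %llu %llu %llu %llu %llu",
    			&major, &minor, name,
    			&v[0], &v[1], &v[2], &v[3], &v[4], &v[5],
    			&v[6], &v[7], &v[8], &v[9], &v[10], &v[11],
    			&v[12], &v[13], &v[14]);
    		if (n == 18)	/* 4.18+: discard fields present */
    			printf("%s: %llu discards completed, %llu sectors\n",
    			       name, v[11], v[13]);
    	}
    	fclose(f);
    	return 0;
    }
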
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
index 4fe677ed1305..ab49b9ac3bcb 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
@@ -83,3 +83,11 @@ KernelVersion: 4.7
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Indicates the capabilities of the Coresight TMC.
The value is read directly from the DEVID register, 0xFC8,
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/buffer_size
+Date: December 2018
+KernelVersion: 4.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Size of the trace buffer for TMC-ETR when used in SYSFS
+ mode. Writable only for TMC-ETR configurations. The value
+ should be aligned to the kernel pagesize.
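
A sketch of configuring the buffer size with the required page alignment;
the device name 20070000.tmc is a hypothetical placeholder:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	long page = sysconf(_SC_PAGESIZE);
    	unsigned long want = 1024 * 1024 + 100;	/* arbitrary request */
    	/* Round the requested size up to the kernel page size. */
    	unsigned long size = (want + page - 1) / page * page;
    	FILE *f = fopen("/sys/bus/coresight/devices/20070000.tmc/buffer_size",
    			"w");

    	if (!f)
    		return 1;
    	fprintf(f, "%lu\n", size);
    	return fclose(f) ? 1 : 0;
    }
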
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 731146c3b138..a5b4f223641d 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -197,6 +197,18 @@ Description:
Angle of rotation. Units after application of scale and offset
are radians.
+What: /sys/bus/iio/devices/iio:deviceX/in_positionrelative_x_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_positionrelative_y_raw
+KernelVersion: 4.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Relative position in direction x or y on a pad (may be
+ arbitrarily assigned but should match other such assignments on
+ device).
+ Units after application of scale and offset are milli percent
+ of the pad's size in both directions. Should be calibrated by
+ the consumer.
+
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_raw
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_raw
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_raw
@@ -1295,13 +1307,16 @@ What: /sys/.../iio:deviceX/in_intensityY_raw
What: /sys/.../iio:deviceX/in_intensityY_ir_raw
What: /sys/.../iio:deviceX/in_intensityY_both_raw
What: /sys/.../iio:deviceX/in_intensityY_uv_raw
+What: /sys/.../iio:deviceX/in_intensityY_duv_raw
KernelVersion: 3.4
Contact: linux-iio@vger.kernel.org
Description:
Unit-less light intensity. Modifiers both and ir indicate
that measurements contain visible and infrared light
- components or just infrared light, respectively. Modifier uv indicates
- that measurements contain ultraviolet light components.
+ components or just infrared light, respectively. Modifier
+ uv indicates that measurements contain ultraviolet light
+ components. Modifier duv indicates that measurements
+ contain deep ultraviolet light components.
What: /sys/.../iio:deviceX/in_uvindex_input
KernelVersion: 4.6
@@ -1663,3 +1678,10 @@ KernelVersion: 4.12
Contact: linux-iio@vger.kernel.org
Description:
Raw counter device counters direction for channel Y.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_phaseY_raw
+KernelVersion: 4.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw (unscaled) phase difference reading from channel Y
+ that can be processed to radians. \ No newline at end of file
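
For the raw channels above, a processed value follows the usual IIO
userspace convention (raw + offset) * scale, with offset defaulting to 0 and
scale to 1 when the corresponding attributes are absent. A sketch, with
hypothetical device and channel names:

    #include <stdio.h>

    /* Read one numeric sysfs attribute; leaves *val untouched and
     * returns 0 if the attribute is absent. */
    static int iio_read(const char *dir, const char *attr, double *val)
    {
    	char path[256];
    	FILE *f;
    	int ok;

    	snprintf(path, sizeof(path), "%s/%s", dir, attr);
    	f = fopen(path, "r");
    	if (!f)
    		return 0;
    	ok = fscanf(f, "%lf", val) == 1;
    	fclose(f);
    	return ok;
    }

    int main(void)
    {
    	/* iio:device0 and the phase channel number are placeholders. */
    	const char *dir = "/sys/bus/iio/devices/iio:device0";
    	double raw = 0, offset = 0, scale = 1;

    	if (!iio_read(dir, "in_phase0_raw", &raw))
    		return 1;
    	iio_read(dir, "in_phase0_offset", &offset);
    	iio_read(dir, "in_phase0_scale", &scale);
    	printf("processed: %f\n", (raw + offset) * scale);
    	return 0;
    }
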
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-isl29501 b/Documentation/ABI/testing/sysfs-bus-iio-isl29501
new file mode 100644
index 000000000000..d009cfbbd72b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-isl29501
@@ -0,0 +1,47 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_agc_gain
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_agc_gain_bias
+KernelVersion: 4.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ This sensor has an automatic gain control (agc) loop
+ which sets the analog signal levels at an optimum
+ level by controlling programmable gain amplifiers. The
+ criteria for optimal gain is determined by the sensor.
+
+ Return the actual gain value as an integer in [0; 65536]
+ range when read from.
+
+ The agc gain read when measuring crosstalk shall be
+ written into in_proximity0_agc_gain_bias.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calib_phase_temp_a
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calib_phase_temp_b
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calib_phase_light_a
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calib_phase_light_b
+KernelVersion: 4.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ The sensor is able to perform correction of distance
+ measurements due to changing temperature and ambient
+ light conditions. It can be programmed to correct for
+ a second order error polynomial.
+
+ Phase data has to be collected when temperature and
+ ambient light are modulated independently.
+
+ Then a least squares curve fit to a second order
+ polynomial has to be generated from the data. The
+ resultant curves have the form ax^2 + bx + c.
+
+ From those two curves, a and b coefficients shall be
+ stored in in_proximity0_calib_phase_temp_a and
+ in_proximity0_calib_phase_temp_b for temperature and
+ in in_proximity0_calib_phase_light_a and
+ in_proximity0_calib_phase_light_b for ambient light.
+
+ Those values must be integer in [0; 8355840] range.
+
+ Finally, the c constant is set by the sensor
+ internally.
+
+ The value stored in the sensor is displayed when read.
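
The calibration described above reduces to a least-squares fit of the
collected phase samples to a second order polynomial ax^2 + bx + c. A
self-contained sketch that solves the 3x3 normal equations by Cramer's rule;
the sample data is invented for illustration:

    #include <stdio.h>

    /* Least-squares fit of y = a*x^2 + b*x + c over n samples. */
    static void quadfit(const double *x, const double *y, int n,
    		    double *a, double *b, double *c)
    {
    	double s0 = n, s1 = 0, s2 = 0, s3 = 0, s4 = 0;
    	double t0 = 0, t1 = 0, t2 = 0;
    	int i;

    	for (i = 0; i < n; i++) {
    		double xi = x[i], x2 = xi * xi;
    		s1 += xi; s2 += x2; s3 += x2 * xi; s4 += x2 * x2;
    		t0 += y[i]; t1 += y[i] * xi; t2 += y[i] * x2;
    	}
    	/* Normal equations:
    	 *   [s4 s3 s2][a]   [t2]
    	 *   [s3 s2 s1][b] = [t1]
    	 *   [s2 s1 s0][c]   [t0]
    	 */
    	{
    		double d  = s4*(s2*s0 - s1*s1) - s3*(s3*s0 - s1*s2)
    			  + s2*(s3*s1 - s2*s2);
    		*a = (t2*(s2*s0 - s1*s1) - s3*(t1*s0 - t0*s1)
    		      + s2*(t1*s1 - t0*s2)) / d;
    		*b = (s4*(t1*s0 - t0*s1) - t2*(s3*s0 - s1*s2)
    		      + s2*(s3*t0 - s2*t1)) / d;
    		*c = (s4*(s2*t0 - s1*t1) - s3*(s3*t0 - s2*t1)
    		      + t2*(s3*s1 - s2*s2)) / d;
    	}
    }

    int main(void)
    {
    	/* Invented example samples: phase vs. temperature. */
    	double x[] = { 10, 20, 30, 40, 50 };
    	double y[] = { 1.2, 1.9, 3.1, 4.8, 7.0 };
    	double a, b, c;

    	quadfit(x, y, 5, &a, &b, &c);
    	printf("a=%g b=%g c=%g\n", a, b, c);
    	/* a and b would then be scaled into the [0; 8355840] range
    	 * expected by the calibration attributes; c is internal. */
    	return 0;
    }
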
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-light-si1133 b/Documentation/ABI/testing/sysfs-bus-iio-light-si1133
new file mode 100644
index 000000000000..6f130cdb26a6
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-light-si1133
@@ -0,0 +1,22 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_ir_small_raw
+KernelVersion: 4.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Unit-less infrared intensity. The intensity is measured from 1
+ dark photodiode. "small" indicates the surface area capturing
+ infrared.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_ir_large_raw
+KernelVersion: 4.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Unit-less infrared intensity. The intensity is measured from 4
+ dark photodiodes. "large" indicates the surface area capturing
+ infrared.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_large_raw
+KernelVersion: 4.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Unit-less light intensity with more diodes.
+
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats b/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats
new file mode 100644
index 000000000000..4b0318c99507
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats
@@ -0,0 +1,122 @@
+==========================
+PCIe Device AER statistics
+==========================
+These attributes show up under all the devices that are AER capable. These
+statistical counters indicate the errors "as seen/reported by the device".
+Note that this may mean that if an endpoint is causing problems, the AER
+counters may increment at its link partner (e.g. root port) because the
+errors may be "seen" / reported by the link partner and not the
+problematic endpoint itself (which may report all counters as 0 as it never
+saw any problems).
+
+Where: /sys/bus/pci/devices/<dev>/aer_dev_correctable
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: List of correctable errors seen and reported by this
+ PCI device using ERR_COR. Note that since multiple errors may
+ be reported using a single ERR_COR message,
+ TOTAL_ERR_COR at the end of the file may not match the actual
+ total of all the errors in the file. Sample output:
+-------------------------------------------------------------------------
+localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_correctable
+Receiver Error 2
+Bad TLP 0
+Bad DLLP 0
+RELAY_NUM Rollover 0
+Replay Timer Timeout 0
+Advisory Non-Fatal 0
+Corrected Internal Error 0
+Header Log Overflow 0
+TOTAL_ERR_COR 2
+-------------------------------------------------------------------------
+
+Where: /sys/bus/pci/devices/<dev>/aer_dev_fatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: List of uncorrectable fatal errors seen and reported by this
+ PCI device using ERR_FATAL. Note that since multiple errors may
+ be reported using a single ERR_FATAL message,
+ TOTAL_ERR_FATAL at the end of the file may not match the actual
+ total of all the errors in the file. Sample output:
+-------------------------------------------------------------------------
+localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_fatal
+Undefined 0
+Data Link Protocol 0
+Surprise Down Error 0
+Poisoned TLP 0
+Flow Control Protocol 0
+Completion Timeout 0
+Completer Abort 0
+Unexpected Completion 0
+Receiver Overflow 0
+Malformed TLP 0
+ECRC 0
+Unsupported Request 0
+ACS Violation 0
+Uncorrectable Internal Error 0
+MC Blocked TLP 0
+AtomicOp Egress Blocked 0
+TLP Prefix Blocked Error 0
+TOTAL_ERR_FATAL 0
+-------------------------------------------------------------------------
+
+Where: /sys/bus/pci/devices/<dev>/aer_dev_nonfatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: List of uncorrectable nonfatal errors seen and reported by this
+ PCI device using ERR_NONFATAL. Note that since multiple errors
+ may be reported using a single ERR_NONFATAL message,
+ TOTAL_ERR_NONFATAL at the end of the file may not match the
+ actual total of all the errors in the file. Sample output:
+-------------------------------------------------------------------------
+localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_nonfatal
+Undefined 0
+Data Link Protocol 0
+Surprise Down Error 0
+Poisoned TLP 0
+Flow Control Protocol 0
+Completion Timeout 0
+Completer Abort 0
+Unexpected Completion 0
+Receiver Overflow 0
+Malformed TLP 0
+ECRC 0
+Unsupported Request 0
+ACS Violation 0
+Uncorrectable Internal Error 0
+MC Blocked TLP 0
+AtomicOp Egress Blocked 0
+TLP Prefix Blocked Error 0
+TOTAL_ERR_NONFATAL 0
+-------------------------------------------------------------------------
+
+============================
+PCIe Rootport AER statistics
+============================
+These attributes show up under only the rootports (or root complex event
+collectors) that are AER capable. These indicate the number of error messages as
+"reported to" the rootport. Please note that the rootports also transmit
+(internally) the ERR_* messages for errors seen by the internal rootport PCI
+device, so these counters include them and are thus cumulative of all the error
+messages on the PCI hierarchy originating at that root port.
+
+Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_cor
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: Total number of ERR_COR messages reported to rootport.
+
+Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_fatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: Total number of ERR_FATAL messages reported to rootport.
+
+Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_nonfatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: Total number of ERR_NONFATAL messages reported to rootport.
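
A sketch of consuming one of the per-device counter files above, which hold
one "name count" pair per line as in the sample output; the device address
is a placeholder:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	/* 0000:00:1c.0 is a placeholder device address. */
    	FILE *f = fopen("/sys/bus/pci/devices/0000:00:1c.0/"
    			"aer_dev_correctable", "r");
    	char line[128];
    	unsigned long long count, sum = 0, total = 0;

    	if (!f)
    		return 1;
    	while (fgets(line, sizeof(line), f)) {
    		char *sp = strrchr(line, ' ');	/* count is the last field */

    		if (!sp || sscanf(sp, "%llu", &count) != 1)
    			continue;
    		if (strncmp(line, "TOTAL_ERR_COR", 13) == 0)
    			total = count;
    		else
    			sum += count;
    	}
    	fclose(f);
    	/* As noted above, sum may legitimately differ from total. */
    	printf("per-error sum %llu, TOTAL_ERR_COR %llu\n", sum, total);
    	return 0;
    }
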
diff --git a/Documentation/ABI/testing/sysfs-bus-typec b/Documentation/ABI/testing/sysfs-bus-typec
new file mode 100644
index 000000000000..205d9c91e2e1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-typec
@@ -0,0 +1,51 @@
+What: /sys/bus/typec/devices/.../active
+Date: July 2018
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows if the mode is active or not. The attribute can be used
+ for entering/exiting the mode. Entering/exiting modes is
+ supported as synchronous operation so write(2) to the attribute
+ does not return until the enter/exit mode operation has
+ finished. The attribute is notified when the mode is
+ entered/exited so poll(2) on the attribute wakes up.
+ Entering/exiting a mode will also generate uevent KOBJ_CHANGE.
+
+ Valid values are boolean.
+
+What: /sys/bus/typec/devices/.../description
+Date: July 2018
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows description of the mode. The description is optional for
+ the drivers, just like with the Billboard Devices.
+
+What: /sys/bus/typec/devices/.../mode
+Date: July 2018
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ The index number of the mode returned by Discover Modes USB
+ Power Delivery command. Depending on the alternate mode, the
+ mode index may be significant.
+
+ With some alternate modes (SVIDs), the mode index is assigned
+ for specific functionality in the specification for that
+ alternate mode.
+
+ With other alternate modes, the mode index values are not
+ assigned, and therefore cannot be used for identification. When
+ the mode index is not assigned, the alternate mode must be
+ identified with either the mode VDO or the description.
+
+What: /sys/bus/typec/devices/.../svid
+Date: July 2018
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ The Standard or Vendor ID (SVID) assigned by USB-IF for this
+ alternate mode.
+
+What: /sys/bus/typec/devices/.../vdo
+Date: July 2018
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows the VDO in hexadecimal returned by Discover Modes command
+ for this mode.
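
Because entering/exiting is synchronous and the active attribute supports
poll(2), a monitor could follow the usual sysfs pattern of reading first and
then polling for POLLPRI; the device path below is hypothetical:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Hypothetical alternate mode device path. */
    	int fd = open("/sys/bus/typec/devices/port0.0/active", O_RDONLY);
    	char buf[8];
    	struct pollfd pfd;
    	ssize_t n;

    	if (fd < 0)
    		return 1;
    	for (;;) {
    		/* sysfs poll: read first, then wait for POLLPRI. */
    		lseek(fd, 0, SEEK_SET);
    		n = read(fd, buf, sizeof(buf) - 1);
    		if (n > 0) {
    			buf[n] = '\0';
    			printf("active: %s", buf);
    		}
    		pfd.fd = fd;
    		pfd.events = POLLPRI | POLLERR;
    		if (poll(&pfd, 1, -1) < 0)
    			break;
    	}
    	close(fd);
    	return 0;
    }
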
diff --git a/Documentation/ABI/testing/sysfs-class-fpga-manager b/Documentation/ABI/testing/sysfs-class-fpga-manager
index 23056c532fdd..5284fa33d4c5 100644
--- a/Documentation/ABI/testing/sysfs-class-fpga-manager
+++ b/Documentation/ABI/testing/sysfs-class-fpga-manager
@@ -35,3 +35,27 @@ Description: Read fpga manager state as a string.
* write complete = Doing post programming steps
* write complete error = Error while doing post programming
* operating = FPGA is programmed and operating
+
+What: /sys/class/fpga_manager/<fpga>/status
+Date: June 2018
+KernelVersion: 4.19
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read fpga manager status as a string.
+ If an FPGA programming operation fails, it could be caused by a
+ CRC error or an incompatible bitstream image. The intent of
+ this interface is to provide more detailed information on FPGA
+ programming errors to userspace. The supported status strings
+ are listed below.
+
+ * reconfig operation error - invalid operations detected by
+ reconfiguration hardware.
+ e.g. start reconfiguration
+ with errors not cleared
+ * reconfig CRC error - CRC error detected by
+ reconfiguration hardware.
+ * reconfig incompatible image - reconfiguration image is
+ incompatible with hardware
+ * reconfig IP protocol error - protocol errors detected by
+ reconfiguration hardware
+ * reconfig fifo overflow error - FIFO overflow detected by
+ reconfiguration hardware
diff --git a/Documentation/ABI/testing/sysfs-class-fpga-region b/Documentation/ABI/testing/sysfs-class-fpga-region
new file mode 100644
index 000000000000..bc7ec644acc9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-fpga-region
@@ -0,0 +1,9 @@
+What: /sys/class/fpga_region/<region>/compat_id
+Date: June 2018
+KernelVersion: 4.19
+Contact: Wu Hao <hao.wu@intel.com>
+Description: FPGA region id for compatibility check, e.g. compatibility
+ of the FPGA reconfiguration hardware and image. This value
+ is defined or calculated by the layer that is creating the
+ FPGA region. This interface returns the compat_id value, or
+ the error code -ENOENT if compat_id is not used.
diff --git a/Documentation/ABI/testing/sysfs-class-gnss b/Documentation/ABI/testing/sysfs-class-gnss
new file mode 100644
index 000000000000..2467b6900eae
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-gnss
@@ -0,0 +1,15 @@
+What: /sys/class/gnss/gnssN/type
+Date: May 2018
+KernelVersion: 4.18
+Contact: Johan Hovold <johan@kernel.org>
+Description:
+ The GNSS receiver type. The currently identified types reflect
+ the protocol(s) supported by the receiver:
+
+ "NMEA" NMEA 0183
+ "SiRF" SiRF Binary
+ "UBX" UBX
+
+ Note that receivers of non-"NMEA" types typically also support
+ a subset of NMEA 0183 with vendor extensions (e.g. to allow
+ switching to a vendor protocol).
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index 81ff6abf9673..17d7444a2397 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -54,3 +54,14 @@ Description: Configure tx queue limit
Set maximal number of pending writes
per opened session.
+
+What: /sys/class/mei/meiN/fw_ver
+Date: May 2018
+KernelVersion: 4.18
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display the ME firmware version.
+
+ The version of the platform ME firmware is in the format:
+ <platform>:<major>.<minor>.<milestone>.<build_no>.
+ There can be up to three such blocks for different
+ FW components.
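
A sketch of splitting the fw_ver lines into their components, assuming the
<platform> field is numeric:

    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/sys/class/mei/mei0/fw_ver", "r");
    	unsigned int plat, major, minor, milestone, build;

    	if (!f)
    		return 1;
    	/* Up to three lines, one per firmware component. */
    	while (fscanf(f, "%u:%u.%u.%u.%u",
    		      &plat, &major, &minor, &milestone, &build) == 5)
    		printf("platform %u: %u.%u.%u build %u\n",
    		       plat, major, minor, milestone, build);
    	fclose(f);
    	return 0;
    }
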
diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues
index 0c0df91b1516..978b76358661 100644
--- a/Documentation/ABI/testing/sysfs-class-net-queues
+++ b/Documentation/ABI/testing/sysfs-class-net-queues
@@ -42,6 +42,17 @@ Description:
network device transmit queue. Possible values depend on the
number of available CPU(s) in the system.
+What: /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
+Date: June 2018
+KernelVersion: 4.18.0
+Contact: netdev@vger.kernel.org
+Description:
+ Mask of the receive queue(s) currently enabled to participate
+ in the Transmit Packet Steering packet processing flow for this
+ network device transmit queue. Possible values depend on the
+ number of available receive queue(s) in the network device.
+ Default is disabled.
+
What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
Date: November 2011
KernelVersion: 3.3
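
A sketch of enabling receive queues 0 and 1 for one transmit queue's XPS
flow by writing a bitmask; the interface and queue names are placeholders:

    #include <stdio.h>

    int main(void)
    {
    	/* eth0 and tx-0 are placeholder names. */
    	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/xps_rxqs", "w");
    	unsigned long mask = 0x3;	/* receive queues 0 and 1 */

    	if (!f)
    		return 1;
    	fprintf(f, "%lx\n", mask);
    	return fclose(f) ? 1 : 0;
    }
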
diff --git a/Documentation/ABI/testing/sysfs-class-typec b/Documentation/ABI/testing/sysfs-class-typec
index 5be552e255e9..d7647b258c3c 100644
--- a/Documentation/ABI/testing/sysfs-class-typec
+++ b/Documentation/ABI/testing/sysfs-class-typec
@@ -222,70 +222,12 @@ Description:
available. The value can be polled.
-Alternate Mode devices.
+USB Type-C port alternate mode devices.
-The alternate modes will have Standard or Vendor ID (SVID) assigned by USB-IF.
-The ports, partners and cable plugs can have alternate modes. A supported SVID
-will consist of a set of modes. Every SVID a port/partner/plug supports will
-have a device created for it, and every supported mode for a supported SVID will
-have its own directory under that device. Below <dev> refers to the device for
-the alternate mode.
-
-What: /sys/class/typec/<port|partner|cable>/<dev>/svid
-Date: April 2017
-Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
-Description:
- The SVID (Standard or Vendor ID) assigned by USB-IF for this
- alternate mode.
-
-What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/
-Date: April 2017
-Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
-Description:
- Every supported mode will have its own directory. The name of
- a mode will be "mode<index>" (for example mode1), where <index>
- is the actual index to the mode VDO returned by Discover Modes
- USB power delivery command.
-
-What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/description
-Date: April 2017
-Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
-Description:
- Shows description of the mode. The description is optional for
- the drivers, just like with the Billboard Devices.
-
-What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/vdo
-Date: April 2017
-Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
-Description:
- Shows the VDO in hexadecimal returned by Discover Modes command
- for this mode.
-
-What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/active
-Date: April 2017
-Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
-Description:
- Shows if the mode is active or not. The attribute can be used
- for entering/exiting the mode with partners and cable plugs, and
- with the port alternate modes it can be used for disabling
- support for specific alternate modes. Entering/exiting modes is
- supported as synchronous operation so write(2) to the attribute
- does not return until the enter/exit mode operation has
- finished. The attribute is notified when the mode is
- entered/exited so poll(2) on the attribute wakes up.
- Entering/exiting a mode will also generate uevent KOBJ_CHANGE.
-
- Valid values: yes, no
-
-What: /sys/class/typec/<port>/<dev>/mode<index>/supported_roles
+What: /sys/class/typec/<port>/<alt mode>/supported_roles
Date: April 2017
Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
Description:
Space separated list of the supported roles.
- This attribute is available for the devices describing the
- alternate modes a port supports, and it will not be exposed with
- the devices presenting the alternate modes the partners or cable
- plugs support.
-
Valid values: source, sink
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 9c5e7732d249..73318225a368 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -476,6 +476,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spectre_v1
/sys/devices/system/cpu/vulnerabilities/spectre_v2
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ /sys/devices/system/cpu/vulnerabilities/l1tf
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
@@ -487,3 +488,26 @@ Description: Information about CPU vulnerabilities
"Not affected" CPU is not affected by the vulnerability
"Vulnerable" CPU is affected and no mitigation in effect
"Mitigation: $M" CPU is affected and mitigation $M is in effect
+
+ Details about the l1tf file can be found in
+ Documentation/admin-guide/l1tf.rst
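+
+		The vulnerability files can simply be read; a minimal
+		sketch that prints each file with its content:
+
+		# grep . /sys/devices/system/cpu/vulnerabilities/*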
+
+What: /sys/devices/system/cpu/smt
+ /sys/devices/system/cpu/smt/active
+ /sys/devices/system/cpu/smt/control
+Date: June 2018
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:	Control Symmetric Multi Threading (SMT)
+
+ active: Tells whether SMT is active (enabled and siblings online)
+
+ control: Read/write interface to control SMT. Possible
+ values:
+
+ "on" SMT is enabled
+ "off" SMT is disabled
+ "forceoff" SMT is force disabled. Cannot be changed.
+ "notsupported" SMT is not supported by the CPU
+
+ If control status is "forceoff" or "notsupported" writes
+ are rejected.
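+
+		A minimal sketch of checking and toggling SMT (the write is
+		rejected in the "forceoff" and "notsupported" states, as
+		noted above):
+
+		# cat /sys/devices/system/cpu/smt/active
+		# echo off > /sys/devices/system/cpu/smt/control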
diff --git a/Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator b/Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator
new file mode 100644
index 000000000000..4d63a7904b94
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator
@@ -0,0 +1,27 @@
+What: /sys/bus/i2c/devices/.../bd9571mwv-regulator.*.auto/backup_mode
+Date: Jul 2018
+KernelVersion: 4.19
+Contact: Geert Uytterhoeven <geert+renesas@glider.be>
+Description:	Read/write the current state of DDR Backup Mode, which controls
+		whether DDR power rails will be kept powered during system suspend.
+ ("on"/"1" = enabled, "off"/"0" = disabled).
+ Two types of power switches (or control signals) can be used:
+ A. With a momentary power switch (or pulse signal), DDR
+ Backup Mode is enabled by default when available, as the
+ PMIC will be configured only during system suspend.
+ B. With a toggle power switch (or level signal), the
+ following steps must be followed exactly:
+ 1. Configure PMIC for backup mode, to change the role of
+ the accessory power switch from a power switch to a
+ wake-up switch,
+ 2. Switch accessory power switch off, to prepare for
+ system suspend, which is a manual step not controlled
+ by software,
+ 3. Suspend system,
+ 4. Switch accessory power switch on, to resume the
+ system.
+ DDR Backup Mode must be explicitly enabled by the user,
+ to invoke step 1.
+ See also Documentation/devicetree/bindings/mfd/bd9571mwv.txt.
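+
+		A minimal usage sketch, where "$attr" stands for the full
+		board-specific backup_mode path shown above:
+
+		# echo on > "$attr"
+		# cat "$attr"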
+Users: User space applications for embedded boards equipped with a
+ BD9571MWV PMIC.
diff --git a/Documentation/ABI/testing/sysfs-driver-typec-displayport b/Documentation/ABI/testing/sysfs-driver-typec-displayport
new file mode 100644
index 000000000000..231471ad0d4b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-typec-displayport
@@ -0,0 +1,49 @@
+What: /sys/bus/typec/devices/.../displayport/configuration
+Date: July 2018
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows the current DisplayPort configuration for the connector.
+ Valid values are USB, source and sink. Source means DisplayPort
+ source, and sink means DisplayPort sink.
+
+	All supported configurations are listed as a space-separated
+	list with the active one wrapped in square brackets.
+
+ Source example:
+
+ USB [source] sink
+
+	The configuration can be changed by writing to the file.
+
+	Note: the USB configuration is not equivalent to Exit Mode. It
+	is a separate configuration defined in the VESA DisplayPort Alt
+	Mode on USB Type-C Standard. Functionally it is equivalent to
+	the situation
+ where the mode has been exited (to exit the mode, see
+ Documentation/ABI/testing/sysfs-bus-typec, and use file
+ /sys/bus/typec/devices/.../active).
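+
+	A minimal sketch of switching the connector to the sink
+	configuration (device path elided as above):
+
+	# cat /sys/bus/typec/devices/.../displayport/configuration
+	USB [source] sink
+	# echo sink > /sys/bus/typec/devices/.../displayport/configuration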
+
+What: /sys/bus/typec/devices/.../displayport/pin_assignment
+Date: July 2018
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ VESA DisplayPort Alt Mode on USB Type-C Standard defines six
+	different pin assignments for the USB Type-C connector that are
+	labeled A, B, C, D, E, and F. The supported pin assignments are
+	listed as a space-separated list with the active one wrapped in
+ square brackets.
+
+ Example:
+
+ C [D]
+
+ Pin assignment can be changed by writing to the file. It is
+ possible to set pin assignment before configuration has been
+ set, but the assignment will not be active before the
+ connector is actually configured.
+
+	Note: as of VESA DisplayPort Alt Mode on USB Type-C Standard
+	version 1.0b, pin assignments A, B, and F are deprecated. Only
+	pin assignment D can now simultaneously carry one channel of the
+	USB SuperSpeed protocol. From the user's perspective, pin
+	assignments C and E are equivalent: all channels on the
+	connector are used for carrying the DisplayPort protocol
+	(allowing higher resolutions).
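+
+	A minimal sketch of selecting pin assignment C (device path
+	elided as above):
+
+	# cat /sys/bus/typec/devices/.../displayport/pin_assignment
+	C [D]
+	# echo C > /sys/bus/typec/devices/.../displayport/pin_assignment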
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 9b0123388f18..94a24aedcdb2 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -51,6 +51,14 @@ Description:
Controls the dirty page count condition for the in-place-update
policies.
+What: /sys/fs/f2fs/<disk>/min_seq_blocks
+Date: August 2018
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+ Controls the dirty page count condition for batched sequential
+ writes in ->writepages.
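+
+		A minimal sketch (the "sda" disk name and the value are
+		illustrative only):
+
+		# echo 16 > /sys/fs/f2fs/sda/min_seq_blocks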
+
+
What: /sys/fs/f2fs/<disk>/min_hot_blocks
Date: March 2017
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
diff --git a/Documentation/ABI/testing/sysfs-platform-dfl-fme b/Documentation/ABI/testing/sysfs-platform-dfl-fme
new file mode 100644
index 000000000000..8fa4febfa4b2
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-dfl-fme
@@ -0,0 +1,23 @@
+What: /sys/bus/platform/devices/dfl-fme.0/ports_num
+Date: June 2018
+KernelVersion: 4.19
+Contact: Wu Hao <hao.wu@intel.com>
+Description:	Read-only. One DFL FPGA device may have more than one
+		port/Accelerator Function Unit (AFU). When read, it returns
+		the number of ports on the FPGA device.
+
+What: /sys/bus/platform/devices/dfl-fme.0/bitstream_id
+Date: June 2018
+KernelVersion: 4.19
+Contact: Wu Hao <hao.wu@intel.com>
+Description:	Read-only. It returns the bitstream identifier of the
+		static FPGA region, which includes the detailed version
+		and other information of this static FPGA region.
+
+What: /sys/bus/platform/devices/dfl-fme.0/bitstream_metadata
+Date: June 2018
+KernelVersion: 4.19
+Contact: Wu Hao <hao.wu@intel.com>
+Description:	Read-only. It returns the bitstream metadata of the static
+		FPGA region, which includes the synthesis date, seed and
+		other information of this static FPGA region.
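+
+		A minimal read sketch for the attributes above:
+
+		# cat /sys/bus/platform/devices/dfl-fme.0/ports_num
+		# cat /sys/bus/platform/devices/dfl-fme.0/bitstream_id
+		# cat /sys/bus/platform/devices/dfl-fme.0/bitstream_metadata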
diff --git a/Documentation/ABI/testing/sysfs-platform-dfl-port b/Documentation/ABI/testing/sysfs-platform-dfl-port
new file mode 100644
index 000000000000..6a92dda517b0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-dfl-port
@@ -0,0 +1,16 @@
+What: /sys/bus/platform/devices/dfl-port.0/id
+Date: June 2018
+KernelVersion: 4.19
+Contact: Wu Hao <hao.wu@intel.com>
+Description:	Read-only. It returns the id of this port. One DFL FPGA
+		device may have more than one port. Userspace can use this
+		id to distinguish different ports under the same FPGA device.
+
+What: /sys/bus/platform/devices/dfl-port.0/afu_id
+Date: June 2018
+KernelVersion: 4.19
+Contact: Wu Hao <hao.wu@intel.com>
+Description:	Read-only. Users can program different PR bitstreams into an
+		FPGA Accelerator Function Unit (AFU) for different functions.
+		It returns the uuid that identifies which PR bitstream is
+		currently programmed in this AFU.
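+
+		A minimal read sketch, e.g. to find which AFU sits behind a
+		port:
+
+		# cat /sys/bus/platform/devices/dfl-port.0/id
+		# cat /sys/bus/platform/devices/dfl-port.0/afu_id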
diff --git a/Documentation/PCI/00-INDEX b/Documentation/PCI/00-INDEX
index 00c9a90b6f38..206b1d5c1e71 100644
--- a/Documentation/PCI/00-INDEX
+++ b/Documentation/PCI/00-INDEX
@@ -1,5 +1,7 @@
00-INDEX
- this file
+acpi-info.txt
+ - info on how PCI host bridges are represented in ACPI
MSI-HOWTO.txt
- the Message Signaled Interrupts (MSI) Driver Guide HOWTO and FAQ.
PCIEBUS-HOWTO.txt
diff --git a/Documentation/PCI/acpi-info.txt b/Documentation/PCI/acpi-info.txt
new file mode 100644
index 000000000000..3ffa3b03970e
--- /dev/null
+++ b/Documentation/PCI/acpi-info.txt
@@ -0,0 +1,187 @@
+ ACPI considerations for PCI host bridges
+
+The general rule is that the ACPI namespace should describe everything the
+OS might use unless there's another way for the OS to find it [1, 2].
+
+For example, there's no standard hardware mechanism for enumerating PCI
+host bridges, so the ACPI namespace must describe each host bridge, the
+method for accessing PCI config space below it, the address space windows
+the host bridge forwards to PCI (using _CRS), and the routing of legacy
+INTx interrupts (using _PRT).
+
+PCI devices, which are below the host bridge, generally do not need to be
+described via ACPI. The OS can discover them via the standard PCI
+enumeration mechanism, using config accesses to discover and identify
+devices and read and size their BARs. However, ACPI may describe PCI
+devices if it provides power management or hotplug functionality for them
+or if the device has INTx interrupts connected by platform interrupt
+controllers and a _PRT is needed to describe those connections.
+
+ACPI resource description is done via _CRS objects of devices in the ACPI
+namespace [2].  The _CRS is like a generalized PCI BAR: the OS can read
+_CRS and figure out what resource is being consumed even if it doesn't have
+a driver for the device [3].  That's important because it means an old OS
+can work correctly even on a system with new devices unknown to the OS.
+The new devices might not do anything, but the OS can at least make sure no
+resources conflict with them.
+
+Static tables like MCFG, HPET, ECDT, etc., are *not* mechanisms for
+reserving address space. The static tables are for things the OS needs to
+know early in boot, before it can parse the ACPI namespace. If a new table
+is defined, an old OS needs to operate correctly even though it ignores the
+table. _CRS allows that because it is generic and understood by the old
+OS; a static table does not.
+
+If the OS is expected to manage a non-discoverable device described via
+ACPI, that device will have a specific _HID/_CID that tells the OS what
+driver to bind to it, and the _CRS tells the OS and the driver where the
+device's registers are.
+
+PCI host bridges are PNP0A03 or PNP0A08 devices.  Their _CRS should
+describe all the address space they consume.  This includes all the windows
+they forward down to the PCI bus, as well as registers of the host bridge
+itself that are not forwarded to PCI.  The host bridge registers include
+things like secondary/subordinate bus registers that determine the bus
+range below the bridge, window registers that describe the apertures, etc.
+These are all device-specific, non-architected things, so the only way a
+PNP0A03/PNP0A08 driver can manage them is via _PRS/_CRS/_SRS, which contain
+the device-specific details.  The host bridge registers also include ECAM
+space, since it is consumed by the host bridge.
+
+ACPI defines a Consumer/Producer bit to distinguish the bridge registers
+("Consumer") from the bridge apertures ("Producer") [4, 5], but early
+BIOSes didn't use that bit correctly. The result is that the current ACPI
+spec defines Consumer/Producer only for the Extended Address Space
+descriptors; the bit should be ignored in the older QWord/DWord/Word
+Address Space descriptors. Consequently, OSes have to assume all
+QWord/DWord/Word descriptors are windows.
+
+Prior to the addition of Extended Address Space descriptors, the failure of
+Consumer/Producer meant there was no way to describe bridge registers in
+the PNP0A03/PNP0A08 device itself. The workaround was to describe the
+bridge registers (including ECAM space) in PNP0C02 catch-all devices [6].
+With the exception of ECAM, the bridge register space is device-specific
+anyway, so the generic PNP0A03/PNP0A08 driver (pci_root.c) has no need to
+know about it.  
+
+New architectures should be able to use "Consumer" Extended Address Space
+descriptors in the PNP0A03 device for bridge registers, including ECAM,
+although a strict interpretation of [6] might prohibit this. Old x86 and
+ia64 kernels assume all address space descriptors, including "Consumer"
+Extended Address Space ones, are windows, so it would not be safe to
+describe bridge registers this way on those architectures.
+
+PNP0C02 "motherboard" devices are basically a catch-all.  There's no
+programming model for them other than "don't use these resources for
+anything else."  So a PNP0C02 _CRS should claim any address space that is
+(1) not claimed by _CRS under any other device object in the ACPI namespace
+and (2) should not be assigned by the OS to something else.
+
+The PCIe spec requires the Enhanced Configuration Access Method (ECAM)
+unless there's a standard firmware interface for config access, e.g., the
+ia64 SAL interface [7]. A host bridge consumes ECAM memory address space
+and converts memory accesses into PCI configuration accesses. The spec
+defines the ECAM address space layout and functionality; only the base of
+the address space is device-specific. An ACPI OS learns the base address
+from either the static MCFG table or a _CBA method in the PNP0A03 device.
+
+The MCFG table must describe the ECAM space of non-hot pluggable host
+bridges [8]. Since MCFG is a static table and can't be updated by hotplug,
+a _CBA method in the PNP0A03 device describes the ECAM space of a
+hot-pluggable host bridge [9]. Note that for both MCFG and _CBA, the base
+address always corresponds to bus 0, even if the bus range below the bridge
+(which is reported via _CRS) doesn't start at 0.
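+
+For example, on a running ACPI system both the static table and the
+resulting ECAM reservation can be inspected from the shell (a sketch; the
+"MMCONFIG" label in /proc/iomem is x86-specific and may differ elsewhere):
+
+  # ls /sys/firmware/acpi/tables | grep MCFG
+  # grep -i mmconfig /proc/iomem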
+
+
+[1] ACPI 6.2, sec 6.1:
+ For any device that is on a non-enumerable type of bus (for example, an
+ ISA bus), OSPM enumerates the devices' identifier(s) and the ACPI
+ system firmware must supply an _HID object ... for each device to
+ enable OSPM to do that.
+
+[2] ACPI 6.2, sec 3.7:
+ The OS enumerates motherboard devices simply by reading through the
+ ACPI Namespace looking for devices with hardware IDs.
+
+ Each device enumerated by ACPI includes ACPI-defined objects in the
+ ACPI Namespace that report the hardware resources the device could
+ occupy [_PRS], an object that reports the resources that are currently
+ used by the device [_CRS], and objects for configuring those resources
+ [_SRS]. The information is used by the Plug and Play OS (OSPM) to
+ configure the devices.
+
+[3] ACPI 6.2, sec 6.2:
+ OSPM uses device configuration objects to configure hardware resources
+ for devices enumerated via ACPI. Device configuration objects provide
+ information about current and possible resource requirements, the
+ relationship between shared resources, and methods for configuring
+ hardware resources.
+
+ When OSPM enumerates a device, it calls _PRS to determine the resource
+ requirements of the device. It may also call _CRS to find the current
+ resource settings for the device. Using this information, the Plug and
+ Play system determines what resources the device should consume and
+ sets those resources by calling the device’s _SRS control method.
+
+ In ACPI, devices can consume resources (for example, legacy keyboards),
+ provide resources (for example, a proprietary PCI bridge), or do both.
+ Unless otherwise specified, resources for a device are assumed to be
+ taken from the nearest matching resource above the device in the device
+ hierarchy.
+
+[4] ACPI 6.2, sec 6.4.3.5.1, 2, 3, 4:
+ QWord/DWord/Word Address Space Descriptor (.1, .2, .3)
+ General Flags: Bit [0] Ignored
+
+ Extended Address Space Descriptor (.4)
+ General Flags: Bit [0] Consumer/Producer:
+ 1–This device consumes this resource
+ 0–This device produces and consumes this resource
+
+[5] ACPI 6.2, sec 19.6.43:
+ ResourceUsage specifies whether the Memory range is consumed by
+ this device (ResourceConsumer) or passed on to child devices
+ (ResourceProducer). If nothing is specified, then
+ ResourceConsumer is assumed.
+
+[6] PCI Firmware 3.2, sec 4.1.2:
+ If the operating system does not natively comprehend reserving the
+ MMCFG region, the MMCFG region must be reserved by firmware. The
+ address range reported in the MCFG table or by _CBA method (see Section
+ 4.1.3) must be reserved by declaring a motherboard resource. For most
+ systems, the motherboard resource would appear at the root of the ACPI
+ namespace (under \_SB) in a node with a _HID of EISAID (PNP0C02), and
+ the resources in this case should not be claimed in the root PCI bus’s
+ _CRS. The resources can optionally be returned in Int15 E820 or
+ EFIGetMemoryMap as reserved memory but must always be reported through
+ ACPI as a motherboard resource.
+
+[7] PCI Express 4.0, sec 7.2.2:
+ For systems that are PC-compatible, or that do not implement a
+ processor-architecture-specific firmware interface standard that allows
+ access to the Configuration Space, the ECAM is required as defined in
+ this section.
+
+[8] PCI Firmware 3.2, sec 4.1.2:
+ The MCFG table is an ACPI table that is used to communicate the base
+ addresses corresponding to the non-hot removable PCI Segment Groups
+ range within a PCI Segment Group available to the operating system at
+ boot. This is required for the PC-compatible systems.
+
+ The MCFG table is only used to communicate the base addresses
+ corresponding to the PCI Segment Groups available to the system at
+ boot.
+
+[9] PCI Firmware 3.2, sec 4.1.3:
+ The _CBA (Memory mapped Configuration Base Address) control method is
+ an optional ACPI object that returns the 64-bit memory mapped
+ configuration base address for the hot plug capable host bridge. The
+ base address returned by _CBA is processor-relative address. The _CBA
+ control method evaluates to an Integer.
+
+ This control method appears under a host bridge object. When the _CBA
+ method appears under an active host bridge object, the operating system
+ evaluates this structure to identify the memory mapped configuration
+ base address corresponding to the PCI Segment Group for the bus number
+ range specified in _CRS method. An ACPI name space object that contains
+ the _CBA method must also contain a corresponding _SEG method.
diff --git a/Documentation/PCI/endpoint/function/binding/pci-test.txt b/Documentation/PCI/endpoint/function/binding/pci-test.txt
index 3b68b955fb50..cd76ba47394b 100644
--- a/Documentation/PCI/endpoint/function/binding/pci-test.txt
+++ b/Documentation/PCI/endpoint/function/binding/pci-test.txt
@@ -15,3 +15,5 @@ subsys_id : don't care
interrupt_pin : Should be 1 - INTA, 2 - INTB, 3 - INTC, 4 - INTD
msi_interrupts : Should be 1 to 32 depending on the number of MSI interrupts
to test
+msix_interrupts : Should be 1 to 2048 depending on the number of MSI-X
+ interrupts to test
diff --git a/Documentation/PCI/endpoint/pci-endpoint.txt b/Documentation/PCI/endpoint/pci-endpoint.txt
index 9b1d66829290..e86a96b66a6a 100644
--- a/Documentation/PCI/endpoint/pci-endpoint.txt
+++ b/Documentation/PCI/endpoint/pci-endpoint.txt
@@ -44,7 +44,7 @@ by the PCI controller driver.
* clear_bar: ops to reset the BAR
* alloc_addr_space: ops to allocate in PCI controller address space
* free_addr_space: ops to free the allocated address space
- * raise_irq: ops to raise a legacy or MSI interrupt
+ * raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
* start: ops to start the PCI link
* stop: ops to stop the PCI link
@@ -96,7 +96,7 @@ by the PCI endpoint function driver.
*) pci_epc_raise_irq()
The PCI endpoint function driver should use pci_epc_raise_irq() to raise
- Legacy Interrupt or MSI Interrupt.
+ Legacy Interrupt, MSI or MSI-X Interrupt.
*) pci_epc_mem_alloc_addr()
diff --git a/Documentation/PCI/endpoint/pci-test-function.txt b/Documentation/PCI/endpoint/pci-test-function.txt
index 0c519c9bf94a..5916f1f592bb 100644
--- a/Documentation/PCI/endpoint/pci-test-function.txt
+++ b/Documentation/PCI/endpoint/pci-test-function.txt
@@ -20,6 +20,8 @@ The PCI endpoint test device has the following registers:
5) PCI_ENDPOINT_TEST_DST_ADDR
6) PCI_ENDPOINT_TEST_SIZE
7) PCI_ENDPOINT_TEST_CHECKSUM
+ 8) PCI_ENDPOINT_TEST_IRQ_TYPE
+ 9) PCI_ENDPOINT_TEST_IRQ_NUMBER
*) PCI_ENDPOINT_TEST_MAGIC
@@ -34,10 +36,10 @@ that the endpoint device must perform.
Bitfield Description:
Bit 0 : raise legacy IRQ
Bit 1 : raise MSI IRQ
- Bit 2 - 7 : MSI interrupt number
- Bit 8 : read command (read data from RC buffer)
- Bit 9 : write command (write data to RC buffer)
- Bit 10 : copy command (copy data from one RC buffer to another
+ Bit 2 : raise MSI-X IRQ
+ Bit 3 : read command (read data from RC buffer)
+ Bit 4 : write command (write data to RC buffer)
+ Bit 5 : copy command (copy data from one RC buffer to another
RC buffer)
*) PCI_ENDPOINT_TEST_STATUS
@@ -64,3 +66,22 @@ COPY/READ command.
This register contains the destination address (RC buffer address) for
the COPY/WRITE command.
+
+*) PCI_ENDPOINT_TEST_IRQ_TYPE
+
+This register contains the interrupt type (Legacy/MSI/MSI-X) triggered
+for the READ/WRITE/COPY and raise IRQ (Legacy/MSI/MSI-X) commands.
+
+Possible types:
+ - Legacy : 0
+ - MSI : 1
+ - MSI-X : 2
+
+*) PCI_ENDPOINT_TEST_IRQ_NUMBER
+
+This register contains the ID of the triggered interrupt.
+
+Admissible values:
+ - Legacy : 0
+ - MSI : [1 .. 32]
+ - MSI-X : [1 .. 2048]
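+
+A minimal host-side sketch using the pcitest tool from tools/pci (the flags
+below are assumptions about that tool's interface and may differ by
+version): first select the IRQ type, then raise a specific vector:
+
+  # pcitest -i 2    (select MSI-X as PCI_ENDPOINT_TEST_IRQ_TYPE)
+  # pcitest -x 8    (raise MSI-X interrupt number 8)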
diff --git a/Documentation/PCI/endpoint/pci-test-howto.txt b/Documentation/PCI/endpoint/pci-test-howto.txt
index 75f48c3bb191..e40cf0fb58d7 100644
--- a/Documentation/PCI/endpoint/pci-test-howto.txt
+++ b/Documentation/PCI/endpoint/pci-test-howto.txt
@@ -45,9 +45,9 @@ The PCI endpoint framework populates the directory with the following
configurable fields.
# ls functions/pci_epf_test/func1
- baseclass_code interrupt_pin revid subsys_vendor_id
- cache_line_size msi_interrupts subclass_code vendorid
- deviceid progif_code subsys_id
+ baseclass_code interrupt_pin progif_code subsys_id
+ cache_line_size msi_interrupts revid subsys_vendorid
+ deviceid msix_interrupts subclass_code vendorid
The PCI endpoint function driver populates these entries with default values
when the device is bound to the driver. The pci-epf-test driver populates
@@ -67,6 +67,7 @@ device, the following commands can be used.
# echo 0x104c > functions/pci_epf_test/func1/vendorid
# echo 0xb500 > functions/pci_epf_test/func1/deviceid
# echo 16 > functions/pci_epf_test/func1/msi_interrupts
+ # echo 8 > functions/pci_epf_test/func1/msix_interrupts
1.5 Binding pci-epf-test Device to EP Controller
@@ -120,7 +121,9 @@ following commands.
Interrupt tests
+ SET IRQ TYPE TO LEGACY: OKAY
LEGACY IRQ: NOT OKAY
+ SET IRQ TYPE TO MSI: OKAY
MSI1: OKAY
MSI2: OKAY
MSI3: OKAY
@@ -153,9 +156,30 @@ following commands.
MSI30: NOT OKAY
MSI31: NOT OKAY
MSI32: NOT OKAY
+ SET IRQ TYPE TO MSI-X: OKAY
+ MSI-X1: OKAY
+ MSI-X2: OKAY
+ MSI-X3: OKAY
+ MSI-X4: OKAY
+ MSI-X5: OKAY
+ MSI-X6: OKAY
+ MSI-X7: OKAY
+ MSI-X8: OKAY
+ MSI-X9: NOT OKAY
+ MSI-X10: NOT OKAY
+ MSI-X11: NOT OKAY
+ MSI-X12: NOT OKAY
+ MSI-X13: NOT OKAY
+ MSI-X14: NOT OKAY
+ MSI-X15: NOT OKAY
+ MSI-X16: NOT OKAY
+ [...]
+ MSI-X2047: NOT OKAY
+ MSI-X2048: NOT OKAY
Read Tests
+ SET IRQ TYPE TO MSI: OKAY
READ ( 1 bytes): OKAY
READ ( 1024 bytes): OKAY
READ ( 1025 bytes): OKAY
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index acd0dddd6bb8..48ce7903e3c6 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -73,6 +73,11 @@ In the example, 'Requester ID' means the ID of the device who sends
the error message to the root port. Please refer to the PCI Express specs
for other fields.
+2.4 AER Statistics / Counters
+
+When PCIe AER errors are captured, the counters / statistics are also exposed
+in the form of sysfs attributes, which are documented in
+Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats.
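+
+For example, the per-device counters can be listed and read from the shell
+(a sketch; the device address is hypothetical and the attribute names are
+those documented in the file above):
+
+  # ls /sys/bus/pci/devices/0000:00:1c.0/ | grep aer
+  # cat /sys/bus/pci/devices/0000:00:1c.0/aer_dev_correctable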
3. Developer Guide
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 6c06e10bd04b..f5120a00f511 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -380,31 +380,26 @@ and therefore need no protection.
as follows:
<pre>
- 1 unsigned long gpnum;
- 2 unsigned long completed;
+ 1 unsigned long gp_seq;
</pre>
<p>RCU grace periods are numbered, and
-the <tt>-&gt;gpnum</tt> field contains the number of the grace
-period that started most recently.
-The <tt>-&gt;completed</tt> field contains the number of the
-grace period that completed most recently.
-If the two fields are equal, the RCU grace period that most recently
-started has already completed, and therefore the corresponding
-flavor of RCU is idle.
-If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
-then <tt>-&gt;gpnum</tt> gives the number of the current RCU
-grace period, which has not yet completed.
-Any other combination of values indicates that something is broken.
-These two fields are protected by the root <tt>rcu_node</tt>'s
+the <tt>-&gt;gp_seq</tt> field contains the current grace-period
+sequence number.
+The bottom two bits are the state of the current grace period,
+which can be zero for not yet started or one for in progress.
+In other words, if the bottom two bits of <tt>-&gt;gp_seq</tt> are
+zero, the corresponding flavor of RCU is idle.
+Any other value in the bottom two bits indicates that something is broken.
+This field is protected by the root <tt>rcu_node</tt> structure's
<tt>-&gt;lock</tt> field.
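+
+For example, one grace period moves <tt>-&gt;gp_seq</tt> through values
+like the following (a hypothetical sketch of the encoding described above):
+
+<pre>
+  -&gt;gp_seq == 0x4   idle (bottom two bits zero)
+  -&gt;gp_seq == 0x5   grace period in progress (bottom bits 01)
+  -&gt;gp_seq == 0x8   grace period ended; idle again
+</pre>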
-</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+</p><p>There are <tt>-&gt;gp_seq</tt> fields
in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
as well.
The fields in the <tt>rcu_state</tt> structure represent the
-most current values, and those of the other structures are compared
-in order to detect the start of a new grace period in a distributed
+most current value, and those of the other structures are compared
+in order to detect the beginnings and ends of grace periods in a distributed
fashion.
The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
(down the tree from the root to the leaves) to <tt>rcu_data</tt>.
@@ -512,27 +507,47 @@ than to be heisenbugged out of existence.
as follows:
<pre>
- 1 unsigned long gpnum;
- 2 unsigned long completed;
+ 1 unsigned long gp_seq;
+ 2 unsigned long gp_seq_needed;
</pre>
-<p>These fields are the counterparts of the fields of the same name in
-the <tt>rcu_state</tt> structure.
-They each may lag up to one behind their <tt>rcu_state</tt>
-counterparts.
-If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_node</tt>
+<p>The <tt>rcu_node</tt> structures' <tt>-&gt;gp_seq</tt> fields are
+the counterparts of the field of the same name in the <tt>rcu_state</tt>
+structure.
+They each may lag up to one step behind their <tt>rcu_state</tt>
+counterpart.
+If the bottom two bits of a given <tt>rcu_node</tt> structure's
+<tt>-&gt;gp_seq</tt> field are zero, then this <tt>rcu_node</tt>
structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_node</tt> believes
-is still being waited for.
+</p><p>The <tt>-&gt;gp_seq</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning and the end
+of each grace period.
+
+<p>The <tt>-&gt;gp_seq_needed</tt> fields record the
+furthest-in-the-future grace period request seen by the corresponding
+<tt>rcu_node</tt> structure. The request is considered fulfilled when
+the value of the <tt>-&gt;gp_seq</tt> field equals or exceeds that of
+the <tt>-&gt;gp_seq_needed</tt> field.
-</p><p>The <tt>&gt;gpnum</tt> field of each <tt>rcu_node</tt>
-structure is updated at the beginning
-of each grace period, and the <tt>-&gt;completed</tt> fields are
-updated at the end of each grace period.
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ Suppose that this <tt>rcu_node</tt> structure doesn't see
+ a request for a very long time.
+ Won't wrapping of the <tt>-&gt;gp_seq</tt> field cause
+ problems?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+ No, because if the <tt>-&gt;gp_seq_needed</tt> field lags behind the
+ <tt>-&gt;gp_seq</tt> field, the <tt>-&gt;gp_seq_needed</tt> field
+ will be updated at the end of the grace period.
+ Modulo-arithmetic comparisons therefore will always get the
+ correct answer, even with wrapping.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
<h5>Quiescent-State Tracking</h5>
@@ -626,9 +641,8 @@ normal and expedited grace periods, respectively.
</ol>
<p><font color="ffffff">So the locking is absolutely required in
- order to coordinate
- clearing of the bits with the grace-period numbers in
- <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt>.
+ order to coordinate clearing of the bits with updating of the
+ grace-period sequence number in <tt>-&gt;gp_seq</tt>.
</font></td></tr>
<tr><td>&nbsp;</td></tr>
</table>
@@ -1038,15 +1052,15 @@ out any <tt>rcu_data</tt> structure for which this flag is not set.
as follows:
<pre>
- 1 unsigned long completed;
- 2 unsigned long gpnum;
+ 1 unsigned long gp_seq;
+ 2 unsigned long gp_seq_needed;
3 bool cpu_no_qs;
4 bool core_needs_qs;
5 bool gpwrap;
6 unsigned long rcu_qs_ctr_snap;
</pre>
-<p>The <tt>completed</tt> and <tt>gpnum</tt>
+<p>The <tt>-&gt;gp_seq</tt> and <tt>-&gt;gp_seq_needed</tt>
fields are the counterparts of the fields of the same name
in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
They may each lag up to one behind their <tt>rcu_node</tt>
@@ -1054,15 +1068,9 @@ counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
<tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
will catch up upon exit from dyntick-idle mode).
-If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_data</tt>
+If the lower two bits of a given <tt>rcu_data</tt> structure's
+<tt>-&gt;gp_seq</tt> are zero, then this <tt>rcu_data</tt>
structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
-structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_data</tt> believes
-is still being waited for.
<table>
<tr><th>&nbsp;</th></tr>
@@ -1070,13 +1078,13 @@ is still being waited for.
<tr><td>
All this replication of the grace period numbers can only cause
massive confusion.
- Why not just keep a global pair of counters and be done with it???
+ Why not just keep a global sequence number and be done with it???
</td></tr>
<tr><th align="left">Answer:</th></tr>
<tr><td bgcolor="#ffffff"><font color="ffffff">
- Because if there was only a single global pair of grace-period
+	Because if there was only a single global sequence
	number, there would need to be a single global lock to allow
- safely accessing and updating them.
+ safely accessing and updating it.
And if we are not going to have a single global lock, we need
to carefully manage the numbers on a per-node basis.
Recall from the answer to a previous Quick Quiz that the consequences
@@ -1091,8 +1099,8 @@ CPU has not yet passed through a quiescent state,
while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
RCU core needs a quiescent state from the corresponding CPU.
The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
-CPU has remained idle for so long that the <tt>completed</tt>
-and <tt>gpnum</tt> counters are in danger of overflow, which
+CPU has remained idle for so long that the
+<tt>gp_seq</tt> counter is in danger of overflow, which
will cause the CPU to disregard the values of its counters on
its next exit from idle.
Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
@@ -1130,10 +1138,10 @@ The CPU advances the callbacks in its <tt>rcu_data</tt> structure
whenever it notices that another RCU grace period has completed.
The CPU detects the completion of an RCU grace period by noticing
that the value of its <tt>rcu_data</tt> structure's
-<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>-&gt;gp_seq</tt> field differs from that of its leaf
<tt>rcu_node</tt> structure.
Recall that each <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field is updated at the end of each
+<tt>-&gt;gp_seq</tt> field is updated at the beginnings and ends of each
grace period.
<p>
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index 8651b0b4fd79..a346ce0116eb 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
@@ -357,7 +357,7 @@ parts, starting in this section with the various phases of
grace-period initialization.
<p>The first ordering-related grace-period initialization action is to
-increment the <tt>rcu_state</tt> structure's <tt>-&gt;gpnum</tt>
+advance the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt>
grace-period-number counter, as shown below:
</p><p><img src="TreeRCU-gp-init-1.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -388,7 +388,7 @@ its last CPU and if the next <tt>rcu_node</tt> structure has no online CPUs).
<p>The final <tt>rcu_gp_init()</tt> pass through the <tt>rcu_node</tt>
tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
-<tt>-&gt;gpnum</tt> field to the newly incremented value from the
+<tt>-&gt;gp_seq</tt> field to the newly advanced value from the
<tt>rcu_state</tt> structure, as shown in the following diagram.
</p><p><img src="TreeRCU-gp-init-3.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -398,9 +398,9 @@ tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
to notice that a new grace period has started, as described in the next
section.
But because the grace-period kthread started the grace period at the
-root (with the increment of the <tt>rcu_state</tt> structure's
-<tt>-&gt;gpnum</tt> field) before setting each leaf <tt>rcu_node</tt>
-structure's <tt>-&gt;gpnum</tt> field, each CPU's observation of
+root (with the advancing of the <tt>rcu_state</tt> structure's
+<tt>-&gt;gp_seq</tt> field) before setting each leaf <tt>rcu_node</tt>
+structure's <tt>-&gt;gp_seq</tt> field, each CPU's observation of
the start of the grace period will happen after the actual start
of the grace period.
@@ -466,7 +466,7 @@ section that the grace period must wait on.
<tr><td>
But a RCU read-side critical section might have started
after the beginning of the grace period
- (the <tt>-&gt;gpnum++</tt> from earlier), so why should
+ (the advancing of <tt>-&gt;gp_seq</tt> from earlier), so why should
the grace period wait on such a critical section?
</td></tr>
<tr><th align="left">Answer:</th></tr>
@@ -609,10 +609,8 @@ states outstanding from other CPUs.
<h4><a name="Grace-Period Cleanup">Grace-Period Cleanup</a></h4>
<p>Grace-period cleanup first scans the <tt>rcu_node</tt> tree
-breadth-first setting all the <tt>-&gt;completed</tt> fields equal
-to the number of the newly completed grace period, then it sets
-the <tt>rcu_state</tt> structure's <tt>-&gt;completed</tt> field,
-again to the number of the newly completed grace period.
+breadth-first advancing all the <tt>-&gt;gp_seq</tt> fields, then it
+advances the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt> field.
The ordering effects are shown below:
</p><p><img src="TreeRCU-gp-cleanup.svg" alt="TreeRCU-gp-cleanup.svg" width="75%">
@@ -634,7 +632,7 @@ grace-period cleanup is complete, the next grace period can begin.
CPU has reported its quiescent state, but it may be some
milliseconds before RCU becomes aware of this.
The latest reasonable candidate is once the <tt>rcu_state</tt>
- structure's <tt>-&gt;completed</tt> field has been updated,
+ structure's <tt>-&gt;gp_seq</tt> field has been updated,
but it is quite possible that some CPUs have already completed
phase two of their updates by that time.
In short, if you are going to work with RCU, you need to
@@ -647,7 +645,7 @@ grace-period cleanup is complete, the next grace period can begin.
<h4><a name="Callback Invocation">Callback Invocation</a></h4>
<p>Once a given CPU's leaf <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field has been updated, that CPU can begin
+<tt>-&gt;gp_seq</tt> field has been updated, that CPU can begin
invoking its RCU callbacks that were waiting for this grace period
to end.
These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
index 754f426b297a..bf84fbab27ee 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
@@ -384,11 +384,11 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.70710678"
- inkscape:cx="617.89017"
- inkscape:cy="542.52419"
- inkscape:window-x="86"
- inkscape:window-y="28"
+ inkscape:zoom="0.78716603"
+ inkscape:cx="513.06403"
+ inkscape:cy="623.1214"
+ inkscape:window-x="102"
+ inkscape:window-y="38"
inkscape:window-maximized="0"
inkscape:current-layer="g3188-3"
fit-margin-top="5"
@@ -417,13 +417,15 @@
id="g3188">
<text
xml:space="preserve"
- x="3199.1516"
+ x="3145.9592"
y="13255.592"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3143">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
<g
id="g3107"
transform="translate(947.90548,11584.029)">
@@ -502,13 +504,15 @@
</g>
<text
xml:space="preserve"
- x="5324.5371"
- y="15414.598"
+ x="5264.4731"
+ y="15428.84"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-753"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -547,15 +551,6 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-6-0">Leaf</tspan></text>
- <text
- xml:space="preserve"
- x="7479.5796"
- y="17699.943"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-9"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<path
sodipodi:nodetypes="cc"
inkscape:connector-curvature="0"
@@ -566,15 +561,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(-737.93887,7732.6672)"
id="g3188-3">
- <text
- xml:space="preserve"
- x="3225.7478"
- y="13175.802"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;completed =</text>
<g
id="g3107-62"
transform="translate(947.90548,11584.029)">
@@ -607,15 +593,6 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-7">Root</tspan></text>
- <text
- xml:space="preserve"
- x="3225.7478"
- y="13390.038"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"> rnp-&gt;completed</text>
<flowRoot
xml:space="preserve"
id="flowRoot3356"
@@ -627,7 +604,18 @@
height="63.63961"
x="332.34018"
y="681.87292" /></flowRegion><flowPara
- id="flowPara3362" /></flowRoot> </g>
+ id="flowPara3362" /></flowRoot> <text
+ xml:space="preserve"
+ x="3156.6121"
+ y="13317.754"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+ </g>
<g
style="fill:none;stroke-width:0.025in"
transform="translate(-858.40227,7769.0342)"
@@ -859,6 +847,17 @@
id="path3414-8-3-6-6"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ x="7418.769"
+ y="17646.104"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-70"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-93">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-1642.5377,-11611.245)"
@@ -887,13 +886,15 @@
</g>
<text
xml:space="preserve"
- x="5327.3057"
+ x="5274.1133"
y="15428.84"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-36"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-151.71746,-11647.612)"
@@ -972,13 +973,15 @@
id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7486.4907"
- y="17670.119"
+ x="7408.5918"
+ y="17619.504"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-6817.1997,-11647.612)"
@@ -1019,13 +1022,15 @@
id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7474.1382"
- y="17688.926"
+ x="7416.8003"
+ y="17619.504"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-5"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-56">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -1059,15 +1064,6 @@
id="path3414-8-3-6"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
- <text
- xml:space="preserve"
- x="7318.9653"
- y="6031.6353"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-2"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<g
style="fill:none;stroke-width:0.025in"
id="g4504-3-9"
@@ -1123,4 +1119,15 @@
id="path3134-9-0-3-5"
d="m 6875.6003,15833.906 1595.7755,0"
style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-36)" />
+ <text
+ xml:space="preserve"
+ x="7275.2612"
+ y="5971.8916"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-2">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
index 0161262904ec..8c207550818f 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
@@ -272,13 +272,13 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.70710678"
- inkscape:cx="617.89019"
- inkscape:cy="636.57143"
- inkscape:window-x="697"
+ inkscape:zoom="2.6330492"
+ inkscape:cx="524.82797"
+ inkscape:cy="519.31194"
+ inkscape:window-x="79"
inkscape:window-y="28"
inkscape:window-maximized="0"
- inkscape:current-layer="svg2"
+ inkscape:current-layer="g3188"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
@@ -305,13 +305,15 @@
id="g3188">
<text
xml:space="preserve"
- x="3305.5364"
+ x="3119.363"
y="13255.592"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
<g
id="g3107"
transform="translate(947.90548,11584.029)">
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
index de6ecc51b00e..d24d7d555dbc 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
@@ -19,7 +19,7 @@
id="svg2"
version="1.1"
inkscape:version="0.48.4 r9939"
- sodipodi:docname="TreeRCU-gp-init-2.svg">
+ sodipodi:docname="TreeRCU-gp-init-3.svg">
<metadata
id="metadata212">
<rdf:RDF>
@@ -257,18 +257,22 @@
inkscape:window-width="1087"
inkscape:window-height="1144"
id="namedview208"
- showgrid="false"
- inkscape:zoom="0.70710678"
+ showgrid="true"
+ inkscape:zoom="0.68224756"
inkscape:cx="617.89019"
inkscape:cy="625.84293"
- inkscape:window-x="697"
+ inkscape:window-x="54"
inkscape:window-y="28"
inkscape:window-maximized="0"
- inkscape:current-layer="svg2"
+ inkscape:current-layer="g3153"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
- fit-margin-bottom="5" />
+ fit-margin-bottom="5">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3090" />
+ </sodipodi:namedview>
<path
sodipodi:nodetypes="cccccccccccccccccccccccc"
inkscape:connector-curvature="0"
@@ -281,13 +285,13 @@
id="g3188">
<text
xml:space="preserve"
- x="3305.5364"
+ x="3145.9592"
y="13255.592"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
<g
id="g3107"
transform="translate(947.90548,11584.029)">
@@ -366,13 +370,13 @@
</g>
<text
xml:space="preserve"
- x="5392.3345"
- y="15407.104"
+ x="5253.6904"
+ y="15407.032"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -413,13 +417,13 @@
id="tspan3104-6-5-6-0">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7536.4883"
- y="17640.934"
+ x="7415.4365"
+ y="17670.572"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-9"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-1642.5375,-11610.962)"
@@ -448,13 +452,13 @@
</g>
<text
xml:space="preserve"
- x="5378.4146"
- y="15436.927"
+ x="5258.0688"
+ y="15412.313"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-151.71726,-11647.329)"
@@ -533,13 +537,13 @@
id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7520.1294"
- y="17673.639"
+ x="7405.2607"
+ y="17670.572"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-35"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-6817.1998,-11647.329)"
@@ -580,13 +584,13 @@
id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7521.4663"
- y="17666.062"
+ x="7413.4688"
+ y="17670.566"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-75"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -622,11 +626,11 @@
sodipodi:nodetypes="cc" />
<text
xml:space="preserve"
- x="7370.856"
- y="5997.5972"
+ x="7271.9297"
+ y="6023.2412"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-62"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
index b13b7b01bb3a..acd73c7ad0f4 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
@@ -1070,13 +1070,13 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.6004608"
- inkscape:cx="826.65969"
- inkscape:cy="483.3047"
- inkscape:window-x="66"
- inkscape:window-y="28"
+ inkscape:zoom="0.81932583"
+ inkscape:cx="840.45848"
+ inkscape:cy="5052.4242"
+ inkscape:window-x="787"
+ inkscape:window-y="24"
inkscape:window-maximized="0"
- inkscape:current-layer="svg2"
+ inkscape:current-layer="g4"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
@@ -1543,15 +1543,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(1749.0282,658.72243)"
id="g3188">
- <text
- xml:space="preserve"
- x="3305.5364"
- y="13255.592"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-5"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
<g
id="g3107-62"
transform="translate(947.90548,11584.029)">
@@ -1584,6 +1575,17 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-7">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3137.9988"
+ y="13271.316"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-626"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
</g>
<rect
ry="0"
@@ -2318,15 +2320,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(1739.0986,17188.625)"
id="g3188-6">
- <text
- xml:space="preserve"
- x="3305.5364"
- y="13255.592"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
<g
id="g3107-5"
transform="translate(947.90548,11584.029)">
@@ -2359,6 +2352,15 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-1">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3147.9268"
+ y="13240.524"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -2387,13 +2389,13 @@
</g>
<text
xml:space="preserve"
- x="5392.3345"
- y="15407.104"
+ x="5263.1094"
+ y="15411.646"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-6-7"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-92"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -2434,13 +2436,13 @@
id="tspan3104-6-5-6-0-94">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7536.4883"
- y="17640.934"
+ x="7417.4053"
+ y="17655.502"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-9"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-759"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-2353.8462,17224.992)"
@@ -2469,13 +2471,13 @@
</g>
<text
xml:space="preserve"
- x="5378.4146"
- y="15436.927"
+ x="5246.1548"
+ y="15411.648"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-87"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-863.02613,17188.625)"
@@ -2554,13 +2556,13 @@
id="tspan3104-6-5-6-0-92-6">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7520.1294"
- y="17673.639"
+ x="7433.8257"
+ y="17682.098"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-35"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-7528.5085,17188.625)"
@@ -2601,13 +2603,13 @@
id="tspan3104-6-5-6-0-1-8">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7521.4663"
- y="17666.062"
+ x="7415.4404"
+ y="17682.098"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-75-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -2641,15 +2643,6 @@
id="path3414-8-3-6-4"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
- <text
- xml:space="preserve"
- x="6659.5469"
- y="34833.551"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-62"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
@@ -3844,7 +3837,7 @@
font-weight="bold"
font-size="192"
id="text202-6-6-5"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
<text
xml:space="preserve"
x="5035.4155"
@@ -4284,15 +4277,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(1874.038,53203.538)"
id="g3188-7">
- <text
- xml:space="preserve"
- x="3199.1516"
- y="13255.592"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-82"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<g
id="g3107-53"
transform="translate(947.90548,11584.029)">
@@ -4325,6 +4309,17 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-19">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3175.896"
+ y="13240.11"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<rect
ry="0"
@@ -4371,13 +4366,15 @@
</g>
<text
xml:space="preserve"
- x="5324.5371"
- y="15414.598"
+ x="5264.4829"
+ y="15411.231"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-753"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -4412,30 +4409,12 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-6-0-4">Leaf</tspan></text>
- <text
- xml:space="preserve"
- x="10084.225"
- y="70903.312"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-9-0"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
id="path3134-9-0-3-9"
d="m 6315.6122,72629.054 -20.9533,8108.684 1648.968,0"
style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
- <text
- xml:space="preserve"
- x="5092.4683"
- y="74111.672"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rsp-&gt;completed =</text>
<g
style="fill:none;stroke-width:0.025in"
id="g3107-62-6"
@@ -4469,15 +4448,6 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-7-7">Root</tspan></text>
- <text
- xml:space="preserve"
- x="5092.4683"
- y="74325.906"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"> rnp-&gt;completed</text>
<g
style="fill:none;stroke-width:0.025in"
transform="translate(1746.2528,60972.572)"
@@ -4736,13 +4706,15 @@
</g>
<text
xml:space="preserve"
- x="5327.3057"
- y="15428.84"
+ x="5274.1216"
+ y="15411.231"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-36"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-6">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-728.08545,53203.538)"
@@ -4821,13 +4793,15 @@
id="tspan3104-6-5-6-0-92-5">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7486.4907"
- y="17670.119"
+ x="7435.1987"
+ y="17708.281"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-6-2"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-1">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-7393.5687,53203.538)"
@@ -4868,13 +4842,15 @@
id="tspan3104-6-5-6-0-1-5">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7474.1382"
- y="17688.926"
+ x="7416.8125"
+ y="17708.281"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-5-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-35"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-62">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -4908,15 +4884,6 @@
id="path3414-8-3-6-67"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
- <text
- xml:space="preserve"
- x="6742.6001"
- y="70882.617"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-2"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<g
style="fill:none;stroke-width:0.025in"
id="g4504-3-9-6"
@@ -5131,5 +5098,47 @@
font-size="192"
id="text202-7-9-6-6-7"
style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_do_batch()</text>
+ <text
+ xml:space="preserve"
+ x="6698.9019"
+ y="70885.211"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-7">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+ <text
+ xml:space="preserve"
+ x="10023.457"
+ y="70885.234"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+ <text
+ xml:space="preserve"
+ x="5023.3389"
+ y="74209.773"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-36"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+ <text
+ xml:space="preserve"
+ x="6562.5884"
+ y="34870.727"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
index de3992f4cbe1..149bec2a4493 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
@@ -300,13 +300,13 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.70710678"
- inkscape:cx="616.47598"
- inkscape:cy="595.41964"
- inkscape:window-x="813"
+ inkscape:zoom="0.96484375"
+ inkscape:cx="507.0191"
+ inkscape:cy="885.62207"
+ inkscape:window-x="47"
inkscape:window-y="28"
inkscape:window-maximized="0"
- inkscape:current-layer="g4405"
+ inkscape:current-layer="g3115"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
@@ -710,7 +710,7 @@
font-weight="bold"
font-size="192"
id="text202-6-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
<text
xml:space="preserve"
x="5035.4155"
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 4259f95c3261..f99cf11b314b 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -172,7 +172,7 @@ it will print a message similar to the following:
INFO: rcu_sched detected stalls on CPUs/tasks:
2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
16-...: (0 ticks this GP) idle=81c/0/0 softirq=764/764 fqs=0
- (detected by 32, t=2603 jiffies, g=7073, c=7072, q=625)
+ (detected by 32, t=2603 jiffies, g=7075, q=625)
This message indicates that CPU 32 detected that CPUs 2 and 16 were both
causing stalls, and that the stall was affecting RCU-sched. This message
@@ -215,11 +215,10 @@ CPU since the last time that this CPU noted the beginning of a grace
period.
The "detected by" line indicates which CPU detected the stall (in this
-case, CPU 32), how many jiffies have elapsed since the start of the
-grace period (in this case 2603), the number of the last grace period
-to start and to complete (7073 and 7072, respectively), and an estimate
-of the total number of RCU callbacks queued across all CPUs (625 in
-this case).
+case, CPU 32), how many jiffies have elapsed since the start of the grace
+period (in this case 2603), the grace-period sequence number (7075), and
+an estimate of the total number of RCU callbacks queued across all CPUs
+(625 in this case).
In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
for each CPU:
@@ -266,15 +265,16 @@ If the relevant grace-period kthread has been unable to run prior to
the stall warning, as was the case in the "All QSes seen" line above,
the following additional line is printed:
- kthread starved for 23807 jiffies! g7073 c7072 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1
+ kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
Starving the grace-period kthreads of CPU time can of course result
in RCU CPU stall warnings even when all CPUs and tasks have passed
-through the required quiescent states. The "g" and "c" numbers flag the
-number of the last grace period started and completed, respectively,
-the "f" precedes the ->gp_flags command to the grace-period kthread,
-the "RCU_GP_WAIT_FQS" indicates that the kthread is waiting for a short
-timeout, and the "state" precedes value of the task_struct ->state field.
+through the required quiescent states. The "g" number shows the current
+grace-period sequence number, the "f" precedes the ->gp_flags command
+to the grace-period kthread, the "RCU_GP_WAIT_FQS" indicates that the
+kthread is waiting for a short timeout, the "state" precedes the value of
+the task_struct ->state field, and the "cpu" indicates that the grace-period
+kthread last ran on CPU 5.
Multiple Warnings From One Stall
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 65eb856526b7..c2a7facf7ff9 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -588,6 +588,7 @@ It is extremely simple:
void synchronize_rcu(void)
{
write_lock(&rcu_gp_mutex);
+ smp_mb__after_spinlock();
write_unlock(&rcu_gp_mutex);
}
@@ -609,12 +610,15 @@ don't forget about them when submitting patches making use of RCU!]
The rcu_read_lock() and rcu_read_unlock() primitive read-acquire
and release a global reader-writer lock. The synchronize_rcu()
-primitive write-acquires this same lock, then immediately releases
-it. This means that once synchronize_rcu() exits, all RCU read-side
-critical sections that were in progress before synchronize_rcu() was
-called are guaranteed to have completed -- there is no way that
-synchronize_rcu() would have been able to write-acquire the lock
-otherwise.
+primitive write-acquires this same lock, then releases it. This means
+that once synchronize_rcu() exits, all RCU read-side critical sections
+that were in progress before synchronize_rcu() was called are guaranteed
+to have completed -- there is no way that synchronize_rcu() would have
+been able to write-acquire the lock otherwise. The smp_mb__after_spinlock()
+promotes synchronize_rcu() to a full memory barrier in compliance with
+the "Memory-Barrier Guarantees" listed in:
+
+ Documentation/RCU/Design/Requirements/Requirements.html.
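+
+For reference, the reader-side counterparts in this toy implementation
+(defined earlier in this document) are what the argument relies on:
+rcu_read_lock() read-acquires the very lock that synchronize_rcu()
+write-acquires:
+
+	void rcu_read_lock(void)
+	{
+		read_lock(&rcu_gp_mutex);
+	}
+
+	void rcu_read_unlock(void)
+	{
+		read_unlock(&rcu_gp_mutex);
+	}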
It is possible to nest rcu_read_lock(), since reader-writer locks may
be recursively acquired. Note also that rcu_read_lock() is immune
@@ -816,11 +820,13 @@ RCU list traversal:
list_next_rcu
list_for_each_entry_rcu
list_for_each_entry_continue_rcu
+ list_for_each_entry_from_rcu
hlist_first_rcu
hlist_next_rcu
hlist_pprev_rcu
hlist_for_each_entry_rcu
hlist_for_each_entry_rcu_bh
+ hlist_for_each_entry_from_rcu
hlist_for_each_entry_continue_rcu
hlist_for_each_entry_continue_rcu_bh
hlist_nulls_first_rcu
diff --git a/Documentation/acpi/dsd/data-node-references.txt b/Documentation/acpi/dsd/data-node-references.txt
new file mode 100644
index 000000000000..c3871565c8cf
--- /dev/null
+++ b/Documentation/acpi/dsd/data-node-references.txt
@@ -0,0 +1,89 @@
+Copyright (C) 2018 Intel Corporation
+Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+
+Referencing hierarchical data nodes
+-----------------------------------
+
+ACPI in general allows referring to device objects in the tree only.
+Hierarchical data extension nodes may not be referred to directly, hence this
+document defines a scheme to implement such references.
+
+A reference consists of the device object name followed by one or more
+hierarchical data extension [1] keys. Specifically, the hierarchical data
+extension node which is referred to by the key shall lie directly under the
+parent object, i.e. either the device object or another hierarchical data
+extension node.
+
+The keys in the hierarchical data nodes shall consist of the name of the node,
+the "@" character and the number of the node in hexadecimal notation (without
+prefixes or suffixes). The same ACPI object shall include the _DSD property
+extension with a property "reg" that shall have the same numerical value as
+the number of the node.
+
+In case a hierarchical data extension node has no numerical value, then the
+"reg" property shall be omitted from the ACPI object's _DSD properties and the
+"@" character and the number shall be omitted from the hierarchical data
+extension key.
+
+
+Example
+-------
+
+ In the ASL snippet below, the "reference" _DSD property [2] contains a
+ device object reference to DEV0 and under that device object, a
+ hierarchical data extension key "node@1" referring to the NOD1 object
+ and lastly, a hierarchical data extension key "anothernode" referring to
+ the ANOD object which is also the final target node of the reference.
+
+ Device (DEV0)
+ {
+ Name (_DSD, Package () {
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () { "node@0", NOD0 },
+ Package () { "node@1", NOD1 },
+ }
+ })
+ Name (NOD0, Package() {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "random-property", 3 },
+ }
+ })
+ Name (NOD1, Package() {
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () { "anothernode", ANOD },
+ }
+ })
+ Name (ANOD, Package() {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "random-property", 0 },
+ }
+ })
+ }
+
+ Device (DEV1)
+ {
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "reference", ^DEV0, "node@1", "anothernode" },
+ }
+ })
+ }
+
+Please also see a graph example in graph.txt.
+
+References
+----------
+
+[1] Hierarchical Data Extension UUID For _DSD.
+ <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
+ referenced 2018-07-17.
+
+[2] Device Properties UUID For _DSD.
+ <URL:http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf>,
+ referenced 2016-10-04.
diff --git a/Documentation/acpi/dsd/graph.txt b/Documentation/acpi/dsd/graph.txt
index ac09e3138b79..b9ce910781dc 100644
--- a/Documentation/acpi/dsd/graph.txt
+++ b/Documentation/acpi/dsd/graph.txt
@@ -36,29 +36,41 @@ The port and endpoint concepts are very similar to those in Devicetree
[3]. A port represents an interface in a device, and an endpoint
represents a connection to that interface.
-All port nodes are located under the device's "_DSD" node in the
-hierarchical data extension tree. The property extension related to
-each port node must contain the key "port" and an integer value which
-is the number of the port. The object it refers to should be called "PRTX",
-where "X" is the number of the port.
-
-Further on, endpoints are located under the individual port nodes. The
-first hierarchical data extension package list entry of the endpoint
-nodes must begin with "endpoint" and must be followed by the number
-of the endpoint. The object it refers to should be called "EPXY", where
-"X" is the number of the port and "Y" is the number of the endpoint.
-
-Each port node contains a property extension key "port", the value of
-which is the number of the port node. The each endpoint is similarly numbered
-with a property extension key "endpoint". Port numbers must be unique within a
-device and endpoint numbers must be unique within a port.
+All port nodes are located under the device's "_DSD" node in the hierarchical
+data extension tree. The key of the data extension related to each port node
+must begin with "port" and must be followed by the "@" character and the number
+of the port. The target object it refers to should be called "PRTX", where "X"
+is the number of the port. An example of such a package would be:
+
+ Package() { "port@4", PRT4 }
+
+Further on, endpoints are located under the port nodes. The hierarchical
+data extension key of the endpoint nodes must begin with
+"endpoint" and must be followed by the "@" character and the number of the
+endpoint. The object it refers to should be called "EPXY", where "X" is the
+number of the port and "Y" is the number of the endpoint. An example of such a
+package would be:
+
+ Package() { "endpoint@0", EP40 }
+
+Each port node contains a property extension key "port", the value of which is
+the number of the port. Each endpoint is similarly numbered with a property
+extension key "reg", the value of which is the number of the endpoint. Port
+numbers must be unique within a device and endpoint numbers must be unique
+within a port. If a device object has only a single port, then the number
+of that port shall be zero. Similarly, if a port has only a single
+endpoint, the number of that endpoint shall be zero.
The endpoint reference uses property extension with "remote-endpoint" property
name followed by a reference in the same package. Such references consist of the
-the remote device reference, number of the port in the device and finally the
-number of the endpoint in that port. Individual references thus appear as:
+remote device reference, the first package entry of the port data extension
+reference under the device and finally the first package entry of the endpoint
+data extension reference under the port. Individual references thus appear as:
- Package() { device, port_number, endpoint_number }
+ Package() { device, "port@X", "endpoint@Y" }
+
+In the above example, "X" is the number of the port and "Y" is the number of the
+endpoint.
The references to endpoints must be always done both ways, to the
remote endpoint and back from the referred remote endpoint node.
@@ -76,24 +88,24 @@ A simple example of this is show below:
},
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "port0", "PRT0" },
+ Package () { "port@0", PRT0 },
}
})
Name (PRT0, Package() {
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
- Package () { "port", 0 },
+ Package () { "reg", 0 },
},
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "endpoint0", "EP00" },
+ Package () { "endpoint@0", EP00 },
}
})
Name (EP00, Package() {
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
- Package () { "endpoint", 0 },
- Package () { "remote-endpoint", Package() { \_SB.PCI0.ISP, 4, 0 } },
+ Package () { "reg", 0 },
+ Package () { "remote-endpoint", Package() { \_SB.PCI0.ISP, "port@4", "endpoint@0" } },
}
})
}
@@ -106,26 +118,26 @@ A simple example of this is show below:
Name (_DSD, Package () {
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "port4", "PRT4" },
+ Package () { "port@4", PRT4 },
}
})
Name (PRT4, Package() {
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
- Package () { "port", 4 }, /* CSI-2 port number */
+ Package () { "reg", 4 }, /* CSI-2 port number */
},
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "endpoint0", "EP40" },
+ Package () { "endpoint@0", EP40 },
}
})
Name (EP40, Package() {
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
- Package () { "endpoint", 0 },
- Package () { "remote-endpoint", Package () { \_SB.PCI0.I2C2.CAM0, 0, 0 } },
+ Package () { "reg", 0 },
+ Package () { "remote-endpoint", Package () { \_SB.PCI0.I2C2.CAM0, "port@0", "endpoint@0" } },
}
})
}
@@ -151,7 +163,7 @@ References
referenced 2016-10-04.
[5] Hierarchical Data Extension UUID For _DSD.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.pdf>,
+ <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
referenced 2016-10-04.
[6] Advanced Configuration and Power Interface Specification.
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 02caa7efd5ef..15ea785b2dfa 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -1,3 +1,5 @@
+.. _readme:
+
Linux kernel release 4.x <http://kernel.org/>
=============================================
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 8a2c52d5c53b..184193bcb262 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -51,6 +51,9 @@ v1 is available under Documentation/cgroup-v1/.
5-3. IO
5-3-1. IO Interface Files
5-3-2. Writeback
+ 5-3-3. IO Latency
+ 5-3-3-1. How IO Latency Throttling Works
+ 5-3-3-2. IO Latency Interface Files
5-4. PID
5-4-1. PID Interface Files
5-5. Device
@@ -1069,6 +1072,24 @@ PAGE_SIZE multiple when read back.
high limit is used and monitored properly, this limit's
utility is limited to providing the final safety net.
+ memory.oom.group
+ A read-write single value file which exists on non-root
+ cgroups. The default value is "0".
+
+ Determines whether the cgroup should be treated as
+ an indivisible workload by the OOM killer. If set,
+ all tasks belonging to the cgroup or to its descendants
+ (if the memory cgroup is not a leaf cgroup) are killed
+ together or not at all. This can be used to avoid
+ partial kills to guarantee workload integrity.
+
+ Tasks with the OOM protection (oom_score_adj set to -1000)
+ are treated as an exception and are never killed.
+
+ If the OOM killer is invoked in a cgroup, it's not going
+	to kill any tasks outside of this cgroup, regardless of the
+	memory.oom.group values of ancestor cgroups.
+
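+	For example, writing "1" to this file in a workload's cgroup
+	directory (hypothetical path shown) makes the OOM killer treat
+	that workload as a single unit:
+
+	  echo 1 > /sys/fs/cgroup/workload/memory.oom.group
+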
memory.events
A read-only flat-keyed file which exists on non-root cgroups.
The following entries are defined. Unless specified
@@ -1314,17 +1335,19 @@ IO Interface Files
Lines are keyed by $MAJ:$MIN device numbers and not ordered.
The following nested keys are defined.
- ====== ===================
+ ====== =====================
rbytes Bytes read
wbytes Bytes written
rios Number of read IOs
wios Number of write IOs
- ====== ===================
+ dbytes Bytes discarded
+ dios Number of discard IOs
+ ====== =====================
An example read output follows:
- 8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353
- 8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252
+ 8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0
+ 8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252 dbytes=50331648 dios=3021
io.weight
A read-write flat-keyed file which exists on non-root cgroups.
@@ -1446,6 +1469,85 @@ writeback as follows.
vm.dirty[_background]_ratio.
+IO Latency
+~~~~~~~~~~
+
+This is a cgroup v2 controller for IO workload protection. You provide a group
+with a latency target, and if the average latency exceeds that target the
+controller will throttle any peers that have a higher (looser) latency
+target than the protected workload.
+
+The limits are only applied at the peer level in the hierarchy. This means that
+in the diagram below, only groups A, B, and C will influence each other, and
+groups D and F will influence each other. Group G will influence nobody.
+
+ [root]
+ / | \
+ A B C
+ / \ |
+ D F G
+
+
+So the ideal way to configure this is to set io.latency in groups A, B, and C.
+Generally you do not want to set a value lower than the latency your device
+supports. Experiment to find the value that works best for your workload.
+Start higher than the expected latency for your device and watch the
+avg_lat value in io.stat for your workload group to get an idea of the
+latency you see during normal operation. Use the avg_lat value as a basis for
+your real setting, setting it 10-15% higher than the value in io.stat.
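+
+For example, if avg_lat for the workload group settles around 650
+microseconds, a target of roughly 715-750 (10-15% above it) would be a
+reasonable starting point.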
+
+How IO Latency Throttling Works
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+io.latency is work conserving: as long as everybody is meeting their latency
+target the controller doesn't do anything. Once a group starts missing its
+target it begins throttling any peer group that has a higher target than itself.
+This throttling takes two forms:
+
+- Queue depth throttling. This is the number of outstanding IOs a group is
+ allowed to have. We will clamp down relatively quickly, starting at no limit
+ and going all the way down to 1 IO at a time.
+
+- Artificial delay induction. There are certain types of IO that cannot be
+ throttled without possibly adversely affecting higher priority groups. This
+ includes swapping and metadata IO. These types of IO are allowed to occur
+ normally, however they are "charged" to the originating group. If the
+ originating group is being throttled you will see the use_delay and delay
+  originating group is being throttled you will see the use_delay and delay
+  fields in io.stat increase. The delay value is how many microseconds are
+  being added to any process that runs in this group. Because this number can
+  grow quite large if there is a lot of swapping or metadata IO occurring, we
+
+Once the victimized group starts meeting its latency target again it will start
+unthrottling any peer groups that were throttled previously. If the victimized
+group simply stops doing IO the global counter will unthrottle appropriately.
+
+IO Latency Interface Files
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ io.latency
+ This takes a similar format as the other controllers.
+
+		"MAJOR:MINOR target=<target time in microseconds>"
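+
+		For example, setting a 750 microsecond latency target on a
+		hypothetical device 8:16:
+
+		  echo "8:16 target=750" > io.latency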
+
+ io.stat
+ If the controller is enabled you will see extra stats in io.stat in
+ addition to the normal ones.
+
+ depth
+ This is the current queue depth for the group.
+
+ avg_lat
+ This is an exponential moving average with a decay rate of 1/exp
+ bound by the sampling interval. The decay rate interval can be
+ calculated by multiplying the win value in io.stat by the
+ corresponding number of samples based on the win value.
+
+ win
+ The sampling window size in milliseconds. This is the minimum
+ duration of time between evaluation events. Windows only elapse
+ with IO activity. Idle periods extend the most recent window.
+
PID
---
diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
index 4ec843123cc3..1649117e6087 100644
--- a/Documentation/admin-guide/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -173,14 +173,18 @@
they are redirected through the parport multiplex layer.
7 char Virtual console capture devices
- 0 = /dev/vcs Current vc text contents
- 1 = /dev/vcs1 tty1 text contents
+ 0 = /dev/vcs Current vc text (glyph) contents
+ 1 = /dev/vcs1 tty1 text (glyph) contents
...
- 63 = /dev/vcs63 tty63 text contents
- 128 = /dev/vcsa Current vc text/attribute contents
- 129 = /dev/vcsa1 tty1 text/attribute contents
+ 63 = /dev/vcs63 tty63 text (glyph) contents
+ 64 = /dev/vcsu Current vc text (unicode) contents
+ 65 = /dev/vcsu1 tty1 text (unicode) contents
...
- 191 = /dev/vcsa63 tty63 text/attribute contents
+ 127 = /dev/vcsu63 tty63 text (unicode) contents
+ 128 = /dev/vcsa Current vc text/attribute (glyph) contents
+ 129 = /dev/vcsa1 tty1 text/attribute (glyph) contents
+ ...
+ 191 = /dev/vcsa63 tty63 text/attribute (glyph) contents
NOTE: These devices permit both read and write access.
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 48d70af11652..0873685bab0f 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -17,6 +17,15 @@ etc.
kernel-parameters
devices
+This section describes CPU vulnerabilities and provides an overview of the
+possible mitigations along with guidance for selecting mitigations if they
+are configurable at compile, boot or run time.
+
+.. toctree::
+ :maxdepth: 1
+
+ l1tf
+
Here is a set of documents aimed at users who are trying to track down
problems and bugs in particular.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index efc7aa7a0670..970d837bd57f 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -748,6 +748,14 @@
debug [KNL] Enable kernel debugging (events log level).
+ debug_boot_weak_hash
+ [KNL] Enable printing [hashed] pointers early in the
+ boot sequence. If enabled, we use a weak hash instead
+ of siphash to hash pointers. Use this option if you are
+			seeing instances of '(___ptrval___)' and need to see a
+ value (hashed pointer) instead. Cryptographically
+ insecure, please do not use on production kernels.
+
debug_locks_verbose=
[KNL] verbose self-tests
Format=<0|1>
@@ -804,6 +812,15 @@
Defaults to the default architecture's huge page size
if not specified.
+ deferred_probe_timeout=
+ [KNL] Debugging option to set a timeout in seconds for
+ deferred probe to give up waiting on dependencies to
+ probe. Only specific dependencies (subsystems or
+			drivers) that have opted in will be ignored. A timeout of 0
+			expires at the end of initcalls. This option will also
+ dump out devices still on the deferred probe list after
+ retrying.
+
dhash_entries= [KNL]
Set number of hash buckets for dentry cache.
@@ -816,6 +833,17 @@
disable= [IPV6]
See Documentation/networking/ipv6.txt.
+ hardened_usercopy=
+ [KNL] Under CONFIG_HARDENED_USERCOPY, whether
+ hardening is enabled for this boot. Hardened
+ usercopy checking is used to protect the kernel
+ from reading or writing beyond known memory
+ allocation boundaries as a proactive defense
+ against bounds-checking flaws in the kernel's
+ copy_to_user()/copy_from_user() interface.
+ on Perform hardened usercopy checks (default).
+ off Disable hardened usercopy checks.
+
disable_radix [PPC]
Disable RADIX MMU mode on POWER9
@@ -1967,10 +1995,84 @@
(virtualized real and unpaged mode) on capable
Intel chips. Default is 1 (enabled)
+ kvm-intel.vmentry_l1d_flush=[KVM,Intel] Mitigation for L1 Terminal Fault
+ CVE-2018-3620.
+
+ Valid arguments: never, cond, always
+
+ always: L1D cache flush on every VMENTER.
+ cond: Flush L1D on VMENTER only when the code between
+ VMEXIT and VMENTER can leak host memory.
+ never: Disables the mitigation
+
+ Default is cond (do L1 cache flush in specific instances)
+
kvm-intel.vpid= [KVM,Intel] Disable Virtual Processor Identification
feature (tagged TLBs) on capable Intel chips.
Default is 1 (enabled)
+ l1tf= [X86] Control mitigation of the L1TF vulnerability on
+ affected CPUs
+
+ The kernel PTE inversion protection is unconditionally
+ enabled and cannot be disabled.
+
+ full
+ Provides all available mitigations for the
+ L1TF vulnerability. Disables SMT and
+ enables all mitigations in the
+ hypervisors, i.e. unconditional L1D flush.
+
+ SMT control and L1D flush control via the
+			sysfs interface are still possible after
+ boot. Hypervisors will issue a warning
+ when the first VM is started in a
+ potentially insecure configuration,
+ i.e. SMT enabled or L1D flush disabled.
+
+ full,force
+ Same as 'full', but disables SMT and L1D
+ flush runtime control. Implies the
+ 'nosmt=force' command line option.
+ (i.e. sysfs control of SMT is disabled.)
+
+ flush
+ Leaves SMT enabled and enables the default
+ hypervisor mitigation, i.e. conditional
+ L1D flush.
+
+ SMT control and L1D flush control via the
+			sysfs interface are still possible after
+ boot. Hypervisors will issue a warning
+ when the first VM is started in a
+ potentially insecure configuration,
+ i.e. SMT enabled or L1D flush disabled.
+
+ flush,nosmt
+
+ Disables SMT and enables the default
+ hypervisor mitigation.
+
+ SMT control and L1D flush control via the
+			sysfs interface are still possible after
+ boot. Hypervisors will issue a warning
+ when the first VM is started in a
+ potentially insecure configuration,
+ i.e. SMT enabled or L1D flush disabled.
+
+ flush,nowarn
+ Same as 'flush', but hypervisors will not
+ warn when a VM is started in a potentially
+ insecure configuration.
+
+ off
+ Disables hypervisor mitigations and doesn't
+ emit any warnings.
+
+ Default is 'flush'.
+
+ For details see: Documentation/admin-guide/l1tf.rst
+
l2cr= [PPC]
l3cr= [PPC]
@@ -2687,6 +2789,14 @@
nosmt [KNL,S390] Disable symmetric multithreading (SMT).
Equivalent to smt=1.
+ [KNL,x86] Disable symmetric multithreading (SMT).
+ nosmt=force: Force disable SMT, cannot be undone
+ via the sysfs control file.
+
+ nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
+ check bypass). With this option data leaks are possible
+ in the system.
+
nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
(indirect branch prediction) vulnerability. System may
allow data leaks with this option, which is equivalent
@@ -2835,8 +2945,6 @@
nosync [HW,M68K] Disables sync negotiation for all devices.
- notsc [BUGS=X86-32] Disable Time Stamp Counter
-
nowatchdog [KNL] Disable both lockup detectors, i.e.
soft-lockup and NMI watchdog (hard-lockup).
@@ -2933,8 +3041,9 @@
on: enable the feature
page_poison= [KNL] Boot-time parameter changing the state of
- poisoning on the buddy allocator.
- off: turn off poisoning
+ poisoning on the buddy allocator, available with
+ CONFIG_PAGE_POISONING=y.
+ off: turn off poisoning (default)
on: turn on poisoning
panic= [KNL] Kernel behaviour on panic: delay <timeout>
@@ -2994,8 +3103,31 @@
See header of drivers/block/paride/pcd.c.
See also Documentation/blockdev/paride.txt.
- pci=option[,option...] [PCI] various PCI subsystem options:
- earlydump [X86] dump PCI config space before the kernel
+ pci=option[,option...] [PCI] various PCI subsystem options.
+
+ Some options herein operate on a specific device
+ or a set of devices (<pci_dev>). These are
+ specified in one of the following formats:
+
+ [<domain>:]<bus>:<dev>.<func>[/<dev>.<func>]*
+ pci:<vendor>:<device>[:<subvendor>:<subdevice>]
+
+ Note: the first format specifies a PCI
+ bus/device/function address which may change
+ if new hardware is inserted, if motherboard
+ firmware changes, or due to changes caused
+ by other kernel parameters. If the
+ domain is left unspecified, it is
+ taken to be zero. Optionally, a path
+ to a device through multiple device/function
+ addresses can be specified after the base
+ address (this is more robust against
+ renumbering issues). The second format
+ selects devices using IDs from the
+ configuration space which may match multiple
+ devices in the system.
+
+ earlydump dump PCI config space before the kernel
changes anything
off [X86] don't probe for the PCI bus
bios [X86-32] force use of PCI BIOS, don't access
@@ -3123,11 +3255,10 @@
window. The default value is 64 megabytes.
resource_alignment=
Format:
- [<order of align>@][<domain>:]<bus>:<slot>.<func>[; ...]
- [<order of align>@]pci:<vendor>:<device>\
- [:<subvendor>:<subdevice>][; ...]
+ [<order of align>@]<pci_dev>[; ...]
Specifies alignment and device to reassign
- aligned memory resources.
+ aligned memory resources. How to
+ specify the device is described above.
If <order of align> is not specified,
PAGE_SIZE is used as alignment.
PCI-PCI bridge can be specified, if resource
@@ -3170,6 +3301,15 @@
Adding the window is slightly risky (it may
conflict with unreported devices), so this
taints the kernel.
+ disable_acs_redir=<pci_dev>[; ...]
+ Specify one or more PCI devices (in the format
+ specified above) separated by semicolons.
+ Each device specified will have the PCI ACS
+ redirect capabilities forced off which will
+ allow P2P traffic between devices through
+ bridges without forcing it upstream. Note:
+ this removes isolation between devices and
+ may put more devices in an IOMMU group.
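+				For example (hypothetical devices):
+				pci=disable_acs_redir=0000:01:00.0;pci:8086:1533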
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
@@ -3632,8 +3772,8 @@
Set time (s) after boot for CPU-hotplug testing.
rcutorture.onoff_interval= [KNL]
- Set time (s) between CPU-hotplug operations, or
- zero to disable CPU-hotplug testing.
+ Set time (jiffies) between CPU-hotplug operations,
+ or zero to disable CPU-hotplug testing.
rcutorture.shuffle_interval= [KNL]
Set task-shuffle interval (s). Shuffling tasks
@@ -4060,6 +4200,8 @@
This parameter controls whether the Speculative Store
Bypass optimization is used.
+ On x86 the options are:
+
on - Unconditionally disable Speculative Store Bypass
off - Unconditionally enable Speculative Store Bypass
auto - Kernel detects whether the CPU model contains an
@@ -4075,12 +4217,20 @@
seccomp - Same as "prctl" above, but all seccomp threads
will disable SSB unless they explicitly opt out.
- Not specifying this option is equivalent to
- spec_store_bypass_disable=auto.
-
Default mitigations:
X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
+ On powerpc the options are:
+
+ on,auto - On Power8 and Power9 insert a store-forwarding
+ barrier on kernel entry and exit. On Power7
+ perform a software flush on kernel entry and
+ exit.
+ off - No action.
+
+ Not specifying this option is equivalent to
+ spec_store_bypass_disable=auto.
+
spia_io_base= [HW,MTD]
spia_fio_base=
spia_pedr=
@@ -4846,3 +4996,8 @@
xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
+
+ xhci-hcd.quirks [USB,KNL]
+ A hex value specifying bitmask with supplemental xhci
+ host controller quirks. Meaning of each bit can be
+ consulted in header drivers/usb/host/xhci.h.
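+			For example, xhci-hcd.quirks=0x40 sets quirk bit 6;
+			which quirk a given bit selects depends on the kernel
+			version, so consult drivers/usb/host/xhci.h.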
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
new file mode 100644
index 000000000000..bae52b845de0
--- /dev/null
+++ b/Documentation/admin-guide/l1tf.rst
@@ -0,0 +1,610 @@
+L1TF - L1 Terminal Fault
+========================
+
+L1 Terminal Fault is a hardware vulnerability which allows unprivileged
+speculative access to data which is available in the Level 1 Data Cache
+when the page table entry controlling the virtual address, which is used
+for the access, has the Present bit cleared or other reserved bits set.
+
+Affected processors
+-------------------
+
+This vulnerability affects a wide range of Intel processors. The
+vulnerability is not present on:
+
+ - Processors from AMD, Centaur and other non-Intel vendors
+
+ - Older processor models, where the CPU family is < 6
+
+ - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft,
+ Penwell, Pineview, Silvermont, Airmont, Merrifield)
+
+ - The Intel XEON PHI family
+
+ - Intel processors which have the ARCH_CAP_RDCL_NO bit set in the
+ IA32_ARCH_CAPABILITIES MSR. If the bit is set the CPU is not affected
+ by the Meltdown vulnerability either. These CPUs should become
+ available by end of 2018.
+
+Whether a processor is affected or not can be read out from the L1TF
+vulnerability file in sysfs. See :ref:`l1tf_sys_info`.
+
+Related CVEs
+------------
+
+The following CVE entries are related to the L1TF vulnerability:
+
+ ============= ================= ==============================
+ CVE-2018-3615 L1 Terminal Fault SGX related aspects
+ CVE-2018-3620 L1 Terminal Fault OS, SMM related aspects
+ CVE-2018-3646 L1 Terminal Fault Virtualization related aspects
+ ============= ================= ==============================
+
+Problem
+-------
+
+If an instruction accesses a virtual address for which the relevant page
+table entry (PTE) has the Present bit cleared or other reserved bits set,
+then speculative execution ignores the invalid PTE and loads the referenced
+data if it is present in the Level 1 Data Cache, as if the page referenced
+by the address bits in the PTE was still present and accessible.
+
+While this is a purely speculative mechanism and the instruction will raise
+a page fault when it is retired eventually, the pure act of loading the
+data and making it available to other speculative instructions opens up the
+opportunity for side channel attacks to unprivileged malicious code,
+similar to the Meltdown attack.
+
+While Meltdown breaks the user space to kernel space protection, L1TF
+allows attacking any physical memory address in the system and the attack
+works across all protection domains. It allows attacks on SGX and also
+works from inside virtual machines because the speculation bypasses the
+extended page table (EPT) protection mechanism.
+
+
+Attack scenarios
+----------------
+
+1. Malicious user space
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Operating Systems store arbitrary information in the address bits of a
+  PTE which is marked non-present. This allows a malicious user space
+ application to attack the physical memory to which these PTEs resolve.
+ In some cases user-space can maliciously influence the information
+ encoded in the address bits of the PTE, thus making attacks more
+ deterministic and more practical.
+
+ The Linux kernel contains a mitigation for this attack vector, PTE
+ inversion, which is permanently enabled and has no performance
+ impact. The kernel ensures that the address bits of PTEs, which are not
+ marked present, never point to cacheable physical memory space.
+
+ A system with an up to date kernel is protected against attacks from
+ malicious user space applications.
+
+2. Malicious guest in a virtual machine
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The fact that L1TF breaks all domain protections allows malicious guest
+ OSes, which can control the PTEs directly, and malicious guest user
+ space applications, which run on an unprotected guest kernel lacking the
+ PTE inversion mitigation for L1TF, to attack physical host memory.
+
+ A special aspect of L1TF in the context of virtualization is symmetric
+ multi threading (SMT). The Intel implementation of SMT is called
+ HyperThreading. The fact that Hyperthreads on the affected processors
+  share the L1 Data Cache (L1D) is important for this. As the flaw only
+  allows attacks on data which is present in L1D, a malicious guest running
+ on one Hyperthread can attack the data which is brought into the L1D by
+ the context which runs on the sibling Hyperthread of the same physical
+ core. This context can be host OS, host user space or a different guest.
+
+ If the processor does not support Extended Page Tables, the attack is
+  only possible when the hypervisor does not sanitize the content of the
+ effective (shadow) page tables.
+
+ While solutions exist to mitigate these attack vectors fully, these
+ mitigations are not enabled by default in the Linux kernel because they
+ can affect performance significantly. The kernel provides several
+ mechanisms which can be utilized to address the problem depending on the
+ deployment scenario. The mitigations, their protection scope and impact
+ are described in the next sections.
+
+ The default mitigations and the rationale for choosing them are explained
+ at the end of this document. See :ref:`default_mitigations`.
+
+.. _l1tf_sys_info:
+
+L1TF system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current L1TF
+status of the system: whether the system is vulnerable, and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/l1tf
+
+The possible values in this file are:
+
+ =========================== ===============================
+ 'Not affected' The processor is not vulnerable
+ 'Mitigation: PTE Inversion' The host protection is active
+ =========================== ===============================
+
+If KVM/VMX is enabled and the processor is vulnerable then the following
+information is appended to the 'Mitigation: PTE Inversion' part:
+
+ - SMT status:
+
+ ===================== ================
+ 'VMX: SMT vulnerable' SMT is enabled
+ 'VMX: SMT disabled' SMT is disabled
+ ===================== ================
+
+ - L1D Flush mode:
+
+ ================================ ====================================
+ 'L1D vulnerable' L1D flushing is disabled
+
+ 'L1D conditional cache flushes' L1D flush is conditionally enabled
+
+ 'L1D cache flushes' L1D flush is unconditionally enabled
+ ================================ ====================================
+
+The resulting grade of protection is discussed in the following sections.
+
+
+Host mitigation mechanism
+-------------------------
+
+The kernel is unconditionally protected against L1TF attacks from malicious
+user space running on the host.
+
+
+Guest mitigation mechanisms
+---------------------------
+
+.. _l1d_flush:
+
+1. L1D flush on VMENTER
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ To make sure that a guest cannot attack data which is present in the L1D
+ the hypervisor flushes the L1D before entering the guest.
+
+ Flushing the L1D evicts not only the data which should not be accessed
+ by a potentially malicious guest, it also flushes the guest
+ data. Flushing the L1D has a performance impact as the processor has to
+ bring the flushed guest data back into the L1D. Depending on the
+ frequency of VMEXIT/VMENTER and the type of computations in the guest
+ performance degradation in the range of 1% to 50% has been observed. For
+ scenarios where guest VMEXIT/VMENTER are rare the performance impact is
+ minimal. Virtio and mechanisms like posted interrupts are designed to
+ confine the VMEXITs to a bare minimum, but specific configurations and
+ application scenarios might still suffer from a high VMEXIT rate.
+
+ The kernel provides two L1D flush modes:
+ - conditional ('cond')
+ - unconditional ('always')
+
+ The conditional mode avoids L1D flushing after VMEXITs which execute
+ only audited code paths before the corresponding VMENTER. These code
+  paths have been verified not to expose secrets or other
+ interesting data to an attacker, but they can leak information about the
+ address space layout of the hypervisor.
+
+ Unconditional mode flushes L1D on all VMENTER invocations and provides
+ maximum protection. It has a higher overhead than the conditional
+ mode. The overhead cannot be quantified correctly as it depends on the
+ workload scenario and the resulting number of VMEXITs.
+
+ The general recommendation is to enable L1D flush on VMENTER. The kernel
+ defaults to conditional mode on affected processors.
+
+  **Note** that L1D flush does not prevent the SMT problem because the
+ sibling thread will also bring back its data into the L1D which makes it
+ attackable again.
+
+ L1D flush can be controlled by the administrator via the kernel command
+ line and sysfs control files. See :ref:`mitigation_control_command_line`
+ and :ref:`mitigation_control_kvm`.
+
+.. _guest_confinement:
+
+2. Guest VCPU confinement to dedicated physical cores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ To address the SMT problem, it is possible to make a guest or a group of
+ guests affine to one or more physical cores. The proper mechanism for
+ that is to utilize exclusive cpusets to ensure that no other guest or
+ host tasks can run on these cores.
+
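+  As an example, a guest's tasks could be confined to physical cores 2-3
+  with an exclusive cpuset (a sketch; the path and PID variable are
+  hypothetical, cgroup v1 cpuset controller assumed):
+
+    # mkdir /sys/fs/cgroup/cpuset/guest0
+    # echo 2-3 > /sys/fs/cgroup/cpuset/guest0/cpuset.cpus
+    # echo 0 > /sys/fs/cgroup/cpuset/guest0/cpuset.mems
+    # echo 1 > /sys/fs/cgroup/cpuset/guest0/cpuset.cpu_exclusive
+    # echo $GUEST_PID > /sys/fs/cgroup/cpuset/guest0/tasks
+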
+ If only a single guest or related guests run on sibling SMT threads on
+ the same physical core then they can only attack their own memory and
+ restricted parts of the host memory.
+
+  Host memory is attackable when one of the sibling SMT threads runs in
+  host OS (hypervisor) context and the other in guest context. The amount
+  of valuable information from the host OS context depends on what the
+  host OS executes, i.e. interrupts, soft interrupts and kernel
+ threads. The amount of valuable data from these contexts cannot be
+ declared as non-interesting for an attacker without deep inspection of
+ the code.
+
+  **Note** that assigning guests to a fixed set of physical cores affects
+ the ability of the scheduler to do load balancing and might have
+ negative effects on CPU utilization depending on the hosting
+ scenario. Disabling SMT might be a viable alternative for particular
+ scenarios.
+
+ For further information about confining guests to a single or to a group
+ of cores consult the cpusets documentation:
+
+ https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt
+
+.. _interrupt_isolation:
+
+3. Interrupt affinity
+^^^^^^^^^^^^^^^^^^^^^
+
+  Interrupts can be made affine to logical CPUs. This is not universally
+  possible because there are types of interrupts which are truly per-CPU
+  interrupts, e.g. the local timer interrupt. Aside from that, multi-queue
+  devices affine their interrupts to single CPUs or groups of CPUs per
+  queue without allowing the administrator to control the affinities.
+
+ Moving the interrupts, which can be affinity controlled, away from CPUs
+ which run untrusted guests, reduces the attack vector space.
+
+  Whether the interrupts which are affine to CPUs running untrusted
+  guests provide interesting data for an attacker depends on the system
+ configuration and the scenarios which run on the system. While for some
+ of the interrupts it can be assumed that they won't expose interesting
+ information beyond exposing hints about the host OS memory layout, there
+ is no way to make general assumptions.
+
+ Interrupt affinity can be controlled by the administrator via the
+ /proc/irq/$NR/smp_affinity[_list] files. Limited documentation is
+ available at:
+
+ https://www.kernel.org/doc/Documentation/IRQ-affinity.txt
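+
+  For example, a (hypothetical) IRQ 42 could be kept away from CPUs
+  running untrusted guests by restricting it to CPUs 0-3:
+
+    # echo 0-3 > /proc/irq/42/smp_affinity_list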
+
+.. _smt_control:
+
+4. SMT control
+^^^^^^^^^^^^^^
+
+ To prevent the SMT issues of L1TF it might be necessary to disable SMT
+ completely. Disabling SMT can have a significant performance impact, but
+ the impact depends on the hosting scenario and the type of workloads.
+  The impact of disabling SMT also needs to be weighed against the impact
+ of other mitigation solutions like confining guests to dedicated cores.
+
+ The kernel provides a sysfs interface to retrieve the status of SMT and
+ to control it. It also provides a kernel command line interface to
+ control SMT.
+
+ The kernel command line interface consists of the following options:
+
+ =========== ==========================================================
+ nosmt       Affects the bring-up of the secondary CPUs during boot.
+             The kernel tries to bring all present CPUs online during
+             the boot process. "nosmt" makes sure that from each
+             physical core only one - the so-called primary (hyper)
+             thread - is activated. Due to a design flaw of Intel
+             processors related to Machine Check Exceptions, the
+             non-primary siblings have to be brought up at least
+             partially and are then shut down again. "nosmt" can be
+             undone via the sysfs interface.
+
+ nosmt=force Has the same effect as "nosmt" but it does not allow the
+             SMT disable to be undone via the sysfs interface.
+ =========== ==========================================================
+
+ The sysfs interface provides two files:
+
+ - /sys/devices/system/cpu/smt/control
+ - /sys/devices/system/cpu/smt/active
+
+ /sys/devices/system/cpu/smt/control:
+
+ This file allows reading out the SMT control state and provides the
+ ability to disable or (re)enable SMT. The possible states are:
+
+ ============== ===================================================
+ on             SMT is supported by the CPU and enabled. All
+                logical CPUs can be onlined and offlined without
+                restrictions.
+
+ off            SMT is supported by the CPU and disabled. Only
+                the so-called primary SMT threads can be onlined
+                and offlined without restrictions. An attempt to
+                online a non-primary sibling is rejected.
+
+ forceoff       Same as 'off' but the state cannot be controlled.
+                Attempts to write to the control file are rejected.
+
+ notsupported   The processor does not support SMT. It's therefore
+                not affected by the SMT implications of L1TF.
+                Attempts to write to the control file are rejected.
+ ============== ===================================================
+
+ The possible states which can be written into this file to control SMT
+ state are:
+
+ - on
+ - off
+ - forceoff
+
+ /sys/devices/system/cpu/smt/active:
+
+ This file reports whether SMT is enabled and active, i.e. if on any
+ physical core two or more sibling threads are online.
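+
+ For example, SMT can be disabled at run time and the result verified as
+ follows, assuming the control state is not 'forceoff' or 'notsupported'::
+
+   # echo off > /sys/devices/system/cpu/smt/control
+   # cat /sys/devices/system/cpu/smt/active
+   0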
+
+ SMT control is also possible at boot time via the l1tf kernel command
+ line parameter in combination with L1D flush control. See
+ :ref:`mitigation_control_command_line`.
+
+5. Disabling EPT
+^^^^^^^^^^^^^^^^
+
+ Disabling EPT for virtual machines provides full mitigation for L1TF even
+ with SMT enabled, because the effective page tables for guests are
+ managed and sanitized by the hypervisor. However, disabling EPT has a
+ significant performance impact, especially when the Meltdown mitigation
+ KPTI is enabled.
+
+ EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
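+
+ For example, assuming no virtual machines are running, EPT could be
+ switched off by reloading the module::
+
+   modprobe -r kvm_intel
+   modprobe kvm_intel ept=0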
+
+There is ongoing research and development for new mitigation mechanisms to
+address the performance impact of disabling SMT or EPT.
+
+.. _mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows controlling the L1TF mitigations at boot
+time with the option "l1tf=". The valid arguments for this option are:
+
+ ============ =============================================================
+ full         Provides all available mitigations for the L1TF
+              vulnerability. Disables SMT and enables all mitigations in
+              the hypervisors, i.e. unconditional L1D flushing.
+
+              SMT control and L1D flush control via the sysfs interface
+              is still possible after boot. Hypervisors will issue a
+              warning when the first VM is started in a potentially
+              insecure configuration, i.e. SMT enabled or L1D flush
+              disabled.
+
+ full,force   Same as 'full', but disables SMT and L1D flush runtime
+              control. Implies the 'nosmt=force' command line option.
+              (i.e. sysfs control of SMT is disabled.)
+
+ flush        Leaves SMT enabled and enables the default hypervisor
+              mitigation, i.e. conditional L1D flushing.
+
+              SMT control and L1D flush control via the sysfs interface
+              is still possible after boot. Hypervisors will issue a
+              warning when the first VM is started in a potentially
+              insecure configuration, i.e. SMT enabled or L1D flush
+              disabled.
+
+ flush,nosmt  Disables SMT and enables the default hypervisor mitigation,
+              i.e. conditional L1D flushing.
+
+              SMT control and L1D flush control via the sysfs interface
+              is still possible after boot. Hypervisors will issue a
+              warning when the first VM is started in a potentially
+              insecure configuration, i.e. SMT enabled or L1D flush
+              disabled.
+
+ flush,nowarn Same as 'flush', but hypervisors will not warn when a VM is
+              started in a potentially insecure configuration.
+
+ off          Disables hypervisor mitigations and doesn't emit any
+              warnings.
+ ============ =============================================================
+
+The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
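+
+For example, booting with::
+
+  l1tf=full,force
+
+disables SMT, enforces unconditional L1D flushing and removes the runtime
+control described in the table above.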
+
+
+.. _mitigation_control_kvm:
+
+Mitigation control for KVM - module parameter
+-------------------------------------------------------------
+
+The KVM hypervisor mitigation mechanism, flushing the L1D cache when
+entering a guest, can be controlled with a module parameter.
+
+The option/parameter is "kvm-intel.vmentry_l1d_flush=". It takes the
+following arguments:
+
+ ============ ==============================================================
+ always       L1D cache flush on every VMENTER.
+
+ cond         Flush L1D on VMENTER only when the code between VMEXIT and
+              VMENTER can leak host memory which is considered interesting
+              to an attacker. This can still leak host memory which allows
+              e.g. determining the host's address space layout.
+
+ never        Disables the mitigation.
+ ============ ==============================================================
+
+The parameter can be provided on the kernel command line, as a module
+parameter when loading the module, or modified at runtime via the sysfs
+file:
+
+/sys/module/kvm_intel/parameters/vmentry_l1d_flush
+
+The default is 'cond'. If 'l1tf=full,force' is given on the kernel command
+line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
+module parameter is ignored and writes to the sysfs file are rejected.
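+
+For example, unless 'l1tf=full,force' was given, the flush mode can be
+inspected and changed at run time::
+
+  cat /sys/module/kvm_intel/parameters/vmentry_l1d_flush
+  echo always > /sys/module/kvm_intel/parameters/vmentry_l1d_flush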
+
+
+Mitigation selection guide
+--------------------------
+
+1. No virtualization in use
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The system is protected by the kernel unconditionally and no further
+ action is required.
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the guest comes from a trusted source and the guest OS kernel is
+ guaranteed to have the L1TF mitigations in place, the system is fully
+ protected against L1TF and no further action is required.
+
+ To avoid the overhead of the default L1D flushing on VMENTER the
+ administrator can disable the flushing via the kernel command line and
+ sysfs control files. See :ref:`mitigation_control_command_line` and
+ :ref:`mitigation_control_kvm`.
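+
+ For example, with exclusively trusted guests the flush can be switched
+ off at run time via the module parameter's sysfs file::
+
+   echo never > /sys/module/kvm_intel/parameters/vmentry_l1d_flush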
+
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+3.1. SMT not supported or disabled
+""""""""""""""""""""""""""""""""""
+
+ If SMT is not supported by the processor or disabled in the BIOS or by
+ the kernel, it's only required to enforce L1D flushing on VMENTER.
+
+ Conditional L1D flushing is the default behaviour and can be tuned. See
+ :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
+
+3.2. EPT not supported or disabled
+""""""""""""""""""""""""""""""""""
+
+ If EPT is not supported by the processor or disabled in the hypervisor,
+ the system is fully protected. SMT can stay enabled and L1D flushing on
+ VMENTER is not required.
+
+ EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
+
+3.3. SMT and EPT supported and active
+"""""""""""""""""""""""""""""""""""""
+
+ If SMT and EPT are supported and active then various degrees of
+ mitigations can be employed:
+
+ - L1D flushing on VMENTER:
+
+ L1D flushing on VMENTER is the minimal protection requirement, but it
+ is only potent in combination with other mitigation methods.
+
+ Conditional L1D flushing is the default behaviour and can be tuned. See
+ :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
+
+ - Guest confinement:
+
+ Confinement of guests to a single or a group of physical cores which
+ are not running any other processes can reduce the attack surface
+ significantly, but interrupts, soft interrupts and kernel threads can
+ still expose valuable data to a potential attacker. See
+ :ref:`guest_confinement`.
+
+ - Interrupt isolation:
+
+ Isolating the guest CPUs from interrupts can reduce the attack surface
+ further, but still allows a malicious guest to explore a limited amount
+ of host physical memory. This can at least be used to gain knowledge
+ about the host address space layout. Depending on the scenario, the
+ interrupts which have a fixed affinity to the CPUs running the untrusted
+ guests can still trigger soft interrupts and schedule kernel threads
+ which might expose valuable information. See
+ :ref:`interrupt_isolation`.
+
+The above three mitigation methods combined can provide protection to a
+certain degree, but the risk of the remaining attack surface has to be
+carefully analyzed. For full protection the following methods are
+available:
+
+ - Disabling SMT:
+
+ Disabling SMT and enforcing the L1D flushing provides the maximum
+ amount of protection. This mitigation does not depend on any of the
+ above mitigation methods.
+
+ SMT control and L1D flushing can be tuned by the command line
+ parameters 'nosmt', 'l1tf', 'kvm-intel.vmentry_l1d_flush' and at run
+ time with the matching sysfs control files. See :ref:`smt_control`,
+ :ref:`mitigation_control_command_line` and
+ :ref:`mitigation_control_kvm`.
+
+ - Disabling EPT:
+
+ Disabling EPT provides the maximum amount of protection as well. It
+ does not depend on any of the above mitigation methods. SMT can stay
+ enabled and L1D flushing is not required, but the performance impact is
+ significant.
+
+ EPT can be disabled in the hypervisor via the 'kvm-intel.ept'
+ parameter.
+
+3.4. Nested virtual machines
+""""""""""""""""""""""""""""
+
+When nested virtualization is in use, three operating systems are involved:
+the bare metal hypervisor, the nested hypervisor and the nested virtual
+machine. VMENTER operations from the nested hypervisor into the nested
+guest will always be processed by the bare metal hypervisor. If KVM is the
+bare metal hypervisor it will:
+
+ - Flush the L1D cache on every switch from the nested hypervisor to the
+ nested virtual machine, so that the nested hypervisor's secrets are not
+ exposed to the nested virtual machine;
+
+ - Flush the L1D cache on every switch from the nested virtual machine to
+ the nested hypervisor; this is a complex operation, and flushing the L1D
+ cache prevents the bare metal hypervisor's secrets from being exposed to
+ the nested virtual machine;
+
+ - Instruct the nested hypervisor to not perform any L1D cache flush. This
+ is an optimization to avoid double L1D flushing.
+
+
+.. _default_mitigations:
+
+Default mitigations
+-------------------
+
+ The kernel default mitigations for vulnerable processors are:
+
+ - PTE inversion to protect against malicious user space. This is done
+ unconditionally and cannot be controlled.
+
+ - L1D conditional flushing on VMENTER when EPT is enabled for
+ a guest.
+
+ The kernel does not by default enforce the disabling of SMT, which leaves
+ SMT systems vulnerable when running untrusted guests with EPT enabled.
+
+ The rationale for this choice is:
+
+ - Force disabling SMT can break existing setups, especially with
+ unattended updates.
+
+ - If regular users run untrusted guests on their machine, then L1TF is
+ just an add-on to other malware which might be embedded in an untrusted
+ guest, e.g. spam-bots or attacks on the local network.
+
+ There is no technical way to prevent a user from running untrusted code
+ on their machines blindly.
+
+ - It's technically extremely unlikely and from today's knowledge even
+ impossible that L1TF can be exploited via the most popular attack
+ mechanisms like JavaScript because these mechanisms have no way to
+ control PTEs. If this were possible and no other mitigation were
+ available, then the default might be different.
+
+ - The administrators of cloud and hosting setups have to carefully
+ analyze the risk for their scenarios and make the appropriate
+ mitigation choices, which might even vary across their deployed
+ machines and also result in other changes of their overall setup.
+ There is no way for the kernel to provide a sensible default for this
+ kind of scenario.
diff --git a/Documentation/admin-guide/mm/idle_page_tracking.rst b/Documentation/admin-guide/mm/idle_page_tracking.rst
index 6f7b7ca1add3..df9394fb39c2 100644
--- a/Documentation/admin-guide/mm/idle_page_tracking.rst
+++ b/Documentation/admin-guide/mm/idle_page_tracking.rst
@@ -65,6 +65,11 @@ workload one should:
are not reclaimable, he or she can filter them out using
``/proc/kpageflags``.
+The page-types tool in the tools/vm directory can be used to assist in this.
+If the tool is run initially with the appropriate option, it will mark all the
+queried pages as idle. Subsequent runs of the tool can then show which pages have
+their idle flag cleared in the interim.
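+
+A hypothetical invocation could look like this; the option name is an
+assumption here, so consult the tool's help output for the authoritative
+spelling::
+
+    page-types --mark-idle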
+
See :ref:`Documentation/admin-guide/mm/pagemap.rst <pagemap>` for more
information about ``/proc/pid/pagemap``, ``/proc/kpageflags``, and
``/proc/kpagecgroup``.
diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst
index 577af85beb41..3f7bade2c231 100644
--- a/Documentation/admin-guide/mm/pagemap.rst
+++ b/Documentation/admin-guide/mm/pagemap.rst
@@ -44,6 +44,9 @@ There are four components to pagemap:
* ``/proc/kpagecount``. This file contains a 64-bit count of the number of
times each page is mapped, indexed by PFN.
+The page-types tool in the tools/vm directory can be used to query the
+number of times a page is mapped.
+
* ``/proc/kpageflags``. This file contains a 64-bit set of flags for each
page, indexed by PFN.
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 07f147381f32..ea2dafe49ae8 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -85,3 +85,10 @@ shared_tags=[0/1]: Default: 0
0: Tag set is not shared.
1: Tag set shared between devices for blk-mq. Only makes sense with
nr_devices > 1, otherwise there's no tag set to share.
+
+zoned=[0/1]: Default: 0
+ 0: Block device is exposed as a random-access block device.
+ 1: Block device is exposed as a host-managed zoned block device.
+
+zone_size=[MB]: Default: 256
+ Per zone size when exposed as a zoned block device. Must be a power of two.
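+
+For example, a host-managed zoned null block device with 128 MB zones can
+be created with:
+
+  modprobe null_blk zoned=1 zone_size=128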
diff --git a/Documentation/block/stat.txt b/Documentation/block/stat.txt
index 0dbc946de2ea..0aace9cc536c 100644
--- a/Documentation/block/stat.txt
+++ b/Documentation/block/stat.txt
@@ -31,28 +31,32 @@ write ticks milliseconds total wait time for write requests
in_flight requests number of I/Os currently in flight
io_ticks milliseconds total time this block device has been active
time_in_queue milliseconds total wait time for all requests
+discard I/Os requests number of discard I/Os processed
+discard merges requests number of discard I/Os merged with in-queue I/O
+discard sectors sectors number of sectors discarded
+discard ticks milliseconds total wait time for discard requests
-read I/Os, write I/Os
-=====================
+read I/Os, write I/Os, discard I/Os
+===================================
These values increment when an I/O request completes.
-read merges, write merges
-=========================
+read merges, write merges, discard merges
+=========================================
These values increment when an I/O request is merged with an
already-queued I/O request.
-read sectors, write sectors
-===========================
+read sectors, write sectors, discard sectors
+============================================
-These values count the number of sectors read from or written to this
-block device. The "sectors" in question are the standard UNIX 512-byte
-sectors, not any device- or filesystem-specific block size. The
-counters are incremented when the I/O completes.
+These values count the number of sectors read from, written to, or
+discarded from this block device. The "sectors" in question are the
+standard UNIX 512-byte sectors, not any device- or filesystem-specific
+block size. The counters are incremented when the I/O completes.
-read ticks, write ticks
-=======================
+read ticks, write ticks, discard ticks
+======================================
These values count the number of milliseconds that I/O requests have
waited on this block device. If there are multiple I/O requests waiting,
diff --git a/Documentation/bpf/bpf_devel_QA.rst b/Documentation/bpf/bpf_devel_QA.rst
index 0e7c1d946e83..c9856b927055 100644
--- a/Documentation/bpf/bpf_devel_QA.rst
+++ b/Documentation/bpf/bpf_devel_QA.rst
@@ -106,9 +106,9 @@ into the bpf-next tree will make their way into net-next tree. net and
net-next are both run by David S. Miller. From there, they will go
into the kernel mainline tree run by Linus Torvalds. To read up on the
process of net and net-next being merged into the mainline tree, see
-the `netdev FAQ`_ under:
+the :ref:`netdev-FAQ`.
+
- `Documentation/networking/netdev-FAQ.txt`_
Occasionally, to prevent merge conflicts, we might send pull requests
to other trees (e.g. tracing) with a small subset of the patches, but
@@ -125,8 +125,8 @@ request)::
Q: How do I indicate which tree (bpf vs. bpf-next) my patch should be applied to?
---------------------------------------------------------------------------------
-A: The process is the very same as described in the `netdev FAQ`_, so
-please read up on it. The subject line must indicate whether the
+A: The process is the very same as described in the :ref:`netdev-FAQ`,
+so please read up on it. The subject line must indicate whether the
patch is a fix or rather "next-like" content in order to let the
maintainers know whether it is targeted at bpf or bpf-next.
@@ -184,7 +184,7 @@ ii) run extensive BPF test suite and
Once the BPF pull request was accepted by David S. Miller, then
the patches end up in net or net-next tree, respectively, and
make their way from there further into mainline. Again, see the
-`netdev FAQ`_ for additional information e.g. on how often they are
+:ref:`netdev-FAQ` for additional information e.g. on how often they are
merged to mainline.
Q: How long do I need to wait for feedback on my BPF patches?
@@ -208,7 +208,7 @@ Q: Are patches applied to bpf-next when the merge window is open?
-----------------------------------------------------------------
A: For the time when the merge window is open, bpf-next will not be
processed. This is roughly analogous to net-next patch processing,
-so feel free to read up on the `netdev FAQ`_ about further details.
+so feel free to read up on the :ref:`netdev-FAQ` for further details.
During those two weeks of merge window, we might ask you to resend
your patch series once bpf-next is open again. Once Linus released
@@ -372,7 +372,7 @@ netdev kernel mailing list in Cc and ask for the fix to be queued up:
netdev@vger.kernel.org
The process in general is the same as on netdev itself, see also the
-`netdev FAQ`_ document.
+:ref:`netdev-FAQ`.
Q: Do you also backport to kernels not currently maintained as stable?
----------------------------------------------------------------------
@@ -388,9 +388,7 @@ Q: The BPF patch I am about to submit needs to go to stable as well
What should I do?
A: The same rules apply as with netdev patch submissions in general, see
-`netdev FAQ`_ under:
-
- `Documentation/networking/netdev-FAQ.txt`_
+the :ref:`netdev-FAQ`.
Never add "``Cc: stable@vger.kernel.org``" to the patch description, but
ask the BPF maintainers to queue the patches instead. This can be done
@@ -630,8 +628,7 @@ when:
.. Links
.. _Documentation/process/: https://www.kernel.org/doc/html/latest/process/
.. _MAINTAINERS: ../../MAINTAINERS
-.. _Documentation/networking/netdev-FAQ.txt: ../networking/netdev-FAQ.txt
-.. _netdev FAQ: ../networking/netdev-FAQ.txt
+.. _netdev-FAQ: ../networking/netdev-FAQ.rst
.. _samples/bpf/: ../../samples/bpf/
.. _selftests: ../../tools/testing/selftests/bpf/
.. _Documentation/dev-tools/kselftest.rst:
diff --git a/Documentation/bpf/README.rst b/Documentation/bpf/index.rst
index b9a80c9e9392..00a8450a602f 100644
--- a/Documentation/bpf/README.rst
+++ b/Documentation/bpf/index.rst
@@ -1,5 +1,5 @@
=================
-BPF documentation
+BPF Documentation
=================
This directory contains documentation for the BPF (Berkeley Packet
@@ -22,14 +22,14 @@ Frequently asked questions (FAQ)
Two sets of Questions and Answers (Q&A) are maintained.
-* QA for common questions about BPF see: bpf_design_QA_
+.. toctree::
+ :maxdepth: 1
-* QA for developers interacting with BPF subsystem: bpf_devel_QA_
+ bpf_design_QA
+ bpf_devel_QA
.. Links:
-.. _bpf_design_QA: bpf_design_QA.rst
-.. _bpf_devel_QA: bpf_devel_QA.rst
.. _Documentation/networking/filter.txt: ../networking/filter.txt
.. _man-pages: https://www.kernel.org/doc/man-pages/
.. _bpf(2): http://man7.org/linux/man-pages/man2/bpf.2.html
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 62ac5a9f3a9f..b691af4831fa 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -34,7 +34,7 @@ needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'cdomain', 'kfigure']
+extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'cdomain', 'kfigure', 'sphinx.ext.ifconfig']
# The name of the math extension changed on Sphinx 1.4
if major == 1 and minor > 3:
diff --git a/Documentation/console/console.txt b/Documentation/console/console.txt
index f93810d599ad..d73c2ab4beda 100644
--- a/Documentation/console/console.txt
+++ b/Documentation/console/console.txt
@@ -1,7 +1,7 @@
Console Drivers
===============
-The linux kernel has 2 general types of console drivers. The first type is
+The Linux kernel has 2 general types of console drivers. The first type is
assigned by the kernel to all the virtual consoles during the boot process.
This type will be called 'system driver', and only one system driver is allowed
to exist. The system driver is persistent and it can never be unloaded, though
@@ -17,10 +17,11 @@ of driver occupying the consoles.) They can only take over the console that is
occupied by the system driver. In the same token, if the modular driver is
released by the console, the system driver will take over.
-Modular drivers, from the programmer's point of view, has to call:
+Modular drivers, from the programmer's point of view, have to call:
do_take_over_console() - load and bind driver to console layer
- give_up_console() - unload driver, it will only work if driver is fully unbond
+ give_up_console() - unload driver; it will only work if driver
+ is fully unbound
In newer kernels, the following are also available:
@@ -56,7 +57,7 @@ What do these files signify?
cat /sys/class/vtconsole/vtcon0/name
(S) VGA+
- '(S)' stands for a (S)ystem driver, ie, it cannot be directly
+ '(S)' stands for a (S)ystem driver, i.e., it cannot be directly
commanded to bind or unbind
'VGA+' is the name of the driver
@@ -89,7 +90,7 @@ driver, make changes, recompile, reload and rebind the driver without any need
for rebooting the kernel. For regular users who may want to switch from
framebuffer console to VGA console and vice versa, this feature also makes
this possible. (NOTE NOTE NOTE: Please read fbcon.txt under Documentation/fb
-for more details).
+for more details.)
Notes for developers:
=====================
@@ -110,8 +111,8 @@ In order for binding to and unbinding from the console to properly work,
console drivers must follow these guidelines:
1. All drivers, except system drivers, must call either do_register_con_driver()
- or do_take_over_console(). do_register_con_driver() will just add the driver to
- the console's internal list. It won't take over the
+ or do_take_over_console(). do_register_con_driver() will just add the driver
+ to the console's internal list. It won't take over the
console. do_take_over_console(), as it name implies, will also take over (or
bind to) the console.
diff --git a/Documentation/core-api/atomic_ops.rst b/Documentation/core-api/atomic_ops.rst
index 2e7165f86f55..724583453e1f 100644
--- a/Documentation/core-api/atomic_ops.rst
+++ b/Documentation/core-api/atomic_ops.rst
@@ -29,7 +29,7 @@ updated by one CPU, local_t is probably more appropriate. Please see
local_t.
The first operations to implement for atomic_t's are the initializers and
-plain reads. ::
+plain writes. ::
#define ATOMIC_INIT(i) { (i) }
#define atomic_set(v, i) ((v)->counter = (i))
diff --git a/Documentation/core-api/boot-time-mm.rst b/Documentation/core-api/boot-time-mm.rst
new file mode 100644
index 000000000000..03cb1643f46f
--- /dev/null
+++ b/Documentation/core-api/boot-time-mm.rst
@@ -0,0 +1,92 @@
+===========================
+Boot time memory management
+===========================
+
+Early system initialization cannot use "normal" memory management
+simply because it is not set up yet. But there is still a need to
+allocate memory for various data structures, for instance for the
+physical page allocator. To address this, a specialized allocator
+called the :ref:`Boot Memory Allocator <bootmem>`, or bootmem, was
+introduced. Several years later PowerPC developers added a "Logical
+Memory Blocks" allocator, which was later adopted by other
+architectures and renamed to :ref:`memblock <memblock>`. There is also
+a compatibility layer called `nobootmem` that translates bootmem
+allocation interfaces to memblock calls.
+
+The selection of the early allocator is done using
+``CONFIG_NO_BOOTMEM`` and ``CONFIG_HAVE_MEMBLOCK`` kernel
+configuration options. These options are enabled or disabled
+statically by the architectures' Kconfig files.
+
+* Architectures that rely only on bootmem select
+ ``CONFIG_NO_BOOTMEM=n && CONFIG_HAVE_MEMBLOCK=n``.
+* The users of memblock with the nobootmem compatibility layer set
+ ``CONFIG_NO_BOOTMEM=y && CONFIG_HAVE_MEMBLOCK=y``.
+* And for those that use both memblock and bootmem the configuration
+ includes ``CONFIG_NO_BOOTMEM=n && CONFIG_HAVE_MEMBLOCK=y``.
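+
+For example, which combination a particular kernel build uses can be
+checked directly in its configuration file::
+
+  grep -E 'CONFIG_(NO_BOOTMEM|HAVE_MEMBLOCK)' .config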
+
+Whichever allocator is used, it is the responsibility of the
+architecture specific initialization to set it up in
+:c:func:`setup_arch` and tear it down in :c:func:`mem_init` functions.
+
+Once the early memory management is available, it offers a variety of
+functions and macros for memory allocations. The allocation request
+may be directed to the first (and probably the only) node or to a
+particular node in a NUMA system. There are API variants that panic
+when an allocation fails and those that don't. The more recent and
+advanced memblock even allows controlling its own behaviour.
+
+.. _bootmem:
+
+Bootmem
+=======
+
+(mostly stolen from Mel Gorman's "Understanding the Linux Virtual
+Memory Manager" `book`_)
+
+.. _book: https://www.kernel.org/doc/gorman/
+
+.. kernel-doc:: mm/bootmem.c
+ :doc: bootmem overview
+
+.. _memblock:
+
+Memblock
+========
+
+.. kernel-doc:: mm/memblock.c
+ :doc: memblock overview
+
+
+Functions and structures
+========================
+
+Common API
+----------
+
+The functions that are described in this section are available
+regardless of what early memory manager is enabled.
+
+.. kernel-doc:: mm/nobootmem.c
+
+Bootmem specific API
+--------------------
+
+These interfaces are available only with bootmem, i.e. when ``CONFIG_NO_BOOTMEM=n``.
+
+.. kernel-doc:: include/linux/bootmem.h
+.. kernel-doc:: mm/bootmem.c
+ :nodocs:
+
+Memblock specific API
+---------------------
+
+Here is the description of memblock data structures, functions and
+macros. Some of them are actually internal, but since they are
+documented it would be silly to omit them. Besides, reading the
+descriptions for the internal functions can help to understand what
+really happens under the hood.
+
+.. kernel-doc:: include/linux/memblock.h
+.. kernel-doc:: mm/memblock.c
+ :nodocs:
diff --git a/Documentation/core-api/idr.rst b/Documentation/core-api/idr.rst
index 9078a5c3ac95..d351e880a2f6 100644
--- a/Documentation/core-api/idr.rst
+++ b/Documentation/core-api/idr.rst
@@ -76,4 +76,6 @@ Functions and structures
========================
.. kernel-doc:: include/linux/idr.h
+ :functions:
.. kernel-doc:: lib/idr.c
+ :functions:
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index f5a66b72f984..b5379fb740a5 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -28,6 +28,8 @@ Core utilities
printk-formats
circular-buffers
gfp_mask-from-fs-io
+ timekeeping
+ boot-time-mm
Interfaces for kernel debugging
===============================
diff --git a/Documentation/core-api/timekeeping.rst b/Documentation/core-api/timekeeping.rst
new file mode 100644
index 000000000000..93cbeb9daec0
--- /dev/null
+++ b/Documentation/core-api/timekeeping.rst
@@ -0,0 +1,185 @@
+ktime accessors
+===============
+
+Device drivers can read the current time using ktime_get() and the many
+related functions declared in linux/timekeeping.h. As a rule of thumb,
+using an accessor with a shorter name is preferred over one with a longer
+name if both are equally fit for a particular use case.
+
+Basic ktime_t based interfaces
+------------------------------
+
+The recommended simplest form returns an opaque ktime_t, with variants
+that return time for different clock references:
+
+
+.. c:function:: ktime_t ktime_get( void )
+
+ CLOCK_MONOTONIC
+
+ Useful for reliable timestamps and measuring short time intervals
+ accurately. Starts at system boot time but stops during suspend.
+
+.. c:function:: ktime_t ktime_get_boottime( void )
+
+ CLOCK_BOOTTIME
+
+ Like ktime_get(), but does not stop when suspended. This can be
+ used e.g. for key expiration times that need to be synchronized
+ with other machines across a suspend operation.
+
+.. c:function:: ktime_t ktime_get_real( void )
+
+ CLOCK_REALTIME
+
+ Returns the time relative to the UNIX epoch starting in 1970
+ using the Coordinated Universal Time (UTC), same as gettimeofday()
+ in user space. This is used for all timestamps that need to
+ persist across a reboot, like inode times, but should be avoided
+ for internal uses, since it can jump backwards due to a leap
+ second update, NTP adjustment or settimeofday() operation from
+ user space.
+
+.. c:function:: ktime_t ktime_get_clocktai( void )
+
+ CLOCK_TAI
+
+ Like ktime_get_real(), but uses the International Atomic Time (TAI)
+ reference instead of UTC to avoid jumping on leap second updates.
+ This is rarely useful in the kernel.
+
+.. c:function:: ktime_t ktime_get_raw( void )
+
+ CLOCK_MONOTONIC_RAW
+
+ Like ktime_get(), but runs at the same rate as the hardware
+ clocksource without (NTP) adjustments for clock drift. This is
+ also rarely needed in the kernel.
+
+nanosecond, timespec64, and second output
+-----------------------------------------
+
+For all of the above, there are variants that return the time in a
+different format depending on what is required by the user:
+
+.. c:function:: u64 ktime_get_ns( void )
+ u64 ktime_get_boottime_ns( void )
+ u64 ktime_get_real_ns( void )
+ u64 ktime_get_tai_ns( void )
+ u64 ktime_get_raw_ns( void )
+
+ Same as the plain ktime_get functions, but returning a u64 number
+ of nanoseconds in the respective time reference, which may be
+ more convenient for some callers.
+
+.. c:function:: void ktime_get_ts64( struct timespec64 * )
+ void ktime_get_boottime_ts64( struct timespec64 * )
+ void ktime_get_real_ts64( struct timespec64 * )
+ void ktime_get_clocktai_ts64( struct timespec64 * )
+ void ktime_get_raw_ts64( struct timespec64 * )
+
+ Same as above, but returns the time in a 'struct timespec64', split
+ into seconds and nanoseconds. This can avoid an extra division
+ when printing the time, or when passing it into an external
+ interface that expects a 'timespec' or 'timeval' structure.
+
+.. c:function:: time64_t ktime_get_seconds( void )
+ time64_t ktime_get_boottime_seconds( void )
+ time64_t ktime_get_real_seconds( void )
+ time64_t ktime_get_clocktai_seconds( void )
+ time64_t ktime_get_raw_seconds( void )
+
+ Return a coarse-grained version of the time as a scalar
+ time64_t. This avoids accessing the clock hardware and rounds
+ down the seconds to the full seconds of the last timer tick
+ using the respective reference.
+
+Coarse and fast_ns access
+-------------------------
+
+Some additional variants exist for more specialized cases:
+
+.. c:function:: ktime_t ktime_get_coarse_boottime( void )
+ ktime_t ktime_get_coarse_real( void )
+ ktime_t ktime_get_coarse_clocktai( void )
+ ktime_t ktime_get_coarse_raw( void )
+
+.. c:function:: void ktime_get_coarse_ts64( struct timespec64 * )
+ void ktime_get_coarse_boottime_ts64( struct timespec64 * )
+ void ktime_get_coarse_real_ts64( struct timespec64 * )
+ void ktime_get_coarse_clocktai_ts64( struct timespec64 * )
+ void ktime_get_coarse_raw_ts64( struct timespec64 * )
+
+ These are quicker than the non-coarse versions, but less accurate,
+ corresponding to CLOCK_MONOTONIC_COARSE and CLOCK_REALTIME_COARSE
+ in user space, along with the equivalent boottime/tai/raw
+ timebases not available in user space.
+
+ The time returned here corresponds to the last timer tick, which
+ may be as much as 10ms in the past (for CONFIG_HZ=100), same as
+ reading the 'jiffies' variable. These are only useful when called
+ in a fast path and one still expects better than second accuracy,
+ but can't easily use 'jiffies', e.g. for inode timestamps.
+ Skipping the hardware clock access saves around 100 CPU cycles
+ on most modern machines with a reliable cycle counter, but
+ up to several microseconds on older hardware with an external
+ clocksource.
+
+.. c:function:: u64 ktime_get_mono_fast_ns( void )
+ u64 ktime_get_raw_fast_ns( void )
+ u64 ktime_get_boot_fast_ns( void )
+ u64 ktime_get_real_fast_ns( void )
+
+ These variants are safe to call from any context, including from
+ a non-maskable interrupt (NMI) during a timekeeper update, and
+ while we are entering suspend with the clocksource powered down.
+ This is useful in some tracing or debugging code as well as
+ machine check reporting, but most drivers should never call them,
+ since the time is allowed to jump under certain conditions.
+
+Deprecated time interfaces
+--------------------------
+
+Older kernels used some other interfaces that are now being phased out
+but may appear in third-party drivers being ported here. In particular,
+all interfaces returning a 'struct timeval' or 'struct timespec' have
+been replaced because the tv_sec member overflows in year 2038 on 32-bit
+architectures. These are the recommended replacements:
+
+.. c:function:: void ktime_get_ts( struct timespec * )
+
+ Use ktime_get() or ktime_get_ts64() instead.
+
+.. c:function:: struct timeval do_gettimeofday( void )
+ struct timespec getnstimeofday( void )
+ struct timespec64 getnstimeofday64( void )
+ void ktime_get_real_ts( struct timespec * )
+
+ ktime_get_real_ts64() is a direct replacement, but consider using
+ monotonic time (ktime_get_ts64()) and/or a ktime_t based interface
+ (ktime_get()/ktime_get_real()).
+
+.. c:function:: struct timespec current_kernel_time( void )
+ struct timespec64 current_kernel_time64( void )
+ struct timespec get_monotonic_coarse( void )
+ struct timespec64 get_monotonic_coarse64( void )
+
+ These are replaced by ktime_get_coarse_real_ts64() and
+ ktime_get_coarse_ts64(). However, a lot of code that wants
+ coarse-grained times can use the simple 'jiffies' instead, while
+ some drivers may actually want the higher resolution accessors
+ these days.
+
+.. c:function:: struct timespec getrawmonotonic( void )
+ struct timespec64 getrawmonotonic64( void )
+ struct timespec timekeeping_clocktai( void )
+ struct timespec64 timekeeping_clocktai64( void )
+ struct timespec get_monotonic_boottime( void )
+ struct timespec64 get_monotonic_boottime64( void )
+
+ These are replaced by ktime_get_raw()/ktime_get_raw_ts64(),
+ ktime_get_clocktai()/ktime_get_clocktai_ts64() as well
+ as ktime_get_boottime()/ktime_get_boottime_ts64().
+ However, if the particular choice of clock source is not
+ important for the user, consider converting to
+ ktime_get()/ktime_get_ts64() instead for consistency.
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 006827e30d06..0f6ca8b7261e 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -162,7 +162,7 @@ Code Example For Use of Operational State Memory With SHASH
char *hash_alg_name = "sha1-padlock-nano";
int ret;
- alg = crypto_alloc_shash(hash_alg_name, CRYPTO_ALG_TYPE_SHASH, 0);
+ alg = crypto_alloc_shash(hash_alg_name, 0, 0);
if (IS_ERR(alg)) {
pr_info("can't alloc alg %s\n", hash_alg_name);
return PTR_ERR(alg);
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index 3bf371a938d0..6f653acea248 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -156,6 +156,11 @@ Contributing new tests (details)
installed by the distro on the system should be the primary focus to be able
to find regressions.
+ * If a test needs specific kernel config options enabled, add a config file in
+ the test directory to enable them.
+
+ e.g: tools/testing/selftests/android/ion/config
+
Test Harness
============
diff --git a/Documentation/device-mapper/delay.txt b/Documentation/device-mapper/delay.txt
index 4b1d22a44ce4..6426c45273cb 100644
--- a/Documentation/device-mapper/delay.txt
+++ b/Documentation/device-mapper/delay.txt
@@ -5,7 +5,8 @@ Device-Mapper's "delay" target delays reads and/or writes
and maps them to different devices.
Parameters:
- <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
+ <device> <offset> <delay> [<write_device> <write_offset> <write_delay>
+ [<flush_device> <flush_offset> <flush_delay>]]
With separate write parameters, the first set is only used for reads.
Offsets are specified in sectors.
diff --git a/Documentation/device-mapper/dm-integrity.txt b/Documentation/device-mapper/dm-integrity.txt
index f33e3ade7a09..297251b0d2d5 100644
--- a/Documentation/device-mapper/dm-integrity.txt
+++ b/Documentation/device-mapper/dm-integrity.txt
@@ -113,6 +113,10 @@ internal_hash:algorithm(:key) (the key is optional)
from an upper layer target, such as dm-crypt. The upper layer
target should check the validity of the integrity tags.
+recalculate
+ Recalculate the integrity tags automatically. It is only valid
+ when using internal hash.
+
journal_crypt:algorithm(:key) (the key is optional)
Encrypt the journal using given algorithm to make sure that the
attacker can't read the journal. You can use a block cipher here
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 3d01948ea061..883e7ca5f745 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -28,17 +28,18 @@ administrator some freedom, for example to:
Status
======
-These targets are very much still in the EXPERIMENTAL state. Please
-do not yet rely on them in production. But do experiment and offer us
-feedback. Different use cases will have different performance
-characteristics, for example due to fragmentation of the data volume.
+These targets are considered safe for production use. But different use
+cases will have different performance characteristics, for example due
+to fragmentation of the data volume.
If you find this software is not performing as expected please mail
dm-devel@redhat.com with details and we'll try our best to improve
things for you.
-Userspace tools for checking and repairing the metadata are under
-development.
+Userspace tools for checking and repairing the metadata have been fully
+developed and are available as 'thin_check' and 'thin_repair'. The name
+of the package that provides these utilities varies by distribution (on
+a Red Hat distribution it is named 'device-mapper-persistent-data').
Cookbook
========
@@ -280,7 +281,7 @@ ii) Status
<transaction id> <used metadata blocks>/<total metadata blocks>
<used data blocks>/<total data blocks> <held metadata root>
ro|rw|out_of_data_space [no_]discard_passdown [error|queue]_if_no_space
- needs_check|-
+ needs_check|- metadata_low_watermark
transaction id:
A 64-bit number used by userspace to help synchronise with metadata
@@ -327,6 +328,11 @@ ii) Status
thin-pool can be made fully operational again. '-' indicates
needs_check is not set.
+ metadata_low_watermark:
+ Value of metadata low watermark in blocks. The kernel sets this
+ value internally but userspace needs to know this value to
+ determine if an event was caused by crossing this threshold.
+
iii) Messages
create_thin <dev id>
diff --git a/Documentation/device-mapper/writecache.txt b/Documentation/device-mapper/writecache.txt
index 4424fa2c67d7..01532b3008ae 100644
--- a/Documentation/device-mapper/writecache.txt
+++ b/Documentation/device-mapper/writecache.txt
@@ -15,6 +15,8 @@ Constructor parameters:
size)
5. the number of optional parameters (the parameters with an argument
count as two)
+ start_sector n (default: 0)
+ offset from the start of cache device in 512-byte sectors
high_watermark n (default: 50)
start writeback when the number of used blocks reach this
watermark
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 15ac8e8dcfdf..5d1ad09bafb4 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -39,6 +39,8 @@ its hardware characteristcs.
- System Trace Macrocell:
"arm,coresight-stm", "arm,primecell"; [1]
+ - Coresight Address Translation Unit (CATU)
+ "arm,coresight-catu", "arm,primecell";
* reg: physical base address and length of the register
set(s) of the component.
@@ -84,8 +86,15 @@ its hardware characteristcs.
* Optional property for TMC:
* arm,buffer-size: size of contiguous buffer space for TMC ETR
- (embedded trace router)
+ (embedded trace router). This property is obsolete. The buffer size
+ can be configured dynamically via the buffer_size property in sysfs.
+ * arm,scatter-gather: boolean. Indicates that the TMC-ETR can safely
+ use the SG mode on this system.
+
+* Optional property for CATU:
+ * interrupts: Exactly one SPI may be listed for reporting the address
+ error
Example:
@@ -118,6 +127,35 @@ Example:
};
};
+ etr@20070000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x20070000 0 0x1000>;
+
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* input port */
+ port@0 {
+ reg = <0>;
+ etr_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator2_out_port0>;
+ };
+ };
+
+ /* CATU link represented by output port */
+ port@1 {
+ reg = <1>;
+ etr_out_port: endpoint {
+ remote-endpoint = <&catu_in_port>;
+ };
+ };
+ };
+ };
+
2. Links
replicator {
/* non-configurable replicators don't show up on the
@@ -247,5 +285,23 @@ Example:
};
};
+5. CATU
+
+ catu@207e0000 {
+ compatible = "arm,coresight-catu", "arm,primecell";
+ reg = <0 0x207e0000 0 0x1000>;
+
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ port {
+ catu_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&etr_out_port>;
+ };
+ };
+ };
+
[1]. There is currently two version of STM: STM32 and STM500. Both
have the same HW interface and as such don't need an explicit binding name.
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-ir.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-ir.txt
index 669808b2af49..6dd6f399236d 100644
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-ir.txt
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-ir.txt
@@ -18,9 +18,6 @@ Required properties:
assignment of the interrupt router is required.
Flags get passed only when using GIC as parent. Flags
encoding as documented by the GIC bindings.
-- interrupt-parent: Should be the phandle for the interrupt controller of
- the CPU the device tree is intended to be used on. This
- is either the node of the GIC or NVIC controller.
Example:
mscm_ir: interrupt-controller@40001800 {
diff --git a/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt b/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
index 0b887440e08a..3fd21bb7cb37 100644
--- a/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
@@ -2,14 +2,17 @@ Marvell Armada AP806 System Controller
======================================
The AP806 is one of the two core HW blocks of the Marvell Armada 7K/8K
-SoCs. It contains a system controller, which provides a number
-registers giving access to numerous features: clocks, pin-muxing and
-many other SoC configuration items. This DT binding allows to describe
-this system controller.
+SoCs. It contains system controllers, which provide several registers
+giving access to numerous features: clocks, pin-muxing and many other
+SoC configuration items. This DT binding allows describing these
+system controllers.
For the top level node:
- compatible: must be: "syscon", "simple-mfd";
- - reg: register area of the AP806 system controller
+ - reg: register area of the AP806 system controller
+
+SYSTEM CONTROLLER 0
+===================
Clocks:
-------
@@ -98,3 +101,38 @@ ap_syscon: system-controller@6f4000 {
gpio-ranges = <&ap_pinctrl 0 0 19>;
};
};
+
+SYSTEM CONTROLLER 1
+===================
+
+Thermal:
+--------
+
+For common binding part and usage, refer to
+Documentation/devicetree/bindings/thermal/thermal.txt
+
+The thermal IP can probe the temperature all around the processor. It
+may feature several channels, each of them wired to one sensor.
+
+Required properties:
+- compatible: must be one of:
+ * marvell,armada-ap806-thermal
+- reg: register range associated with the thermal functions.
+
+Optional properties:
+- #thermal-sensor-cells: shall be <1> when thermal-zones subnodes refer
+ to this IP and represents the channel ID. There is one sensor per
+ channel. 0 refers to the thermal IP internal channel, while positive
+ IDs refer to each CPU.
+
+Example:
+ap_syscon1: system-controller@6f8000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x6f8000 0x1000>;
+
+ ap_thermal: thermal-sensor@80 {
+ compatible = "marvell,armada-ap806-thermal";
+ reg = <0x80 0x10>;
+ #thermal-sensor-cells = <1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
index 35c3c3460d17..eddde4faef01 100644
--- a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
@@ -33,3 +33,18 @@ nb_pm: syscon@14000 {
compatible = "marvell,armada-3700-nb-pm", "syscon";
reg = <0x14000 0x60>;
}
+
+AVS
+---
+
+For AVS, another component is needed:
+
+Required properties:
+- compatible : should contain "marvell,armada-3700-avs", "syscon";
+- reg : the register start and length for the AVS
+
+Example:
+avs: avs@11500 {
+ compatible = "marvell,armada-3700-avs", "syscon";
+ reg = <0x11500 0x40>;
+}
diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
index 29cdbae6c5ac..81ce742d2760 100644
--- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
@@ -1,15 +1,18 @@
-Marvell Armada CP110 System Controller 0
-========================================
+Marvell Armada CP110 System Controller
+======================================
The CP110 is one of the two core HW blocks of the Marvell Armada 7K/8K
-SoCs. It contains two sets of system control registers, System
-Controller 0 and System Controller 1. This Device Tree binding allows
-to describe the first system controller, which provides registers to
-configure various aspects of the SoC.
+SoCs. It contains system controllers, which provide several registers
+giving access to numerous features: clocks, pin-muxing and many other
+SoC configuration items. This DT binding allows describing these
+system controllers.
For the top level node:
- compatible: must be: "syscon", "simple-mfd";
- - reg: register area of the CP110 system controller 0
+ - reg: register area of the CP110 system controller
+
+SYSTEM CONTROLLER 0
+===================
Clocks:
-------
@@ -163,26 +166,60 @@ Required properties:
Example:
-cpm_syscon0: system-controller@440000 {
+CP110_LABEL(syscon0): system-controller@440000 {
compatible = "syscon", "simple-mfd";
reg = <0x440000 0x1000>;
- cpm_clk: clock {
+ CP110_LABEL(clk): clock {
compatible = "marvell,cp110-clock";
#clock-cells = <2>;
};
- cpm_pinctrl: pinctrl {
+ CP110_LABEL(pinctrl): pinctrl {
compatible = "marvell,armada-8k-cpm-pinctrl";
};
- cpm_gpio1: gpio@100 {
+ CP110_LABEL(gpio1): gpio@100 {
compatible = "marvell,armada-8k-gpio";
offset = <0x100>;
ngpios = <32>;
gpio-controller;
#gpio-cells = <2>;
- gpio-ranges = <&cpm_pinctrl 0 0 32>;
+ gpio-ranges = <&CP110_LABEL(pinctrl) 0 0 32>;
};
};
+
+SYSTEM CONTROLLER 1
+===================
+
+Thermal:
+--------
+
+The thermal IP can probe the temperature all around the processor. It
+may feature several channels, each of them wired to one sensor.
+
+For common binding part and usage, refer to
+Documentation/devicetree/bindings/thermal/thermal.txt
+
+Required properties:
+- compatible: must be one of:
+ * marvell,armada-cp110-thermal
+- reg: register range associated with the thermal functions.
+
+Optional properties:
+- #thermal-sensor-cells: shall be <1> when thermal-zones subnodes refer
+ to this IP and represents the channel ID. There is one sensor per
+ channel. 0 refers to the thermal IP internal channel.
+
+Example:
+CP110_LABEL(syscon1): system-controller@6f8000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x6f8000 0x1000>;
+
+ CP110_LABEL(thermal): thermal-sensor@70 {
+ compatible = "marvell,armada-cp110-thermal";
+ reg = <0x70 0x10>;
+ #thermal-sensor-cells = <1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek.txt b/Documentation/devicetree/bindings/arm/mediatek.txt
index 7d21ab37c19c..48fac4ec91fc 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek.txt
@@ -11,6 +11,7 @@ compatible: Must contain one of
"mediatek,mt6589"
"mediatek,mt6592"
"mediatek,mt6755"
+ "mediatek,mt6765"
"mediatek,mt6795"
"mediatek,mt6797"
"mediatek,mt7622"
@@ -41,6 +42,9 @@ Supported boards:
- Evaluation phone for MT6755(Helio P10):
Required root node properties:
- compatible = "mediatek,mt6755-evb", "mediatek,mt6755";
+- Evaluation board for MT6765(Helio P22):
+ Required root node properties:
+ - compatible = "mediatek,mt6765-evb", "mediatek,mt6765";
- Evaluation board for MT6795(Helio X10):
Required root node properties:
- compatible = "mediatek,mt6795-evb", "mediatek,mt6795";
diff --git a/Documentation/devicetree/bindings/arm/omap/crossbar.txt b/Documentation/devicetree/bindings/arm/omap/crossbar.txt
index ecb360ed0e33..4cd5d873fc3a 100644
--- a/Documentation/devicetree/bindings/arm/omap/crossbar.txt
+++ b/Documentation/devicetree/bindings/arm/omap/crossbar.txt
@@ -10,7 +10,6 @@ Required properties:
- compatible : Should be "ti,irq-crossbar"
- reg: Base address and the size of the crossbar registers.
- interrupt-controller: indicates that this block is an interrupt controller.
-- interrupt-parent: the interrupt controller this block is connected to.
- ti,max-irqs: Total number of irqs available at the parent interrupt controller.
- ti,max-crossbar-sources: Maximum number of crossbar sources that can be routed.
- ti,reg-size: Size of a individual register in bytes. Every individual
diff --git a/Documentation/devicetree/bindings/arm/samsung/pmu.txt b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
index 16685787d2bd..433bfd7593ac 100644
--- a/Documentation/devicetree/bindings/arm/samsung/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
@@ -40,9 +40,6 @@ following properties:
- #interrupt-cells: must be identical to the that of the parent interrupt
controller.
-- interrupt-parent: a phandle indicating which interrupt controller
- this PMU signals interrupts to.
-
Optional nodes:
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
index bdadc3da9556..6970f30a3770 100644
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
@@ -66,7 +66,7 @@ Required root node properties:
- "insignal,arndale-octa" - for Exynos5420-based Insignal Arndale
Octa board.
- "insignal,origen" - for Exynos4210-based Insignal Origen board.
- - "insignal,origen4412 - for Exynos4412-based Insignal Origen board.
+ - "insignal,origen4412" - for Exynos4412-based Insignal Origen board.
Optional nodes:
diff --git a/Documentation/devicetree/bindings/ata/fsl-sata.txt b/Documentation/devicetree/bindings/ata/fsl-sata.txt
index b46bcf46c3d8..fd63bb3becc9 100644
--- a/Documentation/devicetree/bindings/ata/fsl-sata.txt
+++ b/Documentation/devicetree/bindings/ata/fsl-sata.txt
@@ -16,7 +16,6 @@ Required properties:
4 for controller @ 0x1b000
Optional properties:
-- interrupt-parent : optional, if needed for interrupt mapping
- reg : <registers mapping>
Example:
diff --git a/Documentation/devicetree/bindings/ata/pata-arasan.txt b/Documentation/devicetree/bindings/ata/pata-arasan.txt
index 2aff154be84e..872edc105680 100644
--- a/Documentation/devicetree/bindings/ata/pata-arasan.txt
+++ b/Documentation/devicetree/bindings/ata/pata-arasan.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: "arasan,cf-spear1340"
- reg: Address range of the CF registers
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupt: Should contain the CF interrupt number
- clock-frequency: Interface clock rate, in Hz, one of
25000000
diff --git a/Documentation/devicetree/bindings/board/fsl-board.txt b/Documentation/devicetree/bindings/board/fsl-board.txt
index fb7b03ec2071..eb52f6b35159 100644
--- a/Documentation/devicetree/bindings/board/fsl-board.txt
+++ b/Documentation/devicetree/bindings/board/fsl-board.txt
@@ -29,7 +29,6 @@ Required properties:
- reg: should contain the address and the length of the FPGA register set.
Optional properties:
-- interrupt-parent: should specify phandle for the interrupt controller.
- interrupts: should specify event (wakeup) IRQ.
Example (P1022DS):
diff --git a/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
index 8a6c3c2e58fe..729def62f0c5 100644
--- a/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
+++ b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
@@ -9,8 +9,6 @@ Required properties:
"brcm,bcm7400-gisb-arb" for older 40nm chips and all 65nm chips
"brcm,bcm7038-gisb-arb" for 130nm chips
- reg: specifies the base physical address and size of the registers
-- interrupt-parent: specifies the phandle to the parent interrupt controller
- this arbiter gets interrupt line from
- interrupts: specifies the two interrupts (timeout and TEA) to be used from
the parent interrupt controller
diff --git a/Documentation/devicetree/bindings/clock/actions,s900-cmu.txt b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
index 93e4fb827cd6..d1e60d297387 100644
--- a/Documentation/devicetree/bindings/clock/actions,s900-cmu.txt
+++ b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
@@ -1,12 +1,14 @@
-* Actions S900 Clock Management Unit (CMU)
+* Actions Semi Owl Clock Management Unit (CMU)
-The Actions S900 clock management unit generates and supplies clock to various
-controllers within the SoC. The clock binding described here is applicable to
-S900 SoC.
+The Actions Semi Owl Clock Management Unit generates and supplies clock
+to various controllers within the SoC. The clock binding described here is
+applicable to S900 and S700 SoCs.
Required Properties:
-- compatible: should be "actions,s900-cmu"
+- compatible: should be one of the following:
+ "actions,s900-cmu"
+ "actions,s700-cmu"
- reg: physical base address of the controller and length of memory mapped
region.
- clocks: Reference to the parent clocks ("hosc", "losc")
@@ -15,16 +17,16 @@ Required Properties:
Each clock is assigned an identifier, and client nodes can use this identifier
to specify the clock which they consume.
-All available clocks are defined as preprocessor macros in
-dt-bindings/clock/actions,s900-cmu.h header and can be used in device
-tree sources.
+All available clocks are defined as preprocessor macros in the corresponding
+dt-bindings/clock/actions,s900-cmu.h or actions,s700-cmu.h header and can be
+used in device tree sources.
External clocks:
The hosc clock used as input for the plls is generated outside the SoC. It is
expected that it is defined using standard clock bindings as "hosc".
-Actions S900 CMU also requires one more clock:
+The Actions Semi Owl CMU also requires one more clock:
- "losc" - internal low frequency oscillator
Example: Clock Management Unit node:
diff --git a/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt
new file mode 100644
index 000000000000..61777ad24f61
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt
@@ -0,0 +1,56 @@
+* Amlogic AXG Audio Clock Controllers
+
+The Amlogic AXG audio clock controller generates and supplies clock to the
+other elements of the audio subsystem, such as fifos, i2s, spdif and pdm
+devices.
+
+Required Properties:
+
+- compatible : should be "amlogic,axg-audio-clkc" for the A113X and A113D
+- reg : physical base address of the clock controller and length of
+ memory mapped region.
+- clocks : a list of phandle + clock-specifier pairs for the clocks listed
+ in clock-names.
+- clock-names : must contain the following:
+ * "pclk" - Main peripheral bus clock
+ may contain the following:
+ * "mst_in[0-7]" - 8 input plls to generate clock signals
+ * "slv_sclk[0-9]" - 10 slave bit clocks provided by external
+ components.
+ * "slv_lrclk[0-9]" - 10 slave sample clocks provided by external
+ components.
+- resets : phandle of the internal reset line
+- #clock-cells : should be 1.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/axg-audio-clkc.h header and can be
+used in device tree sources.
+
+Example:
+
+clkc_audio: clock-controller@0 {
+ compatible = "amlogic,axg-audio-clkc";
+ reg = <0x0 0x0 0x0 0xb4>;
+ #clock-cells = <1>;
+
+ clocks = <&clkc CLKID_AUDIO>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL3>,
+ <&clkc CLKID_HIFI_PLL>,
+ <&clkc CLKID_FCLK_DIV3>,
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_GP0_PLL>;
+ clock-names = "pclk",
+ "mst_in0",
+ "mst_in1",
+ "mst_in2",
+ "mst_in3",
+ "mst_in4",
+ "mst_in5",
+ "mst_in6",
+ "mst_in7";
+ resets = <&reset RESET_AUDIO>;
+};
diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt
index 51c259a92d02..09968ee224d8 100644
--- a/Documentation/devicetree/bindings/clock/at91-clock.txt
+++ b/Documentation/devicetree/bindings/clock/at91-clock.txt
@@ -91,6 +91,9 @@ Required properties:
at91 audio pll output on AUDIOPLLCLK that feeds the PMC
and can be used by peripheral clock or generic clock
+ "atmel,sama5d2-clk-i2s-mux" (under pmc node):
+ at91 I2S clock source selection
+
Required properties for SCKC node:
- reg : defines the IO memory reserved for the SCKC.
- #size-cells : shall be 0 (reg is used to encode clk id).
@@ -180,7 +183,6 @@ For example:
};
Required properties for main clock internal RC oscillator:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<0>".
- clock-frequency : define the internal RC oscillator frequency.
@@ -197,7 +199,6 @@ For example:
};
Required properties for main clock oscillator:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<0>".
- #clock-cells : from common clock binding; shall be set to 0.
- clocks : shall encode the main osc source clk sources (see atmel datasheet).
@@ -218,7 +219,6 @@ For example:
};
Required properties for main clock:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<0>".
- #clock-cells : from common clock binding; shall be set to 0.
- clocks : shall encode the main clk sources (see atmel datasheet).
@@ -233,7 +233,6 @@ For example:
};
Required properties for master clock:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<3>".
- #clock-cells : from common clock binding; shall be set to 0.
- clocks : shall be the master clock sources (see atmel datasheet) phandles.
@@ -292,7 +291,6 @@ For example:
Required properties for pll clocks:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<1>".
- #clock-cells : from common clock binding; shall be set to 0.
- clocks : shall be the main clock phandle.
@@ -348,7 +346,6 @@ For example:
};
Required properties for programmable clocks:
-- interrupt-parent : must reference the PMC node.
- #size-cells : shall be 0 (reg is used to encode clk id).
- #address-cells : shall be 1 (reg is used to encode clk id).
- clocks : shall be the programmable clock source phandles.
@@ -451,7 +448,6 @@ For example:
Required properties for utmi clock:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<AT91_PMC_LOCKU IRQ_TYPE_LEVEL_HIGH>".
- #clock-cells : from common clock binding; shall be set to 0.
- clocks : shall be the main clock source phandle.
@@ -507,3 +503,35 @@ For example:
atmel,clk-output-range = <0 83000000>;
};
};
+
+Required properties for I2S mux clocks:
+- #size-cells : shall be 0 (reg is used to encode I2S bus id).
+- #address-cells : shall be 1 (reg is used to encode I2S bus id).
+- name: device tree node describing a specific mux clock.
+ * #clock-cells : from common clock binding; shall be set to 0.
+ * clocks : shall be the mux clock parent phandles; shall be 2 phandles:
+ peripheral and generated clock; the first phandle shall belong to the
+ peripheral clock and the second one shall belong to the generated
+ clock; "clock-indices" property can be user to specify
+ the correct order.
+ * reg: I2S bus id of the corresponding mux clock.
+ e.g. reg = <0>; for i2s0, reg = <1>; for i2s1
+
+For example:
+ i2s_clkmux {
+ compatible = "atmel,sama5d2-clk-i2s-mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2s0muxck: i2s0_muxclk {
+ clocks = <&i2s0_clk>, <&i2s0_gclk>;
+ #clock-cells = <0>;
+ reg = <0>;
+ };
+
+ i2s1muxck: i2s1_muxclk {
+ clocks = <&i2s1_clk>, <&i2s1_gclk>;
+ #clock-cells = <0>;
+ reg = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/maxim,max9485.txt b/Documentation/devicetree/bindings/clock/maxim,max9485.txt
new file mode 100644
index 000000000000..61bec1100a94
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/maxim,max9485.txt
@@ -0,0 +1,59 @@
+Devicetree bindings for Maxim MAX9485 Programmable Audio Clock Generator
+
+This device exposes 4 clocks in total:
+
+- MAX9485_MCLKOUT: A gated, buffered output of the input clock of 27 MHz
+- MAX9485_CLKOUT: A PLL that can be configured to 16 different discrete
+ frequencies
+- MAX9485_CLKOUT[1,2]: Two gated outputs for MAX9485_CLKOUT
+
+MAX9485_CLKOUT[1,2] are children of MAX9485_CLKOUT and pass all rate-setting
+requests up to their parent.
+
+Required properties:
+- compatible: "maxim,max9485"
+- clocks: Input clock, must provide 27.000 MHz
+- clock-names: Must be set to "xclk"
+- #clock-cells: From common clock binding; shall be set to 1
+
+Optional properties:
+- reset-gpios: GPIO descriptor connected to the #RESET input pin
+- vdd-supply: A regulator node for Vdd
+- clock-output-names: Name of output clocks, as defined in common clock
+ bindings
+
+If not explicitly set, the output names are "mclkout", "clkout", "clkout1"
+and "clkout2".
+
+Clocks are defined as preprocessor macros in the dt-binding header.
+
+Example:
+
+ #include <dt-bindings/clock/maxim,max9485.h>
+
+ xo_27mhz: xo-27mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+
+ &i2c0 {
+ max9485: audio-clock@63 {
+ reg = <0x63>;
+ compatible = "maxim,max9485";
+ clock-names = "xclk";
+ clocks = <&xo_27mhz>;
+ reset-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
+ vdd-supply = <&reg_3v3>;
+ #clock-cells = <1>;
+ };
+ };
+
+ // Clock consumer node
+
+ foo@0 {
+ compatible = "bar,foo";
+ /* ... */
+ clock-names = "foo-input-clk";
+ clocks = <&max9485 MAX9485_CLKOUT1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
new file mode 100644
index 000000000000..d639e18d0b85
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
@@ -0,0 +1,19 @@
+Qualcomm Technologies, Inc. Display Clock Controller Binding
+------------------------------------------------------------
+
+Required properties :
+
+- compatible : shall contain "qcom,sdm845-dispcc"
+- reg : shall contain base register location and length.
+- #clock-cells : from common clock binding, shall contain 1.
+- #reset-cells : from common reset binding, shall contain 1.
+- #power-domain-cells : from generic power domain binding, shall contain 1.
+
+Example:
+ dispcc: clock-controller@af00000 {
+ compatible = "qcom,sdm845-dispcc";
+ reg = <0xaf00000 0x100000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt
new file mode 100644
index 000000000000..d60b99756bb9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt
@@ -0,0 +1,43 @@
+* Renesas R9A06G032 SYSCTRL
+
+Required Properties:
+
+ - compatible: Must be:
+ - "renesas,r9a06g032-sysctrl"
+ - reg: Base address and length of the SYSCTRL IO block.
+ - #clock-cells: Must be 1
+ - clocks: References to the parent clocks:
+ - external 40 MHz crystal.
+ - external (optional) 32.768 kHz
+ - external (optional) jtag input
+ - external (optional) RGMII_REFCLK
+ - clock-names: Must be:
+ clock-names = "mclk", "rtc", "jtag", "rgmii_ref_ext";
+
+Examples
+--------
+
+ - SYSCTRL node:
+
+ sysctrl: system-controller@4000c000 {
+ compatible = "renesas,r9a06g032-sysctrl";
+ reg = <0x4000c000 0x1000>;
+ #clock-cells = <1>;
+
+ clocks = <&ext_mclk>, <&ext_rtc_clk>,
+ <&ext_jtag_clk>, <&ext_rgmii_ref>;
+ clock-names = "mclk", "rtc", "jtag", "rgmii_ref_ext";
+ };
+
+ - Other nodes can use the clocks provided by SYSCTRL as in:
+
+ #include <dt-bindings/clock/r9a06g032-sysctrl.h>
+ uart0: serial@40060000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x40060000 0x400>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&sysctrl R9A06G032_CLK_UART0>;
+ clock-names = "baudclk";
+ };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt
new file mode 100644
index 000000000000..39f0c1ac84ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt
@@ -0,0 +1,65 @@
+* Rockchip PX30 Clock and Reset Unit
+
+The PX30 clock controller generates and supplies clock to various
+controllers within the SoC and also implements a reset controller for SoC
+peripherals.
+
+Required Properties:
+
+- compatible: PMU for CRU should be "rockchip,px30-pmucru"
+- compatible: CRU should be "rockchip,px30-cru"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Optional Properties:
+
+- rockchip,grf: phandle to the syscon managing the "general register files"
+ If missing, PLL rates are not changeable due to the missing PLL lock status.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/px30-cru.h headers and can be
+used in device tree sources. Similar macros exist for the reset sources in
+these files.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It is expected
+that they are defined using standard clock bindings with following
+clock-output-names:
+ - "xin24m" - crystal input - required,
+ - "xin32k" - rtc clock - optional,
+ - "i2sx_clkin" - external I2S clock - optional,
+ - "gmac_clkin" - external GMAC clock - optional
+
+Example: Clock controller node:
+
+ pmucru: clock-controller@ff2bc000 {
+ compatible = "rockchip,px30-pmucru";
+ reg = <0x0 0xff2bc000 0x0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ cru: clock-controller@ff2b0000 {
+ compatible = "rockchip,px30-cru";
+ reg = <0x0 0xff2b0000 0x0 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+Example: UART controller node that consumes the clock generated by the clock
+ controller:
+
+ uart0: serial@ff030000 {
+ compatible = "rockchip,px30-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff030000 0x0 0x100>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmucru SCLK_UART0_PMU>, <&pmucru PCLK_UART0_PMU>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/sun8i-de2.txt b/Documentation/devicetree/bindings/clock/sun8i-de2.txt
index f2fa87c4765c..e94582e8b8a9 100644
--- a/Documentation/devicetree/bindings/clock/sun8i-de2.txt
+++ b/Documentation/devicetree/bindings/clock/sun8i-de2.txt
@@ -6,6 +6,7 @@ Required properties :
- "allwinner,sun8i-a83t-de2-clk"
- "allwinner,sun8i-h3-de2-clk"
- "allwinner,sun8i-v3s-de2-clk"
+ - "allwinner,sun50i-a64-de2-clk"
- "allwinner,sun50i-h5-de2-clk"
- reg: Must contain the registers base address and length
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.txt b/Documentation/devicetree/bindings/connector/usb-connector.txt
index e1463f14af38..8855bfcfd778 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.txt
+++ b/Documentation/devicetree/bindings/connector/usb-connector.txt
@@ -15,6 +15,33 @@ Optional properties:
- type: size of the connector, should be specified in case of USB-A, USB-B
non-fullsize connectors: "mini", "micro".
+Optional properties for usb-c-connector:
+- power-role: should be one of "source", "sink" or "dual" (DRP) if the typec
+ connector has power support.
+- try-power-role: preferred power role if "dual" (DRP) can support Try.SNK
+ or Try.SRC; should be "sink" for Try.SNK or "source" for Try.SRC.
+- data-role: should be one of "host", "device" or "dual" (DRD) if the typec
+ connector supports USB data.
+
+Required properties for usb-c-connector with power delivery support:
+- source-pdos: An array of u32 with each entry providing a supported power
+ source data object (PDO); the detailed bit definitions of a PDO can be found
+ in "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.2
+ Source_Capabilities Message; the order of the entries (PDOs) should follow
+ the PD spec chapter 6.4.1. Required for power source and power dual role.
+ User can specify the source PDO array via PDO_FIXED/BATT/VAR() defined in
+ dt-bindings/usb/pd.h.
+- sink-pdos: An array of u32 with each entry providing a supported power
+ sink data object (PDO); the detailed bit definitions of a PDO can be found
+ in "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.3
+ Sink Capabilities Message; the order of the entries (PDOs) should follow
+ the PD spec chapter 6.4.1. Required for power sink and power dual role.
+ User can specify the sink PDO array via PDO_FIXED/BATT/VAR() defined in
+ dt-bindings/usb/pd.h.
+- op-sink-microwatt: Sink required operating power in microwatts. If the
+ source can't offer this power, Capability Mismatch is set. Required for
+ power sink and power dual role.
+
Required nodes:
- any data bus to the connector should be modeled using the OF graph bindings
specified in bindings/graph.txt, unless the bus is between parent node and
@@ -73,3 +100,20 @@ ccic: s2mm005@33 {
};
};
};
+
+3. USB-C connector attached to a typec port controller(ptn5110), which has
+power delivery support and enables drp.
+
+typec: ptn5110@50 {
+ ...
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 12000, 2000)>;
+ op-sink-microwatt = <10000000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt b/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
index af2385795d78..73470ecd1f12 100644
--- a/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
+++ b/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
@@ -29,8 +29,6 @@ Required properties:
- reg: Specifies base physical address and size of the registers.
- interrupts: The interrupt that the AVS CPU will use to interrupt the host
when a command completed.
-- interrupt-parent: The interrupt controller the above interrupt is routed
- through.
- interrupt-names: The name of the interrupt used to interrupt the host.
Optional properties:
diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
index 8c61183b41e0..d87579d63da6 100644
--- a/Documentation/devicetree/bindings/crypto/amd-ccp.txt
+++ b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: Should be "amd,ccp-seattle-v1a"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the CCP interrupt
Optional properties:
diff --git a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
index c2598ab27f2e..999fb2a810f6 100644
--- a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
+++ b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
@@ -7,8 +7,6 @@ Required properties:
- interrupts: Interrupt number for the device.
Optional properties:
-- interrupt-parent: The phandle for the interrupt controller that services
- interrupts for this device.
- clocks: Reference to the crypto engine clock.
- dma-coherent: Present if dma operations are coherent.
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
index f0d926bf9f36..125f155d00d0 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
@@ -50,11 +50,6 @@ remaining bits are reserved for future SEC EUs.
..and so on and so forth.
-Optional properties:
-
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
-
Example:
/* MPC8548E */
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index 3c1f3a229eab..2fe245ca816a 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -99,13 +99,6 @@ PROPERTIES
of the specifier is defined by the binding document
describing the node's interrupt parent.
- - interrupt-parent
- Usage: (required if interrupt property is defined)
- Value type: <phandle>
- Definition: A single <phandle> value that points
- to the interrupt parent to which the child domain
- is being mapped.
-
- clocks
Usage: required if SEC 4.0 requires explicit enablement of clocks
Value type: <prop_encoded-array>
@@ -199,13 +192,6 @@ Job Ring (JR) Node
of the specifier is defined by the binding document
describing the node's interrupt parent.
- - interrupt-parent
- Usage: (required if interrupt property is defined)
- Value type: <phandle>
- Definition: A single <phandle> value that points
- to the interrupt parent to which the child domain
- is being mapped.
-
EXAMPLE
jr@1000 {
compatible = "fsl,sec-v4.0-job-ring";
@@ -370,13 +356,6 @@ Secure Non-Volatile Storage (SNVS) Node
of the specifier is defined by the binding document
describing the node's interrupt parent.
- - interrupt-parent
- Usage: (required if interrupt property is defined)
- Value type: <phandle>
- Definition: A single <phandle> value that points
- to the interrupt parent to which the child domain
- is being mapped.
-
EXAMPLE
sec_mon@314000 {
compatible = "fsl,sec-v4.0-mon", "syscon";
diff --git a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
new file mode 100644
index 000000000000..78d2db9d4de5
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
@@ -0,0 +1,67 @@
+* Hisilicon hip07 Security Accelerator (SEC)
+
+Required properties:
+- compatible: Must contain one of
+ - "hisilicon,hip06-sec"
+ - "hisilicon,hip07-sec"
+- reg: Memory addresses and lengths of the memory regions through which
+ this device is controlled.
+ Region 0 has registers to control the backend processing engines.
+ Region 1 has registers for functionality common to all queues.
+ Regions 2-17 have registers for the 16 individual queues, which are isolated
+ both in hardware and within the driver.
+- interrupts: Interrupt specifiers.
+ Refer to interrupt-controller/interrupts.txt for generic interrupt client node
+ bindings.
+ Interrupt 0 is for the SEC unit error queue.
+ Interrupt 2N + 1 is the completion interrupt for queue N.
+ Interrupt 2N + 2 is the error interrupt for queue N.
+- dma-coherent: The driver assumes coherent dma is possible.
+
+Optional properties:
+- iommus: The SEC units are behind smmu-v3 iommus.
+ Refer to iommu/arm,smmu-v3.txt for more information.
+
+Example:
+
+p1_sec_a: crypto@400,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x400 0xd0000000 0x0 0x10000
+ 0x400 0xd2000000 0x0 0x10000
+ 0x400 0xd2010000 0x0 0x10000
+ 0x400 0xd2020000 0x0 0x10000
+ 0x400 0xd2030000 0x0 0x10000
+ 0x400 0xd2040000 0x0 0x10000
+ 0x400 0xd2050000 0x0 0x10000
+ 0x400 0xd2060000 0x0 0x10000
+ 0x400 0xd2070000 0x0 0x10000
+ 0x400 0xd2080000 0x0 0x10000
+ 0x400 0xd2090000 0x0 0x10000
+ 0x400 0xd20a0000 0x0 0x10000
+ 0x400 0xd20b0000 0x0 0x10000
+ 0x400 0xd20c0000 0x0 0x10000
+ 0x400 0xd20d0000 0x0 0x10000
+ 0x400 0xd20e0000 0x0 0x10000
+ 0x400 0xd20f0000 0x0 0x10000
+ 0x400 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p1_mbigen_sec_a>;
+ iommus = <&p1_smmu_alg_a 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+};
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index 5dba55cdfa63..3bbf144c9988 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -1,8 +1,9 @@
Inside Secure SafeXcel cryptographic engine
Required properties:
-- compatible: Should be "inside-secure,safexcel-eip197" or
- "inside-secure,safexcel-eip97".
+- compatible: Should be "inside-secure,safexcel-eip197b",
+ "inside-secure,safexcel-eip197d" or
+ "inside-secure,safexcel-eip97ies".
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt numbers for the rings and engine.
- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
@@ -14,10 +15,18 @@ Optional properties:
name must be "core" for the first clock and "reg" for
the second one.
+Backward compatibility:
+Two compatibles are kept for backward compatibility, but shouldn't be used for
+new submissions:
+- "inside-secure,safexcel-eip197" is equivalent to
+ "inside-secure,safexcel-eip197b".
+- "inside-secure,safexcel-eip97" is equivalent to
+ "inside-secure,safexcel-eip97ies".
+
Example:
crypto: crypto@800000 {
- compatible = "inside-secure,safexcel-eip197";
+ compatible = "inside-secure,safexcel-eip197b";
reg = <0x800000 0x200000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/crypto/picochip-spacc.txt b/Documentation/devicetree/bindings/crypto/picochip-spacc.txt
index d8609ece1f4c..df1151f87745 100644
--- a/Documentation/devicetree/bindings/crypto/picochip-spacc.txt
+++ b/Documentation/devicetree/bindings/crypto/picochip-spacc.txt
@@ -7,8 +7,6 @@ Required properties:
- compatible : "picochip,spacc-ipsec" for the IPSEC offload engine
"picochip,spacc-l2" for the femtocell layer 2 ciphering engine.
- reg : Offset and length of the register set for this device
- - interrupt-parent : The interrupt controller that controls the SPAcc
- interrupt.
- interrupts : The interrupt line from the SPAcc.
- ref-clock : The input clock that drives the SPAcc.
diff --git a/Documentation/devicetree/bindings/rng/qcom,prng.txt b/Documentation/devicetree/bindings/crypto/qcom,prng.txt
index 8e5853c2879b..7ee0e9eac973 100644
--- a/Documentation/devicetree/bindings/rng/qcom,prng.txt
+++ b/Documentation/devicetree/bindings/crypto/qcom,prng.txt
@@ -2,7 +2,9 @@ Qualcomm MSM pseudo random number generator.
Required properties:
-- compatible : should be "qcom,prng"
+- compatible : should be "qcom,prng" for MSM8916 and similar SoCs
+ : should be "qcom,prng-ee" for MSM8996 and later SoCs using the EE
+ (Execution Environment) slice of the PRNG
- reg : specifies base physical address and size of the registers map
- clocks : phandle to clock-controller plus clock-specifier pair
- clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block
diff --git a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
index fc2bcbe26b1e..0ec68141f85a 100644
--- a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
+++ b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
@@ -1,14 +1,10 @@
-* Rockchip rk3399 DMC(Dynamic Memory Controller) device
+* Rockchip rk3399 DMC (Dynamic Memory Controller) device
Required properties:
- compatible: Must be "rockchip,rk3399-dmc".
- devfreq-events: Node to get DDR loading, Refer to
- Documentation/devicetree/bindings/devfreq/
+ Documentation/devicetree/bindings/devfreq/event/
rockchip-dfi.txt
-- interrupts: The interrupt number to the CPU. The interrupt
- specifier format depends on the interrupt controller.
- It should be DCF interrupts, when DDR dvfs finish,
- it will happen.
- clocks: Phandles for clock specified in "clock-names" property
- clock-names : The name of clock used by the DFI, must be
"pclk_ddr_mon";
@@ -17,139 +13,148 @@ Required properties:
- center-supply: DMC supply node.
- status: Marks the node enabled/disabled.
-Following properties are ddr timing:
-
-- rockchip,dram_speed_bin : Value reference include/dt-bindings/clock/ddr.h,
- it select ddr3 cl-trp-trcd type, default value
- "DDR3_DEFAULT".it must selected according to
- "Speed Bin" in ddr3 datasheet, DO NOT use
- smaller "Speed Bin" than ddr3 exactly is.
-
-- rockchip,pd_idle : Config the PD_IDLE value, defined the power-down
- idle period, memories are places into power-down
- mode if bus is idle for PD_IDLE DFI clocks.
-
-- rockchip,sr_idle : Configure the SR_IDLE value, defined the
- selfrefresh idle period, memories are places
- into self-refresh mode if bus is idle for
- SR_IDLE*1024 DFI clocks (DFI clocks freq is
- half of dram's clocks), defaule value is "0".
-
-- rockchip,sr_mc_gate_idle : Defined the self-refresh with memory and
- controller clock gating idle period, memories
- are places into self-refresh mode and memory
- controller clock arg gating if bus is idle for
- sr_mc_gate_idle*1024 DFI clocks.
-
-- rockchip,srpd_lite_idle : Defined the self-refresh power down idle
- period, memories are places into self-refresh
- power down mode if bus is idle for
- srpd_lite_idle*1024 DFI clocks. This parameter
- is for LPDDR4 only.
-
-- rockchip,standby_idle : Defined the standby idle period, memories are
- places into self-refresh than controller, pi,
- phy and dram clock will gating if bus is idle
- for standby_idle * DFI clocks.
-
-- rockchip,dram_dll_disb_freq : It's defined the DDR3 dll bypass frequency in
- MHz, when ddr freq less than DRAM_DLL_DISB_FREQ,
- ddr3 dll will bypssed note: if dll was bypassed,
- the odt also stop working.
-
-- rockchip,phy_dll_disb_freq : Defined the PHY dll bypass frequency in
- MHz (Mega Hz), when ddr freq less than
- DRAM_DLL_DISB_FREQ, phy dll will bypssed.
- note: phy dll and phy odt are independent.
-
-- rockchip,ddr3_odt_disb_freq : When dram type is DDR3, this parameter defined
- the odt disable frequency in MHz (Mega Hz),
- when ddr frequency less then ddr3_odt_disb_freq,
- the odt on dram side and controller side are
+Optional properties:
+- interrupts: The CPU interrupt number. The interrupt specifier
+ format depends on the interrupt controller.
+ It should be a DCF interrupt. When DDR DVFS finishes,
+ a DCF interrupt is triggered.
+
+The following properties relate to DDR timing:
+
+- rockchip,dram_speed_bin : Values are defined in include/dt-bindings/clock/rk3399-ddr.h.
+ It selects the DDR3 cl-trp-trcd type. It must be
+ set according to the "Speed Bin" in the DDR3
+ datasheet; DO NOT use a smaller "Speed Bin" than
+ specified for the DDR3 being used.
+
+- rockchip,pd_idle : Configure the PD_IDLE value. Defines the
+ power-down idle period in which memories are
+ placed into power-down mode if the bus is idle
+ for PD_IDLE DFI clock cycles.
+
+- rockchip,sr_idle : Configure the SR_IDLE value. Defines the
+ self-refresh idle period in which memories are
+ placed into self-refresh mode if the bus is idle
+ for SR_IDLE * 1024 DFI clock cycles (the DFI
+ clock frequency is half the DRAM clock). Default
+ value is "0".
+
+- rockchip,sr_mc_gate_idle : Defines the memory self-refresh and controller
+ clock gating idle period. Memories are placed
+ into self-refresh mode and memory controller
+ clock gating is started if the bus is idle for
+ sr_mc_gate_idle * 1024 DFI clock cycles.
+
+- rockchip,srpd_lite_idle : Defines the self-refresh power down idle
+ period in which memories are placed into
+ self-refresh power down mode if the bus is idle
+ for srpd_lite_idle * 1024 DFI clock cycles.
+ This parameter is for LPDDR4 only.
+
+- rockchip,standby_idle : Defines the standby idle period in which
+ memories are placed into self-refresh mode.
+ The controller, pi, PHY and DRAM clock will
+ be gated if the bus is idle for standby_idle * DFI
+ clock cycles.
+
+- rockchip,dram_dll_dis_freq : Defines the DDR3 DLL bypass frequency in MHz.
+ When the DDR frequency is less than
+ dram_dll_dis_freq, the DDR3 DLL will be bypassed.
+ Note: if the DLL is bypassed, the ODT also stops
+ working.
+
+- rockchip,phy_dll_dis_freq : Defines the PHY DLL bypass frequency in
+ MHz (Mega Hz). When the DDR frequency is less than
+ phy_dll_dis_freq, the PHY DLL will be bypassed.
+ Note: PHY DLL and PHY ODT are independent.
+
+- rockchip,ddr3_odt_dis_freq : When the DRAM type is DDR3, this parameter defines
+ the ODT disable frequency in MHz (Mega Hz).
+ When the DDR frequency is less than ddr3_odt_dis_freq,
+ the ODT on the DRAM side and controller side are
both disabled.
-- rockchip,ddr3_drv : When dram type is DDR3, this parameter define
- the dram side driver stength in ohm, default
+- rockchip,ddr3_drv : When the DRAM type is DDR3, this parameter defines
+ the DRAM side driver strength in ohms. Default
value is DDR3_DS_40ohm.
-- rockchip,ddr3_odt : When dram type is DDR3, this parameter define
- the dram side ODT stength in ohm, default value
+- rockchip,ddr3_odt : When the DRAM type is DDR3, this parameter defines
+ the DRAM side ODT strength in ohms. Default value
is DDR3_ODT_120ohm.
-- rockchip,phy_ddr3_ca_drv : When dram type is DDR3, this parameter define
- the phy side CA line(incluing command line,
+- rockchip,phy_ddr3_ca_drv : When the DRAM type is DDR3, this parameter defines
+ the PHY side CA line (including command line,
address line and clock line) driver strength.
Default value is PHY_DRV_ODT_40.
-- rockchip,phy_ddr3_dq_drv : When dram type is DDR3, this parameter define
- the phy side DQ line(incluing DQS/DQ/DM line)
- driver strength. default value is PHY_DRV_ODT_40.
+- rockchip,phy_ddr3_dq_drv : When the DRAM type is DDR3, this parameter defines
+ the PHY side DQ line (including DQS/DQ/DM line)
+ driver strength. Default value is PHY_DRV_ODT_40.
-- rockchip,phy_ddr3_odt : When dram type is DDR3, this parameter define the
- phy side odt strength, default value is
+- rockchip,phy_ddr3_odt : When the DRAM type is DDR3, this parameter defines
+ the PHY side ODT strength. Default value is
PHY_DRV_ODT_240.
-- rockchip,lpddr3_odt_disb_freq : When dram type is LPDDR3, this parameter defined
- then odt disable frequency in MHz (Mega Hz),
- when ddr frequency less then ddr3_odt_disb_freq,
- the odt on dram side and controller side are
+- rockchip,lpddr3_odt_dis_freq : When the DRAM type is LPDDR3, this parameter defines
+ the ODT disable frequency in MHz (Mega Hz).
+ When the DDR frequency is less than lpddr3_odt_dis_freq,
+ the ODT on the DRAM side and controller side are
both disabled.
-- rockchip,lpddr3_drv : When dram type is LPDDR3, this parameter define
- the dram side driver stength in ohm, default
+- rockchip,lpddr3_drv : When the DRAM type is LPDDR3, this parameter defines
+ the DRAM side driver strength in ohms. Default
value is LP3_DS_34ohm.
-- rockchip,lpddr3_odt : When dram type is LPDDR3, this parameter define
- the dram side ODT stength in ohm, default value
+- rockchip,lpddr3_odt : When the DRAM type is LPDDR3, this parameter defines
+ the DRAM side ODT strength in ohms. Default value
is LP3_ODT_240ohm.
-- rockchip,phy_lpddr3_ca_drv : When dram type is LPDDR3, this parameter define
- the phy side CA line(incluing command line,
+- rockchip,phy_lpddr3_ca_drv : When the DRAM type is LPDDR3, this parameter defines
+ the PHY side CA line (including command line,
address line and clock line) driver strength.
- default value is PHY_DRV_ODT_40.
+ Default value is PHY_DRV_ODT_40.
-- rockchip,phy_lpddr3_dq_drv : When dram type is LPDDR3, this parameter define
- the phy side DQ line(incluing DQS/DQ/DM line)
- driver strength. default value is
+- rockchip,phy_lpddr3_dq_drv : When the DRAM type is LPDDR3, this parameter defines
+ the PHY side DQ line (including DQS/DQ/DM line)
+ driver strength. Default value is
PHY_DRV_ODT_40.
- rockchip,phy_lpddr3_odt : When the DRAM type is LPDDR3, this parameter defines
the PHY side ODT strength. Default value is
PHY_DRV_ODT_240.
-- rockchip,lpddr4_odt_disb_freq : When dram type is LPDDR4, this parameter
- defined the odt disable frequency in
- MHz (Mega Hz), when ddr frequency less then
- ddr3_odt_disb_freq, the odt on dram side and
+- rockchip,lpddr4_odt_dis_freq : When the DRAM type is LPDDR4, this parameter
+ defines the ODT disable frequency in
+ MHz (Mega Hz). When the DDR frequency is less than
+ lpddr4_odt_dis_freq, the ODT on the DRAM side and
controller side are both disabled.
-- rockchip,lpddr4_drv : When dram type is LPDDR4, this parameter define
- the dram side driver stength in ohm, default
+- rockchip,lpddr4_drv : When the DRAM type is LPDDR4, this parameter defines
+ the DRAM side driver strength in ohms. Default
value is LP4_PDDS_60ohm.
-- rockchip,lpddr4_dq_odt : When dram type is LPDDR4, this parameter define
- the dram side ODT on dqs/dq line stength in ohm,
- default value is LP4_DQ_ODT_40ohm.
+- rockchip,lpddr4_dq_odt : When the DRAM type is LPDDR4, this parameter defines
+ the DRAM side ODT on DQS/DQ line strength in ohms.
+ Default value is LP4_DQ_ODT_40ohm.
-- rockchip,lpddr4_ca_odt : When dram type is LPDDR4, this parameter define
- the dram side ODT on ca line stength in ohm,
- default value is LP4_CA_ODT_40ohm.
+- rockchip,lpddr4_ca_odt : When the DRAM type is LPDDR4, this parameter defines
+ the DRAM side ODT on CA line strength in ohms.
+ Default value is LP4_CA_ODT_40ohm.
-- rockchip,phy_lpddr4_ca_drv : When dram type is LPDDR4, this parameter define
- the phy side CA line(incluing command address
- line) driver strength. default value is
+- rockchip,phy_lpddr4_ca_drv : When the DRAM type is LPDDR4, this parameter defines
+ the PHY side CA line (including command address
+ line) driver strength. Default value is
PHY_DRV_ODT_40.
-- rockchip,phy_lpddr4_ck_cs_drv : When dram type is LPDDR4, this parameter define
- the phy side clock line and cs line driver
- strength. default value is PHY_DRV_ODT_80.
+- rockchip,phy_lpddr4_ck_cs_drv : When the DRAM type is LPDDR4, this parameter defines
+ the PHY side clock line and CS line driver
+ strength. Default value is PHY_DRV_ODT_80.
-- rockchip,phy_lpddr4_dq_drv : When dram type is LPDDR4, this parameter define
- the phy side DQ line(incluing DQS/DQ/DM line)
- driver strength. default value is PHY_DRV_ODT_80.
+- rockchip,phy_lpddr4_dq_drv : When the DRAM type is LPDDR4, this parameter defines
+ the PHY side DQ line (including DQS/DQ/DM line)
+ driver strength. Default value is PHY_DRV_ODT_80.
-- rockchip,phy_lpddr4_odt : When dram type is LPDDR4, this parameter define
- the phy side odt strength, default value is
+- rockchip,phy_lpddr4_odt : When the DRAM type is LPDDR4, this parameter defines
+ the PHY side ODT strength. Default value is
PHY_DRV_ODT_60.
Example:
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
index 284e2b14cfbe..26649b4c4dd8 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
+++ b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
@@ -74,6 +74,12 @@ Required properties for DSI:
The 3 clocks output from the DSI analog PHY: dsi[01]_byte,
dsi[01]_ddr2, and dsi[01]_ddr
+Required properties for the TXP (writeback) block:
+- compatible: Should be "brcm,bcm2835-txp"
+- reg: Physical base address and length of the TXP block's registers
+- interrupts: The interrupt number
+ See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+
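+Example for the TXP block (a minimal sketch; the unit address and interrupt
+number are assumptions based on the BCM2835 SoC layout, not part of this
+binding text):
+
+txp: txp@7e004000 {
+ compatible = "brcm,bcm2835-txp";
+ reg = <0x7e004000 0x20>;
+ interrupts = <1 11>;
+};
+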
[1] Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
index 0c7473dd0e51..027d76c27a41 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
+++ b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
@@ -15,8 +15,6 @@ Required properties for dp-controller:
from common clock binding: handle to dp clock.
-clock-names:
from common clock binding: Shall be "dp".
- -interrupt-parent:
- phandle to Interrupt combiner node.
-phys:
from general PHY binding: the phandle for the PHY device.
-phy-names:
diff --git a/Documentation/devicetree/bindings/display/bridge/anx7814.txt b/Documentation/devicetree/bindings/display/bridge/anx7814.txt
index b2a22c28c9b3..dbd7c84ee584 100644
--- a/Documentation/devicetree/bindings/display/bridge/anx7814.txt
+++ b/Documentation/devicetree/bindings/display/bridge/anx7814.txt
@@ -8,8 +8,6 @@ Required properties:
- compatible : "analogix,anx7814"
- reg : I2C address of the device
- - interrupt-parent : Should be the phandle of the interrupt controller
- that services interrupts for this device
- interrupts : Should contain the INTP interrupt
- hpd-gpios : Which GPIO to use for hpd
- pd-gpios : Which GPIO to use for power down
diff --git a/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt b/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt
index aacc8b92968c..09e0a21f705e 100644
--- a/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt
+++ b/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt
@@ -19,8 +19,6 @@ hardware are EDID, HPD, and interrupts.
stdp4028-ge-b850v3-fw required properties:
- compatible : "megachips,stdp4028-ge-b850v3-fw"
- reg : I2C bus address
- - interrupt-parent : phandle of the interrupt controller that services
- interrupts to the device
- interrupts : one interrupt should be described here, as in
<0 IRQ_TYPE_LEVEL_HIGH>
- ports : One input port(reg = <0>) and one output port(reg = <1>)
diff --git a/Documentation/devicetree/bindings/display/bridge/sii902x.txt b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
index 56a3e68ccb80..72d2dc6c3e6b 100644
--- a/Documentation/devicetree/bindings/display/bridge/sii902x.txt
+++ b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
@@ -5,8 +5,8 @@ Required properties:
- reg: i2c address of the bridge
Optional properties:
- - interrupts-extended or interrupt-parent + interrupts: describe
- the interrupt line used to inform the host about hotplug events.
+ - interrupts: describe the interrupt line used to inform the host
+ about hotplug events.
- reset-gpios: OF device-tree gpio specification for RST_N pin.
Optional subnodes:
diff --git a/Documentation/devicetree/bindings/display/bridge/sii9234.txt b/Documentation/devicetree/bindings/display/bridge/sii9234.txt
index 88041ba23d56..a55bf77bd960 100644
--- a/Documentation/devicetree/bindings/display/bridge/sii9234.txt
+++ b/Documentation/devicetree/bindings/display/bridge/sii9234.txt
@@ -7,7 +7,7 @@ Required properties:
- iovcc18-supply : I/O Supply Voltage (1.8V)
- avcc12-supply : TMDS Analog Supply Voltage (1.2V)
- cvcc12-supply : Digital Core Supply Voltage (1.2V)
- - interrupts, interrupt-parent: interrupt specifier of INT pin
+ - interrupts: interrupt specifier of INT pin
- reset-gpios: gpio specifier of RESET pin (active low)
- video interfaces: Device node can contain two video interface port
nodes for HDMI encoder and connector according to [1].
diff --git a/Documentation/devicetree/bindings/display/bridge/sil-sii8620.txt b/Documentation/devicetree/bindings/display/bridge/sil-sii8620.txt
index 9409d9c6a260..b05052f7d62f 100644
--- a/Documentation/devicetree/bindings/display/bridge/sil-sii8620.txt
+++ b/Documentation/devicetree/bindings/display/bridge/sil-sii8620.txt
@@ -5,7 +5,7 @@ Required properties:
- reg: i2c address of the bridge
- cvcc10-supply: Digital Core Supply Voltage (1.0V)
- iovcc18-supply: I/O Supply Voltage (1.8V)
- - interrupts, interrupt-parent: interrupt specifier of INT pin
+ - interrupts: interrupt specifier of INT pin
- reset-gpios: gpio specifier of RESET pin
- clocks, clock-names: specification and name of "xtal" clock
- video interfaces: Device node can contain video interface port
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos7-decon.txt b/Documentation/devicetree/bindings/display/exynos/exynos7-decon.txt
index 9e2e7f6f7609..53912c99ec38 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos7-decon.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos7-decon.txt
@@ -9,9 +9,6 @@ Required properties:
- reg: physical base address and length of the DECON registers set.
-- interrupt-parent: should be the phandle of the decon controller's
- parent interrupt controller.
-
- interrupts: should contain a list of all DECON IP block interrupts in the
order: FIFO Level, VSYNC, LCD_SYSTEM. The interrupt specifier
format depends on the interrupt controller used.
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
index ade5d8eebf85..9b6cba3f82af 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
@@ -25,8 +25,6 @@ Required properties for dp-controller:
from common clock binding: handle to dp clock.
-clock-names:
from common clock binding: Shall be "dp".
- -interrupt-parent:
- phandle to Interrupt combiner node.
-phys:
from general PHY binding: the phandle for the PHY device.
-phy-names:
diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
index 5837402c3ade..b3096421d42b 100644
--- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
@@ -16,9 +16,6 @@ Required properties:
- reg: physical base address and length of the FIMD registers set.
-- interrupt-parent: should be the phandle of the fimd controller's
- parent interrupt controller.
-
- interrupts: should contain a list of all FIMD IP block interrupts in the
order: FIFO Level, VSYNC, LCD_SYSTEM. The interrupt specifier
format depends on the interrupt controller used.
diff --git a/Documentation/devicetree/bindings/display/ht16k33.txt b/Documentation/devicetree/bindings/display/ht16k33.txt
index 8e5b30b87754..d5a8b070b467 100644
--- a/Documentation/devicetree/bindings/display/ht16k33.txt
+++ b/Documentation/devicetree/bindings/display/ht16k33.txt
@@ -4,8 +4,6 @@ Holtek ht16k33 RAM mapping 16*8 LED controller driver with keyscan
Required properties:
- compatible: "holtek,ht16k33"
- reg: I2C slave address of the chip.
-- interrupt-parent: A phandle pointing to the interrupt controller
- serving the interrupt for this chip.
- interrupts: Interrupt specification for the key pressed interrupt.
- refresh-rate-hz: Display update interval in HZ.
- debounce-delay-ms: Debouncing interval time in milliseconds.
diff --git a/Documentation/devicetree/bindings/display/ilitek,ili9341.txt b/Documentation/devicetree/bindings/display/ilitek,ili9341.txt
new file mode 100644
index 000000000000..169b32e4ee4e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ilitek,ili9341.txt
@@ -0,0 +1,27 @@
+Ilitek ILI9341 display panels
+
+This binding is for display panels using an Ilitek ILI9341 controller in SPI
+mode.
+
+Required properties:
+- compatible: "adafruit,yx240qv29", "ilitek,ili9341"
+- dc-gpios: D/C pin
+- reset-gpios: Reset pin
+
+The node for this driver must be a child node of an SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- rotation: panel rotation in degrees counter clockwise (0,90,180,270)
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+ display@0 {
+ compatible = "adafruit,yx240qv29", "ilitek,ili9341";
+ reg = <0>;
+ spi-max-frequency = <32000000>;
+ dc-gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+ rotation = <270>;
+ backlight = <&backlight>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index 383183a89164..8469de510001 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -40,7 +40,7 @@ Required properties (all function blocks):
"mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
"mediatek,<chip>-disp-mutex" - display mutex
"mediatek,<chip>-disp-od" - overdrive
- the supported chips are mt2701 and mt8173.
+ the supported chips are mt2701, mt2712 and mt8173.
- reg: Physical base address and length of the function block register space
- interrupts: The interrupt signal from the function block (required, except for
merge and split function blocks).
diff --git a/Documentation/devicetree/bindings/display/msm/dpu.txt b/Documentation/devicetree/bindings/display/msm/dpu.txt
new file mode 100644
index 000000000000..ad2e8830324e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/dpu.txt
@@ -0,0 +1,131 @@
+Qualcomm Technologies, Inc. DPU KMS
+
+Description:
+
+Device tree bindings for the MSM Mobile Display Subsystem (MDSS) that
+encapsulates sub-blocks like the DPU display controller, DSI and DP
+interfaces, etc. The DPU display controller is found in the SDM845 SoC.
+
+MDSS:
+Required properties:
+- compatible: "qcom,sdm845-mdss"
+- reg: physical base address and length of the controller's registers.
+- reg-names: register region names. The following region is required:
+ * "mdss"
+- power-domains: a power domain consumer specifier according to
+ Documentation/devicetree/bindings/power/power_domain.txt
+- clocks: list of clock specifiers for clocks needed by the device.
+- clock-names: device clock names, must be in same order as clocks property.
+ The following clocks are required:
+ * "iface"
+ * "bus"
+ * "core"
+- interrupts: interrupt signal from MDSS.
+- interrupt-controller: identifies the node as an interrupt controller.
+- #interrupt-cells: specifies the number of cells needed to encode an interrupt
+ source, should be 1.
+- iommus: phandle of iommu device node.
+- #address-cells: number of address cells for the MDSS children. Should be 1.
+- #size-cells: Should be 1.
+- ranges: parent bus address space is the same as the child bus address space.
+
+Optional properties:
+- assigned-clocks: list of clock specifiers for clocks needing rate assignment
+- assigned-clock-rates: list of clock frequencies sorted in the same order as
+ the assigned-clocks property.
+
+MDP:
+Required properties:
+- compatible: "qcom,sdm845-dpu"
+- reg: physical base address and length of controller's registers.
+- reg-names : register region names. The following regions are required:
+ * "mdp"
+ * "vbif"
+- clocks: list of clock specifiers for clocks needed by the device.
+- clock-names: device clock names, must be in same order as clocks property.
+ The following clocks are required.
+ * "bus"
+ * "iface"
+ * "core"
+ * "vsync"
+- interrupts: interrupt line from DPU to MDSS.
+- ports: contains the list of output ports from DPU device. These ports connect
+ to interfaces that are external to the DPU hardware, such as DSI, DP etc.
+
+ Each output port contains an endpoint that describes how it is connected to an
+ external interface. These are described by the standard properties documented
+ here:
+ Documentation/devicetree/bindings/graph.txt
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+
+ Port 0 -> DPU_INTF1 (DSI1)
+ Port 1 -> DPU_INTF2 (DSI2)
+
+Optional properties:
+- assigned-clocks: list of clock specifiers for clocks needing rate assignment
+- assigned-clock-rates: list of clock frequencies sorted in the same order as
+ the assigned-clocks property.
+
+Example:
+
+ mdss: mdss@ae00000 {
+ compatible = "qcom,sdm845-mdss";
+ reg = <0xae00000 0x1000>;
+ reg-names = "mdss";
+
+ power-domains = <&clock_dispcc 0>;
+
+ clocks = <&gcc GCC_DISP_AHB_CLK>, <&gcc GCC_DISP_AXI_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_MDP_CLK>;
+ clock-names = "iface", "bus", "core";
+
+ assigned-clocks = <&clock_dispcc DISP_CC_MDSS_MDP_CLK>;
+ assigned-clock-rates = <300000000>;
+
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ iommus = <&apps_iommu 0>;
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0xae00000 0xb2008>;
+
+ mdss_mdp: mdp@ae01000 {
+ compatible = "qcom,sdm845-dpu";
+ reg = <0 0x1000 0x8f000>, <0 0xb0000 0x2008>;
+ reg-names = "mdp", "vbif";
+
+ clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_AXI_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ clock-names = "iface", "bus", "core", "vsync";
+
+ assigned-clocks = <&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ assigned-clock-rates = <300000000>, <19200000>;
+
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&dsi1_in>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index 518e9cdf0d4b..dfc743219bd8 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -43,8 +43,6 @@ Optional properties:
the master link of the 2-DSI panel.
- qcom,sync-dual-dsi: Boolean value indicating if the DSI controller is
driving a 2-DSI panel whose 2 links need receive command simultaneously.
-- interrupt-parent: phandle to the MDP block if the interrupt signal is routed
- through MDP block
- pinctrl-names: the pin control state names; should contain "default"
- pinctrl-0: the default pinctrl state (active)
- pinctrl-n: the "sleep" pinctrl state
@@ -121,6 +119,20 @@ Required properties:
Optional properties:
- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
regulator is wanted.
+- qcom,mdss-mdp-transfer-time-us: Specifies the DSI transfer time for command-mode
+ panels in microseconds. The driver uses this number to
+ adjust the clock rate according to the expected
+ transfer time.
+ Increasing this value slows down MDP processing
+ and can result in slower performance.
+ Decreasing this value can speed up MDP processing,
+ but this can also impact power consumption.
+ As a rule this time should not be higher than the
+ transfer time expected at the DSI link rate, since
+ that is the maximum transfer time that can be achieved.
+ If ping-pong split is enabled, this time should not be
+ higher than twice the DSI link rate time.
+ If the property is not specified, the default value is 14000 us.
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/graph.txt
@@ -171,6 +183,8 @@ Example:
qcom,master-dsi;
qcom,sync-dual-dsi;
+ qcom,mdss-mdp-transfer-time-us = <12000>;
+
pinctrl-names = "default", "sleep";
pinctrl-0 = <&dsi_active>;
pinctrl-1 = <&dsi_suspend>;
diff --git a/Documentation/devicetree/bindings/display/msm/edp.txt b/Documentation/devicetree/bindings/display/msm/edp.txt
index 95ce19ca7bc5..eff9daff418c 100644
--- a/Documentation/devicetree/bindings/display/msm/edp.txt
+++ b/Documentation/devicetree/bindings/display/msm/edp.txt
@@ -25,10 +25,6 @@ Required properties:
- panel-hpd-gpios: GPIO pin used for eDP hpd.
-Optional properties:
-- interrupt-parent: phandle to the MDP block if the interrupt signal is routed
- through MDP block
-
Example:
mdss_edp: qcom,mdss_edp@fd923400 {
compatible = "qcom,mdss-edp";
diff --git a/Documentation/devicetree/bindings/display/msm/mdp5.txt b/Documentation/devicetree/bindings/display/msm/mdp5.txt
index 1b31977a68ba..4e11338548aa 100644
--- a/Documentation/devicetree/bindings/display/msm/mdp5.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdp5.txt
@@ -41,8 +41,6 @@ Required properties:
- reg-names: The names of register regions. The following regions are required:
* "mdp_phys"
- interrupts: Interrupt line from MDP5 to MDSS interrupt controller.
-- interrupt-parent: phandle to the MDSS block
- through MDP block
- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required.
- * "bus"
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
new file mode 100644
index 000000000000..49e4105378f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
@@ -0,0 +1,29 @@
+AU Optronics Corporation 7.0" FHD (800 x 480) TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g070vvn01"
+- backlight: phandle of the backlight device attached to the panel
+- power-supply: single regulator to provide the supply voltage
+
+Required nodes:
+- port: Parallel port mapping to connect this display
+
+This panel needs a single power supply voltage. Its backlight is controlled
+via a PWM signal.
+
+Example:
+--------
+
+Example device-tree definition when connected to an iMX6Q-based board:
+
+ lcd_panel: lcd-panel {
+ compatible = "auo,g070vvn01";
+ backlight = <&backlight_lcd>;
+ power-supply = <&reg_display>;
+
+ port {
+ lcd_panel_in: endpoint {
+ remote-endpoint = <&lcd_display_out>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt b/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
new file mode 100644
index 000000000000..55183d360032
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
@@ -0,0 +1,28 @@
+BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "boe,hv070wsa-100"
+- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
+- enable-gpios: GPIO pin to enable and disable panel (active high)
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in [1]. This
+node should describe the panel's video bus.
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+
+ panel: panel {
+ compatible = "boe,hv070wsa-100";
+ power-supply = <&vcc_3v3_reg>;
+ enable-gpios = <&gpd1 3 GPIO_ACTIVE_HIGH>;
+ port {
+ panel_ep: endpoint {
+ remote-endpoint = <&bridge_out_ep>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt b/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
new file mode 100644
index 000000000000..897085ee3cd4
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
@@ -0,0 +1,8 @@
+DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
+
+Required properties:
+- compatible: should be "dataimage,scf0700c48ggu18"
+- power-supply: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
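+
+Example (an illustrative sketch of the two required properties; the
+display_reg regulator phandle is a placeholder):
+
+	panel {
+		compatible = "dataimage,scf0700c48ggu18";
+		power-supply = <&display_reg>;
+	};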
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt b/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt
new file mode 100644
index 000000000000..bf06bb025b08
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt
@@ -0,0 +1,13 @@
+DLC Display Co. DLC0700YZG-1 7.0" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "dlc,dlc0700yzg-1"
+- power-supply: See simple-panel.txt
+
+Optional properties:
+- reset-gpios: See panel-common.txt
+- enable-gpios: See simple-panel.txt
+- backlight: See simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
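+
+Example (an illustrative sketch; the regulator and backlight phandles are
+placeholders):
+
+	panel {
+		compatible = "dlc,dlc0700yzg-1";
+		power-supply = <&vcc_3v3_reg>;
+		backlight = <&backlight_lvds>;
+	};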
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
new file mode 100644
index 000000000000..f56b99ebd9be
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
@@ -0,0 +1,39 @@
+Emerging Display Technology Corp. Displays
+==========================================
+
+
+Display bindings for Emerging Display Technology Corp. (EDT) displays that
+are compatible with the simple-panel binding, which is specified in
+simple-panel.txt
+
+
+5,7" WVGA TFT Panels
+--------------------
+
++-----------------+---------------------+-------------------------------------+
+| Identifier      | compatible          | description                         |
++=================+=====================+=====================================+
+| ET057090DHU     | edt,et057090dhu     | 5.7" VGA TFT LCD panel              |
++-----------------+---------------------+-------------------------------------+
+
+
+7,0" WVGA TFT Panels
+--------------------
+
++-----------------+---------------------+-------------------------------------+
+| Identifier      | compatible          | description                         |
++=================+=====================+=====================================+
+| ETM0700G0DH6    | edt,etm0700g0dh6    | WVGA TFT Display with capacitive    |
+|                 |                     | Touchscreen                         |
++-----------------+---------------------+-------------------------------------+
+| ETM0700G0BDH6   | edt,etm0700g0bdh6   | Same as ETM0700G0DH6 but with       |
+|                 |                     | inverted pixel clock.               |
++-----------------+---------------------+-------------------------------------+
+| ETM0700G0EDH6   | edt,etm0700g0edh6   | Same display as the ETM0700G0BDH6,  |
+|                 |                     | but with changed hardware for the   |
+|                 |                     | backlight and the touch interface.  |
++-----------------+---------------------+-------------------------------------+
+| ET070080DH6     | edt,et070080dh6     | Same timings as the ETM0700G0DH6,   |
+|                 |                     | but with resistive touch.           |
++-----------------+---------------------+-------------------------------------+
+
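+Example (an illustrative sketch using one of the compatibles above; the
+backlight phandle is a placeholder):
+
+	panel {
+		compatible = "edt,etm0700g0dh6";
+		backlight = <&backlight>;
+	};
+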
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt b/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt
deleted file mode 100644
index 20cb38e836e4..000000000000
--- a/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Emerging Display Technology Corp. ET070080DH6 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "edt,et070080dh6"
-
-This panel is the same as ETM0700G0DH6 except for the touchscreen.
-ET070080DH6 is the model with resistive touch.
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt b/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt
deleted file mode 100644
index ee4b18053e40..000000000000
--- a/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Emerging Display Technology Corp. ETM0700G0DH6 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "edt,etm0700g0dh6"
-
-This panel is the same as ET070080DH6 except for the touchscreen.
-ETM0700G0DH6 is the model with capacitive multitouch.
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt
new file mode 100644
index 000000000000..4a041acb4e18
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt
@@ -0,0 +1,20 @@
+Ilitek ILI9881c based MIPI-DSI panels
+
+Required properties:
+ - compatible: must be "ilitek,ili9881c" and one of:
+ * "bananapi,lhr050h41"
+ - reg: DSI virtual channel used by that screen
+ - power-supply: phandle to the power regulator
+ - reset-gpios: a GPIO phandle for the reset pin
+
+Optional properties:
+ - backlight: phandle to the backlight used
+
+Example:
+panel@0 {
+ compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
+ reg = <0>;
+ power-supply = <&reg_display>;
+ reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
+ backlight = <&pwm_bl>;
+};
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
new file mode 100644
index 000000000000..7c234cf68e11
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
@@ -0,0 +1,12 @@
+Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,g070y2-l01"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
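+
+Example (an illustrative sketch; the phandles and GPIO line are placeholders):
+
+	panel {
+		compatible = "innolux,g070y2-l01";
+		power-supply = <&vcc_3v3_reg>;
+		backlight = <&backlight>;
+		enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+	};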
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
new file mode 100644
index 000000000000..595d9dfeffd3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
@@ -0,0 +1,24 @@
+Innolux P097PFG 9.7" 1536x2048 TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,p097pfg"
+- reg: DSI virtual channel of the peripheral
+- avdd-supply: phandle of the regulator that provides positive voltage
+- avee-supply: phandle of the regulator that provides negative voltage
+- enable-gpios: panel enable gpio
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+ &mipi_dsi {
+ panel {
+ compatible = "innolux,p079zca";
+ reg = <0>;
+ avdd-supply = <...>;
+ avee-supply = <...>;
+ backlight = <&backlight>;
+ enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt b/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt
new file mode 100644
index 000000000000..a9b35265fa13
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt
@@ -0,0 +1,20 @@
+Innolux TV123WAM 12.3 inch eDP 2K display panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "innolux,tv123wam"
+- power-supply: regulator to provide the supply voltage
+
+Optional properties:
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+ panel_edp: panel-edp {
+ compatible = "innolux,tv123wam";
+ enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
+ power-supply = <&pm8916_l2>;
+ backlight = <&backlight>;
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
new file mode 100644
index 000000000000..164a5fa236da
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
@@ -0,0 +1,22 @@
+Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
+
+Required properties:
+- compatible: should be "kingdisplay,kd097d04"
+- reg: DSI virtual channel of the peripheral
+- power-supply: phandle of the regulator that provides the supply voltage
+- enable-gpios: panel enable gpio
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+ &mipi_dsi {
+ panel {
+ compatible = "kingdisplay,kd097d04";
+ reg = <0>;
+ power-supply = <...>;
+ backlight = <&backlight>;
+ enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et057090dhu.txt b/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
index 4903d7b1d947..e78292b1a131 100644
--- a/Documentation/devicetree/bindings/display/panel/edt,et057090dhu.txt
+++ b/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
@@ -1,7 +1,7 @@
-Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
+Newhaven Display International 480 x 272 TFT LCD panel
Required properties:
-- compatible: should be "edt,et057090dhu"
+- compatible: should be "newhaven,nhd-4.3-480272ef-atxl"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.
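+
+Example (an illustrative sketch; the power-supply and backlight properties
+come from the simple-panel base binding, and the phandles are placeholders):
+
+	panel {
+		compatible = "newhaven,nhd-4.3-480272ef-atxl";
+		power-supply = <&panel_reg>;
+		backlight = <&backlight>;
+	};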
diff --git a/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt b/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
new file mode 100644
index 000000000000..eb1fb9f8d1f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
@@ -0,0 +1,25 @@
+Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "rocktech,rk070er9427"
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Optional nodes:
+- Video port for LCD panel input.
+
+Example:
+ panel {
+ compatible = "rocktech,rk070er9427";
+ backlight = <&backlight_lcd>;
+
+ port {
+ lcd_panel_in: endpoint {
+ remote-endpoint = <&lcd_display_out>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
new file mode 100644
index 000000000000..0753f6967279
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
@@ -0,0 +1,12 @@
+Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq035q7db03"
+- power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
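+
+Example (an illustrative sketch; the phandles and GPIO line are placeholders):
+
+	panel {
+		compatible = "sharp,lq035q7db03";
+		power-supply = <&display_reg>;
+		enable-gpios = <&gpio2 7 GPIO_ACTIVE_HIGH>;
+		backlight = <&backlight>;
+	};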
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 7c6854bd0a04..ec9d34be2ff7 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -19,7 +19,6 @@ Required Properties:
- reg: the memory-mapped I/O registers base address and length
- - interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifiers for the DU interrupts.
- clocks: A list of phandles + clock-specifier pairs, one for each entry in
diff --git a/Documentation/devicetree/bindings/display/sm501fb.txt b/Documentation/devicetree/bindings/display/sm501fb.txt
index 9d9f0098092b..1c79c267a57f 100644
--- a/Documentation/devicetree/bindings/display/sm501fb.txt
+++ b/Documentation/devicetree/bindings/display/sm501fb.txt
@@ -9,8 +9,6 @@ Required properties:
- First entry: System Configuration register
- Second entry: IO space (Display Controller register)
- interrupts : SMI interrupt to the cpu should be described here.
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties:
- mode : select a video mode:
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 3346c1e2a7a0..f8773ecb7525 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -103,6 +103,7 @@ Required properties:
- compatible: value must be one of:
* allwinner,sun8i-a83t-hdmi-phy
* allwinner,sun8i-h3-hdmi-phy
+ * allwinner,sun50i-a64-hdmi-phy
- reg: base address and size of memory-mapped region
- clocks: phandles to the clocks feeding the HDMI PHY
* bus: the HDMI PHY interface clock
@@ -111,8 +112,9 @@ Required properties:
- resets: phandle to the reset controller driving the PHY
- reset-names: must be "phy"
-H3 HDMI PHY requires additional clock:
+H3 and A64 HDMI PHY require additional clocks:
- pll-0: parent of phy clock
+ - pll-1: second possible phy clock parent (A64 only)
TV Encoder
----------
@@ -145,6 +147,7 @@ Required properties:
* allwinner,sun8i-a33-tcon
* allwinner,sun8i-a83t-tcon-lcd
* allwinner,sun8i-a83t-tcon-tv
+ * allwinner,sun8i-r40-tcon-tv
* allwinner,sun8i-v3s-tcon
* allwinner,sun9i-a80-tcon-lcd
* allwinner,sun9i-a80-tcon-tv
@@ -179,7 +182,7 @@ For TCONs with channel 0, there is one more clock required:
For TCONs with channel 1, there is one more clock required:
- 'tcon-ch1': The clock driving the TCON channel 1
-When TCON support LVDS (all TCONs except TV TCON on A83T and those found
+When the TCON supports LVDS (all TCONs except TV TCONs on A83T, R40 and those found
in A13, H3, H5 and V3s SoCs), you need one more reset line:
- 'lvds': The reset line driving the LVDS logic
@@ -187,6 +190,62 @@ And on the A23, A31, A31s and A33, you need one more clock line:
- 'lvds-alt': An alternative clock source, separate from the TCON channel 0
clock, that can be used to drive the LVDS clock
+TCON TOP
+--------
+
+TCON TOP's main purpose is to configure the whole display pipeline. It
+determines the relationships between mixers and TCONs, selects the source
+TCON for HDMI, muxes LCD and TV encoder GPIO output, selects the TV encoder
+clock source and contains additional TV TCON and DSI gates.
+
+It allows the display pipeline to be configured in very different ways:
+
+ / LCD0/LVDS0
+ / [0] TCON-LCD0
+ | \ MIPI DSI
+ mixer0 |
+ \ / [1] TCON-LCD1 - LCD1/LVDS1
+ TCON-TOP
+ / \ [2] TCON-TV0 [0] - TVE0/RGB
+ mixer1 | \
+ | TCON-TOP - HDMI
+ | /
+ \ [3] TCON-TV1 [1] - TVE1/RGB
+
+Note that both TCON TOP instances in the graph above refer to the same
+physical unit. Both mixers can be connected to any TCON.
+
+Required properties:
+ - compatible: value must be one of:
+ * allwinner,sun8i-r40-tcon-top
+ - reg: base address and size of the memory-mapped region.
+ - clocks: phandles to the clocks feeding the TCON TOP
+ * bus: TCON TOP interface clock
+ * tcon-tv0: TCON TV0 clock
+ * tve0: TVE0 clock
+ * tcon-tv1: TCON TV1 clock
+ * tve1: TVE1 clock
+ * dsi: MIPI DSI clock
+ - clock-names: clock names mentioned above
+ - resets: phandle to the reset line driving the TCON TOP
+ - #clock-cells : must contain 1
+ - clock-output-names: Names of clocks created for TCON TV0 channel clock,
+ TCON TV1 channel clock and DSI channel clock, in that order.
+
+ - ports: A ports node with endpoint definitions as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt. 6 ports should
+ be defined:
+ * port 0 is input for mixer0 mux
+ * port 1 is output for mixer0 mux
+ * port 2 is input for mixer1 mux
+ * port 3 is output for mixer1 mux
+ * port 4 is input for HDMI mux
+ * port 5 is output for HDMI mux
+ All output endpoints for mixer muxes and input endpoints for the HDMI mux
+ should have a reg property with the id of the target TCON, as shown in the
+ graph above (0-3 for mixer muxes and 0-1 for the HDMI mux). Each port should
+ have only one endpoint connected to a remote endpoint.
+
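+Example (an illustrative sketch; the unit address, clock/reset indices and
+the remote endpoint are placeholders, and only the first of the six ports is
+shown):
+
+	tcon_top: tcon-top@1c70000 {
+		compatible = "allwinner,sun8i-r40-tcon-top";
+		reg = <0x01c70000 0x1000>;
+		clocks = <&ccu CLK_BUS_TCON_TOP>, <&ccu CLK_TCON_TV0>,
+			 <&ccu CLK_TVE0>, <&ccu CLK_TCON_TV1>,
+			 <&ccu CLK_TVE1>, <&ccu CLK_DSI_DPHY>;
+		clock-names = "bus", "tcon-tv0", "tve0",
+			      "tcon-tv1", "tve1", "dsi";
+		clock-output-names = "tcon-top-tv0", "tcon-top-tv1",
+				     "tcon-top-dsi";
+		resets = <&ccu RST_BUS_TCON_TOP>;
+		#clock-cells = <1>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				tcon_top_mixer0_in: endpoint {
+					remote-endpoint = <&mixer0_out>;
+				};
+			};
+
+			/* ports 1-5 defined analogously */
+		};
+	};
+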
DRC
---
@@ -341,6 +400,7 @@ Required properties:
* allwinner,sun8i-a33-display-engine
* allwinner,sun8i-a83t-display-engine
* allwinner,sun8i-h3-display-engine
+ * allwinner,sun8i-r40-display-engine
* allwinner,sun8i-v3s-display-engine
* allwinner,sun9i-a80-display-engine
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
index 6fddb4f4f71a..7bf1bb444812 100644
--- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
+++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
@@ -8,8 +8,6 @@ Required properties:
- reg: base address and size of the LCDC device
Recommended properties:
- - interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
- ti,hwmods: Name of the hwmod associated to the LCDC
Optional properties:
@@ -36,7 +34,7 @@ Optional nodes:
- port/ports: to describe a connection to an external encoder. The
binding follows Documentation/devicetree/bindings/graph.txt and
- suppors a single port with a single endpoint.
+ supports a single port with a single endpoint.
- See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and
Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting
diff --git a/Documentation/devicetree/bindings/dma/jz4780-dma.txt b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
index f25feee62b15..03e9cf7b42e0 100644
--- a/Documentation/devicetree/bindings/dma/jz4780-dma.txt
+++ b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
@@ -5,7 +5,6 @@ Required properties:
- compatible: Should be "ingenic,jz4780-dma"
- reg: Should contain the DMA controller registers location and length.
- interrupts: Should contain the interrupt specifier of the DMA controller.
-- interrupt-parent: Should be the phandle of the interrupt controller that
- clocks: Should contain a clock specifier for the JZ4780 PDMA clock.
- #dma-cells: Must be <2>. Number of integer cells in the dmas property of
DMA clients (see below).
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
index 1e1dc8f972e4..2f35b047f772 100644
--- a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
@@ -8,7 +8,6 @@ Required properties:
- reg: Should contain DMA registers location and length. This should be
a single entry that includes all of the per-channel registers in one
contiguous bank.
-- interrupt-parent: Phandle to the interrupt parent controller.
- interrupts: Should contain all of the per-channel DMA interrupts in
ascending order with respect to the DMA channel index.
- clocks: Must contain one entry for the ADMA module clock
diff --git a/Documentation/devicetree/bindings/dma/owl-dma.txt b/Documentation/devicetree/bindings/dma/owl-dma.txt
new file mode 100644
index 000000000000..03e9bb12b75f
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/owl-dma.txt
@@ -0,0 +1,47 @@
+* Actions Semi Owl SoCs DMA controller
+
+This binding follows the generic DMA bindings defined in dma.txt.
+
+Required properties:
+- compatible: Should be "actions,s900-dma".
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain 4 interrupts shared by all channels.
+- #dma-cells: Must be <1>. Used to represent the number of integer
+ cells in the dmas property of client devices.
+- dma-channels: Physical channels supported.
+- dma-requests: Number of DMA request signals supported by the controller.
+ Refer to Documentation/devicetree/bindings/dma/dma.txt
+- clocks: Phandle and specifier of the clock feeding the DMA controller.
+
+Example:
+
+Controller:
+ dma: dma-controller@e0260000 {
+ compatible = "actions,s900-dma";
+ reg = <0x0 0xe0260000 0x0 0x1000>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ dma-channels = <12>;
+ dma-requests = <46>;
+ clocks = <&clock CLK_DMAC>;
+ };
+
+Client:
+
+DMA clients connected to the Actions Semi Owl SoCs DMA controller must
+use the format described in the dma.txt file, using a two-cell specifier
+for each channel.
+
+The two cells in order are:
+1. A phandle pointing to the DMA controller.
+2. The channel id.
+
+uart5: serial@e012a000 {
+ ...
+ dma-names = "tx", "rx";
+ dmas = <&dma 26>, <&dma 27>;
+ ...
+};
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index b1ba639554c0..946229c48657 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -29,6 +29,7 @@ Required Properties:
- "renesas,dmac-r8a77965" (R-Car M3-N)
- "renesas,dmac-r8a77970" (R-Car V3M)
- "renesas,dmac-r8a77980" (R-Car V3H)
+ - "renesas,dmac-r8a77990" (R-Car E3)
- "renesas,dmac-r8a77995" (R-Car D3)
- reg: base address and length of the registers block for the DMAC
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
index f237b7928283..dbe160400adc 100644
--- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
@@ -5,8 +5,6 @@ Required properties:
- reg: Address range of the DMAC registers. This should include
all of the per-channel registers.
- interrupt: Should contain the DMAC interrupt number.
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device.
- dma-channels: Number of channels supported by hardware.
- snps,dma-masters: Number of AXI masters supported by the hardware.
- snps,data-width: Maximum AXI data width supported by hardware.
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index 99acc712f83a..39e2b26be344 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -23,8 +23,6 @@ Deprecated properties:
Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- is_private: The device channels should be marked as private and not for by the
general purpose DMA channel allocator. False if not passed.
- multi-block: Multi block transfers supported by hardware. Array property with
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 3f15f6644527..4bbc94d829c8 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -201,7 +201,6 @@ Required properties:
- #dma-cells: Should be set to <1>
Clients should use a single channel number per DMA request.
- reg: Memory map for accessing module
-- interrupt-parent: Interrupt controller the interrupt is routed through
- interrupts: Exactly 3 interrupts need to be specified in the order:
1. Transfer completion interrupt.
2. Memory protection interrupt.
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index a2b8bfaec43c..174af2c45e77 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -66,6 +66,8 @@ Optional child node properties:
Optional child node properties for VDMA:
- xlnx,genlock-mode: Tells Genlock synchronization is
enabled/disabled in hardware.
+- xlnx,enable-vert-flip: Tells whether vertical flip is
+ enabled/disabled in hardware (S2MM path).
Optional child node properties for AXI DMA:
-dma-channels: Number of dma channels in child node.
diff --git a/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt
index a784cdd94790..07a5a7aa9ea0 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt
@@ -5,7 +5,6 @@ control and rate control support for slave/peripheral dma access.
Required properties:
- compatible : Should be "xlnx,zynqmp-dma-1.0"
- reg : Memory map for gdma/adma module access.
-- interrupt-parent : Interrupt controller the interrupt is routed through
- interrupts : Should contain DMA channel interrupt.
- xlnx,bus-width : Axi buswidth in bits. Should contain 128 or 64
- clock-names : List of input clocks "clk_main", "clk_apb"
diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt
index 61d833abafbf..aededdbc262b 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.txt
+++ b/Documentation/devicetree/bindings/eeprom/at24.txt
@@ -72,6 +72,8 @@ Optional properties:
- wp-gpios: GPIO to which the write-protect pin of the chip is connected.
+ - address-width: number of address bits (one of 8, 16).
+
Example:
eeprom@52 {
diff --git a/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt b/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt
index 6dede7d11532..cfcf455ad4de 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt
@@ -11,8 +11,6 @@ for USB D-/D+ switching.
Required properties:
- compatible: Should be "richtek,rt8973a-muic"
- reg: Specifies the I2C slave address of the MUIC block. It should be 0x14
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the interrupts from rt8973a are delivered to.
- interrupts: Interrupt specifiers for detection interrupt sources.
Example:
diff --git a/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt b/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
index 4ecda224955f..fc3888e09549 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
@@ -9,8 +9,6 @@ the host controller using an I2C interface.
Required properties:
- compatible: Should be "siliconmitus,sm5502-muic"
- reg: Specifies the I2C slave address of the MUIC block. It should be 0x25
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the interrupts from sm5502 are delivered to.
- interrupts: Interrupt specifiers for detection interrupt sources.
Example:
diff --git a/Documentation/devicetree/bindings/fsi/fsi-master-ast-cf.txt b/Documentation/devicetree/bindings/fsi/fsi-master-ast-cf.txt
new file mode 100644
index 000000000000..3dc752db748b
--- /dev/null
+++ b/Documentation/devicetree/bindings/fsi/fsi-master-ast-cf.txt
@@ -0,0 +1,36 @@
+Device-tree bindings for ColdFire offloaded gpio-based FSI master driver
+------------------------------------------------------------------------
+
+Required properties:
+ - compatible =
+ "aspeed,ast2400-cf-fsi-master" for an AST2400 based system
+ or
+ "aspeed,ast2500-cf-fsi-master" for an AST2500 based system
+
+ - clock-gpios = <gpio-descriptor>; : GPIO for FSI clock
+ - data-gpios = <gpio-descriptor>; : GPIO for FSI data signal
+ - enable-gpios = <gpio-descriptor>; : GPIO for enable signal
+ - trans-gpios = <gpio-descriptor>; : GPIO for voltage translator enable
+ - mux-gpios = <gpio-descriptor>; : GPIO for pin multiplexing with other
+ functions (e.g. external FSI masters)
+ - memory-region = <phandle>; : Reference to the reserved memory for
+ the ColdFire. Must be 2M aligned on
+ AST2400 and 1M aligned on AST2500
+ - aspeed,sram = <phandle>; : Reference to the SRAM node.
+ - aspeed,cvic = <phandle>; : Reference to the CVIC node.
+
+Examples:
+
+ fsi-master {
+ compatible = "aspeed,ast2500-cf-fsi-master", "fsi-master";
+
+ clock-gpios = <&gpio 0>;
+ data-gpios = <&gpio 1>;
+ enable-gpios = <&gpio 2>;
+ trans-gpios = <&gpio 3>;
+ mux-gpios = <&gpio 4>;
+
+ memory-region = <&coldfire_memory>;
+ aspeed,sram = <&sram>;
+ aspeed,cvic = <&cvic>;
+ };
diff --git a/Documentation/devicetree/bindings/fsi/fsi.txt b/Documentation/devicetree/bindings/fsi/fsi.txt
index ab516c673a4b..afb4eccab131 100644
--- a/Documentation/devicetree/bindings/fsi/fsi.txt
+++ b/Documentation/devicetree/bindings/fsi/fsi.txt
@@ -83,6 +83,10 @@ addresses and sizes in the slave address space:
#address-cells = <1>;
#size-cells = <1>;
+Optionally, a slave can provide a globally unique chip ID, which is used to
+identify the physical location of the chip in a system-specific way:
+
+ chip-id = <0>;
FSI engines (devices)
---------------------
@@ -125,6 +129,7 @@ device tree if no extra platform information is required.
reg = <0 0>;
#address-cells = <1>;
#size-cells = <1>;
+ chip-id = <0>;
/* FSI engine at 0xc00, using a single page. In this example,
* it's an I2C master controller, so subnodes describe the
diff --git a/Documentation/devicetree/bindings/gnss/gnss.txt b/Documentation/devicetree/bindings/gnss/gnss.txt
new file mode 100644
index 000000000000..f1e4a2ff47c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/gnss/gnss.txt
@@ -0,0 +1,36 @@
+GNSS Receiver DT binding
+
+This documents the binding structure and common properties for GNSS receiver
+devices.
+
+A GNSS receiver node is a node named "gnss" and typically resides on a serial
+bus (e.g. UART, I2C or SPI).
+
+Please refer to the following documents for generic properties:
+
+ Documentation/devicetree/bindings/serial/slave-device.txt
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Required properties:
+
+- compatible : A string reflecting the vendor and specific device the node
+ represents
+
+Optional properties:
+- enable-gpios : GPIO used to enable the device
+- timepulse-gpios : Time pulse GPIO
+
+Example:
+
+serial@1234 {
+ compatible = "ns16550a";
+
+ gnss {
+ compatible = "u-blox,neo-8";
+
+ vcc-supply = <&gnss_reg>;
+ timepulse-gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
+
+ current-speed = <4800>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gnss/sirfstar.txt b/Documentation/devicetree/bindings/gnss/sirfstar.txt
new file mode 100644
index 000000000000..648d183cdb77
--- /dev/null
+++ b/Documentation/devicetree/bindings/gnss/sirfstar.txt
@@ -0,0 +1,45 @@
+SiRFstar-based GNSS Receiver DT binding
+
+SiRFstar chipsets are used in GNSS-receiver modules produced by several
+vendors and can use UART, SPI or I2C interfaces.
+
+Please see Documentation/devicetree/bindings/gnss/gnss.txt for generic
+properties.
+
+Required properties:
+
+- compatible : Must be one of
+
+ "fastrax,uc430"
+ "linx,r4"
+ "wi2wi,w2sg0008i"
+ "wi2wi,w2sg0084i"
+
+- vcc-supply : Main voltage regulator (pin name: 3V3_IN, VCC, VDD)
+
+Required properties (I2C):
+- reg : I2C slave address
+
+Required properties (SPI):
+- reg : SPI chip select address
+
+Optional properties:
+
+- sirf,onoff-gpios : GPIO used to power on and off device (pin name: ON_OFF)
+- sirf,wakeup-gpios : GPIO used to determine device power state
+ (pin name: RFPWRUP, WAKEUP)
+- timepulse-gpios : Time pulse GPIO (pin name: 1PPS, TM)
+
+Example:
+
+serial@1234 {
+ compatible = "ns16550a";
+
+ gnss {
+ compatible = "wi2wi,w2sg0084i";
+
+ vcc-supply = <&gnss_reg>;
+ sirf,onoff-gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
+ sirf,wakeup-gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gnss/u-blox.txt b/Documentation/devicetree/bindings/gnss/u-blox.txt
new file mode 100644
index 000000000000..e475659cb85f
--- /dev/null
+++ b/Documentation/devicetree/bindings/gnss/u-blox.txt
@@ -0,0 +1,44 @@
+u-blox GNSS Receiver DT binding
+
+The u-blox GNSS receivers can use UART, DDC (I2C), SPI and USB interfaces.
+
+Please see Documentation/devicetree/bindings/gnss/gnss.txt for generic
+properties.
+
+Required properties:
+
+- compatible : Must be one of
+
+ "u-blox,neo-8"
+ "u-blox,neo-m8"
+
+- vcc-supply : Main voltage regulator
+
+Required properties (DDC):
+- reg : DDC (I2C) slave address
+
+Required properties (SPI):
+- reg : SPI chip select address
+
+Required properties (USB):
+- reg : Number of the USB hub port or the USB host-controller port
+ to which this device is attached
+
+Optional properties:
+
+- timepulse-gpios : Time pulse GPIO
+- u-blox,extint-gpios : GPIO connected to the "external interrupt" input pin
+- v-bckp-supply : Backup voltage regulator
+
+Example:
+
+serial@1234 {
+ compatible = "ns16550a";
+
+ gnss {
+ compatible = "u-blox,neo-8";
+
+ v-bckp-supply = <&gnss_v_bckp_reg>;
+ vcc-supply = <&gnss_vcc_reg>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
index 798cfc9d3839..973362eb3f1e 100644
--- a/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
@@ -25,8 +25,6 @@ Required properties:
- #gpio-cells: Should be two. The first cell is the pin number
and the second cell is used to specify optional
parameters (currently unused).
-- interrupt-parent: Phandle for the interrupt controller that
- services interrupts for this device.
- interrupts: Interrupt mapping for GPIO IRQ.
- gpio-controller: Marks the port as GPIO controller.
diff --git a/Documentation/devicetree/bindings/gpio/abilis,tb10x-gpio.txt b/Documentation/devicetree/bindings/gpio/abilis,tb10x-gpio.txt
index a25c87b650e5..ce19c5660aca 100644
--- a/Documentation/devicetree/bindings/gpio/abilis,tb10x-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/abilis,tb10x-gpio.txt
@@ -14,7 +14,6 @@ Optional Properties:
- #interrupt-cells: Should be <1>. Interrupts are triggered on both edges.
- interrupts: Defines the interrupt line connecting this GPIO controller to
its parent interrupt controller.
-- interrupt-parent: Defines the parent interrupt controller.
GPIO ranges are specified as described in
Documentation/devicetree/bindings/gpio/gpio.txt
diff --git a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
index b405b4410bfb..5d468ecd1809 100644
--- a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
@@ -30,9 +30,6 @@ Optional properties:
- interrupts:
The interrupt shared by all GPIO lines for this controller.
-- interrupt-parent:
- phandle of the parent interrupt controller
-
- interrupts-extended:
Alternate form of specifying interrupts and parents that allows for
multiple parents. This takes precedence over 'interrupts' and
diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
index dbd22e0df21e..b4cd9f906c24 100644
--- a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
@@ -19,6 +19,9 @@ Required properties:
4 = active high level-sensitive.
8 = active low level-sensitive.
+Optional properties:
+- clocks: the input clock for the GPIO block
+
Example:
gpio0: gpio@73f84000 {
diff --git a/Documentation/devicetree/bindings/gpio/gpio-adnp.txt b/Documentation/devicetree/bindings/gpio/gpio-adnp.txt
index af66b2724837..a28902a65a62 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-adnp.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-adnp.txt
@@ -3,7 +3,6 @@ Avionic Design N-bit GPIO expander bindings
Required properties:
- compatible: should be "ad,gpio-adnp"
- reg: The I2C slave address for this device.
-- interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
- #gpio-cells: Should be 2. The first cell is the GPIO number and the
second cell is used to specify optional parameters:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
index fc6378c778c5..7e9b586770b0 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
@@ -17,7 +17,6 @@ Required properties:
Optional properties:
-- interrupt-parent : The parent interrupt controller, optional if inherited
- clocks : A phandle to the clock to use for debounce timings
The gpio and interrupt properties are further described in their respective
diff --git a/Documentation/devicetree/bindings/gpio/gpio-ath79.txt b/Documentation/devicetree/bindings/gpio/gpio-ath79.txt
index c522851017ae..cf71f3ec969d 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-ath79.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-ath79.txt
@@ -12,7 +12,6 @@ Required properties:
- ngpios: Should be set to the number of GPIOs available on the SoC.
Optional properties:
-- interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode interrupt
diff --git a/Documentation/devicetree/bindings/gpio/gpio-davinci.txt b/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
index 8beb0539b6d8..553b92a7e87b 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
@@ -15,8 +15,6 @@ Required Properties:
- first cell is the pin number
- second cell is used to specify optional parameters (unused)
-- interrupt-parent: phandle of the parent interrupt controller.
-
- interrupts: Array of GPIO interrupt number. Only banked or unbanked IRQs are
supported at a time.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-max732x.txt b/Documentation/devicetree/bindings/gpio/gpio-max732x.txt
index 5fdc843b4542..b3a9c0c32823 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-max732x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-max732x.txt
@@ -30,7 +30,6 @@ Optional properties:
- #interrupt-cells: Number of cells to encode an interrupt source, shall be 2.
- first cell is the pin number
- second cell is used to specify flags
- - interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
Please refer to gpio.txt in this directory for details of the common GPIO
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
index 88f228665507..4e3c550e319a 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
@@ -37,6 +37,7 @@ Required properties:
- #interrupt-cells: if to be used as interrupt expander.
Optional properties:
+ - interrupts: interrupt specifier for the device's interrupt output.
- reset-gpios: GPIO specification for the RESET input. This is an
active low signal to the PCA953x.
- vcc-supply: power supply regulator.
@@ -49,6 +50,8 @@ Example:
reg = <0x20>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pca9505>;
+ gpio-controller;
+ #gpio-cells = <2>;
interrupt-parent = <&gpio3>;
interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt b/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
index 7d3bd631d011..a482455a205b 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
@@ -49,7 +49,6 @@ Optional Properties:
- interrupt-controller: Identifies the node as an interrupt controller.
- #interrupt-cells: Number of cells to encode an interrupt source, shall be 2.
- - interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt b/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
index fed9158dd913..f281f12dac18 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
@@ -6,7 +6,6 @@ Required properties:
- gpio-controller: Marks the device node as a GPIO controller.
- #gpio-cells: Should be 2. The first cell is the pin number and the second
cell is used to specify optional parameters.
-- interrupt-parent: Specifies the parent interrupt controller.
- interrupt-controller: Marks the device node as an interrupt controller.
- #interrupt-cells: Should be 2. The first cell defines the interrupt number.
The second cell bits[3:0] is used to specify trigger type as follows:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt b/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt
index 5490c1d68981..e90fb987e25f 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt
@@ -26,7 +26,6 @@ Required properties:
1 = active low
- gpio-controller: Marks the device node as a GPIO controller.
- interrupts: The EXT_INT_0 parent interrupt resource must be listed first.
-- interrupt-parent: Phandle of the parent interrupt controller.
- interrupt-cells: Should be two.
- first cell is 0-N coresponding for EXT_INT_0 to EXT_INT_N.
- second cell is used to specify flags.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt b/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
index 63bf4becd5f0..08eed2335db0 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
@@ -14,8 +14,6 @@ Required properties:
Optional properties:
- interrupts : Interrupt mapping for GPIO IRQ.
-- interrupt-parent : Phandle for the interrupt controller that
- services interrupts for this device.
- xlnx,all-inputs : if n-th bit is setup, GPIO-n is input
- xlnx,dout-default : if n-th bit is 1, GPIO-n default value is 1
- xlnx,gpio-width : gpio width
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xlp.txt b/Documentation/devicetree/bindings/gpio/gpio-xlp.txt
index 28662d83a43e..47fc64922fe0 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-xlp.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-xlp.txt
@@ -30,7 +30,6 @@ Required properties:
4 = active high level-sensitive.
8 = active low level-sensitive.
- interrupts: Interrupt number for this device.
-- interrupt-parent: phandle of the parent interrupt controller.
- interrupt-controller: Identifies the node as an interrupt controller.
Example:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
index 7b542657f259..4fa4eb5507cd 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
@@ -11,7 +11,6 @@ Required properties:
- gpio-controller : Marks the device node as a GPIO controller.
- interrupts : Interrupt specifier (see interrupt bindings for
details)
-- interrupt-parent : Must be core interrupt controller
- interrupt-controller : Marks the device node as an interrupt controller.
- #interrupt-cells : Should be 2. The first cell is the GPIO number.
The second cell bits[3:0] is used to specify trigger type and level flags:
diff --git a/Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt b/Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt
new file mode 100644
index 000000000000..ba455589f869
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt
@@ -0,0 +1,35 @@
+Mediatek MT7621 SoC GPIO controller bindings
+
+The IP core used inside these SoCs has 3 banks of 32 GPIOs each.
+The registers of all the banks are interwoven inside one single IO range.
+One GPIO controller instance is loaded per bank. The GPIO controller can
+receive interrupts on any of the GPIOs, either edge- or level-triggered.
+It then interrupts the CPU using GIC INT12.
+
+Required properties for the top level node:
+- #gpio-cells : Should be two. The first cell is the GPIO pin number and the
+ second cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>.
+ Only the GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt. Should be 2. The first cell defines the interrupt number,
+ the second encodes the trigger flags as described in
+ Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+- compatible:
+ - "mediatek,mt7621-gpio" for Mediatek controllers
+- reg : Physical base address and length of the controller's registers
+- interrupt-parent : phandle of the parent interrupt controller.
+- interrupts : Interrupt specifier for the controllers interrupt.
+- interrupt-controller : Mark the device node as an interrupt controller.
+- gpio-controller : Marks the device node as a GPIO controller.
+
+Example:
+ gpio@600 {
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ compatible = "mediatek,mt7621-gpio";
+ gpio-controller;
+ interrupt-controller;
+ reg = <0x600 0x100>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 12 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
index 20fc72d9e61e..df63da46309c 100644
--- a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
@@ -1,7 +1,7 @@
Nintendo Wii (Hollywood) GPIO controller
Required properties:
-- compatible: "nintendo,hollywood-gpio
+- compatible: "nintendo,hollywood-gpio"
- reg: Physical base address and length of the controller's registers.
- gpio-controller: Marks the device node as a GPIO controller.
- #gpio-cells: Should be <2>. The first cell is the pin number and the
@@ -14,7 +14,6 @@ Optional properties:
- #interrupt-cells: Should be two.
- interrupts: Interrupt specifier for the controller's Broadway (PowerPC)
interrupt.
-- interrupt-parent: phandle of the parent interrupt controller.
Example:
diff --git a/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt b/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt
index c82a2e221bc1..adff16c71d21 100644
--- a/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt
@@ -68,6 +68,8 @@ Required properties:
One of:
- "nvidia,tegra186-gpio".
- "nvidia,tegra186-gpio-aon".
+ - "nvidia,tegra194-gpio".
+ - "nvidia,tegra194-gpio-aon".
- reg-names
Array of strings.
Contains a list of names for the register spaces described by the reg
@@ -91,6 +93,8 @@ Required properties:
depending on compatible value:
- "nvidia,tegra186-gpio": 6 entries.
- "nvidia,tegra186-gpio-aon": 1 entry.
+ - "nvidia,tegra194-gpio": 6 entries.
+ - "nvidia,tegra194-gpio-aon": 1 entry.
- gpio-controller
Boolean.
Marks the device node as a GPIO controller/provider.
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 378f1322211e..4018ee57a6af 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -17,6 +17,7 @@ Required Properties:
- "renesas,gpio-r8a7796": for R8A7796 (R-Car M3-W) compatible GPIO controller.
- "renesas,gpio-r8a77965": for R8A77965 (R-Car M3-N) compatible GPIO controller.
- "renesas,gpio-r8a77970": for R8A77970 (R-Car V3M) compatible GPIO controller.
+ - "renesas,gpio-r8a77980": for R8A77980 (R-Car V3H) compatible GPIO controller.
- "renesas,gpio-r8a77990": for R8A77990 (R-Car E3) compatible GPIO controller.
- "renesas,gpio-r8a77995": for R8A77995 (R-Car D3) compatible GPIO controller.
- "renesas,rcar-gen1-gpio": for a generic R-Car Gen1 GPIO controller.
@@ -31,7 +32,6 @@ Required Properties:
- reg: Base address and length of each memory resource used by the GPIO
controller hardware module.
- - interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
- gpio-controller: Marks the device node as a gpio controller.
diff --git a/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt b/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt
new file mode 100644
index 000000000000..f9231df17c2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt
@@ -0,0 +1,32 @@
+Rockchip RK3328 GRF (General Register Files) GPIO controller.
+
+In Rockchip RK3328, the output-only GPIO_MUTE pin, originally for codec mute
+control, can also be used for general purposes. It is manipulated by the
+GRF_SOC_CON10 register in GRF. Aside from the GPIO_MUTE pin, the HDMI pins can
+also be set in the same way.
+
+Currently this GPIO controller only supports the mute pin. If needed in the
+future, support for the HDMI pins can also be added.
+
+Required properties:
+- compatible: Should contain "rockchip,rk3328-grf-gpio".
+- gpio-controller: Marks the device node as a gpio controller.
+- #gpio-cells: Should be 2. The first cell is the pin number and
+ the second cell is used to specify the gpio polarity:
+ 0 = Active high,
+ 1 = Active low.
+
+Example:
+
+ grf: syscon@ff100000 {
+ compatible = "rockchip,rk3328-grf", "syscon", "simple-mfd";
+
+ grf_gpio: grf-gpio {
+ compatible = "rockchip,rk3328-grf-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+
+Note: The grf_gpio node should be declared as the child of the GRF (General
+Register File) node. The GPIO_MUTE pin is referred to as <&grf_gpio 0>.
diff --git a/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt b/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
index 3c1118bc67f5..7276b50c3506 100644
--- a/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
@@ -25,7 +25,6 @@ controller.
interrupt. Shall be set to 2. The first cell defines the interrupt number,
the second encodes the triger flags encoded as described in
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-- interrupt-parent : The parent interrupt controller.
- interrupts : The interrupts to the parent controller raised when GPIOs
generate the interrupts. If the controller provides one combined interrupt
for all GPIOs, specify a single interrupt. If the controller provides one
diff --git a/Documentation/devicetree/bindings/hsi/omap-ssi.txt b/Documentation/devicetree/bindings/hsi/omap-ssi.txt
index 955e335e7e56..77a0c3c3036e 100644
--- a/Documentation/devicetree/bindings/hsi/omap-ssi.txt
+++ b/Documentation/devicetree/bindings/hsi/omap-ssi.txt
@@ -33,7 +33,6 @@ Required Port sub-node properties:
- reg-names: Contains the values "tx" and "rx" (in this order).
- reg: Contains a matching register specifier for each entry
in reg-names.
-- interrupt-parent Should be a phandle for the interrupt controller
- interrupts: Should contain interrupt specifiers for mpu interrupts
0 and 1 (in this order).
- ti,ssi-cawake-gpio: Defines which GPIO pin is used to signify CAWAKE
diff --git a/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt b/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt
new file mode 100644
index 000000000000..28f43e929f6d
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt
@@ -0,0 +1,84 @@
+Nuvoton NPCM7xx PWM and Fan Tacho controller device
+
+The Nuvoton BMC NPCM7XX supports 8 Pulse-width modulation (PWM)
+controller outputs and 16 Fan tachometer controller inputs.
+
+Required properties for pwm-fan node
+- #address-cells : should be 1.
+- #size-cells : should be 0.
+- compatible : "nuvoton,npcm750-pwm-fan" for Poleg NPCM7XX.
+- reg : specifies physical base address and size of the registers.
+- reg-names : must contain:
+ * "pwm" for the PWM registers.
+ * "fan" for the Fan registers.
+- clocks : phandle of reference clocks.
+- clock-names : must contain
+ * "pwm" for PWM controller operating clock.
+ * "fan" for Fan controller operating clock.
+- interrupts : contain the Fan interrupts with flags for falling edge.
+- pinctrl-names : a pinctrl state named "default" must be defined.
+- pinctrl-0 : phandle referencing pin configuration of the PWM and Fan
+ controller ports.
+
+fan subnode format:
+===================
+The fan subnode can contain up to 8 child nodes, each child node representing
+a fan. Each fan child node must have one PWM channel and at least one Fan
+tach channel.
+
+A PWM channel can be configured with cooling-levels to create a cooling
+device. The cooling device can then be bound to a thermal zone for thermal
+control.
+
+Required properties for each child node:
+- reg : specify the PWM output channel.
+ An integer value in the range 0 through 7 that represents
+ the PWM channel number used.
+
+- fan-tach-ch : specify the Fan tach input channel.
+ An integer value in the range 0 through 15 that represents
+ the fan tach channel number used.
+
+ At least one Fan tach input channel is required.
+
+Optional property for each child node:
+- cooling-levels: PWM duty cycle values in a range from 0 to 255
+ which correspond to thermal cooling states.
+
+Examples:
+
+pwm_fan: pwm-fan-controller@103000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nuvoton,npcm750-pwm-fan";
+ reg = <0x103000 0x2000>,
+ <0x180000 0x8000>;
+ reg-names = "pwm", "fan";
+ clocks = <&clk NPCM7XX_CLK_APB3>,
+ <&clk NPCM7XX_CLK_APB4>;
+ clock-names = "pwm","fan";
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pins &pwm1_pins &pwm2_pins
+ &fanin0_pins &fanin1_pins &fanin2_pins
+ &fanin3_pins &fanin4_pins>;
+ fan@0 {
+ reg = <0x00>;
+ fan-tach-ch = /bits/ 8 <0x00 0x01>;
+ cooling-levels = <127 255>;
+ };
+ fan@1 {
+ reg = <0x01>;
+ fan-tach-ch = /bits/ 8 <0x02 0x03>;
+ };
+ fan@2 {
+ reg = <0x02>;
+ fan-tach-ch = /bits/ 8 <0x04>;
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt b/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
index e7106bfc1f13..8fbd8633a387 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
@@ -11,9 +11,6 @@ Required Properties:
- resets : phandle to reset controller with the reset number in
the second cell
- interrupts : interrupt number
-- interrupt-parent : interrupt controller for bus, should reference a
- aspeed,ast2400-i2c-ic or aspeed,ast2500-i2c-ic
- interrupt controller
Optional Properties:
- bus-frequency : frequency of the bus clock in Hz defaults to 100 kHz when not
diff --git a/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt b/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
index aeceaceba3c5..0380609b177a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
@@ -10,8 +10,6 @@ Required properties:
Optional properties :
-- interrupt-parent: specifies the phandle to the parent interrupt controller
- this one is cascaded from
- interrupts: specifies the interrupt number, the irq line to be used
- interrupt-names: Interrupt name string
diff --git a/Documentation/devicetree/bindings/i2c/i2c-fsi.txt b/Documentation/devicetree/bindings/i2c/i2c-fsi.txt
new file mode 100644
index 000000000000..b1be2ceb7e69
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-fsi.txt
@@ -0,0 +1,40 @@
+Device-tree bindings for FSI-attached I2C master and busses
+-----------------------------------------------------------
+
+Required properties:
+ - compatible = "ibm,i2c-fsi";
+ - reg = < address size >; : The FSI CFAM address and address
+ space size.
+ - #address-cells = <1>; : Number of address cells in child
+ nodes.
+ - #size-cells = <0>; : Number of size cells in child nodes.
+ - child nodes : Nodes to describe busses off the I2C
+ master.
+
+Child node required properties:
+ - reg = < port number > : The port number on the I2C master.
+
+Child node optional properties:
+ - child nodes : Nodes to describe devices on the I2C
+ bus.
+
+Examples:
+
+ i2c@1800 {
+ compatible = "ibm,i2c-fsi";
+ reg = < 0x1800 0x400 >;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c-bus@0 {
+ reg = <0>;
+ };
+
+ i2c-bus@1 {
+ reg = <1>;
+
+ eeprom@50 {
+ compatible = "vendor,dev-name";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
index 60fe90d69f4e..00e4365d7206 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
@@ -5,7 +5,6 @@ Required properties:
- "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
- "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc
- reg : address and length of the lpi2c master registers
-- interrupt-parent : core interrupt controller
- interrupts : lpi2c interrupt
- clocks : lpi2c clock specifier
diff --git a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
index d4a082acf92f..3738cfbf863f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
@@ -11,10 +11,6 @@ Recommended properties:
- pinctrl-names: should be "default";
- pinctrl-0: phandle to pinctrl function
-Optional properties:
-- interrupt-parent: Should be the phandle of the interrupt controller that
- delivers interrupts to the I2C block.
-
Example
/ {
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mpc.txt b/Documentation/devicetree/bindings/i2c/i2c-mpc.txt
index 1eacd6b20ed5..42a390526957 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mpc.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mpc.txt
@@ -15,8 +15,6 @@ Recommended properties :
information for the interrupt. This should be encoded based on
the information in section 2) depending on the type of interrupt
controller you have.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- fsl,preserve-clocking : boolean; if defined, the clock settings
from the bootloader are preserved (not touched).
- clock-frequency : desired I2C bus clock frequency in Hz.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
index 34d91501342e..ccf6c86ed076 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
@@ -28,8 +28,6 @@ Optional Properties:
- i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all
children in idle state. This is necessary for example, if there are several
multiplexers on the bus and the devices behind them use same I2C addresses.
- - interrupt-parent: Phandle for the interrupt controller that services
- interrupts for this device.
- interrupts: Interrupt mapping for IRQ.
- interrupt-controller: Marks the device node as an interrupt controller.
- #interrupt-cells : Should be two.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-owl.txt b/Documentation/devicetree/bindings/i2c/i2c-owl.txt
new file mode 100644
index 000000000000..b743fe444e9f
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-owl.txt
@@ -0,0 +1,27 @@
+Actions Semiconductor Owl I2C controller
+
+Required properties:
+
+- compatible : Should be "actions,s900-i2c".
+- reg : Offset and length of the register set for the device.
+- #address-cells : Should be 1.
+- #size-cells : Should be 0.
+- interrupts : A single interrupt specifier.
+- clocks : Phandle of the clock feeding the I2C controller.
+
+Optional properties:
+
+- clock-frequency : Desired I2C bus clock frequency in Hz. As only Normal and
+ Fast modes are supported, possible values are 100000 and
+ 400000.
+Examples:
+
+ i2c0: i2c@e0170000 {
+ compatible = "actions,s900-i2c";
+ reg = <0 0xe0170000 0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_I2C0>;
+ clock-frequency = <100000>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-pca-platform.txt b/Documentation/devicetree/bindings/i2c/i2c-pca-platform.txt
index f1f3876bb8e8..73a693d66ef7 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-pca-platform.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-pca-platform.txt
@@ -12,8 +12,6 @@ Required properties :
Optional properties
- interrupts : the interrupt number
- - interrupt-parent : the phandle for the interrupt controller.
- If an interrupt is not specified polling will be used.
- reset-gpios : gpio specifier for gpio connected to RESET_N pin. As the line
is active low, it should be marked GPIO_ACTIVE_LOW.
- clock-frequency : I2C bus frequency.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-pnx.txt b/Documentation/devicetree/bindings/i2c/i2c-pnx.txt
index fe98ada33ee4..2a59006cf79e 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-pnx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-pnx.txt
@@ -7,8 +7,6 @@ Required properties:
- interrupts: configure one interrupt line
- #address-cells: always 1 (for i2c addresses)
- #size-cells: always 0
- - interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-pxa.txt b/Documentation/devicetree/bindings/i2c/i2c-pxa.txt
index d30f0b11d853..c30783c0eca0 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-pxa.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-pxa.txt
@@ -12,9 +12,6 @@ Required properties :
Recommended properties :
- interrupts : the interrupt number
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device. If the parent is the default
- interrupt controller in device tree, it could be ignored.
- mrvl,i2c-polling : Disable interrupt of i2c controller. Polling
status register of i2c controller instead.
- mrvl,i2c-fast-mode : Enable fast mode of i2c controller.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
index 7ce8fae55537..39cd21d95810 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible:
"renesas,i2c-r8a7743" if the device is a part of a R8A7743 SoC.
"renesas,i2c-r8a7745" if the device is a part of a R8A7745 SoC.
+ "renesas,i2c-r8a774a1" if the device is a part of a R8A774A1 SoC.
"renesas,i2c-r8a7778" if the device is a part of a R8A7778 SoC.
"renesas,i2c-r8a7779" if the device is a part of a R8A7779 SoC.
"renesas,i2c-r8a7790" if the device is a part of a R8A7790 SoC.
@@ -16,11 +17,13 @@ Required properties:
"renesas,i2c-r8a77965" if the device is a part of a R8A77965 SoC.
"renesas,i2c-r8a77970" if the device is a part of a R8A77970 SoC.
"renesas,i2c-r8a77980" if the device is a part of a R8A77980 SoC.
+ "renesas,i2c-r8a77990" if the device is a part of a R8A77990 SoC.
"renesas,i2c-r8a77995" if the device is a part of a R8A77995 SoC.
"renesas,rcar-gen1-i2c" for a generic R-Car Gen1 compatible device.
"renesas,rcar-gen2-i2c" for a generic R-Car Gen2 or RZ/G1 compatible
device.
- "renesas,rcar-gen3-i2c" for a generic R-Car Gen3 compatible device.
+ "renesas,rcar-gen3-i2c" for a generic R-Car Gen3 or RZ/G2 compatible
+ device.
"renesas,i2c-rcar" (deprecated)
When compatible with the generic version, nodes must list the
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
index fc7e17802746..872673adff5a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
@@ -6,6 +6,7 @@ Required properties:
- "renesas,iic-r8a7740" (R-Mobile A1)
- "renesas,iic-r8a7743" (RZ/G1M)
- "renesas,iic-r8a7745" (RZ/G1E)
+ - "renesas,iic-r8a774a1" (RZ/G2M)
- "renesas,iic-r8a7790" (R-Car H2)
- "renesas,iic-r8a7791" (R-Car M2-W)
- "renesas,iic-r8a7792" (R-Car V2H)
@@ -17,7 +18,8 @@ Required properties:
- "renesas,iic-sh73a0" (SH-Mobile AG5)
- "renesas,rcar-gen2-iic" (generic R-Car Gen2 or RZ/G1
compatible device)
- - "renesas,rcar-gen3-iic" (generic R-Car Gen3 compatible device)
+ - "renesas,rcar-gen3-iic" (generic R-Car Gen3 or RZ/G2
+ compatible device)
- "renesas,rmobile-iic" (generic device)
When compatible with a generic R-Car version, nodes
diff --git a/Documentation/devicetree/bindings/iio/accel/adxl345.txt b/Documentation/devicetree/bindings/iio/accel/adxl345.txt
index e7111b02c02c..f9525f6e3d43 100644
--- a/Documentation/devicetree/bindings/iio/accel/adxl345.txt
+++ b/Documentation/devicetree/bindings/iio/accel/adxl345.txt
@@ -1,9 +1,12 @@
-Analog Devices ADXL345 3-Axis, +/-(2g/4g/8g/16g) Digital Accelerometer
+Analog Devices ADXL345/ADXL375 3-Axis Digital Accelerometers
http://www.analog.com/en/products/mems/accelerometers/adxl345.html
+http://www.analog.com/en/products/sensors-mems/accelerometers/adxl375.html
Required properties:
- - compatible : should be "adi,adxl345"
+ - compatible : should be one of
+ "adi,adxl345"
+ "adi,adxl375"
- reg : the I2C address or SPI chip select number of the sensor
Required properties for SPI bus usage:
@@ -11,8 +14,6 @@ Required properties for SPI bus usage:
- spi-cpol and spi-cpha : must be defined for adxl345 to enable SPI mode 3
Optional properties:
- - interrupt-parent : phandle to the parent interrupt controller as documented
- in Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- interrupts: interrupt mapping for IRQ as documented in
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt
index 4a3679d54457..3b25b4c4d446 100644
--- a/Documentation/devicetree/bindings/iio/accel/bma180.txt
+++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt
@@ -10,8 +10,6 @@ Required properties:
Optional properties:
- - interrupt-parent : should be the phandle for the interrupt controller
-
- interrupts : interrupt mapping for GPIO IRQ, it should by configured with
flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING
For the bma250 the first interrupt listed must be the one
diff --git a/Documentation/devicetree/bindings/iio/accel/mma8452.txt b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
index 45f5c5c5929c..2100e9af379c 100644
--- a/Documentation/devicetree/bindings/iio/accel/mma8452.txt
+++ b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
@@ -15,8 +15,6 @@ Required properties:
Optional properties:
- - interrupt-parent: should be the phandle for the interrupt controller
-
- interrupts: interrupt mapping for GPIO IRQ
- interrupt-names: should contain "INT1" and/or "INT2", the accelerometer's
diff --git a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
index d1acd5ea2737..54b823f3a453 100644
--- a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: depending on the SoC this should be one of:
- "amlogic,meson8-saradc" for Meson8
- "amlogic,meson8b-saradc" for Meson8b
+ - "amlogic,meson8m2-saradc" for Meson8m2
- "amlogic,meson-gxbb-saradc" for GXBB
- "amlogic,meson-gxl-saradc" for GXL
- "amlogic,meson-gxm-saradc" for GXM
diff --git a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
index 6469a4cd2a6d..4a3c1d496e1a 100644
--- a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
@@ -21,6 +21,14 @@ Optional properties:
- dmas: Phandle to dma channel for the ADC.
- dma-names: Must be "rx" when dmas property is being used.
See ../../dma/dma.txt for details.
+ - #io-channel-cells: in case consumer drivers are attached, this must be 1.
+ See <Documentation/devicetree/bindings/iio/iio-bindings.txt> for details.
+
+Properties for consumer drivers:
+ - Consumer drivers can be connected to this producer device, as specified
+ in <Documentation/devicetree/bindings/iio/iio-bindings.txt>
+ - Channels exposed are specified in
+ <dt-bindings/iio/adc/at91-sama5d2_adc.h>
+ (see the consumer node sketch after the example below)
Example:
@@ -38,4 +46,5 @@ adc: adc@fc030000 {
atmel,trigger-edge-type = <IRQ_TYPE_EDGE_BOTH>;
dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | AT91_XDMAC_DT_PERID(25))>;
dma-names = "rx";
+ #io-channel-cells = <1>;
}
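+
+A minimal sketch of a consumer node referencing the adc node above (the
+consumer node name, channel index and channel name are illustrative):
+
+consumer {
+ io-channels = <&adc 4>;
+ io-channel-names = "vbat";
+};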
diff --git a/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt b/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
index b3629405f568..7222328a3d0d 100644
--- a/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
+++ b/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
@@ -8,11 +8,17 @@ Required properties:
See Documentation/devicetree/bindings/gpio/gpio.txt
- avdd-supply: Definition of the regulator used as analog supply
+Optional properties:
+ - clock-frequency: Frequency of PD_SCK in Hz
+ Minimum value allowed is 10 kHz because of maximum
+ high time of 50 microseconds.
+
Example:
-weight@0 {
+weight {
compatible = "avia,hx711";
sck-gpios = <&gpio3 10 GPIO_ACTIVE_HIGH>;
dout-gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>;
avdd-suppy = <&avdd>;
+ clock-frequency = <100000>;
};
diff --git a/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt b/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt
index 487ea966858e..ec04008e8f4f 100644
--- a/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt
@@ -2,7 +2,6 @@ Motorola CPCAP PMIC ADC binding
Required properties:
- compatible: Should be "motorola,cpcap-adc" or "motorola,mapphone-cpcap-adc"
-- interrupt-parent: The interrupt controller
- interrupts: The interrupt number for the ADC device
- interrupt-names: Should be "adcdone"
- #io-channel-cells: Number of cells in an IIO specifier
diff --git a/Documentation/devicetree/bindings/iio/adc/fsl,imx25-gcq.txt b/Documentation/devicetree/bindings/iio/adc/fsl,imx25-gcq.txt
index b0866d36a307..eebdcec3dab5 100644
--- a/Documentation/devicetree/bindings/iio/adc/fsl,imx25-gcq.txt
+++ b/Documentation/devicetree/bindings/iio/adc/fsl,imx25-gcq.txt
@@ -8,7 +8,6 @@ Required properties:
- reg: Should be the register range of the module.
- interrupts: Should be the interrupt number of the module.
Typically this is <1>.
- - interrupt-parent: phandle to the tsadc module of the i.MX25.
- #address-cells: Should be <1> (setting for the subnodes)
- #size-cells: Should be <0> (setting for the subnodes)
diff --git a/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt b/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt
index a8770cc6bcad..e680c61dfb84 100644
--- a/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: Should be "maxim,max1027" or "maxim,max1029" or "maxim,max1031"
- reg: SPI chip select number for the device
- - interrupt-parent: phandle to the parent interrupt controller
- see: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- interrupts: IRQ line for the ADC
see: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
index ba24ca7ba95e..59b92cd32552 100644
--- a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
+++ b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
@@ -7,7 +7,7 @@ Required properties:
Example node:
- ads1202: adc@0 {
+ ads1202: adc {
compatible = "sd-modulator";
#io-channel-cells = <0>;
};
diff --git a/Documentation/devicetree/bindings/iio/adc/sprd,sc27xx-adc.txt b/Documentation/devicetree/bindings/iio/adc/sprd,sc27xx-adc.txt
new file mode 100644
index 000000000000..8aad960de50b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/sprd,sc27xx-adc.txt
@@ -0,0 +1,36 @@
+Spreadtrum SC27XX series PMICs ADC binding
+
+Required properties:
+- compatible: Should be one of the following.
+ "sprd,sc2720-adc"
+ "sprd,sc2721-adc"
+ "sprd,sc2723-adc"
+ "sprd,sc2730-adc"
+ "sprd,sc2731-adc"
+- reg: The address offset of ADC controller.
+- interrupt-parent: The interrupt controller.
+- interrupts: The interrupt number for the ADC device.
+- #io-channel-cells: Number of cells in an IIO specifier.
+- hwlocks: Reference to a phandle of a hwlock provider node.
+
+Example:
+
+ sc2731_pmic: pmic@0 {
+ compatible = "sprd,sc2731";
+ reg = <0>;
+ spi-max-frequency = <26000000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic_adc: adc@480 {
+ compatible = "sprd,sc2731-adc";
+ reg = <0x480>;
+ interrupt-parent = <&sc2731_pmic>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ #io-channel-cells = <1>;
+ hwlocks = <&hwlock 4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
index f1ead43a1a95..8346bcb04ad7 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -60,7 +60,6 @@ Required properties:
- reg: Offset of ADC instance in ADC block (e.g. may be 0x0, 0x100, 0x200).
- clocks: Input clock private to this ADC instance. It's required only on
stm32f4, that has per instance clock input for registers access.
-- interrupt-parent: Phandle to the parent interrupt controller.
- interrupts: IRQ Line for the ADC (e.g. may be 0 for adc@0, 1 for adc@100 or
2 for adc@200).
- st,adc-channels: List of single-ended channels muxed for this ADC.
diff --git a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
index d71258e2d456..e0e0755cabd8 100644
--- a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
@@ -22,7 +22,6 @@ Required properties:
clock to the AXI bus interface of the core.
Optional properties:
- - interrupt-parent: phandle to the parent interrupt controller
- xlnx,external-mux:
* "none": No external multiplexer is used, this is the default
if the property is omitted.
diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt b/Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt
index 2962bd9a2b3d..f4320595b851 100644
--- a/Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt
+++ b/Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt
@@ -6,7 +6,6 @@ Required properties:
- compatible: must be "atlas,ec-sm"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt b/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt
index 5d8b687d5edc..af1f5a9aa4da 100644
--- a/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt
+++ b/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt
@@ -6,7 +6,6 @@ Required properties:
- compatible: must be "atlas,orp-sm"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt b/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt
index cffa1907463a..79d90f060327 100644
--- a/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt
+++ b/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt
@@ -6,7 +6,6 @@ Required properties:
- compatible: must be "atlas,ph-sm"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/dac/ad5758.txt b/Documentation/devicetree/bindings/iio/dac/ad5758.txt
new file mode 100644
index 000000000000..bba01a5cab1b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/ad5758.txt
@@ -0,0 +1,78 @@
+Analog Devices AD5758 DAC device driver
+
+Required properties for the AD5758:
+ - compatible: Must be "adi,ad5758"
+ - reg: SPI chip select number for the device
+ - spi-max-frequency: Max SPI frequency to use (< 50000000)
+ - spi-cpha: must be set, as SPI mode 1 is the only supported mode
+
+Required properties:
+
+ - adi,dc-dc-mode: Mode of operation of the dc-to-dc converter
+ Dynamic Power Control (DPC)
+ In this mode, the AD5758 circuitry senses the output
+ voltage and dynamically regulates the supply voltage,
+ VDPC+, to meet compliance requirements plus an optimized
+ headroom voltage for the output buffer.
+
+ Programmable Power Control (PPC)
+ In this mode, the VDPC+ voltage is user-programmable to
+ a fixed level that needs to accommodate the maximum output
+ load required.
+
+ The output of the DAC core is either converted to a
+ current or voltage output at the VIOUT pin. Only one mode
+ can be enabled at any one time.
+
+ The following values are currently supported:
+ * 1: DPC current mode
+ * 2: DPC voltage mode
+ * 3: PPC current mode
+
+ Depending on the selected output mode (voltage or current), one of the
+ following two properties must be present:
+
+ - adi,range-microvolt: Voltage output range
+ The array of voltage output ranges must contain two fields:
+ * <0 5000000>: 0 V to 5 V voltage range
+ * <0 10000000>: 0 V to 10 V voltage range
+ * <(-5000000) 5000000>: ±5 V voltage range
+ * <(-10000000) 10000000>: ±10 V voltage range
+ - adi,range-microamp: Current output range
+ The array of current output ranges must contain two fields:
+ * <0 20000>: 0 mA to 20 mA current range
+ * <0 24000>: 0 mA to 24 mA current range
+ * <4000 20000>: 4 mA to 20 mA current range
+ * <(-20000) 20000>: ±20 mA current range
+ * <(-24000) 24000>: ±24 mA current range
+ * <(-1000) 22000>: −1 mA to +22 mA current range
+
+Optional properties:
+
+ - adi,dc-dc-ilim-microamp: The dc-to-dc converter current limit
+ The following values are currently supported [uA]:
+ * 150000
+ * 200000
+ * 250000
+ * 300000
+ * 350000
+ * 400000
+
+ - adi,slew-time-us: The time it takes for the output to reach
+ full scale, in microseconds [us]
+ The supported range is 133 us to 1023984375 us
+
+AD5758 Example:
+
+ dac@0 {
+ compatible = "adi,ad5758";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ spi-cpha;
+
+ adi,dc-dc-mode = <2>;
+ adi,range-microvolt = <0 10000000>;
+ adi,dc-dc-ilim-microamp = <200000>;
+ adi,slew-time-us = <125000>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
index b0d3b59966bc..233fe207aded 100644
--- a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
+++ b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
@@ -5,7 +5,6 @@ Required properties:
- reg : the I2C address of the sensor
Optional properties:
- - interrupt-parent : should be the phandle for the interrupt controller
- interrupts : interrupt mapping for the trigger interrupt from the
internal oscillator. The following IRQ modes are supported:
IRQ_TYPE_EDGE_RISING, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_LEVEL_HIGH and
diff --git a/Documentation/devicetree/bindings/iio/health/afe4403.txt b/Documentation/devicetree/bindings/iio/health/afe4403.txt
index 2fffd70336ba..8e412054d6d5 100644
--- a/Documentation/devicetree/bindings/iio/health/afe4403.txt
+++ b/Documentation/devicetree/bindings/iio/health/afe4403.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible : Should be "ti,afe4403".
- reg : SPI chip select address of device.
- tx-supply : Regulator supply to transmitting LEDs.
- - interrupt-parent : Phandle to he parent interrupt controller.
- interrupts : The interrupt line the device ADC_RDY pin is
connected to. For details refer to,
../../interrupt-controller/interrupts.txt.
diff --git a/Documentation/devicetree/bindings/iio/health/afe4404.txt b/Documentation/devicetree/bindings/iio/health/afe4404.txt
index de69f203edfa..0b52830a0d9c 100644
--- a/Documentation/devicetree/bindings/iio/health/afe4404.txt
+++ b/Documentation/devicetree/bindings/iio/health/afe4404.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible : Should be "ti,afe4404".
- reg : I2C address of the device.
- tx-supply : Regulator supply to transmitting LEDs.
- - interrupt-parent : Phandle to he parent interrupt controller.
- interrupts : The interrupt line the device ADC_RDY pin is
connected to. For details refer to,
../interrupt-controller/interrupts.txt.
diff --git a/Documentation/devicetree/bindings/iio/health/max30100.txt b/Documentation/devicetree/bindings/iio/health/max30100.txt
index 8d8176459d09..0054908a6e74 100644
--- a/Documentation/devicetree/bindings/iio/health/max30100.txt
+++ b/Documentation/devicetree/bindings/iio/health/max30100.txt
@@ -5,7 +5,6 @@ Maxim MAX30100 heart rate and pulse oximeter sensor
Required properties:
- compatible: must be "maxim,max30100"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic
diff --git a/Documentation/devicetree/bindings/iio/health/max30102.txt b/Documentation/devicetree/bindings/iio/health/max30102.txt
index ef2ca0a0306f..7ef7ae40ae4f 100644
--- a/Documentation/devicetree/bindings/iio/health/max30102.txt
+++ b/Documentation/devicetree/bindings/iio/health/max30102.txt
@@ -7,7 +7,6 @@ Maxim MAX30105 optical particle-sensing module
Required properties:
- compatible: must be "maxim,max30102" or "maxim,max30105"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic
diff --git a/Documentation/devicetree/bindings/iio/humidity/hts221.txt b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
index 10adeb0d703d..84d029372260 100644
--- a/Documentation/devicetree/bindings/iio/humidity/hts221.txt
+++ b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
@@ -13,7 +13,6 @@ Optional properties:
when it is not active, whereas a pull-up one is needed when interrupt
line is configured as IRQ_TYPE_LEVEL_LOW or IRQ_TYPE_EDGE_FALLING.
Refer to pinctrl/pinctrl-bindings.txt for the property description.
-- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ. It should be configured with
flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
IRQ_TYPE_EDGE_FALLING.
diff --git a/Documentation/devicetree/bindings/iio/imu/bmi160.txt b/Documentation/devicetree/bindings/iio/imu/bmi160.txt
index ae0112c7debc..0c1c105fb503 100644
--- a/Documentation/devicetree/bindings/iio/imu/bmi160.txt
+++ b/Documentation/devicetree/bindings/iio/imu/bmi160.txt
@@ -9,7 +9,6 @@ Required properties:
- spi-max-frequency : set maximum clock frequency (only for SPI)
Optional properties:
- - interrupt-parent : should be the phandle of the interrupt controller
- interrupts : interrupt mapping for IRQ, must be IRQ_TYPE_LEVEL_LOW
- interrupt-names : set to "INT1" if INT1 pin should be used as interrupt
input, set to "INT2" if INT2 pin should be used instead
diff --git a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
index 5f4777e8cc9e..b2f27da847b8 100644
--- a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
+++ b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
@@ -6,12 +6,12 @@ Required properties:
- compatible : should be one of
"invensense,mpu6050"
"invensense,mpu6500"
+ "invensense,mpu6515"
"invensense,mpu9150"
"invensense,mpu9250"
"invensense,mpu9255"
"invensense,icm20608"
- reg : the I2C address of the sensor
- - interrupt-parent : should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ. It should be configured with flags
IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
IRQ_TYPE_EDGE_FALLING.
diff --git a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
index ef8a8566c63f..ea2d6e0ae4c5 100644
--- a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
+++ b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
@@ -20,7 +20,6 @@ Optional properties:
IRQ_TYPE_EDGE_RISING a pull-down resistor is needed to drive the line
when it is not active, whereas a pull-up one is needed when interrupt
line is configured as IRQ_TYPE_LEVEL_LOW or IRQ_TYPE_EDGE_FALLING.
-- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ. It should be configured with
flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
IRQ_TYPE_EDGE_FALLING.
diff --git a/Documentation/devicetree/bindings/iio/light/apds9300.txt b/Documentation/devicetree/bindings/iio/light/apds9300.txt
index d6f66c73ddbf..aa199e09a493 100644
--- a/Documentation/devicetree/bindings/iio/light/apds9300.txt
+++ b/Documentation/devicetree/bindings/iio/light/apds9300.txt
@@ -9,7 +9,6 @@ Required properties:
Optional properties:
- - interrupt-parent : should be the phandle for the interrupt controller
- interrupts : interrupt mapping for GPIO IRQ
Example:
diff --git a/Documentation/devicetree/bindings/iio/light/apds9960.txt b/Documentation/devicetree/bindings/iio/light/apds9960.txt
index 174b709f16db..3af325ad194b 100644
--- a/Documentation/devicetree/bindings/iio/light/apds9960.txt
+++ b/Documentation/devicetree/bindings/iio/light/apds9960.txt
@@ -6,7 +6,6 @@ Required properties:
- compatible: must be "avago,apds9960"
- reg: the I2c address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts : the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/light/isl29018.txt b/Documentation/devicetree/bindings/iio/light/isl29018.txt
index 425ab459e209..b9bbde3e13ed 100644
--- a/Documentation/devicetree/bindings/iio/light/isl29018.txt
+++ b/Documentation/devicetree/bindings/iio/light/isl29018.txt
@@ -10,7 +10,6 @@ Required properties:
Optional properties:
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/light/isl29501.txt b/Documentation/devicetree/bindings/iio/light/isl29501.txt
new file mode 100644
index 000000000000..46957997fee3
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/isl29501.txt
@@ -0,0 +1,13 @@
+* ISL29501 Time-of-flight sensor.
+
+Required properties:
+
+ - compatible : should be "renesas,isl29501"
+ - reg : the I2C address of the sensor
+
+Example:
+
+isl29501@57 {
+ compatible = "renesas,isl29501";
+ reg = <0x57>;
+};
diff --git a/Documentation/devicetree/bindings/iio/light/opt3001.txt b/Documentation/devicetree/bindings/iio/light/opt3001.txt
index eac30d508849..47b13eb8f4ec 100644
--- a/Documentation/devicetree/bindings/iio/light/opt3001.txt
+++ b/Documentation/devicetree/bindings/iio/light/opt3001.txt
@@ -13,7 +13,6 @@ Required properties:
- reg: the I2C address of the sensor
Optional properties:
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for GPIO IRQ (configure for falling edge)
Example:
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2583.txt b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
index 8e2066c83f70..059dffa1829a 100644
--- a/Documentation/devicetree/bindings/iio/light/tsl2583.txt
+++ b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
@@ -10,7 +10,6 @@ Required properties:
Optional properties:
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/light/uvis25.txt b/Documentation/devicetree/bindings/iio/light/uvis25.txt
index 3041207e3f3c..043c139d91e6 100644
--- a/Documentation/devicetree/bindings/iio/light/uvis25.txt
+++ b/Documentation/devicetree/bindings/iio/light/uvis25.txt
@@ -5,7 +5,6 @@ Required properties:
- reg: i2c address of the sensor / spi cs line
Optional properties:
-- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ. It should be configured with
flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
IRQ_TYPE_EDGE_FALLING.
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/bmc150_magn.txt b/Documentation/devicetree/bindings/iio/magnetometer/bmc150_magn.txt
index 9f263b7df162..fd5fca90fb39 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/bmc150_magn.txt
+++ b/Documentation/devicetree/bindings/iio/magnetometer/bmc150_magn.txt
@@ -9,7 +9,6 @@ Required properties:
Optional properties:
- - interrupt-parent : phandle to the parent interrupt controller
- interrupts : interrupt mapping for GPIO IRQ
Example:
diff --git a/Documentation/devicetree/bindings/iio/pressure/bmp085.txt b/Documentation/devicetree/bindings/iio/pressure/bmp085.txt
index c7198a03c906..61c72e63c584 100644
--- a/Documentation/devicetree/bindings/iio/pressure/bmp085.txt
+++ b/Documentation/devicetree/bindings/iio/pressure/bmp085.txt
@@ -8,11 +8,6 @@ Required properties:
"bosch,bme280"
Optional properties:
-- chip-id: configurable chip id for non-default chip revisions
-- temp-measurement-period: temperature measurement period (milliseconds)
-- default-oversampling: default oversampling value to be used at startup,
- value range is 0-3 with rising sensitivity.
-- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ
- reset-gpios: a GPIO line handling reset of the sensor: as the line is
active low, it should be marked GPIO_ACTIVE_LOW (see gpio/gpio.txt)
@@ -24,9 +19,6 @@ Example:
pressure@77 {
compatible = "bosch,bmp085";
reg = <0x77>;
- chip-id = <10>;
- temp-measurement-period = <100>;
- default-oversampling = <2>;
interrupt-parent = <&gpio0>;
interrupts = <25 IRQ_TYPE_EDGE_RISING>;
reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
diff --git a/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt b/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt
index fb85de676e03..a36ab3e0c3f7 100644
--- a/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt
+++ b/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt
@@ -15,8 +15,6 @@ Optional properties:
power to the sensor
- vdd-supply: an optional regulator that needs to be on to provide VDD
power to the sensor
-- interrupt-parent: phandle to the parent interrupt controller as documented in
- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- interrupts: interrupt mapping for IRQ as documented in
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
diff --git a/Documentation/devicetree/bindings/iio/proximity/as3935.txt b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
index b6c1afa6f02d..849115585d55 100644
--- a/Documentation/devicetree/bindings/iio/proximity/as3935.txt
+++ b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
@@ -6,7 +6,6 @@ Required properties:
- spi-max-frequency: specifies maximum SPI clock frequency
- spi-cpha: SPI Mode 1. Refer to spi/spi-bus.txt for generic SPI
slave node bindings.
- - interrupt-parent : should be the phandle for the interrupt controller
- interrupts : the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic
diff --git a/Documentation/devicetree/bindings/iio/proximity/sx9500.txt b/Documentation/devicetree/bindings/iio/proximity/sx9500.txt
index b301dd2b35da..c54455db3bec 100644
--- a/Documentation/devicetree/bindings/iio/proximity/sx9500.txt
+++ b/Documentation/devicetree/bindings/iio/proximity/sx9500.txt
@@ -3,7 +3,6 @@ Semtech's SX9500 capacitive proximity button device driver
Required properties:
- compatible: must be "semtech,sx9500"
- reg: i2c address where to find the device
- - interrupt-parent : should be the phandle for the interrupt controller
- interrupts : the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic
diff --git a/Documentation/devicetree/bindings/iio/sensorhub.txt b/Documentation/devicetree/bindings/iio/sensorhub.txt
index 8d57571d5c0b..b6ac0457d4ea 100644
--- a/Documentation/devicetree/bindings/iio/sensorhub.txt
+++ b/Documentation/devicetree/bindings/iio/sensorhub.txt
@@ -6,7 +6,6 @@ of a virtual sensor device.
Required properties:
- compatible: "samsung,sensorhub-rinato" or "samsung,sensorhub-thermostat"
- spi-max-frequency: max SPI clock frequency
-- interrupt-parent: interrupt parent
- interrupts: communication interrupt
- ap-mcu-gpios: [out] ap to sensorhub line - used during communication
- mcu-ap-gpios: [in] sensorhub to ap - used during communication
diff --git a/Documentation/devicetree/bindings/iio/temperature/tmp007.txt b/Documentation/devicetree/bindings/iio/temperature/tmp007.txt
index b63aba91ef03..da0af234a357 100644
--- a/Documentation/devicetree/bindings/iio/temperature/tmp007.txt
+++ b/Documentation/devicetree/bindings/iio/temperature/tmp007.txt
@@ -20,8 +20,6 @@ Required properties:
Optional properties:
- - interrupt-parent: should be the phandle for the interrupt controller
-
- interrupts: interrupt mapping for GPIO IRQ (level active low)
Example:
diff --git a/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt b/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
index d3b273e4336a..84f1a1b505d2 100644
--- a/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
+++ b/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
@@ -19,7 +19,6 @@ representing a dsaf device.
- #size-cells: must be 2
Optional properties:
- dma-coherent: Present if DMA operations are coherent.
-- interrupt-parent: the interrupt parent of this device.
- interrupts: should contain 32 completion event irq,1 async event irq
and 1 event overflow irq.
- interrupt-names:should be one of 34 irqs for roce device
diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
index 8d91ba9ff2fd..d3db65916a36 100644
--- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt
+++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
@@ -3,8 +3,6 @@ Cypress I2C Touchpad
Required properties:
- compatible: must be "cypress,cyapa".
- reg: I2C address of the chip.
-- interrupt-parent: a phandle for the interrupt controller (see interrupt
- binding[0]).
- interrupts: interrupt to which the chip is connected (see interrupt
binding[0]).
diff --git a/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt b/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
index 635f62c756ee..0c252d9306da 100644
--- a/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
+++ b/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
@@ -3,8 +3,6 @@ Samsung tm2-touchkey
Required properties:
- compatible: must be "cypress,tm2-touchkey"
- reg: I2C address of the chip.
-- interrupt-parent: a phandle for the interrupt controller (see interrupt
- binding[0]).
- interrupts: interrupt to which the chip is connected (see interrupt
binding[0]).
- vcc-supply : internal regulator output. 1.8V
diff --git a/Documentation/devicetree/bindings/input/e3x0-button.txt b/Documentation/devicetree/bindings/input/e3x0-button.txt
index 751665e8e47a..907b195f2eaa 100644
--- a/Documentation/devicetree/bindings/input/e3x0-button.txt
+++ b/Documentation/devicetree/bindings/input/e3x0-button.txt
@@ -7,8 +7,6 @@ This module provides a simple power button event via two interrupts.
Required properties:
- compatible: should be one of the following
- "ettus,e3x0-button": For devices such as the NI Ettus Research USRP E3x0
-- interrupt-parent:
- - a phandle to the interrupt controller that it is attached to.
- interrupts: should be one of the following
- <0 30 1>, <0 31 1>: For devices such as the NI Ettus Research USRP E3x0
- interrupt-names: should be one of the following
diff --git a/Documentation/devicetree/bindings/input/elan_i2c.txt b/Documentation/devicetree/bindings/input/elan_i2c.txt
index d80a83583238..797607460735 100644
--- a/Documentation/devicetree/bindings/input/elan_i2c.txt
+++ b/Documentation/devicetree/bindings/input/elan_i2c.txt
@@ -3,8 +3,6 @@ Elantech I2C Touchpad
Required properties:
- compatible: must be "elan,ekth3000".
- reg: I2C address of the chip.
-- interrupt-parent: a phandle for the interrupt controller (see interrupt
- binding[0]).
- interrupts: interrupt to which the chip is connected (see interrupt
binding[0]).
diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt
index 8a71038f3489..5edac8be0802 100644
--- a/Documentation/devicetree/bindings/input/elants_i2c.txt
+++ b/Documentation/devicetree/bindings/input/elants_i2c.txt
@@ -3,8 +3,6 @@ Elantech I2C Touchscreen
Required properties:
- compatible: must be "elan,ekth3500".
- reg: I2C address of the chip.
-- interrupt-parent: a phandle for the interrupt controller (see interrupt
- binding[0]).
- interrupts: interrupt to which the chip is connected (see interrupt
binding[0]).
diff --git a/Documentation/devicetree/bindings/input/hid-over-i2c.txt b/Documentation/devicetree/bindings/input/hid-over-i2c.txt
index 4d3da9d91de4..c76bafaf98d2 100644
--- a/Documentation/devicetree/bindings/input/hid-over-i2c.txt
+++ b/Documentation/devicetree/bindings/input/hid-over-i2c.txt
@@ -14,7 +14,6 @@ Required properties:
- compatible: must be "hid-over-i2c"
- reg: i2c slave address
- hid-descr-addr: HID descriptor address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: interrupt line
Additional optional properties:
@@ -26,7 +25,8 @@ device-specific compatible properties, which should be used in addition to the
- compatible:
* "wacom,w9013" (Wacom W9013 digitizer). Supports:
- - vdd-supply
+ - vdd-supply (3.3V)
+ - vddl-supply (1.8V)
- post-power-on-delay-ms
- vdd-supply: phandle of the regulator that provides the supply voltage.
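+
+A minimal sketch for a Wacom W9013 digitizer (the I2C address, HID
+descriptor address, interrupt and regulator phandles are illustrative):
+
+digitizer@9 {
+ compatible = "wacom,w9013", "hid-over-i2c";
+ reg = <0x09>;
+ hid-descr-addr = <0x1>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+ vdd-supply = <&pp3300_reg>;
+ vddl-supply = <&pp1800_reg>;
+ post-power-on-delay-ms = <100>;
+};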
diff --git a/Documentation/devicetree/bindings/input/keys.txt b/Documentation/devicetree/bindings/input/keys.txt
new file mode 100644
index 000000000000..f5a5ddde53f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/keys.txt
@@ -0,0 +1,8 @@
+General Keys Properties:
+
+Optional properties for Keys:
+- power-off-time-sec: Duration in seconds for which the key should be kept
+ pressed for the device to power off automatically. Devices with a
+ key-pressed shutdown feature can specify this property.
+- linux,keycodes: Specifies the numeric keycode values to be used for
+ reporting key presses.
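+
+Example (a minimal sketch of a hypothetical gpio-keys button using these
+properties; the GPIO and keycode are illustrative):
+
+power-button {
+ label = "Power";
+ gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ linux,keycodes = <KEY_POWER>;
+ power-off-time-sec = <10>;
+};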
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
index 07bf55f6e0b9..34ab5763f494 100644
--- a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
+++ b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
@@ -7,6 +7,7 @@ PROPERTIES
Value type: <string>
Definition: must be one of:
"qcom,pm8941-pwrkey"
+ "qcom,pm8941-resin"
- reg:
Usage: required
@@ -32,6 +33,14 @@ PROPERTIES
Definition: presence of this property indicates that the KPDPWR_N pin
should be configured for pull up.
+- linux,code:
+ Usage: optional
+ Value type: <u32>
+ Definition: The input key-code associated with the power key.
+ Use the linux event codes defined in
+ include/dt-bindings/input/linux-event-codes.h
+ When property is omitted KEY_POWER is assumed.
+
EXAMPLE
pwrkey@800 {
@@ -40,4 +49,5 @@ EXAMPLE
interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
debounce = <15625>;
bias-pull-up;
+ linux,code = <KEY_POWER>;
};
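+
+A resin node follows the same format; a minimal sketch (the register
+offset, interrupt and keycode are illustrative):
+
+resin@800 {
+ compatible = "qcom,pm8941-resin";
+ reg = <0x800>;
+ interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ linux,code = <KEY_VOLUMEDOWN>;
+};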
diff --git a/Documentation/devicetree/bindings/input/raydium_i2c_ts.txt b/Documentation/devicetree/bindings/input/raydium_i2c_ts.txt
index 5b6232db7c61..99a4f2ab5557 100644
--- a/Documentation/devicetree/bindings/input/raydium_i2c_ts.txt
+++ b/Documentation/devicetree/bindings/input/raydium_i2c_ts.txt
@@ -3,7 +3,6 @@ Raydium I2C touchscreen
Required properties:
- compatible: must be "raydium,rm32380"
- reg: The I2C address of the device
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: interrupt to which the chip is connected
See ../interrupt-controller/interrupts.txt
Optional properties:
diff --git a/Documentation/devicetree/bindings/input/rmi4/rmi_i2c.txt b/Documentation/devicetree/bindings/input/rmi4/rmi_i2c.txt
index ec908b91fd90..dcb012f5b3ee 100644
--- a/Documentation/devicetree/bindings/input/rmi4/rmi_i2c.txt
+++ b/Documentation/devicetree/bindings/input/rmi4/rmi_i2c.txt
@@ -16,7 +16,6 @@ Required Properties:
Optional Properties:
- interrupts: interrupt which the rmi device is connected to.
-- interrupt-parent: The interrupt controller.
See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- syna,reset-delay-ms: The number of milliseconds to wait after resetting the
diff --git a/Documentation/devicetree/bindings/input/rmi4/rmi_spi.txt b/Documentation/devicetree/bindings/input/rmi4/rmi_spi.txt
index a4ca7828f21d..632f473db65b 100644
--- a/Documentation/devicetree/bindings/input/rmi4/rmi_spi.txt
+++ b/Documentation/devicetree/bindings/input/rmi4/rmi_spi.txt
@@ -16,7 +16,6 @@ Required Properties:
Optional Properties:
- interrupts: interrupt which the rmi device is connected to.
-- interrupt-parent: The interrupt controller.
See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- spi-rx-delay-us: microsecond delay after a read transfer.
diff --git a/Documentation/devicetree/bindings/input/ti,palmas-pwrbutton.txt b/Documentation/devicetree/bindings/input/ti,palmas-pwrbutton.txt
index a3dde8c30e67..c829e18e1a05 100644
--- a/Documentation/devicetree/bindings/input/ti,palmas-pwrbutton.txt
+++ b/Documentation/devicetree/bindings/input/ti,palmas-pwrbutton.txt
@@ -9,7 +9,6 @@ This module provides a simple power button event via an Interrupt.
Required properties:
- compatible: should be one of the following
- "ti,palmas-pwrbutton": For Palmas compatible power on button
-- interrupt-parent: Parent interrupt device, must be handle of palmas node.
- interrupts: Interrupt number of power button submodule on device.
Optional Properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt b/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
index 3c8614c451f2..cdd743a1f2d5 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
@@ -5,7 +5,6 @@ Required properties:
for I2C slave, use "adi,ad7879-1"
- reg : SPI chipselect/I2C slave address
See spi-bus.txt for more SPI slave properties
-- interrupt-parent : the phandle for the interrupt controller
- interrupts : touch controller interrupt
- touchscreen-max-pressure : maximum reported pressure
- adi,resistance-plate-x : total resistance of X-plate (for pressure
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt b/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
index 9fc47b006fd1..04413da51391 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
@@ -18,7 +18,6 @@ Additional required properties:
"ti,ads7846"
"ti,ads7873"
- interrupt-parent
interrupts An interrupt node describing the IRQ line the chip's
!PENIRQ pin is connected to.
vcc-supply A regulator node for the supply voltage.
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt b/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt
index e459e8546f34..82019bd6094e 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible : "microchip,ar1021-i2c"
- reg : I2C slave address
-- interrupt-parent : the phandle for the interrupt controller
- interrupts : touch controller interrupt
Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/bu21029.txt b/Documentation/devicetree/bindings/input/touchscreen/bu21029.txt
new file mode 100644
index 000000000000..8daa0e868a8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/bu21029.txt
@@ -0,0 +1,35 @@
+* Rohm BU21029 Touch Screen Controller
+
+Required properties:
+ - compatible : must be "rohm,bu21029"
+ - reg : i2c device address of the chip (0x40 or 0x41)
+ - interrupt-parent : the phandle for the gpio controller
+ - interrupts : (gpio) interrupt to which the chip is connected
+ - rohm,x-plate-ohms : x-plate resistance in Ohm
+
+Optional properties:
+ - reset-gpios : gpio pin to reset the chip (active low)
+ - touchscreen-size-x : horizontal resolution of touchscreen (in pixels)
+ - touchscreen-size-y : vertical resolution of touchscreen (in pixels)
+ - touchscreen-max-pressure: maximum pressure value
+ - vdd-supply : power supply for the controller
+
+Example:
+
+ &i2c1 {
+ /* ... */
+
+ bu21029: bu21029@40 {
+ compatible = "rohm,bu21029";
+ reg = <0x40>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio6 16 GPIO_ACTIVE_LOW>;
+ rohm,x-plate-ohms = <600>;
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ touchscreen-max-pressure = <4095>;
+ };
+
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt b/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
index d11f8d615b5d..38b0603f65f3 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible : "chipone,icn8318"
- reg : I2C slave address of the chip (0x40)
- - interrupt-parent : a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- interrupts : interrupt specification for the icn8318 interrupt
- wake-gpios : GPIO specification for the WAKE input
- touchscreen-size-x : horizontal resolution of touchscreen (in pixels)
diff --git a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
index df531b5b6a0d..2e1490a8fe74 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
@@ -7,7 +7,6 @@ Required Properties:
- xm-gpios: FET gate driver for input of X-
- yp-gpios: FET gate driver for input of Y+
- ym-gpios: FET gate driver for input of Y-
-- interrupt-parent: phandle for the interrupt controller
- interrupts: pen irq interrupt for touch detection
- pinctrl-names: "idle", "default", "gpios"
- pinctrl-0: pinctrl node for pen/touch detection state pinmux
diff --git a/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt b/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt
index b75d4cfd2c36..6ee274aa8b03 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt
@@ -4,8 +4,6 @@ Required properties:
- compatible : must be "cypress,cyttsp-i2c" or "cypress,cyttsp-spi"
- reg : Device I2C address or SPI chip select number
- spi-max-frequency : Maximum SPI clocking speed of the device (for cyttsp-spi)
- - interrupt-parent : the phandle for the gpio controller
- (see interrupt binding[0]).
- interrupts : (gpio) interrupt to which the chip is connected
(see interrupt binding[0]).
- bootloader-key : the 8-byte bootloader key that is required to switch
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
index 025cf8c9324a..da2dc5d6c98b 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
@@ -22,8 +22,6 @@ Required properties:
or: "focaltech,ft6236"
- reg: I2C slave address of the chip (0x38)
- - interrupt-parent: a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- interrupts: interrupt specification for the touchdetect
interrupt
diff --git a/Documentation/devicetree/bindings/input/touchscreen/eeti.txt b/Documentation/devicetree/bindings/input/touchscreen/eeti.txt
new file mode 100644
index 000000000000..32b3712c916e
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/eeti.txt
@@ -0,0 +1,30 @@
+Bindings for EETI touchscreen controller
+
+Required properties:
+- compatible: should be "eeti,exc3000-i2c"
+- reg: I2C address of the chip. Should be set to <0xa>
+- interrupts: interrupt to which the chip is connected
+
+Optional properties:
+- attn-gpios: A handle to a GPIO to check whether the interrupt is still
+ latched. This is necessary for platforms that lack
+ support for level-triggered IRQs.
+
+The following optional properties described in touchscreen.txt are
+also supported:
+
+- touchscreen-inverted-x
+- touchscreen-inverted-y
+- touchscreen-swapped-x-y
+
+Example:
+
+i2c-master {
+ touchscreen@a {
+ compatible = "eeti,exc3000-i2c";
+ reg = <0xa>;
+ interrupt-parent = <&gpio>;
+ interrupts = <123 IRQ_TYPE_EDGE_RISING>;
+ attn-gpios = <&gpio 123 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt
index 298e3442f143..92fb2620f5e2 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: must be "eeti,egalax_ts"
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: touch controller interrupt
- wakeup-gpios: the gpio pin to be used for waking up the controller
and also used as irq pin
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt b/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
index 5a19f4c3e9d7..94c4fc644940 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible : "elan,ektf2127"
- reg : I2C slave address of the chip (0x40)
- - interrupt-parent : a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- interrupts : interrupt specification for the ektf2127 interrupt
- power-gpios : GPIO specification for the pin connected to the
ektf2127's wake input. This needs to be driven high
diff --git a/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt b/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt
index 1dcff4a43eaa..68291b94fec2 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: must be "eeti,exc3000"
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: touch controller interrupt
- touchscreen-size-x: See touchscreen.txt
- touchscreen-size-y: See touchscreen.txt
diff --git a/Documentation/devicetree/bindings/input/touchscreen/fsl-mx25-tcq.txt b/Documentation/devicetree/bindings/input/touchscreen/fsl-mx25-tcq.txt
index abfcab3edc66..99d6f9d25335 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/fsl-mx25-tcq.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/fsl-mx25-tcq.txt
@@ -8,7 +8,6 @@ Required properties:
- reg: Memory range of the device.
- interrupts: Should be the interrupt number associated with this module within
the tscadc unit (<0>).
- - interrupt-parent: Should be a phandle to the tscadc unit.
- fsl,wires: Should be '<4>' or '<5>'
Optional properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
index 0c369d8ebcab..f7e95c52f3c7 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
@@ -11,7 +11,6 @@ Required properties:
or "goodix,gt928"
or "goodix,gt967"
- reg : I2C address of the chip. Should be 0x5d or 0x14
- - interrupt-parent : Interrupt controller to which the chip is connected
- interrupts : Interrupt to which the chip is connected
Optional properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
index 121d9b7c79a2..a47c36190b01 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible : must be "hideep,hideep-ts"
- reg : I2C slave address, (e.g. 0x6C).
-- interrupt-parent : Interrupt controller to which the chip is connected.
- interrupts : Interrupt to which the chip is connected.
Optional properties:
@@ -32,7 +31,7 @@ i2c@00000000 {
reg = <0x6c>;
interrupt-parent = <&gpx1>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
- vdd-supply = <&ldo15_reg>";
+ vdd-supply = <&ldo15_reg>;
vid-supply = <&ldo18_reg>;
reset-gpios = <&gpx1 5 0>;
touchscreen-size-x = <1080>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/max11801-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/max11801-ts.txt
index 40ac0fe94df6..05e982c3454e 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/max11801-ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/max11801-ts.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: must be "maxim,max11801"
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: touch controller interrupt
Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt b/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
index 7b8944c2cb31..b2ab5498e519 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: must be "melfas,mip4_ts"
- reg: I2C slave address of the chip (0x48 or 0x34)
-- interrupt-parent: interrupt controller to which the chip is connected
- interrupts: interrupt to which the chip is connected
Optional properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt b/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt
new file mode 100644
index 000000000000..51456c0e9a27
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt
@@ -0,0 +1,30 @@
+Generic resistive touchscreen ADC
+
+Required properties:
+
+ - compatible: must be "resistive-adc-touch"
+The device must be connected to an ADC device that provides channels for
+position measurement and, optionally, pressure.
+Refer to ../iio/iio-bindings.txt for details.
+ - io-channels: must have at least two channels connected to an ADC device.
+These should correspond to the channels exposed by the ADC device and should
+use the indices under which the ADC device registers them. These channels
+represent the relative position on the "x" and "y" axes.
+ - io-channel-names: must have all the channels' names. Mandatory channels
+are "x" and "y".
+
+Optional properties:
+ - io-channels: The third channel named "pressure" is optional and can be
+used if the ADC device also measures pressure besides position.
+If this channel is missing, pressure will be ignored and the touchscreen
+will only report position.
+ - io-channel-names: optional channel named "pressure".
+
+Example:
+
+ resistive_touch: resistive_touch {
+ compatible = "resistive-adc-touch";
+ touchscreen-min-pressure = <50000>;
+ io-channels = <&adc 24>, <&adc 25>, <&adc 26>;
+ io-channel-names = "x", "y", "pressure";
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt b/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt
index d9b7c2ff611e..6805d10d226d 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible : must be "samsung,s6sy761"
- reg : I2C slave address, (e.g. 0x48)
-- interrupt-parent : the phandle to the interrupt controller which provides
- the interrupt
- interrupts : interrupt specification
- avdd-supply : analogic power supply
- vdd-supply : power supply
diff --git a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
index 84752de12412..d67e558e5ab5 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
@@ -8,8 +8,6 @@ Required properties:
"silead,gsl3675"
"silead,gsl3692"
- reg : I2C slave address of the chip (0x40)
-- interrupt-parent : a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- interrupts : interrupt specification for the gsl1680 interrupt
- power-gpios : Specification for the pin connected to the gsl1680's
shutdown input. This needs to be driven high to take the
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt
index d87ad14f1efe..8f5322e01024 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: must be "sis,9200-ts"
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- (see interrupt binding [0])
- interrupts: touch controller interrupt (see interrupt
binding [0])
diff --git a/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt b/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt
index 9683595cd0f5..0a5d0cb4a280 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt
@@ -10,8 +10,6 @@ coordinates.
Required properties:
- compatible : must be "st,stmfts"
- reg : I2C slave address, (e.g. 0x49)
-- interrupt-parent : the phandle to the interrupt controller which provides
- the interrupt
- interrupts : interrupt specification
- avdd-supply : analogic power supply
- vdd-supply : power supply
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt b/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
index 5aaa6b3aa90c..4886c4aa2906 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: must be "semtech,sx8654"
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: touch controller interrupt
Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
index 537643e86f61..d092d5d033a0 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
@@ -7,6 +7,9 @@ Optional properties for Touchscreens:
(in pixels)
- touchscreen-max-pressure : maximum reported pressure (arbitrary range
dependent on the controller)
+ - touchscreen-min-pressure : minimum pressure on the touchscreen to be
+ achieved in order for the touchscreen
+ driver to report a touch event.
- touchscreen-fuzz-x : horizontal noise value of the absolute input
device (in pixels)
- touchscreen-fuzz-y : vertical noise value of the absolute input
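
For illustration, a minimal sketch of a consumer node using the new
touchscreen-min-pressure property (node name and values are hypothetical,
mirroring the resistive-adc-touch example added elsewhere in this patch):

	touchscreen {
		compatible = "resistive-adc-touch";
		io-channels = <&adc 24>, <&adc 25>, <&adc 26>;
		io-channel-names = "x", "y", "pressure";
		/* touches below this pressure are not reported */
		touchscreen-min-pressure = <50000>;
	};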
diff --git a/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt b/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt
index ec365e172236..ed00f61b8c08 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt
@@ -9,8 +9,6 @@ Optional properties:
- gpios: the interrupt gpio the chip is connected to (trough the penirq pin).
The penirq pin goes to low when the panel is touched.
(see GPIO binding[1] for more details).
-- interrupt-parent: the phandle for the gpio controller
- (see interrupt binding[0]).
- interrupts: (gpio) interrupt to which the chip is connected
(see interrupt binding[0]).
- ti,max-rt: maximum pressure.
diff --git a/Documentation/devicetree/bindings/input/touchscreen/zet6223.txt b/Documentation/devicetree/bindings/input/touchscreen/zet6223.txt
index fe6a1feef703..27d55a506f18 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/zet6223.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/zet6223.txt
@@ -3,8 +3,6 @@ Zeitec ZET6223 I2C touchscreen controller
Required properties:
- compatible : "zeitec,zet6223"
- reg : I2C slave address of the chip (0x76)
-- interrupt-parent : a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- interrupts : interrupt specification for the zet6223 interrupt
Optional properties:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt b/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt
index 9d52d5afe3e9..5a4dd263fc12 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt
@@ -13,7 +13,6 @@ Required properties
- interrupt-controller: Identifies the node as an interrupt controller.
- #interrupt-cells: Specifies the number of cells used to encode an interrupt
source connected to this controller. The value shall be 2.
-- interrupt-parent: Specifies the parent interrupt controller.
- interrupts: Specifies the list of interrupt lines which are handled by
the interrupt controller in the parent controller's notation. Interrupts
are mapped one-to-one to parent interrupts.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
index f6f1c14bf99b..5669764f9cc9 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
@@ -6,7 +6,6 @@ Required properties:
- compatible: should be "al,alpine-msix"
- reg: physical base address and size of the registers
-- interrupt-parent: specifies the parent interrupt controller.
- interrupt-controller: identifies the node as an interrupt controller
- msi-controller: identifies the node as an PCI Message Signaled Interrupt
controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
index 4903fb72d883..24beadf7ba83 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
@@ -13,7 +13,6 @@ Required properties:
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value shall be 2. The first cell is the IRQ number, the
second cell the trigger type as defined in interrupt.txt in this directory.
-- interrupt-parent: Specifies the parent interrupt controller.
- interrupts: Specifies the interrupt line (NMI) which is handled by
the interrupt controller in the parent controller's notation. This value
shall be the NMI.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
index 89674ad8a097..1502a51548bb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
@@ -15,9 +15,6 @@ Required properties:
"amlogic,meson-gxbb-gpio-intc" for GXBB SoCs (S905) or
"amlogic,meson-gxl-gpio-intc" for GXL SoCs (S905X, S912)
"amlogic,meson-axg-gpio-intc" for AXG SoCs (A113D, A113X)
-- interrupt-parent : a phandle to the GIC the interrupts are routed to.
- Usually this is provided at the root level of the device tree as it is
- common to most of the SoC.
- reg : Specifies base physical address and size of the registers.
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Specifies the number of cells needed to encode an
diff --git a/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt b/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
index 0e9f09a6a2fe..f4c5d34c4111 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: Should be "atmel,<chip>-aic"
<chip> can be "at91rm9200", "sama5d2", "sama5d3" or "sama5d4"
- interrupt-controller: Identifies the node as an interrupt controller.
-- interrupt-parent: For single AIC system, it is an empty property.
- #interrupt-cells: The number of cells to define the interrupts. It should be 3.
The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).
The second cell is used to specify flags:
@@ -27,7 +26,6 @@ Examples:
aic: interrupt-controller@fffff000 {
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
- interrupt-parent;
#interrupt-cells = <3>;
reg = <0xfffff000 0x200>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
index 6428a6ba9f4a..0f1af5a1c12e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
@@ -26,8 +26,6 @@ Required properties:
are 0..7 for bank 0, and 0..31 for bank 1.
Additional required properties for brcm,bcm2836-armctrl-ic:
-- interrupt-parent : Specifies the parent interrupt controller when this
- controller is the second level.
- interrupts : Specifies the interrupt on the parent for this interrupt
controller to handle.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt
index 8f48aad50868..37aea40d5430 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt
@@ -18,8 +18,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
-- interrupt-parent: specifies the phandle to the parent interrupt controller
- this one is cascaded from
- interrupts: specifies the interrupt line in the interrupt-parent controller
node, valid values depend on the type of parent interrupt controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt
index 4040905388d9..2bc19b1ac877 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt
@@ -29,8 +29,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
-- interrupt-parent: specifies the phandle to the parent interrupt controller(s)
- this one is cascaded from
- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
node; valid values depend on the type of parent interrupt controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
index cc217b22dccd..2117d4ac1ae5 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
@@ -28,8 +28,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
-- interrupt-parent: specifies the phandle to the parent interrupt controller(s)
- this one is cascaded from
- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
node; valid values depend on the type of parent interrupt controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
index 44a9bb15dd56..addd86b6ca2f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
@@ -56,8 +56,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
-- interrupt-parent: specifies the phandle to the parent interrupt controller
- this one is cascaded from
- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
node, valid values depend on the type of parent interrupt controller
- brcm,int-map-mask: 32-bit mask describing how many and which interrupts
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
index 36df06c5c567..d514ec060a4a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
@@ -8,8 +8,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an
interrupt source. Should be 1.
-- interrupt-parent: specifies the phandle to the parent interrupt controller
- this controller is cacaded from
- interrupts: specifies the interrupt line in the interrupt-parent irq space
to be used for cascading
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
index a4ff93d6b7f3..454ce04d6787 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
@@ -13,9 +13,6 @@ Required properties:
- reg: physical base address of the controller and length of memory mapped.
- interrupts: an interrupt to the parent interrupt controller.
-Optional properties:
-- interrupt-parent: the phandle to the parent interrupt controller.
-
This interrupt controller hardware is a second level interrupt controller that
is hooked to a parent interrupt controller, e.g. the ARM GIC on ARM-based
platforms. If interrupt-parent is not provided, the default parent interrupt
diff --git a/Documentation/devicetree/bindings/interrupt-controller/hisilicon,mbigen-v2.txt b/Documentation/devicetree/bindings/interrupt-controller/hisilicon,mbigen-v2.txt
index 3b2f4c43ad8d..a6813a071f15 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/hisilicon,mbigen-v2.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/hisilicon,mbigen-v2.txt
@@ -68,8 +68,6 @@ Examples:
Devices connect to mbigen required properties:
----------------------------------------------------
--interrupt-parent: Specifies the mbigen device node which device connected.
-
-interrupts: Specifies the interrupt source.
For the specific information of each cell in this property, please refer to
the "interrupt-cells" description mentioned above.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
index 5f89fb635a1b..d4373d0f7121 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible : should be "ingenic,<socname>-intc". Valid strings are:
ingenic,jz4740-intc
+ ingenic,jz4725b-intc
ingenic,jz4770-intc
ingenic,jz4775-intc
ingenic,jz4780-intc
@@ -11,7 +12,6 @@ Required properties:
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value shall be 1.
-- interrupt-parent : phandle of the CPU interrupt controller.
- interrupts : Specifies the CPU interrupt the controller is connected to.
Example:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
index 3f6442c7f867..930fb462fd9f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
@@ -26,8 +26,6 @@ Required properties:
See Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
for details about the GIC Device Tree binding.
-- interrupt-parent : Reference to the parent interrupt controller.
-
Example:
odmi: odmi@300000 {
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,cirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,cirq.txt
index a7efdbc3de5b..5865f4f2c69d 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,cirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,cirq.txt
@@ -16,8 +16,6 @@ Required properties:
and "mediatek,cirq" as a fallback.
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Use the same format as specified by GIC in arm,gic.txt.
-- interrupt-parent: phandle of irq parent for cirq. The parent must
- use the same interrupt-cells format as GIC.
- reg: Physical base address of the cirq registers and length of memory
mapped region.
- mediatek,ext-irq-range: Identifies external irq number range in different
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
index 07bf0b9a5139..33a98eb44949 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
@@ -11,6 +11,7 @@ Required properties:
"mediatek,mt7622-sysirq", "mediatek,mt6577-sysirq": for MT7622
"mediatek,mt6795-sysirq", "mediatek,mt6577-sysirq": for MT6795
"mediatek,mt6797-sysirq", "mediatek,mt6577-sysirq": for MT6797
+ "mediatek,mt6765-sysirq", "mediatek,mt6577-sysirq": for MT6765
"mediatek,mt6755-sysirq", "mediatek,mt6577-sysirq": for MT6755
"mediatek,mt6592-sysirq", "mediatek,mt6577-sysirq": for MT6592
"mediatek,mt6589-sysirq", "mediatek,mt6577-sysirq": for MT6589
@@ -21,8 +22,6 @@ Required properties:
"mediatek,mt2701-sysirq", "mediatek,mt6577-sysirq": for MT2701
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Use the same format as specified by GIC in arm,gic.txt.
-- interrupt-parent: phandle of irq parent for sysirq. The parent must
- use the same interrupt-cells format as GIC.
- reg: Physical base address of the intpol registers and length of memory
mapped region. Could be multiple bases here. Ex: mt6797 needs 2 reg, others
need 1.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mscc,ocelot-icpu-intr.txt b/Documentation/devicetree/bindings/interrupt-controller/mscc,ocelot-icpu-intr.txt
index b47a8a02b17b..f5baeccb689f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mscc,ocelot-icpu-intr.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mscc,ocelot-icpu-intr.txt
@@ -7,7 +7,6 @@ Required properties:
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value shall be 1.
-- interrupt-parent : phandle of the CPU interrupt controller.
- interrupts : Specifies the CPU interrupt the controller is connected to.
Example:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
index 1099fe0788fa..2ff356640100 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
@@ -15,12 +15,10 @@ Required properties:
include "nvidia,tegra30-ictlr".
- reg : Specifies base physical address and size of the registers.
Each controller must be described separately (Tegra20 has 4 of them,
- whereas Tegra30 and later have 5"
+ whereas Tegra30 and later have 5).
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value must be 3.
-- interrupt-parent : a phandle to the GIC these interrupts are routed
- to.
Notes:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt b/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt
index 38211f344dc8..0bfb3ba55f4c 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt
@@ -14,8 +14,6 @@ Required properties:
Reset value is IRQ_TYPE_LEVEL_LOW.
Optional properties:
-- interrupt-parent: empty for MIC interrupt controller, link to parent
- MIC interrupt controller for SIC1 and SIC2
- interrupts: empty for MIC interrupt controller, cascaded MIC
hardware interrupts for SIC1 and SIC2
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
index 475ae9bd562b..ad70006c1848 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
@@ -7,7 +7,6 @@ Required Properties:
- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or
"qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc"
- reg: Base address and size of the controller's memory area
-- interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode interrupt
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
index 0b2c97ddb520..8e0797cb1487 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
@@ -35,12 +35,6 @@ Properties:
interrupt.
The second element is the trigger type.
-- interrupt-parent:
- Usage: required
- Value type: <phandle>
- Definition: Specifies the interrupt parent necessary for hierarchical
- domain to operate.
-
- interrupt-controller:
Usage: required
Value type: <bool>
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index 20f121daa910..697ca2f26d1b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -7,6 +7,7 @@ Required properties:
- "renesas,irqc-r8a73a4" (R-Mobile APE6)
- "renesas,irqc-r8a7743" (RZ/G1M)
- "renesas,irqc-r8a7745" (RZ/G1E)
+ - "renesas,irqc-r8a77470" (RZ/G1C)
- "renesas,irqc-r8a7790" (R-Car H2)
- "renesas,irqc-r8a7791" (R-Car M2-W)
- "renesas,irqc-r8a7792" (R-Car V2H)
@@ -16,6 +17,7 @@ Required properties:
- "renesas,intc-ex-r8a7796" (R-Car M3-W)
- "renesas,intc-ex-r8a77965" (R-Car M3-N)
- "renesas,intc-ex-r8a77970" (R-Car V3M)
+ - "renesas,intc-ex-r8a77980" (R-Car V3H)
- "renesas,intc-ex-r8a77995" (R-Car D3)
- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
interrupts.txt in this directory
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
new file mode 100644
index 000000000000..b0a8af51c388
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
@@ -0,0 +1,44 @@
+RISC-V Hart-Level Interrupt Controller (HLIC)
+---------------------------------------------
+
+RISC-V cores include Control Status Registers (CSRs) which are local to each
+CPU core (HART in RISC-V terminology) and can be read or written by software.
+Some of these CSRs are used to control local interrupts connected to the core.
+Every interrupt is ultimately routed through a hart's HLIC before it
+interrupts that hart.
+
+The RISC-V supervisor ISA manual specifies three interrupt sources that are
+attached to every HLIC: software interrupts, the timer interrupt, and external
+interrupts. Software interrupts are used to send IPIs between cores. The
+timer interrupt comes from an architecturally mandated real-time timer that is
+controlled via Supervisor Binary Interface (SBI) calls and CSR reads. External
+interrupts connect all other device interrupts to the HLIC, which are routed
+via the platform-level interrupt controller (PLIC).
+
+All RISC-V systems that conform to the supervisor ISA specification are
+required to have a HLIC with these three interrupt sources present. Since the
+interrupt map is defined by the ISA, it's not listed in the HLIC's device tree
+entry, though external interrupt controllers (like the PLIC, for example) will
+need to define how their interrupts map to the relevant HLICs. This means
+a PLIC interrupt property will typically list the HLICs for all present HARTs
+in the system.
+
+Required properties:
+- compatible : "riscv,cpu-intc"
+- #interrupt-cells : should be <1>
+- interrupt-controller : Identifies the node as an interrupt controller
+
+Furthermore, this interrupt-controller MUST be embedded inside the cpu
+definition of the hart whose CSRs control these local interrupts.
+
+An example device tree entry for a HLIC is shown below.
+
+ cpu1: cpu@1 {
+ compatible = "riscv";
+ ...
+ cpu1-intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc";
+ interrupt-controller;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.txt b/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.txt
index 9e5f73412cd7..19af687858a1 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.txt
@@ -32,8 +32,6 @@ Optional properties:
- samsung,combiner-nr: The number of interrupt combiners supported. If this
property is not specified, the default number of combiners is assumed
to be 16.
-- interrupt-parent: pHandle of the parent interrupt controller, if not
- inherited from the parent node.
Example:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.txt b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.txt
new file mode 100644
index 000000000000..6adf7a6e8825
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.txt
@@ -0,0 +1,58 @@
+SiFive Platform-Level Interrupt Controller (PLIC)
+-------------------------------------------------
+
+SiFive SoCs include an implementation of the Platform-Level Interrupt Controller
+(PLIC) high-level specification in the RISC-V Privileged Architecture
+specification. The PLIC connects all external interrupts in the system to all
+hart contexts in the system, via the external interrupt source in each hart.
+
+A hart context is a privilege mode in a hardware execution thread. For example,
+in a 4-core system with 2-way SMT, you have 8 harts and probably at least two
+privilege modes per hart: machine mode and supervisor mode.
+
+Each interrupt can be enabled on a per-context basis. Any context can claim
+a pending enabled interrupt and then release it once it has been handled.
+
+Each interrupt has a configurable priority. Higher priority interrupts are
+serviced first. Each context can specify a priority threshold. Interrupts
+with priority below this threshold will not cause the PLIC to raise its
+interrupt line leading to the context.
+
+While the PLIC supports both edge-triggered and level-triggered interrupts,
+interrupt handlers are oblivious to this distinction and therefore it is not
+specified in the PLIC device-tree binding.
+
+While the RISC-V ISA doesn't specify a memory layout for the PLIC, the
+"sifive,plic-1.0.0" device is a concrete implementation of the PLIC that
+contains a specific memory layout, which is documented in chapter 8 of the
+SiFive U5 Coreplex Series Manual <https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf>.
+
+Required properties:
+- compatible : "sifive,plic-1.0.0" and a string identifying the actual
+ detailed implementation in case specific bugs need to be worked around.
+- #address-cells : should be <0> or more.
+- #interrupt-cells : should be <1> or more.
+- interrupt-controller : Identifies the node as an interrupt controller.
+- reg : Should contain 1 register range (address and length).
+- interrupts-extended : Specifies which contexts are connected to the PLIC,
+ with "-1" specifying that a context is not present. Each node pointed
+ to should be a riscv,cpu-intc node, which has a riscv node as parent.
+- riscv,ndev: Specifies how many external interrupts are supported by
+ this controller.
+
+Example:
+
+ plic: interrupt-controller@c000000 {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ compatible = "sifive,plic-1.0.0", "sifive,fu540-c000-plic";
+ interrupt-controller;
+ interrupts-extended = <
+ &cpu0-intc 11
+ &cpu1-intc 11 &cpu1-intc 9
+ &cpu2-intc 11 &cpu2-intc 9
+ &cpu3-intc 11 &cpu3-intc 9
+ &cpu4-intc 11 &cpu4-intc 9>;
+ reg = <0xc000000 0x4000000>;
+ riscv,ndev = <10>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
index 1f441fa0ad40..355c18a3a4d3 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: should be "sigma,smp8642-intc"
- reg: physical address of MMIO region
- ranges: address space mapping of child nodes
-- interrupt-parent: phandle of parent interrupt controller
- interrupt-controller: boolean
- #address-cells: should be <1>
- #size-cells: should be <1>
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
index 8b46a34e05f1..09fc02b99845 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
@@ -7,7 +7,6 @@ Properties:
- compatible: "snps,archs-idu-intc"
- interrupt-controller: This is an interrupt controller.
-- interrupt-parent: <reference to parent core intc>
- #interrupt-cells: Must be <1>.
Value of the cell specifies the "common" IRQ from peripheral to IDU. Number N
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt
index 492911744ca3..086ff08322db 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt
@@ -11,7 +11,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: number of cells to encode an interrupt-specifier, shall be 1
- interrupts: interrupt reference to primary interrupt controller
-- interrupt-parent: (optional) reference specific primary interrupt controller
The interrupt sources map to the corresponding bits in the interrupt
registers, i.e.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt b/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt
index 8b2faefe29ca..dac0846fe789 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt
@@ -12,7 +12,6 @@ Required properties:
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value must be 3.
-- interrupt-parent : phandle of the GIC these interrupts are routed to.
- socionext,spi-base : The SPI number of the first SPI of the 32 adjacent
ones the EXIU forwards its interrupts to.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt b/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
index 2ab0ea39867b..a407c499b3cc 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
@@ -31,10 +31,6 @@ Required properties:
parent) is equal to number of groups. The format of the interrupt
specifier depends on the interrupt parent controller.
- Optional properties:
- - interrupt-parent: pHandle of the parent interrupt controller, if not
- inherited from the parent node.
-
Example:
The following is an example from the SPEAr320 SoC dtsi file.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
index 136bd612bd83..6a36bf66d932 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
@@ -12,7 +12,7 @@ Required properties:
specifier, shall be 2
- interrupts: interrupts references to primary interrupt controller
(only needed for exti controller with multiple exti under
- same parent interrupt: st,stm32-exti and st,stm32h7-exti")
+ same parent interrupt: st,stm32-exti and st,stm32h7-exti)
Example:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt b/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt
index 7f15f1b0325b..341ae5909333 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt
@@ -11,6 +11,4 @@ Required properties:
region
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
-- interrupt-parent: phandle to the parent interrupt controller this one is
- cascaded from
- interrupts: specifies the interrupt line in the interrupt-parent controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt
index 42bb796cc4ad..ee3f9c351501 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt
@@ -46,7 +46,6 @@ C6X Interrupt Chips
- interrupt-controller
- #interrupt-cells: <1>
- reg: base address and size of register area
- - interrupt-parent: must be core interrupt controller
- interrupts: This should have four cells; one for each interrupt combiner.
The cells contain the core priority interrupt to which the
corresponding combiner output is wired.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu b/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu
index 18d4f407bf0e..422d6908f8b2 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu
@@ -12,8 +12,6 @@ Required properties:
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value must be 3.
-- interrupt-parent : a phandle to the GIC these interrupts are routed
- to.
Notes:
diff --git a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt
index b1682c80b490..525ec82615a6 100644
--- a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt
+++ b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt
@@ -31,7 +31,6 @@ Required properties:
- compatible: Should be "samsung,exynos-sysmmu"
- reg: A tuple of base address and size of System MMU registers.
- #iommu-cells: Should be <0>.
-- interrupt-parent: The phandle of the interrupt controller of System MMU
- interrupts: An interrupt specifier for interrupt signal of System MMU,
according to the format defined by a particular interrupt
controller.
diff --git a/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.txt b/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.txt
index 310810906613..64fa2fbd98c9 100644
--- a/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.txt
+++ b/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.txt
@@ -3,13 +3,6 @@ pwm-backlight bindings
Required properties:
- compatible: "pwm-backlight"
- pwms: OF device-tree PWM specification (see PWM binding[0])
- - brightness-levels: Array of distinct brightness levels. Typically these
- are in the range from 0 to 255, but any range starting at 0 will do.
- The actual brightness level (PWM duty cycle) will be interpolated
- from these values. 0 means a 0% duty cycle (darkest/off), while the
- last value in the array represents a 100% duty cycle (brightest).
- - default-brightness-level: the default brightness level (index into the
- array defined by the "brightness-levels" property)
- power-supply: regulator for supply voltage
Optional properties:
@@ -21,6 +14,19 @@ Optional properties:
and enabling the backlight using GPIO.
- pwm-off-delay-ms: Delay in ms between disabling the backlight using GPIO
and setting PWM value to 0.
+ - brightness-levels: Array of distinct brightness levels. Typically these
+ are in the range from 0 to 255, but any range starting at
+ 0 will do. The actual brightness level (PWM duty cycle)
+ will be interpolated from these values. 0 means a 0% duty
+ cycle (darkest/off), while the last value in the array
+ represents a 100% duty cycle (brightest).
+ - default-brightness-level: The default brightness level (index into the
+ array defined by the "brightness-levels" property).
+ - num-interpolated-steps: Number of interpolated steps between each value
+ of brightness-levels table. This way a high
+ resolution pwm duty cycle can be used without
+ having to list out every possible value in the
+ brightness-level array.
[0]: Documentation/devicetree/bindings/pwm/pwm.txt
[1]: Documentation/devicetree/bindings/gpio/gpio.txt
@@ -39,3 +45,17 @@ Example:
post-pwm-on-delay-ms = <10>;
pwm-off-delay-ms = <10>;
};
+
+Example using num-interpolated-steps:
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0 5000000>;
+
+ brightness-levels = <0 2048 4096 8192 16384 65535>;
+ num-interpolated-steps = <2048>;
+ default-brightness-level = <4096>;
+
+ power-supply = <&vdd_bl_reg>;
+ enable-gpios = <&gpio 58 0>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt
index 1d4afe9644b6..aa1399814a2a 100644
--- a/Documentation/devicetree/bindings/leds/common.txt
+++ b/Documentation/devicetree/bindings/leds/common.txt
@@ -31,7 +31,7 @@ Optional properties for child nodes:
"backlight" - LED will act as a back-light, controlled by the framebuffer
system
"default-on" - LED will turn on (but for leds-gpio see "default-state"
- property in Documentation/devicetree/bindings/gpio/led.txt)
+ property in Documentation/devicetree/bindings/leds/leds-gpio.txt)
"heartbeat" - LED "double" flashes at a load average based rate
"disk-activity" - LED indicates disk activity
"ide-disk" - LED indicates IDE disk activity (deprecated),
diff --git a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
index 6c9074f84a51..08b352840bd7 100644
--- a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
+++ b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
@@ -20,7 +20,10 @@ Optional properties:
- vled-supply : LED supply
Required child properties:
- - reg : 0
+ - reg : 0 - Will enable all LED sync paths
+ 1 - Will enable the LED1 sync
+ 2 - Will enable the LED2 sync
+ 3 - Will enable the LED3 sync (LM36923 only)
Optional child properties:
- label : see Documentation/devicetree/bindings/leds/common.txt
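
For illustration, a hedged sketch of a child node using the reg values
described above (the label and the choice of sync path are hypothetical,
not taken from this patch):

	led@1 {
		reg = <1>;	/* enable the LED1 sync path only */
		label = "white:backlight";
	};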
diff --git a/Documentation/devicetree/bindings/leds/leds-lt3593.txt b/Documentation/devicetree/bindings/leds/leds-lt3593.txt
new file mode 100644
index 000000000000..6b2cabc36c0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-lt3593.txt
@@ -0,0 +1,32 @@
+Bindings for Linear Technologies LT3593 LED controller
+
+Required properties:
+- compatible: Should be "lltc,lt3593".
+- lltc,ctrl-gpios: A handle to the GPIO that is connected to the 'CTRL'
+ pin of the chip.
+
+The hardware supports only one LED. The properties of this LED are
+configured in a sub-node in the device node.
+
+Optional sub-node properties:
+- label: A label for the LED. If none is given, the LED will be
+ named "lt3595::".
+- linux,default-trigger: The default trigger for the LED.
+ See Documentation/devicetree/bindings/leds/common.txt
+- default-state: The initial state of the LED.
+ See Documentation/devicetree/bindings/leds/common.txt
+
+If multiple chips of this type are found in a design, each one needs to
+be handled by its own device node.
+
+Example:
+
+led-controller {
+ compatible = "lltc,lt3593";
+ lltc,ctrl-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+
+ led {
+ label = "white:backlight";
+ default-state = "on";
+ };
+};
diff --git a/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt b/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
index 49cfc8c337c4..c4dd93f1fed2 100644
--- a/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
+++ b/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
@@ -9,7 +9,6 @@ Required properties:
of cells required for the mailbox specifier. Should be 1.
Optional properties:
-- interrupt-parent : interrupt source phandle.
- interrupts : interrupt number. The interrupt specifier format
depends on the interrupt controller parent.
diff --git a/Documentation/devicetree/bindings/mailbox/fsl,mu.txt b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
new file mode 100644
index 000000000000..f3cf77eb5ab4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
@@ -0,0 +1,54 @@
+NXP i.MX Messaging Unit (MU)
+--------------------------------------------------------------------
+
+The Messaging Unit module enables two processors within the SoC to
+communicate and coordinate by passing messages (e.g. data, status
+and control) through the MU interface. The MU also provides the ability
+for one processor to signal the other processor using interrupts.
+
+Because the MU manages the messaging between processors, the MU uses
+different clocks (from each side of the different peripheral buses).
+Therefore, the MU must synchronize the accesses from one side to the
+other. The MU accomplishes synchronization using two sets of matching
+registers (Processor A-facing, Processor B-facing).
+
+Messaging Unit Device Node:
+=============================
+
+Required properties:
+-------------------
+- compatible : should be "fsl,<chip>-mu", the supported chips include
+ imx6sx, imx7s, imx8qxp, imx8qm.
+ The "fsl,imx6sx-mu" compatible is seen as generic and should
+ be included together with SoC specific compatible.
+- reg : Should contain the registers location and length
+- interrupts : Interrupt number. The interrupt specifier format depends
+ on the interrupt controller parent.
+- #mbox-cells: Must be 2.
+ <&phandle type channel>
+ phandle : Label name of controller
+ type : Channel type
+ channel : Channel number
+
+ This MU supports 4 types of unidirectional channels; each type
+ has 4 channels, for a total of 16 channels. The following types
+ are supported:
+ 0 - TX channel with 32bit transmit register and IRQ transmit
+ acknowledgment support.
+ 1 - RX channel with 32bit receive register and IRQ support
+ 2 - TX doorbell channel. Has no register of its own and no ACK support.
+ 3 - RX doorbell channel.
+
+Optional properties:
+-------------------
+- clocks : phandle to the input clock.
+- fsl,mu-side-b : Should be set for side B MU.
+
+Examples:
+--------
+lsio_mu0: mailbox@5d1b0000 {
+ compatible = "fsl,imx8qxp-mu";
+ reg = <0x0 0x5d1b0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+};
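
For illustration, a hedged sketch of a client node using the two-cell
specifier described above (the client node and the channel choices are
hypothetical, not part of this patch):

	client {
		/* type 0 (TX) channel 0 and type 1 (RX) channel 0 */
		mboxes = <&lsio_mu0 0 0>, <&lsio_mu0 1 0>;
		mbox-names = "tx", "rx";
	};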
diff --git a/Documentation/devicetree/bindings/mailbox/mtk-gce.txt b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
new file mode 100644
index 000000000000..7d72b21c9e94
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
@@ -0,0 +1,57 @@
+MediaTek GCE
+===============
+
+The Global Command Engine (GCE) is used to help read/write registers with
+critical timing limitations, such as updating the display configuration during
+the vblank. The GCE can be used to implement the Command Queue (CMDQ) driver.
+
+The CMDQ driver uses the mailbox framework for communication. Please refer to
+mailbox.txt for generic information about mailbox device-tree bindings.
+
+Required properties:
+- compatible: Must be "mediatek,mt8173-gce"
+- reg: Address range of the GCE unit
+- interrupts: The interrupt signal from the GCE block
+- clock: Clocks according to the common clock binding
+- clock-names: Must be "gce" to stand for GCE clock
+- #mbox-cells: Should be 3.
+ <&phandle channel priority atomic_exec>
+ phandle: Label name of a gce node.
+ channel: Channel of mailbox. Be equal to the thread id of GCE.
+ priority: Priority of GCE thread.
+ atomic_exec: GCE processes continuous packets of commands in an
+ atomic way.
+
+Required properties for a client device:
+- mboxes: Clients use the mailbox to communicate with the GCE; this
+ property should contain a list of phandle and mailbox specifier pairs.
+- mediatek,gce-subsys: u32, specifies the sub-system id corresponding
+ to the register address.
+
+Some property values, such as sub-system ids, thread priorities and event ids,
+are defined in 'dt-bindings/gce/mt8173-gce.h'.
+
+Example:
+
+ gce: gce@10212000 {
+ compatible = "mediatek,mt8173-gce";
+ reg = <0 0x10212000 0 0x1000>;
+ interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_GCE>;
+ clock-names = "gce";
+ thread-num = CMDQ_THR_MAX_COUNT;
+ #mbox-cells = <3>;
+ };
+
+Example for a client device:
+
+ mmsys: clock-controller@14000000 {
+ compatible = "mediatek,mt8173-mmsys";
+ mboxes = <&gce 0 CMDQ_THR_PRIO_LOWEST 1>,
+ <&gce 1 CMDQ_THR_PRIO_LOWEST 1>;
+ mediatek,gce-subsys = <SUBSYS_1400XXXX>;
+ mutex-event-eof = <CMDQ_EVENT_MUTEX0_STREAM_EOF
+ CMDQ_EVENT_MUTEX1_STREAM_EOF>;
+
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt b/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt
new file mode 100644
index 000000000000..6c9c7daf0f5c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt
@@ -0,0 +1,50 @@
+Texas Instruments' Secure Proxy
+========================================
+
+The Texas Instruments' secure proxy is a mailbox controller that has
+configurable queues selectable at SoC (System on Chip) integration. The
+message manager is broken up into different address regions that are
+called "threads" or "proxies" - each instance is unidirectional and is
+instantiated at SoC integration level by the system controller to indicate
+the receive or transmit path.
+
+Message Manager Device Node:
+===========================
+Required properties:
+--------------------
+- compatible: Shall be "ti,am654-secure-proxy"
+- reg-names: target_data - Map the proxy data region
+ rt - Map the realtime status region
+ scfg - Map the configuration region
+- reg: Contains the register map per reg-names.
+- #mbox-cells: Shall be 1 and shall refer to the transfer path
+ called thread.
+- interrupt-names: Contains interrupt names matching the rx transfer path
+ for a given SoC. Receive interrupts shall be of the
+ format: "rx_<PID>".
+- interrupts: Contains the interrupt information corresponding to
+ interrupt-names property.
+
+Example (AM654):
+----------------
+
+ secure_proxy: mailbox@32c00000 {
+ compatible = "ti,am654-secure-proxy";
+ #mbox-cells = <1>;
+ reg-names = "target_data", "rt", "scfg";
+ reg = <0x0 0x32c00000 0x0 0x100000>,
+ <0x0 0x32400000 0x0 0x100000>,
+ <0x0 0x32800000 0x0 0x100000>;
+ interrupt-names = "rx_011";
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ dmsc: dmsc {
+ [...]
+ mbox-names = "rx", "tx";
+ # RX Thread ID is 11
+ # TX Thread ID is 13
+ mboxes= <&secure_proxy 11>,
+ <&secure_proxy 13>;
+ [...]
+ };
diff --git a/Documentation/devicetree/bindings/media/cec-gpio.txt b/Documentation/devicetree/bindings/media/cec-gpio.txt
index 12fcd55ed153..47e8d73d32a3 100644
--- a/Documentation/devicetree/bindings/media/cec-gpio.txt
+++ b/Documentation/devicetree/bindings/media/cec-gpio.txt
@@ -4,8 +4,8 @@ The HDMI CEC GPIO module supports CEC implementations where the CEC line
is hooked up to a pull-up GPIO line and - optionally - the HPD line is
hooked up to another GPIO line.
-Please note: the maximum voltage for the CEC line is 3.63V, for the HPD
-line it is 5.3V. So you may need some sort of level conversion circuitry
+Please note: the maximum voltage for the CEC line is 3.63V, for the HPD and
+5V lines it is 5.3V. So you may need some sort of level conversion circuitry
when connecting them to a GPIO line.
Required properties:
@@ -19,18 +19,24 @@ following property is also required:
- hdmi-phandle - phandle to the HDMI controller, see also cec.txt.
If the CEC line is not associated with an HDMI receiver/transmitter, then
-the following property is optional:
+the following property is optional and can be used for debugging HPD changes:
- hpd-gpios: gpio that the HPD line is connected to.
+This property is optional and can be used for debugging changes on the 5V line:
+
+ - v5-gpios: gpio that the 5V line is connected to.
+
Example for the Raspberry Pi 3 where the CEC line is connected to
-pin 26 aka BCM7 aka CE1 on the GPIO pin header and the HPD line is
-connected to pin 11 aka BCM17 (some level shifter is needed for this!):
+pin 26 aka BCM7 aka CE1 on the GPIO pin header, the HPD line is
+connected to pin 11 aka BCM17 and the 5V line is connected to pin
+15 aka BCM22 (some level shifter is needed for the HPD and 5V lines!):
#include <dt-bindings/gpio/gpio.h>
cec-gpio {
- compatible = "cec-gpio";
- cec-gpios = <&gpio 7 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
- hpd-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+ compatible = "cec-gpio";
+ cec-gpios = <&gpio 7 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ hpd-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+ v5-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
};
diff --git a/Documentation/devicetree/bindings/media/i2c/ak7375.txt b/Documentation/devicetree/bindings/media/i2c/ak7375.txt
new file mode 100644
index 000000000000..aa3e24b41241
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ak7375.txt
@@ -0,0 +1,8 @@
+Asahi Kasei Microdevices AK7375 voice coil lens driver
+
+AK7375 is a camera voice coil lens.
+
+Mandatory properties:
+
+- compatible: "asahi-kasei,ak7375"
+- reg: I2C slave address
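+
+Example (a minimal sketch; the parent I2C bus label and the 0x0c slave
+address are assumptions, not specified by this binding):
+
+ &i2c1 {
+ camera-lens@c {
+ compatible = "asahi-kasei,ak7375";
+ reg = <0x0c>;
+ };
+ };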
diff --git a/Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.txt b/Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.txt
new file mode 100644
index 000000000000..bd896e9f67d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.txt
@@ -0,0 +1,46 @@
+* Aptina MT9V111 CMOS sensor
+----------------------------
+
+The Aptina MT9V111 is a 1/4-Inch VGA-format digital image sensor with a core
+based on Aptina MT9V011 sensor and an integrated Image Flow Processor (IFP).
+
+The sensor has an active pixel array of 640x480 pixels and can output a number
+of image resolutions and formats, controllable through a simple two-wire
+interface.
+
+Required properties:
+--------------------
+
+- compatible: shall be "aptina,mt9v111".
+- clocks: reference to the system clock input provider.
+
+Optional properties:
+--------------------
+
+- enable-gpios: output enable signal, pin name "OE#". Active low.
+- standby-gpios: low power state control signal, pin name "STANDBY".
+ Active high.
+- reset-gpios: chip reset signal, pin name "RESET#". Active low.
+
+The device node must contain one 'port' child node with one 'endpoint' child
+sub-node for its digital output video port, in accordance with the video
+interface bindings defined in:
+Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+--------
+
+ &i2c1 {
+ camera@48 {
+ compatible = "aptina,mt9v111";
+ reg = <0x48>;
+
+ clocks = <&camera_clk>;
+
+ port {
+ mt9v111_out: endpoint {
+ remote-endpoint = <&ceu_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807.txt b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807.txt
new file mode 100644
index 000000000000..c4701f1eaaf6
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807.txt
@@ -0,0 +1,9 @@
+Dongwoon Anatech DW9807 voice coil lens driver
+
+DW9807 is a 10-bit DAC with current sink capability. It is intended for
+controlling voice coil lenses.
+
+Mandatory properties:
+
+- compatible: "dongwoon,dw9807-vcm"
+- reg: I2C slave address
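+
+Example (a minimal sketch; the parent I2C bus label and the 0x0c slave
+address are assumptions, not specified by this binding):
+
+ &i2c1 {
+ camera-lens@c {
+ compatible = "dongwoon,dw9807-vcm";
+ reg = <0x0c>;
+ };
+ };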
diff --git a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
index 33f10a94c381..8ee7c7972ac7 100644
--- a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
+++ b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
@@ -29,6 +29,9 @@ Optional properties
- reset-gpios: XSHUTDOWN GPIO
- flash-leds: See ../video-interfaces.txt
- lens-focus: See ../video-interfaces.txt
+- rotation: Integer property; valid values are 0 (sensor mounted upright)
+ and 180 (sensor mounted upside down). See
+ ../video-interfaces.txt.
Endpoint node mandatory properties
diff --git a/Documentation/devicetree/bindings/media/i2c/ov2680.txt b/Documentation/devicetree/bindings/media/i2c/ov2680.txt
new file mode 100644
index 000000000000..11e925ed9dad
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ov2680.txt
@@ -0,0 +1,46 @@
+* Omnivision OV2680 MIPI CSI-2 sensor
+
+Required Properties:
+- compatible: should be "ovti,ov2680".
+- clocks: reference to the xvclk input clock.
+- clock-names: should be "xvclk".
+- DOVDD-supply: Digital I/O voltage supply.
+- DVDD-supply: Digital core voltage supply.
+- AVDD-supply: Analog voltage supply.
+
+Optional Properties:
+- reset-gpios: reference to the GPIO connected to the powerdown/reset pin,
+ if any. This is an active low signal to the OV2680.
+
+The device node must contain one 'port' child node for its digital output
+video port, and this port must have a single endpoint in accordance with
+the video interface bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Endpoint node required properties for CSI-2 connection are:
+- remote-endpoint: a phandle to the bus receiver's endpoint node.
+- clock-lanes: should be set to <0> (clock lane on hardware lane 0).
+- data-lanes: should be set to <1> (one CSI-2 lane supported).
+
+Example:
+
+&i2c2 {
+ ov2680: camera-sensor@36 {
+ compatible = "ovti,ov2680";
+ reg = <0x36>;
+ clocks = <&osc>;
+ clock-names = "xvclk";
+ reset-gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ DOVDD-supply = <&sw2_reg>;
+ DVDD-supply = <&sw2_reg>;
+ AVDD-supply = <&reg_peri_3p15v>;
+
+ port {
+ ov2680_to_mipi: endpoint {
+ remote-endpoint = <&mipi_from_sensor>;
+ clock-lanes = <0>;
+ data-lanes = <1>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/media/i2c/ov5640.txt b/Documentation/devicetree/bindings/media/i2c/ov5640.txt
index 8e36da0d8406..c97c2f2da12d 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov5640.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ov5640.txt
@@ -13,6 +13,10 @@ Optional Properties:
This is an active low signal to the OV5640.
- powerdown-gpios: reference to the GPIO connected to the powerdown pin,
if any. This is an active high signal to the OV5640.
+- rotation: as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt,
+ valid values are 0 (sensor mounted upright) and 180 (sensor
+ mounted upside down).
The device node must contain one 'port' child node for its digital output
video port, in accordance with the video interface bindings defined in
@@ -51,6 +55,7 @@ Examples:
DVDD-supply = <&vgen2_reg>; /* 1.5v */
powerdown-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
+ rotation = <180>;
port {
/* MIPI CSI-2 bus endpoint */
diff --git a/Documentation/devicetree/bindings/media/i2c/tc358743.txt b/Documentation/devicetree/bindings/media/i2c/tc358743.txt
index 49f8bcc2ea4d..59102edcf01e 100644
--- a/Documentation/devicetree/bindings/media/i2c/tc358743.txt
+++ b/Documentation/devicetree/bindings/media/i2c/tc358743.txt
@@ -12,7 +12,7 @@ Required Properties:
Optional Properties:
- reset-gpios: gpio phandle GPIO connected to the reset pin
-- interrupts, interrupt-parent: GPIO connected to the interrupt pin
+- interrupts: GPIO connected to the interrupt pin
- data-lanes: should be <1 2 3 4> for four-lane operation,
or <1 2> for two-lane operation
- clock-lanes: should be <0>
diff --git a/Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt b/Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt
index 470237ed6fe5..7302e949e662 100644
--- a/Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt
+++ b/Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt
@@ -27,9 +27,15 @@ Required properties:
- sxe
- clocks : Must include the following entries:
- vde
-- resets : Must include the following entries:
+- resets : Must contain an entry for each entry in reset-names.
+- reset-names : Should include the following entries:
- vde
+Optional properties:
+- resets : Must contain an entry for each entry in reset-names.
+- reset-names : Must include the following entries:
+ - mc
+
Example:
video-codec@6001a000 {
@@ -51,5 +57,6 @@ video-codec@6001a000 {
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; /* SXE interrupt */
interrupt-names = "sync-token", "bsev", "sxe";
clocks = <&tegra_car TEGRA20_CLK_VDE>;
- resets = <&tegra_car 61>;
+ reset-names = "vde", "mc";
+ resets = <&tegra_car 61>, <&mc TEGRA20_MC_RESET_VDE>;
};
diff --git a/Documentation/devicetree/bindings/media/qcom,camss.txt b/Documentation/devicetree/bindings/media/qcom,camss.txt
index cadecebc73f7..09eb6ed99114 100644
--- a/Documentation/devicetree/bindings/media/qcom,camss.txt
+++ b/Documentation/devicetree/bindings/media/qcom,camss.txt
@@ -5,8 +5,9 @@ Qualcomm Camera Subsystem
- compatible:
Usage: required
Value type: <stringlist>
- Definition: Should contain:
+ Definition: Should contain one of:
- "qcom,msm8916-camss"
+ - "qcom,msm8996-camss"
- reg:
Usage: required
Value type: <prop-encoded-array>
@@ -19,11 +20,16 @@ Qualcomm Camera Subsystem
- "csiphy0_clk_mux"
- "csiphy1"
- "csiphy1_clk_mux"
+ - "csiphy2" (8996 only)
+ - "csiphy2_clk_mux" (8996 only)
- "csid0"
- "csid1"
+ - "csid2" (8996 only)
+ - "csid3" (8996 only)
- "ispif"
- "csi_clk_mux"
- "vfe0"
+ - "vfe1" (8996 only)
- interrupts:
Usage: required
Value type: <prop-encoded-array>
@@ -34,10 +40,14 @@ Qualcomm Camera Subsystem
Definition: Should contain the following entries:
- "csiphy0"
- "csiphy1"
+ - "csiphy2" (8996 only)
- "csid0"
- "csid1"
+ - "csid2" (8996 only)
+ - "csid3" (8996 only)
- "ispif"
- "vfe0"
+ - "vfe1" (8996 only)
- power-domains:
Usage: required
Value type: <prop-encoded-array>
@@ -53,25 +63,42 @@ Qualcomm Camera Subsystem
Usage: required
Value type: <stringlist>
Definition: Should contain the following entries:
- - "camss_top_ahb"
- - "ispif_ahb"
- - "csiphy0_timer"
- - "csiphy1_timer"
- - "csi0_ahb"
- - "csi0"
- - "csi0_phy"
- - "csi0_pix"
- - "csi0_rdi"
- - "csi1_ahb"
- - "csi1"
- - "csi1_phy"
- - "csi1_pix"
- - "csi1_rdi"
- - "camss_ahb"
- - "camss_vfe_vfe"
- - "camss_csi_vfe"
- - "iface"
- - "bus"
+ - "top_ahb"
+ - "ispif_ahb"
+ - "csiphy0_timer"
+ - "csiphy1_timer"
+ - "csiphy2_timer" (8996 only)
+ - "csi0_ahb"
+ - "csi0"
+ - "csi0_phy"
+ - "csi0_pix"
+ - "csi0_rdi"
+ - "csi1_ahb"
+ - "csi1"
+ - "csi1_phy"
+ - "csi1_pix"
+ - "csi1_rdi"
+ - "csi2_ahb" (8996 only)
+ - "csi2" (8996 only)
+ - "csi2_phy" (8996 only)
+ - "csi2_pix" (8996 only)
+ - "csi2_rdi" (8996 only)
+ - "csi3_ahb" (8996 only)
+ - "csi3" (8996 only)
+ - "csi3_phy" (8996 only)
+ - "csi3_pix" (8996 only)
+ - "csi3_rdi" (8996 only)
+ - "ahb"
+ - "vfe0"
+ - "csi_vfe0"
+ - "vfe0_ahb", (8996 only)
+ - "vfe0_stream", (8996 only)
+ - "vfe1", (8996 only)
+ - "csi_vfe1", (8996 only)
+ - "vfe1_ahb", (8996 only)
+ - "vfe1_stream", (8996 only)
+ - "vfe_ahb"
+ - "vfe_axi"
- vdda-supply:
Usage: required
Value type: <phandle>
@@ -90,22 +117,27 @@ Qualcomm Camera Subsystem
- reg:
Usage: required
Value type: <u32>
- Definition: Selects CSI2 PHY interface - PHY0 or PHY1.
+ Definition: Selects CSI2 PHY interface - PHY0, PHY1
+ or PHY2 (8996 only).
Endpoint node properties:
- clock-lanes:
Usage: required
Value type: <u32>
- Definition: The physical clock lane index. The value
- must always be <1> as the physical clock
- lane is lane 1.
+ Definition: The physical clock lane index. On 8916
+ the value must always be <1> as the physical
+ clock lane is lane 1. On 8996 the value must
+ always be <7> as the hardware supports D-PHY
+ and C-PHY, indexes are in a common set and
+ D-PHY physical clock lane is labeled as 7.
- data-lanes:
Usage: required
Value type: <prop-encoded-array>
- Definition: An array of physical data lanes indexes.
- Position of an entry determines the logical
- lane number, while the value of an entry
- indicates physical lane index. Lane swapping
- is supported.
+ Definition: An array of physical data lane indexes.
+ The position of an entry determines the logical
+ lane number, while the value of an entry
+ indicates the physical lane index. Lane swapping
+ is supported. Physical lane indexes are
+ 0, 2, 3, 4 on 8916 and 0, 1, 2, 3 on 8996.
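+
+ For illustration, an 8996 endpoint following these rules would look
+ like the sketch below (the remote endpoint label is a placeholder):
+
+ endpoint {
+ clock-lanes = <7>;
+ data-lanes = <0 1 2 3>;
+ remote-endpoint = <&sensor_ep>;
+ };
+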
* An Example
@@ -161,25 +193,25 @@ Qualcomm Camera Subsystem
<&gcc GCC_CAMSS_CSI_VFE0_CLK>,
<&gcc GCC_CAMSS_VFE_AHB_CLK>,
<&gcc GCC_CAMSS_VFE_AXI_CLK>;
- clock-names = "camss_top_ahb",
- "ispif_ahb",
- "csiphy0_timer",
- "csiphy1_timer",
- "csi0_ahb",
- "csi0",
- "csi0_phy",
- "csi0_pix",
- "csi0_rdi",
- "csi1_ahb",
- "csi1",
- "csi1_phy",
- "csi1_pix",
- "csi1_rdi",
- "camss_ahb",
- "camss_vfe_vfe",
- "camss_csi_vfe",
- "iface",
- "bus";
+ clock-names = "top_ahb",
+ "ispif_ahb",
+ "csiphy0_timer",
+ "csiphy1_timer",
+ "csi0_ahb",
+ "csi0",
+ "csi0_phy",
+ "csi0_pix",
+ "csi0_rdi",
+ "csi1_ahb",
+ "csi1",
+ "csi1_phy",
+ "csi1_pix",
+ "csi1_rdi",
+ "ahb",
+ "vfe0",
+ "csi_vfe0",
+ "vfe_ahb",
+ "vfe_axi";
vdda-supply = <&pm8916_l2>;
iommus = <&apps_iommu 3>;
ports {
diff --git a/Documentation/devicetree/bindings/media/qcom,venus.txt b/Documentation/devicetree/bindings/media/qcom,venus.txt
index 2693449daf73..00d0d1bf7647 100644
--- a/Documentation/devicetree/bindings/media/qcom,venus.txt
+++ b/Documentation/devicetree/bindings/media/qcom,venus.txt
@@ -6,6 +6,7 @@
Definition: Value should contain one of:
- "qcom,msm8916-venus"
- "qcom,msm8996-venus"
+ - "qcom,sdm845-venus"
- reg:
Usage: required
Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index a19517e1c669..2f420050d57f 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -21,7 +21,9 @@ on Gen3 platforms to a CSI-2 receiver.
- "renesas,vin-r8a7794" for the R8A7794 device
- "renesas,vin-r8a7795" for the R8A7795 device
- "renesas,vin-r8a7796" for the R8A7796 device
+ - "renesas,vin-r8a77965" for the R8A77965 device
- "renesas,vin-r8a77970" for the R8A77970 device
+ - "renesas,vin-r8a77995" for the R8A77995 device
- "renesas,rcar-gen2-vin" for a generic R-Car Gen2 or RZ/G1 compatible
device.
@@ -37,30 +39,51 @@ Additionally, an alias named vinX will need to be created to specify
which video input device this is.
The per-board settings for Gen2 platforms:
- - port sub-node describing a single endpoint connected to the vin
- as described in video-interfaces.txt[1]. Only the first one will
- be considered as each vin interface has one input port.
+
+- port - sub-node describing a single endpoint connected to the VIN
+ from external SoC pins as described in video-interfaces.txt[1].
+ Only the first one will be considered as each VIN interface has one
+ input port.
+
+ - Optional properties for endpoint nodes:
+ - hsync-active: see [1] for description. Default is active high.
+ - vsync-active: see [1] for description. Default is active high.
+ If both HSYNC and VSYNC polarities are not specified, embedded
+ synchronization is selected.
+ - field-active-even: see [1] for description. Default is active high.
+ - bus-width: see [1] for description. The selected bus width depends on
+ the SoC type and selected input image format.
+ Valid values are: 8, 10, 12, 16, 24 and 32.
+ - data-shift: see [1] for description. Valid values are 0 and 8.
+ - data-enable-active: polarity of CLKENB signal, see [1] for
+ description. Default is active high.
The per-board settings for Gen3 platforms:
Gen3 platforms can support both a single connected parallel input source
-from external SoC pins (port0) and/or multiple parallel input sources
-from local SoC CSI-2 receivers (port1) depending on SoC.
+from external SoC pins (port@0) and/or multiple parallel input sources
+from local SoC CSI-2 receivers (port@1) depending on SoC.
- renesas,id - ID number of the VIN, VINx in the documentation.
- ports
- - port 0 - sub-node describing a single endpoint connected to the VIN
- from external SoC pins described in video-interfaces.txt[1].
- Describing more then one endpoint in port 0 is invalid. Only VIN
- instances that are connected to external pins should have port 0.
- - port 1 - sub-nodes describing one or more endpoints connected to
+ - port@0 - sub-node describing a single endpoint connected to the VIN
+ from external SoC pins as described in video-interfaces.txt[1].
+ Describing more than one endpoint in port@0 is invalid. Only VIN
+ instances that are connected to external pins should have port@0.
+
+ Endpoint nodes of port@0 support the optional properties listed in
+ the Gen2 per-board settings description.
+
+ - port@1 - sub-nodes describing one or more endpoints connected to
the VIN from local SoC CSI-2 receivers. The endpoint numbers must
use the following schema.
- - Endpoint 0 - sub-node describing the endpoint connected to CSI20
- - Endpoint 1 - sub-node describing the endpoint connected to CSI21
- - Endpoint 2 - sub-node describing the endpoint connected to CSI40
- - Endpoint 3 - sub-node describing the endpoint connected to CSI41
+ - endpoint@0 - sub-node describing the endpoint connected to CSI20
+ - endpoint@1 - sub-node describing the endpoint connected to CSI21
+ - endpoint@2 - sub-node describing the endpoint connected to CSI40
+ - endpoint@3 - sub-node describing the endpoint connected to CSI41
+
+ Endpoint nodes of port@1 do not support any optional endpoint property.
Device node example for Gen2 platforms
--------------------------------------
@@ -107,9 +130,6 @@ Board setup example for Gen2 platforms (vin1 composite video input)
status = "okay";
port {
- #address-cells = <1>;
- #size-cells = <0>;
-
vin1ep0: endpoint {
remote-endpoint = <&adv7180>;
bus-width = <8>;
diff --git a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt b/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
index 17a8e81ca0cc..cfa4ffada8ae 100644
--- a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
+++ b/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
@@ -2,7 +2,6 @@ Bindings, specific for the sh_mobile_ceu_camera.c driver:
- compatible: Should be "renesas,sh-mobile-ceu"
- reg: register base and size
- interrupts: the interrupt number
- - interrupt-parent: the interrupt controller
- renesas,max-width: maximum image width, supported on this SoC
- renesas,max-height: maximum image height, supported on this SoC
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.txt b/Documentation/devicetree/bindings/media/video-interfaces.txt
index 258b8dfddf48..baf9d9756b3c 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.txt
+++ b/Documentation/devicetree/bindings/media/video-interfaces.txt
@@ -85,6 +85,10 @@ Optional properties
- lens-focus: A phandle to the node of the focus lens controller.
+- rotation: The rotation of the device, typically an image sensor, relative
+ to its upright mounting position, expressed in degrees counter-clockwise.
+ Typical values are 0 (upright) and 180 (upside down).
+
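+For illustration, a sensor mounted upside down would carry (a minimal
+sketch; the node name and unit address are placeholders):
+
+ sensor@10 {
+ /* ... */
+ rotation = <180>;
+ };
+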
Optional endpoint properties
----------------------------
@@ -109,6 +113,8 @@ Optional endpoint properties
Note, that if HSYNC and VSYNC polarities are not specified, embedded
synchronization may be required, where supported.
- data-active: similar to HSYNC and VSYNC, specifies data line polarity.
+- data-enable-active: similar to HSYNC and VSYNC, specifies the data enable
+ signal polarity.
- field-even-active: field signal level during the even field data transmission.
- pclk-sample: sample data on rising (1) or falling (0) edge of the pixel clock
signal.
diff --git a/Documentation/devicetree/bindings/mfd/ac100.txt b/Documentation/devicetree/bindings/mfd/ac100.txt
index b8ef00667599..dff219f07493 100644
--- a/Documentation/devicetree/bindings/mfd/ac100.txt
+++ b/Documentation/devicetree/bindings/mfd/ac100.txt
@@ -10,7 +10,6 @@ Required properties:
- sub-nodes:
- codec
- compatible: "x-powers,ac100-codec"
- - interrupt-parent: The parent interrupt controller
- interrupts: SoC NMI / GPIO interrupt connected to the
IRQ_AUDIO pin
- #clock-cells: Shall be 0
@@ -20,9 +19,6 @@ Required properties:
- rtc
- compatible: "x-powers,ac100-rtc"
- - interrupt-parent: The parent interrupt controller
- - interrupts: SoC NMI / GPIO interrupt connected to the
- IRQ_RTC pin
- clocks: A phandle to the codec's "4M_adda" clock
- #clock-cells: Shall be 1
- clock-output-names: "cko1_rtc", "cko2_rtc", "cko3_rtc"
diff --git a/Documentation/devicetree/bindings/mfd/altera-a10sr.txt b/Documentation/devicetree/bindings/mfd/altera-a10sr.txt
index c8a736554b4b..a688520dd87d 100644
--- a/Documentation/devicetree/bindings/mfd/altera-a10sr.txt
+++ b/Documentation/devicetree/bindings/mfd/altera-a10sr.txt
@@ -5,7 +5,6 @@ Required parent device properties:
- spi-max-frequency : Maximum SPI frequency.
- reg : The SPI Chip Select address for the Arria10
System Resource chip
-- interrupt-parent : The parent interrupt controller.
- interrupts : The interrupt line the device is connected to.
- interrupt-controller : Marks the device node as an interrupt controller.
- #interrupt-cells : The number of cells to describe an IRQ, should be 2.
diff --git a/Documentation/devicetree/bindings/mfd/arizona.txt b/Documentation/devicetree/bindings/mfd/arizona.txt
index a014afb07902..9b62831fdf3e 100644
--- a/Documentation/devicetree/bindings/mfd/arizona.txt
+++ b/Documentation/devicetree/bindings/mfd/arizona.txt
@@ -22,7 +22,6 @@ Required properties:
connected to.
- interrupt-controller : Arizona class devices contain interrupt controllers
and may provide interrupt services to other devices.
- - interrupt-parent : The parent interrupt controller.
- #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
The first cell is the IRQ number.
The second cell is the flags, encoded as the trigger masks from
diff --git a/Documentation/devicetree/bindings/mfd/as3722.txt b/Documentation/devicetree/bindings/mfd/as3722.txt
index 5297b2210704..2a665741d7fe 100644
--- a/Documentation/devicetree/bindings/mfd/as3722.txt
+++ b/Documentation/devicetree/bindings/mfd/as3722.txt
@@ -20,6 +20,8 @@ Optional properties:
- ams,enable-internal-i2c-pullup: Boolean property, to enable internal pullup on
i2c scl/sda pins. Missing this will disable internal pullup on i2c
scl/sda lines.
+- ams,enable-ac-ok-power-on: Boolean property; enables exiting power-off
+ mode via the AC_OK pin (the pin is enabled in power-off mode).
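+
+For illustration, a minimal sketch of a node using this flag (the 0x40
+slave address is an assumption, not specified by this binding):
+
+ pmic@40 {
+ compatible = "ams,as3722";
+ reg = <0x40>;
+ ams,enable-ac-ok-power-on;
+ };
+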
Optional submodule and their properties:
=======================================
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index d1762f3b30af..188f0373d441 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -28,7 +28,6 @@ Required properties:
* "x-powers,axp809"
* "x-powers,axp813"
- reg: The I2C slave address or RSB hardware address for the AXP chip
-- interrupt-parent: The parent interrupt controller
- interrupts: SoC NMI / GPIO interrupt connected to the PMIC's IRQ pin
- interrupt-controller: The PMIC has its own internal IRQs
- #interrupt-cells: Should be set to 1
@@ -45,8 +44,11 @@ Optional properties:
board is driving OTG VBus or not.
(axp221 / axp223 / axp803/ axp813 only)
-- x-powers,master-mode: Boolean (axp806 only). Set this when the PMIC is
- wired for master mode. The default is slave mode.
+- x-powers,self-working-mode and
+ x-powers,master-mode: Boolean (axp806 only). Set either of these when the
+ PMIC is wired for self-working mode or master mode.
+ If neither is set then slave mode is assumed.
+ This corresponds to how the MODESET pin is wired.
- <input>-supply: a phandle to the regulator supply node. May be omitted if
inputs are unregulated, such as using the IPSOUT output
diff --git a/Documentation/devicetree/bindings/mfd/bd9571mwv.txt b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
index 25d1f697eb25..8c4678650d1a 100644
--- a/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
+++ b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible : Should be "rohm,bd9571mwv".
- reg : I2C slave address.
- - interrupt-parent : Phandle to the parent interrupt controller.
- interrupts : The interrupt line the device is connected to.
- interrupt-controller : Marks the device node as an interrupt controller.
- #interrupt-cells : The number of cells to describe an IRQ, should be 2.
diff --git a/Documentation/devicetree/bindings/mfd/bfticu.txt b/Documentation/devicetree/bindings/mfd/bfticu.txt
index 65c90776c620..538192fda9ae 100644
--- a/Documentation/devicetree/bindings/mfd/bfticu.txt
+++ b/Documentation/devicetree/bindings/mfd/bfticu.txt
@@ -10,7 +10,6 @@ Required properties:
- interrupts: the main IRQ line to signal the collected IRQs
- #interrupt-cells : is 2 and their usage is compliant to the 2 cells variant
of Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-- interrupt-parent: the parent IRQ ctrl the main IRQ is connected to
- reg: access on the parent local bus (chip select, offset in chip select, size)
Example:
diff --git a/Documentation/devicetree/bindings/mfd/da9055.txt b/Documentation/devicetree/bindings/mfd/da9055.txt
index 6dab34d34fce..131a53283e17 100644
--- a/Documentation/devicetree/bindings/mfd/da9055.txt
+++ b/Documentation/devicetree/bindings/mfd/da9055.txt
@@ -22,8 +22,6 @@ Documentation/devicetree/bindings/sound/da9055.txt
Required properties:
- compatible : Should be "dlg,da9055-pmic"
- reg: Specifies the I2C slave address (defaults to 0x5a but can be modified)
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the IRQs from da9055 are delivered to.
- interrupts: IRQ line info for da9055 chip.
- interrupt-controller: da9055 has internal IRQs (has own IRQ domain).
- #interrupt-cells: Should be 1, is the local IRQ number for da9055.
diff --git a/Documentation/devicetree/bindings/mfd/da9062.txt b/Documentation/devicetree/bindings/mfd/da9062.txt
index c0a418c27e9d..edca653a5777 100644
--- a/Documentation/devicetree/bindings/mfd/da9062.txt
+++ b/Documentation/devicetree/bindings/mfd/da9062.txt
@@ -32,8 +32,6 @@ Required properties:
"dlg,da9061" for DA9061
- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
modified to match the chip's OTP settings).
-- interrupt-parent : Specifies the reference to the interrupt controller for
- the DA9062 or DA9061.
- interrupts : IRQ line information.
- interrupt-controller
diff --git a/Documentation/devicetree/bindings/mfd/da9063.txt b/Documentation/devicetree/bindings/mfd/da9063.txt
index 443e68286957..8da879935c59 100644
--- a/Documentation/devicetree/bindings/mfd/da9063.txt
+++ b/Documentation/devicetree/bindings/mfd/da9063.txt
@@ -16,8 +16,6 @@ Required properties:
- compatible : Should be "dlg,da9063" or "dlg,da9063l"
- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
modified to match the chip's OTP settings).
-- interrupt-parent : Specifies the reference to the interrupt controller for
- the DA9063.
- interrupts : IRQ line information.
- interrupt-controller
diff --git a/Documentation/devicetree/bindings/mfd/da9150.txt b/Documentation/devicetree/bindings/mfd/da9150.txt
index fd4dca7f4aba..f09b41fbdf47 100644
--- a/Documentation/devicetree/bindings/mfd/da9150.txt
+++ b/Documentation/devicetree/bindings/mfd/da9150.txt
@@ -13,8 +13,6 @@ da9150-fg : Battery Fuel-Gauge
Required properties:
- compatible : Should be "dlg,da9150"
- reg: Specifies the I2C slave address
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the IRQs from da9150 are delivered to.
- interrupts: IRQ line info for da9150 chip.
- interrupt-controller: da9150 has internal IRQs (own IRQ domain).
(See ../interrupt-controller/interrupts.txt for
diff --git a/Documentation/devicetree/bindings/mfd/madera.txt b/Documentation/devicetree/bindings/mfd/madera.txt
new file mode 100644
index 000000000000..db3266088386
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/madera.txt
@@ -0,0 +1,102 @@
+Cirrus Logic Madera class audio codecs Multi-Functional Device
+
+These devices are audio SoCs with extensive digital capabilities and a range
+of analogue I/O.
+
+See also the child driver bindings in:
+bindings/pinctrl/cirrus,madera-pinctrl.txt
+bindings/regulator/arizona-regulator.txt
+bindings/sound/madera.txt
+
+Required properties:
+
+ - compatible : One of the following chip-specific strings:
+ "cirrus,cs47l35"
+ "cirrus,cs47l85"
+ "cirrus,cs47l90"
+ "cirrus,cs47l91"
+ "cirrus,wm1840"
+
+ - reg : I2C slave address when connected using I2C, chip select number when
+ using SPI.
+
+ - DCVDD-supply : Power supply for the device as defined in
+ bindings/regulator/regulator.txt
+ Mandatory on CS47L35, CS47L90, CS47L91
+ Optional on CS47L85, WM1840
+
+ - AVDD-supply, DBVDD1-supply, DBVDD2-supply, CPVDD1-supply, CPVDD2-supply :
+ Power supplies for the device
+
+ - DBVDD3-supply, DBVDD4-supply : Power supplies for the device
+ (CS47L85, CS47L90, CS47L91, WM1840)
+
+ - SPKVDDL-supply, SPKVDDR-supply : Power supplies for the device
+ (CS47L85, WM1840)
+
+ - SPKVDD-supply : Power supply for the device
+ (CS47L35)
+
+ - interrupt-controller : Indicates that this device is an interrupt controller
+
+ - #interrupt-cells: the number of cells to describe an IRQ, must be 2.
+ The first cell is the IRQ number.
+ The second cell is the flags, encoded as the trigger masks from
+ bindings/interrupt-controller/interrupts.txt
+
+ - gpio-controller : Indicates this device is a GPIO controller.
+
+ - #gpio-cells : Must be 2. The first cell is the pin number. The second cell
+ is reserved for future use and must be zero
+
+ - interrupt-parent : The parent interrupt controller.
+
+ - interrupts : The interrupt line the /IRQ signal for the device is
+ connected to.
+
+Optional properties:
+
+ - MICVDD-supply : Power supply, only need to be specified if
+ powered externally
+
+ - reset-gpios : One entry specifying the GPIO controlling /RESET.
+ As defined in bindings/gpio.txt.
+ Although optional, it is strongly recommended to use a hardware reset
+
+ - MICBIASx : Initial data for the MICBIAS regulators, as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+ One for each MICBIAS generator (MICBIAS1, MICBIAS2, ...)
+ (all codecs)
+
+ One for each output pin (MICBIAS1A, MICBIAS1B, MICBIAS2A, ...)
+ (all except CS47L85, WM1840)
+
+ The following additional property is supported for the generator
+ nodes:
+ - cirrus,ext-cap : Set to 1 if the MICBIAS has external decoupling
+ capacitors attached.
+
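+ For illustration, a MICBIAS generator node might look like this minimal
+ sketch (the voltage values are placeholders; the node name follows the
+ MICBIAS1, MICBIAS2, ... naming above):
+
+ MICBIAS1 {
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <2200000>;
+ cirrus,ext-cap = <1>;
+ };
+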
+Optional child nodes:
+ micvdd : Node containing initialization data for the micvdd regulator
+ See bindings/regulator/arizona-regulator.txt
+
+ ldo1 : Node containing initialization data for the LDO1 regulator
+ See bindings/regulator/arizona-regulator.txt
+ (cs47l85, wm1840)
+
+Example:
+
+cs47l85@0 {
+ compatible = "cirrus,cs47l85";
+ reg = <0>;
+
+ reset-gpios = <&gpio 0>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <&host_irq1>;
+ interrupt-parent = <&gic>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+};
diff --git a/Documentation/devicetree/bindings/mfd/max14577.txt b/Documentation/devicetree/bindings/mfd/max14577.txt
index 236264c10b92..fc6f0f4e8beb 100644
--- a/Documentation/devicetree/bindings/mfd/max14577.txt
+++ b/Documentation/devicetree/bindings/mfd/max14577.txt
@@ -11,7 +11,6 @@ Required properties:
- compatible : Must be "maxim,max14577" or "maxim,max77836".
- reg : I2C slave address for the max14577 chip (0x25 for max14577/max77836)
- interrupts : IRQ line for the chip.
-- interrupt-parent : The parent interrupt controller.
Required nodes:
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
index 0f2587fa42cb..42968b7144e0 100644
--- a/Documentation/devicetree/bindings/mfd/max77686.txt
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -15,7 +15,6 @@ Required properties:
- compatible : Must be "maxim,max77686";
- reg : Specifies the i2c slave address of PMIC block.
- interrupts : This i2c device has an IRQ line connected to the main SoC.
-- interrupt-parent : The parent interrupt controller.
Example:
diff --git a/Documentation/devicetree/bindings/mfd/max77693.txt b/Documentation/devicetree/bindings/mfd/max77693.txt
index e6754974a745..a3c60a7a3be1 100644
--- a/Documentation/devicetree/bindings/mfd/max77693.txt
+++ b/Documentation/devicetree/bindings/mfd/max77693.txt
@@ -14,7 +14,6 @@ Required properties:
- compatible : Must be "maxim,max77693".
- reg : Specifies the i2c slave address of PMIC block.
- interrupts : This i2c device has an IRQ line connected to the main SoC.
-- interrupt-parent : The parent interrupt controller.
Optional properties:
- regulators : The regulators of max77693 have to be instantiated under subnode
diff --git a/Documentation/devicetree/bindings/mfd/max77802.txt b/Documentation/devicetree/bindings/mfd/max77802.txt
index f2f3fe75901c..09decac20d91 100644
--- a/Documentation/devicetree/bindings/mfd/max77802.txt
+++ b/Documentation/devicetree/bindings/mfd/max77802.txt
@@ -14,7 +14,6 @@ Required properties:
- compatible : Must be "maxim,max77802"
- reg : Specifies the I2C slave address of PMIC block.
- interrupts : I2C device IRQ line connected to the main SoC.
-- interrupt-parent : The parent interrupt controller.
Example:
diff --git a/Documentation/devicetree/bindings/mfd/max8998.txt b/Documentation/devicetree/bindings/mfd/max8998.txt
index 23a3650ff2a2..5f2f07c09c90 100644
--- a/Documentation/devicetree/bindings/mfd/max8998.txt
+++ b/Documentation/devicetree/bindings/mfd/max8998.txt
@@ -20,8 +20,6 @@ Required properties:
- reg: Specifies the i2c slave address of the pmic block. It should be 0x66.
Optional properties:
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the interrupts from MAX8998 are routed to.
- interrupts: Interrupt specifiers for two interrupt sources.
- First interrupt specifier is for main interrupt.
- Second interrupt specifier is for power-on/-off interrupt.
diff --git a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
index c639705a98ef..5ddcc8f4febc 100644
--- a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
+++ b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
@@ -3,7 +3,6 @@ Motorola CPCAP PMIC device tree binding
Required properties:
- compatible : One or both of "motorola,cpcap" or "ste,6556002"
- reg : SPI chip select
-- interrupt-parent : The parent interrupt controller
- interrupts : The interrupt line the device is connected to
- interrupt-controller : Marks the device node as an interrupt controller
- #interrupt-cells : The number of cells to describe an IRQ, should be 2
diff --git a/Documentation/devicetree/bindings/mfd/palmas.txt b/Documentation/devicetree/bindings/mfd/palmas.txt
index 8ae1a32bfb7e..e736ab3012a6 100644
--- a/Documentation/devicetree/bindings/mfd/palmas.txt
+++ b/Documentation/devicetree/bindings/mfd/palmas.txt
@@ -25,7 +25,6 @@ and also the generic series names
The first cell is the IRQ number.
The second cell is the flags, encoded as the trigger masks from
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-- interrupt-parent : The parent interrupt controller.
Optional properties:
ti,mux-padX : set the pad register X (1-2) to the correct muxing for the
diff --git a/Documentation/devicetree/bindings/mfd/retu.txt b/Documentation/devicetree/bindings/mfd/retu.txt
index 876242394a16..df3005dd3e3c 100644
--- a/Documentation/devicetree/bindings/mfd/retu.txt
+++ b/Documentation/devicetree/bindings/mfd/retu.txt
@@ -9,7 +9,6 @@ Required properties:
- compatible: "nokia,retu" or "nokia,tahvo"
- reg: Specifies the CBUS slave address of the ASIC chip
- interrupts: The interrupt line the device is connected to
-- interrupt-parent: The parent interrupt controller
Example:
diff --git a/Documentation/devicetree/bindings/mfd/rk808.txt b/Documentation/devicetree/bindings/mfd/rk808.txt
index 91b65227afeb..1683ec3245bc 100644
--- a/Documentation/devicetree/bindings/mfd/rk808.txt
+++ b/Documentation/devicetree/bindings/mfd/rk808.txt
@@ -10,7 +10,6 @@ Required properties:
- compatible: "rockchip,rk808"
- compatible: "rockchip,rk818"
- reg: I2C slave address
-- interrupt-parent: The parent interrupt controller.
- interrupts: the interrupt outputs of the controller.
- #clock-cells: from common clock binding; shall be set to 1 (multiple clock
outputs). See <dt-bindings/clock/rockchip,rk808.h> for clock IDs.
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt
new file mode 100644
index 000000000000..3ca56fdb5ffe
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt
@@ -0,0 +1,62 @@
+* ROHM BD71837 Power Management Integrated Circuit bindings
+
+BD71837MWV is a programmable Power Management IC for powering single-core,
+dual-core, and quad-core SoCs such as NXP-i.MX 8M. It is optimized for
+low BOM cost and compact solution footprint. It integrates 8 buck
+regulators and 7 LDOs to provide all the power rails required by the SoC and
+the commonly used peripherals.
+
+Datasheet for PMIC is available at:
+https://www.rohm.com/datasheet/BD71837MWV/bd71837mwv-e
+
+Required properties:
+ - compatible : Should be "rohm,bd71837".
+ - reg : I2C slave address.
+ - interrupt-parent : Phandle to the parent interrupt controller.
+ - interrupts : The interrupt line the device is connected to.
+ - clocks : The parent clock connected to the PMIC. If this is missing,
+ a 32.768 kHz clock is assumed.
+ - #clock-cells : Should be 0.
+ - regulators: : List of child nodes that specify the regulators.
+ Please see ../regulator/rohm,bd71837-regulator.txt
+
+Optional properties:
+- clock-output-names : Should contain name for output clock.
+
+Example:
+
+ /* external oscillator node */
+ osc: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <1>;
+ clock-frequency = <32768>;
+ clock-output-names = "osc";
+ };
+
+ pmic: pmic@4b {
+ compatible = "rohm,bd71837";
+ reg = <0x4b>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <29 GPIO_ACTIVE_LOW>;
+ interrupt-names = "irq";
+ #clock-cells = <0>;
+ clocks = <&osc 0>;
+ clock-output-names = "bd71837-32k-out";
+
+ regulators {
+ buck1: BUCK1 {
+ regulator-name = "buck1";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-ramp-delay = <1250>;
+ };
+ };
+ };
+
+ /* Clock consumer node */
+ rtc@0 {
+ compatible = "company,my-rtc";
+ clock-names = "my-clock";
+ clocks = <&pmic>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/samsung,sec-core.txt b/Documentation/devicetree/bindings/mfd/samsung,sec-core.txt
index cdd079bfc287..c68cdd365153 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,sec-core.txt
+++ b/Documentation/devicetree/bindings/mfd/samsung,sec-core.txt
@@ -31,8 +31,6 @@ Required properties:
- reg: Specifies the I2C slave address of the pmic block. It should be 0x66.
Optional properties:
- - interrupt-parent: Specifies the phandle of the interrupt controller to which
- the interrupts from s2mps11 are delivered to.
- interrupts: Interrupt specifiers for interrupt sources.
- samsung,s2mps11-wrstbi-ground: Indicates that WRSTBI pin of PMIC is pulled
down. When the system is suspended it will always go down thus triggering
diff --git a/Documentation/devicetree/bindings/mfd/stmpe.txt b/Documentation/devicetree/bindings/mfd/stmpe.txt
index f9065a5781a2..c797c05cd3c2 100644
--- a/Documentation/devicetree/bindings/mfd/stmpe.txt
+++ b/Documentation/devicetree/bindings/mfd/stmpe.txt
@@ -10,7 +10,6 @@ Required properties:
Optional properties:
- interrupts : The interrupt outputs from the controller
- interrupt-controller : Marks the device node as an interrupt controller
- - interrupt-parent : Specifies which IRQ controller we're connected to
- wakeup-source : Marks the input device as wakeable
- st,autosleep-timeout : Valid entries (ms); 4, 16, 32, 64, 128, 256, 512 and 1024
- irq-gpio : If present, which GPIO to use for event IRQ
diff --git a/Documentation/devicetree/bindings/mfd/tc3589x.txt b/Documentation/devicetree/bindings/mfd/tc3589x.txt
index 23fc2f21f5a4..4f22b2b07dc5 100644
--- a/Documentation/devicetree/bindings/mfd/tc3589x.txt
+++ b/Documentation/devicetree/bindings/mfd/tc3589x.txt
@@ -15,7 +15,6 @@ Required properties:
- compatible : must be "toshiba,tc35890", "toshiba,tc35892", "toshiba,tc35893",
"toshiba,tc35894", "toshiba,tc35895" or "toshiba,tc35896"
- reg : I2C address of the device
- - interrupt-parent : specifies which IRQ controller we're connected to
- interrupts : the interrupt on the parent the controller is connected to
- interrupt-controller : marks the device node as an interrupt controller
- #interrupt-cells : should be <1>, the first cell is the IRQ offset on this
diff --git a/Documentation/devicetree/bindings/mfd/tps65086.txt b/Documentation/devicetree/bindings/mfd/tps65086.txt
index 9cfa886fe99f..67eac0ed32df 100644
--- a/Documentation/devicetree/bindings/mfd/tps65086.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65086.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible : Should be "ti,tps65086".
- reg : I2C slave address.
- - interrupt-parent : Phandle to the parent interrupt controller.
- interrupts : The interrupt line the device is connected to.
- interrupt-controller : Marks the device node as an interrupt controller.
- #interrupt-cells : The number of cells to describe an IRQ, should be 2.
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
index 8af1202b381d..4f62143afd24 100644
--- a/Documentation/devicetree/bindings/mfd/tps65910.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65910.txt
@@ -22,7 +22,7 @@ Required properties:
The valid regulator-compatible values are:
tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1,
vaux2, vaux33, vmmc, vbb
- tps65911: vrtc, vio, vdd1, vdd3, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5,
+ tps65911: vrtc, vio, vdd1, vdd2, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5,
ldo6, ldo7, ldo8
- xxx-supply: Input voltage supply regulator.
diff --git a/Documentation/devicetree/bindings/mfd/tps65912.txt b/Documentation/devicetree/bindings/mfd/tps65912.txt
index 717e66d23142..8becb183a48e 100644
--- a/Documentation/devicetree/bindings/mfd/tps65912.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65912.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible : Should be "ti,tps65912".
- reg : Slave address or chip select number (I2C / SPI).
- - interrupt-parent : The parent interrupt controller.
- interrupts : The interrupt line the device is connected to.
- interrupt-controller : Marks the device node as an interrupt controller.
- #interrupt-cells : The number of cells to describe an IRQ, should be 2.
diff --git a/Documentation/devicetree/bindings/mfd/twl-familly.txt b/Documentation/devicetree/bindings/mfd/twl-familly.txt
index a66fcf946759..56f244b5d8a4 100644
--- a/Documentation/devicetree/bindings/mfd/twl-familly.txt
+++ b/Documentation/devicetree/bindings/mfd/twl-familly.txt
@@ -16,7 +16,6 @@ Required properties:
- interrupt-controller : Since the twl support several interrupts internally,
it is considered as an interrupt controller cascaded to the SoC one.
- #interrupt-cells = <1>;
-- interrupt-parent : The parent interrupt controller.
Optional node:
- Child nodes contain in the twl. The twl family is made of several variants
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
index 9a98ee7c323d..06e9dd7a0d96 100644
--- a/Documentation/devicetree/bindings/mfd/twl6040.txt
+++ b/Documentation/devicetree/bindings/mfd/twl6040.txt
@@ -9,7 +9,6 @@ Required properties:
- compatible : "ti,twl6040" for twl6040, "ti,twl6041" for twl6041
- reg: must be 0x4b for i2c address
- interrupts: twl6040 has one interrupt line connected to the main SoC
-- interrupt-parent: The parent interrupt controller
- gpio-controller:
- #gpio-cells = <1>: twl6040 provides GPO lines.
- #clock-cells = <0>; twl6040 is a provider of pdmclk which is used by McPDM
diff --git a/Documentation/devicetree/bindings/mfd/wm831x.txt b/Documentation/devicetree/bindings/mfd/wm831x.txt
index 505709403d3f..6b84b1b0d018 100644
--- a/Documentation/devicetree/bindings/mfd/wm831x.txt
+++ b/Documentation/devicetree/bindings/mfd/wm831x.txt
@@ -22,7 +22,6 @@ Required properties:
- interrupts : The interrupt line the IRQ signal for the device is
connected to.
- - interrupt-parent : The parent interrupt controller.
- interrupt-controller : wm831x devices contain interrupt controllers and
may provide interrupt services to other devices.
diff --git a/Documentation/devicetree/bindings/mips/brcm/soc.txt b/Documentation/devicetree/bindings/mips/brcm/soc.txt
index 356c29789cf5..3a66d3c483e1 100644
--- a/Documentation/devicetree/bindings/mips/brcm/soc.txt
+++ b/Documentation/devicetree/bindings/mips/brcm/soc.txt
@@ -152,7 +152,7 @@ Required properties:
- compatible : should contain one of:
"brcm,bcm7425-timers"
"brcm,bcm7429-timers"
- "brcm,bcm7435-timers and
+ "brcm,bcm7435-timers" and
"brcm,brcmstb-timers"
- reg : the timers register range
- interrupts : the interrupt line for this timer block
diff --git a/Documentation/devicetree/bindings/mips/cavium/cib.txt b/Documentation/devicetree/bindings/mips/cavium/cib.txt
index f39a1aa2852b..410efa322254 100644
--- a/Documentation/devicetree/bindings/mips/cavium/cib.txt
+++ b/Documentation/devicetree/bindings/mips/cavium/cib.txt
@@ -13,8 +13,6 @@ Properties:
- cavium,max-bits: The index (zero based) of the highest numbered bit
in the CIB block.
-- interrupt-parent: Always the CIU on the SoC.
-
- interrupts: The CIU line to which the CIB block is connected.
- #interrupt-cells: Must be <2>. The first cell is the bit within the
diff --git a/Documentation/devicetree/bindings/misc/aspeed,cvic.txt b/Documentation/devicetree/bindings/misc/aspeed,cvic.txt
new file mode 100644
index 000000000000..d62c783d1d5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/aspeed,cvic.txt
@@ -0,0 +1,35 @@
+* ASPEED AST2400 and AST2500 coprocessor interrupt controller
+
+This file describes the bindings for the interrupt controller present
+in the AST2400 and AST2500 BMC SoCs which provides interrupt to the
+ColdFire coprocessor.
+
+It is not a normal interrupt controller and it would be rather
+inconvenient to create an interrupt tree for it as it somewhat shares
+some of the same sources as the main ARM interrupt controller but with
+different numbers.
+
+The AST2500 supports a SW generated interrupt.
+
+Required properties:
+- reg: address and length of the register for the device.
+- compatible: "aspeed,cvic" and one of:
+ "aspeed,ast2400-cvic"
+ or
+ "aspeed,ast2500-cvic"
+
+- valid-sources: One cell, bitmap of supported sources for the implementation
+
+Optional properties:
+- copro-sw-interrupts: List of interrupt numbers that can be used as
+ SW interrupts from the ARM to the coprocessor.
+ (AST2500 only)
+
+Example:
+
+ cvic: copro-interrupt-controller@1e6c2000 {
+ compatible = "aspeed,ast2500-cvic";
+ valid-sources = <0xffffffff>;
+ copro-sw-interrupts = <1>;
+ reg = <0x1e6c2000 0x80>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 60481bfc3d31..f6ddba31cb73 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -19,8 +19,6 @@ Required Properties:
- clocks: From clock bindings: Handles to clock inputs.
- clock-names: From clock bindings: Tuple including "clk_xin" and "clk_ahb"
- interrupts: Interrupt specifier
- - interrupt-parent: Phandle for the interrupt controller that services
- interrupts for this device.
Required Properties for "arasan,sdhci-5.1":
- phys: From PHY bindings: Phandle for the Generic PHY for arasan.
@@ -39,6 +37,8 @@ Optional Properties:
- xlnx,fails-without-test-cd: when present, the controller doesn't work when
the CD line is not connected properly, and on this board the line is not
connected properly. Test mode can be used to force the controller to function.
+ - xlnx,int-clock-stable-broken: when present, the controller always reports
+ that the internal clock is stable even when it is not.
Example:
sdhci@e0100000 {
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index a2cf5e1c87d8..99c5cf8507e8 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -21,7 +21,6 @@ Required properties:
"fsl,ls1043a-esdhc"
"fsl,ls1046a-esdhc"
"fsl,ls2080a-esdhc"
- - interrupt-parent : interrupt source phandle.
- clock-frequency : specifies eSDHC base clock frequency.
Optional properties:
diff --git a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
index 0e5e2ec4001d..75486cca8054 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
@@ -13,8 +13,6 @@ Optional properties:
- gpios : may specify GPIOs in this order: Card-Detect GPIO,
Write-Protect GPIO. Note that this does not follow the
binding from mmc.txt, for historical reasons.
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Example:
diff --git a/Documentation/devicetree/bindings/mmc/pxa-mmc.txt b/Documentation/devicetree/bindings/mmc/pxa-mmc.txt
index b7025de7dced..5f5c2bec2b8c 100644
--- a/Documentation/devicetree/bindings/mmc/pxa-mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/pxa-mmc.txt
@@ -8,10 +8,9 @@ Required properties:
Optional properties:
- marvell,detect-delay-ms: sets the detection delay timeout in ms.
-- marvell,gpio-power: GPIO spec for the card power enable pin
-This file documents differences between the core properties in mmc.txt
-and the properties used by the pxa-mmc driver.
+In addition to the properties described in this document, the details
+described in mmc.txt are supported.
Examples:
@@ -19,6 +18,7 @@ mmc0: mmc@41100000 {
compatible = "marvell,pxa-mmc";
reg = <0x41100000 0x1000>;
interrupts = <23>;
+ vmmc-supply = <&mmc_regulator>;
cd-gpios = <&gpio 23 0>;
wp-gpios = <&gpio 24 0>;
};
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index 8ce49b255974..6f629b12bd69 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -14,6 +14,7 @@ Required Properties:
before RK3288
- "rockchip,rk3288-dw-mshc": for Rockchip RK3288
- "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
+ - "rockchip,px30-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip PX30
- "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
- "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK322x
- "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3328
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index bfdcdc4ccdff..502b3b851ebb 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -4,7 +4,12 @@ This file documents differences between the core properties in mmc.txt
and the properties used by the sdhci-msm driver.
Required properties:
-- compatible: Should contain "qcom,sdhci-msm-v4".
+- compatible: Should contain:
+ "qcom,sdhci-msm-v4" for sdcc versions less than 5.0
+ "qcom,sdhci-msm-v5" for sdcc versions >= 5.0
+ For SDCC version 5.0.0, the MCI registers are removed from the SDCC
+ interface and some registers are moved to the HC. The new compatible
+ string "qcom,sdhci-msm-v5" is added to support this change.
- reg: Base address and length of the register in the following order:
- Host controller register map (required)
- SD Core register map (required)
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-of-dwcmshc.txt b/Documentation/devicetree/bindings/mmc/sdhci-of-dwcmshc.txt
new file mode 100644
index 000000000000..ee4253b33be2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci-of-dwcmshc.txt
@@ -0,0 +1,20 @@
+* Synopsys DesignWare Cores Mobile Storage Host Controller
+
+Required properties:
+- compatible: should be one of the following:
+ "snps,dwcmshc-sdhci"
+- reg: offset and length of the register set for the device.
+- interrupts: a single interrupt specifier.
+- clocks: Array of clocks required for SDHCI; requires at least one for
+ core clock.
+- clock-names: Array of names corresponding to clocks property; shall be
+ "core" for core clock and "bus" for optional bus clock.
+
+Example:
+ sdhci2: sdhci@aa0000 {
+ compatible = "snps,dwcmshc-sdhci";
+ reg = <0xaa0000 0x1000>;
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&emmcclk>;
+ clock-names = "core";
+ bus-width = <8>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index 839f469f4525..c434200d19d5 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -28,6 +28,7 @@ Required properties:
"renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
"renesas,sdhi-r8a77965" - SDHI IP on R8A77965 SoC
"renesas,sdhi-r8a77980" - SDHI IP on R8A77980 SoC
+ "renesas,sdhi-r8a77990" - SDHI IP on R8A77990 SoC
"renesas,sdhi-r8a77995" - SDHI IP on R8A77995 SoC
"renesas,sdhi-shmobile" - a generic sh-mobile SDHI controller
"renesas,rcar-gen1-sdhi" - a generic R-Car Gen1 SDHI controller
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index b40f3a492800..bcda1dfc4bac 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
@@ -42,7 +42,6 @@ Required properties:
May be "nand", if the SoC has the individual NAND
interrupts multiplexed behind another custom piece of
hardware
-- interrupt-parent : See standard interrupt bindings
- #address-cells : <1> - subnodes give the chip-select number
- #size-cells : <0>
diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt
index 0ee8edb60efc..f33da8782741 100644
--- a/Documentation/devicetree/bindings/mtd/denali-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/denali-nand.txt
@@ -8,6 +8,9 @@ Required properties:
- reg : should contain registers location and length for data and reg.
- reg-names: Should contain the reg names "nand_data" and "denali_reg"
- interrupts : The interrupt number.
+ - clocks: should contain phandle of the controller core clock, the bus
+ interface clock, and the ECC circuit clock.
+ - clock-names: should contain "nand", "nand_x", "ecc"
Optional properties:
- nand-ecc-step-size: see nand.txt for details. If present, the value must be
@@ -31,5 +34,7 @@ nand: nand@ff900000 {
compatible = "altr,socfpga-denali-nand";
reg = <0xff900000 0x20>, <0xffb80000 0x1000>;
reg-names = "nand_data", "denali_reg";
+ clocks = <&nand_clk>, <&nand_x_clk>, <&nand_ecc_clk>;
+ clock-names = "nand", "nand_x", "ecc";
interrupts = <0 144 4>;
};
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index dd559045593d..c059ab74ed88 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -16,7 +16,6 @@ Required properties:
- compatible: "ti,omap2-nand"
- reg: range id (CS number), base offset and length of the
NAND I/O space
- - interrupt-parent: must point to gpmc node
- interrupts: Two interrupt specifiers, one for fifoevent, one for termcount.
Optional properties:
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
index 956bb046e599..f03be904d3c2 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
@@ -69,6 +69,15 @@ Optional properties:
all chips and support for it can not be detected at runtime.
Refer to your chips' datasheet to check if this is supported
by your chip.
+- broken-flash-reset : Some flash devices utilize stateful addressing modes
+ (e.g., for 32-bit addressing) which need to be managed
+ carefully by a system. Because these sorts of flash don't
+ have a standardized software reset command, and because some
+ systems don't toggle the flash RESET# pin upon system reset
+ (if the pin even exists at all), there are systems which
+ cannot reboot properly if the flash is left in the "wrong"
+ state. This boolean flag can be used on such systems, to
+ denote the absence of a reliable reset mechanism.
Example:
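
A minimal sketch of how the new flag could sit in a flash node (the chip-select,
frequency and node name here are illustrative, not taken from the patch):

    flash@0 {
        compatible = "jedec,spi-nor";
        reg = <0>;
        spi-max-frequency = <40000000>;
        /* board cannot reliably reset the flash, see above */
        broken-flash-reset;
    };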
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index 8bb11d809429..e949c778e983 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -25,7 +25,7 @@ Optional NAND chip properties:
Deprecated values:
"soft_bch": use "soft" and nand-ecc-algo instead
- nand-ecc-algo: string, algorithm of NAND ECC.
- Supported values are: "hamming", "bch".
+ Valid values are: "hamming", "bch", "rs".
- nand-bus-width : 8 or 16 bus width if not present 8
- nand-on-flash-bbt: boolean to enable on flash bbt option if not present false
@@ -43,6 +43,10 @@ Optional NAND chip properties:
This is particularly useful when only the in-band area is
used by the upper layers, and you want to make your NAND
as reliable as possible.
+- nand-is-boot-medium: Whether the NAND chip is a boot medium. Drivers might use
+ this information to select ECC algorithms supported by
+ the boot ROM, or to apply similar restrictions (see the
+ sketch below).
+
- nand-rb: shall contain the native Ready/Busy ids.
The ECC strength and ECC step size properties define the correction capability
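
A hedged sketch of a chip node combining the updated ECC properties (node name
and value choices are illustrative; "rs" is only valid where the controller
supports it):

    nand@0 {
        reg = <0>;
        nand-ecc-mode = "hw";
        nand-ecc-algo = "rs";
        /* restrict ECC selection to what the boot ROM can handle */
        nand-is-boot-medium;
    };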
diff --git a/Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt b/Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt
new file mode 100644
index 000000000000..b2f2ca12f9e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt
@@ -0,0 +1,64 @@
+NVIDIA Tegra NAND Flash controller
+
+Required properties:
+- compatible: Must be one of:
+ - "nvidia,tegra20-nand"
+- reg: MMIO address range
+- interrupts: interrupt output of the NFC controller
+- clocks: Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - nand
+- resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+ - nand
+
+Optional children nodes:
+Individual NAND chips are children of the NAND controller node. Currently
+only one NAND chip is supported.
+
+Required children node properties:
+- reg: An integer ranging from 1 to 6 representing the CS line to use.
+
+Optional children node properties:
+- nand-ecc-mode: String, operation mode of the NAND ECC engine. Currently only
+ "hw" is supported.
+- nand-ecc-algo: string, algorithm of NAND ECC.
+ Supported values with "hw" ECC mode are: "rs", "bch".
+- nand-bus-width : See nand.txt
+- nand-on-flash-bbt: See nand.txt
+- nand-ecc-strength: integer representing the number of bits to correct
+ per ECC step (the step size is always 512 bytes). Supported
+ strengths using the HW ECC modes are:
+ - RS: 4, 6, 8
+ - BCH: 4, 8, 14, 16
+- nand-ecc-maximize: See nand.txt
+- nand-is-boot-medium: Makes sure only ECC strengths supported by the boot ROM
+ are chosen.
+- wp-gpios: GPIO specifier for the write protect pin.
+
+Optional child node of NAND chip nodes:
+Partitions: see partition.txt
+
+ Example:
+ nand-controller@70008000 {
+ compatible = "nvidia,tegra20-nand";
+ reg = <0x70008000 0x100>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA20_CLK_NDFLASH>;
+ clock-names = "nand";
+ resets = <&tegra_car 13>;
+ reset-names = "nand";
+
+ nand@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nand-bus-width = <8>;
+ nand-on-flash-bbt;
+ nand-ecc-algo = "bch";
+ nand-ecc-strength = <8>;
+ wp-gpios = <&gpio TEGRA_GPIO(S, 0) GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
index a8f382642ba9..afbbd870496d 100644
--- a/Documentation/devicetree/bindings/mtd/partition.txt
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -14,6 +14,13 @@ method is used for a given flash device. To describe the method there should be
a subnode of the flash device that is named 'partitions'. It must have a
'compatible' property, which is used to identify the method to use.
+When a single partition is represented with a DT node (whether it is depends
+on the used format) it may also be described using the above rules
+('compatible' and optionally some extra properties / subnodes). This allows
+describing more complex, hierarchical (multi-level) layouts, and should be
+used if there is some significant relation between partitions or if a
+partition internally uses another partitioning method.
+
Available bindings are listed in the "partitions" subdirectory.
@@ -109,3 +116,42 @@ flash@2 {
};
};
};
+
+flash@3 {
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "bootloader";
+ reg = <0x000000 0x100000>;
+ read-only;
+ };
+
+ firmware@100000 {
+ label = "firmware";
+ reg = <0x100000 0xe00000>;
+ compatible = "brcm,trx";
+ };
+
+ calibration@f00000 {
+ label = "calibration";
+ reg = <0xf00000 0x100000>;
+ compatible = "fixed-partitions";
+ ranges = <0 0xf00000 0x100000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "wifi0";
+ reg = <0x000000 0x080000>;
+ };
+
+ partition@80000 {
+ label = "wifi1";
+ reg = <0x080000 0x080000>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt b/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt
new file mode 100644
index 000000000000..b677147ca4e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt
@@ -0,0 +1,37 @@
+Broadcom TRX Container Partition
+================================
+
+TRX is Broadcom's official firmware format for the BCM947xx boards. It's used by
+most of the vendors building devices based on Broadcom's BCM47xx SoCs and is
+supported by the CFE bootloader.
+
+Design of the TRX format is very minimalistic. Its header contains
+identification fields, CRC32 checksum and the locations of embedded partitions.
+Its purpose is to store a few partitions in a format that can be distributed as
+a standalone file and written in a flash memory.
+
+The container can hold up to 4 partitions. The first partition has to contain a
+device executable binary (e.g. a kernel) as it's what the CFE bootloader starts
+executing. Other partitions can be used for operating system purposes. This is
+useful for systems that keep kernel and rootfs separated.
+
+TRX doesn't enforce any strict partition boundaries or size limits; each
+partition just has to stay below the 4 GiB maximum size.
+
+There are two existing/known TRX variants:
+1) v1 which contains 3 partitions
+2) v2 which contains 4 partitions
+
+There aren't separate compatible bindings for them, as the version can be
+trivially detected by software parsing the TRX header.
+
+Required properties:
+- compatible : must be "brcm,trx"
+
+Example:
+
+flash@0 {
+ partitions {
+ compatible = "brcm,trx";
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
index 73d336befa08..1123cc6d56ef 100644
--- a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
+++ b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
@@ -45,11 +45,12 @@ Required properties:
number (e.g., 0, 1, 2, etc.)
- #address-cells: see partition.txt
- #size-cells: see partition.txt
-- nand-ecc-strength: see nand.txt
-- nand-ecc-step-size: must be 512. see nand.txt for more details.
Optional properties:
- nand-bus-width: see nand.txt
+- nand-ecc-strength: see nand.txt. If not specified, the ECC strength will
+ be chosen according to the chip's requirements and the
+ available OOB size.
Each nandcs device node may optionally contain a 'partitions' sub-node, which
further contains sub-nodes describing the flash partition mapping. See
@@ -77,7 +78,6 @@ nand-controller@1ac00000 {
reg = <0>;
nand-ecc-strength = <4>;
- nand-ecc-step-size = <512>;
nand-bus-width = <8>;
partitions {
@@ -117,7 +117,6 @@ nand-controller@79b0000 {
nand@0 {
reg = <0>;
nand-ecc-strength = <4>;
- nand-ecc-step-size = <512>;
nand-bus-width = <8>;
partitions {
diff --git a/Documentation/devicetree/bindings/mtd/spear_smi.txt b/Documentation/devicetree/bindings/mtd/spear_smi.txt
index 7248aadd89e4..c41873e92d26 100644
--- a/Documentation/devicetree/bindings/mtd/spear_smi.txt
+++ b/Documentation/devicetree/bindings/mtd/spear_smi.txt
@@ -5,8 +5,6 @@ Required properties:
- reg : Address range of the mtd chip
- #address-cells, #size-cells : Must be present if the device has sub-nodes
representing partitions.
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the STMMAC interrupts
- clock-rate : Functional clock rate of SMI in Hz
diff --git a/Documentation/devicetree/bindings/mtd/spi-nand.txt b/Documentation/devicetree/bindings/mtd/spi-nand.txt
new file mode 100644
index 000000000000..8b51f3b6d55c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/spi-nand.txt
@@ -0,0 +1,5 @@
+SPI NAND flash
+
+Required properties:
+- compatible: should be "spi-nand"
+- reg: should encode the chip-select line used to access the NAND chip
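
The new file carries no example; a minimal sketch under its two required
properties (the SPI controller label is illustrative):

    &spi0 {
        flash@0 {
            compatible = "spi-nand";
            reg = <0>; /* chip-select 0 */
        };
    };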
diff --git a/Documentation/devicetree/bindings/mux/adi,adgs1408.txt b/Documentation/devicetree/bindings/mux/adi,adgs1408.txt
new file mode 100644
index 000000000000..be6947f4d86b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/adi,adgs1408.txt
@@ -0,0 +1,48 @@
+Bindings for Analog Devices ADGS1408/1409 8:1/Dual 4:1 Mux
+
+Required properties:
+- compatible : Should be one of
+ * "adi,adgs1408"
+ * "adi,adgs1409"
+* Standard mux-controller bindings as described in mux-controller.txt
+
+Optional properties for ADGS1408/1409:
+- gpio-controller : if present, #gpio-cells is required.
+- #gpio-cells : should be <2>
+ - First cell is the GPO line number, i.e. 0 to 3
+ for ADGS1408 and 0 to 4 for ADGS1409
+ - Second cell is used to specify active high (0)
+ or active low (1)
+
+Optional properties:
+- idle-state : if present, the state that the mux controller will have
+ when idle. The special state MUX_IDLE_AS_IS is the default and
+ MUX_IDLE_DISCONNECT is also supported.
+
+States 0 through 7 correspond to signals S1 through S8 in the datasheet.
+For ADGS1409 only states 0 to 3 are available.
+
+Example:
+
+ /*
+ * One mux controller.
+ * Mux state set to idle as is (no idle-state declared)
+ */
+ &spi0 {
+ mux: mux-controller@0 {
+ compatible = "adi,adgs1408";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ #mux-control-cells = <0>;
+ };
+ };
+
+ adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 1>;
+ io-channel-names = "parent";
+ mux-controls = <&mux>;
+
+ channels = "out_a0", "out_a1", "test0", "test1",
+ "out_b0", "out_b1", "testb0", "testb1";
+ };
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index 4bb624a73b54..93dcb79a5f16 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -8,8 +8,6 @@ Required properties:
- SerDes Rx/Tx registers
- SerDes integration registers (1/2)
- SerDes integration registers (2/2)
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
listed is required and is the general device interrupt. If the optional
amd,per-channel-interrupt property is specified, then one additional
diff --git a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
index dfe287a5d6f2..b58843f29591 100644
--- a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
+++ b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
@@ -13,14 +13,17 @@ MDIO multiplexer node:
Every non-ethernet PHY requires a compatible so that it could be probed based
on this compatible string.
+Optional properties:
+- clocks: phandle of the core clock which drives the mdio block.
+
Additional information regarding generic multiplexer properties can be found
at- Documentation/devicetree/bindings/net/mdio-mux.txt
for example:
- mdio_mux_iproc: mdio-mux@6602023c {
+ mdio_mux_iproc: mdio-mux@66020000 {
compatible = "brcm,mdio-mux-iproc";
- reg = <0x6602023c 0x14>;
+ reg = <0x66020000 0x250>;
#address-cells = <1>;
#size-cells = <0>;
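
A hedged sketch of the same node with the new optional clock wired up (the
clock label is illustrative):

    mdio_mux_iproc: mdio-mux@66020000 {
        compatible = "brcm,mdio-mux-iproc";
        reg = <0x66020000 0x250>;
        clocks = <&mdio_core_clk>; /* illustrative core clock phandle */
        #address-cells = <1>;
        #size-cells = <0>;
    };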
diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt
index 9c5e663fa1af..37d67926dd6d 100644
--- a/Documentation/devicetree/bindings/net/btusb.txt
+++ b/Documentation/devicetree/bindings/net/btusb.txt
@@ -15,7 +15,6 @@ Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
Optional properties:
- - interrupt-parent: phandle of the parent interrupt controller
- interrupt-names: (see below)
- interrupts : The interrupt specified by the name "wakeup" is the interrupt
that shall be used for out-of-band wake-on-bt. Driver will
diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
index 23aa94eab207..903a78da65be 100644
--- a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
+++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
@@ -5,7 +5,6 @@ Required properties:
- "holt,hi3110" for HI-3110
- reg: SPI chip select.
- clocks: The clock feeding the CAN controller.
- - interrupt-parent: The parent interrupt controller.
- interrupts: Should contain IRQ line for the CAN controller.
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
index ee3723beb701..188c8bd4eb67 100644
--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
@@ -6,7 +6,6 @@ Required properties:
- "microchip,mcp2515" for MCP2515.
- reg: SPI chip select.
- clocks: The clock feeding the CAN controller.
- - interrupt-parent: The parent interrupt controller.
- interrupts: Should contain IRQ line for the CAN controller.
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/can/xilinx_can.txt b/Documentation/devicetree/bindings/net/can/xilinx_can.txt
index fe38847d8e26..060e2d46bad9 100644
--- a/Documentation/devicetree/bindings/net/can/xilinx_can.txt
+++ b/Documentation/devicetree/bindings/net/can/xilinx_can.txt
@@ -2,20 +2,25 @@ Xilinx Axi CAN/Zynq CANPS controller Device Tree Bindings
---------------------------------------------------------
Required properties:
-- compatible : Should be "xlnx,zynq-can-1.0" for Zynq CAN
- controllers and "xlnx,axi-can-1.00.a" for Axi CAN
- controllers.
-- reg : Physical base address and size of the Axi CAN/Zynq
- CANPS registers map.
+- compatible : Should be:
+ - "xlnx,zynq-can-1.0" for Zynq CAN controllers
+ - "xlnx,axi-can-1.00.a" for Axi CAN controllers
+ - "xlnx,canfd-1.0" for CAN FD controllers
+- reg : Physical base address and size of the controller
+ registers map.
- interrupts : Property with a value describing the interrupt
number.
-- interrupt-parent : Must be core interrupt controller
-- clock-names : List of input clock names - "can_clk", "pclk"
- (For CANPS), "can_clk" , "s_axi_aclk"(For AXI CAN)
+- clock-names : List of input clock names
+ - "can_clk", "pclk" (For CANPS),
+ - "can_clk", "s_axi_aclk" (For AXI CAN and CAN FD).
(See clock bindings for details).
- clocks : Clock phandles (see clock bindings for details).
-- tx-fifo-depth : Can Tx fifo depth.
-- rx-fifo-depth : Can Rx fifo depth.
+- tx-fifo-depth : CAN Tx fifo depth (Zynq, Axi CAN).
+- rx-fifo-depth : CAN Rx fifo depth (Zynq, Axi CAN, CAN FD in
+ sequential Rx mode).
+- tx-mailbox-count : CAN Tx mailbox buffer count (CAN FD).
+- rx-mailbox-count : CAN Rx mailbox buffer count (CAN FD in mailbox Rx
+ mode).
Example:
@@ -42,3 +47,14 @@ For Axi CAN Dts file:
tx-fifo-depth = <0x40>;
rx-fifo-depth = <0x40>;
};
+For CAN FD Dts file:
+ canfd_0: canfd@40000000 {
+ compatible = "xlnx,canfd-1.0";
+ clocks = <&clkc 0>, <&clkc 1>;
+ clock-names = "can_clk", "s_axi_aclk";
+ reg = <0x40000000 0x2000>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 59 1>;
+ tx-mailbox-count = <0x20>;
+ rx-fifo-depth = <0x20>;
+ };
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 4cb4925a28ab..41089369f891 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -11,7 +11,6 @@ Required properties:
registers map
- interrupts : property with a value describing the interrupt
number
-- interrupt-parent : The parent interrupt controller
- cpdma_channels : Specifies number of channels in CPDMA
- ale_entries : Specifies No of entries ALE can hold
- bd_ram_size : Specifies internal descriptor RAM size
diff --git a/Documentation/devicetree/bindings/net/davicom-dm9000.txt b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
index 5224bf05f6f8..64c159e9cbf7 100644
--- a/Documentation/devicetree/bindings/net/davicom-dm9000.txt
+++ b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
@@ -5,7 +5,6 @@ Required properties:
- reg : physical addresses and sizes of registers, must contain 2 entries:
first entry : address register,
second entry : data register.
-- interrupt-parent : interrupt controller to which the device is connected
- interrupts : interrupt specifier specific to interrupt controller
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index 47a6a7fe0b86..1811e1972a7a 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -24,6 +24,14 @@ Required properties:
"brcm,bcm53018-srab"
"brcm,bcm53019-srab" and the mandatory "brcm,bcm5301x-srab" string
+ For the BCM5831X/BCM1140x SoCs with an integrated switch, must be one of:
+ "brcm,bcm11404-srab"
+ "brcm,bcm11407-srab"
+ "brcm,bcm11409-srab"
+ "brcm,bcm58310-srab"
+ "brcm,bcm58311-srab"
+ "brcm,bcm58313-srab" and the mandatory "brcm,omega-srab" string
+
For the BCM585xx/586XX/88312 SoCs with an integrated switch, must be one of:
"brcm,bcm58522-srab"
"brcm,bcm58523-srab"
diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt
index a700943218ca..ac145b885e95 100644
--- a/Documentation/devicetree/bindings/net/dsa/ksz.txt
+++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt
@@ -4,7 +4,9 @@ Microchip KSZ Series Ethernet switches
Required properties:
- compatible: For external switch chips, compatible string must be exactly one
- of: "microchip,ksz9477"
+ of the following:
+ - "microchip,ksz9477"
+ - "microchip,ksz9897"
See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
required and optional properties.
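
A hedged sketch of the new compatible on an SPI bus (controller label,
chip-select and frequency are illustrative):

    &spi1 {
        switch@0 {
            compatible = "microchip,ksz9897";
            reg = <0>;
            spi-max-frequency = <44000000>;
        };
    };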
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 60d50a2b0323..feb007af13cb 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -30,7 +30,6 @@ Required properties:
Optional properties:
- reset-gpios : Should be a gpio specifier for a reset line
-- interrupt-parent : Parent interrupt controller
- interrupts : Interrupt from the switch
- interrupt-controller : Indicates the switch is itself an interrupt
controller. This is used for the PHY interrupts.
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
new file mode 100644
index 000000000000..b6ae8541bd55
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
@@ -0,0 +1,153 @@
+Realtek SMI-based Switches
+==========================
+
+The SMI "Simple Management Interface" is a two-wire protocol using
+bit-banged GPIO that while it reuses the MDIO lines MCK and MDIO does
+not use the MDIO protocol. This binding defines how to specify the
+SMI-based Realtek devices.
+
+Required properties:
+
+- compatible: must be exactly one of:
+ "realtek,rtl8366"
+ "realtek,rtl8366rb" (4+1 ports)
+ "realtek,rtl8366s" (4+1 ports)
+ "realtek,rtl8367"
+ "realtek,rtl8367b"
+ "realtek,rtl8368s" (8 port)
+ "realtek,rtl8369"
+ "realtek,rtl8370" (8 port)
+
+Required properties:
+- mdc-gpios: GPIO line for the MDC clock line.
+- mdio-gpios: GPIO line for the MDIO data line.
+- reset-gpios: GPIO line for the reset signal.
+
+Optional properties:
+- realtek,disable-leds: if the LED drivers are not used in the
+ hardware design, this will disable them so that they are not
+ turned on, needlessly wasting power.
+
+Required subnodes:
+
+- interrupt-controller
+
+ This defines an interrupt controller with an IRQ line (typically
+ a GPIO) that will demultiplex and handle the interrupt from the single
+ interrupt line coming out of one of the SMI-based chips. It most
+ importantly provides link up/down interrupts to the PHY blocks inside
+ the ASIC.
+
+Required properties of interrupt-controller:
+
+- interrupts: parent interrupt, see interrupt-controller/interrupts.txt
+- interrupt-controller: see interrupt-controller/interrupts.txt
+- #address-cells: should be <0>
+- #interrupt-cells: should be <1>
+
+- mdio
+
+ This defines the internal MDIO bus of the SMI device, mostly for the
+ purpose of being able to hook the interrupts to the right PHY and
+ the right PHY to the corresponding port.
+
+Required properties of mdio:
+
+- compatible: should be set to "realtek,smi-mdio" for all SMI devices
+
+See net/mdio.txt for additional MDIO bus properties.
+
+See net/dsa/dsa.txt for a list of additional required and optional properties
+and subnodes of DSA switches.
+
+Examples:
+
+switch {
+ compatible = "realtek,rtl8366rb";
+ /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */
+ mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
+ mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
+
+ switch_intc: interrupt-controller {
+ /* GPIO 15 provides the interrupt */
+ interrupt-parent = <&gpio0>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ phy-handle = <&phy0>;
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ phy-handle = <&phy1>;
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-handle = <&phy2>;
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ phy-handle = <&phy3>;
+ };
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ phy-handle = <&phy4>;
+ };
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+
+ mdio {
+ compatible = "realtek,smi-mdio", "dsa-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: phy@0 {
+ reg = <0>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <0>;
+ };
+ phy1: phy@1 {
+ reg = <1>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <1>;
+ };
+ phy2: phy@2 {
+ reg = <2>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <2>;
+ };
+ phy3: phy@3 {
+ reg = <3>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <3>;
+ };
+ phy4: phy@4 {
+ reg = <4>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <12>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt
new file mode 100644
index 000000000000..ed4710c40641
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt
@@ -0,0 +1,81 @@
+Vitesse VSC73xx Switches
+========================
+
+This defines device tree bindings for the Vitesse VSC73xx switch chips.
+The Vitesse company has been acquired by Microsemi, and Microsemi has in turn
+been acquired by Microchip, but the chips retain this vendor branding.
+
+The currently supported switch chips are:
+Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+
+The device tree node is an SPI device, so it must reside inside an SPI bus
+device tree node; see spi/spi-bus.txt.
+
+Required properties:
+
+- compatible: must be exactly one of:
+ "vitesse,vsc7385"
+ "vitesse,vsc7388"
+ "vitesse,vsc7395"
+ "vitesse,vsc7398"
+- gpio-controller: indicates that this switch is also a GPIO controller,
+ see gpio/gpio.txt
+- #gpio-cells: this must be set to <2> and indicates that we are a two-cell
+ GPIO controller, see gpio/gpio.txt
+
+Optional properties:
+
+- reset-gpios: a handle to a GPIO line that can issue a reset of the chip;
+ it should be tagged as active low.
+
+Required subnodes:
+
+See net/dsa/dsa.txt for a list of additional required and optional properties
+and subnodes of DSA switches.
+
+Examples:
+
+switch@0 {
+ compatible = "vitesse,vsc7395";
+ reg = <0>;
+ /* Specified for 2.5 MHz or below */
+ spi-max-frequency = <2500000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+ vsc: port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac1>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
index df873d1f3b7c..299c0dcd67db 100644
--- a/Documentation/devicetree/bindings/net/fsl-fman.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
@@ -238,7 +238,7 @@ PROPERTIES
Must include one of the following:
- "fsl,fman-dtsec" for dTSEC MAC
- "fsl,fman-xgec" for XGEC MAC
- - "fsl,fman-memac for mEMAC MAC
+ - "fsl,fman-memac" for mEMAC MAC
- cell-index
Usage: required
@@ -356,30 +356,7 @@ ethernet@e0000 {
============================================================================
FMan IEEE 1588 Node
-DESCRIPTION
-
-The FMan interface to support IEEE 1588
-
-
-PROPERTIES
-
-- compatible
- Usage: required
- Value type: <stringlist>
- Definition: A standard property.
- Must include "fsl,fman-ptp-timer".
-
-- reg
- Usage: required
- Value type: <prop-encoded-array>
- Definition: A standard property.
-
-EXAMPLE
-
-ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
-};
+Refer to Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
=============================================================================
FMan MDIO Node
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
index abfbeecbcf39..8ee4b1cedae8 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
@@ -9,7 +9,6 @@ Required properties:
"6port-16rss",
"6port-16vf",
"single-port".
-- interrupt-parent: the interrupt parent of this device.
- interrupts: should contain the DSA Fabric and rcb interrupt.
- reg: specifies base physical address(es) and size of the device registers.
The first region is external interface control register base and size(optional,
diff --git a/Documentation/devicetree/bindings/net/ibm,emac.txt b/Documentation/devicetree/bindings/net/ibm,emac.txt
index 44b842b6ca15..c0c14aa3f97c 100644
--- a/Documentation/devicetree/bindings/net/ibm,emac.txt
+++ b/Documentation/devicetree/bindings/net/ibm,emac.txt
@@ -18,7 +18,6 @@
"ibm,emac4". For Axon, thus, we have: "ibm,emac-axon",
"ibm,emac4"
- interrupts : <interrupt mapping for EMAC IRQ and WOL IRQ>
- - interrupt-parent : optional, if needed for interrupt mapping
- reg : <registers mapping>
- local-mac-address : 6 bytes, MAC address
- mal-device : phandle of the associated McMAL node
diff --git a/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
index 3d27c68613a6..957e5e5c2927 100644
--- a/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
+++ b/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
@@ -22,7 +22,6 @@ Optional properties:
- marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host
platform. The value will be configured to firmware. This
is needed to work chip's sleep feature as expected (u16).
- - interrupt-parent: phandle of the parent interrupt controller
- interrupt-names: Used only for USB based devices (See below)
- interrupts : specifies the interrupt pin number to the cpu. For SDIO, the
driver will use the first interrupt specified in the interrupt
diff --git a/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt b/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt
index aa6313024176..358fed2fab43 100644
--- a/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt
+++ b/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt
@@ -6,8 +6,6 @@ The mediatek gigabit switch can be found on Mediatek SoCs (mt7620, mt7621).
Required properties:
- compatible: Should be "mediatek,mt7620-gsw" or "mediatek,mt7621-gsw"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the gigabit switches interrupt
- resets: Should contain the gigabit switches resets
- reset-names: Should contain the reset names "gsw"
diff --git a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
new file mode 100644
index 000000000000..14ceb2a5b4e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
@@ -0,0 +1,35 @@
+MediaTek SoC built-in Bluetooth Devices
+=======================================
+
+This device is serially attached to the BTIF device, and thus it must be a
+child node of the serial node with BTIF. The dt-binding details for the BTIF
+device can be found in Documentation/devicetree/bindings/serial/8250.txt.
+
+Required properties:
+
+- compatible: Must be
+ "mediatek,mt7622-bluetooth": for MT7622 SoC
+- clocks: Should be the clock specifiers corresponding to the entry in
+ clock-names property.
+- clock-names: Should contain the "ref" entry.
+- power-domains: Phandle to the power domain that the device is part of
+
+Example:
+
+ btif: serial@1100c000 {
+ compatible = "mediatek,mt7622-btif",
+ "mediatek,mtk-btif";
+ reg = <0 0x1100c000 0 0x1000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_BTIF_PD>;
+ clock-names = "main";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+
+ bluetooth {
+ compatible = "mediatek,mt7622-bluetooth";
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
+ clocks = <&clk25m>;
+ clock-names = "ref";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 53c13ee384a4..503f2b9194e2 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -30,9 +30,6 @@ Required properties:
- mediatek,pctl: phandle to the syscon node that handles the ports slew rate
and driver current: only for MT2701 and MT7623 SoC
-Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
* Ethernet MAC node
Required properties:
diff --git a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
index 44dff53d4dda..24626e082b83 100644
--- a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
+++ b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
@@ -8,9 +8,6 @@ the SPI master node.
Required properties:
- compatible: Should be "microchip,enc28j60"
- reg: Specify the SPI chip select the ENC28J60 is wired to
-- interrupt-parent: Specify the phandle of the source interrupt, see interrupt
- binding documentation for details. Usually this is the GPIO bank
- the interrupt line is wired to.
- interrupts: Specify the interrupt index within the interrupt controller (referred
to above in interrupt-parent) and interrupt type. The ENC28J60 natively
generates falling edge interrupts, however, additional board logic
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
index 92486733df71..cfaf88998918 100644
--- a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
+++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: Should be "nxp,nxp-nci-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- enable-gpios: Output GPIO pin used for enabling/disabling the chip
- firmware-gpios: Output GPIO pin used to enter firmware download mode
diff --git a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
index 122460e42e3c..2efe3886b95b 100644
--- a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
+++ b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: Should be "nxp,pn532-i2c" or "nxp,pn533-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
Optional SoC Specific Properties:
diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt
index 538a86f7b2b0..5b937403fed6 100644
--- a/Documentation/devicetree/bindings/net/nfc/pn544.txt
+++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: Should be "nxp,pn544-i2c".
- clock-frequency: IC work frequency.
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- enable-gpios: Output GPIO pin used for enabling/disabling the PN544
- firmware-gpios: Output GPIO pin used to enter firmware download mode
diff --git a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
index ed5b3eaadb39..f02f6fb7f81c 100644
--- a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
+++ b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: Should be "samsung,s3fwrn5-i2c".
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- s3fwrn5,en-gpios: Output GPIO pin used for enabling/disabling the chip
- s3fwrn5,fw-gpios: Output GPIO pin used to enter firmware mode and
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
index b46d473be425..baa8f8133d19 100644
--- a/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: Should be "st,st21nfcb-i2c" or "st,st21nfcc-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- reset-gpios: Output GPIO pin used to reset the ST21NFCB
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
index 54ce8e7ac681..d33343330b94 100644
--- a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: Should be "st,st21nfcb-spi"
- spi-max-frequency: Maximum SPI frequency (<= 4000000).
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- reset-gpios: Output GPIO pin used to reset the ST21NFCB
diff --git a/Documentation/devicetree/bindings/net/nfc/st21nfca.txt b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
index 5ee9440fa9ad..b8bd90f80e12 100644
--- a/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
@@ -4,8 +4,6 @@ Required properties:
- compatible: Should be "st,st21nfca-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
-- interrupts: GPIO interrupt to which the chip is connected
- enable-gpios: Output GPIO pin used for enabling/disabling the ST21NFCA
Optional SoC Specific Properties:
diff --git a/Documentation/devicetree/bindings/net/nfc/st95hf.txt b/Documentation/devicetree/bindings/net/nfc/st95hf.txt
index 08a202e00d47..3f373a1e20ff 100644
--- a/Documentation/devicetree/bindings/net/nfc/st95hf.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st95hf.txt
@@ -17,9 +17,6 @@ Required properties:
- enable-gpio: GPIO line to enable ST95HF transceiver.
-- interrupt-parent : Standard way to specify the controller to which
- ST95HF transceiver's interrupt is routed.
-
- interrupts : Standard way to define ST95HF transceiver's out
interrupt.
diff --git a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
index 5ca9362ef127..ba1934b950e5 100644
--- a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
+++ b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: Should be "ti,trf7970a".
- spi-max-frequency: Maximum SPI frequency (<= 2000000).
-- interrupt-parent: phandle of parent interrupt handler.
- interrupts: A single interrupt specifier.
- ti,enable-gpios: One or two GPIO entries used for 'EN' and 'EN2' pins on the
TRF7970A. EN2 is optional.
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index d2169a56f5e3..17c1d2bd00f6 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -3,8 +3,6 @@ PHY nodes
Required properties:
- interrupts : interrupt specifier for the sole interrupt.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- reg : The ID number for the phy, usually a small integer
Optional Properties:
diff --git a/Documentation/devicetree/bindings/net/qca,qca7000.txt b/Documentation/devicetree/bindings/net/qca,qca7000.txt
index 3987846b3fd3..e4a8a51086df 100644
--- a/Documentation/devicetree/bindings/net/qca,qca7000.txt
+++ b/Documentation/devicetree/bindings/net/qca,qca7000.txt
@@ -19,7 +19,6 @@ Required properties:
- spi-cpol : Must be set
Optional properties:
-- interrupt-parent : Specify the pHandle of the source interrupt
- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at.
Numbers smaller than 1000000 or greater than 16000000
are invalid. Missing the property will set the SPI
diff --git a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
index 0ea18a53cc29..824c0e23c544 100644
--- a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
@@ -10,12 +10,25 @@ device the slave device is attached to.
Required properties:
- compatible: should contain one of the following:
* "qcom,qca6174-bt"
+ * "qcom,wcn3990-bt"
+
+Optional properties for compatible string qcom,qca6174-bt:
-Optional properties:
- enable-gpios: gpio specifier used to enable chip
- clocks: clock provided to the controller (SUSCLK_32KHZ)
-Example:
+Required properties for compatible string qcom,wcn3990-bt:
+
+ - vddio-supply: VDD_IO supply regulator handle.
+ - vddxo-supply: VDD_XO supply regulator handle.
+ - vddrf-supply: VDD_RF supply regulator handle.
+ - vddch0-supply: VDD_CH0 supply regulator handle.
+
+Optional properties for compatible string qcom,wcn3990-bt:
+
+ - max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt
+
+Examples:
serial@7570000 {
label = "BT-UART";
@@ -28,3 +41,15 @@ serial@7570000 {
clocks = <&divclk4>;
};
};
+
+serial@898000 {
+ bluetooth {
+ compatible = "qcom,wcn3990-bt";
+
+ vddio-supply = <&vreg_s4a_1p8>;
+ vddxo-supply = <&vreg_l7a_1p8>;
+ vddrf-supply = <&vreg_l17a_1p3>;
+ vddch0-supply = <&vreg_l25a_3p3>;
+ max-speed = <3200000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt b/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt
index 88b095d1f13b..9fe1a0a22e44 100644
--- a/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt
+++ b/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt
@@ -14,8 +14,6 @@ Required properties:
"ralink,rt3050-eth", "ralink,rt3883-eth", "ralink,rt5350-eth",
"mediatek,mt7620-eth", "mediatek,mt7621-eth"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the frame engines interrupt
- resets: Should contain the frame engines resets
- reset-names: Should contain the reset names "fe". If a switch is present
diff --git a/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt b/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt
index 2e79bd376c56..87e315856efa 100644
--- a/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt
+++ b/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt
@@ -7,8 +7,6 @@ SoCs (RT3x5x, RT5350, MT76x8).
Required properties:
- compatible: Should be "ralink,rt3050-esw"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the embedded switches interrupt
- resets: Should contain the embedded switches resets
- reset-names: Should contain the reset names "esw"
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index fac897d54423..da249b7c406c 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -16,6 +16,7 @@ Required properties:
- "renesas,etheravb-rcar-gen2" as a fallback for the above
R-Car Gen2 and RZ/G1 devices.
+ - "renesas,etheravb-r8a774a1" for the R8A774A1 SoC.
- "renesas,etheravb-r8a7795" for the R8A7795 SoC.
- "renesas,etheravb-r8a7796" for the R8A7796 SoC.
- "renesas,etheravb-r8a77965" for the R8A77965 SoC.
@@ -24,7 +25,7 @@ Required properties:
- "renesas,etheravb-r8a77990" for the R8A77990 SoC.
- "renesas,etheravb-r8a77995" for the R8A77995 SoC.
- "renesas,etheravb-rcar-gen3" as a fallback for the above
- R-Car Gen3 devices.
+ R-Car Gen3 and RZ/G2 devices.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first followed by
@@ -47,8 +48,6 @@ Required properties:
- pinctrl-0: phandle, referring to a default pin configuration node.
Optional properties:
-- interrupt-parent: the phandle for the interrupt controller that services
- interrupts for this device.
- interrupt-names: A list of interrupt names.
For the R-Car Gen 3 SoCs this property is mandatory;
it should include one entry per channel, named "ch%u",
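
A hedged sketch of how the new RZ/G2 compatible pairs with the extended Gen3
fallback (the unit address is illustrative):

    ethernet@e6800000 {
        compatible = "renesas,etheravb-r8a774a1",
                     "renesas,etheravb-rcar-gen3";
        reg = <0xe6800000 0x800>;
    };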
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
index 9c16ee2965a2..3b71da7e8742 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
@@ -4,6 +4,7 @@ The device node has following properties.
Required properties:
- compatible: should be "rockchip,<name>-gamc"
+ "rockchip,px30-gmac": found on PX30 SoCs
"rockchip,rk3128-gmac": found on RK312x SoCs
"rockchip,rk3228-gmac": found on RK322x SoCs
"rockchip,rk3288-gmac": found on RK3288 SoCs
diff --git a/Documentation/devicetree/bindings/net/samsung-sxgbe.txt b/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
index 888c250197fe..46e591178911 100644
--- a/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
+++ b/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: Should be "samsung,sxgbe-v2.0a"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the SXGBE interrupts
These interrupts are ordered by fixed and follows variable
trasmit DMA interrupts, receive DMA interrupts and lpi interrupt.
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt
index 82a4cf2c145d..76db9f13ad96 100644
--- a/Documentation/devicetree/bindings/net/sh_eth.txt
+++ b/Documentation/devicetree/bindings/net/sh_eth.txt
@@ -35,8 +35,6 @@ Required properties:
- pinctrl-0: phandle, referring to a default pin configuration node.
Optional properties:
-- interrupt-parent: the phandle for the interrupt controller that services
- interrupts for this device.
- pinctrl-names: pin configuration state name ("default").
- renesas,no-ether-link: boolean, specify when a board does not provide a proper
Ether LINK signal.
diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
index 21d27aa4c68c..36f1aef585f0 100644
--- a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+++ b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
@@ -83,8 +83,6 @@ Required properties:
- "snps,dwc-qos-ethernet-4.10" (deprecated):
- "phy_ref_clk"
- "apb_clk"
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the core's combined interrupt signal
- phy-mode: See ethernet.txt file in the same directory
- resets: Phandle and reset specifiers for each entry in reset-names, in the
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 3a28a5d8857d..cb694062afff 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -1,11 +1,10 @@
-* STMicroelectronics 10/100/1000 Ethernet driver (GMAC)
+* STMicroelectronics 10/100/1000/2500/10000 Ethernet (GMAC/XGMAC)
Required properties:
-- compatible: Should be "snps,dwmac-<ip_version>", "snps,dwmac"
+- compatible: Should be "snps,dwmac-<ip_version>", "snps,dwmac" or
+ "snps,dwxgmac-<ip_version>", "snps,dwxgmac".
For backwards compatibility: "st,spear600-gmac" is also supported.
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the STMMAC interrupts
- interrupt-names: Should contain a list of interrupt names corresponding to
the interrupts in the interrupts property, if available.
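
A hedged sketch of the new XGMAC spelling of the compatible (the IP version
suffix and unit address are illustrative):

    ethernet@40010000 {
        compatible = "snps,dwxgmac-2.10", "snps,dwxgmac";
        reg = <0x40010000 0x10000>;
    };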
diff --git a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
index 86602f264dce..cffb2d6876e3 100644
--- a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
+++ b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
@@ -11,8 +11,6 @@ Required properties:
Optional properties:
- brcm,drive-strength : drive strength used for SDIO pins on device in mA
(default = 6).
- - interrupt-parent : the phandle for the interrupt controller to which the
- device interrupts are connected.
- interrupts : specifies attributes for the out-of-band interrupt (host-wake).
When not specified the device will use in-band SDIO interrupts.
- interrupt-names : name of the out-of-band interrupt, which must be set
diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
index 59de8646862d..9bf9bbac16e2 100644
--- a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
+++ b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
@@ -29,7 +29,6 @@ Optional properties:
- marvell,wakeup-pin : a wakeup pin number of wifi chip which will be configured
to firmware. Firmware will wakeup the host using this pin
during suspend/resume.
- - interrupt-parent: phandle of the parent interrupt controller
- interrupts : interrupt pin number to the cpu. driver will request an irq based on
this interrupt number. during system suspend, the irq will be enabled
so that the wifi chip can wakeup host platform under certain condition.
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
index 189ae5cad8f7..bb2fcde6f7ff 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
@@ -8,8 +8,6 @@ Required properties:
- reg : Chip select address of device
- spi-max-frequency : Maximum SPI clocking speed of device in Hz
- interrupts : Should contain interrupt line
-- interrupt-parent : Should be the phandle for the interrupt controller
- that services interrupts for this device
- vio-supply : phandle to regulator providing VIO
- ti,power-gpio : GPIO connected to chip's PMEN pin
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt b/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt
index 8f9ced076fe1..cb5c9e1569ca 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt
@@ -17,9 +17,7 @@ Required properties:
* "ti,wl1837"
- reg : Chip select address of device
- spi-max-frequency : Maximum SPI clocking speed of device in Hz
-- interrupt-parent, interrupts :
- Should contain parameters for 1 interrupt line.
- Interrupt parameters: parent, line number, type.
+- interrupts : Should contain parameters for 1 interrupt line.
- vwlan-supply : Point the node of the regulator that powers/enable the
wl12xx/wl18xx chip
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt
index f42f6b0f1bc7..9306c4dadd46 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt
@@ -20,8 +20,6 @@ Required properties:
- interrupts : specifies attributes for the out-of-band interrupt.
Optional properties:
- - interrupt-parent : the phandle for the interrupt controller to which the
- device interrupts are connected.
- ref-clock-frequency : ref clock frequency in Hz
- tcxo-clock-frequency : tcxo clock frequency in Hz
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 729f6747813b..792bc5fafeb9 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -1,7 +1,7 @@
Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
This binding represents the on-chip eFuse OTP controller found on
-i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX and i.MX6UL SoCs.
+i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL and i.MX6SLL SoCs.
Required properties:
- compatible: should be one of
@@ -10,6 +10,7 @@ Required properties:
"fsl,imx6sx-ocotp" (i.MX6SX),
"fsl,imx6ul-ocotp" (i.MX6UL),
"fsl,imx7d-ocotp" (i.MX7D/S),
+ "fsl,imx6sll-ocotp" (i.MX6SLL),
followed by "syscon".
- #address-cells : Should be 1
- #size-cells : Should be 1
diff --git a/Documentation/devicetree/bindings/nvmem/sc27xx-efuse.txt b/Documentation/devicetree/bindings/nvmem/sc27xx-efuse.txt
new file mode 100644
index 000000000000..586c08286aa9
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/sc27xx-efuse.txt
@@ -0,0 +1,52 @@
+= Spreadtrum SC27XX PMIC eFuse device tree bindings =
+
+Required properties:
+- compatible: Should be one of the following.
+ "sprd,sc2720-efuse"
+ "sprd,sc2721-efuse"
+ "sprd,sc2723-efuse"
+ "sprd,sc2730-efuse"
+ "sprd,sc2731-efuse"
+- reg: Specify the address offset of the efuse controller.
+- hwlocks: Reference to a phandle of a hwlock provider node.
+
+= Data cells =
+Data cells are child nodes of the eFuse node, with bindings as described in
+bindings/nvmem/nvmem.txt.
+
+Example:
+
+ sc2731_pmic: pmic@0 {
+ compatible = "sprd,sc2731";
+ reg = <0>;
+ spi-max-frequency = <26000000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ efuse@380 {
+ compatible = "sprd,sc2731-efuse";
+ reg = <0x380>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ hwlocks = <&hwlock 12>;
+
+ /* Data cells */
+ thermal_calib: calib@10 {
+ reg = <0x10 0x2>;
+ };
+ };
+ };
+
+= Data consumers =
+Data consumers are device nodes which consume nvmem data cells.
+
+Example:
+
+ thermal {
+ ...
+ nvmem-cells = <&thermal_calib>;
+ nvmem-cell-names = "calibration";
+ };
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt b/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
index 09cd3bc4d038..9514c327d31b 100644
--- a/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
+++ b/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
@@ -7,7 +7,6 @@ Required properties:
- reg-names: must include the following entries:
"csr": CSR registers
"vector_slave": vectors slave port region
-- interrupt-parent: interrupt source phandle.
- interrupts: specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie.txt b/Documentation/devicetree/bindings/pci/altera-pcie.txt
index a1dc9366a8fc..6c396f17c91a 100644
--- a/Documentation/devicetree/bindings/pci/altera-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/altera-pcie.txt
@@ -6,7 +6,6 @@ Required properties:
- reg-names: must include the following entries:
"Txs": TX slave port region
"Cra": Control register access region
-- interrupt-parent: interrupt source phandle.
- interrupts: specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends
on the parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
index b8e48b4762b2..df065aa53a83 100644
--- a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
@@ -65,7 +65,6 @@ When the iProc event queue based MSI is used, one needs to define the
following properties in the MSI device node:
- compatible: Must be "brcm,iproc-msi"
- msi-controller: claims itself as an MSI controller
-- interrupt-parent: Link to its parent interrupt device
- interrupts: List of interrupt IDs from its parent interrupt device
Optional properties:
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt
index 9a305237fa6e..4a0475e2ba7e 100644
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt
@@ -9,6 +9,9 @@ Required properties:
Optional properties:
- max-functions: Maximum number of functions that can be configured (default 1).
+- phys: From PHY bindings: List of Generic PHY phandles. One per lane if more
+ than one in the list. If only one PHY listed it must manage all lanes.
+- phy-names: List of names to identify the PHY.
Example:
@@ -19,4 +22,6 @@ pcie@fc000000 {
reg-names = "reg", "mem";
cdns,max-outbound-regions = <16>;
max-functions = /bits/ 8 <8>;
+ phys = <&ep_phy0 &ep_phy1>;
+ phy-names = "pcie-lane0", "pcie-lane1";
};
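+
+If a single PHY manages all lanes, a sketch (the PHY label is hypothetical)
+might instead read:
+
+	phys = <&ep_phy>;
+	phy-names = "pcie-phy";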
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt
index 20a33f38f69d..91de69c713a9 100644
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt
@@ -24,6 +24,9 @@ Optional properties:
translations (default 32)
- vendor-id: The PCI vendor ID (16 bits, default is design dependent)
- device-id: The PCI device ID (16 bits, default is design dependent)
+- phys: From PHY bindings: List of Generic PHY phandles. One per lane if more
+ than one is listed. If only one PHY is listed, it must manage all lanes.
+- phy-names: List of names to identify the PHY.
Example:
@@ -57,4 +60,7 @@ pcie@fb000000 {
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
msi-parent = <&its_pci>;
+
+ phys = <&pcie_phy0>;
+ phy-names = "pcie-phy";
};
diff --git a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
index 89a84f8aa621..5f8cb4962f8d 100644
--- a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
+++ b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
@@ -41,7 +41,6 @@ Mandatory subnodes:
- For "faraday,ftpci100" a node representing the interrupt-controller inside the
host bridge is mandatory. It has the following mandatory properties:
- interrupt: see interrupt-controller/interrupts.txt
- - interrupt-parent: see interrupt-controller/interrupts.txt
- interrupt-controller: see interrupt-controller/interrupts.txt
- #address-cells: set to <0>
- #interrupt-cells: set to <1>
diff --git a/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
index 65038aa642e5..a618d4787dd7 100644
--- a/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
@@ -26,9 +26,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an
interrupt source. The value must be 1.
-- interrupt-parent : phandle to the interrupt controller that
- it is attached to, it should be set to gic to point to
- ARM's Generic Interrupt Controller node in system DT.
- interrupts: The interrupt line of the PCIe controller
last cell of this field is set to 4 to
denote it as IRQ_TYPE_LEVEL_HIGH type interrupt.
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
index 3d4a209b0fd0..4dd17de549a7 100644
--- a/Documentation/devicetree/bindings/pci/pci-keystone.txt
+++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt
@@ -17,7 +17,6 @@ reg: index 1 is the base address and length of DW application registers.
pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
interrupt-cells: should be set to 1
- interrupt-parent: Parent interrupt controller phandle
interrupts: GIC interrupt lines connected to PCI MSI interrupt lines
Example:
@@ -37,8 +36,6 @@ pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
pcie_intc: Interrupt controller device node for Legacy IRQ chip
interrupt-cells: should be set to 1
- interrupt-parent: Parent interrupt controller phandle
- interrupts: GIC interrupt lines connected to PCI Legacy interrupt lines
Example:
pcie_intc: legacy-interrupt-controller {
diff --git a/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt b/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt
index a04ab1b76211..ffba4f63d71f 100644
--- a/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt
+++ b/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt
@@ -41,9 +41,6 @@
- #interrupt-cells: specifies the number of cells needed to encode an
interrupt source. The value must be 1.
- - interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
-
- interrupts: specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/phy/brcm,sr-pcie-phy.txt b/Documentation/devicetree/bindings/phy/brcm,sr-pcie-phy.txt
new file mode 100644
index 000000000000..e8d82286beb9
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,sr-pcie-phy.txt
@@ -0,0 +1,41 @@
+Broadcom Stingray PCIe PHY
+
+Required properties:
+- compatible: must be "brcm,sr-pcie-phy"
+- reg: base address and length of the PCIe SS register space
+- brcm,sr-cdru: phandle to the CDRU syscon node
+- brcm,sr-mhb: phandle to the MHB syscon node
+- #phy-cells: Must be 1, denotes the PHY index
+
+For a PAXB based root complex, up to 8 PHYs may be configured; the PHY index
+goes from 0 to 7.
+
+For the internal PAXC based root complex, the PHY index is always 8.
+
+Example:
+ mhb: syscon@60401000 {
+ compatible = "brcm,sr-mhb", "syscon";
+ reg = <0 0x60401000 0 0x38c>;
+ };
+
+ cdru: syscon@6641d000 {
+ compatible = "brcm,sr-cdru", "syscon";
+ reg = <0 0x6641d000 0 0x400>;
+ };
+
+ pcie_phy: phy@40000000 {
+ compatible = "brcm,sr-pcie-phy";
+ reg = <0 0x40000000 0 0x800>;
+ brcm,sr-cdru = <&cdru>;
+ brcm,sr-mhb = <&mhb>;
+ #phy-cells = <1>;
+ };
+
+ /* users of the PCIe PHY */
+
+ pcie0: pcie@48000000 {
+ ...
+ ...
+ phys = <&pcie_phy 0>;
+ phy-names = "pcie-phy";
+ };
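+
+For the internal PAXC based root complex, a consumer would reference PHY
+index 8; a sketch (node name and unit address hypothetical):
+
+ pcie8: pcie@60400000 {
+ ...
+ phys = <&pcie_phy 8>;
+ phy-names = "pcie-phy";
+ };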
diff --git a/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt b/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
index cafe2197dad9..c3a29c5feea3 100644
--- a/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
+++ b/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
@@ -3,7 +3,7 @@
Required properties:
- compatible: "qca,ar7100-usb-phy"
- #phy-cells: should be 0
-- reset-names: "usb-phy"[, "usb-suspend-override"]
+- reset-names: "phy"[, "suspend-override"]
- resets: references to the reset controllers
Example:
@@ -11,7 +11,7 @@ Example:
usb-phy {
compatible = "qca,ar7100-usb-phy";
- reset-names = "usb-phy", "usb-suspend-override";
+ reset-names = "phy", "suspend-override";
resets = <&rst 4>, <&rst 3>;
#phy-cells = <0>;
diff --git a/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt b/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt
index 0d34b2b4a6b7..a5f7a4f0dbc1 100644
--- a/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt
+++ b/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt
@@ -47,6 +47,12 @@ Required properties (port (child) node):
- PHY_TYPE_PCIE
- PHY_TYPE_SATA
+Optional properties (PHY_TYPE_USB2 port (child) node):
+- mediatek,eye-src : u32, the slew rate calibration value
+- mediatek,eye-vrt : u32, the selection of VRT reference voltage
+- mediatek,eye-term : u32, the selection of HS_TX TERM reference voltage
+- mediatek,bc12 : bool, enable BC1.2 charger detection of the u2phy if the
+  hardware supports it
+
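+For illustration, a PHY_TYPE_USB2 port (child) node using these optional
+properties might look like (the values are hypothetical and board specific):
+
+ u2port0: usb-phy@11290800 {
+ reg = <0 0x11290800 0 0x100>;
+ #phy-cells = <1>;
+ mediatek,eye-src = <4>;
+ mediatek,eye-vrt = <6>;
+ mediatek,eye-term = <6>;
+ mediatek,bc12;
+ };
+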
Example:
u3phy: usb-phy@11290000 {
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index 266a1bb8bb6e..0c7629e88bf3 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -12,7 +12,14 @@ Required properties:
"qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
"qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845.
- - reg: offset and length of register set for PHY's common serdes block.
+ - reg:
+ - For "qcom,sdm845-qmp-usb3-phy":
+ - index 0: address and length of register set for PHY's common serdes
+ block.
+ - named register "dp_com" (using reg-names): address and length of the
+ DP_COM control block.
+ - For all others:
+ - offset and length of register set for PHY's common serdes block.
- #clock-cells: must be 1
- Phy pll outputs a bunch of clocks for Tx, Rx and Pipe
@@ -60,7 +67,10 @@ Required nodes:
Required properties for child node:
- reg: list of offset and length pairs of register sets for PHY blocks -
- tx, rx and pcs.
+ - index 0: tx
+ - index 1: rx
+ - index 2: pcs
+ - index 3: pcs_misc (optional)
- #phy-cells: must be 0
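+
+For illustration, a child node reg list covering all four register blocks
+might read (offsets and lengths hypothetical):
+
+ reg = <0x200 0x130>, /* tx */
+ <0x400 0x200>, /* rx */
+ <0x600 0x1c8>, /* pcs */
+ <0x800 0x40>; /* pcs_misc */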
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-pcie.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-pcie.txt
new file mode 100644
index 000000000000..63853b35e083
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-pcie.txt
@@ -0,0 +1,24 @@
+* Renesas R-Car generation 3 PCIe PHY
+
+This file provides information on what the device node for the R-Car
+generation 3 PCIe PHY contains.
+
+Required properties:
+- compatible: "renesas,r8a77980-pcie-phy" if the device is a part of the
+ R8A77980 SoC.
+- reg: offset and length of the register block.
+- clocks: clock phandle and specifier pair.
+- power-domains: power domain phandle and specifier pair.
+- resets: reset phandle and specifier pair.
+- #phy-cells: see phy-bindings.txt in the same directory, must be <0>.
+
+Example (R-Car V3H):
+
+ pcie-phy@e65d0000 {
+ compatible = "renesas,r8a77980-pcie-phy";
+ reg = <0 0xe65d0000 0 0x8000>;
+ #phy-cells = <0>;
+ clocks = <&cpg CPG_MOD 319>;
+ power-domains = <&sysc 32>;
+ resets = <&cpg 319>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
index dbd137c079e2..fb4a204da2bf 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
@@ -10,6 +10,8 @@ Required properties:
SoC.
"renesas,usb2-phy-r8a77965" if the device is a part of an
R8A77965 SoC.
+ "renesas,usb2-phy-r8a77990" if the device is a part of an
+ R8A77990 SoC.
"renesas,usb2-phy-r8a77995" if the device is a part of an
R8A77995 SoC.
"renesas,rcar-gen3-usb2-phy" for a generic R-Car Gen3 compatible device.
diff --git a/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
index 8fb5a53775e8..81b58dddd3ed 100644
--- a/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
@@ -19,6 +19,10 @@ Required Properties:
defines the interrupt number, the second encodes
the trigger flags described in
bindings/interrupt-controller/interrupts.txt
+- interrupts: The interrupt outputs from the controller. There is one GPIO
+ interrupt per GPIO bank. The number of interrupts listed depends
+ on the number of GPIO banks on the SoC. The interrupts must be
+ ordered by bank, starting with bank 0.
Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices, including the meaning of the
@@ -180,6 +184,12 @@ Example:
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
uart2-default: uart2-default {
pinmux {
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
index 61ac75706cc9..04d16fb69eb7 100644
--- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
@@ -36,6 +36,8 @@ Optional properties:
- GENERIC_PINCONFIG: generic pinconfig options to use, bias-disable,
bias-pull-down, bias-pull-up, drive-open-drain, input-schmitt-enable,
input-debounce, output-low, output-high.
+- atmel,drive-strength: 0 or 1 for low drive, 2 for medium drive and 3 for
+  high drive. The default value is low drive.
Example:
@@ -66,6 +68,7 @@ Example:
pinmux = <PIN_PB0>,
<PIN_PB5>;
bias-pull-up;
+ atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
};
pinctrl_sdmmc1_default: sdmmc1_default {
diff --git a/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt
index f8fa28ce163e..0a2d5516e1f3 100644
--- a/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt
@@ -23,7 +23,8 @@ Required properties:
"marvell,berlin2q-system-pinctrl",
"marvell,berlin4ct-avio-pinctrl",
"marvell,berlin4ct-soc-pinctrl",
- "marvell,berlin4ct-system-pinctrl"
+ "marvell,berlin4ct-system-pinctrl",
+ "syna,as370-soc-pinctrl"
Required subnode-properties:
- groups: a list of strings describing the group names.
diff --git a/Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt
new file mode 100644
index 000000000000..b0e36cf0d289
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt
@@ -0,0 +1,99 @@
+Cirrus Logic Madera class audio codecs pinctrl driver
+
+The Cirrus Logic Madera codecs provide a number of GPIO functions for
+interfacing to external hardware and to provide logic outputs to other devices.
+Certain groups of GPIO pins also have an alternate function, normally as an
+audio interface.
+
+The set of available GPIOs, functions and alternate function groups differs
+between codecs so refer to the datasheet for the codec for further information
+on what is supported on that device.
+
+The properties for this driver exist within the parent MFD driver node.
+
+See also
+ the core bindings for the parent MFD driver:
+ Documentation/devicetree/bindings/mfd/madera.txt
+
+ the generic pinmux bindings:
+ Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Required properties of parent mfd node:
+ - pinctrl-names : must be "default"
+ - pinctrl-0 : a phandle to the node containing the subnodes containing default
+ configurations
+
+Required subnodes:
+ One subnode is required to contain the default settings. It contains an
+ arbitrary number of configuration subnodes, one for each group or pin
+ configuration you want to apply as a default.
+
+Required properties of configuration subnodes:
+ - groups : name of one pin group to configure. One of:
+ aif1, aif2, aif3, aif4, mif1, mif2, mif3, pdmspk1, pdmspk2,
+ dmic4, dmic5, dmic6,
+ gpio1, gpio2, ..., gpio40
+ The gpioN groups select the single pin of this name for configuration
+
+Optional properties of configuration subnodes:
+ Any configuration option not explicitly listed in the dts will be left at
+ its chip default setting.
+
+ - function : name of function to assign to this group. One of:
+ aif1, aif2, aif3, aif4, mif1, mif2, mif3, pdmspk1, pdmspk2,
+ dmic3, dmic4, dmic5, dmic6,
+ io, dsp-gpio, irq1, irq2,
+ fll1-clk, fll1-lock, fll2-clk, fll2-lock, fll3-clk, fll3-lock,
+ fllao-clk, fllao-lock,
+ opclk, opclk-async, pwm1, pwm2, spdif,
+ asrc1-in1-lock, asrc1-in2-lock, asrc2-in1-lock, asrc2-in2-lock,
+ spkl-short-circuit, spkr-short-circuit, spk-shutdown,
+ spk-overheat-shutdown, spk-overheat-warn,
+ timer1-sts, timer2-sts, timer3-sts, timer4-sts, timer5-sts, timer6-sts,
+ timer7-sts, timer8-sts,
+ log1-fifo-ne, log2-fifo-ne, log3-fifo-ne, log4-fifo-ne, log5-fifo-ne,
+ log6-fifo-ne, log7-fifo-ne, log8-fifo-ne,
+
+ - bias-disable : disable pull-up and pull-down
+ - bias-bus-hold : enable buskeeper
+ - bias-pull-up : output is pulled-up
+ - bias-pull-down : output is pulled-down
+ - drive-push-pull : CMOS output
+ - drive-open-drain : open-drain output
+ - drive-strength : drive strength in mA. Valid values are 4 or 8
+ - input-schmitt-enable : enable Schmitt-trigger mode
+ - input-schmitt-disable : disable Schmitt-trigger mode
+ - input-debounce : A value of 0 disables debounce, a value !=0 enables
+ debounce
+ - output-low : set the pin to output mode with low level
+ - output-high : set the pin to output mode with high level
+
+Example:
+
+cs47l85@0 {
+ compatible = "cirrus,cs47l85";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&cs47l85_defaults>;
+
+ cs47l85_defaults: cs47l85-gpio-defaults {
+ aif1 {
+ groups = "aif1";
+ function = "aif1";
+ bias-bus-hold;
+ };
+
+ aif2 {
+ groups = "aif2";
+ function = "aif2";
+ bias-bus-hold;
+ };
+
+ opclk {
+ groups = "gpio1";
+ function = "opclk";
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt
new file mode 100644
index 000000000000..66de75090458
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt
@@ -0,0 +1,36 @@
+* Freescale IMX8MQ IOMUX Controller
+
+Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
+for common binding part and usage.
+
+Required properties:
+- compatible: "fsl,imx8mq-iomuxc"
+- reg: should contain the base physical address and size of the iomuxc
+ registers.
+
+Required properties in sub-nodes:
+- fsl,pins: each entry consists of 6 integers and represents the mux and config
+ setting for one pin. The first 5 integers <mux_reg conf_reg input_reg mux_val
+ input_val> are specified using a PIN_FUNC_ID macro, which can be found in
+ imx8mq-pinfunc.h under device tree source folder. The last integer CONFIG is
+ the pad setting value like pull-up on this pin. Please refer to i.MX8M Quad
+ Reference Manual for detailed CONFIG settings.
+
+Examples:
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+};
+
+iomuxc: pinctrl@30330000 {
+ compatible = "fsl,imx8mq-iomuxc";
+ reg = <0x0 0x30330000 0x0 0x10000>;
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX 0x49
+ MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX 0x49
+ >;
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt
index ecb5c0d25218..f4d06bb0b55a 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt
@@ -17,7 +17,7 @@ Tegra124 adds the following optional properties for pin configuration subnodes.
The macros for options are defined in the
include/dt-binding/pinctrl/pinctrl-tegra.h.
- nvidia,enable-input: Integer. Enable the pin's input path.
- enable :TEGRA_PIN_ENABLE0 and
+ enable :TEGRA_PIN_ENABLE and
disable or output only: TEGRA_PIN_DISABLE.
- nvidia,open-drain: Integer.
enable: TEGRA_PIN_ENABLE.
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt
index a62d82d5fbe9..85f211436b8e 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt
@@ -44,7 +44,7 @@ Optional subnode-properties:
- nvidia,tristate: Integer.
0: drive, 1: tristate.
- nvidia,enable-input: Integer. Enable the pin's input path.
- enable :TEGRA_PIN_ENABLE0 and
+ enable :TEGRA_PIN_ENABLE and
disable or output only: TEGRA_PIN_DISABLE.
- nvidia,open-drain: Integer.
enable: TEGRA_PIN_ENABLE.
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index ad9bbbba36e9..cef2b5855d60 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -103,6 +103,12 @@ Optional properties:
#pinctrl-cells: Number of pin control cells in addition to the index within the
pin controller device instance
+pinctrl-use-default: Boolean. Indicates that the OS can use the boot default
+ pin configuration. This allows using an OS that does not have a
+ driver for the pin controller. This property can be set either
+ globally for the pin controller or in child nodes for individual
+ pin group control.
+
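+For example (a sketch; the compatible string and addresses are hypothetical),
+a pin controller that should keep its boot-time configuration could use:
+
+ pinctrl@1000 {
+ compatible = "foo,foo-pinctrl";
+ reg = <0x1000 0x100>;
+ pinctrl-use-default;
+ };
+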
Pin controller devices should contain the pin configuration nodes that client
devices reference.
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
index def8fcad8941..3b695131c51b 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
@@ -16,8 +16,6 @@ If the property interrupt-controller is defined, following property is required
- reg-names: A string describing the "reg" entries. Must contain "eint".
- interrupts : The interrupt output from the controller.
- #interrupt-cells: Should be two.
-- interrupt-parent: Phandle of the interrupt parent to which the external
- GPIO interrupts are forwarded to.
Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices, including the meaning of the
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt
index bf76867168e9..4023bad2fe39 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt
@@ -25,8 +25,6 @@ Required properties:
- gpio-controller: Marks the device as a GPIO controller.
Optional properties :
-- interrupt-parent: phandle of the parent interrupt controller.
-
- interrupts: Interrupt specifier for the controllers interrupt.
- interrupt-controller: Marks the device as a interrupt controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
index a752a4716486..c2dbb3e8d840 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
@@ -10,6 +10,11 @@ Required properties:
- #gpio-cells : Should be two.
The first cell is the gpio pin number and the
second cell is used for optional parameters.
+- gpio-ranges: see ../gpio/gpio.txt
+
+Optional properties:
+
+- gpio-reserved-ranges: see ../gpio/gpio.txt
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -67,6 +72,7 @@ Example:
pinctrl-names = "default";
pinctrl-0 = <&gsbi5_uart_default>;
+ gpio-ranges = <&msmgpio 0 0 90>;
gsbi5_uart_default: gsbi5_uart_default {
mux {
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
index c4ea61ac56f2..68e93d5b7ede 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
@@ -40,6 +40,14 @@ MSM8960 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -154,6 +162,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 147>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 208 0>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
index 93374f478b9e..991be0cd0948 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
@@ -13,6 +13,11 @@ Required properties:
- #gpio-cells : Should be two.
The first cell is the gpio pin number and the
second cell is used for optional parameters.
+- gpio-ranges: see ../gpio/gpio.txt
+
+Optional properties:
+
+- gpio-reserved-ranges: see ../gpio/gpio.txt
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -64,6 +69,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 100>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 208 0>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
index 6e88e91feb11..7ed56a1b70fc 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
@@ -10,6 +10,11 @@ Required properties:
- #gpio-cells : Should be two.
The first cell is the gpio pin number and the
second cell is used for optional parameters.
+- gpio-ranges: see ../gpio/gpio.txt
+
+Optional properties:
+
+- gpio-reserved-ranges: see ../gpio/gpio.txt
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -67,6 +72,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&pinmux 0 0 69>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 32 0x4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt
index 407b9443629d..6dd72f8599e9 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt
@@ -40,6 +40,14 @@ IPQ8074 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -148,6 +156,7 @@ Example:
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 70>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,mdm9615-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,mdm9615-pinctrl.txt
index 1b52f01ddcb7..86ecdcfc4fb8 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,mdm9615-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,mdm9615-pinctrl.txt
@@ -40,6 +40,14 @@ MDM9615 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -127,6 +135,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&msmgpio 0 0 88>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 16 0x4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt
index df9a838ec5f9..cdc4787e59d2 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt
@@ -10,6 +10,11 @@ Required properties:
- #gpio-cells : Should be two.
The first cell is the gpio pin number and the
second cell is used for optional parameters.
+- gpio-ranges: see ../gpio/gpio.txt
+
+Optional properties:
+
+- gpio-reserved-ranges: see ../gpio/gpio.txt
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -62,6 +67,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&msmgpio 0 0 173>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 16 0x4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt
index 498caff6029e..195a7a0ef0cc 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt
@@ -40,6 +40,14 @@ MSM8916 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -162,6 +170,7 @@ Example:
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 122>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
index eb8d8aa41f20..5034eb6653c7 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
@@ -40,6 +40,14 @@ MSM8960 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -156,6 +164,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&msmgpio 0 0 152>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 16 0x4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
index 453bd7c76d6b..c22e6c425d0b 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
@@ -10,6 +10,11 @@ Required properties:
- #gpio-cells : Should be two.
The first cell is the gpio pin number and the
second cell is used for optional parameters.
+- gpio-ranges: see ../gpio/gpio.txt
+
+Optional properties:
+
+- gpio-reserved-ranges: see ../gpio/gpio.txt
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -87,6 +92,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&msmgpio 0 0 146>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 208 0>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt
index 13cd629f896e..f15443f6e78e 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt
@@ -42,6 +42,14 @@ MSM8994 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -160,6 +168,7 @@ Example:
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&msmgpio 0 0 146>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt
index aaf01e929eea..fa97f609fe45 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt
@@ -40,6 +40,14 @@ MSM8996 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -180,6 +188,7 @@ Example:
reg = <0x01010000 0x300000>;
interrupts = <0 208 0>;
gpio-controller;
+ gpio-ranges = <&tlmm 0 0 150>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index 5c25fcb29fb5..ffd4345415f3 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -7,6 +7,7 @@ PMIC's from Qualcomm.
Usage: required
Value type: <string>
Definition: must be one of:
+ "qcom,pm8005-gpio"
"qcom,pm8018-gpio"
"qcom,pm8038-gpio"
"qcom,pm8058-gpio"
@@ -15,7 +16,7 @@ PMIC's from Qualcomm.
"qcom,pm8921-gpio"
"qcom,pm8941-gpio"
"qcom,pm8994-gpio"
- "qcom,pmi8994-gpio"
+ "qcom,pm8998-gpio"
"qcom,pma8084-gpio"
"qcom,pmi8994-gpio"
@@ -78,6 +79,7 @@ to specify in a pin configuration subnode:
Value type: <string-array>
Definition: List of gpio pins affected by the properties specified in
this subnode. Valid pins are:
+ gpio1-gpio4 for pm8005
gpio1-gpio6 for pm8018
gpio1-gpio12 for pm8038
gpio1-gpio40 for pm8058
@@ -86,7 +88,7 @@ to specify in a pin configuration subnode:
gpio1-gpio44 for pm8921
gpio1-gpio36 for pm8941
gpio1-gpio22 for pm8994
- gpio1-gpio10 for pmi8994
+ gpio1-gpio26 for pm8998
gpio1-gpio22 for pma8084
gpio1-gpio10 for pmi8994
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
index 5e00a21de2bf..70659c917bdc 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -124,8 +124,6 @@ used as system wakeup events.
A. External GPIO Interrupts: For supporting external gpio interrupts, the
following properties should be specified in the pin-controller device node.
- - interrupt-parent: phandle of the interrupt parent to which the external
- GPIO interrupts are forwarded to.
- interrupts: interrupt specifier for the controller. The format and value of
the interrupt specifier depends on the interrupt parent for the controller.
@@ -145,8 +143,13 @@ A. External GPIO Interrupts: For supporting external gpio interrupts, the
B. External Wakeup Interrupts: For supporting external wakeup interrupts, a
child node representing the external wakeup interrupt controller should be
- included in the pin-controller device node. This child node should include
- the following properties.
+ included in the pin-controller device node.
+
+ Only one pin-controller device node can include an external wakeup
+ interrupts child node (in other words, only one External Wakeup Interrupts
+ pin-controller is supported).
+
+ This child node should include the following properties:
- compatible: identifies the type of the external wakeup interrupt controller
The possible values are:
@@ -156,12 +159,12 @@ B. External Wakeup Interrupts: For supporting external wakeup interrupts, a
found on Samsung S3C2412 and S3C2413 SoCs,
- samsung,s3c64xx-wakeup-eint: represents wakeup interrupt controller
found on Samsung S3C64xx SoCs,
+ - samsung,s5pv210-wakeup-eint: represents wakeup interrupt controller
+ found on Samsung S5Pv210 SoCs,
- samsung,exynos4210-wakeup-eint: represents wakeup interrupt controller
found on Samsung Exynos4210 and S5PC110/S5PV210 SoCs.
- samsung,exynos7-wakeup-eint: represents wakeup interrupt controller
found on Samsung Exynos7 SoC.
- - interrupt-parent: phandle of the interrupt parent to which the external
- wakeup interrupts are forwarded to.
- interrupts: interrupt used by multiplexed wakeup interrupts.
In addition, following properties must be present in node of every bank
@@ -181,8 +184,6 @@ B. External Wakeup Interrupts: For supporting external wakeup interrupts, a
Node of every bank of pins supporting direct wake-up interrupts (without
multiplexing) must contain following properties:
- - interrupt-parent: phandle of the interrupt parent to which the external
- wakeup interrupts are forwarded to.
- interrupts: interrupts of the interrupt parent which are used for external
wakeup interrupts from pins of the bank, must contain interrupts for all
pins of the bank.
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
index 9a06e1fdbc42..ef4f2ff4a1aa 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
@@ -37,11 +37,10 @@ Required properties:
Optional properties:
- reset: : Reference to the reset controller
- - interrupt-parent: phandle of the interrupt parent to which the external
- GPIO interrupts are forwarded to.
- - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
- which includes IRQ mux selection register, and the offset of the IRQ mux
- selection register.
+ - st,syscfg: Should be phandle/offset/mask:
+   - The phandle to the syscon node which includes the IRQ mux selection
+     register.
+   - The offset of the IRQ mux selection register.
+   - The field mask of the IRQ mux, needed if different from 0xf.
- gpio-ranges: Define a dedicated mapping between a pin-controller and
a gpio controller. Format is <&phandle a b c> with:
-(phandle): phandle of pin-controller.
@@ -55,6 +54,8 @@ Optional properties:
NOTE: If "gpio-ranges" is used for a gpio controller, all gpio-controller
have to use a "gpio-ranges" entry.
More details in Documentation/devicetree/bindings/gpio/gpio.txt.
+ - st,bank-ioport: should correspond to the EXTI IOport selection (EXTI line
+ used to select GPIOs as interrupts).
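+
+ For illustration, the three-cell st,syscfg form with a non-default mask
+ might read (values hypothetical):
+
+ st,syscfg = <&syscfg 0x8 0xff0>;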
Example 1:
#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 9b387f861aed..8f8b25a24b8f 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -114,18 +114,26 @@ Required properties:
- power-domains : A list of PM domain specifiers, as defined by bindings of
the power controller that is the PM domain provider.
+Optional properties:
+ - power-domain-names : A list of power domain name strings sorted in the same
+ order as the power-domains property. Consumer drivers use
+ power-domain-names to match power domains with the power-domains
+ specifiers.
+
Example:
leaky-device@12350000 {
compatible = "foo,i-leak-current";
reg = <0x12350000 0x1000>;
power-domains = <&power 0>;
+ power-domain-names = "io";
};
leaky-device@12351000 {
compatible = "foo,i-leak-current";
reg = <0x12351000 0x1000>;
power-domains = <&power 0>, <&power 1> ;
+ power-domain-names = "io", "clk";
};
The first example above defines a typical PM domain consumer device, which is
@@ -133,7 +141,7 @@ located inside a PM domain with index 0 of a power controller represented by a
node with the label "power".
In the second example the consumer device is partitioned across two PM domains,
the first with index 0 and the second with index 1, of a power controller that
-is represented by a node with the label "power.
+is represented by a node with the label "power".
Optional properties:
- required-opps: This contains phandle to an OPP node in another device's OPP
diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.txt b/Documentation/devicetree/bindings/power/reset/qcom,pon.txt
new file mode 100644
index 000000000000..651491bb63b7
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/qcom,pon.txt
@@ -0,0 +1,45 @@
+Qualcomm PON Device
+
+The Power On device for Qualcomm PM8xxx is an MFD supporting pwrkey
+and resin along with the Android reboot-mode.
+
+This DT node has pwrkey and resin as sub nodes.
+
+Required Properties:
+-compatible: "qcom,pm8916-pon"
+-reg: Specifies the physical address of the pon register
+
+Optional subnodes:
+-pwrkey: Specifies the subnode pwrkey and should follow the
+ qcom,pm8941-pwrkey.txt description.
+-resin: Specifies the subnode resin and should follow the
+ qcom,pm8xxx-pwrkey.txt description.
+
+The rest of the properties should follow the generic reboot-mode description
+found in reboot-mode.txt
+
+Example:
+
+ pon@800 {
+ compatible = "qcom,pm8916-pon";
+
+ reg = <0x800>;
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+
+ pwrkey {
+ compatible = "qcom,pm8941-pwrkey";
+ interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ linux,code = <KEY_POWER>;
+ };
+
+ resin {
+ compatible = "qcom,pm8941-resin";
+ interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt b/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt
index b86ecada4f84..c7dfb7cecf40 100644
--- a/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt
@@ -9,8 +9,6 @@ Required properties:
- interrupts: <a b> where a is the interrupt number and b is a
field that represents an encoding of the sense and level
information for the interrupt.
- - interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties:
- active-semi,input-voltage-threshold-microvolt: unit: mV;
diff --git a/Documentation/devicetree/bindings/power/supply/bq24257.txt b/Documentation/devicetree/bindings/power/supply/bq24257.txt
index d693702c9c1e..f8f5a1685bb9 100644
--- a/Documentation/devicetree/bindings/power/supply/bq24257.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq24257.txt
@@ -6,8 +6,6 @@ Required properties:
* "ti,bq24251"
* "ti,bq24257"
- reg: integer, i2c address of the device.
-- interrupt-parent: Should be the phandle for the interrupt controller. Use in
- conjunction with "interrupts".
- interrupts: Interrupt mapping for GPIO IRQ (configure for both edges). Use in
conjunction with "interrupt-parent".
- ti,battery-regulation-voltage: integer, maximum charging voltage in uV.
diff --git a/Documentation/devicetree/bindings/power/supply/lp8727_charger.txt b/Documentation/devicetree/bindings/power/supply/lp8727_charger.txt
index 2246bc5c874b..0355a4b68f79 100644
--- a/Documentation/devicetree/bindings/power/supply/lp8727_charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/lp8727_charger.txt
@@ -5,7 +5,6 @@ Required properties:
- reg: I2C slave address 27h
Optional properties:
-- interrupt-parent: interrupt controller node (see interrupt binding[0])
- interrupts: interrupt specifier (see interrupt binding[0])
- debounce-ms: interrupt debounce time. (u32)
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,ds2760.txt b/Documentation/devicetree/bindings/power/supply/maxim,ds2760.txt
new file mode 100644
index 000000000000..55967a0bee11
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/maxim,ds2760.txt
@@ -0,0 +1,26 @@
+Devicetree bindings for Maxim DS2760
+====================================
+
+The ds2760 is a w1 slave device and therefore must have its sub-node in DT
+under a w1 bus master node.
+
+The device exposes a power supply, so the details described in
+Documentation/devicetree/bindings/power/supply/power_supply.txt apply.
+
+Required properties:
+- compatible: must be "maxim,ds2760"
+
+Optional properties:
+- power-supplies: Refers to one or more power supplies connected to
+ this battery.
+- maxim,pmod-enabled: This boolean property enables the DS2760 to enter
+ sleep mode when the DQ line goes low for greater
+ than 2 seconds and leave sleep mode when the DQ
+ line goes high.
+- maxim,cache-time-ms: Time in milliseconds to cache the data for. When
+ this time expires, the values are read again from
+ the hardware. Defaults to 1000.
+- rated-capacity-microamp-hours:
+ The rated capacity of the battery, in mAh.
+ If not specified, the value stored in the
+ non-volatile chip memory is used.
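+
+Example (a minimal sketch; the w1 master node, GPIO and capacity value are
+hypothetical):
+
+ onewire {
+ compatible = "w1-gpio";
+ gpios = <&gpio 100 0>;
+
+ battery {
+ compatible = "maxim,ds2760";
+ maxim,pmod-enabled;
+ maxim,cache-time-ms = <1000>;
+ rated-capacity-microamp-hours = <720000>;
+ };
+ };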
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt b/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt
index d6e8dfd0a581..f956247d493e 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt
@@ -3,7 +3,6 @@ Maxim MAX14656 / AL32 USB Charger Detector
Required properties :
- compatible : "maxim,max14656";
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: interrupt line
Example:
diff --git a/Documentation/devicetree/bindings/power/supply/rt9455_charger.txt b/Documentation/devicetree/bindings/power/supply/rt9455_charger.txt
index 5d9ad5cf2c5a..1e6107c7578e 100644
--- a/Documentation/devicetree/bindings/power/supply/rt9455_charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/rt9455_charger.txt
@@ -4,8 +4,6 @@ Required properties:
- compatible: it should contain one of the following:
"richtek,rt9455".
- reg: integer, i2c address of the device.
-- interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
- interrupts: interrupt mapping for GPIO IRQ, it should be
configured with IRQ_TYPE_LEVEL_LOW flag.
- richtek,output-charge-current: integer, output current from the charger to the
diff --git a/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt b/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt
index c40e8926facf..4e78e51018eb 100644
--- a/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt
+++ b/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt
@@ -2,7 +2,11 @@ SBS sbs-battery
~~~~~~~~~~
Required properties :
- - compatible : "sbs,sbs-battery"
+ - compatible: "<vendor>,<part-number>", "sbs,sbs-battery" as fallback. The
+ part number compatible string may be used in order to handle
+ vendor-specific registers.
+ Known <vendor>,<part-number>:
+ ti,bq20z75
Optional properties :
- sbs,i2c-retry-count : The number of times to retry i2c transactions on i2c
@@ -14,9 +18,9 @@ Optional properties :
Example:
- bq20z75@b {
- compatible = "sbs,sbs-battery";
- reg = < 0xb >;
+ battery@b {
+ compatible = "ti,bq20z75", "sbs,sbs-battery";
+ reg = <0xb>;
sbs,i2c-retry-count = <2>;
sbs,poll-retry-count = <10>;
sbs,battery-detect-gpios = <&gpio-controller 122 1>;
diff --git a/Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt b/Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt
index a3719623a94f..84e74151eef2 100644
--- a/Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt
@@ -7,8 +7,6 @@ Required properties:
specific registers.
Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller. Use in
- conjunction with "interrupts".
- interrupts: Interrupt mapping for GPIO IRQ. Use in conjunction with
"interrupt-parent". If an interrupt is not provided the driver will switch
automatically to polling.
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/akebono.txt b/Documentation/devicetree/bindings/powerpc/4xx/akebono.txt
index db939210e29d..940fd78e3363 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/akebono.txt
+++ b/Documentation/devicetree/bindings/powerpc/4xx/akebono.txt
@@ -19,7 +19,6 @@ The IBM Akebono board is a development board for the PPC476GTR SoC.
- compatible : should be "ibm,476gtr-sdhci","generic-sdhci".
- reg : should contain the SDHCI registers location and length.
- - interrupt-parent : a phandle for the interrupt controller.
- interrupts : should contain the SDHCI interrupt.
1.b) The Advanced Host Controller Interface (AHCI) SATA node
@@ -30,7 +29,6 @@ The IBM Akebono board is a development board for the PPC476GTR SoC.
- compatible : should be "ibm,476gtr-ahci".
- reg : should contain the AHCI registers location and length.
- - interrupt-parent : a phandle for the interrupt controller.
- interrupts : should contain the AHCI interrupt.
1.c) The FPGA node
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/hsta.txt b/Documentation/devicetree/bindings/powerpc/4xx/hsta.txt
index c737c8338705..66dbd9ff56f7 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/hsta.txt
+++ b/Documentation/devicetree/bindings/powerpc/4xx/hsta.txt
@@ -13,7 +13,6 @@ device tree entries:
Require properties:
- compatible : "ibm,476gtr-hsta-msi", "ibm,hsta-msi"
- reg : register mapping for the HSTA MSI space
-- interrupt-parent : parent controller for mapping interrupts
- interrupts : ordered interrupt mapping for each MSI in the register
space. The first interrupt should be associated with a
register offset of 0x00, the second to 0x10, etc.
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt b/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
index 515ebcf1b97d..de6a5f7d4aa4 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
+++ b/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
@@ -38,7 +38,6 @@ DMA devices.
2 sources: DMAx CS FIFO Needs Service IRQ (on UIC0)
and DMA Error IRQ (on UIC1). The latter is common
for both DMA engines>.
- - interrupt-parent : needed for interrupt mapping
Example:
@@ -65,7 +64,6 @@ DMA devices.
- compatible : "amcc,xor-accelerator";
- reg : <registers mapping>
- interrupts : <interrupt mapping for XOR interrupt source>
- - interrupt-parent : for interrupt mapping
Example:
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/dcsr.txt b/Documentation/devicetree/bindings/powerpc/fsl/dcsr.txt
index 18a88100af94..4b01e1afafda 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/dcsr.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/dcsr.txt
@@ -84,13 +84,6 @@ PROPERTIES
Interrupt numbers are listed in order (perfmon, event0, event1).
- - interrupt-parent
- Usage: required
- Value type: <phandle>
- Definition: A single <phandle> value that points
- to the interrupt parent to which the child domain
- is being mapped. Value must be "&mpic"
-
- reg
Usage: required
Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/diu.txt b/Documentation/devicetree/bindings/powerpc/fsl/diu.txt
index b66cb6d31d69..eb45db1ecee5 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/diu.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/diu.txt
@@ -8,8 +8,6 @@ Required properties:
- reg : should contain at least address and length of the DIU register
set.
- interrupts : one DIU interrupt should be described here.
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties:
- edid : verbatim EDID data block describing attached display.
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
index 7fc1b010fa75..c11ad5c6db21 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
@@ -13,7 +13,6 @@ Required properties:
DMA channels and the address space of the DMA controller
- cell-index : controller index. 0 for controller @ 0x8100
- interrupts : interrupt specifier for DMA IRQ
-- interrupt-parent : optional, if needed for interrupt mapping
- DMA channel nodes:
- compatible : must include "fsl,elo-dma-channel"
@@ -25,7 +24,6 @@ Optional properties:
- interrupts : interrupt specifier for DMA channel IRQ
(on 83xx this is expected to be identical to
the interrupts property of the parent node)
- - interrupt-parent : optional, if needed for interrupt mapping
Example:
dma@82a8 {
@@ -88,7 +86,6 @@ Required properties:
- cell-index : DMA channel index starts at 0.
- reg : DMA channel specific registers
- interrupts : interrupt specifier for DMA channel IRQ
- - interrupt-parent : optional, if needed for interrupt mapping
Example:
dma@21300 {
@@ -146,7 +143,6 @@ Required properties:
- compatible : must include "fsl,eloplus-dma-channel"
- reg : DMA channel specific registers
- interrupts : interrupt specifier for DMA channel IRQ
- - interrupt-parent : optional, if needed for interrupt mapping
Example:
dma@100300 {
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt b/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
index f514f29c67d6..76dc547bc492 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
@@ -57,8 +57,4 @@ PROPERTIES
Usage: required
Value type: <prop-encoded-array>
- - interrupt-parent
- Usage: required
- Value type: <phandle>
-
=====================================================================
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
index 4ceda9b3b413..a5dae6b1f545 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
@@ -57,8 +57,4 @@ PROPERTIES
Usage: required
Value type: <prop-encoded-array>
- - interrupt-parent
- Usage: required
- Value type: <phandle>
-
=====================================================================
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
index 647817527c88..5dfd68f1a423 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
@@ -18,8 +18,6 @@ Required properties :
- interrupts : <a b> where a is the interrupt number of the
PSC FIFO Controller and b is a field that represents an
encoding of the sense and level information for the interrupt.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Recommended properties :
- fsl,rx-fifo-size : the size of the RX fifo slice (a multiple of 4)
@@ -45,8 +43,6 @@ Required properties :
- interrupts : <a b> where a is the interrupt number of the
PSC FIFO Controller and b is a field that represents an
encoding of the sense and level information for the interrupt.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Recommended properties :
- clocks : specifies the clock needed to operate the fifo controller
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt b/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
index 82dd5b65cf48..f8d2b7fe06d6 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
@@ -21,11 +21,6 @@ Required properties:
be set as edge sensitive. If msi-available-ranges is present, only
the interrupts that correspond to available ranges shall be present.
-- interrupt-parent: the phandle for the interrupt controller
- that services interrupts for this device. for 83xx cpu, the interrupts
- are routed to IPIC, and for 85xx/86xx cpu the interrupts are routed
- to MPIC.
-
Optional properties:
- msi-available-ranges: use <start count> style section to define which
msi interrupt can be used in the 256 msi interrupts. This property is
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/pamu.txt b/Documentation/devicetree/bindings/powerpc/fsl/pamu.txt
index c2b2899885f2..b21ab85de6e7 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/pamu.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/pamu.txt
@@ -32,8 +32,6 @@ Optional properties:
A standard property. It represents the CCSR registers of
all child PAMUs combined. Include it to provide support
for legacy drivers.
-- interrupt-parent : <phandle>
- Phandle to interrupt controller
- fsl,portid-mapping : <u32>
The Coherency Subdomain ID Port Mapping Registers and
Snoop ID Port Mapping registers, which are part of the
diff --git a/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt b/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
index a3dc4b9fa11a..c4d78f28d23c 100644
--- a/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
+++ b/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
@@ -148,7 +148,6 @@ Nintendo Wii device tree
- reg : should contain the controller registers location and length
- interrupt-controller
- interrupts : should contain the cascade interrupt of the "flipper" pic
- - interrupt-parent: should contain the phandle of the "flipper" pic
1.l) The General Purpose I/O (GPIO) controller node
diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
index 0f569d8e73a3..c5d0e7998e2b 100644
--- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
+++ b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
@@ -2,7 +2,8 @@
General Properties:
- - compatible Should be "fsl,etsec-ptp"
+ - compatible Should be "fsl,etsec-ptp" for eTSEC
+ Should be "fsl,fman-ptp-timer" for DPAA FMan
- reg Offset and length of the register set for the device
- interrupts There should be at least two interrupts. Some devices
have as many as four PTP related interrupts.
@@ -43,14 +44,22 @@ Clock Properties:
value, which will be directly written in those bits, that is why,
according to reference manual, the next clock sources can be used:
+ For eTSEC,
<0> - external high precision timer reference clock (TSEC_TMR_CLK
input is used for this purpose);
<1> - eTSEC system clock;
<2> - eTSEC1 transmit clock;
<3> - RTC clock input.
- When this attribute is not used, eTSEC system clock will serve as
- IEEE 1588 timer reference clock.
+ For DPAA FMan,
+ <0> - external high precision timer reference clock (TMR_1588_CLK)
+ <1> - MAC system clock (1/2 FMan clock)
+ <2> - reserved
+ <3> - RTC clock oscillator
+
+ When this attribute is not used, the IEEE 1588 timer reference clock
+ will use the eTSEC system clock (for Gianfar) or the MAC system
+ clock (for DPAA).
Example:
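
A minimal sketch of a DPAA FMan timer node using the new compatible (the
interrupt specifier is illustrative; "fsl,cksel" here selects the MAC
system clock described above):

	ptp-timer@fe000 {
		compatible = "fsl,fman-ptp-timer";
		reg = <0xfe000 0x1000>;
		interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
		fsl,cksel = <1>;	/* MAC system clock (1/2 FMan clock) */
	};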
diff --git a/Documentation/devicetree/bindings/regulator/cpcap-regulator.txt b/Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
index 675f4437ce92..36f5e2f5cc0f 100644
--- a/Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
@@ -5,6 +5,7 @@ Requires node properties:
- "compatible" value one of:
"motorola,cpcap-regulator"
"motorola,mapphone-cpcap-regulator"
+ "motorola,xoom-cpcap-regulator"
Required regulator properties:
- "regulator-name"
diff --git a/Documentation/devicetree/bindings/regulator/max8997-regulator.txt b/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
index 5c186a7a77ba..6fe825b8ac1b 100644
--- a/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
@@ -32,8 +32,6 @@ Required properties:
'max8997,pmic-buck[1/2/5]-dvs-voltage' should be specified.
Optional properties:
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the interrupts from max8997 are delivered to.
- interrupts: Interrupt specifiers for two interrupt sources.
- First interrupt specifier is for 'irq1' interrupt.
- Second interrupt specifier is for 'alert' interrupt.
diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
index 99872819604f..84bc76a7c39e 100644
--- a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
+++ b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
@@ -18,7 +18,6 @@ Required properties:
ti,tps659038-pmic
and also the generic series names
ti,palmas-pmic
-- interrupt-parent : The parent interrupt controller which is palmas.
- interrupts : The interrupt number and the type which can be looked up here:
arch/arm/boot/dts/include/dt-bindings/interrupt-controller/irq.h
- interrupts-name: The names of the individual interrupts.
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index f0ada3b14d70..c7610718adff 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -1,9 +1,18 @@
PFUZE100 family of regulators
Required properties:
-- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000"
+- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000", "fsl,pfuze3001"
- reg: I2C slave address
+Optional properties:
+- fsl,pfuze-support-disable-sw: Boolean, if present, disable all unused switch
+  regulators to save power. Attention: ensure that all important regulators
+  (e.g. DDR ref, DDR supply) have the "regulator-always-on" property set (see
+  the usage sketch after Example 4 below). If not present, the switched
+  regulators are always on and can't be disabled. This binding is a workaround
+  to keep backward compatibility with old dtbs which rely on the switched
+  regulators being always on and don't mark them explicitly as
+  "regulator-always-on".
+
Required child node:
- regulators: This is the list of child nodes that specify the regulator
initialization data for defined regulators. Please refer to below doc
@@ -16,6 +25,8 @@ Required child node:
sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6,coin
--PFUZE3000
sw1a,sw1b,sw2,sw3,swbst,vsnvs,vrefddr,vldo1,vldo2,vccsd,v33,vldo3,vldo4
+ --PFUZE3001
+ sw1,sw2,sw3,vsnvs,vldo1,vldo2,vccsd,v33,vldo3,vldo4
Each regulator is defined using the standard binding for regulators.
@@ -303,3 +314,76 @@ Example 3: PFUZE3000
};
};
};
+
+Example 4: PFUZE3001
+
+ pfuze3001: pmic@8 {
+ compatible = "fsl,pfuze3001";
+ reg = <0x08>;
+
+ regulators {
+ sw1_reg: sw1 {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3_reg: sw3 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1650000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen2_reg: vldo2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ regulator-always-on;
+ };
+
+ vgen3_reg: vccsd {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: v33 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vldo4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
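
A usage sketch for the new "fsl,pfuze-support-disable-sw" property (node
contents illustrative; only the critical rails keep "regulator-always-on"):

	pmic@8 {
		compatible = "fsl,pfuze100";
		reg = <0x08>;
		fsl,pfuze-support-disable-sw;

		regulators {
			sw3a_reg: sw3a {
				/* DDR supply, must never be switched off */
				regulator-always-on;
			};
			/* unused switch regulators without
			 * "regulator-always-on" may now be disabled */
		};
	};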
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
new file mode 100644
index 000000000000..7ef2dbe48e8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
@@ -0,0 +1,160 @@
+Qualcomm Technologies, Inc. RPMh Regulators
+
+rpmh-regulator devices support PMIC regulator management via the Voltage
+Regulator Manager (VRM) and Oscillator Buffer (XOB) RPMh accelerators. The APPS
+processor communicates with these hardware blocks via a Resource State
+Coordinator (RSC) using command packets. The VRM allows changing three
+parameters for a given regulator: enable state, output voltage, and operating
+mode. The XOB allows changing only a single parameter for a given regulator:
+its enable state. Despite its name, the XOB is capable of controlling the
+enable state of any PMIC peripheral. It is used for clock buffers, low-voltage
+switches, and LDO/SMPS regulators which have a fixed voltage and mode.
+
+=======================
+Required Node Structure
+=======================
+
+RPMh regulators must be described in two levels of device nodes. The first
+level describes the PMIC containing the regulators and must reside within an
+RPMh device node. The second level describes each regulator within the PMIC
+which is to be used on the board. Each of these regulators maps to a single
+RPMh resource.
+
+The names used for regulator nodes must match those supported by a given PMIC.
+Supported regulator node names:
+ PM8998: smps1 - smps13, ldo1 - ldo28, lvs1 - lvs2
+ PMI8998: bob
+ PM8005: smps1 - smps4
+
+========================
+First Level Nodes - PMIC
+========================
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must be one of: "qcom,pm8998-rpmh-regulators",
+ "qcom,pmi8998-rpmh-regulators" or
+ "qcom,pm8005-rpmh-regulators".
+
+- qcom,pmic-id
+ Usage: required
+ Value type: <string>
+ Definition: RPMh resource name suffix used for the regulators found on
+ this PMIC. Typical values: "a", "b", "c", "d", "e", "f".
+
+- vdd-s1-supply
+- vdd-s2-supply
+- vdd-s3-supply
+- vdd-s4-supply
+ Usage: optional (PM8998 and PM8005 only)
+ Value type: <phandle>
+ Definition: phandle of the parent supply regulator of one or more of the
+ regulators for this PMIC.
+
+- vdd-s5-supply
+- vdd-s6-supply
+- vdd-s7-supply
+- vdd-s8-supply
+- vdd-s9-supply
+- vdd-s10-supply
+- vdd-s11-supply
+- vdd-s12-supply
+- vdd-s13-supply
+- vdd-l1-l27-supply
+- vdd-l2-l8-l17-supply
+- vdd-l3-l11-supply
+- vdd-l4-l5-supply
+- vdd-l6-supply
+- vdd-l7-l12-l14-l15-supply
+- vdd-l9-supply
+- vdd-l10-l23-l25-supply
+- vdd-l13-l19-l21-supply
+- vdd-l16-l28-supply
+- vdd-l18-l22-supply
+- vdd-l20-l24-supply
+- vdd-l26-supply
+- vin-lvs-1-2-supply
+ Usage: optional (PM8998 only)
+ Value type: <phandle>
+ Definition: phandle of the parent supply regulator of one or more of the
+ regulators for this PMIC.
+
+- vdd-bob-supply
+ Usage: optional (PMI8998 only)
+ Value type: <phandle>
+ Definition: BOB regulator parent supply phandle
+
+===============================
+Second Level Nodes - Regulators
+===============================
+
+- qcom,always-wait-for-ack
+ Usage: optional
+ Value type: <empty>
+ Definition: Boolean flag which indicates that the application processor
+ must wait for an ACK or a NACK from RPMh for every request
+ sent for this regulator including those which are for a
+ strictly lower power state.
+
+Other properties defined in Documentation/devicetree/bindings/regulator/regulator.txt
+may also be used. regulator-initial-mode and regulator-allowed-modes may be
+specified for VRM regulators using mode values from
+include/dt-bindings/regulator/qcom,rpmh-regulator.h. regulator-allow-bypass
+may be specified for BOB type regulators managed via VRM.
+regulator-allow-set-load may be specified for LDO type regulators managed via
+VRM.
+
+========
+Examples
+========
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+&apps_rsc {
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-l7-l12-l14-l15-supply = <&pm8998_s5>;
+
+ smps2 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ pm8998_s5: smps5 {
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2040000>;
+ };
+
+ ldo7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ lvs1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ bob {
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_AUTO
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
index 4edf3137d9f7..76ead07072b1 100644
--- a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
@@ -1,13 +1,5 @@
ROHM BD71837 Power Management Integrated Circuit (PMIC) regulator bindings
-BD71837MWV is a programmable Power Management
-IC (PMIC) for powering single-core, dual-core, and
-quad-core SoC’s such as NXP-i.MX 8M. It is optimized
-for low BOM cost and compact solution footprint. It
-integrates 8 Buck regulators and 7 LDO’s to provide all
-the power rails required by the SoC and the commonly
-used peripherals.
-
Required properties:
- regulator-name: should be "buck1", ..., "buck8" and "ldo1", ..., "ldo7"
diff --git a/Documentation/devicetree/bindings/regulator/tps65090.txt b/Documentation/devicetree/bindings/regulator/tps65090.txt
index ca69f5e3040c..ae326f263597 100644
--- a/Documentation/devicetree/bindings/regulator/tps65090.txt
+++ b/Documentation/devicetree/bindings/regulator/tps65090.txt
@@ -16,7 +16,7 @@ Required properties:
Optional properties:
- ti,enable-ext-control: This is applicable for DCDC1, DCDC2 and DCDC3.
If DCDCs are externally controlled then this property should be there.
-- "dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
+- dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
If DCDCs are externally controlled and if it is from GPIO then GPIO
number should be provided. If it is externally controlled and no GPIO
entry then driver will just configure this rails as external control
diff --git a/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt b/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt
new file mode 100644
index 000000000000..c9919f4b92d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt
@@ -0,0 +1,57 @@
+Socionext UniPhier Regulator Controller
+
+This document describes the devicetree bindings for the regulator controller
+implemented on Socionext UniPhier SoCs.
+
+USB3 Controller
+---------------
+
+This regulator controls VBUS and belongs to the USB3 glue layer. Before using
+the regulator, it is necessary to control the clocks and resets to enable
+this layer. These clocks and resets should be described in each property.
+
+Required properties:
+- compatible: Should be
+ "socionext,uniphier-pro4-usb3-regulator" - for Pro4 SoC
+ "socionext,uniphier-pxs2-usb3-regulator" - for PXs2 SoC
+ "socionext,uniphier-ld20-usb3-regulator" - for LD20 SoC
+ "socionext,uniphier-pxs3-usb3-regulator" - for PXs3 SoC
+- reg: Specifies offset and length of the register set for the device.
+- clocks: A list of phandles to the clock gate for USB3 glue layer.
+ According to the clock-names, appropriate clocks are required.
+- clock-names: Should contain
+ "gio", "link" - for Pro4 SoC
+ "link" - for others
+- resets: A list of phandles to the reset control for USB3 glue layer.
+ According to the reset-names, appropriate resets are required.
+- reset-names: Should contain
+ "gio", "link" - for Pro4 SoC
+ "link" - for others
+
+See Documentation/devicetree/bindings/regulator/regulator.txt
+for more details about the regulator properties.
+
+Example:
+
+ usb-glue@65b00000 {
+ compatible = "socionext,uniphier-ld20-dwc3-glue",
+ "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x65b00000 0x400>;
+
+ usb_vbus0: regulators@100 {
+ compatible = "socionext,uniphier-ld20-usb3-regulator";
+ reg = <0x100 0x10>;
+ clock-names = "link";
+ clocks = <&sys_clk 14>;
+ reset-names = "link";
+ resets = <&sys_rst 14>;
+ };
+
+ phy {
+ ...
+ phy-supply = <&usb_vbus0>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
index d90182425450..601dd9f389aa 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
@@ -8,6 +8,7 @@ on the Qualcomm Hexagon core.
Value type: <string>
Definition: must be one of:
"qcom,q6v5-pil",
+		"qcom,ipq8074-wcss-pil",
"qcom,msm8916-mss-pil",
"qcom,msm8974-mss-pil"
"qcom,msm8996-mss-pil"
@@ -50,11 +51,15 @@ on the Qualcomm Hexagon core.
Usage: required
Value type: <phandle>
Definition: reference to the reset-controller for the modem sub-system
+		    or to the list of 3 reset-controllers for the
+		    wcss sub-system
- reset-names:
Usage: required
Value type: <stringlist>
- Definition: must be "mss_restart"
+	Definition: must be "mss_restart" for the modem sub-system, or
+		    "wcss_aon_reset", "wcss_reset", "wcss_q6_reset"
+		    for the wcss sub-system
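
	For example (a sketch; the reset indices are placeholders, not
	taken from the clock controller binding):

		resets = <&gcc 0>, <&gcc 1>, <&gcc 2>;
		reset-names = "wcss_aon_reset", "wcss_reset",
			      "wcss_q6_reset";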
- cx-supply:
- mss-supply:
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt b/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
index e44a97e21164..25f8658e216f 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
+++ b/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
@@ -45,12 +45,6 @@ The following are the mandatory properties:
per the bindings in
Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
-Optional properties:
---------------------
-- interrupt-parent: phandle to the interrupt controller node. This property
- is needed if the device node hierarchy doesn't have an
- interrupt controller.
-
Example:
--------
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt b/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
index 1eb72874130b..461dc1d8d570 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
+++ b/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
@@ -51,12 +51,6 @@ The following are the mandatory properties:
Documentation/devicetree/bindings/reset/ti,sci-reset.txt
for 66AK2G SoCs
-- interrupt-parent: Should contain a phandle to the Keystone 2 IRQ controller
- IP node that is used by the ARM CorePac processor to
- receive interrupts from the DSP remote processors. See
- Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt
- for details.
-
- interrupts: Should contain an entry for each value in 'interrupt-names'.
Each entry should have the interrupt source number used by
the remote processor to the host processor. The values should
diff --git a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
index a21658f18fe6..3661e6153a92 100644
--- a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
+++ b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
@@ -15,7 +15,7 @@ Please refer to reset.txt in this directory for common reset
controller binding usage.
Required properties:
-- compatible: Should be st,stih407-softreset";
+- compatible: Should be "st,stih407-softreset";
- #reset-cells: 1, see below
example:
diff --git a/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt b/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt
index 1d990bcc0baf..d946f28502b3 100644
--- a/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt
+++ b/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt
@@ -7,8 +7,6 @@ Required properties:
- compatible : should contain "brcm,brcmstb-waketimer"
- reg : the register start and length for the WKTMR block
- interrupts : The TIMER interrupt
-- interrupt-parent: The phandle to the Always-On (AON) Power Management (PM) L2
- interrupt controller node
- clocks : The phandle to the UPG fixed clock (27Mhz domain)
Example:
diff --git a/Documentation/devicetree/bindings/rtc/isil,isl12057.txt b/Documentation/devicetree/bindings/rtc/isil,isl12057.txt
index fbbdd92e5af9..ff7c43555199 100644
--- a/Documentation/devicetree/bindings/rtc/isil,isl12057.txt
+++ b/Documentation/devicetree/bindings/rtc/isil,isl12057.txt
@@ -25,9 +25,6 @@ Optional properties:
- "wakeup-source": mark the chip as a wakeup source, independently of
the availability of an IRQ line connected to the SoC.
- - "interrupt-parent", "interrupts": for passing the interrupt line
- of the SoC connected to IRQ#2 of the RTC chip.
-
Example isl12057 node without IRQ#2 pin connected (no alarm support):
diff --git a/Documentation/devicetree/bindings/rtc/isil,isl1219.txt b/Documentation/devicetree/bindings/rtc/isil,isl1219.txt
new file mode 100644
index 000000000000..c3efd48e91c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/isil,isl1219.txt
@@ -0,0 +1,29 @@
+Intersil ISL1219 I2C RTC/Alarm chip with event in
+
+ISL1219 has additional pins EVIN and #EVDET for tamper detection.
+
+Required properties supported by the device:
+
+ - "compatible": must be "isil,isl1219"
+ - "reg": I2C bus address of the device
+
+Optional properties:
+
+ - "interrupt-names": list which may contain "irq" and "evdet"
+ - "interrupts": list of interrupts for "irq" and "evdet"
+ - "isil,ev-evienb": if present, the EV.EVIENB bit is set to the specified
+   value for proper operation.
+
+
+Example isl1219 node with the #IRQ pin connected to SoC gpio1 pin 12
+ and the #EVDET pin connected to SoC gpio2 pin 24:
+
+ isl1219: rtc@68 {
+ compatible = "isil,isl1219";
+ reg = <0x68>;
+ interrupt-names = "irq", "evdet";
+ interrupts-extended = <&gpio1 12 IRQ_TYPE_EDGE_FALLING>,
+ <&gpio2 24 IRQ_TYPE_EDGE_FALLING>;
+ isil,ev-evienb = <1>;
+ };
+
diff --git a/Documentation/devicetree/bindings/rtc/rtc-cmos.txt b/Documentation/devicetree/bindings/rtc/rtc-cmos.txt
index 7382989b3052..b94b35f3600b 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-cmos.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-cmos.txt
@@ -7,7 +7,6 @@ Required properties:
Optional properties:
- interrupts : should contain interrupt.
- - interrupt-parent : interrupt source phandle.
- ctrl-reg : Contains the initial value of the control register also
called "Register B".
- freq-reg : Contains the initial value of the frequency register also
diff --git a/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt b/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt
index d28d6e7f6ae8..eebfbe04207a 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt
@@ -13,6 +13,7 @@ Required properties:
"maxim,ds3231",
"st,m41t0",
"st,m41t00",
+ "st,m41t11",
"microchip,mcp7940x",
"microchip,mcp7941x",
"pericom,pt7c4338",
@@ -21,7 +22,6 @@ Required properties:
- reg: I2C bus address of the device
Optional properties:
-- interrupt-parent: phandle for the interrupt controller.
- interrupts: rtc alarm interrupt.
- clock-output-names: From common clock binding to override the default output
clock name
diff --git a/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt b/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
index 717d93860af1..c746cb221210 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
@@ -16,7 +16,6 @@ Required properties:
- reg: I2C bus address of the device
Optional properties:
-- interrupt-parent: phandle for the interrupt controller.
- interrupts: rtc alarm interrupt.
- clock-output-names: From common clock binding to override the default output
clock name
diff --git a/Documentation/devicetree/bindings/rtc/rtc-omap.txt b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
index bee41f97044e..062ebb14cecf 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-omap.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
@@ -11,7 +11,6 @@ Required properties:
- "ti,am4372-rtc" - for RTC IP used similar to that on AM437X SoC family.
- reg: Address range of rtc register set
- interrupts: rtc timer, alarm interrupts in order
-- interrupt-parent: phandle for the interrupt controller
Optional properties:
- system-power-controller: whether the rtc is controlling the system power
diff --git a/Documentation/devicetree/bindings/rtc/rtc-palmas.txt b/Documentation/devicetree/bindings/rtc/rtc-palmas.txt
index eb1c7fdeb413..c6cf37758a77 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-palmas.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-palmas.txt
@@ -3,7 +3,6 @@ Palmas RTC controller bindings
Required properties:
- compatible:
- "ti,palmas-rtc" for palma series of the RTC controller
-- interrupt-parent: Parent interrupt device, must be handle of palmas node.
- interrupts: Interrupt number of RTC submodule on device.
Optional properties:
diff --git a/Documentation/devicetree/bindings/rtc/spear-rtc.txt b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
index ca67ac62108e..fecf8e4ad4b4 100644
--- a/Documentation/devicetree/bindings/rtc/spear-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible : "st,spear600-rtc"
- reg : Address range of the rtc registers
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupt: Should contain the rtc interrupt number
Example:
diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
index 7c170da0d4b7..1f5754299d31 100644
--- a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
@@ -3,7 +3,6 @@ Spreadtrum SC27xx Real Time Clock
Required properties:
- compatible: should be "sprd,sc2731-rtc".
- reg: address offset of rtc register.
-- interrupt-parent: phandle for the interrupt controller.
- interrupts: rtc alarm interrupt.
Example:
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
index c920e2736991..130ca5b98253 100644
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
@@ -13,8 +13,6 @@ Required properties:
It is required on stm32(h7/mp1).
- clock-names: must be "rtc_ck" and "pclk".
It is required on stm32(h7/mp1).
-- interrupt-parent: phandle for the interrupt controller.
- It is required on stm32(f4/f7/h7).
- interrupts: rtc alarm interrupt. On stm32mp1, a second interrupt is required
for rtc alarm wakeup interrupt.
- st,syscfg: phandle/offset/mask triplet. The phandle to pwrcfg used to
diff --git a/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt b/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt
index 3ebeb311335f..e615a897b20e 100644
--- a/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt
+++ b/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt
@@ -3,7 +3,6 @@ ST-Ericsson COH 901 331 Real Time Clock
Required properties:
- compatible: must be "stericsson,coh901331"
- reg: address range of rtc register set.
-- interrupt-parent: phandle for the interrupt controller.
- interrupts: rtc alarm interrupt.
- clocks: phandle to the rtc clock source
diff --git a/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt b/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt
index 6a4e0d30d8c4..0dc121b6eace 100644
--- a/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt
+++ b/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt
@@ -6,7 +6,6 @@ Required properties:
- reg: address on the bus
Optional ST33ZP24 Properties:
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- lpcpd-gpios: Output GPIO pin used for ST33ZP24 power management D1/D2 state.
If set, power must be present when the platform is going into sleep/hibernate mode.
diff --git a/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt b/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt
index 604dce901b60..37198971f17b 100644
--- a/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt
+++ b/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt
@@ -5,7 +5,6 @@ Required properties:
- spi-max-frequency: Maximum SPI frequency (<= 10000000).
Optional ST33ZP24 Properties:
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- lpcpd-gpios: Output GPIO pin used for ST33ZP24 power management D1/D2 state.
If set, power must be present when the platform is going into sleep/hibernate mode.
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt b/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
index 41d740545189..7c6304426da1 100644
--- a/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
+++ b/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
@@ -13,7 +13,7 @@ Required properties:
"tcg,tpm-tis-mmio". Valid chip strings are:
* "atmel,at97sc3204"
- reg: The location of the MMIO registers, should be at least 0x5000 bytes
-- interrupt-parent/interrupts: An optional interrupt indicating command completion.
+- interrupts: An optional interrupt indicating command completion.
Example:
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index afcfbc34e243..35957cbf1571 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -9,7 +9,11 @@ Optional properties:
- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
in DCE mode by default.
- rs485-rts-delay, rs485-rts-active-low, rs485-rx-during-tx,
- linux,rs485-enabled-at-boot-time: see rs485.txt
+  linux,rs485-enabled-at-boot-time: see rs485.txt. Note that for RS485
+  you must enable either the "uart-has-rtscts" or the "rts-gpios"
+  properties. If you use "uart-has-rtscts", the signal that controls
+  the transceiver is actually CTS_B, not RTS_B. CTS_B is always an output
+  and RTS_B is always an input, regardless of dte-mode.
Please check Documentation/devicetree/bindings/serial/serial.txt
for the complete list of generic properties.
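
A minimal RS485 sketch following the note above (board label and flag
choices illustrative; pinctrl and clock setup omitted):

	&uart3 {
		uart-has-rtscts;
		linux,rs485-enabled-at-boot-time;
		rs485-rts-active-low;
	};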
diff --git a/Documentation/devicetree/bindings/serial/maxim,max310x.txt b/Documentation/devicetree/bindings/serial/maxim,max310x.txt
index 823f77dd7978..79e10a05a96a 100644
--- a/Documentation/devicetree/bindings/serial/maxim,max310x.txt
+++ b/Documentation/devicetree/bindings/serial/maxim,max310x.txt
@@ -7,8 +7,6 @@ Required properties:
- "maxim,max3109" for Maxim MAX3109,
- "maxim,max14830" for Maxim MAX14830.
- reg: SPI chip select number.
-- interrupt-parent: The phandle for the interrupt controller that
- services interrupts for this IC.
- interrupts: Specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
index f73abff3de43..742cb470595b 100644
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -8,6 +8,7 @@ Required properties:
* "mediatek,mt6582-uart" for MT6582 compatible UARTS
* "mediatek,mt6589-uart" for MT6589 compatible UARTS
* "mediatek,mt6755-uart" for MT6755 compatible UARTS
+ * "mediatek,mt6765-uart" for MT6765 compatible UARTS
* "mediatek,mt6795-uart" for MT6795 compatible UARTS
* "mediatek,mt6797-uart" for MT6797 compatible UARTS
* "mediatek,mt7622-uart" for MT7622 compatible UARTS
diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
index fbfe53635a3a..e7921a8e276b 100644
--- a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
+++ b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
@@ -10,8 +10,6 @@ Required properties:
- "nxp,sc16is760" for NXP SC16IS760,
- "nxp,sc16is762" for NXP SC16IS762.
- reg: I2C address of the SC16IS7xx device.
-- interrupt-parent: The phandle for the interrupt controller that
- services interrupts for this IC.
- interrupts: Should contain the UART interrupt
- clocks: Reference to the IC source clock.
@@ -44,8 +42,6 @@ Required properties:
- "nxp,sc16is760" for NXP SC16IS760,
- "nxp,sc16is762" for NXP SC16IS762.
- reg: SPI chip select number.
-- interrupt-parent: The phandle for the interrupt controller that
- services interrupts for this IC.
- interrupts: Specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/serial/omap_serial.txt b/Documentation/devicetree/bindings/serial/omap_serial.txt
index 4b0f05adb228..c35d5ece1156 100644
--- a/Documentation/devicetree/bindings/serial/omap_serial.txt
+++ b/Documentation/devicetree/bindings/serial/omap_serial.txt
@@ -1,6 +1,7 @@
OMAP UART controller
Required properties:
+- compatible : should be "ti,am654-uart" for AM654 controllers
- compatible : should be "ti,omap2-uart" for OMAP2 controllers
- compatible : should be "ti,omap3-uart" for OMAP3 controllers
- compatible : should be "ti,omap4-uart" for OMAP4 controllers
diff --git a/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
index c5e032c85bf9..7d65126bd1d7 100644
--- a/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
@@ -7,9 +7,6 @@ Required properties:
- reg: Specifies the physical base address of the controller and
the length of the memory mapped region.
-- interrupt-parent: The phandle for the interrupt controller that
- services interrupts for this device.
-
- interrupts: Specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/serial/renesas,rzn1-uart.txt b/Documentation/devicetree/bindings/serial/renesas,rzn1-uart.txt
new file mode 100644
index 000000000000..8b9e0d4dc2e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/renesas,rzn1-uart.txt
@@ -0,0 +1,10 @@
+Renesas RZ/N1 UART
+
+This controller is based on the Synopsys DesignWare APB UART and inherits all
+properties defined in snps-dw-apb-uart.txt except for the compatible property.
+
+Required properties:
+- compatible : The device specific string followed by the generic RZ/N1 string.
+ Therefore it must be one of:
+ "renesas,r9a06g032-uart", "renesas,rzn1-uart"
+ "renesas,r9a06g033-uart", "renesas,rzn1-uart"
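
Since the controller inherits the snps-dw-apb-uart.txt properties, a node
might look like the following sketch (address, interrupt and clock
specifiers are illustrative):

	uart0: serial@40060000 {
		compatible = "renesas,r9a06g032-uart", "renesas,rzn1-uart";
		reg = <0x40060000 0x400>;
		interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
		reg-shift = <2>;
		reg-io-width = <4>;
		clocks = <&sysctrl 0>;
		clock-names = "baudclk";
	};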
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index 106808b55b6d..eaca9da79d83 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -5,6 +5,7 @@ Required properties:
- compatible: Must contain one or more of the following:
- "renesas,scif-r7s72100" for R7S72100 (RZ/A1H) SCIF compatible UART.
+ - "renesas,scif-r7s9210" for R7S9210 (RZ/A2) SCIF compatible UART.
- "renesas,scifa-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFA compatible UART.
- "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
- "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
@@ -72,7 +73,21 @@ Required properties:
family-specific and/or generic versions.
- reg: Base address and length of the I/O registers used by the UART.
- - interrupts: Must contain an interrupt-specifier for the SCIx interrupt.
+ - interrupts: Must contain one or more interrupt-specifiers for the SCIx.
+ If a single interrupt is expressed, then all events are
+ multiplexed into this single interrupt.
+
+		 If multiple interrupts are provided by the hardware, the order
+		 in which the interrupts are listed must match the order below.
+		 Note that some HW interrupt events may be muxed together,
+		 resulting in duplicate entries.
+ The interrupt order is as follows:
+ 1. Error (ERI)
+ 2. Receive buffer full (RXI)
+ 3. Transmit buffer empty (TXI)
+ 4. Break (BRI)
+ 5. Data Ready (DRI)
+ 6. Transmit End (TEI)
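
		 For instance, a device with four separate event interrupts
		 would list them as (specifier values illustrative):

			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>, /* ERI */
				     <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>, /* RXI */
				     <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>, /* TXI */
				     <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; /* BRI */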
- clocks: Must contain a phandle and clock-specifier pair for each entry
in clock-names.
@@ -89,7 +104,7 @@ Required properties:
- "scif_clk" for the optional external clock source for the frequency
divider (SCIF_CLK).
-Note: Each enabled SCIx UART should have an alias correctly numbered in the
+Note: Each enabled SCIx UART may have an optional "serialN" alias in the
"aliases" node.
Optional properties:
diff --git a/Documentation/devicetree/bindings/serial/xlnx,opb-uartlite.txt b/Documentation/devicetree/bindings/serial/xlnx,opb-uartlite.txt
new file mode 100644
index 000000000000..c37deb44dead
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/xlnx,opb-uartlite.txt
@@ -0,0 +1,23 @@
+Xilinx Axi Uartlite controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible : Can be either of
+ "xlnx,xps-uartlite-1.00.a"
+ "xlnx,opb-uartlite-1.00.b"
+- reg : Physical base address and size of the Axi Uartlite
+ registers map.
+- interrupts : Should contain the UART controller interrupt.
+
+Optional properties:
+- port-number : Set Uart port number
+- clock-names : Should be "s_axi_aclk"
+- clocks : Input clock specifier. Refer to common clock bindings.
+
+Example:
+serial@800c0000 {
+ compatible = "xlnx,xps-uartlite-1.00.a";
+ reg = <0x0 0x800c0000 0x10000>;
+ interrupts = <0x0 0x6e 0x1>;
+ port-number = <0>;
+};
diff --git a/Documentation/devicetree/bindings/slimbus/slim-ngd-qcom-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-ngd-qcom-ctrl.txt
new file mode 100644
index 000000000000..e94a2ad3a710
--- /dev/null
+++ b/Documentation/devicetree/bindings/slimbus/slim-ngd-qcom-ctrl.txt
@@ -0,0 +1,84 @@
+Qualcomm SLIMBus Non Generic Device (NGD) Controller binding
+
+The SLIMBus NGD controller is a light-weight driver responsible for
+communicating with SLIMBus slaves directly over the bus using a messaging
+interface, and for communicating with the master component residing on the
+ADSP for bandwidth and data-channel management.
+
+Please refer to slimbus/bus.txt for details of the common SLIMBus bindings.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+	Definition: must be "qcom,slim-ngd-v<MAJOR>.<MINOR>.<STEP>",
+		    i.e. one of the following:
+ "qcom,slim-ngd-v1.5.0" for MSM8996
+ "qcom,slim-ngd-v2.1.0" for SDM845
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must specify the base address and size of the controller
+ register space.
+- dmas
+ Usage: required
+ Value type: <array of phandles>
+ Definition: List of rx and tx dma channels
+
+- dma-names
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "rx" and "tx".
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must list controller IRQ.
+
+#address-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Should be 1, reflecting the instance id of ngd.
+
+#size-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Should be 0
+
+= NGD Devices
+Each subnode represents an instance of NGD, must contain the following
+properties:
+
+- reg:
+ Usage: required
+ Value type: <u32>
+ Definition: Should be instance id of ngd.
+
+#address-cells
+ Usage: required
+ Refer to slimbus/bus.txt for details of the common SLIMBus bindings.
+
+#size-cells
+ Usage: required
+ Refer to slimbus/bus.txt for details of the common SLIMBus bindings.
+
+= EXAMPLE
+
+slim@91c0000 {
+ compatible = "qcom,slim-ngd-v1.5.0";
+ reg = <0x91c0000 0x2c000>;
+ interrupts = <0 163 0>;
+ dmas = <&slimbam 3>, <&slimbam 4>;
+ dma-names = "rx", "tx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ngd@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ codec@1 {
+ compatible = "slim217,1a0";
+ reg = <1 0>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt
index 626e1afa64a6..cce3cd71e85a 100644
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt
@@ -21,7 +21,6 @@ Optional properties:
one as described by the fsl,cpm1-gpio-irq-mask property. There should be as
many interrupts as number of ones in the mask property. The first interrupt in
the list corresponds to the most significant bit of the mask.
-- interrupt-parent : Parent for the above interrupt property.
Example of four SOC GPIO banks defined as gpio-controller nodes:
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt
index e47734bee3f0..5efb7ac94c79 100644
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt
@@ -11,8 +11,6 @@ Required properties:
information for the interrupt. This should be encoded based on
the information in section 2) depending on the type of interrupt
controller you have.
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- pio-handle : The phandle for the Parallel I/O port configuration.
- port-number : for UART drivers, the port number to use, between 0 and 3.
This usually corresponds to the /dev/ttyQE device, e.g. <0> = /dev/ttyQE0.
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt
index 9ccd5f30405b..da13999337a4 100644
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt
@@ -6,7 +6,6 @@ Required properties:
length, the next two two cells should contain PRAM location and
length.
- interrupts : should contain USB interrupt.
-- interrupt-parent : interrupt source phandle.
- fsl,fullspeed-clock : specifies the full speed USB clock source:
"none": clock source is disabled
"brg1" through "brg16": clock source is BRG1-BRG16, respectively
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
index d330c73de9a2..ff92e5a41bed 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
@@ -39,14 +39,14 @@ Required properties:
Optional property:
- clock-frequency: Desired I2C bus clock frequency in Hz.
- When missing default to 400000Hz.
+			 When missing, defaults to 100000Hz.
Child nodes should conform to I2C bus binding as described in i2c.txt.
Qualcomm Technologies Inc. GENI Serial Engine based UART Controller
Required properties:
-- compatible: Must be "qcom,geni-debug-uart".
+- compatible: Must be "qcom,geni-debug-uart" or "qcom,geni-uart".
- reg: Must contain UART register location and length.
- interrupts: Must contain UART core interrupts.
- clock-names: Must contain "se".
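
A sketch of a node using the new "qcom,geni-uart" compatible (register
address, interrupt and clock specifiers are illustrative):

	uart0: serial@a80000 {
		compatible = "qcom,geni-uart";
		reg = <0xa80000 0x4000>;
		interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
		clock-names = "se";
		clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
	};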
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
index 9663cab52246..0b8cc533ca83 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
@@ -10,6 +10,11 @@ edge.
Value type: <stringlist>
Definition: must be "qcom,glink-rpm"
+- label:
+ Usage: optional
+ Value type: <string>
+ Definition: should specify the subsystem name this edge corresponds to.
+
- interrupts:
Usage: required
Value type: <prop-encoded-array>
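
A sketch showing the new optional label on a glink-rpm edge (the remaining
required properties of the edge are omitted here):

	rpm-glink {
		compatible = "qcom,glink-rpm";
		label = "rpm";
	};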
diff --git a/Documentation/devicetree/bindings/sound/ac97-bus.txt b/Documentation/devicetree/bindings/sound/ac97-bus.txt
new file mode 100644
index 000000000000..103c428f2595
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ac97-bus.txt
@@ -0,0 +1,32 @@
+Generic AC97 Device Properties
+
+This document describes the devicetree bindings for an ac97 controller child
+node describing ac97 codecs.
+
+Required properties:
+-compatible : Must be "ac97,vendor_id1,vendor_id2"
+  The ids shall be the 4-character hexadecimal encoding, such as
+  given by the "%04x" formatting of printf
+-reg : Must be the ac97 codec number, between 0 and 3
+
+Example:
+ac97: sound@40500000 {
+ compatible = "marvell,pxa270-ac97";
+ reg = < 0x40500000 0x1000 >;
+ interrupts = <14>;
+ reset-gpios = <&gpio 95 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = < &pinctrl_ac97_default >;
+ clocks = <&clks CLK_AC97>, <&clks CLK_AC97CONF>;
+ clock-names = "AC97CLK", "AC97CONFCLK";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ audio-codec@0 {
+ reg = <0>;
+ compatible = "ac97,574d,4c13";
+ clocks = <&fixed_wm9713_clock>;
+ clock-names = "ac97_clk";
+	};
+};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt
new file mode 100644
index 000000000000..3dfc2515e5c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt
@@ -0,0 +1,23 @@
+* Amlogic Audio FIFO controllers
+
+Required properties:
+- compatible: 'amlogic,axg-toddr' or
+ 'amlogic,axg-frddr'
+- reg: physical base address of the controller and length of memory
+ mapped region.
+- interrupts: interrupt specifier for the fifo.
+- clocks: phandle to the fifo peripheral clock provided by the audio
+ clock controller.
+- resets: phandle to memory ARB line provided by the arb reset controller.
+- #sound-dai-cells: must be 0.
+
+Example of FRDDR A on the A113 SoC:
+
+frddr_a: audio-controller@1c0 {
+ compatible = "amlogic,axg-frddr";
+ reg = <0x0 0x1c0 0x0 0x1c>;
+ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_audio AUD_CLKID_FRDDR_A>;
+ resets = <&arb AXG_ARB_FRDDR_A>;
+};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.txt
new file mode 100644
index 000000000000..80b411296480
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.txt
@@ -0,0 +1,124 @@
+Amlogic AXG sound card:
+
+Required properties:
+
+- compatible: "amlogic,axg-sound-card"
+- model : User specified audio sound card name, one string
+
+Optional properties:
+
+- audio-aux-devs : List of phandles pointing to auxiliary devices
+- audio-widgets : Please refer to widgets.txt.
+- audio-routing : A list of the connections between audio components.
+
+Subnodes:
+
+- dai-link: Container for dai-link level properties and the CODEC
+ sub-nodes. There should be at least one (and probably more)
+ subnode of this type.
+
+Required dai-link properties:
+
+- sound-dai: phandle and port of the CPU DAI.
+
+Required TDM Backend dai-link properties:
+- dai-format : CPU/CODEC common audio format
+
+Optional TDM Backend dai-link properties:
+- dai-tdm-slot-rx-mask-{0,1,2,3}: Receive direction slot masks
+- dai-tdm-slot-tx-mask-{0,1,2,3}: Transmit direction slot masks
+				  When omitted, the mask is assumed to have no
+				  slots. A valid link must have at least one
+				  slot, so at least one of these masks should
+				  be provided with an enabled slot.
+- dai-tdm-slot-num : Please refer to tdm-slot.txt.
+ If omitted, slot number is set to accommodate the largest
+ mask provided.
+- dai-tdm-slot-width : Please refer to tdm-slot.txt. Defaults to 32 if omitted.
+- mclk-fs : Multiplication factor between stream rate and mclk
+
+Backend dai-link subnodes:
+
+- codec: dai-link representing backend links should have at least one subnode.
+ One subnode for each codec of the dai-link.
+  dai-links representing frontend links have no codec and therefore have no
+  subnodes.
+
+Required codec subnodes properties:
+
+- sound-dai: phandle and port of the CODEC DAI.
+
+Optional codec subnodes properties:
+
+- dai-tdm-slot-tx-mask : Please refer to tdm-slot.txt.
+- dai-tdm-slot-rx-mask : Please refer to tdm-slot.txt.
+
+Example:
+
+sound {
+ compatible = "amlogic,axg-sound-card";
+ model = "AXG-S420";
+ audio-aux-devs = <&tdmin_a>, <&tdmout_c>;
+ audio-widgets = "Line", "Lineout",
+ "Line", "Linein",
+ "Speaker", "Speaker1 Left",
+			"Speaker", "Speaker1 Right",
+ "Speaker", "Speaker2 Left",
+ "Speaker", "Speaker2 Right";
+ audio-routing = "TDMOUT_C IN 0", "FRDDR_A OUT 2",
+ "SPDIFOUT IN 0", "FRDDR_A OUT 3",
+ "TDM_C Playback", "TDMOUT_C OUT",
+ "TDMIN_A IN 2", "TDM_C Capture",
+ "TDMIN_A IN 5", "TDM_C Loopback",
+ "TODDR_A IN 0", "TDMIN_A OUT",
+ "Lineout", "Lineout AOUTL",
+ "Lineout", "Lineout AOUTR",
+ "Speaker1 Left", "SPK1 OUT_A",
+ "Speaker2 Left", "SPK2 OUT_A",
+ "Speaker1 Right", "SPK1 OUT_B",
+ "Speaker2 Right", "SPK2 OUT_B",
+ "Linein AINL", "Linein",
+ "Linein AINR", "Linein";
+
+ dai-link@0 {
+ sound-dai = <&frddr_a>;
+ };
+
+ dai-link@1 {
+ sound-dai = <&toddr_a>;
+ };
+
+ dai-link@2 {
+ sound-dai = <&tdmif_c>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-2 = <1 1>;
+ dai-tdm-slot-tx-mask-3 = <1 1>;
+ dai-tdm-slot-rx-mask-1 = <1 1>;
+ mclk-fs = <256>;
+
+ codec@0 {
+ sound-dai = <&lineout>;
+ };
+
+ codec@1 {
+ sound-dai = <&speaker_amp1>;
+ };
+
+ codec@2 {
+ sound-dai = <&speaker_amp2>;
+ };
+
+ codec@3 {
+ sound-dai = <&linein>;
+ };
+
+ };
+
+ dai-link@3 {
+ sound-dai = <&spdifout>;
+
+ codec {
+ sound-dai = <&spdif_dit>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt
new file mode 100644
index 000000000000..521c38ad89e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt
@@ -0,0 +1,20 @@
+* Amlogic Audio SPDIF Output
+
+Required properties:
+- compatible: 'amlogic,axg-spdifout'
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+ * "pclk" : peripheral clock.
+ * "mclk" : master clock
+- #sound-dai-cells: must be 0.
+
+Example on the A113 SoC:
+
+spdifout: audio-controller@480 {
+ compatible = "amlogic,axg-spdifout";
+ reg = <0x0 0x480 0x0 0x50>;
+ #sound-dai-cells = <0>;
+ clocks = <&clkc_audio AUD_CLKID_SPDIFOUT>,
+ <&clkc_audio AUD_CLKID_SPDIFOUT_CLK>;
+ clock-names = "pclk", "mclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt
new file mode 100644
index 000000000000..1c1b7490554e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt
@@ -0,0 +1,28 @@
+* Amlogic Audio TDM formatters
+
+Required properties:
+- compatible: 'amlogic,axg-tdmin' or
+ 'amlogic,axg-tdmout'
+- reg: physical base address of the controller and length of memory
+ mapped region.
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+ * "pclk" : peripheral clock.
+ * "sclk" : bit clock.
+ * "sclk_sel" : bit clock input multiplexer.
+ * "lrclk" : sample clock
+ * "lrclk_sel": sample clock input multiplexer
+
+Example of TDMOUT_A on the A113 SoC:
+
+tdmout_a: audio-controller@500 {
+ compatible = "amlogic,axg-tdmout";
+ reg = <0x0 0x500 0x0 0x40>;
+ clocks = <&clkc_audio AUD_CLKID_TDMOUT_A>,
+ <&clkc_audio AUD_CLKID_TDMOUT_A_SCLK>,
+ <&clkc_audio AUD_CLKID_TDMOUT_A_SCLK_SEL>,
+ <&clkc_audio AUD_CLKID_TDMOUT_A_LRCLK>,
+ <&clkc_audio AUD_CLKID_TDMOUT_A_LRCLK>;
+ clock-names = "pclk", "sclk", "sclk_sel",
+ "lrclk", "lrclk_sel";
+};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-iface.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-iface.txt
new file mode 100644
index 000000000000..cabfb26a5f22
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-iface.txt
@@ -0,0 +1,22 @@
+* Amlogic Audio TDM Interfaces
+
+Required properties:
+- compatible: 'amlogic,axg-tdm-iface'
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+ * "sclk" : bit clock.
+ * "lrclk": sample clock
+ * "mclk" : master clock
+ -> optional if the interface is in clock slave mode.
+- #sound-dai-cells: must be 0.
+
+Example of TDM_A on the A113 SoC:
+
+tdmif_a: audio-controller@0 {
+ compatible = "amlogic,axg-tdm-iface";
+ #sound-dai-cells = <0>;
+ clocks = <&clkc_audio AUD_CLKID_MST_A_MCLK>,
+ <&clkc_audio AUD_CLKID_MST_A_SCLK>,
+ <&clkc_audio AUD_CLKID_MST_A_LRCLK>;
+ clock-names = "mclk", "sclk", "lrclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/atmel-i2s.txt b/Documentation/devicetree/bindings/sound/atmel-i2s.txt
index 735368b8a73f..40549f496a81 100644
--- a/Documentation/devicetree/bindings/sound/atmel-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/atmel-i2s.txt
@@ -15,7 +15,6 @@ Required properties:
- clock-names: Should be one of each entry matching the clocks phandles list:
- "pclk" (peripheral clock) Required.
- "gclk" (generated clock) Optional (1).
- - "aclk" (Audio PLL clock) Optional (1).
- "muxclk" (I2S mux clock) Optional (1).
Optional properties:
@@ -23,9 +22,9 @@ Optional properties:
- pinctrl-names: Should contain only one value - "default".
-(1) : Only the peripheral clock is required. The generated clock, the Audio
- PLL clock adn the I2S mux clock are optional and should only be set
- together, when Master Mode is required.
+(1) : Only the peripheral clock is required. The generated clock and the I2S
+      mux clock are optional and should only be set together when Master Mode
+      is required.
Example:
@@ -40,8 +39,8 @@ Example:
(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
AT91_XDMAC_DT_PERID(32))>;
dma-names = "tx", "rx";
- clocks = <&i2s0_clk>, <&i2s0_gclk>, <&audio_pll_pmc>, <&i2s0muxck>;
- clock-names = "pclk", "gclk", "aclk", "muxclk";
+ clocks = <&i2s0_clk>, <&i2s0_gclk>, <&i2s0muxck>;
+ clock-names = "pclk", "gclk", "muxclk";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2s0_default>;
};
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-card.txt b/Documentation/devicetree/bindings/sound/audio-graph-card.txt
index d04ea3b1a1dd..7e63e53a901c 100644
--- a/Documentation/devicetree/bindings/sound/audio-graph-card.txt
+++ b/Documentation/devicetree/bindings/sound/audio-graph-card.txt
@@ -18,6 +18,8 @@ Below are same as Simple-Card.
- bitclock-inversion
- frame-inversion
- mclk-fs
+- hp-det-gpio
+- mic-det-gpio
- dai-tdm-slot-num
- dai-tdm-slot-width
- clocks / system-clock-frequency
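
A sketch of the two new jack-detection properties in an audio-graph-card
node (GPIO specifiers illustrative; other required card properties
omitted):

	sound {
		compatible = "audio-graph-card";
		hp-det-gpio = <&gpio1 4 GPIO_ACTIVE_HIGH>;
		mic-det-gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
		dais = <&i2s_port>;
	};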
diff --git a/Documentation/devicetree/bindings/sound/cs35l33.txt b/Documentation/devicetree/bindings/sound/cs35l33.txt
index acfb47525b49..dc5a355d1a19 100644
--- a/Documentation/devicetree/bindings/sound/cs35l33.txt
+++ b/Documentation/devicetree/bindings/sound/cs35l33.txt
@@ -14,8 +14,6 @@ Optional properties:
- reset-gpios : gpio used to reset the amplifier
- - interrupt-parent : Specifies the phandle of the interrupt controller to
- which the IRQs from CS35L33 are delivered to.
- interrupts : IRQ line info CS35L33.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
for further information relating to interrupt properties)
diff --git a/Documentation/devicetree/bindings/sound/cs35l34.txt b/Documentation/devicetree/bindings/sound/cs35l34.txt
index b218ead2e68e..2f7606b7d542 100644
--- a/Documentation/devicetree/bindings/sound/cs35l34.txt
+++ b/Documentation/devicetree/bindings/sound/cs35l34.txt
@@ -21,8 +21,6 @@ Optional properties:
- reset-gpios: GPIO used to reset the amplifier.
- - interrupt-parent : Specifies the phandle of the interrupt controller to
- which the IRQs from CS35L34 are delivered to.
- interrupts : IRQ line info CS35L34.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
for further information relating to interrupt properties)
diff --git a/Documentation/devicetree/bindings/sound/cs35l35.txt b/Documentation/devicetree/bindings/sound/cs35l35.txt
index 77ee75c39233..7915897f8a81 100644
--- a/Documentation/devicetree/bindings/sound/cs35l35.txt
+++ b/Documentation/devicetree/bindings/sound/cs35l35.txt
@@ -10,8 +10,6 @@ Required properties:
as covered in
Documentation/devicetree/bindings/regulator/regulator.txt.
- - interrupt-parent : Specifies the phandle of the interrupt controller to
- which the IRQs from CS35L35 are delivered to.
- interrupts : IRQ line info CS35L35.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
for further information relating to interrupt properties)
diff --git a/Documentation/devicetree/bindings/sound/cs42l42.txt b/Documentation/devicetree/bindings/sound/cs42l42.txt
index 9a2c5e2423d5..7dfaa2ab906f 100644
--- a/Documentation/devicetree/bindings/sound/cs42l42.txt
+++ b/Documentation/devicetree/bindings/sound/cs42l42.txt
@@ -15,9 +15,6 @@ Optional properties:
- reset-gpios : a GPIO spec for the reset pin. If specified, it will be
deasserted before communication to the codec starts.
- - interrupt-parent : Specifies the phandle of the interrupt controller to
- which the IRQs from CS42L42 are delivered to.
-
- interrupts : IRQ line info CS42L42.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
for further information relating to interrupt properties)
@@ -107,4 +104,4 @@ cs42l42: cs42l42@48 {
cirrus,btn-det-event-dbnce = <10>;
cirrus,bias-lvls = <0x0F 0x08 0x04 0x01>;
cirrus,hs-bias-ramp-rate = <0x02>;
-}; \ No newline at end of file
+};
diff --git a/Documentation/devicetree/bindings/sound/da7218.txt b/Documentation/devicetree/bindings/sound/da7218.txt
index 3ab9dfef38d1..2cf30899bd0d 100644
--- a/Documentation/devicetree/bindings/sound/da7218.txt
+++ b/Documentation/devicetree/bindings/sound/da7218.txt
@@ -15,8 +15,6 @@ Required properties:
information relating to regulators)
Optional properties:
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the IRQs from DA7218 are delivered to.
- interrupts: IRQ line info for DA7218 chip.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
further information relating to interrupt properties)
diff --git a/Documentation/devicetree/bindings/sound/da7219.txt b/Documentation/devicetree/bindings/sound/da7219.txt
index c3df92d31c4b..e9d0baeb94e2 100644
--- a/Documentation/devicetree/bindings/sound/da7219.txt
+++ b/Documentation/devicetree/bindings/sound/da7219.txt
@@ -8,8 +8,6 @@ Required properties:
- compatible : Should be "dlg,da7219"
- reg: Specifies the I2C slave address
-- interrupt-parent : Specifies the phandle of the interrupt controller to which
- the IRQs from DA7219 are delivered to.
- interrupts : IRQ line info for DA7219.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
further information relating to interrupt properties)
diff --git a/Documentation/devicetree/bindings/sound/dioo,dio2125.txt b/Documentation/devicetree/bindings/sound/dioo,dio2125.txt
deleted file mode 100644
index 63dbfe0f11d0..000000000000
--- a/Documentation/devicetree/bindings/sound/dioo,dio2125.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-DIO2125 Audio Driver
-
-Required properties:
-- compatible : "dioo,dio2125"
-- enable-gpios : the gpio connected to the enable pin of the dio2125
-
-Example:
-
-amp: analog-amplifier {
- compatible = "dioo,dio2125";
- enable-gpios = <&gpio GPIOH_3 0>;
-};
diff --git a/Documentation/devicetree/bindings/sound/everest,es7134.txt b/Documentation/devicetree/bindings/sound/everest,es7134.txt
index 5495a3cb8b7b..091666069bde 100644
--- a/Documentation/devicetree/bindings/sound/everest,es7134.txt
+++ b/Documentation/devicetree/bindings/sound/everest,es7134.txt
@@ -1,10 +1,15 @@
ES7134 i2s DA converter
Required properties:
-- compatible : "everest,es7134" or "everest,es7144"
+- compatible : "everest,es7134" or
+ "everest,es7144" or
+ "everest,es7154"
+- VDD-supply : regulator phandle for the VDD supply
+- PVDD-supply: regulator phandle for the PVDD supply for the es7154
Example:
i2s_codec: external-codec {
compatible = "everest,es7134";
+ VDD-supply = <&vcc_5v>;
};
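+
+For the es7154, which also takes PVDD, a sketch might look like this
+(the supply names are placeholders):
+
+i2s_codec2: external-codec2 {
+ compatible = "everest,es7154";
+ VDD-supply = <&vcc_5v>;
+ PVDD-supply = <&vcc_5v>;
+};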
diff --git a/Documentation/devicetree/bindings/sound/everest,es7241.txt b/Documentation/devicetree/bindings/sound/everest,es7241.txt
new file mode 100644
index 000000000000..28f82cf4959f
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/everest,es7241.txt
@@ -0,0 +1,28 @@
+ES7241 i2s AD converter
+
+Required properties:
+- compatible : "everest,es7241"
+- VDDP-supply: regulator phandle for the VDDP supply
+- VDDA-supply: regulator phandle for the VDDA supply
+- VDDD-supply: regulator phandle for the VDDD supply
+
+Optional properties:
+- reset-gpios: gpio connected to the reset pin
+- m0-gpios : gpio connected to the m0 pin
+- m1-gpios : gpio connected to the m1 pin
+- everest,sdout-pull-down:
+ Format used by the serial interface is controlled by pulling
+ the sdout. If the sdout is pulled down, leftj format is used.
+ If this property is not provided, sdout is assumed to be pulled
+ up and i2s format is used.
+
+Example:
+
+linein: audio-codec@2 {
+ #sound-dai-cells = <0>;
+ compatible = "everest,es7241";
+ VDDA-supply = <&vcc_3v3>;
+ VDDP-supply = <&vcc_3v3>;
+ VDDD-supply = <&vcc_3v3>;
+ reset-gpios = <&gpio GPIOH_42>;
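+ /* optional, hypothetical: pull sdout down for leftj format */
+ everest,sdout-pull-down;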
+};
diff --git a/Documentation/devicetree/bindings/sound/fsl,ssi.txt b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
index d415888e1316..7e15a85cecd2 100644
--- a/Documentation/devicetree/bindings/sound/fsl,ssi.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
@@ -18,8 +18,6 @@ Required properties:
encoded based on the information in section 2)
depending on the type of interrupt controller you
have.
-- interrupt-parent: The phandle for the interrupt controller that
- services interrupts for this device.
- fsl,fifo-depth: The number of elements in the transmit and receive FIFOs.
This number is the maximum allowed value for SFCSR[TFWM0].
- clocks: "ipg" - Required clock for the SSI unit
diff --git a/Documentation/devicetree/bindings/sound/marvell,pxa2xx-ac97.txt b/Documentation/devicetree/bindings/sound/marvell,pxa2xx-ac97.txt
new file mode 100644
index 000000000000..2ea85d5be6a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/marvell,pxa2xx-ac97.txt
@@ -0,0 +1,27 @@
+Marvell PXA2xx audio complex
+
+This description matches the AC97 controller found in the pxa2xx and pxa3xx series.
+
+Required properties:
+ - compatible: should be one of the following:
+ "marvell,pxa250-ac97"
+ "marvell,pxa270-ac97"
+ "marvell,pxa300-ac97"
+ - reg: device MMIO address space
+ - interrupts: single interrupt generated by AC97 IP
+ - clocks: input clock of the AC97 IP, refer to clock-bindings.txt
+
+Optional properties:
+ - pinctrl-names, pinctrl-0: refer to pinctrl-bindings.txt
+ - reset-gpios: gpio used for AC97 reset, refer to gpio.txt
+
+Example:
+ ac97: sound@40500000 {
+ compatible = "marvell,pxa250-ac97";
+ reg = < 0x40500000 0x1000 >;
+ interrupts = <14>;
+ reset-gpios = <&gpio 113 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = < &pmux_ac97_default >;
+ };
diff --git a/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt b/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt
index 74c9ba6c2823..93b982e9419f 100644
--- a/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt
+++ b/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt
@@ -5,6 +5,14 @@ Required properties:
compatible Must be "mrvl,pxa-ssp-dai"
port A phandle reference to a PXA ssp upstream device
+Optional properties:
+
+ clock-names
+ clocks Through "clock-names" and "clocks", external clocks
+ can be configured. If a clock named "extclk" exists,
+ it will be set to the mclk rate of the audio stream
+ and be used as the clock provider of the DAI.
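+
+A DAI node using such an external clock might look like this (the
+clock provider phandle is hypothetical):
+
+ssp_dai0: ssp_dai@0 {
+ compatible = "mrvl,pxa-ssp-dai";
+ port = <&ssp0>;
+ clock-names = "extclk";
+ clocks = <&audio_extclk>;
+};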
+
Example:
/* upstream device */
diff --git a/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt b/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt
deleted file mode 100644
index 551fbb8348c2..000000000000
--- a/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-DT bindings for ARM PXA2xx PCM platform driver
-
-This is just a dummy driver that registers the PXA ASoC platform driver.
-It does not have any resources assigned.
-
-Required properties:
-
- - compatible 'mrvl,pxa-pcm-audio'
-
-Example:
-
- pxa_pcm_audio: snd_soc_pxa_audio {
- compatible = "mrvl,pxa-pcm-audio";
- };
-
diff --git a/Documentation/devicetree/bindings/sound/name-prefix.txt b/Documentation/devicetree/bindings/sound/name-prefix.txt
new file mode 100644
index 000000000000..645775908657
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/name-prefix.txt
@@ -0,0 +1,24 @@
+Name prefix:
+
+Cards implementing the routing property define the connections between
+audio components as a list of string pairs. Components using the same
+sink/source names may use the name prefix property to prepend the
+names of their sinks/sources with the provided string.
+
+Optional name prefix property:
+- sound-name-prefix : string used as a prefix for the sink/source names of
+ the component.
+
+Example: Two instances of the same component.
+
+amp0: analog-amplifier@0 {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio GPIOH_3 0>;
+ sound-name-prefix = "FRONT";
+};
+
+amp1: analog-amplifier@1 {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio GPIOH_4 0>;
+ sound-name-prefix = "BACK";
+};
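+
+A card's routing can then refer to each instance unambiguously, for
+example (the sink/source names here are purely illustrative):
+
+ audio-routing = "FRONT INL", "LINE OUT L",
+ "BACK INL", "LINE OUT R";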
diff --git a/Documentation/devicetree/bindings/sound/omap-dmic.txt b/Documentation/devicetree/bindings/sound/omap-dmic.txt
index fd8105f18978..418e30e72e89 100644
--- a/Documentation/devicetree/bindings/sound/omap-dmic.txt
+++ b/Documentation/devicetree/bindings/sound/omap-dmic.txt
@@ -6,7 +6,6 @@ Required properties:
<MPU access base address, size>,
<L3 interconnect address, size>;
- interrupts: Interrupt number for DMIC
-- interrupt-parent: The parent interrupt controller
- ti,hwmods: Name of the hwmod associated with OMAP dmic IP
Example:
diff --git a/Documentation/devicetree/bindings/sound/omap-mcbsp.txt b/Documentation/devicetree/bindings/sound/omap-mcbsp.txt
index 17cce4490456..ae8bf703ce7a 100644
--- a/Documentation/devicetree/bindings/sound/omap-mcbsp.txt
+++ b/Documentation/devicetree/bindings/sound/omap-mcbsp.txt
@@ -15,7 +15,6 @@ Required properties:
<TX irq>,
<RX irq>;
- interrupt-names: Array of strings associated with the interrupt numbers
-- interrupt-parent: The parent interrupt controller
- ti,buffer-size: Size of the FIFO on the port (OMAP2430 and newer SoC)
- ti,hwmods: Name of the hwmod associated to the McBSP port
diff --git a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
index 0741dff048dd..5f4e68ca228c 100644
--- a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
+++ b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
@@ -6,7 +6,6 @@ Required properties:
<MPU access base address, size>,
<L3 interconnect address, size>;
- interrupts: Interrupt number for McPDM
-- interrupt-parent: The parent interrupt controller
- ti,hwmods: Name of the hwmod associated to the McPDM
Example:
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
index 6a4aadc4ce06..84b28dbe9f15 100644
--- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
@@ -30,7 +30,7 @@ Required properties:
Board connectors:
* Headset Mic
- * Secondary Mic",
+ * Secondary Mic
* DMIC
* Ext Spk
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt
index aa54e49fc8a2..c814e867850f 100644
--- a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt
@@ -7,7 +7,7 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio.
Value type: <stringlist>
Definition: must be "qcom,apq8096-sndcard"
-- qcom,audio-routing:
+- audio-routing:
Usage: Optional
Value type: <stringlist>
Definition: A list of the connections between audio components.
@@ -35,7 +35,7 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio.
"Digital Mic3"
Audio pins and MicBias on WCD9335 Codec:
- "MIC_BIAS1
+ "MIC_BIAS1"
"MIC_BIAS2"
"MIC_BIAS3"
"MIC_BIAS4"
@@ -49,6 +49,12 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio.
"DMIC1"
"DMIC2"
"DMIC3"
+
+- model:
+ Usage: required
+ Value type: <stringlist>
+ Definition: The user-visible name of this sound card.
+
= dailinks
Each subnode of sndcard represents a dailink, and subnodes of each
dailink would be cpu/codec/platform dais.
@@ -79,11 +85,16 @@ dailinks would be cpu/codec/platform dais.
Value type: <phandle with arguments>
Definition: dai phandle/s and port of CPU/CODEC/PLATFORM node.
+Obsolete:
+ qcom,model: String for soundcard name (Use model instead)
+ qcom,audio-routing: A list of the connections between audio components.
+ (Use audio-routing instead)
+
Example:
audio {
compatible = "qcom,apq8096-sndcard";
- qcom,model = "DB820c";
+ model = "DB820c";
mm1-dai-link {
link-name = "MultiMedia1";
diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
index 551ecab67efe..fdcea3d12ee5 100644
--- a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
@@ -7,7 +7,6 @@ Bindings for codec Analog IP which is integrated in pmic pm8916,
Required properties
- compatible = "qcom,pm8916-wcd-analog-codec";
- reg: represents the slave base address provided to the peripheral.
- - interrupt-parent : The parent interrupt controller.
- interrupts: List of interrupts in given SPMI peripheral.
- interrupt-names: Names specified to above list of interrupts in same
order. List of supported interrupt names are:
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6adm.txt b/Documentation/devicetree/bindings/sound/qcom,q6adm.txt
index cb709e5dbc44..bbae426cdfb1 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6adm.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6adm.txt
@@ -18,6 +18,11 @@ used by the apr service device.
= ADM routing
"routing" subnode of the ADM node represents adm routing specific configuration
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,q6adm-routing".
+
- #sound-dai-cells
Usage: required
Value type: <u32>
@@ -28,6 +33,7 @@ q6adm@8 {
compatible = "qcom,q6adm";
reg = <APR_SVC_ADM>;
q6routing: routing {
+ compatible = "qcom,q6adm-routing";
#sound-dai-cells = <0>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6afe.txt b/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
index bdbf87df8c0b..a8179409c194 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
@@ -17,6 +17,11 @@ used by all apr services. Must contain the following properties.
subnode of "dais" representing board specific dai setup.
"dais" node should have following properties followed by dai children.
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,q6afe-dais"
+
- #sound-dai-cells
Usage: required
Value type: <u32>
@@ -100,6 +105,7 @@ q6afe@4 {
reg = <APR_SVC_AFE>;
dais {
+ compatible = "qcom,q6afe-dais";
#sound-dai-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6asm.txt b/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
index 2178eb91146f..f9c7bd8c1bc0 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
@@ -17,6 +17,11 @@ used by the apr service device.
= ASM DAIs (Digital Audio Interface)
"dais" subnode of the ASM node represents dai specific configuration
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,q6asm-dais".
+
- #sound-dai-cells
Usage: required
Value type: <u32>
@@ -28,6 +33,7 @@ q6asm@7 {
compatible = "qcom,q6asm";
reg = <APR_SVC_ASM>;
q6asmdai: dais {
+ compatible = "qcom,q6asm-dais";
#sound-dai-cells = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,sdm845.txt b/Documentation/devicetree/bindings/sound/qcom,sdm845.txt
new file mode 100644
index 000000000000..408c4837e6d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,sdm845.txt
@@ -0,0 +1,80 @@
+* Qualcomm Technologies Inc. SDM845 ASoC sound card driver
+
+This binding describes the SDM845 sound card, which uses qdsp for audio.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,sdm845-sndcard"
+
+- audio-routing:
+ Usage: Optional
+ Value type: <stringlist>
+ Definition: A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, MicBias
+ of codec and the jacks on the board.
+
+- model:
+ Usage: required
+ Value type: <stringlist>
+ Definition: The user-visible name of this sound card.
+
+= dailinks
+Each subnode of sndcard represents a dailink, and subnodes of each
+dailink would be cpu/codec/platform dais.
+
+- link-name:
+ Usage: required
+ Value type: <string>
+ Definition: User friendly name for dai link
+
+= CPU, PLATFORM, CODEC dais subnodes
+- cpu:
+ Usage: required
+ Value type: <subnode>
+ Definition: cpu dai sub-node
+
+- codec:
+ Usage: required
+ Value type: <subnode>
+ Definition: codec dai sub-node
+
+- platform:
+ Usage: Optional
+ Value type: <subnode>
+ Definition: platform dai sub-node
+
+- sound-dai:
+ Usage: required
+ Value type: <phandle>
+ Definition: dai phandle/s and port of CPU/CODEC/PLATFORM node.
+
+Example:
+
+audio {
+ compatible = "qcom,sdm845-sndcard";
+ model = "sdm845-snd-card";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pri_mi2s_active &pri_mi2s_ws_active>;
+ pinctrl-1 = <&pri_mi2s_sleep &pri_mi2s_ws_sleep>;
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ pri-mi2s-dai-link {
+ link-name = "PRI MI2S Playback";
+ cpu {
+ sound-dai = <&q6afedai PRIMARY_MI2S_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
new file mode 100644
index 000000000000..1d8d49e30af7
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
@@ -0,0 +1,123 @@
+QCOM WCD9335 Codec
+
+Qualcomm WCD9335 Codec is a standalone Hi-Fi audio codec IC that supports
+Qualcomm Technologies, Inc. (QTI) multimedia solutions, including
+the MSM8996, MSM8976, and MSM8956 chipsets. It has an in-built
+Soundwire controller and interrupt mux. It supports both I2S/I2C and
+SLIMbus audio interfaces.
+
+Required properties with SLIMbus Interface:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: For SLIMbus interface it should be "slimMID,PID",
+ textual representation of Manufacturer ID, Product Code,
+ shall be in lower case hexadecimal with leading zeroes
+ suppressed. Refer to slimbus/bus.txt for details.
+ Should be:
+ "slim217,1a0" for MSM8996 and APQ8096 SoCs with SLIMbus.
+
+- reg
+ Usage: required
+ Value type: <u32 u32>
+ Definition: Should be ('Device index', 'Instance ID')
+
+- interrupts
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Interrupts via WCD INTR1 and INTR2 pins
+
+- interrupt-names:
+ Usage: required
+ Value type: <String array>
+ Definition: Interrupt names of WCD INTR1 and INTR2
+ Should be: "intr1", "intr2"
+
+- reset-gpio:
+ Usage: required
+ Value type: <String Array>
+ Definition: Reset gpio line
+
+- qcom,ifd:
+ Usage: required
+ Value type: <phandle>
+ Definition: SLIM interface device
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: See clock-bindings.txt section "consumers". List of
+ three clock specifiers for mclk, mclk2 and slimbus clock.
+
+- clock-names:
+ Usage: required
+ Value type: <string>
+ Definition: Must contain "mclk", "mclk2" and "slimbus" strings.
+
+- vdd-buck-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the 1.8V buck supply
+
+- vdd-buck-sido-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the 1.8V SIDO buck supply
+
+- vdd-rx-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the 1.8V rx supply
+
+- vdd-tx-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the 1.8V tx supply
+
+- vdd-vbat-supply:
+ Usage: Optional
+ Value type: <phandle>
+ Definition: Should contain a reference to the vbat supply
+
+- vdd-micbias-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the micbias supply
+
+- vdd-io-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the 1.8V io supply
+
+- interrupt-controller:
+ Usage: required
+ Definition: Indicates that this is an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <int>
+ Definition: should be 1
+
+- #sound-dai-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+Example:
+
+codec@1 {
+ compatible = "slim217,1a0";
+ reg = <1 0>;
+ interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "intr2";
+ reset-gpio = <&msmgpio 64 0>;
+ qcom,ifd = <&wc9335_ifd>;
+ clock-names = "mclk", "native";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>,
+ <&rpmcc RPM_SMD_BB_CLK1>;
+ vdd-buck-supply = <&pm8994_s4>;
+ vdd-rx-supply = <&pm8994_s4>;
+ vdd-buck-sido-supply = <&pm8994_s4>;
+ vdd-tx-supply = <&pm8994_s4>;
+ vdd-io-supply = <&pm8994_s4>;
+ #sound-dai-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index b86d790f630f..9e764270c36b 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -352,6 +352,7 @@ Required properties:
- "renesas,rcar_sound-r8a7794" (R-Car E2)
- "renesas,rcar_sound-r8a7795" (R-Car H3)
- "renesas,rcar_sound-r8a7796" (R-Car M3-W)
+ - "renesas,rcar_sound-r8a77965" (R-Car M3-N)
- reg : Should contain the register physical address.
required register is
SRU/ADG/SSI if generation1
diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
index b208a752576c..54aefab71f2c 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
@@ -7,6 +7,7 @@ Required properties:
- compatible: should be one of the following:
- "rockchip,rk3066-i2s": for rk3066
+ - "rockchip,px30-i2s", "rockchip,rk3066-i2s": for px30
- "rockchip,rk3036-i2s", "rockchip,rk3066-i2s": for rk3036
- "rockchip,rk3188-i2s", "rockchip,rk3066-i2s": for rk3188
- "rockchip,rk3228-i2s", "rockchip,rk3066-i2s": for rk3228
diff --git a/Documentation/devicetree/bindings/sound/rt5514.txt b/Documentation/devicetree/bindings/sound/rt5514.txt
index b25ed08c7a5a..d2cc171f22f2 100644
--- a/Documentation/devicetree/bindings/sound/rt5514.txt
+++ b/Documentation/devicetree/bindings/sound/rt5514.txt
@@ -14,7 +14,6 @@ Optional properties:
- clocks: The phandle of the master clock to the CODEC
- clock-names: Should be "mclk"
-- interrupt-parent: The phandle for the interrupt controller.
- interrupts: The interrupt number to the cpu. The interrupt specifier format
depends on the interrupt controller.
diff --git a/Documentation/devicetree/bindings/sound/rt5682.txt b/Documentation/devicetree/bindings/sound/rt5682.txt
new file mode 100644
index 000000000000..312e9a129530
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt5682.txt
@@ -0,0 +1,50 @@
+RT5682 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : "realtek,rt5682" or "realtek,rt5682i"
+
+- reg : The I2C address of the device.
+
+Optional properties:
+
+- interrupts : The CODEC's interrupt output.
+
+- realtek,dmic1-data-pin
+ 0: dmic1 is not used
+ 1: using GPIO2 pin as dmic1 data pin
+ 2: using GPIO5 pin as dmic1 data pin
+
+- realtek,dmic1-clk-pin
+ 0: using GPIO1 pin as dmic1 clock pin
+ 1: using GPIO3 pin as dmic1 clock pin
+
+- realtek,jd-src
+ 0: No JD is used
+ 1: using JD1 as JD source
+
+- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
+
+Pins on the device (for linking into audio routes) for RT5682:
+
+ * DMIC L1
+ * DMIC R1
+ * IN1P
+ * HPOL
+ * HPOR
+
+Example:
+
+rt5682 {
+ compatible = "realtek,rt5682i";
+ reg = <0x1a>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(U, 6) GPIO_ACTIVE_HIGH>;
+ realtek,ldo1-en-gpios =
+ <&gpio TEGRA_GPIO(R, 2) GPIO_ACTIVE_HIGH>;
+ realtek,dmic1-data-pin = <1>;
+ realtek,dmic1-clk-pin = <1>;
+ realtek,jd-src = <1>;
+};
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index 0f214457476f..9c58f724396a 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -17,7 +17,7 @@ Optional properties:
- VDDD-supply : the regulator provider of VDDD
-- micbias-resistor-k-ohms : the bias resistor to be used in kOmhs
+- micbias-resistor-k-ohms : the bias resistor to be used in kOhms
The resistor can take values of 2k, 4k or 8k.
If set to 0 it will be off.
If this node is not mentioned or if the value is unknown, then
diff --git a/Documentation/devicetree/bindings/sound/simple-amplifier.txt b/Documentation/devicetree/bindings/sound/simple-amplifier.txt
new file mode 100644
index 000000000000..8647edae7af0
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/simple-amplifier.txt
@@ -0,0 +1,12 @@
+Simple Amplifier Audio Driver
+
+Required properties:
+- compatible : "dioo,dio2125" or "simple-audio-amplifier"
+- enable-gpios : the gpio connected to the enable pin of the simple amplifier
+
+Example:
+
+amp: analog-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio GPIOH_3 0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/tas571x.txt b/Documentation/devicetree/bindings/sound/tas571x.txt
index b4959f10b74b..7c8fd37c2f9e 100644
--- a/Documentation/devicetree/bindings/sound/tas571x.txt
+++ b/Documentation/devicetree/bindings/sound/tas571x.txt
@@ -7,6 +7,7 @@ powerdown (optional).
Required properties:
- compatible: should be one of the following:
+ - "ti,tas5707"
- "ti,tas5711",
- "ti,tas5717",
- "ti,tas5719",
diff --git a/Documentation/devicetree/bindings/sound/ts3a227e.txt b/Documentation/devicetree/bindings/sound/ts3a227e.txt
index a836881d9608..3ed8359144d3 100644
--- a/Documentation/devicetree/bindings/sound/ts3a227e.txt
+++ b/Documentation/devicetree/bindings/sound/ts3a227e.txt
@@ -10,7 +10,6 @@ Required properties:
- compatible: Should contain "ti,ts3a227e".
- reg: The i2c address. Should contain <0x3b>.
- - interrupt-parent: The parent interrupt controller
- interrupts: Interrupt number for /INT pin from the 227e
Optional properies:
diff --git a/Documentation/devicetree/bindings/sound/ux500-msp.txt b/Documentation/devicetree/bindings/sound/ux500-msp.txt
index 99acd9c774e1..7dd1b96160f5 100644
--- a/Documentation/devicetree/bindings/sound/ux500-msp.txt
+++ b/Documentation/devicetree/bindings/sound/ux500-msp.txt
@@ -6,7 +6,6 @@ Required properties:
Optional properties:
- interrupts : The interrupt output from the device.
- - interrupt-parent : The parent interrupt controller.
- <name>-supply : Phandle to the regulator <name> supply
Example:
diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
index 4a9dead1b7d3..68cccc4653ba 100644
--- a/Documentation/devicetree/bindings/sound/wm8994.txt
+++ b/Documentation/devicetree/bindings/sound/wm8994.txt
@@ -26,7 +26,6 @@ Optional properties:
- interrupt-controller : These devices contain interrupt controllers
and may provide interrupt services to other devices if they have an
interrupt line connected.
- - interrupt-parent : The parent interrupt controller.
- #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
The first cell is the IRQ number.
The second cell is the flags, encoded as the trigger masks from
@@ -57,6 +56,12 @@ Optional properties:
- wlf,ldoena-always-driven : If present LDOENA is always driven.
+ - wlf,spkmode-pu : If present, enables the internal pull-up resistor on
+ the SPKMODE pin.
+
+ - wlf,csnaddr-pd : If present, enables the internal pull-down resistor on
+ the CS/ADDR pin.
+
Example:
wm8994: codec@1a {
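+ /* hypothetical: enable the SPKMODE pull-up described above */
+ wlf,spkmode-pu;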
diff --git a/Documentation/devicetree/bindings/spi/fsl-spi.txt b/Documentation/devicetree/bindings/spi/fsl-spi.txt
index a2331372068c..8854004a1d3a 100644
--- a/Documentation/devicetree/bindings/spi/fsl-spi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-spi.txt
@@ -12,8 +12,6 @@ Required properties:
information for the interrupt. This should be encoded based on
the information in section 2) depending on the type of interrupt
controller you have.
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- clock-frequency : input clock frequency to non FSL_SOC cores
Optional properties:
diff --git a/Documentation/devicetree/bindings/spi/sh-hspi.txt b/Documentation/devicetree/bindings/spi/sh-hspi.txt
index 585fed90376e..b9d1e4d11a77 100644
--- a/Documentation/devicetree/bindings/spi/sh-hspi.txt
+++ b/Documentation/devicetree/bindings/spi/sh-hspi.txt
@@ -6,8 +6,6 @@ Required properties:
- "renesas,hspi-r8a7778" (R-Car M1)
- "renesas,hspi-r8a7779" (R-Car H1)
- reg : Offset and length of the register set for the device
-- interrupt-parent : The phandle for the interrupt controller that
- services interrupts for this device
- interrupts : Interrupt specifier
- #address-cells : Must be <1>
- #size-cells : Must be <0>
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index 39806329c193..bfbc2035fb6b 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -29,8 +29,6 @@ Required properties:
If two register sets are present, the first is to be
used by the CPU, and the second is to be used by the
DMA engine.
-- interrupt-parent : The phandle for the interrupt controller that
- services interrupts for this device
- interrupts : Interrupt specifier
- #address-cells : Must be <1>
- #size-cells : Must be <0>
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
index 204b311e0400..642d3fb1ef85 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
@@ -1,8 +1,10 @@
Synopsys DesignWare AMBA 2.0 Synchronous Serial Interface.
Required properties:
-- compatible : "snps,dw-apb-ssi"
-- reg : The register base for the controller.
+- compatible : "snps,dw-apb-ssi" or "mscc,<soc>-spi", where soc is "ocelot" or
+ "jaguar2"
+- reg : The register base for the controller. For "mscc,<soc>-spi", a second
+ register set is required (named ICPU_CFG:SPI_MST)
- interrupts : One interrupt, used by the controller.
- #address-cells : <1>, as required by generic SPI binding.
- #size-cells : <0>, also as required by generic SPI binding.
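+
+Example for the Microsemi flavour (the addresses are hypothetical;
+note the second register set for ICPU_CFG:SPI_MST):
+
+spi0: spi@101000 {
+ compatible = "mscc,ocelot-spi";
+ reg = <0x101000 0x100>, <0x3c 0x18>;
+ interrupts = <9>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+};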
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.txt b/Documentation/devicetree/bindings/spi/spi-cadence.txt
index 94f09141a4f0..05a2ef945664 100644
--- a/Documentation/devicetree/bindings/spi/spi-cadence.txt
+++ b/Documentation/devicetree/bindings/spi/spi-cadence.txt
@@ -6,7 +6,6 @@ Required properties:
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
-- interrupt-parent : Must be core interrupt controller
- clock-names : List of input clock names - "ref_clk", "pclk"
(See clock bindings for details).
- clocks : Clock phandles (see clock bindings for details).
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
index 225ace1d0c65..4af132606b37 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible :
- "fsl,imx7ulp-spi" for LPSPI compatible with the one integrated on i.MX7ULP soc
- reg : address and length of the lpspi master registers
-- interrupt-parent : core interrupt controller
- interrupts : lpspi interrupt
- clocks : lpspi clock specifier
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.txt b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
index 6e3ffacbba32..a0edac12d8df 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
@@ -7,6 +7,7 @@ Required Properties:
- compatible: should be one of the following.
"rockchip,rv1108-spi" for rv1108 SoCs.
+ "rockchip,px30-spi", "rockchip,rk3066-spi" for px30 SoCs.
"rockchip,rk3036-spi" for rk3036 SoCS.
"rockchip,rk3066-spi" for rk3066 SoCs.
"rockchip,rk3188-spi" for rk3188 SoCs.
diff --git a/Documentation/devicetree/bindings/spi/spi-rspi.txt b/Documentation/devicetree/bindings/spi/spi-rspi.txt
index 3b02b3a7cfb2..96fd58548f69 100644
--- a/Documentation/devicetree/bindings/spi/spi-rspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rspi.txt
@@ -28,8 +28,6 @@ Required properties:
- "rx" for SPRI,
- "tx" to SPTI,
- "mux" for a single muxed interrupt.
-- interrupt-parent : The phandle for the interrupt controller that
- services interrupts for this device.
- num-cs : Number of chip selects. Some RSPI cores have more than 1.
- #address-cells : Must be <1>
- #size-cells : Must be <0>
diff --git a/Documentation/devicetree/bindings/spi/spi-uniphier.txt b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
new file mode 100644
index 000000000000..504a4ecfc7b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
@@ -0,0 +1,22 @@
+Socionext UniPhier SPI controller driver
+
+UniPhier SoCs have an SCSSI block, which supports a single SPI channel.
+
+Required properties:
+ - compatible: should be "socionext,uniphier-scssi"
+ - reg: address and length of the spi master registers
+ - #address-cells: must be <1>, see spi-bus.txt
+ - #size-cells: must be <0>, see spi-bus.txt
+ - clocks: A phandle to the clock for the device.
+ - resets: A phandle to the reset control for the device.
+
+Example:
+
+spi0: spi@54006000 {
+ compatible = "socionext,uniphier-scssi";
+ reg = <0x54006000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&peri_clk 11>;
+ resets = <&peri_rst 11>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
index 7bf61efc66c8..dc924a5f71db 100644
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
@@ -6,7 +6,6 @@ Required properties:
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
-- interrupt-parent : Must be core interrupt controller
Optional properties:
- xlnx,num-ss-bits : Number of chip selects used.
diff --git a/Documentation/devicetree/bindings/spi/spi-xlp.txt b/Documentation/devicetree/bindings/spi/spi-xlp.txt
index 40e82d51efec..f4925ec0ed33 100644
--- a/Documentation/devicetree/bindings/spi/spi-xlp.txt
+++ b/Documentation/devicetree/bindings/spi/spi-xlp.txt
@@ -13,7 +13,6 @@ Required properties:
- reg : Should contain register location and length.
- clocks : Phandle of the spi clock
- interrupts : Interrupt number used by this controller.
-- interrupt-parent : Phandle of the parent interrupt controller.
SPI slave nodes must be children of the SPI master node and can contain
properties described in Documentation/devicetree/bindings/spi/spi-bus.txt.
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
index c8f50e5cf70b..0f6d37ff541c 100644
--- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
@@ -6,7 +6,6 @@ Required properties:
- reg : Physical base address and size of GQSPI registers map.
- interrupts : Property with a value describing the interrupt
number.
-- interrupt-parent : Must be core interrupt controller.
- clock-names : List of input clock names - "ref_clk", "pclk"
(See clock bindings for details).
- clocks : Clock phandles (see clock bindings for details).
diff --git a/Documentation/devicetree/bindings/sram/sram.txt b/Documentation/devicetree/bindings/sram/sram.txt
index 267da4410aef..e98908bd4227 100644
--- a/Documentation/devicetree/bindings/sram/sram.txt
+++ b/Documentation/devicetree/bindings/sram/sram.txt
@@ -50,6 +50,8 @@ Optional properties in the area nodes:
manipulation of the page attributes.
- label : the name for the reserved partition, if omitted, the label
is taken from the node name excluding the unit address.
+- clocks : a list of phandle and clock specifier pairs that controls the
+ single SRAM clock.
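+
+An area subnode gated by its own clock might look like this (the clock
+phandle below is hypothetical):
+
+ sram-section@100 {
+ reg = <0x100 0x50>;
+ clocks = <&clk_sram>;
+ };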
Example:
diff --git a/Documentation/devicetree/bindings/staging/iio/adc/spear-adc.txt b/Documentation/devicetree/bindings/staging/iio/adc/spear-adc.txt
index 02ea23a63f20..88bc94fe1f6d 100644
--- a/Documentation/devicetree/bindings/staging/iio/adc/spear-adc.txt
+++ b/Documentation/devicetree/bindings/staging/iio/adc/spear-adc.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: Should be "st,spear600-adc"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the ADC interrupt
- sampling-frequency: Default sampling frequency
diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
index e0d013a2e66d..f3b441100890 100644
--- a/Documentation/devicetree/bindings/thermal/armada-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
@@ -10,6 +10,11 @@ Required properties:
* marvell,armada-ap806-thermal
* marvell,armada-cp110-thermal
+Note: these bindings are deprecated for AP806/CP110 and should instead
+follow the rules described in:
+Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
+Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
+
- reg: Device's register space.
Two entries are expected, see the examples below. The first one points
to the status register (4B). The second one points to the control
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
index 9d43553a8d39..43a9ed545944 100644
--- a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
+++ b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
@@ -7,7 +7,6 @@ Required properties:
- reg: address range for the AVS TMON registers
- interrupts: temperature monitor interrupt, for high/low threshold triggers
- interrupt-names: should be "tmon"
-- interrupt-parent: the parent interrupt controller
Example:
diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
index ad648d93d961..33004ce7e5df 100644
--- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
@@ -13,7 +13,6 @@
Exynos5420 (Must pass triminfo base and triminfo clock)
"samsung,exynos5433-tmu"
"samsung,exynos7-tmu"
-- interrupt-parent : The phandle for the interrupt controller
- reg : Address range of the thermal registers. For soc's which has multiple
instances of TMU and some registers are shared across all TMU's like
interrupt related then 2 set of register has to supplied. First set
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
index 06195e8f35e2..1d9e8cf61018 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
@@ -1,18 +1,28 @@
* QCOM SoC Temperature Sensor (TSENS)
Required properties:
-- compatible :
- - "qcom,msm8916-tsens" : For 8916 Family of SoCs
- - "qcom,msm8974-tsens" : For 8974 Family of SoCs
- - "qcom,msm8996-tsens" : For 8996 Family of SoCs
+- compatible:
+ Must be one of the following:
+ - "qcom,msm8916-tsens" (MSM8916)
+ - "qcom,msm8974-tsens" (MSM8974)
+ - "qcom,msm8996-tsens" (MSM8996)
+ - "qcom,msm8998-tsens", "qcom,tsens-v2" (MSM8998)
+ - "qcom,sdm845-tsens", "qcom,tsens-v2" (SDM845)
+ The generic "qcom,tsens-v2" property must be used as a fallback for any SoC
+ with version 2 of the TSENS IP. MSM8996 is the only exception because the
+ generic property did not exist when support was added.
+
+- reg: Address range of the thermal registers.
+ New platforms containing v2.x.y of the TSENS IP must specify the SROT and TM
+ register spaces separately, with order being TM before SROT.
+ See Example 2, below.
-- reg: Address range of the thermal registers
- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description.
- #qcom,sensors: Number of sensors in tsens block
- Refer to Documentation/devicetree/bindings/nvmem/nvmem.txt to know how to specify
nvmem cells
-Example:
+Example 1 (legacy support before a fallback tsens-v2 property was introduced):
tsens: thermal-sensor@900000 {
compatible = "qcom,msm8916-tsens";
reg = <0x4a8000 0x2000>;
@@ -20,3 +30,12 @@ tsens: thermal-sensor@900000 {
nvmem-cell-names = "caldata", "calsel";
#thermal-sensor-cells = <1>;
};
+
+Example 2 (for any platform containing v2 of the TSENS IP):
+tsens0: thermal-sensor@c263000 {
+ compatible = "qcom,sdm845-tsens", "qcom,tsens-v2";
+ reg = <0xc263000 0x1ff>, /* TM */
+ <0xc222000 0x1ff>; /* SROT */
+ #qcom,sensors = <13>;
+ #thermal-sensor-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt b/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
index 904a5846d7ac..e698e3488735 100644
--- a/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
+++ b/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible : should be "altr,timer-1.0"
- reg : Specifies base physical address and size of the registers.
-- interrupt-parent: phandle of the interrupt controller
- interrupts : Should contain the timer interrupt number
- clock-frequency : The frequency of the clock that drives the counter, in Hz.
diff --git a/Documentation/devicetree/bindings/timer/fsl,gtm.txt b/Documentation/devicetree/bindings/timer/fsl,gtm.txt
index 9a33efded4bc..fc1c571f7412 100644
--- a/Documentation/devicetree/bindings/timer/fsl,gtm.txt
+++ b/Documentation/devicetree/bindings/timer/fsl,gtm.txt
@@ -7,7 +7,6 @@ Required properties:
"fsl,<chip>-cpm2-gtm", "fsl,cpm2-gtm", "fsl,gtm" for CPM2 GTMs
- reg : should contain gtm registers location and length (0x40).
- interrupts : should contain four interrupts.
- - interrupt-parent : interrupt source phandle.
- clock-frequency : specifies the frequency driving the timer.
Example:
diff --git a/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt b/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt
index 62bb8260cf6a..cd1a0c256f94 100644
--- a/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt
+++ b/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt
@@ -3,7 +3,6 @@ Marvell Orion SoC timer
Required properties:
- compatible: shall be "marvell,orion-timer"
- reg: base address of the timer register starting with TIMERS CONTROL register
-- interrupt-parent: phandle of the bridge interrupt controller
- interrupts: should contain the interrupts for Timer0 and Timer1
- clocks: phandle of timer reference clock (tclk)
diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
index b1fe7e9de1b4..18d4d0166c76 100644
--- a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
+++ b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
@@ -1,19 +1,25 @@
-Mediatek MT6577, MT6572 and MT6589 Timers
----------------------------------------
+Mediatek Timers
+---------------
+
+Mediatek SoCs have two different timers, depending on the platform:
+- GPT (General Purpose Timer)
+- SYST (System Timer)
+
+The proper timer will be selected automatically by the driver.
Required properties:
- compatible should contain:
- * "mediatek,mt2701-timer" for MT2701 compatible timers
- * "mediatek,mt6580-timer" for MT6580 compatible timers
- * "mediatek,mt6589-timer" for MT6589 compatible timers
- * "mediatek,mt7623-timer" for MT7623 compatible timers
- * "mediatek,mt8127-timer" for MT8127 compatible timers
- * "mediatek,mt8135-timer" for MT8135 compatible timers
- * "mediatek,mt8173-timer" for MT8173 compatible timers
- * "mediatek,mt6577-timer" for MT6577 and all above compatible timers
-- reg: Should contain location and length for timers register.
-- clocks: Clocks driving the timer hardware. This list should include two
- clocks. The order is system clock and as second clock the RTC clock.
+ * "mediatek,mt2701-timer" for MT2701 compatible timers (GPT)
+ * "mediatek,mt6580-timer" for MT6580 compatible timers (GPT)
+ * "mediatek,mt6589-timer" for MT6589 compatible timers (GPT)
+ * "mediatek,mt7623-timer" for MT7623 compatible timers (GPT)
+ * "mediatek,mt8127-timer" for MT8127 compatible timers (GPT)
+ * "mediatek,mt8135-timer" for MT8135 compatible timers (GPT)
+ * "mediatek,mt8173-timer" for MT8173 compatible timers (GPT)
+ * "mediatek,mt6577-timer" for MT6577 and all above compatible timers (GPT)
+ * "mediatek,mt6765-timer" for MT6765 compatible timers (SYST)
+- reg: Should contain location and length for timer register.
+- clocks: Should contain system clock.
Examples:
@@ -21,5 +27,5 @@ Examples:
compatible = "mediatek,mt6577-timer";
reg = <0x10008000 0x80>;
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&system_clk>, <&rtc_clk>;
+ clocks = <&system_clk>;
};
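+
+A SYST-based timer would be described the same way (the base address
+and interrupt below are placeholders):
+
+ systimer: timer@10017000 {
+ compatible = "mediatek,mt6765-timer";
+ reg = <0x10017000 0x10>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&system_clk>;
+ };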
diff --git a/Documentation/devicetree/bindings/timer/snps,arc-timer.txt b/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
index 4ef024630d61..147ef3e74452 100644
--- a/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
+++ b/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
@@ -12,10 +12,6 @@ Required properties:
(16 for ARCHS cores, 3 for ARC700 cores)
- clocks : phandle to the source clock
-Optional properties:
-
-- interrupt-parent : phandle to parent intc
-
Example:
timer0 {
diff --git a/Documentation/devicetree/bindings/timer/st,spear-timer.txt b/Documentation/devicetree/bindings/timer/st,spear-timer.txt
index c0017221cf55..b5238a07da17 100644
--- a/Documentation/devicetree/bindings/timer/st,spear-timer.txt
+++ b/Documentation/devicetree/bindings/timer/st,spear-timer.txt
@@ -5,8 +5,6 @@
- compatible : Should be:
"st,spear-timer"
- reg: Address range of the timer registers
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupt: Should contain the timer interrupt number
Example:
diff --git a/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt b/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
index 95911fe70224..d96c1e283e73 100644
--- a/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
+++ b/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
@@ -7,7 +7,6 @@ Required properties:
- compatible: must be "ti,c64x+timer64"
- reg: base address and size of register region
-- interrupt-parent: interrupt controller
- interrupts: interrupt id
Optional properties:
diff --git a/Documentation/devicetree/bindings/ufs/ufs-hisi.txt b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt
new file mode 100644
index 000000000000..a48c44817367
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt
@@ -0,0 +1,41 @@
+* Hisilicon Universal Flash Storage (UFS) Host Controller
+
+UFS nodes are defined to describe on-chip UFS hardware macro.
+Each UFS Host Controller should have its own node.
+
+Required properties:
+- compatible : compatible list, contains one of the following -
+ "hisilicon,hi3660-ufs", "jedec,ufs-1.1" for hisi ufs
+ host controller present on Hi36xx chipset.
+- reg : should contain UFS register address space & UFS SYS CTRL register address.
+- interrupt-parent : interrupt device
+- interrupts : interrupt number
+- clocks : List of phandle and clock specifier pairs
+- clock-names : List of clock input name strings sorted in the same
+ order as the clocks property. "ref_clk", "phy_clk" are optional
+- freq-table-hz : Array of <min max> operating frequencies stored in the same
+ order as the clocks property. If this property is not
+ defined or a value in the array is "0" then it is assumed
+ that the frequency is set by the parent clock or a
+ fixed rate clock source.
+- resets : reset specifier for the UFS controller
+- reset-names : name of the reset; "rst" corresponds to resetting the whole UFS IP.
+
+Example:
+
+ ufs: ufs@ff3b0000 {
+ compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1";
+ /* 0: HCI standard */
+ /* 1: UFS SYS CTRL */
+ reg = <0x0 0xff3b0000 0x0 0x1000>,
+ <0x0 0xff3b1000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>;
+ clock-names = "ref_clk", "phy_clk";
+ freq-table-hz = <0 0>, <0 0>;
+ /* offset: 0x84; bit: 12 */
+ resets = <&crg_rst 0x84 12>;
+ reset-names = "rst";
+ };
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index c39dfef76a18..2df00524bd21 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -41,6 +41,8 @@ Optional properties:
-lanes-per-direction : number of lanes available per direction - either 1 or 2.
Note that it is assumed that the same number of lanes is used in both
directions at once. If not specified, default is 2 lanes per direction.
+- resets : reset specifier for the UFS controller
+- reset-names : name of the reset; "rst" corresponds to resetting the whole UFS IP.
Note: If above properties are not defined it can be assumed that the supply
regulators or clocks are always on.
@@ -61,9 +63,11 @@ Example:
vccq-max-microamp = 200000;
vccq2-max-microamp = 200000;
- clocks = <&core 0>, <&ref 0>, <&iface 0>;
- clock-names = "core_clk", "ref_clk", "iface_clk";
- freq-table-hz = <100000000 200000000>, <0 0>, <0 0>;
+ clocks = <&core 0>, <&ref 0>, <&phy 0>, <&iface 0>;
+ clock-names = "core_clk", "ref_clk", "phy_clk", "iface_clk";
+ freq-table-hz = <100000000 200000000>, <0 0>, <0 0>, <0 0>;
+ resets = <&reset 0 1>;
+ reset-names = "rst";
phys = <&ufsphy1>;
phy-names = "ufsphy";
};
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 7f13ebef06cb..3e4c38b806ac 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -96,6 +96,11 @@ Optional properties:
enable periodic ESS TX threshold.
- <DEPRECATED> tx-fifo-resize: determines if the FIFO *has* to be reallocated.
+ - snps,incr-burst-type-adjustment: Value for INCR burst type of GSBUSCFG0
+ register, undefined length INCR burst type enable and INCRx type.
+ If only one value is given, fixed INCRx burst mode is enabled. If
+ more than one value is given, undefined length INCR burst type is
+ enabled. The values can be 1, 4, 8, 16, 32, 64, 128 and 256.
- in addition all properties from usb-xhci.txt from the current directory are
supported as well
@@ -108,4 +113,5 @@ dwc3@4a030000 {
reg = <0x4a030000 0xcfff>;
interrupts = <0 92 4>
usb-phy = <&usb2_phy>, <&usb3,phy>;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
diff --git a/Documentation/devicetree/bindings/usb/fsl-usb.txt b/Documentation/devicetree/bindings/usb/fsl-usb.txt
index 4779c029b675..0b08b006c5ea 100644
--- a/Documentation/devicetree/bindings/usb/fsl-usb.txt
+++ b/Documentation/devicetree/bindings/usb/fsl-usb.txt
@@ -33,8 +33,6 @@ Recommended properties :
information for the interrupt. This should be encoded based on
the information in section 2) depending on the type of interrupt
controller you have.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties :
- fsl,invert-drvvbus : boolean; for MPC5121 USB0 only. Indicates the
diff --git a/Documentation/devicetree/bindings/usb/maxim,max3421.txt b/Documentation/devicetree/bindings/usb/maxim,max3421.txt
index 8cdbe0c85188..90495b1aeec2 100644
--- a/Documentation/devicetree/bindings/usb/maxim,max3421.txt
+++ b/Documentation/devicetree/bindings/usb/maxim,max3421.txt
@@ -11,9 +11,6 @@ Required properties:
The driver configures MAX3421E for active low level triggered interrupts,
configure your interrupt line accordingly.
-Optional property:
- - interrupt-parent: the phandle to the associated interrupt controller.
-
Example:
usb@0 {
diff --git a/Documentation/devicetree/bindings/usb/npcm7xx-usb.txt b/Documentation/devicetree/bindings/usb/npcm7xx-usb.txt
new file mode 100644
index 000000000000..5a0f1f14fbfa
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/npcm7xx-usb.txt
@@ -0,0 +1,18 @@
+Nuvoton NPCM7XX SoC USB controllers:
+-----------------------------
+
+EHCI:
+-----
+
+Required properties:
+- compatible: "nuvoton,npcm750-ehci"
+- interrupts: Should contain the EHCI interrupt
+- reg: Physical address and length of the register set for the device
+
+Example:
+
+ ehci1: usb@f0806000 {
+ compatible = "nuvoton,npcm750-ehci";
+ reg = <0xf0806000 0x1000>;
+ interrupts = <0 61 4>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt b/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
index 09e847e92e5e..d4cf53c071d9 100644
--- a/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
+++ b/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
@@ -3,8 +3,6 @@ Richtek RT1711H TypeC PD Controller.
Required properties:
- compatible : Must be "richtek,rt1711h".
- reg : Must be 0x4e, it's slave address of RT1711H.
- - interrupt-parent : the phandle for the interrupt controller that
- provides interrupts for this device.
- interrupts : <a b> where a is the interrupt number and b represents an
encoding of the sense and level information for the interrupt.
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
index 252a05c5d976..c8c4b00ecb94 100644
--- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
@@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
the node is not important. The content of the node is defined in dwc3.txt.
Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt
+Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
+Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY
Example device nodes:
diff --git a/Documentation/devicetree/bindings/usb/samsung-hsotg.txt b/Documentation/devicetree/bindings/usb/samsung-hsotg.txt
index b83d428a265e..0388634598ce 100644
--- a/Documentation/devicetree/bindings/usb/samsung-hsotg.txt
+++ b/Documentation/devicetree/bindings/usb/samsung-hsotg.txt
@@ -14,8 +14,6 @@ Binding details
Required properties:
- compatible: "samsung,s3c6400-hsotg" should be used for all currently
supported SoC,
-- interrupt-parent: phandle for the interrupt controller to which the
- interrupt signal of the HSOTG block is routed,
- interrupts: specifier of interrupt signal of interrupt controller,
according to bindings of interrupt controller,
- clocks: contains an array of clock specifiers:
diff --git a/Documentation/devicetree/bindings/usb/spear-usb.txt b/Documentation/devicetree/bindings/usb/spear-usb.txt
index f8a464a25653..1dc91cc459c0 100644
--- a/Documentation/devicetree/bindings/usb/spear-usb.txt
+++ b/Documentation/devicetree/bindings/usb/spear-usb.txt
@@ -6,8 +6,6 @@ EHCI:
Required properties:
- compatible: "st,spear600-ehci"
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the EHCI interrupt
Example:
@@ -25,8 +23,6 @@ OHCI:
Required properties:
- compatible: "st,spear600-ohci"
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the OHCI interrupt
Example:
diff --git a/Documentation/devicetree/bindings/usb/typec-tcpci.txt b/Documentation/devicetree/bindings/usb/typec-tcpci.txt
new file mode 100644
index 000000000000..0dd1469e7318
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/typec-tcpci.txt
@@ -0,0 +1,49 @@
+TCPCI (Type-C port controller interface) binding
+------------------------------------------------
+
+Required properties:
+- compatible: should be set to one of the following:
+ - "nxp,ptn5110" for NXP USB PD TCPC PHY IC ptn5110.
+
+- reg: the i2c slave address of typec port controller device.
+- interrupt-parent: the phandle to the interrupt controller which provides
+ the interrupt.
+- interrupts: interrupt specification for tcpci alert.
+
+Required sub-node:
+- connector: The "usb-c-connector" attached to the tcpci chip, the bindings
+ of connector node are specified in
+ Documentation/devicetree/bindings/connector/usb-connector.txt
+
+Example:
+
+ptn5110@50 {
+ compatible = "nxp,ptn5110";
+ reg = <0x50>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ power-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 12000, 2000)>;
+ op-sink-microwatt = <10000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+ usb_con_ss: endpoint {
+ remote-endpoint = <&usb3_data_ss>;
+ };
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index bd1dd316fb23..ac4cd0d6195a 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -14,6 +14,7 @@ Required properties:
- "renesas,xhci-r8a7795" for r8a7795 SoC
- "renesas,xhci-r8a7796" for r8a7796 SoC
- "renesas,xhci-r8a77965" for r8a77965 SoC
+ - "renesas,xhci-r8a77990" for r8a77990 SoC
- "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible
device
- "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 7cad066191ee..2f3620547249 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,6 +8,7 @@ abracon Abracon Corporation
actions Actions Semiconductor Co., Ltd.
active-semi Active-Semi International Inc
ad Avionic Design GmbH
+adafruit Adafruit Industries, LLC
adapteva Adapteva, Inc.
adaptrum Adaptrum, Inc.
adh AD Holdings Plc.
@@ -41,6 +42,7 @@ arrow Arrow Electronics
artesyn Artesyn Embedded Technologies Inc.
asahi-kasei Asahi Kasei Corp.
aspeed ASPEED Technology Inc.
+asus AsusTek Computer Inc.
atlas Atlas Scientific LLC
atmel Atmel Corporation
auo AU Optronics Corporation
@@ -53,6 +55,7 @@ axentia Axentia Technologies AB
axis Axis Communications AB
bananapi BIPAI KEJI LIMITED
bhf Beckhoff Automation GmbH & Co. KG
+bitmain Bitmain Technologies
boe BOE Technology Group Co., Ltd.
bosch Bosch Sensortec GmbH
boundary Boundary Devices Inc.
@@ -85,6 +88,7 @@ cubietech Cubietech, Ltd.
cypress Cypress Semiconductor Corporation
cznic CZ.NIC, z.s.p.o.
dallas Maxim Integrated Products (formerly Dallas Semiconductor)
+dataimage DataImage, Inc.
davicom DAVICOM Semiconductor, Inc.
delta Delta Electronics, Inc.
denx Denx Software Engineering
@@ -93,6 +97,7 @@ dh DH electronics GmbH
digi Digi International Inc.
digilent Diglent, Inc.
dioo Dioo Microcircuit Co., Ltd
+dlc DLC Display Co., Ltd.
dlg Dialog Semiconductor
dlink D-Link Corporation
dmo Data Modul AG
@@ -124,6 +129,7 @@ excito Excito
ezchip EZchip Semiconductor
fairphone Fairphone B.V.
faraday Faraday Technology Corporation
+fastrax Fastrax Oy
fcs Fairchild Semiconductor
firefly Firefly
focaltech FocalTech Systems Co.,Ltd
@@ -188,6 +194,7 @@ keymile Keymile GmbH
khadas Khadas
kiebackpeter Kieback & Peter GmbH
kinetic Kinetic Technologies
+kingdisplay King & Display Technology Co., Ltd.
kingnovel Kingnovel Technology Co., Ltd.
koe Kaohsiung Opto-Electronics Inc.
kosagi Sutajio Ko-Usagi PTE Ltd.
@@ -203,6 +210,7 @@ licheepi Lichee Pi
linaro Linaro Limited
linksys Belkin International, Inc. (Linksys)
linux Linux-specific binding
+linx Linx Technologies
lltc Linear Technology Corporation
logicpd Logic PD, Inc.
lsi LSI Corp. (LSI Logic)
@@ -384,6 +392,7 @@ tronsmart Tronsmart
truly Truly Semiconductors Limited
tsd Theobroma Systems Design und Consulting GmbH
tyan Tyan Computer Corporation
+u-blox u-blox
ucrobotics uCRobotics
ubnt Ubiquiti Networks
udoo Udoo
@@ -395,6 +404,7 @@ v3 V3 Semiconductor
variscite Variscite Ltd.
via VIA Technologies, Inc.
virtio Virtual I/O Device Specification, developed by the OASIS consortium
+vitesse Vitesse Semiconductor Corporation
vivante Vivante Corporation
vocore VoCore Studio
voipac Voipac Technologies s.r.o.
@@ -412,6 +422,7 @@ xes Extreme Engineering Solutions (X-ES)
xillybus Xillybus Ltd.
xlnx Xilinx
xunlong Shenzhen Xunlong Software CO.,Limited
+ysoft Y Soft Corporation a.s.
zarlink Zarlink Semiconductor
zeitec ZEITEC Semiconductor Co., LTD.
zidoo Shenzhen Zidoo Technology Co., Ltd.
diff --git a/Documentation/devicetree/bindings/w1/w1-gpio.txt b/Documentation/devicetree/bindings/w1/w1-gpio.txt
index 6e09c35d9f1a..3d6554eac240 100644
--- a/Documentation/devicetree/bindings/w1/w1-gpio.txt
+++ b/Documentation/devicetree/bindings/w1/w1-gpio.txt
@@ -13,10 +13,15 @@ Optional properties:
- linux,open-drain: if specified, the data pin is considered in
open-drain mode.
+Also refer to the generic w1.txt document.
+
Examples:
- onewire@0 {
+ onewire {
compatible = "w1-gpio";
- gpios = <&gpio 126 0>, <&gpio 105 0>;
- };
+ gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+ battery {
+ // ...
+ };
+ };
diff --git a/Documentation/devicetree/bindings/w1/w1.txt b/Documentation/devicetree/bindings/w1/w1.txt
new file mode 100644
index 000000000000..05f26b27d898
--- /dev/null
+++ b/Documentation/devicetree/bindings/w1/w1.txt
@@ -0,0 +1,25 @@
+Generic devicetree bindings for onewire (w1) busses
+===================================================
+
+Onewire busses are described through nodes of their master bus controller.
+Slave devices are listed as sub-nodes of such master devices. For now, only
+one slave is allowed per bus master.
+
+
+Example:
+
+ charger: charger {
+ compatible = "gpio-charger";
+ charger-type = "mains";
+ gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+ };
+
+ onewire {
+ compatible = "w1-gpio";
+ gpios = <&gpio 100 0>, <&gpio 101 0>;
+
+ battery {
+ compatible = "maxim,ds2760";
+ power-supplies = <&charger>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/arm,sp805.txt b/Documentation/devicetree/bindings/watchdog/arm,sp805.txt
index ca99d64e6211..bee6f1f0e41b 100644
--- a/Documentation/devicetree/bindings/watchdog/arm,sp805.txt
+++ b/Documentation/devicetree/bindings/watchdog/arm,sp805.txt
@@ -1,17 +1,32 @@
ARM AMBA Primecell SP805 Watchdog
+SP805 WDT is an ARM Primecell peripheral and has a standard-id register that
+can be used to identify the peripheral type, vendor, and revision.
+This value can be used for driver matching.
+
+As SP805 WDT is a primecell IP, it follows the base bindings specified in
+'arm/primecell.txt'
+
Required properties:
-- compatible: Should be "arm,sp805" & "arm,primecell"
-- reg: Should contain location and length for watchdog timer register.
-- interrupts: Should contain the list of watchdog timer interrupts.
-- clocks: clocks driving the watchdog timer hardware. This list should be 2
- clocks. With 2 clocks, the order is wdogclk clock, apb_pclk.
+- compatible: Should be "arm,sp805" & "arm,primecell"
+- reg: Should contain location and length for watchdog timer register
+- clocks: Clocks driving the watchdog timer hardware. This list should be
+ 2 clocks. With 2 clocks, the order is wdog_clk, apb_pclk
+ wdog_clk can be equal to or be a sub-multiple of the apb_pclk
+ frequency
+- clock-names: Shall be "wdog_clk" for first clock and "apb_pclk" for the
+ second one
+
+Optional properties:
+- interrupts: Should specify WDT interrupt number
+- timeout-sec: Should specify default WDT timeout in seconds. If unset, the
+ default timeout is determined by the driver
Example:
watchdog@66090000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x66090000 0x1000>;
interrupts = <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb_pclk>,<&apb_pclk>;
- clock-names = "wdogclk", "apb_pclk";
+ clocks = <&wdt_clk>, <&apb_pclk>;
+ clock-names = "wdog_clk", "apb_pclk";
};
diff --git a/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt b/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt
index c3a36ee45552..750a87657448 100644
--- a/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt
@@ -5,7 +5,6 @@ Required properties:
- compatible : Should be "cdns,wdt-r1p2".
- clocks : This is pclk (APB clock).
- interrupts : This is wd_irq - watchdog timeout interrupt.
-- interrupt-parent : Must be core interrupt controller.
Optional properties
- reset-on-timeout : If this property exists, then a reset is done
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
index f24d802b8056..5d47a262474c 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
@@ -16,6 +16,7 @@ Required properties:
- "renesas,r8a7796-wdt" (R-Car M3-W)
- "renesas,r8a77965-wdt" (R-Car M3-N)
- "renesas,r8a77970-wdt" (R-Car V3M)
+ - "renesas,r8a77990-wdt" (R-Car E3)
- "renesas,r8a77995-wdt" (R-Car D3)
- "renesas,r7s72100-wdt" (RZ/A1)
The generic compatible string must be:
diff --git a/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt b/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt
index d7bab3db9d1f..05b95bfa2a89 100644
--- a/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt
@@ -5,7 +5,6 @@ Required properties:
- reg: physical base address of the controller and length of the register range
Optional properties:
-- interrupt-parent: phandle to the INTC device node
- interrupts: Specify the INTC interrupt number
Example:
diff --git a/Documentation/devicetree/bindings/watchdog/sp805-wdt.txt b/Documentation/devicetree/bindings/watchdog/sp805-wdt.txt
deleted file mode 100644
index edc4f0ea54a3..000000000000
--- a/Documentation/devicetree/bindings/watchdog/sp805-wdt.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-* ARM SP805 Watchdog Timer (WDT) Controller
-
-SP805 WDT is a ARM Primecell Peripheral and has a standard-id register that
-can be used to identify the peripheral type, vendor, and revision.
-This value can be used for driver matching.
-
-As SP805 WDT is a primecell IP, it follows the base bindings specified in
-'arm/primecell.txt'
-
-Required properties:
-- compatible : Should be "arm,sp805-wdt", "arm,primecell"
-- reg : Base address and size of the watchdog timer registers.
-- clocks : From common clock binding.
- First clock is PCLK and the second is WDOGCLK.
- WDOGCLK can be equal to or be a sub-multiple of the PCLK frequency.
-- clock-names : From common clock binding.
- Shall be "apb_pclk" for first clock and "wdog_clk" for the
- second one.
-
-Optional properties:
-- interrupts : Should specify WDT interrupt number.
-
-Examples:
-
- cluster1_core0_watchdog: wdt@c000000 {
- compatible = "arm,sp805-wdt", "arm,primecell";
- reg = <0x0 0xc000000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
- clock-names = "apb_pclk", "wdog_clk";
- };
-
diff --git a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.txt b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.txt
index cc13b10a3f82..d8f4430b0a13 100644
--- a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.txt
+++ b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.txt
@@ -2,9 +2,15 @@ STM32 Independent WatchDoG (IWDG)
---------------------------------
Required properties:
-- compatible: "st,stm32-iwdg"
-- reg: physical base address and length of the registers set for the device
-- clocks: must contain a single entry describing the clock input
+- compatible: Should be either:
+ - "st,stm32-iwdg"
+ - "st,stm32mp1-iwdg"
+- reg: Physical base address and length of the registers set for the device
+- clocks: Reference to the clock entry lsi. An additional pclk clock entry
+ is required only for st,stm32mp1-iwdg.
+- clock-names: Name of the clocks used.
+ "lsi" for st,stm32-iwdg
+ "lsi", "pclk" for st,stm32mp1-iwdg
Optional Properties:
- timeout-sec: Watchdog timeout value in seconds.
@@ -15,5 +21,6 @@ iwdg: watchdog@40003000 {
compatible = "st,stm32-iwdg";
reg = <0x40003000 0x400>;
clocks = <&clk_lsi>;
+ clock-names = "lsi";
timeout-sec = <32>;
};
diff --git a/Documentation/devicetree/bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt
index 1d11b9002ef8..d058ace29345 100644
--- a/Documentation/devicetree/bindings/xilinx.txt
+++ b/Documentation/devicetree/bindings/xilinx.txt
@@ -49,7 +49,7 @@
followed by an older IP core version which implements the same
interface or any other device with the same interface.
- 'reg', 'interrupt-parent' and 'interrupts' are all optional properties.
+ 'reg' and 'interrupts' are both optional properties.
For example, the following block from system.mhs:
diff --git a/Documentation/devicetree/bindings/xillybus/xillybus.txt b/Documentation/devicetree/bindings/xillybus/xillybus.txt
index 9e316dc2e40f..e65d1f94b49c 100644
--- a/Documentation/devicetree/bindings/xillybus/xillybus.txt
+++ b/Documentation/devicetree/bindings/xillybus/xillybus.txt
@@ -4,8 +4,6 @@ Required properties:
- compatible: Should be "xillybus,xillybus-1.00.a"
- reg: Address and length of the register set for the device
- interrupts: Contains one interrupt node, typically consisting of three cells.
-- interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties:
- dma-coherent: Present if DMA operations are coherent
diff --git a/Documentation/devicetree/dynamic-resolution-notes.txt b/Documentation/devicetree/dynamic-resolution-notes.txt
index 083d23262abe..c24ec366c5dc 100644
--- a/Documentation/devicetree/dynamic-resolution-notes.txt
+++ b/Documentation/devicetree/dynamic-resolution-notes.txt
@@ -2,15 +2,14 @@ Device Tree Dynamic Resolver Notes
----------------------------------
This document describes the implementation of the in-kernel
-Device Tree resolver, residing in drivers/of/resolver.c and is a
-companion document to Documentation/devicetree/dt-object-internal.txt[1]
+Device Tree resolver, residing in drivers/of/resolver.c
How the resolver works
----------------------
The resolver is given as an input an arbitrary tree compiled with the
proper dtc option and having a /plugin/ tag. This generates the
-appropriate __fixups__ & __local_fixups__ nodes as described in [1].
+appropriate __fixups__ & __local_fixups__ nodes.
In sequence the resolver works by the following steps:
diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst
index 80383b1a574a..8db53cdc225f 100644
--- a/Documentation/doc-guide/kernel-doc.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -488,14 +488,19 @@ doc: *title*
.. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
:doc: High Definition Audio over HDMI and Display Port
-functions: *function* *[...]*
+functions: *[ function ...]*
Include documentation for each *function* in *source*.
+ If no *function* is specified, the documentation for all functions
+ and types in the *source* will be included.
- Example::
+ Examples::
.. kernel-doc:: lib/bitmap.c
:functions: bitmap_parselist bitmap_parselist_user
+ .. kernel-doc:: lib/idr.c
+ :functions:
+
Without options, the kernel-doc directive includes all documentation comments
from the source file.
diff --git a/Documentation/doc-guide/parse-headers.rst b/Documentation/doc-guide/parse-headers.rst
index 82a3e43b6864..24cfaa15dd81 100644
--- a/Documentation/doc-guide/parse-headers.rst
+++ b/Documentation/doc-guide/parse-headers.rst
@@ -32,7 +32,7 @@ SYNOPSIS
\ **parse_headers.pl**\ [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
-Where <options> can be: --debug, --help or --man.
+Where <options> can be: --debug, --help or --usage.
OPTIONS
@@ -133,7 +133,7 @@ For both statements, \ **type**\ can be either one of the following:
\ **symbol**\
- The ignore or replace statement will apply to the name of enum statements
+ The ignore or replace statement will apply to the name of the enum value
at C_FILE.
For replace statements, \ **new_value**\ will automatically use :c:type:
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index a2417633fdd8..f0796daa95b4 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -28,7 +28,7 @@ The ReST markups currently used by the Documentation/ files are meant to be
built with ``Sphinx`` version 1.3 or upper. If you're desiring to build
PDF outputs, it is recommended to use version 1.4.6 or upper.
-There's a script that checks for the Spinx requirements. Please see
+There's a script that checks for the Sphinx requirements. Please see
:ref:`sphinx-pre-install` for further details.
Most distributions are shipped with Sphinx, but its toolchain is fragile,
diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst
index 70e328e16aad..d6763272e747 100644
--- a/Documentation/driver-api/device_link.rst
+++ b/Documentation/driver-api/device_link.rst
@@ -81,10 +81,14 @@ integration is desired.
Two other flags are specifically targeted at use cases where the device
link is added from the consumer's ``->probe`` callback: ``DL_FLAG_RPM_ACTIVE``
can be specified to runtime resume the supplier upon addition of the
-device link. ``DL_FLAG_AUTOREMOVE`` causes the device link to be automatically
-purged when the consumer fails to probe or later unbinds. This obviates
-the need to explicitly delete the link in the ``->remove`` callback or in
-the error path of the ``->probe`` callback.
+device link. ``DL_FLAG_AUTOREMOVE_CONSUMER`` causes the device link to be
+automatically purged when the consumer fails to probe or later unbinds.
+This obviates the need to explicitly delete the link in the ``->remove``
+callback or in the error path of the ``->probe`` callback.
+
+Similarly, when the device link is added from supplier's ``->probe`` callback,
+``DL_FLAG_AUTOREMOVE_SUPPLIER`` causes the device link to be automatically
+purged when the supplier fails to probe or later unbinds.
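+
+A minimal sketch of a consumer's ``->probe`` using these flags; the flags are
+as documented above, while ``foo_probe`` and the ``foo_get_supplier`` helper
+are hypothetical::
+
+	static int foo_probe(struct device *dev)
+	{
+		/* supplier lookup is driver specific; foo_get_supplier()
+		 * is a hypothetical helper
+		 */
+		struct device *supplier = foo_get_supplier(dev);
+		struct device_link *link;
+
+		/* Runtime resume the supplier now, and let the link be
+		 * purged automatically if probing fails or the consumer
+		 * later unbinds.
+		 */
+		link = device_link_add(dev, supplier,
+				       DL_FLAG_RPM_ACTIVE |
+				       DL_FLAG_AUTOREMOVE_CONSUMER);
+		if (!link)
+			return -EINVAL;
+
+		return 0;
+	}
+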
Limitations
===========
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index dc384f2f7f34..b541e97c7ab1 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -131,6 +131,12 @@ DMA Fences
----------
.. kernel-doc:: drivers/dma-buf/dma-fence.c
+ :doc: DMA fences overview
+
+DMA Fences Functions Reference
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-fence.c
:export:
.. kernel-doc:: include/linux/dma-fence.h
diff --git a/Documentation/driver-api/firmware/fallback-mechanisms.rst b/Documentation/driver-api/firmware/fallback-mechanisms.rst
index d35fed65eae9..8b041d0ab426 100644
--- a/Documentation/driver-api/firmware/fallback-mechanisms.rst
+++ b/Documentation/driver-api/firmware/fallback-mechanisms.rst
@@ -92,7 +92,7 @@ the loading file.
The firmware device used to help load firmware using sysfs is only created if
direct firmware loading fails and if the fallback mechanism is enabled for your
-firmware request, this is set up with fw_load_from_user_helper(). It is
+firmware request; this is set up with :c:func:`firmware_fallback_sysfs`. It is
important to re-iterate that no device is created if a direct filesystem lookup
succeeded.
@@ -108,6 +108,11 @@ firmware_data_read() and firmware_loading_show() are just provided for the
test_firmware driver for testing, they are not called in normal use or
expected to be used regularly by userspace.
+firmware_fallback_sysfs
+-----------------------
+.. kernel-doc:: drivers/base/firmware_loader/fallback.c
+ :functions: firmware_fallback_sysfs
+
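+For reference, a minimal sketch of a request that can take this fallback
+path; the device pointer and firmware name here are hypothetical::
+
+	const struct firmware *fw;
+	int ret;
+
+	/*
+	 * The direct filesystem lookup runs first; the sysfs fallback
+	 * device is created only if that lookup fails and the fallback
+	 * mechanism is enabled for this request.
+	 */
+	ret = request_firmware(&fw, "foo/firmware.bin", dev);
+	if (ret)
+		return ret;
+	/* ... use fw->data and fw->size ... */
+	release_firmware(fw);
+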
Firmware kobject uevent fallback mechanism
==========================================
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index bcf2dd24e179..4b3825da48d9 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -83,7 +83,7 @@ The programming sequence is::
3. .write_complete
The .write_init function will prepare the FPGA to receive the image data. The
-buffer passed into .write_init will be atmost .initial_header_size bytes long,
+buffer passed into .write_init will be at most .initial_header_size bytes long;
if the whole bitstream is not immediately available then the core code will
buffer up at least this much before starting.
@@ -98,9 +98,9 @@ scatter list. This interface is suitable for drivers which use DMA.
The .write_complete function is called after all the image has been written
to put the FPGA into operating mode.
-The ops include a .state function which will read the hardware FPGA manager and
-return a code of type enum fpga_mgr_states. It doesn't result in a change in
-hardware state.
+The ops include a .state function which will determine the state the FPGA is in
+and return a code of type enum fpga_mgr_states. It doesn't result in a change
+in state.
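+
+A minimal sketch of such an ops vector; the member signatures are assumed to
+match include/linux/fpga/fpga-mgr.h, and the ``foo_`` names plus the header
+size are hypothetical::
+
+	static enum fpga_mgr_states foo_fpga_state(struct fpga_manager *mgr)
+	{
+		/* Report the state the FPGA is in without changing it */
+		return FPGA_MGR_STATE_UNKNOWN;
+	}
+
+	static int foo_fpga_write_init(struct fpga_manager *mgr,
+				       struct fpga_image_info *info,
+				       const char *buf, size_t count)
+	{
+		/* buf holds at most .initial_header_size bytes here */
+		return 0;
+	}
+
+	static const struct fpga_manager_ops foo_fpga_ops = {
+		.initial_header_size	= 112,	/* hypothetical */
+		.state			= foo_fpga_state,
+		.write_init		= foo_fpga_write_init,
+		/* .write, .write_sg, .write_complete omitted for brevity */
+	};
+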
How to write an image buffer to a supported FPGA
------------------------------------------------
@@ -181,8 +181,8 @@ API for implementing a new FPGA Manager driver
.. kernel-doc:: drivers/fpga/fpga-mgr.c
:functions: fpga_mgr_unregister
-API for programming a FPGA
---------------------------
+API for programming an FPGA
+---------------------------
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
:functions: fpga_image_info
diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst
index f89e4a311722..f30333ce828e 100644
--- a/Documentation/driver-api/fpga/fpga-region.rst
+++ b/Documentation/driver-api/fpga/fpga-region.rst
@@ -4,7 +4,7 @@ FPGA Region
Overview
--------
-This document is meant to be an brief overview of the FPGA region API usage. A
+This document is meant to be a brief overview of the FPGA region API usage. A
more conceptual look at regions can be found in the Device Tree binding
document [#f1]_.
@@ -31,11 +31,11 @@ fpga_image_info including:
* pointers to the image as either a scatter-gather buffer, a contiguous
buffer, or the name of firmware file
- * flags indicating specifics such as whether the image if for partial
+ * flags indicating specifics such as whether the image is for partial
reconfiguration.
-How to program a FPGA using a region
-------------------------------------
+How to program an FPGA using a region
+-------------------------------------
First, allocate the info struct::
@@ -77,8 +77,8 @@ An example of usage can be seen in the probe function of [#f2]_.
.. [#f1] ../devicetree/bindings/fpga/fpga-region.txt
.. [#f2] ../../drivers/fpga/of-fpga-region.c
-API to program a FGPA
----------------------
+API to program an FPGA
+----------------------
.. kernel-doc:: drivers/fpga/fpga-region.c
:functions: fpga_region_program_fpga
diff --git a/Documentation/driver-api/fpga/intro.rst b/Documentation/driver-api/fpga/intro.rst
index 51cd81dbb4dc..50d1cab84950 100644
--- a/Documentation/driver-api/fpga/intro.rst
+++ b/Documentation/driver-api/fpga/intro.rst
@@ -12,18 +12,18 @@ Linux. Some of the core intentions of the FPGA subsystems are:
* Code should not be shared between upper and lower layers. This
should go without saying. If that seems necessary, there's probably
- framework functionality that that can be added that will benefit
+ framework functionality that can be added that will benefit
other users. Write the linux-fpga mailing list and maintainers and
seek out a solution that expands the framework for broad reuse.
-* Generally, when adding code, think of the future. Plan for re-use.
+* Generally, when adding code, think of the future. Plan for reuse.
The framework in the kernel is divided into:
FPGA Manager
------------
-If you are adding a new FPGA or a new method of programming a FPGA,
+If you are adding a new FPGA or a new method of programming an FPGA,
this is the subsystem for you. Low level FPGA manager drivers contain
the knowledge of how to program a specific device. This subsystem
includes the framework in fpga-mgr.c and the low level drivers that
@@ -32,10 +32,10 @@ are registered with it.
FPGA Bridge
-----------
-FPGA Bridges prevent spurious signals from going out of a FPGA or a
-region of a FPGA during programming. They are disabled before
+FPGA Bridges prevent spurious signals from going out of an FPGA or a
+region of an FPGA during programming. They are disabled before
programming begins and re-enabled afterwards. An FPGA bridge may be
-actual hard hardware that gates a bus to a cpu or a soft ("freeze")
+actual hard hardware that gates a bus to a CPU or a soft ("freeze")
bridge in FPGA fabric that surrounds a partial reconfiguration region
of an FPGA. This subsystem includes fpga-bridge.c and the low level
drivers that are registered with it.
@@ -44,7 +44,7 @@ FPGA Region
-----------
If you are adding a new interface to the FPGA framework, add it on top
-of a FPGA region to allow the most reuse of your interface.
+of an FPGA region to allow the most reuse of your interface.
The FPGA Region framework (fpga-region.c) associates managers and
bridges as reconfigurable regions. A region may refer to the whole
diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst
index dcd63599f700..c55a6034c397 100644
--- a/Documentation/driver-api/mtdnand.rst
+++ b/Documentation/driver-api/mtdnand.rst
@@ -374,7 +374,7 @@ The nand driver supports three different types of hardware ECC.
- NAND_ECC_HW8_512
- Hardware ECC generator providing 6 bytes ECC per 512 byte.
+ Hardware ECC generator providing 8 bytes ECC per 512 byte.
If your hardware generator has a different functionality add it at the
appropriate place in nand_base.c
@@ -889,7 +889,7 @@ Use these constants to select the ECC algorithm::
#define NAND_ECC_HW3_512 3
/* Hardware ECC 6 byte ECC per 512 Byte data */
#define NAND_ECC_HW6_512 4
- /* Hardware ECC 6 byte ECC per 512 Byte data */
+ /* Hardware ECC 8 byte ECC per 512 Byte data */
#define NAND_ECC_HW8_512 6
diff --git a/Documentation/driver-api/slimbus.rst b/Documentation/driver-api/slimbus.rst
index a97449cf603a..410eec79b2a1 100644
--- a/Documentation/driver-api/slimbus.rst
+++ b/Documentation/driver-api/slimbus.rst
@@ -125,3 +125,8 @@ Messaging APIs:
~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/slimbus/messaging.c
:export:
+
+Streaming APIs:
+~~~~~~~~~~~~~~~
+.. kernel-doc:: drivers/slimbus/stream.c
+ :export:
diff --git a/Documentation/driver-api/usb/typec_bus.rst b/Documentation/driver-api/usb/typec_bus.rst
new file mode 100644
index 000000000000..d5eec1715b5b
--- /dev/null
+++ b/Documentation/driver-api/usb/typec_bus.rst
@@ -0,0 +1,136 @@
+
+API for USB Type-C Alternate Mode drivers
+=========================================
+
+Introduction
+------------
+
+Alternate modes require communication with the partner using Vendor Defined
+Messages (VDM) as defined in USB Type-C and USB Power Delivery Specifications.
+The communication is SVID (Standard or Vendor ID) specific, i.e. specific for
+every alternate mode, so every alternate mode will need a custom driver.
+
+USB Type-C bus allows binding a driver to the discovered partner alternate
+modes by using the SVID and the mode number.
+
+USB Type-C Connector Class provides a device for every alternate mode a port
+supports, and a separate device for every alternate mode the partner supports.
+The drivers for the alternate modes are bound to the partner alternate mode
+devices, and the port alternate mode devices must be handled by the port
+drivers.
+
+When a new partner alternate mode device is registered, it is linked to the
+alternate mode device of the port that the partner is attached to, which has a
+matching SVID and mode. Communication between the port driver and alternate mode
+driver will happen using the same API.
+
+The port alternate mode devices are used as a proxy between the partner and the
+alternate mode drivers, so the port drivers are only expected to pass the SVID
+specific commands from the alternate mode drivers to the partner, and from the
+partners to the alternate mode drivers. No direct SVID specific communication is
+needed from the port drivers, but the port drivers need to provide the operation
+callbacks for the port alternate mode devices, just like the alternate mode
+drivers need to provide them for the partner alternate mode devices.
+
+Usage:
+------
+
+General
+~~~~~~~
+
+By default, the alternate mode drivers are responsible for entering the mode.
+It is also possible to leave the decision about entering the mode to user
+space (see Documentation/ABI/testing/sysfs-class-typec). Port drivers should not
+enter any modes on their own.
+
+``->vdm`` is the most important callback in the operation callbacks vector. It
+will be used to deliver all the SVID specific commands from the partner to the
+alternate mode driver, and vice versa in case of port drivers. The drivers send
+the SVID specific commands to each other using :c:func:`typec_altmode_vdm()`.
+
+If the communication with the partner using the SVID specific commands results
+in a need to reconfigure the pins on the connector, the alternate mode driver
+needs to notify the bus using :c:func:`typec_altmode_notify()`. The driver
+passes the negotiated SVID specific pin configuration value to the function as
+a parameter. The bus driver will then configure the mux behind the connector
+using that value as the state value for the mux, and also call the blocking
+notification chain to notify the external drivers that need to know the state
+of the connector.
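+
+A minimal sketch of a partner alternate mode driver implementing ``->vdm``
+and using :c:func:`typec_altmode_notify`; the structure layouts and the
+``module_typec_altmode_driver()`` macro are assumed to match
+<linux/usb/typec_altmode.h>, and every ``foo_`` name, the SVID and the mode
+index are hypothetical::
+
+	static int foo_altmode_vdm(struct typec_altmode *alt,
+				   const u32 hdr, const u32 *vdo, int count)
+	{
+		/*
+		 * Parse the SVID specific message here. If the partner
+		 * negotiated a new pin configuration, tell the bus about
+		 * it (TYPEC_STATE_MODAL is the first SVID specific value,
+		 * see the NOTE below).
+		 */
+		return typec_altmode_notify(alt, TYPEC_STATE_MODAL, NULL);
+	}
+
+	static const struct typec_altmode_ops foo_altmode_ops = {
+		.vdm = foo_altmode_vdm,
+	};
+
+	static int foo_altmode_probe(struct typec_altmode *alt)
+	{
+		alt->ops = &foo_altmode_ops;
+		return 0;
+	}
+
+	static const struct typec_device_id foo_altmode_id[] = {
+		{ 0xff01, 1 },		/* hypothetical SVID and mode */
+		{ },
+	};
+
+	static struct typec_altmode_driver foo_altmode_driver = {
+		.id_table = foo_altmode_id,
+		.probe = foo_altmode_probe,
+		.driver = {
+			.name = "foo_altmode",
+		},
+	};
+	module_typec_altmode_driver(foo_altmode_driver);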
+
+NOTE: The SVID specific pin configuration values must always start from
+``TYPEC_STATE_MODAL``. USB Type-C specification defines two default states for
+the connector: ``TYPEC_STATE_USB`` and ``TYPEC_STATE_SAFE``. These values are
+reserved by the bus as the first possible values for the state. When the
+alternate mode is entered, the bus will put the connector into
+``TYPEC_STATE_SAFE`` before sending Enter or Exit Mode command as defined in USB
+Type-C Specification, and also put the connector back to ``TYPEC_STATE_USB``
+after the mode has been exited.
+
+An example of working definitions for SVID specific pin configurations would
+look like this::
+
+	enum {
+		ALTMODEX_CONF_A = TYPEC_STATE_MODAL,
+		ALTMODEX_CONF_B,
+		...
+	};
+
+Helper macro ``TYPEC_MODAL_STATE()`` can also be used::
+
+	#define ALTMODEX_CONF_A	TYPEC_MODAL_STATE(0)
+	#define ALTMODEX_CONF_B	TYPEC_MODAL_STATE(1)
+
+Notification chain
+~~~~~~~~~~~~~~~~~~
+
+The drivers for the components that the alternate modes are designed for need to
+get details regarding the results of the negotiation with the partner, and the
+pin configuration of the connector. In case of DisplayPort alternate mode for
+example, the GPU drivers will need to know those details. In case of
+Thunderbolt alternate mode, the thunderbolt drivers will need to know them, and
+so on.
+
+The notification chain is designed for this purpose. The drivers can register
+notifiers with :c:func:`typec_altmode_register_notifier()`.
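+
+A minimal sketch; the exact parameters of
+:c:func:`typec_altmode_register_notifier` shown here are an assumption, as
+are the ``foo_`` names::
+
+	static int foo_connector_notify(struct notifier_block *nb,
+					unsigned long conf, void *data)
+	{
+		/*
+		 * conf carries the connector state, e.g. an SVID specific
+		 * pin configuration value starting from TYPEC_STATE_MODAL.
+		 */
+		return NOTIFY_OK;
+	}
+
+	static struct notifier_block foo_nb = {
+		.notifier_call = foo_connector_notify,
+	};
+
+	/* dev, svid and mode select the alternate mode of interest */
+	ret = typec_altmode_register_notifier(dev, svid, mode, &foo_nb);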
+
+Cable plug alternate modes
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The alternate mode drivers are not bound to cable plug alternate mode devices,
+only to the partner alternate mode devices. If the alternate mode supports, or
+requires, a cable that responds to SOP Prime, and optionally SOP Double Prime
+messages, the driver for that alternate mode must request handle to the cable
+plug alternate modes using :c:func:`typec_altmode_get_plug()`, and take over
+their control.
+
+Driver API
+----------
+
+Alternate mode driver registering/unregistering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/usb/typec/bus.c
+ :functions: typec_altmode_register_driver typec_altmode_unregister_driver
+
+Alternate mode driver operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/usb/typec/bus.c
+ :functions: typec_altmode_enter typec_altmode_exit typec_altmode_attention typec_altmode_vdm typec_altmode_notify
+
+API for the port drivers
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/usb/typec/bus.c
+ :functions: typec_match_altmode
+
+Cable Plug operations
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/usb/typec/bus.c
+ :functions: typec_altmode_get_plug typec_altmode_put_plug
+
+Notifications
+~~~~~~~~~~~~~
+.. kernel-doc:: drivers/usb/typec/class.c
+ :functions: typec_altmode_register_notifier typec_altmode_unregister_notifier
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 7c1bb3d0c222..43681ca0837f 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -240,6 +240,7 @@ CLOCK
devm_of_clk_add_hw_provider()
DMA
+ dmaenginem_async_device_register()
dmam_alloc_coherent()
dmam_alloc_attrs()
dmam_declare_coherent_memory()
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 79c22d096bbc..d4d642e1ce9c 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -155,6 +155,13 @@ C. Boot options
used by text. By default, this area will be black. The 'color' value
is an integer number that depends on the framebuffer driver being used.
+6. fbcon=nodefer
+
+ If the kernel is compiled with deferred fbcon takeover support, normally
+ the framebuffer contents, left in place by the firmware/bootloader, will
+ be preserved until some text is actually output to the console.
+ This option causes fbcon to bind immediately to the fbdev device.
+
C. Attaching, Detaching and Unloading
Before going on how to attach, detach and unload the framebuffer console, an
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
index dbdf62907703..c7858dd1ea8f 100644
--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -5,10 +5,10 @@
#
# Architecture requirements
#
-# * arm64
+# * arm/arm64
#
-# Rely on eret context synchronization when returning from IPI handler, and
-# when returning to user-space.
+# Rely on implicit context synchronization as a result of exception return
+# when returning from IPI handler, and when returning to user-space.
#
# * x86
#
@@ -31,7 +31,7 @@
-----------------------
| alpha: | TODO |
| arc: | TODO |
- | arm: | TODO |
+ | arm: | ok |
| arm64: | ok |
| c6x: | TODO |
| h8300: | TODO |
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 37bf0a9de75c..efea228ccd8a 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -21,8 +21,7 @@ prototypes:
char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen);
struct vfsmount *(*d_automount)(struct path *path);
int (*d_manage)(const struct path *, bool);
- struct dentry *(*d_real)(struct dentry *, const struct inode *,
- unsigned int, unsigned int);
+ struct dentry *(*d_real)(struct dentry *, const struct inode *);
locking rules:
rename_lock ->d_lock may block rcu-walk
@@ -64,7 +63,7 @@ prototypes:
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
- umode_t create_mode, int *opened);
+ umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
locking rules:
@@ -532,9 +531,9 @@ More details about quota locking can be found in fs/dquot.c.
prototypes:
void (*open)(struct vm_area_struct*);
void (*close)(struct vm_area_struct*);
- int (*fault)(struct vm_area_struct*, struct vm_fault *);
- int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
- int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);
+ vm_fault_t (*fault)(struct vm_area_struct*, struct vm_fault *);
+ vm_fault_t (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
+ vm_fault_t (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);
int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
locking rules:
diff --git a/Documentation/filesystems/cifs/CHANGES b/Documentation/filesystems/cifs/CHANGES
index 455e1cc494a9..1df7f4910eb2 100644
--- a/Documentation/filesystems/cifs/CHANGES
+++ b/Documentation/filesystems/cifs/CHANGES
@@ -1,1068 +1,4 @@
-See https://wiki.samba.org/index.php/LinuxCIFSKernel for
-more current information.
-
-Version 1.62
-------------
-Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
-to more strictly handle corrupt frames.
-
-Version 1.61
-------------
-Fix append problem to Samba servers (files opened with O_APPEND could
-have duplicated data). Fix oops in cifs_lookup. Workaround problem
-mounting to OS/400 Netserve. Fix oops in cifs_get_tcp_session.
-Disable use of server inode numbers when server only
-partially supports them (e.g. for one server querying inode numbers on
-FindFirst fails but QPathInfo queries works). Fix oops with dfs in
-cifs_put_smb_ses. Fix mmap to work on directio mounts (needed
-for OpenOffice when on forcedirectio mount e.g.)
-
-Version 1.60
--------------
-Fix memory leak in reconnect. Fix oops in DFS mount error path.
-Set s_maxbytes to smaller (the max that vfs can handle) so that
-sendfile will now work over cifs mounts again. Add noforcegid
-and noforceuid mount parameters. Fix small mem leak when using
-ntlmv2. Fix 2nd mount to same server but with different port to
-be allowed (rather than reusing the 1st port) - only when the
-user explicitly overrides the port on the 2nd mount.
-
-Version 1.59
-------------
-Client uses server inode numbers (which are persistent) rather than
-client generated ones by default (mount option "serverino" turned
-on by default if server supports it). Add forceuid and forcegid
-mount options (so that when negotiating unix extensions specifying
-which uid mounted does not immediately force the server's reported
-uids to be overridden). Add support for scope mount parm. Improve
-hard link detection to use same inode for both. Do not set
-read-only dos attribute on directories (for chmod) since Windows
-explorer special cases this attribute bit for directories for
-a different purpose.
-
-Version 1.58
-------------
-Guard against buffer overruns in various UCS-2 to UTF-8 string conversions
-when the UTF-8 string is composed of unusually long (more than 4 byte) converted
-characters. Add support for mounting root of a share which redirects immediately
-to DFS target. Convert string conversion functions from Unicode to more
-accurately mark string length before allocating memory (which may help the
-rare cases where a UTF-8 string is much larger than the UCS2 string that
-we converted from). Fix endianness of the vcnum field used during
-session setup to distinguish multiple mounts to same server from different
-userids. Raw NTLMSSP fixed (it requires /proc/fs/cifs/experimental
-flag to be set to 2, and mount must enable krb5 to turn on extended security).
-Performance of file create to Samba improved (posix create on lookup
-removes 1 of 2 network requests sent on file create)
-
-Version 1.57
-------------
-Improve support for multiple security contexts to the same server. We
-used to use the same "vcnumber" for all connections which could cause
-the server to treat subsequent connections, especially those that
-are authenticated as guest, as reconnections, invalidating the earlier
-user's smb session. This fix allows cifs to mount multiple times to the
-same server with different userids without risking invalidating earlier
-established security contexts. fsync now sends SMB Flush operation
-to better ensure that we wait for server to write all of the data to
-server disk (not just write it over the network). Add new mount
-parameter to allow user to disable sending the (slow) SMB flush on
-fsync if desired (fsync still flushes all cached write data to the server).
-Posix file open support added (turned off after one attempt if server
-fails to support it properly, as with Samba server versions prior to 3.3.2)
-Fix "redzone overwritten" bug in cifs_put_tcon (CIFSTcon may allocate too
-little memory for the "nativeFileSystem" field returned by the server
-during mount). Endian convert inode numbers if necessary (makes it easier
-to compare inode numbers on network files from big endian systems).
-
-Version 1.56
-------------
-Add "forcemandatorylock" mount option to allow user to use mandatory
-rather than posix (advisory) byte range locks, even though server would
-support posix byte range locks. Fix query of root inode when prefixpath
-specified and user does not have access to query information about the
-top of the share. Fix problem in 2.6.28 resolving DFS paths to
-Samba servers (worked to Windows). Fix rmdir so that pending search
-(readdir) requests do not get invalid results which include the now
-removed directory. Fix oops in cifs_dfs_ref.c when prefixpath is not reachable
-when using DFS. Add better file create support to servers which support
-the CIFS POSIX protocol extensions (this adds support for new flags
-on create, and improves semantics for write of locked ranges).
-
-Version 1.55
-------------
-Various fixes to make delete of open files behavior more predictable
-(when delete of an open file fails we mark the file as "delete-on-close"
-in a way that more servers accept, but only if we can first rename the
-file to a temporary name). Add experimental support for more safely
-handling fcntl(F_SETLEASE). Convert cifs to using blocking tcp
-sends, and also let tcp autotune the socket send and receive buffers.
-This reduces the number of EAGAIN errors returned by TCP/IP in
-high stress workloads (and the number of retries on socket writes
-when sending large SMBWriteX requests). Fix case in which a portion of
-data can in some cases not get written to the file on the server before the
-file is closed. Fix DFS parsing to properly handle path consumed field,
-and to handle certain codepage conversions better. Fix mount and
-umount race that can cause oops in mount or umount or reconnect.
-
-Version 1.54
-------------
-Fix premature write failure on congested networks (we would give up
-on EAGAIN from the socket too quickly on large writes).
-Cifs_mkdir and cifs_create now respect the setgid bit on parent dir.
-Fix endian problems in acl (mode from/to cifs acl) on bigendian
-architectures. Fix problems with preserving timestamps on copying open
-files (e.g. "cp -a") to Windows servers. For mkdir and create honor setgid bit
-on parent directory when server supports Unix Extensions but not POSIX
-create. Update cifs.upcall version to handle new Kerberos sec flags
-(this requires update of cifs.upcall program from Samba). Fix memory leak
-on dns_upcall (resolving DFS referralls). Fix plain text password
-authentication (requires setting SecurityFlags to 0x30030 to enable
-lanman and plain text though). Fix writes to be at correct offset when
-file is open with O_APPEND and file is on a directio (forcediretio) mount.
-Fix bug in rewinding readdir directory searches. Add nodfs mount option.
-
-Version 1.53
-------------
-DFS support added (Microsoft Distributed File System client support needed
-for referrals which enable a hierarchical name space among servers).
-Disable temporary caching of mode bits to servers which do not support
-storing of mode (e.g. Windows servers, when client mounts without cifsacl
-mount option) and add new "dynperm" mount option to enable temporary caching
-of mode (enable old behavior). Fix hang on mount caused when server crashes
-tcp session during negotiate protocol.
-
-Version 1.52
-------------
-Fix oops on second mount to server when null auth is used.
-Enable experimental Kerberos support. Return writebehind errors on flush
-and sync so that events like out of disk space get reported properly on
-cached files. Fix setxattr failure to certain Samba versions. Fix mount
-of second share to disconnected server session (autoreconnect on this).
-Add ability to modify cifs acls for handling chmod (when mounted with
-cifsacl flag). Fix prefixpath path separator so we can handle mounts
-with prefixpaths longer than one directory (one path component) when
-mounted to Windows servers. Fix slow file open when cifsacl
-enabled. Fix memory leak in FindNext when the SMB call returns -EBADF.
-
-
-Version 1.51
-------------
-Fix memory leak in statfs when mounted to very old servers (e.g.
-Windows 9x). Add new feature "POSIX open" which allows servers
-which support the current POSIX Extensions to provide better semantics
-(e.g. delete for open files opened with posix open). Take into
-account umask on posix mkdir not just older style mkdir. Add
-ability to mount to IPC$ share (which allows CIFS named pipes to be
-opened, read and written as if they were files). When 1st tree
-connect fails (e.g. due to signing negotiation failure) fix
-leak that causes cifsd not to stop and rmmod to fail to cleanup
-cifs_request_buffers pool. Fix problem with POSIX Open/Mkdir on
-bigendian architectures. Fix possible memory corruption when
-EAGAIN returned on kern_recvmsg. Return better error if server
-requires packet signing but client has disabled it. When mounted
-with cifsacl mount option - mode bits are approximated based
-on the contents of the ACL of the file or directory. When cifs
-mount helper is missing convert make sure that UNC name
-has backslash (not forward slash) between ip address of server
-and the share name.
-
-Version 1.50
-------------
-Fix NTLMv2 signing. NFS server mounted over cifs works (if cifs mount is
-done with "serverino" mount option). Add support for POSIX Unlink
-(helps with certain sharing violation cases when server such as
-Samba supports newer POSIX CIFS Protocol Extensions). Add "nounix"
-mount option to allow disabling the CIFS Unix Extensions for just
-that mount. Fix hang on spinlock in find_writable_file (race when
-reopening file after session crash). Byte range unlock request to
-windows server could unlock more bytes (on server copy of file)
-than intended if start of unlock request is well before start of
-a previous byte range lock that we issued.
-
-Version 1.49
-------------
-IPv6 support. Enable ipv6 addresses to be passed on mount (put the ipv6
-address after the "ip=" mount option, at least until mount.cifs is fixed to
-handle DNS host to ipv6 name translation). Accept override of uid or gid
-on mount even when Unix Extensions are negotiated (it used to be ignored
-when Unix Extensions were ignored). This allows users to override the
-default uid and gid for files when they are certain that the uids or
-gids on the server do not match those of the client. Make "sec=none"
-mount override username (so that null user connection is attempted)
-to match what documentation said. Support for very large reads, over 127K,
-available to some newer servers (such as Samba 3.0.26 and later but
-note that it also requires setting CIFSMaxBufSize at module install
-time to a larger value which may hurt performance in some cases).
-Make sign option force signing (or fail if server does not support it).
-
-Version 1.48
-------------
-Fix mtime bouncing around from local idea of last write times to remote time.
-Fix hang (in i_size_read) when simultaneous size update of same remote file
-on smp system corrupts sequence number. Do not reread unnecessarily partial page
-(which we are about to overwrite anyway) when writing out file opened rw.
-When DOS attribute of file on non-Unix server's file changes on the server side
-from read-only back to read-write, reflect this change in default file mode
-(we had been leaving a file's mode read-only until the inode were reloaded).
-Allow setting of attribute back to ATTR_NORMAL (removing readonly dos attribute
-when archive dos attribute not set and we are changing mode back to writeable
-on server which does not support the Unix Extensions). Remove read only dos
-attribute on chmod when adding any write permission (ie on any of
-user/group/other (not all of user/group/other ie 0222) when
-mounted to windows. Add support for POSIX MkDir (slight performance
-enhancement and eliminates the network race between the mkdir and set
-path info of the mode).
-
-
-Version 1.47
-------------
-Fix oops in list_del during mount caused by unaligned string.
-Fix file corruption which could occur on some large file
-copies caused by writepages page i/o completion bug.
-Seek to SEEK_END forces check for update of file size for non-cached
-files. Allow file size to be updated on remote extend of locally open,
-non-cached file. Fix reconnect to newer Samba servers (or other servers
-which support the CIFS Unix/POSIX extensions) so that we again tell the
-server the Unix/POSIX cifs capabilities which we support (SetFSInfo).
-Add experimental support for new POSIX Open/Mkdir (which returns
-stat information on the open, and allows setting the mode).
-
-Version 1.46
-------------
-Support deep tree mounts. Better support OS/2, Win9x (DOS) time stamps.
-Allow null user to be specified on mount ("username="). Do not return
-EINVAL on readdir when filldir fails due to overwritten blocksize
-(fixes FC problem). Return error in rename 2nd attempt retry (ie report
-if rename by handle also fails, after rename by path fails, we were
-not reporting whether the retry worked or not). Fix NTLMv2 to
-work to Windows servers (mount with option "sec=ntlmv2").
-
-Version 1.45
-------------
-Do not time out lockw calls when using posix extensions. Do not
-time out requests if server still responding reasonably fast
-on requests on other threads. Improve POSIX locking emulation,
-(lock cancel now works, and unlock of merged range works even
-to Windows servers now). Fix oops on mount to lanman servers
-(win9x, os/2 etc.) when null password. Do not send listxattr
-(SMB to query all EAs) if nouser_xattr specified. Fix SE Linux
-problem (instantiate inodes/dentries in right order for readdir).
-
-Version 1.44
-------------
-Rewritten sessionsetup support, including support for legacy SMB
-session setup needed for OS/2 and older servers such as Windows 95 and 98.
-Fix oops on ls to OS/2 servers. Add support for level 1 FindFirst
-so we can do search (ls etc.) to OS/2. Do not send NTCreateX
-or recent levels of FindFirst unless server says it supports NT SMBs
-(instead use legacy equivalents from LANMAN dialect). Fix to allow
-NTLMv2 authentication support (now can use stronger password hashing
-on mount if corresponding /proc/fs/cifs/SecurityFlags is set (0x4004).
-Allow override of global cifs security flags on mount via "sec=" option(s).
-
-Version 1.43
-------------
-POSIX locking to servers which support CIFS POSIX Extensions
-(disabled by default controlled by proc/fs/cifs/Experimental).
-Handle conversion of long share names (especially Asian languages)
-to Unicode during mount. Fix memory leak in sess struct on reconnect.
-Fix rare oops after acpi suspend. Fix O_TRUNC opens to overwrite on
-cifs open which helps rare case when setpathinfo fails or server does
-not support it.
-
-Version 1.42
-------------
-Fix slow oplock break when mounted to different servers at the same time and
-the tids match and we try to find matching fid on wrong server. Fix read
-looping when signing required by server (2.6.16 kernel only). Fix readdir
-vs. rename race which could cause each to hang. Return . and .. even
-if server does not. Allow searches to skip first three entries and
-begin at any location. Fix oops in find_writeable_file.
-
-Version 1.41
-------------
-Fix NTLMv2 security (can be enabled in /proc/fs/cifs) so customers can
-configure stronger authentication. Fix sfu symlinks so they can
-be followed (not just recognized). Fix wraparound of bcc on
-read responses when buffer size over 64K and also fix wrap of
-max smb buffer size when CIFSMaxBufSize over 64K. Fix oops in
-cifs_user_read and cifs_readpages (when EAGAIN on send of smb
-on socket is returned over and over). Add POSIX (advisory) byte range
-locking support (requires server with newest CIFS UNIX Extensions
-to the protocol implemented). Slow down negprot slightly in port 139
-RFC1001 case to give session_init time on buggy servers.
-
-Version 1.40
-------------
-Use fsuid (fsgid) more consistently instead of uid (gid). Improve performance
-of readpages by eliminating one extra memcpy. Allow update of file size
-from remote server even if file is open for write as long as mount is
-directio. Recognize share mode security and send NTLM encrypted password
-on tree connect if share mode negotiated.
-
-Version 1.39
-------------
-Defer close of a file handle slightly if pending writes depend on that handle
-(this reduces the EBADF bad file handle errors that can be logged under heavy
-stress on writes). Modify cifs Kconfig options to expose CONFIG_CIFS_STATS2
-Fix SFU style symlinks and mknod needed for servers which do not support the
-CIFS Unix Extensions. Fix setfacl/getfacl on bigendian. Timeout negative
-dentries so files that the client sees as deleted but that later get created
-on the server will be recognized. Add client side permission check on setattr.
-Timeout stuck requests better (where server has never responded or sent corrupt
-responses)
-
-Version 1.38
-------------
-Fix tcp socket retransmission timeouts (e.g. on ENOSPACE from the socket)
-to be smaller at first (but increasing) so large write performance performance
-over GigE is better. Do not hang thread on illegal byte range lock response
-from Windows (Windows can send an RFC1001 size which does not match smb size) by
-allowing an SMBs TCP length to be up to a few bytes longer than it should be.
-wsize and rsize can now be larger than negotiated buffer size if server
-supports large readx/writex, even when directio mount flag not specified.
-Write size will in many cases now be 16K instead of 4K which greatly helps
-file copy performance on lightly loaded networks. Fix oops in dnotify
-when experimental config flag enabled. Make cifsFYI more granular.
-
-Version 1.37
-------------
-Fix readdir caching when unlink removes file in current search buffer,
-and this is followed by a rewind search to just before the deleted entry.
-Do not attempt to set ctime unless atime and/or mtime change requested
-(most servers throw it away anyway). Fix length check of received smbs
-to be more accurate. Fix big endian problem with mapchars mount option,
-and with a field returned by statfs.
-
-Version 1.36
-------------
-Add support for mounting to older pre-CIFS servers such as Windows9x and ME.
-For these older servers, add option for passing netbios name of server in
-on mount (servernetbiosname). Add suspend support for power management, to
-avoid cifsd thread preventing software suspend from working.
-Add mount option for disabling the default behavior of sending byte range lock
-requests to the server (necessary for certain applications which break with
-mandatory lock behavior such as Evolution), and also mount option for
-requesting case insensitive matching for path based requests (requesting
-case sensitive is the default).
-
-Version 1.35
-------------
-Add writepage performance improvements. Fix path name conversions
-for long filenames on mounts which were done with "mapchars" mount option
-specified. Ensure multiplex ids do not collide. Fix case in which
-rmmod can oops if done soon after last unmount. Fix truncated
-search (readdir) output when resume filename was a long filename.
-Fix filename conversion when mapchars mount option was specified and
-filename was a long filename.
-
-Version 1.34
-------------
-Fix error mapping of the TOO_MANY_LINKS (hardlinks) case.
-Do not oops if root user kills cifs oplock kernel thread or
-kills the cifsd thread (NB: killing the cifs kernel threads is not
-recommended, unmount and rmmod cifs will kill them when they are
-no longer needed). Fix readdir to ASCII servers (ie older servers
-which do not support Unicode) and also require asterisk.
-Fix out of memory case in which data could be written one page
-off in the page cache.
-
-Version 1.33
-------------
-Fix caching problem, in which readdir of directory containing a file
-which was cached could cause the file's time stamp to be updated
-without invalidating the readahead data (so we could get stale
-file data on the client for that file even as the server copy changed).
-Cleanup response processing so cifsd can not loop when abnormally
-terminated.
-
-
-Version 1.32
-------------
-Fix oops in ls when Transact2 FindFirst (or FindNext) returns more than one
-transact response for an SMB request and search entry split across two frames.
-Add support for lsattr (getting ext2/ext3/reiserfs attr flags from the server)
-as new protocol extensions. Do not send Get/Set calls for POSIX ACLs
-unless server explicitly claims to support them in CIFS Unix extensions
-POSIX ACL capability bit. Fix packet signing when multiuser mounting with
-different users from the same client to the same server. Fix oops in
-cifs_close. Add mount option for remapping reserved characters in
-filenames (also allow files created by SFU which contain any of these
-seven reserved characters, except backslash, to be recognized).
-Fix invalid transact2 message (we were sometimes trying to interpret
-oplock breaks as SMB responses). Add ioctl for checking that the
-current uid matches the uid of the mounter (needed by umount.cifs).
-Reduce the number of large buffer allocations in cifs response processing
-(significantly reduces memory pressure under heavy stress with multiple
-processes accessing the same server at the same time).
-
-Version 1.31
-------------
-Fix updates of DOS attributes and time fields so that files on NT4 servers
-do not get marked delete on close. Display sizes of cifs buffer pools in
-cifs stats. Fix oops in unmount when cifsd thread being killed by
-shutdown. Add generic readv/writev and aio support. Report inode numbers
-consistently in readdir and lookup (when serverino mount option is
-specified use the inode number that the server reports - for both lookup
-and readdir, otherwise by default the locally generated inode number is used
-for inodes created in either path since servers are not always able to
-provide unique inode numbers when exporting multiple volumes from under one
-sharename).
-
-Version 1.30
-------------
-Allow new nouser_xattr mount parm to disable xattr support for user namespace.
-Do not flag user_xattr mount parm in dmesg. Retry failures setting file time
-(mostly affects NT4 servers) by retry with handle based network operation.
-Add new POSIX Query FS Info for returning statfs info more accurately.
-Handle passwords with multiple commas in them.
-
-Version 1.29
-------------
-Fix default mode in sysfs of cifs module parms. Remove old readdir routine.
-Fix capabilities flags for large readx so as to allow reads larger than 64K.
-
-Version 1.28
-------------
-Add module init parm for large SMB buffer size (to allow it to be changed
-from its default of 16K) which is especially useful for large file copy
-when mounting with the directio mount option. Fix oops after
-returning from mount when experimental ExtendedSecurity enabled and
-SpnegoNegotiated returning invalid error. Fix case to retry better when
-peek returns from 1 to 3 bytes on socket which should have more data.
-Fixed path based calls (such as cifs lookup) to handle path names
-longer than 530 (now can handle PATH_MAX). Fix pass through authentication
-from Samba server to DC (Samba required dummy LM password).
-
-Version 1.27
-------------
-Turn off DNOTIFY (directory change notification support) by default
-(unless built with the experimental flag) to fix hang with KDE
-file browser. Fix DNOTIFY flag mappings. Fix hang (in wait_event
-waiting on an SMB response) in SendReceive when session dies but
-reconnects quickly from another task. Add module init parms for
-minimum number of large and small network buffers in the buffer pools,
-and for the maximum number of simultaneous requests.
-
-Version 1.26
-------------
-Add setfacl support to allow setting of ACLs remotely to Samba 3.10 and later
-and other POSIX CIFS compliant servers. Fix error mapping for getfacl
-to EOPNOTSUPP when server does not support posix acls on the wire. Fix
-improperly zeroed buffer in CIFS Unix extensions set times call.
-
-Version 1.25
-------------
-Fix internationalization problem in cifs readdir with filenames that map to
-longer UTF-8 strings than the string on the wire was in Unicode. Add workaround
-for readdir to netapp servers. Fix search rewind (seek into readdir to return
-non-consecutive entries). Do not do readdir when server negotiates
-buffer size too small to fit filename. Add support for reading POSIX ACLs from
-the server (add also acl and noacl mount options).
-
-Version 1.24
-------------
-Optionally allow using server side inode numbers, rather than client generated
-ones by specifying mount option "serverino" - this is required for some apps
-to work which double check hardlinked files and have persistent inode numbers.
-
-Version 1.23
-------------
-Multiple bigendian fixes. On little endian systems (for reconnect after
-network failure) fix tcp session reconnect code so we do not try first
-to reconnect on reverse of port 445. Treat reparse points (NTFS junctions)
-as directories rather than symlinks because we can do follow link on them.
-
-Version 1.22
-------------
-Add config option to enable XATTR (extended attribute) support, mapping
-xattr names in the "user." namespace to SMB/CIFS EAs. Lots of
-minor fixes pointed out by the Stanford SWAT checker (mostly missing
-or out of order NULL pointer checks in little used error paths).
-
-Version 1.21
-------------
-Add new mount parm to control whether mode check (generic_permission) is done
-on the client. If Unix extensions are enabled and the uids on the client
-and server do not match, client permission checks are meaningless on
-server uids that do not exist on the client (this does not affect the
-normal ACL check which occurs on the server). Fix default uid
-on mknod to match create and mkdir. Add optional mount parm to allow
-override of the default uid behavior (in which the server sets the uid
-and gid of newly created files). Normally for network filesystem mounts
-users want the server to set the uid/gid on newly created files (rather than
-using the uid of the client processes as you would in a local filesystem).
-
-Version 1.20
-------------
-Make transaction counts more consistent. Merge /proc/fs/cifs/SimultaneousOps
-info into /proc/fs/cifs/DebugData. Fix rare oops in readdir
-(in build_wildcard_path_from_dentry). Fix mknod to pass type field
-(block/char/fifo) properly. Remove spurious mount warning log entry when
-credentials passed as mount argument. Set major/minor device number in
-inode for block and char devices when unix extensions enabled.
-
-Version 1.19
-------------
-Fix /proc/fs/cifs/Stats and DebugData display to handle larger
-amounts of return data. Properly limit requests to MAX_REQ (50
-is the usual maximum active multiplex SMB/CIFS requests per server).
-Do not kill cifsd (and thus hurt the other SMB session) when more than one
-session to the same server (but with different userids) exists and one
-of the two user's smb sessions is being removed while leaving the other.
-Do not loop reconnecting in cifsd demultiplex thread when admin
-kills the thread without going through unmount.
-
-Version 1.18
-------------
-Do not rename hardlinked files (since that should be a noop). Flush
-cached write behind data when reopening a file after session abend,
-except when already in write. Grab per socket sem during reconnect
-to avoid oops in sendmsg if overlapping with reconnect. Do not
-reset cached inode file size on readdir for files open for write on
-client.
-
-
-Version 1.17
-------------
-Update number of blocks in file so du command is happier (in Linux a fake
-blocksize of 512 is required for calculating number of blocks in inode).
-Fix prepare write of partial pages to read in data from server if possible.
-Fix race on tcpStatus field between unmount and reconnection code, causing
-cifsd process sometimes to hang around forever. Improve out of memory
-checks in cifs_filldir
-
-Version 1.16
-------------
-Fix incorrect file size in file handle based setattr on big endian hardware.
-Fix oops in build_path_from_dentry when out of memory. Add checks for invalid
-and closing file structs in writepage/partialpagewrite. Add statistics
-for each mounted share (new menuconfig option). Fix endianness problem in
-volume information displayed in /proc/fs/cifs/DebugData (only affects
-big endian architectures). Prevent renames while constructing
-path names for open, mkdir and rmdir.
-
-Version 1.15
-------------
-Change to mempools for alloc smb request buffers and multiplex structs
-to better handle low memory problems (and potential deadlocks).
-
-Version 1.14
-------------
-Fix incomplete listings of large directories on Samba servers when Unix
-extensions enabled. Fix oops when smb_buffer can not be allocated. Fix
-rename deadlock when writing out dirty pages at same time.
-
-Version 1.13
-------------
-Fix open of files in which O_CREAT can cause the mode to change in
-some cases. Fix case in which retry of write overlaps file close.
-Fix PPC64 build error. Reduce excessive stack usage in smb password
-hashing. Fix overwrite of Linux user's view of file mode to Windows servers.
-
-Version 1.12
-------------
-Fixes for large file copy, signal handling, socket retry, buffer
-allocation and low memory situations.
-
-Version 1.11
-------------
-Better port 139 support to Windows servers (RFC1001/RFC1002 Session_Initialize)
-also now allowing support for specifying client netbiosname. NT4 support added.
-
-Version 1.10
-------------
-Fix reconnection (and certain failed mounts) to properly wake up the
-blocked user's thread so it does not seem hung (in some cases was blocked
-until the cifs receive timeout expired). Fix spurious error logging
-to kernel log when application with open network files killed.
-
-Version 1.09
-------------
-Fix /proc/fs module unload warning message (that could be logged
-to the kernel log). Fix intermittent failure in connectathon
-test7 (hardlink count not immediately refreshed in case in which
-inode metadata can be incorrectly kept cached when time near zero)
-
-Version 1.08
-------------
-Allow file_mode and dir_mode (specified at mount time) to be enforced
-locally (the server already enforced its own ACLs too) for servers
-that do not report the correct mode (do not support the
-CIFS Unix Extensions).
-
-Version 1.07
-------------
-Fix some small memory leaks in some unmount error paths. Fix major leak
-of cache pages in readpages causing multiple read oriented stress
-testcases (including fsx, and even large file copy) to fail over time.
-
-Version 1.06
-------------
-Send NTCreateX with ATTR_POSIX if Linux/Unix extensions negotiated with server.
-This allows files that differ only in case and improves performance of file
-creation and file open to such servers. Fix semaphore conflict which causes
-slow delete of open file to Samba (which unfortunately can cause an oplock
-break to self while vfs_unlink held i_sem) which can hang for 20 seconds.
-
-Version 1.05
-------------
-Fixes to cifs_readpages for fsx test case.
-
-Version 1.04
-------------
-Fix caching data integrity bug when extending file size especially when no
-oplock on file. Fix spurious logging of valid already parsed mount options
-that are parsed outside of the cifs vfs such as nosuid.
-
-
-Version 1.03
-------------
-Connect to server when port number override not specified, and tcp port
-uninitialized. Reset search to restart at correct file when kernel routine
-filldir returns error during large directory searches (readdir).
-
-Version 1.02
-------------
-Fix caching problem when files opened by multiple clients in which
-page cache could contain stale data, and write through did
-not occur often enough while file was still open when read ahead
-(read oplock) not allowed. Treat "sep=" when first mount option
-as an override of comma as the default separator between mount
-options.
-
-Version 1.01
-------------
-Allow passwords longer than 16 bytes. Allow null password string.
-
-Version 1.00
-------------
-Gracefully clean up failed mounts when attempting to mount to servers such as
-Windows 98 that terminate tcp sessions during protocol negotiation. Handle
-embedded commas in mount parsing of passwords.
-
-Version 0.99
-------------
-Invalidate local inode cached pages on oplock break and when last file
-instance is closed so that the client does not continue using stale local
-copy rather than later modified server copy of file. Do not reconnect
-when server drops the tcp session prematurely before negotiate
-protocol response. Fix oops in reopen_file when dentry freed. Allow
-the support for CIFS Unix Extensions to be disabled via proc interface.
-
-Version 0.98
-------------
-Fix hang in commit_write during reconnection of open files under heavy load.
-Fix unload_nls oops in a mount failure path. Serialize writes to same socket
-which also fixes any possible races when cifs signatures are enabled in SMBs
-being sent out of signature sequence number order.
-
-Version 0.97
-------------
-Fix byte range locking bug (endian problem) causing bad offset and
-length.
-
-Version 0.96
-------------
-Fix oops (in send_sig) caused by CIFS unmount code trying to
-wake up the demultiplex thread after it had exited. Do not log
-error on harmless oplock release of closed handle.
-
-Version 0.95
-------------
-Fix unsafe global variable usage and password hash failure on gcc 3.3.1
-Fix problem reconnecting secondary mounts to same server after session
-failure. Fix invalid dentry - race in mkdir when directory gets created
-by another client between the lookup and mkdir.
-
-Version 0.94
-------------
-Fix to list processing in reopen_files. Fix reconnection when server hung
-but tcpip session still alive. Set proper timeout on socket read.
-
-Version 0.93
-------------
-Add missing mount options including iocharset. SMP fixes in write and open.
-Fix errors in reconnecting after TCP session failure. Fix module unloading
-of default nls codepage
-
-Version 0.92
-------------
-Active smb transactions should never go negative (fix double FreeXid). Fix
-list processing in file routines. Check return code on kmalloc in open.
-Fix spinlock usage for SMP.
-
-Version 0.91
-------------
-Fix oops in reopen_files when invalid dentry. drop dentry on server rename
-and on revalidate errors. Fix cases where pid is now tgid. Fix return code
-on create hard link when server does not support them.
-
-Version 0.90
-------------
-Fix scheduling while atomic error in getting inode info on newly created file.
-Fix truncate of existing files opened with O_CREAT but not O_TRUNC set.
-
-Version 0.89
-------------
-Fix oops on write to dead tcp session. Remove error log write for case when file open
-O_CREAT but not O_EXCL
-
-Version 0.88
-------------
-Fix non-POSIX behavior on rename of open file and delete of open file by taking
-advantage of trans2 SetFileInfo rename facility if available on target server.
-Retry on ENOSPC and EAGAIN socket errors.
-
-Version 0.87
-------------
-Fix oops on big endian readdir. Set blksize to be even power of two (2**blkbits) to fix
-allocation size miscalculation. After oplock token lost do not read through
-cache.
-
-Version 0.86
-------------
-Fix oops on empty file readahead. Fix for file size handling for locally cached files.
-
-Version 0.85
-------------
-Fix oops in mkdir when server fails to return inode info. Fix oops in reopen_files
-during auto reconnection to server after server recovered from failure.
-
-Version 0.84
-------------
-Finish support for Linux 2.5 open/create changes, which removes the
-redundant NTCreate/QPathInfo/close that was sent during file create.
-Enable oplock by default. Enable packet signing by default (needed to
-access many recent Windows servers)
-
-Version 0.83
-------------
-Fix oops when mounting to long server names caused by inverted parms to kmalloc.
-Fix MultiuserMount (/proc/fs/cifs configuration setting) so that when enabled
-we will choose a cifs user session (smb uid) that better matches the local
-uid if a) the mount uid does not match the current uid and b) we have another
-session to the same server (ip address) for a different mount which
-matches the current local uid.
-
-Version 0.82
-------------
-Add support for mknod of block or character devices. Fix oplock
-code (distributed caching) to properly send response to oplock
-break from server.
-
-Version 0.81
-------------
-Finish up CIFS packet digital signing for the default
-NTLM security case. This should help Windows 2003
-network interoperability since it is common for
-packet signing to be required now. Fix statfs (stat -f)
-which recently started returning errors due to
-invalid value (-1 instead of 0) being set in the
-struct kstatfs f_ffiles field.
-
-Version 0.80
-------------
-Fix oops on stopping oplock thread when removing cifs when
-built as module.
-
-Version 0.79
-------------
-Fix mount options for ro (readonly), uid, gid and file and directory mode.
-
-Version 0.78
-------------
-Fix errors displayed on failed mounts to be more understandable.
-Fixed various incorrect or misleading smb to posix error code mappings.
-
-Version 0.77
-------------
-Fix display of NTFS DFS junctions to display as symlinks.
-They are the network equivalent. Fix oops in
-cifs_partialpagewrite caused by missing spinlock protection
-of openfile linked list. Allow writebehind caching errors to
-be returned to the application at file close.
-
-Version 0.76
-------------
-Clean up options displayed in /proc/mounts by show_options to
-be more consistent with other filesystems.
-
-Version 0.75
-------------
-Fix delete of readonly file to Windows servers. Reflect
-presence or absence of read only dos attribute in mode
-bits for servers that do not support CIFS Unix extensions.
-Fix shortened results on readdir of large directories to
-servers supporting CIFS Unix extensions (caused by
-incorrect resume key).
-
-Version 0.74
-------------
-Fix truncate bug (set file size) that could cause hangs e.g. running fsx
-
-Version 0.73
-------------
-Unload nls if mount fails.
-
-Version 0.72
-------------
-Add resume key support to search (readdir) code to workaround
-Windows bug. Add /proc/fs/cifs/LookupCacheEnable which
-allows disabling caching of attribute information for
-lookups.
-
-Version 0.71
-------------
-Add more oplock handling (distributed caching code). Remove
-dead code. Remove excessive stack space utilization from
-symlink routines.
-
-Version 0.70
-------------
-Fix oops in get dfs referral (triggered when null path sent in to
-mount). Add support for overriding rsize at mount time.
-
-Version 0.69
-------------
-Fix buffer overrun in readdir which caused intermittent kernel oopses.
-Fix writepage code to release kmap on write data. Allow new "-ip="
-mount option to be passed in as a parameter distinct from the first part
-(server name portion) of the UNC name. Allow override of the
-tcp port of the target server via new mount option "-port=".
-
-Version 0.68
-------------
-Fix search handle leak on rewind. Fix setuid and gid so that they are
-reflected in the local inode immediately. Cleanup of whitespace
-to make 2.4 and 2.5 versions more consistent.
-
-
-Version 0.67
-------------
-Fix signal sending so that captive thread (cifsd) exits on umount
-(which was causing the warning in kmem_cache_free of the request buffers
-at rmmod time). This had broken as a side effect of the recent global
-kernel change to daemonize. Fix memory leak in readdir code which
-showed up in "ls -R" (and applications that did search rewinding).
-
-Version 0.66
-------------
-Reconnect tids and fids after session reconnection (still do not
-reconnect byte range locks though). Fix problem caching
-lookup information for directory inodes, improving performance,
-especially in deep directory trees. Fix various build warnings.
-
-Version 0.65
-------------
-Finish fixes to commit write for caching/readahead consistency. fsx
-now works to Samba servers. Fix oops caused when readahead
-was interrupted by a signal.
-
-Version 0.64
-------------
-Fix data corruption (in partial page after truncate) that caused fsx to
-fail to Windows servers. Cleaned up some extraneous error logging in
-common error paths. Add generic sendfile support.
-
-Version 0.63
-------------
-Fix memory leak in AllocMidQEntry.
-Finish reconnection logic, so connection with server can be dropped
-(or server rebooted) and the cifs client will reconnect.
-
-Version 0.62
-------------
-Fix temporary socket leak when bad userid or password specified
-(or other SMBSessSetup failure). Increase maximum buffer size to slightly
-over 16K to allow negotiation of read sizes up to the Samba and Windows
-server defaults. Add support for readpages.
-
-Version 0.61
-------------
-Fix oops when username not passed in on mount. Extensive fixes and improvements
-to error logging (strip redundant newlines, change debug macros to ensure newline
-passed in and to be more consistent). Fix writepage wrong file handle problem,
-a readonly file handle could be incorrectly used to attempt to write out
-file updates through the page cache to multiply open files. This could cause
-the iozone benchmark to fail on the fwrite test. Fix bug mounting two different
-shares to the same Windows server when using different usernames
-(doing this to Samba servers worked but Windows was rejecting it) - now it is
-possible to use different userids when connecting to the same server from a
-Linux client. Fix oops when treeDisconnect called during unmount on
-previously freed socket.
-
-Version 0.60
-------------
-Fix oops in readpages caused by not setting address space operations in inode in
-rare code path.
-
-Version 0.59
-------------
-Includes support for deleting of open files and renaming over existing files (per POSIX
-requirement). Add readlink support for Windows junction points (directory symlinks).
-
-Version 0.58
-------------
-Changed read and write to go through pagecache. Added additional address space operations.
-Memory mapped operations now working.
-
-Version 0.57
-------------
-Added writepage code for additional memory mapping support. Fixed leak in xids causing
-the simultaneous operations counter (/proc/fs/cifs/SimultaneousOps) to increase on
-every stat call. Additional formatting cleanup.
-
-Version 0.56
-------------
-Fix bigendian bug in order of time conversion. Merge 2.5 to 2.4 version. Formatting cleanup.
-
-Version 0.55
-------------
-Fixes from Zwane Mwaikambo for adding missing return code checking in a few places.
-Also included a modified version of his fix to protect global list manipulation of
-the smb session and tree connection and mid related global variables.
-
-Version 0.54
-------------
-Fix problem with captive thread hanging around at unmount time. Adjust to 2.5.42-pre
-changes to superblock layout. Remove wasteful allocation of smb buffers (now the send
-buffer is reused for responses). Add more oplock handling. Additional minor cleanup.
-
-Version 0.53
-------------
-More stylistic updates to better match kernel style. Add additional statistics
-for filesystem which can be viewed via /proc/fs/cifs. Add more pieces of NTLMv2
-and CIFS Packet Signing enablement.
-
-Version 0.52
-------------
-Replace call to sleep_on with safer wait_on_event.
-Make stylistic changes to better match kernel style recommendations.
-Remove most typedef usage (except for the PDUs themselves).
-
-Version 0.51
-------------
-Update mount so the -unc mount option is no longer required (the ip address can be specified
-in a UNC style device name). Implementation of readpage/writepage started.
-
-Version 0.50
-------------
-Fix intermittent problem with incorrect smb header checking on badly
-fragmented tcp responses
-
-Version 0.49
-------------
-Fixes to setting of allocation size and file size.
-
-Version 0.48
-------------
-Various 2.5.38 fixes. Now works on 2.5.38
-
-Version 0.47
-------------
-Prepare for 2.5 kernel merge. Remove ifdefs.
-
-Version 0.46
-------------
-Socket buffer management fixes. Fix dual free.
-
-Version 0.45
-------------
-Various big endian fixes for hardlinks and symlinks and also for dfs.
-
-Version 0.44
-------------
-Various big endian fixes for servers with Unix extensions such as Samba
-
-Version 0.43
-------------
-Various FindNext fixes for incorrect filenames on large directory searches on big endian
-clients. Basic posix file i/o tests now work on big endian machines, not just le.
-
-Version 0.42
-------------
-SessionSetup and NegotiateProtocol now work from Big Endian machines.
-Various Big Endian fixes found during testing on the Linux on 390. Various fixes for compatibility with older
-versions of 2.4 kernel (now builds and works again on kernels at least as early as 2.4.7).
-
-Version 0.41
-------------
-Various minor fixes for Connectathon Posix "basic" file i/o test suite. Directory caching fixed so hardlinked
-files now return the correct number of links on fstat as they are repeatedly linked and unlinked.
-
-Version 0.40
-------------
-Implemented "Raw" (i.e. not encapsulated in SPNEGO) NTLMSSP (i.e. the Security Provider Interface used to negotiate
-advanced session authentication). Raw NTLMSSP is preferred by Windows 2000 Professional and Windows XP.
-Began implementing support for SPNEGO encapsulation of NTLMSSP based session authentication blobs
-(which is the mechanism preferred by Windows 2000 server in the absence of Kerberos).
-
-Version 0.38
-------------
-Introduced optional mount helper utility mount.cifs and made coreq changes to cifs vfs to enable
-it. Fixed a few bugs in the DFS code (e.g. bcc two bytes too short and incorrect uid in PDU).
-
-Version 0.37
-------------
-Rewrote much of connection and mount/unmount logic to handle bugs with
-multiple uses to same share, multiple users to same server etc.
-
-Version 0.36
-------------
-Fixed major problem with dentry corruption (missing call to dput)
-
-Version 0.35
-------------
-Rewrite of readdir code to fix bug. Various fixes for bigendian machines.
-Begin adding oplock support. Multiusermount and oplockEnabled flags added to /proc/fs/cifs
-although corresponding function not fully implemented in the vfs yet
-
-Version 0.34
-------------
-Fixed dentry caching bug, misc. cleanup
-
-Version 0.33
-------------
-Fixed 2.5 support to handle build and configure changes as well as misc. 2.5 changes. Now can build
-on current 2.5 beta version (2.5.24) of the Linux kernel as well as on 2.4 Linux kernels.
-Support for STATUS codes (newer 32 bit NT error codes) added. DFS support begun to be added.
-
-Version 0.32
-------------
-Unix extensions (symlink, readlink, hardlink, chmod and some chgrp and chown) implemented
-and tested against Samba 2.2.5
-
-
-Version 0.31
-------------
-1) Fixed lockrange to be correct (it was one byte too short)
-
-2) Fixed GETLK (i.e. the fcntl call to test a range of bytes in a file to see if locked) to correctly
-show range as locked when there is a conflict with an existing lock.
-
-3) Default file perms are now 2767 (indicating support for mandatory locks) instead of 777 for directories
-in most cases. Eventually will offer optional ability to query server for the correct perms.
-
-4) Fixed eventual trap when mounting twice to different shares on the same server when the first succeeded
-but the second one was invalid and failed (the second one was incorrectly disconnecting the tcp and smb
-session).
-
-5) Fixed error logging of valid mount options.
-
-6) Removed logging of password field.
-
-7) Moved negotiate, treeDisconnect and uloggoffX (only tConx and SessSetup remain in connect.c) to cifssmb.c
-and cleaned them up and made them more consistent with other cifs functions.
-
-8) Server support for Unix extensions is now fully detected and FindFirst is implemented both ways
-(with or without Unix extensions) but FindNext and QueryPathInfo with the Unix extensions are not completed,
-nor is the symlink support using the Unix extensions.
-
-9) Started adding the readlink and follow_link code.
-
-Version 0.3
------------
-Initial drop
-
+See https://wiki.samba.org/index.php/LinuxCIFSKernel for summary
+information (that may be easier to read than parsing the output of
+"git log fs/cifs") about fixes/improvements to CIFS/SMB2/SMB3 support (changes
+to cifs.ko module) by kernel version (and cifs internal module version).
diff --git a/Documentation/filesystems/cifs/README b/Documentation/filesystems/cifs/README
index 99ce3d25003d..4a804619cff2 100644
--- a/Documentation/filesystems/cifs/README
+++ b/Documentation/filesystems/cifs/README
@@ -603,8 +603,7 @@ DebugData Displays information about active CIFS sessions and
shares, features enabled as well as the cifs.ko
version.
Stats Lists summary resource usage information as well as per
- share statistics, if CONFIG_CIFS_STATS in enabled
- in the kernel configuration.
+ share statistics.
Configuration pseudo-files:
SecurityFlags Flags which control security negotiation and
@@ -687,23 +686,22 @@ cifsFYI functions as a bit mask. Setting it to 1 enables additional kernel
logging of various informational messages. 2 enables logging of non-zero
SMB return codes while 4 enables logging of requests that take longer
than one second to complete (except for byte range lock requests).
-Setting it to 4 requires defining CONFIG_CIFS_STATS2 manually in the
-source code (typically by setting it in the beginning of cifsglob.h),
-and setting it to seven enables all three. Finally, tracing
+Setting it to 4 requires CONFIG_CIFS_STATS2 to be set in kernel configuration
+(.config). Setting it to seven enables all three. Finally, tracing
the start of smb requests and responses can be enabled via:
echo 1 > /proc/fs/cifs/traceSMB
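+
+For example, to turn on all three classes of logging described above, check
+the kernel log, and then switch the extra logging back off (an illustrative
+sketch, not part of the formal interface description):
+
+    echo 7 > /proc/fs/cifs/cifsFYI
+    dmesg | tail
+    echo 0 > /proc/fs/cifs/cifsFYI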
-Per share (per client mount) statistics are available in /proc/fs/cifs/Stats
-if the kernel was configured with cifs statistics enabled. The statistics
-represent the number of successful (ie non-zero return code from the server)
-SMB responses to some of the more common commands (open, delete, mkdir etc.).
+Per share (per client mount) statistics are available in /proc/fs/cifs/Stats.
+Additional information is available if CONFIG_CIFS_STATS2 is enabled in the
+kernel configuration (.config). The statistics returned include counters which
+represent the number of attempted and failed (ie non-zero return code from the
+server) SMB3 (or cifs) requests grouped by request type (read, write, close etc.).
Also recorded is the total bytes read and bytes written to the server for
that share. Note that due to client caching effects this can be less than the
number of bytes read and written by the application running on the client.
-The statistics for the number of total SMBs and oplock breaks are different in
-that they represent all for that share, not just those for which the server
-returned success.
+Statistics can be reset to zero with "echo 0 > /proc/fs/cifs/Stats", which may
+be useful when comparing the performance of two different scenarios.
Also note that "cat /proc/fs/cifs/DebugData" will display information about
the active sessions and the shares that are mounted.
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4/ext4.rst
index 7f628b9f7c4b..9d4368d591fa 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4/ext4.rst
@@ -1,6 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
-Ext4 Filesystem
-===============
+========================
+General Information
+========================
Ext4 is an advanced level of the ext3 filesystem which incorporates
scalability and reliability enhancements for supporting large filesystems
@@ -11,37 +13,30 @@ Mailing list: linux-ext4@vger.kernel.org
Web site: http://ext4.wiki.kernel.org
-1. Quick usage instructions:
-===========================
+Quick usage instructions
+========================
Note: More extensive information for getting started with ext4 can be
- found at the ext4 wiki site at the URL:
- http://ext4.wiki.kernel.org/index.php/Ext4_Howto
+found at the ext4 wiki site at the URL:
+http://ext4.wiki.kernel.org/index.php/Ext4_Howto
- - Compile and install the latest version of e2fsprogs (as of this
- writing version 1.41.3) from:
+ - The latest version of e2fsprogs can be found at:
+
+ https://www.kernel.org/pub/linux/kernel/people/tytso/e2fsprogs/
- http://sourceforge.net/project/showfiles.php?group_id=2406
-
or
- https://www.kernel.org/pub/linux/kernel/people/tytso/e2fsprogs/
+ http://sourceforge.net/project/showfiles.php?group_id=2406
or grab the latest git repository from:
- git://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git
-
- - Note that it is highly important to install the mke2fs.conf file
- that comes with the e2fsprogs 1.41.x sources in /etc/mke2fs.conf. If
- you have edited the /etc/mke2fs.conf file installed on your system,
- you will need to merge your changes with the version from e2fsprogs
- 1.41.x.
+ https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git
- Create a new filesystem using the ext4 filesystem type:
- # mke2fs -t ext4 /dev/hda1
+ # mke2fs -t ext4 /dev/hda1
- Or to configure an existing ext3 filesystem to support extents:
+ Or to configure an existing ext3 filesystem to support extents:
# tune2fs -O extents /dev/hda1
@@ -50,10 +45,6 @@ Note: More extensive information for getting started with ext4 can be
# tune2fs -I 256 /dev/hda1
- (Note: we currently do not have tools to convert an ext4
- filesystem back to ext3; so please do not do try this on production
- filesystems.)
-
- Mounting:
# mount -t ext4 /dev/hda1 /wherever
@@ -75,10 +66,11 @@ Note: More extensive information for getting started with ext4 can be
the filesystem with a large journal can also be helpful for
metadata-intensive workloads.
-2. Features
-===========
+Features
+========
-2.1 Currently available
+Currently Available
+-------------------
* ability to use filesystems > 16TB (e2fsprogs support not available yet)
* extent format reduces metadata overhead (RAM, IO for access, transactions)
@@ -103,31 +95,15 @@ Note: More extensive information for getting started with ext4 can be
[1] Filesystems with a block size of 1k may see a limit imposed by the
directory hash tree having a maximum depth of two.
-2.2 Candidate features for future inclusion
-
-* online defrag (patches available but not well tested)
-* reduced mke2fs time via lazy itable initialization in conjunction with
- the uninit_bg feature (capability to do this is available in e2fsprogs
- but a kernel thread to do lazy zeroing of unused inode table blocks
- after filesystem is first mounted is required for safety)
-
-There are several others under discussion, whether they all make it in is
-partly a function of how much time everyone has to work on them. Features like
-metadata checksumming have been discussed and planned for a bit but no patches
-exist yet so I'm not sure they're in the near-term roadmap.
-
-The big performance win will come with mballoc, delalloc and flex_bg
-grouping of bitmaps and inode tables. Some test results available here:
-
- - http://www.bullopensource.org/ext4/20080818-ffsb/ffsb-write-2.6.27-rc1.html
- - http://www.bullopensource.org/ext4/20080818-ffsb/ffsb-readwrite-2.6.27-rc1.html
-
-3. Options
-==========
+Options
+=======
When mounting an ext4 filesystem, the following options are accepted:
(*) == default
+======================= =======================================================
+Mount Option Description
+======================= =======================================================
ro Mount filesystem read only. Note that ext4 will
replay the journal (and thus write to the
partition) even when mounted "read only". The
@@ -387,33 +363,38 @@ i_version Enable 64-bit inode version support. This option is
dax Use direct access (no page cache). See
Documentation/filesystems/dax.txt. Note that
this option is incompatible with data=journal.
+======================= =======================================================
Data Mode
=========
There are 3 different data modes:
* writeback mode
-In data=writeback mode, ext4 does not journal data at all. This mode provides
-a similar level of journaling as that of XFS, JFS, and ReiserFS in its default
-mode - metadata journaling. A crash+recovery can cause incorrect data to
-appear in files which were written shortly before the crash. This mode will
-typically provide the best ext4 performance.
+
+ In data=writeback mode, ext4 does not journal data at all. This mode provides
+ a similar level of journaling as that of XFS, JFS, and ReiserFS in its default
+ mode - metadata journaling. A crash+recovery can cause incorrect data to
+ appear in files which were written shortly before the crash. This mode will
+ typically provide the best ext4 performance.
* ordered mode
-In data=ordered mode, ext4 only officially journals metadata, but it logically
-groups metadata information related to data changes with the data blocks into a
-single unit called a transaction. When it's time to write the new metadata
-out to disk, the associated data blocks are written first. In general,
-this mode performs slightly slower than writeback but significantly faster than journal mode.
+
+ In data=ordered mode, ext4 only officially journals metadata, but it logically
+ groups metadata information related to data changes with the data blocks into
+ a single unit called a transaction. When it's time to write the new metadata
+ out to disk, the associated data blocks are written first. In general, this
+ mode performs slightly slower than writeback but significantly faster than
+ journal mode.
* journal mode
-data=journal mode provides full data and metadata journaling. All new data is
-written to the journal first, and then to its final location.
-In the event of a crash, the journal can be replayed, bringing both data and
-metadata into a consistent state. This mode is the slowest except when data
-needs to be read from and written to disk at the same time where it
-outperforms all others modes. Enabling this mode will disable delayed
-allocation and O_DIRECT support.
+
+ data=journal mode provides full data and metadata journaling. All new data is
+ written to the journal first, and then to its final location. In the event of
+ a crash, the journal can be replayed, bringing both data and metadata into a
+ consistent state. This mode is the slowest except when data needs to be read
+ from and written to disk at the same time where it outperforms all others
+ modes. Enabling this mode will disable delayed allocation and O_DIRECT
+ support.
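+
+As an illustrative sketch (reusing the placeholder device and mount point
+from the quick usage instructions above), each mode is selected with the
+data mount option:
+
+    # mount -t ext4 -o data=writeback /dev/hda1 /wherever
+    # mount -t ext4 -o data=ordered /dev/hda1 /wherever
+    # mount -t ext4 -o data=journal /dev/hda1 /wherever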
/proc entries
=============
@@ -425,10 +406,12 @@ Information about mounted ext4 file systems can be found in
in table below.
Files in /proc/fs/ext4/<devname>
-..............................................................................
+
+================ =======
File Content
+================ =======
mb_groups details of multiblock allocator buddy cache of free blocks
-..............................................................................
+================ =======
/sys entries
============
@@ -439,28 +422,30 @@ Information about mounted ext4 file systems can be found in
/sys/fs/ext4/dm-0). The files in each per-device directory are shown
in table below.
-Files in /sys/fs/ext4/<devname>
+Files in /sys/fs/ext4/<devname>:
+
(see also Documentation/ABI/testing/sysfs-fs-ext4)
-..............................................................................
- File Content
+============================= =================================================
+File Content
+============================= =================================================
delayed_allocation_blocks This file is read-only and shows the number of
blocks that are dirty in the page cache, but
which do not have their location in the
filesystem allocated yet.
- inode_goal Tuning parameter which (if non-zero) controls
+inode_goal Tuning parameter which (if non-zero) controls
the goal inode used by the inode allocator in
preference to all other allocation heuristics.
This is intended for debugging use only, and
should be 0 on production systems.
- inode_readahead_blks Tuning parameter which controls the maximum
+inode_readahead_blks Tuning parameter which controls the maximum
number of inode table blocks that ext4's inode
table readahead algorithm will pre-read into
the buffer cache
- lifetime_write_kbytes This file is read-only and shows the number of
+lifetime_write_kbytes This file is read-only and shows the number of
kilobytes of data that have been written to this
filesystem since it was created.
@@ -508,7 +493,7 @@ Files in /sys/fs/ext4/<devname>
in the file system. If there is not enough space
for the reserved space when mounting the file
mount will _not_ fail.
-..............................................................................
+============================= =================================================
Ioctls
======
@@ -518,8 +503,10 @@ through the system call interfaces. The list of all Ext4 specific ioctls are
shown in the table below.
Table of Ext4 specific ioctls
-..............................................................................
- Ioctl Description
+
+============================= =================================================
+Ioctl Description
+============================= =================================================
EXT4_IOC_GETFLAGS Get additional attributes associated with inode.
The ioctl argument is an integer bitfield, with
bit values described in ext4.h. This ioctl is an
@@ -610,8 +597,7 @@ Table of Ext4 specific ioctls
normal user by accident.
The data blocks of the previous boot loader
will be associated with the given inode.
-
-..............................................................................
+============================= =================================================
References
==========
diff --git a/Documentation/filesystems/ext4/index.rst b/Documentation/filesystems/ext4/index.rst
new file mode 100644
index 000000000000..71121605558c
--- /dev/null
+++ b/Documentation/filesystems/ext4/index.rst
@@ -0,0 +1,17 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+ext4 Filesystem
+===============
+
+General usage and on-disk artifacts written by ext4. More documentation may
+be ported from the wiki as time permits. This should be considered the
+canonical source of information as the details here have been reviewed by
+the ext4 community.
+
+.. toctree::
+ :maxdepth: 5
+ :numbered:
+
+ ext4
+ ondisk/index
diff --git a/Documentation/filesystems/ext4/ondisk/about.rst b/Documentation/filesystems/ext4/ondisk/about.rst
new file mode 100644
index 000000000000..0aadba052264
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/about.rst
@@ -0,0 +1,44 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+About this Book
+===============
+
+This document attempts to describe the on-disk format for ext4
+filesystems. The same general ideas should apply to ext2/3 filesystems
+as well, though they do not support all the features that ext4 supports,
+and the fields will be shorter.
+
+**NOTE**: This is a work in progress, based on notes that the author
+(djwong) made while picking apart a filesystem by hand. The data
+structure definitions should be current as of Linux 4.18 and
+e2fsprogs-1.44. All comments and corrections are welcome, since there is
+undoubtedly plenty of lore that might not be reflected in freshly
+created demonstration filesystems.
+
+License
+-------
+This book is licensed under the terms of the GNU Public License, v2.
+
+Terminology
+-----------
+
+ext4 divides a storage device into an array of logical blocks both to
+reduce bookkeeping overhead and to increase throughput by forcing larger
+transfer sizes. Generally, the block size will be 4KiB (the same size as
+pages on x86 and the block layer's default block size), though the
+actual size is calculated as 2 ^ (10 + ``sb.s_log_block_size``) bytes.
+Throughout this document, disk locations are given in terms of these
+logical blocks, not raw LBAs, and not 1024-byte blocks. For the sake of
+convenience, the logical block size will be referred to as
+``$block_size`` throughout the rest of the document.
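+
+As a worked example of the block-size formula above (the value 2 here is an
+assumption chosen to match the common 4KiB case)::
+
+    2 ^ (10 + sb.s_log_block_size) = 2 ^ (10 + 2) = 4096 bytes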
+
+When referenced in ``preformatted text`` blocks, ``sb`` refers to fields
+in the super block, and ``inode`` refers to fields in an inode table
+entry.
+
+Other References
+----------------
+
+Also see http://www.nongnu.org/ext2-doc/ for quite a collection of
+information about ext2/3. Here's another old reference:
+http://wiki.osdev.org/Ext2
diff --git a/Documentation/filesystems/ext4/ondisk/allocators.rst b/Documentation/filesystems/ext4/ondisk/allocators.rst
new file mode 100644
index 000000000000..7aa85152ace3
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/allocators.rst
@@ -0,0 +1,56 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Block and Inode Allocation Policy
+---------------------------------
+
+ext4 recognizes (better than ext3, anyway) that data locality is
+generally a desirable quality of a filesystem. On a spinning disk,
+keeping related blocks near each other reduces the amount of movement
+that the head actuator and disk must perform to access a data block,
+thus speeding up disk IO. On an SSD there of course are no moving parts,
+but locality can increase the size of each transfer request while
+reducing the total number of requests. This locality may also have the
+effect of concentrating writes on a single erase block, which can speed
+up file rewrites significantly. Therefore, it is useful to reduce
+fragmentation whenever possible.
+
+The first tool that ext4 uses to combat fragmentation is the multi-block
+allocator. When a file is first created, the block allocator
+speculatively allocates 8KiB of disk space to the file on the assumption
+that the space will get written soon. When the file is closed, the
+unused speculative allocations are of course freed, but if the
+speculation is correct (typically the case for full writes of small
+files) then the file data gets written out in a single multi-block
+extent. A second related trick that ext4 uses is delayed allocation.
+Under this scheme, when a file needs more blocks to absorb file writes,
+the filesystem defers deciding the exact placement on the disk until all
+the dirty buffers are being written out to disk. By not committing to a
+particular placement until it's absolutely necessary (the commit timeout
+is hit, or sync() is called, or the kernel runs out of memory), the hope
+is that the filesystem can make better location decisions.
+
+The third trick that ext4 (and ext3) uses is that it tries to keep a
+file's data blocks in the same block group as its inode. This cuts down
+on the seek penalty when the filesystem first has to read a file's inode
+to learn where the file's data blocks live and then seek over to the
+file's data blocks to begin I/O operations.
+
+The fourth trick is that all the inodes in a directory are placed in the
+same block group as the directory, when feasible. The working assumption
+here is that all the files in a directory might be related, therefore it
+is useful to try to keep them all together.
+
+The fifth trick is that the disk volume is cut up into 128MB block
+groups; these mini-containers are used as outlined above to try to
+maintain data locality. However, there is a deliberate quirk -- when a
+directory is created in the root directory, the inode allocator scans
+the block groups and puts that directory into the least heavily loaded
+block group that it can find. This encourages directories to spread out
+over a disk; as the top-level directory/file blobs fill up one block
+group, the allocators simply move on to the next block group. Allegedly
+this scheme evens out the loading on the block groups, though the author
+suspects that the directories which are so unlucky as to land towards
+the end of a spinning drive get a raw deal performance-wise.
+
+Of course if all of these mechanisms fail, one can always use e4defrag
+to defragment files.
diff --git a/Documentation/filesystems/ext4/ondisk/attributes.rst b/Documentation/filesystems/ext4/ondisk/attributes.rst
new file mode 100644
index 000000000000..0b01b67b81fe
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/attributes.rst
@@ -0,0 +1,191 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Extended Attributes
+-------------------
+
+Extended attributes (xattrs) are typically stored in a separate data
+block on the disk and referenced from inodes via ``inode.i_file_acl*``.
+The first use of extended attributes seems to have been for storing file
+ACLs and other security data (selinux). With the ``user_xattr`` mount
+option it is possible for users to store extended attributes so long as
+all attribute names begin with “user”; this restriction seems to have
+disappeared as of Linux 3.0.
+
+There are two places where extended attributes can be found. The first
+place is between the end of each inode entry and the beginning of the
+next inode entry. For example, if inode.i\_extra\_isize = 28 and
+sb.inode\_size = 256, then there are 256 - (128 + 28) = 100 bytes
+available for in-inode extended attribute storage. The second place
+where extended attributes can be found is in the block pointed to by
+``inode.i_file_acl``. As of Linux 3.11, it is not possible for this
+block to contain a pointer to a second extended attribute block (or even
+the remaining blocks of a cluster). In theory it is possible for each
+attribute's value to be stored in a separate data block, though as of
+Linux 3.11 the code does not permit this.
+
+Keys are generally assumed to be ASCIIZ strings, whereas values can be
+strings or binary data.
+
+Extended attributes, when stored after the inode, have a header
+``ext4_xattr_ibody_header`` that is 4 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - h\_magic
+ - Magic number for identification, 0xEA020000. This value is set by the
+ Linux driver, though e2fsprogs doesn't seem to check it(?)
+
+The beginning of an extended attribute block is in
+``struct ext4_xattr_header``, which is 32 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - h\_magic
+ - Magic number for identification, 0xEA020000.
+ * - 0x4
+ - \_\_le32
+ - h\_refcount
+ - Reference count.
+ * - 0x8
+ - \_\_le32
+ - h\_blocks
+ - Number of disk blocks used.
+ * - 0xC
+ - \_\_le32
+ - h\_hash
+ - Hash value of all attributes.
+ * - 0x10
+ - \_\_le32
+ - h\_checksum
+ - Checksum of the extended attribute block.
+ * - 0x14
+ - \_\_u32
+ - h\_reserved[2]
+ - Zero.
+
+The checksum is calculated against the FS UUID, the 64-bit block number
+of the extended attribute block, and the entire block (header +
+entries).
+
+Following the ``struct ext4_xattr_header`` or
+``struct ext4_xattr_ibody_header`` is an array of
+``struct ext4_xattr_entry``; each of these entries is at least 16 bytes
+long. When stored in an external block, the ``struct ext4_xattr_entry``
+entries must be stored in sorted order. The sort order is
+``e_name_index``, then ``e_name_len``, and finally ``e_name``.
+Attributes stored inside an inode do not need to be stored in sorted order.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_u8
+ - e\_name\_len
+ - Length of name.
+ * - 0x1
+ - \_\_u8
+ - e\_name\_index
+ - Attribute name index. There is a discussion of this below.
+ * - 0x2
+ - \_\_le16
+ - e\_value\_offs
+ - Location of this attribute's value on the disk block where it is stored.
+ Multiple attributes can share the same value. For an inode attribute
+ this value is relative to the start of the first entry; for a block this
+ value is relative to the start of the block (i.e. the header).
+ * - 0x4
+ - \_\_le32
+ - e\_value\_inum
+ - The inode where the value is stored. Zero indicates the value is in the
+ same block as this entry. This field is only used if the
+ INCOMPAT\_EA\_INODE feature is enabled.
+ * - 0x8
+ - \_\_le32
+ - e\_value\_size
+ - Length of attribute value.
+ * - 0xC
+ - \_\_le32
+ - e\_hash
+ - Hash value of attribute name and attribute value. The kernel doesn't
+ update the hash for in-inode attributes, so for that case this value
+ must be zero, because e2fsck validates any non-zero hash regardless of
+ where the xattr lives.
+ * - 0x10
+ - char
+ - e\_name[e\_name\_len]
+ - Attribute name. Does not include trailing NULL.
+
+Attribute values can follow the end of the entry table. There appears to
+be a requirement that they be aligned to 4-byte boundaries. The values
+are stored starting at the end of the block and grow towards the
+xattr\_header/xattr\_entry table. When the two collide, the overflow is
+put into a separate disk block. If the disk block fills up, the
+filesystem returns -ENOSPC.
+
+The first four fields of the ``ext4_xattr_entry`` are set to zero to
+mark the end of the key list.
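+
+One way to inspect these structures on a real filesystem is debugfs from
+e2fsprogs (an illustrative sketch; the device and inode number are
+placeholders, and the xattr block number must be taken from the stat
+output)::
+
+    # debugfs -R 'stat <12>' /dev/sda1
+                            # the "File ACL:" field holds inode.i_file_acl
+    # debugfs -R 'block_dump 9337' /dev/sda1
+                            # hex dump of that block; the first four bytes
+                            # are 00 00 02 ea, the little-endian h_magic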
+
+Attribute Name Indices
+~~~~~~~~~~~~~~~~~~~~~~
+
+Logically speaking, extended attributes are a series of key=value pairs.
+The keys are assumed to be NULL-terminated strings. To reduce the amount
+of on-disk space that the keys consume, the beginning of the key string
+is matched against the attribute name index. If a match is found, the
+attribute name index field is set, and the matching string is removed from
+the key name. Here is a map of name index values to key prefixes:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Name Index
+ - Key Prefix
+ * - 0
+ - (no prefix)
+ * - 1
+ - “user.”
+ * - 2
+ - “system.posix\_acl\_access”
+ * - 3
+ - “system.posix\_acl\_default”
+ * - 4
+ - “trusted.”
+ * - 6
+ - “security.”
+ * - 7
+ - “system.” (inline\_data only?)
+ * - 8
+ - “system.richacl” (SuSE kernels only?)
+
+For example, if the attribute key is “user.fubar”, the attribute name
+index is set to 1 and the “fubar” name is recorded on disk.
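+
+The same mapping can be observed from user space with the attr tools (an
+illustrative sketch; the file name is a placeholder). The attribute written
+below is stored on disk with name index 1 and only the string “fubar” in
+``e_name``::
+
+    $ setfattr -n user.fubar -v baz somefile
+    $ getfattr -n user.fubar somefile
+    # file: somefile
+    user.fubar="baz"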
+
+POSIX ACLs
+~~~~~~~~~~
+
+POSIX ACLs are stored in a reduced version of the Linux kernel (and
+libacl's) internal ACL format. The key difference is that the version
+number is different (1) and the ``e_id`` field is only stored for named
+user and group ACLs.
diff --git a/Documentation/filesystems/ext4/ondisk/bigalloc.rst b/Documentation/filesystems/ext4/ondisk/bigalloc.rst
new file mode 100644
index 000000000000..c6d88557553c
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/bigalloc.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Bigalloc
+--------
+
+At the moment, the default size of a block is 4KiB, which is a commonly
+supported page size on most MMU-capable hardware. This is fortunate, as
+ext4 code is not prepared to handle the case where the block size
+exceeds the page size. However, for a filesystem of mostly huge files,
+it is desirable to be able to allocate disk blocks in units of multiple
+blocks to reduce both fragmentation and metadata overhead. The
+`bigalloc <Bigalloc>`__ feature provides exactly this ability. The
+administrator can set a block cluster size at mkfs time (which is stored
+in the s\_log\_cluster\_size field in the superblock); from then on, the
+block bitmaps track clusters, not individual blocks. This means that
+block groups can be several gigabytes in size (instead of just 128MiB);
+however, the minimum allocation unit becomes a cluster, not a block,
+even for directories. TaoBao had a patchset to extend the “use units of
+clusters instead of blocks” to the extent tree, though it is not clear
+where those patches went-- they eventually morphed into “extent tree v2”
+but that code has not landed as of May 2015.
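+
+For example (an illustrative sketch; the device and cluster size are
+placeholders), a bigalloc filesystem with 16KiB clusters could be created
+with::
+
+    # mke2fs -t ext4 -O bigalloc -C 16384 /dev/sda1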
+
diff --git a/Documentation/filesystems/ext4/ondisk/bitmaps.rst b/Documentation/filesystems/ext4/ondisk/bitmaps.rst
new file mode 100644
index 000000000000..c7546dbc197a
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/bitmaps.rst
@@ -0,0 +1,28 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Block and inode Bitmaps
+-----------------------
+
+The data block bitmap tracks the usage of data blocks within the block
+group.
+
+The inode bitmap records which entries in the inode table are in use.
+
+As with most bitmaps, one bit represents the usage status of one data
+block or inode table entry. This implies a block group size of 8 \*
+number\_of\_bytes\_in\_a\_logical\_block.
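+
+For example, with the common 4KiB logical block size, one bitmap block
+implies::
+
+    8 * 4096 = 32768 blocks (or inode records) tracked per group
+    32768 * 4096 bytes = 128 MiB of data blocks per group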
+
+NOTE: If ``BLOCK_UNINIT`` is set for a given block group, various parts
+of the kernel and e2fsprogs code pretends that the block bitmap contains
+zeros (i.e. all blocks in the group are free). However, it is not
+necessarily the case that no blocks are in use -- if ``meta_bg`` is set,
+the bitmaps and group descriptor live inside the group. Unfortunately,
+ext2fs\_test\_block\_bitmap2() will return '0' for those locations,
+which produces confusing debugfs output.
+
+Inode Table
+-----------
+Inode tables are statically allocated at mkfs time. Each block group
+descriptor points to the start of the table, and the superblock records
+the number of inodes per group. See the section on inodes for more
+information.
diff --git a/Documentation/filesystems/ext4/ondisk/blockgroup.rst b/Documentation/filesystems/ext4/ondisk/blockgroup.rst
new file mode 100644
index 000000000000..baf888e4c06a
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/blockgroup.rst
@@ -0,0 +1,135 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Layout
+------
+
+The layout of a standard block group is approximately as follows (each
+of these fields is discussed in a separate section below):
+
+.. list-table::
+ :widths: 1 1 1 1 1 1 1 1
+ :header-rows: 1
+
+ * - Group 0 Padding
+ - ext4 Super Block
+ - Group Descriptors
+ - Reserved GDT Blocks
+ - Data Block Bitmap
+ - inode Bitmap
+ - inode Table
+ - Data Blocks
+ * - 1024 bytes
+ - 1 block
+ - many blocks
+ - many blocks
+ - 1 block
+ - 1 block
+ - many blocks
+ - many more blocks
+
+For the special case of block group 0, the first 1024 bytes are unused,
+to allow for the installation of x86 boot sectors and other oddities.
+The superblock will start at offset 1024 bytes, whichever block that
+happens to be (usually 0). However, if the block size is 1024 bytes,
+then block 0 is marked in use and the superblock goes in block 1.
+For all other block groups, there is no padding.
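+
+A minimal sketch of locating the primary superblock, assuming only what
+is stated above (a fixed byte offset of 1024) plus the little-endian
+``s_magic`` field, 0xEF53, at offset 0x38 within the superblock:
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    int main(int argc, char **argv)
+    {
+        uint8_t sb[1024];
+
+        if (argc < 2)
+            return 1;
+        int fd = open(argv[1], O_RDONLY);
+
+        /* The primary superblock is at byte 1024 regardless of the
+         * block size, so a flat pread() suffices. */
+        if (fd < 0 || pread(fd, sb, sizeof(sb), 1024) != sizeof(sb))
+            return 1;
+        uint16_t magic = sb[0x38] | (sb[0x39] << 8);
+        printf("magic 0x%04x (%s)\n", magic,
+               magic == 0xEF53 ? "extN" : "not extN");
+        close(fd);
+        return 0;
+    }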
+
+The ext4 driver primarily works with the superblock and the group
+descriptors that are found in block group 0. Redundant copies of the
+superblock and group descriptors are written to some of the block groups
+across the disk in case the beginning of the disk gets trashed, though
+not all block groups necessarily host a redundant copy (see following
+paragraph for more details). If the group does not have a redundant
+copy, the block group begins with the data block bitmap. Note also that
+when the filesystem is freshly formatted, mkfs will allocate “reserve
+GDT block” space after the block group descriptors and before the start
+of the block bitmaps to allow for future expansion of the filesystem. By
+default, a filesystem is allowed to increase in size by a factor of
+1024x over the original filesystem size.
+
+The location of the inode table is given by ``grp.bg_inode_table_*``. It
+is a contiguous range of blocks large enough to contain
+``sb.s_inodes_per_group * sb.s_inode_size`` bytes.
+
+As for the ordering of items in a block group, it is generally
+established that the super block and the group descriptor table, if
+present, will be at the beginning of the block group. The bitmaps and
+the inode table can be anywhere, and it is quite possible for the
+bitmaps to come after the inode table, or for both to be in different
+groups (flex\_bg). Leftover space is used for file data blocks, indirect
+block maps, extent tree blocks, and extended attributes.
+
+Flexible Block Groups
+---------------------
+
+Starting in ext4, there is a new feature called flexible block groups
+(flex\_bg). In a flex\_bg, several block groups are tied together as one
+logical block group; the bitmap spaces and the inode table space in the
+first block group of the flex\_bg are expanded to include the bitmaps
+and inode tables of all other block groups in the flex\_bg. For example,
+if the flex\_bg size is 4, then group 0 will contain (in order) the
+superblock, group descriptors, data block bitmaps for groups 0-3, inode
+bitmaps for groups 0-3, inode tables for groups 0-3, and the remaining
+space in group 0 is for file data. The effect of this is to group the
+block metadata close together for faster loading, and to enable large
+files to be continuous on disk. Backup copies of the superblock and
+group descriptors are always at the beginning of block groups, even if
+flex\_bg is enabled. The number of block groups that make up a flex\_bg
+is given by 2 ^ ``sb.s_log_groups_per_flex``.
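+
+For example, ``sb.s_log_groups_per_flex`` = 4 gives flex groups of 2^4
+= 16 block groups, i.e. 2GiB of disk per flex group with the default
+128MiB block groups.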
+
+Meta Block Groups
+-----------------
+
+Without the META\_BG option, for safety reasons, all copies of the
+block group descriptors are kept in the first block group. Given the
+default 128MiB (2^27 bytes) block group size and 64-byte group
+descriptors, ext4 can have at most 2^27 / 64 = 2^21 block groups. This
+limits the entire filesystem size to 2^21 \* 2^27 = 2^48 bytes or 256TiB.
+
+The solution to this problem is to use the metablock group feature
+(META\_BG), which is already in ext3 for all 2.6 releases. With the
+META\_BG feature, ext4 filesystems are partitioned into many metablock
+groups. Each metablock group is a cluster of block groups whose group
+descriptor structures can be stored in a single disk block. For ext4
+filesystems with 4 KB block size, a single metablock group partition
+includes 64 block groups, or 8 GiB of disk space. The metablock group
+feature moves the location of the group descriptors from the congested
+first block group of the whole filesystem into the first group of each
+metablock group itself. The backups are in the second and last group of
+each metablock group. This increases the 2^21 maximum block groups limit
+to the hard limit 2^32, allowing support for a 512PiB filesystem.
+
+The change in the filesystem format replaces the current scheme where
+the superblock is followed by a variable-length set of block group
+descriptors. Instead, the superblock and a single block group descriptor
+block are placed at the beginning of the first, second, and last block
+groups in a meta-block group. A meta-block group is a collection of
+block groups which can be described by a single block group descriptor
+block. Since the size of the block group descriptor structure is 32
+bytes (when the 64bit feature is not enabled), a meta-block group
+contains 32 block groups for filesystems with a 1KB block size, and 128
+block groups for filesystems with a 4KB block size. Filesystems can
+either be created using this new block group
+descriptor layout, or existing filesystems can be resized on-line, and
+the field s\_first\_meta\_bg in the superblock will indicate the first
+block group using this new layout.
+
+Please see an important note about ``BLOCK_UNINIT`` in the section about
+block and inode bitmaps.
+
+Lazy Block Group Initialization
+-------------------------------
+
+ext4 introduces three block group descriptor flags that enable mkfs to
+skip initializing other parts of the block group
+metadata. Specifically, the INODE\_UNINIT and BLOCK\_UNINIT flags mean
+that the inode and block bitmaps for that group can be calculated and
+therefore the on-disk bitmap blocks are not initialized. This is
+generally the case for an empty block group or a block group containing
+only fixed-location block group metadata. The INODE\_ZEROED flag means
+that the inode table has been initialized; mkfs will unset this flag and
+rely on the kernel to initialize the inode tables in the background.
+
+By not writing zeroes to the bitmaps and inode table, mkfs time is
+reduced considerably. Note the feature flag is RO\_COMPAT\_GDT\_CSUM,
+but the dumpe2fs output prints this as “uninit\_bg”. They are the same
+thing.
diff --git a/Documentation/filesystems/ext4/ondisk/blockmap.rst b/Documentation/filesystems/ext4/ondisk/blockmap.rst
new file mode 100644
index 000000000000..30e25750d88a
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/blockmap.rst
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0
+
++---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| i.i\_block Offset | Where It Points |
++=====================+==============================================================================================================================================================================================================================+
+| 0 to 11 | Direct map to file blocks 0 to 11. |
++---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| 12 | Indirect block: (file blocks 12 to (``$block_size`` / 4) + 11, or 12 to 1035 if 4KiB blocks) |
+| | |
+| | +------------------------------+--------------------------------------------------------------------+ |
+| | | Indirect Block Offset | Where It Points | |
+| | +==============================+====================================================================+ |
+| | | 0 to (``$block_size`` / 4) | Direct map to (``$block_size`` / 4) blocks (1024 if 4KiB blocks) | |
+| | +------------------------------+--------------------------------------------------------------------+ |
++---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| 13 | Double-indirect block: (file blocks ``$block_size``/4 + 12 to (``$block_size`` / 4) ^ 2 + (``$block_size`` / 4) + 11, or 1036 to 1049611 if 4KiB blocks) |
+| | |
+| | +--------------------------------+---------------------------------------------------------------------------------------------------------+ |
+| | | Double Indirect Block Offset | Where It Points | |
+| | +================================+=========================================================================================================+ |
+| | | 0 to (``$block_size`` / 4) | Map to (``$block_size`` / 4) indirect blocks (1024 if 4KiB blocks) | |
+| | | | | |
+| | | | +------------------------------+--------------------------------------------------------------------+ | |
+| | | | | Indirect Block Offset | Where It Points | | |
+| | | | +==============================+====================================================================+ | |
+| | | | | 0 to (``$block_size`` / 4) | Direct map to (``$block_size`` / 4) blocks (1024 if 4KiB blocks) | | |
+| | | | +------------------------------+--------------------------------------------------------------------+ | |
+| | +--------------------------------+---------------------------------------------------------------------------------------------------------+ |
++---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| 14                  | Triple-indirect block: (file blocks (``$block_size`` / 4) ^ 2 + (``$block_size`` / 4) + 12 to (``$block_size`` / 4) ^ 3 + (``$block_size`` / 4) ^ 2 + (``$block_size`` / 4) + 11, or 1049612 to 1074791435 if 4KiB blocks)   |
+| | |
+| | +--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+ |
+| | | Triple Indirect Block Offset | Where It Points | |
+| | +================================+================================================================================================================================================+ |
+| | | 0 to (``$block_size`` / 4) | Map to (``$block_size`` / 4) double indirect blocks (1024 if 4KiB blocks) | |
+| | | | | |
+| | | | +--------------------------------+---------------------------------------------------------------------------------------------------------+ | |
+| | | | | Double Indirect Block Offset | Where It Points | | |
+| | | | +================================+=========================================================================================================+ | |
+| | | | | 0 to (``$block_size`` / 4) | Map to (``$block_size`` / 4) indirect blocks (1024 if 4KiB blocks) | | |
+| | | | | | | | |
+| | | | | | +------------------------------+--------------------------------------------------------------------+ | | |
+| | | | | | | Indirect Block Offset | Where It Points | | | |
+| | | | | | +==============================+====================================================================+ | | |
+| | | | | | | 0 to (``$block_size`` / 4) | Direct map to (``$block_size`` / 4) blocks (1024 if 4KiB blocks) | | | |
+| | | | | | +------------------------------+--------------------------------------------------------------------+ | | |
+| | | | +--------------------------------+---------------------------------------------------------------------------------------------------------+ | |
+| | +--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+ |
++---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/Documentation/filesystems/ext4/ondisk/blocks.rst b/Documentation/filesystems/ext4/ondisk/blocks.rst
new file mode 100644
index 000000000000..73d4dc0f7bda
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/blocks.rst
@@ -0,0 +1,142 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Blocks
+------
+
+ext4 allocates storage space in units of “blocks”. A block is a group of
+sectors between 1KiB and 64KiB, and the number of sectors must be an
+integral power of 2. Blocks are in turn grouped into larger units called
+block groups. Block size is specified at mkfs time and typically is
+4KiB. You may experience mounting problems if block size is greater than
+page size (i.e. 64KiB blocks on an i386, which only has 4KiB memory
+pages). By default a filesystem can contain 2^32 blocks; if the '64bit'
+feature is enabled, then a filesystem can have 2^64 blocks.
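+
+As a worked example, with 4KiB blocks a 32-bit filesystem tops out at
+2^32 \* 4KiB = 16TiB, which is the “File System Size” figure in the
+first table below.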
+
+For 32-bit filesystems, limits are as follows:
+
+.. list-table::
+ :widths: 1 1 1 1 1
+ :header-rows: 1
+
+ * - Item
+ - 1KiB
+ - 2KiB
+ - 4KiB
+ - 64KiB
+ * - Blocks
+ - 2^32
+ - 2^32
+ - 2^32
+ - 2^32
+ * - Inodes
+ - 2^32
+ - 2^32
+ - 2^32
+ - 2^32
+ * - File System Size
+ - 4TiB
+ - 8TiB
+ - 16TiB
+ - 256PiB
+ * - Blocks Per Block Group
+ - 8,192
+ - 16,384
+ - 32,768
+ - 524,288
+ * - Inodes Per Block Group
+ - 8,192
+ - 16,384
+ - 32,768
+ - 524,288
+ * - Block Group Size
+ - 8MiB
+ - 32MiB
+ - 128MiB
+ - 32GiB
+ * - Blocks Per File, Extents
+ - 2^32
+ - 2^32
+ - 2^32
+ - 2^32
+ * - Blocks Per File, Block Maps
+ - 16,843,020
+ - 134,480,396
+ - 1,074,791,436
+ - 4,398,314,962,956 (really 2^32 due to field size limitations)
+ * - File Size, Extents
+ - 4TiB
+ - 8TiB
+ - 16TiB
+ - 256TiB
+ * - File Size, Block Maps
+ - 16GiB
+ - 256GiB
+ - 4TiB
+ - 256TiB
+
+For 64-bit filesystems, limits are as follows:
+
+.. list-table::
+ :widths: 1 1 1 1 1
+ :header-rows: 1
+
+ * - Item
+ - 1KiB
+ - 2KiB
+ - 4KiB
+ - 64KiB
+ * - Blocks
+ - 2^64
+ - 2^64
+ - 2^64
+ - 2^64
+ * - Inodes
+ - 2^32
+ - 2^32
+ - 2^32
+ - 2^32
+ * - File System Size
+ - 16ZiB
+ - 32ZiB
+ - 64ZiB
+ - 1YiB
+ * - Blocks Per Block Group
+ - 8,192
+ - 16,384
+ - 32,768
+ - 524,288
+ * - Inodes Per Block Group
+ - 8,192
+ - 16,384
+ - 32,768
+ - 524,288
+ * - Block Group Size
+ - 8MiB
+ - 32MiB
+ - 128MiB
+ - 32GiB
+ * - Blocks Per File, Extents
+ - 2^32
+ - 2^32
+ - 2^32
+ - 2^32
+ * - Blocks Per File, Block Maps
+ - 16,843,020
+ - 134,480,396
+ - 1,074,791,436
+ - 4,398,314,962,956 (really 2^32 due to field size limitations)
+ * - File Size, Extents
+ - 4TiB
+ - 8TiB
+ - 16TiB
+ - 256TiB
+ * - File Size, Block Maps
+ - 16GiB
+ - 256GiB
+ - 4TiB
+ - 256TiB
+
+Note: Files not using extents (i.e. files using block maps) must be
+placed within the first 2^32 blocks of a filesystem. Files with extents
+must be placed within the first 2^48 blocks of a filesystem. It's not
+clear what happens with larger filesystems.
diff --git a/Documentation/filesystems/ext4/ondisk/checksums.rst b/Documentation/filesystems/ext4/ondisk/checksums.rst
new file mode 100644
index 000000000000..9d6a793b2e03
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/checksums.rst
@@ -0,0 +1,73 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Checksums
+---------
+
+Starting in early 2012, metadata checksums were added to all major ext4
+and jbd2 data structures. The associated feature flag is metadata\_csum.
+The desired checksum algorithm is indicated in the superblock, though as
+of October 2012 the only supported algorithm is crc32c. Some data
+structures did not have space to fit a full 32-bit checksum, so only the
+lower 16 bits are stored. Enabling the 64bit feature increases the data
+structure size so that full 32-bit checksums can be stored for many data
+structures. However, existing 32-bit filesystems cannot be extended to
+enable 64bit mode, at least not without the experimental resize2fs
+patches to do so.
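+
+The general pattern is to seed a crc32c with the filesystem UUID and
+then fold in the object-specific “ingredients” listed in the table
+below. The following is an illustrative sketch only -- it uses a plain
+bitwise crc32c and glosses over the exact seeding and inversion
+conventions of the kernel and e2fsprogs implementations, so it is not
+guaranteed to be bit-for-bit identical to what lands on disk:
+
+.. code-block:: c
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    /* Bitwise crc32c (Castagnoli, reflected polynomial 0x82F63B78). */
+    static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
+    {
+        const uint8_t *p = buf;
+
+        crc = ~crc;
+        while (len--) {
+            crc ^= *p++;
+            for (int i = 0; i < 8; i++)
+                crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
+        }
+        return ~crc;
+    }
+
+    /* Group descriptor checksum, metadata_csum-style: UUID, then the
+     * little-endian group number, then the descriptor itself; only
+     * the low 16 bits fit in bg_checksum. */
+    static uint16_t gd_csum(const uint8_t uuid[16], uint32_t group,
+                            const void *desc, size_t desc_len)
+    {
+        uint32_t crc = crc32c(0, uuid, 16);
+
+        crc = crc32c(crc, &group, 4);      /* assumes LE host */
+        crc = crc32c(crc, desc, desc_len);
+        return crc & 0xFFFF;
+    }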
+
+Existing filesystems can have checksumming added by running
+``tune2fs -O metadata_csum`` against the underlying device. If tune2fs
+encounters directory blocks that lack sufficient empty space to add a
+checksum, it will request that you run ``e2fsck -D`` to have the
+directories rebuilt with checksums. This has the added benefit of
+removing slack space from the directory files and rebalancing the htree
+indexes. If you *ignore* this step, your directories will not be
+protected by a checksum!
+
+The following table describes the data elements that go into each type
+of checksum. The checksum function is whatever the superblock describes
+(crc32c as of October 2013) unless noted otherwise.
+
+.. list-table::
+ :widths: 1 1 4
+ :header-rows: 1
+
+ * - Metadata
+ - Length
+ - Ingredients
+ * - Superblock
+ - \_\_le32
+ - The entire superblock up to the checksum field. The UUID lives inside
+ the superblock.
+ * - MMP
+ - \_\_le32
+ - UUID + the entire MMP block up to the checksum field.
+ * - Extended Attributes
+ - \_\_le32
+ - UUID + the entire extended attribute block. The checksum field is set to
+ zero.
+ * - Directory Entries
+ - \_\_le32
+ - UUID + inode number + inode generation + the directory block up to the
+ fake entry enclosing the checksum field.
+ * - HTREE Nodes
+ - \_\_le32
+ - UUID + inode number + inode generation + all valid extents + HTREE tail.
+ The checksum field is set to zero.
+ * - Extents
+ - \_\_le32
+ - UUID + inode number + inode generation + the entire extent block up to
+ the checksum field.
+ * - Bitmaps
+ - \_\_le32 or \_\_le16
+ - UUID + the entire bitmap. Checksums are stored in the group descriptor,
+ and truncated if the group descriptor size is 32 bytes (i.e. not 64bit).
+ * - Inodes
+ - \_\_le32
+ - UUID + inode number + inode generation + the entire inode. The checksum
+ field is set to zero. Each inode has its own checksum.
+ * - Group Descriptors
+ - \_\_le16
+ - If metadata\_csum, then UUID + group number + the entire descriptor;
+ else if gdt\_csum, then crc16(UUID + group number + the entire
+ descriptor). In all cases, only the lower 16 bits are stored.
+
diff --git a/Documentation/filesystems/ext4/ondisk/directory.rst b/Documentation/filesystems/ext4/ondisk/directory.rst
new file mode 100644
index 000000000000..8fcba68c2884
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/directory.rst
@@ -0,0 +1,426 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Directory Entries
+-----------------
+
+In an ext4 filesystem, a directory is more or less a flat file that maps
+an arbitrary byte string (usually ASCII) to an inode number on the
+filesystem. There can be many directory entries across the filesystem
+that reference the same inode number--these are known as hard links, and
+that is why hard links cannot reference files on other filesystems. As
+such, directory entries are found by reading the data block(s)
+associated with a directory file for the particular directory entry that
+is desired.
+
+Linear (Classic) Directories
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, each directory lists its entries in an “almost-linear”
+array. I write “almost” because it's not a linear array in the memory
+sense, since directory entries are not split across filesystem blocks.
+Therefore, it is more accurate to say that a directory is a series of
+data blocks and that each block contains a linear array of directory
+entries. The end of each per-block array is signified by reaching the
+end of the block; the last entry in the block has a record length that
+takes it all the way to the end of the block. The end of the entire
+directory is of course signified by reaching the end of the file. Unused
+directory entries are signified by inode = 0. By default the filesystem
+uses ``struct ext4_dir_entry_2`` for directory entries if the
+“filetype” feature flag is set; otherwise, it uses
+``struct ext4_dir_entry``.
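+
+A sketch of the per-block walk just described, using the
+``ext4_dir_entry_2`` layout from the second table below (little-endian
+host assumed; most of the sanity checks a real implementation needs
+are omitted):
+
+.. code-block:: c
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    struct ext4_dir_entry_2 {
+        uint32_t inode;      /* __le32: 0 means "unused entry" */
+        uint16_t rec_len;    /* __le16: distance to the next entry */
+        uint8_t  name_len;
+        uint8_t  file_type;
+        char     name[255];
+    };
+
+    static void walk_dirent_block(const uint8_t *block, size_t bsize)
+    {
+        size_t off = 0;
+
+        while (off + 8 <= bsize) {
+            const struct ext4_dir_entry_2 *de =
+                (const void *)(block + off);
+
+            if (de->rec_len < 8)    /* corrupt block; stop */
+                break;
+            if (de->inode != 0)
+                printf("%.*s -> inode %u\n",
+                       de->name_len, de->name, de->inode);
+            off += de->rec_len;     /* the last entry's rec_len
+                                       reaches the end of the block */
+        }
+    }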
+
+The original directory entry format is ``struct ext4_dir_entry``, which
+is at most 263 bytes long, though on disk you'll need to reference
+``dirent.rec_len`` to know for sure.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - inode
+ - Number of the inode that this directory entry points to.
+ * - 0x4
+ - \_\_le16
+ - rec\_len
+ - Length of this directory entry. Must be a multiple of 4.
+ * - 0x6
+ - \_\_le16
+ - name\_len
+ - Length of the file name.
+ * - 0x8
+ - char
+ - name[EXT4\_NAME\_LEN]
+ - File name.
+
+Since file names cannot be longer than 255 bytes, the new directory
+entry format shortens the rec\_len field and uses the space for a file
+type flag, probably to avoid having to load every inode during directory
+tree traversal. This format is ``ext4_dir_entry_2``, which is at most
+263 bytes long, though on disk you'll need to reference
+``dirent.rec_len`` to know for sure.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - inode
+ - Number of the inode that this directory entry points to.
+ * - 0x4
+ - \_\_le16
+ - rec\_len
+ - Length of this directory entry.
+ * - 0x6
+ - \_\_u8
+ - name\_len
+ - Length of the file name.
+ * - 0x7
+ - \_\_u8
+ - file\_type
+ - File type code, see ftype_ table below.
+ * - 0x8
+ - char
+ - name[EXT4\_NAME\_LEN]
+ - File name.
+
+.. _ftype:
+
+The directory file type is one of the following values:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0
+ - Unknown.
+ * - 0x1
+ - Regular file.
+ * - 0x2
+ - Directory.
+ * - 0x3
+ - Character device file.
+ * - 0x4
+ - Block device file.
+ * - 0x5
+ - FIFO.
+ * - 0x6
+ - Socket.
+ * - 0x7
+ - Symbolic link.
+
+In order to add checksums to these classic directory blocks, a phony
+``struct ext4_dir_entry`` is placed at the end of each leaf block to
+hold the checksum. The directory entry is 12 bytes long. The inode
+number and name\_len fields are set to zero to fool old software into
+ignoring an apparently empty directory entry, and the checksum is stored
+in the place where the name normally goes. The structure is
+``struct ext4_dir_entry_tail``:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - det\_reserved\_zero1
+ - Inode number, which must be zero.
+ * - 0x4
+ - \_\_le16
+ - det\_rec\_len
+ - Length of this directory entry, which must be 12.
+ * - 0x6
+ - \_\_u8
+ - det\_reserved\_zero2
+ - Length of the file name, which must be zero.
+ * - 0x7
+ - \_\_u8
+ - det\_reserved\_ft
+ - File type, which must be 0xDE.
+ * - 0x8
+ - \_\_le32
+ - det\_checksum
+ - Directory leaf block checksum.
+
+The leaf directory block checksum is calculated against the FS UUID, the
+directory's inode number, the directory's inode generation number, and
+the entire directory entry block up to (but not including) the fake
+directory entry.
+
+Hash Tree Directories
+~~~~~~~~~~~~~~~~~~~~~
+
+A linear array of directory entries isn't great for performance, so a
+new feature was added to ext3 to provide a faster (but peculiar)
+balanced tree keyed off a hash of the directory entry name. If the
+EXT4\_INDEX\_FL (0x1000) flag is set in the inode, this directory uses a
+hashed btree (htree) to organize and find directory entries. For
+backwards read-only compatibility with ext2, this tree is actually
+hidden inside the directory file, masquerading as “empty” directory data
+blocks! It was stated previously that the end of the linear directory
+entry table was signified with an entry pointing to inode 0; this is
+(ab)used to fool the old linear-scan algorithm into thinking that the
+rest of the directory block is empty so that it moves on.
+
+The root of the tree always lives in the first data block of the
+directory. By ext2 custom, the '.' and '..' entries must appear at the
+beginning of this first block, so they are put here as two
+``struct ext4_dir_entry_2``\ s and not stored in the tree. The rest of
+the root node contains metadata about the tree and finally a hash->block
+map to find nodes that are lower in the htree. If
+``dx_root.info.indirect_levels`` is non-zero then the htree has two
+levels; the data block pointed to by the root node's map is an interior
+node, which is indexed by a minor hash. Interior nodes in this tree
+contain a zeroed out ``struct ext4_dir_entry_2`` followed by a
+minor\_hash->block map to find leaf nodes. Leaf nodes contain a linear
+array of all ``struct ext4_dir_entry_2``; all of these entries
+(presumably) hash to the same value. If there is an overflow, the
+entries simply overflow into the next leaf node, and the
+least-significant bit of the hash (in the interior node map) that gets
+us to this next leaf node is set.
+
+To traverse the directory as a htree, the code calculates the hash of
+the desired file name and uses it to find the corresponding block
+number. If the tree is flat, the block is a linear array of directory
+entries that can be searched; otherwise, the minor hash of the file name
+is computed and used against this second block to find the corresponding
+third block number. That third block number will be a linear array of
+directory entries.
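+
+A pseudocode-level sketch of that lookup (the on-disk ``dx_entry``
+layout is in the tables below; the kernel treats the header's own
+block pointer as entries[0] with an implicit hash of zero, which the
+binary search below mirrors):
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    struct dx_entry {
+        uint32_t hash;   /* __le32 */
+        uint32_t block;  /* __le32, block within the directory file */
+    };
+
+    /* Return the block of the last map entry whose hash <= key;
+     * map[0] is the header-as-entry covering hash 0. */
+    static uint32_t dx_find_block(const struct dx_entry *map,
+                                  unsigned count, uint32_t key)
+    {
+        unsigned lo = 1, hi = count - 1, at = 0;
+
+        while (lo <= hi) {
+            unsigned mid = (lo + hi) / 2;
+
+            if (map[mid].hash <= key) {
+                at = mid;
+                lo = mid + 1;
+            } else {
+                hi = mid - 1;
+            }
+        }
+        return map[at].block;
+    }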
+
+To traverse the directory as a linear array (such as the old code does),
+the code simply reads every data block in the directory. The blocks used
+for the htree will appear to have no entries (aside from '.' and '..')
+and so only the leaf nodes will appear to have any interesting content.
+
+The root of the htree is in ``struct dx_root``, which is the full length
+of a data block:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - dot.inode
+ - inode number of this directory.
+ * - 0x4
+ - \_\_le16
+ - dot.rec\_len
+ - Length of this record, 12.
+ * - 0x6
+ - u8
+ - dot.name\_len
+ - Length of the name, 1.
+ * - 0x7
+ - u8
+ - dot.file\_type
+ - File type of this entry, 0x2 (directory) (if the feature flag is set).
+ * - 0x8
+ - char
+ - dot.name[4]
+ - “.\\0\\0\\0”
+ * - 0xC
+ - \_\_le32
+ - dotdot.inode
+ - inode number of parent directory.
+ * - 0x10
+ - \_\_le16
+ - dotdot.rec\_len
+ - block\_size - 12. The record length is long enough to cover all htree
+ data.
+ * - 0x12
+ - u8
+ - dotdot.name\_len
+ - Length of the name, 2.
+ * - 0x13
+ - u8
+ - dotdot.file\_type
+ - File type of this entry, 0x2 (directory) (if the feature flag is set).
+ * - 0x14
+ - char
+ - dotdot\_name[4]
+ - “..\\0\\0”
+ * - 0x18
+ - \_\_le32
+ - struct dx\_root\_info.reserved\_zero
+ - Zero.
+ * - 0x1C
+ - u8
+ - struct dx\_root\_info.hash\_version
+ - Hash type, see dirhash_ table below.
+ * - 0x1D
+ - u8
+ - struct dx\_root\_info.info\_length
+ - Length of the tree information, 0x8.
+ * - 0x1E
+ - u8
+ - struct dx\_root\_info.indirect\_levels
+ - Depth of the htree. Cannot be larger than 3 if the INCOMPAT\_LARGEDIR
+ feature is set; cannot be larger than 2 otherwise.
+ * - 0x1F
+ - u8
+ - struct dx\_root\_info.unused\_flags
+ -
+ * - 0x20
+ - \_\_le16
+ - limit
+ - Maximum number of dx\_entries that can follow this header, plus 1 for
+ the header itself.
+ * - 0x22
+ - \_\_le16
+ - count
+ - Actual number of dx\_entries that follow this header, plus 1 for the
+ header itself.
+ * - 0x24
+ - \_\_le32
+ - block
+ - The block number (within the directory file) that goes with hash=0.
+ * - 0x28
+ - struct dx\_entry
+ - entries[0]
+ - As many 8-byte ``struct dx_entry`` as fits in the rest of the data block.
+
+.. _dirhash:
+
+The directory hash is one of the following values:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0
+ - Legacy.
+ * - 0x1
+ - Half MD4.
+ * - 0x2
+ - Tea.
+ * - 0x3
+ - Legacy, unsigned.
+ * - 0x4
+ - Half MD4, unsigned.
+ * - 0x5
+ - Tea, unsigned.
+
+Interior nodes of an htree are recorded as ``struct dx_node``, which is
+also the full length of a data block:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - fake.inode
+ - Zero, to make it look like this entry is not in use.
+ * - 0x4
+ - \_\_le16
+ - fake.rec\_len
+ - The size of the block, in order to hide all of the dx\_node data.
+ * - 0x6
+ - u8
+ - name\_len
+ - Zero. There is no name for this “unused” directory entry.
+ * - 0x7
+ - u8
+ - file\_type
+ - Zero. There is no file type for this “unused” directory entry.
+ * - 0x8
+ - \_\_le16
+ - limit
+ - Maximum number of dx\_entries that can follow this header, plus 1 for
+ the header itself.
+ * - 0xA
+ - \_\_le16
+ - count
+ - Actual number of dx\_entries that follow this header, plus 1 for the
+ header itself.
+ * - 0xE
+ - \_\_le32
+ - block
+ - The block number (within the directory file) that goes with the lowest
+ hash value of this block. This value is stored in the parent block.
+ * - 0x12
+ - struct dx\_entry
+ - entries[0]
+ - As many 8-byte ``struct dx_entry`` as fits in the rest of the data block.
+
+The hash maps that exist in both ``struct dx_root`` and
+``struct dx_node`` are recorded as ``struct dx_entry``, which is 8 bytes
+long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - hash
+ - Hash code.
+ * - 0x4
+ - \_\_le32
+ - block
+ - Block number (within the directory file, not filesystem blocks) of the
+ next node in the htree.
+
+(If you think this is all quite clever and peculiar, so does the
+author.)
+
+If metadata checksums are enabled, the last 8 bytes of the directory
+block (precisely the length of one dx\_entry) are used to store a
+``struct dx_tail``, which contains the checksum. The ``limit`` and
+``count`` entries in the dx\_root/dx\_node structures are adjusted as
+necessary to fit the dx\_tail into the block. If there is no space for
+the dx\_tail, the user is notified to run e2fsck -D to rebuild the
+directory index (which will ensure that there's space for the checksum).
+The dx\_tail structure is 8 bytes long and looks like this:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - u32
+ - dt\_reserved
+ - Zero.
+ * - 0x4
+ - \_\_le32
+ - dt\_checksum
+ - Checksum of the htree directory block.
+
+The checksum is calculated against the FS UUID, the htree index header
+(dx\_root or dx\_node), all of the htree indices (dx\_entry) that are in
+use, and the tail block (dx\_tail).
diff --git a/Documentation/filesystems/ext4/ondisk/dynamic.rst b/Documentation/filesystems/ext4/ondisk/dynamic.rst
new file mode 100644
index 000000000000..bb0c84333341
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/dynamic.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Dynamic Structures
+==================
+
+Dynamic metadata are created on the fly when files and blocks are
+allocated to files.
+
+.. include:: inodes.rst
+.. include:: ifork.rst
+.. include:: directory.rst
+.. include:: attributes.rst
diff --git a/Documentation/filesystems/ext4/ondisk/eainode.rst b/Documentation/filesystems/ext4/ondisk/eainode.rst
new file mode 100644
index 000000000000..ecc0d01a0a72
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/eainode.rst
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Large Extended Attribute Values
+-------------------------------
+
+To enable ext4 to store extended attribute values that do not fit in the
+inode or in the single extended attribute block attached to an inode,
+the EA\_INODE feature allows us to store the value in the data blocks of
+a regular file inode. This “EA inode” is linked only from the extended
+attribute name index and must not appear in a directory entry. The
+inode's i\_atime field is used to store a checksum of the xattr value;
+and i\_ctime/i\_version store a 64-bit reference count, which enables
+sharing of large xattr values between multiple owning inodes. For
+backward compatibility with older versions of this feature, the
+i\_mtime/i\_generation *may* store a back-reference to the inode number
+and i\_generation of the **one** owning inode (in cases where the EA
+inode is not referenced by multiple inodes) to verify that the EA inode
+is the correct one being accessed.
diff --git a/Documentation/filesystems/ext4/ondisk/globals.rst b/Documentation/filesystems/ext4/ondisk/globals.rst
new file mode 100644
index 000000000000..368bf7662b96
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/globals.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Global Structures
+=================
+
+The filesystem is sharded into a number of block groups, each of which
+has static metadata at fixed locations.
+
+.. include:: super.rst
+.. include:: group_descr.rst
+.. include:: bitmaps.rst
+.. include:: mmp.rst
+.. include:: journal.rst
diff --git a/Documentation/filesystems/ext4/ondisk/group_descr.rst b/Documentation/filesystems/ext4/ondisk/group_descr.rst
new file mode 100644
index 000000000000..759827e5d2cf
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/group_descr.rst
@@ -0,0 +1,170 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Block Group Descriptors
+-----------------------
+
+Each block group on the filesystem has one of these descriptors
+associated with it. As noted in the Layout section above, the group
+descriptors (if present) are the second item in the block group. The
+standard configuration is for each block group to contain a full copy of
+the block group descriptor table unless the sparse\_super feature flag
+is set.
+
+Notice how the group descriptor records the location of both bitmaps and
+the inode table (i.e. they can float). This means that within a block
+group, the only data structures with fixed locations are the superblock
+and the group descriptor table. The flex\_bg mechanism uses this
+property to group several block groups into a flex group and lay out all
+of the groups' bitmaps and inode tables into one long run in the first
+group of the flex group.
+
+If the meta\_bg feature flag is set, then several block groups are
+grouped together into a meta group. Note that in the meta\_bg case,
+however, the first, second, and last block groups within the larger
+meta group contain only group descriptors for the groups inside the
+meta group.
+
+flex\_bg and meta\_bg do not appear to be mutually exclusive features.
+
+In ext2, ext3, and ext4 (when the 64bit feature is not enabled), the
+block group descriptor was only 32 bytes long and therefore ends at
+bg\_checksum. On an ext4 filesystem with the 64bit feature enabled, the
+block group descriptor expands to at least the 64 bytes described below;
+the size is stored in the superblock.
+
+If gdt\_csum is set and metadata\_csum is not set, the block group
+checksum is the crc16 of the FS UUID, the group number, and the group
+descriptor structure. If metadata\_csum is set, then the block group
+checksum is the lower 16 bits of the checksum of the FS UUID, the group
+number, and the group descriptor structure. Both block and inode bitmap
+checksums are calculated against the FS UUID, the group number, and the
+entire bitmap.
+
+The block group descriptor is laid out in ``struct ext4_group_desc``.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - bg\_block\_bitmap\_lo
+ - Lower 32-bits of location of block bitmap.
+ * - 0x4
+ - \_\_le32
+ - bg\_inode\_bitmap\_lo
+ - Lower 32-bits of location of inode bitmap.
+ * - 0x8
+ - \_\_le32
+ - bg\_inode\_table\_lo
+ - Lower 32-bits of location of inode table.
+ * - 0xC
+ - \_\_le16
+ - bg\_free\_blocks\_count\_lo
+ - Lower 16-bits of free block count.
+ * - 0xE
+ - \_\_le16
+ - bg\_free\_inodes\_count\_lo
+ - Lower 16-bits of free inode count.
+ * - 0x10
+ - \_\_le16
+ - bg\_used\_dirs\_count\_lo
+ - Lower 16-bits of directory count.
+ * - 0x12
+ - \_\_le16
+ - bg\_flags
+ - Block group flags. See the bgflags_ table below.
+ * - 0x14
+ - \_\_le32
+ - bg\_exclude\_bitmap\_lo
+ - Lower 32-bits of location of snapshot exclusion bitmap.
+ * - 0x18
+ - \_\_le16
+ - bg\_block\_bitmap\_csum\_lo
+ - Lower 16-bits of the block bitmap checksum.
+ * - 0x1A
+ - \_\_le16
+ - bg\_inode\_bitmap\_csum\_lo
+ - Lower 16-bits of the inode bitmap checksum.
+ * - 0x1C
+ - \_\_le16
+ - bg\_itable\_unused\_lo
+ - Lower 16-bits of unused inode count. If set, we needn't scan past the
+ ``(sb.s_inodes_per_group - gdt.bg_itable_unused)``\ th entry in the
+ inode table for this group.
+ * - 0x1E
+ - \_\_le16
+ - bg\_checksum
+ - Group descriptor checksum; crc16(sb\_uuid+group+desc) if the
+ RO\_COMPAT\_GDT\_CSUM feature is set, or crc32c(sb\_uuid+group\_desc) &
+ 0xFFFF if the RO\_COMPAT\_METADATA\_CSUM feature is set.
+ * -
+ -
+ -
+ - These fields only exist if the 64bit feature is enabled and s_desc_size
+ > 32.
+ * - 0x20
+ - \_\_le32
+ - bg\_block\_bitmap\_hi
+ - Upper 32-bits of location of block bitmap.
+ * - 0x24
+ - \_\_le32
+ - bg\_inode\_bitmap\_hi
+ - Upper 32-bits of location of inode bitmap.
+ * - 0x28
+ - \_\_le32
+ - bg\_inode\_table\_hi
+ - Upper 32-bits of location of inode table.
+ * - 0x2C
+ - \_\_le16
+ - bg\_free\_blocks\_count\_hi
+ - Upper 16-bits of free block count.
+ * - 0x2E
+ - \_\_le16
+ - bg\_free\_inodes\_count\_hi
+ - Upper 16-bits of free inode count.
+ * - 0x30
+ - \_\_le16
+ - bg\_used\_dirs\_count\_hi
+ - Upper 16-bits of directory count.
+ * - 0x32
+ - \_\_le16
+ - bg\_itable\_unused\_hi
+ - Upper 16-bits of unused inode count.
+ * - 0x34
+ - \_\_le32
+ - bg\_exclude\_bitmap\_hi
+ - Upper 32-bits of location of snapshot exclusion bitmap.
+ * - 0x38
+ - \_\_le16
+ - bg\_block\_bitmap\_csum\_hi
+ - Upper 16-bits of the block bitmap checksum.
+ * - 0x3A
+ - \_\_le16
+ - bg\_inode\_bitmap\_csum\_hi
+ - Upper 16-bits of the inode bitmap checksum.
+ * - 0x3C
+ - \_\_u32
+ - bg\_reserved
+ - Padding to 64 bytes.
+
+.. _bgflags:
+
+Block group flags can be any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - inode table and bitmap are not initialized (EXT4\_BG\_INODE\_UNINIT).
+ * - 0x2
+ - block bitmap is not initialized (EXT4\_BG\_BLOCK\_UNINIT).
+ * - 0x4
+ - inode table is zeroed (EXT4\_BG\_INODE\_ZEROED).
diff --git a/Documentation/filesystems/ext4/ondisk/ifork.rst b/Documentation/filesystems/ext4/ondisk/ifork.rst
new file mode 100644
index 000000000000..5dbe3b2b121a
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/ifork.rst
@@ -0,0 +1,194 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+The Contents of inode.i\_block
+------------------------------
+
+Depending on the type of file an inode describes, the 60 bytes of
+storage in ``inode.i_block`` can be used in different ways. In general,
+regular files and directories will use it for file block indexing
+information, and special files will use it for special purposes.
+
+Symbolic Links
+~~~~~~~~~~~~~~
+
+The target of a symbolic link will be stored in this field if the target
+string is less than 60 bytes long. Otherwise, either extents or block
+maps will be used to allocate data blocks to store the link target.
+
+Direct/Indirect Block Addressing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In ext2/3, file block numbers were mapped to logical block numbers by
+means of an (up to) three level 1-1 block map. To find the logical block
+that stores a particular file block, the code would navigate through
+this increasingly complicated structure. Notice that there is neither a
+magic number nor a checksum to provide any level of confidence that the
+block isn't full of garbage.
+
+.. ifconfig:: builder != 'latex'
+
+ .. include:: blockmap.rst
+
+.. ifconfig:: builder == 'latex'
+
+ [Table omitted because LaTeX doesn't support nested tables.]
+
+Note that with this block mapping scheme, it is necessary to fill out a
+lot of mapping data even for a large contiguous file! This inefficiency
+led to the creation of the extent mapping scheme, discussed below.
+
+Notice also that a file using this mapping scheme cannot be placed
+higher than 2^32 blocks.
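+
+A sketch of the level selection implied by the mapping scheme above,
+with ``addrs = block_size / 4`` block pointers per map block (1024 for
+4KiB blocks):
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    enum map_level { DIRECT, IND1, IND2, IND3, OUT_OF_RANGE };
+
+    static enum map_level block_level(uint64_t fblock, uint64_t addrs)
+    {
+        if (fblock < 12)
+            return DIRECT;                   /* i_block[0..11] */
+        fblock -= 12;
+        if (fblock < addrs)
+            return IND1;                     /* via i_block[12] */
+        fblock -= addrs;
+        if (fblock < addrs * addrs)
+            return IND2;                     /* via i_block[13] */
+        fblock -= addrs * addrs;
+        if (fblock < addrs * addrs * addrs)
+            return IND3;                     /* via i_block[14] */
+        return OUT_OF_RANGE;
+    }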
+
+Extent Tree
+~~~~~~~~~~~
+
+In ext4, the file to logical block map has been replaced with an extent
+tree. Under the old scheme, allocating a contiguous run of 1,000 blocks
+requires an indirect block to map all 1,000 entries; with extents, the
+mapping is reduced to a single ``struct ext4_extent`` with
+``ee_len = 1000``. If flex\_bg is enabled, it is possible to allocate
+very large files with a single extent, at a considerable reduction in
+metadata block use, and some improvement in disk efficiency. The inode
+must have the extents flag (0x80000) set for this feature to be in
+use.
+
+Extents are arranged as a tree. Each node of the tree begins with a
+``struct ext4_extent_header``. If the node is an interior node
+(``eh.eh_depth`` > 0), the header is followed by ``eh.eh_entries``
+instances of ``struct ext4_extent_idx``; each of these index entries
+points to a block containing more nodes in the extent tree. If the node
+is a leaf node (``eh.eh_depth == 0``), then the header is followed by
+``eh.eh_entries`` instances of ``struct ext4_extent``; these instances
+point to the file's data blocks. The root node of the extent tree is
+stored in ``inode.i_block``, which allows for the first four extents to
+be recorded without the use of extra metadata blocks.
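+
+A sketch of reading the leaf-rooted case straight out of
+``inode.i_block`` (structure layouts per the tables below; magic
+0xF30A; little-endian host assumed, and interior-node descent omitted):
+
+.. code-block:: c
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    struct ext4_extent_header {
+        uint16_t eh_magic, eh_entries, eh_max, eh_depth;
+        uint32_t eh_generation;
+    };
+
+    struct ext4_extent {
+        uint32_t ee_block;
+        uint16_t ee_len, ee_start_hi;
+        uint32_t ee_start_lo;
+    };
+
+    static void dump_extent_root(const uint8_t i_block[60])
+    {
+        const struct ext4_extent_header *eh = (const void *)i_block;
+        const struct ext4_extent *ex = (const void *)(eh + 1);
+
+        if (eh->eh_magic != 0xF30A || eh->eh_depth != 0)
+            return;   /* interior nodes would need a block walk */
+
+        /* 60 bytes = 12-byte header + up to four 12-byte extents. */
+        for (int i = 0; i < eh->eh_entries; i++, ex++) {
+            int unwritten = ex->ee_len > 32768;
+            unsigned len = unwritten ? ex->ee_len - 32768 : ex->ee_len;
+            uint64_t start = ((uint64_t)ex->ee_start_hi << 32) |
+                             ex->ee_start_lo;
+
+            printf("file block %u -> %llu (+%u)%s\n", ex->ee_block,
+                   (unsigned long long)start, len,
+                   unwritten ? " [unwritten]" : "");
+        }
+    }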
+
+The extent tree header is recorded in ``struct ext4_extent_header``,
+which is 12 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le16
+ - eh\_magic
+ - Magic number, 0xF30A.
+ * - 0x2
+ - \_\_le16
+ - eh\_entries
+ - Number of valid entries following the header.
+ * - 0x4
+ - \_\_le16
+ - eh\_max
+ - Maximum number of entries that could follow the header.
+ * - 0x6
+ - \_\_le16
+ - eh\_depth
+ - Depth of this extent node in the extent tree. 0 = this extent node
+ points to data blocks; otherwise, this extent node points to other
+ extent nodes. The extent tree can be at most 5 levels deep: a logical
+ block number can be at most ``2^32``, and the smallest ``n`` that
+ satisfies ``4*(((blocksize - 12)/12)^n) >= 2^32`` is 5.
+ * - 0x8
+ - \_\_le32
+ - eh\_generation
+ - Generation of the tree. (Used by Lustre, but not standard ext4).
+
+Internal nodes of the extent tree, also known as index nodes, are
+recorded as ``struct ext4_extent_idx``, and are 12 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - ei\_block
+ - This index node covers file blocks from 'block' onward.
+ * - 0x4
+ - \_\_le32
+ - ei\_leaf\_lo
+ - Lower 32-bits of the block number of the extent node that is the next
+ level lower in the tree. The tree node pointed to can be either another
+ internal node or a leaf node, described below.
+ * - 0x8
+ - \_\_le16
+ - ei\_leaf\_hi
+ - Upper 16-bits of the previous field.
+ * - 0xA
+ - \_\_u16
+ - ei\_unused
+ -
+
+Leaf nodes of the extent tree are recorded as ``struct ext4_extent``,
+and are also 12 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - ee\_block
+ - First file block number that this extent covers.
+ * - 0x4
+ - \_\_le16
+ - ee\_len
+ - Number of blocks covered by extent. If the value of this field is <=
+ 32768, the extent is initialized. If the value of the field is > 32768,
+ the extent is uninitialized and the actual extent length is ``ee_len`` -
+ 32768. Therefore, the maximum length of an initialized extent is 32768
+ blocks, and the maximum length of an uninitialized extent is 32767.
+ * - 0x6
+ - \_\_le16
+ - ee\_start\_hi
+ - Upper 16-bits of the block number to which this extent points.
+ * - 0x8
+ - \_\_le32
+ - ee\_start\_lo
+ - Lower 32-bits of the block number to which this extent points.
+
+Prior to the introduction of metadata checksums, the extent header +
+extent entries always left at least 4 bytes of unallocated space at the
+end of each extent tree data block (because (2^x % 12) >= 4). Therefore,
+the 32-bit checksum is inserted into this space. The 4 extents in the
+inode do not need checksumming, since the inode is already checksummed.
+The checksum is calculated against the FS UUID, the inode number, the
+inode generation, and the entire extent block leading up to (but not
+including) the checksum itself.
+
+``struct ext4_extent_tail`` is 4 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - eb\_checksum
+ - Checksum of the extent block, crc32c(uuid+inum+igeneration+extentblock)
+
+Inline Data
+~~~~~~~~~~~
+
+If the inline data feature is enabled for the filesystem and the flag is
+set for the inode, it is possible that the first 60 bytes of the file
+data are stored here.
diff --git a/Documentation/filesystems/ext4/ondisk/index.rst b/Documentation/filesystems/ext4/ondisk/index.rst
new file mode 100644
index 000000000000..f7d082c3a435
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/index.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+Data Structures and Algorithms
+==============================
+.. include:: about.rst
+.. include:: overview.rst
+.. include:: globals.rst
+.. include:: dynamic.rst
diff --git a/Documentation/filesystems/ext4/ondisk/inlinedata.rst b/Documentation/filesystems/ext4/ondisk/inlinedata.rst
new file mode 100644
index 000000000000..d1075178ce0b
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/inlinedata.rst
@@ -0,0 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Inline Data
+-----------
+
+The inline data feature was designed to handle the case that a file's
+data is so tiny that it readily fits inside the inode, which
+(theoretically) reduces disk block consumption and reduces seeks. If the
+file is smaller than 60 bytes, then the data are stored inline in
+``inode.i_block``. If the rest of the file would fit inside the extended
+attribute space, then it might be found as an extended attribute
+“system.data” within the inode body (“ibody EA”). This of course
+constrains the amount of extended attributes one can attach to an inode.
+If the data size increases beyond i\_block + ibody EA, a regular block
+is allocated and the contents moved to that block.
+
+Pending a change to compact the extended attribute key used to store
+inline data, one ought to be able to store 160 bytes of data in a
+256-byte inode (as of June 2015, when i\_extra\_isize is 28). Prior to
+that, the limit was 156 bytes due to inefficient use of inode space.
+
+The inline data feature requires the presence of an extended attribute
+for “system.data”, even if the attribute value is zero length.
+
+Inline Directories
+~~~~~~~~~~~~~~~~~~
+
+The first four bytes of i\_block are the inode number of the parent
+directory. Following that is a 56-byte space for an array of directory
+entries; see ``struct ext4_dir_entry``. If there is a “system.data”
+attribute in the inode body, the EA value is an array of
+``struct ext4_dir_entry`` as well. Note that for inline directories, the
+i\_block and EA space are treated as separate dirent blocks; directory
+entries cannot span the two.
+
+Inline directory entries are not checksummed, as the inode checksum
+should protect all inline data contents.
diff --git a/Documentation/filesystems/ext4/ondisk/inodes.rst b/Documentation/filesystems/ext4/ondisk/inodes.rst
new file mode 100644
index 000000000000..655ce898f3f5
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/inodes.rst
@@ -0,0 +1,575 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Index Nodes
+-----------
+
+In a regular UNIX filesystem, the inode stores all the metadata
+pertaining to the file (time stamps, block maps, extended attributes,
+etc), not the directory entry. To find the information associated with a
+file, one must traverse the directory files to find the directory entry
+associated with a file, then load the inode to find the metadata for
+that file. ext4 appears to cheat (for performance reasons) a little bit
+by storing a copy of the file type (normally stored in the inode) in the
+directory entry. (Compare all this to FAT, which stores all the file
+information directly in the directory entry, but does not support hard
+links and is in general more seek-happy than ext4 due to its simpler
+block allocator and extensive use of linked lists.)
+
+The inode table is a linear array of ``struct ext4_inode``. The table is
+sized to have enough blocks to store at least
+``sb.s_inode_size * sb.s_inodes_per_group`` bytes. The number of the
+block group containing an inode can be calculated as
+``(inode_number - 1) / sb.s_inodes_per_group``, and the offset into the
+group's table is ``(inode_number - 1) % sb.s_inodes_per_group``. There
+is no inode 0.
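+
+A sketch of that arithmetic, carried one step further to the exact
+block and byte (names follow the superblock and block group descriptor
+fields documented here; ``bg_inode_table`` is the table start recorded
+in the group's descriptor):
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    struct inode_loc {
+        uint64_t group;   /* block group holding the inode */
+        uint64_t block;   /* filesystem block of the table slot */
+        uint32_t offset;  /* byte offset within that block */
+    };
+
+    static struct inode_loc locate_inode(uint32_t ino,
+                                         uint32_t s_inodes_per_group,
+                                         uint16_t s_inode_size,
+                                         uint32_t block_size,
+                                         uint64_t bg_inode_table)
+    {
+        struct inode_loc loc;
+        uint32_t index = (ino - 1) % s_inodes_per_group;
+        uint64_t byte = (uint64_t)index * s_inode_size;
+
+        loc.group = (ino - 1) / s_inodes_per_group;
+        loc.block = bg_inode_table + byte / block_size;
+        loc.offset = byte % block_size;
+        return loc;
+    }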
+
+The inode checksum is calculated against the FS UUID, the inode number,
+and the inode structure itself.
+
+The inode table entry is laid out in ``struct ext4_inode``.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le16
+ - i\_mode
+ - File mode. See the table i_mode_ below.
+ * - 0x2
+ - \_\_le16
+ - i\_uid
+ - Lower 16-bits of Owner UID.
+ * - 0x4
+ - \_\_le32
+ - i\_size\_lo
+ - Lower 32-bits of size in bytes.
+ * - 0x8
+ - \_\_le32
+ - i\_atime
+ - Last access time, in seconds since the epoch. However, if the EA\_INODE
+ inode flag is set, this inode stores an extended attribute value and
+ this field contains the checksum of the value.
+ * - 0xC
+ - \_\_le32
+ - i\_ctime
+ - Last inode change time, in seconds since the epoch. However, if the
+ EA\_INODE inode flag is set, this inode stores an extended attribute
+ value and this field contains the lower 32 bits of the attribute value's
+ reference count.
+ * - 0x10
+ - \_\_le32
+ - i\_mtime
+ - Last data modification time, in seconds since the epoch. However, if the
+ EA\_INODE inode flag is set, this inode stores an extended attribute
+ value and this field contains the number of the inode that owns the
+ extended attribute.
+ * - 0x14
+ - \_\_le32
+ - i\_dtime
+ - Deletion Time, in seconds since the epoch.
+ * - 0x18
+ - \_\_le16
+ - i\_gid
+ - Lower 16-bits of GID.
+ * - 0x1A
+ - \_\_le16
+ - i\_links\_count
+ - Hard link count. Normally, ext4 does not permit an inode to have more
+ than 65,000 hard links. This applies to files as well as directories,
+ which means that there cannot be more than 64,998 subdirectories in a
+ directory (each subdirectory's '..' entry counts as a hard link, as does
+ the '.' entry in the directory itself). With the DIR\_NLINK feature
+ enabled, ext4 supports more than 64,998 subdirectories by setting this
+ field to 1 to indicate that the number of hard links is not known.
+ * - 0x1C
+ - \_\_le32
+ - i\_blocks\_lo
+ - Lower 32-bits of “block” count. If the huge\_file feature flag is not
+ set on the filesystem, the file consumes ``i_blocks_lo`` 512-byte blocks
+ on disk. If huge\_file is set and EXT4\_HUGE\_FILE\_FL is NOT set in
+ ``inode.i_flags``, then the file consumes ``i_blocks_lo + (i_blocks_hi
+ << 32)`` 512-byte blocks on disk. If huge\_file is set and
+ EXT4\_HUGE\_FILE\_FL IS set in ``inode.i_flags``, then this file
+ consumes (``i_blocks_lo + i_blocks_hi`` << 32) filesystem blocks on
+ disk.
+ * - 0x20
+ - \_\_le32
+ - i\_flags
+ - Inode flags. See the table i_flags_ below.
+ * - 0x24
+ - 4 bytes
+ - i\_osd1
+ - See the table i_osd1_ for more details.
+ * - 0x28
+ - 60 bytes
+ - i\_block[EXT4\_N\_BLOCKS=15]
+ - Block map or extent tree. See the section “The Contents of inode.i\_block”.
+ * - 0x64
+ - \_\_le32
+ - i\_generation
+ - File version (for NFS).
+ * - 0x68
+ - \_\_le32
+ - i\_file\_acl\_lo
+ - Lower 32-bits of extended attribute block. ACLs are of course one of
+ many possible extended attributes; I think the name of this field is a
+ result of the first use of extended attributes being for ACLs.
+ * - 0x6C
+ - \_\_le32
+ - i\_size\_high / i\_dir\_acl
+ - Upper 32-bits of file/directory size. In ext2/3 this field was named
+ i\_dir\_acl, though it was usually set to zero and never used.
+ * - 0x70
+ - \_\_le32
+ - i\_obso\_faddr
+ - (Obsolete) fragment address.
+ * - 0x74
+ - 12 bytes
+ - i\_osd2
+ - See the table i_osd2_ for more details.
+ * - 0x80
+ - \_\_le16
+ - i\_extra\_isize
+ - Size of this inode - 128. Alternately, the size of the extended inode
+ fields beyond the original ext2 inode, including this field.
+ * - 0x82
+ - \_\_le16
+ - i\_checksum\_hi
+ - Upper 16-bits of the inode checksum.
+ * - 0x84
+ - \_\_le32
+ - i\_ctime\_extra
+ - Extra change time bits. This provides sub-second precision. See Inode
+ Timestamps section.
+ * - 0x88
+ - \_\_le32
+ - i\_mtime\_extra
+ - Extra modification time bits. This provides sub-second precision.
+ * - 0x8C
+ - \_\_le32
+ - i\_atime\_extra
+ - Extra access time bits. This provides sub-second precision.
+ * - 0x90
+ - \_\_le32
+ - i\_crtime
+ - File creation time, in seconds since the epoch.
+ * - 0x94
+ - \_\_le32
+ - i\_crtime\_extra
+ - Extra file creation time bits. This provides sub-second precision.
+ * - 0x98
+ - \_\_le32
+ - i\_version\_hi
+ - Upper 32-bits for version number.
+ * - 0x9C
+ - \_\_le32
+ - i\_projid
+ - Project ID.
+
+.. _i_mode:
+
+The ``i_mode`` value is a combination of the following flags:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - S\_IXOTH (Others may execute)
+ * - 0x2
+ - S\_IWOTH (Others may write)
+ * - 0x4
+ - S\_IROTH (Others may read)
+ * - 0x8
+ - S\_IXGRP (Group members may execute)
+ * - 0x10
+ - S\_IWGRP (Group members may write)
+ * - 0x20
+ - S\_IRGRP (Group members may read)
+ * - 0x40
+ - S\_IXUSR (Owner may execute)
+ * - 0x80
+ - S\_IWUSR (Owner may write)
+ * - 0x100
+ - S\_IRUSR (Owner may read)
+ * - 0x200
+ - S\_ISVTX (Sticky bit)
+ * - 0x400
+ - S\_ISGID (Set GID)
+ * - 0x800
+ - S\_ISUID (Set UID)
+ * -
+ - These are mutually-exclusive file types:
+ * - 0x1000
+ - S\_IFIFO (FIFO)
+ * - 0x2000
+ - S\_IFCHR (Character device)
+ * - 0x4000
+ - S\_IFDIR (Directory)
+ * - 0x6000
+ - S\_IFBLK (Block device)
+ * - 0x8000
+ - S\_IFREG (Regular file)
+ * - 0xA000
+ - S\_IFLNK (Symbolic link)
+ * - 0xC000
+ - S\_IFSOCK (Socket)
+
+.. _i_flags:
+
+The ``i_flags`` field is a combination of these values:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - This file requires secure deletion (EXT4\_SECRM\_FL). (not implemented)
+ * - 0x2
+ - This file should be preserved, should undeletion be desired
+ (EXT4\_UNRM\_FL). (not implemented)
+ * - 0x4
+ - File is compressed (EXT4\_COMPR\_FL). (not really implemented)
+ * - 0x8
+ - All writes to the file must be synchronous (EXT4\_SYNC\_FL).
+ * - 0x10
+ - File is immutable (EXT4\_IMMUTABLE\_FL).
+ * - 0x20
+ - File can only be appended (EXT4\_APPEND\_FL).
+ * - 0x40
+ - The dump(1) utility should not dump this file (EXT4\_NODUMP\_FL).
+ * - 0x80
+ - Do not update access time (EXT4\_NOATIME\_FL).
+ * - 0x100
+ - Dirty compressed file (EXT4\_DIRTY\_FL). (not used)
+ * - 0x200
+ - File has one or more compressed clusters (EXT4\_COMPRBLK\_FL). (not used)
+ * - 0x400
+ - Do not compress file (EXT4\_NOCOMPR\_FL). (not used)
+ * - 0x800
+ - Encrypted inode (EXT4\_ENCRYPT\_FL). This bit value previously was
+ EXT4\_ECOMPR\_FL (compression error), which was never used.
+ * - 0x1000
+ - Directory has hashed indexes (EXT4\_INDEX\_FL).
+ * - 0x2000
+ - AFS magic directory (EXT4\_IMAGIC\_FL).
+ * - 0x4000
+ - File data must always be written through the journal
+ (EXT4\_JOURNAL\_DATA\_FL).
+ * - 0x8000
+ - File tail should not be merged (EXT4\_NOTAIL\_FL). (not used by ext4)
+ * - 0x10000
+ - All directory entry data should be written synchronously (see
+ ``dirsync``) (EXT4\_DIRSYNC\_FL).
+ * - 0x20000
+ - Top of directory hierarchy (EXT4\_TOPDIR\_FL).
+ * - 0x40000
+ - This is a huge file (EXT4\_HUGE\_FILE\_FL).
+ * - 0x80000
+ - Inode uses extents (EXT4\_EXTENTS\_FL).
+ * - 0x200000
+ - Inode stores a large extended attribute value in its data blocks
+ (EXT4\_EA\_INODE\_FL).
+ * - 0x400000
+ - This file has blocks allocated past EOF (EXT4\_EOFBLOCKS\_FL).
+ (deprecated)
+ * - 0x01000000
+ - Inode is a snapshot (``EXT4_SNAPFILE_FL``). (not in mainline)
+ * - 0x04000000
+ - Snapshot is being deleted (``EXT4_SNAPFILE_DELETED_FL``). (not in
+ mainline)
+ * - 0x08000000
+ - Snapshot shrink has completed (``EXT4_SNAPFILE_SHRUNK_FL``). (not in
+ mainline)
+ * - 0x10000000
+ - Inode has inline data (EXT4\_INLINE\_DATA\_FL).
+ * - 0x20000000
+ - Create children with the same project ID (EXT4\_PROJINHERIT\_FL).
+ * - 0x80000000
+ - Reserved for ext4 library (EXT4\_RESERVED\_FL).
+ * -
+ - Aggregate flags:
+ * - 0x4BDFFF
+ - User-visible flags.
+ * - 0x4B80FF
+ - User-modifiable flags. Note that while EXT4\_JOURNAL\_DATA\_FL and
+ EXT4\_EXTENTS\_FL can be set with setattr, they are not in the kernel's
+ EXT4\_FL\_USER\_MODIFIABLE mask, since it needs to handle the setting of
+ these flags in a special manner and they are masked out of the set of
+ flags that are saved directly to i\_flags.
+
+.. _i_osd1:
+
+The ``osd1`` field has multiple meanings depending on the creator:
+
+Linux:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - l\_i\_version
+ - Inode version. However, if the EA\_INODE inode flag is set, this inode
+ stores an extended attribute value and this field contains the upper 32
+ bits of the attribute value's reference count.
+
+Hurd:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - h\_i\_translator
+ - ??
+
+Masix:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - m\_i\_reserved
+ - ??
+
+.. _i_osd2:
+
+The ``osd2`` field has multiple meanings depending on the filesystem creator:
+
+Linux:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le16
+ - l\_i\_blocks\_high
+ - Upper 16-bits of the block count. Please see the note attached to
+ i\_blocks\_lo.
+ * - 0x2
+ - \_\_le16
+ - l\_i\_file\_acl\_high
+ - Upper 16-bits of the extended attribute block (historically, the file
+ ACL location). See the Extended Attributes section below.
+ * - 0x4
+ - \_\_le16
+ - l\_i\_uid\_high
+ - Upper 16-bits of the Owner UID.
+ * - 0x6
+ - \_\_le16
+ - l\_i\_gid\_high
+ - Upper 16-bits of the GID.
+ * - 0x8
+ - \_\_le16
+ - l\_i\_checksum\_lo
+ - Lower 16-bits of the inode checksum.
+ * - 0xA
+ - \_\_le16
+ - l\_i\_reserved
+ - Unused.
+
+Hurd:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le16
+ - h\_i\_reserved1
+ - ??
+ * - 0x2
+ - \_\_u16
+ - h\_i\_mode\_high
+ - Upper 16-bits of the file mode.
+ * - 0x4
+ - \_\_le16
+ - h\_i\_uid\_high
+ - Upper 16-bits of the Owner UID.
+ * - 0x6
+ - \_\_le16
+ - h\_i\_gid\_high
+ - Upper 16-bits of the GID.
+ * - 0x8
+ - \_\_u32
+ - h\_i\_author
+ - Author code?
+
+Masix:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le16
+ - h\_i\_reserved1
+ - ??
+ * - 0x2
+ - \_\_u16
+ - m\_i\_file\_acl\_high
+ - Upper 16-bits of the extended attribute block (historically, the file
+ ACL location).
+ * - 0x4
+ - \_\_u32
+ - m\_i\_reserved2[2]
+ - ??
+
+Inode Size
+~~~~~~~~~~
+
+In ext2 and ext3, the inode structure size was fixed at 128 bytes
+(``EXT2_GOOD_OLD_INODE_SIZE``) and each inode had a disk record size of
+128 bytes. Starting with ext4, it is possible to allocate a larger
+on-disk inode at format time for all inodes in the filesystem to provide
+space beyond the end of the original ext2 inode. The on-disk inode
+record size is recorded in the superblock as ``s_inode_size``. The
+number of bytes actually used by struct ext4\_inode beyond the original
+128-byte ext2 inode is recorded in the ``i_extra_isize`` field for each
+inode, which allows struct ext4\_inode to grow for a new kernel without
+having to upgrade all of the on-disk inodes. Access to fields beyond
+EXT2\_GOOD\_OLD\_INODE\_SIZE should be verified to be within
+``i_extra_isize``. By default, ext4 inode records are 256 bytes, and (as
+of October 2013) the inode structure is 156 bytes
+(``i_extra_isize = 28``). The extra space between the end of the inode
+structure and the end of the inode record can be used to store extended
+attributes. Each inode record can be as large as the filesystem block
+size, though this is not terribly efficient.
+
+Finding an Inode
+~~~~~~~~~~~~~~~~
+
+Each block group contains ``sb->s_inodes_per_group`` inodes. Because
+inode 0 is defined not to exist, this formula can be used to find the
+block group that an inode lives in:
+``bg = (inode_num - 1) / sb->s_inodes_per_group``. The particular inode
+can be found within the block group's inode table at
+``index = (inode_num - 1) % sb->s_inodes_per_group``. To get the byte
+address within the inode table, use
+``offset = index * sb->s_inode_size``.
+
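+As a rough illustration (a sketch, not kernel code; the variables stand
+in for the superblock fields named above)::
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    int main(void)
+    {
+        uint32_t inodes_per_group = 8192; /* sb->s_inodes_per_group */
+        uint16_t inode_size = 256;        /* sb->s_inode_size */
+        uint32_t inode_num = 12;          /* the inode to locate */
+
+        uint32_t bg = (inode_num - 1) / inodes_per_group;
+        uint32_t index = (inode_num - 1) % inodes_per_group;
+        uint64_t offset = (uint64_t)index * inode_size;
+
+        /* Prints: group 0, index 11, byte offset 2816 */
+        printf("group %u, index %u, byte offset %llu\n",
+               bg, index, (unsigned long long)offset);
+        return 0;
+    }
+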
+Inode Timestamps
+~~~~~~~~~~~~~~~~
+
+Four timestamps are recorded in the lower 128 bytes of the inode
+structure -- inode change time (ctime), access time (atime), data
+modification time (mtime), and deletion time (dtime). The four fields
+are 32-bit signed integers that represent seconds since the Unix epoch
+(1970-01-01 00:00:00 GMT), which means that the fields will overflow in
+January 2038. For inodes that are not linked from any directory but are
+still open (orphan inodes), the dtime field is overloaded for use with
+the orphan list. The superblock field ``s_last_orphan`` points to the
+first inode in the orphan list; dtime is then the number of the next
+orphaned inode, or zero if there are no more orphans.
+
+If the inode structure size ``sb->s_inode_size`` is larger than 128
+bytes and the ``i_inode_extra`` field is large enough to encompass the
+respective ``i_[cma]time_extra`` field, the ctime, atime, and mtime
+inode fields are widened to 64 bits. Within this “extra” 32-bit field,
+the lower two bits are used to extend the 32-bit seconds field to be 34
+bit wide; the upper 30 bits are used to provide nanosecond timestamp
+accuracy. Therefore, timestamps should not overflow until May 2446.
+dtime was not widened. There is also a fifth timestamp to record inode
+creation time (crtime); this field is 64-bits wide and decoded in the
+same manner as 64-bit [cma]time. Neither crtime nor dtime are accessible
+through the regular stat() interface, though debugfs will report them.
+
+We use the 32-bit signed time value plus (2^32 \* (extra epoch bits)).
+In other words:
+
+.. list-table::
+ :widths: 20 20 20 20 20
+ :header-rows: 1
+
+ * - Extra epoch bits
+ - MSB of 32-bit time
+ - Adjustment for signed 32-bit to 64-bit tv\_sec
+ - Decoded 64-bit tv\_sec
+ - valid time range
+ * - 0 0
+ - 1
+ - 0
+ - ``-0x80000000 - -0x00000001``
+ - 1901-12-13 to 1969-12-31
+ * - 0 0
+ - 0
+ - 0
+ - ``0x000000000 - 0x07fffffff``
+ - 1970-01-01 to 2038-01-19
+ * - 0 1
+ - 1
+ - 0x100000000
+ - ``0x080000000 - 0x0ffffffff``
+ - 2038-01-19 to 2106-02-07
+ * - 0 1
+ - 0
+ - 0x100000000
+ - ``0x100000000 - 0x17fffffff``
+ - 2106-02-07 to 2174-02-25
+ * - 1 0
+ - 1
+ - 0x200000000
+ - ``0x180000000 - 0x1ffffffff``
+ - 2174-02-25 to 2242-03-16
+ * - 1 0
+ - 0
+ - 0x200000000
+ - ``0x200000000 - 0x27fffffff``
+ - 2242-03-16 to 2310-04-04
+ * - 1 1
+ - 1
+ - 0x300000000
+ - ``0x280000000 - 0x2ffffffff``
+ - 2310-04-04 to 2378-04-22
+ * - 1 1
+ - 0
+ - 0x300000000
+ - ``0x300000000 - 0x37fffffff``
+ - 2378-04-22 to 2446-05-10
+
+This is a somewhat odd encoding since there are effectively seven times
+as many positive values as negative values. There have also been
+long-standing bugs decoding and encoding dates beyond 2038, which don't
+seem to be fixed as of kernel 3.12 and e2fsprogs 1.42.8. 64-bit kernels
+incorrectly use the extra epoch bits 1,1 for dates between 1901 and
+1970. At some point the kernel will be fixed and e2fsck will fix this
+situation, assuming that it is run before 2310.
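+
+A decoding sketch for the widened timestamps (an illustration of the
+encoding described above, not the kernel's implementation)::
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    /* Combine i_Xtime and i_Xtime_extra into a 64-bit tv_sec plus
+     * nanoseconds. */
+    static void decode_time(uint32_t seconds, uint32_t extra,
+                            int64_t *tv_sec, uint32_t *tv_nsec)
+    {
+        /* The two low bits of "extra" widen the signed seconds field
+         * to 34 bits... */
+        *tv_sec = (int64_t)(int32_t)seconds +
+                  ((int64_t)(extra & 0x3) << 32);
+        /* ...and the upper 30 bits carry the nanoseconds. */
+        *tv_nsec = extra >> 2;
+    }
+
+    int main(void)
+    {
+        int64_t sec;
+        uint32_t nsec;
+
+        /* Epoch bits 0 1 with the sign bit set decodes to 2^31,
+         * i.e. just past the classic 2038 limit. */
+        decode_time(0x80000000U, 0x1, &sec, &nsec);
+        printf("tv_sec=%lld tv_nsec=%u\n", (long long)sec, nsec);
+        return 0;
+    }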
diff --git a/Documentation/filesystems/ext4/ondisk/journal.rst b/Documentation/filesystems/ext4/ondisk/journal.rst
new file mode 100644
index 000000000000..e7031af86876
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/journal.rst
@@ -0,0 +1,611 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Journal (jbd2)
+--------------
+
+Introduced in ext3, the ext4 filesystem employs a journal to protect the
+filesystem against corruption in the case of a system crash. A small
+contiguous region of disk (default 128MiB) is reserved inside the
+filesystem as a place to land “important” data writes on-disk as quickly
+as possible. Once the important data transaction is fully written to the
+disk and flushed from the disk write cache, a record of the data being
+committed is also written to the journal. At some later point in time,
+the journal code writes the transactions to their final locations on
+disk (this could involve a lot of seeking or a lot of small
+read-write-erases) before erasing the commit record. Should the system
+crash during the second slow write, the journal can be replayed all the
+way to the latest commit record, guaranteeing the atomicity of whatever
+gets written through the journal to the disk. The effect of this is to
+guarantee that the filesystem does not become stuck midway through a
+metadata update.
+
+For performance reasons, ext4 by default only writes filesystem metadata
+through the journal. This means that file data blocks are *not*
+guaranteed to be in any consistent state after a crash. If this default
+guarantee level (``data=ordered``) is not satisfactory, there is a mount
+option to control journal behavior. If ``data=journal``, all data and
+metadata are written to disk through the journal. This is slower but
+safest. If ``data=writeback``, dirty data blocks are not flushed to the
+disk before the metadata are written to disk through the journal.
+
+The journal inode is typically inode 8. The first 68 bytes of the
+journal inode are replicated in the ext4 superblock. The journal itself
+is a normal (but hidden) file within the filesystem. The file usually
+consumes an entire block group, though mke2fs tries to put it in the
+middle of the disk.
+
+All fields in jbd2 are written to disk in big-endian order. This is the
+opposite of ext4.
+
+NOTE: Both ext4 and ocfs2 use jbd2.
+
+The maximum size of a journal embedded in an ext4 filesystem is 2^32
+blocks. jbd2 itself does not seem to care.
+
+Layout
+~~~~~~
+
+Generally speaking, the journal has this format:
+
+.. list-table::
+ :widths: 1 1 78
+ :header-rows: 1
+
+ * - Superblock
+ - descriptor\_block (data\_blocks or revocation\_block) [more data or
+ revocations] commit\_block
+ - [more transactions...]
+ * -
+ - One transaction
+ -
+
+Notice that a transaction begins with either a descriptor and some data,
+or a block revocation list. A finished transaction always ends with a
+commit. If there is no commit record (or the checksums don't match), the
+transaction will be discarded during replay.
+
+External Journal
+~~~~~~~~~~~~~~~~
+
+Optionally, an ext4 filesystem can be created with an external journal
+device (as opposed to an internal journal, which uses a reserved inode).
+In this case, on the filesystem device, ``s_journal_inum`` should be
+zero and ``s_journal_uuid`` should be set. On the journal device there
+will be an ext4 super block in the usual place, with a matching UUID.
+The journal superblock will be in the next full block after the
+superblock.
+
+.. list-table::
+ :widths: 1 1 1 1 76
+ :header-rows: 1
+
+ * - 1024 bytes of padding
+ - ext4 Superblock
+ - Journal Superblock
+ - descriptor\_block (data\_blocks or revocation\_block) [more data or
+ revocations] commit\_block
+ - [more transactions...]
+ * -
+ -
+ -
+ - One transaction
+ -
+
+Block Header
+~~~~~~~~~~~~
+
+Every block in the journal starts with a common 12-byte header
+``struct journal_header_s``:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_be32
+ - h\_magic
+ - jbd2 magic number, 0xC03B3998.
+ * - 0x4
+ - \_\_be32
+ - h\_blocktype
+ - Description of what this block contains. See the jbd2_blocktype_ table
+ below.
+ * - 0x8
+ - \_\_be32
+ - h\_sequence
+ - The transaction ID that goes with this block.
+
+.. _jbd2_blocktype:
+
+The journal block type can be any one of:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 1
+ - Descriptor. This block precedes a series of data blocks that were
+ written through the journal during a transaction.
+ * - 2
+ - Block commit record. This block signifies the completion of a
+ transaction.
+ * - 3
+ - Journal superblock, v1.
+ * - 4
+ - Journal superblock, v2.
+ * - 5
+ - Block revocation records. This speeds up recovery by enabling the
+ journal to skip writing blocks that were subsequently rewritten.
+
+Super Block
+~~~~~~~~~~~
+
+The superblock for the journal is much simpler than ext4's. The key
+data it keeps are the size of the journal and where to find the start
+of the log of transactions.
+
+The journal superblock is recorded as ``struct journal_superblock_s``,
+which is 1024 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * -
+ -
+ -
+ - Static information describing the journal.
+ * - 0x0
+ - journal\_header\_t (12 bytes)
+ - s\_header
+ - Common header identifying this as a superblock.
+ * - 0xC
+ - \_\_be32
+ - s\_blocksize
+ - Journal device block size.
+ * - 0x10
+ - \_\_be32
+ - s\_maxlen
+ - Total number of blocks in this journal.
+ * - 0x14
+ - \_\_be32
+ - s\_first
+ - First block of log information.
+ * -
+ -
+ -
+ - Dynamic information describing the current state of the log.
+ * - 0x18
+ - \_\_be32
+ - s\_sequence
+ - First commit ID expected in log.
+ * - 0x1C
+ - \_\_be32
+ - s\_start
+ - Block number of the start of log. Contrary to the comments, this field
+ being zero does not imply that the journal is clean!
+ * - 0x20
+ - \_\_be32
+ - s\_errno
+ - Error value, as set by jbd2\_journal\_abort().
+ * -
+ -
+ -
+ - The remaining fields are only valid in a v2 superblock.
+ * - 0x24
+ - \_\_be32
+ - s\_feature\_compat
+ - Compatible feature set. See the table jbd2_compat_ below.
+ * - 0x28
+ - \_\_be32
+ - s\_feature\_incompat
+ - Incompatible feature set. See the table jbd2_incompat_ below.
+ * - 0x2C
+ - \_\_be32
+ - s\_feature\_ro\_compat
+ - Read-only compatible feature set. There aren't any of these currently.
+ * - 0x30
+ - \_\_u8
+ - s\_uuid[16]
+ - 128-bit uuid for journal. This is compared against the copy in the ext4
+ super block at mount time.
+ * - 0x40
+ - \_\_be32
+ - s\_nr\_users
+ - Number of file systems sharing this journal.
+ * - 0x44
+ - \_\_be32
+ - s\_dynsuper
+ - Location of dynamic super block copy. (Not used?)
+ * - 0x48
+ - \_\_be32
+ - s\_max\_transaction
+ - Limit of journal blocks per transaction. (Not used?)
+ * - 0x4C
+ - \_\_be32
+ - s\_max\_trans\_data
+ - Limit of data blocks per transaction. (Not used?)
+ * - 0x50
+ - \_\_u8
+ - s\_checksum\_type
+ - Checksum algorithm used for the journal. See jbd2_checksum_type_ for
+ more info.
+ * - 0x51
+ - \_\_u8[3]
+ - s\_padding2
+ -
+ * - 0x54
+ - \_\_u32
+ - s\_padding[42]
+ -
+ * - 0xFC
+ - \_\_be32
+ - s\_checksum
+ - Checksum of the entire superblock, with this field set to zero.
+ * - 0x100
+ - \_\_u8
+ - s\_users[16\*48]
+ - IDs of all file systems sharing the log. e2fsprogs/Linux don't allow
+ shared external journals, but I imagine Lustre (or ocfs2?), which use
+ the jbd2 code, might.
+
+.. _jbd2_compat:
+
+The journal compat features are any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - Journal maintains checksums on the data blocks.
+ (JBD2\_FEATURE\_COMPAT\_CHECKSUM)
+
+.. _jbd2_incompat:
+
+The journal incompat features are any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - Journal has block revocation records. (JBD2\_FEATURE\_INCOMPAT\_REVOKE)
+ * - 0x2
+ - Journal can deal with 64-bit block numbers.
+ (JBD2\_FEATURE\_INCOMPAT\_64BIT)
+ * - 0x4
+ - Journal commits asynchronously. (JBD2\_FEATURE\_INCOMPAT\_ASYNC\_COMMIT)
+ * - 0x8
+ - This journal uses v2 of the checksum on-disk format. Each journal
+ metadata block gets its own checksum, and the block tags in the
+ descriptor table contain checksums for each of the data blocks in the
+ journal. (JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2)
+ * - 0x10
+ - This journal uses v3 of the checksum on-disk format. This is the same as
+ v2, but the journal block tag size is fixed regardless of the size of
+ block numbers. (JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3)
+
+.. _jbd2_checksum_type:
+
+Journal checksum type codes are one of the following. crc32 or crc32c are the
+most likely choices.
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 1
+ - CRC32
+ * - 2
+ - MD5
+ * - 3
+ - SHA1
+ * - 4
+ - CRC32C
+
+Descriptor Block
+~~~~~~~~~~~~~~~~
+
+The descriptor block contains an array of journal block tags that
+describe the final locations of the data blocks that follow in the
+journal. Descriptor blocks are open-coded instead of being completely
+described by a data structure, but here is the block structure anyway.
+Descriptor blocks consume at least 36 bytes, but use a full block:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - journal\_header\_t
+ - (open coded)
+ - Common block header.
+ * - 0xC
+ - struct journal\_block\_tag\_s
+ - open coded array[]
+ - Enough tags either to fill up the block or to describe all the data
+ blocks that follow this descriptor block.
+
+Journal block tags have any of the following formats, depending on which
+journal feature and block tag flags are set.
+
+If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 is set, the journal block tag is
+defined as ``struct journal_block_tag3_s``, which looks like the
+following. The size is 16 or 32 bytes.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_be32
+ - t\_blocknr
+ - Lower 32-bits of the location of where the corresponding data block
+ should end up on disk.
+ * - 0x4
+ - \_\_be32
+ - t\_flags
+ - Flags that go with the descriptor. See the table jbd2_tag_flags_ for
+ more info.
+ * - 0x8
+ - \_\_be32
+ - t\_blocknr\_high
+ - Upper 32-bits of the location of where the corresponding data block
+ should end up on disk. This is zero if JBD2\_FEATURE\_INCOMPAT\_64BIT is
+ not enabled.
+ * - 0xC
+ - \_\_be32
+ - t\_checksum
+ - Checksum of the journal UUID, the sequence number, and the data block.
+ * -
+ -
+ -
+ - This field appears to be open coded. It always comes at the end of the
+ tag, after t_checksum. This field is not present if the "same UUID" flag
+ is set.
+ * - 0x8 or 0xC
+ - char
+ - uuid[16]
+ - A UUID to go with this tag. This field appears to be copied from the
+ ``j_uuid`` field in ``struct journal_s``, but only tune2fs touches that
+ field.
+
+.. _jbd2_tag_flags:
+
+The journal tag flags are any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - On-disk block is escaped. The first four bytes of the data block just
+ happened to match the jbd2 magic number.
+ * - 0x2
+ - This block has the same UUID as previous, therefore the UUID field is
+ omitted.
+ * - 0x4
+ - The data block was deleted by the transaction. (Not used?)
+ * - 0x8
+ - This is the last tag in this descriptor block.
+
+If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 is NOT set, the journal block tag
+is defined as ``struct journal_block_tag_s``, which looks like the
+following. The size is 8, 12, 24, or 28 bytes:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_be32
+ - t\_blocknr
+ - Lower 32-bits of the location of where the corresponding data block
+ should end up on disk.
+ * - 0x4
+ - \_\_be16
+ - t\_checksum
+ - Checksum of the journal UUID, the sequence number, and the data block.
+ Note that only the lower 16 bits are stored.
+ * - 0x6
+ - \_\_be16
+ - t\_flags
+ - Flags that go with the descriptor. See the table jbd2_tag_flags_ for
+ more info.
+ * -
+ -
+ -
+ - This next field is only present if the super block indicates support for
+ 64-bit block numbers.
+ * - 0x8
+ - \_\_be32
+ - t\_blocknr\_high
+ - Upper 32-bits of the location of where the corresponding data block
+ should end up on disk.
+ * -
+ -
+ -
+ - This field appears to be open coded. It always comes at the end of the
+ tag, after t_flags or t_blocknr_high. This field is not present if the
+ "same UUID" flag is set.
+ * - 0x8 or 0xC
+ - char
+ - uuid[16]
+ - A UUID to go with this tag. This field appears to be copied from the
+ ``j_uuid`` field in ``struct journal_s``, but only tune2fs touches that
+ field.
+
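+Putting the two tag formats together, the size of a single block tag can
+be derived from the journal feature flags and the per-tag flags. A
+sketch (constant values taken from the tables above; the names are
+illustrative)::
+
+    #include <stdint.h>
+
+    #define INCOMPAT_64BIT   0x2
+    #define INCOMPAT_CSUM_V3 0x10
+    #define TAG_SAME_UUID    0x2
+
+    /* Size, in bytes, of one journal block tag. */
+    unsigned int tag_size(uint32_t incompat, uint32_t t_flags)
+    {
+        unsigned int size;
+
+        if (incompat & INCOMPAT_CSUM_V3)
+            size = 16;              /* struct journal_block_tag3_s */
+        else
+            size = (incompat & INCOMPAT_64BIT) ? 12 : 8;
+
+        if (!(t_flags & TAG_SAME_UUID))
+            size += 16;             /* the trailing UUID */
+        return size;
+    }
+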
+If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or
+JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the block is a
+``struct jbd2_journal_block_tail``, which looks like this:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_be32
+ - t\_checksum
+ - Checksum of the journal UUID + the descriptor block, with this field set
+ to zero.
+
+Data Block
+~~~~~~~~~~
+
+In general, the data blocks being written to disk through the journal
+are written verbatim into the journal file after the descriptor block.
+However, if the first four bytes of the block match the jbd2 magic
+number then those four bytes are replaced with zeroes and the “escaped”
+flag is set in the descriptor block tag.
+
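+A sketch of the escaping rule (illustrative only; the magic number is
+written big-endian on disk)::
+
+    #include <string.h>
+
+    /* 0xC03B3998 as it appears on disk. */
+    static const unsigned char jbd2_magic[4] = { 0xC0, 0x3B, 0x39, 0x98 };
+
+    /* Zero the first four bytes if they collide with the magic number;
+     * returns nonzero if the tag's "escaped" flag must be set. */
+    int escape_block(unsigned char *block)
+    {
+        if (memcmp(block, jbd2_magic, 4) != 0)
+            return 0;
+        memset(block, 0, 4);
+        return 1;
+    }
+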
+Revocation Block
+~~~~~~~~~~~~~~~~
+
+A revocation block is used to prevent replay of a block in an earlier
+transaction. This is used to mark blocks that were journalled at one
+time but are no longer journalled. Typically this happens if a metadata
+block is freed and re-allocated as a file data block; in this case, a
+journal replay after the file block was written to disk will cause
+corruption.
+
+**NOTE**: This mechanism is NOT used to express “this journal block is
+superseded by this other journal block”, as the author (djwong)
+mistakenly thought. Any block being added to a transaction will cause
+the removal of all existing revocation records for that block.
+
+Revocation blocks are described by
+``struct jbd2_journal_revoke_header_s``; they are at least 16 bytes in
+length, but use a full block:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - journal\_header\_t
+ - r\_header
+ - Common block header.
+ * - 0xC
+ - \_\_be32
+ - r\_count
+ - Number of bytes used in this block.
+ * - 0x10
+ - \_\_be32 or \_\_be64
+ - blocks[0]
+ - Blocks to revoke.
+
+After r\_count is a linear array of block numbers that are effectively
+revoked by this transaction. The size of each block number is 8 bytes if
+the superblock advertises 64-bit block number support, or 4 bytes
+otherwise.
+
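+A sketch of walking these records (illustrative; assumes the block has
+already been read into memory and its header validated)::
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    /* Print every block number revoked by one revocation block.
+     * r_count counts the bytes used, including the 16-byte header. */
+    void walk_revocations(const unsigned char *block, uint32_t r_count,
+                          int is_64bit)
+    {
+        unsigned int step = is_64bit ? 8 : 4;
+        uint32_t off;
+
+        for (off = 16; off + step <= r_count; off += step) {
+            uint64_t blk = 0;
+            unsigned int i;
+
+            for (i = 0; i < step; i++)   /* fields are big-endian */
+                blk = (blk << 8) | block[off + i];
+            printf("revoked block %llu\n", (unsigned long long)blk);
+        }
+    }
+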
+If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or
+JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the revocation
+block is a ``struct jbd2_journal_revoke_tail``, which has this format:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_be32
+ - r\_checksum
+ - Checksum of the journal UUID + revocation block
+
+Commit Block
+~~~~~~~~~~~~
+
+The commit block is a sentinel indicating that a transaction has been
+completely written to the journal. Once this commit block reaches the
+journal, the data stored with this transaction can be written to their
+final locations on disk.
+
+The commit block is described by ``struct commit_header``, which is 32
+bytes long (but uses a full block):
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - journal\_header\_s
+ - (open coded)
+ - Common block header.
+ * - 0xC
+ - unsigned char
+ - h\_chksum\_type
+ - The type of checksum to use to verify the integrity of the data blocks
+ in the transaction. See jbd2_checksum_type_ for more info.
+ * - 0xD
+ - unsigned char
+ - h\_chksum\_size
+ - The number of bytes used by the checksum. Most likely 4.
+ * - 0xE
+ - unsigned char
+ - h\_padding[2]
+ -
+ * - 0x10
+ - \_\_be32
+ - h\_chksum[JBD2\_CHECKSUM\_BYTES]
+ - 32 bytes of space to store checksums. If
+ JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3
+ are set, the first ``__be32`` is the checksum of the journal UUID and
+ the entire commit block, with this field zeroed. If
+ JBD2\_FEATURE\_COMPAT\_CHECKSUM is set, the first ``__be32`` is the
+ crc32 of all the blocks already written to the transaction.
+ * - 0x30
+ - \_\_be64
+ - h\_commit\_sec
+ - The time that the transaction was committed, in seconds since the epoch.
+ * - 0x38
+ - \_\_be32
+ - h\_commit\_nsec
+ - Nanoseconds component of the above timestamp.
+
diff --git a/Documentation/filesystems/ext4/ondisk/mmp.rst b/Documentation/filesystems/ext4/ondisk/mmp.rst
new file mode 100644
index 000000000000..b7d7a3137f80
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/mmp.rst
@@ -0,0 +1,77 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Multiple Mount Protection
+-------------------------
+
+Multiple mount protection (MMP) is a feature that protects the
+filesystem against multiple hosts trying to use the filesystem
+simultaneously. When a filesystem is opened (for mounting, or fsck,
+etc.), the MMP code running on the node (call it node A) checks a
+sequence number. If the sequence number is EXT4\_MMP\_SEQ\_CLEAN, the
+open continues. If the sequence number is EXT4\_MMP\_SEQ\_FSCK, then
+fsck is (hopefully) running, and open fails immediately. Otherwise, the
+open code will wait for twice the specified MMP check interval and check
+the sequence number again. If the sequence number has changed, then the
+filesystem is active on another machine and the open fails. If the MMP
+code passes all of those checks, a new MMP sequence number is generated
+and written to the MMP block, and the mount proceeds.
+
+While the filesystem is live, the kernel sets up a timer to re-check the
+MMP block at the specified MMP check interval. To perform the re-check,
+the MMP sequence number is re-read; if it does not match the in-memory
+MMP sequence number, then another node (node B) has mounted the
+filesystem, and node A remounts the filesystem read-only. If the
+sequence numbers match, the sequence number is incremented both in
+memory and on disk, and the re-check is complete.
+
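+A simplified sketch of the open-time check (the sequence constants are
+assumed from fs/ext4/ext4.h, and the helpers simulate reading and
+writing the MMP block; the real logic has more steps)::
+
+    #include <stdint.h>
+    #include <unistd.h>
+
+    #define MMP_SEQ_CLEAN 0xFF4D4D50U
+    #define MMP_SEQ_FSCK  0xE24D4D50U
+
+    static uint32_t mmp_seq = MMP_SEQ_CLEAN;  /* simulated MMP block */
+
+    static uint32_t read_mmp_seq(void) { return mmp_seq; }
+    static void write_mmp_seq(uint32_t s) { mmp_seq = s; }
+    static uint32_t new_mmp_seq(void) { return 42; /* random, really */ }
+
+    /* Returns 0 if the open may proceed, -1 if the filesystem looks
+     * busy elsewhere. */
+    int mmp_open_check(unsigned int check_interval)
+    {
+        uint32_t seq = read_mmp_seq();
+
+        if (seq == MMP_SEQ_FSCK)
+            return -1;                 /* fsck is (hopefully) running */
+        if (seq != MMP_SEQ_CLEAN) {
+            sleep(2 * check_interval); /* twice the check interval */
+            if (read_mmp_seq() != seq)
+                return -1;             /* another host is live */
+        }
+        write_mmp_seq(new_mmp_seq());  /* claim the filesystem */
+        return 0;
+    }
+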
+The hostname and device filename are written into the MMP block whenever
+an open operation succeeds. The MMP code does not use these values; they
+are provided purely for informational purposes.
+
+The checksum is calculated against the FS UUID and the MMP structure.
+The MMP structure (``struct mmp_struct``) is as follows:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - mmp\_magic
+ - Magic number for MMP, 0x004D4D50 (“MMP”).
+ * - 0x4
+ - \_\_le32
+ - mmp\_seq
+ - Sequence number, updated periodically.
+ * - 0x8
+ - \_\_le64
+ - mmp\_time
+ - Time that the MMP block was last updated.
+ * - 0x10
+ - char[64]
+ - mmp\_nodename
+ - Hostname of the node that opened the filesystem.
+ * - 0x50
+ - char[32]
+ - mmp\_bdevname
+ - Block device name of the filesystem.
+ * - 0x70
+ - \_\_le16
+ - mmp\_check\_interval
+ - The MMP re-check interval, in seconds.
+ * - 0x72
+ - \_\_le16
+ - mmp\_pad1
+ - Zero.
+ * - 0x74
+ - \_\_le32[226]
+ - mmp\_pad2
+ - Zero.
+ * - 0x3FC
+ - \_\_le32
+ - mmp\_checksum
+ - Checksum of the MMP block.
diff --git a/Documentation/filesystems/ext4/ondisk/overview.rst b/Documentation/filesystems/ext4/ondisk/overview.rst
new file mode 100644
index 000000000000..cbab18baba12
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/overview.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+High Level Design
+=================
+
+An ext4 file system is split into a series of block groups. To reduce
+performance difficulties due to fragmentation, the block allocator tries
+very hard to keep each file's blocks within the same group, thereby
+reducing seek times. The size of a block group is specified in
+``sb.s_blocks_per_group`` blocks, though it can also be calculated as 8 \*
+``block_size_in_bytes``. With the default block size of 4KiB, each group
+will contain 32,768 blocks, for a length of 128MiB. The number of block
+groups is the size of the device divided by the size of a block group.
+
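+A back-of-the-envelope sketch of those numbers (assuming the default
+4KiB block size)::
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    int main(void)
+    {
+        uint64_t block_size = 4096;
+        uint64_t blocks_per_group = 8 * block_size;    /* 32,768 */
+        uint64_t group_bytes = blocks_per_group * block_size;
+        uint64_t device_bytes = 1ULL << 40;            /* a 1 TiB disk */
+
+        /* Prints: group size 128 MiB, 8192 block groups */
+        printf("group size %llu MiB, %llu block groups\n",
+               (unsigned long long)(group_bytes >> 20),
+               (unsigned long long)(device_bytes / group_bytes));
+        return 0;
+    }
+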
+All fields in ext4 are written to disk in little-endian order. HOWEVER,
+all fields in jbd2 (the journal) are written to disk in big-endian
+order.
+
+.. include:: blocks.rst
+.. include:: blockgroup.rst
+.. include:: special_inodes.rst
+.. include:: allocators.rst
+.. include:: checksums.rst
+.. include:: bigalloc.rst
+.. include:: inlinedata.rst
+.. include:: eainode.rst
diff --git a/Documentation/filesystems/ext4/ondisk/special_inodes.rst b/Documentation/filesystems/ext4/ondisk/special_inodes.rst
new file mode 100644
index 000000000000..a82f70c9baeb
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/special_inodes.rst
@@ -0,0 +1,38 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Special inodes
+--------------
+
+ext4 reserves some inodes for special features, as follows:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - inode Number
+ - Purpose
+ * - 0
+ - Doesn't exist; there is no inode 0.
+ * - 1
+ - List of defective blocks.
+ * - 2
+ - Root directory.
+ * - 3
+ - User quota.
+ * - 4
+ - Group quota.
+ * - 5
+ - Boot loader.
+ * - 6
+ - Undelete directory.
+ * - 7
+ - Reserved group descriptors inode. (“resize inode”)
+ * - 8
+ - Journal inode.
+ * - 9
+ - The “exclude” inode, for snapshots(?)
+ * - 10
+ - Replica inode, used for some non-upstream feature?
+ * - 11
+ - Traditional first non-reserved inode. Usually this is the lost+found directory. See s\_first\_ino in the superblock.
+
diff --git a/Documentation/filesystems/ext4/ondisk/super.rst b/Documentation/filesystems/ext4/ondisk/super.rst
new file mode 100644
index 000000000000..5f81dd87e0b9
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/super.rst
@@ -0,0 +1,801 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Super Block
+-----------
+
+The superblock records various information about the enclosing
+filesystem, such as block counts, inode counts, supported features,
+maintenance information, and more.
+
+If the sparse\_super feature flag is set, redundant copies of the
+superblock and group descriptors are kept only in the groups whose group
+number is either 0 or a power of 3, 5, or 7. If the flag is not set,
+redundant copies are kept in all groups.
+
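+A sketch of the sparse\_super placement rule (group 1 qualifies as the
+zeroth power of each base)::
+
+    #include <stdio.h>
+
+    /* Nonzero if "group" is a power of "base". */
+    static int is_power_of(unsigned long group, unsigned long base)
+    {
+        if (group == 0)
+            return 0;
+        while (group % base == 0)
+            group /= base;
+        return group == 1;
+    }
+
+    /* Nonzero if a sparse_super filesystem keeps superblock and group
+     * descriptor backups in this block group. */
+    static int has_backup(unsigned long group)
+    {
+        return group == 0 || is_power_of(group, 3) ||
+               is_power_of(group, 5) || is_power_of(group, 7);
+    }
+
+    int main(void)
+    {
+        unsigned long g;
+
+        for (g = 0; g < 50; g++)      /* prints 0 1 3 5 7 9 25 27 49 */
+            if (has_backup(g))
+                printf("%lu ", g);
+        printf("\n");
+        return 0;
+    }
+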
+The superblock checksum is calculated against the superblock structure,
+which includes the FS UUID.
+
+The ext4 superblock is laid out as follows in
+``struct ext4_super_block``:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - s\_inodes\_count
+ - Total inode count.
+ * - 0x4
+ - \_\_le32
+ - s\_blocks\_count\_lo
+ - Total block count.
+ * - 0x8
+ - \_\_le32
+ - s\_r\_blocks\_count\_lo
+ - This number of blocks can only be allocated by the super-user.
+ * - 0xC
+ - \_\_le32
+ - s\_free\_blocks\_count\_lo
+ - Free block count.
+ * - 0x10
+ - \_\_le32
+ - s\_free\_inodes\_count
+ - Free inode count.
+ * - 0x14
+ - \_\_le32
+ - s\_first\_data\_block
+ - First data block. This must be at least 1 for 1k-block filesystems and
+ is typically 0 for all other block sizes.
+ * - 0x18
+ - \_\_le32
+ - s\_log\_block\_size
+ - Block size is 2 ^ (10 + s\_log\_block\_size).
+ * - 0x1C
+ - \_\_le32
+ - s\_log\_cluster\_size
+ - Cluster size is (2 ^ s\_log\_cluster\_size) blocks if bigalloc is
+ enabled. Otherwise s\_log\_cluster\_size must equal s\_log\_block\_size.
+ * - 0x20
+ - \_\_le32
+ - s\_blocks\_per\_group
+ - Blocks per group.
+ * - 0x24
+ - \_\_le32
+ - s\_clusters\_per\_group
+ - Clusters per group, if bigalloc is enabled. Otherwise
+ s\_clusters\_per\_group must equal s\_blocks\_per\_group.
+ * - 0x28
+ - \_\_le32
+ - s\_inodes\_per\_group
+ - Inodes per group.
+ * - 0x2C
+ - \_\_le32
+ - s\_mtime
+ - Mount time, in seconds since the epoch.
+ * - 0x30
+ - \_\_le32
+ - s\_wtime
+ - Write time, in seconds since the epoch.
+ * - 0x34
+ - \_\_le16
+ - s\_mnt\_count
+ - Number of mounts since the last fsck.
+ * - 0x36
+ - \_\_le16
+ - s\_max\_mnt\_count
+ - Number of mounts beyond which a fsck is needed.
+ * - 0x38
+ - \_\_le16
+ - s\_magic
+ - Magic signature, 0xEF53
+ * - 0x3A
+ - \_\_le16
+ - s\_state
+ - File system state. See super_state_ for more info.
+ * - 0x3C
+ - \_\_le16
+ - s\_errors
+ - Behaviour when detecting errors. See super_errors_ for more info.
+ * - 0x3E
+ - \_\_le16
+ - s\_minor\_rev\_level
+ - Minor revision level.
+ * - 0x40
+ - \_\_le32
+ - s\_lastcheck
+ - Time of last check, in seconds since the epoch.
+ * - 0x44
+ - \_\_le32
+ - s\_checkinterval
+ - Maximum time between checks, in seconds.
+ * - 0x48
+ - \_\_le32
+ - s\_creator\_os
+ - Creator OS. See the table super_creator_ for more info.
+ * - 0x4C
+ - \_\_le32
+ - s\_rev\_level
+ - Revision level. See the table super_revision_ for more info.
+ * - 0x50
+ - \_\_le16
+ - s\_def\_resuid
+ - Default uid for reserved blocks.
+ * - 0x52
+ - \_\_le16
+ - s\_def\_resgid
+ - Default gid for reserved blocks.
+ * -
+ -
+ -
+ - These fields are for EXT4_DYNAMIC_REV superblocks only.
+
+ Note: the difference between the compatible feature set and the
+ incompatible feature set is that if there is a bit set in the
+ incompatible feature set that the kernel doesn't know about, it should
+ refuse to mount the filesystem.
+
+ e2fsck's requirements are more strict; if it doesn't know
+ about a feature in either the compatible or incompatible feature set, it
+ must abort and not try to meddle with things it doesn't understand...
+ * - 0x54
+ - \_\_le32
+ - s\_first\_ino
+ - First non-reserved inode.
+ * - 0x58
+ - \_\_le16
+ - s\_inode\_size
+ - Size of inode structure, in bytes.
+ * - 0x5A
+ - \_\_le16
+ - s\_block\_group\_nr
+ - Block group # of this superblock.
+ * - 0x5C
+ - \_\_le32
+ - s\_feature\_compat
+ - Compatible feature set flags. Kernel can still read/write this fs even
+ if it doesn't understand a flag; fsck should not do that. See the
+ super_compat_ table for more info.
+ * - 0x60
+ - \_\_le32
+ - s\_feature\_incompat
+ - Incompatible feature set. If the kernel or fsck doesn't understand one
+ of these bits, it should stop. See the super_incompat_ table for more
+ info.
+ * - 0x64
+ - \_\_le32
+ - s\_feature\_ro\_compat
+ - Readonly-compatible feature set. If the kernel doesn't understand one of
+ these bits, it can still mount read-only. See the super_rocompat_ table
+ for more info.
+ * - 0x68
+ - \_\_u8
+ - s\_uuid[16]
+ - 128-bit UUID for volume.
+ * - 0x78
+ - char
+ - s\_volume\_name[16]
+ - Volume label.
+ * - 0x88
+ - char
+ - s\_last\_mounted[64]
+ - Directory where filesystem was last mounted.
+ * - 0xC8
+ - \_\_le32
+ - s\_algorithm\_usage\_bitmap
+ - For compression (Not used in e2fsprogs/Linux)
+ * -
+ -
+ -
+ - Performance hints. Directory preallocation should only happen if the
+ EXT4_FEATURE_COMPAT_DIR_PREALLOC flag is on.
+ * - 0xCC
+ - \_\_u8
+ - s\_prealloc\_blocks
+ - Number of blocks to try to preallocate for ... files? (Not used in
+ e2fsprogs/Linux)
+ * - 0xCD
+ - \_\_u8
+ - s\_prealloc\_dir\_blocks
+ - #. of blocks to preallocate for directories. (Not used in
+ e2fsprogs/Linux)
+ * - 0xCE
+ - \_\_le16
+ - s\_reserved\_gdt\_blocks
+ - Number of reserved GDT entries for future filesystem expansion.
+ * -
+ -
+ -
+ - Journalling support is valid only if EXT4_FEATURE_COMPAT_HAS_JOURNAL is
+ set.
+ * - 0xD0
+ - \_\_u8
+ - s\_journal\_uuid[16]
+ - UUID of journal superblock
+ * - 0xE0
+ - \_\_le32
+ - s\_journal\_inum
+ - inode number of journal file.
+ * - 0xE4
+ - \_\_le32
+ - s\_journal\_dev
+ - Device number of journal file, if the external journal feature flag is
+ set.
+ * - 0xE8
+ - \_\_le32
+ - s\_last\_orphan
+ - Start of list of orphaned inodes to delete.
+ * - 0xEC
+ - \_\_le32
+ - s\_hash\_seed[4]
+ - HTREE hash seed.
+ * - 0xFC
+ - \_\_u8
+ - s\_def\_hash\_version
+ - Default hash algorithm to use for directory hashes. See super_def_hash_
+ for more info.
+ * - 0xFD
+ - \_\_u8
+ - s\_jnl\_backup\_type
+ - If this value is 0 or EXT3\_JNL\_BACKUP\_BLOCKS (1), then the
+ ``s_jnl_blocks`` field contains a duplicate copy of the inode's
+ ``i_block[]`` array and ``i_size``.
+ * - 0xFE
+ - \_\_le16
+ - s\_desc\_size
+ - Size of group descriptors, in bytes, if the 64bit incompat feature flag
+ is set.
+ * - 0x100
+ - \_\_le32
+ - s\_default\_mount\_opts
+ - Default mount options. See the super_mountopts_ table for more info.
+ * - 0x104
+ - \_\_le32
+ - s\_first\_meta\_bg
+ - First metablock block group, if the meta\_bg feature is enabled.
+ * - 0x108
+ - \_\_le32
+ - s\_mkfs\_time
+ - When the filesystem was created, in seconds since the epoch.
+ * - 0x10C
+ - \_\_le32
+ - s\_jnl\_blocks[17]
+ - Backup copy of the journal inode's ``i_block[]`` array in the first 15
+ elements and i\_size\_high and i\_size in the 16th and 17th elements,
+ respectively.
+ * -
+ -
+ -
+ - 64bit support is valid only if EXT4_FEATURE_INCOMPAT_64BIT is set.
+ * - 0x150
+ - \_\_le32
+ - s\_blocks\_count\_hi
+ - High 32-bits of the block count.
+ * - 0x154
+ - \_\_le32
+ - s\_r\_blocks\_count\_hi
+ - High 32-bits of the reserved block count.
+ * - 0x158
+ - \_\_le32
+ - s\_free\_blocks\_count\_hi
+ - High 32-bits of the free block count.
+ * - 0x15C
+ - \_\_le16
+ - s\_min\_extra\_isize
+ - All inodes have at least # bytes.
+ * - 0x15E
+ - \_\_le16
+ - s\_want\_extra\_isize
+ - New inodes should reserve # bytes.
+ * - 0x160
+ - \_\_le32
+ - s\_flags
+ - Miscellaneous flags. See the super_flags_ table for more info.
+ * - 0x164
+ - \_\_le16
+ - s\_raid\_stride
+ - RAID stride. This is the number of logical blocks read from or written
+ to the disk before moving to the next disk. This affects the placement
+ of filesystem metadata, which will hopefully make RAID storage faster.
+ * - 0x166
+ - \_\_le16
+ - s\_mmp\_interval
+ - Number of seconds to wait in multi-mount prevention (MMP) checking. MMP
+ is a mechanism to record in the MMP block which host and device have
+ mounted the filesystem, in order to prevent multiple mounts. See the
+ Multiple Mount Protection section for more details.
+ * - 0x168
+ - \_\_le64
+ - s\_mmp\_block
+ - Block # for multi-mount protection data.
+ * - 0x170
+ - \_\_le32
+ - s\_raid\_stripe\_width
+ - RAID stripe width. This is the number of logical blocks read from or
+ written to the disk before coming back to the current disk. This is used
+ by the block allocator to try to reduce the number of read-modify-write
+ operations in a RAID5/6.
+ * - 0x174
+ - \_\_u8
+ - s\_log\_groups\_per\_flex
+ - Size of a flexible block group is 2 ^ ``s_log_groups_per_flex``.
+ * - 0x175
+ - \_\_u8
+ - s\_checksum\_type
+ - Metadata checksum algorithm type. The only valid value is 1 (crc32c).
+ * - 0x176
+ - \_\_le16
+ - s\_reserved\_pad
+ -
+ * - 0x178
+ - \_\_le64
+ - s\_kbytes\_written
+ - Number of KiB written to this filesystem over its lifetime.
+ * - 0x180
+ - \_\_le32
+ - s\_snapshot\_inum
+ - inode number of active snapshot. (Not used in e2fsprogs/Linux.)
+ * - 0x184
+ - \_\_le32
+ - s\_snapshot\_id
+ - Sequential ID of active snapshot. (Not used in e2fsprogs/Linux.)
+ * - 0x188
+ - \_\_le64
+ - s\_snapshot\_r\_blocks\_count
+ - Number of blocks reserved for active snapshot's future use. (Not used in
+ e2fsprogs/Linux.)
+ * - 0x190
+ - \_\_le32
+ - s\_snapshot\_list
+ - inode number of the head of the on-disk snapshot list. (Not used in
+ e2fsprogs/Linux.)
+ * - 0x194
+ - \_\_le32
+ - s\_error\_count
+ - Number of errors seen.
+ * - 0x198
+ - \_\_le32
+ - s\_first\_error\_time
+ - First time an error happened, in seconds since the epoch.
+ * - 0x19C
+ - \_\_le32
+ - s\_first\_error\_ino
+ - inode involved in first error.
+ * - 0x1A0
+ - \_\_le64
+ - s\_first\_error\_block
+ - Number of the block involved in the first error.
+ * - 0x1A8
+ - \_\_u8
+ - s\_first\_error\_func[32]
+ - Name of function where the error happened.
+ * - 0x1C8
+ - \_\_le32
+ - s\_first\_error\_line
+ - Line number where error happened.
+ * - 0x1CC
+ - \_\_le32
+ - s\_last\_error\_time
+ - Time of most recent error, in seconds since the epoch.
+ * - 0x1D0
+ - \_\_le32
+ - s\_last\_error\_ino
+ - inode involved in most recent error.
+ * - 0x1D4
+ - \_\_le32
+ - s\_last\_error\_line
+ - Line number where most recent error happened.
+ * - 0x1D8
+ - \_\_le64
+ - s\_last\_error\_block
+ - Number of the block involved in the most recent error.
+ * - 0x1E0
+ - \_\_u8
+ - s\_last\_error\_func[32]
+ - Name of function where the most recent error happened.
+ * - 0x200
+ - \_\_u8
+ - s\_mount\_opts[64]
+ - ASCIIZ string of mount options.
+ * - 0x240
+ - \_\_le32
+ - s\_usr\_quota\_inum
+ - Inode number of the user quota file.
+ * - 0x244
+ - \_\_le32
+ - s\_grp\_quota\_inum
+ - Inode number of the group quota file.
+ * - 0x248
+ - \_\_le32
+ - s\_overhead\_blocks
+ - Overhead blocks/clusters in fs. (Huh? This field is always zero, which
+ means that the kernel calculates it dynamically.)
+ * - 0x24C
+ - \_\_le32
+ - s\_backup\_bgs[2]
+ - Block groups containing superblock backups (if sparse\_super2)
+ * - 0x254
+ - \_\_u8
+ - s\_encrypt\_algos[4]
+ - Encryption algorithms in use. There can be up to four algorithms in use
+ at any time; valid algorithm codes are given in the super_encrypt_ table
+ below.
+ * - 0x258
+ - \_\_u8
+ - s\_encrypt\_pw\_salt[16]
+ - Salt for the string2key algorithm for encryption.
+ * - 0x268
+ - \_\_le32
+ - s\_lpf\_ino
+ - Inode number of lost+found
+ * - 0x26C
+ - \_\_le32
+ - s\_prj\_quota\_inum
+ - Inode that tracks project quotas.
+ * - 0x270
+ - \_\_le32
+ - s\_checksum\_seed
+ - Checksum seed used for metadata\_csum calculations. This value is
+ crc32c(~0, $orig\_fs\_uuid).
+ * - 0x274
+ - \_\_u8
+ - s\_wtime\_hi
+ - Upper 8 bits of the s_wtime field.
+ * - 0x275
+ - \_\_u8
+ - s\_mtime\_hi
+ - Upper 8 bits of the s_mtime field.
+ * - 0x276
+ - \_\_u8
+ - s\_mkfs\_time\_hi
+ - Upper 8 bits of the s_mkfs_time field.
+ * - 0x277
+ - \_\_u8
+ - s\_lastcheck\_hi
+ - Upper 8 bits of the s_lastcheck field.
+ * - 0x278
+ - \_\_u8
+ - s\_first\_error\_time\_hi
+ - Upper 8 bits of the s_first_error_time field.
+ * - 0x279
+ - \_\_u8
+ - s\_last\_error\_time\_hi
+ - Upper 8 bits of the s_last_error_time field.
+ * - 0x27A
+ - \_\_u8[2]
+ - s\_pad
+ - Zero padding.
+ * - 0x27C
+ - \_\_le32
+ - s\_reserved[96]
+ - Padding to the end of the block.
+ * - 0x3FC
+ - \_\_le32
+ - s\_checksum
+ - Superblock checksum.
+
+.. _super_state:
+
+The superblock state is some combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0001
+ - Cleanly umounted
+ * - 0x0002
+ - Errors detected
+ * - 0x0004
+ - Orphans being recovered
+
+.. _super_errors:
+
+The superblock error policy is one of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 1
+ - Continue
+ * - 2
+ - Remount read-only
+ * - 3
+ - Panic
+
+.. _super_creator:
+
+The filesystem creator is one of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0
+ - Linux
+ * - 1
+ - Hurd
+ * - 2
+ - Masix
+ * - 3
+ - FreeBSD
+ * - 4
+ - Lites
+
+.. _super_revision:
+
+The superblock revision is one of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0
+ - Original format
+ * - 1
+ - v2 format w/ dynamic inode sizes
+
+Note that ``EXT4_DYNAMIC_REV`` refers to a revision 1 or newer filesystem.
+
+.. _super_compat:
+
+The superblock compatible features field is a combination of any of the
+following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - Directory preallocation (COMPAT\_DIR\_PREALLOC).
+ * - 0x2
+ - “imagic inodes”. Not clear from the code what this does
+ (COMPAT\_IMAGIC\_INODES).
+ * - 0x4
+ - Has a journal (COMPAT\_HAS\_JOURNAL).
+ * - 0x8
+ - Supports extended attributes (COMPAT\_EXT\_ATTR).
+ * - 0x10
+ - Has reserved GDT blocks for filesystem expansion
+ (COMPAT\_RESIZE\_INODE). Requires RO\_COMPAT\_SPARSE\_SUPER.
+ * - 0x20
+ - Has directory indices (COMPAT\_DIR\_INDEX).
+ * - 0x40
+ - “Lazy BG”. Not in Linux kernel, seems to have been for uninitialized
+ block groups? (COMPAT\_LAZY\_BG)
+ * - 0x80
+ - “Exclude inode”. Not used. (COMPAT\_EXCLUDE\_INODE).
+ * - 0x100
+ - “Exclude bitmap”. Seems to be used to indicate the presence of
+ snapshot-related exclude bitmaps? Not defined in kernel or used in
+ e2fsprogs (COMPAT\_EXCLUDE\_BITMAP).
+ * - 0x200
+ - Sparse Super Block, v2. If this flag is set, the SB field s\_backup\_bgs
+ points to the two block groups that contain backup superblocks
+ (COMPAT\_SPARSE\_SUPER2).
+
+.. _super_incompat:
+
+The superblock incompatible features field is a combination of any of the
+following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - Compression (INCOMPAT\_COMPRESSION).
+ * - 0x2
+ - Directory entries record the file type. See ext4\_dir\_entry\_2 below
+ (INCOMPAT\_FILETYPE).
+ * - 0x4
+ - Filesystem needs recovery (INCOMPAT\_RECOVER).
+ * - 0x8
+ - Filesystem has a separate journal device (INCOMPAT\_JOURNAL\_DEV).
+ * - 0x10
+ - Meta block groups. See the earlier discussion of this feature
+ (INCOMPAT\_META\_BG).
+ * - 0x40
+ - Files in this filesystem use extents (INCOMPAT\_EXTENTS).
+ * - 0x80
+ - Enable a filesystem size of 2^64 blocks (INCOMPAT\_64BIT).
+ * - 0x100
+ - Multiple mount protection. Not implemented (INCOMPAT\_MMP).
+ * - 0x200
+ - Flexible block groups. See the earlier discussion of this feature
+ (INCOMPAT\_FLEX\_BG).
+ * - 0x400
+ - Inodes can be used to store large extended attribute values
+ (INCOMPAT\_EA\_INODE).
+ * - 0x1000
+ - Data in directory entry (INCOMPAT\_DIRDATA). (Not implemented?)
+ * - 0x2000
+ - Metadata checksum seed is stored in the superblock. This feature enables
+ the administrator to change the UUID of a metadata\_csum filesystem
+ while the filesystem is mounted; without it, the checksum definition
+ requires all metadata blocks to be rewritten (INCOMPAT\_CSUM\_SEED).
+ * - 0x4000
+ - Large directory >2GB or 3-level htree (INCOMPAT\_LARGEDIR). Prior to
+ this feature, directories could not be larger than 4GiB and could not
+ have an htree more than 2 levels deep. If this feature is enabled,
+ directories can be larger than 4GiB and have a maximum htree depth of 3.
+ * - 0x8000
+ - Data in inode (INCOMPAT\_INLINE\_DATA).
+ * - 0x10000
+ - Encrypted inodes are present on the filesystem. (INCOMPAT\_ENCRYPT).
+
+.. _super_rocompat:
+
+The superblock read-only compatible features field is a combination of any of
+the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - Sparse superblocks. See the earlier discussion of this feature
+ (RO\_COMPAT\_SPARSE\_SUPER).
+ * - 0x2
+ - This filesystem has been used to store a file greater than 2GiB
+ (RO\_COMPAT\_LARGE\_FILE).
+ * - 0x4
+ - Not used in kernel or e2fsprogs (RO\_COMPAT\_BTREE\_DIR).
+ * - 0x8
+ - This filesystem has files whose sizes are represented in units of
+ logical blocks, not 512-byte sectors. This implies a very large file
+ indeed! (RO\_COMPAT\_HUGE\_FILE)
+ * - 0x10
+ - Group descriptors have checksums. In addition to detecting corruption,
+ this is useful for lazy formatting with uninitialized groups
+ (RO\_COMPAT\_GDT\_CSUM).
+ * - 0x20
+ - Indicates that the old ext3 32,000 subdirectory limit no longer applies
+ (RO\_COMPAT\_DIR\_NLINK). A directory's i\_links\_count will be set to 1
+ if it is incremented past 64,999.
+ * - 0x40
+ - Indicates that large inodes exist on this filesystem
+ (RO\_COMPAT\_EXTRA\_ISIZE).
+ * - 0x80
+ - This filesystem has a snapshot (RO\_COMPAT\_HAS\_SNAPSHOT).
+ * - 0x100
+ - Quota (RO\_COMPAT\_QUOTA).
+ * - 0x200
+ - This filesystem supports “bigalloc”, which means that file extents are
+ tracked in units of clusters (of blocks) instead of blocks
+ (RO\_COMPAT\_BIGALLOC).
+ * - 0x400
+ - This filesystem supports metadata checksumming.
+ (RO\_COMPAT\_METADATA\_CSUM; implies RO\_COMPAT\_GDT\_CSUM, though
+ GDT\_CSUM must not be set)
+ * - 0x800
+ - Filesystem supports replicas. This feature is neither in the kernel nor
+ e2fsprogs. (RO\_COMPAT\_REPLICA)
+ * - 0x1000
+ - Read-only filesystem image; the kernel will not mount this image
+ read-write and most tools will refuse to write to the image.
+ (RO\_COMPAT\_READONLY)
+ * - 0x2000
+ - Filesystem tracks project quotas. (RO\_COMPAT\_PROJECT)
+
+.. _super_def_hash:
+
+The ``s_def_hash_version`` field is one of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0
+ - Legacy.
+ * - 0x1
+ - Half MD4.
+ * - 0x2
+ - Tea.
+ * - 0x3
+ - Legacy, unsigned.
+ * - 0x4
+ - Half MD4, unsigned.
+ * - 0x5
+ - Tea, unsigned.
+
+.. _super_mountopts:
+
+The ``s_default_mount_opts`` field is any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0001
+ - Print debugging info upon (re)mount. (EXT4\_DEFM\_DEBUG)
+ * - 0x0002
+ - New files take the gid of the containing directory (instead of the fsgid
+ of the current process). (EXT4\_DEFM\_BSDGROUPS)
+ * - 0x0004
+ - Support userspace-provided extended attributes. (EXT4\_DEFM\_XATTR\_USER)
+ * - 0x0008
+ - Support POSIX access control lists (ACLs). (EXT4\_DEFM\_ACL)
+ * - 0x0010
+ - Do not support 32-bit UIDs. (EXT4\_DEFM\_UID16)
+ * - 0x0020
+ - All data and metadata are committed to the journal.
+ (EXT4\_DEFM\_JMODE\_DATA)
+ * - 0x0040
+ - All data are flushed to the disk before metadata are committed to the
+ journal. (EXT4\_DEFM\_JMODE\_ORDERED)
+ * - 0x0060
+ - Data ordering is not preserved; data may be written after the metadata
+ has been written. (EXT4\_DEFM\_JMODE\_WBACK)
+ * - 0x0100
+ - Disable write flushes. (EXT4\_DEFM\_NOBARRIER)
+ * - 0x0200
+ - Track which blocks in a filesystem are metadata and therefore should not
+ be used as data blocks. This option will be enabled by default on 3.18,
+ hopefully. (EXT4\_DEFM\_BLOCK\_VALIDITY)
+ * - 0x0400
+ - Enable DISCARD support, where the storage device is told about blocks
+ becoming unused. (EXT4\_DEFM\_DISCARD)
+ * - 0x0800
+ - Disable delayed allocation. (EXT4\_DEFM\_NODELALLOC)
+
+.. _super_flags:
+
+The ``s_flags`` field is any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0001
+ - Signed directory hash in use.
+ * - 0x0002
+ - Unsigned directory hash in use.
+ * - 0x0004
+ - To test development code.
+
+.. _super_encrypt:
+
+The ``s_encrypt_algos`` list can contain any of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0
+ - Invalid algorithm (ENCRYPTION\_MODE\_INVALID).
+ * - 1
+ - 256-bit AES in XTS mode (ENCRYPTION\_MODE\_AES\_256\_XTS).
+ * - 2
+ - 256-bit AES in GCM mode (ENCRYPTION\_MODE\_AES\_256\_GCM).
+ * - 3
+ - 256-bit AES in CBC mode (ENCRYPTION\_MODE\_AES\_256\_CBC).
+
+Total size of the superblock is 1024 bytes.
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 69f8de995739..e5edd29687b5 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -157,6 +157,24 @@ data_flush Enable data flushing before checkpoint in order to
persist data of regular and symlink.
fault_injection=%d Enable fault injection in all supported types with
specified injection rate.
+fault_type=%d Support configuring the fault injection type; it should
+ be enabled together with the fault_injection option. The
+ fault type values are shown below; single or combined
+ types are supported.
+ Type_Name Type_Value
+ FAULT_KMALLOC 0x000000001
+ FAULT_KVMALLOC 0x000000002
+ FAULT_PAGE_ALLOC 0x000000004
+ FAULT_PAGE_GET 0x000000008
+ FAULT_ALLOC_BIO 0x000000010
+ FAULT_ALLOC_NID 0x000000020
+ FAULT_ORPHAN 0x000000040
+ FAULT_BLOCK 0x000000080
+ FAULT_DIR_DEPTH 0x000000100
+ FAULT_EVICT_INODE 0x000000200
+ FAULT_TRUNCATE 0x000000400
+ FAULT_IO 0x000000800
+ FAULT_CHECKPOINT 0x000001000
+ FAULT_DISCARD 0x000002000
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
writes towards main area.
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 53b89d0edc15..46d1b1be3a51 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -71,6 +71,39 @@ Other Functions
.. kernel-doc:: fs/block_dev.c
:export:
+.. kernel-doc:: fs/anon_inodes.c
+ :export:
+
+.. kernel-doc:: fs/attr.c
+ :export:
+
+.. kernel-doc:: fs/d_path.c
+ :export:
+
+.. kernel-doc:: fs/dax.c
+ :export:
+
+.. kernel-doc:: fs/direct-io.c
+ :export:
+
+.. kernel-doc:: fs/file_table.c
+ :export:
+
+.. kernel-doc:: fs/libfs.c
+ :export:
+
+.. kernel-doc:: fs/posix_acl.c
+ :export:
+
+.. kernel-doc:: fs/stat.c
+ :export:
+
+.. kernel-doc:: fs/sync.c
+ :export:
+
+.. kernel-doc:: fs/xattr.c
+ :export:
+
The proc filesystem
===================
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 72615a2c0752..51c136c821bf 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -10,10 +10,6 @@ union-filesystems). An overlay-filesystem tries to present a
filesystem which is the result over overlaying one filesystem on top
of the other.
-The result will inevitably fail to look exactly like a normal
-filesystem for various technical reasons. The expectation is that
-many use cases will be able to ignore these differences.
-
Overlay objects
---------------
@@ -266,6 +262,30 @@ rightmost one and going left. In the above example lower1 will be the
top, lower2 the middle and lower3 the bottom layer.
+Metadata only copy up
+---------------------
+
+When the metadata only copy up feature is enabled, overlayfs will only
+copy up metadata (as opposed to the whole file) when a metadata-specific
+operation like chown/chmod is performed. The full file will be copied up
+later when the file is opened for a WRITE operation.
+
+In other words, this is a delayed data copy up operation: data is copied
+up only when there is a need to actually modify it.
+
+There are multiple ways to enable/disable this feature. A config option
+CONFIG_OVERLAY_FS_METACOPY can be set/unset to enable/disable this feature
+by default. Or one can enable/disable it at module load time with module
+parameter metacopy=on/off. Lastly, there is also a per mount option
+metacopy=on/off to enable/disable this feature per mount.
+
+Do not use metacopy=on with untrusted upper/lower directories. Otherwise
+an attacker can create a handcrafted file with appropriate REDIRECT and
+METACOPY xattrs, and gain access to the file on the lower layer pointed
+to by REDIRECT. This should not be possible on a local system, as setting
+"trusted." xattrs requires CAP_SYS_ADMIN, but it could be possible for
+untrusted layers like those on a pen drive.
+
Sharing and copying layers
--------------------------
@@ -284,7 +304,7 @@ though it will not result in a crash or deadlock.
Mounting an overlay using an upper layer path, where the upper layer path
was previously used by another mounted overlay in combination with a
different lower layer path, is allowed, unless the "inodes index" feature
-is enabled.
+or "metadata only copy up" feature is enabled.
With the "inodes index" feature, on the first time mount, an NFS file
handle of the lower layer root directory, along with the UUID of the lower
@@ -297,6 +317,10 @@ lower root origin, mount will fail with ESTALE. An overlayfs mount with
does not support NFS export, lower filesystem does not have a valid UUID or
if the upper filesystem does not support extended attributes.
+For "metadata only copy up" feature there is no verification mechanism at
+mount time. So if same upper is mounted with different set of lower, mount
+probably will succeed but expect the unexpected later on. So don't do it.
+
It is quite a common practice to copy overlay layers to a different
directory tree on the same or different underlying filesystem, and even
to a different machine. With the "inodes index" feature, trying to mount
@@ -306,27 +330,40 @@ the copied layers will fail the verification of the lower root file handle.
Non-standard behavior
---------------------
-The copy_up operation essentially creates a new, identical file and
-moves it over to the old name. Any open files referring to this inode
-will access the old data.
+Overlayfs can now act as a POSIX-compliant filesystem with the following
+features turned on:
+
+1) "redirect_dir"
+
+Enabled with the mount option or module option: "redirect_dir=on" or with
+the kernel config option CONFIG_OVERLAY_FS_REDIRECT_DIR=y.
+
+If this feature is disabled, then rename(2) on a lower or merged directory
+will fail with EXDEV ("Invalid cross-device link").
+
+2) "inode index"
+
+Enabled with the mount option or module option "index=on" or with the
+kernel config option CONFIG_OVERLAY_FS_INDEX=y.
-The new file may be on a different filesystem, so both st_dev and st_ino
-of the real file may change. The values of st_dev and st_ino returned by
-stat(2) on an overlay object are often not the same as the real file
-stat(2) values to prevent the values from changing on copy_up.
+If this feature is disabled and a file with multiple hard links is copied
+up, then this will "break" the link. Changes will not be propagated to
+other names referring to the same inode.
-Unless "xino" feature is enabled, when overlay layers are not all on the
-same underlying filesystem, the value of st_dev may be different for two
-non-directory objects in the same overlay filesystem and the value of
-st_ino for directory objects may be non persistent and could change even
-while the overlay filesystem is still mounted.
+3) "xino"
-Unless "inode index" feature is enabled, if a file with multiple hard
-links is copied up, then this will "break" the link. Changes will not be
-propagated to other names referring to the same inode.
+Enabled with the mount option "xino=auto" or "xino=on", with the module
+option "xino_auto=on" or with the kernel config option
+CONFIG_OVERLAY_FS_XINO_AUTO=y. Also implicitly enabled by using the same
+underlying filesystem for all layers making up the overlay.
-Unless "redirect_dir" feature is enabled, rename(2) on a lower or merged
-directory will fail with EXDEV.
+If this feature is disabled or the underlying filesystem doesn't have
+enough free bits in the inode number, then overlayfs will not be able to
+guarantee that the values of st_ino and st_dev returned by stat(2) and the
+value of d_ino returned by readdir(3) will behave as on a normal filesystem.
+E.g. the value of st_dev may be different for two objects in the same
+overlay filesystem and the value of st_ino for directory objects may not be
+persistent and could change even while the overlay filesystem is mounted.
Changes to underlying filesystems
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 17bb4dc28fae..7b7b845c490a 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -602,3 +602,23 @@ in your dentry operations instead.
dentry separately, and it now has request_mask and query_flags arguments
to specify the fields and sync type requested by statx. Filesystems not
supporting any statx-specific features may ignore the new arguments.
+--
+[mandatory]
+ ->atomic_open() calling conventions have changed. Gone is int *opened,
+ along with FILE_OPENED/FILE_CREATED. In place of those we have
+ FMODE_OPENED/FMODE_CREATED, set in file->f_mode. Additionally, return
+ value for 'called finish_no_open(), open it yourself' case has become
+ 0, not 1. Since finish_no_open() itself is returning 0 now, that part
+ does not need any changes in ->atomic_open() instances.
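+
+ A hedged sketch of an updated instance under the new conventions (the
+ foofs_* names are hypothetical; the lookup helper is assumed to create
+ the inode if needed and instantiate the dentry):
+
+	static int foofs_atomic_open(struct inode *dir, struct dentry *dentry,
+				     struct file *file, unsigned open_flag,
+				     umode_t create_mode)
+	{
+		struct inode *inode;
+		bool created = false;
+
+		inode = foofs_lookup_or_create(dir, dentry, open_flag,
+					       create_mode, &created);
+		if (IS_ERR(inode))
+			return PTR_ERR(inode);
+		if (!S_ISREG(inode->i_mode))
+			/* leave the actual open to the caller; returns 0 now */
+			return finish_no_open(file, dentry);
+		if (created)
+			/* replaces FILE_CREATED in the old *opened argument */
+			file->f_mode |= FMODE_CREATED;
+		/* sets FMODE_OPENED in file->f_mode */
+		return finish_open(file, dentry, NULL);
+	}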
+--
+[mandatory]
+ alloc_file() has become static now; two wrappers are to be used instead.
+ alloc_file_pseudo(inode, vfsmount, name, flags, ops) is for the cases
+ when dentry needs to be created; that's the majority of old alloc_file()
+ users. Calling conventions: on success a reference to the new struct
+ file is returned and the caller's reference to the inode is subsumed by
+ that. On failure, ERR_PTR() is returned and no caller's references are
+ affected, so the caller needs to drop the inode reference it held.
+ alloc_file_clone(file, flags, ops) does not affect any caller's references.
+ On success you get a new struct file sharing the mount/dentry with the
+ original, on failure - ERR_PTR().
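+
+ A hedged sketch of the alloc_file_pseudo() conventions described above
+ (example_open_pseudo() and the "[example]" name are hypothetical; inode,
+ mnt and ops are assumed to be set up by the caller, which holds a
+ reference to the inode):
+
+	struct file *example_open_pseudo(struct inode *inode,
+					 struct vfsmount *mnt,
+					 const struct file_operations *ops)
+	{
+		struct file *file;
+
+		file = alloc_file_pseudo(inode, mnt, "[example]", O_RDWR, ops);
+		if (IS_ERR(file))
+			iput(inode);	/* failure left our reference alone */
+		return file;		/* success subsumed the reference */
+	}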
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 520f6a84cf50..22b4b00dee31 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -870,6 +870,8 @@ Committed_AS: 100056 kB
VmallocTotal: 112216 kB
VmallocUsed: 428 kB
VmallocChunk: 111088 kB
+Percpu: 62080 kB
+HardwareCorrupted: 0 kB
AnonHugePages: 49152 kB
ShmemHugePages: 0 kB
ShmemPmdMapped: 0 kB
@@ -915,6 +917,8 @@ MemAvailable: An estimate of how much memory is available for starting new
Dirty: Memory which is waiting to get written back to the disk
Writeback: Memory which is actively being written back to the disk
AnonPages: Non-file backed pages mapped into userspace page tables
+HardwareCorrupted: The amount of RAM, in KB, that the kernel identifies as
+ corrupted.
AnonHugePages: Non-file backed huge pages mapped into userspace page tables
Mapped: files which have been mmaped, such as libraries
Shmem: Total memory used by shared memory (shmem) and tmpfs
@@ -959,6 +963,8 @@ Committed_AS: The amount of memory presently allocated on the system.
VmallocTotal: total size of vmalloc memory area
VmallocUsed: amount of vmalloc area which is used
VmallocChunk: largest contiguous block of vmalloc area which is free
+ Percpu: Memory allocated to the percpu allocator used to back percpu
+ allocations. This stat excludes the cost of metadata.
..............................................................................
diff --git a/Documentation/filesystems/relay.txt b/Documentation/filesystems/relay.txt
index 33e2f3694733..cd709a94d054 100644
--- a/Documentation/filesystems/relay.txt
+++ b/Documentation/filesystems/relay.txt
@@ -222,7 +222,7 @@ using debugfs:
*/
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent,
- int mode,
+ umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
@@ -375,7 +375,7 @@ would be very similar:
static int subbuf_start(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
- unsigned int prev_padding)
+ size_t prev_padding)
{
if (prev_subbuf)
*((unsigned *)prev_subbuf) = prev_padding;
diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
index 9de4303201e1..d412b236a9d6 100644
--- a/Documentation/filesystems/seq_file.txt
+++ b/Documentation/filesystems/seq_file.txt
@@ -66,23 +66,39 @@ kernel 3.10. Current versions require the following update
The iterator interface
-Modules implementing a virtual file with seq_file must implement a simple
-iterator object that allows stepping through the data of interest.
-Iterators must be able to move to a specific position - like the file they
-implement - but the interpretation of that position is up to the iterator
-itself. A seq_file implementation that is formatting firewall rules, for
-example, could interpret position N as the Nth rule in the chain.
-Positioning can thus be done in whatever way makes the most sense for the
-generator of the data, which need not be aware of how a position translates
-to an offset in the virtual file. The one obvious exception is that a
-position of zero should indicate the beginning of the file.
+Modules implementing a virtual file with seq_file must implement an
+iterator object that allows stepping through the data of interest
+during a "session" (roughly one read() system call). If the iterator
+is able to move to a specific position - like the file it implements,
+though with freedom to map the position number to a sequence location
+in whatever way is convenient - the iterator need only exist
+transiently during a session. If the iterator cannot easily find a
+numerical position but works well with a first/next interface, the
+iterator can be stored in the private data area and continue from one
+session to the next.
+
+A seq_file implementation that is formatting firewall rules from a
+table, for example, could provide a simple iterator that interprets
+position N as the Nth rule in the chain. A seq_file implementation
+that presents the content of a potentially volatile linked list
+might record a pointer into that list, provided that can be done
+without risk of the current location being removed.
+
+Positioning can thus be done in whatever way makes the most sense for
+the generator of the data, which need not be aware of how a position
+translates to an offset in the virtual file. The one obvious exception
+is that a position of zero should indicate the beginning of the file.
The /proc/sequence iterator just uses the count of the next number it
will output as its position.
-Four functions must be implemented to make the iterator work. The first,
-called start() takes a position as an argument and returns an iterator
-which will start reading at that position. For our simple sequence example,
+Four functions must be implemented to make the iterator work. The
+first, called start(), starts a session and takes a position as an
+argument, returning an iterator which will start reading at that
+position. The pos passed to start() will always be either zero, or
+the most recent pos used in the previous session.
+
+For our simple sequence example,
the start() function looks like:
static void *ct_seq_start(struct seq_file *s, loff_t *pos)
@@ -101,11 +117,12 @@ implementations; in most cases the start() function should check for a
"past end of file" condition and return NULL if need be.
For more complicated applications, the private field of the seq_file
-structure can be used. There is also a special value which can be returned
-by the start() function called SEQ_START_TOKEN; it can be used if you wish
-to instruct your show() function (described below) to print a header at the
-top of the output. SEQ_START_TOKEN should only be used if the offset is
-zero, however.
+structure can be used to hold state from session to session. There is
+also a special value which can be returned by the start() function
+called SEQ_START_TOKEN; it can be used if you wish to instruct your
+show() function (described below) to print a header at the top of the
+output. SEQ_START_TOKEN should only be used if the offset is zero,
+however.
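+
+A hedged sketch of a start() that emits such a header (ex_seq_get() is a
+hypothetical helper returning the iterator for a position, or NULL past
+end of file):
+
+	static void *ex_seq_start(struct seq_file *s, loff_t *pos)
+	{
+		if (*pos == 0)
+			return SEQ_START_TOKEN;	/* show() prints a header */
+		return ex_seq_get(s, *pos);
+	}
+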
The next function to implement is called, amazingly, next(); its job is to
move the iterator forward to the next position in the sequence. The
@@ -121,9 +138,13 @@ complete. Here's the example version:
return spos;
}
-The stop() function is called when iteration is complete; its job, of
-course, is to clean up. If dynamic memory is allocated for the iterator,
-stop() is the place to free it.
+The stop() function closes a session; its job, of course, is to clean
+up. If dynamic memory is allocated for the iterator, stop() is the
+place to free it; if a lock was taken by start(), stop() must release
+that lock. The value that *pos was set to by the last next() call
+before stop() is remembered, and used for the first start() call of
+the next session unless lseek() has been called on the file; in that
+case the next start() will be asked to start at position zero.
static void ct_seq_stop(struct seq_file *s, void *v)
{
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index f608180ad59d..4b2084d0f1fb 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -386,7 +386,7 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *, struct file *,
- unsigned open_flag, umode_t create_mode, int *opened);
+ unsigned open_flag, umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
};
@@ -496,13 +496,15 @@ otherwise noted.
atomic_open: called on the last component of an open. Using this optional
method the filesystem can look up, possibly create and open the file in
- one atomic operation. If it cannot perform this (e.g. the file type
- turned out to be wrong) it may signal this by returning 1 instead of
- usual 0 or -ve . This method is only called if the last component is
- negative or needs lookup. Cached positive dentries are still handled by
- f_op->open(). If the file was created, the FILE_CREATED flag should be
- set in "opened". In case of O_EXCL the method must only succeed if the
- file didn't exist and hence FILE_CREATED shall always be set on success.
+ one atomic operation. If it wants to leave actual opening to the
+ caller (e.g. if the file turned out to be a symlink, device, or just
+ something the filesystem won't do an atomic open for), it may signal this by
+ returning finish_no_open(file, dentry). This method is only called if
+ the last component is negative or needs lookup. Cached positive dentries
+ are still handled by f_op->open(). If the file was created,
+ FMODE_CREATED flag should be set in file->f_mode. In case of O_EXCL
+ the method must only succeed if the file didn't exist and hence FMODE_CREATED
+ shall always be set on success.
tmpfile: called in the end of O_TMPFILE open(). Optional, equivalent to
atomically creating, opening and unlinking a file in given directory.
@@ -987,8 +989,7 @@ struct dentry_operations {
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
- struct dentry *(*d_real)(struct dentry *, const struct inode *,
- unsigned int, unsigned int);
+ struct dentry *(*d_real)(struct dentry *, const struct inode *);
};
d_revalidate: called when the VFS needs to revalidate a dentry. This
@@ -1122,22 +1123,15 @@ struct dentry_operations {
dentry being transited from.
d_real: overlay/union type filesystems implement this method to return one of
- the underlying dentries hidden by the overlay. It is used in three
+ the underlying dentries hidden by the overlay. It is used in two
different modes:
- Called from open it may need to copy-up the file depending on the
- supplied open flags. This mode is selected with a non-zero flags
- argument. In this mode the d_real method can return an error.
-
Called from file_dentry() it returns the real dentry matching the inode
argument. The real dentry may be from a lower layer already copied up,
but still referenced from the file. This mode is selected with a
- non-NULL inode argument. This will always succeed.
-
- With NULL inode and zero flags the topmost real underlying dentry is
- returned. This will always succeed.
+ non-NULL inode argument.
- This method is never called with both non-NULL inode and non-zero flags.
+ With NULL inode the topmost real underlying dentry is returned.
Each dentry has a pointer to its parent dentry, as well as a hash list
of child dentries. Child dentries are basically like files in a
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 4d9ff0a7f8e1..a9ae82fb9d13 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -223,8 +223,6 @@ Deprecated Mount Options
Name Removal Schedule
---- ----------------
- barrier no earlier than v4.15
- nobarrier no earlier than v4.15
Removed Mount Options
@@ -236,6 +234,8 @@ Removed Mount Options
ihashsize v4.0
irixsgid v4.0
osyncisdsync/osyncisosync v4.0
+ barrier v4.19
+ nobarrier v4.19
sysctls
diff --git a/Documentation/fpga/dfl.txt b/Documentation/fpga/dfl.txt
new file mode 100644
index 000000000000..6df4621c3f2a
--- /dev/null
+++ b/Documentation/fpga/dfl.txt
@@ -0,0 +1,285 @@
+===============================================================================
+ FPGA Device Feature List (DFL) Framework Overview
+-------------------------------------------------------------------------------
+ Enno Luebbers <enno.luebbers@intel.com>
+ Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ Wu Hao <hao.wu@intel.com>
+
+The Device Feature List (DFL) FPGA framework (and the drivers written to
+this framework) hides the details of the low-level hardware and provides
+unified interfaces to userspace. Applications can use these interfaces to
+configure, enumerate, open and access FPGA accelerators on platforms which
+implement the DFL in the device memory. Besides this, the DFL framework
+enables system-level management functions such as FPGA reconfiguration.
+
+
+Device Feature List (DFL) Overview
+==================================
+A Device Feature List (DFL) defines a linked list of feature headers within the
+device MMIO space to provide an extensible way of adding features. Software can
+walk through these predefined data structures to enumerate FPGA features:
+FPGA Interface Unit (FIU), Accelerated Function Unit (AFU) and Private Features,
+as illustrated below:
+
+ Header Header Header Header
+ +----------+ +-->+----------+ +-->+----------+ +-->+----------+
+ | Type | | | Type | | | Type | | | Type |
+ | FIU | | | Private | | | Private | | | Private |
+ +----------+ | | Feature | | | Feature | | | Feature |
+ | Next_DFH |--+ +----------+ | +----------+ | +----------+
+ +----------+ | Next_DFH |--+ | Next_DFH |--+ | Next_DFH |--> NULL
+ | ID | +----------+ +----------+ +----------+
+ +----------+ | ID | | ID | | ID |
+ | Next_AFU |--+ +----------+ +----------+ +----------+
+ +----------+ | | Feature | | Feature | | Feature |
+ | Header | | | Register | | Register | | Register |
+ | Register | | | Set | | Set | | Set |
+ | Set | | +----------+ +----------+ +----------+
+ +----------+ | Header
+ +-->+----------+
+ | Type |
+ | AFU |
+ +----------+
+ | Next_DFH |--> NULL
+ +----------+
+ | GUID |
+ +----------+
+ | Header |
+ | Register |
+ | Set |
+ +----------+
+
+An FPGA Interface Unit (FIU) represents a standalone functional unit of the
+interface to the FPGA, e.g. the FPGA Management Engine (FME) and Port (FME
+and Port are described in more detail in later sections).
+
+An Accelerated Function Unit (AFU) represents an FPGA programmable region
+and always connects to an FIU (e.g. a Port) as its child, as illustrated
+above.
+
+Private Features represent sub-features of an FIU or AFU. They can be
+various function blocks with different IDs, but all private features which
+belong to the same FIU or AFU must be linked into one list via the Next
+Device Feature Header (Next_DFH) pointer.
+
+Each FIU, AFU and Private Feature may implement its own functional
+registers. The functional register set for an FIU or AFU is named the
+Header Register Set, e.g. the FME Header Register Set, and the one for a
+Private Feature is named the Feature Register Set, e.g. the FME Partial
+Reconfiguration Feature Register Set.
+
+The Device Feature List provides a way of linking features together:
+software can conveniently locate each feature by walking through the list,
+and the list can be implemented in the register regions of any FPGA device.
+
+
+FIU - FME (FPGA Management Engine)
+==================================
+The FPGA Management Engine performs reconfiguration and other infrastructure
+functions. Each FPGA device only has one FME.
+
+User-space applications can acquire exclusive access to the FME using open(),
+and release it using close().
+
+The following functions are exposed through ioctls:
+
+ Get driver API version (DFL_FPGA_GET_API_VERSION)
+ Check for extensions (DFL_FPGA_CHECK_EXTENSION)
+ Program bitstream (DFL_FPGA_FME_PORT_PR)
+
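+A hedged sketch of issuing a partial reconfiguration request from user
+space (the /dev/dfl-fme.0 device path is illustrative; error checking of
+the ioctl is left to the caller):
+
+	#include <fcntl.h>
+	#include <string.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include <linux/fpga-dfl.h>
+
+	int pr_program(__u32 port_id, void *buf, __u32 len)
+	{
+		struct dfl_fpga_fme_port_pr pr;
+		int fd, ret;
+
+		fd = open("/dev/dfl-fme.0", O_RDWR);
+		if (fd < 0)
+			return fd;
+		memset(&pr, 0, sizeof(pr));
+		pr.argsz = sizeof(pr);
+		pr.port_id = port_id;
+		pr.buffer_size = len;
+		pr.buffer_address = (__u64)(unsigned long)buf;
+		ret = ioctl(fd, DFL_FPGA_FME_PORT_PR, &pr);
+		close(fd);
+		return ret;
+	}
+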
+More functions are exposed through sysfs
+(/sys/class/fpga_region/regionX/dfl-fme.n/):
+
+ Read bitstream ID (bitstream_id)
+ bitstream_id indicates the version of the static FPGA region.
+
+ Read bitstream metadata (bitstream_metadata)
+ bitstream_metadata includes detailed information about the static FPGA region,
+ e.g. synthesis date and seed.
+
+ Read number of ports (ports_num)
+ One FPGA device may have more than one port; this sysfs interface
+ indicates how many ports the FPGA device has.
+
+
+FIU - PORT
+==========
+A port represents the interface between the static FPGA fabric and a partially
+reconfigurable region containing an AFU. It controls the communication from SW
+to the accelerator and exposes features such as reset and debug. Each FPGA
+device may have more than one port, but always one AFU per port.
+
+
+AFU
+===
+An AFU is attached to a port FIU and exposes a fixed length MMIO region to be
+used for accelerator-specific control registers.
+
+User-space applications can acquire exclusive access to an AFU attached to a
+port by using open() on the port device node and release it using close().
+
+The following functions are exposed through ioctls:
+
+ Get driver API version (DFL_FPGA_GET_API_VERSION)
+ Check for extensions (DFL_FPGA_CHECK_EXTENSION)
+ Get port info (DFL_FPGA_PORT_GET_INFO)
+ Get MMIO region info (DFL_FPGA_PORT_GET_REGION_INFO)
+ Map DMA buffer (DFL_FPGA_PORT_DMA_MAP)
+ Unmap DMA buffer (DFL_FPGA_PORT_DMA_UNMAP)
+ Reset AFU (*DFL_FPGA_PORT_RESET)
+
+*DFL_FPGA_PORT_RESET: reset the FPGA Port and its AFU. Userspace can reset
+the Port at any time, e.g. during DMA or Partial Reconfiguration. This
+should never cause any system-level issue, only functional failure (e.g.
+DMA or PR operation failure), which must be recoverable.
+
+User-space applications can also mmap() accelerator MMIO regions.
+
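+A hedged sketch of the open/ioctl/mmap flow (the /dev/dfl-port.0 path is
+illustrative and error handling is omitted for brevity):
+
+	#include <fcntl.h>
+	#include <string.h>
+	#include <sys/ioctl.h>
+	#include <sys/mman.h>
+	#include <linux/fpga-dfl.h>
+
+	void *map_afu_mmio(void)
+	{
+		struct dfl_fpga_port_region_info info;
+		int fd = open("/dev/dfl-port.0", O_RDWR);
+
+		memset(&info, 0, sizeof(info));
+		info.argsz = sizeof(info);
+		info.index = DFL_PORT_REGION_INDEX_AFU;
+		ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, &info);
+
+		return mmap(NULL, info.size, PROT_READ | PROT_WRITE,
+			    MAP_SHARED, fd, info.offset);
+	}
+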
+More functions are exposed through sysfs:
+(/sys/class/fpga_region/<regionX>/<dfl-port.m>/):
+
+ Read Accelerator GUID (afu_id)
+ afu_id indicates which PR bitstream is programmed to this AFU.
+
+
+DFL Framework Overview
+======================
+
+ +----------+ +--------+ +--------+ +--------+
+ | FME | | AFU | | AFU | | AFU |
+ | Module | | Module | | Module | | Module |
+ +----------+ +--------+ +--------+ +--------+
+ +-----------------------+
+ | FPGA Container Device | Device Feature List
+ | (FPGA Base Region) | Framework
+ +-----------------------+
+--------------------------------------------------------------------
+ +----------------------------+
+ | FPGA DFL Device Module |
+ | (e.g. PCIE/Platform Device)|
+ +----------------------------+
+ +------------------------+
+ | FPGA Hardware Device |
+ +------------------------+
+
+The in-kernel DFL framework provides common interfaces to create the
+container device (FPGA base region), discover feature devices and their
+private features from the given Device Feature Lists, and create platform
+devices for the feature devices (e.g. FME, Port and AFU) with related
+resources under the container device. It also abstracts operations for the
+private features and exposes common ops to feature device drivers.
+
+The FPGA DFL Device can be different kinds of hardware, e.g. a PCIe device
+or a platform device. Its driver module is always loaded first once the
+device is created by the system. This driver plays an infrastructural role
+in the driver architecture: it locates the DFLs in the device memory and
+hands them and the related resources to the common interfaces of the DFL
+framework for enumeration. (Please refer to drivers/fpga/dfl.c for the
+detailed enumeration APIs.)
+
+The FPGA Management Engine (FME) driver is a platform driver which is loaded
+automatically after FME platform device creation from the DFL device module. It
+provides the key features for FPGA management, including:
+
+ a) Expose static FPGA region information, e.g. version and metadata.
+ Users can read related information via sysfs interfaces exposed
+ by FME driver.
+
+ b) Partial Reconfiguration. The FME driver creates FPGA manager, FPGA
+ bridges and FPGA regions during PR sub feature initialization. Once
+ it receives a DFL_FPGA_FME_PORT_PR ioctl from user, it invokes the
+ common interface function from FPGA Region to complete the partial
+ reconfiguration of the PR bitstream to the given port.
+
+Similar to the FME driver, the FPGA Accelerated Function Unit (AFU) driver
+is probed once the AFU platform device is created. The main function of
+this module is to provide an interface for userspace applications to
+access the individual accelerators, including basic reset control on the
+port, AFU MMIO region export, and DMA buffer mapping services.
+
+After the feature platform devices are created, matching platform drivers
+are loaded automatically to handle the different functionalities. Please
+refer to the preceding sections for detailed information on the functional
+units which have already been implemented under this DFL framework.
+
+
+Partial Reconfiguration
+=======================
+As mentioned above, accelerators can be reconfigured through partial
+reconfiguration with a PR bitstream file. The PR bitstream file must have been
+generated for the exact static FPGA region and targeted reconfigurable region
+(port) of the FPGA, otherwise, the reconfiguration operation will fail and
+possibly cause system instability. This compatibility can be checked by
+comparing the compatibility ID noted in the header of PR bitstream file against
+the compat_id exposed by the target FPGA region. This check is usually done by
+userspace before calling the reconfiguration IOCTL.
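+
+A hedged sketch of that userspace check (how the expected ID is extracted
+from the bitstream header is tool-specific and not shown):
+
+	#include <stdio.h>
+	#include <string.h>
+
+	/* 1 if the region's compat_id matches the expected id string */
+	int pr_compatible(const char *region_path, const char *expected_id)
+	{
+		char path[256], id[80];
+		FILE *f;
+
+		snprintf(path, sizeof(path), "%s/compat_id", region_path);
+		f = fopen(path, "r");
+		if (!f)
+			return 0;
+		if (!fgets(id, sizeof(id), f)) {
+			fclose(f);
+			return 0;
+		}
+		fclose(f);
+		id[strcspn(id, "\n")] = '\0';
+		return strcmp(id, expected_id) == 0;
+	}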
+
+
+Device enumeration
+==================
+This section introduces how applications enumerate the FPGA device from
+the sysfs hierarchy under /sys/class/fpga_region.
+
+In the example below, two DFL-based FPGA devices are installed in the host.
+Each FPGA device has one FME and two ports (AFUs).
+
+FPGA regions are created under /sys/class/fpga_region/
+
+ /sys/class/fpga_region/region0
+ /sys/class/fpga_region/region1
+ /sys/class/fpga_region/region2
+ ...
+
+The application needs to search each regionX folder; if a feature device
+is found (e.g. "dfl-port.n" or "dfl-fme.m"), then this is the base FPGA
+region which represents an FPGA device.
+
+Each base region has one FME and two ports (AFUs) as child devices:
+
+ /sys/class/fpga_region/region0/dfl-fme.0
+ /sys/class/fpga_region/region0/dfl-port.0
+ /sys/class/fpga_region/region0/dfl-port.1
+ ...
+
+ /sys/class/fpga_region/region3/dfl-fme.1
+ /sys/class/fpga_region/region3/dfl-port.2
+ /sys/class/fpga_region/region3/dfl-port.3
+ ...
+
+In general, the FME/AFU sysfs interfaces are named as follows:
+
+ /sys/class/fpga_region/<regionX>/<dfl-fme.n>/
+ /sys/class/fpga_region/<regionX>/<dfl-port.m>/
+
+with 'n' consecutively numbering all FMEs and 'm' consecutively numbering all
+ports.
+
+The device nodes used for ioctl() or mmap() can be referenced through:
+
+ /sys/class/fpga_region/<regionX>/<dfl-fme.n>/dev
+ /sys/class/fpga_region/<regionX>/<dfl-port.m>/dev
+
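+A hedged sketch of the search described above (error handling kept
+minimal):
+
+	#include <dirent.h>
+	#include <stdio.h>
+	#include <string.h>
+
+	void list_fpga_devices(void)
+	{
+		DIR *class_dir = opendir("/sys/class/fpga_region");
+		struct dirent *region, *e;
+		char path[300];
+		DIR *rdir;
+
+		if (!class_dir)
+			return;
+		while ((region = readdir(class_dir))) {
+			if (strncmp(region->d_name, "region", 6))
+				continue;
+			snprintf(path, sizeof(path),
+				 "/sys/class/fpga_region/%s", region->d_name);
+			rdir = opendir(path);
+			if (!rdir)
+				continue;
+			while ((e = readdir(rdir)))
+				if (!strncmp(e->d_name, "dfl-fme.", 8))
+					printf("FPGA device at %s\n", path);
+			closedir(rdir);
+		}
+		closedir(class_dir);
+	}
+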
+
+Add new FIUs support
+====================
+Developers may create new function blocks (FIUs) under this DFL framework.
+A new platform device driver then needs to be developed for the new
+feature device (FIU), in the same way as the existing feature device
+drivers (e.g. the FME and Port/AFU platform device drivers). Besides that,
+the DFL framework enumeration code also needs to be modified to detect the
+new FIU type and create the related platform devices.
+
+
+Add new private features support
+================================
+In some cases, new private features need to be added to existing FIUs
+(e.g. the FME or Port). Developers don't need to touch the enumeration
+code in the DFL framework, as each private feature is parsed automatically
+and the related MMIO resources can be found under the FIU platform device
+created by the DFL framework. A developer only needs to provide a
+sub-feature driver with a matching feature ID. The FME Partial
+Reconfiguration sub-feature driver (see drivers/fpga/dfl-fme-pr.c) can
+serve as a reference.
+
+
+Open discussion
+===============
+The FME driver currently exports one ioctl (DFL_FPGA_FME_PORT_PR) for
+partial reconfiguration. In the future, if unified user interfaces for
+reconfiguration are added, the FME driver should switch to them from the
+ioctl interface.
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
new file mode 100644
index 000000000000..a740e491dfcc
--- /dev/null
+++ b/Documentation/gpu/amdgpu.rst
@@ -0,0 +1,129 @@
+=========================
+ drm/amdgpu AMDgpu driver
+=========================
+
+The drm/amdgpu driver supports all AMD Radeon GPUs based on the Graphics Core
+Next (GCN) architecture.
+
+Module Parameters
+=================
+
+The amdgpu driver supports the following module parameters:
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+
+Core Driver Infrastructure
+==========================
+
+This section covers core driver infrastructure.
+
+.. _amdgpu_memory_domains:
+
+Memory Domains
+--------------
+
+.. kernel-doc:: include/uapi/drm/amdgpu_drm.h
+ :doc: memory domains
+
+Buffer Objects
+--------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+ :doc: amdgpu_object
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+ :internal:
+
+PRIME Buffer Sharing
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+ :doc: PRIME Buffer Sharing
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+ :internal:
+
+MMU Notifier
+------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+ :doc: MMU Notifier
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+ :internal:
+
+AMDGPU Virtual Memory
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+ :doc: GPUVM
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+ :internal:
+
+Interrupt Handling
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+ :doc: Interrupt Handling
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+ :internal:
+
+GPU Power/Thermal Controls and Monitoring
+=========================================
+
+This section covers hwmon and power/thermal controls.
+
+HWMON Interfaces
+----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: hwmon
+
+GPU sysfs Power State Interfaces
+--------------------------------
+
+GPU power controls are exposed via sysfs files.
+
+power_dpm_state
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: power_dpm_state
+
+power_dpm_force_performance_level
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: power_dpm_force_performance_level
+
+pp_table
+~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: pp_table
+
+pp_od_clk_voltage
+~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: pp_od_clk_voltage
+
+pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+
+pp_power_profile_mode
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: pp_power_profile_mode
+
+busy_percent
+~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: busy_percent
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index f982558fc25d..65be325bf282 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -4,6 +4,7 @@ GPU Driver Documentation
.. toctree::
+ amdgpu
i915
meson
pl111
diff --git a/Documentation/gpu/drm-client.rst b/Documentation/gpu/drm-client.rst
new file mode 100644
index 000000000000..7e672063e7eb
--- /dev/null
+++ b/Documentation/gpu/drm-client.rst
@@ -0,0 +1,12 @@
+=================
+Kernel clients
+=================
+
+.. kernel-doc:: drivers/gpu/drm/drm_client.c
+ :doc: overview
+
+.. kernel-doc:: include/drm/drm_client.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_client.c
+ :export:
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index e37557b30f62..f9cfcdcdf024 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -109,6 +109,15 @@ Framebuffer CMA Helper Functions Reference
.. _drm_bridges:
+Framebuffer GEM Helper Reference
+================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+ :export:
+
Bridges
=======
@@ -169,6 +178,15 @@ Display Port Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
:export:
+Display Port CEC Helper Functions Reference
+===========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_cec.c
+ :doc: dp cec helpers
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_cec.c
+ :export:
+
Display Port Dual Mode Adaptor Helper Functions Reference
=========================================================
@@ -282,13 +300,13 @@ Auxiliary Modeset Helpers
.. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c
:export:
-Framebuffer GEM Helper Reference
-================================
+OF/DT Helpers
+=============
-.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_of.c
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_of.c
:export:
Legacy Plane Helper Reference
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 1dffd1ac4cd4..5dee6b8a4c12 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -56,11 +56,12 @@ Overview
The basic object structure KMS presents to userspace is fairly simple.
Framebuffers (represented by :c:type:`struct drm_framebuffer <drm_framebuffer>`,
-see `Frame Buffer Abstraction`_) feed into planes. One or more (or even no)
-planes feed their pixel data into a CRTC (represented by :c:type:`struct
-drm_crtc <drm_crtc>`, see `CRTC Abstraction`_) for blending. The precise
-blending step is explained in more detail in `Plane Composition Properties`_ and
-related chapters.
+see `Frame Buffer Abstraction`_) feed into planes. Planes are represented by
+:c:type:`struct drm_plane <drm_plane>`, see `Plane Abstraction`_ for more
+details. One or more (or even no) planes feed their pixel data into a CRTC
+(represented by :c:type:`struct drm_crtc <drm_crtc>`, see `CRTC Abstraction`_)
+for blending. The precise blending step is explained in more detail in `Plane
+Composition Properties`_ and related chapters.
For the output routing the first step is encoders (represented by
:c:type:`struct drm_encoder <drm_encoder>`, see `Encoder Abstraction`_). Those
@@ -373,6 +374,15 @@ Connector Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:export:
+Writeback Connectors
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
+ :export:
+
Encoder Abstraction
===================
@@ -457,7 +467,7 @@ Output discovery and initialization example
drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_output->base,
+ drm_connector_attach_encoder(&intel_output->base,
&intel_output->enc);
/* Set up the DDC bus. */
@@ -517,6 +527,12 @@ Standard Connector Properties
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:doc: standard connector properties
+HDMI Specific Connector Properties
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+ :doc: HDMI connector properties
+
Plane Composition Properties
----------------------------
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index b08e9dcd9177..21b6b72a9ba8 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -395,6 +395,8 @@ VMA Offset Manager
.. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
:export:
+.. _prime_buffer_sharing:
+
PRIME Buffer Sharing
====================
@@ -496,3 +498,21 @@ DRM Sync Objects
.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
:export:
+
+GPU Scheduler
+=============
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c
+ :doc: Overview
+
+Scheduler Function References
+-----------------------------
+
+.. kernel-doc:: include/drm/gpu_scheduler.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c
+ :export:
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index 00288f34c5a6..1fcf8e851e15 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -10,6 +10,7 @@ Linux GPU Driver Developer's Guide
drm-kms
drm-kms-helpers
drm-uapi
+ drm-client
drivers
vga-switcheroo
vgaarbiter
diff --git a/Documentation/gpu/kms-properties.csv b/Documentation/gpu/kms-properties.csv
index 07ed22ea3bd6..bfde04eddd14 100644
--- a/Documentation/gpu/kms-properties.csv
+++ b/Documentation/gpu/kms-properties.csv
@@ -17,6 +17,7 @@ Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,De
,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an Y offset for a connector
,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TDB
+,Optional,"""content type""",ENUM,"{ ""No Data"", ""Graphics"", ""Photo"", ""Cinema"", ""Game"" }",Connector,TBD
i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normaly in the range 0..1.0 are remapped to the range 16/255..235/255."
,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
diff --git a/Documentation/gpu/msm-crash-dump.rst b/Documentation/gpu/msm-crash-dump.rst
new file mode 100644
index 000000000000..757cd257e0d8
--- /dev/null
+++ b/Documentation/gpu/msm-crash-dump.rst
@@ -0,0 +1,96 @@
+=====================
+MSM Crash Dump Format
+=====================
+
+Following a GPU hang the MSM driver outputs debugging information via
+/sys/kernel/debug/dri/X/show or via devcoredump
+(/sys/class/devcoredump/dcdX/data).
+This document describes how the output is formatted.
+
+Each entry is in the form key: value. Section headers do not have a value,
+and all the contents of a section are indented two spaces from the header.
+Each section might have multiple array entries, the start of which is
+designated by a (-).
+
+Mappings
+--------
+
+kernel
+ The kernel version that generated the dump (UTS_RELEASE).
+
+module
+ The module that generated the crashdump.
+
+time
+ The kernel time at crash, formatted as seconds.microseconds.
+
+comm
+ Comm string for the binary that generated the fault.
+
+cmdline
+ Command line for the binary that generated the fault.
+
+revision
+ ID of the GPU that generated the crash, formatted as
+ core.major.minor.patchlevel.
+
+rbbm-status
+ The current value of RBBM_STATUS which shows what top level GPU
+ components are in use at the time of crash.
+
+ringbuffer
+ Section containing the contents of each ringbuffer. Each ringbuffer is
+ identified with an id number.
+
+ id
+ Ringbuffer ID (0 based index). Each ringbuffer in the section
+ will have its own unique id.
+ iova
+ GPU address of the ringbuffer.
+
+ last-fence
+ The last fence that was issued on the ringbuffer.
+
+ retired-fence
+ The last fence retired on the ringbuffer.
+
+ rptr
+ The current read pointer (rptr) for the ringbuffer.
+
+ wptr
+ The current write pointer (wptr) for the ringbuffer.
+
+ size
+ Maximum size of the ringbuffer programmed in the hardware.
+
+ data
+ The contents of the ring encoded as ascii85. Only the used
+ portions of the ring will be printed.
+
+bo
+ List of buffers from the hanging submission if available.
+ Each buffer object will have a unique iova.
+
+ iova
+ GPU address of the buffer object.
+
+ size
+ Allocated size of the buffer object.
+
+ data
+ The contents of the buffer object encoded with ascii85.
+ Trailing zeros at the end of the buffer will be skipped.
+
+registers
+ Set of register values. Each entry is on its own line enclosed
+ by brackets { }.
+
+ offset
+ Byte offset of the register from the start of the
+ GPU memory region.
+
+ value
+ Hexadecimal value of the register.
+
+registers-hlsq
+ (5xx only) Register values from the HLSQ aperture.
+ Same format as the register section.
diff --git a/Documentation/gpu/v3d.rst b/Documentation/gpu/v3d.rst
new file mode 100644
index 000000000000..543f7fbf526e
--- /dev/null
+++ b/Documentation/gpu/v3d.rst
@@ -0,0 +1,28 @@
+=====================================
+ drm/v3d Broadcom V3D Graphics Driver
+=====================================
+
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_drv.c
+ :doc: Broadcom V3D Graphics Driver
+
+GPU buffer object (BO) management
+---------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_bo.c
+ :doc: V3D GEM BO management support
+
+Address space management
+------------------------
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_mmu.c
+ :doc: Broadcom V3D MMU
+
+GPU Scheduling
+--------------
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_sched.c
+ :doc: Broadcom V3D scheduling
+
+Interrupts
+--------------
+
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_irq.c
+ :doc: Interrupt management for the V3D engine
diff --git a/Documentation/hwmon/ibmpowernv b/Documentation/hwmon/ibmpowernv
index 8826ba29db36..56468258711f 100644
--- a/Documentation/hwmon/ibmpowernv
+++ b/Documentation/hwmon/ibmpowernv
@@ -33,9 +33,48 @@ fanX_input Measured RPM value.
fanX_min Threshold RPM for alert generation.
fanX_fault 0: No fail condition
1: Failing fan
+
tempX_input Measured ambient temperature.
tempX_max Threshold ambient temperature for alert generation.
-inX_input Measured power supply voltage
+tempX_highest Historical maximum temperature
+tempX_lowest Historical minimum temperature
+tempX_enable Enable/disable all temperature sensors belonging to the
+ sub-group. In POWER9, this attribute corresponds to
+ each OCC. Using this attribute each OCC can be asked to
+ disable/enable all of its temperature sensors.
+ 1: Enable
+ 0: Disable
+
+inX_input Measured power supply voltage (millivolt)
inX_fault 0: No fail condition.
1: Failing power supply.
-power1_input System power consumption (microWatt)
+inX_highest Historical maximum voltage
+inX_lowest Historical minimum voltage
+inX_enable Enable/disable all voltage sensors belonging to the
+ sub-group. In POWER9, this attribute corresponds to
+ each OCC. Using this attribute each OCC can be asked to
+ disable/enable all of its voltage sensors.
+ 1: Enable
+ 0: Disable
+
+powerX_input Power consumption (microWatt)
+powerX_input_highest Historical maximum power
+powerX_input_lowest Historical minimum power
+powerX_enable Enable/disable all power sensors belonging to the
+ sub-group. In POWER9, this attribute corresponds to
+ each OCC. Using this attribute each OCC can be asked to
+ disable/enable all of its power sensors.
+ 1: Enable
+ 0: Disable
+
+currX_input Measured current (milliampere)
+currX_highest Historical maximum current
+currX_lowest Historical minimum current
+currX_enable Enable/disable all current sensors belonging to the
+ sub-group. In POWER9, this attribute corresponds to
+ each OCC. Using this attribute each OCC can be asked to
+ disable/enable all of its current sensors.
+ 1: Enable
+ 0: Disable
+
+energyX_input Cumulative energy (microJoule)
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
index 9ba6587b7657..b2de8fa49273 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440
@@ -16,6 +16,11 @@ Supported chips:
Prefixes: 'max34446'
Addresses scanned: -
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34446.pdf
+ * Maxim MAX34451
+ PMBus 16-Channel V/I Monitor and 12-Channel Sequencer/Marginer
+ Prefixes: 'max34451'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34451.pdf
* Maxim MAX34460
PMBus 12-Channel Voltage Monitor & Sequencer
Prefix: 'max34460'
@@ -36,9 +41,10 @@ Description
This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
Power-Supply Manager, MAX34441 PMBus 5-Channel Power-Supply Manager
and Intelligent Fan Controller, and MAX34446 PMBus Power-Supply Data Logger.
-It also supports the MAX34460 and MAX34461 PMBus Voltage Monitor & Sequencers.
-The MAX34460 supports 12 voltage channels, and the MAX34461 supports 16 voltage
-channels.
+It also supports the MAX34451, MAX34460, and MAX34461 PMBus Voltage Monitor &
+Sequencers. The MAX34451 supports monitoring voltage or current of 12 channels
+based on GIN pins. The MAX34460 supports 12 voltage channels, and the MAX34461
+supports 16 voltage channels.
The driver is a client driver to the core PMBus driver. Please see
Documentation/hwmon/pmbus for details on PMBus client drivers.
@@ -93,7 +99,7 @@ curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
-curr[1-4]_average Historical average current (MAX34446 only).
+curr[1-4]_average Historical average current (MAX34446/34451 only).
curr[1-6]_highest Historical maximum current.
curr[1-6]_reset_history Write any value to reset history.
@@ -123,5 +129,7 @@ temp[1-8]_reset_history Write any value to reset history.
temp7 and temp8 attributes only exist for MAX34440.
MAX34446 only supports temp[1-3].
+MAX34451 supports attribute groups in[1-16] (or curr[1-16] based on input pins)
+and temp[1-5].
MAX34460 supports attribute groups in[1-12] and temp[1-5].
MAX34461 supports attribute groups in[1-16] and temp[1-5].
diff --git a/Documentation/hwmon/mlxreg-fan b/Documentation/hwmon/mlxreg-fan
new file mode 100644
index 000000000000..fc531c6978d4
--- /dev/null
+++ b/Documentation/hwmon/mlxreg-fan
@@ -0,0 +1,60 @@
+Kernel driver mlxreg-fan
+========================
+
+Provides FAN control for the following Mellanox systems:
+QMB700, equipped with 40x200GbE InfiniBand ports;
+MSN3700, equipped with 32x200GbE or 16x400GbE Ethernet ports;
+MSN3410, equipped with 6x400GbE plus 48x50GbE Ethernet ports;
+MSN3800, equipped with 64x1000GbE Ethernet ports;
+These are Top-of-Rack systems, equipped with a Mellanox switch board
+carrying Mellanox Quantum or Spectrum-2 devices.
+The FAN controller is implemented in programmable device logic.
+
+The default register offsets within the programmable device are as
+follows:
+- pwm1 0xe3
+- fan1 (tacho1) 0xe4
+- fan2 (tacho2) 0xe5
+- fan3 (tacho3) 0xe6
+- fan4 (tacho4) 0xe7
+- fan5 (tacho5) 0xe8
+- fan6 (tacho6) 0xe9
+- fan7 (tacho7) 0xea
+- fan8 (tacho8) 0xeb
+- fan9 (tacho9) 0xec
+- fan10 (tacho10) 0xed
+- fan11 (tacho11) 0xee
+- fan12 (tacho12) 0xef
+This setup can be re-programmed with other registers.
+
+Author: Vadim Pasternak <vadimp@mellanox.com>
+
+Description
+-----------
+
+The driver implements a simple interface for driving a fan connected to
+a PWM output and tachometer inputs.
+The driver obtains the PWM and tachometer register locations according to
+the system configuration and creates FAN/PWM hwmon objects and a cooling
+device. PWM and tachometers are sensed through the on-board programmable
+device, which exports its register map. This device can be attached to
+any bus type for which register mapping is supported.
+A single instance is created with one PWM control, up to 12 tachometers
+and one cooling device. There can be as many instances as the
+programmable device supports.
+The driver exposes the fans to user space through the hwmon and thermal
+sysfs interfaces.
+
+/sys files in hwmon subsystem
+-----------------------------
+
+fan[1-12]_fault - RO files for tachometers TACH1-TACH12 fault indication
+fan[1-12]_input - RO files for tachometers TACH1-TACH12 input (in RPM)
+pwm1 - RW file for fan[1-12] target duty cycle (0..255)
+
+/sys files in thermal subsystem
+-------------------------------
+
+cur_state - RW file for current cooling state of the cooling device
+ (0..max_state)
+max_state - RO file for maximum cooling state of the cooling device
diff --git a/Documentation/hwmon/npcm750-pwm-fan b/Documentation/hwmon/npcm750-pwm-fan
new file mode 100644
index 000000000000..6156ef7398e6
--- /dev/null
+++ b/Documentation/hwmon/npcm750-pwm-fan
@@ -0,0 +1,22 @@
+Kernel driver npcm750-pwm-fan
+=============================
+
+Supported chips:
+ NUVOTON NPCM750/730/715/705
+
+Authors:
+ <tomer.maimon@nuvoton.com>
+
+Description:
+------------
+This driver implements support for the NUVOTON NPCM7XX PWM and Fan Tacho
+controller. The PWM controller supports up to 8 PWM outputs. The Fan Tacho
+controller supports up to 16 tachometer inputs.
+
+The driver provides the following sensor accesses in sysfs:
+
+fanX_input ro provides the current fan rotation value in RPM as
+ reported by the fan to the device.
+
+pwmX rw gets or sets the PWM fan control value. This is an
+ integer value between 0 (off) and 255 (full speed).
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index fc337c317c67..2b9e1005d88b 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -171,6 +171,13 @@ in[0-*]_label Suggested voltage channel label.
user-space.
RO
+in[0-*]_enable
+ Enable or disable the sensors.
+ When disabled the sensor read will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
cpu[0-*]_vid CPU core reference voltage.
Unit: millivolt
RO
@@ -236,6 +243,13 @@ fan[1-*]_label Suggested fan channel label.
In all other cases, the label is provided by user-space.
RO
+fan[1-*]_enable
+ Enable or disable the sensors.
+ When disabled the sensor read will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Also see the Alarms section for status flags associated with fans.
@@ -409,6 +423,13 @@ temp_reset_history
Reset temp_lowest and temp_highest for all sensors
WO
+temp[1-*]_enable
+ Enable or disable the sensors.
+ When disabled the sensor read will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Some chips measure temperature using external thermistors and an ADC, and
report the temperature measurement as a voltage. Converting this voltage
back to a temperature (or the other way around for limits) requires
@@ -468,6 +489,13 @@ curr_reset_history
Reset currX_lowest and currX_highest for all sensors
WO
+curr[1-*]_enable
+ Enable or disable the sensors.
+ When disabled the sensor read will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Also see the Alarms section for status flags associated with currents.
*********
@@ -566,6 +594,13 @@ power[1-*]_crit Critical maximum power.
Unit: microWatt
RW
+power[1-*]_enable Enable or disable the sensors.
+ When disabled the sensor read will return
+ -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Also see the Alarms section for status flags associated with power readings.
**********
@@ -576,6 +611,12 @@ energy[1-*]_input Cumulative energy use
Unit: microJoule
RO
+energy[1-*]_enable Enable or disable the sensors.
+ When disabled the sensor read will return
+ -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
************
* Humidity *
@@ -586,6 +627,13 @@ humidity[1-*]_input Humidity
RO
+humidity[1-*]_enable Enable or disable the sensors.
+ When disabled the sensor read will return
+ -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
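+A hedged sketch of the semantics shared by all of these _enable
+attributes (hwmon0 and temp1 are made-up examples):
+
+	#include <errno.h>
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		char buf[16];
+		int fd;
+
+		fd = open("/sys/class/hwmon/hwmon0/temp1_enable", O_WRONLY);
+		write(fd, "0", 1);	/* disable the sensor */
+		close(fd);
+
+		fd = open("/sys/class/hwmon/hwmon0/temp1_input", O_RDONLY);
+		if (read(fd, buf, sizeof(buf)) < 0 && errno == ENODATA)
+			printf("disabled: read returns -ENODATA\n");
+		close(fd);
+		return 0;
+	}
+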
**********
* Alarms *
**********
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 65514c251318..d1ee484a787d 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -21,24 +21,21 @@ Supported adapters:
* Intel DH89xxCC (PCH)
* Intel Panther Point (PCH)
* Intel Lynx Point (PCH)
- * Intel Lynx Point-LP (PCH)
* Intel Avoton (SOC)
* Intel Wellsburg (PCH)
* Intel Coleto Creek (PCH)
* Intel Wildcat Point (PCH)
- * Intel Wildcat Point-LP (PCH)
* Intel BayTrail (SOC)
* Intel Braswell (SOC)
- * Intel Sunrise Point-H (PCH)
- * Intel Sunrise Point-LP (PCH)
- * Intel Kaby Lake-H (PCH)
+ * Intel Sunrise Point (PCH)
+ * Intel Kaby Lake (PCH)
* Intel DNV (SOC)
* Intel Broxton (SOC)
* Intel Lewisburg (PCH)
* Intel Gemini Lake (SOC)
- * Intel Cannon Lake-H (PCH)
- * Intel Cannon Lake-LP (PCH)
+ * Intel Cannon Lake (PCH)
* Intel Cedar Fork (PCH)
+ * Intel Ice Lake (PCH)
Datasheets: Publicly available at the Intel website
On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/i2c/gpio-fault-injection b/Documentation/i2c/gpio-fault-injection
index e0c4f775e239..a4ce62090fd5 100644
--- a/Documentation/i2c/gpio-fault-injection
+++ b/Documentation/i2c/gpio-fault-injection
@@ -34,21 +34,48 @@ I2C specification version 4, section 3.1.16) using the helpers of the Linux I2C
core (see 'struct bus_recovery_info'). However, the bus recovery will not
succeed because SDA is still pinned low until you manually release it again
with "echo 1 > sda". A test with an automatic release can be done with the
-'incomplete_transfer' file.
+following class of fault injectors.
-"incomplete_transfer"
----------------------
+Introduction to incomplete transfers
+------------------------------------
+
+The following fault injectors create situations where SDA will be held low by a
+device. Bus recovery should be able to fix these situations. But please note:
+there are I2C client devices which detect a stuck SDA on their side and release
+it on their own after a few milliseconds. Also, there might be an external
+device deglitching and monitoring the I2C bus. It could also detect a stuck
+SDA and initiate a bus recovery on its own. If you want to implement bus
+recovery in a bus master driver, make sure you have checked your hardware
+setup for such devices beforehand. And always verify with a scope or logic
+analyzer!
+
+"incomplete_address_phase"
+--------------------------
This file is write only and you need to write the address of an existing I2C
-client device to it. Then, a transfer to this device will be started, but it
-will stop at the ACK phase after the address of the client has been
+client device to it. Then, a read transfer to this device will be started, but
+it will stop at the ACK phase after the address of the client has been
transmitted. Because the device will ACK its presence, this results in SDA
being pulled low by the device while SCL is high. So, similar to the "sda" file
above, the bus master under test should detect this condition and try a bus
recovery. This time, however, it should succeed and the device should release
-SDA after toggling SCL. Please note: there are I2C client devices which detect
-a stuck SDA on their side and release it on their own after a few milliseconds.
-Also, there are external devices deglitching and monitoring the I2C bus. They
-can also detect a stuck SDA and will init a bus recovery on their own. If you
-want to implement bus recovery in a bus master driver, make sure you checked
-your hardware setup carefully before.
+SDA after toggling SCL.
+
+"incomplete_write_byte"
+-----------------------
+
+Similar to above, this file is write only and you need to write the address of
+an existing I2C client device to it.
+
+The injector will again stop at one ACK phase, so the device will keep SDA low
+because it acknowledges data. However, there are two differences compared to
+'incomplete_address_phase':
+
+a) the message sent out will be a write message
+b) after the address byte, a 0x00 byte will be transferred. Then, stop at ACK.
+
+This is a highly delicate state: the device is set up to write any data to
+register 0x00 (if it has registers) when further clock pulses happen on SCL.
+This is why bus recovery (up to 9 clock pulses) must either check SDA or send
+additional STOP conditions to ensure the bus has been released. Otherwise
+random data will be written to the device!
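+
+A hedged sketch of triggering this injector from user space (the i2c-0 bus
+number and the 0x50 client address are made-up examples; the debugfs path
+is assumed to be the fault-injector directory of the bus under test):
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		int fd = open("/sys/kernel/debug/i2c-fault-injector/i2c-0/"
+			      "incomplete_write_byte", O_WRONLY);
+
+		if (fd < 0) {
+			perror("open");
+			return 1;
+		}
+		dprintf(fd, "0x50");	/* address of an existing client */
+		close(fd);
+		return 0;
+	}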
+
diff --git a/Documentation/index.rst b/Documentation/index.rst
index fdc585703498..5db7e87c7cb1 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -3,6 +3,8 @@
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
+.. _linux_doc:
+
The Linux Kernel documentation
==============================
@@ -90,6 +92,7 @@ needed).
crypto/index
filesystems/index
vm/index
+ bpf/index
Architecture-specific documentation
-----------------------------------
@@ -102,29 +105,24 @@ implementation.
sh/index
-Korean translations
--------------------
-
-.. toctree::
- :maxdepth: 1
-
- translations/ko_KR/index
+Filesystem Documentation
+------------------------
-Chinese translations
---------------------
+The documentation in this section is provided by specific filesystem
+subprojects.
.. toctree::
- :maxdepth: 1
+ :maxdepth: 2
- translations/zh_CN/index
+ filesystems/ext4/index
-Japanese translations
----------------------
+Translations
+------------
.. toctree::
- :maxdepth: 1
+ :maxdepth: 2
- translations/ja_JP/index
+ translations/index
Indices and tables
==================
diff --git a/Documentation/input/multi-touch-protocol.rst b/Documentation/input/multi-touch-protocol.rst
index b51751a0cd5d..6be70342e709 100644
--- a/Documentation/input/multi-touch-protocol.rst
+++ b/Documentation/input/multi-touch-protocol.rst
@@ -310,12 +310,12 @@ ABS_MT_TOOL_Y
ABS_MT_TOOL_TYPE
The type of approaching tool. A lot of kernel drivers cannot distinguish
between different tool types, such as a finger or a pen. In such cases, the
- event should be omitted. The protocol currently supports MT_TOOL_FINGER,
- MT_TOOL_PEN, and MT_TOOL_PALM [#f2]_. For type B devices, this event is
- handled by input core; drivers should instead use
- input_mt_report_slot_state(). A contact's ABS_MT_TOOL_TYPE may change over
- time while still touching the device, because the firmware may not be able
- to determine which tool is being used when it first appears.
+ event should be omitted. The protocol currently mainly supports
+ MT_TOOL_FINGER, MT_TOOL_PEN, and MT_TOOL_PALM [#f2]_.
+ For type B devices, this event is handled by input core; drivers should
+ instead use input_mt_report_slot_state(). A contact's ABS_MT_TOOL_TYPE may
+ change over time while still touching the device, because the firmware may
+ not be able to determine which tool is being used when it first appears.
ABS_MT_BLOB_ID
The BLOB_ID groups several packets together into one arbitrarily shaped
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 480c8609dc58..13a7c999c04a 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -166,6 +166,7 @@ Code Seq#(hex) Include File Comments
'P' all linux/soundcard.h conflict!
'P' 60-6F sound/sscape_ioctl.h conflict!
'P' 00-0F drivers/usb/class/usblp.c conflict!
+'P' 01-09 drivers/misc/pci_endpoint_test.c conflict!
'Q' all linux/soundcard.h
'R' 00-1F linux/random.h conflict!
'R' 01 linux/rfkill.h conflict!
@@ -274,6 +275,7 @@ Code Seq#(hex) Include File Comments
'v' 00-1F linux/ext2_fs.h conflict!
'v' 00-1F linux/fs.h conflict!
'v' 00-0F linux/sonypi.h conflict!
+'v' 00-0F media/v4l2-subdev.h conflict!
'v' C0-FF linux/meye.h conflict!
'w' all CERN SCI driver
'y' 00-1F packet based user level communications
@@ -322,6 +324,7 @@ Code Seq#(hex) Include File Comments
0xB3 00 linux/mmc/ioctl.h
0xB4 00-0F linux/gpio.h <mailto:linux-gpio@vger.kernel.org>
0xB5 00-0F uapi/linux/rpmsg.h <mailto:linux-remoteproc@vger.kernel.org>
+0xB6 all linux/fpga-dfl.h
0xC0 00-0F linux/usb/iowarrior.h
0xCA 00-0F uapi/misc/cxl.h
0xCA 10-2F uapi/misc/ocxl.h
diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt
index 04d394a2e06c..49df45f90e8a 100644
--- a/Documentation/iostats.txt
+++ b/Documentation/iostats.txt
@@ -31,6 +31,9 @@ Here are examples of these different formats::
3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
3 1 hda1 35486 38030 38030 38030
+ 4.18+ diskstats:
+ 3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160 0 0 0 0
+
On 2.4 you might execute ``grep 'hda ' /proc/partitions``. On 2.6+, you have
a choice of ``cat /sys/block/hda/stat`` or ``grep 'hda ' /proc/diskstats``.
@@ -101,6 +104,18 @@ Field 11 -- weighted # of milliseconds spent doing I/Os
last update of this field. This can provide an easy measure of both
I/O completion time and the backlog that may be accumulating.
+Field 12 -- # of discards completed
+ This is the total number of discards completed successfully.
+
+Field 13 -- # of discards merged
+ See the description of field 2
+
+Field 14 -- # of sectors discarded
+ This is the total number of sectors discarded successfully.
+
+Field 15 -- # of milliseconds spent discarding
+ This is the total number of milliseconds spent by all discards (as
+ measured from __make_request() to end_that_request_last()).
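+
+As an illustrative sketch, the four discard fields can be read from the
+per-device stat file, where they are simply columns 12-15 (hda is just the
+example device used above):
+
+	awk '{ print "discards:", $12, "merged:", $13, "sectors:", $14, "ms:", $15 }' /sys/block/hda/stat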
To avoid introducing performance bottlenecks, no locks are held while
modifying these counters. This implies that minor inaccuracies may be
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 6c9c69ec3986..8390c360d4b3 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -50,6 +50,27 @@ LDFLAGS_MODULE
--------------------------------------------------
Additional options used for $(LD) when linking modules.
+HOSTCFLAGS
+--------------------------------------------------
+Additional flags to be passed to $(HOSTCC) when building host programs.
+
+HOSTCXXFLAGS
+--------------------------------------------------
+Additional flags to be passed to $(HOSTCXX) when building host programs.
+
+HOSTLDFLAGS
+--------------------------------------------------
+Additional flags to be passed when linking host programs.
+
+HOSTLDLIBS
+--------------------------------------------------
+Additional libraries to link against when building host programs.
+
+KBUILD_KCONFIG
+--------------------------------------------------
+Set the top-level Kconfig file to the value of this environment
+variable. The default name is "Kconfig".
+
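+A hedged usage sketch (the flag values and the Kconfig.debug file name are
+illustrative assumptions, not defaults):
+
+	make HOSTCFLAGS="-O2 -Wall" HOSTLDLIBS="-lcrypto" menuconfig
+	make KBUILD_KCONFIG=Kconfig.debug oldconfig
+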
KBUILD_VERBOSE
--------------------------------------------------
Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -88,7 +109,8 @@ In most cases the name of the architecture is the same as the
directory name found in the arch/ directory.
But some architectures such as x86 and sparc have aliases.
x86: i386 for 32 bit, x86_64 for 64 bit
-sparc: sparc for 32 bit, sparc64 for 64 bit
+sh: sh for 32 bit, sh64 for 64 bit
+sparc: sparc32 for 32 bit, sparc64 for 64 bit
CROSS_COMPILE
--------------------------------------------------
@@ -148,15 +170,6 @@ stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
the default option --strip-debug will be used. Otherwise,
INSTALL_MOD_STRIP value will be used as the options to the strip command.
-INSTALL_FW_PATH
---------------------------------------------------
-INSTALL_FW_PATH specifies where to install the firmware blobs.
-The default value is:
-
- $(INSTALL_MOD_PATH)/lib/firmware
-
-The value can be overridden in which case the default value is ignored.
-
INSTALL_HDR_PATH
--------------------------------------------------
INSTALL_HDR_PATH specifies where to install user space headers when
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 64e0775a62d4..c54cb7cb9ff4 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -370,7 +370,7 @@ choices:
This defines a choice group and accepts any of the above attributes as
options. A choice can only be of type bool or tristate. If no type is
-specified for a choice, it's type will be determined by the type of
+specified for a choice, its type will be determined by the type of
the first choice element in the group or remain unknown if none of the
choice elements have a type specified, as well.
@@ -384,7 +384,7 @@ A choice accepts another option "optional", which allows to set the
choice to 'n' and no entry needs to be selected.
If no [symbol] is associated with a choice, then you can not have multiple
definitions of that choice. If a [symbol] is associated to the choice,
-then you may define the same choice (ie. with the same entries) in another
+then you may define the same choice (i.e. with the same entries) in another
place.
comment:
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 7233118f3a05..68c82914c0f3 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -2,9 +2,9 @@ This file contains some assistance for using "make *config".
Use "make help" to list all of the possible configuration targets.
-The xconfig ('qconf') and menuconfig ('mconf') programs also
-have embedded help text. Be sure to check it for navigation,
-search, and other general help text.
+The xconfig ('qconf'), menuconfig ('mconf'), and nconfig ('nconf')
+programs also have embedded help text. Be sure to check that for
+navigation, search, and other general help text.
======================================================================
General
@@ -17,13 +17,16 @@ this happens, using a previously working .config file and running
for you, so you may find that you need to see what NEW kernel
symbols have been introduced.
-To see a list of new config symbols when using "make oldconfig", use
+To see a list of new config symbols, use
cp user/some/old.config .config
make listnewconfig
and the config program will list any new symbols, one per line.
+Alternatively, you can use the brute force method:
+
+ make oldconfig
scripts/diffconfig .config.old .config | less
______________________________________________________________________
@@ -160,7 +163,7 @@ Searching in menuconfig:
This lists all config symbols that contain "hotplug",
e.g., HOTPLUG_CPU, MEMORY_HOTPLUG.
- For search help, enter / followed TAB-TAB-TAB (to highlight
+ For search help, enter / followed by TAB-TAB (to highlight
<Help>) and Enter. This will tell you that you can also use
regular expressions (regexes) in the search string, so if you
are not interested in MEMORY_HOTPLUG, you could try
@@ -203,6 +206,39 @@ Example:
======================================================================
+nconfig
+--------------------------------------------------
+
+nconfig is an alternate text-based configurator. It lists function
+keys across the bottom of the terminal (window) that execute commands.
+You can also just use the corresponding numeric key to execute the
+commands unless you are in a data entry window. E.g., instead of F6
+for Save, you can just press 6.
+
+Use F1 for Global help or F3 for the Short help menu.
+
+Searching in nconfig:
+
+ You can search either in the menu entry "prompt" strings
+ or in the configuration symbols.
+
+ Use / to begin a search through the menu entries. This does
+ not support regular expressions. Use <Down> or <Up> for
+ Next hit and Previous hit, respectively. Use <Esc> to
+ terminate the search mode.
+
+ F8 (SymSearch) searches the configuration symbols for the
+ given string or regular expression (regex).
+
+NCONFIG_MODE
+--------------------------------------------------
+Setting this environment variable to 'single_menu' shows all sub-menus
+in one large tree.
+
+Example:
+ make NCONFIG_MODE=single_menu nconfig
+
+
+======================================================================
xconfig
--------------------------------------------------
@@ -230,8 +266,7 @@ gconfig
Searching in gconfig:
- None (gconfig isn't maintained as well as xconfig or menuconfig);
- however, gconfig does have a few more viewing choices than
- xconfig does.
+ There is no search command in gconfig. However, gconfig does
+ have several different viewing choices, modes, and options.
###
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 048fc39a6b91..766355b1d221 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -661,7 +661,7 @@ Both possibilities are described in the following.
When compiling host programs, it is possible to set specific flags.
The programs will always be compiled utilising $(HOSTCC) passed
- the options specified in $(HOSTCFLAGS).
+ the options specified in $(KBUILD_HOSTCFLAGS).
To set flags that will take effect for all host programs created
in that Makefile, use the variable HOST_EXTRACFLAGS.
@@ -1105,6 +1105,12 @@ When kbuild executes, the following steps are followed (roughly):
target: source(s) FORCE
#WRONG!# $(call if_changed, ld/objcopy/gzip/...)
+ Note: if_changed should not be used more than once per target.
+ It stores the executed command in a corresponding .cmd file;
+ multiple calls would overwrite that file and produce unwanted
+ results when the target is up to date and only the checks on
+ changed commands trigger re-execution.
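+
+ A minimal correct sketch, one if_changed call per target (cmd_copy
+ is a made-up helper, not a standard kbuild command):
+
+	quiet_cmd_copy = COPY    $@
+	      cmd_copy = cat $< > $@
+
+	$(obj)/%.lst: $(src)/%.txt FORCE
+		$(call if_changed,copy)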
+
ld
Link target. Often, LDFLAGS_$@ is used to set specific options to ld.
diff --git a/Documentation/kernel-hacking/hacking.rst b/Documentation/kernel-hacking/hacking.rst
index 9999c8468293..d824e4feaff3 100644
--- a/Documentation/kernel-hacking/hacking.rst
+++ b/Documentation/kernel-hacking/hacking.rst
@@ -1,3 +1,5 @@
+.. _kernel_hacking_hack:
+
============================================
Unreliable Guide To Hacking The Linux Kernel
============================================
diff --git a/Documentation/kernel-hacking/index.rst b/Documentation/kernel-hacking/index.rst
index fcb0eda3cca3..f53027652290 100644
--- a/Documentation/kernel-hacking/index.rst
+++ b/Documentation/kernel-hacking/index.rst
@@ -1,3 +1,5 @@
+.. _kernel_hacking:
+
=====================
Kernel Hacking Guides
=====================
diff --git a/Documentation/kernel-hacking/locking.rst b/Documentation/kernel-hacking/locking.rst
index f937c0fd11aa..519673df0e82 100644
--- a/Documentation/kernel-hacking/locking.rst
+++ b/Documentation/kernel-hacking/locking.rst
@@ -1,3 +1,5 @@
+.. _kernel_hacking_lock:
+
===========================
Unreliable Guide To Locking
===========================
@@ -177,7 +179,7 @@ perfect world).
Note that you can also use :c:func:`spin_lock_irq()` or
:c:func:`spin_lock_irqsave()` here, which stop hardware interrupts
-as well: see `Hard IRQ Context <#hardirq-context>`__.
+as well: see `Hard IRQ Context <#hard-irq-context>`__.
This works perfectly for UP as well: the spin lock vanishes, and this
macro simply becomes :c:func:`local_bh_disable()`
@@ -228,7 +230,7 @@ The Same Softirq
~~~~~~~~~~~~~~~~
The same softirq can run on the other CPUs: you can use a per-CPU array
-(see `Per-CPU Data <#per-cpu>`__) for better performance. If you're
+(see `Per-CPU Data <#per-cpu-data>`__) for better performance. If you're
going so far as to use a softirq, you probably care about scalable
performance enough to justify the extra complexity.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index cb3b0de83fc6..10f4499e677c 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -80,6 +80,26 @@ After the instruction is single-stepped, Kprobes executes the
"post_handler," if any, that is associated with the kprobe.
Execution then continues with the instruction following the probepoint.
+Changing Execution Path
+-----------------------
+
+Since kprobes can probe into running kernel code, it can change the
+register set, including the instruction pointer. This operation requires
+maximum care, such as keeping the stack frame and recovering the
+execution path. Since it operates on a running kernel and needs deep
+knowledge of computer architecture and concurrent computing, you can
+easily shoot yourself in the foot.
+
+If you change the instruction pointer (and set up other related
+registers) in pre_handler, you must return !0 so that kprobes stops
+single stepping and just returns to the given address.
+This also means the post_handler will no longer be called.
+
+Note that this operation may be harder on some architectures which use a
+TOC (Table of Contents) for function calls, since you have to set up a
+new TOC for your function in your module, and recover the old one after
+returning from it.
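+
+A minimal sketch of such a pre_handler (my_stub is a hypothetical
+replacement function; instruction_pointer_set() is the generic ptrace
+helper):
+
+	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
+	{
+		/* divert execution to my_stub instead of the probed code */
+		instruction_pointer_set(regs, (unsigned long)my_stub);
+		return 1;	/* !0: skip single-stepping and post_handler */
+	}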
+
Return Probes
-------------
@@ -262,7 +282,7 @@ is optimized, that modification is ignored. Thus, if you want to
tweak the kernel's execution path, you need to suppress optimization,
using one of the following techniques:
-- Specify an empty function for the kprobe's post_handler or break_handler.
+- Specify an empty function for the kprobe's post_handler.
or
@@ -474,7 +494,7 @@ error occurs during registration, all probes in the array, up to
the bad probe, are safely unregistered before the register_*probes
function returns.
-- kps/rps/jps: an array of pointers to ``*probe`` data structures
+- kps/rps: an array of pointers to ``*probe`` data structures
- num: the number of the array entries.
.. note::
@@ -566,12 +586,11 @@ the same handler) may run concurrently on different CPUs.
Kprobes does not use mutexes or allocate memory except during
registration and unregistration.
-Probe handlers are run with preemption disabled. Depending on the
-architecture and optimization state, handlers may also run with
-interrupts disabled (e.g., kretprobe handlers and optimized kprobe
-handlers run without interrupt disabled on x86/x86-64). In any case,
-your handler should not yield the CPU (e.g., by attempting to acquire
-a semaphore).
+Probe handlers are run with preemption disabled or interrupts disabled,
+depending on the architecture and optimization state (e.g., kretprobe
+handlers and optimized kprobe handlers run without interrupts disabled
+on x86/x86-64). In any case, your handler should not yield the CPU
+(e.g., by attempting to acquire a semaphore, or waiting for I/O).
Since a return probe is implemented by replacing the return
address with the trampoline's address, stack backtraces and calls
diff --git a/Documentation/locking/ww-mutex-design.txt b/Documentation/locking/ww-mutex-design.txt
index 34c3a1b50b9a..f0ed7c30e695 100644
--- a/Documentation/locking/ww-mutex-design.txt
+++ b/Documentation/locking/ww-mutex-design.txt
@@ -1,4 +1,4 @@
-Wait/Wound Deadlock-Proof Mutex Design
+Wound/Wait Deadlock-Proof Mutex Design
======================================
Please read mutex-design.txt first, as it applies to wait/wound mutexes too.
@@ -32,10 +32,26 @@ the oldest task) wins, and the one with the higher reservation id (i.e. the
younger task) unlocks all of the buffers that it has already locked, and then
tries again.
-In the RDBMS literature this deadlock handling approach is called wait/wound:
-The older tasks waits until it can acquire the contended lock. The younger tasks
-needs to back off and drop all the locks it is currently holding, i.e. the
-younger task is wounded.
+In the RDBMS literature, a reservation ticket is associated with a transaction,
+and the deadlock handling approach is called Wait-Die. The name is based on
+the actions of a locking thread when it encounters an already locked mutex.
+If the transaction holding the lock is younger, the locking transaction waits.
+If the transaction holding the lock is older, the locking transaction backs off
+and dies. Hence Wait-Die.
+There is also another algorithm called Wound-Wait:
+If the transaction holding the lock is younger, the locking transaction
+wounds the transaction holding the lock, requesting it to die.
+If the transaction holding the lock is older, the locking transaction waits
+for that transaction. Hence Wound-Wait.
+The two algorithms are both fair in that a transaction will eventually succeed.
+However, the Wound-Wait algorithm is typically stated to generate fewer backoffs
+compared to Wait-Die, but is, on the other hand, associated with more work than
+Wait-Die when recovering from a backoff. Wound-Wait is also a preemptive
+algorithm in that transactions are wounded by other transactions, and that
+requires a reliable way to pick up the wounded condition and preempt the
+running transaction. Note that this is not the same as process preemption. A
+Wound-Wait transaction is considered preempted when it dies (returning
+-EDEADLK) following a wound.
Concepts
--------
@@ -47,18 +63,20 @@ Acquire context: To ensure eventual forward progress it is important the a task
trying to acquire locks doesn't grab a new reservation id, but keeps the one it
acquired when starting the lock acquisition. This ticket is stored in the
acquire context. Furthermore the acquire context keeps track of debugging state
-to catch w/w mutex interface abuse.
+to catch w/w mutex interface abuse. An acquire context represents a
+transaction.
W/w class: In contrast to normal mutexes the lock class needs to be explicit for
-w/w mutexes, since it is required to initialize the acquire context.
+w/w mutexes, since it is required to initialize the acquire context. The lock
+class also specifies what algorithm to use, Wound-Wait or Wait-Die.
Furthermore there are three different class of w/w lock acquire functions:
* Normal lock acquisition with a context, using ww_mutex_lock.
-* Slowpath lock acquisition on the contending lock, used by the wounded task
- after having dropped all already acquired locks. These functions have the
- _slow postfix.
+* Slowpath lock acquisition on the contending lock, used by the task that just
+ killed its transaction after having dropped all already acquired locks.
+ These functions have the _slow postfix.
From a simple semantics point-of-view the _slow functions are not strictly
required, since simply calling the normal ww_mutex_lock functions on the
@@ -90,6 +108,12 @@ provided.
Usage
-----
+The algorithm (Wait-Die vs Wound-Wait) is chosen by using either
+DEFINE_WW_CLASS() (Wound-Wait) or DEFINE_WD_CLASS() (Wait-Die).
+As a rough rule of thumb, use Wound-Wait iff you
+expect the number of simultaneous competing transactions to be typically small,
+and you want to reduce the number of rollbacks.
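+
+A minimal sketch of picking the algorithm at class definition time (the
+class name is illustrative):
+
+  static DEFINE_WW_CLASS(gfx_reservation_ww_class);  /* Wound-Wait */
+  /* static DEFINE_WD_CLASS(gfx_reservation_ww_class);   Wait-Die  */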
+
Three different ways to acquire locks within the same w/w class. Common
definitions for methods #1 and #2:
@@ -220,7 +244,7 @@ mutexes are a natural fit for such a case for two reasons:
Note that this approach differs in two important ways from the above methods:
- Since the list of objects is dynamically constructed (and might very well be
- different when retrying due to hitting the -EDEADLK wound condition) there's
+ different when retrying due to hitting the -EDEADLK die condition) there's
no need to keep any object on a persistent list when it's not locked. We can
therefore move the list_head into the object itself.
- On the other hand the dynamic object list construction also means that the -EALREADY return
@@ -312,12 +336,23 @@ Design:
We maintain the following invariants for the wait list:
(1) Waiters with an acquire context are sorted by stamp order; waiters
without an acquire context are interspersed in FIFO order.
- (2) Among waiters with contexts, only the first one can have other locks
- acquired already (ctx->acquired > 0). Note that this waiter may come
- after other waiters without contexts in the list.
+ (2) For Wait-Die, among waiters with contexts, only the first one can have
+ other locks acquired already (ctx->acquired > 0). Note that this waiter
+ may come after other waiters without contexts in the list.
+
+ The Wound-Wait preemption is implemented with a lazy-preemption scheme:
+ The wounded status of the transaction is checked only when there is
+ contention for a new lock and hence a true chance of deadlock. In that
+ situation, if the transaction is wounded, it backs off, clears the
+ wounded status and retries. A great benefit of implementing preemption in
+ this way is that the wounded transaction can identify a contending lock to
+ wait for before restarting the transaction. Just blindly restarting the
+ transaction would likely make the transaction end up in a situation where
+ it would have to back off again.
In general, not much contention is expected. The locks are typically used to
- serialize access to resources for devices.
+ serialize access to resources for devices, and optimization focus should
+ therefore be directed towards the uncontended cases.
Lockdep:
Special care has been taken to warn for as many cases of api abuse
diff --git a/Documentation/media/audio.h.rst.exceptions b/Documentation/media/audio.h.rst.exceptions
index f40f3cbfe4c9..940458774cf6 100644
--- a/Documentation/media/audio.h.rst.exceptions
+++ b/Documentation/media/audio.h.rst.exceptions
@@ -1,9 +1,6 @@
# Ignore header name
ignore define _DVBAUDIO_H_
-# Typedef pointing to structs
-replace typedef audio_karaoke_t :c:type:`audio_karaoke`
-
# Undocumented audio caps, as this is a deprecated API anyway
ignore define AUDIO_CAP_DTS
ignore define AUDIO_CAP_LPCM
diff --git a/Documentation/media/media.h.rst.exceptions b/Documentation/media/media.h.rst.exceptions
index 83d7f7c722fb..684fe9c86dee 100644
--- a/Documentation/media/media.h.rst.exceptions
+++ b/Documentation/media/media.h.rst.exceptions
@@ -6,10 +6,10 @@ ignore define MEDIA_API_VERSION
ignore define MEDIA_ENT_F_BASE
ignore define MEDIA_ENT_F_OLD_BASE
ignore define MEDIA_ENT_F_OLD_SUBDEV_BASE
+ignore define MEDIA_ENT_F_DTV_DECODER
ignore define MEDIA_INTF_T_DVB_BASE
ignore define MEDIA_INTF_T_V4L_BASE
ignore define MEDIA_INTF_T_ALSA_BASE
-
#ignore legacy entity type macros
ignore define MEDIA_ENT_TYPE_SHIFT
ignore define MEDIA_ENT_TYPE_MASK
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index b6fd86424fbb..8d5633e6ae04 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -179,6 +179,24 @@ it is guaranteed that the state did change in between the two events.
capability set. When open() is called, the HPD pin can be read and
if the HPD is high, then an initial event will be generated for that
filehandle.
+ * .. _`CEC-EVENT-PIN-5V-LOW`:
+
+ - ``CEC_EVENT_PIN_5V_LOW``
+ - 6
+ - Generated if the 5V pin goes from a high voltage to a low voltage.
+ Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN``
+ capability set. When open() is called, the 5V pin can be read and
+ if the 5V is low, then an initial event will be generated for that
+ filehandle.
+ * .. _`CEC-EVENT-PIN-5V-HIGH`:
+
+ - ``CEC_EVENT_PIN_5V_HIGH``
+ - 7
+ - Generated if the 5V pin goes from a low voltage to a high voltage.
+ Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN``
+ capability set. When open() is called, the 5V pin can be read and
+ if the 5V is high, then an initial event will be generated for that
+ filehandle.
.. tabularcolumns:: |p{6.0cm}|p{0.6cm}|p{10.9cm}|
diff --git a/Documentation/media/uapi/dvb/audio-get-pts.rst b/Documentation/media/uapi/dvb/audio-get-pts.rst
deleted file mode 100644
index 2d1396b003de..000000000000
--- a/Documentation/media/uapi/dvb/audio-get-pts.rst
+++ /dev/null
@@ -1,65 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _AUDIO_GET_PTS:
-
-=============
-AUDIO_GET_PTS
-=============
-
-Name
-----
-
-AUDIO_GET_PTS
-
-.. attention:: This ioctl is deprecated
-
-Synopsis
---------
-
-.. c:function:: int ioctl(int fd, AUDIO_GET_PTS, __u64 *pts)
- :name: AUDIO_GET_PTS
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- -
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- -
-
- - __u64 \*pts
-
- - Returns the 33-bit timestamp as defined in ITU T-REC-H.222.0 /
- ISO/IEC 13818-1.
-
- The PTS should belong to the currently played frame if possible,
- but may also be a value close to it like the PTS of the last
- decoded frame or the last PTS extracted by the PES parser.
-
-
-Description
------------
-
-This ioctl is obsolete. Do not use in new drivers. If you need this
-functionality, then please contact the linux-media mailing list
-(`https://linuxtv.org/lists.php <https://linuxtv.org/lists.php>`__).
-
-This ioctl call asks the Audio Device to return the current PTS
-timestamp.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/audio-set-attributes.rst b/Documentation/media/uapi/dvb/audio-set-attributes.rst
deleted file mode 100644
index f0c6153ca80f..000000000000
--- a/Documentation/media/uapi/dvb/audio-set-attributes.rst
+++ /dev/null
@@ -1,67 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _AUDIO_SET_ATTRIBUTES:
-
-====================
-AUDIO_SET_ATTRIBUTES
-====================
-
-Name
-----
-
-AUDIO_SET_ATTRIBUTES
-
-.. attention:: This ioctl is deprecated
-
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, AUDIO_SET_ATTRIBUTES, struct audio_attributes *attr )
- :name: AUDIO_SET_ATTRIBUTES
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- -
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- -
-
- - audio_attributes_t attr
-
- - audio attributes according to section ??
-
-
-Description
------------
-
-This ioctl is intended for DVD playback and allows you to set certain
-information about the audio stream.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - attr is not a valid or supported attribute setting.
diff --git a/Documentation/media/uapi/dvb/audio-set-ext-id.rst b/Documentation/media/uapi/dvb/audio-set-ext-id.rst
deleted file mode 100644
index 8503c47f26bd..000000000000
--- a/Documentation/media/uapi/dvb/audio-set-ext-id.rst
+++ /dev/null
@@ -1,66 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _AUDIO_SET_EXT_ID:
-
-================
-AUDIO_SET_EXT_ID
-================
-
-Name
-----
-
-AUDIO_SET_EXT_ID
-
-.. attention:: This ioctl is deprecated
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, AUDIO_SET_EXT_ID, int id)
- :name: AUDIO_SET_EXT_ID
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- -
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- -
-
- - int id
-
- - audio sub_stream_id
-
-
-Description
------------
-
-This ioctl can be used to set the extension id for MPEG streams in DVD
-playback. Only the first 3 bits are recognized.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - id is not a valid id.
diff --git a/Documentation/media/uapi/dvb/audio-set-karaoke.rst b/Documentation/media/uapi/dvb/audio-set-karaoke.rst
deleted file mode 100644
index c759952d88aa..000000000000
--- a/Documentation/media/uapi/dvb/audio-set-karaoke.rst
+++ /dev/null
@@ -1,66 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _AUDIO_SET_KARAOKE:
-
-=================
-AUDIO_SET_KARAOKE
-=================
-
-Name
-----
-
-AUDIO_SET_KARAOKE
-
-.. attention:: This ioctl is deprecated
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, AUDIO_SET_KARAOKE, struct audio_karaoke *karaoke)
- :name: AUDIO_SET_KARAOKE
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- -
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- -
-
- - audio_karaoke_t \*karaoke
-
- - karaoke settings according to section ??.
-
-
-Description
------------
-
-This ioctl allows one to set the mixer settings for a karaoke DVD.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - karaoke is not a valid or supported karaoke setting.
diff --git a/Documentation/media/uapi/dvb/audio_data_types.rst b/Documentation/media/uapi/dvb/audio_data_types.rst
index 6b93359d64f7..5bffa2c98a24 100644
--- a/Documentation/media/uapi/dvb/audio_data_types.rst
+++ b/Documentation/media/uapi/dvb/audio_data_types.rst
@@ -114,40 +114,3 @@ following bits set according to the hardwares capabilities.
#define AUDIO_CAP_OGG 64
#define AUDIO_CAP_SDDS 128
#define AUDIO_CAP_AC3 256
-
-.. c:type:: audio_karaoke
-
-The ioctl AUDIO_SET_KARAOKE uses the following format:
-
-
-.. code-block:: c
-
- typedef
- struct audio_karaoke {
- int vocal1;
- int vocal2;
- int melody;
- } audio_karaoke_t;
-
-If Vocal1 or Vocal2 are non-zero, they get mixed into left and right t
-at 70% each. If both, Vocal1 and Vocal2 are non-zero, Vocal1 gets mixed
-into the left channel and Vocal2 into the right channel at 100% each. Ff
-Melody is non-zero, the melody channel gets mixed into left and right.
-
-
-.. c:type:: audio_attributes
-
-The following attributes can be set by a call to AUDIO_SET_ATTRIBUTES:
-
-
-.. code-block:: c
-
- typedef uint16_t audio_attributes_t;
- /* bits: descr. */
- /* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */
- /* 12 multichannel extension */
- /* 11-10 audio type (0=not spec, 1=language included) */
- /* 9- 8 audio application mode (0=not spec, 1=karaoke, 2=surround) */
- /* 7- 6 Quantization / DRC (mpeg audio: 1=DRC exists)(lpcm: 0=16bit, */
- /* 5- 4 Sample frequency fs (0=48kHz, 1=96kHz) */
- /* 2- 0 number of audio channels (n+1 channels) */
diff --git a/Documentation/media/uapi/dvb/audio_function_calls.rst b/Documentation/media/uapi/dvb/audio_function_calls.rst
index 0bb56f0cfed4..7dba16285dab 100644
--- a/Documentation/media/uapi/dvb/audio_function_calls.rst
+++ b/Documentation/media/uapi/dvb/audio_function_calls.rst
@@ -22,13 +22,9 @@ Audio Function Calls
audio-set-bypass-mode
audio-channel-select
audio-bilingual-channel-select
- audio-get-pts
audio-get-status
audio-get-capabilities
audio-clear-buffer
audio-set-id
audio-set-mixer
audio-set-streamtype
- audio-set-ext-id
- audio-set-attributes
- audio-set-karaoke
diff --git a/Documentation/media/uapi/dvb/video-get-frame-rate.rst b/Documentation/media/uapi/dvb/video-get-frame-rate.rst
deleted file mode 100644
index 400042a854cf..000000000000
--- a/Documentation/media/uapi/dvb/video-get-frame-rate.rst
+++ /dev/null
@@ -1,61 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_GET_FRAME_RATE:
-
-====================
-VIDEO_GET_FRAME_RATE
-====================
-
-Name
-----
-
-VIDEO_GET_FRAME_RATE
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(int fd, VIDEO_GET_FRAME_RATE, unsigned int *rate)
- :name: VIDEO_GET_FRAME_RATE
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_GET_FRAME_RATE for this command.
-
- - .. row 3
-
- - unsigned int \*rate
-
- - Returns the framerate in number of frames per 1000 seconds.
-
-
-Description
------------
-
-This ioctl call asks the Video Device to return the current framerate.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/video-get-navi.rst b/Documentation/media/uapi/dvb/video-get-navi.rst
deleted file mode 100644
index 114a9ac48b9e..000000000000
--- a/Documentation/media/uapi/dvb/video-get-navi.rst
+++ /dev/null
@@ -1,84 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_GET_NAVI:
-
-==============
-VIDEO_GET_NAVI
-==============
-
-Name
-----
-
-VIDEO_GET_NAVI
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_GET_NAVI , struct video_navi_pack *navipack)
- :name: VIDEO_GET_NAVI
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_GET_NAVI for this command.
-
- - .. row 3
-
- - video_navi_pack_t \*navipack
-
- - PCI or DSI pack (private stream 2) according to section ??.
-
-
-Description
------------
-
-This ioctl returns navigational information from the DVD stream. This is
-especially needed if an encoded stream has to be decoded by the
-hardware.
-
-.. c:type:: video_navi_pack
-
-.. code-block::c
-
- typedef struct video_navi_pack {
- int length; /* 0 ... 1024 */
- __u8 data[1024];
- } video_navi_pack_t;
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EFAULT``
-
- - driver is not able to return navigational information
diff --git a/Documentation/media/uapi/dvb/video-set-attributes.rst b/Documentation/media/uapi/dvb/video-set-attributes.rst
deleted file mode 100644
index b2f11a6746e9..000000000000
--- a/Documentation/media/uapi/dvb/video-set-attributes.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_ATTRIBUTES:
-
-====================
-VIDEO_SET_ATTRIBUTES
-====================
-
-Name
-----
-
-VIDEO_SET_ATTRIBUTES
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_SET_ATTRIBUTE ,video_attributes_t vattr)
- :name: VIDEO_SET_ATTRIBUTE
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_ATTRIBUTE for this command.
-
- - .. row 3
-
- - video_attributes_t vattr
-
- - video attributes according to section ??.
-
-
-Description
------------
-
-This ioctl is intended for DVD playback and allows you to set certain
-information about the stream. Some hardware may not need this
-information, but the call also tells the hardware to prepare for DVD
-playback.
-
-.. c:type:: video_attributes_t
-
-.. code-block::c
-
- typedef __u16 video_attributes_t;
- /* bits: descr. */
- /* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
- /* 13-12 TV system (0=525/60, 1=625/50) */
- /* 11-10 Aspect ratio (0=4:3, 3=16:9) */
- /* 9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca */
- /* 7 line 21-1 data present in GOP (1=yes, 0=no) */
- /* 6 line 21-2 data present in GOP (1=yes, 0=no) */
- /* 5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
- /* 2 source letterboxed (1=yes, 0=no) */
- /* 0 film/camera mode (0=camera, 1=film (625/50 only)) */
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - input is not a valid attribute setting.
diff --git a/Documentation/media/uapi/dvb/video-set-highlight.rst b/Documentation/media/uapi/dvb/video-set-highlight.rst
deleted file mode 100644
index 90aeafd923b7..000000000000
--- a/Documentation/media/uapi/dvb/video-set-highlight.rst
+++ /dev/null
@@ -1,86 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_HIGHLIGHT:
-
-===================
-VIDEO_SET_HIGHLIGHT
-===================
-
-Name
-----
-
-VIDEO_SET_HIGHLIGHT
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_SET_HIGHLIGHT, struct video_highlight *vhilite)
- :name: VIDEO_SET_HIGHLIGHT
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_HIGHLIGHT for this command.
-
- - .. row 3
-
- - video_highlight_t \*vhilite
-
- - SPU Highlight information according to section ??.
-
-
-Description
------------
-
-This ioctl sets the SPU highlight information for the menu access of a
-DVD.
-
-.. c:type:: video_highlight
-
-.. code-block:: c
-
- typedef
- struct video_highlight {
- int active; /* 1=show highlight, 0=hide highlight */
- __u8 contrast1; /* 7- 4 Pattern pixel contrast */
- /* 3- 0 Background pixel contrast */
- __u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */
- /* 3- 0 Emphasis pixel-1 contrast */
- __u8 color1; /* 7- 4 Pattern pixel color */
- /* 3- 0 Background pixel color */
- __u8 color2; /* 7- 4 Emphasis pixel-2 color */
- /* 3- 0 Emphasis pixel-1 color */
- __u32 ypos; /* 23-22 auto action mode */
- /* 21-12 start y */
- /* 9- 0 end y */
- __u32 xpos; /* 23-22 button color number */
- /* 21-12 start x */
- /* 9- 0 end x */
- } video_highlight_t;
-
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/video-set-id.rst b/Documentation/media/uapi/dvb/video-set-id.rst
deleted file mode 100644
index 18f66875ae3f..000000000000
--- a/Documentation/media/uapi/dvb/video-set-id.rst
+++ /dev/null
@@ -1,75 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_ID:
-
-============
-VIDEO_SET_ID
-============
-
-Name
-----
-
-VIDEO_SET_ID
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(int fd, VIDEO_SET_ID, int id)
- :name: VIDEO_SET_ID
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_ID for this command.
-
- - .. row 3
-
- - int id
-
- - video sub-stream id
-
-
-Description
------------
-
-This ioctl selects which sub-stream is to be decoded if a program or
-system stream is sent to the video device.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - Invalid sub-stream id.
diff --git a/Documentation/media/uapi/dvb/video-set-spu-palette.rst b/Documentation/media/uapi/dvb/video-set-spu-palette.rst
deleted file mode 100644
index 51a1913d21d2..000000000000
--- a/Documentation/media/uapi/dvb/video-set-spu-palette.rst
+++ /dev/null
@@ -1,82 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_SPU_PALETTE:
-
-=====================
-VIDEO_SET_SPU_PALETTE
-=====================
-
-Name
-----
-
-VIDEO_SET_SPU_PALETTE
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_SET_SPU_PALETTE, struct video_spu_palette *palette )
- :name: VIDEO_SET_SPU_PALETTE
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_SPU_PALETTE for this command.
-
- - .. row 3
-
- - video_spu_palette_t \*palette
-
- - SPU palette according to section ??.
-
-
-Description
------------
-
-This ioctl sets the SPU color palette.
-
-.. c:type:: video_spu_palette
-
-.. code-block::c
-
- typedef struct video_spu_palette { /* SPU Palette information */
- int length;
- __u8 __user *palette;
- } video_spu_palette_t;
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - input is not a valid palette or driver doesn’t handle SPU.
diff --git a/Documentation/media/uapi/dvb/video-set-spu.rst b/Documentation/media/uapi/dvb/video-set-spu.rst
deleted file mode 100644
index 739e5e7bd133..000000000000
--- a/Documentation/media/uapi/dvb/video-set-spu.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_SPU:
-
-=============
-VIDEO_SET_SPU
-=============
-
-Name
-----
-
-VIDEO_SET_SPU
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_SET_SPU , struct video_spu *spu)
- :name: VIDEO_SET_SPU
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_SPU for this command.
-
- - .. row 3
-
- - video_spu_t \*spu
-
- - SPU decoding (de)activation and subid setting according to section
- ??.
-
-
-Description
------------
-
-This ioctl activates or deactivates SPU decoding in a DVD input stream.
-It can only be used, if the driver is able to handle a DVD stream.
-
-.. c:type:: struct video_spu
-
-.. code-block:: c
-
- typedef struct video_spu {
- int active;
- int stream_id;
- } video_spu_t;
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - input is not a valid spu setting or driver cannot handle SPU.
diff --git a/Documentation/media/uapi/dvb/video-set-system.rst b/Documentation/media/uapi/dvb/video-set-system.rst
deleted file mode 100644
index e39cbe080ef7..000000000000
--- a/Documentation/media/uapi/dvb/video-set-system.rst
+++ /dev/null
@@ -1,77 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_SYSTEM:
-
-================
-VIDEO_SET_SYSTEM
-================
-
-Name
-----
-
-VIDEO_SET_SYSTEM
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_SET_SYSTEM , video_system_t system)
- :name: VIDEO_SET_SYSTEM
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_FORMAT for this command.
-
- - .. row 3
-
- - video_system_t system
-
- - video system of TV output.
-
-
-Description
------------
-
-This ioctl sets the television output format. The format (see section
-??) may vary from the color format of the displayed MPEG stream. If the
-hardware is not able to display the requested format the call will
-return an error.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - system is not a valid or supported video system.
diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst
index 68588ac7fecb..3f4f6c9ffad7 100644
--- a/Documentation/media/uapi/dvb/video_function_calls.rst
+++ b/Documentation/media/uapi/dvb/video_function_calls.rst
@@ -21,7 +21,6 @@ Video Function Calls
video-get-status
video-get-frame-count
video-get-pts
- video-get-frame-rate
video-get-event
video-command
video-try-command
@@ -31,13 +30,7 @@ Video Function Calls
video-fast-forward
video-slowmotion
video-get-capabilities
- video-set-id
video-clear-buffer
video-set-streamtype
video-set-format
- video-set-system
- video-set-highlight
- video-set-spu
- video-set-spu-palette
- video-get-navi
video-set-attributes
diff --git a/Documentation/media/uapi/dvb/video_types.rst b/Documentation/media/uapi/dvb/video_types.rst
index 640a21de6b8a..a0942171596c 100644
--- a/Documentation/media/uapi/dvb/video_types.rst
+++ b/Documentation/media/uapi/dvb/video_types.rst
@@ -246,134 +246,3 @@ following bits set according to the hardwares capabilities.
#define VIDEO_CAP_SPU 16
#define VIDEO_CAP_NAVI 32
#define VIDEO_CAP_CSS 64
-
-
-.. _video-system:
-
-video_system_t
-==============
-
-A call to VIDEO_SET_SYSTEM sets the desired video system for TV
-output. The following system types can be set:
-
-
-.. code-block:: c
-
- typedef enum {
- VIDEO_SYSTEM_PAL,
- VIDEO_SYSTEM_NTSC,
- VIDEO_SYSTEM_PALN,
- VIDEO_SYSTEM_PALNc,
- VIDEO_SYSTEM_PALM,
- VIDEO_SYSTEM_NTSC60,
- VIDEO_SYSTEM_PAL60,
- VIDEO_SYSTEM_PALM60
- } video_system_t;
-
-
-.. c:type:: video_highlight
-
-struct video_highlight
-======================
-
-Calling the ioctl VIDEO_SET_HIGHLIGHTS posts the SPU highlight
-information. The call expects the following format for that information:
-
-
-.. code-block:: c
-
- typedef
- struct video_highlight {
- boolean active; /* 1=show highlight, 0=hide highlight */
- uint8_t contrast1; /* 7- 4 Pattern pixel contrast */
- /* 3- 0 Background pixel contrast */
- uint8_t contrast2; /* 7- 4 Emphasis pixel-2 contrast */
- /* 3- 0 Emphasis pixel-1 contrast */
- uint8_t color1; /* 7- 4 Pattern pixel color */
- /* 3- 0 Background pixel color */
- uint8_t color2; /* 7- 4 Emphasis pixel-2 color */
- /* 3- 0 Emphasis pixel-1 color */
- uint32_t ypos; /* 23-22 auto action mode */
- /* 21-12 start y */
- /* 9- 0 end y */
- uint32_t xpos; /* 23-22 button color number */
- /* 21-12 start x */
- /* 9- 0 end x */
- } video_highlight_t;
-
-
-.. c:type:: video_spu
-
-struct video_spu
-================
-
-Calling VIDEO_SET_SPU deactivates or activates SPU decoding, according
-to the following format:
-
-
-.. code-block:: c
-
- typedef
- struct video_spu {
- boolean active;
- int stream_id;
- } video_spu_t;
-
-
-.. c:type:: video_spu_palette
-
-struct video_spu_palette
-========================
-
-The following structure is used to set the SPU palette by calling
-VIDEO_SPU_PALETTE:
-
-
-.. code-block:: c
-
- typedef
- struct video_spu_palette {
- int length;
- uint8_t *palette;
- } video_spu_palette_t;
-
-
-.. c:type:: video_navi_pack
-
-struct video_navi_pack
-======================
-
-In order to get the navigational data the following structure has to be
-passed to the ioctl VIDEO_GET_NAVI:
-
-
-.. code-block:: c
-
- typedef
- struct video_navi_pack {
- int length; /* 0 ... 1024 */
- uint8_t data[1024];
- } video_navi_pack_t;
-
-
-.. _video-attributes-t:
-
-video_attributes_t
-==================
-
-The following attributes can be set by a call to VIDEO_SET_ATTRIBUTES:
-
-
-.. code-block:: c
-
- typedef uint16_t video_attributes_t;
- /* bits: descr. */
- /* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
- /* 13-12 TV system (0=525/60, 1=625/50) */
- /* 11-10 Aspect ratio (0=4:3, 3=16:9) */
- /* 9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca */
- /* 7 line 21-1 data present in GOP (1=yes, 0=no) */
- /* 6 line 21-2 data present in GOP (1=yes, 0=no) */
- /* 5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
- /* 2 source letterboxed (1=yes, 0=no) */
- /* 0 film/camera mode (0=camera, 1=film (625/50 only)) */
diff --git a/Documentation/media/uapi/mediactl/media-ioc-device-info.rst b/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
index f690f9afc470..649cb3d9e058 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
@@ -48,12 +48,8 @@ ioctl never fails.
:widths: 1 1 2
- - .. row 1
-
- - char
-
+ * - char
- ``driver``\ [16]
-
- Name of the driver implementing the media API as a NUL-terminated
ASCII string. The driver version is stored in the
``driver_version`` field.
@@ -62,66 +58,38 @@ ioctl never fails.
the driver identity. It is also useful to work around known bugs,
or to identify drivers in error reports.
- - .. row 2
-
- - char
-
+ * - char
- ``model``\ [32]
-
- Device model name as a NUL-terminated UTF-8 string. The device
version is stored in the ``device_version`` field and is not be
appended to the model name.
- - .. row 3
-
- - char
-
+ * - char
- ``serial``\ [40]
-
- Serial number as a NUL-terminated ASCII string.
- - .. row 4
-
- - char
-
+ * - char
- ``bus_info``\ [32]
-
- Location of the device in the system as a NUL-terminated ASCII
string. This includes the bus type name (PCI, USB, ...) and a
bus-specific identifier.
- - .. row 5
-
- - __u32
-
+ * - __u32
- ``media_version``
-
- Media API version, formatted with the ``KERNEL_VERSION()`` macro.
- - .. row 6
-
- - __u32
-
+ * - __u32
- ``hw_revision``
-
- Hardware device revision in a driver-specific format.
- - .. row 7
-
- - __u32
-
+ * - __u32
- ``driver_version``
-
- Media device driver version, formatted with the
``KERNEL_VERSION()`` macro. Together with the ``driver`` field
this identifies a particular driver.
- - .. row 8
-
- - __u32
-
+ * - __u32
- ``reserved``\ [31]
-
- Reserved for future extensions. Drivers and applications must set
this array to zero.
diff --git a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
index 582fda488810..fc2e39c070c9 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
@@ -58,142 +58,90 @@ id's until they get an error.
:stub-columns: 0
:widths: 1 1 1 1 8
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``id``
-
-
-
- - Entity id, set by the application. When the id is or'ed with
+ - Entity ID, set by the application. When the ID is or'ed with
``MEDIA_ENT_ID_FLAG_NEXT``, the driver clears the flag and returns
- the first entity with a larger id.
-
- - .. row 2
-
- - char
+ the first entity with a larger ID. Do not expect that the ID will
+ always be the same for each instance of the device. In other words,
+ do not hardcode entity IDs in an application.
+ * - char
- ``name``\ [32]
-
-
-
- - Entity name as an UTF-8 NULL-terminated string.
-
- - .. row 3
-
- - __u32
+ - Entity name as an UTF-8 NULL-terminated string. This name must be unique
+ within the media topology.
+ * - __u32
- ``type``
-
-
-
- Entity type, see :ref:`media-entity-functions` for details.
- - .. row 4
-
- - __u32
-
+ * - __u32
- ``revision``
-
-
-
- Entity revision. Always zero (obsolete)
- - .. row 5
-
- - __u32
-
+ * - __u32
- ``flags``
-
-
-
- Entity flags, see :ref:`media-entity-flag` for details.
- - .. row 6
-
- - __u32
-
+ * - __u32
- ``group_id``
-
-
-
- Entity group ID. Always zero (obsolete)
- - .. row 7
-
- - __u16
-
+ * - __u16
- ``pads``
-
-
-
- Number of pads
- - .. row 8
-
- - __u16
-
+ * - __u16
- ``links``
-
-
-
- Total number of outbound links. Inbound links are not counted in
this field.
- - .. row 9
-
- - __u32
-
+ * - __u32
- ``reserved[4]``
-
-
-
- Reserved for future extensions. Drivers and applications must set
the array to zero.
- - .. row 10
-
- - union
-
- - .. row 11
+ * - union
- -
+ * -
- struct
-
- ``dev``
-
-
- Valid for (sub-)devices that create a single device node.
- - .. row 12
-
- -
+ * -
-
- __u32
-
- ``major``
-
- Device node major number.
- - .. row 13
-
- -
+ * -
-
- __u32
-
- ``minor``
-
- Device node minor number.
- - .. row 14
-
- -
+ * -
- __u8
-
- ``raw``\ [184]
-
-
-
diff --git a/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst b/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
index 256168b3c3be..f158c134e9b0 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
@@ -62,35 +62,21 @@ returned during the enumeration process.
:stub-columns: 0
:widths: 1 1 2
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``entity``
-
- Entity id, set by the application.
- - .. row 2
-
- - struct :c:type:`media_pad_desc`
-
+ * - struct :c:type:`media_pad_desc`
- \*\ ``pads``
-
- Pointer to a pads array allocated by the application. Ignored if
NULL.
- - .. row 3
-
- - struct :c:type:`media_link_desc`
-
+ * - struct :c:type:`media_link_desc`
- \*\ ``links``
-
- Pointer to a links array allocated by the application. Ignored if
NULL.
-
.. c:type:: media_pad_desc
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
@@ -100,37 +86,20 @@ returned during the enumeration process.
:stub-columns: 0
:widths: 1 1 2
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``entity``
-
- ID of the entity this pad belongs to.
- - .. row 2
-
- - __u16
-
+ * - __u16
- ``index``
+ - Pad index, starts at 0.
- - 0-based pad index.
-
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``flags``
-
- Pad flags, see :ref:`media-pad-flag` for more details.
- - .. row 4
-
- - __u32
-
+ * - __u32
- ``reserved[2]``
-
- Reserved for future extensions. Drivers and applications must set
the array to zero.
@@ -145,37 +114,20 @@ returned during the enumeration process.
:stub-columns: 0
:widths: 1 1 2
-
- - .. row 1
-
- - struct :c:type:`media_pad_desc`
-
+ * - struct :c:type:`media_pad_desc`
- ``source``
-
- Pad at the origin of this link.
- - .. row 2
-
- - struct :c:type:`media_pad_desc`
-
+ * - struct :c:type:`media_pad_desc`
- ``sink``
-
- Pad at the target of this link.
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``flags``
-
- Link flags, see :ref:`media-link-flag` for more details.
- - .. row 4
-
- - __u32
-
+ * - __u32
- ``reserved[4]``
-
- Reserved for future extensions. Drivers and applications must set
the array to zero.
diff --git a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
index c4055ddf070a..bac128c7eda9 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
@@ -55,119 +55,66 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
-
- - .. row 1
-
- - __u64
-
+ * - __u64
- ``topology_version``
-
- Version of the media graph topology. When the graph is created,
this field starts with zero. Every time a graph element is added
or removed, this field is incremented.
- - .. row 2
-
- - __u32
-
+ * - __u32
- ``num_entities``
-
- Number of entities in the graph
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``reserved1``
-
- Applications and drivers shall set this to 0.
- - .. row 4
-
- - __u64
-
+ * - __u64
- ``ptr_entities``
-
- A pointer to a memory area where the entities array will be
stored, converted to a 64-bit integer. It can be zero. If zero,
the ioctl won't store the entities. It will just update
``num_entities``
- - .. row 5
-
- - __u32
-
+ * - __u32
- ``num_interfaces``
-
- Number of interfaces in the graph
- - .. row 6
-
- - __u32
-
+ * - __u32
- ``reserved2``
-
- Applications and drivers shall set this to 0.
- - .. row 7
-
- - __u64
-
+ * - __u64
- ``ptr_interfaces``
-
- A pointer to a memory area where the interfaces array will be
stored, converted to a 64-bit integer. It can be zero. If zero,
the ioctl won't store the interfaces. It will just update
``num_interfaces``
- - .. row 8
-
- - __u32
-
+ * - __u32
- ``num_pads``
-
- Total number of pads in the graph
- - .. row 9
-
- - __u32
-
+ * - __u32
- ``reserved3``
-
- Applications and drivers shall set this to 0.
- - .. row 10
-
- - __u64
-
+ * - __u64
- ``ptr_pads``
-
- A pointer to a memory area where the pads array will be stored,
converted to a 64-bit integer. It can be zero. If zero, the ioctl
won't store the pads. It will just update ``num_pads``
- - .. row 11
-
- - __u32
-
+ * - __u32
- ``num_links``
-
- Total number of data and interface links in the graph
- - .. row 12
-
- - __u32
-
+ * - __u32
- ``reserved4``
-
- Applications and drivers shall set this to 0.
- - .. row 13
-
- - __u64
-
+ * - __u64
- ``ptr_links``
-
- A pointer to a memory area where the links array will be stored,
converted to a 64-bit integer. It can be zero. If zero, the ioctl
won't store the links. It will just update ``num_links``
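
Since every ``ptr_*`` field may be zero, the usual pattern is two calls: the
first retrieves the element counts, the second fills caller-allocated arrays.
A minimal sketch for the entities array, assuming ``fd`` is an open media
device node (if ``topology_version`` differs between the two calls, the
arrays should be reallocated and the ioctl retried)::

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    struct media_v2_topology topo = { 0 };
    struct media_v2_entity *ents;

    ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo);  /* all ptr_* zero: counts only */

    ents = calloc(topo.num_entities, sizeof(*ents));
    topo.ptr_entities = (uintptr_t)ents;     /* pointer passed as 64-bit integer */
    ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo);  /* second call stores the entities */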
@@ -182,37 +129,31 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``id``
+ - Unique ID for the entity. Do not expect that the ID will
+ always be the same for each instance of the device. In other words,
+ do not hardcode entity IDs in an application.
- - Unique ID for the entity.
-
- - .. row 2
-
- - char
-
+ * - char
- ``name``\ [64]
+ - Entity name as a UTF-8 NULL-terminated string. This name must be unique
+ within the media topology.
- - Entity name as an UTF-8 NULL-terminated string.
-
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``function``
-
- Entity main function, see :ref:`media-entity-functions` for details.
- - .. row 4
-
- - __u32
-
- - ``reserved``\ [6]
+ * - __u32
+ - ``flags``
+ - Entity flags, see :ref:`media-entity-flag` for details.
+ Only valid if ``MEDIA_V2_ENTITY_HAS_FLAGS(media_version)``
+ returns true. The ``media_version`` is defined in struct
+ :c:type:`media_device_info` and can be retrieved using
+ :ref:`MEDIA_IOC_DEVICE_INFO`.
+ * - __u32
+ - ``reserved``\ [5]
- Reserved for future extensions. Drivers and applications must set
this array to zero.
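
The ``MEDIA_V2_ENTITY_HAS_FLAGS()`` check described above is a small guard
before reading the new field. A hedged sketch, assuming ``fd`` is an open
media device node on a kernel that defines the macro::

    #include <stdbool.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    struct media_device_info info;
    bool have_flags = false;

    ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info);
    if (MEDIA_V2_ENTITY_HAS_FLAGS(info.media_version))
            have_flags = true;  /* the entity ``flags`` field is meaningful */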
@@ -226,47 +167,29 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``id``
+ - Unique ID for the interface. Do not expect that the ID will
+ always be the same for each instance of the device. In other words,
+ do not hardcode interface IDs in an application.
- - Unique ID for the interface.
-
- - .. row 2
-
- - __u32
-
+ * - __u32
- ``intf_type``
-
- Interface type, see :ref:`media-intf-type` for details.
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``flags``
-
- Interface flags. Currently unused.
- - .. row 4
-
- - __u32
-
+ * - __u32
- ``reserved``\ [9]
-
- Reserved for future extensions. Drivers and applications must set
this array to zero.
- - .. row 5
-
- - struct media_v2_intf_devnode
-
+ * - struct media_v2_intf_devnode
- ``devnode``
-
- Used only for device node interfaces. See
- :c:type:`media_v2_intf_devnode` for details..
+ :c:type:`media_v2_intf_devnode` for details.
.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
@@ -278,24 +201,14 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``major``
-
- Device node major number.
- - .. row 2
-
- - __u32
-
+ * - __u32
- ``minor``
-
- Device node minor number.
-
.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
.. c:type:: media_v2_pad
@@ -305,37 +218,29 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``id``
+ - Unique ID for the pad. Do not expect that the ID will
+ always be the same for each instance of the device. In other words,
+ do not hardcode pad IDs in an application.
- - Unique ID for the pad.
-
- - .. row 2
-
- - __u32
-
+ * - __u32
- ``entity_id``
-
- Unique ID for the entity where this pad belongs.
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``flags``
-
- Pad flags, see :ref:`media-pad-flag` for more details.
- - .. row 4
-
- - __u32
-
- - ``reserved``\ [5]
+ * - __u32
+ - ``index``
+ - Pad index, starts at 0. Only valid if ``MEDIA_V2_PAD_HAS_INDEX(media_version)``
+ returns true. The ``media_version`` is defined in struct
+ :c:type:`media_device_info` and can be retrieved using
+ :ref:`MEDIA_IOC_DEVICE_INFO`.
+ * - __u32
+ - ``reserved``\ [4]
- Reserved for future extensions. Drivers and applications must set
this array to zero.
@@ -349,49 +254,30 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``id``
+ - Unique ID for the link. Do not expect that the ID will
+ always be the same for each instance of the device. In other words,
+ do not hardcode link IDs in an application.
- - Unique ID for the link.
-
- - .. row 2
-
- - __u32
-
+ * - __u32
- ``source_id``
-
- On pad to pad links: unique ID for the source pad.
On interface to entity links: unique ID for the interface.
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``sink_id``
-
- On pad to pad links: unique ID for the sink pad.
On interface to entity links: unique ID for the entity.
- - .. row 4
-
- - __u32
-
+ * - __u32
- ``flags``
-
- Link flags, see :ref:`media-link-flag` for more details.
- - .. row 5
-
- - __u32
-
+ * - __u32
- ``reserved``\ [6]
-
- Reserved for future extensions. Drivers and applications must set
this array to zero.
diff --git a/Documentation/media/uapi/mediactl/media-types.rst b/Documentation/media/uapi/mediactl/media-types.rst
index 2dda14bd89b7..e4c57c8f4553 100644
--- a/Documentation/media/uapi/mediactl/media-types.rst
+++ b/Documentation/media/uapi/mediactl/media-types.rst
@@ -8,6 +8,41 @@ Types and flags used to represent the media graph elements
.. tabularcolumns:: |p{8.2cm}|p{10.3cm}|
.. _media-entity-functions:
+.. _MEDIA-ENT-F-UNKNOWN:
+.. _MEDIA-ENT-F-V4L2-SUBDEV-UNKNOWN:
+.. _MEDIA-ENT-F-IO-V4L:
+.. _MEDIA-ENT-F-IO-VBI:
+.. _MEDIA-ENT-F-IO-SWRADIO:
+.. _MEDIA-ENT-F-IO-DTV:
+.. _MEDIA-ENT-F-DTV-DEMOD:
+.. _MEDIA-ENT-F-TS-DEMUX:
+.. _MEDIA-ENT-F-DTV-CA:
+.. _MEDIA-ENT-F-DTV-NET-DECAP:
+.. _MEDIA-ENT-F-CONN-RF:
+.. _MEDIA-ENT-F-CONN-SVIDEO:
+.. _MEDIA-ENT-F-CONN-COMPOSITE:
+.. _MEDIA-ENT-F-CAM-SENSOR:
+.. _MEDIA-ENT-F-FLASH:
+.. _MEDIA-ENT-F-LENS:
+.. _MEDIA-ENT-F-ATV-DECODER:
+.. _MEDIA-ENT-F-TUNER:
+.. _MEDIA-ENT-F-IF-VID-DECODER:
+.. _MEDIA-ENT-F-IF-AUD-DECODER:
+.. _MEDIA-ENT-F-AUDIO-CAPTURE:
+.. _MEDIA-ENT-F-AUDIO-PLAYBACK:
+.. _MEDIA-ENT-F-AUDIO-MIXER:
+.. _MEDIA-ENT-F-PROC-VIDEO-COMPOSER:
+.. _MEDIA-ENT-F-PROC-VIDEO-PIXEL-FORMATTER:
+.. _MEDIA-ENT-F-PROC-VIDEO-PIXEL-ENC-CONV:
+.. _MEDIA-ENT-F-PROC-VIDEO-LUT:
+.. _MEDIA-ENT-F-PROC-VIDEO-SCALER:
+.. _MEDIA-ENT-F-PROC-VIDEO-STATISTICS:
+.. _MEDIA-ENT-F-PROC-VIDEO-ENCODER:
+.. _MEDIA-ENT-F-PROC-VIDEO-DECODER:
+.. _MEDIA-ENT-F-VID-MUX:
+.. _MEDIA-ENT-F-VID-IF-BRIDGE:
+.. _MEDIA-ENT-F-DV-DECODER:
+.. _MEDIA-ENT-F-DV-ENCODER:
.. cssclass:: longtable
@@ -15,139 +50,56 @@ Types and flags used to represent the media graph elements
:header-rows: 0
:stub-columns: 0
-
- - .. row 1
-
- .. _MEDIA-ENT-F-UNKNOWN:
- .. _MEDIA-ENT-F-V4L2-SUBDEV-UNKNOWN:
-
- - ``MEDIA_ENT_F_UNKNOWN`` and
-
+ * - ``MEDIA_ENT_F_UNKNOWN`` and
``MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN``
-
- Unknown entity. That generally indicates that a driver didn't
properly initialize the entity, which is a Kernel bug
- - .. row 2
-
- .. _MEDIA-ENT-F-IO-V4L:
-
- - ``MEDIA_ENT_F_IO_V4L``
-
+ * - ``MEDIA_ENT_F_IO_V4L``
- Data streaming input and/or output entity.
- - .. row 3
-
- .. _MEDIA-ENT-F-IO-VBI:
-
- - ``MEDIA_ENT_F_IO_VBI``
-
+ * - ``MEDIA_ENT_F_IO_VBI``
- V4L VBI streaming input or output entity
- - .. row 4
-
- .. _MEDIA-ENT-F-IO-SWRADIO:
-
- - ``MEDIA_ENT_F_IO_SWRADIO``
-
+ * - ``MEDIA_ENT_F_IO_SWRADIO``
- V4L Software Digital Radio (SDR) streaming input or output entity
- - .. row 5
-
- .. _MEDIA-ENT-F-IO-DTV:
-
- - ``MEDIA_ENT_F_IO_DTV``
-
+ * - ``MEDIA_ENT_F_IO_DTV``
- DVB Digital TV streaming input or output entity
- - .. row 6
-
- .. _MEDIA-ENT-F-DTV-DEMOD:
-
- - ``MEDIA_ENT_F_DTV_DEMOD``
-
+ * - ``MEDIA_ENT_F_DTV_DEMOD``
- Digital TV demodulator entity.
- - .. row 7
-
- .. _MEDIA-ENT-F-TS-DEMUX:
-
- - ``MEDIA_ENT_F_TS_DEMUX``
-
+ * - ``MEDIA_ENT_F_TS_DEMUX``
- MPEG Transport stream demux entity. Could be implemented on
hardware or in Kernelspace by the Linux DVB subsystem.
- - .. row 8
-
- .. _MEDIA-ENT-F-DTV-CA:
-
- - ``MEDIA_ENT_F_DTV_CA``
-
+ * - ``MEDIA_ENT_F_DTV_CA``
- Digital TV Conditional Access module (CAM) entity
- - .. row 9
-
- .. _MEDIA-ENT-F-DTV-NET-DECAP:
-
- - ``MEDIA_ENT_F_DTV_NET_DECAP``
-
+ * - ``MEDIA_ENT_F_DTV_NET_DECAP``
- Digital TV network ULE/MLE de-encapsulation entity. Could be
implemented on hardware or in Kernelspace
- - .. row 10
-
- .. _MEDIA-ENT-F-CONN-RF:
-
- - ``MEDIA_ENT_F_CONN_RF``
-
+ * - ``MEDIA_ENT_F_CONN_RF``
- Connector for a Radio Frequency (RF) signal.
- - .. row 11
-
- .. _MEDIA-ENT-F-CONN-SVIDEO:
-
- - ``MEDIA_ENT_F_CONN_SVIDEO``
-
+ * - ``MEDIA_ENT_F_CONN_SVIDEO``
- Connector for a S-Video signal.
- - .. row 12
-
- .. _MEDIA-ENT-F-CONN-COMPOSITE:
-
- - ``MEDIA_ENT_F_CONN_COMPOSITE``
-
+ * - ``MEDIA_ENT_F_CONN_COMPOSITE``
- Connector for an RGB composite signal.
- - .. row 13
-
- .. _MEDIA-ENT-F-CAM-SENSOR:
-
- - ``MEDIA_ENT_F_CAM_SENSOR``
-
+ * - ``MEDIA_ENT_F_CAM_SENSOR``
- Camera video sensor entity.
- - .. row 14
-
- .. _MEDIA-ENT-F-FLASH:
-
- - ``MEDIA_ENT_F_FLASH``
-
+ * - ``MEDIA_ENT_F_FLASH``
- Flash controller entity.
- - .. row 15
-
- .. _MEDIA-ENT-F-LENS:
-
- - ``MEDIA_ENT_F_LENS``
-
+ * - ``MEDIA_ENT_F_LENS``
- Lens controller entity.
- - .. row 16
-
- .. _MEDIA-ENT-F-ATV-DECODER:
-
- - ``MEDIA_ENT_F_ATV_DECODER``
-
+ * - ``MEDIA_ENT_F_ATV_DECODER``
- Analog video decoder, the basic function of the video decoder is
to accept analogue video from a wide variety of sources such as
broadcast, DVD players, cameras and video cassette recorders, in
@@ -155,36 +107,21 @@ Types and flags used to represent the media graph elements
its component parts, luminance and chrominance, and output it in
some digital video standard, with appropriate timing signals.
- - .. row 17
-
- .. _MEDIA-ENT-F-TUNER:
-
- - ``MEDIA_ENT_F_TUNER``
-
+ * - ``MEDIA_ENT_F_TUNER``
- Digital TV, analog TV, radio and/or software radio tuner, which
consists of a PLL tuning stage that converts a radio frequency (RF)
signal into an Intermediate Frequency (IF). Modern tuners have
internally IF-PLL decoders for audio and video, but older models
have those stages implemented on separate entities.
- - .. row 18
-
- .. _MEDIA-ENT-F-IF-VID-DECODER:
-
- - ``MEDIA_ENT_F_IF_VID_DECODER``
-
+ * - ``MEDIA_ENT_F_IF_VID_DECODER``
- IF-PLL video decoder. It receives the IF from a PLL and decodes
the analog TV video signal. This is commonly found on some very
old analog tuners, like Philips MK3 designs. They all contain a
tda9887 (or some software compatible similar chip, like tda9885).
Those devices use a different I2C address than the tuner PLL.
- - .. row 19
-
- .. _MEDIA-ENT-F-IF-AUD-DECODER:
-
- - ``MEDIA_ENT_F_IF_AUD_DECODER``
-
+ * - ``MEDIA_ENT_F_IF_AUD_DECODER``
- IF-PLL sound decoder. It receives the IF from a PLL and decodes
the analog TV audio signal. This is commonly found on some very
old analog hardware, like Micronas msp3400, Philips tda9840,
@@ -192,36 +129,16 @@ Types and flags used to represent the media graph elements
tuner PLL and should be controlled together with the IF-PLL video
decoder.
- - .. row 20
-
- .. _MEDIA-ENT-F-AUDIO-CAPTURE:
-
- - ``MEDIA_ENT_F_AUDIO_CAPTURE``
-
+ * - ``MEDIA_ENT_F_AUDIO_CAPTURE``
- Audio Capture Function Entity.
- - .. row 21
-
- .. _MEDIA-ENT-F-AUDIO-PLAYBACK:
-
- - ``MEDIA_ENT_F_AUDIO_PLAYBACK``
-
+ * - ``MEDIA_ENT_F_AUDIO_PLAYBACK``
- Audio Playback Function Entity.
- - .. row 22
-
- .. _MEDIA-ENT-F-AUDIO-MIXER:
-
- - ``MEDIA_ENT_F_AUDIO_MIXER``
-
+ * - ``MEDIA_ENT_F_AUDIO_MIXER``
- Audio Mixer Function Entity.
- - .. row 23
-
- .. _MEDIA-ENT-F-PROC-VIDEO-COMPOSER:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_COMPOSER``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_COMPOSER``
- Video composer (blender). An entity capable of video
composing must have at least two sink pads and one source
pad, and composes input video frames onto output video
@@ -229,12 +146,7 @@ Types and flags used to represent the media graph elements
color keying, raster operations (ROP), stitching or any other
means.
- - .. row 24
-
- .. _MEDIA-ENT-F-PROC-VIDEO-PIXEL-FORMATTER:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER``
- Video pixel formatter. An entity capable of pixel formatting
must have at least one sink pad and one source pad. Read
pixel formatters read pixels from memory and perform a subset
@@ -243,12 +155,7 @@ Types and flags used to represent the media graph elements
a subset of dithering, pixel encoding conversion and packing
and write pixels to memory.
- - .. row 25
-
- .. _MEDIA-ENT-F-PROC-VIDEO-PIXEL-ENC-CONV:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV``
- Video pixel encoding converter. An entity capable of pixel
encoding conversion must have at least one sink pad and one
source pad, and convert the encoding of pixels received on
@@ -257,12 +164,7 @@ Types and flags used to represent the media graph elements
to RGB to/from HSV, RGB to/from YUV and CFA (Bayer) to RGB
conversions.
- - .. row 26
-
- .. _MEDIA-ENT-F-PROC-VIDEO-LUT:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_LUT``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_LUT``
- Video look-up table. An entity capable of video lookup table
processing must have one sink pad and one source pad. It uses
the values of the pixels received on its sink pad to look up
@@ -271,12 +173,7 @@ Types and flags used to represent the media graph elements
separately or combine them for multi-dimensional table
lookups.
- - .. row 27
-
- .. _MEDIA-ENT-F-PROC-VIDEO-SCALER:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_SCALER``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_SCALER``
- Video scaler. An entity capable of video scaling must have
at least one sink pad and one source pad, and scale the
video frame(s) received on its sink pad(s) to a different
@@ -287,311 +184,190 @@ Types and flags used to represent the media graph elements
sub-sampling (occasionally also referred to as skipping) are
considered as scaling.
- - .. row 28
-
- .. _MEDIA-ENT-F-PROC-VIDEO-STATISTICS:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_STATISTICS``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_STATISTICS``
- Video statistics computation (histogram, 3A, etc.). An entity
capable of statistics computation must have one sink pad and
one source pad. It computes statistics over the frames
received on its sink pad and outputs the statistics data on
its source pad.
- - .. row 29
+ * - ``MEDIA_ENT_F_PROC_VIDEO_ENCODER``
+ - Video (MPEG, HEVC, VPx, etc.) encoder. An entity capable of
+ compressing video frames. Must have one sink pad and at least
+ one source pad.
- .. _MEDIA-ENT-F-VID-MUX:
-
- - ``MEDIA_ENT_F_VID_MUX``
+ * - ``MEDIA_ENT_F_PROC_VIDEO_DECODER``
+ - Video (MPEG, HEVC, VPx, etc.) decoder. An entity capable of
+ decompressing a compressed video stream into uncompressed video
+ frames. Must have one sink pad and at least one source pad.
+ * - ``MEDIA_ENT_F_VID_MUX``
- Video multiplexer. An entity capable of multiplexing must have at
least two sink pads and one source pad, and must pass the video
frame(s) received from the active sink pad to the source pad.
- - .. row 30
-
- .. _MEDIA-ENT-F-VID-IF-BRIDGE:
-
- - ``MEDIA_ENT_F_VID_IF_BRIDGE``
-
+ * - ``MEDIA_ENT_F_VID_IF_BRIDGE``
- Video interface bridge. A video interface bridge entity must have at
least one sink pad and at least one source pad. It receives video
frames on its sink pad from an input video bus of one type (HDMI, eDP,
MIPI CSI-2, etc.), and outputs them on its source pad to an output
video bus of another type (eDP, MIPI CSI-2, parallel, etc.).
- - .. row 31
-
- .. _MEDIA-ENT-F-DTV-DECODER:
-
- - ``MEDIA_ENT_F_DTV_DECODER``
-
+ * - ``MEDIA_ENT_F_DV_DECODER``
- Digital video decoder. The basic function of the video decoder is
to accept digital video from a wide variety of sources
and output it in some digital video standard, with appropriate
timing signals.
+ * - ``MEDIA_ENT_F_DV_ENCODER``
+ - Digital video encoder. The basic function of the video encoder is
+ to accept digital video from some digital video standard with
+ appropriate timing signals (usually a parallel video bus with sync
+ signals) and output this to a digital video output connector such
+ as HDMI or DisplayPort.
+
.. tabularcolumns:: |p{5.5cm}|p{12.0cm}|
.. _media-entity-flag:
+.. _MEDIA-ENT-FL-DEFAULT:
+.. _MEDIA-ENT-FL-CONNECTOR:
.. flat-table:: Media entity flags
:header-rows: 0
:stub-columns: 0
-
- - .. row 1
-
- .. _MEDIA-ENT-FL-DEFAULT:
-
- - ``MEDIA_ENT_FL_DEFAULT``
-
+ * - ``MEDIA_ENT_FL_DEFAULT``
- Default entity for its type. Used to discover the default audio,
VBI and video devices, the default camera sensor, etc.
- - .. row 2
-
- .. _MEDIA-ENT-FL-CONNECTOR:
-
- - ``MEDIA_ENT_FL_CONNECTOR``
-
+ * - ``MEDIA_ENT_FL_CONNECTOR``
- The entity represents a connector.
.. tabularcolumns:: |p{6.5cm}|p{6.0cm}|p{5.0cm}|
.. _media-intf-type:
+.. _MEDIA-INTF-T-DVB-FE:
+.. _MEDIA-INTF-T-DVB-DEMUX:
+.. _MEDIA-INTF-T-DVB-DVR:
+.. _MEDIA-INTF-T-DVB-CA:
+.. _MEDIA-INTF-T-DVB-NET:
+.. _MEDIA-INTF-T-V4L-VIDEO:
+.. _MEDIA-INTF-T-V4L-VBI:
+.. _MEDIA-INTF-T-V4L-RADIO:
+.. _MEDIA-INTF-T-V4L-SUBDEV:
+.. _MEDIA-INTF-T-V4L-SWRADIO:
+.. _MEDIA-INTF-T-V4L-TOUCH:
+.. _MEDIA-INTF-T-ALSA-PCM-CAPTURE:
+.. _MEDIA-INTF-T-ALSA-PCM-PLAYBACK:
+.. _MEDIA-INTF-T-ALSA-CONTROL:
+.. _MEDIA-INTF-T-ALSA-COMPRESS:
+.. _MEDIA-INTF-T-ALSA-RAWMIDI:
+.. _MEDIA-INTF-T-ALSA-HWDEP:
+.. _MEDIA-INTF-T-ALSA-SEQUENCER:
+.. _MEDIA-INTF-T-ALSA-TIMER:
.. flat-table:: Media interface types
:header-rows: 0
:stub-columns: 0
-
- - .. row 1
-
- .. _MEDIA-INTF-T-DVB-FE:
-
- - ``MEDIA_INTF_T_DVB_FE``
-
+ * - ``MEDIA_INTF_T_DVB_FE``
- Device node interface for the Digital TV frontend
-
- typically, /dev/dvb/adapter?/frontend?
- - .. row 2
-
- .. _MEDIA-INTF-T-DVB-DEMUX:
-
- - ``MEDIA_INTF_T_DVB_DEMUX``
-
+ * - ``MEDIA_INTF_T_DVB_DEMUX``
- Device node interface for the Digital TV demux
-
- typically, /dev/dvb/adapter?/demux?
- - .. row 3
-
- .. _MEDIA-INTF-T-DVB-DVR:
-
- - ``MEDIA_INTF_T_DVB_DVR``
-
+ * - ``MEDIA_INTF_T_DVB_DVR``
- Device node interface for the Digital TV DVR
-
- typically, /dev/dvb/adapter?/dvr?
- - .. row 4
-
- .. _MEDIA-INTF-T-DVB-CA:
-
- - ``MEDIA_INTF_T_DVB_CA``
-
+ * - ``MEDIA_INTF_T_DVB_CA``
- Device node interface for the Digital TV Conditional Access
-
- typically, /dev/dvb/adapter?/ca?
- - .. row 5
-
- .. _MEDIA-INTF-T-DVB-NET:
-
- - ``MEDIA_INTF_T_DVB_NET``
-
+ * - ``MEDIA_INTF_T_DVB_NET``
- Device node interface for the Digital TV network control
-
- typically, /dev/dvb/adapter?/net?
- - .. row 6
-
- .. _MEDIA-INTF-T-V4L-VIDEO:
-
- - ``MEDIA_INTF_T_V4L_VIDEO``
-
+ * - ``MEDIA_INTF_T_V4L_VIDEO``
- Device node interface for video (V4L)
-
- typically, /dev/video?
- - .. row 7
-
- .. _MEDIA-INTF-T-V4L-VBI:
-
- - ``MEDIA_INTF_T_V4L_VBI``
-
+ * - ``MEDIA_INTF_T_V4L_VBI``
- Device node interface for VBI (V4L)
-
- typically, /dev/vbi?
- - .. row 8
-
- .. _MEDIA-INTF-T-V4L-RADIO:
-
- - ``MEDIA_INTF_T_V4L_RADIO``
-
+ * - ``MEDIA_INTF_T_V4L_RADIO``
- Device node interface for radio (V4L)
-
- typically, /dev/radio?
- - .. row 9
-
- .. _MEDIA-INTF-T-V4L-SUBDEV:
-
- - ``MEDIA_INTF_T_V4L_SUBDEV``
-
+ * - ``MEDIA_INTF_T_V4L_SUBDEV``
- Device node interface for a V4L subdevice
-
- typically, /dev/v4l-subdev?
- - .. row 10
-
- .. _MEDIA-INTF-T-V4L-SWRADIO:
-
- - ``MEDIA_INTF_T_V4L_SWRADIO``
-
+ * - ``MEDIA_INTF_T_V4L_SWRADIO``
- Device node interface for Software Defined Radio (V4L)
-
- typically, /dev/swradio?
- - .. row 11
-
- .. _MEDIA-INTF-T-V4L-TOUCH:
-
- - ``MEDIA_INTF_T_V4L_TOUCH``
-
+ * - ``MEDIA_INTF_T_V4L_TOUCH``
- Device node interface for Touch device (V4L)
-
- typically, /dev/v4l-touch?
- - .. row 12
-
- .. _MEDIA-INTF-T-ALSA-PCM-CAPTURE:
-
- - ``MEDIA_INTF_T_ALSA_PCM_CAPTURE``
-
+ * - ``MEDIA_INTF_T_ALSA_PCM_CAPTURE``
- Device node interface for ALSA PCM Capture
-
- typically, /dev/snd/pcmC?D?c
- - .. row 13
-
- .. _MEDIA-INTF-T-ALSA-PCM-PLAYBACK:
-
- - ``MEDIA_INTF_T_ALSA_PCM_PLAYBACK``
-
+ * - ``MEDIA_INTF_T_ALSA_PCM_PLAYBACK``
- Device node interface for ALSA PCM Playback
-
- typically, /dev/snd/pcmC?D?p
- - .. row 14
-
- .. _MEDIA-INTF-T-ALSA-CONTROL:
-
- - ``MEDIA_INTF_T_ALSA_CONTROL``
-
+ * - ``MEDIA_INTF_T_ALSA_CONTROL``
- Device node interface for ALSA Control
-
- typically, /dev/snd/controlC?
- - .. row 15
-
- .. _MEDIA-INTF-T-ALSA-COMPRESS:
-
- - ``MEDIA_INTF_T_ALSA_COMPRESS``
-
+ * - ``MEDIA_INTF_T_ALSA_COMPRESS``
- Device node interface for ALSA Compress
-
- typically, /dev/snd/compr?
- - .. row 16
-
- .. _MEDIA-INTF-T-ALSA-RAWMIDI:
-
- - ``MEDIA_INTF_T_ALSA_RAWMIDI``
-
+ * - ``MEDIA_INTF_T_ALSA_RAWMIDI``
- Device node interface for ALSA Raw MIDI
-
- typically, /dev/snd/midi?
- - .. row 17
-
- .. _MEDIA-INTF-T-ALSA-HWDEP:
-
- - ``MEDIA_INTF_T_ALSA_HWDEP``
-
+ * - ``MEDIA_INTF_T_ALSA_HWDEP``
- Device node interface for ALSA Hardware Dependent
-
- typically, /dev/snd/hwC?D?
- - .. row 18
-
- .. _MEDIA-INTF-T-ALSA-SEQUENCER:
-
- - ``MEDIA_INTF_T_ALSA_SEQUENCER``
-
+ * - ``MEDIA_INTF_T_ALSA_SEQUENCER``
- Device node interface for ALSA Sequencer
-
- typically, /dev/snd/seq
- - .. row 19
-
- .. _MEDIA-INTF-T-ALSA-TIMER:
-
- - ``MEDIA_INTF_T_ALSA_TIMER``
-
+ * - ``MEDIA_INTF_T_ALSA_TIMER``
- Device node interface for ALSA Timer
-
- typically, /dev/snd/timer
.. tabularcolumns:: |p{5.5cm}|p{12.0cm}|
.. _media-pad-flag:
+.. _MEDIA-PAD-FL-SINK:
+.. _MEDIA-PAD-FL-SOURCE:
+.. _MEDIA-PAD-FL-MUST-CONNECT:
.. flat-table:: Media pad flags
:header-rows: 0
:stub-columns: 0
-
- - .. row 1
-
- .. _MEDIA-PAD-FL-SINK:
-
- - ``MEDIA_PAD_FL_SINK``
-
+ * - ``MEDIA_PAD_FL_SINK``
- Input pad, relative to the entity. Input pads sink data and are
targets of links.
- - .. row 2
-
- .. _MEDIA-PAD-FL-SOURCE:
-
- - ``MEDIA_PAD_FL_SOURCE``
-
+ * - ``MEDIA_PAD_FL_SOURCE``
- Output pad, relative to the entity. Output pads source data and
are origins of links.
- - .. row 3
-
- .. _MEDIA-PAD-FL-MUST-CONNECT:
-
- - ``MEDIA_PAD_FL_MUST_CONNECT``
-
+ * - ``MEDIA_PAD_FL_MUST_CONNECT``
- If this flag is set and the pad is linked to any other pad, then
at least one of those links must be enabled for the entity to be
able to stream. There could be temporary reasons (e.g. device
@@ -606,46 +382,29 @@ must be set for every pad.
.. tabularcolumns:: |p{5.5cm}|p{12.0cm}|
.. _media-link-flag:
+.. _MEDIA-LNK-FL-ENABLED:
+.. _MEDIA-LNK-FL-IMMUTABLE:
+.. _MEDIA-LNK-FL-DYNAMIC:
+.. _MEDIA-LNK-FL-LINK-TYPE:
.. flat-table:: Media link flags
:header-rows: 0
:stub-columns: 0
-
- - .. row 1
-
- .. _MEDIA-LNK-FL-ENABLED:
-
- - ``MEDIA_LNK_FL_ENABLED``
-
+ * - ``MEDIA_LNK_FL_ENABLED``
- The link is enabled and can be used to transfer media data. When
two or more links target a sink pad, only one of them can be
enabled at a time.
- - .. row 2
-
- .. _MEDIA-LNK-FL-IMMUTABLE:
-
- - ``MEDIA_LNK_FL_IMMUTABLE``
-
+ * - ``MEDIA_LNK_FL_IMMUTABLE``
- The link enabled state can't be modified at runtime. An immutable
link is always enabled.
- - .. row 3
-
- .. _MEDIA-LNK-FL-DYNAMIC:
-
- - ``MEDIA_LNK_FL_DYNAMIC``
-
+ * - ``MEDIA_LNK_FL_DYNAMIC``
- The link enabled state can be modified during streaming. This flag
is set by drivers and is read-only for applications.
- - .. row 4
-
- .. _MEDIA-LNK-FL-LINK-TYPE:
-
- - ``MEDIA_LNK_FL_LINK_TYPE``
-
+ * - ``MEDIA_LNK_FL_LINK_TYPE``
- This is a bitmask that defines the type of the link. Currently,
two types of links are supported:
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index 03931f9b1285..9f7312bf3365 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -1955,9 +1955,51 @@ enum v4l2_vp8_golden_frame_sel -
``V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (integer)``
Quantization parameter for a P frame for VP8.
-``V4L2_CID_MPEG_VIDEO_VPX_PROFILE (integer)``
- Select the desired profile for VPx encoder. Acceptable values are 0,
- 1, 2 and 3 corresponding to encoder profiles 0, 1, 2 and 3.
+.. _v4l2-mpeg-video-vp8-profile:
+
+``V4L2_CID_MPEG_VIDEO_VP8_PROFILE``
+ (enum)
+
+enum v4l2_mpeg_video_vp8_profile -
+ This control allows selecting the profile for the VP8 encoder.
+ It is also used to enumerate the profiles supported by a VP8 encoder or decoder.
+ Possible values are:
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_0``
+ - Profile 0
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_1``
+ - Profile 1
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_2``
+ - Profile 2
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_3``
+ - Profile 3
+
+.. _v4l2-mpeg-video-vp9-profile:
+
+``V4L2_CID_MPEG_VIDEO_VP9_PROFILE``
+ (enum)
+
+enum v4l2_mpeg_video_vp9_profile -
+ This control allows selecting the profile for the VP9 encoder.
+ It is also used to enumerate the profiles supported by a VP9 encoder or decoder.
+ Possible values are:
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_0``
+ - Profile 0
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_1``
+ - Profile 1
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_2``
+ - Profile 2
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_3``
+ - Profile 3
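
As these are menu (enum) controls, they can be set like any other control,
and the supported profiles can be enumerated with ``VIDIOC_QUERYMENU``. A
minimal sketch, assuming ``fd`` is an open encoder video node::

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    struct v4l2_control ctrl = {
            .id    = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
            .value = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
    };

    if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
            perror("VIDIOC_S_CTRL");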
High Efficiency Video Coding (HEVC/H.265) Control Reference
diff --git a/Documentation/media/uapi/v4l/pixfmt-compressed.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
index abec03937bb3..d382e7a5c38e 100644
--- a/Documentation/media/uapi/v4l/pixfmt-compressed.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
@@ -95,3 +95,10 @@ Compressed Formats
- ``V4L2_PIX_FMT_HEVC``
- 'HEVC'
- HEVC/H.265 video elementary stream.
+ * .. _V4L2-PIX-FMT-FWHT:
+
+ - ``V4L2_PIX_FMT_FWHT``
+ - 'FWHT'
+ - Video elementary stream using a codec based on the Fast Walsh Hadamard
+ Transform. This codec is implemented by the vicodec ('Virtual Codec')
+ driver. See the vicodec-codec.h header for more details.
diff --git a/Documentation/media/uapi/v4l/pixfmt-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
index cf2ef7df9616..1f9a7e3a07c9 100644
--- a/Documentation/media/uapi/v4l/pixfmt-rgb.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
@@ -19,4 +19,5 @@ RGB Formats
pixfmt-srggb10-ipu3
pixfmt-srggb12
pixfmt-srggb12p
+ pixfmt-srggb14p
pixfmt-srggb16
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
new file mode 100644
index 000000000000..88d20c0e4282
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
@@ -0,0 +1,127 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-PIX-FMT-SRGGB14P:
+.. _v4l2-pix-fmt-sbggr14p:
+.. _v4l2-pix-fmt-sgbrg14p:
+.. _v4l2-pix-fmt-sgrbg14p:
+
+*******************************************************************************************************************************
+V4L2_PIX_FMT_SRGGB14P ('pRCC'), V4L2_PIX_FMT_SGRBG14P ('pgCC'), V4L2_PIX_FMT_SGBRG14P ('pGCC'), V4L2_PIX_FMT_SBGGR14P ('pBCC')
+*******************************************************************************************************************************
+
+*man V4L2_PIX_FMT_SRGGB14P(2)*
+
+V4L2_PIX_FMT_SGRBG14P
+V4L2_PIX_FMT_SGBRG14P
+V4L2_PIX_FMT_SBGGR14P
+14-bit packed Bayer formats
+
+
+Description
+===========
+
+These four pixel formats are packed raw sRGB / Bayer formats with 14
+bits per colour. Every four consecutive samples are packed into seven
+bytes. Each of the first four bytes contains the eight high order bits
+of one pixel, and the three following bytes contain the six least
+significant bits of each pixel, in the same order.
+
+Each n-pixel row contains n/2 green samples and n/2 blue or red samples,
+with alternating green-red and green-blue rows. They are conventionally
+described as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example
+of one of these formats:
+
+**Byte Order.**
+Each cell is one byte.
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 2 1 1 1 1 1 1 1
+
+
+ - .. row 1
+
+ - start + 0:
+
+ - B\ :sub:`00high`
+
+ - G\ :sub:`01high`
+
+ - B\ :sub:`02high`
+
+ - G\ :sub:`03high`
+
+ - G\ :sub:`01low bits 1--0`\ (bits 7--6)
+ B\ :sub:`00low bits 5--0`\ (bits 5--0)
+
+ - B\ :sub:`02low bits 3--0`\ (bits 7--4)
+ G\ :sub:`01low bits 5--2`\ (bits 3--0)
+
+ - G\ :sub:`03low bits 5--0`\ (bits 7--2)
+ B\ :sub:`02low bits 5--4`\ (bits 1--0)
+
+ - .. row 2
+
+ - start + 7:
+
+ - G\ :sub:`00high`
+
+ - R\ :sub:`01high`
+
+ - G\ :sub:`02high`
+
+ - R\ :sub:`03high`
+
+ - R\ :sub:`01low bits 1--0`\ (bits 7--6)
+ G\ :sub:`00low bits 5--0`\ (bits 5--0)
+
+ - G\ :sub:`02low bits 3--0`\ (bits 7--4)
+ R\ :sub:`01low bits 5--2`\ (bits 3--0)
+
+ - R\ :sub:`03low bits 5--0`\ (bits 7--2)
+ G\ :sub:`02low bits 5--4`\ (bits 1--0)
+
+ - .. row 3
+
+ - start + 14
+
+ - B\ :sub:`20high`
+
+ - G\ :sub:`21high`
+
+ - B\ :sub:`22high`
+
+ - G\ :sub:`23high`
+
+ - G\ :sub:`21low bits 1--0`\ (bits 7--6)
+ B\ :sub:`20low bits 5--0`\ (bits 5--0)
+
+ - B\ :sub:`22low bits 3--0`\ (bits 7--4)
+ G\ :sub:`21low bits 5--2`\ (bits 3--0)
+
+ - G\ :sub:`23low bits 5--0`\ (bits 7--2)
+ B\ :sub:`22low bits 5--4`\ (bits 1--0)
+
+ - .. row 4
+
+ - start + 21
+
+ - G\ :sub:`30high`
+
+ - R\ :sub:`31high`
+
+ - G\ :sub:`32high`
+
+ - R\ :sub:`33high`
+
+ - R\ :sub:`31low bits 1--0`\ (bits 7--6)
+ G\ :sub:`30low bits 5--0`\ (bits 5--0)
+
+ - G\ :sub:`32low bits 3--0`\ (bits 7--4)
+ R\ :sub:`31low bits 5--2`\ (bits 3--0)
+
+ - R\ :sub:`33low bits 5--0`\ (bits 7--2)
+ G\ :sub:`32low bits 5--4`\ (bits 1--0)
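
Following the byte order above, each 7-byte group expands to four 14-bit
samples. An illustrative helper (hypothetical, not part of any API)::

    #include <stdint.h>

    static void unpack14p(const uint8_t *b, uint16_t s[4])
    {
            /* b[0..3] hold the high 8 bits, b[4..6] the low 6 bits. */
            s[0] = (b[0] << 6) | (b[4] & 0x3f);
            s[1] = (b[1] << 6) | ((b[5] & 0x0f) << 2) | (b[4] >> 6);
            s[2] = (b[2] << 6) | ((b[6] & 0x03) << 4) | (b[5] >> 4);
            s[3] = (b[3] << 6) | (b[6] >> 2);
    }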
diff --git a/Documentation/media/uapi/v4l/pixfmt-y10p.rst b/Documentation/media/uapi/v4l/pixfmt-y10p.rst
new file mode 100644
index 000000000000..13b571306915
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-y10p.rst
@@ -0,0 +1,33 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-PIX-FMT-Y10P:
+
+******************************
+V4L2_PIX_FMT_Y10P ('Y10P')
+******************************
+
+Grey-scale image as a MIPI RAW10 packed array
+
+
+Description
+===========
+
+This is a packed grey-scale image format with a depth of 10 bits per
+pixel. Every four consecutive pixels are packed into 5 bytes. Each of
+the first 4 bytes contains the 8 high order bits of one pixel, and
+the 5th byte contains the 2 least significant bits of each pixel,
+in the same order.
+
+**Bit-packed representation.**
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 8 8 8 8 64
+
+ * - Y'\ :sub:`00[9:2]`
+ - Y'\ :sub:`01[9:2]`
+ - Y'\ :sub:`02[9:2]`
+ - Y'\ :sub:`03[9:2]`
+ - Y'\ :sub:`03[1:0]`\ (bits 7--6) Y'\ :sub:`02[1:0]`\ (bits 5--4)
+ Y'\ :sub:`01[1:0]`\ (bits 3--2) Y'\ :sub:`00[1:0]`\ (bits 1--0)
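
As with other MIPI RAW10 packings, each 5-byte group expands to four 10-bit
pixels. An illustrative helper (hypothetical, not part of any API)::

    #include <stdint.h>

    static void unpack_y10p(const uint8_t *b, uint16_t y[4])
    {
            /* b[0..3] hold the high 8 bits, b[4] the low 2 bits of each pixel. */
            for (int i = 0; i < 4; i++)
                    y[i] = (b[i] << 2) | ((b[4] >> (2 * i)) & 0x3);
    }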
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index 9fcabe7f9367..8e73fcfc6900 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -37,19 +37,22 @@ Media Bus Formats
- Image colorspace, from enum
:c:type:`v4l2_colorspace`. See
:ref:`colorspaces` for details.
- * - enum :c:type:`v4l2_ycbcr_encoding`
+ * - __u16
- ``ycbcr_enc``
- - This information supplements the ``colorspace`` and must be set by
+ - Y'CbCr encoding, from enum :c:type:`v4l2_ycbcr_encoding`.
+ This information supplements the ``colorspace`` and must be set by
the driver for capture streams and by the application for output
streams, see :ref:`colorspaces`.
- * - enum :c:type:`v4l2_quantization`
+ * - __u16
- ``quantization``
- - This information supplements the ``colorspace`` and must be set by
+ - Quantization range, from enum :c:type:`v4l2_quantization`.
+ This information supplements the ``colorspace`` and must be set by
the driver for capture streams and by the application for output
streams, see :ref:`colorspaces`.
- * - enum :c:type:`v4l2_xfer_func`
+ * - __u16
- ``xfer_func``
- - This information supplements the ``colorspace`` and must be set by
+ - Transfer function, from enum :c:type:`v4l2_xfer_func`.
+ This information supplements the ``colorspace`` and must be set by
the driver for capture streams and by the application for output
streams, see :ref:`colorspaces`.
* - __u16
@@ -4315,6 +4318,78 @@ the following codes.
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-Y10-2X8-PADHI_LE:
+
+ - MEDIA_BUS_FMT_Y10_2X8_PADHI_LE
+ - 0x202c
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - 0
+ - 0
+ - 0
+ - 0
+ - 0
+ - 0
+ - y\ :sub:`9`
+ - y\ :sub:`8`
* .. _MEDIA-BUS-FMT-UYVY10-2X10:
- MEDIA_BUS_FMT_UYVY10_2X10
diff --git a/Documentation/media/uapi/v4l/vidioc-enumstd.rst b/Documentation/media/uapi/v4l/vidioc-enumstd.rst
index b7fda29f46a1..2644a62acd4b 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumstd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumstd.rst
@@ -2,14 +2,14 @@
.. _VIDIOC_ENUMSTD:
-********************
-ioctl VIDIOC_ENUMSTD
-********************
+*******************************************
+ioctl VIDIOC_ENUMSTD, VIDIOC_SUBDEV_ENUMSTD
+*******************************************
Name
====
-VIDIOC_ENUMSTD - Enumerate supported video standards
+VIDIOC_ENUMSTD - VIDIOC_SUBDEV_ENUMSTD - Enumerate supported video standards
Synopsis
@@ -18,6 +18,9 @@ Synopsis
.. c:function:: int ioctl( int fd, VIDIOC_ENUMSTD, struct v4l2_standard *argp )
:name: VIDIOC_ENUMSTD
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_ENUMSTD, struct v4l2_standard *argp )
+ :name: VIDIOC_SUBDEV_ENUMSTD
+
Arguments
=========
diff --git a/Documentation/media/uapi/v4l/vidioc-g-std.rst b/Documentation/media/uapi/v4l/vidioc-g-std.rst
index 90791ab51a53..8d94f0404df2 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-std.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-std.rst
@@ -2,14 +2,14 @@
.. _VIDIOC_G_STD:
-********************************
-ioctl VIDIOC_G_STD, VIDIOC_S_STD
-********************************
+**************************************************************************
+ioctl VIDIOC_G_STD, VIDIOC_S_STD, VIDIOC_SUBDEV_G_STD, VIDIOC_SUBDEV_S_STD
+**************************************************************************
Name
====
-VIDIOC_G_STD - VIDIOC_S_STD - Query or select the video standard of the current input
+VIDIOC_G_STD - VIDIOC_S_STD - VIDIOC_SUBDEV_G_STD - VIDIOC_SUBDEV_S_STD - Query or select the video standard of the current input
Synopsis
@@ -21,6 +21,12 @@ Synopsis
.. c:function:: int ioctl( int fd, VIDIOC_S_STD, const v4l2_std_id *argp )
:name: VIDIOC_S_STD
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_G_STD, v4l2_std_id *argp )
+ :name: VIDIOC_SUBDEV_G_STD
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_S_STD, const v4l2_std_id *argp )
+ :name: VIDIOC_SUBDEV_S_STD
+
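A hedged usage sketch, assuming ``fd`` is an open V4L2 subdevice node::

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>
    #include <linux/v4l2-subdev.h>

    v4l2_std_id std = V4L2_STD_PAL;

    if (ioctl(fd, VIDIOC_SUBDEV_S_STD, &std) < 0)
            perror("VIDIOC_SUBDEV_S_STD");
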
Arguments
=========
diff --git a/Documentation/media/uapi/v4l/vidioc-querystd.rst b/Documentation/media/uapi/v4l/vidioc-querystd.rst
index cf40bca19b9f..a8385cc74818 100644
--- a/Documentation/media/uapi/v4l/vidioc-querystd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querystd.rst
@@ -2,14 +2,14 @@
.. _VIDIOC_QUERYSTD:
-*********************
-ioctl VIDIOC_QUERYSTD
-*********************
+*********************************************
+ioctl VIDIOC_QUERYSTD, VIDIOC_SUBDEV_QUERYSTD
+*********************************************
Name
====
-VIDIOC_QUERYSTD - Sense the video standard received by the current input
+VIDIOC_QUERYSTD - VIDIOC_SUBDEV_QUERYSTD - Sense the video standard received by the current input
Synopsis
@@ -18,6 +18,9 @@ Synopsis
.. c:function:: int ioctl( int fd, VIDIOC_QUERYSTD, v4l2_std_id *argp )
:name: VIDIOC_QUERYSTD
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_QUERYSTD, v4l2_std_id *argp )
+ :name: VIDIOC_SUBDEV_QUERYSTD
+
Arguments
=========
diff --git a/Documentation/media/uapi/v4l/yuv-formats.rst b/Documentation/media/uapi/v4l/yuv-formats.rst
index 3334ea445657..9ab0592d08da 100644
--- a/Documentation/media/uapi/v4l/yuv-formats.rst
+++ b/Documentation/media/uapi/v4l/yuv-formats.rst
@@ -29,6 +29,7 @@ to brightness information.
pixfmt-y10
pixfmt-y12
pixfmt-y10b
+ pixfmt-y10p
pixfmt-y16
pixfmt-y16-be
pixfmt-y8i
diff --git a/Documentation/media/v4l-drivers/qcom_camss.rst b/Documentation/media/v4l-drivers/qcom_camss.rst
index 9e66b7b5770f..f27c8df20b2b 100644
--- a/Documentation/media/v4l-drivers/qcom_camss.rst
+++ b/Documentation/media/v4l-drivers/qcom_camss.rst
@@ -7,34 +7,34 @@ Introduction
------------
This file documents the Qualcomm Camera Subsystem driver located under
-drivers/media/platform/qcom/camss-8x16.
+drivers/media/platform/qcom/camss.
The current version of the driver supports the Camera Subsystem found on
-Qualcomm MSM8916 and APQ8016 processors.
+Qualcomm MSM8916/APQ8016 and MSM8996/APQ8096 processors.
The driver implements V4L2, Media controller and V4L2 subdev interfaces.
Camera sensor using V4L2 subdev interface in the kernel is supported.
The driver is implemented using, as a reference, the Qualcomm Camera Subsystem
-driver for Android as found in Code Aurora [#f1]_.
+driver for Android as found in Code Aurora [#f1]_ [#f2]_.
Qualcomm Camera Subsystem hardware
----------------------------------
-The Camera Subsystem hardware found on 8x16 processors and supported by the
-driver consists of:
+The Camera Subsystem hardware found on 8x16 / 8x96 processors and supported by
+the driver consists of:
-- 2 CSIPHY modules. They handle the Physical layer of the CSI2 receivers.
+- 2 / 3 CSIPHY modules. They handle the Physical layer of the CSI2 receivers.
A separate camera sensor can be connected to each of the CSIPHY module;
-- 2 CSID (CSI Decoder) modules. They handle the Protocol and Application layer
- of the CSI2 receivers. A CSID can decode data stream from any of the CSIPHY.
- Each CSID also contains a TG (Test Generator) block which can generate
+- 2 / 4 CSID (CSI Decoder) modules. They handle the Protocol and Application
+ layer of the CSI2 receivers. A CSID can decode a data stream from any of the
+ CSIPHY. Each CSID also contains a TG (Test Generator) block which can generate
artificial input data for test purposes;
- ISPIF (ISP Interface) module. Handles the routing of the data streams from
the CSIDs to the inputs of the VFE;
-- VFE (Video Front End) module. Contains a pipeline of image processing hardware
- blocks. The VFE has different input interfaces. The PIX (Pixel) input
+- 1 / 2 VFE (Video Front End) module(s). They contain a pipeline of image processing
+ hardware blocks. The VFE has different input interfaces. The PIX (Pixel) input
interface feeds the input data to the image processing pipeline. The image
processing pipeline contains also a scale and crop module at the end. Three
RDI (Raw Dump Interface) input interfaces bypass the image processing
@@ -49,18 +49,33 @@ The current version of the driver supports:
- Input from camera sensor via CSIPHY;
- Generation of test input data by the TG in CSID;
-- RDI interface of VFE - raw dump of the input data to memory.
+- RDI interface of VFE
- Supported formats:
+ - Raw dump of the input data to memory.
- - YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV /
- V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY);
- - MIPI RAW8 (8bit Bayer RAW - V4L2_PIX_FMT_SRGGB8 /
- V4L2_PIX_FMT_SGRBG8 / V4L2_PIX_FMT_SGBRG8 / V4L2_PIX_FMT_SBGGR8);
- - MIPI RAW10 (10bit packed Bayer RAW - V4L2_PIX_FMT_SBGGR10P /
- V4L2_PIX_FMT_SGBRG10P / V4L2_PIX_FMT_SGRBG10P / V4L2_PIX_FMT_SRGGB10P);
- - MIPI RAW12 (12bit packed Bayer RAW - V4L2_PIX_FMT_SRGGB12P /
- V4L2_PIX_FMT_SGBRG12P / V4L2_PIX_FMT_SGRBG12P / V4L2_PIX_FMT_SRGGB12P).
+ Supported formats:
+
+ - YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV /
+ V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY);
+ - MIPI RAW8 (8bit Bayer RAW - V4L2_PIX_FMT_SRGGB8 /
+ V4L2_PIX_FMT_SGRBG8 / V4L2_PIX_FMT_SGBRG8 / V4L2_PIX_FMT_SBGGR8);
+ - MIPI RAW10 (10bit packed Bayer RAW - V4L2_PIX_FMT_SBGGR10P /
+ V4L2_PIX_FMT_SGBRG10P / V4L2_PIX_FMT_SGRBG10P / V4L2_PIX_FMT_SRGGB10P /
+ V4L2_PIX_FMT_Y10P);
+ - MIPI RAW12 (12bit packed Bayer RAW - V4L2_PIX_FMT_SRGGB12P /
+ V4L2_PIX_FMT_SGBRG12P / V4L2_PIX_FMT_SGRBG12P / V4L2_PIX_FMT_SRGGB12P).
+ - (8x96 only) MIPI RAW14 (14bit packed Bayer RAW - V4L2_PIX_FMT_SRGGB14P /
+ V4L2_PIX_FMT_SGBRG14P / V4L2_PIX_FMT_SGRBG14P / V4L2_PIX_FMT_SRGGB14P).
+
+ - (8x96 only) Format conversion of the input data.
+
+ Supported input formats:
+
+ - MIPI RAW10 (10bit packed Bayer RAW - V4L2_PIX_FMT_SBGGR10P / V4L2_PIX_FMT_Y10P).
+
+ Supported output formats:
+
+ - Plain16 RAW10 (10bit unpacked Bayer RAW - V4L2_PIX_FMT_SBGGR10 / V4L2_PIX_FMT_Y10).
- PIX interface of VFE
@@ -75,14 +90,16 @@ The current version of the driver supports:
- NV12/NV21 (two plane YUV 4:2:0 - V4L2_PIX_FMT_NV12 / V4L2_PIX_FMT_NV21);
- NV16/NV61 (two plane YUV 4:2:2 - V4L2_PIX_FMT_NV16 / V4L2_PIX_FMT_NV61).
+ - (8x96 only) YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV /
+ V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY).
- Scaling support. Configuration of the VFE Encoder Scale module
for downscaling with a ratio of up to 16x.
- Cropping support. Configuration of the VFE Encoder Crop module.
-- Concurrent and independent usage of two data inputs - could be camera sensors
- and/or TG.
+- Concurrent and independent usage of two (8x96: three) data inputs -
+ could be camera sensors and/or TG.
Driver Architecture and Design
@@ -90,14 +107,14 @@ Driver Architecture and Design
The driver implements the V4L2 subdev interface. With the goal to model the
hardware links between the modules and to expose a clean, logical and usable
-interface, the driver is split into V4L2 sub-devices as follows:
+interface, the driver is split into V4L2 sub-devices as follows (8x16 / 8x96):
-- 2 CSIPHY sub-devices - each CSIPHY is represented by a single sub-device;
-- 2 CSID sub-devices - each CSID is represented by a single sub-device;
-- 2 ISPIF sub-devices - ISPIF is represented by a number of sub-devices equal
- to the number of CSID sub-devices;
-- 4 VFE sub-devices - VFE is represented by a number of sub-devices equal to
- the number of the input interfaces (3 RDI and 1 PIX).
+- 2 / 3 CSIPHY sub-devices - each CSIPHY is represented by a single sub-device;
+- 2 / 4 CSID sub-devices - each CSID is represented by a single sub-device;
+- 2 / 4 ISPIF sub-devices - ISPIF is represented by a number of sub-devices
+ equal to the number of CSID sub-devices;
+- 4 / 8 VFE sub-devices - VFE is represented by a number of sub-devices equal to
+ the number of the input interfaces (3 RDI and 1 PIX for each VFE).
The considerations to split the driver in this particular way are as follows:
@@ -115,8 +132,8 @@ The considerations to split the driver in this particular way are as follows:
Each VFE sub-device is linked to a separate video device node.
-The media controller pipeline graph is as follows (with connected two OV5645
-camera sensors):
+The media controller pipeline graph is as follows (with connected two / three
+OV5645 camera sensors):
.. _qcom_camss_graph:
@@ -124,7 +141,13 @@ camera sensors):
:alt: qcom_camss_graph.dot
:align: center
- Media pipeline graph
+ Media pipeline graph 8x16
+
+.. kernel-figure:: qcom_camss_8x96_graph.dot
+ :alt: qcom_camss_8x96_graph.dot
+ :align: center
+
+ Media pipeline graph 8x96
Implementation
@@ -149,8 +172,12 @@ APQ8016 Specification:
https://developer.qualcomm.com/download/sd410/snapdragon-410-processor-device-specification.pdf
Referenced 2016-11-24.
+APQ8096 Specification:
+https://developer.qualcomm.com/download/sd820e/qualcomm-snapdragon-820e-processor-apq8096sge-device-specification.pdf
+Referenced 2018-06-22.
References
----------
.. [#f1] https://source.codeaurora.org/quic/la/kernel/msm-3.10/
+.. [#f2] https://source.codeaurora.org/quic/la/kernel/msm-3.18/
diff --git a/Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot b/Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot
new file mode 100644
index 000000000000..de34f0a7afdc
--- /dev/null
+++ b/Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot
@@ -0,0 +1,104 @@
+digraph board {
+ rankdir=TB
+ n00000001 [label="{{<port0> 0} | msm_csiphy0\n/dev/v4l-subdev0 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000001:port1 -> n0000000a:port0 [style=dashed]
+ n00000001:port1 -> n0000000d:port0 [style=dashed]
+ n00000001:port1 -> n00000010:port0 [style=dashed]
+ n00000001:port1 -> n00000013:port0 [style=dashed]
+ n00000004 [label="{{<port0> 0} | msm_csiphy1\n/dev/v4l-subdev1 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000004:port1 -> n0000000a:port0 [style=dashed]
+ n00000004:port1 -> n0000000d:port0 [style=dashed]
+ n00000004:port1 -> n00000010:port0 [style=dashed]
+ n00000004:port1 -> n00000013:port0 [style=dashed]
+ n00000007 [label="{{<port0> 0} | msm_csiphy2\n/dev/v4l-subdev2 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000007:port1 -> n0000000a:port0 [style=dashed]
+ n00000007:port1 -> n0000000d:port0 [style=dashed]
+ n00000007:port1 -> n00000010:port0 [style=dashed]
+ n00000007:port1 -> n00000013:port0 [style=dashed]
+ n0000000a [label="{{<port0> 0} | msm_csid0\n/dev/v4l-subdev3 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000000a:port1 -> n00000016:port0 [style=dashed]
+ n0000000a:port1 -> n00000019:port0 [style=dashed]
+ n0000000a:port1 -> n0000001c:port0 [style=dashed]
+ n0000000a:port1 -> n0000001f:port0 [style=dashed]
+ n0000000d [label="{{<port0> 0} | msm_csid1\n/dev/v4l-subdev4 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000000d:port1 -> n00000016:port0 [style=dashed]
+ n0000000d:port1 -> n00000019:port0 [style=dashed]
+ n0000000d:port1 -> n0000001c:port0 [style=dashed]
+ n0000000d:port1 -> n0000001f:port0 [style=dashed]
+ n00000010 [label="{{<port0> 0} | msm_csid2\n/dev/v4l-subdev5 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000010:port1 -> n00000016:port0 [style=dashed]
+ n00000010:port1 -> n00000019:port0 [style=dashed]
+ n00000010:port1 -> n0000001c:port0 [style=dashed]
+ n00000010:port1 -> n0000001f:port0 [style=dashed]
+ n00000013 [label="{{<port0> 0} | msm_csid3\n/dev/v4l-subdev6 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000013:port1 -> n00000016:port0 [style=dashed]
+ n00000013:port1 -> n00000019:port0 [style=dashed]
+ n00000013:port1 -> n0000001c:port0 [style=dashed]
+ n00000013:port1 -> n0000001f:port0 [style=dashed]
+ n00000016 [label="{{<port0> 0} | msm_ispif0\n/dev/v4l-subdev7 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000016:port1 -> n00000022:port0 [style=dashed]
+ n00000016:port1 -> n0000002b:port0 [style=dashed]
+ n00000016:port1 -> n00000034:port0 [style=dashed]
+ n00000016:port1 -> n0000003d:port0 [style=dashed]
+ n00000016:port1 -> n00000046:port0 [style=dashed]
+ n00000016:port1 -> n0000004f:port0 [style=dashed]
+ n00000016:port1 -> n00000058:port0 [style=dashed]
+ n00000016:port1 -> n00000061:port0 [style=dashed]
+ n00000019 [label="{{<port0> 0} | msm_ispif1\n/dev/v4l-subdev8 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000019:port1 -> n00000022:port0 [style=dashed]
+ n00000019:port1 -> n0000002b:port0 [style=dashed]
+ n00000019:port1 -> n00000034:port0 [style=dashed]
+ n00000019:port1 -> n0000003d:port0 [style=dashed]
+ n00000019:port1 -> n00000046:port0 [style=dashed]
+ n00000019:port1 -> n0000004f:port0 [style=dashed]
+ n00000019:port1 -> n00000058:port0 [style=dashed]
+ n00000019:port1 -> n00000061:port0 [style=dashed]
+ n0000001c [label="{{<port0> 0} | msm_ispif2\n/dev/v4l-subdev9 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000001c:port1 -> n00000022:port0 [style=dashed]
+ n0000001c:port1 -> n0000002b:port0 [style=dashed]
+ n0000001c:port1 -> n00000034:port0 [style=dashed]
+ n0000001c:port1 -> n0000003d:port0 [style=dashed]
+ n0000001c:port1 -> n00000046:port0 [style=dashed]
+ n0000001c:port1 -> n0000004f:port0 [style=dashed]
+ n0000001c:port1 -> n00000058:port0 [style=dashed]
+ n0000001c:port1 -> n00000061:port0 [style=dashed]
+ n0000001f [label="{{<port0> 0} | msm_ispif3\n/dev/v4l-subdev10 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000001f:port1 -> n00000022:port0 [style=dashed]
+ n0000001f:port1 -> n0000002b:port0 [style=dashed]
+ n0000001f:port1 -> n00000034:port0 [style=dashed]
+ n0000001f:port1 -> n0000003d:port0 [style=dashed]
+ n0000001f:port1 -> n00000046:port0 [style=dashed]
+ n0000001f:port1 -> n0000004f:port0 [style=dashed]
+ n0000001f:port1 -> n00000058:port0 [style=dashed]
+ n0000001f:port1 -> n00000061:port0 [style=dashed]
+ n00000022 [label="{{<port0> 0} | msm_vfe0_rdi0\n/dev/v4l-subdev11 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000022:port1 -> n00000025 [style=bold]
+ n00000025 [label="msm_vfe0_video0\n/dev/video0", shape=box, style=filled, fillcolor=yellow]
+ n0000002b [label="{{<port0> 0} | msm_vfe0_rdi1\n/dev/v4l-subdev12 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000002b:port1 -> n0000002e [style=bold]
+ n0000002e [label="msm_vfe0_video1\n/dev/video1", shape=box, style=filled, fillcolor=yellow]
+ n00000034 [label="{{<port0> 0} | msm_vfe0_rdi2\n/dev/v4l-subdev13 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000034:port1 -> n00000037 [style=bold]
+ n00000037 [label="msm_vfe0_video2\n/dev/video2", shape=box, style=filled, fillcolor=yellow]
+ n0000003d [label="{{<port0> 0} | msm_vfe0_pix\n/dev/v4l-subdev14 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000003d:port1 -> n00000040 [style=bold]
+ n00000040 [label="msm_vfe0_video3\n/dev/video3", shape=box, style=filled, fillcolor=yellow]
+ n00000046 [label="{{<port0> 0} | msm_vfe1_rdi0\n/dev/v4l-subdev15 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000046:port1 -> n00000049 [style=bold]
+ n00000049 [label="msm_vfe1_video0\n/dev/video4", shape=box, style=filled, fillcolor=yellow]
+ n0000004f [label="{{<port0> 0} | msm_vfe1_rdi1\n/dev/v4l-subdev16 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000004f:port1 -> n00000052 [style=bold]
+ n00000052 [label="msm_vfe1_video1\n/dev/video5", shape=box, style=filled, fillcolor=yellow]
+ n00000058 [label="{{<port0> 0} | msm_vfe1_rdi2\n/dev/v4l-subdev17 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000058:port1 -> n0000005b [style=bold]
+ n0000005b [label="msm_vfe1_video2\n/dev/video6", shape=box, style=filled, fillcolor=yellow]
+ n00000061 [label="{{<port0> 0} | msm_vfe1_pix\n/dev/v4l-subdev18 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000061:port1 -> n00000064 [style=bold]
+ n00000064 [label="msm_vfe1_video3\n/dev/video7", shape=box, style=filled, fillcolor=yellow]
+ n000000e2 [label="{{} | ov5645 3-0039\n/dev/v4l-subdev19 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n000000e2:port0 -> n00000004:port0 [style=bold]
+ n000000e4 [label="{{} | ov5645 3-003a\n/dev/v4l-subdev20 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n000000e4:port0 -> n00000007:port0 [style=bold]
+ n000000e6 [label="{{} | ov5645 3-003b\n/dev/v4l-subdev21 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n000000e6:port0 -> n00000001:port0 [style=bold]
+}
diff --git a/Documentation/media/video.h.rst.exceptions b/Documentation/media/video.h.rst.exceptions
index a91aa884ce0e..371cdbd7d062 100644
--- a/Documentation/media/video.h.rst.exceptions
+++ b/Documentation/media/video.h.rst.exceptions
@@ -34,7 +34,4 @@ replace typedef video_displayformat_t :c:type:`video_displayformat`
replace typedef video_size_t :c:type:`video_size`
replace typedef video_stream_source_t :c:type:`video_stream_source`
replace typedef video_play_state_t :c:type:`video_play_state`
-replace typedef video_highlight_t :c:type:`video_highlight`
-replace typedef video_spu_t :c:type:`video_spu`
-replace typedef video_spu_palette_t :c:type:`video_spu_palette`
replace typedef video_navi_pack_t :c:type:`video_navi_pack`
diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions
index a5cb0a8686ac..ca9f0edc579e 100644
--- a/Documentation/media/videodev2.h.rst.exceptions
+++ b/Documentation/media/videodev2.h.rst.exceptions
@@ -517,7 +517,6 @@ ignore define V4L2_CTRL_WHICH_DEF_VAL
ignore define V4L2_OUT_CAP_CUSTOM_TIMINGS
ignore define V4L2_CID_MAX_CTRLS
-ignore ioctl VIDIOC_RESERVED
ignore define BASE_VIDIOC_PRIVATE
# Associate ioctls with their counterparts
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index a02d6bbfc9d0..0d8d7ef131e9 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -2179,32 +2179,41 @@ or:
event_indicated = 1;
wake_up_process(event_daemon);
-A write memory barrier is implied by wake_up() and co. if and only if they
-wake something up. The barrier occurs before the task state is cleared, and so
-sits between the STORE to indicate the event and the STORE to set TASK_RUNNING:
+A general memory barrier is executed by wake_up() if it wakes something up.
+If it doesn't wake anything up then a memory barrier may or may not be
+executed; you must not rely on it. The barrier occurs before the task state
+is accessed, in particular, it sits between the STORE to indicate the event
+and the STORE to set TASK_RUNNING:
- CPU 1 CPU 2
+ CPU 1 (Sleeper) CPU 2 (Waker)
=============================== ===============================
set_current_state(); STORE event_indicated
smp_store_mb(); wake_up();
- STORE current->state <write barrier>
- <general barrier> STORE current->state
- LOAD event_indicated
+ STORE current->state ...
+ <general barrier> <general barrier>
+ LOAD event_indicated if ((LOAD task->state) & TASK_NORMAL)
+ STORE task->state
-To repeat, this write memory barrier is present if and only if something
-is actually awakened. To see this, consider the following sequence of
-events, where X and Y are both initially zero:
+where "task" is the thread being woken up and it equals CPU 1's "current".
+
+To repeat, a general memory barrier is guaranteed to be executed by wake_up()
+if something is actually awakened, but otherwise there is no such guarantee.
+To see this, consider the following sequence of events, where X and Y are both
+initially zero:
CPU 1 CPU 2
=============================== ===============================
- X = 1; STORE event_indicated
+ X = 1; Y = 1;
smp_mb(); wake_up();
- Y = 1; wait_event(wq, Y == 1);
- wake_up(); load from Y sees 1, no memory barrier
- load from X might see 0
+ LOAD Y LOAD X
+
+If a wakeup does occur, one (at least) of the two loads must see 1. If, on
+the other hand, a wakeup does not occur, both loads might see 0.
-In contrast, if a wakeup does occur, CPU 2's load from X would be guaranteed
-to see 1.
+wake_up_process() always executes a general memory barrier. The barrier again
+occurs before the task state is accessed. In particular, if the wake_up() in
+the previous snippet were replaced by a call to wake_up_process() then one of
+the two loads would be guaranteed to see 1.
The available waker functions include:
@@ -2224,6 +2233,8 @@ The available waker functions include:
wake_up_poll();
wake_up_process();
+In terms of memory ordering, these functions all provide the same guarantees
+as a wake_up() (or stronger).
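+
+For illustration, a minimal sketch of this pairing in C, reusing the
+event_indicated and event_daemon names from the snippets above (a sketch
+for this document, not code taken from any particular subsystem):
+
+	/* Waker: publish the event, then wake the sleeper.
+	 * wake_up_process() executes a general memory barrier before the
+	 * task state is accessed, so the STORE to event_indicated cannot
+	 * be reordered past the wakeup.
+	 */
+	event_indicated = 1;
+	wake_up_process(event_daemon);
+
+	/* Sleeper: set_current_state() implies a general barrier between
+	 * the STORE to current->state and the LOAD of event_indicated.
+	 */
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (event_indicated)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+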
[!] Note that the memory barriers implied by the sleeper and the waker do _not_
order multiple stores before the wake-up with respect to loads of those stored
diff --git a/Documentation/misc-devices/pci-endpoint-test.txt b/Documentation/misc-devices/pci-endpoint-test.txt
index 4ebc3594b32c..58ccca4416b1 100644
--- a/Documentation/misc-devices/pci-endpoint-test.txt
+++ b/Documentation/misc-devices/pci-endpoint-test.txt
@@ -10,6 +10,7 @@ The PCI driver for the test device performs the following tests
*) verifying addresses programmed in BAR
*) raise legacy IRQ
*) raise MSI IRQ
+ *) raise MSI-X IRQ
*) read data
*) write data
*) copy data
@@ -25,6 +26,11 @@ ioctl
PCITEST_LEGACY_IRQ: Tests legacy IRQ
PCITEST_MSI: Tests message signalled interrupts. The MSI number
to be tested should be passed as argument.
+ PCITEST_MSIX: Tests message signalled interrupts. The MSI-X number
+ to be tested should be passed as argument.
+ PCITEST_SET_IRQTYPE: Changes driver IRQ type configuration. The IRQ type
+ should be passed as argument (0: Legacy, 1:MSI, 2:MSI-X).
+ PCITEST_GET_IRQTYPE: Gets driver IRQ type configuration.
PCITEST_WRITE: Perform write tests. The size of the buffer should be passed
as argument.
PCITEST_READ: Perform read tests. The size of the buffer should be passed
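+
+A minimal userspace sketch of driving these ioctls (a sketch only: it
+assumes the PCITEST_* macros from include/uapi/linux/pcitest.h, the
+default /dev/pci-endpoint-test.0 node, and a pcitest-style return-value
+convention; error handling omitted):
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <sys/ioctl.h>
+	#include <linux/pcitest.h>
+
+	int main(void)
+	{
+		int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
+
+		/* Switch the driver to MSI-X (2), then raise MSI-X vector 1. */
+		ioctl(fd, PCITEST_SET_IRQTYPE, 2);
+		printf("MSI-X 1: %s\n",
+		       ioctl(fd, PCITEST_MSIX, 1) > 0 ? "SUCCESS" : "FAILED");
+		return 0;
+	}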
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 2b89d91b376f..02a323c43261 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -18,8 +18,6 @@ README.ipw2200
- README for the Intel PRO/Wireless 2915ABG and 2200BG driver.
README.sb1000
- info on General Instrument/NextLevel SURFboard1000 cable modem.
-alias.txt
- - info on using alias network devices.
altera_tse.txt
- Altera Triple-Speed Ethernet controller.
arcnet-hardware.txt
@@ -140,8 +138,6 @@ multiqueue.txt
- HOWTO for multiqueue network device support.
netconsole.txt
- The network console module netconsole.ko: configuration and notes.
-netdev-FAQ.txt
- - FAQ describing how to submit net changes to netdev mailing list.
netdev-features.txt
- Network interface features API description.
netdevices.txt
diff --git a/Documentation/networking/alias.rst b/Documentation/networking/alias.rst
new file mode 100644
index 000000000000..af7c5ee92014
--- /dev/null
+++ b/Documentation/networking/alias.rst
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========
+IP-Aliasing
+===========
+
+IP-aliases are an obsolete way to manage multiple IP-addresses/masks
+per interface. Newer tools such as iproute2 support multiple
+address/prefixes per interface, but aliases are still supported
+for backwards compatibility.
+
+An alias is formed by adding a colon and a string when running ifconfig.
+This string is usually numeric, but this is not required.
+
+
+Alias creation
+==============
+
+Alias creation is done by 'magic' interface naming: eg. to create a
+200.1.1.1 alias for eth0 ...
+::
+
+ # ifconfig eth0:0 200.1.1.1 etc,etc....
+ ~~ -> request alias #0 creation (if not yet exists) for eth0
+
+The corresponding route is also set up by this command. Please note:
+The route always points to the base interface.
+
+
+Alias deletion
+==============
+
+The alias is removed by shutting the alias down::
+
+ # ifconfig eth0:0 down
+ ~~~~~~~~~~ -> will delete alias
+
+
+Alias (re-)configuring
+======================
+
+Aliases are not real devices, but programs should be able to configure
+and refer to them as usual (ifconfig, route, etc).
+
+
+Relationship with main device
+=============================
+
+If the base device is shut down the added aliases will be deleted too.
diff --git a/Documentation/networking/alias.txt b/Documentation/networking/alias.txt
deleted file mode 100644
index 85046f53fcfc..000000000000
--- a/Documentation/networking/alias.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-
-IP-Aliasing:
-============
-
-IP-aliases are an obsolete way to manage multiple IP-addresses/masks
-per interface. Newer tools such as iproute2 support multiple
-address/prefixes per interface, but aliases are still supported
-for backwards compatibility.
-
-An alias is formed by adding a colon and a string when running ifconfig.
-This string is usually numeric, but this is not a must.
-
-o Alias creation.
- Alias creation is done by 'magic' interface naming: eg. to create a
- 200.1.1.1 alias for eth0 ...
-
- # ifconfig eth0:0 200.1.1.1 etc,etc....
- ~~ -> request alias #0 creation (if not yet exists) for eth0
-
- The corresponding route is also set up by this command.
- Please note: The route always points to the base interface.
-
-
-o Alias deletion.
- The alias is removed by shutting the alias down:
-
- # ifconfig eth0:0 down
- ~~~~~~~~~~ -> will delete alias
-
-
-o Alias (re-)configuring
-
- Aliases are not real devices, but programs should be able to configure and
- refer to them as usual (ifconfig, route, etc).
-
-
-o Relationship with main device
-
- If the base device is shut down the added aliases will be deleted
- too.
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index c13214d073a4..d3e5dd26db12 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -1490,7 +1490,7 @@ To remove an ARP target:
To configure the interval between learning packet transmits:
# echo 12 > /sys/class/net/bond0/bonding/lp_interval
- NOTE: the lp_inteval is the number of seconds between instances where
+ NOTE: the lp_interval is the number of seconds between instances where
the bonding driver sends learning packets to each slaves peer switch. The
default interval is 1 second.
diff --git a/Documentation/networking/bridge.txt b/Documentation/networking/bridge.rst
index a27cb6214ed7..4aef9cddde2f 100644
--- a/Documentation/networking/bridge.txt
+++ b/Documentation/networking/bridge.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Ethernet Bridging
+=================
+
In order to use the Ethernet bridging functionality, you'll need the
userspace tools.
diff --git a/Documentation/networking/can_ucan_protocol.rst b/Documentation/networking/can_ucan_protocol.rst
new file mode 100644
index 000000000000..4cef88d24fc7
--- /dev/null
+++ b/Documentation/networking/can_ucan_protocol.rst
@@ -0,0 +1,332 @@
+=================
+The UCAN Protocol
+=================
+
+UCAN is the protocol used by the microcontroller-based USB-CAN
+adapter that is integrated on System-on-Modules from Theobroma Systems
+and that is also available as a standalone USB stick.
+
+The UCAN protocol has been designed to be hardware-independent.
+It is modeled closely after how Linux represents CAN devices
+internally. All multi-byte integers are encoded as Little Endian.
+
+All structures mentioned in this document are defined in
+``drivers/net/can/usb/ucan.c``.
+
+USB Endpoints
+=============
+
+UCAN devices use three USB endpoints:
+
+CONTROL endpoint
+ The driver sends device management commands on this endpoint
+
+IN endpoint
+ The device sends CAN data frames and CAN error frames
+
+OUT endpoint
+ The driver sends CAN data frames on the out endpoint
+
+
+CONTROL Messages
+================
+
+UCAN devices are configured using vendor requests on the control pipe.
+
+To support multiple CAN interfaces in a single USB device, all
+configuration commands target the corresponding interface in the USB
+descriptor.
+
+The driver uses ``ucan_ctrl_command_in/out`` and
+``ucan_device_request_in`` to deliver commands to the device.
+
+Setup Packet
+------------
+
+================= =====================================================
+``bmRequestType`` Direction | Vendor | (Interface or Device)
+``bRequest`` Command Number
+``wValue`` Subcommand Number (16 Bit) or 0 if not used
+``wIndex`` USB Interface Index (0 for device commands)
+``wLength`` * Host to Device - Number of bytes to transmit
+ * Device to Host - Maximum Number of bytes to
+ receive. If the device sends less, common ZLP
+ semantics are used.
+================= =====================================================
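+
+For illustration, a control command roughly as the driver's
+``ucan_ctrl_command_out`` would issue it (a sketch only; the payload
+variables and the timeout value are illustrative)::
+
+    /* Send UCAN_COMMAND_START to USB interface 0 on the control pipe. */
+    ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                          UCAN_COMMAND_START,          /* bRequest */
+                          USB_DIR_OUT | USB_TYPE_VENDOR |
+                          USB_RECIP_INTERFACE,         /* bmRequestType */
+                          0,                           /* wValue: subcommand */
+                          0,                           /* wIndex: interface */
+                          payload, payload_len,        /* data, wLength */
+                          1000);                       /* timeout, ms */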
+
+Error Handling
+--------------
+
+The device indicates failed control commands by stalling the
+pipe.
+
+Device Commands
+---------------
+
+UCAN_DEVICE_GET_FW_STRING
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*Dev2Host; optional*
+
+Request the device firmware string.
+
+
+Interface Commands
+------------------
+
+UCAN_COMMAND_START
+~~~~~~~~~~~~~~~~~~
+
+*Host2Dev; mandatory*
+
+Bring the CAN interface up.
+
+Payload Format
+ ``ucan_ctl_payload_t.cmd_start``
+
+==== ============================
+mode or mask of ``UCAN_MODE_*``
+==== ============================
+
+UCAN_COMMAND_STOP
+~~~~~~~~~~~~~~~~~~
+
+*Host2Dev; mandatory*
+
+Stop the CAN interface
+
+Payload Format
+ *empty*
+
+UCAN_COMMAND_RESET
+~~~~~~~~~~~~~~~~~~
+
+*Host2Dev; mandatory*
+
+Reset the CAN controller (including error counters)
+
+Payload Format
+ *empty*
+
+UCAN_COMMAND_GET
+~~~~~~~~~~~~~~~~
+
+*Host2Dev; mandatory*
+
+Get Information from the Device
+
+Subcommands
+^^^^^^^^^^^
+
+UCAN_COMMAND_GET_INFO
+ Request the device information structure ``ucan_ctl_payload_t.device_info``.
+
+ See the ``device_info`` field for details, and
+ ``uapi/linux/can/netlink.h`` for an explanation of the
+ ``can_bittiming`` fields.
+
+ Payload Format
+ ``ucan_ctl_payload_t.device_info``
+
+UCAN_COMMAND_GET_PROTOCOL_VERSION
+
+ Request the device protocol version
+ ``ucan_ctl_payload_t.protocol_version``. The current protocol version is 3.
+
+ Payload Format
+ ``ucan_ctl_payload_t.protocol_version``
+
+.. note:: Devices that do not implement this command use the old
+ protocol version 1
+
+UCAN_COMMAND_SET_BITTIMING
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*Host2Dev; mandatory*
+
+Set up bittiming by sending the structure
+``ucan_ctl_payload_t.cmd_set_bittiming`` (see ``struct bittiming`` for
+details).
+
+Payload Format
+ ``ucan_ctl_payload_t.cmd_set_bittiming``.
+
+UCAN_SLEEP/WAKE
+~~~~~~~~~~~~~~~
+
+*Host2Dev; optional*
+
+Configure sleep and wake modes. Not yet supported by the driver.
+
+UCAN_FILTER
+~~~~~~~~~~~
+
+*Host2Dev; optional*
+
+Setup hardware CAN filters. Not yet supported by the driver.
+
+Allowed interface commands
+--------------------------
+
+================== =================== ==================
+Legal Device State Command New Device State
+================== =================== ==================
+stopped SET_BITTIMING stopped
+stopped START started
+started STOP or RESET stopped
+stopped STOP or RESET stopped
+started RESTART started
+any GET *no change*
+================== =================== ==================
+
+IN Message Format
+=================
+
+A data packet on the USB IN endpoint contains one or more
+``ucan_message_in`` values. If multiple messages are batched in a USB
+data packet, the ``len`` field can be used to jump to the next
+``ucan_message_in`` value (take care to sanity-check the ``len`` value
+against the actual data size).
+
+.. _can_ucan_in_message_len:
+
+``len`` field
+-------------
+
+Each ``ucan_message_in`` must be aligned to a 4-byte boundary (relative
+to the start of the data buffer). That means that there
+may be padding bytes between multiple ``ucan_message_in`` values:
+
+.. code::
+
+ +----------------------------+ < 0
+ | |
+ | struct ucan_message_in |
+ | |
+ +----------------------------+ < len
+ [padding]
+ +----------------------------+ < round_up(len, 4)
+ | |
+ | struct ucan_message_in |
+ | |
+ +----------------------------+
+ [...]
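+
+A sketch of how a parser might walk such a batched buffer while honoring
+the alignment rule (``UCAN_IN_HDR_SIZE`` and ``handle_message()`` are
+illustrative names, not the driver's)::
+
+    u8 *pos = buf;
+    u8 *end = buf + actual_length;
+
+    while (pos + UCAN_IN_HDR_SIZE <= end) {
+        struct ucan_message_in *m = (struct ucan_message_in *)pos;
+        u16 len = le16_to_cpu(m->len);     /* all integers are LE */
+
+        if (len < UCAN_IN_HDR_SIZE || pos + len > end)
+            break;                         /* sanity-check len */
+        handle_message(m);
+        pos += round_up(len, 4);           /* 4-byte alignment */
+    }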
+
+``type`` field
+--------------
+
+The ``type`` field specifies the type of the message.
+
+UCAN_IN_RX
+~~~~~~~~~~
+
+``subtype``
+ zero
+
+Data received from the CAN bus (ID + payload).
+
+UCAN_IN_TX_COMPLETE
+~~~~~~~~~~~~~~~~~~~
+
+``subtype``
+ zero
+
+The CAN device has sent a message to the CAN bus. It answers with a
+list of tuples <echo-id, flags>.
+
+The echo-id identifies the frame (it echoes the id from a previous
+UCAN_OUT_TX message). The flag indicates the result of the
+transmission: a set bit 0 indicates success. All other bits
+are reserved and set to zero.
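+
+A sketch of consuming such a completion list (the two-byte tuple layout
+and all names here are illustrative, not the driver's exact structure)::
+
+    struct tx_complete_entry {
+        u8 echo_id;    /* echoes the id of a previous UCAN_OUT_TX */
+        u8 flags;      /* bit 0 set: transmission succeeded */
+    } *entry;
+
+    for (entry = first; entry < last; entry++)
+        complete_tx_echo(entry->echo_id, entry->flags & 0x01);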
+
+Flow Control
+------------
+
+When receiving CAN messages there is no flow control on the USB
+buffer. The driver has to handle inbound messages quickly enough to
+avoid drops. In case the device buffer overflows, the condition is
+reported by sending corresponding error frames (see
+:ref:`can_ucan_error_handling`).
+
+
+OUT Message Format
+==================
+
+A data packet on the USB OUT endpoint contains one or more ``struct
+ucan_message_out`` values. If multiple messages are batched into one
+data packet, the device uses the ``len`` field to jump to the next
+``ucan_message_out`` value. Each ``ucan_message_out`` must be aligned to 4
+bytes (relative to the start of the data buffer). The mechanism is the
+same as described in :ref:`can_ucan_in_message_len`.
+
+.. code::
+
+ +----------------------------+ < 0
+ | |
+ | struct ucan_message_out |
+ | |
+ +----------------------------+ < len
+ [padding]
+ +----------------------------+ < round_up(len, 4)
+ | |
+ | struct ucan_message_out |
+ | |
+ +----------------------------+
+ [...]
+
+``type`` field
+--------------
+
+In protocol version 3 only ``UCAN_OUT_TX`` is defined; the others are used
+only by legacy devices (protocol version 1).
+
+UCAN_OUT_TX
+~~~~~~~~~~~
+``subtype``
+ echo id to be replied within a CAN_IN_TX_COMPLETE message
+
+Transmit a CAN frame. (parameters: ``id``, ``data``)
+
+Flow Control
+------------
+
+When the device's outbound buffers are full, it starts sending *NAKs* on
+the *OUT* pipe until more buffers are available. The driver stops the
+queue when a certain number of OUT packets are still incomplete.
+
+.. _can_ucan_error_handling:
+
+CAN Error Handling
+==================
+
+If error reporting is turned on, the device encodes errors into CAN
+error frames (see ``uapi/linux/can/error.h``) and sends them using the
+IN endpoint. The driver updates its error statistics and forwards
+them.
+
+Although UCAN devices can suppress error frames completely, in Linux
+the driver is always interested. Hence, the device is always started with
+``UCAN_MODE_BERR_REPORT`` set. Filtering those messages for
+user space is done by the driver.
+
+Bus OFF
+-------
+
+- The device does not recover from bus OFF automatically.
+- Bus OFF is indicated by an error frame (see ``uapi/linux/can/error.h``).
+- Bus OFF recovery is started by ``UCAN_COMMAND_RESTART``.
+- Once Bus OFF recovery is completed the device sends an error frame
+ indicating that it is in ERROR-ACTIVE state.
+- During Bus OFF no frames are sent by the device.
+- During Bus OFF, transmission requests from the host are completed
+ immediately with the success bit left unset.
+
+Example Conversation
+====================
+
+#) Device is connected to USB
+#) Host sends command ``UCAN_COMMAND_RESET``, subcmd 0
+#) Host sends command ``UCAN_COMMAND_GET``, subcmd ``UCAN_COMMAND_GET_INFO``
+#) Device sends ``UCAN_IN_DEVICE_INFO``
+#) Host sends command ``UCAN_OUT_SET_BITTIMING``
+#) Host sends command ``UCAN_COMMAND_START``, subcmd 0, mode ``UCAN_MODE_BERR_REPORT``
diff --git a/Documentation/networking/dpaa2/overview.rst b/Documentation/networking/dpaa2/overview.rst
index 79fede4447d6..d638b5a8aadd 100644
--- a/Documentation/networking/dpaa2/overview.rst
+++ b/Documentation/networking/dpaa2/overview.rst
@@ -1,5 +1,6 @@
.. include:: <isonum.txt>
+=========================================================
DPAA2 (Data Path Acceleration Architecture Gen2) Overview
=========================================================
diff --git a/Documentation/networking/e100.rst b/Documentation/networking/e100.rst
index 9708f5fa76de..f81111eba9c5 100644
--- a/Documentation/networking/e100.rst
+++ b/Documentation/networking/e100.rst
@@ -47,41 +47,45 @@ Driver Configuration Parameters
The default value for each parameter is generally the recommended setting,
unless otherwise noted.
-Rx Descriptors: Number of receive descriptors. A receive descriptor is a data
+Rx Descriptors:
+ Number of receive descriptors. A receive descriptor is a data
structure that describes a receive buffer and its attributes to the network
controller. The data in the descriptor is used by the controller to write
data from the controller to host memory. In the 3.x.x driver the valid range
for this parameter is 64-256. The default value is 256. This parameter can be
changed using the command::
- ethtool -G eth? rx n
+ ethtool -G eth? rx n
Where n is the number of desired Rx descriptors.
-Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data
+Tx Descriptors:
+ Number of transmit descriptors. A transmit descriptor is a data
structure that describes a transmit buffer and its attributes to the network
controller. The data in the descriptor is used by the controller to read
data from the host memory to the controller. In the 3.x.x driver the valid
range for this parameter is 64-256. The default value is 128. This parameter
can be changed using the command::
- ethtool -G eth? tx n
+ ethtool -G eth? tx n
Where n is the number of desired Tx descriptors.
-Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by
+Speed/Duplex:
+ The driver auto-negotiates the link speed and duplex settings by
default. The ethtool utility can be used as follows to force speed/duplex.::
- ethtool -s eth? autoneg off speed {10|100} duplex {full|half}
+ ethtool -s eth? autoneg off speed {10|100} duplex {full|half}
NOTE: setting the speed/duplex to incorrect values will cause the link to
fail.
-Event Log Message Level: The driver uses the message level flag to log events
+Event Log Message Level:
+ The driver uses the message level flag to log events
to syslog. The message level can be set at driver load time. It can also be
set using the command::
- ethtool -s eth? msglvl n
+ ethtool -s eth? msglvl n
Additional Configurations
@@ -92,7 +96,7 @@ Configuring the Driver on Different Distributions
Configuring a network driver to load properly when the system is started
is distribution dependent. Typically, the configuration process involves
-adding an alias line to /etc/modprobe.d/*.conf as well as editing other
+adding an alias line to `/etc/modprobe.d/*.conf` as well as editing other
system startup scripts and/or configuration files. Many popular Linux
distributions ship with tools to make these changes for you. To learn
the proper way to configure a network device for your system, refer to
@@ -160,7 +164,10 @@ This results in unbalanced receive traffic.
If you have multiple interfaces in a server, either turn on ARP
filtering by
-(1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
+(1) entering::
+
+ echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
+
(this only works if your kernel's version is higher than 2.4.5), or
(2) installing the interfaces in separate broadcast domains (either
diff --git a/Documentation/networking/e1000.rst b/Documentation/networking/e1000.rst
index 144b87eef153..f10dd4086921 100644
--- a/Documentation/networking/e1000.rst
+++ b/Documentation/networking/e1000.rst
@@ -34,7 +34,8 @@ Command Line Parameters
The default value for each parameter is generally the recommended setting,
unless otherwise noted.
-NOTES: For more information about the AutoNeg, Duplex, and Speed
+NOTES:
+ For more information about the AutoNeg, Duplex, and Speed
parameters, see the "Speed and Duplex Configuration" section in
this document.
@@ -45,22 +46,27 @@ NOTES: For more information about the AutoNeg, Duplex, and Speed
AutoNeg
-------
+
(Supported only on adapters with copper connections)
-Valid Range: 0x01-0x0F, 0x20-0x2F
-Default Value: 0x2F
+
+:Valid Range: 0x01-0x0F, 0x20-0x2F
+:Default Value: 0x2F
This parameter is a bit-mask that specifies the speed and duplex settings
advertised by the adapter. When this parameter is used, the Speed and
Duplex parameters must not be specified.
-NOTE: Refer to the Speed and Duplex section of this readme for more
+NOTE:
+ Refer to the Speed and Duplex section of this readme for more
information on the AutoNeg parameter.
Duplex
------
+
(Supported only on adapters with copper connections)
-Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full)
-Default Value: 0
+
+:Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full)
+:Default Value: 0
This defines the direction in which data is allowed to flow. Can be
either one or two-directional. If both Duplex and the link partner are
@@ -70,18 +76,22 @@ duplex.
FlowControl
-----------
-Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
-Default Value: Reads flow control settings from the EEPROM
+
+:Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
+:Default Value: Reads flow control settings from the EEPROM
This parameter controls the automatic generation(Tx) and response(Rx)
to Ethernet PAUSE frames.
InterruptThrottleRate
---------------------
+
(not supported on Intel(R) 82542, 82543 or 82544-based adapters)
-Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
- 4=simplified balancing)
-Default Value: 3
+
+:Valid Range:
+ 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
+ 4=simplified balancing)
+:Default Value: 3
The driver can limit the amount of interrupts per second that the adapter
will generate for incoming packets. It does this by writing a value to the
@@ -135,13 +145,15 @@ Setting InterruptThrottleRate to 0 turns off any interrupt moderation
and may improve small packet latency, but is generally not suitable
for bulk throughput traffic.
-NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and
+NOTE:
+ InterruptThrottleRate takes precedence over the TxAbsIntDelay and
RxAbsIntDelay parameters. In other words, minimizing the receive
and/or transmit absolute delays does not force the controller to
generate more interrupts than what the Interrupt Throttle Rate
allows.
-CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection
+CAUTION:
+ If you are using the Intel(R) PRO/1000 CT Network Connection
(controller 82547), setting InterruptThrottleRate to a value
greater than 75,000, may hang (stop transmitting) adapters
under certain network conditions. If this occurs a NETDEV
@@ -151,7 +163,8 @@ CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection
hang, ensure that InterruptThrottleRate is set no greater
than 75,000 and is not set to 0.
-NOTE: When e1000 is loaded with default settings and multiple adapters
+NOTE:
+ When e1000 is loaded with default settings and multiple adapters
are in use simultaneously, the CPU utilization may increase non-
linearly. In order to limit the CPU utilization without impacting
the overall throughput, we recommend that you load the driver as
@@ -168,9 +181,11 @@ NOTE: When e1000 is loaded with default settings and multiple adapters
RxDescriptors
-------------
-Valid Range: 48-256 for 82542 and 82543-based adapters
- 48-4096 for all other supported adapters
-Default Value: 256
+
+:Valid Range:
+ - 48-256 for 82542 and 82543-based adapters
+ - 48-4096 for all other supported adapters
+:Default Value: 256
This value specifies the number of receive buffer descriptors allocated
by the driver. Increasing this value allows the driver to buffer more
@@ -180,15 +195,17 @@ Each descriptor is 16 bytes. A receive buffer is also allocated for each
descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
on the MTU setting. The maximum MTU size is 16110.
-NOTE: MTU designates the frame size. It only needs to be set for Jumbo
+NOTE:
+ MTU designates the frame size. It only needs to be set for Jumbo
Frames. Depending on the available system resources, the request
for a higher number of receive descriptors may be denied. In this
case, use a lower number.
RxIntDelay
----------
-Valid Range: 0-65535 (0=off)
-Default Value: 0
+
+:Valid Range: 0-65535 (0=off)
+:Default Value: 0
This value delays the generation of receive interrupts in units of 1.024
microseconds. Receive interrupt reduction can improve CPU efficiency if
@@ -198,7 +215,8 @@ of TCP traffic. If the system is reporting dropped receives, this value
may be set too high, causing the driver to run out of available receive
descriptors.
-CAUTION: When setting RxIntDelay to a value other than 0, adapters may
+CAUTION:
+ When setting RxIntDelay to a value other than 0, adapters may
hang (stop transmitting) under certain network conditions. If
this occurs a NETDEV WATCHDOG message is logged in the system
event log. In addition, the controller is automatically reset,
@@ -207,9 +225,11 @@ CAUTION: When setting RxIntDelay to a value other than 0, adapters may
RxAbsIntDelay
-------------
+
(This parameter is supported only on 82540, 82545 and later adapters.)
-Valid Range: 0-65535 (0=off)
-Default Value: 128
+
+:Valid Range: 0-65535 (0=off)
+:Default Value: 128
This value, in units of 1.024 microseconds, limits the delay in which a
receive interrupt is generated. Useful only if RxIntDelay is non-zero,
@@ -220,9 +240,11 @@ conditions.
Speed
-----
+
(This parameter is supported only on adapters with copper connections.)
-Valid Settings: 0, 10, 100, 1000
-Default Value: 0 (auto-negotiate at all supported speeds)
+
+:Valid Settings: 0, 10, 100, 1000
+:Default Value: 0 (auto-negotiate at all supported speeds)
Speed forces the line speed to the specified value in megabits per second
(Mbps). If this parameter is not specified or is set to 0 and the link
@@ -231,22 +253,26 @@ speed. Duplex should also be set when Speed is set to either 10 or 100.
TxDescriptors
-------------
-Valid Range: 48-256 for 82542 and 82543-based adapters
- 48-4096 for all other supported adapters
-Default Value: 256
+
+:Valid Range:
+ - 48-256 for 82542 and 82543-based adapters
+ - 48-4096 for all other supported adapters
+:Default Value: 256
This value is the number of transmit descriptors allocated by the driver.
Increasing this value allows the driver to queue more transmits. Each
descriptor is 16 bytes.
-NOTE: Depending on the available system resources, the request for a
+NOTE:
+ Depending on the available system resources, the request for a
higher number of transmit descriptors may be denied. In this case,
use a lower number.
TxIntDelay
----------
-Valid Range: 0-65535 (0=off)
-Default Value: 8
+
+:Valid Range: 0-65535 (0=off)
+:Default Value: 8
This value delays the generation of transmit interrupts in units of
1.024 microseconds. Transmit interrupt reduction can improve CPU
@@ -256,9 +282,11 @@ causing the driver to run out of available transmit descriptors.
TxAbsIntDelay
-------------
+
(This parameter is supported only on 82540, 82545 and later adapters.)
-Valid Range: 0-65535 (0=off)
-Default Value: 32
+
+:Valid Range: 0-65535 (0=off)
+:Default Value: 32
This value, in units of 1.024 microseconds, limits the delay in which a
transmit interrupt is generated. Useful only if TxIntDelay is non-zero,
@@ -269,18 +297,21 @@ network conditions.
XsumRX
------
+
(This parameter is NOT supported on the 82542-based adapter.)
-Valid Range: 0-1
-Default Value: 1
+
+:Valid Range: 0-1
+:Default Value: 1
A value of '1' indicates that the driver should enable IP checksum
offload for received packets (both UDP and TCP) to the adapter hardware.
Copybreak
---------
-Valid Range: 0-xxxxxxx (0=off)
-Default Value: 256
-Usage: modprobe e1000.ko copybreak=128
+
+:Valid Range: 0-xxxxxxx (0=off)
+:Default Value: 256
+:Usage: modprobe e1000.ko copybreak=128
Driver copies all packets below or equaling this size to a fresh RX
buffer before handing it up the stack.
@@ -292,8 +323,9 @@ it is also available during runtime at
SmartPowerDownEnable
--------------------
-Valid Range: 0-1
-Default Value: 0 (disabled)
+
+:Valid Range: 0-1
+:Default Value: 0 (disabled)
Allows PHY to turn off in lower power states. The user can turn off
this parameter in supported chipsets.
@@ -309,14 +341,14 @@ fiber interface board only links at 1000 Mbps full-duplex.
For copper-based boards, the keywords interact as follows:
- The default operation is auto-negotiate. The board advertises all
+- The default operation is auto-negotiate. The board advertises all
supported speed and duplex combinations, and it links at the highest
common speed and duplex mode IF the link partner is set to auto-negotiate.
- If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
+- If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
is advertised (The 1000BaseT spec requires auto-negotiation.)
- If Speed = 10 or 100, then both Speed and Duplex should be set. Auto-
+- If Speed = 10 or 100, then both Speed and Duplex should be set. Auto-
negotiation is disabled, and the AutoNeg parameter is ignored. Partner
SHOULD also be forced.
@@ -328,13 +360,15 @@ process.
The parameter may be specified as either a decimal or hexadecimal value as
determined by the bitmap below.
+============== ====== ====== ======= ======= ====== ====== ======= ======
Bit position 7 6 5 4 3 2 1 0
Decimal Value 128 64 32 16 8 4 2 1
Hex value 80 40 20 10 8 4 2 1
Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10
Duplex Full Full Half Full Half
+============== ====== ====== ======= ======= ====== ====== ======= ======
-Some examples of using AutoNeg:
+Some examples of using AutoNeg::
modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half)
modprobe e1000 AutoNeg=1 (Same as above)
@@ -357,56 +391,59 @@ Additional Configurations
Jumbo Frames
------------
-Jumbo Frames support is enabled by changing the MTU to a value larger
-than the default of 1500. Use the ifconfig command to increase the MTU
-size. For example::
+
+ Jumbo Frames support is enabled by changing the MTU to a value larger than
+ the default of 1500. Use the ifconfig command to increase the MTU size.
+ For example::
ifconfig eth<x> mtu 9000 up
-This setting is not saved across reboots. It can be made permanent if
-you add::
+ This setting is not saved across reboots. It can be made permanent if
+ you add::
MTU=9000
-to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example
-applies to the Red Hat distributions; other distributions may store this
-setting in a different location.
+ to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example
+ applies to the Red Hat distributions; other distributions may store this
+ setting in a different location.
+
+Notes:
+ Degradation in throughput performance may be observed in some Jumbo frames
+ environments. If this is observed, increasing the application's socket buffer
+ size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
+ See the specific application manual and /usr/src/linux*/Documentation/
+ networking/ip-sysctl.txt for more details.
-Notes: Degradation in throughput performance may be observed in some
-Jumbo frames environments. If this is observed, increasing the
-application's socket buffer size and/or increasing the
-/proc/sys/net/ipv4/tcp_*mem entry values may help. See the specific
-application manual and /usr/src/linux*/Documentation/
-networking/ip-sysctl.txt for more details.
+ - The maximum MTU setting for Jumbo Frames is 16110. This value coincides
+ with the maximum Jumbo Frames size of 16128.
-- The maximum MTU setting for Jumbo Frames is 16110. This value
- coincides with the maximum Jumbo Frames size of 16128.
+ - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
+ poor performance or loss of link.
-- Using Jumbo frames at 10 or 100 Mbps is not supported and may result
- in poor performance or loss of link.
+ - Adapters based on the Intel(R) 82542 and 82573V/E controller do not
+ support Jumbo Frames. These correspond to the following product names::
-- Adapters based on the Intel(R) 82542 and 82573V/E controller do not
- support Jumbo Frames. These correspond to the following product names:
- Intel(R) PRO/1000 Gigabit Server Adapter Intel(R) PRO/1000 PM Network
- Connection
+ Intel(R) PRO/1000 Gigabit Server Adapter
+ Intel(R) PRO/1000 PM Network Connection
ethtool
-------
-The driver utilizes the ethtool interface for driver configuration and
-diagnostics, as well as displaying statistical information. The ethtool
-version 1.6 or later is required for this functionality.
-The latest release of ethtool can be found from
-https://www.kernel.org/pub/software/network/ethtool/
+ The driver utilizes the ethtool interface for driver configuration and
+ diagnostics, as well as displaying statistical information. The ethtool
+ version 1.6 or later is required for this functionality.
+
+ The latest release of ethtool can be found from
+ https://www.kernel.org/pub/software/network/ethtool/
Enabling Wake on LAN* (WoL)
---------------------------
-WoL is configured through the ethtool* utility.
-WoL will be enabled on the system during the next shut down or reboot.
-For this driver version, in order to enable WoL, the e1000 driver must be
-loaded when shutting down or rebooting the system.
+ WoL is configured through the ethtool* utility.
+ WoL will be enabled on the system during the next shut down or reboot.
+ For this driver version, in order to enable WoL, the e1000 driver must be
+ loaded when shutting down or rebooting the system.
Support
=======
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index fec8588a588e..fcd710f2cc7a 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -6,15 +6,21 @@ Contents:
.. toctree::
:maxdepth: 2
+ netdev-FAQ
af_xdp
batman-adv
can
+ can_ucan_protocol
dpaa2/index
e100
e1000
kapi
z8530book
msg_zerocopy
+ failover
+ net_failover
+ alias
+ bridge
.. only:: subproject
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ce8fbf5aa63c..8313a636dd53 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -81,6 +81,15 @@ fib_multipath_hash_policy - INTEGER
0 - Layer 3
1 - Layer 4
+ip_forward_update_priority - INTEGER
+ Whether to update SKB priority from "TOS" field in IPv4 header after it
+ is forwarded. The new SKB priority is mapped from TOS field value
+ according to an rt_tos2priority table (see e.g. man tc-prio).
+ Default: 1 (Update priority.)
+ Possible values:
+ 0 - Do not update priority.
+ 1 - Update priority.
+
route/max_size - INTEGER
Maximum number of routes allowed in the kernel. Increase
this when using large numbers of interfaces and/or routes.
@@ -733,11 +742,11 @@ tcp_limit_output_bytes - INTEGER
Controls TCP Small Queue limit per tcp socket.
TCP bulk sender tends to increase packets in flight until it
gets losses notifications. With SNDBUF autotuning, this can
- result in a large amount of packets queued in qdisc/device
- on the local machine, hurting latency of other flows, for
- typical pfifo_fast qdiscs.
- tcp_limit_output_bytes limits the number of bytes on qdisc
- or device to reduce artificial RTT/cwnd and reduce bufferbloat.
+ result in a large amount of packets queued on the local machine
+ (e.g.: qdiscs, CPU backlog, or device) hurting latency of other
+ flows, for typical pfifo_fast qdiscs. tcp_limit_output_bytes
+ limits the number of bytes on qdisc or device to reduce artificial
+ RTT/cwnd and reduce bufferbloat.
Default: 262144
tcp_challenge_ack_limit - INTEGER
@@ -1834,6 +1843,16 @@ stable_secret - IPv6 address
By default the stable secret is unset.
+addr_gen_mode - INTEGER
+ Defines how link-local and autoconf addresses are generated.
+
+ 0: generate address based on EUI64 (default)
+ 1: do not generate a link-local address, use EUI64 for addresses generated
+ from autoconf
+ 2: generate stable privacy addresses, using the secret from
+ stable_secret (RFC7217)
+ 3: generate stable privacy addresses, using a random secret if unset
+
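+	For example, a sketch of selecting stable-privacy generation (mode 2)
+	for one interface from C, assuming the usual conf/<interface> sysctl
+	layout (error handling omitted):
+
+		#include <stdio.h>
+
+		int main(void)
+		{
+			FILE *f = fopen("/proc/sys/net/ipv6/conf/eth0/addr_gen_mode", "w");
+
+			fputs("2", f);	/* 2: RFC7217 stable privacy addresses */
+			return fclose(f);
+		}
+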
drop_unicast_in_l2_multicast - BOOLEAN
Drop any unicast IPv6 packets that are received in link-layer
multicast (or broadcast) frames.
@@ -1863,6 +1882,11 @@ ratelimit - INTEGER
otherwise the minimal space between responses in milliseconds.
Default: 1000
+echo_ignore_all - BOOLEAN
+ If set non-zero, then the kernel will ignore all ICMP ECHO
+ requests sent to it over the IPv6 protocol.
+ Default: 0
+
xfrm6_gc_thresh - INTEGER
The threshold at which we will start garbage collecting for IPv6
destination cache entries. At twice this value the system will
diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst
index 70ca2f5800c4..06c97dcb57ca 100644
--- a/Documentation/networking/net_failover.rst
+++ b/Documentation/networking/net_failover.rst
@@ -36,37 +36,39 @@ feature on the virtio-net interface and assign the same MAC address to both
virtio-net and VF interfaces.
Here is an example XML snippet that shows such configuration.
-
- <interface type='network'>
- <mac address='52:54:00:00:12:53'/>
- <source network='enp66s0f0_br'/>
- <target dev='tap01'/>
- <model type='virtio'/>
- <driver name='vhost' queues='4'/>
- <link state='down'/>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/>
- </interface>
- <interface type='hostdev' managed='yes'>
- <mac address='52:54:00:00:12:53'/>
- <source>
- <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
- </source>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
- </interface>
+::
+
+ <interface type='network'>
+ <mac address='52:54:00:00:12:53'/>
+ <source network='enp66s0f0_br'/>
+ <target dev='tap01'/>
+ <model type='virtio'/>
+ <driver name='vhost' queues='4'/>
+ <link state='down'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/>
+ </interface>
+ <interface type='hostdev' managed='yes'>
+ <mac address='52:54:00:00:12:53'/>
+ <source>
+ <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
+ </source>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
+ </interface>
Booting a VM with the above configuration will result in the following 3
netdevs created in the VM.
-
-4: ens10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
- link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
- inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10
- valid_lft 42482sec preferred_lft 42482sec
- inet6 fe80::97d8:db2:8c10:b6d6/64 scope link
- valid_lft forever preferred_lft forever
-5: ens10nsby: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000
- link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
-7: ens11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ens10 state UP group default qlen 1000
- link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
+::
+
+ 4: ens10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
+ link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
+ inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10
+ valid_lft 42482sec preferred_lft 42482sec
+ inet6 fe80::97d8:db2:8c10:b6d6/64 scope link
+ valid_lft forever preferred_lft forever
+ 5: ens10nsby: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000
+ link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
+ 7: ens11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ens10 state UP group default qlen 1000
+ link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
ens10 is the 'failover' master netdev, ens10nsby and ens11 are the slave
'standby' and 'primary' netdevs respectively.
@@ -80,37 +82,38 @@ the paravirtual datapath when the VF is unplugged.
Here is a sample script that shows the steps to initiate live migration on
the source hypervisor.
+::
-# cat vf_xml
-<interface type='hostdev' managed='yes'>
- <mac address='52:54:00:00:12:53'/>
- <source>
- <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
- </source>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
-</interface>
+ # cat vf_xml
+ <interface type='hostdev' managed='yes'>
+ <mac address='52:54:00:00:12:53'/>
+ <source>
+ <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
+ </source>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
+ </interface>
-# Source Hypervisor
-#!/bin/bash
+ # Source Hypervisor
+ #!/bin/bash
-DOMAIN=fedora27-tap01
-PF=enp66s0f0
-VF_NUM=5
-TAP_IF=tap01
-VF_XML=
+ DOMAIN=fedora27-tap01
+ PF=enp66s0f0
+ VF_NUM=5
+ TAP_IF=tap01
+ VF_XML=
-MAC=52:54:00:00:12:53
-ZERO_MAC=00:00:00:00:00:00
+ MAC=52:54:00:00:12:53
+ ZERO_MAC=00:00:00:00:00:00
-virsh domif-setlink $DOMAIN $TAP_IF up
-bridge fdb del $MAC dev $PF master
-virsh detach-device $DOMAIN $VF_XML
-ip link set $PF vf $VF_NUM mac $ZERO_MAC
+ virsh domif-setlink $DOMAIN $TAP_IF up
+ bridge fdb del $MAC dev $PF master
+ virsh detach-device $DOMAIN $VF_XML
+ ip link set $PF vf $VF_NUM mac $ZERO_MAC
-virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system
+ virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system
-# Destination Hypervisor
-#!/bin/bash
+ # Destination Hypervisor
+ #!/bin/bash
-virsh attach-device $DOMAIN $VF_XML
-virsh domif-setlink $DOMAIN $TAP_IF down
+ virsh attach-device $DOMAIN $VF_XML
+ virsh domif-setlink $DOMAIN $TAP_IF down
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
new file mode 100644
index 000000000000..0ac5fa77f501
--- /dev/null
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -0,0 +1,259 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _netdev-FAQ:
+
+==========
+netdev FAQ
+==========
+
+Q: What is netdev?
+------------------
+A: It is a mailing list for all network-related Linux stuff. This
+includes anything found under net/ (i.e. core code like IPv6) and
+drivers/net (i.e. hardware specific drivers) in the Linux source tree.
+
+Note that some subsystems (e.g. wireless drivers) which have a high
+volume of traffic have their own specific mailing lists.
+
+The netdev list is managed (like many other Linux mailing lists) through
+VGER (http://vger.kernel.org/) and archives can be found below:
+
+- http://marc.info/?l=linux-netdev
+- http://www.spinics.net/lists/netdev/
+
+Aside from subsystems like that mentioned above, all network-related
+Linux development (i.e. RFC, review, comments, etc.) takes place on
+netdev.
+
+Q: How do the changes posted to netdev make their way into Linux?
+-----------------------------------------------------------------
+A: There are always two trees (git repositories) in play. Both are
+driven by David Miller, the main network maintainer. There is the
+``net`` tree, and the ``net-next`` tree. As you can probably guess from
+the names, the ``net`` tree is for fixes to existing code already in the
+mainline tree from Linus, and ``net-next`` is where the new code goes
+for the future release. You can find the trees here:
+
+- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+
+Q: How often do changes from these trees make it to the mainline Linus tree?
+----------------------------------------------------------------------------
+A: To understand this, you need to know a bit of background information on
+the cadence of Linux development. Each new release starts off with a
+two week "merge window" where the main maintainers feed their new stuff
+to Linus for merging into the mainline tree. After the two weeks, the
+merge window is closed, and it is called/tagged ``-rc1``. No new
+features get mainlined after this -- only fixes to the rc1 content are
+expected. After roughly a week of collecting fixes to the rc1 content,
+rc2 is released. This repeats on a roughly weekly basis until rc7
+(typically; sometimes rc6 if things are quiet, or rc8 if things are in a
+state of churn), and a week after the last vX.Y-rcN was done, the
+official vX.Y is released.
+
+Relating that to netdev: At the beginning of the 2-week merge window,
+the ``net-next`` tree will be closed - no new changes/features. The
+accumulated new content of the past ~10 weeks will be passed onto
+mainline/Linus via a pull request for vX.Y -- at the same time, the
+``net`` tree will start accumulating fixes for this pulled content
+relating to vX.Y
+
+An announcement indicating when ``net-next`` has been closed is usually
+sent to netdev, but knowing the above, you can predict that in advance.
+
+IMPORTANT: Do not send new ``net-next`` content to netdev while the
+``net-next`` tree is closed.
+
+Shortly after the two weeks have passed (and vX.Y-rc1 is released), the
+tree for ``net-next`` reopens to collect content for the next (vX.Y+1)
+release.
+
+If you aren't subscribed to netdev and/or are simply unsure if
+``net-next`` has re-opened yet, simply check the ``net-next`` git
+repository link above for any new networking-related commits. You may
+also check the following website for the current status:
+
+ http://vger.kernel.org/~davem/net-next.html
+
+The ``net`` tree continues to collect fixes for the vX.Y content, and is
+fed back to Linus at regular (~weekly) intervals. Meaning that the
+focus for ``net`` is on stabilization and bug fixes.
+
+Finally, the vX.Y gets released, and the whole cycle starts over.
+
+Q: So where are we now in this cycle?
+--------------------------------------
+A: Load the mainline (Linus) page here:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+and note the top of the "tags" section. If it is rc1, it is early in
+the dev cycle. If it was tagged rc7 a week ago, then a release is
+probably imminent.
+
+Q: How do I indicate which tree (net vs. net-next) my patch should be in?
+-------------------------------------------------------------------------
+A: Firstly, think whether you have a bug fix or new "next-like" content.
+Then once decided, assuming that you use git, use the prefix flag, i.e.
+::
+
+ git format-patch --subject-prefix='PATCH net-next' start..finish
+
+Use ``net`` instead of ``net-next`` (always lower case) in the above for
+bug-fix ``net`` content. If you don't use git, then note the only magic
+in the above is just the subject text of the outgoing e-mail, and you
+can manually change it yourself with whatever MUA you are comfortable
+with.
+
+Q: I sent a patch and I'm wondering what happened to it?
+--------------------------------------------------------
+Q: How can I tell whether it got merged?
+A: Start by looking at the main patchworks queue for netdev:
+
+ http://patchwork.ozlabs.org/project/netdev/list/
+
+The "State" field will tell you exactly where things are at with your
+patch.
+
+Q: The above only says "Under Review". How can I find out more?
+----------------------------------------------------------------
+A: Generally speaking, the patches get triaged quickly (in less than
+48h). So be patient. Asking the maintainer for status updates on your
+patch is a good way to ensure your patch is ignored or pushed to the
+bottom of the priority list.
+
+Q: I submitted multiple versions of the patch series
+----------------------------------------------------
+Q: Should I directly update patchwork for the previous versions of these
+patch series?
+A: No, please don't interfere with the patch status on patchwork, leave
+it to the maintainer to figure out what is the most recent and current
+version that should be applied. If there is any doubt, the maintainer
+will reply and ask what should be done.
+
+Q: How can I tell what patches are queued up for backporting to the various stable releases?
+--------------------------------------------------------------------------------------------
+A: Normally Greg Kroah-Hartman collects stable commits himself, but for
+networking, Dave collects up patches he deems critical for the
+networking subsystem, and then hands them off to Greg.
+
+There is a patchworks queue that you can see here:
+
+ http://patchwork.ozlabs.org/bundle/davem/stable/?state=*
+
+It contains the patches which Dave has selected, but not yet handed off
+to Greg. If Greg already has the patch, then it will be here:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
+
+A quick way to find whether the patch is in this stable-queue is to
+simply clone the repo, and then git grep the mainline commit ID, e.g.
+::
+
+ stable-queue$ git grep -l 284041ef21fdf2e
+ releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ stable/stable-queue$
+
+Q: I see a network patch and I think it should be backported to stable.
+-----------------------------------------------------------------------
+Q: Should I request it via stable@vger.kernel.org like the references in
+the kernel's Documentation/process/stable-kernel-rules.rst file say?
+A: No, not for networking. Check the stable queues as per above first
+to see if it is already queued. If not, then send a mail to netdev,
+listing the upstream commit ID and why you think it should be a stable
+candidate.
+
+Before you jump to go do the above, do note that the normal stable rules
+in :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
+still apply. So you need to explicitly indicate why it is a critical
+fix and exactly what users are impacted. In addition, you need to
+convince yourself that you *really* think it has been overlooked,
+vs. having been considered and rejected.
+
+Generally speaking, the longer it has had a chance to "soak" in
+mainline, the better the odds that it is an OK candidate for stable. So
+scrambling to request a commit be added the day after it appears should
+be avoided.
+
+Q: I have created a network patch and I think it should be backported to stable.
+--------------------------------------------------------------------------------
+Q: Should I add a Cc: stable@vger.kernel.org like the references in the
+kernel's Documentation/ directory say?
+A: No. See above answer. In short, if you think it really belongs in
+stable, then ensure you write a decent commit log that describes who
+gets impacted by the bug fix and how it manifests itself, and when the
+bug was introduced. If you do that properly, then the commit will get
+handled appropriately and most likely get put in the patchworks stable
+queue if it really warrants it.
+
+If you think there is some valid information relating to it being in
+stable that does *not* belong in the commit log, then use the three dash
+marker line as described in
+:ref:`Documentation/process/submitting-patches.rst <the_canonical_patch_format>`
+to temporarily embed that information into the patch that you send.
+
+Q: Are all networking bug fixes backported to all stable releases?
+------------------------------------------------------------------
+A: Due to capacity, Dave can only take care of the backports for the
+last two stable releases. For earlier stable releases, each stable
+branch maintainer is supposed to take care of them. If you find any
+patch is missing from an earlier stable branch, please notify
+stable@vger.kernel.org with either a commit ID or a formal patch
+backported, and CC Dave and other relevant networking developers.
+
+Q: Is the comment style convention different for the networking content?
+------------------------------------------------------------------------
+A: Yes, in a largely trivial way. Instead of this::
+
+ /*
+ * foobar blah blah blah
+ * another line of text
+ */
+
+it is requested that you make it look like this::
+
+ /* foobar blah blah blah
+ * another line of text
+ */
+
+Q: I am working in existing code that has the former comment style and not the latter.
+--------------------------------------------------------------------------------------
+Q: Should I submit new code in the former style or the latter?
+A: Make it the latter style, so that eventually all code in the domain
+of netdev is of this format.
+
+Q: I found a bug that might have possible security implications or similar.
+---------------------------------------------------------------------------
+Q: Should I mail the main netdev maintainer off-list?
+A: No. The current netdev maintainer has consistently requested that
+people use the mailing lists and not reach out directly. If you aren't
+OK with that, then perhaps consider mailing security@kernel.org or
+reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
+as possible alternative mechanisms.
+
+Q: What level of testing is expected before I submit my change?
+---------------------------------------------------------------
+A: If your changes are against ``net-next``, the expectation is that you
+have tested by layering your changes on top of ``net-next``. Ideally
+you will have done run-time testing specific to your change, but at a
+minimum, your changes should survive an ``allyesconfig`` and an
+``allmodconfig`` build without new warnings or failures.
+
+Q: Any other tips to help ensure my net/net-next patch gets OK'd?
+-----------------------------------------------------------------
+A: Attention to detail. Re-read your own work as if you were the
+reviewer. You can start with using ``checkpatch.pl``, perhaps even with
+the ``--strict`` flag. But do not be mindlessly robotic in doing so.
+If your change is a bug fix, make sure your commit log indicates the
+end-user visible symptom, the underlying reason as to why it happens,
+and then if necessary, explain why the fix proposed is the best way to
+get things done. Don't mangle whitespace, and as is common, don't
+mis-indent function arguments that span multiple lines. If it is your
+first patch, mail it to yourself so you can test apply it to an
+unpatched tree to confirm infrastructure didn't mangle it.
+
+Finally, go back and read
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
+to be sure you are not repeating some common mistake documented there.
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
deleted file mode 100644
index fa951b820b25..000000000000
--- a/Documentation/networking/netdev-FAQ.txt
+++ /dev/null
@@ -1,244 +0,0 @@
-
-Information you need to know about netdev
------------------------------------------
-
-Q: What is netdev?
-
-A: It is a mailing list for all network-related Linux stuff. This includes
- anything found under net/ (i.e. core code like IPv6) and drivers/net
- (i.e. hardware specific drivers) in the Linux source tree.
-
- Note that some subsystems (e.g. wireless drivers) which have a high volume
- of traffic have their own specific mailing lists.
-
- The netdev list is managed (like many other Linux mailing lists) through
- VGER ( http://vger.kernel.org/ ) and archives can be found below:
-
- http://marc.info/?l=linux-netdev
- http://www.spinics.net/lists/netdev/
-
- Aside from subsystems like that mentioned above, all network-related Linux
- development (i.e. RFC, review, comments, etc.) takes place on netdev.
-
-Q: How do the changes posted to netdev make their way into Linux?
-
-A: There are always two trees (git repositories) in play. Both are driven
- by David Miller, the main network maintainer. There is the "net" tree,
- and the "net-next" tree. As you can probably guess from the names, the
- net tree is for fixes to existing code already in the mainline tree from
- Linus, and net-next is where the new code goes for the future release.
- You can find the trees here:
-
- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
-
-Q: How often do changes from these trees make it to the mainline Linus tree?
-
-A: To understand this, you need to know a bit of background information
- on the cadence of Linux development. Each new release starts off with
- a two week "merge window" where the main maintainers feed their new
- stuff to Linus for merging into the mainline tree. After the two weeks,
- the merge window is closed, and it is called/tagged "-rc1". No new
- features get mainlined after this -- only fixes to the rc1 content
- are expected. After roughly a week of collecting fixes to the rc1
- content, rc2 is released. This repeats on a roughly weekly basis
- until rc7 (typically; sometimes rc6 if things are quiet, or rc8 if
- things are in a state of churn), and a week after the last vX.Y-rcN
- was done, the official "vX.Y" is released.
-
- Relating that to netdev: At the beginning of the 2-week merge window,
- the net-next tree will be closed - no new changes/features. The
- accumulated new content of the past ~10 weeks will be passed onto
- mainline/Linus via a pull request for vX.Y -- at the same time,
- the "net" tree will start accumulating fixes for this pulled content
- relating to vX.Y
-
- An announcement indicating when net-next has been closed is usually
- sent to netdev, but knowing the above, you can predict that in advance.
-
- IMPORTANT: Do not send new net-next content to netdev during the
- period during which net-next tree is closed.
-
- Shortly after the two weeks have passed (and vX.Y-rc1 is released), the
- tree for net-next reopens to collect content for the next (vX.Y+1) release.
-
- If you aren't subscribed to netdev and/or are simply unsure if net-next
- has re-opened yet, simply check the net-next git repository link above for
- any new networking-related commits. You may also check the following
- website for the current status:
-
- http://vger.kernel.org/~davem/net-next.html
-
- The "net" tree continues to collect fixes for the vX.Y content, and
- is fed back to Linus at regular (~weekly) intervals. Meaning that the
- focus for "net" is on stabilization and bugfixes.
-
- Finally, the vX.Y gets released, and the whole cycle starts over.
-
-Q: So where are we now in this cycle?
-
-A: Load the mainline (Linus) page here:
-
- https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-
- and note the top of the "tags" section. If it is rc1, it is early
- in the dev cycle. If it was tagged rc7 a week ago, then a release
- is probably imminent.
-
-Q: How do I indicate which tree (net vs. net-next) my patch should be in?
-
-A: Firstly, think whether you have a bug fix or new "next-like" content.
- Then once decided, assuming that you use git, use the prefix flag, i.e.
-
- git format-patch --subject-prefix='PATCH net-next' start..finish
-
- Use "net" instead of "net-next" (always lower case) in the above for
- bug-fix net content. If you don't use git, then note the only magic in
- the above is just the subject text of the outgoing e-mail, and you can
- manually change it yourself with whatever MUA you are comfortable with.
-
-Q: I sent a patch and I'm wondering what happened to it. How can I tell
- whether it got merged?
-
-A: Start by looking at the main patchworks queue for netdev:
-
- http://patchwork.ozlabs.org/project/netdev/list/
-
- The "State" field will tell you exactly where things are at with
- your patch.
-
-Q: The above only says "Under Review". How can I find out more?
-
-A: Generally speaking, the patches get triaged quickly (in less than 48h).
- So be patient. Asking the maintainer for status updates on your
- patch is a good way to ensure your patch is ignored or pushed to
- the bottom of the priority list.
-
-Q: I submitted multiple versions of the patch series, should I directly update
- patchwork for the previous versions of these patch series?
-
-A: No, please don't interfere with the patch status on patchwork, leave it to
- the maintainer to figure out what is the most recent and current version that
- should be applied. If there is any doubt, the maintainer will reply and ask
- what should be done.
-
-Q: How can I tell what patches are queued up for backporting to the
- various stable releases?
-
-A: Normally Greg Kroah-Hartman collects stable commits himself, but
- for networking, Dave collects up patches he deems critical for the
- networking subsystem, and then hands them off to Greg.
-
- There is a patchworks queue that you can see here:
- http://patchwork.ozlabs.org/bundle/davem/stable/?state=*
-
- It contains the patches which Dave has selected, but not yet handed
- off to Greg. If Greg already has the patch, then it will be here:
- https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
-
- A quick way to find whether the patch is in this stable-queue is
- to simply clone the repo, and then git grep the mainline commit ID, e.g.
-
- stable-queue$ git grep -l 284041ef21fdf2e
- releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
- releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
- releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
- stable/stable-queue$
-
-Q: I see a network patch and I think it should be backported to stable.
- Should I request it via "stable@vger.kernel.org" like the references in
- the kernel's Documentation/process/stable-kernel-rules.rst file say?
-
-A: No, not for networking. Check the stable queues as per above 1st to see
- if it is already queued. If not, then send a mail to netdev, listing
- the upstream commit ID and why you think it should be a stable candidate.
-
- Before you jump to go do the above, do note that the normal stable rules
- in Documentation/process/stable-kernel-rules.rst still apply. So you need to
- explicitly indicate why it is a critical fix and exactly what users are
- impacted. In addition, you need to convince yourself that you _really_
- think it has been overlooked, vs. having been considered and rejected.
-
- Generally speaking, the longer it has had a chance to "soak" in mainline,
- the better the odds that it is an OK candidate for stable. So scrambling
- to request a commit be added the day after it appears should be avoided.
-
-Q: I have created a network patch and I think it should be backported to
- stable. Should I add a "Cc: stable@vger.kernel.org" like the references
- in the kernel's Documentation/ directory say?
-
-A: No. See above answer. In short, if you think it really belongs in
- stable, then ensure you write a decent commit log that describes who
- gets impacted by the bugfix and how it manifests itself, and when the
- bug was introduced. If you do that properly, then the commit will
- get handled appropriately and most likely get put in the patchworks
- stable queue if it really warrants it.
-
- If you think there is some valid information relating to it being in
- stable that does _not_ belong in the commit log, then use the three
- dash marker line as described in Documentation/process/submitting-patches.rst to
- temporarily embed that information into the patch that you send.
-
-Q: Are all networking bug fixes backported to all stable releases?
-
-A: Due to capacity, Dave could only take care of the backports for the last
- 2 stable releases. For earlier stable releases, each stable branch maintainer
- is supposed to take care of them. If you find any patch is missing from an
- earlier stable branch, please notify stable@vger.kernel.org with either a
- commit ID or a formal patch backported, and CC Dave and other relevant
- networking developers.
-
-Q: Someone said that the comment style and coding convention is different
- for the networking content. Is this true?
-
-A: Yes, in a largely trivial way. Instead of this:
-
- /*
- * foobar blah blah blah
- * another line of text
- */
-
- it is requested that you make it look like this:
-
- /* foobar blah blah blah
- * another line of text
- */
-
-Q: I am working in existing code that has the former comment style and not the
- latter. Should I submit new code in the former style or the latter?
-
-A: Make it the latter style, so that eventually all code in the domain of
- netdev is of this format.
-
-Q: I found a bug that might have possible security implications or similar.
- Should I mail the main netdev maintainer off-list?
-
-A: No. The current netdev maintainer has consistently requested that people
- use the mailing lists and not reach out directly. If you aren't OK with
- that, then perhaps consider mailing "security@kernel.org" or reading about
- http://oss-security.openwall.org/wiki/mailing-lists/distros
- as possible alternative mechanisms.
-
-Q: What level of testing is expected before I submit my change?
-
-A: If your changes are against net-next, the expectation is that you
- have tested by layering your changes on top of net-next. Ideally you
- will have done run-time testing specific to your change, but at a
- minimum, your changes should survive an "allyesconfig" and an
- "allmodconfig" build without new warnings or failures.
-
-Q: Any other tips to help ensure my net/net-next patch gets OK'd?
-
-A: Attention to detail. Re-read your own work as if you were the
- reviewer. You can start with using checkpatch.pl, perhaps even
- with the "--strict" flag. But do not be mindlessly robotic in
- doing so. If your change is a bug-fix, make sure your commit log
- indicates the end-user visible symptom, the underlying reason as
- to why it happens, and then if necessary, explain why the fix proposed
- is the best way to get things done. Don't mangle whitespace, and as
- is common, don't mis-indent function arguments that span multiple lines.
- If it is your first patch, mail it to yourself so you can test apply
- it to an unpatched tree to confirm infrastructure didn't mangle it.
-
- Finally, go back and read Documentation/process/submitting-patches.rst to be
- sure you are not repeating some common mistake documented there.
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index f55639d71d35..b7056a8a0540 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -366,8 +366,13 @@ XPS: Transmit Packet Steering
Transmit Packet Steering is a mechanism for intelligently selecting
which transmit queue to use when transmitting a packet on a multi-queue
-device. To accomplish this, a mapping from CPU to hardware queue(s) is
-recorded. The goal of this mapping is usually to assign queues
+device. To accomplish this, one of two kinds of maps is recorded: either
+a mapping of CPUs to hardware queue(s) or a mapping of receive queue(s)
+to hardware transmit queue(s).
+
+1. XPS using CPUs map
+
+The goal of this mapping is usually to assign queues
exclusively to a subset of CPUs, where the transmit completions for
these queues are processed on a CPU within this set. This choice
provides two benefits. First, contention on the device queue lock is
@@ -377,15 +382,40 @@ transmit queue). Secondly, cache miss rate on transmit completion is
reduced, in particular for data cache lines that hold the sk_buff
structures.
-XPS is configured per transmit queue by setting a bitmap of CPUs that
-may use that queue to transmit. The reverse mapping, from CPUs to
-transmit queues, is computed and maintained for each network device.
-When transmitting the first packet in a flow, the function
-get_xps_queue() is called to select a queue. This function uses the ID
-of the running CPU as a key into the CPU-to-queue lookup table. If the
+2. XPS using receive queues map
+
+This mapping is used to pick transmit queue based on the receive
+queue(s) map configuration set by the administrator. A set of receive
+queues can be mapped to a set of transmit queues (many:many), although
+the common use case is a 1:1 mapping. This enables packets to be sent
+on the same queue association for transmit and receive. This is useful for
+busy polling multi-threaded workloads where there are challenges in
+associating a given CPU to a given application thread. The application
+threads are not pinned to CPUs and each thread handles packets
+received on a single queue. The receive queue number is cached in the
+socket for the connection. In this model, sending the packets on the same
+transmit queue corresponding to the associated receive queue has benefits
+in keeping the CPU overhead low. Transmit completion work is locked into
+the same queue-association that a given application is polling on. This
+avoids the overhead of triggering an interrupt on another CPU. When the
+application cleans up the packets during the busy poll, transmit completion
+may be processed along with it in the same thread context and so result in
+reduced latency.
+
+XPS is configured per transmit queue by setting a bitmap of
+CPUs/receive-queues that may use that queue to transmit. The reverse
+mapping, from CPUs to transmit queues or from receive-queues to transmit
+queues, is computed and maintained for each network device. When
+transmitting the first packet in a flow, the function get_xps_queue() is
+called to select a queue. This function uses the ID of the receive queue
+cached in the socket for a match in the receive-queue-to-transmit-queue
+lookup table. Alternatively, this function can also use the ID of the
+running CPU as a key into the CPU-to-queue lookup table. If the
ID matches a single queue, that is used for transmission. If multiple
queues match, one is selected by using the flow hash to compute an index
-into the set.
+into the set. When selecting the transmit queue based on the receive
+queue(s) map, the transmit device is not validated against the receive
+device, as that would require an expensive lookup operation in the datapath.
The queue chosen for transmitting a particular flow is saved in the
corresponding socket structure for the flow (e.g. a TCP connection).
@@ -404,11 +434,15 @@ acknowledged.
XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by
default for SMP). The functionality remains disabled until explicitly
-configured. To enable XPS, the bitmap of CPUs that may use a transmit
-queue is configured using the sysfs file entry:
+configured. To enable XPS, the bitmap of CPUs/receive-queues that may
+use a transmit queue is configured using the sysfs file entry:
+For selection based on CPUs map:
/sys/class/net/<dev>/queues/tx-<n>/xps_cpus
+For selection based on receive-queues map:
+/sys/class/net/<dev>/queues/tx-<n>/xps_rxqs
+
== Suggested Configuration
For a network device with a single transmission queue, XPS configuration
@@ -421,6 +455,11 @@ best CPUs to share a given queue are probably those that share the cache
with the CPU that processes transmit completions for that queue
(transmit interrupts).
+For transmit queue selection based on receive queue(s), XPS has to be
+explicitly configured by mapping receive-queue(s) to transmit queue(s). If
+the user configuration for the receive-queue map does not apply, then the
+transmit queue is selected based on the CPUs map.
+
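+As a hedged example (the device and queue numbers are hypothetical), a
+1:1 receive-queue to transmit-queue association for the first two queue
+pairs of eth0 could be set with:
+
+echo 1 > /sys/class/net/eth0/queues/tx-0/xps_rxqs
+echo 2 > /sys/class/net/eth0/queues/tx-1/xps_rxqs
+
+Each value is a bitmap of receive queues, so "1" (bit 0) maps rx-0 to
+tx-0 and "2" (bit 1) maps rx-1 to tx-1.
+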
Per TX Queue rate limitation:
=============================
diff --git a/Documentation/networking/ti-cpsw.txt b/Documentation/networking/ti-cpsw.txt
new file mode 100644
index 000000000000..d4d4c0751a09
--- /dev/null
+++ b/Documentation/networking/ti-cpsw.txt
@@ -0,0 +1,541 @@
+* Texas Instruments CPSW ethernet driver
+
+Multiqueue & CBS & MQPRIO
+=====================================================================
+=====================================================================
+
+The cpsw has 3 CBS shapers for each external port. This document
+describes MQPRIO and CBS Qdisc offload configuration for cpsw driver
+based on examples. It potentially can be used in audio video bridging
+(AVB) and time sensitive networking (TSN).
+
+The following examples were tested on AM572x EVM and BBB boards.
+
+Test setup
+==========
+
+Two examples are under consideration, with an AM572x EVM running the
+cpsw driver in dual_emac mode.
+
+Several prerequisites:
+- TX queues must be rated starting from txq0, which has the highest priority
+- Traffic classes are used starting from 0, which has the highest priority
+- CBS shapers should be used with rated queues
+- The bandwidth for CBS shapers has to be set a little higher than the
+ potential incoming rate; thus, the rate of all incoming tx queues has
+ to be a little lower
+- Real rates can differ, due to the discreteness of rate settings
+- Mapping skb-priority to txq is not enough; an skb-priority to L2-prio
+ map also has to be created with the ip or vconfig tool
+- Any L2/socket prio (0 - 7) can be used for the classes, but for
+ simplicity the default values 3 and 2 are used
+- Only 2 classes were tested, A and B, but it can work with more;
+ a maximum of 4 is allowed, though a rate can be set for only 3 of them.
+
+Test setup for examples
+=======================
+ +-------------------------------+
+ |--+ |
+ | | Workstation0 |
+ |E | MAC 18:03:73:66:87:42 |
++-----------------------------+ +--|t | |
+| | 1 | E | | |h |./tsn_listener -d \ |
+| Target board: | 0 | t |--+ |0 | 18:03:73:66:87:42 -i eth0 \|
+| AM572x EVM | 0 | h | | | -s 1500 |
+| | 0 | 0 | |--+ |
+| Only 2 classes: |Mb +---| +-------------------------------+
+| class A, class B | |
+| | +---| +-------------------------------+
+| | 1 | E | |--+ |
+| | 0 | t | | | Workstation1 |
+| | 0 | h |--+ |E | MAC 20:cf:30:85:7d:fd |
+| |Mb | 1 | +--|t | |
++-----------------------------+ |h |./tsn_listener -d \ |
+ |0 | 20:cf:30:85:7d:fd -i eth0 \|
+ | | -s 1500 |
+ |--+ |
+ +-------------------------------+
+
+*********************************************************************
+*********************************************************************
+*********************************************************************
+Example 1: One port tx AVB configuration scheme for target board
+----------------------------------------------------------------------
+(prints and scheme for AM572x evm, applicable for single port boards)
+
+tc - traffic class
+txq - transmit queue
+p - priority
+f - fifo (cpsw fifo)
+S - shaper configured
+
++------------------------------------------------------------------+ u
+| +---------------+ +---------------+ +------+ +------+ | s
+| | | | | | | | | | e
+| | App 1 | | App 2 | | Apps | | Apps | | r
+| | Class A | | Class B | | Rest | | Rest | |
+| | Eth0 | | Eth0 | | Eth0 | | Eth1 | | s
+| | VLAN100 | | VLAN100 | | | | | | | | p
+| | 40 Mb/s | | 20 Mb/s | | | | | | | | a
+| | SO_PRIORITY=3 | | SO_PRIORITY=2 | | | | | | | | c
+| | | | | | | | | | | | | | e
+| +---|-----------+ +---|-----------+ +---|--+ +---|--+ |
++-----|------------------|------------------|--------|-------------+
+ +-+ +------------+ | |
+ | | +-----------------+ +--+
+ | | | |
++---|-------|-------------|-----------------------|----------------+
+| +----+ +----+ +----+ +----+ +----+ |
+| | p3 | | p2 | | p1 | | p0 | | p0 | | k
+| \ / \ / \ / \ / \ / | e
+| \ / \ / \ / \ / \ / | r
+| \/ \/ \/ \/ \/ | n
+| | | | | | e
+| | | +-----+ | | l
+| | | | | |
+| +----+ +----+ +----+ +----+ | s
+| |tc0 | |tc1 | |tc2 | |tc0 | | p
+| \ / \ / \ / \ / | a
+| \ / \ / \ / \ / | c
+| \/ \/ \/ \/ | e
+| | | +-----+ | |
+| | | | | | |
+| | | | | | |
+| | | | | | |
+| +----+ +----+ +----+ +----+ +----+ |
+| |txq0| |txq1| |txq2| |txq3| |txq4| |
+| \ / \ / \ / \ / \ / |
+| \ / \ / \ / \ / \ / |
+| \/ \/ \/ \/ \/ |
+| +-|------|------|------|--+ +--|--------------+ |
+| | | | | | | Eth0.100 | | Eth1 | |
++---|------|------|------|------------------------|----------------+
+ | | | | |
+ p p p p |
+ 3 2 0-1, 4-7 <- L2 priority |
+ | | | | |
+ | | | | |
++---|------|------|------|------------------------|----------------+
+| | | | | |----------+ |
+| +----+ +----+ +----+ +----+ +----+ |
+| |dma7| |dma6| |dma5| |dma4| |dma3| |
+| \ / \ / \ / \ / \ / | c
+| \S / \S / \ / \ / \ / | p
+| \/ \/ \/ \/ \/ | s
+| | | | +----- | | w
+| | | | | | |
+| | | | | | | d
+| +----+ +----+ +----+p p+----+ | r
+| | | | | | |o o| | | i
+| | f3 | | f2 | | f0 |r r| f0 | | v
+| |tc0 | |tc1 | |tc2 |t t|tc0 | | e
+| \CBS / \CBS / \CBS /1 2\CBS / | r
+| \S / \S / \ / \ / |
+| \/ \/ \/ \/ |
++------------------------------------------------------------------+
+========================================Eth==========================>
+
+1)
+// Add 4 tx queues, for interface Eth0, and 1 tx queue for Eth1
+$ ethtool -L eth0 rx 1 tx 5
+rx unmodified, ignoring
+
+2)
+// Check if num of queues is set correctly:
+$ ethtool -l eth0
+Channel parameters for eth0:
+Pre-set maximums:
+RX: 8
+TX: 8
+Other: 0
+Combined: 0
+Current hardware settings:
+RX: 1
+TX: 5
+Other: 0
+Combined: 0
+
+3)
+// TX queues must be rated starting from 0, so set bws for tx0 and tx1.
+// Set rates of 40 and 20 Mb/s respectively.
+// Pay attention, real speed can differ a bit due to discreteness.
+// Leave the last 2 tx queues not rated.
+$ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
+$ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
+
+4)
+// Check maximum rate of tx (cpdma) queues:
+$ cat /sys/class/net/eth0/queues/tx-*/tx_maxrate
+40
+20
+0
+0
+0
+
+5)
+// Map skb->priority to traffic class:
+// 3pri -> tc0, 2pri -> tc1, (0,1,4-7)pri -> tc2
+// Map traffic class to transmit queue:
+// tc0 -> txq0, tc1 -> txq1, tc2 -> (txq2, txq3)
+$ tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \
+map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 1
+
+5a)
+// As the two interfaces share the same set of tx queues, assign all
+// traffic coming to interface Eth1 to a separate queue so it is not
+// mixed with traffic from interface Eth0, i.e. use a separate txq to
+// send packets to Eth1: all prio -> tc0 and tc0 -> txq4
+// With hw 0, the hardware configuration for eth1 is left at its default
+$ tc qdisc replace dev eth1 handle 100: parent root mqprio num_tc 1 \
+map 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 queues 1@4 hw 0
+
+6)
+// Check classes settings
+$ tc -g class show dev eth0
++---(100:ffe2) mqprio
+| +---(100:3) mqprio
+| +---(100:4) mqprio
+|
++---(100:ffe1) mqprio
+| +---(100:2) mqprio
+|
++---(100:ffe0) mqprio
+ +---(100:1) mqprio
+
+$ tc -g class show dev eth1
++---(100:ffe0) mqprio
+ +---(100:5) mqprio
+
+7)
+// Set rate for class A - 41 Mbit (tc0, txq0) using CBS Qdisc
+// Set it +1 Mb for reserve (important!)
+// here only the idle slope is important; the other args are ignored
+// Pay attention, real speed can differ a bit due to discreteness
+$ tc qdisc add dev eth0 parent 100:1 cbs locredit -1438 \
+hicredit 62 sendslope -959000 idleslope 41000 offload 1
+net eth0: set FIFO3 bw = 50
+
+8)
+// Set rate for class B - 21 Mbit (tc1, txq1) using CBS Qdisc:
+// Set it +1 Mb for reserve (important!)
+$ tc qdisc add dev eth0 parent 100:2 cbs locredit -1468 \
+hicredit 65 sendslope -979000 idleslope 21000 offload 1
+net eth0: set FIFO2 bw = 30
+
+9)
+// Create vlan 100 to map sk->priority to vlan qos
+$ ip link add link eth0 name eth0.100 type vlan id 100
+8021q: 802.1Q VLAN Support v1.8
+8021q: adding VLAN 0 to HW filter on device eth0
+8021q: adding VLAN 0 to HW filter on device eth1
+net eth0: Adding vlanid 100 to vlan filter
+
+10)
+// Map skb->priority to L2 prio, 1 to 1
+$ ip link set eth0.100 type vlan \
+egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+11)
+// Check egress map for vlan 100
+$ cat /proc/net/vlan/eth0.100
+[...]
+INGRESS priority mappings: 0:0 1:0 2:0 3:0 4:0 5:0 6:0 7:0
+EGRESS priority mappings: 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+12)
+// Run your appropriate tools with socket option "SO_PRIORITY"
+// set to 3 for class A and/or to 2 for class B
+// (taken from https://www.spinics.net/lists/netdev/msg460869.html)
+./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p3 -s 1500&
+./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p2 -s 1500&
+
+13)
+// Run your listener on the workstation (should be in the same vlan)
+// (taken from https://www.spinics.net/lists/netdev/msg460869.html)
+./tsn_listener -d 18:03:73:66:87:42 -i enp5s0 -s 1500
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39000 kbps
+
+14)
+// Restore default configuration if needed
+$ ip link del eth0.100
+$ tc qdisc del dev eth1 root
+$ tc qdisc del dev eth0 root
+net eth0: Prev FIFO2 is shaped
+net eth0: set FIFO3 bw = 0
+net eth0: set FIFO2 bw = 0
+$ ethtool -L eth0 rx 1 tx 1
+
+*********************************************************************
+*********************************************************************
+*********************************************************************
+Example 2: Two port tx AVB configuration scheme for target board
+----------------------------------------------------------------------
+(prints and scheme for AM572x evm, for dual emac boards only)
+
++------------------------------------------------------------------+ u
+| +----------+ +----------+ +------+ +----------+ +----------+ | s
+| | | | | | | | | | | | e
+| | App 1 | | App 2 | | Apps | | App 3 | | App 4 | | r
+| | Class A | | Class B | | Rest | | Class B | | Class A | |
+| | Eth0 | | Eth0 | | | | | Eth1 | | Eth1 | | s
+| | VLAN100 | | VLAN100 | | | | | VLAN100 | | VLAN100 | | p
+| | 40 Mb/s | | 20 Mb/s | | | | | 10 Mb/s | | 30 Mb/s | | a
+| | SO_PRI=3 | | SO_PRI=2 | | | | | SO_PRI=3 | | SO_PRI=2 | | c
+| | | | | | | | | | | | | | | | | e
+| +---|------+ +---|------+ +---|--+ +---|------+ +---|------+ |
++-----|-------------|-------------|---------|-------------|--------+
+ +-+ +-------+ | +----------+ +----+
+ | | +-------+------+ | |
+ | | | | | |
++---|-------|-------------|--------------|-------------|-------|---+
+| +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+ |
+| | p3 | | p2 | | p1 | | p0 | | p0 | | p1 | | p2 | | p3 | | k
+| \ / \ / \ / \ / \ / \ / \ / \ / | e
+| \ / \ / \ / \ / \ / \ / \ / \ / | r
+| \/ \/ \/ \/ \/ \/ \/ \/ | n
+| | | | | | | | e
+| | | +----+ +----+ | | | l
+| | | | | | | |
+| +----+ +----+ +----+ +----+ +----+ +----+ | s
+| |tc0 | |tc1 | |tc2 | |tc2 | |tc1 | |tc0 | | p
+| \ / \ / \ / \ / \ / \ / | a
+| \ / \ / \ / \ / \ / \ / | c
+| \/ \/ \/ \/ \/ \/ | e
+| | | +-----+ +-----+ | | |
+| | | | | | | | | |
+| | | | | | | | | |
+| | | | | E E | | | | |
+| +----+ +----+ +----+ +----+ t t +----+ +----+ +----+ +----+ |
+| |txq0| |txq1| |txq4| |txq5| h h |txq6| |txq7| |txq3| |txq2| |
+| \ / \ / \ / \ / 0 1 \ / \ / \ / \ / |
+| \ / \ / \ / \ / . . \ / \ / \ / \ / |
+| \/ \/ \/ \/ 1 1 \/ \/ \/ \/ |
+| +-|------|------|------|--+ 0 0 +-|------|------|------|--+ |
+| | | | | | | 0 0 | | | | | | |
++---|------|------|------|---------------|------|------|------|----+
+ | | | | | | | |
+ p p p p p p p p
+ 3 2 0-1, 4-7 <-L2 pri-> 0-1, 4-7 2 3
+ | | | | | | | |
+ | | | | | | | |
++---|------|------|------|---------------|------|------|------|----+
+| | | | | | | | | |
+| +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+ |
+| |dma7| |dma6| |dma3| |dma2| |dma1| |dma0| |dma4| |dma5| |
+| \ / \ / \ / \ / \ / \ / \ / \ / | c
+| \S / \S / \ / \ / \ / \ / \S / \S / | p
+| \/ \/ \/ \/ \/ \/ \/ \/ | s
+| | | | +----- | | | | | w
+| | | | | +----+ | | | |
+| | | | | | | | | | d
+| +----+ +----+ +----+p p+----+ +----+ +----+ | r
+| | | | | | |o o| | | | | | | i
+| | f3 | | f2 | | f0 |r CPSW r| f3 | | f2 | | f0 | | v
+| |tc0 | |tc1 | |tc2 |t t|tc0 | |tc1 | |tc2 | | e
+| \CBS / \CBS / \CBS /1 2\CBS / \CBS / \CBS / | r
+| \S / \S / \ / \S / \S / \ / |
+| \/ \/ \/ \/ \/ \/ |
++------------------------------------------------------------------+
+========================================Eth==========================>
+
+1)
+// Add 8 tx queues for interface Eth0; they are common, so they are
+// accessed by both interfaces, Eth0 and Eth1.
+$ ethtool -L eth1 rx 1 tx 8
+rx unmodified, ignoring
+
+2)
+// Check if num of queues is set correctly:
+$ ethtool -l eth0
+Channel parameters for eth0:
+Pre-set maximums:
+RX: 8
+TX: 8
+Other: 0
+Combined: 0
+Current hardware settings:
+RX: 1
+TX: 8
+Other: 0
+Combined: 0
+
+3)
+// TX queues must be rated starting from 0, so set bws for tx0 and tx1 for Eth0
+// and for tx2 and tx3 for Eth1. That is, rates of 40 and 20 Mb/s respectively
+// for Eth0, and 30 and 10 Mb/s for Eth1.
+// Real speed can differ a bit due to discreteness
+// Leave the last 4 tx queues not rated
+$ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
+$ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
+$ echo 30 > /sys/class/net/eth1/queues/tx-2/tx_maxrate
+$ echo 10 > /sys/class/net/eth1/queues/tx-3/tx_maxrate
+
+4)
+// Check maximum rate of tx (cpdma) queues:
+$ cat /sys/class/net/eth0/queues/tx-*/tx_maxrate
+40
+20
+30
+10
+0
+0
+0
+0
+
+5)
+// Map skb->priority to traffic class for Eth0:
+// 3pri -> tc0, 2pri -> tc1, (0,1,4-7)pri -> tc2
+// Map traffic class to transmit queue:
+// tc0 -> txq0, tc1 -> txq1, tc2 -> (txq4, txq5)
+$ tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \
+map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@4 hw 1
+
+6)
+// Check classes settings
+$ tc -g class show dev eth0
++---(100:ffe2) mqprio
+| +---(100:5) mqprio
+| +---(100:6) mqprio
+|
++---(100:ffe1) mqprio
+| +---(100:2) mqprio
+|
++---(100:ffe0) mqprio
+ +---(100:1) mqprio
+
+7)
+// Set rate for class A - 41 Mbit (tc0, txq0) using CBS Qdisc for Eth0
+// here only the idle slope is important, others are ignored
+// Real speed can differ a bit due to discreteness
+$ tc qdisc add dev eth0 parent 100:1 cbs locredit -1470 \
+hicredit 62 sendslope -959000 idleslope 41000 offload 1
+net eth0: set FIFO3 bw = 50
+
+8)
+// Set rate for class B - 21 Mbit (tc1, txq1) using CBS Qdisc for Eth0
+$ tc qdisc add dev eth0 parent 100:2 cbs locredit -1470 \
+hicredit 65 sendslope -979000 idleslope 21000 offload 1
+net eth0: set FIFO2 bw = 30
+
+9)
+// Create vlan 100 to map sk->priority to vlan qos for Eth0
+$ ip link add link eth0 name eth0.100 type vlan id 100
+net eth0: Adding vlanid 100 to vlan filter
+
+10)
+// Map skb->priority to L2 prio for Eth0.100, one to one
+$ ip link set eth0.100 type vlan \
+egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+11)
+// Check egress map for vlan 100
+$ cat /proc/net/vlan/eth0.100
+[...]
+INGRESS priority mappings: 0:0 1:0 2:0 3:0 4:0 5:0 6:0 7:0
+EGRESS priority mappings: 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+12)
+// Map skb->priority to traffic class for Eth1:
+// 3pri -> tc0, 2pri -> tc1, (0,1,4-7)pri -> tc2
+// Map traffic class to transmit queue:
+// tc0 -> txq2, tc1 -> txq3, tc2 -> (txq6, txq7)
+$ tc qdisc replace dev eth1 handle 100: parent root mqprio num_tc 3 \
+map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@2 1@3 2@6 hw 1
+
+13)
+// Check classes settings
+$ tc -g class show dev eth1
++---(100:ffe2) mqprio
+| +---(100:7) mqprio
+| +---(100:8) mqprio
+|
++---(100:ffe1) mqprio
+| +---(100:4) mqprio
+|
++---(100:ffe0) mqprio
+ +---(100:3) mqprio
+
+14)
+// Set rate for class A - 31 Mbit (tc0, txq2) using CBS Qdisc for Eth1
+// here only the idle slope is important, others are ignored, but it is
+// calculated for the interface speed - 100Mb for the eth1 port.
+// Set it +1 Mb for reserve (important!)
+$ tc qdisc add dev eth1 parent 100:3 cbs locredit -1035 \
+hicredit 465 sendslope -69000 idleslope 31000 offload 1
+net eth1: set FIFO3 bw = 31
+
+15)
+// Set rate for class B - 11 Mbit (tc1, txq3) using CBS Qdisc for Eth1
+// Set it +1 Mb for reserve (important!)
+$ tc qdisc add dev eth1 parent 100:4 cbs locredit -1335 \
+hicredit 405 sendslope -89000 idleslope 11000 offload 1
+net eth1: set FIFO2 bw = 11
+
+16)
+// Create vlan 100 to map sk->priority to vlan qos for Eth1
+$ ip link add link eth1 name eth1.100 type vlan id 100
+net eth1: Adding vlanid 100 to vlan filter
+
+17)
+// Map skb->priority to L2 prio for Eth1.100, one to one
+$ ip link set eth1.100 type vlan \
+egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+18)
+// Check egress map for vlan 100
+$ cat /proc/net/vlan/eth1.100
+[...]
+INGRESS priority mappings: 0:0 1:0 2:0 3:0 4:0 5:0 6:0 7:0
+EGRESS priority mappings: 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+19)
+// Run appropriate tools with socket option "SO_PRIORITY" set to 3
+// for class A and to 2 for class B, for both interfaces
+./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p2 -s 1500&
+./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p3 -s 1500&
+./tsn_talker -d 20:cf:30:85:7d:fd -i eth1.100 -p2 -s 1500&
+./tsn_talker -d 20:cf:30:85:7d:fd -i eth1.100 -p3 -s 1500&
+
+20)
+// Run your listener on the workstation (should be in the same vlan)
+// (taken from https://www.spinics.net/lists/netdev/msg460869.html)
+./tsn_listener -d 18:03:73:66:87:42 -i enp5s0 -s 1500
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39000 kbps
+
+21)
+// Restore default configuration if needed
+$ ip link del eth1.100
+$ ip link del eth0.100
+$ tc qdisc del dev eth1 root
+net eth1: Prev FIFO2 is shaped
+net eth1: set FIFO3 bw = 0
+net eth1: set FIFO2 bw = 0
+$ tc qdisc del dev eth0 root
+net eth0: Prev FIFO2 is shaped
+net eth0: set FIFO3 bw = 0
+net eth0: set FIFO2 bw = 0
+$ ethtool -L eth0 rx 1 tx 1
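+
+// As a hedged aside (not part of the original walkthrough), the qdiscs
+// installed at any step of either example can be inspected with:
+$ tc qdisc show dev eth0
+$ tc qdisc show dev eth1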
diff --git a/Documentation/networking/tproxy.txt b/Documentation/networking/tproxy.txt
index ec11429e1d42..b9a188823d9f 100644
--- a/Documentation/networking/tproxy.txt
+++ b/Documentation/networking/tproxy.txt
@@ -5,19 +5,28 @@ This feature adds Linux 2.2-like transparent proxy support to current kernels.
To use it, enable the socket match and the TPROXY target in your kernel config.
You will need policy routing too, so be sure to enable that as well.
+From Linux 4.18, transparent proxy support is also available in nf_tables.
1. Making non-local sockets work
================================
The idea is that you identify packets with destination address matching a local
-socket on your box, set the packet mark to a certain value, and then match on that
-value using policy routing to have those packets delivered locally:
+socket on your box, set the packet mark to a certain value:
# iptables -t mangle -N DIVERT
# iptables -t mangle -A PREROUTING -p tcp -m socket -j DIVERT
# iptables -t mangle -A DIVERT -j MARK --set-mark 1
# iptables -t mangle -A DIVERT -j ACCEPT
+Alternatively you can do this in nft with the following commands:
+
+# nft add table filter
+# nft add chain filter divert "{ type filter hook prerouting priority -150; }"
+# nft add rule filter divert meta l4proto tcp socket transparent 1 meta mark set 1 accept
+
+And then match on that value using policy routing to have those packets
+delivered locally:
+
# ip rule add fwmark 1 lookup 100
# ip route add local 0.0.0.0/0 dev lo table 100
@@ -57,17 +66,28 @@ add rules like this to the iptables ruleset above:
# iptables -t mangle -A PREROUTING -p tcp --dport 80 -j TPROXY \
--tproxy-mark 0x1/0x1 --on-port 50080
+Or the following rule to nft:
+
+# nft add rule filter divert tcp dport 80 tproxy to :50080 meta mark set 1 accept
+
Note that for this to work you'll have to modify the proxy to enable (SOL_IP,
IP_TRANSPARENT) for the listening socket.
+As an example implementation, tcprdr is available here:
+https://git.breakpoint.cc/cgit/fw/tcprdr.git/
+This tool was written by Florian Westphal and was used for testing during
+the nf_tables implementation.
-3. Iptables extensions
-======================
+3. Iptables and nf_tables extensions
+====================================
-To use tproxy you'll need to have the 'socket' and 'TPROXY' modules
-compiled for iptables. A patched version of iptables is available
-here: http://git.balabit.hu/?p=bazsi/iptables-tproxy.git
+To use tproxy you'll need to have the following modules compiled for iptables:
+ - NETFILTER_XT_MATCH_SOCKET
+ - NETFILTER_XT_TARGET_TPROXY
+Or the following modules for nf_tables:
+ - NFT_SOCKET
+ - NFT_TPROXY
4. Application support
======================
diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt
index 69556f0d494b..530fed08de2c 100644
--- a/Documentation/nommu-mmap.txt
+++ b/Documentation/nommu-mmap.txt
@@ -47,7 +47,7 @@ and it's also much more restricted in the latter case:
appropriate mapping protection capabilities. Ramfs, romfs, cramfs
and mtd might all permit this.
- - If the backing device device can't or won't permit direct sharing,
+ - If the backing device can't or won't permit direct sharing,
but does have the NOMMU_MAP_COPY capability, then a copy of the
appropriate bit of the file will be read into a contiguous bit of
memory and any extraneous space beyond the EOF will be cleared
diff --git a/Documentation/pcmcia/driver-changes.txt b/Documentation/pcmcia/driver-changes.txt
index dd04361dd361..78355c4c268a 100644
--- a/Documentation/pcmcia/driver-changes.txt
+++ b/Documentation/pcmcia/driver-changes.txt
@@ -46,9 +46,6 @@ This file details changes in 2.6 which affect PCMCIA card driver authors:
- use pcmcia_request_irq(p_dev, handler_t); the PCMCIA core will
clean up automatically on calls to pcmcia_disable_device() or
device ejection.
- - drivers still not capable of IRQF_SHARED (or not telling us so) may
- use the deprecated pcmcia_request_exclusive_irq() for the time
- being; they might receive a shared IRQ nonetheless.
* no cs_error / CS_CHECK / CONFIG_PCMCIA_DEBUG (as of 2.6.33)
Instead of the cs_error() callback or the CS_CHECK() macro, please use
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index af005770e767..cd283190855a 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -204,26 +204,26 @@ VI. Are there any precautions to be taken to prevent freezing failures?
Yes, there are.
-First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
+First of all, grabbing the 'system_transition_mutex' lock to mutually exclude a piece of code
from system-wide sleep such as suspend/hibernation is not encouraged.
If possible, that piece of code must instead hook onto the suspend/hibernation
notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
(kernel/cpu.c) for an example.
-However, if that is not feasible, and grabbing 'pm_mutex' is deemed necessary,
-it is strongly discouraged to directly call mutex_[un]lock(&pm_mutex) since
+However, if that is not feasible, and grabbing 'system_transition_mutex' is deemed necessary,
+it is strongly discouraged to directly call mutex_[un]lock(&system_transition_mutex) since
that could lead to freezing failures, because if the suspend/hibernate code
-successfully acquired the 'pm_mutex' lock, and hence that other entity failed
+successfully acquired the 'system_transition_mutex' lock, and hence that other entity failed
to acquire the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE
state. As a consequence, the freezer would not be able to freeze that task,
leading to freezing failure.
However, the [un]lock_system_sleep() APIs are safe to use in this scenario,
since they ask the freezer to skip freezing this task, since it is anyway
-"frozen enough" as it is blocked on 'pm_mutex', which will be released
+"frozen enough" as it is blocked on 'system_transition_mutex', which will be released
only after the entire suspend/hibernation sequence is complete.
So, to summarize, use [un]lock_system_sleep() instead of directly using
-mutex_[un]lock(&pm_mutex). That would prevent freezing failures.
+mutex_[un]lock(&system_transition_mutex). That would prevent freezing failures.
V. Miscellaneous
/sys/power/pm_freeze_timeout controls how long it will cost at most to freeze
diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt
index 6f55eb960a6d..a8751b8df10e 100644
--- a/Documentation/power/suspend-and-cpuhotplug.txt
+++ b/Documentation/power/suspend-and-cpuhotplug.txt
@@ -32,7 +32,7 @@ More details follow:
sysfs file
|
v
- Acquire pm_mutex lock
+ Acquire system_transition_mutex lock
|
v
Send PM_SUSPEND_PREPARE
@@ -96,10 +96,10 @@ execution during resume):
* thaw tasks
* send PM_POST_SUSPEND notifications
-* Release pm_mutex lock.
+* Release system_transition_mutex lock.
-It is to be noted here that the pm_mutex lock is acquired at the very
+It is to be noted here that the system_transition_mutex lock is acquired at the very
beginning, when we are just starting out to suspend, and then released only
after the entire cycle is complete (i.e., suspend + resume).
diff --git a/Documentation/powerpc/DAWR-POWER9.txt b/Documentation/powerpc/DAWR-POWER9.txt
new file mode 100644
index 000000000000..2feaa6619658
--- /dev/null
+++ b/Documentation/powerpc/DAWR-POWER9.txt
@@ -0,0 +1,58 @@
+DAWR issues on POWER9
+============================
+
+On POWER9 the DAWR can cause a checkstop if it points to cache
+inhibited (CI) memory. Currently Linux has no way to distinguish CI
+memory when configuring the DAWR, so (for now) the DAWR is disabled by
+this commit:
+
+ commit 9654153158d3e0684a1bdb76dbababdb7111d5a0
+ Author: Michael Neuling <mikey@neuling.org>
+ Date: Tue Mar 27 15:37:24 2018 +1100
+ powerpc: Disable DAWR in the base POWER9 CPU features
+
+Technical Details:
+============================
+
+DAWR has 5 different ways of being set.
+1) ptrace
+2) h_set_mode(DAWR)
+3) h_set_dabr()
+4) kvmppc_set_one_reg()
+5) xmon
+
+For ptrace, we now advertise zero breakpoints on POWER9 via the
+PPC_PTRACE_GETHWDBGINFO call. This results in GDB falling back to
+software emulation of the watchpoint (which is slow).
+
+h_set_mode(DAWR) and h_set_dabr() will now return an error to the
+guest on a POWER9 host. Current Linux guests ignore this error, so
+they will silently not get the DAWR.
+
+kvmppc_set_one_reg() will store the value in the vcpu but won't
+actually set it on POWER9 hardware. This is done so we don't break
+migration from POWER8 to POWER9, at the cost of silently losing the
+DAWR on the migration.
+
+For xmon, the 'bd' command will return an error on P9.
+
+Consequences for users
+============================
+
+For GDB watchpoints (ie the 'watch' command) on POWER9 bare metal, GDB
+will accept the command. Unfortunately since there is no hardware
+support for the watchpoint, GDB will software emulate the watchpoint
+making it run very slowly.
+
+The same will also be true for any guests started on a POWER9
+host. The watchpoint will fail and GDB will fall back to software
+emulation.
+
+If a guest is started on a POWER8 host, GDB will accept the watchpoint
+and configure the hardware to use the DAWR. This will run at full
+speed since it can use the hardware watchpoint. Unfortunately if this
+guest is migrated to a POWER9 host, the watchpoint will be lost on the
+POWER9. Loads and stores to the watchpoint locations will not be
+trapped in GDB. The watchpoint is remembered, so if the guest is
+migrated back to the POWER8 host, it will start working again.
+
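+As a hedged illustration (the program and variable names are
+hypothetical, and exact wording varies by gdb version), the fallback is
+visible when setting a watchpoint from the command line:
+
+  $ gdb -q ./a.out -ex 'watch my_var'
+  Watchpoint 1: my_var
+
+A reply of "Watchpoint" rather than "Hardware watchpoint" indicates gdb
+fell back to slow software emulation.
+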
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt
index e32fdbb4c9a7..52c023e14f26 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.txt
@@ -198,3 +198,47 @@ presented). The transaction cannot then be continued and will take the failure
handler route. Furthermore, the transactional 2nd register state will be
inaccessible. GDB can currently be used on programs using TM, but not sensibly
in parts within transactions.
+
+POWER9
+======
+
+TM on POWER9 has issues with storing the complete register state. This
+is described in this commit:
+
+ commit 4bb3c7a0208fc13ca70598efd109901a7cd45ae7
+ Author: Paul Mackerras <paulus@ozlabs.org>
+ Date: Wed Mar 21 21:32:01 2018 +1100
+ KVM: PPC: Book3S HV: Work around transactional memory bugs in POWER9
+
+To account for this, different POWER9 chips have TM enabled in
+different ways.
+
+On POWER9N DD2.01 and below, TM is disabled. ie
+HWCAP2[PPC_FEATURE2_HTM] is not set.
+
+On POWER9N DD2.1 TM is configured by firmware to always abort a
+transaction when tm suspend occurs. So tsuspend will cause a
+transaction to be aborted and rolled back. Kernel exceptions will also
+cause the transaction to be aborted and rolled back and the exception
+will not occur. If userspace constructs a sigcontext that enables TM
+suspend, the sigcontext will be rejected by the kernel. This mode is
+advertised to users with HWCAP2[PPC_FEATURE2_HTM_NO_SUSPEND] set.
+HWCAP2[PPC_FEATURE2_HTM] is not set in this mode.
+
+On POWER9N DD2.2 and above, KVM and POWERVM emulate TM for guests (as
+described in commit 4bb3c7a0208f), hence TM is enabled for guests
+ie. HWCAP2[PPC_FEATURE2_HTM] is set for guest userspace. Guests that
+make heavy use of TM suspend (tsuspend or kernel suspend) will trap
+into the hypervisor and hence will suffer a performance
+degradation. Host userspace has TM disabled
+ie. HWCAP2[PPC_FEATURE2_HTM] is not set (although we may enable it
+at some point in the future if we bring the emulation into host
+userspace context switching).
+
+POWER9C DD1.2 and above are only available with POWERVM and hence
+Linux only runs as a guest. On these systems TM is emulated like on
+POWER9N DD2.2.
+
+Guest migration from POWER8 to POWER9 will work with POWER9N DD2.2 and
+POWER9C DD1.2. Since earlier POWER9 processors don't support TM
+emulation, migration from POWER8 to POWER9 is not supported there.
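+
+A hedged way to check which mode applies on a given system is glibc's
+auxiliary vector dump (the feature-string names are glibc's
+abbreviations):
+
+  $ LD_SHOW_AUXV=1 /bin/true | grep AT_HWCAP2
+
+If "htm" is absent from the output, HWCAP2[PPC_FEATURE2_HTM] is not set;
+"htm-no-suspend" corresponds to PPC_FEATURE2_HTM_NO_SUSPEND.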
diff --git a/Documentation/process/2.Process.rst b/Documentation/process/2.Process.rst
index a9c46dd0706b..51d0349c7809 100644
--- a/Documentation/process/2.Process.rst
+++ b/Documentation/process/2.Process.rst
@@ -134,7 +134,7 @@ and their maintainers are:
4.4 Greg Kroah-Hartman (very long-term stable kernel)
4.9 Greg Kroah-Hartman
4.14 Greg Kroah-Hartman
- ====== ====================== ===========================
+ ====== ====================== ==============================
The selection of a kernel for long-term support is purely a matter of a
maintainer having the need and the time to maintain that release. There
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index ddc029734b25..61f918b10a0c 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -29,13 +29,13 @@ you probably needn't concern yourself with isdn4k-utils.
====================== =============== ========================================
Program Minimal version Command to check the version
====================== =============== ========================================
-GNU C 3.2 gcc --version
+GNU C 4.6 gcc --version
GNU make 3.81 make --version
binutils 2.20 ld -v
flex 2.5.35 flex --version
bison 2.0 bison --version
util-linux 2.10o fdformat --version
-module-init-tools 0.9.10 depmod -V
+kmod 13 depmod -V
e2fsprogs 1.41.4 e2fsck -V
jfsutils 1.1.3 fsck.jfs -V
reiserfsprogs 3.6.3 reiserfsck -V
@@ -81,6 +81,14 @@ The build system has, as of 4.13, switched to using thin archives (`ar T`)
rather than incremental linking (`ld -r`) for built-in.a intermediate steps.
This requires binutils 2.20 or newer.
+pkg-config
+----------
+
+The build system, as of 4.18, requires pkg-config to check for installed
+kconfig tools and to determine flags settings for use in
+'make {menu,n,g,x}config'. Previously pkg-config was being used but not
+verified or documented.
+
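+A quick hedged check that pkg-config is present and can locate the
+ncurses flags used by ``make menuconfig`` (the package name may vary by
+distribution)::
+
+  $ pkg-config --version
+  $ pkg-config --cflags ncurses
+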
Flex
----
@@ -156,12 +164,6 @@ is not build with ``CONFIG_KALLSYMS`` and you have no way to rebuild and
reproduce the Oops with that option, then you can still decode that Oops
with ksymoops.
-Module-Init-Tools
------------------
-
-A new module loader is now in the kernel that requires ``module-init-tools``
-to use. It is backward compatible with the 2.4.x series kernels.
-
Mkinitrd
--------
@@ -371,16 +373,17 @@ Util-linux
- <https://www.kernel.org/pub/linux/utils/util-linux/>
+Kmod
+----
+
+- <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
+- <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
+
Ksymoops
--------
- <https://www.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
-Module-Init-Tools
------------------
-
-- <https://www.kernel.org/pub/linux/utils/kernel/module-init-tools/>
-
Mkinitrd
--------
diff --git a/Documentation/process/howto.rst b/Documentation/process/howto.rst
index 3df55811b916..130bf0f48875 100644
--- a/Documentation/process/howto.rst
+++ b/Documentation/process/howto.rst
@@ -85,7 +85,7 @@ linux-api@vger.kernel.org.
Here is a list of files that are in the kernel source tree that are
required reading:
- README
+ :ref:`Documentation/admin-guide/README.rst <readme>`
This file gives a short background on the Linux kernel and describes
what is necessary to do to configure and build the kernel. People
who are new to the kernel should start here.
diff --git a/Documentation/process/management-style.rst b/Documentation/process/management-style.rst
index 45595fd8a66b..85ef8ca8f639 100644
--- a/Documentation/process/management-style.rst
+++ b/Documentation/process/management-style.rst
@@ -105,7 +105,7 @@ to admit that you are stupid when you haven't **yet** done the really
stupid thing.
Then, when it really does turn out to be stupid, people just roll their
-eyes and say "Oops, he did it again".
+eyes and say "Oops, not again".
This preemptive admission of incompetence might also make the people who
actually do the work also think twice about whether it's worth doing or
@@ -172,10 +172,10 @@ To solve this problem, you really only have two options:
might even be amused.
The option of being unfailingly polite really doesn't exist. Nobody will
-trust somebody who is so clearly hiding his true character.
+trust somebody who is so clearly hiding their true character.
.. [#f2] Paul Simon sang "Fifty Ways to Leave Your Lover", because quite
- frankly, "A Million Ways to Tell a Developer He Is a D*ckhead" doesn't
+ frankly, "A Million Ways to Tell a Developer They're a D*ckhead" doesn't
scan nearly as well. But I'm sure he thought about it.
@@ -219,15 +219,16 @@ Things will go wrong, and people want somebody to blame. Tag, you're it.
It's not actually that hard to accept the blame, especially if people
kind of realize that it wasn't **all** your fault. Which brings us to the
-best way of taking the blame: do it for another guy. You'll feel good
-for taking the fall, he'll feel good about not getting blamed, and the
-guy who lost his whole 36GB porn-collection because of your incompetence
-will grudgingly admit that you at least didn't try to weasel out of it.
-
-Then make the developer who really screwed up (if you can find him) know
-**in_private** that he screwed up. Not just so he can avoid it in the
-future, but so that he knows he owes you one. And, perhaps even more
-importantly, he's also likely the person who can fix it. Because, let's
+best way of taking the blame: do it for someone else. You'll feel good
+for taking the fall, they'll feel good about not getting blamed, and the
+person who lost their whole 36GB porn-collection because of your
+incompetence will grudgingly admit that you at least didn't try to weasel
+out of it.
+
+Then make the developer who really screwed up (if you can find them) know
+**in_private** that they screwed up. Not just so they can avoid it in the
+future, but so that they know they owe you one. And, perhaps even more
+importantly, they're also likely the person who can fix it. Because, let's
face it, it sure ain't you.
Taking the blame is also why you get to be manager in the first place.
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 36a2dded525b..0de6f6145cc6 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -37,7 +37,7 @@ Procedure for submitting patches to the -stable tree
- If the patch covers files in net/ or drivers/net please follow netdev stable
submission guidelines as described in
- Documentation/networking/netdev-FAQ.txt
+ :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
- Security patches should not be handled (solely) by the -stable review
process but should follow the procedures in
:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index 908bb55be407..c0917107b90a 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -611,6 +611,7 @@ which stable kernel versions should receive your fix. This is the preferred
method for indicating a bug fixed by the patch. See :ref:`describe_changes`
for more details.
+.. _the_canonical_patch_format:
14) The canonical patch format
------------------------------
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index a289285d2412..7d3684e81df6 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -9,7 +9,7 @@ rfkill - RF kill switch support
Introduction
============
-The rfkill subsystem provides a generic interface to disabling any radio
+The rfkill subsystem provides a generic interface for disabling any radio
transmitter in the system. When a transmitter is blocked, it shall not
radiate any power.
@@ -45,7 +45,7 @@ The rfkill subsystem is composed of three main components:
* the rfkill drivers.
The rfkill core provides API for kernel drivers to register their radio
-transmitter with the kernel, methods for turning it on and off and, letting
+transmitter with the kernel, methods for turning it on and off, and letting
the system know about hardware-disabled states that may be implemented on
the device.
@@ -54,7 +54,7 @@ ways for userspace to query the current states. See the "Userspace support"
section below.
When the device is hard-blocked (either by a call to rfkill_set_hw_state()
-or from query_hw_block) set_block() will be invoked for additional software
+or from query_hw_block), set_block() will be invoked for additional software
block, but drivers can ignore the method call since they can use the return
value of the function rfkill_set_hw_state() to sync the software state
instead of keeping track of calls to set_block(). In fact, drivers should
@@ -65,7 +65,6 @@ keeps track of soft and hard block separately.
Kernel API
==========
-
Drivers for radio transmitters normally implement an rfkill driver.
Platform drivers might implement input devices if the rfkill button is just
@@ -75,14 +74,14 @@ a way to turn on/off the transmitter(s).
For some platforms, it is possible that the hardware state changes during
suspend/hibernation, in which case it will be necessary to update the rfkill
-core with the current state is at resume time.
+core with the current state at resume time.
To create an rfkill driver, driver's Kconfig needs to have::
depends on RFKILL || !RFKILL
to ensure the driver cannot be built-in when rfkill is modular. The !RFKILL
-case allows the driver to be built when rfkill is not configured, which
+case allows the driver to be built when rfkill is not configured, in which
case all rfkill API can still be used but will be provided by static inlines
which compile to almost nothing.
@@ -91,7 +90,7 @@ rfkill drivers that control devices that can be hard-blocked unless they also
assign the poll_hw_block() callback (then the rfkill core will poll the
device). Don't do this unless you cannot get the event in any other way.
-RFKill provides per-switch LED triggers, which can be used to drive LEDs
+rfkill provides per-switch LED triggers, which can be used to drive LEDs
according to the switch state (LED_FULL when blocked, LED_OFF otherwise).
@@ -114,7 +113,7 @@ a specified type) into a state which also updates the default state for
hotplugged devices.
After an application opens /dev/rfkill, it can read the current state of all
-devices. Changes can be either obtained by either polling the descriptor for
+devices. Changes can be obtained by either polling the descriptor for
hotplug or state change events or by listening for uevents emitted by the
rfkill core framework.
@@ -127,8 +126,7 @@ environment variables set::
RFKILL_STATE
RFKILL_TYPE
-The contents of these variables corresponds to the "name", "state" and
+The content of these variables corresponds to the "name", "state" and
"type" sysfs files explained above.
-
For further details consult Documentation/ABI/stable/sysfs-class-rfkill.
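+
+For example (a hedged sketch, assuming a device at index 0), the same
+attributes are readable through sysfs:
+
+  $ cat /sys/class/rfkill/rfkill0/name
+  $ cat /sys/class/rfkill/rfkill0/type
+  $ cat /sys/class/rfkill/rfkill0/state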
diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst
index 4d83c1c0ca04..4a3cecc8ad38 100644
--- a/Documentation/sound/alsa-configuration.rst
+++ b/Documentation/sound/alsa-configuration.rst
@@ -1568,7 +1568,7 @@ joystick_io
The driver requires firmware files ``turtlebeach/msndinit.bin`` and
``turtlebeach/msndperm.bin`` in the proper firmware directory.
-See Documentation/sound/oss/MultiSound for important information
+See Documentation/sound/cards/multisound.sh for important information
about this driver. Note that it has been discontinued, but the
Voyetra Turtle Beach knowledge base entry for it is still available
at
diff --git a/Documentation/sound/cards/multisound.sh b/Documentation/sound/cards/multisound.sh
new file mode 100755
index 000000000000..a915a1affcde
--- /dev/null
+++ b/Documentation/sound/cards/multisound.sh
@@ -0,0 +1,1139 @@
+#! /bin/sh
+#
+# Turtle Beach MultiSound Driver Notes
+# -- Andrew Veliath <andrewtv@usa.net>
+#
+# Last update: September 10, 1998
+# Corresponding msnd driver: 0.8.3
+#
+# ** This file is a README (top part) and shell archive (bottom part).
+# The corresponding archived utility sources can be unpacked by
+# running `sh MultiSound' (the utilities are only needed for the
+# Pinnacle and Fiji cards). **
+#
+#
+# -=-=- Getting Firmware -=-=-
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# See the section `Obtaining and Creating Firmware Files' in this
+# document for instructions on obtaining the necessary firmware
+# files.
+#
+#
+# Supported Features
+# ~~~~~~~~~~~~~~~~~~
+#
+# Currently, full-duplex digital audio (/dev/dsp only; /dev/audio is
+# not available) and mixer functionality (/dev/mixer) are
+# supported (memory-mapped digital audio is not yet supported).
+# Digital transfers and monitoring can be done as well if you have
+# the digital daughterboard (see the section on using the S/PDIF port
+# for more information).
+#
+# Support for the Turtle Beach MultiSound Hurricane architecture is
+# composed of the following modules (these can also operate compiled
+# into the kernel):
+#
+# snd-msnd-lib - MultiSound base (requires snd)
+#
+# snd-msnd-classic - Base audio/mixer support for Classic, Monterey and
+# Tahiti cards
+#
+# snd-msnd-pinnacle - Base audio/mixer support for Pinnacle and Fiji cards
+#
+#
+# Important Notes - Read Before Using
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# The firmware files are not included (this may change in the future).
+# You must obtain these images from Turtle Beach (they are included in
+# the MultiSound Development Kits), place them in a directory such as
+# /etc/sound, and give the full paths in the Linux configuration. If
+# you are compiling in support for the MultiSound driver rather than
+# using it as a module, these firmware files must be accessible
+# during kernel compilation.
+#
+# Please note these files must be binary files, not assembler. See
+# the section later in this document for instructions to obtain these
+# files.
+#
+#
+# Configuring Card Resources
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# ** This section is very important, as your card may not work at all
+# or your machine may crash if you do not do this correctly. **
+#
+# * Classic/Monterey/Tahiti
+#
+# These cards are configured through the driver snd-msnd-classic. You must
+# know the io port, then the driver will select the irq and memory resources
+# on the card. It is up to you to know whether these locations are free;
+# a conflict can lock the machine up.
+#
+# * Pinnacle/Fiji
+#
+# The Pinnacle and Fiji cards have an extra config port, either
+# 0x250, 0x260 or 0x270. This port can be disabled to have the card
+# configured strictly through PnP; however, you lose the ability to
+# access the IDE controller and joystick devices on this card when
+# using PnP. The included pinnaclecfg program in this shell archive
+# can be used to configure the card in non-PnP mode, and in PnP mode
+# you can use isapnptools. These are described briefly here.
+#
+# pinnaclecfg is not required; you can use the snd-msnd-pinnacle module
+# to fully configure the card as well. However, pinnaclecfg can be
+# used to change the resource values of a particular device after the
+# snd-msnd-pinnacle module has been loaded. If you are compiling the
+# driver into the kernel, you must set these values during compile
+# time, however other peripheral resource values can be changed with
+# the pinnaclecfg program after the kernel is loaded.
+#
+#
+# *** PnP mode
+#
+# Use pnpdump to obtain a sample configuration if you can; I was able
+# to obtain one with the command `pnpdump 1 0x203' -- this may vary
+# for you (running pnpdump by itself did not work for me). Then,
+# edit this file and use isapnp to uncomment and set the card values.
+# Use these values when inserting the snd-msnd-pinnacle module. Using
+# this method, you can set the resources for the DSP and the Kurzweil
+# synth (Pinnacle). Since Linux does not directly support PnP
+# devices, you may have difficulty using the card in PnP mode
+# when the driver is compiled into the kernel. Using non-PnP mode
+# is preferable in this case.
+#
+# Here is an example mypinnacle.conf for isapnp that sets the card to
+# io base 0x210, irq 5 and mem 0xd8000, and also sets the Kurzweil
+# synth to 0x330 and irq 9 (may need editing for your system):
+#
+# (READPORT 0x0203)
+# (CSN 2)
+# (IDENTIFY *)
+#
+# # DSP
+# (CONFIGURE BVJ0440/-1 (LD 0
+# (INT 0 (IRQ 5 (MODE +E))) (IO 0 (BASE 0x0210)) (MEM 0 (BASE 0x0d8000))
+# (ACT Y)))
+#
+# # Kurzweil Synth (Pinnacle Only)
+# (CONFIGURE BVJ0440/-1 (LD 1
+# (IO 0 (BASE 0x0330)) (INT 0 (IRQ 9 (MODE +E)))
+# (ACT Y)))
+#
+# (WAITFORKEY)
+#
+#
+# *** Non-PnP mode
+#
+# The second way is by running the card in non-PnP mode. This
+# actually has some advantages in that you can access some other
+# devices on the card, such as the joystick and IDE controller. To
+# configure the card, unpack this shell archive and build the
+# pinnaclecfg program. Using this program, you can assign the
+# resource values to the card's devices, or disable the devices. As
+# an alternative to using pinnaclecfg, you can specify many of the
+# configuration values when loading the snd-msnd-pinnacle module (or
+# during kernel configuration when compiling the driver into the
+# kernel).
+#
+# If you specify cfg=0x250 for the snd-msnd-pinnacle module, it
+# will automatically configure the card to the given io, irq and memory
+# values using that config port (the config port is jumper selectable
+# on the card to 0x250, 0x260 or 0x270).
+#
+# See the `snd-msnd-pinnacle Additional Options' section below for more
+# information on these parameters (also, if you compile the driver
+# directly into the kernel, these extra parameters can be useful
+# here).
+#
+#
+# ** It is very easy to cause problems in your machine if you choose a
+# resource value which is incorrect. **
+#
+#
+# Examples
+# ~~~~~~~~
+#
+# * MultiSound Classic/Monterey/Tahiti:
+#
+# modprobe snd
+# insmod snd-msnd-lib
+# insmod snd-msnd-classic io=0x290 irq=7 mem=0xd0000
+#
+# * MultiSound Pinnacle in PnP mode:
+#
+# modprobe snd
+# insmod snd-msnd-lib
+# isapnp mypinnacle.conf
+# insmod snd-msnd-pinnacle io=0x210 irq=5 mem=0xd8000 <-- match mypinnacle.conf values
+#
+# * MultiSound Pinnacle in non-PnP mode (replace 0x250 with your configuration port,
+# one of 0x250, 0x260 or 0x270):
+#
+# modprobe snd
+# insmod snd-msnd-lib
+# insmod snd-msnd-pinnacle cfg=0x250 io=0x290 irq=5 mem=0xd0000
+#
+# * To use the MPU-compatible Kurzweil synth on the Pinnacle in PnP
+# mode, add the following (assumes you did `isapnp mypinnacle.conf'):
+#
+# insmod snd
+# insmod mpu401 io=0x330 irq=9 <-- match mypinnacle.conf values
+#
+# * To use the MPU-compatible Kurzweil synth on the Pinnacle in non-PnP
+# mode, add the following. Note how we first configure the peripheral's
+# resources, _then_ install a Linux driver for it:
+#
+# insmod snd
+# pinnaclecfg 0x250 mpu 0x330 9
+# insmod mpu401 io=0x330 irq=9
+#
+# -- OR you can use the following sequence without pinnaclecfg in non-PnP mode:
+#
+# modprobe snd
+# insmod snd-msnd-lib
+# insmod snd-msnd-pinnacle cfg=0x250 io=0x290 irq=5 mem=0xd0000 mpu_io=0x330 mpu_irq=9
+# insmod snd
+# insmod mpu401 io=0x330 irq=9
+#
+# * To set up the joystick port on the Pinnacle in non-PnP mode (though
+# you have to find the actual Linux joystick driver elsewhere), you
+# can use pinnaclecfg:
+#
+# pinnaclecfg 0x250 joystick 0x200
+#
+# -- OR you can configure this using snd-msnd-pinnacle with the following:
+#
+# modprobe snd
+# insmod snd-msnd-lib
+# insmod snd-msnd-pinnacle cfg=0x250 io=0x290 irq=5 mem=0xd0000 joystick_io=0x200
+#
+#
+# snd-msnd-classic, snd-msnd-pinnacle Required Options
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# If the following options are not given, the module will not load.
+# Examine the kernel message log for informative error messages.
+# WARNING--probing isn't supported so try to make sure you have the
+# correct shared memory area, otherwise you may experience problems.
+#
+# io I/O base of DSP, e.g. io=0x210
+# irq IRQ number, e.g. irq=5
+# mem Shared memory area, e.g. mem=0xd8000
+#
+#
+# snd-msnd-classic, snd-msnd-pinnacle Additional Options
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# fifosize The digital audio FIFOs, in kilobytes. If not
+# specified, the default will be used. Increasing
+# this value will reduce the chance of a FIFO
+# underflow at the expense of increasing overall
+# latency. For example, fifosize=512 will
+# allocate 512kB read and write FIFOs (1MB total).
+# While this may reduce dropouts, a heavy machine
+# load will undoubtedly starve the FIFO of data
+# and you will eventually get dropouts. One
+# option is to alter the scheduling priority of
+# the playback process, using `nice' or some form
+# of POSIX soft real-time scheduling.
+#
+# calibrate_signal Setting this to one calibrates the ADCs to the
+# signal, zero calibrates to the card (defaults
+# to zero).
+#
+#
+# snd-msnd-pinnacle Additional Options
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# digital Specify digital=1 to enable the S/PDIF input
+# if you have the digital daughterboard
+# adapter. This will enable access to the
+# DIGITAL1 input for the soundcard in the mixer.
+# Some mixer programs might have trouble setting
+# the DIGITAL1 source as an input. If you have
+# trouble, you can try the setdigital.c program
+# at the bottom of this document.
+#
+# cfg Non-PnP configuration port for the Pinnacle
+# and Fiji (typically 0x250, 0x260 or 0x270,
+# depending on the jumper configuration). If
+# this option is omitted, then it is assumed
+# that the card is in PnP mode, and that the
+# specified DSP resource values are already
+# configured with PnP (i.e. it won't attempt to
+# do any sort of configuration).
+#
+# When the Pinnacle is in non-PnP mode, you can use the following
+# options to configure particular devices. If a full specification
+# for a device is not given, then the device is not configured. Note
+# that you still must use a Linux driver for any of these devices
+# once their resources are set up (such as the Linux joystick driver,
+# or the MPU401 driver from OSS for the Kurzweil synth).
+#
+# mpu_io I/O port of MPU (on-board Kurzweil synth)
+# mpu_irq IRQ of MPU (on-board Kurzweil synth)
+# ide_io0 First I/O port of IDE controller
+# ide_io1 Second I/O port of IDE controller
+# ide_irq IRQ IDE controller
+# joystick_io I/O port of joystick
+#
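+# For example, the on-board IDE controller could be configured with
+# something like the following (the resource values here are purely
+# illustrative; substitute values that are free on your system):
+#
+#	pinnaclecfg 0x250 ide 0x1e8 0x3ee 11
+#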
+#
+# Obtaining and Creating Firmware Files
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# For the Classic/Tahiti/Monterey
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Download to /tmp and unzip the following file from Turtle Beach:
+#
+# ftp://ftp.voyetra.com/pub/tbs/msndcl/msndvkit.zip
+#
+# When unzipped, unzip the file named MsndFiles.zip. Then copy the
+# following firmware files to /etc/sound (note the file renaming):
+#
+# cp DSPCODE/MSNDINIT.BIN /etc/sound/msndinit.bin
+# cp DSPCODE/MSNDPERM.REB /etc/sound/msndperm.bin
+#
+# When configuring the Linux kernel, specify /etc/sound/msndinit.bin and
+# /etc/sound/msndperm.bin for the two firmware files (Linux kernel
+# versions older than 2.2 do not ask for firmware paths, and are
+# hardcoded to /etc/sound).
+#
+# If you are compiling the driver into the kernel, these files must
+# be accessible during compilation, but will not be needed later.
+# The files must remain, however, if the driver is used as a module.
+#
+#
+# For the Pinnacle/Fiji
+# ~~~~~~~~~~~~~~~~~~~~~
+#
+# Download to /tmp and unzip the following file from Turtle Beach (be
+# sure to use the entire URL; some have had trouble navigating to the
+# URL):
+#
+# ftp://ftp.voyetra.com/pub/tbs/pinn/pnddk100.zip
+#
+# Unpack this shell archive, and run make in the created directory
+# (you need a C compiler and flex to build the utilities). This
+# should give you the executables conv, pinnaclecfg and setdigital.
+# conv is only used temporarily here to create the firmware files,
+# while pinnaclecfg is used to configure the Pinnacle or Fiji card in
+# non-PnP mode, and setdigital can be used to set the S/PDIF input on
+# the mixer (pinnaclecfg and setdigital should be copied to a
+# convenient place, possibly run during system initialization).
+#
+# To generate the firmware files with the `conv' program, we create
+# the binary firmware files by doing the following conversion
+# (assuming the archive unpacked into a directory named PINNDDK):
+#
+# ./conv < PINNDDK/dspcode/pndspini.asm > /etc/sound/pndspini.bin
+# ./conv < PINNDDK/dspcode/pndsperm.asm > /etc/sound/pndsperm.bin
+#
+# The conv (and conv.l) program is not needed after conversion and can
+# be safely deleted. Then, when configuring the Linux kernel, specify
+# /etc/sound/pndspini.bin and /etc/sound/pndsperm.bin for the two
+# firmware files (Linux kernel versions older than 2.2 do not ask for
+# firmware paths, and are hardcoded to /etc/sound).
+#
+# If you are compiling the driver into the kernel, these files must
+# be accessible during compilation, but will not be needed later.
+# The files must remain, however, if the driver is used as a module.
+#
+#
+# Using Digital I/O with the S/PDIF Port
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# If you have a Pinnacle or Fiji with the digital daughterboard and
+# want to set it as the input source, you can use this program if you
+# have trouble trying to do it with a mixer program (be sure to
+# insert the module with the digital=1 option, or say Y to the option
+# during compiled-in kernel operation). Upon selection of the S/PDIF
+# port, you should be able to monitor and record from it.
+#
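+# For example (a sketch; the mixer device node may differ on your
+# system):
+#
+#	setdigital /dev/mixer
+#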
+# There is something to note about using the S/PDIF port. Digital
+# timing is taken from the digital signal, so if a signal is not
+# connected to the port and it is selected as recording input, you
+# will find the PCM playback rate to be distorted. Also,
+# attempting to record at a sampling rate other than the DAT rate may
+# be problematic (i.e. trying to record at 8000Hz when the DAT signal
+# is 44100Hz). If you run into this, set the recording input to
+# analog when you need to record at a rate other than the DAT rate.
+#
+#
+# -- Shell archive attached below, just run `sh multisound.sh' to extract.
+# Contains Pinnacle/Fiji utilities to convert firmware, configure
+# in non-PnP mode, and select the DIGITAL1 input for the mixer.
+#
+#
+#!/bin/sh
+# This is a shell archive (produced by GNU sharutils 4.2).
+# To extract the files from this archive, save it to some FILE, remove
+# everything before the `!/bin/sh' line above, then type `sh FILE'.
+#
+# Made on 1998-12-04 10:07 EST by <andrewtv@ztransform.velsoft.com>.
+# Source directory was `/home/andrewtv/programming/pinnacle/pinnacle'.
+#
+# Existing files will *not* be overwritten unless `-c' is specified.
+#
+# This shar contains:
+# length mode name
+# ------ ---------- ------------------------------------------
+# 2064 -rw-rw-r-- MultiSound.d/setdigital.c
+# 10224 -rw-rw-r-- MultiSound.d/pinnaclecfg.c
+# 106 -rw-rw-r-- MultiSound.d/Makefile
+# 146 -rw-rw-r-- MultiSound.d/conv.l
+# 1491 -rw-rw-r-- MultiSound.d/msndreset.c
+#
+save_IFS="${IFS}"
+IFS="${IFS}:"
+gettext_dir=FAILED
+locale_dir=FAILED
+first_param="$1"
+for dir in $PATH
+do
+ if test "$gettext_dir" = FAILED && test -f $dir/gettext \
+ && ($dir/gettext --version >/dev/null 2>&1)
+ then
+ set `$dir/gettext --version 2>&1`
+ if test "$3" = GNU
+ then
+ gettext_dir=$dir
+ fi
+ fi
+ if test "$locale_dir" = FAILED && test -f $dir/shar \
+ && ($dir/shar --print-text-domain-dir >/dev/null 2>&1)
+ then
+ locale_dir=`$dir/shar --print-text-domain-dir`
+ fi
+done
+IFS="$save_IFS"
+if test "$locale_dir" = FAILED || test "$gettext_dir" = FAILED
+then
+ echo=echo
+else
+ TEXTDOMAINDIR=$locale_dir
+ export TEXTDOMAINDIR
+ TEXTDOMAIN=sharutils
+ export TEXTDOMAIN
+ echo="$gettext_dir/gettext -s"
+fi
+touch -am 1231235999 $$.touch >/dev/null 2>&1
+if test ! -f 1231235999 && test -f $$.touch; then
+ shar_touch=touch
+else
+ shar_touch=:
+ echo
+ $echo 'WARNING: not restoring timestamps. Consider getting and'
+ $echo "installing GNU \`touch', distributed in GNU File Utilities..."
+ echo
+fi
+rm -f 1231235999 $$.touch
+#
+if mkdir _sh01426; then
+ $echo 'x -' 'creating lock directory'
+else
+ $echo 'failed to create lock directory'
+ exit 1
+fi
+# ============= MultiSound.d/setdigital.c ==============
+if test ! -d 'MultiSound.d'; then
+ $echo 'x -' 'creating directory' 'MultiSound.d'
+ mkdir 'MultiSound.d'
+fi
+if test -f 'MultiSound.d/setdigital.c' && test "$first_param" != -c; then
+ $echo 'x -' SKIPPING 'MultiSound.d/setdigital.c' '(file already exists)'
+else
+ $echo 'x -' extracting 'MultiSound.d/setdigital.c' '(text)'
+ sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/setdigital.c' &&
+/*********************************************************************
+X *
+X * setdigital.c - sets the DIGITAL1 input for a mixer
+X *
+X * Copyright (C) 1998 Andrew Veliath
+X *
+X * This program is free software; you can redistribute it and/or modify
+X * it under the terms of the GNU General Public License as published by
+X * the Free Software Foundation; either version 2 of the License, or
+X * (at your option) any later version.
+X *
+X * This program is distributed in the hope that it will be useful,
+X * but WITHOUT ANY WARRANTY; without even the implied warranty of
+X * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+X * GNU General Public License for more details.
+X *
+X * You should have received a copy of the GNU General Public License
+X * along with this program; if not, write to the Free Software
+X * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+X *
+X ********************************************************************/
+X
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/soundcard.h>
+X
+int main(int argc, char *argv[])
+{
+X int fd;
+X unsigned long recmask, recsrc;
+X
+X if (argc != 2) {
+X fprintf(stderr, "usage: setdigital <mixer device>\n");
+X exit(1);
+X }
+X
+X if ((fd = open(argv[1], O_RDWR)) < 0) {
+X perror(argv[1]);
+X exit(1);
+X }
+X
+X if (ioctl(fd, SOUND_MIXER_READ_RECMASK, &recmask) < 0) {
+X fprintf(stderr, "error: ioctl read recording mask failed\n");
+X perror("ioctl");
+X close(fd);
+X exit(1);
+X }
+X
+X if (!(recmask & SOUND_MASK_DIGITAL1)) {
+X fprintf(stderr, "error: cannot find DIGITAL1 device in mixer\n");
+X close(fd);
+X exit(1);
+X }
+X
+X if (ioctl(fd, SOUND_MIXER_READ_RECSRC, &recsrc) < 0) {
+X fprintf(stderr, "error: ioctl read recording source failed\n");
+X perror("ioctl");
+X close(fd);
+X exit(1);
+X }
+X
+X recsrc |= SOUND_MASK_DIGITAL1;
+X
+X if (ioctl(fd, SOUND_MIXER_WRITE_RECSRC, &recsrc) < 0) {
+X fprintf(stderr, "error: ioctl write recording source failed\n");
+X perror("ioctl");
+X close(fd);
+X exit(1);
+X }
+X
+X close(fd);
+X
+X return 0;
+}
+SHAR_EOF
+ $shar_touch -am 1204092598 'MultiSound.d/setdigital.c' &&
+ chmod 0664 'MultiSound.d/setdigital.c' ||
+ $echo 'restore of' 'MultiSound.d/setdigital.c' 'failed'
+ if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
+ && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
+ md5sum -c << SHAR_EOF >/dev/null 2>&1 \
+ || $echo 'MultiSound.d/setdigital.c:' 'MD5 check failed'
+e87217fc3e71288102ba41fd81f71ec4 MultiSound.d/setdigital.c
+SHAR_EOF
+ else
+ shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/setdigital.c'`"
+ test 2064 -eq "$shar_count" ||
+ $echo 'MultiSound.d/setdigital.c:' 'original size' '2064,' 'current size' "$shar_count!"
+ fi
+fi
+# ============= MultiSound.d/pinnaclecfg.c ==============
+if test -f 'MultiSound.d/pinnaclecfg.c' && test "$first_param" != -c; then
+ $echo 'x -' SKIPPING 'MultiSound.d/pinnaclecfg.c' '(file already exists)'
+else
+ $echo 'x -' extracting 'MultiSound.d/pinnaclecfg.c' '(text)'
+ sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/pinnaclecfg.c' &&
+/*********************************************************************
+X *
+X * pinnaclecfg.c - Pinnacle/Fiji Device Configuration Program
+X *
+X * This is for NON-PnP mode only. For PnP mode, use isapnptools.
+X *
+X * This is Linux-specific, and must be run with root permissions.
+X *
+X * Part of the Turtle Beach MultiSound Sound Card Driver for Linux
+X *
+X * Copyright (C) 1998 Andrew Veliath
+X *
+X * This program is free software; you can redistribute it and/or modify
+X * it under the terms of the GNU General Public License as published by
+X * the Free Software Foundation; either version 2 of the License, or
+X * (at your option) any later version.
+X *
+X * This program is distributed in the hope that it will be useful,
+X * but WITHOUT ANY WARRANTY; without even the implied warranty of
+X * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+X * GNU General Public License for more details.
+X *
+X * You should have received a copy of the GNU General Public License
+X * along with this program; if not, write to the Free Software
+X * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+X *
+X ********************************************************************/
+X
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <asm/types.h>
+#include <sys/io.h>
+X
+#define IREG_LOGDEVICE 0x07
+#define IREG_ACTIVATE 0x30
+#define LD_ACTIVATE 0x01
+#define LD_DISACTIVATE 0x00
+#define IREG_EECONTROL 0x3F
+#define IREG_MEMBASEHI 0x40
+#define IREG_MEMBASELO 0x41
+#define IREG_MEMCONTROL 0x42
+#define IREG_MEMRANGEHI 0x43
+#define IREG_MEMRANGELO 0x44
+#define MEMTYPE_8BIT 0x00
+#define MEMTYPE_16BIT 0x02
+#define MEMTYPE_RANGE 0x00
+#define MEMTYPE_HIADDR 0x01
+#define IREG_IO0_BASEHI 0x60
+#define IREG_IO0_BASELO 0x61
+#define IREG_IO1_BASEHI 0x62
+#define IREG_IO1_BASELO 0x63
+#define IREG_IRQ_NUMBER 0x70
+#define IREG_IRQ_TYPE 0x71
+#define IRQTYPE_HIGH 0x02
+#define IRQTYPE_LOW 0x00
+#define IRQTYPE_LEVEL 0x01
+#define IRQTYPE_EDGE 0x00
+X
+#define HIBYTE(w) ((BYTE)(((WORD)(w) >> 8) & 0xFF))
+#define LOBYTE(w) ((BYTE)(w))
+#define MAKEWORD(low,hi) ((WORD)(((BYTE)(low))|(((WORD)((BYTE)(hi)))<<8)))
+X
+typedef __u8 BYTE;
+typedef __u16 USHORT;
+typedef __u16 WORD;
+X
+static int config_port = -1;
+X
+static int msnd_write_cfg(int cfg, int reg, int value)
+{
+X outb(reg, cfg);
+X outb(value, cfg + 1);
+X if (value != inb(cfg + 1)) {
+X fprintf(stderr, "error: msnd_write_cfg: I/O error\n");
+X return -EIO;
+X }
+X return 0;
+}
+X
+static int msnd_read_cfg(int cfg, int reg)
+{
+X outb(reg, cfg);
+X return inb(cfg + 1);
+}
+X
+static int msnd_write_cfg_io0(int cfg, int num, WORD io)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IO0_BASEHI, HIBYTE(io)))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IO0_BASELO, LOBYTE(io)))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_read_cfg_io0(int cfg, int num, WORD *io)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X
+X *io = MAKEWORD(msnd_read_cfg(cfg, IREG_IO0_BASELO),
+X msnd_read_cfg(cfg, IREG_IO0_BASEHI));
+X
+X return 0;
+}
+X
+static int msnd_write_cfg_io1(int cfg, int num, WORD io)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IO1_BASEHI, HIBYTE(io)))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IO1_BASELO, LOBYTE(io)))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_read_cfg_io1(int cfg, int num, WORD *io)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X
+X *io = MAKEWORD(msnd_read_cfg(cfg, IREG_IO1_BASELO),
+X msnd_read_cfg(cfg, IREG_IO1_BASEHI));
+X
+X return 0;
+}
+X
+static int msnd_write_cfg_irq(int cfg, int num, WORD irq)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IRQ_NUMBER, LOBYTE(irq)))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IRQ_TYPE, IRQTYPE_EDGE))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_read_cfg_irq(int cfg, int num, WORD *irq)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X
+X *irq = msnd_read_cfg(cfg, IREG_IRQ_NUMBER);
+X
+X return 0;
+}
+X
+static int msnd_write_cfg_mem(int cfg, int num, int mem)
+{
+X WORD wmem;
+X
+X mem >>= 8;
+X mem &= 0xfff;
+X wmem = (WORD)mem;
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_MEMBASEHI, HIBYTE(wmem)))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_MEMBASELO, LOBYTE(wmem)))
+X return -EIO;
+X if (wmem && msnd_write_cfg(cfg, IREG_MEMCONTROL, (MEMTYPE_HIADDR | MEMTYPE_16BIT)))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_read_cfg_mem(int cfg, int num, int *mem)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X
+X *mem = MAKEWORD(msnd_read_cfg(cfg, IREG_MEMBASELO),
+X msnd_read_cfg(cfg, IREG_MEMBASEHI));
+X *mem <<= 8;
+X
+X return 0;
+}
+X
+static int msnd_activate_logical(int cfg, int num)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_ACTIVATE, LD_ACTIVATE))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_write_cfg_logical(int cfg, int num, WORD io0, WORD io1, WORD irq, int mem)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg_io0(cfg, num, io0))
+X return -EIO;
+X if (msnd_write_cfg_io1(cfg, num, io1))
+X return -EIO;
+X if (msnd_write_cfg_irq(cfg, num, irq))
+X return -EIO;
+X if (msnd_write_cfg_mem(cfg, num, mem))
+X return -EIO;
+X if (msnd_activate_logical(cfg, num))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_read_cfg_logical(int cfg, int num, WORD *io0, WORD *io1, WORD *irq, int *mem)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_read_cfg_io0(cfg, num, io0))
+X return -EIO;
+X if (msnd_read_cfg_io1(cfg, num, io1))
+X return -EIO;
+X if (msnd_read_cfg_irq(cfg, num, irq))
+X return -EIO;
+X if (msnd_read_cfg_mem(cfg, num, mem))
+X return -EIO;
+X return 0;
+}
+X
+static void usage(void)
+{
+X fprintf(stderr,
+X "\n"
+X "pinnaclecfg 1.0\n"
+X "\n"
+X "usage: pinnaclecfg <config port> [device config]\n"
+X "\n"
+X "This is for use with the card in NON-PnP mode only.\n"
+X "\n"
+X "Available devices (not all available for Fiji):\n"
+X "\n"
+X " Device Description\n"
+X " -------------------------------------------------------------------\n"
+X " reset Reset all devices (i.e. disable)\n"
+X " show Display current device configurations\n"
+X "\n"
+X " dsp <io> <irq> <mem> Audio device\n"
+X " mpu <io> <irq> Internal Kurzweil synth\n"
+X " ide <io0> <io1> <irq> On-board IDE controller\n"
+X " joystick <io> Joystick port\n"
+X "\n");
+X exit(1);
+}
+X
+static int cfg_reset(void)
+{
+X int i;
+X
+X for (i = 0; i < 4; ++i)
+X msnd_write_cfg_logical(config_port, i, 0, 0, 0, 0);
+X
+X return 0;
+}
+X
+static int cfg_show(void)
+{
+X int i;
+X int count = 0;
+X
+X for (i = 0; i < 4; ++i) {
+X WORD io0, io1, irq;
+X int mem;
+X msnd_read_cfg_logical(config_port, i, &io0, &io1, &irq, &mem);
+X switch (i) {
+X case 0:
+X if (io0 || irq || mem) {
+X printf("dsp 0x%x %d 0x%x\n", io0, irq, mem);
+X ++count;
+X }
+X break;
+X case 1:
+X if (io0 || irq) {
+X printf("mpu 0x%x %d\n", io0, irq);
+X ++count;
+X }
+X break;
+X case 2:
+X if (io0 || io1 || irq) {
+X printf("ide 0x%x 0x%x %d\n", io0, io1, irq);
+X ++count;
+X }
+X break;
+X case 3:
+X if (io0) {
+X printf("joystick 0x%x\n", io0);
+X ++count;
+X }
+X break;
+X }
+X }
+X
+X if (count == 0)
+X fprintf(stderr, "no devices configured\n");
+X
+X return 0;
+}
+X
+static int cfg_dsp(int argc, char *argv[])
+{
+X int io, irq, mem;
+X
+X if (argc < 3 ||
+X sscanf(argv[0], "0x%x", &io) != 1 ||
+X sscanf(argv[1], "%d", &irq) != 1 ||
+X sscanf(argv[2], "0x%x", &mem) != 1)
+X usage();
+X
+X if (!(io == 0x290 ||
+X io == 0x260 ||
+X io == 0x250 ||
+X io == 0x240 ||
+X io == 0x230 ||
+X io == 0x220 ||
+X io == 0x210 ||
+X io == 0x3e0)) {
+X fprintf(stderr, "error: io must be one of "
+X "210, 220, 230, 240, 250, 260, 290, or 3E0\n");
+X usage();
+X }
+X
+X if (!(irq == 5 ||
+X irq == 7 ||
+X irq == 9 ||
+X irq == 10 ||
+X irq == 11 ||
+X irq == 12)) {
+X fprintf(stderr, "error: irq must be one of "
+X "5, 7, 9, 10, 11 or 12\n");
+X usage();
+X }
+X
+X if (!(mem == 0xb0000 ||
+X mem == 0xc8000 ||
+X mem == 0xd0000 ||
+X mem == 0xd8000 ||
+X mem == 0xe0000 ||
+X mem == 0xe8000)) {
+X fprintf(stderr, "error: mem must be one of "
+X "0xb0000, 0xc8000, 0xd0000, 0xd8000, 0xe0000 or 0xe8000\n");
+X usage();
+X }
+X
+X return msnd_write_cfg_logical(config_port, 0, io, 0, irq, mem);
+}
+X
+static int cfg_mpu(int argc, char *argv[])
+{
+X int io, irq;
+X
+X if (argc < 2 ||
+X sscanf(argv[0], "0x%x", &io) != 1 ||
+X sscanf(argv[1], "%d", &irq) != 1)
+X usage();
+X
+X return msnd_write_cfg_logical(config_port, 1, io, 0, irq, 0);
+}
+X
+static int cfg_ide(int argc, char *argv[])
+{
+X int io0, io1, irq;
+X
+X if (argc < 3 ||
+X sscanf(argv[0], "0x%x", &io0) != 1 ||
+X	    sscanf(argv[1], "0x%x", &io1) != 1 ||
+X	    sscanf(argv[2], "%d", &irq) != 1)
+X usage();
+X
+X return msnd_write_cfg_logical(config_port, 2, io0, io1, irq, 0);
+}
+X
+static int cfg_joystick(int argc, char *argv[])
+{
+X int io;
+X
+X if (argc < 1 ||
+X sscanf(argv[0], "0x%x", &io) != 1)
+X usage();
+X
+X return msnd_write_cfg_logical(config_port, 3, io, 0, 0, 0);
+}
+X
+int main(int argc, char *argv[])
+{
+X char *device;
+X int rv = 0;
+X
+X --argc; ++argv;
+X
+X if (argc < 2)
+X usage();
+X
+X sscanf(argv[0], "0x%x", &config_port);
+X if (config_port != 0x250 && config_port != 0x260 && config_port != 0x270) {
+X fprintf(stderr, "error: <config port> must be 0x250, 0x260 or 0x270\n");
+X exit(1);
+X }
+X if (ioperm(config_port, 2, 1)) {
+X perror("ioperm");
+X fprintf(stderr, "note: pinnaclecfg must be run as root\n");
+X exit(1);
+X }
+X device = argv[1];
+X
+X argc -= 2; argv += 2;
+X
+X if (strcmp(device, "reset") == 0)
+X rv = cfg_reset();
+X else if (strcmp(device, "show") == 0)
+X rv = cfg_show();
+X else if (strcmp(device, "dsp") == 0)
+X rv = cfg_dsp(argc, argv);
+X else if (strcmp(device, "mpu") == 0)
+X rv = cfg_mpu(argc, argv);
+X else if (strcmp(device, "ide") == 0)
+X rv = cfg_ide(argc, argv);
+X else if (strcmp(device, "joystick") == 0)
+X rv = cfg_joystick(argc, argv);
+X else {
+X fprintf(stderr, "error: unknown device %s\n", device);
+X usage();
+X }
+X
+X if (rv)
+X fprintf(stderr, "error: device configuration failed\n");
+X
+X return 0;
+}
+SHAR_EOF
+ $shar_touch -am 1204092598 'MultiSound.d/pinnaclecfg.c' &&
+ chmod 0664 'MultiSound.d/pinnaclecfg.c' ||
+ $echo 'restore of' 'MultiSound.d/pinnaclecfg.c' 'failed'
+ if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
+ && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
+ md5sum -c << SHAR_EOF >/dev/null 2>&1 \
+ || $echo 'MultiSound.d/pinnaclecfg.c:' 'MD5 check failed'
+366bdf27f0db767a3c7921d0a6db20fe MultiSound.d/pinnaclecfg.c
+SHAR_EOF
+ else
+ shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/pinnaclecfg.c'`"
+ test 10224 -eq "$shar_count" ||
+ $echo 'MultiSound.d/pinnaclecfg.c:' 'original size' '10224,' 'current size' "$shar_count!"
+ fi
+fi
+# ============= MultiSound.d/Makefile ==============
+if test -f 'MultiSound.d/Makefile' && test "$first_param" != -c; then
+ $echo 'x -' SKIPPING 'MultiSound.d/Makefile' '(file already exists)'
+else
+ $echo 'x -' extracting 'MultiSound.d/Makefile' '(text)'
+ sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/Makefile' &&
+CC = gcc
+CFLAGS = -O
+PROGS = setdigital msndreset pinnaclecfg conv
+X
+all: $(PROGS)
+X
+clean:
+X rm -f $(PROGS)
+SHAR_EOF
+ $shar_touch -am 1204092398 'MultiSound.d/Makefile' &&
+ chmod 0664 'MultiSound.d/Makefile' ||
+ $echo 'restore of' 'MultiSound.d/Makefile' 'failed'
+ if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
+ && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
+ md5sum -c << SHAR_EOF >/dev/null 2>&1 \
+ || $echo 'MultiSound.d/Makefile:' 'MD5 check failed'
+76ca8bb44e3882edcf79c97df6c81845 MultiSound.d/Makefile
+SHAR_EOF
+ else
+ shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/Makefile'`"
+ test 106 -eq "$shar_count" ||
+ $echo 'MultiSound.d/Makefile:' 'original size' '106,' 'current size' "$shar_count!"
+ fi
+fi
+# ============= MultiSound.d/conv.l ==============
+if test -f 'MultiSound.d/conv.l' && test "$first_param" != -c; then
+ $echo 'x -' SKIPPING 'MultiSound.d/conv.l' '(file already exists)'
+else
+ $echo 'x -' extracting 'MultiSound.d/conv.l' '(text)'
+ sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/conv.l' &&
+%%
+[ \n\t,\r]
+\;.*
+DB
+[0-9A-Fa-f]+H { int n; sscanf(yytext, "%xH", &n); printf("%c", n); }
+%%
+int yywrap() { return 1; }
+void main() { yylex(); }
+SHAR_EOF
+ $shar_touch -am 0828231798 'MultiSound.d/conv.l' &&
+ chmod 0664 'MultiSound.d/conv.l' ||
+ $echo 'restore of' 'MultiSound.d/conv.l' 'failed'
+ if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
+ && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
+ md5sum -c << SHAR_EOF >/dev/null 2>&1 \
+ || $echo 'MultiSound.d/conv.l:' 'MD5 check failed'
+d2411fc32cd71a00dcdc1f009e858dd2 MultiSound.d/conv.l
+SHAR_EOF
+ else
+ shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/conv.l'`"
+ test 146 -eq "$shar_count" ||
+ $echo 'MultiSound.d/conv.l:' 'original size' '146,' 'current size' "$shar_count!"
+ fi
+fi
+# ============= MultiSound.d/msndreset.c ==============
+if test -f 'MultiSound.d/msndreset.c' && test "$first_param" != -c; then
+ $echo 'x -' SKIPPING 'MultiSound.d/msndreset.c' '(file already exists)'
+else
+ $echo 'x -' extracting 'MultiSound.d/msndreset.c' '(text)'
+ sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/msndreset.c' &&
+/*********************************************************************
+X *
+X * msndreset.c - resets the MultiSound card
+X *
+X * Copyright (C) 1998 Andrew Veliath
+X *
+X * This program is free software; you can redistribute it and/or modify
+X * it under the terms of the GNU General Public License as published by
+X * the Free Software Foundation; either version 2 of the License, or
+X * (at your option) any later version.
+X *
+X * This program is distributed in the hope that it will be useful,
+X * but WITHOUT ANY WARRANTY; without even the implied warranty of
+X * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+X * GNU General Public License for more details.
+X *
+X * You should have received a copy of the GNU General Public License
+X * along with this program; if not, write to the Free Software
+X * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+X *
+X ********************************************************************/
+X
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/soundcard.h>
+X
+int main(int argc, char *argv[])
+{
+X int fd;
+X
+X if (argc != 2) {
+X fprintf(stderr, "usage: msndreset <mixer device>\n");
+X exit(1);
+X }
+X
+X if ((fd = open(argv[1], O_RDWR)) < 0) {
+X perror(argv[1]);
+X exit(1);
+X }
+X
+X if (ioctl(fd, SOUND_MIXER_PRIVATE1, 0) < 0) {
+X fprintf(stderr, "error: msnd ioctl reset failed\n");
+X perror("ioctl");
+X close(fd);
+X exit(1);
+X }
+X
+X close(fd);
+X
+X return 0;
+}
+SHAR_EOF
+ $shar_touch -am 1204100698 'MultiSound.d/msndreset.c' &&
+ chmod 0664 'MultiSound.d/msndreset.c' ||
+ $echo 'restore of' 'MultiSound.d/msndreset.c' 'failed'
+ if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
+ && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
+ md5sum -c << SHAR_EOF >/dev/null 2>&1 \
+ || $echo 'MultiSound.d/msndreset.c:' 'MD5 check failed'
+c52f876521084e8eb25e12e01dcccb8a MultiSound.d/msndreset.c
+SHAR_EOF
+ else
+ shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/msndreset.c'`"
+ test 1491 -eq "$shar_count" ||
+ $echo 'MultiSound.d/msndreset.c:' 'original size' '1491,' 'current size' "$shar_count!"
+ fi
+fi
+rm -fr _sh01426
+exit 0
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index 7c2d37571af0..e06238131f77 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -34,6 +34,22 @@ ALC262
======
inv-dmic
Inverted internal mic workaround
+fsc-h270
+ Fixups for Fujitsu-Siemens Celsius H270
+fsc-s7110
+ Fixups for Fujitsu-Siemens Lifebook S7110
+hp-z200
+ Fixups for HP Z200
+tyan
+ Fixups for Tyan Thunder n6650W
+lenovo-3000
+ Fixups for Lenovo 3000
+benq
+ Fixups for Benq ED8
+benq-t31
+ Fixups for Benq T31
+bayleybay
+ Fixups for Intel BayleyBay
ALC267/268
==========
@@ -41,6 +57,8 @@ inv-dmic
Inverted internal mic workaround
hp-eapd
Disable HP EAPD on NID 0x15
+spdif
+ Enable SPDIF output on NID 0x1e
ALC22x/23x/25x/269/27x/28x/29x (and vendor-specific ALC3xxx models)
===================================================================
@@ -70,6 +88,10 @@ dell-headset-multi
Headset jack, which can also be used as mic-in
dell-headset-dock
Headset jack (without mic-in), and also dock I/O
+dell-headset3
+ Headset jack (without mic-in), and also dock I/O, variant 3
+dell-headset4
+ Headset jack (without mic-in), and also dock I/O, variant 4
alc283-dac-wcaps
Fixups for Chromebook with ALC283
alc283-sense-combo
@@ -80,15 +102,173 @@ tpt440
Lenovo Thinkpad T440s setup
tpt460
Lenovo Thinkpad T460/560 setup
+tpt470-dock
+ Lenovo Thinkpad T470 dock setup
dual-codecs
Lenovo laptops with dual codecs
alc700-ref
Intel reference board with ALC700 codec
+vaio
+ Pin fixups for Sony VAIO laptops
+dell-m101z
+ COEF setup for Dell M101z
+asus-g73jw
+ Subwoofer pin fixup for ASUS G73JW
+lenovo-eapd
+ Inverted EAPD setup for Lenovo laptops
+sony-hweq
+ H/W EQ COEF setup for Sony laptops
+pcm44k
+ Fixed PCM 44kHz constraints (for buggy devices)
+lifebook
+ Dock pin fixups for Fujitsu Lifebook
+lifebook-extmic
+ Headset mic fixup for Fujitsu Lifebook
+lifebook-hp-pin
+ Headphone pin fixup for Fujitsu Lifebook
+lifebook-u7x7
+ Lifebook U7x7 fixups
+alc269vb-amic
+ ALC269VB analog mic pin fixups
+alc269vb-dmic
+ ALC269VB digital mic pin fixups
+hp-mute-led-mic1
+ Mute LED via Mic1 pin on HP
+hp-mute-led-mic2
+ Mute LED via Mic2 pin on HP
+hp-mute-led-mic3
+ Mute LED via Mic3 pin on HP
+hp-gpio-mic1
+ GPIO + Mic1 pin LED on HP
+hp-line1-mic1
+ Mute LED via Line1 + Mic1 pins on HP
+noshutup
+ Skip shutup callback
+sony-nomic
+ Headset mic fixup for Sony laptops
+aspire-headset-mic
+ Headset pin fixup for Acer Aspire
+asus-x101
+ ASUS X101 fixups
+acer-ao7xx
+ Acer AO7xx fixups
+acer-aspire-e1
+ Acer Aspire E1 fixups
+acer-ac700
+ Acer AC700 fixups
+limit-mic-boost
+ Limit internal mic boost on Lenovo machines
+asus-zenbook
+ ASUS Zenbook fixups
+asus-zenbook-ux31a
+ ASUS Zenbook UX31A fixups
+ordissimo
+ Ordissimo EVE2 (or Malata PC-B1303) fixups
+asus-tx300
+ ASUS TX300 fixups
+alc283-int-mic
+ ALC283 COEF setup for Lenovo machines
+mono-speakers
+ Subwoofer and headset fixups for Dell Inspiron
+alc290-subwoofer
+ Subwoofer fixups for Dell Vostro
+thinkpad
+ Binding with thinkpad_acpi driver for Lenovo machines
+dmic-thinkpad
+ thinkpad_acpi binding + digital mic support
+alc255-acer
+ ALC255 fixups on Acer machines
+alc255-asus
+ ALC255 fixups on ASUS machines
+alc255-dell1
+ ALC255 fixups on Dell machines
+alc255-dell2
+ ALC255 fixups on Dell machines, variant 2
+alc293-dell1
+ ALC293 fixups on Dell machines
+alc283-headset
+ Headset pin fixups on ALC283
+aspire-v5
+ Acer Aspire V5 fixups
+hp-gpio4
+ GPIO and Mic1 pin mute LED fixups for HP
+hp-gpio-led
+ GPIO mute LEDs on HP
+hp-gpio2-hotkey
+ GPIO mute LED with hot key handling on HP
+hp-dock-pins
+ GPIO mute LEDs and dock support on HP
+hp-dock-gpio-mic
+ GPIO, Mic mute LED and dock support on HP
+hp-9480m
+ HP 9480m fixups
+alc288-dell1
+ ALC288 fixups on Dell machines
+alc288-dell-xps13
+ ALC288 fixups on Dell XPS13
+dell-e7x
+ Dell E7x fixups
+alc293-dell
+ ALC293 fixups on Dell machines
+alc298-dell1
+ ALC298 fixups on Dell machines
+alc298-dell-aio
+ ALC298 fixups on Dell AIO machines
+alc275-dell-xps
+ ALC275 fixups on Dell XPS models
+alc256-dell-xps13
+ ALC256 fixups on Dell XPS13
+lenovo-spk-noise
+ Workaround for speaker noise on Lenovo machines
+lenovo-hotkey
+ Hot-key support via Mic2 pin on Lenovo machines
+dell-spk-noise
+ Workaround for speaker noise on Dell machines
+alc295-disable-dac3
+ Disable DAC3 routing on ALC295
+alc280-hp-headset
+ HP Elitebook fixups
+alc221-hp-mic
+ Front mic pin fixup on HP machines
+alc298-spk-volume
+ Speaker pin routing workaround on ALC298
+dell-inspiron-7559
+ Dell Inspiron 7559 fixups
+ativ-book
+ Samsung Ativ book 8 fixups
+alc221-hp-mic
+ ALC221 headset fixups on HP machines
+alc256-asus-mic
+ ALC256 fixups on ASUS machines
+alc256-asus-aio
+ ALC256 fixups on ASUS AIO machines
+alc233-eapd
+ ALC233 fixups on ASUS machines
+alc294-lenovo-mic
+ ALC294 Mic pin fixup for Lenovo AIO machines
+alc225-wyse
+ Dell Wyse fixups
+alc274-dell-aio
+ ALC274 fixups on Dell AIO machines
+alc255-dummy-lineout
+ Dell Precision 3930 fixups
+alc255-dell-headset
+ Dell Precision 3630 fixups
+alc295-hp-x360
+ HP Spectre X360 fixups
ALC66x/67x/892
==============
+aspire
+ Subwoofer pin fixup for Aspire laptops
+ideapad
+ Subwoofer pin fixup for Ideapad laptops
mario
Chromebook mario model fixup
+hp-rp5800
+ Headphone pin fixup for HP RP5800
asus-mode1
ASUS
asus-mode2
@@ -105,10 +285,40 @@ asus-mode7
ASUS
asus-mode8
ASUS
+zotac-z68
+ Front HP fixup for Zotac Z68
inv-dmic
Inverted internal mic workaround
+alc662-headset-multi
+ Dell headset jack, which can also be used as mic-in (ALC662)
dell-headset-multi
Headset jack, which can also be used as mic-in
+alc662-headset
+ Headset mode support on ALC662
+alc668-headset
+ Headset mode support on ALC668
+bass16
+ Bass speaker fixup on pin 0x16
+bass1a
+ Bass speaker fixup on pin 0x1a
+automute
+ Auto-mute fixups for ALC668
+dell-xps13
+ Dell XPS13 fixups
+asus-nx50
+ ASUS Nx50 fixups
+asus-nx51
+ ASUS Nx51 fixups
+alc891-headset
+ Headset mode support on ALC891
+alc891-headset-multi
+ Dell headset jack, which can also be used as mic-in (ALC891)
+acer-veriton
+ Acer Veriton speaker pin fixup
+asrock-mobo
+ Fix invalid 0x15 / 0x16 pins
+usi-headset
+ Headset support on USI machines
dual-codecs
Lenovo laptops with dual codecs
@@ -116,20 +326,70 @@ ALC680
======
N/A
-ALC88x/898/1150
-======================
+ALC88x/898/1150/1220
+====================
+abit-aw9d
+ Pin fixups for Abit AW9D-MAX
+lenovo-y530
+ Pin fixups for Lenovo Y530
+acer-aspire-7736
+ Fixup for Acer Aspire 7736
+asus-w90v
+ Pin fixup for ASUS W90V
+cd
+ Enable audio CD pin NID 0x1c
+no-front-hp
+ Disable front HP pin NID 0x1b
+vaio-tt
+ Pin fixup for VAIO TT
+eee1601
+ COEF setups for ASUS Eee 1601
+alc882-eapd
+ Change EAPD COEF mode on ALC882
+alc883-eapd
+ Change EAPD COEF mode on ALC883
+gpio1
+ Enable GPIO1
+gpio2
+ Enable GPIO2
+gpio3
+ Enable GPIO3
+alc889-coef
+ Setup ALC889 COEF
+asus-w2jc
+ Fixups for ASUS W2JC
acer-aspire-4930g
Acer Aspire 4930G/5930G/6530G/6930G/7730G
acer-aspire-8930g
Acer Aspire 8330G/6935G
acer-aspire
Acer Aspire others
+macpro-gpio
+ GPIO setup for Mac Pro
+dac-route
+ Workaround for DAC routing on Acer Aspire
+mbp-vref
+ Vref setup for Macbook Pro
+imac91-vref
+ Vref setup for iMac 9,1
+mba11-vref
+ Vref setup for MacBook Air 1,1
+mba21-vref
+ Vref setup for MacBook Air 2,1
+mp11-vref
+ Vref setup for Mac Pro 1,1
+mp41-vref
+ Vref setup for Mac Pro 4,1
inv-dmic
Inverted internal mic workaround
no-primary-hp
VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC)
+asus-bass
+ Bass speaker setup for ASUS ET2700
dual-codecs
ALC1220 dual codecs for Gaming mobos
+clevo-p950
+ Fixups for Clevo P950
ALC861/660
==========
diff --git a/Documentation/sound/soc/dpcm.rst b/Documentation/sound/soc/dpcm.rst
index 395e5a516282..fe61e02277f8 100644
--- a/Documentation/sound/soc/dpcm.rst
+++ b/Documentation/sound/soc/dpcm.rst
@@ -254,9 +254,7 @@ configuration.
channels->min = channels->max = 2;
/* set DAI0 to 16 bit */
- snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
- SNDRV_PCM_HW_PARAM_FIRST_MASK],
- SNDRV_PCM_FORMAT_S16_LE);
+ params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
return 0;
}
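For reference, a complete backend fixup callback built around this helper
might look like the following sketch (the function name and the
48kHz/stereo constraints are illustrative, not taken from the patch)::

    static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
                                  struct snd_pcm_hw_params *params)
    {
            struct snd_interval *rate = hw_param_interval(params,
                            SNDRV_PCM_HW_PARAM_RATE);
            struct snd_interval *channels = hw_param_interval(params,
                            SNDRV_PCM_HW_PARAM_CHANNELS);

            /* fix the backend DAI to 48kHz, stereo, 16-bit */
            rate->min = rate->max = 48000;
            channels->min = channels->max = 2;
            params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
            return 0;
    }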
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index fbedcc39460b..9d0a7f08f93b 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -47,7 +47,7 @@ class KernelDocDirective(Directive):
optional_arguments = 4
option_spec = {
'doc': directives.unchanged_required,
- 'functions': directives.unchanged_required,
+ 'functions': directives.unchanged,
'export': directives.unchanged,
'internal': directives.unchanged,
}
@@ -75,8 +75,12 @@ class KernelDocDirective(Directive):
elif 'doc' in self.options:
cmd += ['-function', str(self.options.get('doc'))]
elif 'functions' in self.options:
- for f in str(self.options.get('functions')).split():
- cmd += ['-function', f]
+ functions = self.options.get('functions').split()
+ if functions:
+ for f in functions:
+ cmd += ['-function', f]
+ else:
+ cmd += ['-no-doc-sections']
for pattern in export_file_patterns:
for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
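With this change, a document can give ``:functions:`` with no arguments to
pull in all kernel-doc comments from a file while suppressing its DOC:
sections; a hypothetical use (the file path is illustrative)::

    .. kernel-doc:: drivers/foo/bar.c
       :functions: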
diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl
index d410f47567e9..c518050ffc3f 100755
--- a/Documentation/sphinx/parse-headers.pl
+++ b/Documentation/sphinx/parse-headers.pl
@@ -344,7 +344,7 @@ enums and defines and create cross-references to a Sphinx book.
B<parse_headers.pl> [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
-Where <options> can be: --debug, --help or --man.
+Where <options> can be: --debug, --help or --usage.
=head1 OPTIONS
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index eded671d55eb..37a679501ddc 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -38,7 +38,9 @@ show up in /proc/sys/kernel:
- hung_task_panic
- hung_task_check_count
- hung_task_timeout_secs
+- hung_task_check_interval_secs
- hung_task_warnings
+- hyperv_record_panic_msg
- kexec_load_disabled
- kptr_restrict
- l2cr [ PPC only ]
@@ -354,7 +356,7 @@ This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
hung_task_timeout_secs:
-Check interval. When a task in D state did not get scheduled
+When a task in D state has not been scheduled
for more than this value, report a warning.
This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
@@ -363,6 +365,18 @@ Possible values to set are in range {0..LONG_MAX/HZ}.
==============================================================
+hung_task_check_interval_secs:
+
+Hung task check interval. If hung task checking is enabled
+(see hung_task_timeout_secs), the check is done every
+hung_task_check_interval_secs seconds.
+This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
+
+0 (default): use hung_task_timeout_secs as the checking interval.
+Possible values to set are in range {0..LONG_MAX/HZ}.
+
+==============================================================
+
hung_task_warnings:
The maximum number of warnings to report. During a check interval
@@ -374,6 +388,16 @@ This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
==============================================================
+hyperv_record_panic_msg:
+
+Controls whether the panic kmsg data should be reported to Hyper-V.
+
+0: do not report panic kmsg data.
+
+1: report the panic kmsg data. This is the default behavior.
+
+==============================================================
+
kexec_load_disabled:
A toggle indicating if the kexec_load syscall has been disabled. This
@@ -440,7 +464,8 @@ Notes:
1) the kernel doesn't guarantee that a new object will have the desired id. So,
it's up to userspace how to handle an object with a "wrong" id.
2) Toggle with non-default value will be set back to -1 by kernel after
-successful IPC object allocation.
+successful IPC object allocation. If an IPC object allocation syscall
+fails, it is undefined whether the value remains unmodified or is reset to -1.
==============================================================
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 697ef8c225df..7d73882e2c27 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -27,6 +27,7 @@ Currently, these files are in /proc/sys/vm:
- dirty_bytes
- dirty_expire_centisecs
- dirty_ratio
+- dirtytime_expire_seconds
- dirty_writeback_centisecs
- drop_caches
- extfrag_threshold
@@ -44,6 +45,7 @@ Currently, these files are in /proc/sys/vm:
- mmap_rnd_bits
- mmap_rnd_compat_bits
- nr_hugepages
+- nr_hugepages_mempolicy
- nr_overcommit_hugepages
- nr_trim_pages (only if CONFIG_MMU=n)
- numa_zonelist_order
@@ -178,6 +180,18 @@ The total available memory is not equal to total system memory.
==============================================================
+dirtytime_expire_seconds
+
+When a lazytime inode is constantly having its pages dirtied, the inode with
+an updated timestamp will never get a chance to be written out. And, if the
+only thing that has happened on the file system is a dirtytime inode caused
+by an atime update, a worker will be scheduled to make sure that inode
+eventually gets pushed out to disk. This tunable defines when a dirty
+inode is old enough to be eligible for writeback by the kernel flusher
+threads, and is also used as the wakeup interval of the dirtytime writeback
+thread.
+
+==============================================================
+
dirty_writeback_centisecs
The kernel flusher threads will periodically wake up and write `old' data
@@ -519,6 +533,15 @@ See Documentation/admin-guide/mm/hugetlbpage.rst
==============================================================
+nr_hugepages_mempolicy
+
+Change the size of the hugepage pool at run-time on a specific
+set of NUMA nodes.
+
+See Documentation/admin-guide/mm/hugetlbpage.rst
+
+==============================================================
+
nr_overcommit_hugepages
Change the maximum size of the hugepage pool. The maximum is
@@ -668,7 +691,7 @@ and don't use much of it.
The default value is 0.
See Documentation/vm/overcommit-accounting.rst and
-mm/mmap.c::__vm_enough_memory() for more information.
+mm/util.c::__vm_enough_memory() for more information.
==============================================================
diff --git a/Documentation/timers/timekeeping.txt b/Documentation/timers/timekeeping.txt
index f3a8cf28f802..2d1732b0a868 100644
--- a/Documentation/timers/timekeeping.txt
+++ b/Documentation/timers/timekeeping.txt
@@ -27,7 +27,7 @@ a Linux system will eventually read the clock source to determine exactly
what time it is.
Typically the clock source is a monotonic, atomic counter which will provide
-n bits which count from 0 to 2^(n-1) and then wraps around to 0 and start over.
+n bits which count from 0 to (2^n)-1, then wrap around to 0 and start over.
It will ideally NEVER stop ticking as long as the system is running. It
may stop during system suspend.
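Consumers of such a counter typically mask the subtraction so that a single
wrap between two readings is still handled correctly; a minimal sketch,
assuming n = 32 for illustration::

    #include <stdint.h>

    /* delta between two readings of a free-running n-bit counter;
     * masking keeps the result correct across one wrap (n = 32 here)
     */
    static inline uint64_t counter_delta(uint64_t now, uint64_t prev)
    {
            const uint64_t mask = (1ULL << 32) - 1;
            return (now - prev) & mask;
    }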
diff --git a/Documentation/trace/events-power.rst b/Documentation/trace/events-power.rst
index a77daca75e30..2ef318962e29 100644
--- a/Documentation/trace/events-power.rst
+++ b/Documentation/trace/events-power.rst
@@ -27,6 +27,7 @@ cpufreq.
cpu_idle "state=%lu cpu_id=%lu"
cpu_frequency "state=%lu cpu_id=%lu"
+ cpu_frequency_limits "min=%lu max=%lu cpu_id=%lu"
A suspend event is used to indicate the system going in and out of the
suspend mode:
diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst
index 696dc69b8158..f7e1fcc0953c 100644
--- a/Documentation/trace/events.rst
+++ b/Documentation/trace/events.rst
@@ -524,4 +524,4 @@ The following commands are supported:
totals derived from one or more trace event format fields and/or
event counts (hitcount).
- See Documentation/trace/histogram.txt for details and examples.
+ See Documentation/trace/histogram.rst for details and examples.
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index a20d34955333..7ea16a0ceffc 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -329,9 +329,9 @@ of ftrace. Here is a list of some of the key files:
track of the time spent in those functions. The histogram
content can be displayed in the files:
- trace_stats/function<cpu> ( function0, function1, etc).
+ trace_stat/function<cpu> ( function0, function1, etc).
- trace_stats:
+ trace_stat:
A directory that holds different tracing stats.
diff --git a/Documentation/trace/histogram.txt b/Documentation/trace/histogram.rst
index 7ffea6aa22e3..5ac724baea7d 100644
--- a/Documentation/trace/histogram.txt
+++ b/Documentation/trace/histogram.rst
@@ -1,6 +1,8 @@
- Event Histograms
+================
+Event Histograms
+================
- Documentation written by Tom Zanussi
+Documentation written by Tom Zanussi
1. Introduction
===============
@@ -19,7 +21,7 @@
derived from one or more trace event format fields and/or event
counts (hitcount).
- The format of a hist trigger is as follows:
+ The format of a hist trigger is as follows::
hist:keys=<field1[,field2,...]>[:values=<field1[,field2,...]>]
[:sort=<field1[,field2,...]>][:size=#entries][:pause][:continue]
@@ -68,6 +70,7 @@
modified by appending any of the following modifiers to the field
name:
+ =========== ==========================================
.hex display a number as a hex value
.sym display an address as a symbol
.sym-offset display an address as a symbol and offset
@@ -75,6 +78,7 @@
.execname display a common_pid as a program name
.log2 display log2 value rather than raw number
.usecs display a common_timestamp in microseconds
+ =========== ==========================================
Note that in general the semantics of a given field aren't
interpreted when applying a modifier to it, but there are some
@@ -92,15 +96,15 @@
pid-specific comm fields in the event itself.
A typical usage scenario would be the following to enable a hist
- trigger, read its current contents, and then turn it off:
+ trigger, read its current contents, and then turn it off::
- # echo 'hist:keys=skbaddr.hex:vals=len' > \
- /sys/kernel/debug/tracing/events/net/netif_rx/trigger
+ # echo 'hist:keys=skbaddr.hex:vals=len' > \
+ /sys/kernel/debug/tracing/events/net/netif_rx/trigger
- # cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
+ # cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
- # echo '!hist:keys=skbaddr.hex:vals=len' > \
- /sys/kernel/debug/tracing/events/net/netif_rx/trigger
+ # echo '!hist:keys=skbaddr.hex:vals=len' > \
+ /sys/kernel/debug/tracing/events/net/netif_rx/trigger
The trigger file itself can be read to show the details of the
currently attached hist trigger. This information is also displayed
@@ -140,7 +144,7 @@
can be attached to a given event, allowing that event to kick off
and stop aggregations on a host of other events.
- The format is very similar to the enable/disable_event triggers:
+ The format is very similar to the enable/disable_event triggers::
enable_hist:<system>:<event>[:count]
disable_hist:<system>:<event>[:count]
@@ -153,16 +157,16 @@
A typical usage scenario for the enable_hist/disable_hist triggers
would be to first set up a paused hist trigger on some event,
followed by an enable_hist/disable_hist pair that turns the hist
- aggregation on and off when conditions of interest are hit:
+ aggregation on and off when conditions of interest are hit::
- # echo 'hist:keys=skbaddr.hex:vals=len:pause' > \
- /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+ # echo 'hist:keys=skbaddr.hex:vals=len:pause' > \
+ /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
- # echo 'enable_hist:net:netif_receive_skb if filename==/usr/bin/wget' > \
- /sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
+ # echo 'enable_hist:net:netif_receive_skb if filename==/usr/bin/wget' > \
+ /sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
- # echo 'disable_hist:net:netif_receive_skb if comm==wget' > \
- /sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
+ # echo 'disable_hist:net:netif_receive_skb if comm==wget' > \
+ /sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
The above sets up an initially paused hist trigger which is unpaused
and starts aggregating events when a given program is executed, and
@@ -172,8 +176,8 @@
The examples below provide a more concrete illustration of the
concepts and typical usage patterns discussed above.
- 'special' event fields
- ------------------------
+'special' event fields
+------------------------
There are a number of 'special event fields' available for use as
keys or values in a hist trigger. These look like and behave as if
@@ -182,14 +186,16 @@
event, and can be used anywhere an actual event field could be.
They are:
- common_timestamp u64 - timestamp (from ring buffer) associated
- with the event, in nanoseconds. May be
- modified by .usecs to have timestamps
- interpreted as microseconds.
- cpu int - the cpu on which the event occurred.
+ ====================== ==== =======================================
+ common_timestamp u64 timestamp (from ring buffer) associated
+ with the event, in nanoseconds. May be
+ modified by .usecs to have timestamps
+ interpreted as microseconds.
+ cpu int the cpu on which the event occurred.
+ ====================== ==== =======================================
- Extended error information
- --------------------------
+Extended error information
+--------------------------
For some error conditions encountered when invoking a hist trigger
command, extended error information is available via the
@@ -199,7 +205,7 @@
be available until the next hist trigger command for that event.
If available for a given error condition, the extended error
- information and usage takes the following form:
+ information and usage takes the following form::
# echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
echo: write error: Invalid argument
@@ -213,7 +219,7 @@
The first set of examples creates aggregations using the kmalloc
event. The fields that can be used for the hist trigger are listed
- in the kmalloc event's format file:
+ in the kmalloc event's format file::
# cat /sys/kernel/debug/tracing/events/kmem/kmalloc/format
name: kmalloc
@@ -232,7 +238,7 @@
We'll start by creating a hist trigger that generates a simple table
that lists the total number of bytes requested for each function in
- the kernel that made one or more calls to kmalloc:
+ the kernel that made one or more calls to kmalloc::
# echo 'hist:key=call_site:val=bytes_req' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -247,7 +253,7 @@
We'll let it run for a while and then dump the contents of the 'hist'
file in the kmalloc event's subdirectory (for readability, a number
- of entries have been omitted):
+ of entries have been omitted)::
# cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
# trigger info: hist:keys=call_site:vals=bytes_req:sort=hitcount:size=2048 [active]
@@ -287,7 +293,7 @@
specified in the trigger, followed by the value(s) also specified in
the trigger. At the beginning of the output is a line that displays
the trigger info, which can also be displayed by reading the
- 'trigger' file:
+ 'trigger' file::
# cat /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
hist:keys=call_site:vals=bytes_req:sort=hitcount:size=2048 [active]
@@ -317,7 +323,7 @@
frequencies.
To turn the hist trigger off, simply call up the trigger in the
- command history and re-execute it with a '!' prepended:
+ command history and re-execute it with a '!' prepended::
# echo '!hist:key=call_site:val=bytes_req' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -325,7 +331,7 @@
Finally, notice that the call_site as displayed in the output above
isn't really very useful. It's an address, but normally addresses
are displayed in hex. To have a numeric field displayed as a hex
- value, simply append '.hex' to the field name in the trigger:
+ value, simply append '.hex' to the field name in the trigger::
# echo 'hist:key=call_site.hex:val=bytes_req' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -370,7 +376,7 @@
when looking at text addresses are the corresponding symbols
instead. To have an address displayed as symbolic value instead,
simply append '.sym' or '.sym-offset' to the field name in the
- trigger:
+ trigger::
# echo 'hist:key=call_site.sym:val=bytes_req' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -420,7 +426,7 @@
run. If instead we wanted to see the top kmalloc callers in
terms of the number of bytes requested rather than the number of
calls, and we wanted the top caller to appear at the top, we can use
- the 'sort' parameter, along with the 'descending' modifier:
+ the 'sort' parameter, along with the 'descending' modifier::
# echo 'hist:key=call_site.sym:val=bytes_req:sort=bytes_req.descending' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -461,7 +467,7 @@
Dropped: 0
To display the offset and size information in addition to the symbol
- name, just use 'sym-offset' instead:
+ name, just use 'sym-offset' instead::
# echo 'hist:key=call_site.sym-offset:val=bytes_req:sort=bytes_req.descending' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -500,7 +506,7 @@
We can also add multiple fields to the 'values' parameter. For
example, we might want to see the total number of bytes allocated
alongside bytes requested, and display the result sorted by bytes
- allocated in a descending order:
+ allocated in a descending order::
# echo 'hist:keys=call_site.sym:values=bytes_req,bytes_alloc:sort=bytes_alloc.descending' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -543,7 +549,7 @@
the hist trigger display symbolic call_sites, we can have the hist
trigger additionally display the complete set of kernel stack traces
that led to each call_site. To do that, we simply use the special
- value 'stacktrace' for the key parameter:
+ value 'stacktrace' for the key parameter::
# echo 'hist:keys=stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -554,7 +560,7 @@
event, along with a running total of any of the event fields for
that event. Here we tally bytes requested and bytes allocated for
every callpath in the system that led up to a kmalloc (in this case
- every callpath to a kmalloc for a kernel compile):
+ every callpath to a kmalloc for a kernel compile)::
# cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
# trigger info: hist:keys=stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]
@@ -652,7 +658,7 @@
gather and display sorted totals for each process, you can use the
special .execname modifier to display the executable names for the
processes in the table rather than raw pids. The example below
- keeps a per-process sum of total bytes read:
+ keeps a per-process sum of total bytes read::
# echo 'hist:key=common_pid.execname:val=count:sort=count.descending' > \
/sys/kernel/debug/tracing/events/syscalls/sys_enter_read/trigger
@@ -693,7 +699,7 @@
gather and display a list of systemwide syscall hits, you can use
the special .syscall modifier to display the syscall names rather
than raw ids. The example below keeps a running total of syscall
- counts for the system during the run:
+ counts for the system during the run::
# echo 'hist:key=id.syscall:val=hitcount' > \
/sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/trigger
@@ -735,19 +741,19 @@
Entries: 72
Dropped: 0
- The syscall counts above provide a rough overall picture of system
- call activity on the system; we can see for example that the most
- popular system call on this system was the 'sys_ioctl' system call.
+ The syscall counts above provide a rough overall picture of system
+ call activity on the system; we can see for example that the most
+ popular system call on this system was the 'sys_ioctl' system call.
- We can use 'compound' keys to refine that number and provide some
- further insight as to which processes exactly contribute to the
- overall ioctl count.
+ We can use 'compound' keys to refine that number and provide some
+ further insight as to which processes exactly contribute to the
+ overall ioctl count.
- The command below keeps a hitcount for every unique combination of
- system call id and pid - the end result is essentially a table
- that keeps a per-pid sum of system call hits. The results are
- sorted using the system call id as the primary key, and the
- hitcount sum as the secondary key:
+ The command below keeps a hitcount for every unique combination of
+ system call id and pid - the end result is essentially a table
+ that keeps a per-pid sum of system call hits. The results are
+ sorted using the system call id as the primary key, and the
+ hitcount sum as the secondary key::
# echo 'hist:key=id.syscall,common_pid.execname:val=hitcount:sort=id,hitcount' > \
/sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/trigger
@@ -793,11 +799,11 @@
Entries: 323
Dropped: 0
- The above list does give us a breakdown of the ioctl syscall by
- pid, but it also gives us quite a bit more than that, which we
- don't really care about at the moment. Since we know the syscall
- id for sys_ioctl (16, displayed next to the sys_ioctl name), we
- can use that to filter out all the other syscalls:
+ The above list does give us a breakdown of the ioctl syscall by
+ pid, but it also gives us quite a bit more than that, which we
+ don't really care about at the moment. Since we know the syscall
+ id for sys_ioctl (16, displayed next to the sys_ioctl name), we
+ can use that to filter out all the other syscalls::
# echo 'hist:key=id.syscall,common_pid.execname:val=hitcount:sort=id,hitcount if id == 16' > \
/sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/trigger
@@ -829,18 +835,18 @@
Entries: 103
Dropped: 0
- The above output shows that 'compiz' and 'Xorg' are far and away
- the heaviest ioctl callers (which might lead to questions about
- whether they really need to be making all those calls and to
- possible avenues for further investigation.)
+ The above output shows that 'compiz' and 'Xorg' are far and away
+ the heaviest ioctl callers (which might lead to questions about
+ whether they really need to be making all those calls and to
+ possible avenues for further investigation).
- The compound key examples used a key and a sum value (hitcount) to
- sort the output, but we can just as easily use two keys instead.
- Here's an example where we use a compound key composed of the the
- common_pid and size event fields. Sorting with pid as the primary
- key and 'size' as the secondary key allows us to display an
- ordered summary of the recvfrom sizes, with counts, received by
- each process:
+ The compound key examples used a key and a sum value (hitcount) to
+ sort the output, but we can just as easily use two keys instead.
+ Here's an example where we use a compound key composed of the
+ common_pid and size event fields. Sorting with pid as the primary
+ key and 'size' as the secondary key allows us to display an
+ ordered summary of the recvfrom sizes, with counts, received by
+ each process::
# echo 'hist:key=common_pid.execname,size:val=hitcount:sort=common_pid,size' > \
/sys/kernel/debug/tracing/events/syscalls/sys_enter_recvfrom/trigger
@@ -893,7 +899,7 @@
demonstrates how you can manually pause and continue a hist trigger.
In this example, we'll aggregate fork counts; we don't expect a
large number of entries in the hash table, so we'll drop it to a
- much smaller number, say 256:
+ much smaller number, say 256::
# echo 'hist:key=child_comm:val=hitcount:size=256' > \
/sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
@@ -929,7 +935,7 @@
If we want to pause the hist trigger, we can simply append :pause to
the command that started the trigger. Notice that the trigger info
- displays as [paused]:
+ displays as [paused]::
# echo 'hist:key=child_comm:val=hitcount:size=256:pause' >> \
/sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
@@ -966,7 +972,7 @@
To manually continue having the trigger aggregate events, append
:cont instead. Notice that the trigger info displays as [active]
- again, and the data has changed:
+ again, and the data has changed::
# echo 'hist:key=child_comm:val=hitcount:size=256:cont' >> \
/sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
@@ -1020,7 +1026,7 @@
wget.
First we set up an initially paused stacktrace trigger on the
- netif_receive_skb event:
+ netif_receive_skb event::
# echo 'hist:key=stacktrace:vals=len:pause' > \
/sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
@@ -1031,7 +1037,7 @@
set up on netif_receive_skb if and only if it sees a
sched_process_exec event with a filename of '/usr/bin/wget'. When
that happens, all netif_receive_skb events are aggregated into a
- hash table keyed on stacktrace:
+ hash table keyed on stacktrace::
# echo 'enable_hist:net:netif_receive_skb if filename==/usr/bin/wget' > \
/sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
@@ -1039,7 +1045,7 @@
The aggregation continues until the netif_receive_skb hist trigger is paused
again, which is what the following disable_hist event does by
creating a similar setup on the sched_process_exit event, using the
- filter 'comm==wget':
+ filter 'comm==wget'::
# echo 'disable_hist:net:netif_receive_skb if comm==wget' > \
/sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
@@ -1051,7 +1057,7 @@
The overall effect is that netif_receive_skb events are aggregated
into the hash table for only the duration of the wget. Executing a
wget command and then listing the 'hist' file will display the
- output generated by the wget command:
+ output generated by the wget command::
$ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz
@@ -1136,13 +1142,13 @@
Suppose we wanted to try another run of the previous example but
this time also wanted to see the complete list of events that went
into the histogram. In order to avoid having to set everything up
- again, we can just clear the histogram first:
+ again, we can just clear the histogram first::
# echo 'hist:key=stacktrace:vals=len:clear' >> \
/sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
Just to verify that it is in fact cleared, here's what we now see in
- the hist file:
+ the hist file::
# cat /sys/kernel/debug/tracing/events/net/netif_receive_skb/hist
# trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
@@ -1156,7 +1162,7 @@
event occurring during the new run, which are in fact the same
events being aggregated into the hash table, we add some additional
'enable_event' events to the triggering sched_process_exec and
- sched_process_exit events as such:
+ sched_process_exit events as such::
# echo 'enable_event:net:netif_receive_skb if filename==/usr/bin/wget' > \
/sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
@@ -1167,7 +1173,7 @@
If you read the trigger files for the sched_process_exec and
sched_process_exit triggers, you should see two triggers for each:
one enabling/disabling the hist aggregation and the other
- enabling/disabling the logging of events:
+ enabling/disabling the logging of events::
# cat /sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
enable_event:net:netif_receive_skb:unlimited if filename==/usr/bin/wget
@@ -1181,13 +1187,13 @@
sched_process_exit events is hit and matches 'wget', it enables or
disables both the histogram and the event log, and what you end up
with is a hash table and set of events just covering the specified
- duration. Run the wget command again:
+ duration. Run the wget command again::
$ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz
Displaying the 'hist' file should show something similar to what you
saw in the last run, but this time you should also see the
- individual events in the trace file:
+ individual events in the trace file::
# cat /sys/kernel/debug/tracing/trace
@@ -1220,7 +1226,7 @@
attached to a given event. This capability can be useful for
creating a set of different summaries derived from the same set of
events, or for comparing the effects of different filters, among
- other things.
+ other things::
# echo 'hist:keys=skbaddr.hex:vals=len if len < 0' >> \
/sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
@@ -1241,7 +1247,7 @@
any existing hist triggers beforehand).
Displaying the contents of the 'hist' file for the event shows the
- contents of all five histograms:
+ contents of all five histograms::
# cat /sys/kernel/debug/tracing/events/net/netif_receive_skb/hist
@@ -1361,7 +1367,7 @@
output of events generated by tracepoints contained inside inline
functions, but names can be used in a hist trigger on any event.
For example, these two triggers when hit will update the same 'len'
- field in the shared 'foo' histogram data:
+ field in the shared 'foo' histogram data::
# echo 'hist:name=foo:keys=skbaddr.hex:vals=len' > \
/sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
@@ -1369,7 +1375,7 @@
/sys/kernel/debug/tracing/events/net/netif_rx/trigger
You can see that they're updating common histogram data by reading
- each event's hist files at the same time:
+ each event's hist files at the same time::
# cat /sys/kernel/debug/tracing/events/net/netif_receive_skb/hist;
cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
@@ -1482,7 +1488,7 @@
And here's an example that shows how to combine histogram data from
any two events even if they don't share any 'compatible' fields
other than 'hitcount' and 'stacktrace'. These commands create a
- couple of triggers named 'bar' using those fields:
+ couple of triggers named 'bar' using those fields::
# echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
/sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
@@ -1490,7 +1496,7 @@
/sys/kernel/debug/tracing/events/net/netif_rx/trigger
And displaying the output of either shows some interesting if
- somewhat confusing output:
+ somewhat confusing output::
# cat /sys/kernel/debug/tracing/events/sched/sched_process_fork/hist
# cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
@@ -1705,7 +1711,7 @@ to any event field.
Either keys or values can be saved and retrieved in this way. This
creates a variable named 'ts0' for a histogram entry with the key
-'next_pid':
+'next_pid'::
# echo 'hist:keys=next_pid:vals=$ts0:ts0=common_timestamp ... >> \
event/trigger
@@ -1721,40 +1727,40 @@ Because 'vals=' is used, the common_timestamp variable value above
will also be summed as a normal histogram value would (though for a
timestamp it makes little sense).
-The below shows that a key value can also be saved in the same way:
+The below shows that a key value can also be saved in the same way::
# echo 'hist:timer_pid=common_pid:key=timer_pid ...' >> event/trigger
If a variable isn't a key variable or prefixed with 'vals=', the
associated event field will be saved in a variable but won't be summed
-as a value:
+as a value::
# echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger
Multiple variables can be assigned at the same time. The below would
result in both ts0 and b being created as variables, with both
-common_timestamp and field1 additionally being summed as values:
+common_timestamp and field1 additionally being summed as values::
# echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \
event/trigger
Note that variable assignments can appear either preceding or
following their use. The command below behaves identically to the
-command above:
+command above::
# echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \
event/trigger
Any number of variables not bound to a 'vals=' prefix can also be
assigned by simply separating them with colons. Below is the same
-thing but without the values being summed in the histogram:
+thing but without the values being summed in the histogram::
# echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger
Variables set as above can be referenced and used in expressions on
another event.
-For example, here's how a latency can be calculated:
+For example, here's how a latency can be calculated::
# echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger
# echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger
@@ -1764,7 +1770,7 @@ variable ts0. In the next line, ts0 is subtracted from the second
event's timestamp to produce the latency, which is then assigned into
yet another variable, 'wakeup_lat'. The hist trigger below in turn
makes use of the wakeup_lat variable to compute a combined latency
-using the same key and variable from yet another event:
+using the same key and variable from yet another event::
# echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger
@@ -1784,7 +1790,7 @@ separated by semicolons, to the tracing/synthetic_events file.
For instance, the following creates a new event named 'wakeup_latency'
with 3 fields: lat, pid, and prio. Each of those fields is simply a
-variable reference to a variable on another event:
+variable reference to a variable on another event::
# echo 'wakeup_latency \
u64 lat; \
@@ -1793,13 +1799,13 @@ variable reference to a variable on another event:
/sys/kernel/debug/tracing/synthetic_events
Reading the tracing/synthetic_events file lists all the currently
-defined synthetic events, in this case the event defined above:
+defined synthetic events, in this case the event defined above::
# cat /sys/kernel/debug/tracing/synthetic_events
wakeup_latency u64 lat; pid_t pid; int prio
An existing synthetic event definition can be removed by prepending
-the command that defined it with a '!':
+the command that defined it with a '!'::
# echo '!wakeup_latency u64 lat pid_t pid int prio' >> \
/sys/kernel/debug/tracing/synthetic_events
@@ -1811,13 +1817,13 @@ and variables defined on other events (see Section 2.2.3 below on
how that is done using hist trigger 'onmatch' action). Once that is
done, the 'wakeup_latency' synthetic event instance is created.
-A histogram can now be defined for the new synthetic event:
+A histogram can now be defined for the new synthetic event::
# echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
/sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
The new event is created under the tracing/events/synthetic/ directory
-and looks and behaves just like any other event:
+and looks and behaves just like any other event::
# ls /sys/kernel/debug/tracing/events/synthetic/wakeup_latency
enable filter format hist id trigger
@@ -1872,74 +1878,74 @@ hist trigger specification.
As an example, the below defines a simple synthetic event and uses
a variable defined on the sched_wakeup_new event as a parameter
when invoking the synthetic event. Here we define the synthetic
- event:
+ event::
- # echo 'wakeup_new_test pid_t pid' >> \
- /sys/kernel/debug/tracing/synthetic_events
+ # echo 'wakeup_new_test pid_t pid' >> \
+ /sys/kernel/debug/tracing/synthetic_events
- # cat /sys/kernel/debug/tracing/synthetic_events
- wakeup_new_test pid_t pid
+ # cat /sys/kernel/debug/tracing/synthetic_events
+ wakeup_new_test pid_t pid
The following hist trigger both defines the missing testpid
variable and specifies an onmatch() action that generates a
wakeup_new_test synthetic event whenever a sched_wakeup_new event
occurs, which because of the 'if comm == "cyclictest"' filter only
- happens when the executable is cyclictest:
+ happens when the executable is cyclictest::
- # echo 'hist:keys=$testpid:testpid=pid:onmatch(sched.sched_wakeup_new).\
- wakeup_new_test($testpid) if comm=="cyclictest"' >> \
- /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger
+ # echo 'hist:keys=$testpid:testpid=pid:onmatch(sched.sched_wakeup_new).\
+ wakeup_new_test($testpid) if comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger
Creating and displaying a histogram based on those events is now
just a matter of using the fields and new synthetic event in the
- tracing/events/synthetic directory, as usual:
+ tracing/events/synthetic directory, as usual::
- # echo 'hist:keys=pid:sort=pid' >> \
- /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/trigger
+ # echo 'hist:keys=pid:sort=pid' >> \
+ /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/trigger
Running 'cyclictest' should cause wakeup_new events to generate
wakeup_new_test synthetic events which should result in histogram
- output in the wakeup_new_test event's hist file:
+ output in the wakeup_new_test event's hist file::
- # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/hist
+ # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/hist
A more typical usage would be to use two events to calculate a
latency. The following example uses a set of hist triggers to
- produce a 'wakeup_latency' histogram:
+ produce a 'wakeup_latency' histogram.
- First, we define a 'wakeup_latency' synthetic event:
+ First, we define a 'wakeup_latency' synthetic event::
- # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
- /sys/kernel/debug/tracing/synthetic_events
+ # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
+ /sys/kernel/debug/tracing/synthetic_events
Next, we specify that whenever we see a sched_waking event for a
- cyclictest thread, save the timestamp in a 'ts0' variable:
+ cyclictest thread, save the timestamp in a 'ts0' variable::
- # echo 'hist:keys=$saved_pid:saved_pid=pid:ts0=common_timestamp.usecs \
- if comm=="cyclictest"' >> \
- /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+ # echo 'hist:keys=$saved_pid:saved_pid=pid:ts0=common_timestamp.usecs \
+ if comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
Then, when the corresponding thread is actually scheduled onto the
CPU by a sched_switch event, calculate the latency and use that
along with another variable and an event field to generate a
- wakeup_latency synthetic event:
+ wakeup_latency synthetic event::
- # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:\
- onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,\
- $saved_pid,next_prio) if next_comm=="cyclictest"' >> \
- /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:\
+ onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,\
+ $saved_pid,next_prio) if next_comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
We also need to create a histogram on the wakeup_latency synthetic
- event in order to aggregate the generated synthetic event data:
+ event in order to aggregate the generated synthetic event data::
- # echo 'hist:keys=pid,prio,lat:sort=pid,lat' >> \
- /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
+ # echo 'hist:keys=pid,prio,lat:sort=pid,lat' >> \
+ /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
Finally, once we've run cyclictest to actually generate some
events, we can see the output by looking at the wakeup_latency
- synthetic event's hist file:
+ synthetic event's hist file::
- # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/hist
+ # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/hist
- onmax(var).save(field,...)
@@ -1961,38 +1967,38 @@ hist trigger specification.
back to that pid, the timestamp difference is calculated. If the
resulting latency, stored in wakeup_lat, exceeds the current
maximum latency, the values specified in the save() fields are
- recorded:
+ recorded::
- # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
- if comm=="cyclictest"' >> \
- /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+ # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
+ if comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
- # echo 'hist:keys=next_pid:\
- wakeup_lat=common_timestamp.usecs-$ts0:\
- onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) \
- if next_comm=="cyclictest"' >> \
- /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+ # echo 'hist:keys=next_pid:\
+ wakeup_lat=common_timestamp.usecs-$ts0:\
+ onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) \
+ if next_comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
When the histogram is displayed, the max value and the saved
values corresponding to the max are displayed following the rest
- of the fields:
+ of the fields::
- # cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist
- { next_pid: 2255 } hitcount: 239
- common_timestamp-ts0: 0
- max: 27
- next_comm: cyclictest
- prev_pid: 0 prev_prio: 120 prev_comm: swapper/1
+ # cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist
+ { next_pid: 2255 } hitcount: 239
+ common_timestamp-ts0: 0
+ max: 27
+ next_comm: cyclictest
+ prev_pid: 0 prev_prio: 120 prev_comm: swapper/1
- { next_pid: 2256 } hitcount: 2355
- common_timestamp-ts0: 0
- max: 49 next_comm: cyclictest
- prev_pid: 0 prev_prio: 120 prev_comm: swapper/0
+ { next_pid: 2256 } hitcount: 2355
+ common_timestamp-ts0: 0
+ max: 49 next_comm: cyclictest
+ prev_pid: 0 prev_prio: 120 prev_comm: swapper/0
- Totals:
- Hits: 12970
- Entries: 2
- Dropped: 0
+ Totals:
+ Hits: 12970
+ Entries: 2
+ Dropped: 0
3. User space creating a trigger
--------------------------------
@@ -2002,24 +2008,24 @@ ring buffer. This can also act like an event, by writing into the trigger
file located in /sys/kernel/tracing/events/ftrace/print/
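For instance, a minimal sketch (the key choice is ours) attaching a hist
trigger directly to that trace_marker 'print' event to count writes per
pid::

  # echo 'hist:keys=common_pid' >> \
        /sys/kernel/tracing/events/ftrace/print/trigger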
Modifying cyclictest to write into the trace_marker file before it sleeps
-and after it wakes up, something like this:
+and after it wakes up, something like this::
-static void traceputs(char *str)
-{
+ static void traceputs(char *str)
+ {
/* tracemark_fd is the trace_marker file descriptor */
if (tracemark_fd < 0)
return;
/* write the tracemark message */
write(tracemark_fd, str, strlen(str));
-}
+ }
-And later add something like:
+And later add something like::
traceputs("start");
clock_nanosleep(...);
traceputs("end");
-We can make a histogram from this:
+We can make a histogram from this::
# cd /sys/kernel/tracing
# echo 'latency u64 lat' > synthetic_events
@@ -2034,7 +2040,7 @@ it will call the "latency" synthetic event with the calculated latency as its
parameter. Finally, a histogram is added to the latency synthetic event to
record the calculated latency along with the pid.
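The exact commands are not repeated here, but a sketch of that final
trigger, consistent with the trigger info shown in the histogram output
below, would be::

  # echo 'hist:keys=lat,common_pid:sort=lat' >> \
        events/synthetic/latency/trigger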
-Now running cyclictest with:
+Now running cyclictest with::
# ./cyclictest -p80 -d0 -i250 -n -a -t --tracemark -b 1000
@@ -2049,297 +2055,297 @@ Now running cyclictest with:
Note, the -b 1000 is used just to make --tracemark available.
-Then we can see the histogram created by this with:
+Then we can see the histogram created by this with::
# cat events/synthetic/latency/hist
-# event histogram
-#
-# trigger info: hist:keys=lat,common_pid:vals=hitcount:sort=lat:size=2048 [active]
-#
-
-{ lat: 107, common_pid: 2039 } hitcount: 1
-{ lat: 122, common_pid: 2041 } hitcount: 1
-{ lat: 166, common_pid: 2039 } hitcount: 1
-{ lat: 174, common_pid: 2039 } hitcount: 1
-{ lat: 194, common_pid: 2041 } hitcount: 1
-{ lat: 196, common_pid: 2036 } hitcount: 1
-{ lat: 197, common_pid: 2038 } hitcount: 1
-{ lat: 198, common_pid: 2039 } hitcount: 1
-{ lat: 199, common_pid: 2039 } hitcount: 1
-{ lat: 200, common_pid: 2041 } hitcount: 1
-{ lat: 201, common_pid: 2039 } hitcount: 2
-{ lat: 202, common_pid: 2038 } hitcount: 1
-{ lat: 202, common_pid: 2043 } hitcount: 1
-{ lat: 203, common_pid: 2039 } hitcount: 1
-{ lat: 203, common_pid: 2036 } hitcount: 1
-{ lat: 203, common_pid: 2041 } hitcount: 1
-{ lat: 206, common_pid: 2038 } hitcount: 2
-{ lat: 207, common_pid: 2039 } hitcount: 1
-{ lat: 207, common_pid: 2036 } hitcount: 1
-{ lat: 208, common_pid: 2040 } hitcount: 1
-{ lat: 209, common_pid: 2043 } hitcount: 1
-{ lat: 210, common_pid: 2039 } hitcount: 1
-{ lat: 211, common_pid: 2039 } hitcount: 4
-{ lat: 212, common_pid: 2043 } hitcount: 1
-{ lat: 212, common_pid: 2039 } hitcount: 2
-{ lat: 213, common_pid: 2039 } hitcount: 1
-{ lat: 214, common_pid: 2038 } hitcount: 1
-{ lat: 214, common_pid: 2039 } hitcount: 2
-{ lat: 214, common_pid: 2042 } hitcount: 1
-{ lat: 215, common_pid: 2039 } hitcount: 1
-{ lat: 217, common_pid: 2036 } hitcount: 1
-{ lat: 217, common_pid: 2040 } hitcount: 1
-{ lat: 217, common_pid: 2039 } hitcount: 1
-{ lat: 218, common_pid: 2039 } hitcount: 6
-{ lat: 219, common_pid: 2039 } hitcount: 9
-{ lat: 220, common_pid: 2039 } hitcount: 11
-{ lat: 221, common_pid: 2039 } hitcount: 5
-{ lat: 221, common_pid: 2042 } hitcount: 1
-{ lat: 222, common_pid: 2039 } hitcount: 7
-{ lat: 223, common_pid: 2036 } hitcount: 1
-{ lat: 223, common_pid: 2039 } hitcount: 3
-{ lat: 224, common_pid: 2039 } hitcount: 4
-{ lat: 224, common_pid: 2037 } hitcount: 1
-{ lat: 224, common_pid: 2036 } hitcount: 2
-{ lat: 225, common_pid: 2039 } hitcount: 5
-{ lat: 225, common_pid: 2042 } hitcount: 1
-{ lat: 226, common_pid: 2039 } hitcount: 7
-{ lat: 226, common_pid: 2036 } hitcount: 4
-{ lat: 227, common_pid: 2039 } hitcount: 6
-{ lat: 227, common_pid: 2036 } hitcount: 12
-{ lat: 227, common_pid: 2043 } hitcount: 1
-{ lat: 228, common_pid: 2039 } hitcount: 7
-{ lat: 228, common_pid: 2036 } hitcount: 14
-{ lat: 229, common_pid: 2039 } hitcount: 9
-{ lat: 229, common_pid: 2036 } hitcount: 8
-{ lat: 229, common_pid: 2038 } hitcount: 1
-{ lat: 230, common_pid: 2039 } hitcount: 11
-{ lat: 230, common_pid: 2036 } hitcount: 6
-{ lat: 230, common_pid: 2043 } hitcount: 1
-{ lat: 230, common_pid: 2042 } hitcount: 2
-{ lat: 231, common_pid: 2041 } hitcount: 1
-{ lat: 231, common_pid: 2036 } hitcount: 6
-{ lat: 231, common_pid: 2043 } hitcount: 1
-{ lat: 231, common_pid: 2039 } hitcount: 8
-{ lat: 232, common_pid: 2037 } hitcount: 1
-{ lat: 232, common_pid: 2039 } hitcount: 6
-{ lat: 232, common_pid: 2040 } hitcount: 2
-{ lat: 232, common_pid: 2036 } hitcount: 5
-{ lat: 232, common_pid: 2043 } hitcount: 1
-{ lat: 233, common_pid: 2036 } hitcount: 5
-{ lat: 233, common_pid: 2039 } hitcount: 11
-{ lat: 234, common_pid: 2039 } hitcount: 4
-{ lat: 234, common_pid: 2038 } hitcount: 2
-{ lat: 234, common_pid: 2043 } hitcount: 2
-{ lat: 234, common_pid: 2036 } hitcount: 11
-{ lat: 234, common_pid: 2040 } hitcount: 1
-{ lat: 235, common_pid: 2037 } hitcount: 2
-{ lat: 235, common_pid: 2036 } hitcount: 8
-{ lat: 235, common_pid: 2043 } hitcount: 2
-{ lat: 235, common_pid: 2039 } hitcount: 5
-{ lat: 235, common_pid: 2042 } hitcount: 2
-{ lat: 235, common_pid: 2040 } hitcount: 4
-{ lat: 235, common_pid: 2041 } hitcount: 1
-{ lat: 236, common_pid: 2036 } hitcount: 7
-{ lat: 236, common_pid: 2037 } hitcount: 1
-{ lat: 236, common_pid: 2041 } hitcount: 5
-{ lat: 236, common_pid: 2039 } hitcount: 3
-{ lat: 236, common_pid: 2043 } hitcount: 9
-{ lat: 236, common_pid: 2040 } hitcount: 7
-{ lat: 237, common_pid: 2037 } hitcount: 1
-{ lat: 237, common_pid: 2040 } hitcount: 1
-{ lat: 237, common_pid: 2036 } hitcount: 9
-{ lat: 237, common_pid: 2039 } hitcount: 3
-{ lat: 237, common_pid: 2043 } hitcount: 8
-{ lat: 237, common_pid: 2042 } hitcount: 2
-{ lat: 237, common_pid: 2041 } hitcount: 2
-{ lat: 238, common_pid: 2043 } hitcount: 10
-{ lat: 238, common_pid: 2040 } hitcount: 1
-{ lat: 238, common_pid: 2037 } hitcount: 9
-{ lat: 238, common_pid: 2038 } hitcount: 1
-{ lat: 238, common_pid: 2039 } hitcount: 1
-{ lat: 238, common_pid: 2042 } hitcount: 3
-{ lat: 238, common_pid: 2036 } hitcount: 7
-{ lat: 239, common_pid: 2041 } hitcount: 1
-{ lat: 239, common_pid: 2043 } hitcount: 11
-{ lat: 239, common_pid: 2037 } hitcount: 11
-{ lat: 239, common_pid: 2038 } hitcount: 6
-{ lat: 239, common_pid: 2036 } hitcount: 7
-{ lat: 239, common_pid: 2040 } hitcount: 1
-{ lat: 239, common_pid: 2042 } hitcount: 9
-{ lat: 240, common_pid: 2037 } hitcount: 29
-{ lat: 240, common_pid: 2043 } hitcount: 15
-{ lat: 240, common_pid: 2040 } hitcount: 44
-{ lat: 240, common_pid: 2039 } hitcount: 1
-{ lat: 240, common_pid: 2041 } hitcount: 2
-{ lat: 240, common_pid: 2038 } hitcount: 1
-{ lat: 240, common_pid: 2036 } hitcount: 10
-{ lat: 240, common_pid: 2042 } hitcount: 13
-{ lat: 241, common_pid: 2036 } hitcount: 21
-{ lat: 241, common_pid: 2041 } hitcount: 36
-{ lat: 241, common_pid: 2037 } hitcount: 34
-{ lat: 241, common_pid: 2042 } hitcount: 14
-{ lat: 241, common_pid: 2040 } hitcount: 94
-{ lat: 241, common_pid: 2039 } hitcount: 12
-{ lat: 241, common_pid: 2038 } hitcount: 2
-{ lat: 241, common_pid: 2043 } hitcount: 28
-{ lat: 242, common_pid: 2040 } hitcount: 109
-{ lat: 242, common_pid: 2041 } hitcount: 506
-{ lat: 242, common_pid: 2039 } hitcount: 155
-{ lat: 242, common_pid: 2042 } hitcount: 21
-{ lat: 242, common_pid: 2037 } hitcount: 52
-{ lat: 242, common_pid: 2043 } hitcount: 21
-{ lat: 242, common_pid: 2036 } hitcount: 16
-{ lat: 242, common_pid: 2038 } hitcount: 156
-{ lat: 243, common_pid: 2037 } hitcount: 46
-{ lat: 243, common_pid: 2039 } hitcount: 40
-{ lat: 243, common_pid: 2042 } hitcount: 119
-{ lat: 243, common_pid: 2041 } hitcount: 611
-{ lat: 243, common_pid: 2036 } hitcount: 69
-{ lat: 243, common_pid: 2038 } hitcount: 784
-{ lat: 243, common_pid: 2040 } hitcount: 323
-{ lat: 243, common_pid: 2043 } hitcount: 14
-{ lat: 244, common_pid: 2043 } hitcount: 35
-{ lat: 244, common_pid: 2042 } hitcount: 305
-{ lat: 244, common_pid: 2039 } hitcount: 8
-{ lat: 244, common_pid: 2040 } hitcount: 4515
-{ lat: 244, common_pid: 2038 } hitcount: 371
-{ lat: 244, common_pid: 2037 } hitcount: 31
-{ lat: 244, common_pid: 2036 } hitcount: 114
-{ lat: 244, common_pid: 2041 } hitcount: 3396
-{ lat: 245, common_pid: 2036 } hitcount: 700
-{ lat: 245, common_pid: 2041 } hitcount: 2772
-{ lat: 245, common_pid: 2037 } hitcount: 268
-{ lat: 245, common_pid: 2039 } hitcount: 472
-{ lat: 245, common_pid: 2038 } hitcount: 2758
-{ lat: 245, common_pid: 2042 } hitcount: 3833
-{ lat: 245, common_pid: 2040 } hitcount: 3105
-{ lat: 245, common_pid: 2043 } hitcount: 645
-{ lat: 246, common_pid: 2038 } hitcount: 3451
-{ lat: 246, common_pid: 2041 } hitcount: 142
-{ lat: 246, common_pid: 2037 } hitcount: 5101
-{ lat: 246, common_pid: 2040 } hitcount: 68
-{ lat: 246, common_pid: 2043 } hitcount: 5099
-{ lat: 246, common_pid: 2039 } hitcount: 5608
-{ lat: 246, common_pid: 2042 } hitcount: 3723
-{ lat: 246, common_pid: 2036 } hitcount: 4738
-{ lat: 247, common_pid: 2042 } hitcount: 312
-{ lat: 247, common_pid: 2043 } hitcount: 2385
-{ lat: 247, common_pid: 2041 } hitcount: 452
-{ lat: 247, common_pid: 2038 } hitcount: 792
-{ lat: 247, common_pid: 2040 } hitcount: 78
-{ lat: 247, common_pid: 2036 } hitcount: 2375
-{ lat: 247, common_pid: 2039 } hitcount: 1834
-{ lat: 247, common_pid: 2037 } hitcount: 2655
-{ lat: 248, common_pid: 2037 } hitcount: 36
-{ lat: 248, common_pid: 2042 } hitcount: 11
-{ lat: 248, common_pid: 2038 } hitcount: 122
-{ lat: 248, common_pid: 2036 } hitcount: 135
-{ lat: 248, common_pid: 2039 } hitcount: 26
-{ lat: 248, common_pid: 2041 } hitcount: 503
-{ lat: 248, common_pid: 2043 } hitcount: 66
-{ lat: 248, common_pid: 2040 } hitcount: 46
-{ lat: 249, common_pid: 2037 } hitcount: 29
-{ lat: 249, common_pid: 2038 } hitcount: 1
-{ lat: 249, common_pid: 2043 } hitcount: 29
-{ lat: 249, common_pid: 2039 } hitcount: 8
-{ lat: 249, common_pid: 2042 } hitcount: 56
-{ lat: 249, common_pid: 2040 } hitcount: 27
-{ lat: 249, common_pid: 2041 } hitcount: 11
-{ lat: 249, common_pid: 2036 } hitcount: 27
-{ lat: 250, common_pid: 2038 } hitcount: 1
-{ lat: 250, common_pid: 2036 } hitcount: 30
-{ lat: 250, common_pid: 2040 } hitcount: 19
-{ lat: 250, common_pid: 2043 } hitcount: 22
-{ lat: 250, common_pid: 2042 } hitcount: 20
-{ lat: 250, common_pid: 2041 } hitcount: 1
-{ lat: 250, common_pid: 2039 } hitcount: 6
-{ lat: 250, common_pid: 2037 } hitcount: 48
-{ lat: 251, common_pid: 2037 } hitcount: 43
-{ lat: 251, common_pid: 2039 } hitcount: 1
-{ lat: 251, common_pid: 2036 } hitcount: 12
-{ lat: 251, common_pid: 2042 } hitcount: 2
-{ lat: 251, common_pid: 2041 } hitcount: 1
-{ lat: 251, common_pid: 2043 } hitcount: 15
-{ lat: 251, common_pid: 2040 } hitcount: 3
-{ lat: 252, common_pid: 2040 } hitcount: 1
-{ lat: 252, common_pid: 2036 } hitcount: 12
-{ lat: 252, common_pid: 2037 } hitcount: 21
-{ lat: 252, common_pid: 2043 } hitcount: 14
-{ lat: 253, common_pid: 2037 } hitcount: 21
-{ lat: 253, common_pid: 2039 } hitcount: 2
-{ lat: 253, common_pid: 2036 } hitcount: 9
-{ lat: 253, common_pid: 2043 } hitcount: 6
-{ lat: 253, common_pid: 2040 } hitcount: 1
-{ lat: 254, common_pid: 2036 } hitcount: 8
-{ lat: 254, common_pid: 2043 } hitcount: 3
-{ lat: 254, common_pid: 2041 } hitcount: 1
-{ lat: 254, common_pid: 2042 } hitcount: 1
-{ lat: 254, common_pid: 2039 } hitcount: 1
-{ lat: 254, common_pid: 2037 } hitcount: 12
-{ lat: 255, common_pid: 2043 } hitcount: 1
-{ lat: 255, common_pid: 2037 } hitcount: 2
-{ lat: 255, common_pid: 2036 } hitcount: 2
-{ lat: 255, common_pid: 2039 } hitcount: 8
-{ lat: 256, common_pid: 2043 } hitcount: 1
-{ lat: 256, common_pid: 2036 } hitcount: 4
-{ lat: 256, common_pid: 2039 } hitcount: 6
-{ lat: 257, common_pid: 2039 } hitcount: 5
-{ lat: 257, common_pid: 2036 } hitcount: 4
-{ lat: 258, common_pid: 2039 } hitcount: 5
-{ lat: 258, common_pid: 2036 } hitcount: 2
-{ lat: 259, common_pid: 2036 } hitcount: 7
-{ lat: 259, common_pid: 2039 } hitcount: 7
-{ lat: 260, common_pid: 2036 } hitcount: 8
-{ lat: 260, common_pid: 2039 } hitcount: 6
-{ lat: 261, common_pid: 2036 } hitcount: 5
-{ lat: 261, common_pid: 2039 } hitcount: 7
-{ lat: 262, common_pid: 2039 } hitcount: 5
-{ lat: 262, common_pid: 2036 } hitcount: 5
-{ lat: 263, common_pid: 2039 } hitcount: 7
-{ lat: 263, common_pid: 2036 } hitcount: 7
-{ lat: 264, common_pid: 2039 } hitcount: 9
-{ lat: 264, common_pid: 2036 } hitcount: 9
-{ lat: 265, common_pid: 2036 } hitcount: 5
-{ lat: 265, common_pid: 2039 } hitcount: 1
-{ lat: 266, common_pid: 2036 } hitcount: 1
-{ lat: 266, common_pid: 2039 } hitcount: 3
-{ lat: 267, common_pid: 2036 } hitcount: 1
-{ lat: 267, common_pid: 2039 } hitcount: 3
-{ lat: 268, common_pid: 2036 } hitcount: 1
-{ lat: 268, common_pid: 2039 } hitcount: 6
-{ lat: 269, common_pid: 2036 } hitcount: 1
-{ lat: 269, common_pid: 2043 } hitcount: 1
-{ lat: 269, common_pid: 2039 } hitcount: 2
-{ lat: 270, common_pid: 2040 } hitcount: 1
-{ lat: 270, common_pid: 2039 } hitcount: 6
-{ lat: 271, common_pid: 2041 } hitcount: 1
-{ lat: 271, common_pid: 2039 } hitcount: 5
-{ lat: 272, common_pid: 2039 } hitcount: 10
-{ lat: 273, common_pid: 2039 } hitcount: 8
-{ lat: 274, common_pid: 2039 } hitcount: 2
-{ lat: 275, common_pid: 2039 } hitcount: 1
-{ lat: 276, common_pid: 2039 } hitcount: 2
-{ lat: 276, common_pid: 2037 } hitcount: 1
-{ lat: 276, common_pid: 2038 } hitcount: 1
-{ lat: 277, common_pid: 2039 } hitcount: 1
-{ lat: 277, common_pid: 2042 } hitcount: 1
-{ lat: 278, common_pid: 2039 } hitcount: 1
-{ lat: 279, common_pid: 2039 } hitcount: 4
-{ lat: 279, common_pid: 2043 } hitcount: 1
-{ lat: 280, common_pid: 2039 } hitcount: 3
-{ lat: 283, common_pid: 2036 } hitcount: 2
-{ lat: 284, common_pid: 2039 } hitcount: 1
-{ lat: 284, common_pid: 2043 } hitcount: 1
-{ lat: 288, common_pid: 2039 } hitcount: 1
-{ lat: 289, common_pid: 2039 } hitcount: 1
-{ lat: 300, common_pid: 2039 } hitcount: 1
-{ lat: 384, common_pid: 2039 } hitcount: 1
-
-Totals:
- Hits: 67625
- Entries: 278
- Dropped: 0
+ # event histogram
+ #
+ # trigger info: hist:keys=lat,common_pid:vals=hitcount:sort=lat:size=2048 [active]
+ #
+
+ { lat: 107, common_pid: 2039 } hitcount: 1
+ { lat: 122, common_pid: 2041 } hitcount: 1
+ { lat: 166, common_pid: 2039 } hitcount: 1
+ { lat: 174, common_pid: 2039 } hitcount: 1
+ { lat: 194, common_pid: 2041 } hitcount: 1
+ { lat: 196, common_pid: 2036 } hitcount: 1
+ { lat: 197, common_pid: 2038 } hitcount: 1
+ { lat: 198, common_pid: 2039 } hitcount: 1
+ { lat: 199, common_pid: 2039 } hitcount: 1
+ { lat: 200, common_pid: 2041 } hitcount: 1
+ { lat: 201, common_pid: 2039 } hitcount: 2
+ { lat: 202, common_pid: 2038 } hitcount: 1
+ { lat: 202, common_pid: 2043 } hitcount: 1
+ { lat: 203, common_pid: 2039 } hitcount: 1
+ { lat: 203, common_pid: 2036 } hitcount: 1
+ { lat: 203, common_pid: 2041 } hitcount: 1
+ { lat: 206, common_pid: 2038 } hitcount: 2
+ { lat: 207, common_pid: 2039 } hitcount: 1
+ { lat: 207, common_pid: 2036 } hitcount: 1
+ { lat: 208, common_pid: 2040 } hitcount: 1
+ { lat: 209, common_pid: 2043 } hitcount: 1
+ { lat: 210, common_pid: 2039 } hitcount: 1
+ { lat: 211, common_pid: 2039 } hitcount: 4
+ { lat: 212, common_pid: 2043 } hitcount: 1
+ { lat: 212, common_pid: 2039 } hitcount: 2
+ { lat: 213, common_pid: 2039 } hitcount: 1
+ { lat: 214, common_pid: 2038 } hitcount: 1
+ { lat: 214, common_pid: 2039 } hitcount: 2
+ { lat: 214, common_pid: 2042 } hitcount: 1
+ { lat: 215, common_pid: 2039 } hitcount: 1
+ { lat: 217, common_pid: 2036 } hitcount: 1
+ { lat: 217, common_pid: 2040 } hitcount: 1
+ { lat: 217, common_pid: 2039 } hitcount: 1
+ { lat: 218, common_pid: 2039 } hitcount: 6
+ { lat: 219, common_pid: 2039 } hitcount: 9
+ { lat: 220, common_pid: 2039 } hitcount: 11
+ { lat: 221, common_pid: 2039 } hitcount: 5
+ { lat: 221, common_pid: 2042 } hitcount: 1
+ { lat: 222, common_pid: 2039 } hitcount: 7
+ { lat: 223, common_pid: 2036 } hitcount: 1
+ { lat: 223, common_pid: 2039 } hitcount: 3
+ { lat: 224, common_pid: 2039 } hitcount: 4
+ { lat: 224, common_pid: 2037 } hitcount: 1
+ { lat: 224, common_pid: 2036 } hitcount: 2
+ { lat: 225, common_pid: 2039 } hitcount: 5
+ { lat: 225, common_pid: 2042 } hitcount: 1
+ { lat: 226, common_pid: 2039 } hitcount: 7
+ { lat: 226, common_pid: 2036 } hitcount: 4
+ { lat: 227, common_pid: 2039 } hitcount: 6
+ { lat: 227, common_pid: 2036 } hitcount: 12
+ { lat: 227, common_pid: 2043 } hitcount: 1
+ { lat: 228, common_pid: 2039 } hitcount: 7
+ { lat: 228, common_pid: 2036 } hitcount: 14
+ { lat: 229, common_pid: 2039 } hitcount: 9
+ { lat: 229, common_pid: 2036 } hitcount: 8
+ { lat: 229, common_pid: 2038 } hitcount: 1
+ { lat: 230, common_pid: 2039 } hitcount: 11
+ { lat: 230, common_pid: 2036 } hitcount: 6
+ { lat: 230, common_pid: 2043 } hitcount: 1
+ { lat: 230, common_pid: 2042 } hitcount: 2
+ { lat: 231, common_pid: 2041 } hitcount: 1
+ { lat: 231, common_pid: 2036 } hitcount: 6
+ { lat: 231, common_pid: 2043 } hitcount: 1
+ { lat: 231, common_pid: 2039 } hitcount: 8
+ { lat: 232, common_pid: 2037 } hitcount: 1
+ { lat: 232, common_pid: 2039 } hitcount: 6
+ { lat: 232, common_pid: 2040 } hitcount: 2
+ { lat: 232, common_pid: 2036 } hitcount: 5
+ { lat: 232, common_pid: 2043 } hitcount: 1
+ { lat: 233, common_pid: 2036 } hitcount: 5
+ { lat: 233, common_pid: 2039 } hitcount: 11
+ { lat: 234, common_pid: 2039 } hitcount: 4
+ { lat: 234, common_pid: 2038 } hitcount: 2
+ { lat: 234, common_pid: 2043 } hitcount: 2
+ { lat: 234, common_pid: 2036 } hitcount: 11
+ { lat: 234, common_pid: 2040 } hitcount: 1
+ { lat: 235, common_pid: 2037 } hitcount: 2
+ { lat: 235, common_pid: 2036 } hitcount: 8
+ { lat: 235, common_pid: 2043 } hitcount: 2
+ { lat: 235, common_pid: 2039 } hitcount: 5
+ { lat: 235, common_pid: 2042 } hitcount: 2
+ { lat: 235, common_pid: 2040 } hitcount: 4
+ { lat: 235, common_pid: 2041 } hitcount: 1
+ { lat: 236, common_pid: 2036 } hitcount: 7
+ { lat: 236, common_pid: 2037 } hitcount: 1
+ { lat: 236, common_pid: 2041 } hitcount: 5
+ { lat: 236, common_pid: 2039 } hitcount: 3
+ { lat: 236, common_pid: 2043 } hitcount: 9
+ { lat: 236, common_pid: 2040 } hitcount: 7
+ { lat: 237, common_pid: 2037 } hitcount: 1
+ { lat: 237, common_pid: 2040 } hitcount: 1
+ { lat: 237, common_pid: 2036 } hitcount: 9
+ { lat: 237, common_pid: 2039 } hitcount: 3
+ { lat: 237, common_pid: 2043 } hitcount: 8
+ { lat: 237, common_pid: 2042 } hitcount: 2
+ { lat: 237, common_pid: 2041 } hitcount: 2
+ { lat: 238, common_pid: 2043 } hitcount: 10
+ { lat: 238, common_pid: 2040 } hitcount: 1
+ { lat: 238, common_pid: 2037 } hitcount: 9
+ { lat: 238, common_pid: 2038 } hitcount: 1
+ { lat: 238, common_pid: 2039 } hitcount: 1
+ { lat: 238, common_pid: 2042 } hitcount: 3
+ { lat: 238, common_pid: 2036 } hitcount: 7
+ { lat: 239, common_pid: 2041 } hitcount: 1
+ { lat: 239, common_pid: 2043 } hitcount: 11
+ { lat: 239, common_pid: 2037 } hitcount: 11
+ { lat: 239, common_pid: 2038 } hitcount: 6
+ { lat: 239, common_pid: 2036 } hitcount: 7
+ { lat: 239, common_pid: 2040 } hitcount: 1
+ { lat: 239, common_pid: 2042 } hitcount: 9
+ { lat: 240, common_pid: 2037 } hitcount: 29
+ { lat: 240, common_pid: 2043 } hitcount: 15
+ { lat: 240, common_pid: 2040 } hitcount: 44
+ { lat: 240, common_pid: 2039 } hitcount: 1
+ { lat: 240, common_pid: 2041 } hitcount: 2
+ { lat: 240, common_pid: 2038 } hitcount: 1
+ { lat: 240, common_pid: 2036 } hitcount: 10
+ { lat: 240, common_pid: 2042 } hitcount: 13
+ { lat: 241, common_pid: 2036 } hitcount: 21
+ { lat: 241, common_pid: 2041 } hitcount: 36
+ { lat: 241, common_pid: 2037 } hitcount: 34
+ { lat: 241, common_pid: 2042 } hitcount: 14
+ { lat: 241, common_pid: 2040 } hitcount: 94
+ { lat: 241, common_pid: 2039 } hitcount: 12
+ { lat: 241, common_pid: 2038 } hitcount: 2
+ { lat: 241, common_pid: 2043 } hitcount: 28
+ { lat: 242, common_pid: 2040 } hitcount: 109
+ { lat: 242, common_pid: 2041 } hitcount: 506
+ { lat: 242, common_pid: 2039 } hitcount: 155
+ { lat: 242, common_pid: 2042 } hitcount: 21
+ { lat: 242, common_pid: 2037 } hitcount: 52
+ { lat: 242, common_pid: 2043 } hitcount: 21
+ { lat: 242, common_pid: 2036 } hitcount: 16
+ { lat: 242, common_pid: 2038 } hitcount: 156
+ { lat: 243, common_pid: 2037 } hitcount: 46
+ { lat: 243, common_pid: 2039 } hitcount: 40
+ { lat: 243, common_pid: 2042 } hitcount: 119
+ { lat: 243, common_pid: 2041 } hitcount: 611
+ { lat: 243, common_pid: 2036 } hitcount: 69
+ { lat: 243, common_pid: 2038 } hitcount: 784
+ { lat: 243, common_pid: 2040 } hitcount: 323
+ { lat: 243, common_pid: 2043 } hitcount: 14
+ { lat: 244, common_pid: 2043 } hitcount: 35
+ { lat: 244, common_pid: 2042 } hitcount: 305
+ { lat: 244, common_pid: 2039 } hitcount: 8
+ { lat: 244, common_pid: 2040 } hitcount: 4515
+ { lat: 244, common_pid: 2038 } hitcount: 371
+ { lat: 244, common_pid: 2037 } hitcount: 31
+ { lat: 244, common_pid: 2036 } hitcount: 114
+ { lat: 244, common_pid: 2041 } hitcount: 3396
+ { lat: 245, common_pid: 2036 } hitcount: 700
+ { lat: 245, common_pid: 2041 } hitcount: 2772
+ { lat: 245, common_pid: 2037 } hitcount: 268
+ { lat: 245, common_pid: 2039 } hitcount: 472
+ { lat: 245, common_pid: 2038 } hitcount: 2758
+ { lat: 245, common_pid: 2042 } hitcount: 3833
+ { lat: 245, common_pid: 2040 } hitcount: 3105
+ { lat: 245, common_pid: 2043 } hitcount: 645
+ { lat: 246, common_pid: 2038 } hitcount: 3451
+ { lat: 246, common_pid: 2041 } hitcount: 142
+ { lat: 246, common_pid: 2037 } hitcount: 5101
+ { lat: 246, common_pid: 2040 } hitcount: 68
+ { lat: 246, common_pid: 2043 } hitcount: 5099
+ { lat: 246, common_pid: 2039 } hitcount: 5608
+ { lat: 246, common_pid: 2042 } hitcount: 3723
+ { lat: 246, common_pid: 2036 } hitcount: 4738
+ { lat: 247, common_pid: 2042 } hitcount: 312
+ { lat: 247, common_pid: 2043 } hitcount: 2385
+ { lat: 247, common_pid: 2041 } hitcount: 452
+ { lat: 247, common_pid: 2038 } hitcount: 792
+ { lat: 247, common_pid: 2040 } hitcount: 78
+ { lat: 247, common_pid: 2036 } hitcount: 2375
+ { lat: 247, common_pid: 2039 } hitcount: 1834
+ { lat: 247, common_pid: 2037 } hitcount: 2655
+ { lat: 248, common_pid: 2037 } hitcount: 36
+ { lat: 248, common_pid: 2042 } hitcount: 11
+ { lat: 248, common_pid: 2038 } hitcount: 122
+ { lat: 248, common_pid: 2036 } hitcount: 135
+ { lat: 248, common_pid: 2039 } hitcount: 26
+ { lat: 248, common_pid: 2041 } hitcount: 503
+ { lat: 248, common_pid: 2043 } hitcount: 66
+ { lat: 248, common_pid: 2040 } hitcount: 46
+ { lat: 249, common_pid: 2037 } hitcount: 29
+ { lat: 249, common_pid: 2038 } hitcount: 1
+ { lat: 249, common_pid: 2043 } hitcount: 29
+ { lat: 249, common_pid: 2039 } hitcount: 8
+ { lat: 249, common_pid: 2042 } hitcount: 56
+ { lat: 249, common_pid: 2040 } hitcount: 27
+ { lat: 249, common_pid: 2041 } hitcount: 11
+ { lat: 249, common_pid: 2036 } hitcount: 27
+ { lat: 250, common_pid: 2038 } hitcount: 1
+ { lat: 250, common_pid: 2036 } hitcount: 30
+ { lat: 250, common_pid: 2040 } hitcount: 19
+ { lat: 250, common_pid: 2043 } hitcount: 22
+ { lat: 250, common_pid: 2042 } hitcount: 20
+ { lat: 250, common_pid: 2041 } hitcount: 1
+ { lat: 250, common_pid: 2039 } hitcount: 6
+ { lat: 250, common_pid: 2037 } hitcount: 48
+ { lat: 251, common_pid: 2037 } hitcount: 43
+ { lat: 251, common_pid: 2039 } hitcount: 1
+ { lat: 251, common_pid: 2036 } hitcount: 12
+ { lat: 251, common_pid: 2042 } hitcount: 2
+ { lat: 251, common_pid: 2041 } hitcount: 1
+ { lat: 251, common_pid: 2043 } hitcount: 15
+ { lat: 251, common_pid: 2040 } hitcount: 3
+ { lat: 252, common_pid: 2040 } hitcount: 1
+ { lat: 252, common_pid: 2036 } hitcount: 12
+ { lat: 252, common_pid: 2037 } hitcount: 21
+ { lat: 252, common_pid: 2043 } hitcount: 14
+ { lat: 253, common_pid: 2037 } hitcount: 21
+ { lat: 253, common_pid: 2039 } hitcount: 2
+ { lat: 253, common_pid: 2036 } hitcount: 9
+ { lat: 253, common_pid: 2043 } hitcount: 6
+ { lat: 253, common_pid: 2040 } hitcount: 1
+ { lat: 254, common_pid: 2036 } hitcount: 8
+ { lat: 254, common_pid: 2043 } hitcount: 3
+ { lat: 254, common_pid: 2041 } hitcount: 1
+ { lat: 254, common_pid: 2042 } hitcount: 1
+ { lat: 254, common_pid: 2039 } hitcount: 1
+ { lat: 254, common_pid: 2037 } hitcount: 12
+ { lat: 255, common_pid: 2043 } hitcount: 1
+ { lat: 255, common_pid: 2037 } hitcount: 2
+ { lat: 255, common_pid: 2036 } hitcount: 2
+ { lat: 255, common_pid: 2039 } hitcount: 8
+ { lat: 256, common_pid: 2043 } hitcount: 1
+ { lat: 256, common_pid: 2036 } hitcount: 4
+ { lat: 256, common_pid: 2039 } hitcount: 6
+ { lat: 257, common_pid: 2039 } hitcount: 5
+ { lat: 257, common_pid: 2036 } hitcount: 4
+ { lat: 258, common_pid: 2039 } hitcount: 5
+ { lat: 258, common_pid: 2036 } hitcount: 2
+ { lat: 259, common_pid: 2036 } hitcount: 7
+ { lat: 259, common_pid: 2039 } hitcount: 7
+ { lat: 260, common_pid: 2036 } hitcount: 8
+ { lat: 260, common_pid: 2039 } hitcount: 6
+ { lat: 261, common_pid: 2036 } hitcount: 5
+ { lat: 261, common_pid: 2039 } hitcount: 7
+ { lat: 262, common_pid: 2039 } hitcount: 5
+ { lat: 262, common_pid: 2036 } hitcount: 5
+ { lat: 263, common_pid: 2039 } hitcount: 7
+ { lat: 263, common_pid: 2036 } hitcount: 7
+ { lat: 264, common_pid: 2039 } hitcount: 9
+ { lat: 264, common_pid: 2036 } hitcount: 9
+ { lat: 265, common_pid: 2036 } hitcount: 5
+ { lat: 265, common_pid: 2039 } hitcount: 1
+ { lat: 266, common_pid: 2036 } hitcount: 1
+ { lat: 266, common_pid: 2039 } hitcount: 3
+ { lat: 267, common_pid: 2036 } hitcount: 1
+ { lat: 267, common_pid: 2039 } hitcount: 3
+ { lat: 268, common_pid: 2036 } hitcount: 1
+ { lat: 268, common_pid: 2039 } hitcount: 6
+ { lat: 269, common_pid: 2036 } hitcount: 1
+ { lat: 269, common_pid: 2043 } hitcount: 1
+ { lat: 269, common_pid: 2039 } hitcount: 2
+ { lat: 270, common_pid: 2040 } hitcount: 1
+ { lat: 270, common_pid: 2039 } hitcount: 6
+ { lat: 271, common_pid: 2041 } hitcount: 1
+ { lat: 271, common_pid: 2039 } hitcount: 5
+ { lat: 272, common_pid: 2039 } hitcount: 10
+ { lat: 273, common_pid: 2039 } hitcount: 8
+ { lat: 274, common_pid: 2039 } hitcount: 2
+ { lat: 275, common_pid: 2039 } hitcount: 1
+ { lat: 276, common_pid: 2039 } hitcount: 2
+ { lat: 276, common_pid: 2037 } hitcount: 1
+ { lat: 276, common_pid: 2038 } hitcount: 1
+ { lat: 277, common_pid: 2039 } hitcount: 1
+ { lat: 277, common_pid: 2042 } hitcount: 1
+ { lat: 278, common_pid: 2039 } hitcount: 1
+ { lat: 279, common_pid: 2039 } hitcount: 4
+ { lat: 279, common_pid: 2043 } hitcount: 1
+ { lat: 280, common_pid: 2039 } hitcount: 3
+ { lat: 283, common_pid: 2036 } hitcount: 2
+ { lat: 284, common_pid: 2039 } hitcount: 1
+ { lat: 284, common_pid: 2043 } hitcount: 1
+ { lat: 288, common_pid: 2039 } hitcount: 1
+ { lat: 289, common_pid: 2039 } hitcount: 1
+ { lat: 300, common_pid: 2039 } hitcount: 1
+ { lat: 384, common_pid: 2039 } hitcount: 1
+
+ Totals:
+ Hits: 67625
+ Entries: 278
+ Dropped: 0
Note, the writes are around the sleep, so ideally they will all be roughly 250
microseconds. If you are wondering how there are several that are under
@@ -2350,7 +2356,7 @@ will be at 200 microseconds.
But this could easily be done in userspace. To make this even more
interesting, we can mix the histogram between events that happened in the
-kernel with trace_marker.
+kernel with trace_marker::
# cd /sys/kernel/tracing
# echo 'latency u64 lat' > synthetic_events
@@ -2362,177 +2368,177 @@ The difference this time is that instead of using the trace_marker to start
the latency, the sched_waking event is used, matching the common_pid for the
trace_marker write with the pid that is being woken by sched_waking.
-After running cyclictest again with the same parameters, we now have:
+After running cyclictest again with the same parameters, we now have::
# cat events/synthetic/latency/hist
-# event histogram
-#
-# trigger info: hist:keys=lat,common_pid:vals=hitcount:sort=lat:size=2048 [active]
-#
-
-{ lat: 7, common_pid: 2302 } hitcount: 640
-{ lat: 7, common_pid: 2299 } hitcount: 42
-{ lat: 7, common_pid: 2303 } hitcount: 18
-{ lat: 7, common_pid: 2305 } hitcount: 166
-{ lat: 7, common_pid: 2306 } hitcount: 1
-{ lat: 7, common_pid: 2301 } hitcount: 91
-{ lat: 7, common_pid: 2300 } hitcount: 17
-{ lat: 8, common_pid: 2303 } hitcount: 8296
-{ lat: 8, common_pid: 2304 } hitcount: 6864
-{ lat: 8, common_pid: 2305 } hitcount: 9464
-{ lat: 8, common_pid: 2301 } hitcount: 9213
-{ lat: 8, common_pid: 2306 } hitcount: 6246
-{ lat: 8, common_pid: 2302 } hitcount: 8797
-{ lat: 8, common_pid: 2299 } hitcount: 8771
-{ lat: 8, common_pid: 2300 } hitcount: 8119
-{ lat: 9, common_pid: 2305 } hitcount: 1519
-{ lat: 9, common_pid: 2299 } hitcount: 2346
-{ lat: 9, common_pid: 2303 } hitcount: 2841
-{ lat: 9, common_pid: 2301 } hitcount: 1846
-{ lat: 9, common_pid: 2304 } hitcount: 3861
-{ lat: 9, common_pid: 2302 } hitcount: 1210
-{ lat: 9, common_pid: 2300 } hitcount: 2762
-{ lat: 9, common_pid: 2306 } hitcount: 4247
-{ lat: 10, common_pid: 2299 } hitcount: 16
-{ lat: 10, common_pid: 2306 } hitcount: 333
-{ lat: 10, common_pid: 2303 } hitcount: 16
-{ lat: 10, common_pid: 2304 } hitcount: 168
-{ lat: 10, common_pid: 2302 } hitcount: 240
-{ lat: 10, common_pid: 2301 } hitcount: 28
-{ lat: 10, common_pid: 2300 } hitcount: 95
-{ lat: 10, common_pid: 2305 } hitcount: 18
-{ lat: 11, common_pid: 2303 } hitcount: 5
-{ lat: 11, common_pid: 2305 } hitcount: 8
-{ lat: 11, common_pid: 2306 } hitcount: 221
-{ lat: 11, common_pid: 2302 } hitcount: 76
-{ lat: 11, common_pid: 2304 } hitcount: 26
-{ lat: 11, common_pid: 2300 } hitcount: 125
-{ lat: 11, common_pid: 2299 } hitcount: 2
-{ lat: 12, common_pid: 2305 } hitcount: 3
-{ lat: 12, common_pid: 2300 } hitcount: 6
-{ lat: 12, common_pid: 2306 } hitcount: 90
-{ lat: 12, common_pid: 2302 } hitcount: 4
-{ lat: 12, common_pid: 2303 } hitcount: 1
-{ lat: 12, common_pid: 2304 } hitcount: 122
-{ lat: 13, common_pid: 2300 } hitcount: 12
-{ lat: 13, common_pid: 2301 } hitcount: 1
-{ lat: 13, common_pid: 2306 } hitcount: 32
-{ lat: 13, common_pid: 2302 } hitcount: 5
-{ lat: 13, common_pid: 2305 } hitcount: 1
-{ lat: 13, common_pid: 2303 } hitcount: 1
-{ lat: 13, common_pid: 2304 } hitcount: 61
-{ lat: 14, common_pid: 2303 } hitcount: 4
-{ lat: 14, common_pid: 2306 } hitcount: 5
-{ lat: 14, common_pid: 2305 } hitcount: 4
-{ lat: 14, common_pid: 2304 } hitcount: 62
-{ lat: 14, common_pid: 2302 } hitcount: 19
-{ lat: 14, common_pid: 2300 } hitcount: 33
-{ lat: 14, common_pid: 2299 } hitcount: 1
-{ lat: 14, common_pid: 2301 } hitcount: 4
-{ lat: 15, common_pid: 2305 } hitcount: 1
-{ lat: 15, common_pid: 2302 } hitcount: 25
-{ lat: 15, common_pid: 2300 } hitcount: 11
-{ lat: 15, common_pid: 2299 } hitcount: 5
-{ lat: 15, common_pid: 2301 } hitcount: 1
-{ lat: 15, common_pid: 2304 } hitcount: 8
-{ lat: 15, common_pid: 2303 } hitcount: 1
-{ lat: 15, common_pid: 2306 } hitcount: 6
-{ lat: 16, common_pid: 2302 } hitcount: 31
-{ lat: 16, common_pid: 2306 } hitcount: 3
-{ lat: 16, common_pid: 2300 } hitcount: 5
-{ lat: 17, common_pid: 2302 } hitcount: 6
-{ lat: 17, common_pid: 2303 } hitcount: 1
-{ lat: 18, common_pid: 2304 } hitcount: 1
-{ lat: 18, common_pid: 2302 } hitcount: 8
-{ lat: 18, common_pid: 2299 } hitcount: 1
-{ lat: 18, common_pid: 2301 } hitcount: 1
-{ lat: 19, common_pid: 2303 } hitcount: 4
-{ lat: 19, common_pid: 2304 } hitcount: 5
-{ lat: 19, common_pid: 2302 } hitcount: 4
-{ lat: 19, common_pid: 2299 } hitcount: 3
-{ lat: 19, common_pid: 2306 } hitcount: 1
-{ lat: 19, common_pid: 2300 } hitcount: 4
-{ lat: 19, common_pid: 2305 } hitcount: 5
-{ lat: 20, common_pid: 2299 } hitcount: 2
-{ lat: 20, common_pid: 2302 } hitcount: 3
-{ lat: 20, common_pid: 2305 } hitcount: 1
-{ lat: 20, common_pid: 2300 } hitcount: 2
-{ lat: 20, common_pid: 2301 } hitcount: 2
-{ lat: 20, common_pid: 2303 } hitcount: 3
-{ lat: 21, common_pid: 2305 } hitcount: 1
-{ lat: 21, common_pid: 2299 } hitcount: 5
-{ lat: 21, common_pid: 2303 } hitcount: 4
-{ lat: 21, common_pid: 2302 } hitcount: 7
-{ lat: 21, common_pid: 2300 } hitcount: 1
-{ lat: 21, common_pid: 2301 } hitcount: 5
-{ lat: 21, common_pid: 2304 } hitcount: 2
-{ lat: 22, common_pid: 2302 } hitcount: 5
-{ lat: 22, common_pid: 2303 } hitcount: 1
-{ lat: 22, common_pid: 2306 } hitcount: 3
-{ lat: 22, common_pid: 2301 } hitcount: 2
-{ lat: 22, common_pid: 2300 } hitcount: 1
-{ lat: 22, common_pid: 2299 } hitcount: 1
-{ lat: 22, common_pid: 2305 } hitcount: 1
-{ lat: 22, common_pid: 2304 } hitcount: 1
-{ lat: 23, common_pid: 2299 } hitcount: 1
-{ lat: 23, common_pid: 2306 } hitcount: 2
-{ lat: 23, common_pid: 2302 } hitcount: 6
-{ lat: 24, common_pid: 2302 } hitcount: 3
-{ lat: 24, common_pid: 2300 } hitcount: 1
-{ lat: 24, common_pid: 2306 } hitcount: 2
-{ lat: 24, common_pid: 2305 } hitcount: 1
-{ lat: 24, common_pid: 2299 } hitcount: 1
-{ lat: 25, common_pid: 2300 } hitcount: 1
-{ lat: 25, common_pid: 2302 } hitcount: 4
-{ lat: 26, common_pid: 2302 } hitcount: 2
-{ lat: 27, common_pid: 2305 } hitcount: 1
-{ lat: 27, common_pid: 2300 } hitcount: 1
-{ lat: 27, common_pid: 2302 } hitcount: 3
-{ lat: 28, common_pid: 2306 } hitcount: 1
-{ lat: 28, common_pid: 2302 } hitcount: 4
-{ lat: 29, common_pid: 2302 } hitcount: 1
-{ lat: 29, common_pid: 2300 } hitcount: 2
-{ lat: 29, common_pid: 2306 } hitcount: 1
-{ lat: 29, common_pid: 2304 } hitcount: 1
-{ lat: 30, common_pid: 2302 } hitcount: 4
-{ lat: 31, common_pid: 2302 } hitcount: 6
-{ lat: 32, common_pid: 2302 } hitcount: 1
-{ lat: 33, common_pid: 2299 } hitcount: 1
-{ lat: 33, common_pid: 2302 } hitcount: 3
-{ lat: 34, common_pid: 2302 } hitcount: 2
-{ lat: 35, common_pid: 2302 } hitcount: 1
-{ lat: 35, common_pid: 2304 } hitcount: 1
-{ lat: 36, common_pid: 2302 } hitcount: 4
-{ lat: 37, common_pid: 2302 } hitcount: 6
-{ lat: 38, common_pid: 2302 } hitcount: 2
-{ lat: 39, common_pid: 2302 } hitcount: 2
-{ lat: 39, common_pid: 2304 } hitcount: 1
-{ lat: 40, common_pid: 2304 } hitcount: 2
-{ lat: 40, common_pid: 2302 } hitcount: 5
-{ lat: 41, common_pid: 2304 } hitcount: 1
-{ lat: 41, common_pid: 2302 } hitcount: 8
-{ lat: 42, common_pid: 2302 } hitcount: 6
-{ lat: 42, common_pid: 2304 } hitcount: 1
-{ lat: 43, common_pid: 2302 } hitcount: 3
-{ lat: 43, common_pid: 2304 } hitcount: 4
-{ lat: 44, common_pid: 2302 } hitcount: 6
-{ lat: 45, common_pid: 2302 } hitcount: 5
-{ lat: 46, common_pid: 2302 } hitcount: 5
-{ lat: 47, common_pid: 2302 } hitcount: 7
-{ lat: 48, common_pid: 2301 } hitcount: 1
-{ lat: 48, common_pid: 2302 } hitcount: 9
-{ lat: 49, common_pid: 2302 } hitcount: 3
-{ lat: 50, common_pid: 2302 } hitcount: 1
-{ lat: 50, common_pid: 2301 } hitcount: 1
-{ lat: 51, common_pid: 2302 } hitcount: 2
-{ lat: 51, common_pid: 2301 } hitcount: 1
-{ lat: 61, common_pid: 2302 } hitcount: 1
-{ lat: 110, common_pid: 2302 } hitcount: 1
-
-Totals:
- Hits: 89565
- Entries: 158
- Dropped: 0
+ # event histogram
+ #
+ # trigger info: hist:keys=lat,common_pid:vals=hitcount:sort=lat:size=2048 [active]
+ #
+
+ { lat: 7, common_pid: 2302 } hitcount: 640
+ { lat: 7, common_pid: 2299 } hitcount: 42
+ { lat: 7, common_pid: 2303 } hitcount: 18
+ { lat: 7, common_pid: 2305 } hitcount: 166
+ { lat: 7, common_pid: 2306 } hitcount: 1
+ { lat: 7, common_pid: 2301 } hitcount: 91
+ { lat: 7, common_pid: 2300 } hitcount: 17
+ { lat: 8, common_pid: 2303 } hitcount: 8296
+ { lat: 8, common_pid: 2304 } hitcount: 6864
+ { lat: 8, common_pid: 2305 } hitcount: 9464
+ { lat: 8, common_pid: 2301 } hitcount: 9213
+ { lat: 8, common_pid: 2306 } hitcount: 6246
+ { lat: 8, common_pid: 2302 } hitcount: 8797
+ { lat: 8, common_pid: 2299 } hitcount: 8771
+ { lat: 8, common_pid: 2300 } hitcount: 8119
+ { lat: 9, common_pid: 2305 } hitcount: 1519
+ { lat: 9, common_pid: 2299 } hitcount: 2346
+ { lat: 9, common_pid: 2303 } hitcount: 2841
+ { lat: 9, common_pid: 2301 } hitcount: 1846
+ { lat: 9, common_pid: 2304 } hitcount: 3861
+ { lat: 9, common_pid: 2302 } hitcount: 1210
+ { lat: 9, common_pid: 2300 } hitcount: 2762
+ { lat: 9, common_pid: 2306 } hitcount: 4247
+ { lat: 10, common_pid: 2299 } hitcount: 16
+ { lat: 10, common_pid: 2306 } hitcount: 333
+ { lat: 10, common_pid: 2303 } hitcount: 16
+ { lat: 10, common_pid: 2304 } hitcount: 168
+ { lat: 10, common_pid: 2302 } hitcount: 240
+ { lat: 10, common_pid: 2301 } hitcount: 28
+ { lat: 10, common_pid: 2300 } hitcount: 95
+ { lat: 10, common_pid: 2305 } hitcount: 18
+ { lat: 11, common_pid: 2303 } hitcount: 5
+ { lat: 11, common_pid: 2305 } hitcount: 8
+ { lat: 11, common_pid: 2306 } hitcount: 221
+ { lat: 11, common_pid: 2302 } hitcount: 76
+ { lat: 11, common_pid: 2304 } hitcount: 26
+ { lat: 11, common_pid: 2300 } hitcount: 125
+ { lat: 11, common_pid: 2299 } hitcount: 2
+ { lat: 12, common_pid: 2305 } hitcount: 3
+ { lat: 12, common_pid: 2300 } hitcount: 6
+ { lat: 12, common_pid: 2306 } hitcount: 90
+ { lat: 12, common_pid: 2302 } hitcount: 4
+ { lat: 12, common_pid: 2303 } hitcount: 1
+ { lat: 12, common_pid: 2304 } hitcount: 122
+ { lat: 13, common_pid: 2300 } hitcount: 12
+ { lat: 13, common_pid: 2301 } hitcount: 1
+ { lat: 13, common_pid: 2306 } hitcount: 32
+ { lat: 13, common_pid: 2302 } hitcount: 5
+ { lat: 13, common_pid: 2305 } hitcount: 1
+ { lat: 13, common_pid: 2303 } hitcount: 1
+ { lat: 13, common_pid: 2304 } hitcount: 61
+ { lat: 14, common_pid: 2303 } hitcount: 4
+ { lat: 14, common_pid: 2306 } hitcount: 5
+ { lat: 14, common_pid: 2305 } hitcount: 4
+ { lat: 14, common_pid: 2304 } hitcount: 62
+ { lat: 14, common_pid: 2302 } hitcount: 19
+ { lat: 14, common_pid: 2300 } hitcount: 33
+ { lat: 14, common_pid: 2299 } hitcount: 1
+ { lat: 14, common_pid: 2301 } hitcount: 4
+ { lat: 15, common_pid: 2305 } hitcount: 1
+ { lat: 15, common_pid: 2302 } hitcount: 25
+ { lat: 15, common_pid: 2300 } hitcount: 11
+ { lat: 15, common_pid: 2299 } hitcount: 5
+ { lat: 15, common_pid: 2301 } hitcount: 1
+ { lat: 15, common_pid: 2304 } hitcount: 8
+ { lat: 15, common_pid: 2303 } hitcount: 1
+ { lat: 15, common_pid: 2306 } hitcount: 6
+ { lat: 16, common_pid: 2302 } hitcount: 31
+ { lat: 16, common_pid: 2306 } hitcount: 3
+ { lat: 16, common_pid: 2300 } hitcount: 5
+ { lat: 17, common_pid: 2302 } hitcount: 6
+ { lat: 17, common_pid: 2303 } hitcount: 1
+ { lat: 18, common_pid: 2304 } hitcount: 1
+ { lat: 18, common_pid: 2302 } hitcount: 8
+ { lat: 18, common_pid: 2299 } hitcount: 1
+ { lat: 18, common_pid: 2301 } hitcount: 1
+ { lat: 19, common_pid: 2303 } hitcount: 4
+ { lat: 19, common_pid: 2304 } hitcount: 5
+ { lat: 19, common_pid: 2302 } hitcount: 4
+ { lat: 19, common_pid: 2299 } hitcount: 3
+ { lat: 19, common_pid: 2306 } hitcount: 1
+ { lat: 19, common_pid: 2300 } hitcount: 4
+ { lat: 19, common_pid: 2305 } hitcount: 5
+ { lat: 20, common_pid: 2299 } hitcount: 2
+ { lat: 20, common_pid: 2302 } hitcount: 3
+ { lat: 20, common_pid: 2305 } hitcount: 1
+ { lat: 20, common_pid: 2300 } hitcount: 2
+ { lat: 20, common_pid: 2301 } hitcount: 2
+ { lat: 20, common_pid: 2303 } hitcount: 3
+ { lat: 21, common_pid: 2305 } hitcount: 1
+ { lat: 21, common_pid: 2299 } hitcount: 5
+ { lat: 21, common_pid: 2303 } hitcount: 4
+ { lat: 21, common_pid: 2302 } hitcount: 7
+ { lat: 21, common_pid: 2300 } hitcount: 1
+ { lat: 21, common_pid: 2301 } hitcount: 5
+ { lat: 21, common_pid: 2304 } hitcount: 2
+ { lat: 22, common_pid: 2302 } hitcount: 5
+ { lat: 22, common_pid: 2303 } hitcount: 1
+ { lat: 22, common_pid: 2306 } hitcount: 3
+ { lat: 22, common_pid: 2301 } hitcount: 2
+ { lat: 22, common_pid: 2300 } hitcount: 1
+ { lat: 22, common_pid: 2299 } hitcount: 1
+ { lat: 22, common_pid: 2305 } hitcount: 1
+ { lat: 22, common_pid: 2304 } hitcount: 1
+ { lat: 23, common_pid: 2299 } hitcount: 1
+ { lat: 23, common_pid: 2306 } hitcount: 2
+ { lat: 23, common_pid: 2302 } hitcount: 6
+ { lat: 24, common_pid: 2302 } hitcount: 3
+ { lat: 24, common_pid: 2300 } hitcount: 1
+ { lat: 24, common_pid: 2306 } hitcount: 2
+ { lat: 24, common_pid: 2305 } hitcount: 1
+ { lat: 24, common_pid: 2299 } hitcount: 1
+ { lat: 25, common_pid: 2300 } hitcount: 1
+ { lat: 25, common_pid: 2302 } hitcount: 4
+ { lat: 26, common_pid: 2302 } hitcount: 2
+ { lat: 27, common_pid: 2305 } hitcount: 1
+ { lat: 27, common_pid: 2300 } hitcount: 1
+ { lat: 27, common_pid: 2302 } hitcount: 3
+ { lat: 28, common_pid: 2306 } hitcount: 1
+ { lat: 28, common_pid: 2302 } hitcount: 4
+ { lat: 29, common_pid: 2302 } hitcount: 1
+ { lat: 29, common_pid: 2300 } hitcount: 2
+ { lat: 29, common_pid: 2306 } hitcount: 1
+ { lat: 29, common_pid: 2304 } hitcount: 1
+ { lat: 30, common_pid: 2302 } hitcount: 4
+ { lat: 31, common_pid: 2302 } hitcount: 6
+ { lat: 32, common_pid: 2302 } hitcount: 1
+ { lat: 33, common_pid: 2299 } hitcount: 1
+ { lat: 33, common_pid: 2302 } hitcount: 3
+ { lat: 34, common_pid: 2302 } hitcount: 2
+ { lat: 35, common_pid: 2302 } hitcount: 1
+ { lat: 35, common_pid: 2304 } hitcount: 1
+ { lat: 36, common_pid: 2302 } hitcount: 4
+ { lat: 37, common_pid: 2302 } hitcount: 6
+ { lat: 38, common_pid: 2302 } hitcount: 2
+ { lat: 39, common_pid: 2302 } hitcount: 2
+ { lat: 39, common_pid: 2304 } hitcount: 1
+ { lat: 40, common_pid: 2304 } hitcount: 2
+ { lat: 40, common_pid: 2302 } hitcount: 5
+ { lat: 41, common_pid: 2304 } hitcount: 1
+ { lat: 41, common_pid: 2302 } hitcount: 8
+ { lat: 42, common_pid: 2302 } hitcount: 6
+ { lat: 42, common_pid: 2304 } hitcount: 1
+ { lat: 43, common_pid: 2302 } hitcount: 3
+ { lat: 43, common_pid: 2304 } hitcount: 4
+ { lat: 44, common_pid: 2302 } hitcount: 6
+ { lat: 45, common_pid: 2302 } hitcount: 5
+ { lat: 46, common_pid: 2302 } hitcount: 5
+ { lat: 47, common_pid: 2302 } hitcount: 7
+ { lat: 48, common_pid: 2301 } hitcount: 1
+ { lat: 48, common_pid: 2302 } hitcount: 9
+ { lat: 49, common_pid: 2302 } hitcount: 3
+ { lat: 50, common_pid: 2302 } hitcount: 1
+ { lat: 50, common_pid: 2301 } hitcount: 1
+ { lat: 51, common_pid: 2302 } hitcount: 2
+ { lat: 51, common_pid: 2301 } hitcount: 1
+ { lat: 61, common_pid: 2302 } hitcount: 1
+ { lat: 110, common_pid: 2302 } hitcount: 1
+
+ Totals:
+ Hits: 89565
+ Entries: 158
+ Dropped: 0
This doesn't tell us any information about how late cyclictest may have
woken up, but it does show us a nice histogram of how long it took from
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index b58c10b04e27..306997941ba1 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -18,6 +18,7 @@ Linux Tracing Technologies
events-nmi
events-msr
mmiotrace
+ histogram
hwlat_detector
intel_th
stm
diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst
index 3e0f971b12de..8bfc75c90806 100644
--- a/Documentation/trace/kprobetrace.rst
+++ b/Documentation/trace/kprobetrace.rst
@@ -18,7 +18,7 @@ To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y.
Similar to the events tracer, this doesn't need to be activated via
current_tracer. Instead of that, add probe points via
/sys/kernel/debug/tracing/kprobe_events, and enable it via
-/sys/kernel/debug/tracing/events/kprobes/<EVENT>/enabled.
+/sys/kernel/debug/tracing/events/kprobes/<EVENT>/enable.
Synopsis of kprobe_events
@@ -81,9 +81,9 @@ Per-probe event filtering feature allows you to set different filter on each
probe and gives you what arguments will be shown in trace buffer. If an event
name is specified right after 'p:' or 'r:' in kprobe_events, it adds an event
under tracing/events/kprobes/<EVENT>, at the directory you can see 'id',
-'enabled', 'format' and 'filter'.
+'enable', 'format', 'filter' and 'trigger'.
-enabled:
+enable:
You can enable/disable the probe by writing 1 or 0 on it.
format:
@@ -95,6 +95,9 @@ filter:
id:
This shows the id of this probe event.
+trigger:
+ This allows you to install trigger commands which are executed when the
+ event is hit (for details, see Documentation/trace/events.rst, section 6).
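+
+ As a quick sketch (the probe name ``myopen`` below is made up for
+ illustration, not taken from this patch), one could create a kprobe event,
+ enable it, and attach a trigger to it::
+
+  # echo 'p:myopen do_sys_open' > /sys/kernel/debug/tracing/kprobe_events
+  # echo 1 > /sys/kernel/debug/tracing/events/kprobes/myopen/enable
+  # echo 'hist:keys=common_pid' > /sys/kernel/debug/tracing/events/kprobes/myopen/trigger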
Event Profiling
---------------
diff --git a/Documentation/trace/uprobetracer.rst b/Documentation/trace/uprobetracer.rst
index 98d3f692957a..d0822811527a 100644
--- a/Documentation/trace/uprobetracer.rst
+++ b/Documentation/trace/uprobetracer.rst
@@ -13,7 +13,7 @@ To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y.
Similar to the kprobe-event tracer, this doesn't need to be activated via
current_tracer. Instead of that, add probe points via
/sys/kernel/debug/tracing/uprobe_events, and enable it via
-/sys/kernel/debug/tracing/events/uprobes/<EVENT>/enabled.
+/sys/kernel/debug/tracing/events/uprobes/<EVENT>/enable.
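+
+For illustration only (the binary path and the offset below are hypothetical)::
+
+  # echo 'p:myuprobe /bin/bash:0x4245c0' > /sys/kernel/debug/tracing/uprobe_events
+  # echo 1 > /sys/kernel/debug/tracing/events/uprobes/myuprobe/enable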
However unlike kprobe-event tracer, the uprobe event interface expects the
user to calculate the offset of the probepoint in the object.
diff --git a/Documentation/translations/index.rst b/Documentation/translations/index.rst
new file mode 100644
index 000000000000..7f77c52d33aa
--- /dev/null
+++ b/Documentation/translations/index.rst
@@ -0,0 +1,13 @@
+.. _translations:
+
+============
+Translations
+============
+
+.. toctree::
+ :maxdepth: 1
+
+ zh_CN/index
+ it_IT/index
+ ko_KR/index
+ ja_JP/index
diff --git a/Documentation/translations/it_IT/disclaimer-ita.rst b/Documentation/translations/it_IT/disclaimer-ita.rst
new file mode 100644
index 000000000000..d68e52de6a5d
--- /dev/null
+++ b/Documentation/translations/it_IT/disclaimer-ita.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. note::
+ This document is maintained by Federico Vaga <federico.vaga@vaga.pv.it>.
+ If you find any difference between this document and the original file or a
+ problem with the translation, please contact the maintainer of this file.
+ The following people helped to translate or review:
+ Alessia Mantegazza <amantegazza@vaga.pv.it>
+
+.. warning::
+ The purpose of this file is to make the document easier to read and
+ understand for Italian speakers; it is not intended as a fork. So, if you
+ have any comments or updates for this file, please try to update the
+ original English file first.
diff --git a/Documentation/translations/it_IT/doc-guide/index.rst b/Documentation/translations/it_IT/doc-guide/index.rst
new file mode 100644
index 000000000000..7a6562b547ee
--- /dev/null
+++ b/Documentation/translations/it_IT/doc-guide/index.rst
@@ -0,0 +1,24 @@
+.. include:: ../disclaimer-ita.rst
+
+.. note:: Per leggere la documentazione originale in inglese:
+ :ref:`Documentation/doc-guide/index.rst <doc_guide>`
+
+.. _it_doc_guide:
+
+==========================================
+Come scrivere la documentazione del kernel
+==========================================
+
+.. toctree::
+ :maxdepth: 1
+
+ sphinx.rst
+ kernel-doc.rst
+ parse-headers.rst
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/translations/it_IT/doc-guide/kernel-doc.rst b/Documentation/translations/it_IT/doc-guide/kernel-doc.rst
new file mode 100644
index 000000000000..2bf1c1e2f394
--- /dev/null
+++ b/Documentation/translations/it_IT/doc-guide/kernel-doc.rst
@@ -0,0 +1,554 @@
+.. include:: ../disclaimer-ita.rst
+
+.. note:: Per leggere la documentazione originale in inglese:
+ :ref:`Documentation/doc-guide/index.rst <doc_guide>`
+
+.. _it_kernel_doc:
+
+Scrivere i commenti in kernel-doc
+=================================
+
+Nei file sorgenti del kernel Linux potrete trovare commenti di documentazione
+strutturati secondo il formato kernel-doc. Essi possono descrivere funzioni,
+tipi di dati, e l'architettura del codice.
+
+.. note:: Il formato kernel-doc può sembrare simile a gtk-doc o Doxygen ma
+ in realtà è molto differente per ragioni storiche. I sorgenti del kernel
+ contengono decine di migliaia di commenti kernel-doc. Siete pregati
+ d'attenervi allo stile qui descritto.
+
+La struttura kernel-doc è estratta a partire dai commenti; da questi viene
+generato il `dominio Sphinx per il C`_ con un'adeguata descrizione per le
+funzioni ed i tipi di dato con i loro relativi collegamenti. Le descrizioni
+vengono filtrate per cercare i riferimenti ed i marcatori.
+
+Vedere di seguito per maggiori dettagli.
+
+.. _`dominio Sphinx per il C`: http://www.sphinx-doc.org/en/stable/domains.html
+
+Tutte le funzioni esportate verso i moduli esterni utilizzando
+``EXPORT_SYMBOL`` o ``EXPORT_SYMBOL_GPL`` dovrebbero avere un commento
+kernel-doc. Quando l'intenzione è di utilizzarle nei moduli, anche le funzioni
+e le strutture dati nei file d'intestazione dovrebbero avere dei commenti
+kernel-doc.
+
+È considerata una buona pratica quella di fornire una documentazione formattata
+secondo kernel-doc per le funzioni che sono visibili da altri file del kernel
+(ovvero, che non siano dichiarate utilizzando ``static``). Raccomandiamo,
+inoltre, di fornire una documentazione kernel-doc anche per procedure private
+(ovvero, dichiarate "static") al fine di fornire una struttura più coerente
+dei sorgenti. Quest'ultima raccomandazione ha una priorità più bassa ed è a
+discrezione del manutentore (MAINTAINER) del file sorgente.
+
+Le strutture dati visibili nei file di intestazione dovrebbero essere anch'esse
+documentate utilizzando commenti formattati con kernel-doc.
+
+Come formattare i commenti kernel-doc
+-------------------------------------
+
+I commenti kernel-doc iniziano con il marcatore ``/**``. Il programma
+``kernel-doc`` estrarrà i commenti marchiati in questo modo. Il resto
+del commento è formattato come un normale commento multilinea, ovvero
+con un asterisco all'inizio d'ogni riga e che si conclude con ``*/``
+su una riga separata.
+
+I commenti kernel-doc di funzioni e tipi dovrebbero essere posizionati
+appena sopra la funzione od il tipo che descrivono. Questo allo scopo di
+aumentare la probabilità che chi cambia il codice si ricordi di aggiornare
+anche la documentazione. I commenti kernel-doc di tipo più generale possono
+essere posizionati ovunque nel file.
+
+Al fine di verificare che i commenti siano formattati correttamente, potete
+eseguire il programma ``kernel-doc`` con un livello di verbosità alto e senza
+che questo produca alcuna documentazione. Per esempio::
+
+ scripts/kernel-doc -v -none drivers/foo/bar.c
+
+Il formato della documentazione è verificato dalla procedura di generazione
+del kernel quando viene richiesto di effettuare dei controlli extra con GCC::
+
+ make W=n
+
+Documentare le funzioni
+------------------------
+
+Generalmente il formato di un commento kernel-doc per funzioni e
+macro simil-funzioni è il seguente::
+
+ /**
+ * function_name() - Brief description of function.
+ * @arg1: Describe the first argument.
+ * @arg2: Describe the second argument.
+ * One can provide multiple line descriptions
+ * for arguments.
+ *
+ * A longer description, with more discussion of the function function_name()
+ * that might be useful to those using or modifying it. Begins with an
+ * empty comment line, and may include additional embedded empty
+ * comment lines.
+ *
+ * The longer description may have multiple paragraphs.
+ *
+ * Context: Describes whether the function can sleep, what locks it takes,
+ * releases, or expects to be held. It can extend over multiple
+ * lines.
+ * Return: Describe the return value of foobar.
+ *
+ * The return value description can also have multiple paragraphs, and should
+ * be placed at the end of the comment block.
+ */
+
+La descrizione introduttiva (*brief description*) che segue il nome della
+funzione può continuare su righe successive e termina con la descrizione di
+un argomento, una linea di commento vuota, oppure la fine del commento.
+
+Parametri delle funzioni
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Ogni argomento di una funzione dovrebbe essere descritto in ordine, subito
+dopo la descrizione introduttiva. Non lasciare righe vuote né fra la
+descrizione introduttiva e quella degli argomenti, né fra gli argomenti.
+
+Ogni ``@argument:`` può estendersi su più righe.
+
+.. note::
+
+ Se la descrizione di ``@argument:`` si estende su più righe,
+ la continuazione dovrebbe iniziare alla stessa colonna della riga
+ precedente::
+
+ * @argument: some long description
+ * that continues on next lines
+
+ oppure::
+
+ * @argument:
+ * some long description
+ * that continues on next lines
+
+Se una funzione ha un numero variabile di argomenti, la sua descrizione
+dovrebbe essere scritta con la notazione kernel-doc::
+
+ * @...: description
+
+Contesto delle funzioni
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Il contesto in cui le funzioni vengono chiamate viene descritto in una
+sezione chiamata ``Context``. Questo dovrebbe informare sulla possibilità
+che una funzione dorma (*sleep*) o che possa essere chiamata in un contesto
+d'interruzione, così come i *lock* che prende, rilascia e che si aspetta che
+vengano presi dal chiamante.
+
+Esempi::
+
+ * Context: Any context.
+ * Context: Any context. Takes and releases the RCU lock.
+ * Context: Any context. Expects <lock> to be held by caller.
+ * Context: Process context. May sleep if @gfp flags permit.
+ * Context: Process context. Takes and releases <mutex>.
+ * Context: Softirq or process context. Takes and releases <lock>, BH-safe.
+ * Context: Interrupt context.
+
+Valore di ritorno
+~~~~~~~~~~~~~~~~~
+
+Il valore di ritorno, se c'è, viene descritto in una sezione dedicata di nome
+``Return``.
+
+.. note::
+
+ #) La descrizione multiriga non riconosce le interruzioni di riga, per cui
+ se provate a formattare bene il vostro testo come nel seguente esempio::
+
+ * Return:
+ * 0 - OK
+ * -EINVAL - invalid argument
+ * -ENOMEM - out of memory
+
+ le righe verranno unite e il risultato sarà::
+
+ Return: 0 - OK -EINVAL - invalid argument -ENOMEM - out of memory
+
+ Quindi, se volete che le righe vengano effettivamente generate, dovete
+ utilizzare una lista ReST, ad esempio::
+
+ * Return:
+ * * 0 - OK to runtime suspend the device
+ * * -EBUSY - Device should not be runtime suspended
+
+ #) Se il vostro testo ha delle righe che iniziano con una frase seguita dai
+ due punti, allora ognuna di queste frasi verrà considerata come il nome
+ di una nuova sezione, e probabilmente non produrrà gli effetti desiderati.
+
+Documentare strutture, unioni ed enumerazioni
+---------------------------------------------
+
+Generalmente il formato di un commento kernel-doc per struct, union ed enum è::
+
+ /**
+ * struct struct_name - Brief description.
+ * @member1: Description of member1.
+ * @member2: Description of member2.
+ * One can provide multiple line descriptions
+ * for members.
+ *
+ * Description of the structure.
+ */
+
+Nell'esempio qui sopra, potete sostituire ``struct`` con ``union`` o ``enum``
+per descrivere unioni ed enumerati. ``member`` viene usato per indicare i
+membri di strutture ed unioni, ma anche i valori di un tipo enumerato.
+
+La descrizione introduttiva (*brief description*) che segue il nome della
+struttura può continuare su righe successive e termina con la descrizione di
+un membro, una linea di commento vuota, oppure la fine del commento.
+
+Membri
+~~~~~~
+
+I membri di strutture, unioni ed enumerati devono essere documentati come i
+parametri delle funzioni; seguono la descrizione introduttiva e possono
+estendersi su più righe.
+
+All'interno d'una struttura o d'un'unione, potete utilizzare le etichette
+``private:`` e ``public:``. I campi che sono nell'area ``private:`` non
+verranno inclusi nella documentazione finale.
+
+Le etichette ``private:`` e ``public:`` devono essere messe subito dopo
+il marcatore di un commento ``/*``. Opzionalmente, possono includere commenti
+fra ``:`` e il marcatore di fine commento ``*/``.
+
+Esempio::
+
+ /**
+ * struct my_struct - short description
+ * @a: first member
+ * @b: second member
+ * @d: fourth member
+ *
+ * Longer description
+ */
+ struct my_struct {
+ int a;
+ int b;
+ /* private: internal use only */
+ int c;
+ /* public: the next one is public */
+ int d;
+ };
+
+Strutture ed unioni annidate
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+È possibile documentare strutture ed unioni annidate, ad esempio::
+
+ /**
+ * struct nested_foobar - a struct with nested unions and structs
+ * @memb1: first member of anonymous union/anonymous struct
+ * @memb2: second member of anonymous union/anonymous struct
+ * @memb3: third member of anonymous union/anonymous struct
+ * @memb4: fourth member of anonymous union/anonymous struct
+ * @bar: non-anonymous union
+ * @bar.st1: struct st1 inside @bar
+ * @bar.st2: struct st2 inside @bar
+ * @bar.st1.memb1: first member of struct st1 on union bar
+ * @bar.st1.memb2: second member of struct st1 on union bar
+ * @bar.st2.memb1: first member of struct st2 on union bar
+ * @bar.st2.memb2: second member of struct st2 on union bar
+ */
+ struct nested_foobar {
+   /* Anonymous union/struct */
+   union {
+     struct {
+       int memb1;
+       int memb2;
+     };
+     struct {
+       void *memb3;
+       int memb4;
+     };
+   };
+   union {
+     struct {
+       int memb1;
+       int memb2;
+     } st1;
+     struct {
+       void *memb1;
+       int memb2;
+     } st2;
+   } bar;
+ };
+
+.. note::
+
+ #) Quando documentate una struttura od unione annidata, ad esempio
+ di nome ``foo``, il suo campo ``bar`` dev'essere documentato
+ usando ``@foo.bar:``
+ #) Quando la struttura od unione annidata è anonima, il suo campo
+ ``bar`` dev'essere documentato usando ``@bar:``
+
+Commenti in linea per la documentazione dei membri
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+I membri d'una struttura possono essere documentati in linea all'interno
+della definizione stessa. Ci sono due stili: una singola riga di commento
+che inizia con ``/**`` e finisce con ``*/``; commenti multi riga come
+qualsiasi altro commento kernel-doc::
+
+ /**
+ * struct foo - Brief description.
+ * @foo: The Foo member.
+ */
+ struct foo {
+ int foo;
+ /**
+ * @bar: The Bar member.
+ */
+ int bar;
+ /**
+ * @baz: The Baz member.
+ *
+ * Here, the member description may contain several paragraphs.
+ */
+ int baz;
+ union {
+ /** @foobar: Single line description. */
+ int foobar;
+ };
+ /** @bar2: Description for struct @bar2 inside @foo */
+ struct {
+ /**
+ * @bar2.barbar: Description for @barbar inside @foo.bar2
+ */
+ int barbar;
+ } bar2;
+ };
+
+
+Documentazione dei tipi di dato
+-------------------------------
+Generalmente il formato di un commento kernel-doc per typedef è
+il seguente::
+
+ /**
+ * typedef type_name - Brief description.
+ *
+ * Description of the type.
+ */
+
+Anche i tipi di dato per prototipi di funzione possono essere documentati::
+
+ /**
+ * typedef type_name - Brief description.
+ * @arg1: description of arg1
+ * @arg2: description of arg2
+ *
+ * Description of the type.
+ *
+ * Context: Locking context.
+ * Return: Meaning of the return value.
+ */
+ typedef void (*type_name)(struct v4l2_ctrl *arg1, void *arg2);
+
+Marcatori e riferimenti
+-----------------------
+
+All'interno dei commenti di tipo kernel-doc vengono riconosciuti i seguenti
+*pattern* che vengono convertiti in marcatori reStructuredText ed in riferimenti
+del `dominio Sphinx per il C`_.
+
+.. attention:: Questi sono riconosciuti **solo** all'interno di commenti
+ kernel-doc, e **non** all'interno di documenti reStructuredText.
+
+``funcname()``
+ Riferimento ad una funzione.
+
+``@parameter``
+ Nome di un parametro di una funzione (nessun riferimento, solo formattazione).
+
+``%CONST``
+ Il nome di una costante (nessun riferimento, solo formattazione)
+
+````literal````
+ Un blocco di testo che deve essere riportato così com'è. La rappresentazione
+ finale utilizzerà caratteri a ``spaziatura fissa``.
+
+ Questo è utile se dovete utilizzare caratteri speciali che altrimenti
+ potrebbero assumere un significato diverso in kernel-doc o in reStructuredText.
+
+ Questo è particolarmente utile se dovete scrivere qualcosa come ``%ph``
+ all'interno della descrizione di una funzione.
+
+``$ENVVAR``
+ Il nome di una variabile d'ambiente (nessun riferimento, solo formattazione).
+
+``&struct name``
+ Riferimento ad una struttura.
+
+``&enum name``
+ Riferimento ad un'enumerazione.
+
+``&typedef name``
+ Riferimento ad un tipo di dato.
+
+``&struct_name->member`` o ``&struct_name.member``
+ Riferimento ad un membro di una struttura o di un'unione. Il riferimento sarà
+ la struttura o l'unione, non il membro.
+
+``&name``
+ Un generico riferimento ad un tipo. Usate, preferibilmente, il riferimento
+ completo come descritto sopra. Questo è dedicato ai commenti obsoleti.
+
+Riferimenti usando reStructuredText
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Per fare riferimento a funzioni e tipi di dato definiti nei commenti kernel-doc
+all'interno dei documenti reStructuredText, utilizzate i riferimenti dal
+`dominio Sphinx per il C`_. Per esempio::
+
+ See function :c:func:`foo` and struct/union/enum/typedef :c:type:`bar`.
+
+Nonostante il riferimento ai tipi di dato funzioni col solo nome,
+ovvero senza specificare struct/union/enum/typedef, potreste preferire il
+seguente::
+
+ See :c:type:`struct foo <foo>`.
+ See :c:type:`union bar <bar>`.
+ See :c:type:`enum baz <baz>`.
+ See :c:type:`typedef meh <meh>`.
+
+Questo produce dei collegamenti migliori, ed è in linea con il modo in cui
+kernel-doc gestisce i riferimenti.
+
+Per maggiori informazioni, siete pregati di consultare la documentazione
+del `dominio Sphinx per il C`_.
+
+Commenti per una documentazione generale
+----------------------------------------
+
+Al fine d'avere il codice ed i commenti nello stesso file, potete includere
+dei blocchi di documentazione kernel-doc con un formato libero invece
+che nel formato specifico per funzioni, strutture, unioni, enumerati o tipi
+di dato. Per esempio, questo tipo di commento potrebbe essere usato per la
+spiegazione delle operazioni di un driver o di una libreria.
+
+Questo s'ottiene utilizzando la parola chiave ``DOC:`` a cui viene associato
+un titolo.
+
+Generalmente il formato di un commento generico o di visione d'insieme è
+il seguente::
+
+ /**
+ * DOC: Theory of Operation
+ *
+ * The whizbang foobar is a dilly of a gizmo. It can do whatever you
+ * want it to do, at any time. It reads your mind. Here's how it works.
+ *
+ * foo bar splat
+ *
+ * The only drawback to this gizmo is that it can sometimes damage
+ * hardware, software, or its subject(s).
+ */
+
+Il titolo che segue ``DOC:`` funziona da intestazione all'interno del file
+sorgente, ma anche come identificatore per l'estrazione di questi commenti di
+documentazione. Quindi, il titolo dev'essere unico all'interno del file.
+
+Includere i commenti di tipo kernel-doc
+=======================================
+
+I commenti di documentazione possono essere inclusi in un qualsiasi documento
+di tipo reStructuredText mediante l'apposita direttiva nell'estensione
+kernel-doc per Sphinx.
+
+Le direttive kernel-doc sono nel formato::
+
+ .. kernel-doc:: source
+ :option:
+
+Il campo *source* è il percorso ad un file sorgente, relativo alla cartella
+principale dei sorgenti del kernel. La direttiva supporta le seguenti opzioni:
+
+export: *[source-pattern ...]*
+ Include la documentazione per tutte le funzioni presenti nel file sorgente
+ (*source*) che sono state esportate utilizzando ``EXPORT_SYMBOL`` o
+ ``EXPORT_SYMBOL_GPL`` in *source* o in qualsiasi altro *source-pattern*
+ specificato.
+
+ Il campo *source-pattern* è utile quando i commenti kernel-doc sono stati
+ scritti nei file d'intestazione, mentre ``EXPORT_SYMBOL`` e
+ ``EXPORT_SYMBOL_GPL`` si trovano vicino alla definizione delle funzioni.
+
+ Esempi::
+
+ .. kernel-doc:: lib/bitmap.c
+ :export:
+
+ .. kernel-doc:: include/net/mac80211.h
+ :export: net/mac80211/*.c
+
+internal: *[source-pattern ...]*
+ Include la documentazione per tutte le funzioni ed i tipi presenti nel file
+ sorgente (*source*) che **non** sono stati esportati utilizzando
+ ``EXPORT_SYMBOL`` o ``EXPORT_SYMBOL_GPL`` né in *source* né in qualsiasi
+ altro *source-pattern* specificato.
+
+ Esempio::
+
+ .. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
+ :internal:
+
+doc: *title*
+ Include la documentazione del paragrafo ``DOC:`` identificato dal titolo
+ (*title*) all'interno del file sorgente (*source*). Gli spazi in *title* sono
+ permessi; non virgolettate *title*. Il campo *title* è utilizzato per
+ identificare un paragrafo e per questo non viene incluso nella documentazione
+ finale. Verificate d'avere l'intestazione appropriata nei documenti
+ reStructuredText.
+
+ Esempio::
+
+ .. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
+ :doc: High Definition Audio over HDMI and Display Port
+
+functions: *function* *[...]*
+ Dal file sorgente (*source*) include la documentazione per le funzioni
+ elencate (*function*).
+
+ Esempio::
+
+ .. kernel-doc:: lib/bitmap.c
+ :functions: bitmap_parselist bitmap_parselist_user
+
+Senza alcuna opzione, la direttiva kernel-doc include tutti i commenti di
+documentazione presenti nel file sorgente (*source*).
+
+L'estensione kernel-doc fa parte dei sorgenti del kernel, la si può trovare
+in ``Documentation/sphinx/kerneldoc.py``. Internamente, viene utilizzato
+lo script ``scripts/kernel-doc`` per estrarre i commenti di documentazione
+dai file sorgenti.
+
+Come utilizzare kernel-doc per generare pagine man
+--------------------------------------------------
+
+Se volete utilizzare kernel-doc solo per generare delle pagine man, potete
+farlo direttamente dai sorgenti del kernel::
+
+ $ scripts/kernel-doc -man $(git grep -l '/\*\*' -- :^Documentation :^tools) | scripts/split-man.pl /tmp/man
diff --git a/Documentation/translations/it_IT/doc-guide/parse-headers.rst b/Documentation/translations/it_IT/doc-guide/parse-headers.rst
new file mode 100644
index 000000000000..b38918ca637e
--- /dev/null
+++ b/Documentation/translations/it_IT/doc-guide/parse-headers.rst
@@ -0,0 +1,196 @@
+.. include:: ../disclaimer-ita.rst
+
+.. note:: Per leggere la documentazione originale in inglese:
+ :ref:`Documentation/doc-guide/index.rst <doc_guide>`
+
+=========================================
+Includere i file di intestazione uAPI
+=========================================
+
+Qualche volta è utile includere dei file di intestazione e degli esempi di codice C
+al fine di descrivere l'API per lo spazio utente e per generare dei riferimenti
+fra il codice e la documentazione. Aggiungere i riferimenti ai file dell'API
+dello spazio utente ha ulteriori vantaggi: Sphinx genererà dei messaggi
+d'avviso se un simbolo non viene trovato nella documentazione. Questo permette
+di mantenere allineata la documentazione della uAPI (API spazio utente)
+con le modifiche del kernel.
+Il programma :ref:`parse_headers.pl <it_parse_headers>` genera questi riferimenti.
+Esso dev'essere invocato attraverso un Makefile, mentre si genera la
+documentazione. Per avere un esempio su come utilizzarlo all'interno del kernel
+consultate ``Documentation/media/Makefile``.
+
+.. _it_parse_headers:
+
+parse_headers.pl
+^^^^^^^^^^^^^^^^
+
+NOME
+****
+
+
+parse_headers.pl - analizza i file C al fine di identificare funzioni,
+strutture, enumerati e definizioni, e creare riferimenti per Sphinx
+
+SINTASSI
+********
+
+
+\ **parse_headers.pl**\ [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
+
+Dove <options> può essere: --debug, --usage o --help.
+
+
+OPZIONI
+*******
+
+
+
+\ **--debug**\
+
+ Lo script viene messo in modalità verbosa, utile per il debugging.
+
+
+\ **--usage**\
+
+ Mostra un messaggio d'aiuto breve e termina.
+
+
+\ **--help**\
+
+ Mostra un messaggio d'aiuto dettagliato e termina.
+
+
+DESCRIZIONE
+***********
+
+Converte un file d'intestazione o un file sorgente C (C_FILE) in un testo
+ReStructuredText incluso mediante il blocco ``.. parsed-literal::``
+con riferimenti alla documentazione che descrive l'API. Opzionalmente,
+il programma accetta anche un altro file (EXCEPTIONS_FILE) che
+descrive quali elementi debbano essere ignorati o il cui riferimento
+deve puntare ad un elemento diverso dal predefinito.
+
+Il file generato sarà disponibile in (OUT_FILE).
+
+Il programma è capace di identificare *define*, funzioni, strutture,
+tipi di dato, enumerati e valori di enumerati, e di creare i riferimenti
+per ognuno di loro. Inoltre, esso è capace di distinguere le #define
+utilizzate per specificare i comandi ioctl di Linux.
+
+Il file EXCEPTIONS_FILE contiene due tipi di dichiarazioni:
+\ **ignore**\ o \ **replace**\ .
+
+La sintassi per ignore è:
+
+ignore \ **tipo**\ \ **nome**\
+
+La dichiarazione \ **ignore**\ significa che non verrà generato alcun
+riferimento per il simbolo \ **nome**\ di tipo \ **tipo**\ .
+
+
+La sintassi per replace è:
+
+replace \ **tipo**\ \ **nome**\ \ **nuovo_valore**\
+
+La dichiarazione \ **replace**\ significa che verrà generato un
+riferimento per il simbolo \ **nome**\ di tipo \ **tipo**\ , ma, invece
+di utilizzare il valore predefinito, verrà utilizzato il valore
+\ **nuovo_valore**\ .
+
+Per entrambe le dichiarazioni, il \ **tipo**\ può essere uno dei seguenti:
+
+
+\ **ioctl**\
+
+ La dichiarazione ignore o replace verrà applicata su definizioni di ioctl
+ come la seguente:
+
+ #define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
+
+
+
+\ **define**\
+
+ La dichiarazione ignore o replace verrà applicata su una qualsiasi #define
+ trovata in C_FILE.
+
+
+
+\ **typedef**\
+
+ La dichiarazione ignore o replace verrà applicata ad una dichiarazione typedef
+ in C_FILE.
+
+
+
+\ **struct**\
+
+ La dichiarazione ignore o replace verrà applicata ai nomi di strutture
+ in C_FILE.
+
+
+
+\ **enum**\
+
+ La dichiarazione ignore o replace verrà applicata ai nomi di enumerati
+ in C_FILE.
+
+
+
+\ **symbol**\
+
+ La dichiarazione ignore o replace verrà applicata ai nomi di valori di
+ enumerati in C_FILE.
+
+ Per le dichiarazioni di tipo replace, il campo \ **nuovo_valore**\ utilizzerà
+ automaticamente i riferimenti :c:type: per \ **typedef**\ , \ **enum**\ e
+ \ **struct**\. Invece, utilizzerà :ref: per \ **ioctl**\ , \ **define**\ e
+ \ **symbol**\. Il tipo di riferimento può essere definito esplicitamente
+ nella dichiarazione stessa.
+
+
+ESEMPI
+******
+
+
+ignore define _VIDEODEV2_H
+
+
+Ignora una definizione #define _VIDEODEV2_H nel file C_FILE.
+
+ignore symbol PRIVATE
+
+
+In un enumerato come il seguente:
+
+enum foo { BAR1, BAR2, PRIVATE };
+
+Non genererà alcun riferimento per \ **PRIVATE**\ .
+
+replace symbol BAR1 :c:type:\`foo\`
+replace symbol BAR2 :c:type:\`foo\`
+
+
+In un enumerato come il seguente:
+
+enum foo { BAR1, BAR2, PRIVATE };
+
+Genererà un riferimento ai valori BAR1 e BAR2 dal simbolo foo nel dominio C.
+
+
+BUGS
+****
+
+Segnalate ogni malfunzionamento a Mauro Carvalho Chehab <mchehab@s-opensource.com>
+
+
+COPYRIGHT
+*********
+
+
+Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@s-opensource.com>.
+
+Licenza GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
+
+Questo è software libero: siete liberi di cambiarlo e ridistribuirlo.
+Non c'è alcuna garanzia, nei limiti permessi dalla legge.
diff --git a/Documentation/translations/it_IT/doc-guide/sphinx.rst b/Documentation/translations/it_IT/doc-guide/sphinx.rst
new file mode 100644
index 000000000000..474b7e127893
--- /dev/null
+++ b/Documentation/translations/it_IT/doc-guide/sphinx.rst
@@ -0,0 +1,457 @@
+.. include:: ../disclaimer-ita.rst
+
+.. note:: Per leggere la documentazione originale in inglese:
+ :ref:`Documentation/doc-guide/index.rst <doc_guide>`
+
+Introduzione
+============
+
+Il kernel Linux usa `Sphinx`_ per la generazione della documentazione a partire
+dai file `reStructuredText`_ che si trovano nella cartella ``Documentation``.
+Per generare la documentazione in HTML o PDF, usate i comandi ``make htmldocs`` o
+``make pdfdocs``. La documentazione così generata sarà disponibile nella
+cartella ``Documentation/output``.
+
+.. _Sphinx: http://www.sphinx-doc.org/
+.. _reStructuredText: http://docutils.sourceforge.net/rst.html
+
+I file reStructuredText possono contenere delle direttive che permettono di
+includere i commenti di documentazione, o di tipo kernel-doc, dai file
+sorgenti.
+Solitamente questi commenti sono utilizzati per descrivere le funzioni, i tipi
+e l'architettura del codice. I commenti di tipo kernel-doc hanno una struttura
+e formato speciale, ma a parte questo vengono processati come reStructuredText.
+
+Inoltre, ci sono migliaia di altri documenti in formato testo sparsi nella
+cartella ``Documentation``. Alcuni di questi verranno probabilmente convertiti,
+nel tempo, in formato reStructuredText, ma la maggior parte di questi rimarranno
+in formato testo.
+
+.. _it_sphinx_install:
+
+Installazione Sphinx
+====================
+
+I marcatori ReST utilizzati nei file in Documentation/ sono pensati per essere
+processati da ``Sphinx`` nella versione 1.3 o superiore. Se desiderate produrre
+un documento PDF è raccomandato l'utilizzo di una versione superiore alla 1.4.6.
+
+Esiste uno script che verifica i requisiti Sphinx. Per ulteriori dettagli
+consultate :ref:`it_sphinx-pre-install`.
+
+La maggior parte delle distribuzioni Linux fornisce Sphinx, ma l'insieme dei
+programmi e librerie è fragile e non è raro che dopo un aggiornamento di
+Sphinx, o qualche altro pacchetto Python, la documentazione non venga più
+generata correttamente.
+
+Un modo per evitare questo genere di problemi è quello di utilizzare una
+versione diversa da quella fornita dalla vostra distribuzione. Per fare questo,
+vi raccomandiamo di installare Sphinx dentro ad un ambiente virtuale usando
+``virtualenv-3`` o ``virtualenv`` a seconda di come Python 3 è stato
+pacchettizzato dalla vostra distribuzione.
+
+.. note::
+
+ #) Le versioni di Sphinx inferiori alla 1.5 non funzionano bene
+ con il pacchetto Python docutils versione 0.13.1 o superiore.
+ Se volete usare queste versioni, allora dovrete eseguire
+ ``pip install 'docutils==0.12'``.
+
+ #) Viene raccomandato l'uso del tema RTD per la documentazione in HTML.
+ A seconda della versione di Sphinx, potrebbe essere necessaria
+ l'installazione tramite il comando ``pip install sphinx_rtd_theme``.
+
+ #) Alcune pagine ReST contengono delle formule matematiche. A causa del
+ modo in cui Sphinx funziona, queste espressioni sono scritte
+ utilizzando LaTeX. Per una corretta interpretazione, è necessario aver
+ installato texlive con i pacchetti amsfonts e amsmath.
+
+Riassumendo, se volete installare la versione 1.4.9 di Sphinx dovete eseguire::
+
+ $ virtualenv sphinx_1.4
+ $ . sphinx_1.4/bin/activate
+ (sphinx_1.4) $ pip install -r Documentation/sphinx/requirements.txt
+
+Dopo aver eseguito ``. sphinx_1.4/bin/activate``, il prompt cambierà per
+indicare che state usando il nuovo ambiente. Se aprite una nuova sessione,
+prima di generare la documentazione, dovrete rieseguire questo comando per
+rientrare nell'ambiente virtuale.
+
+Generazione d'immagini
+----------------------
+
+Il meccanismo che genera la documentazione del kernel contiene un'estensione
+capace di gestire immagini in formato Graphviz e SVG (per maggior informazioni
+vedere :ref:`it_sphinx_kfigure`).
+
+Per far sì che questo funzioni, dovete installare entrambi i pacchetti
+Graphviz e ImageMagick. Il sistema di generazione della documentazione è in
+grado di procedere anche se questi pacchetti non sono installati, ma il
+risultato, ovviamente, non includerà le immagini.
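+
+A titolo puramente indicativo (i nomi dei pacchetti variano da distribuzione a
+distribuzione), su un sistema basato su Debian si potrebbe eseguire::
+
+  $ sudo apt-get install graphviz imagemagick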
+
+Generazione in PDF e LaTeX
+--------------------------
+
+Al momento, la generazione di questi documenti è supportata solo dalle
+versioni di Sphinx superiori alla 1.4.
+
+Per la generazione di PDF e LaTeX, avrete bisogno anche del pacchetto
+``XeLaTeX`` nella versione 3.14159265.
+
+Per alcune distribuzioni Linux potrebbe essere necessario installare
+anche una serie di pacchetti ``texlive`` in modo da fornire il supporto
+minimo per il funzionamento di ``XeLaTeX``.
+
+.. _it_sphinx-pre-install:
+
+Verificare le dipendenze Sphinx
+-------------------------------
+
+Esiste uno script che permette di verificare automaticamente le dipendenze di
+Sphinx. Se lo script riesce a riconoscere la vostra distribuzione, allora
+sarà in grado di darvi dei suggerimenti su come procedere per completare
+l'installazione::
+
+ $ ./scripts/sphinx-pre-install
+ Checking if the needed tools for Fedora release 26 (Twenty Six) are available
+ Warning: better to also install "texlive-luatex85".
+ You should run:
+
+ sudo dnf install -y texlive-luatex85
+ /usr/bin/virtualenv sphinx_1.4
+ . sphinx_1.4/bin/activate
+ pip install -r Documentation/sphinx/requirements.txt
+
+ Can't build as 1 mandatory dependency is missing at ./scripts/sphinx-pre-install line 468.
+
+L'impostazione predefinita prevede il controllo dei requisiti per la generazione
+di documenti html e PDF, includendo anche il supporto per le immagini, le
+espressioni matematiche e LaTeX; inoltre, presume che venga utilizzato un
+ambiente virtuale per Python. I requisiti per generare i documenti html
+sono considerati obbligatori, gli altri sono opzionali.
+
+Questo script ha i seguenti parametri:
+
+``--no-pdf``
+ Disabilita i controlli per la generazione di PDF;
+
+``--no-virtualenv``
+ Utilizza l'ambiente predefinito dal sistema operativo invece che
+ l'ambiente virtuale per Python;
+
+
+Generazione della documentazione Sphinx
+=======================================
+
+Per generare la documentazione in formato HTML o PDF si eseguono i rispettivi
+comandi ``make htmldocs`` o ``make pdfdocs``. Esistono anche altri formati
+in cui è possibile generare la documentazione; per maggiori informazioni
+potete eseguire il comando ``make help``.
+La documentazione così generata sarà disponibile nella sottocartella
+``Documentation/output``.
+
+Ovviamente, per generare la documentazione, Sphinx (``sphinx-build``)
+dev'essere installato. Se disponibile, il tema *Read the Docs* per Sphinx
+verrà utilizzato per ottenere una documentazione HTML più gradevole.
+Per la documentazione in formato PDF, invece, avrete bisogno di ``XeLaTeX``
+e di ``convert(1)`` disponibile in ImageMagick (https://www.imagemagick.org).
+Tipicamente, tutti questi pacchetti sono disponibili e pacchettizzati nelle
+distribuzioni Linux.
+
+Per poter passare ulteriori opzioni a Sphinx potete utilizzare la variabile
+make ``SPHINXOPTS``. Per esempio, se volete che Sphinx sia più verboso durante
+la generazione potete usare il seguente comando ``make SPHINXOPTS=-v htmldocs``.
+
+Potete eliminare la documentazione generata tramite il comando
+``make cleandocs``.
+
+Scrivere la documentazione
+==========================
+
+Aggiungere nuova documentazione è semplice:
+
+1. aggiungete un file ``.rst`` nella sottocartella ``Documentation``
+2. aggiungete un riferimento ad esso nell'indice (`TOC tree`_) in
+ ``Documentation/index.rst``.
+
+.. _TOC tree: http://www.sphinx-doc.org/en/stable/markup/toctree.html
+
+Questo, di solito, è sufficiente per la documentazione più semplice (come
+quella che state leggendo ora), ma per una documentazione più elaborata è
+consigliato creare una sottocartella dedicata (o, quando possibile, utilizzarne
+una già esistente). Per esempio, il sottosistema grafico è documentato nella
+sottocartella ``Documentation/gpu``; questa documentazione è divisa in
+diversi file ``.rst`` ed un indice ``index.rst`` (con un ``toctree``
+dedicato) a cui si fa riferimento nell'indice principale.
+
+Consultate la documentazione di `Sphinx`_ e `reStructuredText`_ per maggiori
+informazioni circa le loro potenzialità. In particolare, il
+`manuale introduttivo a reStructuredText`_ di Sphinx è un buon punto da
+cui cominciare. Esistono, inoltre, anche alcuni
+`costruttori specifici per Sphinx`_.
+
+.. _`manuale introduttivo a reStructuredText`: http://www.sphinx-doc.org/en/stable/rest.html
+.. _`costruttori specifici per Sphinx`: http://www.sphinx-doc.org/en/stable/markup/index.html
+
+Linee guida per la documentazione del kernel
+--------------------------------------------
+
+In questa sezione troverete alcune linee guida specifiche per la documentazione
+del kernel:
+
+* Non esagerate con i costrutti di reStructuredText. Mantenete la
+ documentazione semplice. La maggior parte della documentazione dovrebbe
+ essere testo semplice con una strutturazione minima che permetta la
+ conversione in diversi formati.
+
+* Mantenete la strutturazione il più fedele possibile all'originale quando
+ convertite un documento in formato reStructuredText.
+
+* Aggiornate i contenuti quando convertite della documentazione, non limitatevi
+ solo alla formattazione.
+
+* Mantenete la decorazione dei livelli di intestazione come segue:
+
+ 1. ``=`` con una linea superiore per il titolo del documento::
+
+ ======
+ Titolo
+ ======
+
+ 2. ``=`` per i capitoli::
+
+ Capitoli
+ ========
+
+ 3. ``-`` per le sezioni::
+
+ Sezioni
+ -------
+
+ 4. ``~`` per le sottosezioni::
+
+ Sottosezioni
+ ~~~~~~~~~~~~
+
+ Sebbene RST non forzi alcun ordine specifico (*Piuttosto che imporre
+ un numero ed un ordine fisso di decorazioni, l'ordine utilizzato sarà
+ quello incontrato*), avere uniformità dei livelli principali rende più
+ semplice la lettura dei documenti.
+
+* Per inserire blocchi di testo con caratteri a dimensione fissa (codici di
+ esempio, casi d'uso, eccetera): utilizzate ``::`` quando non è necessario
+ evidenziare la sintassi, specialmente per piccoli frammenti; invece,
+ utilizzate ``.. code-block:: <language>`` per blocchi più lunghi che
+ potranno beneficiare dell'avere la sintassi evidenziata.
+
+
+Il dominio C
+------------
+
+Il **Dominio Sphinx C** (denominato c) è adatto alla documentazione delle API C.
+Per esempio, un prototipo di una funzione:
+
+.. code-block:: rst
+
+ .. c:function:: int ioctl( int fd, int request )
+
+Il dominio C per kernel-doc ha delle funzionalità aggiuntive. Per esempio,
+potete assegnare un nuovo nome di riferimento ad una funzione con un nome
+molto comune come ``open`` o ``ioctl``:
+
+.. code-block:: rst
+
+ .. c:function:: int ioctl( int fd, int request )
+ :name: VIDIOC_LOG_STATUS
+
+Il nome della funzione (per esempio ioctl) rimane nel testo ma il nome del suo
+riferimento cambia da ``ioctl`` a ``VIDIOC_LOG_STATUS``. Anche la voce
+nell'indice cambia in ``VIDIOC_LOG_STATUS`` e si potrà quindi fare riferimento
+a questa funzione scrivendo:
+
+.. code-block:: rst
+
+ :c:func:`VIDIOC_LOG_STATUS`
+
+
+Tabelle a liste
+---------------
+
+Raccomandiamo l'uso delle tabelle in formato lista (*list table*). Le tabelle
+in formato lista sono liste di liste. In confronto all'ASCII-art potrebbero
+non apparire di facile lettura nei file in formato testo. Il loro vantaggio è
+che sono facili da creare o modificare e che la differenza di una modifica è
+molto più significativa perché limitata alle modifiche del contenuto.
+
+La ``flat-table`` è anch'essa una lista di liste simile alle ``list-table``
+ma con delle funzionalità aggiuntive:
+
+* column-span: col ruolo ``cspan`` una cella può essere estesa attraverso
+ colonne successive
+
+* row-span: col ruolo ``rspan`` una cella può essere estesa attraverso
+ righe successive
+
+* auto-span: la cella più a destra viene estesa verso destra per compensare
+ la mancanza di celle. Con l'opzione ``:fill-cells:`` questo comportamento
+ può essere cambiato da *auto-span* ad *auto-fill*, il quale inserisce
+ automaticamente celle (vuote) invece che estendere l'ultima.
+
+opzioni:
+
+* ``:header-rows:`` [int] conta le righe di intestazione
+* ``:stub-columns:`` [int] conta le colonne di stub
+* ``:widths:`` [[int] [int] ... ] larghezza delle colonne
+* ``:fill-cells:`` invece di estendere automaticamente una cella su quelle
+ mancanti, ne crea di vuote.
+
+ruoli:
+
+* ``:cspan:`` [int] colonne successive (*morecols*)
+* ``:rspan:`` [int] righe successive (*morerows*)
+
+L'esempio successivo mostra come usare questo marcatore. Il primo livello della
+nostra lista di liste è la *riga*. In una *riga* è possibile inserire solamente
+la lista di celle che compongono la *riga* stessa. Fanno eccezione i *commenti*
+( ``..`` ) ed i *collegamenti* (per esempio, un riferimento a
+``:ref:`last row <last row>``` / :ref:`last row <it last row>`)
+
+.. code-block:: rst
+
+ .. flat-table:: table title
+ :widths: 2 1 1 3
+
+ * - head col 1
+ - head col 2
+ - head col 3
+ - head col 4
+
+ * - column 1
+ - field 1.1
+ - field 1.2 with autospan
+
+ * - column 2
+ - field 2.1
+ - :rspan:`1` :cspan:`1` field 2.2 - 3.3
+
+ * .. _`it last row`:
+
+ - column 3
+
+Che verrà rappresentata nel seguente modo:
+
+ .. flat-table:: table title
+ :widths: 2 1 1 3
+
+ * - head col 1
+ - head col 2
+ - head col 3
+ - head col 4
+
+ * - column 1
+ - field 1.1
+ - field 1.2 with autospan
+
+ * - column 2
+ - field 2.1
+ - :rspan:`1` :cspan:`1` field 2.2 - 3.3
+
+ * .. _`it last row`:
+
+ - column 3
+
+.. _it_sphinx_kfigure:
+
+Figure ed immagini
+==================
+
+Se volete aggiungere un'immagine, utilizzate le direttive ``kernel-figure``
+e ``kernel-image``. Per esempio, per inserire una figura di un'immagine in
+formato SVG::
+
+ .. kernel-figure:: ../../../doc-guide/svg_image.svg
+ :alt: una semplice immagine SVG
+
+ Una semplice immagine SVG
+
+.. _it_svg_image_example:
+
+.. kernel-figure:: ../../../doc-guide/svg_image.svg
+ :alt: una semplice immagine SVG
+
+ Una semplice immagine SVG
+
+Le direttive del kernel per figure ed immagini supportano il formato **DOT**;
+per maggiori informazioni consultate:
+
+* DOT: http://graphviz.org/pdf/dotguide.pdf
+* Graphviz: http://www.graphviz.org/content/dot-language
+
+Un piccolo esempio (:ref:`it_hello_dot_file`)::
+
+ .. kernel-figure:: ../../../doc-guide/hello.dot
+ :alt: ciao mondo
+
+ Esempio DOT
+
+.. _it_hello_dot_file:
+
+.. kernel-figure:: ../../../doc-guide/hello.dot
+ :alt: ciao mondo
+
+ Esempio DOT
+
+Tramite la direttiva ``kernel-render`` è possibile aggiungere codice specifico;
+ad esempio nel formato **DOT** di Graphviz::
+
+ .. kernel-render:: DOT
+ :alt: foobar digraph
+ :caption: Codice **DOT** (Graphviz) integrato
+
+ digraph foo {
+ "bar" -> "baz";
+ }
+
+La rappresentazione dipenderà dai programmi installati. Se avete Graphviz
+installato, vedrete un'immagine vettoriale. In caso contrario, il codice grezzo
+verrà rappresentato come *blocco testuale* (:ref:`it_hello_dot_render`).
+
+.. _it_hello_dot_render:
+
+.. kernel-render:: DOT
+ :alt: foobar digraph
+ :caption: Codice **DOT** (Graphviz) integrato
+
+ digraph foo {
+ "bar" -> "baz";
+ }
+
+La direttiva *render* ha tutte le opzioni della direttiva *figure*, con
+l'aggiunta dell'opzione ``caption``. Se ``caption`` ha un valore allora
+un nodo *figure* viene aggiunto. Altrimenti verrà aggiunto un nodo *image*.
+L'opzione ``caption`` è necessaria in caso si vogliano aggiungere dei
+riferimenti (:ref:`it_hello_svg_render`).
+
+Per la scrittura di codice **SVG**::
+
+ .. kernel-render:: SVG
+ :caption: Integrare codice **SVG**
+ :alt: so-nw-arrow
+
+ <?xml version="1.0" encoding="UTF-8"?>
+ <svg xmlns="http://www.w3.org/2000/svg" version="1.1" ...>
+ ...
+ </svg>
+
+.. _it_hello_svg_render:
+
+.. kernel-render:: SVG
+ :caption: Integrare codice **SVG**
+ :alt: so-nw-arrow
+
+ <?xml version="1.0" encoding="UTF-8"?>
+ <svg xmlns="http://www.w3.org/2000/svg"
+ version="1.1" baseProfile="full" width="70px" height="40px" viewBox="0 0 700 400">
+ <line x1="180" y1="370" x2="500" y2="50" stroke="black" stroke-width="15px"/>
+ <polygon points="585 0 525 25 585 50" transform="rotate(135 525 25)"/>
+ </svg>
diff --git a/Documentation/translations/it_IT/index.rst b/Documentation/translations/it_IT/index.rst
new file mode 100644
index 000000000000..898a7823a6f4
--- /dev/null
+++ b/Documentation/translations/it_IT/index.rst
@@ -0,0 +1,118 @@
+.. _it_linux_doc:
+
+===================
+Traduzione italiana
+===================
+
+L'obiettivo di questa traduzione è di rendere più facile la lettura e
+la comprensione per chi preferisce leggere in lingua italiana.
+Tenete presente che la documentazione di riferimento rimane comunque
+quella in lingua inglese: :ref:`linux_doc`
+
+Questa traduzione cerca di essere il più fedele possibile all'originale ma
+è ovvio che alcune frasi vadano trasformate: non aspettatevi una traduzione
+letterale. Quando possibile, si eviteranno gli inglesismi ed al loro posto
+verranno utilizzate le corrispettive parole italiane.
+
+Se notate che la traduzione non è più aggiornata potete contattare
+direttamente il manutentore della traduzione italiana.
+
+Se notate che la documentazione contiene errori o dimenticanze, allora
+verificate la documentazione di riferimento in lingua inglese. Se il problema
+è presente anche nella documentazione di riferimento, contattate il suo
+manutentore. Se avete problemi a scrivere in inglese, potete comunque
+riportare il problema al manutentore della traduzione italiana.
+
+Manutentore della traduzione italiana: Federico Vaga <federico.vaga@vaga.pv.it>
+
+La documentazione del kernel Linux
+==================================
+
+Questo è il livello principale della documentazione del kernel in
+lingua italiana. La traduzione è incompleta, noterete degli avvisi
+che vi segnaleranno la mancanza di una traduzione o di un gruppo di
+traduzioni.
+
+Più in generale, la documentazione, come il kernel stesso, è in
+costante sviluppo; ciò è particolarmente vero ora che stiamo lavorando
+alla riorganizzazione della documentazione in modo più coerente.
+I miglioramenti alla documentazione sono sempre i benvenuti; per cui,
+se vuoi aiutare, iscriviti alla lista di discussione linux-doc presso
+vger.kernel.org.
+
+Documentazione sulla licenza dei sorgenti
+-----------------------------------------
+
+I seguenti documenti descrivono la licenza usata nei sorgenti del kernel Linux
+(GPLv2), come licenziare i singoli file; inoltre troverete i riferimenti al
+testo integrale della licenza.
+
+.. warning::
+
+ TODO ancora da tradurre
+
+Documentazione per gli utenti
+-----------------------------
+
+I seguenti manuali sono scritti per gli *utenti* del kernel - ovvero,
+coloro che cercano di farlo funzionare in modo ottimale su un dato sistema.
+
+.. warning::
+
+ TODO ancora da tradurre
+
+Documentazione per gli sviluppatori di applicazioni
+---------------------------------------------------
+
+Il manuale delle API verso lo spazio utente è una collezione di documenti
+che descrivono le interfacce del kernel viste dagli sviluppatori
+di applicazioni.
+
+.. warning::
+
+ TODO ancora da tradurre
+
+
+Introduzione allo sviluppo del kernel
+-------------------------------------
+
+Questi manuali contengono informazioni su come contribuire allo sviluppo
+del kernel.
+Attorno al kernel Linux gira una comunità molto grande con migliaia di
+sviluppatori che contribuiscono ogni anno. Come in ogni grande comunità,
+sapere come le cose vengono fatte renderà il processo di integrazione delle
+vostre modifiche molto più semplice.
+
+.. toctree::
+ :maxdepth: 2
+
+ doc-guide/index
+ kernel-hacking/index
+
+.. warning::
+
+ TODO ancora da tradurre
+
+Documentazione della API del kernel
+-----------------------------------
+
+Questi manuali forniscono dettagli su come funzionano i sottosistemi del
+kernel dal punto di vista degli sviluppatori del kernel. Molte delle
+informazioni contenute in questi manuali sono prese direttamente dai
+file sorgenti, informazioni aggiuntive vengono aggiunte solo se necessarie
+(o almeno ci proviamo — probabilmente *non* tutto quello che è davvero
+necessario).
+
+.. warning::
+
+ TODO ancora da tradurre
+
+Documentazione specifica per architettura
+-----------------------------------------
+
+Questi manuali forniscono dettagli di programmazione per le diverse
+implementazioni d'architettura.
+
+.. warning::
+
+ TODO ancora da tradurre
diff --git a/Documentation/translations/it_IT/kernel-hacking/hacking.rst b/Documentation/translations/it_IT/kernel-hacking/hacking.rst
new file mode 100644
index 000000000000..7178e517af0a
--- /dev/null
+++ b/Documentation/translations/it_IT/kernel-hacking/hacking.rst
@@ -0,0 +1,855 @@
+.. include:: ../disclaimer-ita.rst
+
+.. note:: Per leggere la documentazione originale in inglese:
+ :ref:`Documentation/kernel-hacking/hacking.rst <kernel_hacking_hack>`
+
+:Original: :ref:`Documentation/kernel-hacking/hacking.rst <kernel_hacking_hack>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
+
+.. _it_kernel_hacking_hack:
+
+=================================================
+L'inaffidabile guida all'hacking del kernel Linux
+=================================================
+
+:Author: Rusty Russell
+
+Introduzione
+============
+
+Benvenuto, gentile lettore, alla notevole ed inaffidabile guida all'hacking
+del kernel Linux ad opera di Rusty. Questo documento descrive le procedure
+più usate ed i concetti necessari per scrivere codice per il kernel: lo scopo
+è di fornire ai programmatori C più esperti un manuale di base per lo sviluppo.
+Eviterò dettagli implementativi: per questo abbiamo il codice,
+ed ignorerò intere parti di alcune procedure.
+
+Prima di leggere questa guida, sappiate che non ho mai voluto scriverla,
+essendo esageratamente sottoqualificato, ma ho sempre voluto leggere
+qualcosa di simile, e quindi questa era l'unica via. Spero che possa
+crescere e diventare un compendio di buone pratiche, punti di partenza
+e generiche informazioni.
+
+Gli attori
+==========
+
+In qualsiasi momento ognuna delle CPU di un sistema può essere:
+
+- non associata ad alcun processo, servendo un'interruzione hardware;
+
+- non associata ad alcun processo, servendo un softirq o tasklet;
+
+- in esecuzione nello spazio kernel, associata ad un processo
+ (contesto utente);
+
+- in esecuzione di un processo nello spazio utente;
+
+Esiste un ordine fra questi casi. Gli ultimi due possono avvicendarsi (preempt)
+l'un l'altro, ma a parte questo esiste una gerarchia rigida: ognuno di questi
+può avvicendarsi solo ad uno di quelli sottostanti. Per esempio, mentre un
+softirq è in esecuzione su una CPU, nessun altro softirq può avvicendarsi
+nell'esecuzione, ma un'interruzione hardware può. Ciò nonostante, le altre CPU
+del sistema operano indipendentemente.
+
+Più avanti vedremo alcuni modi in cui dal contesto utente è possibile bloccare
+le interruzioni, così da impedire davvero la prelazione.
+
+Contesto utente
+---------------
+
+Ci si trova nel contesto utente quando si arriva da una chiamata di sistema
+od altre eccezioni: come nello spazio utente, altre procedure più importanti,
+o le interruzioni, possono far valere il proprio diritto di prelazione sul
+vostro processo. Potete sospendere l'esecuzione chiamando :c:func:`schedule()`.
+
+.. note::
+
+ Si è sempre in contesto utente quando un modulo viene caricato o rimosso,
+ e durante le operazioni nello strato dei dispositivi a blocchi
+ (*block layer*).
+
+Nel contesto utente, il puntatore ``current`` (il quale indica il processo al
+momento in esecuzione) è valido, e :c:func:`in_interrupt()`
+(``include/linux/preempt.h``) è falsa.
+
+.. warning::
+
+ Attenzione che se avete la prelazione o i softirq disabilitati (vedere
+ di seguito), :c:func:`in_interrupt()` ritornerà un falso positivo.
+
+Interruzioni hardware (Hard IRQs)
+---------------------------------
+
+Temporizzatori, schede di rete e tastiere sono esempi di vero hardware
+che possono produrre interruzioni in un qualsiasi momento. Il kernel esegue
+i gestori d'interruzione che prestano un servizio all'hardware. Il kernel
+garantisce che questi gestori non vengano mai interrotti: se una stessa
+interruzione arriva, questa verrà accodata (o scartata).
+Dato che durante la loro esecuzione le interruzioni vengono disabilitate,
+i gestori d'interruzioni devono essere veloci: spesso si limitano
+esclusivamente a notificare la presa in carico dell'interruzione,
+programmare una 'interruzione software' per l'esecuzione e quindi terminare.
+
+Potete dire d'essere in una interruzione hardware perché :c:func:`in_irq()`
+ritorna vero.
+
+.. warning::
+
+ Attenzione, questa ritornerà un falso positivo se le interruzioni
+ sono disabilitate (vedere di seguito).
+
+Contesto d'interruzione software: softirq e tasklet
+---------------------------------------------------
+
+Quando una chiamata di sistema sta per tornare allo spazio utente,
+oppure un gestore d'interruzioni termina, qualsiasi 'interruzione software'
+marcata come pendente (solitamente da un'interruzione hardware) viene
+eseguita (``kernel/softirq.c``).
+
+La maggior parte del lavoro utile alla gestione di un'interruzione avviene qui.
+All'inizio della transizione ai sistemi multiprocessore, c'erano solo i
+cosiddetti 'bottom half' (BH), i quali non traevano alcun vantaggio da questi
+sistemi. Non appena abbandonammo i computer raffazzonati con fiammiferi e
+cicche, abbandonammo anche questa limitazione e migrammo alle interruzioni
+software 'softirqs'.
+
+Il file ``include/linux/interrupt.h`` elenca i differenti tipi di 'softirq'.
+Un tipo di softirq molto importante è il timer (``include/linux/timer.h``):
+potete programmarlo per far si che esegua funzioni dopo un determinato
+periodo di tempo.
+
+Dato che i softirq possono essere eseguiti simultaneamente su più di un
+processore, spesso diventa estenuante l'averci a che fare. Per questa ragione,
+i tasklet (``include/linux/interrupt.h``) vengono usati più di frequente:
+possono essere registrati dinamicamente (il che significa che potete averne
+quanti ne volete), e garantiscono che un qualsiasi tasklet verrà eseguito
+solo su un processore alla volta, sebbene diversi tasklet possano essere
+eseguiti simultaneamente.
+
+.. warning::
+
+ Il nome 'tasklet' è ingannevole: non hanno niente a che fare
+ con i 'processi' ('tasks'), e probabilmente hanno più a che vedere
+ con qualche pessima vodka che Alexey Kuznetsov si fece a quel tempo.
+
+Potete determinare se siete in un softirq (o tasklet) utilizzando la
+macro :c:func:`in_softirq()` (``include/linux/preempt.h``).
+
+.. warning::
+
+ State attenti che questa macro ritornerà un falso positivo
+ se :ref:`bottom half lock <it_local_bh_disable>` è trattenuto.
+
+Alcune regole basilari
+======================
+
+Nessuna protezione della memoria
+ Se corrompete la memoria, che sia in contesto utente o d'interruzione,
+ la macchina si pianterà. Siete sicuri che quello che volete fare
+ non possa essere fatto nello spazio utente?
+
+Nessun numero in virgola mobile o MMX
+ Il contesto della FPU non è salvato; anche se siete in contesto utente
+ lo stato dell'FPU probabilmente non corrisponde a quello del processo
+ corrente: vi incasinerete con lo stato di qualche altro processo. Se
+ volete davvero usare la virgola mobile, allora dovrete salvare e recuperare
+ lo stato dell'FPU (ed evitare cambi di contesto). Generalmente è una
+ cattiva idea; usate l'aritmetica a virgola fissa.
+
+Un limite rigido dello stack
+ A seconda della configurazione del kernel lo stack è fra 3K e 6K per la
+ maggior parte delle architetture a 32-bit; è di 14K per la maggior
+ parte di quelle a 64-bit; e spesso è condiviso con le interruzioni,
+ per cui non si può usare.
+ Evitate profonde ricorsioni ed enormi array locali nello stack
+ (allocateli dinamicamente).
+
+Il kernel Linux è portabile
+ Quindi mantenetelo tale. Il vostro codice dovrebbe essere a 64-bit ed
+indipendente dall'ordine dei byte (endianness) di un processore. Inoltre,
+ dovreste minimizzare il codice specifico per un processore; per esempio
+ il codice assembly dovrebbe essere incapsulato in modo pulito e minimizzato
+ per facilitarne la migrazione. Generalmente questo codice dovrebbe essere
+ limitato alla parte di kernel specifica per un'architettura.
+
+ioctl: non scrivere nuove chiamate di sistema
+=============================================
+
+Una chiamata di sistema, generalmente, è scritta così::
+
+ asmlinkage long sys_mycall(int arg)
+ {
+ return 0;
+ }
+
+Primo, nella maggior parte dei casi non volete creare nuove chiamate di
+sistema.
+Create un dispositivo a caratteri ed implementate l'appropriata chiamata ioctl.
+Questo meccanismo è molto più flessibile delle chiamate di sistema: esso non
+dev'essere dichiarato in tutte le architetture nei file
+``include/asm/unistd.h`` e ``arch/kernel/entry.S``; inoltre, è improbabile
+che questo venga accettato da Linus.
+
+Se tutto quello che il vostro codice fa è leggere o scrivere alcuni parametri,
+considerate l'implementazione di un'interfaccia :c:func:`sysfs()`.
+
+All'interno di una ioctl vi trovate nel contesto utente di un processo. Quando
+avviene un errore dovete ritornare un valore negativo di errno (consultate
+``include/uapi/asm-generic/errno-base.h``,
+``include/uapi/asm-generic/errno.h`` e ``include/linux/errno.h``), altrimenti
+ritornate 0.
+
+Dopo aver dormito dovreste verificare se ci sono stati dei segnali: il modo
+Unix/Linux di gestire un segnale è di uscire temporaneamente dalla chiamata
+di sistema con l'errore ``-ERESTARTSYS``. La chiamata di sistema ritornerà
+al contesto utente, eseguirà il gestore del segnale e poi la vostra chiamata
+di sistema riprenderà (a meno che l'utente non l'abbia disabilitata). Quindi,
+dovreste essere pronti per continuare l'esecuzione, per esempio nel mezzo
+della manipolazione di una struttura dati.
+
+::
+
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+Se dovete eseguire dei calcoli molto lunghi: pensate allo spazio utente.
+Se **davvero** volete farlo nel kernel ricordatevi di verificare periodicamente
+se dovete *lasciare* il processore (ricordatevi che, per ogni processore, c'è
+un sistema multi-processo senza diritto di prelazione).
+Esempio::
+
+ cond_resched(); /* Will sleep */
+
+Una breve nota sulla progettazione delle interfacce: il motto dei sistemi
+UNIX è "fornite meccanismi e non politiche"
+
+La ricetta per uno stallo
+=========================
+
+Non è permesso invocare una procedura che potrebbe dormire, a meno che non
+siano soddisfatte tutte le seguenti condizioni:
+
+- Siete in un contesto utente.
+
+- Non trattenete alcun spinlock.
+
+- Avete abilitato le interruzioni (in realtà, Andy Kleen dice che
+ lo schedulatore le abiliterà per voi, ma probabilmente questo non è quello
+ che volete).
+
+Da tener presente che alcune funzioni potrebbero dormire implicitamente:
+le più comuni sono quelle per l'accesso allo spazio utente (\*_user) e
+quelle per l'allocazione della memoria senza l'opzione ``GFP_ATOMIC``.
+
+Dovreste sempre compilare il kernel con l'opzione ``CONFIG_DEBUG_ATOMIC_SLEEP``
+attiva, questa vi avviserà se infrangete una di queste regole.
+Se **infrangete** le regole, allora potreste bloccare il vostro scatolotto.
+
+Veramente.
+
+Alcune delle procedure più comuni
+=================================
+
+:c:func:`printk()`
+------------------
+
+Definita in ``include/linux/printk.h``
+
+:c:func:`printk()` fornisce messaggi alla console, dmesg, e al demone syslog.
+Essa è utile per il debugging o per la notifica di errori; può essere
+utilizzata anche all'interno del contesto d'interruzione, ma usatela con
+cautela: una macchina che ha la propria console inondata da messaggi diventa
+inutilizzabile. La funzione utilizza un formato stringa quasi compatibile con
+la printf ANSI C, e la concatenazione di una stringa C come primo argomento
+per indicare la "priorità"::
+
+ printk(KERN_INFO "i = %u\n", i);
+
+Consultate ``include/linux/kern_levels.h`` per gli altri valori ``KERN_``;
+questi sono interpretati da syslog come livelli. Un caso speciale:
+per stampare un indirizzo IP usate::
+
+ __be32 ipaddress;
+ printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
+
+
+:c:func:`printk()` utilizza un buffer interno di 1K e non s'accorge di
+eventuali sforamenti. Accertatevi che vi basti.
+
+.. note::
+
+ Saprete di essere un vero hacker del kernel quando inizierete a digitare
+ nei vostri programmi utenti le printf come se fossero printk :)
+
+.. note::
+
+ Un'altra nota a parte: la versione originale di Unix 6 aveva un commento
+ sopra alla funzione printf: "Printf non dovrebbe essere usata per il
+ chiacchiericcio". Dovreste seguire questo consiglio.
+
+:c:func:`copy_to_user()` / :c:func:`copy_from_user()` / :c:func:`get_user()` / :c:func:`put_user()`
+---------------------------------------------------------------------------------------------------
+
+Definite in ``include/linux/uaccess.h`` / ``asm/uaccess.h``
+
+**[DORMONO]**
+
+:c:func:`put_user()` e :c:func:`get_user()` sono usate per ricevere ed
+impostare singoli valori (come int, char, o long) da e verso lo spazio utente.
+Un puntatore nello spazio utente non dovrebbe mai essere dereferenziato: i dati
+dovrebbero essere copiati usando suddette procedure. Entrambe ritornano
+``-EFAULT`` oppure 0.
+
+:c:func:`copy_to_user()` e :c:func:`copy_from_user()` sono più generiche:
+esse copiano una quantità arbitraria di dati da e verso lo spazio utente.
+
+.. warning::
+
+ Al contrario di :c:func:`put_user()` e :c:func:`get_user()`, queste
+ funzioni ritornano la quantità di dati copiati (0 è comunque un successo).
+
+[Sì, questa stupida interfaccia mi imbarazza. La battaglia torna in auge anno
+dopo anno. --RR]
+
+Le funzioni potrebbero dormire implicitamente. Queste non dovrebbero mai essere
+invocate fuori dal contesto utente (non ha senso), con le interruzioni
+disabilitate, o con uno spinlock trattenuto.
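+
+A titolo d'esempio, ecco un abbozzo di gestore ioctl che scambia un intero
+con lo spazio utente usando :c:func:`get_user()` e :c:func:`put_user()`;
+i nomi ``my_ioctl`` e ``MY_CMD_INC`` sono puramente inventati::
+
+    static long my_ioctl(struct file *filp, unsigned int cmd,
+                         unsigned long arg)
+    {
+        int val;
+
+        switch (cmd) {
+        case MY_CMD_INC:    /* comando ioctl ipotetico, definito altrove */
+            /* copia dallo spazio utente: -EFAULT in caso d'errore */
+            if (get_user(val, (int __user *)arg))
+                return -EFAULT;
+            val++;
+            /* copia verso lo spazio utente */
+            if (put_user(val, (int __user *)arg))
+                return -EFAULT;
+            return 0;
+        default:
+            return -ENOTTY;
+        }
+    }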
+
+:c:func:`kmalloc()`/:c:func:`kfree()`
+-------------------------------------
+
+Definite in ``include/linux/slab.h``
+
+**[POTREBBERO DORMIRE: LEGGI SOTTO]**
+
+Queste procedure sono utilizzate per la richiesta dinamica di un puntatore ad
+un pezzo di memoria allineato, esattamente come malloc e free nello spazio
+utente, ma :c:func:`kmalloc()` ha un argomento aggiuntivo per indicare alcune
+opzioni. Le opzioni più importanti sono:
+
+``GFP_KERNEL``
+ Potrebbe dormire per liberare della memoria. L'opzione fornisce il modo
+ più affidabile per allocare memoria, ma il suo uso è strettamente limitato
+ al contesto utente.
+
+``GFP_ATOMIC``
+ Non dorme. Meno affidabile di ``GFP_KERNEL``, ma può essere usata in un
+ contesto d'interruzione. Dovreste avere **davvero** una buona strategia
+ per la gestione degli errori in caso di mancanza di memoria.
+
+``GFP_DMA``
+ Alloca memoria per il DMA sul bus ISA nello spazio d'indirizzamento
+ inferiore ai 16MB. Se non sapete cos'è allora non vi serve.
+ Molto inaffidabile.
+
+Se vedete un messaggio d'avviso per una funzione dormiente che viene chiamata
+da un contesto errato, allora probabilmente avete usato una funzione
+d'allocazione dormiente da un contesto d'interruzione senza ``GFP_ATOMIC``.
+Dovreste correggerlo. Sbrigatevi, non cincischiate.
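+
+Un esempio indicativo di come scegliere l'opzione in base al contesto
+(la struttura ``foo`` è inventata)::
+
+    struct foo *f, *g;
+
+    /* in contesto utente: può dormire in attesa di memoria */
+    f = kmalloc(sizeof(*f), GFP_KERNEL);
+    if (!f)
+        return -ENOMEM;
+
+    /* in un gestore d'interruzione: non può dormire */
+    g = kmalloc(sizeof(*g), GFP_ATOMIC);
+    if (!g) {
+        kfree(f);
+        return -ENOMEM;    /* serve comunque una strategia di ripiego */
+    }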
+
+Se allocate almeno ``PAGE_SIZE``(``asm/page.h`` o ``asm/page_types.h``) byte,
+considerate l'uso di :c:func:`__get_free_pages()` (``include/linux/gfp.h``).
+Accetta un argomento che definisce l'ordine (0 per la dimensione di una
+pagina, 1 per una doppia pagina, 2 per quattro pagine, eccetera) e le stesse
+opzioni d'allocazione viste precedentemente.
+
+Se state allocando un numero di byte notevolmente superiore ad una pagina
+potete usare :c:func:`vmalloc()`. Essa allocherà memoria virtuale all'interno
+dello spazio kernel. Questo è un blocco di memoria fisica non contiguo, ma
+la MMU vi darà l'impressione che lo sia (quindi, sarà contiguo solo dal punto
+di vista dei processori, non dal punto di vista dei driver dei dispositivi
+esterni).
+Se per qualche strana ragione avete davvero bisogno di una grossa quantità di
+memoria fisica contigua, avete un problema: Linux non ha un buon supporto per
+questo caso d'uso perché, dopo un po' di tempo, la frammentazione della memoria
+rende l'operazione difficile. Il modo migliore per allocare un simile blocco
+all'inizio dell'avvio del sistema è attraverso la procedura
+:c:func:`alloc_bootmem()`.
+
+Prima di inventare la vostra cache per gli oggetti più usati, considerate
+l'uso di una cache slab disponibile in ``include/linux/slab.h``.
+
+:c:func:`current()`
+-------------------
+
+Definita in ``include/asm/current.h``
+
+Questa variabile globale (in realtà una macro) contiene un puntatore alla
+struttura del processo corrente, quindi è valido solo nel contesto utente.
+Per esempio, quando un processo esegue una chiamata di sistema, questo
+punterà alla struttura dati del processo chiamante.
+Nel contesto d'interruzione il suo valore **non è NULL**.
+
+:c:func:`mdelay()`/:c:func:`udelay()`
+-------------------------------------
+
+Definite in ``include/asm/delay.h`` / ``include/linux/delay.h``
+
+Le funzioni :c:func:`udelay()` e :c:func:`ndelay()` possono essere utilizzate
+per brevi pause. Non usate grandi valori perché rischiate d'avere un
+overflow - in questo contesto la funzione :c:func:`mdelay()` è utile,
+oppure considerate :c:func:`msleep()`.
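+
+Per esempio (i valori sono puramente indicativi)::
+
+    udelay(10);      /* attesa attiva di 10 microsecondi */
+    mdelay(5);       /* per i millisecondi: evita l'overflow di udelay() */
+    msleep(100);     /* se potete dormire, preferite msleep() */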
+
+:c:func:`cpu_to_be32()`/:c:func:`be32_to_cpu()`/:c:func:`cpu_to_le32()`/:c:func:`le32_to_cpu()`
+-----------------------------------------------------------------------------------------------
+
+Definite in ``include/asm/byteorder.h``
+
+La famiglia di funzioni :c:func:`cpu_to_be32()` (dove "32" può essere
+sostituito da 64 o 16, e "be" con "le") forniscono un modo generico
+per fare conversioni sull'ordine dei byte (endianness): esse ritornano
+il valore convertito. Tutte le varianti supportano anche il processo inverso:
+:c:func:`be32_to_cpu()`, eccetera.
+
+Queste funzioni hanno principalmente due varianti: la variante per
+puntatori, come :c:func:`cpu_to_be32p()`, che prende un puntatore
+ad un tipo e ritorna il valore convertito. L'altra è la famiglia di
+conversioni "in-situ", come :c:func:`cpu_to_be32s()`, che convertono
+il valore puntato da un puntatore, e ritornano void.
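+
+Un piccolo esempio di conversione (il valore è puramente indicativo)::
+
+    __be32 wire = cpu_to_be32(0x12345678);  /* processore -> big endian */
+    u32 host = be32_to_cpu(wire);           /* big endian -> processore */
+
+    cpu_to_be32s(&host);                    /* variante "in-situ" */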
+
+:c:func:`local_irq_save()`/:c:func:`local_irq_restore()`
+--------------------------------------------------------
+
+Definite in ``include/linux/irqflags.h``
+
+Queste funzioni abilitano e disabilitano le interruzioni hardware
+sul processore locale. Entrambe sono rientranti; esse salvano lo stato
+precedente nel proprio argomento ``unsigned long flags``. Se sapete
+che le interruzioni sono abilitate, potete semplicemente utilizzare
+:c:func:`local_irq_disable()` e :c:func:`local_irq_enable()`.
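+
+Per esempio::
+
+    unsigned long flags;
+
+    local_irq_save(flags);     /* disabilita le interruzioni, salva lo stato */
+    /* ... sezione critica, il più breve possibile ... */
+    local_irq_restore(flags);  /* ripristina lo stato precedente */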
+
+.. _it_local_bh_disable:
+
+:c:func:`local_bh_disable()`/:c:func:`local_bh_enable()`
+--------------------------------------------------------
+
+Definite in ``include/linux/bottom_half.h``
+
+
+Queste funzioni abilitano e disabilitano le interruzioni software
+sul processore locale. Entrambe sono rientranti; se le interruzioni
+software erano già state disabilitate in precedenza, rimarranno
+disabilitate anche dopo aver invocato questa coppia di funzioni.
+Lo scopo è di prevenire l'esecuzione di softirq e tasklet sul processore
+attuale.
+
+:c:func:`smp_processor_id()`
+----------------------------
+
+Definita in ``include/linux/smp.h``
+
+:c:func:`get_cpu()` nega il diritto di prelazione (quindi non potete essere
+spostati su un altro processore all'improvviso) e ritorna il numero
+del processore attuale, fra 0 e ``NR_CPUS``. Da notare che non è detto
+che la numerazione dei processori sia continua. Quando avete terminato,
+ritornate allo stato precedente con :c:func:`put_cpu()`.
+
+Se sapete che non dovete essere interrotti da altri processi (per esempio,
+se siete in un contesto d'interruzione, o il diritto di prelazione
+è disabilitato) potete utilizzare :c:func:`smp_processor_id()`.
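+
+Per esempio::
+
+    int cpu = get_cpu();    /* disabilita la prelazione */
+    pr_info("in esecuzione sul processore %d\n", cpu);
+    put_cpu();              /* riabilita la prelazione */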
+
+
+``__init``/``__exit``/``__initdata``
+------------------------------------
+
+Definite in ``include/linux/init.h``
+
+Dopo l'avvio, il kernel libera una sezione speciale; le funzioni marcate
+con ``__init`` e le strutture dati marcate con ``__initdata`` vengono
+eliminate dopo il completamento dell'avvio: in modo simile i moduli eliminano
+questa memoria dopo l'inizializzazione. ``__exit`` viene utilizzato per
+dichiarare che una funzione verrà utilizzata solo in fase di rimozione:
+detta funzione verrà eliminata quando il file che la contiene non è
+compilato come modulo. Guardate l'header file per informazioni. Da notare che
+non ha senso avere una funzione marcata come ``__init`` e al tempo stesso
+esportata ai moduli utilizzando :c:func:`EXPORT_SYMBOL()` o
+:c:func:`EXPORT_SYMBOL_GPL()` - non funzionerà.
+
+
+:c:func:`__initcall()`/:c:func:`module_init()`
+----------------------------------------------
+
+Definite in ``include/linux/init.h`` / ``include/linux/module.h``
+
+Molte parti del kernel funzionano bene come moduli (componenti del kernel
+caricabili dinamicamente). L'utilizzo delle macro :c:func:`module_init()`
+e :c:func:`module_exit()` semplifica la scrittura di codice che può funzionare
+sia come modulo, sia come parte del kernel, senza l'ausilio di #ifdef.
+
+La macro :c:func:`module_init()` definisce quale funzione dev'essere
+chiamata quando il modulo viene inserito (se il file è stato compilato come
+tale), o in fase di avvio: se il file non è stato compilato come modulo la
+macro :c:func:`module_init()` diventa equivalente a :c:func:`__initcall()`,
+la quale, tramite qualche magia del linker, s'assicura che la funzione venga
+chiamata durante l'avvio.
+
+La funzione può ritornare un numero d'errore negativo per scatenare un
+fallimento del caricamento (sfortunatamente, questo non ha effetto se il
+modulo è compilato come parte integrante del kernel). Questa funzione è chiamata
+in contesto utente con le interruzioni abilitate, quindi potrebbe dormire.
+
+
+:c:func:`module_exit()`
+-----------------------
+
+
+Definita in ``include/linux/module.h``
+
+Questa macro definisce la funzione che dev'essere chiamata al momento della
+rimozione (o mai, nel caso in cui il file sia parte integrante del kernel).
+Essa verrà chiamata solo quando il contatore d'uso del modulo raggiunge lo
+zero. Questa funzione può anche dormire, ma non può fallire: tutto dev'essere
+ripulito prima che la funzione ritorni.
+
+Da notare che questa macro è opzionale: se non presente, il modulo non sarà
+removibile (a meno che non usiate 'rmmod -f' ).
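+
+Uno scheletro minimale di modulo, giusto per fissare le idee (i nomi
+``my_init`` e ``my_exit`` sono ipotetici)::
+
+    #include <linux/module.h>
+    #include <linux/init.h>
+
+    static int __init my_init(void)
+    {
+        return 0;    /* un errno negativo fa fallire il caricamento */
+    }
+
+    static void __exit my_exit(void)
+    {
+        /* pulizia: non può fallire */
+    }
+
+    module_init(my_init);
+    module_exit(my_exit);
+    MODULE_LICENSE("GPL");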
+
+
+:c:func:`try_module_get()`/:c:func:`module_put()`
+-------------------------------------------------
+
+Definite in ``include/linux/module.h``
+
+Queste funzioni maneggiano il contatore d'uso del modulo per proteggerlo dalla
+rimozione (in aggiunta, un modulo non può essere rimosso se un altro modulo
+utilizza uno dei suoi simboli esportati: vedere di seguito). Prima di eseguire
+codice del modulo, dovreste chiamare :c:func:`try_module_get()` su quel modulo:
+se fallisce significa che il modulo è stato rimosso e dovete agire come se
+non fosse presente. Altrimenti, potete accedere al modulo in sicurezza, e
+chiamare :c:func:`module_put()` quando avete finito.
+
+La maggior parte delle strutture registrabili hanno un campo owner
+(proprietario), come nella struttura
+:c:type:`struct file_operations <file_operations>`.
+Impostate questo campo al valore della macro ``THIS_MODULE``.
+
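+Uno schema d'uso indicativo, dove ``mod`` è un ipotetico puntatore
+``struct module *`` (tipicamente preso dal campo ``owner`` d'una struttura)::
+
+    if (!try_module_get(mod))
+        return -ENODEV;    /* il modulo è in fase di rimozione */
+
+    /* ... uso sicuro del codice del modulo ... */
+
+    module_put(mod);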
+
+Code d'attesa ``include/linux/wait.h``
+======================================
+
+**[DORMONO]**
+
+Una coda d'attesa è usata per aspettare che qualcuno vi attivi quando una
+certa condizione s'avvera. Per evitare corse critiche, devono essere usate
+con cautela. Dichiarate una :c:type:`wait_queue_head_t`, e poi i processi
+che vogliono attendere il verificarsi di quella condizione dichiareranno
+una :c:type:`wait_queue_entry_t` facendo riferimento a loro stessi, poi
+metteranno questa in coda.
+
+Dichiarazione
+-------------
+
+Potete dichiarare una ``wait_queue_head_t`` utilizzando la macro
+:c:func:`DECLARE_WAIT_QUEUE_HEAD()` oppure utilizzando la procedura
+:c:func:`init_waitqueue_head()` nel vostro codice d'inizializzazione.
+
+Accodamento
+-----------
+
+Mettersi in una coda d'attesa è piuttosto complesso, perché dovete
+mettervi in coda prima di verificare la condizione. Esiste una macro
+a questo scopo: :c:func:`wait_event_interruptible()` (``include/linux/wait.h``).
+Il primo argomento è la testa della coda d'attesa, e il secondo è
+un'espressione che dev'essere valutata; la macro ritorna 0 quando questa
+espressione è vera, altrimenti ``-ERESTARTSYS`` se è stato ricevuto un segnale.
+La versione :c:func:`wait_event()` ignora i segnali.
+
+Svegliare una procedura in coda
+-------------------------------
+
+Chiamate :c:func:`wake_up()` (``include/linux/wait.h``); questa attiverà tutti
+i processi in coda, a meno che uno di questi sia impostato come
+``TASK_EXCLUSIVE``: in tal caso i rimanenti non verranno svegliati.
+Nello stesso header file esistono altre varianti di questa funzione.
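+
+Mettendo insieme i tre passi, ecco un abbozzo minimale (i nomi ``my_wq``
+e ``data_ready`` sono inventati)::
+
+    static DECLARE_WAIT_QUEUE_HEAD(my_wq);
+    static int data_ready;
+
+    /* chi attende: dorme finché data_ready non diventa vero */
+    static int consumer(void)
+    {
+        if (wait_event_interruptible(my_wq, data_ready))
+            return -ERESTARTSYS;    /* ricevuto un segnale */
+        /* ... la condizione è vera: usare i dati ... */
+        return 0;
+    }
+
+    /* chi sveglia: rende vera la condizione e attiva la coda */
+    static void producer(void)
+    {
+        data_ready = 1;
+        wake_up(&my_wq);
+    }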
+
+Operazioni atomiche
+===================
+
+Certe operazioni sono garantite come atomiche su tutte le piattaforme.
+Il primo gruppo di operazioni utilizza :c:type:`atomic_t`
+(``include/asm/atomic.h``); questo contiene un intero con segno (minimo 32bit),
+e dovete utilizzare queste funzioni per modificare o leggere variabili di tipo
+:c:type:`atomic_t`. :c:func:`atomic_read()` e :c:func:`atomic_set()` leggono ed
+impostano il contatore, :c:func:`atomic_add()`, :c:func:`atomic_sub()`,
+:c:func:`atomic_inc()`, :c:func:`atomic_dec()`, e
+:c:func:`atomic_dec_and_test()` (ritorna vero se raggiunge zero dopo essere
+stata decrementata).
+
+Sì. Ritorna vero (ovvero != 0) se la variabile atomica è zero.
+
+Da notare che queste funzioni sono più lente rispetto alla normale aritmetica,
+e quindi non dovrebbero essere usate a sproposito.
+
+Il secondo gruppo di operazioni atomiche sono definite in
+``include/linux/bitops.h`` ed agiscono sui bit d'una variabile di tipo
+``unsigned long``. Queste operazioni prendono come argomento un puntatore
+alla variabile, e un numero di bit dove 0 è quello meno significativo.
+:c:func:`set_bit()`, :c:func:`clear_bit()` e :c:func:`change_bit()`
+impostano, cancellano, ed invertono il bit indicato.
+:c:func:`test_and_set_bit()`, :c:func:`test_and_clear_bit()` e
+:c:func:`test_and_change_bit()` fanno la stessa cosa, ad eccezione che
+ritornano vero se il bit era impostato; queste sono particolarmente
+utili quando si vuole impostare atomicamente dei flag.
+
+Con queste operazioni è possibile utilizzare indici di bit che eccedono
+il valore ``BITS_PER_LONG``. Il comportamento è strano sulle piattaforme
+big-endian quindi è meglio evitarlo.
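+
+Per esempio (i nomi sono inventati)::
+
+    static atomic_t users = ATOMIC_INIT(0);
+    static unsigned long flags_word;
+
+    atomic_inc(&users);
+    if (atomic_dec_and_test(&users))
+        pr_info("nessun utilizzatore rimasto\n");
+
+    /* impostazione atomica d'un flag */
+    if (test_and_set_bit(0, &flags_word))
+        pr_info("il bit 0 era già impostato\n");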
+
+Simboli
+=======
+
+All'interno del kernel, si seguono le normali regole del linker (ovvero,
+a meno che un simbolo non venga dichiarato con visibilità limitata ad un
+file con la parola chiave ``static``, esso può essere utilizzato in qualsiasi
+parte del kernel). Nonostante ciò, per i moduli, esiste una tabella dei
+simboli esportati che limita i punti di accesso al kernel. Anche i moduli
+possono esportare simboli.
+
+:c:func:`EXPORT_SYMBOL()`
+-------------------------
+
+Definita in ``include/linux/export.h``
+
+Questo è il classico metodo per esportare un simbolo: i moduli caricati
+dinamicamente potranno utilizzare normalmente il simbolo.
+
+:c:func:`EXPORT_SYMBOL_GPL()`
+-----------------------------
+
+Definita in ``include/linux/export.h``
+
+Essa è simile a :c:func:`EXPORT_SYMBOL()` ad eccezione del fatto che i
+simboli esportati con :c:func:`EXPORT_SYMBOL_GPL()` possono essere
+utilizzati solo dai moduli che hanno dichiarato una licenza compatibile
+con la GPL attraverso :c:func:`MODULE_LICENSE()`. Questo implica che la
+funzione esportata è considerata interna, e non una vera e propria interfaccia.
+Alcuni manutentori e sviluppatori potrebbero comunque richiedere
+:c:func:`EXPORT_SYMBOL_GPL()` quando si aggiungono nuove funzionalità o
+interfacce.
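+
+Per esempio, per esportare ai moduli una funzione (ipotetica)::
+
+    #include <linux/export.h>
+
+    int my_helper(void)
+    {
+        return 0;
+    }
+    EXPORT_SYMBOL_GPL(my_helper);   /* oppure EXPORT_SYMBOL(my_helper) */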
+
+Procedure e convenzioni
+=======================
+
+Liste doppiamente concatenate ``include/linux/list.h``
+------------------------------------------------------
+
+Un tempo negli header del kernel c'erano tre gruppi di funzioni per
+le liste concatenate, ma questa è stata la vincente. Se non avete particolari
+necessità per una semplice lista concatenata, allora questa è una buona scelta.
+
+In particolare, :c:func:`list_for_each_entry()` è utile.
+
+Convenzione dei valori di ritorno
+---------------------------------
+
+Per codice chiamato in contesto utente, è molto comune sfidare le convenzioni
+C e ritornare 0 in caso di successo, ed un codice di errore negativo
+(eg. ``-EFAULT``) nei casi fallimentari. Questo potrebbe essere controintuitivo
+a prima vista, ma è abbastanza diffuso nel kernel.
+
+Utilizzate :c:func:`ERR_PTR()` (``include/linux/err.h``) per codificare
+un numero d'errore negativo in un puntatore, e :c:func:`IS_ERR()` e
+:c:func:`PTR_ERR()` per recuperarlo di nuovo: così si evita d'avere un
+puntatore dedicato per il numero d'errore. Da brividi, ma in senso positivo.
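+
+Un abbozzo della convenzione (la struttura ``foo`` è ipotetica)::
+
+    static struct foo *foo_create(void)
+    {
+        struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
+
+        if (!f)
+            return ERR_PTR(-ENOMEM);
+        return f;
+    }
+
+    /* dal lato del chiamante */
+    struct foo *f = foo_create();
+    if (IS_ERR(f))
+        return PTR_ERR(f);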
+
+Rompere la compilazione
+-----------------------
+
+Linus e gli altri sviluppatori a volte cambiano i nomi delle funzioni e
+delle strutture nei kernel in sviluppo; questo non è solo per tenere
+tutti sulle spine: questo riflette cambiamenti fondamentali (eg. la funzione
+non può più essere chiamata con le interruzioni attive, o fa controlli aggiuntivi,
+o non fa più controlli che venivano fatti in precedenza). Solitamente a questo
+s'accompagna un'adeguata e completa nota sulla lista di discussione
+linux-kernel; cercate negli archivi.
+Solitamente eseguire una semplice sostituzione su tutto un file rende
+le cose **peggiori**.
+
+Inizializzazione dei campi d'una struttura
+------------------------------------------
+
+Il metodo preferito per l'inizializzazione delle strutture è quello
+di utilizzare gli inizializzatori designati, come definiti nello
+standard ISO C99, eg::
+
+ static struct block_device_operations opt_fops = {
+ .open = opt_open,
+ .release = opt_release,
+ .ioctl = opt_ioctl,
+ .check_media_change = opt_media_change,
+ };
+
+Questo rende più facile la ricerca con grep, e rende più chiaro quale campo
+viene impostato. Dovreste fare così perché si mostra meglio.
+
+Estensioni GNU
+--------------
+
+Le estensioni GNU sono esplicitamente permesse nel kernel Linux. Da notare
+che alcune delle più complesse non sono ben supportate, per via dello scarso
+sviluppo, ma le seguenti sono da considerarsi la norma (per maggiori dettagli,
+leggete la sezione "C Extensions" nella pagina info di GCC - Sì, davvero
+la pagina info, la pagina man è solo un breve riassunto delle cose nella
+pagina info).
+
+- Funzioni inline
+
+- Istruzioni in espressioni (ie. il costrutto ({ e }) ).
+
+- Dichiarate attributi di una funzione / variabile / tipo
+ (__attribute__)
+
+- typeof
+
+- Array con lunghezza zero
+
+- Macro varargs
+
+- Aritmetica sui puntatori void
+
+- Inizializzatori non costanti
+
+- Istruzioni assembler (non al di fuori di 'arch/' e 'include/asm/')
+
+- Nomi delle funzioni come stringhe (__func__).
+
+- __builtin_constant_p()
+
+Siate sospettosi quando utilizzate long long nel kernel, il codice generato
+da gcc è orribile ed anche peggio: le divisioni e le moltiplicazioni non
+funzionano sulle piattaforme i386 perché le rispettive funzioni di runtime
+di GCC non sono incluse nell'ambiente del kernel.
+
+C++
+---
+
+Solitamente utilizzare il C++ nel kernel è una cattiva idea perché
+il kernel non fornisce il necessario ambiente di runtime e gli header file
+non sono stati verificati. Rimane comunque possibile, ma non consigliato.
+Se davvero volete usarlo, almeno evitate le eccezioni.
+
+#if
+---
+
+Viene generalmente considerato più pulito l'uso delle macro negli header file
+(o all'inizio dei file .c) per astrarre funzioni piuttosto che utilizzare
+l'istruzione del preprocessore ``#if`` all'interno del codice sorgente.
+
+Mettere le vostre cose nel kernel
+=================================
+
+Al fine d'avere le vostre cose in ordine per l'inclusione ufficiale, o
+anche per avere patch pulite, c'è del lavoro amministrativo da fare:
+
+- Trovare di chi è lo stagno in cui state pisciando. Guardare in cima
+ ai file sorgenti, all'interno del file ``MAINTAINERS``, ed alla fine
+ di tutti nel file ``CREDITS``. Dovreste coordinarvi con queste persone
+ per evitare di duplicare gli sforzi, o provare qualcosa che è già stato
+ rigettato.
+
+ Assicuratevi di mettere il vostro nome ed indirizzo email in cima a
+ tutti i file che create o che maneggiate significativamente. Questo è
+ il primo posto dove le persone guarderanno quando troveranno un baco,
+ o quando **loro** vorranno fare una modifica.
+
+- Solitamente vorrete un'opzione di configurazione per la vostra modifica
+ al kernel. Modificate ``Kconfig`` nella cartella giusta. Il linguaggio
+ Config è facile con copia ed incolla, e c'è una completa documentazione
+ nel file ``Documentation/kbuild/kconfig-language.txt``.
+
+ Nella descrizione della vostra opzione, assicuratevi di parlare sia agli
+ utenti esperti sia agli utente che non sanno nulla del vostro lavoro.
+ Menzionate qui le incompatibilità ed i problemi. Chiaramente la
+ descrizione deve terminare con “if in doubt, say N” (se siete in dubbio,
+ dite N) (oppure, occasionalmente, ``Y``); questo è per le persone che non
+ hanno idea di che cosa voi stiate parlando.
+
+- Modificate il file ``Makefile``: le variabili CONFIG sono esportate qui,
+ quindi potete solitamente aggiungere una riga come la seguente
+ "obj-$(CONFIG_xxx) += xxx.o". La sintassi è documentata nel file
+ ``Documentation/kbuild/makefiles.txt``.
+
+- Aggiungete voi stessi in ``CREDITS`` se avete fatto qualcosa di notevole,
+ solitamente qualcosa che supera il singolo file (comunque il vostro nome
+ dovrebbe essere all'inizio dei file sorgenti). ``MAINTAINERS`` significa
+ che volete essere consultati quando vengono fatte delle modifiche ad un
+ sottosistema, e quando ci sono dei bachi; questo implica molto di più
+ di un semplice impegno su una parte del codice.
+
+- Infine, non dimenticatevi di leggere
+ ``Documentation/process/submitting-patches.rst`` e possibilmente anche
+ ``Documentation/process/submitting-drivers.rst``.
+
+Trucchetti del kernel
+=====================
+
+Dopo una rapida occhiata al codice, questi sono i preferiti. Sentitevi liberi
+di aggiungerne altri.
+
+``arch/x86/include/asm/delay.h``::
+
+ #define ndelay(n) (__builtin_constant_p(n) ? \
+ ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
+ __ndelay(n))
+
+
+``include/linux/fs.h``::
+
+ /*
+ * Kernel pointers have redundant information, so we can use a
+ * scheme where we can return either an error code or a dentry
+ * pointer with the same return value.
+ *
+ * This should be a per-architecture thing, to allow different
+ * error and pointer decisions.
+ */
+ #define ERR_PTR(err) ((void *)((long)(err)))
+ #define PTR_ERR(ptr) ((long)(ptr))
+ #define IS_ERR(ptr) ((unsigned long)(ptr) > (unsigned long)(-1000))
+
+``arch/x86/include/asm/uaccess_32.h:``::
+
+ #define copy_to_user(to,from,n) \
+ (__builtin_constant_p(n) ? \
+ __constant_copy_to_user((to),(from),(n)) : \
+ __generic_copy_to_user((to),(from),(n)))
+
+
+``arch/sparc/kernel/head.S:``::
+
+ /*
+ * Sun people can't spell worth damn. "compatability" indeed.
+ * At least we *know* we can't spell, and use a spell-checker.
+ */
+
+ /* Uh, actually Linus it is I who cannot spell. Too much murky
+ * Sparc assembly will do this to ya.
+ */
+ C_LABEL(cputypvar):
+ .asciz "compatibility"
+
+ /* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */
+ .align 4
+ C_LABEL(cputypvar_sun4m):
+ .asciz "compatible"
+
+
+``arch/sparc/lib/checksum.S:``::
+
+ /* Sun, you just can't beat me, you just can't. Stop trying,
+ * give up. I'm serious, I am going to kick the living shit
+ * out of you, game over, lights out.
+ */
+
+
+Ringraziamenti
+==============
+
+Ringrazio Andi Kleen per le sue idee, le risposte alle mie domande,
+le correzioni dei miei errori, l'aggiunta di contenuti, eccetera.
+Philipp Rumpf per l'ortografia e per aver reso più chiaro il testo, e
+per alcuni eccellenti punti tutt'altro che ovvi. Werner Almesberger
+per avermi fornito un ottimo riassunto di :c:func:`disable_irq()`,
+e Jes Sorensen e Andrea Arcangeli per le precisazioni. Michael Elizabeth
+Chastain per aver verificato ed aggiunto la sezione configurazione.
+Telsa Gwynne per avermi insegnato DocBook.
diff --git a/Documentation/translations/it_IT/kernel-hacking/index.rst b/Documentation/translations/it_IT/kernel-hacking/index.rst
new file mode 100644
index 000000000000..50228380bd50
--- /dev/null
+++ b/Documentation/translations/it_IT/kernel-hacking/index.rst
@@ -0,0 +1,16 @@
+.. include:: ../disclaimer-ita.rst
+
+:Original: :ref:`Documentation/kernel-hacking/index.rst <kernel_hacking>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
+
+.. _it_kernel_hacking:
+
+============================
+Guida all'hacking del kernel
+============================
+
+.. toctree::
+ :maxdepth: 2
+
+ hacking
+ locking
diff --git a/Documentation/translations/it_IT/kernel-hacking/locking.rst b/Documentation/translations/it_IT/kernel-hacking/locking.rst
new file mode 100644
index 000000000000..753643622c23
--- /dev/null
+++ b/Documentation/translations/it_IT/kernel-hacking/locking.rst
@@ -0,0 +1,1493 @@
+.. include:: ../disclaimer-ita.rst
+
+:Original: :ref:`Documentation/kernel-hacking/locking.rst <kernel_hacking_lock>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
+
+.. _it_kernel_hacking_lock:
+
+==========================================
+L'inaffidabile guida alla sincronizzazione
+==========================================
+
+:Author: Rusty Russell
+
+Introduzione
+============
+
+Benvenuto alla notevole ed inaffidabile guida ai problemi di sincronizzazione
+(locking) nel kernel. Questo documento descrive il sistema di sincronizzazione
+nel kernel Linux 2.6.
+
+Dato il largo utilizzo del multi-threading e della prelazione nel kernel
+Linux, chiunque voglia dilettarsi col kernel deve conoscere i concetti
+fondamentali della concorrenza e della sincronizzazione nei sistemi
+multi-processore.
+
+Il problema con la concorrenza
+==============================
+
+(Saltatelo se sapete già cos'è una corsa critica).
+
+In un normale programma, potete incrementare un contatore nel seguente modo:
+
+::
+
+ contatore++;
+
+Questo è quello che vi aspettereste che accada sempre:
+
+
+.. table:: Risultati attesi
+
+ +------------------------------------+------------------------------------+
+ | Istanza 1 | Istanza 2 |
+ +====================================+====================================+
+ | leggi contatore (5) | |
+ +------------------------------------+------------------------------------+
+ | aggiungi 1 (6) | |
+ +------------------------------------+------------------------------------+
+ | scrivi contatore (6) | |
+ +------------------------------------+------------------------------------+
+ | | leggi contatore (6) |
+ +------------------------------------+------------------------------------+
+ | | aggiungi 1 (7) |
+ +------------------------------------+------------------------------------+
+ | | scrivi contatore (7) |
+ +------------------------------------+------------------------------------+
+
+Questo è quello che potrebbe succedere in realtà:
+
+.. table:: Possibile risultato
+
+ +------------------------------------+------------------------------------+
+ | Istanza 1 | Istanza 2 |
+ +====================================+====================================+
+ | leggi contatore (5) | |
+ +------------------------------------+------------------------------------+
+ | | leggi contatore (5) |
+ +------------------------------------+------------------------------------+
+ | aggiungi 1 (6) | |
+ +------------------------------------+------------------------------------+
+ | | aggiungi 1 (6) |
+ +------------------------------------+------------------------------------+
+ | scrivi contatore (6) | |
+ +------------------------------------+------------------------------------+
+ | | scrivi contatore (6) |
+ +------------------------------------+------------------------------------+
+
+
+Corse critiche e sezioni critiche
+---------------------------------
+
+Questa sovrapposizione, ovvero quando un risultato dipende dal tempo che
+intercorre fra processi diversi, è chiamata corsa critica. La porzione
+di codice che contiene questo problema è chiamata sezione critica.
+In particolar modo da quando Linux ha incominciato a girare su
+macchine multi-processore, le sezioni critiche sono diventate uno dei
+maggiori problemi di progettazione ed implementazione del kernel.
+
+La prelazione può sortire gli stessi effetti, anche se c'è una sola CPU:
+interrompendo un processo nella sua sezione critica otterremo comunque
+la stessa corsa critica. In questo caso, il thread che si avvicenda
+nell'esecuzione potrebbe eseguire anch'esso la sezione critica.
+
+La soluzione è quella di riconoscere quando avvengono questi accessi
+simultanei, ed utilizzare i *lock* per accertarsi che solo un'istanza
+per volta possa entrare nella sezione critica. Il kernel offre delle buone
+funzioni a questo scopo. E poi ci sono quelle meno buone, ma farò finta
+che non esistano.
+
+Sincronizzazione nel kernel Linux
+=================================
+
+Se posso darvi un suggerimento: non dormite mai con qualcuno più pazzo di
+voi. Ma se dovessi darvi un suggerimento sulla sincronizzazione:
+**mantenetela semplice**.
+
+Siate riluttanti nell'introduzione di nuovi *lock*.
+
+Stranamente, quest'ultimo è l'esatto opposto del mio suggerimento
+su quando **avete** dormito con qualcuno più pazzo di voi. E dovreste
+pensare a prendervi un cane bello grande.
+
+I due principali tipi di *lock* nel kernel: spinlock e mutex
+------------------------------------------------------------
+
+Ci sono due tipi principali di *lock* nel kernel. Il tipo fondamentale è lo
+spinlock (``include/asm/spinlock.h``), un semplice *lock* che può essere
+trattenuto solo da un processo: se non si può trattenere lo spinlock, allora
+rimane in attesa attiva (in inglese *spinning*) finché non ci riesce.
+Gli spinlock sono molto piccoli e rapidi, possono essere utilizzati ovunque.
+
+Il secondo tipo è il mutex (``include/linux/mutex.h``): è come uno spinlock,
+ma potreste bloccarvi trattenendolo. Se non potete trattenere un mutex
+il vostro processo si auto-sospenderà; verrà riattivato quando il mutex
+verrà rilasciato. Questo significa che il processore potrà occuparsi d'altro
+mentre il vostro processo è in attesa. Esistono molti casi in cui non potete
+permettervi di sospendere un processo (vedere
+:ref:`Quali funzioni possono essere chiamate in modo sicuro dalle interruzioni? <it_sleeping-things>`)
+e quindi dovrete utilizzare gli spinlock.
+
+Nessuno di questi *lock* è ricorsivo: vedere
+:ref:`Stallo: semplice ed avanzato <it_deadlock>`
+
+I *lock* e i kernel per sistemi monoprocessore
+----------------------------------------------
+
+Per i kernel compilati senza ``CONFIG_SMP`` e senza ``CONFIG_PREEMPT``
+gli spinlock non esistono. Questa è un'ottima scelta di progettazione:
+quando nessun altro processo può essere eseguito in simultanea, allora
+non c'è la necessità di avere un *lock*.
+
+Se il kernel è compilato senza ``CONFIG_SMP`` ma con ``CONFIG_PREEMPT``,
+allora gli spinlock disabilitano la prelazione; questo è sufficiente a
+prevenire le corse critiche. Nella maggior parte dei casi, possiamo considerare
+la prelazione equivalente ad un sistema multi-processore senza preoccuparci
+di trattarla indipendentemente.
+
+Dovreste verificare sempre la sincronizzazione con le opzioni ``CONFIG_SMP`` e
+``CONFIG_PREEMPT`` abilitate, anche quando non avete un sistema
+multi-processore, questo vi permetterà di identificare alcuni problemi
+di sincronizzazione.
+
+Come vedremo di seguito, i mutex continuano ad esistere perché sono necessari
+per la sincronizzazione fra processi in contesto utente.
+
+Sincronizzazione in contesto utente
+-----------------------------------
+
+Se avete una struttura dati che verrà utilizzata solo dal contesto utente,
+allora, per proteggerla, potete utilizzare un semplice mutex
+(``include/linux/mutex.h``). Questo è il caso più semplice: inizializzate il
+mutex; invocate :c:func:`mutex_lock_interruptible()` per trattenerlo e
+:c:func:`mutex_unlock()` per rilasciarlo. C'è anche :c:func:`mutex_lock()`
+ma questa dovrebbe essere evitata perché non ritorna in caso di segnali.
+
+Per esempio: ``net/netfilter/nf_sockopt.c`` permette la registrazione
+di nuove chiamate per :c:func:`setsockopt()` e :c:func:`getsockopt()`
+usando la funzione :c:func:`nf_register_sockopt()`. La registrazione e
+la rimozione vengono eseguite solamente quando il modulo viene caricato
+o scaricato (e durante l'avvio del sistema, qui non abbiamo concorrenza),
+e la lista delle funzioni registrate viene consultata solamente quando
+:c:func:`setsockopt()` o :c:func:`getsockopt()` sono sconosciute al sistema.
+In questo caso ``nf_sockopt_mutex`` è perfetto allo scopo, in particolar modo
+visto che setsockopt e getsockopt potrebbero dormire.
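+
+In forma d'abbozzo (il nome ``my_mutex`` è inventato)::
+
+    static DEFINE_MUTEX(my_mutex);
+
+    if (mutex_lock_interruptible(&my_mutex))
+        return -ERESTARTSYS;    /* interrotti da un segnale */
+
+    /* ... qui si può anche dormire ... */
+
+    mutex_unlock(&my_mutex);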
+
+Sincronizzazione fra il contesto utente e i softirq
+---------------------------------------------------
+
+Se un softirq condivide dati col contesto utente, avete due problemi.
+Primo, il contesto utente corrente potrebbe essere interrotto da un softirq,
+e secondo, la sezione critica potrebbe essere eseguita da un altro
+processore. Questo è quando :c:func:`spin_lock_bh()`
+(``include/linux/spinlock.h``) viene utilizzato. Questo disabilita i softirq
+sul processore e trattiene il *lock*. Invece, :c:func:`spin_unlock_bh()` fa
+l'opposto. (Il suffisso '_bh' è un residuo storico che fa riferimento al
+"Bottom Halves", il vecchio nome delle interruzioni software. In un mondo
+perfetto questa funzione si chiamerebbe 'spin_lock_softirq()').
+
+Da notare che in questo caso potete utilizzare anche :c:func:`spin_lock_irq()`
+o :c:func:`spin_lock_irqsave()`, queste fermano anche le interruzioni hardware:
+vedere :ref:`Contesto di interruzione hardware <it_hardirq-context>`.
+
+Questo funziona alla perfezione anche sui sistemi monoprocessore: gli spinlock
+svaniscono e questa macro diventa semplicemente :c:func:`local_bh_disable()`
+(``include/linux/interrupt.h``), la quale impedisce ai softirq d'essere
+eseguiti.
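+
+In forma d'abbozzo (il nome ``my_lock`` è inventato)::
+
+    static DEFINE_SPINLOCK(my_lock);
+
+    /* nel contesto utente */
+    spin_lock_bh(&my_lock);
+    /* ... accesso ai dati condivisi col softirq ... */
+    spin_unlock_bh(&my_lock);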
+
+Sincronizzazione fra contesto utente e i tasklet
+------------------------------------------------
+
+Questo caso è uguale al precedente, un tasklet viene eseguito da un softirq.
+
+Sincronizzazione fra contesto utente e i timer
+----------------------------------------------
+
+Anche questo caso è uguale al precedente, un timer viene eseguito da un
+softirq.
+Dal punto di vista della sincronizzazione, tasklet e timer sono identici.
+
+Sincronizzazione fra tasklet e timer
+------------------------------------
+
+Qualche volta un tasklet od un timer potrebbero condividere i dati con
+un altro tasklet o timer.
+
+Lo stesso tasklet/timer
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Dato che un tasklet non viene mai eseguito contemporaneamente su due
+processori, non dovete preoccuparvi che sia rientrante (ovvero eseguito
+più volte in contemporanea), perfino su sistemi multi-processore.
+
+Differenti tasklet/timer
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Se un altro tasklet/timer vuole condividere dati col vostro tasklet o timer,
+allora avrete bisogno entrambi di :c:func:`spin_lock()` e
+:c:func:`spin_unlock()`. Qui :c:func:`spin_lock_bh()` è inutile, siete già
+in un tasklet ed avete la garanzia che nessun altro verrà eseguito sullo
+stesso processore.
+
+Sincronizzazione fra softirq
+----------------------------
+
+Spesso un softirq potrebbe condividere dati con se stesso o un tasklet/timer.
+
+Lo stesso softirq
+~~~~~~~~~~~~~~~~~
+
+Lo stesso softirq può essere eseguito su un diverso processore: allo scopo
+di migliorare le prestazioni potete utilizzare dati riservati ad ogni
+processore (vedere :ref:`Dati per processore <it_per-cpu>`). Se siete arrivati
+fino a questo punto nell'uso dei softirq, probabilmente tenete alla scalabilità
+delle prestazioni abbastanza da giustificarne la complessità aggiuntiva.
+
+Dovete utilizzare :c:func:`spin_lock()` e :c:func:`spin_unlock()` per
+proteggere i dati condivisi.
+
+Diversi Softirqs
+~~~~~~~~~~~~~~~~
+
+Dovete utilizzare :c:func:`spin_lock()` e :c:func:`spin_unlock()` per
+proteggere i dati condivisi, che siano timer, tasklet, diversi softirq o
+lo stesso o altri softirq: uno qualsiasi di essi potrebbe essere in esecuzione
+su un diverso processore.
+
+.. _`it_hardirq-context`:
+
+Contesto di interruzione hardware
+=================================
+
+Solitamente le interruzioni hardware comunicano con un tasklet o un softirq.
+Spesso questo si traduce nel mettere in coda qualcosa da fare che verrà
+preso in carico da un softirq.
+
+Sincronizzazione fra interruzioni hardware e softirq/tasklet
+------------------------------------------------------------
+
+Se un gestore di interruzioni hardware condivide dati con un softirq, allora
+avrete due preoccupazioni. Primo, il softirq può essere interrotto da
+un'interruzione hardware, e secondo, la sezione critica potrebbe essere
+eseguita da un'interruzione hardware su un processore diverso. Questo è il caso
+dove :c:func:`spin_lock_irq()` viene utilizzato. Disabilita le interruzioni
+sul processore che l'esegue, poi trattiene il lock. :c:func:`spin_unlock_irq()`
+fa l'opposto.
+
+Il gestore d'interruzione hardware non usa :c:func:`spin_lock_irq()` perché
+i softirq non possono essere eseguiti quando il gestore d'interruzione hardware
+è in esecuzione: per questo si può usare :c:func:`spin_lock()`, che è un po'
+più veloce. L'unica eccezione è quando un altro gestore d'interruzioni
+hardware utilizza lo stesso *lock*: :c:func:`spin_lock_irq()` impedirà a questo
+secondo gestore di interrompere quello in esecuzione.
+
+Questo funziona alla perfezione anche sui sistemi monoprocessore: gli spinlock
+svaniscono e questa macro diventa semplicemente :c:func:`local_irq_disable()`
+(``include/asm/smp.h``), la quale impedisce a softirq/tasklet/BH d'essere
+eseguiti.
+
+:c:func:`spin_lock_irqsave()` (``include/linux/spinlock.h``) è una variante che
+salva lo stato delle interruzioni in una variabile, questa verrà poi passata
+a :c:func:`spin_unlock_irqrestore()`. Questo significa che lo stesso codice
+potrà essere utilizzato in un'interruzione hardware (dove le interruzioni sono
+già disabilitate) e in un softirq (dove la disabilitazione delle interruzioni
+è richiesta).
+
+Da notare che i softirq (e quindi tasklet e timer) vengono eseguiti al
+ritorno da un'interruzione hardware, quindi :c:func:`spin_lock_irq()`
+blocca anche questi. Tenuto conto di ciò si può dire che
+:c:func:`spin_lock_irqsave()` è la funzione di sincronizzazione più generica
+e potente.
+
+Sincronizzazione fra due gestori d'interruzioni hardware
+--------------------------------------------------------
+
+Condividere dati fra due gestori di interruzione hardware è molto raro, ma se
+succede, dovreste usare :c:func:`spin_lock_irqsave()`: è una questione
+specifica dell'architettura se tutte le interruzioni vengano o meno
+disabilitate durante l'esecuzione dei gestori d'interruzione.
+
+Bigino della sincronizzazione
+=============================
+
+Pete Zaitcev ci offre il seguente riassunto:
+
+- Se siete in un contesto utente (una qualsiasi chiamata di sistema)
+ e volete sincronizzarvi con altri processi, usate i mutex. Potete trattenere
+ il mutex e dormire (``copy_from_user()`` o ``kmalloc(x,GFP_KERNEL)``).
+
+- Altrimenti (== i dati possono essere manipolati da un'interruzione) usate
+ :c:func:`spin_lock_irqsave()` e :c:func:`spin_unlock_irqrestore()`.
+
+- Evitate di trattenere uno spinlock per più di 5 righe di codice incluse
+ le chiamate a funzione (ad eccezione delle funzioni d'accesso come
+ :c:func:`readb()`).
+
+Tabella dei requisiti minimi
+----------------------------
+
+La tabella seguente illustra i requisiti **minimi** per la sincronizzazione fra
+diversi contesti. In alcuni casi, lo stesso contesto può essere eseguito solo
+da un processore per volta, quindi non ci sono requisiti per la
+sincronizzazione (per esempio, un thread può essere eseguito solo su un
+processore alla volta, ma se deve condividere dati con un altro thread, allora
+la sincronizzazione è necessaria).
+
+Ricordatevi il suggerimento qui sopra: potete sempre usare
+:c:func:`spin_lock_irqsave()`, che è un sovrainsieme di tutte le altre funzioni
+per spinlock.
+
+============== ============= ============= ========= ========= ========= ========= ======= ======= ============== ==============
+. IRQ Handler A IRQ Handler B Softirq A Softirq B Tasklet A Tasklet B Timer A Timer B User Context A User Context B
+============== ============= ============= ========= ========= ========= ========= ======= ======= ============== ==============
+IRQ Handler A None
+IRQ Handler B SLIS None
+Softirq A SLI SLI SL
+Softirq B SLI SLI SL SL
+Tasklet A SLI SLI SL SL None
+Tasklet B SLI SLI SL SL SL None
+Timer A SLI SLI SL SL SL SL None
+Timer B SLI SLI SL SL SL SL SL None
+User Context A SLI SLI SLBH SLBH SLBH SLBH SLBH SLBH None
+User Context B SLI SLI SLBH SLBH SLBH SLBH SLBH SLBH MLI None
+============== ============= ============= ========= ========= ========= ========= ======= ======= ============== ==============
+
+Table: Tabella dei requisiti per la sincronizzazione
+
++--------+----------------------------+
+| SLIS | spin_lock_irqsave |
++--------+----------------------------+
+| SLI | spin_lock_irq |
++--------+----------------------------+
+| SL | spin_lock |
++--------+----------------------------+
+| SLBH | spin_lock_bh |
++--------+----------------------------+
+| MLI | mutex_lock_interruptible |
++--------+----------------------------+
+
+Table: Legenda per la tabella dei requisiti per la sincronizzazione
+
+Le funzioni *trylock*
+=====================
+
+Ci sono funzioni che provano a trattenere un *lock* solo una volta e
+ritornano immediatamente comunicando il successo od il fallimento
+dell'operazione. Possono essere usate quando non vi serve accedere ai dati
+protetti dal *lock* mentre qualche altro thread li sta già usando
+trattenendo il *lock*. Potrete acquisire il *lock* più tardi, se vi
+servirà accedere ai dati che protegge.
+
+La funzione :c:func:`spin_trylock()` non rimane in attesa attiva sul *lock*:
+se riesce ad acquisirlo al primo colpo ritorna un valore diverso da zero,
+altrimenti ritorna 0. Questa funzione può essere utilizzata in qualunque
+contesto, ma come per :c:func:`spin_lock()`: dovete aver disabilitato i
+contesti che potrebbero interrompervi mentre trattenete lo spinlock.
+
+La funzione :c:func:`mutex_trylock()` invece di sospendere il vostro processo
+ritorna un valore diverso da zero se è possibile trattenere il lock al primo
+colpo, altrimenti se fallisce ritorna 0. Nonostante non dorma, questa funzione
+non può essere usata in modo sicuro in contesti di interruzione hardware o
+software.
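+
+Per esempio, uno schizzo (puramente illustrativo, con nomi ipotetici)
+dell'uso di :c:func:`spin_trylock()` per saltare un lavoro facoltativo
+quando il *lock* è già trattenuto da qualcun altro::
+
+    static DEFINE_SPINLOCK(stats_lock);
+    static unsigned long stats_counter;
+
+    static void try_update_stats(void)
+    {
+            /* Qualcun altro sta già aggiornando le statistiche:
+               rinunciamo invece di rimanere in attesa attiva. */
+            if (!spin_trylock(&stats_lock))
+                    return;
+            stats_counter++;
+            spin_unlock(&stats_lock);
+    }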
+
+Esempi più comuni
+=================
+
+Guardiamo un semplice esempio: una memoria che associa nomi a numeri.
+La memoria tiene traccia di quanto spesso viene utilizzato ogni oggetto;
+quando è piena, l'oggetto meno usato viene eliminato.
+
+Tutto in contesto utente
+------------------------
+
+Nel primo esempio, supponiamo che tutte le operazioni avvengano in contesto
+utente (in soldoni, da una chiamata di sistema), quindi possiamo dormire.
+Questo significa che possiamo usare i mutex per proteggere la nostra memoria
+e tutti gli oggetti che contiene. Ecco il codice::
+
+    #include <linux/list.h>
+    #include <linux/slab.h>
+    #include <linux/string.h>
+    #include <linux/mutex.h>
+    #include <asm/errno.h>
+
+    struct object
+    {
+            struct list_head list;
+            int id;
+            char name[32];
+            int popularity;
+    };
+
+    /* Protects the cache, cache_num, and the objects within it */
+    static DEFINE_MUTEX(cache_lock);
+    static LIST_HEAD(cache);
+    static unsigned int cache_num = 0;
+    #define MAX_CACHE_SIZE 10
+
+    /* Must be holding cache_lock */
+    static struct object *__cache_find(int id)
+    {
+            struct object *i;
+
+            list_for_each_entry(i, &cache, list)
+                    if (i->id == id) {
+                            i->popularity++;
+                            return i;
+                    }
+            return NULL;
+    }
+
+    /* Must be holding cache_lock */
+    static void __cache_delete(struct object *obj)
+    {
+            BUG_ON(!obj);
+            list_del(&obj->list);
+            kfree(obj);
+            cache_num--;
+    }
+
+    /* Must be holding cache_lock */
+    static void __cache_add(struct object *obj)
+    {
+            list_add(&obj->list, &cache);
+            if (++cache_num > MAX_CACHE_SIZE) {
+                    struct object *i, *outcast = NULL;
+                    list_for_each_entry(i, &cache, list) {
+                            if (!outcast || i->popularity < outcast->popularity)
+                                    outcast = i;
+                    }
+                    __cache_delete(outcast);
+            }
+    }
+
+    int cache_add(int id, const char *name)
+    {
+            struct object *obj;
+
+            if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
+                    return -ENOMEM;
+
+            strlcpy(obj->name, name, sizeof(obj->name));
+            obj->id = id;
+            obj->popularity = 0;
+
+            mutex_lock(&cache_lock);
+            __cache_add(obj);
+            mutex_unlock(&cache_lock);
+            return 0;
+    }
+
+    void cache_delete(int id)
+    {
+            mutex_lock(&cache_lock);
+            __cache_delete(__cache_find(id));
+            mutex_unlock(&cache_lock);
+    }
+
+    int cache_find(int id, char *name)
+    {
+            struct object *obj;
+            int ret = -ENOENT;
+
+            mutex_lock(&cache_lock);
+            obj = __cache_find(id);
+            if (obj) {
+                    ret = 0;
+                    strcpy(name, obj->name);
+            }
+            mutex_unlock(&cache_lock);
+            return ret;
+    }
+
+Da notare che ci assicuriamo sempre di trattenere cache_lock quando
+aggiungiamo, rimuoviamo od ispezioniamo la memoria: sia la struttura
+della memoria che il suo contenuto sono protetti dal *lock*. Questo
+caso è semplice dato che copiamo i dati dall'utente e non permettiamo
+mai loro di accedere direttamente agli oggetti.
+
+C'è una piccola ottimizzazione qui: nella funzione :c:func:`cache_add()`
+impostiamo i campi dell'oggetto prima di acquisire il *lock*. Questo è
+sicuro perché nessun altro potrà accedervi finché non lo inseriremo
+nella memoria.
+
+Accesso dal contesto d'interruzione
+-----------------------------------
+
+Ora consideriamo il caso in cui :c:func:`cache_find()` può essere invocata
+dal contesto d'interruzione: sia hardware che software. Un esempio potrebbe
+essere un timer che elimina oggetti dalla memoria.
+
+Qui di seguito troverete la modifica nel formato *patch*: le righe ``-``
+sono quelle rimosse, mentre quelle ``+`` sono quelle aggiunte.
+
+::
+
+ --- cache.c.usercontext 2003-12-09 13:58:54.000000000 +1100
+ +++ cache.c.interrupt 2003-12-09 14:07:49.000000000 +1100
+ @@ -12,7 +12,7 @@
+ int popularity;
+ };
+
+ -static DEFINE_MUTEX(cache_lock);
+ +static DEFINE_SPINLOCK(cache_lock);
+ static LIST_HEAD(cache);
+ static unsigned int cache_num = 0;
+ #define MAX_CACHE_SIZE 10
+ @@ -55,6 +55,7 @@
+ int cache_add(int id, const char *name)
+ {
+ struct object *obj;
+ + unsigned long flags;
+
+ if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ @@ -63,30 +64,33 @@
+ obj->id = id;
+ obj->popularity = 0;
+
+ - mutex_lock(&cache_lock);
+ + spin_lock_irqsave(&cache_lock, flags);
+ __cache_add(obj);
+ - mutex_unlock(&cache_lock);
+ + spin_unlock_irqrestore(&cache_lock, flags);
+ return 0;
+ }
+
+ void cache_delete(int id)
+ {
+ - mutex_lock(&cache_lock);
+ + unsigned long flags;
+ +
+ + spin_lock_irqsave(&cache_lock, flags);
+ __cache_delete(__cache_find(id));
+ - mutex_unlock(&cache_lock);
+ + spin_unlock_irqrestore(&cache_lock, flags);
+ }
+
+ int cache_find(int id, char *name)
+ {
+ struct object *obj;
+ int ret = -ENOENT;
+ + unsigned long flags;
+
+ - mutex_lock(&cache_lock);
+ + spin_lock_irqsave(&cache_lock, flags);
+ obj = __cache_find(id);
+ if (obj) {
+ ret = 0;
+ strcpy(name, obj->name);
+ }
+ - mutex_unlock(&cache_lock);
+ + spin_unlock_irqrestore(&cache_lock, flags);
+ return ret;
+ }
+
+Da notare che :c:func:`spin_lock_irqsave()` disabiliterà le interruzioni
+se erano attive, altrimenti non farà niente (quando siamo già in un contesto
+d'interruzione); dunque queste funzioni possono essere chiamate in
+sicurezza da qualsiasi contesto.
+
+Sfortunatamente, :c:func:`cache_add()` invoca :c:func:`kmalloc()` con
+l'opzione ``GFP_KERNEL`` che è permessa solo in contesto utente. Ho supposto
+che :c:func:`cache_add()` venga chiamata dal contesto utente, altrimenti
+questa opzione deve diventare un parametro di :c:func:`cache_add()`.
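+
+Se doveste generalizzarla, la modifica potrebbe assomigliare a questo
+schizzo (una variante ipotetica, non il codice dell'esempio)::
+
+    int cache_add(int id, const char *name, gfp_t gfp)
+    {
+            struct object *obj;
+            unsigned long flags;
+
+            /* Il chiamante sceglie l'opzione d'allocazione adatta
+               al proprio contesto (GFP_KERNEL, GFP_ATOMIC, ...). */
+            obj = kmalloc(sizeof(*obj), gfp);
+            if (!obj)
+                    return -ENOMEM;
+
+            strlcpy(obj->name, name, sizeof(obj->name));
+            obj->id = id;
+            obj->popularity = 0;
+
+            spin_lock_irqsave(&cache_lock, flags);
+            __cache_add(obj);
+            spin_unlock_irqrestore(&cache_lock, flags);
+            return 0;
+    }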
+
+Esporre gli oggetti al di fuori di questo file
+----------------------------------------------
+
+Se i vostri oggetti contengono più informazioni, potrebbe non essere
+sufficiente copiare i dati avanti e indietro: per esempio, altre parti del
+codice potrebbero avere un puntatore a questi oggetti piuttosto che cercarli
+ogni volta. Questo introduce due problemi.
+
+Il primo problema è che utilizziamo ``cache_lock`` per proteggere gli oggetti:
+dobbiamo renderlo dinamico così che il resto del codice possa usarlo. Questo
+rende la sincronizzazione più complicata dato che non avviene più in un unico
+posto.
+
+Il secondo problema è il problema del ciclo di vita: se un'altra struttura
+mantiene un puntatore ad un oggetto, presumibilmente si aspetta che questo
+puntatore rimanga valido. Sfortunatamente, questo è garantito solo mentre
+si trattiene il *lock*, altrimenti qualcuno potrebbe chiamare
+:c:func:`cache_delete()` o peggio, aggiungere un oggetto che riutilizza lo
+stesso indirizzo.
+
+Dato che c'è un solo *lock*, non potete trattenerlo a vita: altrimenti
+nessun altro potrà eseguire il proprio lavoro.
+
+La soluzione a questo problema è l'uso di un contatore di riferimenti:
+chiunque punti ad un oggetto deve incrementare il contatore, e decrementarlo
+quando il puntatore non viene più usato. Quando il contatore raggiunge lo zero
+significa che non è più usato e l'oggetto può essere rimosso.
+
+Ecco il codice::
+
+ --- cache.c.interrupt 2003-12-09 14:25:43.000000000 +1100
+ +++ cache.c.refcnt 2003-12-09 14:33:05.000000000 +1100
+ @@ -7,6 +7,7 @@
+ struct object
+ {
+ struct list_head list;
+ + unsigned int refcnt;
+ int id;
+ char name[32];
+ int popularity;
+ @@ -17,6 +18,35 @@
+ static unsigned int cache_num = 0;
+ #define MAX_CACHE_SIZE 10
+
+ +static void __object_put(struct object *obj)
+ +{
+ + if (--obj->refcnt == 0)
+ + kfree(obj);
+ +}
+ +
+ +static void __object_get(struct object *obj)
+ +{
+ + obj->refcnt++;
+ +}
+ +
+ +void object_put(struct object *obj)
+ +{
+ + unsigned long flags;
+ +
+ + spin_lock_irqsave(&cache_lock, flags);
+ + __object_put(obj);
+ + spin_unlock_irqrestore(&cache_lock, flags);
+ +}
+ +
+ +void object_get(struct object *obj)
+ +{
+ + unsigned long flags;
+ +
+ + spin_lock_irqsave(&cache_lock, flags);
+ + __object_get(obj);
+ + spin_unlock_irqrestore(&cache_lock, flags);
+ +}
+ +
+ /* Must be holding cache_lock */
+ static struct object *__cache_find(int id)
+ {
+ @@ -35,6 +65,7 @@
+ {
+ BUG_ON(!obj);
+ list_del(&obj->list);
+ + __object_put(obj);
+ cache_num--;
+ }
+
+ @@ -63,6 +94,7 @@
+ strlcpy(obj->name, name, sizeof(obj->name));
+ obj->id = id;
+ obj->popularity = 0;
+ + obj->refcnt = 1; /* The cache holds a reference */
+
+ spin_lock_irqsave(&cache_lock, flags);
+ __cache_add(obj);
+ @@ -79,18 +111,15 @@
+ spin_unlock_irqrestore(&cache_lock, flags);
+ }
+
+ -int cache_find(int id, char *name)
+ +struct object *cache_find(int id)
+ {
+ struct object *obj;
+ - int ret = -ENOENT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache_lock, flags);
+ obj = __cache_find(id);
+ - if (obj) {
+ - ret = 0;
+ - strcpy(name, obj->name);
+ - }
+ + if (obj)
+ + __object_get(obj);
+ spin_unlock_irqrestore(&cache_lock, flags);
+ - return ret;
+ + return obj;
+ }
+
+Abbiamo incapsulato il contatore di riferimenti nelle tipiche funzioni
+di 'get' e 'put'. Ora possiamo ritornare l'oggetto da :c:func:`cache_find()`
+col vantaggio che l'utente può dormire trattenendo l'oggetto (per esempio,
+:c:func:`copy_to_user()` per copiare il nome verso lo spazio utente).
+
+Un altro punto da notare è che ho detto che il contatore dovrebbe
+incrementarsi per ogni puntatore ad un oggetto: quindi il contatore di
+riferimenti è 1 quando l'oggetto viene inserito nella memoria. In altre
+versioni l'infrastruttura non trattiene un riferimento per sé, ma le cose
+si fanno più complicate.
+
+Usare operazioni atomiche per il contatore di riferimenti
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In sostanza, :c:type:`atomic_t` viene usato come contatore di riferimenti.
+Ci sono un certo numero di operazioni atomiche definite
+in ``include/asm/atomic.h``: queste sono garantite come atomiche su qualsiasi
+processore del sistema, quindi non sono necessari i *lock*. In questo caso è
+più semplice rispetto all'uso degli spinlock, benché l'uso degli spinlock
+sia più elegante per casi non banali. Le funzioni :c:func:`atomic_inc()` e
+:c:func:`atomic_dec_and_test()` vengono usate al posto dei tipici operatori di
+incremento e decremento, e i *lock* non sono più necessari per proteggere il
+contatore stesso.
+
+::
+
+ --- cache.c.refcnt 2003-12-09 15:00:35.000000000 +1100
+ +++ cache.c.refcnt-atomic 2003-12-11 15:49:42.000000000 +1100
+ @@ -7,7 +7,7 @@
+ struct object
+ {
+ struct list_head list;
+ - unsigned int refcnt;
+ + atomic_t refcnt;
+ int id;
+ char name[32];
+ int popularity;
+ @@ -18,33 +18,15 @@
+ static unsigned int cache_num = 0;
+ #define MAX_CACHE_SIZE 10
+
+ -static void __object_put(struct object *obj)
+ -{
+ - if (--obj->refcnt == 0)
+ - kfree(obj);
+ -}
+ -
+ -static void __object_get(struct object *obj)
+ -{
+ - obj->refcnt++;
+ -}
+ -
+ void object_put(struct object *obj)
+ {
+ - unsigned long flags;
+ -
+ - spin_lock_irqsave(&cache_lock, flags);
+ - __object_put(obj);
+ - spin_unlock_irqrestore(&cache_lock, flags);
+ + if (atomic_dec_and_test(&obj->refcnt))
+ + kfree(obj);
+ }
+
+ void object_get(struct object *obj)
+ {
+ - unsigned long flags;
+ -
+ - spin_lock_irqsave(&cache_lock, flags);
+ - __object_get(obj);
+ - spin_unlock_irqrestore(&cache_lock, flags);
+ + atomic_inc(&obj->refcnt);
+ }
+
+ /* Must be holding cache_lock */
+ @@ -65,7 +47,7 @@
+ {
+ BUG_ON(!obj);
+ list_del(&obj->list);
+ - __object_put(obj);
+ + object_put(obj);
+ cache_num--;
+ }
+
+ @@ -94,7 +76,7 @@
+ strlcpy(obj->name, name, sizeof(obj->name));
+ obj->id = id;
+ obj->popularity = 0;
+ - obj->refcnt = 1; /* The cache holds a reference */
+ + atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
+
+ spin_lock_irqsave(&cache_lock, flags);
+ __cache_add(obj);
+ @@ -119,7 +101,7 @@
+ spin_lock_irqsave(&cache_lock, flags);
+ obj = __cache_find(id);
+ if (obj)
+ - __object_get(obj);
+ + object_get(obj);
+ spin_unlock_irqrestore(&cache_lock, flags);
+ return obj;
+ }
+
+Proteggere l'oggetto stesso
+---------------------------
+
+In questo esempio, assumiamo che gli oggetti (ad eccezione del contatore
+di riferimenti) non cambino mai dopo la loro creazione. Se vogliamo permettere
+al nome di cambiare abbiamo tre possibilità:
+
+- Si può togliere static da ``cache_lock`` e dire agli utenti che devono
+ trattenere il *lock* prima di modificare il nome di un oggetto.
+
+- Si può fornire una funzione :c:func:`cache_obj_rename()` che prende il
+ *lock* e cambia il nome per conto del chiamante; si dirà poi agli utenti
+ di usare questa funzione (subito dopo quest'elenco trovate uno schizzo).
+
+- Si può decidere che ``cache_lock`` protegge solo la memoria stessa, ed
+ un altro *lock* è necessario per la protezione del nome.
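+
+La seconda possibilità potrebbe assomigliare a questo schizzo (la funzione
+è ipotetica e riusa ``cache_lock`` dell'esempio)::
+
+    void cache_obj_rename(struct object *obj, const char *name)
+    {
+            unsigned long flags;
+
+            /* Il lock protegge anche il contenuto degli oggetti,
+               quindi nessuno può leggere il nome a metà modifica. */
+            spin_lock_irqsave(&cache_lock, flags);
+            strlcpy(obj->name, name, sizeof(obj->name));
+            spin_unlock_irqrestore(&cache_lock, flags);
+    }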
+
+Teoricamente, possiamo avere un *lock* per ogni campo e per ogni oggetto.
+In pratica, le varianti più comuni sono:
+
+- un *lock* che protegge l'infrastruttura (la lista ``cache`` di questo
+ esempio) e gli oggetti. Questo è quello che abbiamo fatto finora.
+
+- un *lock* che protegge l'infrastruttura (inclusi i puntatori alla lista
+ negli oggetti), e un *lock* nell'oggetto per proteggere il resto
+ dell'oggetto stesso.
+
+- *lock* multipli per proteggere l'infrastruttura (per esempio un *lock*
+ per ogni lista), possibilmente con un *lock* per oggetto.
+
+Qui di seguito un'implementazione con "un lock per oggetto":
+
+::
+
+ --- cache.c.refcnt-atomic 2003-12-11 15:50:54.000000000 +1100
+ +++ cache.c.perobjectlock 2003-12-11 17:15:03.000000000 +1100
+ @@ -6,11 +6,17 @@
+
+ struct object
+ {
+ + /* These two protected by cache_lock. */
+ struct list_head list;
+ + int popularity;
+ +
+ atomic_t refcnt;
+ +
+ + /* Doesn't change once created. */
+ int id;
+ +
+ + spinlock_t lock; /* Protects the name */
+ char name[32];
+ - int popularity;
+ };
+
+ static DEFINE_SPINLOCK(cache_lock);
+ @@ -77,6 +84,7 @@
+ obj->id = id;
+ obj->popularity = 0;
+ atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
+ + spin_lock_init(&obj->lock);
+
+ spin_lock_irqsave(&cache_lock, flags);
+ __cache_add(obj);
+
+Da notare che ho deciso che il contatore di popolarità dovesse essere
+protetto da ``cache_lock`` piuttosto che dal *lock* dell'oggetto; questo
+perché è logicamente parte dell'infrastruttura (come
+:c:type:`struct list_head <list_head>` nell'oggetto). In questo modo,
+in :c:func:`__cache_add()`, non ho bisogno di trattenere il *lock* di ogni
+oggetto mentre si cerca il meno popolare.
+
+Ho anche deciso che il campo ``id`` è immutabile, quindi non ho bisogno di
+trattenere il *lock* dell'oggetto in :c:func:`__cache_find()` per leggere
+questo campo; il *lock* dell'oggetto è usato solo dal chiamante che vuole
+leggere o scrivere il campo ``name``.
+
+Inoltre, da notare che ho aggiunto un commento che descrive i dati che sono
+protetti dal *lock*. Questo è estremamente importante in quanto descrive il
+comportamento del codice, che altrimenti sarebbe di difficile comprensione
+leggendo solamente il codice. E come dice Alan Cox: “Lock data, not code”.
+
+Problemi comuni
+===============
+
+.. _`it_deadlock`:
+
+Stallo: semplice ed avanzato
+----------------------------
+
+Esiste un tipo di baco dove un pezzo di codice tenta di trattenere uno
+spinlock due volte: questo rimarrà in attesa attiva per sempre aspettando che
+il *lock* venga rilasciato (in Linux spinlocks, rwlocks e mutex non sono
+ricorsivi).
+Questo è facile da diagnosticare: non è uno di quei problemi che ti tengono
+sveglio 5 notti a parlare da solo.
+
+Un caso un pochino più complesso: immaginate d'avere uno spazio condiviso
+fra un softirq ed il contesto utente. Se usate :c:func:`spin_lock()` per
+proteggerlo, il contesto utente potrebbe essere interrotto dal softirq
+mentre trattiene il *lock*, e a quel punto il softirq rimarrà in attesa
+attiva tentando d'acquisire il *lock* già trattenuto nel contesto utente.
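+
+Il rimedio è che il contesto utente disabiliti le interruzioni software
+prima di prendere il *lock*; in forma schematica (``shared_lock`` e
+``shared_data`` sono nomi ipotetici)::
+
+    /* Contesto utente: i softirq non possono interromperci su
+       questo processore mentre tratteniamo il lock. */
+    spin_lock_bh(&shared_lock);
+    shared_data++;
+    spin_unlock_bh(&shared_lock);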
+
+Questi casi sono chiamati stalli (*deadlock*) e, come mostrato qui sopra,
+possono verificarsi anche con un solo processore (ma non nelle compilazioni
+per sistemi monoprocessore, perché gli spinlock spariscono quando il kernel
+viene compilato con ``CONFIG_SMP``\ =n; nel secondo caso otterrete comunque
+una corruzione dei dati).
+
+Questi casi sono facili da diagnosticare; sui sistemi multi-processore
+il temporizzatore di supervisione (*watchdog*) o l'opzione di compilazione
+``DEBUG_SPINLOCK`` (``include/linux/spinlock.h``) permettono di scovarli
+immediatamente quando succedono.
+
+Esiste un caso più complesso conosciuto come l'abbraccio della morte,
+che coinvolge due o più *lock*. Diciamo che avete un vettore di hash in cui
+ogni elemento è uno spinlock a cui è associata una lista di elementi con lo
+stesso hash. In un gestore di interruzioni software dovete modificare un
+oggetto e spostarlo su un altro hash; quindi dovrete trattenere lo spinlock
+del vecchio hash e di quello nuovo, rimuovere l'oggetto dal vecchio ed
+inserirlo nel nuovo.
+
+Qui abbiamo due problemi. Primo, se il vostro codice prova a spostare un
+oggetto all'interno della stessa lista, otterrete uno stallo visto che
+tenterà di trattenere lo stesso *lock* due volte. Secondo, se la stessa
+interruzione software su un altro processore sta tentando di spostare
+un altro oggetto nella direzione opposta, potrebbe accadere quanto segue:
+
++---------------------------------+---------------------------------+
+| CPU 1 | CPU 2 |
++=================================+=================================+
+| Trattiene *lock* A -> OK | Trattiene *lock* B -> OK |
++---------------------------------+---------------------------------+
+| Trattiene *lock* B -> attesa | Trattiene *lock* A -> attesa |
++---------------------------------+---------------------------------+
+
+Table: Conseguenze
+
+Entrambi i processori rimarranno in attesa attiva sul *lock* per sempre,
+aspettando che l'altro lo rilasci. Sembra e puzza come un blocco totale.
+
+Prevenire gli stalli
+--------------------
+
+I libri di testo vi diranno che se trattenete i *lock* sempre nello stesso
+ordine non avrete mai un simile stallo. La pratica vi dirà che questo
+approccio non funziona all'ingrandirsi del sistema: quando creo un nuovo
+*lock* non capisco abbastanza del kernel per dire in quale punto della
+gerarchia di 5000 *lock* andrà ad incastrarsi.
+
+I *lock* migliori sono quelli incapsulati: non vengono mai esposti nei file
+di intestazione e non vengono mai trattenuti fuori dal file che li definisce.
+Potete rileggere questo codice e vedere che non ci sarà mai uno stallo
+perché non tenta mai di trattenere un altro *lock* mentre ne ha già uno.
+Le persone che usano il vostro codice non devono nemmeno sapere che state
+usando dei *lock*.
+
+Un classico problema deriva dall'uso di *callback* e di *hook*: se li
+chiamate mentre trattenete un *lock*, rischiate uno stallo o un abbraccio
+della morte (chi lo sa cosa farà una *callback*?).
+
+Ossessiva prevenzione degli stalli
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Gli stalli sono un problema, ma non così terribile come la corruzione dei dati.
+Un pezzo di codice trattiene un *lock* di lettura, cerca in una lista,
+fallisce nel trovare quello che vuole, quindi rilascia il *lock* di lettura,
+trattiene un *lock* di scrittura ed inserisce un oggetto; questo genere di
+codice presenta una corsa critica.
+
+Se non riuscite a capire il perché, per favore state alla larga dal mio
+codice.
+
+Corsa fra temporizzatori: un passatempo del kernel
+--------------------------------------------------
+
+I temporizzatori potrebbero avere dei problemi con le corse critiche.
+Considerate una collezione di oggetti (liste, hash, eccetera) dove ogni oggetto
+ha un temporizzatore che sta per distruggerlo.
+
+Se volete eliminare l'intera collezione (diciamo quando rimuovete un modulo),
+potreste fare come segue::
+
+    /* THIS CODE BAD BAD BAD BAD: IF IT WAS ANY WORSE IT WOULD USE
+       HUNGARIAN NOTATION */
+    spin_lock_bh(&list_lock);
+
+    while (list) {
+            struct foo *next = list->next;
+            del_timer(&list->timer);
+            kfree(list);
+            list = next;
+    }
+
+    spin_unlock_bh(&list_lock);
+
+Prima o poi questo esploderà su un sistema multiprocessore, perché un
+temporizzatore potrebbe essere già partito prima di :c:func:`spin_lock_bh()`:
+in tal caso prenderà il *lock* solo dopo :c:func:`spin_unlock_bh()` e
+cercherà di eliminare il suo oggetto (che però è già stato eliminato).
+
+Questo può essere evitato controllando il valore di ritorno di
+:c:func:`del_timer()`: se ritorna 1, il temporizzatore è stato già
+rimosso. Se 0, significa (in questo caso) che il temporizzatore è in
+esecuzione, quindi possiamo fare come segue::
+
+    retry:
+            spin_lock_bh(&list_lock);
+
+            while (list) {
+                    struct foo *next = list->next;
+                    if (!del_timer(&list->timer)) {
+                            /* Give timer a chance to delete this */
+                            spin_unlock_bh(&list_lock);
+                            goto retry;
+                    }
+                    kfree(list);
+                    list = next;
+            }
+
+            spin_unlock_bh(&list_lock);
+
+Un altro problema è l'eliminazione dei temporizzatori che si riavviano
+da soli (chiamando :c:func:`add_timer()` alla fine della loro esecuzione).
+Dato che questo è un problema abbastanza comune e propenso alle corse
+critiche, dovreste usare :c:func:`del_timer_sync()`
+(``include/linux/timer.h``) per gestirlo. Questa funzione ritorna il
+numero di volte in cui il temporizzatore è stato rimosso prima che si
+riuscisse a fermarlo definitivamente impedendogli di riaggiungersi.
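+
+Uno schizzo del caso precedente con :c:func:`del_timer_sync()` (valido
+assumendo che, a questo punto, nessun altro tocchi più la lista, per
+esempio durante la rimozione del modulo, e che il temporizzatore si limiti
+a riavviarsi senza eliminare da solo il proprio oggetto)::
+
+    while (list) {
+            struct foo *next = list->next;
+
+            /* Aspetta anche i temporizzatori in esecuzione e
+               impedisce loro di riaggiungersi. */
+            del_timer_sync(&list->timer);
+            kfree(list);
+            list = next;
+    }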
+
+Velocità della sincronizzazione
+===============================
+
+Ci sono tre cose importanti da tenere in considerazione quando si valuta
+la velocità d'esecuzione di un pezzo di codice che necessita di
+sincronizzazione. La prima è la concorrenza: quante cose rimangono in attesa
+mentre qualcuno trattiene un *lock*. La seconda è il tempo necessario per
+acquisire (senza contese) e rilasciare un *lock*. La terza è usare meno
+*lock*, o *lock* più furbi. Immagino che i *lock* vengano usati regolarmente,
+altrimenti non sareste interessati all'efficienza.
+
+La concorrenza dipende da quanto a lungo un *lock* è trattenuto: dovreste
+trattenere un *lock* solo il tempo minimo necessario ma non un istante in più.
+Nella memoria dell'esempio precedente, creiamo gli oggetti senza trattenere
+il *lock*, poi acquisiamo il *lock* quando siamo pronti per inserirlo nella
+lista.
+
+Il tempo di acquisizione di un *lock* dipende da quanto danno fa
+l'operazione sulla *pipeline* (ovvero stalli della *pipeline*) e quant'è
+probabile che il processore corrente sia stato anche l'ultimo ad acquisire
+il *lock* (in pratica, il *lock* è nella memoria cache del processore
+corrente?): su sistemi multi-processore questa probabilità precipita
+rapidamente. Consideriamo un processore Intel Pentium III a 700MHz: questo
+esegue un'istruzione in 0.7ns, un incremento atomico richiede 58ns, acquisire
+un *lock* che è nella memoria cache del processore richiede 160ns, e un
+trasferimento dalla memoria cache di un altro processore richiede altri
+170/360ns (leggetevi l'articolo di Paul McKenney `Linux Journal RCU
+article <http://www.linuxjournal.com/article.php?sid=6993>`__).
+
+Questi due obiettivi sono in conflitto: trattenere un *lock* per il minor
+tempo possibile potrebbe richiedere la divisione in più *lock* per diverse
+parti (come nel nostro ultimo esempio con un *lock* per ogni oggetto),
+ma questo aumenta il numero di acquisizioni di *lock*, ed il risultato
+spesso è che tutto è più lento che con un singolo *lock*. Questo è un altro
+argomento in favore della semplicità quando si parla di sincronizzazione.
+
+Il terzo punto è discusso di seguito: ci sono alcune tecniche per ridurre
+il numero di sincronizzazioni che devono essere fatte.
+
+Varianti dei *lock* per letture e scritture
+-------------------------------------------
+
+Sia gli spinlock che i mutex hanno una variante per la lettura/scrittura
+(read/write): ``rwlock_t`` e :c:type:`struct rw_semaphore <rw_semaphore>`.
+Queste dividono gli utenti in due categorie: i lettori e gli scrittori.
+Se state solo leggendo i dati, potete acquisire il *lock* di lettura, ma
+per scrivere avrete bisogno del *lock* di scrittura. Molti possono trattenere
+il *lock* di lettura, ma solo uno scrittore alla volta può trattenere
+quello di scrittura.
+
+Se il vostro codice si divide chiaramente in codice per lettori e codice
+per scrittori (come nel nostro esempio), e il *lock* dei lettori viene
+trattenuto per molto tempo, allora l'uso di questo tipo di *lock* può aiutare.
+Questi sono leggermente più lenti rispetto alla loro versione normale, quindi
+nella pratica l'uso di ``rwlock_t`` spesso non ne vale la pena.
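+
+In forma schematica (``map_lock`` e le funzioni ``__map_*()`` sono nomi
+ipotetici)::
+
+    static DEFINE_RWLOCK(map_lock);
+
+    int map_lookup(int id)
+    {
+            int value;
+
+            /* Più lettori possono procedere in parallelo. */
+            read_lock(&map_lock);
+            value = __map_find(id);
+            read_unlock(&map_lock);
+            return value;
+    }
+
+    void map_update(int id, int value)
+    {
+            /* Un solo scrittore alla volta, e nessun lettore. */
+            write_lock(&map_lock);
+            __map_update(id, value);
+            write_unlock(&map_lock);
+    }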
+
+Evitare i *lock*: Read Copy Update
+--------------------------------------------
+
+Esiste un metodo di sincronizzazione per letture e scritture detto
+Read Copy Update. Con l'uso della tecnica RCU, i lettori possono scordarsi
+completamente di trattenere i *lock*; dato che nel nostro esempio ci
+aspettiamo d'avere più lettori che scrittori (altrimenti questa memoria
+sarebbe uno spreco) possiamo dire che questo meccanismo permette
+un'ottimizzazione.
+
+Come facciamo a sbarazzarci dei *lock* di lettura? Sbarazzarsene significa
+che uno scrittore potrebbe cambiare la lista sotto al naso dei lettori.
+In realtà è abbastanza semplice: possiamo leggere una lista concatenata
+mentre un elemento viene aggiunto, a patto che lo scrittore lo aggiunga
+con le dovute precauzioni. Per esempio, aggiungendo ``new`` ad una lista
+concatenata chiamata ``list``::
+
+ new->next = list->next;
+ wmb();
+ list->next = new;
+
+La funzione :c:func:`wmb()` è una barriera di sincronizzazione delle
+scritture. Questa garantisce che la prima operazione (impostare l'elemento
+``next`` del nuovo elemento) venga completata e vista da tutti i processori
+prima che venga eseguita la seconda operazione (che sarebbe quella di mettere
+il nuovo elemento nella lista). Questo è importante perché i moderni
+compilatori ed i moderni processori possono, entrambi, riordinare le
+istruzioni se non vengono istruiti altrimenti: vogliamo che i lettori o non
+vedano per niente il nuovo elemento, oppure che lo vedano correttamente, con
+il puntatore ``next`` che punta al resto della lista.
+
+Fortunatamente, c'è una funzione che fa questa operazione sulle liste
+:c:type:`struct list_head <list_head>`: :c:func:`list_add_rcu()`
+(``include/linux/list.h``).
+
+Rimuovere un elemento dalla lista è anche più facile: sostituiamo il puntatore
+al vecchio elemento con quello del suo successore, e i lettori vedranno
+l'elemento o lo salteranno.
+
+::
+
+ list->next = old->next;
+
+La funzione :c:func:`list_del_rcu()` (``include/linux/list.h``) fa esattamente
+questo (la versione normale corrompe il vecchio oggetto, e non vogliamo che
+accada).
+
+Anche i lettori devono stare attenti: alcuni processori potrebbero leggere
+attraverso il puntatore ``next`` il contenuto dell'elemento successivo
+troppo presto, e non accorgersi che il contenuto caricato è sbagliato quando
+il puntatore ``next`` viene modificato alle loro spalle. Ancora una volta
+c'è una funzione che viene in vostro aiuto: :c:func:`list_for_each_entry_rcu()`
+(``include/linux/list.h``). Ovviamente, gli scrittori possono usare
+:c:func:`list_for_each_entry()` dato che non ci possono essere due scrittori
+in contemporanea.
+
+Il nostro ultimo dilemma è il seguente: quando possiamo realmente distruggere
+l'elemento rimosso? Ricordate, un lettore potrebbe aver avuto accesso a questo
+elemento proprio ora: se eliminiamo questo elemento ed il puntatore ``next``
+cambia, il lettore salterà direttamente nella spazzatura e scoppierà. Dobbiamo
+aspettare finché tutti i lettori che stanno attraversando la lista abbiano
+finito. Utilizziamo :c:func:`call_rcu()` per registrare una funzione di
+richiamo che distrugga l'oggetto quando tutti i lettori correnti hanno
+terminato. In alternativa, potrebbe essere usata la funzione
+:c:func:`synchronize_rcu()` che blocca l'esecuzione finché tutti i lettori
+non terminano di ispezionare la lista.
+
+Ma come fa l'RCU a sapere quando i lettori hanno finito? Il meccanismo è
+il seguente: innanzi tutto i lettori accedono alla lista solo fra la coppia
+:c:func:`rcu_read_lock()`/:c:func:`rcu_read_unlock()` che disabilita la
+prelazione così che i lettori non vengano sospesi mentre stanno leggendo
+la lista.
+
+Poi, l'RCU aspetta finché tutti i processori non abbiano dormito almeno
+una volta; a questo punto, dato che i lettori non possono dormire, possiamo
+dedurre che un qualsiasi lettore che abbia consultato la lista durante la
+rimozione abbia già terminato, quindi la *callback* viene eseguita. Il vero
+codice RCU è un po' più ottimizzato di così, ma questa è l'idea di fondo.
+
+::
+
+ --- cache.c.perobjectlock 2003-12-11 17:15:03.000000000 +1100
+ +++ cache.c.rcupdate 2003-12-11 17:55:14.000000000 +1100
+ @@ -1,15 +1,18 @@
+ #include <linux/list.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ +#include <linux/rcupdate.h>
+ #include <linux/mutex.h>
+ #include <asm/errno.h>
+
+ struct object
+ {
+ - /* These two protected by cache_lock. */
+ + /* This is protected by RCU */
+ struct list_head list;
+ int popularity;
+
+ + struct rcu_head rcu;
+ +
+ atomic_t refcnt;
+
+ /* Doesn't change once created. */
+ @@ -40,7 +43,7 @@
+ {
+ struct object *i;
+
+ - list_for_each_entry(i, &cache, list) {
+ + list_for_each_entry_rcu(i, &cache, list) {
+ if (i->id == id) {
+ i->popularity++;
+ return i;
+ @@ -49,19 +52,25 @@
+ return NULL;
+ }
+
+ +/* Final discard done once we know no readers are looking. */
+ +static void cache_delete_rcu(void *arg)
+ +{
+ + object_put(arg);
+ +}
+ +
+ /* Must be holding cache_lock */
+ static void __cache_delete(struct object *obj)
+ {
+ BUG_ON(!obj);
+ - list_del(&obj->list);
+ - object_put(obj);
+ + list_del_rcu(&obj->list);
+ cache_num--;
+ + call_rcu(&obj->rcu, cache_delete_rcu);
+ }
+
+ /* Must be holding cache_lock */
+ static void __cache_add(struct object *obj)
+ {
+ - list_add(&obj->list, &cache);
+ + list_add_rcu(&obj->list, &cache);
+ if (++cache_num > MAX_CACHE_SIZE) {
+ struct object *i, *outcast = NULL;
+ list_for_each_entry(i, &cache, list) {
+ @@ -104,12 +114,11 @@
+ struct object *cache_find(int id)
+ {
+ struct object *obj;
+ - unsigned long flags;
+
+ - spin_lock_irqsave(&cache_lock, flags);
+ + rcu_read_lock();
+ obj = __cache_find(id);
+ if (obj)
+ object_get(obj);
+ - spin_unlock_irqrestore(&cache_lock, flags);
+ + rcu_read_unlock();
+ return obj;
+ }
+
+Da notare che i lettori modificano il campo ``popularity`` nella funzione
+:c:func:`__cache_find()`, che ora non trattiene alcun *lock*. Una soluzione
+potrebbe essere quella di rendere la variabile ``atomic_t``, ma per l'uso
+che ne abbiamo fatto qui, non ci interessano queste corse critiche perché un
+risultato approssimativo è comunque accettabile, quindi non l'ho cambiato.
+
+Il risultato è che la funzione :c:func:`cache_find()` non ha bisogno di alcuna
+sincronizzazione con le altre funzioni, quindi è veloce su un sistema
+multi-processore tanto quanto lo sarebbe su un sistema mono-processore.
+
+Esiste un'ulteriore ottimizzazione possibile: vi ricordate il codice originale
+della nostra memoria dove non c'erano contatori di riferimenti e il chiamante
+semplicemente tratteneva il *lock* prima di accedere ad un oggetto? Questo è
+ancora possibile: se trattenete un *lock* nessuno potrà cancellare l'oggetto,
+quindi non avete bisogno di incrementare e decrementare il contatore di
+riferimenti.
+
+Ora, dato che il '*lock* di lettura' di un RCU non fa altro che disabilitare
+la prelazione, un chiamante che ha sempre la prelazione disabilitata fra le
+chiamate :c:func:`cache_find()` e :c:func:`object_put()` non necessita
+di incrementare e decrementare il contatore di riferimenti. Potremmo
+esporre la funzione :c:func:`__cache_find()` dichiarandola non-static,
+e quel chiamante potrebbe usare direttamente questa funzione.
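+
+Un simile chiamante potrebbe assomigliare a questo schizzo (la funzione
+``cache_get_popularity()`` è ipotetica)::
+
+    int cache_get_popularity(int id)
+    {
+            struct object *obj;
+            int pop = -1;
+
+            rcu_read_lock();        /* disabilita la prelazione */
+            obj = __cache_find(id);
+            if (obj)
+                    pop = obj->popularity;
+            rcu_read_unlock();      /* da qui obj non è più affidabile */
+            return pop;
+    }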
+
+Il beneficio qui sta nel fatto che il contatore di riferimenti non
+viene scritto: l'oggetto non viene alterato in alcun modo e quindi diventa
+molto più veloce su sistemi multi-processore grazie alla loro memoria cache.
+
+.. _`it_per-cpu`:
+
+Dati per processore
+-------------------
+
+Un'altra tecnica comunemente usata per evitare la sincronizzazione è quella
+di duplicare le informazioni per ogni processore. Per esempio, se volete
+avere un contatore di qualcosa, potreste utilizzare uno spinlock ed un
+singolo contatore. Facile e pulito.
+
+Se questo dovesse essere troppo lento (solitamente non lo è, ma se avete
+dimostrato che lo è davvero), potreste usare un contatore per ogni processore
+e quindi non sarebbe più necessaria la mutua esclusione. Vedere
+:c:func:`DEFINE_PER_CPU()`, :c:func:`get_cpu_var()` e :c:func:`put_cpu_var()`
+(``include/linux/percpu.h``).
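+
+Uno schizzo di contatore per-processore (``hit_count`` e ``count_hit()``
+sono nomi ipotetici)::
+
+    #include <linux/percpu.h>
+
+    static DEFINE_PER_CPU(unsigned long, hit_count);
+
+    static void count_hit(void)
+    {
+            /* get_cpu_var() disabilita la prelazione e ritorna la
+               copia del processore corrente; nessun lock necessario. */
+            get_cpu_var(hit_count)++;
+            put_cpu_var(hit_count);
+    }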
+
+Il tipo di dato ``local_t``, la funzione :c:func:`cpu_local_inc()` e tutte
+le altre funzioni associate, sono di particolare utilità per semplici contatori
+per-processore; su alcune architetture sono anche più efficienti
+(``include/asm/local.h``).
+
+Da notare che non esiste un modo facile ed affidabile per ottenere il valore
+di un simile contatore senza introdurre altri *lock*. In alcuni casi questo
+non è un problema.
+
+Dati che sono usati prevalentemente dai gestori d'interruzioni
+--------------------------------------------------------------
+
+Se i dati vengono utilizzati sempre dallo stesso gestore d'interruzioni,
+allora i *lock* non vi servono per niente: il kernel già vi garantisce che
+il gestore d'interruzione non verrà eseguito in contemporanea su diversi
+processori.
+
+Manfred Spraul fa notare che potreste comunque comportarvi così anche
+se i dati vengono occasionalmente utilizzati da un contesto utente o
+da un'interruzione software. Il gestore d'interruzione non utilizza alcun
+*lock*, e tutti gli altri accessi verranno fatti così::
+
+ spin_lock(&lock);
+ disable_irq(irq);
+ ...
+ enable_irq(irq);
+ spin_unlock(&lock);
+
+La funzione :c:func:`disable_irq()` impedisce al gestore d'interruzioni
+d'essere eseguito (e aspetta che finisca nel caso fosse in esecuzione su
+un altro processore). Lo spinlock, invece, previene accessi simultanei.
+Naturalmente, questo è più lento della semplice chiamata
+:c:func:`spin_lock_irq()`, quindi ha senso solo se questo genere di accesso
+è estremamente raro.
+
+.. _`it_sleeping-things`:
+
+Quali funzioni possono essere chiamate in modo sicuro dalle interruzioni?
+=========================================================================
+
+Molte funzioni del kernel dormono (in sostanza, chiamano ``schedule()``)
+direttamente od indirettamente: non potete chiamarle, mai, se trattenete uno
+spinlock o se avete la prelazione disabilitata. Questo significa che
+dovete necessariamente essere nel contesto utente: chiamarle da un
+contesto d'interruzione è illegale.
+
+Alcune funzioni che dormono
+---------------------------
+
+Le più comuni sono elencate qui di seguito, ma solitamente dovete leggere
+il codice per scoprire se altre chiamate sono sicure. Se chiunque altro
+che le chiama può dormire, allora probabilmente dovete poter dormire
+anche voi. In particolar modo, le funzioni di registrazione e
+deregistrazione solitamente si aspettano d'essere chiamate dal contesto
+utente, e quindi possono dormire.
+
+- Accessi allo spazio utente:
+
+ - :c:func:`copy_from_user()`
+
+ - :c:func:`copy_to_user()`
+
+ - :c:func:`get_user()`
+
+ - :c:func:`put_user()`
+
+- :c:func:`kmalloc(GFP_KERNEL) <kmalloc>`
+
+- :c:func:`mutex_lock_interruptible()` e
+ :c:func:`mutex_lock()`
+
+ C'è anche :c:func:`mutex_trylock()` che però non dorme.
+ Comunque, non deve essere usata in un contesto d'interruzione dato
+ che la sua implementazione non è sicura in quel contesto.
+ Anche :c:func:`mutex_unlock()` non dorme mai. Non può comunque essere
+ usata in un contesto d'interruzione perché un mutex deve essere rilasciato
+ dallo stesso processo che l'ha acquisito.
+
+Alcune funzioni che non dormono
+-------------------------------
+
+Alcune funzioni possono essere chiamate tranquillamente da qualsiasi
+contesto, o trattenendo un qualsiasi *lock*.
+
+- :c:func:`printk()`
+
+- :c:func:`kfree()`
+
+- :c:func:`add_timer()` e :c:func:`del_timer()`
+
+Riferimento per l'API dei Mutex
+===============================
+
+.. kernel-doc:: include/linux/mutex.h
+ :internal:
+
+.. kernel-doc:: kernel/locking/mutex.c
+ :export:
+
+Riferimento per l'API dei Futex
+===============================
+
+.. kernel-doc:: kernel/futex.c
+ :internal:
+
+Approfondimenti
+===============
+
+- ``Documentation/locking/spinlocks.txt``: la guida di Linus Torvalds agli
+ spinlock del kernel.
+
+- Unix Systems for Modern Architectures: Symmetric Multiprocessing and
+ Caching for Kernel Programmers.
+
+ L'introduzione alla sincronizzazione a livello di kernel di Curt Schimmel
+ è davvero ottima (non è scritta per Linux, ma approssimativamente si adatta
+ a tutte le situazioni). Il libro è costoso, ma vale ogni singolo spicciolo
+ per capire la sincronizzazione nei sistemi multi-processore.
+ [ISBN: 0201633388]
+
+Ringraziamenti
+==============
+
+Grazie a Telsa Gwynne per aver formattato questa guida in DocBook, averla
+pulita e aggiunto un po' di stile.
+
+Grazie a Martin Pool, Philipp Rumpf, Stephen Rothwell, Paul Mackerras,
+Ruedi Aschwanden, Alan Cox, Manfred Spraul, Tim Waugh, Pete Zaitcev,
+James Morris, Robert Love, Paul McKenney, John Ashby per aver revisionato,
+corretto, maledetto e commentato.
+
+Grazie alla congrega per non aver avuto alcuna influenza su questo documento.
+
+Glossario
+=========
+
+prelazione
+ Prima del kernel 2.5, o quando ``CONFIG_PREEMPT`` non è impostato, i processi
+ in contesto utente non si avvicendano nell'esecuzione (in pratica, il
+ processo userà il processore fino al proprio termine, a meno che non ci siano
+ delle interruzioni). Con l'aggiunta di ``CONFIG_PREEMPT`` nella versione
+ 2.5.4 questo è cambiato: quando si è in contesto utente, processi con una
+ priorità maggiore possono subentrare nell'esecuzione: gli spinlock furono
+ cambiati per disabilitare la prelazione, anche su sistemi monoprocessore.
+
+bh
+ Bottom Half: per ragioni storiche, le funzioni che contengono '_bh' nel
+ loro nome ora si riferiscono a qualsiasi interruzione software; per esempio,
+ :c:func:`spin_lock_bh()` blocca qualsiasi interruzione software sul processore
+ corrente. I *Bottom Halves* sono deprecati, e probabilmente verranno
+ sostituiti dai tasklet. In un dato momento potrà esserci solo un
+ *bottom half* in esecuzione.
+
+contesto d'interruzione
+ Non è il contesto utente: qui si processano le interruzioni hardware e
+ software. La macro :c:func:`in_interrupt()` ritorna vero.
+
+contesto utente
+ Il kernel che esegue qualcosa per conto di un particolare processo (per
+ esempio una chiamata di sistema) o di un thread del kernel. Potete
+ identificare il processo con la macro ``current``. Da non confondere
+ con lo spazio utente. Può essere interrotto sia da interruzioni software
+ che hardware.
+
+interruzione hardware
+ Richiesta di interruzione hardware. :c:func:`in_irq()` ritorna vero in un
+ gestore d'interruzioni hardware.
+
+interruzione software / softirq
+ Gestore di interruzioni software: :c:func:`in_irq()` ritorna falso;
+ :c:func:`in_softirq()` ritorna vero. I tasklet e le softirq sono entrambi
+ considerati 'interruzioni software'.
+
+ In soldoni, un softirq è una delle 32 interruzioni software che possono
+ essere eseguite su più processori in contemporanea. A volte il termine si usa
+ per riferirsi anche ai tasklet (in pratica, a tutte le interruzioni software).
+
+monoprocessore / UP
+ (Uni-Processor) un solo processore, ovvero non è SMP. (``CONFIG_SMP=n``).
+
+multi-processore / SMP
+ (Symmetric Multi-Processor) kernel compilati per sistemi multi-processore
+ (``CONFIG_SMP=y``).
+
+spazio utente
+ Un processo che esegue il proprio codice fuori dal kernel.
+
+tasklet
+ Un'interruzione software registrabile dinamicamente che ha la garanzia
+ d'essere eseguita solo su un processore alla volta.
+
+timer
+ Un'interruzione software registrabile dinamicamente che viene eseguita
+ (circa) in un determinato momento. Quando è in esecuzione è come un tasklet
+ (infatti, sono chiamati da ``TIMER_SOFTIRQ``).
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index 921739d00f69..7f01fb1c1084 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -1891,22 +1891,22 @@ Mandatory 배리어들은 SMP 시스템에서도 UP 시스템에서도 SMP 효
/* 소유권을 수정 */
desc->status = DEVICE_OWN;
- /* MMIO 를 통해 디바이스에 공지를 하기 전에 메모리를 동기화 */
- wmb();
-
/* 업데이트된 디스크립터의 디바이스에 공지 */
writel(DESC_NOTIFY, doorbell);
}
dma_rmb() 는 디스크립터로부터 데이터를 읽어오기 전에 디바이스가 소유권을
- 내놓았음을 보장하게 하고, dma_wmb() 는 디바이스가 자신이 소유권을 다시
- 가졌음을 보기 전에 디스크립터에 데이터가 쓰였음을 보장합니다. wmb() 는
- 캐시 일관성이 없는 (cache incoherent) MMIO 영역에 쓰기를 시도하기 전에
- 캐시 일관성이 있는 메모리 (cache coherent memory) 쓰기가 완료되었음을
- 보장해주기 위해 필요합니다.
-
- consistent memory 에 대한 자세한 내용을 위해선 Documentation/DMA-API.txt
- 문서를 참고하세요.
+ 내려놓았을 것을 보장하고, dma_wmb() 는 디바이스가 자신이 소유권을 다시
+ 가졌음을 보기 전에 디스크립터에 데이터가 쓰였을 것을 보장합니다. 참고로,
+ writel() 을 사용하면 캐시 일관성이 있는 메모리 (cache coherent memory)
+ 쓰기가 MMIO 영역에의 쓰기 전에 완료되었을 것을 보장하므로 writel() 앞에
+ wmb() 를 실행할 필요가 없음을 알아두시기 바랍니다. writel() 보다 비용이
+ 저렴한 writel_relaxed() 는 이런 보장을 제공하지 않으므로 여기선 사용되지
+ 않아야 합니다.
+
+ writel_relaxed() 와 같은 완화된 I/O 접근자들에 대한 자세한 내용을 위해서는
+ "커널 I/O 배리어의 효과" 섹션을, consistent memory 에 대한 자세한 내용을
+ 위해선 Documentation/DMA-API.txt 문서를 참고하세요.
MMIO 쓰기 배리어
diff --git a/Documentation/translations/zh_CN/oops-tracing.txt b/Documentation/translations/zh_CN/oops-tracing.txt
index 41ab53cc0e83..a893f04dfd5d 100644
--- a/Documentation/translations/zh_CN/oops-tracing.txt
+++ b/Documentation/translations/zh_CN/oops-tracing.txt
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/admin-guide/oops-tracing.rst
+Chinese translated version of Documentation/admin-guide/bug-hunting.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
@@ -8,7 +8,7 @@ or if there is a problem with the translation.
Chinese maintainer: Dave Young <hidave.darkstar@gmail.com>
---------------------------------------------------------------------
-Documentation/admin-guide/oops-tracing.rst 的中文翻译
+Documentation/admin-guide/bug-hunting.rst 的中文翻译
如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
diff --git a/Documentation/usb/usb-serial.txt b/Documentation/usb/usb-serial.txt
index 349f3104fa4f..ab100d6ee436 100644
--- a/Documentation/usb/usb-serial.txt
+++ b/Documentation/usb/usb-serial.txt
@@ -418,15 +418,6 @@ Current status:
why it is wise to cut down on the rate used is wise for large
transfers until this is settled.
-Options supported:
- If this driver is compiled as a module you can pass the following
- options to it:
- debug - extra verbose debugging info
- (default: 0; nonzero enables)
- use_lowlatency - use low_latency flag to speed up tty layer
- when reading from the device.
- (default: 0; nonzero enables)
-
See http://www.uuhaus.de/linux/palmconnect.html for up-to-date
information on this driver.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index d10944e619d3..c664064f76fb 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -835,11 +835,13 @@ struct kvm_clock_data {
Capability: KVM_CAP_VCPU_EVENTS
Extended by: KVM_CAP_INTR_SHADOW
-Architectures: x86
-Type: vm ioctl
+Architectures: x86, arm, arm64
+Type: vcpu ioctl
Parameters: struct kvm_vcpu_event (out)
Returns: 0 on success, -1 on error
+X86:
+
Gets currently pending exceptions, interrupts, and NMIs as well as related
states of the vcpu.
@@ -881,15 +883,64 @@ Only two fields are defined in the flags field:
- KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
smi contains a valid state.
+ARM/ARM64:
+
+If the guest accesses a device that is being emulated by the host kernel in
+such a way that a real device would generate a physical SError, KVM may make
+a virtual SError pending for that VCPU. This system error interrupt remains
+pending until the guest takes the exception by unmasking PSTATE.A.
+
+Running the VCPU may cause it to take a pending SError, or make an access that
+causes an SError to become pending. The event's description is only valid while
+the VCPU is not running.
+
+This API provides a way to read and write the pending 'event' state that is not
+visible to the guest. To save, restore or migrate a VCPU the struct representing
+the state can be read then written using this GET/SET API, along with the other
+guest-visible registers. It is not possible to 'cancel' an SError that has been
+made pending.
+
+A device being emulated in user-space may also wish to generate an SError. To do
+this the events structure can be populated by user-space. The current state
+should be read first, to ensure no existing SError is pending. If an existing
+SError is pending, the architecture's 'Multiple SError interrupts' rules should
+be followed. (2.5.3 of DDI0587.a "ARM Reliability, Availability, and
+Serviceability (RAS) Specification").
+
+SError exceptions always have an ESR value. Some CPUs have the ability to
+specify what the virtual SError's ESR value should be. These systems will
+advertise KVM_CAP_ARM_INJECT_SERROR_ESR. In this case exception.has_esr will
+always have a non-zero value when read, and the agent making an SError pending
+should specify the ISS field in the lower 24 bits of exception.serror_esr. If
+the system supports KVM_CAP_ARM_INJECT_SERROR_ESR, but user-space sets the events
+with exception.has_esr as zero, KVM will choose an ESR.
+
+Specifying exception.has_esr on a system that does not support it will return
+-EINVAL. Setting anything other than the lower 24bits of exception.serror_esr
+will return -EINVAL.
+
+struct kvm_vcpu_events {
+ struct {
+ __u8 serror_pending;
+ __u8 serror_has_esr;
+ /* Align it to 8 bytes */
+ __u8 pad[6];
+ __u64 serror_esr;
+ } exception;
+ __u32 reserved[12];
+};
+
4.32 KVM_SET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
Extended by: KVM_CAP_INTR_SHADOW
-Architectures: x86
-Type: vm ioctl
+Architectures: x86, arm, arm64
+Type: vcpu ioctl
Parameters: struct kvm_vcpu_event (in)
Returns: 0 on success, -1 on error
+X86:
+
Set pending exceptions, interrupts, and NMIs as well as related states of the
vcpu.
@@ -910,6 +961,13 @@ shall be written into the VCPU.
KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
+ARM/ARM64:
+
+Set the pending SError exception state for this VCPU. It is not possible to
+'cancel' an SError that has been made pending.
+
+See KVM_GET_VCPU_EVENTS for the data structure.
+
4.33 KVM_GET_DEBUGREGS
@@ -3561,6 +3619,62 @@ Returns: 0 on success,
-ENOENT on deassign if the conn_id isn't registered
-EEXIST on assign if the conn_id is already registered
+4.114 KVM_GET_NESTED_STATE
+
+Capability: KVM_CAP_NESTED_STATE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_nested_state (in/out)
+Returns: 0 on success, -1 on error
+Errors:
+ E2BIG: the total state size (including the fixed-size part of struct
+ kvm_nested_state) exceeds the value of 'size' specified by
+ the user; the size required will be written into size.
+
+struct kvm_nested_state {
+ __u16 flags;
+ __u16 format;
+ __u32 size;
+ union {
+ struct kvm_vmx_nested_state vmx;
+ struct kvm_svm_nested_state svm;
+ __u8 pad[120];
+ };
+ __u8 data[0];
+};
+
+#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
+#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
+
+#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
+#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
+
+struct kvm_vmx_nested_state {
+ __u64 vmxon_pa;
+ __u64 vmcs_pa;
+
+ struct {
+ __u16 flags;
+ } smm;
+};
+
+This ioctl copies the vcpu's nested virtualization state from the kernel to
+userspace.
+
+The maximum size of the state, including the fixed-size part of struct
+kvm_nested_state, can be retrieved by passing KVM_CAP_NESTED_STATE to
+the KVM_CHECK_EXTENSION ioctl().
+
+4.115 KVM_SET_NESTED_STATE
+
+Capability: KVM_CAP_NESTED_STATE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_nested_state (in)
+Returns: 0 on success, -1 on error
+
+This copies the vcpu's kvm_nested_state struct from userspace to the kernel. For
+the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.
5. The kvm_run structure
------------------------
@@ -4391,6 +4505,22 @@ all such vmexits.
Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
+7.14 KVM_CAP_S390_HPAGE_1M
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success, -EINVAL if hpage module parameter was not set
+ or cmma is enabled
+
+With this capability the KVM support for memory backing with 1m pages
+through hugetlbfs can be enabled for a VM. After the capability is
+enabled, cmma can't be enabled anymore and pfmfi and the storage key
+interpretation are disabled. If cmma has already been enabled or the
+hpage module parameter is not set to 1, -EINVAL is returned.
+
+While it is generally possible to create a huge page backed VM without
+this capability, the VM will not be able to run.
+
8. Other capabilities.
----------------------
@@ -4618,3 +4748,17 @@ This capability indicates that KVM supports paravirtualized Hyper-V TLB Flush
hypercalls:
HvFlushVirtualAddressSpace, HvFlushVirtualAddressSpaceEx,
HvFlushVirtualAddressList, HvFlushVirtualAddressListEx.
+
+8.19 KVM_CAP_ARM_INJECT_SERROR_ESR
+
+Architectures: arm, arm64
+
+This capability indicates that userspace can specify (via the
+KVM_SET_VCPU_EVENTS ioctl) the syndrome value reported to the guest when it
+takes a virtual SError interrupt exception.
+If KVM advertises this capability, userspace can only specify the ISS field for
+the ESR syndrome. Other parts of the ESR, such as the EC are generated by the
+CPU when the exception is taken. If this virtual SError is taken to EL1 using
+AArch64, this value will be reported in the ISS field of ESR_ELx.
+
+See KVM_CAP_VCPU_EVENTS for more details.
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index ab022dcd0911..97ca1940a0dc 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -62,6 +62,10 @@ KVM_FEATURE_ASYNC_PF_VMEXIT || 10 || paravirtualized async PF VM exit
|| || can be enabled by setting bit 2
|| || when writing to msr 0x4b564d02
------------------------------------------------------------------------------
+KVM_FEATURE_PV_SEND_IPI || 11 || guest checks this feature bit
+ || || before using paravirtualized
+ || || send IPIs.
+------------------------------------------------------------------------------
KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
|| || per-cpu warps are expected in
|| || kvmclock.
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
index 2408ab720ef7..ff290b43c8e5 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
@@ -100,6 +100,14 @@ Groups:
Note that distributor fields are not banked, but return the same value
regardless of the mpidr used to access the register.
+ GICD_IIDR.Revision is updated when the KVM implementation is changed in a
+ way directly observable by the guest or userspace. Userspace should read
+ GICD_IIDR from KVM and write back the read value to confirm its expected
+ behavior is aligned with the KVM implementation. Userspace should set
+ GICD_IIDR before setting any other registers to ensure the expected
+ behavior.
+
+
The GICD_STATUSR and GICR_STATUSR registers are architecturally defined such
that a write of a clear bit has no effect, whereas a write with a set bit
clears that value. To allow userspace to freely set the values of these two
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index b3ce12643553..97b6518148f8 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -49,9 +49,15 @@ Groups:
index is specified with the vcpu_index field. Note that most distributor
fields are not banked, but return the same value regardless of the
vcpu_index used to access the register.
- Limitations:
- - Priorities are not implemented, and registers are RAZ/WI
- - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
+
+ GICD_IIDR.Revision is updated when the KVM implementation of an emulated
+ GICv2 is changed in a way directly observable by the guest or userspace.
+ Userspace should read GICD_IIDR from KVM and write back the read value to
+ confirm its expected behavior is aligned with the KVM implementation.
+ Userspace should set GICD_IIDR before setting any other registers (both
+ KVM_DEV_ARM_VGIC_GRP_DIST_REGS and KVM_DEV_ARM_VGIC_GRP_CPU_REGS) to ensure
+ the expected behavior. Unless GICD_IIDR has been set from userspace, writes
+ to the interrupt group registers (GICD_IGROUPR) are ignored.
Errors:
-ENXIO: Getting or setting this register is not yet supported
-EBUSY: One or more VCPUs are running
@@ -94,9 +100,6 @@ Groups:
use the lower 5 bits to communicate with the KVM device and must shift the
value left by 3 places to obtain the actual priority mask level.
- Limitations:
- - Priorities are not implemented, and registers are RAZ/WI
- - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
Errors:
-ENXIO: Getting or setting this register is not yet supported
-EBUSY: One or more VCPUs are running
diff --git a/Documentation/virtual/kvm/hypercalls.txt b/Documentation/virtual/kvm/hypercalls.txt
index a890529c63ed..da24c138c8d1 100644
--- a/Documentation/virtual/kvm/hypercalls.txt
+++ b/Documentation/virtual/kvm/hypercalls.txt
@@ -121,3 +121,23 @@ compute the CLOCK_REALTIME for its clock, at the same instant.
Returns KVM_EOPNOTSUPP if the host does not use TSC clocksource,
or if clock type is different than KVM_CLOCK_PAIRING_WALLCLOCK.
+
+6. KVM_HC_SEND_IPI
+------------------------
+Architecture: x86
+Status: active
+Purpose: Send IPIs to multiple vCPUs.
+
+a0: lower part of the bitmap of destination APIC IDs
+a1: higher part of the bitmap of destination APIC IDs
+a2: the lowest APIC ID in bitmap
+a3: APIC ICR
+
+The hypercall lets a guest send multicast IPIs, with at most 128
+destinations per hypercall in 64-bit mode and 64 vCPUs per
+hypercall in 32-bit mode. The destinations are represented by a
+bitmap contained in the first two arguments (a0 and a1). Bit 0 of
+a0 corresponds to the APIC ID in the third argument (a2), bit 1
+corresponds to the APIC ID a2+1, and so on.
+
+Returns the number of CPUs to which the IPIs were delivered successfully.
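+
+A guest-side sketch (assuming the guest kernel's kvm_para_has_feature()
+and kvm_hypercall4() helpers, with min holding the lowest destination
+APIC ID and icr the ICR value to use):
+
+	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI)) {
+		/* Bit 0 of a0 is APIC ID min, bit 2 is APIC ID min + 2. */
+		unsigned long ipi_bitmap_low = (1UL << 0) | (1UL << 2);
+		unsigned long ipi_bitmap_high = 0; /* IDs min+64..min+127 */
+		long ret;
+
+		ret = kvm_hypercall4(KVM_HC_SEND_IPI, ipi_bitmap_low,
+				     ipi_bitmap_high, min, icr);
+		/* ret: number of vCPUs the IPI was delivered to. */
+	}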
diff --git a/Documentation/w1/slaves/w1_ds2438 b/Documentation/w1/slaves/w1_ds2438
index b99f3674c5b4..e64f65a09387 100644
--- a/Documentation/w1/slaves/w1_ds2438
+++ b/Documentation/w1/slaves/w1_ds2438
@@ -60,4 +60,4 @@ vad: general purpose A/D input (VAD)
vdd: battery input (VDD)
After the voltage conversion the value is returned as decimal ASCII.
-Note: The value is in mV, so to get a volts the value has to be divided by 10.
+Note: To get volts the value has to be divided by 100 (e.g. a reading
+of 330 corresponds to 3.30 V).
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
index a16aa2113840..f662d3c530e5 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -29,7 +29,11 @@ mount options are:
L2 and L3 CDP are controlled separately.
RDT features are orthogonal. A particular system may support only
-monitoring, only control, or both monitoring and control.
+monitoring, only control, or both monitoring and control. Cache
+pseudo-locking is a unique way of using cache control to "pin" or
+"lock" data in the cache. Details can be found in
+"Cache Pseudo-Locking".
+
The mount succeeds if either of allocation or monitoring is present, but
only those files and directories supported by the system will be created.
@@ -65,6 +69,29 @@ related to allocation:
some platforms support devices that have their
own settings for cache use which can over-ride
these bits.
+"bit_usage": Annotated capacity bitmasks showing how all
+ instances of the resource are used. The legend is:
+ "0" - Corresponding region is unused. When the system's
+ resources have been allocated and a "0" is found
+ in "bit_usage" it is a sign that resources are
+ wasted.
+ "H" - Corresponding region is used by hardware only
+ but available for software use. If a resource
+ has bits set in "shareable_bits" but not all
+ of these bits appear in the resource groups'
+ schematas then the bits appearing in
+ "shareable_bits" but no resource group will
+ be marked as "H".
+ "X" - Corresponding region is available for sharing and
+ used by hardware and software. These are the
+ bits that appear in "shareable_bits" as
+ well as a resource group's allocation.
+ "S" - Corresponding region is used by software
+ and available for sharing.
+ "E" - Corresponding region is used exclusively by
+ one resource group. No sharing allowed.
+ "P" - Corresponding region is pseudo-locked. No
+ sharing allowed.
Memory bandwidth (MB) subdirectory contains the following files
with respect to allocation:
@@ -151,6 +178,9 @@ All groups contain the following files:
CPUs to/from this group. As with the tasks file a hierarchy is
maintained where MON groups may only include CPUs owned by the
parent CTRL_MON group.
+ When the resource group is in pseudo-locked mode this file will
+ only be readable, reflecting the CPUs associated with the
+ pseudo-locked region.
"cpus_list":
@@ -163,6 +193,21 @@ When control is enabled all CTRL_MON groups will also contain:
A list of all the resources available to this group.
Each resource has its own line and format - see below for details.
+"size":
+ Mirrors the display of the "schemata" file to display the size in
+ bytes of each allocation instead of the bits representing the
+ allocation.
+
+"mode":
+ The "mode" of the resource group dictates the sharing of its
+ allocations. A "shareable" resource group allows sharing of its
+ allocations while an "exclusive" resource group does not. A
+ cache pseudo-locked region is created by first writing
+ "pseudo-locksetup" to the "mode" file before writing the cache
+ pseudo-locked region's schemata to the resource group's "schemata"
+ file. On successful pseudo-locked region creation the mode will
+ automatically change to "pseudo-locked".
+
When monitoring is enabled all MON groups will also contain:
"mon_data":
@@ -379,6 +424,170 @@ L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
L3DATA:0=fffff;1=fffff;2=3c0;3=fffff
L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
+Cache Pseudo-Locking
+--------------------
+CAT enables a user to specify the amount of cache space that an
+application can fill. Cache pseudo-locking builds on the fact that a
+CPU can still read and write data pre-allocated outside its current
+allocated area on a cache hit. With cache pseudo-locking, data can be
+preloaded into a reserved portion of cache that no application can
+fill, and from that point on will only serve cache hits. The cache
+pseudo-locked memory is made accessible to user space where an
+application can map it into its virtual address space and thus have
+a region of memory with reduced average read latency.
+
+The creation of a cache pseudo-locked region is triggered by a request
+from the user to do so that is accompanied by a schemata of the region
+to be pseudo-locked. The cache pseudo-locked region is created as follows:
+- Create a CAT allocation CLOSNEW with a CBM matching the schemata
+ from the user of the cache region that will contain the pseudo-locked
+ memory. This region must not overlap with any current CAT allocation/CLOS
+ on the system and no future overlap with this cache region is allowed
+ while the pseudo-locked region exists.
+- Create a contiguous region of memory of the same size as the cache
+ region.
+- Flush the cache, disable hardware prefetchers, disable preemption.
+- Make CLOSNEW the active CLOS and touch the allocated memory to load
+ it into the cache.
+- Set the previous CLOS as active.
+- At this point the closid CLOSNEW can be released - the cache
+ pseudo-locked region is protected as long as its CBM does not appear in
+ any CAT allocation. Even though the cache pseudo-locked region will from
+ this point on not appear in any CBM of any CLOS, an application running with
+ any CLOS will be able to access the memory in the pseudo-locked region since
+ the region continues to serve cache hits.
+- The contiguous region of memory loaded into the cache is exposed to
+ user-space as a character device.
+
+Cache pseudo-locking increases the probability that data will remain
+in the cache by carefully configuring the CAT feature and controlling
+application behavior. There is no guarantee that data is placed in
+cache. Instructions like INVD, WBINVD, CLFLUSH, etc. can still evict
+“locked” data from cache. Power management C-states may shrink or
+power off cache. Deeper C-states will automatically be restricted on
+pseudo-locked region creation.
+
+It is required that an application using a pseudo-locked region runs
+with affinity to the cores (or a subset of the cores) associated
+with the cache on which the pseudo-locked region resides. A sanity check
+within the code will not allow an application to map pseudo-locked memory
+unless it runs with affinity to cores associated with the cache on which the
+pseudo-locked region resides. The sanity check is only done during the
+initial mmap() handling; there is no enforcement afterwards and the
+application itself needs to ensure it remains affine to the correct cores.
+
+Pseudo-locking is accomplished in two stages:
+1) During the first stage the system administrator allocates a portion
+ of cache that should be dedicated to pseudo-locking. At this time an
+ equivalent portion of memory is allocated, loaded into the allocated
+ cache portion, and exposed as a character device.
+2) During the second stage a user-space application maps (mmap()) the
+ pseudo-locked memory into its address space.
+
+Cache Pseudo-Locking Interface
+------------------------------
+A pseudo-locked region is created using the resctrl interface as follows:
+
+1) Create a new resource group by creating a new directory in /sys/fs/resctrl.
+2) Change the new resource group's mode to "pseudo-locksetup" by writing
+ "pseudo-locksetup" to the "mode" file.
+3) Write the schemata of the pseudo-locked region to the "schemata" file. All
+ bits within the schemata should be "unused" according to the "bit_usage"
+ file.
+
+On successful pseudo-locked region creation the "mode" file will contain
+"pseudo-locked" and a new character device with the same name as the resource
+group will exist in /dev/pseudo_lock. This character device can be mmap()'ed
+by user space in order to obtain access to the pseudo-locked memory region.
+
+An example of cache pseudo-locked region creation and usage can be found below.
+
+Cache Pseudo-Locking Debugging Interface
+---------------------------------------
+The pseudo-locking debugging interface is enabled by default (if
+CONFIG_DEBUG_FS is enabled) and can be found in /sys/kernel/debug/resctrl.
+
+There is no explicit way for the kernel to test if a provided memory
+location is present in the cache. The pseudo-locking debugging interface uses
+the tracing infrastructure to provide two ways to measure cache residency of
+the pseudo-locked region:
+1) Memory access latency using the pseudo_lock_mem_latency tracepoint. Data
+ from these measurements are best visualized using a hist trigger (see
+ example below). In this test the pseudo-locked region is traversed at
+ a stride of 32 bytes while hardware prefetchers and preemption
+ are disabled. This also provides a substitute visualization of cache
+ hits and misses.
+2) Cache hit and miss measurements using model specific precision counters if
+ available. Depending on the levels of cache on the system the pseudo_lock_l2
+ and pseudo_lock_l3 tracepoints are available.
+ WARNING: triggering this measurement uses from two (for just L2
+ measurements) to four (for L2 and L3 measurements) precision counters on
+ the system; if any other measurements are in progress, the counters and
+ their corresponding event registers will be clobbered.
+
+When a pseudo-locked region is created a new debugfs directory is created for
+it in debugfs as /sys/kernel/debug/resctrl/<newdir>. A single
+write-only file, pseudo_lock_measure, is present in this directory. The
+measurement on the pseudo-locked region depends on the number, 1 or 2,
+written to this debugfs file. Since the measurements are recorded with the
+tracing infrastructure the relevant tracepoints need to be enabled before the
+measurement is triggered.
+
+Example of latency debugging interface:
+In this example a pseudo-locked region named "newlock" was created. Here is
+how we can measure the latency in cycles of reading from this region and
+visualize this data with a histogram that is available if CONFIG_HIST_TRIGGERS
+is set:
+# :> /sys/kernel/debug/tracing/trace
+# echo 'hist:keys=latency' > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/trigger
+# echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
+# echo 1 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
+# echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
+# cat /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/hist
+
+# event histogram
+#
+# trigger info: hist:keys=latency:vals=hitcount:sort=hitcount:size=2048 [active]
+#
+
+{ latency: 456 } hitcount: 1
+{ latency: 50 } hitcount: 83
+{ latency: 36 } hitcount: 96
+{ latency: 44 } hitcount: 174
+{ latency: 48 } hitcount: 195
+{ latency: 46 } hitcount: 262
+{ latency: 42 } hitcount: 693
+{ latency: 40 } hitcount: 3204
+{ latency: 38 } hitcount: 3484
+
+Totals:
+ Hits: 8192
+ Entries: 9
+ Dropped: 0
+
+Example of cache hits/misses debugging:
+In this example a pseudo-locked region named "newlock" was created on the L2
+cache of a platform. Here is how we can obtain details of the cache hits
+and misses using the platform's precision counters.
+
+# :> /sys/kernel/debug/tracing/trace
+# echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
+# echo 2 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
+# echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
+# cat /sys/kernel/debug/tracing/trace
+
+# tracer: nop
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ pseudo_lock_mea-1672 [002] .... 3132.860500: pseudo_lock_l2: hits=4097 miss=0
+
+
Examples for RDT allocation usage:
Example 1
@@ -502,7 +711,172 @@ siblings and only the real time threads are scheduled on the cores 4-7.
# echo F0 > p0/cpus
-4) Locking between applications
+Example 4
+---------
+
+The resource groups in previous examples were all in the default "shareable"
+mode allowing sharing of their cache allocations. If one resource group
+configures a cache allocation then nothing prevents another resource group
+from overlapping with that allocation.
+
+In this example a new exclusive resource group will be created on an L2 CAT
+system with two L2 cache instances that can be configured with an 8-bit
+capacity bitmask. The new exclusive resource group will be configured to use
+25% of each cache instance.
+
+# mount -t resctrl resctrl /sys/fs/resctrl/
+# cd /sys/fs/resctrl
+
+First, we observe that the default group is configured to allocate all of
+the L2 cache:
+
+# cat schemata
+L2:0=ff;1=ff
+
+We could attempt to create the new resource group at this point, but it will
+fail because of the overlap with the schemata of the default group:
+# mkdir p0
+# echo 'L2:0=0x3;1=0x3' > p0/schemata
+# cat p0/mode
+shareable
+# echo exclusive > p0/mode
+-sh: echo: write error: Invalid argument
+# cat info/last_cmd_status
+schemata overlaps
+
+To ensure that there is no overlap with another resource group, the default
+resource group's schemata has to change, making it possible for the new
+resource group to become exclusive.
+# echo 'L2:0=0xfc;1=0xfc' > schemata
+# echo exclusive > p0/mode
+# grep . p0/*
+p0/cpus:0
+p0/mode:exclusive
+p0/schemata:L2:0=03;1=03
+p0/size:L2:0=262144;1=262144
+
+A newly created resource group will not overlap with an exclusive resource
+group:
+# mkdir p1
+# grep . p1/*
+p1/cpus:0
+p1/mode:shareable
+p1/schemata:L2:0=fc;1=fc
+p1/size:L2:0=786432;1=786432
+
+The bit_usage will reflect how the cache is used:
+# cat info/L2/bit_usage
+0=SSSSSSEE;1=SSSSSSEE
+
+A resource group cannot be forced to overlap with an exclusive resource group:
+# echo 'L2:0=0x1;1=0x1' > p1/schemata
+-sh: echo: write error: Invalid argument
+# cat info/last_cmd_status
+overlaps with exclusive group
+
+Example of Cache Pseudo-Locking
+-------------------------------
+Lock a portion of the L2 cache from cache id 1 using CBM 0x3. The
+pseudo-locked region is exposed at /dev/pseudo_lock/newlock and can be
+provided to an application as the argument to mmap().
+
+# mount -t resctrl resctrl /sys/fs/resctrl/
+# cd /sys/fs/resctrl
+
+Ensure that there are bits available that can be pseudo-locked. Since only
+unused bits can be pseudo-locked, the bits to be pseudo-locked need to be
+removed from the default resource group's schemata:
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSSSS
+# echo 'L2:1=0xfc' > schemata
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSS00
+
+Create a new resource group that will be associated with the pseudo-locked
+region, indicate that it will be used for a pseudo-locked region, and
+configure the requested pseudo-locked region capacity bitmask:
+
+# mkdir newlock
+# echo pseudo-locksetup > newlock/mode
+# echo 'L2:1=0x3' > newlock/schemata
+
+On success the resource group's mode will change to pseudo-locked, the
+bit_usage will reflect the pseudo-locked region, and the character device
+exposing the pseudo-locked region will exist:
+
+# cat newlock/mode
+pseudo-locked
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSSPP
+# ls -l /dev/pseudo_lock/newlock
+crw------- 1 root root 243, 0 Apr 3 05:01 /dev/pseudo_lock/newlock
+
+/*
+ * Example code to access one page of pseudo-locked cache region
+ * from user space.
+ */
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+/*
+ * It is required that the application runs with affinity to only
+ * cores associated with the pseudo-locked region. Here the cpu
+ * is hardcoded for convenience of example.
+ */
+static int cpuid = 2;
+
+int main(int argc, char *argv[])
+{
+ cpu_set_t cpuset;
+ long page_size;
+ void *mapping;
+ int dev_fd;
+ int ret;
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpuid, &cpuset);
+ ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
+ if (ret < 0) {
+ perror("sched_setaffinity");
+ exit(EXIT_FAILURE);
+ }
+
+ dev_fd = open("/dev/pseudo_lock/newlock", O_RDWR);
+ if (dev_fd < 0) {
+ perror("open");
+ exit(EXIT_FAILURE);
+ }
+
+ mapping = mmap(0, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ dev_fd, 0);
+ if (mapping == MAP_FAILED) {
+ perror("mmap");
+ close(dev_fd);
+ exit(EXIT_FAILURE);
+ }
+
+ /* Application interacts with pseudo-locked memory @mapping */
+
+ ret = munmap(mapping, page_size);
+ if (ret < 0) {
+ perror("munmap");
+ close(dev_fd);
+ exit(EXIT_FAILURE);
+ }
+
+ close(dev_fd);
+ exit(EXIT_SUCCESS);
+}
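+
+The example can be compiled with a plain gcc invocation. Note that the
+hardcoded cpuid above must be adjusted to a CPU associated with the cache
+instance on which the pseudo-locked region resides (cache id 1 in the
+preceding example), otherwise the mmap() of the character device will be
+refused.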
+
+Locking between applications
+----------------------------
Certain operations on the resctrl filesystem, composed of read/writes
to/from multiple files, must be atomic.
@@ -510,7 +884,7 @@ to/from multiple files, must be atomic.
As an example, the allocation of an exclusive reservation of L3 cache
involves:
- 1. Read the cbmmasks from each directory
+ 1. Read the cbmmasks from each directory or the per-resource "bit_usage"
2. Find a contiguous set of bits in the global CBM bitmask that is clear
in any of the directory cbmmasks
3. Create a new directory
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index 8d109ef67ab6..ad6d2a80cf05 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -92,9 +92,7 @@ APICs
Timing
notsc
- Don't use the CPU time stamp counter to read the wall time.
- This can be used to work around timing problems on multiprocessor systems
- with not properly synchronized CPUs.
+ Deprecated, use tsc=unstable instead.
nohpet
Don't use the HPET timer.
@@ -156,6 +154,10 @@ NUMA
If given as an integer, fills all system RAM with N fake nodes
interleaved over physical nodes.
+ numa=fake=<N>U
+ If given as an integer followed by 'U', it will divide each
+ physical node into N emulated nodes. For example, numa=fake=8U on a
+ system with 2 physical nodes creates 16 emulated nodes.
+
ACPI
acpi=off Don't enable ACPI
diff --git a/Kconfig b/Kconfig
index a90d9f9e268b..48a80beab685 100644
--- a/Kconfig
+++ b/Kconfig
@@ -9,4 +9,24 @@ comment "Compiler: $(CC_VERSION_TEXT)"
source "scripts/Kconfig.include"
-source "arch/$(SRCARCH)/Kconfig"
+source "init/Kconfig"
+
+source "kernel/Kconfig.freezer"
+
+source "fs/Kconfig.binfmt"
+
+source "mm/Kconfig"
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
+
+source "lib/Kconfig.debug"
diff --git a/MAINTAINERS b/MAINTAINERS
index 24fbe6583b02..126335e0471f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -199,12 +199,13 @@ F: drivers/net/ethernet/8390/
9P FILE SYSTEM
M: Eric Van Hensbergen <ericvh@gmail.com>
-M: Ron Minnich <rminnich@sandia.gov>
M: Latchesar Ionkov <lucho@ionkov.net>
+M: Dominique Martinet <asmadeus@codewreck.org>
L: v9fs-developer@lists.sourceforge.net
W: http://swik.net/v9fs
Q: http://patchwork.kernel.org/project/v9fs-devel/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs.git
+T: git git://github.com/martinetd/linux.git
S: Maintained
F: Documentation/filesystems/9p.txt
F: fs/9p/
@@ -367,6 +368,12 @@ L: linux-acpi@vger.kernel.org
S: Maintained
F: drivers/acpi/arm64
+ACPI I2C MULTI INSTANTIATE DRIVER
+M: Hans de Goede <hdegoede@redhat.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/i2c-multi-instantiate.c
+
ACPI PMIC DRIVERS
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Len Brown <lenb@kernel.org>
@@ -581,7 +588,7 @@ W: https://www.infradead.org/~dhowells/kafs/
AGPGART DRIVER
M: David Airlie <airlied@linux.ie>
-T: git git://people.freedesktop.org/~airlied/linux (part of drm maint)
+T: git git://anongit.freedesktop.org/drm/drm
S: Maintained
F: drivers/char/agp/
F: include/linux/agp*
@@ -728,6 +735,14 @@ S: Supported
F: drivers/crypto/ccp/
F: include/linux/ccp.h
+AMD DISPLAY CORE
+M: Harry Wentland <harry.wentland@amd.com>
+M: Leo Li <sunpeng.li@amd.com>
+L: amd-gfx@lists.freedesktop.org
+T: git git://people.freedesktop.org/~agd5f/linux
+S: Supported
+F: drivers/gpu/drm/amd/display/
+
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Huang Rui <ray.huang@amd.com>
L: linux-hwmon@vger.kernel.org
@@ -777,6 +792,14 @@ F: drivers/gpu/drm/amd/include/vi_structs.h
F: drivers/gpu/drm/amd/include/v9_structs.h
F: include/uapi/linux/kfd_ioctl.h
+AMD POWERPLAY
+M: Rex Zhu <rex.zhu@amd.com>
+M: Evan Quan <evan.quan@amd.com>
+L: amd-gfx@lists.freedesktop.org
+S: Supported
+F: drivers/gpu/drm/amd/powerplay/
+T: git git://people.freedesktop.org/~agd5f/linux
+
AMD SEATTLE DEVICE TREE SUPPORT
M: Brijesh Singh <brijeshkumar.singh@amd.com>
M: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
@@ -791,11 +814,6 @@ S: Supported
F: drivers/net/ethernet/amd/xgbe/
F: arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi
-AMS (Apple Motion Sensor) DRIVER
-M: Michael Hanselmann <linux-kernel@hansmi.ch>
-S: Supported
-F: drivers/macintosh/ams/
-
ANALOG DEVICES INC AD5686 DRIVER
M: Stefan Popa <stefan.popa@analog.com>
L: linux-pm@vger.kernel.org
@@ -804,12 +822,33 @@ S: Supported
F: drivers/iio/dac/ad5686*
F: drivers/iio/dac/ad5696*
+ANALOG DEVICES INC AD5758 DRIVER
+M: Stefan Popa <stefan.popa@analog.com>
+L: linux-iio@vger.kernel.org
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/iio/dac/ad5758.c
+F: Documentation/devicetree/bindings/iio/dac/ad5758.txt
+
ANALOG DEVICES INC AD9389B DRIVER
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/i2c/ad9389b*
+ANALOG DEVICES INC ADGS1408 DRIVER
+M: Mircea Caprioru <mircea.caprioru@analog.com>
+S: Supported
+F: drivers/mux/adgs1408.c
+F: Documentation/devicetree/bindings/mux/adgs1408.txt
+
+ANALOG DEVICES INC ADP5061 DRIVER
+M: Stefan Popa <stefan.popa@analog.com>
+L: linux-pm@vger.kernel.org
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/power/supply/adp5061.c
+
ANALOG DEVICES INC ADV7180 DRIVER
M: Lars-Peter Clausen <lars@metafoo.de>
L: linux-media@vger.kernel.org
@@ -1256,11 +1295,6 @@ F: arch/arm/mach-aspeed/
F: arch/arm/boot/dts/aspeed-*
N: aspeed
-ARM/ATMEL AT91 Clock Support
-M: Boris Brezillon <boris.brezillon@bootlin.com>
-S: Maintained
-F: drivers/clk/at91
-
ARM/CALXEDA HIGHBANK ARCHITECTURE
M: Rob Herring <robh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1442,6 +1476,16 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: arch/arm/mach-imx/*vf610*
F: arch/arm/boot/dts/vf*
+ARM/FREESCALE LAYERSCAPE ARM ARCHITECTURE
+M: Shawn Guo <shawnguo@kernel.org>
+M: Li Yang <leoyang.li@nxp.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
+F: arch/arm/boot/dts/ls1021a*
+F: arch/arm64/boot/dts/freescale/fsl-*
+F: arch/arm64/boot/dts/freescale/qoriq-*
+
ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1647,7 +1691,8 @@ M: Chunfeng Yun <chunfeng.yun@mediatek.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: drivers/phy/mediatek/phy-mtk-tphy.c
+F: drivers/phy/mediatek/
+F: Documentation/devicetree/bindings/phy/phy-mtk-*
ARM/MICREL KS8695 ARCHITECTURE
M: Greg Ungerer <gerg@uclinux.org>
@@ -2264,6 +2309,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
S: Maintained
F: arch/arm64/
+X: arch/arm64/boot/dts/
F: Documentation/arm64/
AS3645A LED FLASH CONTROLLER DRIVER
@@ -2272,6 +2318,14 @@ L: linux-leds@vger.kernel.org
S: Maintained
F: drivers/leds/leds-as3645a.c
+ASAHI KASEI AK7375 LENS VOICE COIL DRIVER
+M: Tianshu Qiu <tian.shu.qiu@intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/ak7375.c
+F: Documentation/devicetree/bindings/media/i2c/ak7375.txt
+
ASAHI KASEI AK8974 DRIVER
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-iio@vger.kernel.org
@@ -2523,7 +2577,7 @@ S: Supported
F: drivers/scsi/esas2r
ATUSB IEEE 802.15.4 RADIO DRIVER
-M: Stefan Schmidt <stefan@osg.samsung.com>
+M: Stefan Schmidt <stefan@datenfreihafen.org>
L: linux-wpan@vger.kernel.org
S: Maintained
F: drivers/net/ieee802154/atusb.c
@@ -2547,6 +2601,13 @@ S: Maintained
F: drivers/auxdisplay/
F: include/linux/cfag12864b.h
+AVIA HX711 ANALOG DIGITAL CONVERTER IIO DRIVER
+M: Andreas Klinger <ak@it-klinger.de>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
+F: drivers/iio/adc/hx711.c
+
AX.25 NETWORK LAYER
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-hams@vger.kernel.org
@@ -3510,10 +3571,30 @@ F: drivers/net/ethernet/cisco/enic/
CISCO VIC LOW LATENCY NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
-M: Dave Goodell <dgoodell@cisco.com>
S: Supported
F: drivers/infiniband/hw/usnic/
+CIRRUS LOGIC MADERA CODEC DRIVERS
+M: Charles Keepax <ckeepax@opensource.cirrus.com>
+M: Richard Fitzgerald <rf@opensource.cirrus.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+L: patches@opensource.cirrus.com
+T: git https://github.com/CirrusLogic/linux-drivers.git
+W: https://github.com/CirrusLogic/linux-drivers/wiki
+S: Supported
+F: Documentation/devicetree/bindings/mfd/madera.txt
+F: Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt
+F: include/linux/mfd/madera/*
+F: drivers/gpio/gpio-madera*
+F: drivers/mfd/madera*
+F: drivers/mfd/cs47l*
+F: drivers/pinctrl/cirrus/*
+
+CLANG-FORMAT FILE
+M: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
+S: Maintained
+F: .clang-format
+
CLEANCACHE API
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: linux-kernel@vger.kernel.org
@@ -4400,6 +4481,12 @@ X: Documentation/spi
X: Documentation/media
T: git git://git.lwn.net/linux.git docs-next
+DOCUMENTATION/ITALIAN
+M: Federico Vaga <federico.vaga@vaga.pv.it>
+L: linux-doc@vger.kernel.org
+S: Maintained
+F: Documentation/translations/it_IT
+
DONGWOON DW9714 LENS VOICE COIL DRIVER
M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
@@ -4407,6 +4494,13 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/i2c/dw9714.c
+DONGWOON DW9807 LENS VOICE COIL DRIVER
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/dw9807.c
+
DOUBLETALK DRIVER
M: "James R. Van Zandt" <jrv@vanzandt.mv.com>
L: blinux-list@redhat.com
@@ -4427,7 +4521,8 @@ S: Maintained
F: drivers/staging/fsl-dpaa2/ethernet
DPAA2 ETHERNET SWITCH DRIVER
-M: Razvan Stefanescu <razvan.stefanescu@nxp.com>
+M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
+M: Ioana Ciornei <ioana.ciornei@nxp.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/staging/fsl-dpaa2/ethsw
@@ -4460,6 +4555,7 @@ F: Documentation/blockdev/drbd/
DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+R: "Rafael J. Wysocki" <rafael@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: Documentation/kobject.txt
@@ -4630,7 +4726,7 @@ F: include/uapi/drm/vmwgfx_drm.h
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
L: dri-devel@lists.freedesktop.org
-T: git git://people.freedesktop.org/~airlied/linux
+T: git git://anongit.freedesktop.org/drm/drm
B: https://bugs.freedesktop.org/
C: irc://chat.freenode.net/dri-devel
S: Maintained
@@ -4883,7 +4979,8 @@ F: Documentation/gpu/xen-front.rst
DRM TTM SUBSYSTEM
M: Christian Koenig <christian.koenig@amd.com>
-M: Roger He <Hongbo.He@amd.com>
+M: Huang Rui <ray.huang@amd.com>
+M: Junwei Zhang <Jerry.Zhang@amd.com>
T: git git://people.freedesktop.org/~agd5f/linux
S: Maintained
L: dri-devel@lists.freedesktop.org
@@ -5055,6 +5152,18 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/tuners/e4000*
+EARTH_PT1 MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/pci/pt1/
+
+EARTH_PT3 MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/pci/pt3/
+
EC100 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -5443,6 +5552,7 @@ F: drivers/iommu/exynos-iommu.c
EZchip NPS platform support
M: Vineet Gupta <vgupta@synopsys.com>
+M: Ofer Levi <oferle@mellanox.com>
S: Supported
F: arch/arc/plat-eznps
F: arch/arc/boot/dts/eznps.dts
@@ -5638,6 +5748,14 @@ F: drivers/fpga/
F: include/linux/fpga/
W: http://www.rocketboards.org
+FPGA DFL DRIVERS
+M: Wu Hao <hao.wu@intel.com>
+L: linux-fpga@vger.kernel.org
+S: Maintained
+F: Documentation/fpga/dfl.txt
+F: include/uapi/linux/fpga-dfl.h
+F: drivers/fpga/dfl*
+
FPU EMULATOR
M: Bill Metzenthen <billm@melbpc.org.au>
W: http://floatingpoint.sourceforge.net/emulator/index.html
@@ -5789,7 +5907,6 @@ F: include/linux/fsl/
FREESCALE SOC FS_ENET DRIVER
M: Pantelis Antoniou <pantelis.antoniou@gmail.com>
-M: Vitaly Bordug <vbordug@ru.mvista.com>
L: linuxppc-dev@lists.ozlabs.org
L: netdev@vger.kernel.org
S: Maintained
@@ -5856,6 +5973,14 @@ F: fs/crypto/
F: include/linux/fscrypt*.h
F: Documentation/filesystems/fscrypt.rst
+FSI-ATTACHED I2C DRIVER
+M: Eddie James <eajames@linux.vnet.ibm.com>
+L: linux-i2c@vger.kernel.org
+L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/i2c/busses/i2c-fsi.c
+F: Documentation/devicetree/bindings/i2c/i2c-fsi.txt
+
FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE
M: Jan Kara <jack@suse.cz>
R: Amir Goldstein <amir73il@gmail.com>
@@ -5921,6 +6046,13 @@ F: scripts/gcc-plugin.sh
F: scripts/Makefile.gcc-plugins
F: Documentation/gcc-plugins.txt
+GASKET DRIVER FRAMEWORK
+M: Rob Springer <rspringer@google.com>
+M: John Joseph <jnjoseph@google.com>
+M: Ben Chan <benchan@chromium.org>
+S: Maintained
+F: drivers/staging/gasket/
+
GCOV BASED KERNEL PROFILING
M: Peter Oberparleiter <oberpar@linux.ibm.com>
S: Maintained
@@ -5929,7 +6061,7 @@ F: Documentation/dev-tools/gcov.rst
GDB KERNEL DEBUGGING HELPER SCRIPTS
M: Jan Kiszka <jan.kiszka@siemens.com>
-M: Kieran Bingham <kieran@bingham.xyz>
+M: Kieran Bingham <kbingham@kernel.org>
S: Supported
F: scripts/gdb/
@@ -6005,6 +6137,12 @@ F: drivers/base/power/domain*.c
F: include/linux/pm_domain.h
F: Documentation/devicetree/bindings/power/power_domain.txt
+GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER
+M: Eugen Hristev <eugen.hristev@microchip.com>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/input/touchscreen/resistive-adc-touch.c
+
GENERIC UIO DRIVER FOR PCI DEVICES
M: "Michael S. Tsirkin" <mst@redhat.com>
L: kvm@vger.kernel.org
@@ -6042,6 +6180,14 @@ F: Documentation/isdn/README.gigaset
F: drivers/isdn/gigaset/
F: include/uapi/linux/gigaset_dev.h
+GNSS SUBSYSTEM
+M: Johan Hovold <johan@kernel.org>
+S: Maintained
+F: Documentation/ABI/testing/sysfs-class-gnss
+F: Documentation/devicetree/bindings/gnss/
+F: drivers/gnss/
+F: include/linux/gnss.h
+
GO7007 MPEG CODEC
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
@@ -6140,7 +6286,7 @@ F: drivers/staging/greybus/bootrom.c
F: drivers/staging/greybus/firmware.h
F: drivers/staging/greybus/fw-core.c
F: drivers/staging/greybus/fw-download.c
-F: drivers/staging/greybus/fw-managament.c
+F: drivers/staging/greybus/fw-management.c
F: drivers/staging/greybus/greybus_authentication.h
F: drivers/staging/greybus/greybus_firmware.h
F: drivers/staging/greybus/hid.c
@@ -6149,12 +6295,10 @@ F: drivers/staging/greybus/spi.c
F: drivers/staging/greybus/spilib.c
F: drivers/staging/greybus/spilib.h
-GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
+GREYBUS LOOPBACK DRIVER
M: Bryan O'Donoghue <pure.logic@nexus-software.ie>
S: Maintained
F: drivers/staging/greybus/loopback.c
-F: drivers/staging/greybus/timesync.c
-F: drivers/staging/greybus/timesync_platform.c
GREYBUS PLATFORM DRIVERS
M: Vaibhav Hiremath <hvaibhav.linux@gmail.com>
@@ -6908,7 +7052,7 @@ F: drivers/clk/clk-versaclock5.c
IEEE 802.15.4 SUBSYSTEM
M: Alexander Aring <alex.aring@gmail.com>
-M: Stefan Schmidt <stefan@osg.samsung.com>
+M: Stefan Schmidt <stefan@datenfreihafen.org>
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
@@ -7027,7 +7171,7 @@ M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ina209
-F: Documentation/devicetree/bindings/i2c/ina209.txt
+F: Documentation/devicetree/bindings/hwmon/ina2xx.txt
F: drivers/hwmon/ina209.c
INA2XX HARDWARE MONITOR DRIVER
@@ -7095,6 +7239,7 @@ F: include/uapi/linux/input.h
F: include/uapi/linux/input-event-codes.h
F: include/linux/input/
F: Documentation/devicetree/bindings/input/
+F: Documentation/devicetree/bindings/serio/
F: Documentation/input/
INPUT MULTITOUCH (MT) PROTOCOL
@@ -7244,6 +7389,9 @@ F: drivers/dma/iop-adma.c
INTEL IPU3 CSI-2 CIO2 DRIVER
M: Yong Zhi <yong.zhi@intel.com>
M: Sakari Ailus <sakari.ailus@linux.intel.com>
+M: Bingbu Cao <bingbu.cao@intel.com>
+R: Tian Shu Qiu <tian.shu.qiu@intel.com>
+R: Jian Xu Zheng <jian.xu.zheng@intel.com>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/pci/intel/ipu3/
@@ -7349,7 +7497,7 @@ M: Megha Dey <megha.dey@linux.intel.com>
R: Tim Chen <tim.c.chen@linux.intel.com>
L: linux-crypto@vger.kernel.org
S: Supported
-F: arch/x86/crypto/sha*-mb
+F: arch/x86/crypto/sha*-mb/
F: crypto/mcryptd.c
INTEL TELEMETRY DRIVER
@@ -7561,9 +7709,8 @@ S: Maintained
F: drivers/firmware/iscsi_ibft*
ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
-M: Or Gerlitz <ogerlitz@mellanox.com>
M: Sagi Grimberg <sagi@grimberg.me>
-M: Roi Dayan <roid@mellanox.com>
+M: Max Gurtovoy <maxg@mellanox.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.openfabrics.org
@@ -7984,7 +8131,7 @@ F: lib/test_kmod.c
F: tools/testing/selftests/kmod/
KPROBES
-M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
+M: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -8315,17 +8462,18 @@ M: Jade Alglave <j.alglave@ucl.ac.uk>
M: Luc Maranget <luc.maranget@inria.fr>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
R: Akira Yokosawa <akiyks@gmail.com>
+R: Daniel Lustig <dlustig@nvidia.com>
L: linux-kernel@vger.kernel.org
+L: linux-arch@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: tools/memory-model/
+F: Documentation/atomic_bitops.txt
+F: Documentation/atomic_t.txt
+F: Documentation/core-api/atomic_ops.rst
+F: Documentation/core-api/refcount-vs-atomic.rst
F: Documentation/memory-barriers.txt
-LINUX SECURITY MODULE (LSM) FRAMEWORK
-M: Chris Wright <chrisw@sous-sol.org>
-L: linux-security-module@vger.kernel.org
-S: Supported
-
LIS3LV02D ACCELEROMETER DRIVER
M: Eric Piel <eric.piel@tremplin-utc.net>
S: Maintained
@@ -8628,7 +8776,7 @@ MARVELL MWIFIEX WIRELESS DRIVER
M: Amitkumar Karwar <amitkarwar@gmail.com>
M: Nishant Sarmukadam <nishants@marvell.com>
M: Ganapathi Bhat <gbhat@marvell.com>
-M: Xinming Hu <huxm@marvell.com>
+M: Xinming Hu <huxinming820@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/marvell/mwifiex/
@@ -8955,6 +9103,14 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/dvb-frontends/stv6111*
+MEDIA DRIVERS FOR STM32 - DCMI
+M: Hugues Fruchet <hugues.fruchet@st.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
+F: drivers/media/platform/stm32/stm32-dcmi.c
+
MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
M: Dmitry Osipenko <digetx@gmail.com>
L: linux-media@vger.kernel.org
@@ -8986,6 +9142,14 @@ F: include/uapi/linux/meye.h
F: include/uapi/linux/ivtv*
F: include/uapi/linux/uvcvideo.h
+MEDIATEK BLUETOOTH DRIVER
+M: Sean Wang <sean.wang@mediatek.com>
+L: linux-bluetooth@vger.kernel.org
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
+F: drivers/bluetooth/btmtkuart.c
+
MEDIATEK CIR DRIVER
M: Sean Wang <sean.wang@mediatek.com>
S: Maintained
@@ -9074,7 +9238,7 @@ S: Maintained
F: drivers/usb/mtu3/
MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
-M: Peter Senna Tschudin <peter.senna@collabora.com>
+M: Peter Senna Tschudin <peter.senna@gmail.com>
M: Martin Donnelly <martin.donnelly@ge.com>
M: Martyn Welch <martyn.welch@collabora.co.uk>
S: Maintained
@@ -9158,6 +9322,7 @@ S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlxsw/
+F: tools/testing/selftests/drivers/net/mlxsw/
MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
M: mlxsw@mellanox.com
@@ -9298,6 +9463,12 @@ F: drivers/leds/leds-menf21bmc.c
F: drivers/hwmon/menf21bmc_hwmon.c
F: Documentation/hwmon/menf21bmc
+MEN Z069 WATCHDOG DRIVER
+M: Johannes Thumshirn <jth@kernel.org>
+L: linux-watchdog@vger.kernel.org
+S: Maintained
+F: drivers/watchdog/menz069_wdt.c
+
MESON AO CEC DRIVER FOR AMLOGIC SOCS
M: Neil Armstrong <narmstrong@baylibre.com>
L: linux-media@lists.freedesktop.org
@@ -9345,7 +9516,6 @@ F: drivers/media/platform/atmel/atmel-isc-regs.h
F: devicetree/bindings/media/atmel-isc.txt
MICROCHIP / ATMEL NAND DRIVER
-M: Wenyou Yang <wenyou.yang@microchip.com>
M: Josh Wu <rainyfeeling@outlook.com>
L: linux-mtd@lists.infradead.org
S: Supported
@@ -9637,6 +9807,14 @@ F: Documentation/devicetree/bindings/media/i2c/mt9v032.txt
F: drivers/media/i2c/mt9v032.c
F: include/media/i2c/mt9v032.h
+MT9V111 APTINA CAMERA SENSOR
+M: Jacopo Mondi <jacopo@jmondi.org>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.txt
+F: drivers/media/i2c/mt9v111.c
+
MULTIFUNCTION DEVICES (MFD)
M: Lee Jones <lee.jones@linaro.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git
@@ -9681,6 +9859,12 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/musb/
+MXL301RF MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/tuners/mxl301rf*
+
MXL5007T MEDIA DRIVER
M: Michael Krufky <mkrufky@linuxtv.org>
L: linux-media@vger.kernel.org
@@ -9749,12 +9933,6 @@ F: drivers/scsi/mac_scsi.*
F: drivers/scsi/sun3_scsi.*
F: drivers/scsi/sun3_scsi_vme.c
-NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
-M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
-L: linux-scsi@vger.kernel.org
-S: Maintained
-F: drivers/scsi/NCR_D700.*
-
NCSI LIBRARY:
M: Samuel Mendoza-Jonas <sam@mendozajonas.com>
S: Maintained
@@ -10213,11 +10391,13 @@ F: sound/soc/codecs/sgtl5000*
NXP TDA998X DRM DRIVER
M: Russell King <linux@armlinux.org.uk>
-S: Supported
+S: Maintained
T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
F: drivers/gpu/drm/i2c/tda998x_drv.c
F: include/drm/i2c/tda998x.h
+F: include/dt-bindings/display/tda998x.h
+K: "nxp,tda998x"
NXP TFA9879 DRIVER
M: Peter Rosin <peda@axentia.se>
@@ -10396,6 +10576,7 @@ F: arch/arm/plat-omap/
F: arch/arm/configs/omap1_defconfig
F: drivers/i2c/busses/i2c-omap.c
F: include/linux/platform_data/i2c-omap.h
+F: include/linux/platform_data/ams-delta-fiq.h
OMAP2+ SUPPORT
M: Tony Lindgren <tony@atomide.com>
@@ -10461,6 +10642,14 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/i2c/ov13858.c
+OMNIVISION OV2680 SENSOR DRIVER
+M: Rui Miguel Silva <rmfrfs@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/ov2680.c
+F: Documentation/devicetree/bindings/media/i2c/ov2680.txt
+
OMNIVISION OV2685 SENSOR DRIVER
M: Shunqian Zheng <zhengsq@rock-chips.com>
L: linux-media@vger.kernel.org
@@ -11188,6 +11377,13 @@ S: Maintained
F: include/linux/personality.h
F: include/uapi/linux/personality.h
+PHOENIX RC FLIGHT CONTROLLER ADAPTER
+M: Marcus Folkesson <marcus.folkesson@gmail.com>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: Documentation/input/devices/pxrc.rst
+F: drivers/input/joystick/pxrc.c
+
PHONET PROTOCOL
M: Remi Denis-Courmont <courmisch@gmail.com>
S: Supported
@@ -11254,7 +11450,7 @@ F: Documentation/devicetree/bindings/pinctrl/fsl,*
PIN CONTROLLER - INTEL
M: Mika Westerberg <mika.westerberg@linux.intel.com>
-M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S: Maintained
F: drivers/pinctrl/intel/
@@ -11764,6 +11960,18 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
+QM1D1B0004 MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/tuners/qm1d1b0004*
+
+QM1D1C0042 MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/tuners/qm1d1c0042*
+
QNX4 FILESYSTEM
M: Anders Larsen <al@alarsen.net>
W: http://www.alarsen.net/linux/qnx4fs/
@@ -11812,7 +12020,7 @@ L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/media/qcom,camss.txt
F: Documentation/media/v4l-drivers/qcom_camss.rst
-F: drivers/media/platform/qcom/camss-8x16/
+F: drivers/media/platform/qcom/camss/
QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
M: Ilia Lin <ilia.lin@gmail.com>
@@ -11827,6 +12035,14 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/qualcomm/emac/
+QUALCOMM GENERIC INTERFACE I2C DRIVER
+M: Alok Chauhan <alokc@codeaurora.org>
+M: Karthikeyan Ramasubramanian <kramasub@codeaurora.org>
+L: linux-i2c@vger.kernel.org
+L: linux-arm-msm@vger.kernel.org
+S: Supported
+F: drivers/i2c/busses/i2c-qcom-geni.c
+
QUALCOMM HEXAGON ARCHITECTURE
M: Richard Kuo <rkuo@codeaurora.org>
L: linux-hexagon@vger.kernel.org
@@ -11835,7 +12051,7 @@ S: Supported
F: arch/hexagon/
QUALCOMM HIDMA DRIVER
-M: Sinan Kaya <okaya@codeaurora.org>
+M: Sinan Kaya <okaya@kernel.org>
L: linux-arm-kernel@lists.infradead.org
L: linux-arm-msm@vger.kernel.org
L: dmaengine@vger.kernel.org
@@ -12035,9 +12251,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/
X: Documentation/RCU/torture.txt
F: include/linux/rcu*
-X: include/linux/srcu.h
+X: include/linux/srcu*.h
F: kernel/rcu/
-X: kernel/torture.c
+X: kernel/rcu/srcu*.c
REAL TIME CLOCK (RTC) SUBSYSTEM
M: Alessandro Zummo <a.zummo@towertech.it>
@@ -12062,6 +12278,13 @@ S: Maintained
F: sound/soc/codecs/rt*
F: include/sound/rt*.h
+REALTEK RTL83xx SMI DSA ROUTER CHIPS
+M: Linus Walleij <linus.walleij@linaro.org>
+S: Maintained
+F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
+F: drivers/net/dsa/realtek-smi*
+F: drivers/net/dsa/rtl83*
+
REGISTER MAP ABSTRACTION
M: Mark Brown <broonie@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -12169,6 +12392,8 @@ S: Maintained
F: Documentation/rfkill.txt
F: Documentation/ABI/stable/sysfs-class-rfkill
F: net/rfkill/
+F: include/linux/rfkill.h
+F: include/uapi/linux/rfkill.h
RHASHTABLE
M: Thomas Graf <tgraf@suug.ch>
@@ -12176,7 +12401,9 @@ M: Herbert Xu <herbert@gondor.apana.org.au>
L: netdev@vger.kernel.org
S: Maintained
F: lib/rhashtable.c
+F: lib/test_rhashtable.c
F: include/linux/rhashtable.h
+F: include/linux/rhashtable-types.h
RICOH R5C592 MEMORYSTICK DRIVER
M: Maxim Levitsky <maximlevitsky@gmail.com>
@@ -12398,7 +12625,6 @@ F: drivers/pci/hotplug/s390_pci_hpc.c
S390 VFIO-CCW DRIVER
M: Cornelia Huck <cohuck@redhat.com>
-M: Dong Jia Shi <bjsdjshi@linux.ibm.com>
M: Halil Pasic <pasic@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: kvm@vger.kernel.org
@@ -12635,15 +12861,21 @@ S: Maintained
F: drivers/scsi/sr*
SCSI RDMA PROTOCOL (SRP) INITIATOR
-M: Bart Van Assche <bart.vanassche@sandisk.com>
+M: Bart Van Assche <bvanassche@acm.org>
L: linux-rdma@vger.kernel.org
S: Supported
-W: http://www.openfabrics.org
Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/dad/srp-initiator.git
F: drivers/infiniband/ulp/srp/
F: include/scsi/srp.h
+SCSI RDMA PROTOCOL (SRP) TARGET
+M: Bart Van Assche <bvanassche@acm.org>
+L: linux-rdma@vger.kernel.org
+L: target-devel@vger.kernel.org
+S: Supported
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+F: drivers/infiniband/ulp/srpt/
+
SCSI SG DRIVER
M: Doug Gilbert <dgilbert@interlog.com>
L: linux-scsi@vger.kernel.org
@@ -12742,6 +12974,13 @@ S: Maintained
F: drivers/mmc/host/sdhci*
F: include/linux/mmc/sdhci*
+SYNOPSYS SDHCI COMPLIANT DWC MSHC DRIVER
+M: Prabu Thangamuthu <prabu.t@synopsys.com>
+M: Manjunath M B <manjumb@synopsys.com>
+L: linux-mmc@vger.kernel.org
+S: Maintained
+F: drivers/mmc/host/sdhci-pci-dwc-mshc.c
+
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
M: Ben Dooks <ben-linux@fluff.org>
M: Jaehoon Chung <jh80.chung@samsung.com>
@@ -12783,6 +13022,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
W: http://kernsec.org/
S: Supported
F: security/
+X: security/selinux/
SELINUX SECURITY MODULE
M: Paul Moore <paul@paul-moore.com>
@@ -12859,6 +13099,14 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: net/smc/
+SHARP RJ54N1CB0C SENSOR DRIVER
+M: Jacopo Mondi <jacopo@jmondi.org>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Odd fixes
+F: drivers/media/i2c/rj54n1cb0c.c
+F: include/media/i2c/rj54n1cb0c.h
+
SH_VEU V4L2 MEM2MEM DRIVER
L: linux-media@vger.kernel.org
S: Orphan
@@ -12965,7 +13213,7 @@ L: linux-input@vger.kernel.org
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/silead.c
-F: drivers/platform/x86/silead_dmi.c
+F: drivers/platform/x86/touchscreen_dmi.c
SILICON MOTION SM712 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -13074,8 +13322,8 @@ L: linux-kernel@vger.kernel.org
W: http://www.rdrop.com/users/paulmck/RCU/
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
-F: include/linux/srcu.h
-F: kernel/rcu/srcu.c
+F: include/linux/srcu*.h
+F: kernel/rcu/srcu*.c
SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
@@ -13485,6 +13733,13 @@ M: H Hartley Sweeten <hsweeten@visionengravers.com>
S: Odd Fixes
F: drivers/staging/comedi/
+STAGING - EROFS FILE SYSTEM
+M: Gao Xiang <gaoxiang25@huawei.com>
+M: Chao Yu <yuchao0@huawei.com>
+L: linux-erofs@lists.ozlabs.org
+S: Maintained
+F: drivers/staging/erofs/
+
STAGING - FLARION FT1000 DRIVERS
M: Marek Belisko <marek.belisko@gmail.com>
S: Odd Fixes
@@ -13571,6 +13826,13 @@ L: linux-block@vger.kernel.org
S: Maintained
F: drivers/block/skd*[ch]
+STI AUDIO (ASoC) DRIVERS
+M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
+F: sound/soc/sti/
+
STI CEC DRIVER
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
S: Maintained
@@ -13584,6 +13846,14 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/stk1160/
+STM32 AUDIO (ASoC) DRIVERS
+M: Olivier Moysan <olivier.moysan@st.com>
+M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/st,stm32-*.txt
+F: sound/soc/stm/
+
STM32 TIMER/LPTIMER DRIVERS
M: Fabrice Gasnier <fabrice.gasnier@st.com>
S: Maintained
@@ -13860,6 +14130,12 @@ F: include/uapi/linux/tc_act/
F: include/uapi/linux/tc_ematch/
F: net/sched/
+TC90522 MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/dvb-frontends/tc90522*
+
TCP LOW PRIORITY MODULE
M: "Wong Hoi Sing, Edison" <hswong3i@gmail.com>
M: "Hung Hing Lun, Mike" <hlhung3i@gmail.com>
@@ -14054,6 +14330,13 @@ M: Laxman Dewangan <ldewangan@nvidia.com>
S: Supported
F: drivers/input/keyboard/tegra-kbc.c
+TEGRA NAND DRIVER
+M: Stefan Agner <stefan@agner.ch>
+M: Lucas Stach <dev@lynxeye.de>
+S: Maintained
+F: Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt
+F: drivers/mtd/nand/raw/tegra_nand.c
+
TEGRA PWM DRIVER
M: Thierry Reding <thierry.reding@gmail.com>
S: Supported
@@ -14434,6 +14717,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/torture.c
F: kernel/rcu/rcutorture.c
+F: kernel/rcu/rcuperf.c
F: kernel/locking/locktorture.c
TOSHIBA ACPI EXTRAS DRIVER
@@ -14955,7 +15239,7 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/typec/mux/pi3usb30532.c
-USB TYPEC SUBSYSTEM
+USB TYPEC CLASS
M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
L: linux-usb@vger.kernel.org
S: Maintained
@@ -14964,6 +15248,15 @@ F: Documentation/driver-api/usb/typec.rst
F: drivers/usb/typec/
F: include/linux/usb/typec.h
+USB TYPEC BUS FOR ALTERNATE MODES
+M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-typec
+F: Documentation/driver-api/usb/typec_bus.rst
+F: drivers/usb/typec/altmodes/
+F: include/linux/usb/typec_altmode.h
+
USB UHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
@@ -14994,6 +15287,7 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/gadget/function/*uvc*
F: drivers/usb/gadget/legacy/webcam.c
+F: include/uapi/linux/usb/g_uvc.h
USB WIRELESS RNDIS DRIVER (rndis_wlan)
M: Jussi Kivilinna <jussi.kivilinna@iki.fi>
@@ -15141,6 +15435,14 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/via/via-velocity.*
+VICODEC VIRTUAL CODEC DRIVER
+M: Hans Verkuil <hans.verkuil@cisco.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: https://linuxtv.org
+S: Maintained
+F: drivers/media/platform/vicodec/*
+
VIDEO MULTIPLEXER DRIVER
M: Philipp Zabel <p.zabel@pengutronix.de>
L: linux-media@vger.kernel.org
@@ -15308,7 +15610,7 @@ F: include/linux/vme*
VMWARE BALLOON DRIVER
M: Xavier Deguillard <xdeguillard@vmware.com>
-M: Philip Moltmann <moltmann@vmware.com>
+M: Nadav Amit <namit@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
L: linux-kernel@vger.kernel.org
S: Maintained
@@ -15400,6 +15702,7 @@ F: drivers/mmc/host/vub300.c
W1 DALLAS'S 1-WIRE BUS
M: Evgeniy Polyakov <zbr@ioremap.net>
S: Maintained
+F: Documentation/devicetree/bindings/w1/
F: Documentation/w1/
F: drivers/w1/
F: include/linux/w1.h
diff --git a/Makefile b/Makefile
index c5ce55cbc543..c13f8b85ba60 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION =
NAME = Merciless Moray
# *DOCUMENTATION*
@@ -224,11 +224,14 @@ clean-targets := %clean mrproper cleandocs
no-dot-config-targets := $(clean-targets) \
cscope gtags TAGS tags help% %docs check% coccicheck \
$(version_h) headers_% archheaders archscripts \
- kernelversion %src-pkg
+ %asm-generic kernelversion %src-pkg
+no-sync-config-targets := $(no-dot-config-targets) install %install \
+ kernelrelease
-config-targets := 0
-mixed-targets := 0
-dot-config := 1
+config-targets := 0
+mixed-targets := 0
+dot-config := 1
+may-sync-config := 1
ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
@@ -236,6 +239,16 @@ ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
endif
endif
+ifneq ($(filter $(no-sync-config-targets), $(MAKECMDGOALS)),)
+ ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
+ may-sync-config := 0
+ endif
+endif
+
+ifneq ($(KBUILD_EXTMOD),)
+ may-sync-config := 0
+endif
+
ifeq ($(KBUILD_EXTMOD),)
ifneq ($(filter config %config,$(MAKECMDGOALS)),)
config-targets := 1
@@ -353,17 +366,18 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
else if [ -x /bin/bash ]; then echo /bin/bash; \
else echo sh; fi ; fi)
-HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
-HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
-HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
+HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
HOSTCC = gcc
HOSTCXX = g++
-HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
- -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS)
-HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS)
-HOSTLDFLAGS := $(HOST_LFS_LDFLAGS)
-HOST_LOADLIBES := $(HOST_LFS_LIBS)
+KBUILD_HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
+ -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \
+ $(HOSTCFLAGS)
+KBUILD_HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
+KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
+KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
# Make variables (CC, etc...)
AS = $(CROSS_COMPILE)as
@@ -429,10 +443,10 @@ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
LDFLAGS :=
GCC_PLUGINS_CFLAGS :=
-export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP HOSTLDFLAGS HOST_LOADLIBES
+export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
+export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
-export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
+export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
@@ -507,11 +521,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
- CC_CAN_LINK := y
- export CC_CAN_LINK
-endif
-
# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
@@ -589,7 +598,7 @@ virt-y := virt/
endif # KBUILD_EXTMOD
ifeq ($(dot-config),1)
--include include/config/auto.conf
+include include/config/auto.conf
endif
# The all: target is the default when no target is given on the
@@ -611,7 +620,7 @@ ARCH_CFLAGS :=
include arch/$(SRCARCH)/Makefile
ifeq ($(dot-config),1)
-ifeq ($(KBUILD_EXTMOD),)
+ifeq ($(may-sync-config),1)
# Read in dependencies to all Kconfig* files, make sure to run syncconfig if
# changes are detected. This should be included after arch/$(SRCARCH)/Makefile
# because some architectures define CROSS_COMPILE there.
@@ -626,8 +635,9 @@ $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
$(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
else
-# external modules needs include/generated/autoconf.h and include/config/auto.conf
-# but do not care if they are up-to-date. Use auto.conf to trigger the test
+# External modules and some install targets need include/generated/autoconf.h
+# and include/config/auto.conf but do not care if they are up-to-date.
+# Use auto.conf to trigger the test
PHONY += include/config/auto.conf
include/config/auto.conf:
@@ -639,11 +649,7 @@ include/config/auto.conf:
echo >&2 ; \
/bin/false)
-endif # KBUILD_EXTMOD
-
-else
-# Dummy target needed, because used as prerequisite
-include/config/auto.conf: ;
+endif # may-sync-config
endif # $(dot-config)
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -748,12 +754,28 @@ ifdef CONFIG_FUNCTION_TRACER
ifndef CC_FLAGS_FTRACE
CC_FLAGS_FTRACE := -pg
endif
-export CC_FLAGS_FTRACE
+ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ # gcc 5 supports generating the mcount tables directly
+ ifeq ($(call cc-option-yn,-mrecord-mcount),y)
+ CC_FLAGS_FTRACE += -mrecord-mcount
+ export CC_USING_RECORD_MCOUNT := 1
+ endif
+ ifdef CONFIG_HAVE_NOP_MCOUNT
+ ifeq ($(call cc-option-yn, -mnop-mcount),y)
+ CC_FLAGS_FTRACE += -mnop-mcount
+ CC_FLAGS_USING += -DCC_USING_NOP_MCOUNT
+ endif
+ endif
+endif
ifdef CONFIG_HAVE_FENTRY
-CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY)
+ ifeq ($(call cc-option-yn, -mfentry),y)
+ CC_FLAGS_FTRACE += -mfentry
+ CC_FLAGS_USING += -DCC_USING_FENTRY
+ endif
endif
-KBUILD_CFLAGS += $(CC_FLAGS_FTRACE) $(CC_USING_FENTRY)
-KBUILD_AFLAGS += $(CC_USING_FENTRY)
+export CC_FLAGS_FTRACE
+KBUILD_CFLAGS += $(CC_FLAGS_FTRACE) $(CC_FLAGS_USING)
+KBUILD_AFLAGS += $(CC_FLAGS_USING)
ifdef CONFIG_DYNAMIC_FTRACE
ifdef CONFIG_HAVE_C_RECORDMCOUNT
BUILD_C_RECORDMCOUNT := y
@@ -1014,9 +1036,10 @@ ifdef CONFIG_GDB_SCRIPTS
endif
+$(call if_changed,link-vmlinux)
-# Build samples along the rest of the kernel
+# Build samples along with the rest of the kernel. This needs headers_install.
ifdef CONFIG_SAMPLES
vmlinux-dirs += samples
+samples: headers_install
endif
# The actual objects are generated when descending,
@@ -1038,15 +1061,14 @@ define filechk_kernel.release
endef
# Store (new) KERNELRELEASE string in include/config/kernel.release
-include/config/kernel.release: include/config/auto.conf FORCE
+include/config/kernel.release: $(srctree)/Makefile FORCE
$(call filechk,kernel.release)
# Additional helpers built in scripts/
# Carefully list dependencies so we do not try to build scripts twice
# in parallel
PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
- asm-generic gcc-plugins $(autoksyms_h)
+scripts: scripts_basic asm-generic gcc-plugins $(autoksyms_h)
$(Q)$(MAKE) $(build)=$(@)
# Things we need to do before we recursively start building the kernel
@@ -1076,8 +1098,7 @@ endif
# that need to depend on updated CONFIG_* values can be checked here.
prepare2: prepare3 outputmakefile asm-generic
-prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h \
- include/config/auto.conf
+prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h
$(cmd_crmodverdir)
archprepare: archheaders archscripts prepare1 scripts_basic
@@ -1121,7 +1142,7 @@ define filechk_version.h
echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
endef
-$(version_h): $(srctree)/Makefile FORCE
+$(version_h): FORCE
$(call filechk,version.h)
$(Q)rm -f $(old_version_h)
@@ -1215,7 +1236,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
$(Q)$(AWK) '!x[$$0]++' $^ > $(objtree)/modules.builtin
-%/modules.builtin: include/config/auto.conf
+%/modules.builtin: include/config/auto.conf include/config/tristate.conf
$(Q)$(MAKE) $(modbuiltin)=$*
@@ -1717,6 +1738,6 @@ endif # skip-makefile
PHONY += FORCE
FORCE:
-# Declare the contents of the .PHONY variable as phony. We keep that
+# Declare the contents of the PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)
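
The three compiler flags probed in the CONFIG_FUNCTION_TRACER hunk above are worth a worked example. A minimal sketch, assuming a gcc new enough (per the hunk's own comment, gcc 5+) to accept the options; the file name is arbitrary:

/*
 * probe.c -- compile with:  gcc -c -pg -mfentry -mrecord-mcount probe.c
 *
 * -pg              emit a profiling call in every function
 * -mfentry         place that call (to __fentry__) before the prologue
 *                  instead of calling mcount after it
 * -mrecord-mcount  record every such call site in a __mcount_loc
 *                  section, replacing recordmcount post-processing
 * -mnop-mcount     emit a same-sized nop instead of the call, to be
 *                  patched in at runtime (CONFIG_HAVE_NOP_MCOUNT)
 */
void probe(void) { }
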
diff --git a/arch/Kconfig b/arch/Kconfig
index 1aa59063f1fd..4426e9687d89 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -3,6 +3,14 @@
# General architecture dependent options
#
+#
+# Note: arch/$(SRCARCH)/Kconfig needs to be included first so that it can
+# override the default values in this file.
+#
+source "arch/$(SRCARCH)/Kconfig"
+
+menu "General architecture-dependent options"
+
config CRASH_CORE
bool
@@ -13,6 +21,9 @@ config KEXEC_CORE
config HAVE_IMA_KEXEC
bool
+config HOTPLUG_SMT
+ bool
+
config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING
@@ -405,150 +416,6 @@ config SECCOMP_FILTER
See Documentation/userspace-api/seccomp_filter.rst for details.
-preferred-plugin-hostcc := $(if-success,[ $(gcc-version) -ge 40800 ],$(HOSTCXX),$(HOSTCC))
-
-config PLUGIN_HOSTCC
- string
- default "$(shell,$(srctree)/scripts/gcc-plugin.sh "$(preferred-plugin-hostcc)" "$(HOSTCXX)" "$(CC)")"
- help
- Host compiler used to build GCC plugins. This can be $(HOSTCXX),
- $(HOSTCC), or a null string if GCC plugin is unsupported.
-
-config HAVE_GCC_PLUGINS
- bool
- help
- An arch should select this symbol if it supports building with
- GCC plugins.
-
-menuconfig GCC_PLUGINS
- bool "GCC plugins"
- depends on HAVE_GCC_PLUGINS
- depends on PLUGIN_HOSTCC != ""
- help
- GCC plugins are loadable modules that provide extra features to the
- compiler. They are useful for runtime instrumentation and static analysis.
-
- See Documentation/gcc-plugins.txt for details.
-
-config GCC_PLUGIN_CYC_COMPLEXITY
- bool "Compute the cyclomatic complexity of a function" if EXPERT
- depends on GCC_PLUGINS
- depends on !COMPILE_TEST # too noisy
- help
- The complexity M of a function's control flow graph is defined as:
- M = E - N + 2P
- where
-
- E = the number of edges
- N = the number of nodes
- P = the number of connected components (exit nodes).
-
- Enabling this plugin reports the complexity to stderr during the
- build. It mainly serves as a simple example of how to create a
- gcc plugin for the kernel.
-
-config GCC_PLUGIN_SANCOV
- bool
- depends on GCC_PLUGINS
- help
- This plugin inserts a __sanitizer_cov_trace_pc() call at the start of
- basic blocks. It supports all gcc versions with plugin support (from
- gcc-4.5 on). It is based on the commit "Add fuzzing coverage support"
- by Dmitry Vyukov <dvyukov@google.com>.
-
-config GCC_PLUGIN_LATENT_ENTROPY
- bool "Generate some entropy during boot and runtime"
- depends on GCC_PLUGINS
- help
- By saying Y here the kernel will instrument some kernel code to
- extract some entropy from both original and artificially created
- program state. This will help especially embedded systems where
- there is little 'natural' source of entropy normally. The cost
- is some slowdown of the boot process (about 0.5%) and fork and
- irq processing.
-
- Note that entropy extracted this way is not cryptographically
- secure!
-
- This plugin was ported from grsecurity/PaX. More information at:
- * https://grsecurity.net/
- * https://pax.grsecurity.net/
-
-config GCC_PLUGIN_STRUCTLEAK
- bool "Force initialization of variables containing userspace addresses"
- depends on GCC_PLUGINS
- # Currently STRUCTLEAK inserts initialization out of live scope of
- # variables from KASAN point of view. This leads to KASAN false
- # positive reports. Prohibit this combination for now.
- depends on !KASAN_EXTRA
- help
- This plugin zero-initializes any structures containing a
- __user attribute. This can prevent some classes of information
- exposures.
-
- This plugin was ported from grsecurity/PaX. More information at:
- * https://grsecurity.net/
- * https://pax.grsecurity.net/
-
-config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
- bool "Force initialize all struct type variables passed by reference"
- depends on GCC_PLUGIN_STRUCTLEAK
- depends on !COMPILE_TEST
- help
- Zero initialize any struct type local variable that may be passed by
- reference without having been initialized.
-
-config GCC_PLUGIN_STRUCTLEAK_VERBOSE
- bool "Report forcefully initialized variables"
- depends on GCC_PLUGIN_STRUCTLEAK
- depends on !COMPILE_TEST # too noisy
- help
- This option will cause a warning to be printed each time the
- structleak plugin finds a variable it thinks needs to be
- initialized. Since not all existing initializers are detected
- by the plugin, this can produce false positive warnings.
-
-config GCC_PLUGIN_RANDSTRUCT
- bool "Randomize layout of sensitive kernel structures"
- depends on GCC_PLUGINS
- select MODVERSIONS if MODULES
- help
- If you say Y here, the layouts of structures that are entirely
- function pointers (and have not been manually annotated with
- __no_randomize_layout), or structures that have been explicitly
- marked with __randomize_layout, will be randomized at compile-time.
- This can introduce the requirement of an additional information
- exposure vulnerability for exploits targeting these structure
- types.
-
- Enabling this feature will introduce some performance impact,
- slightly increase memory usage, and prevent the use of forensic
- tools like Volatility against the system (unless the kernel
- source tree isn't cleaned after kernel installation).
-
- The seed used for compilation is located at
- scripts/gcc-plgins/randomize_layout_seed.h. It remains after
- a make clean to allow for external modules to be compiled with
- the existing seed and will be removed by a make mrproper or
- make distclean.
-
- Note that the implementation requires gcc 4.7 or newer.
-
- This plugin was ported from grsecurity/PaX. More information at:
- * https://grsecurity.net/
- * https://pax.grsecurity.net/
-
-config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE
- bool "Use cacheline-aware structure randomization"
- depends on GCC_PLUGIN_RANDSTRUCT
- depends on !COMPILE_TEST # do not reduce test coverage
- help
- If you say Y here, the RANDSTRUCT randomization will make a
- best effort at restricting randomization to cacheline-sized
- groups of elements. It will further not randomize bitfields
- in structures. This reduces the performance hit of RANDSTRUCT
- at the cost of weakened randomization.
-
config HAVE_STACKPROTECTOR
bool
help
@@ -875,6 +742,9 @@ config COMPAT_32BIT_TIME
config ARCH_NO_COHERENT_DMA_MMAP
bool
+config ARCH_NO_PREEMPT
+ bool
+
config CPU_NO_EFFICIENT_FFS
def_bool n
@@ -971,4 +841,18 @@ config REFCOUNT_FULL
against various use-after-free conditions that can be used in
security flaw exploits.
+config HAVE_ARCH_PREL32_RELOCATIONS
+ bool
+ help
+ May be selected by an architecture if it supports place-relative
+ 32-bit relocations, both in the toolchain and in the module loader,
+ in which case relative references can be used in special sections
+ for PCI fixups, initcalls, etc. These references are only half the
+ size on 64-bit architectures and don't require runtime relocation
+ on relocatable kernels.
+
source "kernel/gcov/Kconfig"
+
+source "scripts/gcc-plugins/Kconfig"
+
+endmenu
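
A sketch of what HAVE_ARCH_PREL32_RELOCATIONS buys: a special section entry stores a signed 32-bit offset from the entry to the target instead of an absolute pointer. The helper below mirrors the generic kernel idiom; treat it as illustrative:

/* resolve a place-relative 32-bit reference: the entry holds the
 * signed distance from its own address to the target, so it is half
 * the size of a 64-bit pointer and survives kernel relocation as-is */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
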
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 04a4a138ed13..5b4f88363453 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -4,6 +4,7 @@ config ALPHA
default y
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_NO_PREEMPT
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_AOUT
select HAVE_IDE
@@ -74,9 +75,6 @@ config PGTABLE_LEVELS
int
default 3
-source "init/Kconfig"
-source "kernel/Kconfig.freezer"
-
config AUDIT_ARCH
bool
@@ -573,8 +571,6 @@ config ARCH_DISCONTIGMEM_ENABLE
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa.rst> for more.
-source "mm/Kconfig"
-
config NUMA
bool "NUMA Support (EXPERIMENTAL)"
depends on DISCONTIGMEM && BROKEN
@@ -713,28 +709,11 @@ config SRM_ENV
This driver is also available as a module and will be called
srm_env then.
-source "fs/Kconfig.binfmt"
-
endmenu
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/alpha/Kconfig.debug"
-
# DUMMY_CONSOLE may be defined in drivers/video/console/Kconfig
# but we also need it if VGA_HOSE is set
config DUMMY_CONSOLE
bool
depends on VGA_HOSE
default y
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
diff --git a/arch/alpha/Kconfig.debug b/arch/alpha/Kconfig.debug
index 5e93dffb818a..b88c7b641d72 100644
--- a/arch/alpha/Kconfig.debug
+++ b/arch/alpha/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config EARLY_PRINTK
bool
@@ -39,5 +36,3 @@ config MATHEMU
This option is required for IEEE compliant floating point arithmetic
on the Alpha. The only time you would ever not say Y is to say M in
order to debug the code. Say Y unless you know what you are doing.
-
-endmenu
diff --git a/arch/alpha/boot/Makefile b/arch/alpha/boot/Makefile
index 0cbe4c59d3ce..991e023a6fc4 100644
--- a/arch/alpha/boot/Makefile
+++ b/arch/alpha/boot/Makefile
@@ -14,7 +14,7 @@ targets := vmlinux.gz vmlinux \
tools/bootpzh bootloader bootpheader bootpzheader
OBJSTRIP := $(obj)/tools/objstrip
-HOSTCFLAGS := -Wall -I$(objtree)/usr/include
+KBUILD_HOSTCFLAGS := -Wall -I$(objtree)/usr/include
BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
# SRM bootable image. Copy to offset 512 of a partition.
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 767bfdd42992..150a1c5d6a2c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -18,11 +18,11 @@
* To ensure dependency ordering is preserved for the _relaxed and
* _release atomics, an smp_read_barrier_depends() is unconditionally
* inserted into the _relaxed variants, which are used to build the
- * barriered versions. To avoid redundant back-to-back fences, we can
- * define the _acquire and _fence versions explicitly.
+ * barriered versions. Avoid redundant back-to-back fences in the
+ * _acquire and _fence versions.
*/
-#define __atomic_op_acquire(op, args...) op##_relaxed(args)
-#define __atomic_op_fence __atomic_op_release
+#define __atomic_acquire_fence()
+#define __atomic_post_full_fence()
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
@@ -206,7 +206,7 @@ ATOMIC_OPS(xor, xor)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -214,7 +214,7 @@ ATOMIC_OPS(xor, xor)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
smp_mb();
@@ -235,38 +235,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
smp_mb();
return old;
}
-
+#define atomic_fetch_add_unless atomic_fetch_add_unless
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns true iff @v was not @u.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
- long c, tmp;
+ long c, new, old;
smp_mb();
__asm__ __volatile__(
- "1: ldq_l %[tmp],%[mem]\n"
- " cmpeq %[tmp],%[u],%[c]\n"
- " addq %[tmp],%[a],%[tmp]\n"
+ "1: ldq_l %[old],%[mem]\n"
+ " cmpeq %[old],%[u],%[c]\n"
+ " addq %[old],%[a],%[new]\n"
" bne %[c],2f\n"
- " stq_c %[tmp],%[mem]\n"
- " beq %[tmp],3f\n"
+ " stq_c %[new],%[mem]\n"
+ " beq %[new],3f\n"
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
- : [tmp] "=&r"(tmp), [c] "=&r"(c)
+ : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
: "memory");
smp_mb();
- return !c;
+ return old;
}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
/*
* atomic64_dec_if_positive - decrement by 1 if old value positive
@@ -295,31 +296,6 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
smp_mb();
return old - 1;
}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
-
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-#define atomic64_inc_return(v) atomic64_add_return(1,(v))
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic64_inc(v) atomic64_add(1,(v))
-
-#define atomic_dec(v) atomic_sub(1,(v))
-#define atomic64_dec(v) atomic64_sub(1,(v))
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif /* _ALPHA_ATOMIC_H */
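
The rename from __atomic_add_unless/atomic64_add_unless to *_fetch_add_unless also changes the return convention from a boolean to the old value. The generic cmpxchg-based shape of the operation makes this easiest to see; a sketch, not the alpha ll/sc implementation above:

/* sketch: fetch_add_unless built on a cmpxchg loop */
static inline int fetch_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	while (c != u) {
		int old = atomic_cmpxchg(v, c, c + a);

		if (old == c)
			break;		/* add done; c is the old value */
		c = old;		/* lost a race; retry with fresh value */
	}
	return c;			/* old value; callers compare against u */
}
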
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index be14f16149d5..065fb372e355 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -112,4 +112,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 6e921754c8fc..c210a25dd6da 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1180,13 +1180,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
struct rusage32 __user *, ur)
{
- unsigned int status = 0;
struct rusage r;
- long err = kernel_wait4(pid, &status, options, &r);
+ long err = kernel_wait4(pid, ustatus, options, &r);
if (err <= 0)
return err;
- if (put_user(status, ustatus))
- return -EFAULT;
if (!ur)
return err;
if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index de2bd217adad..d73dc473fbb9 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -87,7 +87,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
const struct exception_table_entry *fixup;
- int fault, si_code = SEGV_MAPERR;
+ int si_code = SEGV_MAPERR;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index e81bcd271be7..6d5eb8267e42 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -50,6 +50,9 @@ config ARC
select HAVE_KERNEL_LZMA
select ARCH_HAS_PTE_SPECIAL
+config ARCH_HAS_CACHE_LINE_SIZE
+ def_bool y
+
config MIGHT_HAVE_PCI
bool
@@ -94,9 +97,6 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
def_bool y
depends on ARC_MMU_V4
-source "init/Kconfig"
-source "kernel/Kconfig.freezer"
-
menu "ARC Architecture Configuration"
menu "ARC Platform/SoC/Board"
@@ -413,7 +413,7 @@ config ARC_HAS_DIV_REM
config ARC_HAS_ACCL_REGS
bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
- default n
+ default y
help
Depending on the configuration, CPU can contain accumulator reg-pair
(also referred to as r58:r59). These can also be used by gcc as GPR so
@@ -548,24 +548,13 @@ config ARC_BUILTIN_DTB_NAME
Set the name of the DTB to embed in the vmlinux binary
Leaving it blank selects the minimal "skeleton" dtb
-source "kernel/Kconfig.preempt"
-
-menu "Executable file formats"
-source "fs/Kconfig.binfmt"
-endmenu
-
endmenu # "ARC Architecture Configuration"
-source "mm/Kconfig"
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "12" if ARC_HUGEPAGE_16M
default "11"
-source "net/Kconfig"
-source "drivers/Kconfig"
-
menu "Bus Support"
config PCI
@@ -586,9 +575,4 @@ source "drivers/pci/Kconfig"
endmenu
-source "fs/Kconfig"
-source "arch/arc/Kconfig.debug"
-source "security/Kconfig"
-source "crypto/Kconfig"
-source "lib/Kconfig"
source "kernel/power/Kconfig"
diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug
index 03da1a6b3072..45add86decd5 100644
--- a/arch/arc/Kconfig.debug
+++ b/arch/arc/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config 16KSTACKS
bool "Use 16Kb for kernel stacks instead of 8Kb"
@@ -11,5 +8,3 @@ config 16KSTACKS
This increases the resident kernel footprint and will cause less
threads to run on the system and also increase the pressure
on the VM subsystem for higher order allocations.
-
-endmenu
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index d37f49d6a27f..6c1b20dd76ad 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -16,7 +16,7 @@ endif
KBUILD_DEFCONFIG := nsim_700_defconfig
-cflags-y += -fno-common -pipe -fno-builtin -D__linux__
+cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
@@ -140,16 +140,3 @@ dtbs: scripts
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-
-# Hacks to enable final link due to absence of link-time branch relexation
-# and gcc choosing optimal(shorter) branches at -O3
-#
-# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
-# However lib/decompress_inflate.o (.init.text) calls
-# zlib_inflate_workspacesize (.text) causing relocation errors.
-# Thus forcing all exten calls in this file to be long calls
-export CFLAGS_decompress_inflate.o = -mmedium-calls
-export CFLAGS_initramfs.o = -mmedium-calls
-ifdef CONFIG_SMP
-export CFLAGS_core.o = -mmedium-calls
-endif
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 09f85154c5a4..a635ea972304 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 09fed3ef22b6..aa507e423075 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index ea2f6d817d1a..eba07f468654 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index ab231c040efe..098b19fbaa51 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index cf449cbf440d..0104c404d897 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 1b54c72f4296..6491be0ddbc9 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 31c2c70b34a1..99e05cf63fca 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index a578c721d50f..0dc4f9b737e7 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 37d7395f3272..be3c30a15e54 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 1e1470e2a7f0..3a74b9b21772 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 084a6e42685b..ea2834b4dc1d 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index f36d47990415..80a5a1b4924b 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 1aca2e8fd1ba..2cc87f909747 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -56,7 +56,6 @@ CONFIG_STMMAC_ETH=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 11859287c52a..4e0072730241 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -187,7 +187,8 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
+#define atomic_andnot atomic_andnot
+#define atomic_fetch_andnot atomic_fetch_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
@@ -296,8 +297,6 @@ ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
-#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
@@ -308,48 +307,6 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v
- */
-#define __atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- \
- /* \
- * Explicit full memory barrier needed before/after as \
- * LLOCK/SCOND thmeselves don't provide any such semantics \
- */ \
- smp_mb(); \
- \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
- c = old; \
- \
- smp_mb(); \
- \
- c; \
-})
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-
-
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
@@ -472,7 +429,8 @@ static inline long long atomic64_fetch_##op(long long a, atomic64_t *v) \
ATOMIC64_OP_RETURN(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
+#define atomic64_andnot atomic64_andnot
+#define atomic64_fetch_andnot atomic64_fetch_andnot
ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
@@ -559,53 +517,43 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return val;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
- * if (v != u) { v += a; ret = 1} else {ret = 0}
- * Returns 1 iff @v was not @u (i.e. if add actually happened)
+ * Atomically adds @a to @v, if it was not @u.
+ * Returns the old value of @v
*/
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- long long val;
- int op_done;
+ long long old, temp;
smp_mb();
__asm__ __volatile__(
"1: llockd %0, [%2] \n"
- " mov %1, 1 \n"
" brne %L0, %L4, 2f # continue to add since v != u \n"
" breq.d %H0, %H4, 3f # return since v == u \n"
- " mov %1, 0 \n"
"2: \n"
- " add.f %L0, %L0, %L3 \n"
- " adc %H0, %H0, %H3 \n"
- " scondd %0, [%2] \n"
+ " add.f %L1, %L0, %L3 \n"
+ " adc %H1, %H0, %H3 \n"
+ " scondd %1, [%2] \n"
" bnz 1b \n"
"3: \n"
- : "=&r"(val), "=&r" (op_done)
+ : "=&r"(old), "=&r" (temp)
: "r"(&v->counter), "r"(a), "r"(u)
: "cc"); /* memory clobber comes from smp_mb() */
smp_mb();
- return op_done;
+ return old;
}
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
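
With the old value returned, the per-arch one-liner macros deleted above can be supplied once by the generic layer. Roughly, following the generic atomic headers of this era:

/* sketch of the generic derivations that replace the deleted macros */
static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	return atomic64_fetch_add_unless(v, a, u) != u;
}

static inline bool atomic64_inc_not_zero(atomic64_t *v)
{
	return atomic64_add_unless(v, 1, 0);
}
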
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 8486f328cc5d..ff7d3232764a 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -48,7 +48,9 @@
})
/* Largest line length for either L1 or L2 is 128 bytes */
-#define ARCH_DMA_MINALIGN 128
+#define SMP_CACHE_BYTES 128
+#define cache_line_size() SMP_CACHE_BYTES
+#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index d5da2115d78a..03d6bb0f4e13 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -17,8 +17,11 @@
#ifndef __ASM_ARC_UDELAY_H
#define __ASM_ARC_UDELAY_H
+#include <asm-generic/types.h>
#include <asm/param.h> /* HZ */
+extern unsigned long loops_per_jiffy;
+
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index ec36d5b6d435..29f3988c9424 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -234,6 +234,9 @@
POP gp
RESTORE_R12_TO_R0
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld r25, [sp, 12]
+#endif
ld sp, [sp] /* restore original sp */
/* orig_r0, ECR, user_r25 skipped automatically */
.endm
@@ -315,6 +318,9 @@
POP gp
RESTORE_R12_TO_R0
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld r25, [sp, 12]
+#endif
ld sp, [sp] /* restore original sp */
/* orig_r0, ECR, user_r25 skipped automatically */
.endm
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 51597f344a62..302b0db8ea2b 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -86,9 +86,6 @@
POP r1
POP r0
-#ifdef CONFIG_ARC_CURR_IN_REG
- ld r25, [sp, 12]
-#endif
.endm
/*--------------------------------------------------------------
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 2e52d18e6bc7..2c1b479d5aea 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -45,8 +45,6 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned int kprobe_status;
- struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index c28e6c347b49..871f3cb16af9 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -34,9 +34,7 @@ struct machine_desc {
const char *name;
const char **dt_compat;
void (*init_early)(void);
-#ifdef CONFIG_SMP
void (*init_per_cpu)(unsigned int);
-#endif
void (*init_machine)(void);
void (*init_late)(void);
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 109baa06831c..09ddddf71cc5 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -105,7 +105,7 @@ typedef pte_t * pgtable_t;
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
/* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define WANT_PAGE_VIRTUAL 1
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 8ec5599a0957..cf4be70d5892 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -377,7 +377,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
/* Decode a PTE containing swap "identifier "into constituents */
#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13)
+#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
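
The __swp_offset() change is a one-character bug fix: decode must undo the encode shift. A round-trip sketch, assuming the layout implied by the macros (swap type in the low 5 bits, offset starting at bit 13):

/* hypothetical encode/decode pair illustrating the fix */
static unsigned long swp_encode(unsigned long type, unsigned long off)
{
	return (type & 0x1f) | (off << 13);	/* offset packed at bit 13 */
}

static unsigned long swp_decode_offset(unsigned long val)
{
	return val >> 13;	/* was "val << 13", which returned garbage */
}
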
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 538b36afe89e..62b185057c04 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -31,10 +31,10 @@ void __init init_IRQ(void)
/* a SMP H/w block could do IPI IRQ request here */
if (plat_smp_ops.init_per_cpu)
plat_smp_ops.init_per_cpu(smp_processor_id());
+#endif
if (machine_desc->init_per_cpu)
machine_desc->init_per_cpu(smp_processor_id());
-#endif
}
/*
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index 42b05046fad9..df35d4c0b0b8 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -225,24 +225,18 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
/* If we have no pre-handler or it returned 0, we continue with
* normal processing. If we have a pre-handler and it returned
- * non-zero - which is expected from setjmp_pre_handler for
- * jprobe, we return without single stepping and leave that to
- * the break-handler which is invoked by a kprobe from
- * jprobe_return
+ * non-zero - which means the user handler set up the registers to
+ * resume at another instruction - we must skip the single stepping.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
+ } else {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
}
return 1;
- } else if (kprobe_running()) {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- setup_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
- return 1;
- }
}
/* no_kprobe: */
@@ -386,38 +380,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long sp_addr = regs->sp;
-
- kcb->jprobe_saved_regs = *regs;
- memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
- regs->ret = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- __asm__ __volatile__("unimp_s");
- return;
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long sp_addr;
-
- *regs = kcb->jprobe_saved_regs;
- sp_addr = regs->sp;
- memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
- preempt_enable_no_resched();
-
- return 1;
-}
-
static void __used kretprobe_trampoline_holder(void)
{
__asm__ __volatile__(".global kretprobe_trampoline\n"
@@ -483,9 +445,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_assert(ri, orig_ret_address, trampoline_address);
regs->ret = orig_ret_address;
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
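
With jprobes gone, the pre-handler return value alone decides whether the core single-steps. A sketch of the new contract; the handler, predicate, and fixup names below are hypothetical:

static bool want_to_divert(struct pt_regs *regs);	/* hypothetical */
static void my_fixup(void);				/* hypothetical */

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	if (want_to_divert(regs)) {
		/* resume at a different instruction */
		regs->ret = (unsigned long)my_fixup;
		return 1;	/* non-zero: core must skip single-stepping */
	}
	return 0;		/* zero: core single-steps the probed insn */
}
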
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 5ac3b547453f..4674541eba3f 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -47,7 +47,8 @@ SYSCALL_DEFINE0(arc_gettls)
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
struct pt_regs *regs = current_pt_regs();
- int uval = -EFAULT;
+ u32 uval;
+ int ret;
/*
* This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -60,23 +61,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
/* Z indicates to userspace if operation succeded */
regs->status32 &= ~STATUS_Z_MASK;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
+ ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr));
+ if (!ret)
+ goto fail;
+again:
preempt_disable();
- if (__get_user(uval, uaddr))
- goto done;
+ ret = __get_user(uval, uaddr);
+ if (ret)
+ goto fault;
- if (uval == expected) {
- if (!__put_user(new, uaddr))
- regs->status32 |= STATUS_Z_MASK;
- }
+ if (uval != expected)
+ goto out;
-done:
- preempt_enable();
+ ret = __put_user(new, uaddr);
+ if (ret)
+ goto fault;
+
+ regs->status32 |= STATUS_Z_MASK;
+out:
+ preempt_enable();
return uval;
+
+fault:
+ preempt_enable();
+
+ if (unlikely(ret != -EFAULT))
+ goto fail;
+
+ down_read(&current->mm->mmap_sem);
+ ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+ FAULT_FLAG_WRITE, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (likely(!ret))
+ goto again;
+
+fail:
+ force_sig(SIGSEGV, current);
+ return ret;
}
#ifdef CONFIG_ISA_ARCV2
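
The rework above drops the silent -EFAULT and adopts the fixup-and-retry idiom: a __get_user()/__put_user() under preempt_disable() may fail simply because the page is not resident, in which case fixup_user_fault() faults it in and the access is retried. The skeleton, with a hypothetical wrapper name:

static int uaccess_retry_sketch(u32 __user *uaddr, u32 *uval)
{
	int ret;

again:
	preempt_disable();
	ret = __get_user(*uval, uaddr);	/* may fault: page not present */
	preempt_enable();

	if (ret == -EFAULT) {
		down_read(&current->mm->mmap_sem);
		ret = fixup_user_fault(current, current->mm,
				       (unsigned long)uaddr,
				       FAULT_FLAG_WRITE, NULL);
		up_read(&current->mm->mmap_sem);
		if (!ret)
			goto again;	/* page faulted in; retry the access */
	}
	return ret;
}
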
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9dbe645ee127..25c631942500 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -1038,7 +1038,7 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
unsigned long pfn)
{
- unsigned int paddr = pfn << PAGE_SHIFT;
+ phys_addr_t paddr = pfn << PAGE_SHIFT;
u_vaddr &= PAGE_MASK;
@@ -1058,8 +1058,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
unsigned long u_vaddr)
{
/* TBD: do we really need to clear the kernel mapping */
- __flush_dcache_page(page_address(page), u_vaddr);
- __flush_dcache_page(page_address(page), page_address(page));
+ __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
+ __flush_dcache_page((phys_addr_t)page_address(page),
+ (phys_addr_t)page_address(page));
}
@@ -1246,6 +1247,16 @@ void __init arc_cache_init_master(void)
}
}
+ /*
+ * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
+ * than or equal to any cache line length.
+ */
+ BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
+ "SMP_CACHE_BYTES must be >= any cache line length");
+ if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
+ panic("L2 Cache line [%d] > kernel Config [%d]\n",
+ l2_line_sz, SMP_CACHE_BYTES);
+
/* Note that SLC disable not formally supported till HS 3.0 */
if (is_isa_arcv2() && l2_line_sz && !slc_enable)
arc_slc_disable();
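
The two checks just added differ for a reason the diff does not state: BUILD_BUG_ON_MSG() needs both operands to be compile-time constants, while l2_line_sz is probed from hardware at boot, so it can only be rejected with a runtime panic(). Restated as a sketch:

/* compile-time check: both sides are constants known to the compiler */
BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
		 "SMP_CACHE_BYTES must be >= any cache line length");

/* run-time check: l2_line_sz only exists after probing the hardware */
if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
	panic("L2 Cache line [%d] > kernel Config [%d]\n",
	      l2_line_sz, SMP_CACHE_BYTES);
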
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 8c1071840979..ec47e6079f5d 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -129,14 +129,59 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
+/*
+ * Cache operations depending on function and direction argument, inspired by
+ * https://lkml.org/lkml/2018/5/18/979
+ * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
+ * dma-mapping: provide a generic dma-noncoherent implementation)"
+ *
+ *            | map == for_device              | unmap == for_cpu
+ *  ----------|--------------------------------|----------------------------
+ *  TO_DEV    | writeback        writeback     | none          none
+ *  FROM_DEV  | invalidate       invalidate    | invalidate*   invalidate*
+ *  BIDIR     | writeback+inv    writeback+inv | invalidate    invalidate
+ *
+ * [*] needed for CPU speculative prefetches
+ *
+ * NOTE: we don't check the validity of direction argument as it is done in
+ * upper layer functions (in include/linux/dma-mapping.h)
+ */
+
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
- dma_cache_wback(paddr, size);
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+
+ default:
+ break;
+ }
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
- dma_cache_inv(paddr, size);
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ break;
+
+ /* FROM_DEVICE: invalidate needed only for speculative CPU prefetch */
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ dma_cache_inv(paddr, size);
+ break;
+
+ default:
+ break;
+ }
}
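
How the table above is exercised by a driver: the dma-noncoherent core calls arch_sync_dma_for_device() on map and arch_sync_dma_for_cpu() on unmap. An illustrative device-to-memory sequence; the function and variable names on the driver side are arbitrary:

static void rx_one_sketch(struct device *dev, void *buf, size_t len)
{
	/* map == for_device: FROM_DEVICE row -> cache invalidate */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... start the transfer, wait for the device to finish ... */

	/* unmap == for_cpu: invalidate again, discarding any lines the
	 * CPU speculatively prefetched while the DMA was in flight */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}
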
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index b884bbd6f354..db6913094be3 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -15,6 +15,7 @@
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
+#include <linux/mm_types.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
@@ -66,7 +67,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
siginfo_t info;
- int fault, ret;
+ int ret;
+ vm_fault_t fault;
int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
index 0c7d11022d0f..4f6a1673b3a6 100644
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -21,6 +21,7 @@
#error "Incorrect ctop.h include"
#endif
+#include <linux/types.h>
#include <soc/nps/common.h>
/* core auxiliary registers */
@@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
};
/* AUX registers definition */
+struct nps_host_reg_aux_dpc {
+ union {
+ struct {
+ u32 ien:1, men:1, hen:1, reserved:29;
+ };
+ u32 value;
+ };
+};
+
struct nps_host_reg_aux_udmc {
union {
struct {
diff --git a/arch/arc/plat-eznps/mtm.c b/arch/arc/plat-eznps/mtm.c
index 2388de3d09ef..ed0077ef666e 100644
--- a/arch/arc/plat-eznps/mtm.c
+++ b/arch/arc/plat-eznps/mtm.c
@@ -15,6 +15,8 @@
*/
#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
@@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
/* Verify and set the value of the mtm hs counter */
static int __init set_mtm_hs_ctr(char *ctr_str)
{
- long hs_ctr;
+ int hs_ctr;
int ret;
- ret = kstrtol(ctr_str, 0, &hs_ctr);
+ ret = kstrtoint(ctr_str, 0, &hs_ctr);
if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index 19ab3cf98f0f..9356753c2ed8 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -7,5 +7,8 @@
menuconfig ARC_SOC_HSDK
bool "ARC HS Development Kit SOC"
+ depends on ISA_ARCV2
+ select ARC_HAS_ACCL_REGS
select CLK_HSDK
select RESET_HSDK
+ select MIGHT_HAVE_PCI
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 2958aedb649a..2588b842407c 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -42,6 +42,66 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
+#define HSDK_GPIO_INTC (ARC_PERIPHERAL_BASE + 0x3000)
+
+static void __init hsdk_enable_gpio_intc_wire(void)
+{
+ /*
+ * Peripherals on CPU Card are wired to cpu intc via intermediate
+ * DW APB GPIO blocks (mainly for debouncing)
+ *
+ * ---------------------
+ * | snps,archs-intc |
+ * ---------------------
+ * |
+ * ----------------------
+ * | snps,archs-idu-intc |
+ * ----------------------
+ * | | | | |
+ * | [eth] [USB] [... other peripherals]
+ * |
+ * -------------------
+ * | snps,dw-apb-intc |
+ * -------------------
+ * | | | |
+ * [Bt] [HAPS] [... other peripherals]
+ *
+ * The current implementation of the "irq-dw-apb-ictl" driver doesn't
+ * work well with stacked INTCs. In particular, a problem arises if its
+ * master INTC is not yet instantiated. See discussion here -
+ * https://lkml.org/lkml/2015/3/4/755
+ *
+ * So set up the first gpio block as a passive pass-through and hide it
+ * from the DT hardware topology - connect the intc directly to the cpu
+ * intc. The GPIO "wire" nevertheless needs to be initialized here.
+ *
+ * One side advantage is that peripheral interrupt handling avoids one
+ * nested intc ISR hop.
+ *
+ * According to HSDK User's Manual [1], "Table 2 Interrupt Mapping"
+ * we have the following GPIO input lines used as sources of interrupt:
+ * - GPIO[0] - Bluetooth interrupt of RS9113 module
+ * - GPIO[2] - HAPS interrupt (on HapsTrak 3 connector)
+ * - GPIO[3] - Audio codec (MAX9880A) interrupt
+ * - GPIO[8-23] - Available on Arduino and PMOD_x headers
+ * For now there's no use of the Arduino and PMOD_x headers in the
+ * Linux use-case, so we only enable lines 0, 2 and 3.
+ *
+ * [1] https://github.com/foss-for-synopsys-dwc-arc-processors/ARC-Development-Systems-Forum/wiki/docs/ARC_HSDK_User_Guide.pdf
+ */
+#define GPIO_INTEN (HSDK_GPIO_INTC + 0x30)
+#define GPIO_INTMASK (HSDK_GPIO_INTC + 0x34)
+#define GPIO_INTTYPE_LEVEL (HSDK_GPIO_INTC + 0x38)
+#define GPIO_INT_POLARITY (HSDK_GPIO_INTC + 0x3c)
+#define GPIO_INT_CONNECTED_MASK 0x0d
+
+ iowrite32(0xffffffff, (void __iomem *) GPIO_INTMASK);
+ iowrite32(~GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTMASK);
+ iowrite32(0x00000000, (void __iomem *) GPIO_INTTYPE_LEVEL);
+ iowrite32(0xffffffff, (void __iomem *) GPIO_INT_POLARITY);
+ iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
+}
+
static void __init hsdk_init_early(void)
{
/*
@@ -62,6 +122,8 @@ static void __init hsdk_init_early(void)
* minimum possible div-by-2.
*/
iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
+
+ hsdk_enable_gpio_intc_wire();
}
static const char *hsdk_compat[] __initconst = {
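
The GPIO_INT_CONNECTED_MASK value 0x0d used above is just the bitmask of the three wired lines the comment lists; spelled out:

/* lines 0 (Bluetooth), 2 (HAPS) and 3 (audio codec) are connected */
#define GPIO_LINE(n)		(1U << (n))
#define CONNECTED_MASK		(GPIO_LINE(0) | GPIO_LINE(2) | GPIO_LINE(3))
					/* == 0b1101 == 0x0d */
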
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 843edfd000be..e8cd55a5b04c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -9,6 +9,7 @@ config ARM
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
+ select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_SET_MEMORY
@@ -298,10 +299,6 @@ config PGTABLE_LEVELS
default 3 if ARM_LPAE
default 2
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "System Type"
config MMU
@@ -337,8 +334,8 @@ config ARCH_MULTIPLATFORM
select TIMER_OF
select COMMON_CLK
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select MIGHT_HAVE_PCI
- select MULTI_IRQ_HANDLER
select PCI_DOMAINS if PCI
select SPARSE_IRQ
select USE_OF
@@ -465,9 +462,9 @@ config ARCH_DOVE
bool "Marvell Dove"
select CPU_PJ4
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select MIGHT_HAVE_PCI
- select MULTI_IRQ_HANDLER
select MVEBU_MBUS
select PINCTRL
select PINCTRL_DOVE
@@ -512,8 +509,8 @@ config ARCH_LPC32XX
select COMMON_CLK
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
- select MULTI_IRQ_HANDLER
select SPARSE_IRQ
select USE_OF
help
@@ -532,11 +529,11 @@ config ARCH_PXA
select TIMER_OF
select CPU_XSCALE if !CPU_XSC3
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIO_PXA
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
select PLAT_PXA
select SPARSE_IRQ
help
@@ -572,11 +569,11 @@ config ARCH_SA1100
select CPU_FREQ
select CPU_SA1100
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
select ISA
- select MULTI_IRQ_HANDLER
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
help
@@ -590,10 +587,10 @@ config ARCH_S3C24XX
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
select GPIOLIB
+ select GENERIC_IRQ_MULTI_HANDLER
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
- select MULTI_IRQ_HANDLER
select NEED_MACH_IO_H
select SAMSUNG_ATAGS
select USE_OF
@@ -606,13 +603,16 @@ config ARCH_S3C24XX
config ARCH_DAVINCI
bool "TI DaVinci"
select ARCH_HAS_HOLES_MEMORYMODEL
- select CLKDEV_LOOKUP
+ select COMMON_CLK
select CPU_ARM926T
select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
select GPIOLIB
select HAVE_IDE
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM && OF
+ select RESET_CONTROLLER
select USE_OF
select ZONE_DMA
help
@@ -627,10 +627,10 @@ config ARCH_OMAP1
select CLKSRC_MMIO
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
select NEED_MACH_IO_H if PCCARD
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
@@ -921,11 +921,6 @@ config IWMMXT
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
-config MULTI_IRQ_HANDLER
- bool
- help
- Allow each machine to specify it's own IRQ handler at run time.
-
if !MMU
source "arch/arm/Kconfig-nommu"
endif
@@ -1485,8 +1480,6 @@ config ARCH_NR_GPIO
If unsure, leave the default value.
-source kernel/Kconfig.preempt
-
config HZ_FIXED
int
default 200 if ARCH_EBSA110
@@ -1721,8 +1714,6 @@ config ARM_MODULE_PLTS
Disabling this is usually safe for small single-platform
configurations. If unsure, say y.
-source "mm/Kconfig"
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "12" if SOC_AM33XX
@@ -2175,12 +2166,6 @@ config KERNEL_MODE_NEON
endmenu
-menu "Userspace binary formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
menu "Power management options"
source "kernel/power/Kconfig"
@@ -2201,23 +2186,10 @@ config ARCH_HIBERNATION_POSSIBLE
endmenu
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "drivers/firmware/Kconfig"
-source "fs/Kconfig"
-
-source "arch/arm/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
if CRYPTO
source "arch/arm/crypto/Kconfig"
endif
-source "lib/Kconfig"
-
source "arch/arm/kvm/Kconfig"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 693f84392f1b..f6fcb8a79889 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config ARM_PTDUMP_CORE
def_bool n
@@ -207,6 +204,14 @@ choice
depends on ARCH_BCM_HR2
select DEBUG_UART_8250
+ config DEBUG_BCM_IPROC_UART3
+ bool "Kernel low-level debugging on BCM IPROC UART3"
+ depends on ARCH_BCM_CYGNUS
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the third serial port on these devices.
+
config DEBUG_BCM_KONA_UART
bool "Kernel low-level debugging messages via BCM KONA UART"
depends on ARCH_BCM_MOBILE
@@ -1565,14 +1570,15 @@ config DEBUG_UART_PHYS
default 0x18000400 if DEBUG_BCM_HR2
default 0x18010000 if DEBUG_SIRFATLAS7_UART0
default 0x18020000 if DEBUG_SIRFATLAS7_UART1
+ default 0x18023000 if DEBUG_BCM_IPROC_UART3
default 0x1c090000 if DEBUG_VEXPRESS_UART0_RS1
default 0x20001000 if DEBUG_HIP01_UART
default 0x20060000 if DEBUG_RK29_UART0
default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
default 0x20201000 if DEBUG_BCM2835
- default 0x3f201000 if DEBUG_BCM2836
default 0x3e000000 if DEBUG_BCM_KONA_UART
+ default 0x3f201000 if DEBUG_BCM2836
default 0x4000e400 if DEBUG_LL_UART_EFM32
default 0x40028000 if DEBUG_AT91_SAMV7_USART1
default 0x40081000 if DEBUG_LPC18XX_UART0
@@ -1685,6 +1691,7 @@ config DEBUG_UART_VIRT
default 0xf1002000 if DEBUG_MT8127_UART0
default 0xf1006000 if DEBUG_MT6589_UART0
default 0xf1009000 if DEBUG_MT8135_UART3
+ default 0xf1023000 if DEBUG_BCM_IPROC_UART3
default 0xf11f1000 if DEBUG_VERSATILE
default 0xf1600000 if DEBUG_INTEGRATOR
default 0xf1c28000 if DEBUG_SUNXI_UART0
@@ -1800,7 +1807,7 @@ config DEBUG_UART_8250_WORD
DEBUG_KEYSTONE_UART0 || DEBUG_KEYSTONE_UART1 || \
DEBUG_ALPINE_UART0 || \
DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
- DEBUG_DAVINCI_DA8XX_UART2 || \
+ DEBUG_DAVINCI_DA8XX_UART2 || DEBUG_BCM_IPROC_UART3 || \
DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2
config DEBUG_UART_8250_PALMCHIP
@@ -1863,5 +1870,3 @@ config PID_IN_CONTEXTIDR
are planning to use hardware trace tools with this kernel.
source "drivers/hwtracing/coresight/Kconfig"
-
-endmenu
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index fc26c3d7b9b6..ed94cf7e3157 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -10,9 +10,6 @@
#
# Copyright (C) 1995-2001 by Russell King
-# Ensure linker flags are correct
-LDFLAGS :=
-
LDFLAGS_vmlinux :=-p --no-undefined -X --pic-veneer
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
@@ -46,12 +43,12 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__ARMEB__
AS += -EB
-LD += -EB
+LDFLAGS += -EB
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__ARMEL__
AS += -EL
-LD += -EL
+LDFLAGS += -EL
endif
#
@@ -222,7 +219,6 @@ machine-$(CONFIG_ARCH_TANGO) += tango
machine-$(CONFIG_ARCH_TEGRA) += tegra
machine-$(CONFIG_ARCH_U300) += u300
machine-$(CONFIG_ARCH_U8500) += ux500
-machine-$(CONFIG_ARCH_UNIPHIER) += uniphier
machine-$(CONFIG_ARCH_VERSATILE) += versatile
machine-$(CONFIG_ARCH_VEXPRESS) += vexpress
machine-$(CONFIG_ARCH_VT8500) += vt8500
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index f9e8667f5886..73b514dddf65 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -168,7 +168,6 @@
AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4) /* mcasp0_aclkr.mmc0_sdwp */
>;
};
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index ca294914bbb1..23ea381d363f 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -39,6 +39,8 @@
ti,davinci-ctrl-ram-size = <0x2000>;
ti,davinci-rmii-en = /bits/ 8 <1>;
local-mac-address = [ 00 00 00 00 00 00 ];
+ clocks = <&emac_ick>;
+ clock-names = "ick";
};
davinci_mdio: ethernet@5c030000 {
@@ -49,6 +51,8 @@
bus_freq = <1000000>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&emac_fck>;
+ clock-names = "fck";
};
uart4: serial@4809e000 {
@@ -87,6 +91,11 @@
};
};
+/* Table 5-79 of the TRM shows 480ab000 is reserved */
+&usb_otg_hs {
+ status = "disabled";
+};
+
&iva {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 440351ad0b80..d4be3fd0b6f4 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -610,6 +610,8 @@
touchscreen-size-x = <480>;
touchscreen-size-y = <272>;
+
+ wakeup-source;
};
tlv320aic3106: tlv320aic3106@1b {
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 18edc9bc7927..929459c42760 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -547,7 +547,7 @@
thermal: thermal@e8078 {
compatible = "marvell,armada380-thermal";
- reg = <0xe4078 0x4>, <0xe4074 0x4>;
+ reg = <0xe4078 0x4>, <0xe4070 0x8>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts b/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
index 389f5f83bef9..0b9b37d4d6ef 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
@@ -52,6 +52,7 @@
compatible = "fsi-master-gpio", "fsi-master";
#address-cells = <2>;
#size-cells = <0>;
+ no-gpio-delays;
clock-gpios = <&gpio ASPEED_GPIO(AA, 0) GPIO_ACTIVE_HIGH>;
data-gpios = <&gpio ASPEED_GPIO(AA, 2) GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts b/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
index 78a511e6e482..656036106001 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
@@ -153,6 +153,7 @@
compatible = "fsi-master-gpio", "fsi-master";
#address-cells = <2>;
#size-cells = <0>;
+ no-gpio-delays;
clock-gpios = <&gpio ASPEED_GPIO(AA, 0) GPIO_ACTIVE_HIGH>;
data-gpios = <&gpio ASPEED_GPIO(E, 0) GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts b/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts
index ccbf645ab84d..2c5aa90a546d 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts
@@ -91,6 +91,7 @@
compatible = "fsi-master-gpio", "fsi-master";
#address-cells = <2>;
#size-cells = <0>;
+ no-gpio-delays;
trans-gpios = <&gpio ASPEED_GPIO(O, 6) GPIO_ACTIVE_HIGH>;
enable-gpios = <&gpio ASPEED_GPIO(D, 0) GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 9dcd14edc202..e03495a799ce 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1580,7 +1580,6 @@
dr_mode = "otg";
snps,dis_u3_susphy_quirk;
snps,dis_u2_susphy_quirk;
- snps,dis_metastability_quirk;
};
};
@@ -1608,6 +1607,7 @@
dr_mode = "otg";
snps,dis_u3_susphy_quirk;
snps,dis_u2_susphy_quirk;
+ snps,dis_metastability_quirk;
};
};
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index fb5c954ab95a..6f258b50eb44 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -156,6 +156,100 @@
};
};
+ /* This is a RealTek RTL8366RB switch and PHY using SMI over GPIO */
+ switch {
+ compatible = "realtek,rtl8366rb";
+ /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */
+ mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
+ mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
+ realtek,disable-leds;
+
+ switch_intc: interrupt-controller {
+ /* GPIO 15 provides the interrupt */
+ interrupt-parent = <&gpio0>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ phy-handle = <&phy0>;
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ phy-handle = <&phy1>;
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-handle = <&phy2>;
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ phy-handle = <&phy3>;
+ };
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ phy-handle = <&phy4>;
+ };
+ rtl8366rb_cpu_port: port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ };
+
+ mdio {
+ compatible = "realtek,smi-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: phy@0 {
+ reg = <0>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <0>;
+ };
+ phy1: phy@1 {
+ reg = <1>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <1>;
+ };
+ phy2: phy@2 {
+ reg = <2>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <2>;
+ };
+ phy3: phy@3 {
+ reg = <3>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <3>;
+ };
+ phy4: phy@4 {
+ reg = <4>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <12>;
+ };
+ };
+ };
+
soc {
flash@30000000 {
/*
@@ -223,10 +317,12 @@
* gpio0bgrp cover line 7 used by WPS LED
* gpio0cgrp cover line 8, 13 used by keys
* and 11, 12 used by the HD LEDs
+ * and line 14, 15 used by RTL8366
+ * RESET and phy ready
* gpio0egrp cover line 16 used by VDISP
* gpio0fgrp cover line 17 used by TK IRQ
* gpio0ggrp cover line 20 used by panel CS
- * gpio0hgrp cover line 21,22 used by RTL8366RB
+ * gpio0hgrp cover line 21,22 used by RTL8366RB MDIO
*/
gpio0_default_pins: pinctrl-gpio0 {
mux {
@@ -250,6 +346,32 @@
groups = "gpio1bgrp";
};
};
+ pinctrl-gmii {
+ mux {
+ function = "gmii";
+ groups = "gmii_gmac0_grp";
+ };
+ conf0 {
+ pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV",
+ "Y7 GMAC0 RXC", "Y11 GMAC1 RXC",
+ "T8 GMAC0 TXEN", "W11 GMAC1 TXEN",
+ "U8 GMAC0 TXC", "V11 GMAC1 TXC",
+ "W8 GMAC0 RXD0", "V9 GMAC0 RXD1",
+ "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3",
+ "T7 GMAC0 TXD0", "U6 GMAC0 TXD1",
+ "V7 GMAC0 TXD2", "U7 GMAC0 TXD3",
+ "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1",
+ "T11 GMAC1 RXD2", "W12 GMAC1 RXD3",
+ "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1",
+ "W10 GMAC1 TXD2", "T9 GMAC1 TXD3";
+ skew-delay = <7>;
+ };
+ /* Set up drive strength on GMAC0 to 16 mA */
+ conf1 {
+ groups = "gmii_gmac0_grp";
+ drive-strength = <16>;
+ };
+ };
};
};
@@ -291,6 +413,22 @@
<0x6000 0 0 4 &pci_intc 2>;
};
+ ethernet@60000000 {
+ status = "okay";
+
+ ethernet-port@0 {
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ ethernet-port@1 {
+ /* Not used on this platform */
+ };
+ };
+
ata@63000000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index df9eca94d812..8a878687197b 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -770,7 +770,7 @@
pinctrl_ts: tsgrp {
fsl,pins = <
- MX51_PAD_CSI1_D8__GPIO3_12 0x85
+ MX51_PAD_CSI1_D8__GPIO3_12 0x04
MX51_PAD_CSI1_D9__GPIO3_13 0x85
>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
index 19a075aee19e..f14df0baf2ab 100644
--- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
@@ -692,7 +692,7 @@
dsa,member = <0 0>;
eeprom-length = <512>;
interrupt-parent = <&gpio6>;
- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index bdf73cbcec3a..e7c3c563ff8f 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -159,13 +159,7 @@
dais = <&mcbsp2_port>, <&mcbsp3_port>;
};
-};
-
-&dss {
- status = "okay";
-};
-&gpio6 {
pwm8: dmtimer-pwm-8 {
pinctrl-names = "default";
pinctrl-0 = <&vibrator_direction_pin>;
@@ -192,7 +186,10 @@
pwm-names = "enable", "direction";
direction-duty-cycle-ns = <10000000>;
};
+};
+&dss {
+ status = "okay";
};
&dsi1 {
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 85b2369d6b20..27ea6dfcf2f2 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -155,8 +155,8 @@ CONFIG_THERMAL_EMULATION=y
CONFIG_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_MFD_CROS_EC=y
-CONFIG_MFD_CROS_EC_I2C=y
-CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_CROS_EC_I2C=y
+CONFIG_CROS_EC_SPI=y
CONFIG_MFD_MAX14577=y
CONFIG_MFD_MAX77686=y
CONFIG_MFD_MAX77693=y
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 054591dc9a00..4cd2f4a2bff4 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -141,9 +141,11 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=m
+CONFIG_USB_ULPI_BUS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index f70507ab91ee..200ebda47e0c 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -302,6 +302,7 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -338,6 +339,7 @@ CONFIG_USB_GADGETFS=m
CONFIG_USB_FUNCTIONFS=m
CONFIG_USB_MASS_STORAGE=m
CONFIG_USB_G_SERIAL=m
+CONFIG_USB_ULPI_BUS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 8f6be1982545..be732f382418 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -490,8 +490,8 @@ CONFIG_MFD_AC100=y
CONFIG_MFD_AXP20X_I2C=y
CONFIG_MFD_AXP20X_RSB=y
CONFIG_MFD_CROS_EC=m
-CONFIG_MFD_CROS_EC_I2C=m
-CONFIG_MFD_CROS_EC_SPI=m
+CONFIG_CROS_EC_I2C=m
+CONFIG_CROS_EC_SPI=m
CONFIG_MFD_DA9063=m
CONFIG_MFD_MAX14577=y
CONFIG_MFD_MAX77686=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 5655a1cee87d..6bb506edb1f5 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -398,8 +398,8 @@ CONFIG_MFD_AS3711=y
CONFIG_MFD_BCM590XX=m
CONFIG_MFD_AXP20X=y
CONFIG_MFD_CROS_EC=m
-CONFIG_MFD_CROS_EC_I2C=m
-CONFIG_MFD_CROS_EC_SPI=m
+CONFIG_CROS_EC_I2C=m
+CONFIG_CROS_EC_SPI=m
CONFIG_MFD_ASIC3=y
CONFIG_PMIC_DA903X=y
CONFIG_HTC_EGPIO=y
diff --git a/arch/arm/crypto/chacha20-neon-core.S b/arch/arm/crypto/chacha20-neon-core.S
index 3fecb2124c35..451a849ad518 100644
--- a/arch/arm/crypto/chacha20-neon-core.S
+++ b/arch/arm/crypto/chacha20-neon-core.S
@@ -51,9 +51,8 @@ ENTRY(chacha20_block_xor_neon)
.Ldoubleround:
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
- veor q4, q3, q0
- vshl.u32 q3, q4, #16
- vsri.u32 q3, q4, #16
+ veor q3, q3, q0
+ vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
@@ -82,9 +81,8 @@ ENTRY(chacha20_block_xor_neon)
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
- veor q4, q3, q0
- vshl.u32 q3, q4, #16
- vsri.u32 q3, q4, #16
+ veor q3, q3, q0
+ vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
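
An aside on the two rotation hunks above: rotating a 32-bit lane left by 16 is exactly a halfword swap, which vrev32.16 performs per lane in one instruction, replacing the veor-into-scratch/vshl/vsri sequence and freeing q4. A standalone C sketch of the equivalence (helper names invented for illustration):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* What vrev32.16 does to each 32-bit lane: swap the two halfwords. */
    static uint32_t halfword_swap(uint32_t x)
    {
        return (x >> 16) | (x << 16);
    }

    int main(void)
    {
        assert(rotl32(0x12345678u, 16) == 0x56781234u);
        assert(halfword_swap(0x12345678u) == 0x56781234u);
        return 0;
    }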
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index d9bb52cae2ac..8930fc4e7c22 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -152,7 +152,7 @@ static struct shash_alg ghash_alg = {
.cra_name = "__ghash",
.cra_driver_name = "__driver-ghash-ce",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_key),
.cra_module = THIS_MODULE,
@@ -308,9 +308,8 @@ static struct ahash_alg ghash_async_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-ce",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_ctxsize = sizeof(struct ghash_async_ctx),
.cra_module = THIS_MODULE,
.cra_init = ghash_async_init_tfm,
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index 555f72b5e659..b732522e20f8 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -75,7 +75,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 6fc73bf8766d..98ab8239f919 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -67,7 +67,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-asm",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 4e22f122f966..d15e0ea2c95e 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -83,7 +83,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-neon",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index df4dcef054ae..1211a5c129fc 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -78,7 +78,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ce",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -93,7 +92,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ce",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
index a84e869ef900..bf8ccff2c9d0 100644
--- a/arch/arm/crypto/sha256_glue.c
+++ b/arch/arm/crypto/sha256_glue.c
@@ -71,7 +71,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-asm",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -86,7 +85,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-asm",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 39ccd658817e..9bbee56fbdc8 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -79,7 +79,6 @@ struct shash_alg sha256_neon_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-neon",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -94,7 +93,6 @@ struct shash_alg sha256_neon_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-neon",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 269a394e4a53..86540cd4a6fa 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -63,7 +63,6 @@ static struct shash_alg sha512_arm_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-arm",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -78,7 +77,6 @@ static struct shash_alg sha512_arm_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-arm",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index 32693684a3ab..8a5642b41fd6 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -75,7 +75,6 @@ struct shash_alg sha512_neon_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-neon",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -91,7 +90,6 @@ struct shash_alg sha512_neon_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-neon",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
index 3c1e203e53b9..57caa742016e 100644
--- a/arch/arm/crypto/speck-neon-core.S
+++ b/arch/arm/crypto/speck-neon-core.S
@@ -272,9 +272,11 @@
* Allocate stack space to store 128 bytes worth of tweaks. For
* performance, this space is aligned to a 16-byte boundary so that we
* can use the load/store instructions that declare 16-byte alignment.
+ * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
*/
- sub sp, #128
- bic sp, #0xf
+ sub r12, sp, #128
+ bic r12, #0xf
+ mov sp, r12
.if \n == 64
// Load first tweak
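
For context on the hunk above: Thumb2 cannot encode bic with sp as the register operand, so the frame reservation and 16-byte alignment are computed in r12 and only then moved into sp. The same align-down arithmetic in portable C (an illustrative helper, assuming a downward-growing stack):

    #include <stdint.h>

    /* Mirrors: sub r12, sp, #128 / bic r12, #0xf / mov sp, r12 */
    static inline uintptr_t align_down_16(uintptr_t sp, uintptr_t reserve)
    {
        return (sp - reserve) & ~(uintptr_t)0xf;
    }

    int main(void)
    {
        return align_down_16(0x1009, 128) == 0xf80 ? 0 : 1;
    }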
diff --git a/arch/arm/firmware/Makefile b/arch/arm/firmware/Makefile
index a71f16536b6c..6e41336b0bc4 100644
--- a/arch/arm/firmware/Makefile
+++ b/arch/arm/firmware/Makefile
@@ -1 +1,4 @@
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
+
+# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc
+KCOV_INSTRUMENT := n
diff --git a/arch/arm/firmware/trusted_foundations.c b/arch/arm/firmware/trusted_foundations.c
index 3fb1b5a1dce9..689e6565abfc 100644
--- a/arch/arm/firmware/trusted_foundations.c
+++ b/arch/arm/firmware/trusted_foundations.c
@@ -31,21 +31,25 @@
static unsigned long cpu_boot_addr;
-static void __naked tf_generic_smc(u32 type, u32 arg1, u32 arg2)
+static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
{
+ register u32 r0 asm("r0") = type;
+ register u32 r1 asm("r1") = arg1;
+ register u32 r2 asm("r2") = arg2;
+
asm volatile(
".arch_extension sec\n\t"
- "stmfd sp!, {r4 - r11, lr}\n\t"
+ "stmfd sp!, {r4 - r11}\n\t"
__asmeq("%0", "r0")
__asmeq("%1", "r1")
__asmeq("%2", "r2")
"mov r3, #0\n\t"
"mov r4, #0\n\t"
"smc #0\n\t"
- "ldmfd sp!, {r4 - r11, pc}"
+ "ldmfd sp!, {r4 - r11}\n\t"
:
- : "r" (type), "r" (arg1), "r" (arg2)
- : "memory");
+ : "r" (r0), "r" (r1), "r" (r2)
+ : "memory", "r3", "r12", "lr");
}
static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
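
The rewrite above exists because __naked functions with C arguments are fragile: the compiler gives no guarantee the arguments are still in r0-r2 when the asm runs. Explicit register variables pin each argument, and lr moves to the clobber list since it is no longer saved across the smc. The pattern, reduced to one argument (a sketch that only builds for 32-bit ARM, not a drop-in SMC wrapper):

    /* Pin the argument and return value to r0; clobber what the secure
     * monitor may trash instead of relying on a naked prologue. */
    static inline unsigned long smc_sketch(unsigned long type)
    {
        register unsigned long r0 asm("r0") = type;

        asm volatile(".arch_extension sec\n\t"
                     "smc #0"
                     : "+r" (r0)
                     :
                     : "memory", "r1", "r2", "r3", "r12", "lr");
        return r0;
    }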
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 0cd4dccbae78..b17ee03d280b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -460,6 +460,10 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
adds \tmp, \addr, #\size - 1
sbcccs \tmp, \tmp, \limit
bcs \bad
+#ifdef CONFIG_CPU_SPECTRE
+ movcs \addr, #0
+ csdb
+#endif
#endif
.endm
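
The movcs/csdb pair added above is a Spectre-v1 mitigation: when the range check fails, the address register is forced to zero before csdb serialises speculation, so a mispredicted bcs cannot speculatively dereference an out-of-range user pointer. The same idea as branch-free C masking (a sketch in the spirit of the kernel's array_index_nospec(), not its definition; unlike the asm, the compiler is not obliged to keep this branchless):

    /* Return addr if [addr, addr + size) stays at or below limit,
     * otherwise 0, using an all-ones/all-zeros mask instead of a branch. */
    static inline unsigned long mask_user_addr(unsigned long addr,
                                               unsigned long size,
                                               unsigned long limit)
    {
        unsigned long last = addr + size - 1;
        unsigned long ok = (unsigned long)((last >= addr) & (last <= limit));

        return addr & -ok;
    }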
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 66d0e215a773..f74756641410 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -130,7 +130,7 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
@@ -156,6 +156,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return oldval;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#else /* ARM_ARCH_6 */
@@ -215,15 +216,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
-
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
- c = old;
- return c;
-}
+#define atomic_fetch_andnot atomic_fetch_andnot
#endif /* __LINUX_ARM_ARCH__ */
@@ -254,17 +247,6 @@ ATOMIC_OPS(xor, ^=, eor)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
-#define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
long long counter;
@@ -494,12 +476,13 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return result;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- long long val;
+ long long oldval, newval;
unsigned long tmp;
- int ret = 1;
smp_mb();
prefetchw(&v->counter);
@@ -508,33 +491,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
-" moveq %1, #0\n"
" beq 2f\n"
-" adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
-" strexd %2, %0, %H0, [%4]\n"
+" adds %Q1, %Q0, %Q6\n"
+" adc %R1, %R0, %R6\n"
+" strexd %2, %1, %H1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
- : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
- if (ret)
+ if (oldval != u)
smp_mb();
- return ret;
+ return oldval;
}
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
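
Both conversions in this file follow the same interface change: atomic_add_unless() and atomic64_add_unless() returned a boolean, while the fetch_add_unless() variants return the value observed before the operation, from which the boolean is simply (old != u); that is also why the smp_mb() condition becomes (oldval != u). The contract in standalone C11 (a sketch of the semantics, not the ldrexd/strexd implementation):

    #include <assert.h>
    #include <stdatomic.h>

    /* Add a to *v unless *v == u; return the old value either way. */
    static int fetch_add_unless(atomic_int *v, int a, int u)
    {
        int c = atomic_load(v);

        while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
            ;   /* a failed CAS reloads c with the current value */
        return c;
    }

    int main(void)
    {
        atomic_int v = 5;

        assert(fetch_add_unless(&v, 1, 5) == 5 && v == 5); /* skipped */
        assert(fetch_add_unless(&v, 1, 9) == 5 && v == 6); /* applied */
        return 0;
    }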
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 4cab9bb823fb..c92e42a5c8f7 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -215,7 +215,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#if __LINUX_ARM_ARCH__ < 5
-#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
@@ -223,93 +222,20 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#else
-static inline int constant_fls(int x)
-{
- int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-
-/*
- * On ARMv5 and above those functions can be implemented around the
- * clz instruction for much better code efficiency. __clz returns
- * the number of leading zeros, zero input will return 32, and
- * 0x80000000 will return 0.
- */
-static inline unsigned int __clz(unsigned int x)
-{
- unsigned int ret;
-
- asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-
- return ret;
-}
-
-/*
- * fls() returns zero if the input is zero, otherwise returns the bit
- * position of the last set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int fls(int x)
-{
- if (__builtin_constant_p(x))
- return constant_fls(x);
-
- return 32 - __clz(x);
-}
-
-/*
- * __fls() returns the bit position of the last bit set, where the
- * LSB is 0 and MSB is 31. Zero input is undefined.
- */
-static inline unsigned long __fls(unsigned long x)
-{
- return fls(x) - 1;
-}
-
-/*
- * ffs() returns zero if the input was zero, otherwise returns the bit
- * position of the first set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int ffs(int x)
-{
- return fls(x & -x);
-}
-
/*
- * __ffs() returns the bit position of the first bit set, where the
- * LSB is 0 and MSB is 31. Zero input is undefined.
+ * On ARMv5 and above, the gcc built-ins may rely on the clz instruction
+ * and produce optimal inlined code in all cases. On ARMv7 it is even
+ * better by also using the rbit instruction.
*/
-static inline unsigned long __ffs(unsigned long x)
-{
- return ffs(x) - 1;
-}
-
-#define ffz(x) __ffs( ~(x) )
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>
#endif
+#include <asm-generic/bitops/ffz.h>
+
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
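
The removed open-coded routines are replaced by the asm-generic builtin wrappers: on ARMv5+ the compiler lowers __builtin_clz() to a single clz, and on ARMv7 __builtin_ctz() becomes rbit plus clz. The kernel's 1-based conventions, restated in standalone C (a sketch; the asm-generic headers are authoritative):

    #include <assert.h>

    /* fls: 1-based index of the most significant set bit, 0 for x == 0. */
    static int fls_sketch(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    /* ffs: 1-based index of the least significant set bit, 0 for x == 0. */
    static int ffs_sketch(unsigned int x)
    {
        return x ? __builtin_ctz(x) + 1 : 0;
    }

    int main(void)
    {
        assert(fls_sketch(0x80000000u) == 32 && fls_sketch(1) == 1);
        assert(ffs_sketch(0x80000000u) == 32 && ffs_sketch(0) == 0);
        return 0;
    }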
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 17f1f1a814ff..38badaae8d9d 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -58,6 +58,9 @@ void efi_virtmap_unload(void);
#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
#define efi_is_64bit() (false)
+#define efi_table_attr(table, attr, instance) \
+ ((table##_t *)instance)->attr
+
#define efi_call_proto(protocol, f, instance, ...) \
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
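
The new efi_table_attr() mirrors the efi_call_proto() macro next to it: token pasting turns the first argument into a concrete type name, and the opaque instance pointer is cast before the member access. A standalone demonstration of the mechanism, with a made-up table type (fwtab and rev are not EFI names):

    #include <stdio.h>

    typedef struct { unsigned int rev; } fwtab_t;

    #define table_attr(table, attr, instance) \
        (((table##_t *)(instance))->attr)

    int main(void)
    {
        fwtab_t t = { .rev = 2 };
        void *opaque = &t;

        printf("%u\n", table_attr(fwtab, rev, opaque)); /* prints 2 */
        return 0;
    }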
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index e46e4e7bdba3..ac54c06764e6 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -111,14 +111,17 @@ static inline void decode_ctrl_reg(u32 reg,
asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
} while (0)
+struct perf_event_attr;
struct notifier_block;
struct perf_event;
struct pmu;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index b6f319606e30..c883fcbe93b6 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -31,11 +31,6 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-#endif
-
#ifdef CONFIG_SMP
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
bool exclude_self);
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 59655459da59..82290f212d8e 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -44,8 +44,6 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned int kprobe_status;
struct prev_kprobe prev_kprobe;
- struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 6493bd479ddc..77121b713bef 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -26,13 +26,13 @@
#include <asm/cputype.h>
/* arm64 compatibility macros */
-#define COMPAT_PSR_MODE_ABT ABT_MODE
-#define COMPAT_PSR_MODE_UND UND_MODE
-#define COMPAT_PSR_T_BIT PSR_T_BIT
-#define COMPAT_PSR_I_BIT PSR_I_BIT
-#define COMPAT_PSR_A_BIT PSR_A_BIT
-#define COMPAT_PSR_E_BIT PSR_E_BIT
-#define COMPAT_PSR_IT_MASK PSR_IT_MASK
+#define PSR_AA32_MODE_ABT ABT_MODE
+#define PSR_AA32_MODE_UND UND_MODE
+#define PSR_AA32_T_BIT PSR_T_BIT
+#define PSR_AA32_I_BIT PSR_I_BIT
+#define PSR_AA32_A_BIT PSR_A_BIT
+#define PSR_AA32_E_BIT PSR_E_BIT
+#define PSR_AA32_IT_MASK PSR_IT_MASK
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
@@ -107,9 +107,19 @@ static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
return (unsigned long *)&vcpu->arch.hcr;
}
+static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr &= ~HCR_TWE;
+}
+
+static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr |= HCR_TWE;
+}
+
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
- return 1;
+ return true;
}
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 1f1fe4109b02..79906cecb091 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -216,6 +216,11 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
unsigned long kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events);
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 8553d68b7c8a..265ea9cf7df7 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -75,17 +75,9 @@ phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
-static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
-{
- *pmd = new_pmd;
- dsb(ishst);
-}
-
-static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
-{
- *pte = new_pte;
- dsb(ishst);
-}
+#define kvm_mk_pmd(ptep) __pmd(__pa(ptep) | PMD_TYPE_TABLE)
+#define kvm_mk_pud(pmdp) __pud(__pa(pmdp) | PMD_TYPE_TABLE)
+#define kvm_mk_pgd(pudp) ({ BUILD_BUG(); 0; })
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 5c1ad11aa392..bb8851208e17 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -59,7 +59,7 @@ struct machine_desc {
void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
void (*restart)(enum reboot_mode, const char *);
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 0f79e4dec7f9..4ac3a019a46f 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -13,7 +13,6 @@
extern void timer_tick(void);
typedef void (*clock_access_fn)(struct timespec64 *);
-extern int register_persistent_clock(clock_access_fn read_boot,
- clock_access_fn read_persistent);
+extern int register_persistent_clock(clock_access_fn read_persistent);
#endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 89ad0596033a..9e81b7c498d8 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -34,6 +34,7 @@ struct mod_arch_specific {
#endif
};
+struct module;
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
/*
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
index 1e5b9bb92270..991c9127c650 100644
--- a/arch/arm/include/asm/probes.h
+++ b/arch/arm/include/asm/probes.h
@@ -51,7 +51,6 @@ struct arch_probes_insn {
* We assume one instruction can consume at most 64 bytes stack, which is
* 'push {r0-r15}'. Instructions that consume more or unknown stack space,
* like 'str r0, [sp, #-80]' and 'str r0, [sp, r1]', must not be probed.
- * Both kprobe and jprobe use this macro.
*/
#define MAX_STACK_SIZE 64
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index e71cc35de163..9b37b6ab27fe 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -123,8 +123,8 @@ struct user_vfp_exc;
extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
struct user_vfp_exc __user *);
-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
#endif
/*
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index d5562f9ce600..f854148c8d7c 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -292,5 +292,13 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
{
}
+static inline void tlb_flush_remove_tables(struct mm_struct *mm)
+{
+}
+
+static inline void tlb_flush_remove_tables_local(void *arg)
+{
+}
+
#endif /* CONFIG_MMU */
#endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 3d614e90c19f..5451e1f05a19 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -85,6 +85,13 @@ static inline void set_fs(mm_segment_t fs)
flag; })
/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
+/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
* which read from user space (*get_*) need to take care not to leak
@@ -153,7 +160,7 @@ extern int __get_user_64t_4(void *);
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
register typeof(*(p)) __user *__p asm("r0") = (p); \
- register typeof(x) __r2 asm("r2"); \
+ register __inttype(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
@@ -243,6 +250,16 @@ static inline void set_fs(mm_segment_t fs)
#define user_addr_max() \
(uaccess_kernel() ? ~0UL : get_fs())
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1, it is not worth fixing the non-
+ * verifying accessors, because we need to add verification of the
+ * address space there. Force these to use the standard get_user()
+ * version instead.
+ */
+#define __get_user(x, ptr) get_user(x, ptr)
+#else
+
/*
* The "__xxx" versions of the user access functions do not verify the
* address space - it must have been done previously with a separate
@@ -259,12 +276,6 @@ static inline void set_fs(mm_segment_t fs)
__gu_err; \
})
-#define __get_user_error(x, ptr, err) \
-({ \
- __get_user_err((x), (ptr), err); \
- (void) 0; \
-})
-
#define __get_user_err(x, ptr, err) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
@@ -324,6 +335,7 @@ do { \
#define __get_user_asm_word(x, addr, err) \
__get_user_asm(x, addr, err, ldr)
+#endif
#define __put_user_switch(x, ptr, __err, __fn) \
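
The __inttype() addition keeps the register asm("r2") result variable wide enough for the value being fetched: it evaluates to unsigned long long when the target lvalue is wider than unsigned long, so a 64-bit get_user() no longer truncates through a 32-bit register variable. The effect is observable in plain C (on 32-bit ARM this prints 8 and 4):

    #include <stdio.h>

    #define __inttype(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

    int main(void)
    {
        long long wide;
        int narrow;

        printf("%zu %zu\n", sizeof(__inttype(wide)),
                            sizeof(__inttype(narrow)));
        return 0;
    }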
diff --git a/arch/arm/include/debug/renesas-scif.S b/arch/arm/include/debug/renesas-scif.S
index 97820a8df51a..1c5f795587fc 100644
--- a/arch/arm/include/debug/renesas-scif.S
+++ b/arch/arm/include/debug/renesas-scif.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Renesas SCIF(A) debugging macro include header
*
@@ -5,10 +6,6 @@
*
* Copyright (C) 2012-2013 Renesas Electronics Corporation
* Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define SCIF_PHYS CONFIG_DEBUG_UART_PHYS
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 16e006f708ca..4602464ebdfb 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -125,6 +126,18 @@ struct kvm_sync_regs {
struct kvm_arch_memory_slot {
};
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+ struct {
+ __u8 serror_pending;
+ __u8 serror_has_esr;
+ /* Align it to 8 bytes */
+ __u8 pad[6];
+ __u64 serror_esr;
+ } exception;
+ __u32 reserved[12];
+};
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 974d8d7d1bcd..3968d6c22455 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -38,25 +38,14 @@
#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
#endif
/*
- * GCC 3.0, 3.1: general bad code generation.
- * GCC 3.2.0: incorrect function argument offset calculation.
- * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
- * (http://gcc.gnu.org/PR8896) and incorrect structure
- * initialisation in fs/jffs2/erase.c
* GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
* miscompiles find_get_entry(), and can result in EXT3 and EXT4
* filesystem corruption (possibly other FS too).
*/
-#ifdef __GNUC__
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-#error Your compiler is too buggy; it is known to miscompile kernels.
-#error Known good compilers: 3.3, 4.x
-#endif
-#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
+#if defined(GCC_VERSION) && GCC_VERSION >= 40800 && GCC_VERSION < 40803
#error Your compiler is too buggy; it is known to miscompile kernels
#error and result in filesystem corruption and oopses.
#endif
-#endif
int main(void)
{
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 179a9f6bd1e3..e85a3af9ddeb 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -22,7 +22,7 @@
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
-#ifndef CONFIG_MULTI_IRQ_HANDLER
+#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
@@ -39,7 +39,7 @@
* Interrupt handling.
*/
.macro irq_handler
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
ldr r1, =handle_arch_irq
mov r0, sp
badr lr, 9997f
@@ -1226,9 +1226,3 @@ vector_addrexcptn:
.globl cr_alignment
cr_alignment:
.space 4
-
-#ifdef CONFIG_MULTI_IRQ_HANDLER
- .globl handle_arch_irq
-handle_arch_irq:
- .space 4
-#endif
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 106a1466518d..746565a876dc 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,7 @@ saved_pc .req lr
* from those features make this path too inefficient.
*/
ret_fast_syscall:
+__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
@@ -78,6 +79,7 @@ fast_work_pending:
* call.
*/
ret_fast_syscall:
+__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
@@ -255,7 +257,7 @@ local_restart:
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
- invoke_syscall tbl, scno, r10, ret_fast_syscall
+ invoke_syscall tbl, scno, r10, __ret_fast_syscall
add r1, sp, #S_OFF
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index dd546d65a383..ec29de250076 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -53,7 +53,11 @@ ENTRY(stext)
THUMB(1: )
#endif
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install
+#endif
+ @ ensure svc mode and all interrupts masked
+ safe_svcmode_maskall r9
- @ and irqs disabled
#if defined(CONFIG_CPU_CP15)
mrc p15, 0, r9, c0, c0 @ get processor id
@@ -89,7 +93,11 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install_secondary
+#endif
+ safe_svcmode_maskall r9
+
#ifndef CONFIG_CPU_CP15
ldr r9, =CONFIG_PROCESSOR_ID
#else
@@ -177,7 +185,7 @@ M_CLASS(streq r3, [r12, #PMSAv8_MAIR1])
bic r0, r0, #CR_I
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
- isb
+ instr_sync
#elif defined (CONFIG_CPU_V7M)
#ifdef CONFIG_ARM_MPU
ldreq r3, [r12, MPU_CTRL]
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 629e25152c0d..1d5fbf1d1c67 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -456,14 +456,13 @@ static int get_hbp_len(u8 hbp_len)
/*
* Check whether bp virtual address is in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->ctrl.len);
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -518,42 +517,42 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+ hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->ctrl.type = ARM_BREAKPOINT_LOAD;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->ctrl.type = ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
- info->ctrl.len = ARM_BREAKPOINT_LEN_8;
- if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
+ if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
&& max_watchpoint_len >= 8)
break;
default:
@@ -566,24 +565,24 @@ static int arch_build_bp_info(struct perf_event *bp)
* by the hardware and must be aligned to the appropriate number of
* bytes.
*/
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
return -EINVAL;
/* Address */
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/* Privilege */
- info->ctrl.privilege = ARM_BREAKPOINT_USER;
- if (arch_check_bp_in_kernelspace(bp))
- info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
+ hw->ctrl.privilege = ARM_BREAKPOINT_USER;
+ if (arch_check_bp_in_kernelspace(hw))
+ hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
/* Enabled? */
- info->ctrl.enabled = !bp->attr.disabled;
+ hw->ctrl.enabled = !attr->disabled;
/* Mismatch */
- info->ctrl.mismatch = 0;
+ hw->ctrl.mismatch = 0;
return 0;
}
@@ -591,9 +590,10 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings.
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int ret = 0;
u32 offset, alignment_mask = 0x3;
@@ -602,14 +602,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
return -ENODEV;
/* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
goto out;
/* Check address alignment. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
alignment_mask = 0x7;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
switch (offset) {
case 0:
/* Aligned */
@@ -617,19 +617,19 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
case 1:
case 2:
/* Allow halfword watchpoints and breakpoints. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
case 3:
/* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
default:
ret = -EINVAL;
goto out;
}
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
if (is_default_overflow_handler(bp)) {
/*
@@ -640,7 +640,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
return -EINVAL;
/* We don't allow mismatch breakpoints in kernel space. */
- if (arch_check_bp_in_kernelspace(bp))
+ if (arch_check_bp_in_kernelspace(hw))
return -EPERM;
/*
@@ -655,8 +655,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* reports them.
*/
if (!debug_exception_updates_fsr() &&
- (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
- info->ctrl.type == ARM_BREAKPOINT_STORE))
+ (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
+ hw->ctrl.type == ARM_BREAKPOINT_STORE))
return -EINVAL;
}
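
A note on the alignment fix-up near the end of hw_breakpoint_arch_parse(): the low address bits are folded into the byte-address-select field, aligning the watchpoint address down while shifting the length mask so it still covers the requested bytes. A standalone sketch of that encoding (helper and constants invented for illustration):

    #include <assert.h>

    /* Align addr down to the watchpoint window and shift the byte-select
     * mask by the original offset, as the hunk above does with ctrl.len. */
    static void encode_byte_select(unsigned long *addr, unsigned int *mask,
                                   unsigned long align_mask)
    {
        unsigned long offset = *addr & align_mask;

        *addr &= ~align_mask;
        *mask <<= offset;
    }

    int main(void)
    {
        unsigned long addr = 0x1002;  /* halfword watchpoint at offset 2 */
        unsigned int mask = 0x3;      /* two-byte length mask */

        encode_byte_select(&addr, &mask, 0x3);
        assert(addr == 0x1000 && mask == 0xc); /* byte lanes 2 and 3 */
        return 0;
    }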
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ece04a457486..9908dacf9229 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -102,16 +102,6 @@ void __init init_IRQ(void)
uniphier_cache_init();
}
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- if (handle_arch_irq)
- return;
-
- handle_arch_irq = handle_irq;
-}
-#endif
-
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index be42c4f66a40..1ae99deeec54 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -233,7 +233,7 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
return ret;
}
-static inline u32 armv6pmu_read_counter(struct perf_event *event)
+static inline u64 armv6pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -251,7 +251,7 @@ static inline u32 armv6pmu_read_counter(struct perf_event *event)
return value;
}
-static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -411,6 +411,12 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}
+static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
static void armv6pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
@@ -491,11 +497,11 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6_map_event;
cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
}
static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
@@ -542,11 +548,11 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6mpcore_map_event;
cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 57f01e059f39..a4fb0f8b8f84 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -743,7 +743,7 @@ static inline void armv7_pmnc_select_counter(int idx)
isb();
}
-static inline u32 armv7pmu_read_counter(struct perf_event *event)
+static inline u64 armv7pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -763,7 +763,7 @@ static inline u32 armv7pmu_read_counter(struct perf_event *event)
return value;
}
-static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -1058,6 +1058,12 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
return -EAGAIN;
}
+static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
/*
* Add an event filter to a given event. This will only work for PMUv2 PMUs.
*/
@@ -1167,10 +1173,10 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv7pmu_read_counter;
cpu_pmu->write_counter = armv7pmu_write_counter;
cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
cpu_pmu->start = armv7pmu_start;
cpu_pmu->stop = armv7pmu_stop;
cpu_pmu->reset = armv7pmu_reset;
- cpu_pmu->max_period = (1LLU << 32) - 1;
};
static void armv7_read_num_pmnc_events(void *info)
@@ -1638,6 +1644,7 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
bool venum_event = EVENT_VENUM(hwc->config_base);
bool krait_event = EVENT_CPU(hwc->config_base);
+ armv7pmu_clear_event_idx(cpuc, event);
if (venum_event || krait_event) {
bit = krait_event_to_bit(event, region, group);
clear_bit(bit, cpuc->used_mask);
@@ -1967,6 +1974,7 @@ static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
bool venum_event = EVENT_VENUM(hwc->config_base);
bool scorpion_event = EVENT_CPU(hwc->config_base);
+ armv7pmu_clear_event_idx(cpuc, event);
if (venum_event || scorpion_event) {
bit = scorpion_event_to_bit(event, region, group);
clear_bit(bit, cpuc->used_mask);
@@ -2030,6 +2038,7 @@ static struct platform_driver armv7_pmu_driver = {
.driver = {
.name = "armv7-pmu",
.of_match_table = armv7_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
},
.probe = armv7_pmu_device_probe,
};
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 88d1a76f5367..f6cdcacfb96d 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -292,6 +292,12 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}
+static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
@@ -316,7 +322,7 @@ static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static inline u32 xscale1pmu_read_counter(struct perf_event *event)
+static inline u64 xscale1pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -337,7 +343,7 @@ static inline u32 xscale1pmu_read_counter(struct perf_event *event)
return val;
}
-static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -370,11 +376,11 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = xscale1pmu_read_counter;
cpu_pmu->write_counter = xscale1pmu_write_counter;
cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
cpu_pmu->start = xscale1pmu_start;
cpu_pmu->stop = xscale1pmu_stop;
cpu_pmu->map_event = xscale_map_event;
cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
@@ -679,7 +685,7 @@ static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static inline u32 xscale2pmu_read_counter(struct perf_event *event)
+static inline u64 xscale2pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -706,7 +712,7 @@ static inline u32 xscale2pmu_read_counter(struct perf_event *event)
return val;
}
-static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -739,11 +745,11 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = xscale2pmu_read_counter;
cpu_pmu->write_counter = xscale2pmu_write_counter;
cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
cpu_pmu->start = xscale2pmu_start;
cpu_pmu->stop = xscale2pmu_stop;
cpu_pmu->map_event = xscale_map_event;
cpu_pmu->num_events = 5;
- cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
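
All three PMU backends in this series get the same two adjustments: the counter callbacks widen from u32 to u64 so the arm_pmu core can drive 32-bit and 64-bit counters through one interface (which is also why the per-PMU max_period assignments disappear), and an explicit clear_event_idx() callback replaces the clear_bit() the core used to do implicitly. The widening itself is mechanical, as this standalone sketch of a 32-bit counter behind the 64-bit interface shows:

    #include <stdint.h>

    static uint32_t counter_reg;   /* stand-in for the hardware counter */

    static uint64_t read_counter(void)
    {
        return (uint64_t)counter_reg;      /* zero-extend on read */
    }

    static void write_counter(uint64_t val)
    {
        counter_reg = (uint32_t)val;       /* high half discarded */
    }

    int main(void)
    {
        write_counter(0x1ffffffffULL);
        return read_counter() == 0xffffffffULL ? 0 : 1;
    }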
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 225d1c58d2de..82ab015bf42b 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -330,15 +330,15 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
* atomic helpers. Insert it into the gate_vma so that it is visible
* through ptrace and /proc/<pid>/mem.
*/
-static struct vm_area_struct gate_vma = {
- .vm_start = 0xffff0000,
- .vm_end = 0xffff0000 + PAGE_SIZE,
- .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
-};
+static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
+ vma_init(&gate_vma, NULL);
gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
+ gate_vma.vm_start = 0xffff0000;
+ gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
+ gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
return 0;
}
arch_initcall(gate_vma_init);
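
The static initializer is dropped because the VMA now has to pass through vma_init(), which records the owning mm and installs a dummy vm_ops so the rest of mm never has to NULL-check vm_ops. An approximation of the helper's effect, with the kernel structs stubbed out (see include/linux/mm.h for the real definition):

    #include <stddef.h>

    struct mm_struct;
    struct vm_operations_struct { int unused; };
    struct vm_area_struct {
        unsigned long vm_start, vm_end, vm_flags;
        struct mm_struct *vm_mm;
        const struct vm_operations_struct *vm_ops;
    };

    static const struct vm_operations_struct dummy_vm_ops;

    static void vma_init_sketch(struct vm_area_struct *vma,
                                struct mm_struct *mm)
    {
        vma->vm_mm = mm;
        vma->vm_ops = &dummy_vm_ops;   /* never leave vm_ops NULL */
    }

    int main(void)
    {
        static struct vm_area_struct gate;

        vma_init_sketch(&gate, NULL);
        return gate.vm_ops == &dummy_vm_ops ? 0 : 1;
    }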
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 35ca494c028c..4c249cb261f3 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1145,7 +1145,7 @@ void __init setup_arch(char **cmdline_p)
reserve_crashkernel();
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index dec130e7078c..b8f766cf3a90 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -150,22 +150,18 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
static int restore_vfp_context(char __user **auxp)
{
- struct vfp_sigframe __user *frame =
- (struct vfp_sigframe __user *)*auxp;
- unsigned long magic;
- unsigned long size;
- int err = 0;
-
- __get_user_error(magic, &frame->magic, err);
- __get_user_error(size, &frame->size, err);
+ struct vfp_sigframe frame;
+ int err;
+ err = __copy_from_user(&frame, *auxp, sizeof(frame));
if (err)
- return -EFAULT;
- if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ return err;
+
+ if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
return -EINVAL;
- *auxp += size;
- return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
+ *auxp += sizeof(frame);
+ return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}
#endif
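
This rewrite, like the restore_sigframe() and sys_oabi_semtimedop() hunks
below, replaces a string of per-field __get_user_error() calls with one
__copy_from_user() of the whole structure followed by checks on the local
copy, so the user pointer is range-checked once instead of being
dereferenced field by field. A self-contained userspace model of the
pattern -- copy_from_untrusted() stands in for __copy_from_user() and the
magic constant is illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct frame { uint32_t magic; uint32_t size; };

    /* Models __copy_from_user(): returns nonzero on fault. */
    static int copy_from_untrusted(void *dst, const void *src, size_t n)
    {
            memcpy(dst, src, n);
            return 0;
    }

    static int restore(const void *user_ptr)
    {
            struct frame f;

            if (copy_from_untrusted(&f, user_ptr, sizeof(f)))
                    return -1;              /* -EFAULT in the kernel */
            if (f.magic != 0x56465001u || f.size != sizeof(f))
                    return -2;              /* -EINVAL */
            return 0;
    }

    int main(void)
    {
            struct frame good = { 0x56465001u, sizeof(good) };
            printf("restore: %d\n", restore(&good));
            return 0;
    }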
@@ -176,6 +172,7 @@ static int restore_vfp_context(char __user **auxp)
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
+ struct sigcontext context;
char __user *aux;
sigset_t set;
int err;
@@ -184,23 +181,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
if (err == 0)
set_current_blocked(&set);
- __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
- __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
- __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
- __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
- __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
- __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
- __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
- __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
- __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
- __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
- __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
- __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
- __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
- __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
- __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
- __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
- __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+ err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
+ if (err == 0) {
+ regs->ARM_r0 = context.arm_r0;
+ regs->ARM_r1 = context.arm_r1;
+ regs->ARM_r2 = context.arm_r2;
+ regs->ARM_r3 = context.arm_r3;
+ regs->ARM_r4 = context.arm_r4;
+ regs->ARM_r5 = context.arm_r5;
+ regs->ARM_r6 = context.arm_r6;
+ regs->ARM_r7 = context.arm_r7;
+ regs->ARM_r8 = context.arm_r8;
+ regs->ARM_r9 = context.arm_r9;
+ regs->ARM_r10 = context.arm_r10;
+ regs->ARM_fp = context.arm_fp;
+ regs->ARM_ip = context.arm_ip;
+ regs->ARM_sp = context.arm_sp;
+ regs->ARM_lr = context.arm_lr;
+ regs->ARM_pc = context.arm_pc;
+ regs->ARM_cpsr = context.arm_cpsr;
+ }
err |= !valid_user_regs(regs);
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 1df21a61e379..f0dd4b6ebb63 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -329,9 +329,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
return -ENOMEM;
err = 0;
for (i = 0; i < nsops; i++) {
- __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
- __get_user_error(sops[i].sem_op, &tsops->sem_op, err);
- __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
+ struct oabi_sembuf osb;
+ err |= __copy_from_user(&osb, tsops, sizeof(osb));
+ sops[i].sem_num = osb.sem_num;
+ sops[i].sem_op = osb.sem_op;
+ sops[i].sem_flg = osb.sem_flg;
tsops++;
}
if (timeout) {
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index cf2701cb0de8..078b259ead4e 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -83,29 +83,18 @@ static void dummy_clock_access(struct timespec64 *ts)
}
static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;
void read_persistent_clock64(struct timespec64 *ts)
{
__read_persistent_clock(ts);
}
-void read_boot_clock64(struct timespec64 *ts)
-{
- __read_boot_clock(ts);
-}
-
-int __init register_persistent_clock(clock_access_fn read_boot,
- clock_access_fn read_persistent)
+int __init register_persistent_clock(clock_access_fn read_persistent)
{
/* Only allow the clock access functions to be registered once */
- if (__read_persistent_clock == dummy_clock_access &&
- __read_boot_clock == dummy_clock_access) {
- if (read_boot)
- __read_boot_clock = read_boot;
+ if (__read_persistent_clock == dummy_clock_access) {
if (read_persistent)
__read_persistent_clock = read_persistent;
-
return 0;
}
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 3a02e76699a6..450c7a4fbc8a 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -246,6 +246,7 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
{
u64 reg;
+ bool g1;
if (!p->is_write)
return read_from_write_only(vcpu, p);
@@ -253,7 +254,25 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
reg |= *vcpu_reg(vcpu, p->Rt1);
- vgic_v3_dispatch_sgi(vcpu, reg);
+ /*
+ * In a system where GICD_CTLR.DS=1, an ICC_SGI0R access generates
+ * Group0 SGIs only, while ICC_SGI1R can generate either group,
+ * depending on the SGI configuration. ICC_ASGI1R is effectively
+ * equivalent to ICC_SGI0R, as there is no "alternative" secure
+ * group.
+ */
+ switch (p->Op1) {
+ default: /* Keep GCC quiet */
+ case 0: /* ICC_SGI1R */
+ g1 = true;
+ break;
+ case 1: /* ICC_ASGI1R */
+ case 2: /* ICC_SGI0R */
+ g1 = false;
+ break;
+ }
+
+ vgic_v3_dispatch_sgi(vcpu, reg, g1);
return true;
}
@@ -459,6 +478,10 @@ static const struct coproc_reg cp15_regs[] = {
/* ICC_SGI1R */
{ CRm64(12), Op1( 0), is64, access_gic_sgi},
+ /* ICC_ASGI1R */
+ { CRm64(12), Op1( 1), is64, access_gic_sgi},
+ /* ICC_SGI0R */
+ { CRm64(12), Op1( 2), is64, access_gic_sgi},
/* VBAR: swapped by interrupt.S. */
{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index a18f33edc471..2b8de885b2bf 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -261,6 +261,29 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
+
+ return 0;
+}
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ bool serror_pending = events->exception.serror_pending;
+ bool has_esr = events->exception.serror_has_esr;
+
+ if (serror_pending && has_esr)
+ return -EINVAL;
+ else if (serror_pending)
+ kvm_inject_vabt(vcpu);
+
+ return 0;
+}
+
int __attribute_const__ kvm_target_cpu(void)
{
switch (read_cpuid_part()) {
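
These two hooks wire the generic KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS
ioctls up for 32-bit ARM, where a pending SError can be saved and
re-injected but carries no syndrome. A hedged sketch of how a VMM might use
them during migration (error handling trimmed, vcpu fds assumed open):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int migrate_serror(int src_vcpu_fd, int dst_vcpu_fd)
    {
            struct kvm_vcpu_events ev;

            if (ioctl(src_vcpu_fd, KVM_GET_VCPU_EVENTS, &ev))
                    return -1;
            /* On 32-bit ARM serror_has_esr must stay clear; the set
             * side above rejects the pending+ESR combination. */
            if (ioctl(dst_vcpu_fd, KVM_SET_VCPU_EVENTS, &ev))
                    return -1;
            return 0;
    }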
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a4b06049001..a826df3d3814 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -90,6 +90,15 @@
.text
ENTRY(arm_copy_from_user)
+#ifdef CONFIG_CPU_SPECTRE
+ get_thread_info r3
+ ldr r3, [r3, #TI_ADDR_LIMIT]
+ adds ip, r1, r2 @ ip=addr+size
+ sub r3, r3, #1 @ addr_limit - 1
+ cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
+ movcs r1, #0 @ addr = NULL
+ csdb
+#endif
#include "copy_template.S"
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 1254bf9d91b4..903f23c309df 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -27,6 +27,7 @@ config SOC_SAMA5D2
select HAVE_AT91_H32MX
select HAVE_AT91_GENERATED_CLK
select HAVE_AT91_AUDIO_PLL
+ select HAVE_AT91_I2S_MUX_CLK
select PINCTRL_AT91PIO4
help
Select this if you are using one of Microchip's SAMA5D2 family SoCs.
@@ -129,6 +130,9 @@ config HAVE_AT91_GENERATED_CLK
config HAVE_AT91_AUDIO_PLL
bool
+config HAVE_AT91_I2S_MUX_CLK
+ bool
+
config SOC_SAM_V4_V5
bool
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 4ea93c9df77b..7415f181907b 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -19,31 +19,6 @@ ifeq ($(CONFIG_PM_DEBUG),y)
CFLAGS_pm.o += -DDEBUG
endif
-# Default sed regexp - multiline due to syntax constraints
-define sed-y
- "/^->/{s:->#\(.*\):/* \1 */:; \
- s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
- s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
- s:->::; p;}"
-endef
-
-# Use filechk to avoid rebuilds when a header changes, but the resulting file
-# does not
-define filechk_offsets
- (set -e; \
- echo "#ifndef $2"; \
- echo "#define $2"; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was generated by Kbuild"; \
- echo " */"; \
- echo ""; \
- sed -ne $(sed-y); \
- echo ""; \
- echo "#endif" )
-endef
-
arch/arm/mach-at91/pm_data-offsets.s: arch/arm/mach-at91/pm_data-offsets.c
$(call if_changed_dep,cc_s_c)
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 849014c01cf4..e8f3d0f97e61 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -40,15 +40,16 @@ extern void at91_pinctrl_gpio_resume(void);
#endif
static const match_table_t pm_modes __initconst = {
- { 0, "standby" },
- { AT91_PM_SLOW_CLOCK, "ulp0" },
+ { AT91_PM_STANDBY, "standby" },
+ { AT91_PM_ULP0, "ulp0" },
+ { AT91_PM_ULP1, "ulp1" },
{ AT91_PM_BACKUP, "backup" },
{ -1, NULL },
};
static struct at91_pm_data pm_data = {
- .standby_mode = 0,
- .suspend_mode = AT91_PM_SLOW_CLOCK,
+ .standby_mode = AT91_PM_STANDBY,
+ .suspend_mode = AT91_PM_ULP0,
};
#define at91_ramc_read(id, field) \
@@ -79,6 +80,90 @@ static struct at91_pm_bu {
phys_addr_t resume;
} *pm_bu;
+struct wakeup_source_info {
+ unsigned int pmc_fsmr_bit;
+ unsigned int shdwc_mr_bit;
+ bool set_polarity;
+};
+
+static const struct wakeup_source_info ws_info[] = {
+ { .pmc_fsmr_bit = AT91_PMC_FSTT(10), .set_polarity = true },
+ { .pmc_fsmr_bit = AT91_PMC_RTCAL, .shdwc_mr_bit = BIT(17) },
+ { .pmc_fsmr_bit = AT91_PMC_USBAL },
+ { .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
+};
+
+static const struct of_device_id sama5d2_ws_ids[] = {
+ { .compatible = "atmel,sama5d2-gem", .data = &ws_info[0] },
+ { .compatible = "atmel,at91rm9200-rtc", .data = &ws_info[1] },
+ { .compatible = "atmel,sama5d3-udc", .data = &ws_info[2] },
+ { .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] },
+ { .compatible = "usb-ohci", .data = &ws_info[2] },
+ { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
+ { .compatible = "usb-ehci", .data = &ws_info[2] },
+ { .compatible = "atmel,sama5d2-sdhci", .data = &ws_info[3] },
+ { /* sentinel */ }
+};
+
+static int at91_pm_config_ws(unsigned int pm_mode, bool set)
+{
+ const struct wakeup_source_info *wsi;
+ const struct of_device_id *match;
+ struct platform_device *pdev;
+ struct device_node *np;
+ unsigned int mode = 0, polarity = 0, val = 0;
+
+ if (pm_mode != AT91_PM_ULP1)
+ return 0;
+
+ if (!pm_data.pmc || !pm_data.shdwc)
+ return -EPERM;
+
+ if (!set) {
+ writel(mode, pm_data.pmc + AT91_PMC_FSMR);
+ return 0;
+ }
+
+ /* SHDWC.WUIR */
+ val = readl(pm_data.shdwc + 0x0c);
+ mode |= (val & 0x3ff);
+ polarity |= ((val >> 16) & 0x3ff);
+
+ /* SHDWC.MR */
+ val = readl(pm_data.shdwc + 0x04);
+
+ /* Loop through defined wakeup sources. */
+ for_each_matching_node_and_match(np, sama5d2_ws_ids, &match) {
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ continue;
+
+ if (device_may_wakeup(&pdev->dev)) {
+ wsi = match->data;
+
+ /* Check if enabled on SHDWC. */
+ if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
+ goto put_node;
+
+ mode |= wsi->pmc_fsmr_bit;
+ if (wsi->set_polarity)
+ polarity |= wsi->pmc_fsmr_bit;
+ }
+
+put_node:
+ of_node_put(np);
+ }
+
+ if (mode) {
+ writel(mode, pm_data.pmc + AT91_PMC_FSMR);
+ writel(polarity, pm_data.pmc + AT91_PMC_FSPR);
+ } else {
+ pr_err("AT91: PM: no ULP1 wakeup sources found!");
+ }
+
+ return mode ? 0 : -EPERM;
+}
+
/*
* Called after processes are frozen, but before we shutdown devices.
*/
@@ -97,7 +182,7 @@ static int at91_pm_begin(suspend_state_t state)
pm_data.mode = -1;
}
- return 0;
+ return at91_pm_config_ws(pm_data.mode, true);
}
/*
@@ -145,7 +230,7 @@ static int at91_pm_verify_clocks(void)
*/
int at91_suspend_entering_slow_clock(void)
{
- return (pm_data.mode >= AT91_PM_SLOW_CLOCK);
+ return (pm_data.mode >= AT91_PM_ULP0);
}
EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
@@ -186,7 +271,7 @@ static void at91_pm_suspend(suspend_state_t state)
* event sources; and reduces DRAM power. But otherwise it's identical to
* PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
*
- * AT91_PM_SLOW_CLOCK is like STANDBY plus slow clock mode, so drivers must
+ * AT91_PM_ULP0 is like STANDBY plus slow clock mode, so drivers must
 * suspend more deeply; the master clock switches to the clk32k and the
 * main oscillator is turned off
*
@@ -204,7 +289,7 @@ static int at91_pm_enter(suspend_state_t state)
/*
* Ensure that clocks are in a valid state.
*/
- if ((pm_data.mode >= AT91_PM_SLOW_CLOCK) &&
+ if (pm_data.mode >= AT91_PM_ULP0 &&
!at91_pm_verify_clocks())
goto error;
@@ -233,6 +318,7 @@ error:
*/
static void at91_pm_end(void)
{
+ at91_pm_config_ws(pm_data.mode, false);
}
@@ -478,31 +564,28 @@ static void __init at91_pm_sram_init(void)
&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
}
-static void __init at91_pm_backup_init(void)
+static bool __init at91_is_pm_mode_active(int pm_mode)
+{
+ return (pm_data.standby_mode == pm_mode ||
+ pm_data.suspend_mode == pm_mode);
+}
+
+static int __init at91_pm_backup_init(void)
{
struct gen_pool *sram_pool;
struct device_node *np;
struct platform_device *pdev = NULL;
+ int ret = -ENODEV;
- if ((pm_data.standby_mode != AT91_PM_BACKUP) &&
- (pm_data.suspend_mode != AT91_PM_BACKUP))
- return;
+ if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
+ return 0;
pm_bu = NULL;
- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc");
- if (!np) {
- pr_warn("%s: failed to find shdwc!\n", __func__);
- return;
- }
-
- pm_data.shdwc = of_iomap(np, 0);
- of_node_put(np);
-
np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
if (!np) {
pr_warn("%s: failed to find sfrbu!\n", __func__);
- goto sfrbu_fail;
+ return ret;
}
pm_data.sfrbu = of_iomap(np, 0);
@@ -529,6 +612,7 @@ static void __init at91_pm_backup_init(void)
pm_bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
if (!pm_bu) {
pr_warn("%s: unable to alloc securam!\n", __func__);
+ ret = -ENOMEM;
goto securam_fail;
}
@@ -536,19 +620,60 @@ static void __init at91_pm_backup_init(void)
pm_bu->canary = __pa_symbol(&canary);
pm_bu->resume = __pa_symbol(cpu_resume);
- return;
+ return 0;
-sfrbu_fail:
- iounmap(pm_data.shdwc);
- pm_data.shdwc = NULL;
securam_fail:
iounmap(pm_data.sfrbu);
pm_data.sfrbu = NULL;
+ return ret;
+}
- if (pm_data.standby_mode == AT91_PM_BACKUP)
- pm_data.standby_mode = AT91_PM_SLOW_CLOCK;
- if (pm_data.suspend_mode == AT91_PM_BACKUP)
- pm_data.suspend_mode = AT91_PM_SLOW_CLOCK;
+static void __init at91_pm_use_default_mode(int pm_mode)
+{
+ if (pm_mode != AT91_PM_ULP1 && pm_mode != AT91_PM_BACKUP)
+ return;
+
+ if (pm_data.standby_mode == pm_mode)
+ pm_data.standby_mode = AT91_PM_ULP0;
+ if (pm_data.suspend_mode == pm_mode)
+ pm_data.suspend_mode = AT91_PM_ULP0;
+}
+
+static void __init at91_pm_modes_init(void)
+{
+ struct device_node *np;
+ int ret;
+
+ if (!at91_is_pm_mode_active(AT91_PM_BACKUP) &&
+ !at91_is_pm_mode_active(AT91_PM_ULP1))
+ return;
+
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc");
+ if (!np) {
+ pr_warn("%s: failed to find shdwc!\n", __func__);
+ goto ulp1_default;
+ }
+
+ pm_data.shdwc = of_iomap(np, 0);
+ of_node_put(np);
+
+ ret = at91_pm_backup_init();
+ if (ret) {
+ if (!at91_is_pm_mode_active(AT91_PM_ULP1))
+ goto unmap;
+ else
+ goto backup_default;
+ }
+
+ return;
+
+unmap:
+ iounmap(pm_data.shdwc);
+ pm_data.shdwc = NULL;
+ulp1_default:
+ at91_pm_use_default_mode(AT91_PM_ULP1);
+backup_default:
+ at91_pm_use_default_mode(AT91_PM_BACKUP);
}
struct pmc_info {
@@ -644,7 +769,7 @@ void __init sama5d2_pm_init(void)
if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
return;
- at91_pm_backup_init();
+ at91_pm_modes_init();
sama5_pm_init();
}
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index f95d31496f08..9bd4e6ca672a 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -21,8 +21,10 @@
#define AT91_MEMCTRL_SDRAMC 1
#define AT91_MEMCTRL_DDRSDR 2
-#define AT91_PM_SLOW_CLOCK 0x01
-#define AT91_PM_BACKUP 0x02
+#define AT91_PM_STANDBY 0x00
+#define AT91_PM_ULP0 0x01
+#define AT91_PM_ULP1 0x02
+#define AT91_PM_BACKUP 0x03
#ifndef __ASSEMBLY__
struct at91_pm_data {
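
Note that these mode values are ordinal, not a bitmask: at91_pm_enter() and
at91_suspend_entering_slow_clock() above compare with >= AT91_PM_ULP0, so
the numbering is load-bearing and any new mode has to slot into the scale.
A trivial standalone check of that assumption:

    #include <assert.h>

    #define AT91_PM_STANDBY 0x00
    #define AT91_PM_ULP0    0x01
    #define AT91_PM_ULP1    0x02
    #define AT91_PM_BACKUP  0x03

    int main(void)
    {
            /* every mode at or beyond ULP0 runs from the slow clock */
            assert(AT91_PM_ULP1 >= AT91_PM_ULP0);
            assert(AT91_PM_BACKUP >= AT91_PM_ULP0);
            assert(!(AT91_PM_STANDBY >= AT91_PM_ULP0));
            return 0;
    }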
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index daca91feea6a..a7c6ae13c945 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -42,6 +42,15 @@ tmp2 .req r5
.endm
/*
 * Wait until the main oscillator selection is done
+ */
+ .macro wait_moscsels
+1: ldr tmp1, [pmc, #AT91_PMC_SR]
+ tst tmp1, #AT91_PMC_MOSCSELS
+ beq 1b
+ .endm
+
+/*
* Wait until PLLA has locked.
*/
.macro wait_pllalock
@@ -112,19 +121,20 @@ ENTRY(at91_pm_suspend_in_sram)
bl at91_sramc_self_refresh
ldr r0, .pm_mode
- cmp r0, #AT91_PM_SLOW_CLOCK
- beq slow_clock
+ cmp r0, #AT91_PM_STANDBY
+ beq standby
cmp r0, #AT91_PM_BACKUP
beq backup_mode
+ bl at91_ulp_mode
+ b exit_suspend
+
+standby:
/* Wait for interrupt */
ldr pmc, .pmc_base
at91_cpu_idle
b exit_suspend
-slow_clock:
- bl at91_slowck_mode
- b exit_suspend
backup_mode:
bl at91_backup_mode
b exit_suspend
@@ -151,7 +161,102 @@ ENTRY(at91_backup_mode)
str tmp1, [r0, #0]
ENDPROC(at91_backup_mode)
-ENTRY(at91_slowck_mode)
+.macro at91_pm_ulp0_mode
+ ldr pmc, .pmc_base
+
+ /* Turn off the crystal oscillator */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ bic tmp1, tmp1, #AT91_PMC_MOSCEN
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ /* Wait for interrupt */
+ at91_cpu_idle
+
+ /* Turn on the crystal oscillator */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ orr tmp1, tmp1, #AT91_PMC_MOSCEN
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ wait_moscrdy
+.endm
+
+/**
+ * Note: This procedure applies only to platforms that use the
+ * external crystal oscillator as the main clock source.
+ */
+.macro at91_pm_ulp1_mode
+ ldr pmc, .pmc_base
+
+ /* Switch the main clock source to 12-MHz RC oscillator */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ bic tmp1, tmp1, #AT91_PMC_MOSCSEL
+ bic tmp1, tmp1, #AT91_PMC_KEY_MASK
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ wait_moscsels
+
+ /* Disable the crystal oscillator */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ bic tmp1, tmp1, #AT91_PMC_MOSCEN
+ bic tmp1, tmp1, #AT91_PMC_KEY_MASK
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ /* Switch the master clock source to main clock */
+ ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ bic tmp1, tmp1, #AT91_PMC_CSS
+ orr tmp1, tmp1, #AT91_PMC_CSS_MAIN
+ str tmp1, [pmc, #AT91_PMC_MCKR]
+
+ wait_mckrdy
+
+ /* Enter ULP1 mode by setting the WAITMODE bit in CKGR_MOR */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ orr tmp1, tmp1, #AT91_PMC_WAITMODE
+ bic tmp1, tmp1, #AT91_PMC_KEY_MASK
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ wait_mckrdy
+
+ /* Enable the crystal oscillator */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ orr tmp1, tmp1, #AT91_PMC_MOSCEN
+ bic tmp1, tmp1, #AT91_PMC_KEY_MASK
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ wait_moscrdy
+
+ /* Switch the master clock source to slow clock */
+ ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ bic tmp1, tmp1, #AT91_PMC_CSS
+ str tmp1, [pmc, #AT91_PMC_MCKR]
+
+ wait_mckrdy
+
+ /* Switch main clock source to crystal oscillator */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ orr tmp1, tmp1, #AT91_PMC_MOSCSEL
+ bic tmp1, tmp1, #AT91_PMC_KEY_MASK
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ wait_moscsels
+
+ /* Switch the master clock source to main clock */
+ ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ bic tmp1, tmp1, #AT91_PMC_CSS
+ orr tmp1, tmp1, #AT91_PMC_CSS_MAIN
+ str tmp1, [pmc, #AT91_PMC_MCKR]
+
+ wait_mckrdy
+.endm
+
+ENTRY(at91_ulp_mode)
ldr pmc, .pmc_base
/* Save Master clock setting */
@@ -174,22 +279,19 @@ ENTRY(at91_slowck_mode)
orr tmp1, tmp1, #(1 << 29) /* bit 29 always set */
str tmp1, [pmc, #AT91_CKGR_PLLAR]
- /* Turn off the main oscillator */
- ldr tmp1, [pmc, #AT91_CKGR_MOR]
- bic tmp1, tmp1, #AT91_PMC_MOSCEN
- orr tmp1, tmp1, #AT91_PMC_KEY
- str tmp1, [pmc, #AT91_CKGR_MOR]
+ ldr r0, .pm_mode
+ cmp r0, #AT91_PM_ULP1
+ beq ulp1_mode
- /* Wait for interrupt */
- at91_cpu_idle
+ at91_pm_ulp0_mode
+ b ulp_exit
- /* Turn on the main oscillator */
- ldr tmp1, [pmc, #AT91_CKGR_MOR]
- orr tmp1, tmp1, #AT91_PMC_MOSCEN
- orr tmp1, tmp1, #AT91_PMC_KEY
- str tmp1, [pmc, #AT91_CKGR_MOR]
+ulp1_mode:
+ at91_pm_ulp1_mode
+ b ulp_exit
- wait_moscrdy
+ulp_exit:
+ ldr pmc, .pmc_base
/* Restore PLLA setting */
ldr tmp1, .saved_pllar
@@ -212,7 +314,7 @@ ENTRY(at91_slowck_mode)
wait_mckrdy
mov pc, lr
-ENDPROC(at91_slowck_mode)
+ENDPROC(at91_ulp_mode)
/*
* void at91_sramc_self_refresh(unsigned int is_active)
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 05c3eecf47cb..da8a039d65f9 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -59,6 +59,7 @@ config MACH_DA8XX_DT
default y
depends on ARCH_DAVINCI_DA850
select PINCTRL
+ select TIMER_OF
help
Say y here to include support for TI DaVinci DA850 based boards using
Flattened Device Tree. More information at Documentation/devicetree
@@ -231,18 +232,6 @@ config DAVINCI_MUX_WARNINGS
to change the pin multiplexing setup. When there are no warnings
printed, it's safe to deselect DAVINCI_MUX for your product.
-config DAVINCI_RESET_CLOCKS
- bool "Reset unused clocks during boot"
- depends on ARCH_DAVINCI
- help
- Say Y if you want to reset unused clocks during boot.
- This option saves power, but assumes all drivers are
- using the clock framework. Broken drivers that do not
- yet use clock framework may not work with this option.
- If you are booting from another operating system, you
- probably do not want this option enabled until your
- device drivers work properly.
-
endmenu
endif
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 4e8178050027..93d271b4d84b 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -5,8 +5,8 @@
#
# Common objects
-obj-y := time.o clock.o serial.o psc.o \
- usb.o common.o sram.o aemif.o
+obj-y := time.o serial.o usb.o \
+ common.o sram.o
obj-$(CONFIG_DAVINCI_MUX) += mux.o
diff --git a/arch/arm/mach-davinci/aemif.c b/arch/arm/mach-davinci/aemif.c
deleted file mode 100644
index e4ab3f3a2a1f..000000000000
--- a/arch/arm/mach-davinci/aemif.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * AEMIF support for DaVinci SoCs
- *
- * Copyright (C) 2010 Texas Instruments Incorporated. http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/time.h>
-
-#include <linux/platform_data/mtd-davinci-aemif.h>
-#include <linux/platform_data/mtd-davinci.h>
-
-/* Timing value configuration */
-
-#define TA(x) ((x) << 2)
-#define RHOLD(x) ((x) << 4)
-#define RSTROBE(x) ((x) << 7)
-#define RSETUP(x) ((x) << 13)
-#define WHOLD(x) ((x) << 17)
-#define WSTROBE(x) ((x) << 20)
-#define WSETUP(x) ((x) << 26)
-
-#define TA_MAX 0x3
-#define RHOLD_MAX 0x7
-#define RSTROBE_MAX 0x3f
-#define RSETUP_MAX 0xf
-#define WHOLD_MAX 0x7
-#define WSTROBE_MAX 0x3f
-#define WSETUP_MAX 0xf
-
-#define TIMING_MASK (TA(TA_MAX) | \
- RHOLD(RHOLD_MAX) | \
- RSTROBE(RSTROBE_MAX) | \
- RSETUP(RSETUP_MAX) | \
- WHOLD(WHOLD_MAX) | \
- WSTROBE(WSTROBE_MAX) | \
- WSETUP(WSETUP_MAX))
-
-static inline unsigned int davinci_aemif_readl(void __iomem *base, int offset)
-{
- return readl_relaxed(base + offset);
-}
-
-static inline void davinci_aemif_writel(void __iomem *base,
- int offset, unsigned long value)
-{
- writel_relaxed(value, base + offset);
-}
-
-/*
- * aemif_calc_rate - calculate timing data.
- * @wanted: The cycle time needed in nanoseconds.
- * @clk: The input clock rate in kHz.
- * @max: The maximum divider value that can be programmed.
- *
- * On success, returns the calculated timing value minus 1 for easy
- * programming into AEMIF timing registers, else negative errno.
- */
-static int aemif_calc_rate(int wanted, unsigned long clk, int max)
-{
- int result;
-
- result = DIV_ROUND_UP((wanted * clk), NSEC_PER_MSEC) - 1;
-
- pr_debug("%s: result %d from %ld, %d\n", __func__, result, clk, wanted);
-
- /* It is generally OK to have a more relaxed timing than requested... */
- if (result < 0)
- result = 0;
-
- /* ... But configuring tighter timings is not an option. */
- else if (result > max)
- result = -EINVAL;
-
- return result;
-}
-
-/**
- * davinci_aemif_setup_timing - setup timing values for a given AEMIF interface
- * @t: timing values to be programmed
- * @base: The virtual base address of the AEMIF interface
- * @cs: chip-select to program the timing values for
- * @clkrate: the AEMIF clkrate
- *
- * This function programs the given timing values (in real clock) into the
- * AEMIF registers taking the AEMIF clock into account.
- *
- * This function does not use any locking while programming the AEMIF
- * because it is expected that there is only one user of a given
- * chip-select.
- *
- * Returns 0 on success, else negative errno.
- */
-static int davinci_aemif_setup_timing(struct davinci_aemif_timing *t,
- void __iomem *base, unsigned cs,
- unsigned long clkrate)
-{
- unsigned set, val;
- int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
- unsigned offset = A1CR_OFFSET + cs * 4;
-
- if (!t)
- return 0; /* Nothing to do */
-
- clkrate /= 1000; /* turn clock into kHz for ease of use */
-
- ta = aemif_calc_rate(t->ta, clkrate, TA_MAX);
- rhold = aemif_calc_rate(t->rhold, clkrate, RHOLD_MAX);
- rstrobe = aemif_calc_rate(t->rstrobe, clkrate, RSTROBE_MAX);
- rsetup = aemif_calc_rate(t->rsetup, clkrate, RSETUP_MAX);
- whold = aemif_calc_rate(t->whold, clkrate, WHOLD_MAX);
- wstrobe = aemif_calc_rate(t->wstrobe, clkrate, WSTROBE_MAX);
- wsetup = aemif_calc_rate(t->wsetup, clkrate, WSETUP_MAX);
-
- if (ta < 0 || rhold < 0 || rstrobe < 0 || rsetup < 0 ||
- whold < 0 || wstrobe < 0 || wsetup < 0) {
- pr_err("%s: cannot get suitable timings\n", __func__);
- return -EINVAL;
- }
-
- set = TA(ta) | RHOLD(rhold) | RSTROBE(rstrobe) | RSETUP(rsetup) |
- WHOLD(whold) | WSTROBE(wstrobe) | WSETUP(wsetup);
-
- val = __raw_readl(base + offset);
- val &= ~TIMING_MASK;
- val |= set;
- __raw_writel(val, base + offset);
-
- return 0;
-}
-
-/**
- * davinci_aemif_setup - setup AEMIF interface by davinci_nand_pdata
- * @pdev - link to platform device to setup settings for
- *
- * This function does not use any locking while programming the AEMIF
- * because it is expected that there is only one user of a given
- * chip-select.
- *
- * Returns 0 on success, else negative errno.
- */
-int davinci_aemif_setup(struct platform_device *pdev)
-{
- struct davinci_nand_pdata *pdata = dev_get_platdata(&pdev->dev);
- uint32_t val;
- unsigned long clkrate;
- struct resource *res;
- void __iomem *base;
- struct clk *clk;
- int ret = 0;
-
- clk = clk_get(&pdev->dev, "aemif");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret < 0) {
- dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
- ret);
- goto err_put;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n");
- ret = -ENOMEM;
- goto err;
- }
-
- base = ioremap(res->start, resource_size(res));
- if (!base) {
- dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res);
- ret = -ENOMEM;
- goto err;
- }
-
- /*
- * Setup Async configuration register in case we did not boot
- * from NAND and so bootloader did not bother to set it up.
- */
- val = davinci_aemif_readl(base, A1CR_OFFSET + pdata->core_chipsel * 4);
- /*
- * Extended Wait is not valid and Select Strobe mode is not
- * used
- */
- val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK);
- if (pdata->options & NAND_BUSWIDTH_16)
- val |= 0x1;
-
- davinci_aemif_writel(base, A1CR_OFFSET + pdata->core_chipsel * 4, val);
-
- clkrate = clk_get_rate(clk);
-
- if (pdata->timing)
- ret = davinci_aemif_setup_timing(pdata->timing, base,
- pdata->core_chipsel, clkrate);
-
- if (ret < 0)
- dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
-
- iounmap(base);
-err:
- clk_disable_unprepare(clk);
-err_put:
- clk_put(clk);
- return ret;
-}
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 14a6fc061744..7d8ab36ff83d 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -28,6 +28,7 @@
#include <linux/platform_data/mtd-davinci-aemif.h>
#include <linux/platform_data/spi-davinci.h>
#include <linux/platform_data/usb-davinci.h>
+#include <linux/platform_data/ti-aemif.h>
#include <linux/regulator/machine.h>
#include <asm/mach-types.h>
@@ -110,15 +111,9 @@ static __init void da830_evm_usb_init(void)
{
int ret;
- /* USB_REFCLKIN is not used. */
- ret = da8xx_register_usb20_phy_clk(false);
+ ret = da8xx_register_usb_phy_clocks();
if (ret)
- pr_warn("%s: USB 2.0 PHY CLK registration failed: %d\n",
- __func__, ret);
-
- ret = da8xx_register_usb11_phy_clk(false);
- if (ret)
- pr_warn("%s: USB 1.1 PHY CLK registration failed: %d\n",
+ pr_warn("%s: USB PHY CLK registration failed: %d\n",
__func__, ret);
ret = da8xx_register_usb_phy();
@@ -339,14 +334,48 @@ static struct resource da830_evm_nand_resources[] = {
},
};
-static struct platform_device da830_evm_nand_device = {
- .name = "davinci_nand",
- .id = 1,
- .dev = {
- .platform_data = &da830_evm_nand_pdata,
+static struct platform_device da830_evm_aemif_devices[] = {
+ {
+ .name = "davinci_nand",
+ .id = 1,
+ .dev = {
+ .platform_data = &da830_evm_nand_pdata,
+ },
+ .num_resources = ARRAY_SIZE(da830_evm_nand_resources),
+ .resource = da830_evm_nand_resources,
+ },
+};
+
+static struct resource da830_evm_aemif_resource[] = {
+ {
+ .start = DA8XX_AEMIF_CTL_BASE,
+ .end = DA8XX_AEMIF_CTL_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct aemif_abus_data da830_evm_aemif_abus_data[] = {
+ {
+ .cs = 3,
},
- .num_resources = ARRAY_SIZE(da830_evm_nand_resources),
- .resource = da830_evm_nand_resources,
+};
+
+static struct aemif_platform_data da830_evm_aemif_pdata = {
+ .abus_data = da830_evm_aemif_abus_data,
+ .num_abus_data = ARRAY_SIZE(da830_evm_aemif_abus_data),
+ .sub_devices = da830_evm_aemif_devices,
+ .num_sub_devices = ARRAY_SIZE(da830_evm_aemif_devices),
+ .cs_offset = 2,
+};
+
+static struct platform_device da830_evm_aemif_device = {
+ .name = "ti-aemif",
+ .id = -1,
+ .dev = {
+ .platform_data = &da830_evm_aemif_pdata,
+ },
+ .resource = da830_evm_aemif_resource,
+ .num_resources = ARRAY_SIZE(da830_evm_aemif_resource),
};
/*
@@ -377,12 +406,9 @@ static inline void da830_evm_init_nand(int mux_mode)
if (ret)
pr_warn("%s: emif25 mux setup failed: %d\n", __func__, ret);
- ret = platform_device_register(&da830_evm_nand_device);
+ ret = platform_device_register(&da830_evm_aemif_device);
if (ret)
- pr_warn("%s: NAND device not registered\n", __func__);
-
- if (davinci_aemif_setup(&da830_evm_nand_device))
- pr_warn("%s: Cannot configure AEMIF\n", __func__);
+ pr_warn("%s: AEMIF device not registered\n", __func__);
gpio_direction_output(mux_mode, 1);
}
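
The same conversion repeats below for da850-evm, dm365-evm, dm644x-evm,
dm646x-evm, mityomapl138 and omapl138-hawk: each board wraps its NAND/NOR
children in an aemif_platform_data and registers a single "ti-aemif"
device, letting the drivers/memory/ti-aemif.c driver program the timings
that davinci_aemif_setup() used to poke by hand. From memory, the pdata
path in that driver amounts to roughly the sketch below (an approximation,
not a quote of the real probe):

    /* Approximate shape of the ti-aemif pdata handling. */
    static int aemif_register_pdata_children(struct device *dev,
                                             struct aemif_platform_data *pdata)
    {
            int i, ret;

            for (i = 0; i < pdata->num_sub_devices; i++) {
                    /* parent the child under the AEMIF controller */
                    pdata->sub_devices[i].dev.parent = dev;
                    ret = platform_device_register(&pdata->sub_devices[i]);
                    if (ret)
                            dev_warn(dev, "failed to register %s\n",
                                     pdata->sub_devices[i].name);
            }
            return 0;
    }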
@@ -557,6 +583,8 @@ static __init void da830_evm_init(void)
struct davinci_soc_info *soc_info = &davinci_soc_info;
int ret;
+ da830_register_clocks();
+
ret = da830_register_gpio();
if (ret)
pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 6d5beb11bd96..e1a949b47306 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -33,6 +33,7 @@
#include <linux/platform_data/gpio-davinci.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
+#include <linux/platform_data/ti-aemif.h>
#include <linux/platform_data/spi-davinci.h>
#include <linux/platform_data/uio_pruss.h>
#include <linux/regulator/machine.h>
@@ -185,16 +186,6 @@ static struct resource da850_evm_norflash_resource[] = {
},
};
-static struct platform_device da850_evm_norflash_device = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &da850_evm_norflash_data,
- },
- .num_resources = 1,
- .resource = da850_evm_norflash_resource,
-};
-
/* DA850/OMAP-L138 EVM includes a 512 MByte large-page NAND flash
* (128K blocks). It may be used instead of the (default) SPI flash
* to boot, using TI's tools to install the secondary boot loader
@@ -266,37 +257,58 @@ static struct resource da850_evm_nandflash_resource[] = {
},
};
-static struct platform_device da850_evm_nandflash_device = {
- .name = "davinci_nand",
- .id = 1,
- .dev = {
- .platform_data = &da850_evm_nandflash_data,
- },
- .num_resources = ARRAY_SIZE(da850_evm_nandflash_resource),
- .resource = da850_evm_nandflash_resource,
+static struct resource da850_evm_aemif_resource[] = {
+ {
+ .start = DA8XX_AEMIF_CTL_BASE,
+ .end = DA8XX_AEMIF_CTL_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ }
};
-static struct platform_device *da850_evm_devices[] = {
- &da850_evm_nandflash_device,
- &da850_evm_norflash_device,
+static struct aemif_abus_data da850_evm_aemif_abus_data[] = {
+ {
+ .cs = 3,
+ }
};
-#define DA8XX_AEMIF_CE2CFG_OFFSET 0x10
-#define DA8XX_AEMIF_ASIZE_16BIT 0x1
-
-static void __init da850_evm_init_nor(void)
-{
- void __iomem *aemif_addr;
-
- aemif_addr = ioremap(DA8XX_AEMIF_CTL_BASE, SZ_32K);
+static struct platform_device da850_evm_aemif_devices[] = {
+ {
+ .name = "davinci_nand",
+ .id = 1,
+ .dev = {
+ .platform_data = &da850_evm_nandflash_data,
+ },
+ .num_resources = ARRAY_SIZE(da850_evm_nandflash_resource),
+ .resource = da850_evm_nandflash_resource,
+ },
+ {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &da850_evm_norflash_data,
+ },
+ .num_resources = 1,
+ .resource = da850_evm_norflash_resource,
+ }
+};
- /* Configure data bus width of CS2 to 16 bit */
- writel(readl(aemif_addr + DA8XX_AEMIF_CE2CFG_OFFSET) |
- DA8XX_AEMIF_ASIZE_16BIT,
- aemif_addr + DA8XX_AEMIF_CE2CFG_OFFSET);
+static struct aemif_platform_data da850_evm_aemif_pdata = {
+ .cs_offset = 2,
+ .abus_data = da850_evm_aemif_abus_data,
+ .num_abus_data = ARRAY_SIZE(da850_evm_aemif_abus_data),
+ .sub_devices = da850_evm_aemif_devices,
+ .num_sub_devices = ARRAY_SIZE(da850_evm_aemif_devices),
+};
- iounmap(aemif_addr);
-}
+static struct platform_device da850_evm_aemif_device = {
+ .name = "ti-aemif",
+ .id = -1,
+ .dev = {
+ .platform_data = &da850_evm_aemif_pdata,
+ },
+ .resource = da850_evm_aemif_resource,
+ .num_resources = ARRAY_SIZE(da850_evm_aemif_resource),
+};
static const short da850_evm_nand_pins[] = {
DA850_EMA_D_0, DA850_EMA_D_1, DA850_EMA_D_2, DA850_EMA_D_3,
@@ -339,13 +351,10 @@ static inline void da850_evm_setup_nor_nand(void)
pr_warn("%s: NOR mux setup failed: %d\n",
__func__, ret);
- da850_evm_init_nor();
-
- platform_add_devices(da850_evm_devices,
- ARRAY_SIZE(da850_evm_devices));
-
- if (davinci_aemif_setup(&da850_evm_nandflash_device))
- pr_warn("%s: Cannot configure AEMIF.\n", __func__);
+ ret = platform_device_register(&da850_evm_aemif_device);
+ if (ret)
+ pr_warn("%s: registering aemif failed: %d\n",
+ __func__, ret);
}
}
@@ -1340,6 +1349,8 @@ static __init void da850_evm_init(void)
{
int ret;
+ da850_register_clocks();
+
ret = da850_register_gpio();
if (ret)
pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index a3377f959444..f53a461a606f 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -394,6 +394,8 @@ static __init void dm355_evm_init(void)
struct clk *aemif;
int ret;
+ dm355_register_clocks();
+
ret = dm355_gpio_register();
if (ret)
pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index 8249a0bf69f0..0fdf1d03eb11 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -234,6 +234,8 @@ static __init void dm355_leopard_init(void)
struct clk *aemif;
int ret;
+ dm355_register_clocks();
+
ret = dm355_gpio_register();
if (ret)
pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 435f7ec7d9af..8143756ff38b 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -28,6 +28,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <linux/v4l2-dv-timings.h>
+#include <linux/platform_data/ti-aemif.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -159,16 +160,49 @@ static struct resource davinci_nand_resources[] = {
},
};
-static struct platform_device davinci_nand_device = {
- .name = "davinci_nand",
- .id = 0,
- .num_resources = ARRAY_SIZE(davinci_nand_resources),
- .resource = davinci_nand_resources,
- .dev = {
- .platform_data = &davinci_nand_data,
+static struct platform_device davinci_aemif_devices[] = {
+ {
+ .name = "davinci_nand",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(davinci_nand_resources),
+ .resource = davinci_nand_resources,
+ .dev = {
+ .platform_data = &davinci_nand_data,
+ },
+ }
+};
+
+static struct resource davinci_aemif_resources[] = {
+ {
+ .start = DM365_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM365_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
},
};
+static struct aemif_abus_data da850_evm_aemif_abus_data[] = {
+ {
+ .cs = 1,
+ },
+};
+
+static struct aemif_platform_data davinci_aemif_pdata = {
+ .abus_data = da850_evm_aemif_abus_data,
+ .num_abus_data = ARRAY_SIZE(da850_evm_aemif_abus_data),
+ .sub_devices = davinci_aemif_devices,
+ .num_sub_devices = ARRAY_SIZE(davinci_aemif_devices),
+};
+
+static struct platform_device davinci_aemif_device = {
+ .name = "ti-aemif",
+ .id = -1,
+ .dev = {
+ .platform_data = &davinci_aemif_pdata,
+ },
+ .resource = davinci_aemif_resources,
+ .num_resources = ARRAY_SIZE(davinci_aemif_resources),
+};
+
static struct at24_platform_data eeprom_info = {
.byte_len = (256*1024) / 8,
.page_size = 64,
@@ -537,10 +571,6 @@ static void __init evm_init_i2c(void)
i2c_register_board_info(1, i2c_info, ARRAY_SIZE(i2c_info));
}
-static struct platform_device *dm365_evm_nand_devices[] __initdata = {
- &davinci_nand_device,
-};
-
static inline int have_leds(void)
{
#ifdef CONFIG_LEDS_CLASS
@@ -628,6 +658,7 @@ static void __init evm_init_cpld(void)
u8 mux, resets;
const char *label;
struct clk *aemif_clk;
+ int rc;
/* Make sure we can configure the CPLD through CS1. Then
* leave it on for later access to MMC and LED registers.
@@ -660,8 +691,10 @@ fail:
/* external keypad mux */
mux |= BIT(7);
- platform_add_devices(dm365_evm_nand_devices,
- ARRAY_SIZE(dm365_evm_nand_devices));
+ rc = platform_device_register(&davinci_aemif_device);
+ if (rc)
+ pr_warn("%s(): error registering the aemif device: %d\n",
+ __func__, rc);
} else {
/* no OneNAND support yet */
}
@@ -742,6 +775,8 @@ static __init void dm365_evm_init(void)
{
int ret;
+ dm365_register_clocks();
+
ret = dm365_gpio_register();
if (ret)
pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 48436f74fd71..e4a8f9225d16 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -42,6 +42,7 @@
#include <linux/platform_data/mmc-davinci.h>
#include <linux/platform_data/usb-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
+#include <linux/platform_data/ti-aemif.h>
#include "davinci.h"
@@ -174,14 +175,47 @@ static struct resource davinci_evm_nandflash_resource[] = {
},
};
-static struct platform_device davinci_evm_nandflash_device = {
- .name = "davinci_nand",
- .id = 0,
- .dev = {
- .platform_data = &davinci_evm_nandflash_data,
+static struct resource davinci_evm_aemif_resource[] = {
+ {
+ .start = DM644X_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM644X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct aemif_abus_data davinci_evm_aemif_abus_data[] = {
+ {
+ .cs = 1,
+ },
+};
+
+static struct platform_device davinci_evm_nandflash_devices[] = {
+ {
+ .name = "davinci_nand",
+ .id = 0,
+ .dev = {
+ .platform_data = &davinci_evm_nandflash_data,
+ },
+ .num_resources = ARRAY_SIZE(davinci_evm_nandflash_resource),
+ .resource = davinci_evm_nandflash_resource,
+ },
+};
+
+static struct aemif_platform_data davinci_evm_aemif_pdata = {
+ .abus_data = davinci_evm_aemif_abus_data,
+ .num_abus_data = ARRAY_SIZE(davinci_evm_aemif_abus_data),
+ .sub_devices = davinci_evm_nandflash_devices,
+ .num_sub_devices = ARRAY_SIZE(davinci_evm_nandflash_devices),
+};
+
+static struct platform_device davinci_evm_aemif_device = {
+ .name = "ti-aemif",
+ .id = -1,
+ .dev = {
+ .platform_data = &davinci_evm_aemif_pdata,
},
- .num_resources = ARRAY_SIZE(davinci_evm_nandflash_resource),
- .resource = davinci_evm_nandflash_resource,
+ .resource = davinci_evm_aemif_resource,
+ .num_resources = ARRAY_SIZE(davinci_evm_aemif_resource),
};
static u64 davinci_fb_dma_mask = DMA_BIT_MASK(32);
@@ -773,6 +807,8 @@ static __init void davinci_evm_init(void)
struct clk *aemif_clk;
struct davinci_soc_info *soc_info = &davinci_soc_info;
+ dm644x_register_clocks();
+
dm644x_init_devices();
ret = dm644x_gpio_register();
@@ -793,12 +829,7 @@ static __init void davinci_evm_init(void)
/* only one device will be jumpered and detected */
if (HAS_NAND) {
- platform_device_register(&davinci_evm_nandflash_device);
-
- if (davinci_aemif_setup(&davinci_evm_nandflash_device))
- pr_warn("%s: Cannot configure AEMIF\n",
- __func__);
-
+ platform_device_register(&davinci_evm_aemif_device);
#ifdef CONFIG_I2C
evm_leds[7].default_trigger = "nand-disk";
#endif
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 584064fdabf5..3e5ee09ee717 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -24,6 +24,7 @@
#include <linux/i2c.h>
#include <linux/platform_data/at24.h>
#include <linux/platform_data/pcf857x.h>
+#include <linux/platform_data/ti-aemif.h>
#include <media/i2c/tvp514x.h>
#include <media/i2c/adv7343.h>
@@ -106,18 +107,49 @@ static struct resource davinci_nand_resources[] = {
},
};
-static struct platform_device davinci_nand_device = {
- .name = "davinci_nand",
- .id = 0,
+static struct platform_device davinci_aemif_devices[] = {
+ {
+ .name = "davinci_nand",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(davinci_nand_resources),
+ .resource = davinci_nand_resources,
+ .dev = {
+ .platform_data = &davinci_nand_data,
+ },
+ },
+};
- .num_resources = ARRAY_SIZE(davinci_nand_resources),
- .resource = davinci_nand_resources,
+static struct resource davinci_aemif_resources[] = {
+ {
+ .start = DM646X_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM646X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
- .dev = {
- .platform_data = &davinci_nand_data,
+static struct aemif_abus_data davinci_aemif_abus_data[] = {
+ {
+ .cs = 1,
},
};
+static struct aemif_platform_data davinci_aemif_pdata = {
+ .abus_data = davinci_aemif_abus_data,
+ .num_abus_data = ARRAY_SIZE(davinci_aemif_abus_data),
+ .sub_devices = davinci_aemif_devices,
+ .num_sub_devices = ARRAY_SIZE(davinci_aemif_devices),
+};
+
+static struct platform_device davinci_aemif_device = {
+ .name = "ti-aemif",
+ .id = -1,
+ .dev = {
+ .platform_data = &davinci_aemif_pdata,
+ },
+ .resource = davinci_aemif_resources,
+ .num_resources = ARRAY_SIZE(davinci_aemif_resources),
+};
+
#define HAS_ATA (IS_ENABLED(CONFIG_BLK_DEV_PALMCHIP_BK3710) || \
IS_ENABLED(CONFIG_PATA_BK3710))
@@ -776,6 +808,8 @@ static __init void evm_init(void)
int ret;
struct davinci_soc_info *soc_info = &davinci_soc_info;
+ dm646x_register_clocks();
+
ret = dm646x_gpio_register();
if (ret)
pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
@@ -791,10 +825,8 @@ static __init void evm_init(void)
if (machine_is_davinci_dm6467tevm())
davinci_nand_data.timing = &dm6467tevm_nandflash_timing;
- platform_device_register(&davinci_nand_device);
-
- if (davinci_aemif_setup(&davinci_nand_device))
- pr_warn("%s: Cannot configure AEMIF.\n", __func__);
+ if (platform_device_register(&davinci_aemif_device))
+ pr_warn("%s: Cannot register AEMIF device.\n", __func__);
dm646x_init_edma(dm646x_edma_rsv);
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 37b3e48a21d1..2933e0c87cfa 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -30,6 +30,7 @@
#include <mach/da8xx.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
+#include <linux/platform_data/ti-aemif.h>
#include <mach/mux.h>
#include <linux/platform_data/spi-davinci.h>
@@ -422,27 +423,53 @@ static struct resource mityomapl138_nandflash_resource[] = {
},
};
-static struct platform_device mityomapl138_nandflash_device = {
- .name = "davinci_nand",
- .id = 1,
- .dev = {
- .platform_data = &mityomapl138_nandflash_data,
+static struct platform_device mityomapl138_aemif_devices[] = {
+ {
+ .name = "davinci_nand",
+ .id = 1,
+ .dev = {
+ .platform_data = &mityomapl138_nandflash_data,
+ },
+ .num_resources = ARRAY_SIZE(mityomapl138_nandflash_resource),
+ .resource = mityomapl138_nandflash_resource,
+ },
+};
+
+static struct resource mityomapl138_aemif_resources[] = {
+ {
+ .start = DA8XX_AEMIF_CTL_BASE,
+ .end = DA8XX_AEMIF_CTL_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
},
- .num_resources = ARRAY_SIZE(mityomapl138_nandflash_resource),
- .resource = mityomapl138_nandflash_resource,
};
-static struct platform_device *mityomapl138_devices[] __initdata = {
- &mityomapl138_nandflash_device,
+static struct aemif_abus_data mityomapl138_aemif_abus_data[] = {
+ {
+ .cs = 1,
+ },
+};
+
+static struct aemif_platform_data mityomapl138_aemif_pdata = {
+ .abus_data = mityomapl138_aemif_abus_data,
+ .num_abus_data = ARRAY_SIZE(mityomapl138_aemif_abus_data),
+ .sub_devices = mityomapl138_aemif_devices,
+ .num_sub_devices = ARRAY_SIZE(mityomapl138_aemif_devices),
+};
+
+static struct platform_device mityomapl138_aemif_device = {
+ .name = "ti-aemif",
+ .id = -1,
+ .dev = {
+ .platform_data = &mityomapl138_aemif_pdata,
+ },
+ .resource = mityomapl138_aemif_resources,
+ .num_resources = ARRAY_SIZE(mityomapl138_aemif_resources),
};
static void __init mityomapl138_setup_nand(void)
{
- platform_add_devices(mityomapl138_devices,
- ARRAY_SIZE(mityomapl138_devices));
-
- if (davinci_aemif_setup(&mityomapl138_nandflash_device))
- pr_warn("%s: Cannot configure AEMIF\n", __func__);
+ if (platform_device_register(&mityomapl138_aemif_device))
+ pr_warn("%s: Cannot register AEMIF device\n", __func__);
}
static const short mityomap_mii_pins[] = {
@@ -503,6 +530,8 @@ static void __init mityomapl138_init(void)
{
int ret;
+ da850_register_clocks();
+
/* for now, no special EDMA channels are reserved */
ret = da850_register_edma(NULL);
if (ret)
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 25ad9b0612be..353f9e5a1454 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -175,6 +175,8 @@ static __init void davinci_ntosd2_init(void)
struct clk *aemif_clk;
struct davinci_soc_info *soc_info = &davinci_soc_info;
+ dm644x_register_clocks();
+
dm644x_init_devices();
ret = dm644x_gpio_register();
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index be8b892a6ea7..8e8d51f4a276 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -15,7 +15,12 @@
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
#include <linux/platform_data/gpio-davinci.h>
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
+#include <linux/platform_data/ti-aemif.h>
#include <linux/regulator/machine.h>
#include <asm/mach-types.h>
@@ -166,6 +171,129 @@ mmc_setup_mmcsd_fail:
gpiod_remove_lookup_table(&mmc_gpios_table);
}
+static struct mtd_partition omapl138_hawk_nandflash_partition[] = {
+ {
+ .name = "u-boot env",
+ .offset = 0,
+ .size = SZ_128K,
+ .mask_flags = MTD_WRITEABLE,
+ },
+ {
+ .name = "u-boot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_512K,
+ .mask_flags = MTD_WRITEABLE,
+ },
+ {
+ .name = "free space",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0,
+ },
+};
+
+static struct davinci_aemif_timing omapl138_hawk_nandflash_timing = {
+ .wsetup = 24,
+ .wstrobe = 21,
+ .whold = 14,
+ .rsetup = 19,
+ .rstrobe = 50,
+ .rhold = 0,
+ .ta = 20,
+};
+
+static struct davinci_nand_pdata omapl138_hawk_nandflash_data = {
+ .core_chipsel = 1,
+ .parts = omapl138_hawk_nandflash_partition,
+ .nr_parts = ARRAY_SIZE(omapl138_hawk_nandflash_partition),
+ .ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 4,
+ .bbt_options = NAND_BBT_USE_FLASH,
+ .options = NAND_BUSWIDTH_16,
+ .timing = &omapl138_hawk_nandflash_timing,
+ .mask_chipsel = 0,
+ .mask_ale = 0,
+ .mask_cle = 0,
+};
+
+static struct resource omapl138_hawk_nandflash_resource[] = {
+ {
+ .start = DA8XX_AEMIF_CS3_BASE,
+ .end = DA8XX_AEMIF_CS3_BASE + SZ_32M - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = DA8XX_AEMIF_CTL_BASE,
+ .end = DA8XX_AEMIF_CTL_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource omapl138_hawk_aemif_resource[] = {
+ {
+ .start = DA8XX_AEMIF_CTL_BASE,
+ .end = DA8XX_AEMIF_CTL_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct aemif_abus_data omapl138_hawk_aemif_abus_data[] = {
+ {
+ .cs = 3,
+ }
+};
+
+static struct platform_device omapl138_hawk_aemif_devices[] = {
+ {
+ .name = "davinci_nand",
+ .id = -1,
+ .dev = {
+ .platform_data = &omapl138_hawk_nandflash_data,
+ },
+ .resource = omapl138_hawk_nandflash_resource,
+ .num_resources = ARRAY_SIZE(omapl138_hawk_nandflash_resource),
+ }
+};
+
+static struct aemif_platform_data omapl138_hawk_aemif_pdata = {
+ .cs_offset = 2,
+ .abus_data = omapl138_hawk_aemif_abus_data,
+ .num_abus_data = ARRAY_SIZE(omapl138_hawk_aemif_abus_data),
+ .sub_devices = omapl138_hawk_aemif_devices,
+ .num_sub_devices = ARRAY_SIZE(omapl138_hawk_aemif_devices),
+};
+
+static struct platform_device omapl138_hawk_aemif_device = {
+ .name = "ti-aemif",
+ .id = -1,
+ .dev = {
+ .platform_data = &omapl138_hawk_aemif_pdata,
+ },
+ .resource = omapl138_hawk_aemif_resource,
+ .num_resources = ARRAY_SIZE(omapl138_hawk_aemif_resource),
+};
+
+static const short omapl138_hawk_nand_pins[] = {
+ DA850_EMA_WAIT_1, DA850_NEMA_OE, DA850_NEMA_WE, DA850_NEMA_CS_3,
+ DA850_EMA_D_0, DA850_EMA_D_1, DA850_EMA_D_2, DA850_EMA_D_3,
+ DA850_EMA_D_4, DA850_EMA_D_5, DA850_EMA_D_6, DA850_EMA_D_7,
+ DA850_EMA_D_8, DA850_EMA_D_9, DA850_EMA_D_10, DA850_EMA_D_11,
+ DA850_EMA_D_12, DA850_EMA_D_13, DA850_EMA_D_14, DA850_EMA_D_15,
+ DA850_EMA_A_1, DA850_EMA_A_2,
+ -1
+};
+
+static int omapl138_hawk_register_aemif(void)
+{
+ int ret;
+
+ ret = davinci_cfg_reg_list(omapl138_hawk_nand_pins);
+ if (ret)
+ pr_warn("%s: NAND mux setup failed: %d\n", __func__, ret);
+
+ return platform_device_register(&omapl138_hawk_aemif_device);
+}
+
static irqreturn_t omapl138_hawk_usb_ocic_irq(int irq, void *dev_id);
static da8xx_ocic_handler_t hawk_usb_ocic_handler;
@@ -236,14 +364,9 @@ static __init void omapl138_hawk_usb_init(void)
return;
}
- ret = da8xx_register_usb20_phy_clk(false);
- if (ret)
- pr_warn("%s: USB 2.0 PHY CLK registration failed: %d\n",
- __func__, ret);
-
- ret = da8xx_register_usb11_phy_clk(false);
+ ret = da8xx_register_usb_phy_clocks();
if (ret)
- pr_warn("%s: USB 1.1 PHY CLK registration failed: %d\n",
+ pr_warn("%s: USB PHY CLK registration failed: %d\n",
__func__, ret);
ret = da8xx_register_usb_phy();
@@ -285,6 +408,8 @@ static __init void omapl138_hawk_init(void)
{
int ret;
+ da850_register_clocks();
+
ret = da850_register_gpio();
if (ret)
pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
@@ -301,6 +426,10 @@ static __init void omapl138_hawk_init(void)
omapl138_hawk_usb_init();
+ ret = omapl138_hawk_register_aemif();
+ if (ret)
+ pr_warn("%s: aemif registration failed: %d\n", __func__, ret);
+
ret = da8xx_register_watchdog();
if (ret)
pr_warn("%s: watchdog registration failed: %d\n",
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index e7c1728b0833..792bb84d5011 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -134,6 +134,8 @@ static __init void davinci_sffsdr_init(void)
{
struct davinci_soc_info *soc_info = &davinci_soc_info;
+ dm644x_register_clocks();
+
dm644x_init_devices();
platform_add_devices(davinci_sffsdr_devices,
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
deleted file mode 100644
index f77a4f766050..000000000000
--- a/arch/arm/mach-davinci/clock.c
+++ /dev/null
@@ -1,745 +0,0 @@
-/*
- * Clock and PLL control for DaVinci devices
- *
- * Copyright (C) 2006-2007 Texas Instruments.
- * Copyright (C) 2008-2009 Deep Root Systems, LLC
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-
-#include <mach/hardware.h>
-
-#include <mach/clock.h>
-#include "psc.h"
-#include <mach/cputype.h>
-#include "clock.h"
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-static DEFINE_SPINLOCK(clockfw_lock);
-
-void davinci_clk_enable(struct clk *clk)
-{
- if (clk->parent)
- davinci_clk_enable(clk->parent);
- if (clk->usecount++ == 0) {
- if (clk->flags & CLK_PSC)
- davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
- true, clk->flags);
- else if (clk->clk_enable)
- clk->clk_enable(clk);
- }
-}
-
-void davinci_clk_disable(struct clk *clk)
-{
- if (WARN_ON(clk->usecount == 0))
- return;
- if (--clk->usecount == 0) {
- if (!(clk->flags & CLK_PLL) && (clk->flags & CLK_PSC))
- davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
- false, clk->flags);
- else if (clk->clk_disable)
- clk->clk_disable(clk);
- }
- if (clk->parent)
- davinci_clk_disable(clk->parent);
-}
-
-int davinci_clk_reset(struct clk *clk, bool reset)
-{
- unsigned long flags;
-
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- if (clk->flags & CLK_PSC)
- davinci_psc_reset(clk->gpsc, clk->lpsc, reset);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(davinci_clk_reset);
-
-int davinci_clk_reset_assert(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk) || !clk->reset)
- return -EINVAL;
-
- return clk->reset(clk, true);
-}
-EXPORT_SYMBOL(davinci_clk_reset_assert);
-
-int davinci_clk_reset_deassert(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk) || !clk->reset)
- return -EINVAL;
-
- return clk->reset(clk, false);
-}
-EXPORT_SYMBOL(davinci_clk_reset_deassert);
-
-int clk_enable(struct clk *clk)
-{
- unsigned long flags;
-
- if (!clk)
- return 0;
- else if (IS_ERR(clk))
- return -EINVAL;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- davinci_clk_enable(clk);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
-
- if (clk == NULL || IS_ERR(clk))
- return;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- davinci_clk_disable(clk);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return 0;
-
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- if (clk == NULL || IS_ERR(clk))
- return 0;
-
- if (clk->round_rate)
- return clk->round_rate(clk, rate);
-
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-/* Propagate rate to children */
-static void propagate_rate(struct clk *root)
-{
- struct clk *clk;
-
- list_for_each_entry(clk, &root->children, childnode) {
- if (clk->recalc)
- clk->rate = clk->recalc(clk);
- propagate_rate(clk);
- }
-}
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long flags;
- int ret = -EINVAL;
-
- if (!clk)
- return 0;
- else if (IS_ERR(clk))
- return -EINVAL;
-
- if (clk->set_rate)
- ret = clk->set_rate(clk, rate);
-
- spin_lock_irqsave(&clockfw_lock, flags);
- if (ret == 0) {
- if (clk->recalc)
- clk->rate = clk->recalc(clk);
- propagate_rate(clk);
- }
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- unsigned long flags;
-
- if (!clk)
- return 0;
- else if (IS_ERR(clk))
- return -EINVAL;
-
- /* Cannot change parent on enabled clock */
- if (WARN_ON(clk->usecount))
- return -EINVAL;
-
- mutex_lock(&clocks_mutex);
- if (clk->set_parent) {
- int ret = clk->set_parent(clk, parent);
-
- if (ret) {
- mutex_unlock(&clocks_mutex);
- return ret;
- }
- }
- clk->parent = parent;
- list_del_init(&clk->childnode);
- list_add(&clk->childnode, &clk->parent->children);
- mutex_unlock(&clocks_mutex);
-
- spin_lock_irqsave(&clockfw_lock, flags);
- if (clk->recalc)
- clk->rate = clk->recalc(clk);
- propagate_rate(clk);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-struct clk *clk_get_parent(struct clk *clk)
-{
- if (!clk)
- return NULL;
-
- return clk->parent;
-}
-EXPORT_SYMBOL(clk_get_parent);
-
-int clk_register(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- if (WARN(clk->parent && !clk->parent->rate,
- "CLK: %s parent %s has no rate!\n",
- clk->name, clk->parent->name))
- return -EINVAL;
-
- INIT_LIST_HEAD(&clk->children);
-
- mutex_lock(&clocks_mutex);
- list_add_tail(&clk->node, &clocks);
- if (clk->parent) {
- if (clk->set_parent) {
- int ret = clk->set_parent(clk, clk->parent);
-
- if (ret) {
- mutex_unlock(&clocks_mutex);
- return ret;
- }
- }
- list_add_tail(&clk->childnode, &clk->parent->children);
- }
- mutex_unlock(&clocks_mutex);
-
- /* If rate is already set, use it */
- if (clk->rate)
- return 0;
-
- /* Else, see if there is a way to calculate it */
- if (clk->recalc)
- clk->rate = clk->recalc(clk);
-
- /* Otherwise, default to parent rate */
- else if (clk->parent)
- clk->rate = clk->parent->rate;
-
- return 0;
-}
-EXPORT_SYMBOL(clk_register);
-
-void clk_unregister(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return;
-
- mutex_lock(&clocks_mutex);
- list_del(&clk->node);
- list_del(&clk->childnode);
- mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clk_unregister);
-
-#ifdef CONFIG_DAVINCI_RESET_CLOCKS
-/*
- * Disable any unused clocks left on by the bootloader
- */
-int __init davinci_clk_disable_unused(void)
-{
- struct clk *ck;
-
- spin_lock_irq(&clockfw_lock);
- list_for_each_entry(ck, &clocks, node) {
- if (ck->usecount > 0)
- continue;
- if (!(ck->flags & CLK_PSC))
- continue;
-
- /* ignore if in Disabled or SwRstDisable states */
- if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
- continue;
-
- pr_debug("Clocks: disable unused %s\n", ck->name);
-
- davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
- false, ck->flags);
- }
- spin_unlock_irq(&clockfw_lock);
-
- return 0;
-}
-#endif
-
-static unsigned long clk_sysclk_recalc(struct clk *clk)
-{
- u32 v, plldiv;
- struct pll_data *pll;
- unsigned long rate = clk->rate;
-
- /* If this is the PLL base clock, no more calculations needed */
- if (clk->pll_data)
- return rate;
-
- if (WARN_ON(!clk->parent))
- return rate;
-
- rate = clk->parent->rate;
-
- /* Otherwise, the parent must be a PLL */
- if (WARN_ON(!clk->parent->pll_data))
- return rate;
-
- pll = clk->parent->pll_data;
-
- /* If pre-PLL, source clock is before the multiplier and divider(s) */
- if (clk->flags & PRE_PLL)
- rate = pll->input_rate;
-
- if (!clk->div_reg)
- return rate;
-
- v = __raw_readl(pll->base + clk->div_reg);
- if (v & PLLDIV_EN) {
- plldiv = (v & pll->div_ratio_mask) + 1;
- if (plldiv)
- rate /= plldiv;
- }
-
- return rate;
-}
-
-int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
-{
- unsigned v;
- struct pll_data *pll;
- unsigned long input;
- unsigned ratio = 0;
-
- /* If this is the PLL base clock, wrong function to call */
- if (clk->pll_data)
- return -EINVAL;
-
- /* There must be a parent... */
- if (WARN_ON(!clk->parent))
- return -EINVAL;
-
- /* ... the parent must be a PLL... */
- if (WARN_ON(!clk->parent->pll_data))
- return -EINVAL;
-
- /* ... and this clock must have a divider. */
- if (WARN_ON(!clk->div_reg))
- return -EINVAL;
-
- pll = clk->parent->pll_data;
-
- input = clk->parent->rate;
-
- /* If pre-PLL, source clock is before the multiplier and divider(s) */
- if (clk->flags & PRE_PLL)
- input = pll->input_rate;
-
- if (input > rate) {
- /*
- * We can afford to provide an output slightly higher than
- * requested only if the maximum rate supported by the hardware
- * on this sysclk is known.
- */
- if (clk->maxrate) {
- ratio = DIV_ROUND_CLOSEST(input, rate);
- if (input / ratio > clk->maxrate)
- ratio = 0;
- }
-
- if (ratio == 0)
- ratio = DIV_ROUND_UP(input, rate);
-
- ratio--;
- }
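- /* e.g. input 300 MHz, rate 100 MHz: divider 3, programmed as
-  * ratio 2, since the PLLDIV register holds (divider - 1) */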
-
- if (ratio > pll->div_ratio_mask)
- return -EINVAL;
-
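- /* Wait out any pending GO operation, program the new ratio, then
-  * kick PLLCMD.GOSET and wait for the transition to complete. */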
- do {
- v = __raw_readl(pll->base + PLLSTAT);
- } while (v & PLLSTAT_GOSTAT);
-
- v = __raw_readl(pll->base + clk->div_reg);
- v &= ~pll->div_ratio_mask;
- v |= ratio | PLLDIV_EN;
- __raw_writel(v, pll->base + clk->div_reg);
-
- v = __raw_readl(pll->base + PLLCMD);
- v |= PLLCMD_GOSET;
- __raw_writel(v, pll->base + PLLCMD);
-
- do {
- v = __raw_readl(pll->base + PLLSTAT);
- } while (v & PLLSTAT_GOSTAT);
-
- return 0;
-}
-EXPORT_SYMBOL(davinci_set_sysclk_rate);
-
-static unsigned long clk_leafclk_recalc(struct clk *clk)
-{
- if (WARN_ON(!clk->parent))
- return clk->rate;
-
- return clk->parent->rate;
-}
-
-int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
-{
- clk->rate = rate;
- return 0;
-}
-
-static unsigned long clk_pllclk_recalc(struct clk *clk)
-{
- u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
- u8 bypass;
- struct pll_data *pll = clk->pll_data;
- unsigned long rate = clk->rate;
-
- ctrl = __raw_readl(pll->base + PLLCTL);
- rate = pll->input_rate = clk->parent->rate;
-
- if (ctrl & PLLCTL_PLLEN) {
- bypass = 0;
- mult = __raw_readl(pll->base + PLLM);
- if (cpu_is_davinci_dm365())
- mult = 2 * (mult & PLLM_PLLM_MASK);
- else
- mult = (mult & PLLM_PLLM_MASK) + 1;
- } else
- bypass = 1;
-
- if (pll->flags & PLL_HAS_PREDIV) {
- prediv = __raw_readl(pll->base + PREDIV);
- if (prediv & PLLDIV_EN)
- prediv = (prediv & pll->div_ratio_mask) + 1;
- else
- prediv = 1;
- }
-
- /* pre-divider is fixed, but (some?) chips won't report that */
- if (cpu_is_davinci_dm355() && pll->num == 1)
- prediv = 8;
-
- if (pll->flags & PLL_HAS_POSTDIV) {
- postdiv = __raw_readl(pll->base + POSTDIV);
- if (postdiv & PLLDIV_EN)
- postdiv = (postdiv & pll->div_ratio_mask) + 1;
- else
- postdiv = 1;
- }
-
- if (!bypass) {
- rate /= prediv;
- rate *= mult;
- rate /= postdiv;
- }
-
- pr_debug("PLL%d: input = %lu MHz [ ",
- pll->num, clk->parent->rate / 1000000);
- if (bypass)
- pr_debug("bypass ");
- if (prediv > 1)
- pr_debug("/ %d ", prediv);
- if (mult > 1)
- pr_debug("* %d ", mult);
- if (postdiv > 1)
- pr_debug("/ %d ", postdiv);
- pr_debug("] --> %lu MHz output.\n", rate / 1000000);
-
- return rate;
-}
-
-/**
- * davinci_set_pllrate - set the output rate of a given PLL.
- *
- * Note: Currently tested to work with OMAP-L138 only.
- *
- * @pll: pll whose rate needs to be changed.
- * @prediv: The pre divider value. Passing 0 disables the pre-divider.
- * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
- * @postdiv: The post divider value. Passing 0 disables the post-divider.
- */
-int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
- unsigned int mult, unsigned int postdiv)
-{
- u32 ctrl;
- unsigned int locktime;
- unsigned long flags;
-
- if (pll->base == NULL)
- return -EINVAL;
-
- /*
- * PLL lock time required per OMAP-L138 datasheet is
- * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
- * as 4 and the OSCIN rate as 25 MHz.
- */
- if (prediv) {
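- /* (2000 * prediv) cycles / (4 * 25 MHz) yields microseconds */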
- locktime = ((2000 * prediv) / 100);
- prediv = (prediv - 1) | PLLDIV_EN;
- } else {
- locktime = PLL_LOCK_TIME;
- }
- if (postdiv)
- postdiv = (postdiv - 1) | PLLDIV_EN;
- if (mult)
- mult = mult - 1;
-
- /* Protect against simultaneous calls to PLL setting sequence */
- spin_lock_irqsave(&clockfw_lock, flags);
-
- ctrl = __raw_readl(pll->base + PLLCTL);
-
- /* Switch the PLL to bypass mode */
- ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
- __raw_writel(ctrl, pll->base + PLLCTL);
-
- udelay(PLL_BYPASS_TIME);
-
- /* Reset and enable PLL */
- ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
- __raw_writel(ctrl, pll->base + PLLCTL);
-
- if (pll->flags & PLL_HAS_PREDIV)
- __raw_writel(prediv, pll->base + PREDIV);
-
- __raw_writel(mult, pll->base + PLLM);
-
- if (pll->flags & PLL_HAS_POSTDIV)
- __raw_writel(postdiv, pll->base + POSTDIV);
-
- udelay(PLL_RESET_TIME);
-
- /* Bring PLL out of reset */
- ctrl |= PLLCTL_PLLRST;
- __raw_writel(ctrl, pll->base + PLLCTL);
-
- udelay(locktime);
-
- /* Remove PLL from bypass mode */
- ctrl |= PLLCTL_PLLEN;
- __raw_writel(ctrl, pll->base + PLLCTL);
-
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(davinci_set_pllrate);
-
-/**
- * davinci_set_refclk_rate() - Set the reference clock rate
- * @rate: The new rate.
- *
- * Sets the reference clock rate to a given value. This will most likely
- * result in the entire clock tree getting updated.
- *
- * This is used to support boards which use a reference clock different
- * than that used by default in <soc>.c file. The reference clock rate
- * should be updated early in the boot process; ideally soon after the
- * clock tree has been initialized once with the default reference clock
- * rate (davinci_clk_init()).
- *
- * Returns 0 on success, error otherwise.
- */
-int davinci_set_refclk_rate(unsigned long rate)
-{
- struct clk *refclk;
- int ret;
-
- refclk = clk_get(NULL, "ref");
- if (IS_ERR(refclk)) {
- pr_err("%s: failed to get reference clock\n", __func__);
- return PTR_ERR(refclk);
- }
-
- ret = clk_set_rate(refclk, rate);
-
- clk_put(refclk);
-
- return ret;
-}
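-/*
- * Example (hypothetical board code): a board fitted with a 25 MHz
- * oscillator instead of the SoC default would call, early in its
- * machine init:
- *
- *	davinci_set_refclk_rate(25000000);
- */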
-
-int __init davinci_clk_init(struct clk_lookup *clocks)
-{
- struct clk_lookup *c;
- struct clk *clk;
- size_t num_clocks = 0;
-
- for (c = clocks; c->clk; c++) {
- clk = c->clk;
-
- if (!clk->recalc) {
-
- /* Check if clock is a PLL */
- if (clk->pll_data)
- clk->recalc = clk_pllclk_recalc;
-
- /* Else, if it is a PLL-derived clock */
- else if (clk->flags & CLK_PLL)
- clk->recalc = clk_sysclk_recalc;
-
- /* Otherwise, it is a leaf clock (PSC clock) */
- else if (clk->parent)
- clk->recalc = clk_leafclk_recalc;
- }
-
- if (clk->pll_data) {
- struct pll_data *pll = clk->pll_data;
-
- if (!pll->div_ratio_mask)
- pll->div_ratio_mask = PLLDIV_RATIO_MASK;
-
- if (pll->phys_base && !pll->base) {
- pll->base = ioremap(pll->phys_base, SZ_4K);
- WARN_ON(!pll->base);
- }
- }
-
- if (clk->recalc)
- clk->rate = clk->recalc(clk);
-
- if (clk->lpsc)
- clk->flags |= CLK_PSC;
-
- if (clk->flags & PSC_LRST)
- clk->reset = davinci_clk_reset;
-
- clk_register(clk);
- num_clocks++;
-
- /* Turn on clocks that Linux doesn't otherwise manage */
- if (clk->flags & ALWAYS_ENABLED)
- clk_enable(clk);
- }
-
- clkdev_add_table(clocks, num_clocks);
-
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#define CLKNAME_MAX 10 /* longest clock name */
-#define NEST_DELTA 2
-#define NEST_MAX 4
-
-static void
-dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
-{
- char *state;
- char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
- struct clk *clk;
- unsigned i;
-
- if (parent->flags & CLK_PLL)
- state = "pll";
- else if (parent->flags & CLK_PSC)
- state = "psc";
- else
- state = "";
-
- /* <nest spaces> name <pad to end> */
- memset(buf, ' ', sizeof(buf) - 1);
- buf[sizeof(buf) - 1] = 0;
- i = strlen(parent->name);
- memcpy(buf + nest, parent->name,
- min(i, (unsigned)(sizeof(buf) - 1 - nest)));
-
- seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
- buf, parent->usecount, state, clk_get_rate(parent));
- /* REVISIT show device associations too */
-
- /* cost is now small, but not linear... */
- list_for_each_entry(clk, &parent->children, childnode) {
- dump_clock(s, nest + NEST_DELTA, clk);
- }
-}
-
-static int davinci_ck_show(struct seq_file *m, void *v)
-{
- struct clk *clk;
-
- /*
- * Show the clock tree; we trust nonzero usecounts equate to PSC enables...
- */
- mutex_lock(&clocks_mutex);
- list_for_each_entry(clk, &clocks, node)
- if (!clk->parent)
- dump_clock(m, 0, clk);
- mutex_unlock(&clocks_mutex);
-
- return 0;
-}
-
-static int davinci_ck_open(struct inode *inode, struct file *file)
-{
- return single_open(file, davinci_ck_show, NULL);
-}
-
-static const struct file_operations davinci_ck_operations = {
- .open = davinci_ck_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init davinci_clk_debugfs_init(void)
-{
- debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
- &davinci_ck_operations);
- return 0;
-}
-device_initcall(davinci_clk_debugfs_init);
-#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index d7894d5aaa25..307383472400 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -12,10 +12,6 @@
#ifndef __ARCH_ARM_DAVINCI_CLOCK_H
#define __ARCH_ARM_DAVINCI_CLOCK_H
-#define DAVINCI_PLL1_BASE 0x01c40800
-#define DAVINCI_PLL2_BASE 0x01c40c00
-#define MAX_PLL 2
-
/* PLL/Reset register offsets */
#define PLLCTL 0x100
#define PLLCTL_PLLEN BIT(0)
@@ -65,76 +61,4 @@
*/
#define PLL_LOCK_TIME 20
-#ifndef __ASSEMBLER__
-
-#include <linux/list.h>
-#include <linux/clkdev.h>
-
-#define PLLSTAT_GOSTAT BIT(0)
-#define PLLCMD_GOSET BIT(0)
-
-struct pll_data {
- u32 phys_base;
- void __iomem *base;
- u32 num;
- u32 flags;
- u32 input_rate;
- u32 div_ratio_mask;
-};
-#define PLL_HAS_PREDIV 0x01
-#define PLL_HAS_POSTDIV 0x02
-
-struct clk {
- struct list_head node;
- struct module *owner;
- const char *name;
- unsigned long rate;
- unsigned long maxrate; /* H/W supported max rate */
- u8 usecount;
- u8 lpsc;
- u8 gpsc;
- u8 domain;
- u32 flags;
- struct clk *parent;
- struct list_head children; /* list of children */
- struct list_head childnode; /* parent's child list node */
- struct pll_data *pll_data;
- u32 div_reg;
- unsigned long (*recalc) (struct clk *);
- int (*set_rate) (struct clk *clk, unsigned long rate);
- int (*round_rate) (struct clk *clk, unsigned long rate);
- int (*reset) (struct clk *clk, bool reset);
- void (*clk_enable) (struct clk *clk);
- void (*clk_disable) (struct clk *clk);
- int (*set_parent) (struct clk *clk, struct clk *parent);
-};
-
-/* Clock flags: SoC-specific flags start at BIT(16) */
-#define ALWAYS_ENABLED BIT(1)
-#define CLK_PSC BIT(2)
-#define CLK_PLL BIT(3) /* PLL-derived clock */
-#define PRE_PLL BIT(4) /* source is before PLL mult/div */
-#define PSC_SWRSTDISABLE BIT(5) /* Disable state is SwRstDisable */
-#define PSC_FORCE BIT(6) /* Force module state transition */
-#define PSC_LRST BIT(8) /* Use local reset on enable/disable */
-
-#define CLK(dev, con, ck) \
- { \
- .dev_id = dev, \
- .con_id = con, \
- .clk = ck, \
- } \
-
-int davinci_clk_init(struct clk_lookup *clocks);
-int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
- unsigned int mult, unsigned int postdiv);
-int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate);
-int davinci_set_refclk_rate(unsigned long rate);
-int davinci_simple_set_rate(struct clk *clk, unsigned long rate);
-int davinci_clk_reset(struct clk *clk, bool reset);
-void davinci_clk_enable(struct clk *clk);
-void davinci_clk_disable(struct clk *clk);
-
-#endif
-
#endif
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index bcb6a7ba84e9..e1d0f0d841ff 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -20,8 +20,6 @@
#include <mach/common.h>
#include <mach/cputype.h>
-#include "clock.h"
-
struct davinci_soc_info davinci_soc_info;
EXPORT_SYMBOL(davinci_soc_info);
@@ -118,5 +116,4 @@ err:
void __init davinci_init_late(void)
{
davinci_cpufreq_init();
- davinci_clk_disable_unused();
}
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 350d7673aa4d..0bc5bd2665df 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -8,21 +8,20 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
+#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
#include <linux/gpio.h>
#include <linux/init.h>
-#include <linux/clk.h>
#include <linux/platform_data/gpio-davinci.h>
#include <asm/mach/map.h>
-#include "psc.h"
-#include <mach/irqs.h>
-#include <mach/cputype.h>
#include <mach/common.h>
-#include <mach/time.h>
+#include <mach/cputype.h>
#include <mach/da8xx.h>
+#include <mach/irqs.h>
+#include <mach/time.h>
-#include "clock.h"
#include "mux.h"
/* Offsets of the 8 compare registers on the da830 */
@@ -37,402 +36,6 @@
#define DA830_REF_FREQ 24000000
-static struct pll_data pll0_data = {
- .num = 1,
- .phys_base = DA8XX_PLL0_BASE,
- .flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
-};
-
-static struct clk ref_clk = {
- .name = "ref_clk",
- .rate = DA830_REF_FREQ,
-};
-
-static struct clk pll0_clk = {
- .name = "pll0",
- .parent = &ref_clk,
- .pll_data = &pll0_data,
- .flags = CLK_PLL,
-};
-
-static struct clk pll0_aux_clk = {
- .name = "pll0_aux_clk",
- .parent = &pll0_clk,
- .flags = CLK_PLL | PRE_PLL,
-};
-
-static struct clk pll0_sysclk2 = {
- .name = "pll0_sysclk2",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV2,
-};
-
-static struct clk pll0_sysclk3 = {
- .name = "pll0_sysclk3",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV3,
-};
-
-static struct clk pll0_sysclk4 = {
- .name = "pll0_sysclk4",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV4,
-};
-
-static struct clk pll0_sysclk5 = {
- .name = "pll0_sysclk5",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV5,
-};
-
-static struct clk pll0_sysclk6 = {
- .name = "pll0_sysclk6",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV6,
-};
-
-static struct clk pll0_sysclk7 = {
- .name = "pll0_sysclk7",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV7,
-};
-
-static struct clk i2c0_clk = {
- .name = "i2c0",
- .parent = &pll0_aux_clk,
-};
-
-static struct clk timerp64_0_clk = {
- .name = "timer0",
- .parent = &pll0_aux_clk,
-};
-
-static struct clk timerp64_1_clk = {
- .name = "timer1",
- .parent = &pll0_aux_clk,
-};
-
-static struct clk arm_rom_clk = {
- .name = "arm_rom",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_ARM_RAM_ROM,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk scr0_ss_clk = {
- .name = "scr0_ss",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_SCR0_SS,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk scr1_ss_clk = {
- .name = "scr1_ss",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_SCR1_SS,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk scr2_ss_clk = {
- .name = "scr2_ss",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_SCR2_SS,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk dmax_clk = {
- .name = "dmax",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_PRUSS,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk tpcc_clk = {
- .name = "tpcc",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_TPCC,
- .flags = ALWAYS_ENABLED | CLK_PSC,
-};
-
-static struct clk tptc0_clk = {
- .name = "tptc0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_TPTC0,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk tptc1_clk = {
- .name = "tptc1",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_TPTC1,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk mmcsd_clk = {
- .name = "mmcsd",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_MMC_SD,
-};
-
-static struct clk uart0_clk = {
- .name = "uart0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_UART0,
-};
-
-static struct clk uart1_clk = {
- .name = "uart1",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_UART1,
- .gpsc = 1,
-};
-
-static struct clk uart2_clk = {
- .name = "uart2",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_UART2,
- .gpsc = 1,
-};
-
-static struct clk spi0_clk = {
- .name = "spi0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_SPI0,
-};
-
-static struct clk spi1_clk = {
- .name = "spi1",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_SPI1,
- .gpsc = 1,
-};
-
-static struct clk ecap0_clk = {
- .name = "ecap0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_ECAP,
- .gpsc = 1,
-};
-
-static struct clk ecap1_clk = {
- .name = "ecap1",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_ECAP,
- .gpsc = 1,
-};
-
-static struct clk ecap2_clk = {
- .name = "ecap2",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_ECAP,
- .gpsc = 1,
-};
-
-static struct clk pwm0_clk = {
- .name = "pwm0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_PWM,
- .gpsc = 1,
-};
-
-static struct clk pwm1_clk = {
- .name = "pwm1",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_PWM,
- .gpsc = 1,
-};
-
-static struct clk pwm2_clk = {
- .name = "pwm2",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_PWM,
- .gpsc = 1,
-};
-
-static struct clk eqep0_clk = {
- .name = "eqep0",
- .parent = &pll0_sysclk2,
- .lpsc = DA830_LPSC1_EQEP,
- .gpsc = 1,
-};
-
-static struct clk eqep1_clk = {
- .name = "eqep1",
- .parent = &pll0_sysclk2,
- .lpsc = DA830_LPSC1_EQEP,
- .gpsc = 1,
-};
-
-static struct clk lcdc_clk = {
- .name = "lcdc",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_LCDC,
- .gpsc = 1,
-};
-
-static struct clk mcasp0_clk = {
- .name = "mcasp0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_McASP0,
- .gpsc = 1,
-};
-
-static struct clk mcasp1_clk = {
- .name = "mcasp1",
- .parent = &pll0_sysclk2,
- .lpsc = DA830_LPSC1_McASP1,
- .gpsc = 1,
-};
-
-static struct clk mcasp2_clk = {
- .name = "mcasp2",
- .parent = &pll0_sysclk2,
- .lpsc = DA830_LPSC1_McASP2,
- .gpsc = 1,
-};
-
-static struct clk usb20_clk = {
- .name = "usb20",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_USB20,
- .gpsc = 1,
-};
-
-static struct clk cppi41_clk = {
- .name = "cppi41",
- .parent = &usb20_clk,
-};
-
-static struct clk aemif_clk = {
- .name = "aemif",
- .parent = &pll0_sysclk3,
- .lpsc = DA8XX_LPSC0_EMIF25,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk aintc_clk = {
- .name = "aintc",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC0_AINTC,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk secu_mgr_clk = {
- .name = "secu_mgr",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC0_SECU_MGR,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk emac_clk = {
- .name = "emac",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC1_CPGMAC,
- .gpsc = 1,
-};
-
-static struct clk gpio_clk = {
- .name = "gpio",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC1_GPIO,
- .gpsc = 1,
-};
-
-static struct clk i2c1_clk = {
- .name = "i2c1",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC1_I2C,
- .gpsc = 1,
-};
-
-static struct clk usb11_clk = {
- .name = "usb11",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC1_USB11,
- .gpsc = 1,
-};
-
-static struct clk emif3_clk = {
- .name = "emif3",
- .parent = &pll0_sysclk5,
- .lpsc = DA8XX_LPSC1_EMIF3C,
- .gpsc = 1,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk arm_clk = {
- .name = "arm",
- .parent = &pll0_sysclk6,
- .lpsc = DA8XX_LPSC0_ARM,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk rmii_clk = {
- .name = "rmii",
- .parent = &pll0_sysclk7,
-};
-
-static struct clk_lookup da830_clks[] = {
- CLK(NULL, "ref", &ref_clk),
- CLK(NULL, "pll0", &pll0_clk),
- CLK(NULL, "pll0_aux", &pll0_aux_clk),
- CLK(NULL, "pll0_sysclk2", &pll0_sysclk2),
- CLK(NULL, "pll0_sysclk3", &pll0_sysclk3),
- CLK(NULL, "pll0_sysclk4", &pll0_sysclk4),
- CLK(NULL, "pll0_sysclk5", &pll0_sysclk5),
- CLK(NULL, "pll0_sysclk6", &pll0_sysclk6),
- CLK(NULL, "pll0_sysclk7", &pll0_sysclk7),
- CLK("i2c_davinci.1", NULL, &i2c0_clk),
- CLK(NULL, "timer0", &timerp64_0_clk),
- CLK("davinci-wdt", NULL, &timerp64_1_clk),
- CLK(NULL, "arm_rom", &arm_rom_clk),
- CLK(NULL, "scr0_ss", &scr0_ss_clk),
- CLK(NULL, "scr1_ss", &scr1_ss_clk),
- CLK(NULL, "scr2_ss", &scr2_ss_clk),
- CLK(NULL, "dmax", &dmax_clk),
- CLK(NULL, "tpcc", &tpcc_clk),
- CLK(NULL, "tptc0", &tptc0_clk),
- CLK(NULL, "tptc1", &tptc1_clk),
- CLK("da830-mmc.0", NULL, &mmcsd_clk),
- CLK("serial8250.0", NULL, &uart0_clk),
- CLK("serial8250.1", NULL, &uart1_clk),
- CLK("serial8250.2", NULL, &uart2_clk),
- CLK("spi_davinci.0", NULL, &spi0_clk),
- CLK("spi_davinci.1", NULL, &spi1_clk),
- CLK(NULL, "ecap0", &ecap0_clk),
- CLK(NULL, "ecap1", &ecap1_clk),
- CLK(NULL, "ecap2", &ecap2_clk),
- CLK(NULL, "pwm0", &pwm0_clk),
- CLK(NULL, "pwm1", &pwm1_clk),
- CLK(NULL, "pwm2", &pwm2_clk),
- CLK("eqep.0", NULL, &eqep0_clk),
- CLK("eqep.1", NULL, &eqep1_clk),
- CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
- CLK("davinci-mcasp.0", NULL, &mcasp0_clk),
- CLK("davinci-mcasp.1", NULL, &mcasp1_clk),
- CLK("davinci-mcasp.2", NULL, &mcasp2_clk),
- CLK("musb-da8xx", NULL, &usb20_clk),
- CLK("cppi41-dmaengine", NULL, &cppi41_clk),
- CLK(NULL, "aemif", &aemif_clk),
- CLK(NULL, "aintc", &aintc_clk),
- CLK(NULL, "secu_mgr", &secu_mgr_clk),
- CLK("davinci_emac.1", NULL, &emac_clk),
- CLK("davinci_mdio.0", "fck", &emac_clk),
- CLK(NULL, "gpio", &gpio_clk),
- CLK("i2c_davinci.2", NULL, &i2c1_clk),
- CLK("ohci-da8xx", NULL, &usb11_clk),
- CLK(NULL, "emif3", &emif3_clk),
- CLK(NULL, "arm", &arm_clk),
- CLK(NULL, "rmii", &rmii_clk),
- CLK(NULL, NULL, NULL),
-};
-
/*
* Device specific mux setup
*
@@ -1130,8 +733,6 @@ static struct map_desc da830_io_desc[] = {
},
};
-static u32 da830_psc_bases[] = { DA8XX_PSC0_BASE, DA8XX_PSC1_BASE };
-
/* Contents of JTAG ID register used to identify exact cpu type */
static struct davinci_id da830_ids[] = {
{
@@ -1200,8 +801,6 @@ static const struct davinci_soc_info davinci_soc_info_da830 = {
.jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG,
.ids = da830_ids,
.ids_num = ARRAY_SIZE(da830_ids),
- .psc_bases = da830_psc_bases,
- .psc_bases_num = ARRAY_SIZE(da830_psc_bases),
.pinmux_base = DA8XX_SYSCFG0_BASE + 0x120,
.pinmux_pins = da830_pins,
.pinmux_pins_num = ARRAY_SIZE(da830_pins),
@@ -1223,6 +822,53 @@ void __init da830_init(void)
void __init da830_init_time(void)
{
- davinci_clk_init(da830_clks);
- davinci_timer_init();
+ void __iomem *pll;
+ struct clk *clk;
+
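+ /*
+  * "ref_clk" is the fixed-rate root clock; da830_pll_init() is
+  * expected to build the rest of the PLL clock tree from it.
+  */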
+ clk_register_fixed_rate(NULL, "ref_clk", NULL, 0, DA830_REF_FREQ);
+
+ pll = ioremap(DA8XX_PLL0_BASE, SZ_4K);
+
+ da830_pll_init(NULL, pll, NULL);
+
+ clk = clk_get(NULL, "timer0");
+
+ davinci_timer_init(clk);
+}
+
+static struct resource da830_psc0_resources[] = {
+ {
+ .start = DA8XX_PSC0_BASE,
+ .end = DA8XX_PSC0_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device da830_psc0_device = {
+ .name = "da830-psc0",
+ .id = -1,
+ .resource = da830_psc0_resources,
+ .num_resources = ARRAY_SIZE(da830_psc0_resources),
+};
+
+static struct resource da830_psc1_resources[] = {
+ {
+ .start = DA8XX_PSC1_BASE,
+ .end = DA8XX_PSC1_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device da830_psc1_device = {
+ .name = "da830-psc1",
+ .id = -1,
+ .resource = da830_psc1_resources,
+ .num_resources = ARRAY_SIZE(da830_psc1_resources),
+};
+
+void __init da830_register_clocks(void)
+{
+ /* PLL is registered in da830_init_time() */
+ platform_device_register(&da830_psc0_device);
+ platform_device_register(&da830_psc1_device);
}
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 34117e614e08..4528bbf0c861 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -11,27 +11,31 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
+
+#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
#include <linux/clkdev.h>
+#include <linux/cpufreq.h>
#include <linux/gpio.h>
#include <linux/init.h>
-#include <linux/clk.h>
+#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/platform_data/clk-da8xx-cfgchip.h>
+#include <linux/platform_data/clk-davinci-pll.h>
+#include <linux/platform_data/gpio-davinci.h>
#include <linux/platform_device.h>
-#include <linux/cpufreq.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
-#include <linux/platform_data/gpio-davinci.h>
#include <asm/mach/map.h>
-#include "psc.h"
-#include <mach/irqs.h>
-#include <mach/cputype.h>
#include <mach/common.h>
-#include <mach/time.h>
-#include <mach/da8xx.h>
#include <mach/cpufreq.h>
+#include <mach/cputype.h>
+#include <mach/da8xx.h>
+#include <mach/irqs.h>
#include <mach/pm.h>
+#include <mach/time.h>
-#include "clock.h"
#include "mux.h"
#define DA850_PLL1_BASE 0x01e1a000
@@ -40,550 +44,6 @@
#define DA850_REF_FREQ 24000000
-#define CFGCHIP3_ASYNC3_CLKSRC BIT(4)
-#define CFGCHIP3_PLL1_MASTER_LOCK BIT(5)
-#define CFGCHIP0_PLL_MASTER_LOCK BIT(4)
-
-static int da850_set_armrate(struct clk *clk, unsigned long rate);
-static int da850_round_armrate(struct clk *clk, unsigned long rate);
-static int da850_set_pll0rate(struct clk *clk, unsigned long armrate);
-
-static struct pll_data pll0_data = {
- .num = 1,
- .phys_base = DA8XX_PLL0_BASE,
- .flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
-};
-
-static struct clk ref_clk = {
- .name = "ref_clk",
- .rate = DA850_REF_FREQ,
- .set_rate = davinci_simple_set_rate,
-};
-
-static struct clk pll0_clk = {
- .name = "pll0",
- .parent = &ref_clk,
- .pll_data = &pll0_data,
- .flags = CLK_PLL,
- .set_rate = da850_set_pll0rate,
-};
-
-static struct clk pll0_aux_clk = {
- .name = "pll0_aux_clk",
- .parent = &pll0_clk,
- .flags = CLK_PLL | PRE_PLL,
-};
-
-static struct clk pll0_sysclk1 = {
- .name = "pll0_sysclk1",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV1,
-};
-
-static struct clk pll0_sysclk2 = {
- .name = "pll0_sysclk2",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV2,
-};
-
-static struct clk pll0_sysclk3 = {
- .name = "pll0_sysclk3",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV3,
- .set_rate = davinci_set_sysclk_rate,
- .maxrate = 100000000,
-};
-
-static struct clk pll0_sysclk4 = {
- .name = "pll0_sysclk4",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV4,
-};
-
-static struct clk pll0_sysclk5 = {
- .name = "pll0_sysclk5",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV5,
-};
-
-static struct clk pll0_sysclk6 = {
- .name = "pll0_sysclk6",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV6,
-};
-
-static struct clk pll0_sysclk7 = {
- .name = "pll0_sysclk7",
- .parent = &pll0_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV7,
-};
-
-static struct pll_data pll1_data = {
- .num = 2,
- .phys_base = DA850_PLL1_BASE,
- .flags = PLL_HAS_POSTDIV,
-};
-
-static struct clk pll1_clk = {
- .name = "pll1",
- .parent = &ref_clk,
- .pll_data = &pll1_data,
- .flags = CLK_PLL,
-};
-
-static struct clk pll1_aux_clk = {
- .name = "pll1_aux_clk",
- .parent = &pll1_clk,
- .flags = CLK_PLL | PRE_PLL,
-};
-
-static struct clk pll1_sysclk2 = {
- .name = "pll1_sysclk2",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV2,
-};
-
-static struct clk pll1_sysclk3 = {
- .name = "pll1_sysclk3",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV3,
-};
-
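-/*
- * The async3 domain can be fed by either PLL: CFGCHIP3 bit 4 selects
- * pll0_sysclk2 or pll1_sysclk2 as its source.
- */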
-static int da850_async3_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 val;
-
- val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
-
- if (parent == &pll0_sysclk2) {
- val &= ~CFGCHIP3_ASYNC3_CLKSRC;
- } else if (parent == &pll1_sysclk2) {
- val |= CFGCHIP3_ASYNC3_CLKSRC;
- } else {
- pr_err("Bad parent on async3 clock mux\n");
- return -EINVAL;
- }
-
- writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
-
- return 0;
-}
-
-static struct clk async3_clk = {
- .name = "async3",
- .parent = &pll1_sysclk2,
- .set_parent = da850_async3_set_parent,
-};
-
-static struct clk i2c0_clk = {
- .name = "i2c0",
- .parent = &pll0_aux_clk,
-};
-
-static struct clk timerp64_0_clk = {
- .name = "timer0",
- .parent = &pll0_aux_clk,
-};
-
-static struct clk timerp64_1_clk = {
- .name = "timer1",
- .parent = &pll0_aux_clk,
-};
-
-static struct clk arm_rom_clk = {
- .name = "arm_rom",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_ARM_RAM_ROM,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk tpcc0_clk = {
- .name = "tpcc0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_TPCC,
- .flags = ALWAYS_ENABLED | CLK_PSC,
-};
-
-static struct clk tptc0_clk = {
- .name = "tptc0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_TPTC0,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk tptc1_clk = {
- .name = "tptc1",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_TPTC1,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk tpcc1_clk = {
- .name = "tpcc1",
- .parent = &pll0_sysclk2,
- .lpsc = DA850_LPSC1_TPCC1,
- .gpsc = 1,
- .flags = CLK_PSC | ALWAYS_ENABLED,
-};
-
-static struct clk tptc2_clk = {
- .name = "tptc2",
- .parent = &pll0_sysclk2,
- .lpsc = DA850_LPSC1_TPTC2,
- .gpsc = 1,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk pruss_clk = {
- .name = "pruss",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_PRUSS,
-};
-
-static struct clk uart0_clk = {
- .name = "uart0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_UART0,
-};
-
-static struct clk uart1_clk = {
- .name = "uart1",
- .parent = &async3_clk,
- .lpsc = DA8XX_LPSC1_UART1,
- .gpsc = 1,
-};
-
-static struct clk uart2_clk = {
- .name = "uart2",
- .parent = &async3_clk,
- .lpsc = DA8XX_LPSC1_UART2,
- .gpsc = 1,
-};
-
-static struct clk aintc_clk = {
- .name = "aintc",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC0_AINTC,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk gpio_clk = {
- .name = "gpio",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC1_GPIO,
- .gpsc = 1,
-};
-
-static struct clk i2c1_clk = {
- .name = "i2c1",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC1_I2C,
- .gpsc = 1,
-};
-
-static struct clk emif3_clk = {
- .name = "emif3",
- .parent = &pll0_sysclk5,
- .lpsc = DA8XX_LPSC1_EMIF3C,
- .gpsc = 1,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk arm_clk = {
- .name = "arm",
- .parent = &pll0_sysclk6,
- .lpsc = DA8XX_LPSC0_ARM,
- .flags = ALWAYS_ENABLED,
- .set_rate = da850_set_armrate,
- .round_rate = da850_round_armrate,
-};
-
-static struct clk rmii_clk = {
- .name = "rmii",
- .parent = &pll0_sysclk7,
-};
-
-static struct clk emac_clk = {
- .name = "emac",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC1_CPGMAC,
- .gpsc = 1,
-};
-
-/*
- * In order to avoid adding the emac_clk to the clock lookup table twice (and
- * screwing up the linked list in the process) create a separate clock for
- * mdio inheriting the rate from emac_clk.
- */
-static struct clk mdio_clk = {
- .name = "mdio",
- .parent = &emac_clk,
-};
-
-static struct clk mcasp_clk = {
- .name = "mcasp",
- .parent = &async3_clk,
- .lpsc = DA8XX_LPSC1_McASP0,
- .gpsc = 1,
-};
-
-static struct clk mcbsp0_clk = {
- .name = "mcbsp0",
- .parent = &async3_clk,
- .lpsc = DA850_LPSC1_McBSP0,
- .gpsc = 1,
-};
-
-static struct clk mcbsp1_clk = {
- .name = "mcbsp1",
- .parent = &async3_clk,
- .lpsc = DA850_LPSC1_McBSP1,
- .gpsc = 1,
-};
-
-static struct clk lcdc_clk = {
- .name = "lcdc",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_LCDC,
- .gpsc = 1,
-};
-
-static struct clk mmcsd0_clk = {
- .name = "mmcsd0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_MMC_SD,
-};
-
-static struct clk mmcsd1_clk = {
- .name = "mmcsd1",
- .parent = &pll0_sysclk2,
- .lpsc = DA850_LPSC1_MMC_SD1,
- .gpsc = 1,
-};
-
-static struct clk aemif_clk = {
- .name = "aemif",
- .parent = &pll0_sysclk3,
- .lpsc = DA8XX_LPSC0_EMIF25,
- .flags = ALWAYS_ENABLED,
-};
-
-/*
- * In order to avoid adding the aemif_clk to the clock lookup table twice (and
- * screwing up the linked list in the process) create a separate clock for
- * nand inheriting the rate from aemif_clk.
- */
-static struct clk aemif_nand_clk = {
- .name = "nand",
- .parent = &aemif_clk,
-};
-
-static struct clk usb11_clk = {
- .name = "usb11",
- .parent = &pll0_sysclk4,
- .lpsc = DA8XX_LPSC1_USB11,
- .gpsc = 1,
-};
-
-static struct clk usb20_clk = {
- .name = "usb20",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC1_USB20,
- .gpsc = 1,
-};
-
-static struct clk cppi41_clk = {
- .name = "cppi41",
- .parent = &usb20_clk,
-};
-
-static struct clk spi0_clk = {
- .name = "spi0",
- .parent = &pll0_sysclk2,
- .lpsc = DA8XX_LPSC0_SPI0,
-};
-
-static struct clk spi1_clk = {
- .name = "spi1",
- .parent = &async3_clk,
- .lpsc = DA8XX_LPSC1_SPI1,
- .gpsc = 1,
-};
-
-static struct clk vpif_clk = {
- .name = "vpif",
- .parent = &pll0_sysclk2,
- .lpsc = DA850_LPSC1_VPIF,
- .gpsc = 1,
-};
-
-static struct clk sata_clk = {
- .name = "sata",
- .parent = &pll0_sysclk2,
- .lpsc = DA850_LPSC1_SATA,
- .gpsc = 1,
- .flags = PSC_FORCE,
-};
-
-static struct clk dsp_clk = {
- .name = "dsp",
- .parent = &pll0_sysclk1,
- .domain = DAVINCI_GPSC_DSPDOMAIN,
- .lpsc = DA8XX_LPSC0_GEM,
- .flags = PSC_LRST | PSC_FORCE,
-};
-
-static struct clk ehrpwm_clk = {
- .name = "ehrpwm",
- .parent = &async3_clk,
- .lpsc = DA8XX_LPSC1_PWM,
- .gpsc = 1,
-};
-
-static struct clk ehrpwm0_clk = {
- .name = "ehrpwm0",
- .parent = &ehrpwm_clk,
-};
-
-static struct clk ehrpwm1_clk = {
- .name = "ehrpwm1",
- .parent = &ehrpwm_clk,
-};
-
-#define DA8XX_EHRPWM_TBCLKSYNC BIT(12)
-
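-/*
- * Per the clock tree below, the single TBCLKSYNC bit in CFGCHIP1 gates
- * the time-base clock of both eHRPWM modules at once.
- */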
-static void ehrpwm_tblck_enable(struct clk *clk)
-{
- u32 val;
-
- val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP1_REG));
- val |= DA8XX_EHRPWM_TBCLKSYNC;
- writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP1_REG));
-}
-
-static void ehrpwm_tblck_disable(struct clk *clk)
-{
- u32 val;
-
- val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP1_REG));
- val &= ~DA8XX_EHRPWM_TBCLKSYNC;
- writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP1_REG));
-}
-
-static struct clk ehrpwm_tbclk = {
- .name = "ehrpwm_tbclk",
- .parent = &ehrpwm_clk,
- .clk_enable = ehrpwm_tblck_enable,
- .clk_disable = ehrpwm_tblck_disable,
-};
-
-static struct clk ehrpwm0_tbclk = {
- .name = "ehrpwm0_tbclk",
- .parent = &ehrpwm_tbclk,
-};
-
-static struct clk ehrpwm1_tbclk = {
- .name = "ehrpwm1_tbclk",
- .parent = &ehrpwm_tbclk,
-};
-
-static struct clk ecap_clk = {
- .name = "ecap",
- .parent = &async3_clk,
- .lpsc = DA8XX_LPSC1_ECAP,
- .gpsc = 1,
-};
-
-static struct clk ecap0_clk = {
- .name = "ecap0_clk",
- .parent = &ecap_clk,
-};
-
-static struct clk ecap1_clk = {
- .name = "ecap1_clk",
- .parent = &ecap_clk,
-};
-
-static struct clk ecap2_clk = {
- .name = "ecap2_clk",
- .parent = &ecap_clk,
-};
-
-static struct clk_lookup da850_clks[] = {
- CLK(NULL, "ref", &ref_clk),
- CLK(NULL, "pll0", &pll0_clk),
- CLK(NULL, "pll0_aux", &pll0_aux_clk),
- CLK(NULL, "pll0_sysclk1", &pll0_sysclk1),
- CLK(NULL, "pll0_sysclk2", &pll0_sysclk2),
- CLK(NULL, "pll0_sysclk3", &pll0_sysclk3),
- CLK(NULL, "pll0_sysclk4", &pll0_sysclk4),
- CLK(NULL, "pll0_sysclk5", &pll0_sysclk5),
- CLK(NULL, "pll0_sysclk6", &pll0_sysclk6),
- CLK(NULL, "pll0_sysclk7", &pll0_sysclk7),
- CLK(NULL, "pll1", &pll1_clk),
- CLK(NULL, "pll1_aux", &pll1_aux_clk),
- CLK(NULL, "pll1_sysclk2", &pll1_sysclk2),
- CLK(NULL, "pll1_sysclk3", &pll1_sysclk3),
- CLK(NULL, "async3", &async3_clk),
- CLK("i2c_davinci.1", NULL, &i2c0_clk),
- CLK(NULL, "timer0", &timerp64_0_clk),
- CLK("davinci-wdt", NULL, &timerp64_1_clk),
- CLK(NULL, "arm_rom", &arm_rom_clk),
- CLK(NULL, "tpcc0", &tpcc0_clk),
- CLK(NULL, "tptc0", &tptc0_clk),
- CLK(NULL, "tptc1", &tptc1_clk),
- CLK(NULL, "tpcc1", &tpcc1_clk),
- CLK(NULL, "tptc2", &tptc2_clk),
- CLK("pruss_uio", "pruss", &pruss_clk),
- CLK("serial8250.0", NULL, &uart0_clk),
- CLK("serial8250.1", NULL, &uart1_clk),
- CLK("serial8250.2", NULL, &uart2_clk),
- CLK(NULL, "aintc", &aintc_clk),
- CLK(NULL, "gpio", &gpio_clk),
- CLK("i2c_davinci.2", NULL, &i2c1_clk),
- CLK(NULL, "emif3", &emif3_clk),
- CLK(NULL, "arm", &arm_clk),
- CLK(NULL, "rmii", &rmii_clk),
- CLK("davinci_emac.1", NULL, &emac_clk),
- CLK("davinci_mdio.0", "fck", &mdio_clk),
- CLK("davinci-mcasp.0", NULL, &mcasp_clk),
- CLK("davinci-mcbsp.0", NULL, &mcbsp0_clk),
- CLK("davinci-mcbsp.1", NULL, &mcbsp1_clk),
- CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
- CLK("da830-mmc.0", NULL, &mmcsd0_clk),
- CLK("da830-mmc.1", NULL, &mmcsd1_clk),
- CLK("ti-aemif", NULL, &aemif_clk),
- CLK("davinci-nand.0", "aemif", &aemif_nand_clk),
- CLK("ohci-da8xx", NULL, &usb11_clk),
- CLK("musb-da8xx", NULL, &usb20_clk),
- CLK("cppi41-dmaengine", NULL, &cppi41_clk),
- CLK("spi_davinci.0", NULL, &spi0_clk),
- CLK("spi_davinci.1", NULL, &spi1_clk),
- CLK("vpif", NULL, &vpif_clk),
- CLK("ahci_da850", "fck", &sata_clk),
- CLK("davinci-rproc.0", NULL, &dsp_clk),
- CLK(NULL, NULL, &ehrpwm_clk),
- CLK("ehrpwm.0", "fck", &ehrpwm0_clk),
- CLK("ehrpwm.1", "fck", &ehrpwm1_clk),
- CLK(NULL, NULL, &ehrpwm_tbclk),
- CLK("ehrpwm.0", "tbclk", &ehrpwm0_tbclk),
- CLK("ehrpwm.1", "tbclk", &ehrpwm1_tbclk),
- CLK(NULL, NULL, &ecap_clk),
- CLK("ecap.0", "fck", &ecap0_clk),
- CLK("ecap.1", "fck", &ecap1_clk),
- CLK("ecap.2", "fck", &ecap2_clk),
- CLK(NULL, NULL, NULL),
-};
-
/*
* Device specific mux setup
*
@@ -958,8 +418,6 @@ static struct map_desc da850_io_desc[] = {
},
};
-static u32 da850_psc_bases[] = { DA8XX_PSC0_BASE, DA8XX_PSC1_BASE };
-
/* Contents of JTAG ID register used to identify exact cpu type */
static struct davinci_id da850_ids[] = {
{
@@ -1169,89 +627,11 @@ int da850_register_cpufreq(char *async_clk)
return platform_device_register(&da850_cpufreq_device);
}
-
-static int da850_round_armrate(struct clk *clk, unsigned long rate)
-{
- int ret = 0, diff;
- unsigned int best = (unsigned int) -1;
- struct cpufreq_frequency_table *table = cpufreq_info.freq_table;
- struct cpufreq_frequency_table *pos;
-
- rate /= 1000; /* convert to kHz */
-
- cpufreq_for_each_entry(pos, table) {
- diff = pos->frequency - rate;
- if (diff < 0)
- diff = -diff;
-
- if (diff < best) {
- best = diff;
- ret = pos->frequency;
- }
- }
-
- return ret * 1000;
-}
-
-static int da850_set_armrate(struct clk *clk, unsigned long index)
-{
- struct clk *pllclk = &pll0_clk;
-
- return clk_set_rate(pllclk, index);
-}
-
-static int da850_set_pll0rate(struct clk *clk, unsigned long rate)
-{
- struct pll_data *pll = clk->pll_data;
- struct cpufreq_frequency_table *freq;
- unsigned int prediv, mult, postdiv;
- struct da850_opp *opp = NULL;
- int ret;
-
- rate /= 1000;
-
- for (freq = da850_freq_table;
- freq->frequency != CPUFREQ_TABLE_END; freq++) {
- /* rate is in Hz, freq->frequency is in kHz */
- if (freq->frequency == rate) {
- opp = (struct da850_opp *)freq->driver_data;
- break;
- }
- }
-
- if (!opp)
- return -EINVAL;
-
- prediv = opp->prediv;
- mult = opp->mult;
- postdiv = opp->postdiv;
-
- ret = davinci_set_pllrate(pll, prediv, mult, postdiv);
- if (WARN_ON(ret))
- return ret;
-
- return 0;
-}
#else
int __init da850_register_cpufreq(char *async_clk)
{
return 0;
}
-
-static int da850_set_armrate(struct clk *clk, unsigned long rate)
-{
- return -EINVAL;
-}
-
-static int da850_set_pll0rate(struct clk *clk, unsigned long armrate)
-{
- return -EINVAL;
-}
-
-static int da850_round_armrate(struct clk *clk, unsigned long rate)
-{
- return clk->rate;
-}
#endif
/* VPIF resource, platform data */
@@ -1353,8 +733,6 @@ static const struct davinci_soc_info davinci_soc_info_da850 = {
.jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG,
.ids = da850_ids,
.ids_num = ARRAY_SIZE(da850_ids),
- .psc_bases = da850_psc_bases,
- .psc_bases_num = ARRAY_SIZE(da850_psc_bases),
.pinmux_base = DA8XX_SYSCFG0_BASE + 0x120,
.pinmux_pins = da850_pins,
.pinmux_pins_num = ARRAY_SIZE(da850_pins),
@@ -1370,8 +748,6 @@ static const struct davinci_soc_info davinci_soc_info_da850 = {
void __init da850_init(void)
{
- unsigned int v;
-
davinci_common_init(&davinci_soc_info_da850);
da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
@@ -1379,22 +755,124 @@ void __init da850_init(void)
return;
da8xx_syscfg1_base = ioremap(DA8XX_SYSCFG1_BASE, SZ_4K);
- if (WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module"))
- return;
+ WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module");
+}
+
+void __init da850_init_time(void)
+{
+ void __iomem *pll0;
+ struct regmap *cfgchip;
+ struct clk *clk;
- /* Unlock writing to PLL0 registers */
- v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP0_REG));
- v &= ~CFGCHIP0_PLL_MASTER_LOCK;
- __raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP0_REG));
+ clk_register_fixed_rate(NULL, "ref_clk", NULL, 0, DA850_REF_FREQ);
- /* Unlock writing to PLL1 registers */
- v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
- v &= ~CFGCHIP3_PLL1_MASTER_LOCK;
- __raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
+ pll0 = ioremap(DA8XX_PLL0_BASE, SZ_4K);
+ cfgchip = da8xx_get_cfgchip();
+
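+ /*
+  * Presumably the common PLL driver uses this CFGCHIP regmap to
+  * manage the master-lock bits that the deleted code above cleared
+  * by hand.
+  */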
+ da850_pll0_init(NULL, pll0, cfgchip);
+
+ clk = clk_get(NULL, "timer0");
+
+ davinci_timer_init(clk);
}
-void __init da850_init_time(void)
+static struct resource da850_pll1_resources[] = {
+ {
+ .start = DA850_PLL1_BASE,
+ .end = DA850_PLL1_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct davinci_pll_platform_data da850_pll1_pdata;
+
+static struct platform_device da850_pll1_device = {
+ .name = "da850-pll1",
+ .id = -1,
+ .resource = da850_pll1_resources,
+ .num_resources = ARRAY_SIZE(da850_pll1_resources),
+ .dev = {
+ .platform_data = &da850_pll1_pdata,
+ },
+};
+
+static struct resource da850_psc0_resources[] = {
+ {
+ .start = DA8XX_PSC0_BASE,
+ .end = DA8XX_PSC0_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device da850_psc0_device = {
+ .name = "da850-psc0",
+ .id = -1,
+ .resource = da850_psc0_resources,
+ .num_resources = ARRAY_SIZE(da850_psc0_resources),
+};
+
+static struct resource da850_psc1_resources[] = {
+ {
+ .start = DA8XX_PSC1_BASE,
+ .end = DA8XX_PSC1_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device da850_psc1_device = {
+ .name = "da850-psc1",
+ .id = -1,
+ .resource = da850_psc1_resources,
+ .num_resources = ARRAY_SIZE(da850_psc1_resources),
+};
+
+static struct da8xx_cfgchip_clk_platform_data da850_async1_pdata;
+
+static struct platform_device da850_async1_clksrc_device = {
+ .name = "da850-async1-clksrc",
+ .id = -1,
+ .dev = {
+ .platform_data = &da850_async1_pdata,
+ },
+};
+
+static struct da8xx_cfgchip_clk_platform_data da850_async3_pdata;
+
+static struct platform_device da850_async3_clksrc_device = {
+ .name = "da850-async3-clksrc",
+ .id = -1,
+ .dev = {
+ .platform_data = &da850_async3_pdata,
+ },
+};
+
+static struct da8xx_cfgchip_clk_platform_data da850_tbclksync_pdata;
+
+static struct platform_device da850_tbclksync_device = {
+ .name = "da830-tbclksync",
+ .id = -1,
+ .dev = {
+ .platform_data = &da850_tbclksync_pdata,
+ },
+};
+
+void __init da850_register_clocks(void)
{
- davinci_clk_init(da850_clks);
- davinci_timer_init();
+ /* PLL0 is registered in da850_init_time() */
+
+ da850_pll1_pdata.cfgchip = da8xx_get_cfgchip();
+ platform_device_register(&da850_pll1_device);
+
+ da850_async1_pdata.cfgchip = da8xx_get_cfgchip();
+ platform_device_register(&da850_async1_clksrc_device);
+
+ da850_async3_pdata.cfgchip = da8xx_get_cfgchip();
+ platform_device_register(&da850_async3_clksrc_device);
+
+ platform_device_register(&da850_psc0_device);
+
+ platform_device_register(&da850_psc1_device);
+
+ da850_tbclksync_pdata.cfgchip = da8xx_get_cfgchip();
+ platform_device_register(&da850_tbclksync_device);
}
diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c
index ab199f4b9ce4..beac80ec4037 100644
--- a/arch/arm/mach-davinci/da8xx-dt.c
+++ b/arch/arm/mach-davinci/da8xx-dt.c
@@ -7,81 +7,16 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/io.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/irqdomain.h>
-#include <linux/platform_data/ti-aemif.h>
#include <asm/mach/arch.h>
#include <mach/common.h>
-#include "cp_intc.h"
#include <mach/da8xx.h>
-static struct of_dev_auxdata da850_aemif_auxdata_lookup[] = {
- OF_DEV_AUXDATA("ti,davinci-nand", 0x62000000, "davinci-nand.0", NULL),
- {}
-};
-
-static struct aemif_platform_data aemif_data = {
- .dev_lookup = da850_aemif_auxdata_lookup,
-};
-
-static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
- OF_DEV_AUXDATA("ti,davinci-i2c", 0x01c22000, "i2c_davinci.1", NULL),
- OF_DEV_AUXDATA("ti,davinci-i2c", 0x01e28000, "i2c_davinci.2", NULL),
- OF_DEV_AUXDATA("ti,davinci-wdt", 0x01c21000, "davinci-wdt", NULL),
- OF_DEV_AUXDATA("ti,da830-mmc", 0x01c40000, "da830-mmc.0", NULL),
- OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f00000, "ehrpwm.0", NULL),
- OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f02000, "ehrpwm.1", NULL),
- OF_DEV_AUXDATA("ti,da850-ecap", 0x01f06000, "ecap.0", NULL),
- OF_DEV_AUXDATA("ti,da850-ecap", 0x01f07000, "ecap.1", NULL),
- OF_DEV_AUXDATA("ti,da850-ecap", 0x01f08000, "ecap.2", NULL),
- OF_DEV_AUXDATA("ti,da830-spi", 0x01c41000, "spi_davinci.0", NULL),
- OF_DEV_AUXDATA("ti,da830-spi", 0x01f0e000, "spi_davinci.1", NULL),
- OF_DEV_AUXDATA("ns16550a", 0x01c42000, "serial8250.0", NULL),
- OF_DEV_AUXDATA("ns16550a", 0x01d0c000, "serial8250.1", NULL),
- OF_DEV_AUXDATA("ns16550a", 0x01d0d000, "serial8250.2", NULL),
- OF_DEV_AUXDATA("ti,davinci_mdio", 0x01e24000, "davinci_mdio.0", NULL),
- OF_DEV_AUXDATA("ti,davinci-dm6467-emac", 0x01e20000, "davinci_emac.1",
- NULL),
- OF_DEV_AUXDATA("ti,da830-mcasp-audio", 0x01d00000, "davinci-mcasp.0", NULL),
- OF_DEV_AUXDATA("ti,da850-aemif", 0x68000000, "ti-aemif", &aemif_data),
- OF_DEV_AUXDATA("ti,da850-tilcdc", 0x01e13000, "da8xx_lcdc.0", NULL),
- OF_DEV_AUXDATA("ti,da830-ohci", 0x01e25000, "ohci-da8xx", NULL),
- OF_DEV_AUXDATA("ti,da830-musb", 0x01e00000, "musb-da8xx", NULL),
- OF_DEV_AUXDATA("ti,da830-usb-phy", 0x01c1417c, "da8xx-usb-phy", NULL),
- OF_DEV_AUXDATA("ti,da850-ahci", 0x01e18000, "ahci_da850", NULL),
- OF_DEV_AUXDATA("ti,da850-vpif", 0x01e17000, "vpif", NULL),
- OF_DEV_AUXDATA("ti,da850-dsp", 0x11800000, "davinci-rproc.0", NULL),
- {}
-};
-
#ifdef CONFIG_ARCH_DAVINCI_DA850
static void __init da850_init_machine(void)
{
- /* All existing boards use 100 MHz SATA refclkpn */
- static const unsigned long sata_refclkpn = 100 * 1000 * 1000;
-
- int ret;
-
- ret = da8xx_register_usb20_phy_clk(false);
- if (ret)
- pr_warn("%s: registering USB 2.0 PHY clock failed: %d",
- __func__, ret);
- ret = da8xx_register_usb11_phy_clk(false);
- if (ret)
- pr_warn("%s: registering USB 1.1 PHY clock failed: %d",
- __func__, ret);
-
- ret = da850_register_sata_refclk(sata_refclkpn);
- if (ret)
- pr_warn("%s: registering SATA REFCLK failed: %d",
- __func__, ret);
-
- of_platform_default_populate(NULL, da850_auxdata_lookup, NULL);
davinci_pm_init();
pdata_quirks_init();
}
@@ -96,7 +31,6 @@ static const char *const da850_boards_compat[] __initconst = {
DT_MACHINE_START(DA850_DT, "Generic DA850/OMAP-L138/AM18x")
.map_io = da850_init,
- .init_time = da850_init_time,
.init_machine = da850_init_machine,
.dt_compat = da850_boards_compat,
.init_late = davinci_init_late,
diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h
index 376cdd51ce9d..db4c95ef4d5c 100644
--- a/arch/arm/mach-davinci/davinci.h
+++ b/arch/arm/mach-davinci/davinci.h
@@ -35,6 +35,10 @@
#include <media/davinci/vpbe.h>
#include <media/davinci/vpbe_osd.h>
+#define DAVINCI_PLL1_BASE 0x01c40800
+#define DAVINCI_PLL2_BASE 0x01c40c00
+#define DAVINCI_PWR_SLEEP_CNTRL_BASE 0x01c41000
+
#define DAVINCI_SYSTEM_MODULE_BASE 0x01c40000
#define SYSMOD_VDAC_CONFIG 0x2c
#define SYSMOD_VIDCLKCTL 0x38
@@ -84,6 +88,7 @@ int davinci_init_wdt(void);
/* DM355 function declarations */
void dm355_init(void);
void dm355_init_time(void);
+void dm355_register_clocks(void);
void dm355_init_spi0(unsigned chipselect_mask,
const struct spi_board_info *info, unsigned len);
void dm355_init_asp1(u32 evt_enable);
@@ -93,6 +98,7 @@ int dm355_gpio_register(void);
/* DM365 function declarations */
void dm365_init(void);
void dm365_init_time(void);
+void dm365_register_clocks(void);
void dm365_init_asp(void);
void dm365_init_vc(void);
void dm365_init_ks(struct davinci_ks_platform_data *pdata);
@@ -106,6 +112,7 @@ int dm365_gpio_register(void);
void dm644x_init(void);
void dm644x_init_devices(void);
void dm644x_init_time(void);
+void dm644x_register_clocks(void);
void dm644x_init_asp(void);
int dm644x_init_video(struct vpfe_config *, struct vpbe_config *);
int dm644x_gpio_register(void);
@@ -113,6 +120,7 @@ int dm644x_gpio_register(void);
/* DM646x function declarations */
void dm646x_init(void);
void dm646x_init_time(unsigned long ref_clk_rate, unsigned long aux_clkin_rate);
+void dm646x_register_clocks(void);
void dm646x_init_mcasp0(struct snd_platform_data *pdata);
void dm646x_init_mcasp1(struct snd_platform_data *pdata);
int dm646x_init_edma(struct edma_rsv_info *rsv);
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 78390c64e6ca..1fd3619f6a09 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -10,25 +10,25 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/dma-contiguous.h>
-#include <linux/serial_8250.h>
#include <linux/ahci_platform.h>
+#include <linux/clk-provider.h>
#include <linux/clk.h>
-#include <linux/reboot.h>
+#include <linux/clkdev.h>
+#include <linux/dma-contiguous.h>
#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/serial_8250.h>
-#include <mach/cputype.h>
#include <mach/common.h>
-#include <mach/time.h>
+#include <mach/cputype.h>
#include <mach/da8xx.h>
-#include <mach/clock.h>
-#include "cpuidle.h"
-#include "sram.h"
+#include <mach/time.h>
-#include "clock.h"
#include "asp.h"
+#include "cpuidle.h"
+#include "sram.h"
#define DA8XX_TPCC_BASE 0x01c00000
#define DA8XX_TPTC0_BASE 0x01c08000
@@ -1040,26 +1040,15 @@ int __init da8xx_register_spi_bus(int instance, unsigned num_chipselect)
}
#ifdef CONFIG_ARCH_DAVINCI_DA850
-static struct clk sata_refclk = {
- .name = "sata_refclk",
- .set_rate = davinci_simple_set_rate,
-};
-
-static struct clk_lookup sata_refclk_lookup =
- CLK("ahci_da850", "refclk", &sata_refclk);
-
int __init da850_register_sata_refclk(int rate)
{
- int ret;
-
- sata_refclk.rate = rate;
- ret = clk_register(&sata_refclk);
- if (ret)
- return ret;
+ struct clk *clk;
- clkdev_add(&sata_refclk_lookup);
+ clk = clk_register_fixed_rate(NULL, "sata_refclk", NULL, 0, rate);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
- return 0;
+ return clk_register_clkdev(clk, "refclk", "ahci_da850");
}
static struct resource da850_sata_resources[] = {
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 0edda4093e47..e8dbbb7479ab 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -26,7 +26,6 @@
#include "davinci.h"
-#include "clock.h"
#define DAVINCI_I2C_BASE 0x01C21000
#define DAVINCI_ATA_BASE 0x01C66000
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index f29480495c18..9f7d38d12c88 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -8,31 +8,32 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/serial_8250.h>
-#include <linux/platform_device.h>
+
+#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
+#include <linux/clkdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/spi/spi.h>
+#include <linux/init.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
#include <linux/platform_data/spi-davinci.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/spi/spi.h>
#include <asm/mach/map.h>
+#include <mach/common.h>
#include <mach/cputype.h>
-#include "psc.h"
-#include <mach/mux.h>
#include <mach/irqs.h>
-#include <mach/time.h>
+#include <mach/mux.h>
#include <mach/serial.h>
-#include <mach/common.h>
+#include <mach/time.h>
+#include "asp.h"
#include "davinci.h"
-#include "clock.h"
#include "mux.h"
-#include "asp.h"
#define DM355_UART2_BASE (IO_PHYS + 0x206000)
#define DM355_OSD_BASE (IO_PHYS + 0x70200)
@@ -43,348 +44,6 @@
*/
#define DM355_REF_FREQ 24000000 /* 24 or 36 MHz */
-static struct pll_data pll1_data = {
- .num = 1,
- .phys_base = DAVINCI_PLL1_BASE,
- .flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
-};
-
-static struct pll_data pll2_data = {
- .num = 2,
- .phys_base = DAVINCI_PLL2_BASE,
- .flags = PLL_HAS_PREDIV,
-};
-
-static struct clk ref_clk = {
- .name = "ref_clk",
- /* FIXME -- crystal rate is board-specific */
- .rate = DM355_REF_FREQ,
-};
-
-static struct clk pll1_clk = {
- .name = "pll1",
- .parent = &ref_clk,
- .flags = CLK_PLL,
- .pll_data = &pll1_data,
-};
-
-static struct clk pll1_aux_clk = {
- .name = "pll1_aux_clk",
- .parent = &pll1_clk,
- .flags = CLK_PLL | PRE_PLL,
-};
-
-static struct clk pll1_sysclk1 = {
- .name = "pll1_sysclk1",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV1,
-};
-
-static struct clk pll1_sysclk2 = {
- .name = "pll1_sysclk2",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV2,
-};
-
-static struct clk pll1_sysclk3 = {
- .name = "pll1_sysclk3",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV3,
-};
-
-static struct clk pll1_sysclk4 = {
- .name = "pll1_sysclk4",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV4,
-};
-
-static struct clk pll1_sysclkbp = {
- .name = "pll1_sysclkbp",
- .parent = &pll1_clk,
- .flags = CLK_PLL | PRE_PLL,
- .div_reg = BPDIV
-};
-
-static struct clk vpss_dac_clk = {
- .name = "vpss_dac",
- .parent = &pll1_sysclk3,
- .lpsc = DM355_LPSC_VPSS_DAC,
-};
-
-static struct clk vpss_master_clk = {
- .name = "vpss_master",
- .parent = &pll1_sysclk4,
- .lpsc = DAVINCI_LPSC_VPSSMSTR,
- .flags = CLK_PSC,
-};
-
-static struct clk vpss_slave_clk = {
- .name = "vpss_slave",
- .parent = &pll1_sysclk4,
- .lpsc = DAVINCI_LPSC_VPSSSLV,
-};
-
-static struct clk clkout1_clk = {
- .name = "clkout1",
- .parent = &pll1_aux_clk,
- /* NOTE: clkout1 can be externally gated by muxing GPIO-18 */
-};
-
-static struct clk clkout2_clk = {
- .name = "clkout2",
- .parent = &pll1_sysclkbp,
-};
-
-static struct clk pll2_clk = {
- .name = "pll2",
- .parent = &ref_clk,
- .flags = CLK_PLL,
- .pll_data = &pll2_data,
-};
-
-static struct clk pll2_sysclk1 = {
- .name = "pll2_sysclk1",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV1,
-};
-
-static struct clk pll2_sysclkbp = {
- .name = "pll2_sysclkbp",
- .parent = &pll2_clk,
- .flags = CLK_PLL | PRE_PLL,
- .div_reg = BPDIV
-};
-
-static struct clk clkout3_clk = {
- .name = "clkout3",
- .parent = &pll2_sysclkbp,
- /* NOTE: clkout3 can be externally gated by muxing GPIO-16 */
-};
-
-static struct clk arm_clk = {
- .name = "arm_clk",
- .parent = &pll1_sysclk1,
- .lpsc = DAVINCI_LPSC_ARM,
- .flags = ALWAYS_ENABLED,
-};
-
-/*
- * NOT LISTED below, and not touched by Linux
- * - in SyncReset state by default
- * .lpsc = DAVINCI_LPSC_TPCC,
- * .lpsc = DAVINCI_LPSC_TPTC0,
- * .lpsc = DAVINCI_LPSC_TPTC1,
- * .lpsc = DAVINCI_LPSC_DDR_EMIF, .parent = &sysclk2_clk,
- * .lpsc = DAVINCI_LPSC_MEMSTICK,
- * - in Enabled state by default
- * .lpsc = DAVINCI_LPSC_SYSTEM_SUBSYS,
- * .lpsc = DAVINCI_LPSC_SCR2, // "bus"
- * .lpsc = DAVINCI_LPSC_SCR3, // "bus"
- * .lpsc = DAVINCI_LPSC_SCR4, // "bus"
- * .lpsc = DAVINCI_LPSC_CROSSBAR, // "emulation"
- * .lpsc = DAVINCI_LPSC_CFG27, // "test"
- * .lpsc = DAVINCI_LPSC_CFG3, // "test"
- * .lpsc = DAVINCI_LPSC_CFG5, // "test"
- */
-
-static struct clk mjcp_clk = {
- .name = "mjcp",
- .parent = &pll1_sysclk1,
- .lpsc = DAVINCI_LPSC_IMCOP,
-};
-
-static struct clk uart0_clk = {
- .name = "uart0",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_UART0,
-};
-
-static struct clk uart1_clk = {
- .name = "uart1",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_UART1,
-};
-
-static struct clk uart2_clk = {
- .name = "uart2",
- .parent = &pll1_sysclk2,
- .lpsc = DAVINCI_LPSC_UART2,
-};
-
-static struct clk i2c_clk = {
- .name = "i2c",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_I2C,
-};
-
-static struct clk asp0_clk = {
- .name = "asp0",
- .parent = &pll1_sysclk2,
- .lpsc = DAVINCI_LPSC_McBSP,
-};
-
-static struct clk asp1_clk = {
- .name = "asp1",
- .parent = &pll1_sysclk2,
- .lpsc = DM355_LPSC_McBSP1,
-};
-
-static struct clk mmcsd0_clk = {
- .name = "mmcsd0",
- .parent = &pll1_sysclk2,
- .lpsc = DAVINCI_LPSC_MMC_SD,
-};
-
-static struct clk mmcsd1_clk = {
- .name = "mmcsd1",
- .parent = &pll1_sysclk2,
- .lpsc = DM355_LPSC_MMC_SD1,
-};
-
-static struct clk spi0_clk = {
- .name = "spi0",
- .parent = &pll1_sysclk2,
- .lpsc = DAVINCI_LPSC_SPI,
-};
-
-static struct clk spi1_clk = {
- .name = "spi1",
- .parent = &pll1_sysclk2,
- .lpsc = DM355_LPSC_SPI1,
-};
-
-static struct clk spi2_clk = {
- .name = "spi2",
- .parent = &pll1_sysclk2,
- .lpsc = DM355_LPSC_SPI2,
-};
-
-static struct clk gpio_clk = {
- .name = "gpio",
- .parent = &pll1_sysclk2,
- .lpsc = DAVINCI_LPSC_GPIO,
-};
-
-static struct clk aemif_clk = {
- .name = "aemif",
- .parent = &pll1_sysclk2,
- .lpsc = DAVINCI_LPSC_AEMIF,
-};
-
-static struct clk pwm0_clk = {
- .name = "pwm0",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_PWM0,
-};
-
-static struct clk pwm1_clk = {
- .name = "pwm1",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_PWM1,
-};
-
-static struct clk pwm2_clk = {
- .name = "pwm2",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_PWM2,
-};
-
-static struct clk pwm3_clk = {
- .name = "pwm3",
- .parent = &pll1_aux_clk,
- .lpsc = DM355_LPSC_PWM3,
-};
-
-static struct clk timer0_clk = {
- .name = "timer0",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_TIMER0,
-};
-
-static struct clk timer1_clk = {
- .name = "timer1",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_TIMER1,
-};
-
-static struct clk timer2_clk = {
- .name = "timer2",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_TIMER2,
- .usecount = 1, /* REVISIT: why can't this be disabled? */
-};
-
-static struct clk timer3_clk = {
- .name = "timer3",
- .parent = &pll1_aux_clk,
- .lpsc = DM355_LPSC_TIMER3,
-};
-
-static struct clk rto_clk = {
- .name = "rto",
- .parent = &pll1_aux_clk,
- .lpsc = DM355_LPSC_RTO,
-};
-
-static struct clk usb_clk = {
- .name = "usb",
- .parent = &pll1_sysclk2,
- .lpsc = DAVINCI_LPSC_USB,
-};
-
-static struct clk_lookup dm355_clks[] = {
- CLK(NULL, "ref", &ref_clk),
- CLK(NULL, "pll1", &pll1_clk),
- CLK(NULL, "pll1_sysclk1", &pll1_sysclk1),
- CLK(NULL, "pll1_sysclk2", &pll1_sysclk2),
- CLK(NULL, "pll1_sysclk3", &pll1_sysclk3),
- CLK(NULL, "pll1_sysclk4", &pll1_sysclk4),
- CLK(NULL, "pll1_aux", &pll1_aux_clk),
- CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp),
- CLK(NULL, "vpss_dac", &vpss_dac_clk),
- CLK("vpss", "master", &vpss_master_clk),
- CLK("vpss", "slave", &vpss_slave_clk),
- CLK(NULL, "clkout1", &clkout1_clk),
- CLK(NULL, "clkout2", &clkout2_clk),
- CLK(NULL, "pll2", &pll2_clk),
- CLK(NULL, "pll2_sysclk1", &pll2_sysclk1),
- CLK(NULL, "pll2_sysclkbp", &pll2_sysclkbp),
- CLK(NULL, "clkout3", &clkout3_clk),
- CLK(NULL, "arm", &arm_clk),
- CLK(NULL, "mjcp", &mjcp_clk),
- CLK("serial8250.0", NULL, &uart0_clk),
- CLK("serial8250.1", NULL, &uart1_clk),
- CLK("serial8250.2", NULL, &uart2_clk),
- CLK("i2c_davinci.1", NULL, &i2c_clk),
- CLK("davinci-mcbsp.0", NULL, &asp0_clk),
- CLK("davinci-mcbsp.1", NULL, &asp1_clk),
- CLK("dm6441-mmc.0", NULL, &mmcsd0_clk),
- CLK("dm6441-mmc.1", NULL, &mmcsd1_clk),
- CLK("spi_davinci.0", NULL, &spi0_clk),
- CLK("spi_davinci.1", NULL, &spi1_clk),
- CLK("spi_davinci.2", NULL, &spi2_clk),
- CLK(NULL, "gpio", &gpio_clk),
- CLK(NULL, "aemif", &aemif_clk),
- CLK(NULL, "pwm0", &pwm0_clk),
- CLK(NULL, "pwm1", &pwm1_clk),
- CLK(NULL, "pwm2", &pwm2_clk),
- CLK(NULL, "pwm3", &pwm3_clk),
- CLK(NULL, "timer0", &timer0_clk),
- CLK(NULL, "timer1", &timer1_clk),
- CLK("davinci-wdt", NULL, &timer2_clk),
- CLK(NULL, "timer3", &timer3_clk),
- CLK(NULL, "rto", &rto_clk),
- CLK(NULL, "usb", &usb_clk),
- CLK(NULL, NULL, NULL),
-};
-
-/*----------------------------------------------------------------------*/
-
static u64 dm355_spi0_dma_mask = DMA_BIT_MASK(32);
static struct resource dm355_spi0_resources[] = {
@@ -926,8 +585,6 @@ static struct davinci_id dm355_ids[] = {
},
};
-static u32 dm355_psc_bases[] = { DAVINCI_PWR_SLEEP_CNTRL_BASE };
-
/*
* T0_BOT: Timer 0, bottom: clockevent source for hrtimers
* T0_TOP: Timer 0, top : clocksource for generic timekeeping
@@ -1012,8 +669,6 @@ static const struct davinci_soc_info davinci_soc_info_dm355 = {
.jtag_id_reg = 0x01c40028,
.ids = dm355_ids,
.ids_num = ARRAY_SIZE(dm355_ids),
- .psc_bases = dm355_psc_bases,
- .psc_bases_num = ARRAY_SIZE(dm355_psc_bases),
.pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm355_pins,
.pinmux_pins_num = ARRAY_SIZE(dm355_pins),
@@ -1046,8 +701,41 @@ void __init dm355_init(void)
void __init dm355_init_time(void)
{
- davinci_clk_init(dm355_clks);
- davinci_timer_init();
+ void __iomem *pll1, *psc;
+ struct clk *clk;
+
+ clk_register_fixed_rate(NULL, "ref_clk", NULL, 0, DM355_REF_FREQ);
+
+ pll1 = ioremap(DAVINCI_PLL1_BASE, SZ_1K);
+ dm355_pll1_init(NULL, pll1, NULL);
+
+ psc = ioremap(DAVINCI_PWR_SLEEP_CNTRL_BASE, SZ_4K);
+ dm355_psc_init(NULL, psc);
+
+ clk = clk_get(NULL, "timer0");
+
+ davinci_timer_init(clk);
+}
+
+static struct resource dm355_pll2_resources[] = {
+ {
+ .start = DAVINCI_PLL2_BASE,
+ .end = DAVINCI_PLL2_BASE + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device dm355_pll2_device = {
+ .name = "dm355-pll2",
+ .id = -1,
+ .resource = dm355_pll2_resources,
+ .num_resources = ARRAY_SIZE(dm355_pll2_resources),
+};
+
+void __init dm355_register_clocks(void)
+{
+ /* PLL1 and PSC are registered in dm355_init_time() */
+ platform_device_register(&dm355_pll2_device);
}
int __init dm355_init_video(struct vpfe_config *vpfe_cfg,
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 1e3df9df1e10..abcf2a5ed89b 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -12,32 +12,33 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/serial_8250.h>
-#include <linux/platform_device.h>
+
+#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
+#include <linux/clkdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/spi/spi.h>
+#include <linux/init.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
#include <linux/platform_data/keyscan-davinci.h>
#include <linux/platform_data/spi-davinci.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/spi/spi.h>
#include <asm/mach/map.h>
+#include <mach/common.h>
#include <mach/cputype.h>
-#include "psc.h"
-#include <mach/mux.h>
#include <mach/irqs.h>
-#include <mach/time.h>
+#include <mach/mux.h>
#include <mach/serial.h>
-#include <mach/common.h>
+#include <mach/time.h>
+#include "asp.h"
#include "davinci.h"
-#include "clock.h"
#include "mux.h"
-#include "asp.h"
#define DM365_REF_FREQ 24000000 /* 24 MHz on the DM365 EVM */
#define DM365_RTC_BASE 0x01c69000
@@ -54,440 +55,6 @@
#define DM365_EMAC_CNTRL_RAM_OFFSET 0x1000
#define DM365_EMAC_CNTRL_RAM_SIZE 0x2000
-static struct pll_data pll1_data = {
- .num = 1,
- .phys_base = DAVINCI_PLL1_BASE,
- .flags = PLL_HAS_POSTDIV | PLL_HAS_PREDIV,
-};
-
-static struct pll_data pll2_data = {
- .num = 2,
- .phys_base = DAVINCI_PLL2_BASE,
- .flags = PLL_HAS_POSTDIV | PLL_HAS_PREDIV,
-};
-
-static struct clk ref_clk = {
- .name = "ref_clk",
- .rate = DM365_REF_FREQ,
-};
-
-static struct clk pll1_clk = {
- .name = "pll1",
- .parent = &ref_clk,
- .flags = CLK_PLL,
- .pll_data = &pll1_data,
-};
-
-static struct clk pll1_aux_clk = {
- .name = "pll1_aux_clk",
- .parent = &pll1_clk,
- .flags = CLK_PLL | PRE_PLL,
-};
-
-static struct clk pll1_sysclkbp = {
- .name = "pll1_sysclkbp",
- .parent = &pll1_clk,
- .flags = CLK_PLL | PRE_PLL,
- .div_reg = BPDIV
-};
-
-static struct clk clkout0_clk = {
- .name = "clkout0",
- .parent = &pll1_clk,
- .flags = CLK_PLL | PRE_PLL,
-};
-
-static struct clk pll1_sysclk1 = {
- .name = "pll1_sysclk1",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV1,
-};
-
-static struct clk pll1_sysclk2 = {
- .name = "pll1_sysclk2",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV2,
-};
-
-static struct clk pll1_sysclk3 = {
- .name = "pll1_sysclk3",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV3,
-};
-
-static struct clk pll1_sysclk4 = {
- .name = "pll1_sysclk4",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV4,
-};
-
-static struct clk pll1_sysclk5 = {
- .name = "pll1_sysclk5",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV5,
-};
-
-static struct clk pll1_sysclk6 = {
- .name = "pll1_sysclk6",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV6,
-};
-
-static struct clk pll1_sysclk7 = {
- .name = "pll1_sysclk7",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV7,
-};
-
-static struct clk pll1_sysclk8 = {
- .name = "pll1_sysclk8",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV8,
-};
-
-static struct clk pll1_sysclk9 = {
- .name = "pll1_sysclk9",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV9,
-};
-
-static struct clk pll2_clk = {
- .name = "pll2",
- .parent = &ref_clk,
- .flags = CLK_PLL,
- .pll_data = &pll2_data,
-};
-
-static struct clk pll2_aux_clk = {
- .name = "pll2_aux_clk",
- .parent = &pll2_clk,
- .flags = CLK_PLL | PRE_PLL,
-};
-
-static struct clk clkout1_clk = {
- .name = "clkout1",
- .parent = &pll2_clk,
- .flags = CLK_PLL | PRE_PLL,
-};
-
-static struct clk pll2_sysclk1 = {
- .name = "pll2_sysclk1",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV1,
-};
-
-static struct clk pll2_sysclk2 = {
- .name = "pll2_sysclk2",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV2,
-};
-
-static struct clk pll2_sysclk3 = {
- .name = "pll2_sysclk3",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV3,
-};
-
-static struct clk pll2_sysclk4 = {
- .name = "pll2_sysclk4",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV4,
-};
-
-static struct clk pll2_sysclk5 = {
- .name = "pll2_sysclk5",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV5,
-};
-
-static struct clk pll2_sysclk6 = {
- .name = "pll2_sysclk6",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV6,
-};
-
-static struct clk pll2_sysclk7 = {
- .name = "pll2_sysclk7",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV7,
-};
-
-static struct clk pll2_sysclk8 = {
- .name = "pll2_sysclk8",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV8,
-};
-
-static struct clk pll2_sysclk9 = {
- .name = "pll2_sysclk9",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV9,
-};
-
-static struct clk vpss_dac_clk = {
- .name = "vpss_dac",
- .parent = &pll1_sysclk3,
- .lpsc = DM365_LPSC_DAC_CLK,
-};
-
-static struct clk vpss_master_clk = {
- .name = "vpss_master",
- .parent = &pll1_sysclk5,
- .lpsc = DM365_LPSC_VPSSMSTR,
- .flags = CLK_PSC,
-};
-
-static struct clk vpss_slave_clk = {
- .name = "vpss_slave",
- .parent = &pll1_sysclk5,
- .lpsc = DAVINCI_LPSC_VPSSSLV,
-};
-
-static struct clk arm_clk = {
- .name = "arm_clk",
- .parent = &pll2_sysclk2,
- .lpsc = DAVINCI_LPSC_ARM,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk uart0_clk = {
- .name = "uart0",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_UART0,
-};
-
-static struct clk uart1_clk = {
- .name = "uart1",
- .parent = &pll1_sysclk4,
- .lpsc = DAVINCI_LPSC_UART1,
-};
-
-static struct clk i2c_clk = {
- .name = "i2c",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_I2C,
-};
-
-static struct clk mmcsd0_clk = {
- .name = "mmcsd0",
- .parent = &pll1_sysclk8,
- .lpsc = DAVINCI_LPSC_MMC_SD,
-};
-
-static struct clk mmcsd1_clk = {
- .name = "mmcsd1",
- .parent = &pll1_sysclk4,
- .lpsc = DM365_LPSC_MMC_SD1,
-};
-
-static struct clk spi0_clk = {
- .name = "spi0",
- .parent = &pll1_sysclk4,
- .lpsc = DAVINCI_LPSC_SPI,
-};
-
-static struct clk spi1_clk = {
- .name = "spi1",
- .parent = &pll1_sysclk4,
- .lpsc = DM365_LPSC_SPI1,
-};
-
-static struct clk spi2_clk = {
- .name = "spi2",
- .parent = &pll1_sysclk4,
- .lpsc = DM365_LPSC_SPI2,
-};
-
-static struct clk spi3_clk = {
- .name = "spi3",
- .parent = &pll1_sysclk4,
- .lpsc = DM365_LPSC_SPI3,
-};
-
-static struct clk spi4_clk = {
- .name = "spi4",
- .parent = &pll1_aux_clk,
- .lpsc = DM365_LPSC_SPI4,
-};
-
-static struct clk gpio_clk = {
- .name = "gpio",
- .parent = &pll1_sysclk4,
- .lpsc = DAVINCI_LPSC_GPIO,
-};
-
-static struct clk aemif_clk = {
- .name = "aemif",
- .parent = &pll1_sysclk4,
- .lpsc = DAVINCI_LPSC_AEMIF,
-};
-
-static struct clk pwm0_clk = {
- .name = "pwm0",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_PWM0,
-};
-
-static struct clk pwm1_clk = {
- .name = "pwm1",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_PWM1,
-};
-
-static struct clk pwm2_clk = {
- .name = "pwm2",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_PWM2,
-};
-
-static struct clk pwm3_clk = {
- .name = "pwm3",
- .parent = &ref_clk,
- .lpsc = DM365_LPSC_PWM3,
-};
-
-static struct clk timer0_clk = {
- .name = "timer0",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_TIMER0,
-};
-
-static struct clk timer1_clk = {
- .name = "timer1",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_TIMER1,
-};
-
-static struct clk timer2_clk = {
- .name = "timer2",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_TIMER2,
- .usecount = 1,
-};
-
-static struct clk timer3_clk = {
- .name = "timer3",
- .parent = &pll1_aux_clk,
- .lpsc = DM365_LPSC_TIMER3,
-};
-
-static struct clk usb_clk = {
- .name = "usb",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_USB,
-};
-
-static struct clk emac_clk = {
- .name = "emac",
- .parent = &pll1_sysclk4,
- .lpsc = DM365_LPSC_EMAC,
-};
-
-static struct clk voicecodec_clk = {
- .name = "voice_codec",
- .parent = &pll2_sysclk4,
- .lpsc = DM365_LPSC_VOICE_CODEC,
-};
-
-static struct clk asp0_clk = {
- .name = "asp0",
- .parent = &pll1_sysclk4,
- .lpsc = DM365_LPSC_McBSP1,
-};
-
-static struct clk rto_clk = {
- .name = "rto",
- .parent = &pll1_sysclk4,
- .lpsc = DM365_LPSC_RTO,
-};
-
-static struct clk mjcp_clk = {
- .name = "mjcp",
- .parent = &pll1_sysclk3,
- .lpsc = DM365_LPSC_MJCP,
-};
-
-static struct clk_lookup dm365_clks[] = {
- CLK(NULL, "ref", &ref_clk),
- CLK(NULL, "pll1", &pll1_clk),
- CLK(NULL, "pll1_aux", &pll1_aux_clk),
- CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp),
- CLK(NULL, "clkout0", &clkout0_clk),
- CLK(NULL, "pll1_sysclk1", &pll1_sysclk1),
- CLK(NULL, "pll1_sysclk2", &pll1_sysclk2),
- CLK(NULL, "pll1_sysclk3", &pll1_sysclk3),
- CLK(NULL, "pll1_sysclk4", &pll1_sysclk4),
- CLK(NULL, "pll1_sysclk5", &pll1_sysclk5),
- CLK(NULL, "pll1_sysclk6", &pll1_sysclk6),
- CLK(NULL, "pll1_sysclk7", &pll1_sysclk7),
- CLK(NULL, "pll1_sysclk8", &pll1_sysclk8),
- CLK(NULL, "pll1_sysclk9", &pll1_sysclk9),
- CLK(NULL, "pll2", &pll2_clk),
- CLK(NULL, "pll2_aux", &pll2_aux_clk),
- CLK(NULL, "clkout1", &clkout1_clk),
- CLK(NULL, "pll2_sysclk1", &pll2_sysclk1),
- CLK(NULL, "pll2_sysclk2", &pll2_sysclk2),
- CLK(NULL, "pll2_sysclk3", &pll2_sysclk3),
- CLK(NULL, "pll2_sysclk4", &pll2_sysclk4),
- CLK(NULL, "pll2_sysclk5", &pll2_sysclk5),
- CLK(NULL, "pll2_sysclk6", &pll2_sysclk6),
- CLK(NULL, "pll2_sysclk7", &pll2_sysclk7),
- CLK(NULL, "pll2_sysclk8", &pll2_sysclk8),
- CLK(NULL, "pll2_sysclk9", &pll2_sysclk9),
- CLK(NULL, "vpss_dac", &vpss_dac_clk),
- CLK("vpss", "master", &vpss_master_clk),
- CLK("vpss", "slave", &vpss_slave_clk),
- CLK(NULL, "arm", &arm_clk),
- CLK("serial8250.0", NULL, &uart0_clk),
- CLK("serial8250.1", NULL, &uart1_clk),
- CLK("i2c_davinci.1", NULL, &i2c_clk),
- CLK("da830-mmc.0", NULL, &mmcsd0_clk),
- CLK("da830-mmc.1", NULL, &mmcsd1_clk),
- CLK("spi_davinci.0", NULL, &spi0_clk),
- CLK("spi_davinci.1", NULL, &spi1_clk),
- CLK("spi_davinci.2", NULL, &spi2_clk),
- CLK("spi_davinci.3", NULL, &spi3_clk),
- CLK("spi_davinci.4", NULL, &spi4_clk),
- CLK(NULL, "gpio", &gpio_clk),
- CLK(NULL, "aemif", &aemif_clk),
- CLK(NULL, "pwm0", &pwm0_clk),
- CLK(NULL, "pwm1", &pwm1_clk),
- CLK(NULL, "pwm2", &pwm2_clk),
- CLK(NULL, "pwm3", &pwm3_clk),
- CLK(NULL, "timer0", &timer0_clk),
- CLK(NULL, "timer1", &timer1_clk),
- CLK("davinci-wdt", NULL, &timer2_clk),
- CLK(NULL, "timer3", &timer3_clk),
- CLK(NULL, "usb", &usb_clk),
- CLK("davinci_emac.1", NULL, &emac_clk),
- CLK("davinci_mdio.0", "fck", &emac_clk),
- CLK("davinci_voicecodec", NULL, &voicecodec_clk),
- CLK("davinci-mcbsp", NULL, &asp0_clk),
- CLK(NULL, "rto", &rto_clk),
- CLK(NULL, "mjcp", &mjcp_clk),
- CLK(NULL, NULL, NULL),
-};
-
-/*----------------------------------------------------------------------*/
-
#define INTMUX 0x18
#define EVTMUX 0x1c
@@ -1054,8 +621,6 @@ static struct davinci_id dm365_ids[] = {
},
};
-static u32 dm365_psc_bases[] = { DAVINCI_PWR_SLEEP_CNTRL_BASE };
-
static struct davinci_timer_info dm365_timer_info = {
.timers = davinci_timer_instance,
.clockevent_id = T0_BOT,
@@ -1116,8 +681,6 @@ static const struct davinci_soc_info davinci_soc_info_dm365 = {
.jtag_id_reg = 0x01c40028,
.ids = dm365_ids,
.ids_num = ARRAY_SIZE(dm365_ids),
- .psc_bases = dm365_psc_bases,
- .psc_bases_num = ARRAY_SIZE(dm365_psc_bases),
.pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm365_pins,
.pinmux_pins_num = ARRAY_SIZE(dm365_pins),
@@ -1171,8 +734,28 @@ void __init dm365_init(void)
void __init dm365_init_time(void)
{
- davinci_clk_init(dm365_clks);
- davinci_timer_init();
+ void __iomem *pll1, *pll2, *psc;
+ struct clk *clk;
+
+ clk_register_fixed_rate(NULL, "ref_clk", NULL, 0, DM365_REF_FREQ);
+
+ pll1 = ioremap(DAVINCI_PLL1_BASE, SZ_1K);
+ dm365_pll1_init(NULL, pll1, NULL);
+
+ pll2 = ioremap(DAVINCI_PLL2_BASE, SZ_1K);
+ dm365_pll2_init(NULL, pll2, NULL);
+
+ psc = ioremap(DAVINCI_PWR_SLEEP_CNTRL_BASE, SZ_4K);
+ dm365_psc_init(NULL, psc);
+
+ clk = clk_get(NULL, "timer0");
+
+ davinci_timer_init(clk);
+}
+
+void __init dm365_register_clocks(void)
+{
+ /* all clocks are currently registered in dm365_init_time() */
}
static struct resource dm365_vpss_resources[] = {
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index a2e8586c8a6d..0720da7809a6 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -8,28 +8,29 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/serial_8250.h>
+
+#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
+#include <linux/clkdev.h>
#include <linux/dmaengine.h>
-#include <linux/platform_device.h>
+#include <linux/init.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
#include <asm/mach/map.h>
+#include <mach/common.h>
#include <mach/cputype.h>
#include <mach/irqs.h>
-#include "psc.h"
#include <mach/mux.h>
-#include <mach/time.h>
#include <mach/serial.h>
-#include <mach/common.h>
+#include <mach/time.h>
+#include "asp.h"
#include "davinci.h"
-#include "clock.h"
#include "mux.h"
-#include "asp.h"
/*
* Device specific clocks
@@ -43,290 +44,6 @@
#define DM644X_EMAC_CNTRL_RAM_OFFSET 0x2000
#define DM644X_EMAC_CNTRL_RAM_SIZE 0x2000
-static struct pll_data pll1_data = {
- .num = 1,
- .phys_base = DAVINCI_PLL1_BASE,
-};
-
-static struct pll_data pll2_data = {
- .num = 2,
- .phys_base = DAVINCI_PLL2_BASE,
-};
-
-static struct clk ref_clk = {
- .name = "ref_clk",
- .rate = DM644X_REF_FREQ,
-};
-
-static struct clk pll1_clk = {
- .name = "pll1",
- .parent = &ref_clk,
- .pll_data = &pll1_data,
- .flags = CLK_PLL,
-};
-
-static struct clk pll1_sysclk1 = {
- .name = "pll1_sysclk1",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV1,
-};
-
-static struct clk pll1_sysclk2 = {
- .name = "pll1_sysclk2",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV2,
-};
-
-static struct clk pll1_sysclk3 = {
- .name = "pll1_sysclk3",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV3,
-};
-
-static struct clk pll1_sysclk5 = {
- .name = "pll1_sysclk5",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV5,
-};
-
-static struct clk pll1_aux_clk = {
- .name = "pll1_aux_clk",
- .parent = &pll1_clk,
- .flags = CLK_PLL | PRE_PLL,
-};
-
-static struct clk pll1_sysclkbp = {
- .name = "pll1_sysclkbp",
- .parent = &pll1_clk,
- .flags = CLK_PLL | PRE_PLL,
- .div_reg = BPDIV
-};
-
-static struct clk pll2_clk = {
- .name = "pll2",
- .parent = &ref_clk,
- .pll_data = &pll2_data,
- .flags = CLK_PLL,
-};
-
-static struct clk pll2_sysclk1 = {
- .name = "pll2_sysclk1",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV1,
-};
-
-static struct clk pll2_sysclk2 = {
- .name = "pll2_sysclk2",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV2,
-};
-
-static struct clk pll2_sysclkbp = {
- .name = "pll2_sysclkbp",
- .parent = &pll2_clk,
- .flags = CLK_PLL | PRE_PLL,
- .div_reg = BPDIV
-};
-
-static struct clk dsp_clk = {
- .name = "dsp",
- .parent = &pll1_sysclk1,
- .lpsc = DAVINCI_LPSC_GEM,
- .domain = DAVINCI_GPSC_DSPDOMAIN,
- .usecount = 1, /* REVISIT how to disable? */
-};
-
-static struct clk arm_clk = {
- .name = "arm",
- .parent = &pll1_sysclk2,
- .lpsc = DAVINCI_LPSC_ARM,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk vicp_clk = {
- .name = "vicp",
- .parent = &pll1_sysclk2,
- .lpsc = DAVINCI_LPSC_IMCOP,
- .domain = DAVINCI_GPSC_DSPDOMAIN,
- .usecount = 1, /* REVISIT how to disable? */
-};
-
-static struct clk vpss_master_clk = {
- .name = "vpss_master",
- .parent = &pll1_sysclk3,
- .lpsc = DAVINCI_LPSC_VPSSMSTR,
- .flags = CLK_PSC,
-};
-
-static struct clk vpss_slave_clk = {
- .name = "vpss_slave",
- .parent = &pll1_sysclk3,
- .lpsc = DAVINCI_LPSC_VPSSSLV,
-};
-
-static struct clk uart0_clk = {
- .name = "uart0",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_UART0,
-};
-
-static struct clk uart1_clk = {
- .name = "uart1",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_UART1,
-};
-
-static struct clk uart2_clk = {
- .name = "uart2",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_UART2,
-};
-
-static struct clk emac_clk = {
- .name = "emac",
- .parent = &pll1_sysclk5,
- .lpsc = DAVINCI_LPSC_EMAC_WRAPPER,
-};
-
-static struct clk i2c_clk = {
- .name = "i2c",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_I2C,
-};
-
-static struct clk ide_clk = {
- .name = "ide",
- .parent = &pll1_sysclk5,
- .lpsc = DAVINCI_LPSC_ATA,
-};
-
-static struct clk asp_clk = {
- .name = "asp0",
- .parent = &pll1_sysclk5,
- .lpsc = DAVINCI_LPSC_McBSP,
-};
-
-static struct clk mmcsd_clk = {
- .name = "mmcsd",
- .parent = &pll1_sysclk5,
- .lpsc = DAVINCI_LPSC_MMC_SD,
-};
-
-static struct clk spi_clk = {
- .name = "spi",
- .parent = &pll1_sysclk5,
- .lpsc = DAVINCI_LPSC_SPI,
-};
-
-static struct clk gpio_clk = {
- .name = "gpio",
- .parent = &pll1_sysclk5,
- .lpsc = DAVINCI_LPSC_GPIO,
-};
-
-static struct clk usb_clk = {
- .name = "usb",
- .parent = &pll1_sysclk5,
- .lpsc = DAVINCI_LPSC_USB,
-};
-
-static struct clk vlynq_clk = {
- .name = "vlynq",
- .parent = &pll1_sysclk5,
- .lpsc = DAVINCI_LPSC_VLYNQ,
-};
-
-static struct clk aemif_clk = {
- .name = "aemif",
- .parent = &pll1_sysclk5,
- .lpsc = DAVINCI_LPSC_AEMIF,
-};
-
-static struct clk pwm0_clk = {
- .name = "pwm0",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_PWM0,
-};
-
-static struct clk pwm1_clk = {
- .name = "pwm1",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_PWM1,
-};
-
-static struct clk pwm2_clk = {
- .name = "pwm2",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_PWM2,
-};
-
-static struct clk timer0_clk = {
- .name = "timer0",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_TIMER0,
-};
-
-static struct clk timer1_clk = {
- .name = "timer1",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_TIMER1,
-};
-
-static struct clk timer2_clk = {
- .name = "timer2",
- .parent = &pll1_aux_clk,
- .lpsc = DAVINCI_LPSC_TIMER2,
- .usecount = 1, /* REVISIT: why can't this be disabled? */
-};
-
-static struct clk_lookup dm644x_clks[] = {
- CLK(NULL, "ref", &ref_clk),
- CLK(NULL, "pll1", &pll1_clk),
- CLK(NULL, "pll1_sysclk1", &pll1_sysclk1),
- CLK(NULL, "pll1_sysclk2", &pll1_sysclk2),
- CLK(NULL, "pll1_sysclk3", &pll1_sysclk3),
- CLK(NULL, "pll1_sysclk5", &pll1_sysclk5),
- CLK(NULL, "pll1_aux", &pll1_aux_clk),
- CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp),
- CLK(NULL, "pll2", &pll2_clk),
- CLK(NULL, "pll2_sysclk1", &pll2_sysclk1),
- CLK(NULL, "pll2_sysclk2", &pll2_sysclk2),
- CLK(NULL, "pll2_sysclkbp", &pll2_sysclkbp),
- CLK(NULL, "dsp", &dsp_clk),
- CLK(NULL, "arm", &arm_clk),
- CLK(NULL, "vicp", &vicp_clk),
- CLK("vpss", "master", &vpss_master_clk),
- CLK("vpss", "slave", &vpss_slave_clk),
- CLK(NULL, "arm", &arm_clk),
- CLK("serial8250.0", NULL, &uart0_clk),
- CLK("serial8250.1", NULL, &uart1_clk),
- CLK("serial8250.2", NULL, &uart2_clk),
- CLK("davinci_emac.1", NULL, &emac_clk),
- CLK("davinci_mdio.0", "fck", &emac_clk),
- CLK("i2c_davinci.1", NULL, &i2c_clk),
- CLK("palm_bk3710", NULL, &ide_clk),
- CLK("davinci-mcbsp", NULL, &asp_clk),
- CLK("dm6441-mmc.0", NULL, &mmcsd_clk),
- CLK(NULL, "spi", &spi_clk),
- CLK(NULL, "gpio", &gpio_clk),
- CLK(NULL, "usb", &usb_clk),
- CLK(NULL, "vlynq", &vlynq_clk),
- CLK(NULL, "aemif", &aemif_clk),
- CLK(NULL, "pwm0", &pwm0_clk),
- CLK(NULL, "pwm1", &pwm1_clk),
- CLK(NULL, "pwm2", &pwm2_clk),
- CLK(NULL, "timer0", &timer0_clk),
- CLK(NULL, "timer1", &timer1_clk),
- CLK("davinci-wdt", NULL, &timer2_clk),
- CLK(NULL, NULL, NULL),
-};
-
static struct emac_platform_data dm644x_emac_pdata = {
.ctrl_reg_offset = DM644X_EMAC_CNTRL_OFFSET,
.ctrl_mod_reg_offset = DM644X_EMAC_CNTRL_MOD_OFFSET,
@@ -819,8 +536,6 @@ static struct davinci_id dm644x_ids[] = {
},
};
-static u32 dm644x_psc_bases[] = { DAVINCI_PWR_SLEEP_CNTRL_BASE };
-
/*
* T0_BOT: Timer 0, bottom: clockevent source for hrtimers
* T0_TOP: Timer 0, top : clocksource for generic timekeeping
@@ -905,8 +620,6 @@ static const struct davinci_soc_info davinci_soc_info_dm644x = {
.jtag_id_reg = 0x01c40028,
.ids = dm644x_ids,
.ids_num = ARRAY_SIZE(dm644x_ids),
- .psc_bases = dm644x_psc_bases,
- .psc_bases_num = ARRAY_SIZE(dm644x_psc_bases),
.pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm644x_pins,
.pinmux_pins_num = ARRAY_SIZE(dm644x_pins),
@@ -934,8 +647,41 @@ void __init dm644x_init(void)
void __init dm644x_init_time(void)
{
- davinci_clk_init(dm644x_clks);
- davinci_timer_init();
+ void __iomem *pll1, *psc;
+ struct clk *clk;
+
+ clk_register_fixed_rate(NULL, "ref_clk", NULL, 0, DM644X_REF_FREQ);
+
+ pll1 = ioremap(DAVINCI_PLL1_BASE, SZ_1K);
+ dm644x_pll1_init(NULL, pll1, NULL);
+
+ psc = ioremap(DAVINCI_PWR_SLEEP_CNTRL_BASE, SZ_4K);
+ dm644x_psc_init(NULL, psc);
+
+ clk = clk_get(NULL, "timer0");
+
+ davinci_timer_init(clk);
+}
+
+static struct resource dm644x_pll2_resources[] = {
+ {
+ .start = DAVINCI_PLL2_BASE,
+ .end = DAVINCI_PLL2_BASE + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device dm644x_pll2_device = {
+ .name = "dm644x-pll2",
+ .id = -1,
+ .resource = dm644x_pll2_resources,
+ .num_resources = ARRAY_SIZE(dm644x_pll2_resources),
+};
+
+void __init dm644x_register_clocks(void)
+{
+ /* PLL1 and PSC are registered in dm644x_init_time() */
+ platform_device_register(&dm644x_pll2_device);
}
int __init dm644x_init_video(struct vpfe_config *vpfe_cfg,
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index c32ca27ab343..6bd2ed069d0d 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -8,29 +8,30 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
+
+#include <linux/clk-provider.h>
+#include <linux/clk/davinci.h>
+#include <linux/clkdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/serial_8250.h>
-#include <linux/platform_device.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
#include <asm/mach/map.h>
+#include <mach/common.h>
#include <mach/cputype.h>
#include <mach/irqs.h>
-#include "psc.h"
#include <mach/mux.h>
-#include <mach/time.h>
#include <mach/serial.h>
-#include <mach/common.h>
+#include <mach/time.h>
+#include "asp.h"
#include "davinci.h"
-#include "clock.h"
#include "mux.h"
-#include "asp.h"
#define DAVINCI_VPIF_BASE (0x01C12000)
@@ -46,317 +47,6 @@
#define DM646X_EMAC_CNTRL_RAM_OFFSET 0x2000
#define DM646X_EMAC_CNTRL_RAM_SIZE 0x2000
-static struct pll_data pll1_data = {
- .num = 1,
- .phys_base = DAVINCI_PLL1_BASE,
-};
-
-static struct pll_data pll2_data = {
- .num = 2,
- .phys_base = DAVINCI_PLL2_BASE,
-};
-
-static struct clk ref_clk = {
- .name = "ref_clk",
- /* rate is initialized in dm646x_init_time() */
-};
-
-static struct clk aux_clkin = {
- .name = "aux_clkin",
- /* rate is initialized in dm646x_init_time() */
-};
-
-static struct clk pll1_clk = {
- .name = "pll1",
- .parent = &ref_clk,
- .pll_data = &pll1_data,
- .flags = CLK_PLL,
-};
-
-static struct clk pll1_sysclk1 = {
- .name = "pll1_sysclk1",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV1,
-};
-
-static struct clk pll1_sysclk2 = {
- .name = "pll1_sysclk2",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV2,
-};
-
-static struct clk pll1_sysclk3 = {
- .name = "pll1_sysclk3",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV3,
-};
-
-static struct clk pll1_sysclk4 = {
- .name = "pll1_sysclk4",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV4,
-};
-
-static struct clk pll1_sysclk5 = {
- .name = "pll1_sysclk5",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV5,
-};
-
-static struct clk pll1_sysclk6 = {
- .name = "pll1_sysclk6",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV6,
-};
-
-static struct clk pll1_sysclk8 = {
- .name = "pll1_sysclk8",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV8,
-};
-
-static struct clk pll1_sysclk9 = {
- .name = "pll1_sysclk9",
- .parent = &pll1_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV9,
-};
-
-static struct clk pll1_sysclkbp = {
- .name = "pll1_sysclkbp",
- .parent = &pll1_clk,
- .flags = CLK_PLL | PRE_PLL,
- .div_reg = BPDIV,
-};
-
-static struct clk pll1_aux_clk = {
- .name = "pll1_aux_clk",
- .parent = &pll1_clk,
- .flags = CLK_PLL | PRE_PLL,
-};
-
-static struct clk pll2_clk = {
- .name = "pll2_clk",
- .parent = &ref_clk,
- .pll_data = &pll2_data,
- .flags = CLK_PLL,
-};
-
-static struct clk pll2_sysclk1 = {
- .name = "pll2_sysclk1",
- .parent = &pll2_clk,
- .flags = CLK_PLL,
- .div_reg = PLLDIV1,
-};
-
-static struct clk dsp_clk = {
- .name = "dsp",
- .parent = &pll1_sysclk1,
- .lpsc = DM646X_LPSC_C64X_CPU,
- .usecount = 1, /* REVISIT how to disable? */
-};
-
-static struct clk arm_clk = {
- .name = "arm",
- .parent = &pll1_sysclk2,
- .lpsc = DM646X_LPSC_ARM,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk edma_cc_clk = {
- .name = "edma_cc",
- .parent = &pll1_sysclk2,
- .lpsc = DM646X_LPSC_TPCC,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk edma_tc0_clk = {
- .name = "edma_tc0",
- .parent = &pll1_sysclk2,
- .lpsc = DM646X_LPSC_TPTC0,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk edma_tc1_clk = {
- .name = "edma_tc1",
- .parent = &pll1_sysclk2,
- .lpsc = DM646X_LPSC_TPTC1,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk edma_tc2_clk = {
- .name = "edma_tc2",
- .parent = &pll1_sysclk2,
- .lpsc = DM646X_LPSC_TPTC2,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk edma_tc3_clk = {
- .name = "edma_tc3",
- .parent = &pll1_sysclk2,
- .lpsc = DM646X_LPSC_TPTC3,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk uart0_clk = {
- .name = "uart0",
- .parent = &aux_clkin,
- .lpsc = DM646X_LPSC_UART0,
-};
-
-static struct clk uart1_clk = {
- .name = "uart1",
- .parent = &aux_clkin,
- .lpsc = DM646X_LPSC_UART1,
-};
-
-static struct clk uart2_clk = {
- .name = "uart2",
- .parent = &aux_clkin,
- .lpsc = DM646X_LPSC_UART2,
-};
-
-static struct clk i2c_clk = {
- .name = "I2CCLK",
- .parent = &pll1_sysclk3,
- .lpsc = DM646X_LPSC_I2C,
-};
-
-static struct clk gpio_clk = {
- .name = "gpio",
- .parent = &pll1_sysclk3,
- .lpsc = DM646X_LPSC_GPIO,
-};
-
-static struct clk mcasp0_clk = {
- .name = "mcasp0",
- .parent = &pll1_sysclk3,
- .lpsc = DM646X_LPSC_McASP0,
-};
-
-static struct clk mcasp1_clk = {
- .name = "mcasp1",
- .parent = &pll1_sysclk3,
- .lpsc = DM646X_LPSC_McASP1,
-};
-
-static struct clk aemif_clk = {
- .name = "aemif",
- .parent = &pll1_sysclk3,
- .lpsc = DM646X_LPSC_AEMIF,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk emac_clk = {
- .name = "emac",
- .parent = &pll1_sysclk3,
- .lpsc = DM646X_LPSC_EMAC,
-};
-
-static struct clk pwm0_clk = {
- .name = "pwm0",
- .parent = &pll1_sysclk3,
- .lpsc = DM646X_LPSC_PWM0,
- .usecount = 1, /* REVISIT: disabling hangs system */
-};
-
-static struct clk pwm1_clk = {
- .name = "pwm1",
- .parent = &pll1_sysclk3,
- .lpsc = DM646X_LPSC_PWM1,
- .usecount = 1, /* REVISIT: disabling hangs system */
-};
-
-static struct clk timer0_clk = {
- .name = "timer0",
- .parent = &pll1_sysclk3,
- .lpsc = DM646X_LPSC_TIMER0,
-};
-
-static struct clk timer1_clk = {
- .name = "timer1",
- .parent = &pll1_sysclk3,
- .lpsc = DM646X_LPSC_TIMER1,
-};
-
-static struct clk timer2_clk = {
- .name = "timer2",
- .parent = &pll1_sysclk3,
- .flags = ALWAYS_ENABLED, /* no LPSC, always enabled; c.f. spruep9a */
-};
-
-
-static struct clk ide_clk = {
- .name = "ide",
- .parent = &pll1_sysclk4,
- .lpsc = DAVINCI_LPSC_ATA,
-};
-
-static struct clk vpif0_clk = {
- .name = "vpif0",
- .parent = &ref_clk,
- .lpsc = DM646X_LPSC_VPSSMSTR,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk vpif1_clk = {
- .name = "vpif1",
- .parent = &ref_clk,
- .lpsc = DM646X_LPSC_VPSSSLV,
- .flags = ALWAYS_ENABLED,
-};
-
-static struct clk_lookup dm646x_clks[] = {
- CLK(NULL, "ref", &ref_clk),
- CLK(NULL, "aux", &aux_clkin),
- CLK(NULL, "pll1", &pll1_clk),
- CLK(NULL, "pll1_sysclk", &pll1_sysclk1),
- CLK(NULL, "pll1_sysclk", &pll1_sysclk2),
- CLK(NULL, "pll1_sysclk", &pll1_sysclk3),
- CLK(NULL, "pll1_sysclk", &pll1_sysclk4),
- CLK(NULL, "pll1_sysclk", &pll1_sysclk5),
- CLK(NULL, "pll1_sysclk", &pll1_sysclk6),
- CLK(NULL, "pll1_sysclk", &pll1_sysclk8),
- CLK(NULL, "pll1_sysclk", &pll1_sysclk9),
- CLK(NULL, "pll1_sysclk", &pll1_sysclkbp),
- CLK(NULL, "pll1_aux", &pll1_aux_clk),
- CLK(NULL, "pll2", &pll2_clk),
- CLK(NULL, "pll2_sysclk1", &pll2_sysclk1),
- CLK(NULL, "dsp", &dsp_clk),
- CLK(NULL, "arm", &arm_clk),
- CLK(NULL, "edma_cc", &edma_cc_clk),
- CLK(NULL, "edma_tc0", &edma_tc0_clk),
- CLK(NULL, "edma_tc1", &edma_tc1_clk),
- CLK(NULL, "edma_tc2", &edma_tc2_clk),
- CLK(NULL, "edma_tc3", &edma_tc3_clk),
- CLK("serial8250.0", NULL, &uart0_clk),
- CLK("serial8250.1", NULL, &uart1_clk),
- CLK("serial8250.2", NULL, &uart2_clk),
- CLK("i2c_davinci.1", NULL, &i2c_clk),
- CLK(NULL, "gpio", &gpio_clk),
- CLK("davinci-mcasp.0", NULL, &mcasp0_clk),
- CLK("davinci-mcasp.1", NULL, &mcasp1_clk),
- CLK(NULL, "aemif", &aemif_clk),
- CLK("davinci_emac.1", NULL, &emac_clk),
- CLK("davinci_mdio.0", "fck", &emac_clk),
- CLK(NULL, "pwm0", &pwm0_clk),
- CLK(NULL, "pwm1", &pwm1_clk),
- CLK(NULL, "timer0", &timer0_clk),
- CLK(NULL, "timer1", &timer1_clk),
- CLK("davinci-wdt", NULL, &timer2_clk),
- CLK("palm_bk3710", NULL, &ide_clk),
- CLK(NULL, "vpif0", &vpif0_clk),
- CLK(NULL, "vpif1", &vpif1_clk),
- CLK(NULL, NULL, NULL),
-};
-
static struct emac_platform_data dm646x_emac_pdata = {
.ctrl_reg_offset = DM646X_EMAC_CNTRL_OFFSET,
.ctrl_mod_reg_offset = DM646X_EMAC_CNTRL_MOD_OFFSET,
@@ -796,8 +486,6 @@ static struct davinci_id dm646x_ids[] = {
},
};
-static u32 dm646x_psc_bases[] = { DAVINCI_PWR_SLEEP_CNTRL_BASE };
-
/*
* T0_BOT: Timer 0, bottom: clockevent source for hrtimers
* T0_TOP: Timer 0, top : clocksource for generic timekeeping
@@ -882,8 +570,6 @@ static const struct davinci_soc_info davinci_soc_info_dm646x = {
.jtag_id_reg = 0x01c40028,
.ids = dm646x_ids,
.ids_num = ARRAY_SIZE(dm646x_ids),
- .psc_bases = dm646x_psc_bases,
- .psc_bases_num = ARRAY_SIZE(dm646x_psc_bases),
.pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm646x_pins,
.pinmux_pins_num = ARRAY_SIZE(dm646x_pins),
@@ -954,10 +640,42 @@ void __init dm646x_init(void)
void __init dm646x_init_time(unsigned long ref_clk_rate,
unsigned long aux_clkin_rate)
{
- ref_clk.rate = ref_clk_rate;
- aux_clkin.rate = aux_clkin_rate;
- davinci_clk_init(dm646x_clks);
- davinci_timer_init();
+ void __iomem *pll1, *psc;
+ struct clk *clk;
+
+ clk_register_fixed_rate(NULL, "ref_clk", NULL, 0, ref_clk_rate);
+ clk_register_fixed_rate(NULL, "aux_clkin", NULL, 0, aux_clkin_rate);
+
+ pll1 = ioremap(DAVINCI_PLL1_BASE, SZ_1K);
+ dm646x_pll1_init(NULL, pll1, NULL);
+
+ psc = ioremap(DAVINCI_PWR_SLEEP_CNTRL_BASE, SZ_4K);
+ dm646x_psc_init(NULL, psc);
+
+ clk = clk_get(NULL, "timer0");
+
+ davinci_timer_init(clk);
+}
+
+static struct resource dm646x_pll2_resources[] = {
+ {
+ .start = DAVINCI_PLL2_BASE,
+ .end = DAVINCI_PLL2_BASE + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device dm646x_pll2_device = {
+ .name = "dm646x-pll2",
+ .id = -1,
+ .resource = dm646x_pll2_resources,
+ .num_resources = ARRAY_SIZE(dm646x_pll2_resources),
+};
+
+void __init dm646x_register_clocks(void)
+{
+ /* PLL1 and PSC are registered in dm646x_init_time() */
+ platform_device_register(&dm646x_pll2_device);
}
static int __init dm646x_init_devices(void)
diff --git a/arch/arm/mach-davinci/include/mach/clock.h b/arch/arm/mach-davinci/include/mach/clock.h
index 3e8af6a0b64c..42ed4f2f5ce4 100644
--- a/arch/arm/mach-davinci/include/mach/clock.h
+++ b/arch/arm/mach-davinci/include/mach/clock.h
@@ -15,9 +15,6 @@
struct clk;
-extern int clk_register(struct clk *clk);
-extern void clk_unregister(struct clk *clk);
-
int davinci_clk_reset_assert(struct clk *c);
int davinci_clk_reset_deassert(struct clk *c);
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index f0d5e858f158..b577e13a9c23 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -12,11 +12,12 @@
#ifndef __ARCH_ARM_MACH_DAVINCI_COMMON_H
#define __ARCH_ARM_MACH_DAVINCI_COMMON_H
+#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/reboot.h>
-extern void davinci_timer_init(void);
+void davinci_timer_init(struct clk *clk);
extern void davinci_irq_init(void);
extern void __iomem *davinci_intc_base;
@@ -53,8 +54,6 @@ struct davinci_soc_info {
u32 jtag_id_reg;
struct davinci_id *ids;
unsigned long ids_num;
- u32 *psc_bases;
- unsigned long psc_bases_num;
u32 pinmux_base;
const struct mux_config *pinmux_pins;
unsigned long pinmux_pins_num;
@@ -82,12 +81,6 @@ extern void davinci_common_init(const struct davinci_soc_info *soc_info);
extern void davinci_init_ide(void);
void davinci_init_late(void);
-#ifdef CONFIG_DAVINCI_RESET_CLOCKS
-int davinci_clk_disable_unused(void);
-#else
-static inline int davinci_clk_disable_unused(void) { return 0; }
-#endif
-
#ifdef CONFIG_CPU_FREQ
int davinci_cpufreq_init(void);
#else
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index 9fd6d0125762..ab4a57f433f4 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -89,9 +89,11 @@ extern unsigned int da850_max_speed;
void da830_init(void);
void da830_init_time(void);
+void da830_register_clocks(void);
void da850_init(void);
void da850_init_time(void);
+void da850_register_clocks(void);
int da830_register_edma(struct edma_rsv_info *rsv);
int da850_register_edma(struct edma_rsv_info *rsv[2]);
@@ -101,9 +103,7 @@ int da8xx_register_watchdog(void);
int da8xx_register_usb_phy(void);
int da8xx_register_usb20(unsigned mA, unsigned potpgt);
int da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata);
-int da8xx_register_usb_refclkin(int rate);
-int da8xx_register_usb20_phy_clk(bool use_usb_refclkin);
-int da8xx_register_usb11_phy_clk(bool use_usb_refclkin);
+int da8xx_register_usb_phy_clocks(void);
int da850_register_sata_refclk(int rate);
int da8xx_register_emac(void);
int da8xx_register_uio_pruss(void);
diff --git a/arch/arm/mach-davinci/pm_domain.c b/arch/arm/mach-davinci/pm_domain.c
index 78eac2c0c146..e251fd593bfd 100644
--- a/arch/arm/mach-davinci/pm_domain.c
+++ b/arch/arm/mach-davinci/pm_domain.c
@@ -13,6 +13,7 @@
#include <linux/pm_runtime.h>
#include <linux/pm_clock.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
static struct dev_pm_domain davinci_pm_domain = {
.ops = {
@@ -28,6 +29,10 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
static int __init davinci_pm_runtime_init(void)
{
+ if (of_have_populated_dt())
+ return 0;
+
+ /* Use pm_clk as fallback if we're not using genpd. */
pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
deleted file mode 100644
index e5dc6bfde5f3..000000000000
--- a/arch/arm/mach-davinci/psc.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * TI DaVinci Power and Sleep Controller (PSC)
- *
- * Copyright (C) 2006 Texas Instruments.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#include <mach/cputype.h>
-#include "psc.h"
-
-#include "clock.h"
-
-/* Return nonzero iff the domain's clock is active */
-int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
-{
- void __iomem *psc_base;
- u32 mdstat;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
-
- if (!soc_info->psc_bases || (ctlr >= soc_info->psc_bases_num)) {
- pr_warn("PSC: Bad psc data: 0x%x[%d]\n",
- (int)soc_info->psc_bases, ctlr);
- return 0;
- }
-
- psc_base = ioremap(soc_info->psc_bases[ctlr], SZ_4K);
- mdstat = __raw_readl(psc_base + MDSTAT + 4 * id);
- iounmap(psc_base);
-
- /* if clocked, state can be "Enable" or "SyncReset" */
- return mdstat & BIT(12);
-}
-
-/* Control "reset" line associated with PSC domain */
-void davinci_psc_reset(unsigned int ctlr, unsigned int id, bool reset)
-{
- u32 mdctl;
- void __iomem *psc_base;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
-
- if (!soc_info->psc_bases || (ctlr >= soc_info->psc_bases_num)) {
- pr_warn("PSC: Bad psc data: 0x%x[%d]\n",
- (int)soc_info->psc_bases, ctlr);
- return;
- }
-
- psc_base = ioremap(soc_info->psc_bases[ctlr], SZ_4K);
-
- mdctl = readl(psc_base + MDCTL + 4 * id);
- if (reset)
- mdctl &= ~MDCTL_LRST;
- else
- mdctl |= MDCTL_LRST;
- writel(mdctl, psc_base + MDCTL + 4 * id);
-
- iounmap(psc_base);
-}
-
-/* Enable or disable a PSC domain */
-void davinci_psc_config(unsigned int domain, unsigned int ctlr,
- unsigned int id, bool enable, u32 flags)
-{
- u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
- void __iomem *psc_base;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
- u32 next_state = PSC_STATE_ENABLE;
-
- if (!soc_info->psc_bases || (ctlr >= soc_info->psc_bases_num)) {
- pr_warn("PSC: Bad psc data: 0x%x[%d]\n",
- (int)soc_info->psc_bases, ctlr);
- return;
- }
-
- psc_base = ioremap(soc_info->psc_bases[ctlr], SZ_4K);
-
- if (!enable) {
- if (flags & PSC_SWRSTDISABLE)
- next_state = PSC_STATE_SWRSTDISABLE;
- else
- next_state = PSC_STATE_DISABLE;
- }
-
- mdctl = __raw_readl(psc_base + MDCTL + 4 * id);
- mdctl &= ~MDSTAT_STATE_MASK;
- mdctl |= next_state;
- if (flags & PSC_FORCE)
- mdctl |= MDCTL_FORCE;
- __raw_writel(mdctl, psc_base + MDCTL + 4 * id);
-
- pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
- if ((pdstat & PDSTAT_STATE_MASK) == 0) {
- pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
- pdctl |= PDCTL_NEXT;
- __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
-
- ptcmd = 1 << domain;
- __raw_writel(ptcmd, psc_base + PTCMD);
-
- do {
- epcpr = __raw_readl(psc_base + EPCPR);
- } while ((((epcpr >> domain) & 1) == 0));
-
- pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
- pdctl |= PDCTL_EPCGOOD;
- __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
- } else {
- ptcmd = 1 << domain;
- __raw_writel(ptcmd, psc_base + PTCMD);
- }
-
- do {
- ptstat = __raw_readl(psc_base + PTSTAT);
- } while (!(((ptstat >> domain) & 1) == 0));
-
- do {
- mdstat = __raw_readl(psc_base + MDSTAT + 4 * id);
- } while (!((mdstat & MDSTAT_STATE_MASK) == next_state));
-
- iounmap(psc_base);
-}
diff --git a/arch/arm/mach-davinci/psc.h b/arch/arm/mach-davinci/psc.h
index 8af9f09fc10c..68cd9d3fc82b 100644
--- a/arch/arm/mach-davinci/psc.h
+++ b/arch/arm/mach-davinci/psc.h
@@ -27,8 +27,6 @@
#ifndef __ASM_ARCH_PSC_H
#define __ASM_ARCH_PSC_H
-#define DAVINCI_PWR_SLEEP_CNTRL_BASE 0x01C41000
-
/* Power and Sleep Controller (PSC) Domains */
#define DAVINCI_GPSC_ARMDOMAIN 0
#define DAVINCI_GPSC_DSPDOMAIN 1
@@ -206,14 +204,4 @@
#define PDCTL_NEXT BIT(0)
#define PDCTL_EPCGOOD BIT(8)
-#ifndef __ASSEMBLER__
-
-extern int davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id);
-extern void davinci_psc_reset(unsigned int ctlr, unsigned int id,
- bool reset);
-extern void davinci_psc_config(unsigned int domain, unsigned int ctlr,
- unsigned int id, bool enable, u32 flags);
-
-#endif
-
#endif /* __ASM_ARCH_PSC_H */
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 1bb991ad9c1e..5a6de5368ab0 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
@@ -27,8 +28,6 @@
#include <mach/hardware.h>
#include <mach/time.h>
-#include "clock.h"
-
static struct clock_event_device clockevent_davinci;
static unsigned int davinci_clock_tick_rate;
@@ -334,10 +333,8 @@ static struct clock_event_device clockevent_davinci = {
.set_state_oneshot = davinci_set_oneshot,
};
-
-void __init davinci_timer_init(void)
+void __init davinci_timer_init(struct clk *timer_clk)
{
- struct clk *timer_clk;
struct davinci_soc_info *soc_info = &davinci_soc_info;
unsigned int clockevent_id;
unsigned int clocksource_id;
@@ -373,7 +370,6 @@ void __init davinci_timer_init(void)
}
}
- timer_clk = clk_get(NULL, "timer0");
BUG_ON(IS_ERR(timer_clk));
clk_prepare_enable(timer_clk);
@@ -402,3 +398,17 @@ void __init davinci_timer_init(void)
for (i=0; i< ARRAY_SIZE(timers); i++)
timer32_config(&timers[i]);
}
+
+static int __init of_davinci_timer_init(struct device_node *np)
+{
+ struct clk *clk;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ davinci_timer_init(clk);
+
+ return 0;
+}
+TIMER_OF_DECLARE(davinci_timer, "ti,da830-timer", of_davinci_timer_init);
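
davinci_timer_init() now takes its functional clock from the caller — clk_get(NULL, "timer0") on legacy boots, of_clk_get() via the TIMER_OF_DECLARE hook above on device-tree boots. A hedged sketch of the consumer-side handling the timer code relies on (names assumed, error handling simplified):

#include <linux/clk.h>
#include <linux/err.h>

/* Enable a timer clock and read back the tick rate, as the timer init does. */
static int example_timer_clk_setup(struct clk *clk)
{
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare and enable in one step */
	if (ret)
		return ret;

	/* clk_get_rate() returns Hz; the timer uses it as its tick rate. */
	return clk_get_rate(clk) ? 0 : -EINVAL;
}
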
diff --git a/arch/arm/mach-davinci/usb-da8xx.c b/arch/arm/mach-davinci/usb-da8xx.c
index 50445f0e98de..c17ce66a3d95 100644
--- a/arch/arm/mach-davinci/usb-da8xx.c
+++ b/arch/arm/mach-davinci/usb-da8xx.c
@@ -2,29 +2,30 @@
/*
* DA8xx USB
*/
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
+#include <linux/platform_data/clk-da8xx-cfgchip.h>
#include <linux/platform_data/phy-da8xx-usb.h>
#include <linux/platform_data/usb-davinci.h>
#include <linux/platform_device.h>
#include <linux/usb/musb.h>
-#include <mach/clock.h>
#include <mach/common.h>
#include <mach/cputype.h>
#include <mach/da8xx.h>
#include <mach/irqs.h>
-#include "clock.h"
-
#define DA8XX_USB0_BASE 0x01e00000
#define DA8XX_USB1_BASE 0x01e25000
+#ifndef CONFIG_COMMON_CLK
static struct clk *usb20_clk;
+#endif
static struct da8xx_usb_phy_platform_data da8xx_usb_phy_pdata;
@@ -81,11 +82,6 @@ static struct platform_device da8xx_usb20_dev = {
.name = "musb-da8xx",
.id = -1,
.dev = {
- /*
- * Setting init_name so that clock lookup will work in
- * usb20_phy_clk_enable() even if this device is not registered.
- */
- .init_name = "musb-da8xx",
.platform_data = &usb_data,
.dma_mask = &usb_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
@@ -134,229 +130,17 @@ int __init da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata)
return platform_device_register(&da8xx_usb11_device);
}
-static struct clk usb_refclkin = {
- .name = "usb_refclkin",
- .set_rate = davinci_simple_set_rate,
-};
-
-static struct clk_lookup usb_refclkin_lookup =
- CLK(NULL, "usb_refclkin", &usb_refclkin);
-
-/**
- * da8xx_register_usb_refclkin - register USB_REFCLKIN clock
- *
- * @rate: The clock rate in Hz
- *
- * This clock is only needed if the board provides an external USB_REFCLKIN
- * signal, in which case it will be used as the parent of usb20_phy_clk and/or
- * usb11_phy_clk.
- */
-int __init da8xx_register_usb_refclkin(int rate)
-{
- int ret;
-
- usb_refclkin.rate = rate;
- ret = clk_register(&usb_refclkin);
- if (ret)
- return ret;
-
- clkdev_add(&usb_refclkin_lookup);
-
- return 0;
-}
-
-static void usb20_phy_clk_enable(struct clk *clk)
-{
- u32 val;
- u32 timeout = 500000; /* 500 msec */
-
- val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
-
- /* The USB 2.0 PLL requires that the USB 2.0 PSC is enabled as well. */
- davinci_clk_enable(usb20_clk);
-
- /*
- * Turn on the USB 2.0 PHY, but just the PLL, and not OTG. The USB 1.1
- * host may use the PLL clock without USB 2.0 OTG being used.
- */
- val &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
- val |= CFGCHIP2_PHY_PLLON;
-
- writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
-
- while (--timeout) {
- val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
- if (val & CFGCHIP2_PHYCLKGD)
- goto done;
- udelay(1);
- }
-
- pr_err("Timeout waiting for USB 2.0 PHY clock good\n");
-done:
- davinci_clk_disable(usb20_clk);
-}
-
-static void usb20_phy_clk_disable(struct clk *clk)
-{
- u32 val;
-
- val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
- val |= CFGCHIP2_PHYPWRDN;
- writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
-}
-
-static int usb20_phy_clk_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 val;
-
- val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
-
- /* Set the mux depending on the parent clock. */
- if (parent == &usb_refclkin) {
- val &= ~CFGCHIP2_USB2PHYCLKMUX;
- } else if (strcmp(parent->name, "pll0_aux_clk") == 0) {
- val |= CFGCHIP2_USB2PHYCLKMUX;
- } else {
- pr_err("Bad parent on USB 2.0 PHY clock\n");
- return -EINVAL;
- }
-
- /* reference frequency also comes from parent clock */
- val &= ~CFGCHIP2_REFFREQ_MASK;
- switch (clk_get_rate(parent)) {
- case 12000000:
- val |= CFGCHIP2_REFFREQ_12MHZ;
- break;
- case 13000000:
- val |= CFGCHIP2_REFFREQ_13MHZ;
- break;
- case 19200000:
- val |= CFGCHIP2_REFFREQ_19_2MHZ;
- break;
- case 20000000:
- val |= CFGCHIP2_REFFREQ_20MHZ;
- break;
- case 24000000:
- val |= CFGCHIP2_REFFREQ_24MHZ;
- break;
- case 26000000:
- val |= CFGCHIP2_REFFREQ_26MHZ;
- break;
- case 38400000:
- val |= CFGCHIP2_REFFREQ_38_4MHZ;
- break;
- case 40000000:
- val |= CFGCHIP2_REFFREQ_40MHZ;
- break;
- case 48000000:
- val |= CFGCHIP2_REFFREQ_48MHZ;
- break;
- default:
- pr_err("Bad parent clock rate on USB 2.0 PHY clock\n");
- return -EINVAL;
- }
-
- writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
-
- return 0;
-}
-
-static struct clk usb20_phy_clk = {
- .name = "usb0_clk48",
- .clk_enable = usb20_phy_clk_enable,
- .clk_disable = usb20_phy_clk_disable,
- .set_parent = usb20_phy_clk_set_parent,
-};
-
-static struct clk_lookup usb20_phy_clk_lookup =
- CLK("da8xx-usb-phy", "usb0_clk48", &usb20_phy_clk);
-
-/**
- * da8xx_register_usb20_phy_clk - register USB0PHYCLKMUX clock
- *
- * @use_usb_refclkin: Selects the parent clock - either "usb_refclkin" if true
- * or "pll0_aux" if false.
- */
-int __init da8xx_register_usb20_phy_clk(bool use_usb_refclkin)
-{
- struct clk *parent;
- int ret;
-
- usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
- ret = PTR_ERR_OR_ZERO(usb20_clk);
- if (ret)
- return ret;
-
- parent = clk_get(NULL, use_usb_refclkin ? "usb_refclkin" : "pll0_aux");
- ret = PTR_ERR_OR_ZERO(parent);
- if (ret) {
- clk_put(usb20_clk);
- return ret;
- }
-
- usb20_phy_clk.parent = parent;
- ret = clk_register(&usb20_phy_clk);
- if (!ret)
- clkdev_add(&usb20_phy_clk_lookup);
-
- clk_put(parent);
-
- return ret;
-}
-
-static int usb11_phy_clk_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 val;
-
- val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
-
- /* Set the USB 1.1 PHY clock mux based on the parent clock. */
- if (parent == &usb20_phy_clk) {
- val &= ~CFGCHIP2_USB1PHYCLKMUX;
- } else if (parent == &usb_refclkin) {
- val |= CFGCHIP2_USB1PHYCLKMUX;
- } else {
- pr_err("Bad parent on USB 1.1 PHY clock\n");
- return -EINVAL;
- }
-
- writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
-
- return 0;
-}
-
-static struct clk usb11_phy_clk = {
- .name = "usb1_clk48",
- .set_parent = usb11_phy_clk_set_parent,
+static struct platform_device da8xx_usb_phy_clks_device = {
+ .name = "da830-usb-phy-clks",
+ .id = -1,
};
-static struct clk_lookup usb11_phy_clk_lookup =
- CLK("da8xx-usb-phy", "usb1_clk48", &usb11_phy_clk);
-
-/**
- * da8xx_register_usb11_phy_clk - register USB1PHYCLKMUX clock
- *
- * @use_usb_refclkin: Selects the parent clock - either "usb_refclkin" if true
- * or "usb0_clk48" if false.
- */
-int __init da8xx_register_usb11_phy_clk(bool use_usb_refclkin)
+int __init da8xx_register_usb_phy_clocks(void)
{
- struct clk *parent;
- int ret = 0;
-
- if (use_usb_refclkin)
- parent = clk_get(NULL, "usb_refclkin");
- else
- parent = clk_get(&da8xx_usb_phy.dev, "usb0_clk48");
- if (IS_ERR(parent))
- return PTR_ERR(parent);
-
- usb11_phy_clk.parent = parent;
- ret = clk_register(&usb11_phy_clk);
- if (!ret)
- clkdev_add(&usb11_phy_clk_lookup);
+ struct da8xx_cfgchip_clk_platform_data pdata;
- clk_put(parent);
+ pdata.cfgchip = da8xx_get_cfgchip();
+ da8xx_usb_phy_clks_device.dev.platform_data = &pdata;
- return ret;
+ return platform_device_register(&da8xx_usb_phy_clks_device);
}
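
The hunk above replaces the legacy clk_register()-based USB PHY clock setup
with a "da830-usb-phy-clks" platform device that carries the CFGCHIP regmap
via platform_data. A minimal sketch of how the matching driver could pick
that up in probe (the pdata struct name comes from the hunk; the probe body
is illustrative, not the actual davinci clock driver):

#include <linux/platform_device.h>

/* assumes the header declaring struct da8xx_cfgchip_clk_platform_data */
static int da8xx_usb_phy_clks_probe(struct platform_device *pdev)
{
        struct da8xx_cfgchip_clk_platform_data *pdata;

        /* Returns the pointer stored in dev.platform_data above. */
        pdata = dev_get_platdata(&pdev->dev);
        if (!pdata || !pdata->cfgchip)
                return -EINVAL;

        /* ... register the USB PHY clocks against pdata->cfgchip ... */
        return 0;
}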
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index f4b6c93a7fd0..865dcc4c3181 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -190,8 +190,6 @@ static void __init exynos_dt_fixup(void)
}
DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
- /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
- /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
.l2c_aux_val = 0x3c400001,
.l2c_aux_mask = 0xc20fffff,
.smp = smp_ops(exynos_smp_ops),
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index d3db306a5a70..7ead3acd6fa4 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -203,6 +203,7 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
NULL);
if (!domain) {
iounmap(pmu_base_addr);
+ pmu_base_addr = NULL;
return -ENOMEM;
}
@@ -272,7 +273,7 @@ static int exynos5420_cpu_suspend(unsigned long arg)
static void exynos_pm_set_wakeup_mask(void)
{
/* Set wake-up mask registers */
- pmu_raw_writel(exynos_get_eint_wake_mask(), S5P_EINT_WAKEUP_MASK);
+ pmu_raw_writel(exynos_get_eint_wake_mask(), EXYNOS_EINT_WAKEUP_MASK);
pmu_raw_writel(exynos_irqwake_intmask & ~(1 << 31), S5P_WAKEUP_MASK);
}
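
Note the added pmu_base_addr = NULL in the error path above: iounmap()
releases the mapping but leaves the global pointer dangling, so any later
NULL check would wrongly pass. A generic sketch of the pattern (names are
illustrative):

#include <linux/io.h>

static void __iomem *base;      /* hypothetical global mapping */

static void teardown_mapping(void)
{
        if (base) {
                iounmap(base);
                /* Reset the pointer so "if (base)" guards stay valid. */
                base = NULL;
        }
}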
diff --git a/arch/arm/mach-hisi/hotplug.c b/arch/arm/mach-hisi/hotplug.c
index a129aae72602..909bb2493781 100644
--- a/arch/arm/mach-hisi/hotplug.c
+++ b/arch/arm/mach-hisi/hotplug.c
@@ -148,13 +148,20 @@ static int hi3xxx_hotplug_init(void)
struct device_node *node;
node = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
- if (node) {
- ctrl_base = of_iomap(node, 0);
- id = HI3620_CTRL;
- return 0;
+ if (!node) {
+ id = ERROR_CTRL;
+ return -ENOENT;
}
- id = ERROR_CTRL;
- return -ENOENT;
+
+ ctrl_base = of_iomap(node, 0);
+ of_node_put(node);
+ if (!ctrl_base) {
+ id = ERROR_CTRL;
+ return -ENOMEM;
+ }
+
+ id = HI3620_CTRL;
+ return 0;
}
void hi3xxx_set_cpu(int cpu, bool enable)
@@ -173,11 +180,15 @@ static bool hix5hd2_hotplug_init(void)
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "hisilicon,cpuctrl");
- if (np) {
- ctrl_base = of_iomap(np, 0);
- return true;
- }
- return false;
+ if (!np)
+ return false;
+
+ ctrl_base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!ctrl_base)
+ return false;
+
+ return true;
}
void hix5hd2_set_cpu(int cpu, bool enable)
@@ -219,10 +230,10 @@ void hip01_set_cpu(int cpu, bool enable)
if (!ctrl_base) {
np = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
- if (np)
- ctrl_base = of_iomap(np, 0);
- else
- BUG();
+ BUG_ON(!np);
+ ctrl_base = of_iomap(np, 0);
+ of_node_put(np);
+ BUG_ON(!ctrl_base);
}
if (enable) {
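
All three hotplug hunks above apply the same rule: of_find_compatible_node()
returns a node with an elevated reference count, so the caller must call
of_node_put() once of_iomap() has done its work, and must check of_iomap()
for failure instead of assuming success. A condensed sketch of the idiom,
with a hypothetical compatible string:

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *map_sysctrl(void)
{
        struct device_node *np;
        void __iomem *base;

        np = of_find_compatible_node(NULL, NULL, "vendor,example-sysctrl");
        if (!np)
                return NULL;

        base = of_iomap(np, 0);         /* may return NULL on failure */
        of_node_put(np);                /* drop the lookup's reference */

        return base;
}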
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 6f4232384774..abc337111eff 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -523,18 +523,6 @@ config SOC_IMX6UL
help
This enables support for Freescale i.MX6 UltraLite processor.
-config SOC_IMX7D
- bool "i.MX7 Dual support"
- select PINCTRL_IMX7D
- select ARM_GIC
- select HAVE_ARM_ARCH_TIMER
- select HAVE_IMX_ANATOP
- select HAVE_IMX_MMDC
- select HAVE_IMX_SRC
- select IMX_GPCV2
- help
- This enables support for Freescale i.MX7 Dual processor.
-
config SOC_LS1021A
bool "Freescale LS1021A support"
select ARM_GIC
@@ -549,6 +537,27 @@ comment "Cortex-A/Cortex-M asymmetric multiprocessing platforms"
if ARCH_MULTI_V7 || ARM_SINGLE_ARMV7M
+config SOC_IMX7D_CA7
+ bool
+ select ARM_GIC
+ select HAVE_ARM_ARCH_TIMER
+ select HAVE_IMX_ANATOP
+ select HAVE_IMX_MMDC
+ select HAVE_IMX_SRC
+ select IMX_GPCV2
+
+config SOC_IMX7D_CM4
+ bool
+ select ARMV7M_SYSTICK
+
+config SOC_IMX7D
+ bool "i.MX7 Dual support"
+ select PINCTRL_IMX7D
+ select SOC_IMX7D_CA7 if ARCH_MULTI_V7
+ select SOC_IMX7D_CM4 if ARM_SINGLE_ARMV7M
+ help
+ This enables support for Freescale i.MX7 Dual processor.
+
config SOC_VF610
bool "Vybrid Family VF610 support"
select ARM_GIC if ARCH_MULTI_V7
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 2327e3e876d8..bae179af21f6 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -26,7 +26,7 @@ ifeq ($(CONFIG_CPU_IDLE),y)
obj-$(CONFIG_SOC_IMX5) += cpuidle-imx5.o
obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += cpuidle-imx6sl.o
-obj-$(CONFIG_SOC_IMX6SLL) += cpuidle-imx6sl.o
+obj-$(CONFIG_SOC_IMX6SLL) += cpuidle-imx6sx.o
obj-$(CONFIG_SOC_IMX6SX) += cpuidle-imx6sx.o
obj-$(CONFIG_SOC_IMX6UL) += cpuidle-imx6sx.o
endif
@@ -81,7 +81,8 @@ obj-$(CONFIG_SOC_IMX6SL) += mach-imx6sl.o
obj-$(CONFIG_SOC_IMX6SLL) += mach-imx6sl.o
obj-$(CONFIG_SOC_IMX6SX) += mach-imx6sx.o
obj-$(CONFIG_SOC_IMX6UL) += mach-imx6ul.o
-obj-$(CONFIG_SOC_IMX7D) += mach-imx7d.o
+obj-$(CONFIG_SOC_IMX7D_CA7) += mach-imx7d.o
+obj-$(CONFIG_SOC_IMX7D_CM4) += mach-imx7d-cm4.o
ifeq ($(CONFIG_SUSPEND),y)
AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index c8d68e918b2f..423dd76bb6b8 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -38,7 +38,6 @@ void imx21_soc_init(void);
void imx27_soc_init(void);
void imx31_soc_init(void);
void imx35_soc_init(void);
-void epit_timer_init(void __iomem *base, int irq);
int mx21_clocks_init(unsigned long lref, unsigned long fref);
int mx27_clocks_init(unsigned long fref);
int mx31_clocks_init(unsigned long fref);
@@ -58,10 +57,12 @@ struct device *imx_soc_device_init(void);
void imx6_enable_rbc(bool enable);
void imx_gpc_check_dt(void);
void imx_gpc_set_arm_power_in_lpm(bool power_off);
+void imx_gpc_set_l2_mem_power_in_lpm(bool power_off);
void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw);
void imx_gpc_set_arm_power_down_timing(u32 sw2iso, u32 sw);
void imx25_pm_init(void);
void imx27_pm_init(void);
+void imx5_pmu_init(void);
enum mxc_cpu_pwr_mode {
WAIT_CLOCKED, /* wfi only */
diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
index 4f2d1c772f85..e210bac18840 100644
--- a/arch/arm/mach-imx/cpu-imx5.c
+++ b/arch/arm/mach-imx/cpu-imx5.c
@@ -117,3 +117,48 @@ int mx53_revision(void)
return mx5_cpu_rev;
}
EXPORT_SYMBOL(mx53_revision);
+
+#define ARM_GPC 0x4
+#define DBGEN BIT(16)
+
+/*
+ * This enables the DBGEN bit in ARM_GPC register, which is
+ * required for accessing some performance counter features.
+ * Technically it is only required while perf is used, but to
+ * keep the source code simple we just enable it all the time
+ * when the kernel configuration allows using the feature.
+ */
+void __init imx5_pmu_init(void)
+{
+ void __iomem *tigerp_base;
+ struct device_node *np;
+ u32 gpc;
+
+ if (!IS_ENABLED(CONFIG_ARM_PMU))
+ return;
+
+ np = of_find_compatible_node(NULL, NULL, "arm,cortex-a8-pmu");
+ if (!np)
+ return;
+
+ if (!of_property_read_bool(np, "secure-reg-access"))
+ goto exit;
+
+ of_node_put(np);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx51-tigerp");
+ if (!np)
+ return;
+
+ tigerp_base = of_iomap(np, 0);
+ if (!tigerp_base)
+ goto exit;
+
+ gpc = readl_relaxed(tigerp_base + ARM_GPC);
+ gpc |= DBGEN;
+ writel_relaxed(gpc, tigerp_base + ARM_GPC);
+ iounmap(tigerp_base);
+exit:
+ of_node_put(np);
+}
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index 32969f34486a..c6b1bf97a6c1 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -68,6 +68,7 @@ void __init imx_aips_allow_unprivileged_access(
for_each_compatible_node(np, NULL, compat) {
aips_base_addr = of_iomap(np, 0);
+ WARN_ON(!aips_base_addr);
imx_set_aips(aips_base_addr);
}
}
diff --git a/arch/arm/mach-imx/cpuidle-imx6sl.c b/arch/arm/mach-imx/cpuidle-imx6sl.c
index fa8ead145d17..8d866fb674a8 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sl.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sl.c
@@ -12,7 +12,6 @@
#include "common.h"
#include "cpuidle.h"
-#include "hardware.h"
static int imx6sl_enter_wait(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
@@ -22,11 +21,9 @@ static int imx6sl_enter_wait(struct cpuidle_device *dev,
* Software workaround for ERR005311, see function
* description for details.
*/
- if (cpu_is_imx6sl())
- imx6sl_set_wait_clk(true);
+ imx6sl_set_wait_clk(true);
cpu_do_idle();
- if (cpu_is_imx6sl())
- imx6sl_set_wait_clk(false);
+ imx6sl_set_wait_clk(false);
imx6_set_lpm(WAIT_CLOCKED);
return index;
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index d0f14b761ff7..243a108a940b 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -103,6 +103,7 @@ int __init imx6sx_cpuidle_init(void)
{
imx6_set_int_mem_clk_lpm(true);
imx6_enable_rbc(false);
+ imx_gpc_set_l2_mem_power_in_lpm(false);
/*
* set ARM power up/down timing to the fastest,
* sw2iso and sw can be set to one 32K cycle = 31us
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index de535cb679b3..e11159d40fb8 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -20,6 +20,7 @@
#include "common.h"
#include "hardware.h"
+#define GPC_CNTR 0x0
#define GPC_IMR1 0x008
#define GPC_PGC_CPU_PDN 0x2a0
#define GPC_PGC_CPU_PUPSCR 0x2a4
@@ -27,6 +28,8 @@
#define GPC_PGC_SW2ISO_SHIFT 0x8
#define GPC_PGC_SW_SHIFT 0x0
+#define GPC_CNTR_L2_PGE_SHIFT 22
+
#define IMR_NUM 4
#define GPC_MAX_IRQS (IMR_NUM * 32)
@@ -51,6 +54,17 @@ void imx_gpc_set_arm_power_in_lpm(bool power_off)
writel_relaxed(power_off, gpc_base + GPC_PGC_CPU_PDN);
}
+void imx_gpc_set_l2_mem_power_in_lpm(bool power_off)
+{
+ u32 val;
+
+ val = readl_relaxed(gpc_base + GPC_CNTR);
+ val &= ~(1 << GPC_CNTR_L2_PGE_SHIFT);
+ if (power_off)
+ val |= 1 << GPC_CNTR_L2_PGE_SHIFT;
+ writel_relaxed(val, gpc_base + GPC_CNTR);
+}
+
void imx_gpc_pre_suspend(bool arm_power_off)
{
void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
diff --git a/arch/arm/mach-imx/imx31-dt.c b/arch/arm/mach-imx/imx31-dt.c
index 668d74b72511..9d9640aaf858 100644
--- a/arch/arm/mach-imx/imx31-dt.c
+++ b/arch/arm/mach-imx/imx31-dt.c
@@ -9,35 +9,17 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/irq.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-
#include "common.h"
-#include "mx31.h"
static const char * const imx31_dt_board_compat[] __initconst = {
"fsl,imx31",
NULL
};
-/* FIXME: replace with DT binding */
-static const struct resource imx31_rnga_res[] __initconst = {
- DEFINE_RES_MEM(MX31_RNGA_BASE_ADDR, SZ_16K),
-};
-
-static void __init imx31_dt_mach_init(void)
-{
- platform_device_register_simple("mxc_rnga", -1, imx31_rnga_res,
- ARRAY_SIZE(imx31_rnga_res));
-}
-
DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)")
.map_io = mx31_map_io,
.init_early = imx31_init_early,
.init_irq = mx31_init_irq,
- .init_machine = imx31_dt_mach_init,
.dt_compat = imx31_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c
index 3835b6a3423c..c7169c2f94c4 100644
--- a/arch/arm/mach-imx/mach-imx51.c
+++ b/arch/arm/mach-imx/mach-imx51.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
@@ -48,11 +49,38 @@ static void __init imx51_ipu_mipi_setup(void)
iounmap(hsc_addr);
}
+static void __init imx51_m4if_setup(void)
+{
+ void __iomem *m4if_base;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx51-m4if");
+ if (!np)
+ return;
+
+ m4if_base = of_iomap(np, 0);
+ if (!m4if_base) {
+ pr_err("Unable to map M4IF registers\n");
+ return;
+ }
+
+ /*
+ * Configure VPU and IPU with higher priorities
+ * in order to avoid artifacts during video playback
+ */
+ writel_relaxed(0x00000203, m4if_base + 0x40);
+ writel_relaxed(0x00000000, m4if_base + 0x44);
+ writel_relaxed(0x00120125, m4if_base + 0x9c);
+ writel_relaxed(0x001901A3, m4if_base + 0x48);
+ iounmap(m4if_base);
+}
+
static void __init imx51_dt_init(void)
{
imx51_ipu_mipi_setup();
imx_src_init();
-
+ imx51_m4if_setup();
+ imx5_pmu_init();
imx_aips_allow_unprivileged_access("fsl,imx51-aipstz");
}
diff --git a/arch/arm/mach-imx/mach-imx53.c b/arch/arm/mach-imx/mach-imx53.c
index 07c2e8dca494..5ec7100737e8 100644
--- a/arch/arm/mach-imx/mach-imx53.c
+++ b/arch/arm/mach-imx/mach-imx53.c
@@ -31,7 +31,7 @@ static void __init imx53_init_early(void)
static void __init imx53_dt_init(void)
{
imx_src_init();
-
+ imx5_pmu_init();
imx_aips_allow_unprivileged_access("fsl,imx53-aipstz");
}
diff --git a/arch/arm/mach-imx/mach-imx6sl.c b/arch/arm/mach-imx/mach-imx6sl.c
index c7a1ef180dda..99be4225297a 100644
--- a/arch/arm/mach-imx/mach-imx6sl.c
+++ b/arch/arm/mach-imx/mach-imx6sl.c
@@ -42,7 +42,10 @@ static void __init imx6sl_init_late(void)
if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ))
platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0);
- imx6sl_cpuidle_init();
+ if (IS_ENABLED(CONFIG_SOC_IMX6SL) && cpu_is_imx6sl())
+ imx6sl_cpuidle_init();
+ else if (IS_ENABLED(CONFIG_SOC_IMX6SLL))
+ imx6sx_cpuidle_init();
}
static void __init imx6sl_init_machine(void)
diff --git a/arch/arm/mach-imx/mach-imx7d-cm4.c b/arch/arm/mach-imx/mach-imx7d-cm4.c
new file mode 100644
index 000000000000..0800b5891d2a
--- /dev/null
+++ b/arch/arm/mach-imx/mach-imx7d-cm4.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
+ */
+
+#include <linux/kernel.h>
+#include <asm/v7m.h>
+#include <asm/mach/arch.h>
+
+static const char * const imx7d_cm4_dt_compat[] __initconst = {
+ "fsl,imx7d-cm4",
+ NULL,
+};
+
+DT_MACHINE_START(IMX7D, "Freescale i.MX7 Dual Cortex-M4 (Device Tree)")
+ .dt_compat = imx7d_cm4_dt_compat,
+ .restart = armv7m_restart,
+MACHINE_END
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 017539dd712b..b08e407d8d96 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -130,6 +130,13 @@ static const u32 imx6sl_mmdc_io_offset[] __initconst = {
0x330, 0x334, 0x320, /* SDCKE0, SDCKE1, RESET */
};
+static const u32 imx6sll_mmdc_io_offset[] __initconst = {
+ 0x294, 0x298, 0x29c, 0x2a0, /* DQM0 ~ DQM3 */
+ 0x544, 0x54c, 0x554, 0x558, /* GPR_B0DS ~ GPR_B3DS */
+ 0x530, 0x540, 0x2ac, 0x52c, /* MODE_CTL, MODE, SDCLK_0, GPR_ADDDS */
+ 0x2a4, 0x2a8, /* SDCKE0, SDCKE1*/
+};
+
static const u32 imx6sx_mmdc_io_offset[] __initconst = {
0x2ec, 0x2f0, 0x2f4, 0x2f8, /* DQM0 ~ DQM3 */
0x60c, 0x610, 0x61c, 0x620, /* GPR_B0DS ~ GPR_B3DS */
@@ -175,6 +182,16 @@ static const struct imx6_pm_socdata imx6sl_pm_data __initconst = {
.mmdc_io_offset = imx6sl_mmdc_io_offset,
};
+static const struct imx6_pm_socdata imx6sll_pm_data __initconst = {
+ .mmdc_compat = "fsl,imx6sll-mmdc",
+ .src_compat = "fsl,imx6sll-src",
+ .iomuxc_compat = "fsl,imx6sll-iomuxc",
+ .gpc_compat = "fsl,imx6sll-gpc",
+ .pl310_compat = "arm,pl310-cache",
+ .mmdc_io_num = ARRAY_SIZE(imx6sll_mmdc_io_offset),
+ .mmdc_io_offset = imx6sll_mmdc_io_offset,
+};
+
static const struct imx6_pm_socdata imx6sx_pm_data __initconst = {
.mmdc_compat = "fsl,imx6sx-mmdc",
.src_compat = "fsl,imx6sx-src",
@@ -296,7 +313,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
if (cpu_is_imx6sl())
val |= BM_CLPCR_BYPASS_PMIC_READY;
if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul() ||
- cpu_is_imx6ull())
+ cpu_is_imx6ull() || cpu_is_imx6sll())
val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
else
val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
@@ -314,7 +331,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
if (cpu_is_imx6sl() || cpu_is_imx6sx())
val |= BM_CLPCR_BYPASS_PMIC_READY;
if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul() ||
- cpu_is_imx6ull())
+ cpu_is_imx6ull() || cpu_is_imx6sll())
val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
else
val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
@@ -631,7 +648,17 @@ void __init imx6dl_pm_init(void)
void __init imx6sl_pm_init(void)
{
- imx6_pm_common_init(&imx6sl_pm_data);
+ struct regmap *gpr;
+
+ if (cpu_is_imx6sl()) {
+ imx6_pm_common_init(&imx6sl_pm_data);
+ } else {
+ imx6_pm_common_init(&imx6sll_pm_data);
+ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (!IS_ERR(gpr))
+ regmap_update_bits(gpr, IOMUXC_GPR5,
+ IMX6SLL_GPR5_AFCG_X_BYPASS_MASK, 0);
+ }
}
void __init imx6sx_pm_init(void)
diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c
index bf5e64906e65..ba91e4fe444d 100644
--- a/arch/arm/mach-mmp/sram.c
+++ b/arch/arm/mach-mmp/sram.c
@@ -15,6 +15,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
index 4ffbbd217e82..c130497dc6cc 100644
--- a/arch/arm/mach-mvebu/platsmp.c
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -35,6 +35,8 @@
#define AXP_BOOTROM_BASE 0xfff00000
#define AXP_BOOTROM_SIZE 0x100000
+static struct clk *boot_cpu_clk;
+
static struct clk *get_cpu_clk(int cpu)
{
struct clk *cpu_clk;
@@ -48,30 +50,6 @@ static struct clk *get_cpu_clk(int cpu)
return cpu_clk;
}
-static void set_secondary_cpu_clock(unsigned int cpu)
-{
- int thiscpu;
- unsigned long rate;
- struct clk *cpu_clk;
-
- thiscpu = get_cpu();
-
- cpu_clk = get_cpu_clk(thiscpu);
- if (!cpu_clk)
- goto out;
- clk_prepare_enable(cpu_clk);
- rate = clk_get_rate(cpu_clk);
-
- cpu_clk = get_cpu_clk(cpu);
- if (!cpu_clk)
- goto out;
- clk_set_rate(cpu_clk, rate);
- clk_prepare_enable(cpu_clk);
-
-out:
- put_cpu();
-}
-
static int armada_xp_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
int ret, hw_cpu;
@@ -79,7 +57,6 @@ static int armada_xp_boot_secondary(unsigned int cpu, struct task_struct *idle)
pr_info("Booting CPU %d\n", cpu);
hw_cpu = cpu_logical_map(cpu);
- set_secondary_cpu_clock(hw_cpu);
mvebu_pmsu_set_cpu_boot_addr(hw_cpu, armada_xp_secondary_startup);
/*
@@ -122,6 +99,19 @@ static void __init armada_xp_smp_init_cpus(void)
panic("Invalid number of CPUs in DT\n");
}
+static int armada_xp_sync_secondary_clk(unsigned int cpu)
+{
+ struct clk *cpu_clk = get_cpu_clk(cpu);
+
+ if (!cpu_clk || !boot_cpu_clk)
+ return 0;
+
+ clk_prepare_enable(cpu_clk);
+ clk_set_rate(cpu_clk, clk_get_rate(boot_cpu_clk));
+
+ return 0;
+}
+
static void __init armada_xp_smp_prepare_cpus(unsigned int max_cpus)
{
struct device_node *node;
@@ -131,6 +121,14 @@ static void __init armada_xp_smp_prepare_cpus(unsigned int max_cpus)
flush_cache_all();
set_cpu_coherent();
+ boot_cpu_clk = get_cpu_clk(smp_processor_id());
+ if (boot_cpu_clk) {
+ clk_prepare_enable(boot_cpu_clk);
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
+ "arm/mvebu/sync_clocks:online",
+ armada_xp_sync_secondary_clk, NULL);
+ }
+
/*
* In order to boot the secondary CPUs we need to ensure
* the bootROM is mapped at the correct address.
@@ -223,7 +221,6 @@ static int mv98dx3236_boot_secondary(unsigned int cpu, struct task_struct *idle)
int ret, hw_cpu;
hw_cpu = cpu_logical_map(cpu);
- set_secondary_cpu_clock(hw_cpu);
mv98dx3236_resume_set_cpu_boot_addr(hw_cpu,
armada_xp_secondary_startup);
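
Rather than programming the secondary CPU clock from the boot path, the hunk
above registers a CPU hotplug "online" callback, so the clock is resynced to
the boot CPU's rate every time a CPU comes up, including after hot-unplug.
A minimal sketch of the registration API (the callback body and the dynamic
state constant are illustrative; the patch uses a dedicated
CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS state instead):

#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)
{
        /* Per-CPU bring-up work goes here; return 0 on success. */
        return 0;
}

static int __init example_hotplug_init(void)
{
        /*
         * The _nocalls variant registers the callback without invoking
         * it for CPUs that are already online; the NULL teardown means
         * nothing needs to run when a CPU goes down.
         */
        return cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                         "example/sync:online",
                                         example_cpu_online, NULL);
}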
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 27a78c80e5b1..73d5d72dfc3e 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -116,8 +116,8 @@ void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}
-extern unsigned char mvebu_boot_wa_start;
-extern unsigned char mvebu_boot_wa_end;
+extern unsigned char mvebu_boot_wa_start[];
+extern unsigned char mvebu_boot_wa_end[];
/*
* This function sets up the boot address workaround needed for SMP
@@ -130,7 +130,7 @@ int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
phys_addr_t resume_addr_reg)
{
void __iomem *sram_virt_base;
- u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
+ u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;
mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
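
The pmsu.c change above swaps scalar declarations for incomplete arrays.
Subtracting the addresses of two distinct char objects is undefined
behaviour as far as the compiler is concerned, and the comparison may be
miscompiled; declaring linker/assembly symbols as arrays makes them plain
addresses. A short sketch of the convention (symbol names illustrative):

/* Symbols defined in assembly or a linker script, not C objects. */
extern unsigned char example_blob_start[];
extern unsigned char example_blob_end[];

static unsigned int example_blob_len(void)
{
        /* Arrays decay to pointers, so no '&' is needed. */
        return example_blob_end - example_blob_start;
}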
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index bf608441b357..ddc27638ba2a 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -14,11 +14,12 @@
*/
#include <linux/linkage.h>
-#include <asm/assembler.h>
+#include <linux/platform_data/ams-delta-fiq.h>
+#include <asm/assembler.h>
#include <mach/board-ams-delta.h>
-#include <mach/ams-delta-fiq.h>
+#include "ams-delta-fiq.h"
#include "iomap.h"
#include "soc.h"
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index d7ca9e2b40d2..b0dc7ddf5877 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -13,17 +13,20 @@
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/platform_data/ams-delta-fiq.h>
+#include <linux/platform_device.h>
#include <mach/board-ams-delta.h>
#include <asm/fiq.h>
-#include <mach/ams-delta-fiq.h>
+#include "ams-delta-fiq.h"
static struct fiq_handler fh = {
.name = "ams-delta-fiq"
@@ -34,20 +37,24 @@ static struct fiq_handler fh = {
* The FIQ and IRQ isrs can both read and write it.
* It is structured as a header section several 32bit slots,
* followed by the circular buffer where the FIQ isr stores
- * keystrokes received from the qwerty keyboard.
- * See ams-delta-fiq.h for details of offsets.
+ * keystrokes received from the qwerty keyboard. See
+ * <linux/platform_data/ams-delta-fiq.h> for details of offsets.
*/
-unsigned int fiq_buffer[1024];
-EXPORT_SYMBOL(fiq_buffer);
+static unsigned int fiq_buffer[1024];
+static struct irq_chip *irq_chip;
+static struct irq_data *irq_data[16];
static unsigned int irq_counter[16];
+static const char *pin_name[16] __initconst = {
+ [AMS_DELTA_GPIO_PIN_KEYBRD_DATA] = "keybrd_data",
+ [AMS_DELTA_GPIO_PIN_KEYBRD_CLK] = "keybrd_clk",
+};
+
static irqreturn_t deferred_fiq(int irq, void *dev_id)
{
+ struct irq_data *d;
int gpio, irq_num, fiq_count;
- struct irq_chip *irq_chip;
-
- irq_chip = irq_get_chip(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
/*
* For each handled GPIO interrupt, keep calling its interrupt handler
@@ -55,24 +62,21 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
*/
for (gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK;
gpio <= AMS_DELTA_GPIO_PIN_HOOK_SWITCH; gpio++) {
- irq_num = gpio_to_irq(gpio);
+ d = irq_data[gpio];
+ irq_num = d->irq;
fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio];
if (irq_counter[gpio] < fiq_count &&
gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
- struct irq_data *d = irq_get_irq_data(irq_num);
-
/*
* handle_simple_irq() that OMAP GPIO edge
* interrupts default to since commit 80ac93c27441
* requires interrupt already acked and unmasked.
*/
- if (irq_chip) {
- if (irq_chip->irq_ack)
- irq_chip->irq_ack(d);
- if (irq_chip->irq_unmask)
- irq_chip->irq_unmask(d);
- }
+ if (irq_chip->irq_ack)
+ irq_chip->irq_ack(d);
+ if (irq_chip->irq_unmask)
+ irq_chip->irq_unmask(d);
}
for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
generic_handle_irq(irq_num);
@@ -80,14 +84,56 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-void __init ams_delta_init_fiq(void)
+void __init ams_delta_init_fiq(struct gpio_chip *chip,
+ struct platform_device *serio)
{
+ struct gpio_desc *gpiod, *data = NULL, *clk = NULL;
void *fiqhandler_start;
unsigned int fiqhandler_length;
struct pt_regs FIQ_regs;
unsigned long val, offset;
int i, retval;
+ /* Store irq_chip location for IRQ handler use */
+ irq_chip = chip->irq.chip;
+ if (!irq_chip) {
+ pr_err("%s: GPIO chip %s is missing IRQ function\n", __func__,
+ chip->label);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(irq_data); i++) {
+ gpiod = gpiochip_request_own_desc(chip, i, pin_name[i]);
+ if (IS_ERR(gpiod)) {
+ pr_err("%s: failed to get GPIO pin %d (%ld)\n",
+ __func__, i, PTR_ERR(gpiod));
+ return;
+ }
+ /* Store irq_data location for IRQ handler use */
+ irq_data[i] = irq_get_irq_data(gpiod_to_irq(gpiod));
+
+ /*
+ * FIQ handler takes full control over serio data and clk GPIO
+ * pins. Initialize them and keep them requested so nobody can
+ * interfere. Fail if either of the two couldn't be requested.
+ */
+ switch (i) {
+ case AMS_DELTA_GPIO_PIN_KEYBRD_DATA:
+ data = gpiod;
+ gpiod_direction_input(data);
+ break;
+ case AMS_DELTA_GPIO_PIN_KEYBRD_CLK:
+ clk = gpiod;
+ gpiod_direction_input(clk);
+ break;
+ default:
+ gpiochip_free_own_desc(gpiod);
+ break;
+ }
+ }
+ if (!data || !clk)
+ goto out_gpio;
+
fiqhandler_start = &qwerty_fiqin_start;
fiqhandler_length = &qwerty_fiqin_end - &qwerty_fiqin_start;
pr_info("Installing fiq handler from %p, length 0x%x\n",
@@ -97,7 +143,7 @@ void __init ams_delta_init_fiq(void)
if (retval) {
pr_err("ams_delta_init_fiq(): couldn't claim FIQ, ret=%d\n",
retval);
- return;
+ goto out_gpio;
}
retval = request_irq(INT_DEFERRED_FIQ, deferred_fiq,
@@ -105,7 +151,7 @@ void __init ams_delta_init_fiq(void)
if (retval < 0) {
pr_err("Failed to get deferred_fiq IRQ, ret=%d\n", retval);
release_fiq(&fh);
- return;
+ goto out_gpio;
}
/*
* Since no set_type() method is provided by OMAP irq chip,
@@ -155,4 +201,29 @@ void __init ams_delta_init_fiq(void)
offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
val = omap_readl(OMAP_IH1_BASE + offset) | 1;
omap_writel(val, OMAP_IH1_BASE + offset);
+
+ /* Initialize serio device IRQ resource and platform_data */
+ serio->resource[0].start = gpiod_to_irq(clk);
+ serio->resource[0].end = serio->resource[0].start;
+ serio->dev.platform_data = fiq_buffer;
+
+ /*
+ * Since FIQ handler performs handling of GPIO registers for
+ * "keybrd_clk" IRQ pin, ams_delta_serio driver used to set
+ * handle_simple_irq() as active IRQ handler for that pin to avoid
+ * bad interaction with gpio-omap driver. This is no longer needed
+ * as handle_simple_irq() is now the default handler for OMAP GPIO
+ * edge interrupts.
+ * This comment replaces the obsolete code which has been removed
+ * from the ams_delta_serio driver and stands here only as a reminder
+ * of that dependency on gpio-omap driver behavior.
+ */
+
+ return;
+
+out_gpio:
+ if (data)
+ gpiochip_free_own_desc(data);
+ if (clk)
+ gpiochip_free_own_desc(clk);
}
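
ams_delta_init_fiq() now receives the gpio_chip and claims pins with
gpiochip_request_own_desc(), the API intended for code that is closely tied
to the chip itself and must not go through the normal consumer lookup.
A minimal sketch mirroring the three-argument call pattern used in the hunk
(pin number and label are illustrative):

#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>

static int claim_own_pin(struct gpio_chip *chip)
{
        struct gpio_desc *gpiod;

        /* Request hardware line 3 of this chip for our own use. */
        gpiod = gpiochip_request_own_desc(chip, 3, "example-pin");
        if (IS_ERR(gpiod))
                return PTR_ERR(gpiod);

        gpiod_direction_input(gpiod);

        /* Release with gpiochip_free_own_desc(gpiod) when done. */
        return 0;
}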
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.h b/arch/arm/mach-omap1/ams-delta-fiq.h
new file mode 100644
index 000000000000..fd76df3cce37
--- /dev/null
+++ b/arch/arm/mach-omap1/ams-delta-fiq.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * arch/arm/mach-omap1/ams-delta-fiq.h
+ *
+ * Taken from the original Amstrad modifications to fiq.h
+ *
+ * Copyright (c) 2004 Amstrad Plc
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __AMS_DELTA_FIQ_H
+#define __AMS_DELTA_FIQ_H
+
+#include <mach/irqs.h>
+
+/*
+ * Interrupt number used for passing control from FIQ to IRQ.
+ * IRQ12, described as reserved, has been selected.
+ */
+#define INT_DEFERRED_FIQ INT_1510_RES12
+/*
+ * Base address of an interrupt handler that the INT_DEFERRED_FIQ belongs to.
+ */
+#if (INT_DEFERRED_FIQ < IH2_BASE)
+#define DEFERRED_FIQ_IH_BASE OMAP_IH1_BASE
+#else
+#define DEFERRED_FIQ_IH_BASE OMAP_IH2_BASE
+#endif
+
+#ifndef __ASSEMBLER__
+extern unsigned char qwerty_fiqin_start, qwerty_fiqin_end;
+
+extern void __init ams_delta_init_fiq(struct gpio_chip *chip,
+ struct platform_device *pdev);
+#endif
+
+#endif
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 80f54cb54276..dd28d2614d7f 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -41,10 +41,10 @@
#include <mach/mux.h>
#include <mach/hardware.h>
-#include <mach/ams-delta-fiq.h>
#include "camera.h"
#include <mach/usb.h>
+#include "ams-delta-fiq.h"
#include "iomap.h"
#include "common.h"
@@ -179,7 +179,10 @@ static struct resource latch1_resources[] = {
},
};
+#define LATCH1_LABEL "latch1"
+
static struct bgpio_pdata latch1_pdata = {
+ .label = LATCH1_LABEL,
.base = LATCH1_GPIO_BASE,
.ngpio = LATCH1_NGPIO,
};
@@ -194,6 +197,15 @@ static struct platform_device latch1_gpio_device = {
},
};
+#define LATCH1_PIN_LED_CAMERA 0
+#define LATCH1_PIN_LED_ADVERT 1
+#define LATCH1_PIN_LED_MAIL 2
+#define LATCH1_PIN_LED_HANDSFREE 3
+#define LATCH1_PIN_LED_VOICEMAIL 4
+#define LATCH1_PIN_LED_VOICE 5
+#define LATCH1_PIN_DOCKIT1 6
+#define LATCH1_PIN_DOCKIT2 7
+
static struct resource latch2_resources[] = {
[0] = {
.name = "dat",
@@ -398,38 +410,43 @@ static struct gpiod_lookup_table ams_delta_lcd_gpio_table = {
},
};
-static const struct gpio_led gpio_leds[] __initconst = {
- {
+/*
+ * Dynamically allocated GPIO numbers must be obtained from the GPIO device
+ * before they can be put in the gpio_led table. Before that happens,
+ * initialize the table with invalid GPIO numbers, not 0.
+ */
+static struct gpio_led gpio_leds[] __initdata = {
+ [LATCH1_PIN_LED_CAMERA] = {
.name = "camera",
- .gpio = LATCH1_GPIO_BASE + 0,
+ .gpio = -EINVAL,
.default_state = LEDS_GPIO_DEFSTATE_OFF,
#ifdef CONFIG_LEDS_TRIGGERS
.default_trigger = "ams_delta_camera",
#endif
},
- {
+ [LATCH1_PIN_LED_ADVERT] = {
.name = "advert",
- .gpio = LATCH1_GPIO_BASE + 1,
+ .gpio = -EINVAL,
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
- {
+ [LATCH1_PIN_LED_MAIL] = {
.name = "email",
- .gpio = LATCH1_GPIO_BASE + 2,
+ .gpio = -EINVAL,
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
- {
+ [LATCH1_PIN_LED_HANDSFREE] = {
.name = "handsfree",
- .gpio = LATCH1_GPIO_BASE + 3,
+ .gpio = -EINVAL,
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
- {
+ [LATCH1_PIN_LED_VOICEMAIL] = {
.name = "voicemail",
- .gpio = LATCH1_GPIO_BASE + 4,
+ .gpio = -EINVAL,
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
- {
+ [LATCH1_PIN_LED_VOICE] = {
.name = "voice",
- .gpio = LATCH1_GPIO_BASE + 5,
+ .gpio = -EINVAL,
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
};
@@ -504,16 +521,70 @@ static struct platform_device cx20442_codec_device = {
.id = -1,
};
-static struct gpiod_lookup_table ams_delta_serio_gpio_table = {
+static struct resource ams_delta_serio_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ /*
+ * Initialize IRQ resource with invalid IRQ number.
+ * It will be replaced with dynamically allocated GPIO IRQ
+ * obtained from GPIO chip as soon as the chip is available.
+ */
+ .start = -EINVAL,
+ .end = -EINVAL,
+ },
+};
+
+static struct platform_device ams_delta_serio_device = {
+ .name = "ams-delta-serio",
+ .id = PLATFORM_DEVID_NONE,
+ .dev = {
+ /*
+ * Initialize .platform_data explicitly with NULL to
+ * indicate it is going to be used. It will be replaced
+ * with the FIQ buffer address as soon as FIQ is initialized.
+ */
+ .platform_data = NULL,
+ },
+ .num_resources = ARRAY_SIZE(ams_delta_serio_resources),
+ .resource = ams_delta_serio_resources,
+};
+
+static struct regulator_consumer_supply keybrd_pwr_consumers[] = {
+ /*
+ * Initialize supply .dev_name with NULL. It will be replaced
+ * with serio dev_name() as soon as the serio device is registered.
+ */
+ REGULATOR_SUPPLY("vcc", NULL),
+};
+
+static struct regulator_init_data keybrd_pwr_initdata = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(keybrd_pwr_consumers),
+ .consumer_supplies = keybrd_pwr_consumers,
+};
+
+static struct fixed_voltage_config keybrd_pwr_config = {
+ .supply_name = "keybrd_pwr",
+ .microvolts = 5000000,
+ .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_PWR,
+ .enable_high = 1,
+ .init_data = &keybrd_pwr_initdata,
+};
+
+static struct platform_device keybrd_pwr_device = {
+ .name = "reg-fixed-voltage",
+ .id = PLATFORM_DEVID_AUTO,
+ .dev = {
+ .platform_data = &keybrd_pwr_config,
+ },
+};
+
+static struct gpiod_lookup_table keybrd_pwr_gpio_table = {
.table = {
- GPIO_LOOKUP(OMAP_GPIO_LABEL, AMS_DELTA_GPIO_PIN_KEYBRD_DATA,
- "data", 0),
- GPIO_LOOKUP(OMAP_GPIO_LABEL, AMS_DELTA_GPIO_PIN_KEYBRD_CLK,
- "clock", 0),
- GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_KEYBRD_PWR,
- "power", 0),
- GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_KEYBRD_DATAOUT,
- "dataout", 0),
+ GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_KEYBRD_PWR, NULL,
+ GPIO_ACTIVE_HIGH),
{ },
},
};
@@ -524,9 +595,7 @@ static struct platform_device *ams_delta_devices[] __initdata = {
&ams_delta_kp_device,
&ams_delta_camera_device,
&ams_delta_audio_device,
-};
-
-static struct platform_device *late_devices[] __initdata = {
+ &ams_delta_serio_device,
&ams_delta_nand_device,
&ams_delta_lcd_device,
&cx20442_codec_device,
@@ -534,14 +603,55 @@ static struct platform_device *late_devices[] __initdata = {
static struct gpiod_lookup_table *ams_delta_gpio_tables[] __initdata = {
&ams_delta_audio_gpio_table,
- &ams_delta_serio_gpio_table,
-};
-
-static struct gpiod_lookup_table *late_gpio_tables[] __initdata = {
+ &keybrd_pwr_gpio_table,
&ams_delta_lcd_gpio_table,
&ams_delta_nand_gpio_table,
};
+/*
+ * Some drivers may not use GPIO lookup tables but need to be provided
+ * with GPIO numbers. The same applies to GPIO based IRQ lines - some
+ * drivers may not even use the GPIO layer but expect just IRQ numbers.
+ * We could either define GPIO lookup tables and use them on behalf
+ * of those devices, or we can use GPIO driver level methods for
+ * identification of GPIO and IRQ numbers. For the purpose of the latter,
+ * define a helper function which identifies GPIO chips by their labels.
+ */
+static int gpiochip_match_by_label(struct gpio_chip *chip, void *data)
+{
+ char *label = data;
+
+ return !strcmp(label, chip->label);
+}
+
+static struct gpiod_hog ams_delta_gpio_hogs[] = {
+ GPIO_HOG(LATCH2_LABEL, LATCH2_PIN_KEYBRD_DATAOUT, "keybrd_dataout",
+ GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW),
+ {},
+};
+
+/*
+ * The purpose of this function is to take care of proper initialization of
+ * devices and data structures which depend on GPIO lines provided by OMAP GPIO
+ * banks but whose drivers don't use GPIO lookup tables or the GPIO layer at all.
+ * The function may be called as soon as OMAP GPIO devices are probed.
+ * Since that happens at postcore_initcall, it can be called successfully
+ * from init_machine or later.
+ * Dependent devices may be registered from within this function or later.
+ */
+static void __init omap_gpio_deps_init(void)
+{
+ struct gpio_chip *chip;
+
+ chip = gpiochip_find(OMAP_GPIO_LABEL, gpiochip_match_by_label);
+ if (!chip) {
+ pr_err("%s: OMAP GPIO chip not found\n", __func__);
+ return;
+ }
+
+ ams_delta_init_fiq(chip, &ams_delta_serio_device);
+}
+
static void __init ams_delta_init(void)
{
/* mux pins for uarts */
@@ -562,6 +672,9 @@ static void __init ams_delta_init(void)
omap_cfg_reg(J19_1610_CAM_D6);
omap_cfg_reg(J18_1610_CAM_D7);
+ omap_gpio_deps_init();
+ gpiod_add_hogs(ams_delta_gpio_hogs);
+
omap_serial_init();
omap_register_i2c_bus(1, 100, NULL, 0);
@@ -571,25 +684,38 @@ static void __init ams_delta_init(void)
led_trigger_register_simple("ams_delta_camera",
&ams_delta_camera_led_trigger);
#endif
- gpio_led_register_device(-1, &leds_pdata);
platform_add_devices(ams_delta_devices, ARRAY_SIZE(ams_delta_devices));
/*
- * As soon as devices have been registered, assign their dev_names
- * to respective GPIO lookup tables before they are added.
+ * As soon as regulator consumers have been registered, assign their
+ * dev_names to consumer supply entries of respective regulators.
+ */
+ keybrd_pwr_consumers[0].dev_name =
+ dev_name(&ams_delta_serio_device.dev);
+
+ /*
+ * Once consumer supply entries are populated with dev_names,
+ * register regulator devices. At this stage only the keyboard
+ * power regulator has its consumer supply table fully populated.
+ */
+ platform_device_register(&keybrd_pwr_device);
+
+ /*
+ * As soon as GPIO consumers have been registered, assign
+ * their dev_names to respective GPIO lookup tables.
*/
ams_delta_audio_gpio_table.dev_id =
dev_name(&ams_delta_audio_device.dev);
+ keybrd_pwr_gpio_table.dev_id = dev_name(&keybrd_pwr_device.dev);
+ ams_delta_nand_gpio_table.dev_id = dev_name(&ams_delta_nand_device.dev);
+ ams_delta_lcd_gpio_table.dev_id = dev_name(&ams_delta_lcd_device.dev);
+
/*
- * No device name is assigned to GPIO lookup table for serio device
- * as long as serio driver is not converted to platform device driver.
+ * Once GPIO lookup tables are populated with dev_names, register them.
*/
-
gpiod_add_lookup_tables(ams_delta_gpio_tables,
ARRAY_SIZE(ams_delta_gpio_tables));
- ams_delta_init_fiq();
-
omap_writew(omap_readw(ARM_RSTCT1) | 0x0004, ARM_RSTCT1);
omapfb_set_lcd_config(&ams_delta_lcd_config);
@@ -643,35 +769,84 @@ static struct platform_device ams_delta_modem_device = {
},
};
-static int __init late_init(void)
+/*
+ * The leds-gpio driver doesn't make use of GPIO lookup tables;
+ * it has to be provided with GPIO numbers via platform data
+ * if GPIO descriptor info can't be obtained from device tree.
+ * We could either define GPIO lookup tables and use them on behalf
+ * of the leds-gpio device, or we can use GPIO driver level methods
+ * for identification of GPIO numbers as long as we don't support
+ * device tree. Let's do the latter.
+ */
+static void __init ams_delta_led_init(struct gpio_chip *chip)
+{
+ struct gpio_desc *gpiod;
+ int i;
+
+ for (i = LATCH1_PIN_LED_CAMERA; i < LATCH1_PIN_DOCKIT1; i++) {
+ gpiod = gpiochip_request_own_desc(chip, i, NULL);
+ if (IS_ERR(gpiod)) {
+ pr_warn("%s: %s GPIO %d request failed (%ld)\n",
+ __func__, LATCH1_LABEL, i, PTR_ERR(gpiod));
+ continue;
+ }
+
+ /* Assign GPIO numbers to LED device. */
+ gpio_leds[i].gpio = desc_to_gpio(gpiod);
+
+ gpiochip_free_own_desc(gpiod);
+ }
+
+ gpio_led_register_device(PLATFORM_DEVID_NONE, &leds_pdata);
+}
+
+/*
+ * The purpose of this function is to take care of assignment of GPIO numbers
+ * to platform devices which depend on GPIO lines provided by Amstrad Delta
+ * latch1 and/or latch2 GPIO devices but don't use GPIO lookup tables.
+ * The function may be called as soon as latch1/latch2 GPIO devices are
+ * initialized. Since the basic-mmio-gpio driver is not registered before
+ * device_initcall, this may happen at the earliest during device_initcall_sync.
+ * Dependent devices shouldn't be registered before that; their
+ * registration may be performed from within this function or later.
+ */
+static int __init ams_delta_gpio_init(void)
{
+ struct gpio_chip *chip;
int err;
if (!machine_is_ams_delta())
return -ENODEV;
+ chip = gpiochip_find(LATCH1_LABEL, gpiochip_match_by_label);
+ if (!chip)
+ pr_err("%s: latch1 GPIO chip not found\n", __func__);
+ else
+ ams_delta_led_init(chip);
+
err = gpio_request_array(latch_gpios, ARRAY_SIZE(latch_gpios));
- if (err) {
+ if (err)
pr_err("Couldn't take over latch1/latch2 GPIO pins\n");
- return err;
- }
- platform_add_devices(late_devices, ARRAY_SIZE(late_devices));
-
- /*
- * As soon as devices have been registered, assign their dev_names
- * to respective GPIO lookup tables before they are added.
- */
- ams_delta_lcd_gpio_table.dev_id = dev_name(&ams_delta_lcd_device.dev);
- ams_delta_nand_gpio_table.dev_id = dev_name(&ams_delta_nand_device.dev);
+ return err;
+}
+device_initcall_sync(ams_delta_gpio_init);
- gpiod_add_lookup_tables(late_gpio_tables, ARRAY_SIZE(late_gpio_tables));
+static int __init modem_nreset_init(void)
+{
+ int err;
err = platform_device_register(&modem_nreset_device);
- if (err) {
+ if (err)
pr_err("Couldn't register the modem regulator device\n");
- return err;
- }
+
+ return err;
+}
+
+static int __init ams_delta_modem_init(void)
+{
+ int err;
omap_cfg_reg(M14_1510_GPIO2);
ams_delta_modem_ports[0].irq =
@@ -692,7 +867,22 @@ static int __init late_init(void)
err = platform_device_register(&ams_delta_modem_device);
if (err)
- goto gpio_free;
+ gpio_free(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
+
+ return err;
+}
+
+static int __init late_init(void)
+{
+ int err;
+
+ err = modem_nreset_init();
+ if (err)
+ return err;
+
+ err = ams_delta_modem_init();
+ if (err)
+ return err;
/*
* Once the modem device is registered, the modem_nreset
@@ -708,7 +898,6 @@ static int __init late_init(void)
unregister:
platform_device_unregister(&ams_delta_modem_device);
-gpio_free:
gpio_free(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
return err;
}
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index ab51f8554697..9aeb8ad8c327 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -274,7 +274,7 @@ static struct platform_device h2_kp_device = {
.resource = h2_kp_resources,
};
-static struct gpio_led h2_gpio_led_pins[] = {
+static const struct gpio_led h2_gpio_led_pins[] = {
{
.name = "h2:red",
.default_trigger = "heartbeat",
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index ad339f51cc78..2edcd6356f2d 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -326,7 +326,7 @@ static struct spi_board_info h3_spi_board_info[] __initdata = {
},
};
-static struct gpio_led h3_gpio_led_pins[] = {
+static const struct gpio_led h3_gpio_led_pins[] = {
{
.name = "h3:red",
.default_trigger = "heartbeat",
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index da8f3fc3180f..5733212759d3 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -292,7 +292,7 @@ static struct platform_device herald_gpiokeys_device = {
};
/* LEDs for the Herald. These connect to the HTCPLD GPIO device. */
-static struct gpio_led gpio_leds[] = {
+static const struct gpio_led gpio_leds[] = {
{"dpad", NULL, HTCPLD_GPIO_LED_DPAD, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
{"kbd", NULL, HTCPLD_GPIO_LED_KBD, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
{"vibrate", NULL, HTCPLD_GPIO_LED_VIBRATE, 0, 0, LEDS_GPIO_DEFSTATE_OFF},
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 9ffa8d755a59..4df15e693b6e 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -167,7 +167,7 @@ static struct platform_device *osk5912_devices[] __initdata = {
&osk5912_cf_device,
};
-static struct gpio_led tps_leds[] = {
+static const struct gpio_led tps_leds[] = {
/* NOTE: D9 and D2 have hardware blink support.
* Also, D9 requires non-battery power.
*/
@@ -385,7 +385,7 @@ static struct platform_device osk5912_lcd_device = {
.id = -1,
};
-static struct gpio_led mistral_gpio_led_pins[] = {
+static const struct gpio_led mistral_gpio_led_pins[] = {
{
.name = "mistral:red",
.default_trigger = "heartbeat",
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 69df3620eca5..1c73694c871a 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
static inline void omap5_erratum_workaround_801819(void) { }
#endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
+ * ICIALLU) to activate the workaround for secondary Core.
+ * NOTE: it is assumed that the primary core's configuration is done
+ * by the boot loader (kernel will detect a misconfiguration and complain
+ * if this is not done).
+ *
+ * In General Purpose (GP) devices, ACR bit settings can only be done
+ * by ROM code in "secure world" using the smc call and there is no
+ * option to update the "firmware" on such devices. This also works for
+ * High Security (HS) devices, as a backup option in case the
+ * "update" is not done in the "security firmware".
+ */
+static void omap5_secondary_harden_predictor(void)
+{
+ u32 acr, acr_mask;
+
+ asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+
+ /*
+ * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
+ */
+ acr_mask = BIT(0);
+
+ /* If it is already done, skip the expensive smc call */
+ if ((acr & acr_mask) == acr_mask)
+ return;
+
+ acr |= acr_mask;
+ omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+ pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
+ __func__, smp_processor_id());
+}
+#else
+static inline void omap5_secondary_harden_predictor(void) { }
+#endif
+
static void omap4_secondary_init(unsigned int cpu)
{
/*
@@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
set_cntfreq();
/* Configure ACR to disable streaming WA for 801819 */
omap5_erratum_workaround_801819();
+ /* Enable ACR to allow for ICIALLU workaround */
+ omap5_secondary_harden_predictor();
}
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_reset.c b/arch/arm/mach-omap2/omap_hwmod_reset.c
index b68f9c0aff0b..d5ddba00bb73 100644
--- a/arch/arm/mach-omap2/omap_hwmod_reset.c
+++ b/arch/arm/mach-omap2/omap_hwmod_reset.c
@@ -92,11 +92,13 @@ static void omap_rtc_wait_not_busy(struct omap_hwmod *oh)
*/
void omap_hwmod_rtc_unlock(struct omap_hwmod *oh)
{
- local_irq_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
omap_rtc_wait_not_busy(oh);
omap_hwmod_write(OMAP_RTC_KICK0_VALUE, oh, OMAP_RTC_KICK0_REG);
omap_hwmod_write(OMAP_RTC_KICK1_VALUE, oh, OMAP_RTC_KICK1_REG);
- local_irq_enable();
+ local_irq_restore(flags);
}
/**
@@ -110,9 +112,11 @@ void omap_hwmod_rtc_unlock(struct omap_hwmod *oh)
*/
void omap_hwmod_rtc_lock(struct omap_hwmod *oh)
{
- local_irq_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
omap_rtc_wait_not_busy(oh);
omap_hwmod_write(0x0, oh, OMAP_RTC_KICK0_REG);
omap_hwmod_write(0x0, oh, OMAP_RTC_KICK1_REG);
- local_irq_enable();
+ local_irq_restore(flags);
}
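
The fix above matters when the RTC lock/unlock helpers are called with
interrupts already disabled: local_irq_enable() would unconditionally turn
them back on, while local_irq_save()/local_irq_restore() preserve whatever
state the caller had. A sketch of the nesting-safe form:

#include <linux/irqflags.h>

static void touch_registers_atomically(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* records prior IRQ state */
        /* ... critical register accesses ... */
        local_irq_restore(flags);       /* restores that state exactly */
}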
diff --git a/arch/arm/mach-omap2/pm-asm-offsets.c b/arch/arm/mach-omap2/pm-asm-offsets.c
index b9846b19e5e2..d8ae8a85b14b 100644
--- a/arch/arm/mach-omap2/pm-asm-offsets.c
+++ b/arch/arm/mach-omap2/pm-asm-offsets.c
@@ -27,6 +27,8 @@ int main(void)
offsetof(struct am33xx_pm_ro_sram_data, amx3_pm_sram_data_virt));
DEFINE(AMX3_PM_RO_SRAM_DATA_PHYS_OFFSET,
offsetof(struct am33xx_pm_ro_sram_data, amx3_pm_sram_data_phys));
+ DEFINE(AMX3_PM_RTC_BASE_VIRT_OFFSET,
+ offsetof(struct am33xx_pm_ro_sram_data, rtc_base_virt));
DEFINE(AMX3_PM_RO_SRAM_DATA_SIZE,
sizeof(struct am33xx_pm_ro_sram_data));
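
pm-asm-offsets.c relies on the Kbuild asm-offsets mechanism: DEFINE() from
<linux/kbuild.h> emits a marker the build turns into a #define in the
generated ti-pm-asm-offsets.h, so struct offsets computed by the C compiler
become constants usable from the .S files below (for example
AMX3_PM_RTC_BASE_VIRT_OFFSET). A condensed sketch with a hypothetical
struct:

#include <linux/kbuild.h>       /* DEFINE(sym, val) */
#include <linux/stddef.h>       /* offsetof() */

struct example_sram_data {
        void __iomem *rtc_base_virt;
        unsigned long wfi_flags;
};

int main(void)
{
        /* Becomes "#define EXAMPLE_RTC_BASE_OFFSET <n>" in the header. */
        DEFINE(EXAMPLE_RTC_BASE_OFFSET,
               offsetof(struct example_sram_data, rtc_base_virt));
        return 0;
}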
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index acb698d5780f..5a8839203958 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -47,11 +47,6 @@ static int pm_dbg_init_done;
static int pm_dbg_init(void);
-enum {
- DEBUG_FILE_COUNTERS = 0,
- DEBUG_FILE_TIMERS,
-};
-
static const char pwrdm_state_names[][PWRDM_MAX_PWRSTS] = {
"OFF",
"RET",
@@ -141,39 +136,21 @@ static int pwrdm_dbg_show_timer(struct powerdomain *pwrdm, void *user)
return 0;
}
-static int pm_dbg_show_counters(struct seq_file *s, void *unused)
+static int pm_dbg_counters_show(struct seq_file *s, void *unused)
{
pwrdm_for_each(pwrdm_dbg_show_counter, s);
clkdm_for_each(clkdm_dbg_show_counter, s);
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(pm_dbg_counters);
-static int pm_dbg_show_timers(struct seq_file *s, void *unused)
+static int pm_dbg_timers_show(struct seq_file *s, void *unused)
{
pwrdm_for_each(pwrdm_dbg_show_timer, s);
return 0;
}
-
-static int pm_dbg_open(struct inode *inode, struct file *file)
-{
- switch ((int)inode->i_private) {
- case DEBUG_FILE_COUNTERS:
- return single_open(file, pm_dbg_show_counters,
- &inode->i_private);
- case DEBUG_FILE_TIMERS:
- default:
- return single_open(file, pm_dbg_show_timers,
- &inode->i_private);
- }
-}
-
-static const struct file_operations debug_fops = {
- .open = pm_dbg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pm_dbg_timers);
static int pwrdm_suspend_get(void *data, u64 *val)
{
@@ -259,10 +236,8 @@ static int __init pm_dbg_init(void)
if (!d)
return -EINVAL;
- (void) debugfs_create_file("count", S_IRUGO,
- d, (void *)DEBUG_FILE_COUNTERS, &debug_fops);
- (void) debugfs_create_file("time", S_IRUGO,
- d, (void *)DEBUG_FILE_TIMERS, &debug_fops);
+ (void) debugfs_create_file("count", 0444, d, NULL, &pm_dbg_counters_fops);
+ (void) debugfs_create_file("time", 0444, d, NULL, &pm_dbg_timers_fops);
pwrdm_for_each(pwrdms_setup, (void *)d);
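
The debugfs conversion above leans on DEFINE_SHOW_ATTRIBUTE() from
<linux/seq_file.h>, which generates the _open function and _fops structure
around a matching _show function, replacing the hand-rolled pm_dbg_open()
multiplexer. A minimal sketch of a read-only debugfs file using it (file
name and output are illustrative):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *unused)
{
        seq_puts(s, "hello\n");
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);         /* generates example_fops */

static int __init example_debugfs_init(void)
{
        debugfs_create_file("example", 0444, NULL, NULL, &example_fops);
        return 0;
}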
diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
index 9b3755a2e2ec..f4971e4a86b2 100644
--- a/arch/arm/mach-omap2/pm33xx-core.c
+++ b/arch/arm/mach-omap2/pm33xx-core.c
@@ -26,6 +26,7 @@
static struct powerdomain *cefuse_pwrdm, *gfx_pwrdm, *per_pwrdm, *mpu_pwrdm;
static struct clockdomain *gfx_l4ls_clkdm;
static void __iomem *scu_base;
+static struct omap_hwmod *rtc_oh;
static int __init am43xx_map_scu(void)
{
@@ -106,12 +107,13 @@ static void amx3_post_suspend_common(void)
pr_err("PM: GFX domain did not transition: %x\n", status);
}
-static int am33xx_suspend(unsigned int state, int (*fn)(unsigned long))
+static int am33xx_suspend(unsigned int state, int (*fn)(unsigned long),
+ unsigned long args)
{
int ret = 0;
amx3_pre_suspend_common();
- ret = cpu_suspend(0, fn);
+ ret = cpu_suspend(args, fn);
amx3_post_suspend_common();
/*
@@ -128,13 +130,14 @@ static int am33xx_suspend(unsigned int state, int (*fn)(unsigned long))
return ret;
}
-static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long))
+static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long),
+ unsigned long args)
{
int ret = 0;
amx3_pre_suspend_common();
scu_power_mode(scu_base, SCU_PM_POWEROFF);
- ret = cpu_suspend(0, fn);
+ ret = cpu_suspend(args, fn);
scu_power_mode(scu_base, SCU_PM_NORMAL);
amx3_post_suspend_common();
@@ -151,16 +154,25 @@ static struct am33xx_pm_sram_addr *amx3_get_sram_addrs(void)
return NULL;
}
+void __iomem *am43xx_get_rtc_base_addr(void)
+{
+ rtc_oh = omap_hwmod_lookup("rtc");
+
+ return omap_hwmod_get_mpu_rt_va(rtc_oh);
+}
+
static struct am33xx_pm_platform_data am33xx_ops = {
.init = am33xx_suspend_init,
.soc_suspend = am33xx_suspend,
.get_sram_addrs = amx3_get_sram_addrs,
+ .get_rtc_base_addr = am43xx_get_rtc_base_addr,
};
static struct am33xx_pm_platform_data am43xx_ops = {
.init = am43xx_suspend_init,
.soc_suspend = am43xx_suspend,
.get_sram_addrs = amx3_get_sram_addrs,
+ .get_rtc_base_addr = am43xx_get_rtc_base_addr,
};
static struct am33xx_pm_platform_data *am33xx_pm_get_pdata(void)
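
The signature change above threads an args word through soc_suspend() into
cpu_suspend(), whose second argument is a "finisher" called after the CPU
context has been saved; the first argument is passed straight to it. A
hedged sketch of the call shape (function names are illustrative):

#include <asm/suspend.h>

/* Hypothetical finisher: receives the value given to cpu_suspend(). */
static int example_finisher(unsigned long wfi_flags)
{
        /* Enter the low-power state according to wfi_flags. */
        /* A nonzero return reports an aborted suspend.      */
        return 0;
}

static int example_suspend(unsigned long wfi_flags)
{
        /*
         * cpu_suspend() saves CPU state, invokes the finisher with
         * wfi_flags, and on wakeup returns the result of the suspend
         * attempt.
         */
        return cpu_suspend(wfi_flags, example_finisher);
}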
diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S
index 322b3bb868b4..47a816468cdb 100644
--- a/arch/arm/mach-omap2/sleep33xx.S
+++ b/arch/arm/mach-omap2/sleep33xx.S
@@ -8,6 +8,7 @@
#include <generated/ti-pm-asm-offsets.h>
#include <linux/linkage.h>
+#include <linux/platform_data/pm33xx.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/memory.h>
@@ -19,12 +20,25 @@
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE 0x0002
+/* replicated define because linux/bitops.h cannot be included in assembly */
+#define BIT(nr) (1 << (nr))
+
.arm
.align 3
ENTRY(am33xx_do_wfi)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
+ /* Save wfi_flags arg to data space */
+ mov r4, r0
+ adr r3, am33xx_pm_ro_sram_data
+ ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
+ str r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
+
+ /* Only flush cache if we know we are losing MPU context */
+ tst r4, #WFI_FLAG_FLUSH_CACHE
+ beq cache_skip_flush
+
/*
* Flush all data from the L1 and L2 data cache before disabling
* SCTLR.C bit.
@@ -48,14 +62,33 @@ ENTRY(am33xx_do_wfi)
ldr r1, kernel_flush
blx r1
+ adr r3, am33xx_pm_ro_sram_data
+ ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
+ ldr r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
+
+cache_skip_flush:
+ /* Check if we want self refresh */
+ tst r4, #WFI_FLAG_SELF_REFRESH
+ beq emif_skip_enter_sr
+
adr r9, am33xx_emif_sram_table
ldr r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
blx r3
+emif_skip_enter_sr:
+ /* Only necessary if PER is losing context */
+ tst r4, #WFI_FLAG_SAVE_EMIF
+ beq emif_skip_save
+
ldr r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
blx r3
+emif_skip_save:
+ /* Can only disable EMIF if we have entered self refresh */
+ tst r4, #WFI_FLAG_SELF_REFRESH
+ beq emif_skip_disable
+
/* Disable EMIF */
ldr r1, virt_emif_clkctrl
ldr r2, [r1]
@@ -69,6 +102,10 @@ wait_emif_disable:
cmp r2, r3
bne wait_emif_disable
+emif_skip_disable:
+ tst r4, #WFI_FLAG_WAKE_M3
+ beq wkup_m3_skip
+
/*
* For the MPU WFI to be registered as an interrupt
* to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
@@ -79,6 +116,7 @@ wait_emif_disable:
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
+wkup_m3_skip:
/*
* Execute an ISB instruction to ensure that all of the
* CP15 register changes have been committed.
@@ -132,10 +170,18 @@ wait_emif_enable:
cmp r2, r3
bne wait_emif_enable
+ /* Only necessary if PER is losing context */
+ tst r4, #WFI_FLAG_SELF_REFRESH
+ beq emif_skip_exit_sr_abt
+ adr r9, am33xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
blx r1
+emif_skip_exit_sr_abt:
+ tst r4, #WFI_FLAG_FLUSH_CACHE
+ beq cache_skip_restore
+
/*
* Set SCTLR.C bit to allow data cache allocation
*/
@@ -144,6 +190,7 @@ wait_emif_enable:
mcr p15, 0, r0, c1, c0, 0
isb
+cache_skip_restore:
/* Let the suspend code know about the abort */
mov r0, #1
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
@@ -181,8 +228,6 @@ ENDPROC(am33xx_resume_from_deep_sleep)
* Local variables
*/
.align
-resume_addr:
- .word cpu_resume - PAGE_OFFSET + 0x80000000
kernel_flush:
.word v7_flush_dcache_all
virt_mpu_clkctrl:
@@ -205,6 +250,9 @@ ENTRY(am33xx_pm_sram)
.word am33xx_emif_sram_table
.word am33xx_pm_ro_sram_data
+resume_addr:
+.word cpu_resume - PAGE_OFFSET + 0x80000000
+
.align 3
ENTRY(am33xx_pm_ro_sram_data)
.space AMX3_PM_RO_SRAM_DATA_SIZE
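
After the sleep33xx.S rework, am33xx_do_wfi takes a bitmask in r0 and each
stage of the low-power sequence (cache flush, EMIF self refresh and save,
M3 handshake) is gated on a WFI_FLAG_* bit from
<linux/platform_data/pm33xx.h>. A hedged sketch of how a C caller might
compose the mask (flag names come from the hunks; in the real driver the
routine is copied to SRAM and entered via cpu_suspend() rather than called
directly, and the combination shown is illustrative):

#include <linux/platform_data/pm33xx.h>

extern int am33xx_do_wfi(unsigned long wfi_flags);

static int example_enter_deep_sleep(void)
{
        /* Lose MPU context, put DDR in self refresh, wake the M3. */
        unsigned long flags = WFI_FLAG_FLUSH_CACHE |
                              WFI_FLAG_SELF_REFRESH |
                              WFI_FLAG_SAVE_EMIF |
                              WFI_FLAG_WAKE_M3;

        return am33xx_do_wfi(flags);
}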
diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S
index 8903814a6677..5b9343b58fc7 100644
--- a/arch/arm/mach-omap2/sleep43xx.S
+++ b/arch/arm/mach-omap2/sleep43xx.S
@@ -9,7 +9,7 @@
#include <generated/ti-pm-asm-offsets.h>
#include <linux/linkage.h>
#include <linux/ti-emif-sram.h>
-
+#include <linux/platform_data/pm33xx.h>
#include <asm/assembler.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/memory.h>
@@ -22,6 +22,9 @@
#include "prm33xx.h"
#include "prcm43xx.h"
+/* replicated define because linux/bitops.h cannot be included in assembly */
+#define BIT(nr) (1 << (nr))
+
#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED 0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE 0x0002
@@ -45,12 +48,25 @@
AM43XX_CM_PER_EMIF_CLKCTRL_OFFSET)
#define AM43XX_PRM_EMIF_CTRL_OFFSET 0x0030
+#define RTC_SECONDS_REG 0x0
+#define RTC_PMIC_REG 0x98
+#define RTC_PMIC_POWER_EN BIT(16)
+#define RTC_PMIC_EXT_WAKEUP_STS BIT(12)
+#define RTC_PMIC_EXT_WAKEUP_POL BIT(4)
+#define RTC_PMIC_EXT_WAKEUP_EN BIT(0)
+
.arm
.align 3
ENTRY(am43xx_do_wfi)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
+ /* Save wfi_flags arg to data space */
+ mov r4, r0
+ adr r3, am43xx_pm_ro_sram_data
+ ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
+ str r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
+
#ifdef CONFIG_CACHE_L2X0
/* Retrieve l2 cache virt address BEFORE we shut off EMIF */
ldr r1, get_l2cache_base
@@ -58,6 +74,10 @@ ENTRY(am43xx_do_wfi)
mov r8, r0
#endif
+ /* Only flush cache if we know we are losing MPU context */
+ tst r4, #WFI_FLAG_FLUSH_CACHE
+ beq cache_skip_flush
+
/*
* Flush all data from the L1 and L2 data cache before disabling
* SCTLR.C bit.
@@ -128,13 +148,47 @@ sync:
bne sync
#endif
+ /* Restore wfi_flags */
+ adr r3, am43xx_pm_ro_sram_data
+ ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
+ ldr r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
+
+cache_skip_flush:
+ /*
+ * If we are trying to enter RTC+DDR mode, we must perform
+ * a read from the RTC address space to ensure the translation
+ * is present in the TLB, avoiding a page table walk after DDR
+ * becomes unavailable.
+ */
+ tst r4, #WFI_FLAG_RTC_ONLY
+ beq skip_rtc_va_refresh
+
+ adr r3, am43xx_pm_ro_sram_data
+ ldr r1, [r3, #AMX3_PM_RTC_BASE_VIRT_OFFSET]
+ ldr r0, [r1]
+
+skip_rtc_va_refresh:
+ /* Check if we want self refresh */
+ tst r4, #WFI_FLAG_SELF_REFRESH
+ beq emif_skip_enter_sr
+
adr r9, am43xx_emif_sram_table
ldr r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
blx r3
+emif_skip_enter_sr:
+ /* Only necessary if PER is losing context */
+ tst r4, #WFI_FLAG_SAVE_EMIF
+ beq emif_skip_save
+
ldr r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
- blx r3
+ blx r3
+
+emif_skip_save:
+ /* EMIF can only be disabled if we have entered self refresh */
+ tst r4, #WFI_FLAG_SELF_REFRESH
+ beq emif_skip_disable
/* Disable EMIF */
ldr r1, am43xx_virt_emif_clkctrl
@@ -148,6 +202,38 @@ wait_emif_disable:
cmp r2, r3
bne wait_emif_disable
+emif_skip_disable:
+ tst r4, #WFI_FLAG_RTC_ONLY
+ beq skip_rtc_only
+
+ adr r3, am43xx_pm_ro_sram_data
+ ldr r1, [r3, #AMX3_PM_RTC_BASE_VIRT_OFFSET]
+
+ ldr r0, [r1, #RTC_PMIC_REG]
+ orr r0, r0, #RTC_PMIC_POWER_EN
+ orr r0, r0, #RTC_PMIC_EXT_WAKEUP_STS
+ orr r0, r0, #RTC_PMIC_EXT_WAKEUP_EN
+ orr r0, r0, #RTC_PMIC_EXT_WAKEUP_POL
+ str r0, [r1, #RTC_PMIC_REG]
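+ /* Read back, presumably to flush the posted write before polling */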
+ ldr r0, [r1, #RTC_PMIC_REG]
+ /* Wait for 2 seconds to lose power */
+ mov r3, #2
+ ldr r2, [r1, #RTC_SECONDS_REG]
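+ /* Each SECONDS change is one RTC tick; r3 counts down two ticks */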
+rtc_loop:
+ ldr r0, [r1, #RTC_SECONDS_REG]
+ cmp r0, r2
+ beq rtc_loop
+ mov r2, r0
+ subs r3, r3, #1
+ bne rtc_loop
+
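+ /* Still running: the PMIC did not cut power, so restore EMIF and abort */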
+ b re_enable_emif
+
+skip_rtc_only:
+
+ tst r4, #WFI_FLAG_WAKE_M3
+ beq wkup_m3_skip
+
/*
* For the MPU WFI to be registered as an interrupt
* to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
@@ -165,6 +251,7 @@ wait_emif_disable:
mov r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_SW_SLEEP
str r2, [r1]
+wkup_m3_skip:
/*
* Execute a barrier instruction to ensure that all cache,
* TLB and branch predictor maintenance operations issued
@@ -209,6 +296,7 @@ wait_emif_disable:
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
+re_enable_emif:
/* Re-enable EMIF */
ldr r1, am43xx_virt_emif_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
@@ -218,6 +306,9 @@ wait_emif_enable:
cmp r2, r3
bne wait_emif_enable
+ tst r4, #WFI_FLAG_FLUSH_CACHE
+ beq cache_skip_restore
+
/*
* Set SCTLR.C bit to allow data cache allocation
*/
@@ -226,9 +317,16 @@ wait_emif_enable:
mcr p15, 0, r0, c1, c0, 0
isb
- ldr r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
- blx r1
+cache_skip_restore:
+ /* Only necessary if PER is losing context */
+ tst r4, #WFI_FLAG_SELF_REFRESH
+ beq emif_skip_exit_sr_abt
+
+ adr r9, am43xx_emif_sram_table
+ ldr r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
+ blx r1
+emif_skip_exit_sr_abt:
/* Let the suspend code know about the abort */
mov r0, #1
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
@@ -333,8 +431,6 @@ ENDPROC(am43xx_resume_from_deep_sleep)
* Local variables
*/
.align
-resume_addr:
- .word cpu_resume - PAGE_OFFSET + 0x80000000
kernel_flush:
.word v7_flush_dcache_all
ddr_start:
@@ -381,6 +477,8 @@ ENTRY(am43xx_pm_sram)
.word am43xx_emif_sram_table
.word am43xx_pm_ro_sram_data
+resume_addr:
+ .word cpu_resume - PAGE_OFFSET + 0x80000000
.align 3
ENTRY(am43xx_pm_ro_sram_data)
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index f4f8f23bda8c..af46d2182533 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -688,7 +688,6 @@ struct platform_nand_data balloon3_nand_pdata = {
.chip_delay = 50,
},
.ctrl = {
- .hwcontrol = 0,
.dev_ready = balloon3_nand_dev_ready,
.select_chip = balloon3_nand_select_chip,
.cmd_ctrl = balloon3_nand_cmd_ctl,
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index d7c9a8476d57..a24783a03827 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -3,7 +3,9 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/clkdev.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/platform_data/i2c-pxa.h>
@@ -59,16 +61,6 @@ static struct resource pxamci_resources[] = {
.end = IRQ_MMC,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- .start = 21,
- .end = 21,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = 22,
- .end = 22,
- .flags = IORESOURCE_DMA,
- },
};
static u64 pxamci_dmamask = 0xffffffffUL;
@@ -406,16 +398,6 @@ static struct resource pxa_ir_resources[] = {
.end = 0x40700023,
.flags = IORESOURCE_MEM,
},
- [5] = {
- .start = 17,
- .end = 17,
- .flags = IORESOURCE_DMA,
- },
- [6] = {
- .start = 18,
- .end = 18,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa_device_ficp = {
@@ -496,6 +478,18 @@ struct platform_device pxa_device_ac97 = {
void __init pxa_set_ac97_info(pxa2xx_audio_ops_t *ops)
{
+ int ret;
+
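+ /*
+ * Presumably lets the AC97 codec devices (pxa2xx-ac97:0/1) claim
+ * the controller clock under the name "ac97_clk".
+ */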
+ ret = clk_add_alias("ac97_clk", "pxa2xx-ac97:0", "AC97CLK",
+ &pxa_device_ac97.dev);
+ if (ret)
+ pr_err("PXA AC97 clock1 alias error: %d\n", ret);
+
+ ret = clk_add_alias("ac97_clk", "pxa2xx-ac97:1", "AC97CLK",
+ &pxa_device_ac97.dev);
+ if (ret)
+ pr_err("PXA AC97 clock2 alias error: %d\n", ret);
+
pxa_register_device(&pxa_device_ac97, ops);
}
@@ -544,18 +538,6 @@ static struct resource pxa25x_resource_ssp[] = {
.end = IRQ_SSP,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 13,
- .end = 13,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 14,
- .end = 14,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa25x_device_ssp = {
@@ -582,18 +564,6 @@ static struct resource pxa25x_resource_nssp[] = {
.end = IRQ_NSSP,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 15,
- .end = 15,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 16,
- .end = 16,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa25x_device_nssp = {
@@ -620,18 +590,6 @@ static struct resource pxa25x_resource_assp[] = {
.end = IRQ_ASSP,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 23,
- .end = 23,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 24,
- .end = 24,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa25x_device_assp = {
@@ -750,18 +708,6 @@ static struct resource pxa27x_resource_ssp1[] = {
.end = IRQ_SSP,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 13,
- .end = 13,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 14,
- .end = 14,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa27x_device_ssp1 = {
@@ -788,18 +734,6 @@ static struct resource pxa27x_resource_ssp2[] = {
.end = IRQ_SSP2,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 15,
- .end = 15,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 16,
- .end = 16,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa27x_device_ssp2 = {
@@ -826,18 +760,6 @@ static struct resource pxa27x_resource_ssp3[] = {
.end = IRQ_SSP3,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 66,
- .end = 66,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 67,
- .end = 67,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa27x_device_ssp3 = {
@@ -894,16 +816,6 @@ static struct resource pxa3xx_resources_mci2[] = {
.end = IRQ_MMC2,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- .start = 93,
- .end = 93,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = 94,
- .end = 94,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa3xx_device_mci2 = {
@@ -933,16 +845,6 @@ static struct resource pxa3xx_resources_mci3[] = {
.end = IRQ_MMC3,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- .start = 100,
- .end = 100,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = 101,
- .end = 101,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa3xx_device_mci3 = {
@@ -1020,18 +922,6 @@ static struct resource pxa3xx_resources_nand[] = {
.end = IRQ_NAND,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for Data DMA */
- .start = 97,
- .end = 97,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for Command DMA */
- .start = 99,
- .end = 99,
- .flags = IORESOURCE_DMA,
- },
};
static u64 pxa3xx_nand_dma_mask = DMA_BIT_MASK(32);
@@ -1065,18 +955,6 @@ static struct resource pxa3xx_resource_ssp4[] = {
.end = IRQ_SSP4,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 2,
- .end = 2,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 3,
- .end = 3,
- .flags = IORESOURCE_DMA,
- },
};
/*
@@ -1202,11 +1080,6 @@ void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info)
platform_device_add(pd);
}
-static struct mmp_dma_platdata pxa_dma_pdata = {
- .dma_channels = 0,
- .nb_requestors = 0,
-};
-
static struct resource pxa_dma_resource[] = {
[0] = {
.start = 0x40000000,
@@ -1233,9 +1106,7 @@ static struct platform_device pxa2xx_pxa_dma = {
.resource = pxa_dma_resource,
};
-void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors)
+void __init pxa2xx_set_dmac_info(struct mmp_dma_platdata *dma_pdata)
{
- pxa_dma_pdata.dma_channels = nb_channels;
- pxa_dma_pdata.nb_requestors = nb_requestors;
- pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
+ pxa_register_device(&pxa2xx_pxa_dma, dma_pdata);
}
diff --git a/arch/arm/mach-pxa/devices.h b/arch/arm/mach-pxa/devices.h
index 11263f7c455b..498b07bc6a3e 100644
--- a/arch/arm/mach-pxa/devices.h
+++ b/arch/arm/mach-pxa/devices.h
@@ -1,4 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
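+/*
+ * Expands to a pointer to an anonymous struct pxad_param (channel priority
+ * plus DRCMR requestor line), used as the filter parameter in the per-SoC
+ * dma_slave_map tables.
+ */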
+#define PDMA_FILTER_PARAM(_prio, _requestor) (&(struct pxad_param) { \
+ .prio = PXAD_PRIO_##_prio, .drcmr = _requestor })
+struct mmp_dma_platdata;
+
extern struct platform_device pxa_device_pmu;
extern struct platform_device pxa_device_mci;
extern struct platform_device pxa3xx_device_mci2;
@@ -55,7 +59,7 @@ extern struct platform_device pxa3xx_device_gpio;
extern struct platform_device pxa93x_device_gpio;
void __init pxa_register_device(struct platform_device *dev, void *data);
-void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors);
+void __init pxa2xx_set_dmac_info(struct mmp_dma_platdata *dma_pdata);
struct i2c_pxa_platform_data;
extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info);
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 49022ad338e9..29be04c6cc48 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -346,7 +346,6 @@ struct platform_nand_data em_x270_nand_platdata = {
.chip_delay = 20,
},
.ctrl = {
- .hwcontrol = 0,
.dev_ready = em_x270_nand_device_ready,
.select_chip = 0,
.cmd_ctrl = em_x270_nand_cmd_ctl,
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index e2e7f247a645..b79b757fdd41 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -54,6 +54,7 @@
#include "devices.h"
#include "generic.h"
+#include "udc.h"
/* Physical address space information */
@@ -594,6 +595,8 @@ static struct platform_device gpio_vbus = {
},
};
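+/* Empty on purpose; VBUS handling is presumably left to the gpio-vbus device above */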
+static struct pxa2xx_udc_mach_info hx4700_udc_info;
+
/*
* Touchscreen - TSC2046 connected to SSP2
*/
@@ -891,6 +894,7 @@ static void __init hx4700_init(void)
gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1);
mdelay(10);
+ pxa_set_udc_info(&hx4700_udc_info);
regulator_has_full_constraints();
}
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 9c10248fadcc..4e8c2116808e 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
{
int i;
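+ /* Round up so a trailing partial bank of 32 IRQs is saved too */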
- for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+ for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
void __iomem *base = irq_base(i);
saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
{
int i;
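+ /* Round up to match pxa_irq_suspend */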
- for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+ for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
void __iomem *base = irq_base(i);
__raw_writel(saved_icmr[i], base + ICMR);
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 9b6c7ea45a40..04dc78d0809f 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -677,14 +677,12 @@ MIO_SIMPLE_DEV(mioa701_led, "leds-gpio", &gpio_led_info)
MIO_SIMPLE_DEV(pxa2xx_pcm, "pxa2xx-pcm", NULL)
MIO_SIMPLE_DEV(mioa701_sound, "mioa701-wm9713", NULL)
MIO_SIMPLE_DEV(mioa701_board, "mioa701-board", NULL)
-MIO_SIMPLE_DEV(wm9713_acodec, "wm9713-codec", NULL);
MIO_SIMPLE_DEV(gpio_vbus, "gpio-vbus", &gpio_vbus_data);
static struct platform_device *devices[] __initdata = {
&mioa701_gpio_keys,
&mioa701_backlight,
&mioa701_led,
- &wm9713_acodec,
&pxa2xx_pcm,
&mioa701_sound,
&power_dev,
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index ba431fad5c47..ab8808ce7e21 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -16,6 +16,8 @@
* initialization stuff for PXA machines which can be overridden later if
* need be.
*/
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/module.h>
@@ -26,6 +28,7 @@
#include <linux/syscore_ops.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/platform_data/mmp_dma.h>
#include <asm/mach/map.h>
#include <asm/suspend.h>
@@ -201,6 +204,39 @@ static struct platform_device *pxa25x_devices[] __initdata = {
&pxa_device_asoc_platform,
};
+static const struct dma_slave_map pxa25x_slave_map[] = {
+ /* PXA25x, PXA27x and PXA3xx common entries */
+ { "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
+ PDMA_FILTER_PARAM(LOWEST, 10) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
+ { "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
+ { "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
+ { "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
+ { "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
+ { "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
+ { "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
+ { "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
+ { "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
+
+ /* PXA25x specific map */
+ { "pxa25x-ssp.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
+ { "pxa25x-ssp.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
+ { "pxa25x-nssp.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
+ { "pxa25x-nssp.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
+ { "pxa25x-nssp.2", "rx", PDMA_FILTER_PARAM(LOWEST, 23) },
+ { "pxa25x-nssp.2", "tx", PDMA_FILTER_PARAM(LOWEST, 24) },
+};
+
+static struct mmp_dma_platdata pxa25x_dma_pdata = {
+ .dma_channels = 16,
+ .nb_requestors = 40,
+ .slave_map = pxa25x_slave_map,
+ .slave_map_cnt = ARRAY_SIZE(pxa25x_slave_map),
+};
+
static int __init pxa25x_init(void)
{
int ret = 0;
@@ -215,7 +251,7 @@ static int __init pxa25x_init(void)
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
if (!of_have_populated_dt()) {
- pxa2xx_set_dmac_info(16, 40);
+ pxa2xx_set_dmac_info(&pxa25x_dma_pdata);
pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
ret = platform_add_devices(pxa25x_devices,
ARRAY_SIZE(pxa25x_devices));
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 0c06f383ad52..5a8990a9313d 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -11,6 +11,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/module.h>
@@ -23,6 +25,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_data/i2c-pxa.h>
+#include <linux/platform_data/mmp_dma.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
@@ -297,6 +300,40 @@ static struct platform_device *devices[] __initdata = {
&pxa27x_device_pwm1,
};
+static const struct dma_slave_map pxa27x_slave_map[] = {
+ /* PXA25x, PXA27x and PXA3xx common entries */
+ { "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
+ PDMA_FILTER_PARAM(LOWEST, 10) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
+ { "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
+ { "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
+ { "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
+ { "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
+ { "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
+ { "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
+ { "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
+ { "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
+ { "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) },
+ { "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) },
+
+ /* PXA27x specific map */
+ { "pxa2xx-i2s", "rx", PDMA_FILTER_PARAM(LOWEST, 2) },
+ { "pxa2xx-i2s", "tx", PDMA_FILTER_PARAM(LOWEST, 3) },
+ { "pxa27x-camera.0", "CI_Y", PDMA_FILTER_PARAM(HIGHEST, 68) },
+ { "pxa27x-camera.0", "CI_U", PDMA_FILTER_PARAM(HIGHEST, 69) },
+ { "pxa27x-camera.0", "CI_V", PDMA_FILTER_PARAM(HIGHEST, 70) },
+};
+
+static struct mmp_dma_platdata pxa27x_dma_pdata = {
+ .dma_channels = 32,
+ .nb_requestors = 75,
+ .slave_map = pxa27x_slave_map,
+ .slave_map_cnt = ARRAY_SIZE(pxa27x_slave_map),
+};
+
static int __init pxa27x_init(void)
{
int ret = 0;
@@ -313,7 +350,7 @@ static int __init pxa27x_init(void)
if (!of_have_populated_dt()) {
pxa_register_device(&pxa27x_device_gpio,
&pxa27x_gpio_info);
- pxa2xx_set_dmac_info(32, 75);
+ pxa2xx_set_dmac_info(&pxa27x_dma_pdata);
ret = platform_add_devices(devices,
ARRAY_SIZE(devices));
}
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 8c64f93b669b..df9c8970adcf 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -12,6 +12,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -24,6 +26,7 @@
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <linux/platform_data/i2c-pxa.h>
+#include <linux/platform_data/mmp_dma.h>
#include <asm/mach/map.h>
#include <asm/suspend.h>
@@ -421,6 +424,42 @@ static struct platform_device *devices[] __initdata = {
&pxa27x_device_pwm1,
};
+static const struct dma_slave_map pxa3xx_slave_map[] = {
+ /* PXA25x, PXA27x and PXA3xx common entries */
+ { "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
+ PDMA_FILTER_PARAM(LOWEST, 10) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
+ { "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
+ { "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
+ { "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
+ { "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
+ { "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
+ { "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
+ { "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
+ { "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
+ { "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) },
+ { "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) },
+
+ /* PXA3xx specific map */
+ { "pxa-ssp-dai.3", "rx", PDMA_FILTER_PARAM(LOWEST, 2) },
+ { "pxa-ssp-dai.3", "tx", PDMA_FILTER_PARAM(LOWEST, 3) },
+ { "pxa2xx-mci.1", "rx", PDMA_FILTER_PARAM(LOWEST, 93) },
+ { "pxa2xx-mci.1", "tx", PDMA_FILTER_PARAM(LOWEST, 94) },
+ { "pxa3xx-nand", "data", PDMA_FILTER_PARAM(LOWEST, 97) },
+ { "pxa2xx-mci.2", "rx", PDMA_FILTER_PARAM(LOWEST, 100) },
+ { "pxa2xx-mci.2", "tx", PDMA_FILTER_PARAM(LOWEST, 101) },
+};
+
+static struct mmp_dma_platdata pxa3xx_dma_pdata = {
+ .dma_channels = 32,
+ .nb_requestors = 100,
+ .slave_map = pxa3xx_slave_map,
+ .slave_map_cnt = ARRAY_SIZE(pxa3xx_slave_map),
+};
+
static int __init pxa3xx_init(void)
{
int ret = 0;
@@ -456,7 +495,7 @@ static int __init pxa3xx_init(void)
if (of_have_populated_dt())
return 0;
- pxa2xx_set_dmac_info(32, 100);
+ pxa2xx_set_dmac_info(&pxa3xx_dma_pdata);
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
if (ret)
return ret;
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index d69de312d8d9..52e70a5c1281 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -47,16 +47,6 @@ int wm9713_irq;
int lcd_id;
int lcd_orientation;
-struct platform_device pxa_device_wm9713_audio = {
- .name = "wm9713-codec",
- .id = -1,
-};
-
-static void __init zylonite_init_wm9713_audio(void)
-{
- platform_device_register(&pxa_device_wm9713_audio);
-}
-
static struct resource smc91x_resources[] = {
[0] = {
.start = ZYLONITE_ETH_PHYS + 0x300,
@@ -428,7 +418,6 @@ static void __init zylonite_init(void)
zylonite_init_nand();
zylonite_init_leds();
zylonite_init_ohci();
- zylonite_init_wm9713_audio();
}
MACHINE_START(ZYLONITE, "PXA3xx Platform Development Kit (aka Zylonite)")
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index 39aef4876ed4..04b2f22c2739 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -212,7 +212,7 @@ static DEFINE_MUTEX(ecard_mutex);
*/
static void ecard_init_pgtables(struct mm_struct *mm)
{
- struct vm_area_struct vma;
+ struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);
/* We want to set up the page tables for the following mapping:
* Virtual Physical
@@ -237,9 +237,6 @@ static void ecard_init_pgtables(struct mm_struct *mm)
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
- vma.vm_flags = VM_EXEC;
- vma.vm_mm = mm;
-
flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
}
diff --git a/arch/arm/mach-s3c24xx/include/mach/s3c2412.h b/arch/arm/mach-s3c24xx/include/mach/s3c2412.h
index b6b32724ace8..4ff83f956cfb 100644
--- a/arch/arm/mach-s3c24xx/include/mach/s3c2412.h
+++ b/arch/arm/mach-s3c24xx/include/mach/s3c2412.h
@@ -6,7 +6,7 @@
*/
#ifndef __ARCH_ARM_MACH_S3C24XX_S3C2412_H
-#define __ARCH_ARM_REGS_S3C24XX_S3C2412_H __FILE__
+#define __ARCH_ARM_MACH_S3C24XX_S3C2412_H __FILE__
#define S3C2412_MEMREG(x) (S3C24XX_VA_MEMCTRL + (x))
#define S3C2412_EBIREG(x) (S3C2412_VA_EBI + (x))
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 0b67254eabb2..aeb2eed08598 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -15,6 +15,7 @@ config ARCH_RCAR_GEN1
config ARCH_RCAR_GEN2
bool
+ select HAVE_ARM_ARCH_TIMER
select PM
select PM_GENERIC_DOMAINS
select RENESAS_IRQC
@@ -58,6 +59,7 @@ config ARCH_R8A73A4
bool "R-Mobile APE6 (R8A73A40)"
select ARCH_RMOBILE
select ARM_ERRATA_798181 if SMP
+ select HAVE_ARM_ARCH_TIMER
select RENESAS_IRQC
config ARCH_R8A7740
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 1939f521579c..b33dc59d8698 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -11,9 +11,7 @@ obj-$(CONFIG_ARCH_SH73A0) += setup-sh73a0.o
obj-$(CONFIG_ARCH_R8A73A4) += setup-r8a73a4.o
obj-$(CONFIG_ARCH_R8A7740) += setup-r8a7740.o
obj-$(CONFIG_ARCH_R8A7778) += setup-r8a7778.o
-obj-$(CONFIG_ARCH_R8A7779) += setup-r8a7779.o pm-r8a7779.o
-obj-$(CONFIG_ARCH_R8A7790) += setup-r8a7790.o
-obj-$(CONFIG_ARCH_R8A7791) += setup-r8a7791.o
+obj-$(CONFIG_ARCH_R8A7779) += setup-r8a7779.o
obj-$(CONFIG_ARCH_EMEV2) += setup-emev2.o
obj-$(CONFIG_ARCH_R7S72100) += setup-r7s72100.o
@@ -23,17 +21,15 @@ cpu-y := platsmp.o headsmp.o
# Shared SoC family objects
obj-$(CONFIG_ARCH_RCAR_GEN2) += setup-rcar-gen2.o platsmp-apmu.o $(cpu-y)
CFLAGS_setup-rcar-gen2.o += -march=armv7-a
-obj-$(CONFIG_ARCH_RCAR_GEN2) += headsmp-apmu.o
obj-$(CONFIG_ARCH_R8A7790) += regulator-quirk-rcar-gen2.o
obj-$(CONFIG_ARCH_R8A7791) += regulator-quirk-rcar-gen2.o
obj-$(CONFIG_ARCH_R8A7793) += regulator-quirk-rcar-gen2.o
# SMP objects
smp-y := $(cpu-y)
+smp-$(CONFIG_ARCH_RCAR_GEN2) += headsmp-apmu.o
smp-$(CONFIG_ARCH_SH73A0) += smp-sh73a0.o headsmp-scu.o platsmp-scu.o
smp-$(CONFIG_ARCH_R8A7779) += smp-r8a7779.o headsmp-scu.o platsmp-scu.o
-smp-$(CONFIG_ARCH_R8A7790) += smp-r8a7790.o
-smp-$(CONFIG_ARCH_R8A7791) += smp-r8a7791.o
smp-$(CONFIG_ARCH_EMEV2) += smp-emev2.o headsmp-scu.o platsmp-scu.o
# PM objects
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 2109f123bdfb..3ac4b36b5c2b 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -15,7 +15,6 @@ extern void shmobile_smp_sleep(void);
extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
unsigned long arg);
extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
-extern bool shmobile_smp_init_fallback_ops(void);
extern void shmobile_boot_apmu(void);
extern void shmobile_boot_scu(void);
extern void shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
diff --git a/arch/arm/mach-shmobile/headsmp-apmu.S b/arch/arm/mach-shmobile/headsmp-apmu.S
index d49ab194766a..fabe9cadd12e 100644
--- a/arch/arm/mach-shmobile/headsmp-apmu.S
+++ b/arch/arm/mach-shmobile/headsmp-apmu.S
@@ -1,19 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* SMP support for APMU based systems with Cortex A7/A15
*
* Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#ifdef CONFIG_SMP
ENTRY(shmobile_boot_apmu)
bl secure_cntvoff_init
b secondary_startup
ENDPROC(shmobile_boot_apmu)
-#endif
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
index ba732effc90b..96330ef25641 100644
--- a/arch/arm/mach-shmobile/platsmp-apmu.c
+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SMP support for SoCs with APMU
*
* Copyright (C) 2014 Renesas Electronics Corporation
* Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/cpu_pm.h>
#include <linux/delay.h>
@@ -23,7 +20,6 @@
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include "common.h"
-#include "platsmp-apmu.h"
#include "rcar-gen2.h"
static struct {
@@ -87,6 +83,104 @@ static int __maybe_unused apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu)
return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL;
}
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_SUSPEND)
+/* nicked from arch/arm/mach-exynos/hotplug.c */
+static inline void cpu_enter_lowpower_a15(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "Ir" (CR_C)
+ : "cc");
+
+ flush_cache_louis();
+
+ asm volatile(
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (0x40)
+ : "cc");
+
+ isb();
+ dsb();
+}
+
+static void shmobile_smp_apmu_cpu_shutdown(unsigned int cpu)
+{
+
+ /* Select next sleep mode using the APMU */
+ apmu_wrap(cpu, apmu_power_off);
+
+ /* Do ARM specific CPU shutdown */
+ cpu_enter_lowpower_a15();
+}
+#endif
+
+#if defined(CONFIG_HOTPLUG_CPU)
+static void shmobile_smp_apmu_cpu_die(unsigned int cpu)
+{
+ /* For this particular CPU deregister boot vector */
+ shmobile_smp_hook(cpu, 0, 0);
+
+ /* Shutdown CPU core */
+ shmobile_smp_apmu_cpu_shutdown(cpu);
+
+ /* jump to shared mach-shmobile sleep / reset code */
+ shmobile_smp_sleep();
+}
+
+static int shmobile_smp_apmu_cpu_kill(unsigned int cpu)
+{
+ return apmu_wrap(cpu, apmu_power_off_poll);
+}
+#endif
+
+#if defined(CONFIG_SUSPEND)
+static int shmobile_smp_apmu_do_suspend(unsigned long cpu)
+{
+ shmobile_smp_hook(cpu, __pa_symbol(cpu_resume), 0);
+ shmobile_smp_apmu_cpu_shutdown(cpu);
+ cpu_do_idle(); /* WFI selects Core Standby */
+ return 1;
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
+{
+ cpu_suspend(smp_processor_id(), shmobile_smp_apmu_do_suspend);
+ cpu_leave_lowpower();
+ return 0;
+}
+
+void __init shmobile_smp_apmu_suspend_init(void)
+{
+ shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
+}
+#endif
+
#ifdef CONFIG_SMP
static void apmu_init_cpu(struct resource *res, int cpu, int bit)
{
@@ -106,38 +200,6 @@ static void apmu_init_cpu(struct resource *res, int cpu, int bit)
writel(x, apmu_cpus[cpu].iomem + DBGRCR_OFFS);
}
-static void apmu_parse_cfg(void (*fn)(struct resource *res, int cpu, int bit),
- struct rcar_apmu_config *apmu_config, int num)
-{
- int id;
- int k;
- int bit, index;
- bool is_allowed;
-
- for (k = 0; k < num; k++) {
- /* only enable the cluster that includes the boot CPU */
- is_allowed = false;
- for (bit = 0; bit < ARRAY_SIZE(apmu_config[k].cpus); bit++) {
- id = apmu_config[k].cpus[bit];
- if (id >= 0) {
- if (id == cpu_logical_map(0))
- is_allowed = true;
- }
- }
- if (!is_allowed)
- continue;
-
- for (bit = 0; bit < ARRAY_SIZE(apmu_config[k].cpus); bit++) {
- id = apmu_config[k].cpus[bit];
- if (id >= 0) {
- index = get_logical_index(id);
- if (index >= 0)
- fn(&apmu_config[k].iomem, index, bit);
- }
- }
- }
-}
-
static const struct of_device_id apmu_ids[] = {
{ .compatible = "renesas,apmu" },
{ /*sentinel*/ }
@@ -194,15 +256,8 @@ static void __init shmobile_smp_apmu_setup_boot(void)
shmobile_boot_fn_gen2 = shmobile_boot_fn;
}
-void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
- struct rcar_apmu_config *apmu_config,
- int num)
-{
- shmobile_smp_apmu_setup_boot();
- apmu_parse_cfg(apmu_init_cpu, apmu_config, num);
-}
-
-int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int shmobile_smp_apmu_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
{
/* For this particular CPU register boot vector */
shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_apmu), 0);
@@ -229,101 +284,3 @@ static struct smp_operations apmu_smp_ops __initdata = {
CPU_METHOD_OF_DECLARE(shmobile_smp_apmu, "renesas,apmu", &apmu_smp_ops);
#endif /* CONFIG_SMP */
-
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_SUSPEND)
-/* nicked from arch/arm/mach-exynos/hotplug.c */
-static inline void cpu_enter_lowpower_a15(void)
-{
- unsigned int v;
-
- asm volatile(
- " mrc p15, 0, %0, c1, c0, 0\n"
- " bic %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- : "=&r" (v)
- : "Ir" (CR_C)
- : "cc");
-
- flush_cache_louis();
-
- asm volatile(
- /*
- * Turn off coherency
- */
- " mrc p15, 0, %0, c1, c0, 1\n"
- " bic %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- : "Ir" (0x40)
- : "cc");
-
- isb();
- dsb();
-}
-
-static void shmobile_smp_apmu_cpu_shutdown(unsigned int cpu)
-{
-
- /* Select next sleep mode using the APMU */
- apmu_wrap(cpu, apmu_power_off);
-
- /* Do ARM specific CPU shutdown */
- cpu_enter_lowpower_a15();
-}
-
-static inline void cpu_leave_lowpower(void)
-{
- unsigned int v;
-
- asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- " mrc p15, 0, %0, c1, c0, 1\n"
- " orr %0, %0, %2\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- : "Ir" (CR_C), "Ir" (0x40)
- : "cc");
-}
-#endif
-
-#if defined(CONFIG_HOTPLUG_CPU)
-void shmobile_smp_apmu_cpu_die(unsigned int cpu)
-{
- /* For this particular CPU deregister boot vector */
- shmobile_smp_hook(cpu, 0, 0);
-
- /* Shutdown CPU core */
- shmobile_smp_apmu_cpu_shutdown(cpu);
-
- /* jump to shared mach-shmobile sleep / reset code */
- shmobile_smp_sleep();
-}
-
-int shmobile_smp_apmu_cpu_kill(unsigned int cpu)
-{
- return apmu_wrap(cpu, apmu_power_off_poll);
-}
-#endif
-
-#if defined(CONFIG_SUSPEND)
-static int shmobile_smp_apmu_do_suspend(unsigned long cpu)
-{
- shmobile_smp_hook(cpu, __pa_symbol(cpu_resume), 0);
- shmobile_smp_apmu_cpu_shutdown(cpu);
- cpu_do_idle(); /* WFI selects Core Standby */
- return 1;
-}
-
-static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
-{
- cpu_suspend(smp_processor_id(), shmobile_smp_apmu_do_suspend);
- cpu_leave_lowpower();
- return 0;
-}
-
-void __init shmobile_smp_apmu_suspend_init(void)
-{
- shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
-}
-#endif
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.h b/arch/arm/mach-shmobile/platsmp-apmu.h
deleted file mode 100644
index 76512c9a2545..000000000000
--- a/arch/arm/mach-shmobile/platsmp-apmu.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * rmobile apmu definition
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef PLATSMP_APMU_H
-#define PLATSMP_APMU_H
-
-struct rcar_apmu_config {
- struct resource iomem;
- int cpus[4];
-};
-
-extern void shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
- struct rcar_apmu_config *apmu_config,
- int num);
-extern int shmobile_smp_apmu_boot_secondary(unsigned int cpu,
- struct task_struct *idle);
-extern void shmobile_smp_apmu_cpu_die(unsigned int cpu);
-extern int shmobile_smp_apmu_cpu_kill(unsigned int cpu);
-
-#endif /* PLATSMP_APMU_H */
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 02e21bceb085..b23378f3d7e1 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -36,12 +36,3 @@ bool shmobile_smp_cpu_can_disable(unsigned int cpu)
return true; /* Hotplug of any CPU is supported */
}
#endif
-
-bool __init shmobile_smp_init_fallback_ops(void)
-{
- /* fallback on PSCI/smp_ops if no other DT based method is detected */
- if (!IS_ENABLED(CONFIG_SMP))
- return false;
-
- return platform_can_secondary_boot() ? true : false;
-}
diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c
deleted file mode 100644
index 5c9a93f5e650..000000000000
--- a/arch/arm/mach-shmobile/pm-r8a7779.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * r8a7779 Power management support
- *
- * Copyright (C) 2011 Renesas Solutions Corp.
- * Copyright (C) 2011 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/soc/renesas/rcar-sysc.h>
-
-#include <asm/io.h>
-
-#include "r8a7779.h"
-
-/* SYSC */
-#define SYSCIER 0x0c
-#define SYSCIMR 0x10
-
-#if defined(CONFIG_PM) || defined(CONFIG_SMP)
-
-static void __init r8a7779_sysc_init(void)
-{
- rcar_sysc_init(0xffd85000, 0x0131000e);
-}
-
-#else /* CONFIG_PM || CONFIG_SMP */
-
-static inline void r8a7779_sysc_init(void) {}
-
-#endif /* CONFIG_PM || CONFIG_SMP */
-
-void __init r8a7779_pm_init(void)
-{
- static int once;
-
- if (!once++)
- r8a7779_sysc_init();
-}
diff --git a/arch/arm/mach-shmobile/pm-rcar-gen2.c b/arch/arm/mach-shmobile/pm-rcar-gen2.c
index 5a798b406af0..345af3ebcc3a 100644
--- a/arch/arm/mach-shmobile/pm-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/pm-rcar-gen2.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/smp.h>
-#include <linux/soc/renesas/rcar-sysc.h>
#include <asm/io.h>
#include <asm/cputype.h>
#include "common.h"
@@ -46,23 +45,6 @@ static inline u32 phys_to_sbar(phys_addr_t addr)
return (addr >> 8) & 0xfffffc00;
}
-/* SYSC */
-#define SYSCIER 0x0c
-#define SYSCIMR 0x10
-
-#if defined(CONFIG_SMP)
-
-static void __init rcar_gen2_sysc_init(u32 syscier)
-{
- rcar_sysc_init(0xe6180000, syscier);
-}
-
-#else /* CONFIG_SMP */
-
-static inline void rcar_gen2_sysc_init(u32 syscier) {}
-
-#endif /* CONFIG_SMP */
-
void __init rcar_gen2_pm_init(void)
{
void __iomem *p;
@@ -72,7 +54,6 @@ void __init rcar_gen2_pm_init(void)
bool has_a7 = false;
bool has_a15 = false;
struct resource res;
- u32 syscier = 0;
int error;
if (once++)
@@ -89,11 +70,6 @@ void __init rcar_gen2_pm_init(void)
has_a7 = true;
}
- if (of_machine_is_compatible("renesas,r8a7790"))
- syscier = 0x013111ef;
- else if (of_machine_is_compatible("renesas,r8a7791"))
- syscier = 0x00111003;
-
np = of_find_compatible_node(NULL, NULL, "renesas,smp-sram");
if (!np) {
/* No smp-sram in DT, fall back to hardcoded address */
@@ -155,6 +131,5 @@ map:
}
iounmap(p);
- rcar_gen2_sysc_init(syscier);
shmobile_smp_apmu_suspend_init();
}
diff --git a/arch/arm/mach-shmobile/r8a7779.h b/arch/arm/mach-shmobile/r8a7779.h
index 30668aa6acc3..ca9db8fde2f7 100644
--- a/arch/arm/mach-shmobile/r8a7779.h
+++ b/arch/arm/mach-shmobile/r8a7779.h
@@ -2,8 +2,6 @@
#ifndef __ASM_R8A7779_H__
#define __ASM_R8A7779_H__
-extern void r8a7779_pm_init(void);
-
extern const struct smp_operations r8a7779_smp_ops;
#endif /* __ASM_R8A7779_H__ */
diff --git a/arch/arm/mach-shmobile/r8a7790.h b/arch/arm/mach-shmobile/r8a7790.h
deleted file mode 100644
index 669c8cd09e07..000000000000
--- a/arch/arm/mach-shmobile/r8a7790.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_R8A7790_H__
-#define __ASM_R8A7790_H__
-
-extern const struct smp_operations r8a7790_smp_ops;
-
-#endif /* __ASM_R8A7790_H__ */
diff --git a/arch/arm/mach-shmobile/r8a7791.h b/arch/arm/mach-shmobile/r8a7791.h
deleted file mode 100644
index 8c794aace938..000000000000
--- a/arch/arm/mach-shmobile/r8a7791.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_R8A7791_H__
-#define __ASM_R8A7791_H__
-
-extern const struct smp_operations r8a7791_smp_ops;
-
-#endif /* __ASM_R8A7791_H__ */
diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
index 93f628acfd94..21ebc7678ffd 100644
--- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Car Generation 2 da9063/da9210 regulator quirk
*
@@ -16,15 +17,6 @@
* been initialized, but before the i2c slave drivers are initialized.
*
* Copyright (C) 2015 Glider bvba
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c
index 3c99aaf65325..a328d2f52678 100644
--- a/arch/arm/mach-shmobile/setup-emev2.c
+++ b/arch/arm/mach-shmobile/setup-emev2.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Emma Mobile EV2 processor support
*
* Copyright (C) 2012 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/arch/arm/mach-shmobile/setup-r7s72100.c b/arch/arm/mach-shmobile/setup-r7s72100.c
index 319ca9508ec6..14867226f8f4 100644
--- a/arch/arm/mach-shmobile/setup-r7s72100.c
+++ b/arch/arm/mach-shmobile/setup-r7s72100.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r7s72100 processor support
*
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c
index 20173c4f415d..23a29a0ea9c9 100644
--- a/arch/arm/mach-shmobile/setup-r8a73a4.c
+++ b/arch/arm/mach-shmobile/setup-r8a73a4.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a73a4 processor support
*
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/init.h>
@@ -26,7 +18,6 @@ static const char *const r8a73a4_boards_compat_dt[] __initconst = {
};
DT_MACHINE_START(R8A73A4_DT, "Generic R8A73A4 (Flattened Device Tree)")
- .init_early = shmobile_init_delay,
.init_late = shmobile_init_late,
.dt_compat = r8a73a4_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 3849eef0d3a7..787d039b5a07 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A7740 processor support
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2011 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index 7fa4a0b5f654..ce51794f64c7 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7778 processor support
*
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
* Copyright (C) 2013 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 0686112f2435..d589326099e0 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7779 processor support
*
* Copyright (C) 2011, 2013 Renesas Solutions Corp.
* Copyright (C) 2011 Magnus Damm
* Copyright (C) 2013 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
diff --git a/arch/arm/mach-shmobile/setup-r8a7790.c b/arch/arm/mach-shmobile/setup-r8a7790.c
deleted file mode 100644
index 78d3e859bd64..000000000000
--- a/arch/arm/mach-shmobile/setup-r8a7790.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * r8a7790 processor support
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "r8a7790.h"
-#include "rcar-gen2.h"
-
-static const char * const r8a7790_boards_compat_dt[] __initconst = {
- "renesas,r8a7790",
- NULL,
-};
-
-DT_MACHINE_START(R8A7790_DT, "Generic R8A7790 (Flattened Device Tree)")
- .smp_init = smp_init_ops(shmobile_smp_init_fallback_ops),
- .smp = smp_ops(r8a7790_smp_ops),
- .init_early = shmobile_init_delay,
- .init_time = rcar_gen2_timer_init,
- .init_late = shmobile_init_late,
- .reserve = rcar_gen2_reserve,
- .dt_compat = r8a7790_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-r8a7791.c b/arch/arm/mach-shmobile/setup-r8a7791.c
deleted file mode 100644
index 26e2d181a190..000000000000
--- a/arch/arm/mach-shmobile/setup-r8a7791.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * r8a7791 processor support
- *
- * Copyright (C) 2013 Renesas Electronics Corporation
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "r8a7791.h"
-#include "rcar-gen2.h"
-
-static const char *const r8a7791_boards_compat_dt[] __initconst = {
- "renesas,r8a7791",
- NULL,
-};
-
-DT_MACHINE_START(R8A7791_DT, "Generic R8A7791 (Flattened Device Tree)")
- .smp_init = smp_init_ops(shmobile_smp_init_fallback_ops),
- .smp = smp_ops(r8a7791_smp_ops),
- .init_early = shmobile_init_delay,
- .init_time = rcar_gen2_timer_init,
- .init_late = shmobile_init_late,
- .reserve = rcar_gen2_reserve,
- .dt_compat = r8a7791_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 88fdc1801d90..013acc97795c 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Car Generation 2 support
*
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Magnus Damm
* Copyright (C) 2014 Ulrich Hecht
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
@@ -67,7 +59,6 @@ static unsigned int __init get_extal_freq(void)
void __init rcar_gen2_timer_init(void)
{
-#ifdef CONFIG_ARM_ARCH_TIMER
void __iomem *base;
u32 freq;
@@ -109,7 +100,6 @@ void __init rcar_gen2_timer_init(void)
}
iounmap(base);
-#endif /* CONFIG_ARM_ARCH_TIMER */
of_clk_init(NULL);
timer_probe();
@@ -186,10 +176,8 @@ void __init rcar_gen2_reserve(void)
}
static const char * const rcar_gen2_boards_compat_dt[] __initconst = {
- /*
- * R8A7790 and R8A7791 can't be handled here as long as they need SMP
- * initialization fallback.
- */
+ "renesas,r8a7790",
+ "renesas,r8a7791",
"renesas,r8a7792",
"renesas,r8a7793",
"renesas,r8a7794",
@@ -197,7 +185,6 @@ static const char * const rcar_gen2_boards_compat_dt[] __initconst = {
};
DT_MACHINE_START(RCAR_GEN2_DT, "Generic R-Car Gen2 (Flattened Device Tree)")
- .init_early = shmobile_init_delay,
.init_late = shmobile_init_late,
.init_time = rcar_gen2_timer_init,
.reserve = rcar_gen2_reserve,
@@ -212,7 +199,6 @@ static const char * const rz_g1_boards_compat_dt[] __initconst = {
};
DT_MACHINE_START(RZ_G1_DT, "Generic RZ/G1 (Flattened Device Tree)")
- .init_early = shmobile_init_delay,
.init_late = shmobile_init_late,
.init_time = rcar_gen2_timer_init,
.reserve = rcar_gen2_reserve,
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index a25ff188e403..cc08aa752244 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sh73a0 processor support
*
* Copyright (C) 2010 Takashi Yoshii
* Copyright (C) 2010 Magnus Damm
* Copyright (C) 2008 Yoshihiro Shimoda
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/arch/arm/mach-shmobile/smp-emev2.c b/arch/arm/mach-shmobile/smp-emev2.c
index 3a732199cf5e..3853ecea44ca 100644
--- a/arch/arm/mach-shmobile/smp-emev2.c
+++ b/arch/arm/mach-shmobile/smp-emev2.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SMP support for Emma Mobile EV2
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Copyright (C) 2012 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index c6951ee24588..0ed73b650c14 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SMP support for R-Mobile / SH-Mobile - r8a7779 portion
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2011 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
@@ -31,59 +23,13 @@
#define AVECR IOMEM(0xfe700040)
#define R8A7779_SCU_BASE 0xf0000000
-static const struct rcar_sysc_ch r8a7779_ch_cpu1 = {
- .chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
- .chan_bit = 1, /* ARM1 */
- .isr_bit = 1, /* ARM1 */
-};
-
-static const struct rcar_sysc_ch r8a7779_ch_cpu2 = {
- .chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
- .chan_bit = 2, /* ARM2 */
- .isr_bit = 2, /* ARM2 */
-};
-
-static const struct rcar_sysc_ch r8a7779_ch_cpu3 = {
- .chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
- .chan_bit = 3, /* ARM3 */
- .isr_bit = 3, /* ARM3 */
-};
-
-static const struct rcar_sysc_ch * const r8a7779_ch_cpu[4] = {
- [1] = &r8a7779_ch_cpu1,
- [2] = &r8a7779_ch_cpu2,
- [3] = &r8a7779_ch_cpu3,
-};
-
-static int r8a7779_platform_cpu_kill(unsigned int cpu)
+static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- const struct rcar_sysc_ch *ch = NULL;
int ret = -EIO;
cpu = cpu_logical_map(cpu);
-
- if (cpu < ARRAY_SIZE(r8a7779_ch_cpu))
- ch = r8a7779_ch_cpu[cpu];
-
- if (ch)
- ret = rcar_sysc_power_down(ch);
-
- return ret ? ret : 1;
-}
-
-static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
-{
- const struct rcar_sysc_ch *ch = NULL;
- unsigned int lcpu = cpu_logical_map(cpu);
- int ret;
-
- if (lcpu < ARRAY_SIZE(r8a7779_ch_cpu))
- ch = r8a7779_ch_cpu[lcpu];
-
- if (ch)
- ret = rcar_sysc_power_up(ch);
- else
- ret = -EIO;
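+ /* Physical CPU0 is the boot CPU and is already powered up */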
+ if (cpu)
+ ret = rcar_sysc_power_up_cpu(cpu);
return ret;
}
@@ -95,16 +41,20 @@ static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
/* setup r8a7779 specific SCU bits */
shmobile_smp_scu_prepare_cpus(R8A7779_SCU_BASE, max_cpus);
+}
- r8a7779_pm_init();
+#ifdef CONFIG_HOTPLUG_CPU
+static int r8a7779_platform_cpu_kill(unsigned int cpu)
+{
+ int ret = -EIO;
- /* power off secondary CPUs */
- r8a7779_platform_cpu_kill(1);
- r8a7779_platform_cpu_kill(2);
- r8a7779_platform_cpu_kill(3);
+ cpu = cpu_logical_map(cpu);
+ if (cpu)
+ ret = rcar_sysc_power_down_cpu(cpu);
+
+ return ret ? ret : 1;
}
-#ifdef CONFIG_HOTPLUG_CPU
static int r8a7779_cpu_kill(unsigned int cpu)
{
if (shmobile_smp_scu_cpu_kill(cpu))
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
deleted file mode 100644
index 28f26d5362d8..000000000000
--- a/arch/arm/mach-shmobile/smp-r8a7790.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * SMP support for r8a7790
- *
- * Copyright (C) 2012-2013 Renesas Solutions Corp.
- * Copyright (C) 2012 Takashi Yoshii <takashi.yoshii.ze@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/io.h>
-#include <linux/soc/renesas/rcar-sysc.h>
-
-#include <asm/smp_plat.h>
-
-#include "common.h"
-#include "platsmp-apmu.h"
-#include "rcar-gen2.h"
-#include "r8a7790.h"
-
-static const struct rcar_sysc_ch r8a7790_ca15_scu = {
- .chan_offs = 0x180, /* PWRSR5 .. PWRER5 */
- .isr_bit = 12, /* CA15-SCU */
-};
-
-static const struct rcar_sysc_ch r8a7790_ca7_scu = {
- .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */
- .isr_bit = 21, /* CA7-SCU */
-};
-
-static struct rcar_apmu_config r8a7790_apmu_config[] = {
- {
- .iomem = DEFINE_RES_MEM(0xe6152000, 0x188),
- .cpus = { 0, 1, 2, 3 },
- },
- {
- .iomem = DEFINE_RES_MEM(0xe6151000, 0x188),
- .cpus = { 0x100, 0x0101, 0x102, 0x103 },
- }
-};
-
-static void __init r8a7790_smp_prepare_cpus(unsigned int max_cpus)
-{
- /* let APMU code install data related to shmobile_boot_vector */
- shmobile_smp_apmu_prepare_cpus(max_cpus,
- r8a7790_apmu_config,
- ARRAY_SIZE(r8a7790_apmu_config));
-
- /* turn on power to SCU */
- rcar_gen2_pm_init();
- rcar_sysc_power_up(&r8a7790_ca15_scu);
- rcar_sysc_power_up(&r8a7790_ca7_scu);
-}
-
-const struct smp_operations r8a7790_smp_ops __initconst = {
- .smp_prepare_cpus = r8a7790_smp_prepare_cpus,
- .smp_boot_secondary = shmobile_smp_apmu_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_can_disable = shmobile_smp_cpu_can_disable,
- .cpu_die = shmobile_smp_apmu_cpu_die,
- .cpu_kill = shmobile_smp_apmu_cpu_kill,
-#endif
-};
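
The .cpus values in the deleted r8a7790_apmu_config are MPIDR affinity
values: Aff1 (bits [15:8]) is the cluster and Aff0 (bits [7:0]) the core,
so 0x100-0x103 name cores 0-3 of the second (CA7) cluster. Two
illustrative helpers (not kernel code) make the encoding explicit:

	static inline unsigned int mpidr_cluster(unsigned int mpidr)
	{
		return (mpidr >> 8) & 0xff;	/* Aff1 */
	}

	static inline unsigned int mpidr_core(unsigned int mpidr)
	{
		return mpidr & 0xff;		/* Aff0 */
	}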
diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c
deleted file mode 100644
index 2948c22cfc53..000000000000
--- a/arch/arm/mach-shmobile/smp-r8a7791.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * SMP support for r8a7791
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/io.h>
-
-#include <asm/smp_plat.h>
-
-#include "common.h"
-#include "platsmp-apmu.h"
-#include "r8a7791.h"
-#include "rcar-gen2.h"
-
-static struct rcar_apmu_config r8a7791_apmu_config[] = {
- {
- .iomem = DEFINE_RES_MEM(0xe6152000, 0x188),
- .cpus = { 0, 1 },
- }
-};
-
-static void __init r8a7791_smp_prepare_cpus(unsigned int max_cpus)
-{
- /* let APMU code install data related to shmobile_boot_vector */
- shmobile_smp_apmu_prepare_cpus(max_cpus,
- r8a7791_apmu_config,
- ARRAY_SIZE(r8a7791_apmu_config));
-
- rcar_gen2_pm_init();
-}
-
-const struct smp_operations r8a7791_smp_ops __initconst = {
- .smp_prepare_cpus = r8a7791_smp_prepare_cpus,
- .smp_boot_secondary = shmobile_smp_apmu_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_can_disable = shmobile_smp_cpu_can_disable,
- .cpu_die = shmobile_smp_apmu_cpu_die,
- .cpu_kill = shmobile_smp_apmu_cpu_kill,
-#endif
-};
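
These per-SoC files can go because the generic APMU code binds itself via
the device-tree enable-method rather than a board file. A sketch of that
registration pattern, with a hypothetical "vendor,foo-smp" method string
and a stubbed boot callback:

	#include <linux/errno.h>
	#include <linux/smp.h>
	#include <asm/smp.h>

	static int foo_boot_secondary(unsigned int cpu, struct task_struct *idle)
	{
		return -ENOSYS;			/* placeholder */
	}

	static const struct smp_operations foo_smp_ops __initconst = {
		.smp_boot_secondary	= foo_boot_secondary,
	};
	CPU_METHOD_OF_DECLARE(foo_smp, "vendor,foo-smp", &foo_smp_ops);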
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index 6196a6380385..828e8aea037e 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -32,14 +32,6 @@ void __init shmobile_init_delay(void)
for_each_child_of_node(cpus, np) {
u32 freq;
- if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER) &&
- (of_device_is_compatible(np, "arm,cortex-a7") ||
- of_device_is_compatible(np, "arm,cortex-a15"))) {
- of_node_put(np);
- of_node_put(cpus);
- return;
- }
-
if (!of_property_read_u32(np, "clock-frequency", &freq))
max_freq = max(max_freq, freq);
}
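
With the early return gone, the scan always walks every /cpus child and
keeps the highest clock-frequency. The underlying DT pattern, as a
self-contained sketch:

	#include <linux/kernel.h>
	#include <linux/of.h>

	static u32 highest_cpu_clock(void)
	{
		struct device_node *cpus, *np;
		u32 max_freq = 0, freq;

		cpus = of_find_node_by_path("/cpus");
		if (!cpus)
			return 0;
		for_each_child_of_node(cpus, np)
			if (!of_property_read_u32(np, "clock-frequency", &freq))
				max_freq = max(max_freq, freq);
		of_node_put(cpus);
		return max_freq;
	}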
diff --git a/arch/arm/mach-uniphier/Kconfig b/arch/arm/mach-uniphier/Kconfig
index 779235a9147d..e661d2626675 100644
--- a/arch/arm/mach-uniphier/Kconfig
+++ b/arch/arm/mach-uniphier/Kconfig
@@ -9,6 +9,7 @@ config ARCH_UNIPHIER
select HAVE_ARM_SCU
select HAVE_ARM_TWD if SMP
select PINCTRL
+ select RESET_CONTROLLER
help
Support for UniPhier SoC family developed by Socionext Inc.
(formerly, System LSI Business Division of Panasonic Corporation)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 96a7b6cf459b..b169e580bf82 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -702,7 +702,6 @@ config ARM_THUMBEE
config ARM_VIRT_EXT
bool
- depends on MMU
default y if CPU_V7
help
Enable the kernel to make use of the ARM Virtualization
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index be0fa7e39c26..66566472c153 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -594,7 +594,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
struct page *page;
void *ptr = NULL;
- page = dma_alloc_from_contiguous(dev, count, order, gfp);
+ page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
if (!page)
return NULL;
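
The last parameter of dma_alloc_from_contiguous() is a bool no_warn here,
so the mask leans on C's implicit conversion to bool; written out, the
call is equivalent to:

	bool no_warn = !!(gfp & __GFP_NOWARN);

	page = dma_alloc_from_contiguous(dev, count, order, no_warn);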
@@ -1151,6 +1151,11 @@ int arm_dma_supported(struct device *dev, u64 mask)
return __dma_supported(dev, mask, false);
}
+static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+{
+ return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
+}
+
#ifdef CONFIG_ARM_DMA_USE_IOMMU
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
@@ -1294,7 +1299,8 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
unsigned long order = get_order(size);
struct page *page;
- page = dma_alloc_from_contiguous(dev, count, order, gfp);
+ page = dma_alloc_from_contiguous(dev, count, order,
+ gfp & __GFP_NOWARN);
if (!page)
goto error;
@@ -2296,7 +2302,7 @@ void arm_iommu_detach_device(struct device *dev)
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
to_dma_iommu_mapping(dev) = NULL;
- set_dma_ops(dev, NULL);
+ set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
@@ -2357,11 +2363,6 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
#endif /* CONFIG_ARM_DMA_USE_IOMMU */
-static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
-{
- return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
-}
-
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 84becc911ee3..3232afb6fdc0 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -224,12 +224,12 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
return vma->vm_flags & mask ? false : true;
}
-static int __kprobes
+static vm_fault_t __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
unsigned int flags, struct task_struct *tsk)
{
struct vm_area_struct *vma;
- int fault;
+ vm_fault_t fault;
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
@@ -264,7 +264,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk;
struct mm_struct *mm;
- int fault, sig, code;
+ int sig, code;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (notify_page_fault(regs, fsr))
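
vm_fault_t is a sparse __bitwise type, so keeping fault results in it
rather than in a plain int lets static checkers catch accidental mixing
with errno values. The consumer pattern is unchanged; for example (with a
hypothetical error helper):

	vm_fault_t fault;

	fault = handle_mm_fault(vma, addr, flags);
	if (fault & VM_FAULT_ERROR)
		return finish_fault_error(fault);	/* hypothetical */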
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c186474422f3..0cc8e04295a4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -736,20 +736,29 @@ static int __mark_rodata_ro(void *unused)
return 0;
}
+static int kernel_set_to_readonly __read_mostly;
+
void mark_rodata_ro(void)
{
+ kernel_set_to_readonly = 1;
stop_machine(__mark_rodata_ro, NULL, NULL);
debug_checkwx();
}
void set_kernel_text_rw(void)
{
+ if (!kernel_set_to_readonly)
+ return;
+
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
current->active_mm);
}
void set_kernel_text_ro(void)
{
+ if (!kernel_set_to_readonly)
+ return;
+
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
current->active_mm);
}
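
The kernel_set_to_readonly flag mirrors the x86 arrangement: callers such
as ftrace may toggle kernel text RW/RO around patching, and doing so
before mark_rodata_ro() has applied the initial permissions would operate
on unprotected mappings. A condensed model of the ordering contract
(sketch, not kernel code):

	static bool text_is_ro;

	void apply_initial_protection(void)	/* mark_rodata_ro() */
	{
		text_is_ro = true;
		/* ...apply read-only section permissions... */
	}

	void make_text_rw(void)			/* set_kernel_text_rw() */
	{
		if (!text_is_ro)
			return;			/* nothing applied yet */
		/* ...flip text back to read-write... */
	}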
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 5dd6c58d653b..7d67c70bbded 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -53,7 +53,8 @@ static inline bool security_extensions_enabled(void)
{
/* Check CPUID Identification Scheme before ID_PFR1 read */
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
- return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+ return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
+ cpuid_feature_extract(CPUID_EXT_PFR1, 20);
return 0;
}
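
cpuid_feature_extract() returns the signed 4-bit ID field at the given
bit position: ID_PFR1[7:4] is the Security field and ID_PFR1[23:20] the
fractional Sec_frac field used by newer cores, so either one being
non-zero now counts. A hedged restatement of the extraction:

	static int id_field(u32 reg, int shift)
	{
		int field = (reg >> shift) & 0xf;

		return field > 7 ? field - 16 : field;	/* ID fields are signed */
	}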
diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h
index 8015ad434a40..24101925fe64 100644
--- a/arch/arm/mm/tcm.h
+++ b/arch/arm/mm/tcm.h
@@ -11,7 +11,7 @@
void __init tcm_init(void);
#else
/* No TCM support, just blank inlines to be optimized out */
-inline void tcm_init(void)
+static inline void tcm_init(void)
{
}
#endif
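
The missing static matters for linkage: a bare inline definition in a
header has external linkage, so depending on the dialect each includer
either emits a duplicate symbol or, under C99 rules, none at all, leaving
an undefined reference if a call is not inlined. The two behaviours side
by side:

	inline void broken_stub(void) { }	/* external linkage: fragile */
	static inline void safe_stub(void) { }	/* one private copy per TU */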
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6e8b71613039..25b3ee85066e 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -22,6 +22,7 @@
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>
+#include <asm/system_info.h>
#include "bpf_jit_32.h"
@@ -47,32 +48,73 @@
 * The callee saved registers depend on whether frame pointers are enabled.
* With frame pointers (to be compliant with the ABI):
*
- * high
- * original ARM_SP => +------------------+ \
- * | pc | |
- * current ARM_FP => +------------------+ } callee saved registers
- * |r4-r8,r10,fp,ip,lr| |
- * +------------------+ /
- * low
+ * high
+ * original ARM_SP => +--------------+ \
+ * | pc | |
+ * current ARM_FP => +--------------+ } callee saved registers
+ * |r4-r9,fp,ip,lr| |
+ * +--------------+ /
+ * low
*
* Without frame pointers:
*
- * high
- * original ARM_SP => +------------------+
- * | r4-r8,r10,fp,lr | callee saved registers
- * current ARM_FP => +------------------+
- * low
+ * high
+ * original ARM_SP => +--------------+
+ * | r4-r9,fp,lr | callee saved registers
+ * current ARM_FP => +--------------+
+ * low
*
* When popping registers off the stack at the end of a BPF function, we
* reference them via the current ARM_FP register.
*/
#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
- 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
+ 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
1 << ARM_FP)
#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
-#define STACK_OFFSET(k) (k)
+enum {
+ /* Stack layout - these are offsets from (top of stack - 4) */
+ BPF_R2_HI,
+ BPF_R2_LO,
+ BPF_R3_HI,
+ BPF_R3_LO,
+ BPF_R4_HI,
+ BPF_R4_LO,
+ BPF_R5_HI,
+ BPF_R5_LO,
+ BPF_R7_HI,
+ BPF_R7_LO,
+ BPF_R8_HI,
+ BPF_R8_LO,
+ BPF_R9_HI,
+ BPF_R9_LO,
+ BPF_FP_HI,
+ BPF_FP_LO,
+ BPF_TC_HI,
+ BPF_TC_LO,
+ BPF_AX_HI,
+ BPF_AX_LO,
+ /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
+ * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
+ * BPF_REG_FP and Tail call counts.
+ */
+ BPF_JIT_SCRATCH_REGS,
+};
+
+/*
+ * Negative "register" values indicate the register is stored on the stack
+ * and are the offset from the top of the eBPF JIT scratch space.
+ */
+#define STACK_OFFSET(k) (-4 - (k) * 4)
+#define SCRATCH_SIZE (BPF_JIT_SCRATCH_REGS * 4)
+
+#ifdef CONFIG_FRAME_POINTER
+#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4)
+#else
+#define EBPF_SCRATCH_TO_ARM_FP(x) (x)
+#endif
+
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */
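
A worked instance of the new addressing, following directly from the
macros above: CALLEE_PUSH_MASK names eight registers (r4-r9, fp, lr), so
with frame pointers EBPF_SCRATCH_TO_ARM_FP(x) is x - 4*8 - 4 = x - 36,
and BPF_R2_HI (the first enum value, 0) gives STACK_OFFSET(BPF_R2_HI) =
-4; R2's high word therefore lives at [fp, #-40]:

	/* inside any function, CONFIG_FRAME_POINTER assumed: */
	BUILD_BUG_ON(EBPF_SCRATCH_TO_ARM_FP(STACK_OFFSET(BPF_R2_HI)) != -40);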
@@ -94,35 +136,35 @@
* scratch memory space and we have to build eBPF 64 bit register from those.
*
*/
-static const u8 bpf2a32[][2] = {
+static const s8 bpf2a32[][2] = {
/* return value from in-kernel function, and exit value from eBPF */
[BPF_REG_0] = {ARM_R1, ARM_R0},
/* arguments from eBPF program to in-kernel function */
[BPF_REG_1] = {ARM_R3, ARM_R2},
/* Stored on stack scratch space */
- [BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)},
- [BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)},
- [BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)},
- [BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)},
+ [BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)},
+ [BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)},
+ [BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)},
+ [BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)},
/* callee saved registers that in-kernel function will preserve */
[BPF_REG_6] = {ARM_R5, ARM_R4},
/* Stored on stack scratch space */
- [BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)},
- [BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)},
- [BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)},
+ [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
+ [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
+ [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
/* Read only Frame Pointer to access Stack */
- [BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)},
+ [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
 /* Temporary register for internal BPF JIT; can be used
 * for constant blinding and other purposes.
*/
[TMP_REG_1] = {ARM_R7, ARM_R6},
- [TMP_REG_2] = {ARM_R10, ARM_R8},
+ [TMP_REG_2] = {ARM_R9, ARM_R8},
/* Tail call count. Stored on stack scratch space. */
- [TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)},
+ [TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)},
/* temporary register for blinding constants.
* Stored on stack scratch space.
*/
- [BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)},
+ [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
};
#define dst_lo dst[1]
@@ -151,6 +193,7 @@ struct jit_ctx {
unsigned int idx;
unsigned int prologue_bytes;
unsigned int epilogue_offset;
+ unsigned int cpu_architecture;
u32 flags;
u32 *offsets;
u32 *target;
@@ -196,9 +239,55 @@ static inline void emit(u32 inst, struct jit_ctx *ctx)
}
/*
+ * This is rather horrid, but necessary to convert an integer constant
+ * to an immediate operand for the opcodes, and be able to detect at
+ * build time whether the constant can't be converted (iow, usable in
+ * BUILD_BUG_ON()).
+ */
+#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)
+#define const_imm8m(x) \
+ ({ int r; \
+ u32 v = (x); \
+ if (!(v & ~0x000000ff)) \
+ r = imm12val(v, 0); \
+ else if (!(v & ~0xc000003f)) \
+ r = imm12val(v, 2); \
+ else if (!(v & ~0xf000000f)) \
+ r = imm12val(v, 4); \
+ else if (!(v & ~0xfc000003)) \
+ r = imm12val(v, 6); \
+ else if (!(v & ~0xff000000)) \
+ r = imm12val(v, 8); \
+ else if (!(v & ~0x3fc00000)) \
+ r = imm12val(v, 10); \
+ else if (!(v & ~0x0ff00000)) \
+ r = imm12val(v, 12); \
+ else if (!(v & ~0x03fc0000)) \
+ r = imm12val(v, 14); \
+ else if (!(v & ~0x00ff0000)) \
+ r = imm12val(v, 16); \
+ else if (!(v & ~0x003fc000)) \
+ r = imm12val(v, 18); \
+ else if (!(v & ~0x000ff000)) \
+ r = imm12val(v, 20); \
+ else if (!(v & ~0x0003fc00)) \
+ r = imm12val(v, 22); \
+ else if (!(v & ~0x0000ff00)) \
+ r = imm12val(v, 24); \
+ else if (!(v & ~0x00003fc0)) \
+ r = imm12val(v, 26); \
+ else if (!(v & ~0x00000ff0)) \
+ r = imm12val(v, 28); \
+ else if (!(v & ~0x000003fc)) \
+ r = imm12val(v, 30); \
+ else \
+ r = -1; \
+ r; })
+
+/*
* Checks if immediate value can be converted to imm12(12 bits) value.
*/
-static int16_t imm8m(u32 x)
+static int imm8m(u32 x)
{
u32 rot;
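
A worked instance of the constant folding: 0xff000000 first matches the
!(v & ~0xff000000) arm, so const_imm8m() yields imm12val(0xff000000, 8)
= rol32(0xff000000, 8) | (8 << 7) = 0xff | 0x400 = 0x4ff, i.e. rotate
field 4 (ror #8) with immediate 0xff, exactly the ARM imm12 operand
encoding. Being build-time foldable, it can sit in assertions:

	/* inside any function: */
	BUILD_BUG_ON(const_imm8m(0xff000000) != 0x4ff);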
@@ -208,6 +297,38 @@ static int16_t imm8m(u32 x)
return -1;
}
+#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x))
+
+static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
+{
+ op |= rt << 12 | rn << 16;
+ if (imm12 >= 0)
+ op |= ARM_INST_LDST__U;
+ else
+ imm12 = -imm12;
+ return op | (imm12 & ARM_INST_LDST__IMM12);
+}
+
+static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
+{
+ op |= rt << 12 | rn << 16;
+ if (imm8 >= 0)
+ op |= ARM_INST_LDST__U;
+ else
+ imm8 = -imm8;
+ return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f);
+}
+
+#define ARM_LDR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
+#define ARM_LDRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
+#define ARM_LDRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
+#define ARM_LDRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)
+
+#define ARM_STR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
+#define ARM_STRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
+#define ARM_STRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
+#define ARM_STRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)
+
/*
* Initializes the JIT space with undefined instructions.
*/
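
These helpers start from base opcodes with the U (add offset) bit clear
and set it only for non-negative offsets, encoding just the magnitude
otherwise. For example, fetching R2's high word from the layout computed
earlier:

	/* illustrative: encodes "ldr rt, [fp, #-40]" */
	static u32 __maybe_unused example_load(u8 rt)
	{
		return arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, ARM_FP, -40);
	}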
@@ -227,19 +348,10 @@ static void jit_fill_hole(void *area, unsigned int size)
#define STACK_ALIGNMENT 4
#endif
-/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
- * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
- * BPF_REG_FP and Tail call counts.
- */
-#define SCRATCH_SIZE 80
-
/* total stack size used in JITed code */
#define _STACK_SIZE (ctx->prog->aux->stack_depth + SCRATCH_SIZE)
#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
-/* Get the offset of eBPF REGISTERs stored on scratch space. */
-#define STACK_VAR(off) (STACK_SIZE - off)
-
#if __LINUX_ARM_ARCH__ < 7
static u16 imm_offset(u32 k, struct jit_ctx *ctx)
@@ -355,7 +467,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
- const u8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp = bpf2a32[TMP_REG_1];
#if __LINUX_ARM_ARCH__ == 7
if (elf_hwcap & HWCAP_IDIVA) {
@@ -402,44 +514,110 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}
-/* Checks whether BPF register is on scratch stack space or not. */
-static inline bool is_on_stack(u8 bpf_reg)
+/* Is the translated BPF register on stack? */
+static bool is_stacked(s8 reg)
+{
+ return reg < 0;
+}
+
+/* If a BPF register is on the stack (negative offset), load it to the
+ * supplied temporary register and return the temporary register
+ * for subsequent operations, otherwise just use the CPU register.
+ */
+static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
+{
+ if (is_stacked(reg)) {
+ emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
+ reg = tmp;
+ }
+ return reg;
+}
+
+static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
+ struct jit_ctx *ctx)
{
- static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5,
- BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT,
- BPF_REG_2, BPF_REG_FP};
- int i, reg_len = sizeof(stack_regs);
-
- for (i = 0 ; i < reg_len ; i++) {
- if (bpf_reg == stack_regs[i])
- return true;
+ if (is_stacked(reg[1])) {
+ if (__LINUX_ARM_ARCH__ >= 6 ||
+ ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
+ emit(ARM_LDRD_I(tmp[1], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+ } else {
+ emit(ARM_LDR_I(tmp[1], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+ emit(ARM_LDR_I(tmp[0], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
+ }
+ reg = tmp;
+ }
+ return reg;
+}
+
+/* If a BPF register is on the stack (negative offset), save the register
+ * back to the stack. If the source register is not the same, then
+ * move it into the correct register.
+ */
+static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
+{
+ if (is_stacked(reg))
+ emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
+ else if (reg != src)
+ emit(ARM_MOV_R(reg, src), ctx);
+}
+
+static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
+ struct jit_ctx *ctx)
+{
+ if (is_stacked(reg[1])) {
+ if (__LINUX_ARM_ARCH__ >= 6 ||
+ ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
+ emit(ARM_STRD_I(src[1], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+ } else {
+ emit(ARM_STR_I(src[1], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+ emit(ARM_STR_I(src[0], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
+ }
+ } else {
+ if (reg[1] != src[1])
+ emit(ARM_MOV_R(reg[1], src[1]), ctx);
+ if (reg[0] != src[0])
+ emit(ARM_MOV_R(reg[0], src[0]), ctx);
}
- return false;
}
-static inline void emit_a32_mov_i(const u8 dst, const u32 val,
- bool dstk, struct jit_ctx *ctx)
+static inline void emit_a32_mov_i(const s8 dst, const u32 val,
+ struct jit_ctx *ctx)
{
- const u8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp = bpf2a32[TMP_REG_1];
- if (dstk) {
+ if (is_stacked(dst)) {
emit_mov_i(tmp[1], val, ctx);
- emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx);
+ arm_bpf_put_reg32(dst, tmp[1], ctx);
} else {
emit_mov_i(dst, val, ctx);
}
}
+static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
+{
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
+
+ emit_mov_i(rd[1], (u32)val, ctx);
+ emit_mov_i(rd[0], val >> 32, ctx);
+
+ arm_bpf_put_reg64(dst, rd, ctx);
+}
+
/* Sign extended move */
-static inline void emit_a32_mov_i64(const bool is64, const u8 dst[],
- const u32 val, bool dstk,
- struct jit_ctx *ctx) {
- u32 hi = 0;
+static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
+ const u32 val, struct jit_ctx *ctx) {
+ u64 val64 = val;
if (is64 && (val & (1<<31)))
- hi = (u32)~0;
- emit_a32_mov_i(dst_lo, val, dstk, ctx);
- emit_a32_mov_i(dst_hi, hi, dstk, ctx);
+ val64 |= 0xffffffff00000000ULL;
+ emit_a32_mov_i64(dst, val64, ctx);
}
static inline void emit_a32_add_r(const u8 dst, const u8 src,
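
The accessor pair turns every "maybe spilled" operand into the same
three-step discipline: fetch into a temporary if stacked, operate on a
real register, write back only if stacked. A sketch with a hypothetical
unary increment emitter:

	static void emit_a32_inc(const s8 dst, struct jit_ctx *ctx)
	{
		const s8 *tmp = bpf2a32[TMP_REG_1];
		s8 rd;

		rd = arm_bpf_get_reg32(dst, tmp[0], ctx);	/* fetch if spilled */
		emit(ARM_ADD_I(rd, rd, 1), ctx);		/* real-register op */
		arm_bpf_put_reg32(dst, rd, ctx);		/* spill back if needed */
	}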
@@ -521,75 +699,94 @@ static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
/* ALU operation (32 bit)
* dst = dst (op) src
*/
-static inline void emit_a32_alu_r(const u8 dst, const u8 src,
- bool dstk, bool sstk,
+static inline void emit_a32_alu_r(const s8 dst, const s8 src,
struct jit_ctx *ctx, const bool is64,
const bool hi, const u8 op) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rn = sstk ? tmp[1] : src;
-
- if (sstk)
- emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx);
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ s8 rn, rd;
+ rn = arm_bpf_get_reg32(src, tmp[1], ctx);
+ rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
/* ALU operation */
- if (dstk) {
- emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
- emit_alu_r(tmp[0], rn, is64, hi, op, ctx);
- emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
- } else {
- emit_alu_r(dst, rn, is64, hi, op, ctx);
- }
+ emit_alu_r(rd, rn, is64, hi, op, ctx);
+ arm_bpf_put_reg32(dst, rd, ctx);
}
/* ALU operation (64 bit) */
-static inline void emit_a32_alu_r64(const bool is64, const u8 dst[],
- const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx,
+static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
+ const s8 src[], struct jit_ctx *ctx,
const u8 op) {
- emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op);
- if (is64)
- emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op);
- else
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
+
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
+ if (is64) {
+ const s8 *rs;
+
+ rs = arm_bpf_get_reg64(src, tmp2, ctx);
+
+ /* ALU operation */
+ emit_alu_r(rd[1], rs[1], true, false, op, ctx);
+ emit_alu_r(rd[0], rs[0], true, true, op, ctx);
+ } else {
+ s8 rs;
+
+ rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+
+ /* ALU operation */
+ emit_alu_r(rd[1], rs, true, false, op, ctx);
+ emit_a32_mov_i(rd[0], 0, ctx);
+ }
+
+ arm_bpf_put_reg64(dst, rd, ctx);
}
-/* dst = imm (4 bytes)*/
-static inline void emit_a32_mov_r(const u8 dst, const u8 src,
- bool dstk, bool sstk,
+/* dst = src (4 bytes) */
+static inline void emit_a32_mov_r(const s8 dst, const s8 src,
struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rt = sstk ? tmp[0] : src;
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ s8 rt;
- if (sstk)
- emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx);
- if (dstk)
- emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx);
- else
- emit(ARM_MOV_R(dst, rt), ctx);
+ rt = arm_bpf_get_reg32(src, tmp[0], ctx);
+ arm_bpf_put_reg32(dst, rt, ctx);
}
/* dst = src */
-static inline void emit_a32_mov_r64(const bool is64, const u8 dst[],
- const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx) {
- emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx);
- if (is64) {
+static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
+ const s8 src[],
+ struct jit_ctx *ctx) {
+ if (!is64) {
+ emit_a32_mov_r(dst_lo, src_lo, ctx);
+ /* Zero out high 4 bytes */
+ emit_a32_mov_i(dst_hi, 0, ctx);
+ } else if (__LINUX_ARM_ARCH__ < 6 &&
+ ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
/* complete 8 byte move */
- emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx);
+ emit_a32_mov_r(dst_lo, src_lo, ctx);
+ emit_a32_mov_r(dst_hi, src_hi, ctx);
+ } else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+
+ emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
+ emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
+ } else if (is_stacked(src_lo)) {
+ emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
+ } else if (is_stacked(dst_lo)) {
+ emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
} else {
- /* Zero out high 4 bytes */
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ emit(ARM_MOV_R(dst[0], src[0]), ctx);
+ emit(ARM_MOV_R(dst[1], src[1]), ctx);
}
}
/* Shift operations */
-static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk,
+static inline void emit_a32_alu_i(const s8 dst, const u32 val,
struct jit_ctx *ctx, const u8 op) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rd = dstk ? tmp[0] : dst;
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ s8 rd;
- if (dstk)
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+ rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
/* Do shift operation */
switch (op) {
@@ -604,303 +801,245 @@ static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk,
break;
}
- if (dstk)
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+ arm_bpf_put_reg32(dst, rd, ctx);
}
/* dst = ~dst (64 bit) */
-static inline void emit_a32_neg64(const u8 dst[], bool dstk,
+static inline void emit_a32_neg64(const s8 dst[],
struct jit_ctx *ctx){
- const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rd = dstk ? tmp[1] : dst[1];
- u8 rm = dstk ? tmp[0] : dst[0];
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *rd;
/* Setup Operand */
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do Negate Operation */
- emit(ARM_RSBS_I(rd, rd, 0), ctx);
- emit(ARM_RSC_I(rm, rm, 0), ctx);
+ emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
+ emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);
- if (dstk) {
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ arm_bpf_put_reg64(dst, rd, ctx);
}
/* dst = dst << src */
-static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
+ struct jit_ctx *ctx) {
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
+ s8 rt;
/* Setup Operands */
- u8 rt = sstk ? tmp2[1] : src_lo;
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
-
- if (sstk)
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do LSH operation */
emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
- emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
- emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
- emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
- emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx);
-
- if (dstk) {
- emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
- } else {
- emit(ARM_MOV_R(rd, ARM_LR), ctx);
- emit(ARM_MOV_R(rm, ARM_IP), ctx);
- }
+ emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
+ emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
+ emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);
+
+ arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
+ arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}
/* dst = dst >> src (signed)*/
-static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
+ struct jit_ctx *ctx) {
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
+ s8 rt;
+
/* Setup Operands */
- u8 rt = sstk ? tmp2[1] : src_lo;
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
-
- if (sstk)
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do the ARSH operation */
emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
- emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
- emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
+ emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
_emit(ARM_COND_MI, ARM_B(0), ctx);
- emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx);
- emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx);
- if (dstk) {
- emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
- } else {
- emit(ARM_MOV_R(rd, ARM_LR), ctx);
- emit(ARM_MOV_R(rm, ARM_IP), ctx);
- }
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
+ emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);
+
+ arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
+ arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}
/* dst = dst >> src */
-static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
+ struct jit_ctx *ctx) {
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
+ s8 rt;
+
/* Setup Operands */
- u8 rt = sstk ? tmp2[1] : src_lo;
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
-
- if (sstk)
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do RSH operation */
emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
- emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
- emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
- emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
- emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx);
- if (dstk) {
- emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
- } else {
- emit(ARM_MOV_R(rd, ARM_LR), ctx);
- emit(ARM_MOV_R(rm, ARM_IP), ctx);
- }
+ emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
+ emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);
+
+ arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
+ arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}
/* dst = dst << val */
-static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk,
- const u32 val, struct jit_ctx *ctx){
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
- /* Setup operands */
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
+static inline void emit_a32_lsh_i64(const s8 dst[],
+ const u32 val, struct jit_ctx *ctx){
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ /* Setup operands */
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do LSH operation */
if (val < 32) {
- emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx);
- emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx);
- emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx);
+ emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
+ emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
+ emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
} else {
if (val == 32)
- emit(ARM_MOV_R(rm, rd), ctx);
+ emit(ARM_MOV_R(rd[0], rd[1]), ctx);
else
- emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx);
- emit(ARM_EOR_R(rd, rd, rd), ctx);
+ emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
+ emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
}
- if (dstk) {
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ arm_bpf_put_reg64(dst, rd, ctx);
}
/* dst = dst >> val */
-static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk,
+static inline void emit_a32_rsh_i64(const s8 dst[],
const u32 val, struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
- /* Setup operands */
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ /* Setup operands */
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do LSR operation */
if (val < 32) {
- emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
- emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
- emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx);
+ emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
+ emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
+ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
} else if (val == 32) {
- emit(ARM_MOV_R(rd, rm), ctx);
- emit(ARM_MOV_I(rm, 0), ctx);
+ emit(ARM_MOV_R(rd[1], rd[0]), ctx);
+ emit(ARM_MOV_I(rd[0], 0), ctx);
} else {
- emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx);
- emit(ARM_MOV_I(rm, 0), ctx);
+ emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
+ emit(ARM_MOV_I(rd[0], 0), ctx);
}
- if (dstk) {
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ arm_bpf_put_reg64(dst, rd, ctx);
}
/* dst = dst >> val (signed) */
-static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk,
+static inline void emit_a32_arsh_i64(const s8 dst[],
const u32 val, struct jit_ctx *ctx){
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
- /* Setup operands */
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
-
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
+
+ /* Setup operands */
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do ARSH operation */
if (val < 32) {
- emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
- emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
- emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx);
+ emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
+ emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
+ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
} else if (val == 32) {
- emit(ARM_MOV_R(rd, rm), ctx);
- emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
+ emit(ARM_MOV_R(rd[1], rd[0]), ctx);
+ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
} else {
- emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx);
- emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
+ emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
+ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
}
- if (dstk) {
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ arm_bpf_put_reg64(dst, rd, ctx);
}
-static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
+ struct jit_ctx *ctx) {
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd, *rt;
+
/* Setup operands for multiplication */
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
- u8 rt = sstk ? tmp2[1] : src_lo;
- u8 rn = sstk ? tmp2[0] : src_hi;
-
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
- if (sstk) {
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
- emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx);
- }
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
+ rt = arm_bpf_get_reg64(src, tmp2, ctx);
/* Do Multiplication */
- emit(ARM_MUL(ARM_IP, rd, rn), ctx);
- emit(ARM_MUL(ARM_LR, rm, rt), ctx);
+ emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
+ emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
- emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
- emit(ARM_ADD_R(rm, ARM_LR, rm), ctx);
- if (dstk) {
- emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- } else {
- emit(ARM_MOV_R(rd, ARM_IP), ctx);
- }
+ emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
+ emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);
+
+ arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
+ arm_bpf_put_reg32(dst_hi, rd[0], ctx);
}
/* *(size *)(dst + off) = src */
-static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
- const s32 off, struct jit_ctx *ctx, const u8 sz){
- const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rd = dstk ? tmp[1] : dst;
-
- if (dstk)
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
- if (off) {
- emit_a32_mov_i(tmp[0], off, false, ctx);
- emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx);
+static inline void emit_str_r(const s8 dst, const s8 src[],
+ s32 off, struct jit_ctx *ctx, const u8 sz){
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ s32 off_max;
+ s8 rd;
+
+ rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
+
+ if (sz == BPF_H)
+ off_max = 0xff;
+ else
+ off_max = 0xfff;
+
+ if (off < 0 || off > off_max) {
+ emit_a32_mov_i(tmp[0], off, ctx);
+ emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
rd = tmp[0];
+ off = 0;
}
switch (sz) {
- case BPF_W:
- /* Store a Word */
- emit(ARM_STR_I(src, rd, 0), ctx);
+ case BPF_B:
+ /* Store a Byte */
+ emit(ARM_STRB_I(src_lo, rd, off), ctx);
break;
case BPF_H:
/* Store a HalfWord */
- emit(ARM_STRH_I(src, rd, 0), ctx);
+ emit(ARM_STRH_I(src_lo, rd, off), ctx);
break;
- case BPF_B:
- /* Store a Byte */
- emit(ARM_STRB_I(src, rd, 0), ctx);
+ case BPF_W:
+ /* Store a Word */
+ emit(ARM_STR_I(src_lo, rd, off), ctx);
+ break;
+ case BPF_DW:
+ /* Store a Double Word */
+ emit(ARM_STR_I(src_lo, rd, off), ctx);
+ emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
break;
}
}
/* dst = *(size*)(src + off) */
-static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
+static inline void emit_ldx_r(const s8 dst[], const s8 src,
s32 off, struct jit_ctx *ctx, const u8 sz){
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *rd = dstk ? tmp : dst;
- u8 rm = src;
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
+ s8 rm = src;
s32 off_max;
if (sz == BPF_H)
@@ -909,7 +1048,7 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
off_max = 0xfff;
if (off < 0 || off > off_max) {
- emit_a32_mov_i(tmp[0], off, false, ctx);
+ emit_a32_mov_i(tmp[0], off, ctx);
emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
rm = tmp[0];
off = 0;
@@ -921,17 +1060,17 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
case BPF_B:
/* Load a Byte */
emit(ARM_LDRB_I(rd[1], rm, off), ctx);
- emit_a32_mov_i(dst[0], 0, dstk, ctx);
+ emit_a32_mov_i(rd[0], 0, ctx);
break;
case BPF_H:
/* Load a HalfWord */
emit(ARM_LDRH_I(rd[1], rm, off), ctx);
- emit_a32_mov_i(dst[0], 0, dstk, ctx);
+ emit_a32_mov_i(rd[0], 0, ctx);
break;
case BPF_W:
/* Load a Word */
emit(ARM_LDR_I(rd[1], rm, off), ctx);
- emit_a32_mov_i(dst[0], 0, dstk, ctx);
+ emit_a32_mov_i(rd[0], 0, ctx);
break;
case BPF_DW:
/* Load a Double Word */
@@ -939,10 +1078,7 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
break;
}
- if (dstk)
- emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
- if (dstk && sz == BPF_DW)
- emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
+ arm_bpf_put_reg64(dst, rd, ctx);
}
/* Arithmetic Operation */
@@ -981,64 +1117,66 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
- const u8 *r2 = bpf2a32[BPF_REG_2];
- const u8 *r3 = bpf2a32[BPF_REG_3];
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
- const u8 *tcc = bpf2a32[TCALL_CNT];
+ const s8 *r2 = bpf2a32[BPF_REG_2];
+ const s8 *r3 = bpf2a32[BPF_REG_3];
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *tcc = bpf2a32[TCALL_CNT];
+ const s8 *tc;
const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset) - 2)
- u32 off, lo, hi;
+ u32 lo, hi;
+ s8 r_array, r_index;
+ int off;
/* if (index >= array->map.max_entries)
* goto out;
*/
+ BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
+ ARM_INST_LDST__IMM12);
off = offsetof(struct bpf_array, map.max_entries);
- /* array->map.max_entries */
- emit_a32_mov_i(tmp[1], off, false, ctx);
- emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
- emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
+ r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
/* index is 32-bit for arrays */
- emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
+ r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
+ /* array->map.max_entries */
+ emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
/* index >= array->map.max_entries */
- emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
+ emit(ARM_CMP_R(r_index, tmp[1]), ctx);
_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
+ /* tmp2[0] = array, tmp2[1] = index */
+
/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
* tail_call_cnt++;
*/
lo = (u32)MAX_TAIL_CALL_CNT;
hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
- emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
- emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
- emit(ARM_CMP_I(tmp[0], hi), ctx);
- _emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx);
+ tc = arm_bpf_get_reg64(tcc, tmp, ctx);
+ emit(ARM_CMP_I(tc[0], hi), ctx);
+ _emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
- emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx);
- emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx);
- emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
- emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
+ emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
+ emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
+ arm_bpf_put_reg64(tcc, tmp, ctx);
/* prog = array->ptrs[index]
* if (prog == NULL)
* goto out;
*/
- off = offsetof(struct bpf_array, ptrs);
- emit_a32_mov_i(tmp[1], off, false, ctx);
- emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
- emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx);
- emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
- emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx);
- emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx);
+ BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
+ off = imm8m(offsetof(struct bpf_array, ptrs));
+ emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
+ emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
emit(ARM_CMP_I(tmp[1], 0), ctx);
_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
/* goto *(prog->bpf_func + prologue_size); */
+ BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
+ ARM_INST_LDST__IMM12);
off = offsetof(struct bpf_prog, bpf_func);
- emit_a32_mov_i(tmp2[1], off, false, ctx);
- emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
+ emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
emit_bx_r(tmp[1], ctx);
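
The 64-bit limit check is the classic two-word compare: compare the high
words, compare the low words only if the high words were equal (CMPEQ),
then branch on unsigned-higher. A C model of what the three emitted
instructions test:

	static bool tail_call_limit_hit(u32 cnt_hi, u32 cnt_lo, u64 limit)
	{
		u32 hi = (u32)(limit >> 32), lo = (u32)limit;

		return cnt_hi > hi || (cnt_hi == hi && cnt_lo > lo);
	}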
@@ -1059,7 +1197,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
@@ -1074,7 +1212,7 @@ static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
@@ -1094,28 +1232,27 @@ static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
}
// push a 64-bit BPF register pair onto the stack
-static inline void emit_push_r64(const u8 src[], const u8 shift,
- struct jit_ctx *ctx)
+static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
{
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rt;
u16 reg_set = 0;
- emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), ctx);
- emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0]+shift)), ctx);
+ rt = arm_bpf_get_reg64(src, tmp2, ctx);
- reg_set = (1 << tmp2[1]) | (1 << tmp2[0]);
+ reg_set = (1 << rt[1]) | (1 << rt[0]);
emit(ARM_PUSH(reg_set), ctx);
}
static void build_prologue(struct jit_ctx *ctx)
{
- const u8 r0 = bpf2a32[BPF_REG_0][1];
- const u8 r2 = bpf2a32[BPF_REG_1][1];
- const u8 r3 = bpf2a32[BPF_REG_1][0];
- const u8 r4 = bpf2a32[BPF_REG_6][1];
- const u8 fplo = bpf2a32[BPF_REG_FP][1];
- const u8 fphi = bpf2a32[BPF_REG_FP][0];
- const u8 *tcc = bpf2a32[TCALL_CNT];
+ const s8 r0 = bpf2a32[BPF_REG_0][1];
+ const s8 r2 = bpf2a32[BPF_REG_1][1];
+ const s8 r3 = bpf2a32[BPF_REG_1][0];
+ const s8 r4 = bpf2a32[BPF_REG_6][1];
+ const s8 fplo = bpf2a32[BPF_REG_FP][1];
+ const s8 fphi = bpf2a32[BPF_REG_FP][0];
+ const s8 *tcc = bpf2a32[TCALL_CNT];
/* Save callee saved registers. */
#ifdef CONFIG_FRAME_POINTER
@@ -1136,8 +1273,8 @@ static void build_prologue(struct jit_ctx *ctx)
emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
/* Set up BPF prog stack base register */
- emit_a32_mov_r(fplo, ARM_IP, true, false, ctx);
- emit_a32_mov_i(fphi, 0, true, ctx);
+ emit_a32_mov_r(fplo, ARM_IP, ctx);
+ emit_a32_mov_i(fphi, 0, ctx);
/* mov r4, 0 */
emit(ARM_MOV_I(r4, 0), ctx);
@@ -1146,8 +1283,8 @@ static void build_prologue(struct jit_ctx *ctx)
emit(ARM_MOV_R(r3, r4), ctx);
emit(ARM_MOV_R(r2, r0), ctx);
/* Initialize Tail Count */
- emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx);
- emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx);
+ emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[0])), ctx);
+ emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[1])), ctx);
/* end of prologue */
}
@@ -1178,17 +1315,16 @@ static void build_epilogue(struct jit_ctx *ctx)
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
const u8 code = insn->code;
- const u8 *dst = bpf2a32[insn->dst_reg];
- const u8 *src = bpf2a32[insn->src_reg];
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *dst = bpf2a32[insn->dst_reg];
+ const s8 *src = bpf2a32[insn->src_reg];
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
const bool is64 = BPF_CLASS(code) == BPF_ALU64;
- const bool dstk = is_on_stack(insn->dst_reg);
- const bool sstk = is_on_stack(insn->src_reg);
- u8 rd, rt, rm, rn;
+ const s8 *rd, *rs;
+ s8 rd_lo, rt, rm, rn;
s32 jmp_offset;
#define check_imm(bits, imm) do { \
@@ -1211,11 +1347,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_ALU64 | BPF_MOV | BPF_X:
switch (BPF_SRC(code)) {
case BPF_X:
- emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx);
+ emit_a32_mov_r64(is64, dst, src, ctx);
break;
case BPF_K:
/* Sign-extend immediate value to destination reg */
- emit_a32_mov_i64(is64, dst, imm, dstk, ctx);
+ emit_a32_mov_se_i64(is64, dst, imm, ctx);
break;
}
break;
@@ -1255,8 +1391,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_ALU64 | BPF_XOR | BPF_X:
switch (BPF_SRC(code)) {
case BPF_X:
- emit_a32_alu_r64(is64, dst, src, dstk, sstk,
- ctx, BPF_OP(code));
+ emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
break;
case BPF_K:
/* Move immediate value to the temporary register
@@ -1265,9 +1400,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
* value into temporary reg and then it would be
* safe to do the operation on it.
*/
- emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
- emit_a32_alu_r64(is64, dst, tmp2, dstk, false,
- ctx, BPF_OP(code));
+ emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
+ emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
break;
}
break;
@@ -1277,26 +1411,22 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU | BPF_MOD | BPF_X:
- rt = src_lo;
- rd = dstk ? tmp2[1] : dst_lo;
- if (dstk)
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
switch (BPF_SRC(code)) {
case BPF_X:
- rt = sstk ? tmp2[0] : rt;
- if (sstk)
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)),
- ctx);
+ rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
break;
case BPF_K:
rt = tmp2[0];
- emit_a32_mov_i(rt, imm, false, ctx);
+ emit_a32_mov_i(rt, imm, ctx);
+ break;
+ default:
+ rt = src_lo;
break;
}
- emit_udivmod(rd, rd, rt, ctx, BPF_OP(code));
- if (dstk)
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
+ arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
+ emit_a32_mov_i(dst_hi, 0, ctx);
break;
case BPF_ALU64 | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_X:
@@ -1310,54 +1440,54 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
if (unlikely(imm > 31))
return -EINVAL;
if (imm)
- emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code));
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
+ emit_a32_mov_i(dst_hi, 0, ctx);
break;
/* dst = dst << imm */
case BPF_ALU64 | BPF_LSH | BPF_K:
if (unlikely(imm > 63))
return -EINVAL;
- emit_a32_lsh_i64(dst, dstk, imm, ctx);
+ emit_a32_lsh_i64(dst, imm, ctx);
break;
/* dst = dst >> imm */
case BPF_ALU64 | BPF_RSH | BPF_K:
if (unlikely(imm > 63))
return -EINVAL;
- emit_a32_rsh_i64(dst, dstk, imm, ctx);
+ emit_a32_rsh_i64(dst, imm, ctx);
break;
/* dst = dst << src */
case BPF_ALU64 | BPF_LSH | BPF_X:
- emit_a32_lsh_r64(dst, src, dstk, sstk, ctx);
+ emit_a32_lsh_r64(dst, src, ctx);
break;
/* dst = dst >> src */
case BPF_ALU64 | BPF_RSH | BPF_X:
- emit_a32_rsh_r64(dst, src, dstk, sstk, ctx);
+ emit_a32_rsh_r64(dst, src, ctx);
break;
/* dst = dst >> src (signed) */
case BPF_ALU64 | BPF_ARSH | BPF_X:
- emit_a32_arsh_r64(dst, src, dstk, sstk, ctx);
+ emit_a32_arsh_r64(dst, src, ctx);
break;
/* dst = dst >> imm (signed) */
case BPF_ALU64 | BPF_ARSH | BPF_K:
if (unlikely(imm > 63))
return -EINVAL;
- emit_a32_arsh_i64(dst, dstk, imm, ctx);
+ emit_a32_arsh_i64(dst, imm, ctx);
break;
/* dst = ~dst */
case BPF_ALU | BPF_NEG:
- emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code));
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
+ emit_a32_mov_i(dst_hi, 0, ctx);
break;
/* dst = ~dst (64 bit) */
case BPF_ALU64 | BPF_NEG:
- emit_a32_neg64(dst, dstk, ctx);
+ emit_a32_neg64(dst, ctx);
break;
/* dst = dst * src/imm */
case BPF_ALU64 | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_K:
switch (BPF_SRC(code)) {
case BPF_X:
- emit_a32_mul_r64(dst, src, dstk, sstk, ctx);
+ emit_a32_mul_r64(dst, src, ctx);
break;
case BPF_K:
/* Move immediate value to the temporary register
@@ -1366,8 +1496,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
* reg then it would be safe to do the operation
* on it.
*/
- emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
- emit_a32_mul_r64(dst, tmp2, dstk, false, ctx);
+ emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
+ emit_a32_mul_r64(dst, tmp2, ctx);
break;
}
break;
@@ -1375,25 +1505,20 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
/* dst = htobe(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
case BPF_ALU | BPF_END | BPF_FROM_BE:
- rd = dstk ? tmp[0] : dst_hi;
- rt = dstk ? tmp[1] : dst_lo;
- if (dstk) {
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
if (BPF_SRC(code) == BPF_FROM_LE)
goto emit_bswap_uxt;
switch (imm) {
case 16:
- emit_rev16(rt, rt, ctx);
+ emit_rev16(rd[1], rd[1], ctx);
goto emit_bswap_uxt;
case 32:
- emit_rev32(rt, rt, ctx);
+ emit_rev32(rd[1], rd[1], ctx);
goto emit_bswap_uxt;
case 64:
- emit_rev32(ARM_LR, rt, ctx);
- emit_rev32(rt, rd, ctx);
- emit(ARM_MOV_R(rd, ARM_LR), ctx);
+ emit_rev32(ARM_LR, rd[1], ctx);
+ emit_rev32(rd[1], rd[0], ctx);
+ emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
break;
}
goto exit;
@@ -1402,36 +1527,30 @@ emit_bswap_uxt:
case 16:
/* zero-extend 16 bits into 64 bits */
#if __LINUX_ARM_ARCH__ < 6
- emit_a32_mov_i(tmp2[1], 0xffff, false, ctx);
- emit(ARM_AND_R(rt, rt, tmp2[1]), ctx);
+ emit_a32_mov_i(tmp2[1], 0xffff, ctx);
+ emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
#else /* ARMv6+ */
- emit(ARM_UXTH(rt, rt), ctx);
+ emit(ARM_UXTH(rd[1], rd[1]), ctx);
#endif
- emit(ARM_EOR_R(rd, rd, rd), ctx);
+ emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
break;
case 32:
/* zero-extend 32 bits into 64 bits */
- emit(ARM_EOR_R(rd, rd, rd), ctx);
+ emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
break;
case 64:
/* nop */
break;
}
exit:
- if (dstk) {
- emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ arm_bpf_put_reg64(dst, rd, ctx);
break;
/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW:
{
- const struct bpf_insn insn1 = insn[1];
- u32 hi, lo = imm;
+ u64 val = (u32)imm | (u64)insn[1].imm << 32;
- hi = insn1.imm;
- emit_a32_mov_i(dst_lo, lo, dstk, ctx);
- emit_a32_mov_i(dst_hi, hi, dstk, ctx);
+ emit_a32_mov_i64(dst, val, ctx);
return 1;
}
@@ -1440,10 +1559,8 @@ exit:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_DW:
- rn = sstk ? tmp2[1] : src_lo;
- if (sstk)
- emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
- emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
+ rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+ emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
@@ -1453,18 +1570,15 @@ exit:
switch (BPF_SIZE(code)) {
case BPF_DW:
/* Sign-extend immediate value into temp reg */
- emit_a32_mov_i64(true, tmp2, imm, false, ctx);
- emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W);
- emit_str_r(dst_lo, tmp2[0], dstk, off+4, ctx, BPF_W);
+ emit_a32_mov_se_i64(true, tmp2, imm, ctx);
break;
case BPF_W:
case BPF_H:
case BPF_B:
- emit_a32_mov_i(tmp2[1], imm, false, ctx);
- emit_str_r(dst_lo, tmp2[1], dstk, off, ctx,
- BPF_SIZE(code));
+ emit_a32_mov_i(tmp2[1], imm, ctx);
break;
}
+ emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
break;
/* STX XADD: lock *(u32 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_W:
@@ -1476,25 +1590,9 @@ exit:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_DW:
- {
- u8 sz = BPF_SIZE(code);
-
- rn = sstk ? tmp2[1] : src_lo;
- rm = sstk ? tmp2[0] : src_hi;
- if (sstk) {
- emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
- }
-
- /* Store the value */
- if (BPF_SIZE(code) == BPF_DW) {
- emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W);
- emit_str_r(dst_lo, rm, dstk, off+4, ctx, BPF_W);
- } else {
- emit_str_r(dst_lo, rn, dstk, off, ctx, sz);
- }
+ rs = arm_bpf_get_reg64(src, tmp2, ctx);
+ emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
break;
- }
/* PC += off if dst == src */
/* PC += off if dst > src */
/* PC += off if dst >= src */
@@ -1518,12 +1616,8 @@ exit:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
/* Setup source registers */
- rm = sstk ? tmp2[0] : src_hi;
- rn = sstk ? tmp2[1] : src_lo;
- if (sstk) {
- emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
- }
+ rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
+ rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
goto go_jmp;
/* PC += off if dst == imm */
/* PC += off if dst > imm */
@@ -1552,18 +1646,13 @@ exit:
rm = tmp2[0];
rn = tmp2[1];
/* Sign-extend immediate value */
- emit_a32_mov_i64(true, tmp2, imm, false, ctx);
+ emit_a32_mov_se_i64(true, tmp2, imm, ctx);
go_jmp:
/* Setup destination register */
- rd = dstk ? tmp[0] : dst_hi;
- rt = dstk ? tmp[1] : dst_lo;
- if (dstk) {
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Check for the condition */
- emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code));
+ emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code));
/* Setup JUMP instruction */
jmp_offset = bpf2a32_offset(i+off, i, ctx);
@@ -1619,21 +1708,21 @@ go_jmp:
/* function call */
case BPF_JMP | BPF_CALL:
{
- const u8 *r0 = bpf2a32[BPF_REG_0];
- const u8 *r1 = bpf2a32[BPF_REG_1];
- const u8 *r2 = bpf2a32[BPF_REG_2];
- const u8 *r3 = bpf2a32[BPF_REG_3];
- const u8 *r4 = bpf2a32[BPF_REG_4];
- const u8 *r5 = bpf2a32[BPF_REG_5];
+ const s8 *r0 = bpf2a32[BPF_REG_0];
+ const s8 *r1 = bpf2a32[BPF_REG_1];
+ const s8 *r2 = bpf2a32[BPF_REG_2];
+ const s8 *r3 = bpf2a32[BPF_REG_3];
+ const s8 *r4 = bpf2a32[BPF_REG_4];
+ const s8 *r5 = bpf2a32[BPF_REG_5];
const u32 func = (u32)__bpf_call_base + (u32)imm;
- emit_a32_mov_r64(true, r0, r1, false, false, ctx);
- emit_a32_mov_r64(true, r1, r2, false, true, ctx);
- emit_push_r64(r5, 0, ctx);
- emit_push_r64(r4, 8, ctx);
- emit_push_r64(r3, 16, ctx);
+ emit_a32_mov_r64(true, r0, r1, ctx);
+ emit_a32_mov_r64(true, r1, r2, ctx);
+ emit_push_r64(r5, ctx);
+ emit_push_r64(r4, ctx);
+ emit_push_r64(r3, ctx);
- emit_a32_mov_i(tmp[1], func, false, ctx);
+ emit_a32_mov_i(tmp[1], func, ctx);
emit_blx_r(tmp[1], ctx);
emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean
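	/*
	 * Sketch of the convention above: per the 32-bit AAPCS, BPF R1
	 * and R2 travel in the core register pairs r0:r1 and r2:r3,
	 * while R3-R5 are pushed to the stack as three 64-bit slots,
	 * i.e. the 24 bytes the ADD to SP releases after the call.  The
	 * dropped offset arguments suggest emit_push_r64() now pushes
	 * in order rather than storing at fixed offsets.
	 */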
@@ -1745,6 +1834,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
+ ctx.cpu_architecture = cpu_architecture();
/* If we are not able to allocate memory for offsets[], then
* we must fall back to the interpreter
@@ -1844,7 +1934,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* there are 2 passes here */
bpf_jit_dump(prog->len, image_size, 2, ctx.target);
- set_memory_ro((unsigned long)header, header->pages);
+ bpf_jit_binary_lock_ro(header);
prog->bpf_func = (void *)ctx.target;
prog->jited = 1;
prog->jited_len = image_size;
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index d5cf5f6208aa..f4e58bcdaa43 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -77,11 +77,14 @@
#define ARM_INST_EOR_R 0x00200000
#define ARM_INST_EOR_I 0x02200000
-#define ARM_INST_LDRB_I 0x05d00000
+#define ARM_INST_LDST__U 0x00800000
+#define ARM_INST_LDST__IMM12 0x00000fff
+#define ARM_INST_LDRB_I 0x05500000
#define ARM_INST_LDRB_R 0x07d00000
-#define ARM_INST_LDRH_I 0x01d000b0
+#define ARM_INST_LDRD_I 0x014000d0
+#define ARM_INST_LDRH_I 0x015000b0
#define ARM_INST_LDRH_R 0x019000b0
-#define ARM_INST_LDR_I 0x05900000
+#define ARM_INST_LDR_I 0x05100000
#define ARM_INST_LDR_R 0x07900000
#define ARM_INST_LDM 0x08900000
@@ -124,9 +127,10 @@
#define ARM_INST_SBC_R 0x00c00000
#define ARM_INST_SBCS_R 0x00d00000
-#define ARM_INST_STR_I 0x05800000
-#define ARM_INST_STRB_I 0x05c00000
-#define ARM_INST_STRH_I 0x01c000b0
+#define ARM_INST_STR_I 0x05000000
+#define ARM_INST_STRB_I 0x05400000
+#define ARM_INST_STRD_I 0x014000f0
+#define ARM_INST_STRH_I 0x014000b0
#define ARM_INST_TST_R 0x01100000
#define ARM_INST_TST_I 0x03100000
@@ -183,17 +187,18 @@
#define ARM_EOR_R(rd, rn, rm) _AL3_R(ARM_INST_EOR, rd, rn, rm)
#define ARM_EOR_I(rd, rn, imm) _AL3_I(ARM_INST_EOR, rd, rn, imm)
-#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
- | ((off) & 0xfff))
-#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | (rt) << 12 | (rn) << 16 \
+#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | ARM_INST_LDST__U \
+ | (rt) << 12 | (rn) << 16 \
| (rm))
-#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
- | (off))
-#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \
+#define ARM_LDR_R_SI(rt, rn, rm, type, imm) \
+ (ARM_INST_LDR_R | ARM_INST_LDST__U \
+ | (rt) << 12 | (rn) << 16 \
+ | (imm) << 7 | (type) << 5 | (rm))
+#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | ARM_INST_LDST__U \
+ | (rt) << 12 | (rn) << 16 \
| (rm))
-#define ARM_LDRH_I(rt, rn, off) (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
- | (((off) & 0xf0) << 4) | ((off) & 0xf))
-#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \
+#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | ARM_INST_LDST__U \
+ | (rt) << 12 | (rn) << 16 \
| (rm))
#define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs))
@@ -254,13 +259,6 @@
#define ARM_SUBS_I(rd, rn, imm) _AL3_I(ARM_INST_SUBS, rd, rn, imm)
#define ARM_SBC_I(rd, rn, imm) _AL3_I(ARM_INST_SBC, rd, rn, imm)
-#define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \
- | ((off) & 0xfff))
-#define ARM_STRH_I(rt, rn, off) (ARM_INST_STRH_I | (rt) << 12 | (rn) << 16 \
- | (((off) & 0xf0) << 4) | ((off) & 0xf))
-#define ARM_STRB_I(rt, rn, off) (ARM_INST_STRB_I | (rt) << 12 | (rn) << 16 \
- | (((off) & 0xf0) << 4) | ((off) & 0xf))
-
#define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm)
#define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm)
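
The load/store rework above pulls the U (add/subtract) bit and the immediate field out of the fixed encodings and exposes them as ARM_INST_LDST__U and ARM_INST_LDST__IMM12, which is what lets the JIT emit negative offsets. A minimal sketch of an emitter for the word/byte forms (the helper name is illustrative, not taken from the patch; the halfword/doubleword forms use a split 8-bit immediate and would need a companion helper):

    static u32 arm_ldst_imm12(u32 op, s16 off)
    {
            /* U set: the 12-bit magnitude is added to the base register;
             * U clear: it is subtracted */
            if (off >= 0)
                    op |= ARM_INST_LDST__U;
            else
                    off = -off;
            return op | (off & ARM_INST_LDST__IMM12);
    }
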
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 2438b96004c1..fcc5bfec8bd1 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -110,7 +110,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
- register_persistent_clock(NULL, omap_read_persistent_clock64);
+ register_persistent_clock(omap_read_persistent_clock64);
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
return 0;
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ba13f793fbce..ed36dcab80f1 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -127,53 +127,6 @@ static int pxa_ssp_probe(struct platform_device *pdev)
if (IS_ERR(ssp->clk))
return PTR_ERR(ssp->clk);
- if (dev->of_node) {
- struct of_phandle_args dma_spec;
- struct device_node *np = dev->of_node;
- int ret;
-
- /*
- * FIXME: we should allocate the DMA channel from this
- * context and pass the channel down to the ssp users.
- * For now, we lookup the rx and tx indices manually
- */
-
- /* rx */
- ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells",
- 0, &dma_spec);
-
- if (ret) {
- dev_err(dev, "Can't parse dmas property\n");
- return -ENODEV;
- }
- ssp->drcmr_rx = dma_spec.args[0];
- of_node_put(dma_spec.np);
-
- /* tx */
- ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells",
- 1, &dma_spec);
- if (ret) {
- dev_err(dev, "Can't parse dmas property\n");
- return -ENODEV;
- }
- ssp->drcmr_tx = dma_spec.args[0];
- of_node_put(dma_spec.np);
- } else {
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (res == NULL) {
- dev_err(dev, "no SSP RX DRCMR defined\n");
- return -ENODEV;
- }
- ssp->drcmr_rx = res->start;
-
- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (res == NULL) {
- dev_err(dev, "no SSP TX DRCMR defined\n");
- return -ENODEV;
- }
- ssp->drcmr_tx = res->start;
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "no memory resource defined\n");
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 2da35735fa38..ee3d5c989a76 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/list.h>
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index e90cc8a08186..f8bd523d64d1 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -47,9 +47,6 @@
(unsigned long)(addr) + \
(size))
-/* Used as a marker in ARM_pc to note when we're in a jprobe. */
-#define JPROBE_MAGIC_ADDR 0xffffffff
-
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -289,8 +286,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
break;
case KPROBE_REENTER:
/* A nested probe was hit in FIQ; it is a BUG */
- pr_warn("Unrecoverable kprobe detected at %p.\n",
- p->addr);
+ pr_warn("Unrecoverable kprobe detected.\n");
+ dump_kprobe(p);
/* fall through */
default:
/* impossible cases */
@@ -303,10 +300,10 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
/*
* If we have no pre-handler or it returned 0, we
- * continue with normal processing. If we have a
- * pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry,
- * so get out doing nothing more here.
+ * continue with normal processing.  If we have a
+ * pre-handler and it returned non-zero, it modified
+ * the execution path, so there is no need to
+ * single-step.  Just reset the current kprobe and exit.
*/
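	/* For illustration (hypothetical handler, not part of this
	 * patch): a path-modifying pre-handler rewrites the saved PC
	 * and returns non-zero, which is the case described above:
	 *
	 *	static int redirect_pre(struct kprobe *p,
	 *				struct pt_regs *regs)
	 *	{
	 *		regs->ARM_pc = (unsigned long)fixup_func;
	 *		return 1;
	 *	}
	 */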
if (!p->pre_handler || !p->pre_handler(p, regs)) {
kcb->kprobe_status = KPROBE_HIT_SS;
@@ -315,20 +312,9 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
- reset_current_kprobe();
- }
- }
- } else if (cur) {
- /* We probably hit a jprobe. Call its break handler. */
- if (cur->break_handler && cur->break_handler(cur, regs)) {
- kcb->kprobe_status = KPROBE_HIT_SS;
- singlestep(cur, regs, kcb);
- if (cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
}
+ reset_current_kprobe();
}
- reset_current_kprobe();
} else {
/*
* The probe was removed and a race is in progress.
@@ -521,117 +507,6 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long sp_addr = regs->ARM_sp;
- long cpsr;
-
- kcb->jprobe_saved_regs = *regs;
- memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
- regs->ARM_pc = (long)jp->entry;
-
- cpsr = regs->ARM_cpsr | PSR_I_BIT;
-#ifdef CONFIG_THUMB2_KERNEL
- /* Set correct Thumb state in cpsr */
- if (regs->ARM_pc & 1)
- cpsr |= PSR_T_BIT;
- else
- cpsr &= ~PSR_T_BIT;
-#endif
- regs->ARM_cpsr = cpsr;
-
- preempt_disable();
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- __asm__ __volatile__ (
- /*
- * Setup an empty pt_regs. Fill SP and PC fields as
- * they're needed by longjmp_break_handler.
- *
- * We allocate some slack between the original SP and start of
- * our fabricated regs. To be precise we want to have worst case
- * covered which is STMFD with all 16 regs so we allocate 2 *
- * sizeof(struct_pt_regs)).
- *
- * This is to prevent any simulated instruction from writing
- * over the regs when they are accessing the stack.
- */
-#ifdef CONFIG_THUMB2_KERNEL
- "sub r0, %0, %1 \n\t"
- "mov sp, r0 \n\t"
-#else
- "sub sp, %0, %1 \n\t"
-#endif
- "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
- "str %0, [sp, %2] \n\t"
- "str r0, [sp, %3] \n\t"
- "mov r0, sp \n\t"
- "bl kprobe_handler \n\t"
-
- /*
- * Return to the context saved by setjmp_pre_handler
- * and restored by longjmp_break_handler.
- */
-#ifdef CONFIG_THUMB2_KERNEL
- "ldr lr, [sp, %2] \n\t" /* lr = saved sp */
- "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */
- "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */
- "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */
- /* rfe context */
- "ldmia sp, {r0 - r12} \n\t"
- "mov sp, lr \n\t"
- "ldr lr, [sp], #4 \n\t"
- "rfeia sp! \n\t"
-#else
- "ldr r0, [sp, %4] \n\t"
- "msr cpsr_cxsf, r0 \n\t"
- "ldmia sp, {r0 - pc} \n\t"
-#endif
- :
- : "r" (kcb->jprobe_saved_regs.ARM_sp),
- "I" (sizeof(struct pt_regs) * 2),
- "J" (offsetof(struct pt_regs, ARM_sp)),
- "J" (offsetof(struct pt_regs, ARM_pc)),
- "J" (offsetof(struct pt_regs, ARM_cpsr)),
- "J" (offsetof(struct pt_regs, ARM_lr))
- : "memory", "cc");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long stack_addr = kcb->jprobe_saved_regs.ARM_sp;
- long orig_sp = regs->ARM_sp;
- struct jprobe *jp = container_of(p, struct jprobe, kp);
-
- if (regs->ARM_pc == JPROBE_MAGIC_ADDR) {
- if (orig_sp != stack_addr) {
- struct pt_regs *saved_regs =
- (struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp;
- printk("current sp %lx does not match saved sp %lx\n",
- orig_sp, stack_addr);
- printk("Saved registers for jprobe %p\n", jp);
- show_regs(saved_regs);
- printk("Current registers\n");
- show_regs(regs);
- BUG();
- }
- *regs = kcb->jprobe_saved_regs;
- memcpy((void *)stack_addr, kcb->jprobes_stack,
- MIN_STACK_SIZE(stack_addr));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index 14db14152909..cc237fa9b90f 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -1461,7 +1461,6 @@ fail:
print_registers(&result_regs);
if (mem) {
- pr_err("current_stack=%p\n", current_stack);
pr_err("expected_memory:\n");
print_memory(expected_memory, mem_size);
pr_err("result_memory:\n");
diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c
index d1329f1ba4e4..bf992264060e 100644
--- a/arch/arm/probes/uprobes/core.c
+++ b/arch/arm/probes/uprobes/core.c
@@ -32,7 +32,7 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
int set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long vaddr)
{
- return uprobe_write_opcode(mm, vaddr,
+ return uprobe_write_opcode(auprobe, mm, vaddr,
__opcode_to_mem_arm(auprobe->bpinsn));
}
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile
index a81404c09d5d..94516c40ebd3 100644
--- a/arch/arm/vfp/Makefile
+++ b/arch/arm/vfp/Makefile
@@ -8,8 +8,5 @@
# asflags-y := -DDEBUG
KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft)
-LDFLAGS +=--no-warn-mismatch
-obj-y += vfp.o
-
-vfp-$(CONFIG_VFP) += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
+obj-y += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 35d0f823e823..dc7e6b50ef67 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -596,13 +596,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
}
/* Sanitise and restore the current VFP state from the provided structures. */
-int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
- struct user_vfp_exc __user *ufp_exc)
+int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
unsigned long fpexc;
- int err = 0;
/* Disable VFP to avoid corrupting the new thread state. */
vfp_flush_hwstate(thread);
@@ -611,17 +609,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
* Copy the floating point registers. There can be unused
* registers; see asm/hwcap.h for details.
*/
- err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
- sizeof(hwstate->fpregs));
+ memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
/*
* Copy the status and control register.
*/
- __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+ hwstate->fpscr = ufp->fpscr;
/*
* Sanitise and restore the exception registers.
*/
- __get_user_error(fpexc, &ufp_exc->fpexc, err);
+ fpexc = ufp_exc->fpexc;
/* Ensure the VFP is enabled. */
fpexc |= FPEXC_EN;
@@ -630,10 +627,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
hwstate->fpexc = fpexc;
- __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
- __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+ hwstate->fpinst = ufp_exc->fpinst;
+ hwstate->fpinst2 = ufp_exc->fpinst2;
- return err ? -EFAULT : 0;
+ return 0;
}
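
With the __user accessors gone, vfp_restore_user_hwstate() assumes its caller has already pulled both structures out of userspace in one shot. A sketch of the expected caller-side pattern in the signal-return path (the frame layout names are illustrative):

    struct user_vfp ufp;
    struct user_vfp_exc ufp_exc;

    /* one bulk copy replaces the piecemeal __get_user_error() calls */
    if (copy_from_user(&ufp, &frame->ufp, sizeof(ufp)) ||
        copy_from_user(&ufp_exc, &frame->ufp_exc, sizeof(ufp_exc)))
            return -EFAULT;

    return vfp_restore_user_hwstate(&ufp, &ufp_exc);
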
/*
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 42c090cf0292..29e75b47becd 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -24,6 +24,7 @@ config ARM64
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
+ select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK if !PREEMPT
@@ -42,8 +43,19 @@ config ARM64
select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
+ select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
+ select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
+ select ARCH_INLINE_SPIN_LOCK if !PREEMPT
+ select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
+ select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
+ select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
+ select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
+ select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
@@ -74,6 +86,7 @@ config ARM64
select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -95,7 +108,9 @@ config ARM64
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+ select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@@ -127,6 +142,7 @@ config ARM64
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RCU_TABLE_FREE
+ select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
@@ -264,13 +280,6 @@ config ARCH_SUPPORTS_UPROBES
config ARCH_PROC_KCORE_TEXT
def_bool y
-config MULTI_IRQ_HANDLER
- def_bool y
-
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "arch/arm64/Kconfig.platforms"
menu "Bus support"
@@ -756,7 +765,6 @@ config HOLES_IN_ZONE
def_bool y
depends on NUMA
-source kernel/Kconfig.preempt
source kernel/Kconfig.hz
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
@@ -775,6 +783,9 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_SELECT_MEMORY_MODEL
def_bool ARCH_SPARSEMEM_ENABLE
+config ARCH_FLATMEM_ENABLE
+ def_bool !NUMA
+
config HAVE_ARCH_PFN_VALID
def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
@@ -791,8 +802,6 @@ config ARCH_WANT_HUGE_PMD_SHARE
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
-source "mm/Kconfig"
-
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
---help---
@@ -1246,6 +1255,7 @@ config EFI
bool "UEFI runtime support"
depends on OF && !CPU_BIG_ENDIAN
depends on KERNEL_MODE_NEON
+ select ARCH_SUPPORTS_ACPI
select LIBFDT
select UCS2_STRING
select EFI_PARAMS_FROM_FDT
@@ -1273,10 +1283,6 @@ config DMI
endmenu
-menu "Userspace binary formats"
-
-source "fs/Kconfig.binfmt"
-
config COMPAT
bool "Kernel support for 32-bit EL0"
depends on ARM64_4K_PAGES || EXPERT
@@ -1300,8 +1306,6 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
-endmenu
-
menu "Power management options"
source "kernel/power/Kconfig"
@@ -1327,25 +1331,12 @@ source "drivers/cpufreq/Kconfig"
endmenu
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "drivers/firmware/Kconfig"
source "drivers/acpi/Kconfig"
-source "fs/Kconfig"
-
source "arch/arm64/kvm/Kconfig"
-source "arch/arm64/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
if CRYPTO
source "arch/arm64/crypto/Kconfig"
endif
-
-source "lib/Kconfig"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index cc6bd559af85..69c9170bdd24 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -1,6 +1,3 @@
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config ARM64_PTDUMP_CORE
def_bool n
@@ -97,5 +94,3 @@ config ARM64_RELOC_TEST
tristate "Relocation testing module"
source "drivers/hwtracing/coresight/Kconfig"
-
-endmenu
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index d5aeac351fc3..f34cd1b42d69 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -261,6 +261,7 @@ config ARCH_UNIPHIER
bool "Socionext UniPhier SoC Family"
select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
+ select RESET_CONTROLLER
help
This enables support for Socionext UniPhier SoC family.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 45272266dafb..efe61a2e4b5e 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
-LDFLAGS_vmlinux :=-p --no-undefined -X
+LDFLAGS_vmlinux :=--no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9
@@ -60,15 +60,16 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
AS += -EB
-LD += -EB
-LDFLAGS += -maarch64linuxb
+# Prefer the bare-metal ELF build target, but not all toolchains include
+# it, so fall back to the standard Linux target if needed.
+LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
AS += -EL
-LD += -EL
-LDFLAGS += -maarch64linux
+# Same as above: prefer the ELF target, but fall back to the Linux one if needed.
+LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
UTS_MACHINE := aarch64
endif
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index 4057197048dc..1a406a76c86a 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -482,9 +482,9 @@
status = "disabled";
};
- mdio_mux_iproc: mdio-mux@6602023c {
+ mdio_mux_iproc: mdio-mux@66020000 {
compatible = "brcm,mdio-mux-iproc";
- reg = <0x6602023c 0x14>;
+ reg = <0x66020000 0x250>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
index b203152ad67c..a70e8ddbd66f 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -278,9 +278,9 @@
#include "stingray-pinctrl.dtsi"
- mdio_mux_iproc: mdio-mux@2023c {
+ mdio_mux_iproc: mdio-mux@20000 {
compatible = "brcm,mdio-mux-iproc";
- reg = <0x0002023c 0x14>;
+ reg = <0x00020000 0x250>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
index 4dd06767f839..4664c33e0763 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
@@ -11,13 +11,14 @@ fman0: fman@1a00000 {
#size-cells = <1>;
cell-index = <0>;
compatible = "fsl,fman";
- ranges = <0x0 0x0 0x1a00000 0x100000>;
- reg = <0x0 0x1a00000 0x0 0x100000>;
+ ranges = <0x0 0x0 0x1a00000 0xfe000>;
+ reg = <0x0 0x1a00000 0x0 0xfe000>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 3 0>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x800 0x10>;
+ ptimer-handle = <&ptp_timer0>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -73,9 +74,11 @@ fman0: fman@1a00000 {
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfd000 0x1000>;
};
+};
- ptp_timer0: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer0: ptp-timer@1afe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x0 0x1afe000 0x0 0x1000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 3 0>;
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index 8d477dcbfa58..851190a719ea 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -1000,6 +1000,24 @@
reset-gpios = <&gpio11 1 0 >;
};
+ /* UFS */
+ ufs: ufs@ff3b0000 {
+ compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1";
+ /* 0: HCI standard */
+ /* 1: UFS SYS CTRL */
+ reg = <0x0 0xff3b0000 0x0 0x1000>,
+ <0x0 0xff3b1000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>;
+ clock-names = "ref_clk", "phy_clk";
+ freq-table-hz = <0 0>, <0 0>;
+ /* offset: 0x84; bit: 12 */
+ resets = <&crg_rst 0x84 12>;
+ reset-names = "rst";
+ };
+
/* SD */
dwmmc1: dwmmc1@ff37f000 {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
index 9c10030a07f8..c33adefc3061 100644
--- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
@@ -1049,7 +1049,74 @@
num-pins = <2>;
};
};
+ p0_mbigen_alg_a:interrupt-controller@d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x0 0xd0080000 0x0 0x10000>;
+ p0_mbigen_sec_a: intc_sec {
+ msi-parent = <&p0_its_dsa_a 0x40400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p0_mbigen_smmu_alg_a: intc_smmu_alg {
+ msi-parent = <&p0_its_dsa_a 0x40b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
+ p0_mbigen_alg_b:interrupt-controller@8,d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x8 0xd0080000 0x0 0x10000>;
+
+ p0_mbigen_sec_b: intc_sec {
+ msi-parent = <&p0_its_dsa_b 0x42400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p0_mbigen_smmu_alg_b: intc_smmu_alg {
+ msi-parent = <&p0_its_dsa_b 0x42b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
+ p1_mbigen_alg_a:interrupt-controller@400,d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x400 0xd0080000 0x0 0x10000>;
+
+ p1_mbigen_sec_a: intc_sec {
+ msi-parent = <&p1_its_dsa_a 0x44400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p1_mbigen_smmu_alg_a: intc_smmu_alg {
+ msi-parent = <&p1_its_dsa_a 0x44b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
+ p1_mbigen_alg_b:interrupt-controller@408,d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x408 0xd0080000 0x0 0x10000>;
+
+ p1_mbigen_sec_b: intc_sec {
+ msi-parent = <&p1_its_dsa_b 0x46400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p1_mbigen_smmu_alg_b: intc_smmu_alg {
+ msi-parent = <&p1_its_dsa_b 0x46b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
p0_mbigen_dsa_a: interrupt-controller@c0080000 {
compatible = "hisilicon,mbigen-v2";
reg = <0x0 0xc0080000 0x0 0x10000>;
@@ -1107,6 +1174,58 @@
hisilicon,broken-prefetch-cmd;
status = "disabled";
};
+ p0_smmu_alg_a: smmu_alg@d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p0_mbigen_smmu_alg_a>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
+ p0_smmu_alg_b: smmu_alg@8,d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x8 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p0_mbigen_smmu_alg_b>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
+ p1_smmu_alg_a: smmu_alg@400,d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x400 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p1_mbigen_smmu_alg_a>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
+ p1_smmu_alg_b: smmu_alg@408,d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x408 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p1_mbigen_smmu_alg_b>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
soc {
compatible = "simple-bus";
@@ -1603,5 +1722,170 @@
0x0 0 0 4 &mbigen_pcie2_a 671 4>;
status = "disabled";
};
+ p0_sec_a: crypto@d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x0 0xd0000000 0x0 0x10000
+ 0x0 0xd2000000 0x0 0x10000
+ 0x0 0xd2010000 0x0 0x10000
+ 0x0 0xd2020000 0x0 0x10000
+ 0x0 0xd2030000 0x0 0x10000
+ 0x0 0xd2040000 0x0 0x10000
+ 0x0 0xd2050000 0x0 0x10000
+ 0x0 0xd2060000 0x0 0x10000
+ 0x0 0xd2070000 0x0 0x10000
+ 0x0 0xd2080000 0x0 0x10000
+ 0x0 0xd2090000 0x0 0x10000
+ 0x0 0xd20a0000 0x0 0x10000
+ 0x0 0xd20b0000 0x0 0x10000
+ 0x0 0xd20c0000 0x0 0x10000
+ 0x0 0xd20d0000 0x0 0x10000
+ 0x0 0xd20e0000 0x0 0x10000
+ 0x0 0xd20f0000 0x0 0x10000
+ 0x0 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p0_mbigen_sec_a>;
+ iommus = <&p0_smmu_alg_a 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+ p0_sec_b: crypto@8,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x8 0xd0000000 0x0 0x10000
+ 0x8 0xd2000000 0x0 0x10000
+ 0x8 0xd2010000 0x0 0x10000
+ 0x8 0xd2020000 0x0 0x10000
+ 0x8 0xd2030000 0x0 0x10000
+ 0x8 0xd2040000 0x0 0x10000
+ 0x8 0xd2050000 0x0 0x10000
+ 0x8 0xd2060000 0x0 0x10000
+ 0x8 0xd2070000 0x0 0x10000
+ 0x8 0xd2080000 0x0 0x10000
+ 0x8 0xd2090000 0x0 0x10000
+ 0x8 0xd20a0000 0x0 0x10000
+ 0x8 0xd20b0000 0x0 0x10000
+ 0x8 0xd20c0000 0x0 0x10000
+ 0x8 0xd20d0000 0x0 0x10000
+ 0x8 0xd20e0000 0x0 0x10000
+ 0x8 0xd20f0000 0x0 0x10000
+ 0x8 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p0_mbigen_sec_b>;
+ iommus = <&p0_smmu_alg_b 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+ p1_sec_a: crypto@400,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x400 0xd0000000 0x0 0x10000
+ 0x400 0xd2000000 0x0 0x10000
+ 0x400 0xd2010000 0x0 0x10000
+ 0x400 0xd2020000 0x0 0x10000
+ 0x400 0xd2030000 0x0 0x10000
+ 0x400 0xd2040000 0x0 0x10000
+ 0x400 0xd2050000 0x0 0x10000
+ 0x400 0xd2060000 0x0 0x10000
+ 0x400 0xd2070000 0x0 0x10000
+ 0x400 0xd2080000 0x0 0x10000
+ 0x400 0xd2090000 0x0 0x10000
+ 0x400 0xd20a0000 0x0 0x10000
+ 0x400 0xd20b0000 0x0 0x10000
+ 0x400 0xd20c0000 0x0 0x10000
+ 0x400 0xd20d0000 0x0 0x10000
+ 0x400 0xd20e0000 0x0 0x10000
+ 0x400 0xd20f0000 0x0 0x10000
+ 0x400 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p1_mbigen_sec_a>;
+ iommus = <&p1_smmu_alg_a 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+ p1_sec_b: crypto@408,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x408 0xd0000000 0x0 0x10000
+ 0x408 0xd2000000 0x0 0x10000
+ 0x408 0xd2010000 0x0 0x10000
+ 0x408 0xd2020000 0x0 0x10000
+ 0x408 0xd2030000 0x0 0x10000
+ 0x408 0xd2040000 0x0 0x10000
+ 0x408 0xd2050000 0x0 0x10000
+ 0x408 0xd2060000 0x0 0x10000
+ 0x408 0xd2070000 0x0 0x10000
+ 0x408 0xd2080000 0x0 0x10000
+ 0x408 0xd2090000 0x0 0x10000
+ 0x408 0xd20a0000 0x0 0x10000
+ 0x408 0xd20b0000 0x0 0x10000
+ 0x408 0xd20c0000 0x0 0x10000
+ 0x408 0xd20d0000 0x0 0x10000
+ 0x408 0xd20e0000 0x0 0x10000
+ 0x408 0xd20f0000 0x0 0x10000
+ 0x408 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p1_mbigen_sec_b>;
+ iommus = <&p1_smmu_alg_b 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+
};
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f9a186f6af8a..514787d45dee 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -193,6 +193,7 @@ CONFIG_SCSI_HISI_SAS=y
CONFIG_SCSI_HISI_SAS_PCI=y
CONFIG_SCSI_UFSHCD=m
CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFS_HISI=y
CONFIG_SCSI_UFS_QCOM=m
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
@@ -372,8 +373,8 @@ CONFIG_UNIPHIER_WATCHDOG=y
CONFIG_BCM2835_WDT=y
CONFIG_MFD_AXP20X_RSB=y
CONFIG_MFD_CROS_EC=y
-CONFIG_MFD_CROS_EC_I2C=y
-CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_CROS_EC_I2C=y
+CONFIG_CROS_EC_SPI=y
CONFIG_MFD_CROS_EC_CHARDEV=m
CONFIG_MFD_EXYNOS_LPASS=m
CONFIG_MFD_HI6421_PMIC=y
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index 88f5aef7934c..e3a375c4cb83 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -19,33 +19,24 @@
* u32 *macp, u8 const rk[], u32 rounds);
*/
ENTRY(ce_aes_ccm_auth_data)
- frame_push 7
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
-
- ldr w25, [x22] /* leftover from prev round? */
+ ldr w8, [x3] /* leftover from prev round? */
ld1 {v0.16b}, [x0] /* load mac */
- cbz w25, 1f
- sub w25, w25, #16
+ cbz w8, 1f
+ sub w8, w8, #16
eor v1.16b, v1.16b, v1.16b
-0: ldrb w7, [x20], #1 /* get 1 byte of input */
- subs w21, w21, #1
- add w25, w25, #1
+0: ldrb w7, [x1], #1 /* get 1 byte of input */
+ subs w2, w2, #1
+ add w8, w8, #1
ins v1.b[0], w7
ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */
beq 8f /* out of input? */
- cbnz w25, 0b
+ cbnz w8, 0b
eor v0.16b, v0.16b, v1.16b
-1: ld1 {v3.4s}, [x23] /* load first round key */
- prfm pldl1strm, [x20]
- cmp w24, #12 /* which key size? */
- add x6, x23, #16
- sub w7, w24, #2 /* modified # of rounds */
+1: ld1 {v3.4s}, [x4] /* load first round key */
+ prfm pldl1strm, [x1]
+ cmp w5, #12 /* which key size? */
+ add x6, x4, #16
+ sub w7, w5, #2 /* modified # of rounds */
bmi 2f
bne 5f
mov v5.16b, v3.16b
@@ -64,43 +55,33 @@ ENTRY(ce_aes_ccm_auth_data)
ld1 {v5.4s}, [x6], #16 /* load next round key */
bpl 3b
aese v0.16b, v4.16b
- subs w21, w21, #16 /* last data? */
+ subs w2, w2, #16 /* last data? */
eor v0.16b, v0.16b, v5.16b /* final round */
bmi 6f
- ld1 {v1.16b}, [x20], #16 /* load next input block */
+ ld1 {v1.16b}, [x1], #16 /* load next input block */
eor v0.16b, v0.16b, v1.16b /* xor with mac */
- beq 6f
-
- if_will_cond_yield_neon
- st1 {v0.16b}, [x19] /* store mac */
- do_cond_yield_neon
- ld1 {v0.16b}, [x19] /* reload mac */
- endif_yield_neon
-
- b 1b
-6: st1 {v0.16b}, [x19] /* store mac */
+ bne 1b
+6: st1 {v0.16b}, [x0] /* store mac */
beq 10f
- adds w21, w21, #16
+ adds w2, w2, #16
beq 10f
- mov w25, w21
-7: ldrb w7, [x20], #1
+ mov w8, w2
+7: ldrb w7, [x1], #1
umov w6, v0.b[0]
eor w6, w6, w7
- strb w6, [x19], #1
- subs w21, w21, #1
+ strb w6, [x0], #1
+ subs w2, w2, #1
beq 10f
ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
b 7b
-8: mov w7, w25
- add w25, w25, #16
+8: mov w7, w8
+ add w8, w8, #16
9: ext v1.16b, v1.16b, v1.16b, #1
adds w7, w7, #1
bne 9b
eor v0.16b, v0.16b, v1.16b
- st1 {v0.16b}, [x19]
-10: str w25, [x22]
-
- frame_pop
+ st1 {v0.16b}, [x0]
+10: str w8, [x3]
ret
ENDPROC(ce_aes_ccm_auth_data)
@@ -145,29 +126,19 @@ ENTRY(ce_aes_ccm_final)
ENDPROC(ce_aes_ccm_final)
.macro aes_ccm_do_crypt,enc
- frame_push 8
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
- mov x25, x6
-
- ldr x26, [x25, #8] /* load lower ctr */
- ld1 {v0.16b}, [x24] /* load mac */
-CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
+ ldr x8, [x6, #8] /* load lower ctr */
+ ld1 {v0.16b}, [x5] /* load mac */
+CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */
0: /* outer loop */
- ld1 {v1.8b}, [x25] /* load upper ctr */
- prfm pldl1strm, [x20]
- add x26, x26, #1
- rev x9, x26
- cmp w23, #12 /* which key size? */
- sub w7, w23, #2 /* get modified # of rounds */
+ ld1 {v1.8b}, [x6] /* load upper ctr */
+ prfm pldl1strm, [x1]
+ add x8, x8, #1
+ rev x9, x8
+ cmp w4, #12 /* which key size? */
+ sub w7, w4, #2 /* get modified # of rounds */
ins v1.d[1], x9 /* no carry in lower ctr */
- ld1 {v3.4s}, [x22] /* load first round key */
- add x10, x22, #16
+ ld1 {v3.4s}, [x3] /* load first round key */
+ add x10, x3, #16
bmi 1f
bne 4f
mov v5.16b, v3.16b
@@ -194,9 +165,9 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
bpl 2b
aese v0.16b, v4.16b
aese v1.16b, v4.16b
- subs w21, w21, #16
- bmi 7f /* partial block? */
- ld1 {v2.16b}, [x20], #16 /* load next input block */
+ subs w2, w2, #16
+ bmi 6f /* partial block? */
+ ld1 {v2.16b}, [x1], #16 /* load next input block */
.if \enc == 1
eor v2.16b, v2.16b, v5.16b /* final round enc+mac */
eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */
@@ -205,29 +176,18 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
eor v1.16b, v2.16b, v5.16b /* final round enc */
.endif
eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
- st1 {v1.16b}, [x19], #16 /* write output block */
- beq 5f
-
- if_will_cond_yield_neon
- st1 {v0.16b}, [x24] /* store mac */
- do_cond_yield_neon
- ld1 {v0.16b}, [x24] /* reload mac */
- endif_yield_neon
-
- b 0b
-5:
-CPU_LE( rev x26, x26 )
- st1 {v0.16b}, [x24] /* store mac */
- str x26, [x25, #8] /* store lsb end of ctr (BE) */
-
-6: frame_pop
- ret
-
-7: eor v0.16b, v0.16b, v5.16b /* final round mac */
+ st1 {v1.16b}, [x0], #16 /* write output block */
+ bne 0b
+CPU_LE( rev x8, x8 )
+ st1 {v0.16b}, [x5] /* store mac */
+ str x8, [x6, #8] /* store lsb end of ctr (BE) */
+5: ret
+
+6: eor v0.16b, v0.16b, v5.16b /* final round mac */
eor v1.16b, v1.16b, v5.16b /* final round enc */
- st1 {v0.16b}, [x24] /* store mac */
- add w21, w21, #16 /* process partial tail block */
-8: ldrb w9, [x20], #1 /* get 1 byte of input */
+ st1 {v0.16b}, [x5] /* store mac */
+ add w2, w2, #16 /* process partial tail block */
+7: ldrb w9, [x1], #1 /* get 1 byte of input */
umov w6, v1.b[0] /* get top crypted ctr byte */
umov w7, v0.b[0] /* get top mac byte */
.if \enc == 1
@@ -237,13 +197,13 @@ CPU_LE( rev x26, x26 )
eor w9, w9, w6
eor w7, w7, w9
.endif
- strb w9, [x19], #1 /* store out byte */
- strb w7, [x24], #1 /* store mac byte */
- subs w21, w21, #1
- beq 6b
+ strb w9, [x0], #1 /* store out byte */
+ strb w7, [x5], #1 /* store mac byte */
+ subs w2, w2, #1
+ beq 5b
ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */
ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */
- b 8b
+ b 7b
.endm
/*
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index e3e50950a863..adcb83eb683c 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -567,7 +567,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "cmac(aes)",
.base.cra_driver_name = "cmac-aes-" MODE,
.base.cra_priority = PRIO,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
2 * AES_BLOCK_SIZE,
@@ -583,7 +582,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "xcbc(aes)",
.base.cra_driver_name = "xcbc-aes-" MODE,
.base.cra_priority = PRIO,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
2 * AES_BLOCK_SIZE,
@@ -599,7 +597,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "cbcmac(aes)",
.base.cra_driver_name = "cbcmac-aes-" MODE,
.base.cra_priority = PRIO,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx),
.base.cra_module = THIS_MODULE,
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index dcffb9e77589..1b319b716d5e 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 PMULL instructions.
*
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -46,6 +46,19 @@
ss3 .req v26
ss4 .req v27
+ XL2 .req v8
+ XM2 .req v9
+ XH2 .req v10
+ XL3 .req v11
+ XM3 .req v12
+ XH3 .req v13
+ TT3 .req v14
+ TT4 .req v15
+ HH .req v16
+ HH3 .req v17
+ HH4 .req v18
+ HH34 .req v19
+
.text
.arch armv8-a+crypto
@@ -134,11 +147,25 @@
.endm
.macro __pmull_pre_p64
+ add x8, x3, #16
+ ld1 {HH.2d-HH4.2d}, [x8]
+
+ trn1 SHASH2.2d, SHASH.2d, HH.2d
+ trn2 T1.2d, SHASH.2d, HH.2d
+ eor SHASH2.16b, SHASH2.16b, T1.16b
+
+ trn1 HH34.2d, HH3.2d, HH4.2d
+ trn2 T1.2d, HH3.2d, HH4.2d
+ eor HH34.16b, HH34.16b, T1.16b
+
movi MASK.16b, #0xe1
shl MASK.2d, MASK.2d, #57
.endm
.macro __pmull_pre_p8
+ ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
+ eor SHASH2.16b, SHASH2.16b, SHASH.16b
+
// k00_16 := 0x0000000000000000_000000000000ffff
// k32_48 := 0x00000000ffffffff_0000ffffffffffff
movi k32_48.2d, #0xffffffff
@@ -213,31 +240,88 @@
.endm
.macro __pmull_ghash, pn
- frame_push 5
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
-
-0: ld1 {SHASH.2d}, [x22]
- ld1 {XL.2d}, [x20]
- ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
- eor SHASH2.16b, SHASH2.16b, SHASH.16b
+ ld1 {SHASH.2d}, [x3]
+ ld1 {XL.2d}, [x1]
__pmull_pre_\pn
/* do the head block first, if supplied */
- cbz x23, 1f
- ld1 {T1.2d}, [x23]
- mov x23, xzr
- b 2f
+ cbz x4, 0f
+ ld1 {T1.2d}, [x4]
+ mov x4, xzr
+ b 3f
+
+0: .ifc \pn, p64
+ tbnz w0, #0, 2f // skip until #blocks is a
+ tbnz w0, #1, 2f // round multiple of 4
+
+1: ld1 {XM3.16b-TT4.16b}, [x2], #64
+
+ sub w0, w0, #4
+
+ rev64 T1.16b, XM3.16b
+ rev64 T2.16b, XH3.16b
+ rev64 TT4.16b, TT4.16b
+ rev64 TT3.16b, TT3.16b
+
+ ext IN1.16b, TT4.16b, TT4.16b, #8
+ ext XL3.16b, TT3.16b, TT3.16b, #8
+
+ eor TT4.16b, TT4.16b, IN1.16b
+ pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
+ pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
+ pmull XM2.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0)
+
+ eor TT3.16b, TT3.16b, XL3.16b
+ pmull2 XH3.1q, HH.2d, XL3.2d // a1 * b1
+ pmull XL3.1q, HH.1d, XL3.1d // a0 * b0
+ pmull2 XM3.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0)
+
+ ext IN1.16b, T2.16b, T2.16b, #8
+ eor XL2.16b, XL2.16b, XL3.16b
+ eor XH2.16b, XH2.16b, XH3.16b
+ eor XM2.16b, XM2.16b, XM3.16b
+
+ eor T2.16b, T2.16b, IN1.16b
+ pmull2 XH3.1q, HH3.2d, IN1.2d // a1 * b1
+ pmull XL3.1q, HH3.1d, IN1.1d // a0 * b0
+ pmull XM3.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0)
-1: ld1 {T1.2d}, [x21], #16
- sub w19, w19, #1
+ eor XL2.16b, XL2.16b, XL3.16b
+ eor XH2.16b, XH2.16b, XH3.16b
+ eor XM2.16b, XM2.16b, XM3.16b
-2: /* multiply XL by SHASH in GF(2^128) */
+ ext IN1.16b, T1.16b, T1.16b, #8
+ ext TT3.16b, XL.16b, XL.16b, #8
+ eor XL.16b, XL.16b, IN1.16b
+ eor T1.16b, T1.16b, TT3.16b
+
+ pmull2 XH.1q, HH4.2d, XL.2d // a1 * b1
+ eor T1.16b, T1.16b, XL.16b
+ pmull XL.1q, HH4.1d, XL.1d // a0 * b0
+ pmull2 XM.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0)
+
+ eor XL.16b, XL.16b, XL2.16b
+ eor XH.16b, XH.16b, XH2.16b
+ eor XM.16b, XM.16b, XM2.16b
+
+ eor T2.16b, XL.16b, XH.16b
+ ext T1.16b, XL.16b, XH.16b, #8
+ eor XM.16b, XM.16b, T2.16b
+
+ __pmull_reduce_p64
+
+ eor T2.16b, T2.16b, XH.16b
+ eor XL.16b, XL.16b, T2.16b
+
+ cbz w0, 5f
+ b 1b
+ .endif
+
+2: ld1 {T1.2d}, [x2], #16
+ sub w0, w0, #1
+
+3: /* multiply XL by SHASH in GF(2^128) */
CPU_LE( rev64 T1.16b, T1.16b )
ext T2.16b, XL.16b, XL.16b, #8
@@ -250,7 +334,7 @@ CPU_LE( rev64 T1.16b, T1.16b )
__pmull_\pn XL, XL, SHASH // a0 * b0
__pmull_\pn XM, T1, SHASH2 // (a1 + a0)(b1 + b0)
- eor T2.16b, XL.16b, XH.16b
+4: eor T2.16b, XL.16b, XH.16b
ext T1.16b, XL.16b, XH.16b, #8
eor XM.16b, XM.16b, T2.16b
@@ -259,18 +343,9 @@ CPU_LE( rev64 T1.16b, T1.16b )
eor T2.16b, T2.16b, XH.16b
eor XL.16b, XL.16b, T2.16b
- cbz w19, 3f
+ cbnz w0, 0b
- if_will_cond_yield_neon
- st1 {XL.2d}, [x20]
- do_cond_yield_neon
- b 0b
- endif_yield_neon
-
- b 1b
-
-3: st1 {XL.2d}, [x20]
- frame_pop
+5: st1 {XL.2d}, [x1]
ret
.endm
@@ -286,9 +361,10 @@ ENTRY(pmull_ghash_update_p8)
__pmull_ghash p8
ENDPROC(pmull_ghash_update_p8)
- KS .req v8
- CTR .req v9
- INP .req v10
+ KS0 .req v12
+ KS1 .req v13
+ INP0 .req v14
+ INP1 .req v15
.macro load_round_keys, rounds, rk
cmp \rounds, #12
@@ -322,142 +398,153 @@ ENDPROC(pmull_ghash_update_p8)
.endm
.macro pmull_gcm_do_crypt, enc
- frame_push 10
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
- mov x25, x6
- mov x26, x7
- .if \enc == 1
- ldr x27, [sp, #96] // first stacked arg
- .endif
-
- ldr x28, [x24, #8] // load lower counter
-CPU_LE( rev x28, x28 )
-
-0: mov x0, x25
- load_round_keys w26, x0
- ld1 {SHASH.2d}, [x23]
- ld1 {XL.2d}, [x20]
+ ld1 {SHASH.2d}, [x4], #16
+ ld1 {HH.2d}, [x4]
+ ld1 {XL.2d}, [x1]
+ ldr x8, [x5, #8] // load lower counter
movi MASK.16b, #0xe1
- ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
+ trn1 SHASH2.2d, SHASH.2d, HH.2d
+ trn2 T1.2d, SHASH.2d, HH.2d
+CPU_LE( rev x8, x8 )
shl MASK.2d, MASK.2d, #57
- eor SHASH2.16b, SHASH2.16b, SHASH.16b
+ eor SHASH2.16b, SHASH2.16b, T1.16b
.if \enc == 1
- ld1 {KS.16b}, [x27]
+ ldr x10, [sp]
+ ld1 {KS0.16b-KS1.16b}, [x10]
.endif
-1: ld1 {CTR.8b}, [x24] // load upper counter
- ld1 {INP.16b}, [x22], #16
- rev x9, x28
- add x28, x28, #1
- sub w19, w19, #1
- ins CTR.d[1], x9 // set lower counter
+ cbnz x6, 4f
+
+0: ld1 {INP0.16b-INP1.16b}, [x3], #32
+
+ rev x9, x8
+ add x11, x8, #1
+ add x8, x8, #2
.if \enc == 1
- eor INP.16b, INP.16b, KS.16b // encrypt input
- st1 {INP.16b}, [x21], #16
+ eor INP0.16b, INP0.16b, KS0.16b // encrypt input
+ eor INP1.16b, INP1.16b, KS1.16b
.endif
- rev64 T1.16b, INP.16b
+ ld1 {KS0.8b}, [x5] // load upper counter
+ rev x11, x11
+ sub w0, w0, #2
+ mov KS1.8b, KS0.8b
+ ins KS0.d[1], x9 // set lower counter
+ ins KS1.d[1], x11
- cmp w26, #12
- b.ge 4f // AES-192/256?
+ rev64 T1.16b, INP1.16b
-2: enc_round CTR, v21
+ cmp w7, #12
+ b.ge 2f // AES-192/256?
- ext T2.16b, XL.16b, XL.16b, #8
+1: enc_round KS0, v21
ext IN1.16b, T1.16b, T1.16b, #8
- enc_round CTR, v22
+ enc_round KS1, v21
+ pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
+
+ enc_round KS0, v22
+ eor T1.16b, T1.16b, IN1.16b
+
+ enc_round KS1, v22
+ pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
+ enc_round KS0, v23
+ pmull XM2.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
+
+ enc_round KS1, v23
+ rev64 T1.16b, INP0.16b
+ ext T2.16b, XL.16b, XL.16b, #8
+
+ enc_round KS0, v24
+ ext IN1.16b, T1.16b, T1.16b, #8
eor T1.16b, T1.16b, T2.16b
- eor XL.16b, XL.16b, IN1.16b
- enc_round CTR, v23
+ enc_round KS1, v24
+ eor XL.16b, XL.16b, IN1.16b
- pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
+ enc_round KS0, v25
eor T1.16b, T1.16b, XL.16b
- enc_round CTR, v24
+ enc_round KS1, v25
+ pmull2 XH.1q, HH.2d, XL.2d // a1 * b1
+
+ enc_round KS0, v26
+ pmull XL.1q, HH.1d, XL.1d // a0 * b0
- pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
- pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
+ enc_round KS1, v26
+ pmull2 XM.1q, SHASH2.2d, T1.2d // (a1 + a0)(b1 + b0)
- enc_round CTR, v25
+ enc_round KS0, v27
+ eor XL.16b, XL.16b, XL2.16b
+ eor XH.16b, XH.16b, XH2.16b
+ enc_round KS1, v27
+ eor XM.16b, XM.16b, XM2.16b
ext T1.16b, XL.16b, XH.16b, #8
+
+ enc_round KS0, v28
eor T2.16b, XL.16b, XH.16b
eor XM.16b, XM.16b, T1.16b
- enc_round CTR, v26
-
+ enc_round KS1, v28
eor XM.16b, XM.16b, T2.16b
- pmull T2.1q, XL.1d, MASK.1d
- enc_round CTR, v27
+ enc_round KS0, v29
+ pmull T2.1q, XL.1d, MASK.1d
+ enc_round KS1, v29
mov XH.d[0], XM.d[1]
mov XM.d[1], XL.d[0]
- enc_round CTR, v28
-
+ aese KS0.16b, v30.16b
eor XL.16b, XM.16b, T2.16b
- enc_round CTR, v29
-
+ aese KS1.16b, v30.16b
ext T2.16b, XL.16b, XL.16b, #8
- aese CTR.16b, v30.16b
-
+ eor KS0.16b, KS0.16b, v31.16b
pmull XL.1q, XL.1d, MASK.1d
eor T2.16b, T2.16b, XH.16b
- eor KS.16b, CTR.16b, v31.16b
-
+ eor KS1.16b, KS1.16b, v31.16b
eor XL.16b, XL.16b, T2.16b
.if \enc == 0
- eor INP.16b, INP.16b, KS.16b
- st1 {INP.16b}, [x21], #16
+ eor INP0.16b, INP0.16b, KS0.16b
+ eor INP1.16b, INP1.16b, KS1.16b
.endif
- cbz w19, 3f
+ st1 {INP0.16b-INP1.16b}, [x2], #32
- if_will_cond_yield_neon
- st1 {XL.2d}, [x20]
- .if \enc == 1
- st1 {KS.16b}, [x27]
- .endif
- do_cond_yield_neon
- b 0b
- endif_yield_neon
+ cbnz w0, 0b
- b 1b
+CPU_LE( rev x8, x8 )
+ st1 {XL.2d}, [x1]
+ str x8, [x5, #8] // store lower counter
-3: st1 {XL.2d}, [x20]
.if \enc == 1
- st1 {KS.16b}, [x27]
+ st1 {KS0.16b-KS1.16b}, [x10]
.endif
-CPU_LE( rev x28, x28 )
- str x28, [x24, #8] // store lower counter
-
- frame_pop
ret
-4: b.eq 5f // AES-192?
- enc_round CTR, v17
- enc_round CTR, v18
-5: enc_round CTR, v19
- enc_round CTR, v20
- b 2b
+2: b.eq 3f // AES-192?
+ enc_round KS0, v17
+ enc_round KS1, v17
+ enc_round KS0, v18
+ enc_round KS1, v18
+3: enc_round KS0, v19
+ enc_round KS1, v19
+ enc_round KS0, v20
+ enc_round KS1, v20
+ b 1b
+
+4: load_round_keys w7, x6
+ b 0b
.endm
/*
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 7cf0b1aa6ea8..6e9f33d14930 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 PMULL instructions.
*
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -33,9 +33,12 @@ MODULE_ALIAS_CRYPTO("ghash");
#define GCM_IV_SIZE 12
struct ghash_key {
- u64 a;
- u64 b;
- be128 k;
+ u64 h[2];
+ u64 h2[2];
+ u64 h3[2];
+ u64 h4[2];
+
+ be128 k;
};
struct ghash_desc_ctx {
@@ -113,6 +116,9 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
}
}
+/* avoid hogging the CPU for too long */
+#define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE)
+
static int ghash_update(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
@@ -136,11 +142,16 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
blocks = len / GHASH_BLOCK_SIZE;
len %= GHASH_BLOCK_SIZE;
- ghash_do_update(blocks, ctx->digest, src, key,
- partial ? ctx->buf : NULL);
+ do {
+ int chunk = min(blocks, MAX_BLOCKS);
- src += blocks * GHASH_BLOCK_SIZE;
- partial = 0;
+ ghash_do_update(chunk, ctx->digest, src, key,
+ partial ? ctx->buf : NULL);
+
+ blocks -= chunk;
+ src += chunk * GHASH_BLOCK_SIZE;
+ partial = 0;
+ } while (unlikely(blocks > 0));
}
if (len)
memcpy(ctx->buf + partial, src, len);
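
Worked out, the cap is MAX_BLOCKS = SZ_64K / GHASH_BLOCK_SIZE = 65536 / 16 = 4096 blocks, so each ghash_do_update() call hashes at most 64 KiB per NEON section (assuming the helper brackets its SIMD path with kernel_neon_begin()/kernel_neon_end()). This glue-level bound is what replaces the voluntary-yield scaffolding removed from the assembly.
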
@@ -166,23 +177,36 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
return 0;
}
+static void ghash_reflect(u64 h[], const be128 *k)
+{
+ u64 carry = be64_to_cpu(k->a) & BIT(63) ? 1 : 0;
+
+ h[0] = (be64_to_cpu(k->b) << 1) | carry;
+ h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);
+
+ if (carry)
+ h[1] ^= 0xc200000000000000UL;
+}
+
static int __ghash_setkey(struct ghash_key *key,
const u8 *inkey, unsigned int keylen)
{
- u64 a, b;
+ be128 h;
/* needed for the fallback */
memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
- /* perform multiplication by 'x' in GF(2^128) */
- b = get_unaligned_be64(inkey);
- a = get_unaligned_be64(inkey + 8);
+ ghash_reflect(key->h, &key->k);
- key->a = (a << 1) | (b >> 63);
- key->b = (b << 1) | (a >> 63);
+ h = key->k;
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h2, &h);
- if (b >> 63)
- key->b ^= 0xc200000000000000UL;
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h3, &h);
+
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h4, &h);
return 0;
}
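
The precomputed powers feed the 4-way aggregated reduction in the updated assembly: rather than reducing after every block, four ciphertext blocks C1..C4 are folded in one pass as

    X' = (X + C1)*H^4 + C2*H^3 + C3*H^2 + C4*H      (in GF(2^128))

with a single reduction at the end. gf128mul_lle() produces the successive powers (h2 = H^2, h3 = H^3, h4 = H^4), and ghash_reflect() converts each into the multiplied-by-x bit order the PMULL code consumes, as the deleted open-coded version did for H alone.
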
@@ -204,7 +228,6 @@ static struct shash_alg ghash_alg = {
.base.cra_name = "ghash",
.base.cra_driver_name = "ghash-ce",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ghash_key),
.base.cra_module = THIS_MODULE,
@@ -245,7 +268,7 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
__aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
num_rounds(&ctx->aes_key));
- return __ghash_setkey(&ctx->ghash_key, key, sizeof(key));
+ return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
}
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
@@ -349,9 +372,10 @@ static int gcm_encrypt(struct aead_request *req)
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
struct skcipher_walk walk;
u8 iv[AES_BLOCK_SIZE];
- u8 ks[AES_BLOCK_SIZE];
+ u8 ks[2 * AES_BLOCK_SIZE];
u8 tag[AES_BLOCK_SIZE];
u64 dg[2] = {};
+ int nrounds = num_rounds(&ctx->aes_key);
int err;
if (req->assoclen)
@@ -360,39 +384,39 @@ static int gcm_encrypt(struct aead_request *req)
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(1, iv + GCM_IV_SIZE);
- if (likely(may_use_simd())) {
- kernel_neon_begin();
+ err = skcipher_walk_aead_encrypt(&walk, req, false);
- pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
+ if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+ u32 const *rk = NULL;
+
+ kernel_neon_begin();
+ pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- pmull_gcm_encrypt_block(ks, iv, NULL,
- num_rounds(&ctx->aes_key));
+ pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
put_unaligned_be32(3, iv + GCM_IV_SIZE);
- kernel_neon_end();
+ pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
+ put_unaligned_be32(4, iv + GCM_IV_SIZE);
- err = skcipher_walk_aead_encrypt(&walk, req, false);
+ do {
+ int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
- while (walk.nbytes >= AES_BLOCK_SIZE) {
- int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ if (rk)
+ kernel_neon_begin();
- kernel_neon_begin();
pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
walk.src.virt.addr, &ctx->ghash_key,
- iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key), ks);
+ iv, rk, nrounds, ks);
kernel_neon_end();
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
- }
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
+
+ rk = ctx->aes_key.key_enc;
+ } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
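		/*
		 * Sketch of the flow above: rk stays NULL on the first
		 * pass because pmull_gcm_encrypt_block() already loaded
		 * the round keys inside the still-open NEON section;
		 * every later pass re-enters kernel_neon_begin() with
		 * clobbered NEON state, so rk points at the key schedule
		 * and the asm reloads it (the "cbnz x6, 4f" path).
		 */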
} else {
- __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
- num_rounds(&ctx->aes_key));
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- err = skcipher_walk_aead_encrypt(&walk, req, false);
-
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
@@ -400,8 +424,7 @@ static int gcm_encrypt(struct aead_request *req)
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
- ks, iv,
- num_rounds(&ctx->aes_key));
+ ks, iv, nrounds);
crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
@@ -418,19 +441,28 @@ static int gcm_encrypt(struct aead_request *req)
}
if (walk.nbytes)
__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
- num_rounds(&ctx->aes_key));
+ nrounds);
}
/* handle the tail */
if (walk.nbytes) {
u8 buf[GHASH_BLOCK_SIZE];
+ unsigned int nbytes = walk.nbytes;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *head = NULL;
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
walk.nbytes);
- memcpy(buf, walk.dst.virt.addr, walk.nbytes);
- memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes);
- ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
+ if (walk.nbytes > GHASH_BLOCK_SIZE) {
+ head = dst;
+ dst += GHASH_BLOCK_SIZE;
+ nbytes %= GHASH_BLOCK_SIZE;
+ }
+
+ memcpy(buf, dst, nbytes);
+ memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
+ ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);
err = skcipher_walk_done(&walk, 0);
}
@@ -453,10 +485,11 @@ static int gcm_decrypt(struct aead_request *req)
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
unsigned int authsize = crypto_aead_authsize(aead);
struct skcipher_walk walk;
- u8 iv[AES_BLOCK_SIZE];
+ u8 iv[2 * AES_BLOCK_SIZE];
u8 tag[AES_BLOCK_SIZE];
- u8 buf[GHASH_BLOCK_SIZE];
+ u8 buf[2 * GHASH_BLOCK_SIZE];
u64 dg[2] = {};
+ int nrounds = num_rounds(&ctx->aes_key);
int err;
if (req->assoclen)
@@ -465,39 +498,53 @@ static int gcm_decrypt(struct aead_request *req)
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(1, iv + GCM_IV_SIZE);
- if (likely(may_use_simd())) {
- kernel_neon_begin();
+ err = skcipher_walk_aead_decrypt(&walk, req, false);
+
+ if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+ u32 const *rk = NULL;
- pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
+ kernel_neon_begin();
+ pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- kernel_neon_end();
- err = skcipher_walk_aead_decrypt(&walk, req, false);
+ do {
+ int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+ int rem = walk.total - blocks * AES_BLOCK_SIZE;
- while (walk.nbytes >= AES_BLOCK_SIZE) {
- int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ if (rk)
+ kernel_neon_begin();
- kernel_neon_begin();
pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
walk.src.virt.addr, &ctx->ghash_key,
- iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
+ iv, rk, nrounds);
+
+ /* check if this is the final iteration of the loop */
+ if (rem < (2 * AES_BLOCK_SIZE)) {
+ u8 *iv2 = iv + AES_BLOCK_SIZE;
+
+ if (rem > AES_BLOCK_SIZE) {
+ memcpy(iv2, iv, AES_BLOCK_SIZE);
+ crypto_inc(iv2, AES_BLOCK_SIZE);
+ }
+
+ pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);
+
+ if (rem > AES_BLOCK_SIZE)
+ pmull_gcm_encrypt_block(iv2, iv2, NULL,
+ nrounds);
+ }
+
kernel_neon_end();
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
- }
- if (walk.nbytes)
- pmull_gcm_encrypt_block(iv, iv, NULL,
- num_rounds(&ctx->aes_key));
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
+
+ rk = ctx->aes_key.key_enc;
+ } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
} else {
- __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
- num_rounds(&ctx->aes_key));
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- err = skcipher_walk_aead_decrypt(&walk, req, false);
-
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
@@ -508,8 +555,7 @@ static int gcm_decrypt(struct aead_request *req)
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
- buf, iv,
- num_rounds(&ctx->aes_key));
+ buf, iv, nrounds);
crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
@@ -522,14 +568,24 @@ static int gcm_decrypt(struct aead_request *req)
}
if (walk.nbytes)
__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
- num_rounds(&ctx->aes_key));
+ nrounds);
}
/* handle the tail */
if (walk.nbytes) {
- memcpy(buf, walk.src.virt.addr, walk.nbytes);
- memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes);
- ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
+ const u8 *src = walk.src.virt.addr;
+ const u8 *head = NULL;
+ unsigned int nbytes = walk.nbytes;
+
+ if (walk.nbytes > GHASH_BLOCK_SIZE) {
+ head = src;
+ src += GHASH_BLOCK_SIZE;
+ nbytes %= GHASH_BLOCK_SIZE;
+ }
+
+ memcpy(buf, src, nbytes);
+ memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
+ ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
walk.nbytes);
@@ -554,7 +610,7 @@ static int gcm_decrypt(struct aead_request *req)
static struct aead_alg gcm_aes_alg = {
.ivsize = GCM_IV_SIZE,
- .chunksize = AES_BLOCK_SIZE,
+ .chunksize = 2 * AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm_setkey,
.setauthsize = gcm_setauthsize,
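
Note on the tail handling in both gcm_encrypt() and gcm_decrypt() above: a final partial walk is split into at most one full GHASH block (passed via the new `head` argument) plus a zero-padded remainder, so a single ghash_do_update() call absorbs both. A standalone sketch of just that bookkeeping, with ghash_absorb() standing in for the real ghash_do_update():

	#include <stdio.h>
	#include <string.h>

	#define GHASH_BLOCK_SIZE	16

	/* Hypothetical stand-in for ghash_do_update(): absorbs one full
	 * block at `head` (if non-NULL), then `nblocks` padded blocks
	 * from `buf`. */
	static void ghash_absorb(int nblocks, const unsigned char *buf,
				 const unsigned char *head)
	{
		if (head)
			printf("absorb 1 full block at head\n");
		if (nblocks)
			printf("absorb %d padded block(s) from buf\n", nblocks);
	}

	static void ghash_tail(const unsigned char *src, unsigned int nbytes)
	{
		unsigned char buf[GHASH_BLOCK_SIZE];
		const unsigned char *head = NULL;

		/* More than one block left: peel off one full block ... */
		if (nbytes > GHASH_BLOCK_SIZE) {
			head = src;
			src += GHASH_BLOCK_SIZE;
			nbytes %= GHASH_BLOCK_SIZE;
		}

		/* ... and zero-pad whatever remains, as the hunks above do. */
		memcpy(buf, src, nbytes);
		memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
		ghash_absorb(!!nbytes, buf, head);
	}

	int main(void)
	{
		unsigned char data[31] = { 0 };

		ghash_tail(data, sizeof(data)); /* 31 bytes: 1 head block + 15 padded */
		return 0;
	}

The tail is always shorter than 2 * AES_BLOCK_SIZE here because the main loop only hands back residues below the new chunksize.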
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index efbeb3e0dcfb..17fac2889f56 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -99,7 +99,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index fd1ff2b13dfa..261f5195cab7 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -114,7 +114,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -129,7 +128,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e8880ccdc71f..4aedeaefd61f 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -67,8 +67,7 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha256_state),
.base.cra_name = "sha256",
.base.cra_driver_name = "sha256-arm64",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_priority = 125,
.base.cra_blocksize = SHA256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -80,8 +79,7 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha256_state),
.base.cra_name = "sha224",
.base.cra_driver_name = "sha224-arm64",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_priority = 125,
.base.cra_blocksize = SHA224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
@@ -153,7 +151,6 @@ static struct shash_alg neon_algs[] = { {
.base.cra_name = "sha256",
.base.cra_driver_name = "sha256-arm64-neon",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -166,7 +163,6 @@ static struct shash_alg neon_algs[] = { {
.base.cra_name = "sha224",
.base.cra_driver_name = "sha224-arm64-neon",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index da8222e528bd..a336feac0f59 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -105,7 +105,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -117,7 +116,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -129,7 +127,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -141,7 +138,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index a77c8632a589..f2c5f28c622a 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -87,7 +87,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha384",
.base.cra_driver_name = "sha384-ce",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -100,7 +99,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha512",
.base.cra_driver_name = "sha512-ce",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index 27db4851e380..325b23b43a9b 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -63,7 +63,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha512",
.base.cra_driver_name = "sha512-arm64",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -76,7 +75,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha384",
.base.cra_driver_name = "sha384-arm64",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index 3b4948f7e26f..88938a20d9b2 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -72,7 +72,6 @@ static struct shash_alg sm3_alg = {
.descsize = sizeof(struct sm3_state),
.base.cra_name = "sm3",
.base.cra_driver_name = "sm3-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SM3_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
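
The repeated `.cra_flags = CRYPTO_ALG_TYPE_SHASH` removals across these glue files all rely on the same thing: the flag is redundant for a `struct shash_alg`, since the shash registration path supplies the type itself. A do-nothing sketch of the resulting pattern, where every foo_* name is a placeholder:

	#include <crypto/internal/hash.h>
	#include <linux/module.h>

	struct foo_state { u32 dummy; };

	static int foo_init(struct shash_desc *desc) { return 0; }
	static int foo_update(struct shash_desc *desc, const u8 *data,
			      unsigned int len) { return 0; }
	static int foo_final(struct shash_desc *desc, u8 *out) { return 0; }

	static struct shash_alg foo_alg = {
		.digestsize		= 32,
		.init			= foo_init,
		.update			= foo_update,
		.final			= foo_final,
		.descsize		= sizeof(struct foo_state),
		.base.cra_name		= "foo",
		.base.cra_driver_name	= "foo-generic",
		.base.cra_priority	= 100,
		/* no .cra_flags: the shash registration code sets the type */
		.base.cra_blocksize	= 64,
		.base.cra_module	= THIS_MODULE,
	};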
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 3a9b84d39d71..6cd5d77b6b44 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h
generic-y += msi.h
generic-y += preempt.h
generic-y += qrwlock.h
+generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += serial.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 0db62a4cbce2..709208dfdc8b 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,10 +12,12 @@
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
+#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/psci.h>
#include <asm/cputype.h>
+#include <asm/io.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
@@ -29,18 +31,22 @@
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
+pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
+
/* ACPI table mapping after acpi_permanent_mmap is set */
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
+ /* For normal memory we already have a cacheable mapping. */
+ if (memblock_is_map_memory(phys))
+ return (void __iomem *)__phys_to_virt(phys);
+
/*
- * EFI's reserve_regions() call adds memory with the WB attribute
- * to memblock via early_init_dt_add_memory_arch().
+ * We should still honor the memory's attribute here because
+ * crash dump kernel possibly excludes some ACPI (reclaim)
+ * regions from memblock list.
*/
- if (!memblock_is_memory(phys))
- return ioremap(phys, size);
-
- return ioremap_cache(phys, size);
+ return __ioremap(phys, size, __acpi_get_mem_attribute(phys));
}
#define acpi_os_ioremap acpi_os_ioremap
@@ -129,15 +135,20 @@ static inline const char *acpi_get_enable_method(int cpu)
* for compatibility.
*/
#define acpi_disable_cmcff 1
-pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
+static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+{
+ return __acpi_get_mem_attribute(addr);
+}
#endif /* CONFIG_ACPI_APEI */
#ifdef CONFIG_ACPI_NUMA
int arm64_acpi_numa_init(void);
-int acpi_numa_get_nid(unsigned int cpu, u64 hwid);
+int acpi_numa_get_nid(unsigned int cpu);
+void acpi_map_cpus_to_nodes(void);
#else
static inline int arm64_acpi_numa_init(void) { return -ENOSYS; }
-static inline int acpi_numa_get_nid(unsigned int cpu, u64 hwid) { return NUMA_NO_NODE; }
+static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+static inline void acpi_map_cpus_to_nodes(void) { }
#endif /* CONFIG_ACPI_NUMA */
#define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index c0235e0ff849..9bca54dda75c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -40,17 +40,6 @@
#include <asm/cmpxchg.h>
-#define ___atomic_add_unless(v, a, u, sfx) \
-({ \
- typeof((v)->counter) c, old; \
- \
- c = atomic##sfx##_read(v); \
- while (c != (u) && \
- (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c; \
- })
-
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
@@ -61,21 +50,11 @@
#define atomic_add_return_release atomic_add_return_release
#define atomic_add_return atomic_add_return
-#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
-#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
-#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_sub_return_acquire atomic_sub_return_acquire
#define atomic_sub_return_release atomic_sub_return_release
#define atomic_sub_return atomic_sub_return
-#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
-#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
-#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
#define atomic_fetch_add_release atomic_fetch_add_release
@@ -119,13 +98,6 @@
cmpxchg_release(&((v)->counter), (old), (new))
#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
-#define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
#define atomic_andnot atomic_andnot
/*
@@ -140,21 +112,11 @@
#define atomic64_add_return_release atomic64_add_return_release
#define atomic64_add_return atomic64_add_return
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
-#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
-#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_sub_return_acquire atomic64_sub_return_acquire
#define atomic64_sub_return_release atomic64_sub_return_release
#define atomic64_sub_return atomic64_sub_return
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
-#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
-#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
#define atomic64_fetch_add_release atomic64_fetch_add_release
@@ -195,16 +157,9 @@
#define atomic64_cmpxchg_release atomic_cmpxchg_release
#define atomic64_cmpxchg atomic_cmpxchg
-#define atomic64_inc(v) atomic64_add(1, (v))
-#define atomic64_dec(v) atomic64_sub(1, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
-#define atomic64_add_unless(v, a, u) (___atomic_add_unless(v, a, u, 64) != u)
#define atomic64_andnot atomic64_andnot
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif
#endif
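
The deleted ___atomic_add_unless() was the classic cmpxchg retry loop, and the inc/dec/*_and_test macros were trivial wrappers; the architecture can drop all of them because the generic atomic fallbacks now derive these operations automatically. For reference, the removed loop expressed as plain C with compiler builtins rather than the kernel's atomic_t API (a sketch of the semantics, not kernel code):

	#include <stdio.h>

	/* Add `a` to *v unless *v == u; returns the old value. */
	static int fetch_add_unless(int *v, int a, int u)
	{
		int c = __atomic_load_n(v, __ATOMIC_RELAXED);

		/* __atomic_compare_exchange_n reloads c on failure, so the
		 * loop retries until the CAS wins or the value hits u. */
		while (c != u &&
		       !__atomic_compare_exchange_n(v, &c, c + a, 0,
						    __ATOMIC_SEQ_CST,
						    __ATOMIC_RELAXED))
			;
		return c;
	}

	int main(void)
	{
		int v = 3;

		printf("%d\n", fetch_add_unless(&v, 1, 0));	/* 3, v -> 4 */
		printf("%d\n", fetch_add_unless(&v, 1, 4));	/* 4, v unchanged */
		return 0;
	}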
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index f11518af96a9..822a9192c551 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -128,6 +128,19 @@ do { \
__u.__val; \
})
+#define smp_cond_load_relaxed(ptr, cond_expr) \
+({ \
+ typeof(ptr) __PTR = (ptr); \
+ typeof(*ptr) VAL; \
+ for (;;) { \
+ VAL = READ_ONCE(*__PTR); \
+ if (cond_expr) \
+ break; \
+ __cmpwait_relaxed(__PTR, VAL); \
+ } \
+ VAL; \
+})
+
#define smp_cond_load_acquire(ptr, cond_expr) \
({ \
typeof(ptr) __PTR = (ptr); \
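
smp_cond_load_relaxed() mirrors the existing smp_cond_load_acquire() but without acquire semantics on the final load; on arm64 both funnel into __cmpwait_relaxed(), so the waiter can sit in WFE instead of burning cycles. Typical use (a sketch; `flag` is any shared int, and VAL is the variable the macro binds to the loaded value):

	/* Wait, WFE-assisted, until another CPU sets *(&flag) nonzero.
	 * No ordering is implied beyond the load itself. */
	int val = smp_cond_load_relaxed(&flag, VAL != 0);

This is what the generic qspinlock code (pulled in later in this series) expects the architecture to provide.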
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
index 9c19594ce7cb..10d536b1af74 100644
--- a/arch/arm64/include/asm/bitops.h
+++ b/arch/arm64/include/asm/bitops.h
@@ -17,22 +17,11 @@
#define __ASM_BITOPS_H
#include <linux/compiler.h>
-#include <asm/barrier.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
-/*
- * Little endian assembly atomic bitops.
- */
-extern void set_bit(int nr, volatile unsigned long *p);
-extern void clear_bit(int nr, volatile unsigned long *p);
-extern void change_bit(int nr, volatile unsigned long *p);
-extern int test_and_set_bit(int nr, volatile unsigned long *p);
-extern int test_and_clear_bit(int nr, volatile unsigned long *p);
-extern int test_and_change_bit(int nr, volatile unsigned long *p);
-
#include <asm-generic/bitops/builtin-__ffs.h>
#include <asm-generic/bitops/builtin-ffs.h>
#include <asm-generic/bitops/builtin-__fls.h>
@@ -44,15 +33,11 @@ extern int test_and_change_bit(int nr, volatile unsigned long *p);
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
-
-/*
- * Ext2 is defined to use little-endian byte ordering.
- */
-#define ext2_set_bit_atomic(lock, nr, p) test_and_set_bit_le(nr, p)
-#define ext2_clear_bit_atomic(lock, nr, p) test_and_clear_bit_le(nr, p)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __ASM_BITOPS_H */
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5df5cfe1c143..5ee5bca8c24b 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -21,12 +21,16 @@
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
#define CTR_DMINLINE_SHIFT 16
+#define CTR_IMINLINE_SHIFT 0
#define CTR_ERG_SHIFT 20
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
#define CTR_IDC_SHIFT 28
#define CTR_DIC_SHIFT 29
+#define CTR_CACHE_MINLINE_MASK \
+ (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
#define ICACHE_POLICY_VPIPT 0
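
CTR_CACHE_MINLINE_MASK covers exactly the two 4-bit line-size fields of CTR_EL0 -- DminLine at bits [19:16] and IminLine at bits [3:0] -- so the new ARM64_MISMATCHED_CACHE_TYPE capability can compare only those fields across CPUs. The arithmetic, checked standalone:

	#include <stdio.h>

	#define CTR_DMINLINE_SHIFT	16
	#define CTR_IMINLINE_SHIFT	0
	#define CTR_CACHE_MINLINE_MASK \
		(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)

	int main(void)
	{
		printf("0x%08x\n", CTR_CACHE_MINLINE_MASK); /* 0x000f000f */
		return 0;
	}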
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index d264a7274811..19844211a4e6 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -19,6 +19,7 @@
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H
+#include <linux/kgdb.h>
#include <linux/mm.h>
/*
@@ -71,7 +72,7 @@
* - kaddr - page address
* - size - region size
*/
-extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void __flush_icache_range(unsigned long start, unsigned long end);
extern int invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
@@ -81,6 +82,30 @@ extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+ __flush_icache_range(start, end);
+
+ /*
+ * IPI all online CPUs so that they undergo a context synchronization
+ * event and are forced to refetch the new instructions.
+ */
+#ifdef CONFIG_KGDB
+ /*
+ * KGDB performs cache maintenance with interrupts disabled, so we
+ * will deadlock trying to IPI the secondary CPUs. In theory, we can
+ * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
+ * just means that KGDB will elide the maintenance altogether! As it
+ * turns out, KGDB uses IPIs to round-up the secondary CPUs during
+ * the patching operation, so we don't need extra IPIs here anyway.
+ * In which case, add a KGDB-specific bodge and return early.
+ */
+ if (kgdb_connected && irqs_disabled())
+ return;
+#endif
+ kick_all_cpus_sync();
+}
+
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
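
With flush_icache_range() now issuing kick_all_cpus_sync() itself, a caller patching instructions no longer needs its own IPI. A hedged sketch of the resulting sequence (`addr` and `insn` are assumptions of the example, not taken from this patch):

	static void patch_insn(u32 *addr, u32 insn)
	{
		WRITE_ONCE(*addr, insn);	/* publish the new opcode */
		/* Cleans the D-side, invalidates the I-side, and -- per the
		 * hunk above -- IPIs all online CPUs so every core undergoes
		 * a context synchronization event. */
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + sizeof(insn));
	}

Callers that must avoid the IPI (like KGDB, handled by the bodge above) keep using __flush_icache_range() directly.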
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 8a699c708fc9..ae1f70450fb2 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -49,7 +49,9 @@
#define ARM64_HAS_CACHE_DIC 28
#define ARM64_HW_DBM 29
#define ARM64_SSBD 30
+#define ARM64_MISMATCHED_CACHE_TYPE 31
+#define ARM64_HAS_STAGE2_FWB 32
-#define ARM64_NCAPS 31
+#define ARM64_NCAPS 33
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 192d791f1103..7ed320895d1f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -87,6 +87,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
#define efi_is_64bit() (true)
+#define efi_table_attr(table, attr, instance) \
+ ((table##_t *)instance)->attr
+
#define efi_call_proto(protocol, f, instance, ...) \
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
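
efi_table_attr() complements the existing efi_call_proto(): one reads a member through a typed cast of an opaque pointer, the other invokes a method on it. A hypothetical use, purely for illustration of the expansion:

	/* Expands to ((efi_system_table_t *)table)->hdr, letting shared
	 * EFI stub code stay agnostic about the pointer's concrete type. */
	hdr = efi_table_attr(efi_system_table, hdr, table);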
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index fa92747a49c8..dd1ad3950ef5 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -16,13 +16,15 @@
#ifndef __ASM_FP_H
#define __ASM_FP_H
-#include <asm/ptrace.h>
#include <asm/errno.h>
+#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
+#include <asm/sysreg.h>
#ifndef __ASSEMBLY__
+#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
@@ -102,6 +104,16 @@ extern int sve_set_vector_length(struct task_struct *task,
extern int sve_set_current_vl(unsigned long arg);
extern int sve_get_current_vl(void);
+static inline void sve_user_disable(void)
+{
+ sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
+}
+
+static inline void sve_user_enable(void)
+{
+ sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
+}
+
/*
* Probing and setup functions.
* Calls to these functions must be serialised with one another.
@@ -128,6 +140,9 @@ static inline int sve_get_current_vl(void)
return -EINVAL;
}
+static inline void sve_user_disable(void) { BUILD_BUG(); }
+static inline void sve_user_enable(void) { BUILD_BUG(); }
+
static inline void sve_init_vq_map(void) { }
static inline void sve_update_vq_map(void) { }
static inline int sve_verify_vq_map(void) { return 0; }
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 41770766d964..6a53e59ced95 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -119,13 +119,16 @@ static inline void decode_ctrl_reg(u32 reg,
struct task_struct;
struct notifier_block;
+struct perf_event_attr;
struct perf_event;
struct pmu;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type, int *offset);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index f62c56b1793f..c6802dea6cab 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -446,8 +446,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
-bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
-
int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index a0fee6985e6a..b2b0c6405eb0 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -8,8 +8,6 @@
struct pt_regs;
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
static inline int nr_legacy_irqs(void)
{
return 0;
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 6deb8d726041..d5a44cf859e9 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -48,7 +48,6 @@ struct kprobe_ctlblk {
unsigned long saved_irqflag;
struct prev_kprobe prev_kprobe;
struct kprobe_step_ctx ss_ctx;
- struct pt_regs jprobe_saved_regs;
};
void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 6dd285e979c9..aa45df752a16 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -23,6 +23,7 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
+#define HCR_FWB (UL(1) << 46)
#define HCR_TEA (UL(1) << 37)
#define HCR_TERR (UL(1) << 36)
#define HCR_TLOR (UL(1) << 35)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 1dab3a984608..6106a85ae0be 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -63,6 +63,8 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
/* trap error record accesses */
vcpu->arch.hcr_el2 |= HCR_TERR;
}
+ if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ vcpu->arch.hcr_el2 |= HCR_FWB;
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
vcpu->arch.hcr_el2 &= ~HCR_RW;
@@ -81,6 +83,21 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
return (unsigned long *)&vcpu->arch.hcr_el2;
}
+static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_el2 &= ~HCR_TWE;
+}
+
+static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_el2 |= HCR_TWE;
+}
+
+static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.vsesr_el2;
+}
+
static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
vcpu->arch.vsesr_el2 = vsesr;
@@ -140,7 +157,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
- *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+ *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}
/*
@@ -190,8 +207,8 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
u32 mode;
if (vcpu_mode_is_32bit(vcpu)) {
- mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
- return mode > COMPAT_PSR_MODE_USR;
+ mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
+ return mode > PSR_AA32_MODE_USR;
}
mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
@@ -329,7 +346,7 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
- *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+ *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
} else {
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
sctlr |= (1 << 25);
@@ -340,7 +357,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
- return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+ return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fe8777b12f86..f26055f2306e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -350,6 +350,11 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events);
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
@@ -378,16 +383,23 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
int kvm_perf_init(void);
int kvm_perf_teardown(void);
+void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
+
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-void __kvm_set_tpidr_el2(u64 tpidr_el2);
DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
{
- u64 tpidr_el2;
+ /*
+ * Calculate the raw per-cpu offset without a translation from the
+ * kernel's mapping to the linear mapping, and store it in tpidr_el2
+ * so that we can use adr_l to access per-cpu variables in EL2.
+ */
+ u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) -
+ (u64)kvm_ksym_ref(kvm_host_cpu_state));
/*
* Call initialization code, and switch to the full blown HYP code.
@@ -396,17 +408,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
* cpus_have_const_cap() wrapper.
*/
BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
- __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
-
- /*
- * Calculate the raw per-cpu offset without a translation from the
- * kernel's mapping to the linear mapping, and store it in tpidr_el2
- * so that we can use adr_l to access per-cpu variables in EL2.
- */
- tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
- - (u64)kvm_ksym_ref(kvm_host_cpu_state);
-
- kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
+ __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
}
static inline bool kvm_arch_check_sve_has_vhe(void)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index fb9a7127bb75..d6fff7de5539 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -169,8 +169,12 @@ phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
-#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
-#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
+#define kvm_mk_pmd(ptep) \
+ __pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
+#define kvm_mk_pud(pmdp) \
+ __pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
+#define kvm_mk_pgd(pudp) \
+ __pgd(__phys_to_pgd_val(__pa(pudp)) | PUD_TYPE_TABLE)
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
@@ -267,6 +271,15 @@ static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
void *va = page_address(pfn_to_page(pfn));
+ /*
+ * With FWB, we ensure that the guest always accesses memory using
+ * cacheable attributes, and we don't have to clean to PoC when
+ * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
+ * PoU is not required either in this case.
+ */
+ if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ return;
+
kvm_flush_dcache_to_poc(va, size);
}
@@ -287,20 +300,26 @@ static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
- struct page *page = pte_page(pte);
- kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+ if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
+ struct page *page = pte_page(pte);
+ kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+ }
}
static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
- struct page *page = pmd_page(pmd);
- kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+ if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
+ struct page *page = pmd_page(pmd);
+ kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+ }
}
static inline void __kvm_flush_dcache_pud(pud_t pud)
{
- struct page *page = pud_page(pud);
- kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+ if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
+ struct page *page = pud_page(pud);
+ kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+ }
}
#define kvm_virt_to_phys(x) __pa_symbol(x)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 49d99214f43c..b96442960aea 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -155,6 +155,13 @@
#define MT_S2_NORMAL 0xf
#define MT_S2_DEVICE_nGnRE 0x1
+/*
+ * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001
+ * Stage-2 enforces Normal-WB and Device-nGnRE
+ */
+#define MT_S2_FWB_NORMAL 6
+#define MT_S2_FWB_DEVICE_nGnRE 1
+
#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER (PUD_SHIFT)
#else
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index f922eaf780f9..fb9d137256a6 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -19,11 +19,4 @@
void kernel_neon_begin(void);
void kernel_neon_end(void);
-/*
- * Temporary macro to allow the crypto code to compile. Note that the
- * semantics of kernel_neon_begin_partial() are now different from the
- * original as it does not allow being called in an interrupt context.
- */
-#define kernel_neon_begin_partial(num_regs) kernel_neon_begin()
-
#endif /* ! __ASM_NEON_H */
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index 01bc46d5b43a..626ad01e83bf 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -35,10 +35,14 @@ void __init numa_set_distance(int from, int to, int distance);
void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
void numa_store_cpu_info(unsigned int cpu);
+void numa_add_cpu(unsigned int cpu);
+void numa_remove_cpu(unsigned int cpu);
#else /* CONFIG_NUMA */
static inline void numa_store_cpu_info(unsigned int cpu) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void arm64_numa_init(void) { }
static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 108ecad7acc5..78b942c1bea4 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -67,8 +67,28 @@
#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
-#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY | PTE_S2_XN)
-#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN)
+#define PAGE_S2_MEMATTR(attr) \
+ ({ \
+ u64 __val; \
+ if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) \
+ __val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr); \
+ else \
+ __val = PTE_S2_MEMATTR(MT_S2_ ## attr); \
+ __val; \
+ })
+
+#define PAGE_S2_XN \
+ ({ \
+ u64 __val; \
+ if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) \
+ __val = 0; \
+ else \
+ __val = PTE_S2_XN; \
+ __val; \
+ })
+
+#define PAGE_S2 __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(NORMAL) | PTE_S2_RDONLY | PAGE_S2_XN)
+#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PAGE_S2_XN)
#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index a73ae1e49200..79657ad91397 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -182,12 +182,12 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
start_thread_common(regs, pc);
- regs->pstate = COMPAT_PSR_MODE_USR;
+ regs->pstate = PSR_AA32_MODE_USR;
if (pc & 1)
- regs->pstate |= COMPAT_PSR_T_BIT;
+ regs->pstate |= PSR_AA32_T_BIT;
#ifdef __AARCH64EB__
- regs->pstate |= COMPAT_PSR_E_BIT;
+ regs->pstate |= PSR_AA32_E_BIT;
#endif
regs->compat_sp = sp;
@@ -266,5 +266,20 @@ extern void __init minsigstksz_setup(void);
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL() sve_get_current_vl()
+/*
+ * For CONFIG_GCC_PLUGIN_STACKLEAK
+ *
+ * These need to be macros because otherwise we get stuck in a nightmare
+ * of header definitions for the use of task_stack_page.
+ */
+
+#define current_top_of_stack() \
+({ \
+ struct stack_info _info; \
+ BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \
+ _info.high; \
+})
+#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL))
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 6069d66e0bc2..177b851ca6d9 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -35,36 +35,39 @@
#define COMPAT_PTRACE_GETHBPREGS 29
#define COMPAT_PTRACE_SETHBPREGS 30
-/* AArch32 CPSR bits */
-#define COMPAT_PSR_MODE_MASK 0x0000001f
-#define COMPAT_PSR_MODE_USR 0x00000010
-#define COMPAT_PSR_MODE_FIQ 0x00000011
-#define COMPAT_PSR_MODE_IRQ 0x00000012
-#define COMPAT_PSR_MODE_SVC 0x00000013
-#define COMPAT_PSR_MODE_ABT 0x00000017
-#define COMPAT_PSR_MODE_HYP 0x0000001a
-#define COMPAT_PSR_MODE_UND 0x0000001b
-#define COMPAT_PSR_MODE_SYS 0x0000001f
-#define COMPAT_PSR_T_BIT 0x00000020
-#define COMPAT_PSR_F_BIT 0x00000040
-#define COMPAT_PSR_I_BIT 0x00000080
-#define COMPAT_PSR_A_BIT 0x00000100
-#define COMPAT_PSR_E_BIT 0x00000200
-#define COMPAT_PSR_J_BIT 0x01000000
-#define COMPAT_PSR_Q_BIT 0x08000000
-#define COMPAT_PSR_V_BIT 0x10000000
-#define COMPAT_PSR_C_BIT 0x20000000
-#define COMPAT_PSR_Z_BIT 0x40000000
-#define COMPAT_PSR_N_BIT 0x80000000
-#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
-#define COMPAT_PSR_GE_MASK 0x000f0000
+/* SPSR_ELx bits for exceptions taken from AArch32 */
+#define PSR_AA32_MODE_MASK 0x0000001f
+#define PSR_AA32_MODE_USR 0x00000010
+#define PSR_AA32_MODE_FIQ 0x00000011
+#define PSR_AA32_MODE_IRQ 0x00000012
+#define PSR_AA32_MODE_SVC 0x00000013
+#define PSR_AA32_MODE_ABT 0x00000017
+#define PSR_AA32_MODE_HYP 0x0000001a
+#define PSR_AA32_MODE_UND 0x0000001b
+#define PSR_AA32_MODE_SYS 0x0000001f
+#define PSR_AA32_T_BIT 0x00000020
+#define PSR_AA32_F_BIT 0x00000040
+#define PSR_AA32_I_BIT 0x00000080
+#define PSR_AA32_A_BIT 0x00000100
+#define PSR_AA32_E_BIT 0x00000200
+#define PSR_AA32_DIT_BIT 0x01000000
+#define PSR_AA32_Q_BIT 0x08000000
+#define PSR_AA32_V_BIT 0x10000000
+#define PSR_AA32_C_BIT 0x20000000
+#define PSR_AA32_Z_BIT 0x40000000
+#define PSR_AA32_N_BIT 0x80000000
+#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+#define PSR_AA32_GE_MASK 0x000f0000
#ifdef CONFIG_CPU_BIG_ENDIAN
-#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT
+#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT
#else
-#define COMPAT_PSR_ENDSTATE 0
+#define PSR_AA32_ENDSTATE 0
#endif
+/* AArch32 CPSR bits, as seen in AArch32 */
+#define COMPAT_PSR_DIT_BIT 0x00200000
+
/*
* These are 'magic' values for PTRACE_PEEKUSR that return info about where a
* process is located in memory.
@@ -111,6 +114,30 @@
#define compat_sp_fiq regs[29]
#define compat_lr_fiq regs[30]
+static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
+{
+ unsigned long pstate;
+
+ pstate = psr & ~COMPAT_PSR_DIT_BIT;
+
+ if (psr & COMPAT_PSR_DIT_BIT)
+ pstate |= PSR_AA32_DIT_BIT;
+
+ return pstate;
+}
+
+static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
+{
+ unsigned long psr;
+
+ psr = pstate & ~PSR_AA32_DIT_BIT;
+
+ if (pstate & PSR_AA32_DIT_BIT)
+ psr |= COMPAT_PSR_DIT_BIT;
+
+ return psr;
+}
+
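+/*
+ * Only the DIT bit moves between the two formats: bit 21 in the AArch32
+ * CPSR view versus bit 24 in SPSR_ELx. A standalone round-trip check of
+ * the two helpers above, with the constants copied from this hunk:
+ *
+ *	#include <assert.h>
+ *
+ *	#define PSR_AA32_DIT_BIT	0x01000000	// SPSR_ELx view
+ *	#define COMPAT_PSR_DIT_BIT	0x00200000	// AArch32 CPSR view
+ *
+ *	int main(void)
+ *	{
+ *		unsigned long cpsr = 0x00200010;	// USR mode + DIT
+ *
+ *		assert(compat_psr_to_pstate(cpsr) == 0x01000010);
+ *		assert(pstate_to_compat_psr(0x01000010) == cpsr);
+ *		return 0;
+ *	}
+ */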
/*
* This struct defines the way the registers are stored on the stack during an
* exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
@@ -156,7 +183,7 @@ static inline void forget_syscall(struct pt_regs *regs)
#ifdef CONFIG_COMPAT
#define compat_thumb_mode(regs) \
- (((regs)->pstate & COMPAT_PSR_T_BIT))
+ (((regs)->pstate & PSR_AA32_T_BIT))
#else
#define compat_thumb_mode(regs) (0)
#endif
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index e073e6886685..ffe47d766c25 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -40,15 +40,18 @@ asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
unsigned long sdei_arch_get_entry_point(int conduit);
#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
-bool _on_sdei_stack(unsigned long sp);
-static inline bool on_sdei_stack(unsigned long sp)
+struct stack_info;
+
+bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
+static inline bool on_sdei_stack(unsigned long sp,
+ struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
return false;
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
return false;
if (in_nmi())
- return _on_sdei_stack(sp);
+ return _on_sdei_stack(sp, info);
return false;
}
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index fa8b3fe932e6..6495cc51246f 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
static __must_check inline bool may_use_simd(void)
{
/*
- * The raw_cpu_read() is racy if called with preemption enabled.
- * This is not a bug: kernel_neon_busy is only set when
- * preemption is disabled, so we cannot migrate to another CPU
- * while it is set, nor can we migrate to a CPU where it is set.
- * So, if we find it clear on some CPU then we're guaranteed to
- * find it clear on any CPU we could migrate to.
- *
- * If we are in between kernel_neon_begin()...kernel_neon_end(),
- * the flag will be set, but preemption is also disabled, so we
- * can't migrate to another CPU and spuriously see it become
- * false.
+ * kernel_neon_busy is only set while preemption is disabled,
+ * and is clear whenever preemption is enabled. Since
+ * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+ * cannot change under our feet -- if it's set we cannot be
+ * migrated, and if it's clear we cannot be migrated to a CPU
+ * where it is set.
*/
return !in_irq() && !irqs_disabled() && !in_nmi() &&
- !raw_cpu_read(kernel_neon_busy);
+ !this_cpu_read(kernel_neon_busy);
}
#else /* ! CONFIG_KERNEL_MODE_NEON */
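
The this_cpu_read() switch keeps the same guarantee the rewritten comment describes while dropping the need for the long raw_cpu_read() justification. The usual caller pattern, exactly as in the GCM glue earlier in this series:

	if (may_use_simd()) {
		kernel_neon_begin();
		/* NEON / Crypto Extensions fast path */
		kernel_neon_end();
	} else {
		/* scalar fallback, safe in any context */
	}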
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 26c5bd7d88d8..38116008d18b 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -16,123 +16,8 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
-#include <asm/lse.h>
-#include <asm/spinlock_types.h>
-#include <asm/processor.h>
-
-/*
- * Spinlock implementation.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned int tmp;
- arch_spinlock_t lockval, newval;
-
- asm volatile(
- /* Atomically increment the next ticket. */
- ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
-" prfm pstl1strm, %3\n"
-"1: ldaxr %w0, %3\n"
-" add %w1, %w0, %w5\n"
-" stxr %w2, %w1, %3\n"
-" cbnz %w2, 1b\n",
- /* LSE atomics */
-" mov %w2, %w5\n"
-" ldadda %w2, %w0, %3\n"
- __nops(3)
- )
-
- /* Did we get the lock? */
-" eor %w1, %w0, %w0, ror #16\n"
-" cbz %w1, 3f\n"
- /*
- * No: spin on the owner. Send a local event to avoid missing an
- * unlock before the exclusive load.
- */
-" sevl\n"
-"2: wfe\n"
-" ldaxrh %w2, %4\n"
-" eor %w1, %w2, %w0, lsr #16\n"
-" cbnz %w1, 2b\n"
- /* We got the lock. Critical section starts here. */
-"3:"
- : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
- : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
- : "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned int tmp;
- arch_spinlock_t lockval;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- " prfm pstl1strm, %2\n"
- "1: ldaxr %w0, %2\n"
- " eor %w1, %w0, %w0, ror #16\n"
- " cbnz %w1, 2f\n"
- " add %w0, %w0, %3\n"
- " stxr %w1, %w0, %2\n"
- " cbnz %w1, 1b\n"
- "2:",
- /* LSE atomics */
- " ldr %w0, %2\n"
- " eor %w1, %w0, %w0, ror #16\n"
- " cbnz %w1, 1f\n"
- " add %w1, %w0, %3\n"
- " casa %w0, %w1, %2\n"
- " sub %w1, %w1, %3\n"
- " eor %w1, %w1, %w0\n"
- "1:")
- : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- : "I" (1 << TICKET_SHIFT)
- : "memory");
-
- return !tmp;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- unsigned long tmp;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- " ldrh %w1, %0\n"
- " add %w1, %w1, #1\n"
- " stlrh %w1, %0",
- /* LSE atomics */
- " mov %w1, #1\n"
- " staddlh %w1, %0\n"
- __nops(1))
- : "=Q" (lock->owner), "=&r" (tmp)
- :
- : "memory");
-}
-
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- return lock.owner == lock.next;
-}
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
- return !arch_spin_value_unlocked(READ_ONCE(*lock));
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
- arch_spinlock_t lockval = READ_ONCE(*lock);
- return (lockval.next - lockval.owner) > 1;
-}
-#define arch_spin_is_contended arch_spin_is_contended
-
#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
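
The asm removed above implemented a ticket lock: take a ticket with an atomic fetch-add on `next`, spin until `owner` catches up, unlock with a release increment of `owner`. The same algorithm in portable C11, as a sketch of the semantics being retired (arm64 now pulls in the generic qspinlock instead):

	#include <stdatomic.h>
	#include <stdint.h>

	struct ticket_lock {
		_Atomic uint16_t next;	/* next ticket to hand out */
		_Atomic uint16_t owner;	/* ticket currently served */
	};

	static void ticket_lock(struct ticket_lock *l)
	{
		/* Grab a ticket (the LDADDA / LDAXR+STXR sequence above). */
		uint16_t t = atomic_fetch_add_explicit(&l->next, 1,
						       memory_order_acquire);
		/* Spin until our number comes up (the WFE loop above). */
		while (atomic_load_explicit(&l->owner,
					    memory_order_acquire) != t)
			;
	}

	static void ticket_unlock(struct ticket_lock *l)
	{
		/* Release increment of owner (STLRH / STADDLH above). */
		atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
	}

Ticket locks are strictly FIFO-fair; qspinlock keeps that fairness while scaling better under contention, which is the motivation for the switch.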
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 6b856012c51b..a157ff465e27 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,22 +20,7 @@
# error "please don't include this file directly"
#endif
-#include <linux/types.h>
-
-#define TICKET_SHIFT 16
-
-typedef struct {
-#ifdef __AARCH64EB__
- u16 next;
- u16 owner;
-#else
- u16 owner;
- u16 next;
-#endif
-} __aligned(4) arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
-
+#include <asm-generic/qspinlock_types.h>
#include <asm-generic/qrwlock_types.h>
#endif
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 902f9edacbea..e86737b7c924 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -32,6 +32,21 @@ struct stackframe {
#endif
};
+enum stack_type {
+ STACK_TYPE_UNKNOWN,
+ STACK_TYPE_TASK,
+ STACK_TYPE_IRQ,
+ STACK_TYPE_OVERFLOW,
+ STACK_TYPE_SDEI_NORMAL,
+ STACK_TYPE_SDEI_CRITICAL,
+};
+
+struct stack_info {
+ unsigned long low;
+ unsigned long high;
+ enum stack_type type;
+};
+
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
@@ -39,7 +54,8 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
-static inline bool on_irq_stack(unsigned long sp)
+static inline bool on_irq_stack(unsigned long sp,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
unsigned long high = low + IRQ_STACK_SIZE;
@@ -47,46 +63,79 @@ static inline bool on_irq_stack(unsigned long sp)
if (!low)
return false;
- return (low <= sp && sp < high);
+ if (sp < low || sp >= high)
+ return false;
+
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = STACK_TYPE_IRQ;
+ }
+
+ return true;
}
-static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
+static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE;
- return (low <= sp && sp < high);
+ if (sp < low || sp >= high)
+ return false;
+
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = STACK_TYPE_TASK;
+ }
+
+ return true;
}
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
-static inline bool on_overflow_stack(unsigned long sp)
+static inline bool on_overflow_stack(unsigned long sp,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
- return (low <= sp && sp < high);
+ if (sp < low || sp >= high)
+ return false;
+
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = STACK_TYPE_OVERFLOW;
+ }
+
+ return true;
}
#else
-static inline bool on_overflow_stack(unsigned long sp) { return false; }
+static inline bool on_overflow_stack(unsigned long sp,
+ struct stack_info *info) { return false; }
#endif
+
/*
* We can only safely access per-cpu stacks from current in a non-preemptible
* context.
*/
-static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
+static inline bool on_accessible_stack(struct task_struct *tsk,
+ unsigned long sp,
+ struct stack_info *info)
{
- if (on_task_stack(tsk, sp))
+ if (on_task_stack(tsk, sp, info))
return true;
if (tsk != current || preemptible())
return false;
- if (on_irq_stack(sp))
+ if (on_irq_stack(sp, info))
return true;
- if (on_overflow_stack(sp))
+ if (on_overflow_stack(sp, info))
return true;
- if (on_sdei_stack(sp))
+ if (on_sdei_stack(sp, info))
return true;
return false;
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 709a574468f0..ad8be16a39c9 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -20,7 +20,13 @@
#include <linux/compat.h>
#include <linux/err.h>
-extern const void *sys_call_table[];
+typedef long (*syscall_fn_t)(struct pt_regs *regs);
+
+extern const syscall_fn_t sys_call_table[];
+
+#ifdef CONFIG_COMPAT
+extern const syscall_fn_t compat_sys_call_table[];
+#endif
static inline int syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
new file mode 100644
index 000000000000..a4477e515b79
--- /dev/null
+++ b/arch/arm64/include/asm/syscall_wrapper.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - arm64 specific wrappers to syscall definitions
+ *
+ * Based on arch/x86/include/asm_syscall_wrapper.h
+ */
+
+#ifndef __ASM_SYSCALL_WRAPPER_H
+#define __ASM_SYSCALL_WRAPPER_H
+
+#define SC_ARM64_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->regs[0],,regs->regs[1],,regs->regs[2] \
+ ,,regs->regs[3],,regs->regs[4],,regs->regs[5])
+
+#ifdef CONFIG_COMPAT
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__arm64_compat_sys##name, ERRNO); \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_compat_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+ } \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define COMPAT_SYSCALL_DEFINE0(sname) \
+ asmlinkage long __arm64_compat_sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
+ asmlinkage long __arm64_compat_sys_##sname(void)
+
+#define COND_SYSCALL_COMPAT(name) \
+ cond_syscall(__arm64_compat_sys_##name);
+
+#define COMPAT_SYS_NI(name) \
+ SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers);
+
+#endif /* CONFIG_COMPAT */
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#ifndef SYSCALL_DEFINE0
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __arm64_sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
+ asmlinkage long __arm64_sys_##sname(void)
+#endif
+
+#ifndef COND_SYSCALL
+#define COND_SYSCALL(name) cond_syscall(__arm64_sys_##name)
+#endif
+
+#ifndef SYS_NI
+#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
+#endif
+
+#endif /* __ASM_SYSCALL_WRAPPER_H */
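
For orientation, under __SYSCALL_DEFINEx a definition like SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) expands roughly as follows (a hand expansion eliding the metadata, error-injection and argument-checking glue):

	asmlinkage long __arm64_sys_dup2(const struct pt_regs *regs)
	{
		/* SC_ARM64_REGS_TO_ARGS pulls the args out of regs */
		return __se_sys_dup2(regs->regs[0], regs->regs[1]);
	}

	static long __se_sys_dup2(long oldfd, long newfd)
	{
		return __do_sys_dup2((unsigned int)oldfd,
				     (unsigned int)newfd);
	}

	static inline long __do_sys_dup2(unsigned int oldfd,
					 unsigned int newfd)
	{
		/* original function body */
	}

The entry code thus only ever sees wrappers taking a pt_regs pointer, matching the new syscall_fn_t typedef in <asm/syscall.h>.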
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index a8f84812c6e8..c1470931b897 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -314,6 +314,8 @@
#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define SYS_ICC_RPR_EL1 sys_reg(3, 0, 12, 11, 3)
#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
+#define SYS_ICC_ASGI1R_EL1 sys_reg(3, 0, 12, 11, 6)
+#define SYS_ICC_SGI0R_EL1 sys_reg(3, 0, 12, 11, 7)
#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
#define SYS_ICC_HPPIR1_EL1 sys_reg(3, 0, 12, 12, 2)
@@ -436,7 +438,8 @@
#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
(1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
(1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
- (1 << 27) | (1 << 30) | (1 << 31))
+ (1 << 27) | (1 << 30) | (1 << 31) | \
+ (0xffffffffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
@@ -452,9 +455,9 @@
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
-/* Check all the bits are accounted for */
-#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0)
-
+#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+#error "Inconsistent SCTLR_EL2 set/clear bits"
+#endif
/* SCTLR_EL1 specific flags. */
#define SCTLR_EL1_UCI (1 << 26)
@@ -473,7 +476,8 @@
#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
(1 << 29))
#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
- (1 << 27) | (1 << 30) | (1 << 31))
+ (1 << 27) | (1 << 30) | (1 << 31) | \
+ (0xffffffffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
@@ -492,8 +496,9 @@
SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
SCTLR_EL1_RES0)
-/* Check all the bits are accounted for */
-#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
+#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+#error "Inconsistent SCTLR_EL1 set/clear bits"
+#endif
/* id_aa64isar0 */
#define ID_AA64ISAR0_TS_SHIFT 52
@@ -576,6 +581,7 @@
#define ID_AA64MMFR1_VMIDBITS_16 2
/* id_aa64mmfr2 */
+#define ID_AA64MMFR2_FWB_SHIFT 40
#define ID_AA64MMFR2_AT_SHIFT 32
#define ID_AA64MMFR2_LVA_SHIFT 16
#define ID_AA64MMFR2_IESB_SHIFT 12
@@ -739,19 +745,6 @@ asm(
write_sysreg(__scs_new, sysreg); \
} while (0)
-static inline void config_sctlr_el1(u32 clear, u32 set)
-{
- u32 val;
-
- SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS;
- SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS;
-
- val = read_sysreg(sctlr_el1);
- val &= ~clear;
- val |= set;
- write_sysreg(val, sctlr_el1);
-}
-
#endif
#endif /* __ASM_SYSREG_H */
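
config_sctlr_el1() can go because callers now use the sysreg_clear_set() helper whose tail is visible in the context above (and the completeness checks it carried become the preprocessor #errors earlier in this hunk). The conversion is mechanical; for example, a call such as

	config_sctlr_el1(SCTLR_EL1_UCI, 0);

becomes

	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);

with the added benefit that sysreg_clear_set() skips the register write entirely when nothing changes.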
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index ffdaea7954bb..0ad1cf233470 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -37,7 +37,7 @@ static inline void __tlb_remove_table(void *_table)
static inline void tlb_flush(struct mmu_gather *tlb)
{
- struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+ struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
/*
* The ASID allocator will either invalidate the ASID or mark
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index dfc61d73f740..a4a1901140ee 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -218,6 +218,13 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
dsb(ish);
}
+static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
+{
+ unsigned long addr = __TLBI_VADDR(kaddr, 0);
+
+ __tlbi(vaae1is, addr);
+ dsb(ish);
+}
#endif
#endif
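
__flush_tlb_kernel_pgtable() issues one broadcast TLBI VAAE1IS (by VA, matching all ASIDs) plus a DSB instead of a full kernel TLB flush. A hypothetical caller tearing down an intermediate kernel page-table entry:

	pmd_clear(pmdp);			/* unhook the table page... */
	__flush_tlb_kernel_pgtable(addr);	/* ...then drop cached walks of addr */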
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index df48212f767b..49a0fee4f89b 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -11,7 +11,7 @@ struct cpu_topology {
int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
- cpumask_t llc_siblings;
+ cpumask_t llc_sibling;
};
extern struct cpu_topology cpu_topology[NR_CPUS];
@@ -20,9 +20,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
+void remove_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
#ifdef CONFIG_NUMA
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index a0baa9af5487..e0d0f5b856e7 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -43,7 +43,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 398
+#define __NR_compat_syscalls 399
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index ef292160748c..2cd6dcf8d246 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -260,7 +260,7 @@ __SYSCALL(117, sys_ni_syscall)
#define __NR_fsync 118
__SYSCALL(__NR_fsync, sys_fsync)
#define __NR_sigreturn 119
-__SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper)
+__SYSCALL(__NR_sigreturn, compat_sys_sigreturn)
#define __NR_clone 120
__SYSCALL(__NR_clone, sys_clone)
#define __NR_setdomainname 121
@@ -368,7 +368,7 @@ __SYSCALL(__NR_getresgid, sys_getresgid16)
#define __NR_prctl 172
__SYSCALL(__NR_prctl, sys_prctl)
#define __NR_rt_sigreturn 173
-__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper)
+__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn)
#define __NR_rt_sigaction 174
__SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction)
#define __NR_rt_sigprocmask 175
@@ -382,9 +382,9 @@ __SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigsuspend 179
__SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend)
#define __NR_pread64 180
-__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper)
+__SYSCALL(__NR_pread64, compat_sys_aarch32_pread64)
#define __NR_pwrite64 181
-__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper)
+__SYSCALL(__NR_pwrite64, compat_sys_aarch32_pwrite64)
#define __NR_chown 182
__SYSCALL(__NR_chown, sys_chown16)
#define __NR_getcwd 183
@@ -406,11 +406,11 @@ __SYSCALL(__NR_vfork, sys_vfork)
#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */
#define __NR_mmap2 192
-__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper)
+__SYSCALL(__NR_mmap2, compat_sys_aarch32_mmap2)
#define __NR_truncate64 193
-__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
+__SYSCALL(__NR_truncate64, compat_sys_aarch32_truncate64)
#define __NR_ftruncate64 194
-__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper)
+__SYSCALL(__NR_ftruncate64, compat_sys_aarch32_ftruncate64)
#define __NR_stat64 195
__SYSCALL(__NR_stat64, sys_stat64)
#define __NR_lstat64 196
@@ -472,7 +472,7 @@ __SYSCALL(223, sys_ni_syscall)
#define __NR_gettid 224
__SYSCALL(__NR_gettid, sys_gettid)
#define __NR_readahead 225
-__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper)
+__SYSCALL(__NR_readahead, compat_sys_aarch32_readahead)
#define __NR_setxattr 226
__SYSCALL(__NR_setxattr, sys_setxattr)
#define __NR_lsetxattr 227
@@ -554,15 +554,15 @@ __SYSCALL(__NR_clock_getres, compat_sys_clock_getres)
#define __NR_clock_nanosleep 265
__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep)
#define __NR_statfs64 266
-__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper)
+__SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64)
#define __NR_fstatfs64 267
-__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper)
+__SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64)
#define __NR_tgkill 268
__SYSCALL(__NR_tgkill, sys_tgkill)
#define __NR_utimes 269
__SYSCALL(__NR_utimes, compat_sys_utimes)
#define __NR_arm_fadvise64_64 270
-__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper)
+__SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64)
#define __NR_pciconfig_iobase 271
__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
#define __NR_pciconfig_read 272
@@ -704,7 +704,7 @@ __SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list)
#define __NR_splice 340
__SYSCALL(__NR_splice, sys_splice)
#define __NR_sync_file_range2 341
-__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper)
+__SYSCALL(__NR_sync_file_range2, compat_sys_aarch32_sync_file_range2)
#define __NR_tee 342
__SYSCALL(__NR_tee, sys_tee)
#define __NR_vmsplice 343
@@ -726,7 +726,7 @@ __SYSCALL(__NR_timerfd_create, sys_timerfd_create)
#define __NR_eventfd 351
__SYSCALL(__NR_eventfd, sys_eventfd)
#define __NR_fallocate 352
-__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper)
+__SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate)
#define __NR_timerfd_settime 353
__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime)
#define __NR_timerfd_gettime 354
@@ -817,6 +817,8 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 397
__SYSCALL(__NR_statx, sys_statx)
+#define __NR_rseq 398
+__SYSCALL(__NR_rseq, sys_rseq)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 4e76630dd655..97c3478ee6e7 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -39,6 +39,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -154,6 +155,18 @@ struct kvm_sync_regs {
struct kvm_arch_memory_slot {
};
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+ struct {
+ __u8 serror_pending;
+ __u8 serror_has_esr;
+ /* Align it to 8 bytes */
+ __u8 pad[6];
+ __u64 serror_esr;
+ } exception;
+ __u32 reserved[12];
+};
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
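
Advertising __KVM_HAVE_VCPU_EVENTS lets userspace migrate pending SErrors through the existing KVM_GET/SET_VCPU_EVENTS ioctls. An illustrative, untested consumer (vcpu_fd is assumed to be an open vCPU file descriptor):

	struct kvm_vcpu_events ev = {};

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) == 0 &&
	    ev.exception.serror_pending && ev.exception.serror_has_esr)
		fprintf(stderr, "pending SError, ESR=0x%llx\n",
			(unsigned long long)ev.exception.serror_esr);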
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 0025f8691046..95ac7374d723 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -18,7 +18,8 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
hyp-stub.o psci.o cpu_ops.o insn.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
- smp.o smp_spin_table.o topology.o smccc-call.o
+ smp.o smp_spin_table.o topology.o smccc-call.o \
+ syscall.o
extra-$(CONFIG_EFI) := efi-entry.o
@@ -27,7 +28,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,objcopy)
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
- sys_compat.o entry32.o
+ sys_compat.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 7b09487ff8fb..ed46dc188b22 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -18,6 +18,7 @@
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpumask.h>
+#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/init.h>
#include <linux/irq.h>
@@ -29,13 +30,9 @@
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
+#include <asm/pgtable.h>
#include <asm/smp_plat.h>
-#ifdef CONFIG_ACPI_APEI
-# include <linux/efi.h>
-# include <asm/pgtable.h>
-#endif
-
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);
@@ -239,8 +236,7 @@ done:
}
}
-#ifdef CONFIG_ACPI_APEI
-pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
/*
* According to "Table 8 Map: EFI memory types to AArch64 memory
@@ -261,4 +257,3 @@ pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
return __pgprot(PROT_NORMAL_NC);
return __pgprot(PROT_DEVICE_nGnRnE);
}
-#endif
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index d190a7b231bf..4f4f1815e047 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -26,36 +26,73 @@
#include <linux/module.h>
#include <linux/topology.h>
-#include <acpi/processor.h>
#include <asm/numa.h>
-static int cpus_in_srat;
+static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
-struct __node_cpu_hwid {
- u32 node_id; /* logical node containing this CPU */
- u64 cpu_hwid; /* MPIDR for this CPU */
-};
+int __init acpi_numa_get_nid(unsigned int cpu)
+{
+ return acpi_early_node_map[cpu];
+}
+
+static inline int get_cpu_for_acpi_id(u32 uid)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (uid == get_acpi_id_for_cpu(cpu))
+ return cpu;
-static struct __node_cpu_hwid early_node_cpu_hwid[NR_CPUS] = {
-[0 ... NR_CPUS - 1] = {NUMA_NO_NODE, PHYS_CPUID_INVALID} };
+ return -EINVAL;
+}
-int acpi_numa_get_nid(unsigned int cpu, u64 hwid)
+static int __init acpi_parse_gicc_pxm(struct acpi_subtable_header *header,
+ const unsigned long end)
{
- int i;
+ struct acpi_srat_gicc_affinity *pa;
+ int cpu, pxm, node;
- for (i = 0; i < cpus_in_srat; i++) {
- if (hwid == early_node_cpu_hwid[i].cpu_hwid)
- return early_node_cpu_hwid[i].node_id;
- }
+ if (srat_disabled())
+ return -EINVAL;
+
+ pa = (struct acpi_srat_gicc_affinity *)header;
+ if (!pa)
+ return -EINVAL;
+
+ if (!(pa->flags & ACPI_SRAT_GICC_ENABLED))
+ return 0;
- return NUMA_NO_NODE;
+ pxm = pa->proximity_domain;
+ node = pxm_to_node(pxm);
+
+ /*
+ * If we can't map the UID to a logical cpu this
+ * means that the UID is not part of possible cpus
+ * so we do not need a NUMA mapping for it; skip
+ * the SRAT entry and keep parsing.
+ */
+ cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
+ if (cpu < 0)
+ return 0;
+
+ acpi_early_node_map[cpu] = node;
+ pr_info("SRAT: PXM %d -> MPIDR 0x%llx -> Node %d\n", pxm,
+ cpu_logical_map(cpu), node);
+
+ return 0;
+}
+
+void __init acpi_map_cpus_to_nodes(void)
+{
+ acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_GICC_AFFINITY,
+ acpi_parse_gicc_pxm, 0);
}
/* Callback for Proximity Domain -> ACPI processor UID mapping */
void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
{
int pxm, node;
- phys_cpuid_t mpidr;
if (srat_disabled())
return;
@@ -70,12 +107,6 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
if (!(pa->flags & ACPI_SRAT_GICC_ENABLED))
return;
- if (cpus_in_srat >= NR_CPUS) {
- pr_warn_once("SRAT: cpu_to_node_map[%d] is too small, may not be able to use all cpus\n",
- NR_CPUS);
- return;
- }
-
pxm = pa->proximity_domain;
node = acpi_map_pxm_to_node(pxm);
@@ -85,20 +116,7 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
return;
}
- mpidr = acpi_map_madt_entry(pa->acpi_processor_uid);
- if (mpidr == PHYS_CPUID_INVALID) {
- pr_err("SRAT: PXM %d with ACPI ID %d has no valid MPIDR in MADT\n",
- pxm, pa->acpi_processor_uid);
- bad_srat();
- return;
- }
-
- early_node_cpu_hwid[cpus_in_srat].node_id = node;
- early_node_cpu_hwid[cpus_in_srat].cpu_hwid = mpidr;
node_set(node, numa_nodes_parsed);
- cpus_in_srat++;
- pr_info("SRAT: PXM %d -> MPIDR 0x%Lx -> Node %d\n",
- pxm, mpidr, node);
}
int __init arm64_acpi_numa_init(void)
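
With the table now keyed by logical CPU rather than MPIDR, acpi_numa_get_nid() can be answered per CPU once the logical map exists. A sketch of the presumed consumer (early_map_cpu_to_node() is the existing arm64 NUMA helper this is assumed to feed):

	for_each_possible_cpu(cpu)
		early_map_cpu_to_node(cpu, acpi_numa_get_nid(cpu));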
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 36fb069fd049..b5d603992d40 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -47,11 +47,11 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
unsigned long replptr;
if (kernel_text_address(pc))
- return 1;
+ return true;
replptr = (unsigned long)ALT_REPL_PTR(alt);
if (pc >= replptr && pc <= (replptr + alt->alt_len))
- return 0;
+ return false;
/*
* Branching into *another* alternate sequence is doomed, and
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index d4707abb2f16..92be1d12d590 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -441,8 +441,8 @@ static struct undef_hook swp_hooks[] = {
{
.instr_mask = 0x0fb00ff0,
.instr_val = 0x01000090,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
- .pstate_val = COMPAT_PSR_MODE_USR,
+ .pstate_mask = PSR_AA32_MODE_MASK,
+ .pstate_val = PSR_AA32_MODE_USR,
.fn = swp_handler
},
{ }
@@ -511,9 +511,9 @@ ret:
static int cp15_barrier_set_hw_mode(bool enable)
{
if (enable)
- config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_CP15BEN);
else
- config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_CP15BEN, 0);
return 0;
}
@@ -521,15 +521,15 @@ static struct undef_hook cp15_barrier_hooks[] = {
{
.instr_mask = 0x0fff0fdf,
.instr_val = 0x0e070f9a,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
- .pstate_val = COMPAT_PSR_MODE_USR,
+ .pstate_mask = PSR_AA32_MODE_MASK,
+ .pstate_val = PSR_AA32_MODE_USR,
.fn = cp15barrier_handler,
},
{
.instr_mask = 0x0fff0fff,
.instr_val = 0x0e070f95,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
- .pstate_val = COMPAT_PSR_MODE_USR,
+ .pstate_mask = PSR_AA32_MODE_MASK,
+ .pstate_val = PSR_AA32_MODE_USR,
.fn = cp15barrier_handler,
},
{ }
@@ -548,9 +548,9 @@ static int setend_set_hw_mode(bool enable)
return -EINVAL;
if (enable)
- config_sctlr_el1(SCTLR_EL1_SED, 0);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SED, 0);
else
- config_sctlr_el1(0, SCTLR_EL1_SED);
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_SED);
return 0;
}
@@ -562,10 +562,10 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
if (big_endian) {
insn = "setend be";
- regs->pstate |= COMPAT_PSR_E_BIT;
+ regs->pstate |= PSR_AA32_E_BIT;
} else {
insn = "setend le";
- regs->pstate &= ~COMPAT_PSR_E_BIT;
+ regs->pstate &= ~PSR_AA32_E_BIT;
}
trace_instruction_emulation(insn, regs->pc);
@@ -593,16 +593,16 @@ static struct undef_hook setend_hooks[] = {
{
.instr_mask = 0xfffffdff,
.instr_val = 0xf1010000,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
- .pstate_val = COMPAT_PSR_MODE_USR,
+ .pstate_mask = PSR_AA32_MODE_MASK,
+ .pstate_val = PSR_AA32_MODE_USR,
.fn = a32_setend_handler,
},
{
/* Thumb mode */
.instr_mask = 0x0000fff7,
.instr_val = 0x0000b650,
- .pstate_mask = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
- .pstate_val = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
+ .pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
+ .pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
.fn = t16_setend_handler,
},
{}
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index 6c2b1b4f57c9..fad90e4935fb 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -16,13 +16,14 @@
void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
unsigned long arg0, unsigned long arg1, unsigned long arg2);
-static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
- unsigned long entry, unsigned long arg0, unsigned long arg1,
- unsigned long arg2)
+static inline void __noreturn cpu_soft_restart(unsigned long entry,
+ unsigned long arg0,
+ unsigned long arg1,
+ unsigned long arg2)
{
typeof(__cpu_soft_restart) *restart;
- el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
+ unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
is_hyp_mode_available();
restart = (void *)__pa_symbol(__cpu_soft_restart);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 1d2b6d768efe..dec10898d688 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -65,19 +65,24 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
}
static bool
-has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
- int scope)
+has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
+ int scope)
{
+ u64 mask = CTR_CACHE_MINLINE_MASK;
+
+ /* Skip matching the min line sizes for cache type check */
+ if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
+ mask ^= arm64_ftr_reg_ctrel0.strict_mask;
+
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
- (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
+ return (read_cpuid_cachetype() & mask) !=
+ (arm64_ftr_reg_ctrel0.sys_val & mask);
}
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
- /* Clear SCTLR_EL1.UCT */
- config_sctlr_el1(SCTLR_EL1_UCT, 0);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
@@ -101,7 +106,7 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
for (i = 0; i < SZ_2K; i += 0x80)
memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
- flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+ __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
@@ -613,7 +618,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.desc = "Mismatched cache line size",
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
- .matches = has_mismatched_cache_line_size,
+ .matches = has_mismatched_cache_type,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .cpu_enable = cpu_enable_trap_ctr_access,
+ },
+ {
+ .desc = "Mismatched cache type",
+ .capability = ARM64_MISMATCHED_CACHE_TYPE,
+ .matches = has_mismatched_cache_type,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.cpu_enable = cpu_enable_trap_ctr_access,
},
@@ -649,7 +661,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
- .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.cpu_enable = enable_smccc_arch_workaround_1,
ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
},
@@ -658,7 +669,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.desc = "EL2 vector hardening",
.capability = ARM64_HARDEN_EL2_VECTORS,
- .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
},
#endif
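
Both capabilities share the matcher; only the comparison mask differs. A sketch of the derivation, assuming the {I,D}minLine fields are themselves strictly checked so that CTR_CACHE_MINLINE_MASK is a subset of strict_mask:

	mask = CTR_CACHE_MINLINE_MASK;		  /* LINE_SIZE: min line fields only */
	mask ^= arm64_ftr_reg_ctrel0.strict_mask; /* TYPE: strict_mask & ~MINLINE */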
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f24892a40d2c..e238b7932096 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -192,6 +192,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
@@ -214,7 +215,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
* If we have differing I-cache policies, report it as the weakest - VIPT.
*/
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -1026,6 +1027,14 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
}
#endif
+static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
+{
+ u64 val = read_sysreg_s(SYS_CLIDR_EL1);
+
+ /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
+ WARN_ON(val & (7 << 27 | 7 << 21));
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -1182,6 +1191,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cache_dic,
},
+ {
+ .desc = "Stage-2 Force Write-Back",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_HAS_STAGE2_FWB,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_FWB_SHIFT,
+ .min_field_value = 1,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_has_fwb,
+ },
#ifdef CONFIG_ARM64_HW_AFDBM
{
/*
@@ -1351,9 +1371,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
static void update_cpu_capabilities(u16 scope_mask)
{
- __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
__update_cpu_capabilities(arm64_errata, scope_mask,
"enabling workaround for");
+ __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
}
static int __enable_cpu_capability(void *arg)
@@ -1408,8 +1428,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
static void __init enable_cpu_capabilities(u16 scope_mask)
{
- __enable_cpu_capabilities(arm64_features, scope_mask);
__enable_cpu_capabilities(arm64_errata, scope_mask);
+ __enable_cpu_capabilities(arm64_features, scope_mask);
}
/*
@@ -1723,7 +1743,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
static struct undef_hook mrs_hook = {
.instr_mask = 0xfff00000,
.instr_val = 0xd5300000,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_mask = PSR_AA32_MODE_MASK,
.pstate_val = PSR_MODE_EL0t,
.fn = emulate_mrs,
};
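
The FWB entry is matched by the generic has_cpuid_feature() helper; it boils down to a sanitised ID-register field check, roughly like this sketch (both helpers are part of the existing cpufeature API):

	u64 mmfr2 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
	bool has_fwb = cpuid_feature_extract_unsigned_field(mmfr2,
					ID_AA64MMFR2_FWB_SHIFT) >= 1;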
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 28ad8799406f..09dbea221a27 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -41,19 +41,9 @@
* Context tracking subsystem. Used to instrument transitions
* between user and kernel mode.
*/
- .macro ct_user_exit, syscall = 0
+ .macro ct_user_exit
#ifdef CONFIG_CONTEXT_TRACKING
bl context_tracking_user_exit
- .if \syscall == 1
- /*
- * Save/restore needed during syscalls. Restore syscall arguments from
- * the values already saved on stack during kernel_entry.
- */
- ldp x0, x1, [sp]
- ldp x2, x3, [sp, #S_X2]
- ldp x4, x5, [sp, #S_X4]
- ldp x6, x7, [sp, #S_X6]
- .endif
#endif
.endm
@@ -63,6 +53,12 @@
#endif
.endm
+ .macro clear_gp_regs
+ .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
+ mov x\n, xzr
+ .endr
+ .endm
+
/*
* Bad Abort numbers
*-----------------
@@ -140,20 +136,21 @@ alternative_else_nop_endif
// This macro corrupts x0-x3. It is the caller's duty
// to save/restore them if required.
- .macro apply_ssbd, state, targ, tmp1, tmp2
+ .macro apply_ssbd, state, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb arm64_enable_wa2_handling
- b \targ
+ b .L__asm_ssbd_skip\@
alternative_cb_end
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
- cbz \tmp2, \targ
+ cbz \tmp2, .L__asm_ssbd_skip\@
ldr \tmp2, [tsk, #TSK_TI_FLAGS]
- tbnz \tmp2, #TIF_SSBD, \targ
+ tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
alternative_cb arm64_update_smccc_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
+.L__asm_ssbd_skip\@:
#endif
.endm
@@ -178,20 +175,14 @@ alternative_cb_end
stp x28, x29, [sp, #16 * 14]
.if \el == 0
+ clear_gp_regs
mrs x21, sp_el0
ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
- apply_ssbd 1, 1f, x22, x23
-
-#ifdef CONFIG_ARM64_SSBD
- ldp x0, x1, [sp, #16 * 0]
- ldp x2, x3, [sp, #16 * 1]
-#endif
-1:
+ apply_ssbd 1, x22, x23
- mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
@@ -331,8 +322,7 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
- apply_ssbd 0, 5f, x0, x1
-5:
+ apply_ssbd 0, x0, x1
.endif
msr elr_el1, x21 // set up the return data
@@ -720,14 +710,9 @@ el0_sync_compat:
b.ge el0_dbg
b el0_inv
el0_svc_compat:
- /*
- * AArch32 syscall handling
- */
- ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
- adrp stbl, compat_sys_call_table // load compat syscall table pointer
- mov wscno, w7 // syscall number in w7 (r7)
- mov wsc_nr, #__NR_compat_syscalls
- b el0_svc_naked
+ mov x0, sp
+ bl el0_svc_compat_handler
+ b ret_to_user
.align 6
el0_irq_compat:
@@ -896,25 +881,6 @@ el0_error_naked:
b ret_to_user
ENDPROC(el0_error)
-
-/*
- * This is the fast syscall return path. We do as little as possible here,
- * and this includes saving x0 back into the kernel stack.
- */
-ret_fast_syscall:
- disable_daif
- str x0, [sp, #S_X0] // returned x0
- ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
- and x2, x1, #_TIF_SYSCALL_WORK
- cbnz x2, ret_fast_syscall_trace
- and x2, x1, #_TIF_WORK_MASK
- cbnz x2, work_pending
- enable_step_tsk x1, x2
- kernel_exit 0
-ret_fast_syscall_trace:
- enable_daif
- b __sys_trace_return_skipped // we already saved x0
-
/*
* Ok, we need to do extra processing, enter the slow path.
*/
@@ -936,6 +902,9 @@ ret_to_user:
cbnz x2, work_pending
finish_ret_to_user:
enable_step_tsk x1, x2
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ bl stackleak_erase
+#endif
kernel_exit 0
ENDPROC(ret_to_user)
@@ -944,85 +913,10 @@ ENDPROC(ret_to_user)
*/
.align 6
el0_svc:
- ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
- adrp stbl, sys_call_table // load syscall table pointer
- mov wscno, w8 // syscall number in w8
- mov wsc_nr, #__NR_syscalls
-
-#ifdef CONFIG_ARM64_SVE
-alternative_if_not ARM64_SVE
- b el0_svc_naked
-alternative_else_nop_endif
- tbz x16, #TIF_SVE, el0_svc_naked // Skip unless TIF_SVE set:
- bic x16, x16, #_TIF_SVE // discard SVE state
- str x16, [tsk, #TSK_TI_FLAGS]
-
- /*
- * task_fpsimd_load() won't be called to update CPACR_EL1 in
- * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
- * happens if a context switch or kernel_neon_begin() or context
- * modification (sigreturn, ptrace) intervenes.
- * So, ensure that CPACR_EL1 is already correct for the fast-path case:
- */
- mrs x9, cpacr_el1
- bic x9, x9, #CPACR_EL1_ZEN_EL0EN // disable SVE for el0
- msr cpacr_el1, x9 // synchronised by eret to el0
-#endif
-
-el0_svc_naked: // compat entry point
- stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
- enable_daif
- ct_user_exit 1
-
- tst x16, #_TIF_SYSCALL_WORK // check for syscall hooks
- b.ne __sys_trace
- cmp wscno, wsc_nr // check upper syscall limit
- b.hs ni_sys
- mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number
- ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
- blr x16 // call sys_* routine
- b ret_fast_syscall
-ni_sys:
- mov x0, sp
- bl do_ni_syscall
- b ret_fast_syscall
-ENDPROC(el0_svc)
-
- /*
- * This is the really slow path. We're going to be doing context
- * switches, and waiting for our parent to respond.
- */
-__sys_trace:
- cmp wscno, #NO_SYSCALL // user-issued syscall(-1)?
- b.ne 1f
- mov x0, #-ENOSYS // set default errno if so
- str x0, [sp, #S_X0]
-1: mov x0, sp
- bl syscall_trace_enter
- cmp w0, #NO_SYSCALL // skip the syscall?
- b.eq __sys_trace_return_skipped
- mov wscno, w0 // syscall number (possibly new)
- mov x1, sp // pointer to regs
- cmp wscno, wsc_nr // check upper syscall limit
- b.hs __ni_sys_trace
- ldp x0, x1, [sp] // restore the syscall args
- ldp x2, x3, [sp, #S_X2]
- ldp x4, x5, [sp, #S_X4]
- ldp x6, x7, [sp, #S_X6]
- ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
- blr x16 // call sys_* routine
-
-__sys_trace_return:
- str x0, [sp, #S_X0] // save returned x0
-__sys_trace_return_skipped:
mov x0, sp
- bl syscall_trace_exit
+ bl el0_svc_handler
b ret_to_user
-
-__ni_sys_trace:
- mov x0, sp
- bl do_ni_syscall
- b __sys_trace_return
+ENDPROC(el0_svc)
.popsection // .entry.text
@@ -1138,14 +1032,6 @@ __entry_tramp_data_start:
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
- * Special system call wrappers.
- */
-ENTRY(sys_rt_sigreturn_wrapper)
- mov x0, sp
- b sys_rt_sigreturn
-ENDPROC(sys_rt_sigreturn_wrapper)
-
-/*
* Register switch for AArch64. The callee-saved registers need to be saved
* and restored. On entry:
* x0 = previous task_struct (must be preserved across the switch)
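
The SVC paths now land in C. Assuming the new syscall.c (added to the Makefile above) mirrors the deleted assembly, its EL0 entry point is expected to look roughly like this, with el0_svc_common() as the shared tracing/invocation worker in that file:

	asmlinkage void el0_svc_handler(struct pt_regs *regs)
	{
		sve_user_disable();	/* replaces the CPACR_EL1 fiddling deleted above */
		el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
	}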
diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S
deleted file mode 100644
index f332d5d1f6b4..000000000000
--- a/arch/arm64/kernel/entry32.S
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Compat system call wrappers
- *
- * Copyright (C) 2012 ARM Ltd.
- * Authors: Will Deacon <will.deacon@arm.com>
- * Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <linux/const.h>
-
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-#include <asm/errno.h>
-#include <asm/page.h>
-
-/*
- * System call wrappers for the AArch32 compatibility layer.
- */
-
-ENTRY(compat_sys_sigreturn_wrapper)
- mov x0, sp
- b compat_sys_sigreturn
-ENDPROC(compat_sys_sigreturn_wrapper)
-
-ENTRY(compat_sys_rt_sigreturn_wrapper)
- mov x0, sp
- b compat_sys_rt_sigreturn
-ENDPROC(compat_sys_rt_sigreturn_wrapper)
-
-ENTRY(compat_sys_statfs64_wrapper)
- mov w3, #84
- cmp w1, #88
- csel w1, w3, w1, eq
- b compat_sys_statfs64
-ENDPROC(compat_sys_statfs64_wrapper)
-
-ENTRY(compat_sys_fstatfs64_wrapper)
- mov w3, #84
- cmp w1, #88
- csel w1, w3, w1, eq
- b compat_sys_fstatfs64
-ENDPROC(compat_sys_fstatfs64_wrapper)
-
-/*
- * Note: off_4k (w5) is always in units of 4K. If we can't do the
- * requested offset because it is not page-aligned, we return -EINVAL.
- */
-ENTRY(compat_sys_mmap2_wrapper)
-#if PAGE_SHIFT > 12
- tst w5, #~PAGE_MASK >> 12
- b.ne 1f
- lsr w5, w5, #PAGE_SHIFT - 12
-#endif
- b sys_mmap_pgoff
-1: mov x0, #-EINVAL
- ret
-ENDPROC(compat_sys_mmap2_wrapper)
-
-/*
- * Wrappers for AArch32 syscalls that either take 64-bit parameters
- * in registers or that take 32-bit parameters which require sign
- * extension.
- */
-ENTRY(compat_sys_pread64_wrapper)
- regs_to_64 x3, x4, x5
- b sys_pread64
-ENDPROC(compat_sys_pread64_wrapper)
-
-ENTRY(compat_sys_pwrite64_wrapper)
- regs_to_64 x3, x4, x5
- b sys_pwrite64
-ENDPROC(compat_sys_pwrite64_wrapper)
-
-ENTRY(compat_sys_truncate64_wrapper)
- regs_to_64 x1, x2, x3
- b sys_truncate
-ENDPROC(compat_sys_truncate64_wrapper)
-
-ENTRY(compat_sys_ftruncate64_wrapper)
- regs_to_64 x1, x2, x3
- b sys_ftruncate
-ENDPROC(compat_sys_ftruncate64_wrapper)
-
-ENTRY(compat_sys_readahead_wrapper)
- regs_to_64 x1, x2, x3
- mov w2, w4
- b sys_readahead
-ENDPROC(compat_sys_readahead_wrapper)
-
-ENTRY(compat_sys_fadvise64_64_wrapper)
- mov w6, w1
- regs_to_64 x1, x2, x3
- regs_to_64 x2, x4, x5
- mov w3, w6
- b sys_fadvise64_64
-ENDPROC(compat_sys_fadvise64_64_wrapper)
-
-ENTRY(compat_sys_sync_file_range2_wrapper)
- regs_to_64 x2, x2, x3
- regs_to_64 x3, x4, x5
- b sys_sync_file_range2
-ENDPROC(compat_sys_sync_file_range2_wrapper)
-
-ENTRY(compat_sys_fallocate_wrapper)
- regs_to_64 x2, x2, x3
- regs_to_64 x3, x4, x5
- b sys_fallocate
-ENDPROC(compat_sys_fallocate_wrapper)
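
The deleted assembly wrappers reappear as C compat_sys_aarch32_* functions referenced from unistd32.h above. A sketch of one of them, assuming an arg_u64() helper that reassembles a 64-bit argument from an AArch32 even/odd register pair:

	COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf,
			       size_t, count, u32, __pad, arg_u64(pos))
	{
		return ksys_pread64(fd, buf, count, pos);
	}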
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 84c68b14f1b2..58c53bc96928 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -159,25 +159,6 @@ static void sve_free(struct task_struct *task)
__sve_free(task);
}
-static void change_cpacr(u64 val, u64 mask)
-{
- u64 cpacr = read_sysreg(CPACR_EL1);
- u64 new = (cpacr & ~mask) | val;
-
- if (new != cpacr)
- write_sysreg(new, CPACR_EL1);
-}
-
-static void sve_user_disable(void)
-{
- change_cpacr(0, CPACR_EL1_ZEN_EL0EN);
-}
-
-static void sve_user_enable(void)
-{
- change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN);
-}
-
/*
* TIF_SVE controls whether a task can use SVE without trapping while
* in userspace, and also the way a task's FPSIMD/SVE state is stored
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 413dbe530da8..8c9644376326 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -343,14 +343,13 @@ static int get_hbp_len(u8 hbp_len)
/*
* Check whether bp virtual address is in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->ctrl.len);
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -421,53 +420,53 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+ hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->ctrl.type = ARM_BREAKPOINT_LOAD;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->ctrl.type = ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_3:
- info->ctrl.len = ARM_BREAKPOINT_LEN_3;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_3;
break;
case HW_BREAKPOINT_LEN_4:
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_5:
- info->ctrl.len = ARM_BREAKPOINT_LEN_5;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_5;
break;
case HW_BREAKPOINT_LEN_6:
- info->ctrl.len = ARM_BREAKPOINT_LEN_6;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_6;
break;
case HW_BREAKPOINT_LEN_7:
- info->ctrl.len = ARM_BREAKPOINT_LEN_7;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_7;
break;
case HW_BREAKPOINT_LEN_8:
- info->ctrl.len = ARM_BREAKPOINT_LEN_8;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
@@ -478,37 +477,37 @@ static int arch_build_bp_info(struct perf_event *bp)
* AArch32 also requires breakpoints of length 2 for Thumb.
* Watchpoints can be of length 1, 2, 4 or 8 bytes.
*/
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
if (is_compat_bp(bp)) {
- if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+ if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
return -EINVAL;
- } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
+ } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) {
/*
* FIXME: Some tools (I'm looking at you perf) assume
* that breakpoints should be sizeof(long). This
* is nonsense. For now, we fix up the parameter
* but we should probably return -EINVAL instead.
*/
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
}
}
/* Address */
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/*
* Privilege
* Note that we disallow combined EL0/EL1 breakpoints because
* that would complicate the stepping code.
*/
- if (arch_check_bp_in_kernelspace(bp))
- info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
+ if (arch_check_bp_in_kernelspace(hw))
+ hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
else
- info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
+ hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
/* Enabled? */
- info->ctrl.enabled = !bp->attr.disabled;
+ hw->ctrl.enabled = !attr->disabled;
return 0;
}
@@ -516,14 +515,15 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings.
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int ret;
u64 alignment_mask, offset;
/* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
@@ -537,42 +537,42 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* that here.
*/
if (is_compat_bp(bp)) {
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
alignment_mask = 0x7;
else
alignment_mask = 0x3;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
switch (offset) {
case 0:
/* Aligned */
break;
case 1:
/* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
default:
return -EINVAL;
}
} else {
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE)
alignment_mask = 0x3;
else
alignment_mask = 0x7;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
}
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
/*
* Disallow per-task kernel breakpoints since these would
* complicate the stepping code.
*/
- if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
+ if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
return -EINVAL;
return 0;
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 816d03c4c913..2b3413549734 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -149,20 +149,6 @@ int __kprobes aarch64_insn_write(void *addr, u32 insn)
return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
-static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
-{
- if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
- return false;
-
- return aarch64_insn_is_b(insn) ||
- aarch64_insn_is_bl(insn) ||
- aarch64_insn_is_svc(insn) ||
- aarch64_insn_is_hvc(insn) ||
- aarch64_insn_is_smc(insn) ||
- aarch64_insn_is_brk(insn) ||
- aarch64_insn_is_nop(insn);
-}
-
bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
/* ldr/ldrsw (literal), prfm */
@@ -189,22 +175,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
aarch64_insn_is_bcond(insn);
}
-/*
- * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
- * Section B2.6.5 "Concurrent modification and execution of instructions":
- * Concurrent modification and execution of instructions can lead to the
- * resulting instruction performing any behavior that can be achieved by
- * executing any sequence of instructions that can be executed from the
- * same Exception level, except where the instruction before modification
- * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
- * or SMC instruction.
- */
-bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
-{
- return __aarch64_insn_hotpatch_safe(old_insn) &&
- __aarch64_insn_hotpatch_safe(new_insn);
-}
-
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
u32 *tp = addr;
@@ -216,8 +186,8 @@ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
ret = aarch64_insn_write(tp, insn);
if (ret == 0)
- flush_icache_range((uintptr_t)tp,
- (uintptr_t)tp + AARCH64_INSN_SIZE);
+ __flush_icache_range((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
return ret;
}
@@ -239,11 +209,6 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
pp->new_insns[i]);
- /*
- * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
- * which ends with "dsb; isb" pair guaranteeing global
- * visibility.
- */
/* Notify other processors with an additional increment. */
atomic_inc(&pp->cpu_count);
} else {
@@ -255,8 +220,7 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
return ret;
}
-static
-int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
struct aarch64_insn_patch patch = {
.text_addrs = addrs,
@@ -272,34 +236,6 @@ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
cpu_online_mask);
}
-int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
-{
- int ret;
- u32 insn;
-
- /* Unsafe to patch multiple instructions without synchronization */
- if (cnt == 1) {
- ret = aarch64_insn_read(addrs[0], &insn);
- if (ret)
- return ret;
-
- if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
- /*
- * ARMv8 architecture doesn't guarantee all CPUs see
- * the new instruction after returning from function
- * aarch64_insn_patch_text_nosync(). So send IPIs to
- * all other CPUs to achieve instruction
- * synchronization.
- */
- ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
- kick_all_cpus_sync();
- return ret;
- }
- }
-
- return aarch64_insn_patch_text_sync(addrs, insns, cnt);
-}
-
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
u32 *maskp, int *shiftp)
{
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 60e5fc661f74..780a12f59a8f 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -42,16 +42,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0;
}
-void (*handle_arch_irq)(struct pt_regs *) = NULL;
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- if (handle_arch_irq)
- return;
-
- handle_arch_irq = handle_irq;
-}
-
#ifdef CONFIG_VMAP_STACK
static void init_irq_stacks(void)
{
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index c2dd1ad3e648..e0756416e567 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -36,7 +36,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
insn = aarch64_insn_gen_nop();
}
- aarch64_insn_patch_text(&addr, &insn, 1);
+ aarch64_insn_patch_text_nosync(addr, insn);
}
void arch_jump_label_transform_static(struct jump_entry *entry,
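
Jump labels only ever flip between a branch and a NOP, which the (now deleted) B2.6.5 rationale in insn.c classifies as safe for concurrent modification, so the stop_machine()-based aarch64_insn_patch_text() is unnecessary here and the _nosync variant suffices.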
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index f76ea92dff91..f6a5c6bc1434 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -184,8 +184,15 @@ void machine_kexec(struct kimage *kimage)
/* Flush the reboot_code_buffer in preparation for its execution. */
__flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
- flush_icache_range((uintptr_t)reboot_code_buffer,
- arm64_relocate_new_kernel_size);
+
+ /*
+ * Although we've killed off the secondary CPUs, we don't update
+ * the online mask if we're handling a crash kernel and consequently
+ * need to avoid flush_icache_range(), which will attempt to IPI
+ * the offline CPUs. Therefore, we must use the __* variant here.
+ */
+ __flush_icache_range((uintptr_t)reboot_code_buffer,
+ arm64_relocate_new_kernel_size);
/* Flush the kimage list and its buffers. */
kexec_list_flush(kimage);
@@ -207,8 +214,7 @@ void machine_kexec(struct kimage *kimage)
* relocation is complete.
*/
- cpu_soft_restart(kimage != kexec_crash_image,
- reboot_code_buffer_phys, kimage->head, kimage->start, 0);
+ cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, 0);
BUG(); /* Should never get here. */
}
@@ -361,4 +367,5 @@ void arch_crash_save_vmcoreinfo(void)
kimage_voffset);
vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
PHYS_OFFSET);
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}
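
KERNELOFFSET exposes the KASLR slide in vmcoreinfo so post-mortem tools can relocate kernel symbols; conceptually (pseudo-code):

	runtime_addr = vmlinux_symbol_addr + kaslr_offset;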
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 33147aacdafd..8e38d5267f22 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -25,6 +25,7 @@
#include <asm/virt.h>
#include <linux/acpi.h>
+#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
@@ -446,9 +447,16 @@ static struct attribute_group armv8_pmuv3_events_attr_group = {
};
PMU_FORMAT_ATTR(event, "config:0-15");
+PMU_FORMAT_ATTR(long, "config1:0");
+
+static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
+{
+ return event->attr.config1 & 0x1;
+}
static struct attribute *armv8_pmuv3_format_attrs[] = {
&format_attr_event.attr,
+ &format_attr_long.attr,
NULL,
};
@@ -466,6 +474,21 @@ static struct attribute_group armv8_pmuv3_format_attr_group = {
(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
/*
+ * We must chain two programmable counters for 64-bit events,
+ * except when we have allocated the 64-bit cycle counter (for the
+ * CPU cycles event). This must be called only when the event has
+ * a counter allocated.
+ */
+static inline bool armv8pmu_event_is_chained(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+
+ return !WARN_ON(idx < 0) &&
+ armv8pmu_event_is_64bit(event) &&
+ (idx != ARMV8_IDX_CYCLE_COUNTER);
+}
+
+/*
* ARMv8 low level PMU access
*/
@@ -503,34 +526,68 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
-static inline int armv8pmu_select_counter(int idx)
+static inline void armv8pmu_select_counter(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
write_sysreg(counter, pmselr_el0);
isb();
+}
- return idx;
+static inline u32 armv8pmu_read_evcntr(int idx)
+{
+ armv8pmu_select_counter(idx);
+ return read_sysreg(pmxevcntr_el0);
+}
+
+static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ u64 val = 0;
+
+ val = armv8pmu_read_evcntr(idx);
+ if (armv8pmu_event_is_chained(event))
+ val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
+ return val;
}
-static inline u32 armv8pmu_read_counter(struct perf_event *event)
+static inline u64 armv8pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- u32 value = 0;
+ u64 value = 0;
if (!armv8pmu_counter_valid(cpu_pmu, idx))
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV8_IDX_CYCLE_COUNTER)
value = read_sysreg(pmccntr_el0);
- else if (armv8pmu_select_counter(idx) == idx)
- value = read_sysreg(pmxevcntr_el0);
+ else
+ value = armv8pmu_read_hw_counter(event);
return value;
}
-static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv8pmu_write_evcntr(int idx, u32 value)
+{
+ armv8pmu_select_counter(idx);
+ write_sysreg(value, pmxevcntr_el0);
+}
+
+static inline void armv8pmu_write_hw_counter(struct perf_event *event,
+ u64 value)
+{
+ int idx = event->hw.idx;
+
+ if (armv8pmu_event_is_chained(event)) {
+ armv8pmu_write_evcntr(idx, upper_32_bits(value));
+ armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
+ } else {
+ armv8pmu_write_evcntr(idx, value);
+ }
+}
+
+static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -541,22 +598,43 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
smp_processor_id(), idx);
else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
/*
- * Set the upper 32bits as this is a 64bit counter but we only
- * count using the lower 32bits and we want an interrupt when
- * it overflows.
+ * The cycles counter is really a 64-bit counter.
+ * When treating it as a 32-bit counter, we only count
+ * the lower 32 bits, and set the upper 32 bits so that
+ * we get an interrupt upon 32-bit overflow.
*/
- u64 value64 = 0xffffffff00000000ULL | value;
-
- write_sysreg(value64, pmccntr_el0);
- } else if (armv8pmu_select_counter(idx) == idx)
- write_sysreg(value, pmxevcntr_el0);
+ if (!armv8pmu_event_is_64bit(event))
+ value |= 0xffffffff00000000ULL;
+ write_sysreg(value, pmccntr_el0);
+ } else
+ armv8pmu_write_hw_counter(event, value);
}
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
- if (armv8pmu_select_counter(idx) == idx) {
- val &= ARMV8_PMU_EVTYPE_MASK;
- write_sysreg(val, pmxevtyper_el0);
+ armv8pmu_select_counter(idx);
+ val &= ARMV8_PMU_EVTYPE_MASK;
+ write_sysreg(val, pmxevtyper_el0);
+}
+
+static inline void armv8pmu_write_event_type(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ /*
+ * For chained events, the low counter is programmed to count
+ * the event of interest and the high counter is programmed
+ * with CHAIN event code with filters set to count at all ELs.
+ */
+ if (armv8pmu_event_is_chained(event)) {
+ u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
+ ARMV8_PMU_INCLUDE_EL2;
+
+ armv8pmu_write_evtype(idx - 1, hwc->config_base);
+ armv8pmu_write_evtype(idx, chain_evt);
+ } else {
+ armv8pmu_write_evtype(idx, hwc->config_base);
}
}
@@ -567,6 +645,16 @@ static inline int armv8pmu_enable_counter(int idx)
return idx;
}
+static inline void armv8pmu_enable_event_counter(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+
+ armv8pmu_enable_counter(idx);
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_enable_counter(idx - 1);
+ isb();
+}
+
static inline int armv8pmu_disable_counter(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
@@ -574,6 +662,16 @@ static inline int armv8pmu_disable_counter(int idx)
return idx;
}
+static inline void armv8pmu_disable_event_counter(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_disable_counter(idx - 1);
+ armv8pmu_disable_counter(idx);
+}
+
static inline int armv8pmu_enable_intens(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
@@ -581,6 +679,11 @@ static inline int armv8pmu_enable_intens(int idx)
return idx;
}
+static inline int armv8pmu_enable_event_irq(struct perf_event *event)
+{
+ return armv8pmu_enable_intens(event->hw.idx);
+}
+
static inline int armv8pmu_disable_intens(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
@@ -593,6 +696,11 @@ static inline int armv8pmu_disable_intens(int idx)
return idx;
}
+static inline int armv8pmu_disable_event_irq(struct perf_event *event)
+{
+ return armv8pmu_disable_intens(event->hw.idx);
+}
+
static inline u32 armv8pmu_getreset_flags(void)
{
u32 value;
@@ -610,10 +718,8 @@ static inline u32 armv8pmu_getreset_flags(void)
static void armv8pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
/*
* Enable counter and interrupt, and set the counter to count
@@ -624,22 +730,22 @@ static void armv8pmu_enable_event(struct perf_event *event)
/*
* Disable counter
*/
- armv8pmu_disable_counter(idx);
+ armv8pmu_disable_event_counter(event);
/*
* Set event (if destined for PMNx counters).
*/
- armv8pmu_write_evtype(idx, hwc->config_base);
+ armv8pmu_write_event_type(event);
/*
* Enable interrupt for this counter
*/
- armv8pmu_enable_intens(idx);
+ armv8pmu_enable_event_irq(event);
/*
* Enable counter
*/
- armv8pmu_enable_counter(idx);
+ armv8pmu_enable_event_counter(event);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
@@ -647,10 +753,8 @@ static void armv8pmu_enable_event(struct perf_event *event)
static void armv8pmu_disable_event(struct perf_event *event)
{
unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
/*
* Disable counter and interrupt
@@ -660,16 +764,38 @@ static void armv8pmu_disable_event(struct perf_event *event)
/*
* Disable counter
*/
- armv8pmu_disable_counter(idx);
+ armv8pmu_disable_event_counter(event);
/*
* Disable interrupt for this counter
*/
- armv8pmu_disable_intens(idx);
+ armv8pmu_disable_event_irq(event);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
+static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /* Enable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /* Disable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
u32 pmovsr;
@@ -694,6 +820,11 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
*/
regs = get_irq_regs();
+ /*
+ * Stop the PMU while processing the counter overflows
+ * to prevent skews in group events.
+ */
+ armv8pmu_stop(cpu_pmu);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -718,6 +849,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(event);
}
+ armv8pmu_start(cpu_pmu);
/*
* Handle the pending perf events.
@@ -731,32 +863,42 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
return IRQ_HANDLED;
}
-static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
+ struct arm_pmu *cpu_pmu)
{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ int idx;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Enable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
+ }
+ return -EAGAIN;
}
-static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
+ struct arm_pmu *cpu_pmu)
{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ int idx;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Disable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ /*
+ * Chaining requires two consecutive event counters, where
+ * the lower idx must be even.
+ */
+ for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
+ if (!test_and_set_bit(idx, cpuc->used_mask)) {
+ /* Check if the preceding even counter is available */
+ if (!test_and_set_bit(idx - 1, cpuc->used_mask))
+ return idx;
+ /* Release the odd counter */
+ clear_bit(idx, cpuc->used_mask);
+ }
+ }
+ return -EAGAIN;
}
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
- int idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
@@ -770,13 +912,20 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
/*
* Otherwise use events counters
*/
- for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
- if (!test_and_set_bit(idx, cpuc->used_mask))
- return idx;
- }
+ if (armv8pmu_event_is_64bit(event))
+ return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
+ else
+ return armv8pmu_get_single_idx(cpuc, cpu_pmu);
+}
- /* The counters are all in use. */
- return -EAGAIN;
+static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int idx = event->hw.idx;
+
+ clear_bit(idx, cpuc->used_mask);
+ if (armv8pmu_event_is_chained(event))
+ clear_bit(idx - 1, cpuc->used_mask);
}
/*
@@ -851,6 +1000,9 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
&armv8_pmuv3_perf_cache_map,
ARMV8_PMU_EVTYPE_EVENT);
+ if (armv8pmu_event_is_64bit(event))
+ event->hw.flags |= ARMPMU_EVT_64BIT;
+
/* Only expose micro/arch events supported by this PMU */
if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
&& test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
@@ -957,10 +1109,10 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv8pmu_read_counter,
cpu_pmu->write_counter = armv8pmu_write_counter,
cpu_pmu->get_event_idx = armv8pmu_get_event_idx,
+ cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx,
cpu_pmu->start = armv8pmu_start,
cpu_pmu->stop = armv8pmu_stop,
cpu_pmu->reset = armv8pmu_reset,
- cpu_pmu->max_period = (1LLU << 32) - 1,
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
return 0;
@@ -1127,3 +1279,32 @@ static int __init armv8_pmu_driver_init(void)
return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
+
+void arch_perf_update_userpage(struct perf_event *event,
+ struct perf_event_mmap_page *userpg, u64 now)
+{
+ u32 freq;
+ u32 shift;
+
+ /*
+ * Internal timekeeping for enabled/running/stopped times
+ * is always computed with the sched_clock.
+ */
+ freq = arch_timer_get_rate();
+ userpg->cap_user_time = 1;
+
+ clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
+ NSEC_PER_SEC, 0);
+ /*
+ * time_shift is not expected to be greater than 31 because
+ * the originally published conversion algorithm shifts a
+ * 32-bit value (whereas "now" here is a 64-bit value) - see the
+ * perf_event_mmap_page documentation in perf_event.h.
+ */
+ if (shift == 32) {
+ shift = 31;
+ userpg->time_mult >>= 1;
+ }
+ userpg->time_shift = (u16)shift;
+ userpg->time_offset = -now;
+}
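
With cap_user_time set, userspace converts a raw counter delta to nanoseconds using the exported time_mult/time_shift/time_offset triple. A sketch of the documented conversion, with made-up sample values for a 50 MHz timer:

#include <stdint.h>
#include <stdio.h>

/*
 * Documented perf conversion (ignoring counter wrap handling):
 *   time = time_offset + ((cycles * time_mult) >> time_shift)
 */
static uint64_t cyc_to_ns(uint64_t cycles, uint32_t mult, uint16_t shift,
                          uint64_t offset)
{
    return offset + ((cycles * mult) >> shift);
}

int main(void)
{
    /* Illustrative values for 50 MHz: mult = 20 << 24, shift = 24 */
    uint32_t mult = 335544320;
    uint16_t shift = 24;

    /* 1000 cycles at 20 ns/cycle -> 20000 ns */
    printf("%llu ns\n", (unsigned long long)cyc_to_ns(1000, mult, shift, 0));
    return 0;
}
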
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d849d9804011..e78c3ef04d95 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
- pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+ pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
break;
@@ -395,9 +395,9 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
- * pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry,
- * so get out doing nothing more here.
+ * pre-handler and it returned non-zero, it has
+ * modified the execution path and single-stepping
+ * is not needed. Just reset the current kprobe and exit.
*
* pre_handler can hit a breakpoint and can step thru
* before return, keep PSTATE D-flag enabled until
@@ -405,16 +405,8 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs, kcb, 0);
- return;
- }
- }
- } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
- BRK64_OPCODE_KPROBES) && cur_kprobe) {
- /* We probably hit a jprobe. Call its break handler. */
- if (cur_kprobe->break_handler &&
- cur_kprobe->break_handler(cur_kprobe, regs)) {
- setup_singlestep(cur_kprobe, regs, kcb, 0);
- return;
+ } else
+ reset_current_kprobe();
}
}
/*
@@ -465,74 +457,6 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
return DBG_HOOK_HANDLED;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- /*
- * Since we can't be sure where in the stack frame "stacked"
- * pass-by-value arguments are stored we just don't try to
- * duplicate any of the stack. Do not use jprobes on functions that
- * use more than 64 bytes (after padding each to an 8 byte boundary)
- * of arguments, or pass individual arguments larger than 16 bytes.
- */
-
- instruction_pointer_set(regs, (unsigned long) jp->entry);
- preempt_disable();
- pause_graph_tracing();
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- /*
- * Jprobe handler return by entering break exception,
- * encoded same as kprobe, but with following conditions
- * -a special PC to identify it from the other kprobes.
- * -restore stack addr to original saved pt_regs
- */
- asm volatile(" mov sp, %0 \n"
- "jprobe_return_break: brk %1 \n"
- :
- : "r" (kcb->jprobe_saved_regs.sp),
- "I" (BRK64_ESR_KPROBES)
- : "memory");
-
- unreachable();
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long stack_addr = kcb->jprobe_saved_regs.sp;
- long orig_sp = kernel_stack_pointer(regs);
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- extern const char jprobe_return_break[];
-
- if (instruction_pointer(regs) != (u64) jprobe_return_break)
- return 0;
-
- if (orig_sp != stack_addr) {
- struct pt_regs *saved_regs =
- (struct pt_regs *)kcb->jprobe_saved_regs.sp;
- pr_err("current sp %lx does not match saved sp %lx\n",
- orig_sp, stack_addr);
- pr_err("Saved registers for jprobe %p\n", jp);
- __show_regs(saved_regs);
- pr_err("Current registers\n");
- __show_regs(regs);
- BUG();
- }
- unpause_graph_tracing();
- *regs = kcb->jprobe_saved_regs;
- preempt_enable_no_resched();
- return 1;
-}
-
bool arch_within_kprobe_blacklist(unsigned long addr)
{
if ((addr >= (unsigned long)__kprobes_text_start &&
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index e10bc363f533..7f1628effe6d 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -177,16 +177,16 @@ static void print_pstate(struct pt_regs *regs)
if (compat_user_mode(regs)) {
printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
pstate,
- pstate & COMPAT_PSR_N_BIT ? 'N' : 'n',
- pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z',
- pstate & COMPAT_PSR_C_BIT ? 'C' : 'c',
- pstate & COMPAT_PSR_V_BIT ? 'V' : 'v',
- pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q',
- pstate & COMPAT_PSR_T_BIT ? "T32" : "A32",
- pstate & COMPAT_PSR_E_BIT ? "BE" : "LE",
- pstate & COMPAT_PSR_A_BIT ? 'A' : 'a',
- pstate & COMPAT_PSR_I_BIT ? 'I' : 'i',
- pstate & COMPAT_PSR_F_BIT ? 'F' : 'f');
+ pstate & PSR_AA32_N_BIT ? 'N' : 'n',
+ pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
+ pstate & PSR_AA32_C_BIT ? 'C' : 'c',
+ pstate & PSR_AA32_V_BIT ? 'V' : 'v',
+ pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
+ pstate & PSR_AA32_T_BIT ? "T32" : "A32",
+ pstate & PSR_AA32_E_BIT ? "BE" : "LE",
+ pstate & PSR_AA32_A_BIT ? 'A' : 'a',
+ pstate & PSR_AA32_I_BIT ? 'I' : 'i',
+ pstate & PSR_AA32_F_BIT ? 'F' : 'f');
} else {
printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
pstate,
@@ -493,3 +493,25 @@ void arch_setup_new_exec(void)
{
current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
}
+
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+void __used stackleak_check_alloca(unsigned long size)
+{
+ unsigned long stack_left;
+ unsigned long current_sp = current_stack_pointer;
+ struct stack_info info;
+
+ BUG_ON(!on_accessible_stack(current, current_sp, &info));
+
+ stack_left = current_sp - info.low;
+
+ /*
+ * There's a good chance we're almost out of stack space if this
+ * is true. Using panic() over BUG() is more likely to give
+ * reliable debugging output.
+ */
+ if (size >= stack_left)
+ panic("alloca() over the kernel stack boundary\n");
+}
+EXPORT_SYMBOL(stackleak_check_alloca);
+#endif
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 5c338ce5a7fa..6219486fa25f 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -132,7 +132,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
- on_irq_stack(addr);
+ on_irq_stack(addr, NULL);
}
/**
@@ -277,19 +277,22 @@ static int ptrace_hbp_set_event(unsigned int note_type,
switch (note_type) {
case NT_ARM_HW_BREAK:
- if (idx < ARM_MAX_BRP) {
- tsk->thread.debug.hbp_break[idx] = bp;
- err = 0;
- }
+ if (idx >= ARM_MAX_BRP)
+ goto out;
+ idx = array_index_nospec(idx, ARM_MAX_BRP);
+ tsk->thread.debug.hbp_break[idx] = bp;
+ err = 0;
break;
case NT_ARM_HW_WATCH:
- if (idx < ARM_MAX_WRP) {
- tsk->thread.debug.hbp_watch[idx] = bp;
- err = 0;
- }
+ if (idx >= ARM_MAX_WRP)
+ goto out;
+ idx = array_index_nospec(idx, ARM_MAX_WRP);
+ tsk->thread.debug.hbp_watch[idx] = bp;
+ err = 0;
break;
}
+out:
return err;
}
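
This is the standard Spectre-v1 hardening pattern: perform the architectural bounds check, then clamp the index with array_index_nospec() before the table access. A userspace rendition of the idiom (index_mask_nospec() mirrors the generic kernel helper and, like it, relies on arithmetic right shift of a negative long):

#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

/* Branchless mask: ~0UL when idx < size, 0UL otherwise. */
static unsigned long index_mask_nospec(unsigned long idx, unsigned long size)
{
    return ~(long)(idx | (size - 1UL - idx)) >> (BITS_PER_LONG - 1);
}

int lookup(const int *table, unsigned long nelems, unsigned long idx)
{
    if (idx >= nelems)
        return -1;                              /* architectural check */
    idx &= index_mask_nospec(idx, nelems);      /* clamp under speculation */
    return table[idx];
}
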
@@ -1076,6 +1079,7 @@ static int compat_gpr_get(struct task_struct *target,
break;
case 16:
reg = task_pt_regs(target)->pstate;
+ reg = pstate_to_compat_psr(reg);
break;
case 17:
reg = task_pt_regs(target)->orig_x0;
@@ -1143,6 +1147,7 @@ static int compat_gpr_set(struct task_struct *target,
newregs.pc = reg;
break;
case 16:
+ reg = compat_psr_to_pstate(reg);
newregs.pstate = reg;
break;
case 17:
@@ -1629,7 +1634,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,
regs->regs[regno] = saved_reg;
}
-asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+int syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
@@ -1647,7 +1652,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
return regs->syscallno;
}
-asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+void syscall_trace_exit(struct pt_regs *regs)
{
audit_syscall_exit(regs);
@@ -1656,18 +1661,24 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+
+ rseq_syscall(regs);
}
/*
- * Bits which are always architecturally RES0 per ARM DDI 0487A.h
+ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
+ * We also take into account DIT (bit 24), which is not yet documented, and
+ * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
+ * allocated an EL0 meaning in future.
* Userspace cannot use these until they have an architectural meaning.
+ * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
* We also reserve IL for the kernel; SS is handled dynamically.
*/
#define SPSR_EL1_AARCH64_RES0_BITS \
- (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
- GENMASK_ULL(5, 5))
+ (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+ GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
- (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
+ (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
static int valid_compat_regs(struct user_pt_regs *regs)
{
@@ -1675,15 +1686,15 @@ static int valid_compat_regs(struct user_pt_regs *regs)
if (!system_supports_mixed_endian_el0()) {
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
- regs->pstate |= COMPAT_PSR_E_BIT;
+ regs->pstate |= PSR_AA32_E_BIT;
else
- regs->pstate &= ~COMPAT_PSR_E_BIT;
+ regs->pstate &= ~PSR_AA32_E_BIT;
}
if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
- (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
- (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
- (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
+ (regs->pstate & PSR_AA32_A_BIT) == 0 &&
+ (regs->pstate & PSR_AA32_I_BIT) == 0 &&
+ (regs->pstate & PSR_AA32_F_BIT) == 0) {
return 1;
}
@@ -1691,11 +1702,11 @@ static int valid_compat_regs(struct user_pt_regs *regs)
* Force PSR to a valid 32-bit EL0t, preserving the same bits as
* arch/arm.
*/
- regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
- COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
- COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
- COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
- COMPAT_PSR_T_BIT;
+ regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
+ PSR_AA32_C_BIT | PSR_AA32_V_BIT |
+ PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
+ PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
+ PSR_AA32_T_BIT;
regs->pstate |= PSR_MODE32_BIT;
return 0;
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 6b8d90d5ceae..5ba4465e44f0 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -13,6 +13,7 @@
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
+#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>
@@ -88,23 +89,52 @@ static int init_sdei_stacks(void)
return err;
}
-bool _on_sdei_stack(unsigned long sp)
+static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long low, high;
+ unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
+ unsigned long high = low + SDEI_STACK_SIZE;
- if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ if (sp < low || sp >= high)
return false;
- low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
- high = low + SDEI_STACK_SIZE;
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = STACK_TYPE_SDEI_NORMAL;
+ }
- if (low <= sp && sp < high)
+ return true;
+}
+
+static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
+{
+ unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
+ unsigned long high = low + SDEI_STACK_SIZE;
+
+ if (sp < low || sp >= high)
+ return false;
+
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = STACK_TYPE_SDEI_CRITICAL;
+ }
+
+ return true;
+}
+
+bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
+{
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ return false;
+
+ if (on_sdei_critical_stack(sp, info))
return true;
- low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
- high = low + SDEI_STACK_SIZE;
+ if (on_sdei_normal_stack(sp, info))
+ return true;
- return (low <= sp && sp < high);
+ return false;
}
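
Both helpers follow the same shape: reject sp outside [low, high), otherwise optionally fill a struct stack_info for the unwinder. A reduced sketch of that pattern (types and values are illustrative):

#include <stdbool.h>

struct stack_info {
    unsigned long low, high;
    int type;
};

/* Range check with optional unwinder metadata, as in the helpers above. */
static bool on_stack(unsigned long sp, unsigned long low, unsigned long high,
                     int type, struct stack_info *info)
{
    if (sp < low || sp >= high)
        return false;

    if (info) {
        info->low = low;
        info->high = high;
        info->type = type;
    }
    return true;
}

int main(void)
{
    struct stack_info info;

    return on_stack(0x1080, 0x1000, 0x2000, 1, &info) ? 0 : 1;
}
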
unsigned long sdei_arch_get_entry_point(int conduit)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 30ad2f085d1f..5b4fac434c84 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -241,6 +241,44 @@ static void __init request_standard_resources(void)
}
}
+static int __init reserve_memblock_reserved_regions(void)
+{
+ phys_addr_t start, end, roundup_end = 0;
+ struct resource *mem, *res;
+ u64 i;
+
+ for_each_reserved_mem_region(i, &start, &end) {
+ if (end <= roundup_end)
+ continue; /* done already */
+
+ start = __pfn_to_phys(PFN_DOWN(start));
+ end = __pfn_to_phys(PFN_UP(end)) - 1;
+ roundup_end = end;
+
+ res = kzalloc(sizeof(*res), GFP_ATOMIC);
+ if (WARN_ON(!res))
+ return -ENOMEM;
+ res->start = start;
+ res->end = end;
+ res->name = "reserved";
+ res->flags = IORESOURCE_MEM;
+
+ mem = request_resource_conflict(&iomem_resource, res);
+ /*
+ * We expect memblock_reserve() regions to conflict with
+ * memory created by request_standard_resources().
+ */
+ if (WARN_ON_ONCE(!mem))
+ continue;
+ kfree(res);
+
+ reserve_region_with_split(mem, start, end, "reserved");
+ }
+
+ return 0;
+}
+arch_initcall(reserve_memblock_reserved_regions);
+
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 511af13e8d8f..5dcc942906db 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -539,8 +539,9 @@ static int restore_sigframe(struct pt_regs *regs,
return err;
}
-asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+SYSCALL_DEFINE0(rt_sigreturn)
{
+ struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
@@ -802,6 +803,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
int usig = ksig->sig;
int ret;
+ rseq_signal_deliver(ksig, regs);
+
/*
* Set up the stack frame
*/
@@ -910,7 +913,7 @@ static void do_signal(struct pt_regs *regs)
}
asmlinkage void do_notify_resume(struct pt_regs *regs,
- unsigned int thread_flags)
+ unsigned long thread_flags)
{
/*
* The assembly code enters us with IRQs off, but it hasn't
@@ -940,6 +943,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
if (thread_flags & _TIF_FOREIGN_FPSTATE)
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 77b91f478995..24b09003f821 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -243,6 +243,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
int err;
sigset_t set;
struct compat_aux_sigframe __user *aux;
+ unsigned long psr;
err = get_sigset_t(&set, &sf->uc.uc_sigmask);
if (err == 0) {
@@ -266,7 +267,9 @@ static int compat_restore_sigframe(struct pt_regs *regs,
__get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
__get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
__get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
- __get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
+ __get_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);
+
+ regs->pstate = compat_psr_to_pstate(psr);
/*
* Avoid compat_sys_sigreturn() restarting.
@@ -282,8 +285,9 @@ static int compat_restore_sigframe(struct pt_regs *regs,
return err;
}
-asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
+COMPAT_SYSCALL_DEFINE0(sigreturn)
{
+ struct pt_regs *regs = current_pt_regs();
struct compat_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
@@ -312,8 +316,9 @@ badframe:
return 0;
}
-asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
+COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
+ struct pt_regs *regs = current_pt_regs();
struct compat_rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
@@ -372,22 +377,22 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
{
compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
compat_ulong_t retcode;
- compat_ulong_t spsr = regs->pstate & ~(PSR_f | COMPAT_PSR_E_BIT);
+ compat_ulong_t spsr = regs->pstate & ~(PSR_f | PSR_AA32_E_BIT);
int thumb;
/* Check if the handler is written for ARM or Thumb */
thumb = handler & 1;
if (thumb)
- spsr |= COMPAT_PSR_T_BIT;
+ spsr |= PSR_AA32_T_BIT;
else
- spsr &= ~COMPAT_PSR_T_BIT;
+ spsr &= ~PSR_AA32_T_BIT;
/* The IT state must be cleared for both ARM and Thumb-2 */
- spsr &= ~COMPAT_PSR_IT_MASK;
+ spsr &= ~PSR_AA32_IT_MASK;
/* Restore the original endianness */
- spsr |= COMPAT_PSR_ENDSTATE;
+ spsr |= PSR_AA32_ENDSTATE;
if (ka->sa.sa_flags & SA_RESTORER) {
retcode = ptr_to_compat(ka->sa.sa_restorer);
@@ -414,6 +419,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
struct pt_regs *regs, sigset_t *set)
{
struct compat_aux_sigframe __user *aux;
+ unsigned long psr = pstate_to_compat_psr(regs->pstate);
int err = 0;
__put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
@@ -432,7 +438,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
__put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
__put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
__put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
- __put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
+ __put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);
__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
/* set the compat FSR WnR */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2faa9863d2e5..25fcd22a4bb2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -225,6 +225,7 @@ asmlinkage notrace void secondary_start_kernel(void)
notify_cpu_starting(cpu);
store_cpu_topology(cpu);
+ numa_add_cpu(cpu);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -278,6 +279,9 @@ int __cpu_disable(void)
if (ret)
return ret;
+ remove_cpu_topology(cpu);
+ numa_remove_cpu(cpu);
+
/*
* Take this CPU offline. Once we clear this, we can't return,
* and we must not schedule until we're ready to give up the cpu.
@@ -518,7 +522,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
}
bootcpu_valid = true;
cpu_madt_gicc[0] = *processor;
- early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
return;
}
@@ -541,8 +544,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
*/
acpi_set_mailbox_entry(cpu_count, processor);
- early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count, hwid));
-
cpu_count++;
}
@@ -562,8 +563,34 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
return 0;
}
+
+static void __init acpi_parse_and_init_cpus(void)
+{
+ int i;
+
+ /*
+ * Walk the MADT to determine how many CPUs we have,
+ * including disabled CPUs, and gather the information
+ * we need for SMP init.
+ */
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_parse_gic_cpu_interface, 0);
+
+ /*
+ * In ACPI, SMP and CPU NUMA information is provided in separate
+ * static tables, namely the MADT and the SRAT.
+ *
+ * Thus, it is simpler to first create the cpu logical map through
+ * an MADT walk and then map the logical cpus to their node ids
+ * as separate steps.
+ */
+ acpi_map_cpus_to_nodes();
+
+ for (i = 0; i < nr_cpu_ids; i++)
+ early_map_cpu_to_node(i, acpi_numa_get_nid(i));
+}
#else
-#define acpi_table_parse_madt(...) do { } while (0)
+#define acpi_parse_and_init_cpus(...) do { } while (0)
#endif
/*
@@ -636,13 +663,7 @@ void __init smp_init_cpus(void)
if (acpi_disabled)
of_parse_and_init_cpus();
else
- /*
- * do a walk of MADT to determine how many CPUs
- * we have including disabled CPUs, and get information
- * we need for SMP init
- */
- acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
- acpi_parse_gic_cpu_interface, 0);
+ acpi_parse_and_init_cpus();
if (cpu_count > nr_cpu_ids)
pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
@@ -679,6 +700,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
this_cpu = smp_processor_id();
store_cpu_topology(this_cpu);
numa_store_cpu_info(this_cpu);
+ numa_add_cpu(this_cpu);
/*
* If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index d5718a060672..4989f7ea1e59 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -50,7 +50,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
if (!tsk)
tsk = current;
- if (!on_accessible_stack(tsk, fp))
+ if (!on_accessible_stack(tsk, fp, NULL))
return -EINVAL;
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 72981bae10eb..b44065fb1616 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -25,11 +25,13 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
+
#include <asm/cpufeature.h>
+#include <asm/syscall.h>
-asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, off_t off)
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, off_t, off)
{
if (offset_in_page(off) != 0)
return -EINVAL;
@@ -42,24 +44,25 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
if (personality(personality) == PER_LINUX32 &&
!system_supports_32bit_el0())
return -EINVAL;
- return sys_personality(personality);
+ return ksys_personality(personality);
}
/*
* Wrappers to pass the pt_regs argument.
*/
-asmlinkage long sys_rt_sigreturn_wrapper(void);
-#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
#define sys_personality sys_arm64_personality
+asmlinkage long sys_ni_syscall(const struct pt_regs *);
+#define __arm64_sys_ni_syscall sys_ni_syscall
+
#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = sym,
+#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
+#include <asm/unistd.h>
-/*
- * The sys_call_table array must be 4K aligned to be accessible from
- * kernel/entry.S.
- */
-void * const sys_call_table[__NR_syscalls] __aligned(4096) = {
- [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+
+const syscall_fn_t sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
#include <asm/unistd.h>
};
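
Both syscall tables are built with a two-pass X-macro trick: __SYSCALL is defined once to emit declarations and once to emit designated initializers, with the same header included after each definition. A self-contained sketch of the technique (all names here are illustrative):

#include <stdio.h>

typedef long (*fn_t)(void);

/* The syscall list, analogous to <asm/unistd.h>. */
#define SYSCALL_LIST \
    X(0, sys_zero)   \
    X(1, sys_one)

/* Pass 1: expand the list into forward declarations. */
#define X(nr, sym) static long sym(void);
SYSCALL_LIST
#undef X

/* Pass 2: expand the same list into designated initializers. */
#define X(nr, sym) [nr] = sym,
static const fn_t table[] = { SYSCALL_LIST };
#undef X

static long sys_zero(void) { return 0; }
static long sys_one(void)  { return 1; }

int main(void)
{
    printf("%ld %ld\n", table[0](), table[1]());
    return 0;
}
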
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index a40b1343b819..0f8bcb7de700 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -22,31 +22,128 @@
*/
#define __COMPAT_SYSCALL_NR
+#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
-asmlinkage long compat_sys_sigreturn_wrapper(void);
-asmlinkage long compat_sys_rt_sigreturn_wrapper(void);
-asmlinkage long compat_sys_statfs64_wrapper(void);
-asmlinkage long compat_sys_fstatfs64_wrapper(void);
-asmlinkage long compat_sys_pread64_wrapper(void);
-asmlinkage long compat_sys_pwrite64_wrapper(void);
-asmlinkage long compat_sys_truncate64_wrapper(void);
-asmlinkage long compat_sys_ftruncate64_wrapper(void);
-asmlinkage long compat_sys_readahead_wrapper(void);
-asmlinkage long compat_sys_fadvise64_64_wrapper(void);
-asmlinkage long compat_sys_sync_file_range2_wrapper(void);
-asmlinkage long compat_sys_fallocate_wrapper(void);
-asmlinkage long compat_sys_mmap2_wrapper(void);
+#include <asm/syscall.h>
-#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = sym,
+asmlinkage long compat_sys_sigreturn(void);
+asmlinkage long compat_sys_rt_sigreturn(void);
+
+COMPAT_SYSCALL_DEFINE3(aarch32_statfs64, const char __user *, pathname,
+ compat_size_t, sz, struct compat_statfs64 __user *, buf)
+{
+ /*
+ * 32-bit ARM applies an OABI compatibility fixup to statfs64 and
+ * fstatfs64 regardless of whether OABI is in use, and therefore
+ * arbitrary binaries may rely upon it, so we must do the same.
+ * For more details, see commit:
+ *
+ * 713c481519f19df9 ("[ARM] 3108/2: old ABI compat: statfs64 and
+ * fstatfs64")
+ */
+ if (sz == 88)
+ sz = 84;
+
+ return kcompat_sys_statfs64(pathname, sz, buf);
+}
+
+COMPAT_SYSCALL_DEFINE3(aarch32_fstatfs64, unsigned int, fd, compat_size_t, sz,
+ struct compat_statfs64 __user *, buf)
+{
+ /* see aarch32_statfs64 */
+ if (sz == 88)
+ sz = 84;
+
+ return kcompat_sys_fstatfs64(fd, sz, buf);
+}
/*
- * The sys_call_table array must be 4K aligned to be accessible from
- * kernel/entry.S.
+ * Note: off_4k is always in units of 4K. If we can't do the
+ * requested offset because it is not page-aligned, we return -EINVAL.
*/
-void * const compat_sys_call_table[__NR_compat_syscalls] __aligned(4096) = {
- [0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,
+COMPAT_SYSCALL_DEFINE6(aarch32_mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, off_4k)
+{
+ if (off_4k & (~PAGE_MASK >> 12))
+ return -EINVAL;
+
+ off_4k >>= (PAGE_SHIFT - 12);
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, off_4k);
+}
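
The off_4k guard rejects any 4K-unit offset that is not a multiple of the kernel page size, then rescales it to page units. Worked through under an assumed 64K-page configuration (so offsets must be multiples of 16 four-kilobyte units):

#include <stdio.h>

#define PAGE_SHIFT 16                   /* assume 64K pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Convert a 4K-unit offset to a page-unit offset, as aarch32_mmap2 does. */
static long off4k_to_pgoff(unsigned long off_4k)
{
    if (off_4k & (~PAGE_MASK >> 12))    /* not page aligned */
        return -1;
    return off_4k >> (PAGE_SHIFT - 12);
}

int main(void)
{
    printf("%ld\n", off4k_to_pgoff(32));    /* 32 * 4K = 2 pages -> 2 */
    printf("%ld\n", off4k_to_pgoff(3));     /* 12K: not 64K aligned -> -1 */
    return 0;
}
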
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define arg_u32p(name) u32, name##_hi, u32, name##_lo
+#else
+#define arg_u32p(name) u32, name##_lo, u32, name##_hi
+#endif
+
+#define arg_u64(name) (((u64)name##_hi << 32) | name##_lo)
+
+COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf,
+ size_t, count, u32, __pad, arg_u32p(pos))
+{
+ return ksys_pread64(fd, buf, count, arg_u64(pos));
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_pwrite64, unsigned int, fd,
+ const char __user *, buf, size_t, count, u32, __pad,
+ arg_u32p(pos))
+{
+ return ksys_pwrite64(fd, buf, count, arg_u64(pos));
+}
+
+COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname,
+ u32, __pad, arg_u32p(length))
+{
+ return ksys_truncate(pathname, arg_u64(length));
+}
+
+COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad,
+ arg_u32p(length))
+{
+ return ksys_ftruncate(fd, arg_u64(length));
+}
+
+COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad,
+ arg_u32p(offset), size_t, count)
+{
+ return ksys_readahead(fd, arg_u64(offset), count);
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_fadvise64_64, int, fd, int, advice,
+ arg_u32p(offset), arg_u32p(len))
+{
+ return ksys_fadvise64_64(fd, arg_u64(offset), arg_u64(len), advice);
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_sync_file_range2, int, fd, unsigned int, flags,
+ arg_u32p(offset), arg_u32p(nbytes))
+{
+ return ksys_sync_file_range(fd, arg_u64(offset), arg_u64(nbytes),
+ flags);
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
+ arg_u32p(offset), arg_u32p(len))
+{
+ return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
+}
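
The arg_u32p()/arg_u64() macros splice a 64-bit argument out of two 32-bit registers, with the hi/lo order swapped on big-endian. A standalone sketch of the reconstruction, fixed to the little-endian ordering for brevity (values are invented):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 64-bit value from the two 32-bit halves a compat task
 * passed in adjacent registers (lo first on little-endian). */
static uint64_t pair_to_u64(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    /* e.g. a pread64 offset of 0x100000004 split as hi=1, lo=4 */
    printf("0x%llx\n", (unsigned long long)pair_to_u64(0x4, 0x1));
    return 0;
}
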
+
+asmlinkage long sys_ni_syscall(const struct pt_regs *);
+#define __arm64_sys_ni_syscall sys_ni_syscall
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
+#include <asm/unistd32.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+
+const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
+ [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
#include <asm/unistd32.h>
};
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
new file mode 100644
index 000000000000..032d22312881
--- /dev/null
+++ b/arch/arm64/kernel/syscall.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/errno.h>
+#include <linux/nospec.h>
+#include <linux/ptrace.h>
+#include <linux/syscalls.h>
+
+#include <asm/daifflags.h>
+#include <asm/fpsimd.h>
+#include <asm/syscall.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+
+long compat_arm_syscall(struct pt_regs *regs);
+
+long sys_ni_syscall(void);
+
+asmlinkage long do_ni_syscall(struct pt_regs *regs)
+{
+#ifdef CONFIG_COMPAT
+ long ret;
+ if (is_compat_task()) {
+ ret = compat_arm_syscall(regs);
+ if (ret != -ENOSYS)
+ return ret;
+ }
+#endif
+
+ return sys_ni_syscall();
+}
+
+static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn)
+{
+ return syscall_fn(regs);
+}
+
+static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
+ unsigned int sc_nr,
+ const syscall_fn_t syscall_table[])
+{
+ long ret;
+
+ if (scno < sc_nr) {
+ syscall_fn_t syscall_fn;
+ syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
+ ret = __invoke_syscall(regs, syscall_fn);
+ } else {
+ ret = do_ni_syscall(regs);
+ }
+
+ regs->regs[0] = ret;
+}
+
+static inline bool has_syscall_work(unsigned long flags)
+{
+ return unlikely(flags & _TIF_SYSCALL_WORK);
+}
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
+static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ const syscall_fn_t syscall_table[])
+{
+ unsigned long flags = current_thread_info()->flags;
+
+ regs->orig_x0 = regs->regs[0];
+ regs->syscallno = scno;
+
+ local_daif_restore(DAIF_PROCCTX);
+ user_exit();
+
+ if (has_syscall_work(flags)) {
+ /* set default errno for user-issued syscall(-1) */
+ if (scno == NO_SYSCALL)
+ regs->regs[0] = -ENOSYS;
+ scno = syscall_trace_enter(regs);
+ if (scno == NO_SYSCALL)
+ goto trace_exit;
+ }
+
+ invoke_syscall(regs, scno, sc_nr, syscall_table);
+
+ /*
+ * The tracing status may have changed under our feet, so we have to
+ * check again. However, if we were tracing entry, then we always trace
+ * exit regardless, as the old entry assembly did.
+ */
+ if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
+ local_daif_mask();
+ flags = current_thread_info()->flags;
+ if (!has_syscall_work(flags)) {
+ /*
+ * We're off to userspace, where interrupts are
+ * always enabled after we restore the flags from
+ * the SPSR.
+ */
+ trace_hardirqs_on();
+ return;
+ }
+ local_daif_restore(DAIF_PROCCTX);
+ }
+
+trace_exit:
+ syscall_trace_exit(regs);
+}
+
+static inline void sve_user_discard(void)
+{
+ if (!system_supports_sve())
+ return;
+
+ clear_thread_flag(TIF_SVE);
+
+ /*
+ * task_fpsimd_load() won't be called to update CPACR_EL1 in
+ * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
+ * happens if a context switch or kernel_neon_begin() or context
+ * modification (sigreturn, ptrace) intervenes.
+ * So, ensure that CPACR_EL1 is already correct for the fast-path case.
+ */
+ sve_user_disable();
+}
+
+asmlinkage void el0_svc_handler(struct pt_regs *regs)
+{
+ sve_user_discard();
+ el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
+}
+
+#ifdef CONFIG_COMPAT
+asmlinkage void el0_svc_compat_handler(struct pt_regs *regs)
+{
+ el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
+ compat_sys_call_table);
+}
+#endif
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index f845a8617812..0825c4a856e3 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -215,11 +215,16 @@ EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
- const cpumask_t *core_mask = &cpu_topology[cpu].core_sibling;
+ const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
+ /* Find the smaller of NUMA, core or LLC siblings */
+ if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
+ /* no NUMA within the package, so use the package siblings */
+ core_mask = &cpu_topology[cpu].core_sibling;
+ }
if (cpu_topology[cpu].llc_id != -1) {
- if (cpumask_subset(&cpu_topology[cpu].llc_siblings, core_mask))
- core_mask = &cpu_topology[cpu].llc_siblings;
+ if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
+ core_mask = &cpu_topology[cpu].llc_sibling;
}
return core_mask;
@@ -231,27 +236,25 @@ static void update_siblings_masks(unsigned int cpuid)
int cpu;
/* update core and thread sibling masks */
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
if (cpuid_topo->llc_id == cpu_topo->llc_id) {
- cpumask_set_cpu(cpu, &cpuid_topo->llc_siblings);
- cpumask_set_cpu(cpuid, &cpu_topo->llc_siblings);
+ cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
+ cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
}
if (cpuid_topo->package_id != cpu_topo->package_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
- if (cpu != cpuid)
- cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
if (cpuid_topo->core_id != cpu_topo->core_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
- if (cpu != cpuid)
- cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
}
}
@@ -293,6 +296,19 @@ topology_populated:
update_siblings_masks(cpuid);
}
+static void clear_cpu_topology(int cpu)
+{
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpumask_clear(&cpu_topo->llc_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+}
+
static void __init reset_cpu_topology(void)
{
unsigned int cpu;
@@ -303,18 +319,26 @@ static void __init reset_cpu_topology(void)
cpu_topo->thread_id = -1;
cpu_topo->core_id = 0;
cpu_topo->package_id = -1;
-
cpu_topo->llc_id = -1;
- cpumask_clear(&cpu_topo->llc_siblings);
- cpumask_set_cpu(cpu, &cpu_topo->llc_siblings);
- cpumask_clear(&cpu_topo->core_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
- cpumask_clear(&cpu_topo->thread_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+ clear_cpu_topology(cpu);
}
}
+void remove_cpu_topology(unsigned int cpu)
+{
+ int sibling;
+
+ for_each_cpu(sibling, topology_core_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
+ for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+ for_each_cpu(sibling, topology_llc_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
+
+ clear_cpu_topology(cpu);
+}
+
#ifdef CONFIG_ACPI
/*
* Propagate the topology information of the processor_topology_node tree to the
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d399d459397b..039e9ff379cc 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -411,7 +411,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
- config_sctlr_el1(SCTLR_EL1_UCI, 0);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
#define __user_cache_maint(insn, address, res) \
@@ -547,22 +547,6 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
do_undefinstr(regs);
}
-long compat_arm_syscall(struct pt_regs *regs);
-
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
-{
-#ifdef CONFIG_COMPAT
- long ret;
- if (is_compat_task()) {
- ret = compat_arm_syscall(regs);
- if (ret != -ENOSYS)
- return ret;
- }
-#endif
-
- return sys_ni_syscall();
-}
-
static const char *esr_class_str[] = {
[0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
[ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized",
diff --git a/arch/arm64/kernel/vdso/note.S b/arch/arm64/kernel/vdso/note.S
index b82c85e5d972..e20483b104d9 100644
--- a/arch/arm64/kernel/vdso/note.S
+++ b/arch/arm64/kernel/vdso/note.S
@@ -22,7 +22,10 @@
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>
+#include <linux/build-salt.h>
ELFNOTE_START(Linux, 0, "a")
.long LINUX_VERSION_CODE
ELFNOTE_END
+
+BUILD_SALT
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 56a0260ceb11..07256b08226c 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -107,14 +107,14 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
- u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+ u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
- case COMPAT_PSR_MODE_USR:
- case COMPAT_PSR_MODE_FIQ:
- case COMPAT_PSR_MODE_IRQ:
- case COMPAT_PSR_MODE_SVC:
- case COMPAT_PSR_MODE_ABT:
- case COMPAT_PSR_MODE_UND:
+ case PSR_AA32_MODE_USR:
+ case PSR_AA32_MODE_FIQ:
+ case PSR_AA32_MODE_IRQ:
+ case PSR_AA32_MODE_SVC:
+ case PSR_AA32_MODE_ABT:
+ case PSR_AA32_MODE_UND:
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
@@ -289,6 +289,39 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
+ events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+
+ if (events->exception.serror_pending && events->exception.serror_has_esr)
+ events->exception.serror_esr = vcpu_get_vsesr(vcpu);
+
+ return 0;
+}
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ bool serror_pending = events->exception.serror_pending;
+ bool has_esr = events->exception.serror_has_esr;
+
+ if (serror_pending && has_esr) {
+ if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+ return -EINVAL;
+
+ if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
+ kvm_set_sei_esr(vcpu, events->exception.serror_esr);
+ else
+ return -EINVAL;
+ } else if (serror_pending) {
+ kvm_inject_vabt(vcpu);
+ }
+
+ return 0;
+}
+
int __attribute_const__ kvm_target_cpu(void)
{
unsigned long implementor = read_cpuid_implementor();
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 6fd91b31a131..ea9225160786 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -57,6 +57,7 @@ __invalid:
* x0: HYP pgd
* x1: HYP stack
* x2: HYP vectors
+ * x3: per-CPU offset
*/
__do_hyp_init:
/* Check for a stub HVC call */
@@ -119,9 +120,8 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
mov sp, x1
msr vbar_el2, x2
- /* copy tpidr_el1 into tpidr_el2 for use by HYP */
- mrs x1, tpidr_el1
- msr tpidr_el2, x1
+ /* Set tpidr_el2 for use by HYP */
+ msr tpidr_el2, x3
/* Hello, World! */
eret
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 4313f7475333..2fabc2dc1966 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -3,7 +3,8 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING \
+ $(DISABLE_STACKLEAK_PLUGIN)
KVM=../../../../virt/kvm
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 35bc16832efe..9ce223944983 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -288,8 +288,3 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
vcpu->arch.sysregs_loaded_on_cpu = false;
}
-
-void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
-{
- asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
-}
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 39be799d0417..215c7c0eb3b0 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -27,7 +27,7 @@
static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
- return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT);
+ return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT);
return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
}
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index d8e71659ba7e..a55e91dfcf8f 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -164,9 +164,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
inject_undef64(vcpu);
}
-static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
+void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
- vcpu_set_vsesr(vcpu, esr);
+ vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
*vcpu_hcr(vcpu) |= HCR_VSE;
}
@@ -184,5 +184,5 @@ static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
*/
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
- pend_guest_serror(vcpu, ESR_ELx_ISV);
+ kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
index eefe403a2e63..7a5173ea2276 100644
--- a/arch/arm64/kvm/regmap.c
+++ b/arch/arm64/kvm/regmap.c
@@ -112,22 +112,22 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{
unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
- unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
+ unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
switch (mode) {
- case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
+ case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
break;
- case COMPAT_PSR_MODE_ABT:
+ case PSR_AA32_MODE_ABT:
mode = 4;
break;
- case COMPAT_PSR_MODE_UND:
+ case PSR_AA32_MODE_UND:
mode = 5;
break;
- case COMPAT_PSR_MODE_SYS:
+ case PSR_AA32_MODE_SYS:
mode = 0; /* SYS maps to USR */
break;
@@ -143,13 +143,13 @@ unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
*/
static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
{
- unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
+ unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
switch (mode) {
- case COMPAT_PSR_MODE_SVC: return KVM_SPSR_SVC;
- case COMPAT_PSR_MODE_ABT: return KVM_SPSR_ABT;
- case COMPAT_PSR_MODE_UND: return KVM_SPSR_UND;
- case COMPAT_PSR_MODE_IRQ: return KVM_SPSR_IRQ;
- case COMPAT_PSR_MODE_FIQ: return KVM_SPSR_FIQ;
+ case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC;
+ case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT;
+ case PSR_AA32_MODE_UND: return KVM_SPSR_UND;
+ case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ;
+ case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ;
default: BUG();
}
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index a74311beda35..e37c78bbe1ca 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -42,8 +42,8 @@ static const struct kvm_regs default_regs_reset = {
};
static const struct kvm_regs default_regs_reset32 = {
- .regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
- COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
+ .regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
+ PSR_AA32_I_BIT | PSR_AA32_F_BIT),
};
static bool cpu_has_32bit_el1(void)
@@ -77,8 +77,12 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_PMU_V3:
r = kvm_arm_support_pmu_v3();
break;
+ case KVM_CAP_ARM_INJECT_SERROR_ESR:
+ r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+ break;
case KVM_CAP_SET_GUEST_DEBUG:
case KVM_CAP_VCPU_ATTRIBUTES:
+ case KVM_CAP_VCPU_EVENTS:
r = 1;
break;
default:
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a4363735d3f8..22fbbdbece3c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -194,7 +194,16 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
if (!p->is_write)
return read_from_write_only(vcpu, p, r);
- kvm_set_way_flush(vcpu);
+ /*
+ * Only track S/W ops if we don't have FWB. It still indicates
+ * that the guest is a bit broken (S/W operations should only
+ * be done by firmware, knowing that there is only a single
+ * CPU left in the system, and certainly not from non-secure
+ * software).
+ */
+ if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ kvm_set_way_flush(vcpu);
+
return true;
}
@@ -243,10 +252,43 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
+ bool g1;
+
if (!p->is_write)
return read_from_write_only(vcpu, p, r);
- vgic_v3_dispatch_sgi(vcpu, p->regval);
+ /*
+ * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
+ * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
+ * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
+ * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
+ * group.
+ */
+ if (p->is_aarch32) {
+ switch (p->Op1) {
+ default: /* Keep GCC quiet */
+ case 0: /* ICC_SGI1R */
+ g1 = true;
+ break;
+ case 1: /* ICC_ASGI1R */
+ case 2: /* ICC_SGI0R */
+ g1 = false;
+ break;
+ }
+ } else {
+ switch (p->Op2) {
+ default: /* Keep GCC quiet */
+ case 5: /* ICC_SGI1R_EL1 */
+ g1 = true;
+ break;
+ case 6: /* ICC_ASGI1R_EL1 */
+ case 7: /* ICC_SGI0R_EL1 */
+ g1 = false;
+ break;
+ }
+ }
+
+ vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
return true;
}
@@ -1303,6 +1345,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
+ { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
+ { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
@@ -1613,8 +1657,6 @@ static const struct sys_reg_desc cp14_64_regs[] = {
* register).
*/
static const struct sys_reg_desc cp15_regs[] = {
- { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
-
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
@@ -1737,8 +1779,10 @@ static const struct sys_reg_desc cp15_regs[] = {
static const struct sys_reg_desc cp15_64_regs[] = {
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
- { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
+ { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
+ { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
+ { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
};
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 137710f4dac3..68755fd70dcf 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
+lib-y := clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
clear_page.o memchr.o memcpy.o memmove.o memset.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
deleted file mode 100644
index 43ac736baa5b..000000000000
--- a/arch/arm64/lib/bitops.S
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Based on arch/arm/lib/bitops.h
- *
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/lse.h>
-
-/*
- * x0: bits 5:0 bit offset
- * bits 31:6 word offset
- * x1: address
- */
- .macro bitop, name, llsc, lse
-ENTRY( \name )
- and w3, w0, #63 // Get bit offset
- eor w0, w0, w3 // Clear low bits
- mov x2, #1
- add x1, x1, x0, lsr #3 // Get word offset
-alt_lse " prfm pstl1strm, [x1]", "nop"
- lsl x3, x2, x3 // Create mask
-
-alt_lse "1: ldxr x2, [x1]", "\lse x3, [x1]"
-alt_lse " \llsc x2, x2, x3", "nop"
-alt_lse " stxr w0, x2, [x1]", "nop"
-alt_lse " cbnz w0, 1b", "nop"
-
- ret
-ENDPROC(\name )
- .endm
-
- .macro testop, name, llsc, lse
-ENTRY( \name )
- and w3, w0, #63 // Get bit offset
- eor w0, w0, w3 // Clear low bits
- mov x2, #1
- add x1, x1, x0, lsr #3 // Get word offset
-alt_lse " prfm pstl1strm, [x1]", "nop"
- lsl x4, x2, x3 // Create mask
-
-alt_lse "1: ldxr x2, [x1]", "\lse x4, x2, [x1]"
- lsr x0, x2, x3
-alt_lse " \llsc x2, x2, x4", "nop"
-alt_lse " stlxr w5, x2, [x1]", "nop"
-alt_lse " cbnz w5, 1b", "nop"
-alt_lse " dmb ish", "nop"
-
- and x0, x0, #1
- ret
-ENDPROC(\name )
- .endm
-
-/*
- * Atomic bit operations.
- */
- bitop change_bit, eor, steor
- bitop clear_bit, bic, stclr
- bitop set_bit, orr, stset
-
- testop test_and_change_bit, eor, ldeoral
- testop test_and_clear_bit, bic, ldclral
- testop test_and_set_bit, orr, ldsetal
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 30334d81b021..0c22ede52f90 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -35,7 +35,7 @@
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(flush_icache_range)
+ENTRY(__flush_icache_range)
/* FALLTHROUGH */
/*
@@ -77,7 +77,7 @@ alternative_else_nop_endif
9:
mov x0, #-EFAULT
b 1b
-ENDPROC(flush_icache_range)
+ENDPROC(__flush_icache_range)
ENDPROC(__flush_cache_user_range)
/*
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 61e93f0b5482..072c51fb07d7 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -355,7 +355,7 @@ static int __init atomic_pool_init(void)
if (dev_get_cma_area(NULL))
page = dma_alloc_from_contiguous(NULL, nr_pages,
- pool_size_order, GFP_KERNEL);
+ pool_size_order, false);
else
page = alloc_pages(GFP_DMA32, pool_size_order);
@@ -573,7 +573,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
struct page *page;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), gfp);
+ get_order(size), gfp & __GFP_NOWARN);
if (!page)
return NULL;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index b8eecc7b9531..50b30ff30de4 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -379,12 +379,12 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
-static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
unsigned int mm_flags, unsigned long vm_flags,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
- int fault;
+ vm_fault_t fault;
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
@@ -427,7 +427,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct task_struct *tsk;
struct mm_struct *mm;
struct siginfo si;
- int fault, major = 0;
+ vm_fault_t fault, major = 0;
unsigned long vm_flags = VM_READ | VM_WRITE;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -727,12 +727,7 @@ static const struct fault_info fault_info[] = {
int handle_guest_sea(phys_addr_t addr, unsigned int esr)
{
- int ret = -ENOENT;
-
- if (IS_ENABLED(CONFIG_ACPI_APEI_SEA))
- ret = ghes_notify_sea();
-
- return ret;
+ return ghes_notify_sea();
}
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
@@ -879,7 +874,7 @@ void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
*/
WARN_ON_ONCE(in_interrupt());
- config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
asm(SET_PSTATE_PAN(1));
}
#endif /* CONFIG_ARM64_PAN */
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 1059884f9a6f..30695a868107 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -66,6 +66,7 @@ void __sync_icache_dcache(pte_t pte)
sync_icache_aliases(page_address(page),
PAGE_SIZE << compound_order(page));
}
+EXPORT_SYMBOL_GPL(__sync_icache_dcache);
/*
* This function is called when a page has been modified by the kernel. Mark
@@ -82,7 +83,7 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(flush_icache_range);
+EXPORT_SYMBOL(__flush_icache_range);
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index ecc6818191df..192b3ba07075 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -108,7 +108,6 @@ static pte_t get_clear_flush(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
- struct vm_area_struct vma = { .vm_mm = mm };
pte_t orig_pte = huge_ptep_get(ptep);
bool valid = pte_valid(orig_pte);
unsigned long i, saddr = addr;
@@ -125,8 +124,10 @@ static pte_t get_clear_flush(struct mm_struct *mm,
orig_pte = pte_mkdirty(orig_pte);
}
- if (valid)
+ if (valid) {
+ struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
flush_tlb_range(&vma, saddr, addr);
+ }
return orig_pte;
}
@@ -145,7 +146,7 @@ static void clear_flush(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
- struct vm_area_struct vma = { .vm_mm = mm };
+ struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
unsigned long i, saddr = addr;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 325cfb3b858a..787e27964ab9 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -287,7 +287,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
- return memblock_is_map_memory(pfn << PAGE_SHIFT);
+ phys_addr_t addr = pfn << PAGE_SHIFT;
+
+ if ((addr >> PAGE_SHIFT) != pfn)
+ return 0;
+ return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif
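
The extra check catches pfns whose byte address cannot survive the shift: if shifting back does not reproduce the pfn, high bits were lost and the pfn is bogus. A sketch of the round-trip guard (PAGE_SHIFT and the sample pfn are assumed values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Reject pfns whose byte address does not survive the shift round-trip. */
static bool pfn_addr_valid(uint64_t pfn)
{
    uint64_t addr = pfn << PAGE_SHIFT;

    return (addr >> PAGE_SHIFT) == pfn;
}

int main(void)
{
    printf("%d\n", pfn_addr_valid(0x12345));                 /* 1: fits */
    printf("%d\n", pfn_addr_valid(0xFFF0000000000000ULL));   /* 0: shifted out */
    return 0;
}
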
@@ -611,11 +615,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Make sure we chose the upper bound of sizeof(struct page)
- * correctly.
+ * correctly when sizing the VMEMMAP array.
*/
BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+#endif
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 493ff75670ff..65f86271f02b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -45,6 +45,7 @@
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
+#include <asm/tlbflush.h>
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
@@ -977,12 +978,51 @@ int pmd_clear_huge(pmd_t *pmdp)
return 1;
}
-int pud_free_pmd_page(pud_t *pud)
+int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
- return pud_none(*pud);
+ pte_t *table;
+ pmd_t pmd;
+
+ pmd = READ_ONCE(*pmdp);
+
+ /* No-op for empty entry and WARN_ON for valid entry */
+ if (!pmd_present(pmd) || !pmd_table(pmd)) {
+ VM_WARN_ON(!pmd_table(pmd));
+ return 1;
+ }
+
+ table = pte_offset_kernel(pmdp, addr);
+ pmd_clear(pmdp);
+ __flush_tlb_kernel_pgtable(addr);
+ pte_free_kernel(NULL, table);
+ return 1;
}
-int pmd_free_pte_page(pmd_t *pmd)
+int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
- return pmd_none(*pmd);
+ pmd_t *table;
+ pmd_t *pmdp;
+ pud_t pud;
+ unsigned long next, end;
+
+ pud = READ_ONCE(*pudp);
+
+ /* No-op for empty entry and WARN_ON for valid entry */
+ if (!pud_present(pud) || !pud_table(pud)) {
+ VM_WARN_ON(!pud_table(pud));
+ return 1;
+ }
+
+ table = pmd_offset(pudp, addr);
+ pmdp = table;
+ next = addr;
+ end = addr + PUD_SIZE;
+ do {
+ pmd_free_pte_page(pmdp, next);
+ } while (pmdp++, next += PMD_SIZE, next != end);
+
+ pud_clear(pudp);
+ __flush_tlb_kernel_pgtable(addr);
+ pmd_free(NULL, table);
+ return 1;
}
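[editor's note] For context, these hooks are called from the huge-mapping path in lib/ioremap.c, which must free any stale page-table page under a slot before it installs a block entry. Paraphrasing the caller of that era (a sketch, not the exact source):

    if (ioremap_pmd_enabled() &&
        ((next - addr) == PMD_SIZE) &&
        IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
        pmd_free_pte_page(pmdp, addr)) {
            if (pmd_set_huge(pmdp, phys_addr + addr, prot))
                    continue;
    }

The old arm64 stubs only reported whether the entry was already clear; the new ones actually clear it, flush the kernel TLB entry for the table, and free the table page, which is what allows an address range to be reused with a different mapping size.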
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index dad128ba98bf..146c04ceaa51 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -70,19 +70,32 @@ EXPORT_SYMBOL(cpumask_of_node);
#endif
-static void map_cpu_to_node(unsigned int cpu, int nid)
+static void numa_update_cpu(unsigned int cpu, bool remove)
{
- set_cpu_numa_node(cpu, nid);
- if (nid >= 0)
+ int nid = cpu_to_node(cpu);
+
+ if (nid == NUMA_NO_NODE)
+ return;
+
+ if (remove)
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
+ else
cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}
-void numa_clear_node(unsigned int cpu)
+void numa_add_cpu(unsigned int cpu)
{
- int nid = cpu_to_node(cpu);
+ numa_update_cpu(cpu, false);
+}
- if (nid >= 0)
- cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
+void numa_remove_cpu(unsigned int cpu)
+{
+ numa_update_cpu(cpu, true);
+}
+
+void numa_clear_node(unsigned int cpu)
+{
+ numa_remove_cpu(cpu);
set_cpu_numa_node(cpu, NUMA_NO_NODE);
}
@@ -116,7 +129,7 @@ static void __init setup_node_to_cpumask_map(void)
*/
void numa_store_cpu_info(unsigned int cpu)
{
- map_cpu_to_node(cpu, cpu_to_node_map[cpu]);
+ set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
}
void __init early_map_cpu_to_node(unsigned int cpu, int nid)
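[editor's note] numa_add_cpu()/numa_remove_cpu() mirror the API other architectures (notably x86) already expose, so the CPU hotplug path can keep node_to_cpumask_map coherent as CPUs come and go. A hypothetical caller, purely for illustration (the hook names here are made up):

    static int arch_cpu_online(unsigned int cpu)    /* hypothetical hook */
    {
            numa_add_cpu(cpu);      /* set cpu in node_to_cpumask_map[nid] */
            return 0;
    }

    static int arch_cpu_offline(unsigned int cpu)   /* hypothetical hook */
    {
            numa_remove_cpu(cpu);   /* clear it again */
            return 0;
    }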
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
index 02b18f8b2905..24d786fc3a4c 100644
--- a/arch/arm64/mm/ptdump_debugfs.c
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -10,18 +10,7 @@ static int ptdump_show(struct seq_file *m, void *v)
ptdump_walk_pgd(m, info);
return 0;
}
-
-static int ptdump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ptdump_show, inode->i_private);
-}
-
-static const struct file_operations ptdump_fops = {
- .open = ptdump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump);
int ptdump_debugfs_register(struct ptdump_info *info, const char *name)
{
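[editor's note] DEFINE_SHOW_ATTRIBUTE(ptdump) generates what the deleted lines spelled out by hand. Assuming the <linux/seq_file.h> helper of this era, the expansion is roughly:

    static int ptdump_open(struct inode *inode, struct file *file)
    {
            return single_open(file, ptdump_show, inode->i_private);
    }

    static const struct file_operations ptdump_fops = {
            .owner   = THIS_MODULE,
            .open    = ptdump_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

The only functional delta is the .owner field, which the hand-rolled version had omitted.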
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index bf59855628ac..a641b0bf1611 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -53,12 +53,8 @@ config C6X_BIG_KERNEL
If unsure, say N.
-source "init/Kconfig"
-
# Use the generic interrupt handling code in kernel/irq/
-source "kernel/Kconfig.freezer"
-
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
@@ -114,43 +110,6 @@ config KERNEL_RAM_BASE_ADDRESS
default 0xe0000000 if SOC_TMS320C6472
default 0x80000000
-source "mm/Kconfig"
-
-source "kernel/Kconfig.preempt"
-
source "kernel/Kconfig.hz"
endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
-
-config ACCESS_CHECK
- bool "Check the user pointer address"
- default y
- help
- Usually a pointer passed in from user space is checked to see if its
- address lies in kernel space.
-
- Say N here to disable that check and improve performance.
-
-endmenu
diff --git a/arch/c6x/Kconfig.debug b/arch/c6x/Kconfig.debug
new file mode 100644
index 000000000000..c299e0d8eca3
--- /dev/null
+++ b/arch/c6x/Kconfig.debug
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config ACCESS_CHECK
+ bool "Check the user pointer address"
+ default y
+ help
+ Usually a pointer passed in from user space is checked to see if its
+ address lies in kernel space.
+
+ Say N here to disable that check and improve performance.
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 091d6d04b5e5..0b334b671e90 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -16,6 +16,7 @@ config H8300
select OF_IRQ
select OF_EARLY_FLATTREE
select HAVE_MEMBLOCK
+ select NO_BOOTMEM
select TIMER_OF
select H8300_TMR8
select HAVE_KERNEL_GZIP
@@ -48,40 +49,4 @@ config NR_CPUS
int
default 1
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "arch/h8300/Kconfig.cpu"
-
-menu "Kernel Features"
-
-source "kernel/Kconfig.preempt"
-
-source "mm/Kconfig"
-
-endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
-
-endmenu
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
new file mode 100644
index 000000000000..22a162cd99e8
--- /dev/null
+++ b/arch/h8300/Kconfig.debug
@@ -0,0 +1 @@
+# dummy file, do not delete
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index e1c02ca230cb..cc12b162c222 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -8,6 +8,8 @@
# (C) Copyright 2002-2015 Yoshinori Sato <ysato@users.sourceforge.jp>
#
+KBUILD_DEFCONFIG := edosk2674_defconfig
+
cflags-$(CONFIG_CPU_H8300H) := -mh
aflags-$(CONFIG_CPU_H8300H) := -mh -Wa,--mach=h8300h
ldflags-$(CONFIG_CPU_H8300H) := -mh8300helf_linux
@@ -22,6 +24,8 @@ KBUILD_CFLAGS += -DUTS_SYSNAME=\"uClinux\"
KBUILD_AFLAGS += $(aflags-y)
LDFLAGS += $(ldflags-y)
+CHECKFLAGS += -msize-long
+
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := h8300-unknown-linux-
endif
diff --git a/arch/h8300/boot/dts/h8300h_sim.dts b/arch/h8300/boot/dts/h8300h_sim.dts
index f1c31cecdec8..595398b9d018 100644
--- a/arch/h8300/boot/dts/h8300h_sim.dts
+++ b/arch/h8300/boot/dts/h8300h_sim.dts
@@ -73,7 +73,7 @@
timer16: timer@ffff68 {
compatible = "renesas,16bit-timer";
reg = <0xffff68 8>, <0xffff60 8>;
- interrupts = <24 0>;
+ interrupts = <26 0>;
renesas,channel = <0>;
clocks = <&fclk>;
clock-names = "fck";
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 941e7554e886..c6b6a06231b2 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -2,8 +2,10 @@
#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__
+#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
+#include <asm/irqflags.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -15,8 +17,6 @@
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#include <linux/kernel.h>
-
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
@@ -69,18 +69,6 @@ ATOMIC_OPS(sub, -=)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_inc_return(v) atomic_add_return(1, v)
-#define atomic_dec_return(v) atomic_sub_return(1, v)
-
-#define atomic_inc(v) (void)atomic_inc_return(v)
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec(v) (void)atomic_dec_return(v)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -94,7 +82,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
h8300flags flags;
@@ -106,5 +94,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
arch_local_irq_restore(flags);
return ret;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif /* __ARCH_H8300_ATOMIC __ */
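[editor's note] The rename is part of an atomics-API consolidation: <linux/atomic.h> now builds atomic_add_unless() and atomic_inc_not_zero() on top of atomic_fetch_add_unless(), and the trailing #define tells the generic header that this architecture supplies its own version. For architectures that define nothing, the generic fallback is (sketched, modulo exact kernel version) a cmpxchg loop:

    #ifndef atomic_fetch_add_unless
    static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
    {
            int c = atomic_read(v);

            do {
                    if (unlikely(c == u))
                            break;
            } while (!atomic_try_cmpxchg(v, &c, c + a));

            return c;       /* old value; callers test c != u */
    }
    #endif

h8300 instead implements it with IRQ masking, which is the cheapest correct option on a strictly uniprocessor port.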
diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h
index ea0cb0cf6a8b..647a83bd40b7 100644
--- a/arch/h8300/include/asm/bitops.h
+++ b/arch/h8300/include/asm/bitops.h
@@ -29,11 +29,11 @@ static inline unsigned long ffz(unsigned long word)
result = -1;
__asm__("1:\n\t"
- "shlr.l %2\n\t"
+ "shlr.l %1\n\t"
"adds #1,%0\n\t"
"bcs 1b"
- : "=r"(result)
- : "0"(result), "r"(word));
+ : "=r"(result),"=r"(word)
+ : "0"(result), "1"(word));
return result;
}
@@ -66,7 +66,7 @@ H8300_GEN_BITOP(change_bit, "bnot")
#undef H8300_GEN_BITOP
-static inline int test_bit(int nr, const unsigned long *addr)
+static inline int test_bit(int nr, const volatile unsigned long *addr)
{
int ret = 0;
unsigned char *b_addr;
@@ -162,11 +162,11 @@ static inline unsigned long __ffs(unsigned long word)
result = -1;
__asm__("1:\n\t"
- "shlr.l %2\n\t"
+ "shlr.l %1\n\t"
"adds #1,%0\n\t"
"bcc 1b"
- : "=r" (result)
- : "0"(result), "r"(word));
+ : "=r" (result),"=r"(word)
+ : "0"(result), "1"(word));
return result;
}
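[editor's note] ffz() and __ffs() shared the same bug: the asm body shifts the register holding word, but word was declared input-only, so the compiler could legitimately assume the register still held word afterwards. The fix promotes it to an input/output pair. Abridged, the before/after pattern is:

    /* BROKEN: %2 (word) is modified but declared as a plain input */
    __asm__("1: shlr.l %2 ..." : "=r"(result)
                               : "0"(result), "r"(word));

    /* FIXED: word becomes output %1, tied back to its input via "1" */
    __asm__("1: shlr.l %1 ..." : "=r"(result), "=r"(word)
                               : "0"(result), "1"(word));

The general rule: anything an asm statement modifies must appear in the output list or the clobber list, never only as an input. (The sim-console fix further down applies the same rule to er0.)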
diff --git a/arch/h8300/include/asm/ptrace.h b/arch/h8300/include/asm/ptrace.h
index 313cafa85380..66d383848ff1 100644
--- a/arch/h8300/include/asm/ptrace.h
+++ b/arch/h8300/include/asm/ptrace.h
@@ -4,6 +4,8 @@
#include <uapi/asm/ptrace.h>
+struct task_struct;
+
#ifndef __ASSEMBLY__
#ifndef PS_S
#define PS_S (0x10)
diff --git a/arch/h8300/kernel/kgdb.c b/arch/h8300/kernel/kgdb.c
index 602e478afbd5..1a1d30cb0609 100644
--- a/arch/h8300/kernel/kgdb.c
+++ b/arch/h8300/kernel/kgdb.c
@@ -129,7 +129,7 @@ void kgdb_arch_exit(void)
/* Nothing to do */
}
-const struct kgdb_arch arch_kgdb_ops = {
+struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: trapa #2 */
.gdb_bpt_instr = { 0x57, 0x20 },
};
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index a4d0470c10a9..34e2df5c0d6d 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -23,7 +23,6 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/memblock.h>
@@ -71,10 +70,6 @@ void __init h8300_fdt_init(void *fdt, char *bootargs)
static void __init bootmem_init(void)
{
- int bootmap_size;
- unsigned long ram_start_pfn;
- unsigned long free_ram_start_pfn;
- unsigned long ram_end_pfn;
struct memblock_region *region;
memory_end = memory_start = 0;
@@ -88,33 +83,17 @@ static void __init bootmem_init(void)
if (!memory_end)
panic("No memory!");
- ram_start_pfn = PFN_UP(memory_start);
- /* free_ram_start_pfn is first page after kernel */
- free_ram_start_pfn = PFN_UP(__pa(_end));
- ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ /* setup bootmem globals (we use no_bootmem, but mm still depends on this) */
+ min_low_pfn = PFN_UP(memory_start);
+ max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_pfn = max_low_pfn;
- max_pfn = ram_end_pfn;
+ memblock_reserve(__pa(_stext), _end - _stext);
- /*
- * give all the memory to the bootmap allocator, tell it to put the
- * boot mem_map at the start of memory
- */
- bootmap_size = init_bootmem_node(NODE_DATA(0),
- free_ram_start_pfn,
- 0,
- ram_end_pfn);
- /*
- * free the usable memory, we have to make sure we do not free
- * the bootmem bitmap so we then reserve it after freeing it :-)
- */
- free_bootmem(PFN_PHYS(free_ram_start_pfn),
- (ram_end_pfn - free_ram_start_pfn) << PAGE_SHIFT);
- reserve_bootmem(PFN_PHYS(free_ram_start_pfn), bootmap_size,
- BOOTMEM_DEFAULT);
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
- for_each_memblock(reserved, region) {
- reserve_bootmem(region->base, region->size, BOOTMEM_DEFAULT);
- }
+ memblock_dump_all();
}
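[editor's note] This hunk is the canonical shape of a NO_BOOTMEM conversion: describe RAM and carve-outs to memblock, set min_low_pfn/max_low_pfn/max_pfn, and delete the bitmap bookkeeping. Sketching the resulting flow (helper names as of this kernel era):

    /* bootmem_init(): memory was already memblock_add()ed from the FDT */
    memblock_reserve(__pa(_stext), _end - _stext);   /* kernel image */
    early_init_fdt_reserve_self();                   /* the FDT blob  */

    /* later, mem_init() frees everything still unreserved: */
    free_all_bootmem();  /* NO_BOOTMEM variant walks memblock free ranges */

No bootmem bitmap ever exists, so the free-then-re-reserve dance in the deleted code disappears with it.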
void __init setup_arch(char **cmdline_p)
@@ -188,15 +167,6 @@ const struct seq_operations cpuinfo_op = {
.show = show_cpuinfo,
};
-static int __init device_probe(void)
-{
- of_platform_populate(NULL, NULL, NULL, NULL);
-
- return 0;
-}
-
-device_initcall(device_probe);
-
#if defined(CONFIG_CPU_H8300H)
#define get_wait(base, addr) ({ \
int baddr; \
diff --git a/arch/h8300/kernel/sim-console.c b/arch/h8300/kernel/sim-console.c
index 46138f55a9ea..03aa35b1a08c 100644
--- a/arch/h8300/kernel/sim-console.c
+++ b/arch/h8300/kernel/sim-console.c
@@ -13,12 +13,13 @@
static void sim_write(struct console *con, const char *s, unsigned n)
{
- register const int fd __asm__("er0") = 1; /* stdout */
register const char *_ptr __asm__("er1") = s;
register const unsigned _len __asm__("er2") = n;
- __asm__(".byte 0x5e,0x00,0x00,0xc7\n\t" /* jsr @0xc7 (sys_write) */
- : : "g"(fd), "g"(_ptr), "g"(_len));
+ __asm__("sub.l er0,er0\n\t" /* er0 = 1 (stdout) */
+ "inc.l #1,er0\n\t"
+ ".byte 0x5e,0x00,0x00,0xc7\n\t" /* jsr @0xc7 (sys_write) */
+ : : "g"(_ptr), "g"(_len):"er0");
}
static int __init sim_setup(struct earlycon_device *device, const char *opt)
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 37adb2003033..89a4b22f34d9 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -4,6 +4,7 @@ comment "Linux Kernel Configuration for Hexagon"
config HEXAGON
def_bool y
+ select ARCH_NO_PREEMPT
select HAVE_OPROFILE
# Other pending projects/to-do items.
# select HAVE_REGS_AND_STACK_ACCESS_API
@@ -146,26 +147,6 @@ config PAGE_SIZE_256KB
endchoice
-source "mm/Kconfig"
-
source "kernel/Kconfig.hz"
endmenu
-
-source "init/Kconfig"
-source "kernel/Kconfig.freezer"
-source "drivers/Kconfig"
-source "fs/Kconfig"
-
-menu "Executable File Formats"
-source "fs/Kconfig.binfmt"
-endmenu
-
-source "net/Kconfig"
-source "security/Kconfig"
-source "crypto/Kconfig"
-source "lib/Kconfig"
-
-menu "Kernel hacking"
-source "lib/Kconfig.debug"
-endmenu
diff --git a/arch/hexagon/Kconfig.debug b/arch/hexagon/Kconfig.debug
new file mode 100644
index 000000000000..22a162cd99e8
--- /dev/null
+++ b/arch/hexagon/Kconfig.debug
@@ -0,0 +1 @@
+# dummy file, do not delete
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index fb3dfb2a667e..311b9894ccc8 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -164,7 +164,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
@@ -173,7 +173,7 @@ ATOMIC_OPS(xor)
*
*/
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int __oldval;
register int tmp;
@@ -196,18 +196,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
);
return __oldval;
}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
-#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
-
-#define atomic_inc_return(v) (atomic_add_return(1, v))
-#define atomic_dec_return(v) (atomic_sub_return(1, v))
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 933bbcef5363..eb263e61daf4 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -52,7 +52,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
struct mm_struct *mm = current->mm;
int si_signo;
int si_code = SEGV_MAPERR;
- int fault;
+ vm_fault_t fault;
const struct exception_table_entry *fixup;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
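[editor's note] The int -> vm_fault_t changes scattered through this series come from a tree-wide cleanup giving fault codes their own type. Assuming the <linux/mm_types.h> definition the series converges on:

    typedef unsigned int __bitwise vm_fault_t;   /* carries VM_FAULT_* bits */

The __bitwise annotation lets sparse catch call sites that mix the VM_FAULT_* bit-space with ordinary integers; generated code is unchanged.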
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ff861420b8f5..8b4a0c1748c0 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -4,10 +4,6 @@ config PGTABLE_LEVELS
range 3 4 if !IA64_PAGE_SIZE_64KB
default 3
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "Processor type and features"
config IA64
@@ -16,6 +12,7 @@ config IA64
select ARCH_MIGHT_HAVE_PC_SERIO
select PCI if (!IA64_HP_SIM)
select ACPI if (!IA64_HP_SIM)
+ select ARCH_SUPPORTS_ACPI if (!IA64_HP_SIM)
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select HAVE_UNSTABLE_SCHED_CLOCK
@@ -31,6 +28,7 @@ config IA64
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
+ select NO_BOOTMEM
select HAVE_VIRT_CPU_ACCOUNTING
select ARCH_HAS_DMA_MARK_CLEAN
select ARCH_HAS_SG_CHAIN
@@ -368,10 +366,6 @@ config FORCE_CPEI_RETARGET
This option is useful for enabling this feature on older BIOSes as well.
You can also enable this by using boot command line option force_cpei=1.
-source "kernel/Kconfig.preempt"
-
-source "mm/Kconfig"
-
config ARCH_SELECT_MEMORY_MODEL
def_bool y
@@ -532,8 +526,6 @@ config CRASH_DUMP
source "drivers/firmware/Kconfig"
-source "fs/Kconfig.binfmt"
-
endmenu
menu "Power management and ACPI options"
@@ -574,10 +566,6 @@ endmenu
endif
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "arch/ia64/hp/sim/Kconfig"
config MSPEC
@@ -588,13 +576,3 @@ config MSPEC
If you have an ia64 and you want to enable memory special
operations support (formerly known as fetchop), say Y here,
otherwise say N.
-
-source "fs/Kconfig"
-
-source "arch/ia64/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug
index 677c409425df..1371efc9b005 100644
--- a/arch/ia64/Kconfig.debug
+++ b/arch/ia64/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
choice
prompt "Physical memory granularity"
@@ -56,5 +53,3 @@ config IA64_DEBUG_IRQ
Selecting this option turns on bug checking for the IA-64 irq_save
and restore instructions. It's useful for tracking down spinlock
problems, but slow! If you're unsure, select N.
-
-endmenu
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index ee5b652d320a..671ce1e3f6f2 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1805,7 +1805,7 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
{ SX2000_IOC_ID, "sx2000", NULL },
};
-static void ioc_init(unsigned long hpa, struct ioc *ioc)
+static void __init ioc_init(unsigned long hpa, struct ioc *ioc)
{
struct ioc_iommu *info;
@@ -2002,7 +2002,7 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
#endif
}
-static void acpi_sba_ioc_add(struct ioc *ioc)
+static void __init acpi_sba_ioc_add(struct ioc *ioc)
{
acpi_handle handle = ioc->handle;
acpi_status status;
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 2524fb60fbc2..206530d0751b 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -215,91 +215,10 @@ ATOMIC64_FETCH_OP(xor, ^)
(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
-
-static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
-{
- long c, old, dec;
- c = atomic64_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic64_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
-/*
- * Atomically add I to V and return TRUE if the resulting value is
- * negative.
- */
-static __inline__ int
-atomic_add_negative (int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
-static __inline__ long
-atomic64_add_negative (__s64 i, atomic64_t *v)
-{
- return atomic64_add_return(i, v) < 0;
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-
#define atomic_add(i,v) (void)atomic_add_return((i), (v))
#define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
#define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
-#define atomic64_inc(v) atomic64_add(1, (v))
-#define atomic64_dec(v) atomic64_sub(1, (v))
#endif /* _ASM_IA64_ATOMIC_H */
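[editor's note] Every inc/dec/and-test helper deleted here is now provided centrally: <linux/atomic.h> defines any operation the architecture leaves undefined, following a pattern like (sketch):

    #ifndef atomic_inc
    #define atomic_inc(v)            atomic_add(1, (v))
    #endif

    #ifndef atomic64_dec_and_test
    #define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
    #endif

so an arch header only needs to spell out the primitives it can actually implement better than the generic composition.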
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index fb0651961e2c..1e6fef69bb01 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -83,12 +83,14 @@ virt_to_phys (volatile void *address)
{
return (unsigned long) address - PAGE_OFFSET;
}
+#define virt_to_phys virt_to_phys
static inline void*
phys_to_virt (unsigned long address)
{
return (void *) (address + PAGE_OFFSET);
}
+#define phys_to_virt phys_to_virt
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
@@ -433,9 +435,11 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
{
return ioremap(phys_addr, size);
}
+#define ioremap ioremap
+#define ioremap_nocache ioremap_nocache
#define ioremap_cache ioremap_cache
#define ioremap_uc ioremap_nocache
-
+#define iounmap iounmap
/*
* String version of IO memory access ops:
@@ -444,6 +448,14 @@ extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);
+#define memcpy_fromio memcpy_fromio
+#define memcpy_toio memcpy_toio
+#define memset_io memset_io
+#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#include <asm-generic/io.h>
+#undef PCI_IOBASE
+
# endif /* __KERNEL__ */
#endif /* _ASM_IA64_IO_H */
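[editor's note] The block of `#define name name` lines is the contract with <asm-generic/io.h>: the generic header emits a fallback only for helpers whose names are not yet defined, roughly:

    /* pattern inside asm-generic/io.h (abridged) */
    #ifndef memset_io
    #define memset_io memset_io
    static inline void memset_io(volatile void __iomem *addr, int value,
                                 size_t size)
    {
            memset(__io_virt(addr), value, size);
    }
    #endif

By self-defining each helper it implements, ia64 can include the generic header for everything else; the trailing #undef PCI_IOBASE opts it back out of the default port-I/O base that header would otherwise impose.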
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
index 0302b3664789..580356a2eea6 100644
--- a/arch/ia64/include/asm/kprobes.h
+++ b/arch/ia64/include/asm/kprobes.h
@@ -82,8 +82,6 @@ struct prev_kprobe {
#define ARCH_PREV_KPROBE_SZ 2
struct kprobe_ctlblk {
unsigned long kprobe_status;
- struct pt_regs jprobe_saved_regs;
- unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
unsigned long *bsp;
unsigned long cfm;
atomic_t prev_kprobe_index;
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 44f0ac0df308..516355a774bf 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -115,12 +115,11 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
flush_tlb_all();
} else {
/*
- * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
- * vma pointer.
+ * flush_tlb_range() takes a vma instead of a mm pointer because
+ * some architectures want the vm_flags for ITLB/DTLB flush.
*/
- struct vm_area_struct vma;
+ struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
- vma.vm_mm = tlb->mm;
/* flush the address range from the tlb: */
flush_tlb_range(&vma, start, end);
/* now flush the virt. page-table area mapping the address range: */
diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h
index 5d742bcb0018..4ca110f0a94b 100644
--- a/arch/ia64/include/uapi/asm/break.h
+++ b/arch/ia64/include/uapi/asm/break.h
@@ -14,7 +14,6 @@
*/
#define __IA64_BREAK_KDB 0x80100
#define __IA64_BREAK_KPROBE 0x81000 /* .. 0x81fff */
-#define __IA64_BREAK_JPROBE 0x82000
/*
* OS-specific break numbers:
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 3efba40adc54..c872c4e6bafb 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -114,4 +114,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 498f3da3f225..d0c0ccdd656a 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
-obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
+obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index f4db2168d1b8..00e8e2a1eb19 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -50,8 +50,7 @@ void foo(void)
DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
- DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
- DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
+ DEFINE(IA64_TASK_THREAD_PID_OFFSET,offsetof (struct task_struct, thread_pid));
DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
@@ -68,6 +67,7 @@ void foo(void)
DEFINE(IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,offsetof (struct signal_struct,
group_stop_count));
DEFINE(IA64_SIGNAL_SHARED_PENDING_OFFSET,offsetof (struct signal_struct, shared_pending));
+ DEFINE(IA64_SIGNAL_PIDS_TGID_OFFSET, offsetof (struct signal_struct, pids[PIDTYPE_TGID]));
BLANK();
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index fe742ffafc7a..d80c99a5f55d 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -62,16 +62,16 @@ ENTRY(fsys_getpid)
.prologue
.altrp b6
.body
- add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
+ add r17=IA64_TASK_SIGNAL_OFFSET,r16
;;
- ld8 r17=[r17] // r17 = current->group_leader
+ ld8 r17=[r17] // r17 = current->signal
add r9=TI_FLAGS+IA64_TASK_SIZE,r16
;;
ld4 r9=[r9]
- add r17=IA64_TASK_TGIDLINK_OFFSET,r17
+ add r17=IA64_SIGNAL_PIDS_TGID_OFFSET,r17
;;
and r9=TIF_ALLWORK_MASK,r9
- ld8 r17=[r17] // r17 = current->group_leader->pids[PIDTYPE_PID].pid
+ ld8 r17=[r17] // r17 = current->signal->pids[PIDTYPE_TGID]
;;
add r8=IA64_PID_LEVEL_OFFSET,r17
;;
@@ -96,11 +96,11 @@ ENTRY(fsys_set_tid_address)
.altrp b6
.body
add r9=TI_FLAGS+IA64_TASK_SIZE,r16
- add r17=IA64_TASK_TGIDLINK_OFFSET,r16
+ add r17=IA64_TASK_THREAD_PID_OFFSET,r16
;;
ld4 r9=[r9]
tnat.z p6,p7=r32 // check argument register for being NaT
- ld8 r17=[r17] // r17 = current->pids[PIDTYPE_PID].pid
+ ld8 r17=[r17] // r17 = current->thread_pid
;;
and r9=TIF_ALLWORK_MASK,r9
add r8=IA64_PID_LEVEL_OFFSET,r17
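[editor's note] These offset changes track the pid-unification rework: the per-type pid now hangs off signal_struct (and thread_pid on task_struct), so neither fast path has to bounce through group_leader. In C, what the hand-written assembly now loads is effectively:

    struct pid *tgid = current->signal->pids[PIDTYPE_TGID];  /* fsys_getpid */
    struct pid *pid  = current->thread_pid;          /* fsys_set_tid_address */

    /* previously: current->group_leader->pids[PIDTYPE_PID].pid */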
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
deleted file mode 100644
index f69389c7be1d..000000000000
--- a/arch/ia64/kernel/jprobes.S
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Jprobe specific operations
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) Intel Corporation, 2005
- *
- * 2005-May Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
- * <anil.s.keshavamurthy@intel.com> initial implementation
- *
- * Jprobes (a.k.a. "jump probes" which is built on-top of kprobes) allow a
- * probe to be inserted into the beginning of a function call. The fundamental
- * difference between a jprobe and a kprobe is the jprobe handler is executed
- * in the same context as the target function, while the kprobe handlers
- * are executed in interrupt context.
- *
- * For jprobes we initially gain control by placing a break point in the
- * first instruction of the targeted function. When we catch that specific
- * break, we:
- * * set the return address to our jprobe_inst_return() function
- * * jump to the jprobe handler function
- *
- * Since we fixed up the return address, the jprobe handler will return to our
- * jprobe_inst_return() function, giving us control again. At this point we
- * are back in the parents frame marker, so we do yet another call to our
- * jprobe_break() function to fix up the frame marker as it would normally
- * exist in the target function.
- *
- * Our jprobe_return function then transfers control back to kprobes.c by
- * executing a break instruction using one of our reserved numbers. When we
- * catch that break in kprobes.c, we continue like we do for a normal kprobe
- * by single stepping the emulated instruction, and then returning execution
- * to the correct location.
- */
-#include <asm/asmmacro.h>
-#include <asm/break.h>
-
- /*
- * void jprobe_break(void)
- */
- .section .kprobes.text, "ax"
-ENTRY(jprobe_break)
- break.m __IA64_BREAK_JPROBE
-END(jprobe_break)
-
- /*
- * void jprobe_inst_return(void)
- */
-GLOBAL_ENTRY(jprobe_inst_return)
- br.call.sptk.many b0=jprobe_break
-END(jprobe_inst_return)
-
-GLOBAL_ENTRY(invalidate_stacked_regs)
- movl r16=invalidate_restore_cfm
- ;;
- mov b6=r16
- ;;
- br.ret.sptk.many b6
- ;;
-invalidate_restore_cfm:
- mov r16=ar.rsc
- ;;
- mov ar.rsc=r0
- ;;
- loadrs
- ;;
- mov ar.rsc=r16
- ;;
- br.cond.sptk.many rp
-END(invalidate_stacked_regs)
-
-GLOBAL_ENTRY(flush_register_stack)
- // flush dirty regs to backing store (must be first in insn group)
- flushrs
- ;;
- br.ret.sptk.many rp
-END(flush_register_stack)
-
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f5f3a5e6fcd1..aa41bd5cf9b7 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -35,8 +35,6 @@
#include <asm/sections.h>
#include <asm/exception.h>
-extern void jprobe_inst_return(void);
-
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -480,12 +478,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
*/
break;
}
-
kretprobe_assert(ri, orig_ret_address, trampoline_address);
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
@@ -819,14 +814,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
prepare_ss(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
- } else if (args->err == __IA64_BREAK_JPROBE) {
- /*
- * jprobe instrumented function just completed
- */
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- goto ss_probe;
- }
} else if (!is_ia64_break_inst(regs)) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
@@ -861,15 +848,12 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
set_current_kprobe(p, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
- /*
- * Our pre-handler is specifically requesting that we just
- * do a return. This is used for both the jprobe pre-handler
- * and the kretprobe trampoline
- */
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
#if !defined(CONFIG_PREEMPT)
if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
@@ -992,7 +976,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
case DIE_BREAK:
/* err is break number from ia64_bad_break() */
if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
- || args->err == __IA64_BREAK_JPROBE
|| args->err == 0)
if (pre_kprobes_handler(args))
ret = NOTIFY_STOP;
@@ -1040,74 +1023,6 @@ unsigned long arch_deref_entry_point(void *entry)
return ((struct fnptr *)entry)->ip;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr = arch_deref_entry_point(jp->entry);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- struct param_bsp_cfm pa;
- int bytes;
-
- /*
- * Callee owns the argument space and could overwrite it, eg
- * tail call optimization. So to be absolutely safe
- * we save the argument space before transferring the control
- * to instrumented jprobe function which runs in
- * the process context
- */
- pa.ip = regs->cr_iip;
- unw_init_running(ia64_get_bsp_cfm, &pa);
- bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
- - (char *)pa.bsp;
- memcpy( kcb->jprobes_saved_stacked_regs,
- pa.bsp,
- bytes );
- kcb->bsp = pa.bsp;
- kcb->cfm = pa.cfm;
-
- /* save architectural state */
- kcb->jprobe_saved_regs = *regs;
-
- /* after rfi, execute the jprobe instrumented function */
- regs->cr_iip = addr & ~0xFULL;
- ia64_psr(regs)->ri = addr & 0xf;
- regs->r1 = ((struct fnptr *)(jp->entry))->gp;
-
- /*
- * fix the return address to our jprobe_inst_return() function
- * in the jprobes.S file
- */
- regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
-
- return 1;
-}
-
-/* ia64 does not need this */
-void __kprobes jprobe_return(void)
-{
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- int bytes;
-
- /* restoring architectural state */
- *regs = kcb->jprobe_saved_regs;
-
- /* restoring the original argument space */
- flush_register_stack();
- bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
- - (char *)kcb->bsp;
- memcpy( kcb->bsp,
- kcb->jprobes_saved_stacked_regs,
- bytes );
- invalidate_stacked_regs();
-
- preempt_enable_no_resched();
- return 1;
-}
-
static struct kprobe trampoline_p = {
.pre_handler = trampoline_probe_handler
};
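[editor's note] With jprobes removed tree-wide, a former jprobe user attaches a plain kprobe and reads arguments out of pt_regs itself rather than having them re-marshalled into a handler with the target's signature. A minimal sketch (the probed symbol is an arbitrary example):

    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("%s hit at %pS\n", p->symbol_name,
                    (void *)instruction_pointer(regs));
            return 0;
    }

    static struct kprobe kp = {
            .symbol_name = "do_fork",     /* arbitrary example target */
            .pre_handler = handler_pre,
    };

    /* register_kprobe(&kp) at init, unregister_kprobe(&kp) at exit */

This is also why the reset_current_kprobe()/preempt_enable_no_resched() pair moves from the kretprobe trampoline handler into pre_kprobes_handler() above: a nonzero pre_handler return is now cleaned up in one place.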
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 3b38c717008a..46bff1661836 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2278,17 +2278,15 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
DPRINT(("smpl_buf @%p\n", smpl_buf));
/* allocate vma */
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc(mm);
if (!vma) {
DPRINT(("Cannot allocate vma\n"));
goto error_kmem;
}
- INIT_LIST_HEAD(&vma->anon_vma_chain);
/*
* partially initialize the vma for the sampling buffer
*/
- vma->vm_mm = mm;
vma->vm_file = get_file(filp);
vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
@@ -2346,7 +2344,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
return 0;
error:
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
error_kmem:
pfm_rvfree(smpl_buf, size);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ad43cbf70628..0e6c2d9fb498 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/reboot.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
@@ -383,8 +384,16 @@ reserve_memory (void)
sort_regions(rsvd_region, num_rsvd_regions);
num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
-}
+ /* reserve all regions, except the end-of-memory marker, with memblock */
+ for (n = 0; n < num_rsvd_regions - 1; n++) {
+ struct rsvd_region *region = &rsvd_region[n];
+ phys_addr_t addr = __pa(region->start);
+ phys_addr_t size = region->end - region->start;
+
+ memblock_reserve(addr, size);
+ }
+}
/**
* find_initrd - get initrd parameters from the boot parameter structure
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 7d64b30913d1..e2e40bbd391c 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -34,53 +34,6 @@ static unsigned long max_gap;
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
-/**
- * find_bootmap_location - callback to find a memory area for the bootmap
- * @start: start of region
- * @end: end of region
- * @arg: unused callback data
- *
- * Find a place to put the bootmap and return its starting address in
- * bootmap_start. This address must be page-aligned.
- */
-static int __init
-find_bootmap_location (u64 start, u64 end, void *arg)
-{
- u64 needed = *(unsigned long *)arg;
- u64 range_start, range_end, free_start;
- int i;
-
-#if IGNORE_PFN0
- if (start == PAGE_OFFSET) {
- start += PAGE_SIZE;
- if (start >= end)
- return 0;
- }
-#endif
-
- free_start = PAGE_OFFSET;
-
- for (i = 0; i < num_rsvd_regions; i++) {
- range_start = max(start, free_start);
- range_end = min(end, rsvd_region[i].start & PAGE_MASK);
-
- free_start = PAGE_ALIGN(rsvd_region[i].end);
-
- if (range_end <= range_start)
- continue; /* skip over empty range */
-
- if (range_end - range_start >= needed) {
- bootmap_start = __pa(range_start);
- return -1; /* done */
- }
-
- /* nothing more available in this segment */
- if (range_end == end)
- return 0;
- }
- return 0;
-}
-
#ifdef CONFIG_SMP
static void *cpu_data;
/**
@@ -196,8 +149,6 @@ setup_per_cpu_areas(void)
void __init
find_memory (void)
{
- unsigned long bootmap_size;
-
reserve_memory();
/* first find highest page frame number */
@@ -205,21 +156,12 @@ find_memory (void)
max_low_pfn = 0;
efi_memmap_walk(find_max_min_low_pfn, NULL);
max_pfn = max_low_pfn;
- /* how many bytes to cover all the pages */
- bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
-
- /* look for a location to hold the bootmap */
- bootmap_start = ~0UL;
- efi_memmap_walk(find_bootmap_location, &bootmap_size);
- if (bootmap_start == ~0UL)
- panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
- bootmap_size = init_bootmem_node(NODE_DATA(0),
- (bootmap_start >> PAGE_SHIFT), 0, max_pfn);
-
- /* Free all available memory, then mark bootmem-map as being in use. */
- efi_memmap_walk(filter_rsvd_memory, free_bootmem);
- reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+ efi_memmap_walk(filter_memory, register_active_ranges);
+#else
+ memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
+#endif
find_initrd();
@@ -244,11 +186,9 @@ paging_init (void)
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_VIRTUAL_MEM_MAP
- efi_memmap_walk(filter_memory, register_active_ranges);
efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
if (max_gap < LARGE_GAP) {
vmem_map = (struct page *) 0;
- free_area_init_nodes(max_zone_pfns);
} else {
unsigned long map_size;
@@ -266,13 +206,10 @@ paging_init (void)
*/
NODE_DATA(0)->node_mem_map = vmem_map +
find_min_pfn_with_active_regions();
- free_area_init_nodes(max_zone_pfns);
printk("Virtual mem_map starts at 0x%p\n", mem_map);
}
-#else /* !CONFIG_VIRTUAL_MEM_MAP */
- memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
- free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
+ free_area_init_nodes(max_zone_pfns);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 7d9bd20319ff..1928d5719e41 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -20,6 +20,7 @@
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
@@ -38,9 +39,6 @@ struct early_node_data {
struct ia64_node_data *node_data;
unsigned long pernode_addr;
unsigned long pernode_size;
-#ifdef CONFIG_ZONE_DMA32
- unsigned long num_dma_physpages;
-#endif
unsigned long min_pfn;
unsigned long max_pfn;
};
@@ -60,33 +58,31 @@ pg_data_t *pgdat_list[MAX_NUMNODES];
(((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
/**
- * build_node_maps - callback to setup bootmem structs for each node
+ * build_node_maps - callback to setup mem_data structs for each node
* @start: physical start of range
* @len: length of range
* @node: node where this range resides
*
- * We allocate a struct bootmem_data for each piece of memory that we wish to
+ * Detect extents of each piece of memory that we wish to
* treat as a virtually contiguous block (i.e. each node). Each such block
* must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
* if necessary. Any non-existent pages will simply be part of the virtual
- * memmap. We also update min_low_pfn and max_low_pfn here as we receive
- * memory ranges from the caller.
+ * memmap.
*/
static int __init build_node_maps(unsigned long start, unsigned long len,
int node)
{
unsigned long spfn, epfn, end = start + len;
- struct bootmem_data *bdp = &bootmem_node_data[node];
epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
- if (!bdp->node_low_pfn) {
- bdp->node_min_pfn = spfn;
- bdp->node_low_pfn = epfn;
+ if (!mem_data[node].min_pfn) {
+ mem_data[node].min_pfn = spfn;
+ mem_data[node].max_pfn = epfn;
} else {
- bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
- bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
+ mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
+ mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
}
return 0;
@@ -269,7 +265,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
{
void *cpu_data;
int cpus = early_nr_cpus_node(node);
- struct bootmem_data *bdp = &bootmem_node_data[node];
mem_data[node].pernode_addr = pernode;
mem_data[node].pernode_size = pernodesize;
@@ -284,8 +279,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
mem_data[node].node_data = __va(pernode);
pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-
- pgdat_list[node]->bdata = bdp;
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -325,20 +318,16 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
int node)
{
unsigned long spfn, epfn;
- unsigned long pernodesize = 0, pernode, pages, mapsize;
- struct bootmem_data *bdp = &bootmem_node_data[node];
+ unsigned long pernodesize = 0, pernode;
spfn = start >> PAGE_SHIFT;
epfn = (start + len) >> PAGE_SHIFT;
- pages = bdp->node_low_pfn - bdp->node_min_pfn;
- mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
-
/*
* Make sure this memory falls within this node's usable memory
* since we may have thrown some away in build_maps().
*/
- if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
+ if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
return 0;
/* Don't setup this node's local space twice... */
@@ -353,32 +342,13 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
pernode = NODEDATA_ALIGN(start, node);
/* Is this range big enough for what we want to store here? */
- if (start + len > (pernode + pernodesize + mapsize))
+ if (start + len > (pernode + pernodesize))
fill_pernode(node, pernode, pernodesize);
return 0;
}
/**
- * free_node_bootmem - free bootmem allocator memory for use
- * @start: physical start of range
- * @len: length of range
- * @node: node where this range resides
- *
- * Simply calls the bootmem allocator to free the specified range from
- * the given pg_data_t's bdata struct. After this function has been called
- * for all the entries in the EFI memory map, the bootmem allocator will
- * be ready to service allocation requests.
- */
-static int __init free_node_bootmem(unsigned long start, unsigned long len,
- int node)
-{
- free_bootmem_node(pgdat_list[node], start, len);
-
- return 0;
-}
-
-/**
* reserve_pernode_space - reserve memory for per-node space
*
* Reserve the space used by the bootmem maps & per-node space in the boot
@@ -387,28 +357,17 @@ static int __init free_node_bootmem(unsigned long start, unsigned long len,
*/
static void __init reserve_pernode_space(void)
{
- unsigned long base, size, pages;
- struct bootmem_data *bdp;
+ unsigned long base, size;
int node;
for_each_online_node(node) {
- pg_data_t *pdp = pgdat_list[node];
-
if (node_isset(node, memory_less_mask))
continue;
- bdp = pdp->bdata;
-
- /* First the bootmem_map itself */
- pages = bdp->node_low_pfn - bdp->node_min_pfn;
- size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
- base = __pa(bdp->node_bootmem_map);
- reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
-
/* Now the per-node space */
size = mem_data[node].pernode_size;
base = __pa(mem_data[node].pernode_addr);
- reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
+ memblock_reserve(base, size);
}
}
@@ -528,6 +487,7 @@ void __init find_memory(void)
int node;
reserve_memory();
+ efi_memmap_walk(filter_memory, register_active_ranges);
if (num_online_nodes() == 0) {
printk(KERN_ERR "node info missing!\n");
@@ -544,38 +504,8 @@ void __init find_memory(void)
efi_memmap_walk(find_max_min_low_pfn, NULL);
for_each_online_node(node)
- if (bootmem_node_data[node].node_low_pfn) {
+ if (mem_data[node].min_pfn)
node_clear(node, memory_less_mask);
- mem_data[node].min_pfn = ~0UL;
- }
-
- efi_memmap_walk(filter_memory, register_active_ranges);
-
- /*
- * Initialize the boot memory maps in reverse order since that's
- * what the bootmem allocator expects
- */
- for (node = MAX_NUMNODES - 1; node >= 0; node--) {
- unsigned long pernode, pernodesize, map;
- struct bootmem_data *bdp;
-
- if (!node_online(node))
- continue;
- else if (node_isset(node, memory_less_mask))
- continue;
-
- bdp = &bootmem_node_data[node];
- pernode = mem_data[node].pernode_addr;
- pernodesize = mem_data[node].pernode_size;
- map = pernode + pernodesize;
-
- init_bootmem_node(pgdat_list[node],
- map>>PAGE_SHIFT,
- bdp->node_min_pfn,
- bdp->node_low_pfn);
- }
-
- efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);
reserve_pernode_space();
memory_less_nodes();
@@ -655,36 +585,6 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
}
/**
- * count_node_pages - callback to build per-node memory info structures
- * @start: physical start of range
- * @len: length of range
- * @node: node where this range resides
- *
- * Each node has its own number of physical pages, DMAable pages, start, and
- * end page frame number. This routine will be called by call_pernode_memory()
- * for each piece of usable memory and will setup these values for each node.
- * Very similar to build_maps().
- */
-static __init int count_node_pages(unsigned long start, unsigned long len, int node)
-{
- unsigned long end = start + len;
-
-#ifdef CONFIG_ZONE_DMA32
- if (start <= __pa(MAX_DMA_ADDRESS))
- mem_data[node].num_dma_physpages +=
- (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
-#endif
- start = GRANULEROUNDDOWN(start);
- end = GRANULEROUNDUP(end);
- mem_data[node].max_pfn = max(mem_data[node].max_pfn,
- end >> PAGE_SHIFT);
- mem_data[node].min_pfn = min(mem_data[node].min_pfn,
- start >> PAGE_SHIFT);
-
- return 0;
-}
-
-/**
* paging_init - setup page tables
*
* paging_init() sets up the page tables for each node of the system and frees
@@ -700,8 +600,6 @@ void __init paging_init(void)
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- efi_memmap_walk(filter_rsvd_memory, count_node_pages);
-
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 817fa120645f..a9d55ad8d67b 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -86,7 +86,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
struct vm_area_struct *vma, *prev_vma;
struct mm_struct *mm = current->mm;
unsigned long mask;
- int fault;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 18278b448530..3b85c3ecac38 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -114,10 +114,9 @@ ia64_init_addr_space (void)
* the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case.
*/
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc(current->mm);
if (vma) {
- INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma->vm_mm = current->mm;
+ vma_set_anonymous(vma);
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
@@ -125,7 +124,7 @@ ia64_init_addr_space (void)
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
return;
}
up_write(&current->mm->mmap_sem);
@@ -133,10 +132,9 @@ ia64_init_addr_space (void)
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc(current->mm);
if (vma) {
- INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma->vm_mm = current->mm;
+ vma_set_anonymous(vma);
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
@@ -144,7 +142,7 @@ ia64_init_addr_space (void)
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
return;
}
up_write(&current->mm->mmap_sem);
@@ -277,7 +275,7 @@ static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
- gate_vma.vm_mm = NULL;
+ vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
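[editor's note] vm_area_alloc()/vm_area_free()/vma_init()/vma_set_anonymous() centralize vma construction, so call sites stop open-coding kmem_cache_zalloc(vm_area_cachep, ...) plus piecemeal field setup. Every conversion in this patch follows the same shape (sketch; addr is a stand-in):

    struct vm_area_struct *vma = vm_area_alloc(mm);  /* zeroed, vm_mm set */
    if (!vma)
            return -ENOMEM;
    vma_set_anonymous(vma);                  /* anonymous: vm_ops = NULL */
    vma->vm_start = addr;
    vma->vm_end   = addr + PAGE_SIZE;

    if (insert_vm_struct(mm, vma)) {
            vm_area_free(vma);               /* undo on failure */
            return -ENOMEM;
    }

The static gate_vma case uses vma_init(&gate_vma, NULL) instead, since it is never heap-allocated or inserted.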
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 785612b576f7..070553791e97 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -2,8 +2,10 @@
config M68K
bool
default y
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
+ select ARCH_NO_PREEMPT if !COLDFIRE
select HAVE_IDE
select HAVE_AOUT if MMU
select HAVE_DEBUG_BUGVERBOSE
@@ -24,6 +26,10 @@ config M68K
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
select OLD_SIGACTION
+ select DMA_NONCOHERENT_OPS if HAS_DMA
+ select HAVE_MEMBLOCK
+ select ARCH_DISCARD_MEMBLOCK
+ select NO_BOOTMEM
config CPU_BIG_ENDIAN
def_bool y
@@ -75,10 +81,6 @@ config PGTABLE_LEVELS
default 2 if SUN3 || COLDFIRE
default 3
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
config MMU
bool "MMU-based Paged Memory Management Support"
default y
@@ -133,18 +135,6 @@ endmenu
menu "Kernel Features"
-if COLDFIRE
-source "kernel/Kconfig.preempt"
-endif
-
-source "mm/Kconfig"
-
-endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
endmenu
if !MMU
@@ -158,18 +148,4 @@ config PM
endmenu
endif
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "arch/m68k/Kconfig.devices"
-
-source "fs/Kconfig"
-
-source "arch/m68k/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
index 04690b179a6e..f43643111eaf 100644
--- a/arch/m68k/Kconfig.debug
+++ b/arch/m68k/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config BOOTPARAM
bool 'Compiled-in Kernel Boot Parameter'
@@ -51,5 +48,3 @@ config BDM_DISABLE
Disable the ColdFire CPU's BDM signals.
endif
-
-endmenu
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index b2a6bc63f8cd..aef8d42e078d 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -31,7 +31,6 @@ extern void dn_sched_init(irq_handler_t handler);
extern void dn_init_IRQ(void);
extern u32 dn_gettimeoffset(void);
extern int dn_dummy_hwclk(int, struct rtc_time *);
-extern int dn_dummy_set_clock_mmss(unsigned long);
extern void dn_dummy_reset(void);
#ifdef CONFIG_HEARTBEAT
static void dn_heartbeat(int on);
@@ -156,7 +155,6 @@ void __init config_apollo(void)
arch_gettimeoffset = dn_gettimeoffset;
mach_max_dma_address = 0xffffffff;
mach_hwclk = dn_dummy_hwclk; /* */
- mach_set_clock_mmss = dn_dummy_set_clock_mmss; /* */
mach_reset = dn_dummy_reset; /* */
#ifdef CONFIG_HEARTBEAT
mach_heartbeat = dn_heartbeat;
@@ -240,12 +238,6 @@ int dn_dummy_hwclk(int op, struct rtc_time *t) {
}
-int dn_dummy_set_clock_mmss(unsigned long nowtime)
-{
- pr_info("set_clock_mmss\n");
- return 0;
-}
-
void dn_dummy_reset(void) {
dn_serial_print("The end !\n");
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 565c6f06ab0b..bd96702a1ad0 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -81,9 +81,6 @@ extern void atari_sched_init(irq_handler_t);
extern u32 atari_gettimeoffset(void);
extern int atari_mste_hwclk (int, struct rtc_time *);
extern int atari_tt_hwclk (int, struct rtc_time *);
-extern int atari_mste_set_clock_mmss (unsigned long);
-extern int atari_tt_set_clock_mmss (unsigned long);
-
/* ++roman: This is a more elaborate test for an SCC chip, since the plain
* Medusa board generates DTACK at the SCC's standard addresses, but a SCC
@@ -362,13 +359,11 @@ void __init config_atari(void)
ATARIHW_SET(TT_CLK);
pr_cont(" TT_CLK");
mach_hwclk = atari_tt_hwclk;
- mach_set_clock_mmss = atari_tt_set_clock_mmss;
}
if (hwreg_present(&mste_rtc.sec_ones)) {
ATARIHW_SET(MSTE_CLK);
pr_cont(" MSTE_CLK");
mach_hwclk = atari_mste_hwclk;
- mach_set_clock_mmss = atari_mste_set_clock_mmss;
}
if (!MACH_IS_MEDUSA && hwreg_present(&dma_wd.fdc_speed) &&
hwreg_write(&dma_wd.fdc_speed, 0)) {
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index c549b48174ec..9cca64286464 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -285,69 +285,6 @@ int atari_tt_hwclk( int op, struct rtc_time *t )
return( 0 );
}
-
-int atari_mste_set_clock_mmss (unsigned long nowtime)
-{
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
- struct MSTE_RTC val;
- unsigned char rtc_minutes;
-
- mste_read(&val);
- rtc_minutes= val.min_ones + val.min_tens * 10;
- if ((rtc_minutes < real_minutes
- ? real_minutes - rtc_minutes
- : rtc_minutes - real_minutes) < 30)
- {
- val.sec_ones = real_seconds % 10;
- val.sec_tens = real_seconds / 10;
- val.min_ones = real_minutes % 10;
- val.min_tens = real_minutes / 10;
- mste_write(&val);
- }
- else
- return -1;
- return 0;
-}
-
-int atari_tt_set_clock_mmss (unsigned long nowtime)
-{
- int retval = 0;
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
- unsigned char save_control, save_freq_select, rtc_minutes;
-
- save_control = RTC_READ (RTC_CONTROL); /* tell the clock it's being set */
- RTC_WRITE (RTC_CONTROL, save_control | RTC_SET);
-
- save_freq_select = RTC_READ (RTC_FREQ_SELECT); /* stop and reset prescaler */
- RTC_WRITE (RTC_FREQ_SELECT, save_freq_select | RTC_DIV_RESET2);
-
- rtc_minutes = RTC_READ (RTC_MINUTES);
- if (!(save_control & RTC_DM_BINARY))
- rtc_minutes = bcd2bin(rtc_minutes);
-
- /* Since we're only adjusting minutes and seconds, don't interfere
- with hour overflow. This avoids messing with unknown time zones
- but requires your RTC not to be off by more than 30 minutes. */
- if ((rtc_minutes < real_minutes
- ? real_minutes - rtc_minutes
- : rtc_minutes - real_minutes) < 30)
- {
- if (!(save_control & RTC_DM_BINARY))
- {
- real_seconds = bin2bcd(real_seconds);
- real_minutes = bin2bcd(real_minutes);
- }
- RTC_WRITE (RTC_SECONDS, real_seconds);
- RTC_WRITE (RTC_MINUTES, real_minutes);
- }
- else
- retval = -1;
-
- RTC_WRITE (RTC_FREQ_SELECT, save_freq_select);
- RTC_WRITE (RTC_CONTROL, save_control);
- return retval;
-}
-
/*
* Local variables:
* c-indent-level: 4
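Note on the 30-minute guard in the two helpers deleted above: the test computes a plain absolute difference, not a distance modulo 60, so a small drift across an hour boundary is refused. For example:

	/* rtc_minutes = 58, real_minutes = 2: the true drift is about 4
	 * minutes, but |58 - 2| = 56 >= 30, so the helper returns -1 and
	 * leaves the RTC alone until the minute values drift back into
	 * the accepted window. */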
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 2cfff4765040..143ee9fa3893 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -41,7 +41,6 @@ static void bvme6000_get_model(char *model);
extern void bvme6000_sched_init(irq_handler_t handler);
extern u32 bvme6000_gettimeoffset(void);
extern int bvme6000_hwclk (int, struct rtc_time *);
-extern int bvme6000_set_clock_mmss (unsigned long);
extern void bvme6000_reset (void);
void bvme6000_set_vectors (void);
@@ -113,7 +112,6 @@ void __init config_bvme6000(void)
mach_init_IRQ = bvme6000_init_IRQ;
arch_gettimeoffset = bvme6000_gettimeoffset;
mach_hwclk = bvme6000_hwclk;
- mach_set_clock_mmss = bvme6000_set_clock_mmss;
mach_reset = bvme6000_reset;
mach_get_model = bvme6000_get_model;
@@ -305,46 +303,3 @@ int bvme6000_hwclk(int op, struct rtc_time *t)
return 0;
}
-
-/*
- * Set the minutes and seconds from seconds value 'nowtime'. Fail if
- * clock is out by > 30 minutes. Logic lifted from atari code.
- * Algorithm is to wait for the 10ms register to change, and then to
- * wait a short while, and then set it.
- */
-
-int bvme6000_set_clock_mmss (unsigned long nowtime)
-{
- int retval = 0;
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
- unsigned char rtc_minutes, rtc_tenms;
- volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
- unsigned char msr = rtc->msr & 0xc0;
- unsigned long flags;
- volatile int i;
-
- rtc->msr = 0; /* Ensure clock accessible */
- rtc_minutes = bcd2bin (rtc->bcd_min);
-
- if ((rtc_minutes < real_minutes
- ? real_minutes - rtc_minutes
- : rtc_minutes - real_minutes) < 30)
- {
- local_irq_save(flags);
- rtc_tenms = rtc->bcd_tenms;
- while (rtc_tenms == rtc->bcd_tenms)
- ;
- for (i = 0; i < 1000; i++)
- ;
- rtc->bcd_min = bin2bcd(real_minutes);
- rtc->bcd_sec = bin2bcd(real_seconds);
- local_irq_restore(flags);
- }
- else
- retval = -1;
-
- rtc->msr = msr;
-
- return retval;
-}
-
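Note: one detail worth spelling out about the busy-wait in the bvme6000 helper deleted above, as a comment-style sketch:

	/* Why wait for the 10 ms digit to tick over (illustration):
	 * writing minutes/seconds just after a tick edge pins the RTC's
	 * sub-second phase near zero, so the stored time is accurate to
	 * roughly one tick instead of up to a full second. */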
diff --git a/arch/m68k/coldfire/clk.c b/arch/m68k/coldfire/clk.c
index 849cd208e2ed..7bc666e482eb 100644
--- a/arch/m68k/coldfire/clk.c
+++ b/arch/m68k/coldfire/clk.c
@@ -129,4 +129,33 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_rate);
+/* dummy functions, should not be called */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ WARN_ON(clk);
+ return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
/***************************************************************************/
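Note: the four stubs added above exist so common drivers that use the full clk API still link on ColdFire, while WARN_ON(clk) flags any caller that reaches them with a real clock. A hedged usage sketch ("sys.0" is one of the ColdFire clock names, used here purely for illustration):

	struct clk *clk = clk_get(NULL, "sys.0");
	if (!IS_ERR(clk)) {
		unsigned long rate = clk_get_rate(clk);	/* pre-existing helper */
		clk_set_rate(clk, rate);	/* new stub: WARN_ON(clk), returns 0 */
		clk_put(clk);
	}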
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index a874e54404d1..1d5483f6e457 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -52,6 +52,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -98,18 +99,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -122,6 +119,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -200,7 +198,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -231,7 +228,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -260,7 +256,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -356,6 +352,7 @@ CONFIG_A2091_SCSI=y
CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -363,6 +360,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -402,8 +400,8 @@ CONFIG_A2065=y
CONFIG_ARIADNE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -412,8 +410,10 @@ CONFIG_ARIADNE=y
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
CONFIG_HYDRA=y
CONFIG_APNE=y
CONFIG_ZORRO8390=y
@@ -426,9 +426,9 @@ CONFIG_ZORRO8390=y
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -478,6 +478,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -499,7 +500,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -600,6 +601,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -622,6 +624,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -657,6 +664,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
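Note: the reshuffling in this and the following defconfigs is mechanical. Files like these are normally regenerated rather than edited by hand, roughly:

	make ARCH=m68k amiga_defconfig
	make ARCH=m68k savedefconfig
	cp defconfig arch/m68k/configs/amiga_defconfig

savedefconfig re-emits options in Kconfig order, which accounts for the moved vendor entries (NET_CADENCE, NET_VENDOR_SYNOPSYS); the dropped NFT_META/NFT_RT/NFT_EXTHDR and NFT_SET_* symbols are no longer user-selectable now that nf_tables builds them into the core and selects the set backends via NF_TABLES_SET.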
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 8ce39e23aa42..52a0af127951 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -381,14 +378,15 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -400,9 +398,9 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -440,6 +438,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -458,7 +457,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -559,6 +558,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -581,6 +581,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -616,6 +621,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 346c4e75edf8..b3103e51268a 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -391,14 +388,15 @@ CONFIG_VETH=m
CONFIG_ATARILANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
CONFIG_NE2000=y
@@ -411,9 +409,9 @@ CONFIG_NE2000=y
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -480,7 +478,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -581,6 +579,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -603,6 +602,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -638,6 +642,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fca9c7aa71a3..fb7d651a4cab 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index f9eab174915c..6b37f5537c39 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -382,14 +379,15 @@ CONFIG_VETH=m
CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -401,9 +399,9 @@ CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -443,6 +441,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -460,7 +459,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -561,6 +560,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -583,6 +583,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -618,6 +623,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b52e597899eb..c717bf879449 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -49,6 +49,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -95,18 +96,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -119,6 +116,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -197,7 +195,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -228,7 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -257,7 +253,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -369,7 +366,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_ADB=y
CONFIG_ADB_MACII=y
CONFIG_ADB_IOP=y
-CONFIG_ADB_PMU68K=y
+CONFIG_ADB_PMU=y
CONFIG_ADB_CUDA=y
CONFIG_INPUT_ADBHID=y
CONFIG_MAC_EMUMOUSEBTN=y
@@ -398,8 +395,8 @@ CONFIG_VETH=m
CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -407,6 +404,7 @@ CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
CONFIG_MACSONIC=y
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -420,9 +418,9 @@ CONFIG_MAC8390=y
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -465,6 +463,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -482,7 +481,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -583,6 +582,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -605,6 +605,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -640,6 +645,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 2a84eeec5b02..226c994ce794 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -59,6 +59,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -105,18 +106,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -129,6 +126,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -207,7 +205,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -238,7 +235,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -267,7 +263,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -311,6 +306,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -373,6 +369,7 @@ CONFIG_A2091_SCSI=y
CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
CONFIG_ATARI_SCSI=y
CONFIG_MAC_SCSI=y
CONFIG_SCSI_MAC_ESP=y
@@ -387,6 +384,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -402,7 +400,7 @@ CONFIG_TCM_PSCSI=m
CONFIG_ADB=y
CONFIG_ADB_MACII=y
CONFIG_ADB_IOP=y
-CONFIG_ADB_PMU68K=y
+CONFIG_ADB_PMU=y
CONFIG_ADB_CUDA=y
CONFIG_INPUT_ADBHID=y
CONFIG_MAC_EMUMOUSEBTN=y
@@ -438,8 +436,8 @@ CONFIG_SUN3LANCE=y
CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -449,9 +447,11 @@ CONFIG_BVME6000_NET=y
CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
CONFIG_MACSONIC=y
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
CONFIG_HYDRA=y
CONFIG_MAC8390=y
CONFIG_NE2000=y
@@ -466,9 +466,9 @@ CONFIG_ZORRO8390=y
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -533,6 +533,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -562,7 +563,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -663,6 +664,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -685,6 +687,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -720,6 +727,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 476e69994340..b383327fd77a 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -47,6 +47,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -93,18 +94,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -117,6 +114,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -195,7 +193,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -226,7 +223,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -255,7 +251,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -296,6 +291,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -343,6 +339,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 1477cda9146e..9783d3deb9e9 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index b3a543dc48a0..a35d10ee10cb 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -350,6 +346,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -388,8 +385,8 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -398,6 +395,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
CONFIG_NE2000=y
@@ -410,9 +408,9 @@ CONFIG_NE2000=y
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -455,6 +453,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -473,7 +472,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -574,6 +573,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -596,6 +596,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -631,6 +636,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index d543ed5dfa96..573bf922d448 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -385,6 +382,7 @@ CONFIG_SUN3LANCE=y
CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -574,6 +574,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -609,6 +614,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index a67e54246023..efb27a7fcc55 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -378,14 +375,15 @@ CONFIG_VETH=m
CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -575,6 +575,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -610,6 +615,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 4d8d68c4e3dd..a4b8d3331a9e 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,6 +1,7 @@
generic-y += barrier.h
generic-y += compat.h
generic-y += device.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index e993e2860ee1..47228b0d4163 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -126,11 +126,13 @@ static inline void atomic_inc(atomic_t *v)
{
__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
+#define atomic_inc atomic_inc
static inline void atomic_dec(atomic_t *v)
{
__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
+#define atomic_dec atomic_dec
static inline int atomic_dec_and_test(atomic_t *v)
{
@@ -138,6 +140,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
+#define atomic_dec_and_test atomic_dec_and_test
static inline int atomic_dec_and_test_lt(atomic_t *v)
{
@@ -155,6 +158,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
+#define atomic_inc_and_test atomic_inc_and_test
#ifdef CONFIG_RMW_INSNS
@@ -190,9 +194,6 @@ static inline int atomic_xchg(atomic_t *v, int new)
#endif /* !CONFIG_RMW_INSNS */
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
char c;
@@ -201,6 +202,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
+#define atomic_sub_and_test atomic_sub_and_test
static inline int atomic_add_negative(int i, atomic_t *v)
{
@@ -210,20 +212,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
+#define atomic_add_negative atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC __ */
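The self-referential defines added above are how the generic atomic headers discover which operations an architecture implements: a macro with the same name as the function marks it as present, and the generic fallback is only emitted when the macro is absent. A minimal sketch of that detection idiom (illustrative of the in-tree fallback headers, not a verbatim quote):

    /* Sketch: generic side of the "#define atomic_inc atomic_inc" idiom. */
    #ifndef atomic_inc
    static inline void atomic_inc(atomic_t *v)
    {
            atomic_add(1, v);       /* fallback built from atomic_add() */
    }
    #define atomic_inc atomic_inc
    #endif

The deleted __atomic_add_unless() goes away for the same reason: the generic layer can now supply an equivalent built from atomic_cmpxchg(), so the hand-rolled copy is redundant.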
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 93b47b1f6fb4..d979f38af751 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -454,7 +454,7 @@ static inline unsigned long ffz(unsigned long word)
*/
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
-static inline int __ffs(int x)
+static inline unsigned long __ffs(unsigned long x)
{
__asm__ __volatile__ ("bitrev %0; ff1 %0"
: "=d" (x)
@@ -493,7 +493,11 @@ static inline int ffs(int x)
: "dm" (x & -x));
return 32 - cnt;
}
-#define __ffs(x) (ffs(x) - 1)
+
+static inline unsigned long __ffs(unsigned long x)
+{
+ return ffs(x) - 1;
+}
/*
* fls: find last bit set.
@@ -515,12 +519,16 @@ static inline int __fls(int x)
#endif
+/* Simple test-and-set bit locks */
+#define test_and_set_bit_lock test_and_set_bit
+#define clear_bit_unlock clear_bit
+#define __clear_bit_unlock clear_bit_unlock
+
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#endif /* _M68K_BITOPS_H */
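Dropping <asm-generic/bitops/lock.h> in favour of plain aliases works because the lock variants only promise acquire/release ordering, which m68k's fully-ordered test_and_set_bit()/clear_bit() already exceed. A hypothetical caller of the aliased API (sketch_lock/sketch_unlock are made-up names), to show the contract being relied on:

    /* Hypothetical bit-lock user; bit 0 of "word" acts as a spinlock. */
    static unsigned long word;

    static void sketch_lock(void)
    {
            while (test_and_set_bit_lock(0, &word))  /* acquire semantics */
                    cpu_relax();
    }

    static void sketch_unlock(void)
    {
            clear_bit_unlock(0, &word);              /* release semantics */
    }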
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
deleted file mode 100644
index e3722ed04fbb..000000000000
--- a/arch/m68k/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_DMA_MAPPING_H
-#define _M68K_DMA_MAPPING_H
-
-extern const struct dma_map_ops m68k_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &m68k_dma_ops;
-}
-
-#endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h
index b0978a23bad1..ae2021964e32 100644
--- a/arch/m68k/include/asm/dma.h
+++ b/arch/m68k/include/asm/dma.h
@@ -390,7 +390,7 @@ static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
#ifdef DEBUG_DMA
printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
- dmanr, (int) &dmalp[MCFDMA_DMR], dmabp[MCFDMA_DMR],
+ dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
(int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
#endif
}
@@ -421,7 +421,7 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
#ifdef DEBUG_DMA
printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
- __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DMR], dmawp[MCFDMA_DMR],
+ __FILE__, __LINE__, dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
(int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
(int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
#endif
diff --git a/arch/m68k/include/asm/io.h b/arch/m68k/include/asm/io.h
index ca2849afb087..aabe6420ead2 100644
--- a/arch/m68k/include/asm/io.h
+++ b/arch/m68k/include/asm/io.h
@@ -1,6 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _M68K_IO_H
+#define _M68K_IO_H
+
#if defined(__uClinux__) || defined(CONFIG_COLDFIRE)
#include <asm/io_no.h>
#else
#include <asm/io_mm.h>
#endif
+
+#include <asm-generic/io.h>
+
+#endif /* _M68K_IO_H */
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index fe485f4f5fac..782b78f8a048 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -16,13 +16,11 @@
* isa_readX(),isa_writeX() are for ISA memory
*/
-#ifndef _IO_H
-#define _IO_H
+#ifndef _M68K_IO_MM_H
+#define _M68K_IO_MM_H
#ifdef __KERNEL__
-#define ARCH_HAS_IOREMAP_WT
-
#include <linux/compiler.h>
#include <asm/raw_io.h>
#include <asm/virtconvert.h>
@@ -369,40 +367,6 @@ static inline void isa_delay(void)
#define writew(val, addr) out_le16((addr), (val))
#endif /* CONFIG_ATARI_ROM_ISA */
-#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
-/*
- * We need to define dummy functions for GENERIC_IOMAP support.
- */
-#define inb(port) 0xff
-#define inb_p(port) 0xff
-#define outb(val,port) ((void)0)
-#define outb_p(val,port) ((void)0)
-#define inw(port) 0xffff
-#define inw_p(port) 0xffff
-#define outw(val,port) ((void)0)
-#define outw_p(val,port) ((void)0)
-#define inl(port) 0xffffffffUL
-#define inl_p(port) 0xffffffffUL
-#define outl(val,port) ((void)0)
-#define outl_p(val,port) ((void)0)
-
-#define insb(port,buf,nr) ((void)0)
-#define outsb(port,buf,nr) ((void)0)
-#define insw(port,buf,nr) ((void)0)
-#define outsw(port,buf,nr) ((void)0)
-#define insl(port,buf,nr) ((void)0)
-#define outsl(port,buf,nr) ((void)0)
-
-/*
- * These should be valid on any ioremap()ed region
- */
-#define readb(addr) in_8(addr)
-#define writeb(val,addr) out_8((addr),(val))
-#define readw(addr) in_le16(addr)
-#define writew(val,addr) out_le16((addr),(val))
-
-#endif /* !CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
-
#define readl(addr) in_le32(addr)
#define writel(val,addr) out_le32((addr),(val))
@@ -444,4 +408,4 @@ static inline void isa_delay(void)
#define writew_relaxed(b, addr) writew(b, addr)
#define writel_relaxed(b, addr) writel(b, addr)
-#endif /* _IO_H */
+#endif /* _M68K_IO_MM_H */
diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h
index 83a0a6d449f4..0498192e1d98 100644
--- a/arch/m68k/include/asm/io_no.h
+++ b/arch/m68k/include/asm/io_no.h
@@ -131,19 +131,7 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define PCI_SPACE_LIMIT PCI_IO_MASK
#endif /* CONFIG_PCI */
-/*
- * These are defined in kmap.h as static inline functions. To maintain
- * previous behavior we put these define guards here so io_mm.h doesn't
- * see them.
- */
-#ifdef CONFIG_MMU
-#define memset_io memset_io
-#define memcpy_fromio memcpy_fromio
-#define memcpy_toio memcpy_toio
-#endif
-
#include <asm/kmap.h>
#include <asm/virtconvert.h>
-#include <asm-generic/io.h>
#endif /* _M68KNOMMU_IO_H */
diff --git a/arch/m68k/include/asm/kmap.h b/arch/m68k/include/asm/kmap.h
index 84b8333db8ad..aac7f045f7f0 100644
--- a/arch/m68k/include/asm/kmap.h
+++ b/arch/m68k/include/asm/kmap.h
@@ -4,6 +4,8 @@
#ifdef CONFIG_MMU
+#define ARCH_HAS_IOREMAP_WT
+
/* Values for nocacheflag and cmode */
#define IOMAP_FULL_CACHING 0
#define IOMAP_NOCACHE_SER 1
@@ -16,6 +18,7 @@
*/
extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
int cacheflag);
+#define iounmap iounmap
extern void iounmap(void __iomem *addr);
extern void __iounmap(void *addr, unsigned long size);
@@ -33,31 +36,35 @@ static inline void __iomem *ioremap_nocache(unsigned long physaddr,
}
#define ioremap_uc ioremap_nocache
+#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
-#define ioremap_fillcache ioremap_fullcache
+#define ioremap_fullcache ioremap_fullcache
static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
+#define memset_io memset_io
static inline void memset_io(volatile void __iomem *addr, unsigned char val,
int count)
{
__builtin_memset((void __force *) addr, val, count);
}
+#define memcpy_fromio memcpy_fromio
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
int count)
{
__builtin_memcpy(dst, (void __force *) src, count);
}
+#define memcpy_toio memcpy_toio
static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
int count)
{
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 1605da48ebf2..49bd3266b4b1 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -22,7 +22,6 @@ extern int (*mach_hwclk)(int, struct rtc_time*);
extern unsigned int (*mach_get_ss)(void);
extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
-extern int (*mach_set_clock_mmss)(unsigned long);
extern void (*mach_reset)( void );
extern void (*mach_halt)( void );
extern void (*mach_power_off)( void );
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 9b840c03ebb7..08cee11180e6 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -57,7 +57,6 @@ struct mac_model
#define MAC_SCSI_IIFX 5
#define MAC_SCSI_DUO 6
#define MAC_SCSI_LC 7
-#define MAC_SCSI_LATE 8
#define MAC_IDE_NONE 0
#define MAC_IDE_QUADRA 1
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 8b707c249026..12fe700632f4 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
+ pgtable_page_dtor(page);
__free_page(page);
}
@@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return page;
}
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
{
+ pgtable_page_dtor(page);
__free_page(page);
}
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index e644c4daf540..6bbe52025de3 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -18,7 +18,7 @@ extern unsigned long memory_end;
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define __pa(vaddr) ((unsigned long)(vaddr))
-#define __va(paddr) ((void *)(paddr))
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 463572c4943f..e99993c57d6b 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -6,7 +6,7 @@
#undef DEBUG
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -19,7 +19,7 @@
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
-static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t flag, unsigned long attrs)
{
struct page *page, **map;
@@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
return addr;
}
-static void m68k_dma_free(struct device *dev, size_t size, void *addr,
+void arch_dma_free(struct device *dev, size_t size, void *addr,
dma_addr_t handle, unsigned long attrs)
{
pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
@@ -73,8 +73,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr,
#include <asm/cacheflush.h>
-static void *m68k_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -89,7 +89,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
return ret;
}
-static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
@@ -97,8 +97,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
-static void m68k_dma_sync_single_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
+ size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@@ -115,58 +115,6 @@ static void m68k_dma_sync_single_for_device(struct device *dev,
}
}
-static void m68k_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist, int nents, enum dma_data_direction dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- dma_sync_single_for_device(dev, sg->dma_address, sg->length,
- dir);
- }
-}
-
-static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t handle = page_to_phys(page) + offset;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_sync_single_for_device(dev, handle, size, dir);
-
- return handle;
-}
-
-static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
-
- dma_sync_single_for_device(dev, sg->dma_address, sg->length,
- dir);
- }
- return nents;
-}
-
-const struct dma_map_ops m68k_dma_ops = {
- .alloc = m68k_dma_alloc,
- .free = m68k_dma_free,
- .map_page = m68k_dma_map_page,
- .map_sg = m68k_dma_map_sg,
- .sync_single_for_device = m68k_dma_sync_single_for_device,
- .sync_sg_for_device = m68k_dma_sync_sg_for_device,
-};
-EXPORT_SYMBOL(m68k_dma_ops);
-
void arch_setup_pdev_archdata(struct platform_device *pdev)
{
if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
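With m68k_dma_ops gone, the common DMA code composes map_page/map_sg itself from the two arch hooks that remain. Roughly how a direct (IOMMU-less) mapping path consumes arch_sync_dma_for_device() — a simplified model with a hypothetical helper name, not the exact kernel/dma source:

    /* Simplified model of a direct page mapping using the arch hook. */
    static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir,
                                      unsigned long attrs)
    {
            phys_addr_t paddr = page_to_phys(page) + offset;

            if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                    arch_sync_dma_for_device(dev, paddr, size, dir);
            return paddr;   /* no IOMMU: bus address == physical address */
    }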
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index f35e3ebd6331..5d3596c180f9 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
@@ -88,7 +89,6 @@ void (*mach_get_hardware_list) (struct seq_file *m);
/* machine dependent timer functions */
int (*mach_hwclk) (int, struct rtc_time*);
EXPORT_SYMBOL(mach_hwclk);
-int (*mach_set_clock_mmss) (unsigned long);
unsigned int (*mach_get_ss)(void);
int (*mach_get_rtc_pll)(struct rtc_pll_info *);
int (*mach_set_rtc_pll)(struct rtc_pll_info *);
@@ -165,6 +165,8 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
be32_to_cpu(m->addr);
m68k_memory[m68k_num_memory].size =
be32_to_cpu(m->size);
+ memblock_add(m68k_memory[m68k_num_memory].addr,
+ m68k_memory[m68k_num_memory].size);
m68k_num_memory++;
} else
pr_warn("%s: too many memory chunks\n",
@@ -224,10 +226,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
void __init setup_arch(char **cmdline_p)
{
-#ifndef CONFIG_SUN3
- int i;
-#endif
-
/* The bootinfo is located right after the kernel */
if (!CPU_IS_COLDFIRE)
m68k_parse_bootinfo((const struct bi_record *)_end);
@@ -356,14 +354,9 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifndef CONFIG_SUN3
- for (i = 1; i < m68k_num_memory; i++)
- free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
- m68k_memory[i].size);
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
- reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
- m68k_ramdisk.addr, m68k_ramdisk.size,
- BOOTMEM_DEFAULT);
+ memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index a98af1018201..cfd5475bfc31 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/initrd.h>
@@ -51,7 +52,6 @@ char __initdata command_line[COMMAND_LINE_SIZE];
/* machine dependent timer functions */
void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL;
-int (*mach_set_clock_mmss)(unsigned long);
int (*mach_hwclk) (int, struct rtc_time*);
/* machine dependent reboot functions */
@@ -86,8 +86,6 @@ void (*mach_power_off)(void);
void __init setup_arch(char **cmdline_p)
{
- int bootmap_size;
-
memory_start = PAGE_ALIGN(_ramstart);
memory_end = _ramend;
@@ -142,6 +140,8 @@ void __init setup_arch(char **cmdline_p)
pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
__bss_stop, memory_start, memory_start, memory_end);
+ memblock_add(memory_start, memory_end - memory_start);
+
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
@@ -158,23 +158,10 @@ void __init setup_arch(char **cmdline_p)
min_low_pfn = PFN_DOWN(memory_start);
max_pfn = max_low_pfn = PFN_DOWN(memory_end);
- bootmap_size = init_bootmem_node(
- NODE_DATA(0),
- min_low_pfn, /* map goes here */
- PFN_DOWN(PAGE_OFFSET),
- max_pfn);
- /*
- * Free the usable memory, we have to make sure we do not free
- * the bootmem bitmap so we then reserve it after freeing it :-)
- */
- free_bootmem(memory_start, memory_end - memory_start);
- reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
-
#if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
if ((initrd_start > 0) && (initrd_start < initrd_end) &&
(initrd_end < memory_end))
- reserve_bootmem(initrd_start, initrd_end - initrd_start,
- BOOTMEM_DEFAULT);
+ memblock_reserve(initrd_start, initrd_end - initrd_start);
#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
/*
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index e522307db47c..cd9317d53276 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -57,7 +57,6 @@ static unsigned long mac_orig_videoaddr;
/* Mac specific timer functions */
extern u32 mac_gettimeoffset(void);
extern int mac_hwclk(int, struct rtc_time *);
-extern int mac_set_clock_mmss(unsigned long);
extern void iop_preinit(void);
extern void iop_init(void);
extern void via_init(void);
@@ -158,7 +157,6 @@ void __init config_mac(void)
mach_get_model = mac_get_model;
arch_gettimeoffset = mac_gettimeoffset;
mach_hwclk = mac_hwclk;
- mach_set_clock_mmss = mac_set_clock_mmss;
mach_reset = mac_reset;
mach_halt = mac_poweroff;
mach_power_off = mac_poweroff;
@@ -709,7 +707,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook 520",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_QUADRA,
- .scsi_type = MAC_SCSI_LATE,
+ .scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -891,7 +889,7 @@ static void __init mac_identify(void)
#ifdef CONFIG_ADB_CUDA
find_via_cuda();
#endif
-#ifdef CONFIG_ADB_PMU68K
+#ifdef CONFIG_ADB_PMU
find_via_pmu();
#endif
}
@@ -943,18 +941,6 @@ static const struct resource mac_scsi_old_rsrc[] __initconst = {
},
};
-static const struct resource mac_scsi_late_rsrc[] __initconst = {
- {
- .flags = IORESOURCE_IRQ,
- .start = IRQ_MAC_SCSI,
- .end = IRQ_MAC_SCSI,
- }, {
- .flags = IORESOURCE_MEM,
- .start = 0x50010000,
- .end = 0x50011FFF,
- },
-};
-
static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
{
.flags = IORESOURCE_IRQ,
@@ -1064,11 +1050,6 @@ int __init mac_platform_init(void)
platform_device_register_simple("mac_scsi", 0,
mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc));
break;
- case MAC_SCSI_LATE:
- /* XXX PDMA support for PowerBook 500 series needs testing */
- platform_device_register_simple("mac_scsi", 0,
- mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
- break;
case MAC_SCSI_LC:
/* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
* Also from the Developer Notes for Classic II, LC III,
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index c68054361615..3534aa6a4dc2 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -26,33 +26,38 @@
#include <asm/machdep.h>
-/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
#define RTC_OFFSET 2082844800
static void (*rom_reset)(void);
#ifdef CONFIG_ADB_CUDA
-static long cuda_read_time(void)
+static time64_t cuda_read_time(void)
{
struct adb_request req;
- long time;
+ time64_t time;
if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
return 0;
while (!req.complete)
cuda_poll();
- time = (req.reply[3] << 24) | (req.reply[4] << 16) |
- (req.reply[5] << 8) | req.reply[6];
+ time = (u32)((req.reply[3] << 24) | (req.reply[4] << 16) |
+ (req.reply[5] << 8) | req.reply[6]);
+
return time - RTC_OFFSET;
}
-static void cuda_write_time(long data)
+static void cuda_write_time(time64_t time)
{
struct adb_request req;
+ u32 data = lower_32_bits(time + RTC_OFFSET);
- data += RTC_OFFSET;
if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
(data >> 24) & 0xFF, (data >> 16) & 0xFF,
(data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -85,27 +90,28 @@ static void cuda_write_pram(int offset, __u8 data)
}
#endif /* CONFIG_ADB_CUDA */
-#ifdef CONFIG_ADB_PMU68K
-static long pmu_read_time(void)
+#ifdef CONFIG_ADB_PMU
+static time64_t pmu_read_time(void)
{
struct adb_request req;
- long time;
+ time64_t time;
if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
return 0;
while (!req.complete)
pmu_poll();
- time = (req.reply[1] << 24) | (req.reply[2] << 16) |
- (req.reply[3] << 8) | req.reply[4];
+ time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
+ (req.reply[3] << 8) | req.reply[4]);
+
return time - RTC_OFFSET;
}
-static void pmu_write_time(long data)
+static void pmu_write_time(time64_t time)
{
struct adb_request req;
+ u32 data = lower_32_bits(time + RTC_OFFSET);
- data += RTC_OFFSET;
if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
(data >> 24) & 0xFF, (data >> 16) & 0xFF,
(data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -136,7 +142,7 @@ static void pmu_write_pram(int offset, __u8 data)
while (!req.complete)
pmu_poll();
}
-#endif /* CONFIG_ADB_PMU68K */
+#endif /* CONFIG_ADB_PMU */
/*
* VIA PRAM/RTC access routines
@@ -245,11 +251,11 @@ static void via_write_pram(int offset, __u8 data)
* is basically any machine with Mac II-style ADB.
*/
-static long via_read_time(void)
+static time64_t via_read_time(void)
{
union {
__u8 cdata[4];
- long idata;
+ __u32 idata;
} result, last_result;
int count = 1;
@@ -270,7 +276,7 @@ static long via_read_time(void)
via_pram_command(0x8D, &result.cdata[0]);
if (result.idata == last_result.idata)
- return result.idata - RTC_OFFSET;
+ return (time64_t)result.idata - RTC_OFFSET;
if (++count > 10)
break;
@@ -278,8 +284,8 @@ static long via_read_time(void)
last_result.idata = result.idata;
}
- pr_err("via_read_time: failed to read a stable value; got 0x%08lx then 0x%08lx\n",
- last_result.idata, result.idata);
+ pr_err("%s: failed to read a stable value; got 0x%08x then 0x%08x\n",
+ __func__, last_result.idata, result.idata);
return 0;
}
@@ -291,11 +297,11 @@ static long via_read_time(void)
* is basically any machine with Mac II-style ADB.
*/
-static void via_write_time(long time)
+static void via_write_time(time64_t time)
{
union {
__u8 cdata[4];
- long idata;
+ __u32 idata;
} data;
__u8 temp;
@@ -304,7 +310,7 @@ static void via_write_time(long time)
temp = 0x55;
via_pram_command(0x35, &temp);
- data.idata = time + RTC_OFFSET;
+ data.idata = lower_32_bits(time + RTC_OFFSET);
via_pram_command(0x01, &data.cdata[3]);
via_pram_command(0x05, &data.cdata[2]);
via_pram_command(0x09, &data.cdata[1]);
@@ -367,38 +373,6 @@ static void cuda_shutdown(void)
}
#endif /* CONFIG_ADB_CUDA */
-#ifdef CONFIG_ADB_PMU68K
-
-void pmu_restart(void)
-{
- struct adb_request req;
- if (pmu_request(&req, NULL,
- 2, PMU_SET_INTR_MASK, PMU_INT_ADB|PMU_INT_TICK) < 0)
- return;
- while (!req.complete)
- pmu_poll();
- if (pmu_request(&req, NULL, 1, PMU_RESET) < 0)
- return;
- while (!req.complete)
- pmu_poll();
-}
-
-void pmu_shutdown(void)
-{
- struct adb_request req;
- if (pmu_request(&req, NULL,
- 2, PMU_SET_INTR_MASK, PMU_INT_ADB|PMU_INT_TICK) < 0)
- return;
- while (!req.complete)
- pmu_poll();
- if (pmu_request(&req, NULL, 5, PMU_SHUTDOWN, 'M', 'A', 'T', 'T') < 0)
- return;
- while (!req.complete)
- pmu_poll();
-}
-
-#endif
-
/*
*-------------------------------------------------------------------
* Below this point are the generic routines; they'll dispatch to the
@@ -423,7 +397,7 @@ void mac_pram_read(int offset, __u8 *buffer, int len)
func = cuda_read_pram;
break;
#endif
-#ifdef CONFIG_ADB_PMU68K
+#ifdef CONFIG_ADB_PMU
case MAC_ADB_PB2:
func = pmu_read_pram;
break;
@@ -453,7 +427,7 @@ void mac_pram_write(int offset, __u8 *buffer, int len)
func = cuda_write_pram;
break;
#endif
-#ifdef CONFIG_ADB_PMU68K
+#ifdef CONFIG_ADB_PMU
case MAC_ADB_PB2:
func = pmu_write_pram;
break;
@@ -477,9 +451,8 @@ void mac_poweroff(void)
macintosh_config->adb_type == MAC_ADB_CUDA) {
cuda_shutdown();
#endif
-#ifdef CONFIG_ADB_PMU68K
- } else if (macintosh_config->adb_type == MAC_ADB_PB1
- || macintosh_config->adb_type == MAC_ADB_PB2) {
+#ifdef CONFIG_ADB_PMU
+ } else if (macintosh_config->adb_type == MAC_ADB_PB2) {
pmu_shutdown();
#endif
}
@@ -519,9 +492,8 @@ void mac_reset(void)
macintosh_config->adb_type == MAC_ADB_CUDA) {
cuda_restart();
#endif
-#ifdef CONFIG_ADB_PMU68K
- } else if (macintosh_config->adb_type == MAC_ADB_PB1
- || macintosh_config->adb_type == MAC_ADB_PB2) {
+#ifdef CONFIG_ADB_PMU
+ } else if (macintosh_config->adb_type == MAC_ADB_PB2) {
pmu_restart();
#endif
} else if (CPU_IS_030) {
@@ -585,12 +557,15 @@ void mac_reset(void)
* This function translates seconds since 1970 into a proper date.
*
* Algorithm cribbed from glibc2.1, __offtime().
+ *
+ * This is roughly the same as rtc_time64_to_tm(), which we should probably
+ * use here, but it's only available when CONFIG_RTC_LIB is enabled.
*/
#define SECS_PER_MINUTE (60)
#define SECS_PER_HOUR (SECS_PER_MINUTE * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
-static void unmktime(unsigned long time, long offset,
+static void unmktime(time64_t time, long offset,
int *yearp, int *monp, int *dayp,
int *hourp, int *minp, int *secp)
{
@@ -602,11 +577,10 @@ static void unmktime(unsigned long time, long offset,
/* Leap years. */
{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
};
- long int days, rem, y, wday, yday;
+ int days, rem, y, wday, yday;
const unsigned short int *ip;
- days = time / SECS_PER_DAY;
- rem = time % SECS_PER_DAY;
+ days = div_u64_rem(time, SECS_PER_DAY, &rem);
rem += offset;
while (rem < 0) {
rem += SECS_PER_DAY;
@@ -657,7 +631,7 @@ static void unmktime(unsigned long time, long offset,
int mac_hwclk(int op, struct rtc_time *t)
{
- unsigned long now;
+ time64_t now;
if (!op) { /* read */
switch (macintosh_config->adb_type) {
@@ -672,7 +646,7 @@ int mac_hwclk(int op, struct rtc_time *t)
now = cuda_read_time();
break;
#endif
-#ifdef CONFIG_ADB_PMU68K
+#ifdef CONFIG_ADB_PMU
case MAC_ADB_PB2:
now = pmu_read_time();
break;
@@ -693,8 +667,8 @@ int mac_hwclk(int op, struct rtc_time *t)
__func__, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
t->tm_hour, t->tm_min, t->tm_sec);
- now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
- t->tm_hour, t->tm_min, t->tm_sec);
+ now = mktime64(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
+ t->tm_hour, t->tm_min, t->tm_sec);
switch (macintosh_config->adb_type) {
case MAC_ADB_IOP:
@@ -708,7 +682,7 @@ int mac_hwclk(int op, struct rtc_time *t)
cuda_write_time(now);
break;
#endif
-#ifdef CONFIG_ADB_PMU68K
+#ifdef CONFIG_ADB_PMU
case MAC_ADB_PB2:
pmu_write_time(now);
break;
@@ -719,19 +693,3 @@ int mac_hwclk(int op, struct rtc_time *t)
}
return 0;
}
-
-/*
- * Set minutes/seconds in the hardware clock
- */
-
-int mac_set_clock_mmss (unsigned long nowtime)
-{
- struct rtc_time now;
-
- mac_hwclk(0, &now);
- now.tm_sec = nowtime % 60;
- now.tm_min = (nowtime / 60) % 60;
- mac_hwclk(1, &now);
-
- return 0;
-}
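The read sides above mask the hardware value to 32 bits before subtracting RTC_OFFSET, and keeping the result in time64_t is what makes the arithmetic safe past 2038 on a 32-bit long. A worked sketch using the constants from the patch (sketch_mac_to_unix is an illustrative name):

    /* The Mac RTC counts unsigned 32-bit seconds from 1904, so it wraps
     * at 0xFFFFFFFF - RTC_OFFSET = 2212122495 seconds past 1970, i.e.
     * early 2040 -- the wrap point named in the comment at the top of
     * the file. */
    static time64_t sketch_mac_to_unix(u32 mac_seconds)
    {
            return (time64_t)mac_seconds - RTC_OFFSET;
    }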
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index f2ff3779875a..9b6163c05a75 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -70,7 +70,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
- int fault;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
@@ -136,7 +136,7 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags);
- pr_debug("handle_mm_fault returns %d\n", fault);
+ pr_debug("handle_mm_fault returns %x\n", fault);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return 0;
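vm_fault_t is a bitmask of VM_FAULT_* flags rather than a signed count, which is also why the pr_debug() format flips from %d to %x. A sketch of the contract the new type encodes (sketch_fault_ok is a hypothetical helper):

    /* Sketch: vm_fault_t carries flag bits, not an errno-style value. */
    static int sketch_fault_ok(vm_fault_t fault)
    {
            if (fault & VM_FAULT_ERROR)     /* OOM, SIGBUS, ... */
                    return 0;
            if (fault & VM_FAULT_RETRY)     /* mmap_sem dropped, must retry */
                    return 0;
            return 1;                       /* fault handled */
    }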
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 8827b7f91402..38e2b272c220 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -71,7 +71,6 @@ void __init m68k_setup_node(int node)
pg_data_table[i] = pg_data_map + node;
}
#endif
- pg_data_map[node].bdata = bootmem_node_data + node;
node_set_online(node);
}
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 2925d795d71a..70dde040779b 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -153,31 +154,31 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
void __init cf_bootmem_alloc(void)
{
- unsigned long start_pfn;
unsigned long memstart;
/* _rambase and _ramend will be naturally page aligned */
m68k_memory[0].addr = _rambase;
m68k_memory[0].size = _ramend - _rambase;
+ memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+
/* compute total pages in system */
num_pages = PFN_DOWN(_ramend - _rambase);
/* page numbers */
memstart = PAGE_ALIGN(_ramstart);
min_low_pfn = PFN_DOWN(_rambase);
- start_pfn = PFN_DOWN(memstart);
max_pfn = max_low_pfn = PFN_DOWN(_ramend);
high_memory = (void *)_ramend;
+ /* Reserve kernel text/data/bss */
+ memblock_reserve(memstart, memstart - _rambase);
+
m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
module_fixup(NULL, __start_fixup, __stop_fixup);
- /* setup bootmem data */
+ /* setup node data */
m68k_setup_node(0);
- memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
- min_low_pfn, max_low_pfn);
- free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
}
/*
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index e490ecc7842c..4e17ecb5928a 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <asm/setup.h>
@@ -208,7 +209,7 @@ void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = { 0, };
unsigned long min_addr, max_addr;
- unsigned long addr, size, end;
+ unsigned long addr;
int i;
#ifdef DEBUG
@@ -253,34 +254,20 @@ void __init paging_init(void)
min_low_pfn = availmem >> PAGE_SHIFT;
max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
- for (i = 0; i < m68k_num_memory; i++) {
- addr = m68k_memory[i].addr;
- end = addr + m68k_memory[i].size;
- m68k_setup_node(i);
- availmem = PAGE_ALIGN(availmem);
- availmem += init_bootmem_node(NODE_DATA(i),
- availmem >> PAGE_SHIFT,
- addr >> PAGE_SHIFT,
- end >> PAGE_SHIFT);
- }
+ /* Reserve kernel text/data/bss and the memory allocated in head.S */
+ memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
/*
* Map the physical memory available into the kernel virtual
- * address space. First initialize the bootmem allocator with
- * the memory we already mapped, so map_node() has something
- * to allocate.
+ * address space. Make sure memblock will not try to allocate
+ * pages beyond the memory we already mapped in head.S
*/
- addr = m68k_memory[0].addr;
- size = m68k_memory[0].size;
- free_bootmem_node(NODE_DATA(0), availmem,
- min(m68k_init_mapped_size, size) - (availmem - addr));
- map_node(0);
- if (size > m68k_init_mapped_size)
- free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
- size - m68k_init_mapped_size);
-
- for (i = 1; i < m68k_num_memory; i++)
+ memblock_set_bottom_up(true);
+
+ for (i = 0; i < m68k_num_memory; i++) {
+ m68k_setup_node(i);
map_node(i);
+ }
flush_tlb_all();
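memblock allocates top-down by default, but at this point only the region head.S mapped at the bottom of node 0 has page tables, so the code flips to bottom-up allocation to keep early allocations inside that window. A sketch with hypothetical numbers, assuming the memblock_alloc() of this kernel generation, which returns a physical address:

    /* Hypothetical: head.S mapped only the first 4 MiB of RAM. */
    static void __init sketch_bottom_up(void)
    {
            phys_addr_t p;

            memblock_set_bottom_up(true);
            /* The allocation comes from the lowest free range, just above
             * the kernel inside the mapped window -- not from the top of
             * RAM, where no page tables exist yet. */
            p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
            (void)p;
    }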
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index f8a710fd84cd..adea549d240e 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -40,7 +40,6 @@ static void mvme147_get_model(char *model);
extern void mvme147_sched_init(irq_handler_t handler);
extern u32 mvme147_gettimeoffset(void);
extern int mvme147_hwclk (int, struct rtc_time *);
-extern int mvme147_set_clock_mmss (unsigned long);
extern void mvme147_reset (void);
@@ -92,7 +91,6 @@ void __init config_mvme147(void)
mach_init_IRQ = mvme147_init_IRQ;
arch_gettimeoffset = mvme147_gettimeoffset;
mach_hwclk = mvme147_hwclk;
- mach_set_clock_mmss = mvme147_set_clock_mmss;
mach_reset = mvme147_reset;
mach_get_model = mvme147_get_model;
@@ -164,8 +162,3 @@ int mvme147_hwclk(int op, struct rtc_time *t)
}
return 0;
}
-
-int mvme147_set_clock_mmss (unsigned long nowtime)
-{
- return 0;
-}
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 4ffd9ef98de4..6ee36a5b528d 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -46,7 +46,6 @@ static void mvme16x_get_model(char *model);
extern void mvme16x_sched_init(irq_handler_t handler);
extern u32 mvme16x_gettimeoffset(void);
extern int mvme16x_hwclk (int, struct rtc_time *);
-extern int mvme16x_set_clock_mmss (unsigned long);
extern void mvme16x_reset (void);
int bcd2int (unsigned char b);
@@ -280,7 +279,6 @@ void __init config_mvme16x(void)
mach_init_IRQ = mvme16x_init_IRQ;
arch_gettimeoffset = mvme16x_gettimeoffset;
mach_hwclk = mvme16x_hwclk;
- mach_set_clock_mmss = mvme16x_set_clock_mmss;
mach_reset = mvme16x_reset;
mach_get_model = mvme16x_get_model;
mach_get_hardware_list = mvme16x_get_hardware_list;
@@ -411,9 +409,3 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
}
return 0;
}
-
-int mvme16x_set_clock_mmss (unsigned long nowtime)
-{
- return 0;
-}
-
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 71c0867ecf20..96810d91da2b 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -43,7 +43,6 @@ extern void q40_sched_init(irq_handler_t handler);
static u32 q40_gettimeoffset(void);
static int q40_hwclk(int, struct rtc_time *);
static unsigned int q40_get_ss(void);
-static int q40_set_clock_mmss(unsigned long);
static int q40_get_rtc_pll(struct rtc_pll_info *pll);
static int q40_set_rtc_pll(struct rtc_pll_info *pll);
@@ -175,7 +174,6 @@ void __init config_q40(void)
mach_get_ss = q40_get_ss;
mach_get_rtc_pll = q40_get_rtc_pll;
mach_set_rtc_pll = q40_set_rtc_pll;
- mach_set_clock_mmss = q40_set_clock_mmss;
mach_reset = q40_reset;
mach_get_model = q40_get_model;
@@ -267,34 +265,6 @@ static unsigned int q40_get_ss(void)
return bcd2bin(Q40_RTC_SECS);
}
-/*
- * Set the minutes and seconds from seconds value 'nowtime'. Fail if
- * clock is out by > 30 minutes. Logic lifted from atari code.
- */
-
-static int q40_set_clock_mmss(unsigned long nowtime)
-{
- int retval = 0;
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-
- int rtc_minutes;
-
- rtc_minutes = bcd2bin(Q40_RTC_MINS);
-
- if ((rtc_minutes < real_minutes ?
- real_minutes - rtc_minutes :
- rtc_minutes - real_minutes) < 30) {
- Q40_RTC_CTRL |= Q40_RTC_WRITE;
- Q40_RTC_MINS = bin2bcd(real_minutes);
- Q40_RTC_SECS = bin2bcd(real_seconds);
- Q40_RTC_CTRL &= ~(Q40_RTC_WRITE);
- } else
- retval = -1;
-
- return retval;
-}
-
-
/* get and set PLL calibration of RTC clock */
#define Q40_RTC_PLL_MASK ((1<<5)-1)
#define Q40_RTC_PLL_SIGN (1<<5)
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 1d28d380e8cc..79a2bb857906 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -123,10 +123,6 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
availmem = memory_start;
m68k_setup_node(0);
- availmem += init_bootmem(start_page, num_pages);
- availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
-
- free_bootmem(__pa(availmem), memory_end - (availmem));
}
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d14782100088..ace5c5bf1836 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,6 +1,9 @@
config MICROBLAZE
def_bool y
+ select ARCH_NO_SWAP
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select ARCH_WANT_IPC_PARSE_VERSION
@@ -8,6 +11,8 @@ config MICROBLAZE
select TIMER_OF
select CLONE_BACKWARDS3
select COMMON_CLK
+ select DMA_NONCOHERENT_OPS
+ select DMA_NONCOHERENT_MMAP
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
@@ -52,9 +57,6 @@ config CPU_LITTLE_ENDIAN
endchoice
-config SWAP
- def_bool n
-
config RWSEM_GENERIC_SPINLOCK
def_bool y
@@ -85,16 +87,10 @@ config STACKTRACE_SUPPORT
config LOCKDEP_SUPPORT
def_bool y
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "arch/microblaze/Kconfig.platform"
menu "Processor type and features"
-source "kernel/Kconfig.preempt"
-
source "kernel/Kconfig.hz"
config MMU
@@ -268,14 +264,6 @@ config MICROBLAZE_64K_PAGES
endchoice
-source "mm/Kconfig"
-
-endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
endmenu
menu "Bus Options"
@@ -299,17 +287,3 @@ config PCI_XILINX
source "drivers/pci/Kconfig"
endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/microblaze/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index 93a737c8d1a6..dc2e3c45e8a2 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -1,11 +1,5 @@
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
-menu "Kernel hacking"
-
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-
-source "lib/Kconfig.debug"
-
-endmenu
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index d269dd4b8279..73330360a8e6 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -40,11 +40,11 @@ CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare
ifdef CONFIG_CPU_BIG_ENDIAN
KBUILD_CFLAGS += -mbig-endian
KBUILD_AFLAGS += -mbig-endian
-LD += -EB
+LDFLAGS += -EB
else
KBUILD_CFLAGS += -mlittle-endian
KBUILD_AFLAGS += -mlittle-endian
-LD += -EL
+LDFLAGS += -EL
endif
CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER))
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index fe6a6c6e5003..569ba9e670c1 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += bugs.h
generic-y += compat.h
generic-y += device.h
generic-y += div64.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
deleted file mode 100644
index add50c1373bf..000000000000
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Implements the generic device dma API for microblaze and the pci
- *
- * Copyright (C) 2009-2010 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2009-2010 PetaLogix
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file COPYING in the main directory of this
- * archive for more details.
- *
- * This file is base on powerpc and x86 dma-mapping.h versions
- * Copyright (C) 2004 IBM
- */
-
-#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
-#define _ASM_MICROBLAZE_DMA_MAPPING_H
-
-/*
- * Available generic sets of operations
- */
-extern const struct dma_map_ops dma_nommu_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &dma_nommu_ops;
-}
-
-#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index db8b1fa83452..7b650ab14fa0 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -553,11 +553,6 @@ void __init *early_get_page(void);
extern unsigned long ioremap_bot, ioremap_base;
-void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
-void consistent_free(size_t size, void *vaddr);
-void consistent_sync(void *vaddr, size_t size, int direction);
-void consistent_sync_page(struct page *page, unsigned long offset,
- size_t size, int direction);
unsigned long consistent_virt_to_pfn(void *vaddr);
void setup_memory(void);
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 3145e7dc8ab1..71032cf64669 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -8,29 +8,15 @@
*/
#include <linux/device.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <asm/cacheflush.h>
-static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
-{
- return consistent_alloc(flag, size, dma_handle);
-}
-
-static void dma_nommu_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- consistent_free(size, vaddr);
-}
-
-static inline void __dma_sync(unsigned long paddr,
- size_t size, enum dma_data_direction direction)
+static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+ enum dma_data_direction direction)
{
switch (direction) {
case DMA_TO_DEVICE:
@@ -45,113 +31,21 @@ static inline void __dma_sync(unsigned long paddr,
}
}
-static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- struct scatterlist *sg;
- int i;
-
- /* FIXME this part of code is untested */
- for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
-
- __dma_sync(sg_phys(sg), sg->length, direction);
- }
-
- return nents;
-}
-
-static inline dma_addr_t dma_nommu_map_page(struct device *dev,
- struct page *page,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(page_to_phys(page) + offset, size, direction);
- return page_to_phys(page) + offset;
+ __dma_sync(dev, paddr, size, dir);
}
-static inline void dma_nommu_unmap_page(struct device *dev,
- dma_addr_t dma_address,
- size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
-/* There is not necessary to do cache cleanup
- *
- * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
- * dma_address is physical address
- */
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(dma_address, size, direction);
+ __dma_sync(dev, paddr, size, dir);
}
-static inline void
-dma_nommu_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- /*
- * It's pointless to flush the cache as the memory segment
- * is given to the CPU
- */
-
- if (direction == DMA_FROM_DEVICE)
- __dma_sync(dma_handle, size, direction);
-}
-
-static inline void
-dma_nommu_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- /*
- * It's pointless to invalidate the cache if the device isn't
- * supposed to write to the relevant region
- */
-
- if (direction == DMA_TO_DEVICE)
- __dma_sync(dma_handle, size, direction);
-}
-
-static inline void
-dma_nommu_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- /* FIXME this part of code is untested */
- if (direction == DMA_FROM_DEVICE)
- for_each_sg(sgl, sg, nents, i)
- __dma_sync(sg->dma_address, sg->length, direction);
-}
-
-static inline void
-dma_nommu_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- /* FIXME this part of code is untested */
- if (direction == DMA_TO_DEVICE)
- for_each_sg(sgl, sg, nents, i)
- __dma_sync(sg->dma_address, sg->length, direction);
-}
-
-static
-int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs)
+int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ unsigned long attrs)
{
#ifdef CONFIG_MMU
unsigned long user_count = vma_pages(vma);
@@ -170,17 +64,3 @@ int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
return -ENXIO;
#endif
}
-
-const struct dma_map_ops dma_nommu_ops = {
- .alloc = dma_nommu_alloc_coherent,
- .free = dma_nommu_free_coherent,
- .mmap = dma_nommu_mmap_coherent,
- .map_sg = dma_nommu_map_sg,
- .map_page = dma_nommu_map_page,
- .unmap_page = dma_nommu_unmap_page,
- .sync_single_for_cpu = dma_nommu_sync_single_for_cpu,
- .sync_single_for_device = dma_nommu_sync_single_for_device,
- .sync_sg_for_cpu = dma_nommu_sync_sg_for_cpu,
- .sync_sg_for_device = dma_nommu_sync_sg_for_device,
-};
-EXPORT_SYMBOL(dma_nommu_ops);
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 4655ff342c64..f264fdcf152a 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -341,11 +341,6 @@ start_here:
/* Initialize r31 with current task address */
addik r31, r0, init_task
- /*
- * Call platform dependent initialize function.
- * Please see $(ARCH)/mach-$(SUBARCH)/setup.c for
- * the function.
- */
addik r11, r0, machine_early_init
brald r15, r11
nop
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index b06c3a7faf20..c9a278ac795a 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
+#include <linux/dma-noncoherent.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
@@ -59,7 +60,8 @@
* uncached region. This will no doubt cause big problems if memory allocated
* here is not also freed properly. -- JW
*/
-void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
{
unsigned long order, vaddr;
void *ret;
@@ -154,7 +156,6 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
return ret;
}
-EXPORT_SYMBOL(consistent_alloc);
#ifdef CONFIG_MMU
static pte_t *consistent_virt_to_pte(void *vaddr)
@@ -178,7 +179,8 @@ unsigned long consistent_virt_to_pfn(void *vaddr)
/*
* free page(s) as defined by the above mapping.
*/
-void consistent_free(size_t size, void *vaddr)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs)
{
struct page *page;
@@ -218,49 +220,3 @@ void consistent_free(size_t size, void *vaddr)
flush_tlb_all();
#endif
}
-EXPORT_SYMBOL(consistent_free);
-
-/*
- * make an area consistent.
- */
-void consistent_sync(void *vaddr, size_t size, int direction)
-{
- unsigned long start;
- unsigned long end;
-
- start = (unsigned long)vaddr;
-
- /* Convert start address back down to unshadowed memory region */
-#ifdef CONFIG_XILINX_UNCACHED_SHADOW
- start &= ~UNCACHED_SHADOW_MASK;
-#endif
- end = start + size;
-
- switch (direction) {
- case PCI_DMA_NONE:
- BUG();
- case PCI_DMA_FROMDEVICE: /* invalidate only */
- invalidate_dcache_range(start, end);
- break;
- case PCI_DMA_TODEVICE: /* writeback only */
- flush_dcache_range(start, end);
- break;
- case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
- flush_dcache_range(start, end);
- break;
- }
-}
-EXPORT_SYMBOL(consistent_sync);
-
-/*
- * consistent_sync_page makes memory consistent. identical
- * to consistent_sync, but takes a struct page instead of a
- * virtual address
- */
-void consistent_sync_page(struct page *page, unsigned long offset,
- size_t size, int direction)
-{
- unsigned long start = (unsigned long)page_address(page) + offset;
- consistent_sync((void *)start, size, direction);
-}
-EXPORT_SYMBOL(consistent_sync_page);
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index af607447c683..202ad6a494f5 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -90,7 +90,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
struct mm_struct *mm = current->mm;
int code = SEGV_MAPERR;
int is_write = error_code & ESR_S;
- int fault;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
regs->ear = address;
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index f34346d56095..2ffd171af8b6 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -597,19 +597,6 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
-/*
- * We need to avoid collisions with `mirrored' VGA ports
- * and other strange ISA hardware, so we always want the
- * addresses to be allocated in the 0x000-0x0ff region
- * modulo 0x400.
- *
- * Why? Because some silly external IO cards only decode
- * the low 10 bits of the IO address. The 0x00-0xff region
- * is reserved for motherboard devices that decode all 16
- * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
- * but we want to try to avoid allocating at 0x2900-0x2bff
- * which might have be mirrored at 0x0100-0x03ff..
- */
int pcibios_add_device(struct pci_dev *dev)
{
dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c
index 14c7da5fd039..b800909ddccf 100644
--- a/arch/microblaze/pci/xilinx_pci.c
+++ b/arch/microblaze/pci/xilinx_pci.c
@@ -157,6 +157,7 @@ void __init xilinx_pci_init(void)
/* Set the max bus number to 255, and bus/subbus no's to 0 */
pci_reg = of_iomap(pci_node, 0);
+ WARN_ON(!pci_reg);
out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
iounmap(pci_reg);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 08c10c518f83..2af13b162e5e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -16,6 +16,7 @@ config MIPS
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select CPU_PM if CPU_IDLE
+ select DMA_DIRECT_OPS
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
@@ -97,6 +98,7 @@ config MIPS_GENERIC
select HW_HAS_PCI
select IRQ_MIPS_CPU
select LIBFDT
+ select MIPS_AUTO_PFN_OFFSET
select MIPS_CPU_SCACHE
select MIPS_GIC
select MIPS_L1_CACHE_SHIFT_7
@@ -193,6 +195,7 @@ config ATH79
select CSRC_R4K
select DMA_NONCOHERENT
select GPIOLIB
+ select PINCTRL
select HAVE_CLK
select COMMON_CLK
select CLKDEV_LOOKUP
@@ -211,6 +214,8 @@ config ATH79
config BMIPS_GENERIC
bool "Broadcom Generic BMIPS kernel"
+ select ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+ select ARCH_HAS_PHYS_TO_DMA
select BOOT_RAW
select NO_EXCEPT_FILL
select USE_OF
@@ -438,7 +443,6 @@ config MACH_LOONGSON32
config MACH_LOONGSON64
bool "Loongson-2/3 family of machines"
- select ARCH_HAS_PHYS_TO_DMA
select SYS_SUPPORTS_ZBOOT
help
This enables the support of Loongson-2/3 family of machines.
@@ -662,11 +666,11 @@ config SGI_IP22
config SGI_IP27
bool "SGI IP27 (Origin200/2000)"
+ select ARCH_HAS_PHYS_TO_DMA
select FW_ARC
select FW_ARC64
select BOOT_ELF64
select DEFAULT_SGI_PARTITION
- select DMA_COHERENT
select SYS_HAS_EARLY_PRINTK
select HW_HAS_PCI
select NR_CPUS_DEFAULT_64
@@ -721,6 +725,7 @@ config SGI_IP28
config SGI_IP32
bool "SGI IP32 (O2)"
+ select ARCH_HAS_PHYS_TO_DMA
select FW_ARC
select FW_ARC32
select BOOT_ELF32
@@ -743,7 +748,6 @@ config SGI_IP32
config SIBYTE_CRHINE
bool "Sibyte BCM91120C-CRhine"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1120
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -753,7 +757,6 @@ config SIBYTE_CRHINE
config SIBYTE_CARMEL
bool "Sibyte BCM91120x-Carmel"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1120
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -763,7 +766,6 @@ config SIBYTE_CARMEL
config SIBYTE_CRHONE
bool "Sibyte BCM91125C-CRhone"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1125
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -774,7 +776,6 @@ config SIBYTE_CRHONE
config SIBYTE_RHONE
bool "Sibyte BCM91125E-Rhone"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1125H
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -784,7 +785,6 @@ config SIBYTE_RHONE
config SIBYTE_SWARM
bool "Sibyte BCM91250A-SWARM"
select BOOT_ELF32
- select DMA_COHERENT
select HAVE_PATA_PLATFORM
select SIBYTE_SB1250
select SWAP_IO_SPACE
@@ -797,7 +797,6 @@ config SIBYTE_SWARM
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
select BOOT_ELF32
- select DMA_COHERENT
select HAVE_PATA_PLATFORM
select SIBYTE_SB1250
select SWAP_IO_SPACE
@@ -809,7 +808,6 @@ config SIBYTE_LITTLESUR
config SIBYTE_SENTOSA
bool "Sibyte BCM91250E-Sentosa"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_SB1250
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -819,7 +817,6 @@ config SIBYTE_SENTOSA
config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
select BOOT_ELF32
- select DMA_COHERENT
select NR_CPUS_DEFAULT_4
select SIBYTE_BCM1x80
select SWAP_IO_SPACE
@@ -895,8 +892,8 @@ config CAVIUM_OCTEON_SOC
bool "Cavium Networks Octeon SoC based boards"
select CEVT_R4K
select ARCH_HAS_PHYS_TO_DMA
+ select HAS_RAPIDIO
select PHYS_ADDR_T_64BIT
- select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select EDAC_SUPPORT
@@ -945,7 +942,6 @@ config NLM_XLR_BOARD
select PHYS_ADDR_T_64BIT
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
- select DMA_COHERENT
select NR_CPUS_DEFAULT_32
select CEVT_R4K
select CSRC_R4K
@@ -973,7 +969,6 @@ config NLM_XLP_BOARD
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
- select DMA_COHERENT
select NR_CPUS_DEFAULT_32
select CEVT_R4K
select CSRC_R4K
@@ -992,7 +987,6 @@ config MIPS_PARAVIRT
bool "Para-Virtualized guest system"
select CEVT_R4K
select CSRC_R4K
- select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
@@ -1118,12 +1112,14 @@ config DMA_PERDEV_COHERENT
bool
select DMA_MAYBE_COHERENT
-config DMA_COHERENT
- bool
-
config DMA_NONCOHERENT
bool
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
select NEED_DMA_MAP_STATE
+ select DMA_NONCOHERENT_MMAP
+ select DMA_NONCOHERENT_CACHE_SYNC
+ select DMA_NONCOHERENT_OPS
config SYS_HAS_EARLY_PRINTK
bool
@@ -1365,6 +1361,7 @@ choice
config CPU_LOONGSON3
bool "Loongson 3 CPU"
depends on SYS_HAS_CPU_LOONGSON3
+ select ARCH_HAS_PHYS_TO_DMA
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
@@ -1427,7 +1424,8 @@ config CPU_LOONGSON1B
select LEDS_GPIO_REGISTER
help
The Loongson 1B is a 32-bit SoC, which implements the MIPS32
- release 2 instruction set.
+ Release 1 instruction set and part of the MIPS32 Release 2
+ instruction set.
config CPU_LOONGSON1C
bool "Loongson 1C"
@@ -1436,7 +1434,8 @@ config CPU_LOONGSON1C
select LEDS_GPIO_REGISTER
help
The Loongson 1C is a 32-bit SoC, which implements the MIPS32
- release 2 instruction set.
+ Release 1 instruction set and part of the MIPS32 Release 2
+ instruction set.
config CPU_MIPS32_R1
bool "MIPS32 Release 1"
@@ -1831,11 +1830,12 @@ config CPU_LOONGSON2
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
+ select ARCH_HAS_PHYS_TO_DMA
config CPU_LOONGSON1
bool
select CPU_MIPS32
- select CPU_MIPSR2
+ select CPU_MIPSR1
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
@@ -1979,12 +1979,6 @@ config SYS_HAS_CPU_XLR
config SYS_HAS_CPU_XLP
bool
-config MIPS_MALTA_PM
- depends on MIPS_MALTA
- depends on PCI
- bool
- default y
-
#
# CPU may reorder R->R, R->W, W->R, W->W
# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
@@ -2641,8 +2635,6 @@ config HW_PERF_EVENTS
Enable hardware performance counter support for perf events. If
disabled, perf events will use software events only.
-source "mm/Kconfig"
-
config SMP
bool "Multi-Processing support"
depends on SYS_SUPPORTS_SMP
@@ -2820,8 +2812,6 @@ config HZ
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
-source "kernel/Kconfig.preempt"
-
config KEXEC
bool "Kexec system call"
select KEXEC_CORE
@@ -2994,9 +2984,8 @@ config PGTABLE_LEVELS
default 3 if 64BIT && !PAGE_SIZE_64KB
default 2
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
+config MIPS_AUTO_PFN_OFFSET
+ bool
menu "Bus options (PCI, PCMCIA, EISA, ISA, TC)"
@@ -3115,10 +3104,13 @@ config ZONE_DMA32
source "drivers/pcmcia/Kconfig"
+config HAS_RAPIDIO
+ bool
+ default n
+
config RAPIDIO
tristate "RapidIO support"
- depends on PCI
- default n
+ depends on HAS_RAPIDIO || PCI
help
If you say Y here, the kernel will include drivers and
infrastructure code to support RapidIO interconnect devices.
@@ -3127,10 +3119,6 @@ source "drivers/rapidio/Kconfig"
endmenu
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
config TRAD_SIGNALS
bool
@@ -3176,8 +3164,6 @@ config BINFMT_ELF32
default y if MIPS32_O32 || MIPS32_N32
select ELFCORE
-endmenu
-
menu "Power management options"
config ARCH_HIBERNATION_POSSIBLE
@@ -3205,20 +3191,6 @@ source "drivers/cpuidle/Kconfig"
endmenu
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "drivers/firmware/Kconfig"
-source "fs/Kconfig"
-
-source "arch/mips/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
source "arch/mips/kvm/Kconfig"
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 0749c3724543..0c86b2a2adfc 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -1,12 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
bool
default y
-source "lib/Kconfig.debug"
-
config EARLY_PRINTK
bool "Early printk" if EXPERT
depends on SYS_HAS_EARLY_PRINTK
@@ -155,5 +152,3 @@ config MIPS_CPS_NS16550_SHIFT
adjacent ns16550 registers in the system.
endif # MIPS_CPS_NS16550_BOOL
-
-endmenu
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index e2122cca4ae2..5425df002a6b 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -122,12 +122,22 @@ cflags-y += -ffreestanding
# are used, so we kludge that here. A bug has been filed at
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29413.
#
+# clang doesn't suffer from these issues and our checks against -dumpmachine
+# don't work so well when cross compiling, since without providing --target
+# clang's output will be based upon the build machine. So for clang we simply
+# unconditionally specify -EB or -EL as appropriate.
+#
+ifeq ($(cc-name),clang)
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -EL
+else
undef-all += -UMIPSEB -U_MIPSEB -U__MIPSEB -U__MIPSEB__
undef-all += -UMIPSEL -U_MIPSEL -U__MIPSEL -U__MIPSEL__
predef-be += -DMIPSEB -D_MIPSEB -D__MIPSEB -D__MIPSEB__
predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
+endif
cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
-fno-omit-frame-pointer
@@ -155,15 +165,11 @@ cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
- -Wa,-mips32 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
- -Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
-cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
- -Wa,-mips64 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
- -Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index fa75d75b5ba9..ddff9a02513d 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -34,6 +34,7 @@
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
@@ -60,7 +61,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index aab55aaf3d62..d625e6f99ae7 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -31,6 +31,7 @@
#include <mtd/mtd-abi.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <asm/mach-au1x00/au1xxx_eth.h>
@@ -58,7 +59,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
index 0fc53e08a894..5f05b8714385 100644
--- a/arch/mips/alchemy/board-xxs1500.c
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -29,6 +29,7 @@
#include <linux/pm.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
@@ -55,7 +56,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
diff --git a/arch/mips/alchemy/devboards/platform.c b/arch/mips/alchemy/devboards/platform.c
index 203854ddd1bb..8d4b65c3268a 100644
--- a/arch/mips/alchemy/devboards/platform.c
+++ b/arch/mips/alchemy/devboards/platform.c
@@ -14,6 +14,7 @@
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
@@ -36,7 +37,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
if (alchemy_get_cputype() == ALCHEMY_CPU_AU1300)
alchemy_uart_putchar(AU1300_UART2_PHYS_ADDR, c);
diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c
index 0137656107a9..6b64fd96dba8 100644
--- a/arch/mips/ar7/clock.c
+++ b/arch/mips/ar7/clock.c
@@ -476,3 +476,32 @@ void __init ar7_init_clocks(void)
/* adjust vbus clock rate */
vbus_clk.rate = bus_clk.rate / 2;
}
+
+/* dummy functions, should not be called */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ WARN_ON(clk);
+ return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index dd53987a690f..2ec8d9ac91ec 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -25,6 +25,7 @@
#include <linux/string.h>
#include <linux/io.h>
#include <asm/bootinfo.h>
+#include <asm/setup.h>
#include <asm/mach-ar7/ar7.h>
#include <asm/mach-ar7/prom.h>
@@ -259,10 +260,9 @@ static inline void serial_out(int offset, int value)
writel(value, (void *)PORT(offset));
}
-int prom_putchar(char c)
+void prom_putchar(char c)
{
while ((serial_in(UART_LSR) & UART_LSR_TEMT) == 0)
;
serial_out(UART_TX, c);
- return 1;
}
diff --git a/arch/mips/ath25/Kconfig b/arch/mips/ath25/Kconfig
index 7070b4bcd01d..2c1dfd06c366 100644
--- a/arch/mips/ath25/Kconfig
+++ b/arch/mips/ath25/Kconfig
@@ -12,6 +12,7 @@ config SOC_AR2315
config PCI_AR2315
bool "Atheros AR2315 PCI controller support"
depends on SOC_AR2315
+ select ARCH_HAS_PHYS_TO_DMA
select HW_HAS_PCI
select PCI
default y
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
index 6d11ae581ea7..989e71015ee6 100644
--- a/arch/mips/ath25/board.c
+++ b/arch/mips/ath25/board.c
@@ -146,10 +146,10 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
pr_info("Fixing up empty mac addresses\n");
config->reset_config_gpio = 0xffff;
config->sys_led_gpio = 0xffff;
- random_ether_addr(config->wlan0_mac);
+ eth_random_addr(config->wlan0_mac);
config->wlan0_mac[0] &= ~0x06;
- random_ether_addr(config->enet0_mac);
- random_ether_addr(config->enet1_mac);
+ eth_random_addr(config->enet0_mac);
+ eth_random_addr(config->enet1_mac);
}
}
diff --git a/arch/mips/ath25/early_printk.c b/arch/mips/ath25/early_printk.c
index 36035b628161..d534761e9cda 100644
--- a/arch/mips/ath25/early_printk.c
+++ b/arch/mips/ath25/early_printk.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/serial_reg.h>
+#include <asm/setup.h>
#include "devices.h"
#include "ar2315_regs.h"
@@ -25,7 +26,7 @@ static inline unsigned char prom_uart_rr(void __iomem *base, unsigned reg)
return __raw_readl(base + 4 * reg);
}
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
{
static void __iomem *base;
@@ -38,7 +39,7 @@ void prom_putchar(unsigned char ch)
while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
;
- prom_uart_wr(base, UART_TX, ch);
+ prom_uart_wr(base, UART_TX, (unsigned char)ch);
while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
;
}
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 6b1000b6a6a6..cf9158e3c2d9 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -355,6 +355,91 @@ static void __init ar934x_clocks_init(void)
iounmap(dpll_base);
}
+static void __init qca953x_clocks_init(void)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(QCA953X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA953X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = 40 * 1000 * 1000;
+ else
+ ref_rate = 25 * 1000 * 1000;
+
+ pll = ath79_pll_rr(QCA953X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA953X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_NINT_MASK;
+ frac = (pll >> QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_NFRAC_MASK;
+
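+ /*
+ * Fractional-N PLL: rate = (ref / ref_div) * (nint + frac / 2^6),
+ * then post-divided by 2^out_div.
+ */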
+ cpu_pll = nint * ref_rate / ref_div;
+ cpu_pll += frac * (ref_rate >> 6) / ref_div;
+ cpu_pll /= (1 << out_div);
+
+ pll = ath79_pll_rr(QCA953X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA953X_PLL_DDR_CONFIG_NINT_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_NINT_MASK;
+ frac = (pll >> QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+ ddr_pll = nint * ref_rate / ref_div;
+ ddr_pll += frac * (ref_rate >> 6) / (ref_div << 4);
+ ddr_pll /= (1 << out_div);
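+ /* The DDR fraction carries four more bits than the CPU PLL, hence
+ * the extra << 4 above. */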
+
+ clk_ctrl = ath79_pll_rr(QCA953X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ cpu_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+ cpu_rate = cpu_pll / (postdiv + 1);
+ else
+ cpu_rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ddr_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+ ddr_rate = ddr_pll / (postdiv + 1);
+ else
+ ddr_rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ahb_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ahb_rate = ddr_pll / (postdiv + 1);
+ else
+ ahb_rate = cpu_pll / (postdiv + 1);
+
+ ath79_add_sys_clkdev("ref", ref_rate);
+ ath79_add_sys_clkdev("cpu", cpu_rate);
+ ath79_add_sys_clkdev("ddr", ddr_rate);
+ ath79_add_sys_clkdev("ahb", ahb_rate);
+
+ clk_add_alias("wdt", NULL, "ref", NULL);
+ clk_add_alias("uart", NULL, "ref", NULL);
+}
+
static void __init qca955x_clocks_init(void)
{
unsigned long ref_rate;
@@ -440,6 +525,110 @@ static void __init qca955x_clocks_init(void)
clk_add_alias("uart", NULL, "ref", NULL);
}
+static void __init qca956x_clocks_init(void)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll, out_div, ref_div, nint, hfrac, lfrac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ /*
+ * The QCA956x timer init workaround has to be applied right before
+ * setting up the clocks; otherwise the timer interrupt never fires
+ * and jiffies never advances.
+ */
+ u32 misc;
+
+ misc = ath79_reset_rr(AR71XX_RESET_REG_MISC_INT_ENABLE);
+ misc |= MISC_INT_MIPS_SI_TIMERINT_MASK;
+ ath79_reset_wr(AR71XX_RESET_REG_MISC_INT_ENABLE, misc);
+
+ bootstrap = ath79_reset_rr(QCA956X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA956X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = 40 * 1000 * 1000;
+ else
+ ref_rate = 25 * 1000 * 1000;
+
+ pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG_REFDIV_MASK;
+
+ pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG1_REG);
+ nint = (pll >> QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NINT_MASK;
+ hfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK;
+ lfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK;
+
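+ /*
+ * The CPU PLL fraction is split across two fields: lfrac counts in
+ * 1/(25 * 2^13) steps of ref / ref_div, while hfrac >> 13 adds whole
+ * multiples.
+ */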
+ cpu_pll = nint * ref_rate / ref_div;
+ cpu_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
+ cpu_pll += (hfrac >> 13) * ref_rate / ref_div;
+ cpu_pll /= (1 << out_div);
+
+ pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG_REFDIV_MASK;
+ pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG1_REG);
+ nint = (pll >> QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NINT_MASK;
+ hfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK;
+ lfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK;
+
+ ddr_pll = nint * ref_rate / ref_div;
+ ddr_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
+ ddr_pll += (hfrac >> 13) * ref_rate / ref_div;
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = ath79_pll_rr(QCA956X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ cpu_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL)
+ cpu_rate = ddr_pll / (postdiv + 1);
+ else
+ cpu_rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ddr_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL)
+ ddr_rate = cpu_pll / (postdiv + 1);
+ else
+ ddr_rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ahb_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ahb_rate = ddr_pll / (postdiv + 1);
+ else
+ ahb_rate = cpu_pll / (postdiv + 1);
+
+ ath79_add_sys_clkdev("ref", ref_rate);
+ ath79_add_sys_clkdev("cpu", cpu_rate);
+ ath79_add_sys_clkdev("ddr", ddr_rate);
+ ath79_add_sys_clkdev("ahb", ahb_rate);
+
+ clk_add_alias("wdt", NULL, "ref", NULL);
+ clk_add_alias("uart", NULL, "ref", NULL);
+}
+
void __init ath79_clocks_init(void)
{
if (soc_is_ar71xx())
@@ -450,8 +639,12 @@ void __init ath79_clocks_init(void)
ar933x_clocks_init();
else if (soc_is_ar934x())
ar934x_clocks_init();
+ else if (soc_is_qca953x())
+ qca953x_clocks_init();
else if (soc_is_qca955x())
qca955x_clocks_init();
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ qca956x_clocks_init();
else
BUG();
}
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 10a405d593df..cd6055f9e7a0 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
void ath79_ddr_wb_flush(u32 reg)
{
- void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+ void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
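+ /* reg is a register index; each flush register is 32 bits wide. */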
/* Flush the DDR write buffer. */
__raw_writel(0x1, flush_reg);
@@ -103,8 +103,12 @@ void ath79_device_reset_set(u32 mask)
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca953x())
+ reg = QCA953X_RESET_REG_RESET_MODULE;
else if (soc_is_qca955x())
reg = QCA955X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ reg = QCA956X_RESET_REG_RESET_MODULE;
else
BUG();
@@ -131,8 +135,12 @@ void ath79_device_reset_clear(u32 mask)
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca953x())
+ reg = QCA953X_RESET_REG_RESET_MODULE;
else if (soc_is_qca955x())
reg = QCA955X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ reg = QCA956X_RESET_REG_RESET_MODULE;
else
BUG();
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index d1adc59af5bf..4b1063117ef7 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -13,12 +13,13 @@
#include <linux/errno.h>
#include <linux/serial_reg.h>
#include <asm/addrspace.h>
+#include <asm/setup.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ar933x_uart.h>
-static void (*_prom_putchar) (unsigned char);
+static void (*_prom_putchar)(char);
static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
{
@@ -33,31 +34,72 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-static void prom_putchar_ar71xx(unsigned char ch)
+static void prom_putchar_ar71xx(char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
- __raw_writel(ch, base + UART_TX * 4);
+ __raw_writel((unsigned char)ch, base + UART_TX * 4);
prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
}
-static void prom_putchar_ar933x(unsigned char ch)
+static void prom_putchar_ar933x(char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR933X_UART_BASE));
prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
AR933X_UART_DATA_TX_CSR);
- __raw_writel(AR933X_UART_DATA_TX_CSR | ch, base + AR933X_UART_DATA_REG);
+ __raw_writel(AR933X_UART_DATA_TX_CSR | (unsigned char)ch,
+ base + AR933X_UART_DATA_REG);
prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
AR933X_UART_DATA_TX_CSR);
}
-static void prom_putchar_dummy(unsigned char ch)
+static void prom_putchar_dummy(char ch)
{
/* nothing to do */
}
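+/*
+ * Mux the UART out onto its pins before the first character is sent;
+ * the bootloader may have left UART_EN cleared in the GPIO function
+ * register.
+ */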
+static void prom_enable_uart(u32 id)
+{
+ void __iomem *gpio_base;
+ u32 uart_en;
+ u32 t;
+
+ switch (id) {
+ case REV_ID_MAJOR_AR71XX:
+ uart_en = AR71XX_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR7240:
+ case REV_ID_MAJOR_AR7241:
+ case REV_ID_MAJOR_AR7242:
+ uart_en = AR724X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR913X:
+ uart_en = AR913X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR9330:
+ case REV_ID_MAJOR_AR9331:
+ uart_en = AR933X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR9341:
+ case REV_ID_MAJOR_AR9342:
+ case REV_ID_MAJOR_AR9344:
+ /* TODO */
+ default:
+ return;
+ }
+
+ gpio_base = (void __iomem *)KSEG1ADDR(AR71XX_GPIO_BASE);
+ t = __raw_readl(gpio_base + AR71XX_GPIO_REG_FUNC);
+ t |= uart_en;
+ __raw_writel(t, gpio_base + AR71XX_GPIO_REG_FUNC);
+}
+
static void prom_putchar_init(void)
{
void __iomem *base;
@@ -76,8 +118,12 @@ static void prom_putchar_init(void)
case REV_ID_MAJOR_AR9341:
case REV_ID_MAJOR_AR9342:
case REV_ID_MAJOR_AR9344:
+ case REV_ID_MAJOR_QCA9533:
+ case REV_ID_MAJOR_QCA9533_V2:
case REV_ID_MAJOR_QCA9556:
case REV_ID_MAJOR_QCA9558:
+ case REV_ID_MAJOR_TP9343:
+ case REV_ID_MAJOR_QCA956X:
_prom_putchar = prom_putchar_ar71xx;
break;
@@ -88,11 +134,13 @@ static void prom_putchar_init(void)
default:
_prom_putchar = prom_putchar_dummy;
- break;
+ return;
}
+
+ prom_enable_uart(id);
}
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
{
if (!_prom_putchar)
prom_putchar_init();
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index f206dafbb0a3..4c7a93f4039a 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -40,6 +40,7 @@ static char ath79_sys_type[ATH79_SYS_TYPE_LEN];
static void ath79_restart(char *command)
{
+ local_irq_disable();
ath79_device_reset_set(AR71XX_RESET_FULL_CHIP);
for (;;)
if (cpu_wait)
@@ -59,6 +60,7 @@ static void __init ath79_detect_sys_type(void)
u32 major;
u32 minor;
u32 rev = 0;
+ u32 ver = 1;
id = ath79_reset_rr(AR71XX_RESET_REG_REV_ID);
major = id & REV_ID_MAJOR_MASK;
@@ -151,6 +153,17 @@ static void __init ath79_detect_sys_type(void)
rev = id & AR934X_REV_ID_REVISION_MASK;
break;
+ case REV_ID_MAJOR_QCA9533_V2:
+ ver = 2;
+ ath79_soc_rev = 2;
+ /* fall through */
+
+ case REV_ID_MAJOR_QCA9533:
+ ath79_soc = ATH79_SOC_QCA9533;
+ chip = "9533";
+ rev = id & QCA953X_REV_ID_REVISION_MASK;
+ break;
+
case REV_ID_MAJOR_QCA9556:
ath79_soc = ATH79_SOC_QCA9556;
chip = "9556";
@@ -163,14 +176,30 @@ static void __init ath79_detect_sys_type(void)
rev = id & QCA955X_REV_ID_REVISION_MASK;
break;
+ case REV_ID_MAJOR_QCA956X:
+ ath79_soc = ATH79_SOC_QCA956X;
+ chip = "956X";
+ rev = id & QCA956X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_TP9343:
+ ath79_soc = ATH79_SOC_TP9343;
+ chip = "9343";
+ rev = id & QCA956X_REV_ID_REVISION_MASK;
+ break;
+
default:
panic("ath79: unknown SoC, id:0x%08x", id);
}
- ath79_soc_rev = rev;
+ if (ver == 1)
+ ath79_soc_rev = rev;
- if (soc_is_qca955x())
- sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s rev %u",
+ if (soc_is_qca953x() || soc_is_qca955x() || soc_is_qca956x())
+ sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s ver %u rev %u",
+ chip, ver, rev);
+ else if (soc_is_tp9343())
+ sprintf(ath79_sys_type, "Qualcomm Atheros TP%s rev %u",
chip, rev);
else
sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 8c9cbf13d32a..6054d49e608e 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
*/
if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
cpu_wait = NULL;
-
- /*
- * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
- * Enable ExternalSync for sync instruction to take effect
- */
- set_c0_config7(MIPS_CONF7_ES);
break;
#endif
}
diff --git a/arch/mips/bcm63xx/early_printk.c b/arch/mips/bcm63xx/early_printk.c
index 6092226a6d76..9e9ec27c282f 100644
--- a/arch/mips/bcm63xx/early_printk.c
+++ b/arch/mips/bcm63xx/early_printk.c
@@ -8,6 +8,7 @@
#include <bcm63xx_io.h>
#include <linux/serial_bcm63xx.h>
+#include <asm/setup.h>
static void wait_xfered(void)
{
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index 6dec30842b2f..3d13c77c125f 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -17,7 +17,7 @@
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <dma-coherence.h>
+#include <asm/bmips.h>
/*
* BCM338x has configurable address translation windows which allow the
@@ -40,7 +40,7 @@ static struct bmips_dma_range *bmips_dma_ranges;
#define FLUSH_RAC 0x100
-static dma_addr_t bmips_phys_to_dma(struct device *dev, phys_addr_t pa)
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa)
{
struct bmips_dma_range *r;
@@ -52,17 +52,7 @@ static dma_addr_t bmips_phys_to_dma(struct device *dev, phys_addr_t pa)
return pa;
}
-dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
- return bmips_phys_to_dma(dev, virt_to_phys(addr));
-}
-
-dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
-{
- return bmips_phys_to_dma(dev, page_to_phys(page));
-}
-
-unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
struct bmips_dma_range *r;
@@ -74,6 +64,22 @@ unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
return dma_addr;
}
+void arch_sync_dma_for_cpu_all(struct device *dev)
+{
+ void __iomem *cbr = BMIPS_GET_CBR();
+ u32 cfg;
+
+ if (boot_cpu_type() != CPU_BMIPS3300 &&
+ boot_cpu_type() != CPU_BMIPS4350 &&
+ boot_cpu_type() != CPU_BMIPS4380)
+ return;
+
+ /* Flush stale data out of the readahead cache */
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
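+ /* 0x100 is the FLUSH_RAC bit defined above; the read back below
+ * ensures the posted write has completed. */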
+ __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+}
+
static int __init bmips_init_dma_ranges(void)
{
struct device_node *np =
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 3b6f687f177c..231fc5ce375e 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -202,13 +202,6 @@ void __init device_tree_init(void)
of_node_put(np);
}
-int __init plat_of_setup(void)
-{
- return __dt_register_buses("simple-bus", NULL);
-}
-
-arch_initcall(plat_of_setup);
-
static int __init plat_dev_init(void)
{
of_clk_init(NULL);
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index c22da16d67b8..35704c28a28b 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -105,28 +105,29 @@ $(obj)/uImage: $(obj)/uImage.$(suffix-y)
# Flattened Image Tree (.itb) images
#
-targets += vmlinux.itb
-targets += vmlinux.gz.itb
-targets += vmlinux.bz2.itb
-targets += vmlinux.lzma.itb
-targets += vmlinux.lzo.itb
-
ifeq ($(ADDR_BITS),32)
- itb_addr_cells = 1
+itb_addr_cells = 1
endif
ifeq ($(ADDR_BITS),64)
- itb_addr_cells = 2
+itb_addr_cells = 2
endif
+targets += vmlinux.its.S
+
quiet_cmd_its_cat = CAT $@
- cmd_its_cat = cat $^ >$@
+ cmd_its_cat = cat $(filter-out $(PHONY), $^) >$@
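+# FORCE is now in $^ and must be filtered out so the phony target does
+# not end up in the concatenated output.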
-$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS))
+$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) FORCE
$(call if_changed,its_cat)
+targets += vmlinux.its
+targets += vmlinux.gz.its
+targets += vmlinux.bz2.its
+targets += vmlinux.lzma.its
+targets += vmlinux.lzo.its
+
quiet_cmd_cpp_its_S = ITS $@
- cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \
- -D__ASSEMBLY__ \
+ cmd_cpp_its_S = $(CPP) -P -C -o $@ $< \
-DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
-DVMLINUX_BINARY="\"$(3)\"" \
-DVMLINUX_COMPRESSION="\"$(2)\"" \
@@ -136,19 +137,25 @@ quiet_cmd_cpp_its_S = ITS $@
-DADDR_CELLS=$(itb_addr_cells)
$(obj)/vmlinux.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
+ $(call if_changed,cpp_its_S,none,vmlinux.bin)
$(obj)/vmlinux.gz.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
+ $(call if_changed,cpp_its_S,gzip,vmlinux.bin.gz)
$(obj)/vmlinux.bz2.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
+ $(call if_changed,cpp_its_S,bzip2,vmlinux.bin.bz2)
$(obj)/vmlinux.lzma.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
+ $(call if_changed,cpp_its_S,lzma,vmlinux.bin.lzma)
$(obj)/vmlinux.lzo.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
+ $(call if_changed,cpp_its_S,lzo,vmlinux.bin.lzo)
+
+targets += vmlinux.itb
+targets += vmlinux.gz.itb
+targets += vmlinux.bz2.itb
+targets += vmlinux.lzma.itb
+targets += vmlinux.lzo.itb
quiet_cmd_itb-image = ITB $@
cmd_itb-image = \
@@ -162,14 +169,5 @@ quiet_cmd_itb-image = ITB $@
$(obj)/vmlinux.itb: $(obj)/vmlinux.its $(obj)/vmlinux.bin FORCE
$(call if_changed,itb-image,$<)
-$(obj)/vmlinux.gz.itb: $(obj)/vmlinux.gz.its $(obj)/vmlinux.bin.gz FORCE
- $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.bz2.itb: $(obj)/vmlinux.bz2.its $(obj)/vmlinux.bin.bz2 FORCE
- $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.lzma.itb: $(obj)/vmlinux.lzma.its $(obj)/vmlinux.bin.lzma FORCE
- $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.lzo.itb: $(obj)/vmlinux.lzo.its $(obj)/vmlinux.bin.lzo FORCE
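+# A single pattern rule now covers the gz/bz2/lzma/lzo itb variants.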
+$(obj)/vmlinux.%.itb: $(obj)/vmlinux.%.its $(obj)/vmlinux.bin.% FORCE
$(call if_changed,itb-image,$<)
diff --git a/arch/mips/boot/compressed/uart-prom.c b/arch/mips/boot/compressed/uart-prom.c
index d6f0fee0a151..a8a0a32e05d1 100644
--- a/arch/mips/boot/compressed/uart-prom.c
+++ b/arch/mips/boot/compressed/uart-prom.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-
-extern void prom_putchar(unsigned char ch);
+#include <asm/setup.h>
void putc(char c)
{
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index aa4e8f75ff5d..ce93d57f1b4d 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -155,6 +155,25 @@
};
};
+ spi_gpio {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ num-chipselects = <2>;
+
+ gpio-miso = <&gpe 14 0>;
+ gpio-sck = <&gpe 15 0>;
+ gpio-mosi = <&gpe 17 0>;
+ cs-gpios = <&gpe 16 0
+ &gpe 18 0>;
+
+ spidev@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
uart0: serial@10030000 {
compatible = "ingenic,jz4780-uart";
reg = <0x10030000 0x100>;
diff --git a/arch/mips/boot/dts/mscc/Makefile b/arch/mips/boot/dts/mscc/Makefile
index 3c6aed9f5439..9a9bb7ea0503 100644
--- a/arch/mips/boot/dts/mscc/Makefile
+++ b/arch/mips/boot/dts/mscc/Makefile
@@ -1,3 +1,3 @@
-dtb-$(CONFIG_LEGACY_BOARD_OCELOT) += ocelot_pcb123.dtb
+dtb-$(CONFIG_MSCC_OCELOT) += ocelot_pcb123.dtb
obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
index 4f33dbc67348..f7eb612b46ba 100644
--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
+++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
@@ -91,6 +91,17 @@
status = "disabled";
};
+ spi: spi@101000 {
+ compatible = "mscc,ocelot-spi", "snps,dw-apb-ssi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x101000 0x100>, <0x3c 0x18>;
+ interrupts = <9>;
+ clocks = <&ahb_clk>;
+
+ status = "disabled";
+ };
+
switch@1010000 {
compatible = "mscc,vsc7514-switch";
reg = <0x1010000 0x10000>,
@@ -168,6 +179,9 @@
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&gpio 0 0 22>;
+ interrupt-controller;
+ interrupts = <13>;
+ #interrupt-cells = <2>;
uart_pins: uart-pins {
pins = "GPIO_6", "GPIO_7";
@@ -178,13 +192,18 @@
pins = "GPIO_12", "GPIO_13";
function = "uart2";
};
+
+ miim1: miim1 {
+ pins = "GPIO_14", "GPIO_15";
+ function = "miim1";
+ };
};
mdio0: mdio@107009c {
#address-cells = <1>;
#size-cells = <0>;
compatible = "mscc,ocelot-miim";
- reg = <0x107009c 0x36>, <0x10700f0 0x8>;
+ reg = <0x107009c 0x24>, <0x10700f0 0x8>;
interrupts = <14>;
status = "disabled";
@@ -201,5 +220,16 @@
reg = <3>;
};
};
+
+ mdio1: mdio@10700c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "mscc,ocelot-miim";
+ reg = <0x10700c0 0x24>;
+ interrupts = <15>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&miim1>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
index 4ccd65379059..2266027759f9 100644
--- a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
+++ b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
@@ -26,6 +26,16 @@
status = "okay";
};
+&spi {
+ status = "okay";
+
+ flash@0 {
+ compatible = "macronix,mx25l25635f", "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+};
+
&mdio0 {
status = "okay";
};
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index 1fe561c5f90e..61dcfa5b6ca7 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -161,7 +161,7 @@
usb_phy: usb-phy {
compatible = "qca,ar7100-usb-phy";
- reset-names = "usb-phy", "usb-suspend-override";
+ reset-names = "phy", "suspend-override";
resets = <&rst 4>, <&rst 3>;
#phy-cells = <0>;
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
index 3931033e47c8..7fccf6357225 100644
--- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -22,11 +22,10 @@
};
gpio-keys {
- compatible = "gpio-keys-polled";
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <20>;
button@0 {
label = "reset";
linux,code = <KEY_RESTART>;
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
index efd5f0722206..2bae201aa365 100644
--- a/arch/mips/boot/dts/qca/ar9331.dtsi
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -146,7 +146,7 @@
usb_phy: usb-phy {
compatible = "qca,ar7100-usb-phy";
- reset-names = "usb-phy", "usb-suspend-override";
+ reset-names = "phy", "suspend-override";
resets = <&rst 4>, <&rst 3>;
#phy-cells = <0>;
diff --git a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
index d4e4502daaa8..e7af2cf5f4c1 100644
--- a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
+++ b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
@@ -29,11 +29,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "reset";
diff --git a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
index 4f95ccf17c4c..d38aa73f1a2e 100644
--- a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
+++ b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
@@ -47,11 +47,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "jumpstart";
diff --git a/arch/mips/boot/dts/qca/ar9331_omega.dts b/arch/mips/boot/dts/qca/ar9331_omega.dts
index f70f79c4d0d5..11778abacf66 100644
--- a/arch/mips/boot/dts/qca/ar9331_omega.dts
+++ b/arch/mips/boot/dts/qca/ar9331_omega.dts
@@ -29,11 +29,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "reset";
diff --git a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
index 748131aea22e..c8290d36cfbe 100644
--- a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
+++ b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
@@ -47,11 +47,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "wps";
diff --git a/arch/mips/boot/ecoff.h b/arch/mips/boot/ecoff.h
index b3e73c22c345..5be79ebfc3f8 100644
--- a/arch/mips/boot/ecoff.h
+++ b/arch/mips/boot/ecoff.h
@@ -2,14 +2,17 @@
/*
* Some ECOFF definitions.
*/
+
+#include <stdint.h>
+
typedef struct filehdr {
- unsigned short f_magic; /* magic number */
- unsigned short f_nscns; /* number of sections */
- long f_timdat; /* time & date stamp */
- long f_symptr; /* file pointer to symbolic header */
- long f_nsyms; /* sizeof(symbolic hdr) */
- unsigned short f_opthdr; /* sizeof(optional hdr) */
- unsigned short f_flags; /* flags */
+ uint16_t f_magic; /* magic number */
+ uint16_t f_nscns; /* number of sections */
+ int32_t f_timdat; /* time & date stamp */
+ int32_t f_symptr; /* file pointer to symbolic header */
+ int32_t f_nsyms; /* sizeof(symbolic hdr) */
+ uint16_t f_opthdr; /* sizeof(optional hdr) */
+ uint16_t f_flags; /* flags */
} FILHDR;
#define FILHSZ sizeof(FILHDR)
@@ -18,32 +21,32 @@ typedef struct filehdr {
typedef struct scnhdr {
char s_name[8]; /* section name */
- long s_paddr; /* physical address, aliased s_nlib */
- long s_vaddr; /* virtual address */
- long s_size; /* section size */
- long s_scnptr; /* file ptr to raw data for section */
- long s_relptr; /* file ptr to relocation */
- long s_lnnoptr; /* file ptr to gp histogram */
- unsigned short s_nreloc; /* number of relocation entries */
- unsigned short s_nlnno; /* number of gp histogram entries */
- long s_flags; /* flags */
+ int32_t s_paddr; /* physical address, aliased s_nlib */
+ int32_t s_vaddr; /* virtual address */
+ int32_t s_size; /* section size */
+ int32_t s_scnptr; /* file ptr to raw data for section */
+ int32_t s_relptr; /* file ptr to relocation */
+ int32_t s_lnnoptr; /* file ptr to gp histogram */
+ uint16_t s_nreloc; /* number of relocation entries */
+ uint16_t s_nlnno; /* number of gp histogram entries */
+ int32_t s_flags; /* flags */
} SCNHDR;
#define SCNHSZ sizeof(SCNHDR)
-#define SCNROUND ((long)16)
+#define SCNROUND ((int32_t)16)
typedef struct aouthdr {
- short magic; /* see above */
- short vstamp; /* version stamp */
- long tsize; /* text size in bytes, padded to DW bdry*/
- long dsize; /* initialized data " " */
- long bsize; /* uninitialized data " " */
- long entry; /* entry pt. */
- long text_start; /* base of text used for this file */
- long data_start; /* base of data used for this file */
- long bss_start; /* base of bss used for this file */
- long gprmask; /* general purpose register mask */
- long cprmask[4]; /* co-processor register masks */
- long gp_value; /* the gp value used for this object */
+ int16_t magic; /* see above */
+ int16_t vstamp; /* version stamp */
+ int32_t tsize; /* text size in bytes, padded to DW bdry*/
+ int32_t dsize; /* initialized data " " */
+ int32_t bsize; /* uninitialized data " " */
+ int32_t entry; /* entry pt. */
+ int32_t text_start; /* base of text used for this file */
+ int32_t data_start; /* base of data used for this file */
+ int32_t bss_start; /* base of bss used for this file */
+ int32_t gprmask; /* general purpose register mask */
+ int32_t cprmask[4]; /* co-processor register masks */
+ int32_t gp_value; /* the gp value used for this object */
} AOUTHDR;
#define AOUTHSZ sizeof(AOUTHDR)
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index 266c8137e859..6972b97235da 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -43,6 +43,8 @@
#include <limits.h>
#include <netinet/in.h>
#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
#include "ecoff.h"
@@ -55,8 +57,8 @@
/* -------------------------------------------------------------------- */
struct sect {
- unsigned long vaddr;
- unsigned long len;
+ uint32_t vaddr;
+ uint32_t len;
};
int *symTypeTable;
@@ -153,16 +155,16 @@ static char *saveRead(int file, off_t offset, off_t len, char *name)
}
#define swab16(x) \
- ((unsigned short)( \
- (((unsigned short)(x) & (unsigned short)0x00ffU) << 8) | \
- (((unsigned short)(x) & (unsigned short)0xff00U) >> 8) ))
+ ((uint16_t)( \
+ (((uint16_t)(x) & (uint16_t)0x00ffU) << 8) | \
+ (((uint16_t)(x) & (uint16_t)0xff00U) >> 8) ))
#define swab32(x) \
((unsigned int)( \
- (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
- (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \
- (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \
- (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))
+ (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
+ (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
+ (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
+ (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24) ))
static void convert_elf_hdr(Elf32_Ehdr * e)
{
@@ -274,7 +276,7 @@ int main(int argc, char *argv[])
struct aouthdr eah;
struct scnhdr esecs[6];
int infile, outfile;
- unsigned long cur_vma = ULONG_MAX;
+ uint32_t cur_vma = UINT32_MAX;
int addflag = 0;
int nosecs;
@@ -518,7 +520,7 @@ int main(int argc, char *argv[])
for (i = 0; i < nosecs; i++) {
printf
- ("Section %d: %s phys %lx size %lx file offset %lx\n",
+ ("Section %d: %s phys %"PRIx32" size %"PRIx32"\t file offset %"PRIx32"\n",
i, esecs[i].s_name, esecs[i].s_paddr,
esecs[i].s_size, esecs[i].s_scnptr);
}
@@ -564,17 +566,16 @@ int main(int argc, char *argv[])
the section can be loaded before copying. */
if (ph[i].p_type == PT_LOAD && ph[i].p_filesz) {
if (cur_vma != ph[i].p_vaddr) {
- unsigned long gap =
- ph[i].p_vaddr - cur_vma;
+ uint32_t gap = ph[i].p_vaddr - cur_vma;
char obuf[1024];
if (gap > 65536) {
fprintf(stderr,
- "Intersegment gap (%ld bytes) too large.\n",
+ "Intersegment gap (%"PRId32" bytes) too large.\n",
gap);
exit(1);
}
fprintf(stderr,
- "Warning: %ld byte intersegment gap.\n",
+ "Warning: %d byte intersegment gap.\n",
gap);
memset(obuf, 0, sizeof obuf);
while (gap) {
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
index af4c712f7afc..d1ed066e1a17 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-md5.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -182,7 +182,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "octeon-md5",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
index 2b74b5b67cae..80d71e775936 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha1.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
@@ -215,7 +215,6 @@ static struct shash_alg octeon_sha1_alg = {
.cra_name = "sha1",
.cra_driver_name= "octeon-sha1",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
index 97e96fead08a..8b931e640926 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha256.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
@@ -239,7 +239,6 @@ static struct shash_alg octeon_sha256_algs[2] = { {
.cra_name = "sha256",
.cra_driver_name= "octeon-sha256",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -252,7 +251,6 @@ static struct shash_alg octeon_sha256_algs[2] = { {
.base = {
.cra_name = "sha224",
.cra_driver_name= "octeon-sha224",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
index d5fb3c6f22ae..6c9561496257 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha512.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
@@ -235,7 +235,6 @@ static struct shash_alg octeon_sha512_algs[2] = { {
.cra_name = "sha512",
.cra_driver_name= "octeon-sha512",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -249,7 +248,6 @@ static struct shash_alg octeon_sha512_algs[2] = { {
.cra_name = "sha384",
.cra_driver_name= "octeon-sha384",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 7b335ab21697..236833be6fbe 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -11,9 +11,7 @@
* Copyright (C) 2010 Cavium Networks, Inc.
*/
#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
#include <linux/bootmem.h>
-#include <linux/export.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -24,10 +22,16 @@
#include <asm/octeon/octeon.h>
#ifdef CONFIG_PCI
+#include <linux/pci.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
+struct octeon_dma_map_ops {
+ dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+ phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
@@ -61,6 +65,11 @@ static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr;
}
+static const struct octeon_dma_map_ops octeon_gen1_ops = {
+ .phys_to_dma = octeon_gen1_phys_to_dma,
+ .dma_to_phys = octeon_gen1_dma_to_phys,
+};
+
static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return octeon_hole_phys_to_dma(paddr);
@@ -71,6 +80,11 @@ static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
return octeon_hole_dma_to_phys(daddr);
}
+static const struct octeon_dma_map_ops octeon_gen2_ops = {
+ .phys_to_dma = octeon_gen2_phys_to_dma,
+ .dma_to_phys = octeon_gen2_dma_to_phys,
+};
+
static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
@@ -93,6 +107,11 @@ static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr;
}
+static const struct octeon_dma_map_ops octeon_big_ops = {
+ .phys_to_dma = octeon_big_phys_to_dma,
+ .dma_to_phys = octeon_big_dma_to_phys,
+};
+
static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
phys_addr_t paddr)
{
@@ -121,105 +140,51 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
return daddr;
}
-#endif /* CONFIG_PCI */
-
-static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
- direction, attrs);
- mb();
-
- return daddr;
-}
-
-static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, unsigned long attrs)
-{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
- mb();
- return r;
-}
-
-static void octeon_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
- mb();
-}
-
-static void octeon_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
- swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
- mb();
-}
-
-static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
-
- mb();
+static const struct octeon_dma_map_ops octeon_small_ops = {
+ .phys_to_dma = octeon_small_phys_to_dma,
+ .dma_to_phys = octeon_small_dma_to_phys,
+};
- return ret;
-}
+static const struct octeon_dma_map_ops *octeon_pci_dma_ops;
-static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
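+/*
+ * Pick the phys<->dma translation that matches the configured BAR type;
+ * this replaces swapping whole per-board dma_map_ops structures.
+ */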
+void __init octeon_pci_dma_init(void)
{
- return daddr;
+ switch (octeon_dma_bar_type) {
+ case OCTEON_DMA_BAR_TYPE_PCIE:
+ octeon_pci_dma_ops = &octeon_gen1_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_PCIE2:
+ octeon_pci_dma_ops = &octeon_gen2_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_BIG:
+ octeon_pci_dma_ops = &octeon_big_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_SMALL:
+ octeon_pci_dma_ops = &octeon_small_ops;
+ break;
+ default:
+ BUG();
+ }
}
-
-struct octeon_dma_map_ops {
- const struct dma_map_ops dma_map_ops;
- dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
- phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
-};
+#endif /* CONFIG_PCI */
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
- struct octeon_dma_map_ops,
- dma_map_ops);
-
- return ops->phys_to_dma(dev, paddr);
+#ifdef CONFIG_PCI
+ if (dev && dev_is_pci(dev))
+ return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
+#endif
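+ /* Every non-PCI device gets a 1:1 phys/DMA mapping. */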
+ return paddr;
}
-EXPORT_SYMBOL(__phys_to_dma);
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
- struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
- struct octeon_dma_map_ops,
- dma_map_ops);
-
- return ops->dma_to_phys(dev, daddr);
+#ifdef CONFIG_PCI
+ if (dev && dev_is_pci(dev))
+ return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
+#endif
+ return daddr;
}
-EXPORT_SYMBOL(__dma_to_phys);
-
-static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
- .dma_map_ops = {
- .alloc = octeon_dma_alloc_coherent,
- .free = swiotlb_free,
- .map_page = octeon_dma_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = octeon_dma_map_sg,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = octeon_dma_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = octeon_dma_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = swiotlb_dma_supported
- },
- .phys_to_dma = octeon_unity_phys_to_dma,
- .dma_to_phys = octeon_unity_dma_to_phys
-};
char *octeon_swiotlb;
@@ -283,52 +248,4 @@ void __init plat_swiotlb_setup(void)
if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
panic("Cannot allocate SWIOTLB buffer");
-
- mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
}
-
-#ifdef CONFIG_PCI
-static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
- .dma_map_ops = {
- .alloc = octeon_dma_alloc_coherent,
- .free = swiotlb_free,
- .map_page = octeon_dma_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = octeon_dma_map_sg,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = octeon_dma_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = octeon_dma_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = swiotlb_dma_supported
- },
-};
-
-const struct dma_map_ops *octeon_pci_dma_map_ops;
-
-void __init octeon_pci_dma_init(void)
-{
- switch (octeon_dma_bar_type) {
- case OCTEON_DMA_BAR_TYPE_PCIE2:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
- break;
- case OCTEON_DMA_BAR_TYPE_PCIE:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
- break;
- case OCTEON_DMA_BAR_TYPE_BIG:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
- break;
- case OCTEON_DMA_BAR_TYPE_SMALL:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
- break;
- default:
- BUG();
- }
- octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
-}
-#endif /* CONFIG_PCI */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
index d18ed5af62f4..b8898e2b8a6f 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -42,9 +42,6 @@
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-dbg-defs.h>
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_asxx_enable(int block);
-
/**
* Probe RGMII ports and determine the number present
*
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
index 578283350776..a176358c5a21 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -39,10 +39,7 @@
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-pcsx-defs.h>
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+#include <asm/octeon/cvmx-pcsxx-defs.h>
/**
* Perform initialization required only once for an SGMII port.
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
index ef16aa00167b..2a574d272671 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -25,10 +25,6 @@
* Contact Cavium Networks for more information
***********************license end**************************************/
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_spxx_int_msk_enable(int index);
-void __cvmx_interrupt_stxx_int_msk_enable(int index);
-
/*
* Functions for SPI initialization, configuration,
* and monitoring.
@@ -41,6 +37,8 @@ void __cvmx_interrupt_stxx_int_msk_enable(int index);
#include <asm/octeon/cvmx-pip-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
/*
* CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index 19d54e02c185..2bb6912a580d 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -39,12 +39,9 @@
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pcsx-defs.h>
#include <asm/octeon/cvmx-pcsxx-defs.h>
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
-
int __cvmx_helper_xaui_enumerate(int interface)
{
union cvmx_gmxx_hg2_control gmx_hg2_control;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index b3aec101a65d..8272d8c648ca 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -814,7 +814,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
if (cpumask_test_cpu(cpu, dest) && enable_one) {
- enable_one = 0;
+ enable_one = false;
__set_bit(cd->bit, pen);
} else {
__clear_bit(cd->bit, pen);
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 8505db478904..807cadaf554e 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
return 0;
pd = of_find_device_by_node(ehci_node);
+ of_node_put(ehci_node);
if (!pd)
return 0;
@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
return 0;
pd = of_find_device_by_node(ohci_node);
+ of_node_put(ohci_node);
if (!pd)
return 0;
@@ -1067,6 +1069,6 @@ end_led:
static int __init octeon_publish_devices(void)
{
- return of_platform_bus_probe(NULL, octeon_ids, NULL);
+ return of_platform_populate(NULL, octeon_ids, NULL, NULL);
}
arch_initcall(octeon_publish_devices);
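The two octeon-platform.c hunks above pair of_find_device_by_node() with of_node_put(): the lookup takes its own reference on the returned platform_device, so the device_node reference obtained earlier can be dropped as soon as the lookup is done. A minimal sketch of the pattern (the helper name is illustrative, not from the patch):

#include <linux/of.h>
#include <linux/of_platform.h>

static struct platform_device *lookup_pd(struct device_node *np)
{
	struct platform_device *pd;

	pd = of_find_device_by_node(np);	/* takes its own ref on pd's device */
	of_node_put(np);			/* balance the earlier node lookup */
	return pd;				/* may be NULL if no device is bound */
}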
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index a8034d0dcade..c2426232db06 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -36,6 +36,7 @@
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
@@ -1108,7 +1109,7 @@ void __init plat_mem_setup(void)
* Emit one character to the boot UART. Exported for use by the
* watchdog timer.
*/
-int prom_putchar(char c)
+void prom_putchar(char c)
{
uint64_t lsrval;
@@ -1119,7 +1120,6 @@ int prom_putchar(char c)
/* Write the byte */
cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
- return 1;
}
EXPORT_SYMBOL(prom_putchar);
@@ -1154,11 +1154,7 @@ void __init prom_free_prom_memory(void)
}
void __init octeon_fill_mac_addresses(void);
-int octeon_prune_device_tree(void);
-extern const char __appended_dtb;
-extern const char __dtb_octeon_3xxx_begin;
-extern const char __dtb_octeon_68xx_begin;
void __init device_tree_init(void)
{
const void *fdt;
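The setup.c change above makes prom_putchar() return void, matching the prototype now picked up from <asm/setup.h>; its return value was never consumed. A sketch of the resulting polled-UART shape, with hypothetical register accessors standing in for the cvmx CSR helpers:

#include <linux/types.h>

#define UART_LSR_THRE	0x20		/* transmit holding register empty */

extern u64 uart_read_lsr(void);		/* assumed accessor, for illustration */
extern void uart_write_thr(u8 c);	/* assumed accessor, for illustration */

void prom_putchar(char c)
{
	/* Spin until the transmitter can accept a byte... */
	while (!(uart_read_lsr() & UART_LSR_THRE))
		;
	/* ...then emit it; there is nothing useful to return. */
	uart_write_thr(c);
}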
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index fad8e964f14c..ba800a892384 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -66,7 +66,6 @@ CONFIG_HW_RANDOM=y
CONFIG_GPIO_SYSFS=y
CONFIG_WATCHDOG=y
CONFIG_BCM47XX_WDT=y
-CONFIG_SSB_DEBUG=y
CONFIG_SSB_DRIVER_GIGE=y
CONFIG_BCMA_DRIVER_GMAC_CMN=y
CONFIG_USB=y
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index be23fd25eeaa..030ff9c205fb 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -92,6 +92,8 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_JZ4780=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_INGENIC=y
# CONFIG_HWMON is not set
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index 26b1cd5ffbf5..684c9dcba126 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -43,9 +43,6 @@ CONFIG_NETFILTER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_SCSI=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_HW_RANDOM=y
# CONFIG_HWMON is not set
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index df8a9a15ca83..81058295d35f 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -317,6 +317,7 @@ CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 14df9ef15d40..5c10cddc39d3 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -328,6 +328,7 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index 25092e344574..bb694f5065f1 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -330,6 +330,7 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index 210bf609f785..5b5306b80576 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -133,6 +133,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index e5934aa98397..85543599448f 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -133,6 +133,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index cb2ca11c1789..067bb84ac916 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -134,6 +134,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index be29fcec69fc..dfc78c3172a3 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -137,6 +137,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 40462d4c90a0..50a2288c69f8 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -132,6 +132,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 4e50176cb3df..99a19cf5f9ba 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -326,6 +326,7 @@ CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/fw/arc/arc_con.c b/arch/mips/fw/arc/arc_con.c
index 769d4b9ac82e..365e3913231e 100644
--- a/arch/mips/fw/arc/arc_con.c
+++ b/arch/mips/fw/arc/arc_con.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/fs.h>
+#include <asm/setup.h>
#include <asm/sgialib.h>
static void prom_console_write(struct console *co, const char *s,
diff --git a/arch/mips/fw/arc/promlib.c b/arch/mips/fw/arc/promlib.c
index 7e8ba5ce95be..be381307fbb0 100644
--- a/arch/mips/fw/arc/promlib.c
+++ b/arch/mips/fw/arc/promlib.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <asm/sgialib.h>
#include <asm/bcache.h>
+#include <asm/setup.h>
/*
* IP22 boardcache is not compatible with board caches. Thus we disable it
diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
index 6aa264b9856a..8772617b64ce 100644
--- a/arch/mips/fw/sni/sniprom.c
+++ b/arch/mips/fw/sni/sniprom.c
@@ -19,6 +19,7 @@
#include <asm/mipsprom.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
+#include <asm/setup.h>
/* special SNI prom calls */
/*
diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig
index ba9b2c8cce68..08e33c6b2539 100644
--- a/arch/mips/generic/Kconfig
+++ b/arch/mips/generic/Kconfig
@@ -35,13 +35,13 @@ config LEGACY_BOARD_OCELOT
depends on LEGACY_BOARD_SEAD3=n
select LEGACY_BOARDS
select MSCC_OCELOT
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
config MSCC_OCELOT
bool
select GPIOLIB
select MSCC_OCELOT_IRQ
- select SYS_HAS_EARLY_PRINTK
- select USE_GENERIC_EARLY_PRINTK_8250
comment "FIT/UHI Boards"
@@ -65,6 +65,14 @@ config FIT_IMAGE_FDT_XILFPGA
Enable this to include the FDT for the MIPSfpga platform
from Imagination Technologies in the FIT kernel image.
+config FIT_IMAGE_FDT_OCELOT_PCB123
+ bool "Include FDT for Microsemi Ocelot PCB123"
+ select MSCC_OCELOT
+ help
+ Enable this to include the FDT for the Ocelot PCB123 platform
+ from Microsemi in the FIT kernel image.
+ This requires U-Boot on the platform.
+
config VIRT_BOARD_RANCHU
bool "Support Ranchu platform for Android emulator"
help
diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
index 0dd0d5d460a5..879cb80396c8 100644
--- a/arch/mips/generic/Platform
+++ b/arch/mips/generic/Platform
@@ -16,4 +16,5 @@ all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb
its-y := vmlinux.its.S
its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S
its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_OCELOT_PCB123) += board-ocelot_pcb123.its.S
its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += board-xilfpga.its.S
diff --git a/arch/mips/generic/board-ocelot_pcb123.its.S b/arch/mips/generic/board-ocelot_pcb123.its.S
new file mode 100644
index 000000000000..5a7d5e1c878a
--- /dev/null
+++ b/arch/mips/generic/board-ocelot_pcb123.its.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/ {
+ images {
+ fdt@ocelot_pcb123 {
+ description = "MSCC Ocelot PCB123 Device Tree";
+ data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash@0 {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ conf@ocelot_pcb123 {
+ description = "Ocelot Linux kernel";
+ kernel = "kernel@0";
+ fdt = "fdt@ocelot_pcb123";
+ };
+ };
+};
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 5ba6fcc26fa7..a106f8113842 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/bootinfo.h>
#include <asm/fw/fw.h>
@@ -204,22 +203,11 @@ void __init arch_init_irq(void)
"mti,cpu-interrupt-controller");
if (!cpu_has_veic && !intc_node)
mips_cpu_irq_init();
+ of_node_put(intc_node);
irqchip_init();
}
-static int __init publish_devices(void)
-{
- if (!of_have_populated_dt())
- panic("Device-tree not present");
-
- if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
- panic("Failed to populate DT");
-
- return 0;
-}
-arch_initcall(publish_devices);
-
void __init prom_free_prom_memory(void)
{
}
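Dropping the open-coded publish_devices() from generic/init.c relies on the driver core populating the default buses itself (of_platform_default_populate_init() runs at arch_initcall_sync); that rationale is inferred here rather than stated in the hunk. The equivalent explicit call, for illustration only:

#include <linux/of_platform.h>

static int __init publish_default_buses(void)
{
	/* NULL root/lookup/parent: populate from the DT root with defaults */
	return of_platform_default_populate(NULL, NULL, NULL);
}
arch_initcall(publish_default_buses);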
diff --git a/arch/mips/generic/yamon-dt.c b/arch/mips/generic/yamon-dt.c
index b408dac722ac..7ba4ad5cc1d6 100644
--- a/arch/mips/generic/yamon-dt.c
+++ b/arch/mips/generic/yamon-dt.c
@@ -28,8 +28,6 @@ __init int yamon_dt_append_cmdline(void *fdt)
/* find or add chosen node */
chosen_off = fdt_path_offset(fdt, "/chosen");
if (chosen_off == -FDT_ERR_NOTFOUND)
- chosen_off = fdt_path_offset(fdt, "/chosen@0");
- if (chosen_off == -FDT_ERR_NOTFOUND)
chosen_off = fdt_add_subnode(fdt, 0, "chosen");
if (chosen_off < 0) {
pr_err("Unable to find or add DT chosen node: %d\n",
@@ -221,8 +219,6 @@ __init int yamon_dt_serial_config(void *fdt)
/* find or add chosen node */
chosen_off = fdt_path_offset(fdt, "/chosen");
if (chosen_off == -FDT_ERR_NOTFOUND)
- chosen_off = fdt_path_offset(fdt, "/chosen@0");
- if (chosen_off == -FDT_ERR_NOTFOUND)
chosen_off = fdt_add_subnode(fdt, 0, "chosen");
if (chosen_off < 0) {
pr_err("Unable to find or add DT chosen node: %d\n",
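Both yamon-dt.c hunks reduce the chosen-node lookup to the same two-step sequence; with the "/chosen@0" special case gone, the logic is a plain find-or-add. As a standalone libfdt sketch:

#include <libfdt.h>

/* Return the offset of /chosen, creating the node if it is absent. */
static int get_chosen_offset(void *fdt)
{
	int off = fdt_path_offset(fdt, "/chosen");

	if (off == -FDT_ERR_NOTFOUND)
		off = fdt_add_subnode(fdt, 0, "chosen");
	return off;	/* node offset, or a negative FDT_ERR_* code */
}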
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 45d541baf359..58351e48421e 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -8,6 +8,7 @@ generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 0ab176bdb8e8..0269b3de8b51 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -22,6 +22,17 @@
#include <asm/cmpxchg.h>
#include <asm/war.h>
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __scbeqz "beqzl"
+#else
+# define __scbeqz "beqz"
+#endif
+
#define ATOMIC_INIT(i) { (i) }
/*
@@ -44,31 +55,18 @@
#define ATOMIC_OP(op, c_op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
int temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %0, %1 # atomic_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" .set mips0 \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " ll %0, %1 # atomic_" #op "\n" \
- " " #asm_op " %0, %2 \n" \
- " sc %0, %1 \n" \
- " .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!temp)); \
} else { \
unsigned long flags; \
\
@@ -83,36 +81,20 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
int temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %1, %2 # atomic_" #op "_return \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " ll %1, %2 # atomic_" #op "_return \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!result)); \
- \
- result = temp; result c_op i; \
} else { \
unsigned long flags; \
\
@@ -131,36 +113,20 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
int temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %1, %2 # atomic_fetch_" #op " \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" move %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " ll %1, %2 # atomic_fetch_" #op " \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!result)); \
- \
- result = temp; \
} else { \
unsigned long flags; \
\
@@ -218,38 +184,17 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
smp_mb__before_llsc();
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %1, %2 # atomic_sub_if_positive\n"
- " subu %0, %1, %3 \n"
- " bltz %0, 1f \n"
- " sc %0, %2 \n"
- " .set noreorder \n"
- " beqzl %0, 1b \n"
- " subu %0, %1, %3 \n"
- " .set reorder \n"
- "1: \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp),
- "+" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
- : "memory");
- } else if (kernel_uses_llsc) {
+ if (kernel_uses_llsc) {
int temp;
__asm__ __volatile__(
" .set "MIPS_ISA_LEVEL" \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
+ " move %1, %0 \n"
" bltz %0, 1f \n"
- " sc %0, %2 \n"
- " .set noreorder \n"
- " beqz %0, 1b \n"
- " subu %0, %1, %3 \n"
- " .set reorder \n"
+ " sc %1, %2 \n"
+ "\t" __scbeqz " %1, 1b \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp),
@@ -274,97 +219,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-/*
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-/*
- * atomic_dec_and_test - decrement by 1 and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
/*
* atomic_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*/
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
-/*
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc(v) atomic_add(1, (v))
-
-/*
- * atomic_dec - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec(v) atomic_sub(1, (v))
-
-/*
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
-
#ifdef CONFIG_64BIT
#define ATOMIC64_INIT(i) { (i) }
@@ -386,31 +246,18 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_OP(op, c_op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
long temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %0, %1 # atomic64_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" scd %0, %1 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" .set mips0 \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " lld %0, %1 # atomic64_" #op "\n" \
- " " #asm_op " %0, %2 \n" \
- " scd %0, %1 \n" \
- " .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!temp)); \
} else { \
unsigned long flags; \
\
@@ -425,37 +272,20 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
long result; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
long temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %1, %2 # atomic64_" #op "_return\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " lld %1, %2 # atomic64_" #op "_return\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "=" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
- : "memory"); \
- } while (unlikely(!result)); \
- \
- result = temp; result c_op i; \
} else { \
unsigned long flags; \
\
@@ -478,33 +308,16 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
long temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %1, %2 # atomic64_fetch_" #op "\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" move %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " lld %1, %2 # atomic64_fetch_" #op "\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "=" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
- : "memory"); \
- } while (unlikely(!result)); \
- \
- result = temp; \
} else { \
unsigned long flags; \
\
@@ -563,38 +376,17 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
smp_mb__before_llsc();
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %1, %2 # atomic64_sub_if_positive\n"
- " dsubu %0, %1, %3 \n"
- " bltz %0, 1f \n"
- " scd %0, %2 \n"
- " .set noreorder \n"
- " beqzl %0, 1b \n"
- " dsubu %0, %1, %3 \n"
- " .set reorder \n"
- "1: \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp),
- "=" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
- : "memory");
- } else if (kernel_uses_llsc) {
+ if (kernel_uses_llsc) {
long temp;
__asm__ __volatile__(
" .set "MIPS_ISA_LEVEL" \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
+ " move %1, %0 \n"
" bltz %0, 1f \n"
- " scd %0, %2 \n"
- " .set noreorder \n"
- " beqz %0, 1b \n"
- " dsubu %0, %1, %3 \n"
- " .set reorder \n"
+ " scd %1, %2 \n"
+ "\t" __scbeqz " %1, 1b \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp),
@@ -620,99 +412,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns true iff @v was not @u.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-/*
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
-
-/*
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
-/*
- * atomic64_dec_and_test - decrement by 1 and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-
/*
* atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*/
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
-/*
- * atomic64_inc - increment atomic variable
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic64_inc(v) atomic64_add(1, (v))
-
-/*
- * atomic64_dec - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic64_dec(v) atomic64_sub(1, (v))
-
-/*
- * atomic64_add_negative - add and test if negative
- * @v: pointer of type atomic64_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
-
#endif /* CONFIG_64BIT */
#endif /* _ASM_ATOMIC_H */
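With the R10000_LLSC_WAR branch folded away, every ll/sc loop in atomic.h shares a single asm body and only the branch mnemonic varies via __scbeqz. Roughly what ATOMIC_OP(add, +=, addu) now expands to, simplified for illustration ("+m" stands in for the GCC_OFF_SMALL_ASM() constraint used in the real header):

static inline void atomic_add_sketch(int i, atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	.set	" MIPS_ISA_LEVEL "		\n"
	"1:	ll	%0, %1		# atomic_add	\n"
	"	addu	%0, %2				\n"
	"	sc	%0, %1				\n"
	/* beqzl on pre-3.0 R10000 parts, plain beqz everywhere else */
	"\t" __scbeqz "	%0, 1b				\n"
	"	.set	mips0				\n"
	: "=&r" (temp), "+m" (v->counter)
	: "Ir" (i));
}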
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index b3e2975f83d3..bf6a8afd7ad2 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -123,22 +123,6 @@ static inline void bmips_write_zscm_reg(unsigned int offset, unsigned long data)
barrier();
}
-static inline void bmips_post_dma_flush(struct device *dev)
-{
- void __iomem *cbr = BMIPS_GET_CBR();
- u32 cfg;
-
- if (boot_cpu_type() != CPU_BMIPS3300 &&
- boot_cpu_type() != CPU_BMIPS4350 &&
- boot_cpu_type() != CPU_BMIPS4380)
- return;
-
- /* Flush stale data out of the readahead cache */
- cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
- __raw_readl(cbr + BMIPS_RAC_CONFIG);
-}
-
#endif /* !defined(__ASSEMBLY__) */
#endif /* _ASM_BMIPS_H */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 9cdb4e4ce258..0edba3e75747 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -14,39 +14,77 @@
#include <asm/isa-rev.h>
#include <cpu-feature-overrides.h>
+#define __ase(ase) (cpu_data[0].ases & (ase))
+#define __opt(opt) (cpu_data[0].options & (opt))
+
+/*
+ * Check if MIPS_ISA_REV is >= isa *and* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * Note that these should only be used in cases where a kernel built for an
+ * older ISA *cannot* run on a CPU which supports the feature in question. For
+ * example this may be used for features introduced with MIPSr6, since a kernel
+ * built for an older ISA cannot run on a MIPSr6 CPU. This should not be used
+ * for MIPSr2 features however, since a MIPSr1 or earlier kernel might run on a
+ * MIPSr2 CPU.
+ */
+#define __isa_ge_and_ase(isa, ase) ((MIPS_ISA_REV >= (isa)) && __ase(ase))
+#define __isa_ge_and_opt(isa, opt) ((MIPS_ISA_REV >= (isa)) && __opt(opt))
+
+/*
+ * Check if MIPS_ISA_REV is >= isa *or* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * These are for use with features that are optional up until a particular ISA
+ * revision & then become required.
+ */
+#define __isa_ge_or_ase(isa, ase) ((MIPS_ISA_REV >= (isa)) || __ase(ase))
+#define __isa_ge_or_opt(isa, opt) ((MIPS_ISA_REV >= (isa)) || __opt(opt))
+
+/*
+ * Check if MIPS_ISA_REV is < isa *and* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * These are for use with features that are optional up until a particular ISA
+ * revision & are then removed - ie. no longer present in any CPU implementing
+ * the given ISA revision.
+ */
+#define __isa_lt_and_ase(isa, ase) ((MIPS_ISA_REV < (isa)) && __ase(ase))
+#define __isa_lt_and_opt(isa, opt) ((MIPS_ISA_REV < (isa)) && __opt(opt))
+
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known MIPS systems.
*/
#ifndef cpu_has_tlb
-#define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB)
+#define cpu_has_tlb __opt(MIPS_CPU_TLB)
#endif
#ifndef cpu_has_ftlb
-#define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB)
+#define cpu_has_ftlb __opt(MIPS_CPU_FTLB)
#endif
#ifndef cpu_has_tlbinv
-#define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV)
+#define cpu_has_tlbinv __opt(MIPS_CPU_TLBINV)
#endif
#ifndef cpu_has_segments
-#define cpu_has_segments (cpu_data[0].options & MIPS_CPU_SEGMENTS)
+#define cpu_has_segments __opt(MIPS_CPU_SEGMENTS)
#endif
#ifndef cpu_has_eva
-#define cpu_has_eva (cpu_data[0].options & MIPS_CPU_EVA)
+#define cpu_has_eva __opt(MIPS_CPU_EVA)
#endif
#ifndef cpu_has_htw
-#define cpu_has_htw (cpu_data[0].options & MIPS_CPU_HTW)
+#define cpu_has_htw __opt(MIPS_CPU_HTW)
#endif
#ifndef cpu_has_ldpte
-#define cpu_has_ldpte (cpu_data[0].options & MIPS_CPU_LDPTE)
+#define cpu_has_ldpte __opt(MIPS_CPU_LDPTE)
#endif
#ifndef cpu_has_rixiex
-#define cpu_has_rixiex (cpu_data[0].options & MIPS_CPU_RIXIEX)
+#define cpu_has_rixiex __isa_ge_or_opt(6, MIPS_CPU_RIXIEX)
#endif
#ifndef cpu_has_maar
-#define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR)
+#define cpu_has_maar __opt(MIPS_CPU_MAAR)
#endif
#ifndef cpu_has_rw_llb
-#define cpu_has_rw_llb (cpu_data[0].options & MIPS_CPU_RW_LLB)
+#define cpu_has_rw_llb __isa_ge_or_opt(6, MIPS_CPU_RW_LLB)
#endif
/*
@@ -59,18 +97,18 @@
#define cpu_has_3kex (!cpu_has_4kex)
#endif
#ifndef cpu_has_4kex
-#define cpu_has_4kex (cpu_data[0].options & MIPS_CPU_4KEX)
+#define cpu_has_4kex __isa_ge_or_opt(1, MIPS_CPU_4KEX)
#endif
#ifndef cpu_has_3k_cache
-#define cpu_has_3k_cache (cpu_data[0].options & MIPS_CPU_3K_CACHE)
+#define cpu_has_3k_cache __isa_lt_and_opt(1, MIPS_CPU_3K_CACHE)
#endif
#define cpu_has_6k_cache 0
#define cpu_has_8k_cache 0
#ifndef cpu_has_4k_cache
-#define cpu_has_4k_cache (cpu_data[0].options & MIPS_CPU_4K_CACHE)
+#define cpu_has_4k_cache __isa_ge_or_opt(1, MIPS_CPU_4K_CACHE)
#endif
#ifndef cpu_has_tx39_cache
-#define cpu_has_tx39_cache (cpu_data[0].options & MIPS_CPU_TX39_CACHE)
+#define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE)
#endif
#ifndef cpu_has_octeon_cache
#define cpu_has_octeon_cache 0
@@ -83,92 +121,92 @@
#define raw_cpu_has_fpu cpu_has_fpu
#endif
#ifndef cpu_has_32fpr
-#define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR)
+#define cpu_has_32fpr __isa_ge_or_opt(1, MIPS_CPU_32FPR)
#endif
#ifndef cpu_has_counter
-#define cpu_has_counter (cpu_data[0].options & MIPS_CPU_COUNTER)
+#define cpu_has_counter __opt(MIPS_CPU_COUNTER)
#endif
#ifndef cpu_has_watch
-#define cpu_has_watch (cpu_data[0].options & MIPS_CPU_WATCH)
+#define cpu_has_watch __opt(MIPS_CPU_WATCH)
#endif
#ifndef cpu_has_divec
-#define cpu_has_divec (cpu_data[0].options & MIPS_CPU_DIVEC)
+#define cpu_has_divec __isa_ge_or_opt(1, MIPS_CPU_DIVEC)
#endif
#ifndef cpu_has_vce
-#define cpu_has_vce (cpu_data[0].options & MIPS_CPU_VCE)
+#define cpu_has_vce __opt(MIPS_CPU_VCE)
#endif
#ifndef cpu_has_cache_cdex_p
-#define cpu_has_cache_cdex_p (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_P)
+#define cpu_has_cache_cdex_p __opt(MIPS_CPU_CACHE_CDEX_P)
#endif
#ifndef cpu_has_cache_cdex_s
-#define cpu_has_cache_cdex_s (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_S)
+#define cpu_has_cache_cdex_s __opt(MIPS_CPU_CACHE_CDEX_S)
#endif
#ifndef cpu_has_prefetch
-#define cpu_has_prefetch (cpu_data[0].options & MIPS_CPU_PREFETCH)
+#define cpu_has_prefetch __isa_ge_or_opt(1, MIPS_CPU_PREFETCH)
#endif
#ifndef cpu_has_mcheck
-#define cpu_has_mcheck (cpu_data[0].options & MIPS_CPU_MCHECK)
+#define cpu_has_mcheck __isa_ge_or_opt(1, MIPS_CPU_MCHECK)
#endif
#ifndef cpu_has_ejtag
-#define cpu_has_ejtag (cpu_data[0].options & MIPS_CPU_EJTAG)
+#define cpu_has_ejtag __opt(MIPS_CPU_EJTAG)
#endif
#ifndef cpu_has_llsc
-#define cpu_has_llsc (cpu_data[0].options & MIPS_CPU_LLSC)
+#define cpu_has_llsc __isa_ge_or_opt(1, MIPS_CPU_LLSC)
#endif
#ifndef cpu_has_bp_ghist
-#define cpu_has_bp_ghist (cpu_data[0].options & MIPS_CPU_BP_GHIST)
+#define cpu_has_bp_ghist __opt(MIPS_CPU_BP_GHIST)
#endif
#ifndef kernel_uses_llsc
#define kernel_uses_llsc cpu_has_llsc
#endif
#ifndef cpu_has_guestctl0ext
-#define cpu_has_guestctl0ext (cpu_data[0].options & MIPS_CPU_GUESTCTL0EXT)
+#define cpu_has_guestctl0ext __opt(MIPS_CPU_GUESTCTL0EXT)
#endif
#ifndef cpu_has_guestctl1
-#define cpu_has_guestctl1 (cpu_data[0].options & MIPS_CPU_GUESTCTL1)
+#define cpu_has_guestctl1 __opt(MIPS_CPU_GUESTCTL1)
#endif
#ifndef cpu_has_guestctl2
-#define cpu_has_guestctl2 (cpu_data[0].options & MIPS_CPU_GUESTCTL2)
+#define cpu_has_guestctl2 __opt(MIPS_CPU_GUESTCTL2)
#endif
#ifndef cpu_has_guestid
-#define cpu_has_guestid (cpu_data[0].options & MIPS_CPU_GUESTID)
+#define cpu_has_guestid __opt(MIPS_CPU_GUESTID)
#endif
#ifndef cpu_has_drg
-#define cpu_has_drg (cpu_data[0].options & MIPS_CPU_DRG)
+#define cpu_has_drg __opt(MIPS_CPU_DRG)
#endif
#ifndef cpu_has_mips16
-#define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16)
+#define cpu_has_mips16 __isa_lt_and_ase(6, MIPS_ASE_MIPS16)
#endif
#ifndef cpu_has_mips16e2
-#define cpu_has_mips16e2 (cpu_data[0].ases & MIPS_ASE_MIPS16E2)
+#define cpu_has_mips16e2 __isa_lt_and_ase(6, MIPS_ASE_MIPS16E2)
#endif
#ifndef cpu_has_mdmx
-#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX)
+#define cpu_has_mdmx __isa_lt_and_ase(6, MIPS_ASE_MDMX)
#endif
#ifndef cpu_has_mips3d
-#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D)
+#define cpu_has_mips3d __isa_lt_and_ase(6, MIPS_ASE_MIPS3D)
#endif
#ifndef cpu_has_smartmips
-#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
+#define cpu_has_smartmips __isa_lt_and_ase(6, MIPS_ASE_SMARTMIPS)
#endif
#ifndef cpu_has_rixi
-#define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
+#define cpu_has_rixi __isa_ge_or_opt(6, MIPS_CPU_RIXI)
#endif
#ifndef cpu_has_mmips
# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
-# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
+# define cpu_has_mmips __opt(MIPS_CPU_MICROMIPS)
# else
# define cpu_has_mmips 0
# endif
#endif
#ifndef cpu_has_lpa
-#define cpu_has_lpa (cpu_data[0].options & MIPS_CPU_LPA)
+#define cpu_has_lpa __opt(MIPS_CPU_LPA)
#endif
#ifndef cpu_has_mvh
-#define cpu_has_mvh (cpu_data[0].options & MIPS_CPU_MVH)
+#define cpu_has_mvh __opt(MIPS_CPU_MVH)
#endif
#ifndef cpu_has_xpa
#define cpu_has_xpa (cpu_has_lpa && cpu_has_mvh)
@@ -338,32 +376,32 @@
#endif
#ifndef cpu_has_dsp
-#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP)
+#define cpu_has_dsp __ase(MIPS_ASE_DSP)
#endif
#ifndef cpu_has_dsp2
-#define cpu_has_dsp2 (cpu_data[0].ases & MIPS_ASE_DSP2P)
+#define cpu_has_dsp2 __ase(MIPS_ASE_DSP2P)
#endif
#ifndef cpu_has_dsp3
-#define cpu_has_dsp3 (cpu_data[0].ases & MIPS_ASE_DSP3)
+#define cpu_has_dsp3 __ase(MIPS_ASE_DSP3)
#endif
#ifndef cpu_has_mipsmt
-#define cpu_has_mipsmt (cpu_data[0].ases & MIPS_ASE_MIPSMT)
+#define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
#endif
#ifndef cpu_has_vp
-#define cpu_has_vp (cpu_data[0].options & MIPS_CPU_VP)
+#define cpu_has_vp __isa_ge_and_opt(6, MIPS_CPU_VP)
#endif
#ifndef cpu_has_userlocal
-#define cpu_has_userlocal (cpu_data[0].options & MIPS_CPU_ULRI)
+#define cpu_has_userlocal __isa_ge_or_opt(6, MIPS_CPU_ULRI)
#endif
#ifdef CONFIG_32BIT
# ifndef cpu_has_nofpuex
-# define cpu_has_nofpuex (cpu_data[0].options & MIPS_CPU_NOFPUEX)
+# define cpu_has_nofpuex __isa_lt_and_opt(1, MIPS_CPU_NOFPUEX)
# endif
# ifndef cpu_has_64bits
# define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
@@ -405,19 +443,19 @@
#endif
#if defined(CONFIG_CPU_MIPSR2_IRQ_VI) && !defined(cpu_has_vint)
-# define cpu_has_vint (cpu_data[0].options & MIPS_CPU_VINT)
+# define cpu_has_vint __opt(MIPS_CPU_VINT)
#elif !defined(cpu_has_vint)
# define cpu_has_vint 0
#endif
#if defined(CONFIG_CPU_MIPSR2_IRQ_EI) && !defined(cpu_has_veic)
-# define cpu_has_veic (cpu_data[0].options & MIPS_CPU_VEIC)
+# define cpu_has_veic __opt(MIPS_CPU_VEIC)
#elif !defined(cpu_has_veic)
# define cpu_has_veic 0
#endif
#ifndef cpu_has_inclusive_pcaches
-#define cpu_has_inclusive_pcaches (cpu_data[0].options & MIPS_CPU_INCLUSIVE_CACHES)
+#define cpu_has_inclusive_pcaches __opt(MIPS_CPU_INCLUSIVE_CACHES)
#endif
#ifndef cpu_dcache_line_size
@@ -438,63 +476,63 @@
#endif
#ifndef cpu_has_perf_cntr_intr_bit
-#define cpu_has_perf_cntr_intr_bit (cpu_data[0].options & MIPS_CPU_PCI)
+#define cpu_has_perf_cntr_intr_bit __opt(MIPS_CPU_PCI)
#endif
#ifndef cpu_has_vz
-#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ)
+#define cpu_has_vz __ase(MIPS_ASE_VZ)
#endif
#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
-# define cpu_has_msa (cpu_data[0].ases & MIPS_ASE_MSA)
+# define cpu_has_msa __ase(MIPS_ASE_MSA)
#elif !defined(cpu_has_msa)
# define cpu_has_msa 0
#endif
#ifndef cpu_has_ufr
-# define cpu_has_ufr (cpu_data[0].options & MIPS_CPU_UFR)
+# define cpu_has_ufr __opt(MIPS_CPU_UFR)
#endif
#ifndef cpu_has_fre
-# define cpu_has_fre (cpu_data[0].options & MIPS_CPU_FRE)
+# define cpu_has_fre __opt(MIPS_CPU_FRE)
#endif
#ifndef cpu_has_cdmm
-# define cpu_has_cdmm (cpu_data[0].options & MIPS_CPU_CDMM)
+# define cpu_has_cdmm __opt(MIPS_CPU_CDMM)
#endif
#ifndef cpu_has_small_pages
-# define cpu_has_small_pages (cpu_data[0].options & MIPS_CPU_SP)
+# define cpu_has_small_pages __opt(MIPS_CPU_SP)
#endif
#ifndef cpu_has_nan_legacy
-#define cpu_has_nan_legacy (cpu_data[0].options & MIPS_CPU_NAN_LEGACY)
+#define cpu_has_nan_legacy __isa_lt_and_opt(6, MIPS_CPU_NAN_LEGACY)
#endif
#ifndef cpu_has_nan_2008
-#define cpu_has_nan_2008 (cpu_data[0].options & MIPS_CPU_NAN_2008)
+#define cpu_has_nan_2008 __isa_ge_or_opt(6, MIPS_CPU_NAN_2008)
#endif
#ifndef cpu_has_ebase_wg
-# define cpu_has_ebase_wg (cpu_data[0].options & MIPS_CPU_EBASE_WG)
+# define cpu_has_ebase_wg __opt(MIPS_CPU_EBASE_WG)
#endif
#ifndef cpu_has_badinstr
-# define cpu_has_badinstr (cpu_data[0].options & MIPS_CPU_BADINSTR)
+# define cpu_has_badinstr __isa_ge_or_opt(6, MIPS_CPU_BADINSTR)
#endif
#ifndef cpu_has_badinstrp
-# define cpu_has_badinstrp (cpu_data[0].options & MIPS_CPU_BADINSTRP)
+# define cpu_has_badinstrp __isa_ge_or_opt(6, MIPS_CPU_BADINSTRP)
#endif
#ifndef cpu_has_contextconfig
-# define cpu_has_contextconfig (cpu_data[0].options & MIPS_CPU_CTXTC)
+# define cpu_has_contextconfig __opt(MIPS_CPU_CTXTC)
#endif
#ifndef cpu_has_perf
-# define cpu_has_perf (cpu_data[0].options & MIPS_CPU_PERF)
+# define cpu_has_perf __opt(MIPS_CPU_PERF)
#endif
-#if defined(CONFIG_SMP) && (MIPS_ISA_REV >= 6)
+#ifdef CONFIG_SMP
/*
* Some systems share FTLB RAMs between threads within a core (siblings in
* kernel parlance). This means that FTLB entries may become invalid at almost
@@ -507,7 +545,7 @@
*/
# ifndef cpu_has_shared_ftlb_ram
# define cpu_has_shared_ftlb_ram \
- (current_cpu_data.options & MIPS_CPU_SHARED_FTLB_RAM)
+ __isa_ge_and_opt(6, MIPS_CPU_SHARED_FTLB_RAM)
# endif
/*
@@ -524,9 +562,9 @@
*/
# ifndef cpu_has_shared_ftlb_entries
# define cpu_has_shared_ftlb_entries \
- (current_cpu_data.options & MIPS_CPU_SHARED_FTLB_ENTRIES)
+ __isa_ge_and_opt(6, MIPS_CPU_SHARED_FTLB_ENTRIES)
# endif
-#endif /* SMP && MIPS_ISA_REV >= 6 */
+#endif /* SMP */
#ifndef cpu_has_shared_ftlb_ram
# define cpu_has_shared_ftlb_ram 0
@@ -537,7 +575,7 @@
#ifdef CONFIG_MIPS_MT_SMP
# define cpu_has_mipsmt_pertccounters \
- (cpu_data[0].options & MIPS_CPU_MT_PER_TC_PERF_COUNTERS)
+ __isa_lt_and_opt(6, MIPS_CPU_MT_PER_TC_PERF_COUNTERS)
#else
# define cpu_has_mipsmt_pertccounters 0
#endif /* CONFIG_MIPS_MT_SMP */
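Because MIPS_ISA_REV is a compile-time constant, the __isa_* wrappers let a feature become a build-time constant wherever the ISA guarantees (or forbids) it. For instance, assuming a kernel built for MIPSr6, cpu_has_rixi expands to a constant-true expression and the test below compiles away entirely:

#include <asm/cpu-features.h>

int rixi_available(void)
{
	/* __isa_ge_or_opt(6, MIPS_CPU_RIXI): constant 1 when MIPS_ISA_REV >= 6 */
	return cpu_has_rixi ? 1 : 0;
}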
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 5b9d02ef4f60..dacbdb84516a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -225,31 +225,32 @@
* Definitions for 7:0 on legacy processors
*/
-#define PRID_REV_TX4927 0x0022
-#define PRID_REV_TX4937 0x0030
-#define PRID_REV_R4400 0x0040
-#define PRID_REV_R3000A 0x0030
-#define PRID_REV_R3000 0x0020
-#define PRID_REV_R2000A 0x0010
-#define PRID_REV_TX3912 0x0010
-#define PRID_REV_TX3922 0x0030
-#define PRID_REV_TX3927 0x0040
-#define PRID_REV_VR4111 0x0050
-#define PRID_REV_VR4181 0x0050 /* Same as VR4111 */
-#define PRID_REV_VR4121 0x0060
-#define PRID_REV_VR4122 0x0070
-#define PRID_REV_VR4181A 0x0070 /* Same as VR4122 */
-#define PRID_REV_VR4130 0x0080
-#define PRID_REV_34K_V1_0_2 0x0022
-#define PRID_REV_LOONGSON1B 0x0020
-#define PRID_REV_LOONGSON1C 0x0020 /* Same as Loongson-1B */
-#define PRID_REV_LOONGSON2E 0x0002
-#define PRID_REV_LOONGSON2F 0x0003
-#define PRID_REV_LOONGSON3A_R1 0x0005
-#define PRID_REV_LOONGSON3B_R1 0x0006
-#define PRID_REV_LOONGSON3B_R2 0x0007
-#define PRID_REV_LOONGSON3A_R2 0x0008
-#define PRID_REV_LOONGSON3A_R3 0x0009
+#define PRID_REV_TX4927 0x0022
+#define PRID_REV_TX4937 0x0030
+#define PRID_REV_R4400 0x0040
+#define PRID_REV_R3000A 0x0030
+#define PRID_REV_R3000 0x0020
+#define PRID_REV_R2000A 0x0010
+#define PRID_REV_TX3912 0x0010
+#define PRID_REV_TX3922 0x0030
+#define PRID_REV_TX3927 0x0040
+#define PRID_REV_VR4111 0x0050
+#define PRID_REV_VR4181 0x0050 /* Same as VR4111 */
+#define PRID_REV_VR4121 0x0060
+#define PRID_REV_VR4122 0x0070
+#define PRID_REV_VR4181A 0x0070 /* Same as VR4122 */
+#define PRID_REV_VR4130 0x0080
+#define PRID_REV_34K_V1_0_2 0x0022
+#define PRID_REV_LOONGSON1B 0x0020
+#define PRID_REV_LOONGSON1C 0x0020 /* Same as Loongson-1B */
+#define PRID_REV_LOONGSON2E 0x0002
+#define PRID_REV_LOONGSON2F 0x0003
+#define PRID_REV_LOONGSON3A_R1 0x0005
+#define PRID_REV_LOONGSON3B_R1 0x0006
+#define PRID_REV_LOONGSON3B_R2 0x0007
+#define PRID_REV_LOONGSON3A_R2 0x0008
+#define PRID_REV_LOONGSON3A_R3_0 0x0009
+#define PRID_REV_LOONGSON3A_R3_1 0x000d
/*
* Older processors used to encode processor version and revision in two
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
index 72d0eab02afc..8eda48748ed5 100644
--- a/arch/mips/include/asm/dma-coherence.h
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -21,10 +21,10 @@ enum coherent_io_user_state {
extern enum coherent_io_user_state coherentio;
extern int hw_coherentio;
#else
-#ifdef CONFIG_DMA_COHERENT
-#define coherentio IO_COHERENCE_ENABLED
-#else
+#ifdef CONFIG_DMA_NONCOHERENT
#define coherentio IO_COHERENCE_DISABLED
+#else
+#define coherentio IO_COHERENCE_ENABLED
#endif
#define hw_coherentio 0
#endif /* CONFIG_DMA_MAYBE_COHERENT */
diff --git a/arch/mips/include/asm/dma-direct.h b/arch/mips/include/asm/dma-direct.h
index f32f15530aba..b5c240806e1b 100644
--- a/arch/mips/include/asm/dma-direct.h
+++ b/arch/mips/include/asm/dma-direct.h
@@ -1 +1,16 @@
-#include <asm/dma-coherence.h>
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MIPS_DMA_DIRECT_H
+#define _MIPS_DMA_DIRECT_H 1
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+#endif /* _MIPS_DMA_DIRECT_H */
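The new dma-direct.h makes the addressability check explicit: a mapping is usable only if its last byte fits under the device's DMA mask. A minimal caller-side sketch (function name illustrative):

#include <linux/device.h>
#include <linux/dma-direct.h>

static bool can_map(struct device *dev, phys_addr_t paddr, size_t size)
{
	/* Translate CPU physical to bus address, then test it against the mask. */
	return dma_capable(dev, __phys_to_dma(dev, paddr), size);
}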
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 886e75a383f2..e81c4e97ff1a 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -2,19 +2,21 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
-#include <linux/scatterlist.h>
-#include <asm/dma-coherence.h>
-#include <asm/cache.h>
+#include <linux/swiotlb.h>
-#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
-#include <dma-coherence.h>
-#endif
-
-extern const struct dma_map_ops *mips_dma_map_ops;
+extern const struct dma_map_ops jazz_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return mips_dma_map_ops;
+#if defined(CONFIG_MACH_JAZZ)
+ return &jazz_dma_ops;
+#elif defined(CONFIG_SWIOTLB)
+ return &swiotlb_dma_ops;
+#elif defined(CONFIG_DMA_NONCOHERENT_OPS)
+ return &dma_noncoherent_ops;
+#else
+ return &dma_direct_ops;
+#endif
}
#define arch_setup_dma_ops arch_setup_dma_ops
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index cea8ad864b3f..54c730aed327 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -12,6 +12,8 @@
#ifndef _ASM_IO_H
#define _ASM_IO_H
+#define ARCH_HAS_IOREMAP_WC
+
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -141,14 +143,14 @@ static inline void * phys_to_virt(unsigned long address)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
-static inline unsigned long isa_virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
{
- return (unsigned long)address - PAGE_OFFSET;
+ return virt_to_phys(address);
}
-static inline void * isa_bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
{
- return (void *)(address + PAGE_OFFSET);
+ return phys_to_virt(address);
}
#define isa_page_to_bus page_to_phys
@@ -278,15 +280,25 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
#define ioremap_cache ioremap_cachable
/*
- * These two are MIPS specific ioremap variant. ioremap_cacheable_cow
- * requests a cachable mapping, ioremap_uncached_accelerated requests a
- * mapping using the uncached accelerated mode which isn't supported on
- * all processors.
+ * ioremap_wc - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_wc performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * but accelerated by means of the write-combining feature. It is
+ * specifically useful for PCIe prefetchable windows, which may vastly
+ * improve communications performance. If it is determined at boot that
+ * the CPU CCA doesn't support UCA, the method falls back to the
+ * _CACHE_UNCACHED option (see cpu_probe()).
*/
-#define ioremap_cacheable_cow(offset, size) \
- __ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
-#define ioremap_uncached_accelerated(offset, size) \
- __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)
+#define ioremap_wc(offset, size) \
+ __ioremap_mode((offset), (size), boot_cpu_data.writecombine)
static inline void iounmap(const volatile void __iomem *addr)
{
@@ -590,7 +602,7 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
*
* This API used to be exported; it now is for arch code internal use only.
*/
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
@@ -609,7 +621,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
#define dma_cache_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
/*
* Read a 32-bit register that requires a 64-bit read cycle on the bus.
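A typical consumer of the new ioremap_wc() is a prefetchable PCI BAR such as a framebuffer, where write-combining lets the CPU batch stores into larger bus bursts. An illustrative mapping helper (error handling elided):

#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *map_wc_bar(struct pci_dev *pdev, int bar)
{
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}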
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index ad1a99948f27..a72dfbf1babb 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -68,16 +68,6 @@ struct prev_kprobe {
unsigned long saved_epc;
};
-#define MAX_JPROBES_STACK_SIZE 128
-#define MAX_JPROBES_STACK_ADDR \
- (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 - sizeof(struct pt_regs))
-
-#define MIN_JPROBES_STACK_SIZE(ADDR) \
- ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \
- ? MAX_JPROBES_STACK_ADDR - (ADDR) \
- : MAX_JPROBES_STACK_SIZE)
-
-
#define SKIP_DELAYSLOT 0x0001
/* per-cpu kprobe control block */
@@ -86,12 +76,9 @@ struct kprobe_ctlblk {
unsigned long kprobe_old_SR;
unsigned long kprobe_saved_SR;
unsigned long kprobe_saved_epc;
- unsigned long jprobe_saved_sp;
- struct pt_regs jprobe_saved_regs;
/* Per-thread fields, used while emulating branches */
unsigned long flags;
unsigned long target_epc;
- u8 jprobes_stack[MAX_JPROBES_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/mips/include/asm/mach-ar7/spaces.h b/arch/mips/include/asm/mach-ar7/spaces.h
index 660ab64c0fc9..a004d94dfbdd 100644
--- a/arch/mips/include/asm/mach-ar7/spaces.h
+++ b/arch/mips/include/asm/mach-ar7/spaces.h
@@ -17,9 +17,6 @@
#define PAGE_OFFSET _AC(0x94000000, UL)
#define PHYS_OFFSET _AC(0x14000000, UL)
-#define UNCAC_BASE _AC(0xb4000000, UL) /* 0xa0000000 + PHYS_OFFSET */
-#define IO_BASE UNCAC_BASE
-
#include <asm/mach-generic/spaces.h>
#endif /* __ASM_AR7_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ath25/dma-coherence.h b/arch/mips/include/asm/mach-ath25/dma-coherence.h
deleted file mode 100644
index d5defdde32db..000000000000
--- a/arch/mips/include/asm/mach-ath25/dma-coherence.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
- *
- */
-#ifndef __ASM_MACH_ATH25_DMA_COHERENCE_H
-#define __ASM_MACH_ATH25_DMA_COHERENCE_H
-
-#include <linux/device.h>
-
-/*
- * We need some arbitrary non-zero value to be programmed to the BAR1 register
- * of PCI host controller to enable DMA. The same value should be used as the
- * offset to calculate the physical address of DMA buffer for PCI devices.
- */
-#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
-
-static inline dma_addr_t ath25_dev_offset(struct device *dev)
-{
-#ifdef CONFIG_PCI
- extern struct bus_type pci_bus_type;
-
- if (dev && dev->bus == &pci_bus_type)
- return AR2315_PCI_HOST_SDRAM_BASEADDR;
-#endif
- return 0;
-}
-
-static inline dma_addr_t
-plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
- return virt_to_phys(addr) + ath25_dev_offset(dev);
-}
-
-static inline dma_addr_t
-plat_map_dma_mem_page(struct device *dev, struct page *page)
-{
- return page_to_phys(page) + ath25_dev_offset(dev);
-}
-
-static inline unsigned long
-plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr - ath25_dev_offset(dev);
-}
-
-static inline void
-plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_COHERENT
- return 1;
-#endif
-#ifdef CONFIG_DMA_NONCOHERENT
- return 0;
-#endif
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-#endif /* __ASM_MACH_ATH25_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index d99ca862dae3..284b4fa23e03 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -20,6 +20,10 @@
#include <linux/bitops.h>
#define AR71XX_APB_BASE 0x18000000
+#define AR71XX_GE0_BASE 0x19000000
+#define AR71XX_GE0_SIZE 0x10000
+#define AR71XX_GE1_BASE 0x1a000000
+#define AR71XX_GE1_SIZE 0x10000
#define AR71XX_EHCI_BASE 0x1b000000
#define AR71XX_EHCI_SIZE 0x1000
#define AR71XX_OHCI_BASE 0x1c000000
@@ -39,6 +43,8 @@
#define AR71XX_PLL_SIZE 0x100
#define AR71XX_RESET_BASE (AR71XX_APB_BASE + 0x00060000)
#define AR71XX_RESET_SIZE 0x100
+#define AR71XX_MII_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR71XX_MII_SIZE 0x100
#define AR71XX_PCI_MEM_BASE 0x10000000
#define AR71XX_PCI_MEM_SIZE 0x07000000
@@ -81,18 +87,39 @@
#define AR933X_UART_BASE (AR71XX_APB_BASE + 0x00020000)
#define AR933X_UART_SIZE 0x14
+#define AR933X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR933X_GMAC_SIZE 0x04
#define AR933X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
#define AR933X_WMAC_SIZE 0x20000
#define AR933X_EHCI_BASE 0x1b000000
#define AR933X_EHCI_SIZE 0x1000
+#define AR934X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR934X_GMAC_SIZE 0x14
#define AR934X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
#define AR934X_WMAC_SIZE 0x20000
#define AR934X_EHCI_BASE 0x1b000000
#define AR934X_EHCI_SIZE 0x200
+#define AR934X_NFC_BASE 0x1b000200
+#define AR934X_NFC_SIZE 0xb8
#define AR934X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
#define AR934X_SRIF_SIZE 0x1000
+#define QCA953X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA953X_GMAC_SIZE 0x14
+#define QCA953X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA953X_WMAC_SIZE 0x20000
+#define QCA953X_EHCI_BASE 0x1b000000
+#define QCA953X_EHCI_SIZE 0x200
+#define QCA953X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
+#define QCA953X_SRIF_SIZE 0x1000
+
+#define QCA953X_PCI_CFG_BASE0 0x14000000
+#define QCA953X_PCI_CTRL_BASE0 (AR71XX_APB_BASE + 0x000f0000)
+#define QCA953X_PCI_CRP_BASE0 (AR71XX_APB_BASE + 0x000c0000)
+#define QCA953X_PCI_MEM_BASE0 0x10000000
+#define QCA953X_PCI_MEM_SIZE 0x02000000
+
#define QCA955X_PCI_MEM_BASE0 0x10000000
#define QCA955X_PCI_MEM_BASE1 0x12000000
#define QCA955X_PCI_MEM_SIZE 0x02000000
@@ -106,11 +133,72 @@
#define QCA955X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
#define QCA955X_PCI_CTRL_SIZE 0x100
+#define QCA955X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA955X_GMAC_SIZE 0x40
#define QCA955X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
#define QCA955X_WMAC_SIZE 0x20000
#define QCA955X_EHCI0_BASE 0x1b000000
#define QCA955X_EHCI1_BASE 0x1b400000
#define QCA955X_EHCI_SIZE 0x1000
+#define QCA955X_NFC_BASE 0x1b800200
+#define QCA955X_NFC_SIZE 0xb8
+
+#define QCA956X_PCI_MEM_BASE1 0x12000000
+#define QCA956X_PCI_MEM_SIZE 0x02000000
+#define QCA956X_PCI_CFG_BASE1 0x16000000
+#define QCA956X_PCI_CFG_SIZE 0x1000
+#define QCA956X_PCI_CRP_BASE1 (AR71XX_APB_BASE + 0x00250000)
+#define QCA956X_PCI_CRP_SIZE 0x1000
+#define QCA956X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
+#define QCA956X_PCI_CTRL_SIZE 0x100
+
+#define QCA956X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA956X_WMAC_SIZE 0x20000
+#define QCA956X_EHCI0_BASE 0x1b000000
+#define QCA956X_EHCI1_BASE 0x1b400000
+#define QCA956X_EHCI_SIZE 0x200
+#define QCA956X_GMAC_SGMII_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA956X_GMAC_SGMII_SIZE 0x64
+#define QCA956X_PLL_BASE (AR71XX_APB_BASE + 0x00050000)
+#define QCA956X_PLL_SIZE 0x50
+#define QCA956X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA956X_GMAC_SIZE 0x64
+
+/*
+ * Hidden Registers
+ */
+#define QCA956X_MAC_CFG_BASE 0xb9000000
+#define QCA956X_MAC_CFG_SIZE 0x64
+
+#define QCA956X_MAC_CFG1_REG 0x00
+#define QCA956X_MAC_CFG1_SOFT_RST BIT(31)
+#define QCA956X_MAC_CFG1_RX_RST BIT(19)
+#define QCA956X_MAC_CFG1_TX_RST BIT(18)
+#define QCA956X_MAC_CFG1_LOOPBACK BIT(8)
+#define QCA956X_MAC_CFG1_RX_EN BIT(2)
+#define QCA956X_MAC_CFG1_TX_EN BIT(0)
+
+#define QCA956X_MAC_CFG2_REG 0x04
+#define QCA956X_MAC_CFG2_IF_1000 BIT(9)
+#define QCA956X_MAC_CFG2_IF_10_100 BIT(8)
+#define QCA956X_MAC_CFG2_HUGE_FRAME_EN BIT(5)
+#define QCA956X_MAC_CFG2_LEN_CHECK BIT(4)
+#define QCA956X_MAC_CFG2_PAD_CRC_EN BIT(2)
+#define QCA956X_MAC_CFG2_FDX BIT(0)
+
+#define QCA956X_MAC_MII_MGMT_CFG_REG 0x20
+#define QCA956X_MGMT_CFG_CLK_DIV_20 0x07
+
+#define QCA956X_MAC_FIFO_CFG0_REG 0x48
+#define QCA956X_MAC_FIFO_CFG1_REG 0x4c
+#define QCA956X_MAC_FIFO_CFG2_REG 0x50
+#define QCA956X_MAC_FIFO_CFG3_REG 0x54
+#define QCA956X_MAC_FIFO_CFG4_REG 0x58
+#define QCA956X_MAC_FIFO_CFG5_REG 0x5c
+
+#define QCA956X_DAM_RESET_OFFSET 0xb90001bc
+#define QCA956X_DAM_RESET_SIZE 0x4
+#define QCA956X_INLINE_CHKSUM_ENG BIT(27)
/*
* DDR_CTRL block
@@ -149,6 +237,12 @@
#define AR934X_DDR_REG_FLUSH_PCIE 0xa8
#define AR934X_DDR_REG_FLUSH_WMAC 0xac
+#define QCA953X_DDR_REG_FLUSH_GE0 0x9c
+#define QCA953X_DDR_REG_FLUSH_GE1 0xa0
+#define QCA953X_DDR_REG_FLUSH_USB 0xa4
+#define QCA953X_DDR_REG_FLUSH_PCIE 0xa8
+#define QCA953X_DDR_REG_FLUSH_WMAC 0xac
+
/*
* PLL block
*/
@@ -166,9 +260,15 @@
#define AR71XX_AHB_DIV_SHIFT 20
#define AR71XX_AHB_DIV_MASK 0x7
+#define AR71XX_ETH0_PLL_SHIFT 17
+#define AR71XX_ETH1_PLL_SHIFT 19
+
#define AR724X_PLL_REG_CPU_CONFIG 0x00
#define AR724X_PLL_REG_PCIE_CONFIG 0x10
+#define AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS BIT(16)
+#define AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET BIT(25)
+
#define AR724X_PLL_FB_SHIFT 0
#define AR724X_PLL_FB_MASK 0x3ff
#define AR724X_PLL_REF_DIV_SHIFT 10
@@ -178,6 +278,8 @@
#define AR724X_DDR_DIV_SHIFT 22
#define AR724X_DDR_DIV_MASK 0x3
+#define AR7242_PLL_REG_ETH0_INT_CLOCK 0x2c
+
#define AR913X_PLL_REG_CPU_CONFIG 0x00
#define AR913X_PLL_REG_ETH_CONFIG 0x04
#define AR913X_PLL_REG_ETH0_INT_CLOCK 0x14
@@ -190,6 +292,9 @@
#define AR913X_AHB_DIV_SHIFT 19
#define AR913X_AHB_DIV_MASK 0x1
+#define AR913X_ETH0_PLL_SHIFT 20
+#define AR913X_ETH1_PLL_SHIFT 22
+
#define AR933X_PLL_CPU_CONFIG_REG 0x00
#define AR933X_PLL_CLOCK_CTRL_REG 0x08
@@ -211,6 +316,8 @@
#define AR934X_PLL_CPU_CONFIG_REG 0x00
#define AR934X_PLL_DDR_CONFIG_REG 0x04
#define AR934X_PLL_CPU_DDR_CLK_CTRL_REG 0x08
+#define AR934X_PLL_SWITCH_CLOCK_CONTROL_REG 0x24
+#define AR934X_PLL_ETH_XMII_CONTROL_REG 0x2c
#define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
#define AR934X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
@@ -243,9 +350,52 @@
#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+#define AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL BIT(6)
+
+#define QCA953X_PLL_CPU_CONFIG_REG 0x00
+#define QCA953X_PLL_DDR_CONFIG_REG 0x04
+#define QCA953X_PLL_CLK_CTRL_REG 0x08
+#define QCA953X_PLL_SWITCH_CLOCK_CONTROL_REG 0x24
+#define QCA953X_PLL_ETH_XMII_CONTROL_REG 0x2c
+#define QCA953X_PLL_ETH_SGMII_CONTROL_REG 0x48
+
+#define QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
+#define QCA953X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
+#define QCA953X_PLL_CPU_CONFIG_NINT_SHIFT 6
+#define QCA953X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA953X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
+#define QCA953X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
+#define QCA953X_PLL_DDR_CONFIG_NINT_SHIFT 10
+#define QCA953X_PLL_DDR_CONFIG_NINT_MASK 0x3f
+#define QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA953X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
#define QCA955X_PLL_CPU_CONFIG_REG 0x00
#define QCA955X_PLL_DDR_CONFIG_REG 0x04
#define QCA955X_PLL_CLK_CTRL_REG 0x08
+#define QCA955X_PLL_ETH_XMII_CONTROL_REG 0x28
+#define QCA955X_PLL_ETH_SGMII_CONTROL_REG 0x48
+#define QCA955X_PLL_ETH_SGMII_SERDES_REG 0x4c
#define QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
#define QCA955X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
@@ -278,6 +428,81 @@
#define QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
#define QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+#define QCA955X_PLL_ETH_SGMII_SERDES_LOCK_DETECT BIT(2)
+#define QCA955X_PLL_ETH_SGMII_SERDES_PLL_REFCLK BIT(1)
+#define QCA955X_PLL_ETH_SGMII_SERDES_EN_PLL BIT(0)
+
+#define QCA956X_PLL_CPU_CONFIG_REG 0x00
+#define QCA956X_PLL_CPU_CONFIG1_REG 0x04
+#define QCA956X_PLL_DDR_CONFIG_REG 0x08
+#define QCA956X_PLL_DDR_CONFIG1_REG 0x0c
+#define QCA956X_PLL_CLK_CTRL_REG 0x10
+#define QCA956X_PLL_SWITCH_CLOCK_CONTROL_REG 0x28
+#define QCA956X_PLL_ETH_XMII_CONTROL_REG 0x30
+#define QCA956X_PLL_ETH_SGMII_SERDES_REG 0x4c
+
+#define QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA956X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT 0
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK 0x1f
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT 5
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK 0x1fff
+#define QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT 18
+#define QCA956X_PLL_CPU_CONFIG1_NINT_MASK 0x1ff
+
+#define QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA956X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT 0
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK 0x1f
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT 5
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK 0x1fff
+#define QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT 18
+#define QCA956X_PLL_DDR_CONFIG1_NINT_MASK 0x1ff
+
+#define QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL BIT(20)
+#define QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL BIT(21)
+#define QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_I2C_CLK_SELB BIT(5)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL0_1 BIT(6)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_UART1_CLK_SEL BIT(7)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_USB_REFCLK_FREQ_SEL_SHIFT 8
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_USB_REFCLK_FREQ_SEL_MASK 0xf
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_EN_PLL_TOP BIT(12)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL0_2 BIT(13)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL1_1 BIT(14)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL1_2 BIT(15)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_SWITCH_FUNC_TST_MODE BIT(16)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_EEE_ENABLE BIT(17)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_OEN_CLK125M_PLL BIT(18)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_SWITCHCLK_SEL BIT(19)
+
+#define QCA956X_PLL_ETH_XMII_TX_INVERT BIT(1)
+#define QCA956X_PLL_ETH_XMII_GIGE BIT(25)
+#define QCA956X_PLL_ETH_XMII_RX_DELAY_SHIFT 28
+#define QCA956X_PLL_ETH_XMII_RX_DELAY_MASK 0x3
+#define QCA956X_PLL_ETH_XMII_TX_DELAY_SHIFT 26
+#define QCA956X_PLL_ETH_XMII_TX_DELAY_MASK 3
+
+#define QCA956X_PLL_ETH_SGMII_SERDES_LOCK_DETECT BIT(2)
+#define QCA956X_PLL_ETH_SGMII_SERDES_PLL_REFCLK BIT(1)
+#define QCA956X_PLL_ETH_SGMII_SERDES_EN_PLL BIT(0)
+
/*
* USB_CONFIG block
*/
@@ -317,10 +542,19 @@
#define AR934X_RESET_REG_BOOTSTRAP 0xb0
#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+#define QCA953X_RESET_REG_RESET_MODULE 0x1c
+#define QCA953X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA953X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+
#define QCA955X_RESET_REG_RESET_MODULE 0x1c
#define QCA955X_RESET_REG_BOOTSTRAP 0xb0
#define QCA955X_RESET_REG_EXT_INT_STATUS 0xac
+#define QCA956X_RESET_REG_RESET_MODULE 0x1c
+#define QCA956X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA956X_RESET_REG_EXT_INT_STATUS 0xac
+
+#define MISC_INT_MIPS_SI_TIMERINT_MASK BIT(28)
#define MISC_INT_ETHSW BIT(12)
#define MISC_INT_TIMER4 BIT(10)
#define MISC_INT_TIMER3 BIT(9)
@@ -370,16 +604,123 @@
#define AR913X_RESET_USB_HOST BIT(5)
#define AR913X_RESET_USB_PHY BIT(4)
+#define AR933X_RESET_GE1_MDIO BIT(23)
+#define AR933X_RESET_GE0_MDIO BIT(22)
+#define AR933X_RESET_GE1_MAC BIT(13)
#define AR933X_RESET_WMAC BIT(11)
+#define AR933X_RESET_GE0_MAC BIT(9)
#define AR933X_RESET_USB_HOST BIT(5)
#define AR933X_RESET_USB_PHY BIT(4)
#define AR933X_RESET_USBSUS_OVERRIDE BIT(3)
+#define AR934X_RESET_HOST BIT(31)
+#define AR934X_RESET_SLIC BIT(30)
+#define AR934X_RESET_HDMA BIT(29)
+#define AR934X_RESET_EXTERNAL BIT(28)
+#define AR934X_RESET_RTC BIT(27)
+#define AR934X_RESET_PCIE_EP_INT BIT(26)
+#define AR934X_RESET_CHKSUM_ACC BIT(25)
+#define AR934X_RESET_FULL_CHIP BIT(24)
+#define AR934X_RESET_GE1_MDIO BIT(23)
+#define AR934X_RESET_GE0_MDIO BIT(22)
+#define AR934X_RESET_CPU_NMI BIT(21)
+#define AR934X_RESET_CPU_COLD BIT(20)
+#define AR934X_RESET_HOST_RESET_INT BIT(19)
+#define AR934X_RESET_PCIE_EP BIT(18)
+#define AR934X_RESET_UART1 BIT(17)
+#define AR934X_RESET_DDR BIT(16)
+#define AR934X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define AR934X_RESET_NANDF BIT(14)
+#define AR934X_RESET_GE1_MAC BIT(13)
+#define AR934X_RESET_ETH_SWITCH_ANALOG BIT(12)
#define AR934X_RESET_USB_PHY_ANALOG BIT(11)
+#define AR934X_RESET_HOST_DMA_INT BIT(10)
+#define AR934X_RESET_GE0_MAC BIT(9)
+#define AR934X_RESET_ETH_SWITCH BIT(8)
+#define AR934X_RESET_PCIE_PHY BIT(7)
+#define AR934X_RESET_PCIE BIT(6)
#define AR934X_RESET_USB_HOST BIT(5)
#define AR934X_RESET_USB_PHY BIT(4)
#define AR934X_RESET_USBSUS_OVERRIDE BIT(3)
-
+#define AR934X_RESET_LUT BIT(2)
+#define AR934X_RESET_MBOX BIT(1)
+#define AR934X_RESET_I2S BIT(0)
+
+#define QCA953X_RESET_USB_EXT_PWR BIT(29)
+#define QCA953X_RESET_EXTERNAL BIT(28)
+#define QCA953X_RESET_RTC BIT(27)
+#define QCA953X_RESET_FULL_CHIP BIT(24)
+#define QCA953X_RESET_GE1_MDIO BIT(23)
+#define QCA953X_RESET_GE0_MDIO BIT(22)
+#define QCA953X_RESET_CPU_NMI BIT(21)
+#define QCA953X_RESET_CPU_COLD BIT(20)
+#define QCA953X_RESET_DDR BIT(16)
+#define QCA953X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define QCA953X_RESET_GE1_MAC BIT(13)
+#define QCA953X_RESET_ETH_SWITCH_ANALOG BIT(12)
+#define QCA953X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA953X_RESET_GE0_MAC BIT(9)
+#define QCA953X_RESET_ETH_SWITCH BIT(8)
+#define QCA953X_RESET_PCIE_PHY BIT(7)
+#define QCA953X_RESET_PCIE BIT(6)
+#define QCA953X_RESET_USB_HOST BIT(5)
+#define QCA953X_RESET_USB_PHY BIT(4)
+#define QCA953X_RESET_USBSUS_OVERRIDE BIT(3)
+
+#define QCA955X_RESET_HOST BIT(31)
+#define QCA955X_RESET_SLIC BIT(30)
+#define QCA955X_RESET_HDMA BIT(29)
+#define QCA955X_RESET_EXTERNAL BIT(28)
+#define QCA955X_RESET_RTC BIT(27)
+#define QCA955X_RESET_PCIE_EP_INT BIT(26)
+#define QCA955X_RESET_CHKSUM_ACC BIT(25)
+#define QCA955X_RESET_FULL_CHIP BIT(24)
+#define QCA955X_RESET_GE1_MDIO BIT(23)
+#define QCA955X_RESET_GE0_MDIO BIT(22)
+#define QCA955X_RESET_CPU_NMI BIT(21)
+#define QCA955X_RESET_CPU_COLD BIT(20)
+#define QCA955X_RESET_HOST_RESET_INT BIT(19)
+#define QCA955X_RESET_PCIE_EP BIT(18)
+#define QCA955X_RESET_UART1 BIT(17)
+#define QCA955X_RESET_DDR BIT(16)
+#define QCA955X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define QCA955X_RESET_NANDF BIT(14)
+#define QCA955X_RESET_GE1_MAC BIT(13)
+#define QCA955X_RESET_SGMII_ANALOG BIT(12)
+#define QCA955X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA955X_RESET_HOST_DMA_INT BIT(10)
+#define QCA955X_RESET_GE0_MAC BIT(9)
+#define QCA955X_RESET_SGMII BIT(8)
+#define QCA955X_RESET_PCIE_PHY BIT(7)
+#define QCA955X_RESET_PCIE BIT(6)
+#define QCA955X_RESET_USB_HOST BIT(5)
+#define QCA955X_RESET_USB_PHY BIT(4)
+#define QCA955X_RESET_USBSUS_OVERRIDE BIT(3)
+#define QCA955X_RESET_LUT BIT(2)
+#define QCA955X_RESET_MBOX BIT(1)
+#define QCA955X_RESET_I2S BIT(0)
+
+#define QCA956X_RESET_EXTERNAL BIT(28)
+#define QCA956X_RESET_FULL_CHIP BIT(24)
+#define QCA956X_RESET_GE1_MDIO BIT(23)
+#define QCA956X_RESET_GE0_MDIO BIT(22)
+#define QCA956X_RESET_CPU_NMI BIT(21)
+#define QCA956X_RESET_CPU_COLD BIT(20)
+#define QCA956X_RESET_DMA BIT(19)
+#define QCA956X_RESET_DDR BIT(16)
+#define QCA956X_RESET_GE1_MAC BIT(13)
+#define QCA956X_RESET_SGMII_ANALOG BIT(12)
+#define QCA956X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA956X_RESET_GE0_MAC BIT(9)
+#define QCA956X_RESET_SGMII BIT(8)
+#define QCA956X_RESET_USB_HOST BIT(5)
+#define QCA956X_RESET_USB_PHY BIT(4)
+#define QCA956X_RESET_USBSUS_OVERRIDE BIT(3)
+#define QCA956X_RESET_SWITCH_ANALOG BIT(2)
+#define QCA956X_RESET_SWITCH BIT(0)
+
+#define AR933X_BOOTSTRAP_MDIO_GPIO_EN BIT(18)
+#define AR933X_BOOTSTRAP_EEPBUSY BIT(4)
#define AR933X_BOOTSTRAP_REF_CLK_40 BIT(0)
#define AR934X_BOOTSTRAP_SW_OPTION8 BIT(23)
@@ -398,8 +739,17 @@
#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
#define AR934X_BOOTSTRAP_DDR1 BIT(0)
+#define QCA953X_BOOTSTRAP_SW_OPTION2 BIT(12)
+#define QCA953X_BOOTSTRAP_SW_OPTION1 BIT(11)
+#define QCA953X_BOOTSTRAP_EJTAG_MODE BIT(5)
+#define QCA953X_BOOTSTRAP_REF_CLK_40 BIT(4)
+#define QCA953X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
+#define QCA953X_BOOTSTRAP_DDR1 BIT(0)
+
#define QCA955X_BOOTSTRAP_REF_CLK_40 BIT(4)
+#define QCA956X_BOOTSTRAP_REF_CLK_40 BIT(2)
+
#define AR934X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
#define AR934X_PCIE_WMAC_INT_WMAC_TX BIT(1)
#define AR934X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
@@ -418,6 +768,24 @@
AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
AR934X_PCIE_WMAC_INT_PCIE_RC3)
+#define QCA953X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
+#define QCA953X_PCIE_WMAC_INT_WMAC_TX BIT(1)
+#define QCA953X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
+#define QCA953X_PCIE_WMAC_INT_WMAC_RXHP BIT(3)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC BIT(4)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC0 BIT(5)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC1 BIT(6)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC2 BIT(7)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC3 BIT(8)
+#define QCA953X_PCIE_WMAC_INT_WMAC_ALL \
+ (QCA953X_PCIE_WMAC_INT_WMAC_MISC | QCA953X_PCIE_WMAC_INT_WMAC_TX | \
+ QCA953X_PCIE_WMAC_INT_WMAC_RXLP | QCA953X_PCIE_WMAC_INT_WMAC_RXHP)
+
+#define QCA953X_PCIE_WMAC_INT_PCIE_ALL \
+ (QCA953X_PCIE_WMAC_INT_PCIE_RC | QCA953X_PCIE_WMAC_INT_PCIE_RC0 | \
+ QCA953X_PCIE_WMAC_INT_PCIE_RC1 | QCA953X_PCIE_WMAC_INT_PCIE_RC2 | \
+ QCA953X_PCIE_WMAC_INT_PCIE_RC3)
+
#define QCA955X_EXT_INT_WMAC_MISC BIT(0)
#define QCA955X_EXT_INT_WMAC_TX BIT(1)
#define QCA955X_EXT_INT_WMAC_RXLP BIT(2)
@@ -449,6 +817,37 @@
QCA955X_EXT_INT_PCIE_RC2_INT1 | QCA955X_EXT_INT_PCIE_RC2_INT2 | \
QCA955X_EXT_INT_PCIE_RC2_INT3)
+#define QCA956X_EXT_INT_WMAC_MISC BIT(0)
+#define QCA956X_EXT_INT_WMAC_TX BIT(1)
+#define QCA956X_EXT_INT_WMAC_RXLP BIT(2)
+#define QCA956X_EXT_INT_WMAC_RXHP BIT(3)
+#define QCA956X_EXT_INT_PCIE_RC1 BIT(4)
+#define QCA956X_EXT_INT_PCIE_RC1_INT0 BIT(5)
+#define QCA956X_EXT_INT_PCIE_RC1_INT1 BIT(6)
+#define QCA956X_EXT_INT_PCIE_RC1_INT2 BIT(7)
+#define QCA956X_EXT_INT_PCIE_RC1_INT3 BIT(8)
+#define QCA956X_EXT_INT_PCIE_RC2 BIT(12)
+#define QCA956X_EXT_INT_PCIE_RC2_INT0 BIT(13)
+#define QCA956X_EXT_INT_PCIE_RC2_INT1 BIT(14)
+#define QCA956X_EXT_INT_PCIE_RC2_INT2 BIT(15)
+#define QCA956X_EXT_INT_PCIE_RC2_INT3 BIT(16)
+#define QCA956X_EXT_INT_USB1 BIT(24)
+#define QCA956X_EXT_INT_USB2 BIT(28)
+
+#define QCA956X_EXT_INT_WMAC_ALL \
+ (QCA956X_EXT_INT_WMAC_MISC | QCA956X_EXT_INT_WMAC_TX | \
+ QCA956X_EXT_INT_WMAC_RXLP | QCA956X_EXT_INT_WMAC_RXHP)
+
+#define QCA956X_EXT_INT_PCIE_RC1_ALL \
+ (QCA956X_EXT_INT_PCIE_RC1 | QCA956X_EXT_INT_PCIE_RC1_INT0 | \
+ QCA956X_EXT_INT_PCIE_RC1_INT1 | QCA956X_EXT_INT_PCIE_RC1_INT2 | \
+ QCA956X_EXT_INT_PCIE_RC1_INT3)
+
+#define QCA956X_EXT_INT_PCIE_RC2_ALL \
+ (QCA956X_EXT_INT_PCIE_RC2 | QCA956X_EXT_INT_PCIE_RC2_INT0 | \
+ QCA956X_EXT_INT_PCIE_RC2_INT1 | QCA956X_EXT_INT_PCIE_RC2_INT2 | \
+ QCA956X_EXT_INT_PCIE_RC2_INT3)
+
#define REV_ID_MAJOR_MASK 0xfff0
#define REV_ID_MAJOR_AR71XX 0x00a0
#define REV_ID_MAJOR_AR913X 0x00b0
@@ -460,8 +859,12 @@
#define REV_ID_MAJOR_AR9341 0x0120
#define REV_ID_MAJOR_AR9342 0x1120
#define REV_ID_MAJOR_AR9344 0x2120
+#define REV_ID_MAJOR_QCA9533 0x0140
+#define REV_ID_MAJOR_QCA9533_V2 0x0160
#define REV_ID_MAJOR_QCA9556 0x0130
#define REV_ID_MAJOR_QCA9558 0x1130
+#define REV_ID_MAJOR_TP9343 0x0150
+#define REV_ID_MAJOR_QCA956X 0x1150
#define AR71XX_REV_ID_MINOR_MASK 0x3
#define AR71XX_REV_ID_MINOR_AR7130 0x0
@@ -482,8 +885,12 @@
#define AR934X_REV_ID_REVISION_MASK 0xf
+#define QCA953X_REV_ID_REVISION_MASK 0xf
+
#define QCA955X_REV_ID_REVISION_MASK 0xf
+#define QCA956X_REV_ID_REVISION_MASK 0xf
+
/*
* SPI block
*/
@@ -521,15 +928,63 @@
#define AR71XX_GPIO_REG_INT_ENABLE 0x24
#define AR71XX_GPIO_REG_FUNC 0x28
+#define AR934X_GPIO_REG_OUT_FUNC0 0x2c
+#define AR934X_GPIO_REG_OUT_FUNC1 0x30
+#define AR934X_GPIO_REG_OUT_FUNC2 0x34
+#define AR934X_GPIO_REG_OUT_FUNC3 0x38
+#define AR934X_GPIO_REG_OUT_FUNC4 0x3c
+#define AR934X_GPIO_REG_OUT_FUNC5 0x40
#define AR934X_GPIO_REG_FUNC 0x6c
+#define QCA953X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA953X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA953X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA953X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA953X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA953X_GPIO_REG_IN_ENABLE0 0x44
+#define QCA953X_GPIO_REG_FUNC 0x6c
+
+#define QCA953X_GPIO_OUT_MUX_SPI_CS1 10
+#define QCA953X_GPIO_OUT_MUX_SPI_CS2 11
+#define QCA953X_GPIO_OUT_MUX_SPI_CS0 9
+#define QCA953X_GPIO_OUT_MUX_SPI_CLK 8
+#define QCA953X_GPIO_OUT_MUX_SPI_MOSI 12
+#define QCA953X_GPIO_OUT_MUX_LED_LINK1 41
+#define QCA953X_GPIO_OUT_MUX_LED_LINK2 42
+#define QCA953X_GPIO_OUT_MUX_LED_LINK3 43
+#define QCA953X_GPIO_OUT_MUX_LED_LINK4 44
+#define QCA953X_GPIO_OUT_MUX_LED_LINK5 45
+
+#define QCA955X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA955X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA955X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA955X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA955X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA955X_GPIO_REG_OUT_FUNC5 0x40
+#define QCA955X_GPIO_REG_FUNC 0x6c
+
+#define QCA956X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA956X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA956X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA956X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA956X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA956X_GPIO_REG_OUT_FUNC5 0x40
+#define QCA956X_GPIO_REG_IN_ENABLE0 0x44
+#define QCA956X_GPIO_REG_IN_ENABLE3 0x50
+#define QCA956X_GPIO_REG_FUNC 0x6c
+
+#define QCA956X_GPIO_OUT_MUX_GE0_MDO 32
+#define QCA956X_GPIO_OUT_MUX_GE0_MDC 33
+
#define AR71XX_GPIO_COUNT 16
#define AR7240_GPIO_COUNT 18
#define AR7241_GPIO_COUNT 20
#define AR913X_GPIO_COUNT 22
#define AR933X_GPIO_COUNT 30
#define AR934X_GPIO_COUNT 23
+#define QCA953X_GPIO_COUNT 18
#define QCA955X_GPIO_COUNT 24
+#define QCA956X_GPIO_COUNT 23
/*
* SRIF block
@@ -552,4 +1007,318 @@
#define AR934X_SRIF_DPLL2_OUTDIV_SHIFT 13
#define AR934X_SRIF_DPLL2_OUTDIV_MASK 0x7
+#define QCA953X_SRIF_CPU_DPLL1_REG 0x1c0
+#define QCA953X_SRIF_CPU_DPLL2_REG 0x1c4
+#define QCA953X_SRIF_CPU_DPLL3_REG 0x1c8
+
+#define QCA953X_SRIF_DDR_DPLL1_REG 0x240
+#define QCA953X_SRIF_DDR_DPLL2_REG 0x244
+#define QCA953X_SRIF_DDR_DPLL3_REG 0x248
+
+#define QCA953X_SRIF_DPLL1_REFDIV_SHIFT 27
+#define QCA953X_SRIF_DPLL1_REFDIV_MASK 0x1f
+#define QCA953X_SRIF_DPLL1_NINT_SHIFT 18
+#define QCA953X_SRIF_DPLL1_NINT_MASK 0x1ff
+#define QCA953X_SRIF_DPLL1_NFRAC_MASK 0x0003ffff
+
+#define QCA953X_SRIF_DPLL2_LOCAL_PLL BIT(30)
+#define QCA953X_SRIF_DPLL2_OUTDIV_SHIFT 13
+#define QCA953X_SRIF_DPLL2_OUTDIV_MASK 0x7
+
+#define AR71XX_GPIO_FUNC_STEREO_EN BIT(17)
+#define AR71XX_GPIO_FUNC_SLIC_EN BIT(16)
+#define AR71XX_GPIO_FUNC_SPI_CS2_EN BIT(13)
+#define AR71XX_GPIO_FUNC_SPI_CS1_EN BIT(12)
+#define AR71XX_GPIO_FUNC_UART_EN BIT(8)
+#define AR71XX_GPIO_FUNC_USB_OC_EN BIT(4)
+#define AR71XX_GPIO_FUNC_USB_CLK_EN BIT(0)
+
+#define AR724X_GPIO_FUNC_GE0_MII_CLK_EN BIT(19)
+#define AR724X_GPIO_FUNC_SPI_EN BIT(18)
+#define AR724X_GPIO_FUNC_SPI_CS_EN2 BIT(14)
+#define AR724X_GPIO_FUNC_SPI_CS_EN1 BIT(13)
+#define AR724X_GPIO_FUNC_CLK_OBS5_EN BIT(12)
+#define AR724X_GPIO_FUNC_CLK_OBS4_EN BIT(11)
+#define AR724X_GPIO_FUNC_CLK_OBS3_EN BIT(10)
+#define AR724X_GPIO_FUNC_CLK_OBS2_EN BIT(9)
+#define AR724X_GPIO_FUNC_CLK_OBS1_EN BIT(8)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED4_EN BIT(7)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED3_EN BIT(6)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED2_EN BIT(5)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED1_EN BIT(4)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED0_EN BIT(3)
+#define AR724X_GPIO_FUNC_UART_RTS_CTS_EN BIT(2)
+#define AR724X_GPIO_FUNC_UART_EN BIT(1)
+#define AR724X_GPIO_FUNC_JTAG_DISABLE BIT(0)
+
+#define AR913X_GPIO_FUNC_WMAC_LED_EN BIT(22)
+#define AR913X_GPIO_FUNC_EXP_PORT_CS_EN BIT(21)
+#define AR913X_GPIO_FUNC_I2S_REFCLKEN BIT(20)
+#define AR913X_GPIO_FUNC_I2S_MCKEN BIT(19)
+#define AR913X_GPIO_FUNC_I2S1_EN BIT(18)
+#define AR913X_GPIO_FUNC_I2S0_EN BIT(17)
+#define AR913X_GPIO_FUNC_SLIC_EN BIT(16)
+#define AR913X_GPIO_FUNC_UART_RTSCTS_EN BIT(9)
+#define AR913X_GPIO_FUNC_UART_EN BIT(8)
+#define AR913X_GPIO_FUNC_USB_CLK_EN BIT(4)
+
+#define AR933X_GPIO_FUNC_SPDIF2TCK BIT(31)
+#define AR933X_GPIO_FUNC_SPDIF_EN BIT(30)
+#define AR933X_GPIO_FUNC_I2SO_22_18_EN BIT(29)
+#define AR933X_GPIO_FUNC_I2S_MCK_EN BIT(27)
+#define AR933X_GPIO_FUNC_I2SO_EN BIT(26)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_DUPL BIT(25)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_COLL BIT(24)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_ACT BIT(23)
+#define AR933X_GPIO_FUNC_SPI_EN BIT(18)
+#define AR933X_GPIO_FUNC_SPI_CS_EN2 BIT(14)
+#define AR933X_GPIO_FUNC_SPI_CS_EN1 BIT(13)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED4_EN BIT(7)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED3_EN BIT(6)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED2_EN BIT(5)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED1_EN BIT(4)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED0_EN BIT(3)
+#define AR933X_GPIO_FUNC_UART_RTS_CTS_EN BIT(2)
+#define AR933X_GPIO_FUNC_UART_EN BIT(1)
+#define AR933X_GPIO_FUNC_JTAG_DISABLE BIT(0)
+
+#define AR934X_GPIO_FUNC_CLK_OBS7_EN BIT(9)
+#define AR934X_GPIO_FUNC_CLK_OBS6_EN BIT(8)
+#define AR934X_GPIO_FUNC_CLK_OBS5_EN BIT(7)
+#define AR934X_GPIO_FUNC_CLK_OBS4_EN BIT(6)
+#define AR934X_GPIO_FUNC_CLK_OBS3_EN BIT(5)
+#define AR934X_GPIO_FUNC_CLK_OBS2_EN BIT(4)
+#define AR934X_GPIO_FUNC_CLK_OBS1_EN BIT(3)
+#define AR934X_GPIO_FUNC_CLK_OBS0_EN BIT(2)
+#define AR934X_GPIO_FUNC_JTAG_DISABLE BIT(1)
+
+#define AR934X_GPIO_OUT_GPIO 0
+#define AR934X_GPIO_OUT_SPI_CS1 7
+#define AR934X_GPIO_OUT_LED_LINK0 41
+#define AR934X_GPIO_OUT_LED_LINK1 42
+#define AR934X_GPIO_OUT_LED_LINK2 43
+#define AR934X_GPIO_OUT_LED_LINK3 44
+#define AR934X_GPIO_OUT_LED_LINK4 45
+#define AR934X_GPIO_OUT_EXT_LNA0 46
+#define AR934X_GPIO_OUT_EXT_LNA1 47
+
+#define QCA955X_GPIO_FUNC_CLK_OBS7_EN BIT(9)
+#define QCA955X_GPIO_FUNC_CLK_OBS6_EN BIT(8)
+#define QCA955X_GPIO_FUNC_CLK_OBS5_EN BIT(7)
+#define QCA955X_GPIO_FUNC_CLK_OBS4_EN BIT(6)
+#define QCA955X_GPIO_FUNC_CLK_OBS3_EN BIT(5)
+#define QCA955X_GPIO_FUNC_CLK_OBS2_EN BIT(4)
+#define QCA955X_GPIO_FUNC_CLK_OBS1_EN BIT(3)
+#define QCA955X_GPIO_FUNC_JTAG_DISABLE BIT(1)
+
+#define QCA955X_GPIO_OUT_GPIO 0
+#define QCA955X_MII_EXT_MDI 1
+#define QCA955X_SLIC_DATA_OUT 3
+#define QCA955X_SLIC_PCM_FS 4
+#define QCA955X_SLIC_PCM_CLK 5
+#define QCA955X_SPI_CLK 8
+#define QCA955X_SPI_CS_0 9
+#define QCA955X_SPI_CS_1 10
+#define QCA955X_SPI_CS_2 11
+#define QCA955X_SPI_MISO 12
+#define QCA955X_I2S_CLK 13
+#define QCA955X_I2S_WS 14
+#define QCA955X_I2S_SD 15
+#define QCA955X_I2S_MCK 16
+#define QCA955X_SPDIF_OUT 17
+#define QCA955X_UART1_TD 18
+#define QCA955X_UART1_RTS 19
+#define QCA955X_UART1_RD 20
+#define QCA955X_UART1_CTS 21
+#define QCA955X_UART0_SOUT 22
+#define QCA955X_SPDIF2_OUT 23
+#define QCA955X_LED_SGMII_SPEED0 24
+#define QCA955X_LED_SGMII_SPEED1 25
+#define QCA955X_LED_SGMII_DUPLEX 26
+#define QCA955X_LED_SGMII_LINK_UP 27
+#define QCA955X_SGMII_SPEED0_INVERT 28
+#define QCA955X_SGMII_SPEED1_INVERT 29
+#define QCA955X_SGMII_DUPLEX_INVERT 30
+#define QCA955X_SGMII_LINK_UP_INVERT 31
+#define QCA955X_GE1_MII_MDO 32
+#define QCA955X_GE1_MII_MDC 33
+#define QCA955X_SWCOM2 38
+#define QCA955X_SWCOM3 39
+#define QCA955X_MAC2_GPIO 40
+#define QCA955X_MAC3_GPIO 41
+#define QCA955X_ATT_LED 42
+#define QCA955X_PWR_LED 43
+#define QCA955X_TX_FRAME 44
+#define QCA955X_RX_CLEAR_EXTERNAL 45
+#define QCA955X_LED_NETWORK_EN 46
+#define QCA955X_LED_POWER_EN 47
+#define QCA955X_WMAC_GLUE_WOW 68
+#define QCA955X_RX_CLEAR_EXTENSION 70
+#define QCA955X_CP_NAND_CS1 73
+#define QCA955X_USB_SUSPEND 74
+#define QCA955X_ETH_TX_ERR 75
+#define QCA955X_DDR_DQ_OE 76
+#define QCA955X_CLKREQ_N_EP 77
+#define QCA955X_CLKREQ_N_RC 78
+#define QCA955X_CLK_OBS0 79
+#define QCA955X_CLK_OBS1 80
+#define QCA955X_CLK_OBS2 81
+#define QCA955X_CLK_OBS3 82
+#define QCA955X_CLK_OBS4 83
+#define QCA955X_CLK_OBS5 84
+
+/*
+ * MII_CTRL block
+ */
+#define AR71XX_MII_REG_MII0_CTRL 0x00
+#define AR71XX_MII_REG_MII1_CTRL 0x04
+
+#define AR71XX_MII_CTRL_IF_MASK 3
+#define AR71XX_MII_CTRL_SPEED_SHIFT 4
+#define AR71XX_MII_CTRL_SPEED_MASK 3
+#define AR71XX_MII_CTRL_SPEED_10 0
+#define AR71XX_MII_CTRL_SPEED_100 1
+#define AR71XX_MII_CTRL_SPEED_1000 2
+
+#define AR71XX_MII0_CTRL_IF_GMII 0
+#define AR71XX_MII0_CTRL_IF_MII 1
+#define AR71XX_MII0_CTRL_IF_RGMII 2
+#define AR71XX_MII0_CTRL_IF_RMII 3
+
+#define AR71XX_MII1_CTRL_IF_RGMII 0
+#define AR71XX_MII1_CTRL_IF_RMII 1
+
+/*
+ * AR933X GMAC interface
+ */
+#define AR933X_GMAC_REG_ETH_CFG 0x00
+
+#define AR933X_ETH_CFG_RGMII_GE0 BIT(0)
+#define AR933X_ETH_CFG_MII_GE0 BIT(1)
+#define AR933X_ETH_CFG_GMII_GE0 BIT(2)
+#define AR933X_ETH_CFG_MII_GE0_MASTER BIT(3)
+#define AR933X_ETH_CFG_MII_GE0_SLAVE BIT(4)
+#define AR933X_ETH_CFG_MII_GE0_ERR_EN BIT(5)
+#define AR933X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define AR933X_ETH_CFG_SW_PHY_ADDR_SWAP BIT(8)
+#define AR933X_ETH_CFG_RMII_GE0 BIT(9)
+#define AR933X_ETH_CFG_RMII_GE0_SPD_10 0
+#define AR933X_ETH_CFG_RMII_GE0_SPD_100 BIT(10)
+
+/*
+ * AR934X GMAC Interface
+ */
+#define AR934X_GMAC_REG_ETH_CFG 0x00
+
+#define AR934X_ETH_CFG_RGMII_GMAC0 BIT(0)
+#define AR934X_ETH_CFG_MII_GMAC0 BIT(1)
+#define AR934X_ETH_CFG_GMII_GMAC0 BIT(2)
+#define AR934X_ETH_CFG_MII_GMAC0_MASTER BIT(3)
+#define AR934X_ETH_CFG_MII_GMAC0_SLAVE BIT(4)
+#define AR934X_ETH_CFG_MII_GMAC0_ERR_EN BIT(5)
+#define AR934X_ETH_CFG_SW_ONLY_MODE BIT(6)
+#define AR934X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define AR934X_ETH_CFG_SW_APB_ACCESS BIT(9)
+#define AR934X_ETH_CFG_RMII_GMAC0 BIT(10)
+#define AR933X_ETH_CFG_MII_CNTL_SPEED BIT(11)
+#define AR934X_ETH_CFG_RMII_GMAC0_MASTER BIT(12)
+#define AR933X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+#define AR934X_ETH_CFG_RXD_DELAY BIT(14)
+#define AR934X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define AR934X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define AR934X_ETH_CFG_RDV_DELAY BIT(16)
+#define AR934X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define AR934X_ETH_CFG_RDV_DELAY_SHIFT 16
+
+/*
+ * QCA953X GMAC Interface
+ */
+#define QCA953X_GMAC_REG_ETH_CFG 0x00
+
+#define QCA953X_ETH_CFG_SW_ONLY_MODE BIT(6)
+#define QCA953X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define QCA953X_ETH_CFG_SW_APB_ACCESS BIT(9)
+#define QCA953X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+
+/*
+ * QCA955X GMAC Interface
+ */
+
+#define QCA955X_GMAC_REG_ETH_CFG 0x00
+#define QCA955X_GMAC_REG_SGMII_SERDES 0x18
+
+#define QCA955X_ETH_CFG_RGMII_EN BIT(0)
+#define QCA955X_ETH_CFG_MII_GE0 BIT(1)
+#define QCA955X_ETH_CFG_GMII_GE0 BIT(2)
+#define QCA955X_ETH_CFG_MII_GE0_MASTER BIT(3)
+#define QCA955X_ETH_CFG_MII_GE0_SLAVE BIT(4)
+#define QCA955X_ETH_CFG_GE0_ERR_EN BIT(5)
+#define QCA955X_ETH_CFG_GE0_SGMII BIT(6)
+#define QCA955X_ETH_CFG_RMII_GE0 BIT(10)
+#define QCA955X_ETH_CFG_MII_CNTL_SPEED BIT(11)
+#define QCA955X_ETH_CFG_RMII_GE0_MASTER BIT(12)
+#define QCA955X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define QCA955X_ETH_CFG_RDV_DELAY BIT(16)
+#define QCA955X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_RDV_DELAY_SHIFT 16
+#define QCA955X_ETH_CFG_TXD_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_TXD_DELAY_SHIFT 18
+#define QCA955X_ETH_CFG_TXE_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_TXE_DELAY_SHIFT 20
+
+#define QCA955X_SGMII_SERDES_LOCK_DETECT_STATUS BIT(15)
+#define QCA955X_SGMII_SERDES_RES_CALIBRATION_SHIFT 23
+#define QCA955X_SGMII_SERDES_RES_CALIBRATION_MASK 0xf
+/*
+ * QCA956X GMAC Interface
+ */
+
+#define QCA956X_GMAC_REG_ETH_CFG 0x00
+#define QCA956X_GMAC_REG_SGMII_RESET 0x14
+#define QCA956X_GMAC_REG_SGMII_SERDES 0x18
+#define QCA956X_GMAC_REG_MR_AN_CONTROL 0x1c
+#define QCA956X_GMAC_REG_SGMII_CONFIG 0x34
+#define QCA956X_GMAC_REG_SGMII_DEBUG 0x58
+
+#define QCA956X_ETH_CFG_RGMII_EN BIT(0)
+#define QCA956X_ETH_CFG_GE0_SGMII BIT(6)
+#define QCA956X_ETH_CFG_SW_ONLY_MODE BIT(7)
+#define QCA956X_ETH_CFG_SW_PHY_SWAP BIT(8)
+#define QCA956X_ETH_CFG_SW_PHY_ADDR_SWAP BIT(9)
+#define QCA956X_ETH_CFG_SW_APB_ACCESS BIT(10)
+#define QCA956X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+#define QCA956X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define QCA956X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define QCA956X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define QCA956X_ETH_CFG_RDV_DELAY_SHIFT 16
+
+#define QCA956X_SGMII_RESET_RX_CLK_N_RESET 0x0
+#define QCA956X_SGMII_RESET_RX_CLK_N BIT(0)
+#define QCA956X_SGMII_RESET_TX_CLK_N BIT(1)
+#define QCA956X_SGMII_RESET_RX_125M_N BIT(2)
+#define QCA956X_SGMII_RESET_TX_125M_N BIT(3)
+#define QCA956X_SGMII_RESET_HW_RX_125M_N BIT(4)
+
+#define QCA956X_SGMII_SERDES_CDR_BW_MASK 0x3
+#define QCA956X_SGMII_SERDES_CDR_BW_SHIFT 1
+#define QCA956X_SGMII_SERDES_TX_DR_CTRL_MASK 0x7
+#define QCA956X_SGMII_SERDES_TX_DR_CTRL_SHIFT 4
+#define QCA956X_SGMII_SERDES_PLL_BW BIT(8)
+#define QCA956X_SGMII_SERDES_VCO_FAST BIT(9)
+#define QCA956X_SGMII_SERDES_VCO_SLOW BIT(10)
+#define QCA956X_SGMII_SERDES_LOCK_DETECT_STATUS BIT(15)
+#define QCA956X_SGMII_SERDES_EN_SIGNAL_DETECT BIT(16)
+#define QCA956X_SGMII_SERDES_FIBER_SDO BIT(17)
+#define QCA956X_SGMII_SERDES_RES_CALIBRATION_SHIFT 23
+#define QCA956X_SGMII_SERDES_RES_CALIBRATION_MASK 0xf
+#define QCA956X_SGMII_SERDES_VCO_REG_SHIFT 27
+#define QCA956X_SGMII_SERDES_VCO_REG_MASK 0xf
+
+#define QCA956X_MR_AN_CONTROL_AN_ENABLE BIT(12)
+#define QCA956X_MR_AN_CONTROL_PHY_RESET BIT(15)
+
+#define QCA956X_SGMII_CONFIG_MODE_CTRL_SHIFT 0
+#define QCA956X_SGMII_CONFIG_MODE_CTRL_MASK 0x7
+
#endif /* __ASM_MACH_AR71XX_REGS_H */
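Note on the delay fields added above: the QCA956X XMII RX/TX delays are two-bit fields addressed through the SHIFT/MASK pairs. A minimal sketch of composing a control-register value from these macros follows; the helper name and the chosen delay settings are illustrative, not part of this patch:

    /* Hypothetical helper: build a QCA956X XMII control value.
     * rx_delay/tx_delay are 0..3; gige selects gigabit mode. */
    static u32 qca956x_xmii_ctrl(u32 rx_delay, u32 tx_delay, bool gige)
    {
    	u32 t = 0;

    	t |= (rx_delay & QCA956X_PLL_ETH_XMII_RX_DELAY_MASK) <<
    	     QCA956X_PLL_ETH_XMII_RX_DELAY_SHIFT;
    	t |= (tx_delay & QCA956X_PLL_ETH_XMII_TX_DELAY_MASK) <<
    	     QCA956X_PLL_ETH_XMII_TX_DELAY_SHIFT;
    	if (gige)
    		t |= QCA956X_PLL_ETH_XMII_GIGE;

    	return t;
    }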
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 441faa92c3cd..73dcd63b8243 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -32,8 +32,11 @@ enum ath79_soc_type {
ATH79_SOC_AR9341,
ATH79_SOC_AR9342,
ATH79_SOC_AR9344,
+ ATH79_SOC_QCA9533,
ATH79_SOC_QCA9556,
ATH79_SOC_QCA9558,
+ ATH79_SOC_TP9343,
+ ATH79_SOC_QCA956X,
};
extern enum ath79_soc_type ath79_soc;
@@ -100,6 +103,16 @@ static inline int soc_is_ar934x(void)
return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
}
+static inline int soc_is_qca9533(void)
+{
+ return ath79_soc == ATH79_SOC_QCA9533;
+}
+
+static inline int soc_is_qca953x(void)
+{
+ return soc_is_qca9533();
+}
+
static inline int soc_is_qca9556(void)
{
return ath79_soc == ATH79_SOC_QCA9556;
@@ -115,6 +128,26 @@ static inline int soc_is_qca955x(void)
return soc_is_qca9556() || soc_is_qca9558();
}
+static inline int soc_is_tp9343(void)
+{
+ return ath79_soc == ATH79_SOC_TP9343;
+}
+
+static inline int soc_is_qca9561(void)
+{
+ return ath79_soc == ATH79_SOC_QCA956X;
+}
+
+static inline int soc_is_qca9563(void)
+{
+ return ath79_soc == ATH79_SOC_QCA956X;
+}
+
+static inline int soc_is_qca956x(void)
+{
+ return soc_is_qca9561() || soc_is_qca9563();
+}
+
void ath79_ddr_wb_flush(unsigned int reg);
void ath79_ddr_set_pci_windows(void);
@@ -134,6 +167,7 @@ static inline u32 ath79_pll_rr(unsigned reg)
static inline void ath79_reset_wr(unsigned reg, u32 val)
{
__raw_writel(val, ath79_reset_base + reg);
+ (void) __raw_readl(ath79_reset_base + reg); /* flush */
}
static inline u32 ath79_reset_rr(unsigned reg)
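The read-back added to ath79_reset_wr() above is a posted-write flush: reading the same register back forces the preceding store out of the write buffer before the caller continues, which matters when the next action depends on the reset bit having actually reached the device. The general pattern, as a sketch (base and REG are illustrative, not from this patch):

    __raw_writel(val, base + REG);
    (void) __raw_readl(base + REG);	/* flush the posted write */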
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
index 0089a740e5ae..026ad90c8ac0 100644
--- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -36,6 +36,7 @@
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
#define cpu_has_mips32r1 1
#define cpu_has_mips32r2 1
@@ -43,6 +44,7 @@
#define cpu_has_mips64r2 0
#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
#define cpu_has_64bits 0
#define cpu_has_64bit_zero_reg 0
@@ -51,5 +53,9 @@
#define cpu_dcache_line_size() 32
#define cpu_icache_line_size() 32
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 1
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_pindexed_dcache 0
#endif /* __ASM_MACH_ATH79_CPU_FEATURE_OVERRIDES_H */
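The new overrides pin cache properties at compile time so the generic code need not probe them at boot; in particular, cpu_has_dc_aliases set to 1 declares that the D-cache can alias across pages, keeping the alias-handling paths in flush_dcache_page() and friends compiled in. Without an override, asm/cpu-features.h falls back to a runtime test, roughly as below (quoted from memory, so treat as an approximation):

    #ifndef cpu_has_dc_aliases
    #define cpu_has_dc_aliases	(cpu_data[0].dcache.flags & MIPS_CACHE_ALIASES)
    #endif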
diff --git a/arch/mips/include/asm/mach-bmips/dma-coherence.h b/arch/mips/include/asm/mach-bmips/dma-coherence.h
deleted file mode 100644
index d29781f02285..000000000000
--- a/arch/mips/include/asm/mach-bmips/dma-coherence.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2009 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_MACH_BMIPS_DMA_COHERENCE_H
-#define __ASM_MACH_BMIPS_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-#include <asm/cpu-type.h>
-#include <asm/cpu.h>
-
-struct device;
-
-extern dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size);
-extern dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page);
-extern unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr);
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 0;
-}
-
-#define plat_post_dma_flush bmips_post_dma_flush
-
-#endif /* __ASM_MACH_BMIPS_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
deleted file mode 100644
index 6eb1ee548b11..000000000000
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- *
- * Similar to mach-generic/dma-coherence.h except
- * plat_device_is_coherent hard coded to return 1.
- *
- */
-#ifndef __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
-#define __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
-
-#include <linux/bug.h>
-
-struct device;
-
-extern void octeon_pci_dma_init(void);
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- BUG();
- return 0;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- BUG();
- return 0;
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- BUG();
- return 0;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- BUG();
- return 0;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-struct dma_map_ops;
-extern const struct dma_map_ops *octeon_pci_dma_map_ops;
-extern char *octeon_swiotlb;
-
-#endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
deleted file mode 100644
index 8ad7a40ca786..000000000000
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_GENERIC_DMA_COHERENCE_H
-#define __ASM_MACH_GENERIC_DMA_COHERENCE_H
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- return virt_to_phys(addr);
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- return page_to_phys(page);
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- return dma_addr;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_PERDEV_COHERENT
- return dev->archdata.dma_coherent;
-#else
- switch (coherentio) {
- default:
- case IO_COHERENCE_DEFAULT:
- return hw_coherentio;
- case IO_COHERENCE_ENABLED:
- return 1;
- case IO_COHERENCE_DISABLED:
- return 0;
- }
-#endif
-}
-
-#ifndef plat_post_dma_flush
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-#endif
-
-#endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-generic/kmalloc.h b/arch/mips/include/asm/mach-generic/kmalloc.h
index 74207c7bd00d..649a98338886 100644
--- a/arch/mips/include/asm/mach-generic/kmalloc.h
+++ b/arch/mips/include/asm/mach-generic/kmalloc.h
@@ -2,8 +2,7 @@
#ifndef __ASM_MACH_GENERIC_KMALLOC_H
#define __ASM_MACH_GENERIC_KMALLOC_H
-
-#ifndef CONFIG_DMA_COHERENT
+#ifdef CONFIG_DMA_NONCOHERENT
/*
* Total overkill for most systems but need as a safe default.
* Set this one if any device in the system might do non-coherent DMA.
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 952b0fdfda0e..ee5ebe98f6cf 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -17,9 +17,13 @@
/*
* This gives the physical RAM offset.
*/
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET _AC(0, UL)
-#endif
+#ifndef __ASSEMBLY__
+# if defined(CONFIG_MIPS_AUTO_PFN_OFFSET)
+# define PHYS_OFFSET ((unsigned long)PFN_PHYS(ARCH_PFN_OFFSET))
+# elif !defined(PHYS_OFFSET)
+# define PHYS_OFFSET _AC(0, UL)
+# endif
+#endif /* __ASSEMBLY__ */
#ifdef CONFIG_32BIT
#ifdef CONFIG_KVM_GUEST
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
deleted file mode 100644
index 04d862020ac9..000000000000
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_IP27_DMA_COHERENCE_H
-#define __ASM_MACH_IP27_DMA_COHERENCE_H
-
-#include <asm/pci/bridge.h>
-
-#define pdev_to_baddr(pdev, addr) \
- (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
-#define dev_to_baddr(dev, addr) \
- pdev_to_baddr(to_pci_dev(dev), (addr))
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- dma_addr_t pa = dev_to_baddr(dev, virt_to_phys(addr));
-
- return pa;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- dma_addr_t pa = dev_to_baddr(dev, page_to_phys(page));
-
- return pa;
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- return dma_addr & ~(0xffUL << 56);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 1; /* IP27 non-coherent mode is unsupported */
-}
-
-#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
deleted file mode 100644
index 7bdf212587a0..000000000000
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H
-#define __ASM_MACH_IP32_DMA_COHERENCE_H
-
-#include <asm/ip32/crime.h>
-
-struct device;
-
-/*
- * Few notes.
- * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
- * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
- * native-endian)
- * 3. All other devices see memory as one big chunk at 0x40000000
- * 4. Non-PCI devices will pass NULL as struct device*
- *
- * Thus we translate differently, depending on device.
- */
-
-#define RAM_OFFSET_MASK 0x3fffffffUL
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;
-
- if (dev == NULL)
- pa += CRIME_HI_MEM_BASE;
-
- return pa;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- dma_addr_t pa;
-
- pa = page_to_phys(page) & RAM_OFFSET_MASK;
-
- if (dev == NULL)
- pa += CRIME_HI_MEM_BASE;
-
- return pa;
-}
-
-/* This is almost certainly wrong but it's what dma-ip32.c used to use */
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- unsigned long addr = dma_addr & RAM_OFFSET_MASK;
-
- if (dma_addr >= 256*1024*1024)
- addr += CRIME_HI_MEM_BASE;
-
- return addr;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 0; /* IP32 is non-coherent */
-}
-
-#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
deleted file mode 100644
index dc347c25c343..000000000000
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MACH_JAZZ_DMA_COHERENCE_H
-#define __ASM_MACH_JAZZ_DMA_COHERENCE_H
-
-#include <asm/jazzdma.h>
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
- return vdma_alloc(virt_to_phys(addr), size);
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- return vdma_alloc(page_to_phys(page), PAGE_SIZE);
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- return vdma_log2phys(dma_addr);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
- vdma_free(dma_addr);
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 0;
-}
-
-#endif /* __ASM_MACH_JAZZ_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/dma-coherence.h b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
deleted file mode 100644
index 64fc44dec0a8..000000000000
--- a/arch/mips/include/asm/mach-loongson64/dma-coherence.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006, 07 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
- * Author: Fuxin Zhang, zhangfx@lemote.com
- *
- */
-#ifndef __ASM_MACH_LOONGSON64_DMA_COHERENCE_H
-#define __ASM_MACH_LOONGSON64_DMA_COHERENCE_H
-
-#ifdef CONFIG_SWIOTLB
-#include <linux/swiotlb.h>
-#endif
-
-struct device;
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-extern dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-extern phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
-#ifdef CONFIG_CPU_LOONGSON3
- return __phys_to_dma(dev, virt_to_phys(addr));
-#else
- return virt_to_phys(addr) | 0x80000000;
-#endif
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
-#ifdef CONFIG_CPU_LOONGSON3
- return __phys_to_dma(dev, page_to_phys(page));
-#else
- return page_to_phys(page) | 0x80000000;
-#endif
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
-#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
- return __dma_to_phys(dev, dma_addr);
-#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
- return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
-#else
- return dma_addr & 0x7fffffff;
-#endif
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_NONCOHERENT
- return 0;
-#else
- return 1;
-#endif /* CONFIG_DMA_NONCOHERENT */
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-#endif /* __ASM_MACH_LOONGSON64_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index 8393bc548987..312739117bb0 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -19,18 +19,18 @@
.set push
.set mips64
/* Set LPA on LOONGSON3 config3 */
- mfc0 t0, $16, 3
+ mfc0 t0, CP0_CONFIG3
or t0, (0x1 << 7)
- mtc0 t0, $16, 3
+ mtc0 t0, CP0_CONFIG3
/* Set ELPA on LOONGSON3 pagegrain */
- mfc0 t0, $5, 1
+ mfc0 t0, CP0_PAGEGRAIN
or t0, (0x1 << 29)
- mtc0 t0, $5, 1
+ mtc0 t0, CP0_PAGEGRAIN
#ifdef CONFIG_LOONGSON3_ENHANCEMENT
/* Enable STFill Buffer */
- mfc0 t0, $16, 6
+ mfc0 t0, CP0_CONFIG6
or t0, 0x100
- mtc0 t0, $16, 6
+ mtc0 t0, CP0_CONFIG6
#endif
_ehb
.set pop
@@ -45,18 +45,18 @@
.set push
.set mips64
/* Set LPA on LOONGSON3 config3 */
- mfc0 t0, $16, 3
+ mfc0 t0, CP0_CONFIG3
or t0, (0x1 << 7)
- mtc0 t0, $16, 3
+ mtc0 t0, CP0_CONFIG3
/* Set ELPA on LOONGSON3 pagegrain */
- mfc0 t0, $5, 1
+ mfc0 t0, CP0_PAGEGRAIN
or t0, (0x1 << 29)
- mtc0 t0, $5, 1
+ mtc0 t0, CP0_PAGEGRAIN
#ifdef CONFIG_LOONGSON3_ENHANCEMENT
/* Enable STFill Buffer */
- mfc0 t0, $16, 6
+ mfc0 t0, CP0_CONFIG6
or t0, 0x100
- mtc0 t0, $16, 6
+ mtc0 t0, CP0_CONFIG6
#endif
_ehb
.set pop
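These hunks replace raw coprocessor-0 register/select pairs with the symbolic names that the mipsregs.h hunk later in this patch introduces; the mapping is:

    /* Raw encoding -> symbolic name (from mipsregs.h):
     *   $16, 3  ->  CP0_CONFIG3
     *   $5, 1   ->  CP0_PAGEGRAIN
     *   $16, 6  ->  CP0_CONFIG6
     */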
diff --git a/arch/mips/include/asm/mach-pic32/spaces.h b/arch/mips/include/asm/mach-pic32/spaces.h
index 046a0a9aa8b3..a1b9783b76ea 100644
--- a/arch/mips/include/asm/mach-pic32/spaces.h
+++ b/arch/mips/include/asm/mach-pic32/spaces.h
@@ -16,7 +16,6 @@
#ifdef CONFIG_PIC32MZDA
#define PHYS_OFFSET _AC(0x08000000, UL)
-#define UNCAC_BASE _AC(0xa8000000, UL)
#endif
#include <asm/mach-generic/spaces.h>
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 0bc270806ec5..01df9ad62fb8 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -16,6 +16,7 @@
#include <linux/linkage.h>
#include <linux/types.h>
#include <asm/hazards.h>
+#include <asm/isa-rev.h>
#include <asm/war.h>
/*
@@ -51,6 +52,7 @@
#define CP0_GLOBALNUMBER $3, 1
#define CP0_CONTEXT $4
#define CP0_PAGEMASK $5
+#define CP0_PAGEGRAIN $5, 1
#define CP0_SEGCTL0 $5, 2
#define CP0_SEGCTL1 $5, 3
#define CP0_SEGCTL2 $5, 4
@@ -77,6 +79,7 @@
#define CP0_CONFIG $16
#define CP0_CONFIG3 $16, 3
#define CP0_CONFIG5 $16, 5
+#define CP0_CONFIG6 $16, 6
#define CP0_LLADDR $17
#define CP0_WATCHLO $18
#define CP0_WATCHHI $19
@@ -681,8 +684,6 @@
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
-/* ExternalSync */
-#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
@@ -1483,32 +1484,38 @@ do { \
#define __write_64bit_c0_split(source, sel, val) \
do { \
- unsigned long long __tmp; \
+ unsigned long long __tmp = (val); \
unsigned long __flags; \
\
local_irq_save(__flags); \
- if (sel == 0) \
+ if (MIPS_ISA_REV >= 2) \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\t" MIPS_ISA_LEVEL "\n\t" \
+ "dins\t%L0, %M0, 32, 32\n\t" \
+ "dmtc0\t%L0, " #source ", " #sel "\n\t" \
+ ".set\tpop" \
+ : "+r" (__tmp)); \
+ else if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
- "dsll\t%L0, %L1, 32\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
- "dsll\t%M0, %M1, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source "\n\t" \
".set\tmips0" \
- : "=&r,r" (__tmp) \
- : "r,0" (val)); \
+ : "+r" (__tmp)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
- "dsll\t%L0, %L1, 32\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
- "dsll\t%M0, %M1, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source ", " #sel "\n\t" \
".set\tmips0" \
- : "=&r,r" (__tmp) \
- : "r,0" (val)); \
+ : "+r" (__tmp)); \
local_irq_restore(__flags); \
} while (0)
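When MIPS_ISA_REV >= 2 at build time, the two 32-bit halves of the value are now merged with a single dins (double-word insert) instead of the four-instruction shift/or sequence, and the operand constraint collapses from an output/input pair to one read-write "+r" operand. What the dins path computes, expressed in C as a sketch (hi/lo stand for the %M0/%L0 register halves):

    /* dins %L0, %M0, 32, 32: insert the low 32 bits of the high half
     * into bits 32..63 of the low half, yielding the full 64-bit value. */
    u64 merged = ((u64)hi << 32) | (u32)lo;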
@@ -2767,7 +2774,6 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
-__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index da2004cef2d5..b509371a6b0c 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -126,8 +126,6 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
for_each_possible_cpu(i)
cpu_context(i, mm) = 0;
- atomic_set(&mm->context.fp_mode_switching, 0);
-
mm->context.bd_emupage_allocmap = NULL;
spin_lock_init(&mm->context.bd_emupage_lock);
init_waitqueue_head(&mm->context.bd_emupage_queue);
diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h
index 5604db3d1836..d79c68fa78d9 100644
--- a/arch/mips/include/asm/netlogic/xlr/fmn.h
+++ b/arch/mips/include/asm/netlogic/xlr/fmn.h
@@ -301,8 +301,6 @@ static inline int nlm_fmn_send(unsigned int size, unsigned int code,
for (i = 0; i < 8; i++) {
nlm_msgsnd(dest);
status = nlm_read_c2_status0();
- if ((status & 0x2) == 1)
- pr_info("Send pending fail!\n");
if ((status & 0x4) == 0)
return 0;
}
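The check deleted here was dead code: status & 0x2 evaluates to either 0 or 2, so the comparison against 1 could never be true and the diagnostic never printed. Had the pending-fail bit been meant to be reported, the test would read as below (a sketch, not part of this patch):

    if (status & 0x2)
    	pr_info("Send pending fail!\n");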
diff --git a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
index a1e21a3854cf..1eef155979f3 100644
--- a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -55,6 +55,8 @@
#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_ASXX_TX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull)
+void __cvmx_interrupt_asxx_enable(int block);
+
union cvmx_asxx_gmii_rx_clk_set {
uint64_t u64;
struct cvmx_asxx_gmii_rx_clk_set_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
index 6e61792d9248..1d18be8cdddc 100644
--- a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
@@ -1,10014 +1,176 @@
-/***********************license start***************
- * Author: Cavium Networks
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Octeon CIU definitions
*
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ */
#ifndef __CVMX_CIU_DEFS_H__
#define __CVMX_CIU_DEFS_H__
-#define CVMX_CIU_BIST (CVMX_ADD_IO_SEG(0x0001070000000730ull))
-#define CVMX_CIU_BLOCK_INT (CVMX_ADD_IO_SEG(0x00010700000007C0ull))
-#define CVMX_CIU_DINT (CVMX_ADD_IO_SEG(0x0001070000000720ull))
-#define CVMX_CIU_EN2_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x000107000000A600ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_IOX_INT_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CE00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_IOX_INT_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AE00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x000107000000A000ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP2_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000C800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP2_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000A800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x000107000000A200ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CA00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AA00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x000107000000A400ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CC00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AC00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_FUSE (CVMX_ADD_IO_SEG(0x0001070000000728ull))
-#define CVMX_CIU_GSTOP (CVMX_ADD_IO_SEG(0x0001070000000710ull))
-#define CVMX_CIU_INT33_SUM0 (CVMX_ADD_IO_SEG(0x0001070000000110ull))
-#define CVMX_CIU_INTX_EN0(offset) (CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1(offset) (CVMX_ADD_IO_SEG(0x0001070000000208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN4_0(offset) (CVMX_ADD_IO_SEG(0x0001070000000C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1(offset) (CVMX_ADD_IO_SEG(0x0001070000000C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_SUM0(offset) (CVMX_ADD_IO_SEG(0x0001070000000000ull) + ((offset) & 63) * 8)
-#define CVMX_CIU_INTX_SUM4(offset) (CVMX_ADD_IO_SEG(0x0001070000000C00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_INT_DBG_SEL (CVMX_ADD_IO_SEG(0x00010700000007D0ull))
-#define CVMX_CIU_INT_SUM1 (CVMX_ADD_IO_SEG(0x0001070000000108ull))
-static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned long offset)
+#include <asm/bitfield.h>
+
+#define CVMX_CIU_ADDR(addr, coreid, coremask, offset) \
+ (CVMX_ADD_IO_SEG(0x0001070000000000ull + addr##ull) + \
+ (((coreid) & (coremask)) * offset))
+
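+/*
+ * CVMX_CIU_ADDR() folds the long per-register #define tables removed
+ * below into one computation: the register offset is added to the CIU
+ * base 0x0001070000000000, then banked registers step by "offset"
+ * bytes per (coreid & coremask).  For example, CVMX_CIU_INTX_EN0(c)
+ * expands to CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((c) & 0x3F) * 16,
+ * exactly the definition it replaces.
+ */
+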
+#define CVMX_CIU_EN2_PPX_IP4(c) CVMX_CIU_ADDR(0xA400, c, 0x0F, 8)
+#define CVMX_CIU_EN2_PPX_IP4_W1C(c) CVMX_CIU_ADDR(0xCC00, c, 0x0F, 8)
+#define CVMX_CIU_EN2_PPX_IP4_W1S(c) CVMX_CIU_ADDR(0xAC00, c, 0x0F, 8)
+#define CVMX_CIU_FUSE CVMX_CIU_ADDR(0x0728, 0, 0x00, 0)
+#define CVMX_CIU_INT_SUM1 CVMX_CIU_ADDR(0x0108, 0, 0x00, 0)
+#define CVMX_CIU_INTX_EN0(c) CVMX_CIU_ADDR(0x0200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN0_W1C(c) CVMX_CIU_ADDR(0x2200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN0_W1S(c) CVMX_CIU_ADDR(0x6200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1(c) CVMX_CIU_ADDR(0x0208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1_W1C(c) CVMX_CIU_ADDR(0x2208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1_W1S(c) CVMX_CIU_ADDR(0x6208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_SUM0(c) CVMX_CIU_ADDR(0x0000, c, 0x3F, 8)
+#define CVMX_CIU_NMI CVMX_CIU_ADDR(0x0718, 0, 0x00, 0)
+#define CVMX_CIU_PCI_INTA CVMX_CIU_ADDR(0x0750, 0, 0x00, 0)
+#define CVMX_CIU_PP_BIST_STAT CVMX_CIU_ADDR(0x07E0, 0, 0x00, 0)
+#define CVMX_CIU_PP_DBG CVMX_CIU_ADDR(0x0708, 0, 0x00, 0)
+#define CVMX_CIU_PP_RST CVMX_CIU_ADDR(0x0700, 0, 0x00, 0)
+#define CVMX_CIU_QLM0 CVMX_CIU_ADDR(0x0780, 0, 0x00, 0)
+#define CVMX_CIU_QLM1 CVMX_CIU_ADDR(0x0788, 0, 0x00, 0)
+#define CVMX_CIU_QLM_JTGC CVMX_CIU_ADDR(0x0768, 0, 0x00, 0)
+#define CVMX_CIU_QLM_JTGD CVMX_CIU_ADDR(0x0770, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_BIST CVMX_CIU_ADDR(0x0738, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_PRST1 CVMX_CIU_ADDR(0x0758, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_PRST CVMX_CIU_ADDR(0x0748, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_RST CVMX_CIU_ADDR(0x0740, 0, 0x00, 0)
+#define CVMX_CIU_SUM2_PPX_IP4(c) CVMX_CIU_ADDR(0x8C00, c, 0x0F, 8)
+#define CVMX_CIU_TIM_MULTI_CAST CVMX_CIU_ADDR(0xC200, 0, 0x00, 0)
+#define CVMX_CIU_TIMX(c) CVMX_CIU_ADDR(0x0480, c, 0x0F, 8)
+
+static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned int coreid)
{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100600ull) + (offset) * 8;
- }
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
+ if (cvmx_get_octeon_family() == (OCTEON_CN68XX & OCTEON_FAMILY_MASK))
+ return CVMX_CIU_ADDR(0x100100600, coreid, 0x0F, 8);
+ else
+ return CVMX_CIU_ADDR(0x000000680, coreid, 0x0F, 8);
}
-static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned long offset)
+static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned int coreid)
{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100400ull) + (offset) * 8;
- }
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
+ if (cvmx_get_octeon_family() == (OCTEON_CN68XX & OCTEON_FAMILY_MASK))
+ return CVMX_CIU_ADDR(0x100100400, coreid, 0x0F, 8);
+ else
+ return CVMX_CIU_ADDR(0x000000600, coreid, 0x0F, 8);
}
-#define CVMX_CIU_NMI (CVMX_ADD_IO_SEG(0x0001070000000718ull))
-#define CVMX_CIU_PCI_INTA (CVMX_ADD_IO_SEG(0x0001070000000750ull))
-#define CVMX_CIU_PP_BIST_STAT (CVMX_ADD_IO_SEG(0x00010700000007E0ull))
-#define CVMX_CIU_PP_DBG (CVMX_ADD_IO_SEG(0x0001070000000708ull))
-static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset)
+static inline uint64_t CVMX_CIU_PP_POKEX(unsigned int coreid)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100200ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x100100200, coreid, 0x0F, 8);
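+/*
+ * CNF75XX/CN73XX/CN78XX host these per-core registers in the
+ * 0x000101... I/O segment rather than the CIU's 0x000107... one,
+ * hence the 0x60000000000ull correction in the next case:
+ * 0x0001070000030000 - 0x60000000000 = 0x0001010000030000, the
+ * address removed below.
+ */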
case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001010000030000ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x000030000, coreid, 0x0F, 8) -
+ 0x60000000000ull;
+ default:
+ return CVMX_CIU_ADDR(0x000000580, coreid, 0x0F, 8);
}
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
}
-#define CVMX_CIU_PP_RST (CVMX_ADD_IO_SEG(0x0001070000000700ull))
-#define CVMX_CIU_QLM0 (CVMX_ADD_IO_SEG(0x0001070000000780ull))
-#define CVMX_CIU_QLM1 (CVMX_ADD_IO_SEG(0x0001070000000788ull))
-#define CVMX_CIU_QLM2 (CVMX_ADD_IO_SEG(0x0001070000000790ull))
-#define CVMX_CIU_QLM3 (CVMX_ADD_IO_SEG(0x0001070000000798ull))
-#define CVMX_CIU_QLM4 (CVMX_ADD_IO_SEG(0x00010700000007A0ull))
-#define CVMX_CIU_QLM_DCOK (CVMX_ADD_IO_SEG(0x0001070000000760ull))
-#define CVMX_CIU_QLM_JTGC (CVMX_ADD_IO_SEG(0x0001070000000768ull))
-#define CVMX_CIU_QLM_JTGD (CVMX_ADD_IO_SEG(0x0001070000000770ull))
-#define CVMX_CIU_SOFT_BIST (CVMX_ADD_IO_SEG(0x0001070000000738ull))
-#define CVMX_CIU_SOFT_PRST (CVMX_ADD_IO_SEG(0x0001070000000748ull))
-#define CVMX_CIU_SOFT_PRST1 (CVMX_ADD_IO_SEG(0x0001070000000758ull))
-#define CVMX_CIU_SOFT_PRST2 (CVMX_ADD_IO_SEG(0x00010700000007D8ull))
-#define CVMX_CIU_SOFT_PRST3 (CVMX_ADD_IO_SEG(0x00010700000007E0ull))
-#define CVMX_CIU_SOFT_RST (CVMX_ADD_IO_SEG(0x0001070000000740ull))
-#define CVMX_CIU_SUM1_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070000008600ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_SUM1_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070000008000ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM1_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070000008200ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM1_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070000008400ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070000008E00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_SUM2_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070000008800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070000008A00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070000008C00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001070000000480ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_TIM_MULTI_CAST (CVMX_ADD_IO_SEG(0x000107000000C200ull))
-static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset)
+static inline uint64_t CVMX_CIU_WDOGX(unsigned int coreid)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100000ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x100100000, coreid, 0x0F, 8);
case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001010000020000ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x000020000, coreid, 0x0F, 8) -
+ 0x60000000000ull;
+ default:
+ return CVMX_CIU_ADDR(0x000000500, coreid, 0x0F, 8);
}
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
}
-union cvmx_ciu_bist {
- uint64_t u64;
- struct cvmx_ciu_bist_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_7_63:57;
- uint64_t bist:7;
-#else
- uint64_t bist:7;
- uint64_t reserved_7_63:57;
-#endif
- } s;
- struct cvmx_ciu_bist_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t bist:4;
-#else
- uint64_t bist:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn30xx;
- struct cvmx_ciu_bist_cn30xx cn31xx;
- struct cvmx_ciu_bist_cn30xx cn38xx;
- struct cvmx_ciu_bist_cn30xx cn38xxp2;
- struct cvmx_ciu_bist_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t bist:2;
-#else
- uint64_t bist:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn50xx;
- struct cvmx_ciu_bist_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t bist:3;
-#else
- uint64_t bist:3;
- uint64_t reserved_3_63:61;
-#endif
- } cn52xx;
- struct cvmx_ciu_bist_cn52xx cn52xxp1;
- struct cvmx_ciu_bist_cn30xx cn56xx;
- struct cvmx_ciu_bist_cn30xx cn56xxp1;
- struct cvmx_ciu_bist_cn30xx cn58xx;
- struct cvmx_ciu_bist_cn30xx cn58xxp1;
- struct cvmx_ciu_bist_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t bist:6;
-#else
- uint64_t bist:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn61xx;
- struct cvmx_ciu_bist_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t bist:5;
-#else
- uint64_t bist:5;
- uint64_t reserved_5_63:59;
-#endif
- } cn63xx;
- struct cvmx_ciu_bist_cn63xx cn63xxp1;
- struct cvmx_ciu_bist_cn61xx cn66xx;
- struct cvmx_ciu_bist_s cn68xx;
- struct cvmx_ciu_bist_s cn68xxp1;
- struct cvmx_ciu_bist_cn61xx cnf71xx;
-};
-
-union cvmx_ciu_block_int {
- uint64_t u64;
- struct cvmx_ciu_block_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_43_59:17;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t dfm:1;
- uint64_t reserved_34_39:6;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t reserved_31_31:1;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_24_24:1;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t gmx1:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t asxpcs1:1;
- uint64_t reserved_24_24:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_31:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t reserved_34_39:6;
- uint64_t dfm:1;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_59:17;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_ciu_block_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t reserved_31_40:10;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_24_24:1;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t gmx1:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t asxpcs1:1;
- uint64_t reserved_24_24:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_40:10;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_63:21;
-#endif
- } cn61xx;
- struct cvmx_ciu_block_int_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t dfm:1;
- uint64_t reserved_34_39:6;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t reserved_31_31:1;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_23_24:2;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t reserved_2_2:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t reserved_2_2:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_23_24:2;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_31:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t reserved_34_39:6;
- uint64_t dfm:1;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_63:21;
-#endif
- } cn63xx;
- struct cvmx_ciu_block_int_cn63xx cn63xxp1;
- struct cvmx_ciu_block_int_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_43_59:17;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t dfm:1;
- uint64_t reserved_33_39:7;
- uint64_t srio0:1;
- uint64_t reserved_31_31:1;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_24_24:1;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t gmx1:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t asxpcs1:1;
- uint64_t reserved_24_24:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_31:1;
- uint64_t srio0:1;
- uint64_t reserved_33_39:7;
- uint64_t dfm:1;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_59:17;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_63:2;
-#endif
- } cn66xx;
- struct cvmx_ciu_block_int_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t reserved_31_40:10;
- uint64_t iob:1;
- uint64_t reserved_27_29:3;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_23_24:2;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_6_8:3;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t reserved_2_2:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t reserved_2_2:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t reserved_6_8:3;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_23_24:2;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_29:3;
- uint64_t iob:1;
- uint64_t reserved_31_40:10;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_63:21;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_dint {
- uint64_t u64;
- struct cvmx_ciu_dint_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t dint:32;
-#else
- uint64_t dint:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_dint_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t dint:1;
-#else
- uint64_t dint:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_dint_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t dint:2;
-#else
- uint64_t dint:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_dint_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t dint:16;
-#else
- uint64_t dint:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_dint_cn38xx cn38xxp2;
- struct cvmx_ciu_dint_cn31xx cn50xx;
- struct cvmx_ciu_dint_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t dint:4;
-#else
- uint64_t dint:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_dint_cn52xx cn52xxp1;
- struct cvmx_ciu_dint_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t dint:12;
-#else
- uint64_t dint:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_dint_cn56xx cn56xxp1;
- struct cvmx_ciu_dint_cn38xx cn58xx;
- struct cvmx_ciu_dint_cn38xx cn58xxp1;
- struct cvmx_ciu_dint_cn52xx cn61xx;
- struct cvmx_ciu_dint_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t dint:6;
-#else
- uint64_t dint:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_dint_cn63xx cn63xxp1;
- struct cvmx_ciu_dint_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t dint:10;
-#else
- uint64_t dint:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_dint_s cn68xx;
- struct cvmx_ciu_dint_s cn68xxp1;
- struct cvmx_ciu_dint_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int {
- uint64_t u64;
- struct cvmx_ciu_en2_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_iox_int_cn61xx cn66xx;
- struct cvmx_ciu_en2_iox_int_s cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_iox_int_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_iox_int_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_iox_int_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_iox_int_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_iox_int_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_iox_int_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_iox_int_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_iox_int_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip2_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip2_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip2_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip2_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip2_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip2_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip2_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip2_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip2_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip2_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip3_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip3_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip3_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip3_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip3_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip3_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip3_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip3_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip3_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip3_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip4_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip4_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip4_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip4_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip4_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip4_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip4_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip4_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip4_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip4_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_fuse {
- uint64_t u64;
- struct cvmx_ciu_fuse_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t fuse:32;
-#else
- uint64_t fuse:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_fuse_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t fuse:1;
-#else
- uint64_t fuse:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_fuse_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t fuse:2;
-#else
- uint64_t fuse:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_fuse_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t fuse:16;
-#else
- uint64_t fuse:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_fuse_cn38xx cn38xxp2;
- struct cvmx_ciu_fuse_cn31xx cn50xx;
- struct cvmx_ciu_fuse_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t fuse:4;
-#else
- uint64_t fuse:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_fuse_cn52xx cn52xxp1;
- struct cvmx_ciu_fuse_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t fuse:12;
-#else
- uint64_t fuse:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_fuse_cn56xx cn56xxp1;
- struct cvmx_ciu_fuse_cn38xx cn58xx;
- struct cvmx_ciu_fuse_cn38xx cn58xxp1;
- struct cvmx_ciu_fuse_cn52xx cn61xx;
- struct cvmx_ciu_fuse_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t fuse:6;
-#else
- uint64_t fuse:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_fuse_cn63xx cn63xxp1;
- struct cvmx_ciu_fuse_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t fuse:10;
-#else
- uint64_t fuse:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_fuse_s cn68xx;
- struct cvmx_ciu_fuse_s cn68xxp1;
- struct cvmx_ciu_fuse_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_gstop {
- uint64_t u64;
- struct cvmx_ciu_gstop_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t gstop:1;
-#else
- uint64_t gstop:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_gstop_s cn30xx;
- struct cvmx_ciu_gstop_s cn31xx;
- struct cvmx_ciu_gstop_s cn38xx;
- struct cvmx_ciu_gstop_s cn38xxp2;
- struct cvmx_ciu_gstop_s cn50xx;
- struct cvmx_ciu_gstop_s cn52xx;
- struct cvmx_ciu_gstop_s cn52xxp1;
- struct cvmx_ciu_gstop_s cn56xx;
- struct cvmx_ciu_gstop_s cn56xxp1;
- struct cvmx_ciu_gstop_s cn58xx;
- struct cvmx_ciu_gstop_s cn58xxp1;
- struct cvmx_ciu_gstop_s cn61xx;
- struct cvmx_ciu_gstop_s cn63xx;
- struct cvmx_ciu_gstop_s cn63xxp1;
- struct cvmx_ciu_gstop_s cn66xx;
- struct cvmx_ciu_gstop_s cn68xx;
- struct cvmx_ciu_gstop_s cn68xxp1;
- struct cvmx_ciu_gstop_s cnf71xx;
-};
-
-union cvmx_ciu_intx_en0 {
- uint64_t u64;
- struct cvmx_ciu_intx_en0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn30xx;
- struct cvmx_ciu_intx_en0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn31xx;
- struct cvmx_ciu_intx_en0_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn38xx;
- struct cvmx_ciu_intx_en0_cn38xx cn38xxp2;
- struct cvmx_ciu_intx_en0_cn30xx cn50xx;
- struct cvmx_ciu_intx_en0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en0_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_en0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en0_cn38xx cn58xx;
- struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
- struct cvmx_ciu_intx_en0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en0_cn52xx cn63xx;
- struct cvmx_ciu_intx_en0_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en0_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en0_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en0_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en0_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en0_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en0_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xx;
- struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en0_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en0_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en0_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en0_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en0_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en0_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en0_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en0_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xx;
- struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en0_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en0_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en1 {
- uint64_t u64;
- struct cvmx_ciu_intx_en1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en1_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t wdog:1;
-#else
- uint64_t wdog:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_intx_en1_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t wdog:2;
-#else
- uint64_t wdog:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_intx_en1_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_intx_en1_cn38xx cn38xxp2;
- struct cvmx_ciu_intx_en1_cn31xx cn50xx;
- struct cvmx_ciu_intx_en1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xxp1;
- struct cvmx_ciu_intx_en1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en1_cn38xx cn58xx;
- struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
- struct cvmx_ciu_intx_en1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en1_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
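Each union above mirrors its field list under #ifdef __BIG_ENDIAN_BITFIELD so that the same 64-bit register decodes identically on big- and little-endian compilers; the two orders must stay exact reverses and every layout, generic or per-model, must still total 64 bits. A self-contained compile-time check makes a miscounted field width fail the build:

/*
 * Hedged sketch: a C11 compile-time size check (kernel code would more
 * likely use BUILD_BUG_ON) that catches a bitfield layout drifting away
 * from the 64-bit register width.
 */
_Static_assert(sizeof(union cvmx_ciu_intx_en1) == sizeof(uint64_t),
	       "CIU_INTX_EN1 bitfield layout must stay 64 bits wide");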
-union cvmx_ciu_intx_en1_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en1_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en1_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en1_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en1_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en1_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en1_w1c_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en1_w1c_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en1_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en1_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en1_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en1_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en1_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en1_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en1_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en1_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en1_w1s_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en1_w1s_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en1_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en1_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
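CIU_INT*_EN1 keeps the per-core watchdog enables in its low bits: wdog is 16 bits wide in the generic view and narrower in the per-model views that reserve the unimplemented lanes. A sketch of unmasking one core's watchdog through the W1S alias; CVMX_CIU_INTX_EN1_W1S() is assumed from the matching cvmx header, not shown in this diff.

/*
 * Hedged sketch: enable the watchdog interrupt for one core (core < 16
 * in the generic view) by setting its lane in the wdog field through
 * the write-one-to-set alias.  CVMX_CIU_INTX_EN1_W1S() is assumed from
 * the companion cvmx header.
 */
static void ciu_en1_unmask_wdog(unsigned int core)
{
	union cvmx_ciu_intx_en1_w1s w1s;

	w1s.u64 = 0;
	w1s.s.wdog = 1ull << core;	/* one W1S lane per core */
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(0), w1s.u64);
}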
-union cvmx_ciu_intx_en4_0 {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_0_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn50xx;
- struct cvmx_ciu_intx_en4_0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_0_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_en4_0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_0_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en4_0_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
- struct cvmx_ciu_intx_en4_0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_0_cn52xx cn63xx;
- struct cvmx_ciu_intx_en4_0_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en4_0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
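Alongside the generic .s view, every union carries per-model views (.cn52xx, .cn61xx, .cnf71xx, ...) that alias the same storage but track which bits a given chip actually implements; code that must honour a model's reserved bits can select the matching view at runtime. A sketch under the assumption that the OCTEON_IS_MODEL() predicate and OCTEON_CN61XX identifier come from the standard octeon-model.h, which is outside this diff:

/*
 * Hedged sketch: decode the timer enables through the view matching the
 * running model.  OCTEON_IS_MODEL() and OCTEON_CN61XX are assumed from
 * octeon-model.h; they are not part of this diff.
 */
static uint64_t ciu_en4_0_timer_bits(uint64_t raw)
{
	union cvmx_ciu_intx_en4_0 en;

	en.u64 = raw;
	if (OCTEON_IS_MODEL(OCTEON_CN61XX))
		return en.cn61xx.timer;	/* cn61xx-specific layout */
	return en.s.timer;		/* generic fallback view  */
}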
-union cvmx_ciu_intx_en4_0_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_0_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_0_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en4_0_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_0_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_0_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_0_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_0_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en4_0_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_0_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_1 {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_1_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t wdog:2;
-#else
- uint64_t wdog:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn50xx;
- struct cvmx_ciu_intx_en4_1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xxp1;
- struct cvmx_ciu_intx_en4_1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_1_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en4_1_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
- struct cvmx_ciu_intx_en4_1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en4_1_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en4_1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_1_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_1_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_1_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en4_1_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_1_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_1_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_1_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_1_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en4_1_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_1_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_sum0 {
- uint64_t u64;
- struct cvmx_ciu_intx_sum0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_sum0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn30xx;
- struct cvmx_ciu_intx_sum0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn31xx;
- struct cvmx_ciu_intx_sum0_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn38xx;
- struct cvmx_ciu_intx_sum0_cn38xx cn38xxp2;
- struct cvmx_ciu_intx_sum0_cn30xx cn50xx;
- struct cvmx_ciu_intx_sum0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_sum0_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_sum0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_sum0_cn38xx cn58xx;
- struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1;
- struct cvmx_ciu_intx_sum0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_sum0_cn52xx cn63xx;
- struct cvmx_ciu_intx_sum0_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_sum0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_sum0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_sum4 {
- uint64_t u64;
- struct cvmx_ciu_intx_sum4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_sum4_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn50xx;
- struct cvmx_ciu_intx_sum4_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_sum4_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_sum4_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_sum4_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_sum4_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1;
- struct cvmx_ciu_intx_sum4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_sum4_cn52xx cn63xx;
- struct cvmx_ciu_intx_sum4_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_sum4_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_sum4_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_int33_sum0 {
- uint64_t u64;
- struct cvmx_ciu_int33_sum0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_int33_sum0_s cn61xx;
- struct cvmx_ciu_int33_sum0_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_int33_sum0_cn63xx cn63xxp1;
- struct cvmx_ciu_int33_sum0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_int33_sum0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_int_dbg_sel {
- uint64_t u64;
- struct cvmx_ciu_int_dbg_sel_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t sel:3;
- uint64_t reserved_10_15:6;
- uint64_t irq:2;
- uint64_t reserved_5_7:3;
- uint64_t pp:5;
-#else
- uint64_t pp:5;
- uint64_t reserved_5_7:3;
- uint64_t irq:2;
- uint64_t reserved_10_15:6;
- uint64_t sel:3;
- uint64_t reserved_19_63:45;
-#endif
- } s;
- struct cvmx_ciu_int_dbg_sel_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t sel:3;
- uint64_t reserved_10_15:6;
- uint64_t irq:2;
- uint64_t reserved_4_7:4;
- uint64_t pp:4;
-#else
- uint64_t pp:4;
- uint64_t reserved_4_7:4;
- uint64_t irq:2;
- uint64_t reserved_10_15:6;
- uint64_t sel:3;
- uint64_t reserved_19_63:45;
-#endif
- } cn61xx;
- struct cvmx_ciu_int_dbg_sel_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t sel:3;
- uint64_t reserved_10_15:6;
- uint64_t irq:2;
- uint64_t reserved_3_7:5;
- uint64_t pp:3;
-#else
- uint64_t pp:3;
- uint64_t reserved_3_7:5;
- uint64_t irq:2;
- uint64_t reserved_10_15:6;
- uint64_t sel:3;
- uint64_t reserved_19_63:45;
-#endif
- } cn63xx;
- struct cvmx_ciu_int_dbg_sel_cn61xx cn66xx;
- struct cvmx_ciu_int_dbg_sel_s cn68xx;
- struct cvmx_ciu_int_dbg_sel_s cn68xxp1;
- struct cvmx_ciu_int_dbg_sel_cn61xx cnf71xx;
-};
-
-union cvmx_ciu_int_sum1 {
- uint64_t u64;
- struct cvmx_ciu_int_sum1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_int_sum1_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t wdog:1;
-#else
- uint64_t wdog:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_int_sum1_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t wdog:2;
-#else
- uint64_t wdog:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_int_sum1_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_int_sum1_cn38xx cn38xxp2;
- struct cvmx_ciu_int_sum1_cn31xx cn50xx;
- struct cvmx_ciu_int_sum1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_int_sum1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xxp1;
- struct cvmx_ciu_int_sum1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_int_sum1_cn56xx cn56xxp1;
- struct cvmx_ciu_int_sum1_cn38xx cn58xx;
- struct cvmx_ciu_int_sum1_cn38xx cn58xxp1;
- struct cvmx_ciu_int_sum1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_int_sum1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_int_sum1_cn63xx cn63xxp1;
- struct cvmx_ciu_int_sum1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_int_sum1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_37_46:10;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_46:10;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_mbox_clrx {
- uint64_t u64;
- struct cvmx_ciu_mbox_clrx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bits:32;
-#else
- uint64_t bits:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_mbox_clrx_s cn30xx;
- struct cvmx_ciu_mbox_clrx_s cn31xx;
- struct cvmx_ciu_mbox_clrx_s cn38xx;
- struct cvmx_ciu_mbox_clrx_s cn38xxp2;
- struct cvmx_ciu_mbox_clrx_s cn50xx;
- struct cvmx_ciu_mbox_clrx_s cn52xx;
- struct cvmx_ciu_mbox_clrx_s cn52xxp1;
- struct cvmx_ciu_mbox_clrx_s cn56xx;
- struct cvmx_ciu_mbox_clrx_s cn56xxp1;
- struct cvmx_ciu_mbox_clrx_s cn58xx;
- struct cvmx_ciu_mbox_clrx_s cn58xxp1;
- struct cvmx_ciu_mbox_clrx_s cn61xx;
- struct cvmx_ciu_mbox_clrx_s cn63xx;
- struct cvmx_ciu_mbox_clrx_s cn63xxp1;
- struct cvmx_ciu_mbox_clrx_s cn66xx;
- struct cvmx_ciu_mbox_clrx_s cn68xx;
- struct cvmx_ciu_mbox_clrx_s cn68xxp1;
- struct cvmx_ciu_mbox_clrx_s cnf71xx;
-};
-
-union cvmx_ciu_mbox_setx {
- uint64_t u64;
- struct cvmx_ciu_mbox_setx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bits:32;
-#else
- uint64_t bits:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_mbox_setx_s cn30xx;
- struct cvmx_ciu_mbox_setx_s cn31xx;
- struct cvmx_ciu_mbox_setx_s cn38xx;
- struct cvmx_ciu_mbox_setx_s cn38xxp2;
- struct cvmx_ciu_mbox_setx_s cn50xx;
- struct cvmx_ciu_mbox_setx_s cn52xx;
- struct cvmx_ciu_mbox_setx_s cn52xxp1;
- struct cvmx_ciu_mbox_setx_s cn56xx;
- struct cvmx_ciu_mbox_setx_s cn56xxp1;
- struct cvmx_ciu_mbox_setx_s cn58xx;
- struct cvmx_ciu_mbox_setx_s cn58xxp1;
- struct cvmx_ciu_mbox_setx_s cn61xx;
- struct cvmx_ciu_mbox_setx_s cn63xx;
- struct cvmx_ciu_mbox_setx_s cn63xxp1;
- struct cvmx_ciu_mbox_setx_s cn66xx;
- struct cvmx_ciu_mbox_setx_s cn68xx;
- struct cvmx_ciu_mbox_setx_s cn68xxp1;
- struct cvmx_ciu_mbox_setx_s cnf71xx;
-};
-
-union cvmx_ciu_nmi {
- uint64_t u64;
- struct cvmx_ciu_nmi_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t nmi:32;
-#else
- uint64_t nmi:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_nmi_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t nmi:1;
-#else
- uint64_t nmi:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_nmi_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t nmi:2;
-#else
- uint64_t nmi:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_nmi_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t nmi:16;
-#else
- uint64_t nmi:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_nmi_cn38xx cn38xxp2;
- struct cvmx_ciu_nmi_cn31xx cn50xx;
- struct cvmx_ciu_nmi_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t nmi:4;
-#else
- uint64_t nmi:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_nmi_cn52xx cn52xxp1;
- struct cvmx_ciu_nmi_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t nmi:12;
-#else
- uint64_t nmi:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_nmi_cn56xx cn56xxp1;
- struct cvmx_ciu_nmi_cn38xx cn58xx;
- struct cvmx_ciu_nmi_cn38xx cn58xxp1;
- struct cvmx_ciu_nmi_cn52xx cn61xx;
- struct cvmx_ciu_nmi_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t nmi:6;
-#else
- uint64_t nmi:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_nmi_cn63xx cn63xxp1;
- struct cvmx_ciu_nmi_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t nmi:10;
-#else
- uint64_t nmi:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_nmi_s cn68xx;
- struct cvmx_ciu_nmi_s cn68xxp1;
- struct cvmx_ciu_nmi_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_pci_inta {
- uint64_t u64;
- struct cvmx_ciu_pci_inta_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t intr:2;
-#else
- uint64_t intr:2;
- uint64_t reserved_2_63:62;
-#endif
- } s;
- struct cvmx_ciu_pci_inta_s cn30xx;
- struct cvmx_ciu_pci_inta_s cn31xx;
- struct cvmx_ciu_pci_inta_s cn38xx;
- struct cvmx_ciu_pci_inta_s cn38xxp2;
- struct cvmx_ciu_pci_inta_s cn50xx;
- struct cvmx_ciu_pci_inta_s cn52xx;
- struct cvmx_ciu_pci_inta_s cn52xxp1;
- struct cvmx_ciu_pci_inta_s cn56xx;
- struct cvmx_ciu_pci_inta_s cn56xxp1;
- struct cvmx_ciu_pci_inta_s cn58xx;
- struct cvmx_ciu_pci_inta_s cn58xxp1;
- struct cvmx_ciu_pci_inta_s cn61xx;
- struct cvmx_ciu_pci_inta_s cn63xx;
- struct cvmx_ciu_pci_inta_s cn63xxp1;
- struct cvmx_ciu_pci_inta_s cn66xx;
- struct cvmx_ciu_pci_inta_s cn68xx;
- struct cvmx_ciu_pci_inta_s cn68xxp1;
- struct cvmx_ciu_pci_inta_s cnf71xx;
-};
-
-union cvmx_ciu_pp_bist_stat {
- uint64_t u64;
- struct cvmx_ciu_pp_bist_stat_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t pp_bist:32;
-#else
- uint64_t pp_bist:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_pp_bist_stat_s cn68xx;
- struct cvmx_ciu_pp_bist_stat_s cn68xxp1;
-};
-
-union cvmx_ciu_pp_dbg {
- uint64_t u64;
- struct cvmx_ciu_pp_dbg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t ppdbg:32;
-#else
- uint64_t ppdbg:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_pp_dbg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t ppdbg:1;
-#else
- uint64_t ppdbg:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_pp_dbg_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t ppdbg:2;
-#else
- uint64_t ppdbg:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_pp_dbg_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t ppdbg:16;
-#else
- uint64_t ppdbg:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_pp_dbg_cn38xx cn38xxp2;
- struct cvmx_ciu_pp_dbg_cn31xx cn50xx;
- struct cvmx_ciu_pp_dbg_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t ppdbg:4;
-#else
- uint64_t ppdbg:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_pp_dbg_cn52xx cn52xxp1;
- struct cvmx_ciu_pp_dbg_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t ppdbg:12;
-#else
- uint64_t ppdbg:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1;
- struct cvmx_ciu_pp_dbg_cn38xx cn58xx;
- struct cvmx_ciu_pp_dbg_cn38xx cn58xxp1;
- struct cvmx_ciu_pp_dbg_cn52xx cn61xx;
- struct cvmx_ciu_pp_dbg_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t ppdbg:6;
-#else
- uint64_t ppdbg:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_pp_dbg_cn63xx cn63xxp1;
- struct cvmx_ciu_pp_dbg_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t ppdbg:10;
-#else
- uint64_t ppdbg:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_pp_dbg_s cn68xx;
- struct cvmx_ciu_pp_dbg_s cn68xxp1;
- struct cvmx_ciu_pp_dbg_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_pp_pokex {
- uint64_t u64;
- struct cvmx_ciu_pp_pokex_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t poke:64;
-#else
- uint64_t poke:64;
-#endif
- } s;
- struct cvmx_ciu_pp_pokex_s cn30xx;
- struct cvmx_ciu_pp_pokex_s cn31xx;
- struct cvmx_ciu_pp_pokex_s cn38xx;
- struct cvmx_ciu_pp_pokex_s cn38xxp2;
- struct cvmx_ciu_pp_pokex_s cn50xx;
- struct cvmx_ciu_pp_pokex_s cn52xx;
- struct cvmx_ciu_pp_pokex_s cn52xxp1;
- struct cvmx_ciu_pp_pokex_s cn56xx;
- struct cvmx_ciu_pp_pokex_s cn56xxp1;
- struct cvmx_ciu_pp_pokex_s cn58xx;
- struct cvmx_ciu_pp_pokex_s cn58xxp1;
- struct cvmx_ciu_pp_pokex_s cn61xx;
- struct cvmx_ciu_pp_pokex_s cn63xx;
- struct cvmx_ciu_pp_pokex_s cn63xxp1;
- struct cvmx_ciu_pp_pokex_s cn66xx;
- struct cvmx_ciu_pp_pokex_s cn68xx;
- struct cvmx_ciu_pp_pokex_s cn68xxp1;
- struct cvmx_ciu_pp_pokex_s cnf71xx;
-};
-
-union cvmx_ciu_pp_rst {
- uint64_t u64;
- struct cvmx_ciu_pp_rst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t rst:31;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:31;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_pp_rst_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_pp_rst_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t rst:1;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:1;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_pp_rst_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t rst:15;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:15;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_pp_rst_cn38xx cn38xxp2;
- struct cvmx_ciu_pp_rst_cn31xx cn50xx;
- struct cvmx_ciu_pp_rst_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t rst:3;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:3;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_pp_rst_cn52xx cn52xxp1;
- struct cvmx_ciu_pp_rst_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t rst:11;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:11;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_pp_rst_cn56xx cn56xxp1;
- struct cvmx_ciu_pp_rst_cn38xx cn58xx;
- struct cvmx_ciu_pp_rst_cn38xx cn58xxp1;
- struct cvmx_ciu_pp_rst_cn52xx cn61xx;
- struct cvmx_ciu_pp_rst_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t rst:5;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:5;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_pp_rst_cn63xx cn63xxp1;
- struct cvmx_ciu_pp_rst_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t rst:9;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:9;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_pp_rst_s cn68xx;
- struct cvmx_ciu_pp_rst_s cn68xxp1;
- struct cvmx_ciu_pp_rst_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_qlm0 {
- uint64_t u64;
- struct cvmx_ciu_qlm0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm0_s cn61xx;
- struct cvmx_ciu_qlm0_s cn63xx;
- struct cvmx_ciu_qlm0_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_20_30:11;
- uint64_t txdeemph:4;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:4;
- uint64_t reserved_20_30:11;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn63xxp1;
- struct cvmx_ciu_qlm0_s cn66xx;
- struct cvmx_ciu_qlm0_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn68xx;
- struct cvmx_ciu_qlm0_cn68xx cn68xxp1;
- struct cvmx_ciu_qlm0_s cnf71xx;
-};
-
-union cvmx_ciu_qlm1 {
- uint64_t u64;
- struct cvmx_ciu_qlm1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm1_s cn61xx;
- struct cvmx_ciu_qlm1_s cn63xx;
- struct cvmx_ciu_qlm1_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_20_30:11;
- uint64_t txdeemph:4;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:4;
- uint64_t reserved_20_30:11;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn63xxp1;
- struct cvmx_ciu_qlm1_s cn66xx;
- struct cvmx_ciu_qlm1_s cn68xx;
- struct cvmx_ciu_qlm1_s cn68xxp1;
- struct cvmx_ciu_qlm1_s cnf71xx;
-};
-
-union cvmx_ciu_qlm2 {
- uint64_t u64;
- struct cvmx_ciu_qlm2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn61xx;
- struct cvmx_ciu_qlm2_cn61xx cn63xx;
- struct cvmx_ciu_qlm2_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_20_30:11;
- uint64_t txdeemph:4;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:4;
- uint64_t reserved_20_30:11;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn63xxp1;
- struct cvmx_ciu_qlm2_cn61xx cn66xx;
- struct cvmx_ciu_qlm2_s cn68xx;
- struct cvmx_ciu_qlm2_s cn68xxp1;
- struct cvmx_ciu_qlm2_cn61xx cnf71xx;
-};
-union cvmx_ciu_qlm3 {
+union cvmx_ciu_qlm {
uint64_t u64;
- struct cvmx_ciu_qlm3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
+	struct cvmx_ciu_qlm_s {
+		__BITFIELD_FIELD(uint64_t g2bypass:1,
+		__BITFIELD_FIELD(uint64_t reserved_53_62:10,
+		__BITFIELD_FIELD(uint64_t g2deemph:5,
+		__BITFIELD_FIELD(uint64_t reserved_45_47:3,
+		__BITFIELD_FIELD(uint64_t g2margin:5,
+		__BITFIELD_FIELD(uint64_t reserved_32_39:8,
+		__BITFIELD_FIELD(uint64_t txbypass:1,
+		__BITFIELD_FIELD(uint64_t reserved_21_30:10,
+		__BITFIELD_FIELD(uint64_t txdeemph:5,
+		__BITFIELD_FIELD(uint64_t reserved_13_15:3,
+		__BITFIELD_FIELD(uint64_t txmargin:5,
+		__BITFIELD_FIELD(uint64_t reserved_4_7:4,
+		__BITFIELD_FIELD(uint64_t lane_en:4,
+		;)))))))))))))
} s;
- struct cvmx_ciu_qlm3_s cn68xx;
- struct cvmx_ciu_qlm3_s cn68xxp1;
-};
-
-union cvmx_ciu_qlm4 {
- uint64_t u64;
- struct cvmx_ciu_qlm4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm4_s cn68xx;
- struct cvmx_ciu_qlm4_s cn68xxp1;
-};
-
-union cvmx_ciu_qlm_dcok {
- uint64_t u64;
- struct cvmx_ciu_qlm_dcok_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t qlm_dcok:4;
-#else
- uint64_t qlm_dcok:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu_qlm_dcok_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t qlm_dcok:2;
-#else
- uint64_t qlm_dcok:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn52xx;
- struct cvmx_ciu_qlm_dcok_cn52xx cn52xxp1;
- struct cvmx_ciu_qlm_dcok_s cn56xx;
- struct cvmx_ciu_qlm_dcok_s cn56xxp1;
};
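The union just closed is the first of the converted blocks, so a note on the helper: __BITFIELD_FIELD(field, more) lists each field once, most-significant first, and expands to the correct declaration order for whichever of __BIG_ENDIAN_BITFIELD / __LITTLE_ENDIAN_BITFIELD is defined; the terminating ";)))..." line supplies the innermost `more` argument plus one closing parenthesis per nested invocation. A sketch of the MIPS definition (it lives in arch/mips/include/uapi/asm/bitfield.h; the text below is reproduced from memory and trimmed, so treat it as illustrative):

#if defined(__BIG_ENDIAN_BITFIELD)
#define __BITFIELD_FIELD(field, more)	\
	field;				\
	more

#elif defined(__LITTLE_ENDIAN_BITFIELD)
#define __BITFIELD_FIELD(field, more)	\
	more				\
	field;

#else
#error "Neither __BIG_ENDIAN_BITFIELD nor __LITTLE_ENDIAN_BITFIELD is defined"
#endif

/*
 * Example expansion for a two-field register:
 *
 *	__BITFIELD_FIELD(uint64_t hi:32,
 *	__BITFIELD_FIELD(uint64_t lo:32,
 *	;))
 *
 * big-endian bitfields:    uint64_t hi:32; uint64_t lo:32;
 * little-endian bitfields: uint64_t lo:32; uint64_t hi:32;
 *
 * (The innermost ";" also leaves a harmless empty declaration.)
 */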
union cvmx_ciu_qlm_jtgc {
uint64_t u64;
struct cvmx_ciu_qlm_jtgc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t bypass_ext:1;
- uint64_t reserved_11_15:5;
- uint64_t clk_div:3;
- uint64_t reserved_7_7:1;
- uint64_t mux_sel:3;
- uint64_t bypass:4;
-#else
- uint64_t bypass:4;
- uint64_t mux_sel:3;
- uint64_t reserved_7_7:1;
- uint64_t clk_div:3;
- uint64_t reserved_11_15:5;
- uint64_t bypass_ext:1;
- uint64_t reserved_17_63:47;
-#endif
+		__BITFIELD_FIELD(uint64_t reserved_17_63:47,
+		__BITFIELD_FIELD(uint64_t bypass_ext:1,
+		__BITFIELD_FIELD(uint64_t reserved_11_15:5,
+		__BITFIELD_FIELD(uint64_t clk_div:3,
+		__BITFIELD_FIELD(uint64_t reserved_7_7:1,
+		__BITFIELD_FIELD(uint64_t mux_sel:3,
+		__BITFIELD_FIELD(uint64_t bypass:4,
+		;)))))))
} s;
- struct cvmx_ciu_qlm_jtgc_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_11_63:53;
- uint64_t clk_div:3;
- uint64_t reserved_5_7:3;
- uint64_t mux_sel:1;
- uint64_t reserved_2_3:2;
- uint64_t bypass:2;
-#else
- uint64_t bypass:2;
- uint64_t reserved_2_3:2;
- uint64_t mux_sel:1;
- uint64_t reserved_5_7:3;
- uint64_t clk_div:3;
- uint64_t reserved_11_63:53;
-#endif
- } cn52xx;
- struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1;
- struct cvmx_ciu_qlm_jtgc_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_11_63:53;
- uint64_t clk_div:3;
- uint64_t reserved_6_7:2;
- uint64_t mux_sel:2;
- uint64_t bypass:4;
-#else
- uint64_t bypass:4;
- uint64_t mux_sel:2;
- uint64_t reserved_6_7:2;
- uint64_t clk_div:3;
- uint64_t reserved_11_63:53;
-#endif
- } cn56xx;
- struct cvmx_ciu_qlm_jtgc_cn56xx cn56xxp1;
- struct cvmx_ciu_qlm_jtgc_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_11_63:53;
- uint64_t clk_div:3;
- uint64_t reserved_6_7:2;
- uint64_t mux_sel:2;
- uint64_t reserved_3_3:1;
- uint64_t bypass:3;
-#else
- uint64_t bypass:3;
- uint64_t reserved_3_3:1;
- uint64_t mux_sel:2;
- uint64_t reserved_6_7:2;
- uint64_t clk_div:3;
- uint64_t reserved_11_63:53;
-#endif
- } cn61xx;
- struct cvmx_ciu_qlm_jtgc_cn61xx cn63xx;
- struct cvmx_ciu_qlm_jtgc_cn61xx cn63xxp1;
- struct cvmx_ciu_qlm_jtgc_cn61xx cn66xx;
- struct cvmx_ciu_qlm_jtgc_s cn68xx;
- struct cvmx_ciu_qlm_jtgc_s cn68xxp1;
- struct cvmx_ciu_qlm_jtgc_cn61xx cnf71xx;
};
union cvmx_ciu_qlm_jtgd {
uint64_t u64;
struct cvmx_ciu_qlm_jtgd_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_45_60:16;
- uint64_t select:5;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:5;
- uint64_t reserved_45_60:16;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } s;
- struct cvmx_ciu_qlm_jtgd_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_42_60:19;
- uint64_t select:2;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:2;
- uint64_t reserved_42_60:19;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_qlm_jtgd_cn52xx cn52xxp1;
- struct cvmx_ciu_qlm_jtgd_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_44_60:17;
- uint64_t select:4;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:4;
- uint64_t reserved_44_60:17;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_qlm_jtgd_cn56xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_37_60:24;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_60:24;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn56xxp1;
- struct cvmx_ciu_qlm_jtgd_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_43_60:18;
- uint64_t select:3;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:3;
- uint64_t reserved_43_60:18;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_qlm_jtgd_cn61xx cn63xx;
- struct cvmx_ciu_qlm_jtgd_cn61xx cn63xxp1;
- struct cvmx_ciu_qlm_jtgd_cn61xx cn66xx;
- struct cvmx_ciu_qlm_jtgd_s cn68xx;
- struct cvmx_ciu_qlm_jtgd_s cn68xxp1;
- struct cvmx_ciu_qlm_jtgd_cn61xx cnf71xx;
-};
-
-union cvmx_ciu_soft_bist {
- uint64_t u64;
- struct cvmx_ciu_soft_bist_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_bist:1;
-#else
- uint64_t soft_bist:1;
- uint64_t reserved_1_63:63;
-#endif
+		__BITFIELD_FIELD(uint64_t capture:1,
+		__BITFIELD_FIELD(uint64_t shift:1,
+		__BITFIELD_FIELD(uint64_t update:1,
+		__BITFIELD_FIELD(uint64_t reserved_45_60:16,
+		__BITFIELD_FIELD(uint64_t select:5,
+		__BITFIELD_FIELD(uint64_t reserved_37_39:3,
+		__BITFIELD_FIELD(uint64_t shft_cnt:5,
+		__BITFIELD_FIELD(uint64_t shft_reg:32,
+		;))))))))
} s;
- struct cvmx_ciu_soft_bist_s cn30xx;
- struct cvmx_ciu_soft_bist_s cn31xx;
- struct cvmx_ciu_soft_bist_s cn38xx;
- struct cvmx_ciu_soft_bist_s cn38xxp2;
- struct cvmx_ciu_soft_bist_s cn50xx;
- struct cvmx_ciu_soft_bist_s cn52xx;
- struct cvmx_ciu_soft_bist_s cn52xxp1;
- struct cvmx_ciu_soft_bist_s cn56xx;
- struct cvmx_ciu_soft_bist_s cn56xxp1;
- struct cvmx_ciu_soft_bist_s cn58xx;
- struct cvmx_ciu_soft_bist_s cn58xxp1;
- struct cvmx_ciu_soft_bist_s cn61xx;
- struct cvmx_ciu_soft_bist_s cn63xx;
- struct cvmx_ciu_soft_bist_s cn63xxp1;
- struct cvmx_ciu_soft_bist_s cn66xx;
- struct cvmx_ciu_soft_bist_s cn68xx;
- struct cvmx_ciu_soft_bist_s cn68xxp1;
- struct cvmx_ciu_soft_bist_s cnf71xx;
};
union cvmx_ciu_soft_prst {
uint64_t u64;
struct cvmx_ciu_soft_prst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t host64:1;
- uint64_t npi:1;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t npi:1;
- uint64_t host64:1;
- uint64_t reserved_3_63:61;
-#endif
- } s;
- struct cvmx_ciu_soft_prst_s cn30xx;
- struct cvmx_ciu_soft_prst_s cn31xx;
- struct cvmx_ciu_soft_prst_s cn38xx;
- struct cvmx_ciu_soft_prst_s cn38xxp2;
- struct cvmx_ciu_soft_prst_s cn50xx;
- struct cvmx_ciu_soft_prst_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn52xx;
- struct cvmx_ciu_soft_prst_cn52xx cn52xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cn56xx;
- struct cvmx_ciu_soft_prst_cn52xx cn56xxp1;
- struct cvmx_ciu_soft_prst_s cn58xx;
- struct cvmx_ciu_soft_prst_s cn58xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cn61xx;
- struct cvmx_ciu_soft_prst_cn52xx cn63xx;
- struct cvmx_ciu_soft_prst_cn52xx cn63xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cn66xx;
- struct cvmx_ciu_soft_prst_cn52xx cn68xx;
- struct cvmx_ciu_soft_prst_cn52xx cn68xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_soft_prst1 {
- uint64_t u64;
- struct cvmx_ciu_soft_prst1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_prst1_s cn52xx;
- struct cvmx_ciu_soft_prst1_s cn52xxp1;
- struct cvmx_ciu_soft_prst1_s cn56xx;
- struct cvmx_ciu_soft_prst1_s cn56xxp1;
- struct cvmx_ciu_soft_prst1_s cn61xx;
- struct cvmx_ciu_soft_prst1_s cn63xx;
- struct cvmx_ciu_soft_prst1_s cn63xxp1;
- struct cvmx_ciu_soft_prst1_s cn66xx;
- struct cvmx_ciu_soft_prst1_s cn68xx;
- struct cvmx_ciu_soft_prst1_s cn68xxp1;
- struct cvmx_ciu_soft_prst1_s cnf71xx;
-};
-
-union cvmx_ciu_soft_prst2 {
- uint64_t u64;
- struct cvmx_ciu_soft_prst2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_prst2_s cn66xx;
-};
-
-union cvmx_ciu_soft_prst3 {
- uint64_t u64;
- struct cvmx_ciu_soft_prst3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_prst3_s cn66xx;
-};
-
-union cvmx_ciu_soft_rst {
- uint64_t u64;
- struct cvmx_ciu_soft_rst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_rst:1;
-#else
- uint64_t soft_rst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_rst_s cn30xx;
- struct cvmx_ciu_soft_rst_s cn31xx;
- struct cvmx_ciu_soft_rst_s cn38xx;
- struct cvmx_ciu_soft_rst_s cn38xxp2;
- struct cvmx_ciu_soft_rst_s cn50xx;
- struct cvmx_ciu_soft_rst_s cn52xx;
- struct cvmx_ciu_soft_rst_s cn52xxp1;
- struct cvmx_ciu_soft_rst_s cn56xx;
- struct cvmx_ciu_soft_rst_s cn56xxp1;
- struct cvmx_ciu_soft_rst_s cn58xx;
- struct cvmx_ciu_soft_rst_s cn58xxp1;
- struct cvmx_ciu_soft_rst_s cn61xx;
- struct cvmx_ciu_soft_rst_s cn63xx;
- struct cvmx_ciu_soft_rst_s cn63xxp1;
- struct cvmx_ciu_soft_rst_s cn66xx;
- struct cvmx_ciu_soft_rst_s cn68xx;
- struct cvmx_ciu_soft_rst_s cn68xxp1;
- struct cvmx_ciu_soft_rst_s cnf71xx;
-};
-
-union cvmx_ciu_sum1_iox_int {
- uint64_t u64;
- struct cvmx_ciu_sum1_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
+		__BITFIELD_FIELD(uint64_t reserved_3_63:61,
+		__BITFIELD_FIELD(uint64_t host64:1,
+		__BITFIELD_FIELD(uint64_t npi:1,
+		__BITFIELD_FIELD(uint64_t soft_prst:1,
+		;))))
} s;
- struct cvmx_ciu_sum1_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_iox_int_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_iox_int_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu_sum1_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_sum1_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_ppx_ip2_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_ppx_ip2_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu_sum1_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_sum1_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_ppx_ip3_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_ppx_ip3_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu_sum1_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_sum1_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_ppx_ip4_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_ppx_ip4_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum2_iox_int {
- uint64_t u64;
- struct cvmx_ciu_sum2_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_iox_int_cn61xx cn66xx;
- struct cvmx_ciu_sum2_iox_int_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu_sum2_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_ppx_ip2_cn61xx cn66xx;
- struct cvmx_ciu_sum2_ppx_ip2_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu_sum2_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_ppx_ip3_cn61xx cn66xx;
- struct cvmx_ciu_sum2_ppx_ip3_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu_sum2_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_ppx_ip4_cn61xx cn66xx;
- struct cvmx_ciu_sum2_ppx_ip4_s cnf71xx;
};
union cvmx_ciu_timx {
uint64_t u64;
struct cvmx_ciu_timx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_37_63:27;
- uint64_t one_shot:1;
- uint64_t len:36;
-#else
- uint64_t len:36;
- uint64_t one_shot:1;
- uint64_t reserved_37_63:27;
-#endif
- } s;
- struct cvmx_ciu_timx_s cn30xx;
- struct cvmx_ciu_timx_s cn31xx;
- struct cvmx_ciu_timx_s cn38xx;
- struct cvmx_ciu_timx_s cn38xxp2;
- struct cvmx_ciu_timx_s cn50xx;
- struct cvmx_ciu_timx_s cn52xx;
- struct cvmx_ciu_timx_s cn52xxp1;
- struct cvmx_ciu_timx_s cn56xx;
- struct cvmx_ciu_timx_s cn56xxp1;
- struct cvmx_ciu_timx_s cn58xx;
- struct cvmx_ciu_timx_s cn58xxp1;
- struct cvmx_ciu_timx_s cn61xx;
- struct cvmx_ciu_timx_s cn63xx;
- struct cvmx_ciu_timx_s cn63xxp1;
- struct cvmx_ciu_timx_s cn66xx;
- struct cvmx_ciu_timx_s cn68xx;
- struct cvmx_ciu_timx_s cn68xxp1;
- struct cvmx_ciu_timx_s cnf71xx;
-};
-
-union cvmx_ciu_tim_multi_cast {
- uint64_t u64;
- struct cvmx_ciu_tim_multi_cast_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t en:1;
-#else
- uint64_t en:1;
- uint64_t reserved_1_63:63;
-#endif
+		__BITFIELD_FIELD(uint64_t reserved_37_63:27,
+		__BITFIELD_FIELD(uint64_t one_shot:1,
+		__BITFIELD_FIELD(uint64_t len:36,
+		;)))
} s;
- struct cvmx_ciu_tim_multi_cast_s cn61xx;
- struct cvmx_ciu_tim_multi_cast_s cn66xx;
- struct cvmx_ciu_tim_multi_cast_s cnf71xx;
};
union cvmx_ciu_wdogx {
uint64_t u64;
struct cvmx_ciu_wdogx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_46_63:18;
- uint64_t gstopen:1;
- uint64_t dstop:1;
- uint64_t cnt:24;
- uint64_t len:16;
- uint64_t state:2;
- uint64_t mode:2;
-#else
- uint64_t mode:2;
- uint64_t state:2;
- uint64_t len:16;
- uint64_t cnt:24;
- uint64_t dstop:1;
- uint64_t gstopen:1;
- uint64_t reserved_46_63:18;
-#endif
+		__BITFIELD_FIELD(uint64_t reserved_46_63:18,
+		__BITFIELD_FIELD(uint64_t gstopen:1,
+		__BITFIELD_FIELD(uint64_t dstop:1,
+		__BITFIELD_FIELD(uint64_t cnt:24,
+		__BITFIELD_FIELD(uint64_t len:16,
+		__BITFIELD_FIELD(uint64_t state:2,
+		__BITFIELD_FIELD(uint64_t mode:2,
+		;)))))))
} s;
- struct cvmx_ciu_wdogx_s cn30xx;
- struct cvmx_ciu_wdogx_s cn31xx;
- struct cvmx_ciu_wdogx_s cn38xx;
- struct cvmx_ciu_wdogx_s cn38xxp2;
- struct cvmx_ciu_wdogx_s cn50xx;
- struct cvmx_ciu_wdogx_s cn52xx;
- struct cvmx_ciu_wdogx_s cn52xxp1;
- struct cvmx_ciu_wdogx_s cn56xx;
- struct cvmx_ciu_wdogx_s cn56xxp1;
- struct cvmx_ciu_wdogx_s cn58xx;
- struct cvmx_ciu_wdogx_s cn58xxp1;
- struct cvmx_ciu_wdogx_s cn61xx;
- struct cvmx_ciu_wdogx_s cn63xx;
- struct cvmx_ciu_wdogx_s cn63xxp1;
- struct cvmx_ciu_wdogx_s cn66xx;
- struct cvmx_ciu_wdogx_s cn68xx;
- struct cvmx_ciu_wdogx_s cn68xxp1;
- struct cvmx_ciu_wdogx_s cnf71xx;
};
-#endif
+#endif /* __CVMX_CIU_DEFS_H__ */
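That ends the cvmx-ciu-defs.h conversion. Callers are unaffected by it: each union still overlays one 64-bit CSR value, so the usual read-modify-write pattern works unchanged on either endianness. A hypothetical use of one of the unions above, built on the CVMX_CIU_TIMX() address macro and the cvmx_read_csr()/cvmx_write_csr() accessors from the existing OCTEON headers (the timer-programming details are illustrative, not part of this patch):

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-ciu-defs.h>

/* Arm one-shot CIU timer 'tim' for 'len' ticks; sketch only. */
static void ciu_arm_oneshot(int tim, uint64_t len)
{
	union cvmx_ciu_timx timx;

	timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(tim));
	timx.s.one_shot = 1;	/* disarm after the first expiry */
	timx.s.len = len;	/* 36-bit timeout value */
	cvmx_write_csr(CVMX_CIU_TIMX(tim), timx.u64);
}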
diff --git a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
index e347496a33c3..80e4f8358b81 100644
--- a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -2070,6 +2070,8 @@ static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)
return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x8000000ull;
}
+void __cvmx_interrupt_gmxx_enable(int interface);
+
union cvmx_gmxx_bad_reg {
uint64_t u64;
struct cvmx_gmxx_bad_reg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
index a5e8fd861c37..39da7f9d7b3f 100644
--- a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -334,6 +334,8 @@ static inline uint64_t CVMX_PCSX_TX_RXX_POLARITY_REG(unsigned long offset, unsig
return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
}
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
+
union cvmx_pcsx_anx_adv_reg {
uint64_t u64;
struct cvmx_pcsx_anx_adv_reg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
index b5b45d26f1c5..847dd9dca6ea 100644
--- a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -268,6 +268,8 @@ static inline uint64_t CVMX_PCSXX_TX_RX_STATES_REG(unsigned long block_id)
return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x1000000ull;
}
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+
union cvmx_pcsxx_10gbx_status_reg {
uint64_t u64;
struct cvmx_pcsxx_10gbx_status_reg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
index c7d601d9446e..f4c4e8051160 100644
--- a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -45,6 +45,8 @@
#define CVMX_SPXX_TPA_SEL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TRN4_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull)
+void __cvmx_interrupt_spxx_int_msk_enable(int index);
+
union cvmx_spxx_bckprs_cnt {
uint64_t u64;
struct cvmx_spxx_bckprs_cnt_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
index 146354005d3b..3c409a854d91 100644
--- a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -45,6 +45,8 @@
#define CVMX_STXX_STAT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000638ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_STAT_PKT_XMT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000640ull) + ((block_id) & 1) * 0x8000000ull)
+void __cvmx_interrupt_stxx_int_msk_enable(int index);
+
union cvmx_stxx_arb_ctl {
uint64_t u64;
struct cvmx_stxx_arb_ctl_s {
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index c99c4b6a79f4..60481502826a 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -279,13 +279,12 @@ union octeon_cvmemctl {
} s;
};
-extern void octeon_write_lcd(const char *s);
extern void octeon_check_cpu_bist(void);
-extern int octeon_get_boot_uart(void);
-struct uart_port;
-extern unsigned int octeon_serial_in(struct uart_port *, int);
-extern void octeon_serial_out(struct uart_port *, int, int);
+int octeon_prune_device_tree(void);
+extern const char __appended_dtb;
+extern const char __dtb_octeon_3xxx_begin;
+extern const char __dtb_octeon_68xx_begin;
/**
* Write a 32bit value to the Octeon NPI register space
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index 1884609741a8..b12d9a3fbfb6 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -63,4 +63,7 @@ enum octeon_dma_bar_type {
*/
extern enum octeon_dma_bar_type octeon_dma_bar_type;
+void octeon_pci_dma_init(void);
+extern char *octeon_swiotlb;
+
#endif
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ad461216b5a1..e8cc328fce2d 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -80,7 +80,12 @@ extern void build_copy_page(void);
* used in our early mem init code for all memory models.
* So always define it.
*/
-#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+extern unsigned long ARCH_PFN_OFFSET;
+# define ARCH_PFN_OFFSET ARCH_PFN_OFFSET
+#else
+# define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
+#endif
extern void clear_page(void * page);
extern void copy_page(void * to, void * from);
@@ -252,8 +257,8 @@ extern int __virt_addr_valid(const volatile void *kaddr);
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
-#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
+#define UNCAC_ADDR(addr) (UNCAC_BASE + __pa(addr))
+#define CAC_ADDR(addr) ((unsigned long)__va((addr) - UNCAC_BASE))
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
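
[editor's note] The UNCAC_ADDR()/CAC_ADDR() rework above routes through
__pa()/__va() instead of assuming a PAGE_OFFSET-based direct map. A minimal,
userspace-runnable sketch of the arithmetic, assuming the classic MIPS32
KSEG0/KSEG1 constants (PAGE_OFFSET 0x80000000, UNCAC_BASE 0xa0000000):

#include <assert.h>
#include <stdint.h>

#define UNCAC_BASE  0xa0000000u
#define PAGE_OFFSET 0x80000000u
#define __pa(x)     ((uint32_t)(x) - PAGE_OFFSET)	/* KSEG0-only model */

int main(void)
{
	uint32_t cached = 0x80400000u;	/* a cached KSEG0 buffer */

	/* UNCAC_ADDR() now yields the same KSEG1 alias the old
	 * PAGE_OFFSET arithmetic produced, but via the physical address,
	 * which stays correct when the kernel runs from a mapped address. */
	assert((UNCAC_BASE + __pa(cached)) == 0xa0400000u);
	return 0;
}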
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index af34afbc32d9..b2fa62922d88 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -141,7 +141,7 @@ struct mips_fpu_struct {
#define NUM_DSP_REGS 6
-typedef __u32 dspreg_t;
+typedef unsigned long dspreg_t;
struct mips_dsp_state {
dspreg_t dspr[NUM_DSP_REGS];
@@ -386,7 +386,20 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
+#ifdef CONFIG_CPU_LOONGSON3
+/*
+ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
+ * tight read loop is executed, because reads take priority over writes & the
+ * hardware (incorrectly) doesn't ensure that writes will eventually occur.
+ *
+ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
+ * flush from cpu_relax() such that any pending writes will become visible as
+ * expected.
+ */
+#define cpu_relax() smp_mb()
+#else
#define cpu_relax() barrier()
+#endif
/*
* Return_address is a replacement for __builtin_return_address(count)
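
[editor's note] The Loongson-3 cpu_relax() comment above concerns spin-wait
loops of the following shape. This is an illustrative userspace analogue, not
kernel code: cpu_relax() is modelled with a C11 full fence, standing in for
the smp_mb() the patch uses to force the SFB to drain.

#include <stdatomic.h>

static atomic_int flag;

/* stand-in for the kernel's cpu_relax(); a full barrier on Loongson-3 */
#define cpu_relax() atomic_thread_fence(memory_order_seq_cst)

static void wait_for_flag(void)
{
	/* Without the barrier, a tight read loop on Loongson-3 could keep
	 * another CPU's store buffered indefinitely and spin forever. */
	while (!atomic_load_explicit(&flag, memory_order_acquire))
		cpu_relax();
}

int main(void)
{
	atomic_store(&flag, 1);
	wait_for_flag();
	return 0;
}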
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index d49d247d48a1..bb36a400203d 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -2,8 +2,10 @@
#ifndef _MIPS_SETUP_H
#define _MIPS_SETUP_H
+#include <linux/types.h>
#include <uapi/asm/setup.h>
+extern void prom_putchar(char);
extern void setup_early_printk(void);
#ifdef CONFIG_EARLY_PRINTK_8250
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index 195db5045ae5..0d9fad5915fe 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -31,7 +31,6 @@ extern int prom_flags;
#define PROM_FLAG_DONT_FREE_TEMP 4
/* Simple char-by-char console I/O. */
-extern void prom_putchar(char c);
extern char prom_getchar(void);
/* Get next memory descriptor after CURR, returns first descriptor
diff --git a/arch/mips/include/asm/sim.h b/arch/mips/include/asm/sim.h
index 91831800c480..59f31a95facd 100644
--- a/arch/mips/include/asm/sim.h
+++ b/arch/mips/include/asm/sim.h
@@ -39,8 +39,6 @@ __asm__( \
".end\t__" #symbol "\n\t" \
".size\t__" #symbol",. - __" #symbol)
-#define nabi_no_regargs
-
#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
@@ -67,16 +65,6 @@ __asm__( \
".end\t__" #symbol "\n\t" \
".size\t__" #symbol",. - __" #symbol)
-#define nabi_no_regargs \
- unsigned long __dummy0, \
- unsigned long __dummy1, \
- unsigned long __dummy2, \
- unsigned long __dummy3, \
- unsigned long __dummy4, \
- unsigned long __dummy5, \
- unsigned long __dummy6, \
- unsigned long __dummy7,
-
#endif /* CONFIG_64BIT */
#endif /* _ASM_SIM_H */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 88ebd83b3bf9..056a6bf13491 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -25,7 +25,17 @@ extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+ extern int vdso_smp_processor_id(void)
+ __compiletime_error("VDSO should not call smp_processor_id()");
+ return vdso_smp_processor_id();
+#else
+ return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
/* Map from cpu id to sequential logical cpu number. This will only
   not be idempotent when cpus failed to come on-line. */
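
[editor's note] The raw_smp_processor_id() rework relies on the compile-time
trap sketched below; __compiletime_error() expands to GCC/clang's error
function attribute, so any VDSO object that ends up calling the function fails
at build time instead of reading a bogus thread pointer at run time:

/* Standalone restatement of the pattern (GCC/clang extension assumed). */
#ifdef __VDSO__
extern int vdso_smp_processor_id(void)
	__attribute__((error("VDSO should not call smp_processor_id()")));
/* Any call to vdso_smp_processor_id() that survives optimization is a
 * hard compile error, which is exactly the point. */
#endif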
diff --git a/arch/mips/include/asm/txx9/generic.h b/arch/mips/include/asm/txx9/generic.h
index 64887d3c7ec3..9a2c47bf3c40 100644
--- a/arch/mips/include/asm/txx9/generic.h
+++ b/arch/mips/include/asm/txx9/generic.h
@@ -49,7 +49,6 @@ void txx9_spi_init(int busid, unsigned long base, int irq);
void txx9_ethaddr_init(unsigned int id, unsigned char *ethaddr);
void txx9_sio_init(unsigned long baseaddr, int irq,
unsigned int line, unsigned int sclk, int nocts);
-void prom_putchar(char c);
#ifdef CONFIG_EARLY_PRINTK
extern void (*txx9_prom_putchar)(char c);
void txx9_sio_putchar_init(unsigned long baseaddr);
diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h
index 6d667087f2aa..00805ac6e9fc 100644
--- a/arch/mips/include/asm/txx9/tx4939.h
+++ b/arch/mips/include/asm/txx9/tx4939.h
@@ -101,13 +101,6 @@ struct tx4939_irc_reg {
struct tx4939_le_reg maskext;
};
-struct tx4939_rtc_reg {
- __u32 ctl;
- __u32 adr;
- __u32 dat;
- __u32 tbc;
-};
-
struct tx4939_crypto_reg {
struct tx4939_le_reg csr;
struct tx4939_le_reg idesptr;
@@ -370,26 +363,6 @@ struct tx4939_vpc_desc {
#define TX4939_CLKCTR_CYPRST 0x00000001
/*
- * RTC
- */
-#define TX4939_RTCCTL_ALME 0x00000080
-#define TX4939_RTCCTL_ALMD 0x00000040
-#define TX4939_RTCCTL_BUSY 0x00000020
-
-#define TX4939_RTCCTL_COMMAND 0x00000007
-#define TX4939_RTCCTL_COMMAND_NOP 0x00000000
-#define TX4939_RTCCTL_COMMAND_GETTIME 0x00000001
-#define TX4939_RTCCTL_COMMAND_SETTIME 0x00000002
-#define TX4939_RTCCTL_COMMAND_GETALARM 0x00000003
-#define TX4939_RTCCTL_COMMAND_SETALARM 0x00000004
-
-#define TX4939_RTCTBC_PM 0x00000080
-#define TX4939_RTCTBC_COMP 0x0000007f
-
-#define TX4939_RTC_REG_RAMSIZE 0x00000100
-#define TX4939_RTC_REG_RWBSIZE 0x00000006
-
-/*
* CRYPTO
*/
#define TX4939_CRYPTO_CSR_SAESO 0x08000000
@@ -498,8 +471,6 @@ struct tx4939_vpc_desc {
#define tx4939_ccfgptr \
((struct tx4939_ccfg_reg __iomem *)TX4939_CCFG_REG)
#define tx4939_sramcptr tx4938_sramcptr
-#define tx4939_rtcptr \
- ((struct tx4939_rtc_reg __iomem *)TX4939_RTC_REG)
#define tx4939_cryptoptr \
((struct tx4939_crypto_reg __iomem *)TX4939_CRYPTO_REG)
#define tx4939_vpcptr ((struct tx4939_vpc_reg __iomem *)TX4939_VPC_REG)
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 49c3d4795963..71370fb3ceef 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -123,4 +123,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _UAPI_ASM_SOCKET_H */
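
[editor's note] For reference, the new option is used roughly as below
(userspace sketch; struct sock_txtime and the SO_TXTIME/SCM_TXTIME definitions
need v4.19-era UAPI headers, and per-packet transmit times are then supplied
as SCM_TXTIME control messages):

#include <linux/net_tstamp.h>	/* struct sock_txtime */
#include <sys/socket.h>
#include <time.h>		/* CLOCK_TAI */

static int enable_txtime(int fd)
{
	struct sock_txtime st = {
		.clockid = CLOCK_TAI,	/* clock the txtimes are expressed in */
		.flags	 = 0,
	};

	/* After this, each sendmsg() may carry an SCM_TXTIME cmsg holding
	 * a u64 transmit time in nanoseconds of the chosen clock. */
	return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &st, sizeof(st));
}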
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index d626a9a391cc..d31bc2f01208 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -16,6 +16,8 @@
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <asm/mipsregs.h>
#include <asm/jazz.h>
#include <asm/io.h>
@@ -86,6 +88,7 @@ static int __init vdma_init(void)
printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
return 0;
}
+arch_initcall(vdma_init);
/*
* Allocate DMA pagetables using a simple first-fit algorithm
@@ -556,4 +559,140 @@ int vdma_get_enable(int channel)
return enable;
}
-arch_initcall(vdma_init);
+static void *jazz_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ void *ret;
+
+ ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!ret)
+ return NULL;
+
+ *dma_handle = vdma_alloc(virt_to_phys(ret), size);
+ if (*dma_handle == VDMA_ERROR) {
+ dma_direct_free(dev, size, ret, *dma_handle, attrs);
+ return NULL;
+ }
+
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT)) {
+ dma_cache_wback_inv((unsigned long)ret, size);
+ ret = (void *)UNCAC_ADDR(ret);
+ }
+ return ret;
+}
+
+static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ vdma_free(dma_handle);
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT))
+ vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+ return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+}
+
+static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t phys = page_to_phys(page) + offset;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(dev, phys, size, dir);
+ return vdma_alloc(phys, size);
+}
+
+static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+ vdma_free(dma_addr);
+}
+
+static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+ dir);
+ sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
+ if (sg->dma_address == VDMA_ERROR)
+ return 0;
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nents;
+}
+
+static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
+ dir);
+ vdma_free(sg->dma_address);
+ }
+}
+
+static void jazz_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void jazz_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == VDMA_ERROR;
+}
+
+const struct dma_map_ops jazz_dma_ops = {
+ .alloc = jazz_dma_alloc,
+ .free = jazz_dma_free,
+ .mmap = arch_dma_mmap,
+ .map_page = jazz_dma_map_page,
+ .unmap_page = jazz_dma_unmap_page,
+ .map_sg = jazz_dma_map_sg,
+ .unmap_sg = jazz_dma_unmap_sg,
+ .sync_single_for_cpu = jazz_dma_sync_single_for_cpu,
+ .sync_single_for_device = jazz_dma_sync_single_for_device,
+ .sync_sg_for_cpu = jazz_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = jazz_dma_sync_sg_for_device,
+ .dma_supported = dma_direct_supported,
+ .cache_sync = arch_dma_cache_sync,
+ .mapping_error = jazz_dma_mapping_error,
+};
+EXPORT_SYMBOL(jazz_dma_ops);
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 448fd41792e4..1b5e121c3f0d 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -16,6 +16,7 @@
#include <linux/screen_info.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
+#include <linux/dma-mapping.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
@@ -136,10 +137,16 @@ static struct resource jazz_esp_rsrc[] = {
}
};
+static u64 jazz_esp_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device jazz_esp_pdev = {
.name = "jazz_esp",
.num_resources = ARRAY_SIZE(jazz_esp_rsrc),
- .resource = jazz_esp_rsrc
+ .resource = jazz_esp_rsrc,
+ .dev = {
+ .dma_mask = &jazz_esp_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
};
static struct resource jazz_sonic_rsrc[] = {
@@ -155,10 +162,16 @@ static struct resource jazz_sonic_rsrc[] = {
}
};
+static u64 jazz_sonic_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device jazz_sonic_pdev = {
.name = "jazzsonic",
.num_resources = ARRAY_SIZE(jazz_sonic_rsrc),
- .resource = jazz_sonic_rsrc
+ .resource = jazz_sonic_rsrc,
+ .dev = {
+ .dma_mask = &jazz_sonic_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
};
static struct resource jazz_cmos_rsrc[] = {
diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform
index 28448d358c10..a2a5a85ea1f9 100644
--- a/arch/mips/jz4740/Platform
+++ b/arch/mips/jz4740/Platform
@@ -1,4 +1,4 @@
platform-$(CONFIG_MACH_INGENIC) += jz4740/
cflags-$(CONFIG_MACH_INGENIC) += -I$(srctree)/arch/mips/include/asm/mach-jz4740
load-$(CONFIG_MACH_INGENIC) += 0xffffffff80010000
-zload-$(CONFIG_MACH_INGENIC) += 0xffffffff80600000
+zload-$(CONFIG_MACH_INGENIC) += 0xffffffff81000000
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 60f0767507c6..af0c8ace0141 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -29,10 +29,11 @@
#include <linux/power/gpio-charger.h>
#include <linux/pwm.h>
+#include <linux/platform_data/jz4740/jz4740_nand.h>
+
#include <asm/mach-jz4740/gpio.h>
#include <asm/mach-jz4740/jz4740_fb.h>
#include <asm/mach-jz4740/jz4740_mmc.h>
-#include <asm/mach-jz4740/jz4740_nand.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b2509c19cfb5..d535fc706a8b 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1849,7 +1849,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
c->cputype = CPU_LOONGSON3;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 505cb77d1280..4a1647ddfbd9 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -14,8 +14,6 @@
#include <asm/setup.h>
-extern void prom_putchar(char);
-
static void early_console_write(struct console *con, const char *s, unsigned n)
{
while (n-- && *s) {
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c
index 83cea3767556..ea26614afac6 100644
--- a/arch/mips/kernel/early_printk_8250.c
+++ b/arch/mips/kernel/early_printk_8250.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
+#include <asm/setup.h>
static void __iomem *serial8250_base;
static unsigned int serial8250_reg_shift;
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 37b9383eacd3..6c257b52f57f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -354,16 +354,56 @@ NESTED(ejtag_debug_handler, PT_SIZE, sp)
sll k0, k0, 30 # Check for SDBBP.
bgez k0, ejtag_return
+#ifdef CONFIG_SMP
+1: PTR_LA k0, ejtag_debug_buffer_spinlock
+ ll k0, 0(k0)
+ bnez k0, 1b
+ PTR_LA k0, ejtag_debug_buffer_spinlock
+ sc k0, 0(k0)
+ beqz k0, 1b
+# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
+ sync
+# endif
+
+ PTR_LA k0, ejtag_debug_buffer
+ LONG_S k1, 0(k0)
+
+ ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+ PTR_SRL k1, SMP_CPUID_PTRSHIFT
+ PTR_SLL k1, LONGLOG
+ PTR_LA k0, ejtag_debug_buffer_per_cpu
+ PTR_ADDU k0, k1
+
+ PTR_LA k1, ejtag_debug_buffer
+ LONG_L k1, 0(k1)
+ LONG_S k1, 0(k0)
+
+ PTR_LA k0, ejtag_debug_buffer_spinlock
+ sw zero, 0(k0)
+#else
PTR_LA k0, ejtag_debug_buffer
LONG_S k1, 0(k0)
+#endif
+
SAVE_ALL
move a0, sp
jal ejtag_exception_handler
RESTORE_ALL
+
+#ifdef CONFIG_SMP
+ ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+ PTR_SRL k1, SMP_CPUID_PTRSHIFT
+ PTR_SLL k1, LONGLOG
+ PTR_LA k0, ejtag_debug_buffer_per_cpu
+ PTR_ADDU k0, k1
+ LONG_L k1, 0(k0)
+#else
PTR_LA k0, ejtag_debug_buffer
LONG_L k1, 0(k0)
+#endif
ejtag_return:
+ back_to_back_c0_hazard
MFC0 k0, CP0_DESAVE
.set mips32
deret
@@ -377,6 +417,12 @@ ejtag_return:
.data
EXPORT(ejtag_debug_buffer)
.fill LONGSIZE
+#ifdef CONFIG_SMP
+EXPORT(ejtag_debug_buffer_spinlock)
+ .fill LONGSIZE
+EXPORT(ejtag_debug_buffer_per_cpu)
+ .fill LONGSIZE * NR_CPUS
+#endif
.previous
__INIT
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 7c246b69c545..046846999efd 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -33,21 +33,21 @@
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
-static void r3081_wait(void)
+static void __cpuidle r3081_wait(void)
{
unsigned long cfg = read_c0_conf();
write_c0_conf(cfg | R30XX_CONF_HALT);
local_irq_enable();
}
-static void r39xx_wait(void)
+static void __cpuidle r39xx_wait(void)
{
if (!need_resched())
write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
local_irq_enable();
}
-void r4k_wait(void)
+void __cpuidle r4k_wait(void)
{
local_irq_enable();
__r4k_wait();
@@ -60,7 +60,7 @@ void r4k_wait(void)
* interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
* using this version a gamble.
*/
-void r4k_wait_irqoff(void)
+void __cpuidle r4k_wait_irqoff(void)
{
if (!need_resched())
__asm__(
@@ -75,7 +75,7 @@ void r4k_wait_irqoff(void)
* The RM7000 variant has to handle erratum 38. The workaround is to not
* have any pending stores when the WAIT instruction is executed.
*/
-static void rm7k_wait_irqoff(void)
+static void __cpuidle rm7k_wait_irqoff(void)
{
if (!need_resched())
__asm__(
@@ -96,7 +96,7 @@ static void rm7k_wait_irqoff(void)
* since coreclock (and the cp0 counter) stops upon executing it. Only an
* interrupt can wake it, so they must be enabled before entering idle modes.
*/
-static void au1k_wait(void)
+static void __cpuidle au1k_wait(void)
{
unsigned long c0status = read_c0_status() | 1; /* irqs on */
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index f5c8bce70db2..54cd675c5d1d 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -326,19 +326,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
preempt_enable_no_resched();
}
return 1;
- } else {
- if (addr->word != breakpoint_insn.word) {
- /*
- * The breakpoint instruction was removed by
- * another cpu right after we hit, no further
- * handling of this interrupt is appropriate
- */
- ret = 1;
- goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
+ } else if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed by
+ * another cpu right after we hit, no further
+ * handling of this interrupt is appropriate
+ */
+ ret = 1;
}
goto no_kprobe;
}
@@ -364,10 +358,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
}
-ss_probe:
prepare_singlestep(p, regs, kcb);
if (kcb->flags & SKIP_DELAYSLOT) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
@@ -468,51 +463,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_sp = regs->regs[29];
-
- memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
- MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
-
- regs->cp0_epc = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-/* Defined in the inline asm below. */
-void jprobe_return_end(void);
-
-void __kprobes jprobe_return(void)
-{
- /* Assembler quirk necessitates this '0,code' business. */
- asm volatile(
- "break 0,%0\n\t"
- ".globl jprobe_return_end\n"
- "jprobe_return_end:\n"
- : : "n" (BRK_KPROBE_BP) : "memory");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (regs->cp0_epc >= (unsigned long)jprobe_return &&
- regs->cp0_epc <= (unsigned long)jprobe_return_end) {
- *regs = kcb->jprobe_saved_regs;
- memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
- MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
- preempt_enable_no_resched();
-
- return 1;
- }
- return 0;
-}
-
/*
* Function return probe trampoline:
* - init_kprobes() establishes a probepoint here
@@ -595,9 +545,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_assert(ri, orig_ret_address, trampoline_address);
instruction_pointer(regs) = orig_ret_address;
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 318f1c05c5b3..6b61be486303 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -43,17 +43,6 @@
#include <asm/mmu_context.h>
#include <asm/mman.h>
-/* Use this to get at 32-bit user passed pointers. */
-/* A() macro should be used for places where you e.g.
- have some internal variable u32 and just want to get
- rid of a compiler warning. AA() has to be used in
- places where you want to convert a function argument
- to 32bit pointer or when you e.g. access pt_regs
- structure and want to consider 32bit registers only.
- */
-#define A(__x) ((unsigned long)(__x))
-#define AA(__x) ((unsigned long)((int)__x))
-
#ifdef __MIPSEB__
#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
#endif
@@ -61,24 +50,6 @@
#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
#endif
-SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
- unsigned long, prot, unsigned long, flags, unsigned long, fd,
- unsigned long, pgoff)
-{
- if (pgoff & (~PAGE_MASK >> 12))
- return -EINVAL;
- return ksys_mmap_pgoff(addr, len, prot, flags, fd,
- pgoff >> (PAGE_SHIFT-12));
-}
-
-#define RLIM_INFINITY32 0x7fffffff
-#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
-
-struct rlimit32 {
- int rlim_cur;
- int rlim_max;
-};
-
SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
{
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8d85046adcc8..8fc69891e117 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -29,6 +29,8 @@
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
+#include <linux/nmi.h>
+#include <linux/cpu.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
@@ -655,28 +657,42 @@ unsigned long arch_align_stack(unsigned long sp)
return sp & ALMASK;
}
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
{
- struct pt_regs *regs;
+ nmi_cpu_backtrace(get_irq_regs());
+ cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
- regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+ call_single_data_t *csd;
+ int cpu;
- if (regs)
- show_regs(regs);
+ for_each_cpu(cpu, mask) {
+ /*
+ * If we previously sent an IPI to the target CPU & it hasn't
+ * cleared its bit in the busy cpumask then it didn't handle
+ * our previous IPI & it's not safe for us to reuse the
+ * call_single_data_t.
+ */
+ if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+ pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+ cpu);
+ continue;
+ }
- dump_stack();
+ csd = &per_cpu(backtrace_csd, cpu);
+ csd->func = handle_backtrace;
+ smp_call_function_single_async(cpu, csd);
+ }
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
- long this_cpu = get_cpu();
-
- if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
- dump_stack();
-
- smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
- put_cpu();
+ nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}
int mips_get_process_fp_mode(struct task_struct *task)
@@ -691,19 +707,25 @@ int mips_get_process_fp_mode(struct task_struct *task)
return value;
}
-static void prepare_for_fp_mode_switch(void *info)
+static long prepare_for_fp_mode_switch(void *unused)
{
- struct mm_struct *mm = info;
-
- if (current->mm == mm)
- lose_fpu(1);
+ /*
+ * This is icky, but we use this to simply ensure that all CPUs have
+ * context switched, regardless of whether they were previously running
+ * kernel or user code. This ensures that no CPU currently has its FPU
+ * enabled, or is about to attempt to enable it through any path other
+ * than enable_restore_fp_context() which will wait appropriately for
+ * fp_mode_switching to be zero.
+ */
+ return 0;
}
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
struct task_struct *t;
- int max_users;
+ struct cpumask process_cpus;
+ int cpu;
/* If nothing to change, return right away, successfully. */
if (value == mips_get_process_fp_mode(task))
@@ -736,35 +758,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
return -EOPNOTSUPP;
- /* Proceed with the mode switch */
- preempt_disable();
-
- /* Save FP & vector context, then disable FPU & MSA */
- if (task->signal == current->signal)
- lose_fpu(1);
-
- /* Prevent any threads from obtaining live FP context */
- atomic_set(&task->mm->context.fp_mode_switching, 1);
- smp_mb__after_atomic();
-
- /*
- * If there are multiple online CPUs then force any which are running
- * threads in this process to lose their FPU context, which they can't
- * regain until fp_mode_switching is cleared later.
- */
- if (num_online_cpus() > 1) {
- /* No need to send an IPI for the local CPU */
- max_users = (task->mm == current->mm) ? 1 : 0;
-
- if (atomic_read(&current->mm->mm_users) > max_users)
- smp_call_function(prepare_for_fp_mode_switch,
- (void *)current->mm, 1);
- }
-
- /*
- * There are now no threads of the process with live FP context, so it
- * is safe to proceed with the FP mode switch.
- */
+ /* Indicate the new FP mode in each thread */
for_each_thread(task, t) {
/* Update desired FP register width */
if (value & PR_FP_MODE_FR) {
@@ -781,9 +775,34 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
}
- /* Allow threads to use FP again */
- atomic_set(&task->mm->context.fp_mode_switching, 0);
- preempt_enable();
+ /*
+ * We need to ensure that all threads in the process have switched mode
+ * before returning, in order to allow userland to not worry about
+ * races. We can do this by forcing all CPUs that any thread in the
+ * process may be running on to schedule something else - in this case
+ * prepare_for_fp_mode_switch().
+ *
+ * We begin by generating a mask of all CPUs that any thread in the
+ * process may be running on.
+ */
+ cpumask_clear(&process_cpus);
+ for_each_thread(task, t)
+ cpumask_set_cpu(task_cpu(t), &process_cpus);
+
+ /*
+ * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
+ *
+ * The CPUs may have rescheduled already since we switched mode or
+ * generated the cpumask, but that doesn't matter. If the task in this
+ * process is scheduled out then our scheduling
+ * prepare_for_fp_mode_switch() will simply be redundant. If it's
+ * scheduled in then it will already have picked up the new FP mode
+ * whilst doing so.
+ */
+ get_online_cpus();
+ for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
+ work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
+ put_online_cpus();
wake_up_var(&task->mm->context.fp_mode_switching);
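
[editor's note] The raise_backtrace() comment above ("not safe for us to reuse
the call_single_data_t") describes a busy-bit handshake. A compilable
userspace analogue, with queue_ipi() and NR_TARGETS as hypothetical stand-ins
for the IPI transport and CPU count:

#include <stdatomic.h>
#include <stdbool.h>

#define NR_TARGETS 16				/* hypothetical CPU count */
static atomic_bool csd_busy[NR_TARGETS];	/* mirrors backtrace_csd_busy */

static void queue_ipi(int cpu) { (void)cpu; }	/* hypothetical transport */

static bool try_send_backtrace_ipi(int cpu)
{
	/* Atomically test-and-set the busy bit: if the previous IPI to this
	 * CPU was never handled, its descriptor is still in flight and must
	 * not be reused - report failure instead (the CPU may have hung). */
	if (atomic_exchange(&csd_busy[cpu], true))
		return false;
	queue_ipi(cpu);		/* handler clears csd_busy[cpu] when done */
	return true;
}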
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 9f6c3f2aa2e2..e5ba56c01ee0 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -41,6 +41,7 @@
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
+#include <asm/processor.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
@@ -589,9 +590,226 @@ static int fpr_set(struct task_struct *target,
return err;
}
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+/*
+ * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
+ */
+static int dsp32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u32 dspregs[NUM_DSP_REGS + 1];
+
+ BUG_ON(count % sizeof(u32));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ dspregs[i] = target->thread.dsp.dspr[i];
+ break;
+ case NUM_DSP_REGS:
+ dspregs[i] = target->thread.dsp.dspcontrol;
+ break;
+ }
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u32 dspregs[NUM_DSP_REGS + 1];
+ int err;
+
+ BUG_ON(count % sizeof(u32));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ target->thread.dsp.dspr[i] = (s32)dspregs[i];
+ break;
+ case NUM_DSP_REGS:
+ target->thread.dsp.dspcontrol = (s32)dspregs[i];
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
+ */
+static int dsp64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u64 dspregs[NUM_DSP_REGS + 1];
+
+ BUG_ON(count % sizeof(u64));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ dspregs[i] = target->thread.dsp.dspr[i];
+ break;
+ case NUM_DSP_REGS:
+ dspregs[i] = target->thread.dsp.dspcontrol;
+ break;
+ }
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u64 dspregs[NUM_DSP_REGS + 1];
+ int err;
+
+ BUG_ON(count % sizeof(u64));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ target->thread.dsp.dspr[i] = dspregs[i];
+ break;
+ case NUM_DSP_REGS:
+ target->thread.dsp.dspcontrol = dspregs[i];
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * Determine whether the DSP context is present.
+ */
+static int dsp_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
+}
+
+/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
+static int fp_mode_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int fp_mode;
+
+ fp_mode = mips_get_process_fp_mode(target);
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+ sizeof(fp_mode));
+}
+
+/*
+ * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
+ *
+ * We optimize for the case where `count % sizeof(int) == 0', which
+ * is supposed to have been guaranteed by the kernel before calling
+ * us, e.g. in `ptrace_regset'. We enforce that requirement, so
+ * that we can safely avoid preinitializing temporaries for partial
+ * mode writes.
+ */
+static int fp_mode_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int fp_mode;
+ int err;
+
+ BUG_ON(count % sizeof(int));
+
+ if (pos + count > sizeof(fp_mode))
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+ sizeof(fp_mode));
+ if (err)
+ return err;
+
+ if (count > 0)
+ err = mips_set_process_fp_mode(target, fp_mode);
+
+ return err;
+}
+
enum mips_regset {
REGSET_GPR,
REGSET_FPR,
+ REGSET_DSP,
+ REGSET_FP_MODE,
};
struct pt_regs_offset {
@@ -697,6 +915,23 @@ static const struct user_regset mips_regsets[] = {
.get = fpr_get,
.set = fpr_set,
},
+ [REGSET_DSP] = {
+ .core_note_type = NT_MIPS_DSP,
+ .n = NUM_DSP_REGS + 1,
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .get = dsp32_get,
+ .set = dsp32_set,
+ .active = dsp_active,
+ },
+ [REGSET_FP_MODE] = {
+ .core_note_type = NT_MIPS_FP_MODE,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .get = fp_mode_get,
+ .set = fp_mode_set,
+ },
};
static const struct user_regset_view user_mips_view = {
@@ -728,6 +963,23 @@ static const struct user_regset mips64_regsets[] = {
.get = fpr_get,
.set = fpr_set,
},
+ [REGSET_DSP] = {
+ .core_note_type = NT_MIPS_DSP,
+ .n = NUM_DSP_REGS + 1,
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .get = dsp64_get,
+ .set = dsp64_set,
+ .active = dsp_active,
+ },
+ [REGSET_FP_MODE] = {
+ .core_note_type = NT_MIPS_FP_MODE,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .get = fp_mode_get,
+ .set = fp_mode_set,
+ },
};
static const struct user_regset_view user_mips64_view = {
@@ -856,7 +1108,7 @@ long arch_ptrace(struct task_struct *child, long request,
goto out;
}
dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
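
[editor's note] From userland the two new regsets are reached through
PTRACE_GETREGSET/PTRACE_SETREGSET. A sketch for the 32-bit DSP layout (u32
entries; the 64-bit view uses u64 instead), assuming headers new enough to
define NT_MIPS_DSP:

#include <elf.h>		/* NT_MIPS_DSP, needs v4.19-era headers */
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* NUM_DSP_REGS is 6 on MIPS; the regset carries them plus DSPControl. */
static long read_dsp(pid_t pid, uint32_t dspregs[6 + 1])
{
	struct iovec iov = {
		.iov_base = dspregs,
		.iov_len  = 7 * sizeof(uint32_t),
	};

	/* Fails with EIO when the tracee's CPU lacks the DSP ASE,
	 * matching dsp_active()/dsp32_get() above. */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_MIPS_DSP, &iov);
}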
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 7edc629304c8..bc348d44d151 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -142,7 +142,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
goto out;
}
dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index c6bbf2165051..419c92197b2f 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -85,7 +85,7 @@ done:
#ifdef CONFIG_CPU_CAVIUM_OCTEON
/* We need to flush I-cache before jumping to new kernel.
- * Unfortunatelly, this code is cpu-specific.
+ * Unfortunately, this code is cpu-specific.
*/
.set push
.set noreorder
@@ -145,7 +145,7 @@ LEAF(kexec_smp_wait)
#endif
/* All parameters to new kernel are passed in registers a0-a3.
- * kexec_args[0..3] are uses to prepare register values.
+ * kexec_args[0..3] are used to prepare register values.
*/
kexec_args:
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 2c96c0c68116..c71d1eb7da59 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -36,6 +36,7 @@
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
+#include <asm/dma-coherence.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
@@ -84,6 +85,11 @@ static struct resource bss_resource = { .name = "Kernel bss", };
static void *detect_magic __initdata = detect_memory_region;
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+unsigned long ARCH_PFN_OFFSET;
+EXPORT_SYMBOL(ARCH_PFN_OFFSET);
+#endif
+
void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
int x = boot_mem_map.nr_map;
@@ -441,6 +447,12 @@ static void __init bootmem_init(void)
mapstart = max(reserved_end, start);
}
+ if (min_low_pfn >= max_low_pfn)
+ panic("Incorrect memory mapping !!!");
+
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+ ARCH_PFN_OFFSET = PFN_UP(ramstart);
+#else
/*
* Reserve any memory between the start of RAM and PHYS_OFFSET
*/
@@ -448,8 +460,6 @@ static void __init bootmem_init(void)
add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
BOOT_MEM_RESERVED);
- if (min_low_pfn >= max_low_pfn)
- panic("Incorrect memory mapping !!!");
if (min_low_pfn > ARCH_PFN_OFFSET) {
pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
@@ -459,6 +469,7 @@ static void __init bootmem_init(void)
ARCH_PFN_OFFSET - min_low_pfn);
}
min_low_pfn = ARCH_PFN_OFFSET;
+#endif
/*
* Determine low and high memory ranges
@@ -1055,3 +1066,26 @@ static int __init debugfs_mips(void)
}
arch_initcall(debugfs_mips);
#endif
+
+#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
+/* User defined DMA coherency from command line. */
+enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+ coherentio = IO_COHERENCE_ENABLED;
+ pr_info("Hardware DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+ coherentio = IO_COHERENCE_DISABLED;
+ pr_info("Software DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+#endif
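
[editor's note] The two early_param() hooks above take plain flags on the
kernel command line; they are only honoured when the platform selects
DMA_MAYBE_COHERENT without DMA_PERDEV_COHERENT. For example:

    console=ttyS0,115200 coherentio      # force hardware-coherent DMA
    console=ttyS0,115200 nocoherentio    # force software cache maintenance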
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 0a9cfe7a0372..109ed163a6a6 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -592,13 +592,15 @@ SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
#endif
#ifdef CONFIG_TRAD_SIGNALS
-asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys_sigreturn(void)
{
struct sigframe __user *frame;
+ struct pt_regs *regs;
sigset_t blocked;
int sig;
- frame = (struct sigframe __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct sigframe __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
@@ -606,7 +608,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&blocked);
- sig = restore_sigcontext(&regs, &frame->sf_sc);
+ sig = restore_sigcontext(regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
@@ -618,8 +620,8 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
@@ -627,13 +629,15 @@ badframe:
}
#endif /* CONFIG_TRAD_SIGNALS */
-asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys_rt_sigreturn(void)
{
struct rt_sigframe __user *frame;
+ struct pt_regs *regs;
sigset_t set;
int sig;
- frame = (struct rt_sigframe __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
@@ -641,7 +645,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&set);
- sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+ sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
@@ -656,8 +660,8 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index b672cebb4a1a..8f65aaf9206d 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -64,13 +64,15 @@ struct rt_sigframe_n32 {
struct ucontextn32 rs_uc;
};
-asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sysn32_rt_sigreturn(void)
{
struct rt_sigframe_n32 __user *frame;
+ struct pt_regs *regs;
sigset_t set;
int sig;
- frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe_n32 __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
@@ -78,7 +80,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&set);
- sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+ sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
@@ -93,8 +95,8 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c
index 2b3572fb5f1b..b6e3ddef48a0 100644
--- a/arch/mips/kernel/signal_o32.c
+++ b/arch/mips/kernel/signal_o32.c
@@ -151,13 +151,15 @@ static int setup_frame_32(void *sig_return, struct ksignal *ksig,
return 0;
}
-asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys32_rt_sigreturn(void)
{
struct rt_sigframe32 __user *frame;
+ struct pt_regs *regs;
sigset_t set;
int sig;
- frame = (struct rt_sigframe32 __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe32 __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
@@ -165,7 +167,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&set);
- sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
+ sig = restore_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
@@ -180,8 +182,8 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
@@ -251,13 +253,15 @@ struct mips_abi mips_abi_32 = {
};
-asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys32_sigreturn(void)
{
struct sigframe32 __user *frame;
+ struct pt_regs *regs;
sigset_t blocked;
int sig;
- frame = (struct sigframe32 __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct sigframe32 __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
@@ -265,7 +269,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&blocked);
- sig = restore_sigcontext32(&regs, &frame->sf_sc);
+ sig = restore_sigcontext32(regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
@@ -277,8 +281,8 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d67fa74622ee..f8871d5b7eb3 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
void show_regs(struct pt_regs *regs)
{
__show_regs((struct pt_regs *)regs);
+ dump_stack();
}
void show_registers(struct pt_regs *regs)
@@ -1220,13 +1221,6 @@ static int enable_restore_fp_context(int msa)
{
int err, was_fpu_owner, prior_msa;
- /*
- * If an FP mode switch is currently underway, wait for it to
- * complete before proceeding.
- */
- wait_var_event(&current->mm->context.fp_mode_switching,
- !atomic_read(&current->mm->context.fp_mode_switching));
-
if (!used_math()) {
/* First time FP context user. */
preempt_disable();
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
index f7a0645ccb82..4aaff3b3175c 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -224,7 +224,7 @@ unsigned long arch_uretprobe_hijack_return_addr(
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long vaddr)
{
- return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
+ return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7cd76f93a438..f7ea8e21656b 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -515,7 +515,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
dvcpu->arch.wait = 0;
if (swq_has_sleeper(&dvcpu->wq))
- swake_up(&dvcpu->wq);
+ swake_up_one(&dvcpu->wq);
return 0;
}
@@ -1204,7 +1204,7 @@ static void kvm_mips_comparecount_func(unsigned long data)
vcpu->arch.wait = 0;
if (swq_has_sleeper(&vcpu->wq))
- swake_up(&vcpu->wq);
+ swake_up_one(&vcpu->wq);
}
/* low level hrtimer wake routine */
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
index 44bccaee822b..c4aa140b7c91 100644
--- a/arch/mips/lantiq/early_printk.c
+++ b/arch/mips/lantiq/early_printk.c
@@ -8,6 +8,7 @@
#include <linux/cpu.h>
#include <lantiq_soc.h>
+#include <asm/setup.h>
#define ASC_BUF 1024
#define LTQ_ASC_FSTAT ((u32 *)(LTQ_EARLY_ASC + 0x0048))
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 9ff7ccde9de0..d984bd5c2ec5 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -9,7 +9,6 @@
#include <linux/export.h>
#include <linux/clk.h>
#include <linux/bootmem.h>
-#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
@@ -114,10 +113,3 @@ void __init prom_init(void)
panic("failed to register_vsmp_smp_ops()");
#endif
}
-
-int __init plat_of_setup(void)
-{
- return of_platform_default_populate(NULL, NULL, NULL);
-}
-
-arch_initcall(plat_of_setup);
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 805b3a6ab2d6..4b9fbb6744ad 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -130,10 +130,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
unsigned long flags;
ch->desc = 0;
- ch->desc_base = dma_alloc_coherent(NULL,
+ ch->desc_base = dma_zalloc_coherent(NULL,
LTQ_DESC_NUM * LTQ_DESC_SIZE,
&ch->phys, GFP_ATOMIC);
- memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
spin_lock_irqsave(&ltq_dma_lock, flags);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c
index 17e15b50a551..37b8fc5b9ac9 100644
--- a/arch/mips/lasat/prom.c
+++ b/arch/mips/lasat/prom.c
@@ -13,6 +13,7 @@
#include <asm/bootinfo.h>
#include <asm/lasat/lasat.h>
#include <asm/cpu.h>
+#include <asm/setup.h>
#include "at93c.h"
#include <asm/lasat/eeprom.h>
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 1cc306520a55..3a6f34ef5ffc 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -195,6 +195,7 @@
#endif
#else
PTR_SUBU t0, $0, a2
+ move a2, zero /* No remaining longs */
PTR_ADDIU t0, 1
STORE_BYTE(0)
STORE_BYTE(1)
@@ -231,16 +232,25 @@
#ifdef CONFIG_CPU_MIPSR6
.Lbyte_fixup\@:
- PTR_SUBU a2, $0, t0
+ /*
+ * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
+ * a2 = a2 - t0 + 1
+ */
+ PTR_SUBU a2, t0
jr ra
PTR_ADDIU a2, 1
#endif /* CONFIG_CPU_MIPSR6 */
.Lfirst_fixup\@:
+ /* unset_bytes already in a2 */
jr ra
nop
.Lfwd_fixup\@:
+ /*
+ * unset_bytes = partial_start_addr + #bytes - fault_addr
+ * a2 = t1 + (a2 & 3f) - $28->task->BUADDR
+ */
PTR_L t0, TI_TASK($28)
andi a2, 0x3f
LONG_L t0, THREAD_BUADDR(t0)
@@ -249,6 +259,10 @@
LONG_SUBU a2, t0
.Lpartial_fixup\@:
+ /*
+ * unset_bytes = partial_end_addr + #bytes - fault_addr
+ * a2 = a0 + (a2 & STORMASK) - $28->task->BUADDR
+ */
PTR_L t0, TI_TASK($28)
andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
@@ -257,10 +271,15 @@
LONG_SUBU a2, t0
.Llast_fixup\@:
+ /* unset_bytes already in a2 */
jr ra
nop
.Lsmall_fixup\@:
+ /*
+ * unset_bytes = end_addr - current_addr + 1
+ * a2 = t1 - a0 + 1
+ */
PTR_SUBU a2, t1, a0
jr ra
PTR_ADDIU a2, 1
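
[editor's note] All of the annotated fixups compute the same quantity for the
uaccess fault handler: how many bytes memset never reached, returned in a2.
A plain-C restatement of the simplest case (.Lsmall_fixup), purely to spell
out the register roles named in the new comments:

static unsigned long small_fixup(unsigned long end_addr   /* t1 */,
				 unsigned long fault_addr /* a0 */)
{
	/* unset_bytes = end_addr - current_addr + 1 */
	return end_addr - fault_addr + 1;
}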
diff --git a/arch/mips/loongson32/Platform b/arch/mips/loongson32/Platform
index ffe01c6d0037..a0dbb3b2f2de 100644
--- a/arch/mips/loongson32/Platform
+++ b/arch/mips/loongson32/Platform
@@ -1,8 +1,4 @@
-cflags-$(CONFIG_CPU_LOONGSON1) += \
- $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
- -Wa,-mips32r2 -Wa,--trap
-
+cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32 -Wa,--trap
platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
cflags-$(CONFIG_MACH_LOONGSON32) += -I$(srctree)/arch/mips/include/asm/mach-loongson32
-load-$(CONFIG_LOONGSON1_LS1B) += 0xffffffff80100000
-load-$(CONFIG_LOONGSON1_LS1C) += 0xffffffff80100000
+load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80100000
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig
index c79e6a565572..c865b4b9b775 100644
--- a/arch/mips/loongson64/Kconfig
+++ b/arch/mips/loongson64/Kconfig
@@ -91,7 +91,6 @@ config LOONGSON_MACH3X
select LOONGSON_MC146818
select ZONE_DMA32
select LEFI_FIRMWARE_INTERFACE
- select PHYS48_TO_HT40
help
Generic Loongson 3 family machines utilize the 3A/3B revision
of Loongson processor and RS780/SBX00 chipset.
@@ -130,10 +129,6 @@ config LOONGSON_UART_BASE
default y
depends on EARLY_PRINTK || SERIAL_8250
-config PHYS48_TO_HT40
- bool
- default y if CPU_LOONGSON3
-
config LOONGSON_MC146818
bool
default n
diff --git a/arch/mips/loongson64/common/Makefile b/arch/mips/loongson64/common/Makefile
index 8235ac7eac95..57ee03022941 100644
--- a/arch/mips/loongson64/common/Makefile
+++ b/arch/mips/loongson64/common/Makefile
@@ -6,6 +6,7 @@
obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
bonito-irq.o mem.o machtype.o platform.o serial.o
obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_CPU_LOONGSON2) += dma.o
#
# Serial port support
@@ -25,8 +26,3 @@ obj-$(CONFIG_CS5536) += cs5536/
#
obj-$(CONFIG_SUSPEND) += pm.o
-
-#
-# Big Memory (SWIOTLB) Support
-#
-obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
index f7c905e50dc4..92dc6bafc127 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
@@ -138,7 +138,7 @@ u32 pci_ohci_read_reg(int reg)
break;
case PCI_OHCI_INT_REG:
_rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
- if ((lo & 0x00000f00) == CS5536_USB_INTR)
+ if (((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR)
conf_data = 1;
break;
default:
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
deleted file mode 100644
index 6a739f8ae110..000000000000
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/swiotlb.h>
-#include <linux/bootmem.h>
-
-#include <asm/bootinfo.h>
-#include <boot_param.h>
-#include <dma-coherence.h>
-
-static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
-
- mb();
- return ret;
-}
-
-static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
- dir, attrs);
- mb();
- return daddr;
-}
-
-static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs);
- mb();
-
- return r;
-}
-
-static void loongson_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
-{
- swiotlb_sync_single_for_device(dev, dma_handle, size, dir);
- mb();
-}
-
-static void loongson_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
- swiotlb_sync_sg_for_device(dev, sg, nents, dir);
- mb();
-}
-
-static int loongson_dma_supported(struct device *dev, u64 mask)
-{
- if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits))
- return 0;
- return swiotlb_dma_supported(dev, mask);
-}
-
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- long nid;
-#ifdef CONFIG_PHYS48_TO_HT40
- /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
- * Loongson-3's 48bit address space and embed it into 40bit */
- nid = (paddr >> 44) & 0x3;
- paddr = ((nid << 44) ^ paddr) | (nid << 37);
-#endif
- return paddr;
-}
-
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- long nid;
-#ifdef CONFIG_PHYS48_TO_HT40
- /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
- * Loongson-3's 48bit address space and embed it into 40bit */
- nid = (daddr >> 37) & 0x3;
- daddr = ((nid << 37) ^ daddr) | (nid << 44);
-#endif
- return daddr;
-}
-
-static const struct dma_map_ops loongson_dma_map_ops = {
- .alloc = loongson_dma_alloc_coherent,
- .free = swiotlb_free,
- .map_page = loongson_dma_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = loongson_dma_map_sg,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = loongson_dma_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = loongson_dma_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = loongson_dma_supported,
-};
-
-void __init plat_swiotlb_setup(void)
-{
- swiotlb_init(1);
- mips_dma_map_ops = &loongson_dma_map_ops;
-}
diff --git a/arch/mips/loongson64/common/dma.c b/arch/mips/loongson64/common/dma.c
new file mode 100644
index 000000000000..48f04126bde2
--- /dev/null
+++ b/arch/mips/loongson64/common/dma.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr | 0x80000000;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
+ if (dma_addr > 0x8fffffff)
+ return dma_addr;
+ return dma_addr & 0x0fffffff;
+#else
+ return dma_addr & 0x7fffffff;
+#endif
+}
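
The new helpers encode the Loongson DMA window directly: devices reach RAM through a window based at 0x80000000, so bit 31 is set on the way out and masked off on the way back (64-bit Loongson-2F uses the narrower 0x0fffffff mask and passes high addresses through untouched). A minimal round-trip sketch of the default branch:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t phys_to_dma(uint64_t paddr) { return paddr | 0x80000000; }
    static uint64_t dma_to_phys(uint64_t daddr) { return daddr & 0x7fffffff; }

    int main(void)
    {
            uint64_t paddr = 0x00100000;    /* some RAM physical address */

            assert(phys_to_dma(paddr) == 0x80100000);
            assert(dma_to_phys(phys_to_dma(paddr)) == paddr);
            return 0;
    }
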
diff --git a/arch/mips/loongson64/common/early_printk.c b/arch/mips/loongson64/common/early_printk.c
index 6ca632e529dc..a782e2b24747 100644
--- a/arch/mips/loongson64/common/early_printk.c
+++ b/arch/mips/loongson64/common/early_printk.c
@@ -10,6 +10,7 @@
* option) any later version.
*/
#include <linux/serial_reg.h>
+#include <asm/setup.h>
#include <loongson.h>
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index 1e8a955ae5a8..8f68ee02a8c2 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -198,7 +198,8 @@ void __init prom_init_env(void)
break;
case PRID_REV_LOONGSON3A_R1:
case PRID_REV_LOONGSON3A_R2:
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
cpu_clock_freq = 900000000;
break;
case PRID_REV_LOONGSON3B_R1:
diff --git a/arch/mips/loongson64/loongson-3/Makefile b/arch/mips/loongson64/loongson-3/Makefile
index 44bc1482158b..b5a0c2fa5446 100644
--- a/arch/mips/loongson64/loongson-3/Makefile
+++ b/arch/mips/loongson64/loongson-3/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for Loongson-3 family machines
#
-obj-y += irq.o cop2-ex.o platform.o acpi_init.o
+obj-y += irq.o cop2-ex.o platform.o acpi_init.o dma.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/loongson64/loongson-3/dma.c b/arch/mips/loongson64/loongson-3/dma.c
new file mode 100644
index 000000000000..5e86635f71db
--- /dev/null
+++ b/arch/mips/loongson64/loongson-3/dma.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+#include <linux/init.h>
+#include <linux/swiotlb.h>
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ /* We extract the 2-bit node id (bits 44~47, only bits 44~45 used now)
+ * from Loongson-3's 48-bit address space and embed it into the 40-bit
+ * DMA address */
+ long nid = (paddr >> 44) & 0x3;
+ return ((nid << 44) ^ paddr) | (nid << 37);
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ /* We extract the 2-bit node id (bits 44~47, only bits 44~45 used now)
+ * from Loongson-3's 48-bit address space and embed it into the 40-bit
+ * DMA address */
+ long nid = (daddr >> 37) & 0x3;
+ return ((nid << 37) ^ daddr) | (nid << 44);
+}
+
+void __init plat_swiotlb_setup(void)
+{
+ swiotlb_init(1);
+}
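
The bit manipulation above swizzles the NUMA node id between two positions: physical addresses carry it in bits 44~45, while the 40-bit HyperTransport window carries it in bits 37~38. The XOR clears the id at its source position and the OR re-plants it at the destination; the round trip is exact as long as per-node offsets stay below bit 37 (128 GB per node), which the 40-bit window presumes. A standalone check:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t l3_phys_to_dma(uint64_t paddr)
    {
            uint64_t nid = (paddr >> 44) & 0x3;
            return ((nid << 44) ^ paddr) | (nid << 37);
    }

    static uint64_t l3_dma_to_phys(uint64_t daddr)
    {
            uint64_t nid = (daddr >> 37) & 0x3;
            return ((nid << 37) ^ daddr) | (nid << 44);
    }

    int main(void)
    {
            /* node 2, offset 0x12345678 within the node */
            uint64_t paddr = (2ULL << 44) | 0x12345678;

            assert(l3_phys_to_dma(paddr) == ((2ULL << 37) | 0x12345678));
            assert(l3_dma_to_phys(l3_phys_to_dma(paddr)) == paddr);
            return 0;
    }
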
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 8501109bb0f0..fea95d003269 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -682,7 +682,8 @@ void play_dead(void)
(void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
break;
case PRID_REV_LOONGSON3A_R2:
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
play_dead_at_ckseg1 =
(void *)CKSEG1ADDR((unsigned long)loongson3a_r2r3_play_dead);
break;
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index c463bdad45c7..3e5bb203c95a 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -3,7 +3,7 @@
# Makefile for the Linux/MIPS-specific parts of the memory manager.
#
-obj-y += cache.o dma-default.o extable.o fault.o \
+obj-y += cache.o extable.o fault.o \
gup.o init.o mmap.o page.o page-funcs.o \
pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o
@@ -17,6 +17,7 @@ obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o
obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o
obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e12dfa48b478..a9ef057c79fe 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -830,12 +830,13 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
return __r4k_flush_icache_range(start, end, true);
}
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
@@ -904,7 +906,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
bc_inv(addr, size);
__sync();
}
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
struct flush_cache_sigtramp_args {
struct mm_struct *mm;
@@ -1505,6 +1507,14 @@ static void probe_pcache(void)
if (c->dcache.flags & MIPS_CACHE_PINDEX)
c->dcache.flags &= ~MIPS_CACHE_ALIASES;
+ /*
+ * In systems with CM the icache fills from L2 or closer caches, and
+ * thus sees remote stores without needing to write them back any
+ * further than that.
+ */
+ if (mips_cm_present())
+ c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;
+
switch (current_cpu_type()) {
case CPU_20KC:
/*
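
The BUG_ON(size == 0) downgrades earlier in this file turn a driver bug from a kernel panic into a logged warning plus an early return: a zero-length cache operation is harmless to skip. A standalone model of the pattern; the macro here merely imitates the shape of the kernel's WARN_ON using a GNU C statement expression and is not the kernel's definition:

    #include <stdio.h>

    #define WARN_ON(cond) \
            ({ int __c = !!(cond); \
               if (__c) fprintf(stderr, "warning: %s\n", #cond); \
               __c; })

    static void dma_cache_wback_inv(unsigned long addr, unsigned long size)
    {
            if (WARN_ON(size == 0))
                    return;         /* fail soft instead of panicking */
            /* ... write back and invalidate [addr, addr + size) ... */
            (void)addr;
    }

    int main(void)
    {
            dma_cache_wback_inv(0x1000, 0); /* warns and returns, no crash */
            return 0;
    }
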
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 0d3c656feba0..70a523151ff3 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
@@ -65,7 +65,7 @@ void (*_dma_cache_inv)(unsigned long start, unsigned long size);
EXPORT_SYMBOL(_dma_cache_wback_inv);
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
/*
* We could optimize the case where the cache argument is not BCACHE but
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
deleted file mode 100644
index f9fef0028ca2..000000000000
--- a/arch/mips/mm/dma-default.c
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
- * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
- */
-
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-#include <linux/dma-contiguous.h>
-
-#include <asm/cache.h>
-#include <asm/cpu-type.h>
-#include <asm/io.h>
-
-#include <dma-coherence.h>
-
-#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
-/* User defined DMA coherency from command line. */
-enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
-EXPORT_SYMBOL_GPL(coherentio);
-int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
-
-static int __init setcoherentio(char *str)
-{
- coherentio = IO_COHERENCE_ENABLED;
- pr_info("Hardware DMA cache coherency (command line)\n");
- return 0;
-}
-early_param("coherentio", setcoherentio);
-
-static int __init setnocoherentio(char *str)
-{
- coherentio = IO_COHERENCE_DISABLED;
- pr_info("Software DMA cache coherency (command line)\n");
- return 0;
-}
-early_param("nocoherentio", setnocoherentio);
-#endif
-
-static inline struct page *dma_addr_to_page(struct device *dev,
- dma_addr_t dma_addr)
-{
- return pfn_to_page(
- plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- *
- * Note that the R14000 and R16000 should also be checked for in this
- * condition. However this function is only called on non-I/O-coherent
- * systems and only the R10000 and R12000 are used in such systems, the
- * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
- */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
-{
- if (plat_device_is_coherent(dev))
- return false;
-
- switch (boot_cpu_type()) {
- case CPU_R10000:
- case CPU_R12000:
- case CPU_BMIPS5000:
- return true;
-
- default:
- /*
- * Presence of MAARs suggests that the CPU supports
- * speculatively prefetching data, and therefore requires
- * the post-DMA flush/invalidate.
- */
- return cpu_has_maar;
- }
-}
-
-static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
-{
- gfp_t dma_flag;
-
-#ifdef CONFIG_ISA
- if (dev == NULL)
- dma_flag = __GFP_DMA;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
- if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
- dma_flag = __GFP_DMA;
- else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
- if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev == NULL ||
- dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
- dma_flag = __GFP_DMA;
- else
-#endif
- dma_flag = 0;
-
- /* Don't invoke OOM killer */
- gfp |= __GFP_NORETRY;
-
- return gfp | dma_flag;
-}
-
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret;
- struct page *page = NULL;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- gfp = massage_gfp_flags(dev, gfp);
-
- if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
- page = dma_alloc_from_contiguous(dev, count, get_order(size),
- gfp);
- if (!page)
- page = alloc_pages(gfp, get_order(size));
-
- if (!page)
- return NULL;
-
- ret = page_address(page);
- memset(ret, 0, size);
- *dma_handle = plat_map_dma_mem(dev, ret, size);
- if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
- !plat_device_is_coherent(dev)) {
- dma_cache_wback_inv((unsigned long) ret, size);
- ret = UNCAC_ADDR(ret);
- }
-
- return ret;
-}
-
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- unsigned long addr = (unsigned long) vaddr;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct page *page = NULL;
-
- plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
-
- if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
- addr = CAC_ADDR(addr);
-
- page = virt_to_page((void *) addr);
-
- if (!dma_release_from_contiguous(dev, page, count))
- __free_pages(page, get_order(size));
-}
-
-static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- unsigned long user_count = vma_pages(vma);
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long addr = (unsigned long)cpu_addr;
- unsigned long off = vma->vm_pgoff;
- unsigned long pfn;
- int ret = -ENXIO;
-
- if (!plat_device_is_coherent(dev))
- addr = CAC_ADDR(addr);
-
- pfn = page_to_pfn(virt_to_page((void *)addr));
-
- if (attrs & DMA_ATTR_WRITE_COMBINE)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
- if (off < count && user_count <= (count - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
- }
-
- return ret;
-}
-
-static inline void __dma_sync_virtual(void *addr, size_t size,
- enum dma_data_direction direction)
-{
- switch (direction) {
- case DMA_TO_DEVICE:
- dma_cache_wback((unsigned long)addr, size);
- break;
-
- case DMA_FROM_DEVICE:
- dma_cache_inv((unsigned long)addr, size);
- break;
-
- case DMA_BIDIRECTIONAL:
- dma_cache_wback_inv((unsigned long)addr, size);
- break;
-
- default:
- BUG();
- }
-}
-
-/*
- * A single sg entry may refer to multiple physically contiguous
- * pages. But we still need to process highmem pages individually.
- * If highmem is not configured then the bulk of this loop gets
- * optimized out.
- */
-static inline void __dma_sync(struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction)
-{
- size_t left = size;
-
- do {
- size_t len = left;
-
- if (PageHighMem(page)) {
- void *addr;
-
- if (offset + len > PAGE_SIZE) {
- if (offset >= PAGE_SIZE) {
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
- }
- len = PAGE_SIZE - offset;
- }
-
- addr = kmap_atomic(page);
- __dma_sync_virtual(addr + offset, len, direction);
- kunmap_atomic(addr);
- } else
- __dma_sync_virtual(page_address(page) + offset,
- size, direction);
- offset = 0;
- page++;
- left -= len;
- } while (left);
-}
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, unsigned long attrs)
-{
- if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(dma_addr_to_page(dev, dma_addr),
- dma_addr & ~PAGE_MASK, size, direction);
- plat_post_dma_flush(dev);
- plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction, unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- if (!plat_device_is_coherent(dev) &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
- sg->offset;
- }
-
- return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(page, offset, size, direction);
-
- return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nhwentries, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nhwentries, i) {
- if (!plat_device_is_coherent(dev) &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- direction != DMA_TO_DEVICE)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
- }
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
- plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- if (cpu_needs_post_dma_flush(dev)) {
- for_each_sg(sglist, sg, nelems, i) {
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
- }
- plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- if (!plat_device_is_coherent(dev)) {
- for_each_sg(sglist, sg, nelems, i) {
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
- }
-}
-
-static int mips_dma_supported(struct device *dev, u64 mask)
-{
- return plat_dma_supported(dev, mask);
-}
-
-static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- if (!plat_device_is_coherent(dev))
- __dma_sync_virtual(vaddr, size, direction);
-}
-
-static const struct dma_map_ops mips_default_dma_map_ops = {
- .alloc = mips_dma_alloc_coherent,
- .free = mips_dma_free_coherent,
- .mmap = mips_dma_mmap,
- .map_page = mips_dma_map_page,
- .unmap_page = mips_dma_unmap_page,
- .map_sg = mips_dma_map_sg,
- .unmap_sg = mips_dma_unmap_sg,
- .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
- .sync_single_for_device = mips_dma_sync_single_for_device,
- .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
- .sync_sg_for_device = mips_dma_sync_sg_for_device,
- .dma_supported = mips_dma_supported,
- .cache_sync = mips_dma_cache_sync,
-};
-
-const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
-EXPORT_SYMBOL(mips_dma_map_ops);
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
new file mode 100644
index 000000000000..2aca1236af36
--- /dev/null
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ */
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/dma-contiguous.h>
+#include <linux/highmem.h>
+
+#include <asm/cache.h>
+#include <asm/cpu-type.h>
+#include <asm/dma-coherence.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+static inline int dev_is_coherent(struct device *dev)
+{
+ return dev->archdata.dma_coherent;
+}
+#else
+static inline int dev_is_coherent(struct device *dev)
+{
+ switch (coherentio) {
+ default:
+ case IO_COHERENCE_DEFAULT:
+ return hw_coherentio;
+ case IO_COHERENCE_ENABLED:
+ return 1;
+ case IO_COHERENCE_DISABLED:
+ return 0;
+ }
+}
+#endif /* CONFIG_DMA_PERDEV_COHERENT */
+
+/*
+ * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
+ * fill random cachelines with stale data at any time, requiring an extra
+ * flush post-DMA.
+ *
+ * A warning on terminology: Linux calls an uncached area coherent, while
+ * MIPS terminology calls memory areas with hardware-maintained coherency
+ * coherent.
+ *
+ * Note that the R14000 and R16000 should also be checked for in this
+ * condition. However, this function is only called on non-I/O-coherent
+ * systems, and only the R10000 and R12000 are used in such systems: the
+ * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
+ */
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
+{
+ if (dev_is_coherent(dev))
+ return false;
+
+ switch (boot_cpu_type()) {
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_BMIPS5000:
+ return true;
+ default:
+ /*
+ * Presence of MAARs suggests that the CPU supports
+ * speculatively prefetching data, and therefore requires
+ * the post-DMA flush/invalidate.
+ */
+ return cpu_has_maar;
+ }
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ void *ret;
+
+ ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!ret)
+ return NULL;
+
+ if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+ dma_cache_wback_inv((unsigned long) ret, size);
+ ret = (void *)UNCAC_ADDR(ret);
+ }
+
+ return ret;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev))
+ cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
+ dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long user_count = vma_pages(vma);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long addr = (unsigned long)cpu_addr;
+ unsigned long off = vma->vm_pgoff;
+ unsigned long pfn;
+ int ret = -ENXIO;
+
+ if (!dev_is_coherent(dev))
+ addr = CAC_ADDR(addr);
+
+ pfn = page_to_pfn(virt_to_page((void *)addr));
+
+ if (attrs & DMA_ATTR_WRITE_COMBINE)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < count && user_count <= (count - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
+static inline void dma_sync_virt(void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ dma_cache_wback((unsigned long)addr, size);
+ break;
+
+ case DMA_FROM_DEVICE:
+ dma_cache_inv((unsigned long)addr, size);
+ break;
+
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv((unsigned long)addr, size);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/*
+ * A single sg entry may refer to multiple physically contiguous pages. But
+ * we still need to process highmem pages individually. If highmem is not
+ * configured then the bulk of this loop gets optimized out.
+ */
+static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+ unsigned long offset = paddr & ~PAGE_MASK;
+ size_t left = size;
+
+ do {
+ size_t len = left;
+
+ if (PageHighMem(page)) {
+ void *addr;
+
+ if (offset + len > PAGE_SIZE) {
+ if (offset >= PAGE_SIZE) {
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
+ }
+ len = PAGE_SIZE - offset;
+ }
+
+ addr = kmap_atomic(page);
+ dma_sync_virt(addr + offset, len, dir);
+ kunmap_atomic(addr);
+ } else
+ dma_sync_virt(page_address(page) + offset, size, dir);
+ offset = 0;
+ page++;
+ left -= len;
+ } while (left);
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ if (!dev_is_coherent(dev))
+ dma_sync_phys(paddr, size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ if (cpu_needs_post_dma_flush(dev))
+ dma_sync_phys(paddr, size, dir);
+}
+
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (!dev_is_coherent(dev))
+ dma_sync_virt(vaddr, size, direction);
+}
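
dma_sync_virt() above encodes the standard rule for noncoherent DMA: before the device reads (DMA_TO_DEVICE), dirty CPU cache lines must be written back; before the CPU reads what the device wrote (DMA_FROM_DEVICE), stale lines must be invalidated; bidirectional buffers need both. A tiny standalone model of that mapping:

    #include <string.h>

    enum dir { TO_DEVICE, FROM_DEVICE, BIDIRECTIONAL };

    static const char *cache_op(enum dir d)
    {
            switch (d) {
            case TO_DEVICE:     return "writeback";            /* flush dirty lines to RAM */
            case FROM_DEVICE:   return "invalidate";           /* drop stale lines */
            case BIDIRECTIONAL: return "writeback+invalidate"; /* both hazards apply */
            }
            return "?";
    }

    int main(void)
    {
            return strcmp(cache_op(FROM_DEVICE), "invalidate"); /* 0 on success */
    }
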
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 5f71f2b903b7..73d8a0f0b810 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -43,7 +43,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
struct mm_struct *mm = tsk->mm;
const int field = sizeof(unsigned long) * 2;
int si_code;
- int fault;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 1986e09fb457..1601d90b087b 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -9,6 +9,7 @@
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
+#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
return error;
}
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg)
+{
+ unsigned long i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (pfn_valid(start_pfn + i) &&
+ !PageReserved(pfn_to_page(start_pfn + i)))
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* Generic mapping function (not visible outside):
*/
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
+ unsigned long offset, pfn, last_pfn;
struct vm_struct * area;
- unsigned long offset;
phys_addr_t last_addr;
void * addr;
@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
return (void __iomem *) CKSEG1ADDR(phys_addr);
/*
- * Don't allow anybody to remap normal RAM that we're using..
+ * Don't allow anybody to remap RAM that may be allocated by the page
+ * allocator, since that could lead to races & data clobbering.
*/
- if (phys_addr < virt_to_phys(high_memory)) {
- char *t_addr, *t_end;
- struct page *page;
-
- t_addr = __va(phys_addr);
- t_end = t_addr + (size - 1);
-
- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
- if(!PageReserved(page))
- return NULL;
+ pfn = PFN_DOWN(phys_addr);
+ last_pfn = PFN_DOWN(last_addr);
+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ __ioremap_check_ram) == 1) {
+ WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+ &phys_addr, &last_addr);
+ return NULL;
}
/*
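
The replacement check computes an inclusive PFN window and hands it to walk_system_ram_range(), whose callback returns nonzero to stop the walk; a result of 1 therefore means "at least one allocatable RAM page overlaps the requested range". The inclusive arithmetic is worth spelling out (PAGE_SHIFT of 12 assumed for the example):

    #include <assert.h>

    #define PAGE_SHIFT  12
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long phys_addr = 0x1000, size = 0x2000;
            unsigned long last_addr = phys_addr + size - 1;  /* inclusive end */
            unsigned long pfn = PFN_DOWN(phys_addr);
            unsigned long last_pfn = PFN_DOWN(last_addr);

            /* pages 1 and 2 are covered, so the walk spans two PFNs */
            assert(last_pfn - pfn + 1 == 2);
            return 0;
    }
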
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index d5d02993aa21..56e4f8bffd4c 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -623,21 +623,6 @@ struct dmadscr {
u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
-void sb1_dma_init(void)
-{
- int i;
-
- for (i = 0; i < DM_NUM_CHANNELS; i++) {
- const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
- V_DM_DSCR_BASE_RINGSZ(1);
- void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
-
- __raw_writeq(base_val, base_reg);
- __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
- __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
- }
-}
-
void clear_page(void *page)
{
u64 to_phys = CPHYSADDR((unsigned long)page);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 79b9f2ad3ff5..49312a14cd17 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1509,7 +1509,7 @@ static void setup_pw(void)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
write_c0_pwctl(1 << 6 | psn);
#endif
- write_c0_kpgd(swapper_pg_dir);
+ write_c0_kpgd((long)swapper_pg_dir);
kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
}
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 9bb6baa45da3..24e5b0d06899 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -19,7 +19,6 @@
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
-#define UASM_ISA _UASM_ISA_MICROMIPS
#include <asm/uasm.h>
#define RS_MASK 0x1f
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 9fea6c6bbf49..60ceb93c71a0 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -19,7 +19,6 @@
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
-#define UASM_ISA _UASM_ISA_CLASSIC
#include <asm/uasm.h>
#define RS_MASK 0x1f
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 63940bdce698..17c7fd471a27 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -13,11 +13,9 @@ obj-y += malta-init.o
obj-y += malta-int.o
obj-y += malta-memory.o
obj-y += malta-platform.o
-obj-y += malta-reset.o
obj-y += malta-setup.o
obj-y += malta-time.o
obj-$(CONFIG_MIPS_CMP) += malta-amon.o
-obj-$(CONFIG_MIPS_MALTA_PM) += malta-pm.o
CFLAGS_malta-dtshim.o = -I$(src)/../../../scripts/dtc/libfdt
diff --git a/arch/mips/mti-malta/malta-pm.c b/arch/mips/mti-malta/malta-pm.c
deleted file mode 100644
index efbd659fb602..000000000000
--- a/arch/mips/mti-malta/malta-pm.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2014 Imagination Technologies
- * Author: Paul Burton <paul.burton@mips.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/pci.h>
-
-#include <asm/mach-malta/malta-pm.h>
-
-static struct pci_bus *pm_pci_bus;
-static resource_size_t pm_io_offset;
-
-int mips_pm_suspend(unsigned state)
-{
- int spec_devid;
- u16 sts;
-
- if (!pm_pci_bus || !pm_io_offset)
- return -ENODEV;
-
- /* Ensure the power button status is clear */
- while (1) {
- sts = inw(pm_io_offset + PIIX4_FUNC3IO_PMSTS);
- if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS))
- break;
- outw(sts, pm_io_offset + PIIX4_FUNC3IO_PMSTS);
- }
-
- /* Enable entry to suspend */
- outw(state | PIIX4_FUNC3IO_PMCNTRL_SUS_EN,
- pm_io_offset + PIIX4_FUNC3IO_PMCNTRL);
-
- /* If the special cycle occurs too soon this doesn't work... */
- mdelay(10);
-
- /*
- * The PIIX4 will enter the suspend state only after seeing a special
- * cycle with the correct magic data on the PCI bus. Generate that
- * cycle now.
- */
- spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7));
- pci_bus_write_config_dword(pm_pci_bus, spec_devid, 0,
- PIIX4_SUSPEND_MAGIC);
-
- /* Give the system some time to power down */
- mdelay(1000);
-
- return 0;
-}
-
-static int __init malta_pm_setup(void)
-{
- struct pci_dev *dev;
- int res, io_region = PCI_BRIDGE_RESOURCES;
-
- /* Find a reference to the PCI bus */
- pm_pci_bus = pci_find_next_bus(NULL);
- if (!pm_pci_bus) {
- pr_warn("malta-pm: failed to find reference to PCI bus\n");
- return -ENODEV;
- }
-
- /* Find the PIIX4 PM device */
- dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
- PCI_ANY_ID, NULL);
- if (!dev) {
- pr_warn("malta-pm: failed to find PIIX4 PM\n");
- return -ENODEV;
- }
-
- /* Request access to the PIIX4 PM IO registers */
- res = pci_request_region(dev, io_region, "PIIX4 PM IO registers");
- if (res) {
- pr_warn("malta-pm: failed to request PM IO registers (%d)\n",
- res);
- pci_dev_put(dev);
- return -ENODEV;
- }
-
- /* Find the offset to the PIIX4 PM IO registers */
- pm_io_offset = pci_resource_start(dev, io_region);
-
- pci_dev_put(dev);
- return 0;
-}
-
-late_initcall(malta_pm_setup);
diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c
deleted file mode 100644
index dd6f62ad4417..000000000000
--- a/arch/mips/mti-malta/malta-reset.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/io.h>
-#include <linux/pm.h>
-#include <linux/reboot.h>
-
-#include <asm/reboot.h>
-#include <asm/mach-malta/malta-pm.h>
-
-static void mips_machine_power_off(void)
-{
- mips_pm_suspend(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF);
-
- pr_info("Failed to power down, resetting\n");
- machine_restart(NULL);
-}
-
-static int __init mips_reboot_setup(void)
-{
- pm_power_off = mips_machine_power_off;
-
- return 0;
-}
-arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 7b63914d2e58..5d4c5e5fbd69 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -26,6 +26,7 @@
#include <linux/screen_info.h>
#include <linux/time.h>
+#include <asm/dma-coherence.h>
#include <asm/fw/fw.h>
#include <asm/mach-malta/malta-dtshim.h>
#include <asm/mips-cps.h>
@@ -144,12 +145,6 @@ static int __init plat_enable_iocoherency(void)
static void __init plat_setup_iocoherency(void)
{
-#ifdef CONFIG_DMA_NONCOHERENT
- /*
- * Kernel has been configured with software coherency
- * but we might choose to turn it off and use hardware
- * coherency instead.
- */
if (plat_enable_iocoherency()) {
if (coherentio == IO_COHERENCE_DISABLED)
pr_info("Hardware DMA cache coherency disabled\n");
@@ -161,10 +156,6 @@ static void __init plat_setup_iocoherency(void)
else
pr_info("Software DMA cache coherency enabled\n");
}
-#else
- if (!plat_enable_iocoherency())
- panic("Hardware DMA cache coherency not supported!");
-#endif
}
static void __init pci_clock_check(void)
@@ -226,29 +217,6 @@ static void __init bonito_quirks_setup(void)
pr_info("Enabled Bonito debug mode\n");
} else
BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
-
-#ifdef CONFIG_DMA_COHERENT
- if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
- BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
- pr_info("Enabled Bonito CPU coherency\n");
-
- argptr = fw_getcmdline();
- if (strstr(argptr, "iobcuncached")) {
- BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
- BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
- ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
- BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
- pr_info("Disabled Bonito IOBC coherency\n");
- } else {
- BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
- BONITO_PCIMEMBASECFG |=
- (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
- BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
- pr_info("Enabled Bonito IOBC coherency\n");
- }
- } else
- panic("Hardware DMA cache coherency not supported");
-#endif
}
void __init *plat_get_fdt(void)
@@ -279,11 +247,6 @@ void __init plat_mem_setup(void)
*/
enable_dma(4);
-#ifdef CONFIG_DMA_COHERENT
- if (mips_revision_sconid != MIPS_REVISION_SCON_BONITO)
- panic("Hardware DMA cache coherency not supported");
-#endif
-
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
bonito_quirks_setup();
diff --git a/arch/mips/netlogic/common/earlycons.c b/arch/mips/netlogic/common/earlycons.c
index 769f93032c53..8f5bc1597550 100644
--- a/arch/mips/netlogic/common/earlycons.c
+++ b/arch/mips/netlogic/common/earlycons.c
@@ -36,6 +36,7 @@
#include <linux/serial_reg.h>
#include <asm/mipsregs.h>
+#include <asm/setup.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>
diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c
index 856a6e6d296e..b5ba83f4c646 100644
--- a/arch/mips/netlogic/xlp/dt.c
+++ b/arch/mips/netlogic/xlp/dt.c
@@ -93,17 +93,3 @@ void __init device_tree_init(void)
{
unflatten_and_copy_device_tree();
}
-
-static struct of_device_id __initdata xlp_ids[] = {
- { .compatible = "simple-bus", },
- {},
-};
-
-int __init xlp8xx_ds_publish_devices(void)
-{
- if (!of_have_populated_dt())
- return 0;
- return of_platform_bus_probe(NULL, xlp_ids, NULL);
-}
-
-device_initcall(xlp8xx_ds_publish_devices);
diff --git a/arch/mips/paravirt/serial.c b/arch/mips/paravirt/serial.c
index 02b665c02272..a37b6f9f0ede 100644
--- a/arch/mips/paravirt/serial.c
+++ b/arch/mips/paravirt/serial.c
@@ -9,16 +9,15 @@
#include <linux/kernel.h>
#include <linux/virtio_console.h>
#include <linux/kvm_para.h>
+#include <asm/setup.h>
/*
* Emit one character to the boot console.
*/
-int prom_putchar(char c)
+void prom_putchar(char c)
{
kvm_hypercall3(KVM_HC_MIPS_CONSOLE_OUTPUT, 0 /* port 0 */,
(unsigned long)&c, 1 /* len == 1 */);
-
- return 1;
}
#ifdef CONFIG_VIRTIO_CONSOLE
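
This hunk is part of a tree-wide change giving every platform's boot-console hook one common prototype, presumably declared in <asm/setup.h> (hence the new include): a putchar that cannot fail has nothing useful to return. A userspace sketch with a stand-in implementation and a hypothetical string helper:

    #include <stdio.h>

    /* presumed common prototype, now declared in <asm/setup.h> */
    void prom_putchar(char c);

    /* stand-in implementation so the sketch runs in userspace */
    void prom_putchar(char c) { putchar(c); }

    static void prom_puts(const char *s)    /* hypothetical helper */
    {
            while (*s)
                    prom_putchar(*s++);
    }

    int main(void)
    {
            prom_puts("early console up\n");
            return 0;
    }
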
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
index 57e1463fcd02..a1d2c4ae0d1b 100644
--- a/arch/mips/pci/ops-bridge.c
+++ b/arch/mips/pci/ops-bridge.c
@@ -167,7 +167,7 @@ oh_my_gawd:
static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 * value)
{
- if (bus->number > 0)
+ if (!pci_is_root_bus(bus))
return pci_conf1_read_config(bus, devfn, where, size, value);
return pci_conf0_read_config(bus, devfn, where, size, value);
@@ -310,7 +310,7 @@ oh_my_gawd:
static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 value)
{
- if (bus->number > 0)
+ if (!pci_is_root_bus(bus))
return pci_conf1_write_config(bus, devfn, where, size, value);
return pci_conf0_write_config(bus, devfn, where, size, value);
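
On IP27 the root bus is not guaranteed to be bus number 0, so testing bus->number misroutes config cycles; pci_is_root_bus() instead asks whether the bus has a parent bridge. A standalone sketch with a mock struct (roughly what the <linux/pci.h> helper reduces to):

    #include <stdbool.h>
    #include <stddef.h>

    struct pci_bus {
            struct pci_bus *parent;
            unsigned char number;
    };

    /* roughly what pci_is_root_bus() does */
    static bool is_root_bus(const struct pci_bus *bus)
    {
            return bus->parent == NULL;
    }

    int main(void)
    {
            struct pci_bus root = { .parent = NULL, .number = 5 };

            /* a root bus may carry a nonzero number, so "number > 0"
             * would wrongly treat it as a subordinate bus */
            return is_root_bus(&root) ? 0 : 1;
    }
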
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index b4fa6413c4e5..c539d0d2b0cf 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -149,6 +149,13 @@
#define AR2315_PCI_HOST_SLOT 3
#define AR2315_PCI_HOST_DEVID ((0xff18 << 16) | PCI_VENDOR_ID_ATHEROS)
+/*
+ * We need some arbitrary non-zero value to be programmed into the BAR1
+ * register of the PCI host controller to enable DMA. The same value is then
+ * used as the offset when calculating the DMA addresses of buffers for PCI
+ * devices.
+ */
+#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
+
/* ??? access BAR */
#define AR2315_PCI_HOST_MBAR0 0x10000000
/* RAM access BAR */
@@ -167,6 +174,23 @@ struct ar2315_pci_ctrl {
struct resource io_res;
};
+static inline dma_addr_t ar2315_dev_offset(struct device *dev)
+{
+ if (dev && dev_is_pci(dev))
+ return AR2315_PCI_HOST_SDRAM_BASEADDR;
+ return 0;
+}
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr + ar2315_dev_offset(dev);
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr - ar2315_dev_offset(dev);
+}
+
static inline struct ar2315_pci_ctrl *ar2315_pci_bus_to_apc(struct pci_bus *bus)
{
struct pci_controller *hose = bus->sysdata;
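
Only PCI devices get the BAR1 offset; everything else addresses SDRAM directly, which is why the helper keys off dev_is_pci(). A worked example of both views (standalone, with the base taken from the definition above):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000u

    static uint32_t dev_offset(bool is_pci)
    {
            return is_pci ? AR2315_PCI_HOST_SDRAM_BASEADDR : 0;
    }

    int main(void)
    {
            uint32_t paddr = 0x00100000;    /* SDRAM physical address */

            /* PCI devices reach RAM through the BAR1 window ... */
            assert(paddr + dev_offset(true) == 0x20100000u);
            /* ... while everything else uses the identity mapping */
            assert(paddr + dev_offset(false) == paddr);
            return 0;
    }
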
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 1e23c8d587bd..64b58cc48a91 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -12,14 +12,18 @@
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
+#define AR724X_PCI_REG_APP 0x00
#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
#define AR724X_PCI_REG_INT_MASK 0x50
+#define AR724X_PCI_APP_LTSSM_ENABLE BIT(0)
+
#define AR724X_PCI_RESET_LINK_UP BIT(0)
#define AR724X_PCI_INT_DEV0 BIT(14)
@@ -325,6 +329,37 @@ static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc,
apc);
}
+static void ar724x_pci_hw_init(struct ar724x_pci_controller *apc)
+{
+ u32 ppl, app;
+ int wait = 0;
+
+ /* deassert PCIe host controller and PCIe PHY reset */
+ ath79_device_reset_clear(AR724X_RESET_PCIE);
+ ath79_device_reset_clear(AR724X_RESET_PCIE_PHY);
+
+ /* take the PCIe PLL out of reset */
+ ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
+ ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET;
+ ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);
+
+ /* deassert bypass for the PCIe PLL */
+ ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
+ ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS;
+ ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);
+
+ /* set PCIe Application Control to ready */
+ app = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_APP);
+ app |= AR724X_PCI_APP_LTSSM_ENABLE;
+ __raw_writel(app, apc->ctrl_base + AR724X_PCI_REG_APP);
+
+ /* wait up to 100ms for PHY link up */
+ do {
+ mdelay(10);
+ wait++;
+ } while (wait < 10 && !ar724x_pci_check_link(apc));
+}
+
static int ar724x_pci_probe(struct platform_device *pdev)
{
struct ar724x_pci_controller *apc;
@@ -383,6 +418,13 @@ static int ar724x_pci_probe(struct platform_device *pdev)
apc->pci_controller.io_resource = &apc->io_res;
apc->pci_controller.mem_resource = &apc->mem_res;
+ /*
+ * Do the full PCIe Root Complex initialization sequence if the PCIe
+ * host controller is in reset.
+ */
+ if (ath79_reset_rr(AR724X_RESET_REG_RESET_MODULE) & AR724X_RESET_PCIE)
+ ar724x_pci_hw_init(apc);
+
apc->link_up = ar724x_pci_check_link(apc);
if (!apc->link_up)
dev_warn(&pdev->dev, "PCIe link is down\n");
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 0f09eafa5e3a..c94a66070a60 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/smp.h>
+#include <linux/dma-direct.h>
#include <asm/sn/arch.h>
#include <asm/pci/bridge.h>
#include <asm/paccess.h>
@@ -182,6 +183,19 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
return 0;
}
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
+
+ return bc->baddr + paddr;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr & ~(0xffUL << 56);
+}
+
/*
* Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
* to find the slot number in sense of the bridge device register.
@@ -200,17 +214,6 @@ static inline void pci_disable_swapping(struct pci_dev *dev)
bridge->b_widget.w_tflush; /* Flush */
}
-static inline void pci_enable_swapping(struct pci_dev *dev)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
- bridge_t *bridge = bc->base;
- int slot = PCI_SLOT(dev->devfn);
-
- /* Turn on byte swapping */
- bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
- bridge->b_widget.w_tflush; /* Flush */
-}
-
static void pci_fixup_ioc3(struct pci_dev *d)
{
pci_disable_swapping(d);
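
On IP27 the DMA address is the physical address plus a per-bridge base (bc->baddr), which places bridge attributes in the top byte of the 64-bit address; the reverse direction simply masks that byte off. A standalone sketch with a hypothetical attribute byte:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t baddr = 0xa0ULL << 56;    /* hypothetical per-bridge base */
            uint64_t paddr = 0x123456789ULL;
            uint64_t daddr = baddr + paddr;

            /* __dma_to_phys(): strip the attribute byte in bits 56..63 */
            assert((daddr & ~(0xffULL << 56)) == paddr);
            return 0;
    }
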
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 3e92a06fa772..5017d5843c5a 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -21,8 +21,6 @@
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/pci-octeon.h>
-#include <dma-coherence.h>
-
#define USE_OCTEON_INTERNAL_ARBITER
/*
@@ -166,8 +164,6 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
}
- dev->dev.dma_ops = octeon_pci_dma_map_ops;
-
return 0;
}
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 9632436d74d7..c2e94cf5ecda 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
phys_addr_t size = resource_size(rsrc);
*start = fixup_bigphys_addr(rsrc->start, size);
- *end = rsrc->start + size;
+ *end = rsrc->start + size - 1;
}
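
The one-liner above fixes an off-by-one: resource ranges are inclusive, so a BAR of size 0x100 starting at 0x1000 occupies 0x1000..0x10ff, and the user-visible end must be start + size - 1, not start + size. In miniature:

    #include <assert.h>

    int main(void)
    {
            unsigned long start = 0x1000, size = 0x100;

            assert(start + size - 1 == 0x10ff);  /* last byte, inclusive */
            return 0;
    }
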
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 87ba86bd8696..d919a0d813a1 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -94,8 +94,6 @@ union cvmx_pcie_address {
static int cvmx_pcie_rc_initialize(int pcie_port);
-#include <dma-coherence.h>
-
/**
* Return the Core virtual base address for PCIe IO access. IOs are
* read/written as an offset from this address.
@@ -1239,14 +1237,14 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
/* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
if (pcie_port) {
- union cvmx_ciu_qlm1 ciu_qlm;
+ union cvmx_ciu_qlm ciu_qlm;
ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
ciu_qlm.s.txbypass = 1;
ciu_qlm.s.txdeemph = 5;
ciu_qlm.s.txmargin = 0x17;
cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
} else {
- union cvmx_ciu_qlm0 ciu_qlm;
+ union cvmx_ciu_qlm ciu_qlm;
ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
ciu_qlm.s.txbypass = 1;
ciu_qlm.s.txdeemph = 5;
diff --git a/arch/mips/pic32/pic32mzda/early_console.c b/arch/mips/pic32/pic32mzda/early_console.c
index d7b783463fac..8ed4961b1271 100644
--- a/arch/mips/pic32/pic32mzda/early_console.c
+++ b/arch/mips/pic32/pic32mzda/early_console.c
@@ -13,6 +13,7 @@
*/
#include <asm/mach-pic32/pic32.h>
#include <asm/fw/fw.h>
+#include <asm/setup.h>
#include "pic32mzda.h"
#include "early_pin.h"
@@ -157,7 +158,7 @@ void __init fw_init_early_console(char port)
setup_early_console(port, baud);
}
-int prom_putchar(char c)
+void prom_putchar(char c)
{
if (console_port >= 0) {
while (__raw_readl(
@@ -166,6 +167,4 @@ int prom_putchar(char c)
__raw_writel(c, uart_base + U_TXR(console_port));
}
-
- return 1;
}
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
index 3c59ffe5f5f5..ecd30ddfb3db 100644
--- a/arch/mips/ralink/early_printk.c
+++ b/arch/mips/ralink/early_printk.c
@@ -10,6 +10,7 @@
#include <linux/serial_reg.h>
#include <asm/addrspace.h>
+#include <asm/setup.h>
#ifdef CONFIG_SOC_RT288X
#define EARLY_UART_BASE 0x300c00
@@ -68,7 +69,7 @@ static void find_uart_base(void)
}
}
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
{
if (!init_complete) {
find_uart_base();
@@ -76,13 +77,13 @@ void prom_putchar(unsigned char ch)
}
if (IS_ENABLED(CONFIG_SOC_MT7621) || soc_is_mt7628()) {
- uart_w32(ch, UART_TX);
+ uart_w32((unsigned char)ch, UART_TX);
while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
;
} else {
while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
;
- uart_w32(ch, UART_REG_TX);
+ uart_w32((unsigned char)ch, UART_REG_TX);
while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
;
}
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
index 45fdfbcbd4c6..6bdb48d41276 100644
--- a/arch/mips/sgi-ip27/ip27-console.c
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -7,6 +7,7 @@
*/
#include <asm/page.h>
+#include <asm/setup.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn0/hub.h>
#include <asm/sn/klconfig.h>
diff --git a/arch/mips/sgi-ip32/Makefile b/arch/mips/sgi-ip32/Makefile
index 60f0227425e7..4745cd94df11 100644
--- a/arch/mips/sgi-ip32/Makefile
+++ b/arch/mips/sgi-ip32/Makefile
@@ -4,4 +4,4 @@
#
obj-y += ip32-berr.o ip32-irq.o ip32-platform.o ip32-setup.o ip32-reset.o \
- crime.o ip32-memory.o
+ crime.o ip32-memory.o ip32-dma.o
diff --git a/arch/mips/sgi-ip32/ip32-dma.c b/arch/mips/sgi-ip32/ip32-dma.c
new file mode 100644
index 000000000000..fa7b17cb5385
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-dma.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/dma-direct.h>
+#include <asm/ip32/crime.h>
+
+/*
+ * Few notes.
+ * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
+ * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
+ * native-endian)
+ * 3. All other devices see memory as one big chunk at 0x40000000
+ * 4. Non-PCI devices will pass NULL as struct device*
+ *
+ * Thus we translate differently, depending on device.
+ */
+
+#define RAM_OFFSET_MASK 0x3fffffffUL
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ dma_addr_t dma_addr = paddr & RAM_OFFSET_MASK;
+
+ if (!dev)
+ dma_addr += CRIME_HI_MEM_BASE;
+ return dma_addr;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ phys_addr_t paddr = dma_addr & RAM_OFFSET_MASK;
+
+ if (dma_addr >= 256*1024*1024)
+ paddr += CRIME_HI_MEM_BASE;
+ return paddr;
+}
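
Following the notes above, a CPU address in the high chunk translates for a non-PCI device by masking down to the RAM offset and adding the device window base back, and __dma_to_phys() undoes this via its 256 MB threshold. A standalone round-trip check (CRIME_HI_MEM_BASE of 0x40000000 assumed from the notes):

    #include <assert.h>
    #include <stdint.h>

    #define RAM_OFFSET_MASK   0x3fffffffULL
    #define CRIME_HI_MEM_BASE 0x40000000ULL  /* assumed, per the notes above */

    int main(void)
    {
            /* CPU address in the high chunk, as seen by a non-PCI device */
            uint64_t paddr = CRIME_HI_MEM_BASE + 0x00200000;
            uint64_t daddr = (paddr & RAM_OFFSET_MASK) + CRIME_HI_MEM_BASE;

            /* reverse: anything at or above 256M maps back to the high chunk */
            uint64_t back = daddr & RAM_OFFSET_MASK;
            if (daddr >= 256 * 1024 * 1024)
                    back += CRIME_HI_MEM_BASE;
            assert(back == paddr);
            return 0;
    }
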
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index f4dbce25bc6a..7ec278d72096 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -70,7 +70,6 @@ config SIBYTE_BCM1x55
config SIBYTE_SB1xxx_SOC
bool
- select DMA_COHERENT
select IRQ_MIPS_CPU
select SWAP_IO_SPACE
select SYS_SUPPORTS_32BIT_KERNEL
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index 115399202eab..092fb2a6ec4a 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -27,6 +27,7 @@
#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/sibyte/board.h>
#include <asm/smp-ops.h>
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 1791a44ee570..f6d9182ef82a 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/txx9/ndfmc.h>
#include <linux/serial_core.h>
#include <linux/mtd/physmap.h>
#include <linux/leds.h>
@@ -32,10 +33,10 @@
#include <asm/reboot.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/pci.h>
#include <asm/txx9tmr.h>
-#include <asm/txx9/ndfmc.h>
#include <asm/txx9/dmac.h>
#ifdef CONFIG_CPU_TX49XX
#include <asm/txx9/tx4938.h>
diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c
index 85d1795652da..17395d5d15ca 100644
--- a/arch/mips/txx9/generic/setup_tx4938.c
+++ b/arch/mips/txx9/generic/setup_tx4938.c
@@ -17,13 +17,13 @@
#include <linux/ptrace.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/txx9/ndfmc.h>
#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/txx9irq.h>
#include <asm/txx9tmr.h>
#include <asm/txx9pio.h>
#include <asm/txx9/generic.h>
-#include <asm/txx9/ndfmc.h>
#include <asm/txx9/dmac.h>
#include <asm/txx9/tx4938.h>
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index 274928987a21..360c388f4c82 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -21,13 +21,13 @@
#include <linux/ptrace.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/txx9/ndfmc.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/txx9irq.h>
#include <asm/txx9tmr.h>
#include <asm/txx9/generic.h>
-#include <asm/txx9/ndfmc.h>
#include <asm/txx9/dmac.h>
#include <asm/txx9/tx4939.h>
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index ce196046ac3e..34605ca21498 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -7,7 +7,13 @@ ccflags-vdso := \
$(filter -I%,$(KBUILD_CFLAGS)) \
$(filter -E%,$(KBUILD_CFLAGS)) \
$(filter -mmicromips,$(KBUILD_CFLAGS)) \
- $(filter -march=%,$(KBUILD_CFLAGS))
+ $(filter -march=%,$(KBUILD_CFLAGS)) \
+ -D__VDSO__
+
+ifeq ($(cc-name),clang)
+ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
+endif
+
cflags-vdso := $(ccflags-vdso) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
-O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
@@ -38,6 +44,7 @@ endif
# VDSO linker flags.
VDSO_LDFLAGS := \
-Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
+ $(addprefix -Wl$(comma),$(filter -E%,$(KBUILD_CFLAGS))) \
-nostdlib -shared \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
$(call cc-ldoption, -Wl$(comma)--build-id)
diff --git a/arch/mips/vdso/genvdso.h b/arch/mips/vdso/genvdso.h
index 94334727059a..611b06f01a3c 100644
--- a/arch/mips/vdso/genvdso.h
+++ b/arch/mips/vdso/genvdso.h
@@ -15,8 +15,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
ELF(Shdr) *shdr;
char *shstrtab, *name;
uint16_t sh_count, sh_entsize, i;
- unsigned int local_gotno, symtabno, gotsym;
- ELF(Dyn) *dyn = NULL;
shdrs = vdso + FUNC(swap_uint)(ehdr->e_shoff);
sh_count = swap_uint16(ehdr->e_shnum);
@@ -41,9 +39,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
"%s: '%s' contains relocation sections\n",
program_name, path);
return false;
- case SHT_DYNAMIC:
- dyn = vdso + FUNC(swap_uint)(shdr->sh_offset);
- break;
}
/* Check for existing sections. */
@@ -61,52 +56,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
}
}
- /*
- * Ensure the GOT has no entries other than the standard 2, for the same
- * reason we check that there's no relocation sections above.
- * The standard two entries are:
- * - Lazy resolver
- * - Module pointer
- */
- if (dyn) {
- local_gotno = symtabno = gotsym = 0;
-
- while (FUNC(swap_uint)(dyn->d_tag) != DT_NULL) {
- switch (FUNC(swap_uint)(dyn->d_tag)) {
- /*
- * This member holds the number of local GOT entries.
- */
- case DT_MIPS_LOCAL_GOTNO:
- local_gotno = FUNC(swap_uint)(dyn->d_un.d_val);
- break;
- /*
- * This member holds the number of entries in the
- * .dynsym section.
- */
- case DT_MIPS_SYMTABNO:
- symtabno = FUNC(swap_uint)(dyn->d_un.d_val);
- break;
- /*
- * This member holds the index of the first dynamic
- * symbol table entry that corresponds to an entry in
- * the GOT.
- */
- case DT_MIPS_GOTSYM:
- gotsym = FUNC(swap_uint)(dyn->d_un.d_val);
- break;
- }
-
- dyn++;
- }
-
- if (local_gotno > 2 || symtabno - gotsym) {
- fprintf(stderr,
- "%s: '%s' contains unexpected GOT entries\n",
- program_name, path);
- return false;
- }
- }
-
return true;
}
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index 39a0db3e2b34..16e684b59875 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -17,6 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -46,7 +47,7 @@ static void __iomem *pmu_base;
#define pmu_read(offset) readw(pmu_base + (offset))
#define pmu_write(offset, value) writew((value), pmu_base + (offset))
-static void vr41xx_cpu_wait(void)
+static void __cpuidle vr41xx_cpu_wait(void)
{
local_irq_disable();
if (!need_resched())
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 6aed974276d8..1d4248fa55e9 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -12,17 +12,17 @@ config NDS32
select CLONE_BACKWARDS
select COMMON_CLK
select DMA_NONCOHERENT_OPS
- select GENERIC_ASHLDI3
- select GENERIC_ASHRDI3
- select GENERIC_LSHRDI3
- select GENERIC_CMPDI2
- select GENERIC_MULDI3
- select GENERIC_UCMPDI2
select GENERIC_ATOMIC64
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_SHOW
+ select GENERIC_LIB_ASHLDI3
+ select GENERIC_LIB_ASHRDI3
+ select GENERIC_LIB_CMPDI2
+ select GENERIC_LIB_LSHRDI3
+ select GENERIC_LIB_MULDI3
+ select GENERIC_LIB_UCMPDI2
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
@@ -71,8 +71,6 @@ config FIX_EARLYCON_MEM
config PGTABLE_LEVELS
default 2
-source "init/Kconfig"
-
menu "System Type"
source "arch/nds32/Kconfig.cpu"
config NR_CPUS
@@ -90,24 +88,5 @@ config NDS32_BUILTIN_DTB
endmenu
menu "Kernel Features"
-source "kernel/Kconfig.preempt"
-source "kernel/Kconfig.freezer"
-source "mm/Kconfig"
source "kernel/Kconfig.hz"
endmenu
-
-menu "Executable file formats"
-source "fs/Kconfig.binfmt"
-endmenu
-
-source "net/Kconfig"
-source "drivers/Kconfig"
-source "fs/Kconfig"
-
-menu "Kernel hacking"
-source "lib/Kconfig.debug"
-endmenu
-
-source "security/Kconfig"
-source "crypto/Kconfig"
-source "lib/Kconfig"
diff --git a/arch/nds32/Kconfig.debug b/arch/nds32/Kconfig.debug
new file mode 100644
index 000000000000..22a162cd99e8
--- /dev/null
+++ b/arch/nds32/Kconfig.debug
@@ -0,0 +1 @@
+# dummy file, do not delete
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 513bb2e9baf9..031c676821ff 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -34,10 +34,12 @@ ifdef CONFIG_CPU_LITTLE_ENDIAN
KBUILD_CFLAGS += $(call cc-option, -EL)
KBUILD_AFLAGS += $(call cc-option, -EL)
LDFLAGS += $(call cc-option, -EL)
+CHECKFLAGS += -D__NDS32_EL__
else
KBUILD_CFLAGS += $(call cc-option, -EB)
KBUILD_AFLAGS += $(call cc-option, -EB)
LDFLAGS += $(call cc-option, -EB)
+CHECKFLAGS += -D__NDS32_EB__
endif
boot := arch/nds32/boot
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index 10b48f0d8e85..8b26198d51bb 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -8,6 +8,8 @@
#define PG_dcache_dirty PG_arch_1
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#ifdef CONFIG_CPU_CACHE_ALIASING
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_dup_mm(struct mm_struct *mm);
@@ -34,13 +36,16 @@ void flush_anon_page(struct vm_area_struct *vma,
void flush_kernel_dcache_page(struct page *page);
void flush_kernel_vmap_range(void *addr, int size);
void invalidate_kernel_vmap_range(void *addr, int size);
-void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
#else
#include <asm-generic/cacheflush.h>
+#undef flush_icache_range
+#undef flush_icache_page
+#undef flush_icache_user_range
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len);
#endif
#endif /* __NDS32_CACHEFLUSH_H__ */
diff --git a/arch/nds32/include/asm/futex.h b/arch/nds32/include/asm/futex.h
index eab5e84bd991..cb6cb91cfdf8 100644
--- a/arch/nds32/include/asm/futex.h
+++ b/arch/nds32/include/asm/futex.h
@@ -16,7 +16,7 @@
" .popsection\n" \
" .pushsection .fixup,\"ax\"\n" \
"4: move %0, " err_reg "\n" \
- " j 3b\n" \
+ " b 3b\n" \
" .popsection"
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index 2f5b2ccebe47..63a1a5ef5219 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -278,7 +278,8 @@ static void __init setup_memory(void)
void __init setup_arch(char **cmdline_p)
{
- early_init_devtree( __dtb_start);
+ early_init_devtree(__atags_pointer ? \
+ phys_to_virt(__atags_pointer) : __dtb_start);
setup_cpuinfo();
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
index ce8fd34497bf..254703653b6f 100644
--- a/arch/nds32/mm/cacheflush.c
+++ b/arch/nds32/mm/cacheflush.c
@@ -13,7 +13,39 @@
extern struct cache_info L1_cache_info[2];
-#ifndef CONFIG_CPU_CACHE_ALIASING
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long line_size, flags;
+ line_size = L1_cache_info[DCACHE].line_size;
+ start = start & ~(line_size - 1);
+ end = (end + line_size - 1) & ~(line_size - 1);
+ local_irq_save(flags);
+ cpu_cache_wbinval_range(start, end, 1);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long flags;
+ unsigned long kaddr;
+ local_irq_save(flags);
+ kaddr = (unsigned long)kmap_atomic(page);
+ cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+ kunmap_atomic((void *)kaddr);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len)
+{
+ unsigned long kaddr;
+ kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
+ flush_icache_range(kaddr, kaddr + len);
+ kunmap_atomic((void *)kaddr);
+}
+
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
pte_t * pte)
{
@@ -35,19 +67,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
(vma->vm_flags & VM_EXEC)) {
-
- if (!PageHighMem(page)) {
- cpu_cache_wbinval_page((unsigned long)
- page_address(page),
- vma->vm_flags & VM_EXEC);
- } else {
- unsigned long kaddr = (unsigned long)kmap_atomic(page);
- cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
- kunmap_atomic((void *)kaddr);
- }
+ unsigned long kaddr;
+ local_irq_save(flags);
+ kaddr = (unsigned long)kmap_atomic(page);
+ cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+ kunmap_atomic((void *)kaddr);
+ local_irq_restore(flags);
}
}
-#else
+#ifdef CONFIG_CPU_CACHE_ALIASING
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
@@ -317,52 +345,4 @@ void invalidate_kernel_vmap_range(void *addr, int size)
local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
-
-void flush_icache_range(unsigned long start, unsigned long end)
-{
- unsigned long line_size, flags;
- line_size = L1_cache_info[DCACHE].line_size;
- start = start & ~(line_size - 1);
- end = (end + line_size - 1) & ~(line_size - 1);
- local_irq_save(flags);
- cpu_cache_wbinval_range(start, end, 1);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
- unsigned long flags;
- local_irq_save(flags);
- cpu_cache_wbinval_page((unsigned long)page_address(page),
- vma->vm_flags & VM_EXEC);
- local_irq_restore(flags);
-}
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t * pte)
-{
- struct page *page;
- unsigned long flags;
- unsigned long pfn = pte_pfn(*pte);
-
- if (!pfn_valid(pfn))
- return;
-
- if (vma->vm_mm == current->active_mm) {
- local_irq_save(flags);
- __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
- __nds32__tlbop_rwr(*pte);
- __nds32__isb();
- local_irq_restore(flags);
- }
-
- page = pfn_to_page(pfn);
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
- (vma->vm_flags & VM_EXEC)) {
- local_irq_save(flags);
- cpu_dcache_wbinval_page((unsigned long)page_address(page));
- local_irq_restore(flags);
- }
-}
#endif
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
index 9bdb7c3ecbb6..b740534b152c 100644
--- a/arch/nds32/mm/fault.c
+++ b/arch/nds32/mm/fault.c
@@ -73,7 +73,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
struct mm_struct *mm;
struct vm_area_struct *vma;
int si_code;
- int fault;
+ vm_fault_t fault;
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
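
vm_fault_t, recurring here and in the nios2 and openrisc fault handlers below, is the dedicated return type of handle_mm_fault(): a bitmask of VM_FAULT_* codes, not an errno. A minimal sketch of the idiomatic check, with the rest of the handler elided and the function name illustrative:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mm_types.h>	/* vm_fault_t */

/* Classify the outcome of a page fault.  The vm_fault_t value is a
 * VM_FAULT_* bitmask and must be tested bit-wise, never compared
 * against negative errnos. */
static int classify_fault(struct vm_area_struct *vma, unsigned long addr,
			  unsigned int flags)
{
	vm_fault_t fault = handle_mm_fault(vma, addr, flags);

	if (fault & VM_FAULT_OOM)
		return -ENOMEM;		/* caller takes the OOM path */
	if (fault & VM_FAULT_SIGBUS)
		return -EFAULT;		/* caller raises SIGBUS */
	return 0;			/* fault serviced */
}
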
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 3d4ec88f1db1..f4ad1138e6b9 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
config NIOS2
def_bool y
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_NO_SWAP
+ select DMA_NONCOHERENT_OPS
select TIMER_OF
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
@@ -38,27 +42,16 @@ config HAS_DMA
config FPU
def_bool n
-config SWAP
- def_bool n
-
config RWSEM_GENERIC_SPINLOCK
def_bool y
config TRACE_IRQFLAGS_SUPPORT
def_bool n
-source "init/Kconfig"
-
menu "Kernel features"
-source "kernel/Kconfig.preempt"
-
-source "kernel/Kconfig.freezer"
-
source "kernel/Kconfig.hz"
-source "mm/Kconfig"
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
range 9 20
@@ -195,23 +188,3 @@ config NIOS2_IO_REGION_BASE
default "0xe0000000"
endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/nios2/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
index edfeef049a51..7a49f0d28d14 100644
--- a/arch/nios2/Kconfig.debug
+++ b/arch/nios2/Kconfig.debug
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-source "lib/Kconfig.debug"
-
config DEBUG_STACK_USAGE
bool "Enable stack utilization instrumentation"
depends on DEBUG_KERNEL
@@ -24,5 +21,3 @@ config EARLY_PRINTK
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized.
You should normally say N here, unless you want to debug such a crash.
-
-endmenu
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 64ed3d656956..8fde4fa2c34f 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += current.h
generic-y += device.h
generic-y += div64.h
generic-y += dma.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h
deleted file mode 100644
index 6ceb92251da0..000000000000
--- a/arch/nios2/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
- * Copyright (C) 2009 Wind River Systems Inc
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file COPYING in the main directory of this
- * archive for more details.
- */
-
-#ifndef _ASM_NIOS2_DMA_MAPPING_H
-#define _ASM_NIOS2_DMA_MAPPING_H
-
-extern const struct dma_map_ops nios2_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &nios2_dma_ops;
-}
-
-#endif /* _ASM_NIOS2_DMA_MAPPING_H */
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index 4be815519dd4..4af9e5b5ba1c 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -12,18 +12,18 @@
#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/export.h>
#include <linux/string.h>
-#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>
-static inline void __dma_sync_for_device(void *vaddr, size_t size,
- enum dma_data_direction direction)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- switch (direction) {
+ void *vaddr = phys_to_virt(paddr);
+
+ switch (dir) {
case DMA_FROM_DEVICE:
invalidate_dcache_range((unsigned long)vaddr,
(unsigned long)(vaddr + size));
@@ -42,10 +42,12 @@ static inline void __dma_sync_for_device(void *vaddr, size_t size,
}
}
-static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
- enum dma_data_direction direction)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- switch (direction) {
+ void *vaddr = phys_to_virt(paddr);
+
+ switch (dir) {
case DMA_BIDIRECTIONAL:
case DMA_FROM_DEVICE:
invalidate_dcache_range((unsigned long)vaddr,
@@ -58,8 +60,8 @@ static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
}
}
-static void *nios2_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -80,125 +82,10 @@ static void *nios2_dma_alloc(struct device *dev, size_t size,
return ret;
}
-static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
free_pages(addr, get_order(size));
}
-
-static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int i;
-
- for_each_sg(sg, sg, nents, i) {
- void *addr = sg_virt(sg);
-
- if (!addr)
- continue;
-
- sg->dma_address = sg_phys(sg);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
-
- __dma_sync_for_device(addr, sg->length, direction);
- }
-
- return nents;
-}
-
-static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- void *addr = page_address(page) + offset;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync_for_device(addr, size, direction);
-
- return page_to_phys(page) + offset;
-}
-
-static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
-}
-
-static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction,
- unsigned long attrs)
-{
- void *addr;
- int i;
-
- if (direction == DMA_TO_DEVICE)
- return;
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- return;
-
- for_each_sg(sg, sg, nhwentries, i) {
- addr = sg_virt(sg);
- if (addr)
- __dma_sync_for_cpu(addr, sg->length, direction);
- }
-}
-
-static void nios2_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
-}
-
-static void nios2_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
-}
-
-static void nios2_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- int i;
-
- /* Make sure that gcc doesn't leave the empty loop body. */
- for_each_sg(sg, sg, nelems, i)
- __dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
-}
-
-static void nios2_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- int i;
-
- /* Make sure that gcc doesn't leave the empty loop body. */
- for_each_sg(sg, sg, nelems, i)
- __dma_sync_for_device(sg_virt(sg), sg->length, direction);
-
-}
-
-const struct dma_map_ops nios2_dma_ops = {
- .alloc = nios2_dma_alloc,
- .free = nios2_dma_free,
- .map_page = nios2_dma_map_page,
- .unmap_page = nios2_dma_unmap_page,
- .map_sg = nios2_dma_map_sg,
- .unmap_sg = nios2_dma_unmap_sg,
- .sync_single_for_device = nios2_dma_sync_single_for_device,
- .sync_single_for_cpu = nios2_dma_sync_single_for_cpu,
- .sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu,
- .sync_sg_for_device = nios2_dma_sync_sg_for_device,
-};
-EXPORT_SYMBOL(nios2_dma_ops);
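
With DMA_NONCOHERENT_OPS the architecture no longer supplies a full dma_map_ops table: it implements only arch_dma_alloc()/arch_dma_free() and the two arch_sync_dma_for_*() cache hooks, and the generic dma-direct code derives map/unmap/sync from them. The driver-facing API is unchanged — a hedged sketch, where dev, buf and len are placeholders:

#include <linux/dma-mapping.h>

/* On a dma-direct architecture, dma_map_single() boils down to
 * virt_to_phys() plus a call into arch_sync_dma_for_device(); no
 * per-arch .map_page implementation is involved anymore. */
static dma_addr_t map_buffer_for_device(struct device *dev, void *buf,
					size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return 0;	/* treated as failure in this sketch */
	return handle;
}
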
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index b804dd06ea1c..24fd84cf6006 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -47,7 +47,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
int code = SEGV_MAPERR;
- int fault;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
cause >>= 2;
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 9ecad05bfc73..ed5f32d8fbd8 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -27,7 +27,6 @@ config OPENRISC
select GENERIC_STRNLEN_USER
select GENERIC_SMP_IDLE_THREAD
select MODULES_USE_ELF_RELA
- select MULTI_IRQ_HANDLER
select HAVE_DEBUG_STACKOVERFLOW
select OR1K_PIC
select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
@@ -36,6 +35,7 @@ config OPENRISC
select ARCH_USE_QUEUED_RWLOCKS
select OMPIC if SMP
select ARCH_WANT_FRAME_POINTERS
+ select GENERIC_IRQ_MULTI_HANDLER
config CPU_BIG_ENDIAN
def_bool y
@@ -69,13 +69,6 @@ config STACKTRACE_SUPPORT
config LOCKDEP_SUPPORT
def_bool y
-config MULTI_IRQ_HANDLER
- def_bool y
-
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "Processor type and features"
choice
@@ -147,8 +140,6 @@ config SMP
If you don't know what to do here, say N.
source kernel/Kconfig.hz
-source kernel/Kconfig.preempt
-source "mm/Kconfig"
config OPENRISC_NO_SPR_SR_DSX
bool "use SPR_SR_DSX software emulation" if OR1K_1200
@@ -206,27 +197,3 @@ config OPENRISC_ESR_EXCEPTION_BUG_CHECK
endmenu
endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
-
-endmenu
diff --git a/arch/openrisc/Kconfig.debug b/arch/openrisc/Kconfig.debug
new file mode 100644
index 000000000000..22a162cd99e8
--- /dev/null
+++ b/arch/openrisc/Kconfig.debug
@@ -0,0 +1 @@
+# dummy file, do not delete
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 89076a66eee2..70e06d34006c 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -19,7 +19,6 @@
KBUILD_DEFCONFIG := or1ksim_defconfig
-LDFLAGS :=
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
LDFLAGS_vmlinux :=
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
index 146e1660f00e..b589fac39b92 100644
--- a/arch/openrisc/include/asm/atomic.h
+++ b/arch/openrisc/include/asm/atomic.h
@@ -100,7 +100,7 @@ ATOMIC_OP(xor)
*
* This is often used through atomic_inc_not_zero()
*/
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int old, tmp;
@@ -119,7 +119,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return old;
}
-#define __atomic_add_unless __atomic_add_unless
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#include <asm-generic/atomic.h>
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index d29f7db53906..f9cd43a39d72 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -16,8 +16,9 @@
#ifndef __ASM_OPENRISC_CMPXCHG_H
#define __ASM_OPENRISC_CMPXCHG_H
+#include <linux/bits.h>
+#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/bitops.h>
#define __HAVE_ARCH_CMPXCHG 1
diff --git a/arch/openrisc/include/asm/irq.h b/arch/openrisc/include/asm/irq.h
index d9eee0a2b7b4..eb612b1865d2 100644
--- a/arch/openrisc/include/asm/irq.h
+++ b/arch/openrisc/include/asm/irq.h
@@ -24,6 +24,4 @@
#define NO_IRQ (-1)
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
#endif /* __ASM_OPENRISC_IRQ_H__ */
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 3e1a46615120..8999b9226512 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
__free_page(pte);
}
+#define __pte_free_tlb(tlb, pte, addr) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), (pte)); \
+} while (0)
-#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
#define pmd_pgtable(pmd) pmd_page(pmd)
#define check_pgt_cache() do { } while (0)
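
The dtor added to __pte_free_tlb() pairs with the pgtable_page_ctor() call made when the page-table page was allocated; without it, the split-ptlock state set up by the ctor leaks when the page goes back through the TLB batch. A sketch of the allocation side of that pair, roughly as a pte_alloc_one() of this era looks (the function name is illustrative):

#include <linux/gfp.h>
#include <linux/mm.h>

/* pgtable_page_ctor() initialises the split ptlock; the reworked
 * __pte_free_tlb() above now tears it down again with
 * pgtable_page_dtor() before freeing the page. */
static struct page *sketch_pte_alloc_one(struct mm_struct *mm)
{
	struct page *pte = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}
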
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 690d55272ba6..0c826ad6e994 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
l.addi r3,r1,0 // pt_regs
/* r4 set by EXCEPTION_HANDLE */ // effective address of fault
- /*
- * __PHX__: TODO
- *
- * all this can be written much simpler. look at
- * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
- */
#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
l.lwz r6,PT_PC(r3) // address of an offending insn
l.lwz r6,0(r6) // instruction that caused pf
@@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
#else
- l.lwz r6,PT_SR(r3) // SR
+ l.mfspr r6,r0,SPR_SR // SR
l.andi r6,r6,SPR_SR_DSX // check for delay slot exception
l.sfne r6,r0 // exception happened in delay slot
l.bnf 7f
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index fb02b2a1d6f2..9fc6b60140f0 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -210,8 +210,7 @@
* r4 - EEAR exception EA
* r10 - current pointing to current_thread_info struct
* r12 - syscall 0, since we didn't come from syscall
- * r13 - temp it actually contains new SR, not needed anymore
- * r31 - handler address of the handler we'll jump to
+ * r30 - handler address of the handler we'll jump to
*
* handler has to save remaining registers to the exception
* ksp frame *before* tainting them!
@@ -244,6 +243,7 @@
/* r1 is KSP, r30 is __pa(KSP) */ ;\
tophys (r30,r1) ;\
l.sw PT_GPR12(r30),r12 ;\
+ /* r4 used as a tmp before EA */ ;\
l.mfspr r12,r0,SPR_EPCR_BASE ;\
l.sw PT_PC(r30),r12 ;\
l.mfspr r12,r0,SPR_ESR_BASE ;\
@@ -263,7 +263,10 @@
/* r12 == 1 if we come from syscall */ ;\
CLEAR_GPR(r12) ;\
/* ----- turn on MMU ----- */ ;\
- l.ori r30,r0,(EXCEPTION_SR) ;\
+ /* Carry DSX into exception SR */ ;\
+ l.mfspr r30,r0,SPR_SR ;\
+ l.andi r30,r30,SPR_SR_DSX ;\
+ l.ori r30,r30,(EXCEPTION_SR) ;\
l.mtspr r0,r30,SPR_ESR_BASE ;\
/* r30: EA address of handler */ ;\
LOAD_SYMBOL_2_GPR(r30,handler) ;\
diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c
index 35e478a93116..5f9445effaf8 100644
--- a/arch/openrisc/kernel/irq.c
+++ b/arch/openrisc/kernel/irq.c
@@ -41,13 +41,6 @@ void __init init_IRQ(void)
irqchip_init();
}
-static void (*handle_arch_irq)(struct pt_regs *);
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- handle_arch_irq = handle_irq;
-}
-
void __irq_entry do_IRQ(struct pt_regs *regs)
{
handle_arch_irq(regs);
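
GENERIC_IRQ_MULTI_HANDLER moves the handle_arch_irq pointer and its set_handle_irq() setter into the generic irq core, which is why the local copies above can go. A sketch of the registration an irqchip driver performs through that interface — the my_pic_* names are illustrative, and handle_domain_irq() assumes the arch also selects HANDLE_DOMAIN_IRQ:

#include <linux/init.h>
#include <linux/irq.h>		/* set_handle_irq() */
#include <linux/irqdesc.h>	/* handle_domain_irq() */
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *my_pic_domain;	/* illustrative */

static unsigned int my_pic_pending(void)
{
	return 0;	/* placeholder: read the PIC's pending register */
}

/* Root handler: drain pending hwirqs and dispatch them through the
 * driver's irqdomain.  Installed once at boot via set_handle_irq(). */
static void my_pic_handle_irq(struct pt_regs *regs)
{
	unsigned int hwirq;

	while ((hwirq = my_pic_pending()) != 0)
		handle_domain_irq(my_pic_domain, hwirq, regs);
}

static int __init my_pic_init(struct device_node *node,
			      struct device_node *parent)
{
	/* ... ioremap registers, create my_pic_domain ... */
	set_handle_irq(my_pic_handle_irq);
	return 0;
}
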
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index fac246e6f37a..d8981cbb852a 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
return 0;
}
#else
- return regs->sr & SPR_SR_DSX;
+ return mfspr(SPR_SR) & SPR_SR_DSX;
#endif
}
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 9f011d16cc46..dc4dbafc1d83 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -53,7 +53,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
struct mm_struct *mm;
struct vm_area_struct *vma;
int si_code;
- int fault;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
tsk = current;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 17526bebcbd2..8e6d83f79e72 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,7 +11,6 @@ config PARISC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_WANTS_UBSAN_NO_NULL
select ARCH_SUPPORTS_MEMORY_FAILURE
select RTC_CLASS
select RTC_DRV_GENERIC
@@ -46,6 +45,7 @@ config PARISC
select HAVE_ARCH_HASH
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
+ select HAVE_REGS_AND_STACK_ACCESS_API
select GENERIC_SCHED_CLOCK
select HAVE_UNSTABLE_SCHED_CLOCK if SMP
select GENERIC_CLOCKEVENTS
@@ -129,10 +129,6 @@ config PGTABLE_LEVELS
config SYS_SUPPORTS_HUGETLBFS
def_bool y if PA20
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "Processor type and features"
@@ -188,6 +184,10 @@ config PA20
config PA11
def_bool y
depends on PA7000 || PA7100LC || PA7200 || PA7300LC
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select DMA_NONCOHERENT_OPS
+ select DMA_NONCOHERENT_CACHE_SYNC
config PREFETCH
def_bool y
@@ -195,7 +195,7 @@ config PREFETCH
config MLONGCALLS
bool "Enable the -mlong-calls compiler option for big kernels"
- def_bool y if (!MODULES)
+ default y
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead
@@ -327,9 +327,7 @@ config NODES_SHIFT
default "3"
depends on NEED_MULTIPLE_NODES
-source "kernel/Kconfig.preempt"
source "kernel/Kconfig.hz"
-source "mm/Kconfig"
config COMPAT
def_bool y
@@ -354,21 +352,6 @@ endmenu
source "drivers/parisc/Kconfig"
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/parisc/Kconfig.debug"
-
config SECCOMP
def_bool y
prompt "Enable seccomp to safely compute untrusted bytecode"
@@ -384,9 +367,3 @@ config SECCOMP
defined by each seccomp mode.
If unsure, say Y. Only embedded should say N here.
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index fb3507f9b14a..1478ded0e247 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -1,9 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-
-endmenu
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 60e6f07b7e32..e9c6385ef0d1 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -36,6 +36,7 @@
#define RP_OFFSET 16
#define FRAME_SIZE 128
#define CALLEE_REG_FRAME_SIZE 144
+#define REG_SZ 8
#define ASM_ULONG_INSN .dword
#else /* CONFIG_64BIT */
#define LDREG ldw
@@ -50,6 +51,7 @@
#define RP_OFFSET 20
#define FRAME_SIZE 64
#define CALLEE_REG_FRAME_SIZE 128
+#define REG_SZ 4
#define ASM_ULONG_INSN .word
#endif
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 88bae6676c9b..118953d41763 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -77,30 +77,6 @@ static __inline__ int atomic_read(const atomic_t *v)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#define ATOMIC_OP(op, c_op) \
static __inline__ void atomic_##op(int i, atomic_t *v) \
{ \
@@ -160,28 +136,6 @@ ATOMIC_OPS(xor, ^=)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_inc(v) (atomic_add( 1,(v)))
-#define atomic_dec(v) (atomic_add( -1,(v)))
-
-#define atomic_inc_return(v) (atomic_add_return( 1,(v)))
-#define atomic_dec_return(v) (atomic_add_return( -1,(v)))
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-
#define ATOMIC_INIT(i) { (i) }
#ifdef CONFIG_64BIT
@@ -264,72 +218,11 @@ atomic64_read(const atomic64_t *v)
return READ_ONCE((v)->counter);
}
-#define atomic64_inc(v) (atomic64_add( 1,(v)))
-#define atomic64_dec(v) (atomic64_add( -1,(v)))
-
-#define atomic64_inc_return(v) (atomic64_add_return( 1,(v)))
-#define atomic64_dec_return(v) (atomic64_add_return( -1,(v)))
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0)
-
/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
- long c, old, dec;
- c = atomic64_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic64_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
#endif /* !CONFIG_64BIT */
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644
index 000000000000..dbaaca84f27f
--- /dev/null
+++ b/arch/parisc/include/asm/barrier.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+ which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb() do { synchronize_caches(); } while (0)
+#define rmb() mb()
+#define wmb() mb()
+#define dma_rmb() mb()
+#define dma_wmb() mb()
+#else
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define dma_rmb() barrier()
+#define dma_wmb() barrier()
+#endif
+
+#define __smp_mb() mb()
+#define __smp_rmb() mb()
+#define __smp_wmb() mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 01e1fc057c83..44a9f97194aa 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -21,11 +21,6 @@
** flush/purge and allocate "regular" cacheable pages for everything.
*/
-#ifdef CONFIG_PA11
-extern const struct dma_map_ops pcxl_dma_ops;
-extern const struct dma_map_ops pcx_dma_ops;
-#endif
-
extern const struct dma_map_ops *hppa_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index f019d3ec0c1c..d00973aab7f1 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -235,6 +235,7 @@ typedef unsigned long elf_greg_t;
#define SET_PERSONALITY(ex) \
({ \
set_personality((current->personality & ~PER_MASK) | PER_LINUX); \
+ clear_thread_flag(TIF_32BIT); \
current->thread.map_base = DEFAULT_MAP_BASE; \
current->thread.task_size = DEFAULT_TASK_SIZE; \
})
@@ -243,9 +244,11 @@ typedef unsigned long elf_greg_t;
#define COMPAT_SET_PERSONALITY(ex) \
({ \
- set_thread_flag(TIF_32BIT); \
- current->thread.map_base = DEFAULT_MAP_BASE32; \
- current->thread.task_size = DEFAULT_TASK_SIZE32; \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
+ set_thread_flag(TIF_32BIT); \
+ current->thread.map_base = DEFAULT_MAP_BASE32; \
+ current->thread.task_size = DEFAULT_TASK_SIZE32; \
+ } else clear_thread_flag(TIF_32BIT); \
})
/*
diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h
index 9a69bf6fc4b6..cd6fe4febead 100644
--- a/arch/parisc/include/asm/linkage.h
+++ b/arch/parisc/include/asm/linkage.h
@@ -18,26 +18,22 @@
#ifdef __ASSEMBLY__
#define ENTRY(name) \
- .export name !\
- ALIGN !\
-name:
-
-#ifdef CONFIG_64BIT
-#define ENDPROC(name) \
- END(name)
-#else
-#define ENDPROC(name) \
- .type name, @function !\
- END(name)
-#endif
+ ALIGN !\
+name: ASM_NL\
+ .export name
-#define ENTRY_CFI(name) \
+#define ENTRY_CFI(name, ...) \
ENTRY(name) ASM_NL\
+ .proc ASM_NL\
+ .callinfo __VA_ARGS__ ASM_NL\
+ .entry ASM_NL\
CFI_STARTPROC
#define ENDPROC_CFI(name) \
- ENDPROC(name) ASM_NL\
- CFI_ENDPROC
+ CFI_ENDPROC ASM_NL\
+ .exit ASM_NL\
+ .procend ASM_NL\
+ ENDPROC(name)
#endif /* __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 2dbe5580a1a4..2bd5e695bdad 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -256,11 +256,7 @@ on downward growing arches, it looks like this:
* it in here from the current->personality
*/
-#ifdef CONFIG_64BIT
-#define USER_WIDE_MODE (!test_thread_flag(TIF_32BIT))
-#else
-#define USER_WIDE_MODE 0
-#endif
+#define USER_WIDE_MODE (!is_32bit_task())
#define start_thread(regs, new_pc, new_sp) do { \
elf_addr_t *sp = (elf_addr_t *)new_sp; \
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index 46da07670c2b..2a27b275ab09 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -25,4 +25,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->gr[20];
}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->iaoq[0] = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, ipsw))
+
#endif
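
These accessors back the HAVE_REGS_AND_STACK_ACCESS_API selection in the Kconfig hunk above; kprobe trace events use them to resolve "%r26"-style register names at parse time. A consumer-side sketch, assuming the architecture also provides the customary regs_get_register() helper:

#include <asm/ptrace.h>

/* Resolve a register by name and fetch its value from a pt_regs
 * snapshot — essentially what the kprobe event argument parser does
 * with "%rNN" specifiers. */
static unsigned long fetch_named_reg(struct pt_regs *regs, const char *name)
{
	int offs = regs_query_register_offset(name);

	if (offs < 0)
		return 0;	/* unknown register name */
	return regs_get_register(regs, offs);
}
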
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 6f84b6acc86e..8a63515f03bf 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
{
volatile unsigned int *a;
- mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
while (*a == 0)
@@ -30,17 +29,16 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
local_irq_disable();
} else
cpu_relax();
- mb();
}
#define arch_spin_lock_flags arch_spin_lock_flags
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
- mb();
+
a = __ldcw_align(x);
- *a = 1;
mb();
+ *a = 1;
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
volatile unsigned int *a;
int ret;
- mb();
a = __ldcw_align(x);
ret = __ldcw(a) != 0;
- mb();
return ret;
}
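
The notable change here is in arch_spin_unlock(): the barrier now sits before the store that releases the lock word, so every access inside the critical section becomes globally visible before another CPU can take the lock (release semantics); a barrier after the store orders nothing useful. The same idea expressed with generic kernel primitives, as a toy sketch:

#include <linux/compiler.h>
#include <asm/barrier.h>

/* Toy release: complete the critical section's accesses, then publish
 * the lock word.  Reversing the two lines would let stores leak past
 * the unlock. */
static inline void toy_unlock(unsigned int *lock)
{
	smp_mb();
	WRITE_ONCE(*lock, 1);
}
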
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index e00013248907..8ecc1f0c0483 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -2,7 +2,9 @@
#ifndef __ASM_TRAPS_H
#define __ASM_TRAPS_H
-#ifdef __KERNEL__
+#define PARISC_ITLB_TRAP 6 /* defined by architecture. Do not change. */
+
+#if !defined(__ASSEMBLY__)
struct pt_regs;
/* traps.c */
diff --git a/arch/parisc/include/asm/unwind.h b/arch/parisc/include/asm/unwind.h
index c73a3ee20226..9547e5261a8b 100644
--- a/arch/parisc/include/asm/unwind.h
+++ b/arch/parisc/include/asm/unwind.h
@@ -4,6 +4,9 @@
#include <linux/list.h>
+/* Max number of levels to backtrace */
+#define MAX_UNWIND_ENTRIES 30
+
/* From ABI specifications */
struct unwind_table_entry {
unsigned int region_start;
@@ -70,8 +73,10 @@ unwind_table_remove(struct unwind_table *table);
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
struct pt_regs *regs);
-void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t);
-void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs);
+void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info,
+ struct task_struct *t);
+void unwind_frame_init_task(struct unwind_frame_info *info,
+ struct task_struct *task, struct pt_regs *regs);
int unwind_once(struct unwind_frame_info *info);
int unwind_to_user(struct unwind_frame_info *info);
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
index fc0df353ff0d..87245c584784 100644
--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -113,7 +113,6 @@
#define ELOOP 249 /* Too many symbolic links encountered */
#define ENOSYS 251 /* Function not implemented */
-#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 1d0fdc3b5d22..061b9cf2a779 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -104,4 +104,7 @@
#define SO_ZEROCOPY 0x4035
+#define SO_TXTIME 0x4036
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index e95207c0565e..242c5ab65611 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -36,6 +36,7 @@
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
+#include <asm/traps.h>
#include <asm/thread_info.h>
#include <linux/linkage.h>
@@ -482,6 +483,8 @@
.macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
or,COND(=) %r0,\spc,%r0
+ sync
+ or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
#endif
.endm
@@ -690,7 +693,7 @@ ENTRY(fault_vector_20)
def 3
extint 4
def 5
- itlb_20 6
+ itlb_20 PARISC_ITLB_TRAP
def 7
def 8
def 9
@@ -733,7 +736,7 @@ ENTRY(fault_vector_11)
def 3
extint 4
def 5
- itlb_11 6
+ itlb_11 PARISC_ITLB_TRAP
def 7
def 8
def 9
@@ -764,7 +767,6 @@ END(fault_vector_11)
#endif
/* Fault vector is separately protected and *must* be on its own page */
.align PAGE_SIZE
-ENTRY(end_fault_vector)
.import handle_interruption,code
.import do_cpu_irq_mask,code
@@ -775,8 +777,7 @@ ENTRY(end_fault_vector)
* copy_thread moved args into task save area.
*/
-ENTRY_CFI(ret_from_kernel_thread)
-
+ENTRY(ret_from_kernel_thread)
/* Call schedule_tail first though */
BL schedule_tail, %r2
nop
@@ -791,7 +792,7 @@ ENTRY_CFI(ret_from_kernel_thread)
copy %r31, %r2
b finish_child_return
nop
-ENDPROC_CFI(ret_from_kernel_thread)
+END(ret_from_kernel_thread)
/*
@@ -816,7 +817,7 @@ ENTRY_CFI(_switch_to)
bv %r0(%r2)
mtctl %r25,%cr30
-_switch_to_ret:
+ENTRY(_switch_to_ret)
mtctl %r0, %cr0 /* Needed for single stepping */
callee_rest
callee_rest_float
@@ -886,7 +887,7 @@ ENTRY_CFI(syscall_exit_rfi)
STREG %r19,PT_SR6(%r16)
STREG %r19,PT_SR7(%r16)
-intr_return:
+ENTRY(intr_return)
/* check for reschedule */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
@@ -1066,21 +1067,12 @@ ENTRY_CFI(intr_save) /* for os_hpmc */
save_specials %r29
/* If this trap is a itlb miss, skip saving/adjusting isr/ior */
-
- /*
- * FIXME: 1) Use a #define for the hardwired "6" below (and in
- * traps.c.
- * 2) Once we start executing code above 4 Gb, we need
- * to adjust iasq/iaoq here in the same way we
- * adjust isr/ior below.
- */
-
- cmpib,COND(=),n 6,%r26,skip_save_ior
+ cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior
- mfctl %cr20, %r16 /* isr */
+ mfctl %isr, %r16
nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
- mfctl %cr21, %r17 /* ior */
+ mfctl %ior, %r17
#ifdef CONFIG_64BIT
@@ -1092,22 +1084,34 @@ ENTRY_CFI(intr_save) /* for os_hpmc */
extrd,u,*<> %r8,PSW_W_BIT,1,%r0
depdi 0,1,2,%r17
- /*
- * FIXME: This code has hardwired assumptions about the split
- * between space bits and offset bits. This will change
- * when we allow alternate page sizes.
- */
-
- /* adjust isr/ior. */
- extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
- depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
- depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
+ /* adjust isr/ior: get high bits from isr and deposit in ior */
+ space_adjust %r16,%r17,%r1
#endif
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
+#if 0 && defined(CONFIG_64BIT)
+ /* Revisit when we have 64-bit code above 4Gb */
+ b,n intr_save2
skip_save_ior:
+ /* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
+ * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
+ * above.
+ */
+ extrd,u,* %r8,PSW_W_BIT,1,%r1
+ cmpib,COND(=),n 1,%r1,intr_save2
+ LDREG PT_IASQ0(%r29), %r16
+ LDREG PT_IAOQ0(%r29), %r17
+ /* adjust iasq/iaoq */
+ space_adjust %r16,%r17,%r1
+ STREG %r16, PT_IASQ0(%r29)
+ STREG %r17, PT_IAOQ0(%r29)
+#else
+skip_save_ior:
+#endif
+
+intr_save2:
virt_map
save_general %r29
@@ -1745,7 +1749,7 @@ fork_like fork
fork_like vfork
/* Set the return value for the child */
-ENTRY_CFI(child_return)
+ENTRY(child_return)
BL schedule_tail, %r2
nop
finish_child_return:
@@ -1757,7 +1761,7 @@ finish_child_return:
reg_restore %r1
b syscall_exit
copy %r0,%r28
-ENDPROC_CFI(child_return)
+END(child_return)
ENTRY_CFI(sys_rt_sigreturn_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
@@ -1789,7 +1793,7 @@ ENTRY_CFI(sys_rt_sigreturn_wrapper)
LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)
-ENTRY_CFI(syscall_exit)
+ENTRY(syscall_exit)
/* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
* via syscall_exit_rfi if the signal was received while the process
* was running.
@@ -1988,21 +1992,16 @@ syscall_do_resched:
#else
nop
#endif
-ENDPROC_CFI(syscall_exit)
+END(syscall_exit)
#ifdef CONFIG_FUNCTION_TRACER
.import ftrace_function_trampoline,code
.align L1_CACHE_BYTES
- .globl mcount
- .type mcount, @function
-ENTRY(mcount)
+ENTRY_CFI(mcount, caller)
_mcount:
.export _mcount,data
- .proc
- .callinfo caller,frame=0
- .entry
/*
* The 64bit mcount() function pointer needs 4 dwords, of which the
* first two are free. We optimize it here and put 2 instructions for
@@ -2024,18 +2023,11 @@ ftrace_stub:
.dword mcount
.dword 0 /* code in head.S puts value of global gp here */
#endif
- .exit
- .procend
-ENDPROC(mcount)
+ENDPROC_CFI(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.align 8
- .globl return_to_handler
- .type return_to_handler, @function
-ENTRY_CFI(return_to_handler)
- .proc
- .callinfo caller,frame=FRAME_SIZE
- .entry
+ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
.export parisc_return_to_handler,data
parisc_return_to_handler:
copy %r3,%r1
@@ -2074,8 +2066,6 @@ parisc_return_to_handler:
bv %r0(%rp)
#endif
LDREGM -FRAME_SIZE(%sp),%r3
- .exit
- .procend
ENDPROC_CFI(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2085,31 +2075,31 @@ ENDPROC_CFI(return_to_handler)
#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
unsigned long new_stack) */
-ENTRY_CFI(call_on_stack)
+ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
+ENTRY(_call_on_stack)
copy %sp, %r1
/* Regarding the HPPA calling conventions for function pointers,
we assume the PIC register is not changed across call. For
CONFIG_64BIT, the argument pointer is left to point at the
argument region allocated for the call to call_on_stack. */
+
+ /* Switch to new stack. We allocate two frames. */
+ ldo 2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
- /* Switch to new stack. We allocate two 128 byte frames. */
- ldo 256(%arg2), %sp
/* Save previous stack pointer and return pointer in frame marker */
- STREG %rp, -144(%sp)
+ STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls always use function descriptor */
LDREG 16(%arg1), %arg1
bve,l (%arg1), %rp
- STREG %r1, -136(%sp)
- LDREG -144(%sp), %rp
+ STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
+ LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bve (%rp)
- LDREG -136(%sp), %sp
+ LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# else
- /* Switch to new stack. We allocate two 64 byte frames. */
- ldo 128(%arg2), %sp
/* Save previous stack pointer and return pointer in frame marker */
- STREG %r1, -68(%sp)
- STREG %rp, -84(%sp)
+ STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
+ STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls use function descriptor if PLABEL bit is set */
bb,>=,n %arg1, 30, 1f
depwi 0,31,2, %arg1
@@ -2117,9 +2107,9 @@ ENTRY_CFI(call_on_stack)
1:
be,l 0(%sr4,%arg1), %sr0, %r31
copy %r31, %rp
- LDREG -84(%sp), %rp
+ LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bv (%rp)
- LDREG -68(%sp), %sp
+ LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 22e6374ece44..f33bf2d306d6 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -44,10 +44,6 @@
.align 16
ENTRY_CFI(flush_tlb_all_local)
- .proc
- .callinfo NO_CALLS
- .entry
-
/*
* The pitlbe and pdtlbe instructions should only be used to
* flush the entire tlb. Also, there needs to be no intervening
@@ -189,18 +185,11 @@ fdtdone:
2: bv %r0(%r2)
nop
-
- .exit
- .procend
ENDPROC_CFI(flush_tlb_all_local)
.import cache_info,data
ENTRY_CFI(flush_instruction_cache_local)
- .proc
- .callinfo NO_CALLS
- .entry
-
load32 cache_info, %r1
/* Flush Instruction Cache */
@@ -256,18 +245,11 @@ fisync:
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_instruction_cache_local)
.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
- .proc
- .callinfo NO_CALLS
- .entry
-
load32 cache_info, %r1
/* Flush Data Cache */
@@ -324,9 +306,6 @@ fdsync:
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_data_cache_local)
/* Macros to serialize TLB purge operations on SMP. */
@@ -353,6 +332,7 @@ ENDPROC_CFI(flush_data_cache_local)
.macro tlb_unlock la,flags,tmp
#ifdef CONFIG_SMP
ldi 1,\tmp
+ sync
stw \tmp,0(\la)
mtsm \flags
#endif
@@ -361,10 +341,6 @@ ENDPROC_CFI(flush_data_cache_local)
/* Clear page using kernel mapping. */
ENTRY_CFI(clear_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
#ifdef CONFIG_64BIT
/* Unroll the loop. */
@@ -423,18 +399,11 @@ ENTRY_CFI(clear_page_asm)
#endif
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(clear_page_asm)
/* Copy page using kernel mapping. */
ENTRY_CFI(copy_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
#ifdef CONFIG_64BIT
/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
* Unroll the loop by hand and arrange insn appropriately.
@@ -541,9 +510,6 @@ ENTRY_CFI(copy_page_asm)
#endif
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(copy_page_asm)
/*
@@ -597,10 +563,6 @@ ENDPROC_CFI(copy_page_asm)
*/
ENTRY_CFI(copy_user_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
/* Convert virtual `to' and `from' addresses to physical addresses.
Move `from' physical address to non shadowed register. */
ldil L%(__PAGE_OFFSET), %r1
@@ -749,16 +711,9 @@ ENTRY_CFI(copy_user_page_asm)
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(copy_user_page_asm)
ENTRY_CFI(clear_user_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
@@ -835,16 +790,9 @@ ENTRY_CFI(clear_user_page_asm)
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(clear_user_page_asm)
ENTRY_CFI(flush_dcache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -902,16 +850,9 @@ ENTRY_CFI(flush_dcache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_dcache_page_asm)
ENTRY_CFI(flush_icache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -976,16 +917,9 @@ ENTRY_CFI(flush_icache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
@@ -1019,16 +953,9 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
@@ -1061,16 +988,9 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1082,16 +1002,9 @@ ENTRY_CFI(flush_user_dcache_range_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_user_dcache_range_asm)
ENTRY_CFI(flush_kernel_dcache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1104,16 +1017,9 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
syncdma
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)
ENTRY_CFI(purge_kernel_dcache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1126,16 +1032,9 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
syncdma
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(purge_kernel_dcache_range_asm)
ENTRY_CFI(flush_user_icache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1147,16 +1046,9 @@ ENTRY_CFI(flush_user_icache_range_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_user_icache_range_asm)
ENTRY_CFI(flush_kernel_icache_page)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
@@ -1190,16 +1082,9 @@ ENTRY_CFI(flush_kernel_icache_page)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_kernel_icache_page)
ENTRY_CFI(flush_kernel_icache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1211,8 +1096,6 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
sync
bv %r0(%r2)
nop
- .exit
- .procend
ENDPROC_CFI(flush_kernel_icache_range_asm)
__INIT
@@ -1222,10 +1105,6 @@ ENDPROC_CFI(flush_kernel_icache_range_asm)
*/
.align 256
ENTRY_CFI(disable_sr_hashing_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
/*
* Switch to real mode
*/
@@ -1307,9 +1186,6 @@ srdis_done:
2: bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(disable_sr_hashing_asm)
.end
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 6df07ce4f3c2..04c48f1ef3fb 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -21,13 +21,12 @@
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
-#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <linux/export.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <asm/cacheflush.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
@@ -395,7 +394,7 @@ pcxl_dma_init(void)
__initcall(pcxl_dma_init);
-static void *pa11_dma_alloc(struct device *dev, size_t size,
+static void *pcxl_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
unsigned long vaddr;
@@ -422,190 +421,60 @@ static void *pa11_dma_alloc(struct device *dev, size_t size,
return (void *)vaddr;
}
-static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
+static void *pcx_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
- int order;
-
- order = get_order(size);
- size = 1 << (order + PAGE_SHIFT);
- unmap_uncached_pages((unsigned long)vaddr, size);
- pcxl_free_range((unsigned long)vaddr, size);
- free_pages((unsigned long)__va(dma_handle), order);
-}
+ void *addr;
-static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction, unsigned long attrs)
-{
- void *addr = page_address(page) + offset;
- BUG_ON(direction == DMA_NONE);
+ if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
+ return NULL;
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- flush_kernel_dcache_range((unsigned long) addr, size);
+ addr = (void *)__get_free_pages(flag, get_order(size));
+ if (addr)
+ *dma_handle = (dma_addr_t)virt_to_phys(addr);
- return virt_to_phys(addr);
+ return addr;
}
-static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- BUG_ON(direction == DMA_NONE);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- return;
- if (direction == DMA_TO_DEVICE)
- return;
-
- /*
- * For PCI_DMA_FROMDEVICE this flush is not necessary for the
- * simple map/unmap case. However, it IS necessary if
- * pci_dma_sync_single_* has been called and the buffer reused.
- */
-
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
+ if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
+ return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
+ else
+ return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
}
-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
- int i;
- struct scatterlist *sg;
-
- BUG_ON(direction == DMA_NONE);
+ int order = get_order(size);
- for_each_sg(sglist, sg, nents, i) {
- unsigned long vaddr = (unsigned long)sg_virt(sg);
+ if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
+ size = 1 << (order + PAGE_SHIFT);
+ unmap_uncached_pages((unsigned long)vaddr, size);
+ pcxl_free_range((unsigned long)vaddr, size);
- sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
- sg_dma_len(sg) = sg->length;
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
-
- flush_kernel_dcache_range(vaddr, sg->length);
+ vaddr = __va(dma_handle);
}
- return nents;
-}
-
-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- BUG_ON(direction == DMA_NONE);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- return;
-
- if (direction == DMA_TO_DEVICE)
- return;
-
- /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
- for_each_sg(sglist, sg, nents, i)
- flush_kernel_vmap_range(sg_virt(sg), sg->length);
-}
-
-static void pa11_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
- size);
-}
-
-static void pa11_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
- size);
+ free_pages((unsigned long)vaddr, get_order(size));
}
-static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- int i;
- struct scatterlist *sg;
-
- /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
- for_each_sg(sglist, sg, nents, i)
- flush_kernel_vmap_range(sg_virt(sg), sg->length);
+ flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
-static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- int i;
- struct scatterlist *sg;
-
- /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
- for_each_sg(sglist, sg, nents, i)
- flush_kernel_vmap_range(sg_virt(sg), sg->length);
+ flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
-static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
flush_kernel_dcache_range((unsigned long)vaddr, size);
}
-
-const struct dma_map_ops pcxl_dma_ops = {
- .alloc = pa11_dma_alloc,
- .free = pa11_dma_free,
- .map_page = pa11_dma_map_page,
- .unmap_page = pa11_dma_unmap_page,
- .map_sg = pa11_dma_map_sg,
- .unmap_sg = pa11_dma_unmap_sg,
- .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
- .sync_single_for_device = pa11_dma_sync_single_for_device,
- .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
- .sync_sg_for_device = pa11_dma_sync_sg_for_device,
- .cache_sync = pa11_dma_cache_sync,
-};
-
-static void *pcx_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
-{
- void *addr;
-
- if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
- return NULL;
-
- addr = (void *)__get_free_pages(flag, get_order(size));
- if (addr)
- *dma_handle = (dma_addr_t)virt_to_phys(addr);
-
- return addr;
-}
-
-static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t iova, unsigned long attrs)
-{
- free_pages((unsigned long)vaddr, get_order(size));
- return;
-}
-
-const struct dma_map_ops pcx_dma_ops = {
- .alloc = pcx_dma_alloc,
- .free = pcx_dma_free,
- .map_page = pa11_dma_map_page,
- .unmap_page = pa11_dma_unmap_page,
- .map_sg = pa11_dma_map_sg,
- .unmap_sg = pa11_dma_unmap_sg,
- .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
- .sync_single_for_device = pa11_dma_sync_single_for_device,
- .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
- .sync_sg_for_device = pa11_dma_sync_sg_for_device,
- .cache_sync = pa11_dma_cache_sync,
-};
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b931745815e0..eb39e7e380d7 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -302,7 +302,7 @@ get_wchan(struct task_struct *p)
ip = info.ip;
if (!in_sched_functions(ip))
return ip;
- } while (count++ < 16);
+ } while (count++ < MAX_UNWIND_ENTRIES);
return 0;
}
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 45cc65902fce..82bd0d0927ce 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -288,6 +288,8 @@ void __init collect_boot_cpu_data(void)
printk(KERN_INFO "model %s\n",
boot_cpu_data.pdc.sys_model_name);
+ dump_stack_set_arch_desc("%s", boot_cpu_data.pdc.sys_model_name);
+
boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion;
boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion;
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 7aa1d4d0d444..2582df1c529b 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -676,3 +676,103 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
return &user_parisc_native_view;
}
+
+
+/* HAVE_REGS_AND_STACK_ACCESS_API feature */
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_INDEX(gr,0),
+ REG_OFFSET_INDEX(gr,1),
+ REG_OFFSET_INDEX(gr,2),
+ REG_OFFSET_INDEX(gr,3),
+ REG_OFFSET_INDEX(gr,4),
+ REG_OFFSET_INDEX(gr,5),
+ REG_OFFSET_INDEX(gr,6),
+ REG_OFFSET_INDEX(gr,7),
+ REG_OFFSET_INDEX(gr,8),
+ REG_OFFSET_INDEX(gr,9),
+ REG_OFFSET_INDEX(gr,10),
+ REG_OFFSET_INDEX(gr,11),
+ REG_OFFSET_INDEX(gr,12),
+ REG_OFFSET_INDEX(gr,13),
+ REG_OFFSET_INDEX(gr,14),
+ REG_OFFSET_INDEX(gr,15),
+ REG_OFFSET_INDEX(gr,16),
+ REG_OFFSET_INDEX(gr,17),
+ REG_OFFSET_INDEX(gr,18),
+ REG_OFFSET_INDEX(gr,19),
+ REG_OFFSET_INDEX(gr,20),
+ REG_OFFSET_INDEX(gr,21),
+ REG_OFFSET_INDEX(gr,22),
+ REG_OFFSET_INDEX(gr,23),
+ REG_OFFSET_INDEX(gr,24),
+ REG_OFFSET_INDEX(gr,25),
+ REG_OFFSET_INDEX(gr,26),
+ REG_OFFSET_INDEX(gr,27),
+ REG_OFFSET_INDEX(gr,28),
+ REG_OFFSET_INDEX(gr,29),
+ REG_OFFSET_INDEX(gr,30),
+ REG_OFFSET_INDEX(gr,31),
+ REG_OFFSET_INDEX(sr,0),
+ REG_OFFSET_INDEX(sr,1),
+ REG_OFFSET_INDEX(sr,2),
+ REG_OFFSET_INDEX(sr,3),
+ REG_OFFSET_INDEX(sr,4),
+ REG_OFFSET_INDEX(sr,5),
+ REG_OFFSET_INDEX(sr,6),
+ REG_OFFSET_INDEX(sr,7),
+ REG_OFFSET_INDEX(iasq,0),
+ REG_OFFSET_INDEX(iasq,1),
+ REG_OFFSET_INDEX(iaoq,0),
+ REG_OFFSET_INDEX(iaoq,1),
+ REG_OFFSET_NAME(cr27),
+ REG_OFFSET_NAME(ksp),
+ REG_OFFSET_NAME(kpc),
+ REG_OFFSET_NAME(sar),
+ REG_OFFSET_NAME(iir),
+ REG_OFFSET_NAME(isr),
+ REG_OFFSET_NAME(ior),
+ REG_OFFSET_NAME(ipsw),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset: the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
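
The table above exists so other kernel code can translate between pt_regs
field names and byte offsets. A minimal usage sketch, assuming a kernel
context (the helper name read_reg_by_name is hypothetical, not part of
this patch):

	/* Sketch: resolve a register by name and read it from pt_regs.
	 * "gr26" matches the REG_OFFSET_INDEX(gr,26) entry added above.
	 */
	static unsigned long read_reg_by_name(struct pt_regs *regs,
					      const char *name)
	{
		int off = regs_query_register_offset(name); /* e.g. "gr26" */

		if (off < 0)
			return 0;	/* unknown register name */
		return *(unsigned long *)((unsigned char *)regs + off);
	}
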
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index cc9963421a19..2b16d8d6598f 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -35,12 +35,6 @@ real32_stack:
real64_stack:
.block 8192
-#ifdef CONFIG_64BIT
-# define REG_SZ 8
-#else
-# define REG_SZ 4
-#endif
-
#define N_SAVED_REGS 9
save_cr_space:
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 8d3a7b80ac42..4e87c35c22b7 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -97,14 +97,12 @@ void __init dma_ops_init(void)
panic( "PA-RISC Linux currently only supports machines that conform to\n"
"the PA-RISC 1.1 or 2.0 architecture specification.\n");
- case pcxs:
- case pcxt:
- hppa_dma_ops = &pcx_dma_ops;
- break;
case pcxl2:
pa7300lc_init();
case pcxl: /* falls through */
- hppa_dma_ops = &pcxl_dma_ops;
+ case pcxs:
+ case pcxt:
+ hppa_dma_ops = &dma_noncoherent_ops;
break;
default:
break;
diff --git a/arch/parisc/kernel/stacktrace.c b/arch/parisc/kernel/stacktrace.c
index 2fe914c5f533..ec5835e83a7a 100644
--- a/arch/parisc/kernel/stacktrace.c
+++ b/arch/parisc/kernel/stacktrace.c
@@ -16,20 +16,7 @@ static void dump_trace(struct task_struct *task, struct stack_trace *trace)
{
struct unwind_frame_info info;
- /* initialize unwind info */
- if (task == current) {
- unsigned long sp;
- struct pt_regs r;
-HERE:
- asm volatile ("copy %%r30, %0" : "=r"(sp));
- memset(&r, 0, sizeof(struct pt_regs));
- r.iaoq[0] = (unsigned long)&&HERE;
- r.gr[2] = (unsigned long)__builtin_return_address(0);
- r.gr[30] = sp;
- unwind_frame_init(&info, task, &r);
- } else {
- unwind_frame_init_from_blocked_task(&info, task);
- }
+ unwind_frame_init_task(&info, task, NULL);
/* unwind stack and save entries in stack_trace struct */
trace->nr_entries = 0;
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 43b308cfdf53..376ea0d1b275 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -156,11 +156,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
-#ifdef CONFIG_64BIT
- /* This should only ever run for 32-bit processes. */
- BUG_ON(!test_thread_flag(TIF_32BIT));
-#endif
-
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index e775f80ae28c..f453997a7b8f 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -108,12 +108,8 @@ linux_gateway_entry:
mtsp %r0,%sr6 /* get kernel space into sr6 */
#ifdef CONFIG_64BIT
- /* for now we can *always* set the W bit on entry to the syscall
- * since we don't support wide userland processes. We could
- * also save the current SM other than in r0 and restore it on
- * exit from the syscall, and also use that value to know
- * whether to do narrow or wide syscalls. -PB
- */
+ /* Store W bit on entry to the syscall in case it's a wide userland
+ * process. */
ssm PSW_SM_W, %r1
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
@@ -227,8 +223,7 @@ linux_gateway_entry:
or,= %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
- ldil L%sys_call_table, %r1
- ldo R%sys_call_table(%r1), %r19
+ load32 sys_call_table, %r19
#endif
comiclr,>> __NR_Linux_syscalls, %r20, %r0
b,n .Lsyscall_nosys
@@ -331,8 +326,6 @@ tracesys_next:
* task->thread.regs.gr[20] above.
*/
copy %ret0,%r20
- ldil L%sys_call_table,%r1
- ldo R%sys_call_table(%r1), %r19
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
@@ -354,6 +347,23 @@ tracesys_next:
comiclr,>> __NR_Linux_syscalls, %r20, %r0
b,n .Ltracesys_nosys
+ /* Note! We cannot use the syscall table that is mapped
+ nearby since the gateway page is mapped execute-only. */
+
+#ifdef CONFIG_64BIT
+ LDREG TASK_PT_GR30(%r1), %r19 /* get user's sp back */
+ extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */
+
+ ldil L%sys_call_table, %r1
+ or,= %r2,%r2,%r2
+ addil L%(sys_call_table64-sys_call_table), %r1
+ ldo R%sys_call_table(%r1), %r19
+ or,= %r2,%r2,%r2
+ ldo R%sys_call_table64(%r1), %r19
+#else
+ load32 sys_call_table, %r19
+#endif
+
LDREGX %r20(%r19), %r19
/* If this is a sys_rt_sigreturn call, and the signal was received
@@ -464,16 +474,13 @@ tracesys_sigexit:
lws_start:
#ifdef CONFIG_64BIT
- /* FIXME: If we are a 64-bit kernel just
- * turn this on unconditionally.
- */
ssm PSW_SM_W, %r1
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
* the bottom of sp temporarily */
or,ev %r1,%r30,%r30
- /* Clip LWS number to a 32-bit value always */
+ /* Clip LWS number to a 32-bit value for 32-bit processes */
depdi 0, 31, 32, %r20
#endif
@@ -629,11 +636,12 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw,ma 0(%r26), %r28
+1: ldw 0(%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw,ma %r24, 0(%r26)
+2: stw %r24, 0(%r26)
/* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ sync
+ stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
@@ -647,6 +655,7 @@ cas_action:
3:
/* Error occurred on load or store */
/* Free lock */
+ sync
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
@@ -796,30 +805,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
-13: ldb,ma 0(%r26), %r29
+13: ldb 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-14: stb,ma %r24, 0(%r26)
+14: stb %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
-15: ldh,ma 0(%r26), %r29
+15: ldh 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-16: sth,ma %r24, 0(%r26)
+16: sth %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
-17: ldw,ma 0(%r26), %r29
+17: ldw 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-18: stw,ma %r24, 0(%r26)
+18: stw %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@@ -827,10 +836,10 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
-19: ldd,ma 0(%r26), %r29
+19: ldd 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
-20: std,ma %r24, 0(%r26)
+20: std %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
@@ -848,7 +857,8 @@ cas2_action:
cas2_end:
/* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ sync
+ stw %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
@@ -858,6 +868,7 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
+ sync
stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 4309ad31a874..68f10f87073d 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -45,7 +45,7 @@
#include "../math-emu/math-emu.h" /* for handle_fpe() */
-static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+static void parisc_show_stack(struct task_struct *task,
struct pt_regs *regs);
static int printbinary(char *buf, unsigned long x, int nbits)
@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
- parisc_show_stack(current, NULL, regs);
+ parisc_show_stack(current, regs);
}
}
@@ -172,7 +172,7 @@ static void do_show_stack(struct unwind_frame_info *info)
int i = 1;
printk(KERN_CRIT "Backtrace:\n");
- while (i <= 16) {
+ while (i <= MAX_UNWIND_ENTRIES) {
if (unwind_once(info) < 0 || info->ip == 0)
break;
@@ -185,44 +185,19 @@ static void do_show_stack(struct unwind_frame_info *info)
printk(KERN_CRIT "\n");
}
-static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+static void parisc_show_stack(struct task_struct *task,
struct pt_regs *regs)
{
struct unwind_frame_info info;
- struct task_struct *t;
- t = task ? task : current;
- if (regs) {
- unwind_frame_init(&info, t, regs);
- goto show_stack;
- }
-
- if (t == current) {
- unsigned long sp;
-
-HERE:
- asm volatile ("copy %%r30, %0" : "=r"(sp));
- {
- struct pt_regs r;
-
- memset(&r, 0, sizeof(struct pt_regs));
- r.iaoq[0] = (unsigned long)&&HERE;
- r.gr[2] = (unsigned long)__builtin_return_address(0);
- r.gr[30] = sp;
-
- unwind_frame_init(&info, current, &r);
- }
- } else {
- unwind_frame_init_from_blocked_task(&info, t);
- }
+ unwind_frame_init_task(&info, task, regs);
-show_stack:
do_show_stack(&info);
}
void show_stack(struct task_struct *t, unsigned long *sp)
{
- return parisc_show_stack(t, sp, NULL);
+ parisc_show_stack(t, NULL);
}
int is_valid_bugaddr(unsigned long iaoq)
@@ -557,7 +532,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
cpu_lpmc(5, regs);
return;
- case 6:
+ case PARISC_ITLB_TRAP:
/* Instruction TLB miss fault/Instruction page fault */
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 2ef83d78eec4..f329b466e68f 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/kallsyms.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
@@ -117,7 +116,8 @@ unwind_table_init(struct unwind_table *table, const char *name,
for (; start <= end; start++) {
if (start < end &&
start->region_end > (start+1)->region_start) {
- printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
+ pr_warn("Out of order unwind entry! %px and %px\n",
+ start, start+1);
}
start->region_start += base_addr;
@@ -203,26 +203,63 @@ int __init unwind_init(void)
return 0;
}
-#ifdef CONFIG_64BIT
-#define get_func_addr(fptr) fptr[2]
-#else
-#define get_func_addr(fptr) fptr[0]
-#endif
-
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
- extern void handle_interruption(int, struct pt_regs *);
- static unsigned long *hi = (unsigned long *)&handle_interruption;
-
- if (pc == get_func_addr(hi)) {
+ /*
+ * We have to use void * instead of a function pointer, because
+ * on 64-bit a function pointer points to the function descriptor
+ * rather than to the function itself.
+ * Make them const so the compiler knows they live in .text.
+ * Note: We could use dereference_kernel_function_descriptor()
+ * instead but we want to keep it simple here.
+ */
+ extern void * const handle_interruption;
+ extern void * const ret_from_kernel_thread;
+ extern void * const syscall_exit;
+ extern void * const intr_return;
+ extern void * const _switch_to_ret;
+#ifdef CONFIG_IRQSTACKS
+ extern void * const _call_on_stack;
+#endif /* CONFIG_IRQSTACKS */
+
+ if (pc == (unsigned long) &handle_interruption) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
+ return 1;
+ }
+
+ if (pc == (unsigned long) &ret_from_kernel_thread ||
+ pc == (unsigned long) &syscall_exit) {
+ info->prev_sp = info->prev_ip = 0;
+ return 1;
+ }
+
+ if (pc == (unsigned long) &intr_return) {
+ struct pt_regs *regs;
+
+ dbg("Found intr_return()\n");
+ regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
+ info->prev_sp = regs->gr[30];
+ info->prev_ip = regs->iaoq[0];
+ info->rp = regs->gr[2];
+ return 1;
+ }
+ if (pc == (unsigned long) &_switch_to_ret) {
+ info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
+ info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
return 1;
}
+#ifdef CONFIG_IRQSTACKS
+ if (pc == (unsigned long) &_call_on_stack) {
+ info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
+ info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
+ return 1;
+ }
+#endif
+
return 0;
}
@@ -238,34 +275,8 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
if (e == NULL) {
unsigned long sp;
- dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
-
-#ifdef CONFIG_KALLSYMS
- /* Handle some frequent special cases.... */
- {
- char symname[KSYM_NAME_LEN];
- char *modname;
-
- kallsyms_lookup(info->ip, NULL, NULL, &modname,
- symname);
-
- dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
-
- if (strcmp(symname, "_switch_to_ret") == 0) {
- info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
- info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
- dbg("_switch_to_ret @ %lx - setting "
- "prev_sp=%lx prev_ip=%lx\n",
- info->ip, info->prev_sp,
- info->prev_ip);
- return;
- } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
- strcmp(symname, "syscall_exit") == 0) {
- info->prev_ip = info->prev_sp = 0;
- return;
- }
- }
-#endif
+ dbg("Cannot find unwind entry for %pS; forced unwinding\n",
+ (void *) info->ip);
/* Since we are doing the unwinding blind, we don't know if
we are adjusting the stack correctly or extracting the rp
@@ -394,9 +405,31 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct
kfree(r2);
}
-void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
+#define get_parisc_stackpointer() ({ \
+ unsigned long sp; \
+ __asm__("copy %%r30, %0" : "=r"(sp)); \
+ (sp); \
+})
+
+void unwind_frame_init_task(struct unwind_frame_info *info,
+ struct task_struct *task, struct pt_regs *regs)
{
- unwind_frame_init(info, current, regs);
+ task = task ? task : current;
+
+ if (task == current) {
+ struct pt_regs r;
+
+ if (!regs) {
+ memset(&r, 0, sizeof(r));
+ r.iaoq[0] = _THIS_IP_;
+ r.gr[2] = _RET_IP_;
+ r.gr[30] = get_parisc_stackpointer();
+ regs = &r;
+ }
+ unwind_frame_init(info, task, regs);
+ } else {
+ unwind_frame_init_from_blocked_task(info, task);
+ }
}
int unwind_once(struct unwind_frame_info *next_frame)
@@ -433,19 +466,12 @@ int unwind_to_user(struct unwind_frame_info *info)
unsigned long return_address(unsigned int level)
{
struct unwind_frame_info info;
- struct pt_regs r;
- unsigned long sp;
/* initialize unwind info */
- asm volatile ("copy %%r30, %0" : "=r"(sp));
- memset(&r, 0, sizeof(struct pt_regs));
- r.iaoq[0] = (unsigned long) current_text_addr();
- r.gr[2] = (unsigned long) __builtin_return_address(0);
- r.gr[30] = sp;
- unwind_frame_init(&info, current, &r);
+ unwind_frame_init_task(&info, current, NULL);
/* unwind stack */
- ++level;
+ level += 2;
do {
if (unwind_once(&info) < 0 || info.ip == 0)
return 0;
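
All of the backtrace entry points patched above now reduce to a single
initializer. A hedged sketch of the resulting calling convention,
assuming a kernel context (dump_ips is a hypothetical caller, not part
of this patch):

	/* Sketch: the patched call sites all take one of three forms. */
	static void dump_ips(struct task_struct *task, struct pt_regs *regs)
	{
		struct unwind_frame_info info;

		/* task == current, regs == NULL: unwind the running context;
		 * task != current:               unwind a blocked task;
		 * regs != NULL:                  unwind from a saved snapshot.
		 */
		unwind_frame_init_task(&info, task, regs);

		while (unwind_once(&info) >= 0 && info.ip != 0)
			printk(KERN_CRIT " %pS\n", (void *)info.ip);
	}
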
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index d4fe19806d57..b53fb6fedf06 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -64,9 +64,6 @@
*/
ENTRY_CFI(lclear_user)
- .proc
- .callinfo NO_CALLS
- .entry
comib,=,n 0,%r25,$lclu_done
get_sr
$lclu_loop:
@@ -81,13 +78,9 @@ $lclu_done:
ldo 1(%r25),%r25
ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
-
- .exit
ENDPROC_CFI(lclear_user)
- .procend
-
/*
* long lstrnlen_user(char *s, long n)
*
@@ -97,9 +90,6 @@ ENDPROC_CFI(lclear_user)
*/
ENTRY_CFI(lstrnlen_user)
- .proc
- .callinfo NO_CALLS
- .entry
comib,= 0,%r25,$lslen_nzero
copy %r26,%r24
get_sr
@@ -111,7 +101,6 @@ $lslen_loop:
$lslen_done:
bv %r0(%r2)
sub %r26,%r24,%r28
- .exit
$lslen_nzero:
b $lslen_done
@@ -125,9 +114,6 @@ $lslen_nzero:
ENDPROC_CFI(lstrnlen_user)
- .procend
-
-
/*
* unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
@@ -186,10 +172,6 @@ ENDPROC_CFI(lstrnlen_user)
save_len = r31
ENTRY_CFI(pa_memcpy)
- .proc
- .callinfo NO_CALLS
- .entry
-
/* Last destination address */
add dst,len,end
@@ -439,9 +421,6 @@ ENTRY_CFI(pa_memcpy)
b .Lcopy_done
10: stw,ma t1,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
-
- .exit
ENDPROC_CFI(pa_memcpy)
- .procend
.end
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index a80117980fc2..c8e8b7c05558 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -262,7 +262,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
struct task_struct *tsk;
struct mm_struct *mm;
unsigned long acc_type;
- int fault = 0;
+ vm_fault_t fault = 0;
unsigned int flags;
if (faulthandler_disabled())
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 2607d2d33405..74842d28a7a1 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -19,7 +19,6 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/pci.h> /* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
@@ -616,17 +615,13 @@ void __init mem_init(void)
free_all_bootmem();
#ifdef CONFIG_PA11
- if (hppa_dma_ops == &pcxl_dma_ops) {
+ if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
+ PCXL_DMA_MAP_SIZE);
- } else {
- pcxl_dma_start = 0;
- parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
- }
-#else
- parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
+ } else
#endif
+ parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
mem_init_print_info(NULL);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9f2b75fe2c2d..db0b6eebbfa5 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -165,7 +165,7 @@ config PPC
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
- select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64
+ select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_SMP_IDLE_THREAD
@@ -177,6 +177,7 @@ config PPC
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+ select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_CBPF_JIT if !PPC64
@@ -197,6 +198,7 @@ config PPC
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_XZ if PPC_BOOK3S
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
@@ -225,6 +227,7 @@ config PPC
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA
+ select NEED_DMA_MAP_STATE if PPC64 || NOT_COHERENT_CACHE
select NEED_SG_DMA_LENGTH
select NO_BOOTMEM
select OF
@@ -240,6 +243,11 @@ config PPC
# Please keep this list sorted alphabetically.
#
+config PPC_BARRIER_NOSPEC
+ bool
+ default y
+ depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
+
config GENERIC_CSUM
def_bool n
@@ -383,10 +391,6 @@ config PGTABLE_LEVELS
default 3 if PPC_64K_PAGES && !PPC_BOOK3S_64
default 4
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "arch/powerpc/sysdev/Kconfig"
source "arch/powerpc/platforms/Kconfig"
@@ -397,8 +401,6 @@ config HIGHMEM
depends on PPC32
source kernel/Kconfig.hz
-source kernel/Kconfig.preempt
-source "fs/Kconfig.binfmt"
config HUGETLB_PAGE_SIZE_VARIABLE
bool
@@ -641,8 +643,6 @@ config ILLEGAL_POINTER_VALUE
default 0x5deadbeef0000000 if PPC64
default 0
-source "mm/Kconfig"
-
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
@@ -884,9 +884,6 @@ config ZONE_DMA
bool
default y
-config NEED_DMA_MAP_STATE
- def_bool (PPC64 || NOT_COHERENT_CACHE)
-
config GENERIC_ISA_DMA
bool
depends on ISA_DMA_API
@@ -1201,20 +1198,6 @@ endif
config ARCH_RANDOM
def_bool n
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "lib/Kconfig"
-
-source "arch/powerpc/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
config PPC_LIB_RHEAP
bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index c45424c64e19..fd63cd914a74 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config PPC_DISABLE_WERROR
bool "Don't build arch/powerpc code with -Werror"
@@ -379,5 +376,3 @@ config PPC_FAST_ENDIAN_SWITCH
depends on DEBUG_KERNEL && PPC_BOOK3S_64
help
If you're unsure what this is, say N.
-
-endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 2ea575cb3401..8397c7bd5880 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -36,7 +36,7 @@ else
KBUILD_DEFCONFIG := ppc64_defconfig
endif
-ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_PPC64
new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
ifeq ($(new_nm),y)
@@ -74,7 +74,7 @@ KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
endif
endif
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+ifdef CONFIG_CPU_LITTLE_ENDIAN
KBUILD_CFLAGS += -mlittle-endian
LDFLAGS += -EL
LDEMULATION := lppc
@@ -117,7 +117,7 @@ LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
LDFLAGS_vmlinux += $(call ld-option,--orphan-handling=warn)
-ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_PPC64
ifeq ($(call cc-option-yn,-mcmodel=medium),y)
# -mcmodel=medium breaks modules because it uses 32bit offsets from
# the TOC pointer to create pointers where possible. Pointers into the
@@ -134,7 +134,7 @@ endif
endif
CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+ifdef CONFIG_CPU_LITTLE_ENDIAN
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
else
@@ -148,8 +148,8 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD)
CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata)
-ifeq ($(CONFIG_PPC_BOOK3S_64),y)
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+ifdef CONFIG_PPC_BOOK3S_64
+ifdef CONFIG_CPU_LITTLE_ENDIAN
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8)
else
@@ -164,16 +164,10 @@ ifdef CONFIG_MPROFILE_KERNEL
CC_FLAGS_FTRACE := -pg -mprofile-kernel
endif
-CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell)
-CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
-CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6)
-CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
-CFLAGS-$(CONFIG_POWER8_CPU) += $(call cc-option,-mcpu=power8)
-CFLAGS-$(CONFIG_POWER9_CPU) += $(call cc-option,-mcpu=power9)
-CFLAGS-$(CONFIG_PPC_8xx) += $(call cc-option,-mcpu=860)
+CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
# Altivec option not allowed with e500mc64 in GCC.
-ifeq ($(CONFIG_ALTIVEC),y)
+ifdef CONFIG_ALTIVEC
E5500_CPU := -mcpu=powerpc64
else
E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
@@ -181,8 +175,8 @@ endif
CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU)
CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
-ifeq ($(CONFIG_PPC32),y)
-ifeq ($(CONFIG_PPC_E500MC),y)
+ifdef CONFIG_PPC32
+ifdef CONFIG_PPC_E500MC
CFLAGS-y += $(call cc-option,-mcpu=e500mc,-mcpu=powerpc)
else
CFLAGS-$(CONFIG_E500) += $(call cc-option,-mcpu=8540 -msoft-float,-mcpu=powerpc)
@@ -204,7 +198,7 @@ else
CHECKFLAGS += -D__LITTLE_ENDIAN__
endif
-ifeq ($(CONFIG_476FPE_ERR46),y)
+ifdef CONFIG_476FPE_ERR46
KBUILD_LDFLAGS_MODULE += --ppc476-workaround \
-T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds
endif
@@ -231,18 +225,19 @@ KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
# often slow when they are implemented at all
KBUILD_CFLAGS += $(call cc-option,-mno-string)
-ifeq ($(CONFIG_6xx),y)
+ifdef CONFIG_6xx
KBUILD_CFLAGS += -mcpu=powerpc
endif
# Work around a gcc code-gen bug with -fno-omit-frame-pointer.
-ifeq ($(CONFIG_FUNCTION_TRACER),y)
+ifdef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS += -mno-sched-epilog
endif
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
cpu-as-$(CONFIG_E200) += -Wa,-me200
+cpu-as-$(CONFIG_E500) += -Wa,-me500
cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
@@ -354,6 +349,21 @@ mpc86xx_smp_defconfig:
$(call merge_into_defconfig,mpc86xx_basic_defconfig,\
86xx-smp 86xx-hw fsl-emb-nonhw)
+PHONY += ppc32_allmodconfig
+ppc32_allmodconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/book3s_32.config \
+ -f $(srctree)/Makefile allmodconfig
+
+PHONY += ppc64le_allmodconfig
+ppc64le_allmodconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/le.config \
+ -f $(srctree)/Makefile allmodconfig
+
+PHONY += ppc64_book3e_allmodconfig
+ppc64_book3e_allmodconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/85xx-64bit.config \
+ -f $(srctree)/Makefile allmodconfig
+
define archhelp
@echo '* zImage - Build default images selected by kernel config'
@echo ' zImage.* - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
@@ -380,7 +390,7 @@ install:
$(Q)$(MAKE) $(build)=$(boot) install
vdso_install:
-ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_PPC64
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
endif
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index deea20c334df..0fb96c26136f 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -354,7 +354,7 @@ image-$(CONFIG_AMIGAONE) += cuImage.amigaone
# For 32-bit powermacs, build the COFF and miboot images
# as well as the ELF images.
-ifeq ($(CONFIG_PPC32),y)
+ifdef CONFIG_PPC32
image-$(CONFIG_PPC_PMAC) += zImage.coff zImage.miboot
endif
diff --git a/arch/powerpc/boot/dts/ac14xx.dts b/arch/powerpc/boot/dts/ac14xx.dts
index 83bcfd865167..0be5c4f3265d 100644
--- a/arch/powerpc/boot/dts/ac14xx.dts
+++ b/arch/powerpc/boot/dts/ac14xx.dts
@@ -176,12 +176,12 @@
clock-frequency = <400000>;
at24@30 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x30>;
};
at24@31 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x31>;
};
@@ -191,42 +191,42 @@
};
at24@50 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x50>;
};
at24@51 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x51>;
};
at24@52 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x52>;
};
at24@53 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x53>;
};
at24@54 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x54>;
};
at24@55 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x55>;
};
at24@56 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x56>;
};
at24@57 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x57>;
};
diff --git a/arch/powerpc/boot/dts/fsl/kmcent2.dts b/arch/powerpc/boot/dts/fsl/kmcent2.dts
index 5922c1ea0e96..3094df05f5ea 100644
--- a/arch/powerpc/boot/dts/fsl/kmcent2.dts
+++ b/arch/powerpc/boot/dts/fsl/kmcent2.dts
@@ -130,7 +130,7 @@
#size-cells = <0>;
eeprom@54 {
- compatible = "24c02";
+ compatible = "atmel,24c02";
reg = <0x54>;
pagesize = <2>;
read-only;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi
index abd01d466de4..9b6cf9149937 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi
@@ -37,12 +37,13 @@ fman0: fman@400000 {
#size-cells = <1>;
cell-index = <0>;
compatible = "fsl,fman";
- ranges = <0 0x400000 0x100000>;
- reg = <0x400000 0x100000>;
+ ranges = <0 0x400000 0xfe000>;
+ reg = <0x400000 0xfe000>;
interrupts = <96 2 0 0>, <16 2 1 1>;
clocks = <&clockgen 3 0>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x40 0xc>;
+ ptimer-handle = <&ptp_timer0>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -93,9 +94,11 @@ fman0: fman@400000 {
reg = <0x87000 0x1000>;
status = "disabled";
};
+};
- ptp_timer0: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer0: ptp-timer@4fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x4fe000 0x1000>;
+ interrupts = <96 2 0 0>;
+ clocks = <&clockgen 3 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi
index debea75fd3f0..e95c11ff0417 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi
@@ -37,12 +37,13 @@ fman1: fman@500000 {
#size-cells = <1>;
cell-index = <1>;
compatible = "fsl,fman";
- ranges = <0 0x500000 0x100000>;
- reg = <0x500000 0x100000>;
+ ranges = <0 0x500000 0xfe000>;
+ reg = <0x500000 0xfe000>;
interrupts = <97 2 0 0>, <16 2 1 0>;
clocks = <&clockgen 3 1>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x60 0xc>;
+ ptimer-handle = <&ptp_timer1>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -93,9 +94,11 @@ fman1: fman@500000 {
reg = <0x87000 0x1000>;
status = "disabled";
};
+};
- ptp_timer1: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer1: ptp-timer@5fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x5fe000 0x1000>;
+ interrupts = <97 2 0 0>;
+ clocks = <&clockgen 3 1>;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi
index 3a20e0d1a6d2..d62b36c5a329 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi
@@ -37,12 +37,13 @@ fman0: fman@400000 {
#size-cells = <1>;
cell-index = <0>;
compatible = "fsl,fman";
- ranges = <0 0x400000 0x100000>;
- reg = <0x400000 0x100000>;
+ ranges = <0 0x400000 0xfe000>;
+ reg = <0x400000 0xfe000>;
interrupts = <96 2 0 0>, <16 2 1 1>;
clocks = <&clockgen 3 0>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x800 0x10>;
+ ptimer-handle = <&ptp_timer0>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -98,9 +99,11 @@ fman0: fman@400000 {
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfd000 0x1000>;
};
+};
- ptp_timer0: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer0: ptp-timer@4fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x4fe000 0x1000>;
+ interrupts = <96 2 0 0>;
+ clocks = <&clockgen 3 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi
index 82750ac944c7..310232460500 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi
@@ -37,12 +37,13 @@ fman1: fman@500000 {
#size-cells = <1>;
cell-index = <1>;
compatible = "fsl,fman";
- ranges = <0 0x500000 0x100000>;
- reg = <0x500000 0x100000>;
+ ranges = <0 0x500000 0xfe000>;
+ reg = <0x500000 0xfe000>;
interrupts = <97 2 0 0>, <16 2 1 0>;
clocks = <&clockgen 3 1>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x820 0x10>;
+ ptimer-handle = <&ptp_timer1>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -98,9 +99,11 @@ fman1: fman@500000 {
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfd000 0x1000>;
};
+};
- ptp_timer1: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer1: ptp-timer@5fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x5fe000 0x1000>;
+ interrupts = <97 2 0 0>;
+ clocks = <&clockgen 3 1>;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
index 7f60b6060176..c90702b04a53 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
@@ -37,12 +37,13 @@ fman0: fman@400000 {
#size-cells = <1>;
cell-index = <0>;
compatible = "fsl,fman";
- ranges = <0 0x400000 0x100000>;
- reg = <0x400000 0x100000>;
+ ranges = <0 0x400000 0xfe000>;
+ reg = <0x400000 0xfe000>;
interrupts = <96 2 0 0>, <16 2 1 1>;
clocks = <&clockgen 3 0>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x800 0x10>;
+ ptimer-handle = <&ptp_timer0>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -86,9 +87,11 @@ fman0: fman@400000 {
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfd000 0x1000>;
};
+};
- ptp_timer0: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer0: ptp-timer@4fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x4fe000 0x1000>;
+ interrupts = <96 2 0 0>;
+ clocks = <&clockgen 3 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/t2080rdb.dts b/arch/powerpc/boot/dts/fsl/t2080rdb.dts
index 836e4c965b22..55c0210a771d 100644
--- a/arch/powerpc/boot/dts/fsl/t2080rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t2080rdb.dts
@@ -97,12 +97,12 @@
mdio@fd000 {
xg_cs4315_phy1: ethernet-phy@c {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0xc>;
};
xg_cs4315_phy2: ethernet-phy@d {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0xd>;
};
diff --git a/arch/powerpc/boot/dts/fsl/t4240rdb.dts b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
index 15eb0a3f7290..a56a705d41f7 100644
--- a/arch/powerpc/boot/dts/fsl/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
@@ -267,22 +267,22 @@
mdio@fd000 {
xfiphy1: ethernet-phy@10 {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0x10>;
};
xfiphy2: ethernet-phy@11 {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0x11>;
};
xfiphy3: ethernet-phy@13 {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0x13>;
};
xfiphy4: ethernet-phy@12 {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0x12>;
};
};
diff --git a/arch/powerpc/boot/dts/pdm360ng.dts b/arch/powerpc/boot/dts/pdm360ng.dts
index 445b88114009..df1283b63d9b 100644
--- a/arch/powerpc/boot/dts/pdm360ng.dts
+++ b/arch/powerpc/boot/dts/pdm360ng.dts
@@ -98,7 +98,7 @@
fsl,preserve-clocking;
eeprom@50 {
- compatible = "at,24c01";
+ compatible = "atmel,24c01";
reg = <0x50>;
};
diff --git a/arch/powerpc/configs/book3s_32.config b/arch/powerpc/configs/book3s_32.config
new file mode 100644
index 000000000000..8721eb7b1294
--- /dev/null
+++ b/arch/powerpc/configs/book3s_32.config
@@ -0,0 +1,2 @@
+CONFIG_PPC64=n
+CONFIG_PPC_BOOK3S_32=y
diff --git a/arch/powerpc/configs/dpaa.config b/arch/powerpc/configs/dpaa.config
index 2fe76f5e938a..4ffacafe4036 100644
--- a/arch/powerpc/configs/dpaa.config
+++ b/arch/powerpc/configs/dpaa.config
@@ -2,3 +2,4 @@ CONFIG_FSL_DPAA=y
CONFIG_FSL_PAMU=y
CONFIG_FSL_FMAN=y
CONFIG_FSL_DPAA_ETH=y
+CONFIG_CORTINA_PHY=y
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 10940533da71..f5c366b02828 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -78,7 +78,6 @@ CONFIG_GPIO_HLWD=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_GPIO=y
# CONFIG_HWMON is not set
-CONFIG_SSB_DEBUG=y
CONFIG_FB=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
diff --git a/arch/powerpc/crypto/md5-asm.S b/arch/powerpc/crypto/md5-asm.S
index 10cdf5bceebb..1834065362c7 100644
--- a/arch/powerpc/crypto/md5-asm.S
+++ b/arch/powerpc/crypto/md5-asm.S
@@ -11,6 +11,7 @@
*/
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#define rHP r3
#define rWP r4
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
index 92289679b4c4..7e44cec37bdb 100644
--- a/arch/powerpc/crypto/md5-glue.c
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -139,7 +139,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "md5-ppc",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index c8951ce0dcc4..23e248beff71 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -7,6 +7,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#ifdef __BIG_ENDIAN__
#define LWZ(rt, d, ra) \
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index f9ebc38d3fe7..9e1814d99318 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -185,7 +185,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-ppc-spe",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index c154cebc1041..3911d5c254fa 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -132,7 +132,6 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-powerpc",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
index 718a079dcdbf..6227888dcf7e 100644
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -231,7 +231,6 @@ static struct shash_alg algs[2] = { {
.cra_name = "sha256",
.cra_driver_name= "sha256-ppc-spe",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -248,7 +247,6 @@ static struct shash_alg algs[2] = { {
.cra_name = "sha224",
.cra_driver_name= "sha224-ppc-spe",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/include/asm/asm-405.h b/arch/powerpc/include/asm/asm-405.h
new file mode 100644
index 000000000000..7270d3ae7c8e
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-405.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_POWERPC_ASM_405_H
+#define _ASM_POWERPC_ASM_405_H
+
+#include <asm/asm-const.h>
+
+#ifdef __KERNEL__
+#ifdef CONFIG_IBM405_ERR77
+/* Erratum #77 on the 405 means we need a sync or dcbt before every
+ * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
+#define PPC405_ERR77_SYNC stringify_in_c(sync;)
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+#endif
+
+#endif /* _ASM_POWERPC_ASM_405_H */
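
For reference, the macro pair is consumed from inline assembly just
before each stwcx.; a minimal sketch of the pattern, adapted loosely
from the kernel's atomic helpers (atomic_add_sketch is illustrative,
not part of this patch):

	/* Sketch: PPC405_ERR77 expands to a dcbt (or to nothing when
	 * CONFIG_IBM405_ERR77 is off) via stringify_in_c(), right in
	 * front of the stwcx.
	 */
	static inline void atomic_add_sketch(int a, atomic_t *v)
	{
		int t;

		__asm__ __volatile__(
	"1:	lwarx	%0,0,%3\n"
	"	add	%0,%2,%0\n"
		PPC405_ERR77(0, %3)
	"	stwcx.	%0,0,%3\n"
	"	bne-	1b"
		: "=&r" (t), "+m" (v->counter)
		: "r" (a), "r" (&v->counter)
		: "cc");
	}
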
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 7f2a7702596c..19b70c5b5f18 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -1,21 +1,10 @@
#ifndef _ASM_POWERPC_ASM_COMPAT_H
#define _ASM_POWERPC_ASM_COMPAT_H
+#include <asm/asm-const.h>
#include <asm/types.h>
#include <asm/ppc-opcode.h>
-#ifdef __ASSEMBLY__
-# define stringify_in_c(...) __VA_ARGS__
-# define ASM_CONST(x) x
-#else
-/* This version of stringify will deal with commas... */
-# define __stringify_in_c(...) #__VA_ARGS__
-# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
-# define __ASM_CONST(x) x##UL
-# define ASM_CONST(x) __ASM_CONST(x)
-#endif
-
-
#ifdef __powerpc64__
/* operations for longs and pointers */
@@ -70,17 +59,4 @@
#endif
-#ifdef __KERNEL__
-#ifdef CONFIG_IBM405_ERR77
-/* Erratum #77 on the 405 means we need a sync or dcbt before every
- * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
- */
-#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
-#define PPC405_ERR77_SYNC stringify_in_c(sync;)
-#else
-#define PPC405_ERR77(ra,rb)
-#define PPC405_ERR77_SYNC
-#endif
-#endif
-
#endif /* _ASM_POWERPC_ASM_COMPAT_H */
diff --git a/arch/powerpc/include/asm/asm-const.h b/arch/powerpc/include/asm/asm-const.h
new file mode 100644
index 000000000000..082c1538c562
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-const.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_POWERPC_ASM_CONST_H
+#define _ASM_POWERPC_ASM_CONST_H
+
+#ifdef __ASSEMBLY__
+# define stringify_in_c(...) __VA_ARGS__
+# define ASM_CONST(x) x
+#else
+/* This version of stringify will deal with commas... */
+# define __stringify_in_c(...) #__VA_ARGS__
+# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+# define __ASM_CONST(x) x##UL
+# define ASM_CONST(x) __ASM_CONST(x)
+#endif
+#endif /* _ASM_POWERPC_ASM_CONST_H */
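
A short sketch of what the two expansions buy you, assuming a C
compilation unit (SKETCH_FLAG and sketch_nop are illustrative names):

	/* Sketch: the same macros work from both .S files and C. In C,
	 * ASM_CONST(0x10) yields 0x10UL, and stringify_in_c(li 3, 0;)
	 * yields the string "li 3, 0; " for inline asm, commas intact.
	 */
	#define SKETCH_FLAG	ASM_CONST(0x10)	/* 0x10UL in C, 0x10 in .S */

	static inline void sketch_nop(void)
	{
		__asm__ __volatile__(stringify_in_c(li 3, 0;) ::: "r3");
	}
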
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 7841b8a60657..1f4691ce4126 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -48,8 +48,8 @@ void __trace_opal_exit(long opcode, unsigned long retval);
/* VMX copying */
int enter_vmx_usercopy(void);
int exit_vmx_usercopy(void);
-int enter_vmx_copy(void);
-void * exit_vmx_copy(void *dest);
+int enter_vmx_ops(void);
+void *exit_vmx_ops(void *dest);
/* Traps */
long machine_check_early(struct pt_regs *regs);
@@ -143,4 +143,11 @@ struct kvm_vcpu;
void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+/* Patch sites */
+extern s32 patch__call_flush_count_cache;
+extern s32 patch__flush_count_cache_return;
+extern s32 patch__memset_nocache, patch__memcpy_nocache;
+
+extern long flush_count_cache;
+
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 682b3e6a1e21..52eafaf74054 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
+#include <asm/asm-405.h>
#define ATOMIC_INIT(i) { (i) }
@@ -18,18 +19,11 @@
* a "bne-" instruction at the end, so an isync is enough as a acquire barrier
* on the platform without lwsync.
*/
-#define __atomic_op_acquire(op, args...) \
-({ \
- typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory"); \
- __ret; \
-})
-
-#define __atomic_op_release(op, args...) \
-({ \
- __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory"); \
- op##_relaxed(args); \
-})
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
+
+#define __atomic_release_fence() \
+ __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
static __inline__ int atomic_read(const atomic_t *v)
{
@@ -129,8 +123,6 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
static __inline__ void atomic_inc(atomic_t *v)
{
int t;
@@ -145,6 +137,7 @@ static __inline__ void atomic_inc(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic_inc atomic_inc
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
@@ -163,16 +156,6 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
return t;
}
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
static __inline__ void atomic_dec(atomic_t *v)
{
int t;
@@ -187,6 +170,7 @@ static __inline__ void atomic_dec(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic_dec atomic_dec
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
@@ -218,7 +202,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -226,13 +210,13 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int t;
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%1 # __atomic_add_unless\n\
+"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
cmpw 0,%0,%3 \n\
beq 2f \n\
add %0,%2,%0 \n"
@@ -248,6 +232,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return t;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
/**
* atomic_inc_not_zero - increment unless the number is zero
@@ -280,9 +265,6 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
-#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
@@ -412,8 +394,6 @@ ATOMIC64_OPS(xor, xor)
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
static __inline__ void atomic64_inc(atomic64_t *v)
{
long t;
@@ -427,6 +407,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic64_inc atomic64_inc
static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
@@ -444,16 +425,6 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
return t;
}
-/*
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
static __inline__ void atomic64_dec(atomic64_t *v)
{
long t;
@@ -467,6 +438,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic64_dec atomic64_dec
static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
@@ -487,9 +459,6 @@ static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
@@ -513,6 +482,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
return t;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
@@ -524,7 +494,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -532,13 +502,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long t;
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
+"1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
cmpd 0,%0,%3 \n\
beq 2f \n\
add %0,%2,%0 \n"
@@ -551,8 +521,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
: "r" (&v->counter), "r" (a), "r" (u)
: "cc", "memory");
- return t != u;
+ return t;
}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
/**
* atomic64_inc_not_zero - increment unless the number is zero
@@ -582,6 +553,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
return t1 != 0;
}
+#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */
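
Callers migrating from __atomic_add_unless() need to note the new return
convention: the old value comes back, not a success flag. A minimal
sketch of the equivalence, matching how the generic atomic_add_unless()
wrapper is built on top of the fetch_ variant:

	/* Sketch: success is "the old value was not u". */
	static inline bool add_unless_sketch(atomic_t *v, int a, int u)
	{
		return atomic_fetch_add_unless(v, a, u) != u;
	}
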
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index f67b3f6e36be..fbe8df433019 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -5,6 +5,8 @@
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H
+#include <asm/asm-const.h>
+
/*
* Memory barrier.
* The sync instruction guarantees that all memory accesses initiated
@@ -77,19 +79,25 @@ do { \
})
#ifdef CONFIG_PPC_BOOK3S_64
+#define NOSPEC_BARRIER_SLOT nop
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#define NOSPEC_BARRIER_SLOT nop; nop
+#endif
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
/*
* Prevent execution of subsequent instructions until preceding branches have
* been fully resolved and are no longer executing speculatively.
*/
-#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop
+#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
// This also acts as a compiler barrier due to the memory clobber.
#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
-#else /* !CONFIG_PPC_BOOK3S_64 */
+#else /* !CONFIG_PPC_BARRIER_NOSPEC */
#define barrier_nospec_asm
#define barrier_nospec()
-#endif
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
#include <asm-generic/barrier.h>
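
barrier_nospec() is intended to sit between a user-influenced bounds
check and the dependent load; a minimal sketch, with illustrative
array/index names:

	/* Sketch: stop a speculative out-of-bounds load after a
	 * mispredicted bounds check. The barrier is a no-op unless
	 * CONFIG_PPC_BARRIER_NOSPEC is enabled.
	 */
	static int load_checked(const int *array, unsigned long idx,
				unsigned long size)
	{
		if (idx >= size)
			return -1;
		barrier_nospec();
		return array[idx];
	}
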
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index b750ffef83c7..ff71566dadee 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -45,6 +45,7 @@
#include <linux/compiler.h>
#include <asm/asm-compat.h>
#include <asm/synch.h>
+#include <asm/asm-405.h>
/* PPC bit number conversion */
#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 02f5acd7ccc4..751cf931bb3f 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -84,17 +84,12 @@
* of RAM. -- Cort
*/
#define VMALLOC_OFFSET (0x1000000) /* 16M */
-#ifdef PPC_PIN_SIZE
-#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#endif
#define VMALLOC_END ioremap_bot
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
-#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
extern unsigned long ioremap_bot;
@@ -164,7 +159,6 @@ static inline unsigned long pte_update(pte_t *p,
1: lwarx %0,0,%3\n\
andc %1,%0,%4\n\
or %1,%1,%5\n"
- PPC405_ERR77(0,%3)
" stwcx. %1,0,%3\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
@@ -186,7 +180,6 @@ static inline unsigned long long pte_update(pte_t *p,
lwzx %0,0,%3\n\
andc %1,%L0,%5\n\
or %1,%1,%6\n"
- PPC405_ERR77(0,%3)
" stwcx. %1,0,%4\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
new file mode 100644
index 000000000000..068085b709fb
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+
+#define MMU_NO_CONTEXT (0)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ flush_tlb_page(vma, vmaddr);
+}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index c81793d47af9..f82ee8a3b561 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -137,10 +137,9 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
shift = mmu_psize_defs[psize].shift; \
for (index = 0; vpn < __end; index++, \
vpn += (1L << (shift - VPN_SHIFT))) { \
- if (!__split || __rpte_sub_valid(rpte, index)) \
- do {
+ if (!__split || __rpte_sub_valid(rpte, index))
-#define pte_iterate_hashed_end() } while(0); } } while(0)
+#define pte_iterate_hashed_end() } } while(0)
#define pte_pagesize_index(mm, addr, pte) \
(((pte) & H_PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
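The two macros above open and close a loop body, which is why the dangling brace in pte_iterate_hashed_end() is intentional. A sketch of the expected call pattern (variable names illustrative):

    	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
    		/* per-subpage work, e.g. invalidate the hashed 4K entry */
    	} pte_iterate_hashed_end();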
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0387b155f13d..d52a51b2ce7b 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -3,6 +3,8 @@
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__
+#include <asm/asm-const.h>
+
/*
* Common bits between 4K and 64K pages in a linux-style PTE.
* Additional bits may be defined in pgtable-hash64-*.h
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index c459f937d484..50888388a359 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -32,26 +32,6 @@ static inline int hstate_get_psize(struct hstate *hstate)
}
}
-#define arch_make_huge_pte arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
-{
- unsigned long page_shift;
-
- if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
- return entry;
-
- page_shift = huge_page_shift(hstate_vma(vma));
- /*
- * We don't support 1G hugetlb pages yet.
- */
- VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
- if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
- return __pte(pte_val(entry) | R_PAGE_LARGE);
- else
- return entry;
-}
-
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void)
{
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 50ed64fba4ae..b3520b549cba 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -12,9 +12,9 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>
+#include <asm/asm-const.h>
/*
* This is necessary to get the definition of PGTABLE_RANGE which we
@@ -364,6 +364,16 @@ static inline unsigned long hpte_new_to_old_r(unsigned long r)
return r & ~HPTE_R_3_0_SSIZE_MASK;
}
+static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
+{
+ unsigned long hpte_v;
+
+ hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ return hpte_v;
+}
+
/*
* This function sets the AVPN and L fields of the HPTE appropriately
* using the base page size and actual page size.
@@ -487,6 +497,9 @@ extern void hpte_init_native(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
+void slb_flush_all_realmode(void);
+void __slb_restore_bolted_realmode(void);
+void slb_restore_bolted_realmode(void);
extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
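A sketch of the intended use of the new hpte_get_old_v() helper (illustrative, not part of the patch): callers can scan an HPTE group and compare in the legacy format regardless of whether the CPU encodes entries in the ARCH_300 layout:

    static long example_find_slot(struct hash_pte *hptes, unsigned long want_v)
    {
    	long i;

    	for (i = 0; i < 8; i++) {
    		unsigned long hpte_v = hpte_get_old_v(&hptes[i]);

    		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
    			return i;	/* matching, valid slot */
    	}
    	return -1;			/* not found */
    }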
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 01ee40f11f3a..391ed2c3b697 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/cpumask.h>
+#include <linux/kmemleak.h>
#include <linux/percpu.h>
struct vmemmap_backing {
@@ -83,6 +84,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgtable_gfp_flags(mm, GFP_KERNEL));
/*
+ * Don't scan the PGD for pointers; it contains references to PUDs but
+ * those references are not full pointers and so can't be recognised by
+ * kmemleak.
+ */
+ kmemleak_no_scan(pgd);
+
+ /*
* With hugetlb, we don't clear the second half of the page table.
* If we share the same slab cache with the pmd or pud level table,
* we need to make sure we zero out the full table on alloc.
@@ -110,8 +118,19 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
- pgtable_gfp_flags(mm, GFP_KERNEL));
+ pud_t *pud;
+
+ pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+ /*
+ * Tell kmemleak to ignore the PUD; that means don't scan it for
+ * pointers and don't consider it a leak. PUDs are typically only
+ * referred to by their PGD, but kmemleak is not able to recognise those
+ * as pointers, leading to false leak reports.
+ */
+ kmemleak_ignore(pud);
+
+ return pud;
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -208,4 +227,11 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
#define check_pgt_cache() do { } while (0)
+extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+static inline void update_page_count(int psize, long count)
+{
+ if (IS_ENABLED(CONFIG_PROC_FS))
+ atomic_long_add(count, &direct_pages_count[psize]);
+}
+
#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
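The two kmemleak annotations introduced above differ in scope. A small sketch of the distinction (the buffers are invented for illustration):

    static void example_kmemleak_annotations(void)
    {
    	void *pgd_like = kmalloc(4096, GFP_KERNEL);
    	void *pud_like = kmalloc(4096, GFP_KERNEL);

    	/* Still tracked as a possible leak, but contents never scanned. */
    	kmemleak_no_scan(pgd_like);

    	/* Neither scanned nor ever reported as a leak. */
    	kmemleak_ignore(pud_like);
    }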
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 42aafba7a308..676118743a06 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -479,9 +479,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
{
if (full && radix_enabled()) {
/*
- * Let's skip the DD1 style pte update here. We know that
- * this is a full mm pte clear and hence can be sure there is
- * no parallel set_pte.
+ * We know that this is a full mm pte clear and
+ * hence can be sure there is no parallel set_pte.
*/
return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
}
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index ef9f96742ce1..7d1a3d1543fc 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H
+#include <asm/asm-const.h>
+
#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif
@@ -12,12 +14,6 @@
#include <asm/book3s/64/radix-4k.h>
#endif
-/*
- * For P9 DD1 only, we need to track whether the pte's huge.
- */
-#define R_PAGE_LARGE _RPAGE_RSV1
-
-
#ifndef __ASSEMBLY__
#include <asm/book3s/64/tlbflush-radix.h>
#include <asm/cpu_has_feature.h>
@@ -36,6 +32,9 @@
#define RADIX_PUD_BAD_BITS 0x60000000000000e0UL
#define RADIX_PGD_BAD_BITS 0x60000000000000e0UL
+#define RADIX_PMD_SHIFT (PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
+#define RADIX_PUD_SHIFT (RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
+#define RADIX_PGD_SHIFT (RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)
/*
* Size of EA range mapped by our pagetables.
*/
@@ -154,20 +153,7 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
{
unsigned long old_pte;
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-
- unsigned long new_pte;
-
- old_pte = __radix_pte_update(ptep, ~0ul, 0);
- /*
- * new value of pte
- */
- new_pte = (old_pte | set) & ~clr;
- radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
- if (new_pte)
- __radix_pte_update(ptep, 0, new_pte);
- } else
- old_pte = __radix_pte_update(ptep, clr, set);
+ old_pte = __radix_pte_update(ptep, clr, set);
if (!huge)
assert_pte_locked(mm, addr);
@@ -253,8 +239,6 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- return __pmd(pmd_val(pmd) | _PAGE_PTE | R_PAGE_LARGE);
return __pmd(pmd_val(pmd) | _PAGE_PTE);
}
@@ -285,18 +269,14 @@ static inline unsigned long radix__get_tree_size(void)
unsigned long rts_field;
/*
* We support 52 bits, hence:
- * DD1 52-28 = 24, 0b11000
- * Others 52-31 = 21, 0b10101
+ * bits 52 - 31 = 21, 0b10101
* RTS encoding details
* bits 0 - 3 of rts -> bits 6 - 8 unsigned long
* bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
*/
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- rts_field = (0x3UL << 61);
- else {
- rts_field = (0x5UL << 5); /* 6 - 8 bits */
- rts_field |= (0x2UL << 61);
- }
+ rts_field = (0x5UL << 5); /* 6 - 8 bits */
+ rts_field |= (0x2UL << 61);
+
return rts_field;
}
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index ef5c3f2994c9..1154a6dc6d26 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -48,8 +48,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
extern void radix__flush_tlb_all(void);
-extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
- unsigned long address);
extern void radix__flush_tlb_lpid_page(unsigned int lpid,
unsigned long addr,
diff --git a/arch/powerpc/include/asm/book3s/tlbflush.h b/arch/powerpc/include/asm/book3s/tlbflush.h
new file mode 100644
index 000000000000..dec11de41055
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/tlbflush.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/tlbflush.h>
+#else
+#include <asm/book3s/32/tlbflush.h>
+#endif
+
+#endif /* _ASM_POWERPC_BOOK3S_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 0d72ec75da63..d5a8d7bf0759 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -11,7 +11,6 @@
#include <linux/mm.h>
#include <asm/cputable.h>
-#include <asm/cpu_has_feature.h>
/*
* No cache flushing is required when address mappings are changed,
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index 9b001f1f6b32..27183871eb3b 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -5,8 +5,8 @@
#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
-#include <asm/asm-compat.h>
#include <linux/bug.h>
+#include <asm/asm-405.h>
#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
new file mode 100644
index 000000000000..ed7b1448493a
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching-asm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018, Michael Ellerman, IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
+#define _ASM_POWERPC_CODE_PATCHING_ASM_H
+
+/* Define a "site" that can be patched */
+.macro patch_site label name
+ .pushsection ".rodata"
+ .balign 4
+ .global \name
+\name:
+ .4byte \label - .
+ .popsection
+.endm
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 812535f40124..31733a95bbd0 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -14,6 +14,7 @@
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
+#include <asm/asm-compat.h>
/* Flags for create_branch:
* "b" == create_branch(addr, target, 0);
@@ -32,6 +33,8 @@ unsigned int create_cond_branch(const unsigned int *addr,
int patch_branch(unsigned int *addr, unsigned long target, int flags);
int patch_instruction(unsigned int *addr, unsigned int instr);
int raw_patch_instruction(unsigned int *addr, unsigned int instr);
+int patch_instruction_site(s32 *addr, unsigned int instr);
+int patch_branch_site(s32 *site, unsigned long target, int flags);
int instr_is_relative_branch(unsigned int instr);
int instr_is_relative_link_branch(unsigned int instr);
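The asm-side patch_site macro and the C-side patch_instruction_site()/patch_branch_site() are two halves of one mechanism: the macro records an instruction's address as a relative s32 in .rodata, and the C helpers rewrite that instruction at runtime. A hypothetical pairing (the site name is invented):

    /* In assembly, next to the instruction to be patched:
     *	1:	nop
     *		patch_site 1b, patch__example_site
     */
    extern s32 patch__example_site;

    static void example_apply_patch(void)
    {
    	/* Rewrite the recorded instruction with an unconditional nop. */
    	patch_instruction_site(&patch__example_site, PPC_INST_NOP);
    }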
diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
index ce216df31381..48616fe7ea75 100644
--- a/arch/powerpc/include/asm/copro.h
+++ b/arch/powerpc/include/asm/copro.h
@@ -10,13 +10,15 @@
#ifndef _ASM_POWERPC_COPRO_H
#define _ASM_POWERPC_COPRO_H
+#include <linux/mm_types.h>
+
struct copro_slb
{
u64 esid, vsid;
};
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
- unsigned long dsisr, unsigned *flt);
+ unsigned long dsisr, vm_fault_t *flt);
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index e210a83eb196..43e5f31fe64d 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -79,6 +79,19 @@ struct stop_sprs {
u64 mmcra;
};
+#define PNV_IDLE_NAME_LEN 16
+struct pnv_idle_states_t {
+ char name[PNV_IDLE_NAME_LEN];
+ u32 latency_ns;
+ u32 residency_ns;
+ u64 psscr_val;
+ u64 psscr_mask;
+ u32 flags;
+ bool valid;
+};
+
+extern struct pnv_idle_states_t *pnv_idle_states;
+extern int nr_pnv_idle_states;
extern u32 pnv_fastsleep_workaround_at_entry[];
extern u32 pnv_fastsleep_workaround_at_exit[];
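A sketch of how the new table is meant to be consumed (illustrative): walk the nr_pnv_idle_states entries and skip any that failed validation:

    static void example_list_idle_states(void)
    {
    	int i;

    	for (i = 0; i < nr_pnv_idle_states; i++) {
    		if (!pnv_idle_states[i].valid)
    			continue;
    		pr_info("%s: latency %u ns\n",
    			pnv_idle_states[i].name,
    			pnv_idle_states[i].latency_ns);
    	}
    }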
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 9c0a3083571b..29f49a35d6ee 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -4,9 +4,8 @@
#include <linux/types.h>
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
#include <uapi/asm/cputable.h>
+#include <asm/asm-const.h>
#ifndef __ASSEMBLY__
@@ -210,7 +209,6 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0000008000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0000010000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x0000020000000000)
-#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x0000040000000000)
#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000)
#define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
#define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)
@@ -452,7 +450,6 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_PKEY)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
-#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_POWER9 (CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -464,8 +461,6 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
-#define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
- (~CPU_FTR_SAO))
#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
@@ -488,17 +483,15 @@ static inline void cpu_feature_keys_init(void) { }
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
- CPU_FTRS_POWER8_DD1 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | \
- CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \
- CPU_FTRS_POWER9_DD2_2)
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
- CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \
- CPU_FTRS_PA6T | CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \
- CPU_FTRS_POWER9_DD2_2)
+ CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
+ CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
#else
@@ -566,17 +559,15 @@ enum {
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \
- CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER8_DD1 & \
- CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \
- CPU_FTRS_DT_CPU_BASE)
+ CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER9 & \
+ CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
- CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & \
- CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \
- CPU_FTRS_DT_CPU_BASE)
+ ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & CPU_FTRS_POWER9 & \
+ CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
#else
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index bc4903badb3f..133672744b2e 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -23,7 +23,6 @@
#include <asm/div64.h>
#include <asm/time.h>
#include <asm/param.h>
-#include <asm/cpu_has_feature.h>
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 9f2ae0d25e15..99b84db23e8c 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -16,7 +16,7 @@
#include <linux/threads.h>
#include <asm/ppc-opcode.h>
-#include <asm/cpu_has_feature.h>
+#include <asm/feature-fixups.h>
#define PPC_DBELL_MSG_BRDCAST (0x04000000)
#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 4a2beef74277..151dff555f50 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -25,6 +25,7 @@
#include <linux/spinlock.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>
+#include <linux/stringify.h>
typedef struct {
unsigned int base;
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index ce5da214ffe5..7756026b95ca 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -45,7 +45,6 @@ static inline int debugger_break_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
-void set_breakpoint(struct arch_hw_breakpoint *brk);
void __set_breakpoint(struct arch_hw_breakpoint *brk);
bool ppc_breakpoint_available(void);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
diff --git a/arch/powerpc/include/asm/dt_cpu_ftrs.h b/arch/powerpc/include/asm/dt_cpu_ftrs.h
index 71515d909ed1..0c729e2d0e8a 100644
--- a/arch/powerpc/include/asm/dt_cpu_ftrs.h
+++ b/arch/powerpc/include/asm/dt_cpu_ftrs.h
@@ -10,8 +10,6 @@
*/
#include <linux/types.h>
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
#include <uapi/asm/cputable.h>
#ifdef CONFIG_PPC_DT_CPU_FTRS
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 677102baf3cd..219637ea69a1 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -36,13 +36,14 @@ struct pci_dn;
#ifdef CONFIG_EEH
/* EEH subsystem flags */
-#define EEH_ENABLED 0x01 /* EEH enabled */
-#define EEH_FORCE_DISABLED 0x02 /* EEH disabled */
-#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
-#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
-#define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */
-#define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */
-#define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */
+#define EEH_ENABLED 0x01 /* EEH enabled */
+#define EEH_FORCE_DISABLED 0x02 /* EEH disabled */
+#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
+#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
+#define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */
+#define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */
+#define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */
+#define EEH_POSTPONED_PROBE 0x80 /* Powernv may postpone device probe */
/*
* Delay for PE reset, all in ms
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index c40b4380951c..a86feddddad0 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -35,6 +35,7 @@
* implementations as possible.
*/
#include <asm/head-64.h>
+#include <asm/feature-fixups.h>
/* PACA save area offsets (exgen, exmc, etc) */
#define EX_R9 0
@@ -156,7 +157,7 @@
b hrfi_flush_fallback
#ifdef CONFIG_RELOCATABLE
-#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_PROLOG_2_RELON(label, h) \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label); \
mtctr r12; \
@@ -166,25 +167,26 @@
bctr;
#else
/* If not relocatable, we can jump directly -- and save messing with LR */
-#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_PROLOG_2_RELON(label, h) \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
li r10,MSR_RI; \
mtmsrd r10,1; /* Set RI (EE=0) */ \
b label;
#endif
-#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
- __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define EXCEPTION_PROLOG_2_RELON(label, h) \
+ __EXCEPTION_PROLOG_2_RELON(label, h)
/*
- * As EXCEPTION_PROLOG_PSERIES(), except we've already got relocation on
- * so no need to rfid. Save lr in case we're CONFIG_RELOCATABLE, in which
- * case EXCEPTION_RELON_PROLOG_PSERIES_1 will be using lr.
+ * As EXCEPTION_PROLOG(), except we've already got relocation on so no need to
+ * rfid. Save LR in case we're CONFIG_RELOCATABLE, in which case
+ * EXCEPTION_PROLOG_2_RELON will be using LR.
*/
-#define EXCEPTION_RELON_PROLOG_PSERIES(area, label, h, extra, vec) \
+#define EXCEPTION_RELON_PROLOG(area, label, h, extra, vec) \
+ SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
+ EXCEPTION_PROLOG_2_RELON(label, h)
/*
* We're short on space and time in the exception prolog, so we can't
@@ -315,7 +317,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define EXCEPTION_PROLOG_1(area, extra, vec) \
_EXCEPTION_PROLOG_1(area, extra, vec)
-#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_PROLOG_2(label, h) \
ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label) \
@@ -324,11 +326,11 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mtspr SPRN_##h##SRR1,r10; \
h##RFI_TO_KERNEL; \
b . /* prevent speculative execution */
-#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
- __EXCEPTION_PROLOG_PSERIES_1(label, h)
+#define EXCEPTION_PROLOG_2(label, h) \
+ __EXCEPTION_PROLOG_2(label, h)
/* _NORI variant keeps MSR_RI clear */
-#define __EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
+#define __EXCEPTION_PROLOG_2_NORI(label, h) \
ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
xori r10,r10,MSR_RI; /* Clear MSR_RI */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
@@ -339,13 +341,14 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
h##RFI_TO_KERNEL; \
b . /* prevent speculative execution */
-#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
- __EXCEPTION_PROLOG_PSERIES_1_NORI(label, h)
+#define EXCEPTION_PROLOG_2_NORI(label, h) \
+ __EXCEPTION_PROLOG_2_NORI(label, h)
-#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
+#define EXCEPTION_PROLOG(area, label, h, extra, vec) \
+ SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, h);
+ EXCEPTION_PROLOG_2(label, h);
#define __KVMTEST(h, n) \
lbz r10,HSTATE_IN_GUEST(r13); \
@@ -416,10 +419,10 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#endif
/* Do not enable RI */
-#define EXCEPTION_PROLOG_PSERIES_NORI(area, label, h, extra, vec) \
+#define EXCEPTION_PROLOG_NORI(area, label, h, extra, vec) \
EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_PROLOG_PSERIES_1_NORI(label, h);
+ EXCEPTION_PROLOG_2_NORI(label, h);
#define __KVM_HANDLER(area, h, n) \
@@ -550,10 +553,8 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
/*
* Exception vectors.
*/
-#define STD_EXCEPTION_PSERIES(vec, label) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label, \
- EXC_STD, KVMTEST_PR, vec); \
+#define STD_EXCEPTION(vec, label) \
+ EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_STD, KVMTEST_PR, vec);
/* Version of above for when we have to branch out-of-line */
#define __OOL_EXCEPTION(vec, label, hdlr) \
@@ -561,36 +562,31 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
EXCEPTION_PROLOG_0(PACA_EXGEN) \
b hdlr;
-#define STD_EXCEPTION_PSERIES_OOL(vec, label) \
+#define STD_EXCEPTION_OOL(vec, label) \
EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD)
+ EXCEPTION_PROLOG_2(label, EXC_STD)
#define STD_EXCEPTION_HV(loc, vec, label) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label, \
- EXC_HV, KVMTEST_HV, vec);
+ EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
#define STD_EXCEPTION_HV_OOL(vec, label) \
EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2(label, EXC_HV)
-#define STD_RELON_EXCEPTION_PSERIES(loc, vec, label) \
+#define STD_RELON_EXCEPTION(loc, vec, label) \
/* No guest interrupts come through here */ \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label, EXC_STD, NOTEST, vec);
+ EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_STD, NOTEST, vec);
-#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
+#define STD_RELON_EXCEPTION_OOL(vec, label) \
EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_STD)
+ EXCEPTION_PROLOG_2_RELON(label, EXC_STD)
#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label, \
- EXC_HV, KVMTEST_HV, vec);
+ EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
/* This associates vector numbers with bits in paca->irq_happened */
#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
@@ -627,55 +623,45 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define SOFTEN_NOTEST_PR(vec, bitmask) _SOFTEN_TEST(EXC_STD, vec, bitmask)
#define SOFTEN_NOTEST_HV(vec, bitmask) _SOFTEN_TEST(EXC_HV, vec, bitmask)
-#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
+#define __MASKABLE_EXCEPTION(vec, label, h, extra, bitmask) \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(PACA_EXGEN); \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
- EXCEPTION_PROLOG_PSERIES_1(label, h);
+ EXCEPTION_PROLOG_2(label, h);
-#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
- __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)
+#define MASKABLE_EXCEPTION(vec, label, bitmask) \
+ __MASKABLE_EXCEPTION(vec, label, EXC_STD, SOFTEN_TEST_PR, bitmask)
-#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label, bitmask) \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, \
- EXC_STD, SOFTEN_TEST_PR, bitmask)
-
-#define MASKABLE_EXCEPTION_PSERIES_OOL(vec, label, bitmask) \
+#define MASKABLE_EXCEPTION_OOL(vec, label, bitmask) \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec, bitmask);\
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD)
+ EXCEPTION_PROLOG_2(label, EXC_STD)
-#define MASKABLE_EXCEPTION_HV(loc, vec, label, bitmask) \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, \
- EXC_HV, SOFTEN_TEST_HV, bitmask)
+#define MASKABLE_EXCEPTION_HV(vec, label, bitmask) \
+ __MASKABLE_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
#define MASKABLE_EXCEPTION_HV_OOL(vec, label, bitmask) \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2(label, EXC_HV)
-#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
+#define __MASKABLE_RELON_EXCEPTION(vec, label, h, extra, bitmask) \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(PACA_EXGEN); \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
-
-#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)\
- __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)
+ EXCEPTION_PROLOG_2_RELON(label, h)
-#define MASKABLE_RELON_EXCEPTION_PSERIES(loc, vec, label, bitmask) \
- _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
- EXC_STD, SOFTEN_NOTEST_PR, bitmask)
+#define MASKABLE_RELON_EXCEPTION(vec, label, bitmask) \
+ __MASKABLE_RELON_EXCEPTION(vec, label, EXC_STD, SOFTEN_NOTEST_PR, bitmask)
-#define MASKABLE_RELON_EXCEPTION_PSERIES_OOL(vec, label, bitmask) \
+#define MASKABLE_RELON_EXCEPTION_OOL(vec, label, bitmask) \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD);
+ EXCEPTION_PROLOG_2(label, EXC_STD);
-#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label, bitmask) \
- _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
- EXC_HV, SOFTEN_TEST_HV, bitmask)
+#define MASKABLE_RELON_EXCEPTION_HV(vec, label, bitmask) \
+ __MASKABLE_RELON_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask) \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
/*
* Our exception common code can be passed various "additions"
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 5a23010af600..1e7a33592e29 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -195,9 +195,6 @@ struct fadump_crash_info_header {
struct cpumask online_mask;
};
-/* Crash memory ranges */
-#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
-
struct fad_crash_memory_ranges {
unsigned long long base;
unsigned long long size;
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index fcfd05672b1b..33b6f9c892c8 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -1,6 +1,8 @@
#ifndef __ASM_POWERPC_FEATURE_FIXUPS_H
#define __ASM_POWERPC_FEATURE_FIXUPS_H
+#include <asm/asm-const.h>
+
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 535add3f7791..7a051bd21f87 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -14,8 +14,7 @@
#ifdef __KERNEL__
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
/* firmware feature bitmask values */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 6c40dfda5912..41cc15c14eee 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -15,7 +15,6 @@
#define _ASM_FIXMAP_H
#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 1a944c18c539..94542776a62d 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -8,7 +8,7 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
#include <asm/synch.h>
-#include <asm/asm-compat.h>
+#include <asm/asm-405.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 7e0e93f24cb7..a4f947888744 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -260,22 +260,22 @@ name:
#define EXC_REAL(name, start, size) \
EXC_REAL_BEGIN(name, start, size); \
- STD_EXCEPTION_PSERIES(start, name##_common); \
+ STD_EXCEPTION(start, name##_common); \
EXC_REAL_END(name, start, size);
#define EXC_VIRT(name, start, size, realvec) \
EXC_VIRT_BEGIN(name, start, size); \
- STD_RELON_EXCEPTION_PSERIES(start, realvec, name##_common); \
+ STD_RELON_EXCEPTION(start, realvec, name##_common); \
EXC_VIRT_END(name, start, size);
#define EXC_REAL_MASKABLE(name, start, size, bitmask) \
EXC_REAL_BEGIN(name, start, size); \
- MASKABLE_EXCEPTION_PSERIES(start, start, name##_common, bitmask);\
+ MASKABLE_EXCEPTION(start, name##_common, bitmask); \
EXC_REAL_END(name, start, size);
#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask) \
EXC_VIRT_BEGIN(name, start, size); \
- MASKABLE_RELON_EXCEPTION_PSERIES(start, realvec, name##_common, bitmask);\
+ MASKABLE_RELON_EXCEPTION(realvec, name##_common, bitmask); \
EXC_VIRT_END(name, start, size);
#define EXC_REAL_HV(name, start, size) \
@@ -295,7 +295,7 @@ name:
#define __TRAMP_REAL_OOL(name, vec) \
TRAMP_REAL_BEGIN(tramp_real_##name); \
- STD_EXCEPTION_PSERIES_OOL(vec, name##_common); \
+ STD_EXCEPTION_OOL(vec, name##_common);
#define EXC_REAL_OOL(name, start, size) \
__EXC_REAL_OOL(name, start, size); \
@@ -306,7 +306,7 @@ name:
#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask) \
TRAMP_REAL_BEGIN(tramp_real_##name); \
- MASKABLE_EXCEPTION_PSERIES_OOL(vec, name##_common, bitmask); \
+ MASKABLE_EXCEPTION_OOL(vec, name##_common, bitmask);
#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask) \
__EXC_REAL_OOL_MASKABLE(name, start, size); \
@@ -346,7 +346,7 @@ name:
#define __TRAMP_VIRT_OOL(name, realvec) \
TRAMP_VIRT_BEGIN(tramp_virt_##name); \
- STD_RELON_EXCEPTION_PSERIES_OOL(realvec, name##_common); \
+ STD_RELON_EXCEPTION_OOL(realvec, name##_common);
#define EXC_VIRT_OOL(name, start, size, realvec) \
__EXC_VIRT_OOL(name, start, size); \
@@ -357,7 +357,7 @@ name:
#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) \
TRAMP_VIRT_BEGIN(tramp_virt_##name); \
- MASKABLE_RELON_EXCEPTION_PSERIES_OOL(realvec, name##_common, bitmask);\
+ MASKABLE_RELON_EXCEPTION_OOL(realvec, name##_common, bitmask);
#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \
__EXC_VIRT_OOL_MASKABLE(name, start, size); \
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index cec820f961da..a4b65b186ec6 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -25,7 +25,7 @@
#include <linux/interrupt.h>
#include <asm/kmap_types.h>
-#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 3225eb6402cc..2d00cc530083 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -84,9 +84,6 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
return dir + idx;
}
-pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
- unsigned long addr, unsigned *shift);
-
void flush_dcache_icache_hugepage(struct page *page);
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 662c8347d699..a0b17f9f1ea4 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -342,10 +342,12 @@
#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
+#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9
#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
/* Flag values used in H_REGISTER_PROC_TBL hcall */
#define PROC_TABLE_OP_MASK 0x18
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 8e7b09703ca4..ece4dc89c90b 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -52,16 +52,20 @@ struct arch_hw_breakpoint {
#include <asm/reg.h>
#include <asm/debug.h>
+struct perf_event_attr;
struct perf_event;
struct pmu;
struct perf_sample_data;
+struct task_struct;
#define HW_BREAKPOINT_ALIGN 0x7
extern int hw_breakpoint_slots(int type);
extern int arch_bp_generic_fields(int type, int *gen_bp_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e151774cb577..32a18f2f49bc 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -253,14 +253,16 @@ static inline bool lazy_irq_pending(void)
/*
* This is called by asynchronous interrupts to conditionally
- * re-enable hard interrupts when soft-disabled after having
- * cleared the source of the interrupt
+ * re-enable hard interrupts after having cleared the source
+ * of the interrupt. They are kept disabled if there is a different
+ * soft-masked interrupt pending that requires hard masking.
*/
static inline void may_hard_irq_enable(void)
{
- get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
- if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
+ if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
+ get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
__hard_irq_enable();
+ }
}
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 20febe0b7f32..ab3a4fba38e3 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -30,6 +30,7 @@
#include <asm/machdep.h>
#include <asm/types.h>
#include <asm/pci-bridge.h>
+#include <asm/asm-const.h>
#define IOMMU_PAGE_SHIFT_4K 12
#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
@@ -69,6 +70,8 @@ struct iommu_table_ops {
long index,
unsigned long *hpa,
enum dma_data_direction *direction);
+
+ __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
void (*clear)(struct iommu_table *tbl,
long index, long npages);
@@ -117,15 +120,16 @@ struct iommu_table {
unsigned long *it_map; /* A simple allocation bitmap for now */
unsigned long it_page_shift;/* table iommu page size */
struct list_head it_group_list;/* List of iommu_table_group_link */
- unsigned long *it_userspace; /* userspace view of the table */
+ __be64 *it_userspace; /* userspace view of the table */
struct iommu_table_ops *it_ops;
struct kref it_kref;
+ int it_nid;
};
+#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \
+ ((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
- ((tbl)->it_userspace ? \
- &((tbl)->it_userspace[(entry) - (tbl)->it_offset]) : \
- NULL)
+ ((tbl)->it_ops->useraddrptr((tbl), (entry), true))
/* Pure 2^n version of get_order */
static inline __attribute_const__
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 9a287e0ac8b1..a3b2cf940b4e 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -14,7 +14,7 @@
#include <linux/types.h>
#include <asm/feature-fixups.h>
-#include <asm/asm-compat.h>
+#include <asm/asm-const.h>
#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
#define JUMP_LABEL_NOP_SIZE 4
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 9f3be5c8a4a3..785c464b6588 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -88,7 +88,6 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_saved_msr;
- struct pt_regs jprobe_saved_regs;
struct prev_kprobe prev_kprobe;
};
@@ -103,17 +102,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int __is_active_jprobe(unsigned long addr);
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- return 0;
-}
-#endif
#else
static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 1f345a0b6ba2..83a9aa3cf689 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -390,4 +390,51 @@ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
#define SPLIT_HACK_MASK 0xff000000
#define SPLIT_HACK_OFFS 0xfb000000
+/*
+ * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
+ * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
+ * (but not its actual threading mode, which is not available) to avoid
+ * collisions.
+ *
+ * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
+ * 0) unchanged: if the guest is filling each VCORE completely then it will be
+ * using consecutive IDs and it will fill the space without any packing.
+ *
+ * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
+ * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
+ * added to avoid collisions.
+ *
+ * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
+ * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
+ * can be safely packed into the second half of each VCORE by adding an offset
+ * of (stride / 2).
+ *
+ * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
+ * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
+ * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
+ *
+ * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
+ * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
+ * must be free to use.
+ *
+ * (The offsets for each block are stored in block_offsets[], indexed by the
+ * block number if the stride is 8. For cases where the guest's stride is less
+ * than 8, we can re-use the block_offsets array by multiplying the block
+ * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
+ */
+static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
+{
+ const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
+ int stride = kvm->arch.emul_smt_mode;
+ int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
+ u32 packed_id;
+
+ if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
+ return 0;
+ packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
+ if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
+ return 0;
+ return packed_id;
+}
+
#endif /* __ASM_KVM_BOOK3S_H__ */
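To make the packing arithmetic concrete, a worked example under stated assumptions (KVM_MAX_VCPUS is configuration dependent; 2048 is assumed here) with a guest core stride of 4, so block numbers scale by MAX_SMT_THREADS / 4 = 2:

    static inline void example_pack_vcpu_id(struct kvm *kvm)
    {
    	/* Assumes kvm->arch.emul_smt_mode == 4 and KVM_MAX_VCPUS == 2048. */

    	/* id 7 lies in block 0 and is left unchanged. */
    	WARN_ON(kvmppc_pack_vcpu_id(kvm, 7) != 7);

    	/*
    	 * id 2050: block = (2050 / 2048) * 2 = 2, offset = block_offsets[2]
    	 * = 2, so packed = (2050 % 2048) + 2 = 4.
    	 */
    	WARN_ON(kvmppc_pack_vcpu_id(kvm, 2050) != 4);
    }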
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
index e5f048bbcb7c..931260b59ac6 100644
--- a/arch/powerpc/include/asm/kvm_booke_hv_asm.h
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -9,6 +9,8 @@
#ifndef ASM_KVM_BOOKE_HV_ASM_H
#define ASM_KVM_BOOKE_HV_ASM_H
+#include <asm/feature-fixups.h>
+
#ifdef __ASSEMBLY__
/*
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index fa4efa7e88f7..906bcbdfd2a1 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -42,7 +42,14 @@
#define KVM_USER_MEM_SLOTS 512
#include <asm/cputhreads.h>
-#define KVM_MAX_VCPU_ID (threads_per_subcore * KVM_MAX_VCORES)
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */
+#define KVM_MAX_VCPU_ID (MAX_SMT_THREADS * KVM_MAX_VCORES)
+
+#else
+#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -672,7 +679,7 @@ struct kvm_vcpu_arch {
gva_t vaddr_accessed;
pgd_t *pgdir;
- u8 io_gpr; /* GPR used as IO source/target */
+ u16 io_gpr; /* GPR used as IO source/target */
u8 mmio_host_swabbed;
u8 mmio_sign_extend;
/* conversion between single and double precision */
@@ -688,7 +695,6 @@ struct kvm_vcpu_arch {
*/
u8 mmio_vsx_copy_nums;
u8 mmio_vsx_offset;
- u8 mmio_vsx_tx_sx_enabled;
u8 mmio_vmx_copy_nums;
u8 mmio_vmx_offset;
u8 mmio_copy_type;
@@ -801,14 +807,14 @@ struct kvm_vcpu_arch {
#define KVMPPC_VCPU_BUSY_IN_HOST 2
/* Values for vcpu->arch.io_gpr */
-#define KVM_MMIO_REG_MASK 0x001f
-#define KVM_MMIO_REG_EXT_MASK 0xffe0
+#define KVM_MMIO_REG_MASK 0x003f
+#define KVM_MMIO_REG_EXT_MASK 0xffc0
#define KVM_MMIO_REG_GPR 0x0000
-#define KVM_MMIO_REG_FPR 0x0020
-#define KVM_MMIO_REG_QPR 0x0040
-#define KVM_MMIO_REG_FQPR 0x0060
-#define KVM_MMIO_REG_VSX 0x0080
-#define KVM_MMIO_REG_VMX 0x00c0
+#define KVM_MMIO_REG_FPR 0x0040
+#define KVM_MMIO_REG_QPR 0x0080
+#define KVM_MMIO_REG_FQPR 0x00c0
+#define KVM_MMIO_REG_VSX 0x0100
+#define KVM_MMIO_REG_VMX 0x0180
#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE
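The widening of io_gpr from u8 to u16 and the shifted masks go together: VSX exposes 64 registers, which no longer fit in the old 5-bit number field. A sketch of how the new encoding decomposes (illustrative):

    static void example_decode_io_gpr(u16 io_gpr)
    {
    	unsigned int regnum = io_gpr & KVM_MMIO_REG_MASK;	/* 0..63 */
    	unsigned int regfile = io_gpr & KVM_MMIO_REG_EXT_MASK;

    	if (regfile == KVM_MMIO_REG_VSX)
    		pr_info("MMIO target: VSX register %u\n", regnum);
    }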
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index cb57f29f531d..295b3dbb2698 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -5,7 +5,7 @@
* PPC440 support
*/
-#include <asm/page.h>
+#include <asm/asm-const.h>
#define PPC44x_MMUCR_TID 0x000000ff
#define PPC44x_MMUCR_STS 0x00010000
@@ -124,19 +124,19 @@ typedef struct {
/* Size of the TLBs used for pinning in lowmem */
#define PPC_PIN_SIZE (1 << 28) /* 256M */
-#if (PAGE_SHIFT == 12)
+#if defined(CONFIG_PPC_4K_PAGES)
#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
#define PPC47x_TLBE_SIZE PPC47x_TLB0_4K
#define mmu_virtual_psize MMU_PAGE_4K
-#elif (PAGE_SHIFT == 14)
+#elif defined(CONFIG_PPC_16K_PAGES)
#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
#define PPC47x_TLBE_SIZE PPC47x_TLB0_16K
#define mmu_virtual_psize MMU_PAGE_16K
-#elif (PAGE_SHIFT == 16)
+#elif defined(CONFIG_PPC_64K_PAGES)
#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
#define PPC47x_TLBE_SIZE PPC47x_TLB0_64K
#define mmu_virtual_psize MMU_PAGE_64K
-#elif (PAGE_SHIFT == 18)
+#elif defined(CONFIG_PPC_256K_PAGES)
#define PPC44x_TLBE_SIZE PPC44x_TLB_256K
#define mmu_virtual_psize MMU_PAGE_256K
#else
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 61d15ce92278..13ea441ac531 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -5,8 +5,7 @@
#include <linux/types.h>
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
/*
* MMU features bit definitions
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 896efa559996..b2f89b621b15 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned long *hpa);
+ unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned long *hpa);
+ unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
@@ -143,24 +143,33 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
{
int c;
- c = atomic_dec_if_positive(&mm->context.copros);
-
- /* Detect imbalance between add and remove */
- WARN_ON(c < 0);
-
/*
- * Need to broadcast a global flush of the full mm before
- * decrementing active_cpus count, as the next TLBI may be
- * local and the nMMU and/or PSL need to be cleaned up.
- * Should be rare enough so that it's acceptable.
+ * When removing the last copro, we need to broadcast a global
+ * flush of the full mm, as the next TLBI may be local and the
+ * nMMU and/or PSL need to be cleaned up.
+ *
+ * Both the 'copros' and 'active_cpus' counts are looked at in
+ * flush_all_mm() to determine the scope (local/global) of the
+ * TLBIs, so we need to flush first before decrementing
+ * 'copros'. If this API is used by several callers for the
+ * same context, it can lead to over-flushing. It's hopefully
+ * not common enough to be a problem.
*
* Skip on hash, as we don't know how to do the proper flush
* for the time being. Invalidations will remain global if
- * used on hash.
+ * used on hash. Note that we can't drop 'copros' either, as
+ * it could make some invalidations local with no flush
+ * in-between.
*/
- if (c == 0 && radix_enabled()) {
+ if (radix_enabled()) {
flush_all_mm(mm);
- dec_mm_active_cpus(mm);
+
+ c = atomic_dec_if_positive(&mm->context.copros);
+ /* Detect imbalance between add and remove */
+ WARN_ON(c < 0);
+
+ if (c == 0)
+ dec_mm_active_cpus(mm);
}
}
#else
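The new pageshift parameter to mm_iommu_ua_to_hpa() lets the helper refuse a translation whose backing page is smaller than the IOMMU page being mapped. A hedged sketch of a caller (names follow the declarations above; the error code is illustrative):

    static long example_translate(struct mm_iommu_table_group_mem_t *mem,
    			      struct iommu_table *tbl, unsigned long ua)
    {
    	unsigned long hpa = 0;

    	/* Fails if ua is unmapped or backed by a page < 1 << it_page_shift. */
    	if (mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))
    		return H_PARAMETER;

    	return hpa;
    }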
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 7c46a98cc7f4..a507a65b0866 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -8,7 +8,8 @@
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
-#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
+#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
+#include <asm/asm-405.h>
extern unsigned long ioremap_bot;
@@ -222,10 +223,6 @@ static inline unsigned long long pte_update(pte_t *p,
}
#endif /* CONFIG_PTE_64BIT */
-/*
- * 2.6 calls this without flushing the TLB entry; this is wrong
- * for our hash-based implementation, we fix that up here.
- */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index dd0c7236208f..7cd6809f4d33 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -3,11 +3,12 @@
#define _ASM_POWERPC_NOHASH_64_PGTABLE_H
/*
* This file contains the functions and defines necessary to modify and use
- * the ppc64 hashed page table.
+ * the ppc64 non-hashed page table.
*/
#include <asm/nohash/64/pgtable-4k.h>
#include <asm/barrier.h>
+#include <asm/asm-const.h>
#ifdef CONFIG_PPC_64K_PAGES
#error "Page size not supported"
@@ -37,7 +38,7 @@
/*
* The vmalloc space starts at the beginning of that region, and
- * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * occupies a quarter of it on Book3E
* (we keep a quarter for the virtual memmap)
*/
#define VMALLOC_START KERN_VIRT_START
@@ -77,7 +78,7 @@
/*
- * Defines the address of the vmemap area, in its own region on
- * hash table CPUs and after the vmalloc space on Book3E
+ * Defines the address of the vmemmap area, in its own region
+ * after the vmalloc space on Book3E
*/
#define VMEMMAP_BASE VMALLOC_END
#define VMEMMAP_END KERN_IO_START
@@ -247,14 +248,6 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}
-/*
- * We currently remove entries from the hashtable regardless of whether
- * the entry was young or dirty. The generic routines only flush if the
- * entry was young or dirty which is not good enough.
- *
- * We should be more intelligent about this but for the moment we override
- * these functions and force a tlb flush unconditionally
- */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
@@ -278,9 +271,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
}
-/* Set the dirty and/or accessed bits atomically in a linux PTE, this
- * function doesn't need to flush the hash entry
- */
+/* Set the dirty and/or accessed bits atomically in a linux PTE */
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t entry,
unsigned long address,
diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h
new file mode 100644
index 000000000000..b1d8fec29169
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/tlbflush.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_TLBFLUSH_H
+#define _ASM_POWERPC_NOHASH_TLBFLUSH_H
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - local_flush_tlb_mm(mm) flushes the specified mm context on
+ * the local processor
+ * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
+ * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *
+ */
+
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie instructions
+ */
+
+struct vm_area_struct;
+struct mm_struct;
+
+#define MMU_NO_CONTEXT ((unsigned int)-1)
+
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+
+extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+ int tsize, int ind);
+
+#ifdef CONFIG_SMP
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+ int tsize, int ind);
+#else
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
+#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i)
+#endif
+
+#endif /* _ASM_POWERPC_NOHASH_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 3bab299eda49..8365353330b4 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -206,9 +206,11 @@
#define OPAL_NPU_SPA_CLEAR_CACHE 160
#define OPAL_NPU_TL_SET 161
#define OPAL_SENSOR_READ_U64 162
+#define OPAL_SENSOR_GROUP_ENABLE 163
#define OPAL_PCI_GET_PBCQ_TUNNEL_BAR 164
#define OPAL_PCI_SET_PBCQ_TUNNEL_BAR 165
-#define OPAL_LAST 165
+#define OPAL_NX_COPROC_INIT 167
+#define OPAL_LAST 167
#define QUIESCE_HOLD 1 /* Spin all calls at entry */
#define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index e1b2910c6e81..834e7e29f1e4 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -292,6 +292,8 @@ int opal_set_powercap(u32 handle, int token, u32 pcap);
int opal_get_power_shift_ratio(u32 handle, int token, u32 *psr);
int opal_set_power_shift_ratio(u32 handle, int token, u32 psr);
int opal_sensor_group_clear(u32 group_hndl, int token);
+int opal_sensor_group_enable(u32 group_hndl, int token, bool enable);
+int opal_nx_coproc_init(uint32_t chip_id, uint32_t ct);
s64 opal_signal_system_reset(s32 cpu);
s64 opal_quiesce(u64 shutdown_type, s32 cpu);
@@ -305,6 +307,8 @@ extern void opal_configure_cores(void);
extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
+extern int opal_put_chars_atomic(uint32_t vtermno, const char *buf, int total_len);
+extern int opal_flush_console(uint32_t vtermno);
extern void hvc_opal_init_early(void);
@@ -326,6 +330,7 @@ extern int opal_async_wait_response_interruptible(uint64_t token,
struct opal_msg *msg);
extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
extern int opal_get_sensor_data_u64(u32 sensor_hndl, u64 *sensor_data);
+extern int sensor_group_enable(u32 grp_hndl, bool enable);
struct rtc_time;
extern time64_t opal_get_boot_time(void);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 6d34bd71139d..ad4f16164619 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -187,11 +187,6 @@ struct paca_struct {
u8 subcore_sibling_mask;
/* Flag to request this thread not to stop */
atomic_t dont_stop;
- /*
- * Pointer to an array which contains pointer
- * to the sibling threads' paca.
- */
- struct paca_struct **thread_sibling_pacas;
/* The PSSCR value that the kernel requested before going to stop */
u64 requested_psscr;
@@ -252,6 +247,9 @@ struct paca_struct {
void *rfi_flush_fallback_area;
u64 l1d_flush_size;
#endif
+#ifdef CONFIG_PPC_PSERIES
+ u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */
+#endif /* CONFIG_PPC_PSERIES */
} ____cacheline_aligned;
extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index db7be0779d55..f6a1265face2 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -16,8 +16,7 @@
#else
#include <asm/types.h>
#endif
-#include <asm/asm-compat.h>
-#include <asm/kdump.h>
+#include <asm/asm-const.h>
/*
* On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index af04acdb873f..c0ce17e909ef 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -10,6 +10,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <asm/asm-const.h>
+
/*
* We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
* specific, every notion of page number shared with the firmware, TCEs,
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 5ba80cffb505..20ebf153c871 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -13,7 +13,8 @@
DECLARE_STATIC_KEY_TRUE(pkey_disabled);
extern int pkeys_total; /* total pkeys as per device tree */
-extern u32 initial_allocation_mask; /* bits set for reserved keys */
+extern u32 initial_allocation_mask; /* bits set for the initially allocated keys */
+extern u32 reserved_allocation_mask; /* bits set for reserved keys */
#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
VM_PKEY_BIT3 | VM_PKEY_BIT4)
@@ -83,19 +84,21 @@ static inline u16 pte_to_pkey_bits(u64 pteflags)
#define __mm_pkey_is_allocated(mm, pkey) \
(mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))
-#define __mm_pkey_is_reserved(pkey) (initial_allocation_mask & \
+#define __mm_pkey_is_reserved(pkey) (reserved_allocation_mask & \
pkey_alloc_mask(pkey))
static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
- /* A reserved key is never considered as 'explicitly allocated' */
- return ((pkey < arch_max_pkey()) &&
- !__mm_pkey_is_reserved(pkey) &&
- __mm_pkey_is_allocated(mm, pkey));
+ if (pkey < 0 || pkey >= arch_max_pkey())
+ return false;
+
+ /* Reserved keys are never allocated. */
+ if (__mm_pkey_is_reserved(pkey))
+ return false;
+
+ return __mm_pkey_is_allocated(mm, pkey);
}
-extern void __arch_activate_pkey(int pkey);
-extern void __arch_deactivate_pkey(int pkey);
/*
* Returns a positive, 5-bit key on success, or -1 on failure.
* Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
@@ -124,11 +127,6 @@ static inline int mm_pkey_alloc(struct mm_struct *mm)
ret = ffz((u32)mm_pkey_allocation_map(mm));
__mm_pkey_allocated(mm, ret);
- /*
- * Enable the key in the hardware
- */
- if (ret > 0)
- __arch_activate_pkey(ret);
return ret;
}
@@ -140,10 +138,6 @@ static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
if (!mm_pkey_is_allocated(mm, pkey))
return -EINVAL;
- /*
- * Disable the key in the hardware
- */
- __arch_deactivate_pkey(pkey);
__mm_pkey_free(mm, pkey);
return 0;
@@ -187,6 +181,16 @@ static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
{
if (static_branch_likely(&pkey_disabled))
return -EINVAL;
+
+ /*
+ * userspace should not change pkey-0 permissions.
+ * pkey-0 is associated with every page in the kernel.
+ * If userspace denies any permission on pkey-0, the
+ * kernel cannot operate.
+ */
+ if (pkey == 0)
+ return init_val ? -EINVAL : 0;
+
return __arch_set_user_pkey_access(tsk, pkey, init_val);
}
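
The new guard in arch_set_user_pkey_access() rejects any request that would change key 0 while still reporting success for a no-op. A small self-contained sketch of that return-value convention (the EINVAL value and stub body are illustrative):

#include <stdio.h>

#define EINVAL 22

/* mirrors only the pkey-0 guard; the real code also programs the AMR */
static int set_user_pkey_access(int pkey, unsigned long init_val)
{
	if (pkey == 0)
		return init_val ? -EINVAL : 0;
	return 0;
}

int main(void)
{
	printf("%d\n", set_user_pkey_access(0, 0));	/* 0: harmless no-op */
	printf("%d\n", set_user_pkey_access(0, 1));	/* -22: would deny pkey-0 */
	printf("%d\n", set_user_pkey_access(2, 1));	/* 0: ordinary key */
	return 0;
}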
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index d2d8c28db336..7f627e3f4da4 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -50,13 +50,6 @@ int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
struct pci_dev *dev, int num);
void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
struct pci_dev *dev);
-
-/* Support for the cxl kernel api on the real PHB (instead of vPHB) */
-int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable);
-bool pnv_pci_on_cxl_phb(struct pci_dev *dev);
-struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose);
-void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu);
-
#endif
struct pnv_php_slot {
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 4436887bc415..665af14850e4 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -12,8 +12,7 @@
#ifndef _ASM_POWERPC_PPC_OPCODE_H
#define _ASM_POWERPC_PPC_OPCODE_H
-#include <linux/stringify.h>
-#include <asm/asm-compat.h>
+#include <asm/asm-const.h>
#define __REG_R0 0
#define __REG_R1 1
@@ -367,6 +366,8 @@
#define PPC_INST_STFDX 0x7c0005ae
#define PPC_INST_LVX 0x7c0000ce
#define PPC_INST_STVX 0x7c0001ce
+#define PPC_INST_VCMPEQUD 0x100000c7
+#define PPC_INST_VCMPEQUB 0x10000006
/* macros to insert fields into opcodes */
#define ___PPC_RA(a) (((a) & 0x1f) << 16)
@@ -397,6 +398,7 @@
#define __PPC_BI(s) (((s) & 0x1f) << 16)
#define __PPC_CT(t) (((t) & 0x0f) << 21)
#define __PPC_SPR(r) ((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
+#define __PPC_RC21 (0x1 << 10)
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
@@ -568,4 +570,12 @@
((IH & 0x7) << 21))
#define PPC_INVALIDATE_ERAT PPC_SLBIA(7)
+#define VCMPEQUD_RC(vrt, vra, vrb) stringify_in_c(.long PPC_INST_VCMPEQUD | \
+ ___PPC_RT(vrt) | ___PPC_RA(vra) | \
+ ___PPC_RB(vrb) | __PPC_RC21)
+
+#define VCMPEQUB_RC(vrt, vra, vrb) stringify_in_c(.long PPC_INST_VCMPEQUB | \
+ ___PPC_RT(vrt) | ___PPC_RA(vra) | \
+ ___PPC_RB(vrb) | __PPC_RC21)
+
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
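
VCMPEQUD_RC()/VCMPEQUB_RC() build the .long by OR-ing the register fields and the Rc bit (bit 10, per the new __PPC_RC21) into the base opcode. A standalone sketch that prints one encoding; the RT and RB shift amounts are assumed from the usual PPC field layout rather than quoted from this header:

#include <stdio.h>

#define PPC_INST_VCMPEQUD	0x100000c7
#define PPC_RT(t)	(((unsigned int)(t) & 0x1f) << 21)	/* assumed */
#define PPC_RA(a)	(((unsigned int)(a) & 0x1f) << 16)
#define PPC_RB(b)	(((unsigned int)(b) & 0x1f) << 11)	/* assumed */
#define PPC_RC21	(0x1u << 10)

int main(void)
{
	/* vcmpequd. v0,v1,v2 */
	unsigned int insn = PPC_INST_VCMPEQUD | PPC_RT(0) | PPC_RA(1) |
			    PPC_RB(2) | PPC_RC21;
	printf("0x%08x\n", insn);	/* 0x100114c7 */
	return 0;
}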
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 75ece56dcd62..b5d023680801 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -9,6 +9,7 @@
#include <asm/processor.h>
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>
+#include <asm/feature-fixups.h>
#ifdef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 5debe337ea9d..52fadded5c1e 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -39,10 +39,9 @@
#endif /* CONFIG_PPC64 */
#ifndef __ASSEMBLY__
-#include <linux/compiler.h>
-#include <linux/cache.h>
+#include <linux/types.h>
+#include <asm/thread_info.h>
#include <asm/ptrace.h>
-#include <asm/types.h>
#include <asm/hw_breakpoint.h>
/* We do _not_ want to define new machine types at all, those must die
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index e4923686e43a..447cbd1bee99 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -24,6 +24,7 @@
#define _ASM_POWERPC_PTRACE_H
#include <uapi/asm/ptrace.h>
+#include <asm/asm-const.h>
#ifdef __powerpc64__
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 562568414cf4..e5b314ed054e 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -13,6 +13,8 @@
#include <linux/stringify.h>
#include <asm/cputable.h>
+#include <asm/asm-const.h>
+#include <asm/feature-fixups.h>
/* Pickup Book E specific registers. */
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
@@ -161,7 +163,7 @@
#define PSSCR_ESL 0x00200000 /* Enable State Loss */
#define PSSCR_SD 0x00400000 /* Status Disable */
#define PSSCR_PLS 0xf000000000000000 /* Power-saving Level Status */
-#define PSSCR_GUEST_VIS 0xf0000000000003ff /* Guest-visible PSSCR fields */
+#define PSSCR_GUEST_VIS 0xf0000000000003ffUL /* Guest-visible PSSCR fields */
#define PSSCR_FAKE_SUSPEND 0x00000400 /* Fake-suspend bit (P9 DD2.2) */
#define PSSCR_FAKE_SUSPEND_LG 10 /* Fake-suspend bit position */
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
index 3ba9c6f096fc..74c2c57c492a 100644
--- a/arch/powerpc/include/asm/reg_a2.h
+++ b/arch/powerpc/include/asm/reg_a2.h
@@ -12,6 +12,8 @@
#ifndef __ASM_POWERPC_REG_A2_H__
#define __ASM_POWERPC_REG_A2_H__
+#include <asm/asm-const.h>
+
#define SPRN_TENSR 0x1b5
#define SPRN_TENS 0x1b6 /* Thread ENable Set */
#define SPRN_TENC 0x1b7 /* Thread ENable Clear */
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
index d7ccf93e6279..a21f529c43d9 100644
--- a/arch/powerpc/include/asm/reg_fsl_emb.h
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -7,6 +7,8 @@
#ifndef __ASM_POWERPC_REG_FSL_EMB_H__
#define __ASM_POWERPC_REG_FSL_EMB_H__
+#include <linux/stringify.h>
+
#ifndef __ASSEMBLY__
/* Performance Monitor Registers */
#define mfpmr(rn) ({unsigned int rval; \
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 44989b22383c..759597bf0fd8 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -22,6 +22,7 @@ enum stf_barrier_type {
void setup_stf_barrier(void);
void do_stf_barrier_fixups(enum stf_barrier_type types);
+void setup_count_cache_flush(void);
static inline void security_ftr_set(unsigned long feature)
{
@@ -59,6 +60,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Indirect branch prediction cache disabled
#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
+// bcctr 2,0,0 triggers a hardware assisted count cache flush
+#define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull
+
// Features indicating need for Spectre/Meltdown mitigations
@@ -74,6 +78,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Firmware configuration indicates user favours security over performance
#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
+// Software required to flush count cache on context switch
+#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
+
// Features enabled by default
#define SEC_FTR_DEFAULT \
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 8721fd004291..1a951b00465d 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -52,11 +52,15 @@ enum l1d_flush_type {
void setup_rfi_flush(enum l1d_flush_type, bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
void setup_barrier_nospec(void);
+#else
+static inline void setup_barrier_nospec(void) { };
+#endif
void do_barrier_nospec_fixups(bool enable);
extern bool barrier_nospec_enabled;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
#else
static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 29ffaabdf75b..95b66a0c639b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -56,7 +56,6 @@ struct smp_ops_t {
int (*cpu_bootable)(unsigned int nr);
};
-extern void smp_flush_nmi_ipi(u64 delay_us);
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index bc66712bdc3c..28f5dae25db6 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -6,13 +6,20 @@
#ifdef CONFIG_SPARSEMEM
/*
* SECTION_SIZE_BITS 2^N: how big each section will be
- * MAX_PHYSADDR_BITS 2^N: how much physical address space we have
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
*/
#define SECTION_SIZE_BITS 24
-
-#define MAX_PHYSADDR_BITS 46
+/*
+ * If we store section details in page->flags, we can't increase
+ * MAX_PHYSMEM_BITS: a wider SECTIONS_WIDTH leaves no room for the node
+ * details in page->flags, and page_to_nid() must then do a
+ * page->section->node lookup. Hence only increase it for VMEMMAP.
+ */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define MAX_PHYSMEM_BITS 47
+#else
#define MAX_PHYSMEM_BITS 46
+#endif
#endif /* CONFIG_SPARSEMEM */
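
The numbers here are easy to sanity-check: a section covers 2^SECTION_SIZE_BITS bytes, so the section index needs MAX_PHYSMEM_BITS - SECTION_SIZE_BITS bits in page->flags, which is exactly the SECTIONS_WIDTH-versus-node-id trade-off the comment weighs. A quick arithmetic sketch:

#include <stdio.h>

#define SECTION_SIZE_BITS	24

int main(void)
{
	int vmemmap_bits = 47 - SECTION_SIZE_BITS;
	int classic_bits = 46 - SECTION_SIZE_BITS;

	printf("vmemmap: %llu sections, %d index bits\n",
	       1ULL << vmemmap_bits, vmemmap_bits);	/* 8388608, 23 */
	printf("classic: %llu sections, %d index bits\n",
	       1ULL << classic_bits, classic_bits);	/* 4194304, 22 */
	return 0;
}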
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 72dc4ddc2972..685c72310f5d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -24,9 +24,9 @@
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
-#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
+#include <asm/asm-405.h>
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
diff --git a/arch/powerpc/include/asm/stacktrace.h b/arch/powerpc/include/asm/stacktrace.h
new file mode 100644
index 000000000000..6149b53b3bc8
--- /dev/null
+++ b/arch/powerpc/include/asm/stacktrace.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Stack trace functions.
+ *
+ * Copyright 2018, Murilo Opsfelder Araujo, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_STACKTRACE_H
+#define _ASM_POWERPC_STACKTRACE_H
+
+void show_user_instructions(struct pt_regs *regs);
+
+#endif /* _ASM_POWERPC_STACKTRACE_H */
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index 9b8cedf618f4..1647de15a31e 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -50,6 +50,8 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
return __memset64(p, v, n * 8);
}
#else
+#define __HAVE_ARCH_STRLEN
+
extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 6ec546090ba1..aca70fb43147 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -3,8 +3,8 @@
#define _ASM_POWERPC_SYNCH_H
#ifdef __KERNEL__
-#include <linux/stringify.h>
#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index f308dfeb2746..3c0002044bc9 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -9,6 +9,8 @@
#ifndef _ASM_POWERPC_THREAD_INFO_H
#define _ASM_POWERPC_THREAD_INFO_H
+#include <asm/asm-const.h>
+
#ifdef __KERNEL__
#define THREAD_SHIFT CONFIG_THREAD_SHIFT
@@ -25,7 +27,6 @@
#include <linux/cache.h>
#include <asm/processor.h>
#include <asm/page.h>
-#include <linux/stringify.h>
#include <asm/accounting.h>
/*
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 9138baccebb0..f0e571b2dc7c 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -17,7 +17,6 @@
#include <asm/pgtable.h>
#endif
#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
#ifndef __powerpc64__
#include <asm/page.h>
#include <asm/mmu.h>
@@ -53,7 +52,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
if (!tlb->page_size)
tlb->page_size = page_size;
else if (tlb->page_size != page_size) {
- tlb_flush_mmu(tlb);
+ if (!tlb->fullmm)
+ tlb_flush_mmu(tlb);
/*
* update the page size after flush for the new
* mmu_gather.
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 7d5a157c7832..61fba43bf8b2 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -1,87 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
-/*
- * TLB flushing:
- *
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - local_flush_tlb_mm(mm, full) flushes the specified mm context on
- * the local processor
- * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
- * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifdef __KERNEL__
-
-#ifdef CONFIG_PPC_MMU_NOHASH
-/*
- * TLB flushing for software loaded TLB chips
- *
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
- * flush_tlb_kernel_range are best implemented as tlbia vs
- * specific tlbie's
- */
-
-struct vm_area_struct;
-struct mm_struct;
-
-#define MMU_NO_CONTEXT ((unsigned int)-1)
-
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
-extern void local_flush_tlb_mm(struct mm_struct *mm);
-extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-
-extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
- int tsize, int ind);
-
-#ifdef CONFIG_SMP
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
- int tsize, int ind);
-#else
-#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
-#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
-#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i)
-#endif
-
-#elif defined(CONFIG_PPC_STD_MMU_32)
-
-#define MMU_NO_CONTEXT (0)
-/*
- * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
- */
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
- flush_tlb_page(vma, vmaddr);
-}
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
-{
- flush_tlb_mm(mm);
-}
-
-#elif defined(CONFIG_PPC_BOOK3S_64)
-#include <asm/book3s/64/tlbflush.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/tlbflush.h>
#else
-#error Unsupported MMU type
-#endif
+#include <asm/nohash/tlbflush.h>
+#endif /* !CONFIG_PPC_BOOK3S */
-#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 468653ce844c..bac225bb7f64 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -2,7 +2,6 @@
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H
-#include <asm/asm-compat.h>
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -250,10 +249,17 @@ do { \
} \
} while (0)
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err; \
- unsigned long __gu_val; \
+ __long_type(*(ptr)) __gu_val; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
@@ -267,7 +273,7 @@ do { \
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT; \
- unsigned long __gu_val = 0; \
+ __long_type(*(ptr)) __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
@@ -281,7 +287,7 @@ do { \
#define __get_user_nosleep(x, ptr, size) \
({ \
long __gu_err; \
- unsigned long __gu_val; \
+ __long_type(*(ptr)) __gu_val; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
barrier_nospec(); \
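
The __long_type() macro above is pure compile-time type selection: __builtin_choose_expr (a GCC/Clang extension) yields 0ULL when the access is wider than unsigned long, so a 64-bit get_user() on a 32-bit build gets a 64-bit temporary instead of truncating. A standalone demonstration using the same macro:

#include <stdio.h>

#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

int main(void)
{
	int small = 0;
	long long big = 0;

	/* sizeof(unsigned long): 8 on 64-bit, 4 on 32-bit */
	printf("%zu\n", sizeof(__long_type(small)));
	/* always 8: promoted to unsigned long long where needed */
	printf("%zu\n", sizeof(__long_type(big)));
	return 0;
}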
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 8d1a2792484f..3c704f5dd3ae 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -87,7 +87,6 @@ extern int xive_smp_prepare_cpu(unsigned int cpu);
extern void xive_smp_setup_cpu(void);
extern void xive_smp_disable_cpu(void);
extern void xive_teardown_cpu(void);
-extern void xive_kexec_teardown_cpu(int secondary);
extern void xive_shutdown(void);
extern void xive_flush_interrupt(void);
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 2b4c40b255e4..3b66f2c19c84 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -7,10 +7,10 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
-ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_PPC64
CFLAGS_prom_init.o += $(NO_MINIMAL_TOC)
endif
-ifeq ($(CONFIG_PPC32),y)
+ifdef CONFIG_PPC32
CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
@@ -42,9 +42,10 @@ obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
+obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
+obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
@@ -62,13 +63,13 @@ obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_FA_DUMP) += fadump.o
-ifeq ($(CONFIG_PPC32),y)
+ifdef CONFIG_PPC32
obj-$(CONFIG_E500) += idle_e500.o
endif
obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
-ifeq ($(CONFIG_FSL_BOOKE),y)
+ifdef CONFIG_FSL_BOOKE
obj-$(CONFIG_HIBERNATION) += swsusp_booke.o
else
obj-$(CONFIG_HIBERNATION) += swsusp_$(BITS).o
@@ -109,9 +110,11 @@ obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o crash.o \
machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file_$(BITS).o kexec_elf_$(BITS).o
-ifeq ($(CONFIG_HAVE_IMA_KEXEC)$(CONFIG_IMA),yy)
+ifdef CONFIG_HAVE_IMA_KEXEC
+ifdef CONFIG_IMA
obj-y += ima_kexec.o
endif
+endif
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
@@ -164,7 +167,7 @@ PHONY += systbl_chk
systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
$(call cmd,systbl_chk)
-ifeq ($(CONFIG_PPC_OF_BOOT_TRAMPOLINE),y)
+ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
$(obj)/built-in.a: prom_init_check
quiet_cmd_prom_init_check = CALL $<
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0a0544335950..89cf15566c4e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -766,7 +766,6 @@ int main(void)
OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state);
OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask);
OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
- OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas);
OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr);
OFFSET(PACA_DONT_STOP, paca_struct, dont_stop);
#define STOP_SPR(x, f) OFFSET(x, paca_struct, stop_sprs.f)
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index a9f3970693e1..fa3c2c91290c 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -16,6 +16,7 @@
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/mmu.h>
+#include <asm/feature-fixups.h>
_GLOBAL(__setup_cpu_603)
mflr r5
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c8fc9691f8c7..2da01340c84c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -447,25 +447,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
- { /* Power8 DD1: Does not support doorbell IPIs */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x004d0100,
- .cpu_name = "POWER8 (raw)",
- .cpu_features = CPU_FTRS_POWER8_DD1,
- .cpu_user_features = COMMON_USER_POWER8,
- .cpu_user_features2 = COMMON_USER2_POWER8,
- .mmu_features = MMU_FTRS_POWER8,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
- .cpu_setup = __setup_cpu_power8,
- .cpu_restore = __restore_cpu_power8,
- .machine_check_early = __machine_check_early_realmode_p8,
- .platform = "power8",
- },
{ /* Power8 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004d0000,
@@ -485,25 +466,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
- { /* Power9 DD1*/
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x004e0100,
- .cpu_name = "POWER9 (raw)",
- .cpu_features = CPU_FTRS_POWER9_DD1,
- .cpu_user_features = COMMON_USER_POWER9,
- .cpu_user_features2 = COMMON_USER2_POWER9,
- .mmu_features = MMU_FTRS_POWER9,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
- .cpu_setup = __setup_cpu_power9,
- .cpu_restore = __restore_cpu_power9,
- .machine_check_early = __machine_check_early_realmode_p9,
- .platform = "power9",
- },
{ /* Power9 DD2.0 */
.pvr_mask = 0xffffefff,
.pvr_value = 0x004e0200,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 17c8b99680f2..43a3ce2301e8 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -23,7 +23,6 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
-#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/setjmp.h>
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 155170d70324..dbfc7056d7df 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -357,9 +357,6 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init dma_init(void)
{
-#ifdef CONFIG_PCI
- dma_debug_add_bus(&pci_bus_type);
-#endif
#ifdef CONFIG_IBMVIO
dma_debug_add_bus(&vio_bus_type);
#endif
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 96dd3d871986..f432054234a4 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -701,9 +701,7 @@ static __init void cpufeatures_cpu_quirks(void)
/*
* Not all quirks can be derived from the cpufeatures device tree.
*/
- if ((version & 0xffffff00) == 0x004e0100)
- cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
- else if ((version & 0xffffefff) == 0x004e0200)
+ if ((version & 0xffffefff) == 0x004e0200)
; /* DD2.0 has no feature flag */
else if ((version & 0xffffefff) == 0x004e0201)
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 5746809cfaad..6ebba3e48b01 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1087,7 +1087,7 @@ static int eeh_init(void)
if (eeh_enabled())
pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
- else
+ else if (!eeh_has_flag(EEH_POSTPONED_PROBE))
pr_info("EEH: No capable adapters found\n");
return ret;
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 973577f2141c..e58c3f467db5 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -33,6 +33,9 @@
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/asm-405.h>
+#include <asm/feature-fixups.h>
+#include <asm/barrier.h>
/*
* MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
@@ -358,6 +361,15 @@ syscall_dotrace_cont:
ori r10,r10,sys_call_table@l
slwi r0,r0,2
bge- 66f
+
+ barrier_nospec_asm
+ /*
+ * Prevent the load of the handler below (based on the user-passed
+ * system call number) being speculatively executed until the test
+ * against NR_syscalls and branch to 66f above has committed.
+ */
+
lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
mtlr r10
addi r9,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 729e9ef4d3bb..2206912ea4f0 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -25,6 +25,7 @@
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
+#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
@@ -38,11 +39,13 @@
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
+#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
+#include <asm/feature-fixups.h>
/*
* System calls.
@@ -504,6 +507,57 @@ _GLOBAL(ret_from_kernel_thread)
li r3,0
b .Lsyscall_exit
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#define FLUSH_COUNT_CACHE \
+1: nop; \
+ patch_site 1b, patch__call_flush_count_cache
+
+
+#define BCCTR_FLUSH .long 0x4c400420
+
+.macro nops number
+ .rept \number
+ nop
+ .endr
+.endm
+
+.balign 32
+.global flush_count_cache
+flush_count_cache:
+ /* Save LR into r9 */
+ mflr r9
+
+ .rept 64
+ bl .+4
+ .endr
+ b 1f
+ nops 6
+
+ .balign 32
+ /* Restore LR */
+1: mtlr r9
+ li r9,0x7fff
+ mtctr r9
+
+ BCCTR_FLUSH
+
+2: nop
+ patch_site 2b, patch__flush_count_cache_return
+
+ nops 3
+
+ .rept 278
+ .balign 32
+ BCCTR_FLUSH
+ nops 7
+ .endr
+
+ blr
+#else
+#define FLUSH_COUNT_CACHE
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -535,6 +589,8 @@ _GLOBAL(_switch)
std r23,_CCR(r1)
std r1,KSP(r3) /* Set old stack pointer */
+ FLUSH_COUNT_CACHE
+
/*
* On SMP kernels, care must be taken because a task may be
* scheduled off CPUx and on to CPUy. Memory ordering must be
@@ -993,6 +1049,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
std r4,_TRAP(r1)
/*
+ * PACA_IRQ_HARD_DIS won't always be set here, so set it now
+ * to reconcile the IRQ state. Tracing is already accounted for.
+ */
+ lbz r4,PACAIRQHAPPENED(r13)
+ ori r4,r4,PACA_IRQ_HARD_DIS
+ stb r4,PACAIRQHAPPENED(r13)
+
+ /*
* Then find the right handler and call it. Interrupts are
* still soft-disabled and we keep them that way.
*/
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 9b6e653e501a..6d6e144a28ce 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -27,6 +27,7 @@
#include <asm/hw_irq.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
+#include <asm/feature-fixups.h>
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -949,7 +950,11 @@ kernel_dbg_exc:
.macro masked_interrupt_book3e paca_irq full_mask
lbz r10,PACAIRQHAPPENED(r13)
+ .if \full_mask == 1
+ ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
+ .else
ori r10,r10,\paca_irq
+ .endif
stb r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 285c6465324a..ea04dfb8c092 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -18,6 +18,7 @@
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
+#include <asm/feature-fixups.h>
/*
* There are a few constraints to be concerned with.
@@ -126,8 +127,8 @@ EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
* MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
* being used, so a nested NMI exception would corrupt it.
*/
- EXCEPTION_PROLOG_PSERIES_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
- IDLETEST, 0x100)
+ EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
+ IDLETEST, 0x100)
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)
@@ -230,8 +231,8 @@ EXC_COMMON_BEGIN(system_reset_common)
TRAMP_REAL_BEGIN(system_reset_fwnmi)
SET_SCRATCH0(r13) /* save r13 */
/* See comment at system_reset exception */
- EXCEPTION_PROLOG_PSERIES_NORI(PACA_EXNMI, system_reset_common,
- EXC_STD, NOTEST, 0x100)
+ EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
+ NOTEST, 0x100)
#endif /* CONFIG_PPC_PSERIES */
@@ -276,9 +277,7 @@ BEGIN_FTR_SECTION
*
* This interrupt can wake directly from idle. If that is the case,
* the machine check is handled then the idle wakeup code is called
- * to restore state. In that case, the POWER9 DD1 idle PACA workaround
- * is not applied in the early machine check code, which will cause
- * bugs.
+ * to restore state.
*/
mr r11,r1 /* Save r1 */
lhz r10,PACA_IN_MCE(r13)
@@ -339,7 +338,7 @@ machine_check_pSeries_0:
* nested machine check corrupts it. machine_check_common enables
* MSR_RI.
*/
- EXCEPTION_PROLOG_PSERIES_1_NORI(machine_check_common, EXC_STD)
+ EXCEPTION_PROLOG_2_NORI(machine_check_common, EXC_STD)
TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
@@ -769,13 +768,9 @@ EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
.globl hardware_interrupt_hv;
hardware_interrupt_hv:
BEGIN_FTR_SECTION
- _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
- EXC_HV, SOFTEN_TEST_HV,
- IRQS_DISABLED)
+ MASKABLE_EXCEPTION_HV(0x500, hardware_interrupt_common, IRQS_DISABLED)
FTR_SECTION_ELSE
- _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
- EXC_STD, SOFTEN_TEST_PR,
- IRQS_DISABLED)
+ MASKABLE_EXCEPTION(0x500, hardware_interrupt_common, IRQS_DISABLED)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
@@ -783,13 +778,11 @@ EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_hv:
BEGIN_FTR_SECTION
- _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
- EXC_HV, SOFTEN_TEST_HV,
- IRQS_DISABLED)
+ MASKABLE_RELON_EXCEPTION_HV(0x500, hardware_interrupt_common,
+ IRQS_DISABLED)
FTR_SECTION_ELSE
- _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
- EXC_STD, SOFTEN_TEST_PR,
- IRQS_DISABLED)
+ __MASKABLE_RELON_EXCEPTION(0x500, hardware_interrupt_common,
+ EXC_STD, SOFTEN_TEST_PR, IRQS_DISABLED)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
@@ -1328,7 +1321,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
#endif
KVMTEST_HV(0x1500)
- EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
+ EXCEPTION_PROLOG_2(denorm_common, EXC_HV)
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -1448,7 +1441,7 @@ EXC_VIRT_NONE(0x5800, 0x100)
std r12,PACA_EXGEN+EX_R12(r13); \
GET_SCRATCH0(r10); \
std r10,PACA_EXGEN+EX_R13(r13); \
- EXCEPTION_PROLOG_PSERIES_1(soft_nmi_common, _H)
+ EXCEPTION_PROLOG_2(soft_nmi_common, _H)
/*
* Branch to soft_nmi_interrupt using the emergency stack. The emergency
@@ -1500,7 +1493,10 @@ masked_##_H##interrupt: \
mfspr r10,SPRN_##_H##SRR1; \
xori r10,r10,MSR_EE; /* clear MSR_EE */ \
mtspr SPRN_##_H##SRR1,r10; \
-2: mtcrf 0x80,r9; \
+ ori r11,r11,PACA_IRQ_HARD_DIS; \
+ stb r11,PACAIRQHAPPENED(r13); \
+2: /* done */ \
+ mtcrf 0x80,r9; \
std r1,PACAR1(r13); \
ld r9,PACA_EXGEN+EX_R9(r13); \
ld r10,PACA_EXGEN+EX_R10(r13); \
@@ -1526,6 +1522,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
TRAMP_REAL_BEGIN(rfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
+ std r1,PACA_EXRFI+EX_R12(r13)
+ ld r1,PACAKSAVE(r13)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
@@ -1560,12 +1558,15 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
+ ld r1,PACA_EXRFI+EX_R12(r13)
GET_SCRATCH0(r13);
rfid
TRAMP_REAL_BEGIN(hrfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
+ std r1,PACA_EXRFI+EX_R12(r13)
+ ld r1,PACAKSAVE(r13)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
@@ -1600,6 +1601,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
+ ld r1,PACA_EXRFI+EX_R12(r13)
GET_SCRATCH0(r13);
hrfid
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 07e8396d472b..986ec476fd5d 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm;
static const struct fadump_mem_struct *fdm_active;
static DEFINE_MUTEX(fadump_mutex);
-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
+struct fad_crash_memory_ranges *crash_memory_ranges;
+int crash_memory_ranges_size;
int crash_mem_ranges;
+int max_crash_mem_ranges;
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node,
@@ -868,38 +870,107 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
return 0;
}
-static inline void fadump_add_crash_memory(unsigned long long base,
- unsigned long long end)
+static void free_crash_memory_ranges(void)
+{
+ kfree(crash_memory_ranges);
+ crash_memory_ranges = NULL;
+ crash_memory_ranges_size = 0;
+ max_crash_mem_ranges = 0;
+}
+
+/*
+ * Allocate or reallocate the crash memory ranges array in PAGE_SIZE
+ * increments.
+ */
+static int allocate_crash_memory_ranges(void)
{
+ struct fad_crash_memory_ranges *new_array;
+ u64 new_size;
+
+ new_size = crash_memory_ranges_size + PAGE_SIZE;
+ pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
+ new_size);
+
+ new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
+ if (new_array == NULL) {
+ pr_err("Insufficient memory for setting up crash memory ranges\n");
+ free_crash_memory_ranges();
+ return -ENOMEM;
+ }
+
+ crash_memory_ranges = new_array;
+ crash_memory_ranges_size = new_size;
+ max_crash_mem_ranges = (new_size /
+ sizeof(struct fad_crash_memory_ranges));
+ return 0;
+}
+
+static inline int fadump_add_crash_memory(unsigned long long base,
+ unsigned long long end)
+{
+ u64 start, size;
+ bool is_adjacent = false;
+
if (base == end)
- return;
+ return 0;
+
+ /*
+ * Fold adjacent memory ranges to bring down the count of crash
+ * memory ranges and, with it, the PT_LOAD segment count.
+ */
+ if (crash_mem_ranges) {
+ start = crash_memory_ranges[crash_mem_ranges - 1].base;
+ size = crash_memory_ranges[crash_mem_ranges - 1].size;
+
+ if ((start + size) == base)
+ is_adjacent = true;
+ }
+ if (!is_adjacent) {
+ /* resize the array on reaching the limit */
+ if (crash_mem_ranges == max_crash_mem_ranges) {
+ int ret;
+
+ ret = allocate_crash_memory_ranges();
+ if (ret)
+ return ret;
+ }
+ start = base;
+ crash_memory_ranges[crash_mem_ranges].base = start;
+ crash_mem_ranges++;
+ }
+
+ crash_memory_ranges[crash_mem_ranges - 1].size = (end - start);
pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
- crash_mem_ranges, base, end - 1, (end - base));
- crash_memory_ranges[crash_mem_ranges].base = base;
- crash_memory_ranges[crash_mem_ranges].size = end - base;
- crash_mem_ranges++;
+ (crash_mem_ranges - 1), start, end - 1, (end - start));
+ return 0;
}
-static void fadump_exclude_reserved_area(unsigned long long start,
+static int fadump_exclude_reserved_area(unsigned long long start,
unsigned long long end)
{
unsigned long long ra_start, ra_end;
+ int ret = 0;
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
if ((ra_start < end) && (ra_end > start)) {
if ((start < ra_start) && (end > ra_end)) {
- fadump_add_crash_memory(start, ra_start);
- fadump_add_crash_memory(ra_end, end);
+ ret = fadump_add_crash_memory(start, ra_start);
+ if (ret)
+ return ret;
+
+ ret = fadump_add_crash_memory(ra_end, end);
} else if (start < ra_start) {
- fadump_add_crash_memory(start, ra_start);
+ ret = fadump_add_crash_memory(start, ra_start);
} else if (ra_end < end) {
- fadump_add_crash_memory(ra_end, end);
+ ret = fadump_add_crash_memory(ra_end, end);
}
} else
- fadump_add_crash_memory(start, end);
+ ret = fadump_add_crash_memory(start, end);
+
+ return ret;
}
static int fadump_init_elfcore_header(char *bufp)
@@ -939,13 +1010,22 @@ static int fadump_init_elfcore_header(char *bufp)
* Traverse through memblock structure and setup crash memory ranges. These
* ranges will be used to create PT_LOAD program headers in the elfcore header.
*/
-static void fadump_setup_crash_memory_ranges(void)
+static int fadump_setup_crash_memory_ranges(void)
{
struct memblock_region *reg;
unsigned long long start, end;
+ int ret;
pr_debug("Setup crash memory ranges.\n");
crash_mem_ranges = 0;
+
+ /* allocate memory for crash memory ranges for the first time */
+ if (!max_crash_mem_ranges) {
+ ret = allocate_crash_memory_ranges();
+ if (ret)
+ return ret;
+ }
+
/*
* add the first memory chunk (RMA_START through boot_memory_size) as
* a separate memory chunk. The reason is, at the time crash firmware
@@ -953,7 +1033,9 @@ static void fadump_setup_crash_memory_ranges(void)
* specified during fadump registration. We need to create a separate
* program header for this chunk with the correct offset.
*/
- fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+ ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+ if (ret)
+ return ret;
for_each_memblock(memory, reg) {
start = (unsigned long long)reg->base;
@@ -973,8 +1055,12 @@ static void fadump_setup_crash_memory_ranges(void)
}
/* add this range excluding the reserved dump area. */
- fadump_exclude_reserved_area(start, end);
+ ret = fadump_exclude_reserved_area(start, end);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
/*
@@ -1097,6 +1183,7 @@ static int register_fadump(void)
{
unsigned long addr;
void *vaddr;
+ int ret;
/*
* If no memory is reserved then we can not register for firmware-
@@ -1105,7 +1192,9 @@ static int register_fadump(void)
if (!fw_dump.reserve_dump_area_size)
return -ENODEV;
- fadump_setup_crash_memory_ranges();
+ ret = fadump_setup_crash_memory_ranges();
+ if (ret)
+ return ret;
addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
/* Initialize fadump crash info header. */
@@ -1183,6 +1272,7 @@ void fadump_cleanup(void)
} else if (fw_dump.dump_registered) {
/* Un-register Firmware-assisted dump if it was registered. */
fadump_unregister_dump(&fdm);
+ free_crash_memory_ranges();
}
}
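
The fadump changes above combine two ideas: adjacent [base, end) chunks are folded into the previous entry to keep the PT_LOAD count down, and the backing array grows on demand (krealloc in PAGE_SIZE steps in the kernel). A userspace sketch of the same bookkeeping; names mirror the patch but the growth step and all values are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long long base, size; };

static struct range *ranges;
static int nr_ranges, max_ranges;

static int grow_ranges(void)
{
	int new_max = max_ranges + 4;	/* kernel grows by PAGE_SIZE instead */
	struct range *tmp = realloc(ranges, new_max * sizeof(*ranges));

	if (!tmp)
		return -1;
	ranges = tmp;
	max_ranges = new_max;
	return 0;
}

static int add_range(unsigned long long base, unsigned long long end)
{
	if (base == end)
		return 0;
	/* fold into the previous entry when the new chunk is adjacent */
	if (nr_ranges &&
	    ranges[nr_ranges - 1].base + ranges[nr_ranges - 1].size == base) {
		ranges[nr_ranges - 1].size += end - base;
		return 0;
	}
	if (nr_ranges == max_ranges && grow_ranges())
		return -1;
	ranges[nr_ranges].base = base;
	ranges[nr_ranges].size = end - base;
	nr_ranges++;
	return 0;
}

int main(void)
{
	add_range(0x0, 0x1000);
	add_range(0x1000, 0x3000);	/* adjacent: folded into entry 0 */
	add_range(0x8000, 0x9000);	/* gap: becomes entry 1 */
	printf("%d ranges, first spans %#llx bytes\n", nr_ranges, ranges[0].size);
	free(ranges);
	return 0;
}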
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 6c509f39bbde..529dcc21c3f9 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -25,6 +25,8 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
#ifdef CONFIG_VSX
#define __REST_32FPVSRS(n,c,base) \
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 29b2fed93289..61ca27929355 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -35,6 +35,7 @@
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 41374a468d1c..b19d78410511 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -42,6 +42,7 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/asm-405.h>
/* As with the other PowerPC ports, it is expected that when code
* execution begins here, the following registers contain valid, yet
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 6eca15f25c73..4898e9491a1c 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -44,6 +44,7 @@
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
/* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 6cab07e76732..6582f824d620 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -30,7 +30,6 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
-#include <asm/fixmap.h>
#include <asm/export.h>
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
@@ -873,6 +872,10 @@ start_here:
li r0,0
stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+ lis r6, swapper_pg_dir@ha
+ tophys(r6,r6)
+ mtspr SPRN_M_TW, r6
+
bl early_init /* We have to do this with MMU on */
/*
@@ -893,9 +896,6 @@ start_here:
* init's THREAD like the context switch code does, but this is
* easier......until someone changes init's static structures.
*/
- lis r6, swapper_pg_dir@ha
- tophys(r6,r6)
- mtspr SPRN_M_TW, r6
lis r4,2f@h
ori r4,r4,2f@l
tophys(r4,r4)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index bf4c6021515f..e2750b856c8f 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -43,6 +43,7 @@
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 80547dad37da..fec8a6773119 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -119,11 +119,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
/*
* Check for virtual address in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
- return is_kernel_addr(info->address);
+ return is_kernel_addr(hw->address);
}
int arch_bp_generic_fields(int type, int *gen_bp_type)
@@ -141,30 +139,31 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
int ret = -EINVAL, length_max;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
if (!bp)
return ret;
- info->type = HW_BRK_TYPE_TRANSLATE;
- if (bp->attr.bp_type & HW_BREAKPOINT_R)
- info->type |= HW_BRK_TYPE_READ;
- if (bp->attr.bp_type & HW_BREAKPOINT_W)
- info->type |= HW_BRK_TYPE_WRITE;
- if (info->type == HW_BRK_TYPE_TRANSLATE)
+ hw->type = HW_BRK_TYPE_TRANSLATE;
+ if (attr->bp_type & HW_BREAKPOINT_R)
+ hw->type |= HW_BRK_TYPE_READ;
+ if (attr->bp_type & HW_BREAKPOINT_W)
+ hw->type |= HW_BRK_TYPE_WRITE;
+ if (hw->type == HW_BRK_TYPE_TRANSLATE)
/* must set at least read or write */
return ret;
- if (!(bp->attr.exclude_user))
- info->type |= HW_BRK_TYPE_USER;
- if (!(bp->attr.exclude_kernel))
- info->type |= HW_BRK_TYPE_KERNEL;
- if (!(bp->attr.exclude_hv))
- info->type |= HW_BRK_TYPE_HYP;
- info->address = bp->attr.bp_addr;
- info->len = bp->attr.bp_len;
+ if (!attr->exclude_user)
+ hw->type |= HW_BRK_TYPE_USER;
+ if (!attr->exclude_kernel)
+ hw->type |= HW_BRK_TYPE_KERNEL;
+ if (!attr->exclude_hv)
+ hw->type |= HW_BRK_TYPE_HYP;
+ hw->address = attr->bp_addr;
+ hw->len = attr->bp_len;
/*
* Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -178,12 +177,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
if (cpu_has_feature(CPU_FTR_DAWR)) {
length_max = 512 ; /* 64 doublewords */
/* DAWR region can't cross 512 boundary */
- if ((bp->attr.bp_addr >> 9) !=
- ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
+ if ((attr->bp_addr >> 9) !=
+ ((attr->bp_addr + attr->bp_len - 1) >> 9))
return -EINVAL;
}
- if (info->len >
- (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
+ if (hw->len >
+ (length_max - (hw->address & HW_BREAKPOINT_ALIGN)))
return -EINVAL;
return 0;
}
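
The DAWR test above rejects any watched range that straddles a 512-byte boundary, which comparing the start and end addresses shifted right by 9 captures exactly. A worked standalone check (addresses are made up):

#include <stdio.h>

static int crosses_512(unsigned long long addr, unsigned long long len)
{
	return (addr >> 9) != ((addr + len - 1) >> 9);
}

int main(void)
{
	printf("%d\n", crosses_512(0x100, 8));	/* 0: inside one 512B block */
	printf("%d\n", crosses_512(0x1fc, 8));	/* 1: straddles 0x200 */
	return 0;
}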
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 1686916cc7f0..ff026c9d3cab 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -20,6 +20,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/feature-fixups.h>
.text
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
index 2b269315d377..4e0d94d02030 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -36,7 +36,7 @@ _GLOBAL(\name)
*/
lbz r3,PACAIRQHAPPENED(r13)
cmpwi cr0,r3,0
- bnelr
+ bne 2f
/* Now we are going to mark ourselves as soft and hard enabled in
* order to be able to take interrupts while asleep. We inform lockdep
@@ -72,6 +72,11 @@ _GLOBAL(\name)
wrteei 1
\loop
+2:
+ lbz r10,PACAIRQHAPPENED(r13)
+ ori r10,r10,PACA_IRQ_HARD_DIS
+ stb r10,PACAIRQHAPPENED(r13)
+ blr
.endm
.macro BOOK3E_IDLE_LOOP
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index e734f6e45abc..7f5ac2e8581b 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -23,6 +23,8 @@
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
#undef DEBUG
@@ -144,7 +146,9 @@ power9_restore_additional_sprs:
mtspr SPRN_MMCR1, r4
ld r3, STOP_MMCR2(r13)
+ ld r4, PACA_SPRG_VDSO(r13)
mtspr SPRN_MMCR2, r3
+ mtspr SPRN_SPRG3, r4
blr
/*
@@ -467,43 +471,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
#endif
/*
- * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
- * HSPRG0 will be set to the HSPRG0 value of one of the
- * threads in this core. Thus the value we have in r13
- * may not be this thread's paca pointer.
- *
- * Fortunately, the TIR remains invariant. Since this thread's
- * paca pointer is recorded in all its sibling's paca, we can
- * correctly recover this thread's paca pointer if we
- * know the index of this thread in the core.
- *
- * This index can be obtained from the TIR.
- *
- * i.e, thread's position in the core = TIR.
- * If this value is i, then this thread's paca is
- * paca->thread_sibling_pacas[i].
- */
-power9_dd1_recover_paca:
- mfspr r4, SPRN_TIR
- /*
- * Since each entry in thread_sibling_pacas is 8 bytes
- * we need to left-shift by 3 bits. Thus r4 = i * 8
- */
- sldi r4, r4, 3
- /* Get &paca->thread_sibling_pacas[0] in r5 */
- ld r5, PACA_SIBLING_PACA_PTRS(r13)
- /* Load paca->thread_sibling_pacas[i] into r13 */
- ldx r13, r4, r5
- SET_PACA(r13)
- /*
- * Indicate that we have lost NVGPR state
- * which needs to be restored from the stack.
- */
- li r3, 1
- stb r3,PACA_NAPSTATELOST(r13)
- blr
-
-/*
* Called from machine check handler for powersave wakeups.
* Low level machine check processing has already been done. Now just
* go through the wake up path to get everything in order.
@@ -537,9 +504,6 @@ pnv_powersave_wakeup:
ld r2, PACATOC(r13)
BEGIN_FTR_SECTION
-BEGIN_FTR_SECTION_NESTED(70)
- bl power9_dd1_recover_paca
-END_FTR_SECTION_NESTED_IFSET(CPU_FTR_POWER9_DD1, 70)
bl pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
bl pnv_restore_hyp_resource_arch207
@@ -602,22 +566,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
-BEGIN_FTR_SECTION_NESTED(71)
- /*
- * Assume that we are waking up from the state
- * same as the Requested Level (RL) in the PSSCR
- * which are Bits 60-63
- */
- ld r5,PACA_REQ_PSSCR(r13)
- rldicl r5,r5,0,60
-FTR_SECTION_ELSE_NESTED(71)
/*
* 0-3 bits correspond to Power-Saving Level Status
* which indicates the idle state we are waking up from
*/
mfspr r5, SPRN_PSSCR
rldicl r5,r5,4,60
-ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
li r0, 0 /* clear requested_psscr to say we're awake */
std r0, PACA_REQ_PSSCR(r13)
cmpd cr4,r5,r4
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
index b9b6ef510be1..583e55ac7d26 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -17,6 +17,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/feature-fixups.h>
.text
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 08faa93755f9..dd7471fe20bd 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -16,6 +16,7 @@
#include <asm/asm-offsets.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
+#include <asm/feature-fixups.h>
#undef DEBUG
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0682fef1f385..916ddc4aac44 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -145,8 +145,20 @@ notrace unsigned int __check_irq_replay(void)
trace_hardirqs_on();
trace_hardirqs_off();
+ /*
+ * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
+ * not be set, which means interrupts have only just been hard
+ * disabled as part of the local_irq_restore or interrupt return
+ * code. In that case, skip the decrementer check because it's
+ * expensive to read the TB.
+ *
+ * HARD_DIS then gets cleared here, but it's reconciled later.
+ * Either local_irq_disable will replay the interrupt and that
+ * will reconcile state like other hard interrupts. Or interrupt
+ * return will replay the interrupt and in that case it sets
+ * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
+ */
if (happened & PACA_IRQ_HARD_DIS) {
- /* Clear bit 0 which we wouldn't clear otherwise */
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
/*
@@ -248,24 +260,33 @@ notrace void arch_local_irq_restore(unsigned long mask)
* cannot have preempted.
*/
irq_happened = get_irq_happened();
- if (!irq_happened)
+ if (!irq_happened) {
+ /*
+ * FIXME. Here we'd like to be able to do:
+ *
+ * #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ * WARN_ON(!(mfmsr() & MSR_EE));
+ * #endif
+ *
+ * But currently it hits in a few paths; we should fix those and
+ * enable the warning.
+ */
return;
+ }
/*
* We need to hard disable to get a trusted value from
* __check_irq_replay(). We also need to soft-disable
* again to avoid warnings in there due to the use of
* per-cpu variables.
- *
- * We know that if the value in irq_happened is exactly 0x01
- * then we are already hard disabled (there are other less
- * common cases that we'll ignore for now), so we skip the
- * (expensive) mtmsrd.
*/
- if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
+ if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ WARN_ON(!(mfmsr() & MSR_EE));
+#endif
__hard_irq_disable();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- else {
+ } else {
/*
* We should already be hard disabled here. We had bugs
* where that wasn't the case so let's dbl check it and
@@ -274,8 +295,8 @@ notrace void arch_local_irq_restore(unsigned long mask)
*/
if (WARN_ON(mfmsr() & MSR_EE))
__hard_irq_disable();
- }
#endif
+ }
irq_soft_mask_set(IRQS_ALL_DISABLED);
trace_hardirqs_off();
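
The rewritten check above keys off the PACA_IRQ_HARD_DIS flag instead of comparing irq_happened against exactly 0x01, so any mix of pending-interrupt bits with HARD_DIS set now skips the expensive mtmsrd. A minimal sketch of the pattern, where PACA_IRQ_HARD_DIS and __hard_irq_disable() are the real kernel names but the surrounding function is illustrative:

    /* Sketch only; restore_sketch() is not a kernel function. */
    static void restore_sketch(unsigned char irq_happened)
    {
            if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
                    /* EE may still be on; hard disable before replaying */
                    __hard_irq_disable();
            }
            /* else: already hard disabled, the mtmsrd can be skipped */
    }
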
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 7a1f99f1b47f..e4a49c051325 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -25,50 +25,6 @@
#include <linux/preempt.h>
#include <linux/ftrace.h>
-/*
- * This is called from ftrace code after invoking registered handlers to
- * disambiguate regs->nip changes done by jprobes and livepatch. We check if
- * there is an active jprobe at the provided address (mcount location).
- */
-int __is_active_jprobe(unsigned long addr)
-{
- if (!preemptible()) {
- struct kprobe *p = raw_cpu_read(current_kprobe);
- return (p && (unsigned long)p->addr == addr) ? 1 : 0;
- }
-
- return 0;
-}
-
-static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb, unsigned long orig_nip)
-{
- /*
- * Emulate singlestep (and also recover regs->nip)
- * as if there is a nop
- */
- regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
- if (unlikely(p->post_handler)) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- p->post_handler(p, regs, 0);
- }
- __this_cpu_write(current_kprobe, NULL);
- if (orig_nip)
- regs->nip = orig_nip;
- return 1;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- if (kprobe_ftrace(p))
- return __skip_singlestep(p, regs, kcb, 0);
- else
- return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
struct ftrace_ops *ops, struct pt_regs *regs)
@@ -76,18 +32,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
struct kprobe *p;
struct kprobe_ctlblk *kcb;
- preempt_disable();
-
p = get_kprobe((kprobe_opcode_t *)nip);
if (unlikely(!p) || kprobe_disabled(p))
- goto end;
+ return;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
- unsigned long orig_nip = regs->nip;
-
/*
* On powerpc, NIP is *before* this instruction for the
* pre handler
@@ -96,19 +48,23 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (!p->pre_handler || !p->pre_handler(p, regs))
- __skip_singlestep(p, regs, kcb, orig_nip);
- else {
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
/*
- * If pre_handler returns !0, it sets regs->nip and
- * resets current kprobe. In this case, we should not
- * re-enable preemption.
+ * Emulate singlestep (and also recover regs->nip)
+ * as if there is a nop
*/
- return;
+ regs->nip += MCOUNT_INSN_SIZE;
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
}
+ /*
+ * If pre_handler returns !0, it changes regs->nip. We have to
+ * skip emulating post_handler.
+ */
+ __this_cpu_write(current_kprobe, NULL);
}
-end:
- preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e4c5bf33970b..5c60bb0f927f 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -317,25 +317,17 @@ int kprobe_handler(struct pt_regs *regs)
}
prepare_singlestep(p, regs);
return 1;
- } else {
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /* If trap variant, then it belongs not to us */
- kprobe_opcode_t cur_insn = *addr;
- if (is_trap(cur_insn))
- goto no_kprobe;
- /* The breakpoint instruction was removed by
- * another cpu right after we hit, no further
- * handling of this interrupt is appropriate
- */
- ret = 1;
+ } else if (*addr != BREAKPOINT_INSTRUCTION) {
+ /* If trap variant, then it does not belong to us */
+ kprobe_opcode_t cur_insn = *addr;
+
+ if (is_trap(cur_insn))
goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- if (!skip_singlestep(p, regs, kcb))
- goto ss_probe;
- ret = 1;
- }
+ /* The breakpoint instruction was removed by
+ * another cpu right after we hit, no further
+ * handling of this interrupt is appropriate
+ */
+ ret = 1;
}
goto no_kprobe;
}
@@ -350,7 +342,7 @@ int kprobe_handler(struct pt_regs *regs)
*/
kprobe_opcode_t cur_insn = *addr;
if (is_trap(cur_insn))
- goto no_kprobe;
+ goto no_kprobe;
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
@@ -366,11 +358,13 @@ int kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
set_current_kprobe(p, regs, kcb);
- if (p->pre_handler && p->pre_handler(p, regs))
- /* handler has already set things up, so skip ss setup */
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ /* handler changed execution path, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
if (p->ainsn.boostable >= 0) {
ret = try_to_emulate(p, regs);
@@ -611,60 +605,6 @@ unsigned long arch_deref_entry_point(void *entry)
}
NOKPROBE_SYMBOL(arch_deref_entry_point);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
- /* setup return addr to the jprobe handler routine */
- regs->nip = arch_deref_entry_point(jp->entry);
-#ifdef PPC64_ELF_ABI_v2
- regs->gpr[12] = (unsigned long)jp->entry;
-#elif defined(PPC64_ELF_ABI_v1)
- regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
-#endif
-
- /*
- * jprobes use jprobe_return() which skips the normal return
- * path of the function, and this messes up the accounting of the
- * function graph tracer.
- *
- * Pause function graph tracing while performing the jprobe function.
- */
- pause_graph_tracing();
-
- return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void __used jprobe_return(void)
-{
- asm volatile("jprobe_return_trap:\n"
- "trap\n"
- ::: "memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) {
- pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n",
- regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap"));
- return 0;
- }
-
- memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
- /* It's OK to start function graph tracing again */
- unpause_graph_tracing();
- preempt_enable_no_resched();
- return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
index e100ff324a85..c005088f6c9c 100644
--- a/arch/powerpc/kernel/kvm_emul.S
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -23,6 +23,7 @@
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#define KVM_MAGIC_PAGE (-4096)
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 6408f09dbbd9..6e7dbb7d527c 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -45,6 +45,7 @@
#include <asm/ppc_asm.h>
#include <asm/cache.h>
#include <asm/page.h>
+#include <asm/feature-fixups.h>
/* Usage:
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 936c7e2d421e..63f5a9311a29 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/ftrace.h>
+#include <asm/kdump.h>
#include <asm/machdep.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
@@ -188,7 +189,12 @@ void __init reserve_crashkernel(void)
(unsigned long)(crashk_res.start >> 20),
(unsigned long)(memblock_phys_mem_size() >> 20));
- memblock_reserve(crashk_res.start, crash_size);
+ if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
+ memblock_reserve(crashk_res.start, crash_size)) {
+ pr_err("Failed to reserve memory for crashkernel!\n");
+ crashk_res.start = crashk_res.end = 0;
+ return;
+ }
}
int overlaps_crashkernel(unsigned long start, unsigned long size)
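
The reworked reservation refuses a crashkernel region that is not backed by real memory and zeroes crashk_res on any failure, so later consumers see a clean "no crashkernel" state instead of a dangling resource. A hedged sketch of the validate-then-reserve idiom (memblock_is_region_memory() and memblock_reserve() are the real memblock API; the wrapper is illustrative):

    static int reserve_checked(phys_addr_t base, phys_addr_t size,
                               struct resource *res)
    {
            if (!memblock_is_region_memory(base, size) ||
                memblock_reserve(base, size)) {
                    res->start = res->end = 0;      /* roll back cleanly */
                    return -EBUSY;
            }
            return 0;
    }
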
diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
index 0bd23dc789a4..c77e95e9b384 100644
--- a/arch/powerpc/kernel/machine_kexec_file_64.c
+++ b/arch/powerpc/kernel/machine_kexec_file_64.c
@@ -269,18 +269,14 @@ int setup_new_fdt(const struct kimage *image, void *fdt,
ret = fdt_setprop_u64(fdt, chosen_node,
"linux,initrd-start",
initrd_load_addr);
- if (ret < 0) {
- pr_err("Error setting up the new device tree.\n");
- return -EINVAL;
- }
+ if (ret < 0)
+ goto err;
/* initrd-end is the first address after the initrd image. */
ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end",
initrd_load_addr + initrd_len);
- if (ret < 0) {
- pr_err("Error setting up the new device tree.\n");
- return -EINVAL;
- }
+ if (ret < 0)
+ goto err;
ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len);
if (ret) {
@@ -292,10 +288,8 @@ int setup_new_fdt(const struct kimage *image, void *fdt,
if (cmdline != NULL) {
ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline);
- if (ret < 0) {
- pr_err("Error setting up the new device tree.\n");
- return -EINVAL;
- }
+ if (ret < 0)
+ goto err;
} else {
ret = fdt_delprop(fdt, chosen_node, "bootargs");
if (ret && ret != -FDT_ERR_NOTFOUND) {
@@ -311,10 +305,12 @@ int setup_new_fdt(const struct kimage *image, void *fdt,
}
ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0);
- if (ret) {
- pr_err("Error setting up the new device tree.\n");
- return -EINVAL;
- }
+ if (ret)
+ goto err;
return 0;
+
+err:
+ pr_err("Error setting up the new device tree.\n");
+ return -EINVAL;
}
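
The fdt_setprop* failure paths above now funnel into a single err: label, the usual kernel idiom when several steps share one error message and return code. Reduced to a hedged sketch with hypothetical helpers:

    int setup_sketch(void *fdt)                     /* illustrative only */
    {
            int ret;

            ret = set_first_prop(fdt);              /* hypothetical helper */
            if (ret < 0)
                    goto err;
            ret = set_second_prop(fdt);             /* hypothetical helper */
            if (ret < 0)
                    goto err;
            return 0;
    err:
            pr_err("Error setting up the new device tree.\n");
            return -EINVAL;
    }
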
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 38c5b4764bfe..3497c8329c1d 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -62,11 +62,8 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
#ifdef CONFIG_PPC_BOOK3S_64
static void flush_and_reload_slb(void)
{
- struct slb_shadow *slb;
- unsigned long i, n;
-
/* Invalidate all SLBs */
- asm volatile("slbmte %0,%0; slbia" : : "r" (0));
+ slb_flush_all_realmode();
#ifdef CONFIG_KVM_BOOK3S_HANDLER
/*
@@ -76,22 +73,17 @@ static void flush_and_reload_slb(void)
if (get_paca()->kvm_hstate.in_guest)
return;
#endif
-
- /* For host kernel, reload the SLBs from shadow SLB buffer. */
- slb = get_slb_shadow();
- if (!slb)
+ if (early_radix_enabled())
return;
- n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
-
- /* Load up the SLB entries from shadow SLB */
- for (i = 0; i < n; i++) {
- unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
- unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
+ /*
+ * This probably shouldn't happen, but it may be possible that it's
+ * called in early boot before SLB shadows are allocated.
+ */
+ if (!get_slb_shadow())
+ return;
- rb = (rb & ~0xFFFul) | i;
- asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
- }
+ slb_restore_bolted_realmode();
}
#endif
@@ -257,12 +249,12 @@ static const struct mce_derror_table mce_p7_derror_table[] = {
{ 0x00000400, true,
MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000080, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0x00000080, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000040, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
@@ -290,12 +282,12 @@ static const struct mce_derror_table mce_p8_derror_table[] = {
{ 0x00000200, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, /* SECONDARY ERAT */
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000080, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0x00000080, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0, false, 0, 0, 0, 0 } };
static const struct mce_derror_table mce_p9_derror_table[] = {
@@ -320,12 +312,12 @@ static const struct mce_derror_table mce_p9_derror_table[] = {
{ 0x00000200, false,
MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000080, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0x00000080, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000040, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
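
All three tables move the SLB multi-hit entry (DSISR bit 0x00000080) ahead of the parity entry (0x00000100). The tables are matched in order and the first entry whose bit is set in DSISR wins, so when both bits are raised the multi-hit classification, which can be recovered by flushing and reloading the SLB, is now preferred over parity. The matching loop, in an abridged and illustrative shape:

    for (i = 0; table[i].dsisr_value; i++) {
            if (!(dsisr & table[i].dsisr_value))
                    continue;
            /* first match wins: with 0x80 and 0x100 both set, MULTIHIT
             * must appear before PARITY to be chosen */
            classify(table[i].error_type, table[i].error_subtype);
            break;
    }
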
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 3f7a9a2d2435..695b24a2d954 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -34,6 +34,7 @@
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
.text
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index fa267e94090a..262ba9481781 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -28,6 +28,7 @@
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
.text
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 1b3c6835e730..77371c9ef3d8 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -72,13 +72,15 @@ int module_finalize(const Elf_Ehdr *hdr,
do_feature_fixups(powerpc_firmware_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
+#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
if (sect != NULL)
do_barrier_nospec_fixups_range(barrier_nospec_enabled,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
-#endif
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
sect = find_section(hdr, sechdrs, "__lwsync_fixup");
if (sect != NULL)
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index fe9733ffffaa..88e4f69a09e5 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -42,6 +42,8 @@
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
+#include "../../../drivers/pci/pci.h"
+
/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);
@@ -366,9 +368,6 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
-#ifdef DEBUG
- memset(&oirq, 0xff, sizeof(oirq));
-#endif
/* Try to get a mapping from the device-tree */
virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
if (virq <= 0) {
@@ -1014,7 +1013,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
/* Cardbus can call us to add new devices to a bus, so ignore
* those who are already fully discovered
*/
- if (dev->is_added)
+ if (pci_dev_is_added(dev))
continue;
pcibios_setup_device(dev);
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index 8afbe213d729..6d1b42ee797c 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -12,6 +12,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/asm-compat.h>
/*
* Grab the register values as they are now.
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9ef4aea9fffe..913c5725cdb2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -583,6 +583,7 @@ static void save_all(struct task_struct *tsk)
__giveup_spe(tsk);
msr_check_and_clear(msr_all_available);
+ thread_pkey_regs_save(&tsk->thread);
}
void flush_all_to_thread(struct task_struct *tsk)
@@ -716,6 +717,13 @@ void switch_booke_debug_regs(struct debug_reg *new_debug)
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
+static void set_breakpoint(struct arch_hw_breakpoint *brk)
+{
+ preempt_disable();
+ __set_breakpoint(brk);
+ preempt_enable();
+}
+
static void set_debug_reg_defaults(struct thread_struct *thread)
{
thread->hw_brk.address = 0;
@@ -828,13 +836,6 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk)
WARN_ON_ONCE(1);
}
-void set_breakpoint(struct arch_hw_breakpoint *brk)
-{
- preempt_disable();
- __set_breakpoint(brk);
- preempt_enable();
-}
-
/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
@@ -866,8 +867,7 @@ static inline bool tm_enabled(struct task_struct *tsk)
return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}
-static void tm_reclaim_thread(struct thread_struct *thr,
- struct thread_info *ti, uint8_t cause)
+static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
/*
* Use the current MSR TM suspended bit to track if we have
@@ -914,7 +914,7 @@ static void tm_reclaim_thread(struct thread_struct *thr,
void tm_reclaim_current(uint8_t cause)
{
tm_enable();
- tm_reclaim_thread(&current->thread, current_thread_info(), cause);
+ tm_reclaim_thread(&current->thread, cause);
}
static inline void tm_reclaim_task(struct task_struct *tsk)
@@ -945,7 +945,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
thr->regs->ccr, thr->regs->msr,
thr->regs->trap);
- tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
+ tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
TM_DEBUG("--- tm_reclaim on pid %d complete\n",
tsk->pid);
@@ -1250,17 +1250,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
* mappings. If the new process has the foreign real address
* mappings, we must issue a cp_abort to clear any state and
* prevent snooping, corruption or a covert channel.
- *
- * DD1 allows paste into normal system memory so we do an
- * unpaired copy, rather than cp_abort, to clear the buffer,
- * since cp_abort is quite expensive.
*/
- if (current_thread_info()->task->thread.used_vas) {
+ if (current_thread_info()->task->thread.used_vas)
asm volatile(PPC_CP_ABORT);
- } else if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- asm volatile(PPC_COPY(%0, %1)
- : : "r"(dummy_copy_buffer), "r"(0));
- }
}
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -1307,6 +1299,38 @@ static void show_instructions(struct pt_regs *regs)
pr_cont("\n");
}
+void show_user_instructions(struct pt_regs *regs)
+{
+ unsigned long pc;
+ int i;
+
+ pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
+
+ pr_info("%s[%d]: code: ", current->comm, current->pid);
+
+ for (i = 0; i < instructions_to_print; i++) {
+ int instr;
+
+ if (!(i % 8) && (i > 0)) {
+ pr_cont("\n");
+ pr_info("%s[%d]: code: ", current->comm, current->pid);
+ }
+
+ if (probe_kernel_address((unsigned int __user *)pc, instr)) {
+ pr_cont("XXXXXXXX ");
+ } else {
+ if (regs->nip == pc)
+ pr_cont("<%08x> ", instr);
+ else
+ pr_cont("%08x ", instr);
+ }
+
+ pc += sizeof(int);
+ }
+
+ pr_cont("\n");
+}
+
struct regbit {
unsigned long bit;
const char *name;
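
A worked example of the windowing in show_user_instructions() above, under the assumption that instructions_to_print is 16 (the value the kernel-side dump in this file uses; an assumption here):

    /* pc = nip - (16 * 3 / 4) * sizeof(int) = nip - 48, so the dump
     * covers [nip - 48, nip + 12]: twelve instructions before nip,
     * nip itself (bracketed as <%08x>), then three more. */
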
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 05e7fb47a7a4..c4d7078e5295 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
-#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
@@ -440,6 +439,29 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
return 1;
}
+/*
+ * Compare the range against the max mem limit and update the
+ * size if it crosses the limit.
+ */
+
+#ifdef CONFIG_SPARSEMEM
+static bool validate_mem_limit(u64 base, u64 *size)
+{
+ u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);
+
+ if (base >= max_mem)
+ return false;
+ if ((base + *size) > max_mem)
+ *size = max_mem - base;
+ return true;
+}
+#else
+static bool validate_mem_limit(u64 base, u64 *size)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_PPC_PSERIES
/*
* Interpret the ibm dynamic reconfiguration memory LMBs.
@@ -494,7 +516,8 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
}
DBG("Adding: %llx -> %llx\n", base, size);
- memblock_add(base, size);
+ if (validate_mem_limit(base, &size))
+ memblock_add(base, size);
} while (--rngs);
}
#endif /* CONFIG_PPC_PSERIES */
@@ -548,8 +571,10 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
}
/* Add the chunk to the MEMBLOCK list */
- if (add_mem_to_memblock)
- memblock_add(base, size);
+ if (add_mem_to_memblock) {
+ if (validate_mem_limit(base, &size))
+ memblock_add(base, size);
+ }
}
static void __init early_reserve_mem_dt(void)
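
validate_mem_limit() clamps a range that crosses the maximum addressable memory rather than rejecting it, and only drops ranges that start wholly beyond the limit. A worked example, assuming MAX_PHYSMEM_BITS is 46 so max_mem is 64 TiB (the exact value depends on configuration):

    u64 base = 63ULL << 40;                 /* 63 TiB: below the limit */
    u64 size = 2ULL << 40;                  /* would end at 65 TiB */

    if (validate_mem_limit(base, &size))    /* true; size is clamped */
            memblock_add(base, size);       /* adds 63..64 TiB, i.e. 1 TiB */
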
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5425dd3d6a9f..9b38a2e5dd35 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
-#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
@@ -629,7 +628,7 @@ static void __init early_cmdline_parse(void)
const char *opt;
char *p;
- int l = 0;
+ int l __maybe_unused = 0;
prom_cmd_line[0] = 0;
p = prom_cmd_line;
@@ -1422,7 +1421,10 @@ static void __init reserve_mem(u64 base, u64 size)
static void __init prom_init_mem(void)
{
phandle node;
- char *path, type[64];
+#ifdef DEBUG_PROM
+ char *path;
+#endif
+ char type[64];
unsigned int plen;
cell_t *p, *endp;
__be32 val;
@@ -1443,7 +1445,9 @@ static void __init prom_init_mem(void)
prom_debug("root_size_cells: %x\n", rsc);
prom_debug("scanning memory:\n");
+#ifdef DEBUG_PROM
path = prom_scratch;
+#endif
for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
@@ -2102,8 +2106,6 @@ static void __init prom_init_stdout(void)
stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
if (stdout_node != PROM_ERROR) {
val = cpu_to_be32(stdout_node);
- prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
- &val, sizeof(val));
/* If it's a display, note it */
memset(type, 0, sizeof(type));
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index a8b277362931..f6f469fc4073 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -8,6 +8,8 @@
#include <linux/device.h>
#include <linux/seq_buf.h>
+#include <asm/asm-prototypes.h>
+#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>
@@ -15,7 +17,15 @@
unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+enum count_cache_flush_type {
+ COUNT_CACHE_FLUSH_NONE = 0x1,
+ COUNT_CACHE_FLUSH_SW = 0x2,
+ COUNT_CACHE_FLUSH_HW = 0x4,
+};
+static enum count_cache_flush_type count_cache_flush_type;
+
bool barrier_nospec_enabled;
+static bool no_nospec;
static void enable_barrier_nospec(bool enable)
{
@@ -42,8 +52,17 @@ void setup_barrier_nospec(void)
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
- enable_barrier_nospec(enable);
+ if (!no_nospec)
+ enable_barrier_nospec(enable);
+}
+
+static int __init handle_nospectre_v1(char *p)
+{
+ no_nospec = true;
+
+ return 0;
}
+early_param("nospectre_v1", handle_nospectre_v1);
#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
@@ -82,6 +101,7 @@ static __init int barrier_nospec_debugfs_init(void)
device_initcall(barrier_nospec_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
+#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
bool thread_priv;
@@ -114,51 +134,72 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
return sprintf(buf, "Vulnerable\n");
}
+#endif
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
- if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
- return sprintf(buf, "Not affected\n");
+ struct seq_buf s;
- if (barrier_nospec_enabled)
- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ seq_buf_init(&s, buf, PAGE_SIZE - 1);
- return sprintf(buf, "Vulnerable\n");
+ if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
+ if (barrier_nospec_enabled)
+ seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
+ else
+ seq_buf_printf(&s, "Vulnerable");
+
+ if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
+ seq_buf_printf(&s, ", ori31 speculation barrier enabled");
+
+ seq_buf_printf(&s, "\n");
+ } else
+ seq_buf_printf(&s, "Not affected\n");
+
+ return s.len;
}
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
- bool bcs, ccd, ori;
struct seq_buf s;
+ bool bcs, ccd;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
- ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
- if (bcs || ccd) {
+ if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+ bool comma = false;
seq_buf_printf(&s, "Mitigation: ");
- if (bcs)
+ if (bcs) {
seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
+ comma = true;
+ }
+
+ if (ccd) {
+ if (comma)
+ seq_buf_printf(&s, ", ");
+ seq_buf_printf(&s, "Indirect branch cache disabled");
+ comma = true;
+ }
- if (bcs && ccd)
+ if (comma)
seq_buf_printf(&s, ", ");
- if (ccd)
- seq_buf_printf(&s, "Indirect branch cache disabled");
+ seq_buf_printf(&s, "Software count cache flush");
+
+ if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+ seq_buf_printf(&s, " (hardware accelerated)");
} else
seq_buf_printf(&s, "Vulnerable");
- if (ori)
- seq_buf_printf(&s, ", ori31 speculation barrier enabled");
-
seq_buf_printf(&s, "\n");
return s.len;
}
+#ifdef CONFIG_PPC_BOOK3S_64
/*
* Store-forwarding barrier support.
*/
@@ -306,3 +347,71 @@ static __init int stf_barrier_debugfs_init(void)
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
+
+static void toggle_count_cache_flush(bool enable)
+{
+ if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+ patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
+ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+ pr_info("count-cache-flush: software flush disabled.\n");
+ return;
+ }
+
+ patch_branch_site(&patch__call_flush_count_cache,
+ (u64)&flush_count_cache, BRANCH_SET_LINK);
+
+ if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
+ count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
+ pr_info("count-cache-flush: full software flush sequence enabled.\n");
+ return;
+ }
+
+ patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
+ count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
+ pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
+}
+
+void setup_count_cache_flush(void)
+{
+ toggle_count_cache_flush(true);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int count_cache_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ toggle_count_cache_flush(enable);
+
+ return 0;
+}
+
+static int count_cache_flush_get(void *data, u64 *val)
+{
+ if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
+ *val = 0;
+ else
+ *val = 1;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
+ count_cache_flush_set, "%llu\n");
+
+static __init int count_cache_flush_debugfs_init(void)
+{
+ debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
+ NULL, &fops_count_cache_flush);
+ return 0;
+}
+device_initcall(count_cache_flush_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+#endif /* CONFIG_PPC_BOOK3S_64 */
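
toggle_count_cache_flush() selects between three states by patching code, rather than testing a flag on every function return. A summary of the mapping implied above (phrasing mine, not from the patch):

    /*
     * NONE: the call site is patched to a nop; no flush on return.
     * SW:   the call site branches (with link) to flush_count_cache,
     *       which runs the full software flush sequence.
     * HW:   as SW, but a blr is patched into flush_count_cache so it
     *       returns early, leaving the hardware-assisted flush to do
     *       the rest.
     */

The debugfs file flips the same switch at runtime: writing 0 or 1 to count_cache_flush under the powerpc debugfs root (typically /sys/kernel/debug/powerpc) ends up in toggle_count_cache_flush().
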
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 40b44bb53a4e..93fa0c99681e 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -972,6 +972,8 @@ void __init setup_arch(char **cmdline_p)
if (ppc_md.setup_arch)
ppc_md.setup_arch();
+ setup_barrier_nospec();
+
paging_init();
/* Initialize the MMU context management stuff. */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 74457485574b..8c507be12c3c 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -40,6 +40,10 @@
#include <asm/code-patching.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
+#include <asm/kdump.h>
+#include <asm/feature-fixups.h>
+
+#include "setup.h"
#define DBG(fmt...)
@@ -95,11 +99,10 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
* We do the initial parsing of the flat device-tree and prepare
* for the MMU to be fully initialized.
*/
-extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
-
notrace void __init machine_init(u64 dt_ptr)
{
- unsigned int *addr = &memset_nocache_branch;
+ unsigned int *addr = (unsigned int *)((unsigned long)&patch__memset_nocache +
+ patch__memset_nocache);
unsigned long insn;
/* Configure static keys first, now that we're relocated. */
@@ -108,7 +111,7 @@ notrace void __init machine_init(u64 dt_ptr)
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
- patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
+ patch_instruction_site(&patch__memcpy_nocache, PPC_INST_NOP);
insn = create_cond_branch(addr, branch_target(addr), 0x820000);
patch_instruction(addr, insn); /* replace b by bne cr0 */
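
Both sites above use the same addressing convention: a patch-site entry stores an offset relative to its own location, so the target instruction is the entry's address plus its value. The open-coded expression for patch__memset_nocache is equivalent to this hedged helper (the macro name is illustrative):

    #define PATCH_SITE_ADDR(site) \
            ((unsigned int *)((unsigned long)&(site) + (site)))

    /* e.g.: addr = PATCH_SITE_ADDR(patch__memset_nocache); */
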
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 225bc5f91049..6a501b25dd85 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -68,6 +68,7 @@
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
+#include <asm/feature-fixups.h>
#include "setup.h"
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 4794d6b4f4d2..b19d832ef386 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -423,7 +423,8 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
fn(regs);
nmi_ipi_lock();
- nmi_ipi_busy_count--;
+ if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
+ nmi_ipi_busy_count--;
out:
nmi_ipi_unlock_end(&flags);
@@ -448,29 +449,11 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe)
}
}
-void smp_flush_nmi_ipi(u64 delay_us)
-{
- unsigned long flags;
-
- nmi_ipi_lock_start(&flags);
- while (nmi_ipi_busy_count) {
- nmi_ipi_unlock_end(&flags);
- udelay(1);
- if (delay_us) {
- delay_us--;
- if (!delay_us)
- return;
- }
- nmi_ipi_lock_start(&flags);
- }
- nmi_ipi_unlock_end(&flags);
-}
-
/*
* - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
* - fn is the target callback function.
* - delay_us > 0 is the delay before giving up waiting for targets to
- * enter the handler, == 0 specifies indefinite delay.
+ * complete executing the handler, == 0 specifies indefinite delay.
*/
int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
{
@@ -507,8 +490,23 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
do_smp_send_nmi_ipi(cpu, safe);
+ nmi_ipi_lock();
+ /* nmi_ipi_busy_count is held here, so unlock/lock is okay */
while (!cpumask_empty(&nmi_ipi_pending_mask)) {
+ nmi_ipi_unlock();
udelay(1);
+ nmi_ipi_lock();
+ if (delay_us) {
+ delay_us--;
+ if (!delay_us)
+ break;
+ }
+ }
+
+ while (nmi_ipi_busy_count > 1) {
+ nmi_ipi_unlock();
+ udelay(1);
+ nmi_ipi_lock();
if (delay_us) {
delay_us--;
if (!delay_us)
@@ -516,12 +514,17 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
}
}
- nmi_ipi_lock();
if (!cpumask_empty(&nmi_ipi_pending_mask)) {
- /* Could not gather all CPUs */
+ /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
ret = 0;
cpumask_clear(&nmi_ipi_pending_mask);
}
+ if (nmi_ipi_busy_count > 1) {
+ /* Timeout waiting for CPUs to execute fn */
+ ret = 0;
+ nmi_ipi_busy_count = 1;
+ }
+
nmi_ipi_busy_count--;
nmi_ipi_unlock_end(&flags);
@@ -597,7 +600,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
* IRQs are already hard disabled by the smp_handle_nmi_ipi.
*/
nmi_ipi_lock();
- nmi_ipi_busy_count--;
+ if (nmi_ipi_busy_count > 1)
+ nmi_ipi_busy_count--;
nmi_ipi_unlock();
spin_begin();
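
__smp_send_nmi_ipi() now waits in two bounded phases: first for every target to enter the handler (nmi_ipi_pending_mask drains), then for them to finish running fn (nmi_ipi_busy_count drops back to 1, the caller's own reference). Both loops share one shape, dropping the lock around the delay so handlers can make progress; sketched here with an illustrative phase_done():

    while (!phase_done()) {
            nmi_ipi_unlock();
            udelay(1);
            nmi_ipi_lock();
            if (delay_us && !--delay_us)    /* delay_us == 0: wait forever */
                    break;
    }
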
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index 34b73a262709..7a919e9a3400 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -7,6 +7,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
+#include <asm/feature-fixups.h>
/*
* Structure for storing CPU registers on the save area.
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 82d8aae81c6a..f83bf6f72cb0 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -13,6 +13,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/feature-fixups.h>
/*
* Structure for storing CPU registers on the save area.
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index ff12f47a96b6..6bffbc5affe7 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -13,6 +13,7 @@
#include <asm/reg.h>
#include <asm/bug.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
#ifdef CONFIG_VSX
/* See fpu.S, this is borrowed from there */
@@ -312,8 +313,8 @@ _GLOBAL(tm_reclaim)
blr
- /* void __tm_recheckpoint(struct thread_struct *thread,
- * unsigned long orig_msr)
+ /*
+ * void __tm_recheckpoint(struct thread_struct *thread)
* - Restore the checkpointed register state saved by tm_reclaim
* when we switch_to a process.
*
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 9a5b5a513604..32476a6e4e9c 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -104,39 +104,13 @@ ftrace_regs_call:
bl ftrace_stub
nop
- /* Load the possibly modified NIP */
- ld r15, _NIP(r1)
-
+ /* Load ctr with the possibly modified NIP */
+ ld r3, _NIP(r1)
+ mtctr r3
#ifdef CONFIG_LIVEPATCH
- cmpd r14, r15 /* has NIP been altered? */
+ cmpd r14, r3 /* has NIP been altered? */
#endif
-#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
- /* NIP has not been altered, skip over further checks */
- beq 1f
-
- /* Check if there is an active jprobe on us */
- subi r3, r14, 4
- bl __is_active_jprobe
- nop
-
- /*
- * If r3 == 1, then this is a kprobe/jprobe.
- * else, this is livepatched function.
- *
- * The conditional branch for livepatch_handler below will use the
- * result of this comparison. For kprobe/jprobe, we just need to branch to
- * the new NIP, not call livepatch_handler. The branch below is bne, so we
- * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
- * CR0[EQ] = (r3 == 1).
- */
- cmpdi r3, 1
-1:
-#endif
-
- /* Load CTR with the possibly modified NIP */
- mtctr r15
-
/* Restore gprs */
REST_GPR(0,r1)
REST_10GPRS(2,r1)
@@ -154,10 +128,7 @@ ftrace_regs_call:
addi r1, r1, SWITCH_FRAME_SIZE
#ifdef CONFIG_LIVEPATCH
- /*
- * Based on the cmpd or cmpdi above, if the NIP was altered and we're
- * not on a kprobe/jprobe, then handle livepatch.
- */
+ /* Based on the cmpd above, if the NIP was altered handle livepatch */
bne- livepatch_handler
#endif
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0e17dcb48720..070e96f1773a 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -70,6 +70,7 @@
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
+#include <asm/stacktrace.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -96,6 +97,19 @@ EXPORT_SYMBOL(__debugger_fault_handler);
#define TM_DEBUG(x...) do { } while(0)
#endif
+static const char *signame(int signr)
+{
+ switch (signr) {
+ case SIGBUS: return "bus error";
+ case SIGFPE: return "floating point exception";
+ case SIGILL: return "illegal instruction";
+ case SIGSEGV: return "segfault";
+ case SIGTRAP: return "unhandled trap";
+ }
+
+ return "unknown signal";
+}
+
/*
* Trap & Exception support
*/
@@ -301,26 +315,44 @@ void user_single_step_siginfo(struct task_struct *tsk,
info->si_addr = (void __user *)regs->nip;
}
+static bool show_unhandled_signals_ratelimited(void)
+{
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ return show_unhandled_signals && __ratelimit(&rs);
+}
+
+static void show_signal_msg(int signr, struct pt_regs *regs, int code,
+ unsigned long addr)
+{
+ if (!show_unhandled_signals_ratelimited())
+ return;
+
+ if (!unhandled_signal(current, signr))
+ return;
+
+ pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
+ current->comm, current->pid, signame(signr), signr,
+ addr, regs->nip, regs->link, code);
+
+ print_vma_addr(KERN_CONT " in ", regs->nip);
+
+ pr_cont("\n");
+
+ show_user_instructions(regs);
+}
void _exception_pkey(int signr, struct pt_regs *regs, int code,
- unsigned long addr, int key)
+ unsigned long addr, int key)
{
siginfo_t info;
- const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
- "at %08lx nip %08lx lr %08lx code %x\n";
- const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
- "at %016lx nip %016lx lr %016lx code %x\n";
if (!user_mode(regs)) {
die("Exception in kernel mode", regs, signr);
return;
}
- if (show_unhandled_signals && unhandled_signal(current, signr)) {
- printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
- current->comm, current->pid, signr,
- addr, regs->nip, regs->link, code);
- }
+ show_signal_msg(signr, regs, code, addr);
if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
local_irq_enable();
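
With the rate limiting and unhandled_signal() test folded into show_signal_msg(), an unhandled user fault now logs one named line plus the instruction dump from show_user_instructions(). Roughly, with illustrative addresses and a process named segv:

    segv[1337]: segfault (11) at 0 nip 130c4 lr 12d10 code 1 in segv[10000000+10000]
    segv[1337]: code: 60000000 60000000 38600000 4bfffe94 <e8630000> 60000000 ...
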
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index d2205b97628c..65b3bdb99f0b 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -22,7 +22,6 @@
#include <linux/security.h>
#include <linux/memblock.h>
-#include <asm/cpu_has_feature.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
diff --git a/arch/powerpc/kernel/vdso32/note.S b/arch/powerpc/kernel/vdso32/note.S
index d4b5be4f3d5f..227a7327399e 100644
--- a/arch/powerpc/kernel/vdso32/note.S
+++ b/arch/powerpc/kernel/vdso32/note.S
@@ -5,6 +5,7 @@
#include <linux/uts.h>
#include <linux/version.h>
+#include <linux/build-salt.h>
#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \
.section name, flags; \
@@ -23,3 +24,5 @@
ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
.long LINUX_VERSION_CODE
ASM_ELF_NOTE_END
+
+BUILD_SALT
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index f314fd475491..21165da0052d 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -8,6 +8,7 @@
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/asm-compat.h>
/*
* Load state from memory into VMX registers including VSCR.
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 5baac79df97e..07ae018e550e 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -153,14 +153,16 @@ SECTIONS
*(__rfi_flush_fixup)
__stop___rfi_flush_fixup = .;
}
+#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
. = ALIGN(8);
__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
__start___barrier_nospec_fixup = .;
*(__barrier_nospec_fixup)
__stop___barrier_nospec_fixup = .;
}
-#endif
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
EXCEPTION_TABLE(0)
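
The __start/__stop symbols bracketing the new output section are how C code finds the entries at runtime; the kernel-side consumer is do_barrier_nospec_fixups(), which walks this range (modules go through the *_range variant seen earlier in this patch). The usual pattern, sketched:

    extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;

    static void walk_fixups(void)           /* illustrative walker */
    {
            long *fcur;

            for (fcur = &__start___barrier_nospec_fixup;
                 fcur < &__stop___barrier_nospec_fixup; fcur++) {
                    /* each entry locates one site to patch */
            }
    }
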
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 1d82274f7e9f..3c6ab22a0c4e 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -174,7 +174,6 @@ static void watchdog_smp_panic(int cpu, u64 tb)
continue;
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
}
- smp_flush_nmi_ipi(1000000);
}
/* Take the stuck CPUs out of the watch group */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index edaf4720d156..87348e498c89 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -28,7 +28,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 45c8ea4a0487..612169988a3d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -23,7 +23,6 @@
#include <linux/kvm_host.h>
#include <linux/highmem.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index cf9d686e8162..c92dd25bed23 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -23,7 +23,6 @@
#include <linux/kvm_host.h>
#include <linux/highmem.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7f3a8cf5d66f..3c0e8fb2b773 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -29,7 +29,6 @@
#include <linux/file.h>
#include <linux/debugfs.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 176f911ee983..0af1c0aea1fe 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -66,10 +66,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
bits = root & RPDS_MASK;
root = root & RPDB_MASK;
- /* P9 DD1 interprets RTS (radix tree size) differently */
offset = rts + 31;
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- offset -= 3;
/* current implementations only support 52-bit space */
if (offset != 52)
@@ -160,17 +157,7 @@ static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
unsigned long clr, unsigned long set,
unsigned long addr, unsigned int shift)
{
- unsigned long old = 0;
-
- if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
- pte_present(*ptep)) {
- /* have to invalidate it first */
- old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
- kvmppc_radix_tlbie_page(kvm, addr, shift);
- set |= _PAGE_PRESENT;
- old &= _PAGE_PRESENT;
- }
- return __radix_pte_update(ptep, clr, set) | old;
+ return __radix_pte_update(ptep, clr, set);
}
void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 688722acd692..066c665dc86f 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -17,6 +17,9 @@
* Authors: Alexander Graf <agraf@suse.de>
*/
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
#define SHADOW_SLB_ENTRY_LEN 0x10
#define OFFSET_ESID(x) (SHADOW_SLB_ENTRY_LEN * x)
#define OFFSET_VSID(x) ((SHADOW_SLB_ENTRY_LEN * x) + 8)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index d066e37551ec..9a3f2646ecc7 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -31,7 +31,6 @@
#include <linux/iommu.h>
#include <linux/file.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
@@ -180,7 +179,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if ((tbltmp->it_page_shift <= stt->page_shift) &&
(tbltmp->it_offset << tbltmp->it_page_shift ==
stt->offset << stt->page_shift) &&
- (tbltmp->it_size << tbltmp->it_page_shift ==
+ (tbltmp->it_size << tbltmp->it_page_shift >=
stt->size << stt->page_shift)) {
/*
* Reference the table to avoid races with
@@ -296,7 +295,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
{
struct kvmppc_spapr_tce_table *stt = NULL;
struct kvmppc_spapr_tce_table *siter;
- unsigned long npages, size;
+ unsigned long npages, size = args->size;
int ret = -ENOMEM;
int i;
@@ -304,7 +303,6 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
return -EINVAL;
- size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
npages = kvmppc_tce_pages(size);
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
if (ret)
@@ -378,19 +376,19 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
{
struct mm_iommu_table_group_mem_t *mem = NULL;
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
if (!pua)
/* it_userspace allocation might be delayed */
return H_TOO_HARD;
- mem = mm_iommu_lookup(kvm->mm, *pua, pgsize);
+ mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
if (!mem)
return H_TOO_HARD;
mm_iommu_mapped_dec(mem);
- *pua = 0;
+ *pua = cpu_to_be64(0);
return H_SUCCESS;
}
@@ -437,7 +435,8 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
enum dma_data_direction dir)
{
long ret;
- unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ unsigned long hpa;
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
struct mm_iommu_table_group_mem_t *mem;
if (!pua)
@@ -449,7 +448,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
return H_TOO_HARD;
- if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
+ if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
return H_HARDWARE;
if (mm_iommu_mapped_inc(mem))
@@ -464,7 +463,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (dir != DMA_NONE)
kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
- *pua = ua;
+ *pua = cpu_to_be64(ua);
return 0;
}
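
The it_userspace entries are now declared __be64 and converted on every access, so sparse can flag any raw load or store; the real-mode variant below makes the matching change through the _RM accessor instead of translating the pointer via vmalloc_to_phys() by hand. The accessor pattern, extracted:

    __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

    unsigned long ua = be64_to_cpu(*pua);   /* read */
    *pua = cpu_to_be64(0);                  /* clear */
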
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 925fc316a104..506a4d400458 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -26,8 +26,8 @@
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
+#include <linux/stringify.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
@@ -200,23 +200,19 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
{
struct mm_iommu_table_group_mem_t *mem = NULL;
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
if (!pua)
/* it_userspace allocation might be delayed */
return H_TOO_HARD;
- pua = (void *) vmalloc_to_phys(pua);
- if (WARN_ON_ONCE_RM(!pua))
- return H_HARDWARE;
-
- mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
+ mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
if (!mem)
return H_TOO_HARD;
mm_iommu_mapped_dec(mem);
- *pua = 0;
+ *pua = cpu_to_be64(0);
return H_SUCCESS;
}
@@ -268,7 +264,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
{
long ret;
unsigned long hpa = 0;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
struct mm_iommu_table_group_mem_t *mem;
if (!pua)
@@ -279,11 +275,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (!mem)
return H_TOO_HARD;
- if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
- return H_HARDWARE;
-
- pua = (void *) vmalloc_to_phys(pua);
- if (WARN_ON_ONCE_RM(!pua))
+ if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
+ &hpa)))
return H_HARDWARE;
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
@@ -302,7 +295,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (dir != DMA_NONE)
kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
- *pua = ua;
+ *pua = cpu_to_be64(ua);
return 0;
}
@@ -469,7 +462,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
if (mem)
- prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+ prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
+ IOMMU_PAGE_SHIFT_4K, &tces) == 0;
}
if (!prereg) {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de686b340f4a..574fc1dcb2bf 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -53,7 +53,6 @@
#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
@@ -128,14 +127,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
* and SPURR count and should be set according to the number of
* online threads in the vcore being run.
*/
-#define RWMR_RPA_P8_1THREAD 0x164520C62609AECA
-#define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9
-#define RWMR_RPA_P8_3THREAD 0x164520C62609AECA
-#define RWMR_RPA_P8_4THREAD 0x199A421245058DA9
-#define RWMR_RPA_P8_5THREAD 0x164520C62609AECA
-#define RWMR_RPA_P8_6THREAD 0x164520C62609AECA
-#define RWMR_RPA_P8_7THREAD 0x164520C62609AECA
-#define RWMR_RPA_P8_8THREAD 0x164520C62609AECA
+#define RWMR_RPA_P8_1THREAD 0x164520C62609AECAUL
+#define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9UL
+#define RWMR_RPA_P8_3THREAD 0x164520C62609AECAUL
+#define RWMR_RPA_P8_4THREAD 0x199A421245058DA9UL
+#define RWMR_RPA_P8_5THREAD 0x164520C62609AECAUL
+#define RWMR_RPA_P8_6THREAD 0x164520C62609AECAUL
+#define RWMR_RPA_P8_7THREAD 0x164520C62609AECAUL
+#define RWMR_RPA_P8_8THREAD 0x164520C62609AECAUL
static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
RWMR_RPA_P8_1THREAD,
@@ -216,7 +215,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
wqp = kvm_arch_vcpu_wq(vcpu);
if (swq_has_sleeper(wqp)) {
- swake_up(wqp);
+ swake_up_one(wqp);
++vcpu->stat.halt_wakeup;
}
@@ -1693,14 +1692,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
break;
case KVM_REG_PPC_TB_OFFSET:
- /*
- * POWER9 DD1 has an erratum where writing TBU40 causes
- * the timebase to lose ticks. So we don't let the
- * timebase offset be changed on P9 DD1. (It is
- * initialized to zero.)
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- break;
/* round up to multiple of 2^24 */
vcpu->arch.vcore->tb_offset =
ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -1816,7 +1807,7 @@ static int threads_per_vcore(struct kvm *kvm)
return threads_per_subcore;
}
-static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
+static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
{
struct kvmppc_vcore *vcore;
@@ -1830,7 +1821,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
init_swait_queue_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
- vcore->first_vcpuid = core * kvm->arch.smt_mode;
+ vcore->first_vcpuid = id;
vcore->kvm = kvm;
INIT_LIST_HEAD(&vcore->preempt_list);
@@ -2026,8 +2017,6 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
/*
* Set the default HFSCR for the guest from the host value.
* This value is only used on POWER9.
- * On POWER9 DD1, TM doesn't work, so we make sure to
- * prevent the guest from using it.
* On POWER9, we want to virtualize the doorbell facility, so we
* turn off the HFSCR bit, which causes those instructions to trap.
*/
@@ -2048,12 +2037,26 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
mutex_lock(&kvm->lock);
vcore = NULL;
err = -EINVAL;
- core = id / kvm->arch.smt_mode;
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
+ pr_devel("KVM: VCPU ID too high\n");
+ core = KVM_MAX_VCORES;
+ } else {
+ BUG_ON(kvm->arch.smt_mode != 1);
+ core = kvmppc_pack_vcpu_id(kvm, id);
+ }
+ } else {
+ core = id / kvm->arch.smt_mode;
+ }
if (core < KVM_MAX_VCORES) {
vcore = kvm->arch.vcores[core];
- if (!vcore) {
+ if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
+ pr_devel("KVM: collision on id %u", id);
+ vcore = NULL;
+ } else if (!vcore) {
err = -ENOMEM;
- vcore = kvmppc_vcore_create(kvm, core);
+ vcore = kvmppc_vcore_create(kvm,
+ id & ~(kvm->arch.smt_mode - 1));
kvm->arch.vcores[core] = vcore;
kvm->arch.online_vcores++;
}
@@ -3188,7 +3191,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
}
}
- prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
if (kvmppc_vcore_check_block(vc)) {
finish_swait(&vc->wq, &wait);
@@ -3311,7 +3314,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_start_thread(vcpu, vc);
trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
- swake_up(&vc->wq);
+ swake_up_one(&vc->wq);
}
}
@@ -4561,6 +4564,8 @@ static int kvmppc_book3s_init_hv(void)
pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
return -ENODEV;
}
+ /* presence of intc confirmed - node can be dropped again */
+ of_node_put(np);
}
#endif
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index d4a3f4da409b..fc6bb9630a9c 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -77,7 +77,7 @@ struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
- GFP_KERNEL);
+ false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 82f2ff9410b6..666b91c79eb4 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -27,6 +27,8 @@
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/ppc-opcode.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
/*****************************************************************************
* *
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 1f22d9e977d4..a67cf1cdeda4 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/log2.h>
-#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 153988d878e8..1d14046124a0 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,6 +32,8 @@
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg) \
@@ -917,9 +919,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
mtspr SPRN_PID, r7
mtspr SPRN_WORT, r8
BEGIN_FTR_SECTION
- PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-BEGIN_FTR_SECTION
/* POWER8-only registers */
ld r5, VCPU_TCSCR(r4)
ld r6, VCPU_ACOP(r4)
@@ -1912,7 +1911,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r5, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r5)
cmpwi cr2, r0, 0
- beq cr2, 4f
+ beq cr2, 2f
/*
* Radix: do eieio; tlbsync; ptesync sequence in case we
@@ -1952,11 +1951,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
bdnz 1b
ptesync
-2: /* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
- PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-4:
+2:
#endif /* CONFIG_PPC_RADIX_MMU */
/*
@@ -3367,11 +3362,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_CIABR, r0
mtspr SPRN_DAWRX, r0
- /* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
- PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-
BEGIN_MMU_FTR_SECTION
b 4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index c18e845019ec..d71dab16dc6f 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -23,6 +23,7 @@
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
+#include <asm/asm-compat.h>
#if defined(CONFIG_PPC_BOOK3S_64)
#ifdef PPC64_ELF_ABI_v2
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c3b8006f0eac..47ee43bbd696 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -27,7 +27,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 34a5adeff084..b0089e04c8c8 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -23,6 +23,7 @@
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/exception-64s.h>
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 98ccc7ec5d48..e5c542a7c5ac 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -19,6 +19,9 @@
/* Real mode helpers */
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
#if defined(CONFIG_PPC_BOOK3S_64)
#define GET_SHADOW_VCPU(reg) \
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index f9818d7d3381..126f02b3ffb8 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -317,6 +317,11 @@ static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
return -EBUSY;
}
+static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
+{
+ return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
+}
+
static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
struct kvmppc_xive_src_block *sb,
struct kvmppc_xive_irq_state *state)
@@ -362,7 +367,7 @@ static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
*/
if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
xive_native_configure_irq(hw_num,
- xive->vp_base + state->act_server,
+ xive_vp(xive, state->act_server),
MASKED, state->number);
/* set old_p so we can track if an H_EOI was done */
state->old_p = true;
@@ -418,7 +423,7 @@ static void xive_finish_unmask(struct kvmppc_xive *xive,
*/
if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
xive_native_configure_irq(hw_num,
- xive->vp_base + state->act_server,
+ xive_vp(xive, state->act_server),
state->act_priority, state->number);
/* If an EOI is needed, do it here */
if (!state->old_p)
@@ -495,7 +500,7 @@ static int xive_target_interrupt(struct kvm *kvm,
kvmppc_xive_select_irq(state, &hw_num, NULL);
return xive_native_configure_irq(hw_num,
- xive->vp_base + server,
+ xive_vp(xive, server),
prio, state->number);
}
@@ -883,7 +888,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
* which is fine for a never started interrupt.
*/
xive_native_configure_irq(hw_irq,
- xive->vp_base + state->act_server,
+ xive_vp(xive, state->act_server),
state->act_priority, state->number);
/*
@@ -959,7 +964,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
/* Reconfigure the IPI */
xive_native_configure_irq(state->ipi_number,
- xive->vp_base + state->act_server,
+ xive_vp(xive, state->act_server),
state->act_priority, state->number);
/*
@@ -1084,7 +1089,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
pr_devel("Duplicate !\n");
return -EEXIST;
}
- if (cpu >= KVM_MAX_VCPUS) {
+ if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
pr_devel("Out of bounds !\n");
return -EINVAL;
}
@@ -1098,7 +1103,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
xc->xive = xive;
xc->vcpu = vcpu;
xc->server_num = cpu;
- xc->vp_id = xive->vp_base + cpu;
+ xc->vp_id = xive_vp(xive, cpu);
xc->mfrr = 0xff;
xc->valid = true;
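The new xive_vp() helper routes every server-number-to-VP translation through the VCPU ID packing, so all of the xive_native_configure_irq() call sites stay consistent. A sketch of the helper's shape, again with a hypothetical stand-in for the packing function:

/* Sketch: vp_base plus the packed server number. pack_vcpu_id() is
 * a stand-in for kvmppc_pack_vcpu_id(), defined elsewhere. */
#include <stdint.h>

struct xive_sketch {
	uint32_t vp_base;
};

static uint32_t pack_vcpu_id(uint32_t server)
{
	return server;	/* hypothetical; real packing depends on SMT mode */
}

static uint32_t xive_vp_sketch(struct xive_sketch *xive, uint32_t server)
{
	/* was: xive->vp_base + server, which breaks once IDs are packed */
	return xive->vp_base + pack_vcpu_id(server);
}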
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 6e41ba7ec8f4..4171ede8722b 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -25,18 +25,6 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
*/
eieio();
- /*
- * DD1 bug workaround: If PIPR is less favored than CPPR
- * ignore the interrupt or we might incorrectly lose an IPB
- * bit.
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
- u8 pipr = be64_to_cpu(qw1) & 0xff;
- if (pipr >= xc->hw_cppr)
- return;
- }
-
/* Perform the acknowledge OS to register cycle. */
ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
@@ -89,8 +77,15 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
/* If the XIVE supports the new "store EOI facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
- else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
+ else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
opal_int_eoi(hw_irq);
+ else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
+ /*
+ * For LSIs the HW EOI cycle is used rather than PQ bits,
+ * as they are automatically re-triggered in HW when still
+ * pending.
+ */
+ __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
} else {
uint64_t eoi_val;
@@ -102,20 +97,12 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
*
* This allows us to then do a re-trigger if Q was set
* rather than synthesizing an interrupt in software
- *
- * For LSIs, using the HW EOI cycle works around a problem
- * on P9 DD1 PHBs where the other ESB accesses don't work
- * properly.
*/
- if (xd->flags & XIVE_IRQ_FLAG_LSI)
- __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
- else {
- eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
-
- /* Re-trigger if needed */
- if ((eoi_val & 1) && __x_trig_page(xd))
- __x_writeq(0, __x_trig_page(xd));
- }
+ eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
+
+ /* Re-trigger if needed */
+ if ((eoi_val & 1) && __x_trig_page(xd))
+ __x_writeq(0, __x_trig_page(xd));
}
}
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index f9f6468f4171..afd3c255a427 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -21,7 +21,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include "../mm/mmu_decl.h"
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index d0b6b5788afc..d31645491a93 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -21,7 +21,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index afde788be141..75dce1ef3bc8 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -106,7 +106,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
* if mmio_vsx_tx_sx_enabled == 1, copy data between
* VSR[32..63] and memory
*/
- vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
vcpu->arch.mmio_vsx_copy_nums = 0;
vcpu->arch.mmio_vsx_offset = 0;
vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
@@ -242,8 +241,8 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
emulated = kvmppc_handle_vsx_load(run, vcpu,
- KVM_MMIO_REG_VSX | (op.reg & 0x1f),
- io_size_each, 1, op.type & SIGNEXT);
+ KVM_MMIO_REG_VSX|op.reg, io_size_each,
+ 1, op.type & SIGNEXT);
break;
}
#endif
@@ -363,7 +362,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
emulated = kvmppc_handle_vsx_store(run, vcpu,
- op.reg & 0x1f, io_size_each, 1);
+ op.reg, io_size_each, 1);
break;
}
#endif
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0e8c20c5eaac..eba5756d5b41 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -33,7 +33,6 @@
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
-#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
@@ -880,10 +879,10 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
- val.vval = VCPU_VSX_VR(vcpu, index);
+ if (index >= 32) {
+ val.vval = VCPU_VSX_VR(vcpu, index - 32);
val.vsxval[offset] = gpr;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ VCPU_VSX_VR(vcpu, index - 32) = val.vval;
} else {
VCPU_VSX_FPR(vcpu, index, offset) = gpr;
}
@@ -895,11 +894,11 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
union kvmppc_one_reg val;
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
- if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
- val.vval = VCPU_VSX_VR(vcpu, index);
+ if (index >= 32) {
+ val.vval = VCPU_VSX_VR(vcpu, index - 32);
val.vsxval[0] = gpr;
val.vsxval[1] = gpr;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ VCPU_VSX_VR(vcpu, index - 32) = val.vval;
} else {
VCPU_VSX_FPR(vcpu, index, 0) = gpr;
VCPU_VSX_FPR(vcpu, index, 1) = gpr;
@@ -912,12 +911,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
union kvmppc_one_reg val;
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
- if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ if (index >= 32) {
val.vsx32val[0] = gpr;
val.vsx32val[1] = gpr;
val.vsx32val[2] = gpr;
val.vsx32val[3] = gpr;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ VCPU_VSX_VR(vcpu, index - 32) = val.vval;
} else {
val.vsx32val[0] = gpr;
val.vsx32val[1] = gpr;
@@ -937,10 +936,10 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
- val.vval = VCPU_VSX_VR(vcpu, index);
+ if (index >= 32) {
+ val.vval = VCPU_VSX_VR(vcpu, index - 32);
val.vsx32val[offset] = gpr32;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ VCPU_VSX_VR(vcpu, index - 32) = val.vval;
} else {
dword_offset = offset / 2;
word_offset = offset % 2;
@@ -1361,10 +1360,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
break;
}
- if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ if (rs < 32) {
*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs);
+ reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
*val = reg.vsxval[vsx_offset];
}
break;
@@ -1378,13 +1377,13 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
break;
}
- if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ if (rs < 32) {
dword_offset = vsx_offset / 2;
word_offset = vsx_offset % 2;
reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
*val = reg.vsx32val[word_offset];
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs);
+ reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
*val = reg.vsx32val[vsx_offset];
}
break;
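The removed mmio_vsx_tx_sx_enabled flag is replaced by carrying the full 6-bit VSX register number through io_gpr: VSRs 0..31 overlap the FP registers and VSRs 32..63 overlap the vector registers, so index >= 32 now selects the VR file directly. A quick model of the mapping assumed by the checks above:

/* Model of the VSX register file split assumed by the checks above. */
#include <stdio.h>

static void vsx_backing(int index, const char **file, int *sub)
{
	if (index >= 32) {
		*file = "VR";		/* vector register file */
		*sub = index - 32;
	} else {
		*file = "FPR";		/* floating-point register file */
		*sub = index;
	}
}

int main(void)
{
	const char *file;
	int sub;

	vsx_backing(40, &file, &sub);
	printf("VSR40 -> %s%d\n", file, sub);	/* VR8 */
	return 0;
}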
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index d0ca13ad8231..670286808928 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -12,7 +12,7 @@ CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
obj-y += string.o alloc.o code-patching.o feature-fixups.o
-obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o
+obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o strlen_32.o
# See corresponding test in arch/powerpc/Makefile
# 64-bit linker creates .sfpr on demand for final link (vmlinux),
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index e0d881ab304e..850f3b8f4da5 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -195,6 +195,22 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags)
return patch_instruction(addr, create_branch(addr, target, flags));
}
+int patch_branch_site(s32 *site, unsigned long target, int flags)
+{
+ unsigned int *addr;
+
+ addr = (unsigned int *)((unsigned long)site + *site);
+ return patch_instruction(addr, create_branch(addr, target, flags));
+}
+
+int patch_instruction_site(s32 *site, unsigned int instr)
+{
+ unsigned int *addr;
+
+ addr = (unsigned int *)((unsigned long)site + *site);
+ return patch_instruction(addr, instr);
+}
+
bool is_offset_in_branch_range(long offset)
{
/*
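patch_branch_site() and patch_instruction_site() resolve "patch site" entries that store a self-relative offset: the instruction to patch lives at the entry's own address plus the signed 32-bit value stored in it. A user-space check of that arithmetic:

/* User-space check of the self-relative site encoding used above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t *site_addr(int32_t *site)
{
	return (uint32_t *)((uintptr_t)site + *site);
}

int main(void)
{
	static uint32_t insns[4];
	static int32_t site;

	site = (int32_t)((uintptr_t)&insns[2] - (uintptr_t)&site);
	printf("resolves correctly: %d\n", site_addr(&site) == &insns[2]);
	return 0;
}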
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index da425bb6b369..ba66846fe973 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -13,6 +13,7 @@
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/code-patching-asm.h>
#define COPY_16_BYTES \
lwz r7,4(r4); \
@@ -107,8 +108,8 @@ _GLOBAL(memset)
* Skip optimised bloc until cache is enabled. Will be replaced
* by 'bne' during boot to use normal procedure if r4 is not zero
*/
-_GLOBAL(memset_nocache_branch)
- b 2f
+5: b 2f
+ patch_site 5b, patch__memset_nocache
clrlwi r7,r6,32-LG_CACHELINE_BYTES
add r8,r7,r5
@@ -168,7 +169,9 @@ _GLOBAL(memmove)
/* fall through */
_GLOBAL(memcpy)
- b generic_memcpy
+1: b generic_memcpy
+ patch_site 1b, patch__memcpy_nocache
+
add r7,r3,r5 /* test if the src & dst overlap */
add r8,r4,r5
cmplw 0,r4,r7
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 8d5034f645f3..694390357667 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -11,6 +11,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
.section ".toc","aw"
PPC64_CACHES:
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index 8fa73b7ab20e..e38f956f7d9f 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -57,7 +57,7 @@ _GLOBAL(copypage_power7)
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
- bl enter_vmx_copy
+ bl enter_vmx_ops
cmpwi r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
@@ -100,7 +100,7 @@ _GLOBAL(copypage_power7)
addi r3,r3,128
bdnz 1b
- b exit_vmx_copy /* tail call optimise */
+ b exit_vmx_ops /* tail call optimise */
#else
li r0,(PAGE_SIZE/128)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 506677395681..96c514bee66b 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -9,6 +9,13 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
+#ifndef SELFTEST_CASE
+/* 0 == most CPUs, 1 == POWER6, 2 == Cell */
+#define SELFTEST_CASE 0
+#endif
#ifdef __BIG_ENDIAN__
#define sLd sld /* Shift towards low-numbered address. */
@@ -18,6 +25,28 @@
#define sHd sld /* Shift towards high-numbered address. */
#endif
+/*
+ * These macros are used to generate exception table entries.
+ * The exception handlers below use the original arguments
+ * (stored on the stack) and the point where we're up to in
+ * the destination buffer, i.e. the address of the first
+ * unmodified byte. Generally r3 points into the destination
+ * buffer, but the first unmodified byte is at a variable
+ * offset from r3. In the code below, the symbol r3_offset
+ * is set to indicate the current offset at each point in
+ * the code. This offset is then used as a negative offset
+ * from the exception handler code, and those instructions
+ * before the exception handlers are addi instructions that
+ * adjust r3 to point to the correct place.
+ */
+ .macro lex /* exception handler for load */
+100: EX_TABLE(100b, .Lld_exc - r3_offset)
+ .endm
+
+ .macro stex /* exception handler for store */
+100: EX_TABLE(100b, .Lst_exc - r3_offset)
+ .endm
+
.align 7
_GLOBAL_TOC(__copy_tofrom_user)
#ifdef CONFIG_PPC_BOOK3S_64
@@ -28,7 +57,7 @@ FTR_SECTION_ELSE
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
#endif
_GLOBAL(__copy_tofrom_user_base)
- /* first check for a whole page copy on a page boundary */
+ /* first check for a 4kB copy on a 4kB boundary */
cmpldi cr1,r5,16
cmpdi cr6,r5,4096
or r0,r3,r4
@@ -49,6 +78,7 @@ _GLOBAL(__copy_tofrom_user_base)
* At the time of writing the only CPU that has this combination of bits
* set is Power6.
*/
+test_feature = (SELFTEST_CASE == 1)
BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
@@ -57,6 +87,8 @@ ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
addi r3,r3,-16
+r3_offset = 16
+test_feature = (SELFTEST_CASE == 0)
BEGIN_FTR_SECTION
andi. r0,r4,7
bne .Lsrc_unaligned
@@ -64,57 +96,69 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blt cr1,.Ldo_tail /* if < 16 bytes to copy */
srdi r0,r5,5
cmpdi cr1,r0,0
-20: ld r7,0(r4)
-220: ld r6,8(r4)
+lex; ld r7,0(r4)
+lex; ld r6,8(r4)
addi r4,r4,16
mtctr r0
andi. r0,r5,0x10
beq 22f
addi r3,r3,16
+r3_offset = 0
addi r4,r4,-16
mr r9,r7
mr r8,r6
beq cr1,72f
-21: ld r7,16(r4)
-221: ld r6,24(r4)
+21:
+lex; ld r7,16(r4)
+lex; ld r6,24(r4)
addi r4,r4,32
-70: std r9,0(r3)
-270: std r8,8(r3)
-22: ld r9,0(r4)
-222: ld r8,8(r4)
-71: std r7,16(r3)
-271: std r6,24(r3)
+stex; std r9,0(r3)
+r3_offset = 8
+stex; std r8,8(r3)
+r3_offset = 16
+22:
+lex; ld r9,0(r4)
+lex; ld r8,8(r4)
+stex; std r7,16(r3)
+r3_offset = 24
+stex; std r6,24(r3)
addi r3,r3,32
+r3_offset = 0
bdnz 21b
-72: std r9,0(r3)
-272: std r8,8(r3)
+72:
+stex; std r9,0(r3)
+r3_offset = 8
+stex; std r8,8(r3)
+r3_offset = 16
andi. r5,r5,0xf
beq+ 3f
addi r4,r4,16
.Ldo_tail:
addi r3,r3,16
+r3_offset = 0
bf cr7*4+0,246f
-244: ld r9,0(r4)
+lex; ld r9,0(r4)
addi r4,r4,8
-245: std r9,0(r3)
+stex; std r9,0(r3)
addi r3,r3,8
246: bf cr7*4+1,1f
-23: lwz r9,0(r4)
+lex; lwz r9,0(r4)
addi r4,r4,4
-73: stw r9,0(r3)
+stex; stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
-44: lhz r9,0(r4)
+lex; lhz r9,0(r4)
addi r4,r4,2
-74: sth r9,0(r3)
+stex; sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
-45: lbz r9,0(r4)
-75: stb r9,0(r3)
+lex; lbz r9,0(r4)
+stex; stb r9,0(r3)
3: li r3,0
blr
.Lsrc_unaligned:
+r3_offset = 16
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
@@ -127,58 +171,69 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
add r5,r5,r0
bt cr7*4+0,28f
-24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
-25: ld r0,8(r4)
+lex; ld r9,0(r4) /* 3+2n loads, 2+2n stores */
+lex; ld r0,8(r4)
sLd r6,r9,r10
-26: ldu r9,16(r4)
+lex; ldu r9,16(r4)
sHd r7,r0,r11
sLd r8,r0,r10
or r7,r7,r6
blt cr6,79f
-27: ld r0,8(r4)
+lex; ld r0,8(r4)
b 2f
-28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
-29: ldu r9,8(r4)
+28:
+lex; ld r0,0(r4) /* 4+2n loads, 3+2n stores */
+lex; ldu r9,8(r4)
sLd r8,r0,r10
addi r3,r3,-8
+r3_offset = 24
blt cr6,5f
-30: ld r0,8(r4)
+lex; ld r0,8(r4)
sHd r12,r9,r11
sLd r6,r9,r10
-31: ldu r9,16(r4)
+lex; ldu r9,16(r4)
or r12,r8,r12
sHd r7,r0,r11
sLd r8,r0,r10
addi r3,r3,16
+r3_offset = 8
beq cr6,78f
1: or r7,r7,r6
-32: ld r0,8(r4)
-76: std r12,8(r3)
+lex; ld r0,8(r4)
+stex; std r12,8(r3)
+r3_offset = 16
2: sHd r12,r9,r11
sLd r6,r9,r10
-33: ldu r9,16(r4)
+lex; ldu r9,16(r4)
or r12,r8,r12
-77: stdu r7,16(r3)
+stex; stdu r7,16(r3)
+r3_offset = 8
sHd r7,r0,r11
sLd r8,r0,r10
bdnz 1b
-78: std r12,8(r3)
+78:
+stex; std r12,8(r3)
+r3_offset = 16
or r7,r7,r6
-79: std r7,16(r3)
+79:
+stex; std r7,16(r3)
+r3_offset = 24
5: sHd r12,r9,r11
or r12,r8,r12
-80: std r12,24(r3)
+stex; std r12,24(r3)
+r3_offset = 32
bne 6f
li r3,0
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
+r3_offset = 0
sLd r9,r9,r10
ble cr1,7f
-34: ld r0,8(r4)
+lex; ld r0,8(r4)
sHd r7,r0,r11
or r9,r7,r9
7:
@@ -186,7 +241,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
#ifdef __BIG_ENDIAN__
rotldi r9,r9,32
#endif
-94: stw r9,0(r3)
+stex; stw r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,32
#endif
@@ -195,7 +250,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
#ifdef __BIG_ENDIAN__
rotldi r9,r9,16
#endif
-95: sth r9,0(r3)
+stex; sth r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,16
#endif
@@ -204,7 +259,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
#ifdef __BIG_ENDIAN__
rotldi r9,r9,8
#endif
-96: stb r9,0(r3)
+stex; stb r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,8
#endif
@@ -212,47 +267,55 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
.Ldst_unaligned:
+r3_offset = 0
PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
bf cr7*4+3,1f
-35: lbz r0,0(r4)
-81: stb r0,0(r3)
+100: EX_TABLE(100b, .Lld_exc_r7)
+ lbz r0,0(r4)
+100: EX_TABLE(100b, .Lst_exc_r7)
+ stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
-36: lhzx r0,r7,r4
-82: sthx r0,r7,r3
+100: EX_TABLE(100b, .Lld_exc_r7)
+ lhzx r0,r7,r4
+100: EX_TABLE(100b, .Lst_exc_r7)
+ sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
-37: lwzx r0,r7,r4
-83: stwx r0,r7,r3
+100: EX_TABLE(100b, .Lld_exc_r7)
+ lwzx r0,r7,r4
+100: EX_TABLE(100b, .Lst_exc_r7)
+ stwx r0,r7,r3
3: PPC_MTOCRF(0x01,r5)
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
+r3_offset = 0
bf cr7*4+0,1f
-38: lwz r0,0(r4)
-39: lwz r9,4(r4)
+lex; lwz r0,0(r4)
+lex; lwz r9,4(r4)
addi r4,r4,8
-84: stw r0,0(r3)
-85: stw r9,4(r3)
+stex; stw r0,0(r3)
+stex; stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
-40: lwz r0,0(r4)
+lex; lwz r0,0(r4)
addi r4,r4,4
-86: stw r0,0(r3)
+stex; stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
-41: lhz r0,0(r4)
+lex; lhz r0,0(r4)
addi r4,r4,2
-87: sth r0,0(r3)
+stex; sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
-42: lbz r0,0(r4)
-88: stb r0,0(r3)
+lex; lbz r0,0(r4)
+stex; stb r0,0(r3)
4: li r3,0
blr
@@ -260,48 +323,34 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
* exception handlers follow
* we have to return the number of bytes not copied
* for an exception on a load, we set the rest of the destination to 0
+ * Note that the number of bytes of instructions for adjusting r3 needs
+ * to equal the amount of the adjustment, due to the trick of using
+ * .Lld_exc - r3_offset as the handler address.
*/
-136:
-137:
+.Lld_exc_r7:
add r3,r3,r7
- b 1f
-130:
-131:
+ b .Lld_exc
+
+ /* adjust by 24 */
addi r3,r3,8
-120:
-320:
-122:
-322:
-124:
-125:
-126:
-127:
-128:
-129:
-133:
+ nop
+ /* adjust by 16 */
addi r3,r3,8
-132:
+ nop
+ /* adjust by 8 */
addi r3,r3,8
-121:
-321:
-344:
-134:
-135:
-138:
-139:
-140:
-141:
-142:
-123:
-144:
-145:
+ nop
/*
- * here we have had a fault on a load and r3 points to the first
- * unmodified byte of the destination
+ * Here we have had a fault on a load and r3 points to the first
+ * unmodified byte of the destination. We use the original arguments
+ * and r3 to work out how much wasn't copied. Since we load some
+ * distance ahead of the stores, we continue copying byte-by-byte until
+ * we hit the load fault again in order to copy as much as possible.
*/
-1: ld r6,-24(r1)
+.Lld_exc:
+ ld r6,-24(r1)
ld r4,-16(r1)
ld r5,-8(r1)
subf r6,r6,r3
@@ -312,9 +361,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
* first see if we can copy any more bytes before hitting another exception
*/
mtctr r5
+r3_offset = 0
+100: EX_TABLE(100b, .Ldone)
43: lbz r0,0(r4)
addi r4,r4,1
-89: stb r0,0(r3)
+stex; stb r0,0(r3)
addi r3,r3,1
bdnz 43b
li r3,0 /* huh? all copied successfully this time? */
@@ -323,116 +374,63 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
/*
* here we have trapped again, amount remaining is in ctr.
*/
-143: mfctr r3
+.Ldone:
+ mfctr r3
blr
/*
- * exception handlers for stores: we just need to work
- * out how many bytes weren't copied
+ * exception handlers for stores: we need to work out how many bytes
+ * weren't copied, and we may need to copy some more.
+ * Note that the number of bytes of instructions for adjusting r3 needs
+ * to equal the amount of the adjustment, due to the trick of using
+ * .Lst_exc - r3_offset as the handler address.
*/
-182:
-183:
+.Lst_exc_r7:
add r3,r3,r7
- b 1f
-371:
-180:
+ b .Lst_exc
+
+ /* adjust by 24 */
addi r3,r3,8
-171:
-177:
-179:
+ nop
+ /* adjust by 16 */
addi r3,r3,8
-370:
-372:
-176:
-178:
+ nop
+ /* adjust by 8 */
addi r3,r3,4
-185:
+ /* adjust by 4 */
addi r3,r3,4
-170:
-172:
-345:
-173:
-174:
-175:
-181:
-184:
-186:
-187:
-188:
-189:
-194:
-195:
-196:
-1:
- ld r6,-24(r1)
- ld r5,-8(r1)
- add r6,r6,r5
- subf r3,r3,r6 /* #bytes not copied */
+.Lst_exc:
+ ld r6,-24(r1) /* original destination pointer */
+ ld r4,-16(r1) /* original source pointer */
+ ld r5,-8(r1) /* original number of bytes */
+ add r7,r6,r5
+ /*
+ * If the destination pointer isn't 8-byte aligned,
+ * we may have got the exception as a result of a
+ * store that overlapped a page boundary, so we may be
+ * able to copy a few more bytes.
+ */
+17: andi. r0,r3,7
+ beq 19f
+ subf r8,r6,r3 /* #bytes copied */
+100: EX_TABLE(100b,19f)
+ lbzx r0,r8,r4
+100: EX_TABLE(100b,19f)
+ stb r0,0(r3)
+ addi r3,r3,1
+ cmpld r3,r7
+ blt 17b
+19: subf r3,r3,r7 /* #bytes not copied in r3 */
blr
- EX_TABLE(20b,120b)
- EX_TABLE(220b,320b)
- EX_TABLE(21b,121b)
- EX_TABLE(221b,321b)
- EX_TABLE(70b,170b)
- EX_TABLE(270b,370b)
- EX_TABLE(22b,122b)
- EX_TABLE(222b,322b)
- EX_TABLE(71b,171b)
- EX_TABLE(271b,371b)
- EX_TABLE(72b,172b)
- EX_TABLE(272b,372b)
- EX_TABLE(244b,344b)
- EX_TABLE(245b,345b)
- EX_TABLE(23b,123b)
- EX_TABLE(73b,173b)
- EX_TABLE(44b,144b)
- EX_TABLE(74b,174b)
- EX_TABLE(45b,145b)
- EX_TABLE(75b,175b)
- EX_TABLE(24b,124b)
- EX_TABLE(25b,125b)
- EX_TABLE(26b,126b)
- EX_TABLE(27b,127b)
- EX_TABLE(28b,128b)
- EX_TABLE(29b,129b)
- EX_TABLE(30b,130b)
- EX_TABLE(31b,131b)
- EX_TABLE(32b,132b)
- EX_TABLE(76b,176b)
- EX_TABLE(33b,133b)
- EX_TABLE(77b,177b)
- EX_TABLE(78b,178b)
- EX_TABLE(79b,179b)
- EX_TABLE(80b,180b)
- EX_TABLE(34b,134b)
- EX_TABLE(94b,194b)
- EX_TABLE(95b,195b)
- EX_TABLE(96b,196b)
- EX_TABLE(35b,135b)
- EX_TABLE(81b,181b)
- EX_TABLE(36b,136b)
- EX_TABLE(82b,182b)
- EX_TABLE(37b,137b)
- EX_TABLE(83b,183b)
- EX_TABLE(38b,138b)
- EX_TABLE(39b,139b)
- EX_TABLE(84b,184b)
- EX_TABLE(85b,185b)
- EX_TABLE(40b,140b)
- EX_TABLE(86b,186b)
- EX_TABLE(41b,141b)
- EX_TABLE(87b,187b)
- EX_TABLE(42b,142b)
- EX_TABLE(88b,188b)
- EX_TABLE(43b,143b)
- EX_TABLE(89b,189b)
-
/*
* Routine to copy a whole page of data, optimized for POWER4.
* On POWER4 it is more than 50% faster than the simple loop
* above (following the .Ldst_aligned label).
*/
+ .macro exc
+100: EX_TABLE(100b, .Labort)
+ .endm
.Lcopy_page_4K:
std r31,-32(1)
std r30,-40(1)
@@ -451,86 +449,86 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
li r0,5
0: addi r5,r5,-24
mtctr r0
-20: ld r22,640(4)
-21: ld r21,512(4)
-22: ld r20,384(4)
-23: ld r11,256(4)
-24: ld r9,128(4)
-25: ld r7,0(4)
-26: ld r25,648(4)
-27: ld r24,520(4)
-28: ld r23,392(4)
-29: ld r10,264(4)
-30: ld r8,136(4)
-31: ldu r6,8(4)
+exc; ld r22,640(4)
+exc; ld r21,512(4)
+exc; ld r20,384(4)
+exc; ld r11,256(4)
+exc; ld r9,128(4)
+exc; ld r7,0(4)
+exc; ld r25,648(4)
+exc; ld r24,520(4)
+exc; ld r23,392(4)
+exc; ld r10,264(4)
+exc; ld r8,136(4)
+exc; ldu r6,8(4)
cmpwi r5,24
1:
-32: std r22,648(3)
-33: std r21,520(3)
-34: std r20,392(3)
-35: std r11,264(3)
-36: std r9,136(3)
-37: std r7,8(3)
-38: ld r28,648(4)
-39: ld r27,520(4)
-40: ld r26,392(4)
-41: ld r31,264(4)
-42: ld r30,136(4)
-43: ld r29,8(4)
-44: std r25,656(3)
-45: std r24,528(3)
-46: std r23,400(3)
-47: std r10,272(3)
-48: std r8,144(3)
-49: std r6,16(3)
-50: ld r22,656(4)
-51: ld r21,528(4)
-52: ld r20,400(4)
-53: ld r11,272(4)
-54: ld r9,144(4)
-55: ld r7,16(4)
-56: std r28,664(3)
-57: std r27,536(3)
-58: std r26,408(3)
-59: std r31,280(3)
-60: std r30,152(3)
-61: stdu r29,24(3)
-62: ld r25,664(4)
-63: ld r24,536(4)
-64: ld r23,408(4)
-65: ld r10,280(4)
-66: ld r8,152(4)
-67: ldu r6,24(4)
+exc; std r22,648(3)
+exc; std r21,520(3)
+exc; std r20,392(3)
+exc; std r11,264(3)
+exc; std r9,136(3)
+exc; std r7,8(3)
+exc; ld r28,648(4)
+exc; ld r27,520(4)
+exc; ld r26,392(4)
+exc; ld r31,264(4)
+exc; ld r30,136(4)
+exc; ld r29,8(4)
+exc; std r25,656(3)
+exc; std r24,528(3)
+exc; std r23,400(3)
+exc; std r10,272(3)
+exc; std r8,144(3)
+exc; std r6,16(3)
+exc; ld r22,656(4)
+exc; ld r21,528(4)
+exc; ld r20,400(4)
+exc; ld r11,272(4)
+exc; ld r9,144(4)
+exc; ld r7,16(4)
+exc; std r28,664(3)
+exc; std r27,536(3)
+exc; std r26,408(3)
+exc; std r31,280(3)
+exc; std r30,152(3)
+exc; stdu r29,24(3)
+exc; ld r25,664(4)
+exc; ld r24,536(4)
+exc; ld r23,408(4)
+exc; ld r10,280(4)
+exc; ld r8,152(4)
+exc; ldu r6,24(4)
bdnz 1b
-68: std r22,648(3)
-69: std r21,520(3)
-70: std r20,392(3)
-71: std r11,264(3)
-72: std r9,136(3)
-73: std r7,8(3)
-74: addi r4,r4,640
-75: addi r3,r3,648
+exc; std r22,648(3)
+exc; std r21,520(3)
+exc; std r20,392(3)
+exc; std r11,264(3)
+exc; std r9,136(3)
+exc; std r7,8(3)
+ addi r4,r4,640
+ addi r3,r3,648
bge 0b
mtctr r5
-76: ld r7,0(4)
-77: ld r8,8(4)
-78: ldu r9,16(4)
+exc; ld r7,0(4)
+exc; ld r8,8(4)
+exc; ldu r9,16(4)
3:
-79: ld r10,8(4)
-80: std r7,8(3)
-81: ld r7,16(4)
-82: std r8,16(3)
-83: ld r8,24(4)
-84: std r9,24(3)
-85: ldu r9,32(4)
-86: stdu r10,32(3)
+exc; ld r10,8(4)
+exc; std r7,8(3)
+exc; ld r7,16(4)
+exc; std r8,16(3)
+exc; ld r8,24(4)
+exc; std r9,24(3)
+exc; ldu r9,32(4)
+exc; stdu r10,32(3)
bdnz 3b
4:
-87: ld r10,8(4)
-88: std r7,8(3)
-89: std r8,16(3)
-90: std r9,24(3)
-91: std r10,32(3)
+exc; ld r10,8(4)
+exc; std r7,8(3)
+exc; std r8,16(3)
+exc; std r9,24(3)
+exc; std r10,32(3)
9: ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
@@ -550,7 +548,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
* on an exception, reset to the beginning and jump back into the
* standard __copy_tofrom_user
*/
-100: ld r20,-120(1)
+.Labort:
+ ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
ld r23,-96(1)
@@ -566,78 +565,4 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
ld r4,-16(r1)
li r5,4096
b .Ldst_aligned
-
- EX_TABLE(20b,100b)
- EX_TABLE(21b,100b)
- EX_TABLE(22b,100b)
- EX_TABLE(23b,100b)
- EX_TABLE(24b,100b)
- EX_TABLE(25b,100b)
- EX_TABLE(26b,100b)
- EX_TABLE(27b,100b)
- EX_TABLE(28b,100b)
- EX_TABLE(29b,100b)
- EX_TABLE(30b,100b)
- EX_TABLE(31b,100b)
- EX_TABLE(32b,100b)
- EX_TABLE(33b,100b)
- EX_TABLE(34b,100b)
- EX_TABLE(35b,100b)
- EX_TABLE(36b,100b)
- EX_TABLE(37b,100b)
- EX_TABLE(38b,100b)
- EX_TABLE(39b,100b)
- EX_TABLE(40b,100b)
- EX_TABLE(41b,100b)
- EX_TABLE(42b,100b)
- EX_TABLE(43b,100b)
- EX_TABLE(44b,100b)
- EX_TABLE(45b,100b)
- EX_TABLE(46b,100b)
- EX_TABLE(47b,100b)
- EX_TABLE(48b,100b)
- EX_TABLE(49b,100b)
- EX_TABLE(50b,100b)
- EX_TABLE(51b,100b)
- EX_TABLE(52b,100b)
- EX_TABLE(53b,100b)
- EX_TABLE(54b,100b)
- EX_TABLE(55b,100b)
- EX_TABLE(56b,100b)
- EX_TABLE(57b,100b)
- EX_TABLE(58b,100b)
- EX_TABLE(59b,100b)
- EX_TABLE(60b,100b)
- EX_TABLE(61b,100b)
- EX_TABLE(62b,100b)
- EX_TABLE(63b,100b)
- EX_TABLE(64b,100b)
- EX_TABLE(65b,100b)
- EX_TABLE(66b,100b)
- EX_TABLE(67b,100b)
- EX_TABLE(68b,100b)
- EX_TABLE(69b,100b)
- EX_TABLE(70b,100b)
- EX_TABLE(71b,100b)
- EX_TABLE(72b,100b)
- EX_TABLE(73b,100b)
- EX_TABLE(74b,100b)
- EX_TABLE(75b,100b)
- EX_TABLE(76b,100b)
- EX_TABLE(77b,100b)
- EX_TABLE(78b,100b)
- EX_TABLE(79b,100b)
- EX_TABLE(80b,100b)
- EX_TABLE(81b,100b)
- EX_TABLE(82b,100b)
- EX_TABLE(83b,100b)
- EX_TABLE(84b,100b)
- EX_TABLE(85b,100b)
- EX_TABLE(86b,100b)
- EX_TABLE(87b,100b)
- EX_TABLE(88b,100b)
- EX_TABLE(89b,100b)
- EX_TABLE(90b,100b)
- EX_TABLE(91b,100b)
-
EXPORT_SYMBOL(__copy_tofrom_user)
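The rewritten store-exception path saves the original dest/src/len on entry, optionally creeps forward byte-by-byte across a page boundary, and then returns how much was not copied. The final calculation at label 19 reduces to this C form:

/* C form of the ".Lst_exc" return value: bytes not copied. */
#include <stddef.h>

static size_t bytes_not_copied(const char *orig_dst, size_t len,
			       const char *fault_dst)
{
	const char *end = orig_dst + len;	/* add r7,r6,r5 */
	return (size_t)(end - fault_dst);	/* 19: subf r3,r3,r7 */
}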
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 215e4760c09f..1a1fe180af62 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -19,6 +19,11 @@
*/
#include <asm/ppc_asm.h>
+#ifndef SELFTEST_CASE
+/* 0 == don't use VMX, 1 == use VMX */
+#define SELFTEST_CASE 0
+#endif
+
#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
@@ -80,7 +85,6 @@
_GLOBAL(__copy_tofrom_user_power7)
-#ifdef CONFIG_ALTIVEC
cmpldi r5,16
cmpldi cr1,r5,3328
@@ -89,15 +93,12 @@ _GLOBAL(__copy_tofrom_user_power7)
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
blt .Lshort_copy
- bge cr1,.Lvmx_copy
-#else
- cmpldi r5,16
- std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
- std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
-
- blt .Lshort_copy
+#ifdef CONFIG_ALTIVEC
+test_feature = SELFTEST_CASE
+BEGIN_FTR_SECTION
+ bgt cr1,.Lvmx_copy
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
.Lnonvmx_copy:
@@ -278,8 +279,8 @@ err1; stb r0,0(r3)
addi r1,r1,STACKFRAMESIZE
b .Lnonvmx_copy
-#ifdef CONFIG_ALTIVEC
.Lvmx_copy:
+#ifdef CONFIG_ALTIVEC
mflr r0
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S
index f16cec989506..ee7c5fd5fc64 100644
--- a/arch/powerpc/lib/feature-fixups-test.S
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -11,6 +11,7 @@
#include <asm/feature-fixups.h>
#include <asm/ppc_asm.h>
#include <asm/synch.h>
+#include <asm/asm-compat.h>
.text
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 8b69f868298c..e613b02bb2f0 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -304,6 +304,9 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups(bool enable)
{
void *start, *end;
@@ -313,8 +316,38 @@ void do_barrier_nospec_fixups(bool enable)
do_barrier_nospec_fixups_range(enable, start, end);
}
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
+{
+ unsigned int instr[2], *dest;
+ long *start, *end;
+ int i;
+
+ start = fixup_start;
+ end = fixup_end;
+
+ instr[0] = PPC_INST_NOP;
+ instr[1] = PPC_INST_NOP;
+
+ if (enable) {
+ pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
+ instr[0] = PPC_INST_ISYNC;
+ instr[1] = PPC_INST_SYNC;
+ }
+
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+ patch_instruction(dest, instr[0]);
+ patch_instruction(dest + 1, instr[1]);
+ }
+
+ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+}
+#endif /* CONFIG_PPC_FSL_BOOK3E */
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
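The FSL Book3E variant patches two instructions per fixup site, either a pair of nops or isync; sync. A hedged user-space rendering of the slot update; the INST_* encodings below are assumptions for illustration, not values taken from the kernel headers:

/* Illustration only: opcode values are assumed, not from headers. */
#include <stdint.h>
#include <stdbool.h>

#define INST_NOP	0x60000000u	/* ori 0,0,0 (assumed) */
#define INST_ISYNC	0x4c00012cu	/* assumed encoding */
#define INST_SYNC	0x7c0004acu	/* assumed encoding */

static void patch_barrier_pair(uint32_t *dest, bool enable)
{
	dest[0] = enable ? INST_ISYNC : INST_NOP;
	dest[1] = enable ? INST_SYNC : INST_NOP;
}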
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
index 3de7ac154f24..0526b2225260 100644
--- a/arch/powerpc/lib/hweight_64.S
+++ b/arch/powerpc/lib/hweight_64.S
@@ -20,6 +20,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
/* Note: This code relies on -mminimal-toc */
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
index ae15eba49c1f..32e91994b6b2 100644
--- a/arch/powerpc/lib/ldstfp.S
+++ b/arch/powerpc/lib/ldstfp.S
@@ -15,6 +15,7 @@
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#include <linux/errno.h>
#ifdef CONFIG_PPC_FPU
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index b7b1237d4aa6..35a0ef932e1a 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/export.h>
-#include <linux/stringify.h>
#include <linux/smp.h>
/* waiting for a spinlock... */
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index d75d18b7bd55..844d8e774492 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -9,6 +9,7 @@
*/
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/ppc-opcode.h>
#define off8 r6
#define off16 r7
@@ -24,28 +25,102 @@
#define rH r31
#ifdef __LITTLE_ENDIAN__
+#define LH lhbrx
+#define LW lwbrx
#define LD ldbrx
+#define LVS lvsr
+#define VPERM(_VRT,_VRA,_VRB,_VRC) \
+ vperm _VRT,_VRB,_VRA,_VRC
#else
+#define LH lhzx
+#define LW lwzx
#define LD ldx
+#define LVS lvsl
+#define VPERM(_VRT,_VRA,_VRB,_VRC) \
+ vperm _VRT,_VRA,_VRB,_VRC
#endif
-_GLOBAL(memcmp)
+#define VMX_THRESH 4096
+#define ENTER_VMX_OPS \
+ mflr r0; \
+ std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
+ std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
+ std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
+ std r0,16(r1); \
+ stdu r1,-STACKFRAMESIZE(r1); \
+ bl enter_vmx_ops; \
+ cmpwi cr1,r3,0; \
+ ld r0,STACKFRAMESIZE+16(r1); \
+ ld r3,STK_REG(R31)(r1); \
+ ld r4,STK_REG(R30)(r1); \
+ ld r5,STK_REG(R29)(r1); \
+ addi r1,r1,STACKFRAMESIZE; \
+ mtlr r0
+
+#define EXIT_VMX_OPS \
+ mflr r0; \
+ std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
+ std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
+ std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
+ std r0,16(r1); \
+ stdu r1,-STACKFRAMESIZE(r1); \
+ bl exit_vmx_ops; \
+ ld r0,STACKFRAMESIZE+16(r1); \
+ ld r3,STK_REG(R31)(r1); \
+ ld r4,STK_REG(R30)(r1); \
+ ld r5,STK_REG(R29)(r1); \
+ addi r1,r1,STACKFRAMESIZE; \
+ mtlr r0
+
+/*
+ * LD_VSR_CROSS16B loads the 2nd 16 bytes for _vaddr, which is not aligned
+ * to a 16-byte boundary, and permutes the result with the 1st 16 bytes.
+ *
+ * | y y y y y y y y y y y y y 0 1 2 | 3 4 5 6 7 8 9 a b c d e f z z z |
+ * ^ ^ ^
+ * 0xbbbb10 0xbbbb20 0xbbbb30
+ * ^
+ * _vaddr
+ *
+ *
+ * _vmask is the mask generated by LVS
+ * _v1st_qw is the 1st aligned QW of the current addr, already loaded.
+ * for example: 0xyyyyyyyyyyyyy012 for big endian
+ * _v2nd_qw is the 2nd aligned QW of the current _vaddr, to be loaded.
+ * for example: 0x3456789abcdefzzz for big endian
+ * The permute result is saved in _v_res.
+ * for example: 0x0123456789abcdef for big endian.
+ */
+#define LD_VSR_CROSS16B(_vaddr,_vmask,_v1st_qw,_v2nd_qw,_v_res) \
+ lvx _v2nd_qw,_vaddr,off16; \
+ VPERM(_v_res,_v1st_qw,_v2nd_qw,_vmask)
+
+/*
+ * There are 2 categories for memcmp:
+ * 1) src/dst have the same offset relative to the 8-byte boundary. The
+ * handlers are named like .Lsameoffset_xxxx
+ * 2) src/dst have different offsets relative to the 8-byte boundary. The
+ * handlers are named like .Ldiffoffset_xxxx
+ */
+_GLOBAL_TOC(memcmp)
cmpdi cr1,r5,0
- /* Use the short loop if both strings are not 8B aligned */
- or r6,r3,r4
+ /* Use the short loop if the src/dst addresses do not have
+ * the same offset relative to the 8-byte alignment boundary.
+ */
+ xor r6,r3,r4
andi. r6,r6,7
- /* Use the short loop if length is less than 32B */
- cmpdi cr6,r5,31
+ /* Fall back to the short loop when comparing fewer than
+ * 8 bytes at aligned addresses.
+ */
+ cmpdi cr6,r5,7
beq cr1,.Lzero
- bne .Lshort
- bgt cr6,.Llong
+ bgt cr6,.Lno_short
.Lshort:
mtctr r5
-
1: lbz rA,0(r3)
lbz rB,0(r4)
subf. rC,rB,rA
@@ -78,11 +153,98 @@ _GLOBAL(memcmp)
li r3,0
blr
+.Lno_short:
+ dcbt 0,r3
+ dcbt 0,r4
+ bne .Ldiffoffset_8bytes_make_align_start
+
+
+.Lsameoffset_8bytes_make_align_start:
+ /* Compare the bytes that are not 8-byte aligned first, so that
+ * the rest of the comparison can run on an 8-byte alignment.
+ */
+ andi. r6,r3,7
+
+ /* Try to compare the first double word, which is not 8-byte aligned:
+ * load the first double word at (src & ~7UL) and shift left the
+ * appropriate number of bits before the comparison.
+ */
+ rlwinm r6,r3,3,26,28
+ beq .Lsameoffset_8bytes_aligned
+ clrrdi r3,r3,3
+ clrrdi r4,r4,3
+ LD rA,0,r3
+ LD rB,0,r4
+ sld rA,rA,r6
+ sld rB,rB,r6
+ cmpld cr0,rA,rB
+ srwi r6,r6,3
+ bne cr0,.LcmpAB_lightweight
+ subfic r6,r6,8
+ subf. r5,r6,r5
+ addi r3,r3,8
+ addi r4,r4,8
+ beq .Lzero
+
+.Lsameoffset_8bytes_aligned:
+ /* Now we are 8-byte aligned.
+ * Use the .Llong loop if 32 or more bytes remain to compare.
+ */
+ cmpdi cr6,r5,31
+ bgt cr6,.Llong
+
+.Lcmp_lt32bytes:
+ /* compare 1..31 bytes; at least the r3 address is now 8-byte aligned */
+ cmpdi cr5,r5,7
+ srdi r0,r5,3
+ ble cr5,.Lcmp_rest_lt8bytes
+
+ /* handle 8..31 bytes */
+ clrldi r5,r5,61
+ mtctr r0
+2:
+ LD rA,0,r3
+ LD rB,0,r4
+ cmpld cr0,rA,rB
+ addi r3,r3,8
+ addi r4,r4,8
+ bne cr0,.LcmpAB_lightweight
+ bdnz 2b
+
+ cmpwi r5,0
+ beq .Lzero
+
+.Lcmp_rest_lt8bytes:
+ /* Here we have fewer than 8 bytes left to compare; at least the s1
+ * address is 8-byte aligned.
+ * The next double words are loaded and shifted right by the
+ * appropriate number of bits.
+ */
+ subfic r6,r5,8
+ slwi r6,r6,3
+ LD rA,0,r3
+ LD rB,0,r4
+ srd rA,rA,r6
+ srd rB,rB,r6
+ cmpld cr0,rA,rB
+ bne cr0,.LcmpAB_lightweight
+ b .Lzero
+
.Lnon_zero:
mr r3,rC
blr
.Llong:
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ /* Try to use the vmx loop if the length is 4K or more */
+ cmpldi cr6,r5,VMX_THRESH
+ bge cr6,.Lsameoffset_vmx_cmp
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
+.Llong_novmx_cmp:
+#endif
+ /* At least s1 addr is aligned with 8 bytes */
li off8,8
li off16,16
li off24,24
@@ -232,4 +394,240 @@ _GLOBAL(memcmp)
ld r28,-32(r1)
ld r27,-40(r1)
blr
+
+.LcmpAB_lightweight: /* skip NV GPRS restore */
+ li r3,1
+ bgtlr
+ li r3,-1
+ blr
+
+#ifdef CONFIG_ALTIVEC
+.Lsameoffset_vmx_cmp:
+ /* Entered with src/dst addresses that have the same offset
+ * relative to the 8-byte alignment boundary.
+ *
+ * There is an optimization based on the following fact: memcmp()
+ * tends to fail early within the first 32 bytes.
+ * Before applying VMX instructions, which incur a 32x128-bit
+ * VMX register load/restore penalty, we compare the first 32 bytes
+ * so that we can catch the ~80% of cases that fail early.
+ */
+
+ li r0,4
+ mtctr r0
+.Lsameoffset_prechk_32B_loop:
+ LD rA,0,r3
+ LD rB,0,r4
+ cmpld cr0,rA,rB
+ addi r3,r3,8
+ addi r4,r4,8
+ bne cr0,.LcmpAB_lightweight
+ addi r5,r5,-8
+ bdnz .Lsameoffset_prechk_32B_loop
+
+ ENTER_VMX_OPS
+ beq cr1,.Llong_novmx_cmp
+
+3:
+ /* need to check whether r4 has the same offset as r3
+ * relative to the 16-byte boundary.
+ */
+ xor r0,r3,r4
+ andi. r0,r0,0xf
+ bne .Ldiffoffset_vmx_cmp_start
+
+ /* len is at least 4KB. Align further to 16 bytes.
+ */
+ andi. rA,r3,8
+ LD rA,0,r3
+ beq 4f
+ LD rB,0,r4
+ cmpld cr0,rA,rB
+ addi r3,r3,8
+ addi r4,r4,8
+ addi r5,r5,-8
+
+ beq cr0,4f
+ /* save and restore cr0 */
+ mfocrf r5,128
+ EXIT_VMX_OPS
+ mtocrf 128,r5
+ b .LcmpAB_lightweight
+
+4:
+ /* compare 32 bytes for each loop */
+ srdi r0,r5,5
+ mtctr r0
+ clrldi r5,r5,59
+ li off16,16
+
+.balign 16
+5:
+ lvx v0,0,r3
+ lvx v1,0,r4
+ VCMPEQUD_RC(v0,v0,v1)
+ bnl cr6,7f
+ lvx v0,off16,r3
+ lvx v1,off16,r4
+ VCMPEQUD_RC(v0,v0,v1)
+ bnl cr6,6f
+ addi r3,r3,32
+ addi r4,r4,32
+ bdnz 5b
+
+ EXIT_VMX_OPS
+ cmpdi r5,0
+ beq .Lzero
+ b .Lcmp_lt32bytes
+
+6:
+ addi r3,r3,16
+ addi r4,r4,16
+
+7:
+ /* diff the last 16 bytes */
+ EXIT_VMX_OPS
+ LD rA,0,r3
+ LD rB,0,r4
+ cmpld cr0,rA,rB
+ li off8,8
+ bne cr0,.LcmpAB_lightweight
+
+ LD rA,off8,r3
+ LD rB,off8,r4
+ cmpld cr0,rA,rB
+ bne cr0,.LcmpAB_lightweight
+ b .Lzero
+#endif
+
+.Ldiffoffset_8bytes_make_align_start:
+ /* now try to align s1 with 8 bytes */
+ rlwinm r6,r3,3,26,28
+ beq .Ldiffoffset_align_s1_8bytes
+
+ clrrdi r3,r3,3
+ LD rA,0,r3
+ LD rB,0,r4 /* unaligned load */
+ sld rA,rA,r6
+ srd rA,rA,r6
+ srd rB,rB,r6
+ cmpld cr0,rA,rB
+ srwi r6,r6,3
+ bne cr0,.LcmpAB_lightweight
+
+ subfic r6,r6,8
+ subf. r5,r6,r5
+ addi r3,r3,8
+ add r4,r4,r6
+
+ beq .Lzero
+
+.Ldiffoffset_align_s1_8bytes:
+ /* now s1 is aligned with 8 bytes. */
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ /* only do vmx ops when the size is 4K bytes or more */
+ cmpdi cr5,r5,VMX_THRESH
+ bge cr5,.Ldiffoffset_vmx_cmp
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
+.Ldiffoffset_novmx_cmp:
+#endif
+
+
+ cmpdi cr5,r5,31
+ ble cr5,.Lcmp_lt32bytes
+
+#ifdef CONFIG_ALTIVEC
+ b .Llong_novmx_cmp
+#else
+ b .Llong
+#endif
+
+#ifdef CONFIG_ALTIVEC
+.Ldiffoffset_vmx_cmp:
+ /* perform a 32-byte pre-check before
+ * enabling VMX operations.
+ */
+ li r0,4
+ mtctr r0
+.Ldiffoffset_prechk_32B_loop:
+ LD rA,0,r3
+ LD rB,0,r4
+ cmpld cr0,rA,rB
+ addi r3,r3,8
+ addi r4,r4,8
+ bne cr0,.LcmpAB_lightweight
+ addi r5,r5,-8
+ bdnz .Ldiffoffset_prechk_32B_loop
+
+ ENTER_VMX_OPS
+ beq cr1,.Ldiffoffset_novmx_cmp
+
+.Ldiffoffset_vmx_cmp_start:
+ /* First try to align r3 to 16 bytes */
+ andi. r6,r3,0xf
+ li off16,16
+ beq .Ldiffoffset_vmx_s1_16bytes_align
+
+ LVS v3,0,r3
+ LVS v4,0,r4
+
+ lvx v5,0,r3
+ lvx v6,0,r4
+ LD_VSR_CROSS16B(r3,v3,v5,v7,v9)
+ LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
+
+ VCMPEQUB_RC(v7,v9,v10)
+ bnl cr6,.Ldiffoffset_vmx_diff_found
+
+ subfic r6,r6,16
+ subf r5,r6,r5
+ add r3,r3,r6
+ add r4,r4,r6
+
+.Ldiffoffset_vmx_s1_16bytes_align:
+ /* now s1 is aligned with 16 bytes */
+ lvx v6,0,r4
+ LVS v4,0,r4
+ srdi r6,r5,5 /* loop for 32 bytes each */
+ clrldi r5,r5,59
+ mtctr r6
+
+.balign 16
+.Ldiffoffset_vmx_32bytesloop:
+ /* the first qw of r4 was saved in v6 */
+ lvx v9,0,r3
+ LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
+ VCMPEQUB_RC(v7,v9,v10)
+ vor v6,v8,v8
+ bnl cr6,.Ldiffoffset_vmx_diff_found
+
+ addi r3,r3,16
+ addi r4,r4,16
+
+ lvx v9,0,r3
+ LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
+ VCMPEQUB_RC(v7,v9,v10)
+ vor v6,v8,v8
+ bnl cr6,.Ldiffoffset_vmx_diff_found
+
+ addi r3,r3,16
+ addi r4,r4,16
+
+ bdnz .Ldiffoffset_vmx_32bytesloop
+
+ EXIT_VMX_OPS
+
+ cmpdi r5,0
+ beq .Lzero
+ b .Lcmp_lt32bytes
+
+.Ldiffoffset_vmx_diff_found:
+ EXIT_VMX_OPS
+ /* either way, the difference will appear within the next 16 bytes */
+ li r5,16
+ b .Lcmp_lt32bytes
+
+#endif
EXPORT_SYMBOL(memcmp)
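Taken together, the rewritten memcmp dispatches on three things: total length, whether the buffers share an 8-byte alignment offset, and (for 4KB or more with Altivec) a 32-byte GPR pre-check before entering VMX. A rough control-flow model of that dispatch; the returned strings name the assembly labels, and the model simplifies the alignment fix-up steps:

/* Control-flow model of the dispatch; returns the path taken. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VMX_THRESH 4096	/* matches the source */

static const char *memcmp_path(uintptr_t a, uintptr_t b, size_t n,
			       int has_altivec)
{
	int same_offset = ((a ^ b) & 7) == 0;

	if (n <= 7)
		return ".Lshort (byte loop)";
	if (has_altivec && n >= VMX_THRESH)
		return same_offset ? ".Lsameoffset_vmx_cmp (after 32B pre-check)"
				   : ".Ldiffoffset_vmx_cmp (after 32B pre-check)";
	return same_offset ? ".Llong (8B GPR loop)"
			   : ".Ldiffoffset path into .Llong";
}

int main(void)
{
	printf("%s\n", memcmp_path(0x1000, 0x2004, 8192, 1));
	return 0;
}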
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 8d8265be1a59..273ea67e60a1 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -9,6 +9,13 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
+#ifndef SELFTEST_CASE
+/* For big-endian, 0 == most CPUs, 1 == POWER6, 2 == Cell */
+#define SELFTEST_CASE 0
+#endif
.align 7
_GLOBAL_TOC(memcpy)
@@ -20,10 +27,8 @@ BEGIN_FTR_SECTION
#endif
FTR_SECTION_ELSE
#ifdef CONFIG_PPC_BOOK3S_64
-#ifndef SELFTEST
b memcpy_power7
#endif
-#endif
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
#ifdef __LITTLE_ENDIAN__
/* dumb little-endian memcpy that will get replaced at runtime */
@@ -47,6 +52,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
cleared.
At the time of writing the only CPU that has this combination of bits
set is Power6. */
+test_feature = (SELFTEST_CASE == 1)
BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
@@ -55,6 +61,7 @@ ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
addi r3,r3,-16
+test_feature = (SELFTEST_CASE == 0)
BEGIN_FTR_SECTION
andi. r0,r4,7
bne .Lsrc_unaligned
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index df7de9d3da08..89bfefcf7fcc 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -19,7 +19,10 @@
*/
#include <asm/ppc_asm.h>
-_GLOBAL(memcpy_power7)
+#ifndef SELFTEST_CASE
+/* 0 == don't use VMX, 1 == use VMX */
+#define SELFTEST_CASE 0
+#endif
#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
@@ -29,20 +32,17 @@ _GLOBAL(memcpy_power7)
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
#endif
-#ifdef CONFIG_ALTIVEC
+_GLOBAL(memcpy_power7)
cmpldi r5,16
cmpldi cr1,r5,4096
-
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
-
blt .Lshort_copy
- bgt cr1,.Lvmx_copy
-#else
- cmpldi r5,16
-
- std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- blt .Lshort_copy
+#ifdef CONFIG_ALTIVEC
+test_feature = SELFTEST_CASE
+BEGIN_FTR_SECTION
+ bgt cr1, .Lvmx_copy
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
.Lnonvmx_copy:
@@ -223,14 +223,14 @@ _GLOBAL(memcpy_power7)
addi r1,r1,STACKFRAMESIZE
b .Lnonvmx_copy
-#ifdef CONFIG_ALTIVEC
.Lvmx_copy:
+#ifdef CONFIG_ALTIVEC
mflr r0
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
- bl enter_vmx_copy
+ bl enter_vmx_ops
cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
@@ -445,7 +445,7 @@ _GLOBAL(memcpy_power7)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- b exit_vmx_copy /* tail call optimise */
+ b exit_vmx_ops /* tail call optimise */
.Lvmx_unaligned_copy:
/* Get the destination 16B aligned */
@@ -649,5 +649,5 @@ _GLOBAL(memcpy_power7)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- b exit_vmx_copy /* tail call optimise */
+ b exit_vmx_ops /* tail call optimise */
#endif /* CONFIG_ALTIVEC */
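The SELFTEST_CASE/test_feature pattern added to copyuser_64.S, memcpy_64.S and the power7 variants lets the same source be rebuilt with one side of a feature section forced at assembly time, which is presumably how the user-space copyloops selftests exercise each path; in kernel builds it defaults to 0. A C analogue of the idea, with the SELFTEST macro as an assumed build flag:

/* C analogue: a build-time case collapses the runtime feature test. */
#ifndef SELFTEST_CASE
#define SELFTEST_CASE 0	/* 0 == most CPUs, 1 == POWER6, 2 == Cell */
#endif

static inline int take_power6_path(int cpu_feature_bit)
{
#ifdef SELFTEST
	return SELFTEST_CASE == 1;	/* forced when built as a selftest */
#else
	return cpu_feature_bit;		/* normally patched via feature fixups */
#endif
}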
diff --git a/arch/powerpc/lib/strlen_32.S b/arch/powerpc/lib/strlen_32.S
new file mode 100644
index 000000000000..0a8d3f64d493
--- /dev/null
+++ b/arch/powerpc/lib/strlen_32.S
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * strlen() for PPC32
+ *
+ * Copyright (C) 2018 Christophe Leroy CS Systemes d'Information.
+ *
+ * Inspired from glibc implementation
+ */
+#include <asm/ppc_asm.h>
+#include <asm/export.h>
+#include <asm/cache.h>
+
+ .text
+
+/*
+ * Algorithm:
+ *
+ * 1) Given a word 'x', we can test to see if it contains any 0 bytes
+ * by subtracting 0x01010101, and seeing if any of the high bits of each
+ * byte changed from 0 to 1. This works because the least significant
+ * 0 byte must have had no incoming carry (otherwise it's not the least
+ * significant), so it is 0x00 - 0x01 == 0xff. For all other
+ * byte values, either they have the high bit set initially, or when
+ * 1 is subtracted you get a value in the range 0x00-0x7f, none of which
+ * have their high bit set. The expression here is
+ * (x - 0x01010101) & ~x & 0x80808080, which gives 0x00000000 when
+ * there were no 0x00 bytes in the word. You get 0x80 in bytes that
+ * match, but carries can also produce a false 0x80 in the byte one
+ * position more significant than a true match. For little-endian this is
+ * of no consequence since the least significant match is the one
+ * we're interested in, but big-endian needs method 2 to find which
+ * byte matches.
+ * 2) Given a word 'x', we can test to see _which_ byte was zero by
+ * calculating ~(((x & ~0x80808080) - 0x80808080 - 1) | x | ~0x80808080).
+ * This produces 0x80 in each byte that was zero, and 0x00 in all
+ * the other bytes. The '| ~0x80808080' clears the low 7 bits in each
+ * byte, and the '| x' part ensures that bytes with the high bit set
+ * produce 0x00. The addition will carry into the high bit of each byte
+ * iff that byte had one of its low 7 bits set. We can then just see
+ * which was the most significant bit set and divide by 8 to find how
+ * many to add to the index.
+ * This is from the book 'The PowerPC Compiler Writer's Guide',
+ * by Steve Hoxey, Faraydon Karim, Bill Hay and Hank Warren.
+ */
+
+_GLOBAL(strlen)
+ andi. r0, r3, 3
+ lis r7, 0x0101
+ addi r10, r3, -4
+ addic r7, r7, 0x0101 /* r7 = 0x01010101 (lomagic) & clear XER[CA] */
+ rotlwi r6, r7, 31 /* r6 = 0x80808080 (himagic) */
+ bne- 3f
+ .balign IFETCH_ALIGN_BYTES
+1: lwzu r9, 4(r10)
+2: subf r8, r7, r9
+ and. r8, r8, r6
+ beq+ 1b
+ andc. r8, r8, r9
+ beq+ 1b
+ andc r8, r9, r6
+ orc r9, r9, r6
+ subfe r8, r6, r8
+ nor r8, r8, r9
+ cntlzw r8, r8
+ subf r3, r3, r10
+ srwi r8, r8, 3
+ add r3, r3, r8
+ blr
+
+ /* Misaligned string: make sure the bytes before the string are not seen as 0 */
+3: xor r10, r10, r0
+ orc r8, r8, r8
+ lwzu r9, 4(r10)
+ slwi r0, r0, 3
+ srw r8, r8, r0
+ orc r9, r9, r8
+ b 2b
+EXPORT_SYMBOL(strlen)
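Both word tricks in the comment above can be checked directly in C; method 1 detects the presence of a zero byte, method 2 marks which byte it is (the "- 0x80808080 - 1" in the comment is written here as the equivalent "+ 0x7f7f7f7f"):

/* User-space check of the two zero-byte word tricks described above. */
#include <assert.h>
#include <stdint.h>

static int has_zero_byte(uint32_t x)		/* method 1 */
{
	return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
}

static uint32_t zero_byte_mask(uint32_t x)	/* method 2: 0x80 per zero byte */
{
	return ~(((x & ~0x80808080u) + 0x7f7f7f7fu) | x | ~0x80808080u);
}

int main(void)
{
	assert(has_zero_byte(0x41004242u));
	assert(!has_zero_byte(0x41424344u));
	assert(zero_byte_mask(0x41004242u) == 0x00800000u);
	return 0;
}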
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index bf925cdcaca9..9f340494a8ac 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -53,7 +53,7 @@ int exit_vmx_usercopy(void)
return 0;
}
-int enter_vmx_copy(void)
+int enter_vmx_ops(void)
{
if (in_interrupt())
return 0;
@@ -70,7 +70,7 @@ int enter_vmx_copy(void)
* passed a pointer to the destination which we return as required by a
* memcpy implementation.
*/
-void *exit_vmx_copy(void *dest)
+void *exit_vmx_ops(void *dest)
{
disable_kernel_altivec();
preempt_enable();
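The renamed enter_vmx_ops()/exit_vmx_ops() keep the old enter_vmx_copy() contract: callers must fall back to a scalar path when VMX cannot be used (for example in interrupt context, where enter returns 0). The calling pattern the assembly macros implement, sketched with stand-in loops:

/* Sketch of the calling contract; the loops and stubs are stand-ins. */
#include <stddef.h>
#include <string.h>

static int enter_vmx_ops_stub(void) { return 1; }	/* 0 == unusable */
static void *exit_vmx_ops_stub(void *dest) { return dest; }

static void vmx_loop(void *d, const void *s, size_t n) { memcpy(d, s, n); }
static void scalar_loop(void *d, const void *s, size_t n) { memcpy(d, s, n); }

static void op_with_vmx_fallback(void *dst, const void *src, size_t n)
{
	if (enter_vmx_ops_stub()) {
		vmx_loop(dst, src, n);
		exit_vmx_ops_stub(dst);	/* re-enables preemption, returns dst */
	} else {
		scalar_loop(dst, src, n);
	}
}

int main(void)
{
	char dst[8], src[8] = "example";

	op_with_vmx_fallback(dst, src, sizeof(src));
	return 0;
}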
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 82b1ff759e26..12d92518e898 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -229,7 +229,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
}
#ifdef CONFIG_SMP
-void mmu_init_secondary(int cpu)
+void __init mmu_init_secondary(int cpu)
{
unsigned long addr;
unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index f06f3577d8d1..cdf6a9960046 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -19,7 +19,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb_low.o slb.o
obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(BITS).o
-ifeq ($(CONFIG_PPC_BOOK3S_64),y)
+ifdef CONFIG_PPC_BOOK3S_64
obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o
endif
@@ -31,7 +31,7 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-y += hugetlbpage.o
-ifeq ($(CONFIG_HUGETLB_PAGE),y)
+ifdef CONFIG_HUGETLB_PAGE
obj-$(CONFIG_PPC_BOOK3S_64) += hugetlbpage-hash64.o
obj-$(CONFIG_PPC_RADIX_MMU) += hugetlbpage-radix.o
obj-$(CONFIG_PPC_BOOK3E_MMU) += hugetlbpage-book3e.o
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 7d0945bd3a61..c8da352e8686 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -34,7 +34,7 @@
* to handle fortunately.
*/
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
- unsigned long dsisr, unsigned *flt)
+ unsigned long dsisr, vm_fault_t *flt)
{
struct vm_area_struct *vma;
unsigned long is_write;
diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c
index 14cfb11b09d0..869294695048 100644
--- a/arch/powerpc/mm/dump_hashpagetable.c
+++ b/arch/powerpc/mm/dump_hashpagetable.c
@@ -19,7 +19,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <linux/const.h>
#include <asm/page.h>
@@ -260,7 +259,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *
/* to check in the secondary hash table, we invert the hash */
if (!primary)
hash = ~hash;
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* see if we can find an entry in the hpte with this hash */
for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index b1ca7a0974e3..d51cf5f4e45e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -42,7 +42,6 @@
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
@@ -156,7 +155,7 @@ static noinline int bad_access(struct pt_regs *regs, unsigned long address)
}
static int do_sigbus(struct pt_regs *regs, unsigned long address,
- unsigned int fault)
+ vm_fault_t fault)
{
siginfo_t info;
unsigned int lsb = 0;
@@ -187,7 +186,8 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address,
return 0;
}
-static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
+static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
+ vm_fault_t fault)
{
/*
* Kernel page fault interrupted by SIGKILL. We have no reason to
@@ -415,7 +415,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
int is_exec = TRAP(regs) == 0x400;
int is_user = user_mode(regs);
int is_write = page_fault_is_write(error_code);
- int fault, major = 0;
+ vm_fault_t fault, major = 0;
bool must_retry = false;
if (notify_page_fault(regs))
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index d573d7d07f25..6fa6765a10eb 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -80,7 +80,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
hash = hpt_hash(vpn, shift, ssize);
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -89,7 +89,7 @@ repeat:
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags,
HPTE_V_SECONDARY,
@@ -97,8 +97,8 @@ repeat:
MMU_PAGE_4K, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should be try the group from which we removed ?
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index e601d95c3b20..3afa253d7f52 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -154,7 +154,7 @@ htab_insert_hpte:
}
hash = hpt_hash(vpn, shift, ssize);
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -165,7 +165,7 @@ repeat:
if (unlikely(slot == -1)) {
bool soft_invalid;
- hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags, HPTE_V_SECONDARY,
MMU_PAGE_4K, MMU_PAGE_4K,
@@ -193,8 +193,7 @@ repeat:
* that we do not get the same soft-invalid slot.
*/
if (soft_invalid || (mftb() & 0x1))
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
/*
@@ -288,7 +287,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
hash = hpt_hash(vpn, shift, ssize);
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -298,7 +297,7 @@ repeat:
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags,
HPTE_V_SECONDARY,
@@ -306,8 +305,8 @@ repeat:
MMU_PAGE_64K, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should we retry the group from which we removed?
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index ffbd7c0bda96..26acf6c8c20c 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -27,6 +27,7 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
#ifdef CONFIG_SMP
.section .bss
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 1d049c78c82a..729f02df8290 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -23,13 +23,13 @@
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
+#include <asm/feature-fixups.h>
#include <misc/cxl-base.h>
@@ -423,9 +423,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
vpn, want_v & HPTE_V_AVPN, slot, newpp);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
* a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -439,9 +437,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
} else {
native_lock_hpte(hptep);
/* recheck with locks held */
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))) {
ret = -1;
@@ -481,11 +477,9 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
/* Bolted mappings are only ever in the primary group */
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) {
- hptep = htab_address + slot;
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hptep = htab_address + slot;
+ hpte_v = hpte_get_old_v(hptep);
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/* HPTE matches */
return slot;
@@ -574,11 +568,19 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
want_v = hpte_encode_avpn(vpn, bpsize, ssize);
- native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+ native_lock_hpte(hptep);
+ /* recheck with locks held */
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+ /* Invalidate the hpte. NOTE: this also unlocks it */
+ hptep->v = 0;
+ else
+ native_unlock_hpte(hptep);
+ }
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
* a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -586,13 +588,6 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
* (hpte_remove) because we assume the old translation is still
* technically "valid".
*/
- if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
- native_unlock_hpte(hptep);
- else
- /* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->v = 0;
-
- /* Invalidate the TLB */
tlbie(vpn, bpsize, apsize, ssize, local);
local_irq_restore(flags);
@@ -634,17 +629,23 @@ static void native_hugepage_invalidate(unsigned long vsid,
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
- native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
/* Even if we miss, we need to invalidate the TLB */
- if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
- native_unlock_hpte(hptep);
- else
- /* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->v = 0;
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+ /* recheck with locks held */
+ native_lock_hpte(hptep);
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+ /*
+ * Invalidate the hpte. NOTE: this also unlocks it
+ */
+
+ hptep->v = 0;
+ } else
+ native_unlock_hpte(hptep);
+ }
/*
* We need to do tlb invalidate for all the address, tlbie
* instruction compares entry_VA in tlb with the VA specified
@@ -812,16 +813,19 @@ static void native_flush_hash_range(unsigned long number, int local)
slot += hidx & _PTEIDX_GROUP_IX;
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
+ continue;
+ /* lock and try again */
native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v,
- be64_to_cpu(hptep->r));
- if (!HPTE_V_COMPARE(hpte_v, want_v) ||
- !(hpte_v & HPTE_V_VALID))
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
native_unlock_hpte(hptep);
else
hptep->v = 0;
+
} pte_iterate_hashed_end();
}
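
The invalidate and flush paths above now read the HPTE without taking the per-entry lock, skip entries that already fail the compare, and only lock-and-recheck likely matches, which saves an atomic lock operation per dead entry in the common flush case. The pattern, distilled from the hunks above (kernel context assumed):

    hpte_v = hpte_get_old_v(hptep);                 /* unlocked read */
    if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
            native_lock_hpte(hptep);
            hpte_v = hpte_get_old_v(hptep);         /* recheck under lock */
            if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                    hptep->v = 0;   /* invalidate; this also unlocks */
            else
                    native_unlock_hpte(hptep);
    }
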
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8318716e5075..f23a89d8e4ce 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -48,7 +48,6 @@
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
-#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
@@ -808,31 +807,6 @@ int hash__remove_section_mapping(unsigned long start, unsigned long end)
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-static void update_hid_for_hash(void)
-{
- unsigned long hid0;
- unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
-
- asm volatile("ptesync": : :"memory");
- /* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(0), "i"(0), "i"(2), "r"(0) : "memory");
- asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
- trace_tlbie(0, 0, rb, 0, 2, 0, 0);
-
- /*
- * now switch the HID
- */
- hid0 = mfspr(SPRN_HID0);
- hid0 &= ~HID0_POWER9_RADIX;
- mtspr(SPRN_HID0, hid0);
- asm volatile("isync": : :"memory");
-
- /* Wait for it to happen */
- while ((mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
- cpu_relax();
-}
-
static void __init hash_init_partition_table(phys_addr_t hash_table,
unsigned long htab_size)
{
@@ -845,8 +819,6 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
htab_size = __ilog2(htab_size) - 18;
mmu_partition_table_set_entry(0, hash_table | htab_size, 0);
pr_info("Partition table %p\n", partition_tb);
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- update_hid_for_hash();
}
static void __init htab_initialize(void)
@@ -1077,9 +1049,6 @@ void hash__early_init_mmu_secondary(void)
/* Initialize hash table for that CPU */
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- update_hid_for_hash();
-
if (!cpu_has_feature(CPU_FTR_ARCH_300))
mtspr(SPRN_SDR1, _SDR1);
else
@@ -1783,8 +1752,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
long slot;
repeat:
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
@@ -1792,15 +1760,14 @@ repeat:
/* Primary is full, try the secondary */
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
vflags | HPTE_V_SECONDARY,
psize, psize, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP)&~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
goto repeat;
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index 668e87d03f9e..82a0e37557a5 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(kmap_atomic_prot);
void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type;
+ int type __maybe_unused;
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable();
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index f20d16f849c5..01f213d2bcb9 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -128,7 +128,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
new_pmd |= H_PAGE_HASHPTE;
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -137,16 +137,15 @@ repeat:
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags,
HPTE_V_SECONDARY,
psize, lpsize, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
goto repeat;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8a9a49c13865..e87f9ef9115b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -118,15 +118,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
}
/*
- * These macros define how to determine which level of the page table holds
- * the hpdp.
- */
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
-#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
-#define HUGEPD_PUD_SHIFT PUD_SHIFT
-#endif
-
-/*
* At this point we do the placement change only for BOOK3S 64. This would
* possibly work on other subarchs.
*/
@@ -174,13 +165,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
}
}
#else
- if (pshift >= HUGEPD_PGD_SHIFT) {
+ if (pshift >= PGDIR_SHIFT) {
ptl = &mm->page_table_lock;
hpdp = (hugepd_t *)pg;
} else {
pdshift = PUD_SHIFT;
pu = pud_alloc(mm, pg, addr);
- if (pshift >= HUGEPD_PUD_SHIFT) {
+ if (pshift >= PUD_SHIFT) {
ptl = pud_lockptr(mm, pu);
hpdp = (hugepd_t *)pu;
} else {
@@ -621,15 +612,12 @@ static int __init add_huge_page_size(unsigned long long size)
* firmware we only add hugetlb support for page sizes that can be
* supported by linux page table layout.
* For now we have
- * Radix: 2M
+ * Radix: 2M and 1G
* Hash: 16M and 16G
*/
if (radix_enabled()) {
- if (mmu_psize != MMU_PAGE_2M) {
- if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
- (mmu_psize != MMU_PAGE_1G))
- return -EINVAL;
- }
+ if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
+ return -EINVAL;
} else {
if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
return -EINVAL;
@@ -695,9 +683,9 @@ static int __init hugetlbpage_init(void)
else
pdshift = PMD_SHIFT;
#else
- if (shift < HUGEPD_PUD_SHIFT)
+ if (shift < PUD_SHIFT)
pdshift = PMD_SHIFT;
- else if (shift < HUGEPD_PGD_SHIFT)
+ else if (shift < PGDIR_SHIFT)
pdshift = PUD_SHIFT;
else
pdshift = PGDIR_SHIFT;
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index f3d4b4a0e561..4a892d894a0f 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -200,9 +200,9 @@ static void pte_frag_destroy(void *pte_frag)
/* drop all the pending references */
count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
/* We allow PTE_FRAG_NR fragments from a PTE page */
- if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
+ if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
pgtable_page_dtor(page);
- free_unref_page(page);
+ __free_page(page);
}
}
@@ -215,13 +215,13 @@ static void pmd_frag_destroy(void *pmd_frag)
/* drop all the pending references */
count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
/* We allow PMD_FRAG_NR fragments from a PMD page */
- if (page_ref_sub_and_test(page, PMD_FRAG_NR - count)) {
+ if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
pgtable_pmd_page_dtor(page);
- free_unref_page(page);
+ __free_page(page);
}
}
-static void destroy_pagetable_page(struct mm_struct *mm)
+static void destroy_pagetable_cache(struct mm_struct *mm)
{
void *frag;
@@ -244,13 +244,14 @@ void destroy_context(struct mm_struct *mm)
WARN_ON(process_tb[mm->context.id].prtb0 != 0);
else
subpage_prot_free(mm);
- destroy_pagetable_page(mm);
destroy_contexts(&mm->context);
mm->context.id = MMU_NO_CONTEXT;
}
void arch_exit_mmap(struct mm_struct *mm)
{
+ destroy_pagetable_cache(mm);
+
if (radix_enabled()) {
/*
* Radix doesn't have a valid bit in the process table
@@ -273,15 +274,7 @@ void arch_exit_mmap(struct mm_struct *mm)
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
-
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- isync();
- mtspr(SPRN_PID, next->context.id);
- isync();
- asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
- } else {
- mtspr(SPRN_PID, next->context.id);
- isync();
- }
+ mtspr(SPRN_PID, next->context.id);
+ isync();
}
#endif
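
pte_frag_destroy() and pmd_frag_destroy() above retire the references that were never handed out in a single atomic_sub_and_test() on the new pt_frag_refcount, freeing the page only when the count hits zero. A runnable C11 model of that primitive, with PTE_FRAG_NR assumed to be 16:

    #include <assert.h>
    #include <stdatomic.h>

    /* stand-in for the kernel's atomic_sub_and_test():
     * subtract i, return true if the result is zero */
    static int sub_and_test(atomic_int *v, int i)
    {
            return atomic_fetch_sub(v, i) - i == 0;
    }

    int main(void)
    {
            atomic_int pt_frag_refcount = 16;       /* PTE_FRAG_NR, assumed */

            assert(!sub_and_test(&pt_frag_refcount, 10));   /* 6 still live */
            assert(sub_and_test(&pt_frag_refcount, 6));     /* last ref: free */
            return 0;
    }
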
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c
index aa5a7fd89461..921c1e33e941 100644
--- a/arch/powerpc/mm/mmu_context_hash32.c
+++ b/arch/powerpc/mm/mmu_context_hash32.c
@@ -27,7 +27,6 @@
#include <linux/export.h>
#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
/*
* On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index abb43646927a..a4ca57612558 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -19,6 +19,7 @@
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>
+#include <asm/pte-walk.h>
static DEFINE_MUTEX(mem_list_mutex);
@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
struct rcu_head rcu;
unsigned long used;
atomic64_t mapped;
+ unsigned int pageshift;
u64 ua; /* userspace address */
u64 entries; /* number of entries in hpas[] */
u64 *hpas; /* vmalloc'ed */
@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
{
struct mm_iommu_table_group_mem_t *mem;
long i, j, ret = 0, locked_entries = 0;
+ unsigned int pageshift;
+ unsigned long flags;
struct page *page = NULL;
mutex_lock(&mem_list_mutex);
@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
goto unlock_exit;
}
+ /*
+	 * As a starting point for the maximum page size calculation,
+	 * we use the natural alignment of @ua and @entries, to allow
+	 * IOMMU pages smaller than huge pages but still bigger than
+	 * PAGE_SIZE.
+ */
+ mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
if (!mem->hpas) {
kfree(mem);
@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
}
}
populate:
+ pageshift = PAGE_SHIFT;
+ if (PageCompound(page)) {
+ pte_t *pte;
+ struct page *head = compound_head(page);
+ unsigned int compshift = compound_order(head);
+
+		local_irq_save(flags); /* this also disables interrupts */
+ pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+ local_irq_restore(flags);
+
+ /* Double check it is still the same pinned page */
+ if (pte && pte_page(*pte) == head &&
+ pageshift == compshift)
+ pageshift = max_t(unsigned int, pageshift,
+ PAGE_SHIFT);
+ }
+ mem->pageshift = min(mem->pageshift, pageshift);
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
}
@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
EXPORT_SYMBOL_GPL(mm_iommu_find);
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned long *hpa)
+ unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
const long entry = (ua - mem->ua) >> PAGE_SHIFT;
u64 *va = &mem->hpas[entry];
@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
if (entry >= mem->entries)
return -EFAULT;
+ if (pageshift > mem->pageshift)
+ return -EFAULT;
+
*hpa = *va | (ua & ~PAGE_MASK);
return 0;
@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned long *hpa)
+ unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
const long entry = (ua - mem->ua) >> PAGE_SHIFT;
void *va = &mem->hpas[entry];
@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
if (entry >= mem->entries)
return -EFAULT;
+ if (pageshift > mem->pageshift)
+ return -EFAULT;
+
pa = (void *) vmalloc_to_phys(va);
if (!pa)
return -EFAULT;
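
mem->pageshift starts from the natural alignment of the registration, __ffs(ua | (entries << PAGE_SHIFT)), is then clamped down to the smallest backing page actually pinned, and mm_iommu_ua_to_hpa() rejects any request for a larger IOMMU page. A small model of the starting bound, assuming 64K base pages and nonzero inputs:

    #include <assert.h>

    #define PAGE_SHIFT 16   /* assumed 64K base pages */

    /* __ffs() equivalent: index of the lowest set bit */
    static unsigned int max_pageshift(unsigned long ua, unsigned long entries)
    {
            return __builtin_ctzl(ua | (entries << PAGE_SHIFT));
    }

    int main(void)
    {
            /* 16 entries at a 1M-aligned address: at most a 2^20 IOMMU page */
            assert(max_pageshift(0x100000UL, 16) == 20);
            return 0;
    }
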
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index c4c0a09a7775..e5d779eed181 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -19,7 +19,6 @@
*
*/
#include <linux/mm.h>
-#include <asm/tlbflush.h>
#include <asm/mmu.h>
#ifdef CONFIG_PPC_MMU_NOHASH
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 4afbfbb64bfd..01d7c0f7c4f0 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -270,6 +270,8 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
return NULL;
}
+ atomic_set(&page->pt_frag_refcount, 1);
+
ret = page_address(page);
/*
* if we support only one fragment just return the
@@ -285,7 +287,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
* count.
*/
if (likely(!mm->context.pmd_frag)) {
- set_page_count(page, PMD_FRAG_NR);
+ atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
}
spin_unlock(&mm->page_table_lock);
@@ -308,9 +310,10 @@ void pmd_fragment_free(unsigned long *pmd)
{
struct page *page = virt_to_page(pmd);
- if (put_page_testzero(page)) {
+ BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+ if (atomic_dec_and_test(&page->pt_frag_refcount)) {
pgtable_pmd_page_dtor(page);
- free_unref_page(page);
+ __free_page(page);
}
}
@@ -352,6 +355,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
return NULL;
}
+ atomic_set(&page->pt_frag_refcount, 1);
ret = page_address(page);
/*
@@ -367,7 +371,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
* count.
*/
if (likely(!mm->context.pte_frag)) {
- set_page_count(page, PTE_FRAG_NR);
+ atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
mm->context.pte_frag = ret + PTE_FRAG_SIZE;
}
spin_unlock(&mm->page_table_lock);
@@ -390,10 +394,11 @@ void pte_fragment_free(unsigned long *table, int kernel)
{
struct page *page = virt_to_page(table);
- if (put_page_testzero(page)) {
+ BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+ if (atomic_dec_and_test(&page->pt_frag_refcount)) {
if (!kernel)
pgtable_page_dtor(page);
- free_unref_page(page);
+ __free_page(page);
}
}
@@ -450,3 +455,25 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
return pgtable_free(table, index);
}
#endif
+
+#ifdef CONFIG_PROC_FS
+atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+
+void arch_report_meminfo(struct seq_file *m)
+{
+ /*
+	 * Hash maps all of memory with a single page size, mmu_linear_psize,
+	 * so don't bother printing these on hash.
+ */
+ if (!radix_enabled())
+ return;
+ seq_printf(m, "DirectMap4k: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
+ seq_printf(m, "DirectMap64k: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
+ seq_printf(m, "DirectMap2M: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
+ seq_printf(m, "DirectMap1G: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
+}
+#endif /* CONFIG_PROC_FS */
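
The shift counts in arch_report_meminfo() convert page counts to kB, i.e. count << (page_shift - 10): 2 for 4K, 6 for 64K, 11 for 2M and 20 for 1G pages. For example (helper name is ours):

    #include <assert.h>

    /* pages of size (1 << shift) bytes, expressed in kB */
    static unsigned long pages_to_kb(unsigned long count, unsigned int shift)
    {
            return count << (shift - 10);
    }

    int main(void)
    {
            assert(pages_to_kb(1, 12) == 4);        /* one 4K page,   << 2  */
            assert(pages_to_kb(1, 21) == 2048);     /* one 2M page,   << 11 */
            assert(pages_to_kb(3, 30) == 3145728);  /* three 1G pages, << 20 */
            return 0;
    }
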
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 96f68c5aa1f5..7be99fd9af15 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -226,16 +226,6 @@ void radix__mark_rodata_ro(void)
{
unsigned long start, end;
- /*
- * mark_rodata_ro() will mark itself as !writable at some point.
- * Due to DD1 workaround in radix__pte_update(), we'll end up with
- * an invalid pte and the system will crash quite severly.
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
- return;
- }
-
start = (unsigned long)_stext;
end = (unsigned long)__init_begin;
@@ -277,6 +267,7 @@ static int __meminit create_physical_mapping(unsigned long start,
#else
int split_text_mapping = 0;
#endif
+ int psize;
start = _ALIGN_UP(start, PAGE_SIZE);
for (addr = start; addr < end; addr += mapping_size) {
@@ -290,13 +281,17 @@ static int __meminit create_physical_mapping(unsigned long start,
retry:
if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
mmu_psize_defs[MMU_PAGE_1G].shift &&
- PUD_SIZE <= max_mapping_size)
+ PUD_SIZE <= max_mapping_size) {
mapping_size = PUD_SIZE;
- else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
- mmu_psize_defs[MMU_PAGE_2M].shift)
+ psize = MMU_PAGE_1G;
+ } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
+ mmu_psize_defs[MMU_PAGE_2M].shift) {
mapping_size = PMD_SIZE;
- else
+ psize = MMU_PAGE_2M;
+ } else {
mapping_size = PAGE_SIZE;
+ psize = mmu_virtual_psize;
+ }
if (split_text_mapping && (mapping_size == PUD_SIZE) &&
(addr <= __pa_symbol(__init_begin)) &&
@@ -307,8 +302,10 @@ retry:
if (split_text_mapping && (mapping_size == PMD_SIZE) &&
(addr <= __pa_symbol(__init_begin)) &&
- (addr + mapping_size) >= __pa_symbol(_stext))
+ (addr + mapping_size) >= __pa_symbol(_stext)) {
mapping_size = PAGE_SIZE;
+ psize = mmu_virtual_psize;
+ }
if (mapping_size != previous_size) {
print_mapping(start, addr, previous_size);
@@ -326,6 +323,8 @@ retry:
rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
if (rc)
return rc;
+
+ update_page_count(psize, 1);
}
print_mapping(start, addr, mapping_size);
@@ -533,35 +532,6 @@ found:
return;
}
-static void update_hid_for_radix(void)
-{
- unsigned long hid0;
- unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
-
- asm volatile("ptesync": : :"memory");
- /* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
- /* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
- asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
- trace_tlbie(0, 0, rb, 0, 2, 0, 1);
- trace_tlbie(0, 0, rb, 0, 2, 1, 1);
-
- /*
- * now switch the HID
- */
- hid0 = mfspr(SPRN_HID0);
- hid0 |= HID0_POWER9_RADIX;
- mtspr(SPRN_HID0, hid0);
- asm volatile("isync": : :"memory");
-
- /* Wait for it to happen */
- while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
- cpu_relax();
-}
-
static void radix_init_amor(void)
{
/*
@@ -576,22 +546,12 @@ static void radix_init_amor(void)
static void radix_init_iamr(void)
{
- unsigned long iamr;
-
- /*
- * The IAMR should set to 0 on DD1.
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- iamr = 0;
- else
- iamr = (1ul << 62);
-
/*
* Radix always uses key0 of the IAMR to determine if an access is
* allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
* fetch.
*/
- mtspr(SPRN_IAMR, iamr);
+ mtspr(SPRN_IAMR, (1ul << 62));
}
void __init radix__early_init_mmu(void)
@@ -644,8 +604,6 @@ void __init radix__early_init_mmu(void)
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
radix_init_native();
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- update_hid_for_radix();
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
radix_init_partition_table();
@@ -671,10 +629,6 @@ void radix__early_init_mmu_secondary(void)
* update partition table control register and UPRT
*/
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- update_hid_for_radix();
-
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
@@ -1095,8 +1049,7 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
* To avoid NMMU hang while relaxing access, we need to mark
* the pte invalid in between.
*/
- if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
- atomic_read(&mm->context.copros) > 0) {
+ if (atomic_read(&mm->context.copros) > 0) {
unsigned long old_pte, new_pte;
old_pte = __radix_pte_update(ptep, ~0, 0);
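
create_physical_mapping() now records which page size backed each stretch so the new update_page_count() call can feed the DirectMap counters above. The size selection itself is by alignment and remaining gap, largest first; a simplified standalone sketch (constants and helper name are ours; the real code also consults mmu_psize_defs and text-section boundaries):

    #include <assert.h>

    #define SZ_64K  (1UL << 16)
    #define SZ_2M   (1UL << 21)
    #define SZ_1G   (1UL << 30)

    static unsigned long pick_size(unsigned long addr, unsigned long gap)
    {
            if (!(addr & (SZ_1G - 1)) && gap >= SZ_1G)
                    return SZ_1G;
            if (!(addr & (SZ_2M - 1)) && gap >= SZ_2M)
                    return SZ_2M;
            return SZ_64K;
    }

    int main(void)
    {
            assert(pick_size(SZ_1G, 2 * SZ_1G) == SZ_1G);
            assert(pick_size(SZ_2M, SZ_1G - SZ_2M) == SZ_2M);
            return 0;
    }
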
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index e6f500fabf5e..333b1f80c435 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -14,9 +14,12 @@ DEFINE_STATIC_KEY_TRUE(pkey_disabled);
bool pkey_execute_disable_supported;
int pkeys_total; /* Total pkeys as per device tree */
bool pkeys_devtree_defined; /* pkey property exported by device tree */
-u32 initial_allocation_mask; /* Bits set for reserved keys */
-u64 pkey_amr_uamor_mask; /* Bits in AMR/UMOR not to be touched */
+u32 initial_allocation_mask; /* Bits set for the initially allocated keys */
+u32 reserved_allocation_mask; /* Bits set for reserved keys */
+u64 pkey_amr_mask; /* Bits in AMR not to be touched */
u64 pkey_iamr_mask; /* Bits in IAMR not to be touched */
+u64 pkey_uamor_mask;	/* Bits in UAMOR not to be touched */
+int execute_only_key = 2;
#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT 0x1UL
@@ -91,7 +94,7 @@ int pkey_initialize(void)
* arch-neutral code.
*/
pkeys_total = min_t(int, pkeys_total,
- (ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT));
+ ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)+1));
if (!pkey_mmu_enabled() || radix_enabled() || !pkeys_total)
static_branch_enable(&pkey_disabled);
@@ -119,20 +122,39 @@ int pkey_initialize(void)
#else
os_reserved = 0;
#endif
- initial_allocation_mask = ~0x0;
- pkey_amr_uamor_mask = ~0x0ul;
+ /* Bits are in LE format. */
+ reserved_allocation_mask = (0x1 << 1) | (0x1 << execute_only_key);
+
+ /* register mask is in BE format */
+ pkey_amr_mask = ~0x0ul;
+ pkey_amr_mask &= ~(0x3ul << pkeyshift(0));
+
pkey_iamr_mask = ~0x0ul;
- /*
- * key 0, 1 are reserved.
- * key 0 is the default key, which allows read/write/execute.
- * key 1 is recommended not to be used. PowerISA(3.0) page 1015,
- * programming note.
- */
- for (i = 2; i < (pkeys_total - os_reserved); i++) {
- initial_allocation_mask &= ~(0x1 << i);
- pkey_amr_uamor_mask &= ~(0x3ul << pkeyshift(i));
- pkey_iamr_mask &= ~(0x1ul << pkeyshift(i));
+ pkey_iamr_mask &= ~(0x3ul << pkeyshift(0));
+ pkey_iamr_mask &= ~(0x3ul << pkeyshift(execute_only_key));
+
+ pkey_uamor_mask = ~0x0ul;
+ pkey_uamor_mask &= ~(0x3ul << pkeyshift(0));
+ pkey_uamor_mask &= ~(0x3ul << pkeyshift(execute_only_key));
+
+ /* mark the rest of the keys as reserved and hence unavailable */
+ for (i = (pkeys_total - os_reserved); i < pkeys_total; i++) {
+ reserved_allocation_mask |= (0x1 << i);
+ pkey_uamor_mask &= ~(0x3ul << pkeyshift(i));
+ }
+ initial_allocation_mask = reserved_allocation_mask | (0x1 << 0);
+
+ if (unlikely((pkeys_total - os_reserved) <= execute_only_key)) {
+ /*
+		 * Insufficient number of keys to support an execute-only
+		 * key. Mark it unavailable. Any AMR, UAMOR or IAMR bit
+		 * set for this key is irrelevant since this key can
+		 * never be allocated.
+ */
+ execute_only_key = -1;
}
+
return 0;
}
@@ -143,8 +165,7 @@ void pkey_mm_init(struct mm_struct *mm)
if (static_branch_likely(&pkey_disabled))
return;
mm_pkey_allocation_map(mm) = initial_allocation_mask;
- /* -1 means unallocated or invalid */
- mm->context.execute_only_pkey = -1;
+ mm->context.execute_only_pkey = execute_only_key;
}
static inline u64 read_amr(void)
@@ -213,33 +234,6 @@ static inline void init_iamr(int pkey, u8 init_bits)
write_iamr(old_iamr | new_iamr_bits);
}
-static void pkey_status_change(int pkey, bool enable)
-{
- u64 old_uamor;
-
- /* Reset the AMR and IAMR bits for this key */
- init_amr(pkey, 0x0);
- init_iamr(pkey, 0x0);
-
- /* Enable/disable key */
- old_uamor = read_uamor();
- if (enable)
- old_uamor |= (0x3ul << pkeyshift(pkey));
- else
- old_uamor &= ~(0x3ul << pkeyshift(pkey));
- write_uamor(old_uamor);
-}
-
-void __arch_activate_pkey(int pkey)
-{
- pkey_status_change(pkey, true);
-}
-
-void __arch_deactivate_pkey(int pkey)
-{
- pkey_status_change(pkey, false);
-}
-
/*
* Set the access rights in AMR IAMR and UAMOR registers for @pkey to that
* specified in @init_val.
@@ -289,9 +283,6 @@ void thread_pkey_regs_restore(struct thread_struct *new_thread,
if (static_branch_likely(&pkey_disabled))
return;
- /*
- * TODO: Just set UAMOR to zero if @new_thread hasn't used any keys yet.
- */
if (old_thread->amr != new_thread->amr)
write_amr(new_thread->amr);
if (old_thread->iamr != new_thread->iamr)
@@ -305,9 +296,13 @@ void thread_pkey_regs_init(struct thread_struct *thread)
if (static_branch_likely(&pkey_disabled))
return;
- thread->amr = read_amr() & pkey_amr_uamor_mask;
- thread->iamr = read_iamr() & pkey_iamr_mask;
- thread->uamor = read_uamor() & pkey_amr_uamor_mask;
+ thread->amr = pkey_amr_mask;
+ thread->iamr = pkey_iamr_mask;
+ thread->uamor = pkey_uamor_mask;
+
+ write_uamor(pkey_uamor_mask);
+ write_amr(pkey_amr_mask);
+ write_iamr(pkey_iamr_mask);
}
static inline bool pkey_allows_readwrite(int pkey)
@@ -322,48 +317,7 @@ static inline bool pkey_allows_readwrite(int pkey)
int __execute_only_pkey(struct mm_struct *mm)
{
- bool need_to_set_mm_pkey = false;
- int execute_only_pkey = mm->context.execute_only_pkey;
- int ret;
-
- /* Do we need to assign a pkey for mm's execute-only maps? */
- if (execute_only_pkey == -1) {
- /* Go allocate one to use, which might fail */
- execute_only_pkey = mm_pkey_alloc(mm);
- if (execute_only_pkey < 0)
- return -1;
- need_to_set_mm_pkey = true;
- }
-
- /*
- * We do not want to go through the relatively costly dance to set AMR
- * if we do not need to. Check it first and assume that if the
- * execute-only pkey is readwrite-disabled than we do not have to set it
- * ourselves.
- */
- if (!need_to_set_mm_pkey && !pkey_allows_readwrite(execute_only_pkey))
- return execute_only_pkey;
-
- /*
- * Set up AMR so that it denies access for everything other than
- * execution.
- */
- ret = __arch_set_user_pkey_access(current, execute_only_pkey,
- PKEY_DISABLE_ACCESS |
- PKEY_DISABLE_WRITE);
- /*
- * If the AMR-set operation failed somehow, just return 0 and
- * effectively disable execute-only support.
- */
- if (ret) {
- mm_pkey_free(mm, execute_only_pkey);
- return -1;
- }
-
- /* We got one, store it and use it from here on out */
- if (need_to_set_mm_pkey)
- mm->context.execute_only_pkey = execute_only_pkey;
- return execute_only_pkey;
+ return mm->context.execute_only_pkey;
}
static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
@@ -407,9 +361,6 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
int pkey_shift;
u64 amr;
- if (!pkey)
- return true;
-
if (!is_pkey_enabled(pkey))
return true;
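
The pkeys rework reserves key 1 and the execute-only key up front, marks the keys beyond what the OS exposes as reserved, and treats key 0 as always allocated. A runnable model of the bitmask bookkeeping, with assumed values (32 keys, no OS-reserved keys):

    #include <assert.h>

    int main(void)
    {
            int pkeys_total = 32, os_reserved = 0, execute_only_key = 2;
            unsigned int reserved = (1u << 1) | (1u << execute_only_key);

            /* keys the OS does not expose are reserved as well */
            for (int i = pkeys_total - os_reserved; i < pkeys_total; i++)
                    reserved |= 1u << i;

            /* key 0 is the default key and always allocated */
            unsigned int initial = reserved | (1u << 0);

            assert(initial & (1u << execute_only_key));
            assert(!(initial & (1u << 3)));  /* key 3 remains allocatable */
            return 0;
    }
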
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cb796724a6fc..0b095fa54049 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -90,6 +90,45 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
: "memory" );
}
+/*
+ * Insert bolted entries into SLB (which may not be empty, so don't clear
+ * slb_cache_ptr).
+ */
+void __slb_restore_bolted_realmode(void)
+{
+ struct slb_shadow *p = get_slb_shadow();
+ enum slb_index index;
+
+ /* No isync needed because realmode. */
+ for (index = 0; index < SLB_NUM_BOLTED; index++) {
+ asm volatile("slbmte %0,%1" :
+ : "r" (be64_to_cpu(p->save_area[index].vsid)),
+ "r" (be64_to_cpu(p->save_area[index].esid)));
+ }
+}
+
+/*
+ * Insert the bolted entries into an empty SLB.
+ * This is not the same as rebolt because the bolted segments are not
+ * changed, just loaded from the shadow area.
+ */
+void slb_restore_bolted_realmode(void)
+{
+ __slb_restore_bolted_realmode();
+ get_paca()->slb_cache_ptr = 0;
+}
+
+/*
+ * This flushes all SLB entries including 0, so it must be realmode.
+ */
+void slb_flush_all_realmode(void)
+{
+ asm volatile("slbmte %0,%0; slbia" : : "r" (0));
+}
+
static void __slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index a83fbd2a4a24..4ac5057ad439 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -22,6 +22,7 @@
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>
+#include <asm/feature-fixups.h>
/*
* This macro generates asm code to compute the VSID scramble
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 9d16ee251fc0..3327551c8b47 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -17,7 +17,6 @@
#include <asm/pgtable.h>
#include <linux/uaccess.h>
-#include <asm/tlbflush.h>
/*
* Free all pages allocated for subpage protection maps and pointers.
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 1135b43a597c..fef3e1eb3a19 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -1048,24 +1048,6 @@ void radix__flush_tlb_all(void)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
-void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
- unsigned long address)
-{
- /*
- * We track page size in pte only for DD1, So we can
- * call this only on DD1.
- */
- if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- VM_WARN_ON(1);
- return;
- }
-
- if (old_pte & R_PAGE_LARGE)
- radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
- else
- radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
-}
-
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index eb82d787d99a..7fd20c52a8ec 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -22,6 +22,7 @@
#include <asm/ppc-opcode.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
+#include <asm/feature-fixups.h>
#ifdef CONFIG_PPC_64K_PAGES
#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1)
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index 048b8e9f4492..e066a658acac 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -34,6 +34,8 @@
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/bug.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
#if defined(CONFIG_40x)
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
index 809f019d3cba..c2dec3a68d4c 100644
--- a/arch/powerpc/net/Makefile
+++ b/arch/powerpc/net/Makefile
@@ -2,7 +2,7 @@
#
# Arch-specific network modules
#
-ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_PPC64
obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
else
obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
index a8cd7e289ecd..6f4daacad296 100644
--- a/arch/powerpc/net/bpf_jit32.h
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -13,6 +13,7 @@
#ifndef _BPF_JIT32_H
#define _BPF_JIT32_H
+#include <asm/asm-compat.h>
#include "bpf_jit.h"
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/net/bpf_jit_asm.S b/arch/powerpc/net/bpf_jit_asm.S
index 3dd9c43d40c9..c80280dc2e04 100644
--- a/arch/powerpc/net/bpf_jit_asm.S
+++ b/arch/powerpc/net/bpf_jit_asm.S
@@ -10,6 +10,7 @@
*/
#include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
#include "bpf_jit32.h"
/*
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 5b061fc81df3..d5bfe24bb3b5 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
+#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 380cbf9a40d9..50b129785aee 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -13,6 +13,7 @@
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
+#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
@@ -286,6 +287,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
u64 imm64;
u8 *func;
u32 true_cond;
+ u32 tmp_idx;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +639,7 @@ emit_clear:
case BPF_STX | BPF_XADD | BPF_W:
/* Get EA into TMP_REG_1 */
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
- /* error if EA is not word-aligned */
- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
- PPC_LI(b2p[BPF_REG_0], 0);
- PPC_JMP(exit_addr);
+ tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
/* add value from src_reg into this */
@@ -649,32 +647,16 @@ emit_clear:
/* store result back */
PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
/* we're done if this succeeded */
- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
- /* otherwise, let's try once more */
- PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
- PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
- /* exit if the store was not successful */
- PPC_LI(b2p[BPF_REG_0], 0);
- PPC_BCC(COND_NE, exit_addr);
+ PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
/* *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW:
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
- /* error if EA is not doubleword-aligned */
- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
- PPC_LI(b2p[BPF_REG_0], 0);
- PPC_JMP(exit_addr);
- PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
- PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+ tmp_idx = ctx->idx * 4;
PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
- PPC_LI(b2p[BPF_REG_0], 0);
- PPC_BCC(COND_NE, exit_addr);
+ PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
/*
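
With the alignment side exits dropped, the JIT emits one larx/add/stcx. loop per BPF_XADD: tmp_idx remembers the offset of the load-reserve, and a single conditional branch back to it retries when the store-conditional loses the reservation. Semantically this is an atomic add; a C11 equivalent for reference:

    #include <assert.h>
    #include <stdatomic.h>

    int main(void)
    {
            _Atomic unsigned int word = 40;

            /* *(u32 *)(dst + off) += src, atomically: what the emitted
             * lwarx / add / stwcx. / bne sequence implements */
            atomic_fetch_add_explicit(&word, 2, memory_order_relaxed);

            assert(word == 42);
            return 0;
    }
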
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 3f66fcf8ad99..81f8a0c838ae 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -128,10 +128,6 @@ static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
-static bool use_ic(u64 event)
-{
- return false;
-}
#endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs)
@@ -714,14 +710,6 @@ static void pmao_restore_workaround(bool ebb)
mtspr(SPRN_PMC6, pmcs[5]);
}
-static bool use_ic(u64 event)
-{
- if (cpu_has_feature(CPU_FTR_POWER9_DD1) &&
- (event == 0x200f2 || event == 0x300f2))
- return true;
-
- return false;
-}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
@@ -1046,7 +1034,6 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (event->hw.state & PERF_HES_STOPPED)
return;
@@ -1056,13 +1043,6 @@ static void power_pmu_read(struct perf_event *event)
if (is_ebb_event(event)) {
val = read_pmc(event->hw.idx);
- if (use_ic(event->attr.config)) {
- val = mfspr(SPRN_IC);
- if (val > cpuhw->ic_init)
- val = val - cpuhw->ic_init;
- else
- val = val + (0 - cpuhw->ic_init);
- }
local64_set(&event->hw.prev_count, val);
return;
}
@@ -1076,13 +1056,6 @@ static void power_pmu_read(struct perf_event *event)
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
- if (use_ic(event->attr.config)) {
- val = mfspr(SPRN_IC);
- if (val > cpuhw->ic_init)
- val = val - cpuhw->ic_init;
- else
- val = val + (0 - cpuhw->ic_init);
- }
delta = check_and_compute_delta(prev, val);
if (!delta)
return;
@@ -1469,7 +1442,7 @@ static int collect_events(struct perf_event *group, int max_count,
}
/*
- * Add a event to the PMU.
+ * Add an event to the PMU.
* If all events are not already frozen, then we disable and
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
@@ -1535,20 +1508,13 @@ nocheck:
event->attr.branch_sample_type);
}
- /*
- * Workaround for POWER9 DD1 to use the Instruction Counter
- * register value for instruction counting
- */
- if (use_ic(event->attr.config))
- cpuhw->ic_init = mfspr(SPRN_IC);
-
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
/*
- * Remove a event from the PMU.
+ * Remove an event from the PMU.
*/
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
@@ -1742,7 +1708,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
- * A event can only go on a limited PMC if it counts something
+ * An event can only go on a limited PMC if it counts something
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index d1977b61f827..1fafc32b12a0 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -867,59 +867,6 @@ static int thread_imc_cpu_init(void)
ppc_thread_imc_cpu_offline);
}
-void thread_imc_pmu_sched_task(struct perf_event_context *ctx,
- bool sched_in)
-{
- int core_id;
- struct imc_pmu_ref *ref;
-
- if (!is_core_imc_mem_inited(smp_processor_id()))
- return;
-
- core_id = smp_processor_id() / threads_per_core;
- /*
- * imc pmus are enabled only when it is used.
- * See if this is triggered for the first time.
- * If yes, take the mutex lock and enable the counters.
- * If not, just increment the count in ref count struct.
- */
- ref = &core_imc_refc[core_id];
- if (!ref)
- return;
-
- if (sched_in) {
- mutex_lock(&ref->lock);
- if (ref->refc == 0) {
- if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
- get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
- pr_err("thread-imc: Unable to start the counter\
- for core %d\n", core_id);
- return;
- }
- }
- ++ref->refc;
- mutex_unlock(&ref->lock);
- } else {
- mutex_lock(&ref->lock);
- ref->refc--;
- if (ref->refc == 0) {
- if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
- get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
- pr_err("thread-imc: Unable to stop the counters\
- for core %d\n", core_id);
- return;
- }
- } else if (ref->refc < 0) {
- ref->refc = 0;
- }
- mutex_unlock(&ref->lock);
- }
-
- return;
-}
-
static int thread_imc_event_init(struct perf_event *event)
{
u32 config = event->attr.config;
@@ -1046,22 +993,70 @@ static int imc_event_add(struct perf_event *event, int flags)
static int thread_imc_event_add(struct perf_event *event, int flags)
{
+ int core_id;
+ struct imc_pmu_ref *ref;
+
if (flags & PERF_EF_START)
imc_event_start(event, flags);
- /* Enable the sched_task to start the engine */
- perf_sched_cb_inc(event->ctx->pmu);
+ if (!is_core_imc_mem_inited(smp_processor_id()))
+ return -EINVAL;
+
+ core_id = smp_processor_id() / threads_per_core;
+ /*
+	 * imc pmus are enabled only while in use.
+ * See if this is triggered for the first time.
+ * If yes, take the mutex lock and enable the counters.
+ * If not, just increment the count in ref count struct.
+ */
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return -EINVAL;
+
+ mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to start the counter\
+ for core %d\n", core_id);
+ return -EINVAL;
+ }
+ }
+ ++ref->refc;
+ mutex_unlock(&ref->lock);
return 0;
}
static void thread_imc_event_del(struct perf_event *event, int flags)
{
+
+ int core_id;
+ struct imc_pmu_ref *ref;
+
/*
* Take a snapshot and calculate the delta and update
* the event counter values.
*/
imc_event_update(event);
- perf_sched_cb_dec(event->ctx->pmu);
+
+ core_id = smp_processor_id() / threads_per_core;
+ ref = &core_imc_refc[core_id];
+
+ mutex_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+ if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to stop the counters\
+ for core %d\n", core_id);
+ return;
+ }
+ } else if (ref->refc < 0) {
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
}
/* update_pmu_ops : Populate the appropriate operations for "pmu" */
@@ -1087,7 +1082,6 @@ static int update_pmu_ops(struct imc_pmu *pmu)
break;
case IMC_DOMAIN_THREAD:
pmu->pmu.event_init = thread_imc_event_init;
- pmu->pmu.sched_task = thread_imc_pmu_sched_task;
pmu->pmu.add = thread_imc_event_add;
pmu->pmu.del = thread_imc_event_del;
pmu->pmu.start_txn = thread_imc_pmu_start_txn;
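
Moving the engine start/stop from the sched_task hook into event add/del keeps the same per-core refcounting: the first add on a core starts the in-memory collection engine, the last del stops it. A user-space model of the discipline (a pthread mutex stands in for the kernel mutex; names are ours):

    #include <assert.h>
    #include <pthread.h>

    static int refc;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void event_add(void)
    {
            pthread_mutex_lock(&lock);
            if (refc++ == 0) {
                    /* opal_imc_counters_start() would run here */
            }
            pthread_mutex_unlock(&lock);
    }

    static void event_del(void)
    {
            pthread_mutex_lock(&lock);
            if (--refc == 0) {
                    /* opal_imc_counters_stop() would run here */
            }
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            event_add();
            event_add();    /* second event: engine already running */
            event_del();
            event_del();    /* last event: engine stops */
            assert(refc == 0);
            return 0;
    }
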
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 2efee3f196f5..177de814286f 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -59,7 +59,7 @@ static bool is_event_valid(u64 event)
{
u64 valid_mask = EVENT_VALID_MASK;
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
valid_mask = p9_EVENT_VALID_MASK;
return !(event & ~valid_mask);
@@ -86,8 +86,6 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
* In case of Power9:
* Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
* or if group already have any marked events.
- * Non-Marked events (for DD1):
- * MMCRA[SDAR_MODE] will be set to 0b01
* For rest
* MMCRA[SDAR_MODE] will be set from event code.
* If sdar_mode from event is zero, default to 0b01. Hardware
@@ -96,7 +94,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
- else if (!cpu_has_feature(CPU_FTR_POWER9_DD1) && p9_SDAR_MODE(event))
+ else if (p9_SDAR_MODE(event))
*mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
else
*mmcra |= MMCRA_SDAR_MODE_DCACHE;
@@ -106,7 +104,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
static u64 thresh_cmp_val(u64 value)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return value << p9_MMCRA_THR_CMP_SHIFT;
return value << MMCRA_THR_CMP_SHIFT;
@@ -114,7 +112,7 @@ static u64 thresh_cmp_val(u64 value)
static unsigned long combine_from_event(u64 event)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return p9_EVENT_COMBINE(event);
return EVENT_COMBINE(event);
@@ -122,7 +120,7 @@ static unsigned long combine_from_event(u64 event)
static unsigned long combine_shift(unsigned long pmc)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return p9_MMCR1_COMBINE_SHIFT(pmc);
return MMCR1_COMBINE_SHIFT(pmc);
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 6a0b586c935a..0028f4b9490d 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -158,11 +158,6 @@
CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
-/*
- * Lets restrict use of PMC5 for instruction counting.
- */
-#define P9_DD1_TEST_ADDER (ISA207_TEST_ADDER | CNST_PMC_VAL(5))
-
/* Bits in MMCR1 for PowerISA v2.07 */
#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 2ca0b33b4efb..e012b1030a5b 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -219,12 +219,6 @@ static struct attribute_group power9_pmu_events_group = {
.attrs = power9_events_attr,
};
-static const struct attribute_group *power9_isa207_pmu_attr_groups[] = {
- &isa207_pmu_format_group,
- &power9_pmu_events_group,
- NULL,
-};
-
PMU_FORMAT_ATTR(event, "config:0-51");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
@@ -267,17 +261,6 @@ static const struct attribute_group *power9_pmu_attr_groups[] = {
NULL,
};
-static int power9_generic_events_dd1[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
- [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_DISP,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL_ALT,
- [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
- [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
- [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
-};
-
static int power9_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
@@ -439,25 +422,6 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
#undef C
-static struct power_pmu power9_isa207_pmu = {
- .name = "POWER9",
- .n_counter = MAX_PMU_COUNTERS,
- .add_fields = ISA207_ADD_FIELDS,
- .test_adder = P9_DD1_TEST_ADDER,
- .compute_mmcr = isa207_compute_mmcr,
- .config_bhrb = power9_config_bhrb,
- .bhrb_filter_map = power9_bhrb_filter_map,
- .get_constraint = isa207_get_constraint,
- .get_alternatives = power9_get_alternatives,
- .disable_pmc = isa207_disable_pmc,
- .flags = PPMU_NO_SIAR | PPMU_ARCH_207S,
- .n_generic = ARRAY_SIZE(power9_generic_events_dd1),
- .generic_events = power9_generic_events_dd1,
- .cache_events = &power9_cache_events,
- .attr_groups = power9_isa207_pmu_attr_groups,
- .bhrb_nr = 32,
-};
-
static struct power_pmu power9_pmu = {
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
@@ -500,23 +464,7 @@ static int __init init_power9_pmu(void)
}
}
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- /*
- * Since PM_INST_CMPL may not provide right counts in all
- * sampling scenarios in power9 DD1, instead use PM_INST_DISP.
- */
- EVENT_VAR(PM_INST_CMPL, _g).id = PM_INST_DISP;
- /*
- * Power9 DD1 should use PM_BR_CMPL_ALT event code for
- * "branches" to provide correct counter value.
- */
- EVENT_VAR(PM_BR_CMPL, _g).id = PM_BR_CMPL_ALT;
- EVENT_VAR(PM_BR_CMPL, _c).id = PM_BR_CMPL_ALT;
- rc = register_power_pmu(&power9_isa207_pmu);
- } else {
- rc = register_power_pmu(&power9_pmu);
- }
-
+ rc = register_power_pmu(&power9_pmu);
if (rc)
return rc;
diff --git a/arch/powerpc/perf/req-gen/_begin.h b/arch/powerpc/perf/req-gen/_begin.h
index 549f8782c52d..a200b86eba3b 100644
--- a/arch/powerpc/perf/req-gen/_begin.h
+++ b/arch/powerpc/perf/req-gen/_begin.h
@@ -3,6 +3,8 @@
#ifndef POWERPC_PERF_REQ_GEN_H_
#define POWERPC_PERF_REQ_GEN_H_
+#include <linux/stringify.h>
+
#define CAT2_STR_(t, s) __stringify(t/s)
#define CAT2_STR(t, s) CAT2_STR_(t, s)
#define I(...) __VA_ARGS__
diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h
index 871a9a1766c2..fa9bc804e67a 100644
--- a/arch/powerpc/perf/req-gen/perf.h
+++ b/arch/powerpc/perf/req-gen/perf.h
@@ -3,6 +3,7 @@
#define LINUX_POWERPC_PERF_REQ_GEN_PERF_H_
#include <linux/perf_event.h>
+#include <linux/stringify.h>
#ifndef REQUEST_FILE
#error "REQUEST_FILE must be defined before including"
diff --git a/arch/powerpc/platforms/4xx/msi.c b/arch/powerpc/platforms/4xx/msi.c
index 81b2cbce7df8..7c324eff2f22 100644
--- a/arch/powerpc/platforms/4xx/msi.c
+++ b/arch/powerpc/platforms/4xx/msi.c
@@ -146,13 +146,19 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
const u32 *sdr_addr;
dma_addr_t msi_phys;
void *msi_virt;
+ int err;
sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
if (!sdr_addr)
- return -1;
+ return -EINVAL;
- mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
- mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
+ msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
+ if (!msi_data)
+ return -EINVAL;
+
+ msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
+ if (!msi_mask)
+ return -EINVAL;
msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
if (!msi->msi_dev)
@@ -160,30 +166,30 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
msi->msi_regs = of_iomap(msi->msi_dev, 0);
if (!msi->msi_regs) {
- dev_err(&dev->dev, "of_iomap problem failed\n");
- return -ENOMEM;
+ dev_err(&dev->dev, "of_iomap failed\n");
+ err = -ENOMEM;
+ goto node_put;
}
dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
(u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
- if (!msi_virt)
- return -ENOMEM;
+ if (!msi_virt) {
+ err = -ENOMEM;
+ goto iounmap;
+ }
msi->msi_addr_hi = upper_32_bits(msi_phys);
msi->msi_addr_lo = lower_32_bits(msi_phys & 0xffffffff);
dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n",
msi->msi_addr_hi, msi->msi_addr_lo);
+ mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
+ mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
+
/* Program the Interrupt handler Termination addr registers */
out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
- msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
- if (!msi_data)
- return -1;
- msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
- if (!msi_mask)
- return -1;
/* Program MSI Expected data and Mask bits */
out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
@@ -191,6 +197,12 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys);
return 0;
+
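+ /* Error unwind: release resources in the reverse order they were acquired */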
+iounmap:
+ iounmap(msi->msi_regs);
+node_put:
+ of_node_put(msi->msi_dev);
+ return err;
}
static int ppc4xx_of_msi_remove(struct platform_device *dev)
@@ -209,7 +221,6 @@ static int ppc4xx_of_msi_remove(struct platform_device *dev)
msi_bitmap_free(&msi->bitmap);
iounmap(msi->msi_regs);
of_node_put(msi->msi_dev);
- kfree(msi);
return 0;
}
@@ -223,18 +234,16 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
- msi = kzalloc(sizeof(*msi), GFP_KERNEL);
- if (!msi) {
- dev_err(&dev->dev, "No memory for MSI structure\n");
+ msi = devm_kzalloc(&dev->dev, sizeof(*msi), GFP_KERNEL);
+ if (!msi)
return -ENOMEM;
- }
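+ /* devm allocation is freed automatically on probe failure or driver unbind */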
dev->dev.platform_data = msi;
/* Get MSI ranges */
err = of_address_to_resource(dev->dev.of_node, 0, &res);
if (err) {
dev_err(&dev->dev, "%pOF resource error!\n", dev->dev.of_node);
- goto error_out;
+ return err;
}
msi_irqs = of_irq_count(dev->dev.of_node);
@@ -243,7 +252,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
err = ppc4xx_setup_pcieh_hw(dev, res, msi);
if (err)
- goto error_out;
+ return err;
err = ppc4xx_msi_init_allocator(dev, msi);
if (err) {
@@ -256,7 +265,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
phb->controller_ops.setup_msi_irqs = ppc4xx_setup_msi_irqs;
phb->controller_ops.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
}
- return err;
+ return 0;
error_out:
ppc4xx_of_msi_remove(dev);
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index ff2f86fe5429..f40d48eab779 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_PPC_LITE5200) += lite5200.o
obj-$(CONFIG_PPC_MEDIA5200) += media5200.o
obj-$(CONFIG_PM) += mpc52xx_sleep.o mpc52xx_pm.o
-ifeq ($(CONFIG_PPC_LITE5200),y)
+ifdef CONFIG_PPC_LITE5200
obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o
endif
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index 31d3515672f3..b1d208ded981 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -117,7 +117,10 @@ int mpc52xx_pm_enter(suspend_state_t state)
u32 intr_main_mask;
void __iomem * irq_0x500 = (void __iomem *)CONFIG_KERNEL_START + 0x500;
unsigned long irq_0x500_stop = (unsigned long)irq_0x500 + mpc52xx_ds_cached_size;
- char saved_0x500[mpc52xx_ds_cached_size];
+ char saved_0x500[0x600-0x500];
+
+ if (WARN_ON(mpc52xx_ds_cached_size > sizeof(saved_0x500)))
+ return -ENOMEM;
/* disable all interrupts in PIC */
intr_main_mask = in_be32(&intr->main_mask);
diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
index 58fa3d319f1c..dac36ba82fea 100644
--- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c
+++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
@@ -9,8 +9,10 @@
* option) any later version.
*/
+#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -150,3 +152,5 @@ static int __init t1042rdb_diu_init(void)
}
early_initcall(t1042rdb_diu_init);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index e6a1de521319..6c6a7c72cae4 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -74,7 +74,6 @@ config PPC_BOOK3S_64
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_NUMA_BALANCING
select IRQ_WORK
- select HAVE_KERNEL_XZ
config PPC_BOOK3E_64
bool "Embedded processors"
@@ -86,7 +85,6 @@ endchoice
choice
prompt "CPU selection"
- depends on PPC64
default GENERIC_CPU
help
This will create a kernel which is optimised for a particular CPU.
@@ -96,13 +94,17 @@ choice
config GENERIC_CPU
bool "Generic (POWER4 and above)"
- depends on !CPU_LITTLE_ENDIAN
+ depends on PPC64 && !CPU_LITTLE_ENDIAN
config GENERIC_CPU
bool "Generic (POWER8 and above)"
- depends on CPU_LITTLE_ENDIAN
+ depends on PPC64 && CPU_LITTLE_ENDIAN
select ARCH_HAS_FAST_MULTIPLIER
+config GENERIC_CPU
+ bool "Generic 32 bits powerpc"
+ depends on PPC32 && !PPC_8xx
+
config CELL_CPU
bool "Cell Broadband Engine"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
@@ -138,8 +140,37 @@ config E6500_CPU
bool "Freescale e6500"
depends on E500
+config 860_CPU
+ bool "8xx family"
+ depends on PPC_8xx
+
+config E300C2_CPU
+ bool "e300c2 (832x)"
+ depends on PPC_BOOK3S_32
+
+config E300C3_CPU
+ bool "e300c3 (831x)"
+ depends on PPC_BOOK3S_32
+
endchoice
+config TARGET_CPU_BOOL
+ bool
+ default !GENERIC_CPU
+
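+# TARGET_CPU resolves the CPU choice above to the name string the build
+# system passes to the compiler (typically as -mcpu=).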
+config TARGET_CPU
+ string
+ depends on TARGET_CPU_BOOL
+ default "cell" if CELL_CPU
+ default "power5" if POWER5_CPU
+ default "power6" if POWER6_CPU
+ default "power7" if POWER7_CPU
+ default "power8" if POWER8_CPU
+ default "power9" if POWER9_CPU
+ default "860" if 860_CPU
+ default "e300c2" if E300C2_CPU
+ default "e300c3" if E300C3_CPU
+
config PPC_BOOK3S
def_bool y
depends on PPC_BOOK3S_32 || PPC_BOOK3S_64
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index d5f808e8a5f3..10064a33ca96 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o
obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o
-ifeq ($(CONFIG_SMP),y)
+ifdef CONFIG_SMP
obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
endif
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index 2c15ff094483..55aac74e1cb9 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -49,6 +49,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
+#include <linux/stringify.h>
#include <asm/spu.h>
#include <asm/io.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index 1e002e94d0f6..83cf58daaa79 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -111,7 +111,7 @@ int spufs_handle_class1(struct spu_context *ctx)
{
u64 ea, dsisr, access;
unsigned long flags;
- unsigned flt = 0;
+ vm_fault_t flt = 0;
int ret;
/*
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.h b/arch/powerpc/platforms/cell/spufs/sputrace.h
index d557e999b662..1def11e911ac 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.h
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.h
@@ -3,6 +3,7 @@
#define _TRACE_SPUFS_H
#include <linux/tracepoint.h>
+#include <linux/stringify.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM spufs
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
index c3ede2c365c3..791b86398e1d 100644
--- a/arch/powerpc/platforms/chrp/nvram.c
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -11,6 +11,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
@@ -89,3 +90,5 @@ void __init chrp_nvram_init(void)
return;
}
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 8bb46dcbebd8..403523c061ba 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -67,16 +67,6 @@ void __init wii_memory_fixups(void)
{
struct memblock_region *p = memblock.memory.regions;
- /*
- * This is part of a workaround to allow the use of two
- * discontinuous RAM ranges on the Wii, even if this is
- * currently unsupported on 32-bit PowerPC Linux.
- *
- * We coalesce the two memory ranges of the Wii into a
- * single range, then create a reservation for the "hole"
- * between both ranges.
- */
-
BUG_ON(memblock.memory.cnt != 2);
BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 2c72263ad6ab..c80f72c370ae 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -531,7 +531,7 @@ int pasemi_dma_init(void)
iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
if (!iob_pdev) {
BUG();
- printk(KERN_WARNING "Can't find I/O Bridge\n");
+ pr_warn("Can't find I/O Bridge\n");
err = -ENODEV;
goto out;
}
@@ -540,7 +540,7 @@ int pasemi_dma_init(void)
dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
if (!dma_pdev) {
BUG();
- printk(KERN_WARNING "Can't find DMA controller\n");
+ pr_warn("Can't find DMA controller\n");
err = -ENODEV;
goto out;
}
@@ -623,7 +623,7 @@ int pasemi_dma_init(void)
pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);
- printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
+ pr_info("PA Semi PWRficient DMA library initialized "
"(%d tx, %d rx channels)\n", num_txch, num_rxch);
out:
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index c23e60959aa8..6f35a2afe522 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -256,7 +256,7 @@ static int gpio_mdio_probe(struct platform_device *ofdev)
err = of_mdiobus_register(new_bus, np);
if (err != 0) {
- printk(KERN_ERR "%s: Cannot register as MDIO bus, err %d\n",
+ pr_err("%s: Cannot register as MDIO bus, err %d\n",
new_bus->name, err);
goto out_free_irq;
}
diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
index 44e0d9226f0a..8bb4e8082441 100644
--- a/arch/powerpc/platforms/pasemi/idle.c
+++ b/arch/powerpc/platforms/pasemi/idle.c
@@ -78,13 +78,13 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
static int __init pasemi_idle_init(void)
{
#ifndef CONFIG_PPC_PASEMI_CPUFREQ
- printk(KERN_WARNING "No cpufreq driver, powersavings modes disabled\n");
+ pr_warn("No cpufreq driver, powersavings modes disabled\n");
current_mode = 0;
#endif
ppc_md.system_reset_exception = pasemi_system_reset_exception;
ppc_md.power_save = modes[current_mode].entry;
- printk(KERN_INFO "Using PA6T idle loop (%s)\n", modes[current_mode].name);
+ pr_info("Using PA6T idle loop (%s)\n", modes[current_mode].name);
return 0;
}
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 78b80cbd9768..f06c83f321e6 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -210,7 +210,7 @@ static int __init iob_init(struct device_node *dn)
/* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
- printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
+ pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);
/* Allocate a spare page to map all invalid IOTLB pages. */
tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
diff --git a/arch/powerpc/platforms/pasemi/misc.c b/arch/powerpc/platforms/pasemi/misc.c
index 8571e7bf78b6..aa958a46957f 100644
--- a/arch/powerpc/platforms/pasemi/misc.c
+++ b/arch/powerpc/platforms/pasemi/misc.c
@@ -69,9 +69,7 @@ static int __init pasemi_register_i2c_devices(void)
addr = of_get_property(node, "reg", &len);
if (!addr || len < sizeof(int) ||
*addr > (1 << 10) - 1) {
- printk(KERN_WARNING
- "pasemi_register_i2c_devices: "
- "invalid i2c device entry\n");
+ pr_warn("pasemi_register_i2c_devices: invalid i2c device entry\n");
continue;
}
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index aea9ff2c8e6d..c3c64172482d 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -205,7 +205,7 @@ static int __init pas_add_bridge(struct device_node *dev)
setup_pa_pxp(hose);
- printk(KERN_INFO "Found PA-PXP PCI host bridge.\n");
+ pr_info("Found PA-PXP PCI host bridge.\n");
/* Interpret the "ranges" property */
pci_process_bridge_OF_ranges(hose, dev, 1);
@@ -216,21 +216,21 @@ static int __init pas_add_bridge(struct device_node *dev)
void __init pas_pci_init(void)
{
struct device_node *np, *root;
+ int res;
root = of_find_node_by_path("/");
if (!root) {
- printk(KERN_CRIT "pas_pci_init: can't find root "
- "of device tree\n");
+ pr_crit("pas_pci_init: can't find root of device tree\n");
return;
}
pci_set_flags(PCI_SCAN_ALL_PCIE_DEVS);
- for (np = NULL; (np = of_get_next_child(root, np)) != NULL;)
- if (np->name && !strcmp(np->name, "pxp") && !pas_add_bridge(np))
- of_node_get(np);
-
- of_node_put(root);
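+ /* of_find_compatible_node() drops the reference on @root for us */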
+ np = of_find_compatible_node(root, NULL, "pasemi,rootbus");
+ if (np) {
+ res = pas_add_bridge(np);
+ of_node_put(np);
+ }
}
void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index d0b8ae53660d..9a6eb04cca83 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -207,8 +207,7 @@ static __init void pas_init_IRQ(void)
break;
}
if (!mpic_node) {
- printk(KERN_ERR
- "Failed to locate the MPIC interrupt controller\n");
+ pr_err("Failed to locate the MPIC interrupt controller\n");
return;
}
@@ -217,12 +216,12 @@ static __init void pas_init_IRQ(void)
naddr = of_n_addr_cells(root);
opprop = of_get_property(root, "platform-open-pic", &opplen);
if (!opprop) {
- printk(KERN_ERR "No platform-open-pic property.\n");
+ pr_err("No platform-open-pic property.\n");
of_node_put(root);
return;
}
openpic_addr = of_read_number(opprop, naddr);
- printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
+ pr_debug("OpenPIC addr: %lx\n", openpic_addr);
mpic_flags = MPIC_LARGE_VECTORS | MPIC_NO_BIAS | MPIC_NO_RESET;
@@ -265,72 +264,72 @@ static int pas_machine_check_handler(struct pt_regs *regs)
srr1 = regs->msr;
if (nmi_virq && mpic_get_mcirq() == nmi_virq) {
- printk(KERN_ERR "NMI delivered\n");
+ pr_err("NMI delivered\n");
debugger(regs);
mpic_end_irq(irq_get_irq_data(nmi_virq));
goto out;
}
dsisr = mfspr(SPRN_DSISR);
- printk(KERN_ERR "Machine Check on CPU %d\n", cpu);
- printk(KERN_ERR "SRR0 0x%016lx SRR1 0x%016lx\n", srr0, srr1);
- printk(KERN_ERR "DSISR 0x%016lx DAR 0x%016lx\n", dsisr, regs->dar);
- printk(KERN_ERR "BER 0x%016lx MER 0x%016lx\n", mfspr(SPRN_PA6T_BER),
+ pr_err("Machine Check on CPU %d\n", cpu);
+ pr_err("SRR0 0x%016lx SRR1 0x%016lx\n", srr0, srr1);
+ pr_err("DSISR 0x%016lx DAR 0x%016lx\n", dsisr, regs->dar);
+ pr_err("BER 0x%016lx MER 0x%016lx\n", mfspr(SPRN_PA6T_BER),
mfspr(SPRN_PA6T_MER));
- printk(KERN_ERR "IER 0x%016lx DER 0x%016lx\n", mfspr(SPRN_PA6T_IER),
+ pr_err("IER 0x%016lx DER 0x%016lx\n", mfspr(SPRN_PA6T_IER),
mfspr(SPRN_PA6T_DER));
- printk(KERN_ERR "Cause:\n");
+ pr_err("Cause:\n");
if (srr1 & 0x200000)
- printk(KERN_ERR "Signalled by SDC\n");
+ pr_err("Signalled by SDC\n");
if (srr1 & 0x100000) {
- printk(KERN_ERR "Load/Store detected error:\n");
+ pr_err("Load/Store detected error:\n");
if (dsisr & 0x8000)
- printk(KERN_ERR "D-cache ECC double-bit error or bus error\n");
+ pr_err("D-cache ECC double-bit error or bus error\n");
if (dsisr & 0x4000)
- printk(KERN_ERR "LSU snoop response error\n");
+ pr_err("LSU snoop response error\n");
if (dsisr & 0x2000) {
- printk(KERN_ERR "MMU SLB multi-hit or invalid B field\n");
+ pr_err("MMU SLB multi-hit or invalid B field\n");
dump_slb = 1;
}
if (dsisr & 0x1000)
- printk(KERN_ERR "Recoverable Duptags\n");
+ pr_err("Recoverable Duptags\n");
if (dsisr & 0x800)
- printk(KERN_ERR "Recoverable D-cache parity error count overflow\n");
+ pr_err("Recoverable D-cache parity error count overflow\n");
if (dsisr & 0x400)
- printk(KERN_ERR "TLB parity error count overflow\n");
+ pr_err("TLB parity error count overflow\n");
}
if (srr1 & 0x80000)
- printk(KERN_ERR "Bus Error\n");
+ pr_err("Bus Error\n");
if (srr1 & 0x40000) {
- printk(KERN_ERR "I-side SLB multiple hit\n");
+ pr_err("I-side SLB multiple hit\n");
dump_slb = 1;
}
if (srr1 & 0x20000)
- printk(KERN_ERR "I-cache parity error hit\n");
+ pr_err("I-cache parity error hit\n");
if (num_mce_regs == 0)
- printk(KERN_ERR "No MCE registers mapped yet, can't dump\n");
+ pr_err("No MCE registers mapped yet, can't dump\n");
else
- printk(KERN_ERR "SoC debug registers:\n");
+ pr_err("SoC debug registers:\n");
for (i = 0; i < num_mce_regs; i++)
- printk(KERN_ERR "%s: 0x%08x\n", mce_regs[i].name,
+ pr_err("%s: 0x%08x\n", mce_regs[i].name,
in_le32(mce_regs[i].addr));
if (dump_slb) {
unsigned long e, v;
int i;
- printk(KERN_ERR "slb contents:\n");
+ pr_err("slb contents:\n");
for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
- printk(KERN_ERR "%02d %016lx %016lx\n", i, e, v);
+ pr_err("%02d %016lx %016lx\n", i, e, v);
}
}
diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S
index cc5347eb1662..27862feee4a5 100644
--- a/arch/powerpc/platforms/powermac/cache.S
+++ b/arch/powerpc/platforms/powermac/cache.S
@@ -17,6 +17,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
+#include <asm/feature-fixups.h>
/*
* Flush and disable all data caches (dL1, L2, L3). This is used
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 3f82cb24eb2b..4eb8cb38fc69 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -2889,10 +2889,8 @@ set_initial_features(void)
/* On all machines, switch modem & serial ports off */
for_each_node_by_name(np, "ch-a")
initial_serial_shutdown(np);
- of_node_put(np);
for_each_node_by_name(np, "ch-b")
initial_serial_shutdown(np);
- of_node_put(np);
}
void __init
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index df762bb3c735..04527d13d5a4 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -781,12 +781,12 @@ static int __init pmac_add_bridge(struct device_node *dev)
struct resource rsrc;
char *disp_name;
const int *bus_range;
- int primary = 1, has_address = 0;
+ int primary = 1;
DBG("Adding PCI host bridge %pOF\n", dev);
/* Fetch host bridge registers address */
- has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
+ of_address_to_resource(dev, 0, &rsrc);
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
@@ -904,7 +904,7 @@ static int pmac_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
void __init pmac_pci_init(void)
{
struct device_node *np, *root;
- struct device_node *ht = NULL;
+ struct device_node *ht __maybe_unused = NULL;
pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN);
@@ -1019,7 +1019,7 @@ static bool pmac_pci_enable_device_hook(struct pci_dev *dev)
return true;
}
-void pmac_pci_fixup_ohci(struct pci_dev *dev)
+static void pmac_pci_fixup_ohci(struct pci_dev *dev)
{
struct device_node *node = pci_device_to_OF_node(dev);
@@ -1054,7 +1054,7 @@ void __init pmac_pcibios_after_init(void)
}
}
-void pmac_pci_fixup_cardbus(struct pci_dev* dev)
+static void pmac_pci_fixup_cardbus(struct pci_dev *dev)
{
if (!machine_is(powermac))
return;
@@ -1091,7 +1091,7 @@ void pmac_pci_fixup_cardbus(struct pci_dev* dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus);
-void pmac_pci_fixup_pciata(struct pci_dev* dev)
+static void pmac_pci_fixup_pciata(struct pci_dev *dev)
{
u8 progif = 0;
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index 1c2802fabd57..f89808b9713d 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -18,6 +18,7 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
+#include <asm/feature-fixups.h>
#define MAGIC 0x4c617273 /* 'Lars' */
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 12e6e4d30602..f92c1918fb56 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -34,6 +34,8 @@
#include <asm/nvram.h>
#include <asm/smu.h>
+#include "pmac.h"
+
#undef DEBUG
#ifdef DEBUG
@@ -249,7 +251,7 @@ int pmac_set_rtc_time(struct rtc_time *tm)
* Calibrate the decrementer register using VIA timer 1.
* This is used both on powermacs and CHRP machines.
*/
-int __init via_calibrate_decr(void)
+static int __init via_calibrate_decr(void)
{
struct device_node *vias;
volatile unsigned char __iomem *via;
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index d83135a9830e..8901973ed683 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -73,7 +73,7 @@ void udbg_scc_init(int force_scc)
struct device_node *stdout = NULL, *escc = NULL, *macio = NULL;
struct device_node *ch, *ch_def = NULL, *ch_a = NULL;
const char *path;
- int i, x;
+ int i;
escc = of_find_node_by_name(NULL, "escc");
if (escc == NULL)
@@ -120,7 +120,7 @@ void udbg_scc_init(int force_scc)
mb();
for (i = 20000; i != 0; --i)
- x = in_8(sccc);
+ in_8(sccc);
out_8(sccc, 0x09); /* reset A or B side */
out_8(sccc, 0xc0);
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 703a350a7f4e..b540ce8eec55 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -6,7 +6,7 @@ obj-y += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o
obj-y += opal-kmsg.o opal-powercap.o opal-psr.o opal-sensor-groups.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
-obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o
+obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o pci-ioda-tce.o
obj-$(CONFIG_CXL_BASE) += pci-cxl.o
obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index ddfc3544d285..3c1beae29f2d 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -223,6 +223,14 @@ int pnv_eeh_post_init(void)
eeh_probe_devices();
eeh_addr_cache_build();
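+ /*
+ * Devices whose PEs were not configured at boot had their EEH probe
+ * postponed; now that probing is complete, report the final outcome.
+ */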
+ if (eeh_has_flag(EEH_POSTPONED_PROBE)) {
+ eeh_clear_flag(EEH_POSTPONED_PROBE);
+ if (eeh_enabled())
+ pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
+ else
+ pr_info("EEH: No capable adapters found\n");
+ }
+
/* Register OPAL event notifier */
eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
if (eeh_event_irq < 0) {
@@ -384,8 +392,10 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
return NULL;
/* Skip if we haven't probed yet */
- if (phb->ioda.pe_rmap[config_addr] == IODA_INVALID_PE)
+ if (phb->ioda.pe_rmap[config_addr] == IODA_INVALID_PE) {
+ eeh_add_flag(EEH_POSTPONED_PROBE);
return NULL;
+ }
/* Initialize eeh device */
edev->class_code = pdn->class_code;
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 1c5d0675b43c..35f699ebb662 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -36,6 +36,8 @@
#define P9_STOP_SPR_PSSCR 855
static u32 supported_cpuidle_states;
+struct pnv_idle_states_t *pnv_idle_states;
+int nr_pnv_idle_states;
/*
* The default stop state that will be used by ppc_md.power_save
@@ -177,11 +179,6 @@ static void pnv_alloc_idle_core_states(void)
paca_ptrs[cpu]->core_idle_state_ptr = core_idle_state;
paca_ptrs[cpu]->thread_idle_state = PNV_THREAD_RUNNING;
paca_ptrs[cpu]->thread_mask = 1 << j;
- if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
- continue;
- paca_ptrs[cpu]->thread_sibling_pacas =
- kmalloc_node(paca_ptr_array_size,
- GFP_KERNEL, node);
}
}
@@ -622,48 +619,10 @@ int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
* @dt_idle_states: Number of idle state entries
* Returns 0 on success
*/
-static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
- int dt_idle_states)
+static int __init pnv_power9_idle_init(void)
{
- u64 *psscr_val = NULL;
- u64 *psscr_mask = NULL;
- u32 *residency_ns = NULL;
u64 max_residency_ns = 0;
- int rc = 0, i;
-
- psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val), GFP_KERNEL);
- psscr_mask = kcalloc(dt_idle_states, sizeof(*psscr_mask), GFP_KERNEL);
- residency_ns = kcalloc(dt_idle_states, sizeof(*residency_ns),
- GFP_KERNEL);
-
- if (!psscr_val || !psscr_mask || !residency_ns) {
- rc = -1;
- goto out;
- }
-
- if (of_property_read_u64_array(np,
- "ibm,cpu-idle-state-psscr",
- psscr_val, dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
- rc = -1;
- goto out;
- }
-
- if (of_property_read_u64_array(np,
- "ibm,cpu-idle-state-psscr-mask",
- psscr_mask, dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
- rc = -1;
- goto out;
- }
-
- if (of_property_read_u32_array(np,
- "ibm,cpu-idle-state-residency-ns",
- residency_ns, dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
- rc = -1;
- goto out;
- }
+ int i;
/*
* Set pnv_first_deep_stop_state, pnv_deepest_stop_psscr_{val,mask},
@@ -679,33 +638,37 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
* the shallowest (OPAL_PM_STOP_INST_FAST) loss-less stop state.
*/
pnv_first_deep_stop_state = MAX_STOP_STATE;
- for (i = 0; i < dt_idle_states; i++) {
+ for (i = 0; i < nr_pnv_idle_states; i++) {
int err;
- u64 psscr_rl = psscr_val[i] & PSSCR_RL_MASK;
+ struct pnv_idle_states_t *state = &pnv_idle_states[i];
+ u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;
- if ((flags[i] & OPAL_PM_LOSE_FULL_CONTEXT) &&
- (pnv_first_deep_stop_state > psscr_rl))
+ if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
+ pnv_first_deep_stop_state > psscr_rl)
pnv_first_deep_stop_state = psscr_rl;
- err = validate_psscr_val_mask(&psscr_val[i], &psscr_mask[i],
- flags[i]);
+ err = validate_psscr_val_mask(&state->psscr_val,
+ &state->psscr_mask,
+ state->flags);
if (err) {
- report_invalid_psscr_val(psscr_val[i], err);
+ report_invalid_psscr_val(state->psscr_val, err);
continue;
}
- if (max_residency_ns < residency_ns[i]) {
- max_residency_ns = residency_ns[i];
- pnv_deepest_stop_psscr_val = psscr_val[i];
- pnv_deepest_stop_psscr_mask = psscr_mask[i];
- pnv_deepest_stop_flag = flags[i];
+ state->valid = true;
+
+ if (max_residency_ns < state->residency_ns) {
+ max_residency_ns = state->residency_ns;
+ pnv_deepest_stop_psscr_val = state->psscr_val;
+ pnv_deepest_stop_psscr_mask = state->psscr_mask;
+ pnv_deepest_stop_flag = state->flags;
deepest_stop_found = true;
}
if (!default_stop_found &&
- (flags[i] & OPAL_PM_STOP_INST_FAST)) {
- pnv_default_stop_val = psscr_val[i];
- pnv_default_stop_mask = psscr_mask[i];
+ (state->flags & OPAL_PM_STOP_INST_FAST)) {
+ pnv_default_stop_val = state->psscr_val;
+ pnv_default_stop_mask = state->psscr_mask;
default_stop_found = true;
}
}
@@ -728,11 +691,8 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
pr_info("cpuidle-powernv: Requested Level (RL) value of first deep stop = 0x%llx\n",
pnv_first_deep_stop_state);
-out:
- kfree(psscr_val);
- kfree(psscr_mask);
- kfree(residency_ns);
- return rc;
+
+ return 0;
}
/*
@@ -740,50 +700,146 @@ out:
*/
static void __init pnv_probe_idle_states(void)
{
- struct device_node *np;
- int dt_idle_states;
- u32 *flags = NULL;
int i;
+ if (nr_pnv_idle_states < 0) {
+ pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+ return;
+ }
+
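+ /* ISA v3.0 (POWER9) stop states need their PSSCR values validated */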
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (pnv_power9_idle_init())
+ return;
+ }
+
+ for (i = 0; i < nr_pnv_idle_states; i++)
+ supported_cpuidle_states |= pnv_idle_states[i].flags;
+}
+
+/*
+ * This function parses the device tree and populates the pnv_idle_states
+ * structure. It also sets nr_pnv_idle_states, the number of cpuidle
+ * states discovered through the device tree.
+ */
+
+static int pnv_parse_cpuidle_dt(void)
+{
+ struct device_node *np;
+ int nr_idle_states, i;
+ int rc = 0;
+ u32 *temp_u32;
+ u64 *temp_u64;
+ const char **temp_string;
+
np = of_find_node_by_path("/ibm,opal/power-mgt");
if (!np) {
pr_warn("opal: PowerMgmt Node not found\n");
- goto out;
+ return -ENODEV;
}
- dt_idle_states = of_property_count_u32_elems(np,
- "ibm,cpu-idle-state-flags");
- if (dt_idle_states < 0) {
- pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+ nr_idle_states = of_property_count_u32_elems(np,
+ "ibm,cpu-idle-state-flags");
+
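+ /* Each fixed-width property is bulk-read into a scratch array, then copied per-state */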
+ pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states),
+ GFP_KERNEL);
+ temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
+ temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
+ temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);
+
+ if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
+ pr_err("Could not allocate memory for dt parsing\n");
+ rc = -ENOMEM;
goto out;
}
- flags = kcalloc(dt_idle_states, sizeof(*flags), GFP_KERNEL);
-
- if (of_property_read_u32_array(np,
- "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+ /* Read flags */
+ if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
+ temp_u32, nr_idle_states)) {
pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ for (i = 0; i < nr_idle_states; i++)
+ pnv_idle_states[i].flags = temp_u32[i];
+
+ /* Read latencies */
+ if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
+ temp_u32, nr_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ for (i = 0; i < nr_idle_states; i++)
+ pnv_idle_states[i].latency_ns = temp_u32[i];
+
+ /* Read residencies */
+ if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
+ temp_u32, nr_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+ rc = -EINVAL;
goto out;
}
+ for (i = 0; i < nr_idle_states; i++)
+ pnv_idle_states[i].residency_ns = temp_u32[i];
+ /* For power9 */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (pnv_power9_idle_init(np, flags, dt_idle_states))
+ /* Read psscr_val */
+ if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
+ temp_u64, nr_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ for (i = 0; i < nr_idle_states; i++)
+ pnv_idle_states[i].psscr_val = temp_u64[i];
+
+ /* Read psscr_mask */
+ if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
+ temp_u64, nr_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
+ rc = -EINVAL;
goto out;
+ }
+ for (i = 0; i < nr_idle_states; i++)
+ pnv_idle_states[i].psscr_mask = temp_u64[i];
}
- for (i = 0; i < dt_idle_states; i++)
- supported_cpuidle_states |= flags[i];
+ /*
+ * The POWER8-specific properties ibm,cpu-idle-state-pmicr-mask and
+ * ibm,cpu-idle-state-pmicr-val were never used and there is no
+ * plan to use them in the near future, so they are not parsed here.
+ */
+ if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
+ temp_string, nr_idle_states) < 0) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ for (i = 0; i < nr_idle_states; i++)
+ strlcpy(pnv_idle_states[i].name, temp_string[i],
+ PNV_IDLE_NAME_LEN);
+ nr_pnv_idle_states = nr_idle_states;
+ rc = 0;
out:
- kfree(flags);
+ kfree(temp_u32);
+ kfree(temp_u64);
+ kfree(temp_string);
+ return rc;
}
+
static int __init pnv_init_idle_states(void)
{
-
+ int rc = 0;
supported_cpuidle_states = 0;
+ /* In case we error out, nr_pnv_idle_states will be zero */
+ nr_pnv_idle_states = 0;
if (cpuidle_disable != IDLE_NO_OVERRIDE)
goto out;
-
+ rc = pnv_parse_cpuidle_dt();
+ if (rc)
+ return rc;
pnv_probe_idle_states();
if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
@@ -805,29 +861,6 @@ static int __init pnv_init_idle_states(void)
pnv_alloc_idle_core_states();
- /*
- * For each CPU, record its PACA address in each of it's
- * sibling thread's PACA at the slot corresponding to this
- * CPU's index in the core.
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- int cpu;
-
- pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
- for_each_present_cpu(cpu) {
- int base_cpu = cpu_first_thread_sibling(cpu);
- int idx = cpu_thread_in_core(cpu);
- int i;
-
- for (i = 0; i < threads_per_core; i++) {
- int j = base_cpu + i;
-
- paca_ptrs[j]->thread_sibling_pacas[idx] =
- paca_ptrs[cpu];
- }
- }
- }
-
if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
ppc_md.power_save = power7_idle;
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index b99283df8584..51dc398ae3f7 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -47,38 +47,9 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}
-static bool valid_memtrace_range(struct memtrace_entry *dev,
- unsigned long start, unsigned long size)
-{
- if ((start >= dev->start) &&
- ((start + size) <= (dev->start + dev->size)))
- return true;
-
- return false;
-}
-
-static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- unsigned long size = vma->vm_end - vma->vm_start;
- struct memtrace_entry *dev = filp->private_data;
-
- if (!valid_memtrace_range(dev, vma->vm_pgoff << PAGE_SHIFT, size))
- return -EINVAL;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff + (dev->start >> PAGE_SHIFT),
- size, vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
static const struct file_operations memtrace_fops = {
.llseek = default_llseek,
.read = memtrace_read,
- .mmap = memtrace_mmap,
.open = simple_open,
};
@@ -206,8 +177,11 @@ static int memtrace_init_debugfs(void)
snprintf(ent->name, 16, "%08x", ent->nid);
dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
- if (!dir)
+ if (!dir) {
+ pr_err("Failed to create debugfs directory for node %d\n",
+ ent->nid);
return -1;
+ }
ent->dir = dir;
debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
@@ -218,18 +192,93 @@ static int memtrace_init_debugfs(void)
return ret;
}
+static int online_mem_block(struct memory_block *mem, void *arg)
+{
+ return device_online(&mem->dev);
+}
+
+/*
+ * Iterate through the chunks of memory we have removed from the kernel
+ * and attempt to add them back to the kernel.
+ */
+static int memtrace_online(void)
+{
+ int i, ret = 0;
+ struct memtrace_entry *ent;
+
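+ /* Re-add the chunks in reverse order of their removal */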
+ for (i = memtrace_array_nr - 1; i >= 0; i--) {
+ ent = &memtrace_array[i];
+
+ /* We have onlined this chunk previously */
+ if (ent->nid == -1)
+ continue;
+
+ /* Remove from io mappings */
+ if (ent->mem) {
+ iounmap(ent->mem);
+ ent->mem = NULL;
+ }
+
+ if (add_memory(ent->nid, ent->start, ent->size)) {
+ pr_err("Failed to add trace memory to node %d\n",
+ ent->nid);
+ ret += 1;
+ continue;
+ }
+
+ /*
+ * If the kernel isn't compiled with the auto-online option,
+ * we need to online the memory ourselves.
+ */
+ if (!memhp_auto_online) {
+ walk_memory_range(PFN_DOWN(ent->start),
+ PFN_UP(ent->start + ent->size - 1),
+ NULL, online_mem_block);
+ }
+
+ /*
+ * Memory was added successfully, so clean up the references to it;
+ * on reentry this lets us tell that the chunk was already re-added.
+ */
+ debugfs_remove_recursive(ent->dir);
+ pr_info("Added trace memory back to node %d\n", ent->nid);
+ ent->size = ent->start = ent->nid = -1;
+ }
+ if (ret)
+ return ret;
+
+ /* If all chunks of memory were added successfully, reset globals */
+ kfree(memtrace_array);
+ memtrace_array = NULL;
+ memtrace_size = 0;
+ memtrace_array_nr = 0;
+ return 0;
+}
+
static int memtrace_enable_set(void *data, u64 val)
{
- if (memtrace_size)
+ u64 bytes;
+
+ /*
+ * Reject sizes that aren't aligned to a memory block; a value of
+ * zero simply re-onlines any previously removed memory.
+ */
+ bytes = memory_block_size_bytes();
+ if (val & (bytes - 1)) {
+ pr_err("Value must be aligned with 0x%llx\n", bytes);
return -EINVAL;
+ }
- if (!val)
- return -EINVAL;
+ /* Re-add/online previously removed/offlined memory */
+ if (memtrace_size) {
+ if (memtrace_online())
+ return -EAGAIN;
+ }
- /* Make sure size is aligned to a memory block */
- if (val & (memory_block_size_bytes() - 1))
- return -EINVAL;
+ if (!val)
+ return 0;
+ /* Offline and remove memory */
if (memtrace_init_regions_runtime(val))
return -EINVAL;
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 8cdf91f5d3a4..8006c54a91e3 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -17,7 +17,9 @@
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
+#include <linux/debugfs.h>
+#include <asm/debugfs.h>
#include <asm/tlb.h>
#include <asm/powernv.h>
#include <asm/reg.h>
@@ -44,7 +46,8 @@ static DEFINE_SPINLOCK(npu_context_lock);
* entire TLB on the GPU for the given PID rather than each specific address in
* the range.
*/
-#define ATSD_THRESHOLD (2*1024*1024)
+static uint64_t atsd_threshold = 2 * 1024 * 1024;
+static struct dentry *atsd_threshold_dentry;
/*
* Other types of TCE cache invalidation are not functional in the
@@ -437,8 +440,9 @@ static int get_mmio_atsd_reg(struct npu *npu)
int i;
for (i = 0; i < npu->mmio_atsd_count; i++) {
- if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
- return i;
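+ /* Cheap non-atomic check first; only try the locked set if the bit looks free */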
+ if (!test_bit(i, &npu->mmio_atsd_usage))
+ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
+ return i;
}
return -ENOSPC;
@@ -683,7 +687,7 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
struct npu_context *npu_context = mn_to_npu_context(mn);
unsigned long address;
- if (end - start > ATSD_THRESHOLD) {
+ if (end - start > atsd_threshold) {
/*
* Just invalidate the entire PID if the address range is too
* large.
@@ -958,6 +962,11 @@ int pnv_npu2_init(struct pnv_phb *phb)
static int npu_index;
uint64_t rc = 0;
+ if (!atsd_threshold_dentry) {
+ atsd_threshold_dentry = debugfs_create_x64("atsd_threshold",
+ 0600, powerpc_debugfs_root, &atsd_threshold);
+ }
+
phb->npu.nmmu_flush =
of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
for_each_child_of_node(phb->hose->dn, dn) {
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 0dc8fa4e0af2..198143833f00 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -225,13 +225,16 @@ static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *
if (rc == OPAL_PARAMETER)
rc = opal_dump_info(&id, &size);
+ if (rc) {
+ pr_warn("%s: Failed to get dump info (%d)\n",
+ __func__, rc);
+ return rc;
+ }
+
*dump_id = be32_to_cpu(id);
*dump_size = be32_to_cpu(size);
*dump_type = be32_to_cpu(type);
- if (rc)
- pr_warn("%s: Failed to get dump info (%d)\n",
- __func__, rc);
return rc;
}
@@ -368,13 +371,12 @@ static irqreturn_t process_dump(int irq, void *data)
{
int rc;
uint32_t dump_id, dump_size, dump_type;
- struct dump_obj *dump;
char name[22];
struct kobject *kobj;
rc = dump_read_info(&dump_id, &dump_size, &dump_type);
if (rc != OPAL_SUCCESS)
- return rc;
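+ /* An irq handler must return an irqreturn_t, not an OPAL error code */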
+ return IRQ_HANDLED;
sprintf(name, "0x%x-0x%x", dump_type, dump_id);
@@ -386,12 +388,10 @@ static irqreturn_t process_dump(int irq, void *data)
if (kobj) {
/* Drop reference added by kset_find_obj() */
kobject_put(kobj);
- return 0;
+ return IRQ_HANDLED;
}
- dump = create_dump_obj(dump_id, dump_size, dump_type);
- if (!dump)
- return -1;
+ create_dump_obj(dump_id, dump_size, dump_type);
return IRQ_HANDLED;
}
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index 605c7e5d52c2..bc97770a67db 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -22,6 +22,7 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include <asm/machdep.h>
#include <asm/opal.h>
@@ -38,8 +39,8 @@ struct opal_event_irqchip {
};
static struct opal_event_irqchip opal_event_irqchip;
static u64 last_outstanding_events;
-static unsigned int opal_irq_count;
-static unsigned int *opal_irqs;
+static int opal_irq_count;
+static struct resource *opal_irqs;
void opal_handle_events(void)
{
@@ -165,24 +166,23 @@ void opal_event_shutdown(void)
/* First free interrupts, which will also mask them */
for (i = 0; i < opal_irq_count; i++) {
- if (!opal_irqs[i])
+ if (!opal_irqs || !opal_irqs[i].start)
continue;
if (in_interrupt() || irqs_disabled())
- disable_irq_nosync(opal_irqs[i]);
+ disable_irq_nosync(opal_irqs[i].start);
else
- free_irq(opal_irqs[i], NULL);
+ free_irq(opal_irqs[i].start, NULL);
- opal_irqs[i] = 0;
+ opal_irqs[i].start = 0;
}
}
int __init opal_event_init(void)
{
struct device_node *dn, *opal_node;
- const char **names;
- u32 *irqs;
- int i, rc;
+ bool old_style = false;
+ int i, rc = 0;
opal_node = of_find_node_by_path("/ibm,opal");
if (!opal_node) {
@@ -207,67 +207,91 @@ int __init opal_event_init(void)
goto out;
}
- /* Get opal-interrupts property and names if present */
- rc = of_property_count_u32_elems(opal_node, "opal-interrupts");
- if (rc < 0)
- goto out;
+ /* Look for new-style (standard) "interrupts" property */
+ opal_irq_count = of_irq_count(opal_node);
- opal_irq_count = rc;
- pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count);
+ /* Absent? Fall back to the old-style property */
+ if (opal_irq_count < 1) {
+ /* Get opal-interrupts property and names if present */
+ rc = of_property_count_u32_elems(opal_node, "opal-interrupts");
+ if (rc > 0)
+ opal_irq_count = rc;
+ old_style = true;
+ }
- irqs = kcalloc(opal_irq_count, sizeof(*irqs), GFP_KERNEL);
- names = kcalloc(opal_irq_count, sizeof(*names), GFP_KERNEL);
- opal_irqs = kcalloc(opal_irq_count, sizeof(*opal_irqs), GFP_KERNEL);
+ /* No interrupts? Bail out */
+ if (!opal_irq_count)
+ goto out;
- if (WARN_ON(!irqs || !names || !opal_irqs))
- goto out_free;
+ pr_debug("OPAL: Found %d interrupts reserved for OPAL using %s scheme\n",
+ opal_irq_count, old_style ? "old" : "new");
- rc = of_property_read_u32_array(opal_node, "opal-interrupts",
- irqs, opal_irq_count);
- if (rc < 0) {
- pr_err("Error %d reading opal-interrupts array\n", rc);
- goto out_free;
+ /* Allocate an IRQ resources array */
+ opal_irqs = kcalloc(opal_irq_count, sizeof(struct resource), GFP_KERNEL);
+ if (WARN_ON(!opal_irqs)) {
+ rc = -ENOMEM;
+ goto out;
}
- /* It's not an error for the names to be missing */
- of_property_read_string_array(opal_node, "opal-interrupts-names",
- names, opal_irq_count);
+ /* Build the resources array */
+ if (old_style) {
+ /* Old style "opal-interrupts" property */
+ for (i = 0; i < opal_irq_count; i++) {
+ struct resource *r = &opal_irqs[i];
+ const char *name = NULL;
+ u32 hw_irq;
+ int virq;
+
+ rc = of_property_read_u32_index(opal_node, "opal-interrupts",
+ i, &hw_irq);
+ if (WARN_ON(rc < 0)) {
+ opal_irq_count = i;
+ break;
+ }
+ of_property_read_string_index(opal_node, "opal-interrupts-names",
+ i, &name);
+ virq = irq_create_mapping(NULL, hw_irq);
+ if (!virq) {
+ pr_warn("Failed to map OPAL irq 0x%x\n", hw_irq);
+ continue;
+ }
+ r->start = r->end = virq;
+ r->flags = IORESOURCE_IRQ | IRQ_TYPE_LEVEL_LOW;
+ r->name = name;
+ }
+ } else {
+ /* new style standard "interrupts" property */
+ rc = of_irq_to_resource_table(opal_node, opal_irqs, opal_irq_count);
+ if (WARN_ON(rc < 0)) {
+ opal_irq_count = 0;
+ kfree(opal_irqs);
+ goto out;
+ }
+ if (WARN_ON(rc < opal_irq_count))
+ opal_irq_count = rc;
+ }
/* Install interrupt handlers */
for (i = 0; i < opal_irq_count; i++) {
- unsigned int virq;
- char *name;
-
- /* Get hardware and virtual IRQ */
- virq = irq_create_mapping(NULL, irqs[i]);
- if (!virq) {
- pr_warn("Failed to map irq 0x%x\n", irqs[i]);
- continue;
- }
+ struct resource *r = &opal_irqs[i];
+ const char *name;
- if (names[i] && strlen(names[i]))
- name = kasprintf(GFP_KERNEL, "opal-%s", names[i]);
+ /* Prefix name */
+ if (r->name && strlen(r->name))
+ name = kasprintf(GFP_KERNEL, "opal-%s", r->name);
else
name = kasprintf(GFP_KERNEL, "opal");
/* Install interrupt handler */
- rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW,
+ rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
name, NULL);
if (rc) {
- irq_dispose_mapping(virq);
- pr_warn("Error %d requesting irq %d (0x%x)\n",
- rc, virq, irqs[i]);
+ pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start);
continue;
}
-
- /* Cache IRQ */
- opal_irqs[i] = virq;
}
-
-out_free:
- kfree(irqs);
- kfree(names);
-out:
+ rc = 0;
+ out:
of_node_put(opal_node);
return rc;
}
diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c
index 6f1214d4de92..55691950d981 100644
--- a/arch/powerpc/platforms/powernv/opal-kmsg.c
+++ b/arch/powerpc/platforms/powernv/opal-kmsg.c
@@ -23,12 +23,9 @@
* may not be completely printed. This function does not actually dump the
* message, it just ensures that OPAL completely flushes the console buffer.
*/
-static void force_opal_console_flush(struct kmsg_dumper *dumper,
+static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
- int i;
- int64_t ret;
-
/*
* Outside of a panic context the pollers will continue to run,
* so we don't need to do any special flushing.
@@ -36,32 +33,11 @@ static void force_opal_console_flush(struct kmsg_dumper *dumper,
if (reason != KMSG_DUMP_PANIC)
return;
- if (opal_check_token(OPAL_CONSOLE_FLUSH)) {
- ret = opal_console_flush(0);
-
- if (ret == OPAL_UNSUPPORTED || ret == OPAL_PARAMETER)
- return;
-
- /* Incrementally flush until there's nothing left */
- while (opal_console_flush(0) != OPAL_SUCCESS);
- } else {
- /*
- * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
- * the console can still be flushed by calling the polling
- * function enough times to flush the buffer. We don't know
- * how much output still needs to be flushed, but we can be
- * generous since the kernel is in panic and doesn't need
- * to do much else.
- */
- printk(KERN_NOTICE "opal: OPAL_CONSOLE_FLUSH missing.\n");
- for (i = 0; i < 1024; i++) {
- opal_poll_events(NULL);
- }
- }
+ opal_flush_console(0);
}
static struct kmsg_dumper opal_kmsg_dumper = {
- .dump = force_opal_console_flush
+ .dump = kmsg_dump_opal_console_flush
};
void __init opal_kmsg_init(void)
diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
index 541c9ea04a32..f7d04b6a2d7a 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor-groups.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
@@ -32,6 +32,34 @@ static struct sensor_group {
struct sg_attr *sgattrs;
} *sgs;
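+/*
+ * Enable or disable a sensor group. The OPAL call may complete
+ * asynchronously, in which case we wait for the completion message
+ * and translate its return code.
+ */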
+int sensor_group_enable(u32 handle, bool enable)
+{
+ struct opal_msg msg;
+ int token, ret;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0)
+ return token;
+
+ ret = opal_sensor_group_enable(handle, token, enable);
+ if (ret == OPAL_ASYNC_COMPLETION) {
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_devel("Failed to wait for the async response\n");
+ ret = -EIO;
+ goto out;
+ }
+ ret = opal_error_code(opal_get_async_rc(msg));
+ } else {
+ ret = opal_error_code(ret);
+ }
+
+out:
+ opal_async_release_token(token);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sensor_group_enable);
+
static ssize_t sg_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index a8d9b4089c31..251528231a9e 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -14,6 +14,8 @@
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/opal.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
.section ".text"
@@ -327,3 +329,5 @@ OPAL_CALL(opal_npu_tl_set, OPAL_NPU_TL_SET);
OPAL_CALL(opal_pci_get_pbcq_tunnel_bar, OPAL_PCI_GET_PBCQ_TUNNEL_BAR);
OPAL_CALL(opal_pci_set_pbcq_tunnel_bar, OPAL_PCI_SET_PBCQ_TUNNEL_BAR);
OPAL_CALL(opal_sensor_read_u64, OPAL_SENSOR_READ_U64);
+OPAL_CALL(opal_sensor_group_enable, OPAL_SENSOR_GROUP_ENABLE);
+OPAL_CALL(opal_nx_coproc_init, OPAL_NX_COPROC_INIT);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 0d539c661748..404c379db168 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -344,70 +344,125 @@ int opal_get_chars(uint32_t vtermno, char *buf, int count)
return 0;
}
-int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
+static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, bool atomic)
{
- int written = 0;
+ unsigned long flags = 0 /* shut up gcc */;
+ int written;
__be64 olen;
- s64 len, rc;
- unsigned long flags;
- __be64 evt;
+ s64 rc;
if (!opal.entry)
return -ENODEV;
- /* We want put_chars to be atomic to avoid mangling of hvsi
- * packets. To do that, we first test for room and return
- * -EAGAIN if there isn't enough.
- *
- * Unfortunately, opal_console_write_buffer_space() doesn't
- * appear to work on opal v1, so we just assume there is
- * enough room and be done with it
- */
- spin_lock_irqsave(&opal_write_lock, flags);
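+ /* Only the atomic variant locks, so concurrent atomic writes cannot interleave */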
+ if (atomic)
+ spin_lock_irqsave(&opal_write_lock, flags);
rc = opal_console_write_buffer_space(vtermno, &olen);
- len = be64_to_cpu(olen);
- if (rc || len < total_len) {
- spin_unlock_irqrestore(&opal_write_lock, flags);
+ if (rc || be64_to_cpu(olen) < total_len) {
/* Closed -> drop characters */
if (rc)
- return total_len;
- opal_poll_events(NULL);
- return -EAGAIN;
+ written = total_len;
+ else
+ written = -EAGAIN;
+ goto out;
}
- /* We still try to handle partial completions, though they
- * should no longer happen.
- */
- rc = OPAL_BUSY;
- while(total_len > 0 && (rc == OPAL_BUSY ||
- rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
- olen = cpu_to_be64(total_len);
- rc = opal_console_write(vtermno, &olen, data);
- len = be64_to_cpu(olen);
-
- /* Closed or other error drop */
- if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
- rc != OPAL_BUSY_EVENT) {
- written = total_len;
- break;
+ /* Should not get a partial write here because space is available. */
+ olen = cpu_to_be64(total_len);
+ rc = opal_console_write(vtermno, &olen, data);
+ if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ if (rc == OPAL_BUSY_EVENT) {
+ mdelay(OPAL_BUSY_DELAY_MS);
+ opal_poll_events(NULL);
+ } else if (rc == OPAL_BUSY) {
+ mdelay(OPAL_BUSY_DELAY_MS);
}
- if (rc == OPAL_SUCCESS) {
- total_len -= len;
- data += len;
- written += len;
+ written = -EAGAIN;
+ goto out;
+ }
+
+ /* Closed or other error drop */
+ if (rc != OPAL_SUCCESS) {
+ written = opal_error_code(rc);
+ goto out;
+ }
+
+ written = be64_to_cpu(olen);
+ if (written < total_len) {
+ if (atomic) {
+ /* Should not happen */
+ pr_warn("atomic console write returned partial "
+ "len=%d written=%d\n", total_len, written);
}
- /* This is a bit nasty but we need that for the console to
- * flush when there aren't any interrupts. We will clean
- * things a bit later to limit that to synchronous path
- * such as the kernel console and xmon/udbg
+ if (!written)
+ written = -EAGAIN;
+ }
+
+out:
+ if (atomic)
+ spin_unlock_irqrestore(&opal_write_lock, flags);
+
+ /* In the -EAGAIN case, callers loop, so we have to flush the console
+ * here in case they have interrupts off (and we don't want to wait
+ * for async flushing if we can make immediate progress here). If
+ * necessary the API could be made entirely non-flushing if the
+ * callers had a ->flush API to use.
+ */
+ if (written == -EAGAIN)
+ opal_flush_console(vtermno);
+
+ return written;
+}
+
+int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
+{
+ return __opal_put_chars(vtermno, data, total_len, false);
+}
+
+/*
+ * opal_put_chars_atomic will not perform partial-writes. Data will be
+ * atomically written to the terminal or not at all. This is not strictly
+ * true at the moment because console space can race with OPAL's console
+ * writes.
+ */
+int opal_put_chars_atomic(uint32_t vtermno, const char *data, int total_len)
+{
+ return __opal_put_chars(vtermno, data, total_len, true);
+}
+
+int opal_flush_console(uint32_t vtermno)
+{
+ s64 rc;
+
+ if (!opal_check_token(OPAL_CONSOLE_FLUSH)) {
+ __be64 evt;
+
+ WARN_ONCE(1, "opal: OPAL_CONSOLE_FLUSH missing.\n");
+ /*
+ * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
+ * the console can still be flushed by calling the polling
+ * function while it has OPAL_EVENT_CONSOLE_OUTPUT events.
*/
- do
+ do {
opal_poll_events(&evt);
- while(rc == OPAL_SUCCESS &&
- (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
+ } while (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT);
+
+ return OPAL_SUCCESS;
}
- spin_unlock_irqrestore(&opal_write_lock, flags);
- return written;
+
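+ /* OPAL_PARTIAL means the firmware flushed some output but more remains */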
+ do {
+ rc = OPAL_BUSY;
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_console_flush(vtermno);
+ if (rc == OPAL_BUSY_EVENT) {
+ mdelay(OPAL_BUSY_DELAY_MS);
+ opal_poll_events(NULL);
+ } else if (rc == OPAL_BUSY) {
+ mdelay(OPAL_BUSY_DELAY_MS);
+ }
+ }
+ } while (rc == OPAL_PARTIAL); /* More to flush */
+
+ return opal_error_code(rc);
}
static int opal_recover_mce(struct pt_regs *regs,
@@ -922,6 +977,7 @@ EXPORT_SYMBOL_GPL(opal_flash_read);
EXPORT_SYMBOL_GPL(opal_flash_write);
EXPORT_SYMBOL_GPL(opal_flash_erase);
EXPORT_SYMBOL_GPL(opal_prd_msg);
+EXPORT_SYMBOL_GPL(opal_check_token);
/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
@@ -1034,3 +1090,5 @@ EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
EXPORT_SYMBOL_GPL(opal_int_eoi);
EXPORT_SYMBOL_GPL(opal_error_code);
+/* Exported for use by the NX compression driver */
+EXPORT_SYMBOL(opal_nx_coproc_init);
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
index cee003de63af..1b18111453d7 100644
--- a/arch/powerpc/platforms/powernv/pci-cxl.c
+++ b/arch/powerpc/platforms/powernv/pci-cxl.c
@@ -8,11 +8,8 @@
*/
#include <linux/module.h>
-#include <linux/msi.h>
-#include <asm/pci-bridge.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
-#include <misc/cxl.h>
#include "pci.h"
@@ -179,199 +176,3 @@ static inline int get_cxl_module(void)
#else
static inline int get_cxl_module(void) { return 0; }
#endif
-
-/*
- * Sets flags and switches the controller ops to enable the cxl kernel api.
- * Originally the cxl kernel API operated on a virtual PHB, but certain cards
- * such as the Mellanox CX4 use a peer model instead and for these cards the
- * cxl kernel api will operate on the real PHB.
- */
-int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable)
-{
- struct pnv_phb *phb = hose->private_data;
- int rc;
-
- if (!enable) {
- /*
- * Once cxl mode is enabled on the PHB, there is currently no
- * known safe method to disable it again, and trying risks a
- * checkstop. If we can find a way to safely disable cxl mode
- * in the future we can revisit this, but for now the only sane
- * thing to do is to refuse to disable cxl mode:
- */
- return -EPERM;
- }
-
- /*
- * Hold a reference to the cxl module since several PHB operations now
- * depend on it, and it would be insane to allow it to be removed so
- * long as we are in this mode (and since we can't safely disable this
- * mode once enabled...).
- */
- rc = get_cxl_module();
- if (rc)
- return rc;
-
- phb->flags |= PNV_PHB_FLAG_CXL;
- hose->controller_ops = pnv_cxl_cx4_ioda_controller_ops;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pnv_cxl_enable_phb_kernel_api);
-
-bool pnv_pci_on_cxl_phb(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- return !!(phb->flags & PNV_PHB_FLAG_CXL);
-}
-EXPORT_SYMBOL_GPL(pnv_pci_on_cxl_phb);
-
-struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose)
-{
- struct pnv_phb *phb = hose->private_data;
-
- return (struct cxl_afu *)phb->cxl_afu;
-}
-EXPORT_SYMBOL_GPL(pnv_cxl_phb_to_afu);
-
-void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- phb->cxl_afu = afu;
-}
-EXPORT_SYMBOL_GPL(pnv_cxl_phb_set_peer_afu);
-
-/*
- * In the peer cxl model, the XSL/PSL is physical function 0, and will be used
- * by other functions on the device for memory access and interrupts. When the
- * other functions are enabled we explicitly take a reference on the cxl
- * function since they will use it, and allocate a default context associated
- * with that function just like the vPHB model of the cxl kernel API.
- */
-bool pnv_cxl_enable_device_hook(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct cxl_afu *afu = phb->cxl_afu;
-
- if (!pnv_pci_enable_device_hook(dev))
- return false;
-
-
- /* No special handling for the cxl function, which is always PF 0 */
- if (PCI_FUNC(dev->devfn) == 0)
- return true;
-
- if (!afu) {
- dev_WARN(&dev->dev, "Attempted to enable function > 0 on CXL PHB without a peer AFU\n");
- return false;
- }
-
- dev_info(&dev->dev, "Enabling function on CXL enabled PHB with peer AFU\n");
-
- /* Make sure the peer AFU can't go away while this device is active */
- cxl_afu_get(afu);
-
- return cxl_pci_associate_default_context(dev, afu);
-}
-
-void pnv_cxl_disable_device(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct cxl_afu *afu = phb->cxl_afu;
-
- /* No special handling for cxl function: */
- if (PCI_FUNC(dev->devfn) == 0)
- return;
-
- cxl_pci_disable_device(dev);
- cxl_afu_put(afu);
-}
-
-/*
- * This is a special version of pnv_setup_msi_irqs for cards in cxl mode. This
- * function handles setting up the IVTE entries for the XSL to use.
- *
- * We are currently not filling out the MSIX table, since the only currently
- * supported adapter (CX4) uses a custom MSIX table format in cxl mode and it
- * is up to their driver to fill that out. In the future we may fill out the
- * MSIX table (and change the IVTE entries to be an index to the MSIX table)
- * for adapters implementing the Full MSI-X mode described in the CAIA.
- */
-int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct msi_desc *entry;
- struct cxl_context *ctx = NULL;
- unsigned int virq;
- int hwirq;
- int afu_irq = 0;
- int rc;
-
- if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
- return -ENODEV;
-
- if (pdev->no_64bit_msi && !phb->msi32_support)
- return -ENODEV;
-
- rc = cxl_cx4_setup_msi_irqs(pdev, nvec, type);
- if (rc)
- return rc;
-
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
- pr_warn("%s: Supports only 64-bit MSIs\n",
- pci_name(pdev));
- return -ENXIO;
- }
-
- hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
- if (WARN_ON(hwirq <= 0))
- return (hwirq ? hwirq : -ENOMEM);
-
- virq = irq_create_mapping(NULL, hwirq);
- if (!virq) {
- pr_warn("%s: Failed to map cxl mode MSI to linux irq\n",
- pci_name(pdev));
- return -ENOMEM;
- }
-
- rc = pnv_cxl_ioda_msi_setup(pdev, hwirq, virq);
- if (rc) {
- pr_warn("%s: Failed to setup cxl mode MSI\n", pci_name(pdev));
- irq_dispose_mapping(virq);
- return rc;
- }
-
- irq_set_msi_desc(virq, entry);
- }
-
- return 0;
-}
-
-void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct msi_desc *entry;
- irq_hw_number_t hwirq;
-
- if (WARN_ON(!phb))
- return;
-
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->irq)
- continue;
- hwirq = virq_to_hw(entry->irq);
- irq_set_msi_desc(entry->irq, NULL);
- irq_dispose_mapping(entry->irq);
- }
-
- cxl_cx4_teardown_msi_irqs(pdev);
-}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
new file mode 100644
index 000000000000..6c5db1acbe8d
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * TCE helpers for IODA PCI/PCIe on PowerNV platforms
+ *
+ * Copyright 2018 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+
+#include <asm/iommu.h>
+#include <asm/tce.h>
+#include "pci.h"
+
+void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
+ void *tce_mem, u64 tce_size,
+ u64 dma_offset, unsigned int page_shift)
+{
+ tbl->it_blocksize = 16;
+ tbl->it_base = (unsigned long)tce_mem;
+ tbl->it_page_shift = page_shift;
+ tbl->it_offset = dma_offset >> tbl->it_page_shift;
+ tbl->it_index = 0;
+ tbl->it_size = tce_size >> 3;
+ tbl->it_busno = 0;
+ tbl->it_type = TCE_PCI;
+}
+
+static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
+{
+ struct page *tce_mem = NULL;
+ __be64 *addr;
+
+ tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
+ if (!tce_mem) {
+ pr_err("Failed to allocate a TCE memory, level shift=%d\n",
+ shift);
+ return NULL;
+ }
+ addr = page_address(tce_mem);
+ memset(addr, 0, 1UL << shift);
+
+ return addr;
+}
+
+static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
+{
+ __be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
+ int level = tbl->it_indirect_levels;
+ const long shift = ilog2(tbl->it_level_size);
+ unsigned long mask = (tbl->it_level_size - 1) << (level * shift);
+
+ while (level) {
+ int n = (idx & mask) >> (level * shift);
+ unsigned long tce;
+
+ if (tmp[n] == 0) {
+ __be64 *tmp2;
+
+ if (!alloc)
+ return NULL;
+
+ tmp2 = pnv_alloc_tce_level(tbl->it_nid,
+ ilog2(tbl->it_level_size) + 3);
+ if (!tmp2)
+ return NULL;
+
+ tmp[n] = cpu_to_be64(__pa(tmp2) |
+ TCE_PCI_READ | TCE_PCI_WRITE);
+ }
+ tce = be64_to_cpu(tmp[n]);
+
+ tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
+ idx &= ~mask;
+ mask >>= shift;
+ --level;
+ }
+
+ return tmp + idx;
+}
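pnv_tce() above resolves a TCE index by peeling ilog2(it_level_size) bits off the index at each indirect level, allocating missing intermediate levels on the way down when asked to. The index arithmetic on its own, as a standalone sketch assuming 512-entry levels and a made-up index (no kernel structures involved):

#include <stdio.h>

int main(void)
{
	const unsigned long level_size = 512;	/* entries per level */
	const long shift = 9;			/* ilog2(level_size) */
	int level = 2;				/* indirect levels */
	unsigned long idx = 0x12345;		/* TCE index to resolve */
	unsigned long mask = (level_size - 1) << (level * shift);

	while (level) {
		unsigned long n = (idx & mask) >> (level * shift);

		printf("level %d: descend via entry %lu\n", level, n);
		idx &= ~mask;	/* strip the bits consumed at this level */
		mask >>= shift;
		--level;
	}
	printf("leaf offset: %lu\n", idx);	/* index into last level */
	return 0;
}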
+
+int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr, enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ u64 proto_tce = iommu_direction_to_tce_perm(direction);
+ u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
+ long i;
+
+ if (proto_tce & TCE_PCI_WRITE)
+ proto_tce |= TCE_PCI_READ;
+
+ for (i = 0; i < npages; i++) {
+ unsigned long newtce = proto_tce |
+ ((rpn + i) << tbl->it_page_shift);
+ unsigned long idx = index - tbl->it_offset + i;
+
+ *(pnv_tce(tbl, false, idx, true)) = cpu_to_be64(newtce);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+int pnv_tce_xchg(struct iommu_table *tbl, long index,
+ unsigned long *hpa, enum dma_data_direction *direction,
+ bool alloc)
+{
+ u64 proto_tce = iommu_direction_to_tce_perm(*direction);
+ unsigned long newtce = *hpa | proto_tce, oldtce;
+ unsigned long idx = index - tbl->it_offset;
+ __be64 *ptce = NULL;
+
+ BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
+
+ if (*direction == DMA_NONE) {
+ ptce = pnv_tce(tbl, false, idx, false);
+ if (!ptce) {
+ *hpa = 0;
+ return 0;
+ }
+ }
+
+ if (!ptce) {
+ ptce = pnv_tce(tbl, false, idx, alloc);
+ if (!ptce)
+ return alloc ? H_HARDWARE : H_TOO_HARD;
+ }
+
+ if (newtce & TCE_PCI_WRITE)
+ newtce |= TCE_PCI_READ;
+
+ oldtce = be64_to_cpu(xchg(ptce, cpu_to_be64(newtce)));
+ *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+ *direction = iommu_tce_direction(oldtce);
+
+ return 0;
+}
+
+__be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index, bool alloc)
+{
+ if (WARN_ON_ONCE(!tbl->it_userspace))
+ return NULL;
+
+ return pnv_tce(tbl, true, index - tbl->it_offset, alloc);
+}
+#endif
+
+void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
+{
+ long i;
+
+ for (i = 0; i < npages; i++) {
+ unsigned long idx = index - tbl->it_offset + i;
+ __be64 *ptce = pnv_tce(tbl, false, idx, false);
+
+ if (ptce)
+ *ptce = cpu_to_be64(0);
+ }
+}
+
+unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
+{
+ __be64 *ptce = pnv_tce(tbl, false, index - tbl->it_offset, false);
+
+ if (!ptce)
+ return 0;
+
+ return be64_to_cpu(*ptce);
+}
+
+static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
+ unsigned long size, unsigned int levels)
+{
+ const unsigned long addr_ul = (unsigned long) addr &
+ ~(TCE_PCI_READ | TCE_PCI_WRITE);
+
+ if (levels) {
+ long i;
+ u64 *tmp = (u64 *) addr_ul;
+
+ for (i = 0; i < size; ++i) {
+ unsigned long hpa = be64_to_cpu(tmp[i]);
+
+ if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
+ continue;
+
+ pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
+ levels - 1);
+ }
+ }
+
+ free_pages(addr_ul, get_order(size << 3));
+}
+
+void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
+{
+ const unsigned long size = tbl->it_indirect_levels ?
+ tbl->it_level_size : tbl->it_size;
+
+ if (!tbl->it_size)
+ return;
+
+ pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
+ tbl->it_indirect_levels);
+ if (tbl->it_userspace) {
+ pnv_pci_ioda2_table_do_free_pages(tbl->it_userspace, size,
+ tbl->it_indirect_levels);
+ }
+}
+
+static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned int shift,
+ unsigned int levels, unsigned long limit,
+ unsigned long *current_offset, unsigned long *total_allocated)
+{
+ __be64 *addr, *tmp;
+ unsigned long allocated = 1UL << shift;
+ unsigned int entries = 1UL << (shift - 3);
+ long i;
+
+ addr = pnv_alloc_tce_level(nid, shift);
+ *total_allocated += allocated;
+
+ --levels;
+ if (!levels) {
+ *current_offset += allocated;
+ return addr;
+ }
+
+ for (i = 0; i < entries; ++i) {
+ tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
+ levels, limit, current_offset, total_allocated);
+ if (!tmp)
+ break;
+
+ addr[i] = cpu_to_be64(__pa(tmp) |
+ TCE_PCI_READ | TCE_PCI_WRITE);
+
+ if (*current_offset >= limit)
+ break;
+ }
+
+ return addr;
+}
+
+long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ __u32 page_shift, __u64 window_size, __u32 levels,
+ bool alloc_userspace_copy, struct iommu_table *tbl)
+{
+ void *addr, *uas = NULL;
+ unsigned long offset = 0, level_shift, total_allocated = 0;
+ unsigned long total_allocated_uas = 0;
+ const unsigned int window_shift = ilog2(window_size);
+ unsigned int entries_shift = window_shift - page_shift;
+ unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
+ PAGE_SHIFT);
+ const unsigned long tce_table_size = 1UL << table_shift;
+ unsigned int tmplevels = levels;
+
+ if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
+ return -EINVAL;
+
+ if (!is_power_of_2(window_size))
+ return -EINVAL;
+
+ if (alloc_userspace_copy && (window_size > (1ULL << 32)))
+ tmplevels = 1;
+
+ /* Adjust direct table size from window_size and levels */
+ entries_shift = (entries_shift + levels - 1) / levels;
+ level_shift = entries_shift + 3;
+ level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
+
+ if ((level_shift - 3) * levels + page_shift >= 60)
+ return -EINVAL;
+
+ /* Allocate TCE table */
+ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+ tmplevels, tce_table_size, &offset, &total_allocated);
+
+ /* addr==NULL means that the first level allocation failed */
+ if (!addr)
+ return -ENOMEM;
+
+ /*
+ * The first level was allocated but some lower level failed, so
+ * we did not allocate as much as we wanted; release the
+ * partially allocated table.
+ */
+ if (tmplevels == levels && offset < tce_table_size)
+ goto free_tces_exit;
+
+ /* Allocate userspace view of the TCE table */
+ if (alloc_userspace_copy) {
+ offset = 0;
+ uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+ levels, tce_table_size, &offset,
+ &total_allocated_uas);
+ if (!uas)
+ goto free_tces_exit;
+ if (tmplevels == levels && (offset < tce_table_size ||
+ total_allocated_uas != total_allocated))
+ goto free_uas_exit;
+ }
+
+ /* Setup linux iommu table */
+ pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
+ page_shift);
+ tbl->it_level_size = 1ULL << (level_shift - 3);
+ tbl->it_indirect_levels = levels - 1;
+ tbl->it_allocated_size = total_allocated;
+ tbl->it_userspace = uas;
+ tbl->it_nid = nid;
+
+ pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
+ window_size, tce_table_size, bus_offset, tbl->it_base,
+ tbl->it_userspace, tmplevels, levels);
+
+ return 0;
+
+free_uas_exit:
+ pnv_pci_ioda2_table_do_free_pages(uas,
+ 1ULL << (level_shift - 3), levels - 1);
+free_tces_exit:
+ pnv_pci_ioda2_table_do_free_pages(addr,
+ 1ULL << (level_shift - 3), levels - 1);
+
+ return -ENOMEM;
+}
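The sizing arithmetic above is easier to follow with concrete numbers. A standalone sketch, assuming a 1GB DMA window, 4K IOMMU pages, 64K kernel pages and two levels; all values are illustrative and ilog2u() is a local helper:

#include <stdio.h>

static unsigned int ilog2u(unsigned long long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	const unsigned long long window_size = 1ULL << 30;	/* 1GB */
	const unsigned int page_shift = 12;			/* 4K */
	const unsigned int levels = 2;
	const unsigned int PAGE_SHIFT = 16;	/* 64K kernel pages */

	unsigned int window_shift = ilog2u(window_size);
	unsigned int entries_shift = window_shift - page_shift;
	unsigned int table_shift = entries_shift + 3 > PAGE_SHIFT ?
				   entries_shift + 3 : PAGE_SHIFT;
	unsigned long long tce_table_size = 1ULL << table_shift;
	unsigned int level_shift;

	/* Split the entries across levels; each TCE is 8 bytes */
	entries_shift = (entries_shift + levels - 1) / levels;
	level_shift = entries_shift + 3;
	if (level_shift < PAGE_SHIFT)
		level_shift = PAGE_SHIFT;

	printf("total table: %llu bytes, per-level chunk: %llu bytes\n",
	       tce_table_size, 1ULL << level_shift);
	return 0;
}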
+
+static void pnv_iommu_table_group_link_free(struct rcu_head *head)
+{
+ struct iommu_table_group_link *tgl = container_of(head,
+ struct iommu_table_group_link, rcu);
+
+ kfree(tgl);
+}
+
+void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
+ struct iommu_table_group *table_group)
+{
+ long i;
+ bool found;
+ struct iommu_table_group_link *tgl;
+
+ if (!tbl || !table_group)
+ return;
+
+ /* Remove link to a group from table's list of attached groups */
+ found = false;
+ list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
+ if (tgl->table_group == table_group) {
+ list_del_rcu(&tgl->next);
+ call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
+ found = true;
+ break;
+ }
+ }
+ if (WARN_ON(!found))
+ return;
+
+ /* Clean a pointer to iommu_table in iommu_table_group::tables[] */
+ found = false;
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ if (table_group->tables[i] == tbl) {
+ table_group->tables[i] = NULL;
+ found = true;
+ break;
+ }
+ }
+ WARN_ON(!found);
+}
+
+long pnv_pci_link_table_and_group(int node, int num,
+ struct iommu_table *tbl,
+ struct iommu_table_group *table_group)
+{
+ struct iommu_table_group_link *tgl = NULL;
+
+ if (WARN_ON(!tbl || !table_group))
+ return -EINVAL;
+
+ tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
+ node);
+ if (!tgl)
+ return -ENOMEM;
+
+ tgl->table_group = table_group;
+ list_add_rcu(&tgl->next, &tbl->it_group_list);
+
+ table_group->tables[num] = tbl;
+
+ return 0;
+}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 5bd0eb6681bc..4e6302bf4073 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -46,17 +46,14 @@
#include "powernv.h"
#include "pci.h"
+#include "../../../../drivers/pci/pci.h"
#define PNV_IODA1_M64_NUM 16 /* Number of M64 BARs */
#define PNV_IODA1_M64_SEGS 8 /* Segments per M64 BAR */
#define PNV_IODA1_DMA32_SEGSIZE 0x10000000
-#define POWERNV_IOMMU_DEFAULT_LEVELS 1
-#define POWERNV_IOMMU_MAX_LEVELS 5
-
static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
"NPU_OCAPI" };
-static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...)
@@ -2007,7 +2004,7 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction)
{
- long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction, true);
if (!ret)
pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false);
@@ -2018,7 +2015,7 @@ static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
static int pnv_ioda1_tce_xchg_rm(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction)
{
- long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction, false);
if (!ret)
pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, true);
@@ -2040,6 +2037,7 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
#ifdef CONFIG_IOMMU_API
.exchange = pnv_ioda1_tce_xchg,
.exchange_rm = pnv_ioda1_tce_xchg_rm,
+ .useraddrptr = pnv_tce_useraddrptr,
#endif
.clear = pnv_ioda1_tce_free,
.get = pnv_tce_get,
@@ -2171,7 +2169,7 @@ static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction)
{
- long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction, true);
if (!ret)
pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);
@@ -2182,7 +2180,7 @@ static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
static int pnv_ioda2_tce_xchg_rm(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction)
{
- long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction, false);
if (!ret)
pnv_pci_ioda2_tce_invalidate(tbl, index, 1, true);
@@ -2199,20 +2197,16 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
}
-static void pnv_ioda2_table_free(struct iommu_table *tbl)
-{
- pnv_pci_ioda2_table_free_pages(tbl);
-}
-
static struct iommu_table_ops pnv_ioda2_iommu_ops = {
.set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
.exchange = pnv_ioda2_tce_xchg,
.exchange_rm = pnv_ioda2_tce_xchg_rm,
+ .useraddrptr = pnv_tce_useraddrptr,
#endif
.clear = pnv_ioda2_tce_free,
.get = pnv_tce_get,
- .free = pnv_ioda2_table_free,
+ .free = pnv_pci_ioda2_table_free_pages,
};
static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data)
@@ -2462,13 +2456,9 @@ void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
pe->tce_bypass_enabled = enable;
}
-static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
- __u32 page_shift, __u64 window_size, __u32 levels,
- struct iommu_table *tbl);
-
static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
int num, __u32 page_shift, __u64 window_size, __u32 levels,
- struct iommu_table **ptbl)
+ bool alloc_userspace_copy, struct iommu_table **ptbl)
{
struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
table_group);
@@ -2485,7 +2475,7 @@ static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
ret = pnv_pci_ioda2_table_alloc_pages(nid,
bus_offset, page_shift, window_size,
- levels, tbl);
+ levels, alloc_userspace_copy, tbl);
if (ret) {
iommu_tce_table_put(tbl);
return ret;
@@ -2518,7 +2508,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
IOMMU_PAGE_SHIFT_4K,
window_size,
- POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
+ POWERNV_IOMMU_DEFAULT_LEVELS, false, &tbl);
if (rc) {
pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
rc);
@@ -2605,7 +2595,16 @@ static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
tce_table_size, direct_table_size);
}
- return bytes;
+ return bytes + bytes; /* one for HW table, one for userspace copy */
+}
+
+static long pnv_pci_ioda2_create_table_userspace(
+ struct iommu_table_group *table_group,
+ int num, __u32 page_shift, __u64 window_size, __u32 levels,
+ struct iommu_table **ptbl)
+{
+ return pnv_pci_ioda2_create_table(table_group,
+ num, page_shift, window_size, levels, true, ptbl);
}
static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
@@ -2634,7 +2633,7 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
.get_table_size = pnv_pci_ioda2_get_table_size,
- .create_table = pnv_pci_ioda2_create_table,
+ .create_table = pnv_pci_ioda2_create_table_userspace,
.set_window = pnv_pci_ioda2_set_window,
.unset_window = pnv_pci_ioda2_unset_window,
.take_ownership = pnv_ioda2_take_ownership,
@@ -2739,7 +2738,7 @@ static void pnv_ioda2_npu_take_ownership(struct iommu_table_group *table_group)
static struct iommu_table_group_ops pnv_pci_ioda2_npu_ops = {
.get_table_size = pnv_pci_ioda2_get_table_size,
- .create_table = pnv_pci_ioda2_create_table,
+ .create_table = pnv_pci_ioda2_create_table_userspace,
.set_window = pnv_pci_ioda2_npu_set_window,
.unset_window = pnv_pci_ioda2_npu_unset_window,
.take_ownership = pnv_ioda2_npu_take_ownership,
@@ -2773,144 +2772,6 @@ static void pnv_pci_ioda_setup_iommu_api(void)
static void pnv_pci_ioda_setup_iommu_api(void) { };
#endif
-static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
- unsigned levels, unsigned long limit,
- unsigned long *current_offset, unsigned long *total_allocated)
-{
- struct page *tce_mem = NULL;
- __be64 *addr, *tmp;
- unsigned order = max_t(unsigned, shift, PAGE_SHIFT) - PAGE_SHIFT;
- unsigned long allocated = 1UL << (order + PAGE_SHIFT);
- unsigned entries = 1UL << (shift - 3);
- long i;
-
- tce_mem = alloc_pages_node(nid, GFP_KERNEL, order);
- if (!tce_mem) {
- pr_err("Failed to allocate a TCE memory, order=%d\n", order);
- return NULL;
- }
- addr = page_address(tce_mem);
- memset(addr, 0, allocated);
- *total_allocated += allocated;
-
- --levels;
- if (!levels) {
- *current_offset += allocated;
- return addr;
- }
-
- for (i = 0; i < entries; ++i) {
- tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
- levels, limit, current_offset, total_allocated);
- if (!tmp)
- break;
-
- addr[i] = cpu_to_be64(__pa(tmp) |
- TCE_PCI_READ | TCE_PCI_WRITE);
-
- if (*current_offset >= limit)
- break;
- }
-
- return addr;
-}
-
-static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
- unsigned long size, unsigned level);
-
-static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
- __u32 page_shift, __u64 window_size, __u32 levels,
- struct iommu_table *tbl)
-{
- void *addr;
- unsigned long offset = 0, level_shift, total_allocated = 0;
- const unsigned window_shift = ilog2(window_size);
- unsigned entries_shift = window_shift - page_shift;
- unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
- const unsigned long tce_table_size = 1UL << table_shift;
-
- if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
- return -EINVAL;
-
- if (!is_power_of_2(window_size))
- return -EINVAL;
-
- /* Adjust direct table size from window_size and levels */
- entries_shift = (entries_shift + levels - 1) / levels;
- level_shift = entries_shift + 3;
- level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
-
- if ((level_shift - 3) * levels + page_shift >= 60)
- return -EINVAL;
-
- /* Allocate TCE table */
- addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
- levels, tce_table_size, &offset, &total_allocated);
-
- /* addr==NULL means that the first level allocation failed */
- if (!addr)
- return -ENOMEM;
-
- /*
- * First level was allocated but some lower level failed as
- * we did not allocate as much as we wanted,
- * release partially allocated table.
- */
- if (offset < tce_table_size) {
- pnv_pci_ioda2_table_do_free_pages(addr,
- 1ULL << (level_shift - 3), levels - 1);
- return -ENOMEM;
- }
-
- /* Setup linux iommu table */
- pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
- page_shift);
- tbl->it_level_size = 1ULL << (level_shift - 3);
- tbl->it_indirect_levels = levels - 1;
- tbl->it_allocated_size = total_allocated;
-
- pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
- window_size, tce_table_size, bus_offset);
-
- return 0;
-}
-
-static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
- unsigned long size, unsigned level)
-{
- const unsigned long addr_ul = (unsigned long) addr &
- ~(TCE_PCI_READ | TCE_PCI_WRITE);
-
- if (level) {
- long i;
- u64 *tmp = (u64 *) addr_ul;
-
- for (i = 0; i < size; ++i) {
- unsigned long hpa = be64_to_cpu(tmp[i]);
-
- if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
- continue;
-
- pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
- level - 1);
- }
- }
-
- free_pages(addr_ul, get_order(size << 3));
-}
-
-static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
-{
- const unsigned long size = tbl->it_indirect_levels ?
- tbl->it_level_size : tbl->it_size;
-
- if (!tbl->it_size)
- return;
-
- pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
- tbl->it_indirect_levels);
-}
-
static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
{
struct pci_controller *hose = phb->hose;
@@ -2925,7 +2786,7 @@ static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
/* Add 16M for POWER8 by default */
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
!cpu_has_feature(CPU_FTR_ARCH_300))
- mask |= SZ_16M;
+ mask |= SZ_16M | SZ_256M;
return mask;
}
@@ -3138,7 +2999,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
struct pci_dn *pdn;
int mul, total_vfs;
- if (!pdev->is_physfn || pdev->is_added)
+ if (!pdev->is_physfn || pci_dev_is_added(pdev))
return;
pdn = pci_get_pdn(pdev);
@@ -3575,7 +3436,7 @@ static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
/* Prevent enabling devices for which we couldn't properly
* assign a PE
*/
-bool pnv_pci_enable_device_hook(struct pci_dev *dev)
+static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -3843,26 +3704,6 @@ static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
.shutdown = pnv_pci_ioda_shutdown,
};
-#ifdef CONFIG_CXL_BASE
-const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops = {
- .dma_dev_setup = pnv_pci_dma_dev_setup,
- .dma_bus_setup = pnv_pci_dma_bus_setup,
-#ifdef CONFIG_PCI_MSI
- .setup_msi_irqs = pnv_cxl_cx4_setup_msi_irqs,
- .teardown_msi_irqs = pnv_cxl_cx4_teardown_msi_irqs,
-#endif
- .enable_device_hook = pnv_cxl_enable_device_hook,
- .disable_device = pnv_cxl_disable_device,
- .release_device = pnv_pci_release_device,
- .window_alignment = pnv_pci_window_alignment,
- .setup_bridge = pnv_pci_setup_bridge,
- .reset_secondary_bus = pnv_pci_reset_secondary_bus,
- .dma_set_mask = pnv_pci_ioda_dma_set_mask,
- .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
- .shutdown = pnv_pci_ioda_shutdown,
-};
-#endif
-
static void __init pnv_pci_init_ioda_phb(struct device_node *np,
u64 hub_id, int ioda_type)
{
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b265ecc0836a..13aef2323bbc 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -802,85 +802,6 @@ struct pci_ops pnv_pci_ops = {
.write = pnv_pci_write_config,
};
-static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
-{
- __be64 *tmp = ((__be64 *)tbl->it_base);
- int level = tbl->it_indirect_levels;
- const long shift = ilog2(tbl->it_level_size);
- unsigned long mask = (tbl->it_level_size - 1) << (level * shift);
-
- while (level) {
- int n = (idx & mask) >> (level * shift);
- unsigned long tce = be64_to_cpu(tmp[n]);
-
- tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
- idx &= ~mask;
- mask >>= shift;
- --level;
- }
-
- return tmp + idx;
-}
-
-int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
- unsigned long uaddr, enum dma_data_direction direction,
- unsigned long attrs)
-{
- u64 proto_tce = iommu_direction_to_tce_perm(direction);
- u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
- long i;
-
- if (proto_tce & TCE_PCI_WRITE)
- proto_tce |= TCE_PCI_READ;
-
- for (i = 0; i < npages; i++) {
- unsigned long newtce = proto_tce |
- ((rpn + i) << tbl->it_page_shift);
- unsigned long idx = index - tbl->it_offset + i;
-
- *(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_IOMMU_API
-int pnv_tce_xchg(struct iommu_table *tbl, long index,
- unsigned long *hpa, enum dma_data_direction *direction)
-{
- u64 proto_tce = iommu_direction_to_tce_perm(*direction);
- unsigned long newtce = *hpa | proto_tce, oldtce;
- unsigned long idx = index - tbl->it_offset;
-
- BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
-
- if (newtce & TCE_PCI_WRITE)
- newtce |= TCE_PCI_READ;
-
- oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
- *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
- *direction = iommu_tce_direction(oldtce);
-
- return 0;
-}
-#endif
-
-void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
-{
- long i;
-
- for (i = 0; i < npages; i++) {
- unsigned long idx = index - tbl->it_offset + i;
-
- *(pnv_tce(tbl, idx)) = cpu_to_be64(0);
- }
-}
-
-unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
-{
- return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
-}
-
struct iommu_table *pnv_pci_table_alloc(int nid)
{
struct iommu_table *tbl;
@@ -895,85 +816,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
return tbl;
}
-long pnv_pci_link_table_and_group(int node, int num,
- struct iommu_table *tbl,
- struct iommu_table_group *table_group)
-{
- struct iommu_table_group_link *tgl = NULL;
-
- if (WARN_ON(!tbl || !table_group))
- return -EINVAL;
-
- tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
- node);
- if (!tgl)
- return -ENOMEM;
-
- tgl->table_group = table_group;
- list_add_rcu(&tgl->next, &tbl->it_group_list);
-
- table_group->tables[num] = tbl;
-
- return 0;
-}
-
-static void pnv_iommu_table_group_link_free(struct rcu_head *head)
-{
- struct iommu_table_group_link *tgl = container_of(head,
- struct iommu_table_group_link, rcu);
-
- kfree(tgl);
-}
-
-void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
- struct iommu_table_group *table_group)
-{
- long i;
- bool found;
- struct iommu_table_group_link *tgl;
-
- if (!tbl || !table_group)
- return;
-
- /* Remove link to a group from table's list of attached groups */
- found = false;
- list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
- if (tgl->table_group == table_group) {
- list_del_rcu(&tgl->next);
- call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
- found = true;
- break;
- }
- }
- if (WARN_ON(!found))
- return;
-
- /* Clean a pointer to iommu_table in iommu_table_group::tables[] */
- found = false;
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
- if (table_group->tables[i] == tbl) {
- table_group->tables[i] = NULL;
- found = true;
- break;
- }
- }
- WARN_ON(!found);
-}
-
-void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
- void *tce_mem, u64 tce_size,
- u64 dma_offset, unsigned page_shift)
-{
- tbl->it_blocksize = 16;
- tbl->it_base = (unsigned long)tce_mem;
- tbl->it_page_shift = page_shift;
- tbl->it_offset = dma_offset >> tbl->it_page_shift;
- tbl->it_index = 0;
- tbl->it_size = tce_size >> 3;
- tbl->it_busno = 0;
- tbl->it_type = TCE_PCI;
-}
-
void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index eada4b6068cb..8b37b28e3831 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -88,7 +88,6 @@ struct pnv_ioda_pe {
};
#define PNV_PHB_FLAG_EEH (1 << 0)
-#define PNV_PHB_FLAG_CXL (1 << 1) /* Real PHB supporting the cxl kernel API */
struct pnv_phb {
struct pci_controller *hose;
@@ -194,20 +193,10 @@ struct pnv_phb {
bool nmmu_flush;
} npu;
-#ifdef CONFIG_CXL_BASE
- struct cxl_afu *cxl_afu;
-#endif
int p2p_target_count;
};
extern struct pci_ops pnv_pci_ops;
-extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
- unsigned long uaddr, enum dma_data_direction direction,
- unsigned long attrs);
-extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
-extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
- unsigned long *hpa, enum dma_data_direction *direction);
-extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
unsigned char *log_buff);
@@ -217,14 +206,6 @@ int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val);
extern struct iommu_table *pnv_pci_table_alloc(int nid);
-extern long pnv_pci_link_table_and_group(int node, int num,
- struct iommu_table *tbl,
- struct iommu_table_group *table_group);
-extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
- struct iommu_table_group *table_group);
-extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
- void *tce_mem, u64 tce_size,
- u64 dma_offset, unsigned page_shift);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu_phb(struct device_node *np);
@@ -238,7 +219,6 @@ extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
-extern bool pnv_pci_enable_device_hook(struct pci_dev *dev);
extern void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
extern int pnv_eeh_post_init(void);
@@ -262,14 +242,33 @@ extern void pnv_npu_take_ownership(struct pnv_ioda_pe *npe);
extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
extern int pnv_npu2_init(struct pnv_phb *phb);
-/* cxl functions */
-extern bool pnv_cxl_enable_device_hook(struct pci_dev *dev);
-extern void pnv_cxl_disable_device(struct pci_dev *dev);
-extern int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
-extern void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev);
+/* pci-ioda-tce.c */
+#define POWERNV_IOMMU_DEFAULT_LEVELS 1
+#define POWERNV_IOMMU_MAX_LEVELS 5
+extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr, enum dma_data_direction direction,
+ unsigned long attrs);
+extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
+extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
+ unsigned long *hpa, enum dma_data_direction *direction,
+ bool alloc);
+extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index,
+ bool alloc);
+extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
+
+extern long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ __u32 page_shift, __u64 window_size, __u32 levels,
+ bool alloc_userspace_copy, struct iommu_table *tbl);
+extern void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
-/* phb ops (cxl switches these when enabling the kernel api on the phb) */
-extern const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops;
+extern long pnv_pci_link_table_and_group(int node, int num,
+ struct iommu_table *tbl,
+ struct iommu_table_group *table_group);
+extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
+ struct iommu_table_group *table_group);
+extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
+ void *tce_mem, u64 tce_size,
+ u64 dma_offset, unsigned int page_shift);
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index f96df0a25d05..adddde023622 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -78,6 +78,12 @@ static void init_fw_feat_flags(struct device_node *np)
if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+ if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
+ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+
+ if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np))
+ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+
/*
* The features below are enabled by default, so we instead look to see
* if firmware has *disabled* them, and clear them if so.
@@ -124,7 +130,7 @@ static void pnv_setup_rfi_flush(void)
security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
setup_rfi_flush(type, enable);
- setup_barrier_nospec();
+ setup_count_cache_flush();
}
static void __init pnv_setup_arch(void)
@@ -314,7 +320,7 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
u64 reinit_flags;
if (xive_enabled())
- xive_kexec_teardown_cpu(secondary);
+ xive_teardown_cpu();
else
xics_kexec_teardown_cpu(secondary);
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index b80909957792..0d354e19ef92 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -283,23 +283,6 @@ static void pnv_cause_ipi(int cpu)
ic_cause_ipi(cpu);
}
-static void pnv_p9_dd1_cause_ipi(int cpu)
-{
- int this_cpu = get_cpu();
-
- /*
- * POWER9 DD1 has a global addressed msgsnd, but for now we restrict
- * IPIs to same core, because it requires additional synchronization
- * for inter-core doorbells which we do not implement.
- */
- if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu)))
- doorbell_global_ipi(cpu);
- else
- ic_cause_ipi(cpu);
-
- put_cpu();
-}
-
static void __init pnv_smp_probe(void)
{
if (xive_enabled())
@@ -311,14 +294,10 @@ static void __init pnv_smp_probe(void)
ic_cause_ipi = smp_ops->cause_ipi;
WARN_ON(!ic_cause_ipi);
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- smp_ops->cause_ipi = pnv_p9_dd1_cause_ipi;
- else
- smp_ops->cause_ipi = doorbell_global_ipi;
- } else {
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ smp_ops->cause_ipi = doorbell_global_ipi;
+ else
smp_ops->cause_ipi = pnv_cause_ipi;
- }
}
}
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index ae0100fd35bb..f5493dbdd7ff 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/dcache.h>
#include <linux/mutex.h>
+#include <linux/stringify.h>
/*
* Overview of Virtual Accelerator Switchboard (VAS).
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 13eede60c24d..7e89d5c47068 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -25,6 +25,6 @@ obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
-ifeq ($(CONFIG_PPC_PSERIES),y)
+ifdef CONFIG_PPC_PSERIES
obj-$(CONFIG_SUSPEND) += suspend.o
endif
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index c511a1743a44..d91412c591ef 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -13,6 +13,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/feature-fixups.h>
.section ".text"
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 46fbaef69a59..23f54223ed56 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -58,7 +58,7 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
}
if (xive_enabled()) {
- xive_kexec_teardown_cpu(secondary);
+ xive_teardown_cpu();
if (!secondary)
xive_shutdown();
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 5a392e40f3d2..d3992ced0782 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -21,6 +21,7 @@
/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG
+#define pr_fmt(fmt) "lpar: " fmt
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
@@ -36,7 +37,6 @@
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
-#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
@@ -165,8 +165,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
if (unlikely(lpar_rc == H_PTEG_FULL)) {
- if (!(vflags & HPTE_V_BOLTED))
- pr_devel(" full\n");
+ pr_devel("Hash table group is full\n");
return -1;
}
@@ -176,8 +175,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
* or we will loop forever, so return -2 in this case.
*/
if (unlikely(lpar_rc != H_SUCCESS)) {
- if (!(vflags & HPTE_V_BOLTED))
- pr_devel(" lpar err %ld\n", lpar_rc);
+ pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
return -2;
}
if (!(vflags & HPTE_V_BOLTED))
@@ -240,8 +238,11 @@ static void manual_hpte_clear_all(void)
*/
for (i = 0; i < hpte_count; i += 4) {
lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
- if (lpar_rc != H_SUCCESS)
+ if (lpar_rc != H_SUCCESS) {
+ pr_info("Failed to read hash page table at %ld err %ld\n",
+ i, lpar_rc);
continue;
+ }
for (j = 0; j < 4; j++){
if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
HPTE_V_VRMA_MASK)
@@ -340,8 +341,11 @@ static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_gr
for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
- if (lpar_rc != H_SUCCESS)
+ if (lpar_rc != H_SUCCESS) {
+ pr_info("Failed to read hash page table at %ld err %ld\n",
+ hpte_group, lpar_rc);
continue;
+ }
for (j = 0; j < 4; j++) {
if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
@@ -612,8 +616,8 @@ static int __init disable_bulk_remove(char *str)
{
if (strcmp(str, "off") == 0 &&
firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
- printk(KERN_INFO "Disabling BULK_REMOVE firmware feature");
- powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
+ pr_info("Disabling BULK_REMOVE firmware feature");
+ powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
}
return 1;
}
@@ -659,8 +663,7 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
return -ENODEV;
- printk(KERN_INFO "lpar: Attempting to resize HPT to shift %lu\n",
- shift);
+ pr_info("Attempting to resize HPT to shift %lu\n", shift);
t0 = ktime_get();
@@ -672,8 +675,7 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
/* prepare with shift==0 cancels an in-progress resize */
rc = plpar_resize_hpt_prepare(0, 0);
if (rc != H_SUCCESS)
- printk(KERN_WARNING
- "lpar: Unexpected error %d cancelling timed out HPT resize\n",
+ pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
rc);
return -ETIMEDOUT;
}
@@ -691,9 +693,7 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
case H_RESOURCE:
return -EPERM;
default:
- printk(KERN_WARNING
- "lpar: Unexpected error %d from H_RESIZE_HPT_PREPARE\n",
- rc);
+ pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
return -EIO;
}
@@ -706,22 +706,19 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
if (rc != 0) {
switch (state.commit_rc) {
case H_PTEG_FULL:
- printk(KERN_WARNING
- "lpar: Hash collision while resizing HPT\n");
+ pr_warn("Hash collision while resizing HPT\n");
return -ENOSPC;
default:
- printk(KERN_WARNING
- "lpar: Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
- state.commit_rc);
+ pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
+ state.commit_rc);
return -EIO;
};
}
- printk(KERN_INFO
- "lpar: HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
- shift, (long long) ktime_ms_delta(t1, t0),
- (long long) ktime_ms_delta(t2, t1));
+ pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
+ shift, (long long) ktime_ms_delta(t1, t0),
+ (long long) ktime_ms_delta(t2, t1));
return 0;
}
@@ -785,13 +782,13 @@ static int __init cmo_free_hint(char *str)
parm = strstrip(str);
if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
- printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
+ pr_info("%s: CMO free page hinting is not active.\n", __func__);
cmo_free_hint_flag = 0;
return 1;
}
cmo_free_hint_flag = 1;
- printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");
+ pr_info("%s: CMO free page hinting is active.\n", __func__);
if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
return 1;
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 8a8033a249c7..f0e30dc94988 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/stringify.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 5e1ef9150182..851ce326874a 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/fs.h>
#include <linux/reboot.h>
+#include <linux/irq_work.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
@@ -32,11 +33,13 @@
static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);
-static char global_mce_data_buf[RTAS_ERROR_LOG_MAX];
-static DEFINE_PER_CPU(__u64, mce_data_buf);
-
static int ras_check_exception_token;
+static void mce_process_errlog_event(struct irq_work *work);
+static struct irq_work mce_errlog_process_work = {
+ .func = mce_process_errlog_event,
+};
+
#define EPOW_SENSOR_TOKEN 9
#define EPOW_SENSOR_INDEX 0
@@ -330,16 +333,20 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
(((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
+static inline struct rtas_error_log *fwnmi_get_errlog(void)
+{
+ return (struct rtas_error_log *)local_paca->mce_data_buf;
+}
+
/*
* Get the error information for errors coming through the
* FWNMI vectors. The pt_regs' r3 will be updated to reflect
* the actual r3 if possible, and a ptr to the error log entry
* will be returned if found.
*
- * If the RTAS error is not of the extended type, then we put it in a per
- * cpu 64bit buffer. If it is the extended type we use global_mce_data_buf.
+ * Use one mce_data_buf buffer per CPU to store the RTAS error.
*
- * The global_mce_data_buf does not have any locks or protection around it,
+ * The mce_data_buf does not have any locks or protection around it,
* if a second machine check comes in, or a system reset is done
* before we have logged the error, then we will get corruption in the
* error log. This is preferable over holding off on calling
@@ -349,7 +356,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
unsigned long *savep;
- struct rtas_error_log *h, *errhdr = NULL;
+ struct rtas_error_log *h;
/* Mask top two bits */
regs->gpr[3] &= ~(0x3UL << 62);
@@ -360,24 +367,22 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
}
savep = __va(regs->gpr[3]);
- regs->gpr[3] = savep[0]; /* restore original r3 */
+ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
- /* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1];
+ /* Use the per cpu buffer from paca to store rtas error log */
+ memset(local_paca->mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
if (!rtas_error_extended(h)) {
- memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
- errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
+ memcpy(local_paca->mce_data_buf, h, sizeof(__u64));
} else {
int len, error_log_length;
error_log_length = 8 + rtas_error_extended_log_length(h);
- len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
- memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
- memcpy(global_mce_data_buf, h, len);
- errhdr = (struct rtas_error_log *)global_mce_data_buf;
+ len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
+ memcpy(local_paca->mce_data_buf, h, len);
}
- return errhdr;
+ return (struct rtas_error_log *)local_paca->mce_data_buf;
}
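The max_t to min_t change above is the substantive fix: the copy length into the fixed-size per-CPU buffer must be clamped down to RTAS_ERROR_LOG_MAX, otherwise an extended log longer than the buffer overruns it. A standalone sketch with made-up sizes:

#include <stdio.h>
#include <string.h>

#define RTAS_ERROR_LOG_MAX 2048

int main(void)
{
	char dst[RTAS_ERROR_LOG_MAX];
	char src[4096] = "oversized extended error log";
	int error_log_length = sizeof(src);	/* 8 + extended length */
	int len = error_log_length < RTAS_ERROR_LOG_MAX ?
		  error_log_length : RTAS_ERROR_LOG_MAX;	/* min_t */

	memcpy(dst, src, len);	/* max_t here would overflow dst */
	printf("copied %d of %d bytes\n", len, error_log_length);
	return 0;
}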
/* Call this when done with the data returned by FWNMI_get_errinfo.
@@ -423,6 +428,17 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
}
/*
+ * Process MCE rtas errlog event.
+ */
+static void mce_process_errlog_event(struct irq_work *work)
+{
+ struct rtas_error_log *err;
+
+ err = fwnmi_get_errlog();
+ log_error((char *)err, ERR_TYPE_RTAS_LOG, 0);
+}
+
+/*
* See if we can recover from a machine check exception.
* This is only called on power4 (or above) and only via
* the Firmware Non-Maskable Interrupts (fwnmi) handler
@@ -466,7 +482,8 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
recovered = 1;
}
- log_error((char *)err, ERR_TYPE_RTAS_LOG, 0);
+ /* Queue irq work to log this rtas event later. */
+ irq_work_queue(&mce_errlog_process_work);
return recovered;
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 139f0af6c3d9..ba1791fd3234 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -41,6 +41,7 @@
#include <linux/root_dev.h>
#include <linux/of.h>
#include <linux/of_pci.h>
+#include <linux/memblock.h>
#include <asm/mmu.h>
#include <asm/processor.h>
@@ -69,8 +70,10 @@
#include <asm/kexec.h>
#include <asm/isa-bridge.h>
#include <asm/security_features.h>
+#include <asm/asm-const.h>
#include "pseries.h"
+#include "../../../../drivers/pci/pci.h"
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
@@ -101,6 +104,9 @@ static void pSeries_show_cpuinfo(struct seq_file *m)
static void __init fwnmi_init(void)
{
unsigned long system_reset_addr, machine_check_addr;
+ u8 *mce_data_buf;
+ unsigned int i;
+ int nr_cpus = num_possible_cpus();
int ibm_nmi_register = rtas_token("ibm,nmi-register");
if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
@@ -114,6 +120,18 @@ static void __init fwnmi_init(void)
if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
machine_check_addr))
fwnmi_active = 1;
+
+ /*
+ * Allocate one chunk of memory, carved into per-CPU buffers, to
+ * hold the RTAS error log. It is used by the real-mode MCE
+ * handler, so it must sit below the RMA.
+ */
+ mce_data_buf = __va(memblock_alloc_base(RTAS_ERROR_LOG_MAX * nr_cpus,
+ RTAS_ERROR_LOG_MAX, ppc64_rma_size));
+ for_each_possible_cpu(i) {
+ paca_ptrs[i]->mce_data_buf = mce_data_buf +
+ (RTAS_ERROR_LOG_MAX * i);
+ }
}
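The loop above slices one contiguous allocation into per-CPU slots of RTAS_ERROR_LOG_MAX bytes each. A userspace approximation of the carve-out, with calloc() standing in for memblock_alloc_base() and a fixed CPU count:

#include <stdio.h>
#include <stdlib.h>

#define RTAS_ERROR_LOG_MAX 2048

int main(void)
{
	int nr_cpus = 4;
	unsigned char *block = calloc(nr_cpus, RTAS_ERROR_LOG_MAX);
	unsigned char *slot[4];
	int i;

	if (!block)
		return 1;
	for (i = 0; i < nr_cpus; i++) {
		slot[i] = block + (size_t)RTAS_ERROR_LOG_MAX * i;
		printf("cpu %d buffer at offset %zu\n",
		       i, (size_t)(slot[i] - block));
	}
	free(block);
	return 0;
}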
static void pseries_8259_cascade(struct irq_desc *desc)
@@ -484,6 +502,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+ if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST)
+ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+
+ if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE)
+ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+
/*
* The features below are enabled by default, so we instead look to see
* if firmware has *disabled* them, and clear them if so.
@@ -534,7 +558,7 @@ void pseries_setup_rfi_flush(void)
security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
setup_rfi_flush(types, enable);
- setup_barrier_nospec();
+ setup_count_cache_flush();
}
#ifdef CONFIG_PCI_IOV
@@ -646,6 +670,15 @@ void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
}
}
+static void pseries_disable_sriov_resources(struct pci_dev *pdev)
+{
+ int i;
+
+ pci_warn(pdev, "No hypervisor support for SR-IOV on this device, IOV BARs disabled.\n");
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
+ pdev->resource[i + PCI_IOV_RESOURCES].flags = 0;
+}
+
static void pseries_pci_fixup_resources(struct pci_dev *pdev)
{
const int *indexes;
@@ -653,10 +686,10 @@ static void pseries_pci_fixup_resources(struct pci_dev *pdev)
/*Firmware must support open sriov otherwise dont configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
- if (!indexes)
- return;
- /* Assign the addresses from device tree*/
- of_pci_set_vf_bar_size(pdev, indexes);
+ if (indexes)
+ of_pci_set_vf_bar_size(pdev, indexes);
+ else
+ pseries_disable_sriov_resources(pdev);
}
static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
@@ -664,14 +697,14 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
const int *indexes;
struct device_node *dn = pci_device_to_OF_node(pdev);
- if (!pdev->is_physfn || pdev->is_added)
+ if (!pdev->is_physfn || pci_dev_is_added(pdev))
return;
/*Firmware must support open sriov otherwise dont configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
- if (!indexes)
- return;
- /* Assign the addresses from device tree*/
- of_pci_parse_iov_addrs(pdev, indexes);
+ if (indexes)
+ of_pci_parse_iov_addrs(pdev, indexes);
+ else
+ pseries_disable_sriov_resources(pdev);
}
static resource_size_t pseries_pci_iov_resource_alignment(struct pci_dev *pdev,
diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
index 30e05decbb4c..4314ba5baf43 100644
--- a/arch/powerpc/purgatory/Makefile
+++ b/arch/powerpc/purgatory/Makefile
@@ -6,9 +6,8 @@ LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
$(obj)/purgatory.ro: $(obj)/trampoline.o FORCE
$(call if_changed,ld)
-CMD_BIN2C = $(objtree)/scripts/basic/bin2c
quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
+ cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
$(call if_changed,bin2c)
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S
index 4aad9dd10ace..1e1129553fd7 100644
--- a/arch/powerpc/purgatory/trampoline.S
+++ b/arch/powerpc/purgatory/trampoline.S
@@ -12,15 +12,7 @@
* Software Foundation (version 2 of the License).
*/
-#if defined(__LITTLE_ENDIAN__)
-#define STWX_BE stwbrx
-#define LWZX_BE lwbrx
-#elif defined(__BIG_ENDIAN__)
-#define STWX_BE stwx
-#define LWZX_BE lwzx
-#else
-#error no endianness defined!
-#endif
+#include <asm/asm-compat.h>
.machine ppc64
.balign 256
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index ea2f595b5133..f730539074c4 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -48,7 +48,7 @@ obj-$(CONFIG_UCODE_PATCH) += micropatch.o
obj-$(CONFIG_PPC_MPC512x) += mpc5xxx_clocks.o
obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o
-ifeq ($(CONFIG_SUSPEND),y)
+ifdef CONFIG_SUSPEND
obj-$(CONFIG_6xx) += 6xx-suspend.o
endif
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 5240d3a74a10..4f8dcf124828 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -38,7 +38,6 @@
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/io.h>
-#include <asm/tlbflush.h>
#include <asm/rheap.h>
#include <asm/prom.h>
#include <asm/cpm.h>
diff --git a/arch/powerpc/sysdev/fsl_mpic_err.c b/arch/powerpc/sysdev/fsl_mpic_err.c
index 488ec453038a..2a98837dc6ba 100644
--- a/arch/powerpc/sysdev/fsl_mpic_err.c
+++ b/arch/powerpc/sysdev/fsl_mpic_err.c
@@ -76,7 +76,7 @@ int mpic_setup_error_int(struct mpic *mpic, int intvec)
mpic->flags |= MPIC_FSL_HAS_EIMR;
/* allocate interrupt vectors for error interrupts */
for (i = MPIC_MAX_ERR - 1; i >= 0; i--)
- mpic->err_int_vecs[i] = --intvec;
+ mpic->err_int_vecs[i] = intvec--;
return 0;
}
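The pre- to post-decrement change works together with the intvec_top - 13 change in mpic.c further down: the assigned vectors stay the same, but the first vector handed out now equals the value passed in, matching the new accounting. A minimal demonstration of the evaluation-order difference:

#include <stdio.h>

int main(void)
{
	int a = 100, b = 100;
	int pre = --a;	/* pre: a becomes 99 first, pre == 99 */
	int post = b--;	/* post: post == 100, then b becomes 99 */

	printf("pre-decrement assigned %d, post-decrement assigned %d\n",
	       pre, post);
	return 0;
}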
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 353b43972bbf..934a77324f6b 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1380,12 +1380,12 @@ struct mpic * __init mpic_alloc(struct device_node *node,
* global vector number space, as in case of ipis
* and timer interrupts.
*
- * Available vector space = intvec_top - 12, where 12
+ * Available vector space = intvec_top - 13, where 13
* is the number of vectors which have been consumed by
- * ipis and timer interrupts.
+ * ipis, timer interrupts and the spurious vector.
*/
if (fsl_version >= 0x401) {
- ret = mpic_setup_error_int(mpic, intvec_top - 12);
+ ret = mpic_setup_error_int(mpic, intvec_top - 13);
if (ret)
return NULL;
}
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index eb69a5186243..280e964e1aa8 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
- msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
+ msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT;
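resource_size() is the right call here because struct resource ranges are inclusive, so end - start comes up one byte short. A standalone sketch with a local re-implementation of the helper:

#include <stdio.h>

struct resource { unsigned long start, end; };

static unsigned long resource_size(const struct resource *r)
{
	return r->end - r->start + 1;	/* inclusive range */
}

int main(void)
{
	struct resource rsrc = { 0x1000, 0x1fff };	/* 4K region */

	printf("end - start = %#lx, resource_size = %#lx\n",
	       rsrc.end - rsrc.start, resource_size(&rsrc));
	return 0;
}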
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index 6243a7e537d0..e64a411d1a00 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -225,22 +225,23 @@ static void __init test_of_node(void)
struct device_node of_node;
struct property prop;
struct msi_bitmap bmp;
- int size = 256;
- DECLARE_BITMAP(expected, size);
+#define SIZE_EXPECTED 256
+ DECLARE_BITMAP(expected, SIZE_EXPECTED);
/* There should really be a struct device_node allocator */
memset(&of_node, 0, sizeof(of_node));
of_node_init(&of_node);
of_node.full_name = node_name;
- WARN_ON(msi_bitmap_alloc(&bmp, size, &of_node));
+ WARN_ON(msi_bitmap_alloc(&bmp, SIZE_EXPECTED, &of_node));
/* No msi-available-ranges, so expect > 0 */
WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);
/* Should all still be free */
- WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
- bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
+ WARN_ON(bitmap_find_free_region(bmp.bitmap, SIZE_EXPECTED,
+ get_count_order(SIZE_EXPECTED)));
+ bitmap_release_region(bmp.bitmap, 0, get_count_order(SIZE_EXPECTED));
/* Now create a fake msi-available-ranges property */
@@ -256,8 +257,8 @@ static void __init test_of_node(void)
WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp));
/* Check we got the expected result */
- WARN_ON(bitmap_parselist(expected_str, expected, size));
- WARN_ON(!bitmap_equal(expected, bmp.bitmap, size));
+ WARN_ON(bitmap_parselist(expected_str, expected, SIZE_EXPECTED));
+ WARN_ON(!bitmap_equal(expected, bmp.bitmap, SIZE_EXPECTED));
msi_bitmap_free(&bmp);
kfree(bmp.bitmap);
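The switch to a macro is not cosmetic: DECLARE_BITMAP() sizes an array, and feeding it a runtime int created a variable-length array on the stack. A rough model of the expansion, assuming the usual BITS_TO_LONGS arithmetic:

#include <stdio.h>

#define BITS_PER_LONG_X		(8 * sizeof(long))
#define BITS_TO_LONGS_X(n)	(((n) + BITS_PER_LONG_X - 1) / BITS_PER_LONG_X)
#define DECLARE_BITMAP_X(name, bits) unsigned long name[BITS_TO_LONGS_X(bits)]

int main(void)
{
	DECLARE_BITMAP_X(expected, 256);	/* unsigned long expected[4] on 64-bit */

	printf("%zu words\n", sizeof(expected) / sizeof(expected[0]));
	return 0;
}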
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 3459015092fa..e8f5b0551095 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -319,7 +319,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
* The FW told us to call it. This happens for some
* interrupt sources that need additional HW whacking
* beyond the ESB manipulation. For example LPC interrupts
- * on P9 DD1.0 need a latch to be clared in the LPC bridge
+ * on P9 DD1.0 needed a latch to be cleared in the LPC bridge
* itself. The Firmware will take care of it.
*/
if (WARN_ON_ONCE(!xive_ops->eoi))
@@ -337,9 +337,9 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
* This allows us to then do a re-trigger if Q was set
* rather than synthesizing an interrupt in software
*
- * For LSIs, using the HW EOI cycle works around a problem
- * on P9 DD1 PHBs where the other ESB accesses don't work
- * properly.
+ * For LSIs the HW EOI cycle is used rather than PQ bits,
+ * as they are automatically re-triggered in HW when still
+ * pending.
*/
if (xd->flags & XIVE_IRQ_FLAG_LSI)
xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
@@ -1408,28 +1408,6 @@ void xive_teardown_cpu(void)
xive_cleanup_cpu_queues(cpu, xc);
}
-void xive_kexec_teardown_cpu(int secondary)
-{
- struct xive_cpu *xc = __this_cpu_read(xive_cpu);
- unsigned int cpu = smp_processor_id();
-
- /* Set CPPR to 0 to disable flow of interrupts */
- xc->cppr = 0;
- out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
-
- /* Backend cleanup if any */
- if (xive_ops->teardown_cpu)
- xive_ops->teardown_cpu(cpu, xc);
-
-#ifdef CONFIG_SMP
- /* Get rid of IPI */
- xive_cleanup_cpu_ipi(cpu, xc);
-#endif
-
- /* Disable and free the queues */
- xive_cleanup_cpu_queues(cpu, xc);
-}
-
void xive_shutdown(void)
{
xive_ops->shutdown();
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 311185b9960a..5b20a678d755 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -109,7 +109,7 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
return rc == 0 ? 0 : -ENXIO;
}
@@ -163,7 +163,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
pr_err("Error %lld setting queue for prio %d\n", rc, prio);
@@ -190,7 +190,7 @@ static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc)
pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
@@ -253,7 +253,7 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
for (;;) {
irq = opal_xive_allocate_irq(chip_id);
if (irq == OPAL_BUSY) {
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
continue;
}
if (irq < 0) {
@@ -275,7 +275,7 @@ u32 xive_native_alloc_irq(void)
rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc < 0)
return 0;
@@ -289,7 +289,7 @@ void xive_native_free_irq(u32 irq)
s64 rc = opal_xive_free_irq(irq);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);
@@ -305,7 +305,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
for (;;) {
rc = opal_xive_free_irq(xc->hw_ipi);
if (rc == OPAL_BUSY) {
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
continue;
}
xc->hw_ipi = 0;
@@ -395,12 +395,11 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
/* Enable the pool VP */
vp = xive_pool_vps + cpu;
- pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
for (;;) {
rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
pr_err("Failed to enable pool VP on CPU %d\n", cpu);
@@ -415,16 +414,9 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
}
vp_cam = be64_to_cpu(vp_cam_be);
- pr_debug("VP CAM = %llx\n", vp_cam);
-
/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
- pr_debug("(Old HW value: %08x)\n",
- in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
- out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2,
- TM_QW2W2_VP | vp_cam);
- pr_debug("(New HW value: %08x)\n",
- in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
+ out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}
static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
@@ -444,7 +436,7 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
rc = opal_xive_set_vp_info(vp, 0, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
}
@@ -645,7 +637,7 @@ u32 xive_native_alloc_vp_block(u32 max_vcpus)
rc = opal_xive_alloc_vp_block(order);
switch (rc) {
case OPAL_BUSY:
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
break;
case OPAL_XIVE_PROVISIONING:
if (!xive_native_provision_pages())
@@ -687,7 +679,7 @@ int xive_native_enable_vp(u32 vp_id, bool single_escalation)
rc = opal_xive_set_vp_info(vp_id, flags, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
return rc ? -EIO : 0;
}
@@ -701,7 +693,7 @@ int xive_native_disable_vp(u32 vp_id)
rc = opal_xive_set_vp_info(vp_id, 0, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
return rc ? -EIO : 0;
}
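Every hunk in this file replaces the same magic msleep(1) with a named constant; the call sites all share one retry idiom against OPAL_BUSY. A userspace model of that loop (stubbed OPAL call, illustrative return codes):

#include <stdio.h>

#define OPAL_BUSY		(-2)	/* illustrative value */
#define OPAL_BUSY_DELAY_MS	1	/* the named constant the patch adopts */

static int slept_ms;
static void msleep(unsigned int ms) { slept_ms += ms; }	/* stand-in */

static long opal_call_stub(void)
{
	static int busy = 3;		/* pretend firmware is busy three times */
	return busy-- > 0 ? OPAL_BUSY : 0;
}

int main(void)
{
	long rc;

	for (;;) {			/* the shared retry shape */
		rc = opal_call_stub();
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	printf("rc=%ld after %d ms of backoff\n", rc, slept_ms);
	return 0;
}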
diff --git a/arch/powerpc/tools/checkpatch.sh b/arch/powerpc/tools/checkpatch.sh
new file mode 100755
index 000000000000..1fad3fb90e7c
--- /dev/null
+++ b/arch/powerpc/tools/checkpatch.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2018, Michael Ellerman, IBM Corporation.
+#
+# Wrapper around checkpatch that uses our preferred settings
+
+script_base=$(realpath "$(dirname "$0")")
+
+exec $script_base/../../../scripts/checkpatch.pl \
+ --subjective \
+ --no-summary \
+ --max-line-length=90 \
+ --show-types \
+ --ignore ARCH_INCLUDE_LINUX \
+ --ignore BIT_MACRO \
+ --ignore COMPARISON_TO_NULL \
+ --ignore EMAIL_SUBJECT \
+ --ignore FILE_PATH_CHANGES \
+ --ignore GLOBAL_INITIALISERS \
+ --ignore LINE_SPACING \
+ --ignore MULTIPLE_ASSIGNMENTS \
+ "$@"
diff --git a/arch/powerpc/xmon/spr_access.S b/arch/powerpc/xmon/spr_access.S
index 4099cbcddaaa..720a52afdd58 100644
--- a/arch/powerpc/xmon/spr_access.S
+++ b/arch/powerpc/xmon/spr_access.S
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
/* unsigned long xmon_mfspr(sprn, default_value) */
_GLOBAL(xmon_mfspr)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 47166ad2a669..4264aedc7775 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -56,6 +56,7 @@
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
+#include <asm/sections.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
@@ -244,6 +245,7 @@ Commands:\n\
f flush cache\n\
la lookup symbol+offset of specified address\n\
ls lookup address of specified symbol\n\
+ lp s [#] lookup address of percpu symbol s for current cpu, or cpu #\n\
m examine/change memory\n\
mm move a block of memory\n\
ms set a block of memory\n\
@@ -918,13 +920,13 @@ static void remove_cpu_bpts(void)
static void
show_uptime(void)
{
- struct timespec uptime;
+ struct timespec64 uptime;
if (setjmp(bus_error_jmp) == 0) {
catch_memory_errors = 1;
sync();
- get_monotonic_boottime(&uptime);
+ ktime_get_coarse_boottime_ts64(&uptime);
printf("Uptime: %lu.%.2lu seconds\n", (unsigned long)uptime.tv_sec,
((unsigned long)uptime.tv_nsec / (NSEC_PER_SEC/100)));
@@ -2429,7 +2431,6 @@ static void dump_one_paca(int cpu)
DUMP(p, thread_idle_state, "%#-*x");
DUMP(p, thread_mask, "%#-*x");
DUMP(p, subcore_sibling_mask, "%#-*x");
- DUMP(p, thread_sibling_pacas, "%-*px");
DUMP(p, requested_psscr, "%#-*llx");
DUMP(p, stop_sprs.pid, "%#-*llx");
DUMP(p, stop_sprs.ldbar, "%#-*llx");
@@ -2734,7 +2735,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
{
int nr, dotted;
unsigned long first_adr;
- unsigned long inst, last_inst = 0;
+ unsigned int inst, last_inst = 0;
unsigned char val[4];
dotted = 0;
@@ -2758,7 +2759,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
dotted = 0;
last_inst = inst;
if (praddr)
- printf(REG" %.8lx", adr, inst);
+ printf(REG" %.8x", adr, inst);
printf("\t");
dump_func(inst, adr);
printf("\n");
@@ -3353,7 +3354,8 @@ static void
symbol_lookup(void)
{
int type = inchar();
- unsigned long addr;
+ unsigned long addr, cpu;
+ void __percpu *ptr = NULL;
static char tmp[64];
switch (type) {
@@ -3377,6 +3379,34 @@ symbol_lookup(void)
catch_memory_errors = 0;
termch = 0;
break;
+ case 'p':
+ getstring(tmp, 64);
+ if (setjmp(bus_error_jmp) == 0) {
+ catch_memory_errors = 1;
+ sync();
+ ptr = (void __percpu *)kallsyms_lookup_name(tmp);
+ sync();
+ }
+
+ if (ptr &&
+ ptr >= (void __percpu *)__per_cpu_start &&
+ ptr < (void __percpu *)__per_cpu_end)
+ {
+ if (scanhex(&cpu) && cpu < num_possible_cpus()) {
+ addr = (unsigned long)per_cpu_ptr(ptr, cpu);
+ } else {
+ cpu = raw_smp_processor_id();
+ addr = (unsigned long)this_cpu_ptr(ptr);
+ }
+
+ printf("%s for cpu 0x%lx: %lx\n", tmp, cpu, addr);
+ } else {
+ printf("Percpu symbol '%s' not found.\n", tmp);
+ }
+
+ catch_memory_errors = 0;
+ termch = 0;
+ break;
}
}
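The new 'lp' command leans on the fact that per-cpu symbols live in a template section bounded by __per_cpu_start/__per_cpu_end, and the live address for a given CPU is the template address displaced by that CPU's per-cpu offset, which is what per_cpu_ptr() performs. A sketch of the address arithmetic (illustrative values, not the kernel's macro chain):

#include <stdio.h>

/* per_cpu_ptr(ptr, cpu) is, in effect, ptr + per_cpu_offset(cpu) */
static unsigned long percpu_addr(unsigned long template_addr,
				 unsigned long cpu_offset)
{
	return template_addr + cpu_offset;
}

int main(void)
{
	unsigned long sym = 0xc000000000f00000UL;	/* hypothetical symbol */
	unsigned long off = 0x2000UL;			/* hypothetical CPU offset */

	printf("cpu copy at %lx\n", percpu_addr(sym, off));
	return 0;
}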
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index f12680c9b947..a344980287a5 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -107,6 +107,7 @@ config ARCH_RV32I
select GENERIC_LIB_ASHLDI3
select GENERIC_LIB_ASHRDI3
select GENERIC_LIB_LSHRDI3
+ select GENERIC_LIB_UCMPDI2
config ARCH_RV64I
bool "RV64I"
@@ -211,10 +212,6 @@ endmenu
menu "Kernel type"
-source "mm/Kconfig"
-
-source "kernel/Kconfig.preempt"
-
source "kernel/Kconfig.hz"
endmenu
@@ -241,75 +238,8 @@ source "drivers/pci/Kconfig"
endmenu
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
menu "Power management options"
source kernel/power/Kconfig
endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-menu "Kernel hacking"
-
-config CMDLINE_BOOL
- bool "Built-in kernel command line"
- help
- For most platforms, it is firmware or second stage bootloader
- that by default specifies the kernel command line options.
- However, it might be necessary or advantageous to either override
- the default kernel command line or add a few extra options to it.
- For such cases, this option allows hardcoding command line options
- directly into the kernel.
-
- For that, choose 'Y' here and fill in the extra boot parameters
- in CONFIG_CMDLINE.
-
- The built-in options will be concatenated to the default command
- line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
- command line will be ignored and replaced by the built-in string.
-
-config CMDLINE
- string "Built-in kernel command string"
- depends on CMDLINE_BOOL
- default ""
- help
- Supply command-line options at build time by entering them here.
-
-config CMDLINE_FORCE
- bool "Built-in command line overrides bootloader arguments"
- depends on CMDLINE_BOOL
- help
- Set this option to 'Y' to have the kernel ignore the bootloader
- or firmware command line. Instead, the built-in command line
- will be used exclusively.
-
- If you don't know what to do here, say N.
-
-config EARLY_PRINTK
- def_bool y
-
-source "lib/Kconfig.debug"
-
-config CMDLINE_BOOL
- bool
-endmenu
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/riscv/Kconfig.debug b/arch/riscv/Kconfig.debug
new file mode 100644
index 000000000000..3224ff6ecf6e
--- /dev/null
+++ b/arch/riscv/Kconfig.debug
@@ -0,0 +1,37 @@
+
+config CMDLINE_BOOL
+ bool "Built-in kernel command line"
+ help
+ For most platforms, it is the firmware or second-stage bootloader
+ that specifies the kernel command line options by default.
+ However, it might be necessary or advantageous to either override
+ the default kernel command line or add a few extra options to it.
+ For such cases, this option allows hardcoding command line options
+ directly into the kernel.
+
+ For that, choose 'Y' here and fill in the extra boot parameters
+ in CONFIG_CMDLINE.
+
+ The built-in options will be concatenated to the default command
+ line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
+ command line will be ignored and replaced by the built-in string.
+
+config CMDLINE
+ string "Built-in kernel command string"
+ depends on CMDLINE_BOOL
+ default ""
+ help
+ Supply command-line options at build time by entering them here.
+
+config CMDLINE_FORCE
+ bool "Built-in command line overrides bootloader arguments"
+ depends on CMDLINE_BOOL
+ help
+ Set this option to 'Y' to have the kernel ignore the bootloader
+ or firmware command line. Instead, the built-in command line
+ will be used exclusively.
+
+ If you don't know what to do here, say N.
+
+config EARLY_PRINTK
+ def_bool y
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 6d4a5f6c3f4f..9ddd88bb30b7 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -8,7 +8,6 @@
# for more details.
#
-LDFLAGS :=
OBJCOPYFLAGS := -O binary
LDFLAGS_vmlinux :=
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
@@ -26,6 +25,9 @@ ifeq ($(CONFIG_ARCH_RV64I),y)
KBUILD_CFLAGS += -mabi=lp64
KBUILD_AFLAGS += -mabi=lp64
+
+ KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
+
KBUILD_MARCH = rv64im
LDFLAGS += -melf64lriscv
else
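cc-ifversion gates the define on GCC >= 5.0 (version code 0500), the first release with dependable rv64 __int128 support. What the config unlocks is full-width 64x64->128 multiplication in C; a sketch, compiler support assumed:

#include <stdio.h>

unsigned __int128 mul_64x64(unsigned long a, unsigned long b)
{
	return (unsigned __int128)a * b;	/* one mul + mulhu pair on rv64 */
}

int main(void)
{
	unsigned __int128 p = mul_64x64(~0UL, ~0UL);

	/* (2^64-1)^2 has high half 0xfffffffffffffffe */
	printf("high half = %lx\n", (unsigned long)(p >> 64));
	return 0;
}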
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 07326466871b..36473d7dbaac 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -76,3 +76,4 @@ CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_SIFIVE_PLIC=y
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 576ffdca06ba..efdbe311e936 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -1,6 +1,7 @@
generic-y += bugs.h
generic-y += cacheflush.h
generic-y += checksum.h
+generic-y += compat.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 855115ace98c..c452359c9cb8 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -25,18 +25,11 @@
#define ATOMIC_INIT(i) { (i) }
-#define __atomic_op_acquire(op, args...) \
-({ \
- typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory"); \
- __ret; \
-})
-
-#define __atomic_op_release(op, args...) \
-({ \
- __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory"); \
- op##_relaxed(args); \
-})
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
+
+#define __atomic_release_fence() \
+ __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
static __always_inline int atomic_read(const atomic_t *v)
{
@@ -209,130 +202,8 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
-/*
- * The extra atomic operations that are constructed from one of the core
- * AMO-based operations above (aside from sub, which is easier to fit above).
- * These are required to perform a full barrier, but they're OK this way
- * because atomic_*_return is also required to perform a full barrier.
- *
- */
-#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
-static __always_inline \
-bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
-}
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, func_op, comp_op, I) \
- ATOMIC_OP(op, func_op, comp_op, I, int, )
-#else
-#define ATOMIC_OPS(op, func_op, comp_op, I) \
- ATOMIC_OP(op, func_op, comp_op, I, int, ) \
- ATOMIC_OP(op, func_op, comp_op, I, long, 64)
-#endif
-
-ATOMIC_OPS(add_and_test, add, ==, 0)
-ATOMIC_OPS(sub_and_test, sub, ==, 0)
-ATOMIC_OPS(add_negative, add, <, 0)
-
-#undef ATOMIC_OP
-#undef ATOMIC_OPS
-
-#define ATOMIC_OP(op, func_op, I, c_type, prefix) \
-static __always_inline \
-void atomic##prefix##_##op(atomic##prefix##_t *v) \
-{ \
- atomic##prefix##_##func_op(I, v); \
-}
-
-#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \
-static __always_inline \
-c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##func_op##_relaxed(I, v); \
-} \
-static __always_inline \
-c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##func_op(I, v); \
-}
-
-#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \
-static __always_inline \
-c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##op##_relaxed(v) c_op I; \
-} \
-static __always_inline \
-c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##op(v) c_op I; \
-}
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I) \
- ATOMIC_OP( op, asm_op, I, int, ) \
- ATOMIC_FETCH_OP( op, asm_op, I, int, ) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, )
-#else
-#define ATOMIC_OPS(op, asm_op, c_op, I) \
- ATOMIC_OP( op, asm_op, I, int, ) \
- ATOMIC_FETCH_OP( op, asm_op, I, int, ) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \
- ATOMIC_OP( op, asm_op, I, long, 64) \
- ATOMIC_FETCH_OP( op, asm_op, I, long, 64) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
-#endif
-
-ATOMIC_OPS(inc, add, +, 1)
-ATOMIC_OPS(dec, add, +, -1)
-
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#define atomic_inc_return atomic_inc_return
-#define atomic_dec_return atomic_dec_return
-
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#define atomic_fetch_inc atomic_fetch_inc
-#define atomic_fetch_dec atomic_fetch_dec
-
-#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#define atomic64_inc_return atomic64_inc_return
-#define atomic64_dec_return atomic64_dec_return
-
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#define atomic64_fetch_inc atomic64_fetch_inc
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-
-#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \
-static __always_inline \
-bool atomic##prefix##_##op(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_##func_op##_return(v) comp_op I; \
-}
-
-ATOMIC_OP(inc_and_test, inc, ==, 0, )
-ATOMIC_OP(dec_and_test, dec, ==, 0, )
-#ifndef CONFIG_GENERIC_ATOMIC64
-ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
-ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
-#endif
-
-#undef ATOMIC_OP
-
/* This is required to provide a full barrier on success. */
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
@@ -349,9 +220,10 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
: "memory");
return prev;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
+static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long prev, rc;
@@ -368,27 +240,7 @@ static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
: "memory");
return prev;
}
-
-static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- return __atomic64_add_unless(v, a, u) != u;
-}
-#endif
-
-/*
- * The extra atomic operations that are constructed from one of the core
- * LR/SC-based operations above.
- */
-static __always_inline int atomic_inc_not_zero(atomic_t *v)
-{
- return __atomic_add_unless(v, 1, 0);
-}
-
-#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
-{
- return atomic64_add_unless(v, 1, 0);
-}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif
/*
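The bulk deletion tracks a tree-wide API cleanup: the architecture now supplies only atomic_fetch_add_unless(), which returns the old value and adds a only when the old value differs from u; the generic layer derives add_unless, inc_not_zero and friends from it. Reference semantics, modeled non-atomically:

#include <stdio.h>

/* Non-atomic model of atomic_fetch_add_unless() */
static int fetch_add_unless(int *v, int a, int u)
{
	int old = *v;

	if (old != u)
		*v += a;
	return old;	/* caller tests the return against u */
}

int main(void)
{
	int v = 0;

	/* inc_not_zero(&v) == (fetch_add_unless(&v, 1, 0) != 0) */
	printf("%d\n", fetch_add_unless(&v, 1, 0) != 0);	/* 0: stayed zero */
	v = 5;
	printf("%d\n", fetch_add_unless(&v, 1, 0) != 0);	/* 1: v is now 6 */
	return 0;
}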
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 421fa3585798..28a0d1cb374c 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -54,6 +54,7 @@
/* Interrupt Enable and Interrupt Pending flags */
#define SIE_SSIE _AC(0x00000002, UL) /* Software Interrupt Enable */
#define SIE_STIE _AC(0x00000020, UL) /* Timer Interrupt Enable */
+#define SIE_SEIE _AC(0x00000200, UL) /* External Interrupt Enable */
#define EXC_INST_MISALIGNED 0
#define EXC_INST_ACCESS 1
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 4dee9d4c13c0..996b6fbe17a6 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -17,11 +17,8 @@
#define NR_IRQS 0
-#define INTERRUPT_CAUSE_SOFTWARE 1
-#define INTERRUPT_CAUSE_TIMER 5
-#define INTERRUPT_CAUSE_EXTERNAL 9
-
void riscv_timer_interrupt(void);
+void riscv_software_interrupt(void);
#include <asm-generic/irq.h>
diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h
index 0e638a0c3feb..aefbfaa6a781 100644
--- a/arch/riscv/include/asm/perf_event.h
+++ b/arch/riscv/include/asm/perf_event.h
@@ -10,6 +10,7 @@
#include <linux/perf_event.h>
#include <linux/ptrace.h>
+#include <linux/interrupt.h>
#define RISCV_BASE_COUNTERS 2
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 85e4220839b0..36016845461d 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -25,9 +25,6 @@
#ifdef CONFIG_SMP
/* SMP initialization hook for setup_arch */
-void __init init_clockevent(void);
-
-/* SMP initialization hook for setup_arch */
void __init setup_smp(void);
/* Hook for the generic smp_call_function_many() routine. */
@@ -44,9 +41,6 @@ void arch_send_call_function_single_ipi(int cpu);
*/
#define raw_smp_processor_id() (*((int*)((char*)get_current() + TASK_TI_CPU)))
-/* Interprocessor interrupt handler */
-irqreturn_t handle_ipi(void);
-
#endif /* CONFIG_SMP */
#endif /* _ASM_RISCV_SMP_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 080fb28061de..0caea01d5cca 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -11,6 +11,11 @@
* GNU General Public License for more details.
*/
+/*
+ * There is explicitly no include guard here because this file is expected to
+ * be included multiple times. See uapi/asm/syscalls.h for more info.
+ */
+
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 541544d64c33..ec6180a4b55d 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -38,8 +38,6 @@ struct vdso_data {
(void __user *)((unsigned long)(base) + __vdso_##name); \
})
-#ifdef CONFIG_SMP
asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
-#endif
#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
index 5cae4c30cd8e..1e0dfc36aab9 100644
--- a/arch/riscv/include/uapi/asm/elf.h
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -21,8 +21,13 @@ typedef struct user_regs_struct elf_gregset_t;
typedef union __riscv_fp_state elf_fpregset_t;
-#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32)
-#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff)
+#if __riscv_xlen == 64
+#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info)
+#else
+#define ELF_RISCV_R_SYM(r_info) ELF32_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info) ELF32_R_TYPE(r_info)
+#endif
/*
* RISC-V relocation types
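Reusing ELF64_R_SYM()/ELF32_R_SYM() fixes rv32: the two ELF classes pack r_info differently, and the old open-coded ">> 32" split is only right for ELF64. The standard expansions, as found in elf.h (shown here for reference):

#include <stdio.h>

/* ELF64: symbol index in the high 32 bits, type in the low 32 */
#define ELF64_R_SYM_X(i)	((i) >> 32)
#define ELF64_R_TYPE_X(i)	((i) & 0xffffffff)
/* ELF32: symbol index in the high 24 bits, type in the low 8 */
#define ELF32_R_SYM_X(i)	((i) >> 8)
#define ELF32_R_TYPE_X(i)	((unsigned char)(i))

int main(void)
{
	unsigned long long r64 = (7ULL << 32) | 2;	/* hypothetical: sym 7, type 2 */
	unsigned int r32 = (7u << 8) | 2;

	printf("elf64: sym %llu type %llu\n", ELF64_R_SYM_X(r64), ELF64_R_TYPE_X(r64));
	printf("elf32: sym %u type %u\n", ELF32_R_SYM_X(r32), ELF32_R_TYPE_X(r32));
	return 0;
}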
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/syscalls.h
index 818655b0d535..206dc4b0f6ea 100644
--- a/arch/riscv/include/uapi/asm/syscalls.h
+++ b/arch/riscv/include/uapi/asm/syscalls.h
@@ -1,10 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2017 SiFive
+ * Copyright (C) 2017-2018 SiFive
*/
-#ifndef _ASM__UAPI__SYSCALLS_H
-#define _ASM__UAPI__SYSCALLS_H
+/*
+ * There is explicitly no include guard here because this file is expected to
+ * be included multiple times in order to define the syscall macros via
+ * __SYSCALL.
+ */
/*
* Allows the instruction cache to be flushed from userspace. Despite RISC-V
@@ -20,7 +23,7 @@
* caller. We don't currently do anything with the address range, that's just
* in there for forwards compatibility.
*/
+#ifndef __NR_riscv_flush_icache
#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
-__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
-
#endif
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 9aaf6c986771..fa2c08e3c05e 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -168,8 +168,8 @@ ENTRY(handle_exception)
/* Handle interrupts */
move a0, sp /* pt_regs */
- REG_L a1, handle_arch_irq
- jr a1
+ move a1, s4 /* scause */
+ tail do_IRQ
1:
/* Exceptions run with interrupts enabled */
csrs sstatus, SR_SIE
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 6e07ed37bbff..c4d2c63f9a29 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -94,6 +94,7 @@ relocate:
or a0, a0, a1
sfence.vma
csrw sptbr, a0
+.align 2
1:
/* Set trap vector to spin forever to help debug */
la a0, .Lsecondary_park
@@ -143,6 +144,7 @@ relocate:
tail smp_callin
#endif
+.align 2
.Lsecondary_park:
/* We lack SMP support or have too many harts, so park this hart */
wfi
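Both .align 2 directives exist because these labels get written into stvec, whose two low bits encode the vectoring mode; a handler address that is not 4-byte aligned would have its low bits swallowed by the mode field. A sketch of the CSR layout (direct mode assumed):

#include <stdio.h>

#define STVEC_MODE_MASK		0x3UL	/* low two bits select the mode */
#define STVEC_MODE_DIRECT	0x0UL

static unsigned long make_stvec(unsigned long handler)
{
	return (handler & ~STVEC_MODE_MASK) | STVEC_MODE_DIRECT;
}

int main(void)
{
	/* a misaligned handler silently loses its low bits */
	printf("%lx\n", make_stvec(0x80000003UL));	/* 80000000 */
	return 0;
}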
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index b74cbfbce2d0..0cfac48a1272 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -1,24 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2018 Christoph Hellwig
*/
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
-#ifdef CONFIG_RISCV_INTC
-#include <linux/irqchip/irq-riscv-intc.h>
+/*
+ * Possible interrupt causes:
+ */
+#define INTERRUPT_CAUSE_SOFTWARE 1
+#define INTERRUPT_CAUSE_TIMER 5
+#define INTERRUPT_CAUSE_EXTERNAL 9
+
+/*
+ * The high order bit of the trap cause register is always set for
+ * interrupts, which allows us to differentiate them from exceptions
+ * quickly. The INTERRUPT_CAUSE_* macros don't contain that bit, so we
+ * need to mask it off.
+ */
+#define INTERRUPT_CAUSE_FLAG (1UL << (__riscv_xlen - 1))
+
+asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs, unsigned long cause)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ switch (cause & ~INTERRUPT_CAUSE_FLAG) {
+ case INTERRUPT_CAUSE_TIMER:
+ riscv_timer_interrupt();
+ break;
+#ifdef CONFIG_SMP
+ case INTERRUPT_CAUSE_SOFTWARE:
+ /*
+ * We only use software interrupts to pass IPIs, so if a non-SMP
+ * system gets one, then we don't know what to do.
+ */
+ riscv_software_interrupt();
+ break;
#endif
+ case INTERRUPT_CAUSE_EXTERNAL:
+ handle_arch_irq(regs);
+ break;
+ default:
+ panic("unexpected interrupt cause");
+ }
+ irq_exit();
+
+ set_irq_regs(old_regs);
+}
void __init init_IRQ(void)
{
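do_IRQ() receives scause straight from the entry code (s4 in the entry.S hunk above); the top bit flags an interrupt as opposed to an exception, so masking it off leaves the plain cause ID the switch dispatches on. A quick model on a 64-bit host:

#include <stdio.h>

#define INTERRUPT_CAUSE_TIMER	5
#define INTERRUPT_CAUSE_FLAG	(1UL << 63)	/* bit __riscv_xlen - 1 */

int main(void)
{
	unsigned long scause = INTERRUPT_CAUSE_FLAG | INTERRUPT_CAUSE_TIMER;

	printf("interrupt? %d, cause = %lu\n",
	       !!(scause & INTERRUPT_CAUSE_FLAG),
	       scause & ~INTERRUPT_CAUSE_FLAG);	/* 1, 5 */
	return 0;
}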
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 1d5e9b934b8c..3303ed2cd419 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -37,7 +37,7 @@ static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
u32 imm12 = (offset & 0x1000) << (31 - 12);
u32 imm11 = (offset & 0x800) >> (11 - 7);
u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
@@ -50,7 +50,7 @@ static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
u32 imm20 = (offset & 0x100000) << (31 - 20);
u32 imm19_12 = (offset & 0xff000);
u32 imm11 = (offset & 0x800) << (20 - 11);
@@ -63,7 +63,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
u16 imm8 = (offset & 0x100) << (12 - 8);
u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
u16 imm5 = (offset & 0x20) >> (5 - 2);
@@ -78,7 +78,7 @@ static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
u16 imm11 = (offset & 0x800) << (12 - 11);
u16 imm10 = (offset & 0x400) >> (10 - 8);
u16 imm9_8 = (offset & 0x300) << (12 - 11);
@@ -96,7 +96,7 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
s32 hi20;
if (offset != (s32)offset) {
@@ -178,7 +178,7 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
s32 hi20;
/* Always emit the got entry */
@@ -200,7 +200,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
s32 fill_v = offset;
u32 hi20, lo12;
@@ -227,7 +227,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
static int apply_r_riscv_call_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
s32 fill_v = offset;
u32 hi20, lo12;
@@ -263,14 +263,14 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- *(u32 *)location += (*(u32 *)v);
+ *(u32 *)location += (u32)v;
return 0;
}
static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- *(u32 *)location -= (*(u32 *)v);
+ *(u32 *)location -= (u32)v;
return 0;
}
@@ -347,7 +347,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int j;
for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
- u64 hi20_loc =
+ unsigned long hi20_loc =
sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[j].r_offset;
u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
@@ -360,12 +360,12 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
Elf_Sym *hi20_sym =
(Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_RISCV_R_SYM(rel[j].r_info);
- u64 hi20_sym_val =
+ unsigned long hi20_sym_val =
hi20_sym->st_value
+ rel[j].r_addend;
/* Calculate lo12 */
- u64 offset = hi20_sym_val - hi20_loc;
+ size_t offset = hi20_sym_val - hi20_loc;
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
&& hi20_type == R_RISCV_GOT_HI20) {
offset = module_emit_got_entry(
diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c
index b0e10c4e9f77..a243fae1c1db 100644
--- a/arch/riscv/kernel/perf_event.c
+++ b/arch/riscv/kernel/perf_event.c
@@ -27,7 +27,6 @@
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/irq.h>
-#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/atomic.h>
#include <linux/of.h>
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index ba3e80712797..9f82a7e34c64 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -50,7 +50,7 @@ static int riscv_gpr_set(struct task_struct *target,
struct pt_regs *regs;
regs = task_pt_regs(target);
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
return ret;
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index ee44a48faf79..db20dc630e7e 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -39,6 +39,27 @@
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
+#ifdef CONFIG_EARLY_PRINTK
+static void sbi_console_write(struct console *co, const char *buf,
+ unsigned int n)
+{
+ int i;
+
+ for (i = 0; i < n; ++i) {
+ if (buf[i] == '\n')
+ sbi_console_putchar('\r');
+ sbi_console_putchar(buf[i]);
+ }
+}
+
+struct console riscv_sbi_early_console_dev __initdata = {
+ .name = "early",
+ .write = sbi_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
+ .index = -1
+};
+#endif
+
#ifdef CONFIG_DUMMY_CONSOLE
struct screen_info screen_info = {
.orig_video_lines = 30,
@@ -195,6 +216,12 @@ static void __init setup_bootmem(void)
void __init setup_arch(char **cmdline_p)
{
+#if defined(CONFIG_EARLY_PRINTK)
+ if (likely(early_console == NULL)) {
+ early_console = &riscv_sbi_early_console_dev;
+ register_console(early_console);
+ }
+#endif
*cmdline_p = boot_command_line;
parse_early_param();
@@ -220,8 +247,3 @@ void __init setup_arch(char **cmdline_p)
riscv_fill_hwcap();
}
-static int __init riscv_device_init(void)
-{
- return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-subsys_initcall_sync(riscv_device_init);
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 6d3962435720..906fe21ea21b 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -45,7 +45,7 @@ int setup_profiling_timer(unsigned int multiplier)
return -EINVAL;
}
-irqreturn_t handle_ipi(void)
+void riscv_software_interrupt(void)
{
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
@@ -60,7 +60,7 @@ irqreturn_t handle_ipi(void)
ops = xchg(pending_ipis, 0);
if (ops == 0)
- return IRQ_HANDLED;
+ return;
if (ops & (1 << IPI_RESCHEDULE))
scheduler_ipi();
@@ -73,8 +73,6 @@ irqreturn_t handle_ipi(void)
/* Order data access and bit testing. */
mb();
}
-
- return IRQ_HANDLED;
}
static void
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index f741458c5a3f..56abab6a9812 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -104,7 +104,6 @@ asmlinkage void __init smp_callin(void)
current->active_mm = mm;
trap_init();
- init_clockevent();
notify_cpu_starting(smp_processor_id());
set_cpu_online(smp_processor_id(), 1);
local_flush_tlb_all();
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index f7181ed8aafc..568026ccf6e8 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -48,7 +48,6 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
}
#endif /* !CONFIG_64BIT */
-#ifdef CONFIG_SMP
/*
* Allows the instruction cache to be flushed from userspace. Despite RISC-V
* having a direct 'fence.i' instruction available to userspace (which we
@@ -66,15 +65,24 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
uintptr_t, flags)
{
+#ifdef CONFIG_SMP
struct mm_struct *mm = current->mm;
bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
+#endif
/* Check the reserved flags. */
if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
return -EINVAL;
+ /*
+ * Without CONFIG_SMP, flush_icache_mm is just a flush_icache_all(),
+ * which generates unused variable warnings all over this function.
+ */
+#ifdef CONFIG_SMP
flush_icache_mm(mm, local);
+#else
+ flush_icache_all();
+#endif
return 0;
}
-#endif
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 2463fcca719e..1911c8f6b8a6 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -13,38 +13,11 @@
*/
#include <linux/clocksource.h>
-#include <linux/clockchips.h>
#include <linux/delay.h>
-
-#ifdef CONFIG_RISCV_TIMER
-#include <linux/timer_riscv.h>
-#endif
-
#include <asm/sbi.h>
unsigned long riscv_timebase;
-DECLARE_PER_CPU(struct clock_event_device, riscv_clock_event);
-
-void riscv_timer_interrupt(void)
-{
-#ifdef CONFIG_RISCV_TIMER
- /*
- * FIXME: This needs to be cleaned up along with the rest of the IRQ
- * handling cleanup. See irq.c for more details.
- */
- struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
-
- evdev->event_handler(evdev);
-#endif
-}
-
-void __init init_clockevent(void)
-{
- timer_probe();
- csr_set(sie, SIE_STIE);
-}
-
void __init time_init(void)
{
struct device_node *cpu;
@@ -56,6 +29,5 @@ void __init time_init(void)
riscv_timebase = prop;
lpj_fine = riscv_timebase / HZ;
-
- init_clockevent();
+ timer_probe();
}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 81a1952015a6..24a9333dda2c 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -138,7 +138,6 @@ asmlinkage void do_trap_break(struct pt_regs *regs)
#endif /* CONFIG_GENERIC_BUG */
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)(regs->sepc), current);
- regs->sepc += 0x4;
}
#ifdef CONFIG_GENERIC_BUG
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index f6561b783b61..eed1c137f618 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -52,8 +52,8 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
# Make sure only to export the intended __vdso_xxx symbol offsets.
quiet_cmd_vdsold = VDSOLD $@
- cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \
- -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
+ cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
+ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
$(CROSS_COMPILE)objcopy \
$(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 596c2ca40d63..445ec84f9a47 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -2,5 +2,6 @@ lib-y += delay.o
lib-y += memcpy.o
lib-y += memset.o
lib-y += uaccess.o
+lib-y += tishift.o
lib-$(CONFIG_32BIT) += udivdi3.o
diff --git a/arch/riscv/lib/tishift.S b/arch/riscv/lib/tishift.S
new file mode 100644
index 000000000000..69abb1277234
--- /dev/null
+++ b/arch/riscv/lib/tishift.S
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2018 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+ .globl __lshrti3
+__lshrti3:
+ beqz a2, .L1
+ li a5,64
+ sub a5,a5,a2
+ addi sp,sp,-16
+ sext.w a4,a5
+ blez a5, .L2
+ sext.w a2,a2
+ sll a4,a1,a4
+ srl a0,a0,a2
+ srl a1,a1,a2
+ or a0,a0,a4
+ sd a1,8(sp)
+ sd a0,0(sp)
+ ld a0,0(sp)
+ ld a1,8(sp)
+ addi sp,sp,16
+ ret
+.L1:
+ ret
+.L2:
+ negw a4,a4
+ srl a1,a1,a4
+ sd a1,0(sp)
+ sd zero,8(sp)
+ ld a0,0(sp)
+ ld a1,8(sp)
+ addi sp,sp,16
+ ret
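__lshrti3 is the libgcc routine for a 128-bit logical right shift, which GCC emits for rv64 once 128-bit types are in play; the value arrives in a1:a0 and the count in a2. A C reference for what the assembly computes, shift count assumed in [0, 128):

#include <stdio.h>
#include <stdint.h>

/* Value held as hi:lo, mirroring a1:a0 above */
static void lshrti3_ref(uint64_t *lo, uint64_t *hi, unsigned int n)
{
	if (n == 0)
		return;
	if (n < 64) {
		*lo = (*lo >> n) | (*hi << (64 - n));
		*hi >>= n;
	} else {			/* the .L2 path above */
		*lo = *hi >> (n - 64);
		*hi = 0;
	}
}

int main(void)
{
	uint64_t lo = 0, hi = 1;	/* the value 1 << 64 */

	lshrti3_ref(&lo, &hi, 64);
	printf("%llu\n", (unsigned long long)lo);	/* 1 */
	return 0;
}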
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 148c98ca9b45..88401d5125bc 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -41,7 +41,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
struct mm_struct *mm;
unsigned long addr, cause;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
- int fault, code = SEGV_MAPERR;
+ int code = SEGV_MAPERR;
+ vm_fault_t fault;
cause = regs->scause;
addr = regs->sbadaddr;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index c77df8142be2..58a522f9bcc3 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -28,7 +28,9 @@ static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
+#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index baed39772c84..054b29c9a533 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -106,7 +106,6 @@ config S390
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
- select ARCH_WANTS_UBSAN_NO_NULL
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS2
@@ -136,16 +135,18 @@ config S390
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select HAVE_FENTRY
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
- select HAVE_GCC_PLUGINS
+ select HAVE_GCC_PLUGINS if BROKEN
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_UNCOMPRESSED
select HAVE_KERNEL_XZ
select HAVE_KPROBES
select HAVE_KRETPROBES
@@ -157,9 +158,11 @@ config S390
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MEMBLOCK_PHYS_MAP
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_NOP_MCOUNT
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select MODULES_USE_ELF_RELA
@@ -183,10 +186,6 @@ config PGTABLE_LEVELS
int
default 5
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "kernel/livepatch/Kconfig"
menu "Processor type and features"
@@ -514,8 +513,6 @@ config SCHED_TOPOLOGY
making when dealing with machines that have multi-threading,
multiple cores or multiple books.
-source kernel/Kconfig.preempt
-
source kernel/Kconfig.hz
config KEXEC
@@ -626,8 +623,6 @@ config FORCE_MAX_ZONEORDER
int
default "9"
-source "mm/Kconfig"
-
config MAX_PHYSMEM_BITS
int "Maximum size of supported physical memory in bits (42-53)"
range 42 53
@@ -797,10 +792,6 @@ config CRASH_DUMP
endmenu
-menu "Executable file formats / Emulations"
-
-source "fs/Kconfig.binfmt"
-
config SECCOMP
def_bool y
prompt "Enable seccomp to safely compute untrusted bytecode"
@@ -818,8 +809,6 @@ config SECCOMP
If unsure, say Y.
-endmenu
-
menu "Power Management"
config ARCH_HIBERNATION_POSSIBLE
@@ -829,30 +818,16 @@ source "kernel/power/Kconfig"
endmenu
-source "net/Kconfig"
-
config PCMCIA
def_bool n
config CCW
def_bool y
-source "drivers/Kconfig"
-
config HAVE_PNETID
tristate
default (SMC || CCWGROUP)
-source "fs/Kconfig"
-
-source "arch/s390/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
menu "Virtualization"
config PFAULT
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 2cfdfbf8d320..190527560b2c 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-source "lib/Kconfig.debug"
-
config S390_PTDUMP
bool "Export kernel pagetable layout to userspace via debugfs"
depends on DEBUG_KERNEL
@@ -20,5 +17,3 @@ config S390_PTDUMP
config EARLY_PRINTK
def_bool y
-
-endmenu
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 68a690442be0..ba6d122526fb 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -14,8 +14,18 @@ LD_BFD := elf64-s390
LDFLAGS := -m elf64_s390
KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
-KBUILD_CFLAGS += -m64
KBUILD_AFLAGS += -m64
+KBUILD_CFLAGS += -m64
+aflags_dwarf := -Wa,-gdwarf-2
+KBUILD_AFLAGS_DECOMPRESSOR := -m64 -D__ASSEMBLY__
+KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
+KBUILD_CFLAGS_DECOMPRESSOR := -m64 -O2
+KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
+KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
+KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
UTS_MACHINE := s390x
STACK_SIZE := 16384
CHECKFLAGS += -D__s390__ -D__s390x__
@@ -52,18 +62,14 @@ cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
#
cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
-# old style option for packed stacks
-ifeq ($(call cc-option-yn,-mkernel-backchain),y)
-cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
-aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-endif
-
-# new style option for packed stacks
ifeq ($(call cc-option-yn,-mpacked-stack),y)
cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
endif
+KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y)
+KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y)
+
ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
ifneq ($(call cc-option-yn,-mstack-size=8192),y)
@@ -71,8 +77,11 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
endif
endif
-ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
-cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
+ifdef CONFIG_WARN_DYNAMIC_STACK
+ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
+ KBUILD_CFLAGS += -mwarn-dynamicstack
+ KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
+ endif
endif
ifdef CONFIG_EXPOLINE
@@ -82,17 +91,20 @@ ifdef CONFIG_EXPOLINE
CC_FLAGS_EXPOLINE += -mindirect-branch-table
export CC_FLAGS_EXPOLINE
cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+ aflags-y += -DCC_USING_EXPOLINE
endif
endif
ifdef CONFIG_FUNCTION_TRACER
-# make use of hotpatch feature if the compiler supports it
-cc_hotpatch := -mhotpatch=0,3
-ifeq ($(call cc-option-yn,$(cc_hotpatch)),y)
-CC_FLAGS_FTRACE := $(cc_hotpatch)
-KBUILD_AFLAGS += -DCC_USING_HOTPATCH
-KBUILD_CFLAGS += -DCC_USING_HOTPATCH
-endif
+ ifeq ($(call cc-option-yn,-mfentry -mnop-mcount),n)
+ # make use of hotpatch feature if the compiler supports it
+ cc_hotpatch := -mhotpatch=0,3
+ ifeq ($(call cc-option-yn,$(cc_hotpatch)),y)
+ CC_FLAGS_FTRACE := $(cc_hotpatch)
+ KBUILD_AFLAGS += -DCC_USING_HOTPATCH
+ KBUILD_CFLAGS += -DCC_USING_HOTPATCH
+ endif
+ endif
endif
# Test CFI features of binutils
@@ -102,11 +114,12 @@ KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables $(cfi)
KBUILD_AFLAGS += $(aflags-y) $(cfi)
+export KBUILD_AFLAGS_DECOMPRESSOR
+export KBUILD_CFLAGS_DECOMPRESSOR
OBJCOPYFLAGS := -O binary
-head-y := arch/s390/kernel/head.o
-head-y += arch/s390/kernel/head64.o
+head-y := arch/s390/kernel/head64.o
# See arch/s390/Kbuild for content of core part of the kernel
core-y += arch/s390/
@@ -121,7 +134,7 @@ boot := arch/s390/boot
syscalls := arch/s390/kernel/syscalls
tools := arch/s390/tools
-all: image bzImage
+all: bzImage
#KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
KBUILD_IMAGE := $(boot)/bzImage
@@ -129,7 +142,7 @@ KBUILD_IMAGE := $(boot)/bzImage
install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
-image bzImage: vmlinux
+bzImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zfcpdump:
@@ -152,8 +165,7 @@ archprepare:
# Don't use tabs in echo arguments
define archhelp
- echo '* image - Kernel image for IPL ($(boot)/image)'
- echo '* bzImage - Compressed kernel image for IPL ($(boot)/bzImage)'
+ echo '* bzImage - Kernel image for IPL ($(boot)/bzImage)'
echo ' install - Install kernel using'
echo ' (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index ee6a9c387c87..9bf8489df6e6 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -206,35 +206,28 @@ static int
appldata_timer_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int len;
- char buf[2];
+ int timer_active = appldata_timer_active;
+ int zero = 0;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &timer_active,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
+
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
- if (!*lenp || *ppos) {
- *lenp = 0;
- return 0;
- }
- if (!write) {
- strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
- ARRAY_SIZE(buf));
- len = strnlen(buf, ARRAY_SIZE(buf));
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
spin_lock(&appldata_timer_lock);
- if (buf[0] == '1')
+ if (timer_active)
__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
- else if (buf[0] == '0')
+ else
__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
spin_unlock(&appldata_timer_lock);
-out:
- *lenp = len;
- *ppos += len;
return 0;
}
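All three rewritten handlers follow the same recipe: snapshot the current value into a local int, point a temporary ctl_table at it with extra1/extra2 as bounds, and let proc_douintvec_minmax() handle parsing, range checking and the read path; only a successful write touches the real state under the lock. A non-kernel model of the bounds step:

#include <stdio.h>

/* Models the extra1/extra2 clamp the handlers now delegate */
static int write_bounded(int *data, int val, const int *lo, const int *hi)
{
	if ((lo && val < *lo) || (hi && val > *hi))
		return -1;		/* the kernel path returns -EINVAL */
	*data = val;
	return 0;
}

int main(void)
{
	int timer_active = 0, zero = 0, one = 1;

	printf("%d\n", write_bounded(&timer_active, 1, &zero, &one));	/* 0 */
	printf("%d\n", write_bounded(&timer_active, 2, &zero, &one));	/* -1 */
	return 0;
}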
@@ -248,37 +241,24 @@ static int
appldata_interval_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int len;
- int interval;
- char buf[16];
+ int interval = appldata_interval;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &interval,
+ .maxlen = sizeof(int),
+ .extra1 = &one,
+ };
- if (!*lenp || *ppos) {
- *lenp = 0;
- return 0;
- }
- if (!write) {
- len = sprintf(buf, "%i\n", appldata_interval);
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- interval = 0;
- sscanf(buf, "%i", &interval);
- if (interval <= 0)
- return -EINVAL;
+ rc = proc_dointvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
spin_lock(&appldata_timer_lock);
appldata_interval = interval;
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
-out:
- *lenp = len;
- *ppos += len;
return 0;
}
@@ -293,10 +273,17 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct appldata_ops *ops = NULL, *tmp_ops;
- unsigned int len;
- int rc, found;
- char buf[2];
struct list_head *lh;
+ int rc, found;
+ int active;
+ int zero = 0;
+ int one = 1;
+ struct ctl_table ctl_entry = {
+ .data = &active,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
found = 0;
mutex_lock(&appldata_ops_mutex);
@@ -317,31 +304,15 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
}
mutex_unlock(&appldata_ops_mutex);
- if (!*lenp || *ppos) {
- *lenp = 0;
+ active = ops->active;
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write) {
module_put(ops->owner);
- return 0;
- }
- if (!write) {
- strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
- len = strnlen(buf, ARRAY_SIZE(buf));
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len)) {
- module_put(ops->owner);
- return -EFAULT;
- }
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len)) {
- module_put(ops->owner);
- return -EFAULT;
+ return rc;
}
mutex_lock(&appldata_ops_mutex);
- if ((buf[0] == '1') && (ops->active == 0)) {
+ if (active && (ops->active == 0)) {
// protect work queue callback
if (!try_module_get(ops->owner)) {
mutex_unlock(&appldata_ops_mutex);
@@ -359,7 +330,7 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
module_put(ops->owner);
} else
ops->active = 1;
- } else if ((buf[0] == '0') && (ops->active == 1)) {
+ } else if (!active && (ops->active == 1)) {
ops->active = 0;
rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
(unsigned long) ops->data, ops->size,
@@ -370,9 +341,6 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
module_put(ops->owner);
}
mutex_unlock(&appldata_ops_mutex);
-out:
- *lenp = len;
- *ppos += len;
module_put(ops->owner);
return 0;
}
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index d1fa37fcce83..9e6668ee93de 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -3,19 +3,52 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
-targets := image
-targets += bzImage
-subdir- := compressed
+KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
-$(obj)/image: vmlinux FORCE
- $(call if_changed,objcopy)
+KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
+KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
+
+#
+# Use -march=z900 for als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
+AFLAGS_head.o += -march=z900
+AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH)
+AFLAGS_mem.o += -march=z900
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_als.o += -march=z900
+CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
+CFLAGS_sclp_early_core.o += -march=z900
+endif
+
+CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
+
+obj-y := head.o als.o ebcdic.o sclp_early_core.o mem.o
+targets := bzImage startup.a $(obj-y)
+subdir- := compressed
+
+OBJECTS := $(addprefix $(obj)/,$(obj-y))
$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
-$(obj)/compressed/vmlinux: FORCE
+$(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
+quiet_cmd_ar = AR $@
+ cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(filter $(OBJECTS), $^)
+
+$(obj)/startup.a: $(OBJECTS) FORCE
+ $(call if_changed,ar)
+
install: $(CONFIGURE) $(obj)/bzImage
sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
System.map "$(INSTALL_PATH)"
+
+chkbss := $(OBJECTS)
+chkbss-target := $(obj)/startup.a
+include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/kernel/als.c b/arch/s390/boot/als.c
index d1892bf36cab..d592e0d90d9f 100644
--- a/arch/s390/kernel/als.c
+++ b/arch/s390/boot/als.c
@@ -3,12 +3,10 @@
* Copyright IBM Corp. 2016
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/facility.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
-#include "entry.h"
/*
* The code within this file will be called very early. It may _not_
@@ -18,9 +16,9 @@
* For temporary objects the stack (16k) should be used.
*/
-static unsigned long als[] __initdata = { FACILITIES_ALS };
+static unsigned long als[] = { FACILITIES_ALS };
-static void __init u16_to_hex(char *str, u16 val)
+static void u16_to_hex(char *str, u16 val)
{
int i, num;
@@ -33,9 +31,9 @@ static void __init u16_to_hex(char *str, u16 val)
*str = '\0';
}
-static void __init print_machine_type(void)
+static void print_machine_type(void)
{
- static char mach_str[80] __initdata = "Detected machine-type number: ";
+ static char mach_str[80] = "Detected machine-type number: ";
char type_str[5];
struct cpuid id;
@@ -46,7 +44,7 @@ static void __init print_machine_type(void)
sclp_early_printk(mach_str);
}
-static void __init u16_to_decimal(char *str, u16 val)
+static void u16_to_decimal(char *str, u16 val)
{
int div = 1;
@@ -60,9 +58,9 @@ static void __init u16_to_decimal(char *str, u16 val)
*str = '\0';
}
-static void __init print_missing_facilities(void)
+static void print_missing_facilities(void)
{
- static char als_str[80] __initdata = "Missing facilities: ";
+ static char als_str[80] = "Missing facilities: ";
unsigned long val;
char val_str[6];
int i, j, first;
@@ -95,7 +93,7 @@ static void __init print_missing_facilities(void)
sclp_early_printk("See Principles of Operations for facility bits\n");
}
-static void __init facility_mismatch(void)
+static void facility_mismatch(void)
{
sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
print_machine_type();
@@ -103,7 +101,7 @@ static void __init facility_mismatch(void)
disabled_wait(0x8badcccc);
}
-void __init verify_facilities(void)
+void verify_facilities(void)
{
int i;
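
Now that these helpers live in the decompressor they run long before any console or vsprintf() exists, so numbers are formatted by hand and pushed out through sclp_early_printk(). A minimal sketch of what a formatter like u16_to_hex() has to do under those constraints, illustrative only and not the exact body from als.c:

static void u16_to_hex(char *str, u16 val)
{
	int i, num;

	/* emit all four nibbles, most significant first */
	for (i = 12; i >= 0; i -= 4) {
		num = (val >> i) & 0xf;
		*str++ = num < 10 ? '0' + num : 'a' + num - 10;
	}
	*str = '\0';	/* caller passes the buffer to sclp_early_printk() */
}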
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
index 2088cc140629..45aeb4f08752 100644
--- a/arch/s390/boot/compressed/.gitignore
+++ b/arch/s390/boot/compressed/.gitignore
@@ -1,4 +1,5 @@
sizes.h
vmlinux
vmlinux.lds
+vmlinux.scr.lds
vmlinux.bin.full
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 5766f7b9b271..04609478d18b 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -6,39 +6,29 @@
#
KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += misc.o piggy.o sizes.h head.o
-
-KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
-KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
-KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
-KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+targets += vmlinux.scr.lds $(obj-y) $(if $(CONFIG_KERNEL_UNCOMPRESSED),,sizes.h)
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
+KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
+KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
-OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o ebcdic.o als.o)
-OBJECTS += $(objtree)/drivers/s390/char/sclp_early_core.o
-OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
+OBJECTS := $(addprefix $(obj)/,$(obj-y))
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
$(call if_changed,ld)
-TRIM_HEAD_SIZE := 0x11000
-
-sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - $(TRIM_HEAD_SIZE))/p'
+# extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin
+sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - 0x100000)/p'
quiet_cmd_sizes = GEN $@
cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
-quiet_cmd_trim_head = TRIM $@
- cmd_trim_head = tail -c +$$(($(TRIM_HEAD_SIZE) + 1)) $< > $@
-
$(obj)/sizes.h: vmlinux
$(call if_changed,sizes)
@@ -48,21 +38,18 @@ $(obj)/head.o: $(obj)/sizes.h
CFLAGS_misc.o += -I$(objtree)/$(obj)
$(obj)/misc.o: $(obj)/sizes.h
-OBJCOPYFLAGS_vmlinux.bin.full := -R .comment -S
-$(obj)/vmlinux.bin.full: vmlinux
+OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
+$(obj)/vmlinux.bin: vmlinux
$(call if_changed,objcopy)
-$(obj)/vmlinux.bin: $(obj)/vmlinux.bin.full
- $(call if_changed,trim_head)
-
vmlinux.bin.all-y := $(obj)/vmlinux.bin
-suffix-$(CONFIG_KERNEL_GZIP) := gz
-suffix-$(CONFIG_KERNEL_BZIP2) := bz2
-suffix-$(CONFIG_KERNEL_LZ4) := lz4
-suffix-$(CONFIG_KERNEL_LZMA) := lzma
-suffix-$(CONFIG_KERNEL_LZO) := lzo
-suffix-$(CONFIG_KERNEL_XZ) := xz
+suffix-$(CONFIG_KERNEL_GZIP) := .gz
+suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
+suffix-$(CONFIG_KERNEL_LZ4) := .lz4
+suffix-$(CONFIG_KERNEL_LZMA) := .lzma
+suffix-$(CONFIG_KERNEL_LZO) := .lzo
+suffix-$(CONFIG_KERNEL_XZ) := .xz
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
$(call if_changed,gzip)
@@ -78,5 +65,9 @@ $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
$(call if_changed,xzkern)
LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
+$(obj)/piggy.o: $(obj)/vmlinux.scr.lds $(obj)/vmlinux.bin$(suffix-y)
$(call if_changed,ld)
+
+chkbss := $(filter-out $(obj)/misc.o $(obj)/piggy.o,$(OBJECTS))
+chkbss-target := $(obj)/vmlinux.bin
+include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index 9f94eca0f467..df8dbbc17bcc 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -15,7 +15,7 @@
#include "sizes.h"
__HEAD
-ENTRY(startup_continue)
+ENTRY(startup_decompressor)
basr %r13,0 # get base
.LPG1:
# setup stack
@@ -23,7 +23,7 @@ ENTRY(startup_continue)
aghi %r15,-160
brasl %r14,decompress_kernel
# Set up registers for memory mover. We move the decompressed image to
- # 0x11000, where startup_continue of the decompressed image is supposed
+ # 0x100000, where startup_continue of the decompressed image is supposed
# to be.
lgr %r4,%r2
lg %r2,.Loffset-.LPG1(%r13)
@@ -33,7 +33,7 @@ ENTRY(startup_continue)
la %r1,0x200
mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
# When the memory mover is done we pass control to
- # arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
+ # arch/s390/kernel/head64.S:startup_continue which lives at 0x100000 in
# the decompressed image.
lgr %r6,%r2
br %r1
@@ -47,6 +47,6 @@ mover_end:
.Lstack:
.quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.Loffset:
- .quad 0x11000
+ .quad 0x100000
.Lmvsize:
.quad SZ__bss_start
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 511b2cc9b91a..f66ad73c205b 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -71,43 +71,6 @@ static int puts(const char *s)
return 0;
}
-void *memset(void *s, int c, size_t n)
-{
- char *xs;
-
- xs = s;
- while (n--)
- *xs++ = c;
- return s;
-}
-
-void *memcpy(void *dest, const void *src, size_t n)
-{
- const char *s = src;
- char *d = dest;
-
- while (n--)
- *d++ = *s++;
- return dest;
-}
-
-void *memmove(void *dest, const void *src, size_t n)
-{
- const char *s = src;
- char *d = dest;
-
- if (d <= s) {
- while (n--)
- *d++ = *s++;
- } else {
- d += n;
- s += n;
- while (n--)
- *--d = *--s;
- }
- return dest;
-}
-
static void error(char *x)
{
unsigned long long psw = 0x000a0000deadbeefULL;
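
These open-coded routines can go because the decompressor now links the assembler implementations from arch/s390/lib through the new boot/mem.S wrapper added later in this patch. The only subtlety dropped here is memmove()'s backward-copy branch; a small standalone illustration (plain userspace C, not kernel code) of the overlap case it existed for:

#include <assert.h>
#include <string.h>

int main(void)
{
	char buf[8] = "abcdef";

	/* overlapping copy: the destination starts inside the source */
	memmove(buf + 2, buf, 4);
	assert(memcmp(buf, "ababcd", 6) == 0);
	/* a naive forward byte copy would read bytes it had already
	 * overwritten and produce "ababab" instead */
	return 0;
}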
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index d43c2db12d30..b16ac8b3c439 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -23,13 +23,10 @@ SECTIONS
*(.text.*)
_etext = . ;
}
- .rodata.compressed : {
- *(.rodata.compressed)
- }
.rodata : {
_rodata = . ;
*(.rodata) /* read-only data */
- *(.rodata.*)
+ *(EXCLUDE_FILE (*piggy.o) .rodata.compressed)
_erodata = . ;
}
.data : {
@@ -38,6 +35,15 @@ SECTIONS
*(.data.*)
_edata = . ;
}
+ startup_continue = 0x100000;
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
+ . = 0x100000;
+#else
+ . = ALIGN(8);
+#endif
+ .rodata.compressed : {
+ *(.rodata.compressed)
+ }
. = ALIGN(256);
.bss : {
_bss = . ;
@@ -54,5 +60,6 @@ SECTIONS
*(.eh_frame)
*(__ex_table)
*(*__ksymtab*)
+ *(___kcrctab*)
}
}
diff --git a/arch/s390/boot/compressed/vmlinux.scr b/arch/s390/boot/compressed/vmlinux.scr.lds.S
index 42a242597f34..ff01d18c9222 100644
--- a/arch/s390/boot/compressed/vmlinux.scr
+++ b/arch/s390/boot/compressed/vmlinux.scr.lds.S
@@ -2,10 +2,14 @@
SECTIONS
{
.rodata.compressed : {
+#ifndef CONFIG_KERNEL_UNCOMPRESSED
input_len = .;
LONG(input_data_end - input_data) input_data = .;
+#endif
*(.data)
+#ifndef CONFIG_KERNEL_UNCOMPRESSED
output_len = . - 4;
input_data_end = .;
+#endif
}
}
diff --git a/arch/s390/boot/ebcdic.c b/arch/s390/boot/ebcdic.c
new file mode 100644
index 000000000000..7391e7d36086
--- /dev/null
+++ b/arch/s390/boot/ebcdic.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kernel/ebcdic.c"
diff --git a/arch/s390/kernel/head.S b/arch/s390/boot/head.S
index 5c42f16a54c4..f721913b73f1 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/boot/head.S
@@ -272,14 +272,14 @@ iplstart:
.org 0x10000
ENTRY(startup)
j .Lep_startup_normal
- .org 0x10008
+ .org EP_OFFSET
#
# This is a list of s390 kernel entry points. At address 0x1000f the number of
# valid entry points is stored.
#
# IMPORTANT: Do not change this table, it is s390 kernel ABI!
#
- .ascii "S390EP"
+ .ascii EP_STRING
.byte 0x00,0x01
#
# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
@@ -310,10 +310,11 @@ ENTRY(startup_kdump)
l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-STACK_FRAME_OVERHEAD
brasl %r14,verify_facilities
-# For uncompressed images, continue in
-# arch/s390/kernel/head64.S. For compressed images, continue in
-# arch/s390/boot/compressed/head.S.
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
jg startup_continue
+#else
+ jg startup_decompressor
+#endif
.Lstack:
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
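
For reference, the stack constant works out as follows, assuming the usual s390 values PAGE_SHIFT = 12 and THREAD_SIZE_ORDER = 2 (neither is shown in this patch):

/* 0x8000 + (1 << (12 + 2)) = 0x8000 + 0x4000 = 0xc000
 * so the early boot stack occupies 0x8000..0xc000 and grows down:
 * the 16k of temporary stack the als.c comment above refers to
 */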
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/boot/head_kdump.S
index 174d6959bf5b..174d6959bf5b 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/boot/head_kdump.S
diff --git a/arch/s390/boot/mem.S b/arch/s390/boot/mem.S
new file mode 100644
index 000000000000..b33463633f03
--- /dev/null
+++ b/arch/s390/boot/mem.S
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include "../lib/mem.S"
diff --git a/arch/s390/boot/sclp_early_core.c b/arch/s390/boot/sclp_early_core.c
new file mode 100644
index 000000000000..5a19fd7020b5
--- /dev/null
+++ b/arch/s390/boot/sclp_early_core.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../drivers/s390/char/sclp_early_core.c"
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index ad47abd08630..c54cb26eb7f5 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -1035,7 +1035,6 @@ static struct aead_alg gcm_aes_aead = {
.chunksize = AES_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_priority = 900,
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 3b7f96c9eead..86aed30fad3a 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -128,7 +128,6 @@ static struct shash_alg ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index a00c17f761c1..009572e8276d 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -78,7 +78,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 944aa6b237cd..62833a1d8724 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -71,7 +71,6 @@ static struct shash_alg sha256_alg = {
.cra_name = "sha256",
.cra_driver_name= "sha256-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -108,7 +107,6 @@ static struct shash_alg sha224_alg = {
.cra_name = "sha224",
.cra_driver_name= "sha224-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index b17eded532b1..be589c340d15 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -76,7 +76,6 @@ static struct shash_alg sha512_alg = {
.cra_name = "sha512",
.cra_driver_name= "sha512-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -115,7 +114,6 @@ static struct shash_alg sha384_alg = {
.cra_name = "sha384",
.cra_driver_name= "sha384-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index a2945b289a29..3452e18bb1ca 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -497,7 +497,7 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
}
diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
rc = hypfs_create_str(cpu_dir, "type", buffer);
- return PTR_RET(rc);
+ return PTR_ERR_OR_ZERO(rc);
}
static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
@@ -544,7 +544,7 @@ static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
return PTR_ERR(rc);
diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
rc = hypfs_create_str(cpu_dir, "type", buffer);
- return PTR_RET(rc);
+ return PTR_ERR_OR_ZERO(rc);
}
static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 06b513d192b9..c681329fdeec 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -36,7 +36,7 @@ struct hypfs_sb_info {
kuid_t uid; /* uid used for files and dirs */
kgid_t gid; /* gid used for files and dirs */
struct dentry *update_file; /* file to trigger update */
- time_t last_update; /* last update time in secs since 1970 */
+ time64_t last_update; /* last update, CLOCK_MONOTONIC time */
struct mutex lock; /* lock to protect update process */
};
@@ -52,7 +52,7 @@ static void hypfs_update_update(struct super_block *sb)
struct hypfs_sb_info *sb_info = sb->s_fs_info;
struct inode *inode = d_inode(sb_info->update_file);
- sb_info->last_update = get_seconds();
+ sb_info->last_update = ktime_get_seconds();
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
}
@@ -179,7 +179,7 @@ static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
* to restart data collection in this case.
*/
mutex_lock(&fs_info->lock);
- if (fs_info->last_update == get_seconds()) {
+ if (fs_info->last_update == ktime_get_seconds()) {
rc = -EBUSY;
goto out;
}
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index c1bedb4c8de0..046e044a48d0 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -47,6 +47,50 @@ struct ap_queue_status {
};
/**
+ * ap_instructions_available() - Test if AP instructions are available.
+ *
+ * Returns 0 if the AP instructions are installed.
+ */
+static inline int ap_instructions_available(void)
+{
+ register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
+ register unsigned long reg1 asm ("1") = -ENODEV;
+ register unsigned long reg2 asm ("2");
+
+ asm volatile(
+ " .long 0xb2af0000\n" /* PQAP(TAPQ) */
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg1), "=d" (reg2)
+ : "d" (reg0)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * ap_tapq(): Test adjunct processor queue.
+ * @qid: The AP queue number
+ * @info: Pointer to queue descriptor
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+{
+ register unsigned long reg0 asm ("0") = qid;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2");
+
+ asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
+ : "=d" (reg1), "=d" (reg2)
+ : "d" (reg0)
+ : "cc");
+ if (info)
+ *info = reg2;
+ return reg1;
+}
+
+/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
* @tbit: Test facilities bit
@@ -54,10 +98,57 @@ struct ap_queue_status {
*
* Returns AP queue status structure.
*/
-struct ap_queue_status ap_test_queue(ap_qid_t qid,
- int tbit,
- unsigned long *info);
+static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
+ int tbit,
+ unsigned long *info)
+{
+ if (tbit)
+ qid |= 1UL << 23; /* set T bit */
+ return ap_tapq(qid, info);
+}
+/**
+ * ap_rapq(): Reset adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+{
+ register unsigned long reg0 asm ("0") = qid | (1UL << 24);
+ register struct ap_queue_status reg1 asm ("1");
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(RAPQ) */
+ : "=d" (reg1)
+ : "d" (reg0)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * ap_zapq(): Reset and zeroize adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
+{
+ register unsigned long reg0 asm ("0") = qid | (2UL << 24);
+ register struct ap_queue_status reg1 asm ("1");
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(ZAPQ) */
+ : "=d" (reg1)
+ : "d" (reg0)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * struct ap_config_info - convenience struct for AP crypto
+ * config info as returned by the ap_qci() function.
+ */
struct ap_config_info {
unsigned int apsc : 1; /* S bit */
unsigned int apxa : 1; /* N bit */
@@ -74,50 +165,189 @@ struct ap_config_info {
unsigned char _reserved4[16];
} __aligned(8);
-/*
- * ap_query_configuration(): Fetch cryptographic config info
+/**
+ * ap_qci(): Get AP configuration data
*
- * Returns the ap configuration info fetched via PQAP(QCI).
- * On success 0 is returned, on failure a negative errno
- * is returned, e.g. if the PQAP(QCI) instruction is not
- * available, the return value will be -EOPNOTSUPP.
+ * Returns 0 on success, or -EOPNOTSUPP.
*/
-int ap_query_configuration(struct ap_config_info *info);
+static inline int ap_qci(struct ap_config_info *config)
+{
+ register unsigned long reg0 asm ("0") = 4UL << 24;
+ register unsigned long reg1 asm ("1") = -EOPNOTSUPP;
+ register struct ap_config_info *reg2 asm ("2") = config;
+
+ asm volatile(
+ ".long 0xb2af0000\n" /* PQAP(QCI) */
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg1)
+ : "d" (reg0), "d" (reg2)
+ : "cc", "memory");
+
+ return reg1;
+}
/*
* struct ap_qirq_ctrl - convenient struct for easy invocation
- * of the ap_queue_irq_ctrl() function. This struct is passed
- * as GR1 parameter to the PQAP(AQIC) instruction. For details
- * please see the AR documentation.
+ * of the ap_aqic() function. This struct is passed as GR1
+ * parameter to the PQAP(AQIC) instruction. For details please
+ * see the AR documentation.
*/
struct ap_qirq_ctrl {
unsigned int _res1 : 8;
- unsigned int zone : 8; /* zone info */
- unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */
+ unsigned int zone : 8; /* zone info */
+ unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */
unsigned int _res2 : 4;
- unsigned int gisc : 3; /* guest isc field */
+ unsigned int gisc : 3; /* guest isc field */
unsigned int _res3 : 6;
- unsigned int gf : 2; /* gisa format */
+ unsigned int gf : 2; /* gisa format */
unsigned int _res4 : 1;
- unsigned int gisa : 27; /* gisa origin */
+ unsigned int gisa : 27; /* gisa origin */
unsigned int _res5 : 1;
- unsigned int isc : 3; /* irq sub class */
+ unsigned int isc : 3; /* irq sub class */
};
/**
- * ap_queue_irq_ctrl(): Control interruption on a AP queue.
+ * ap_aqic(): Control interruption for a specific AP.
* @qid: The AP queue number
- * @qirqctrl: struct ap_qirq_ctrl, see above
+ * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
* @ind: The notification indicator byte
*
* Returns AP queue status.
+ */
+static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
+ struct ap_qirq_ctrl qirqctrl,
+ void *ind)
+{
+ register unsigned long reg0 asm ("0") = qid | (3UL << 24);
+ register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl;
+ register struct ap_queue_status reg1_out asm ("1");
+ register void *reg2 asm ("2") = ind;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(AQIC) */
+ : "=d" (reg1_out)
+ : "d" (reg0), "d" (reg1_in), "d" (reg2)
+ : "cc");
+ return reg1_out;
+}
+
+/*
+ * union ap_qact_ap_info - used together with the
+ * ap_aqic() function to provide a convenient way
+ * to handle the ap info needed by the qact function.
+ */
+union ap_qact_ap_info {
+ unsigned long val;
+ struct {
+ unsigned int : 3;
+ unsigned int mode : 3;
+ unsigned int : 26;
+ unsigned int cat : 8;
+ unsigned int : 8;
+ unsigned char ver[2];
+ };
+};
+
+/**
+ * ap_qact(): Query AP compatibility type.
+ * @qid: The AP queue number
+ * @apinfo: On input the info about the AP queue; on output the
+ * alternate AP queue info returned by the qact function
+ * in GR2.
*
- * Control interruption on the given AP queue.
- * Just a simple wrapper function for the low level PQAP(AQIC)
- * instruction available for other kernel modules.
+ * Returns AP queue status. Check response_code field for failures.
*/
-struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid,
- struct ap_qirq_ctrl qirqctrl,
- void *ind);
+static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
+ union ap_qact_ap_info *apinfo)
+{
+ register unsigned long reg0 asm ("0") = qid | (5UL << 24)
+ | ((ifbit & 0x01) << 22);
+ register unsigned long reg1_in asm ("1") = apinfo->val;
+ register struct ap_queue_status reg1_out asm ("1");
+ register unsigned long reg2 asm ("2");
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(QACT) */
+ : "+d" (reg1_in), "=d" (reg1_out), "=d" (reg2)
+ : "d" (reg0)
+ : "cc");
+ apinfo->val = reg2;
+ return reg1_out;
+}
+
+/**
+ * ap_nqap(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
+ unsigned long long psmid,
+ void *msg, size_t length)
+{
+ register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = (unsigned long) msg;
+ register unsigned long reg3 asm ("3") = (unsigned long) length;
+ register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+ register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+
+ asm volatile (
+ "0: .long 0xb2ad0042\n" /* NQAP */
+ " brc 2,0b"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+ : "d" (reg4), "d" (reg5)
+ : "cc", "memory");
+ return reg1;
+}
+
+/**
+ * ap_dqap(): Receive message from adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer to program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on DQAP means the receive has taken place
+ * but only partially. The response is incomplete, hence the
+ * DQAP is repeated.
+ * Condition code 2 on DQAP also means the receive is incomplete,
+ * this time because a segment boundary was reached. Again, the
+ * DQAP is repeated.
+ * Note that gpr2 is used by the DQAP instruction to keep track of
+ * any 'residual' length, in case the instruction gets interrupted.
+ * Hence it gets zeroed before the instruction.
+ */
+static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
+ unsigned long long *psmid,
+ void *msg, size_t length)
+{
+ register unsigned long reg0 asm("0") = qid | 0x80000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm("2") = 0UL;
+ register unsigned long reg4 asm("4") = (unsigned long) msg;
+ register unsigned long reg5 asm("5") = (unsigned long) length;
+ register unsigned long reg6 asm("6") = 0UL;
+ register unsigned long reg7 asm("7") = 0UL;
+
+ asm volatile(
+ "0: .long 0xb2ae0064\n" /* DQAP */
+ " brc 6,0b\n"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2),
+ "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
+ : : "cc", "memory");
+ *psmid = (((unsigned long long) reg6) << 32) + reg7;
+ return reg1;
+}
#endif /* _ASM_S390_AP_H_ */
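
With everything inlined, a driver can probe AP facilities without the AP bus module. A hypothetical caller built only from the helpers above (ap_queue_usable() is a made-up name; the response_code check follows the "Check response_code field for failures" note in the qact comment):

/* sketch assuming <asm/ap.h> as changed by this patch */
static bool ap_queue_usable(ap_qid_t qid)
{
	struct ap_queue_status status;
	unsigned long info;

	if (ap_instructions_available() != 0)
		return false;		/* PQAP is not installed */
	status = ap_tapq(qid, &info);	/* test the queue */
	return status.response_code == 0;
}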
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 4b55532f15c4..fd20ab5d4cf7 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -55,17 +55,9 @@ static inline void atomic_add(int i, atomic_t *v)
__atomic_add(i, &v->counter);
}
-#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v) atomic_add(1, _v)
-#define atomic_inc_return(_v) atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v) atomic_fetch_add(-(int)(_i), _v)
-#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v) atomic_sub(1, _v)
-#define atomic_dec_return(_v) atomic_sub_return(1, _v)
-#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
#define ATOMIC_OPS(op) \
static inline void atomic_##op(int i, atomic_t *v) \
@@ -90,21 +82,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return __atomic_cmpxchg(&v->counter, old, new);
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == u))
- break;
- old = atomic_cmpxchg(v, c, c + a);
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#define ATOMIC64_INIT(i) { (i) }
static inline long atomic64_read(const atomic64_t *v)
@@ -168,50 +145,8 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
-static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
-{
- long c, old;
-
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == u))
- break;
- old = atomic64_cmpxchg(v, c, c + i);
- if (likely(old == c))
- break;
- c = old;
- }
- return c != u;
-}
-
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
- long c, old, dec;
-
- c = atomic64_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic64_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
-#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v) atomic64_add(1, _v)
-#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
-#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long)(_i), _v)
#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long)(_i), _v)
#define atomic64_sub(_i, _v) atomic64_add(-(long)(_i), _v)
-#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v) atomic64_sub(1, _v)
-#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
-#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#endif /* __ARCH_S390_ATOMIC__ */
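
Every helper removed above was an instance of the same compare-and-exchange loop, which the generic atomic fallbacks now provide. The pattern, sketched once for reference (same logic as the deleted __atomic_add_unless()):

static inline int fetch_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	while (c != u) {
		int old = atomic_cmpxchg(v, c, c + a);

		if (old == c)
			break;		/* our update won the race */
		c = old;		/* lost the race: retry with the fresh value */
	}
	return c;			/* old value; c == u means nothing was added */
}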
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index de023a9a88ca..bf2cbff926ef 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -2,7 +2,7 @@
/*
* CPU-measurement facilities
*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012, 2018
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
@@ -139,8 +139,14 @@ struct hws_trailer_entry {
unsigned char timestamp[16]; /* 16 - 31 timestamp */
unsigned long long reserved1; /* 32 -Reserved */
unsigned long long reserved2; /* */
- unsigned long long progusage1; /* 48 - reserved for programming use */
- unsigned long long progusage2; /* */
+ union { /* 48 - reserved for programming use */
+ struct {
+ unsigned int clock_base:1; /* in progusage2 */
+ unsigned long long progusage1:63;
+ unsigned long long progusage2;
+ };
+ unsigned long long progusage[2];
+ };
} __packed;
/* Load program parameter */
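
The union keeps the trailer layout stable: the flag bit occupies the most significant bit of the first programming-use word, while the TOD clock base value itself is stored in progusage2 (see aux_sdb_init() later in this patch). An illustrative userspace-style check, assuming big-endian bitfield layout as on s390:

struct hws_trailer_entry te = { };

te.clock_base = 1;
/* big-endian bitfields fill from the most significant bit down,
 * so raw readers of progusage[0] still see the flag */
assert(te.progusage[0] >> 63);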
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index cfccc0edd00d..8ea270fdc7fb 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -4,7 +4,7 @@
#define ARCH_SUPPORTS_FTRACE_OPS 1
-#ifdef CC_USING_HOTPATCH
+#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
#define MCOUNT_INSN_SIZE 6
#else
#define MCOUNT_INSN_SIZE 24
@@ -42,7 +42,7 @@ struct ftrace_insn {
static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CC_USING_HOTPATCH
+#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
/* brcl 0,0 */
insn->opc = 0xc004;
insn->disp = 0;
@@ -57,7 +57,7 @@ static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
static inline int is_ftrace_nop(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CC_USING_HOTPATCH
+#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
if (insn->disp == 0)
return 1;
#else
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index e07cce88dfb0..fcbd638fb9f4 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -9,6 +9,14 @@
#ifndef _ASM_S390_GMAP_H
#define _ASM_S390_GMAP_H
+/* Generic bits for GMAP notification on DAT table entry changes. */
+#define GMAP_NOTIFY_SHADOW 0x2
+#define GMAP_NOTIFY_MPROT 0x1
+
+/* Status bits only for huge segment entries */
+#define _SEGMENT_ENTRY_GMAP_IN 0x8000 /* invalidation notify bit */
+#define _SEGMENT_ENTRY_GMAP_UC 0x4000 /* dirty (migration) */
+
/**
* struct gmap_struct - guest address space
* @list: list head for the mm->context gmap list
@@ -132,4 +140,6 @@ void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *,
int gmap_mprotect_notify(struct gmap *, unsigned long start,
unsigned long len, int prot);
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
+ unsigned long gaddr, unsigned long vmaddr);
#endif /* _ASM_S390_GMAP_H */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 9c5fc50204dd..2d1afa58a4b6 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -37,7 +37,10 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
-#define arch_clear_hugepage_flags(page) do { } while (0)
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_arch_1, &page->flags);
+}
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 13de80cf741c..b106aa29bf55 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -68,8 +68,6 @@ struct kprobe_ctlblk {
unsigned long kprobe_saved_imask;
unsigned long kprobe_saved_ctl[3];
struct prev_kprobe prev_kprobe;
- struct pt_regs jprobe_saved_regs;
- kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *p);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a2188e309bd6..29c940bf8506 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -269,6 +269,7 @@ struct kvm_s390_sie_block {
__u8 reserved1c0[8]; /* 0x01c0 */
#define ECD_HOSTREGMGMT 0x20000000
#define ECD_MEF 0x08000000
+#define ECD_ETOKENF 0x02000000
__u32 ecd; /* 0x01c8 */
__u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
@@ -655,6 +656,7 @@ struct kvm_vcpu_arch {
seqcount_t cputm_seqcount;
__u64 cputm_start;
bool gs_enabled;
+ bool skey_enabled;
};
struct kvm_vm_stat {
@@ -793,12 +795,6 @@ struct kvm_s390_vsie {
struct page *pages[KVM_MAX_VCPUS];
};
-struct kvm_s390_migration_state {
- unsigned long bitmap_size; /* in bits (number of guest pages) */
- atomic64_t dirty_pages; /* number of dirty pages */
- unsigned long *pgste_bitmap;
-};
-
struct kvm_arch{
void *sca;
int use_esca;
@@ -828,7 +824,8 @@ struct kvm_arch{
struct kvm_s390_vsie vsie;
u8 epdx;
u64 epoch;
- struct kvm_s390_migration_state *migration_state;
+ int migration_mode;
+ atomic64_t cmma_dirty_pages;
/* subset of available cpu features enabled by user space */
DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
struct kvm_s390_gisa *gisa;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 5bc888841eaf..406d940173ab 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -185,7 +185,7 @@ struct lowcore {
/* Transaction abort diagnostic block */
__u8 pgm_tdb[256]; /* 0x1800 */
__u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
-} __packed;
+} __packed __aligned(8192);
#define S390_lowcore (*((struct lowcore *) 0))
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index f5ff9dbad8ac..f31a15044c24 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -24,6 +24,8 @@ typedef struct {
unsigned int uses_skeys:1;
/* The mmu context uses CMM. */
unsigned int uses_cmm:1;
+ /* The gmaps associated with this context are allowed to use huge pages. */
+ unsigned int allow_gmap_hpage_1m:1;
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d16bc79c30bb..0717ee76885d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -32,6 +32,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 0;
mm->context.uses_skeys = 0;
mm->context.uses_cmm = 0;
+ mm->context.allow_gmap_hpage_1m = 0;
#endif
switch (mm->context.asce_limit) {
case _REGION2_SIZE:
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index a01f81186e86..123dac3717b3 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -8,7 +8,7 @@
#ifdef __ASSEMBLY__
-#ifdef CONFIG_EXPOLINE
+#ifdef CC_USING_EXPOLINE
_LC_BR_R1 = __LC_BR_R1
@@ -189,7 +189,7 @@ _LC_BR_R1 = __LC_BR_R1
.macro BASR_EX rsave,rtarget,ruse=%r1
basr \rsave,\rtarget
.endm
-#endif
+#endif /* CC_USING_EXPOLINE */
#endif /* __ASSEMBLY__ */
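
The guard changes because the two macros answer different questions; sketched here as the patch relies on it (the Makefile side is stated from general knowledge of the s390 build, not shown in this diff):

/* CONFIG_EXPOLINE   : the mitigation was selected in Kconfig
 * CC_USING_EXPOLINE : the compiler was actually invoked with the
 *                     expoline options, so the C objects contain
 *                     thunks; only then must these asm macros match
 */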
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 94f8db468c9b..10fe982f2b4b 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -51,6 +51,10 @@ struct zpci_fmb_fmt2 {
u64 max_work_units;
};
+struct zpci_fmb_fmt3 {
+ u64 tx_bytes;
+};
+
struct zpci_fmb {
u32 format : 8;
u32 fmt_ind : 24;
@@ -66,6 +70,7 @@ struct zpci_fmb {
struct zpci_fmb_fmt0 fmt0;
struct zpci_fmb_fmt1 fmt1;
struct zpci_fmb_fmt2 fmt2;
+ struct zpci_fmb_fmt3 fmt3;
};
} __packed __aligned(128);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5ab636089c60..0e7cb0dc9c33 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -268,8 +268,10 @@ static inline int is_module_addr(void *addr)
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
+#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
+#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
+#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL
+#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */
#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
@@ -1101,7 +1103,8 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
+bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
+ pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
@@ -1116,6 +1119,10 @@ int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
unsigned long *oldpte, unsigned long *oldpgste);
+void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
/*
* Certain architectures need to do special things when PTEs
diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h
index 6090670df51f..e297bcfc476f 100644
--- a/arch/s390/include/asm/purgatory.h
+++ b/arch/s390/include/asm/purgatory.h
@@ -13,11 +13,5 @@
int verify_sha256_digest(void);
-extern u64 kernel_entry;
-extern u64 kernel_type;
-
-extern u64 crash_start;
-extern u64 crash_size;
-
#endif /* __ASSEMBLY__ */
#endif /* _S390_PURGATORY_H_ */
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index de11ecc99c7c..9c9970a5dfb1 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -262,7 +262,6 @@ struct qdio_outbuf_state {
void *user;
};
-#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
#define CHSC_AC1_INITIATE_INPUTQ 0x80
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 54f81f8ed662..724faede8ac5 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,6 +4,4 @@
#include <asm-generic/sections.h>
-extern char _ehead[];
-
#endif
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 9c30ebe046f3..1d66016f4170 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -9,8 +9,10 @@
#include <linux/const.h>
#include <uapi/asm/setup.h>
-
+#define EP_OFFSET 0x10008
+#define EP_STRING "S390EP"
#define PARMAREA 0x10400
+#define PARMAREA_END 0x11000
/*
* Machine features detected in early.c
diff --git a/arch/s390/include/uapi/asm/chsc.h b/arch/s390/include/uapi/asm/chsc.h
index dc329aa03f76..83a574e95b3a 100644
--- a/arch/s390/include/uapi/asm/chsc.h
+++ b/arch/s390/include/uapi/asm/chsc.h
@@ -23,29 +23,29 @@ struct chsc_async_header {
__u32 key : 4;
__u32 : 28;
struct subchannel_id sid;
-} __attribute__ ((packed));
+};
struct chsc_async_area {
struct chsc_async_header header;
__u8 data[CHSC_SIZE - sizeof(struct chsc_async_header)];
-} __attribute__ ((packed));
+};
struct chsc_header {
__u16 length;
__u16 code;
-} __attribute__ ((packed));
+};
struct chsc_sync_area {
struct chsc_header header;
__u8 data[CHSC_SIZE - sizeof(struct chsc_header)];
-} __attribute__ ((packed));
+};
struct chsc_response_struct {
__u16 length;
__u16 code;
__u32 parms;
__u8 data[CHSC_SIZE - 2 * sizeof(__u16) - sizeof(__u32)];
-} __attribute__ ((packed));
+};
struct chsc_chp_cd {
struct chp_id chpid;
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 4cdaa55fabfe..9a50f02b9894 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -4,7 +4,7 @@
/*
* KVM s390 specific structures and definitions
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2018
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -225,6 +225,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_FPRS (1UL << 8)
#define KVM_SYNC_GSCB (1UL << 9)
#define KVM_SYNC_BPBC (1UL << 10)
+#define KVM_SYNC_ETOKEN (1UL << 11)
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
#define SDNXL (1UL << SDNXC)
@@ -258,6 +259,8 @@ struct kvm_sync_regs {
struct {
__u64 reserved1[2];
__u64 gscb[4];
+ __u64 etoken;
+ __u64 etoken_extension;
};
};
};
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 3510c0fd06f4..39d901476ee5 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -111,4 +111,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2fed39b26b42..dbfd1730e631 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -9,39 +9,21 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
# Do not trace early setup code
-CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_early_nobss.o = $(CC_FLAGS_FTRACE)
endif
-GCOV_PROFILE_als.o := n
GCOV_PROFILE_early.o := n
GCOV_PROFILE_early_nobss.o := n
-KCOV_INSTRUMENT_als.o := n
KCOV_INSTRUMENT_early.o := n
KCOV_INSTRUMENT_early_nobss.o := n
-UBSAN_SANITIZE_als.o := n
UBSAN_SANITIZE_early.o := n
UBSAN_SANITIZE_early_nobss.o := n
#
-# Use -march=z900 for als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE)
-CFLAGS_als.o += -march=z900
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
-endif
-
-CFLAGS_als.o += -D__NO_FORTIFY
-
-#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
CFLAGS_smp.o := -Wno-nonnull
@@ -61,13 +43,13 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
-obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o early_nobss.o
+obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
obj-y += nospec-branch.o
-extra-y += head.o head64.o vmlinux.lds
+extra-y += head64.o vmlinux.lds
obj-$(CONFIG_SYSFS) += nospec-sysfs.o
CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
@@ -99,5 +81,5 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o
obj-y += vdso64/
obj-$(CONFIG_COMPAT) += vdso32/
-chkbss := head.o head64.o als.o early_nobss.o
+chkbss := head64.o early_nobss.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index 607c5e9fba3d..2ce28bf0c5ec 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -183,3 +183,4 @@ COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags)
+COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 9f5ea9d87069..c3620bafc374 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -306,6 +306,15 @@ static void *kzalloc_panic(int len)
return rc;
}
+static const char *nt_name(Elf64_Word type)
+{
+ const char *name = "LINUX";
+
+ if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
+ name = KEXEC_CORE_NOTE_NAME;
+ return name;
+}
+
/*
* Initialize ELF note
*/
@@ -332,11 +341,26 @@ static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
{
- const char *note_name = "LINUX";
+ return nt_init_name(buf, type, desc, d_len, nt_name(type));
+}
+
+/*
+ * Calculate the size of ELF note
+ */
+static size_t nt_size_name(int d_len, const char *name)
+{
+ size_t size;
- if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
- note_name = KEXEC_CORE_NOTE_NAME;
- return nt_init_name(buf, type, desc, d_len, note_name);
+ size = sizeof(Elf64_Nhdr);
+ size += roundup(strlen(name) + 1, 4);
+ size += roundup(d_len, 4);
+
+ return size;
+}
+
+static inline size_t nt_size(Elf64_Word type, int d_len)
+{
+ return nt_size_name(d_len, nt_name(type));
}
/*
@@ -375,6 +399,29 @@ static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
}
/*
+ * Calculate size of ELF notes per cpu
+ */
+static size_t get_cpu_elf_notes_size(void)
+{
+ struct save_area *sa = NULL;
+ size_t size;
+
+ size = nt_size(NT_PRSTATUS, sizeof(struct elf_prstatus));
+ size += nt_size(NT_PRFPREG, sizeof(elf_fpregset_t));
+ size += nt_size(NT_S390_TIMER, sizeof(sa->timer));
+ size += nt_size(NT_S390_TODCMP, sizeof(sa->todcmp));
+ size += nt_size(NT_S390_TODPREG, sizeof(sa->todpreg));
+ size += nt_size(NT_S390_CTRS, sizeof(sa->ctrs));
+ size += nt_size(NT_S390_PREFIX, sizeof(sa->prefix));
+ if (MACHINE_HAS_VX) {
+ size += nt_size(NT_S390_VXRS_HIGH, sizeof(sa->vxrs_high));
+ size += nt_size(NT_S390_VXRS_LOW, sizeof(sa->vxrs_low));
+ }
+
+ return size;
+}
+
+/*
* Initialize prpsinfo note (new kernel)
*/
static void *nt_prpsinfo(void *ptr)
@@ -429,6 +476,30 @@ static void *nt_vmcoreinfo(void *ptr)
return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
}
+static size_t nt_vmcoreinfo_size(void)
+{
+ const char *name = "VMCOREINFO";
+ char nt_name[11];
+ Elf64_Nhdr note;
+ void *addr;
+
+ if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
+ return 0;
+
+ if (copy_oldmem_kernel(&note, addr, sizeof(note)))
+ return 0;
+
+ memset(nt_name, 0, sizeof(nt_name));
+ if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
+ sizeof(nt_name) - 1))
+ return 0;
+
+ if (strcmp(nt_name, name) != 0)
+ return 0;
+
+ return nt_size_name(note.n_descsz, name);
+}
+
/*
* Initialize final note (needed for /proc/vmcore code)
*/
@@ -539,6 +610,27 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
return ptr;
}
+static size_t get_elfcorehdr_size(int mem_chunk_cnt)
+{
+ size_t size;
+
+ size = sizeof(Elf64_Ehdr);
+ /* PT_NOTES */
+ size += sizeof(Elf64_Phdr);
+ /* nt_prpsinfo */
+ size += nt_size(NT_PRPSINFO, sizeof(struct elf_prpsinfo));
+ /* regsets */
+ size += get_cpu_cnt() * get_cpu_elf_notes_size();
+ /* nt_vmcoreinfo */
+ size += nt_vmcoreinfo_size();
+ /* nt_final */
+ size += sizeof(Elf64_Nhdr);
+ /* PT_LOADS */
+ size += mem_chunk_cnt * sizeof(Elf64_Phdr);
+
+ return size;
+}
+
/*
* Create ELF core header (new kernel)
*/
@@ -566,8 +658,8 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
mem_chunk_cnt = get_mem_chunk_cnt();
- alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
- mem_chunk_cnt * sizeof(Elf64_Phdr);
+ alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
+
hdr = kzalloc_panic(alloc_size);
/* Init elf header */
ptr = ehdr_init(hdr, mem_chunk_cnt);
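
A worked instance of the new size math, for a hypothetical note named "CORE" with an 8-byte descriptor:

/* nt_size_name(8, "CORE"):
 *	sizeof(Elf64_Nhdr)             = 12
 *	roundup(strlen("CORE") + 1, 4) =  8
 *	roundup(8, 4)                  =  8
 *	total                          = 28 bytes
 * summed over all notes, this replaces the old flat estimate of
 * 0x1000 plus 0x4a0 per cpu with an exact allocation
 */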
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 827699eb48fa..5b28b434f8a1 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -331,8 +331,20 @@ static void __init setup_boot_command_line(void)
append_to_cmdline(append_ipl_scpdata);
}
+static void __init check_image_bootable(void)
+{
+ if (!memcmp(EP_STRING, (void *)EP_OFFSET, strlen(EP_STRING)))
+ return;
+
+ sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
+ sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
+ sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
+ disabled_wait(0xbadb007);
+}
+
void __init startup_init(void)
{
+ check_image_bootable();
time_early_init();
init_kernel_storage_key();
lockdep_off();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f03402efab4b..150130c897c3 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -357,6 +357,10 @@ ENTRY(system_call)
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
+#ifdef CONFIG_DEBUG_RSEQ
+ lgr %r2,%r11
+ brasl %r14,rseq_syscall
+#endif
LOCKDEP_SYS_EXIT
.Lsysc_tif:
TSTMSK __PT_FLAGS(%r11),_PIF_WORK
@@ -1265,7 +1269,7 @@ cleanup_critical:
jl 0f
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
-0: BR_EX %r14
+0: BR_EX %r14,%r11
.align 8
.Lcleanup_table:
@@ -1301,7 +1305,7 @@ cleanup_critical:
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
- BR_EX %r14
+ BR_EX %r14,%r11
#endif
.Lcleanup_system_call:
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 961abfac2c5f..472fa2f1a4a5 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -83,7 +83,6 @@ long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user
DECLARE_PER_CPU(u64, mt_cycles[8]);
-void verify_facilities(void);
void gs_load_bc_cb(struct pt_regs *regs);
void set_fs_fixup(void);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index dc76d813e420..84be7f02d0c2 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -61,7 +61,7 @@ unsigned long ftrace_plt;
static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
{
-#ifdef CC_USING_HOTPATCH
+#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
/* brcl 0,0 */
insn->opc = 0xc004;
insn->disp = 0;
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 791cb9000e86..6d14ad42ba88 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -48,11 +48,23 @@ ENTRY(startup_continue)
# Early machine initialization and detection functions.
#
brasl %r14,startup_init
- lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
- # virtual and never return ...
+
+# check control registers
+ stctg %c0,%c15,0(%r15)
+ oi 6(%r15),0x60 # enable sigp emergency & external call
+ oi 4(%r15),0x10 # switch on low address protection
+ lctlg %c0,%c15,0(%r15)
+
+ lam 0,15,.Laregs-.LPG1(%r13) # load acrs needed by uaccess
+ brasl %r14,start_kernel # go to C code
+#
+# We returned from start_kernel ?!? PANIK
+#
+ basr %r13,0
+ lpswe .Ldw-.(%r13) # load disabled wait psw
+
.align 16
.LPG1:
-.Lentry:.quad 0x0000000180000000,_stext
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
.quad 0 # cr1: primary space segment table
.quad .Lduct # cr2: dispatchable unit control table
@@ -85,30 +97,5 @@ ENTRY(startup_continue)
.endr
.Llinkage_stack:
.long 0,0,0x89000000,0,0,0,0x8a000000,0
-
-ENTRY(_ehead)
-
- .org 0x100000 - 0x11000 # head.o ends at 0x11000
-#
-# startup-code, running in absolute addressing mode
-#
-ENTRY(_stext)
- basr %r13,0 # get base
-.LPG3:
-# check control registers
- stctg %c0,%c15,0(%r15)
- oi 6(%r15),0x60 # enable sigp emergency & external call
- oi 4(%r15),0x10 # switch on low address proctection
- lctlg %c0,%c15,0(%r15)
-
- lam 0,15,.Laregs-.LPG3(%r13) # load acrs needed by uaccess
- brasl %r14,start_kernel # go to C code
-#
-# We returned from start_kernel ?!? PANIK
-#
- basr %r13,0
- lpswe .Ldw-.(%r13) # load disabled wait psw
-
- .align 8
.Ldw: .quad 0x0002000180000000,0x0000000000000000
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 60f60afa645c..7c0a095e9c5f 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -321,38 +321,20 @@ static int kprobe_handler(struct pt_regs *regs)
* If we have no pre-handler or it returned 0, we
* continue with single stepping. If we have a
* pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry
- * for jprobe processing, so get out doing nothing
- * more here.
+ * for changing execution path, so get out doing
+ * nothing more here.
*/
push_kprobe(kcb, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ pop_kprobe(kcb);
+ preempt_enable_no_resched();
return 1;
+ }
kcb->kprobe_status = KPROBE_HIT_SS;
}
enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
return 1;
- } else if (kprobe_running()) {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- /*
- * Continuation after the jprobe completed and
- * caused the jprobe_return trap. The jprobe
- * break_handler "returns" to the original
- * function that still has the kprobe breakpoint
- * installed. We continue with single stepping.
- */
- kcb->kprobe_status = KPROBE_HIT_SS;
- enable_singlestep(kcb, regs,
- (unsigned long) p->ainsn.insn);
- return 1;
- } /* else:
- * No kprobe at this address and the current kprobe
- * has no break handler (no jprobe!). The kernel just
- * exploded, let the standard trap handler pick up the
- * pieces.
- */
} /* else:
* No kprobe at this address and no active kprobe. The trap has
* not been caused by a kprobe breakpoint. The race of breakpoint
@@ -452,9 +434,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
regs->psw.addr = orig_ret_address;
- pop_kprobe(get_kprobe_ctlblk());
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
@@ -661,60 +641,6 @@ int kprobe_exceptions_notify(struct notifier_block *self,
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long stack;
-
- memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
- /* setup return addr to the jprobe handler routine */
- regs->psw.addr = (unsigned long) jp->entry;
- regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
-
- /* r15 is the stack pointer */
- stack = (unsigned long) regs->gprs[15];
-
- memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
-
- /*
- * jprobes use jprobe_return() which skips the normal return
- * path of the function, and this messes up the accounting of the
- * function graph tracer to get messed up.
- *
- * Pause function graph tracing while performing the jprobe function.
- */
- pause_graph_tracing();
- return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
- asm volatile(".word 0x0002");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long stack;
-
- /* It's OK to start function graph tracing again */
- unpause_graph_tracing();
-
- stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];
-
- /* Put the regs back */
- memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
- /* put the stack back */
- memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
- preempt_enable_no_resched();
- return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
static struct kprobe trampoline = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
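
Under the new contract a pre-handler that diverts execution owns nothing beyond the redirect, since kprobe_handler() now pops the kprobe and re-enables preemption itself when the pre-handler returns nonzero. A hypothetical pre-handler written to that contract (my_target is made up):

static int divert_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* change the flow; returning nonzero tells kprobe_handler()
	 * to clean up and skip single-stepping */
	regs->psw.addr = (unsigned long) my_target;
	return 1;
}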
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 27110f3294ed..e93fbf02490c 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -35,7 +35,7 @@ ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
lgr %r1,%r15
-#ifndef CC_USING_HOTPATCH
+#if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
aghi %r0,MCOUNT_RETURN_FIXUP
#endif
aghi %r15,-STACK_FRAME_SIZE
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 18ae7b9c71d6..bdddaae96559 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -35,6 +35,8 @@ early_param("nospec", nospec_setup_early);
static int __init nospec_report(void)
{
+ if (test_facility(156))
+ pr_info("Spectre V2 mitigation: etokens\n");
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
pr_info("Spectre V2 mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
@@ -56,7 +58,15 @@ early_param("nospectre_v2", nospectre_v2_setup_early);
void __init nospec_auto_detect(void)
{
- if (IS_ENABLED(CC_USING_EXPOLINE)) {
+ if (test_facility(156)) {
+ /*
+ * The machine supports etokens.
+ * Disable expolines and disable nobp.
+ */
+ if (IS_ENABLED(CC_USING_EXPOLINE))
+ nospec_disable = 1;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ } else if (IS_ENABLED(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index 8affad5f18cb..e30e580ae362 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -13,6 +13,8 @@ ssize_t cpu_show_spectre_v1(struct device *dev,
ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ if (test_facility(156))
+ return sprintf(buf, "Mitigation: etokens\n");
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
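The etokens line is what userspace now sees through the generic vulnerabilities attribute. A small reader, assuming the standard sysfs path:

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

		if (!f)
			return 1;
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "Mitigation: etokens" */
		fclose(f);
		return 0;
	}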
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 0292d68e7dde..5c53e977be62 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -2,7 +2,7 @@
/*
* Performance event support for the System z CPU-measurement Sampling Facility
*
- * Copyright IBM Corp. 2013
+ * Copyright IBM Corp. 2013, 2018
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
#define KMSG_COMPONENT "cpum_sf"
@@ -665,7 +665,7 @@ static void cpumsf_output_event_pid(struct perf_event *event,
goto out;
/* Update the process ID (see also kernel/events/core.c) */
- data->tid_entry.pid = cpumsf_pid_type(event, pid, __PIDTYPE_TGID);
+ data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID);
data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID);
perf_output_sample(&handle, &header, data, event);
@@ -1587,6 +1587,17 @@ static void aux_buffer_free(void *data)
"%lu SDBTs\n", num_sdbt);
}
+static void aux_sdb_init(unsigned long sdb)
+{
+ struct hws_trailer_entry *te;
+
+ te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
+
+ /* Save clock base */
+ te->clock_base = 1;
+ memcpy(&te->progusage2, &tod_clock_base[1], 8);
+}
+
/*
* aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
* @cpu: On which to allocate, -1 means current
@@ -1666,6 +1677,7 @@ static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
/* Tail is the entry in a SDBT */
*tail = (unsigned long)pages[i];
aux->sdb_index[i] = (unsigned long)pages[i];
+ aux_sdb_init((unsigned long)pages[i]);
}
sfb->num_sdb = nr_pages;
diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c
index 54e2d634b849..4352a504f235 100644
--- a/arch/s390/kernel/perf_regs.c
+++ b/arch/s390/kernel/perf_regs.c
@@ -12,9 +12,6 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
{
freg_t fp;
- if (WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX))
- return 0;
-
if (idx >= PERF_REG_S390_R0 && idx <= PERF_REG_S390_R15)
return regs->gprs[idx];
@@ -33,7 +30,8 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
if (idx == PERF_REG_S390_PC)
return regs->psw.addr;
- return regs->gprs[idx];
+ WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX);
+ return 0;
}
#define REG_RESERVED (~((1UL << PERF_REG_S390_MAX) - 1))
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d82a9ec64ea9..c637c12f9e37 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -674,12 +674,12 @@ static void __init reserve_kernel(void)
#ifdef CONFIG_DMA_API_DEBUG
/*
* DMA_API_DEBUG code stumbles over addresses from the
- * range [_ehead, _stext]. Mark the memory as reserved
+ * range [PARMAREA_END, _stext]. Mark the memory as reserved
* so it is not used for CONFIG_DMA_API_DEBUG=y.
*/
memblock_reserve(0, PFN_PHYS(start_pfn));
#else
- memblock_reserve(0, (unsigned long)_ehead);
+ memblock_reserve(0, PARMAREA_END);
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
- (unsigned long)_stext);
#endif
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 2d2960ab3e10..22f08245aa5d 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -498,7 +498,7 @@ void do_signal(struct pt_regs *regs)
}
/* No longer in a system call */
clear_pt_regs_flag(regs, PIF_SYSCALL);
-
+ rseq_signal_deliver(&ksig, regs);
if (is_compat_task())
handle_signal32(&ksig, oldset, regs);
else
@@ -537,4 +537,5 @@ void do_notify_resume(struct pt_regs *regs)
{
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
diff --git a/arch/s390/kernel/syscalls/Makefile b/arch/s390/kernel/syscalls/Makefile
index 8ff96c08955f..4d929edc80a6 100644
--- a/arch/s390/kernel/syscalls/Makefile
+++ b/arch/s390/kernel/syscalls/Makefile
@@ -25,15 +25,15 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
define filechk_syshdr
- $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2"
+ $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2" < $<
endef
define filechk_sysnr
- $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget))
+ $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget)) < $<
endef
define filechk_syscalls
- $(CONFIG_SHELL) '$(systbl)' -S
+ $(CONFIG_SHELL) '$(systbl)' -S < $<
endef
syshdr_abi_unistd_32 := common,32
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 8b210ead7956..022fc099b628 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -389,3 +389,5 @@
379 common statx sys_statx compat_sys_statx
380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi
381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load
+382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
+383 common rseq sys_rseq compat_sys_rseq
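The two new rows hook the generic io_pgetevents and rseq implementations up to s390 syscall numbers 382 and 383. A hedged userspace probe for the new rseq entry point (the fallback number is taken from this table; unregistering nothing is expected to fail with EINVAL once the syscall exists, ENOSYS otherwise):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_rseq
	#define __NR_rseq 383	/* s390 number from this table */
	#endif

	int main(void)
	{
		long ret = syscall(__NR_rseq, NULL, 0, 0, 0);

		if (ret == -1 && errno == ENOSYS)
			puts("rseq: not implemented");
		else
			puts("rseq: implemented");	/* typically EINVAL here */
		return 0;
	}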
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 54f5496913fa..12f80d1f0415 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -59,6 +59,8 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
}
EXPORT_SYMBOL(stsi);
+#ifdef CONFIG_PROC_FS
+
static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
{
switch (encoding) {
@@ -301,6 +303,8 @@ static int __init sysinfo_create_proc(void)
}
device_initcall(sysinfo_create_proc);
+#endif /* CONFIG_PROC_FS */
+
/*
* Service levels interface.
*/
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index cf561160ea88..e8766beee5ad 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -221,17 +221,22 @@ void read_persistent_clock64(struct timespec64 *ts)
ext_to_timespec64(clk, ts);
}
-void read_boot_clock64(struct timespec64 *ts)
+void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
+ struct timespec64 *boot_offset)
{
unsigned char clk[STORE_CLOCK_EXT_SIZE];
+ struct timespec64 boot_time;
__u64 delta;
delta = initial_leap_seconds + TOD_UNIX_EPOCH;
- memcpy(clk, tod_clock_base, 16);
- *(__u64 *) &clk[1] -= delta;
- if (*(__u64 *) &clk[1] > delta)
+ memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE);
+ *(__u64 *)&clk[1] -= delta;
+ if (*(__u64 *)&clk[1] > delta)
clk[0]--;
- ext_to_timespec64(clk, ts);
+ ext_to_timespec64(clk, &boot_time);
+
+ read_persistent_clock64(wall_time);
+ *boot_offset = timespec64_sub(*wall_time, boot_time);
}
static u64 read_tod_clock(struct clocksource *cs)
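The replacement hook no longer hands the boot time to the timekeeping core directly; it reports the current wall-clock time plus the offset between that and the TOD value saved at IPL. A standalone sketch of the normalized subtraction that timespec64_sub() performs, with illustrative values:

	#include <stdio.h>
	#include <time.h>

	/* Normalize (a - b) the way the kernel's timespec64_sub() does. */
	static struct timespec ts_sub(struct timespec a, struct timespec b)
	{
		struct timespec r;

		r.tv_sec = a.tv_sec - b.tv_sec;
		r.tv_nsec = a.tv_nsec - b.tv_nsec;
		if (r.tv_nsec < 0) {
			r.tv_sec--;
			r.tv_nsec += 1000000000L;
		}
		return r;
	}

	int main(void)
	{
		struct timespec wall = { 1000, 100 };	/* wall time now */
		struct timespec boot = {  990, 200 };	/* wall time at boot */
		struct timespec off = ts_sub(wall, boot);

		/* prints 9.999999900: how long the system has been up */
		printf("%lld.%09ld\n", (long long)off.tv_sec, off.tv_nsec);
		return 0;
	}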
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 4b6e0397f66d..e8184a15578a 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -579,41 +579,33 @@ early_param("topology", topology_setup);
static int topology_ctl_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int len;
+ int enabled = topology_is_enabled();
int new_mode;
- char buf[2];
+ int zero = 0;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &enabled,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
+
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
- if (!*lenp || *ppos) {
- *lenp = 0;
- return 0;
- }
- if (!write) {
- strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
- ARRAY_SIZE(buf));
- len = strnlen(buf, ARRAY_SIZE(buf));
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- if (buf[0] != '0' && buf[0] != '1')
- return -EINVAL;
mutex_lock(&smp_cpu_state_mutex);
- new_mode = topology_get_mode(buf[0] == '1');
+ new_mode = topology_get_mode(enabled);
if (topology_mode != new_mode) {
topology_mode = new_mode;
topology_schedule_update();
}
mutex_unlock(&smp_cpu_state_mutex);
topology_flush_work();
-out:
- *lenp = len;
- *ppos += len;
- return 0;
+
+ return rc;
}
static struct ctl_table topology_ctl_table[] = {
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 09abae40f917..3031cc6dd0ab 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -47,7 +47,7 @@ static struct page **vdso64_pagelist;
*/
unsigned int __read_mostly vdso_enabled = 1;
-static int vdso_fault(const struct vm_special_mapping *sm,
+static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page **vdso_pagelist;
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index f0414f52817b..b43f8d33a369 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -19,7 +19,7 @@
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
-ENTRY(startup)
+ENTRY(startup_continue)
jiffies = jiffies_64;
PHDRS {
@@ -30,16 +30,12 @@ PHDRS {
SECTIONS
{
- . = 0x00000000;
+ . = 0x100000;
+ _stext = .; /* Start of text section */
.text : {
/* Text and read-only data */
+ _text = .;
HEAD_TEXT
- /*
- * E.g. perf doesn't like symbols starting at address zero,
- * therefore skip the initial PSW and channel program located
- * at address zero and let _text start at 0x200.
- */
- _text = 0x200;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
@@ -47,6 +43,7 @@ SECTIONS
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
+ *(.text.*_indirect_*)
*(.fixup)
*(.gnu.warning)
} :text = 0x0700
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index daa09f89ca2d..fcb55b02990e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1145,7 +1145,7 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
* yield-candidate.
*/
vcpu->preempted = true;
- swake_up(&vcpu->wq);
+ swake_up_one(&vcpu->wq);
vcpu->stat.halt_wakeup++;
}
/*
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3b7a5151b6a5..91ad4a9425c0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -172,6 +172,10 @@ static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");
+/* allow 1m huge page guest backing, if !nested */
+static int hpage;
+module_param(hpage, int, 0444);
+MODULE_PARM_DESC(hpage, "1m huge page backing support");
/*
* For now we handle at most 16 double words as this is what the s390 base
@@ -475,6 +479,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_AIS_MIGRATION:
r = 1;
break;
+ case KVM_CAP_S390_HPAGE_1M:
+ r = 0;
+ if (hpage)
+ r = 1;
+ break;
case KVM_CAP_S390_MEM_OP:
r = MEM_OP_MAX_SIZE;
break;
@@ -511,19 +520,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
}
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
+ struct kvm_memory_slot *memslot)
{
+ int i;
gfn_t cur_gfn, last_gfn;
- unsigned long address;
+ unsigned long gaddr, vmaddr;
struct gmap *gmap = kvm->arch.gmap;
+ DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
- /* Loop over all guest pages */
+ /* Loop over all guest segments */
+ cur_gfn = memslot->base_gfn;
last_gfn = memslot->base_gfn + memslot->npages;
- for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
- address = gfn_to_hva_memslot(memslot, cur_gfn);
+ for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
+ gaddr = gfn_to_gpa(cur_gfn);
+ vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
+ if (kvm_is_error_hva(vmaddr))
+ continue;
+
+ bitmap_zero(bitmap, _PAGE_ENTRIES);
+ gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
+ for (i = 0; i < _PAGE_ENTRIES; i++) {
+ if (test_bit(i, bitmap))
+ mark_page_dirty(kvm, cur_gfn + i);
+ }
- if (test_and_clear_guest_dirty(gmap->mm, address))
- mark_page_dirty(kvm, cur_gfn);
if (fatal_signal_pending(current))
return;
cond_resched();
@@ -667,6 +687,27 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
r ? "(not available)" : "(success)");
break;
+ case KVM_CAP_S390_HPAGE_1M:
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus)
+ r = -EBUSY;
+ else if (!hpage || kvm->arch.use_cmma)
+ r = -EINVAL;
+ else {
+ r = 0;
+ kvm->mm->context.allow_gmap_hpage_1m = 1;
+ /*
+ * We might have to create fake 4k page
+ * tables. To avoid that the hardware works on
+ * stale PGSTEs, we emulate these instructions.
+ */
+ kvm->arch.use_skf = 0;
+ kvm->arch.use_pfmfi = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
+ r ? "(not available)" : "(success)");
+ break;
case KVM_CAP_S390_USER_STSI:
VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
kvm->arch.user_stsi = 1;
@@ -714,10 +755,13 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
if (!sclp.has_cmma)
break;
- ret = -EBUSY;
VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
mutex_lock(&kvm->lock);
- if (!kvm->created_vcpus) {
+ if (kvm->created_vcpus)
+ ret = -EBUSY;
+ else if (kvm->mm->context.allow_gmap_hpage_1m)
+ ret = -EINVAL;
+ else {
kvm->arch.use_cmma = 1;
/* Not compatible with cmma. */
kvm->arch.use_pfmfi = 0;
@@ -862,54 +906,37 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
*/
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
- struct kvm_s390_migration_state *mgs;
struct kvm_memory_slot *ms;
- /* should be the only one */
struct kvm_memslots *slots;
- unsigned long ram_pages;
+ unsigned long ram_pages = 0;
int slotnr;
/* migration mode already enabled */
- if (kvm->arch.migration_state)
+ if (kvm->arch.migration_mode)
return 0;
-
slots = kvm_memslots(kvm);
if (!slots || !slots->used_slots)
return -EINVAL;
- mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
- if (!mgs)
- return -ENOMEM;
- kvm->arch.migration_state = mgs;
-
- if (kvm->arch.use_cmma) {
+ if (!kvm->arch.use_cmma) {
+ kvm->arch.migration_mode = 1;
+ return 0;
+ }
+ /* mark all the pages in active slots as dirty */
+ for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
+ ms = slots->memslots + slotnr;
/*
- * Get the first slot. They are reverse sorted by base_gfn, so
- * the first slot is also the one at the end of the address
- * space. We have verified above that at least one slot is
- * present.
+ * The second half of the bitmap is only used on x86,
+ * and would be wasted otherwise, so we put it to good
+ * use here to keep track of the state of the storage
+ * attributes.
*/
- ms = slots->memslots;
- /* round up so we only use full longs */
- ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
- /* allocate enough bytes to store all the bits */
- mgs->pgste_bitmap = vmalloc(ram_pages / 8);
- if (!mgs->pgste_bitmap) {
- kfree(mgs);
- kvm->arch.migration_state = NULL;
- return -ENOMEM;
- }
-
- mgs->bitmap_size = ram_pages;
- atomic64_set(&mgs->dirty_pages, ram_pages);
- /* mark all the pages in active slots as dirty */
- for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
- ms = slots->memslots + slotnr;
- bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
- }
-
- kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
+ memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
+ ram_pages += ms->npages;
}
+ atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
+ kvm->arch.migration_mode = 1;
+ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
return 0;
}
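The "second half of the bitmap" trick leans on a generic helper: user-visible dirty logging only ever touches the first half of the doubled allocation, so the second half is free for CMMA state. For reference, the helper looked roughly like this in include/linux/kvm_host.h at the time:

	static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
	{
		unsigned long len = kvm_dirty_bitmap_bytes(memslot);

		return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
	}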
@@ -919,21 +946,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
*/
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
- struct kvm_s390_migration_state *mgs;
-
/* migration mode already disabled */
- if (!kvm->arch.migration_state)
+ if (!kvm->arch.migration_mode)
return 0;
- mgs = kvm->arch.migration_state;
- kvm->arch.migration_state = NULL;
-
- if (kvm->arch.use_cmma) {
+ kvm->arch.migration_mode = 0;
+ if (kvm->arch.use_cmma)
kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
- /* We have to wait for the essa emulation to finish */
- synchronize_srcu(&kvm->srcu);
- vfree(mgs->pgste_bitmap);
- }
- kfree(mgs);
return 0;
}
@@ -961,7 +979,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm,
static int kvm_s390_vm_get_migration(struct kvm *kvm,
struct kvm_device_attr *attr)
{
- u64 mig = (kvm->arch.migration_state != NULL);
+ u64 mig = kvm->arch.migration_mode;
if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
return -ENXIO;
@@ -1540,6 +1558,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
uint8_t *keys;
uint64_t hva;
int srcu_idx, i, r = 0;
+ bool unlocked;
if (args->flags != 0)
return -EINVAL;
@@ -1564,9 +1583,11 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (r)
goto out;
+ i = 0;
down_read(&current->mm->mmap_sem);
srcu_idx = srcu_read_lock(&kvm->srcu);
- for (i = 0; i < args->count; i++) {
+ while (i < args->count) {
+ unlocked = false;
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
r = -EFAULT;
@@ -1580,8 +1601,14 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
}
r = set_guest_storage_key(current->mm, hva, keys[i], 0);
- if (r)
- break;
+ if (r) {
+ r = fixup_user_fault(current, current->mm, hva,
+ FAULT_FLAG_WRITE, &unlocked);
+ if (r)
+ break;
+ }
+ if (!r)
+ i++;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
up_read(&current->mm->mmap_sem);
@@ -1600,6 +1627,134 @@ out:
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
/*
+ * Similar to gfn_to_memslot, but returns the index of a memslot also when the
+ * address falls in a hole. In that case the index of one of the memslots
+ * bordering the hole is returned.
+ */
+static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
+{
+ int start = 0, end = slots->used_slots;
+ int slot = atomic_read(&slots->lru_slot);
+ struct kvm_memory_slot *memslots = slots->memslots;
+
+ if (gfn >= memslots[slot].base_gfn &&
+ gfn < memslots[slot].base_gfn + memslots[slot].npages)
+ return slot;
+
+ while (start < end) {
+ slot = start + (end - start) / 2;
+
+ if (gfn >= memslots[slot].base_gfn)
+ end = slot;
+ else
+ start = slot + 1;
+ }
+
+ if (gfn >= memslots[start].base_gfn &&
+ gfn < memslots[start].base_gfn + memslots[start].npages) {
+ atomic_set(&slots->lru_slot, start);
+ }
+
+ return start;
+}
+
+static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
+ u8 *res, unsigned long bufsize)
+{
+ unsigned long pgstev, hva, cur_gfn = args->start_gfn;
+
+ args->count = 0;
+ while (args->count < bufsize) {
+ hva = gfn_to_hva(kvm, cur_gfn);
+ /*
+ * We return an error if the first value was invalid, but we
+ * return successfully if at least one value was copied.
+ */
+ if (kvm_is_error_hva(hva))
+ return args->count ? 0 : -EFAULT;
+ if (get_pgste(kvm->mm, hva, &pgstev) < 0)
+ pgstev = 0;
+ res[args->count++] = (pgstev >> 24) & 0x43;
+ cur_gfn++;
+ }
+
+ return 0;
+}
+
+static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
+ unsigned long cur_gfn)
+{
+ int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
+ struct kvm_memory_slot *ms = slots->memslots + slotidx;
+ unsigned long ofs = cur_gfn - ms->base_gfn;
+
+ if (ms->base_gfn + ms->npages <= cur_gfn) {
+ slotidx--;
+ /* If we are above the highest slot, wrap around */
+ if (slotidx < 0)
+ slotidx = slots->used_slots - 1;
+
+ ms = slots->memslots + slotidx;
+ ofs = 0;
+ }
+ ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
+ while ((slotidx > 0) && (ofs >= ms->npages)) {
+ slotidx--;
+ ms = slots->memslots + slotidx;
+ ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
+ }
+ return ms->base_gfn + ofs;
+}
+
+static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
+ u8 *res, unsigned long bufsize)
+{
+ unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ struct kvm_memory_slot *ms;
+
+ cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
+ ms = gfn_to_memslot(kvm, cur_gfn);
+ args->count = 0;
+ args->start_gfn = cur_gfn;
+ if (!ms)
+ return 0;
+ next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
+ mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
+
+ while (args->count < bufsize) {
+ hva = gfn_to_hva(kvm, cur_gfn);
+ if (kvm_is_error_hva(hva))
+ return 0;
+ /* Decrement only if we actually flipped the bit to 0 */
+ if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
+ atomic64_dec(&kvm->arch.cmma_dirty_pages);
+ if (get_pgste(kvm->mm, hva, &pgstev) < 0)
+ pgstev = 0;
+ /* Save the value */
+ res[args->count++] = (pgstev >> 24) & 0x43;
+ /* If the next bit is too far away, stop. */
+ if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
+ return 0;
+ /* If we reached the previous "next", find the next one */
+ if (cur_gfn == next_gfn)
+ next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
+ /* Reached the end of memory or of the buffer, stop */
+ if ((next_gfn >= mem_end) ||
+ (next_gfn - args->start_gfn >= bufsize))
+ return 0;
+ cur_gfn++;
+ /* Reached the end of the current memslot, take the next one. */
+ if (cur_gfn - ms->base_gfn >= ms->npages) {
+ ms = gfn_to_memslot(kvm, cur_gfn);
+ if (!ms)
+ return 0;
+ }
+ }
+ return 0;
+}
+
+/*
* This function searches for the next page with dirty CMMA attributes, and
* saves the attributes in the buffer up to either the end of the buffer or
* until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
@@ -1610,22 +1765,18 @@ out:
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
struct kvm_s390_cmma_log *args)
{
- struct kvm_s390_migration_state *s = kvm->arch.migration_state;
- unsigned long bufsize, hva, pgstev, i, next, cur;
- int srcu_idx, peek, r = 0, rr;
- u8 *res;
-
- cur = args->start_gfn;
- i = next = pgstev = 0;
+ unsigned long bufsize;
+ int srcu_idx, peek, ret;
+ u8 *values;
- if (unlikely(!kvm->arch.use_cmma))
+ if (!kvm->arch.use_cmma)
return -ENXIO;
/* Invalid/unsupported flags were specified */
if (args->flags & ~KVM_S390_CMMA_PEEK)
return -EINVAL;
/* Migration mode query, and we are not doing a migration */
peek = !!(args->flags & KVM_S390_CMMA_PEEK);
- if (!peek && !s)
+ if (!peek && !kvm->arch.migration_mode)
return -EINVAL;
/* CMMA is disabled or was not used, or the buffer has length zero */
bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
@@ -1633,74 +1784,35 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
memset(args, 0, sizeof(*args));
return 0;
}
-
- if (!peek) {
- /* We are not peeking, and there are no dirty pages */
- if (!atomic64_read(&s->dirty_pages)) {
- memset(args, 0, sizeof(*args));
- return 0;
- }
- cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
- args->start_gfn);
- if (cur >= s->bitmap_size) /* nothing found, loop back */
- cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
- if (cur >= s->bitmap_size) { /* again! (very unlikely) */
- memset(args, 0, sizeof(*args));
- return 0;
- }
- next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
+ /* We are not peeking, and there are no dirty pages */
+ if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
+ memset(args, 0, sizeof(*args));
+ return 0;
}
- res = vmalloc(bufsize);
- if (!res)
+ values = vmalloc(bufsize);
+ if (!values)
return -ENOMEM;
- args->start_gfn = cur;
-
down_read(&kvm->mm->mmap_sem);
srcu_idx = srcu_read_lock(&kvm->srcu);
- while (i < bufsize) {
- hva = gfn_to_hva(kvm, cur);
- if (kvm_is_error_hva(hva)) {
- r = -EFAULT;
- break;
- }
- /* decrement only if we actually flipped the bit to 0 */
- if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
- atomic64_dec(&s->dirty_pages);
- r = get_pgste(kvm->mm, hva, &pgstev);
- if (r < 0)
- pgstev = 0;
- /* save the value */
- res[i++] = (pgstev >> 24) & 0x43;
- /*
- * if the next bit is too far away, stop.
- * if we reached the previous "next", find the next one
- */
- if (!peek) {
- if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
- break;
- if (cur == next)
- next = find_next_bit(s->pgste_bitmap,
- s->bitmap_size, cur + 1);
- /* reached the end of the bitmap or of the buffer, stop */
- if ((next >= s->bitmap_size) ||
- (next >= args->start_gfn + bufsize))
- break;
- }
- cur++;
- }
+ if (peek)
+ ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
+ else
+ ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
srcu_read_unlock(&kvm->srcu, srcu_idx);
up_read(&kvm->mm->mmap_sem);
- args->count = i;
- args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
- rr = copy_to_user((void __user *)args->values, res, args->count);
- if (rr)
- r = -EFAULT;
+ if (kvm->arch.migration_mode)
+ args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
+ else
+ args->remaining = 0;
- vfree(res);
- return r;
+ if (copy_to_user((void __user *)args->values, values, args->count))
+ ret = -EFAULT;
+
+ vfree(values);
+ return ret;
}
/*
@@ -2139,10 +2251,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm);
kvm_s390_vsie_destroy(kvm);
- if (kvm->arch.migration_state) {
- vfree(kvm->arch.migration_state->pgste_bitmap);
- kfree(kvm->arch.migration_state);
- }
KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
@@ -2300,6 +2408,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
if (test_kvm_facility(vcpu->kvm, 133))
vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
+ if (test_kvm_facility(vcpu->kvm, 156))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
/* fprs can be synchronized via vrs, even if the guest has no vx. With
* MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
*/
@@ -2549,7 +2659,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
}
if (test_kvm_facility(vcpu->kvm, 139))
vcpu->arch.sie_block->ecd |= ECD_MEF;
-
+ if (test_kvm_facility(vcpu->kvm, 156))
+ vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
if (vcpu->arch.sie_block->gd) {
vcpu->arch.sie_block->eca |= ECA_AIV;
VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
@@ -3467,6 +3578,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
preempt_enable();
}
+ /* SIE will load etoken directly from SDNX and therefore kvm_run */
kvm_run->kvm_dirty_regs = 0;
}
@@ -3506,7 +3618,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
__ctl_clear_bit(2, 4);
vcpu->arch.host_gscb = NULL;
}
-
+ /* SIE will save etoken directly into SDNX and therefore kvm_run */
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -4082,6 +4194,11 @@ static int __init kvm_s390_init(void)
return -ENODEV;
}
+ if (nested && hpage) {
+ pr_info("nested (vSIE) and hpage (huge page backing) can currently not be activated concurrently");
+ return -EINVAL;
+ }
+
for (i = 0; i < 16; i++)
kvm_s390_fac_base[i] |=
S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
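Userspace opts in per VM through KVM_ENABLE_CAP, before any vCPU exists and with CMMA left disabled, matching the -EBUSY/-EINVAL checks above. A hedged sketch using only standard KVM ioctls (error handling trimmed):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);
		struct kvm_enable_cap cap;

		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_S390_HPAGE_1M;

		/* reports 1 only when the hpage module parameter is set */
		if (ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M) == 1 &&
		    ioctl(vm, KVM_ENABLE_CAP, &cap) == 0)
			printf("1M huge page backing enabled\n");
		return 0;
	}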
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index eb0eb60c7be6..d68f10441a16 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -205,13 +205,10 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
int rc;
- struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
trace_kvm_s390_skey_related_inst(vcpu);
/* Already enabled? */
- if (vcpu->kvm->arch.use_skf &&
- !(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
- !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
+ if (vcpu->arch.skey_enabled)
return 0;
rc = s390_enable_skey();
@@ -222,9 +219,10 @@ int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
if (!vcpu->kvm->arch.use_skf)
- sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+ vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
else
- sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
+ vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
+ vcpu->arch.skey_enabled = true;
return 0;
}
@@ -246,9 +244,10 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
static int handle_iske(struct kvm_vcpu *vcpu)
{
- unsigned long addr;
+ unsigned long gaddr, vmaddr;
unsigned char key;
int reg1, reg2;
+ bool unlocked;
int rc;
vcpu->stat.instruction_iske++;
@@ -262,18 +261,28 @@ static int handle_iske(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
- addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
- addr = kvm_s390_logical_to_effective(vcpu, addr);
- addr = kvm_s390_real_to_abs(vcpu, addr);
- addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
- if (kvm_is_error_hva(addr))
+ gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+ gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
+ gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
+ vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+retry:
+ unlocked = false;
down_read(&current->mm->mmap_sem);
- rc = get_guest_storage_key(current->mm, addr, &key);
- up_read(&current->mm->mmap_sem);
+ rc = get_guest_storage_key(current->mm, vmaddr, &key);
+
+ if (rc) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ if (!rc) {
+ up_read(&current->mm->mmap_sem);
+ goto retry;
+ }
+ }
if (rc)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ up_read(&current->mm->mmap_sem);
vcpu->run->s.regs.gprs[reg1] &= ~0xff;
vcpu->run->s.regs.gprs[reg1] |= key;
return 0;
@@ -281,8 +290,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
- unsigned long addr;
+ unsigned long vmaddr, gaddr;
int reg1, reg2;
+ bool unlocked;
int rc;
vcpu->stat.instruction_rrbe++;
@@ -296,19 +306,27 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
- addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
- addr = kvm_s390_logical_to_effective(vcpu, addr);
- addr = kvm_s390_real_to_abs(vcpu, addr);
- addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
- if (kvm_is_error_hva(addr))
+ gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+ gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
+ gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
+ vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+retry:
+ unlocked = false;
down_read(&current->mm->mmap_sem);
- rc = reset_guest_reference_bit(current->mm, addr);
- up_read(&current->mm->mmap_sem);
+ rc = reset_guest_reference_bit(current->mm, vmaddr);
+ if (rc < 0) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ if (!rc) {
+ up_read(&current->mm->mmap_sem);
+ goto retry;
+ }
+ }
if (rc < 0)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+ up_read(&current->mm->mmap_sem);
kvm_s390_set_psw_cc(vcpu, rc);
return 0;
}
@@ -323,6 +341,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
unsigned long start, end;
unsigned char key, oldkey;
int reg1, reg2;
+ bool unlocked;
int rc;
vcpu->stat.instruction_sske++;
@@ -355,19 +374,28 @@ static int handle_sske(struct kvm_vcpu *vcpu)
}
while (start != end) {
- unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+ unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+ unlocked = false;
- if (kvm_is_error_hva(addr))
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
down_read(&current->mm->mmap_sem);
- rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
+ rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
m3 & SSKE_NQ, m3 & SSKE_MR,
m3 & SSKE_MC);
- up_read(&current->mm->mmap_sem);
- if (rc < 0)
+
+ if (rc < 0) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ rc = !rc ? -EAGAIN : rc;
+ }
+ if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- start += PAGE_SIZE;
+
+ up_read(&current->mm->mmap_sem);
+ if (rc >= 0)
+ start += PAGE_SIZE;
}
if (m3 & (SSKE_MC | SSKE_MR)) {
@@ -948,15 +976,16 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
while (start != end) {
- unsigned long useraddr;
+ unsigned long vmaddr;
+ bool unlocked = false;
/* Translate guest address to host address */
- useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
- if (kvm_is_error_hva(useraddr))
+ vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
- if (clear_user((void __user *)useraddr, PAGE_SIZE))
+ if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
@@ -966,14 +995,20 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
if (rc)
return rc;
down_read(&current->mm->mmap_sem);
- rc = cond_set_guest_storage_key(current->mm, useraddr,
+ rc = cond_set_guest_storage_key(current->mm, vmaddr,
key, NULL, nq, mr, mc);
- up_read(&current->mm->mmap_sem);
- if (rc < 0)
+ if (rc < 0) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ rc = !rc ? -EAGAIN : rc;
+ }
+ if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- }
- start += PAGE_SIZE;
+ up_read(&current->mm->mmap_sem);
+ if (rc >= 0)
+ start += PAGE_SIZE;
+ }
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
@@ -987,9 +1022,11 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
return 0;
}
-static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
+/*
+ * Must be called with relevant read locks held (kvm->mm->mmap_sem, kvm->srcu)
+ */
+static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
- struct kvm_s390_migration_state *ms = vcpu->kvm->arch.migration_state;
int r1, r2, nappended, entries;
unsigned long gfn, hva, res, pgstev, ptev;
unsigned long *cbrlo;
@@ -1039,10 +1076,12 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
cbrlo[entries] = gfn << PAGE_SHIFT;
}
- if (orc && gfn < ms->bitmap_size) {
- /* increment only if we are really flipping the bit to 1 */
- if (!test_and_set_bit(gfn, ms->pgste_bitmap))
- atomic64_inc(&ms->dirty_pages);
+ if (orc) {
+ struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);
+
+ /* Increment only if we are really flipping the bit */
+ if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
+ atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
}
return nappended;
@@ -1071,7 +1110,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
: ESSA_SET_STABLE_IF_RESIDENT))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- if (likely(!vcpu->kvm->arch.migration_state)) {
+ if (!vcpu->kvm->arch.migration_mode) {
/*
* CMMA is enabled in the KVM settings, but is disabled in
* the SIE block and in the mm_context, and we are not doing
@@ -1099,10 +1138,16 @@ static int handle_essa(struct kvm_vcpu *vcpu)
/* Retry the ESSA instruction */
kvm_s390_retry_instr(vcpu);
} else {
- /* Account for the possible extra cbrl entry */
- i = do_essa(vcpu, orc);
+ int srcu_idx;
+
+ down_read(&vcpu->kvm->mm->mmap_sem);
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ i = __do_essa(vcpu, orc);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+ up_read(&vcpu->kvm->mm->mmap_sem);
if (i < 0)
return i;
+ /* Account for the possible extra cbrl entry */
entries += i;
}
vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 84c89cb9636f..63844b95c22c 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -2,7 +2,7 @@
/*
* kvm nested virtualization support for s390x
*
- * Copyright IBM Corp. 2016
+ * Copyright IBM Corp. 2016, 2018
*
* Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
*/
@@ -378,6 +378,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (test_kvm_facility(vcpu->kvm, 139))
scb_s->ecd |= scb_o->ecd & ECD_MEF;
+ /* etoken */
+ if (test_kvm_facility(vcpu->kvm, 156))
+ scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
+
prepare_ibc(vcpu, vsie_page);
rc = shadow_crycb(vcpu, vsie_page);
out:
@@ -627,7 +631,8 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vsie_page->riccbd_gpa = gpa;
scb_s->riccbd = hpa;
}
- if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
+ if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
+ (scb_s->ecd & ECD_ETOKENF)) {
unsigned long sdnxc;
gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
@@ -818,6 +823,8 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
* - < 0 if an error occurred
*/
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ __releases(vcpu->kvm->srcu)
+ __acquires(vcpu->kvm->srcu)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index 2311f15be9cf..40c4d59c926e 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -17,7 +17,7 @@
ENTRY(memmove)
ltgr %r4,%r4
lgr %r1,%r2
- bzr %r14
+ jz .Lmemmove_exit
aghi %r4,-1
clgr %r2,%r3
jnh .Lmemmove_forward
@@ -36,6 +36,7 @@ ENTRY(memmove)
.Lmemmove_forward_remainder:
larl %r5,.Lmemmove_mvc
ex %r4,0(%r5)
+.Lmemmove_exit:
BR_EX %r14
.Lmemmove_reverse:
ic %r0,0(%r4,%r3)
@@ -65,7 +66,7 @@ EXPORT_SYMBOL(memmove)
*/
ENTRY(memset)
ltgr %r4,%r4
- bzr %r14
+ jz .Lmemset_exit
ltgr %r3,%r3
jnz .Lmemset_fill
aghi %r4,-1
@@ -80,6 +81,7 @@ ENTRY(memset)
.Lmemset_clear_remainder:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
+.Lmemset_exit:
BR_EX %r14
.Lmemset_fill:
cghi %r4,1
@@ -115,7 +117,7 @@ EXPORT_SYMBOL(memset)
*/
ENTRY(memcpy)
ltgr %r4,%r4
- bzr %r14
+ jz .Lmemcpy_exit
aghi %r4,-1
srlg %r5,%r4,8
ltgr %r5,%r5
@@ -124,6 +126,7 @@ ENTRY(memcpy)
.Lmemcpy_remainder:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
+.Lmemcpy_exit:
BR_EX %r14
.Lmemcpy_loop:
mvc 0(256,%r1),0(%r3)
@@ -145,9 +148,9 @@ EXPORT_SYMBOL(memcpy)
.macro __MEMSET bits,bytes,insn
ENTRY(__memset\bits)
ltgr %r4,%r4
- bzr %r14
+ jz .L__memset_exit\bits
cghi %r4,\bytes
- je .L__memset_exit\bits
+ je .L__memset_store\bits
aghi %r4,-(\bytes+1)
srlg %r5,%r4,8
ltgr %r5,%r5
@@ -163,8 +166,9 @@ ENTRY(__memset\bits)
larl %r5,.L__memset_mvc\bits
ex %r4,0(%r5)
BR_EX %r14
-.L__memset_exit\bits:
+.L__memset_store\bits:
\insn %r3,0(%r2)
+.L__memset_exit\bits:
BR_EX %r14
.L__memset_mvc\bits:
mvc \bytes(1,%r1),0(%r1)
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 6cf024eb2085..510a18299196 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -191,12 +191,7 @@ static void cmm_set_timer(void)
del_timer(&cmm_timer);
return;
}
- if (timer_pending(&cmm_timer)) {
- if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
- return;
- }
- cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
- add_timer(&cmm_timer);
+ mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds * HZ);
}
static void cmm_timer_fn(struct timer_list *unused)
@@ -251,45 +246,42 @@ static int cmm_skip_blanks(char *cp, char **endp)
return str != cp;
}
-static struct ctl_table cmm_table[];
-
static int cmm_pages_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- char buf[16], *p;
- unsigned int len;
- long nr;
+ long nr = cmm_get_pages();
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &nr,
+ .maxlen = sizeof(long),
+ };
+ int rc;
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
+ rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
- if (write) {
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- cmm_skip_blanks(buf, &p);
- nr = simple_strtoul(p, &p, 0);
- if (ctl == &cmm_table[0])
- cmm_set_pages(nr);
- else
- cmm_add_timed_pages(nr);
- } else {
- if (ctl == &cmm_table[0])
- nr = cmm_get_pages();
- else
- nr = cmm_get_timed_pages();
- len = sprintf(buf, "%ld\n", nr);
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- }
- *lenp = len;
- *ppos += len;
+ cmm_set_pages(nr);
+ return 0;
+}
+
+static int cmm_timed_pages_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ long nr = cmm_get_timed_pages();
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &nr,
+ .maxlen = sizeof(long),
+ };
+ int rc;
+
+ rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
+
+ cmm_add_timed_pages(nr);
return 0;
}
@@ -338,7 +330,7 @@ static struct ctl_table cmm_table[] = {
{
.procname = "cmm_timed_pages",
.mode = 0644,
- .proc_handler = cmm_pages_handler,
+ .proc_handler = cmm_timed_pages_handler,
},
{
.procname = "cmm_timeout",
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 6ad15d3fab81..84111a43ea29 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -80,7 +80,7 @@ struct qin64 {
struct dcss_segment {
struct list_head list;
char dcss_name[8];
- char res_name[15];
+ char res_name[16];
unsigned long start_addr;
unsigned long end;
atomic_t ref_count;
@@ -433,7 +433,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
memcpy(&seg->res_name, seg->dcss_name, 8);
EBCASC(seg->res_name, 8);
seg->res_name[8] = '\0';
- strncat(seg->res_name, " (DCSS)", 7);
+ strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
seg->res->name = seg->res_name;
rc = seg->vm_segtype;
if (rc == SEG_TYPE_SC ||
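The resize from 15 to 16 bytes fixes an off-by-one: an 8-character segment name plus " (DCSS)" (7 characters) plus the NUL terminator needs exactly 16 bytes, which is also why the strncat becomes a bounded strlcat. A quick demonstration of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		char res_name[16];
		int n = snprintf(res_name, sizeof(res_name), "%.8s%s",
				 "SEGMENT1", " (DCSS)");

		/* n == 15 characters, i.e. 16 bytes with the terminator;
		 * a char[15] cannot hold the full string. */
		printf("\"%s\" needs %d+1 bytes\n", res_name, n);
		return 0;
	}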
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e074480d3598..72af23bacbb5 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -341,7 +341,8 @@ static noinline int signal_return(struct pt_regs *regs)
return -EACCES;
}
-static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
+static noinline void do_fault_error(struct pt_regs *regs, int access,
+ vm_fault_t fault)
{
int si_code;
@@ -401,7 +402,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
-static inline int do_exception(struct pt_regs *regs, int access)
+static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
struct gmap *gmap;
struct task_struct *tsk;
@@ -411,7 +412,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
unsigned long trans_exc_code;
unsigned long address;
unsigned int flags;
- int fault;
+ vm_fault_t fault;
tsk = current;
/*
@@ -502,6 +503,8 @@ retry:
/* No reason to continue if interrupted by SIGKILL. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
fault = VM_FAULT_SIGNAL;
+ if (flags & FAULT_FLAG_RETRY_NOWAIT)
+ goto out_up;
goto out;
}
if (unlikely(fault & VM_FAULT_ERROR))
@@ -562,7 +565,8 @@ out:
void do_protection_exception(struct pt_regs *regs)
{
unsigned long trans_exc_code;
- int access, fault;
+ int access;
+ vm_fault_t fault;
trans_exc_code = regs->int_parm_long;
/*
@@ -597,7 +601,8 @@ NOKPROBE_SYMBOL(do_protection_exception);
void do_dat_exception(struct pt_regs *regs)
{
- int access, fault;
+ int access;
+ vm_fault_t fault;
access = VM_READ | VM_EXEC | VM_WRITE;
fault = do_exception(regs, access);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index bc56ec8abcf7..bb44990c8212 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2,8 +2,10 @@
/*
* KVM guest address space mapping code
*
- * Copyright IBM Corp. 2007, 2016
+ * Copyright IBM Corp. 2007, 2016, 2018
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * David Hildenbrand <david@redhat.com>
+ * Janosch Frank <frankja@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
@@ -521,6 +523,9 @@ void gmap_unlink(struct mm_struct *mm, unsigned long *table,
rcu_read_unlock();
}
+static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
+ unsigned long gaddr);
+
/**
* gmap_link - set up shadow page tables to connect a host to a guest address
* @gmap: pointer to guest mapping meta data structure
@@ -541,6 +546,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
+ u64 unprot;
int rc;
BUG_ON(gmap_is_shadow(gmap));
@@ -584,8 +590,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
return -EFAULT;
pmd = pmd_offset(pud, vmaddr);
VM_BUG_ON(pmd_none(*pmd));
- /* large pmds cannot yet be handled */
- if (pmd_large(*pmd))
+ /* Are we allowed to use huge pages? */
+ if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
return -EFAULT;
/* Link gmap segment table entry location to page table. */
rc = radix_tree_preload(GFP_KERNEL);
@@ -596,10 +602,22 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
if (*table == _SEGMENT_ENTRY_EMPTY) {
rc = radix_tree_insert(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT, table);
- if (!rc)
- *table = pmd_val(*pmd);
- } else
- rc = 0;
+ if (!rc) {
+ if (pmd_large(*pmd)) {
+ *table = (pmd_val(*pmd) &
+ _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
+ | _SEGMENT_ENTRY_GMAP_UC;
+ } else
+ *table = pmd_val(*pmd) &
+ _SEGMENT_ENTRY_HARDWARE_BITS;
+ }
+ } else if (*table & _SEGMENT_ENTRY_PROTECT &&
+ !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
+ unprot = (u64)*table;
+ unprot &= ~_SEGMENT_ENTRY_PROTECT;
+ unprot |= _SEGMENT_ENTRY_GMAP_UC;
+ gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
+ }
spin_unlock(&gmap->guest_table_lock);
spin_unlock(ptl);
radix_tree_preload_end();
@@ -690,6 +708,12 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
vmaddr |= gaddr & ~PMD_MASK;
/* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr);
+ /*
+ * We do not discard pages that are backed by
+ * hugetlbfs, so we don't have to refault them.
+ */
+ if (vma && is_vm_hugetlb_page(vma))
+ continue;
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size);
}
@@ -864,7 +888,128 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
*/
static void gmap_pte_op_end(spinlock_t *ptl)
{
- spin_unlock(ptl);
+ if (ptl)
+ spin_unlock(ptl);
+}
+
+/**
+ * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
+ * and return the pmd pointer
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ *
+ * Returns a pointer to the pmd for a guest address, or NULL
+ */
+static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
+{
+ pmd_t *pmdp;
+
+ BUG_ON(gmap_is_shadow(gmap));
+ spin_lock(&gmap->guest_table_lock);
+ pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
+
+ if (!pmdp || pmd_none(*pmdp)) {
+ spin_unlock(&gmap->guest_table_lock);
+ return NULL;
+ }
+
+ /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
+ if (!pmd_large(*pmdp))
+ spin_unlock(&gmap->guest_table_lock);
+ return pmdp;
+}
+
+/**
+ * gmap_pmd_op_end - release the guest_table_lock if needed
+ * @gmap: pointer to the guest mapping meta data structure
+ * @pmdp: pointer to the pmd
+ */
+static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
+{
+ if (pmd_large(*pmdp))
+ spin_unlock(&gmap->guest_table_lock);
+}
+
+/*
+ * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
+ * @pmdp: pointer to the pmd to be protected
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: notification bits to set
+ *
+ * Returns:
+ * 0 if successfully protected
+ * -EAGAIN if a fixup is needed
+ * -EINVAL if unsupported notifier bits have been specified
+ *
+ * Expected to be called with sg->mm->mmap_sem in read and
+ * guest_table_lock held.
+ */
+static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
+ pmd_t *pmdp, int prot, unsigned long bits)
+{
+ int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
+ int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
+ pmd_t new = *pmdp;
+
+ /* Fixup needed */
+ if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
+ return -EAGAIN;
+
+ if (prot == PROT_NONE && !pmd_i) {
+ pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
+ gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
+ }
+
+ if (prot == PROT_READ && !pmd_p) {
+ pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
+ pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
+ gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
+ }
+
+ if (bits & GMAP_NOTIFY_MPROT)
+ pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
+
+ /* Shadow GMAP protection needs split PMDs */
+ if (bits & GMAP_NOTIFY_SHADOW)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * gmap_protect_pte - remove access rights to memory and set pgste bits
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @pmdp: pointer to the pmd associated with the pte
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: notification bits to set
+ *
+ * Returns 0 if successfully protected, -ENOMEM if out of memory and
+ * -EAGAIN if a fixup is needed.
+ *
+ * Expected to be called with sg->mm->mmap_sem in read
+ */
+static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
+ pmd_t *pmdp, int prot, unsigned long bits)
+{
+ int rc;
+ pte_t *ptep;
+ spinlock_t *ptl = NULL;
+ unsigned long pbits = 0;
+
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+ return -EAGAIN;
+
+ ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
+ if (!ptep)
+ return -ENOMEM;
+
+ pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
+ pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
+ /* Protect and unlock. */
+ rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
+ gmap_pte_op_end(ptl);
+ return rc;
}
/*
@@ -883,30 +1028,45 @@ static void gmap_pte_op_end(spinlock_t *ptl)
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
unsigned long len, int prot, unsigned long bits)
{
- unsigned long vmaddr;
- spinlock_t *ptl;
- pte_t *ptep;
+ unsigned long vmaddr, dist;
+ pmd_t *pmdp;
int rc;
BUG_ON(gmap_is_shadow(gmap));
while (len) {
rc = -EAGAIN;
- ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
- if (ptep) {
- rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
- gmap_pte_op_end(ptl);
+ pmdp = gmap_pmd_op_walk(gmap, gaddr);
+ if (pmdp) {
+ if (!pmd_large(*pmdp)) {
+ rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
+ bits);
+ if (!rc) {
+ len -= PAGE_SIZE;
+ gaddr += PAGE_SIZE;
+ }
+ } else {
+ rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
+ bits);
+ if (!rc) {
+ dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
+ len = len < dist ? 0 : len - dist;
+ gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
+ }
+ }
+ gmap_pmd_op_end(gmap, pmdp);
}
if (rc) {
+ if (rc == -EINVAL)
+ return rc;
+
+ /* -EAGAIN, fixup of userspace mm and gmap */
vmaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(vmaddr))
return vmaddr;
rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
if (rc)
return rc;
- continue;
}
- gaddr += PAGE_SIZE;
- len -= PAGE_SIZE;
}
return 0;
}
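The huge-page branch advances by whole 1M segments: dist is the distance from gaddr to the end of its segment, so a misaligned start consumes a partial segment first and the loop realigns afterwards. A worked example of the stepping:

	#include <stdio.h>

	#define HPAGE_SIZE (1UL << 20)	/* 1M segments on s390 */
	#define HPAGE_MASK (~(HPAGE_SIZE - 1))

	int main(void)
	{
		/* start 0x300 bytes into a segment, protect 3M in total */
		unsigned long gaddr = 0x00100300UL, len = 3UL << 20, dist;

		while (len) {
			dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
			printf("segment %#lx consumes %#lx bytes\n",
			       gaddr & HPAGE_MASK, dist < len ? dist : len);
			len = len < dist ? 0 : len - dist;
			gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
		}
		return 0;	/* four iterations: 3M misaligned spans 4 segments */
	}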
@@ -935,7 +1095,7 @@ int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
if (!MACHINE_HAS_ESOP && prot == PROT_READ)
return -EINVAL;
down_read(&gmap->mm->mmap_sem);
- rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
+ rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
up_read(&gmap->mm->mmap_sem);
return rc;
}
@@ -1474,6 +1634,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
unsigned long limit;
int rc;
+ BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
BUG_ON(gmap_is_shadow(parent));
spin_lock(&parent->shadow_lock);
sg = gmap_find_shadow(parent, asce, edat_level);
@@ -1526,7 +1687,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
down_read(&parent->mm->mmap_sem);
rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
- PROT_READ, PGSTE_VSIE_BIT);
+ PROT_READ, GMAP_NOTIFY_SHADOW);
up_read(&parent->mm->mmap_sem);
spin_lock(&parent->shadow_lock);
new->initialized = true;
@@ -2092,6 +2253,225 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
}
EXPORT_SYMBOL_GPL(ptep_notify);
+static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
+ unsigned long gaddr)
+{
+ pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
+ gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
+}
+
+/**
+ * gmap_pmdp_xchg - exchange a gmap pmd with another
+ * @gmap: pointer to the guest address space structure
+ * @pmdp: pointer to the pmd entry
+ * @new: replacement entry
+ * @gaddr: the affected guest address
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
+ unsigned long gaddr)
+{
+ gaddr &= HPAGE_MASK;
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
+ IDTE_GLOBAL);
+ else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
+ else
+ __pmdp_csp(pmdp);
+ *pmdp = new;
+}
+
+static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
+ int purge)
+{
+ pmd_t *pmdp;
+ struct gmap *gmap;
+ unsigned long gaddr;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+ spin_lock(&gmap->guest_table_lock);
+ pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
+ vmaddr >> PMD_SHIFT);
+ if (pmdp) {
+ gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC));
+ if (purge)
+ __pmdp_csp(pmdp);
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+ }
+ spin_unlock(&gmap->guest_table_lock);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
+ * flushing
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
+{
+ gmap_pmdp_clear(mm, vmaddr, 0);
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
+
+/**
+ * gmap_pmdp_csp - csp all affected guest pmd entries
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
+{
+ gmap_pmdp_clear(mm, vmaddr, 1);
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
+
+/**
+ * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
+{
+ unsigned long *entry, gaddr;
+ struct gmap *gmap;
+ pmd_t *pmdp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+ spin_lock(&gmap->guest_table_lock);
+ entry = radix_tree_delete(&gmap->host_to_guest,
+ vmaddr >> PMD_SHIFT);
+ if (entry) {
+ pmdp = (pmd_t *)entry;
+ gaddr = __gmap_segment_gaddr(entry);
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC));
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
+ gmap->asce, IDTE_LOCAL);
+ else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
+ *entry = _SEGMENT_ENTRY_EMPTY;
+ }
+ spin_unlock(&gmap->guest_table_lock);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
+
+/**
+ * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
+{
+ unsigned long *entry, gaddr;
+ struct gmap *gmap;
+ pmd_t *pmdp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+ spin_lock(&gmap->guest_table_lock);
+ entry = radix_tree_delete(&gmap->host_to_guest,
+ vmaddr >> PMD_SHIFT);
+ if (entry) {
+ pmdp = (pmd_t *)entry;
+ gaddr = __gmap_segment_gaddr(entry);
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC));
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
+ gmap->asce, IDTE_GLOBAL);
+ else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
+ else
+ __pmdp_csp(pmdp);
+ *entry = _SEGMENT_ENTRY_EMPTY;
+ }
+ spin_unlock(&gmap->guest_table_lock);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
+
+/**
+ * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
+ * @gmap: pointer to guest address space
+ * @pmdp: pointer to the pmd to be tested
+ * @gaddr: virtual address in the guest address space
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
+ unsigned long gaddr)
+{
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+ return false;
+
+ /* Already protected memory that did not change is clean */
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
+ !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
+ return false;
+
+ /* Clear UC indication and reset protection */
+ pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
+ gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
+ return true;
+}
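The dirty test above treats a segment as clean only if it is protected and carries no UC (user-change) indication; otherwise it clears UC and re-protects, so the next guest write faults and re-marks the segment. A rough stand-alone model of that decision (the bit names are invented, and the kernel re-protects via gmap_protect_pmd() rather than a direct bit set):

#include <stdbool.h>
#include <stdio.h>

#define ENTRY_INVALID  (1u << 0)
#define ENTRY_PROTECT  (1u << 1)
#define ENTRY_UC       (1u << 2)   /* changed while writable */

static bool test_and_clear_dirty(unsigned int *entry)
{
        if (*entry & ENTRY_INVALID)
                return false;
        /* protected and unchanged => clean */
        if ((*entry & ENTRY_PROTECT) && !(*entry & ENTRY_UC))
                return false;
        *entry &= ~ENTRY_UC;        /* clear the change indication...       */
        *entry |= ENTRY_PROTECT;    /* ...and re-protect: next write faults */
        return true;
}

int main(void)
{
        unsigned int e = ENTRY_UC;
        bool first = test_and_clear_dirty(&e);
        bool again = test_and_clear_dirty(&e);

        printf("dirty: %d, dirty again: %d\n", first, again);
        return 0;
}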
+
+/**
+ * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
+ * @gmap: pointer to guest address space
+ * @bitmap: dirty bitmap for this pmd
+ * @gaddr: virtual address in the guest address space
+ * @vmaddr: virtual address in the host address space
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
+ unsigned long gaddr, unsigned long vmaddr)
+{
+ int i;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ spinlock_t *ptl;
+
+ pmdp = gmap_pmd_op_walk(gmap, gaddr);
+ if (!pmdp)
+ return;
+
+ if (pmd_large(*pmdp)) {
+ if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
+ bitmap_fill(bitmap, _PAGE_ENTRIES);
+ } else {
+ for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
+ ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
+ if (!ptep)
+ continue;
+ if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
+ set_bit(i, bitmap);
+ spin_unlock(ptl);
+ }
+ }
+ gmap_pmd_op_end(gmap, pmdp);
+}
+EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
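For a huge segment one dirty bit covers the whole 1 MB, so the 256-bit per-page bitmap is filled wholesale; otherwise the 256 page table entries are scanned one by one. A hedged model of the two paths (the callback stands in for ptep_test_and_clear_uc(), and the bitmap layout assumes 64-bit words):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_ENTRIES 256                 /* 4K pages per 1M segment */

/* bitmap[4] covers 256 pages, one bit each, as in the kernel interface */
static void sync_dirty_log(uint64_t bitmap[4], bool huge,
                           bool (*page_dirty)(int))
{
        if (huge) {
                /* one dirty huge segment dirties every page in it */
                for (int w = 0; w < 4; w++)
                        bitmap[w] = ~0ULL;
                return;
        }
        for (int i = 0; i < PAGE_ENTRIES; i++)
                if (page_dirty(i))
                        bitmap[i / 64] |= 1ULL << (i % 64);
}

static bool every_third(int i) { return i % 3 == 0; }

int main(void)
{
        uint64_t bm[4] = { 0 };

        sync_dirty_log(bm, false, every_third);
        printf("first word: %#llx\n", (unsigned long long)bm[0]);
        return 0;
}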
+
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -2168,17 +2548,45 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
* Enable storage key handling from now on and initialize the storage
* keys with the default key.
*/
-static int __s390_enable_skey(pte_t *pte, unsigned long addr,
- unsigned long next, struct mm_walk *walk)
+static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
{
/* Clear storage key */
ptep_zap_key(walk->mm, addr, pte);
return 0;
}
+static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
+ unsigned long hmask, unsigned long next,
+ struct mm_walk *walk)
+{
+ pmd_t *pmd = (pmd_t *)pte;
+ unsigned long start, end;
+ struct page *page = pmd_page(*pmd);
+
+ /*
+ * The write check makes sure we do not set a key on shared
+ * memory. This is needed as the walker does not differentiate
+ * between actual guest memory and the process executable or
+ * shared libraries.
+ */
+ if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
+ !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
+ return 0;
+
+ start = pmd_val(*pmd) & HPAGE_MASK;
+ end = start + HPAGE_SIZE - 1;
+ __storage_key_init_range(start, end);
+ set_bit(PG_arch_1, &page->flags);
+ return 0;
+}
+
int s390_enable_skey(void)
{
- struct mm_walk walk = { .pte_entry = __s390_enable_skey };
+ struct mm_walk walk = {
+ .hugetlb_entry = __s390_enable_skey_hugetlb,
+ .pte_entry = __s390_enable_skey_pte,
+ };
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc = 0;
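The reworked walk registers two callbacks and lets the page-table walker dispatch by mapping granularity: hugetlb entries get the key-initializing handler, ordinary ptes get the key-zapping one. An illustrative dispatch-table model (the struct and callbacks below are this sketch's own, not the mm_walk API):

#include <stdio.h>

struct walk_ops {
        void (*huge_entry)(unsigned long addr);
        void (*pte_entry)(unsigned long addr);
};

struct mapping {
        unsigned long addr;
        int is_huge;
};

static void walk(const struct mapping *m, int n, const struct walk_ops *ops)
{
        for (int i = 0; i < n; i++)
                (m[i].is_huge ? ops->huge_entry : ops->pte_entry)(m[i].addr);
}

static void init_keys_huge(unsigned long a) { printf("huge  %#lx\n", a); }
static void zap_key_pte(unsigned long a)    { printf("small %#lx\n", a); }

int main(void)
{
        struct mapping m[] = { { 0x100000, 1 }, { 0x201000, 0 } };
        struct walk_ops ops = { init_keys_huge, zap_key_pte };

        walk(m, 2, &ops);
        return 0;
}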
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index e804090f4470..b0246c705a19 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -123,6 +123,29 @@ static inline pte_t __rste_to_pte(unsigned long rste)
return pte;
}
+static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
+{
+ struct page *page;
+ unsigned long size, paddr;
+
+ if (!mm_uses_skeys(mm) ||
+ rste & _SEGMENT_ENTRY_INVALID)
+ return;
+
+ if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
+ page = pud_page(__pud(rste));
+ size = PUD_SIZE;
+ paddr = rste & PUD_MASK;
+ } else {
+ page = pmd_page(__pmd(rste));
+ size = PMD_SIZE;
+ paddr = rste & PMD_MASK;
+ }
+
+ if (!test_and_set_bit(PG_arch_1, &page->flags))
+ __storage_key_init_range(paddr, paddr + size - 1);
+}
+
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
@@ -137,6 +160,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
else
rste |= _SEGMENT_ENTRY_LARGE;
+ clear_huge_pte_skeys(mm, rste);
pte_val(*ptep) = rste;
}
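clear_huge_pte_skeys() initializes storage keys for the whole frame only on the first mapping: test_and_set_bit() on PG_arch_1 makes later mappers skip the expensive init. A single-threaded model of that once-only guard (the kernel's test-and-set is atomic, this one is not):

#include <stdbool.h>
#include <stdio.h>

struct frame {
        bool keys_initialized;   /* stand-in for PG_arch_1 */
};

static void map_huge_frame(struct frame *f)
{
        if (!f->keys_initialized) {
                f->keys_initialized = true;
                puts("initializing storage keys for whole frame");
        } else {
                puts("keys already set, skipping");
        }
}

int main(void)
{
        struct frame f = { false };

        map_huge_frame(&f);   /* does the init */
        map_huge_frame(&f);   /* no-op */
        return 0;
}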
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 382153ff17e3..dc3cede7f2ec 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable)
list_for_each(l, &zone->free_area[order].free_list[t]) {
page = list_entry(l, struct page, lru);
if (make_stable)
- set_page_stable_dat(page, 0);
+ set_page_stable_dat(page, order);
else
set_page_unused(page, order);
}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index c44171588d08..f8c6faab41f4 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -14,7 +14,7 @@
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
- asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+ asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],1,0"
: [addr] "+a" (addr) : [skey] "d" (skey));
return addr;
}
@@ -23,8 +23,6 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
{
unsigned long boundary, size;
- if (!PAGE_DEFAULT_KEY)
- return;
while (start < end) {
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
@@ -37,7 +35,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
continue;
}
}
- page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
+ page_set_storage_key(start, PAGE_DEFAULT_KEY, 1);
start += PAGE_SIZE;
}
}
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 84bd6329a88d..76d89ee8b428 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -28,7 +28,7 @@ static struct ctl_table page_table_sysctl[] = {
.data = &page_table_allocate_pgste,
.maxlen = sizeof(int),
.mode = S_IRUGO | S_IWUSR,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &page_table_allocate_pgste_min,
.extra2 = &page_table_allocate_pgste_max,
},
@@ -252,6 +252,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
spin_unlock_bh(&mm->context.lock);
if (mask != 0)
return;
+ } else {
+ atomic_xor_bits(&page->_refcount, 3U << 24);
}
pgtable_page_dtor(page);
@@ -304,6 +306,8 @@ static void __tlb_remove_table(void *_table)
break;
/* fallthrough */
case 3: /* 4K page table with pgstes */
+ if (mask & 3)
+ atomic_xor_bits(&page->_refcount, 3 << 24);
pgtable_page_dtor(page);
__free_page(page);
break;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 301e466e4263..f2cc7da473e4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -347,18 +347,27 @@ static inline void pmdp_idte_local(struct mm_struct *mm,
mm->context.asce, IDTE_LOCAL);
else
__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_idte_local(mm, addr);
}
static inline void pmdp_idte_global(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
- if (MACHINE_HAS_TLB_GUEST)
+ if (MACHINE_HAS_TLB_GUEST) {
__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
mm->context.asce, IDTE_GLOBAL);
- else if (MACHINE_HAS_IDTE)
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_idte_global(mm, addr);
+ } else if (MACHINE_HAS_IDTE) {
__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
- else
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_idte_global(mm, addr);
+ } else {
__pmdp_csp(pmdp);
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_csp(mm, addr);
+ }
}
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
@@ -392,6 +401,8 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
cpumask_of(smp_processor_id()))) {
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1;
+ if (mm_has_pgste(mm))
+ gmap_pmdp_invalidate(mm, addr);
} else {
pmdp_idte_global(mm, addr, pmdp);
}
@@ -399,6 +410,24 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
return old;
}
+static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ pgd = pgd_offset(mm, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (!p4d)
+ return NULL;
+ pud = pud_alloc(mm, p4d, addr);
+ if (!pud)
+ return NULL;
+ pmd = pmd_alloc(mm, pud, addr);
+ return pmd;
+}
+
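pmd_alloc_map() is the usual walk-or-allocate descent: each missing intermediate level is allocated on demand, and the first allocation failure aborts the walk with NULL. A hypothetical two-level version of the same pattern:

#include <stdio.h>
#include <stdlib.h>

struct level2 { int entry[16]; };
struct level1 { struct level2 *l2[16]; };

/* Descend, allocating the missing level, and give up with NULL on
 * the first failed allocation — as pmd_alloc_map() does per level. */
static int *entry_alloc_map(struct level1 *l1, unsigned int i1,
                            unsigned int i2)
{
        if (!l1->l2[i1]) {
                l1->l2[i1] = calloc(1, sizeof(struct level2));
                if (!l1->l2[i1])
                        return NULL;      /* propagate failure upward */
        }
        return &l1->l2[i1]->entry[i2];
}

int main(void)
{
        struct level1 root = { { 0 } };
        int *e = entry_alloc_map(&root, 3, 7);

        if (e)
                *e = 42;
        printf("entry: %d\n", root.l2[3]->entry[7]);
        free(root.l2[3]);
        return 0;
}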
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t new)
{
@@ -693,40 +722,14 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
/*
* Test and reset if a guest page is dirty
*/
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
+bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
- spinlock_t *ptl;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
pgste_t pgste;
- pte_t *ptep;
pte_t pte;
bool dirty;
int nodat;
- pgd = pgd_offset(mm, addr);
- p4d = p4d_alloc(mm, pgd, addr);
- if (!p4d)
- return false;
- pud = pud_alloc(mm, p4d, addr);
- if (!pud)
- return false;
- pmd = pmd_alloc(mm, pud, addr);
- if (!pmd)
- return false;
- /* We can't run guests backed by huge pages, but userspace can
- * still set them up and then try to migrate them without any
- * migration support.
- */
- if (pmd_large(*pmd))
- return true;
-
- ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
- if (unlikely(!ptep))
- return false;
-
pgste = pgste_get_lock(ptep);
dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
pgste_val(pgste) &= ~PGSTE_UC_BIT;
@@ -742,21 +745,43 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
*ptep = pte;
}
pgste_set_unlock(ptep, pgste);
-
- spin_unlock(ptl);
return dirty;
}
-EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, bool nq)
{
- unsigned long keyul;
+ unsigned long keyul, paddr;
spinlock_t *ptl;
pgste_t old, new;
+ pmd_t *pmdp;
pte_t *ptep;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pmdp = pmd_alloc_map(mm, addr);
+ if (unlikely(!pmdp))
+ return -EFAULT;
+
+ ptl = pmd_lock(mm, pmdp);
+ if (!pmd_present(*pmdp)) {
+ spin_unlock(ptl);
+ return -EFAULT;
+ }
+
+ if (pmd_large(*pmdp)) {
+ paddr = pmd_val(*pmdp) & HPAGE_MASK;
+ paddr |= addr & ~HPAGE_MASK;
+ /*
+ * Huge pmds need quiescing operations; they are
+ * always mapped.
+ */
+ page_set_storage_key(paddr, key, 1);
+ spin_unlock(ptl);
+ return 0;
+ }
+ spin_unlock(ptl);
+
+ ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
if (unlikely(!ptep))
return -EFAULT;
@@ -767,14 +792,14 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
- unsigned long address, bits, skey;
+ unsigned long bits, skey;
- address = pte_val(*ptep) & PAGE_MASK;
- skey = (unsigned long) page_get_storage_key(address);
+ paddr = pte_val(*ptep) & PAGE_MASK;
+ skey = (unsigned long) page_get_storage_key(paddr);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
/* Set storage key ACC and FP */
- page_set_storage_key(address, skey, !nq);
+ page_set_storage_key(paddr, skey, !nq);
/* Merge host changed & referenced into pgste */
pgste_val(new) |= bits << 52;
}
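In the huge-pmd paths above the physical address is composed from the frame base in the pmd plus the byte offset of addr inside the segment: paddr = (pmd & HPAGE_MASK) | (addr & ~HPAGE_MASK). A worked example of the arithmetic (the addresses are made up):

#include <stdint.h>
#include <stdio.h>

#define HPAGE_SHIFT 20                      /* 1 MB segments on s390 */
#define HPAGE_MASK  (~((1UL << HPAGE_SHIFT) - 1))

int main(void)
{
        uint64_t pmd_frame = 0x40000000;    /* physical base from the pmd */
        uint64_t addr      = 0x1234ABC;     /* virtual address in segment */
        uint64_t paddr     = (pmd_frame & HPAGE_MASK) | (addr & ~HPAGE_MASK);

        /* 0x40000000 | 0x34ABC == 0x40034abc */
        printf("paddr = %#llx\n", (unsigned long long)paddr);
        return 0;
}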
@@ -830,11 +855,32 @@ EXPORT_SYMBOL(cond_set_guest_storage_key);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
spinlock_t *ptl;
+ unsigned long paddr;
pgste_t old, new;
+ pmd_t *pmdp;
pte_t *ptep;
int cc = 0;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pmdp = pmd_alloc_map(mm, addr);
+ if (unlikely(!pmdp))
+ return -EFAULT;
+
+ ptl = pmd_lock(mm, pmdp);
+ if (!pmd_present(*pmdp)) {
+ spin_unlock(ptl);
+ return -EFAULT;
+ }
+
+ if (pmd_large(*pmdp)) {
+ paddr = pmd_val(*pmdp) & HPAGE_MASK;
+ paddr |= addr & ~HPAGE_MASK;
+ cc = page_reset_referenced(paddr);
+ spin_unlock(ptl);
+ return cc;
+ }
+ spin_unlock(ptl);
+
+ ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
if (unlikely(!ptep))
return -EFAULT;
@@ -843,7 +889,8 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
pgste_val(new) &= ~PGSTE_GR_BIT;
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
- cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
+ paddr = pte_val(*ptep) & PAGE_MASK;
+ cc = page_reset_referenced(paddr);
/* Merge real referenced bit into host-set */
pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
}
@@ -862,18 +909,42 @@ EXPORT_SYMBOL(reset_guest_reference_bit);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char *key)
{
+ unsigned long paddr;
spinlock_t *ptl;
pgste_t pgste;
+ pmd_t *pmdp;
pte_t *ptep;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pmdp = pmd_alloc_map(mm, addr);
+ if (unlikely(!pmdp))
+ return -EFAULT;
+
+ ptl = pmd_lock(mm, pmdp);
+ if (!pmd_present(*pmdp)) {
+ /* Not yet mapped memory has a zero key */
+ spin_unlock(ptl);
+ *key = 0;
+ return 0;
+ }
+
+ if (pmd_large(*pmdp)) {
+ paddr = pmd_val(*pmdp) & HPAGE_MASK;
+ paddr |= addr & ~HPAGE_MASK;
+ *key = page_get_storage_key(paddr);
+ spin_unlock(ptl);
+ return 0;
+ }
+ spin_unlock(ptl);
+
+ ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
if (unlikely(!ptep))
return -EFAULT;
pgste = pgste_get_lock(ptep);
*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+ paddr = pte_val(*ptep) & PAGE_MASK;
if (!(pte_val(*ptep) & _PAGE_INVALID))
- *key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
+ *key = page_get_storage_key(paddr);
/* Reflect guest's logical view, not physical */
*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
pgste_set_unlock(ptep, pgste);
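get_guest_storage_key() layers three sources: the software key copy held in the pgste, the real hardware key when the page is mapped, and the guest's referenced/changed view merged in from the GR/GC bits. A sketch of that composition (the masks below are illustrative stand-ins for the PGSTE_* constants):

#include <stdint.h>
#include <stdio.h>

#define PGSTE_ACC_FP_BITS  (0xffULL << 56)  /* software key copy */
#define PGSTE_GR_GC_BITS   (0x3ULL  << 48)  /* guest referenced/changed */

static unsigned char guest_key(uint64_t pgste, int mapped,
                               unsigned char real_key)
{
        unsigned char key;

        key = (pgste & PGSTE_ACC_FP_BITS) >> 56;  /* software copy */
        if (mapped)
                key = real_key;                   /* hardware key wins */
        key |= (pgste & PGSTE_GR_GC_BITS) >> 48;  /* guest R/C view */
        return key;
}

int main(void)
{
        uint64_t pgste = (0x90ULL << 56) | (0x3ULL << 48);

        printf("unmapped key: %#x\n", guest_key(pgste, 0, 0));
        printf("mapped key:   %#x\n", guest_key(pgste, 1, 0x60));
        return 0;
}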
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index d2db8acb1a55..d7052cbe984f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -485,8 +485,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
/* br %r1 */
_EMIT2(0x07f1);
} else {
- /* larl %r1,.+14 */
- EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,S390_lowcore.br_r1_trampoline */
EMIT4_DISP(0x44000000, REG_0, REG_0,
offsetof(struct lowcore, br_r1_trampoline));
@@ -1286,6 +1284,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
goto free_addrs;
}
if (bpf_jit_prog(&jit, fp)) {
+ bpf_jit_binary_free(header);
fp = orig_fp;
goto free_addrs;
}
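The added bpf_jit_binary_free() plugs a leak: once the binary image is allocated, the late bpf_jit_prog() failure path must release it before falling back to the interpreter. A generic model of that error-path ownership rule (toy resources, not the BPF API):

#include <stdio.h>
#include <stdlib.h>

static char *jit_compile(int fail_late)
{
        int *addrs = calloc(16, sizeof(*addrs));
        char *image = NULL;

        if (!addrs)
                return NULL;

        image = malloc(4096);
        if (!image)
                goto free_addrs;

        if (fail_late) {          /* bpf_jit_prog() failed...           */
                free(image);      /* ...the added bpf_jit_binary_free() */
                image = NULL;
        }

free_addrs:
        free(addrs);              /* freed on every path, as upstream */
        return image;
}

int main(void)
{
        char *ok = jit_compile(0);
        char *bad = jit_compile(1);

        printf("success image %p, failure image %p\n", (void *)ok, (void *)bad);
        free(ok);
        return 0;
}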
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 06a80434cfe6..5bd374491f94 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -134,6 +134,8 @@ void __init numa_setup(void)
{
pr_info("NUMA mode: %s\n", mode->name);
nodes_clear(node_possible_map);
+ /* Initially attach all possible CPUs to node 0. */
+ cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
if (mode->setup)
mode->setup();
numa_setup_memory();
@@ -141,20 +143,6 @@ void __init numa_setup(void)
}
/*
- * numa_init_early() - Initialization initcall
- *
- * This runs when only one CPU is online and before the first
- * topology update is called for by the scheduler.
- */
-static int __init numa_init_early(void)
-{
- /* Attach all possible CPUs to node 0 for now. */
- cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
- return 0;
-}
-early_initcall(numa_init_early);
-
-/*
* numa_init_late() - Initialization initcall
*
* Register NUMA nodes.
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index b482e95b6249..57f7cdac70a3 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -48,6 +48,10 @@ static char *pci_fmt2_names[] = {
"Maximum work units",
};
+static char *pci_fmt3_names[] = {
+ "Transmitted bytes",
+};
+
static char *pci_sw_names[] = {
"Allocated pages",
"Mapped pages",
@@ -112,6 +116,10 @@ static int pci_perf_show(struct seq_file *m, void *v)
pci_fmb_show(m, pci_fmt2_names, ARRAY_SIZE(pci_fmt2_names),
&zdev->fmb->fmt2.consumed_work_units);
break;
+ case 3:
+ pci_fmb_show(m, pci_fmt3_names, ARRAY_SIZE(pci_fmt3_names),
+ &zdev->fmb->fmt3.tx_bytes);
+ break;
default:
seq_puts(m, "Unknown format\n");
}
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index 1ace023cbdce..ce6a3f75065b 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -7,13 +7,13 @@ purgatory-y := head.o purgatory.o string.o sha256.o mem.o
targets += $(purgatory-y) purgatory.ro kexec-purgatory.c
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
-$(obj)/sha256.o: $(srctree)/lib/sha256.c
+$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
$(call if_changed_rule,cc_o_c)
-$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S
+$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(call if_changed_rule,as_o_S)
-$(obj)/string.o: $(srctree)/arch/s390/lib/string.c
+$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
$(call if_changed_rule,cc_o_c)
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib
@@ -21,15 +21,15 @@ LDFLAGS_purgatory.ro += -z nodefaultlib
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
-KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float
+KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
-CMD_BIN2C = $(objtree)/scripts/basic/bin2c
quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
+ cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
$(call if_changed,bin2c)
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
index 660c96a05a9b..2e3707b12edd 100644
--- a/arch/s390/purgatory/head.S
+++ b/arch/s390/purgatory/head.S
@@ -243,33 +243,26 @@ gprregs:
.quad 0
.endr
-purgatory_sha256_digest:
- .global purgatory_sha256_digest
- .rept 32 /* SHA256_DIGEST_SIZE */
- .byte 0
- .endr
-
-purgatory_sha_regions:
- .global purgatory_sha_regions
- .rept 16 * __KEXEC_SHA_REGION_SIZE /* KEXEC_SEGMENTS_MAX */
- .byte 0
- .endr
-
-kernel_entry:
- .global kernel_entry
- .quad 0
-
-kernel_type:
- .global kernel_type
- .quad 0
-
-crash_start:
- .global crash_start
- .quad 0
+/* Macro to define a global variable with name and size (in bytes) to be
+ * shared with C code.
+ *
+ * Add the .size and .type attribute to satisfy checks on the Elf_Sym during
+ * purgatory load.
+ */
+.macro GLOBAL_VARIABLE name,size
+\name:
+ .global \name
+ .size \name,\size
+ .type \name,object
+ .skip \size,0
+.endm
-crash_size:
- .global crash_size
- .quad 0
+GLOBAL_VARIABLE purgatory_sha256_digest,32
+GLOBAL_VARIABLE purgatory_sha_regions,16*__KEXEC_SHA_REGION_SIZE
+GLOBAL_VARIABLE kernel_entry,8
+GLOBAL_VARIABLE kernel_type,8
+GLOBAL_VARIABLE crash_start,8
+GLOBAL_VARIABLE crash_size,8
.align PAGE_SIZE
stack:
diff --git a/arch/s390/purgatory/purgatory.c b/arch/s390/purgatory/purgatory.c
index 4e2beb3c29b7..3528e6da4e87 100644
--- a/arch/s390/purgatory/purgatory.c
+++ b/arch/s390/purgatory/purgatory.c
@@ -12,15 +12,6 @@
#include <linux/string.h>
#include <asm/purgatory.h>
-struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX];
-u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE];
-
-u64 kernel_entry;
-u64 kernel_type;
-
-u64 crash_start;
-u64 crash_size;
-
int verify_sha256_digest(void)
{
struct kexec_sha_region *ptr, *end;
diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss
index d92f2d94a5d9..9bba2c14e0ca 100644
--- a/arch/s390/scripts/Makefile.chkbss
+++ b/arch/s390/scripts/Makefile.chkbss
@@ -2,13 +2,22 @@
quiet_cmd_chkbss = CHKBSS $<
define cmd_chkbss
+ rm -f $@; \
if ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
echo "error: $< .bss section is not empty" >&2; exit 1; \
fi; \
touch $@;
endef
-$(obj)/built-in.a: $(patsubst %, $(obj)/%.chkbss, $(chkbss))
+chkbss-target ?= $(obj)/built-in.a
+ifneq (,$(findstring /,$(chkbss)))
+chkbss-files := $(patsubst %, %.chkbss, $(chkbss))
+else
+chkbss-files := $(patsubst %, $(obj)/%.chkbss, $(chkbss))
+endif
+
+$(chkbss-target): $(chkbss-files)
+targets += $(notdir $(chkbss-files))
%.o.chkbss: %.o
$(call cmd,chkbss)
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 90a8c9e84ca6..0c85aedcf9b3 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -4,7 +4,7 @@
* numbering scheme from the Principles of Operation: most significant bit
* has bit number 0.
*
- * Copyright IBM Corp. 2015
+ * Copyright IBM Corp. 2015, 2018
*
*/
@@ -106,6 +106,7 @@ static struct facility_def facility_defs[] = {
.name = "FACILITIES_KVM_CPUMODEL",
.bits = (int[]){
+ 156, /* etoken facility */
-1 /* END */
}
},
diff --git a/arch/s390/tools/gen_opcode_table.c b/arch/s390/tools/gen_opcode_table.c
index 259aa0680d1a..a1bc02b29c81 100644
--- a/arch/s390/tools/gen_opcode_table.c
+++ b/arch/s390/tools/gen_opcode_table.c
@@ -257,7 +257,7 @@ static void add_to_group(struct gen_opcode *desc, struct insn *insn, int offset)
if (!desc->group)
exit(EXIT_FAILURE);
group = &desc->group[desc->nr_groups - 1];
- strncpy(group->opcode, insn->opcode, 2);
+ memcpy(group->opcode, insn->opcode, 2);
group->type = insn->type;
group->offset = offset;
group->count = 1;
@@ -283,7 +283,7 @@ static void print_opcode_table(struct gen_opcode *desc)
continue;
add_to_group(desc, insn, offset);
if (strncmp(opcode, insn->opcode, 2)) {
- strncpy(opcode, insn->opcode, 2);
+ memcpy(opcode, insn->opcode, 2);
printf("\t/* %.2s */ \\\n", opcode);
}
print_opcode(insn, offset);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index dd4f3d3e644f..1fb7b6d72baf 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -51,7 +51,6 @@ config SUPERH
select HAVE_ARCH_AUDITSYSCALL
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_NMI
- select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
help
@@ -159,19 +158,18 @@ config SWAP_IO_SPACE
bool
config DMA_COHERENT
+ select DMA_DIRECT_OPS
bool
config DMA_NONCOHERENT
def_bool !DMA_COHERENT
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select DMA_NONCOHERENT_OPS
config PGTABLE_LEVELS
default 3 if X2TLB
default 2
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "System type"
#
@@ -713,8 +711,6 @@ config HOTPLUG_CPU
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
-source "kernel/Kconfig.preempt"
-
config GUSA
def_bool y
depends on !SMP && SUPERH32
@@ -882,12 +878,6 @@ source "drivers/pcmcia/Kconfig"
endmenu
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
menu "Power management options (EXPERIMENTAL)"
source "kernel/power/Kconfig"
@@ -895,17 +885,3 @@ source "kernel/power/Kconfig"
source "drivers/cpuidle/Kconfig"
endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/sh/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index d0767672640d..010b6c33bbba 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-source "lib/Kconfig.debug"
-
config SH_STANDARD_BIOS
bool "Use LinuxSH standard BIOS"
depends on SUPERH32
@@ -88,5 +85,3 @@ config MCOUNT
def_bool y
depends on SUPERH32
depends on STACK_DEBUG || FUNCTION_TRACER
-
-endmenu
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index de8393cb7313..8f234d0435aa 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -1,40 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas - AP-325RXA
* (Compatible with Algo System ., LTD. - AP-320A)
*
* Copyright (C) 2008 Renesas Solutions Corp.
* Author : Yusuke Goda <goda.yuske@renesas.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
-#include <linux/init.h>
+#include <asm/clock.h>
+#include <asm/io.h>
+#include <asm/suspend.h>
+
+#include <cpu/sh7723.h>
+
+#include <linux/clkdev.h>
+#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/memblock.h>
+#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/sh_flctl.h>
-#include <linux/mfd/tmio.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
+#include <linux/sh_intc.h>
#include <linux/smsc911x.h>
-#include <linux/gpio.h>
#include <linux/videodev2.h>
-#include <linux/sh_intc.h>
+
+#include <media/drv-intf/renesas-ceu.h>
#include <media/i2c/ov772x.h>
-#include <media/soc_camera.h>
-#include <linux/platform_data/media/soc_camera_platform.h>
-#include <media/drv-intf/sh_mobile_ceu.h>
+
#include <video/sh_mobile_lcdc.h>
-#include <asm/io.h>
-#include <asm/clock.h>
-#include <asm/suspend.h>
-#include <cpu/sh7723.h>
+
+#define CEU_BUFFER_MEMORY_SIZE (4 << 20)
+static phys_addr_t ceu_dma_membase;
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
@@ -253,150 +258,25 @@ static struct platform_device lcdc_device = {
},
};
-static void camera_power(int val)
-{
- gpio_set_value(GPIO_PTZ5, val); /* RST_CAM/RSTB */
- mdelay(10);
-}
-
-#ifdef CONFIG_I2C
-/* support for the old ncm03j camera */
-static unsigned char camera_ncm03j_magic[] =
-{
- 0x87, 0x00, 0x88, 0x08, 0x89, 0x01, 0x8A, 0xE8,
- 0x1D, 0x00, 0x1E, 0x8A, 0x21, 0x00, 0x33, 0x36,
- 0x36, 0x60, 0x37, 0x08, 0x3B, 0x31, 0x44, 0x0F,
- 0x46, 0xF0, 0x4B, 0x28, 0x4C, 0x21, 0x4D, 0x55,
- 0x4E, 0x1B, 0x4F, 0xC7, 0x50, 0xFC, 0x51, 0x12,
- 0x58, 0x02, 0x66, 0xC0, 0x67, 0x46, 0x6B, 0xA0,
- 0x6C, 0x34, 0x7E, 0x25, 0x7F, 0x25, 0x8D, 0x0F,
- 0x92, 0x40, 0x93, 0x04, 0x94, 0x26, 0x95, 0x0A,
- 0x99, 0x03, 0x9A, 0xF0, 0x9B, 0x14, 0x9D, 0x7A,
- 0xC5, 0x02, 0xD6, 0x07, 0x59, 0x00, 0x5A, 0x1A,
- 0x5B, 0x2A, 0x5C, 0x37, 0x5D, 0x42, 0x5E, 0x56,
- 0xC8, 0x00, 0xC9, 0x1A, 0xCA, 0x2A, 0xCB, 0x37,
- 0xCC, 0x42, 0xCD, 0x56, 0xCE, 0x00, 0xCF, 0x1A,
- 0xD0, 0x2A, 0xD1, 0x37, 0xD2, 0x42, 0xD3, 0x56,
- 0x5F, 0x68, 0x60, 0x87, 0x61, 0xA3, 0x62, 0xBC,
- 0x63, 0xD4, 0x64, 0xEA, 0xD6, 0x0F,
-};
-
-static int camera_probe(void)
-{
- struct i2c_adapter *a = i2c_get_adapter(0);
- struct i2c_msg msg;
- int ret;
-
- if (!a)
- return -ENODEV;
-
- camera_power(1);
- msg.addr = 0x6e;
- msg.buf = camera_ncm03j_magic;
- msg.len = 2;
- msg.flags = 0;
- ret = i2c_transfer(a, &msg, 1);
- camera_power(0);
-
- return ret;
-}
-
-static int camera_set_capture(struct soc_camera_platform_info *info,
- int enable)
-{
- struct i2c_adapter *a = i2c_get_adapter(0);
- struct i2c_msg msg;
- int ret = 0;
- int i;
-
- camera_power(0);
- if (!enable)
- return 0; /* no disable for now */
-
- camera_power(1);
- for (i = 0; i < ARRAY_SIZE(camera_ncm03j_magic); i += 2) {
- u_int8_t buf[8];
-
- msg.addr = 0x6e;
- msg.buf = buf;
- msg.len = 2;
- msg.flags = 0;
-
- buf[0] = camera_ncm03j_magic[i];
- buf[1] = camera_ncm03j_magic[i + 1];
-
- ret = (ret < 0) ? ret : i2c_transfer(a, &msg, 1);
- }
-
- return ret;
-}
-
-static int ap325rxa_camera_add(struct soc_camera_device *icd);
-static void ap325rxa_camera_del(struct soc_camera_device *icd);
-
-static struct soc_camera_platform_info camera_info = {
- .format_name = "UYVY",
- .format_depth = 16,
- .format = {
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
- .colorspace = V4L2_COLORSPACE_SMPTE170M,
- .field = V4L2_FIELD_NONE,
- .width = 640,
- .height = 480,
+/* Powerdown/reset gpios for CEU image sensors */
+static struct gpiod_lookup_table ov7725_gpios = {
+ .dev_id = "0-0021",
+ .table = {
+ GPIO_LOOKUP("sh7723_pfc", GPIO_PTZ5, "reset", GPIO_ACTIVE_LOW),
},
- .mbus_param = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
- V4L2_MBUS_DATA_ACTIVE_HIGH,
- .mbus_type = V4L2_MBUS_PARALLEL,
- .set_capture = camera_set_capture,
-};
-
-static struct soc_camera_link camera_link = {
- .bus_id = 0,
- .add_device = ap325rxa_camera_add,
- .del_device = ap325rxa_camera_del,
- .module_name = "soc_camera_platform",
- .priv = &camera_info,
};
-static struct platform_device *camera_device;
-
-static void ap325rxa_camera_release(struct device *dev)
-{
- soc_camera_platform_release(&camera_device);
-}
-
-static int ap325rxa_camera_add(struct soc_camera_device *icd)
-{
- int ret = soc_camera_platform_add(icd, &camera_device, &camera_link,
- ap325rxa_camera_release, 0);
- if (ret < 0)
- return ret;
-
- ret = camera_probe();
- if (ret < 0)
- soc_camera_platform_del(icd, camera_device, &camera_link);
-
- return ret;
-}
-
-static void ap325rxa_camera_del(struct soc_camera_device *icd)
-{
- soc_camera_platform_del(icd, camera_device, &camera_link);
-}
-#endif /* CONFIG_I2C */
-
-static int ov7725_power(struct device *dev, int mode)
-{
- camera_power(0);
- if (mode)
- camera_power(1);
-
- return 0;
-}
-
-static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
- .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+static struct ceu_platform_data ceu0_pdata = {
+ .num_subdevs = 1,
+ .subdevs = {
+ { /* [0] = ov7725 */
+ .flags = 0,
+ .bus_width = 8,
+ .bus_shift = 0,
+ .i2c_adapter_id = 0,
+ .i2c_address = 0x21,
+ },
+ },
};
static struct resource ceu_resources[] = {
@@ -410,18 +290,15 @@ static struct resource ceu_resources[] = {
.start = evt2irq(0x880),
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* place holder for contiguous memory */
- },
};
-static struct platform_device ceu_device = {
- .name = "sh_mobile_ceu",
- .id = 0, /* "ceu0" clock */
+static struct platform_device ap325rxa_ceu_device = {
+ .name = "renesas-ceu",
+ .id = 0, /* "ceu.0" clock */
.num_resources = ARRAY_SIZE(ceu_resources),
.resource = ceu_resources,
.dev = {
- .platform_data = &sh_mobile_ceu_info,
+ .platform_data = &ceu0_pdata,
},
};
@@ -488,44 +365,18 @@ static struct platform_device sdhi1_cn7_device = {
},
};
-static struct i2c_board_info __initdata ap325rxa_i2c_devices[] = {
- {
- I2C_BOARD_INFO("pcf8563", 0x51),
- },
-};
-
-static struct i2c_board_info ap325rxa_i2c_camera[] = {
- {
- I2C_BOARD_INFO("ov772x", 0x21),
- },
-};
-
static struct ov772x_camera_info ov7725_info = {
.flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
.edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0),
};
-static struct soc_camera_link ov7725_link = {
- .bus_id = 0,
- .power = ov7725_power,
- .board_info = &ap325rxa_i2c_camera[0],
- .i2c_adapter_id = 0,
- .priv = &ov7725_info,
-};
-
-static struct platform_device ap325rxa_camera[] = {
+static struct i2c_board_info ap325rxa_i2c_devices[] __initdata = {
{
- .name = "soc-camera-pdrv",
- .id = 0,
- .dev = {
- .platform_data = &ov7725_link,
- },
- }, {
- .name = "soc-camera-pdrv",
- .id = 1,
- .dev = {
- .platform_data = &camera_link,
- },
+ I2C_BOARD_INFO("pcf8563", 0x51),
+ },
+ {
+ I2C_BOARD_INFO("ov772x", 0x21),
+ .platform_data = &ov7725_info,
},
};
@@ -533,12 +384,9 @@ static struct platform_device *ap325rxa_devices[] __initdata = {
&smsc9118_device,
&ap325rxa_nor_flash_device,
&lcdc_device,
- &ceu_device,
&nand_flash_device,
&sdhi0_cn3_device,
&sdhi1_cn7_device,
- &ap325rxa_camera[0],
- &ap325rxa_camera[1],
};
extern char ap325rxa_sdram_enter_start;
@@ -649,8 +497,6 @@ static int __init ap325rxa_devices_setup(void)
__raw_writew(0xFFFF, PORT_DRVCRA);
__raw_writew(0xFFFF, PORT_DRVCRB);
- platform_resource_setup_memory(&ceu_device, "ceu", 4 << 20);
-
/* SDHI0 - CN3 - SD CARD */
gpio_request(GPIO_FN_SDHI0CD_PTD, NULL);
gpio_request(GPIO_FN_SDHI0WP_PTD, NULL);
@@ -670,9 +516,25 @@ static int __init ap325rxa_devices_setup(void)
gpio_request(GPIO_FN_SDHI1CMD, NULL);
gpio_request(GPIO_FN_SDHI1CLK, NULL);
+ /* Add a clock alias for ov7725 xclk source. */
+ clk_add_alias(NULL, "0-0021", "video_clk", NULL);
+
+ /* Register RSTB gpio for ov7725 camera sensor. */
+ gpiod_add_lookup_table(&ov7725_gpios);
+
i2c_register_board_info(0, ap325rxa_i2c_devices,
ARRAY_SIZE(ap325rxa_i2c_devices));
+ /* Initialize CEU platform device separately to map memory first */
+ device_initialize(&ap325rxa_ceu_device.dev);
+ arch_setup_pdev_archdata(&ap325rxa_ceu_device);
+ dma_declare_coherent_memory(&ap325rxa_ceu_device.dev,
+ ceu_dma_membase, ceu_dma_membase,
+ ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1,
+ DMA_MEMORY_EXCLUSIVE);
+
+ platform_device_add(&ap325rxa_ceu_device);
+
return platform_add_devices(ap325rxa_devices,
ARRAY_SIZE(ap325rxa_devices));
}
@@ -689,7 +551,21 @@ static int ap325rxa_mode_pins(void)
return MODE_PIN5 | MODE_PIN8;
}
+/* Reserve a portion of memory for CEU buffers */
+static void __init ap325rxa_mv_mem_reserve(void)
+{
+ phys_addr_t phys;
+ phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
+
+ phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ memblock_free(phys, size);
+ memblock_remove(phys, size);
+
+ ceu_dma_membase = phys;
+}
+
static struct sh_machine_vector mv_ap325rxa __initmv = {
.mv_name = "AP-325RXA",
.mv_mode_pins = ap325rxa_mode_pins,
+ .mv_mem_reserve = ap325rxa_mv_mem_reserve,
};
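All three converted boards reserve CEU buffer memory the same way: memblock_alloc_base() grabs an aligned block, and the free + remove pair then drops the range from the kernel's memory map so it can later be handed to the CEU as dedicated coherent memory via dma_declare_coherent_memory(). A toy model of reserve-by-removal (a bump-down pool standing in for memblock):

#include <stdint.h>
#include <stdio.h>

struct pool {
        uint64_t base, end;   /* [base, end) still owned by the pool */
};

/* Allocate from the top of the pool, then shrink the pool so the
 * block can never be handed out again — the alloc/free/remove trio. */
static uint64_t reserve_for_device(struct pool *p, uint64_t size)
{
        uint64_t phys = p->end - size;   /* memblock_alloc_base() */

        p->end = phys;                   /* free + remove: shrink the pool */
        return phys;                     /* now device-private memory */
}

int main(void)
{
        struct pool ram = { 0x40000000, 0x50000000 };
        uint64_t ceu_dma_membase = reserve_for_device(&ram, 4 << 20);

        printf("CEU buffers at %#llx, pool now ends at %#llx\n",
               (unsigned long long)ceu_dma_membase,
               (unsigned long long)ram.end);
        return 0;
}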
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 6af7777332fc..e59c577ed871 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -1,41 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* KFR2R09 board support code
*
* Copyright (C) 2009 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/mmc/host.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/onenand.h>
+
+#include <asm/clock.h>
+#include <asm/io.h>
+#include <asm/machvec.h>
+#include <asm/suspend.h>
+
+#include <cpu/sh7724.h>
+
+#include <linux/clkdev.h>
#include <linux/delay.h>
-#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
-#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/physmap.h>
#include <linux/platform_data/lv5207lp.h>
+#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
+#include <linux/sh_intc.h>
#include <linux/usb/r8a66597.h>
#include <linux/videodev2.h>
-#include <linux/sh_intc.h>
+
+#include <mach/kfr2r09.h>
+
+#include <media/drv-intf/renesas-ceu.h>
#include <media/i2c/rj54n1cb0c.h>
-#include <media/soc_camera.h>
-#include <media/drv-intf/sh_mobile_ceu.h>
+
#include <video/sh_mobile_lcdc.h>
-#include <asm/suspend.h>
-#include <asm/clock.h>
-#include <asm/machvec.h>
-#include <asm/io.h>
-#include <cpu/sh7724.h>
-#include <mach/kfr2r09.h>
+
+#define CEU_BUFFER_MEMORY_SIZE (4 << 20)
+static phys_addr_t ceu_dma_membase;
+
+/* set VIO_CKO clock to 25MHz */
+#define CEU_MCLK_FREQ 25000000
+#define DRVCRB 0xA405018C
static struct mtd_partition kfr2r09_nor_flash_partitions[] =
{
@@ -230,8 +242,17 @@ static struct platform_device kfr2r09_usb0_gadget_device = {
.resource = kfr2r09_usb0_gadget_resources,
};
-static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
- .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+static struct ceu_platform_data ceu_pdata = {
+ .num_subdevs = 1,
+ .subdevs = {
+ { /* [0] = rj54n1cb0c */
+ .flags = 0,
+ .bus_width = 8,
+ .bus_shift = 0,
+ .i2c_adapter_id = 1,
+ .i2c_address = 0x50,
+ },
+ },
};
static struct resource kfr2r09_ceu_resources[] = {
@@ -246,109 +267,35 @@ static struct resource kfr2r09_ceu_resources[] = {
.end = evt2irq(0x880),
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* place holder for contiguous memory */
- },
};
static struct platform_device kfr2r09_ceu_device = {
- .name = "sh_mobile_ceu",
+ .name = "renesas-ceu",
.id = 0, /* "ceu0" clock */
.num_resources = ARRAY_SIZE(kfr2r09_ceu_resources),
.resource = kfr2r09_ceu_resources,
.dev = {
- .platform_data = &sh_mobile_ceu_info,
+ .platform_data = &ceu_pdata,
},
};
-static struct i2c_board_info kfr2r09_i2c_camera = {
- I2C_BOARD_INFO("rj54n1cb0c", 0x50),
-};
-
-static struct clk *camera_clk;
-
-/* set VIO_CKO clock to 25MHz */
-#define CEU_MCLK_FREQ 25000000
-
-#define DRVCRB 0xA405018C
-static int camera_power(struct device *dev, int mode)
-{
- int ret;
-
- if (mode) {
- long rate;
-
- camera_clk = clk_get(NULL, "video_clk");
- if (IS_ERR(camera_clk))
- return PTR_ERR(camera_clk);
-
- rate = clk_round_rate(camera_clk, CEU_MCLK_FREQ);
- ret = clk_set_rate(camera_clk, rate);
- if (ret < 0)
- goto eclkrate;
-
- /* set DRVCRB
- *
- * use 1.8 V for VccQ_VIO
- * use 2.85V for VccQ_SR
- */
- __raw_writew((__raw_readw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);
-
- /* reset clear */
- ret = gpio_request(GPIO_PTB4, NULL);
- if (ret < 0)
- goto eptb4;
- ret = gpio_request(GPIO_PTB7, NULL);
- if (ret < 0)
- goto eptb7;
-
- ret = gpio_direction_output(GPIO_PTB4, 1);
- if (!ret)
- ret = gpio_direction_output(GPIO_PTB7, 1);
- if (ret < 0)
- goto egpioout;
- msleep(1);
-
- ret = clk_enable(camera_clk); /* start VIO_CKO */
- if (ret < 0)
- goto eclkon;
-
- return 0;
- }
-
- ret = 0;
-
- clk_disable(camera_clk);
-eclkon:
- gpio_set_value(GPIO_PTB7, 0);
-egpioout:
- gpio_set_value(GPIO_PTB4, 0);
- gpio_free(GPIO_PTB7);
-eptb7:
- gpio_free(GPIO_PTB4);
-eptb4:
-eclkrate:
- clk_put(camera_clk);
- return ret;
-}
-
static struct rj54n1_pdata rj54n1_priv = {
.mclk_freq = CEU_MCLK_FREQ,
.ioctl_high = false,
};
-static struct soc_camera_link rj54n1_link = {
- .power = camera_power,
- .board_info = &kfr2r09_i2c_camera,
- .i2c_adapter_id = 1,
- .priv = &rj54n1_priv,
+static struct i2c_board_info kfr2r09_i2c_camera = {
+ I2C_BOARD_INFO("rj54n1cb0c", 0x50),
+ .platform_data = &rj54n1_priv,
};
-static struct platform_device kfr2r09_camera = {
- .name = "soc-camera-pdrv",
- .id = 0,
- .dev = {
- .platform_data = &rj54n1_link,
+static struct gpiod_lookup_table rj54n1_gpios = {
+ .dev_id = "1-0050",
+ .table = {
+ GPIO_LOOKUP("sh7724_pfc", GPIO_PTB4, "poweron",
+ GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("sh7724_pfc", GPIO_PTB7, "enable",
+ GPIO_ACTIVE_HIGH),
},
};
@@ -393,8 +340,6 @@ static struct platform_device *kfr2r09_devices[] __initdata = {
&kfr2r09_nand_flash_device,
&kfr2r09_sh_keysc_device,
&kfr2r09_sh_lcdc_device,
- &kfr2r09_ceu_device,
- &kfr2r09_camera,
&kfr2r09_sh_sdhi0_device,
};
@@ -533,6 +478,8 @@ extern char kfr2r09_sdram_leave_end;
static int __init kfr2r09_devices_setup(void)
{
+ static struct clk *camera_clk;
+
/* register board specific self-refresh code */
sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
SUSP_SH_RSTANDBY,
@@ -622,8 +569,6 @@ static int __init kfr2r09_devices_setup(void)
gpio_request(GPIO_FN_VIO0_D1, NULL);
gpio_request(GPIO_FN_VIO0_D0, NULL);
- platform_resource_setup_memory(&kfr2r09_ceu_device, "ceu", 4 << 20);
-
/* SDHI0 connected to yc304 */
gpio_request(GPIO_FN_SDHI0CD, NULL);
gpio_request(GPIO_FN_SDHI0D3, NULL);
@@ -635,6 +580,36 @@ static int __init kfr2r09_devices_setup(void)
i2c_register_board_info(0, &kfr2r09_backlight_board_info, 1);
+ /* Set camera clock frequency and register a clock alias for rj54n1. */
+ camera_clk = clk_get(NULL, "video_clk");
+ if (!IS_ERR(camera_clk)) {
+ clk_set_rate(camera_clk,
+ clk_round_rate(camera_clk, CEU_MCLK_FREQ));
+ clk_put(camera_clk);
+ }
+ clk_add_alias(NULL, "1-0050", "video_clk", NULL);
+
+ /* set DRVCRB
+ *
+ * use 1.8 V for VccQ_VIO
+ * use 2.85V for VccQ_SR
+ */
+ __raw_writew((__raw_readw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);
+
+ gpiod_add_lookup_table(&rj54n1_gpios);
+
+ i2c_register_board_info(1, &kfr2r09_i2c_camera, 1);
+
+ /* Initialize CEU platform device separately to map memory first */
+ device_initialize(&kfr2r09_ceu_device.dev);
+ arch_setup_pdev_archdata(&kfr2r09_ceu_device);
+ dma_declare_coherent_memory(&kfr2r09_ceu_device.dev,
+ ceu_dma_membase, ceu_dma_membase,
+ ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1,
+ DMA_MEMORY_EXCLUSIVE);
+
+ platform_device_add(&kfr2r09_ceu_device);
+
return platform_add_devices(kfr2r09_devices,
ARRAY_SIZE(kfr2r09_devices));
}
@@ -651,10 +626,24 @@ static int kfr2r09_mode_pins(void)
return MODE_PIN0 | MODE_PIN1 | MODE_PIN5 | MODE_PIN8;
}
+/* Reserve a portion of memory for CEU buffers */
+static void __init kfr2r09_mv_mem_reserve(void)
+{
+ phys_addr_t phys;
+ phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
+
+ phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ memblock_free(phys, size);
+ memblock_remove(phys, size);
+
+ ceu_dma_membase = phys;
+}
+
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_kfr2r09 __initmv = {
.mv_name = "kfr2r09",
.mv_mode_pins = kfr2r09_mode_pins,
+ .mv_mem_reserve = kfr2r09_mv_mem_reserve,
};
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 3d7d0046cf49..254f2c662703 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -28,7 +28,6 @@
#include <video/sh_mobile_lcdc.h>
#include <media/drv-intf/renesas-ceu.h>
#include <media/i2c/ov772x.h>
-#include <media/soc_camera.h>
#include <media/i2c/tw9910.h>
#include <asm/clock.h>
#include <asm/machvec.h>
@@ -351,8 +350,9 @@ static struct platform_device migor_ceu_device = {
static struct gpiod_lookup_table ov7725_gpios = {
.dev_id = "0-0021",
.table = {
- GPIO_LOOKUP("sh7722_pfc", GPIO_PTT0, "pwdn", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("sh7722_pfc", GPIO_PTT3, "rstb", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("sh7722_pfc", GPIO_PTT0, "powerdown",
+ GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("sh7722_pfc", GPIO_PTT3, "reset", GPIO_ACTIVE_LOW),
},
};
@@ -592,7 +592,7 @@ static int __init migor_devices_setup(void)
}
/* Add a clock alias for ov7725 xclk source. */
- clk_add_alias("xclk", "0-0021", "video_clk", NULL);
+ clk_add_alias(NULL, "0-0021", "video_clk", NULL);
/* Register GPIOs for video sources. */
gpiod_add_lookup_table(&ov7725_gpios);
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 255952555656..fdbec22ae687 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -1,43 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sh/boards/se/7724/setup.c
*
* Copyright (C) 2009 Renesas Solutions Corp.
*
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
+#include <asm/clock.h>
+#include <asm/heartbeat.h>
+#include <asm/io.h>
+#include <asm/suspend.h>
-#include <linux/init.h>
+#include <cpu/sh7724.h>
+
+#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/mmc/host.h>
+#include <linux/memblock.h>
#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
-#include <linux/delay.h>
+#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
-#include <linux/smc91x.h>
-#include <linux/gpio.h>
-#include <linux/input.h>
-#include <linux/input/sh_keysc.h>
-#include <linux/usb/r8a66597.h>
#include <linux/sh_eth.h>
#include <linux/sh_intc.h>
+#include <linux/smc91x.h>
+#include <linux/usb/r8a66597.h>
#include <linux/videodev2.h>
-#include <video/sh_mobile_lcdc.h>
-#include <media/drv-intf/sh_mobile_ceu.h>
+
+#include <mach-se/mach/se7724.h>
+#include <media/drv-intf/renesas-ceu.h>
+
#include <sound/sh_fsi.h>
#include <sound/simple_card.h>
-#include <asm/io.h>
-#include <asm/heartbeat.h>
-#include <asm/clock.h>
-#include <asm/suspend.h>
-#include <cpu/sh7724.h>
-#include <mach-se/mach/se7724.h>
+
+#include <video/sh_mobile_lcdc.h>
+
+#define CEU_BUFFER_MEMORY_SIZE (4 << 20)
+static phys_addr_t ceu0_dma_membase;
+static phys_addr_t ceu1_dma_membase;
/*
* SWx 1234 5678
@@ -216,8 +222,8 @@ static struct platform_device lcdc_device = {
};
/* CEU0 */
-static struct sh_mobile_ceu_info sh_mobile_ceu0_info = {
- .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+static struct ceu_platform_data ceu0_pdata = {
+ .num_subdevs = 0,
};
static struct resource ceu0_resources[] = {
@@ -231,24 +237,21 @@ static struct resource ceu0_resources[] = {
.start = evt2irq(0x880),
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* place holder for contiguous memory */
- },
};
static struct platform_device ceu0_device = {
- .name = "sh_mobile_ceu",
- .id = 0, /* "ceu0" clock */
+ .name = "renesas-ceu",
+ .id = 0, /* "ceu.0" clock */
.num_resources = ARRAY_SIZE(ceu0_resources),
.resource = ceu0_resources,
.dev = {
- .platform_data = &sh_mobile_ceu0_info,
+ .platform_data = &ceu0_pdata,
},
};
/* CEU1 */
-static struct sh_mobile_ceu_info sh_mobile_ceu1_info = {
- .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+static struct ceu_platform_data ceu1_pdata = {
+ .num_subdevs = 0,
};
static struct resource ceu1_resources[] = {
@@ -262,18 +265,15 @@ static struct resource ceu1_resources[] = {
.start = evt2irq(0x9e0),
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* place holder for contiguous memory */
- },
};
static struct platform_device ceu1_device = {
- .name = "sh_mobile_ceu",
- .id = 1, /* "ceu1" clock */
+ .name = "renesas-ceu",
+ .id = 1, /* "ceu.1" clock */
.num_resources = ARRAY_SIZE(ceu1_resources),
.resource = ceu1_resources,
.dev = {
- .platform_data = &sh_mobile_ceu1_info,
+ .platform_data = &ceu1_pdata,
},
};
@@ -574,13 +574,16 @@ static struct platform_device vou_device = {
},
};
+static struct platform_device *ms7724se_ceu_devices[] __initdata = {
+ &ceu0_device,
+ &ceu1_device,
+};
+
static struct platform_device *ms7724se_devices[] __initdata = {
&heartbeat_device,
&smc91x_eth_device,
&lcdc_device,
&nor_flash_device,
- &ceu0_device,
- &ceu1_device,
&keysc_device,
&sh_eth_device,
&sh7724_usb0_host_device,
@@ -797,7 +800,6 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_VIO0_CLK, NULL);
gpio_request(GPIO_FN_VIO0_FLD, NULL);
gpio_request(GPIO_FN_VIO0_HD, NULL);
- platform_resource_setup_memory(&ceu0_device, "ceu0", 4 << 20);
/* enable CEU1 */
gpio_request(GPIO_FN_VIO1_D7, NULL);
@@ -812,7 +814,6 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_VIO1_HD, NULL);
gpio_request(GPIO_FN_VIO1_VD, NULL);
gpio_request(GPIO_FN_VIO1_CLK, NULL);
- platform_resource_setup_memory(&ceu1_device, "ceu1", 4 << 20);
/* KEYSC */
gpio_request(GPIO_FN_KEYOUT5_IN5, NULL);
@@ -934,12 +935,49 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_DV_VSYNC, NULL);
gpio_request(GPIO_FN_DV_HSYNC, NULL);
+ /* Initialize CEU platform devices separately to map memory first */
+ device_initialize(&ms7724se_ceu_devices[0]->dev);
+ arch_setup_pdev_archdata(ms7724se_ceu_devices[0]);
+ dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev,
+ ceu0_dma_membase, ceu0_dma_membase,
+ ceu0_dma_membase +
+ CEU_BUFFER_MEMORY_SIZE - 1,
+ DMA_MEMORY_EXCLUSIVE);
+ platform_device_add(ms7724se_ceu_devices[0]);
+
+ device_initialize(&ms7724se_ceu_devices[1]->dev);
+ arch_setup_pdev_archdata(ms7724se_ceu_devices[1]);
+ dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev,
+ ceu1_dma_membase, ceu1_dma_membase,
+ ceu1_dma_membase +
+ CEU_BUFFER_MEMORY_SIZE - 1,
+ DMA_MEMORY_EXCLUSIVE);
+ platform_device_add(ms7724se_ceu_devices[1]);
+
return platform_add_devices(ms7724se_devices,
ARRAY_SIZE(ms7724se_devices));
}
device_initcall(devices_setup);
+/* Reserve a portion of memory for CEU 0 and CEU 1 buffers */
+static void __init ms7724se_mv_mem_reserve(void)
+{
+ phys_addr_t phys;
+ phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
+
+ phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ memblock_free(phys, size);
+ memblock_remove(phys, size);
+ ceu0_dma_membase = phys;
+
+ phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ memblock_free(phys, size);
+ memblock_remove(phys, size);
+ ceu1_dma_membase = phys;
+}
+
static struct sh_machine_vector mv_ms7724se __initmv = {
.mv_name = "ms7724se",
.mv_init_irq = init_se7724_IRQ,
+ .mv_mem_reserve = ms7724se_mv_mem_reserve,
};
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index 46b2481eec90..26789ad28193 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -56,15 +56,15 @@ const struct of_cpu_method __cpu_method_of_table_sentinel
static void sh_of_smp_probe(void)
{
- struct device_node *np = 0;
- const char *method = 0;
+ struct device_node *np;
+ const char *method = NULL;
const struct of_cpu_method *m = __cpu_method_of_table;
pr_info("SH generic board support: scanning for cpus\n");
init_cpu_possible(cpumask_of(0));
- while ((np = of_find_node_by_type(np, "cpu"))) {
+ for_each_node_by_type(np, "cpu") {
const __be32 *cell = of_get_property(np, "reg", NULL);
u64 id = -1;
if (cell) id = of_read_number(cell, of_n_addr_cells(np));
@@ -80,6 +80,7 @@ static void sh_of_smp_probe(void)
if (!method) {
np = of_find_node_by_name(NULL, "cpus");
of_property_read_string(np, "enable-method", &method);
+ of_node_put(np);
}
pr_info("CPU enable method: %s\n", method);
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index e5b7437ab4af..8256626bc53c 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -160,8 +160,6 @@ static int __init pcibios_init(void)
for (hose = hose_head; hose; hose = hose->next)
pcibios_scanbus(hose);
- dma_debug_add_bus(&pci_bus_type);
-
pci_initialized = 1;
return 0;
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 46dd82ab2c29..6a5609a55965 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -2,6 +2,7 @@ generic-y += compat.h
generic-y += current.h
generic-y += delay.h
generic-y += div64.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += irq_regs.h
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 0fd0099f43cc..f37b95a80232 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -32,44 +32,9 @@
#include <asm/atomic-irq.h>
#endif
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
-
- return c;
-}
-
#endif /* CONFIG_CPU_J2 */
#endif /* __ASM_SH_ATOMIC_H */
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index d103ab5a4e4b..b932e42ef028 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -101,5 +101,12 @@ void kunmap_coherent(void *kvaddr);
void cpu_cache_init(void);
+static inline void *sh_cacheop_vaddr(void *vaddr)
+{
+ if (__in_29bit_mode())
+ vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+ return vaddr;
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
index 1e881f5db659..593a9704782b 100644
--- a/arch/sh/include/asm/cmpxchg-xchg.h
+++ b/arch/sh/include/asm/cmpxchg-xchg.h
@@ -8,7 +8,8 @@
* This work is licensed under the terms of the GNU GPL, version 2. See the
* file "COPYING" in the main directory of this archive for more details.
*/
-#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/compiler.h>
#include <asm/byteorder.h>
/*
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
deleted file mode 100644
index 41167931e5d9..000000000000
--- a/arch/sh/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_DMA_MAPPING_H
-#define __ASM_SH_DMA_MAPPING_H
-
-extern const struct dma_map_ops *dma_ops;
-extern void no_iommu_init(void);
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return dma_ops;
-}
-
-extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag,
- unsigned long attrs);
-extern void dma_generic_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs);
-
-void sh_sync_dma_for_device(void *vaddr, size_t size,
- enum dma_data_direction dir);
-
-#endif /* __ASM_SH_DMA_MAPPING_H */
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
index 7431c172c0cb..199d17b765f2 100644
--- a/arch/sh/include/asm/hw_breakpoint.h
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -10,7 +10,6 @@
#include <linux/types.h>
struct arch_hw_breakpoint {
- char *name; /* Contains name of the symbol to set bkpt */
unsigned long address;
u16 len;
u16 type;
@@ -41,6 +40,7 @@ struct sh_ubc {
struct clk *clk; /* optional interface clock / MSTP bit */
};
+struct perf_event_attr;
struct perf_event;
struct task_struct;
struct pmu;
@@ -54,8 +54,10 @@ static inline int hw_breakpoint_slots(int type)
}
/* arch/sh/kernel/hw_breakpoint.c */
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/sh/include/asm/kexec.h b/arch/sh/include/asm/kexec.h
index fd5f331a3912..927d80ba2332 100644
--- a/arch/sh/include/asm/kexec.h
+++ b/arch/sh/include/asm/kexec.h
@@ -4,6 +4,7 @@
#include <asm/ptrace.h>
#include <asm/string.h>
+#include <linux/kernel.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -61,7 +62,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
__asm__ __volatile__ ("stc gbr, %0" : "=r" (newregs->gbr));
__asm__ __volatile__ ("stc sr, %0" : "=r" (newregs->sr));
- newregs->pc = (unsigned long)current_text_addr();
+ newregs->pc = _THIS_IP_;
}
}
#else
diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h
index 85d8bcaa8493..6171682f7798 100644
--- a/arch/sh/include/asm/kprobes.h
+++ b/arch/sh/include/asm/kprobes.h
@@ -27,7 +27,6 @@ struct kprobe;
void arch_remove_kprobe(struct kprobe *);
void kretprobe_trampoline(void);
-void jprobe_return_end(void);
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
@@ -43,9 +42,6 @@ struct prev_kprobe {
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned long kprobe_status;
- unsigned long jprobe_saved_r15;
- struct pt_regs jprobe_saved_regs;
- kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index dc80041f7363..59673f8a3379 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -12,7 +12,7 @@ endif
CFLAGS_REMOVE_return_address.o = -pg
-obj-y := debugtraps.o dma-nommu.o dumpstack.o \
+obj-y := debugtraps.o dumpstack.o \
idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
machvec.o nmi_debug.o process.o \
process_$(BITS).o ptrace.o ptrace_$(BITS).o \
@@ -45,7 +45,7 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o
obj-$(CONFIG_HIBERNATION) += swsusp.o
obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o
-
+obj-$(CONFIG_DMA_NONCOHERENT) += dma-coherent.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
ccflags-y := -Werror
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index fe844222f3f6..af01664f7b4c 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -260,7 +260,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU2H1]),
CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
- CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[HWBLK_CEU]),
+ CLKDEV_DEV_ID("ceu.0", &mstp_clks[HWBLK_CEU]),
CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU2H0]),
CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c
new file mode 100644
index 000000000000..a0021eef956b
--- /dev/null
+++ b/arch/sh/kernel/dma-coherent.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include <asm/addrspace.h>
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ void *ret, *ret_nocache;
+ int order = get_order(size);
+
+ gfp |= __GFP_ZERO;
+
+ ret = (void *)__get_free_pages(gfp, order);
+ if (!ret)
+ return NULL;
+
+ /*
+ * Pages from the page allocator may have data present in
+ * cache. So flush the cache before using uncached memory.
+ */
+ arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
+ DMA_BIDIRECTIONAL);
+
+ ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
+ if (!ret_nocache) {
+ free_pages((unsigned long)ret, order);
+ return NULL;
+ }
+
+ split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
+
+ *dma_handle = virt_to_phys(ret);
+ if (!WARN_ON(!dev))
+ *dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
+
+ return ret_nocache;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ int order = get_order(size);
+ unsigned long pfn = (dma_handle >> PAGE_SHIFT);
+ int k;
+
+ if (!WARN_ON(!dev))
+ pfn += dev->dma_pfn_offset;
+
+ for (k = 0; k < (1 << order); k++)
+ __free_pages(pfn_to_page(pfn + k), 0);
+
+ iounmap(vaddr);
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
+
+ switch (dir) {
+ case DMA_FROM_DEVICE: /* invalidate only */
+ __flush_invalidate_region(addr, size);
+ break;
+ case DMA_TO_DEVICE: /* writeback only */
+ __flush_wback_region(addr, size);
+ break;
+ case DMA_BIDIRECTIONAL: /* writeback and invalidate */
+ __flush_purge_region(addr, size);
+ break;
+ default:
+ BUG();
+ }
+}
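
The switch above encodes the standard cache-maintenance rule for non-coherent DMA: invalidate before the CPU reads what the device wrote, write back before the device reads what the CPU wrote, and purge (writeback plus invalidate) for bidirectional buffers. A minimal userspace model of that dispatch, in plain C; the enum order matches the kernel's, everything else is illustrative:

#include <stdio.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Model only: names the cache operation the kernel code would perform. */
static const char *cache_op_for(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:	/* device wrote; CPU must not read stale lines */
		return "invalidate";
	case DMA_TO_DEVICE:	/* CPU wrote; device must see the dirty lines */
		return "writeback";
	case DMA_BIDIRECTIONAL:
		return "writeback+invalidate";
	default:
		return "BUG";
	}
}

int main(void)
{
	printf("%s\n", cache_op_for(DMA_TO_DEVICE));	/* -> writeback */
	return 0;
}
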
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
deleted file mode 100644
index 3e3a32fc676e..000000000000
--- a/arch/sh/kernel/dma-nommu.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * DMA mapping support for platforms lacking IOMMUs.
- *
- * Copyright (C) 2009 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <asm/cacheflush.h>
-
-static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t addr = page_to_phys(page) + offset
- - PFN_PHYS(dev->dma_pfn_offset);
-
- WARN_ON(size == 0);
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- sh_sync_dma_for_device(page_address(page) + offset, size, dir);
-
- return addr;
-}
-
-static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *s;
- int i;
-
- WARN_ON(nents == 0 || sg[0].length == 0);
-
- for_each_sg(sg, s, nents, i) {
- dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
-
- BUG_ON(!sg_page(s));
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- sh_sync_dma_for_device(sg_virt(s), s->length, dir);
-
- s->dma_address = sg_phys(s) - offset;
- s->dma_length = s->length;
- }
-
- return nents;
-}
-
-#ifdef CONFIG_DMA_NONCOHERENT
-static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- sh_sync_dma_for_device(phys_to_virt(addr), size, dir);
-}
-
-static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nelems, i)
- sh_sync_dma_for_device(sg_virt(s), s->length, dir);
-}
-#endif
-
-const struct dma_map_ops nommu_dma_ops = {
- .alloc = dma_generic_alloc_coherent,
- .free = dma_generic_free_coherent,
- .map_page = nommu_map_page,
- .map_sg = nommu_map_sg,
-#ifdef CONFIG_DMA_NONCOHERENT
- .sync_single_for_device = nommu_sync_single_for_device,
- .sync_sg_for_device = nommu_sync_sg_for_device,
-#endif
-};
-
-void __init no_iommu_init(void)
-{
- if (dma_ops)
- return;
- dma_ops = &nommu_dma_ops;
-}
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 1a2526676a87..bb511e2d9d68 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -599,7 +599,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
* time this function makes its first function call.
*/
if (!pc || !prev)
- pc = (unsigned long)current_text_addr();
+ pc = _THIS_IP_;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 8648ed05ccf0..d9ff3b42da7c 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -124,14 +124,13 @@ static int get_hbp_len(u16 hbp_len)
/*
* Check for virtual address in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->len);
+ va = hw->address;
+ len = get_hbp_len(hw->len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -174,40 +173,40 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
return 0;
}
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->len = SH_BREAKPOINT_LEN_1;
+ hw->len = SH_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->len = SH_BREAKPOINT_LEN_2;
+ hw->len = SH_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->len = SH_BREAKPOINT_LEN_4;
+ hw->len = SH_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
- info->len = SH_BREAKPOINT_LEN_8;
+ hw->len = SH_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
}
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_R:
- info->type = SH_BREAKPOINT_READ;
+ hw->type = SH_BREAKPOINT_READ;
break;
case HW_BREAKPOINT_W:
- info->type = SH_BREAKPOINT_WRITE;
+ hw->type = SH_BREAKPOINT_WRITE;
break;
case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
- info->type = SH_BREAKPOINT_RW;
+ hw->type = SH_BREAKPOINT_RW;
break;
default:
return -EINVAL;
@@ -219,19 +218,20 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
int ret;
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
ret = -EINVAL;
- switch (info->len) {
+ switch (hw->len) {
case SH_BREAKPOINT_LEN_1:
align = 0;
break;
@@ -249,17 +249,10 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
}
/*
- * For kernel-addresses, either the address or symbol name can be
- * specified.
- */
- if (info->name)
- info->address = (unsigned long)kallsyms_lookup_name(info->name);
-
- /*
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
*/
- if (info->address & align)
+ if (hw->address & align)
return -EINVAL;
return 0;
@@ -346,7 +339,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
perf_bp_event(bp, args->regs);
/* Deliver the signal to userspace */
- if (!arch_check_bp_in_kernelspace(bp)) {
+ if (!arch_check_bp_in_kernelspace(&bp->hw.info)) {
force_sig_fault(SIGTRAP, TRAP_HWBKPT,
(void __user *)NULL, current);
}
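
The refactor splits decoding (hw_breakpoint_arch_parse) from the kernel-space check, with both now operating on a plain arch_hw_breakpoint instead of digging into the perf_event. The alignment rule the validator enforces, that an address must be aligned to the breakpoint length, can be modeled in standalone C (the constants are hypothetical stand-ins, not the kernel's values):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the HW_BREAKPOINT_LEN_* lengths, in bytes. */
enum { LEN_1 = 1, LEN_2 = 2, LEN_4 = 4, LEN_8 = 8 };

/* Model of the check at the end of hw_breakpoint_arch_parse(). */
static int validate_bp(unsigned long addr, int len)
{
	unsigned int align;

	switch (len) {
	case LEN_1: align = 0; break;
	case LEN_2: align = 1; break;
	case LEN_4: align = 3; break;
	case LEN_8: align = 7; break;
	default: return -EINVAL;
	}
	return (addr & align) ? -EINVAL : 0;
}

int main(void)
{
	printf("%d %d\n", validate_bp(0x1000, LEN_4),	/* 0: aligned */
			  validate_bp(0x1002, LEN_4));	/* -22: rejected */
	return 0;
}
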
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 52a5e11247d1..241e903dd3ee 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -248,11 +248,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
- } else {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- goto ss_probe;
- }
}
goto no_kprobe;
}
@@ -277,11 +272,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -358,8 +355,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
regs->pc = orig_ret_address;
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
-
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
@@ -508,14 +503,8 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
} else {
- if (kprobe_handler(args->regs)) {
+ if (kprobe_handler(args->regs))
ret = NOTIFY_STOP;
- } else {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler &&
- p->break_handler(p, args->regs))
- ret = NOTIFY_STOP;
- }
}
}
}
@@ -523,57 +512,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_r15 = regs->regs[15];
- addr = kcb->jprobe_saved_r15;
-
- /*
- * TBD: As Linus pointed out, gcc assumes that the callee
- * owns the argument space and could overwrite it, e.g.
- * tailcall optimization. So, to be absolutely safe
- * we also save and restore enough stack bytes to cover
- * the argument area.
- */
- memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
- MIN_STACK_SIZE(addr));
-
- regs->pc = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long stack_addr = kcb->jprobe_saved_r15;
- u8 *addr = (u8 *)regs->pc;
-
- if ((addr >= (u8 *)jprobe_return) &&
- (addr <= (u8 *)jprobe_return_end)) {
- *regs = kcb->jprobe_saved_regs;
-
- memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
- MIN_STACK_SIZE(stack_addr));
-
- kcb->kprobe_status = KPROBE_HIT_SS;
- preempt_enable_no_resched();
- return 1;
- }
-
- return 0;
-}
-
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 50cdd1349015..02ed2df25a54 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -225,8 +225,6 @@ config HUGETLB_PAGE_SIZE_512MB
endchoice
-source "mm/Kconfig"
-
config SCHED_MC
bool "Multi-core scheduler support"
depends on SMP
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index fceb2adfcac7..792f36129062 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -1,10 +1,6 @@
/*
- * arch/sh/mm/consistent.c
- *
* Copyright (C) 2004 - 2007 Paul Mundt
*
- * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -13,90 +9,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-debug.h>
#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <asm/cacheflush.h>
-#include <asm/addrspace.h>
-
-const struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
-void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *ret, *ret_nocache;
- int order = get_order(size);
-
- gfp |= __GFP_ZERO;
-
- ret = (void *)__get_free_pages(gfp, order);
- if (!ret)
- return NULL;
-
- /*
- * Pages from the page allocator may have data present in
- * cache. So flush the cache before using uncached memory.
- */
- sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);
-
- ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
- if (!ret_nocache) {
- free_pages((unsigned long)ret, order);
- return NULL;
- }
-
- split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
-
- *dma_handle = virt_to_phys(ret);
- if (!WARN_ON(!dev))
- *dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
-
- return ret_nocache;
-}
-
-void dma_generic_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- int order = get_order(size);
- unsigned long pfn = dma_handle >> PAGE_SHIFT;
- int k;
-
- if (!WARN_ON(!dev))
- pfn += dev->dma_pfn_offset;
-
- for (k = 0; k < (1 << order); k++)
- __free_pages(pfn_to_page(pfn + k), 0);
-
- iounmap(vaddr);
-}
-
-void sh_sync_dma_for_device(void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- void *addr;
-
- addr = __in_29bit_mode() ?
- (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
-
- switch (direction) {
- case DMA_FROM_DEVICE: /* invalidate only */
- __flush_invalidate_region(addr, size);
- break;
- case DMA_TO_DEVICE: /* writeback only */
- __flush_wback_region(addr, size);
- break;
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- __flush_purge_region(addr, size);
- break;
- default:
- BUG();
- }
-}
-EXPORT_SYMBOL(sh_sync_dma_for_device);
static int __init memchunk_setup(char *str)
{
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index b8e7bb84b6b1..6defd2c6d9b1 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -313,7 +313,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, unsigned int fault)
+ unsigned long address, vm_fault_t fault)
{
/*
* Pagefault was interrupted by SIGKILL. We have no reason to
@@ -396,7 +396,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct * vma;
- int fault;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
tsk = current;
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 4034035fbede..7713c084d040 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -339,22 +339,12 @@ void __init paging_init(void)
free_area_init_nodes(max_zone_pfns);
}
-/*
- * Early initialization for any I/O MMUs we might have.
- */
-static void __init iommu_init(void)
-{
- no_iommu_init();
-}
-
unsigned int mem_init_done = 0;
void __init mem_init(void)
{
pg_data_t *pgdat;
- iommu_init();
-
high_memory = NULL;
for_each_online_pgdat(pgdat)
high_memory = max_t(void *, high_memory,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 0f535debf802..e6f2a38d2e61 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -45,9 +45,13 @@ config SPARC
select LOCKDEP_SMALL if LOCKDEP
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
+ select HAVE_MEMBLOCK
+ select NO_BOOTMEM
config SPARC32
def_bool !64BIT
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select DMA_NONCOHERENT_OPS
select GENERIC_ATOMIC64
select CLZ_TAB
select HAVE_UID16
@@ -60,7 +64,6 @@ config SPARC64
select HAVE_KRETPROBES
select HAVE_KPROBES
select HAVE_RCU_TABLE_FREE if SMP
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_DYNAMIC_FTRACE
@@ -79,7 +82,6 @@ config SPARC64
select IRQ_PREFLOW_FASTEOI
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_C_RECORDMCOUNT
- select NO_BOOTMEM
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
select HAVE_NMI
@@ -155,10 +157,6 @@ config PGTABLE_LEVELS
config ARCH_SUPPORTS_UPROBES
def_bool y if SPARC64
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "Processor type and features"
config SMP
@@ -331,8 +329,6 @@ config FORCE_MAX_ZONEORDER
This config option is actually maximum order plus one. For example,
a value of 13 means that the largest free memory block is 2^12 pages.
-source "mm/Kconfig"
-
if SPARC64
source "kernel/power/Kconfig"
endif
@@ -355,8 +351,6 @@ config SCHED_MC
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
-source "kernel/Kconfig.preempt"
-
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
depends on SPARC64
@@ -556,10 +550,6 @@ config SPARC64_PCI_MSI
endmenu
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
config COMPAT
bool
depends on SPARC64
@@ -574,20 +564,4 @@ config SYSVIPC_COMPAT
depends on COMPAT && SYSVIPC
default y
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "drivers/sbus/char/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/sparc/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 4aef29a11925..50a918d496c8 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -1,12 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
bool
default y
-source "lib/Kconfig.debug"
-
config DEBUG_DCFLUSH
bool "D-cache flush debugging"
depends on SPARC64 && DEBUG_KERNEL
@@ -21,5 +18,3 @@ config FRAME_POINTER
bool
depends on MCOUNT
default y
-
-endmenu
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 966a13d2b127..e32ef20de567 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -9,10 +9,10 @@
# Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
# We are not yet configured - so test on arch
-ifeq ($(ARCH),sparc)
- KBUILD_DEFCONFIG := sparc32_defconfig
-else
+ifeq ($(ARCH),sparc64)
KBUILD_DEFCONFIG := sparc64_defconfig
+else
+ KBUILD_DEFCONFIG := sparc32_defconfig
endif
ifeq ($(CONFIG_SPARC32),y)
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index c9d2b922734b..bc9cc26efa3d 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -144,7 +144,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "md5-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 1b3e47accc74..4d6d7faf728e 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -139,7 +139,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 285268ca9279..54c4de2db188 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -169,7 +169,6 @@ static struct shash_alg sha256 = {
.cra_name = "sha256",
.cra_driver_name= "sha256-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -185,7 +184,6 @@ static struct shash_alg sha224 = {
.cra_name = "sha224",
.cra_driver_name= "sha224-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index 11eb36c3fc8c..4c55e97a4408 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -154,7 +154,6 @@ static struct shash_alg sha512 = {
.cra_name = "sha512",
.cra_driver_name= "sha512-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -170,7 +169,6 @@ static struct shash_alg sha384 = {
.cra_name = "sha384",
.cra_driver_name= "sha384-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index ac67828da201..410b263ef5c8 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += module.h
+generic-y += msi.h
generic-y += preempt.h
generic-y += rwsem.h
generic-y += serial.h
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index d13ce517f4b9..94c930f0bc62 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -27,17 +27,17 @@ int atomic_fetch_or(int, atomic_t *);
int atomic_fetch_xor(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int);
int atomic_xchg(atomic_t *, int);
-int __atomic_add_unless(atomic_t *, int, int);
+int atomic_fetch_add_unless(atomic_t *, int, int);
void atomic_set(atomic_t *, int);
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+
#define atomic_set_release(v, i) atomic_set((v), (i))
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
-#define atomic_inc(v) ((void)atomic_add_return( 1, (v)))
-#define atomic_dec(v) ((void)atomic_add_return( -1, (v)))
#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
@@ -46,22 +46,4 @@ void atomic_set(atomic_t *, int);
#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v)))
-#define atomic_inc_return(v) (atomic_add_return( 1, (v)))
-#define atomic_dec_return(v) (atomic_add_return( -1, (v)))
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
#endif /* !(__ARCH_SPARC_ATOMIC__) */
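
atomic_fetch_add_unless(v, a, u) is the generic name for the old __atomic_add_unless(): add a to *v unless the current value is u, and return the value seen before the (possible) add. Its semantics can be modeled in portable C11; this is a sketch of the contract only, not the sparc32 hashed-spinlock implementation:

#include <stdatomic.h>
#include <stdio.h>

/* Returns the old value; the add happens only if the old value != u. */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
		;	/* CAS failure reloads c; retry */
	return c;
}

int main(void)
{
	atomic_int v = 5;
	int old = fetch_add_unless(&v, 1, 5);	/* old value equals u: no add */

	printf("%d %d\n", old, atomic_load(&v));	/* 5 5 */
	return 0;
}
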
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 28db058d471b..6963482c81d8 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -50,38 +50,6 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_dec_return(v) atomic_sub_return(1, v)
-#define atomic64_dec_return(v) atomic64_sub_return(1, v)
-
-#define atomic_inc_return(v) atomic_add_return(1, v)
-#define atomic64_inc_return(v) atomic64_add_return(1, v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic64_inc(v) atomic64_add(1, v)
-
-#define atomic_dec(v) atomic_sub(1, v)
-#define atomic64_dec(v) atomic64_sub(1, v)
-
-#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
-
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
static inline int atomic_xchg(atomic_t *v, int new)
@@ -89,42 +57,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
return xchg(&v->counter, new);
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
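
atomic64_dec_if_positive() is kept (and now advertised via the #define) because the generic layer cannot derive it: decrement only if the result stays non-negative, and return the decremented value; a negative return tells the caller nothing was stored. A portable C11 model of the cmpxchg loop:

#include <stdatomic.h>
#include <stdio.h>

/* Decrement *v unless the result would go negative; return the new value. */
static long dec_if_positive(atomic_long *v)
{
	long c = atomic_load(v);
	long dec;

	do {
		dec = c - 1;
		if (dec < 0)		/* would underflow: do not store */
			break;
	} while (!atomic_compare_exchange_weak(v, &c, dec));
	return dec;
}

int main(void)
{
	atomic_long v = 1;
	long a = dec_if_positive(&v);	/* 1 -> 0, returns 0 */
	long b = dec_if_positive(&v);	/* stays 0, returns -1 */

	printf("%ld %ld\n", a, b);
	return 0;
}
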
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 12ae33daf52f..e17566376934 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -7,7 +7,6 @@
#include <linux/dma-debug.h>
extern const struct dma_map_ops *dma_ops;
-extern const struct dma_map_ops pci32_dma_ops;
extern struct bus_type pci_bus_type;
@@ -15,11 +14,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
- return &pci32_dma_ops;
+ return &dma_noncoherent_ops;
#endif
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
if (bus == &pci_bus_type)
- return &pci32_dma_ops;
+ return &dma_noncoherent_ops;
#endif
return dma_ops;
}
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9a1e9cbc7e6d..b162c23ae8c2 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -243,35 +243,42 @@ void insb(unsigned long, void *, unsigned long);
void insw(unsigned long, void *, unsigned long);
void insl(unsigned long, void *, unsigned long);
-static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
+static inline void readsb(void __iomem *port, void *buf, unsigned long count)
{
insb((unsigned long __force)port, buf, count);
}
-static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
+static inline void readsw(void __iomem *port, void *buf, unsigned long count)
{
insw((unsigned long __force)port, buf, count);
}
-static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
+static inline void readsl(void __iomem *port, void *buf, unsigned long count)
{
insl((unsigned long __force)port, buf, count);
}
-static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
+static inline void writesb(void __iomem *port, const void *buf, unsigned long count)
{
outsb((unsigned long __force)port, buf, count);
}
-static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
+static inline void writesw(void __iomem *port, const void *buf, unsigned long count)
{
outsw((unsigned long __force)port, buf, count);
}
-static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
+static inline void writesl(void __iomem *port, const void *buf, unsigned long count)
{
outsl((unsigned long __force)port, buf, count);
}
+#define ioread8_rep(p,d,l) readsb(p,d,l)
+#define ioread16_rep(p,d,l) readsw(p,d,l)
+#define ioread32_rep(p,d,l) readsl(p,d,l)
+#define iowrite8_rep(p,d,l) writesb(p,d,l)
+#define iowrite16_rep(p,d,l) writesw(p,d,l)
+#define iowrite32_rep(p,d,l) writesl(p,d,l)
+
/* Valid I/O Space regions are anywhere, because each PCI bus supported
* can live in an arbitrary area of the physical address range.
*/
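
With native reads*/writes* in place, the ioread*_rep()/iowrite*_rep() families become thin aliases. A hypothetical driver-side use follows; only ioread32_rep() and its (port, buffer, word count) contract come from the kernel API, the device and function names are made up:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical: drain `words` 32-bit entries from a FIFO data register.
 * ioread32_rep() reads repeatedly from the same (non-incrementing) MMIO
 * address; on sparc64 this now lands in readsl(). */
static void drain_fifo(void __iomem *fifo, u32 *buf, unsigned long words)
{
	ioread32_rep(fifo, buf, words);
}
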
diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h
index 3704490b4488..bfcaa6326c20 100644
--- a/arch/sparc/include/asm/kprobes.h
+++ b/arch/sparc/include/asm/kprobes.h
@@ -44,7 +44,6 @@ struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_orig_tnpc;
unsigned long kprobe_orig_tstate_pil;
- struct pt_regs jprobe_saved_regs;
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/sparc/include/asm/msi.h b/arch/sparc/include/asm/msi.h
deleted file mode 100644
index 3c17c1074431..000000000000
--- a/arch/sparc/include/asm/msi.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * msi.h: Defines specific to the MBus - Sbus - Interface.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- */
-
-#ifndef _SPARC_MSI_H
-#define _SPARC_MSI_H
-
-/*
- * Locations of MSI Registers.
- */
-#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */
-
-/*
- * Useful bits in the MSI Registers.
- */
-#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */
-
-
-static inline void msi_set_sync(void)
-{
- __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
- "andn %%g3, %2, %%g3\n\t"
- "sta %%g3, [%0] %1\n\t" : :
- "r" (MSI_MBUS_ARBEN),
- "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
-}
-
-#endif /* !(_SPARC_MSI_H) */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index d58520c2e6ff..7ea35e5601b6 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -101,6 +101,9 @@
#define SO_ZEROCOPY 0x003e
+#define SO_TXTIME 0x003f
+#define SCM_TXTIME SO_TXTIME
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index cca9134cfa7d..6799c93c9f27 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -38,6 +38,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
+#include <linux/dma-noncoherent.h>
#include <linux/of_device.h>
#include <asm/io.h>
@@ -434,42 +435,41 @@ arch_initcall(sparc_register_ioport);
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices.
*/
-static void *pci32_alloc_coherent(struct device *dev, size_t len,
- dma_addr_t *pba, gfp_t gfp,
- unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
{
- unsigned long len_total = PAGE_ALIGN(len);
+ unsigned long len_total = PAGE_ALIGN(size);
void *va;
struct resource *res;
int order;
- if (len == 0) {
+ if (size == 0) {
return NULL;
}
- if (len > 256*1024) { /* __get_free_pages() limit */
+ if (size > 256*1024) { /* __get_free_pages() limit */
return NULL;
}
order = get_order(len_total);
va = (void *) __get_free_pages(gfp, order);
if (va == NULL) {
- printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
+ printk("%s: no %ld pages\n", __func__, len_total>>PAGE_SHIFT);
goto err_nopages;
}
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
- printk("pci_alloc_consistent: no core\n");
+ printk("%s: no core\n", __func__);
goto err_nomem;
}
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
- printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
+ printk("%s: cannot occupy 0x%lx", __func__, len_total);
goto err_nova;
}
srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
- *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
+ *dma_handle = virt_to_phys(va);
return (void *) res->start;
err_nova:
@@ -481,184 +481,53 @@ err_nopages:
}
/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ * cpu_addr is what was returned from arch_dma_alloc, size must be the same
+ * as what was passed into arch_dma_alloc, and likewise dma_addr must be the
+ * same as what *dma_handle was set to.
*
* References to the memory and mappings associated with cpu_addr/dma_addr
* past this call are illegal.
*/
-static void pci32_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba, unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
{
struct resource *res;
if ((res = lookup_resource(&_sparc_dvma,
- (unsigned long)p)) == NULL) {
- printk("pci_free_consistent: cannot free %p\n", p);
+ (unsigned long)cpu_addr)) == NULL) {
+ printk("%s: cannot free %p\n", __func__, cpu_addr);
return;
}
- if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
- printk("pci_free_consistent: unaligned va %p\n", p);
+ if (((unsigned long)cpu_addr & (PAGE_SIZE-1)) != 0) {
+ printk("%s: unaligned va %p\n", __func__, cpu_addr);
return;
}
- n = PAGE_ALIGN(n);
- if (resource_size(res) != n) {
- printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
- (long)resource_size(res), (long)n);
+ size = PAGE_ALIGN(size);
+ if (resource_size(res) != size) {
+ printk("%s: region 0x%lx asked 0x%zx\n", __func__,
+ (long)resource_size(res), size);
return;
}
- dma_make_coherent(ba, n);
- srmmu_unmapiorange((unsigned long)p, n);
+ dma_make_coherent(dma_addr, size);
+ srmmu_unmapiorange((unsigned long)cpu_addr, size);
release_resource(res);
kfree(res);
- free_pages((unsigned long)phys_to_virt(ba), get_order(n));
-}
-
-/*
- * Same as pci_map_single, but with pages.
- */
-static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- /* IIep is write-through, not flushing. */
- return page_to_phys(page) + offset;
-}
-
-static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_make_coherent(ba, PAGE_ALIGN(size));
-}
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int n;
-
- /* IIep is write-through, not flushing. */
- for_each_sg(sgl, sg, nents, n) {
- sg->dma_address = sg_phys(sg);
- sg->dma_length = sg->length;
- }
- return nents;
-}
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int n;
-
- if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
- for_each_sg(sgl, sg, nents, n) {
- dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
- }
- }
-}
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation before or after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
- size_t size, enum dma_data_direction dir)
-{
- if (dir != PCI_DMA_TODEVICE) {
- dma_make_coherent(ba, PAGE_ALIGN(size));
- }
-}
-
-static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
- size_t size, enum dma_data_direction dir)
-{
- if (dir != PCI_DMA_TODEVICE) {
- dma_make_coherent(ba, PAGE_ALIGN(size));
- }
+ free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size));
}
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_* but for a scatter-gather list,
- * same rules and usage.
- */
-static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int n;
-
- if (dir != PCI_DMA_TODEVICE) {
- for_each_sg(sgl, sg, nents, n) {
- dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
- }
- }
-}
+/* IIep is write-through, not flushing on CPU-to-device transfers. */
-static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- struct scatterlist *sg;
- int n;
-
- if (dir != PCI_DMA_TODEVICE) {
- for_each_sg(sgl, sg, nents, n) {
- dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
- }
- }
+ if (dir != PCI_DMA_TODEVICE)
+ dma_make_coherent(paddr, PAGE_ALIGN(size));
}
-/* note: leon re-uses pci32_dma_ops */
-const struct dma_map_ops pci32_dma_ops = {
- .alloc = pci32_alloc_coherent,
- .free = pci32_free_coherent,
- .map_page = pci32_map_page,
- .unmap_page = pci32_unmap_page,
- .map_sg = pci32_map_sg,
- .unmap_sg = pci32_unmap_sg,
- .sync_single_for_cpu = pci32_sync_single_for_cpu,
- .sync_single_for_device = pci32_sync_single_for_device,
- .sync_sg_for_cpu = pci32_sync_sg_for_cpu,
- .sync_sg_for_device = pci32_sync_sg_for_device,
-};
-EXPORT_SYMBOL(pci32_dma_ops);
-
const struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);
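
The contract spelled out above arch_dma_free() is what the generic wrappers enforce on callers. A hypothetical driver-side sketch; dma_alloc_coherent()/dma_free_coherent() are the real generic entry points that route into these hooks, everything else is illustrative:

#include <linux/dma-mapping.h>

/* Hypothetical: one coherent buffer, freed with the same size and handle
 * the allocation returned, as the comment above requires. */
static int coherent_buf_demo(struct device *dev)
{
	dma_addr_t handle;
	size_t size = 4096;
	void *cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;
	/* ... program `handle` into the device, touch `cpu` from the CPU ... */
	dma_free_coherent(dev, size, cpu, handle);
	return 0;
}
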
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index ab4ba4347941..dfbca2470536 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -147,18 +147,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_REENTER;
prepare_singlestep(p, regs, kcb);
return 1;
- } else {
- if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+ } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
*/
- ret = 1;
- goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
+ ret = 1;
}
goto no_kprobe;
}
@@ -181,10 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -441,53 +437,6 @@ out:
exception_exit(prev_state);
}
-/* Jprobes support. */
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
-
- regs->tpc = (unsigned long) jp->entry;
- regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
- regs->tstate |= TSTATE_PIL;
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- register unsigned long orig_fp asm("g1");
-
- orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
- __asm__ __volatile__("\n"
-"1: cmp %%sp, %0\n\t"
- "blu,a,pt %%xcc, 1b\n\t"
- " restore\n\t"
- ".globl jprobe_return_trap_instruction\n"
-"jprobe_return_trap_instruction:\n\t"
- "ta 0x70"
- : /* no outputs */
- : "r" (orig_fp));
-}
-
-extern void jprobe_return_trap_instruction(void);
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- u32 *addr = (u32 *) regs->tpc;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (addr == (u32 *) jprobe_return_trap_instruction) {
- memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-
/* The value stored in the return address register is actually 2
* instructions before where the callee will return to.
* Sequences usually look something like this
@@ -562,9 +511,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
regs->tpc = orig_ret_address;
regs->tnpc = orig_ret_address + 4;
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 2ef8cfa9677e..f0eba72aa1ad 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -814,7 +814,7 @@ static void __init get_tick_patch(void)
}
}
-static void init_tick_ops(struct sparc64_tick_ops *ops)
+static void __init init_tick_ops(struct sparc64_tick_ops *ops)
{
unsigned long freq, quotient, tick;
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 465a901a0ada..281fa634bb1a 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -95,7 +95,7 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
}
EXPORT_SYMBOL(atomic_cmpxchg);
-int __atomic_add_unless(atomic_t *v, int a, int u)
+int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
@@ -107,7 +107,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(__atomic_add_unless);
+EXPORT_SYMBOL(atomic_fetch_add_unless);
/* Atomic operations are already serializing */
void atomic_set(atomic_t *v, int i)
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 9f75b6444bf1..b0440b0edd97 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -166,7 +166,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
unsigned int fixup;
unsigned long g2;
int from_user = !(regs->psr & PSR_PS);
- int fault, code;
+ int code;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (text_fault)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 63166fcf9e25..8f8a604c1300 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -278,7 +278,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned int insn = 0;
- int si_code, fault_code, fault;
+ int si_code, fault_code;
+ vm_fault_t fault;
unsigned long address, mm_rss;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 95fe4f081ba3..92634d4e440c 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/gfp.h>
@@ -101,13 +102,46 @@ static unsigned long calc_max_low_pfn(void)
return tmp;
}
+static void __init find_ramdisk(unsigned long end_of_phys_memory)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long size;
+
+ /* Now check the initial ramdisk, so that it does not extend
+ * past the end of memory.
+ */
+ if (sparc_ramdisk_image) {
+ if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
+ sparc_ramdisk_image -= KERNBASE;
+ initrd_start = sparc_ramdisk_image + phys_base;
+ initrd_end = initrd_start + sparc_ramdisk_size;
+ if (initrd_end > end_of_phys_memory) {
+ printk(KERN_CRIT "initrd extends beyond end of memory "
+ "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
+ initrd_end, end_of_phys_memory);
+ initrd_start = 0;
+ } else {
+ /* Reserve the initrd image area. */
+ size = initrd_end - initrd_start;
+ memblock_reserve(initrd_start, size);
+
+ initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
+ initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
+ }
+ }
+#endif
+}
+
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
- unsigned long bootmap_size, start_pfn;
- unsigned long end_of_phys_memory = 0UL;
- unsigned long bootmap_pfn, bytes_avail, size;
+ unsigned long start_pfn, bytes_avail, size;
+ unsigned long end_of_phys_memory = 0;
+ unsigned long high_pages = 0;
int i;
+ memblock_set_bottom_up(true);
+ memblock_allow_resize();
+
bytes_avail = 0UL;
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
end_of_phys_memory = sp_banks[i].base_addr +
@@ -124,24 +158,25 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
if (sp_banks[i].num_bytes == 0) {
sp_banks[i].base_addr = 0xdeadbeef;
} else {
+ memblock_add(sp_banks[i].base_addr,
+ sp_banks[i].num_bytes);
sp_banks[i+1].num_bytes = 0;
sp_banks[i+1].base_addr = 0xdeadbeef;
}
break;
}
}
+ memblock_add(sp_banks[i].base_addr, sp_banks[i].num_bytes);
}
/* Start with page aligned address of last symbol in kernel
- * image.
+ * image.
*/
start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));
/* Now shift down to get the real physical page frame number. */
start_pfn >>= PAGE_SHIFT;
- bootmap_pfn = start_pfn;
-
max_pfn = end_of_phys_memory >> PAGE_SHIFT;
max_low_pfn = max_pfn;
@@ -150,85 +185,19 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
max_low_pfn = calc_max_low_pfn();
+ high_pages = calc_highpages();
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
- calc_highpages() >> (20 - PAGE_SHIFT));
+ high_pages >> (20 - PAGE_SHIFT));
}
-#ifdef CONFIG_BLK_DEV_INITRD
- /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
- if (sparc_ramdisk_image) {
- if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
- sparc_ramdisk_image -= KERNBASE;
- initrd_start = sparc_ramdisk_image + phys_base;
- initrd_end = initrd_start + sparc_ramdisk_size;
- if (initrd_end > end_of_phys_memory) {
- printk(KERN_CRIT "initrd extends beyond end of memory "
- "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
- initrd_end, end_of_phys_memory);
- initrd_start = 0;
- }
- if (initrd_start) {
- if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
- initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
- bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
- }
- }
-#endif
- /* Initialize the boot-time allocator. */
- bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base,
- max_low_pfn);
-
- /* Now register the available physical memory with the
- * allocator.
- */
- *pages_avail = 0;
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
- unsigned long curr_pfn, last_pfn;
+ find_ramdisk(end_of_phys_memory);
- curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
- if (curr_pfn >= max_low_pfn)
- break;
-
- last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
- if (last_pfn > max_low_pfn)
- last_pfn = max_low_pfn;
-
- /*
- * .. finally, did all the rounding and playing
- * around just make the area go away?
- */
- if (last_pfn <= curr_pfn)
- continue;
-
- size = (last_pfn - curr_pfn) << PAGE_SHIFT;
- *pages_avail += last_pfn - curr_pfn;
-
- free_bootmem(sp_banks[i].base_addr, size);
- }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start) {
- /* Reserve the initrd image area. */
- size = initrd_end - initrd_start;
- reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
- *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
- initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
- }
-#endif
/* Reserve the kernel text/data/bss. */
size = (start_pfn << PAGE_SHIFT) - phys_base;
- reserve_bootmem(phys_base, size, BOOTMEM_DEFAULT);
- *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+ memblock_reserve(phys_base, size);
- /* Reserve the bootmem map. We do not account for it
- * in pages_avail because we will release that memory
- * in free_all_bootmem.
- */
- size = bootmap_size;
- reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
- *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size = memblock_phys_mem_size() - memblock_reserved_size();
+ *pages_avail = (size >> PAGE_SHIFT) - high_pages;
return max_pfn;
}
@@ -322,7 +291,7 @@ void __init mem_init(void)
map_high_region(start_pfn, end_pfn);
}
-
+
mem_init_print_info(NULL);
}
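
The conversion replaces bootmem's bitmap bookkeeping with memblock's two lists: register every RAM bank with memblock_add(), then carve out in-use ranges with memblock_reserve(); free memory is whatever remains. A condensed sketch of the ordering used in bootmem_init() above, with hypothetical addresses and sizes:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

/* Hypothetical early-boot sequence mirroring the code above. */
static void __init example_mem_setup(void)
{
	memblock_set_bottom_up(true);		/* prefer low memory, as bootmem did */
	memblock_allow_resize();

	memblock_add(0x40000000, SZ_64M);	/* one RAM bank (hypothetical) */
	memblock_reserve(0x40000000, SZ_4M);	/* kernel text/data/bss (hypothetical) */
}
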
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 1d70c3f6d986..be9cb0065179 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -37,7 +37,6 @@
#include <asm/mbus.h>
#include <asm/page.h>
#include <asm/asi.h>
-#include <asm/msi.h>
#include <asm/smp.h>
#include <asm/io.h>
@@ -116,6 +115,25 @@ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
set_pte((pte_t *)ctxp, pte);
}
+/*
+ * Locations of MSI Registers.
+ */
+#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */
+
+/*
+ * Useful bits in the MSI Registers.
+ */
+#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */
+
+static void msi_set_sync(void)
+{
+ __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
+ "andn %%g3, %2, %%g3\n\t"
+ "sta %%g3, [%0] %1\n\t" : :
+ "r" (MSI_MBUS_ARBEN),
+ "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
+}
+
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
unsigned long ptp; /* Physical address, shifted right by 4 */
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig
index 20da5a8ca949..6b9938919f0b 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig
@@ -1,4 +1,70 @@
# SPDX-License-Identifier: GPL-2.0
+
+menu "UML-specific options"
+
+config UML
+ bool
+ default y
+ select ARCH_HAS_KCOV
+ select ARCH_NO_PREEMPT
+ select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_UID16
+ select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_DEBUG_KMEMLEAK
+ select GENERIC_IRQ_SHOW
+ select GENERIC_CPU_DEVICES
+ select GENERIC_CLOCKEVENTS
+ select HAVE_GCC_PLUGINS
+ select TTY # Needed for line.c
+
+config MMU
+ bool
+ default y
+
+config NO_IOMEM
+ def_bool y
+
+config ISA
+ bool
+
+config SBUS
+ bool
+
+config PCI
+ bool
+
+config PCMCIA
+ bool
+
+config TRACE_IRQFLAGS_SUPPORT
+ bool
+ default y
+
+config LOCKDEP_SUPPORT
+ bool
+ default y
+
+config STACKTRACE_SUPPORT
+ bool
+ default y
+ select STACKTRACE
+
+config GENERIC_CALIBRATE_DELAY
+ bool
+ default y
+
+config HZ
+ int
+ default 100
+
+config NR_CPUS
+ int
+ range 1 1
+ default 1
+
+source "arch/$(HEADER_ARCH)/um/Kconfig"
+
config STATIC_LINK
bool "Force a static link"
default n
@@ -10,8 +76,6 @@ config STATIC_LINK
Additionally, this option enables using higher memory spaces (up to
2.75G) for UML.
-source "mm/Kconfig"
-
config LD_SCRIPT_STATIC
bool
default y
@@ -23,8 +87,6 @@ config LD_SCRIPT_DYN
depends on !LD_SCRIPT_STATIC
select MODULE_REL_CRCS if MODVERSIONS
-source "fs/Kconfig.binfmt"
-
config HOSTFS
tristate "Host filesystem"
help
@@ -122,3 +184,7 @@ config SECCOMP
defined by each seccomp mode.
If unsure, say Y.
+
+endmenu
+
+source "arch/um/drivers/Kconfig"
diff --git a/arch/um/Kconfig.char b/arch/um/Kconfig.char
deleted file mode 100644
index f184bde7030e..000000000000
--- a/arch/um/Kconfig.char
+++ /dev/null
@@ -1,124 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-menu "UML Character Devices"
-
-config STDERR_CONSOLE
- bool "stderr console"
- default y
- help
- console driver which dumps all printk messages to stderr.
-
-config SSL
- bool "Virtual serial line"
- help
- The User-Mode Linux environment allows you to create virtual serial
- lines on the UML that are usually made to show up on the host as
- ttys or ptys.
-
- See <http://user-mode-linux.sourceforge.net/old/input.html> for more
- information and command line examples of how to use this facility.
-
- Unless you have a specific reason for disabling this, say Y.
-
-config NULL_CHAN
- bool "null channel support"
- help
- This option enables support for attaching UML consoles and serial
- lines to a device similar to /dev/null. Data written to it disappears
- and there is never any data to be read.
-
-config PORT_CHAN
- bool "port channel support"
- help
- This option enables support for attaching UML consoles and serial
- lines to host portals. They may be accessed with 'telnet <host>
- <port number>'. Any number of consoles and serial lines may be
- attached to a single portal, although what UML device you get when
- you telnet to that portal will be unpredictable.
- It is safe to say 'Y' here.
-
-config PTY_CHAN
- bool "pty channel support"
- help
- This option enables support for attaching UML consoles and serial
- lines to host pseudo-terminals. Access to both traditional
- pseudo-terminals (/dev/pty*) and pts pseudo-terminals are controlled
- with this option. The assignment of UML devices to host devices
- will be announced in the kernel message log.
- It is safe to say 'Y' here.
-
-config TTY_CHAN
- bool "tty channel support"
- help
- This option enables support for attaching UML consoles and serial
- lines to host terminals. Access to both virtual consoles
- (/dev/tty*) and the slave side of pseudo-terminals (/dev/ttyp* and
- /dev/pts/*) are controlled by this option.
- It is safe to say 'Y' here.
-
-config XTERM_CHAN
- bool "xterm channel support"
- help
- This option enables support for attaching UML consoles and serial
- lines to xterms. Each UML device so assigned will be brought up in
- its own xterm.
- It is safe to say 'Y' here.
-
-config NOCONFIG_CHAN
- bool
- default !(XTERM_CHAN && TTY_CHAN && PTY_CHAN && PORT_CHAN && NULL_CHAN)
-
-config CON_ZERO_CHAN
- string "Default main console channel initialization"
- default "fd:0,fd:1"
- help
- This is the string describing the channel to which the main console
- will be attached by default. This value can be overridden from the
- command line. The default value is "fd:0,fd:1", which attaches the
- main console to stdin and stdout.
- It is safe to leave this unchanged.
-
-config CON_CHAN
- string "Default console channel initialization"
- default "xterm"
- help
- This is the string describing the channel to which all consoles
- except the main console will be attached by default. This value can
- be overridden from the command line. The default value is "xterm",
- which brings them up in xterms.
- It is safe to leave this unchanged, although you may wish to change
- this if you expect the UML that you build to be run in environments
- which don't have X or xterm available.
-
-config SSL_CHAN
- string "Default serial line channel initialization"
- default "pty"
- help
- This is the string describing the channel to which the serial lines
- will be attached by default. This value can be overridden from the
- command line. The default value is "pty", which attaches them to
- traditional pseudo-terminals.
- It is safe to leave this unchanged, although you may wish to change
- this if you expect the UML that you build to be run in environments
- which don't have a set of /dev/pty* devices.
-
-config UML_SOUND
- tristate "Sound support"
- help
- This option enables UML sound support. If enabled, it will pull in
- soundcore and the UML hostaudio relay, which acts as a intermediary
- between the host's dsp and mixer devices and the UML sound system.
- It is safe to say 'Y' here.
-
-config SOUND
- tristate
- default UML_SOUND
-
-config SOUND_OSS_CORE
- bool
- default UML_SOUND
-
-config HOSTAUDIO
- tristate
- default UML_SOUND
-
-endmenu
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
deleted file mode 100644
index 07f84c842cc3..000000000000
--- a/arch/um/Kconfig.common
+++ /dev/null
@@ -1,60 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config UML
- bool
- default y
- select ARCH_HAS_KCOV
- select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_SECCOMP_FILTER
- select HAVE_UID16
- select HAVE_FUTEX_CMPXCHG if FUTEX
- select HAVE_DEBUG_KMEMLEAK
- select GENERIC_IRQ_SHOW
- select GENERIC_CPU_DEVICES
- select GENERIC_CLOCKEVENTS
- select HAVE_GCC_PLUGINS
- select TTY # Needed for line.c
-
-config MMU
- bool
- default y
-
-config NO_IOMEM
- def_bool y
-
-config ISA
- bool
-
-config SBUS
- bool
-
-config PCI
- bool
-
-config PCMCIA
- bool
-
-config TRACE_IRQFLAGS_SUPPORT
- bool
- default y
-
-config LOCKDEP_SUPPORT
- bool
- default y
-
-config STACKTRACE_SUPPORT
- bool
- default y
- select STACKTRACE
-
-config GENERIC_CALIBRATE_DELAY
- bool
- default y
-
-config HZ
- int
- default 100
-
-config NR_CPUS
- int
- range 1 1
- default 1
diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
index 967d3109689f..2014597605ea 100644
--- a/arch/um/Kconfig.debug
+++ b/arch/um/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config GPROF
bool "Enable gprof support"
@@ -37,5 +34,3 @@ config EARLY_PRINTK
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized.
-
-endmenu
diff --git a/arch/um/Kconfig.rest b/arch/um/Kconfig.rest
deleted file mode 100644
index 08327b9c0cbe..000000000000
--- a/arch/um/Kconfig.rest
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
-source "arch/um/Kconfig.char"
-
-source "drivers/Kconfig"
-
-source "net/Kconfig"
-
-source "arch/um/Kconfig.net"
-
-source "fs/Kconfig"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
-source "arch/um/Kconfig.debug"
diff --git a/arch/um/Makefile b/arch/um/Makefile
index e54dda8a0363..44ddc3e8fa66 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -113,17 +113,8 @@ define archhelp
echo ' find in the kernel root.'
endef
-KBUILD_KCONFIG := $(HOST_DIR)/um/Kconfig
-
archheaders:
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
- kbuild-file=$(HOST_DIR)/include/asm/Kbuild \
- obj=$(HOST_DIR)/include/generated/asm
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
- kbuild-file=$(HOST_DIR)/include/uapi/asm/Kbuild \
- obj=$(HOST_DIR)/include/generated/uapi/asm
- $(Q)$(MAKE) KBUILD_SRC= ARCH=$(HEADER_ARCH) archheaders
-
+ $(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) asm-generic archheaders
archprepare: include/generated/user_constants.h
@@ -169,11 +160,11 @@ define filechk_gen-asm-offsets
echo " *"; \
echo " */"; \
echo ""; \
- sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \
+ sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}" < $<; \
echo ""; )
endef
include/generated/user_constants.h: $(HOST_DIR)/um/user-offsets.s
$(call filechk,gen-asm-offsets)
-export SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS DEV_NULL_PATH
+export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS DEV_NULL_PATH
diff --git a/arch/um/Kconfig.net b/arch/um/drivers/Kconfig
index c390f3deb0dc..2b1aaf7755aa 100644
--- a/arch/um/Kconfig.net
+++ b/arch/um/drivers/Kconfig
@@ -1,5 +1,129 @@
# SPDX-License-Identifier: GPL-2.0
+menu "UML Character Devices"
+
+config STDERR_CONSOLE
+ bool "stderr console"
+ default y
+ help
+ console driver which dumps all printk messages to stderr.
+
+config SSL
+ bool "Virtual serial line"
+ help
+ The User-Mode Linux environment allows you to create virtual serial
+ lines on the UML that are usually made to show up on the host as
+ ttys or ptys.
+
+ See <http://user-mode-linux.sourceforge.net/old/input.html> for more
+ information and command line examples of how to use this facility.
+
+ Unless you have a specific reason for disabling this, say Y.
+
+config NULL_CHAN
+ bool "null channel support"
+ help
+ This option enables support for attaching UML consoles and serial
+ lines to a device similar to /dev/null. Data written to it disappears
+ and there is never any data to be read.
+
+config PORT_CHAN
+ bool "port channel support"
+ help
+ This option enables support for attaching UML consoles and serial
+ lines to host portals. They may be accessed with 'telnet <host>
+ <port number>'. Any number of consoles and serial lines may be
+ attached to a single portal, although what UML device you get when
+ you telnet to that portal will be unpredictable.
+ It is safe to say 'Y' here.
+
+config PTY_CHAN
+ bool "pty channel support"
+ help
+ This option enables support for attaching UML consoles and serial
+ lines to host pseudo-terminals. Access to both traditional
+ pseudo-terminals (/dev/pty*) and pts pseudo-terminals is controlled
+ with this option. The assignment of UML devices to host devices
+ will be announced in the kernel message log.
+ It is safe to say 'Y' here.
+
+config TTY_CHAN
+ bool "tty channel support"
+ help
+ This option enables support for attaching UML consoles and serial
+ lines to host terminals. Access to both virtual consoles
+ (/dev/tty*) and the slave side of pseudo-terminals (/dev/ttyp* and
+ /dev/pts/*) is controlled by this option.
+ It is safe to say 'Y' here.
+
+config XTERM_CHAN
+ bool "xterm channel support"
+ help
+ This option enables support for attaching UML consoles and serial
+ lines to xterms. Each UML device so assigned will be brought up in
+ its own xterm.
+ It is safe to say 'Y' here.
+
+config NOCONFIG_CHAN
+ bool
+ default !(XTERM_CHAN && TTY_CHAN && PTY_CHAN && PORT_CHAN && NULL_CHAN)
+
+config CON_ZERO_CHAN
+ string "Default main console channel initialization"
+ default "fd:0,fd:1"
+ help
+ This is the string describing the channel to which the main console
+ will be attached by default. This value can be overridden from the
+ command line. The default value is "fd:0,fd:1", which attaches the
+ main console to stdin and stdout.
+ It is safe to leave this unchanged.
+
+config CON_CHAN
+ string "Default console channel initialization"
+ default "xterm"
+ help
+ This is the string describing the channel to which all consoles
+ except the main console will be attached by default. This value can
+ be overridden from the command line. The default value is "xterm",
+ which brings them up in xterms.
+ It is safe to leave this unchanged, although you may wish to change
+ this if you expect the UML that you build to be run in environments
+ which don't have X or xterm available.
+
+config SSL_CHAN
+ string "Default serial line channel initialization"
+ default "pty"
+ help
+ This is the string describing the channel to which the serial lines
+ will be attached by default. This value can be overridden from the
+ command line. The default value is "pty", which attaches them to
+ traditional pseudo-terminals.
+ It is safe to leave this unchanged, although you may wish to change
+ this if you expect the UML that you build to be run in environments
+ which don't have a set of /dev/pty* devices.
+
+config UML_SOUND
+ tristate "Sound support"
+ help
+ This option enables UML sound support. If enabled, it will pull in
+ soundcore and the UML hostaudio relay, which acts as an intermediary
+ between the host's dsp and mixer devices and the UML sound system.
+ It is safe to say 'Y' here.
+
+config SOUND
+ tristate
+ default UML_SOUND
+
+config SOUND_OSS_CORE
+ bool
+ default UML_SOUND
+
+config HOSTAUDIO
+ tristate
+ default UML_SOUND
+
+endmenu
+
menu "UML Network Devices"
depends on NET
@@ -211,4 +335,3 @@ config UML_NET_SLIRP
Startup example: "eth0=slirp,FE:FD:01:02:03:04,/usr/local/bin/slirp"
endmenu
-
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index 16b3cebddafb..693319839f69 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -25,10 +25,10 @@ LDFLAGS_vde.o := -r $(shell $(CC) $(CFLAGS) -print-file-name=libvdeplug.a)
targets := pcap_kern.o pcap_user.o vde_kern.o vde_user.o
$(obj)/pcap.o: $(obj)/pcap_kern.o $(obj)/pcap_user.o
- $(LD) -r -dp -o $@ $^ $(LDFLAGS) $(LDFLAGS_pcap.o)
+ $(LD) -r -dp -o $@ $^ $(ld_flags)
$(obj)/vde.o: $(obj)/vde_kern.o $(obj)/vde_user.o
- $(LD) -r -dp -o $@ $^ $(LDFLAGS) $(LDFLAGS_vde.o)
+ $(LD) -r -dp -o $@ $^ $(ld_flags)
#XXX: The call below does not work because the flags are added before the
# object name, so nothing from the library gets linked.
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index ec9a42c14c56..cced82946042 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -72,7 +72,7 @@ good_area:
}
do {
- int fault;
+ vm_fault_t fault;
fault = handle_mm_fault(vma, address, flags);
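
The conversion above (and the matching unicore32 one below) swaps a plain int for the dedicated vm_fault_t typedef, so the bitmask of VM_FAULT_* codes can no longer be confused with an errno-style integer. A minimal userspace sketch of the idea — the typedef and the VM_FAULT_* values here are simplified stand-ins, not the kernel's definitions:

/* sketch.c — a dedicated fault type keeps VM_FAULT_* flags distinct
 * from ordinary ints. Build: cc -Wall sketch.c */
#include <stdio.h>

typedef unsigned int vm_fault_t;            /* stand-in for the kernel type */
#define VM_FAULT_SIGBUS ((vm_fault_t)0x02)  /* illustrative values only */
#define VM_FAULT_MAJOR  ((vm_fault_t)0x04)

static vm_fault_t handle_fake_fault(unsigned long addr)
{
	if (!addr)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_MAJOR;              /* pretend it was a major fault */
}

int main(void)
{
	vm_fault_t fault = handle_fake_fault(0x1000);
	printf("fault flags: %#x\n", fault);
	return 0;
}
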
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 03f991e44288..60eae744d8fd 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -63,10 +63,6 @@ config ARCH_MAY_HAVE_PC_FDC
config ZONE_DMA
def_bool y
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "System Type"
config MMU
@@ -139,12 +135,8 @@ endmenu
menu "Kernel Features"
-source "kernel/Kconfig.preempt"
-
source "kernel/Kconfig.hz"
-source "mm/Kconfig"
-
config LEDS
def_bool y
depends on GPIOLIB
@@ -181,12 +173,6 @@ config CMDLINE_FORCE
endmenu
-menu "Userspace binary formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
menu "Power management options"
source "kernel/power/Kconfig"
@@ -201,8 +187,6 @@ config ARCH_HIBERNATION_POSSIBLE
endmenu
-source "net/Kconfig"
-
if ARCH_PUV3
config PUV3_GPIO
@@ -236,15 +220,3 @@ endmenu
endif
endif
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/unicore32/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/unicore32/Kconfig.debug b/arch/unicore32/Kconfig.debug
index de8dae3abc0a..ca0ff97657ef 100644
--- a/arch/unicore32/Kconfig.debug
+++ b/arch/unicore32/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config EARLY_PRINTK
def_bool DEBUG_OCD
@@ -30,5 +27,3 @@ config DEBUG_OCD
help
Say Y here if you want the debug print routines to direct their
output to the UniCore On-Chip-Debugger channel using CP #1.
-
-endmenu
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 381473412937..8f12a5b50a42 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -168,11 +168,11 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
return vma->vm_flags & mask ? false : true;
}
-static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
- unsigned int flags, struct task_struct *tsk)
+static vm_fault_t __do_pf(struct mm_struct *mm, unsigned long addr,
+ unsigned int fsr, unsigned int flags, struct task_struct *tsk)
{
struct vm_area_struct *vma;
- int fault;
+ vm_fault_t fault;
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
@@ -209,7 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk;
struct mm_struct *mm;
- int fault, sig, code;
+ int sig, code;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
tsk = current;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f1dbb4ee19d7..512003f16889 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -63,7 +63,7 @@ config X86
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_REFCOUNT
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
- select ARCH_HAS_UACCESS_MCSAFE if X86_64
+ select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
@@ -75,6 +75,7 @@ config X86
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_USE_BUILTIN_BSWAP
@@ -123,6 +124,7 @@ config X86
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT
+ select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
@@ -180,13 +182,14 @@ config X86
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if X86_64
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_USER_RETURN_NOTIFIER
+ select HOTPLUG_SMT if SMP
select IRQ_FORCED_THREADING
select NEED_SG_DMA_LENGTH
select PCI_LOCKLESS_CONFIG
@@ -345,8 +348,6 @@ config PGTABLE_LEVELS
default 3 if X86_PAE
default 2
-source "init/Kconfig"
-
config CC_HAS_SANE_STACKPROTECTOR
bool
default $(success,$(srctree)/scripts/gcc-x86_64-has-stack-protector.sh $(CC)) if 64BIT
@@ -355,8 +356,6 @@ config CC_HAS_SANE_STACKPROTECTOR
We have to make sure stack protector is unconditionally disabled if
the compiler produces broken code.
-source "kernel/Kconfig.freezer"
-
menu "Processor type and features"
config ZONE_DMA
@@ -1043,8 +1042,6 @@ config SCHED_MC_PRIO
If unsure say Y here.
-source "kernel/Kconfig.preempt"
-
config UP_LATE_INIT
def_bool y
depends on !SMP && X86_LOCAL_APIC
@@ -1638,8 +1635,6 @@ config ILLEGAL_POINTER_VALUE
default 0 if X86_32
default 0xdead000000000000 if X86_64
-source "mm/Kconfig"
-
config X86_PMEM_LEGACY_DEVICE
bool
@@ -2865,9 +2860,7 @@ config X86_SYSFB
endmenu
-menu "Executable file formats / Emulations"
-
-source "fs/Kconfig.binfmt"
+menu "Binary Emulations"
config IA32_EMULATION
bool "IA32 Emulation"
@@ -2937,20 +2930,6 @@ config X86_DMA_REMAP
config HAVE_GENERIC_GUP
def_bool y
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "drivers/firmware/Kconfig"
-source "fs/Kconfig"
-
-source "arch/x86/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
source "arch/x86/kvm/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index c6dd1d980081..7d68f0c7cfb1 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-source "lib/Kconfig.debug"
-
config EARLY_PRINTK_USB
bool
@@ -410,5 +407,3 @@ endchoice
config FRAME_POINTER
depends on !UNWINDER_ORC && !UNWINDER_GUESS
bool
-
-endmenu
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index a08e82856563..7e3c07d6ad42 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -80,11 +80,6 @@ ifeq ($(CONFIG_X86_32),y)
# alignment instructions.
KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
- # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
- # a lot more stack due to the lack of sharing of stacklots:
- KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0400, \
- $(call cc-option,-fno-unit-at-a-time))
-
# CPU-specific tuning. Anything which can be shared with UML should go here.
include arch/x86/Makefile_32.cpu
KBUILD_CFLAGS += $(cflags-y)
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 45af19921ebd..5296f8c9e7f0 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -13,8 +13,6 @@ KBUILD_CFLAGS += $(call cc-option,-m32)
KBUILD_AFLAGS += $(call cc-option,-m32)
LINK-y += $(call cc-option,-m32)
-export LDFLAGS
-
LDS_EXTRA := -Ui386
export LDS_EXTRA
diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
index 0d41d68131cc..2e1382486e91 100644
--- a/arch/x86/boot/bitops.h
+++ b/arch/x86/boot/bitops.h
@@ -17,6 +17,7 @@
#define _LINUX_BITOPS_H /* Inhibit inclusion of <linux/bitops.h> */
#include <linux/types.h>
+#include <asm/asm.h>
static inline bool constant_test_bit(int nr, const void *addr)
{
@@ -28,7 +29,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
bool v;
const u32 *p = (const u32 *)addr;
- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ asm("btl %2,%1" CC_SET(c) : CC_OUT(c) (v) : "m" (*p), "Ir" (nr));
return v;
}
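
The CC_SET()/CC_OUT() pair used above comes from <asm/asm.h> (hence the new include). On compilers that support asm flag outputs it expands to the "=@cc<cond>" constraint, letting the compiler consume the carry flag directly instead of materializing it with a setc instruction. A standalone sketch of the same idiom, assuming GCC 6+ or Clang on x86:

/* flagout.c — read CF straight out of btl via an asm flag output.
 * Build: cc -O2 flagout.c */
#include <stdio.h>
#include <stdbool.h>

static inline bool test_bit32(int nr, const unsigned int *addr)
{
	bool v;
	asm("btl %2,%1" : "=@ccc" (v) : "m" (*addr), "Ir" (nr));
	return v;
}

int main(void)
{
	unsigned int word = 0x10;           /* bit 4 set */
	printf("bit4=%d bit5=%d\n", test_bit32(4, &word), test_bit32(5, &word));
	return 0;
}
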
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index fa42f895fdde..169c2feda14a 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -106,9 +106,13 @@ define cmd_check_data_rel
done
endef
+# We need to run two commands under "if_changed", so merge them into a
+# single invocation.
+quiet_cmd_check-and-link-vmlinux = LD $@
+ cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
+
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
- $(call if_changed,check_data_rel)
- $(call if_changed,ld)
+ $(call if_changed,check-and-link-vmlinux)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index e57665b4ba1c..1458b1700fc7 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -34,74 +34,13 @@ static void setup_boot_services##bits(struct efi_config *c) \
\
table = (typeof(table))sys_table; \
\
- c->runtime_services = table->runtime; \
- c->boot_services = table->boottime; \
- c->text_output = table->con_out; \
+ c->runtime_services = table->runtime; \
+ c->boot_services = table->boottime; \
+ c->text_output = table->con_out; \
}
BOOT_SERVICES(32);
BOOT_SERVICES(64);
-static inline efi_status_t __open_volume32(void *__image, void **__fh)
-{
- efi_file_io_interface_t *io;
- efi_loaded_image_32_t *image = __image;
- efi_file_handle_32_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- void *handle = (void *)(unsigned long)image->device_handle;
- unsigned long func;
-
- status = efi_call_early(handle_protocol, handle,
- &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to handle fs_proto\n");
- return status;
- }
-
- func = (unsigned long)io->open_volume;
- status = efi_early->call(func, io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk(sys_table, "Failed to open volume\n");
-
- *__fh = fh;
- return status;
-}
-
-static inline efi_status_t __open_volume64(void *__image, void **__fh)
-{
- efi_file_io_interface_t *io;
- efi_loaded_image_64_t *image = __image;
- efi_file_handle_64_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- void *handle = (void *)(unsigned long)image->device_handle;
- unsigned long func;
-
- status = efi_call_early(handle_protocol, handle,
- &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to handle fs_proto\n");
- return status;
- }
-
- func = (unsigned long)io->open_volume;
- status = efi_early->call(func, io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk(sys_table, "Failed to open volume\n");
-
- *__fh = fh;
- return status;
-}
-
-efi_status_t
-efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh)
-{
- if (efi_early->is64)
- return __open_volume64(__image, __fh);
-
- return __open_volume32(__image, __fh);
-}
-
void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
{
efi_call_proto(efi_simple_text_output_protocol, output_string,
@@ -109,23 +48,17 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
}
static efi_status_t
-__setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
{
struct pci_setup_rom *rom = NULL;
efi_status_t status;
unsigned long size;
- uint64_t attributes, romsize;
+ uint64_t romsize;
void *romimage;
- status = efi_call_proto(efi_pci_io_protocol, attributes, pci,
- EfiPciIoAttributeOperationGet, 0ULL,
- &attributes);
- if (status != EFI_SUCCESS)
- return status;
-
/*
- * Some firmware images contain EFI function pointers at the place where the
- * romimage and romsize fields are supposed to be. Typically the EFI
+ * Some firmware images contain EFI function pointers at the place where
+ * the romimage and romsize fields are supposed to be. Typically the EFI
* code is mapped at high addresses, translating to an unrealistically
* large romsize. The UEFI spec limits the size of option ROMs to 16
* MiB so we reject any ROMs over 16 MiB in size to catch this.
@@ -140,16 +73,16 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for rom\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'rom'\n");
return status;
}
memset(rom, 0, sizeof(*rom));
- rom->data.type = SETUP_PCI;
- rom->data.len = size - sizeof(struct setup_data);
- rom->data.next = 0;
- rom->pcilen = pci->romsize;
+ rom->data.type = SETUP_PCI;
+ rom->data.len = size - sizeof(struct setup_data);
+ rom->data.next = 0;
+ rom->pcilen = pci->romsize;
*__rom = rom;
status = efi_call_proto(efi_pci_io_protocol, pci.read, pci,
@@ -185,96 +118,6 @@ free_struct:
return status;
}
-static void
-setup_efi_pci32(struct boot_params *params, void **pci_handle,
- unsigned long size)
-{
- efi_pci_io_protocol_t *pci = NULL;
- efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 *handles = (u32 *)(unsigned long)pci_handle;
- efi_status_t status;
- unsigned long nr_pci;
- struct setup_data *data;
- int i;
-
- data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
- while (data && data->next)
- data = (struct setup_data *)(unsigned long)data->next;
-
- nr_pci = size / sizeof(u32);
- for (i = 0; i < nr_pci; i++) {
- struct pci_setup_rom *rom = NULL;
- u32 h = handles[i];
-
- status = efi_call_early(handle_protocol, h,
- &pci_proto, (void **)&pci);
-
- if (status != EFI_SUCCESS)
- continue;
-
- if (!pci)
- continue;
-
- status = __setup_efi_pci(pci, &rom);
- if (status != EFI_SUCCESS)
- continue;
-
- if (data)
- data->next = (unsigned long)rom;
- else
- params->hdr.setup_data = (unsigned long)rom;
-
- data = (struct setup_data *)rom;
-
- }
-}
-
-static void
-setup_efi_pci64(struct boot_params *params, void **pci_handle,
- unsigned long size)
-{
- efi_pci_io_protocol_t *pci = NULL;
- efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u64 *handles = (u64 *)(unsigned long)pci_handle;
- efi_status_t status;
- unsigned long nr_pci;
- struct setup_data *data;
- int i;
-
- data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
- while (data && data->next)
- data = (struct setup_data *)(unsigned long)data->next;
-
- nr_pci = size / sizeof(u64);
- for (i = 0; i < nr_pci; i++) {
- struct pci_setup_rom *rom = NULL;
- u64 h = handles[i];
-
- status = efi_call_early(handle_protocol, h,
- &pci_proto, (void **)&pci);
-
- if (status != EFI_SUCCESS)
- continue;
-
- if (!pci)
- continue;
-
- status = __setup_efi_pci(pci, &rom);
- if (status != EFI_SUCCESS)
- continue;
-
- if (data)
- data->next = (unsigned long)rom;
- else
- params->hdr.setup_data = (unsigned long)rom;
-
- data = (struct setup_data *)rom;
-
- }
-}
-
/*
* There's no way to return an informative status from this function,
* because any analysis (and printing of error messages) needs to be
@@ -290,6 +133,9 @@ static void setup_efi_pci(struct boot_params *params)
void **pci_handle = NULL;
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
unsigned long size = 0;
+ unsigned long nr_pci;
+ struct setup_data *data;
+ int i;
status = efi_call_early(locate_handle,
EFI_LOCATE_BY_PROTOCOL,
@@ -301,7 +147,7 @@ static void setup_efi_pci(struct boot_params *params)
size, (void **)&pci_handle);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for pci_handle\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'pci_handle'\n");
return;
}
@@ -313,10 +159,34 @@ static void setup_efi_pci(struct boot_params *params)
if (status != EFI_SUCCESS)
goto free_handle;
- if (efi_early->is64)
- setup_efi_pci64(params, pci_handle, size);
- else
- setup_efi_pci32(params, pci_handle, size);
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ nr_pci = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
+ for (i = 0; i < nr_pci; i++) {
+ efi_pci_io_protocol_t *pci = NULL;
+ struct pci_setup_rom *rom;
+
+ status = efi_call_early(handle_protocol,
+ efi_is_64bit() ? ((u64 *)pci_handle)[i]
+ : ((u32 *)pci_handle)[i],
+ &pci_proto, (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = preserve_pci_rom_image(pci, &rom);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (data)
+ data->next = (unsigned long)rom;
+ else
+ params->hdr.setup_data = (unsigned long)rom;
+
+ data = (struct setup_data *)rom;
+ }
free_handle:
efi_call_early(free_pool, pci_handle);
@@ -347,8 +217,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
size + sizeof(struct setup_data), &new);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table,
- "Failed to alloc mem for properties\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'properties'\n");
return;
}
@@ -364,9 +233,9 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
new->next = 0;
data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
- if (!data)
+ if (!data) {
boot_params->hdr.setup_data = (unsigned long)new;
- else {
+ } else {
while (data->next)
data = (struct setup_data *)(unsigned long)data->next;
data->next = (unsigned long)new;
@@ -386,81 +255,55 @@ static void setup_quirks(struct boot_params *boot_params)
}
}
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
static efi_status_t
-setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
+setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
{
- struct efi_uga_draw_protocol *uga = NULL, *first_uga;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+ efi_status_t status;
+ u32 width, height;
+ void **uga_handle = NULL;
+ efi_uga_draw_protocol_t *uga = NULL, *first_uga;
unsigned long nr_ugas;
- u32 *handles = (u32 *)uga_handle;
- efi_status_t status = EFI_INVALID_PARAMETER;
int i;
- first_uga = NULL;
- nr_ugas = size / sizeof(u32);
- for (i = 0; i < nr_ugas; i++) {
- efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 w, h, depth, refresh;
- void *pciio;
- u32 handle = handles[i];
-
- status = efi_call_early(handle_protocol, handle,
- &uga_proto, (void **)&uga);
- if (status != EFI_SUCCESS)
- continue;
-
- efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
-
- status = efi_early->call((unsigned long)uga->get_mode, uga,
- &w, &h, &depth, &refresh);
- if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- *width = w;
- *height = h;
-
- /*
- * Once we've found a UGA supporting PCIIO,
- * don't bother looking any further.
- */
- if (pciio)
- break;
-
- first_uga = uga;
- }
- }
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ size, (void **)&uga_handle);
+ if (status != EFI_SUCCESS)
+ return status;
- return status;
-}
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL,
+ uga_proto, NULL, &size, uga_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
-static efi_status_t
-setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
-{
- struct efi_uga_draw_protocol *uga = NULL, *first_uga;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
- unsigned long nr_ugas;
- u64 *handles = (u64 *)uga_handle;
- efi_status_t status = EFI_INVALID_PARAMETER;
- int i;
+ height = 0;
+ width = 0;
first_uga = NULL;
- nr_ugas = size / sizeof(u64);
+ nr_ugas = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
for (i = 0; i < nr_ugas; i++) {
efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
u32 w, h, depth, refresh;
void *pciio;
- u64 handle = handles[i];
+ unsigned long handle = efi_is_64bit() ? ((u64 *)uga_handle)[i]
+ : ((u32 *)uga_handle)[i];
status = efi_call_early(handle_protocol, handle,
- &uga_proto, (void **)&uga);
+ uga_proto, (void **)&uga);
if (status != EFI_SUCCESS)
continue;
+ pciio = NULL;
efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
- status = efi_early->call((unsigned long)uga->get_mode, uga,
- &w, &h, &depth, &refresh);
+ status = efi_call_proto(efi_uga_draw_protocol, get_mode, uga,
+ &w, &h, &depth, &refresh);
if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- *width = w;
- *height = h;
+ width = w;
+ height = h;
/*
* Once we've found a UGA supporting PCIIO,
@@ -473,59 +316,28 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
}
}
- return status;
-}
-
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
- unsigned long size)
-{
- efi_status_t status;
- u32 width, height;
- void **uga_handle = NULL;
-
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- size, (void **)&uga_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_call_early(locate_handle,
- EFI_LOCATE_BY_PROTOCOL,
- uga_proto, NULL, &size, uga_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- height = 0;
- width = 0;
-
- if (efi_early->is64)
- status = setup_uga64(uga_handle, size, &width, &height);
- else
- status = setup_uga32(uga_handle, size, &width, &height);
-
if (!width && !height)
goto free_handle;
/* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
- si->lfb_depth = 32;
- si->lfb_width = width;
- si->lfb_height = height;
+ si->lfb_depth = 32;
+ si->lfb_width = width;
+ si->lfb_height = height;
- si->red_size = 8;
- si->red_pos = 16;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 0;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
+ si->red_size = 8;
+ si->red_pos = 16;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->blue_size = 8;
+ si->blue_pos = 0;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
free_handle:
efi_call_early(free_pool, uga_handle);
+
return status;
}
@@ -592,7 +404,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
return NULL;
- if (efi_early->is64)
+ if (efi_is_64bit())
setup_boot_services64(efi_early);
else
setup_boot_services32(efi_early);
@@ -607,7 +419,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
status = efi_low_alloc(sys_table, 0x4000, 1,
(unsigned long *)&boot_params);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc lowmem for boot params\n");
+ efi_printk(sys_table, "Failed to allocate lowmem for boot params\n");
return NULL;
}
@@ -623,9 +435,9 @@ struct boot_params *make_boot_params(struct efi_config *c)
* Fill out some of the header fields ourselves because the
* EFI firmware loader doesn't load the first sector.
*/
- hdr->root_flags = 1;
- hdr->vid_mode = 0xffff;
- hdr->boot_flag = 0xAA55;
+ hdr->root_flags = 1;
+ hdr->vid_mode = 0xffff;
+ hdr->boot_flag = 0xAA55;
hdr->type_of_loader = 0x21;
@@ -633,6 +445,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size);
if (!cmdline_ptr)
goto fail;
+
hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
/* Fill in upper bits of command line address, NOP on 32 bit */
boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
@@ -669,10 +482,12 @@ struct boot_params *make_boot_params(struct efi_config *c)
boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32;
return boot_params;
+
fail2:
efi_free(sys_table, options_size, hdr->cmd_line_ptr);
fail:
efi_free(sys_table, 0x4000, (unsigned long)boot_params);
+
return NULL;
}
@@ -684,7 +499,7 @@ static void add_e820ext(struct boot_params *params,
unsigned long size;
e820ext->type = SETUP_E820_EXT;
- e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
+ e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
e820ext->next = 0;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
@@ -698,8 +513,8 @@ static void add_e820ext(struct boot_params *params,
params->hdr.setup_data = (unsigned long)e820ext;
}
-static efi_status_t setup_e820(struct boot_params *params,
- struct setup_data *e820ext, u32 e820ext_size)
+static efi_status_t
+setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size)
{
struct boot_e820_entry *entry = params->e820_table;
struct efi_info *efi = &params->efi_info;
@@ -820,11 +635,10 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
}
struct exit_boot_struct {
- struct boot_params *boot_params;
- struct efi_info *efi;
- struct setup_data *e820ext;
- __u32 e820ext_size;
- bool is64;
+ struct boot_params *boot_params;
+ struct efi_info *efi;
+ struct setup_data *e820ext;
+ __u32 e820ext_size;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -851,25 +665,25 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
first = false;
}
- signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+ signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
+ : EFI32_LOADER_SIGNATURE;
memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
- p->efi->efi_systab = (unsigned long)sys_table_arg;
- p->efi->efi_memdesc_size = *map->desc_size;
- p->efi->efi_memdesc_version = *map->desc_ver;
- p->efi->efi_memmap = (unsigned long)*map->map;
- p->efi->efi_memmap_size = *map->map_size;
+ p->efi->efi_systab = (unsigned long)sys_table_arg;
+ p->efi->efi_memdesc_size = *map->desc_size;
+ p->efi->efi_memdesc_version = *map->desc_ver;
+ p->efi->efi_memmap = (unsigned long)*map->map;
+ p->efi->efi_memmap_size = *map->map_size;
#ifdef CONFIG_X86_64
- p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
- p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
+ p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
+ p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
#endif
return EFI_SUCCESS;
}
-static efi_status_t exit_boot(struct boot_params *boot_params,
- void *handle, bool is64)
+static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
{
unsigned long map_sz, key, desc_size, buff_size;
efi_memory_desc_t *mem_map;
@@ -880,17 +694,16 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
struct efi_boot_memmap map;
struct exit_boot_struct priv;
- map.map = &mem_map;
- map.map_size = &map_sz;
- map.desc_size = &desc_size;
- map.desc_ver = &desc_version;
- map.key_ptr = &key;
- map.buff_size = &buff_size;
- priv.boot_params = boot_params;
- priv.efi = &boot_params->efi_info;
- priv.e820ext = NULL;
- priv.e820ext_size = 0;
- priv.is64 = is64;
+ map.map = &mem_map;
+ map.map_size = &map_sz;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_version;
+ map.key_ptr = &key;
+ map.buff_size = &buff_size;
+ priv.boot_params = boot_params;
+ priv.efi = &boot_params->efi_info;
+ priv.e820ext = NULL;
+ priv.e820ext_size = 0;
/* Might as well exit boot services now */
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -898,10 +711,11 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
if (status != EFI_SUCCESS)
return status;
- e820ext = priv.e820ext;
- e820ext_size = priv.e820ext_size;
+ e820ext = priv.e820ext;
+ e820ext_size = priv.e820ext_size;
+
/* Historic? */
- boot_params->alt_mem_k = 32 * 1024;
+ boot_params->alt_mem_k = 32 * 1024;
status = setup_e820(boot_params, e820ext, e820ext_size);
if (status != EFI_SUCCESS)
@@ -914,8 +728,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
* On success we return a pointer to a boot_params structure, and NULL
* on failure.
*/
-struct boot_params *efi_main(struct efi_config *c,
- struct boot_params *boot_params)
+struct boot_params *
+efi_main(struct efi_config *c, struct boot_params *boot_params)
{
struct desc_ptr *gdt = NULL;
efi_loaded_image_t *image;
@@ -924,13 +738,11 @@ struct boot_params *efi_main(struct efi_config *c,
struct desc_struct *desc;
void *handle;
efi_system_table_t *_table;
- bool is64;
efi_early = c;
_table = (efi_system_table_t *)(unsigned long)efi_early->table;
handle = (void *)(unsigned long)efi_early->image_handle;
- is64 = efi_early->is64;
sys_table = _table;
@@ -938,7 +750,7 @@ struct boot_params *efi_main(struct efi_config *c,
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
goto fail;
- if (is64)
+ if (efi_is_64bit())
setup_boot_services64(efi_early);
else
setup_boot_services32(efi_early);
@@ -963,7 +775,7 @@ struct boot_params *efi_main(struct efi_config *c,
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
sizeof(*gdt), (void **)&gdt);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for gdt structure\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'gdt' structure\n");
goto fail;
}
@@ -971,7 +783,7 @@ struct boot_params *efi_main(struct efi_config *c,
status = efi_low_alloc(sys_table, gdt->size, 8,
(unsigned long *)&gdt->address);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for gdt\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'gdt'\n");
goto fail;
}
@@ -994,7 +806,7 @@ struct boot_params *efi_main(struct efi_config *c,
hdr->code32_start = bzimage_addr;
}
- status = exit_boot(boot_params, handle, is64);
+ status = exit_boot(boot_params, handle);
if (status != EFI_SUCCESS) {
efi_printk(sys_table, "exit_boot() failed!\n");
goto fail;
@@ -1008,19 +820,20 @@ struct boot_params *efi_main(struct efi_config *c,
if (IS_ENABLED(CONFIG_X86_64)) {
/* __KERNEL32_CS */
- desc->limit0 = 0xffff;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
- desc->s = DESC_TYPE_CODE_DATA;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0xf;
- desc->avl = 0;
- desc->l = 0;
- desc->d = SEG_OP_SIZE_32BIT;
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->limit0 = 0xffff;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+ desc->s = DESC_TYPE_CODE_DATA;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0xf;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = SEG_OP_SIZE_32BIT;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
+
desc++;
} else {
/* Second entry is unused on 32-bit */
@@ -1028,15 +841,16 @@ struct boot_params *efi_main(struct efi_config *c,
}
/* __KERNEL_CS */
- desc->limit0 = 0xffff;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
- desc->s = DESC_TYPE_CODE_DATA;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0xf;
- desc->avl = 0;
+ desc->limit0 = 0xffff;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+ desc->s = DESC_TYPE_CODE_DATA;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0xf;
+ desc->avl = 0;
+
if (IS_ENABLED(CONFIG_X86_64)) {
desc->l = 1;
desc->d = 0;
@@ -1044,41 +858,41 @@ struct boot_params *efi_main(struct efi_config *c,
desc->l = 0;
desc->d = SEG_OP_SIZE_32BIT;
}
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
desc++;
/* __KERNEL_DS */
- desc->limit0 = 0xffff;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
- desc->s = DESC_TYPE_CODE_DATA;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0xf;
- desc->avl = 0;
- desc->l = 0;
- desc->d = SEG_OP_SIZE_32BIT;
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->limit0 = 0xffff;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
+ desc->s = DESC_TYPE_CODE_DATA;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0xf;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = SEG_OP_SIZE_32BIT;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
desc++;
if (IS_ENABLED(CONFIG_X86_64)) {
/* Task segment value */
- desc->limit0 = 0x0000;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_TSS;
- desc->s = 0;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0x0;
- desc->avl = 0;
- desc->l = 0;
- desc->d = 0;
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->limit0 = 0x0000;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_TSS;
+ desc->s = 0;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0x0;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = 0;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
desc++;
}
@@ -1088,5 +902,6 @@ struct boot_params *efi_main(struct efi_config *c,
return boot_params;
fail:
efi_printk(sys_table, "efi_main() failed!\n");
+
return NULL;
}
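
The recurring pattern in this eboot.c rework is collapsing parallel 32/64-bit helpers (__open_volume32/64, setup_efi_pci32/64, setup_uga32/64) into a single body that picks the handle width at each access via efi_is_64bit(). A userspace toy of that mixed-width array walk — is64 is a plain flag here and the EFI handle types are reduced to integers:

/* widthwalk.c — one loop, two handle widths. Build: cc widthwalk.c */
#include <stdio.h>
#include <stdint.h>

static int is64 = 1;                 /* stand-in for efi_is_64bit() */

static uint64_t handle_at(const void *buf, unsigned long i)
{
	return is64 ? ((const uint64_t *)buf)[i]
		    : ((const uint32_t *)buf)[i];
}

int main(void)
{
	uint64_t handles[] = { 0x1111, 0x2222, 0x3333 };
	unsigned long size = sizeof(handles);
	unsigned long nr = size / (is64 ? sizeof(uint64_t) : sizeof(uint32_t));

	for (unsigned long i = 0; i < nr; i++)
		printf("handle %lu = %#llx\n", i,
		       (unsigned long long)handle_at(handles, i));
	return 0;
}
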
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h
index e799dc5c6448..8297387c4676 100644
--- a/arch/x86/boot/compressed/eboot.h
+++ b/arch/x86/boot/compressed/eboot.h
@@ -12,22 +12,22 @@
#define DESC_TYPE_CODE_DATA (1 << 0)
-struct efi_uga_draw_protocol_32 {
+typedef struct {
u32 get_mode;
u32 set_mode;
u32 blt;
-};
+} efi_uga_draw_protocol_32_t;
-struct efi_uga_draw_protocol_64 {
+typedef struct {
u64 get_mode;
u64 set_mode;
u64 blt;
-};
+} efi_uga_draw_protocol_64_t;
-struct efi_uga_draw_protocol {
+typedef struct {
void *get_mode;
void *set_mode;
void *blt;
-};
+} efi_uga_draw_protocol_t;
#endif /* BOOT_COMPRESSED_EBOOT_H */
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index b87a7582853d..d1e19f358b6e 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -23,11 +23,8 @@
* _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
* While both lib/ctype.c and lib/cmdline.c will bring EXPORT_SYMBOL
* which is meaningless and will cause compiling error in some cases.
- * So do not include linux/export.h and define EXPORT_SYMBOL(sym)
- * as empty.
*/
-#define _LINUX_EXPORT_H
-#define EXPORT_SYMBOL(sym)
+#define __DISABLE_EXPORTS
#include "misc.h"
#include "error.h"
@@ -102,7 +99,7 @@ static bool memmap_too_large;
/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
-unsigned long long mem_limit = ULLONG_MAX;
+static unsigned long long mem_limit = ULLONG_MAX;
enum mem_avoid_index {
@@ -215,7 +212,36 @@ static void mem_avoid_memmap(char *str)
memmap_too_large = true;
}
-static int handle_mem_memmap(void)
+/* Store the number of 1GB huge pages which users specified: */
+static unsigned long max_gb_huge_pages;
+
+static void parse_gb_huge_pages(char *param, char *val)
+{
+ static bool gbpage_sz;
+ char *p;
+
+ if (!strcmp(param, "hugepagesz")) {
+ p = val;
+ if (memparse(p, &p) != PUD_SIZE) {
+ gbpage_sz = false;
+ return;
+ }
+
+ if (gbpage_sz)
+ warn("Repeatedly set hugeTLB page size of 1G!\n");
+ gbpage_sz = true;
+ return;
+ }
+
+ if (!strcmp(param, "hugepages") && gbpage_sz) {
+ p = val;
+ max_gb_huge_pages = simple_strtoull(p, &p, 0);
+ return;
+ }
+}
+
+
+static int handle_mem_options(void)
{
char *args = (char *)get_cmd_line_ptr();
size_t len = strlen((char *)args);
@@ -223,7 +249,8 @@ static int handle_mem_memmap(void)
char *param, *val;
u64 mem_size;
- if (!strstr(args, "memmap=") && !strstr(args, "mem="))
+ if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
+ !strstr(args, "hugepages"))
return 0;
tmp_cmdline = malloc(len + 1);
@@ -248,6 +275,8 @@ static int handle_mem_memmap(void)
if (!strcmp(param, "memmap")) {
mem_avoid_memmap(val);
+ } else if (strstr(param, "hugepages")) {
+ parse_gb_huge_pages(param, val);
} else if (!strcmp(param, "mem")) {
char *p = val;
@@ -387,7 +416,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
/* We don't need to set a mapping for setup_data. */
/* Mark the memmap regions we need to avoid */
- handle_mem_memmap();
+ handle_mem_options();
#ifdef CONFIG_X86_VERBOSE_BOOTUP
/* Make sure video RAM can be used. */
@@ -466,6 +495,60 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
}
}
+/*
+ * Skip as many 1GB huge pages as possible in the passed region
+ * according to the number which users specified:
+ */
+static void
+process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
+{
+ unsigned long addr, size = 0;
+ struct mem_vector tmp;
+ int i = 0;
+
+ if (!max_gb_huge_pages) {
+ store_slot_info(region, image_size);
+ return;
+ }
+
+ addr = ALIGN(region->start, PUD_SIZE);
+ /* Did we raise the address above the passed in memory entry? */
+ if (addr < region->start + region->size)
+ size = region->size - (addr - region->start);
+
+ /* Check how many 1GB huge pages can be filtered out: */
+ while (size > PUD_SIZE && max_gb_huge_pages) {
+ size -= PUD_SIZE;
+ max_gb_huge_pages--;
+ i++;
+ }
+
+ /* No good 1GB huge pages found: */
+ if (!i) {
+ store_slot_info(region, image_size);
+ return;
+ }
+
+ /*
+ * Skip those 'i'*1GB good huge pages, and continue checking and
+ * processing the remaining head or tail part of the passed region
+ * if available.
+ */
+
+ if (addr >= region->start + image_size) {
+ tmp.start = region->start;
+ tmp.size = addr - region->start;
+ store_slot_info(&tmp, image_size);
+ }
+
+ size = region->size - (addr - region->start) - i * PUD_SIZE;
+ if (size >= image_size) {
+ tmp.start = addr + i * PUD_SIZE;
+ tmp.size = size;
+ store_slot_info(&tmp, image_size);
+ }
+}
+
static unsigned long slots_fetch_random(void)
{
unsigned long slot;
@@ -546,7 +629,7 @@ static void process_mem_region(struct mem_vector *entry,
/* If nothing overlaps, store the region and return. */
if (!mem_avoid_overlap(&region, &overlap)) {
- store_slot_info(&region, image_size);
+ process_gb_huge_pages(&region, image_size);
return;
}
@@ -556,7 +639,7 @@ static void process_mem_region(struct mem_vector *entry,
beginning.start = region.start;
beginning.size = overlap.start - region.start;
- store_slot_info(&beginning, image_size);
+ process_gb_huge_pages(&beginning, image_size);
}
/* Return if overlap extends to or past end of region. */
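
process_gb_huge_pages() above carves each candidate KASLR region so that whole, 1 GiB-aligned chunks stay untouched for the hugepages the user asked for, and only the head and tail leftovers are offered as slots. A toy model of the arithmetic, assuming PUD_SIZE is 1 GiB and inventing the region and page count:

/* gbcarve.c — head/tail carving around whole 1 GiB chunks.
 * Build: cc gbcarve.c */
#include <stdio.h>

#define GB (1UL << 30)

int main(void)
{
	unsigned long start = 0x3fe00000;        /* 1022 MiB, unaligned */
	unsigned long size  = 5 * GB;
	unsigned long want  = 2;                 /* hugepages=2         */

	unsigned long aligned = (start + GB - 1) & ~(GB - 1);
	unsigned long head    = aligned - start; /* offered as a slot   */
	unsigned long room    = (size - head) / GB;
	unsigned long skip    = want < room ? want : room;
	unsigned long tail    = size - head - skip * GB;

	printf("head %lu MiB, reserved %lu GiB, tail %lu MiB\n",
	       head >> 20, skip, tail >> 20);
	return 0;
}
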
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 8c5107545251..9e2157371491 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -1,3 +1,4 @@
+#include <asm/e820/types.h>
#include <asm/processor.h>
#include "pgtable.h"
#include "../string.h"
@@ -34,10 +35,62 @@ unsigned long *trampoline_32bit __section(.data);
extern struct boot_params *boot_params;
int cmdline_find_option_bool(const char *option);
+static unsigned long find_trampoline_placement(void)
+{
+ unsigned long bios_start, ebda_start;
+ unsigned long trampoline_start;
+ struct boot_e820_entry *entry;
+ int i;
+
+ /*
+ * Find a suitable spot for the trampoline.
+ * This code is based on reserve_bios_regions().
+ */
+
+ ebda_start = *(unsigned short *)0x40e << 4;
+ bios_start = *(unsigned short *)0x413 << 10;
+
+ if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
+ bios_start = BIOS_START_MAX;
+
+ if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
+ bios_start = ebda_start;
+
+ bios_start = round_down(bios_start, PAGE_SIZE);
+
+ /* Find the first usable memory region under bios_start. */
+ for (i = boot_params->e820_entries - 1; i >= 0; i--) {
+ entry = &boot_params->e820_table[i];
+
+ /* Skip all entries above bios_start. */
+ if (bios_start <= entry->addr)
+ continue;
+
+ /* Skip non-RAM entries. */
+ if (entry->type != E820_TYPE_RAM)
+ continue;
+
+ /* Adjust bios_start to the end of the entry if needed. */
+ if (bios_start > entry->addr + entry->size)
+ bios_start = entry->addr + entry->size;
+
+ /* Keep bios_start page-aligned. */
+ bios_start = round_down(bios_start, PAGE_SIZE);
+
+ /* Skip the entry if it's too small. */
+ if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
+ continue;
+
+ break;
+ }
+
+ /* Place the trampoline just below the end of low memory */
+ return bios_start - TRAMPOLINE_32BIT_SIZE;
+}
+
struct paging_config paging_prepare(void *rmode)
{
struct paging_config paging_config = {};
- unsigned long bios_start, ebda_start;
/* Initialize boot_params. Required for cmdline_find_option_bool(). */
boot_params = rmode;
@@ -61,23 +114,7 @@ struct paging_config paging_prepare(void *rmode)
paging_config.l5_required = 1;
}
- /*
- * Find a suitable spot for the trampoline.
- * This code is based on reserve_bios_regions().
- */
-
- ebda_start = *(unsigned short *)0x40e << 4;
- bios_start = *(unsigned short *)0x413 << 10;
-
- if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
- bios_start = BIOS_START_MAX;
-
- if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
- bios_start = ebda_start;
-
- /* Place the trampoline just below the end of low memory, aligned to 4k */
- paging_config.trampoline_start = bios_start - TRAMPOLINE_32BIT_SIZE;
- paging_config.trampoline_start = round_down(paging_config.trampoline_start, PAGE_SIZE);
+ paging_config.trampoline_start = find_trampoline_placement();
trampoline_32bit = (unsigned long *)paging_config.trampoline_start;
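
find_trampoline_placement() extends the old EBDA heuristic with a backwards e820 scan, so the trampoline cannot land in a reserved hole just below 640K. A compressed userspace walk of the same loop over a fabricated e820 table — the table contents, the BDA-derived value, and the two-page TRAMPOLINE_32BIT_SIZE are assumptions for illustration:

/* placement.c — toy run of the e820-aware placement loop.
 * Build: cc placement.c */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE)   /* assumed, per header */

struct e820_entry { unsigned long addr, size; int ram; };

int main(void)
{
	struct e820_entry table[] = {
		{ 0x00000000, 0x9fc00, 1 },   /* usable low memory   */
		{ 0x0009fc00, 0x00400, 0 },   /* reserved (EBDA etc) */
	};
	unsigned long bios_start = 0x9fc00;   /* from BDA word 0x413 */

	bios_start &= ~(PAGE_SIZE - 1);       /* round_down to a page */
	for (int i = sizeof(table) / sizeof(table[0]) - 1; i >= 0; i--) {
		struct e820_entry *e = &table[i];

		if (!e->ram || bios_start <= e->addr)
			continue;             /* non-RAM or above us  */
		if (bios_start > e->addr + e->size)
			bios_start = e->addr + e->size;
		bios_start &= ~(PAGE_SIZE - 1);
		if (bios_start - TRAMPOLINE_32BIT_SIZE < e->addr)
			continue;             /* entry too small      */
		break;
	}
	printf("trampoline at %#lx\n", bios_start - TRAMPOLINE_32BIT_SIZE);
	return 0;
}
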
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 16f49123d747..c4428a176973 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -13,6 +13,7 @@
*/
#include <linux/types.h>
+#include <asm/asm.h>
#include "ctype.h"
#include "string.h"
@@ -28,8 +29,8 @@
int memcmp(const void *s1, const void *s2, size_t len)
{
bool diff;
- asm("repe; cmpsb; setnz %0"
- : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ asm("repe; cmpsb" CC_SET(nz)
+ : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
index 9254e0b6cc06..5f7e43d4f64a 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -75,7 +75,7 @@
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG, MSG
mov LEN, %r8
@@ -535,6 +535,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail)
movdqu STATE3, 0x40(STATEP)
FRAME_END
+ ret
ENDPROC(crypto_aegis128_aesni_enc_tail)
.macro decrypt_block a s0 s1 s2 s3 s4 i
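
Two recurring fixes run through these .S files: `xor %r9d,%r9d` zeroes the full 64-bit register exactly like `xor %r9,%r9` (32-bit writes zero-extend on x86-64) while dropping the REX.W prefix byte, and the added `ret` repairs ENTRY functions that previously fell through past FRAME_END into the following code. A quick userspace check of the zero-extension claim:

/* zext.c — a 32-bit xor clears all 64 bits of the register.
 * Build: cc zext.c (x86-64, GCC or Clang) */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t r;

	asm volatile("movq $-1, %0\n\t"   /* poison all 64 bits       */
		     "xorl %k0, %k0"      /* 32-bit xor, like %r9d    */
		     : "=r" (r));
	printf("%llu\n", (unsigned long long)r);   /* prints 0 */
	return 0;
}
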
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 5de7c0d46edf..acd11b3bf639 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128_aesni_alg[] = {
}
};
-static const struct x86_cpu_id aesni_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_AES),
- X86_FEATURE_MATCH(X86_FEATURE_XMM2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
static int __init crypto_aegis128_aesni_module_init(void)
{
- if (!x86_match_cpu(aesni_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ !boot_cpu_has(X86_FEATURE_AES) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_aegis128_aesni_alg,
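
The glue-code change above (repeated for the other AEGIS and MORUS variants below) drops the x86_match_cpu() device table — which succeeds if any single entry matches, so the old two-entry table passed on CPUs with only one of the required features — in favor of an explicit conjunction of every feature the code needs, including the OSXSAVE/XSAVE state checks. A userspace analogue of the same gating, reading CPUID leaf 1 directly (bit positions per the Intel SDM; not a kernel API):

/* features.c — gate on AES-NI *and* SSE2 together, mirroring the
 * corrected init check. Build: cc features.c (x86) */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int a, b, c, d;

	if (!__get_cpuid(1, &a, &b, &c, &d))
		return 1;

	int aes     = (c >> 25) & 1;   /* ECX bit 25: AES-NI  */
	int osxsave = (c >> 27) & 1;   /* ECX bit 27: OSXSAVE */
	int sse2    = (d >> 26) & 1;   /* EDX bit 26: SSE2    */

	printf("aes=%d sse2=%d osxsave=%d\n", aes, sse2, osxsave);
	return (aes && sse2 && osxsave) ? 0 : 1;   /* -ENODEV analogue */
}
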
diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S b/arch/x86/crypto/aegis128l-aesni-asm.S
index 9263c344f2c7..491dd61c845c 100644
--- a/arch/x86/crypto/aegis128l-aesni-asm.S
+++ b/arch/x86/crypto/aegis128l-aesni-asm.S
@@ -66,7 +66,7 @@
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG0, MSG0
pxor MSG1, MSG1
@@ -645,6 +645,7 @@ ENTRY(crypto_aegis128l_aesni_enc_tail)
state_store0
FRAME_END
+ ret
ENDPROC(crypto_aegis128l_aesni_enc_tail)
/*
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 876e4866e633..2071c3d1ae07 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128l_aesni_alg[] = {
}
};
-static const struct x86_cpu_id aesni_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_AES),
- X86_FEATURE_MATCH(X86_FEATURE_XMM2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
static int __init crypto_aegis128l_aesni_module_init(void)
{
- if (!x86_match_cpu(aesni_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ !boot_cpu_has(X86_FEATURE_AES) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_aegis128l_aesni_alg,
diff --git a/arch/x86/crypto/aegis256-aesni-asm.S b/arch/x86/crypto/aegis256-aesni-asm.S
index 1d977d515bf9..8870c7c5d9a4 100644
--- a/arch/x86/crypto/aegis256-aesni-asm.S
+++ b/arch/x86/crypto/aegis256-aesni-asm.S
@@ -59,7 +59,7 @@
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG, MSG
mov LEN, %r8
@@ -543,6 +543,7 @@ ENTRY(crypto_aegis256_aesni_enc_tail)
state_store0
FRAME_END
+ ret
ENDPROC(crypto_aegis256_aesni_enc_tail)
/*
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 2b5dd3af8f4d..b5f2a8fd5a71 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis256_aesni_alg[] = {
}
};
-static const struct x86_cpu_id aesni_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_AES),
- X86_FEATURE_MATCH(X86_FEATURE_XMM2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
static int __init crypto_aegis256_aesni_module_init(void)
{
- if (!x86_match_cpu(aesni_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ !boot_cpu_has(X86_FEATURE_AES) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_aegis256_aesni_alg,
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index e762ef417562..9bd139569b41 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -258,7 +258,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.macro GCM_INIT Iv SUBKEY AAD AADLEN
mov \AADLEN, %r11
mov %r11, AadLen(%arg2) # ctx_data.aad_length = aad_length
- xor %r11, %r11
+ xor %r11d, %r11d
mov %r11, InLen(%arg2) # ctx_data.in_length = 0
mov %r11, PBlockLen(%arg2) # ctx_data.partial_block_length = 0
mov %r11, PBlockEncKey(%arg2) # ctx_data.partial_block_enc_key = 0
@@ -286,7 +286,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
movdqu HashKey(%arg2), %xmm13
add %arg5, InLen(%arg2)
- xor %r11, %r11 # initialise the data pointer offset as zero
+ xor %r11d, %r11d # initialise the data pointer offset as zero
PARTIAL_BLOCK %arg3 %arg4 %arg5 %r11 %xmm8 \operation
sub %r11, %arg5 # sub partial block data used
@@ -702,7 +702,7 @@ _no_extra_mask_1_\@:
# GHASH computation for the last <16 Byte block
GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
- xor %rax,%rax
+ xor %eax, %eax
mov %rax, PBlockLen(%arg2)
jmp _dec_done_\@
@@ -737,7 +737,7 @@ _no_extra_mask_2_\@:
# GHASH computation for the last <16 Byte block
GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
- xor %rax,%rax
+ xor %eax, %eax
mov %rax, PBlockLen(%arg2)
jmp _encode_done_\@
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index faecb1518bf8..1985ea0b551b 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -463,7 +463,7 @@ _get_AAD_rest_final\@:
_get_AAD_done\@:
# initialize the data pointer offset as zero
- xor %r11, %r11
+ xor %r11d, %r11d
# start AES for num_initial_blocks blocks
mov arg5, %rax # rax = *Y0
@@ -1770,7 +1770,7 @@ _get_AAD_rest_final\@:
_get_AAD_done\@:
# initialize the data pointer offset as zero
- xor %r11, %r11
+ xor %r11d, %r11d
# start AES for num_initial_blocks blocks
mov arg5, %rax # rax = *Y0
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 2ddbe3a1868b..3582ae885ee1 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -154,8 +154,7 @@ static struct shash_alg ghash_alg = {
.cra_name = "__ghash",
.cra_driver_name = "__ghash-pclmulqdqni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
@@ -315,9 +314,8 @@ static struct ahash_alg ghash_async_alg = {
.cra_driver_name = "ghash-clmulni",
.cra_priority = 400,
.cra_ctxsize = sizeof(struct ghash_async_ctx),
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_init = ghash_async_init_tfm,
.cra_exit = ghash_async_exit_tfm,
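
The flag cleanups here follow a pattern repeated across the rest of this series: CRYPTO_ALG_TYPE_SHASH and CRYPTO_ALG_TYPE_AHASH disappear from cra_flags, and .cra_type drops from ahash algorithms, because the registration helpers (crypto_register_shash()/crypto_register_ahash()) set the type bits and .cra_type themselves, making the explicit settings redundant. A hedged sketch of what a synchronous hash definition looks like afterwards (names and sizes are hypothetical; the init/update/final callbacks are elided):

    static struct shash_alg example_alg = {
            .digestsize     = 32,
            .base           = {
                    .cra_name       = "example-hash",
                    .cra_priority   = 100,
                    .cra_blocksize  = 64,
                    .cra_module     = THIS_MODULE,
                    /* no CRYPTO_ALG_TYPE_* bit needed; set by
                     * crypto_register_shash() */
            },
    };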
diff --git a/arch/x86/crypto/morus1280-avx2-asm.S b/arch/x86/crypto/morus1280-avx2-asm.S
index 37d422e77931..de182c460f82 100644
--- a/arch/x86/crypto/morus1280-avx2-asm.S
+++ b/arch/x86/crypto/morus1280-avx2-asm.S
@@ -113,7 +113,7 @@ ENDPROC(__morus1280_update_zero)
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
vpxor MSG, MSG, MSG
mov %rcx, %r8
@@ -453,6 +453,7 @@ ENTRY(crypto_morus1280_avx2_enc_tail)
vmovdqu STATE4, (4 * 32)(%rdi)
FRAME_END
+ ret
ENDPROC(crypto_morus1280_avx2_enc_tail)
/*
diff --git a/arch/x86/crypto/morus1280-avx2-glue.c b/arch/x86/crypto/morus1280-avx2-glue.c
index f111f36d26dc..6634907d6ccd 100644
--- a/arch/x86/crypto/morus1280-avx2-glue.c
+++ b/arch/x86/crypto/morus1280-avx2-glue.c
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
-static const struct x86_cpu_id avx2_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_AVX2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, avx2_cpu_id);
-
static int __init crypto_morus1280_avx2_module_init(void)
{
- if (!x86_match_cpu(avx2_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_AVX2) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_morus1280_avx2_algs,
diff --git a/arch/x86/crypto/morus1280-sse2-asm.S b/arch/x86/crypto/morus1280-sse2-asm.S
index 1fe637c7be9d..da5d2905db60 100644
--- a/arch/x86/crypto/morus1280-sse2-asm.S
+++ b/arch/x86/crypto/morus1280-sse2-asm.S
@@ -235,7 +235,7 @@ ENDPROC(__morus1280_update_zero)
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG_LO, MSG_LO
pxor MSG_HI, MSG_HI
@@ -652,6 +652,7 @@ ENTRY(crypto_morus1280_sse2_enc_tail)
movdqu STATE4_HI, (9 * 16)(%rdi)
FRAME_END
+ ret
ENDPROC(crypto_morus1280_sse2_enc_tail)
/*
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index 839270aa713c..95cf857d2cbb 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
-static const struct x86_cpu_id sse2_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_XMM2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
-
static int __init crypto_morus1280_sse2_module_init(void)
{
- if (!x86_match_cpu(sse2_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_morus1280_sse2_algs,
diff --git a/arch/x86/crypto/morus640-sse2-asm.S b/arch/x86/crypto/morus640-sse2-asm.S
index 71c72a0a0862..414db480250e 100644
--- a/arch/x86/crypto/morus640-sse2-asm.S
+++ b/arch/x86/crypto/morus640-sse2-asm.S
@@ -113,7 +113,7 @@ ENDPROC(__morus640_update_zero)
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG, MSG
mov %rcx, %r8
@@ -437,6 +437,7 @@ ENTRY(crypto_morus640_sse2_enc_tail)
movdqu STATE4, (4 * 16)(%rdi)
FRAME_END
+ ret
ENDPROC(crypto_morus640_sse2_enc_tail)
/*
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 26b47e2db8d2..615fb7bc9a32 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
-static const struct x86_cpu_id sse2_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_XMM2),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
-
static int __init crypto_morus640_sse2_module_init(void)
{
- if (!x86_match_cpu(sse2_cpu_id))
+ if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
return crypto_register_aeads(crypto_morus640_sse2_algs,
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 790377797544..f012b7e28ad1 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -169,7 +169,6 @@ static struct shash_alg alg = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-simd",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index e17655ffde79..b93805664c1d 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -746,9 +746,8 @@ static struct ahash_alg sha1_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@@ -871,10 +870,16 @@ static struct ahash_alg sha1_mb_async_alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1_mb",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ /*
+ * Low priority, since with few concurrent hash requests
+ * this is extremely slow due to the flush delay. Users
+ * whose workloads would benefit from this can request
+ * it explicitly by driver name, or can increase its
+ * priority at runtime using NETLINK_CRYPTO.
+ */
+ .cra_priority = 50,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
.cra_init = sha1_mb_async_init_tfm,
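
Besides the flag cleanup, the multibuffer driver's priority drops from 200 to 50, below the generic (100) and SSSE3 (150) implementations, so it is no longer picked by default. As the new comment says, callers who still want it can raise its priority over NETLINK_CRYPTO or allocate it by driver name. A minimal sketch of the latter, assuming kernel context:

    #include <crypto/hash.h>

    struct crypto_ahash *tfm;

    /* Request the multibuffer driver explicitly by cra_driver_name,
     * bypassing priority-based selection of "sha1". */
    tfm = crypto_alloc_ahash("sha1_mb", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);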
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
index 6204bd53528c..613d0bfc3d84 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -96,7 +96,7 @@
# cleanup workspace
mov $8, %ecx
mov %rsp, %rdi
- xor %rax, %rax
+ xor %eax, %eax
rep stosq
mov %rbp, %rsp # deallocate workspace
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739150e7..7391c7de72c7 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -104,7 +104,6 @@ static struct shash_alg sha1_ssse3_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -157,7 +156,6 @@ static struct shash_alg sha1_avx_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -249,7 +247,6 @@ static struct shash_alg sha1_avx2_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -307,7 +304,6 @@ static struct shash_alg sha1_ni_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ni",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 4c46ac1b6653..97c5fc43e115 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -745,9 +745,8 @@ static struct ahash_alg sha256_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@@ -870,11 +869,16 @@ static struct ahash_alg sha256_mb_async_alg = {
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256_mb",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ /*
+ * Low priority, since with few concurrent hash requests
+ * this is extremely slow due to the flush delay. Users
+ * whose workloads would benefit from this can request
+ * it explicitly by driver name, or can increase its
+ * priority at runtime using NETLINK_CRYPTO.
+ */
+ .cra_priority = 50,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
(sha256_mb_async_alg.halg.base.cra_list),
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index 16c4ccb1f154..d2364c55bbde 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest(state , idx, 4) , %xmm0
+ vmovd _args_digest+4*32(state, idx, 4), %xmm1
vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
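
The one-line change above is a real bug fix rather than a cleanup: the flush path gathers the eight SHA-256 digest words of lane idx out of the manager's transposed state, but the second vmovd reloaded word 0 into %xmm0 instead of loading word 4 into %xmm1, corrupting the upper half of the returned digest. A hedged C model of the gather the fixed code performs (the array name and layout are inferred from the offsets: eight u32 lanes per digest word, 32 bytes apart):

    u32 args_digest[8][8];  /* [word][lane], hypothetical name */
    u32 digest[8];
    int w;

    for (w = 0; w < 8; w++)
            digest[w] = args_digest[w][idx];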
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 9e79baf03a4b..773a873d2b28 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -109,7 +109,6 @@ static struct shash_alg sha256_ssse3_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -124,7 +123,6 @@ static struct shash_alg sha256_ssse3_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -177,7 +175,6 @@ static struct shash_alg sha256_avx_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -192,7 +189,6 @@ static struct shash_alg sha256_avx_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -261,7 +257,6 @@ static struct shash_alg sha256_avx2_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -276,7 +271,6 @@ static struct shash_alg sha256_avx2_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -343,7 +337,6 @@ static struct shash_alg sha256_ni_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ni",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -358,7 +351,6 @@ static struct shash_alg sha256_ni_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ni",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index 39e2bbdc1836..26b85678012d 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -778,9 +778,8 @@ static struct ahash_alg sha512_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@@ -904,11 +903,16 @@ static struct ahash_alg sha512_mb_async_alg = {
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512_mb",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ /*
+ * Low priority, since with few concurrent hash requests
+ * this is extremely slow due to the flush delay. Users
+ * whose workloads would benefit from this can request
+ * it explicitly by driver name, or can increase its
+ * priority at runtime using NETLINK_CRYPTO.
+ */
+ .cra_priority = 50,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
(sha512_mb_async_alg.halg.base.cra_list),
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 2b0e2a6825f3..f1b811b60ba6 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -109,7 +109,6 @@ static struct shash_alg sha512_ssse3_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -124,7 +123,6 @@ static struct shash_alg sha512_ssse3_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -188,7 +186,6 @@ static struct shash_alg sha512_avx_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -203,7 +200,6 @@ static struct shash_alg sha512_avx_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -261,7 +257,6 @@ static struct shash_alg sha512_avx2_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -276,7 +271,6 @@ static struct shash_alg sha512_avx2_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index c371bfee137a..2767c625a52c 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -65,7 +65,7 @@
# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
-# define resume_kernel restore_all
+# define resume_kernel restore_all_kernel
#endif
.macro TRACE_IRQS_IRET
@@ -77,6 +77,8 @@
#endif
.endm
+#define PTI_SWITCH_MASK (1 << PAGE_SHIFT)
+
/*
* User gs save/restore
*
@@ -154,7 +156,52 @@
#endif /* CONFIG_X86_32_LAZY_GS */
-.macro SAVE_ALL pt_regs_ax=%eax
+/* Unconditionally switch to user cr3 */
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+
+ movl %cr3, \scratch_reg
+ orl $PTI_SWITCH_MASK, \scratch_reg
+ movl \scratch_reg, %cr3
+.Lend_\@:
+.endm
+
+.macro BUG_IF_WRONG_CR3 no_user_check=0
+#ifdef CONFIG_DEBUG_ENTRY
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+ .if \no_user_check == 0
+ /* coming from usermode? */
+ testl $SEGMENT_RPL_MASK, PT_CS(%esp)
+ jz .Lend_\@
+ .endif
+ /* On user-cr3? */
+ movl %cr3, %eax
+ testl $PTI_SWITCH_MASK, %eax
+ jnz .Lend_\@
+ /* From userspace with kernel cr3 - BUG */
+ ud2
+.Lend_\@:
+#endif
+.endm
+
+/*
+ * Switch to kernel cr3 if not already loaded and return current cr3 in
+ * \scratch_reg
+ */
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+ movl %cr3, \scratch_reg
+ /* Test if we are already on kernel CR3 */
+ testl $PTI_SWITCH_MASK, \scratch_reg
+ jz .Lend_\@
+ andl $(~PTI_SWITCH_MASK), \scratch_reg
+ movl \scratch_reg, %cr3
+ /* Return original CR3 in \scratch_reg */
+ orl $PTI_SWITCH_MASK, \scratch_reg
+.Lend_\@:
+.endm
+
+.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
cld
PUSH_GS
pushl %fs
@@ -173,6 +220,29 @@
movl $(__KERNEL_PERCPU), %edx
movl %edx, %fs
SET_KERNEL_GS %edx
+
+ /* Switch to kernel stack if necessary */
+.if \switch_stacks > 0
+ SWITCH_TO_KERNEL_STACK
+.endif
+
+.endm
+
+.macro SAVE_ALL_NMI cr3_reg:req
+ SAVE_ALL
+
+ BUG_IF_WRONG_CR3
+
+ /*
+ * Now switch the CR3 when PTI is enabled.
+ *
+ * We can enter with either user or kernel cr3; the code stores
+ * the old cr3 in \cr3_reg and switches to the kernel cr3 if
+ * necessary.
+ */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg
+
+.Lend_\@:
.endm
/*
@@ -221,6 +291,349 @@
POP_GS_EX
.endm
+.macro RESTORE_ALL_NMI cr3_reg:req pop=0
+ /*
+ * Now switch the CR3 when PTI is enabled.
+ *
+ * We enter with kernel cr3 and switch the cr3 to the value
+ * stored in \cr3_reg, which is either a user or a kernel cr3.
+ */
+ ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI
+
+ testl $PTI_SWITCH_MASK, \cr3_reg
+ jz .Lswitched_\@
+
+ /* User cr3 in \cr3_reg - write it to hardware cr3 */
+ movl \cr3_reg, %cr3
+
+.Lswitched_\@:
+
+ BUG_IF_WRONG_CR3
+
+ RESTORE_REGS pop=\pop
+.endm
+
+.macro CHECK_AND_APPLY_ESPFIX
+#ifdef CONFIG_X86_ESPFIX32
+#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
+
+ ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX
+
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
+ /*
+ * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+ * are returning to the kernel.
+ * See comments in process.c:copy_thread() for details.
+ */
+ movb PT_OLDSS(%esp), %ah
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
+ jne .Lend_\@ # returning to user-space with LDT SS
+
+ /*
+ * Setup and switch to ESPFIX stack
+ *
+ * We're returning to userspace with a 16 bit stack. The CPU will not
+ * restore the high word of ESP for us on executing iret... This is an
+ * "official" bug of all the x86-compatible CPUs, which we can work
+ * around to make dosemu and wine happy. We do this by preloading the
+ * high word of ESP with the high word of the userspace ESP while
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+ mov %esp, %edx /* load kernel esp */
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
+ shr $16, %edx
+ mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+ mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
+ pushl $__ESPFIX_SS
+ pushl %eax /* new kernel esp */
+ /*
+ * Disable interrupts, but do not irqtrace this section: we
+ * will soon execute iret and the tracer was already set to
+ * the irqstate after the IRET:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ lss (%esp), %esp /* switch to espfix segment */
+.Lend_\@:
+#endif /* CONFIG_X86_ESPFIX32 */
+.endm
+
+/*
+ * Called with pt_regs fully populated and kernel segments loaded,
+ * so we can access PER_CPU and use the integer registers.
+ *
+ * We need to be very careful here with the %esp switch, because an NMI
+ * can happen anywhere. If the NMI handler finds itself on the
+ * entry-stack, it will overwrite the task-stack and everything we
+ * copied there. So allocate the stack-frame on the task-stack and
+ * switch to it before we do any copying.
+ */
+
+#define CS_FROM_ENTRY_STACK (1 << 31)
+#define CS_FROM_USER_CR3 (1 << 30)
+
+.macro SWITCH_TO_KERNEL_STACK
+
+ ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+ BUG_IF_WRONG_CR3
+
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
+
+ /*
+ * %eax now contains the entry cr3 and we carry it forward in
+ * that register for the time this macro runs
+ */
+
+ /* Are we on the entry stack? Bail out if not! */
+ movl PER_CPU_VAR(cpu_entry_area), %ecx
+ addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+ subl %esp, %ecx /* ecx = (end of entry_stack) - esp */
+ cmpl $SIZEOF_entry_stack, %ecx
+ jae .Lend_\@
+
+ /* Load stack pointer into %esi and %edi */
+ movl %esp, %esi
+ movl %esi, %edi
+
+ /* Move %edi to the top of the entry stack */
+ andl $(MASK_entry_stack), %edi
+ addl $(SIZEOF_entry_stack), %edi
+
+ /* Load top of task-stack into %edi */
+ movl TSS_entry2task_stack(%edi), %edi
+
+ /*
+ * Clear unused upper bits of the dword containing the word-sized CS
+ * slot in pt_regs in case hardware didn't clear it for us.
+ */
+ andl $(0x0000ffff), PT_CS(%esp)
+
+ /* Special case - entry from kernel mode via entry stack */
+#ifdef CONFIG_VM86
+ movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS
+ movb PT_CS(%esp), %cl
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
+#else
+ movl PT_CS(%esp), %ecx
+ andl $SEGMENT_RPL_MASK, %ecx
+#endif
+ cmpl $USER_RPL, %ecx
+ jb .Lentry_from_kernel_\@
+
+ /* Bytes to copy */
+ movl $PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esi)
+ jz .Lcopy_pt_regs_\@
+
+ /*
+ * Stack-frame contains 4 additional segment registers when
+ * coming from VM86 mode
+ */
+ addl $(4 * 4), %ecx
+
+#endif
+.Lcopy_pt_regs_\@:
+
+ /* Allocate frame on task-stack */
+ subl %ecx, %edi
+
+ /* Switch to task-stack */
+ movl %edi, %esp
+
+ /*
+ * We are now on the task-stack and can safely copy over the
+ * stack-frame
+ */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ jmp .Lend_\@
+
+.Lentry_from_kernel_\@:
+
+ /*
+ * This handles the case when we enter the kernel from
+ * kernel-mode and %esp points to the entry-stack. When this
+ * happens we need to switch to the task-stack to run C code,
+ * but switch back to the entry-stack again when we approach
+ * iret and return to the interrupted code-path. This usually
+ * happens when we hit an exception while restoring user-space
+ * segment registers on the way back to user-space or when the
+ * sysenter handler runs with eflags.tf set.
+ *
+ * When we switch to the task-stack here, we can't trust the
+ * contents of the entry-stack anymore, as the exception handler
+ * might be scheduled out or moved to another CPU. Therefore we
+ * copy the complete entry-stack to the task-stack and set a
+ * marker in the iret-frame (bit 31 of the CS dword) to detect
+ * what we've done on the iret path.
+ *
+ * On the iret path we copy everything back and switch to the
+ * entry-stack, so that the interrupted kernel code-path
+ * continues on the same stack it was interrupted with.
+ *
+ * Be aware that an NMI can happen anytime in this code.
+ *
+ * %esi: Entry-Stack pointer (same as %esp)
+ * %edi: Top of the task stack
+ * %eax: CR3 on kernel entry
+ */
+
+ /* Calculate number of bytes on the entry stack in %ecx */
+ movl %esi, %ecx
+
+ /* %ecx to the top of entry-stack */
+ andl $(MASK_entry_stack), %ecx
+ addl $(SIZEOF_entry_stack), %ecx
+
+ /* Number of bytes on the entry stack to %ecx */
+ sub %esi, %ecx
+
+ /* Mark stackframe as coming from entry stack */
+ orl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
+
+ /*
+ * Test the cr3 used to enter the kernel and add a marker
+ * so that we can switch back to it before iret.
+ */
+ testl $PTI_SWITCH_MASK, %eax
+ jz .Lcopy_pt_regs_\@
+ orl $CS_FROM_USER_CR3, PT_CS(%esp)
+
+ /*
+ * %esi and %edi are unchanged, %ecx contains the number of
+ * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
+ * the stack-frame on task-stack and copy everything over
+ */
+ jmp .Lcopy_pt_regs_\@
+
+.Lend_\@:
+.endm
+
+/*
+ * Switch back from the kernel stack to the entry stack.
+ *
+ * The %esp register must point to pt_regs on the task stack. The
+ * macro first calculates the size of the stack-frame to copy,
+ * depending on whether we return to VM86 mode or not, and then uses
+ * 'rep movsl' to copy the contents of the stack over to the entry
+ * stack.
+ *
+ * We must be very careful here, as we can't trust the contents of the
+ * task-stack once we have switched to the entry-stack. When an NMI
+ * happens while on the entry-stack, the NMI handler will switch back
+ * to the top of the task stack, overwriting the stack-frame we are
+ * about to copy.
+ * Therefore we switch the stack only after everything is copied over.
+ */
+.macro SWITCH_TO_ENTRY_STACK
+
+ ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+ /* Bytes to copy */
+ movl $PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+ testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
+ jz .Lcopy_pt_regs_\@
+
+ /* Additional 4 registers to copy when returning to VM86 mode */
+ addl $(4 * 4), %ecx
+
+.Lcopy_pt_regs_\@:
+#endif
+
+ /* Initialize source and destination for movsl */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+ subl %ecx, %edi
+ movl %esp, %esi
+
+ /* Save future stack pointer in %ebx */
+ movl %edi, %ebx
+
+ /* Copy over the stack-frame */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ /*
+ * Switch to entry-stack - needs to happen after everything is
+ * copied because the NMI handler will overwrite the task-stack
+ * when on entry-stack
+ */
+ movl %ebx, %esp
+
+.Lend_\@:
+.endm
+
+/*
+ * This macro handles the case when we return to kernel-mode on the iret
+ * path and have to switch back to the entry stack and/or user-cr3
+ *
+ * See the comments below the .Lentry_from_kernel_\@ label in the
+ * SWITCH_TO_KERNEL_STACK macro for more details.
+ */
+.macro PARANOID_EXIT_TO_KERNEL_MODE
+
+ /*
+ * Test if we entered the kernel with the entry-stack. Most
+ * likely we did not, because this code only runs on the
+ * return-to-kernel path.
+ */
+ testl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
+ jz .Lend_\@
+
+ /* Unlikely slow-path */
+
+ /* Clear marker from stack-frame */
+ andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
+
+ /* Copy the remaining task-stack contents to entry-stack */
+ movl %esp, %esi
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+
+ /* Bytes on the task-stack to ecx */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
+ subl %esi, %ecx
+
+ /* Allocate stack-frame on entry-stack */
+ subl %ecx, %edi
+
+ /*
+ * Save the future stack pointer; we must not switch until the
+ * copy is done, otherwise the NMI handler could destroy the
+ * contents of the task-stack we are about to copy.
+ */
+ movl %edi, %ebx
+
+ /* Do the copy */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ /* Safe to switch to entry-stack now */
+ movl %ebx, %esp
+
+ /*
+ * We came from entry-stack and need to check if we also need to
+ * switch back to user cr3.
+ */
+ testl $CS_FROM_USER_CR3, PT_CS(%esp)
+ jz .Lend_\@
+
+ /* Clear marker from stack-frame */
+ andl $(~CS_FROM_USER_CR3), PT_CS(%esp)
+
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
+
+.Lend_\@:
+.endm
/*
* %eax: prev task
* %edx: next task
@@ -351,9 +764,9 @@ ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
cmpl $0, PER_CPU_VAR(__preempt_count)
- jnz restore_all
+ jnz restore_all_kernel
testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz restore_all
+ jz restore_all_kernel
call preempt_schedule_irq
jmp .Lneed_resched
END(resume_kernel)
@@ -412,7 +825,21 @@ ENTRY(xen_sysenter_target)
* 0(%ebp) arg6
*/
ENTRY(entry_SYSENTER_32)
- movl TSS_sysenter_sp0(%esp), %esp
+ /*
+ * On entry-stack with all userspace-regs live - save and
+ * restore eflags and %eax to use it as scratch-reg for the cr3
+ * switch.
+ */
+ pushfl
+ pushl %eax
+ BUG_IF_WRONG_CR3 no_user_check=1
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
+ popl %eax
+ popfl
+
+ /* Stack empty again, switch to task stack */
+ movl TSS_entry2task_stack(%esp), %esp
+
.Lsysenter_past_esp:
pushl $__USER_DS /* pt_regs->ss */
pushl %ebp /* pt_regs->sp (stashed in bp) */
@@ -421,7 +848,7 @@ ENTRY(entry_SYSENTER_32)
pushl $__USER_CS /* pt_regs->cs */
pushl $0 /* pt_regs->ip = 0 (placeholder) */
pushl %eax /* pt_regs->orig_ax */
- SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
+ SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */
/*
* SYSENTER doesn't filter flags, so we need to clear NT, AC
@@ -460,25 +887,49 @@ ENTRY(entry_SYSENTER_32)
/* Opportunistic SYSEXIT */
TRACE_IRQS_ON /* User mode traces as IRQs on. */
+
+ /*
+ * Setup entry stack - we keep the pointer in %eax and do the
+ * switch after almost all user-state is restored.
+ */
+
+ /* Load entry stack pointer and allocate frame for eflags/eax */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
+ subl $(2*4), %eax
+
+ /* Copy eflags and eax to entry stack */
+ movl PT_EFLAGS(%esp), %edi
+ movl PT_EAX(%esp), %esi
+ movl %edi, (%eax)
+ movl %esi, 4(%eax)
+
+ /* Restore user registers and segments */
movl PT_EIP(%esp), %edx /* pt_regs->ip */
movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
1: mov PT_FS(%esp), %fs
PTGS_TO_GS
+
popl %ebx /* pt_regs->bx */
addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
popl %esi /* pt_regs->si */
popl %edi /* pt_regs->di */
popl %ebp /* pt_regs->bp */
- popl %eax /* pt_regs->ax */
+
+ /* Switch to entry stack */
+ movl %eax, %esp
+
+ /* Now ready to switch the cr3 */
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
/*
* Restore all flags except IF. (We restore IF separately because
* STI gives a one-instruction window in which we won't be interrupted,
* whereas POPF does not.)
*/
- addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */
btrl $X86_EFLAGS_IF_BIT, (%esp)
+ BUG_IF_WRONG_CR3 no_user_check=1
popfl
+ popl %eax
/*
* Return back to the vDSO, which will pop ecx and edx.
@@ -532,7 +983,8 @@ ENDPROC(entry_SYSENTER_32)
ENTRY(entry_INT80_32)
ASM_CLAC
pushl %eax /* pt_regs->orig_ax */
- SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
+
+ SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */
/*
* User mode is traced as though IRQs are on, and the interrupt gate
@@ -546,24 +998,17 @@ ENTRY(entry_INT80_32)
restore_all:
TRACE_IRQS_IRET
+ SWITCH_TO_ENTRY_STACK
.Lrestore_all_notrace:
-#ifdef CONFIG_X86_ESPFIX32
- ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
-
- movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
- /*
- * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
- * are returning to the kernel.
- * See comments in process.c:copy_thread() for details.
- */
- movb PT_OLDSS(%esp), %ah
- movb PT_CS(%esp), %al
- andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
- cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
- je .Lldt_ss # returning to user-space with LDT SS
-#endif
+ CHECK_AND_APPLY_ESPFIX
.Lrestore_nocheck:
- RESTORE_REGS 4 # skip orig_eax/error_code
+ /* Switch back to user CR3 */
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
+
+ BUG_IF_WRONG_CR3
+
+ /* Restore user state */
+ RESTORE_REGS pop=4 # skip orig_eax/error_code
.Lirq_return:
/*
* ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -572,46 +1017,33 @@ restore_all:
*/
INTERRUPT_RETURN
+restore_all_kernel:
+ TRACE_IRQS_IRET
+ PARANOID_EXIT_TO_KERNEL_MODE
+ BUG_IF_WRONG_CR3
+ RESTORE_REGS 4
+ jmp .Lirq_return
+
.section .fixup, "ax"
ENTRY(iret_exc )
pushl $0 # no error code
pushl $do_iret_error
- jmp common_exception
-.previous
- _ASM_EXTABLE(.Lirq_return, iret_exc)
-#ifdef CONFIG_X86_ESPFIX32
-.Lldt_ss:
-/*
- * Setup and switch to ESPFIX stack
- *
- * We're returning to userspace with a 16 bit stack. The CPU will not
- * restore the high word of ESP for us on executing iret... This is an
- * "official" bug of all the x86-compatible CPUs, which we can work
- * around to make dosemu and wine happy. We do this by preloading the
- * high word of ESP with the high word of the userspace ESP while
- * compensating for the offset by changing to the ESPFIX segment with
- * a base address that matches for the difference.
- */
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
- mov %esp, %edx /* load kernel esp */
- mov PT_OLDESP(%esp), %eax /* load userspace esp */
- mov %dx, %ax /* eax: new kernel esp */
- sub %eax, %edx /* offset (low word is 0) */
- shr $16, %edx
- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
- pushl $__ESPFIX_SS
- pushl %eax /* new kernel esp */
+#ifdef CONFIG_DEBUG_ENTRY
/*
- * Disable interrupts, but do not irqtrace this section: we
- * will soon execute iret and the tracer was already set to
- * the irqstate after the IRET:
+ * The stack-frame here is the one that iret faulted on, so it's a
+ * return-to-user frame. We are on kernel-cr3 because we come here from
+ * the fixup code. This confuses the CR3 checker, so switch to user-cr3
+ * as the checker expects it.
*/
- DISABLE_INTERRUPTS(CLBR_ANY)
- lss (%esp), %esp /* switch to espfix segment */
- jmp .Lrestore_nocheck
+ pushl %eax
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
+ popl %eax
#endif
+
+ jmp common_exception
+.previous
+ _ASM_EXTABLE(.Lirq_return, iret_exc)
ENDPROC(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
@@ -671,7 +1103,8 @@ END(irq_entries_start)
common_interrupt:
ASM_CLAC
addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
- SAVE_ALL
+
+ SAVE_ALL switch_stacks=1
ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
movl %esp, %eax
@@ -679,16 +1112,16 @@ common_interrupt:
jmp ret_from_intr
ENDPROC(common_interrupt)
-#define BUILD_INTERRUPT3(name, nr, fn) \
-ENTRY(name) \
- ASM_CLAC; \
- pushl $~(nr); \
- SAVE_ALL; \
- ENCODE_FRAME_POINTER; \
- TRACE_IRQS_OFF \
- movl %esp, %eax; \
- call fn; \
- jmp ret_from_intr; \
+#define BUILD_INTERRUPT3(name, nr, fn) \
+ENTRY(name) \
+ ASM_CLAC; \
+ pushl $~(nr); \
+ SAVE_ALL switch_stacks=1; \
+ ENCODE_FRAME_POINTER; \
+ TRACE_IRQS_OFF \
+ movl %esp, %eax; \
+ call fn; \
+ jmp ret_from_intr; \
ENDPROC(name)
#define BUILD_INTERRUPT(name, nr) \
@@ -920,16 +1353,20 @@ common_exception:
pushl %es
pushl %ds
pushl %eax
+ movl $(__USER_DS), %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl $(__KERNEL_PERCPU), %eax
+ movl %eax, %fs
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
+ SWITCH_TO_KERNEL_STACK
ENCODE_FRAME_POINTER
cld
- movl $(__KERNEL_PERCPU), %ecx
- movl %ecx, %fs
UNWIND_ESPFIX_STACK
GS_TO_REG %ecx
movl PT_GS(%esp), %edi # get the function address
@@ -937,9 +1374,6 @@ common_exception:
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
- movl $(__USER_DS), %ecx
- movl %ecx, %ds
- movl %ecx, %es
TRACE_IRQS_OFF
movl %esp, %eax # pt_regs pointer
CALL_NOSPEC %edi
@@ -948,40 +1382,12 @@ END(common_exception)
ENTRY(debug)
/*
- * #DB can happen at the first instruction of
- * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this
- * happens, then we will be running on a very small stack. We
- * need to detect this condition and switch to the thread
- * stack before calling any C code at all.
- *
- * If you edit this code, keep in mind that NMIs can happen in here.
+ * Entry from sysenter is now handled in common_exception
*/
ASM_CLAC
pushl $-1 # mark this as an int
- SAVE_ALL
- ENCODE_FRAME_POINTER
- xorl %edx, %edx # error code 0
- movl %esp, %eax # pt_regs pointer
-
- /* Are we currently on the SYSENTER stack? */
- movl PER_CPU_VAR(cpu_entry_area), %ecx
- addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
- subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
- cmpl $SIZEOF_entry_stack, %ecx
- jb .Ldebug_from_sysenter_stack
-
- TRACE_IRQS_OFF
- call do_debug
- jmp ret_from_exception
-
-.Ldebug_from_sysenter_stack:
- /* We're on the SYSENTER stack. Switch off. */
- movl %esp, %ebx
- movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
- TRACE_IRQS_OFF
- call do_debug
- movl %ebx, %esp
- jmp ret_from_exception
+ pushl $do_debug
+ jmp common_exception
END(debug)
/*
@@ -993,6 +1399,7 @@ END(debug)
*/
ENTRY(nmi)
ASM_CLAC
+
#ifdef CONFIG_X86_ESPFIX32
pushl %eax
movl %ss, %eax
@@ -1002,7 +1409,7 @@ ENTRY(nmi)
#endif
pushl %eax # pt_regs->orig_ax
- SAVE_ALL
+ SAVE_ALL_NMI cr3_reg=%edi
ENCODE_FRAME_POINTER
xorl %edx, %edx # zero error code
movl %esp, %eax # pt_regs pointer
@@ -1016,7 +1423,7 @@ ENTRY(nmi)
/* Not on SYSENTER stack. */
call do_nmi
- jmp .Lrestore_all_notrace
+ jmp .Lnmi_return
.Lnmi_from_sysenter_stack:
/*
@@ -1027,7 +1434,11 @@ ENTRY(nmi)
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
call do_nmi
movl %ebx, %esp
- jmp .Lrestore_all_notrace
+
+.Lnmi_return:
+ CHECK_AND_APPLY_ESPFIX
+ RESTORE_ALL_NMI cr3_reg=%edi pop=4
+ jmp .Lirq_return
#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
@@ -1042,12 +1453,12 @@ ENTRY(nmi)
pushl 16(%esp)
.endr
pushl %eax
- SAVE_ALL
+ SAVE_ALL_NMI cr3_reg=%edi
ENCODE_FRAME_POINTER
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx, %edx # zero error code
call do_nmi
- RESTORE_REGS
+ RESTORE_ALL_NMI cr3_reg=%edi
lss 12+4(%esp), %esp # back to espfix stack
jmp .Lirq_return
#endif
@@ -1056,7 +1467,8 @@ END(nmi)
ENTRY(int3)
ASM_CLAC
pushl $-1 # mark this as an int
- SAVE_ALL
+
+ SAVE_ALL switch_stacks=1
ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
xorl %edx, %edx # zero error code
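
The entry_32.S changes above are the core of 32-bit page-table isolation: a kernel entry switches to the kernel CR3 and, if it came in on the small entry stack, copies the frame to the task stack; every exit path undoes both. The CR3 side reduces to toggling a single bit, which works because under this scheme the user and kernel page directories are allocated back to back. A hedged C model of the bit manipulation the SWITCH_TO_*_CR3 macros perform (PAGE_SHIFT is 12 on x86):

    #define PTI_SWITCH_MASK (1UL << 12)     /* 1 << PAGE_SHIFT */

    static unsigned long to_user_cr3(unsigned long cr3)
    {
            return cr3 | PTI_SWITCH_MASK;   /* select user pagedir */
    }

    static unsigned long to_kernel_cr3(unsigned long cr3)
    {
            return cr3 & ~PTI_SWITCH_MASK;  /* select kernel pagedir */
    }

The CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3 markers reuse bits 31 and 30 of the saved CS dword (hardware only defines the low 16 bits) so the iret path knows whether to copy back to the entry stack and which CR3 to restore.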
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 73a522d53b53..957dfb693ecc 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -92,7 +92,7 @@ END(native_usergs_sysret64)
.endm
.macro TRACE_IRQS_IRETQ_DEBUG
- bt $9, EFLAGS(%rsp) /* interrupts off? */
+ btl $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON_DEBUG
1:
@@ -408,6 +408,7 @@ ENTRY(ret_from_fork)
1:
/* kernel thread */
+ UNWIND_HINT_EMPTY
movq %r12, %rdi
CALL_NOSPEC %rbx
/*
@@ -701,7 +702,7 @@ retint_kernel:
#ifdef CONFIG_PREEMPT
/* Interrupts are off */
/* Check if we need preemption */
- bt $9, EFLAGS(%rsp) /* were interrupts off? */
+ btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f
@@ -981,7 +982,7 @@ ENTRY(\sym)
call \do_sym
- jmp error_exit /* %ebx: no swapgs flag */
+ jmp error_exit
.endif
END(\sym)
.endm
@@ -1222,7 +1223,6 @@ END(paranoid_exit)
/*
* Save all registers in pt_regs, and switch GS if needed.
- * Return: EBX=0: came from user mode; EBX=1: otherwise
*/
ENTRY(error_entry)
UNWIND_HINT_FUNC
@@ -1269,7 +1269,6 @@ ENTRY(error_entry)
* for these here too.
*/
.Lerror_kernelspace:
- incl %ebx
leaq native_irq_return_iret(%rip), %rcx
cmpq %rcx, RIP+8(%rsp)
je .Lerror_bad_iret
@@ -1303,28 +1302,20 @@ ENTRY(error_entry)
/*
* Pretend that the exception came from user mode: set up pt_regs
- * as if we faulted immediately after IRET and clear EBX so that
- * error_exit knows that we will be returning to user mode.
+ * as if we faulted immediately after IRET.
*/
mov %rsp, %rdi
call fixup_bad_iret
mov %rax, %rsp
- decl %ebx
jmp .Lerror_entry_from_usermode_after_swapgs
END(error_entry)
-
-/*
- * On entry, EBX is a "return to kernel mode" flag:
- * 1: already in kernel mode, don't need SWAPGS
- * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
- */
ENTRY(error_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
- testl %ebx, %ebx
- jnz retint_kernel
+ testb $3, CS(%rsp)
+ jz retint_kernel
jmp retint_user
END(error_exit)
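
The error_entry/error_exit change removes the EBX flag protocol between the two, which required every path through error_entry to hand back a correct "came from kernel" value; the exit path now derives the answer directly from the saved CS selector, which cannot go stale. A one-line C equivalent of the new "testb $3, CS(%rsp)" check:

    /* RPL bits of the saved CS: non-zero means we interrupted
     * user mode (this is what the kernel's user_mode() tests). */
    static bool came_from_user(struct pt_regs *regs)
    {
            return (regs->cs & 3) != 0;
    }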
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 261802b1cc50..9f695f517747 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -46,10 +46,8 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
CPPFLAGS_vdso.lds += -P -C
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
- -Wl,--no-undefined \
- -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
- $(DISABLE_LTO)
+VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
+ -z max-page-size=4096 -z common-page-size=4096
$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
@@ -58,9 +56,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(src
hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@
-define cmd_vdso2c
- $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+ cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
$(call if_changed,vdso2c)
@@ -95,10 +91,8 @@ CFLAGS_REMOVE_vvar.o = -pg
#
CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
- -Wl,-soname=linux-vdso.so.1 \
- -Wl,-z,max-page-size=4096 \
- -Wl,-z,common-page-size=4096
+VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
+ -z max-page-size=4096 -z common-page-size=4096
# x32-rebranded versions
vobjx32s-y := $(vobjs-y:.o=-x32.o)
@@ -123,7 +117,7 @@ $(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE
$(call if_changed,vdso)
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
targets += vdso32/vdso32.lds
targets += vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
@@ -157,13 +151,13 @@ $(obj)/vdso32.so.dbg: FORCE \
# The DSO images are built using a special linker script.
#
quiet_cmd_vdso = VDSO $@
- cmd_vdso = $(CC) -nostdlib -o $@ \
+ cmd_vdso = $(LD) -nostdlib -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+ -T $(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
- $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
+ $(call ld-option, --build-id) -Bsymbolic
GCOV_PROFILE := n
#
diff --git a/arch/x86/entry/vdso/vdso-note.S b/arch/x86/entry/vdso/vdso-note.S
index 79a071e4357e..79423170118f 100644
--- a/arch/x86/entry/vdso/vdso-note.S
+++ b/arch/x86/entry/vdso/vdso-note.S
@@ -3,6 +3,7 @@
* Here we can supply some information useful to userland.
*/
+#include <linux/build-salt.h>
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>
@@ -10,3 +11,5 @@
ELFNOTE_START(Linux, 0, "a")
.long LINUX_VERSION_CODE
ELFNOTE_END
+
+BUILD_SALT
diff --git a/arch/x86/entry/vdso/vdso32/note.S b/arch/x86/entry/vdso/vdso32/note.S
index 9fd51f206314..e78047d119f6 100644
--- a/arch/x86/entry/vdso/vdso32/note.S
+++ b/arch/x86/entry/vdso/vdso32/note.S
@@ -4,6 +4,7 @@
* Here we can supply some information useful to userland.
*/
+#include <linux/build-salt.h>
#include <linux/version.h>
#include <linux/elfnote.h>
@@ -14,6 +15,8 @@ ELFNOTE_START(Linux, 0, "a")
.long LINUX_VERSION_CODE
ELFNOTE_END
+BUILD_SALT
+
#ifdef CONFIG_XEN
/*
* Add a special note telling glibc's dynamic linker a fake hardware
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 4b98101209a1..d50bb4dc0650 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
struct perf_event *event = pcpu->event;
- struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event *hwc;
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;
@@ -602,6 +602,10 @@ fail:
return 0;
}
+ if (WARN_ON_ONCE(!event))
+ goto fail;
+
+ hwc = &event->hw;
msr = hwc->config_base;
buf = ibs_data.regs;
rdmsrl(msr, *buf);
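
The IBS fix reorders a dereference against its validity check: pcpu->event can be NULL (for example when the NMI races with event teardown; the exact race is not shown here), and the old code computed &event->hw, an offset from that pointer, before the !event check ran. A hedged sketch of the rule the fix enforces:

    struct hw_perf_event *hwc;      /* don't initialise from event yet */

    if (WARN_ON_ONCE(!event))
            goto fail;
    hwc = &event->hw;               /* safe: event is non-NULL here */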
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 707b2a96e516..035c37481f57 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2041,15 +2041,15 @@ static void intel_pmu_disable_event(struct perf_event *event)
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
cpuc->intel_cp_status &= ~(1ull << hwc->idx);
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_disable(event);
+
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc);
return;
}
x86_pmu_disable_event(event);
-
- if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_disable(event);
}
static void intel_pmu_del_event(struct perf_event *event)
@@ -2068,17 +2068,19 @@ static void intel_pmu_read_event(struct perf_event *event)
x86_perf_event_update(event);
}
-static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct perf_event *event)
{
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
- u64 ctrl_val, bits, mask;
+ u64 ctrl_val, mask, bits = 0;
/*
- * Enable IRQ generation (0x8),
+ * Enable IRQ generation (0x8), if not PEBS,
* and enable ring-3 counting (0x2) and ring-0 counting (0x1)
* if requested:
*/
- bits = 0x8ULL;
+ if (!event->attr.precise_ip)
+ bits |= 0x8;
if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
bits |= 0x2;
if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
@@ -2120,14 +2122,14 @@ static void intel_pmu_enable_event(struct perf_event *event)
if (unlikely(event_is_checkpointed(event)))
cpuc->intel_cp_status |= (1ull << hwc->idx);
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_enable(event);
+
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
- intel_pmu_enable_fixed(hwc);
+ intel_pmu_enable_fixed(event);
return;
}
- if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_enable(event);
-
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
@@ -2280,7 +2282,10 @@ again:
* counters from the GLOBAL_STATUS mask and we always process PEBS
* events via drain_pebs().
*/
- status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+ status &= ~cpuc->pebs_enabled;
+ else
+ status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
/*
* PEBS overflow sets bit 62 in the global status register
@@ -2997,6 +3002,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
}
if (x86_pmu.pebs_aliases)
x86_pmu.pebs_aliases(event);
+
+ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+ event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
}
if (needs_branch_stack(event)) {
@@ -4069,7 +4077,6 @@ __init int intel_pmu_init(void)
intel_pmu_lbr_init_skl();
x86_pmu.event_constraints = intel_slm_event_constraints;
- x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints;
x86_pmu.extra_regs = intel_glm_extra_regs;
/*
* It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
@@ -4079,6 +4086,7 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_prec_dist = true;
x86_pmu.lbr_pt_coexist = true;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_PEBS_ALL;
x86_pmu.get_event_constraints = glp_get_event_constraints;
x86_pmu.cpu_events = glm_events_attrs;
/* Goldmont Plus has 4-wide pipeline */
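
Together with PMU_FL_PEBS_ALL below, this teaches the PEBS machinery about fixed counters on Goldmont Plus: PEBS enable/disable now happens before the fixed-counter MSR write, and a PEBS event on a fixed counter no longer requests a PMI, since its samples arrive through the PEBS buffer instead. A hedged sketch of the per-slot control bits intel_pmu_enable_fixed() assembles (each fixed counter owns 4 bits in MSR_ARCH_PERFMON_FIXED_CTR_CTRL):

    u64 bits = 0, mask = 0xfULL << (idx * 4);

    if (!event->attr.precise_ip)
            bits |= 0x8;    /* PMI on overflow; skipped for PEBS */
    if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
            bits |= 0x2;    /* count ring 3 */
    if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
            bits |= 0x1;    /* count ring 0 */

    ctrl_val = (ctrl_val & ~mask) | (bits << (idx * 4));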
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8a10a045b57b..b7b01d762d32 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -408,9 +408,11 @@ static int alloc_bts_buffer(int cpu)
ds->bts_buffer_base = (unsigned long) cea;
ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
ds->bts_index = ds->bts_buffer_base;
- max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
- ds->bts_absolute_maximum = ds->bts_buffer_base + max;
- ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
+ max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+ ds->bts_absolute_maximum = ds->bts_buffer_base +
+ max * BTS_RECORD_SIZE;
+ ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+ (max / 16) * BTS_RECORD_SIZE;
return 0;
}
@@ -711,12 +713,6 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
-struct event_constraint intel_glp_pebs_event_constraints[] = {
- /* Allow all events as PEBS with no flags */
- INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
- EVENT_CONSTRAINT_END
-};
-
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
@@ -869,6 +865,13 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
}
}
+ /*
+ * Extended PEBS support:
+ * make the PEBS code search the normal constraints.
+ */
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+ return NULL;
+
return &emptyconstraint;
}
@@ -894,10 +897,16 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
struct debug_store *ds = cpuc->ds;
u64 threshold;
+ int reserved;
+
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+ reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
+ else
+ reserved = x86_pmu.max_pebs_events;
if (cpuc->n_pebs == cpuc->n_large_pebs) {
threshold = ds->pebs_absolute_maximum -
- x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
+ reserved * x86_pmu.pebs_record_size;
} else {
threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
}
@@ -961,7 +970,11 @@ void intel_pmu_pebs_enable(struct perf_event *event)
* This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
*/
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
- ds->pebs_event_reset[hwc->idx] =
+ unsigned int idx = hwc->idx;
+
+ if (idx >= INTEL_PMC_IDX_FIXED)
+ idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
+ ds->pebs_event_reset[idx] =
(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
} else {
ds->pebs_event_reset[hwc->idx] = 0;
@@ -1184,16 +1197,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
}
/*
+ * We must however always use iregs for the unwinder to stay sane; the
+ * record BP,SP,IP can point into thin air when the record is from a
+ * previous PMI context or an (I)RET happened between the record and
+ * the PMI.
+ */
+ if (sample_type & PERF_SAMPLE_CALLCHAIN)
+ data->callchain = perf_callchain(event, iregs);
+
+ /*
* We use the interrupt regs as a base because the PEBS record does not
* contain a full regs set, specifically it seems to lack segment
* descriptors, which get used by things like user_mode().
*
* In the simple case fix up only the IP for PERF_SAMPLE_IP.
- *
- * We must however always use BP,SP from iregs for the unwinder to stay
- * sane; the record BP,SP can point into thin air when the record is
- * from a previous PMI context or an (I)RET happend between the record
- * and PMI.
*/
*regs = *iregs;
@@ -1212,15 +1229,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
regs->si = pebs->si;
regs->di = pebs->di;
- /*
- * Per the above; only set BP,SP if we don't need callchains.
- *
- * XXX: does this make sense?
- */
- if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
- regs->bp = pebs->bp;
- regs->sp = pebs->sp;
- }
+ regs->bp = pebs->bp;
+ regs->sp = pebs->sp;
#ifndef CONFIG_X86_32
regs->r8 = pebs->r8;
@@ -1482,9 +1492,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
struct debug_store *ds = cpuc->ds;
struct perf_event *event;
void *base, *at, *top;
- short counts[MAX_PEBS_EVENTS] = {};
- short error[MAX_PEBS_EVENTS] = {};
- int bit, i;
+ short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+ short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+ int bit, i, size;
+ u64 mask;
if (!x86_pmu.pebs_active)
return;
@@ -1494,6 +1505,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
ds->pebs_index = ds->pebs_buffer_base;
+ mask = (1ULL << x86_pmu.max_pebs_events) - 1;
+ size = x86_pmu.max_pebs_events;
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
+ mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
+ size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+ }
+
if (unlikely(base >= top)) {
/*
* The drain_pebs() could be called twice in a short period
@@ -1503,7 +1521,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
* update the event->count for this case.
*/
for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
- x86_pmu.max_pebs_events) {
+ size) {
event = cpuc->events[bit];
if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
intel_pmu_save_and_restart_reload(event, 0);
@@ -1516,12 +1534,12 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
u64 pebs_status;
pebs_status = p->status & cpuc->pebs_enabled;
- pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+ pebs_status &= mask;
/* PEBS v3 has more accurate status bits */
if (x86_pmu.intel_cap.pebs_format >= 3) {
for_each_set_bit(bit, (unsigned long *)&pebs_status,
- x86_pmu.max_pebs_events)
+ size)
counts[bit]++;
continue;
@@ -1569,7 +1587,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
counts[bit]++;
}
- for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) {
+ for (bit = 0; bit < size; bit++) {
if ((counts[bit] == 0) && (error[bit] == 0))
continue;
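
The ds.c half extends every piece of PEBS bookkeeping past the general-purpose counters: the status mask, the drain-loop bound, the threshold reservation, and the pebs_event_reset slots all now cover fixed counters, which live at bit INTEL_PMC_IDX_FIXED (32) and above. A hedged sketch of the counter bitmap layout the code above assumes:

    /* bits [0, max_pebs_events): GP counters
     * bits [32, 32 + num_counters_fixed): fixed counters */
    u64 mask = (1ULL << max_pebs_events) - 1;

    if (flags & PMU_FL_PEBS_ALL)
            mask |= ((1ULL << num_counters_fixed) - 1) << 32;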
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index cf372b90557e..f3e006bed9a7 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -216,6 +216,8 @@ static void intel_pmu_lbr_reset_64(void)
void intel_pmu_lbr_reset(void)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
if (!x86_pmu.lbr_nr)
return;
@@ -223,6 +225,9 @@ void intel_pmu_lbr_reset(void)
intel_pmu_lbr_reset_32();
else
intel_pmu_lbr_reset_64();
+
+ cpuc->last_task_ctx = NULL;
+ cpuc->last_log_id = 0;
}
/*
@@ -334,6 +339,7 @@ static inline u64 rdlbr_to(unsigned int idx)
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
unsigned lbr_idx, mask;
u64 tos;
@@ -344,9 +350,21 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
return;
}
- mask = x86_pmu.lbr_nr - 1;
tos = task_ctx->tos;
- for (i = 0; i < tos; i++) {
+ /*
+ * Don't restore the LBR registers if
+ * - nobody else touched them, and
+ * - the CPU did not enter C6
+ */
+ if ((task_ctx == cpuc->last_task_ctx) &&
+ (task_ctx->log_id == cpuc->last_log_id) &&
+ rdlbr_from(tos)) {
+ task_ctx->lbr_stack_state = LBR_NONE;
+ return;
+ }
+
+ mask = x86_pmu.lbr_nr - 1;
+ for (i = 0; i < task_ctx->valid_lbrs; i++) {
lbr_idx = (tos - i) & mask;
wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
wrlbr_to (lbr_idx, task_ctx->lbr_to[i]);
@@ -354,14 +372,24 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+
+ for (; i < x86_pmu.lbr_nr; i++) {
+ lbr_idx = (tos - i) & mask;
+ wrlbr_from(lbr_idx, 0);
+ wrlbr_to(lbr_idx, 0);
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+ wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
+ }
+
wrmsrl(x86_pmu.lbr_tos, tos);
task_ctx->lbr_stack_state = LBR_NONE;
}
static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned lbr_idx, mask;
- u64 tos;
+ u64 tos, from;
int i;
if (task_ctx->lbr_callstack_users == 0) {
@@ -371,15 +399,22 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
- for (i = 0; i < tos; i++) {
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
lbr_idx = (tos - i) & mask;
- task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+ from = rdlbr_from(lbr_idx);
+ if (!from)
+ break;
+ task_ctx->lbr_from[i] = from;
task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+ task_ctx->valid_lbrs = i;
task_ctx->tos = tos;
task_ctx->lbr_stack_state = LBR_VALID;
+
+ cpuc->last_task_ctx = task_ctx;
+ cpuc->last_log_id = ++task_ctx->log_id;
}
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
@@ -531,7 +566,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
*/
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
- bool need_info = false;
+ bool need_info = false, call_stack = false;
unsigned long mask = x86_pmu.lbr_nr - 1;
int lbr_format = x86_pmu.intel_cap.lbr_format;
u64 tos = intel_pmu_lbr_tos();
@@ -542,7 +577,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
if (cpuc->lbr_sel) {
need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
if (cpuc->lbr_sel->config & LBR_CALL_STACK)
- num = tos;
+ call_stack = true;
}
for (i = 0; i < num; i++) {
@@ -555,6 +590,13 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
from = rdlbr_from(lbr_idx);
to = rdlbr_to(lbr_idx);
+ /*
+ * Read LBR call stack entries until
+ * an invalid (all-zero) entry is detected.
+ */
+ if (call_stack && !from)
+ break;
+
if (lbr_format == LBR_FORMAT_INFO && need_info) {
u64 info;
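The hunk above threads a generation counter through the LBR context-switch path: __intel_pmu_lbr_save() stamps each saved context with an incrementing log_id and remembers it per CPU, and __intel_pmu_lbr_restore() skips the expensive MSR writes when the very same context comes back untouched. Below is a minimal userspace sketch of that guard; all names are illustrative, not the kernel's, and the MSR accesses and C6 detection are elided.

#include <stdio.h>

struct task_ctx {
	int log_id;	/* generation stamped at save time */
	int data;	/* stands in for the saved LBR stack */
};

static struct task_ctx *last_ctx;	/* per-CPU in the real code */
static int last_log_id;

static void ctx_save(struct task_ctx *ctx, int data)
{
	ctx->data = data;
	last_ctx = ctx;
	last_log_id = ++ctx->log_id;
}

static int ctx_restore(struct task_ctx *ctx)
{
	/* Skip the expensive restore: nothing ran in between. */
	if (ctx == last_ctx && ctx->log_id == last_log_id)
		return 0;
	return 1;	/* would rewrite the LBR MSRs here */
}

int main(void)
{
	struct task_ctx a = { 0 }, b = { 0 };

	ctx_save(&a, 1);
	printf("restore a: %s\n", ctx_restore(&a) ? "full" : "skipped");
	ctx_save(&b, 2);	/* another task saved in between */
	printf("restore a: %s\n", ctx_restore(&a) ? "full" : "skipped");
	return 0;
}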
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index c9e1e0bef3c3..e17ab885b1e9 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -28,7 +28,7 @@
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV 0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX 3
+#define UNCORE_EXTRA_PCI_DEV_MAX 4
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 87dc0263a2e1..51d7c117e3c7 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1029,6 +1029,7 @@ void snbep_uncore_cpu_init(void)
enum {
SNBEP_PCI_QPI_PORT0_FILTER,
SNBEP_PCI_QPI_PORT1_FILTER,
+ BDX_PCI_QPI_PORT2_FILTER,
HSWEP_PCI_PCU_3,
};
@@ -3286,15 +3287,18 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
},
{ /* QPI Port 0 filter */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT0_FILTER),
},
{ /* QPI Port 1 filter */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT1_FILTER),
},
{ /* QPI Port 2 filter */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ BDX_PCI_QPI_PORT2_FILTER),
},
{ /* PCU.3 (for Capability registers) */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 9f3711470ec1..156286335351 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -163,6 +163,7 @@ struct intel_excl_cntrs {
unsigned core_id; /* per-core: core id */
};
+struct x86_perf_task_context;
#define MAX_LBR_ENTRIES 32
enum {
@@ -214,6 +215,8 @@ struct cpu_hw_events {
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
struct er_account *lbr_sel;
u64 br_sel;
+ struct x86_perf_task_context *last_task_ctx;
+ int last_log_id;
/*
* Intel host/guest exclude bits
@@ -648,8 +651,10 @@ struct x86_perf_task_context {
u64 lbr_to[MAX_LBR_ENTRIES];
u64 lbr_info[MAX_LBR_ENTRIES];
int tos;
+ int valid_lbrs;
int lbr_callstack_users;
int lbr_stack_state;
+ int log_id;
};
#define x86_add_quirk(func_) \
@@ -668,6 +673,7 @@ do { \
#define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
+#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index b173d404e3df..b21ee65c4101 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1,2 +1,2 @@
-obj-y := hv_init.o mmu.o
+obj-y := hv_init.o mmu.o nested.o
obj-$(CONFIG_X86_64) += hv_apic.o
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index f68855499391..5b0f613428c2 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -31,6 +31,8 @@
#include <asm/mshyperv.h>
#include <asm/apic.h>
+#include <asm/trace/hyperv.h>
+
static struct apic orig_apic;
static u64 hv_apic_icr_read(void)
@@ -99,6 +101,9 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
int nr_bank = 0;
int ret = 1;
+ if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return false;
+
local_irq_save(flags);
arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -114,6 +119,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
}
+ if (nr_bank < 0)
+ goto ipi_mask_ex_done;
if (!nr_bank)
ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
@@ -128,10 +135,10 @@ ipi_mask_ex_done:
static bool __send_ipi_mask(const struct cpumask *mask, int vector)
{
int cur_cpu, vcpu;
- struct ipi_arg_non_ex **arg;
- struct ipi_arg_non_ex *ipi_arg;
+ struct ipi_arg_non_ex ipi_arg;
int ret = 1;
- unsigned long flags;
+
+ trace_hyperv_send_ipi_mask(mask, vector);
if (cpumask_empty(mask))
return true;
@@ -142,37 +149,43 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
return false;
- if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- return __send_ipi_mask_ex(mask, vector);
-
- local_irq_save(flags);
- arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
-
- ipi_arg = *arg;
- if (unlikely(!ipi_arg))
- goto ipi_mask_done;
-
- ipi_arg->vector = vector;
- ipi_arg->reserved = 0;
- ipi_arg->cpu_mask = 0;
+ /*
+ * From the supplied CPU set we need to figure out if we can get away
+ * with the cheaper HVCALL_SEND_IPI hypercall. This is possible when the
+ * highest VP number in the set is < 64. As VP numbers are usually in
+ * ascending order and match Linux CPU ids, here is an optimization:
+ * we check the VP number of the highest bit in the supplied set first,
+ * so we can quickly find out if using the HVCALL_SEND_IPI_EX hypercall
+ * is a must. We still check all VP numbers when walking the supplied
+ * CPU set to remain correct in all cases.
+ */
+ if (hv_cpu_number_to_vp_number(cpumask_last(mask)) >= 64)
+ goto do_ex_hypercall;
+
+ ipi_arg.vector = vector;
+ ipi_arg.cpu_mask = 0;
for_each_cpu(cur_cpu, mask) {
vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+ if (vcpu == VP_INVAL)
+ return false;
+
/*
* This particular version of the IPI hypercall can
* only target up to 64 CPUs.
*/
if (vcpu >= 64)
- goto ipi_mask_done;
+ goto do_ex_hypercall;
- __set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask);
+ __set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
}
- ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL);
-
-ipi_mask_done:
- local_irq_restore(flags);
+ ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
+ ipi_arg.cpu_mask);
return ((ret == 0) ? true : false);
+
+do_ex_hypercall:
+ return __send_ipi_mask_ex(mask, vector);
}
static bool __send_ipi_one(int cpu, int vector)
@@ -228,10 +241,7 @@ static void hv_send_ipi_self(int vector)
void __init hv_apic_init(void)
{
if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) {
- if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- pr_info("Hyper-V: Using ext hypercalls for IPI\n");
- else
- pr_info("Hyper-V: Using IPI hypercalls\n");
+ pr_info("Hyper-V: Using IPI hypercalls\n");
/*
* Set the IPI entry points.
*/
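The rewritten __send_ipi_mask() picks between the packed HVCALL_SEND_IPI fast path and the sparse _EX variant by looking at VP numbers. Here is a hedged sketch of that classification, assuming an identity CPU-to-VP mapping; the real mapping comes from hv_cpu_number_to_vp_number().

#include <stdint.h>
#include <stdio.h>

#define VP_INVAL UINT32_MAX

/* Stand-in for hv_cpu_number_to_vp_number(); identity mapping here. */
static uint32_t cpu_to_vp(int cpu)
{
	return (uint32_t)cpu;
}

/* 1 = cheap HVCALL_SEND_IPI possible, 0 = need _EX, -1 = give up. */
static int classify_ipi_target(const int *cpus, int n, uint64_t *mask)
{
	*mask = 0;
	for (int i = 0; i < n; i++) {
		uint32_t vp = cpu_to_vp(cpus[i]);

		if (vp == VP_INVAL)
			return -1;	/* kernel: fail the send entirely */
		if (vp >= 64)
			return 0;	/* kernel: goto do_ex_hypercall */
		*mask |= 1ULL << vp;
	}
	return 1;			/* one fast hypercall suffices */
}

int main(void)
{
	int small[] = { 0, 3, 63 }, big[] = { 1, 64 };
	uint64_t mask;

	printf("small set -> %d\n", classify_ipi_target(small, 3, &mask));
	printf("mask      =  %#llx\n", (unsigned long long)mask);
	printf("big set   -> %d\n", classify_ipi_target(big, 2, &mask));
	return 0;
}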
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 4c431e1c1eff..20c876c7c5bf 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -265,7 +265,7 @@ void __init hyperv_init(void)
{
u64 guest_id, required_msrs;
union hv_x64_msr_hypercall_contents hypercall_msr;
- int cpuhp;
+ int cpuhp, i;
if (x86_hyper_type != X86_HYPER_MS_HYPERV)
return;
@@ -293,6 +293,9 @@ void __init hyperv_init(void)
if (!hv_vp_index)
return;
+ for (i = 0; i < num_possible_cpus(); i++)
+ hv_vp_index[i] = VP_INVAL;
+
hv_vp_assist_page = kcalloc(num_possible_cpus(),
sizeof(*hv_vp_assist_page), GFP_KERNEL);
if (!hv_vp_assist_page) {
@@ -330,7 +333,7 @@ void __init hyperv_init(void)
* Register Hyper-V specific clocksource.
*/
#ifdef CONFIG_HYPERV_TSCPAGE
- if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+ if (ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE) {
union hv_x64_msr_hypercall_contents tsc_msr;
tsc_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
@@ -359,7 +362,7 @@ register_msr_cs:
*/
hyperv_cs = &hyperv_cs_msr;
- if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
+ if (ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE)
clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
return;
@@ -423,6 +426,33 @@ void hyperv_report_panic(struct pt_regs *regs, long err)
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);
+/**
+ * hyperv_report_panic_msg - report panic message to Hyper-V
+ * @pa: physical address of the panic page containing the message
+ * @size: size of the message in the page
+ */
+void hyperv_report_panic_msg(phys_addr_t pa, size_t size)
+{
+ /*
+ * P3 holds the physical address of the panic page and P4 holds the
+ * size of the panic data in that page. The remaining registers are
+ * ignored when the NOTIFY_MSG flag is set.
+ */
+ wrmsrl(HV_X64_MSR_CRASH_P0, 0);
+ wrmsrl(HV_X64_MSR_CRASH_P1, 0);
+ wrmsrl(HV_X64_MSR_CRASH_P2, 0);
+ wrmsrl(HV_X64_MSR_CRASH_P3, pa);
+ wrmsrl(HV_X64_MSR_CRASH_P4, size);
+
+ /*
+ * Let Hyper-V know there is crash data available along with
+ * the panic message.
+ */
+ wrmsrl(HV_X64_MSR_CRASH_CTL,
+ (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
+}
+EXPORT_SYMBOL_GPL(hyperv_report_panic_msg);
+
bool hv_is_hyperv_initialized(void)
{
union hv_x64_msr_hypercall_contents hypercall_msr;
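hyperv_report_panic_msg() above ends by writing a control MSR whose two top bits carry the notification flags. A small sketch, using the same TLFS bit positions as the header change later in this series, of composing that value:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)			(1ULL << (n))
#define HV_CRASH_CTL_CRASH_NOTIFY_MSG	BIT_ULL(62)
#define HV_CRASH_CTL_CRASH_NOTIFY	BIT_ULL(63)

int main(void)
{
	uint64_t ctl = HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG;

	/* 0xc000000000000000: both notify and notify-msg set */
	printf("crash ctl = %#llx\n", (unsigned long long)ctl);
	return 0;
}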
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index de27615c51ea..1147e1fed7ff 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -16,6 +16,8 @@
/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
+static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+ const struct flush_tlb_info *info);
/*
* Fills in gva_list starting from offset. Returns the number of items added.
@@ -93,10 +95,29 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
if (cpumask_equal(cpus, cpu_present_mask)) {
flush->flags |= HV_FLUSH_ALL_PROCESSORS;
} else {
+ /*
+ * From the supplied CPU set we need to figure out if we can get
+ * away with the cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
+ * hypercalls. This is possible when the highest VP number in
+ * the set is < 64. As VP numbers are usually in ascending order
+ * and match Linux CPU ids, here is an optimization: we check
+ * the VP number of the highest bit in the supplied set first,
+ * so we can quickly find out if using the *_EX hypercalls is a
+ * must. We still check all VP numbers when walking the
+ * supplied CPU set to remain correct in all cases.
+ */
+ if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)
+ goto do_ex_hypercall;
+
for_each_cpu(cpu, cpus) {
vcpu = hv_cpu_number_to_vp_number(cpu);
- if (vcpu >= 64)
+ if (vcpu == VP_INVAL) {
+ local_irq_restore(flags);
goto do_native;
+ }
+
+ if (vcpu >= 64)
+ goto do_ex_hypercall;
__set_bit(vcpu, (unsigned long *)
&flush->processor_mask);
@@ -123,7 +144,12 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
gva_n, 0, flush, NULL);
}
+ goto check_status;
+
+do_ex_hypercall:
+ status = hyperv_flush_tlb_others_ex(cpus, info);
+check_status:
local_irq_restore(flags);
if (!(status & HV_HYPERCALL_RESULT_MASK))
@@ -132,35 +158,22 @@ do_native:
native_flush_tlb_others(cpus, info);
}
-static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
- const struct flush_tlb_info *info)
+static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+ const struct flush_tlb_info *info)
{
int nr_bank = 0, max_gvas, gva_n;
struct hv_tlb_flush_ex **flush_pcpu;
struct hv_tlb_flush_ex *flush;
- u64 status = U64_MAX;
- unsigned long flags;
+ u64 status;
- trace_hyperv_mmu_flush_tlb_others(cpus, info);
-
- if (!hv_hypercall_pg)
- goto do_native;
-
- if (cpumask_empty(cpus))
- return;
-
- local_irq_save(flags);
+ if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return U64_MAX;
flush_pcpu = (struct hv_tlb_flush_ex **)
this_cpu_ptr(hyperv_pcpu_input_arg);
flush = *flush_pcpu;
- if (unlikely(!flush)) {
- local_irq_restore(flags);
- goto do_native;
- }
-
if (info->mm) {
/*
* AddressSpace argument must match the CR3 with PCID bits
@@ -176,15 +189,10 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
flush->hv_vp_set.valid_bank_mask = 0;
- if (!cpumask_equal(cpus, cpu_present_mask)) {
- flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
- nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
- }
-
- if (!nr_bank) {
- flush->hv_vp_set.format = HV_GENERIC_SET_ALL;
- flush->flags |= HV_FLUSH_ALL_PROCESSORS;
- }
+ flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
+ if (nr_bank < 0)
+ return U64_MAX;
/*
* We can flush at most max_gvas with one hypercall. Flush the
@@ -213,12 +221,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
gva_n, nr_bank, flush, NULL);
}
- local_irq_restore(flags);
-
- if (!(status & HV_HYPERCALL_RESULT_MASK))
- return;
-do_native:
- native_flush_tlb_others(cpus, info);
+ return status;
}
void hyperv_setup_mmu_ops(void)
@@ -226,11 +229,6 @@ void hyperv_setup_mmu_ops(void)
if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
return;
- if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
- pr_info("Using hypercall for remote TLB flush\n");
- pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
- } else {
- pr_info("Using ext hypercall for remote TLB flush\n");
- pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
- }
+ pr_info("Using hypercall for remote TLB flush\n");
+ pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
}
diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c
new file mode 100644
index 000000000000..b8e60cc50461
--- /dev/null
+++ b/arch/x86/hyperv/nested.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Hyper-V nested virtualization code.
+ *
+ * Copyright (C) 2018, Microsoft, Inc.
+ *
+ * Author : Lan Tianyu <Tianyu.Lan@microsoft.com>
+ */
+
+
+#include <linux/types.h>
+#include <asm/hyperv-tlfs.h>
+#include <asm/mshyperv.h>
+#include <asm/tlbflush.h>
+
+#include <asm/trace/hyperv.h>
+
+int hyperv_flush_guest_mapping(u64 as)
+{
+ struct hv_guest_mapping_flush **flush_pcpu;
+ struct hv_guest_mapping_flush *flush;
+ u64 status;
+ unsigned long flags;
+ int ret = -ENOTSUPP;
+
+ if (!hv_hypercall_pg)
+ goto fault;
+
+ local_irq_save(flags);
+
+ flush_pcpu = (struct hv_guest_mapping_flush **)
+ this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ flush = *flush_pcpu;
+
+ if (unlikely(!flush)) {
+ local_irq_restore(flags);
+ goto fault;
+ }
+
+ flush->address_space = as;
+ flush->flags = 0;
+
+ status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
+ flush, NULL);
+ local_irq_restore(flags);
+
+ if (!(status & HV_HYPERCALL_RESULT_MASK))
+ ret = 0;
+
+fault:
+ trace_hyperv_nested_flush_guest_mapping(as, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);
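hyperv_flush_guest_mapping() follows the usual Hyper-V status convention: the low 16 bits of the hypercall return value are the result code and zero means success, hence the !(status & HV_HYPERCALL_RESULT_MASK) test. A sketch of that check; the mask matches the TLFS definition of the low 16 result bits, and the error mapping is illustrative:

#include <stdint.h>
#include <stdio.h>

#define HV_HYPERCALL_RESULT_MASK 0xffffULL	/* low 16 bits: result code */

/* 0 on success, a negative value otherwise (mapping is illustrative). */
static int hypercall_status_to_errno(uint64_t status)
{
	return (status & HV_HYPERCALL_RESULT_MASK) ? -1 : 0;
}

int main(void)
{
	printf("status 0x0      -> %d\n", hypercall_status_to_errno(0x0));
	printf("status 0x2      -> %d\n", hypercall_status_to_errno(0x2));
	/* High bits (e.g. reps completed) do not affect success. */
	printf("status 1ULL<<32 -> %d\n", hypercall_status_to_errno(1ULL << 32));
	return 0;
}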
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index de690c2d2e33..a0ab9ab61c75 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -8,5 +8,6 @@ generated-y += xen-hypercalls.h
generic-y += dma-contiguous.h
generic-y += early_ioremap.h
+generic-y += export.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 74a9e06b6cfd..130e81e10fc7 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -10,6 +10,7 @@
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/msr.h>
+#include <asm/hardirq.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -502,12 +503,19 @@ extern int default_check_phys_apicid_present(int phys_apicid);
#endif /* CONFIG_X86_LOCAL_APIC */
+#ifdef CONFIG_SMP
+bool apic_id_is_primary_thread(unsigned int id);
+#else
+static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
+#endif
+
extern void irq_enter(void);
extern void irq_exit(void);
static inline void entering_irq(void)
{
irq_enter();
+ kvm_set_cpu_l1tf_flush_l1d();
}
static inline void entering_ack_irq(void)
@@ -520,6 +528,7 @@ static inline void ipi_entering_ack_irq(void)
{
irq_enter();
ack_APIC_irq();
+ kvm_set_cpu_l1tf_flush_l1d();
}
static inline void exiting_irq(void)
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index c356098b6fb9..4d4015ddcf26 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -7,8 +7,6 @@
#ifndef _ASM_X86_MACH_DEFAULT_APM_H
#define _ASM_X86_MACH_DEFAULT_APM_H
-#include <asm/nospec-branch.h>
-
#ifdef APM_ZERO_SEGS
# define APM_DO_ZERO_SEGS \
"pushl %%ds\n\t" \
@@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
- firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -47,7 +44,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
"=S" (*esi)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
- firmware_restrict_branch_speculation_end();
}
static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
- firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
"=S" (si)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
- firmware_restrict_branch_speculation_end();
return error;
}
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 219faaec51df..990770f9e76b 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -46,6 +46,65 @@
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1 _ASM_AX
+#define _ASM_ARG2 _ASM_DX
+#define _ASM_ARG3 _ASM_CX
+
+#define _ASM_ARG1L eax
+#define _ASM_ARG2L edx
+#define _ASM_ARG3L ecx
+
+#define _ASM_ARG1W ax
+#define _ASM_ARG2W dx
+#define _ASM_ARG3W cx
+
+#define _ASM_ARG1B al
+#define _ASM_ARG2B dl
+#define _ASM_ARG3B cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1 _ASM_DI
+#define _ASM_ARG2 _ASM_SI
+#define _ASM_ARG3 _ASM_DX
+#define _ASM_ARG4 _ASM_CX
+#define _ASM_ARG5 r8
+#define _ASM_ARG6 r9
+
+#define _ASM_ARG1Q rdi
+#define _ASM_ARG2Q rsi
+#define _ASM_ARG3Q rdx
+#define _ASM_ARG4Q rcx
+#define _ASM_ARG5Q r8
+#define _ASM_ARG6Q r9
+
+#define _ASM_ARG1L edi
+#define _ASM_ARG2L esi
+#define _ASM_ARG3L edx
+#define _ASM_ARG4L ecx
+#define _ASM_ARG5L r8d
+#define _ASM_ARG6L r9d
+
+#define _ASM_ARG1W di
+#define _ASM_ARG2W si
+#define _ASM_ARG3W dx
+#define _ASM_ARG4W cx
+#define _ASM_ARG5W r8w
+#define _ASM_ARG6W r9w
+
+#define _ASM_ARG1B dil
+#define _ASM_ARG2B sil
+#define _ASM_ARG3B dl
+#define _ASM_ARG4B cl
+#define _ASM_ARG5B r8b
+#define _ASM_ARG6B r9b
+
+#endif
+
/*
* Macros to generate condition code outputs from inline assembly,
* The output operand must be type "bool".
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 0db6bec95489..b143717b92b3 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -80,6 +80,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
* true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
@@ -91,6 +92,7 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
*
* Atomically increments @v by 1.
*/
+#define arch_atomic_inc arch_atomic_inc
static __always_inline void arch_atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
@@ -103,6 +105,7 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
*
* Atomically decrements @v by 1.
*/
+#define arch_atomic_dec arch_atomic_dec
static __always_inline void arch_atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
@@ -117,6 +120,7 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
@@ -130,6 +134,7 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
@@ -144,6 +149,7 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
+#define arch_atomic_add_negative arch_atomic_add_negative
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
@@ -173,9 +179,6 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
return arch_atomic_add_return(-i, v);
}
-#define arch_atomic_inc_return(v) (arch_atomic_add_return(1, v))
-#define arch_atomic_dec_return(v) (arch_atomic_sub_return(1, v))
-
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd(&v->counter, i);
@@ -199,7 +202,7 @@ static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int n
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
- return xchg(&v->counter, new);
+ return arch_xchg(&v->counter, new);
}
static inline void arch_atomic_and(int i, atomic_t *v)
@@ -253,27 +256,6 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
return val;
}
-/**
- * __arch_atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
- */
-static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c = arch_atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
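The repeated '#define arch_atomic_inc arch_atomic_inc' lines look odd but serve a purpose: the generic atomic headers probe each name with #ifdef and only supply a fallback when the architecture did not define its own. A self-contained sketch of the pattern, with made-up names:

#include <stdio.h>

static inline void arch_op(int *v) { *v += 1; }
#define arch_op arch_op			/* advertise the arch override */

/* What a generic header then does: */
#ifndef arch_op
static inline void arch_op(int *v) { *v = *v + 1; }	/* generic fallback */
#endif

int main(void)
{
	int v = 0;

	arch_op(&v);			/* resolves to the "arch" version */
	printf("v = %d\n", v);
	return 0;
}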
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 92212bf0484f..ef959f02d070 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -158,6 +158,7 @@ static inline long long arch_atomic64_inc_return(atomic64_t *v)
"S" (v) : "memory", "ecx");
return a;
}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
static inline long long arch_atomic64_dec_return(atomic64_t *v)
{
@@ -166,6 +167,7 @@ static inline long long arch_atomic64_dec_return(atomic64_t *v)
"S" (v) : "memory", "ecx");
return a;
}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
/**
* arch_atomic64_add - add integer to atomic64 variable
@@ -198,25 +200,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
}
/**
- * arch_atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline int arch_atomic64_sub_and_test(long long i, atomic64_t *v)
-{
- return arch_atomic64_sub_return(i, v) == 0;
-}
-
-/**
* arch_atomic64_inc - increment atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1.
*/
+#define arch_atomic64_inc arch_atomic64_inc
static inline void arch_atomic64_inc(atomic64_t *v)
{
__alternative_atomic64(inc, inc_return, /* no output */,
@@ -229,6 +218,7 @@ static inline void arch_atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
+#define arch_atomic64_dec arch_atomic64_dec
static inline void arch_atomic64_dec(atomic64_t *v)
{
__alternative_atomic64(dec, dec_return, /* no output */,
@@ -236,46 +226,6 @@ static inline void arch_atomic64_dec(atomic64_t *v)
}
/**
- * arch_atomic64_dec_and_test - decrement and test
- * @v: pointer to type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline int arch_atomic64_dec_and_test(atomic64_t *v)
-{
- return arch_atomic64_dec_return(v) == 0;
-}
-
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer to type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline int arch_atomic64_inc_and_test(atomic64_t *v)
-{
- return arch_atomic64_inc_return(v) == 0;
-}
-
-/**
- * arch_atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline int arch_atomic64_add_negative(long long i, atomic64_t *v)
-{
- return arch_atomic64_add_return(i, v) < 0;
-}
-
-/**
* arch_atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
@@ -295,7 +245,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
return (int)a;
}
-
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
int r;
@@ -304,6 +254,7 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
return r;
}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
{
long long r;
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 6106b59d3260..4343d9b4f30e 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -71,6 +71,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
* true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
@@ -82,6 +83,7 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
*
* Atomically increments @v by 1.
*/
+#define arch_atomic64_inc arch_atomic64_inc
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
@@ -95,6 +97,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
+#define arch_atomic64_dec arch_atomic64_dec
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0"
@@ -110,6 +113,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
@@ -123,6 +127,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
@@ -137,6 +142,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
+#define arch_atomic64_add_negative arch_atomic64_add_negative
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
@@ -169,9 +175,6 @@ static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
return xadd(&v->counter, -i);
}
-#define arch_atomic64_inc_return(v) (arch_atomic64_add_return(1, (v)))
-#define arch_atomic64_dec_return(v) (arch_atomic64_sub_return(1, (v)))
-
static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
return arch_cmpxchg(&v->counter, old, new);
@@ -185,46 +188,7 @@ static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, l
static inline long arch_atomic64_xchg(atomic64_t *v, long new)
{
- return xchg(&v->counter, new);
-}
-
-/**
- * arch_atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- s64 c = arch_atomic64_read(v);
- do {
- if (unlikely(c == u))
- return false;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
- return true;
-}
-
-#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0)
-
-/*
- * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
-{
- s64 dec, c = arch_atomic64_read(v);
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
- return dec;
+ return arch_xchg(&v->counter, new);
}
static inline void arch_atomic64_and(long i, atomic64_t *v)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index e3efd8a06066..a55d79b233d3 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -75,7 +75,7 @@ extern void __add_wrong_size(void)
* use "asm volatile" and "memory" clobbers to prevent gcc from moving
* information around.
*/
-#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
+#define arch_xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index bfca3b346c74..072e5459fe2f 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -10,13 +10,13 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
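Routing arch_cmpxchg64() through arch_cmpxchg() keeps the semantics of a plain 64-bit compare-and-swap that returns the value found at the target. For reference, the same contract expressed in portable C11 atomics; this is illustrative, not the kernel's implementation:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Returns the value found at *ptr, like the kernel's cmpxchg(). */
static uint64_t cmpxchg64(_Atomic uint64_t *ptr, uint64_t oldv, uint64_t newv)
{
	uint64_t expected = oldv;

	atomic_compare_exchange_strong(ptr, &expected, newv);
	return expected;
}

int main(void)
{
	_Atomic uint64_t v = 5;
	uint64_t ret;

	ret = cmpxchg64(&v, 5, 7);	/* succeeds: v was 5, becomes 7 */
	printf("ret=%llu v=%llu\n", (unsigned long long)ret,
	       (unsigned long long)atomic_load(&v));

	ret = cmpxchg64(&v, 5, 9);	/* fails: v is 7, not 5 */
	printf("ret=%llu v=%llu\n", (unsigned long long)ret,
	       (unsigned long long)atomic_load(&v));
	return 0;
}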
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 5701f5cecd31..89a048c2faec 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -219,6 +219,8 @@
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
+#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -229,7 +231,7 @@
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
-
+#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
@@ -341,6 +343,7 @@
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
@@ -373,5 +376,6 @@
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h
index 0ab2ab27ad1f..b825cb201251 100644
--- a/arch/x86/include/asm/dmi.h
+++ b/arch/x86/include/asm/dmi.h
@@ -4,8 +4,8 @@
#include <linux/compiler.h>
#include <linux/init.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/setup.h>
static __always_inline __init void *dmi_alloc(unsigned len)
diff --git a/arch/x86/include/asm/export.h b/arch/x86/include/asm/export.h
deleted file mode 100644
index 2a51d66689c5..000000000000
--- a/arch/x86/include/asm/export.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_64BIT
-#define KSYM_ALIGN 16
-#endif
-#include <asm-generic/export.h>
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 740a428acf1e..d9069bb26c7f 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -3,10 +3,12 @@
#define _ASM_X86_HARDIRQ_H
#include <linux/threads.h>
-#include <linux/irq.h>
typedef struct {
- unsigned int __softirq_pending;
+ u16 __softirq_pending;
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+ u8 kvm_cpu_l1tf_flush_l1d;
+#endif
unsigned int __nmi_count; /* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int apic_timer_irqs; /* arch dependent */
@@ -58,4 +60,24 @@ extern u64 arch_irq_stat_cpu(unsigned int cpu);
extern u64 arch_irq_stat(void);
#define arch_irq_stat arch_irq_stat
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+static inline void kvm_set_cpu_l1tf_flush_l1d(void)
+{
+ __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
+}
+
+static inline void kvm_clear_cpu_l1tf_flush_l1d(void)
+{
+ __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
+}
+
+static inline bool kvm_get_cpu_l1tf_flush_l1d(void)
+{
+ return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
+}
+#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
+static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
+#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
+
#endif /* _ASM_X86_HARDIRQ_H */
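The new per-CPU kvm_cpu_l1tf_flush_l1d byte is a one-way hint: any interrupt entry sets it, and the VMX entry path reads and clears it to decide whether an L1D flush is due. A sketch of that handshake, with _Thread_local standing in for the kernel's per-CPU storage:

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool l1tf_flush_l1d;

static void kvm_set_cpu_l1tf_flush_l1d(void)   { l1tf_flush_l1d = true;  }
static void kvm_clear_cpu_l1tf_flush_l1d(void) { l1tf_flush_l1d = false; }
static bool kvm_get_cpu_l1tf_flush_l1d(void)   { return l1tf_flush_l1d; }

static void irq_entry(void)  { kvm_set_cpu_l1tf_flush_l1d(); }

static void vmentry(void)
{
	if (kvm_get_cpu_l1tf_flush_l1d()) {
		kvm_clear_cpu_l1tf_flush_l1d();
		puts("flush L1D before entering the guest");
	} else {
		puts("no flush needed");
	}
}

int main(void)
{
	vmentry();	/* no interrupts observed yet */
	irq_entry();	/* an interrupt may have touched secrets */
	vmentry();
	return 0;
}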
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index f59c39835a5a..a1f0e90d0818 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -49,11 +49,14 @@ static inline int hw_breakpoint_slots(int type)
return HBP_NUM;
}
+struct perf_event_attr;
struct perf_event;
struct pmu;
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index b8c89265baf0..e977b6b3a538 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -35,9 +35,9 @@
/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */
#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0)
/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
-#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
+#define HV_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
/* Partition reference TSC MSR is available */
-#define HV_X64_MSR_REFERENCE_TSC_AVAILABLE (1 << 9)
+#define HV_MSR_REFERENCE_TSC_AVAILABLE (1 << 9)
/* A partition's reference time stamp counter (TSC) page */
#define HV_X64_MSR_REFERENCE_TSC 0x40000021
@@ -60,7 +60,7 @@
* Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through
* HV_X64_MSR_STIMER3_COUNT) available
*/
-#define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3)
+#define HV_MSR_SYNTIMER_AVAILABLE (1 << 3)
/*
* APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
* are available
@@ -86,7 +86,7 @@
#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
/* stimer Direct Mode is available */
-#define HV_X64_STIMER_DIRECT_MODE_AVAILABLE (1 << 19)
+#define HV_STIMER_DIRECT_MODE_AVAILABLE (1 << 19)
/*
* Feature identification: EBX indicates which flags were specified at
@@ -160,9 +160,9 @@
#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5)
/*
- * Virtual APIC support
+ * Recommend not using Auto End-Of-Interrupt feature
*/
-#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
+#define HV_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
/*
* Recommend using cluster IPI hypercalls.
@@ -176,9 +176,10 @@
#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED (1 << 14)
/*
- * Crash notification flag.
+ * Crash notification flags.
*/
-#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
+#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
+#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
/* MSR used to identify the guest OS. */
#define HV_X64_MSR_GUEST_OS_ID 0x40000000
@@ -309,6 +310,7 @@ struct ms_hyperv_tsc_page {
#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
/* Nested features (CPUID 0x4000000A) EAX */
+#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
#define HV_X64_NESTED_MSR_BITMAP BIT(19)
struct hv_reenlightenment_control {
@@ -350,6 +352,7 @@ struct hv_tsc_emulation_status {
#define HVCALL_SEND_IPI_EX 0x0015
#define HVCALL_POST_MESSAGE 0x005c
#define HVCALL_SIGNAL_EVENT 0x005d
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001
#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
@@ -741,6 +744,12 @@ struct ipi_arg_ex {
struct hv_vpset vp_set;
};
+/* HvFlushGuestPhysicalAddressSpace hypercalls */
+struct hv_guest_mapping_flush {
+ u64 address_space;
+ u64 flags;
+};
+
/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
struct hv_tlb_flush {
u64 address_space;
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 5cdcdbd4d892..89789e8c80f6 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -3,6 +3,7 @@
#define _ASM_X86_I8259_H
#include <linux/delay.h>
+#include <asm/io.h>
extern unsigned int cached_irq_mask;
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index cf090e584202..7ed08a7c3398 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -76,4 +76,17 @@
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
+/* Useful macros */
+#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data) \
+{ \
+ .vendor = X86_VENDOR_INTEL, \
+ .family = _family, \
+ .model = _model, \
+ .feature = X86_FEATURE_ANY, \
+ .driver_data = (kernel_ulong_t)&_driver_data \
+}
+
+#define INTEL_CPU_FAM6(_model, _driver_data) \
+ INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, _driver_data)
+
#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index fe04491130ae..52f815a80539 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -80,35 +80,6 @@ enum intel_mid_cpu_type {
extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
-/**
- * struct intel_mid_ops - Interface between intel-mid & sub archs
- * @arch_setup: arch_setup function to re-initialize platform
- * structures (x86_init, x86_platform_init)
- *
- * This structure can be extended if any new interface is required
- * between intel-mid & its sub arch files.
- */
-struct intel_mid_ops {
- void (*arch_setup)(void);
-};
-
-/* Helper API's for INTEL_MID_OPS_INIT */
-#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
- [cpuid] = get_##cpuname##_ops
-
-/* Maximum number of CPU ops */
-#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
-
-/*
- * For every new cpu addition, a weak get_<cpuname>_ops() function needs be
- * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
- */
-#define INTEL_MID_OPS_INIT { \
- DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
- DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
- DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
-};
-
#ifdef CONFIG_X86_INTEL_MID
static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
@@ -136,20 +107,6 @@ enum intel_mid_timer_options {
extern enum intel_mid_timer_options intel_mid_timer_options;
-/*
- * Penwell uses spread spectrum clock, so the freq number is not exactly
- * the same as reported by MSR based on SDM.
- */
-#define FSB_FREQ_83SKU 83200
-#define FSB_FREQ_100SKU 99840
-#define FSB_FREQ_133SKU 133000
-
-#define FSB_FREQ_167SKU 167000
-#define FSB_FREQ_200SKU 200000
-#define FSB_FREQ_267SKU 267000
-#define FSB_FREQ_333SKU 333000
-#define FSB_FREQ_400SKU 400000
-
/* Bus Select SoC Fuse value */
#define BSEL_SOC_FUSE_MASK 0x7
/* FSB 133MHz */
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
index 62a9f4966b42..ae26df1c2789 100644
--- a/arch/x86/include/asm/intel_ds.h
+++ b/arch/x86/include/asm/intel_ds.h
@@ -8,6 +8,7 @@
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 8
+#define MAX_FIXED_PEBS_EVENTS 3
/*
* A debug store configuration.
@@ -23,7 +24,7 @@ struct debug_store {
u64 pebs_index;
u64 pebs_absolute_maximum;
u64 pebs_interrupt_threshold;
- u64 pebs_event_reset[MAX_PEBS_EVENTS];
+ u64 pebs_event_reset[MAX_PEBS_EVENTS + MAX_FIXED_PEBS_EVENTS];
} __aligned(PAGE_SIZE);
DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 89f08955fff7..c14f2a74b2be 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -13,7 +13,9 @@
* Interrupt control:
*/
-static inline unsigned long native_save_fl(void)
+/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
+extern inline unsigned long native_save_fl(void);
+extern inline unsigned long native_save_fl(void)
{
unsigned long flags;
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 367d99cff426..c8cec1b39b88 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -78,7 +78,7 @@ struct arch_specific_insn {
* boostable = true: This instruction has been boosted: we have
* added a relative jump after the instruction copy in insn,
* so no single-step and fixup are needed (unless there's
- * a post_handler or break_handler).
+ * a post_handler).
*/
bool boostable;
bool if_modifier;
@@ -111,9 +111,6 @@ struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_old_flags;
unsigned long kprobe_saved_flags;
- unsigned long *jprobe_saved_sp;
- struct pt_regs jprobe_saved_regs;
- kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/x86/include/asm/kvm_guest.h b/arch/x86/include/asm/kvm_guest.h
deleted file mode 100644
index 46185263d9c2..000000000000
--- a/arch/x86/include/asm/kvm_guest.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_KVM_GUEST_H
-#define _ASM_X86_KVM_GUEST_H
-
-int kvm_setup_vsyscall_timeinfo(void);
-
-#endif /* _ASM_X86_KVM_GUEST_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c13cd28d9d1b..00ddb0c9e612 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -17,6 +17,7 @@
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
+#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
@@ -54,6 +55,7 @@
#define KVM_REQ_TRIPLE_FAULT KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE KVM_ARCH_REQ(4)
+#define KVM_REQ_LOAD_CR3 KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(8)
@@ -75,13 +77,13 @@
#define KVM_REQ_HV_EXIT KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP KVM_ARCH_REQ(23)
+#define KVM_REQ_GET_VMCS12_PAGES KVM_ARCH_REQ(24)
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
| X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
| X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
-#define CR3_PCID_INVD BIT_64(63)
#define CR4_RESERVED_BITS \
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
@@ -325,6 +327,16 @@ struct rsvd_bits_validate {
u64 bad_mt_xwr;
};
+struct kvm_mmu_root_info {
+ gpa_t cr3;
+ hpa_t hpa;
+};
+
+#define KVM_MMU_ROOT_INFO_INVALID \
+ ((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })
+
+#define KVM_MMU_NUM_PREV_ROOTS 3
+
/*
* x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
* and 2-level 32-bit). The kvm_mmu structure abstracts the details of the
@@ -344,7 +356,7 @@ struct kvm_mmu {
struct x86_exception *exception);
int (*sync_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp);
- void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
+ void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte);
hpa_t root_hpa;
@@ -353,6 +365,7 @@ struct kvm_mmu {
u8 shadow_root_level;
u8 ept_ad;
bool direct_map;
+ struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
/*
* Bitmap; bit set = permission fault
@@ -713,6 +726,9 @@ struct kvm_vcpu_arch {
/* be preempted when it's in kernel-mode(cpl=0) */
bool preempted_in_kernel;
+
+ /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
+ bool l1tf_flush_l1d;
};
struct kvm_lpage_info {
@@ -881,6 +897,7 @@ struct kvm_vcpu_stat {
u64 signal_exits;
u64 irq_window_exits;
u64 nmi_window_exits;
+ u64 l1d_flush;
u64 halt_exits;
u64 halt_successful_poll;
u64 halt_attempted_poll;
@@ -973,6 +990,15 @@ struct kvm_x86_ops {
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
+ int (*tlb_remote_flush)(struct kvm *kvm);
+
+ /*
+ * Flush any TLB entries associated with the given GVA.
+ * Does not need to flush GPA->HPA mappings.
+ * Can potentially get non-canonical addresses through INVLPGs, which
+ * the implementation may choose to ignore if appropriate.
+ */
+ void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
void (*run)(struct kvm_vcpu *vcpu);
int (*handle_exit)(struct kvm_vcpu *vcpu);
@@ -1085,6 +1111,14 @@ struct kvm_x86_ops {
void (*setup_mce)(struct kvm_vcpu *vcpu);
+ int (*get_nested_state)(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state __user *user_kvm_nested_state,
+ unsigned user_data_size);
+ int (*set_nested_state)(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state __user *user_kvm_nested_state,
+ struct kvm_nested_state *kvm_state);
+ void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+
int (*smi_allowed)(struct kvm_vcpu *vcpu);
int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
@@ -1117,6 +1151,16 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
return kvm_x86_ops->vm_free(kvm);
}
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
+static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+{
+ if (kvm_x86_ops->tlb_remote_flush &&
+ !kvm_x86_ops->tlb_remote_flush(kvm))
+ return 0;
+ else
+ return -ENOTSUPP;
+}
+
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
@@ -1268,6 +1312,10 @@ static inline int __kvm_irq_line_state(unsigned long *irq_state,
return !!(*irq_state);
}
+#define KVM_MMU_ROOT_CURRENT BIT(0)
+#define KVM_MMU_ROOT_PREVIOUS(i) BIT(1+i)
+#define KVM_MMU_ROOTS_ALL (~0UL)
+
int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
@@ -1279,7 +1327,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-void kvm_mmu_free_roots(struct kvm_vcpu *vcpu);
+void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
@@ -1298,7 +1346,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
+void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
void kvm_enable_tdp(void);
void kvm_disable_tdp(void);
@@ -1413,6 +1462,11 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
+int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
+ unsigned long ipi_bitmap_high, int min,
+ unsigned long icr, int op_64_bit);
+
+u64 kvm_get_arch_capabilities(void);
void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
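KVM_MMU_ROOT_CURRENT and KVM_MMU_ROOT_PREVIOUS(i) define a small bitmask protocol for the reworked kvm_mmu_free_roots(): bit 0 selects the active root and the next KVM_MMU_NUM_PREV_ROOTS bits select the cached previous roots. A sketch of decoding such a mask, with the actual free actions stubbed out:

#include <stdio.h>

#define BIT(n)				(1UL << (n))
#define KVM_MMU_ROOT_CURRENT		BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1 + (i))
#define KVM_MMU_NUM_PREV_ROOTS		3
#define KVM_MMU_ROOTS_ALL		(~0UL)

static void free_roots(unsigned long roots_to_free)
{
	if (roots_to_free & KVM_MMU_ROOT_CURRENT)
		puts("free current root");
	for (int i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
			printf("free previous root %d\n", i);
}

int main(void)
{
	free_roots(KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(1));
	puts("--");
	free_roots(KVM_MMU_ROOTS_ALL);
	return 0;
}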
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 3aea2658323a..4c723632c036 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -7,7 +7,6 @@
#include <uapi/asm/kvm_para.h>
extern void kvmclock_init(void);
-extern int kvm_register_clock(char *txt);
#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index bbc796eb0a3b..eeeb9289c764 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -71,12 +71,7 @@ struct ldt_struct {
static inline void *ldt_slot_va(int slot)
{
-#ifdef CONFIG_X86_64
return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
-#else
- BUG();
- return (void *)fix_to_virt(FIX_HOLE);
-#endif
}
/*
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 3cd14311edfa..f37704497d8f 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -9,6 +9,8 @@
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
+#define VP_INVAL U32_MAX
+
struct ms_hyperv_info {
u32 features;
u32 misc_features;
@@ -20,7 +22,6 @@ struct ms_hyperv_info {
extern struct ms_hyperv_info ms_hyperv;
-
/*
* Generate the guest ID.
*/
@@ -75,8 +76,10 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
}
}
-#define hv_init_timer(timer, tick) wrmsrl(timer, tick)
-#define hv_init_timer_config(config, val) wrmsrl(config, val)
+#define hv_init_timer(timer, tick) \
+ wrmsrl(HV_X64_MSR_STIMER0_COUNT + (2*timer), tick)
+#define hv_init_timer_config(timer, val) \
+ wrmsrl(HV_X64_MSR_STIMER0_CONFIG + (2*timer), val)
#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)
@@ -89,8 +92,13 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)
-#define hv_get_synint_state(int_num, val) rdmsrl(int_num, val)
-#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val)
+#define hv_get_synint_state(int_num, val) \
+ rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
+#define hv_set_synint_state(int_num, val) \
+ wrmsrl(HV_X64_MSR_SINT0 + int_num, val)
+
+#define hv_get_crash_ctl(val) \
+ rdmsrl(HV_X64_MSR_CRASH_CTL, val)
void hyperv_callback_vector(void);
void hyperv_reenlightenment_vector(void);
@@ -193,6 +201,40 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
return hv_status;
}
+/* Fast hypercall with 16 bytes of input */
+static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
+{
+ u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
+
+#ifdef CONFIG_X86_64
+ {
+ __asm__ __volatile__("mov %4, %%r8\n"
+ CALL_NOSPEC
+ : "=a" (hv_status), ASM_CALL_CONSTRAINT,
+ "+c" (control), "+d" (input1)
+ : "r" (input2),
+ THUNK_TARGET(hv_hypercall_pg)
+ : "cc", "r8", "r9", "r10", "r11");
+ }
+#else
+ {
+ u32 input1_hi = upper_32_bits(input1);
+ u32 input1_lo = lower_32_bits(input1);
+ u32 input2_hi = upper_32_bits(input2);
+ u32 input2_lo = lower_32_bits(input2);
+
+ __asm__ __volatile__ (CALL_NOSPEC
+ : "=A"(hv_status),
+ "+c"(input1_lo), ASM_CALL_CONSTRAINT
+ : "A" (control), "b" (input1_hi),
+ "D"(input2_hi), "S"(input2_lo),
+ THUNK_TARGET(hv_hypercall_pg)
+ : "cc");
+ }
+#endif
+ return hv_status;
+}
+
/*
* Rep hypercalls. Callers of these functions are supposed to ensure that
* rep_count and varhead_size comply with Hyper-V hypercall definition.
@@ -281,6 +323,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
*/
for_each_cpu(cpu, cpus) {
vcpu = hv_cpu_number_to_vp_number(cpu);
+ if (vcpu == VP_INVAL)
+ return -1;
vcpu_bank = vcpu / 64;
vcpu_offset = vcpu % 64;
__set_bit(vcpu_offset, (unsigned long *)
@@ -295,6 +339,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyperv_report_panic(struct pt_regs *regs, long err);
+void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);
@@ -302,6 +347,7 @@ void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
+int hyperv_flush_guest_mapping(u64 as);
#ifdef CONFIG_X86_64
void hv_apic_init(void);
@@ -321,6 +367,7 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
return NULL;
}
+static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
#endif /* CONFIG_HYPERV */
#ifdef CONFIG_HYPERV_TSCPAGE
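cpumask_to_vpset() packs VP numbers into a sparse banked set: VP / 64 selects a bank, VP % 64 a bit within it, and valid_bank_mask records which banks are populated; the new VP_INVAL check makes it return -1 instead of building a bogus set. A fixed-size sketch of that packing (the kernel's set is variable-length):

#include <stdint.h>
#include <stdio.h>

#define VP_INVAL  UINT32_MAX
#define MAX_BANKS 4

struct vpset {
	uint64_t valid_bank_mask;
	uint64_t bank_contents[MAX_BANKS];
};

/* Returns the number of banks used, or -1 on an invalid VP number. */
static int vps_to_vpset(struct vpset *s, const uint32_t *vps, int n)
{
	int nr_bank = 0;

	for (int i = 0; i < n; i++) {
		uint32_t vp = vps[i];
		int bank, bit;

		if (vp == VP_INVAL)
			return -1;
		bank = vp / 64;
		bit = vp % 64;
		if (bank >= MAX_BANKS)
			return -1;	/* fixed-size sketch limitation */
		s->valid_bank_mask |= 1ULL << bank;
		s->bank_contents[bank] |= 1ULL << bit;
		if (bank + 1 > nr_bank)
			nr_bank = bank + 1;
	}
	return nr_bank;
}

int main(void)
{
	struct vpset s = { 0 };
	const uint32_t vps[] = { 3, 70, 130 };	/* lands in banks 0, 1, 2 */
	int nr_bank = vps_to_vpset(&s, vps, 3);

	printf("nr_bank = %d, valid_bank_mask = %#llx\n",
	       nr_bank, (unsigned long long)s.valid_bank_mask);
	return 0;
}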
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 68b2c3150de1..4731f0cf97c5 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -70,12 +70,19 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */
#define ARCH_CAP_SSB_NO (1 << 4) /*
* Not susceptible to Speculative Store Bypass
* attack, so no Speculative Store Bypass
* control required.
*/
+#define MSR_IA32_FLUSH_CMD 0x0000010b
+#define L1D_FLUSH (1 << 0) /*
+ * Writeback and invalidate the
+ * L1 data cache.
+ */
+
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f6f6c63da62f..fd2a8c1b88bc 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -214,7 +214,7 @@ enum spectre_v2_mitigation {
SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
SPECTRE_V2_RETPOLINE_GENERIC,
SPECTRE_V2_RETPOLINE_AMD,
- SPECTRE_V2_IBRS,
+ SPECTRE_V2_IBRS_ENHANCED,
};
/* The Speculative Store Bypass disable variants */
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
index 9c9dc579bd7d..46f516dd80ce 100644
--- a/arch/x86/include/asm/orc_types.h
+++ b/arch/x86/include/asm/orc_types.h
@@ -88,6 +88,7 @@ struct orc_entry {
unsigned sp_reg:4;
unsigned bp_reg:4;
unsigned type:2;
+ unsigned end:1;
} __packed;
/*
@@ -101,6 +102,7 @@ struct unwind_hint {
s16 sp_offset;
u8 sp_reg;
u8 type;
+ u8 end;
};
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index aa30c3241ea7..0d5c739eebd7 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -29,8 +29,13 @@
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
-/* 44=32+12, the limit we can fit into an unsigned long pfn */
-#define __PHYSICAL_MASK_SHIFT 44
+/*
+ * This is beyond the 44 bit limit imposed by the 32bit long pfns,
+ * but we need the full mask to make sure inverted PROT_NONE
+ * entries have all the host bits set in a guest.
+ * The real limit is still 44 bits.
+ */
+#define __PHYSICAL_MASK_SHIFT 52
#define __VIRTUAL_MASK_SHIFT 32
#else /* !CONFIG_X86_PAE */
diff --git a/arch/x86/include/asm/pci-direct.h b/arch/x86/include/asm/pci-direct.h
index e1084f71a295..94597a3cf3d0 100644
--- a/arch/x86/include/asm/pci-direct.h
+++ b/arch/x86/include/asm/pci-direct.h
@@ -15,8 +15,4 @@ extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
extern void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val);
extern int early_pci_allowed(void);
-
-extern unsigned int pci_early_dump_regs;
-extern void early_dump_pci_device(u8 bus, u8 slot, u8 func);
-extern void early_dump_pci_devices(void);
#endif /* _ASM_X86_PCI_DIRECT_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index a06b07399d17..e9202a0de8f0 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -450,9 +450,10 @@ do { \
bool __ret; \
typeof(pcp1) __o1 = (o1), __n1 = (n1); \
typeof(pcp2) __o2 = (o2), __n2 = (n2); \
- asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \
- : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
- : "b" (__n1), "c" (__n2), "a" (__o1)); \
+ asm volatile("cmpxchg8b "__percpu_arg(1) \
+ CC_SET(z) \
+ : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \
+ : "b" (__n1), "c" (__n2)); \
__ret; \
})
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 685ffe8a0eaf..24c6cf5f16b7 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -19,6 +19,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
+#endif
*pmdp = pmd;
}
@@ -58,6 +61,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
+#endif
return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
@@ -67,6 +73,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#ifdef CONFIG_SMP
static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
+#endif
return __pud(xchg((pudval_t *)xp, 0));
}
#else
@@ -95,4 +104,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+/* No inverted PFNs on 2 level page tables */
+
+static inline u64 protnone_mask(u64 val)
+{
+ return 0;
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
+{
+ return val;
+}
+
+static inline bool __pte_needs_invert(u64 val)
+{
+ return false;
+}
+
#endif /* _ASM_X86_PGTABLE_2LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index f982ef808e7e..6deb6cd236e3 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -35,4 +35,7 @@ typedef union {
#define PTRS_PER_PTE 1024
+/* This covers all VMSPLIT_* and VMSPLIT_*_OPT variants */
+#define PGD_KERNEL_START (CONFIG_PAGE_OFFSET >> PGDIR_SHIFT)
+
#endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index f24df59c40b2..a564084c6141 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -98,6 +98,9 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
+#endif
set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
@@ -229,6 +232,10 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
union split_pud res, *orig = (union split_pud *)pudp;
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
+#endif
+
/* xchg acts as a barrier before setting of the high bits */
res.pud_low = xchg(&orig->pud_low, 0);
res.pud_high = orig->pud_high;
@@ -241,12 +248,43 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
#endif
/* Encode and de-code a swap entry */
+#define SWP_TYPE_BITS 5
+
+#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
+
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x) (((x).val) & 0x1f)
#define __swp_offset(x) ((x).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
-#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
-#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
+
+/*
+ * Normally, __swp_entry() converts from the arch-independent to the
+ * arch-dependent swap entry encoding, and __swp_entry_to_pte() just stores
+ * the result in the pte. But here we have a 32bit swp_entry_t and a 64bit
+ * pte, and need to use the whole 64 bits. Thus, we shift the "real"
+ * arch-dependent conversion to __swp_entry_to_pte() through the following
+ * helper macro, based on a 64bit __swp_entry().
+ */
+#define __swp_pteval_entry(type, offset) ((pteval_t) { \
+ (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+ | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
+
+#define __swp_entry_to_pte(x) ((pte_t){ .pte = \
+ __swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
+/*
+ * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
+ * swp_entry_t, but also has to convert it from 64bit to the 32bit
+ * intermediate representation, using the following macros based on 64bit
+ * __swp_type() and __swp_offset().
+ */
+#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
+#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
+
+#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
+ __pteval_swp_offset(pte)))
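As a rough stand-alone sketch of the encode/decode round-trip above (the bit positions mirror this hunk, with _PAGE_BIT_PROTNONE taken as bit 8 as on x86; the user-space harness and stdint types are purely illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

#define SWP_TYPE_BITS		5
#define SWP_OFFSET_FIRST_BIT	(8 + 1)	/* assumed _PAGE_BIT_PROTNONE + 1 */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

/* Mirrors __swp_pteval_entry(): type in the top bits, inverted offset below. */
static uint64_t swp_pteval_entry(unsigned int type, uint64_t offset)
{
	return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS)
	     | ((uint64_t)type << (64 - SWP_TYPE_BITS));
}

/* Mirrors __pteval_swp_type() / __pteval_swp_offset(). */
static unsigned int pteval_swp_type(uint64_t pte)
{
	return (unsigned int)(pte >> (64 - SWP_TYPE_BITS));
}

static uint64_t pteval_swp_offset(uint64_t pte)
{
	return ~pte << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT;
}

int main(void)
{
	uint64_t pte = swp_pteval_entry(3, 0x12345);

	/* The binary not leaves the high "physical" bits set ... */
	assert(pte >> 60);
	/* ... and both fields survive the round trip. */
	assert(pteval_swp_type(pte) == 3);
	assert(pteval_swp_offset(pte) == 0x12345);
	return 0;
}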
#define gup_get_pte gup_get_pte
/*
@@ -295,4 +333,6 @@ static inline pte_t gup_get_pte(pte_t *ptep)
return pte;
}
+#include <asm/pgtable-invert.h>
+
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 6a59a6d0cc50..858358a82b14 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -21,9 +21,10 @@ typedef union {
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_PARAVIRT
-#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
+#define SHARED_KERNEL_PMD ((!static_cpu_has(X86_FEATURE_PTI) && \
+ (pv_info.shared_kernel_pmd)))
#else
-#define SHARED_KERNEL_PMD 1
+#define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI))
#endif
/*
@@ -45,5 +46,6 @@ typedef union {
#define PTRS_PER_PTE 512
#define MAX_POSSIBLE_PHYSMEM_BITS 36
+#define PGD_KERNEL_START (CONFIG_PAGE_OFFSET >> PGDIR_SHIFT)
#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h
new file mode 100644
index 000000000000..a0c1525f1b6f
--- /dev/null
+++ b/arch/x86/include/asm/pgtable-invert.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PGTABLE_INVERT_H
+#define _ASM_PGTABLE_INVERT_H 1
+
+#ifndef __ASSEMBLY__
+
+/*
+ * A clear pte value is special, and doesn't get inverted.
+ *
+ * Note that even users that only pass a pgprot_t (rather
+ * than a full pte) won't trigger the special zero case,
+ * because even PAGE_NONE has _PAGE_PROTNONE | _PAGE_ACCESSED
+ * set. So the all zero case really is limited to just the
+ * cleared page table entry case.
+ */
+static inline bool __pte_needs_invert(u64 val)
+{
+ return val && !(val & _PAGE_PRESENT);
+}
+
+/* Get a mask to xor with the page table entry to get the correct pfn. */
+static inline u64 protnone_mask(u64 val)
+{
+ return __pte_needs_invert(val) ? ~0ull : 0;
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
+{
+ /*
+ * When a PTE transitions from NONE to !NONE or vice-versa
+ * invert the PFN part to stop speculation.
+ * pte_pfn undoes this when needed.
+ */
+ if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
+ val = (val & ~mask) | (~val & mask);
+ return val;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
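A minimal stand-alone sketch of the invert/uninvert dance defined above; the _PAGE_PRESENT value and the PFN mask below are illustrative assumptions, not the kernel constants:

#include <assert.h>
#include <stdint.h>

#define MY_PAGE_PRESENT	0x1ULL			/* assumed present bit */
#define MY_PTE_PFN_MASK	0x000ffffffffff000ULL	/* illustrative PFN field */

static int pte_needs_invert(uint64_t val)
{
	return val && !(val & MY_PAGE_PRESENT);
}

static uint64_t protnone_mask(uint64_t val)
{
	return pte_needs_invert(val) ? ~0ULL : 0;
}

static uint64_t flip_protnone_guard(uint64_t oldval, uint64_t val, uint64_t mask)
{
	/* Invert the PFN part only on a NONE <-> !NONE transition. */
	if (pte_needs_invert(oldval) != pte_needs_invert(val))
		val = (val & ~mask) | (~val & mask);
	return val;
}

int main(void)
{
	uint64_t present = (0xabcdULL << 12) | MY_PAGE_PRESENT;
	/* Dropping the present bit stores the PFN bits inverted ... */
	uint64_t none = flip_protnone_guard(present,
					    present & ~MY_PAGE_PRESENT,
					    MY_PTE_PFN_MASK);

	/* ... and a pte_pfn()-style readout XORs them straight back. */
	uint64_t pfn = ((none ^ protnone_mask(none)) & MY_PTE_PFN_MASK) >> 12;
	assert(pfn == 0xabcd);
	return 0;
}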
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5715647fc4fe..e4ffa565a69f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -30,11 +30,14 @@ int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);
+void ptdump_walk_user_pgd_level_checkwx(void);
#ifdef CONFIG_DEBUG_WX
-#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
+#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
+#define debug_checkwx_user() ptdump_walk_user_pgd_level_checkwx()
#else
-#define debug_checkwx() do { } while (0)
+#define debug_checkwx() do { } while (0)
+#define debug_checkwx_user() do { } while (0)
#endif
/*
@@ -185,19 +188,29 @@ static inline int pte_special(pte_t pte)
return pte_flags(pte) & _PAGE_SPECIAL;
}
+/* Entries that were set to PROT_NONE are inverted */
+
+static inline u64 protnone_mask(u64 val);
+
static inline unsigned long pte_pfn(pte_t pte)
{
- return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+ phys_addr_t pfn = pte_val(pte);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}
static inline unsigned long pmd_pfn(pmd_t pmd)
{
- return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
+ phys_addr_t pfn = pmd_val(pmd);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}
static inline unsigned long pud_pfn(pud_t pud)
{
- return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+ phys_addr_t pfn = pud_val(pud);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}
static inline unsigned long p4d_pfn(p4d_t p4d)
@@ -400,11 +413,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_RW);
}
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
- return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
-}
-
static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
pudval_t v = native_pud_val(pud);
@@ -459,11 +467,6 @@ static inline pud_t pud_mkwrite(pud_t pud)
return pud_set_flags(pud, _PAGE_RW);
}
-static inline pud_t pud_mknotpresent(pud_t pud)
-{
- return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
-}
-
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
@@ -545,25 +548,45 @@ static inline pgprotval_t check_pgprot(pgprot_t pgprot)
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
- return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
- check_pgprot(pgprot));
+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PTE_PFN_MASK;
+ return __pte(pfn | check_pgprot(pgprot));
}
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
- return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
- check_pgprot(pgprot));
+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PHYSICAL_PMD_PAGE_MASK;
+ return __pmd(pfn | check_pgprot(pgprot));
}
static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
- return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
- check_pgprot(pgprot));
+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PHYSICAL_PUD_PAGE_MASK;
+ return __pud(pfn | check_pgprot(pgprot));
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+ return pfn_pmd(pmd_pfn(pmd),
+ __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
+}
+
+static inline pud_t pud_mknotpresent(pud_t pud)
+{
+ return pfn_pud(pud_pfn(pud),
+ __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
+
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- pteval_t val = pte_val(pte);
+ pteval_t val = pte_val(pte), oldval = val;
/*
* Chop off the NX bit (if present), and add the NX portion of
@@ -571,17 +594,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
val &= _PAGE_CHG_MASK;
val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
-
+ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
return __pte(val);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- pmdval_t val = pmd_val(pmd);
+ pmdval_t val = pmd_val(pmd), oldval = val;
val &= _HPAGE_CHG_MASK;
val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
-
+ val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
return __pmd(val);
}
@@ -640,8 +663,31 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);
+
+/*
+ * Take a PGD location (pgdp) and a pgd value that needs to be set there.
+ * Populates the user and returns the resulting PGD that must be set in
+ * the kernel copy of the page tables.
+ */
+static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+{
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return pgd;
+ return __pti_set_user_pgtbl(pgdp, pgd);
+}
+#else /* CONFIG_PAGE_TABLE_ISOLATION */
+static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+{
+ return pgd;
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
#endif /* __ASSEMBLY__ */
+
#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
@@ -1154,6 +1200,70 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
}
}
#endif
+/*
+ * Page table pages are page-aligned. The lower half of the top
+ * level is used for userspace and the top half for the kernel.
+ *
+ * Returns true for parts of the PGD that map userspace and
+ * false for the parts that map the kernel.
+ */
+static inline bool pgdp_maps_userspace(void *__ptr)
+{
+ unsigned long ptr = (unsigned long)__ptr;
+
+ return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
+}
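For concreteness, a worked example using the 64-bit value of PGD_KERNEL_START (defined later in this series as (PAGE_SIZE / 2) / sizeof(pgd_t)):

/*
 * Example: on 64-bit, PGD_KERNEL_START is (4096 / 2) / 8 = 256, so a
 * pointer whose offset within the PGD page is below 256 * 8 = 2048
 * bytes lands in the userspace half and the helper returns true.
 */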
+
+static inline int pgd_large(pgd_t pgd) { return 0; }
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
+ * (8k-aligned and 8k in size). The kernel one occupies the first 4k and
+ * the user one the last 4k. To switch between them, you
+ * just need to flip the 12th bit in their addresses.
+ */
+#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT
+
+/*
+ * This generates better code than the inline assembly in
+ * __set_bit().
+ */
+static inline void *ptr_set_bit(void *ptr, int bit)
+{
+ unsigned long __ptr = (unsigned long)ptr;
+
+ __ptr |= BIT(bit);
+ return (void *)__ptr;
+}
+static inline void *ptr_clear_bit(void *ptr, int bit)
+{
+ unsigned long __ptr = (unsigned long)ptr;
+
+ __ptr &= ~BIT(bit);
+ return (void *)__ptr;
+}
+
+static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
+{
+ return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
+{
+ return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
+{
+ return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
+{
+ return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
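A small stand-alone sketch of the bit-12 flip described above (the address is made up and PAGE_SHIFT == 12 is assumed; not kernel code):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT		12
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

int main(void)
{
	/* An order-1 (8k-aligned) PGD: kernel 4k first, user 4k second. */
	uint64_t kernel_pgd = 0xffff888012340000ULL;
	uint64_t user_pgd   = kernel_pgd | (1ULL << PTI_PGTABLE_SWITCH_BIT);

	/* Setting bit 12 moves exactly one 4k page forward ... */
	assert(user_pgd - kernel_pgd == 4096);
	/* ... and clearing it gets back to the kernel copy. */
	assert((user_pgd & ~(1ULL << PTI_PGTABLE_SWITCH_BIT)) == kernel_pgd);
	return 0;
}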
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
@@ -1320,6 +1430,14 @@ static inline bool pud_access_permitted(pud_t pud, bool write)
return __pte_access_permitted(pud_val(pud), write);
}
+#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
+extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return boot_cpu_has_bug(X86_BUG_L1TF);
+}
+
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 88a056b01db4..b3ec519e3982 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -34,8 +34,6 @@ static inline void check_pgt_cache(void) { }
void paging_init(void);
void sync_initial_page_table(void);
-static inline int pgd_large(pgd_t pgd) { return 0; }
-
/*
* Define this if things work differently on an i386 and an i486:
* it will (on an i486) warn about kernel memory accesses that are
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index d9a001a4a872..b0bc0fff5f1f 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -50,13 +50,18 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \
& PMD_MASK)
-#define PKMAP_BASE \
+#define LDT_BASE_ADDR \
((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
+#define LDT_END_ADDR (LDT_BASE_ADDR + PMD_SIZE)
+
+#define PKMAP_BASE \
+ ((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)
+
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
-# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
+# define VMALLOC_END (LDT_BASE_ADDR - 2 * PAGE_SIZE)
#endif
#define MODULES_VADDR VMALLOC_START
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 3c5385f9a88f..f773d5e6c8cc 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -132,90 +132,6 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp)
#endif
}
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-/*
- * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
- * (8k-aligned and 8k in size). The kernel one is at the beginning 4k and
- * the user one is in the last 4k. To switch between them, you
- * just need to flip the 12th bit in their addresses.
- */
-#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT
-
-/*
- * This generates better code than the inline assembly in
- * __set_bit().
- */
-static inline void *ptr_set_bit(void *ptr, int bit)
-{
- unsigned long __ptr = (unsigned long)ptr;
-
- __ptr |= BIT(bit);
- return (void *)__ptr;
-}
-static inline void *ptr_clear_bit(void *ptr, int bit)
-{
- unsigned long __ptr = (unsigned long)ptr;
-
- __ptr &= ~BIT(bit);
- return (void *)__ptr;
-}
-
-static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
-{
- return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
-{
- return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
-{
- return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
-{
- return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
-}
-#endif /* CONFIG_PAGE_TABLE_ISOLATION */
-
-/*
- * Page table pages are page-aligned. The lower half of the top
- * level is used for userspace and the top half for the kernel.
- *
- * Returns true for parts of the PGD that map userspace and
- * false for the parts that map the kernel.
- */
-static inline bool pgdp_maps_userspace(void *__ptr)
-{
- unsigned long ptr = (unsigned long)__ptr;
-
- return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
-}
-
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);
-
-/*
- * Take a PGD location (pgdp) and a pgd value that needs to be set there.
- * Populates the user and returns the resulting PGD that must be set in
- * the kernel copy of the page tables.
- */
-static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
-{
- if (!static_cpu_has(X86_FEATURE_PTI))
- return pgd;
- return __pti_set_user_pgd(pgdp, pgd);
-}
-#else
-static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
-{
- return pgd;
-}
-#endif
-
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
pgd_t pgd;
@@ -226,7 +142,7 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
}
pgd = native_make_pgd(native_p4d_val(p4d));
- pgd = pti_set_user_pgd((pgd_t *)p4dp, pgd);
+ pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
*p4dp = native_make_p4d(native_pgd_val(pgd));
}
@@ -237,7 +153,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
- *pgdp = pti_set_user_pgd(pgdp, pgd);
+ *pgdp = pti_set_user_pgtbl(pgdp, pgd);
}
static inline void native_pgd_clear(pgd_t *pgd)
@@ -255,7 +171,6 @@ extern void sync_global_pgds(unsigned long start, unsigned long end);
/*
* Level 4 access.
*/
-static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
/* PUD - Level3 access */
@@ -273,7 +188,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
*
* | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
* | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
- * | OFFSET (14->63) | TYPE (9-13) |0|0|X|X| X| X|X|SD|0| <- swp entry
+ * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
*
* G (8) is aliased and used as a PROT_NONE indicator for
* !present ptes. We need to start storing swap entries above
@@ -286,20 +201,34 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
*
* Bit 7 in swp entry should be 0 because pmd_present checks not only P,
* but also L and G.
+ *
+ * The offset is inverted by a binary not operation to make the high
+ * physical bits set.
*/
-#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
-#define SWP_TYPE_BITS 5
-/* Place the offset above the type: */
-#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
+#define SWP_TYPE_BITS 5
+
+#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
-#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \
- & ((1U << SWP_TYPE_BITS) - 1))
-#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT)
-#define __swp_entry(type, offset) ((swp_entry_t) { \
- ((type) << (SWP_TYPE_FIRST_BIT)) \
- | ((offset) << SWP_OFFSET_FIRST_BIT) })
+/* Extract the high bits for type */
+#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
+
+/* Shift up (to get rid of type), then down to get value */
+#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
+
+/*
+ * Shift the offset up "too far" by TYPE bits, then down again.
+ * The offset is inverted by a binary not operation to make the high
+ * physical bits set.
+ */
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
+
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
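A worked example of the encoding above, with values chosen purely for illustration (_PAGE_BIT_PROTNONE is bit 8, so SWP_OFFSET_FIRST_BIT = 9 and SWP_OFFSET_SHIFT = 14):

/*
 * Worked example, type = 1, offset = 2:
 *
 *   __swp_entry(1, 2) = (~2UL << 14 >> 5) | (1UL << 59)
 *                     = 0x07fffffffffffa00 | 0x0800000000000000
 *                     = 0x0ffffffffffffa00
 *
 * Bits 9..58 hold ~2, i.e. every bit set except bit 10, so the high
 * "physical" bits of the non-present pte end up set as intended, and
 * __swp_offset() recovers 2 by undoing the not and the shifts.
 */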
@@ -343,5 +272,7 @@ static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
return true;
}
+#include <asm/pgtable-invert.h>
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 054765ab2da2..04edd2d58211 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -115,6 +115,7 @@ extern unsigned int ptrs_per_p4d;
#define LDT_PGD_ENTRY_L5 -112UL
#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
+#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
#define __VMALLOC_BASE_L4 0xffffc90000000000UL
#define __VMALLOC_BASE_L5 0xffa0000000000000UL
@@ -153,4 +154,6 @@ extern unsigned int ptrs_per_p4d;
#define EARLY_DYNAMIC_PAGE_TABLES 64
+#define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t))
+
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 99fff853c944..b64acb08a62b 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -50,6 +50,7 @@
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
+#define _PAGE_SOFTW3 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
@@ -266,14 +267,37 @@ typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef struct { pgdval_t pgd; } pgd_t;
+#ifdef CONFIG_X86_PAE
+
+/*
+ * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't
+ * use it here.
+ */
+
+#define PGD_PAE_PAGE_MASK ((signed long)PAGE_MASK)
+#define PGD_PAE_PHYS_MASK (((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)
+
+/*
+ * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
+ * All other bits are Reserved MBZ.
+ */
+#define PGD_ALLOWED_BITS (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
+ _PAGE_PWT | _PAGE_PCD | \
+ _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)
+
+#else
+/* No need to mask any bits for !PAE */
+#define PGD_ALLOWED_BITS (~0ULL)
+#endif
+
static inline pgd_t native_make_pgd(pgdval_t val)
{
- return (pgd_t) { val };
+ return (pgd_t) { val & PGD_ALLOWED_BITS };
}
static inline pgdval_t native_pgd_val(pgd_t pgd)
{
- return pgd.pgd;
+ return pgd.pgd & PGD_ALLOWED_BITS;
}
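As a hedged illustration of the masking effect (flag names follow this file; the combination is made up):

/*
 * Example: on PAE, native_make_pgd(addr | _PAGE_PRESENT | _PAGE_RW)
 * silently drops _PAGE_RW, since the write bit is Reserved-MBZ in a
 * PDPTE; only the base address, P, PWT, PCD and the three software
 * bits can ever reach the hardware-visible entry.
 */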
static inline pgdval_t pgd_flags(pgd_t pgd)
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 625a52a5594f..02c2cbda4a74 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -39,10 +39,6 @@
#define CR3_PCID_MASK 0xFFFull
#define CR3_NOFLUSH BIT_ULL(63)
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-# define X86_CR3_PTI_PCID_USER_BIT 11
-#endif
-
#else
/*
* CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
@@ -53,4 +49,8 @@
#define CR3_NOFLUSH 0
#endif
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define X86_CR3_PTI_PCID_USER_BIT 11
+#endif
+
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cfd29ee8c3da..682286aca881 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -181,6 +181,11 @@ extern const struct seq_operations cpuinfo_op;
extern void cpu_detect(struct cpuinfo_x86 *c);
+static inline unsigned long l1tf_pfn_limit(void)
+{
+ return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
+}
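A quick worked example for the helper above (36 physical address bits chosen only for illustration):

/*
 * Example: with x86_phys_bits == 36, l1tf_pfn_limit() is
 * BIT(36 - 1 - 12) - 1 = 2^23 - 1, i.e. the last PFN considered safe
 * maps just below 32GB, half of the 64GB physical address space, so
 * the inverted PROT_NONE and swap encodings always point above any
 * real memory.
 */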
+
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
@@ -966,6 +971,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+extern void free_kernel_image_pages(void *begin, void *end);
void default_idle(void);
#ifdef CONFIG_XEN
@@ -977,4 +983,16 @@ bool xen_set_default_idle(void);
void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
void microcode_check(void);
+
+enum l1tf_mitigations {
+ L1TF_MITIGATION_OFF,
+ L1TF_MITIGATION_FLUSH_NOWARN,
+ L1TF_MITIGATION_FLUSH,
+ L1TF_MITIGATION_FLUSH_NOSMT,
+ L1TF_MITIGATION_FULL,
+ L1TF_MITIGATION_FULL_FORCE
+};
+
+extern enum l1tf_mitigations l1tf_mitigation;
+
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
index 38a17f1d5c9d..5df09a0b80b8 100644
--- a/arch/x86/include/asm/pti.h
+++ b/arch/x86/include/asm/pti.h
@@ -6,10 +6,9 @@
#ifdef CONFIG_PAGE_TABLE_ISOLATION
extern void pti_init(void);
extern void pti_check_boottime_disable(void);
-extern void pti_clone_kernel_text(void);
+extern void pti_finalize(void);
#else
static inline void pti_check_boottime_disable(void) { }
-static inline void pti_clone_kernel_text(void) { }
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 9ef5ee03d2d7..159622ee0674 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -43,7 +43,7 @@ asm (".pushsection .text;"
"push %rdx;"
"mov $0x1,%eax;"
"xor %edx,%edx;"
- "lock cmpxchg %dl,(%rdi);"
+ LOCK_PREFIX "cmpxchg %dl,(%rdi);"
"cmp $0x1,%al;"
"jne .slowpath;"
"pop %rdx;"
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index 4cf11d88d3b3..19b90521954c 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -5,6 +5,7 @@
* PaX/grsecurity.
*/
#include <linux/refcount.h>
+#include <asm/bug.h>
/*
* This is the first portion of the refcount error handling, which lives in
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 5c019d23d06b..4a911a382ade 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -7,6 +7,7 @@
extern char __brk_base[], __brk_limit[];
extern struct exception_table_entry __stop___ex_table[];
+extern char __end_rodata_aligned[];
#if defined(CONFIG_X86_64)
extern char __end_rodata_hpage_align[];
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index bd090367236c..34cffcef7375 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -46,6 +46,7 @@ int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
+int set_memory_np_noalias(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index eb5f7999a893..36bd243843d6 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -87,15 +87,25 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
#endif
/* This is used when switching tasks or entering/exiting vm86 mode. */
-static inline void update_sp0(struct task_struct *task)
+static inline void update_task_stack(struct task_struct *task)
{
- /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
+ /* sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
- load_sp0(task->thread.sp0);
+ if (static_cpu_has(X86_FEATURE_XENPV))
+ load_sp0(task->thread.sp0);
+ else
+ this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
+ /*
+ * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
+ * doesn't work on x86-32 because sp1 and
+ * cpu_current_top_of_stack have different values (because of
+ * the non-zero stack-padding on 32bit).
+ */
if (static_cpu_has(X86_FEATURE_XENPV))
load_sp0(task_top_of_stack(task));
#endif
+
}
#endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 2ecd34e2d46c..e85ff65c43c3 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -37,5 +37,6 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+extern int after_bootmem;
#endif /* _ASM_X86_TEXT_PATCHING_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6690cd3fc8b1..511bf5fae8b8 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -148,22 +148,6 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif
-static inline bool tlb_defer_switch_to_init_mm(void)
-{
- /*
- * If we have PCID, then switching to init_mm is reasonably
- * fast. If we don't have PCID, then switching to init_mm is
- * quite slow, so we try to defer it in the hopes that we can
- * avoid it entirely. The latter approach runs the risk of
- * receiving otherwise unnecessary IPIs.
- *
- * This choice is just a heuristic. The tlb code can handle this
- * function returning true or false regardless of whether we have
- * PCID.
- */
- return !static_cpu_has(X86_FEATURE_PCID);
-}
-
struct tlb_context {
u64 ctx_id;
u64 tlb_gen;
@@ -554,4 +538,9 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
native_flush_tlb_others(mask, info)
#endif
+extern void tlb_flush_remove_tables(struct mm_struct *mm);
+extern void tlb_flush_remove_tables_local(void *arg);
+
+#define HAVE_TLB_FLUSH_REMOVE_TABLES
+
#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index c1d2a9892352..453cf38a1c33 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -123,13 +123,17 @@ static inline int topology_max_smt_threads(void)
}
int topology_update_package_map(unsigned int apicid, unsigned int cpu);
-extern int topology_phys_to_logical_pkg(unsigned int pkg);
+int topology_phys_to_logical_pkg(unsigned int pkg);
+bool topology_is_primary_thread(unsigned int cpu);
+bool topology_smt_supported(void);
#else
#define topology_max_packages() (1)
static inline int
topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
static inline int topology_max_smt_threads(void) { return 1; }
+static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
+static inline bool topology_smt_supported(void) { return false; }
#endif
static inline void arch_fix_phys_package_id(int num, u32 slot)
diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h
index 4253bca99989..2e6245a023ef 100644
--- a/arch/x86/include/asm/trace/hyperv.h
+++ b/arch/x86/include/asm/trace/hyperv.h
@@ -28,6 +28,35 @@ TRACE_EVENT(hyperv_mmu_flush_tlb_others,
__entry->addr, __entry->end)
);
+TRACE_EVENT(hyperv_nested_flush_guest_mapping,
+ TP_PROTO(u64 as, int ret),
+ TP_ARGS(as, ret),
+
+ TP_STRUCT__entry(
+ __field(u64, as)
+ __field(int, ret)
+ ),
+ TP_fast_assign(__entry->as = as;
+ __entry->ret = ret;
+ ),
+ TP_printk("address space %llx ret %d", __entry->as, __entry->ret)
+ );
+
+TRACE_EVENT(hyperv_send_ipi_mask,
+ TP_PROTO(const struct cpumask *cpus,
+ int vector),
+ TP_ARGS(cpus, vector),
+ TP_STRUCT__entry(
+ __field(unsigned int, ncpus)
+ __field(int, vector)
+ ),
+ TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
+ __entry->vector = vector;
+ ),
+ TP_printk("ncpus %d vector %x",
+ __entry->ncpus, __entry->vector)
+ );
+
#endif /* CONFIG_HYPERV */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 2701d221583a..eb5bbfeccb66 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -33,13 +33,13 @@ static inline cycles_t get_cycles(void)
extern struct system_counterval_t convert_art_to_tsc(u64 art);
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
-extern void tsc_early_delay_calibrate(void);
+extern void tsc_early_init(void);
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern void mark_tsc_async_resets(char *reason);
-extern unsigned long native_calibrate_cpu(void);
+extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 62acb613114b..a9d637bc301d 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -52,7 +52,12 @@ copy_to_user_mcsafe(void *to, const void *from, unsigned len)
unsigned long ret;
__uaccess_begin();
- ret = memcpy_mcsafe(to, from, len);
+ /*
+ * Note, __memcpy_mcsafe() is explicitly used since it can
+ * handle exceptions / faults. memcpy_mcsafe() may fall back to
+ * memcpy() which lacks this handling.
+ */
+ ret = __memcpy_mcsafe(to, from, len);
__uaccess_end();
return ret;
}
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index bae46fc6b9de..0bcdb1279361 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -26,7 +26,7 @@
* the debuginfo as necessary. It will also warn if it sees any
* inconsistencies.
*/
-.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL
+.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL end=0
#ifdef CONFIG_STACK_VALIDATION
.Lunwind_hint_ip_\@:
.pushsection .discard.unwind_hints
@@ -35,12 +35,14 @@
.short \sp_offset
.byte \sp_reg
.byte \type
+ .byte \end
+ .balign 4
.popsection
#endif
.endm
.macro UNWIND_HINT_EMPTY
- UNWIND_HINT sp_reg=ORC_REG_UNDEFINED
+ UNWIND_HINT sp_reg=ORC_REG_UNDEFINED end=1
.endm
.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0
@@ -86,19 +88,21 @@
#else /* !__ASSEMBLY__ */
-#define UNWIND_HINT(sp_reg, sp_offset, type) \
+#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
"987: \n\t" \
".pushsection .discard.unwind_hints\n\t" \
/* struct unwind_hint */ \
".long 987b - .\n\t" \
- ".short " __stringify(sp_offset) "\n\t" \
+ ".short " __stringify(sp_offset) "\n\t" \
".byte " __stringify(sp_reg) "\n\t" \
".byte " __stringify(type) "\n\t" \
+ ".byte " __stringify(end) "\n\t" \
+ ".balign 4 \n\t" \
".popsection\n\t"
-#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE)
+#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE, 0)
-#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE)
+#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE, 0)
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 6aa8499e1f62..9527ba5d62da 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -74,6 +74,7 @@
#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
#define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
+#define SECONDARY_EXEC_ENCLS_EXITING 0x00008000
#define SECONDARY_EXEC_RDSEED_EXITING 0x00010000
#define SECONDARY_EXEC_ENABLE_PML 0x00020000
#define SECONDARY_EXEC_XSAVES 0x00100000
@@ -213,6 +214,8 @@ enum vmcs_field {
VMWRITE_BITMAP_HIGH = 0x00002029,
XSS_EXIT_BITMAP = 0x0000202C,
XSS_EXIT_BITMAP_HIGH = 0x0000202D,
+ ENCLS_EXITING_BITMAP = 0x0000202E,
+ ENCLS_EXITING_BITMAP_HIGH = 0x0000202F,
TSC_MULTIPLIER = 0x00002032,
TSC_MULTIPLIER_HIGH = 0x00002033,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
@@ -576,4 +579,15 @@ enum vm_instruction_error_number {
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
};
+enum vmx_l1d_flush_state {
+ VMENTER_L1D_FLUSH_AUTO,
+ VMENTER_L1D_FLUSH_NEVER,
+ VMENTER_L1D_FLUSH_COND,
+ VMENTER_L1D_FLUSH_ALWAYS,
+ VMENTER_L1D_FLUSH_EPT_DISABLED,
+ VMENTER_L1D_FLUSH_NOT_REQUIRED,
+};
+
+extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+
#endif
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index bfd882617613..6b2f90a0b149 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -209,24 +209,37 @@ extern struct { char _entry[32]; } hypercall_page[];
})
static inline long
-privcmd_call(unsigned call,
- unsigned long a1, unsigned long a2,
- unsigned long a3, unsigned long a4,
- unsigned long a5)
+xen_single_call(unsigned int call,
+ unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4,
+ unsigned long a5)
{
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
- stac();
asm volatile(CALL_NOSPEC
: __HYPERCALL_5PARAM
: [thunk_target] "a" (&hypercall_page[call])
: __HYPERCALL_CLOBBER5);
- clac();
return (long)__res;
}
+static inline long
+privcmd_call(unsigned int call,
+ unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4,
+ unsigned long a5)
+{
+ long res;
+
+ stac();
+ res = xen_single_call(call, a1, a2, a3, a4, a5);
+ clac();
+
+ return res;
+}
+
static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index c535c2fdea13..86299efa804a 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -378,4 +378,41 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
+#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
+
+#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
+#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
+
+struct kvm_vmx_nested_state {
+ __u64 vmxon_pa;
+ __u64 vmcs_pa;
+
+ struct {
+ __u16 flags;
+ } smm;
+};
+
+/* for KVM_CAP_NESTED_STATE */
+struct kvm_nested_state {
+ /* KVM_STATE_* flags */
+ __u16 flags;
+
+ /* 0 for VMX, 1 for SVM. */
+ __u16 format;
+
+ /* 128 for SVM, 128 + VMCS size for VMX. */
+ __u32 size;
+
+ union {
+ /* VMXON, VMCS */
+ struct kvm_vmx_nested_state vmx;
+
+ /* Pad the header to 128 bytes. */
+ __u8 pad[120];
+ };
+
+ __u8 data[0];
+};
+
#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 0ede697c3961..19980ec1a316 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -28,6 +28,7 @@
#define KVM_FEATURE_PV_UNHALT 7
#define KVM_FEATURE_PV_TLB_FLUSH 9
#define KVM_FEATURE_ASYNC_PF_VMEXIT 10
+#define KVM_FEATURE_PV_SEND_IPI 11
#define KVM_HINTS_REALTIME 0
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 02d6f5cf4e70..8824d01c0c35 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -61,6 +61,7 @@ obj-y += alternative.o i8253.o hw_breakpoint.o
obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
+obj-y += irqflags.o
obj-y += process.o
obj-y += fpu/
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index dde437f5d14f..158ad1483c43 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -108,7 +108,7 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
cx->type);
}
snprintf(cx->desc,
- ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
+ ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
cx->address);
out:
return retval;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a481763a3776..014f214da581 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -668,6 +668,7 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
local_irq_save(flags);
memcpy(addr, opcode, len);
local_irq_restore(flags);
+ sync_core();
/* Could also do a CLFLUSH here to speed up CPU recovery; but
that causes hangs on some VIA CPUs. */
return addr;
@@ -693,6 +694,12 @@ void *text_poke(void *addr, const void *opcode, size_t len)
struct page *pages[2];
int i;
+ /*
+ * While the boot memory allocator is running we cannot use struct
+ * pages as they are not yet initialized.
+ */
+ BUG_ON(!after_bootmem);
+
if (!core_kernel_text((unsigned long)addr)) {
pages[0] = vmalloc_to_page(addr);
pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2aabd4cb0e3f..84132eddb5a8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -56,6 +56,7 @@
#include <asm/hypervisor.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/irq_regs.h>
unsigned int num_processors;
@@ -573,6 +574,9 @@ static u32 skx_deadline_rev(void)
case 0x04: return 0x02000014;
}
+ if (boot_cpu_data.x86_stepping > 4)
+ return 0;
+
return ~0U;
}
@@ -937,7 +941,7 @@ static int __init calibrate_APIC_clock(void)
if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
pr_warning("APIC timer disabled due to verification failure\n");
- return -1;
+ return -1;
}
return 0;
@@ -2189,6 +2193,23 @@ static int cpuid_to_apicid[] = {
[0 ... NR_CPUS - 1] = -1,
};
+#ifdef CONFIG_SMP
+/**
+ * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
+ * @id: APIC ID to check
+ */
+bool apic_id_is_primary_thread(unsigned int apicid)
+{
+ u32 mask;
+
+ if (smp_num_siblings == 1)
+ return true;
+ /* Isolate the SMT bit(s) in the APICID and check for 0 */
+ mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
+ return !(apicid & mask);
+}
+#endif
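A brief worked example for the mask computation above:

/*
 * Example: with smp_num_siblings == 2, fls(2) == 2 and the mask is
 * (1 << 1) - 1 == 1, so even APIC IDs (0, 2, 4, ...) are primary
 * threads and odd IDs are their SMT siblings. With 4 siblings the
 * mask is 3 and only APIC IDs that are multiples of 4 are primary.
 */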
+
/*
* Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
* and cpuid_to_apicid[] synchronized.
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3982f79d2377..ff0d14cd9e82 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -33,6 +33,7 @@
#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index ce503c99f5c4..72a94401f9e0 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -12,6 +12,7 @@
*/
#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/hpet.h>
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 35aaee4fc028..9f148e3d45b4 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
@@ -218,7 +219,8 @@ static int reserve_irq_vector(struct irq_data *irqd)
return 0;
}
-static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
+static int
+assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
struct apic_chip_data *apicd = apic_chip_data(irqd);
bool resvd = apicd->has_reserved;
@@ -245,22 +247,12 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
return -EBUSY;
vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
- if (vector > 0)
- apic_update_vector(irqd, vector, cpu);
trace_vector_alloc(irqd->irq, vector, resvd, vector);
- return vector;
-}
-
-static int assign_vector_locked(struct irq_data *irqd,
- const struct cpumask *dest)
-{
- struct apic_chip_data *apicd = apic_chip_data(irqd);
- int vector = allocate_vector(irqd, dest);
-
if (vector < 0)
return vector;
+ apic_update_vector(irqd, vector, cpu);
+ apic_update_irq_cfg(irqd, vector, cpu);
- apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
return 0;
}
@@ -433,7 +425,7 @@ static int activate_managed(struct irq_data *irqd)
pr_err("Managed startup irq %u, no vector available\n",
irqd->irq);
}
- return ret;
+ return ret;
}
static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d492752f79e1..391f358ebb4c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -394,10 +394,10 @@ extern int uv_hub_info_version(void)
EXPORT_SYMBOL(uv_hub_info_version);
/* Default UV memory block size is 2GB */
-static unsigned long mem_block_size = (2UL << 30);
+static unsigned long mem_block_size __initdata = (2UL << 30);
/* Kernel parameter to specify UV mem block size */
-static int parse_mem_block_size(char *ptr)
+static int __init parse_mem_block_size(char *ptr)
{
unsigned long size = memparse(ptr, NULL);
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 5d0de79fdab0..ec00d1ff5098 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -240,6 +240,7 @@
#include <asm/olpc.h>
#include <asm/paravirt.h>
#include <asm/reboot.h>
+#include <asm/nospec-branch.h>
#if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
extern int (*console_blank_hook)(int);
@@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call)
gdt[0x40 / 8] = bad_bios_desc;
apm_irq_save(flags);
+ firmware_restrict_branch_speculation_start();
APM_DO_SAVE_SEGS;
apm_bios_call_asm(call->func, call->ebx, call->ecx,
&call->eax, &call->ebx, &call->ecx, &call->edx,
&call->esi);
APM_DO_RESTORE_SEGS;
+ firmware_restrict_branch_speculation_end();
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
@@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call)
gdt[0x40 / 8] = bad_bios_desc;
apm_irq_save(flags);
+ firmware_restrict_branch_speculation_start();
APM_DO_SAVE_SEGS;
error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
&call->eax);
APM_DO_RESTORE_SEGS;
+ firmware_restrict_branch_speculation_end();
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index dcb008c320fe..01de31db300d 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -103,4 +103,9 @@ void common(void) {
OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
+ DEFINE(MASK_entry_stack, (~(sizeof(struct entry_stack) - 1)));
+
+ /* Offset for sp0 and sp1 into the tss_struct */
+ OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+ OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
}
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index a4a3be399f4b..82826f2275cc 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -46,8 +46,14 @@ void foo(void)
OFFSET(saved_context_gdt_desc, saved_context, gdt_desc);
BLANK();
- /* Offset from the sysenter stack to tss.sp0 */
- DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+ /*
+ * Offset from the entry stack to task stack stored in TSS. Kernel entry
+ * happens on the per-cpu entry-stack, and the asm code switches to the
+ * task-stack pointer stored in x86_tss.sp1, which is a copy of
+ * task->thread.sp0 where entry code can find it.
+ */
+ DEFINE(TSS_entry2task_stack,
+ offsetof(struct cpu_entry_area, tss.x86_tss.sp1) -
offsetofend(struct cpu_entry_area, entry_stack_page.stack));
#ifdef CONFIG_STACKPROTECTOR
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index b2dcd161f514..3b9405e7ba2b 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -65,8 +65,6 @@ int main(void)
#undef ENTRY
OFFSET(TSS_ist, tss_struct, x86_tss.ist);
- OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
- OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
BLANK();
#ifdef CONFIG_STACKPROTECTOR
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 7a40196967cb..347137e80bf5 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -35,7 +35,9 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
-obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o intel_rdt_ctrlmondata.o
+obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
+obj-$(CONFIG_INTEL_RDT) += intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
+CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 082d7875cef8..22ab408177b2 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -232,8 +232,6 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
}
}
- set_cpu_cap(c, X86_FEATURE_K7);
-
/* calling is from identify_secondary_cpu() ? */
if (!c->cpu_index)
return;
@@ -315,6 +313,13 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
c->cpu_core_id %= cus_per_node;
}
+
+static void amd_get_topology_early(struct cpuinfo_x86 *c)
+{
+ if (cpu_has(c, X86_FEATURE_TOPOEXT))
+ smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+}
+
/*
* Fixup core topology information for
* (1) AMD multi-node processors
@@ -334,7 +339,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
node_id = ecx & 0xff;
- smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
if (c->x86 == 0x15)
c->cu_id = ebx & 0xff;
@@ -543,7 +547,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
nodes_per_socket = ((value >> 3) & 7) + 1;
}
- if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+ if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
+ !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
+ c->x86 >= 0x15 && c->x86 <= 0x17) {
unsigned int bit;
switch (c->x86) {
@@ -611,10 +617,19 @@ clear_sev:
static void early_init_amd(struct cpuinfo_x86 *c)
{
+ u64 value;
u32 dummy;
early_init_amd_mc(c);
+#ifdef CONFIG_X86_32
+ if (c->x86 == 6)
+ set_cpu_cap(c, X86_FEATURE_K7);
+#endif
+
+ if (c->x86 >= 0xf)
+ set_cpu_cap(c, X86_FEATURE_K8);
+
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
/*
@@ -681,6 +696,22 @@ static void early_init_amd(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_E400);
early_detect_mem_encrypt(c);
+
+ /* Re-enable TopologyExtensions if switched off by BIOS */
+ if (c->x86 == 0x15 &&
+ (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
+ !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+
+ if (msr_set_bit(0xc0011005, 54) > 0) {
+ rdmsrl(0xc0011005, value);
+ if (value & BIT_64(54)) {
+ set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+ pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+ }
+ }
+ }
+
+ amd_get_topology_early(c);
}
static void init_amd_k8(struct cpuinfo_x86 *c)
@@ -772,19 +803,6 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
{
u64 value;
- /* re-enable TopologyExtensions if switched off by BIOS */
- if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
- !cpu_has(c, X86_FEATURE_TOPOEXT)) {
-
- if (msr_set_bit(0xc0011005, 54) > 0) {
- rdmsrl(0xc0011005, value);
- if (value & BIT_64(54)) {
- set_cpu_cap(c, X86_FEATURE_TOPOEXT);
- pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
- }
- }
- }
-
/*
* The way access filter has a performance penalty on some workloads.
* Disable it on the affected CPUs.
@@ -848,22 +866,12 @@ static void init_amd(struct cpuinfo_x86 *c)
cpu_detect_cache_sizes(c);
- /* Multi core CPU? */
- if (c->extended_cpuid_level >= 0x80000008) {
- amd_detect_cmp(c);
- amd_get_topology(c);
- srat_detect_node(c);
- }
-
-#ifdef CONFIG_X86_32
- detect_ht(c);
-#endif
+ amd_detect_cmp(c);
+ amd_get_topology(c);
+ srat_detect_node(c);
init_amd_cacheinfo(c);
- if (c->x86 >= 0xf)
- set_cpu_cap(c, X86_FEATURE_K8);
-
if (cpu_has(c, X86_FEATURE_XMM2)) {
unsigned long long val;
int ret;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 404df26b7de8..cb4a16292aa7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -22,15 +22,18 @@
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
+#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
+#include <asm/e820/api.h>
#include <asm/hypervisor.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
+static void __init l1tf_select_mitigation(void);
/*
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -56,6 +59,12 @@ void __init check_bugs(void)
{
identify_boot_cpu();
+ /*
+ * identify_boot_cpu() initialized SMT support information, let the
+ * core code know.
+ */
+ cpu_smt_check_topology_early();
+
if (!IS_ENABLED(CONFIG_SMP)) {
pr_info("CPU: ");
print_cpu_info(&boot_cpu_data);
@@ -82,6 +91,8 @@ void __init check_bugs(void)
*/
ssb_select_mitigation();
+ l1tf_select_mitigation();
+
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
@@ -130,6 +141,7 @@ static const char *spectre_v2_strings[] = {
[SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
[SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
[SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
+ [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
};
#undef pr_fmt
@@ -155,7 +167,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
/* SSBD controlled in MSR_SPEC_CTRL */
- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD))
hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
if (hostval != guestval) {
@@ -312,23 +325,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return cmd;
}
-/* Check for Skylake-like CPUs (for RSB handling) */
-static bool __init is_skylake_era(void)
-{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6) {
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_SKYLAKE_MOBILE:
- case INTEL_FAM6_SKYLAKE_DESKTOP:
- case INTEL_FAM6_SKYLAKE_X:
- case INTEL_FAM6_KABYLAKE_MOBILE:
- case INTEL_FAM6_KABYLAKE_DESKTOP:
- return true;
- }
- }
- return false;
-}
-
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -348,6 +344,13 @@ static void __init spectre_v2_select_mitigation(void)
case SPECTRE_V2_CMD_FORCE:
case SPECTRE_V2_CMD_AUTO:
+ if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+ mode = SPECTRE_V2_IBRS_ENHANCED;
+ /* Force it so VMEXIT will restore correctly */
+ x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ goto specv2_set_mode;
+ }
if (IS_ENABLED(CONFIG_RETPOLINE))
goto retpoline_auto;
break;
@@ -385,26 +388,20 @@ retpoline_auto:
setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
}
+specv2_set_mode:
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
/*
- * If neither SMEP nor PTI are available, there is a risk of
- * hitting userspace addresses in the RSB after a context switch
- * from a shallow call stack to a deeper one. To prevent this fill
- * the entire RSB, even when using IBRS.
+ * If spectre v2 protection has been enabled, unconditionally fill
+ * RSB during a context switch; this protects against two independent
+ * issues:
*
- * Skylake era CPUs have a separate issue with *underflow* of the
- * RSB, when they will predict 'ret' targets from the generic BTB.
- * The proper mitigation for this is IBRS. If IBRS is not supported
- * or deactivated in favour of retpolines the RSB fill on context
- * switch is required.
+ * - RSB underflow (and switch to BTB) on Skylake+
+ * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
*/
- if ((!boot_cpu_has(X86_FEATURE_PTI) &&
- !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
- }
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
/* Initialize Indirect Branch Prediction Barrier if supported */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
@@ -414,9 +411,16 @@ retpoline_auto:
/*
* Retpoline means the kernel is safe because it has no indirect
- * branches. But firmware isn't, so use IBRS to protect that.
+ * branches. Enhanced IBRS protects firmware too, so, enable restricted
+ * speculation around firmware calls only when Enhanced IBRS isn't
+ * supported.
+ *
+ * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
+ * the user might select retpoline on the kernel command line and, if
+ * the CPU supports Enhanced IBRS, the kernel might unintentionally not
+ * enable IBRS around firmware calls.
*/
- if (boot_cpu_has(X86_FEATURE_IBRS)) {
+ if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
@@ -533,9 +537,10 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
* Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
* use a completely different MSR and bit dependent on family.
*/
- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+ !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
x86_amd_ssb_disable();
- else {
+ } else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
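Condensed, the SSBD control selection in this hunk is: prefer the architectural SPEC_CTRL MSR bit whenever either the Intel (SPEC_CTRL_SSBD) or AMD (AMD_SSBD) feature flag advertises it, and fall back to the family-specific path only otherwise. A hedged restatement of that decision, using only the symbols visible above:

	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_base |= SPEC_CTRL_SSBD;	/* architectural MSR bit */
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
	} else {
		x86_amd_ssb_disable();			/* family-specific MSR */
	}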
@@ -652,8 +657,120 @@ void x86_spec_ctrl_setup_ap(void)
x86_amd_ssb_disable();
}
+#undef pr_fmt
+#define pr_fmt(fmt) "L1TF: " fmt
+
+/* Default mitigation for L1TF-affected CPUs */
+enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+EXPORT_SYMBOL_GPL(l1tf_mitigation);
+#endif
+enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+
+static void __init l1tf_select_mitigation(void)
+{
+ u64 half_pa;
+
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+ case L1TF_MITIGATION_FLUSH:
+ break;
+ case L1TF_MITIGATION_FLUSH_NOSMT:
+ case L1TF_MITIGATION_FULL:
+ cpu_smt_disable(false);
+ break;
+ case L1TF_MITIGATION_FULL_FORCE:
+ cpu_smt_disable(true);
+ break;
+ }
+
+#if CONFIG_PGTABLE_LEVELS == 2
+ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+ return;
+#endif
+
+ /*
+ * This is extremely unlikely to happen because on almost all
+ * systems MAX_PA/2 far exceeds the amount of RAM that can fit
+ * into the DIMM slots.
+ */
+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+ if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+ return;
+ }
+
+ setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
+}
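The e820 check above is a plain address-space comparison. As a worked example, assuming a hypothetical CPU reporting x86_phys_bits == 46, l1tf_pfn_limit() caps usable page frames at the MAX_PA/2 boundary:

	/* Hypothetical numbers: x86_phys_bits == 46. */
	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;	/* roughly 1ULL << 45 */
	/* ~32 TiB; any E820_TYPE_RAM mapped above this line can hold
	 * pages whose inverted PTEs still point at real memory, which
	 * is why the code warns instead of setting L1TF_PTEINV. */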
+
+static int __init l1tf_cmdline(char *str)
+{
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return 0;
+
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off"))
+ l1tf_mitigation = L1TF_MITIGATION_OFF;
+ else if (!strcmp(str, "flush,nowarn"))
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
+ else if (!strcmp(str, "flush"))
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH;
+ else if (!strcmp(str, "flush,nosmt"))
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+ else if (!strcmp(str, "full"))
+ l1tf_mitigation = L1TF_MITIGATION_FULL;
+ else if (!strcmp(str, "full,force"))
+ l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
+
+ return 0;
+}
+early_param("l1tf", l1tf_cmdline);
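For reference, the accepted strings map one-to-one onto the enum handled in l1tf_select_mitigation() above: booting with l1tf=flush,nosmt selects L1TF_MITIGATION_FLUSH_NOSMT and disables SMT via cpu_smt_disable(false), l1tf=full,force passes true so the SMT disable cannot be undone at runtime, and any unrecognized string falls through and keeps the default L1TF_MITIGATION_FLUSH.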
+
+#undef pr_fmt
+
#ifdef CONFIG_SYSFS
+#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+static const char *l1tf_vmx_states[] = {
+ [VMENTER_L1D_FLUSH_AUTO] = "auto",
+ [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
+ [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
+ [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
+ [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
+ [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
+};
+
+static ssize_t l1tf_show_state(char *buf)
+{
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
+ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
+ (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
+ cpu_smt_control == CPU_SMT_ENABLED))
+ return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
+ l1tf_vmx_states[l1tf_vmx_mitigation]);
+
+ return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
+ l1tf_vmx_states[l1tf_vmx_mitigation],
+ cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+}
+#else
+static ssize_t l1tf_show_state(char *buf)
+{
+ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+}
+#endif
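Putting the sysfs pieces together: on a host with KVM/VMX where the conditional L1D flush is active and SMT is still enabled, cpu_show_l1tf() below would report, through the usual vulnerabilities directory (/sys/devices/system/cpu/vulnerabilities/l1tf), the string "Mitigation: PTE Inversion; VMX: conditional cache flushes, SMT vulnerable", assembled from L1TF_DEFAULT_MSG, l1tf_vmx_states[] and cpu_smt_control above.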
+
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -682,6 +799,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+ case X86_BUG_L1TF:
+ if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+ return l1tf_show_state(buf);
+ break;
default:
break;
}
@@ -708,4 +829,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
{
return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
+}
#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index eb4cb3efd20e..84dee5ab745a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -661,33 +661,36 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}
-void detect_ht(struct cpuinfo_x86 *c)
+int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
u32 eax, ebx, ecx, edx;
- int index_msb, core_bits;
- static bool printed;
if (!cpu_has(c, X86_FEATURE_HT))
- return;
+ return -1;
if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
- goto out;
+ return -1;
if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
- return;
+ return -1;
cpuid(1, &eax, &ebx, &ecx, &edx);
smp_num_siblings = (ebx & 0xff0000) >> 16;
-
- if (smp_num_siblings == 1) {
+ if (smp_num_siblings == 1)
pr_info_once("CPU0: Hyper-Threading is disabled\n");
- goto out;
- }
+#endif
+ return 0;
+}
- if (smp_num_siblings <= 1)
- goto out;
+void detect_ht(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ int index_msb, core_bits;
+
+ if (detect_ht_early(c) < 0)
+ return;
index_msb = get_count_order(smp_num_siblings);
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
@@ -700,15 +703,6 @@ void detect_ht(struct cpuinfo_x86 *c)
c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
((1 << core_bits) - 1);
-
-out:
- if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
- pr_info("CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- pr_info("CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
- printed = 1;
- }
#endif
}
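The split of detect_ht() above leaves the ID derivation itself in place: get_count_order() of the sibling count tells how many low APIC ID bits identify a thread within a package. A worked example with hypothetical values (smp_num_siblings == 4, c->x86_max_cores == 2, initial APIC ID 0b1101), assuming the unchanged remainder of detect_ht() and the default APIC driver, where phys_pkg_id() reduces to a right shift:

	index_msb = get_count_order(4);		/* 2 */
	/* phys_proc_id = 0b1101 >> 2 = 3	(package ID) */
	index_msb = get_count_order(4 / 2);	/* 1 */
	core_bits = get_count_order(2);		/* 1 */
	/* cpu_core_id = (0b1101 >> 1) & 0b1 = 0 (core within package) */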
@@ -911,7 +905,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
apply_forced_caps(c);
}
-static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
+void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
@@ -987,6 +981,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
{}
};
+static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+ /* in addition to cpu_no_speculation */
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+ {}
+};
+
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = 0;
@@ -1005,6 +1014,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+ if (ia32_cap & ARCH_CAP_IBRS_ALL)
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
if (x86_match_cpu(cpu_no_meltdown))
return;
@@ -1013,6 +1025,29 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
return;
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+ if (x86_match_cpu(cpu_no_l1tf))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_L1TF);
+}
+
+/*
+ * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
+ * unfortunately, that's not true in practice because of early VIA
+ * chips and (more importantly) broken virtualizers that are not easy
+ * to detect. In the latter case it doesn't even *fail* reliably, so
+ * probing for it doesn't even work. Disable it completely on 32-bit
+ * unless we can find a reliable way to detect all the broken cases.
+ * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
+ */
+static void detect_nopl(void)
+{
+#ifdef CONFIG_X86_32
+ setup_clear_cpu_cap(X86_FEATURE_NOPL);
+#else
+ setup_force_cpu_cap(X86_FEATURE_NOPL);
+#endif
}
/*
@@ -1089,6 +1124,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
*/
if (!pgtable_l5_enabled())
setup_clear_cpu_cap(X86_FEATURE_LA57);
+
+ detect_nopl();
}
void __init early_cpu_init(void)
@@ -1124,24 +1161,6 @@ void __init early_cpu_init(void)
early_identify_cpu(&boot_cpu_data);
}
-/*
- * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
- * unfortunately, that's not true in practice because of early VIA
- * chips and (more importantly) broken virtualizers that are not easy
- * to detect. In the latter case it doesn't even *fail* reliably, so
- * probing for it doesn't even work. Disable it completely on 32-bit
- * unless we can find a reliable way to detect all the broken cases.
- * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
- */
-static void detect_nopl(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_32
- clear_cpu_cap(c, X86_FEATURE_NOPL);
-#else
- set_cpu_cap(c, X86_FEATURE_NOPL);
-#endif
-}
-
static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
@@ -1204,8 +1223,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
get_model_name(c); /* Default name */
- detect_nopl(c);
-
detect_null_seg_behavior(c);
/*
@@ -1804,11 +1821,12 @@ void cpu_init(void)
enter_lazy_tlb(&init_mm, curr);
/*
- * Initialize the TSS. Don't bother initializing sp0, as the initial
- * task never enters user mode.
+ * Initialize the TSS. sp0 points to the entry trampoline stack
+ * regardless of what task is running.
*/
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
load_TR_desc();
+ load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
load_mm_ldt(&init_mm);
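The load_sp0() argument above is plain pointer arithmetic: cpu_entry_stack(cpu) returns a struct entry_stack *, so "+ 1" advances by sizeof(struct entry_stack) and yields the address one past the end of the per-CPU entry stack. Since x86 stacks grow downward, that is exactly the initial stack pointer. A hypothetical spelled-out form:

	struct entry_stack *es = cpu_entry_stack(cpu);
	unsigned long top = (unsigned long)(es + 1);	/* base + sizeof(*es) */
	load_sp0(top);					/* top of entry stack */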
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 38216f678fc3..7b229afa0a37 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -46,6 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
*const __x86_cpu_dev_end[];
extern void get_cpu_cap(struct cpuinfo_x86 *c);
+extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern u32 get_scattered_cpuid_leaf(unsigned int level,
@@ -55,7 +56,9 @@ extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
extern void detect_num_cpu_cores(struct cpuinfo_x86 *c);
+extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
extern int detect_extended_topology(struct cpuinfo_x86 *c);
+extern int detect_ht_early(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
unsigned int aperfmperf_get_khz(int cpu);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index eb75564f2d25..401e8c133108 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -301,6 +301,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
}
check_mpx_erratum(c);
+
+ /*
+ * Get the number of SMT siblings early from the extended topology
+ * leaf, if available. Otherwise try the legacy SMT detection.
+ */
+ if (detect_extended_topology_early(c) < 0)
+ detect_ht_early(c);
}
#ifdef CONFIG_X86_32
@@ -465,14 +472,17 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020
+#define X86_VMX_FEATURE_EPT_CAP_AD 0x00200000
u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
+ u32 msr_vpid_cap, msr_ept_cap;
clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
clear_cpu_cap(c, X86_FEATURE_VNMI);
clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
clear_cpu_cap(c, X86_FEATURE_EPT);
clear_cpu_cap(c, X86_FEATURE_VPID);
+ clear_cpu_cap(c, X86_FEATURE_EPT_AD);
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
msr_ctl = vmx_msr_high | vmx_msr_low;
@@ -487,8 +497,13 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
(msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
- if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
+ if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) {
set_cpu_cap(c, X86_FEATURE_EPT);
+ rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
+ msr_ept_cap, msr_vpid_cap);
+ if (msr_ept_cap & X86_VMX_FEATURE_EPT_CAP_AD)
+ set_cpu_cap(c, X86_FEATURE_EPT_AD);
+ }
if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
set_cpu_cap(c, X86_FEATURE_VPID);
}
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index ec4754f81cbd..abb71ac70443 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -859,6 +859,8 @@ static __init bool get_rdt_resources(void)
return (rdt_mon_capable || rdt_alloc_capable);
}
+static enum cpuhp_state rdt_online;
+
static int __init intel_rdt_late_init(void)
{
struct rdt_resource *r;
@@ -880,6 +882,7 @@ static int __init intel_rdt_late_init(void)
cpuhp_remove_state(state);
return ret;
}
+ rdt_online = state;
for_each_alloc_capable_rdt_resource(r)
pr_info("Intel RDT %s allocation detected\n", r->name);
@@ -891,3 +894,11 @@ static int __init intel_rdt_late_init(void)
}
late_initcall(intel_rdt_late_init);
+
+static void __exit intel_rdt_exit(void)
+{
+ cpuhp_remove_state(rdt_online);
+ rdtgroup_exit();
+}
+
+__exitcall(intel_rdt_exit);
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 39752825e376..4e588f36228f 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -81,6 +81,34 @@ enum rdt_group_type {
};
/**
+ * enum rdtgrp_mode - Mode of a RDT resource group
+ * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations
+ * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed
+ * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
+ * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
+ * allowed AND the allocations are Cache Pseudo-Locked
+ *
+ * The mode of a resource group enables control over the allowed overlap
+ * between allocations associated with different resource groups (classes
+ * of service). The user can modify the mode of a resource group by
+ * writing to the "mode" resctrl file associated with the resource group.
+ *
+ * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by
+ * writing the appropriate text to the "mode" file. A resource group enters
+ * "pseudo-locked" mode after the schemata is written while the resource
+ * group is in "pseudo-locksetup" mode.
+ */
+enum rdtgrp_mode {
+ RDT_MODE_SHAREABLE = 0,
+ RDT_MODE_EXCLUSIVE,
+ RDT_MODE_PSEUDO_LOCKSETUP,
+ RDT_MODE_PSEUDO_LOCKED,
+
+ /* Must be last */
+ RDT_NUM_MODES,
+};
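In resctrl terms, assuming the conventional /sys/fs/resctrl mount point, the transitions described above look like: write "pseudo-locksetup" to the group's mode file, then write the schemata; on success the mode file reads back "pseudo-locked", while "shareable" and "exclusive" can be written directly.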
+
+/**
* struct mongroup - store mon group's data in resctrl fs.
* @mon_data_kn: kernfs node for the mon_data directory
* @parent: parent rdtgrp
@@ -95,6 +123,43 @@ struct mongroup {
};
/**
+ * struct pseudo_lock_region - pseudo-lock region information
+ * @r: RDT resource to which this pseudo-locked region
+ * belongs
+ * @d: RDT domain to which this pseudo-locked region
+ * belongs
+ * @cbm: bitmask of the pseudo-locked region
+ * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread
+ * completion
+ * @thread_done: variable used by waitqueue to test if pseudo-locking
+ * thread completed
+ * @cpu: core associated with the cache on which the setup code
+ * will be run
+ * @line_size: size of the cache lines
+ * @size: size of pseudo-locked region in bytes
+ * @kmem: the kernel memory associated with pseudo-locked region
+ * @minor: minor number of character device associated with this
+ * region
+ * @debugfs_dir: pointer to this region's directory in the debugfs
+ * filesystem
+ * @pm_reqs: Power management QoS requests related to this region
+ */
+struct pseudo_lock_region {
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+ u32 cbm;
+ wait_queue_head_t lock_thread_wq;
+ int thread_done;
+ int cpu;
+ unsigned int line_size;
+ unsigned int size;
+ void *kmem;
+ unsigned int minor;
+ struct dentry *debugfs_dir;
+ struct list_head pm_reqs;
+};
+
+/**
* struct rdtgroup - store rdtgroup's data in resctrl file system.
* @kn: kernfs node
* @rdtgroup_list: linked list for all rdtgroups
@@ -106,16 +171,20 @@ struct mongroup {
* @type: indicates type of this rdtgroup - either
* monitor only or ctrl_mon group
* @mon: mongroup related data
+ * @mode: mode of resource group
+ * @plr: pseudo-locked region
*/
struct rdtgroup {
- struct kernfs_node *kn;
- struct list_head rdtgroup_list;
- u32 closid;
- struct cpumask cpu_mask;
- int flags;
- atomic_t waitcount;
- enum rdt_group_type type;
- struct mongroup mon;
+ struct kernfs_node *kn;
+ struct list_head rdtgroup_list;
+ u32 closid;
+ struct cpumask cpu_mask;
+ int flags;
+ atomic_t waitcount;
+ enum rdt_group_type type;
+ struct mongroup mon;
+ enum rdtgrp_mode mode;
+ struct pseudo_lock_region *plr;
};
/* rdtgroup.flags */
@@ -148,6 +217,7 @@ extern struct list_head rdt_all_groups;
extern int max_name_width, max_data_width;
int __init rdtgroup_init(void);
+void __exit rdtgroup_exit(void);
/**
* struct rftype - describe each file in the resctrl file system
@@ -216,22 +286,24 @@ struct mbm_state {
* @mbps_val: When mba_sc is enabled, this holds the bandwidth in MBps
* @new_ctrl: new ctrl value to be loaded
* @have_new_ctrl: did user provide new_ctrl for this domain
+ * @plr: pseudo-locked region (if any) associated with domain
*/
struct rdt_domain {
- struct list_head list;
- int id;
- struct cpumask cpu_mask;
- unsigned long *rmid_busy_llc;
- struct mbm_state *mbm_total;
- struct mbm_state *mbm_local;
- struct delayed_work mbm_over;
- struct delayed_work cqm_limbo;
- int mbm_work_cpu;
- int cqm_work_cpu;
- u32 *ctrl_val;
- u32 *mbps_val;
- u32 new_ctrl;
- bool have_new_ctrl;
+ struct list_head list;
+ int id;
+ struct cpumask cpu_mask;
+ unsigned long *rmid_busy_llc;
+ struct mbm_state *mbm_total;
+ struct mbm_state *mbm_local;
+ struct delayed_work mbm_over;
+ struct delayed_work cqm_limbo;
+ int mbm_work_cpu;
+ int cqm_work_cpu;
+ u32 *ctrl_val;
+ u32 *mbps_val;
+ u32 new_ctrl;
+ bool have_new_ctrl;
+ struct pseudo_lock_region *plr;
};
/**
@@ -351,7 +423,7 @@ struct rdt_resource {
struct rdt_cache cache;
struct rdt_membw membw;
const char *format_str;
- int (*parse_ctrlval) (char *buf, struct rdt_resource *r,
+ int (*parse_ctrlval) (void *data, struct rdt_resource *r,
struct rdt_domain *d);
struct list_head evt_list;
int num_rmid;
@@ -359,8 +431,8 @@ struct rdt_resource {
unsigned long fflags;
};
-int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d);
-int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d);
+int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d);
+int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -368,7 +440,7 @@ extern struct rdt_resource rdt_resources_all[];
extern struct rdtgroup rdtgroup_default;
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
-int __init rdtgroup_init(void);
+extern struct dentry *debugfs_resctrl;
enum {
RDT_RESOURCE_L3,
@@ -439,13 +511,32 @@ void rdt_last_cmd_printf(const char *fmt, ...);
void rdt_ctrl_update(void *arg);
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
void rdtgroup_kn_unlock(struct kernfs_node *kn);
+int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name);
+int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
+ umode_t mask);
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
struct list_head **pos);
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
int rdtgroup_schemata_show(struct kernfs_open_file *of,
struct seq_file *s, void *v);
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+ u32 _cbm, int closid, bool exclusive);
+unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
+ u32 cbm);
+enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
+int rdtgroup_tasks_assigned(struct rdtgroup *r);
+int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
+int rdt_pseudo_lock_init(void);
+void rdt_pseudo_lock_release(void);
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
+void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
+int update_domains(struct rdt_resource *r, int closid);
+void closid_free(int closid);
int alloc_rmid(void);
void free_rmid(u32 rmid);
int rdt_get_mon_l3_config(struct rdt_resource *r);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index 116d57b248d3..af358ca05160 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -64,9 +64,10 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
-int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d)
{
unsigned long data;
+ char *buf = _buf;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
@@ -87,7 +88,7 @@ int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
* are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
* Additionally Haswell requires at least two bits set.
*/
-static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
+static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
unsigned long first_bit, zero_bit, val;
unsigned int cbm_len = r->cache.cbm_len;
@@ -122,22 +123,64 @@ static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
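cbm_validate(), whose body is unchanged above, enforces the contiguity rule from the earlier comment: locate the first set bit, then the next zero bit, and reject the mask if any set bit appears after that zero. A hedged sketch of the check using the kernel bitmap helpers, assuming val holds the parsed mask:

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
	if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)
		return false;	/* e.g. 0xF0F: bits resume after a hole */
	if (zero_bit - first_bit < r->cache.min_cbm_bits)
		return false;	/* Haswell wants at least two bits set */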
+struct rdt_cbm_parse_data {
+ struct rdtgroup *rdtgrp;
+ char *buf;
+};
+
/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
*/
-int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
{
- unsigned long data;
+ struct rdt_cbm_parse_data *data = _data;
+ struct rdtgroup *rdtgrp = data->rdtgrp;
+ u32 cbm_val;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
return -EINVAL;
}
- if(!cbm_validate(buf, &data, r))
+ /*
+ * Cannot set up more than one pseudo-locked region in a cache
+ * hierarchy.
+ */
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
+ rdtgroup_pseudo_locked_in_hierarchy(d)) {
+ rdt_last_cmd_printf("pseudo-locked region in hierarchy\n");
return -EINVAL;
- d->new_ctrl = data;
+ }
+
+ if (!cbm_validate(data->buf, &cbm_val, r))
+ return -EINVAL;
+
+ if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
+ rdtgrp->mode == RDT_MODE_SHAREABLE) &&
+ rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
+ rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The CBM may not overlap with the CBM of another closid if
+ * either is exclusive.
+ */
+ if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
+ rdt_last_cmd_printf("overlaps with exclusive group\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
+ if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ rdt_last_cmd_printf("overlaps with other group\n");
+ return -EINVAL;
+ }
+ }
+
+ d->new_ctrl = cbm_val;
d->have_new_ctrl = true;
return 0;
@@ -149,8 +192,10 @@ int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
* separated by ";". The "id" is in decimal, and must match one of
* the "id"s for this resource.
*/
-static int parse_line(char *line, struct rdt_resource *r)
+static int parse_line(char *line, struct rdt_resource *r,
+ struct rdtgroup *rdtgrp)
{
+ struct rdt_cbm_parse_data data;
char *dom = NULL, *id;
struct rdt_domain *d;
unsigned long dom_id;
@@ -167,15 +212,32 @@ next:
dom = strim(dom);
list_for_each_entry(d, &r->domains, list) {
if (d->id == dom_id) {
- if (r->parse_ctrlval(dom, r, d))
+ data.buf = dom;
+ data.rdtgrp = rdtgrp;
+ if (r->parse_ctrlval(&data, r, d))
return -EINVAL;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ /*
+ * We are in pseudo-locking setup mode and have
+ * just parsed a valid CBM that should be
+ * pseudo-locked. Only one locked region is
+ * allowed per resource group and domain, so do
+ * the required initialization for this single
+ * region and return.
+ */
+ rdtgrp->plr->r = r;
+ rdtgrp->plr->d = d;
+ rdtgrp->plr->cbm = d->new_ctrl;
+ d->plr = rdtgrp->plr;
+ return 0;
+ }
goto next;
}
}
return -EINVAL;
}
-static int update_domains(struct rdt_resource *r, int closid)
+int update_domains(struct rdt_resource *r, int closid)
{
struct msr_param msr_param;
cpumask_var_t cpu_mask;
@@ -220,13 +282,14 @@ done:
return 0;
}
-static int rdtgroup_parse_resource(char *resname, char *tok, int closid)
+static int rdtgroup_parse_resource(char *resname, char *tok,
+ struct rdtgroup *rdtgrp)
{
struct rdt_resource *r;
for_each_alloc_enabled_rdt_resource(r) {
- if (!strcmp(resname, r->name) && closid < r->num_closid)
- return parse_line(tok, r);
+ if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
+ return parse_line(tok, r, rdtgrp);
}
rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
return -EINVAL;
@@ -239,7 +302,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
struct rdt_domain *dom;
struct rdt_resource *r;
char *tok, *resname;
- int closid, ret = 0;
+ int ret = 0;
/* Valid input requires a trailing newline */
if (nbytes == 0 || buf[nbytes - 1] != '\n')
@@ -253,7 +316,15 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
}
rdt_last_cmd_clear();
- closid = rdtgrp->closid;
+ /*
+ * No changes to pseudo-locked region allowed. It has to be removed
+ * and re-created instead.
+ */
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("resource group is pseudo-locked\n");
+ goto out;
+ }
for_each_alloc_enabled_rdt_resource(r) {
list_for_each_entry(dom, &r->domains, list)
@@ -272,17 +343,27 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
ret = -EINVAL;
goto out;
}
- ret = rdtgroup_parse_resource(resname, tok, closid);
+ ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
if (ret)
goto out;
}
for_each_alloc_enabled_rdt_resource(r) {
- ret = update_domains(r, closid);
+ ret = update_domains(r, rdtgrp->closid);
if (ret)
goto out;
}
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ /*
+ * If pseudo-locking fails we keep the resource group in
+ * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
+ * active and updated for just the domain the pseudo-locked
+ * region was requested for.
+ */
+ ret = rdtgroup_pseudo_lock_create(rdtgrp);
+ }
+
out:
rdtgroup_kn_unlock(of->kn);
return ret ?: nbytes;
@@ -318,10 +399,18 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (rdtgrp) {
- closid = rdtgrp->closid;
- for_each_alloc_enabled_rdt_resource(r) {
- if (closid < r->num_closid)
- show_doms(s, r, closid);
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ for_each_alloc_enabled_rdt_resource(r)
+ seq_printf(s, "%s:uninitialized\n", r->name);
+ } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
+ rdtgrp->plr->d->id, rdtgrp->plr->cbm);
+ } else {
+ closid = rdtgrp->closid;
+ for_each_alloc_enabled_rdt_resource(r) {
+ if (closid < r->num_closid)
+ show_doms(s, r, closid);
+ }
}
} else {
ret = -ENOENT;
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
new file mode 100644
index 000000000000..40f3903ae5d9
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -0,0 +1,1522 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Resource Director Technology (RDT)
+ *
+ * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Author: Reinette Chatre <reinette.chatre@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/kthread.h>
+#include <linux/mman.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/intel-family.h>
+#include <asm/intel_rdt_sched.h>
+#include <asm/perf_event.h>
+
+#include "intel_rdt.h"
+
+#define CREATE_TRACE_POINTS
+#include "intel_rdt_pseudo_lock_event.h"
+
+/*
+ * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
+ * prefetcher state. Details about this register can be found in the MSR
+ * tables for specific platforms found in Intel's SDM.
+ */
+#define MSR_MISC_FEATURE_CONTROL 0x000001a4
+
+/*
+ * The bits needed to disable hardware prefetching varies based on the
+ * platform. During initialization we will discover which bits to use.
+ */
+static u64 prefetch_disable_bits;
+
+/*
+ * Major number assigned to and shared by all devices exposing
+ * pseudo-locked regions.
+ */
+static unsigned int pseudo_lock_major;
+static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
+static struct class *pseudo_lock_class;
+
+/**
+ * get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ *
+ * Capture the list of platforms that have been validated to support
+ * pseudo-locking. This includes testing to ensure pseudo-locked regions
+ * with low cache miss rates can be created under variety of load conditions
+ * as well as that these pseudo-locked regions can maintain their low cache
+ * miss rates under variety of load conditions for significant lengths of time.
+ *
+ * After a platform has been validated to support pseudo-locking its
+ * hardware prefetch disable bits are included here as they are documented
+ * in the SDM.
+ *
+ * When adding a platform here also add support for its cache events to
+ * measure_cycles_perf_fn()
+ *
+ * Return:
+ * If platform is supported, the bits to disable hardware prefetchers, 0
+ * if platform is not supported.
+ */
+static u64 get_prefetch_disable_bits(void)
+{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ boot_cpu_data.x86 != 6)
+ return 0;
+
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_BROADWELL_X:
+ /*
+ * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
+ * as:
+ * 0 L2 Hardware Prefetcher Disable (R/W)
+ * 1 L2 Adjacent Cache Line Prefetcher Disable (R/W)
+ * 2 DCU Hardware Prefetcher Disable (R/W)
+ * 3 DCU IP Prefetcher Disable (R/W)
+ * 63:4 Reserved
+ */
+ return 0xF;
+ case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ /*
+ * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
+ * as:
+ * 0 L2 Hardware Prefetcher Disable (R/W)
+ * 1 Reserved
+ * 2 DCU Hardware Prefetcher Disable (R/W)
+ * 63:3 Reserved
+ */
+ return 0x5;
+ }
+
+ return 0;
+}
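These masks are consumed by the locking and measurement threads further down: the bits are written to MSR_MISC_FEATURE_CONTROL for the duration of the cache-sensitive loop and cleared again afterwards. Condensed from pseudo_lock_fn() below:

	u64 bits = get_prefetch_disable_bits();	/* 0 => platform unsupported */

	if (bits) {
		__wrmsr(MSR_MISC_FEATURE_CONTROL, bits, 0x0);	/* prefetch off */
		/* ... critical section touching the pseudo-locked buffer ... */
		wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);	/* prefetch back on */
	}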
+
+/*
+ * Helper to write 64bit value to MSR without tracing. Used when
+ * use of the cache should be restricted and use of registers used
+ * for local variables avoided.
+ */
+static inline void pseudo_wrmsrl_notrace(unsigned int msr, u64 val)
+{
+ __wrmsr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
+}
+
+/**
+ * pseudo_lock_minor_get - Obtain available minor number
+ * @minor: Pointer to where new minor number will be stored
+ *
+ * A bitmask is used to track available minor numbers. Here the next free
+ * minor number is marked as unavailable and returned.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+static int pseudo_lock_minor_get(unsigned int *minor)
+{
+ unsigned long first_bit;
+
+ first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);
+
+ if (first_bit == MINORBITS)
+ return -ENOSPC;
+
+ __clear_bit(first_bit, &pseudo_lock_minor_avail);
+ *minor = first_bit;
+
+ return 0;
+}
+
+/**
+ * pseudo_lock_minor_release - Return minor number to available
+ * @minor: The minor number made available
+ */
+static void pseudo_lock_minor_release(unsigned int minor)
+{
+ __set_bit(minor, &pseudo_lock_minor_avail);
+}
+
+/**
+ * region_find_by_minor - Locate a pseudo-lock region by inode minor number
+ * @minor: The minor number of the device representing pseudo-locked region
+ *
+ * When the character device is accessed we need to determine which
+ * pseudo-locked region it belongs to. This is done by matching the minor
+ * number of the device to the pseudo-locked region to which it belongs.
+ *
+ * Minor numbers are assigned at the time a pseudo-locked region is associated
+ * with a cache instance.
+ *
+ * Return: On success return pointer to resource group owning the pseudo-locked
+ * region, NULL on failure.
+ */
+static struct rdtgroup *region_find_by_minor(unsigned int minor)
+{
+ struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;
+
+ list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+ if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
+ rdtgrp_match = rdtgrp;
+ break;
+ }
+ }
+ return rdtgrp_match;
+}
+
+/**
+ * pseudo_lock_pm_req - A power management QoS request list entry
+ * @list: Entry within the @pm_reqs list for a pseudo-locked region
+ * @req: PM QoS request
+ */
+struct pseudo_lock_pm_req {
+ struct list_head list;
+ struct dev_pm_qos_request req;
+};
+
+static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
+{
+ struct pseudo_lock_pm_req *pm_req, *next;
+
+ list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
+ dev_pm_qos_remove_request(&pm_req->req);
+ list_del(&pm_req->list);
+ kfree(pm_req);
+ }
+}
+
+/**
+ * pseudo_lock_cstates_constrain - Restrict cores from entering C6
+ *
+ * To prevent the cache from being affected by power management, entering
+ * C6 has to be avoided. This is accomplished by requesting a latency
+ * requirement lower than lowest C6 exit latency of all supported
+ * platforms as found in the cpuidle state tables in the intel_idle driver.
+ * At this time it is possible to do so with a single latency requirement
+ * for all supported platforms.
+ *
+ * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
+ * the ACPI latencies need to be considered while keeping in mind that C2
+ * may be set to map to deeper sleep states. In this case the latency
+ * requirement needs to prevent entering C2 also.
+ */
+static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
+{
+ struct pseudo_lock_pm_req *pm_req;
+ int cpu;
+ int ret;
+
+ for_each_cpu(cpu, &plr->d->cpu_mask) {
+ pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
+ if (!pm_req) {
+ rdt_last_cmd_puts("fail allocating mem for PM QoS\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ ret = dev_pm_qos_add_request(get_cpu_device(cpu),
+ &pm_req->req,
+ DEV_PM_QOS_RESUME_LATENCY,
+ 30);
+ if (ret < 0) {
+ rdt_last_cmd_printf("fail to add latency req cpu%d\n",
+ cpu);
+ kfree(pm_req);
+ ret = -1;
+ goto out_err;
+ }
+ list_add(&pm_req->list, &plr->pm_reqs);
+ }
+
+ return 0;
+
+out_err:
+ pseudo_lock_cstates_relax(plr);
+ return ret;
+}
+
+/**
+ * pseudo_lock_region_clear - Reset pseudo-lock region data
+ * @plr: pseudo-lock region
+ *
+ * All content of the pseudo-locked region is reset: any allocated memory
+ * is freed.
+ *
+ * Return: void
+ */
+static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
+{
+ plr->size = 0;
+ plr->line_size = 0;
+ kfree(plr->kmem);
+ plr->kmem = NULL;
+ plr->r = NULL;
+ if (plr->d)
+ plr->d->plr = NULL;
+ plr->d = NULL;
+ plr->cbm = 0;
+ plr->debugfs_dir = NULL;
+}
+
+/**
+ * pseudo_lock_region_init - Initialize pseudo-lock region information
+ * @plr: pseudo-lock region
+ *
+ * Called after the user provided a schemata to be pseudo-locked. On entry
+ * the &struct pseudo_lock_region has already been initialized from the
+ * schemata with the resource, domain, and capacity bitmask. Here the
+ * information required for pseudo-locking is deduced from this data and
+ * the &struct pseudo_lock_region is initialized further. This information
+ * includes:
+ * - size in bytes of the region to be pseudo-locked
+ * - cache line size to know the stride with which data needs to be accessed
+ * to be pseudo-locked
+ * - a cpu associated with the cache instance on which the pseudo-locking
+ * flow can be executed
+ *
+ * Return: 0 on success, <0 on failure. Descriptive error will be written
+ * to last_cmd_status buffer.
+ */
+static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
+{
+ struct cpu_cacheinfo *ci;
+ int ret;
+ int i;
+
+ /* Pick the first cpu we find that is associated with the cache. */
+ plr->cpu = cpumask_first(&plr->d->cpu_mask);
+
+ if (!cpu_online(plr->cpu)) {
+ rdt_last_cmd_printf("cpu %u associated with cache not online\n",
+ plr->cpu);
+ ret = -ENODEV;
+ goto out_region;
+ }
+
+ ci = get_cpu_cacheinfo(plr->cpu);
+
+ plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == plr->r->cache_level) {
+ plr->line_size = ci->info_list[i].coherency_line_size;
+ return 0;
+ }
+ }
+
+ ret = -1;
+ rdt_last_cmd_puts("unable to determine cache line size\n");
+out_region:
+ pseudo_lock_region_clear(plr);
+ return ret;
+}
+
+/**
+ * pseudo_lock_init - Initialize a pseudo-lock region
+ * @rdtgrp: resource group to which new pseudo-locked region will belong
+ *
+ * A pseudo-locked region is associated with a resource group. When this
+ * association is created the pseudo-locked region is initialized. The
+ * details of the pseudo-locked region are not known at this time so only
+ * allocation is done and association established.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static int pseudo_lock_init(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr;
+
+ plr = kzalloc(sizeof(*plr), GFP_KERNEL);
+ if (!plr)
+ return -ENOMEM;
+
+ init_waitqueue_head(&plr->lock_thread_wq);
+ INIT_LIST_HEAD(&plr->pm_reqs);
+ rdtgrp->plr = plr;
+ return 0;
+}
+
+/**
+ * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
+ * @plr: pseudo-lock region
+ *
+ * Initialize the details required to set up the pseudo-locked region and
+ * allocate the contiguous memory that will be pseudo-locked to the cache.
+ *
+ * Return: 0 on success, <0 on failure. Descriptive error will be written
+ * to last_cmd_status buffer.
+ */
+static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
+{
+ int ret;
+
+ ret = pseudo_lock_region_init(plr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We do not yet support contiguous regions larger than
+ * KMALLOC_MAX_SIZE.
+ */
+ if (plr->size > KMALLOC_MAX_SIZE) {
+ rdt_last_cmd_puts("requested region exceeds maximum size\n");
+ ret = -E2BIG;
+ goto out_region;
+ }
+
+ plr->kmem = kzalloc(plr->size, GFP_KERNEL);
+ if (!plr->kmem) {
+ rdt_last_cmd_puts("unable to allocate memory\n");
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ ret = 0;
+ goto out;
+out_region:
+ pseudo_lock_region_clear(plr);
+out:
+ return ret;
+}
+
+/**
+ * pseudo_lock_free - Free a pseudo-locked region
+ * @rdtgrp: resource group to which pseudo-locked region belonged
+ *
+ * The pseudo-locked region's resources have already been released, or not
+ * yet created at this point. Now it can be freed and disassociated from the
+ * resource group.
+ *
+ * Return: void
+ */
+static void pseudo_lock_free(struct rdtgroup *rdtgrp)
+{
+ pseudo_lock_region_clear(rdtgrp->plr);
+ kfree(rdtgrp->plr);
+ rdtgrp->plr = NULL;
+}
+
+/**
+ * pseudo_lock_fn - Load kernel memory into cache
+ * @_rdtgrp: resource group to which pseudo-lock region belongs
+ *
+ * This is the core pseudo-locking flow.
+ *
+ * First we ensure that the kernel memory cannot be found in the cache.
+ * Then, while taking care that there will be as little interference as
+ * possible, the memory to be loaded is accessed while core is running
+ * with class of service set to the bitmask of the pseudo-locked region.
+ * After this is complete no future CAT allocations will be allowed to
+ * overlap with this bitmask.
+ *
+ * Local register variables are utilized to ensure that the memory region
+ * to be locked is the only memory access made during the critical locking
+ * loop.
+ *
+ * Return: 0. Waiter on waitqueue will be woken on completion.
+ */
+static int pseudo_lock_fn(void *_rdtgrp)
+{
+ struct rdtgroup *rdtgrp = _rdtgrp;
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+ u32 rmid_p, closid_p;
+ unsigned long i;
+#ifdef CONFIG_KASAN
+ /*
+ * The registers used for local register variables are also used
+ * when KASAN is active. When KASAN is active we use a regular
+ * variable to ensure we always use a valid pointer, but the cost
+ * is that this variable will enter the cache by evicting some of the
+ * memory we are trying to lock into the cache. Thus expect a lower
+ * pseudo-locking success rate when KASAN is active.
+ */
+ unsigned int line_size;
+ unsigned int size;
+ void *mem_r;
+#else
+ register unsigned int line_size asm("esi");
+ register unsigned int size asm("edi");
+#ifdef CONFIG_X86_64
+ register void *mem_r asm("rbx");
+#else
+ register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+ /*
+ * Make sure none of the allocated memory is cached. If it is we
+ * will get a cache hit in the loop below from outside of the
+ * pseudo-locked region.
+ * wbinvd (as opposed to clflush/clflushopt) is required to
+ * increase the likelihood that the allocated cache portion will be filled
+ * with associated memory.
+ */
+ native_wbinvd();
+
+ /*
+ * Always called with interrupts enabled. By disabling interrupts
+ * ensure that we will not be preempted during this critical section.
+ */
+ local_irq_disable();
+
+ /*
+ * Call wrmsr and rdmsr as directly as possible to avoid tracing
+ * clobbering local register variables or affecting cache accesses.
+ *
+ * Disable the hardware prefetcher so that when the end of the memory
+ * being pseudo-locked is reached the hardware will not read beyond
+ * the buffer and evict pseudo-locked memory read earlier from the
+ * cache.
+ */
+ __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ closid_p = this_cpu_read(pqr_state.cur_closid);
+ rmid_p = this_cpu_read(pqr_state.cur_rmid);
+ mem_r = plr->kmem;
+ size = plr->size;
+ line_size = plr->line_size;
+ /*
+ * Critical section begin: start by writing the closid associated
+ * with the capacity bitmask of the cache region being
+ * pseudo-locked followed by reading of kernel memory to load it
+ * into the cache.
+ */
+ __wrmsr(IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
+ /*
+ * Cache was flushed earlier. Now access kernel memory to read it
+ * into cache region associated with just activated plr->closid.
+ * Loop over data twice:
+ * - In first loop the cache region is shared with the page walker
+ * as it populates the paging structure caches (including TLB).
+ * - In the second loop the paging structure caches are used and
+ * cache region is populated with the memory being referenced.
+ */
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ /*
+ * Add a barrier to prevent speculative execution of this
+ * loop reading beyond the end of the buffer.
+ */
+ rmb();
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ }
+ for (i = 0; i < size; i += line_size) {
+ /*
+ * Add a barrier to prevent speculative execution of this
+ * loop reading beyond the end of the buffer.
+ */
+ rmb();
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ }
+ /*
+ * Critical section end: restore closid with capacity bitmask that
+ * does not overlap with pseudo-locked region.
+ */
+ __wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
+
+ /* Re-enable the hardware prefetcher(s) */
+ wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ local_irq_enable();
+
+ plr->thread_done = 1;
+ wake_up_interruptible(&plr->lock_thread_wq);
+ return 0;
+}
+
+/**
+ * rdtgroup_monitor_in_progress - Test if monitoring in progress
+ * @rdtgrp: resource group being queried
+ *
+ * Return: 1 if monitor groups have been created for this resource
+ * group, 0 otherwise.
+ */
+static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
+{
+ return !list_empty(&rdtgrp->mon.crdtgrp_list);
+}
+
+/**
+ * rdtgroup_locksetup_user_restrict - Restrict user access to group
+ * @rdtgrp: resource group needing access restricted
+ *
+ * A resource group used for cache pseudo-locking cannot have cpus or tasks
+ * assigned to it. This is communicated to the user by restricting access
+ * to all the files that can be used to make such changes.
+ *
+ * Permissions restored with rdtgroup_locksetup_user_restore()
+ *
+ * Return: 0 on success, <0 on failure. If a failure occurs during the
+ * restriction of access an attempt will be made to restore permissions but
+ * the state of the mode of these files will be uncertain when a failure
+ * occurs.
+ */
+static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
+ if (ret)
+ return ret;
+
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
+ if (ret)
+ goto err_tasks;
+
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
+ if (ret)
+ goto err_cpus;
+
+ if (rdt_mon_capable) {
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
+ if (ret)
+ goto err_cpus_list;
+ }
+
+ ret = 0;
+ goto out;
+
+err_cpus_list:
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
+err_cpus:
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
+err_tasks:
+ rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_locksetup_user_restore - Restore user access to group
+ * @rdtgrp: resource group needing access restored
+ *
+ * Restore all file access previously removed using
+ * rdtgroup_locksetup_user_restrict()
+ *
+ * Return: 0 on success, <0 on failure. If a failure occurs during the
+ * restoration of access an attempt will be made to restrict permissions
+ * again but the state of the mode of these files will be uncertain when
+ * a failure occurs.
+ */
+static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
+ if (ret)
+ return ret;
+
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
+ if (ret)
+ goto err_tasks;
+
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
+ if (ret)
+ goto err_cpus;
+
+ if (rdt_mon_capable) {
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
+ if (ret)
+ goto err_cpus_list;
+ }
+
+ ret = 0;
+ goto out;
+
+err_cpus_list:
+ rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
+err_cpus:
+ rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
+err_tasks:
+ rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_locksetup_enter - Resource group enters locksetup mode
+ * @rdtgrp: resource group requested to enter locksetup mode
+ *
+ * A resource group enters locksetup mode to reflect that it would be used
+ * to represent a pseudo-locked region and is in the process of being set
+ * up to do so. A resource group used for a pseudo-locked region would
+ * lose the closid associated with it so we cannot allow it to have any
+ * tasks or cpus assigned nor permit tasks or cpus to be assigned in the
+ * future. Monitoring of a pseudo-locked region is not allowed either.
+ *
+ * The above and more restrictions on a pseudo-locked region are checked
+ * for and enforced before the resource group enters the locksetup mode.
+ *
+ * Returns: 0 if the resource group successfully entered locksetup mode, <0
+ * on failure. On failure the last_cmd_status buffer is updated with text to
+ * communicate details of failure to the user.
+ */
+int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ /*
+ * The default resource group can neither be removed nor lose the
+ * default closid associated with it.
+ */
+ if (rdtgrp == &rdtgroup_default) {
+ rdt_last_cmd_puts("cannot pseudo-lock default group\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Cache Pseudo-locking not supported when CDP is enabled.
+ *
+ * Some things to consider if you would like to enable this
+ * support (using L3 CDP as example):
+ * - When CDP is enabled two separate resources are exposed,
+ * L3DATA and L3CODE, but they are actually on the same cache.
+ * The implication for pseudo-locking is that if a
+ * pseudo-locked region is created on a domain of one
+ * resource (eg. L3CODE), then a pseudo-locked region cannot
+ * be created on that same domain of the other resource
+ * (eg. L3DATA). This is because the creation of a
+ * pseudo-locked region involves a call to wbinvd that will
+ * affect all cache allocations on particular domain.
+ * - Considering the previous, it may be possible to only
+ * expose one of the CDP resources to pseudo-locking and
+ *   hide the other. For example, we could consider exposing
+ *   only L3DATA; since the L3 cache is unified it is still
+ *   possible to place instructions there and execute them.
+ * - If only one region is exposed to pseudo-locking we should
+ * still keep in mind that availability of a portion of cache
+ * for pseudo-locking should take into account both resources.
+ * Similarly, if a pseudo-locked region is created in one
+ * resource, the portion of cache used by it should be made
+ * unavailable to all future allocations from both resources.
+ */
+ if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
+ rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
+ rdt_last_cmd_puts("CDP enabled\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Not knowing the bits to disable prefetching implies that this
+ * platform does not support Cache Pseudo-Locking.
+ */
+ prefetch_disable_bits = get_prefetch_disable_bits();
+ if (prefetch_disable_bits == 0) {
+ rdt_last_cmd_puts("pseudo-locking not supported\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_monitor_in_progress(rdtgrp)) {
+ rdt_last_cmd_puts("monitoring in progress\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_tasks_assigned(rdtgrp)) {
+ rdt_last_cmd_puts("tasks assigned to resource group\n");
+ return -EINVAL;
+ }
+
+ if (!cpumask_empty(&rdtgrp->cpu_mask)) {
+ rdt_last_cmd_puts("CPUs assigned to resource group\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
+ rdt_last_cmd_puts("unable to modify resctrl permissions\n");
+ return -EIO;
+ }
+
+ ret = pseudo_lock_init(rdtgrp);
+ if (ret) {
+ rdt_last_cmd_puts("unable to init pseudo-lock region\n");
+ goto out_release;
+ }
+
+ /*
+ * If this system is capable of monitoring, an RMID would have been
+ * allocated when the control group was created. It is no longer needed
+ * now that this group will be used for pseudo-locking. This call is
+ * safe on platforms not capable of monitoring.
+ */
+ free_rmid(rdtgrp->mon.rmid);
+
+ ret = 0;
+ goto out;
+
+out_release:
+ rdtgroup_locksetup_user_restore(rdtgrp);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_locksetup_exit - resource group exits locksetup mode
+ * @rdtgrp: resource group
+ *
+ * When a resource group exits locksetup mode the earlier restrictions are
+ * lifted.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ if (rdt_mon_capable) {
+ ret = alloc_rmid();
+ if (ret < 0) {
+ rdt_last_cmd_puts("out of RMIDs\n");
+ return ret;
+ }
+ rdtgrp->mon.rmid = ret;
+ }
+
+ ret = rdtgroup_locksetup_user_restore(rdtgrp);
+ if (ret) {
+ free_rmid(rdtgrp->mon.rmid);
+ return ret;
+ }
+
+ pseudo_lock_free(rdtgrp);
+ return 0;
+}
+
+/**
+ * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or a portion of it is pseudo-locked
+ * @d: RDT domain
+ * @_cbm: CBM to test
+ *
+ * @d represents a cache instance and @_cbm a capacity bitmask that is
+ * considered for it. Determine if @_cbm overlaps with any existing
+ * pseudo-locked region on @d.
+ *
+ * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * otherwise.
+ */
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+{
+ unsigned long *cbm = (unsigned long *)&_cbm;
+ unsigned long *cbm_b;
+ unsigned int cbm_len;
+
+ if (d->plr) {
+ cbm_len = d->plr->r->cache.cbm_len;
+ cbm_b = (unsigned long *)&d->plr->cbm;
+ if (bitmap_intersects(cbm, cbm_b, cbm_len))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
+ * @d: RDT domain under test
+ *
+ * The setup of a pseudo-locked region affects all cache instances within
+ * the hierarchy of the region. It is thus essential to know if any
+ * pseudo-locked regions exist within a cache hierarchy to prevent any
+ * attempts to create new pseudo-locked regions in the same hierarchy.
+ *
+ * Return: true if a pseudo-locked region exists in the hierarchy of @d or
+ * if it is not possible to test due to a memory allocation issue,
+ * false otherwise.
+ */
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
+{
+ cpumask_var_t cpu_with_psl;
+ struct rdt_resource *r;
+ struct rdt_domain *d_i;
+ bool ret = false;
+
+ if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
+ return true;
+
+ /*
+ * First determine which cpus have pseudo-locked regions
+ * associated with them.
+ */
+ for_each_alloc_enabled_rdt_resource(r) {
+ list_for_each_entry(d_i, &r->domains, list) {
+ if (d_i->plr)
+ cpumask_or(cpu_with_psl, cpu_with_psl,
+ &d_i->cpu_mask);
+ }
+ }
+
+ /*
+ * Next test if new pseudo-locked region would intersect with
+ * existing region.
+ */
+ if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
+ ret = true;
+
+ free_cpumask_var(cpu_with_psl);
+ return ret;
+}
+
+/**
+ * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
+ * @_plr: pseudo-lock region to measure
+ *
+ * There is no deterministic way to test if a memory region is cached. One
+ * way is to measure how long it takes to read the memory: the speed of
+ * access indicates how close to the CPU the data is. Moreover, if the
+ * prefetcher is disabled and the memory is read at a stride
+ * of half the cache line, then a cache miss will be easy to spot since the
+ * read of the first half would be significantly slower than the read of
+ * the second half.
+ *
+ * Return: 0. Waiter on waitqueue will be woken on completion.
+ */
+static int measure_cycles_lat_fn(void *_plr)
+{
+ struct pseudo_lock_region *plr = _plr;
+ unsigned long i;
+ u64 start, end;
+#ifdef CONFIG_KASAN
+ /*
+ * The registers used for local register variables are also used
+ * when KASAN is active. When KASAN is active we use a regular
+ * variable to ensure we always use a valid pointer to access memory.
+ * The cost is that accessing this pointer, which could be in
+ * cache, will be included in the measurement of memory read latency.
+ */
+ void *mem_r;
+#else
+#ifdef CONFIG_X86_64
+ register void *mem_r asm("rbx");
+#else
+ register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+ local_irq_disable();
+ /*
+ * The wrmsr call may be reordered with the assignment below it.
+ * Call wrmsr as directly as possible to avoid tracing clobbering
+ * the local register variable used for the memory pointer.
+ */
+ __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ mem_r = plr->kmem;
+ /*
+ * Dummy execute of the time measurement to load the needed
+ * instructions into the L1 instruction cache.
+ */
+ start = rdtsc_ordered();
+ for (i = 0; i < plr->size; i += 32) {
+ start = rdtsc_ordered();
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ end = rdtsc_ordered();
+ trace_pseudo_lock_mem_latency((u32)(end - start));
+ }
+ wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ local_irq_enable();
+ plr->thread_done = 1;
+ wake_up_interruptible(&plr->lock_thread_wq);
+ return 0;
+}
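
The stride-of-half-a-cache-line trick above can be approximated from user space to get a feel for the data. The sketch below is illustrative only and makes several assumptions: __rdtscp() stands in for the kernel's rdtsc_ordered(), the region size is invented, and the hardware prefetchers stay enabled since the MSR write is only possible from a kernel context like the thread above.

    /* Illustrative user-space approximation -- not part of this patch. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <x86intrin.h>

    int main(void)
    {
            size_t size = 256 * 1024;       /* assumed region size */
            volatile unsigned char *mem = malloc(size);
            unsigned int aux;
            uint64_t start, end;
            size_t i;

            if (!mem)
                    return 1;
            for (i = 0; i < size; i += 32) {        /* half a 64B line */
                    start = __rdtscp(&aux);
                    (void)mem[i];                   /* one load, as in the asm */
                    end = __rdtscp(&aux);
                    printf("%zu %llu\n", i, (unsigned long long)(end - start));
            }
            free((void *)mem);
            return 0;
    }
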
+
+static int measure_cycles_perf_fn(void *_plr)
+{
+ unsigned long long l3_hits = 0, l3_miss = 0;
+ u64 l3_hit_bits = 0, l3_miss_bits = 0;
+ struct pseudo_lock_region *plr = _plr;
+ unsigned long long l2_hits, l2_miss;
+ u64 l2_hit_bits, l2_miss_bits;
+ unsigned long i;
+#ifdef CONFIG_KASAN
+ /*
+ * The registers used for local register variables are also used
+ * when KASAN is active. When KASAN is active we use regular variables
+ * at the cost of including cache access latency to these variables
+ * in the measurements.
+ */
+ unsigned int line_size;
+ unsigned int size;
+ void *mem_r;
+#else
+ register unsigned int line_size asm("esi");
+ register unsigned int size asm("edi");
+#ifdef CONFIG_X86_64
+ register void *mem_r asm("rbx");
+#else
+ register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+ /*
+ * Non-architectural event for the Goldmont Microarchitecture
+ * from Intel x86 Architecture Software Developer Manual (SDM):
+ * MEM_LOAD_UOPS_RETIRED D1H (event number)
+ * Umask values:
+ * L1_HIT 01H
+ * L2_HIT 02H
+ * L1_MISS 08H
+ * L2_MISS 10H
+ *
+ * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
+ * has two "no fix" errata associated with it: BDM35 and BDM100. On
+ * this platform we use the following events instead:
+ * L2_RQSTS 24H (Documented in https://download.01.org/perfmon/BDW/)
+ * REFERENCES FFH
+ * MISS 3FH
+ * LONGEST_LAT_CACHE 2EH (Documented in SDM)
+ * REFERENCE 4FH
+ * MISS 41H
+ */
+
+ /*
+ * Start by setting flags for IA32_PERFEVTSELx:
+ * OS (Operating system mode) 0x2
+ * INT (APIC interrupt enable) 0x10
+ * EN (Enable counter) 0x40
+ *
+ * Then add the Umask value and event number to select performance
+ * event.
+ */
+
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ l2_hit_bits = (0x52ULL << 16) | (0x2 << 8) | 0xd1;
+ l2_miss_bits = (0x52ULL << 16) | (0x10 << 8) | 0xd1;
+ break;
+ case INTEL_FAM6_BROADWELL_X:
+ /* On BDW the l2_hit_bits count references, not hits */
+ l2_hit_bits = (0x52ULL << 16) | (0xff << 8) | 0x24;
+ l2_miss_bits = (0x52ULL << 16) | (0x3f << 8) | 0x24;
+ /* On BDW the l3_hit_bits count references, not hits */
+ l3_hit_bits = (0x52ULL << 16) | (0x4f << 8) | 0x2e;
+ l3_miss_bits = (0x52ULL << 16) | (0x41 << 8) | 0x2e;
+ break;
+ default:
+ goto out;
+ }
+
+ local_irq_disable();
+ /*
+ * Call wrmsr directly to prevent the local register variables from
+ * being overwritten due to reordering of their assignment with
+ * the wrmsr calls.
+ */
+ __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ /* Disable events and reset counters */
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 1, 0x0);
+ if (l3_hit_bits > 0) {
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 2, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 3, 0x0);
+ }
+ /* Set and enable the L2 counters */
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, l2_hit_bits);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, l2_miss_bits);
+ if (l3_hit_bits > 0) {
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
+ l3_hit_bits);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+ l3_miss_bits);
+ }
+ mem_r = plr->kmem;
+ size = plr->size;
+ line_size = plr->line_size;
+ for (i = 0; i < size; i += line_size) {
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ }
+ /*
+ * Call wrmsr directly (no tracing) to not influence
+ * the cache access counters as they are disabled.
+ */
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0,
+ l2_hit_bits & ~(0x40ULL << 16));
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1,
+ l2_miss_bits & ~(0x40ULL << 16));
+ if (l3_hit_bits > 0) {
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
+ l3_hit_bits & ~(0x40ULL << 16));
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+ l3_miss_bits & ~(0x40ULL << 16));
+ }
+ l2_hits = native_read_pmc(0);
+ l2_miss = native_read_pmc(1);
+ if (l3_hit_bits > 0) {
+ l3_hits = native_read_pmc(2);
+ l3_miss = native_read_pmc(3);
+ }
+ wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ local_irq_enable();
+ /*
+ * On BDW we count references and misses, so we need to adjust. Sometimes
+ * the "hits" counter reports slightly more than the references, for
+ * example, x references but x + 1 hits. To avoid reporting invalid
+ * hit values in this case we treat that as misses equal to
+ * references.
+ */
+ if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
+ l2_hits -= (l2_miss > l2_hits ? l2_hits : l2_miss);
+ trace_pseudo_lock_l2(l2_hits, l2_miss);
+ if (l3_hit_bits > 0) {
+ if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
+ l3_hits -= (l3_miss > l3_hits ? l3_hits : l3_miss);
+ trace_pseudo_lock_l3(l3_hits, l3_miss);
+ }
+
+out:
+ plr->thread_done = 1;
+ wake_up_interruptible(&plr->lock_thread_wq);
+ return 0;
+}
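
The magic constants in the switch statement decompose according to the IA32_PERFEVTSELx layout described in the comment above: event select in bits 0-7, unit mask in bits 8-15, and control flags from bit 16 up, so 0x52 is EN (0x40) | INT (0x10) | OS (0x2). The same layout explains why the later disable writes simply mask off (0x40ULL << 16), clearing only the EN bit. A small helper, shown purely for illustration, makes the composition explicit:

    #include <stdint.h>

    #define PERFEVTSEL_OS   (0x02ULL << 16) /* count in ring 0 */
    #define PERFEVTSEL_INT  (0x10ULL << 16) /* APIC interrupt enable */
    #define PERFEVTSEL_EN   (0x40ULL << 16) /* enable counter */

    static uint64_t perfevtsel(uint8_t event, uint8_t umask)
    {
            return PERFEVTSEL_OS | PERFEVTSEL_INT | PERFEVTSEL_EN |
                   ((uint64_t)umask << 8) | event;
    }

    /*
     * Goldmont MEM_LOAD_UOPS_RETIRED.L2_HIT (event 0xd1, umask 0x02):
     * perfevtsel(0xd1, 0x02) == (0x52ULL << 16) | (0x2 << 8) | 0xd1
     */
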
+
+/**
+ * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
+ * @rdtgrp: Resource group to which the pseudo-locked region belongs.
+ * @sel:    Select the measurement: 1 runs the memory access latency probe,
+ *          2 runs the performance counter based hit/miss measurement.
+ *
+ * The measurement of latency to access a pseudo-locked region should be
+ * done from a cpu that is associated with that pseudo-locked region.
+ * Determine which cpu is associated with this region and start a thread on
+ * that cpu to perform the measurement, wait for that thread to complete.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+ struct task_struct *thread;
+ unsigned int cpu;
+ int ret = -1;
+
+ cpus_read_lock();
+ mutex_lock(&rdtgroup_mutex);
+
+ if (rdtgrp->flags & RDT_DELETED) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ plr->thread_done = 0;
+ cpu = cpumask_first(&plr->d->cpu_mask);
+ if (!cpu_online(cpu)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (sel == 1)
+ thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
+ cpu_to_node(cpu),
+ "pseudo_lock_measure/%u",
+ cpu);
+ else if (sel == 2)
+ thread = kthread_create_on_node(measure_cycles_perf_fn, plr,
+ cpu_to_node(cpu),
+ "pseudo_lock_measure/%u",
+ cpu);
+ else
+ goto out;
+
+ if (IS_ERR(thread)) {
+ ret = PTR_ERR(thread);
+ goto out;
+ }
+ kthread_bind(thread, cpu);
+ wake_up_process(thread);
+
+ ret = wait_event_interruptible(plr->lock_thread_wq,
+ plr->thread_done == 1);
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+
+out:
+ mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
+ return ret;
+}
+
+static ssize_t pseudo_lock_measure_trigger(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rdtgroup *rdtgrp = file->private_data;
+ size_t buf_size;
+ char buf[32];
+ int ret;
+ int sel;
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ ret = kstrtoint(buf, 10, &sel);
+ if (ret == 0) {
+ if (sel != 1)
+ return -EINVAL;
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (ret)
+ return ret;
+ ret = pseudo_lock_measure_cycles(rdtgrp, sel);
+ if (ret == 0)
+ ret = count;
+ debugfs_file_put(file->f_path.dentry);
+ }
+
+ return ret;
+}
+
+static const struct file_operations pseudo_measure_fops = {
+ .write = pseudo_lock_measure_trigger,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
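
With the file operations above registered, a measurement is triggered by writing "1" to the group's pseudo_lock_measure file, which (given the debugfs directory created later in rdtgroup_pseudo_lock_create()) should appear as /sys/kernel/debug/resctrl/<group>/pseudo_lock_measure. Note that pseudo_lock_measure_trigger() accepts only "1", so just the latency probe is reachable from user space here; the perf-counter path (sel == 2) exists in pseudo_lock_measure_cycles() but is not yet exposed. Results are reported through the trace events defined in intel_rdt_pseudo_lock_event.h rather than through the written file.
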
+
+/**
+ * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
+ * @rdtgrp: resource group to which pseudo-lock region belongs
+ *
+ * Called when a resource group in the pseudo-locksetup mode receives a
+ * valid schemata that should be pseudo-locked. Since the resource group is
+ * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
+ * allocated and initialized with the essential information. If a failure
+ * occurs the resource group remains in the pseudo-locksetup mode with the
+ * &struct pseudo_lock_region associated with it, but cleared from all
+ * information and ready for the user to re-attempt pseudo-locking by
+ * writing the schemata again.
+ *
+ * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
+ * on failure. Descriptive error will be written to last_cmd_status buffer.
+ */
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+ struct task_struct *thread;
+ unsigned int new_minor;
+ struct device *dev;
+ int ret;
+
+ ret = pseudo_lock_region_alloc(plr);
+ if (ret < 0)
+ return ret;
+
+ ret = pseudo_lock_cstates_constrain(plr);
+ if (ret < 0) {
+ ret = -EINVAL;
+ goto out_region;
+ }
+
+ plr->thread_done = 0;
+
+ thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
+ cpu_to_node(plr->cpu),
+ "pseudo_lock/%u", plr->cpu);
+ if (IS_ERR(thread)) {
+ ret = PTR_ERR(thread);
+ rdt_last_cmd_printf("locking thread returned error %d\n", ret);
+ goto out_cstates;
+ }
+
+ kthread_bind(thread, plr->cpu);
+ wake_up_process(thread);
+
+ ret = wait_event_interruptible(plr->lock_thread_wq,
+ plr->thread_done == 1);
+ if (ret < 0) {
+ /*
+ * If the thread does not get on the CPU for whatever
+ * reason and the process which sets up the region is
+ * interrupted then this will leave the thread in runnable
+ * state and once it gets on the CPU it will dereference
+ * the cleared, but not freed, plr struct resulting in an
+ * empty pseudo-locking loop.
+ */
+ rdt_last_cmd_puts("locking thread interrupted\n");
+ goto out_cstates;
+ }
+
+ ret = pseudo_lock_minor_get(&new_minor);
+ if (ret < 0) {
+ rdt_last_cmd_puts("unable to obtain a new minor number\n");
+ goto out_cstates;
+ }
+
+ /*
+ * Unlock access but do not release the reference. The
+ * pseudo-locked region will still be here on return.
+ *
+ * The mutex has to be released temporarily to avoid a potential
+ * deadlock with the mm->mmap_sem semaphore which is obtained in
+ * the device_create() and debugfs_create_dir() callpath below
+ * as well as before the mmap() callback is called.
+ */
+ mutex_unlock(&rdtgroup_mutex);
+
+ if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
+ plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
+ debugfs_resctrl);
+ if (!IS_ERR_OR_NULL(plr->debugfs_dir))
+ debugfs_create_file("pseudo_lock_measure", 0200,
+ plr->debugfs_dir, rdtgrp,
+ &pseudo_measure_fops);
+ }
+
+ dev = device_create(pseudo_lock_class, NULL,
+ MKDEV(pseudo_lock_major, new_minor),
+ rdtgrp, "%s", rdtgrp->kn->name);
+
+ mutex_lock(&rdtgroup_mutex);
+
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ rdt_last_cmd_printf("failed to create character device: %d\n",
+ ret);
+ goto out_debugfs;
+ }
+
+ /* We released the mutex - check if group was removed while we did so */
+ if (rdtgrp->flags & RDT_DELETED) {
+ ret = -ENODEV;
+ goto out_device;
+ }
+
+ plr->minor = new_minor;
+
+ rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
+ closid_free(rdtgrp->closid);
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);
+
+ ret = 0;
+ goto out;
+
+out_device:
+ device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
+out_debugfs:
+ debugfs_remove_recursive(plr->debugfs_dir);
+ pseudo_lock_minor_release(new_minor);
+out_cstates:
+ pseudo_lock_cstates_relax(plr);
+out_region:
+ pseudo_lock_region_clear(plr);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
+ * @rdtgrp: resource group to which the pseudo-locked region belongs
+ *
+ * The removal of a pseudo-locked region can be initiated when the resource
+ * group is removed via "rmdir" from user space or when the resctrl
+ * filesystem is unmounted. On removal the resource group does not go back
+ * to pseudo-locksetup mode before it is removed, instead it is removed
+ * directly. There is thus asymmetry with the creation: the
+ * &struct pseudo_lock_region is removed here while it was not created in
+ * rdtgroup_pseudo_lock_create().
+ *
+ * Return: void
+ */
+void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ /*
+ * Default group cannot be a pseudo-locked region so we can
+ * free closid here.
+ */
+ closid_free(rdtgrp->closid);
+ goto free;
+ }
+
+ pseudo_lock_cstates_relax(plr);
+ debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
+ device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
+ pseudo_lock_minor_release(plr->minor);
+
+free:
+ pseudo_lock_free(rdtgrp);
+}
+
+static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
+{
+ struct rdtgroup *rdtgrp;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ rdtgrp = region_find_by_minor(iminor(inode));
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+
+ filp->private_data = rdtgrp;
+ atomic_inc(&rdtgrp->waitcount);
+ /* Perform a non-seekable open - llseek is not supported */
+ filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+
+ mutex_unlock(&rdtgroup_mutex);
+
+ return 0;
+}
+
+static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
+{
+ struct rdtgroup *rdtgrp;
+
+ mutex_lock(&rdtgroup_mutex);
+ rdtgrp = filp->private_data;
+ WARN_ON(!rdtgrp);
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+ filp->private_data = NULL;
+ atomic_dec(&rdtgrp->waitcount);
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
+
+static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
+{
+ /* Not supported */
+ return -EINVAL;
+}
+
+static const struct vm_operations_struct pseudo_mmap_ops = {
+ .mremap = pseudo_lock_dev_mremap,
+};
+
+static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ struct pseudo_lock_region *plr;
+ struct rdtgroup *rdtgrp;
+ unsigned long physical;
+ unsigned long psize;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ rdtgrp = filp->private_data;
+ WARN_ON(!rdtgrp);
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+
+ plr = rdtgrp->plr;
+
+ /*
+ * Task is required to run with affinity to the cpus associated
+ * with the pseudo-locked region. If this is not the case the task
+ * may be scheduled elsewhere and invalidate entries in the
+ * pseudo-locked region.
+ */
+ if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EINVAL;
+ }
+
+ physical = __pa(plr->kmem) >> PAGE_SHIFT;
+ psize = plr->size - off;
+
+ if (off > plr->size) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENOSPC;
+ }
+
+ /*
+ * Ensure changes are carried directly to the memory being mapped,
+ * do not allow copy-on-write mapping.
+ */
+ if (!(vma->vm_flags & VM_SHARED)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EINVAL;
+ }
+
+ if (vsize > psize) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENOSPC;
+ }
+
+ memset(plr->kmem + off, 0, vsize);
+
+ if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
+ vsize, vma->vm_page_prot)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EAGAIN;
+ }
+ vma->vm_ops = &pseudo_mmap_ops;
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
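
A consumer of the resulting character device has to bind itself to the region's CPUs before calling mmap(), since the handler above rejects any caller whose affinity mask is not a subset of plr->d->cpu_mask, and it must map with MAP_SHARED. A minimal sketch, with the device name, CPU number, and mapping size all assumed for illustration:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            cpu_set_t cpus;
            void *mem;
            int fd;

            CPU_ZERO(&cpus);
            CPU_SET(2, &cpus);      /* assumed CPU of the pseudo-locked region */
            if (sched_setaffinity(0, sizeof(cpus), &cpus))
                    return 1;

            fd = open("/dev/pseudo_lock/newlock", O_RDWR); /* assumed name */
            if (fd < 0)
                    return 1;

            /* MAP_SHARED is mandatory: the handler refuses COW mappings */
            mem = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (mem == MAP_FAILED)
                    return 1;

            /* loads and stores to mem now hit the pseudo-locked cache lines */

            munmap(mem, 4096);
            close(fd);
            return 0;
    }
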
+
+static const struct file_operations pseudo_lock_dev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = NULL,
+ .write = NULL,
+ .open = pseudo_lock_dev_open,
+ .release = pseudo_lock_dev_release,
+ .mmap = pseudo_lock_dev_mmap,
+};
+
+static char *pseudo_lock_devnode(struct device *dev, umode_t *mode)
+{
+ struct rdtgroup *rdtgrp;
+
+ rdtgrp = dev_get_drvdata(dev);
+ if (mode)
+ *mode = 0600;
+ return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
+}
+
+int rdt_pseudo_lock_init(void)
+{
+ int ret;
+
+ ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
+ if (ret < 0)
+ return ret;
+
+ pseudo_lock_major = ret;
+
+ pseudo_lock_class = class_create(THIS_MODULE, "pseudo_lock");
+ if (IS_ERR(pseudo_lock_class)) {
+ ret = PTR_ERR(pseudo_lock_class);
+ unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+ return ret;
+ }
+
+ pseudo_lock_class->devnode = pseudo_lock_devnode;
+ return 0;
+}
+
+void rdt_pseudo_lock_release(void)
+{
+ class_destroy(pseudo_lock_class);
+ pseudo_lock_class = NULL;
+ unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+ pseudo_lock_major = 0;
+}
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h
new file mode 100644
index 000000000000..2c041e6d9f05
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM resctrl
+
+#if !defined(_TRACE_PSEUDO_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PSEUDO_LOCK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(pseudo_lock_mem_latency,
+ TP_PROTO(u32 latency),
+ TP_ARGS(latency),
+ TP_STRUCT__entry(__field(u32, latency)),
+ TP_fast_assign(__entry->latency = latency),
+ TP_printk("latency=%u", __entry->latency)
+ );
+
+TRACE_EVENT(pseudo_lock_l2,
+ TP_PROTO(u64 l2_hits, u64 l2_miss),
+ TP_ARGS(l2_hits, l2_miss),
+ TP_STRUCT__entry(__field(u64, l2_hits)
+ __field(u64, l2_miss)),
+ TP_fast_assign(__entry->l2_hits = l2_hits;
+ __entry->l2_miss = l2_miss;),
+ TP_printk("hits=%llu miss=%llu",
+ __entry->l2_hits, __entry->l2_miss));
+
+TRACE_EVENT(pseudo_lock_l3,
+ TP_PROTO(u64 l3_hits, u64 l3_miss),
+ TP_ARGS(l3_hits, l3_miss),
+ TP_STRUCT__entry(__field(u64, l3_hits)
+ __field(u64, l3_miss)),
+ TP_fast_assign(__entry->l3_hits = l3_hits;
+ __entry->l3_miss = l3_miss;),
+ TP_printk("hits=%llu miss=%llu",
+ __entry->l3_hits, __entry->l3_miss));
+
+#endif /* _TRACE_PSEUDO_LOCK_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event
+#include <trace/define_trace.h>
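
Since TRACE_SYSTEM is "resctrl", these three events land under events/resctrl/ in tracefs once the kernel is built with tracing. Enabling pseudo_lock_mem_latency before writing to the pseudo_lock_measure debugfs file yields one sample per cache line probed, printed as "latency=<cycles>" per the TP_printk() format above; the pseudo_lock_l2/pseudo_lock_l3 events likewise report "hits=... miss=..." totals from the perf-counter path.
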
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 749856a2e736..b799c00bef09 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -20,7 +20,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cacheinfo.h>
#include <linux/cpu.h>
+#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
@@ -55,6 +57,8 @@ static struct kernfs_node *kn_mondata;
static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];
+struct dentry *debugfs_resctrl;
+
void rdt_last_cmd_clear(void)
{
lockdep_assert_held(&rdtgroup_mutex);
@@ -121,11 +125,65 @@ static int closid_alloc(void)
return closid;
}
-static void closid_free(int closid)
+void closid_free(int closid)
{
closid_free_map |= 1 << closid;
}
+/**
+ * closid_allocated - test if provided closid is in use
+ * @closid: closid to be tested
+ *
+ * Return: true if @closid is currently associated with a resource group,
+ * false if @closid is free
+ */
+static bool closid_allocated(unsigned int closid)
+{
+ return (closid_free_map & (1 << closid)) == 0;
+}
+
+/**
+ * rdtgroup_mode_by_closid - Return mode of resource group with closid
+ * @closid: closid of the resource group
+ *
+ * Each resource group is associated with a @closid. Here the mode
+ * of a resource group can be queried by searching for it using its closid.
+ *
+ * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
+ */
+enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
+{
+ struct rdtgroup *rdtgrp;
+
+ list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+ if (rdtgrp->closid == closid)
+ return rdtgrp->mode;
+ }
+
+ return RDT_NUM_MODES;
+}
+
+static const char * const rdt_mode_str[] = {
+ [RDT_MODE_SHAREABLE] = "shareable",
+ [RDT_MODE_EXCLUSIVE] = "exclusive",
+ [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
+ [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
+};
+
+/**
+ * rdtgroup_mode_str - Return the string representation of mode
+ * @mode: the resource group mode as &enum rdtgroup_mode
+ *
+ * Return: string representation of valid mode, "unknown" otherwise
+ */
+static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
+{
+ if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
+ return "unknown";
+
+ return rdt_mode_str[mode];
+}
+
/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
@@ -146,6 +204,7 @@ static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
int ret;
kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
0, rft->kf_ops, rft, NULL, NULL);
if (IS_ERR(kn))
return PTR_ERR(kn);
@@ -207,8 +266,12 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (rdtgrp) {
- seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
- cpumask_pr_args(&rdtgrp->cpu_mask));
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
+ cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
+ else
+ seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
+ cpumask_pr_args(&rdtgrp->cpu_mask));
} else {
ret = -ENOENT;
}
@@ -394,6 +457,13 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
goto unlock;
}
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("pseudo-locking in progress\n");
+ goto unlock;
+ }
+
if (is_cpu_list(of))
ret = cpulist_parse(buf, newmask);
else
@@ -509,6 +579,32 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
return ret;
}
+/**
+ * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
+ * @r: Resource group
+ *
+ * Return: 1 if tasks have been assigned to @r, 0 otherwise
+ */
+int rdtgroup_tasks_assigned(struct rdtgroup *r)
+{
+ struct task_struct *p, *t;
+ int ret = 0;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ rcu_read_lock();
+ for_each_process_thread(p, t) {
+ if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
+ (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
+ ret = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static int rdtgroup_task_write_permission(struct task_struct *task,
struct kernfs_open_file *of)
{
@@ -570,13 +666,22 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return -EINVAL;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
rdt_last_cmd_clear();
- if (rdtgrp)
- ret = rdtgroup_move_task(pid, rdtgrp, of);
- else
- ret = -ENOENT;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("pseudo-locking in progress\n");
+ goto unlock;
+ }
+ ret = rdtgroup_move_task(pid, rdtgrp, of);
+
+unlock:
rdtgroup_kn_unlock(of->kn);
return ret ?: nbytes;
@@ -662,6 +767,94 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of,
return 0;
}
+/**
+ * rdt_bit_usage_show - Display current usage of resources
+ *
+ * A domain is a shared resource that can now be allocated differently. Here
+ * we display the current regions of the domain as an annotated bitmask.
+ * For each domain of this resource its allocation bitmask
+ * is annotated as below to indicate the current usage of the corresponding bit:
+ * 0 - currently unused
+ * X - currently available for sharing and used by software and hardware
+ * H - currently used by hardware only but available for software use
+ * S - currently used and shareable by software only
+ * E - currently used exclusively by one resource group
+ * P - currently pseudo-locked by one resource group
+ */
+static int rdt_bit_usage_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+ u32 sw_shareable = 0, hw_shareable = 0;
+ u32 exclusive = 0, pseudo_locked = 0;
+ struct rdt_domain *dom;
+ int i, hwb, swb, excl, psl;
+ enum rdtgrp_mode mode;
+ bool sep = false;
+ u32 *ctrl;
+
+ mutex_lock(&rdtgroup_mutex);
+ hw_shareable = r->cache.shareable_bits;
+ list_for_each_entry(dom, &r->domains, list) {
+ if (sep)
+ seq_putc(seq, ';');
+ ctrl = dom->ctrl_val;
+ sw_shareable = 0;
+ exclusive = 0;
+ seq_printf(seq, "%d=", dom->id);
+ for (i = 0; i < r->num_closid; i++, ctrl++) {
+ if (!closid_allocated(i))
+ continue;
+ mode = rdtgroup_mode_by_closid(i);
+ switch (mode) {
+ case RDT_MODE_SHAREABLE:
+ sw_shareable |= *ctrl;
+ break;
+ case RDT_MODE_EXCLUSIVE:
+ exclusive |= *ctrl;
+ break;
+ case RDT_MODE_PSEUDO_LOCKSETUP:
+ /*
+ * RDT_MODE_PSEUDO_LOCKSETUP is possible
+ * here but not included since the CBM
+ * associated with this CLOSID in this mode
+ * is not initialized and no task or cpu can be
+ * assigned this CLOSID.
+ */
+ break;
+ case RDT_MODE_PSEUDO_LOCKED:
+ case RDT_NUM_MODES:
+ WARN(1,
+ "invalid mode for closid %d\n", i);
+ break;
+ }
+ }
+ for (i = r->cache.cbm_len - 1; i >= 0; i--) {
+ pseudo_locked = dom->plr ? dom->plr->cbm : 0;
+ hwb = test_bit(i, (unsigned long *)&hw_shareable);
+ swb = test_bit(i, (unsigned long *)&sw_shareable);
+ excl = test_bit(i, (unsigned long *)&exclusive);
+ psl = test_bit(i, (unsigned long *)&pseudo_locked);
+ if (hwb && swb)
+ seq_putc(seq, 'X');
+ else if (hwb && !swb)
+ seq_putc(seq, 'H');
+ else if (!hwb && swb)
+ seq_putc(seq, 'S');
+ else if (excl)
+ seq_putc(seq, 'E');
+ else if (psl)
+ seq_putc(seq, 'P');
+ else /* Unused bits remain */
+ seq_putc(seq, '0');
+ }
+ sep = true;
+ }
+ seq_putc(seq, '\n');
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
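
As a hypothetical reading of the output format: the inner loop walks from cbm_len - 1 down to 0, so the leftmost character is the highest-order bit. A domain line such as "0=SSSSSSSS0000HHHH" (invented values for a 16-bit CBM) would thus mean the top eight portions are in use by and shareable among software, the middle four are unused, and the lowest four are used by hardware but still available for software use.
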
+
static int rdt_min_bw_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
@@ -740,6 +933,269 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
return nbytes;
}
+/*
+ * rdtgroup_mode_show - Display mode of this resource group
+ */
+static int rdtgroup_mode_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));
+
+ rdtgroup_kn_unlock(of->kn);
+ return 0;
+}
+
+/**
+ * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with others
+ * @r: Resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Checks if provided @cbm intended to be used for @closid on domain
+ * @d overlaps with any other closids or other hardware usage associated
+ * with this domain. If @exclusive is true then only overlaps with
+ * resource groups in exclusive mode will be considered. If @exclusive
+ * is false then overlaps with any resource group or hardware entities
+ * will be considered.
+ *
+ * Return: false if CBM does not overlap, true if it does.
+ */
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+ u32 _cbm, int closid, bool exclusive)
+{
+ unsigned long *cbm = (unsigned long *)&_cbm;
+ unsigned long *ctrl_b;
+ enum rdtgrp_mode mode;
+ u32 *ctrl;
+ int i;
+
+ /* Check for any overlap with regions used by hardware directly */
+ if (!exclusive) {
+ if (bitmap_intersects(cbm,
+ (unsigned long *)&r->cache.shareable_bits,
+ r->cache.cbm_len))
+ return true;
+ }
+
+ /* Check for overlap with other resource groups */
+ ctrl = d->ctrl_val;
+ for (i = 0; i < r->num_closid; i++, ctrl++) {
+ ctrl_b = (unsigned long *)ctrl;
+ mode = rdtgroup_mode_by_closid(i);
+ if (closid_allocated(i) && i != closid &&
+ mode != RDT_MODE_PSEUDO_LOCKSETUP) {
+ if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+ if (exclusive) {
+ if (mode == RDT_MODE_EXCLUSIVE)
+ return true;
+ continue;
+ }
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
+ * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
+ *
+ * An exclusive resource group implies that there should be no sharing of
+ * its allocated resources. At the time this group is considered to be
+ * exclusive this test can determine if its current schemata supports this
+ * setting by testing for overlap with all other resource groups.
+ *
+ * Return: true if resource group can be exclusive, false if there is overlap
+ * with allocations of other resource groups and thus this resource group
+ * cannot be exclusive.
+ */
+static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
+{
+ int closid = rdtgrp->closid;
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ list_for_each_entry(d, &r->domains, list) {
+ if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
+ rdtgrp->closid, false))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * rdtgroup_mode_write - Modify the resource group's mode
+ *
+ */
+static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct rdtgroup *rdtgrp;
+ enum rdtgrp_mode mode;
+ int ret = 0;
+
+ /* Valid input requires a trailing newline */
+ if (nbytes == 0 || buf[nbytes - 1] != '\n')
+ return -EINVAL;
+ buf[nbytes - 1] = '\0';
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ rdt_last_cmd_clear();
+
+ mode = rdtgrp->mode;
+
+ if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
+ (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
+ (!strcmp(buf, "pseudo-locksetup") &&
+ mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
+ (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
+ goto out;
+
+ if (mode == RDT_MODE_PSEUDO_LOCKED) {
+ rdt_last_cmd_printf("cannot change pseudo-locked group\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!strcmp(buf, "shareable")) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = rdtgroup_locksetup_exit(rdtgrp);
+ if (ret)
+ goto out;
+ }
+ rdtgrp->mode = RDT_MODE_SHAREABLE;
+ } else if (!strcmp(buf, "exclusive")) {
+ if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
+ rdt_last_cmd_printf("schemata overlaps\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = rdtgroup_locksetup_exit(rdtgrp);
+ if (ret)
+ goto out;
+ }
+ rdtgrp->mode = RDT_MODE_EXCLUSIVE;
+ } else if (!strcmp(buf, "pseudo-locksetup")) {
+ ret = rdtgroup_locksetup_enter(rdtgrp);
+ if (ret)
+ goto out;
+ rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
+ } else {
+ rdt_last_cmd_printf("unknown/unsupported mode\n");
+ ret = -EINVAL;
+ }
+
+out:
+ rdtgroup_kn_unlock(of->kn);
+ return ret ?: nbytes;
+}
+
+/**
+ * rdtgroup_cbm_to_size - Translate CBM to size in bytes
+ * @r: RDT resource to which @d belongs.
+ * @d: RDT domain instance.
+ * @cbm: bitmask for which the size should be computed.
+ *
+ * The provided bitmask, associated with the RDT domain instance @d, is
+ * translated into how many bytes it represents. The size in bytes is
+ * computed by first dividing the total cache size by the CBM length to
+ * determine how many bytes each bit in the bitmask represents. The result
+ * is then multiplied by the number of bits set in the bitmask.
+ */
+unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
+ struct rdt_domain *d, u32 cbm)
+{
+ struct cpu_cacheinfo *ci;
+ unsigned int size = 0;
+ int num_b, i;
+
+ num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+ ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == r->cache_level) {
+ size = ci->info_list[i].size / r->cache.cbm_len * num_b;
+ break;
+ }
+ }
+
+ return size;
+}
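
As a worked example with invented numbers: for a 1 MiB cache instance with a 16-bit CBM, each bit represents 1048576 / 16 = 65536 bytes, so a bitmask with eight bits set (for example 0x00ff) reports 8 * 65536 = 524288 bytes.
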
+
+/**
+ * rdtgroup_size_show - Display size in bytes of allocated regions
+ *
+ * The "size" file mirrors the layout of the "schemata" file, printing the
+ * size in bytes of each region instead of the capacity bitmask.
+ *
+ */
+static int rdtgroup_size_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+ unsigned int size;
+ bool sep = false;
+ u32 cbm;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
+ size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+ rdtgrp->plr->d,
+ rdtgrp->plr->cbm);
+ seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+ goto out;
+ }
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ seq_printf(s, "%*s:", max_name_width, r->name);
+ list_for_each_entry(d, &r->domains, list) {
+ if (sep)
+ seq_putc(s, ';');
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ size = 0;
+ } else {
+ cbm = d->ctrl_val[rdtgrp->closid];
+ size = rdtgroup_cbm_to_size(r, d, cbm);
+ }
+ seq_printf(s, "%d=%u", d->id, size);
+ sep = true;
+ }
+ seq_putc(s, '\n');
+ }
+
+out:
+ rdtgroup_kn_unlock(of->kn);
+
+ return 0;
+}
+
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
{
@@ -792,6 +1248,13 @@ static struct rftype res_common_files[] = {
.fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
},
{
+ .name = "bit_usage",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_bit_usage_show,
+ .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
+ },
+ {
.name = "min_bandwidth",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
@@ -853,6 +1316,22 @@ static struct rftype res_common_files[] = {
.seq_show = rdtgroup_schemata_show,
.fflags = RF_CTRL_BASE,
},
+ {
+ .name = "mode",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_mode_write,
+ .seq_show = rdtgroup_mode_show,
+ .fflags = RF_CTRL_BASE,
+ },
+ {
+ .name = "size",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdtgroup_size_show,
+ .fflags = RF_CTRL_BASE,
+ },
+
};
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
@@ -883,6 +1362,103 @@ error:
return ret;
}
+/**
+ * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
+ * @r: The resource group with which the file is associated.
+ * @name: Name of the file
+ *
+ * The permissions of named resctrl file, directory, or link are modified
+ * to not allow read, write, or execute by any user.
+ *
+ * WARNING: This function is intended to communicate to the user that the
+ * resctrl file has been locked down - that it is not relevant to the
+ * particular state the system finds itself in. It should not be relied
+ * on to protect from user access because after the file's permissions
+ * are restricted the user can still change the permissions using chmod
+ * from the command line.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
+{
+ struct iattr iattr = {.ia_valid = ATTR_MODE,};
+ struct kernfs_node *kn;
+ int ret = 0;
+
+ kn = kernfs_find_and_get_ns(r->kn, name, NULL);
+ if (!kn)
+ return -ENOENT;
+
+ switch (kernfs_type(kn)) {
+ case KERNFS_DIR:
+ iattr.ia_mode = S_IFDIR;
+ break;
+ case KERNFS_FILE:
+ iattr.ia_mode = S_IFREG;
+ break;
+ case KERNFS_LINK:
+ iattr.ia_mode = S_IFLNK;
+ break;
+ }
+
+ ret = kernfs_setattr(kn, &iattr);
+ kernfs_put(kn);
+ return ret;
+}
+
+/**
+ * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
+ * @r: The resource group with which the file is associated.
+ * @name: Name of the file
+ * @mask: Mask of permissions that should be restored
+ *
+ * Restore the permissions of the named file. If @name is a directory the
+ * permissions of its parent will be used.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
+ umode_t mask)
+{
+ struct iattr iattr = {.ia_valid = ATTR_MODE,};
+ struct kernfs_node *kn, *parent;
+ struct rftype *rfts, *rft;
+ int ret, len;
+
+ rfts = res_common_files;
+ len = ARRAY_SIZE(res_common_files);
+
+ for (rft = rfts; rft < rfts + len; rft++) {
+ if (!strcmp(rft->name, name))
+ iattr.ia_mode = rft->mode & mask;
+ }
+
+ kn = kernfs_find_and_get_ns(r->kn, name, NULL);
+ if (!kn)
+ return -ENOENT;
+
+ switch (kernfs_type(kn)) {
+ case KERNFS_DIR:
+ parent = kernfs_get_parent(kn);
+ if (parent) {
+ iattr.ia_mode |= parent->mode;
+ kernfs_put(parent);
+ }
+ iattr.ia_mode |= S_IFDIR;
+ break;
+ case KERNFS_FILE:
+ iattr.ia_mode |= S_IFREG;
+ break;
+ case KERNFS_LINK:
+ iattr.ia_mode |= S_IFLNK;
+ break;
+ }
+
+ ret = kernfs_setattr(kn, &iattr);
+ kernfs_put(kn);
+ return ret;
+}
+
static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
unsigned long fflags)
{
@@ -1224,6 +1800,9 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ rdtgroup_pseudo_lock_remove(rdtgrp);
kernfs_unbreak_active_protection(kn);
kernfs_put(rdtgrp->kn);
kfree(rdtgrp);
@@ -1289,10 +1868,16 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
rdtgroup_default.mon.mon_data_kn = kn_mondata;
}
+ ret = rdt_pseudo_lock_init();
+ if (ret) {
+ dentry = ERR_PTR(ret);
+ goto out_mondata;
+ }
+
dentry = kernfs_mount(fs_type, flags, rdt_root,
RDTGROUP_SUPER_MAGIC, NULL);
if (IS_ERR(dentry))
- goto out_mondata;
+ goto out_psl;
if (rdt_alloc_capable)
static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
@@ -1310,6 +1895,8 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
goto out;
+out_psl:
+ rdt_pseudo_lock_release();
out_mondata:
if (rdt_mon_capable)
kernfs_remove(kn_mondata);
@@ -1447,6 +2034,10 @@ static void rmdir_all_sub(void)
if (rdtgrp == &rdtgroup_default)
continue;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ rdtgroup_pseudo_lock_remove(rdtgrp);
+
/*
* Give any CPUs back to the default group. We cannot copy
* cpu_online_mask because a CPU might have executed the
@@ -1483,6 +2074,8 @@ static void rdt_kill_sb(struct super_block *sb)
reset_all_ctrls(r);
cdp_disable_all();
rmdir_all_sub();
+ rdt_pseudo_lock_release();
+ rdtgroup_default.mode = RDT_MODE_SHAREABLE;
static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
static_branch_disable_cpuslocked(&rdt_mon_enable_key);
static_branch_disable_cpuslocked(&rdt_enable_key);
@@ -1503,7 +2096,8 @@ static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
struct kernfs_node *kn;
int ret = 0;
- kn = __kernfs_create_file(parent_kn, name, 0444, 0,
+ kn = __kernfs_create_file(parent_kn, name, 0444,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
&kf_mondata_ops, priv, NULL, NULL);
if (IS_ERR(kn))
return PTR_ERR(kn);
@@ -1682,6 +2276,114 @@ out_destroy:
return ret;
}
+/**
+ * cbm_ensure_valid - Enforce validity on provided CBM
+ * @_val: Candidate CBM
+ * @r: RDT resource to which the CBM belongs
+ *
+ * The provided CBM represents all cache portions available for use. This
+ * may be represented by a bitmap that does not consist of contiguous ones
+ * and thus be an invalid CBM.
+ * Here the provided CBM is forced to be a valid CBM by considering only
+ * the first set of contiguous bits as valid and clearing all remaining bits.
+ * The intention here is to provide a valid default CBM with which a new
+ * resource group is initialized. The user can follow this with a
+ * modification to the CBM if the default does not satisfy the
+ * requirements.
+ */
+static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
+{
+ /*
+ * Convert the u32 _val to an unsigned long required by all the bit
+ * operations within this function. No more than 32 bits of this
+ * converted value can be accessed because all bit operations are
+ * additionally provided with cbm_len that is initialized during
+ * hardware enumeration using five bits from the EAX register and
+ * thus can never exceed 32 bits.
+ */
+ unsigned long *val = (unsigned long *)_val;
+ unsigned int cbm_len = r->cache.cbm_len;
+ unsigned long first_bit, zero_bit;
+
+ if (*val == 0)
+ return;
+
+ first_bit = find_first_bit(val, cbm_len);
+ zero_bit = find_next_zero_bit(val, cbm_len, first_bit);
+
+ /* Clear any remaining bits to ensure contiguous region */
+ bitmap_clear(val, zero_bit, cbm_len - zero_bit);
+}
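
In other words, only the lowest run of contiguous set bits survives: 0xA5 (10100101b) is trimmed to 0x1, while an already contiguous 0x3C (00111100b) passes through untouched. The same logic in plain C, shown as an illustrative sketch (using the GCC/Clang __builtin_ctz and assuming, as the comment above notes, that no bits at or above cbm_len are set):

    #include <stdint.h>

    static uint32_t cbm_trim(uint32_t val, unsigned int cbm_len)
    {
            unsigned int first_bit, zero_bit;
            uint32_t below_zero_bit;

            if (!val)
                    return 0;

            first_bit = __builtin_ctz(val);         /* lowest set bit */
            zero_bit = first_bit;
            while (zero_bit < cbm_len && (val & (1u << zero_bit)))
                    zero_bit++;                     /* first zero above it */

            /* keep only bits [first_bit, zero_bit) */
            below_zero_bit = (zero_bit >= 32) ? 0xffffffffu
                                              : (1u << zero_bit) - 1;
            return val & below_zero_bit & ~((1u << first_bit) - 1);
    }
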
+
+/**
+ * rdtgroup_init_alloc - Initialize the new RDT group's allocations
+ *
+ * A new RDT group is being created on an allocation capable (CAT)
+ * supporting system. Set this group up to start off with all usable
+ * allocations. That is, all shareable and unused bits.
+ *
+ * All-zero CBM is invalid. If there are no more shareable bits available
+ * on any domain then the entire allocation will fail.
+ */
+static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+{
+ u32 used_b = 0, unused_b = 0;
+ u32 closid = rdtgrp->closid;
+ struct rdt_resource *r;
+ enum rdtgrp_mode mode;
+ struct rdt_domain *d;
+ int i, ret;
+ u32 *ctrl;
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ list_for_each_entry(d, &r->domains, list) {
+ d->have_new_ctrl = false;
+ d->new_ctrl = r->cache.shareable_bits;
+ used_b = r->cache.shareable_bits;
+ ctrl = d->ctrl_val;
+ for (i = 0; i < r->num_closid; i++, ctrl++) {
+ if (closid_allocated(i) && i != closid) {
+ mode = rdtgroup_mode_by_closid(i);
+ if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+ break;
+ used_b |= *ctrl;
+ if (mode == RDT_MODE_SHAREABLE)
+ d->new_ctrl |= *ctrl;
+ }
+ }
+ if (d->plr && d->plr->cbm > 0)
+ used_b |= d->plr->cbm;
+ unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
+ unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
+ d->new_ctrl |= unused_b;
+ /*
+ * Force the initial CBM to be valid, user can
+ * modify the CBM based on system availability.
+ */
+ cbm_ensure_valid(&d->new_ctrl, r);
+ if (bitmap_weight((unsigned long *) &d->new_ctrl,
+ r->cache.cbm_len) <
+ r->cache.min_cbm_bits) {
+ rdt_last_cmd_printf("no space on %s:%d\n",
+ r->name, d->id);
+ return -ENOSPC;
+ }
+ d->have_new_ctrl = true;
+ }
+ }
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ ret = update_domains(r, rdtgrp->closid);
+ if (ret < 0) {
+ rdt_last_cmd_puts("failed to initialize allocations\n");
+ return ret;
+ }
+ rdtgrp->mode = RDT_MODE_SHAREABLE;
+ }
+
+ return 0;
+}
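
The unused_b computation is a complement within the CBM width: with an invented 8-bit CBM, BIT_MASK(8) - 1 is 0xff, so used_b = 0x0f yields unused_b = 0xf0, and those unused portions are offered to the new group on top of whatever the shareable groups already contribute. cbm_ensure_valid() then trims the result to a contiguous run before the ENOSPC check against min_cbm_bits.
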
+
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
struct kernfs_node *prgrp_kn,
const char *name, umode_t mode,
@@ -1700,6 +2402,14 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
goto out_unlock;
}
+ if (rtype == RDTMON_GROUP &&
+ (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("pseudo-locking in progress\n");
+ goto out_unlock;
+ }
+
/* allocate the rdtgroup. */
rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
if (!rdtgrp) {
@@ -1840,6 +2550,10 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
ret = 0;
rdtgrp->closid = closid;
+ ret = rdtgroup_init_alloc(rdtgrp);
+ if (ret < 0)
+ goto out_id_free;
+
list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
if (rdt_mon_capable) {
@@ -1850,15 +2564,16 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
if (ret) {
rdt_last_cmd_puts("kernfs subdir error\n");
- goto out_id_free;
+ goto out_del_list;
}
}
goto out_unlock;
+out_del_list:
+ list_del(&rdtgrp->rdtgroup_list);
out_id_free:
closid_free(closid);
- list_del(&rdtgrp->rdtgroup_list);
out_common_fail:
mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
@@ -1945,6 +2660,21 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
return 0;
}
+static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
+ struct rdtgroup *rdtgrp)
+{
+ rdtgrp->flags = RDT_DELETED;
+ list_del(&rdtgrp->rdtgroup_list);
+
+ /*
+ * one extra hold on this, will drop when we kfree(rdtgrp)
+ * in rdtgroup_kn_unlock()
+ */
+ kernfs_get(kn);
+ kernfs_remove(rdtgrp->kn);
+ return 0;
+}
+
static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
cpumask_var_t tmpmask)
{
@@ -1970,7 +2700,6 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
update_closid_rmid(tmpmask, NULL);
- rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid);
free_rmid(rdtgrp->mon.rmid);
@@ -1979,14 +2708,7 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
*/
free_all_child_rdtgrp(rdtgrp);
- list_del(&rdtgrp->rdtgroup_list);
-
- /*
- * one extra hold on this, will drop when we kfree(rdtgrp)
- * in rdtgroup_kn_unlock()
- */
- kernfs_get(kn);
- kernfs_remove(rdtgrp->kn);
+ rdtgroup_ctrl_remove(kn, rdtgrp);
return 0;
}
@@ -2014,13 +2736,19 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
* If the rdtgroup is a mon group and parent directory
* is a valid "mon_groups" directory, remove the mon group.
*/
- if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn)
- ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
- else if (rdtgrp->type == RDTMON_GROUP &&
- is_mon_groups(parent_kn, kn->name))
+ if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ ret = rdtgroup_ctrl_remove(kn, rdtgrp);
+ } else {
+ ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
+ }
+ } else if (rdtgrp->type == RDTMON_GROUP &&
+ is_mon_groups(parent_kn, kn->name)) {
ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
- else
+ } else {
ret = -EPERM;
+ }
out:
rdtgroup_kn_unlock(kn);
@@ -2046,7 +2774,8 @@ static int __init rdtgroup_setup_root(void)
int ret;
rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
- KERNFS_ROOT_CREATE_DEACTIVATED,
+ KERNFS_ROOT_CREATE_DEACTIVATED |
+ KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
&rdtgroup_default);
if (IS_ERR(rdt_root))
return PTR_ERR(rdt_root);
@@ -2102,6 +2831,29 @@ int __init rdtgroup_init(void)
if (ret)
goto cleanup_mountpoint;
+ /*
+ * Adding the resctrl debugfs directory here may not be ideal since
+ * it would let the resctrl debugfs directory appear on the debugfs
+ * filesystem before the resctrl filesystem is mounted.
+ * It may also be ok since that would enable debugging of RDT before
+ * resctrl is mounted.
+ * The reason why the debugfs directory is created here and not in
+ * rdt_mount() is because rdt_mount() takes rdtgroup_mutex and
+ * during the debugfs directory creation also &sb->s_type->i_mutex_key
+ * (the lockdep class of inode->i_rwsem). Other filesystem
+ * interactions (e.g. SyS_getdents) have the lock ordering:
+ * &sb->s_type->i_mutex_key --> &mm->mmap_sem
+ * During mmap(), called with &mm->mmap_sem held, the rdtgroup_mutex
+ * is taken, thus creating the dependency:
+ * &mm->mmap_sem --> rdtgroup_mutex, which can cause deadlock
+ * issues considering the other two lock dependencies.
+ * By creating the debugfs directory here we avoid a dependency
+ * that may cause deadlock (even though file operations cannot
+ * occur until the filesystem is mounted, but I do not know how to
+ * tell lockdep that).
+ */
+ debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
+
return 0;
cleanup_mountpoint:
@@ -2111,3 +2863,11 @@ cleanup_root:
return ret;
}
+
+void __exit rdtgroup_exit(void)
+{
+ debugfs_remove_recursive(debugfs_resctrl);
+ unregister_filesystem(&rdt_fs_type);
+ sysfs_remove_mount_point(fs_kobj, "resctrl");
+ kernfs_destroy_root(rdt_root);
+}
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index c102ad51025e..4b767284b7f5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -123,8 +123,8 @@ void mce_setup(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
- /* We hope get_seconds stays lockless */
- m->time = get_seconds();
+ /* need the internal __ version to avoid deadlocks */
+ m->time = __ktime_get_real_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
m->cpuid = cpuid_eax(1);
m->socketid = cpu_data(m->extcpu).phys_proc_id;
@@ -1104,6 +1104,101 @@ static void mce_unmap_kpfn(unsigned long pfn)
}
#endif
+
+/*
+ * Cases where we avoid rendezvous handler timeout:
+ * 1) If this CPU is offline.
+ *
+ * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
+ * skip those CPUs which remain looping in the 1st kernel - see
+ * crash_nmi_callback().
+ *
+ * Note: there still is a small window between kexec-ing and the new,
+ * kdump kernel establishing a new #MC handler where a broadcasted MCE
+ * might not get handled properly.
+ */
+static bool __mc_check_crashing_cpu(int cpu)
+{
+ if (cpu_is_offline(cpu) ||
+ (crashing_cpu != -1 && crashing_cpu != cpu)) {
+ u64 mcgstatus;
+
+ mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+ if (mcgstatus & MCG_STATUS_RIPV) {
+ mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+ return true;
+ }
+ }
+ return false;
+}
+
+static void __mc_scan_banks(struct mce *m, struct mce *final,
+ unsigned long *toclear, unsigned long *valid_banks,
+ int no_way_out, int *worst)
+{
+ struct mca_config *cfg = &mca_cfg;
+ int severity, i;
+
+ for (i = 0; i < cfg->banks; i++) {
+ __clear_bit(i, toclear);
+ if (!test_bit(i, valid_banks))
+ continue;
+
+ if (!mce_banks[i].ctl)
+ continue;
+
+ m->misc = 0;
+ m->addr = 0;
+ m->bank = i;
+
+ m->status = mce_rdmsrl(msr_ops.status(i));
+ if (!(m->status & MCI_STATUS_VAL))
+ continue;
+
+ /*
+ * Corrected or non-signaled errors are handled by
+ * machine_check_poll(). Leave them alone, unless this panics.
+ */
+ if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
+ !no_way_out)
+ continue;
+
+ /* Set taint even when machine check was not enabled. */
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
+ severity = mce_severity(m, cfg->tolerant, NULL, true);
+
+ /*
+ * When machine check was for corrected/deferred handler don't
+ * touch, unless we're panicking.
+ */
+ if ((severity == MCE_KEEP_SEVERITY ||
+ severity == MCE_UCNA_SEVERITY) && !no_way_out)
+ continue;
+
+ __set_bit(i, toclear);
+
+ /* Machine check event was not enabled. Clear, but ignore. */
+ if (severity == MCE_NO_SEVERITY)
+ continue;
+
+ mce_read_aux(m, i);
+
+ /* assuming valid severity level != 0 */
+ m->severity = severity;
+
+ mce_log(m);
+
+ if (severity > *worst) {
+ *final = *m;
+ *worst = severity;
+ }
+ }
+
+ /* mce_clear_state will clear *final, save locally for use later */
+ *m = *final;
+}
+
/*
* The actual machine check handler. This only handles real
* exceptions when something got corrupted coming in through int 18.
@@ -1118,68 +1213,45 @@ static void mce_unmap_kpfn(unsigned long pfn)
*/
void do_machine_check(struct pt_regs *regs, long error_code)
{
+ DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
+ DECLARE_BITMAP(toclear, MAX_NR_BANKS);
struct mca_config *cfg = &mca_cfg;
+ int cpu = smp_processor_id();
+ char *msg = "Unknown";
struct mce m, *final;
- int i;
int worst = 0;
- int severity;
/*
* Establish sequential order between the CPUs entering the machine
* check handler.
*/
int order = -1;
+
/*
* If no_way_out gets set, there is no safe way to recover from this
* MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
*/
int no_way_out = 0;
+
/*
* If kill_it gets set, there might be a way to recover from this
* error.
*/
int kill_it = 0;
- DECLARE_BITMAP(toclear, MAX_NR_BANKS);
- DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
- char *msg = "Unknown";
/*
* MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
* on Intel.
*/
int lmce = 1;
- int cpu = smp_processor_id();
-
- /*
- * Cases where we avoid rendezvous handler timeout:
- * 1) If this CPU is offline.
- *
- * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
- * skip those CPUs which remain looping in the 1st kernel - see
- * crash_nmi_callback().
- *
- * Note: there still is a small window between kexec-ing and the new,
- * kdump kernel establishing a new #MC handler where a broadcasted MCE
- * might not get handled properly.
- */
- if (cpu_is_offline(cpu) ||
- (crashing_cpu != -1 && crashing_cpu != cpu)) {
- u64 mcgstatus;
- mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
- if (mcgstatus & MCG_STATUS_RIPV) {
- mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
- return;
- }
- }
+ if (__mc_check_crashing_cpu(cpu))
+ return;
ist_enter(regs);
this_cpu_inc(mce_exception_count);
- if (!cfg->banks)
- goto out;
-
mce_gather_info(&m, regs);
m.tsc = rdtsc();
@@ -1220,67 +1292,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
order = mce_start(&no_way_out);
}
- for (i = 0; i < cfg->banks; i++) {
- __clear_bit(i, toclear);
- if (!test_bit(i, valid_banks))
- continue;
- if (!mce_banks[i].ctl)
- continue;
-
- m.misc = 0;
- m.addr = 0;
- m.bank = i;
-
- m.status = mce_rdmsrl(msr_ops.status(i));
- if ((m.status & MCI_STATUS_VAL) == 0)
- continue;
-
- /*
- * Non uncorrected or non signaled errors are handled by
- * machine_check_poll. Leave them alone, unless this panics.
- */
- if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
- !no_way_out)
- continue;
-
- /*
- * Set taint even when machine check was not enabled.
- */
- add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
- severity = mce_severity(&m, cfg->tolerant, NULL, true);
-
- /*
- * When machine check was for corrected/deferred handler don't
- * touch, unless we're panicing.
- */
- if ((severity == MCE_KEEP_SEVERITY ||
- severity == MCE_UCNA_SEVERITY) && !no_way_out)
- continue;
- __set_bit(i, toclear);
- if (severity == MCE_NO_SEVERITY) {
- /*
- * Machine check event was not enabled. Clear, but
- * ignore.
- */
- continue;
- }
-
- mce_read_aux(&m, i);
-
- /* assuming valid severity level != 0 */
- m.severity = severity;
-
- mce_log(&m);
-
- if (severity > worst) {
- *final = m;
- worst = severity;
- }
- }
-
- /* mce_clear_state will clear *final, save locally for use later */
- m = *final;
+ __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
if (!no_way_out)
mce_clear_state(toclear);
@@ -1319,7 +1331,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
if (worst > 0)
mce_report_event(regs);
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-out:
+
sync_core();
if (worst != MCE_AR_SEVERITY && !kill_it)
@@ -2165,9 +2177,6 @@ static ssize_t store_int_with_restart(struct device *s,
if (check_interval == old_check_interval)
return ret;
- if (check_interval < 1)
- check_interval = 1;
-
mutex_lock(&mce_sysfs_mutex);
mce_restart();
mutex_unlock(&mce_sysfs_mutex);
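
The toclear and valid_banks bitmaps threaded through the new __mc_scan_banks() helper use the kernel's generic bitmap API. A minimal sketch of that pattern, with an invented bank number:

	DECLARE_BITMAP(toclear, MAX_NR_BANKS);	/* unsigned long[BITS_TO_LONGS(MAX_NR_BANKS)] */

	bitmap_zero(toclear, MAX_NR_BANKS);	/* no banks marked yet */
	__set_bit(3, toclear);			/* non-atomic: bank 3 needs its STATUS cleared */
	if (test_bit(3, toclear))
		mce_wrmsrl(msr_ops.status(3), 0);	/* what mce_clear_state() does per bank */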
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 08286269fd24..b9bc8a1a584e 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -509,12 +509,20 @@ static struct platform_device *microcode_pdev;
static int check_online_cpus(void)
{
- if (num_online_cpus() == num_present_cpus())
- return 0;
+ unsigned int cpu;
- pr_err("Not all CPUs online, aborting microcode update.\n");
+ /*
+ * Make sure all CPUs are online. It's fine for SMT to be disabled if
+ * all the primary threads are still online.
+ */
+ for_each_present_cpu(cpu) {
+ if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
+ pr_err("Not all CPUs online, aborting microcode update.\n");
+ return -EINVAL;
+ }
+ }
- return -EINVAL;
+ return 0;
}
static atomic_t late_cpus_in;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 031082c96db8..ad12733f6058 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -41,7 +41,7 @@ static void (*hv_stimer0_handler)(void);
static void (*hv_kexec_handler)(void);
static void (*hv_crash_handler)(struct pt_regs *regs);
-void hyperv_vector_handler(struct pt_regs *regs)
+__visible void __irq_entry hyperv_vector_handler(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -50,7 +50,7 @@ void hyperv_vector_handler(struct pt_regs *regs)
if (vmbus_handler)
vmbus_handler();
- if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
+ if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
ack_APIC_irq();
exiting_irq();
@@ -300,7 +300,7 @@ static void __init ms_hyperv_init_platform(void)
hyperv_reenlightenment_vector);
/* Setup the IDT for stimer0 */
- if (ms_hyperv.misc_features & HV_X64_STIMER_DIRECT_MODE_AVAILABLE)
+ if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE)
alloc_intr_gate(HYPERV_STIMER0_VECTOR,
hv_stimer0_callback_vector);
#endif
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 4021d3859499..40eee6cc4124 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
memset(line, 0, LINE_SIZE);
- length = strncpy_from_user(line, buf, LINE_SIZE - 1);
+ len = min_t(size_t, len, LINE_SIZE - 1);
+ length = strncpy_from_user(line, buf, len);
if (length < 0)
return length;
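
The mtrr_write() change clamps the copy to the smaller of the user-supplied count and the local buffer before calling strncpy_from_user(), where the old code always asked for LINE_SIZE - 1 bytes. A hedged sketch of the resulting pattern:

	char line[LINE_SIZE];
	long length;

	len = min_t(size_t, len, sizeof(line) - 1);	/* honor the user's count */
	length = strncpy_from_user(line, buf, len);
	if (length < 0)
		return length;		/* -EFAULT from a faulting user pointer */
	line[length] = '\0';		/* the result is not guaranteed NUL-terminated */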
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 81c0afb39d0a..71ca064e3794 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -22,18 +22,10 @@
#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
-/*
- * Check for extended topology enumeration cpuid leaf 0xb and if it
- * exists, use it for populating initial_apicid and cpu topology
- * detection.
- */
-int detect_extended_topology(struct cpuinfo_x86 *c)
+int detect_extended_topology_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
- unsigned int eax, ebx, ecx, edx, sub_index;
- unsigned int ht_mask_width, core_plus_mask_width;
- unsigned int core_select_mask, core_level_siblings;
- static bool printed;
+ unsigned int eax, ebx, ecx, edx;
if (c->cpuid_level < 0xb)
return -1;
@@ -52,10 +44,30 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
* initial apic id, which also represents 32-bit extended x2apic id.
*/
c->initial_apicid = edx;
+ smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+#endif
+ return 0;
+}
+
+/*
+ * Check for extended topology enumeration cpuid leaf 0xb and if it
+ * exists, use it for populating initial_apicid and cpu topology
+ * detection.
+ */
+int detect_extended_topology(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ unsigned int eax, ebx, ecx, edx, sub_index;
+ unsigned int ht_mask_width, core_plus_mask_width;
+ unsigned int core_select_mask, core_level_siblings;
+
+ if (detect_extended_topology_early(c) < 0)
+ return -1;
/*
* Populate HT related information from sub-leaf level 0.
*/
+ cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
@@ -86,15 +98,6 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
c->x86_max_cores = (core_level_siblings / smp_num_siblings);
-
- if (!printed) {
- pr_info("CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- if (c->x86_max_cores > 1)
- pr_info("CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
- printed = 1;
- }
#endif
return 0;
}
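
For reference, a sketch of what the new detect_extended_topology_early() pulls out of CPUID leaf 0xB at the SMT sub-leaf, using the two macros defined at the top of this file (example values are assumptions):

	unsigned int eax, ebx, ecx, edx, ht_mask_width;

	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
	smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);	/* ebx[15:0], e.g. 2 with HT */
	ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);	/* eax[4:0], e.g. 1 for 2 threads/core */
	/* edx holds the 32-bit extended (x2APIC) initial APIC ID */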
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 666a284116ac..9c8652974f8e 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -22,8 +22,6 @@
#include <asm/stacktrace.h>
#include <asm/unwind.h>
-#define OPCODE_BUFSIZE 64
-
int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
static int die_counter;
@@ -93,26 +91,18 @@ static void printk_stack_address(unsigned long address, int reliable,
*/
void show_opcodes(u8 *rip, const char *loglvl)
{
- unsigned int code_prologue = OPCODE_BUFSIZE * 2 / 3;
+#define PROLOGUE_SIZE 42
+#define EPILOGUE_SIZE 21
+#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
u8 opcodes[OPCODE_BUFSIZE];
- u8 *ip;
- int i;
-
- printk("%sCode: ", loglvl);
-
- ip = (u8 *)rip - code_prologue;
- if (probe_kernel_read(opcodes, ip, OPCODE_BUFSIZE)) {
- pr_cont("Bad RIP value.\n");
- return;
- }
- for (i = 0; i < OPCODE_BUFSIZE; i++, ip++) {
- if (ip == rip)
- pr_cont("<%02x> ", opcodes[i]);
- else
- pr_cont("%02x ", opcodes[i]);
+ if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
+ printk("%sCode: Bad RIP value.\n", loglvl);
+ } else {
+ printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
+ __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
+ opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
}
- pr_cont("\n");
}
void show_ip(struct pt_regs *regs, const char *loglvl)
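
The rewritten show_opcodes() leans on printk's %ph extension, which prints a buffer as space-separated hex bytes with the byte count taken from the field width (built here via __stringify() so PROLOGUE_SIZE stays the single source of truth). A small usage sketch:

	u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };

	pr_info("%4ph\n", buf);		/* prints: de ad be ef */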
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index da5d8ac60062..50d5848bf22e 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -338,6 +338,18 @@ static resource_size_t __init gen3_stolen_base(int num, int slot, int func,
return bsm & INTEL_BSM_MASK;
}
+static resource_size_t __init gen11_stolen_base(int num, int slot, int func,
+ resource_size_t stolen_size)
+{
+ u64 bsm;
+
+ bsm = read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW0);
+ bsm &= INTEL_BSM_MASK;
+ bsm |= (u64)read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW1) << 32;
+
+ return bsm;
+}
+
static resource_size_t __init i830_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
@@ -498,6 +510,11 @@ static const struct intel_early_ops chv_early_ops __initconst = {
.stolen_size = chv_stolen_size,
};
+static const struct intel_early_ops gen11_early_ops __initconst = {
+ .stolen_base = gen11_stolen_base,
+ .stolen_size = gen9_stolen_size,
+};
+
static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_I830_IDS(&i830_early_ops),
INTEL_I845G_IDS(&i845_early_ops),
@@ -529,6 +546,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_CFL_IDS(&gen9_early_ops),
INTEL_GLK_IDS(&gen9_early_ops),
INTEL_CNL_IDS(&gen9_early_ops),
+ INTEL_ICL_11_IDS(&gen11_early_ops),
};
struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
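
gen11_stolen_base() widens the high dword before shifting because shifting a 32-bit value left by 32 is undefined behaviour. The idiom, sketched with hypothetical register offsets:

	u32 lo = read_pci_config(num, slot, func, REG_BSM_DW0);	/* hypothetical */
	u32 hi = read_pci_config(num, slot, func, REG_BSM_DW1);	/* hypothetical */
	u64 base;

	base  = lo & INTEL_BSM_MASK;	/* low bits carry flags, mask them off */
	base |= (u64)hi << 32;		/* cast first: a bare hi << 32 is UB */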
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index f92a6593de1e..2ea85b32421a 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -10,6 +10,7 @@
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
+#include <asm/irq_regs.h>
#include <linux/hardirq.h>
#include <linux/pkeys.h>
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index abe6df15a8fb..30f9cb2c0b55 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -512,11 +512,18 @@ ENTRY(initial_code)
ENTRY(setup_once_ref)
.long setup_once
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define PGD_ALIGN (2 * PAGE_SIZE)
+#define PTI_USER_PGD_FILL 1024
+#else
+#define PGD_ALIGN (PAGE_SIZE)
+#define PTI_USER_PGD_FILL 0
+#endif
/*
* BSS section
*/
__PAGE_ALIGNED_BSS
- .align PAGE_SIZE
+ .align PGD_ALIGN
#ifdef CONFIG_X86_PAE
.globl initial_pg_pmd
initial_pg_pmd:
@@ -526,14 +533,17 @@ initial_pg_pmd:
initial_page_table:
.fill 1024,4,0
#endif
+ .align PGD_ALIGN
initial_pg_fixmap:
.fill 1024,4,0
-.globl empty_zero_page
-empty_zero_page:
- .fill 4096,1,0
.globl swapper_pg_dir
+ .align PGD_ALIGN
swapper_pg_dir:
.fill 1024,4,0
+ .fill PTI_USER_PGD_FILL,4,0
+.globl empty_zero_page
+empty_zero_page:
+ .fill 4096,1,0
EXPORT_SYMBOL(empty_zero_page)
/*
@@ -542,7 +552,7 @@ EXPORT_SYMBOL(empty_zero_page)
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
/* Page-aligned for the benefit of paravirt? */
- .align PAGE_SIZE
+ .align PGD_ALIGN
ENTRY(initial_page_table)
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
# if KPMDS == 3
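
With PTI enabled, the 2*PAGE_SIZE alignment plus the 1024 extra 4-byte entries (exactly one page) place the user-space copy of the PGD in the page immediately after the kernel one. Under that layout assumption, the kernel-to-user translation is just an address-bit flip, roughly:

	/* Sketch only: assumes the user PGD sits one page above the kernel PGD */
	static inline pgd_t *user_pgd_sketch(pgd_t *kernel_pgd)
	{
		return (pgd_t *)((unsigned long)kernel_pgd + PAGE_SIZE);
	}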
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 8344dd2f310a..15ebc2fc166e 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -235,7 +235,7 @@ ENTRY(secondary_startup_64)
* address given in m16:64.
*/
pushq $.Lafter_lret # put return address on stack for unwinder
- xorq %rbp, %rbp # clear frame pointer
+ xorl %ebp, %ebp # clear frame pointer
movq initial_code(%rip), %rax
pushq $__KERNEL_CS # set correct cs
pushq %rax # target address in negative space
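
The xorq to xorl change above is a size optimization, not a behavioural one: writing a 32-bit register zero-extends into the full 64-bit register, so the REX.W prefix adds a byte for nothing. The two encodings:

	31 ed		xorl %ebp,%ebp	/* 2 bytes, clears all of %rbp */
	48 31 ed	xorq %rbp,%rbp	/* 3 bytes, same effect */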
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 346b24883911..b0acb22e5a46 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1,6 +1,7 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/errno.h>
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 8771766d46b6..34a5c1715148 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -169,28 +169,29 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
set_dr_addr_mask(0, i);
}
-/*
- * Check for virtual address in kernel space.
- */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+static int arch_bp_generic_len(int x86_len)
{
- unsigned int len;
- unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
- va = info->address;
- len = bp->attr.bp_len;
-
- /*
- * We don't need to worry about va + len - 1 overflowing:
- * we already require that va is aligned to a multiple of len.
- */
- return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
+ switch (x86_len) {
+ case X86_BREAKPOINT_LEN_1:
+ return HW_BREAKPOINT_LEN_1;
+ case X86_BREAKPOINT_LEN_2:
+ return HW_BREAKPOINT_LEN_2;
+ case X86_BREAKPOINT_LEN_4:
+ return HW_BREAKPOINT_LEN_4;
+#ifdef CONFIG_X86_64
+ case X86_BREAKPOINT_LEN_8:
+ return HW_BREAKPOINT_LEN_8;
+#endif
+ default:
+ return -EINVAL;
+ }
}
int arch_bp_generic_fields(int x86_len, int x86_type,
int *gen_len, int *gen_type)
{
+ int len;
+
/* Type */
switch (x86_type) {
case X86_BREAKPOINT_EXECUTE:
@@ -211,42 +212,47 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
}
/* Len */
- switch (x86_len) {
- case X86_BREAKPOINT_LEN_1:
- *gen_len = HW_BREAKPOINT_LEN_1;
- break;
- case X86_BREAKPOINT_LEN_2:
- *gen_len = HW_BREAKPOINT_LEN_2;
- break;
- case X86_BREAKPOINT_LEN_4:
- *gen_len = HW_BREAKPOINT_LEN_4;
- break;
-#ifdef CONFIG_X86_64
- case X86_BREAKPOINT_LEN_8:
- *gen_len = HW_BREAKPOINT_LEN_8;
- break;
-#endif
- default:
+ len = arch_bp_generic_len(x86_len);
+ if (len < 0)
return -EINVAL;
- }
+ *gen_len = len;
return 0;
}
-
-static int arch_build_bp_info(struct perf_event *bp)
+/*
+ * Check for virtual address in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ unsigned long va;
+ int len;
- info->address = bp->attr.bp_addr;
+ va = hw->address;
+ len = arch_bp_generic_len(hw->len);
+ WARN_ON_ONCE(len < 0);
+
+ /*
+ * We don't need to worry about va + len - 1 overflowing:
+ * we already require that va is aligned to a multiple of len.
+ */
+ return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
+}
+
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ hw->address = attr->bp_addr;
+ hw->mask = 0;
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_W:
- info->type = X86_BREAKPOINT_WRITE;
+ hw->type = X86_BREAKPOINT_WRITE;
break;
case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
- info->type = X86_BREAKPOINT_RW;
+ hw->type = X86_BREAKPOINT_RW;
break;
case HW_BREAKPOINT_X:
/*
@@ -254,23 +260,23 @@ static int arch_build_bp_info(struct perf_event *bp)
* acceptable for kprobes. On non-kprobes kernels, we don't
* allow kernel breakpoints at all.
*/
- if (bp->attr.bp_addr >= TASK_SIZE_MAX) {
+ if (attr->bp_addr >= TASK_SIZE_MAX) {
#ifdef CONFIG_KPROBES
- if (within_kprobe_blacklist(bp->attr.bp_addr))
+ if (within_kprobe_blacklist(attr->bp_addr))
return -EINVAL;
#else
return -EINVAL;
#endif
}
- info->type = X86_BREAKPOINT_EXECUTE;
+ hw->type = X86_BREAKPOINT_EXECUTE;
/*
* x86 inst breakpoints need to have a specific undefined len.
* But we still need to check that userspace is not trying to set up
* an unsupported length, to get a range breakpoint for example.
*/
- if (bp->attr.bp_len == sizeof(long)) {
- info->len = X86_BREAKPOINT_LEN_X;
+ if (attr->bp_len == sizeof(long)) {
+ hw->len = X86_BREAKPOINT_LEN_X;
return 0;
}
default:
@@ -278,28 +284,26 @@ static int arch_build_bp_info(struct perf_event *bp)
}
/* Len */
- info->mask = 0;
-
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->len = X86_BREAKPOINT_LEN_1;
+ hw->len = X86_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->len = X86_BREAKPOINT_LEN_2;
+ hw->len = X86_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->len = X86_BREAKPOINT_LEN_4;
+ hw->len = X86_BREAKPOINT_LEN_4;
break;
#ifdef CONFIG_X86_64
case HW_BREAKPOINT_LEN_8:
- info->len = X86_BREAKPOINT_LEN_8;
+ hw->len = X86_BREAKPOINT_LEN_8;
break;
#endif
default:
/* AMD range breakpoint */
- if (!is_power_of_2(bp->attr.bp_len))
+ if (!is_power_of_2(attr->bp_len))
return -EINVAL;
- if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
+ if (attr->bp_addr & (attr->bp_len - 1))
return -EINVAL;
if (!boot_cpu_has(X86_FEATURE_BPEXT))
@@ -312,8 +316,8 @@ static int arch_build_bp_info(struct perf_event *bp)
* breakpoints, then we'll have to check for kprobe-blacklisted
* addresses anywhere in the range.
*/
- info->mask = bp->attr.bp_len - 1;
- info->len = X86_BREAKPOINT_LEN_1;
+ hw->mask = attr->bp_len - 1;
+ hw->len = X86_BREAKPOINT_LEN_1;
}
return 0;
@@ -322,22 +326,23 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
int ret;
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
- switch (info->len) {
+ switch (hw->len) {
case X86_BREAKPOINT_LEN_1:
align = 0;
- if (info->mask)
- align = info->mask;
+ if (hw->mask)
+ align = hw->mask;
break;
case X86_BREAKPOINT_LEN_2:
align = 1;
@@ -358,7 +363,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
*/
- if (info->address & align)
+ if (hw->address & align)
return -EINVAL;
return 0;
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 86c4439f9d74..519649ddf100 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/init.h>
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 74383a3780dc..01adea278a71 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -8,6 +8,7 @@
#include <asm/traps.h>
#include <asm/proto.h>
#include <asm/desc.h>
+#include <asm/hw_irq.h>
struct idt_data {
unsigned int vector;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 328d027d829d..59b5f2ea7c2f 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -10,6 +10,7 @@
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index c1bdbd3d3232..95600a99ae93 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -11,6 +11,7 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index d86e344f5b3d..0469cd078db1 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -11,6 +11,7 @@
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
new file mode 100644
index 000000000000..ddeeaac8adda
--- /dev/null
+++ b/arch/x86/kernel/irqflags.S
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <linux/linkage.h>
+
+/*
+ * unsigned long native_save_fl(void)
+ */
+ENTRY(native_save_fl)
+ pushf
+ pop %_ASM_AX
+ ret
+ENDPROC(native_save_fl)
+EXPORT_SYMBOL(native_save_fl)
+
+/*
+ * void native_restore_fl(unsigned long flags)
+ * %eax/%rdi: flags
+ */
+ENTRY(native_restore_fl)
+ push %_ASM_ARG1
+ popf
+ ret
+ENDPROC(native_restore_fl)
+EXPORT_SYMBOL(native_restore_fl)
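
Giving native_save_fl()/native_restore_fl() real out-of-line symbols matters because the paravirt code takes their address; _ASM_ARG1 expands to %rdi on 64-bit and %eax on 32-bit, matching the comment. A hedged C-side usage sketch:

	unsigned long flags;

	flags = native_save_fl();	/* pushf; pop: read EFLAGS */
	/* ... hypothetical region that may toggle IF ... */
	native_restore_fl(flags);	/* push; popf: write EFLAGS back */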
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 772196c1b8c4..a0693b71cfc1 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/kprobes.h>
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index e56c95be2808..eeea935e9bb5 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -37,15 +37,18 @@ static void bug_at(unsigned char *ip, int line)
BUG();
}
-static void __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type,
- void *(*poker)(void *, const void *, size_t),
- int init)
+static void __ref __jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ void *(*poker)(void *, const void *, size_t),
+ int init)
{
union jump_code_union code;
const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
+ if (early_boot_irqs_disabled)
+ poker = text_poke_early;
+
if (type == JUMP_LABEL_JMP) {
if (init) {
/*
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 7326078eaa7a..278cd07228dd 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loader_data)
static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
{
return verify_pefile_signature(kernel, kernel_len,
- NULL,
+ VERIFY_USE_SECONDARY_KEYRING,
VERIFYING_KEXEC_PE_SIGNATURE);
}
#endif
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index ae38dccf0c8f..2b949f4fd4d8 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -105,14 +105,4 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig
}
#endif
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- return 0;
-}
-#endif
#endif
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6f4d42377fe5..b0d1e81c96bb 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -66,8 +66,6 @@
#include "common.h"
-void jprobe_return_end(void);
-
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -395,8 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
- (u8 *) real;
if ((s64) (s32) newdisp != newdisp) {
pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
- pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
- src, real, insn->displacement.value);
return 0;
}
disp = (u8 *) dest + insn_offset_displacement(insn);
@@ -596,7 +592,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
* stepping.
*/
regs->ip = (unsigned long)p->ainsn.insn;
- preempt_enable_no_resched();
return;
}
#endif
@@ -640,8 +635,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
* Raise a BUG or we'll continue in an endless reentering loop
* and eventually a stack overflow.
*/
- printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
- p->addr);
+ pr_err("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
default:
@@ -669,12 +663,10 @@ int kprobe_int3_handler(struct pt_regs *regs)
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
/*
- * We don't want to be preempted for the entire
- * duration of kprobe processing. We conditionally
- * re-enable preemption at the end of this function,
- * and also in reenter_kprobe() and setup_singlestep().
+ * We don't want to be preempted for the entire duration of kprobe
+ * processing. Since the int3 and debug traps disable irqs and we
+ * clear IF while single-stepping, this path is never preemptible.
*/
- preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe(addr);
@@ -690,13 +682,14 @@ int kprobe_int3_handler(struct pt_regs *regs)
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
- * pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry
- * for jprobe processing, so get out doing nothing
- * more here.
+ * pre-handler and it returned non-zero, the user handler
+ * set up the registers to resume at another instruction,
+ * so we must skip the single-stepping.
*/
if (!p->pre_handler || !p->pre_handler(p, regs))
setup_singlestep(p, regs, kcb, 0);
+ else
+ reset_current_kprobe();
return 1;
}
} else if (*addr != BREAKPOINT_INSTRUCTION) {
@@ -710,18 +703,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
* the original instruction.
*/
regs->ip = (unsigned long)addr;
- preempt_enable_no_resched();
return 1;
- } else if (kprobe_running()) {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- if (!skip_singlestep(p, regs, kcb))
- setup_singlestep(p, regs, kcb, 0);
- return 1;
- }
} /* else: not a kprobe fault; let the kernel handle it */
- preempt_enable_no_resched();
return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);
@@ -972,8 +956,6 @@ int kprobe_debug_handler(struct pt_regs *regs)
}
reset_current_kprobe();
out:
- preempt_enable_no_resched();
-
/*
* if somebody else is singlestepping across a probe point, flags
* will have TF set, in which case, continue the remaining processing
@@ -1020,7 +1002,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
- preempt_enable_no_resched();
} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
kcb->kprobe_status == KPROBE_HIT_SSDONE) {
/*
@@ -1083,93 +1064,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_sp = stack_addr(regs);
- addr = (unsigned long)(kcb->jprobe_saved_sp);
-
- /*
- * As Linus pointed out, gcc assumes that the callee
- * owns the argument space and could overwrite it, e.g.
- * tailcall optimization. So, to be absolutely safe
- * we also save and restore enough stack bytes to cover
- * the argument area.
- * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
- * raw stack chunk with redzones:
- */
- __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
- regs->ip = (unsigned long)(jp->entry);
-
- /*
- * jprobes use jprobe_return() which skips the normal return
- * path of the function, and this messes up the accounting of the
- * function graph tracer to get messed up.
- *
- * Pause function graph tracing while performing the jprobe function.
- */
- pause_graph_tracing();
- return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- /* Unpoison stack redzones in the frames we are going to jump over. */
- kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
-
- asm volatile (
-#ifdef CONFIG_X86_64
- " xchg %%rbx,%%rsp \n"
-#else
- " xchgl %%ebx,%%esp \n"
-#endif
- " int3 \n"
- " .globl jprobe_return_end\n"
- " jprobe_return_end: \n"
- " nop \n"::"b"
- (kcb->jprobe_saved_sp):"memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-NOKPROBE_SYMBOL(jprobe_return_end);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- u8 *addr = (u8 *) (regs->ip - 1);
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- void *saved_sp = kcb->jprobe_saved_sp;
-
- if ((addr > (u8 *) jprobe_return) &&
- (addr < (u8 *) jprobe_return_end)) {
- if (stack_addr(regs) != saved_sp) {
- struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
- printk(KERN_ERR
- "current sp %p does not match saved sp %p\n",
- stack_addr(regs), saved_sp);
- printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
- show_regs(saved_regs);
- printk(KERN_ERR "Current registers\n");
- show_regs(regs);
- BUG();
- }
- /* It's OK to start function graph tracing again */
- unpause_graph_tracing();
- *regs = kcb->jprobe_saved_regs;
- __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
bool arch_within_kprobe_blacklist(unsigned long addr)
{
bool is_in_entry_trampoline_section = false;
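
With jprobes gone (setjmp_pre_handler(), jprobe_return() and longjmp_break_handler() all removed above), plain kprobes remain the supported probing interface. A minimal sketch in the style of samples/kprobes; the target symbol is an arbitrary example:

	static int my_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("pre: %s hit, ip=%lx\n", p->symbol_name, regs->ip);
		return 0;	/* 0: continue with single-stepping the original insn */
	}

	static struct kprobe my_kp = {
		.symbol_name	= "do_sys_open",	/* hypothetical target */
		.pre_handler	= my_pre,
	};
	/* register_kprobe(&my_kp) at init time, unregister_kprobe(&my_kp) at exit */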
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 8dc0161cec8f..ef819e19650b 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -25,36 +25,6 @@
#include "common.h"
-static nokprobe_inline
-void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb, unsigned long orig_ip)
-{
- /*
- * Emulate singlestep (and also recover regs->ip)
- * as if there is a 5byte nop
- */
- regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
- if (unlikely(p->post_handler)) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- p->post_handler(p, regs, 0);
- }
- __this_cpu_write(current_kprobe, NULL);
- if (orig_ip)
- regs->ip = orig_ip;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- if (kprobe_ftrace(p)) {
- __skip_singlestep(p, regs, kcb, 0);
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
@@ -75,18 +45,25 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t);
- /* To emulate trap based kprobes, preempt_disable here */
- preempt_disable();
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, regs)) {
- __skip_singlestep(p, regs, kcb, orig_ip);
- preempt_enable_no_resched();
+ /*
+ * Emulate single-stepping (and also recover regs->ip)
+ * as if there were a 5-byte nop
+ */
+ regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ regs->ip = orig_ip;
}
/*
- * If pre_handler returns !0, it sets regs->ip and
- * resets current kprobe, and keep preempt count +1.
+ * If pre_handler returns !0, it changes regs->ip. We have to
+ * skip emulating post_handler.
*/
+ __this_cpu_write(current_kprobe, NULL);
}
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 203d398802a3..eaf02f2e7300 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -491,7 +491,6 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
if (!reenter)
reset_current_kprobe();
- preempt_enable_no_resched();
return 1;
}
return 0;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5b2300b818af..0f471bd93417 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -45,7 +45,6 @@
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
-#include <asm/kvm_guest.h>
static int kvmapf = 1;
@@ -66,15 +65,6 @@ static int __init parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
-static int kvmclock_vsyscall = 1;
-static int __init parse_no_kvmclock_vsyscall(char *arg)
-{
- kvmclock_vsyscall = 0;
- return 0;
-}
-
-early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
-
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
@@ -154,7 +144,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
for (;;) {
if (!n.halted)
- prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
if (hlist_unhashed(&n.link))
break;
@@ -188,7 +178,7 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n)
if (n->halted)
smp_send_reschedule(n->cpu);
else if (swq_has_sleeper(&n->wq))
- swake_up(&n->wq);
+ swake_up_one(&n->wq);
}
static void apf_task_wake_all(void)
@@ -454,6 +444,98 @@ static void __init sev_map_percpu_data(void)
}
#ifdef CONFIG_SMP
+#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
+
+static void __send_ipi_mask(const struct cpumask *mask, int vector)
+{
+ unsigned long flags;
+ int cpu, apic_id, icr;
+ int min = 0, max = 0;
+#ifdef CONFIG_X86_64
+ __uint128_t ipi_bitmap = 0;
+#else
+ u64 ipi_bitmap = 0;
+#endif
+
+ if (cpumask_empty(mask))
+ return;
+
+ local_irq_save(flags);
+
+ switch (vector) {
+ default:
+ icr = APIC_DM_FIXED | vector;
+ break;
+ case NMI_VECTOR:
+ icr = APIC_DM_NMI;
+ break;
+ }
+
+ for_each_cpu(cpu, mask) {
+ apic_id = per_cpu(x86_cpu_to_apicid, cpu);
+ if (!ipi_bitmap) {
+ min = max = apic_id;
+ } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
+ ipi_bitmap <<= min - apic_id;
+ min = apic_id;
+ } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
+ max = apic_id < max ? max : apic_id;
+ } else {
+ kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+ (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+ min = max = apic_id;
+ ipi_bitmap = 0;
+ }
+ __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
+ }
+
+ if (ipi_bitmap) {
+ kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+ (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+ }
+
+ local_irq_restore(flags);
+}
+
+static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
+{
+ __send_ipi_mask(mask, vector);
+}
+
+static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
+{
+ unsigned int this_cpu = smp_processor_id();
+ struct cpumask new_mask;
+ const struct cpumask *local_mask;
+
+ cpumask_copy(&new_mask, mask);
+ cpumask_clear_cpu(this_cpu, &new_mask);
+ local_mask = &new_mask;
+ __send_ipi_mask(local_mask, vector);
+}
+
+static void kvm_send_ipi_allbutself(int vector)
+{
+ kvm_send_ipi_mask_allbutself(cpu_online_mask, vector);
+}
+
+static void kvm_send_ipi_all(int vector)
+{
+ __send_ipi_mask(cpu_online_mask, vector);
+}
+
+/*
+ * Set the IPI entry points
+ */
+static void kvm_setup_pv_ipi(void)
+{
+ apic->send_IPI_mask = kvm_send_ipi_mask;
+ apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
+ apic->send_IPI_allbutself = kvm_send_ipi_allbutself;
+ apic->send_IPI_all = kvm_send_ipi_all;
+ pr_info("KVM setup pv IPIs\n");
+}
+
static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
native_smp_prepare_cpus(max_cpus);
@@ -560,9 +642,6 @@ static void __init kvm_guest_init(void)
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
apic_set_eoi_write(kvm_guest_apic_eoi_write);
- if (kvmclock_vsyscall)
- kvm_setup_vsyscall_timeinfo();
-
#ifdef CONFIG_SMP
smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
@@ -624,12 +703,27 @@ static uint32_t __init kvm_detect(void)
return kvm_cpuid_base();
}
+static void __init kvm_apic_init(void)
+{
+#if defined(CONFIG_SMP)
+ if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+ kvm_setup_pv_ipi();
+#endif
+}
+
+static void __init kvm_init_platform(void)
+{
+ kvmclock_init();
+ x86_platform.apic_post_init = kvm_apic_init;
+}
+
const __initconst struct hypervisor_x86 x86_hyper_kvm = {
.name = "KVM",
.detect = kvm_detect,
.type = X86_HYPER_KVM,
.init.guest_late_init = kvm_guest_init,
.init.x2apic_available = kvm_para_available,
+ .init.init_platform = kvm_init_platform,
};
static __init int activate_jump_labels(void)
@@ -748,6 +842,10 @@ void __init kvm_spinlock_init(void)
if (kvm_para_has_hint(KVM_HINTS_REALTIME))
return;
+ /* Don't use the pvqspinlock code if there is only 1 vCPU. */
+ if (num_possible_cpus() == 1)
+ return;
+
__pv_init_lock_hash();
pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
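
A worked example of the __send_ipi_mask() clustering, with invented APIC IDs: destinations 1, 5 and 70 fit one 128-bit window anchored at min = 1, each bit index being apic_id - min (64-bit kernel assumed):

	unsigned long lo = 0, hi = 0;			/* the two bitmap halves */
	int min = 1, icr = APIC_DM_FIXED | vector;	/* vector assumed in scope */

	lo |= 1UL << (1 - min);				/* APIC ID 1  -> bit 0, low half  */
	lo |= 1UL << (5 - min);				/* APIC ID 5  -> bit 4, low half  */
	hi |= 1UL << (70 - min - BITS_PER_LONG);	/* APIC ID 70 -> bit 5, high half */

	kvm_hypercall4(KVM_HC_SEND_IPI, lo, hi, min, icr);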
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index bf8d1eb7fca3..1e6764648af3 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -23,30 +23,57 @@
#include <asm/apic.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
-#include <linux/memblock.h>
+#include <linux/cpuhotplug.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/kvmclock.h>
-static int kvmclock __ro_after_init = 1;
-static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
-static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
-static u64 kvm_sched_clock_offset;
+static int kvmclock __initdata = 1;
+static int kvmclock_vsyscall __initdata = 1;
+static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
+static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
+static u64 kvm_sched_clock_offset __ro_after_init;
-static int parse_no_kvmclock(char *arg)
+static int __init parse_no_kvmclock(char *arg)
{
kvmclock = 0;
return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);
-/* The hypervisor will put information about time periodically here */
-static struct pvclock_vsyscall_time_info *hv_clock;
-static struct pvclock_wall_clock *wall_clock;
+static int __init parse_no_kvmclock_vsyscall(char *arg)
+{
+ kvmclock_vsyscall = 0;
+ return 0;
+}
+early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
+/* Aligned to page size to match what's mapped via vsyscalls to userspace */
+#define HV_CLOCK_SIZE (sizeof(struct pvclock_vsyscall_time_info) * NR_CPUS)
+#define HVC_BOOT_ARRAY_SIZE \
+ (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
+
+static struct pvclock_vsyscall_time_info
+ hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
+static struct pvclock_wall_clock wall_clock;
+static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+ return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+ return this_cpu_read(hv_clock_per_cpu);
+}
/*
* The wallclock is the time of day when we booted. Since then, some time may
@@ -55,21 +82,10 @@ static struct pvclock_wall_clock *wall_clock;
*/
static void kvm_get_wallclock(struct timespec64 *now)
{
- struct pvclock_vcpu_time_info *vcpu_time;
- int low, high;
- int cpu;
-
- low = (int)slow_virt_to_phys(wall_clock);
- high = ((u64)slow_virt_to_phys(wall_clock) >> 32);
-
- native_write_msr(msr_kvm_wall_clock, low, high);
-
- cpu = get_cpu();
-
- vcpu_time = &hv_clock[cpu].pvti;
- pvclock_read_wallclock(wall_clock, vcpu_time, now);
-
- put_cpu();
+ wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
+ preempt_disable();
+ pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
+ preempt_enable();
}
static int kvm_set_wallclock(const struct timespec64 *now)
@@ -79,14 +95,10 @@ static int kvm_set_wallclock(const struct timespec64 *now)
static u64 kvm_clock_read(void)
{
- struct pvclock_vcpu_time_info *src;
u64 ret;
- int cpu;
preempt_disable_notrace();
- cpu = smp_processor_id();
- src = &hv_clock[cpu].pvti;
- ret = pvclock_clocksource_read(src);
+ ret = pvclock_clocksource_read(this_cpu_pvti());
preempt_enable_notrace();
return ret;
}
@@ -112,11 +124,11 @@ static inline void kvm_sched_clock_init(bool stable)
kvm_sched_clock_offset = kvm_clock_read();
pv_time_ops.sched_clock = kvm_sched_clock_read;
- printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
- kvm_sched_clock_offset);
+ pr_info("kvm-clock: using sched offset of %llu cycles",
+ kvm_sched_clock_offset);
BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
- sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
+ sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
}
/*
@@ -130,18 +142,11 @@ static inline void kvm_sched_clock_init(bool stable)
*/
static unsigned long kvm_get_tsc_khz(void)
{
- struct pvclock_vcpu_time_info *src;
- int cpu;
- unsigned long tsc_khz;
-
- cpu = get_cpu();
- src = &hv_clock[cpu].pvti;
- tsc_khz = pvclock_tsc_khz(src);
- put_cpu();
- return tsc_khz;
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+ return pvclock_tsc_khz(this_cpu_pvti());
}
-static void kvm_get_preset_lpj(void)
+static void __init kvm_get_preset_lpj(void)
{
unsigned long khz;
u64 lpj;
@@ -155,49 +160,40 @@ static void kvm_get_preset_lpj(void)
bool kvm_check_and_clear_guest_paused(void)
{
+ struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
bool ret = false;
- struct pvclock_vcpu_time_info *src;
- int cpu = smp_processor_id();
- if (!hv_clock)
+ if (!src)
return ret;
- src = &hv_clock[cpu].pvti;
- if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
- src->flags &= ~PVCLOCK_GUEST_STOPPED;
+ if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) {
+ src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED;
pvclock_touch_watchdogs();
ret = true;
}
-
return ret;
}
struct clocksource kvm_clock = {
- .name = "kvm-clock",
- .read = kvm_clock_get_cycles,
- .rating = 400,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .name = "kvm-clock",
+ .read = kvm_clock_get_cycles,
+ .rating = 400,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
EXPORT_SYMBOL_GPL(kvm_clock);
-int kvm_register_clock(char *txt)
+static void kvm_register_clock(char *txt)
{
- int cpu = smp_processor_id();
- int low, high, ret;
- struct pvclock_vcpu_time_info *src;
-
- if (!hv_clock)
- return 0;
+ struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
+ u64 pa;
- src = &hv_clock[cpu].pvti;
- low = (int)slow_virt_to_phys(src) | 1;
- high = ((u64)slow_virt_to_phys(src) >> 32);
- ret = native_write_msr_safe(msr_kvm_system_time, low, high);
- printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
- cpu, high, low, txt);
+ if (!src)
+ return;
- return ret;
+ pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
+ wrmsrl(msr_kvm_system_time, pa);
+ pr_info("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
}
static void kvm_save_sched_clock_state(void)
@@ -212,11 +208,7 @@ static void kvm_restore_sched_clock_state(void)
#ifdef CONFIG_X86_LOCAL_APIC
static void kvm_setup_secondary_clock(void)
{
- /*
- * Now that the first cpu already had this clocksource initialized,
- * we shouldn't fail.
- */
- WARN_ON(kvm_register_clock("secondary cpu clock"));
+ kvm_register_clock("secondary cpu clock");
}
#endif
@@ -244,98 +236,84 @@ static void kvm_shutdown(void)
native_machine_shutdown();
}
-static phys_addr_t __init kvm_memblock_alloc(phys_addr_t size,
- phys_addr_t align)
+static int __init kvm_setup_vsyscall_timeinfo(void)
{
- phys_addr_t mem;
+#ifdef CONFIG_X86_64
+ u8 flags;
- mem = memblock_alloc(size, align);
- if (!mem)
+ if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
return 0;
- if (sev_active()) {
- if (early_set_memory_decrypted((unsigned long)__va(mem), size))
- goto e_free;
- }
+ flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
+ if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+ return 0;
- return mem;
-e_free:
- memblock_free(mem, size);
+ kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+#endif
return 0;
}
+early_initcall(kvm_setup_vsyscall_timeinfo);
-static void __init kvm_memblock_free(phys_addr_t addr, phys_addr_t size)
+static int kvmclock_setup_percpu(unsigned int cpu)
{
- if (sev_active())
- early_set_memory_encrypted((unsigned long)__va(addr), size);
+ struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
+
+ /*
+ * The per-cpu area setup replicates CPU0's data to all per-cpu
+ * pointers, so check carefully. CPU0 has already been set up
+ * during init.
+ */
+ if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
+ return 0;
+
+ /* Use the static page for the first CPUs, allocate otherwise */
+ if (cpu < HVC_BOOT_ARRAY_SIZE)
+ p = &hv_clock_boot[cpu];
+ else
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
- memblock_free(addr, size);
+ per_cpu(hv_clock_per_cpu, cpu) = p;
+ return p ? 0 : -ENOMEM;
}
void __init kvmclock_init(void)
{
- struct pvclock_vcpu_time_info *vcpu_time;
- unsigned long mem, mem_wall_clock;
- int size, cpu, wall_clock_size;
u8 flags;
- size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
-
- if (!kvm_para_available())
+ if (!kvm_para_available() || !kvmclock)
return;
- if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
- } else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
- return;
-
- wall_clock_size = PAGE_ALIGN(sizeof(struct pvclock_wall_clock));
- mem_wall_clock = kvm_memblock_alloc(wall_clock_size, PAGE_SIZE);
- if (!mem_wall_clock)
- return;
-
- wall_clock = __va(mem_wall_clock);
- memset(wall_clock, 0, wall_clock_size);
-
- mem = kvm_memblock_alloc(size, PAGE_SIZE);
- if (!mem) {
- kvm_memblock_free(mem_wall_clock, wall_clock_size);
- wall_clock = NULL;
+ } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
return;
}
- hv_clock = __va(mem);
- memset(hv_clock, 0, size);
-
- if (kvm_register_clock("primary cpu clock")) {
- hv_clock = NULL;
- kvm_memblock_free(mem, size);
- kvm_memblock_free(mem_wall_clock, wall_clock_size);
- wall_clock = NULL;
+ if (cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "kvmclock:setup_percpu",
+ kvmclock_setup_percpu, NULL) < 0) {
return;
}
- printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
+ pr_info("kvm-clock: Using msrs %x and %x",
msr_kvm_system_time, msr_kvm_wall_clock);
+ this_cpu_write(hv_clock_per_cpu, &hv_clock_boot[0]);
+ kvm_register_clock("primary cpu clock");
+ pvclock_set_pvti_cpu0_va(hv_clock_boot);
+
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
- cpu = get_cpu();
- vcpu_time = &hv_clock[cpu].pvti;
- flags = pvclock_read_flags(vcpu_time);
-
+ flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
kvm_sched_clock_init(flags & PVCLOCK_TSC_STABLE_BIT);
- put_cpu();
x86_platform.calibrate_tsc = kvm_get_tsc_khz;
x86_platform.calibrate_cpu = kvm_get_tsc_khz;
x86_platform.get_wallclock = kvm_get_wallclock;
x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
- x86_cpuinit.early_percpu_clock_init =
- kvm_setup_secondary_clock;
+ x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;
#endif
x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
@@ -347,34 +325,3 @@ void __init kvmclock_init(void)
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
pv_info.name = "KVM";
}
-
-int __init kvm_setup_vsyscall_timeinfo(void)
-{
-#ifdef CONFIG_X86_64
- int cpu;
- u8 flags;
- struct pvclock_vcpu_time_info *vcpu_time;
- unsigned int size;
-
- if (!hv_clock)
- return 0;
-
- size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
-
- cpu = get_cpu();
-
- vcpu_time = &hv_clock[cpu].pvti;
- flags = pvclock_read_flags(vcpu_time);
-
- if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
- put_cpu();
- return 1;
- }
-
- pvclock_set_pvti_cpu0_va(hv_clock);
- put_cpu();
-
- kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
-#endif
- return 0;
-}
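
On the sizing of hv_clock_boot[]: struct pvclock_vsyscall_time_info is padded to a cache line, 64 bytes on x86, so the single static page holds PAGE_SIZE / 64 = 64 boot-time slots before kvmclock_setup_percpu() falls back to kzalloc(). A compile-time check of that assumption could read:

	BUILD_BUG_ON(PAGE_SIZE % sizeof(struct pvclock_vsyscall_time_info));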
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c9b14020f4dd..733e6ace0fa4 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -100,6 +100,102 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
return new_ldt;
}
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+static void do_sanity_check(struct mm_struct *mm,
+ bool had_kernel_mapping,
+ bool had_user_mapping)
+{
+ if (mm->context.ldt) {
+ /*
+ * We already had an LDT. The top-level entry should already
+ * have been allocated and synchronized with the usermode
+ * tables.
+ */
+ WARN_ON(!had_kernel_mapping);
+ if (static_cpu_has(X86_FEATURE_PTI))
+ WARN_ON(!had_user_mapping);
+ } else {
+ /*
+ * This is the first time we're mapping an LDT for this process.
+ * Sync the pgd to the usermode tables.
+ */
+ WARN_ON(had_kernel_mapping);
+ if (static_cpu_has(X86_FEATURE_PTI))
+ WARN_ON(had_user_mapping);
+ }
+}
+
+#ifdef CONFIG_X86_PAE
+
+static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
+{
+ p4d_t *p4d;
+ pud_t *pud;
+
+ if (pgd->pgd == 0)
+ return NULL;
+
+ p4d = p4d_offset(pgd, va);
+ if (p4d_none(*p4d))
+ return NULL;
+
+ pud = pud_offset(p4d, va);
+ if (pud_none(*pud))
+ return NULL;
+
+ return pmd_offset(pud, va);
+}
+
+static void map_ldt_struct_to_user(struct mm_struct *mm)
+{
+ pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
+ pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+ pmd_t *k_pmd, *u_pmd;
+
+ k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
+ u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
+
+ if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ set_pmd(u_pmd, *k_pmd);
+}
+
+static void sanity_check_ldt_mapping(struct mm_struct *mm)
+{
+ pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
+ pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+ bool had_kernel, had_user;
+ pmd_t *k_pmd, *u_pmd;
+
+ k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
+ u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
+ had_kernel = (k_pmd->pmd != 0);
+ had_user = (u_pmd->pmd != 0);
+
+ do_sanity_check(mm, had_kernel, had_user);
+}
+
+#else /* !CONFIG_X86_PAE */
+
+static void map_ldt_struct_to_user(struct mm_struct *mm)
+{
+ pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
+
+ if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ set_pgd(kernel_to_user_pgdp(pgd), *pgd);
+}
+
+static void sanity_check_ldt_mapping(struct mm_struct *mm)
+{
+ pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
+ bool had_kernel = (pgd->pgd != 0);
+ bool had_user = (kernel_to_user_pgdp(pgd)->pgd != 0);
+
+ do_sanity_check(mm, had_kernel, had_user);
+}
+
+#endif /* CONFIG_X86_PAE */
+
/*
* If PTI is enabled, this maps the LDT into the kernelmode and
* usermode tables for the given mm.
@@ -115,9 +211,8 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
- bool is_vmalloc, had_top_level_entry;
unsigned long va;
+ bool is_vmalloc;
spinlock_t *ptl;
pgd_t *pgd;
int i;
@@ -131,13 +226,15 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
*/
WARN_ON(ldt->slot != -1);
+ /* Check if the current mappings are sane */
+ sanity_check_ldt_mapping(mm);
+
/*
* Did we already have the top level entry allocated? We can't
* use pgd_none() for this because it doesn't do anything on
* 4-level page table kernels.
*/
pgd = pgd_offset(mm, LDT_BASE_ADDR);
- had_top_level_entry = (pgd->pgd != 0);
is_vmalloc = is_vmalloc_addr(ldt->entries);
@@ -172,41 +269,31 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
pte_unmap_unlock(ptep, ptl);
}
- if (mm->context.ldt) {
- /*
- * We already had an LDT. The top-level entry should already
- * have been allocated and synchronized with the usermode
- * tables.
- */
- WARN_ON(!had_top_level_entry);
- if (static_cpu_has(X86_FEATURE_PTI))
- WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
- } else {
- /*
- * This is the first time we're mapping an LDT for this process.
- * Sync the pgd to the usermode tables.
- */
- WARN_ON(had_top_level_entry);
- if (static_cpu_has(X86_FEATURE_PTI)) {
- WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
- set_pgd(kernel_to_user_pgdp(pgd), *pgd);
- }
- }
+ /* Propagate LDT mapping to the user page-table */
+ map_ldt_struct_to_user(mm);
va = (unsigned long)ldt_slot_va(slot);
flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
ldt->slot = slot;
-#endif
return 0;
}
+#else /* !CONFIG_PAGE_TABLE_ISOLATION */
+
+static int
+map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+{
+ return 0;
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
struct mmu_gather tlb;
unsigned long start = LDT_BASE_ADDR;
- unsigned long end = start + (1UL << PGDIR_SHIFT);
+ unsigned long end = LDT_END_ADDR;
if (!static_cpu_has(X86_FEATURE_PTI))
return;
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index d1ab07ec8c9a..5409c2800ab5 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -56,7 +56,7 @@ static void load_segments(void)
static void machine_kexec_free_page_tables(struct kimage *image)
{
- free_page((unsigned long)image->arch.pgd);
+ free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);
image->arch.pgd = NULL;
#ifdef CONFIG_X86_PAE
free_page((unsigned long)image->arch.pmd0);
@@ -72,7 +72,8 @@ static void machine_kexec_free_page_tables(struct kimage *image)
static int machine_kexec_alloc_page_tables(struct kimage *image)
{
- image->arch.pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ PGD_ALLOCATION_ORDER);
#ifdef CONFIG_X86_PAE
image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
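
PGD_ALLOCATION_ORDER exists because PTI doubles the page-table root: the user-space copy lives in the page directly after the kernel one, so kexec must hand out an order-1 allocation. In sketch form:

	#ifdef CONFIG_PAGE_TABLE_ISOLATION
	#define PGD_ALLOCATION_ORDER	1	/* 2^1 pages: kernel PGD + user PGD */
	#else
	#define PGD_ALLOCATION_ORDER	0	/* one page */
	#endif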
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 99dc79e76bdc..930c88341e4e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -88,10 +88,12 @@ unsigned paravirt_patch_call(void *insnbuf,
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
- if (tgt_clobbers & ~site_clobbers)
- return len; /* target would clobber too much for this site */
- if (len < 5)
+ if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+ WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
+#endif
return len; /* call too long for patch site */
+ }
b->opcode = 0xe8; /* call */
b->delta = delta;
@@ -106,8 +108,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
- if (len < 5)
+ if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+ WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
+#endif
return len; /* call too long for patch site */
+ }
b->opcode = 0xe9; /* jmp */
b->delta = delta;
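
The delta in both patch helpers is measured from the end of the 5-byte instruction. A worked example with invented addresses, patching a CALL at 0x1000 to target 0x1100:

	b->opcode = 0xe8;			/* near CALL rel32 */
	b->delta  = 0x1100 - (0x1000 + 5);	/* 0xfb */
	/* a patch site shorter than 5 bytes can't hold this, hence the len < 5 bail-out */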
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 9edadabf04f6..9cb98f7b07c9 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -20,7 +20,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index ab5d9dd668d2..acfd04121da3 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -155,9 +155,6 @@ static int __init pci_iommu_init(void)
{
struct iommu_table_entry *p;
-#ifdef CONFIG_PCI
- dma_debug_add_bus(&pci_bus_type);
-#endif
x86_init.iommu.iommu_init();
for (p = __iommu_table; p < __iommu_table_end; p++) {
@@ -175,7 +172,7 @@ rootfs_initcall(pci_iommu_init);
static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
- pdev->dev.dma_32bit_limit = true;
+ pdev->dev.bus_dma_mask = DMA_BIT_MASK(32);
return 0;
}
diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
index 4dfd90a75e63..2e9006c1e240 100644
--- a/arch/x86/kernel/pci-iommu_table.c
+++ b/arch/x86/kernel/pci-iommu_table.c
@@ -60,7 +60,7 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
p->detect, q->detect);
/* Heavy handed way..*/
- x->depend = 0;
+ x->depend = NULL;
}
}
diff --git a/arch/x86/kernel/pcspeaker.c b/arch/x86/kernel/pcspeaker.c
index da5190a1ea16..4a710ffffd9a 100644
--- a/arch/x86/kernel/pcspeaker.c
+++ b/arch/x86/kernel/pcspeaker.c
@@ -9,6 +9,6 @@ static __init int add_pcspkr(void)
pd = platform_device_register_simple("pcspkr", -1, NULL, 0);
- return IS_ERR(pd) ? PTR_ERR(pd) : 0;
+ return PTR_ERR_OR_ZERO(pd);
}
device_initcall(add_pcspkr);
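
PTR_ERR_OR_ZERO() folds the IS_ERR ? PTR_ERR : 0 ternary into one helper; its definition in include/linux/err.h is essentially:

	static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		else
			return 0;
	}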
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 30ca2d1a9231..c93fcfdf1673 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -57,14 +57,12 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
*/
.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
-#ifdef CONFIG_X86_64
/*
* .sp1 is cpu_current_top_of_stack. The init task never
* runs user code, but cpu_current_top_of_stack should still
* be well defined before the first context switch.
*/
.sp1 = TOP_OF_INIT_STACK,
-#endif
#ifdef CONFIG_X86_32
.ss0 = __KERNEL_DS,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 0ae659de21eb..2924fd447e61 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -285,7 +285,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* current_thread_info(). Refresh the SYSENTER configuration in
* case prev or next is vm86.
*/
- update_sp0(next_p);
+ update_task_stack(next_p);
refresh_sysenter_cs(next);
this_cpu_write(cpu_current_top_of_stack,
(unsigned long)task_stack_page(next_p) +
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 12bb445fb98d..476e3ddf8890 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -478,7 +478,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
/* Reload sp0. */
- update_sp0(next_p);
+ update_task_stack(next_p);
/*
* Now maybe reload the debug registers and handle I/O bitmaps
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2f86d883dd95..b4866badb235 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -823,6 +823,12 @@ void __init setup_arch(char **cmdline_p)
memblock_reserve(__pa_symbol(_text),
(unsigned long)__bss_stop - (unsigned long)_text);
+ /*
+ * Make sure page 0 is always reserved because on systems with
+ * L1TF its contents can be leaked to user processes.
+ */
+ memblock_reserve(0, PAGE_SIZE);
+
early_reserve_initrd();
/*
@@ -866,6 +872,8 @@ void __init setup_arch(char **cmdline_p)
idt_setup_early_traps();
early_cpu_init();
+ arch_init_ideal_nops();
+ jump_label_init();
early_ioremap_init();
setup_olpc_ofw_pgd();
@@ -991,11 +999,6 @@ void __init setup_arch(char **cmdline_p)
setup_clear_cpu_cap(X86_FEATURE_APIC);
}
-#ifdef CONFIG_PCI
- if (pci_early_dump_regs)
- early_dump_pci_devices();
-#endif
-
e820__reserve_setup_data();
e820__finish_early_params();
@@ -1012,6 +1015,7 @@ void __init setup_arch(char **cmdline_p)
*/
init_hypervisor_platform();
+ tsc_early_init();
x86_init.resources.probe_roms();
/* after parse_early_param, so could debug it */
@@ -1197,11 +1201,6 @@ void __init setup_arch(char **cmdline_p)
memblock_find_dma_reserve();
-#ifdef CONFIG_KVM_GUEST
- kvmclock_init();
-#endif
-
- tsc_early_delay_calibrate();
if (!early_xdbc_setup_hardware())
early_xdbc_register_console();
@@ -1272,8 +1271,6 @@ void __init setup_arch(char **cmdline_p)
mcheck_init();
- arch_init_ideal_nops();
-
register_refined_jiffies(CLOCK_TICK_RATE);
#ifdef CONFIG_EFI
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 5c574dff4c1a..04adc8d60aed 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -261,6 +261,7 @@ __visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
inc_irq_stat(irq_resched_count);
+ kvm_set_cpu_l1tf_flush_l1d();
if (trace_resched_ipi_enabled()) {
/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c2f7d1d2a5c3..f02ecaf97904 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -80,6 +80,7 @@
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
+#include <asm/hw_irq.h>
/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
@@ -221,6 +222,11 @@ static void notrace start_secondary(void *unused)
#ifdef CONFIG_X86_32
/* switch away from the initial page table */
load_cr3(swapper_pg_dir);
+ /*
+ * Initialize the CR4 shadow before doing anything that could
+ * try to read it.
+ */
+ cr4_init_shadow();
__flush_tlb_all();
#endif
load_current_idt();
@@ -266,6 +272,23 @@ static void notrace start_secondary(void *unused)
}
/**
+ * topology_is_primary_thread - Check whether CPU is the primary SMT thread
+ * @cpu: CPU to check
+ */
+bool topology_is_primary_thread(unsigned int cpu)
+{
+ return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
+}
+
+/**
+ * topology_smt_supported - Check whether SMT is supported by the CPUs
+ */
+bool topology_smt_supported(void)
+{
+ return smp_num_siblings > 1;
+}
+
+/**
* topology_phys_to_logical_pkg - Map a physical package id to a logical
*
* Returns logical package id or -1 if not found
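topology_is_primary_thread() defers to apic_id_is_primary_thread(); the idea, sketched below under the usual x86 topology encoding (an assumption for the example), is that the low ceil(log2(siblings)) bits of the APIC ID select the SMT thread, so thread 0 of each core has them all clear.

#include <stdbool.h>

static bool apicid_is_primary(unsigned int apicid, unsigned int smt_siblings)
{
	unsigned int mask, bits = 0;

	if (smt_siblings <= 1)
		return true;

	while ((1u << bits) < smt_siblings)	/* ceil(log2(siblings)) */
		bits++;
	mask = (1u << bits) - 1;

	return !(apicid & mask);		/* thread 0 of the core */
}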
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 093f2ea5dd56..7627455047c2 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -81,16 +81,6 @@ EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-#define STACKTRACE_DUMP_ONCE(task) ({ \
- static bool __section(.data.unlikely) __dumped; \
- \
- if (!__dumped) { \
- __dumped = true; \
- WARN_ON(1); \
- show_stack(task, NULL); \
- } \
-})
-
static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
struct task_struct *task)
@@ -99,30 +89,25 @@ __save_stack_trace_reliable(struct stack_trace *trace,
struct pt_regs *regs;
unsigned long addr;
- for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
+ for (unwind_start(&state, task, NULL, NULL);
+ !unwind_done(&state) && !unwind_error(&state);
unwind_next_frame(&state)) {
regs = unwind_get_entry_regs(&state, NULL);
if (regs) {
+ /* Success path for user tasks */
+ if (user_mode(regs))
+ goto success;
+
/*
* Kernel mode registers on the stack indicate an
* in-kernel interrupt or exception (e.g., preemption
* or a page fault), which can make frame pointers
* unreliable.
*/
- if (!user_mode(regs))
- return -EINVAL;
- /*
- * The last frame contains the user mode syscall
- * pt_regs. Skip it and finish the unwind.
- */
- unwind_next_frame(&state);
- if (!unwind_done(&state)) {
- STACKTRACE_DUMP_ONCE(task);
+ if (IS_ENABLED(CONFIG_FRAME_POINTER))
return -EINVAL;
- }
- break;
}
addr = unwind_get_return_address(&state);
@@ -132,21 +117,22 @@ __save_stack_trace_reliable(struct stack_trace *trace,
* generated code which __kernel_text_address() doesn't know
* about.
*/
- if (!addr) {
- STACKTRACE_DUMP_ONCE(task);
+ if (!addr)
return -EINVAL;
- }
if (save_stack_address(trace, addr, false))
return -EINVAL;
}
/* Check for stack corruption */
- if (unwind_error(&state)) {
- STACKTRACE_DUMP_ONCE(task);
+ if (unwind_error(&state))
+ return -EINVAL;
+
+ /* Success path for non-user tasks, i.e. kthreads and idle tasks */
+ if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
return -EINVAL;
- }
+success:
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
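The rewritten __save_stack_trace_reliable() thus has exactly two success paths: a user task whose unwind cleanly reaches user-mode pt_regs, and a kthread/idle task whose unwind ends without error. A toy model with the unwind iterator stubbed out (the types and stubs are illustrative, not the kernel API; the PF_KTHREAD/PF_IDLE test is folded into a boolean):

#include <errno.h>
#include <stdbool.h>

struct unwind_state {
	bool done;
	bool error;
	bool user_regs;		/* frame carries user-mode pt_regs */
	int  frames_left;
};

static bool unwind_done(struct unwind_state *s)  { return s->done; }
static bool unwind_error(struct unwind_state *s) { return s->error; }

static void unwind_next_frame(struct unwind_state *s)
{
	if (--s->frames_left <= 0)
		s->done = true;
}

static int save_reliable(struct unwind_state *s, bool kthread_or_idle)
{
	for (; !unwind_done(s) && !unwind_error(s); unwind_next_frame(s)) {
		if (s->user_regs)
			return 0;	/* user task: clean end at pt_regs */
		/* ...record the frame's return address here... */
	}
	if (unwind_error(s))
		return -EINVAL;		/* corrupt stack */
	if (!kthread_or_idle)
		return -EINVAL;		/* user task never reached pt_regs */
	return 0;			/* kthread/idle: clean end */
}

int main(void)
{
	struct unwind_state user = { .user_regs = true, .frames_left = 3 };
	struct unwind_state kthr = { .frames_left = 3 };

	return save_reliable(&user, false) || save_reliable(&kthr, true);
}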
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 774ebafa97c4..be01328eb755 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -12,6 +12,7 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/i8253.h>
#include <linux/time.h>
#include <linux/export.h>
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 74392d9d51e0..1463468ba9a0 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -33,16 +33,13 @@ EXPORT_SYMBOL(cpu_khz);
unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);
+#define KHZ 1000
+
/*
* TSC can be unstable due to cpufreq or due to unsynced TSCs
*/
static int __read_mostly tsc_unstable;
-/* native_sched_clock() is called before tsc_init(), so
- we must start with the TSC soft disabled to prevent
- erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
-static int __read_mostly tsc_disabled = -1;
-
static DEFINE_STATIC_KEY_FALSE(__use_tsc);
int tsc_clocksource_reliable;
@@ -106,23 +103,6 @@ void cyc2ns_read_end(void)
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
-static void cyc2ns_data_init(struct cyc2ns_data *data)
-{
- data->cyc2ns_mul = 0;
- data->cyc2ns_shift = 0;
- data->cyc2ns_offset = 0;
-}
-
-static void __init cyc2ns_init(int cpu)
-{
- struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-
- cyc2ns_data_init(&c2n->data[0]);
- cyc2ns_data_init(&c2n->data[1]);
-
- seqcount_init(&c2n->seq);
-}
-
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
struct cyc2ns_data data;
@@ -138,18 +118,11 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
return ns;
}
-static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
+static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
unsigned long long ns_now;
struct cyc2ns_data data;
struct cyc2ns *c2n;
- unsigned long flags;
-
- local_irq_save(flags);
- sched_clock_idle_sleep_event();
-
- if (!khz)
- goto done;
ns_now = cycles_2_ns(tsc_now);
@@ -181,13 +154,56 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_
c2n->data[0] = data;
raw_write_seqcount_latch(&c2n->seq);
c2n->data[1] = data;
+}
+
+static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sched_clock_idle_sleep_event();
+
+ if (khz)
+ __set_cyc2ns_scale(khz, cpu, tsc_now);
-done:
sched_clock_idle_wakeup_event();
local_irq_restore(flags);
}
/*
+ * Initialize cyc2ns for boot cpu
+ */
+static void __init cyc2ns_init_boot_cpu(void)
+{
+ struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+
+ seqcount_init(&c2n->seq);
+ __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
+}
+
+/*
+ * Secondary CPUs do not run through tsc_init(), so set up
+ * all the scale factors for all CPUs, assuming the same
+ * speed as the bootup CPU. (cpufreq notifiers will fix this
+ * up if their speed diverges)
+ */
+static void __init cyc2ns_init_secondary_cpus(void)
+{
+ unsigned int cpu, this_cpu = smp_processor_id();
+ struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+ struct cyc2ns_data *data = c2n->data;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu != this_cpu) {
+ seqcount_init(&c2n->seq);
+ c2n = per_cpu_ptr(&cyc2ns, cpu);
+ c2n->data[0] = data[0];
+ c2n->data[1] = data[1];
+ }
+ }
+}
+
+/*
* Scheduler clock - returns current time in nanosec units.
*/
u64 native_sched_clock(void)
@@ -248,8 +264,7 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
- pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
- tsc_disabled = 1;
+ mark_tsc_unstable("boot parameter notsc");
return 1;
}
#else
@@ -665,30 +680,17 @@ static unsigned long cpu_khz_from_cpuid(void)
return eax_base_mhz * 1000;
}
-/**
- * native_calibrate_cpu - calibrate the cpu on boot
+/*
+ * Calibrate the CPU using the PIT, HPET, and ACPI PM timer ("ptimer")
+ * methods. These only become available later in boot, once ACPI has
+ * been initialized.
*/
-unsigned long native_calibrate_cpu(void)
+static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
u64 tsc1, tsc2, delta, ref1, ref2;
unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
- unsigned long flags, latch, ms, fast_calibrate;
+ unsigned long flags, latch, ms;
int hpet = is_hpet_enabled(), i, loopmin;
- fast_calibrate = cpu_khz_from_cpuid();
- if (fast_calibrate)
- return fast_calibrate;
-
- fast_calibrate = cpu_khz_from_msr();
- if (fast_calibrate)
- return fast_calibrate;
-
- local_irq_save(flags);
- fast_calibrate = quick_pit_calibrate();
- local_irq_restore(flags);
- if (fast_calibrate)
- return fast_calibrate;
-
/*
* Run 5 calibration loops to get the lowest frequency value
* (the best estimate). We use two different calibration modes
@@ -831,6 +833,37 @@ unsigned long native_calibrate_cpu(void)
return tsc_pit_min;
}
+/**
+ * native_calibrate_cpu_early - calibrate the CPU with the methods available early in boot
+ */
+unsigned long native_calibrate_cpu_early(void)
+{
+ unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
+
+ if (!fast_calibrate)
+ fast_calibrate = cpu_khz_from_msr();
+ if (!fast_calibrate) {
+ local_irq_save(flags);
+ fast_calibrate = quick_pit_calibrate();
+ local_irq_restore(flags);
+ }
+ return fast_calibrate;
+}
+
+/**
+ * native_calibrate_cpu - calibrate the cpu
+ */
+static unsigned long native_calibrate_cpu(void)
+{
+ unsigned long tsc_freq = native_calibrate_cpu_early();
+
+ if (!tsc_freq)
+ tsc_freq = pit_hpet_ptimer_calibrate_cpu();
+
+ return tsc_freq;
+}
+
void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
@@ -1307,7 +1340,7 @@ unreg:
static int __init init_tsc_clocksource(void)
{
- if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
+ if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
return 0;
if (tsc_unstable)
@@ -1341,40 +1374,22 @@ unreg:
*/
device_initcall(init_tsc_clocksource);
-void __init tsc_early_delay_calibrate(void)
+static bool __init determine_cpu_tsc_frequencies(bool early)
{
- unsigned long lpj;
-
- if (!boot_cpu_has(X86_FEATURE_TSC))
- return;
-
- cpu_khz = x86_platform.calibrate_cpu();
- tsc_khz = x86_platform.calibrate_tsc();
-
- tsc_khz = tsc_khz ? : cpu_khz;
- if (!tsc_khz)
- return;
-
- lpj = tsc_khz * 1000;
- do_div(lpj, HZ);
- loops_per_jiffy = lpj;
-}
-
-void __init tsc_init(void)
-{
- u64 lpj, cyc;
- int cpu;
-
- if (!boot_cpu_has(X86_FEATURE_TSC)) {
- setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
- return;
+ /* Make sure that cpu and tsc are not already calibrated */
+ WARN_ON(cpu_khz || tsc_khz);
+
+ if (early) {
+ cpu_khz = x86_platform.calibrate_cpu();
+ tsc_khz = x86_platform.calibrate_tsc();
+ } else {
+ /* We should not be here with non-native cpu calibration */
+ WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
+ cpu_khz = pit_hpet_ptimer_calibrate_cpu();
}
- cpu_khz = x86_platform.calibrate_cpu();
- tsc_khz = x86_platform.calibrate_tsc();
-
/*
- * Trust non-zero tsc_khz as authorative,
+ * Trust non-zero tsc_khz as authoritative,
* and use it to sanity check cpu_khz,
* which will be off if system timer is off.
*/
@@ -1383,52 +1398,78 @@ void __init tsc_init(void)
else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
cpu_khz = tsc_khz;
- if (!tsc_khz) {
- mark_tsc_unstable("could not calculate TSC khz");
- setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
- return;
- }
+ if (tsc_khz == 0)
+ return false;
pr_info("Detected %lu.%03lu MHz processor\n",
- (unsigned long)cpu_khz / 1000,
- (unsigned long)cpu_khz % 1000);
+ (unsigned long)cpu_khz / KHZ,
+ (unsigned long)cpu_khz % KHZ);
if (cpu_khz != tsc_khz) {
pr_info("Detected %lu.%03lu MHz TSC",
- (unsigned long)tsc_khz / 1000,
- (unsigned long)tsc_khz % 1000);
+ (unsigned long)tsc_khz / KHZ,
+ (unsigned long)tsc_khz % KHZ);
}
+ return true;
+}
+
+static unsigned long __init get_loops_per_jiffy(void)
+{
+ unsigned long lpj = tsc_khz * KHZ;
+ do_div(lpj, HZ);
+ return lpj;
+}
+
+static void __init tsc_enable_sched_clock(void)
+{
/* Sanitize TSC ADJUST before cyc2ns gets initialized */
tsc_store_and_check_tsc_adjust(true);
+ cyc2ns_init_boot_cpu();
+ static_branch_enable(&__use_tsc);
+}
+
+void __init tsc_early_init(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_TSC))
+ return;
+ if (!determine_cpu_tsc_frequencies(true))
+ return;
+ loops_per_jiffy = get_loops_per_jiffy();
+ tsc_enable_sched_clock();
+}
+
+void __init tsc_init(void)
+{
/*
- * Secondary CPUs do not run through tsc_init(), so set up
- * all the scale factors for all CPUs, assuming the same
- * speed as the bootup CPU. (cpufreq notifiers will fix this
- * up if their speed diverges)
+ * native_calibrate_cpu_early can only calibrate using methods that are
+ * available early in boot.
*/
- cyc = rdtsc();
- for_each_possible_cpu(cpu) {
- cyc2ns_init(cpu);
- set_cyc2ns_scale(tsc_khz, cpu, cyc);
- }
+ if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
+ x86_platform.calibrate_cpu = native_calibrate_cpu;
- if (tsc_disabled > 0)
+ if (!boot_cpu_has(X86_FEATURE_TSC)) {
+ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
return;
+ }
- /* now allow native_sched_clock() to use rdtsc */
+ if (!tsc_khz) {
+ /* We failed to determine frequencies earlier, try again */
+ if (!determine_cpu_tsc_frequencies(false)) {
+ mark_tsc_unstable("could not calculate TSC khz");
+ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ return;
+ }
+ tsc_enable_sched_clock();
+ }
- tsc_disabled = 0;
- static_branch_enable(&__use_tsc);
+ cyc2ns_init_secondary_cpus();
if (!no_sched_irq_time)
enable_sched_clock_irqtime();
- lpj = ((u64)tsc_khz * 1000);
- do_div(lpj, HZ);
- lpj_fine = lpj;
-
+ lpj_fine = get_loops_per_jiffy();
use_tsc_delay();
check_system_tsc_reliable();
@@ -1455,7 +1496,7 @@ unsigned long calibrate_delay_is_known(void)
int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
const struct cpumask *mask = topology_core_cpumask(cpu);
- if (tsc_disabled || !constant_tsc || !mask)
+ if (!constant_tsc || !mask)
return 0;
sibling = cpumask_any_but(mask, cpu);
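A worked example of get_loops_per_jiffy() from the tsc.c changes above: loops_per_jiffy is simply the TSC frequency expressed per scheduler tick. HZ=250 is an assumption for the example.

#include <stdio.h>

#define KHZ 1000
#define HZ  250		/* assumed tick rate */

int main(void)
{
	unsigned long tsc_khz = 2400000;		/* 2.4 GHz */
	unsigned long lpj = tsc_khz * KHZ / HZ;

	printf("lpj = %lu\n", lpj);			/* 9600000 */
	return 0;
}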
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 19afdbd7d0a7..27ef714d886c 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -1,17 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * tsc_msr.c - TSC frequency enumeration via MSR
+ * TSC frequency enumeration via MSR
*
- * Copyright (C) 2013 Intel Corporation
+ * Copyright (C) 2013, 2018 Intel Corporation
* Author: Bin Gao <bin.gao@intel.com>
- *
- * This file is released under the GPLv2.
*/
#include <linux/kernel.h>
-#include <asm/processor.h>
-#include <asm/setup.h>
+
#include <asm/apic.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/msr.h>
#include <asm/param.h>
+#include <asm/tsc.h>
#define MAX_NUM_FREQS 9
@@ -23,44 +25,48 @@
* field msr_plat does.
*/
struct freq_desc {
- u8 x86_family; /* CPU family */
- u8 x86_model; /* model */
u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
u32 freqs[MAX_NUM_FREQS];
};
-static struct freq_desc freq_desc_tables[] = {
- /* PNW */
- { 6, 0x27, 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 } },
- /* CLV+ */
- { 6, 0x35, 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 } },
- /* TNG - Intel Atom processor Z3400 series */
- { 6, 0x4a, 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 } },
- /* VLV2 - Intel Atom processor E3000, Z3600, Z3700 series */
- { 6, 0x37, 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 } },
- /* ANN - Intel Atom processor Z3500 series */
- { 6, 0x5a, 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } },
- /* AMT - Intel Atom processor X7-Z8000 and X5-Z8000 series */
- { 6, 0x4c, 1, { 83300, 100000, 133300, 116700,
- 80000, 93300, 90000, 88900, 87500 } },
+/*
+ * Penwell and Clovertrail use a spread-spectrum clock, so the
+ * frequency numbers are not exactly the same as what the MSR
+ * reports according to the SDM.
+ */
+static const struct freq_desc freq_desc_pnw = {
+ 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 }
};
-static int match_cpu(u8 family, u8 model)
-{
- int i;
+static const struct freq_desc freq_desc_clv = {
+ 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 }
+};
- for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) {
- if ((family == freq_desc_tables[i].x86_family) &&
- (model == freq_desc_tables[i].x86_model))
- return i;
- }
+static const struct freq_desc freq_desc_byt = {
+ 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 }
+};
- return -1;
-}
+static const struct freq_desc freq_desc_cht = {
+ 1, { 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500 }
+};
-/* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */
-#define id_to_freq(cpu_index, freq_id) \
- (freq_desc_tables[cpu_index].freqs[freq_id])
+static const struct freq_desc freq_desc_tng = {
+ 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 }
+};
+
+static const struct freq_desc freq_desc_ann = {
+ 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 }
+};
+
+static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
+ INTEL_CPU_FAM6(ATOM_PENWELL, freq_desc_pnw),
+ INTEL_CPU_FAM6(ATOM_CLOVERVIEW, freq_desc_clv),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT1, freq_desc_byt),
+ INTEL_CPU_FAM6(ATOM_AIRMONT, freq_desc_cht),
+ INTEL_CPU_FAM6(ATOM_MERRIFIELD, freq_desc_tng),
+ INTEL_CPU_FAM6(ATOM_MOOREFIELD, freq_desc_ann),
+ {}
+};
/*
* MSR-based CPU/TSC frequency discovery for certain CPUs.
@@ -70,18 +76,17 @@ static int match_cpu(u8 family, u8 model)
*/
unsigned long cpu_khz_from_msr(void)
{
- u32 lo, hi, ratio, freq_id, freq;
+ u32 lo, hi, ratio, freq;
+ const struct freq_desc *freq_desc;
+ const struct x86_cpu_id *id;
unsigned long res;
- int cpu_index;
-
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- return 0;
- cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
- if (cpu_index < 0)
+ id = x86_match_cpu(tsc_msr_cpu_ids);
+ if (!id)
return 0;
- if (freq_desc_tables[cpu_index].msr_plat) {
+ freq_desc = (struct freq_desc *)id->driver_data;
+ if (freq_desc->msr_plat) {
rdmsr(MSR_PLATFORM_INFO, lo, hi);
ratio = (lo >> 8) & 0xff;
} else {
@@ -91,8 +96,9 @@ unsigned long cpu_khz_from_msr(void)
/* Get FSB FREQ ID */
rdmsr(MSR_FSB_FREQ, lo, hi);
- freq_id = lo & 0x7;
- freq = id_to_freq(cpu_index, freq_id);
+
+ /* Map the CPU reference clock freq ID (0-7) to the reference clock freq (kHz) */
+ freq = freq_desc->freqs[lo & 0x7];
/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
res = freq * ratio;
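A worked example of that computation, with made-up MSR contents: the reference clock in kHz (selected by the low 3 bits of MSR_FSB_FREQ) times the bus ratio (MSR_PLATFORM_INFO bits 15:8) gives the TSC frequency in kHz.

#include <stdio.h>

int main(void)
{
	unsigned int freqs[8] = { 83300, 100000, 133300, 116700,
				  80000, 93300, 90000, 88900 };
	unsigned int msr_platform_info = 24 << 8;	/* hypothetical: ratio 24 */
	unsigned int msr_fsb_freq = 0;			/* hypothetical: freq ID 0 */

	unsigned int ratio = (msr_platform_info >> 8) & 0xff;
	unsigned long khz = (unsigned long)freqs[msr_fsb_freq & 0x7] * ratio;

	printf("%lu kHz\n", khz);			/* 1999200, ~2.0 GHz */
	return 0;
}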
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index feb28fee6cea..26038eacf74a 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -198,7 +198,7 @@ static int orc_sort_cmp(const void *_a, const void *_b)
* whitelisted .o files which didn't get objtool generation.
*/
orc_a = cur_orc_table + (a - cur_orc_ip_table);
- return orc_a->sp_reg == ORC_REG_UNDEFINED ? -1 : 1;
+ return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}
#ifdef CONFIG_MODULES
@@ -352,7 +352,7 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
bool unwind_next_frame(struct unwind_state *state)
{
- unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
+ unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
enum stack_type prev_type = state->stack_info.type;
struct orc_entry *orc;
bool indirect = false;
@@ -363,9 +363,9 @@ bool unwind_next_frame(struct unwind_state *state)
/* Don't let modules unload while we're reading their ORC data. */
preempt_disable();
- /* Have we reached the end? */
+ /* End-of-stack check for user tasks: */
if (state->regs && user_mode(state->regs))
- goto done;
+ goto the_end;
/*
* Find the orc_entry associated with the text address.
@@ -374,9 +374,16 @@ bool unwind_next_frame(struct unwind_state *state)
* calls and calls to noreturn functions.
*/
orc = orc_find(state->signal ? state->ip : state->ip - 1);
- if (!orc || orc->sp_reg == ORC_REG_UNDEFINED)
- goto done;
- orig_ip = state->ip;
+ if (!orc)
+ goto err;
+
+ /* End-of-stack check for kernel threads: */
+ if (orc->sp_reg == ORC_REG_UNDEFINED) {
+ if (!orc->end)
+ goto err;
+
+ goto the_end;
+ }
/* Find the previous frame's stack: */
switch (orc->sp_reg) {
@@ -402,7 +409,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg R10 at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->r10;
break;
@@ -411,7 +418,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg R13 at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->r13;
break;
@@ -420,7 +427,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg DI at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->di;
break;
@@ -429,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg DX at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->dx;
break;
@@ -437,12 +444,12 @@ bool unwind_next_frame(struct unwind_state *state)
default:
orc_warn("unknown SP base reg %d for ip %pB\n",
orc->sp_reg, (void *)state->ip);
- goto done;
+ goto err;
}
if (indirect) {
if (!deref_stack_reg(state, sp, &sp))
- goto done;
+ goto err;
}
/* Find IP, SP and possibly regs: */
@@ -451,7 +458,7 @@ bool unwind_next_frame(struct unwind_state *state)
ip_p = sp - sizeof(long);
if (!deref_stack_reg(state, ip_p, &state->ip))
- goto done;
+ goto err;
state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
state->ip, (void *)ip_p);
@@ -465,7 +472,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
orc_warn("can't dereference registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
- goto done;
+ goto err;
}
state->regs = (struct pt_regs *)sp;
@@ -477,7 +484,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
orc_warn("can't dereference iret registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
- goto done;
+ goto err;
}
state->regs = (void *)sp - IRET_FRAME_OFFSET;
@@ -500,18 +507,18 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_PREV_SP:
if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
- goto done;
+ goto err;
break;
case ORC_REG_BP:
if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
- goto done;
+ goto err;
break;
default:
orc_warn("unknown BP base reg %d for ip %pB\n",
orc->bp_reg, (void *)orig_ip);
- goto done;
+ goto err;
}
/* Prevent a recursive loop due to bad ORC data: */
@@ -520,13 +527,16 @@ bool unwind_next_frame(struct unwind_state *state)
state->sp <= prev_sp) {
orc_warn("stack going in the wrong direction? ip=%pB\n",
(void *)orig_ip);
- goto done;
+ goto err;
}
preempt_enable();
return true;
-done:
+err:
+ state->error = true;
+
+the_end:
preempt_enable();
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 9d0b5af7db91..1c03e4aa6474 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -149,7 +149,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
preempt_disable();
tsk->thread.sp0 = vm86->saved_sp0;
tsk->thread.sysenter_cs = __KERNEL_CS;
- update_sp0(tsk);
+ update_task_stack(tsk);
refresh_sysenter_cs(&tsk->thread);
vm86->saved_sp0 = 0;
preempt_enable();
@@ -374,7 +374,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
refresh_sysenter_cs(&tsk->thread);
}
- update_sp0(tsk);
+ update_task_stack(tsk);
preempt_enable();
if (vm86->flags & VM86_SCREEN_BITMAP)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 5e1458f609a1..8bde0a419f86 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -55,19 +55,22 @@ jiffies_64 = jiffies;
* so we can enable protection checks as well as retain 2MB large page
* mappings for kernel text.
*/
-#define X64_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
+#define X86_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
-#define X64_ALIGN_RODATA_END \
+#define X86_ALIGN_RODATA_END \
. = ALIGN(HPAGE_SIZE); \
- __end_rodata_hpage_align = .;
+ __end_rodata_hpage_align = .; \
+ __end_rodata_aligned = .;
#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
#else
-#define X64_ALIGN_RODATA_BEGIN
-#define X64_ALIGN_RODATA_END
+#define X86_ALIGN_RODATA_BEGIN
+#define X86_ALIGN_RODATA_END \
+ . = ALIGN(PAGE_SIZE); \
+ __end_rodata_aligned = .;
#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
@@ -141,9 +144,9 @@ SECTIONS
/* .text should occupy whole number of pages */
. = ALIGN(PAGE_SIZE);
- X64_ALIGN_RODATA_BEGIN
+ X86_ALIGN_RODATA_BEGIN
RO_DATA(PAGE_SIZE)
- X64_ALIGN_RODATA_END
+ X86_ALIGN_RODATA_END
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 3ab867603e81..2792b5573818 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -109,7 +109,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
static void default_nmi_init(void) { };
struct x86_platform_ops x86_platform __ro_after_init = {
- .calibrate_cpu = native_calibrate_cpu,
+ .calibrate_cpu = native_calibrate_cpu_early,
.calibrate_tsc = native_calibrate_tsc,
.get_wallclock = mach_get_cmos_time,
.set_wallclock = mach_set_rtc_mmss,
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 92fd433c50b9..1bbec387d289 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -85,7 +85,7 @@ config KVM_AMD_SEV
def_bool y
bool "AMD Secure Encrypted Virtualization (SEV) support"
depends on KVM_AMD && X86_64
- depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP
+ depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
---help---
Provides support for launching Encrypted VMs on AMD processors.
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7e042e3d47fd..7bcfa61375c0 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -621,7 +621,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
(1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
(1 << KVM_FEATURE_PV_UNHALT) |
(1 << KVM_FEATURE_PV_TLB_FLUSH) |
- (1 << KVM_FEATURE_ASYNC_PF_VMEXIT);
+ (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
+ (1 << KVM_FEATURE_PV_SEND_IPI);
if (sched_info_on())
entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4c4f4263420c..106482da6388 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4191,7 +4191,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
maxphyaddr = 36;
rsvd = rsvd_bits(maxphyaddr, 63);
if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
- rsvd &= ~CR3_PCID_INVD;
+ rsvd &= ~X86_CR3_PCID_NOFLUSH;
}
if (new_val & rsvd)
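A sketch of the check under these rules, with rsvd_bits() re-stated to mirror KVM's helper: when CR4.PCIDE is set, bit 63 of a MOV-to-CR3 value is the PCID no-flush hint rather than a reserved bit, so it must be carved out of the reserved mask.

#include <stdint.h>

static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;	/* mask of bits s..e */
}

static int cr3_rsvd_fault(uint64_t cr3, int maxphyaddr, int pcide)
{
	uint64_t rsvd = rsvd_bits(maxphyaddr, 63);

	if (pcide)
		rsvd &= ~(1ULL << 63);	/* X86_CR3_PCID_NOFLUSH */

	return (cr3 & rsvd) != 0;
}

int main(void)
{
	uint64_t cr3 = (1ULL << 63) | 0x1000;	/* no-flush hint + page base */

	return cr3_rsvd_fault(cr3, 36, 1) == 0 &&
	       cr3_rsvd_fault(cr3, 36, 0) == 1 ? 0 : 1;
}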
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index af8caf965baa..01d209ab5481 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -235,7 +235,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
int ret;
- if (!synic->active)
+ if (!synic->active && !host)
return 1;
trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
@@ -295,11 +295,12 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
return ret;
}
-static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
+static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
+ bool host)
{
int ret;
- if (!synic->active)
+ if (!synic->active && !host)
return 1;
ret = 0;
@@ -1014,6 +1015,11 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
case HV_X64_MSR_TSC_EMULATION_STATUS:
hv->hv_tsc_emulation_status = data;
break;
+ case HV_X64_MSR_TIME_REF_COUNT:
+ /* read-only, but still ignore it if host-initiated */
+ if (!host)
+ return 1;
+ break;
default:
vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
msr, data);
@@ -1101,6 +1107,12 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
data, host);
}
+ case HV_X64_MSR_TSC_FREQUENCY:
+ case HV_X64_MSR_APIC_FREQUENCY:
+ /* read-only, but still ignore it if host-initiated */
+ if (!host)
+ return 1;
+ break;
default:
vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
msr, data);
@@ -1156,7 +1168,8 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
return 0;
}
-static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
+ bool host)
{
u64 data = 0;
struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
@@ -1183,7 +1196,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case HV_X64_MSR_SIMP:
case HV_X64_MSR_EOM:
case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
- return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
+ return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
case HV_X64_MSR_STIMER0_CONFIG:
case HV_X64_MSR_STIMER1_CONFIG:
case HV_X64_MSR_STIMER2_CONFIG:
@@ -1229,7 +1242,7 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
return kvm_hv_set_msr(vcpu, msr, data, host);
}
-int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
if (kvm_hv_msr_partition_wide(msr)) {
int r;
@@ -1239,7 +1252,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
return r;
} else
- return kvm_hv_get_msr(vcpu, msr, pdata);
+ return kvm_hv_get_msr(vcpu, msr, pdata, host);
}
static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 837465d69c6d..d6aa969e20f1 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -48,7 +48,7 @@ static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
}
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
-int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
bool kvm_hv_hypercall_enabled(struct kvm *kvm);
int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b5cd8465d44f..0cefba28c864 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -547,6 +547,46 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
irq->level, irq->trig_mode, dest_map);
}
+int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
+ unsigned long ipi_bitmap_high, int min,
+ unsigned long icr, int op_64_bit)
+{
+ int i;
+ struct kvm_apic_map *map;
+ struct kvm_vcpu *vcpu;
+ struct kvm_lapic_irq irq = {0};
+ int cluster_size = op_64_bit ? 64 : 32;
+ int count = 0;
+
+ irq.vector = icr & APIC_VECTOR_MASK;
+ irq.delivery_mode = icr & APIC_MODE_MASK;
+ irq.level = (icr & APIC_INT_ASSERT) != 0;
+ irq.trig_mode = icr & APIC_INT_LEVELTRIG;
+
+ if (icr & APIC_DEST_MASK)
+ return -KVM_EINVAL;
+ if (icr & APIC_SHORT_MASK)
+ return -KVM_EINVAL;
+
+ rcu_read_lock();
+ map = rcu_dereference(kvm->arch.apic_map);
+
+ /* Bits above cluster_size are masked in the caller. */
+ for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) {
+ vcpu = map->phys_map[min + i]->vcpu;
+ count += kvm_apic_set_irq(vcpu, &irq, NULL);
+ }
+
+ min += cluster_size;
+ for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) {
+ vcpu = map->phys_map[min + i]->vcpu;
+ count += kvm_apic_set_irq(vcpu, &irq, NULL);
+ }
+
+ rcu_read_unlock();
+ return count;
+}
+
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
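A toy decoding of the kvm_pv_send_ipi() arguments added above: each set bit i of the low bitmap targets APIC ID min + i, and the high bitmap continues at min + cluster_size (64 for 64-bit guests). The values below are hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bitmap_low = 0x5;	/* hypothetical: vCPUs at bits 0 and 2 */
	uint64_t bitmap_high = 0x1;	/* hypothetical: one more in the next cluster */
	int min = 8, cluster_size = 64;	/* 64-bit guest */

	for (int i = 0; i < 64; i++)
		if (bitmap_low & (1ULL << i))
			printf("IPI -> APIC ID %d\n", min + i);		/* 8, 10 */
	for (int i = 0; i < 64; i++)
		if (bitmap_high & (1ULL << i))
			printf("IPI -> APIC ID %d\n", min + cluster_size + i);	/* 72 */
	return 0;
}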
@@ -1379,7 +1419,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
* using swait_active() is safe.
*/
if (swait_active(q))
- swake_up(q);
+ swake_up_one(q);
if (apic_lvtt_tscdeadline(apic))
ktimer->expired_tscdeadline = ktimer->tscdeadline;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d594690d8b95..a282321329b5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -178,7 +178,24 @@ struct kvm_shadow_walk_iterator {
unsigned index;
};
-#define for_each_shadow_entry(_vcpu, _addr, _walker) \
+static const union kvm_mmu_page_role mmu_base_role_mask = {
+ .cr0_wp = 1,
+ .cr4_pae = 1,
+ .nxe = 1,
+ .smep_andnot_wp = 1,
+ .smap_andnot_wp = 1,
+ .smm = 1,
+ .guest_mode = 1,
+ .ad_disabled = 1,
+};
+
+#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \
+ for (shadow_walk_init_using_root(&(_walker), (_vcpu), \
+ (_root), (_addr)); \
+ shadow_walk_okay(&(_walker)); \
+ shadow_walk_next(&(_walker)))
+
+#define for_each_shadow_entry(_vcpu, _addr, _walker) \
for (shadow_walk_init(&(_walker), _vcpu, _addr); \
shadow_walk_okay(&(_walker)); \
shadow_walk_next(&(_walker)))
@@ -221,7 +238,20 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
PT64_EPT_EXECUTABLE_MASK;
static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
+/*
+ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
+ * to guard against L1TF attacks.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
+
+/*
+ * The number of high-order 1 bits to use in the mask above.
+ */
+static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+
static void mmu_spte_set(u64 *sptep, u64 spte);
+static union kvm_mmu_page_role
+kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
{
@@ -308,9 +338,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
{
unsigned int gen = kvm_current_mmio_generation(vcpu);
u64 mask = generation_mmio_spte_mask(gen);
+ u64 gpa = gfn << PAGE_SHIFT;
access &= ACC_WRITE_MASK | ACC_USER_MASK;
- mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
+ mask |= shadow_mmio_value | access;
+ mask |= gpa | shadow_nonpresent_or_rsvd_mask;
+ mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
+ << shadow_nonpresent_or_rsvd_mask_len;
trace_mark_mmio_spte(sptep, gfn, access, gen);
mmu_spte_set(sptep, mask);
@@ -323,8 +357,14 @@ static bool is_mmio_spte(u64 spte)
static gfn_t get_mmio_spte_gfn(u64 spte)
{
- u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
- return (spte & ~mask) >> PAGE_SHIFT;
+ u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
+ shadow_nonpresent_or_rsvd_mask;
+ u64 gpa = spte & ~mask;
+
+ gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
+ & shadow_nonpresent_or_rsvd_mask;
+
+ return gpa >> PAGE_SHIFT;
}
static unsigned get_mmio_spte_access(u64 spte)
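A round-trip check of the masking scheme above, assuming a CPU with 40 physical address bits (so the 5-bit reserved mask covers bits 35-39): the GPA bits under the mask are forced to 1 in the SPTE, defeating L1TF's use of the stale value as a valid address, and the originals are stashed shadow_nonpresent_or_rsvd_mask_len positions higher for get_mmio_spte_gfn() to recover. Generation and MMIO mask bits are omitted from the sketch.

#include <assert.h>
#include <stdint.h>

#define RSVD_LEN 5	/* shadow_nonpresent_or_rsvd_mask_len */

int main(void)
{
	const int phys_bits = 40;		/* assumed physical address width */
	uint64_t rsvd = ((1ULL << RSVD_LEN) - 1) << (phys_bits - RSVD_LEN);
	uint64_t gpa  = 0x89abcd000ULL;		/* hypothetical MMIO GPA */
	uint64_t spte, out;

	/* encode, as in mark_mmio_spte(): force the masked bits to 1 and
	 * stash the original bits RSVD_LEN positions higher */
	spte  = gpa | rsvd;
	spte |= (gpa & rsvd) << RSVD_LEN;

	/* decode, as in get_mmio_spte_gfn() */
	out  = spte & ~(rsvd | (rsvd << RSVD_LEN));
	out |= (spte >> RSVD_LEN) & rsvd;

	assert(out == gpa);
	return 0;
}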
@@ -381,7 +421,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
-static void kvm_mmu_clear_all_pte_masks(void)
+static void kvm_mmu_reset_all_pte_masks(void)
{
shadow_user_mask = 0;
shadow_accessed_mask = 0;
@@ -391,6 +431,18 @@ static void kvm_mmu_clear_all_pte_masks(void)
shadow_mmio_mask = 0;
shadow_present_mask = 0;
shadow_acc_track_mask = 0;
+
+ /*
+ * If the CPU has 46 or less physical address bits, then set an
+ * appropriate mask to guard against L1TF attacks. Otherwise, it is
+ * assumed that the CPU is not vulnerable to L1TF.
+ */
+ if (boot_cpu_data.x86_phys_bits <
+ 52 - shadow_nonpresent_or_rsvd_mask_len)
+ shadow_nonpresent_or_rsvd_mask =
+ rsvd_bits(boot_cpu_data.x86_phys_bits -
+ shadow_nonpresent_or_rsvd_mask_len,
+ boot_cpu_data.x86_phys_bits - 1);
}
static int is_cpuid_PSE36(void)
@@ -890,7 +942,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
- page = (void *)__get_free_page(GFP_KERNEL);
+ page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
if (!page)
return -ENOMEM;
cache->objects[cache->nobjs++] = page;
@@ -1986,7 +2038,7 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
return 0;
}
-static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
{
}
@@ -2117,12 +2169,8 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
- if (sp->role.cr4_pae != !!is_pae(vcpu)) {
- kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
- return false;
- }
-
- if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
+ if (sp->role.cr4_pae != !!is_pae(vcpu)
+ || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return false;
}
@@ -2392,11 +2440,12 @@ out:
return sp;
}
-static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
- struct kvm_vcpu *vcpu, u64 addr)
+static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
+ struct kvm_vcpu *vcpu, hpa_t root,
+ u64 addr)
{
iterator->addr = addr;
- iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
+ iterator->shadow_addr = root;
iterator->level = vcpu->arch.mmu.shadow_root_level;
if (iterator->level == PT64_ROOT_4LEVEL &&
@@ -2405,6 +2454,12 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
--iterator->level;
if (iterator->level == PT32E_ROOT_LEVEL) {
+ /*
+ * prev_root is currently only used for 64-bit hosts, so only
+ * the active root_hpa is valid here.
+ */
+ BUG_ON(root != vcpu->arch.mmu.root_hpa);
+
iterator->shadow_addr
= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
@@ -2414,6 +2469,13 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
}
}
+static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
+ struct kvm_vcpu *vcpu, u64 addr)
+{
+ shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa,
+ addr);
+}
+
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
if (iterator->level < PT_PAGE_TABLE_LEVEL)
@@ -2702,6 +2764,45 @@ static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
kvm_unsync_page(vcpu, sp);
}
+ /*
+ * We need to ensure that the marking of unsync pages is visible
+ * before the SPTE is updated to allow writes because
+ * kvm_mmu_sync_roots() checks the unsync flags without holding
+ * the MMU lock and so can race with this. If the SPTE was updated
+ * before the page had been marked as unsync-ed, something like the
+ * following could happen:
+ *
+ * CPU 1 CPU 2
+ * ---------------------------------------------------------------------
+ * 1.2 Host updates SPTE
+ * to be writable
+ * 2.1 Guest writes a GPTE for GVA X.
+ * (GPTE being in the guest page table shadowed
+ * by the SP from CPU 1.)
+ * This reads SPTE during the page table walk.
+ * Since SPTE.W is read as 1, there is no
+ * fault.
+ *
+ * 2.2 Guest issues TLB flush.
+ * That causes a VM Exit.
+ *
+ * 2.3 kvm_mmu_sync_roots() reads sp->unsync.
+ * Since it is false, it just returns.
+ *
+ * 2.4 Guest accesses GVA X.
+ * Since the mapping in the SP was not
+ * updated, the old mapping for GVA X is
+ * incorrectly used.
+ * 1.1 Host marks SP
+ * as unsync
+ * (sp->unsync = true)
+ *
+ * The write barrier below ensures that 1.1 happens before 1.2 and thus
+ * the situation in 2.4 does not arise. The implicit barrier in 2.2
+ * pairs with this write barrier.
+ */
+ smp_wmb();
+
return false;
}
@@ -2724,6 +2825,10 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
return true;
}
+/* Bits which may be returned by set_spte() */
+#define SET_SPTE_WRITE_PROTECTED_PT BIT(0)
+#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
+
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned pte_access, int level,
gfn_t gfn, kvm_pfn_t pfn, bool speculative,
@@ -2800,7 +2905,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
pgprintk("%s: found shadow page for %llx, marking ro\n",
__func__, gfn);
- ret = 1;
+ ret |= SET_SPTE_WRITE_PROTECTED_PT;
pte_access &= ~ACC_WRITE_MASK;
spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
}
@@ -2816,7 +2921,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
set_pte:
if (mmu_spte_update(sptep, spte))
- kvm_flush_remote_tlbs(vcpu->kvm);
+ ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
done:
return ret;
}
@@ -2827,7 +2932,9 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
{
int was_rmapped = 0;
int rmap_count;
+ int set_spte_ret;
int ret = RET_PF_RETRY;
+ bool flush = false;
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
*sptep, write_fault, gfn);
@@ -2844,22 +2951,25 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
child = page_header(pte & PT64_BASE_ADDR_MASK);
drop_parent_pte(child, sptep);
- kvm_flush_remote_tlbs(vcpu->kvm);
+ flush = true;
} else if (pfn != spte_to_pfn(*sptep)) {
pgprintk("hfn old %llx new %llx\n",
spte_to_pfn(*sptep), pfn);
drop_spte(vcpu->kvm, sptep);
- kvm_flush_remote_tlbs(vcpu->kvm);
+ flush = true;
} else
was_rmapped = 1;
}
- if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
- true, host_writable)) {
+ set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
+ speculative, true, host_writable);
+ if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
if (write_fault)
ret = RET_PF_EMULATE;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
+ if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
+ kvm_flush_remote_tlbs(vcpu->kvm);
if (unlikely(is_mmio_spte(*sptep)))
ret = RET_PF_EMULATE;
@@ -3358,26 +3468,47 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
*root_hpa = INVALID_PAGE;
}
-void kvm_mmu_free_roots(struct kvm_vcpu *vcpu)
+/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
+void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free)
{
int i;
LIST_HEAD(invalid_list);
struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
- if (!VALID_PAGE(mmu->root_hpa))
- return;
+ BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
+
+ /* Before acquiring the MMU lock, see if we need to do any real work. */
+ if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
+ VALID_PAGE(mmu->prev_roots[i].hpa))
+ break;
+
+ if (i == KVM_MMU_NUM_PREV_ROOTS)
+ return;
+ }
spin_lock(&vcpu->kvm->mmu_lock);
- if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
- (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
- mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, &invalid_list);
- } else {
- for (i = 0; i < 4; ++i)
- if (mmu->pae_root[i] != 0)
- mmu_free_root_page(vcpu->kvm, &mmu->pae_root[i],
- &invalid_list);
- mmu->root_hpa = INVALID_PAGE;
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
+ mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa,
+ &invalid_list);
+
+ if (free_active_root) {
+ if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
+ (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
+ mmu_free_root_page(vcpu->kvm, &mmu->root_hpa,
+ &invalid_list);
+ } else {
+ for (i = 0; i < 4; ++i)
+ if (mmu->pae_root[i] != 0)
+ mmu_free_root_page(vcpu->kvm,
+ &mmu->pae_root[i],
+ &invalid_list);
+ mmu->root_hpa = INVALID_PAGE;
+ }
}
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3546,7 +3677,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
return mmu_alloc_shadow_roots(vcpu);
}
-static void mmu_sync_roots(struct kvm_vcpu *vcpu)
+void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_mmu_page *sp;
@@ -3558,14 +3689,39 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
return;
vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
- kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+
if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;
+
sp = page_header(root);
+
+ /*
+ * Even if another CPU was marking the SP as unsync-ed
+ * simultaneously, any guest page table changes are not
+ * guaranteed to be visible anyway until this VCPU issues a TLB
+ * flush strictly after those changes are made. We only need to
+ * ensure that the other CPU sets these flags before any actual
+ * changes to the page tables are made. The comments in
+ * mmu_need_write_protect() describe what could go wrong if this
+ * requirement isn't satisfied.
+ */
+ if (!smp_load_acquire(&sp->unsync) &&
+ !smp_load_acquire(&sp->unsync_children))
+ return;
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+ kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+
mmu_sync_children(vcpu, sp);
+
kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
+ spin_unlock(&vcpu->kvm->mmu_lock);
return;
}
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+ kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu.pae_root[i];
@@ -3575,13 +3731,8 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
mmu_sync_children(vcpu, sp);
}
}
- kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
-}
-void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
-{
- spin_lock(&vcpu->kvm->mmu_lock);
- mmu_sync_roots(vcpu);
+ kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
spin_unlock(&vcpu->kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
@@ -3840,6 +3991,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
{
int r = 1;
+ vcpu->arch.l1tf_flush_l1d = true;
switch (vcpu->arch.apf.host_apf_reason) {
default:
trace_kvm_page_fault(fault_address, error_code);
@@ -3947,16 +4099,107 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
context->update_pte = nonpaging_update_pte;
context->root_level = 0;
context->shadow_root_level = PT32E_ROOT_LEVEL;
- context->root_hpa = INVALID_PAGE;
context->direct_map = true;
context->nx = false;
}
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
+/*
+ * Find out if a previously cached root matching the new CR3/role is available.
+ * The current root is also inserted into the cache.
+ * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
+ * returned.
+ * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
+ * false is returned. This root should now be freed by the caller.
+ */
+static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+ union kvm_mmu_page_role new_role)
+{
+ uint i;
+ struct kvm_mmu_root_info root;
+ struct kvm_mmu *mmu = &vcpu->arch.mmu;
+
+ root.cr3 = mmu->get_cr3(vcpu);
+ root.hpa = mmu->root_hpa;
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+ swap(root, mmu->prev_roots[i]);
+
+ if (new_cr3 == root.cr3 && VALID_PAGE(root.hpa) &&
+ page_header(root.hpa) != NULL &&
+ new_role.word == page_header(root.hpa)->role.word)
+ break;
+ }
+
+ mmu->root_hpa = root.hpa;
+
+ return i < KVM_MMU_NUM_PREV_ROOTS;
+}
+
+static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+ union kvm_mmu_page_role new_role,
+ bool skip_tlb_flush)
{
- kvm_mmu_free_roots(vcpu);
+ struct kvm_mmu *mmu = &vcpu->arch.mmu;
+
+ /*
+ * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
+ * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
+ * later if necessary.
+ */
+ if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
+ mmu->root_level >= PT64_ROOT_4LEVEL) {
+ if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT))
+ return false;
+
+ if (cached_root_available(vcpu, new_cr3, new_role)) {
+ /*
+ * It is possible that the cached previous root page is
+ * obsolete because of a change in the MMU
+ * generation number. However, that is accompanied by
+ * KVM_REQ_MMU_RELOAD, which will free the root that we
+ * have set here and allocate a new one.
+ */
+
+ kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
+ if (!skip_tlb_flush) {
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+ kvm_x86_ops->tlb_flush(vcpu, true);
+ }
+
+ /*
+ * The last MMIO access's GVA and GPA are cached in the
+ * VCPU. When switching to a new CR3, that GVA->GPA
+ * mapping may no longer be valid. So clear any cached
+ * MMIO info even when we don't need to sync the shadow
+ * page tables.
+ */
+ vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
+
+ __clear_sp_write_flooding_count(
+ page_header(mmu->root_hpa));
+
+ return true;
+ }
+ }
+
+ return false;
}
+static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+ union kvm_mmu_page_role new_role,
+ bool skip_tlb_flush)
+{
+ if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
+ kvm_mmu_free_roots(vcpu, KVM_MMU_ROOT_CURRENT);
+}
+
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
+{
+ __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
+ skip_tlb_flush);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
+
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
return kvm_read_cr3(vcpu);
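The swap()-based walk in cached_root_available() keeps prev_roots in most-recently-used order: on a hit the outgoing root takes the matched slot, on a miss the least-recently-used root falls out for the caller to free. A toy model of that rotation (field names and values are illustrative):

#include <assert.h>

#define NPREV 3

struct root { unsigned long cr3, hpa; };

static int find_cached(struct root *active, struct root prev[NPREV],
		       unsigned long new_cr3)
{
	struct root cur = *active;
	int i;

	for (i = 0; i < NPREV; i++) {
		struct root tmp = prev[i];

		prev[i] = cur;			/* shift MRU entries down */
		cur = tmp;
		if (cur.cr3 == new_cr3 && cur.hpa)
			break;			/* hit: cur becomes active */
	}
	*active = cur;
	return i < NPREV;	/* false: caller frees the LRU root in *active */
}

int main(void)
{
	struct root active = { 0x1000, 0xaaa };
	struct root prev[NPREV] = { { 0x2000, 0xbbb }, { 0x3000, 0xccc }, { 0, 0 } };

	assert(find_cached(&active, prev, 0x3000));
	assert(active.cr3 == 0x3000 && active.hpa == 0xccc);
	assert(prev[0].cr3 == 0x1000 && prev[1].cr3 == 0x2000);
	return 0;
}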
@@ -4431,7 +4674,6 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
context->invlpg = paging64_invlpg;
context->update_pte = paging64_update_pte;
context->shadow_root_level = level;
- context->root_hpa = INVALID_PAGE;
context->direct_map = false;
}
@@ -4461,7 +4703,6 @@ static void paging32_init_context(struct kvm_vcpu *vcpu,
context->invlpg = paging32_invlpg;
context->update_pte = paging32_update_pte;
context->shadow_root_level = PT32E_ROOT_LEVEL;
- context->root_hpa = INVALID_PAGE;
context->direct_map = false;
}
@@ -4471,20 +4712,32 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}
+static union kvm_mmu_page_role
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
+{
+ union kvm_mmu_page_role role = {0};
+
+ role.guest_mode = is_guest_mode(vcpu);
+ role.smm = is_smm(vcpu);
+ role.ad_disabled = (shadow_accessed_mask == 0);
+ role.level = kvm_x86_ops->get_tdp_level(vcpu);
+ role.direct = true;
+ role.access = ACC_ALL;
+
+ return role;
+}
+
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = &vcpu->arch.mmu;
- context->base_role.word = 0;
- context->base_role.guest_mode = is_guest_mode(vcpu);
- context->base_role.smm = is_smm(vcpu);
- context->base_role.ad_disabled = (shadow_accessed_mask == 0);
+ context->base_role.word = mmu_base_role_mask.word &
+ kvm_calc_tdp_mmu_root_page_role(vcpu).word;
context->page_fault = tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
- context->root_hpa = INVALID_PAGE;
context->direct_map = true;
context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
context->get_cr3 = get_cr3;
@@ -4519,13 +4772,36 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
+static union kvm_mmu_page_role
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
{
+ union kvm_mmu_page_role role = {0};
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
- struct kvm_mmu *context = &vcpu->arch.mmu;
- MMU_WARN_ON(VALID_PAGE(context->root_hpa));
+ role.nxe = is_nx(vcpu);
+ role.cr4_pae = !!is_pae(vcpu);
+ role.cr0_wp = is_write_protection(vcpu);
+ role.smep_andnot_wp = smep && !is_write_protection(vcpu);
+ role.smap_andnot_wp = smap && !is_write_protection(vcpu);
+ role.guest_mode = is_guest_mode(vcpu);
+ role.smm = is_smm(vcpu);
+ role.direct = !is_paging(vcpu);
+ role.access = ACC_ALL;
+
+ if (!is_long_mode(vcpu))
+ role.level = PT32E_ROOT_LEVEL;
+ else if (is_la57_mode(vcpu))
+ role.level = PT64_ROOT_5LEVEL;
+ else
+ role.level = PT64_ROOT_4LEVEL;
+
+ return role;
+}
+
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu *context = &vcpu->arch.mmu;
if (!is_paging(vcpu))
nonpaging_init_context(vcpu, context);
@@ -4536,26 +4812,34 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
else
paging32_init_context(vcpu, context);
- context->base_role.nxe = is_nx(vcpu);
- context->base_role.cr4_pae = !!is_pae(vcpu);
- context->base_role.cr0_wp = is_write_protection(vcpu);
- context->base_role.smep_andnot_wp
- = smep && !is_write_protection(vcpu);
- context->base_role.smap_andnot_wp
- = smap && !is_write_protection(vcpu);
- context->base_role.guest_mode = is_guest_mode(vcpu);
- context->base_role.smm = is_smm(vcpu);
+ context->base_role.word = mmu_base_role_mask.word &
+ kvm_calc_shadow_mmu_root_page_role(vcpu).word;
reset_shadow_zero_bits_mask(vcpu, context);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
+static union kvm_mmu_page_role
+kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
+{
+ union kvm_mmu_page_role role = vcpu->arch.mmu.base_role;
+
+ role.level = PT64_ROOT_4LEVEL;
+ role.direct = false;
+ role.ad_disabled = !accessed_dirty;
+ role.guest_mode = true;
+ role.access = ACC_ALL;
+
+ return role;
+}
+
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
- bool accessed_dirty)
+ bool accessed_dirty, gpa_t new_eptp)
{
struct kvm_mmu *context = &vcpu->arch.mmu;
+ union kvm_mmu_page_role root_page_role =
+ kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
- MMU_WARN_ON(VALID_PAGE(context->root_hpa));
-
+ __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false);
context->shadow_root_level = PT64_ROOT_4LEVEL;
context->nx = true;
@@ -4566,10 +4850,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->invlpg = ept_invlpg;
context->update_pte = ept_update_pte;
context->root_level = PT64_ROOT_4LEVEL;
- context->root_hpa = INVALID_PAGE;
context->direct_map = false;
- context->base_role.ad_disabled = !accessed_dirty;
- context->base_role.guest_mode = 1;
+ context->base_role.word = root_page_role.word & mmu_base_role_mask.word;
update_permission_bitmask(vcpu, context, true);
update_pkru_bitmask(vcpu, context, true);
update_last_nonleaf_level(vcpu, context);
@@ -4632,8 +4914,17 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
update_last_nonleaf_level(vcpu, g_context);
}
-static void init_kvm_mmu(struct kvm_vcpu *vcpu)
+void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
{
+ if (reset_roots) {
+ uint i;
+
+ vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+ }
+
if (mmu_is_nested(vcpu))
init_kvm_nested_mmu(vcpu);
else if (tdp_enabled)
@@ -4641,11 +4932,21 @@ static void init_kvm_mmu(struct kvm_vcpu *vcpu)
else
init_kvm_softmmu(vcpu);
}
+EXPORT_SYMBOL_GPL(kvm_init_mmu);
+
+static union kvm_mmu_page_role
+kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
+{
+ if (tdp_enabled)
+ return kvm_calc_tdp_mmu_root_page_role(vcpu);
+ else
+ return kvm_calc_shadow_mmu_root_page_role(vcpu);
+}
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
kvm_mmu_unload(vcpu);
- init_kvm_mmu(vcpu);
+ kvm_init_mmu(vcpu, true);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
@@ -4660,8 +4961,8 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
kvm_mmu_sync_roots(vcpu);
if (r)
goto out;
- /* set_cr3() should ensure TLB has been flushed */
- vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
+ kvm_mmu_load_cr3(vcpu);
+ kvm_x86_ops->tlb_flush(vcpu, true);
out:
return r;
}
@@ -4669,7 +4970,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
- kvm_mmu_free_roots(vcpu);
+ kvm_mmu_free_roots(vcpu, KVM_MMU_ROOTS_ALL);
WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
@@ -4822,16 +5123,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
u64 entry, gentry, *spte;
int npte;
bool remote_flush, local_flush;
- union kvm_mmu_page_role mask = { };
-
- mask.cr0_wp = 1;
- mask.cr4_pae = 1;
- mask.nxe = 1;
- mask.smep_andnot_wp = 1;
- mask.smap_andnot_wp = 1;
- mask.smm = 1;
- mask.guest_mode = 1;
- mask.ad_disabled = 1;
/*
* If we don't have indirect shadow pages, it means no page is
@@ -4875,7 +5166,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
mmu_page_zap_pte(vcpu->kvm, sp, spte);
if (gentry &&
!((sp->role.word ^ vcpu->arch.mmu.base_role.word)
- & mask.word) && rmap_can_add(vcpu))
+ & mmu_base_role_mask.word) && rmap_can_add(vcpu))
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
if (need_remote_flush(entry, *spte))
remote_flush = true;
@@ -5000,12 +5291,67 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
- vcpu->arch.mmu.invlpg(vcpu, gva);
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ int i;
+
+ /* INVLPG on a non-canonical address is a NOP according to the SDM. */
+ if (is_noncanonical_address(gva, vcpu))
+ return;
+
+ mmu->invlpg(vcpu, gva, mmu->root_hpa);
+
+ /*
+ * INVLPG is required to invalidate any global mappings for the VA,
+ * irrespective of PCID. Since it would take roughly the same amount
+ * of work to determine whether any of the prev_root mappings of the
+ * VA is marked global as it would to sync it blindly, we might as
+ * well just always sync it.
+ *
+ * Mappings not reachable via the current cr3 or the prev_roots will be
+ * synced when switching to that cr3, so nothing needs to be done here
+ * for them.
+ */
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ if (VALID_PAGE(mmu->prev_roots[i].hpa))
+ mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+
+ kvm_x86_ops->tlb_flush_gva(vcpu, gva);
++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
+void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
+{
+ struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ bool tlb_flush = false;
+ uint i;
+
+ if (pcid == kvm_get_active_pcid(vcpu)) {
+ mmu->invlpg(vcpu, gva, mmu->root_hpa);
+ tlb_flush = true;
+ }
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+ if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
+ pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) {
+ mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+ tlb_flush = true;
+ }
+ }
+
+ if (tlb_flush)
+ kvm_x86_ops->tlb_flush_gva(vcpu, gva);
+
+ ++vcpu->stat.invlpg;
+
+ /*
+ * Mappings not reachable via the current cr3 or the prev_roots will be
+ * synced when switching to that cr3, so nothing needs to be done here
+ * for them.
+ */
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);
+
void kvm_enable_tdp(void)
{
tdp_enabled = true;
@@ -5029,6 +5375,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
struct page *page;
int i;
+ if (tdp_enabled)
+ return 0;
+
/*
* When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
* Therefore we need to allocate shadow page tables in the first
@@ -5047,11 +5396,16 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
+ uint i;
+
vcpu->arch.walk_mmu = &vcpu->arch.mmu;
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
vcpu->arch.mmu.translate_gpa = translate_gpa;
vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+
return alloc_mmu_pages(vcpu);
}
@@ -5059,7 +5413,7 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
- init_kvm_mmu(vcpu);
+ kvm_init_mmu(vcpu, true);
}
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
@@ -5499,7 +5853,7 @@ int kvm_mmu_module_init(void)
{
int ret = -ENOMEM;
- kvm_mmu_clear_all_pte_masks();
+ kvm_mmu_reset_all_pte_masks();
pte_list_desc_cache = kmem_cache_create("pte_list_desc",
sizeof(struct pte_list_desc),
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 5b408c0ad612..1fab69c0b2f3 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -61,9 +61,10 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
- bool accessed_dirty);
+ bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
u64 fault_address, char *insn, int insn_len);
@@ -85,6 +86,27 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
return kvm_mmu_load(vcpu);
}
+static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
+{
+ BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);
+
+ return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
+ ? cr3 & X86_CR3_PCID_MASK
+ : 0;
+}
+
+static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
+{
+ return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
+}
+
+static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
+{
+ if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa |
+ kvm_get_active_pcid(vcpu));
+}
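/*
 * Editor's sketch (not part of the patch): with CR4.PCIDE=1 the PCID
 * occupies CR3 bits 11:0, which is what kvm_get_pcid() masks out and
 * what kvm_mmu_load_cr3() ORs back into the root. A hypothetical,
 * self-contained illustration of the same extraction:
 */
static inline unsigned long pcid_demo(unsigned long cr3, bool pcide)
{
	/* 0xfff mirrors X86_CR3_PCID_MASK; disjoint from PAGE_MASK */
	return pcide ? (cr3 & 0xfffUL) : 0;
}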
+
/*
* Currently, we have two sorts of write-protection, a) the first one
* write-protects guest page to sync the guest modification, b) another one is
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6288e9d7068e..14ffd973df54 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -181,7 +181,7 @@ no_present:
* set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK
* to signify readability since it isn't used in the EPT case
*/
-static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
+static inline unsigned FNAME(gpte_access)(u64 gpte)
{
unsigned access;
#if PTTYPE == PTTYPE_EPT
@@ -394,8 +394,8 @@ retry_walk:
accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
/* Convert to ACC_*_MASK flags for struct guest_walker. */
- walker->pt_access = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
- walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
+ walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
+ walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
if (unlikely(errcode))
goto error;
@@ -508,7 +508,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
gfn = gpte_to_gfn(gpte);
- pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+ pte_access = sp->role.access & FNAME(gpte_access)(gpte);
FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
no_dirty_log && (pte_access & ACC_WRITE_MASK));
@@ -856,7 +856,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
@@ -871,13 +871,13 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
*/
mmu_topup_memory_caches(vcpu);
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+ if (!VALID_PAGE(root_hpa)) {
WARN_ON(1);
return;
}
spin_lock(&vcpu->kvm->mmu_lock);
- for_each_shadow_entry(vcpu, gva, iterator) {
+ for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
level = iterator.level;
sptep = iterator.sptep;
@@ -968,6 +968,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
int i, nr_present = 0;
bool host_writable;
gpa_t first_pte_gpa;
+ int set_spte_ret = 0;
/* direct kvm_mmu_page can not be unsync. */
BUG_ON(sp->role.direct);
@@ -1002,7 +1003,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access;
- pte_access &= FNAME(gpte_access)(vcpu, gpte);
+ pte_access &= FNAME(gpte_access)(gpte);
FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
@@ -1024,12 +1025,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
- set_spte(vcpu, &sp->spt[i], pte_access,
- PT_PAGE_TABLE_LEVEL, gfn,
- spte_to_pfn(sp->spt[i]), true, false,
- host_writable);
+ set_spte_ret |= set_spte(vcpu, &sp->spt[i],
+ pte_access, PT_PAGE_TABLE_LEVEL,
+ gfn, spte_to_pfn(sp->spt[i]),
+ true, false, host_writable);
}
+ if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
+ kvm_flush_remote_tlbs(vcpu->kvm);
+
return nr_present;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f059a73f0fd0..6276140044d0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2884,7 +2884,6 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
svm->vmcb->control.nested_cr3 = __sme_set(root);
mark_dirty(svm->vmcb, VMCB_NPT);
- svm_flush_tlb(vcpu, true);
}
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
@@ -5435,6 +5434,13 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
svm->asid_generation--;
}
+static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ invlpga(gva, svm->vmcb->control.asid);
+}
+
static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
@@ -5580,8 +5586,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
clgi();
- local_irq_enable();
-
/*
* If this vCPU has touched SPEC_CTRL, restore the guest's value if
* it's non-zero. Since vmentry is serialising on affected CPUs, there
@@ -5590,6 +5594,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
*/
x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+ local_irq_enable();
+
asm volatile (
"push %%" _ASM_BP "; \n\t"
"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -5712,12 +5718,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
- x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
-
reload_tss(vcpu);
local_irq_disable();
+ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+
vcpu->arch.cr2 = svm->vmcb->save.cr2;
vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
@@ -5766,7 +5772,6 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
svm->vmcb->save.cr3 = __sme_set(root);
mark_dirty(svm->vmcb, VMCB_CR);
- svm_flush_tlb(vcpu, true);
}
static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -5779,8 +5784,6 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
/* Also sync guest cr3 here in case we live migrate */
svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
mark_dirty(svm->vmcb, VMCB_CR);
-
- svm_flush_tlb(vcpu, true);
}
static int is_disabled(void)
@@ -7090,6 +7093,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.set_rflags = svm_set_rflags,
.tlb_flush = svm_flush_tlb,
+ .tlb_flush_gva = svm_flush_tlb_gva,
.run = svm_vcpu_run,
.handle_exit = handle_exit,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1689f433f3a0..8dae47e7267a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -38,6 +38,7 @@
#include "kvm_cache_regs.h"
#include "x86.h"
+#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/desc.h>
@@ -188,23 +189,204 @@ module_param(ple_window_max, uint, 0444);
extern const ulong vmx_return;
+static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
+static DEFINE_MUTEX(vmx_l1d_flush_mutex);
+
+/* Storage for pre-module-init parameter parsing */
+static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+
+static const struct {
+ const char *option;
+ bool for_parse;
+} vmentry_l1d_param[] = {
+ [VMENTER_L1D_FLUSH_AUTO] = {"auto", true},
+ [VMENTER_L1D_FLUSH_NEVER] = {"never", true},
+ [VMENTER_L1D_FLUSH_COND] = {"cond", true},
+ [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true},
+ [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
+ [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
+};
+
+#define L1D_CACHE_ORDER 4
+static void *vmx_l1d_flush_pages;
+
+static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
+{
+ struct page *page;
+ unsigned int i;
+
+ if (!enable_ept) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+ return 0;
+ }
+
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
+ u64 msr;
+
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
+ if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+ return 0;
+ }
+ }
+
+ /* If set to auto, use the default l1tf mitigation method */
+ if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ l1tf = VMENTER_L1D_FLUSH_NEVER;
+ break;
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+ case L1TF_MITIGATION_FLUSH:
+ case L1TF_MITIGATION_FLUSH_NOSMT:
+ l1tf = VMENTER_L1D_FLUSH_COND;
+ break;
+ case L1TF_MITIGATION_FULL:
+ case L1TF_MITIGATION_FULL_FORCE:
+ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
+ break;
+ }
+ } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
+ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
+ }
+
+ if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
+ !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+ if (!page)
+ return -ENOMEM;
+ vmx_l1d_flush_pages = page_address(page);
+
+ /*
+ * Initialize each page with a different pattern in
+ * order to protect against KSM in the nested
+ * virtualization case.
+ */
+ for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+ memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
+ PAGE_SIZE);
+ }
+ }
+
+ l1tf_vmx_mitigation = l1tf;
+
+ if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+ static_branch_enable(&vmx_l1d_should_flush);
+ else
+ static_branch_disable(&vmx_l1d_should_flush);
+
+ if (l1tf == VMENTER_L1D_FLUSH_COND)
+ static_branch_enable(&vmx_l1d_flush_cond);
+ else
+ static_branch_disable(&vmx_l1d_flush_cond);
+ return 0;
+}
+
+static int vmentry_l1d_flush_parse(const char *s)
+{
+ unsigned int i;
+
+ if (s) {
+ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+ if (vmentry_l1d_param[i].for_parse &&
+ sysfs_streq(s, vmentry_l1d_param[i].option))
+ return i;
+ }
+ }
+ return -EINVAL;
+}
+
+static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+{
+ int l1tf, ret;
+
+ l1tf = vmentry_l1d_flush_parse(s);
+ if (l1tf < 0)
+ return l1tf;
+
+ if (!boot_cpu_has(X86_BUG_L1TF))
+ return 0;
+
+ /*
+ * Has vmx_init() run already? If not, then this is the pre-init
+ * parameter parsing. In that case just store the value and let
+ * vmx_init() do the proper setup after enable_ept has been
+ * established.
+ */
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
+ vmentry_l1d_flush_param = l1tf;
+ return 0;
+ }
+
+ mutex_lock(&vmx_l1d_flush_mutex);
+ ret = vmx_setup_l1d_flush(l1tf);
+ mutex_unlock(&vmx_l1d_flush_mutex);
+ return ret;
+}
+
+static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
+{
+ if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
+ return sprintf(s, "???\n");
+
+ return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
+}
+
+static const struct kernel_param_ops vmentry_l1d_flush_ops = {
+ .set = vmentry_l1d_flush_set,
+ .get = vmentry_l1d_flush_get,
+};
+module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
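/*
 * Usage note (editor's addition): with mode 0644 and a ->set handler,
 * the mitigation is runtime-writable, e.g.
 *
 *	echo cond > /sys/module/kvm_intel/parameters/vmentry_l1d_flush
 *
 * which lands in vmentry_l1d_flush_set() and re-runs
 * vmx_setup_l1d_flush() under vmx_l1d_flush_mutex once vmx_init()
 * has established enable_ept.
 */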
+
+enum ept_pointers_status {
+ EPT_POINTERS_CHECK = 0,
+ EPT_POINTERS_MATCH = 1,
+ EPT_POINTERS_MISMATCH = 2
+};
+
struct kvm_vmx {
struct kvm kvm;
unsigned int tss_addr;
bool ept_identity_pagetable_done;
gpa_t ept_identity_map_addr;
+
+ enum ept_pointers_status ept_pointers_match;
+ spinlock_t ept_pointer_lock;
};
#define NR_AUTOLOAD_MSRS 8
+struct vmcs_hdr {
+ u32 revision_id:31;
+ u32 shadow_vmcs:1;
+};
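/*
 * Editor's sketch (not part of the patch): vmcs_hdr overlays the
 * 32-bit revision dword, so on little-endian x86 these two writes
 * mark a shadow VMCS identically:
 *
 *	vmcs->hdr.shadow_vmcs = 1;
 *	*(u32 *)vmcs |= 1u << 31;
 *
 * which is why the open-coded "revision_id |= (1u << 31)" removed
 * later in this patch is no longer needed.
 */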
+
struct vmcs {
- u32 revision_id;
+ struct vmcs_hdr hdr;
u32 abort;
char data[0];
};
/*
+ * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
+ * and whose values change infrequently, but are not constant. I.e. this is
+ * used as a write-through cache of the corresponding VMCS fields.
+ */
+struct vmcs_host_state {
+ unsigned long cr3; /* May not match real cr3 */
+ unsigned long cr4; /* May not match real cr4 */
+ unsigned long gs_base;
+ unsigned long fs_base;
+
+ u16 fs_sel, gs_sel, ldt_sel;
+#ifdef CONFIG_X86_64
+ u16 ds_sel, es_sel;
+#endif
+};
+
+/*
* Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
* remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
* loaded on this CPU (so we can clear them if the CPU goes down).
@@ -215,14 +397,13 @@ struct loaded_vmcs {
int cpu;
bool launched;
bool nmi_known_unmasked;
- unsigned long vmcs_host_cr3; /* May not match real cr3 */
- unsigned long vmcs_host_cr4; /* May not match real cr4 */
/* Support for vnmi-less CPUs */
int soft_vnmi_blocked;
ktime_t entry_time;
s64 vnmi_blocked_time;
unsigned long *msr_bitmap;
struct list_head loaded_vmcss_on_cpu_link;
+ struct vmcs_host_state host_state;
};
struct shared_msr_entry {
@@ -253,7 +434,7 @@ struct __packed vmcs12 {
/* According to the Intel spec, a VMCS region must start with the
* following two fields. Then follow implementation-specific data.
*/
- u32 revision_id;
+ struct vmcs_hdr hdr;
u32 abort;
u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
@@ -421,7 +602,7 @@ struct __packed vmcs12 {
"Offset of " #field " in struct vmcs12 has changed.")
static inline void vmx_check_vmcs12_offsets(void) {
- CHECK_OFFSET(revision_id, 0);
+ CHECK_OFFSET(hdr, 0);
CHECK_OFFSET(abort, 4);
CHECK_OFFSET(launch_state, 8);
CHECK_OFFSET(io_bitmap_a, 40);
@@ -640,6 +821,12 @@ struct nested_vmx {
*/
struct vmcs12 *cached_vmcs12;
/*
+ * Cache of the guest's shadow VMCS, existing outside of guest
+ * memory. Loaded from guest memory during VM entry. Flushed
+ * to guest memory during VM exit.
+ */
+ struct vmcs12 *cached_shadow_vmcs12;
+ /*
* Indicates if the shadow vmcs must be updated with the
* data held by vmcs12
*/
@@ -757,6 +944,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
(unsigned long *)&pi_desc->control);
}
+struct vmx_msrs {
+ unsigned int nr;
+ struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
+};
+
struct vcpu_vmx {
struct kvm_vcpu vcpu;
unsigned long host_rsp;
@@ -784,26 +976,20 @@ struct vcpu_vmx {
/*
* loaded_vmcs points to the VMCS currently used in this vcpu. For a
* non-nested (L1) guest, it always points to vmcs01. For a nested
- * guest (L2), it points to a different VMCS.
+ * guest (L2), it points to a different VMCS. loaded_cpu_state points
+ * to the VMCS whose state is loaded into the CPU registers that only
+ * need to be switched when transitioning to/from the kernel; a NULL
+ * value indicates that host state is loaded.
*/
struct loaded_vmcs vmcs01;
struct loaded_vmcs *loaded_vmcs;
+ struct loaded_vmcs *loaded_cpu_state;
bool __launched; /* temporary, used in vmx_vcpu_run */
struct msr_autoload {
- unsigned nr;
- struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
- struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+ struct vmx_msrs guest;
+ struct vmx_msrs host;
} msr_autoload;
- struct {
- int loaded;
- u16 fs_sel, gs_sel, ldt_sel;
-#ifdef CONFIG_X86_64
- u16 ds_sel, es_sel;
-#endif
- int gs_ldt_reload_needed;
- int fs_reload_needed;
- u64 msr_host_bndcfgs;
- } host_state;
+
struct {
int vm86_active;
ulong save_rflags;
@@ -853,6 +1039,7 @@ struct vcpu_vmx {
*/
u64 msr_ia32_feature_control;
u64 msr_ia32_feature_control_valid_bits;
+ u64 ept_pointer;
};
enum segment_cache_field {
@@ -1072,6 +1259,11 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
return to_vmx(vcpu)->nested.cached_vmcs12;
}
+static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
+}
+
static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
@@ -1342,6 +1534,48 @@ static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
* GUEST_IA32_RTIT_CTL = 0x00002814,
*/
}
+
+/* check_ept_pointer_match() must be called under ept_pointer_lock. */
+static void check_ept_pointer_match(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ u64 tmp_eptp = INVALID_PAGE;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!VALID_PAGE(tmp_eptp)) {
+ tmp_eptp = to_vmx(vcpu)->ept_pointer;
+ } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
+ to_kvm_vmx(kvm)->ept_pointers_match
+ = EPT_POINTERS_MISMATCH;
+ return;
+ }
+ }
+
+ to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
+}
+
+static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
+{
+ int ret;
+
+ spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+
+ if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
+ check_ept_pointer_match(kvm);
+
+ if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ ret = hyperv_flush_guest_mapping(
+ to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
+
+out:
+ spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+ return ret;
+}
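/*
 * Editor's note: the enlightened flush can only target one EPTP, so
 * it is attempted only while every vCPU shares a single EPT pointer;
 * the -ENOTSUPP return presumably makes the caller fall back to the
 * ordinary IPI-based remote TLB flush.
 */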
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
@@ -1456,6 +1690,12 @@ static inline bool cpu_has_vmx_virtual_intr_delivery(void)
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
}
+static inline bool cpu_has_vmx_encls_vmexit(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENCLS_EXITING;
+}
+
/*
* Comment's format: document - errata name - stepping - processor name.
* Refer from
@@ -1716,6 +1956,12 @@ static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
CPU_BASED_MONITOR_TRAP_FLAG;
}
+static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
+ SECONDARY_EXEC_SHADOW_VMCS;
+}
+
static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
return vmcs12->cpu_based_vm_exec_control & bit;
@@ -1796,6 +2042,11 @@ static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
VMX_VMFUNC_EPTP_SWITCHING);
}
+static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
+}
+
static inline bool is_nmi(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -1826,11 +2077,12 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
u64 rsvd : 48;
u64 gva;
} operand = { vpid, 0, gva };
+ bool error;
- asm volatile (__ex(ASM_VMX_INVVPID)
- /* CF==1 or ZF==1 --> rc = -1 */
- "; ja 1f ; ud2 ; 1:"
- : : "a"(&operand), "c"(ext) : "cc", "memory");
+ asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na)
+ : CC_OUT(na) (error) : "a"(&operand), "c"(ext)
+ : "memory");
+ BUG_ON(error);
}
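/*
 * Editor's sketch (not part of the patch): CC_SET()/CC_OUT() come from
 * <asm/asm.h>, newly included above, and expand roughly to:
 */
#ifdef __GCC_ASM_FLAG_OUTPUTS__
# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
# define CC_OUT(c) "=@cc" #c
#else
# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
# define CC_OUT(c) [_cc_ ## c] "=qm"
#endif
/*
 * i.e. newer compilers read the condition straight out of EFLAGS via
 * the "=@cc<cond>" output constraint, older ones fall back to SETcc,
 * replacing the open-coded "; ja 1f ; ud2" and "; setna %0" sequences.
 */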
static inline void __invept(int ext, u64 eptp, gpa_t gpa)
@@ -1838,11 +2090,12 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa)
struct {
u64 eptp, gpa;
} operand = {eptp, gpa};
+ bool error;
- asm volatile (__ex(ASM_VMX_INVEPT)
- /* CF==1 or ZF==1 --> rc = -1 */
- "; ja 1f ; ud2 ; 1:\n"
- : : "a" (&operand), "c" (ext) : "cc", "memory");
+ asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na)
+ : CC_OUT(na) (error) : "a" (&operand), "c" (ext)
+ : "memory");
+ BUG_ON(error);
}
static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
@@ -1858,12 +2111,12 @@ static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
static void vmcs_clear(struct vmcs *vmcs)
{
u64 phys_addr = __pa(vmcs);
- u8 error;
+ bool error;
- asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
- : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
- : "cc", "memory");
- if (error)
+ asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na)
+ : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
+ : "memory");
+ if (unlikely(error))
printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
vmcs, phys_addr);
}
@@ -1880,15 +2133,15 @@ static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
static void vmcs_load(struct vmcs *vmcs)
{
u64 phys_addr = __pa(vmcs);
- u8 error;
+ bool error;
if (static_branch_unlikely(&enable_evmcs))
return evmcs_load(phys_addr);
- asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
- : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
- : "cc", "memory");
- if (error)
+ asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na)
+ : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
+ : "memory");
+ if (unlikely(error))
printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
vmcs, phys_addr);
}
@@ -1966,6 +2219,19 @@ static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
__loaded_vmcs_clear, loaded_vmcs, 1);
}
+static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
+{
+ if (vpid == 0)
+ return true;
+
+ if (cpu_has_vmx_invvpid_individual_addr()) {
+ __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
+ return true;
+ }
+
+ return false;
+}
+
static inline void vpid_sync_vcpu_single(int vpid)
{
if (vpid == 0)
@@ -2100,10 +2366,10 @@ static noinline void vmwrite_error(unsigned long field, unsigned long value)
static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
- u8 error;
+ bool error;
- asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
- : "=q"(error) : "a"(value), "d"(field) : "cc");
+ asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na)
+ : CC_OUT(na) (error) : "a"(value), "d"(field));
if (unlikely(error))
vmwrite_error(field, value);
}
@@ -2377,9 +2643,20 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
vm_exit_controls_clearbit(vmx, exit);
}
+static int find_msr(struct vmx_msrs *m, unsigned int msr)
+{
+ unsigned int i;
+
+ for (i = 0; i < m->nr; ++i) {
+ if (m->val[i].index == msr)
+ return i;
+ }
+ return -ENOENT;
+}
+
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
- unsigned i;
+ int i;
struct msr_autoload *m = &vmx->msr_autoload;
switch (msr) {
@@ -2400,18 +2677,21 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
}
break;
}
+ i = find_msr(&m->guest, msr);
+ if (i < 0)
+ goto skip_guest;
+ --m->guest.nr;
+ m->guest.val[i] = m->guest.val[m->guest.nr];
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
- for (i = 0; i < m->nr; ++i)
- if (m->guest[i].index == msr)
- break;
-
- if (i == m->nr)
+skip_guest:
+ i = find_msr(&m->host, msr);
+ if (i < 0)
return;
- --m->nr;
- m->guest[i] = m->guest[m->nr];
- m->host[i] = m->host[m->nr];
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+
+ --m->host.nr;
+ m->host.val[i] = m->host.val[m->host.nr];
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
}
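/*
 * Editor's sketch (not part of the patch): the autoload arrays are
 * unordered, so removal above is an O(1) swap-with-last:
 *
 *	m->guest.val[i] = m->guest.val[--m->guest.nr];
 *
 * The guest and host lists are trimmed independently because an MSR
 * added with entry_only (see add_atomic_switch_msr() below) has no
 * host-side slot to remove.
 */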
static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
@@ -2426,9 +2706,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
}
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
- u64 guest_val, u64 host_val)
+ u64 guest_val, u64 host_val, bool entry_only)
{
- unsigned i;
+ int i, j = 0;
struct msr_autoload *m = &vmx->msr_autoload;
switch (msr) {
@@ -2463,24 +2743,31 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
- for (i = 0; i < m->nr; ++i)
- if (m->guest[i].index == msr)
- break;
+ i = find_msr(&m->guest, msr);
+ if (!entry_only)
+ j = find_msr(&m->host, msr);
- if (i == NR_AUTOLOAD_MSRS) {
+ if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr);
return;
- } else if (i == m->nr) {
- ++m->nr;
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}
+ if (i < 0) {
+ i = m->guest.nr++;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+ }
+ m->guest.val[i].index = msr;
+ m->guest.val[i].value = guest_val;
+
+ if (entry_only)
+ return;
- m->guest[i].index = msr;
- m->guest[i].value = guest_val;
- m->host[i].index = msr;
- m->host[i].value = host_val;
+ if (j < 0) {
+ j = m->host.nr++;
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+ m->host.val[j].index = msr;
+ m->host.val[j].value = host_val;
}
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
@@ -2524,7 +2811,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
guest_efer &= ~EFER_LME;
if (guest_efer != host_efer)
add_atomic_switch_msr(vmx, MSR_EFER,
- guest_efer, host_efer);
+ guest_efer, host_efer, false);
return false;
} else {
guest_efer &= ~ignore_bits;
@@ -2566,112 +2853,150 @@ static unsigned long segment_base(u16 selector)
}
#endif
-static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs_host_state *host_state;
#ifdef CONFIG_X86_64
int cpu = raw_smp_processor_id();
#endif
+ unsigned long fs_base, gs_base;
+ u16 fs_sel, gs_sel;
int i;
- if (vmx->host_state.loaded)
+ if (vmx->loaded_cpu_state)
return;
- vmx->host_state.loaded = 1;
+ vmx->loaded_cpu_state = vmx->loaded_vmcs;
+ host_state = &vmx->loaded_cpu_state->host_state;
+
/*
* Set host fs and gs selectors. Unfortunately, 22.2.3 does not
* allow segment selectors with cpl > 0 or ti == 1.
*/
- vmx->host_state.ldt_sel = kvm_read_ldt();
- vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+ host_state->ldt_sel = kvm_read_ldt();
#ifdef CONFIG_X86_64
- save_fsgs_for_kvm();
- vmx->host_state.fs_sel = current->thread.fsindex;
- vmx->host_state.gs_sel = current->thread.gsindex;
-#else
- savesegment(fs, vmx->host_state.fs_sel);
- savesegment(gs, vmx->host_state.gs_sel);
-#endif
- if (!(vmx->host_state.fs_sel & 7)) {
- vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
- vmx->host_state.fs_reload_needed = 0;
+ savesegment(ds, host_state->ds_sel);
+ savesegment(es, host_state->es_sel);
+
+ gs_base = cpu_kernelmode_gs_base(cpu);
+ if (likely(is_64bit_mm(current->mm))) {
+ save_fsgs_for_kvm();
+ fs_sel = current->thread.fsindex;
+ gs_sel = current->thread.gsindex;
+ fs_base = current->thread.fsbase;
+ vmx->msr_host_kernel_gs_base = current->thread.gsbase;
} else {
- vmcs_write16(HOST_FS_SELECTOR, 0);
- vmx->host_state.fs_reload_needed = 1;
- }
- if (!(vmx->host_state.gs_sel & 7))
- vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
- else {
- vmcs_write16(HOST_GS_SELECTOR, 0);
- vmx->host_state.gs_ldt_reload_needed = 1;
+ savesegment(fs, fs_sel);
+ savesegment(gs, gs_sel);
+ fs_base = read_msr(MSR_FS_BASE);
+ vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
}
-#ifdef CONFIG_X86_64
- savesegment(ds, vmx->host_state.ds_sel);
- savesegment(es, vmx->host_state.es_sel);
-
- vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
- vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
-
- vmx->msr_host_kernel_gs_base = current->thread.gsbase;
if (is_long_mode(&vmx->vcpu))
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
- vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
- vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
+ savesegment(fs, fs_sel);
+ savesegment(gs, gs_sel);
+ fs_base = segment_base(fs_sel);
+ gs_base = segment_base(gs_sel);
#endif
- if (boot_cpu_has(X86_FEATURE_MPX))
- rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
+
+ if (unlikely(fs_sel != host_state->fs_sel)) {
+ if (!(fs_sel & 7))
+ vmcs_write16(HOST_FS_SELECTOR, fs_sel);
+ else
+ vmcs_write16(HOST_FS_SELECTOR, 0);
+ host_state->fs_sel = fs_sel;
+ }
+ if (unlikely(gs_sel != host_state->gs_sel)) {
+ if (!(gs_sel & 7))
+ vmcs_write16(HOST_GS_SELECTOR, gs_sel);
+ else
+ vmcs_write16(HOST_GS_SELECTOR, 0);
+ host_state->gs_sel = gs_sel;
+ }
+ if (unlikely(fs_base != host_state->fs_base)) {
+ vmcs_writel(HOST_FS_BASE, fs_base);
+ host_state->fs_base = fs_base;
+ }
+ if (unlikely(gs_base != host_state->gs_base)) {
+ vmcs_writel(HOST_GS_BASE, gs_base);
+ host_state->gs_base = gs_base;
+ }
+
for (i = 0; i < vmx->save_nmsrs; ++i)
kvm_set_shared_msr(vmx->guest_msrs[i].index,
vmx->guest_msrs[i].data,
vmx->guest_msrs[i].mask);
}
-static void __vmx_load_host_state(struct vcpu_vmx *vmx)
+static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
{
- if (!vmx->host_state.loaded)
+ struct vmcs_host_state *host_state;
+
+ if (!vmx->loaded_cpu_state)
return;
+ WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
+ host_state = &vmx->loaded_cpu_state->host_state;
+
++vmx->vcpu.stat.host_state_reload;
- vmx->host_state.loaded = 0;
+ vmx->loaded_cpu_state = NULL;
+
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu))
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
- if (vmx->host_state.gs_ldt_reload_needed) {
- kvm_load_ldt(vmx->host_state.ldt_sel);
+ if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
+ kvm_load_ldt(host_state->ldt_sel);
#ifdef CONFIG_X86_64
- load_gs_index(vmx->host_state.gs_sel);
+ load_gs_index(host_state->gs_sel);
#else
- loadsegment(gs, vmx->host_state.gs_sel);
+ loadsegment(gs, host_state->gs_sel);
#endif
}
- if (vmx->host_state.fs_reload_needed)
- loadsegment(fs, vmx->host_state.fs_sel);
+ if (host_state->fs_sel & 7)
+ loadsegment(fs, host_state->fs_sel);
#ifdef CONFIG_X86_64
- if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
- loadsegment(ds, vmx->host_state.ds_sel);
- loadsegment(es, vmx->host_state.es_sel);
+ if (unlikely(host_state->ds_sel | host_state->es_sel)) {
+ loadsegment(ds, host_state->ds_sel);
+ loadsegment(es, host_state->es_sel);
}
#endif
invalidate_tss_limit();
#ifdef CONFIG_X86_64
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
- if (vmx->host_state.msr_host_bndcfgs)
- wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
load_fixmap_gdt(raw_smp_processor_id());
}
-static void vmx_load_host_state(struct vcpu_vmx *vmx)
+#ifdef CONFIG_X86_64
+static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
- preempt_disable();
- __vmx_load_host_state(vmx);
- preempt_enable();
+ if (is_long_mode(&vmx->vcpu)) {
+ preempt_disable();
+ if (vmx->loaded_cpu_state)
+ rdmsrl(MSR_KERNEL_GS_BASE,
+ vmx->msr_guest_kernel_gs_base);
+ preempt_enable();
+ }
+ return vmx->msr_guest_kernel_gs_base;
}
+static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
+{
+ if (is_long_mode(&vmx->vcpu)) {
+ preempt_disable();
+ if (vmx->loaded_cpu_state)
+ wrmsrl(MSR_KERNEL_GS_BASE, data);
+ preempt_enable();
+ }
+ vmx->msr_guest_kernel_gs_base = data;
+}
+#endif
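/*
 * Editor's note: the preempt_disable() sections above are what keep
 * the cached msr_guest_kernel_gs_base coherent; loaded_cpu_state is
 * only cleared by vmx_prepare_switch_to_host() on a sched-out via
 * vmx_vcpu_put(), so with preemption off the MSR is guaranteed to
 * still hold (or be about to receive) the guest value being accessed.
 */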
+
static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
@@ -2813,7 +3138,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
vmx_vcpu_pi_put(vcpu);
- __vmx_load_host_state(to_vmx(vcpu));
+ vmx_prepare_switch_to_host(to_vmx(vcpu));
}
static bool emulation_required(struct kvm_vcpu *vcpu)
@@ -3034,7 +3359,7 @@ static bool vmx_rdtscp_supported(void)
static bool vmx_invpcid_supported(void)
{
- return cpu_has_vmx_invpcid() && enable_ept;
+ return cpu_has_vmx_invpcid();
}
/*
@@ -3277,6 +3602,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING;
+ /*
+ * We can emulate "VMCS shadowing," even if the hardware
+ * doesn't support it.
+ */
+ msrs->secondary_ctls_high |=
+ SECONDARY_EXEC_SHADOW_VMCS;
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
@@ -3744,8 +4075,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vmcs_readl(GUEST_GS_BASE);
break;
case MSR_KERNEL_GS_BASE:
- vmx_load_host_state(vmx);
- msr_info->data = vmx->msr_guest_kernel_gs_base;
+ msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
break;
#endif
case MSR_EFER:
@@ -3845,8 +4175,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmcs_writel(GUEST_GS_BASE, data);
break;
case MSR_KERNEL_GS_BASE:
- vmx_load_host_state(vmx);
- vmx->msr_guest_kernel_gs_base = data;
+ vmx_write_guest_kernel_gs_base(vmx, data);
break;
#endif
case MSR_IA32_SYSENTER_CS:
@@ -3978,7 +4307,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.ia32_xss = data;
if (vcpu->arch.ia32_xss != host_xss)
add_atomic_switch_msr(vmx, MSR_IA32_XSS,
- vcpu->arch.ia32_xss, host_xss);
+ vcpu->arch.ia32_xss, host_xss, false);
else
clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
break;
@@ -4234,7 +4563,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_RDRAND_EXITING |
SECONDARY_EXEC_ENABLE_PML |
SECONDARY_EXEC_TSC_SCALING |
- SECONDARY_EXEC_ENABLE_VMFUNC;
+ SECONDARY_EXEC_ENABLE_VMFUNC |
+ SECONDARY_EXEC_ENCLS_EXITING;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -4322,11 +4652,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
vmcs_conf->order = get_order(vmcs_conf->size);
vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
- /* KVM supports Enlightened VMCS v1 only */
- if (static_branch_unlikely(&enable_evmcs))
- vmcs_conf->revision_id = KVM_EVMCS_VERSION;
- else
- vmcs_conf->revision_id = vmx_msr_low;
+ vmcs_conf->revision_id = vmx_msr_low;
vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
@@ -4385,7 +4711,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
return 0;
}
-static struct vmcs *alloc_vmcs_cpu(int cpu)
+static struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu)
{
int node = cpu_to_node(cpu);
struct page *pages;
@@ -4396,7 +4722,15 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
return NULL;
vmcs = page_address(pages);
memset(vmcs, 0, vmcs_config.size);
- vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
+
+ /* KVM supports Enlightened VMCS v1 only */
+ if (static_branch_unlikely(&enable_evmcs))
+ vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
+ else
+ vmcs->hdr.revision_id = vmcs_config.revision_id;
+
+ if (shadow)
+ vmcs->hdr.shadow_vmcs = 1;
return vmcs;
}
@@ -4420,14 +4754,14 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
}
-static struct vmcs *alloc_vmcs(void)
+static struct vmcs *alloc_vmcs(bool shadow)
{
- return alloc_vmcs_cpu(raw_smp_processor_id());
+ return alloc_vmcs_cpu(shadow, raw_smp_processor_id());
}
static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
- loaded_vmcs->vmcs = alloc_vmcs();
+ loaded_vmcs->vmcs = alloc_vmcs(false);
if (!loaded_vmcs->vmcs)
return -ENOMEM;
@@ -4449,6 +4783,9 @@ static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
evmcs->hv_enlightenments_control.msr_bitmap = 1;
}
}
+
+ memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
+
return 0;
out_vmcs:
@@ -4558,12 +4895,25 @@ static __init int alloc_kvm_area(void)
for_each_possible_cpu(cpu) {
struct vmcs *vmcs;
- vmcs = alloc_vmcs_cpu(cpu);
+ vmcs = alloc_vmcs_cpu(false, cpu);
if (!vmcs) {
free_kvm_area();
return -ENOMEM;
}
+ /*
+ * When eVMCS is enabled, alloc_vmcs_cpu() sets
+ * vmcs->revision_id to KVM_EVMCS_VERSION instead of
+ * revision_id reported by MSR_IA32_VMX_BASIC.
+ *
+ * However, even though not explicitly documented by
+ * the TLFS, the VMXArea passed as the VMXON argument
+ * should still be marked with the revision_id reported
+ * by the physical CPU.
+ */
+ if (static_branch_unlikely(&enable_evmcs))
+ vmcs->hdr.revision_id = vmcs_config.revision_id;
+
per_cpu(vmxarea, cpu) = vmcs;
}
return 0;
@@ -4719,10 +5069,18 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
return;
/*
- * Force kernel_gs_base reloading before EFER changes, as control
- * of this msr depends on is_long_mode().
+ * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
+ * 64-bit mode as a 64-bit kernel may frequently access the
+ * MSR. This means we need to manually save/restore the MSR
+ * when switching between guest and host state, but only if
+ * the guest is in 64-bit mode. Sync our cached value if the
+ * guest is transitioning to 32-bit mode and the CPU contains
+ * guest state, i.e. the cache is stale.
*/
- vmx_load_host_state(to_vmx(vcpu));
+#ifdef CONFIG_X86_64
+ if (!(efer & EFER_LMA))
+ (void)vmx_read_guest_kernel_gs_base(vmx);
+#endif
vcpu->arch.efer = efer;
if (efer & EFER_LMA) {
vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
@@ -4779,6 +5137,20 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}
+static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
+{
+ int vpid = to_vmx(vcpu)->vpid;
+
+ if (!vpid_sync_vcpu_addr(vpid, addr))
+ vpid_sync_context(vpid);
+
+ /*
+ * If VPIDs are not supported or enabled, then the above is a no-op.
+ * But we don't really need a TLB flush in that case anyway, because
+ * each VM entry/exit includes an implicit flush when VPID is 0.
+ */
+}
+
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -4960,6 +5332,7 @@ static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
+ struct kvm *kvm = vcpu->kvm;
unsigned long guest_cr3;
u64 eptp;
@@ -4967,15 +5340,23 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
if (enable_ept) {
eptp = construct_eptp(vcpu, cr3);
vmcs_write64(EPT_POINTER, eptp);
+
+ if (kvm_x86_ops->tlb_remote_flush) {
+ spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+ to_vmx(vcpu)->ept_pointer = eptp;
+ to_kvm_vmx(kvm)->ept_pointers_match
+ = EPT_POINTERS_CHECK;
+ spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+ }
+
if (enable_unrestricted_guest || is_paging(vcpu) ||
is_guest_mode(vcpu))
guest_cr3 = kvm_read_cr3(vcpu);
else
- guest_cr3 = to_kvm_vmx(vcpu->kvm)->ept_identity_map_addr;
+ guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
ept_load_pdptrs(vcpu);
}
- vmx_flush_tlb(vcpu, true);
vmcs_writel(GUEST_CR3, guest_cr3);
}
@@ -5911,19 +6292,19 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
*/
cr3 = __read_cr3();
vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
- vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
+ vmx->loaded_vmcs->host_state.cr3 = cr3;
/* Save the most likely value for this task's CR4 in the VMCS. */
cr4 = cr4_read_shadow();
vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
- vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
+ vmx->loaded_vmcs->host_state.cr4 = cr4;
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
#ifdef CONFIG_X86_64
/*
* Load null selectors, so we can avoid reloading them in
- * __vmx_load_host_state(), in case userspace uses the null selectors
- * too (the expected case).
+ * vmx_prepare_switch_to_host(), in case userspace uses
+ * the null selectors too (the expected case).
*/
vmcs_write16(HOST_DS_SELECTOR, 0);
vmcs_write16(HOST_ES_SELECTOR, 0);
@@ -6048,8 +6429,6 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
if (!enable_ept) {
exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
enable_unrestricted_guest = 0;
- /* Enable INVPCID for non-ept guests may cause performance regression. */
- exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
}
if (!enable_unrestricted_guest)
exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
@@ -6178,9 +6557,6 @@ static void ept_set_mmio_spte_mask(void)
*/
static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
-#ifdef CONFIG_X86_64
- unsigned long a;
-#endif
int i;
if (enable_shadow_vmcs) {
@@ -6235,24 +6611,17 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
vmx_set_constant_host_state(vmx);
-#ifdef CONFIG_X86_64
- rdmsrl(MSR_FS_BASE, a);
- vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
- rdmsrl(MSR_GS_BASE, a);
- vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
-#else
vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
-#endif
if (cpu_has_vmx_vmfunc())
vmcs_write64(VM_FUNCTION_CONTROL, 0);
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
@@ -6272,8 +6641,7 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
- if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);
+ vmx->arch_capabilities = kvm_get_arch_capabilities();
vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
@@ -6293,6 +6661,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
+
+ if (cpu_has_vmx_encls_vmexit())
+ vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
}
static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -7478,6 +7849,7 @@ static void vmx_enable_tdp(void)
static __init int hardware_setup(void)
{
+ unsigned long host_bndcfgs;
int r = -ENOMEM, i;
rdmsrl_safe(MSR_EFER, &host_efer);
@@ -7502,6 +7874,11 @@ static __init int hardware_setup(void)
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
+ if (boot_cpu_has(X86_FEATURE_MPX)) {
+ rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
+ WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
+ }
+
if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
!(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
enable_vpid = 0;
@@ -7538,6 +7915,12 @@ static __init int hardware_setup(void)
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
+#if IS_ENABLED(CONFIG_HYPERV)
+ if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
+ && enable_ept)
+ kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
+#endif
+
if (!cpu_has_vmx_ple()) {
ple_gap = 0;
ple_window = 0;
@@ -7564,6 +7947,11 @@ static __init int hardware_setup(void)
else
kvm_disable_tdp();
+ if (!nested) {
+ kvm_x86_ops->get_nested_state = NULL;
+ kvm_x86_ops->set_nested_state = NULL;
+ }
+
/*
* Only enable PML when hardware supports PML feature, and both EPT
* and EPT A/D bit features are enabled -- PML depends on them to work.
@@ -7840,10 +8228,35 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
return 0;
}
+/*
+ * Allocate a shadow VMCS and associate it with the currently loaded
+ * VMCS, unless such a shadow VMCS already exists. The newly allocated
+ * VMCS is also VMCLEARed, so that it is ready for use.
+ */
+static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
+
+ /*
+ * We should allocate a shadow vmcs for vmcs01 only when L1
+ * executes VMXON and free it when L1 executes VMXOFF.
+ * As it is invalid to execute VMXON twice, we shouldn't reach
+ * here when vmcs01 already has an allocated shadow vmcs.
+ */
+ WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
+
+ if (!loaded_vmcs->shadow_vmcs) {
+ loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
+ if (loaded_vmcs->shadow_vmcs)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
+ }
+ return loaded_vmcs->shadow_vmcs;
+}
+
static int enter_vmx_operation(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmcs *shadow_vmcs;
int r;
r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
@@ -7854,25 +8267,26 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
if (!vmx->nested.cached_vmcs12)
goto out_cached_vmcs12;
- if (enable_shadow_vmcs) {
- shadow_vmcs = alloc_vmcs();
- if (!shadow_vmcs)
- goto out_shadow_vmcs;
- /* mark vmcs as shadow */
- shadow_vmcs->revision_id |= (1u << 31);
- /* init shadow vmcs */
- vmcs_clear(shadow_vmcs);
- vmx->vmcs01.shadow_vmcs = shadow_vmcs;
- }
+ vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+ if (!vmx->nested.cached_shadow_vmcs12)
+ goto out_cached_shadow_vmcs12;
+
+ if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
+ goto out_shadow_vmcs;
hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
+ vmx->nested.vpid02 = allocate_vpid();
+
vmx->nested.vmxon = true;
return 0;
out_shadow_vmcs:
+ kfree(vmx->nested.cached_shadow_vmcs12);
+
+out_cached_shadow_vmcs12:
kfree(vmx->nested.cached_vmcs12);
out_cached_vmcs12:
@@ -7915,7 +8329,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
/* CPL=0 must be checked manually. */
if (vmx_get_cpl(vcpu)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
+ kvm_inject_gp(vcpu, 0);
return 1;
}
@@ -7978,15 +8392,16 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
*/
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
- if (vmx_get_cpl(vcpu)) {
+ if (!to_vmx(vcpu)->nested.vmxon) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 0;
}
- if (!to_vmx(vcpu)->nested.vmxon) {
- kvm_queue_exception(vcpu, UD_VECTOR);
+ if (vmx_get_cpl(vcpu)) {
+ kvm_inject_gp(vcpu, 0);
return 0;
}
+
return 1;
}
@@ -8039,6 +8454,7 @@ static void free_nested(struct vcpu_vmx *vmx)
vmx->vmcs01.shadow_vmcs = NULL;
}
kfree(vmx->nested.cached_vmcs12);
+ kfree(vmx->nested.cached_shadow_vmcs12);
/* Unpin physical memory we referred to in the vmcs02 */
if (vmx->nested.apic_access_page) {
kvm_release_page_dirty(vmx->nested.apic_access_page);
@@ -8124,7 +8540,7 @@ static int handle_vmresume(struct kvm_vcpu *vcpu)
* some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
* 64-bit fields are to be returned).
*/
-static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
+static inline int vmcs12_read_any(struct vmcs12 *vmcs12,
unsigned long field, u64 *ret)
{
short offset = vmcs_field_to_offset(field);
@@ -8133,7 +8549,7 @@ static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
if (offset < 0)
return offset;
- p = ((char *)(get_vmcs12(vcpu))) + offset;
+ p = (char *)vmcs12 + offset;
switch (vmcs_field_width(field)) {
case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
@@ -8155,10 +8571,10 @@ static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
}
-static inline int vmcs12_write_any(struct kvm_vcpu *vcpu,
+static inline int vmcs12_write_any(struct vmcs12 *vmcs12,
unsigned long field, u64 field_value){
short offset = vmcs_field_to_offset(field);
- char *p = ((char *) get_vmcs12(vcpu)) + offset;
+ char *p = (char *)vmcs12 + offset;
if (offset < 0)
return offset;
@@ -8211,7 +8627,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
for (i = 0; i < max_fields[q]; i++) {
field = fields[q][i];
field_value = __vmcs_readl(field);
- vmcs12_write_any(&vmx->vcpu, field, field_value);
+ vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
}
/*
* Skip the VM-exit information fields if they are read-only.
@@ -8246,7 +8662,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
for (q = 0; q < ARRAY_SIZE(fields); q++) {
for (i = 0; i < max_fields[q]; i++) {
field = fields[q][i];
- vmcs12_read_any(&vmx->vcpu, field, &field_value);
+ vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
__vmcs_writel(field, field_value);
}
}
@@ -8276,6 +8692,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t gva = 0;
+ struct vmcs12 *vmcs12;
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -8283,10 +8700,24 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_vmcs12(vcpu))
return kvm_skip_emulated_instruction(vcpu);
+ if (!is_guest_mode(vcpu))
+ vmcs12 = get_vmcs12(vcpu);
+ else {
+ /*
+ * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
+ * from a shadowed field sets the ALU flags for VMfailInvalid.
+ */
+ if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) {
+ nested_vmx_failInvalid(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ vmcs12 = get_shadow_vmcs12(vcpu);
+ }
+
/* Decode instruction info and find the field to read */
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/* Read the field, zero-extended to a u64 field_value */
- if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
+ if (vmcs12_read_any(vmcs12, field, &field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
return kvm_skip_emulated_instruction(vcpu);
}
@@ -8328,6 +8759,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
*/
u64 field_value = 0;
struct x86_exception e;
+ struct vmcs12 *vmcs12;
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -8362,23 +8794,44 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
- if (vmcs12_write_any(vcpu, field, field_value) < 0) {
+ if (!is_guest_mode(vcpu))
+ vmcs12 = get_vmcs12(vcpu);
+ else {
+ /*
+ * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
+ * to a shadowed field sets the ALU flags for VMfailInvalid.
+ */
+ if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) {
+ nested_vmx_failInvalid(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ vmcs12 = get_shadow_vmcs12(vcpu);
+ }
+
+ if (vmcs12_write_any(vmcs12, field, field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
return kvm_skip_emulated_instruction(vcpu);
}
- switch (field) {
+ /*
+ * Do not track vmcs12 dirty-state if in guest-mode, as we
+ * actually dirty the shadow vmcs12 instead of vmcs12.
+ */
+ if (!is_guest_mode(vcpu)) {
+ switch (field) {
#define SHADOW_FIELD_RW(x) case x:
#include "vmx_shadow_fields.h"
- /*
- * The fields that can be updated by L1 without a vmexit are
- * always updated in the vmcs02, the others go down the slow
- * path of prepare_vmcs02.
- */
- break;
- default:
- vmx->nested.dirty_vmcs12 = true;
- break;
+ /*
+ * The fields that can be updated by L1 without a vmexit are
+ * always updated in the vmcs02, the others go down the slow
+ * path of prepare_vmcs02.
+ */
+ break;
+ default:
+ vmx->nested.dirty_vmcs12 = true;
+ break;
+ }
}
nested_vmx_succeed(vcpu);
@@ -8429,7 +8882,9 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
new_vmcs12 = kmap(page);
- if (new_vmcs12->revision_id != VMCS12_REVISION) {
+ if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
+ (new_vmcs12->hdr.shadow_vmcs &&
+ !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
kunmap(page);
kvm_release_page_clean(page);
nested_vmx_failValid(vcpu,
@@ -8456,21 +8911,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- gva_t vmcs_gva;
+ unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+ u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
struct x86_exception e;
+ gva_t gva;
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, true, &vmcs_gva))
+ if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
return 1;
/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
- if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
- (void *)&to_vmx(vcpu)->nested.current_vmptr,
- sizeof(u64), &e)) {
+ if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+ sizeof(gpa_t), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -8628,6 +9082,105 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
+static int handle_invpcid(struct kvm_vcpu *vcpu)
+{
+ u32 vmx_instruction_info;
+ unsigned long type;
+ bool pcid_enabled;
+ gva_t gva;
+ struct x86_exception e;
+ unsigned i;
+ unsigned long roots_to_free = 0;
+ struct {
+ u64 pcid;
+ u64 gla;
+ } operand;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
+ if (type > 3) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ /*
+ * According to the Intel instruction reference, the memory operand
+ * is read even if it isn't needed (e.g., for type == all).
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmx_instruction_info, false, &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+
+ if (operand.pcid >> 12 != 0) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+
+ switch (type) {
+ case INVPCID_TYPE_INDIV_ADDR:
+ if ((!pcid_enabled && (operand.pcid != 0)) ||
+ is_noncanonical_address(operand.gla, vcpu)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+ kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
+ return kvm_skip_emulated_instruction(vcpu);
+
+ case INVPCID_TYPE_SINGLE_CTXT:
+ if (!pcid_enabled && (operand.pcid != 0)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ if (kvm_get_active_pcid(vcpu) == operand.pcid) {
+ kvm_mmu_sync_roots(vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_roots[i].cr3)
+ == operand.pcid)
+ roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+
+ kvm_mmu_free_roots(vcpu, roots_to_free);
+ /*
+ * If neither the current cr3 nor any of the prev_roots use the
+ * given PCID, then nothing needs to be done here because a
+ * resync will happen anyway before switching to any other CR3.
+ */
+
+ return kvm_skip_emulated_instruction(vcpu);
+
+ case INVPCID_TYPE_ALL_NON_GLOBAL:
+ /*
+ * Currently, KVM doesn't mark global entries in the shadow
+ * page tables, so a non-global flush just degenerates to a
+ * global flush. If needed, we could optimize this later by
+ * keeping track of global entries in shadow page tables.
+ */
+
+ /* fall-through */
+ case INVPCID_TYPE_ALL_INCL_GLOBAL:
+ kvm_mmu_unload(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+
+ default:
+ BUG(); /* We have already checked above that type <= 3 */
+ }
+}
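The 16-byte descriptor read above is the architectural INVPCID memory operand. As a reference for the handler's checks, here is a minimal sketch of the same validation over the decoded operand; the names are hypothetical, only the layout and the #GP conditions mirror the code above.

    /* Hypothetical sketch of the INVPCID operand checks above */
    struct invpcid_desc_sketch {
    	u64 pcid;	/* bits 63:12 must be zero, else #GP(0) */
    	u64 gla;	/* linear address, consumed only by type 0 */
    };

    static bool invpcid_operand_ok(const struct invpcid_desc_sketch *d,
    			       unsigned long type, bool pcid_enabled)
    {
    	if (type > 3 || (d->pcid >> 12))
    		return false;			/* #GP(0) */
    	if (type <= 1 && !pcid_enabled && d->pcid)
    		return false;			/* PCID must be 0 if PCIDE=0 */
    	/* type 0 additionally requires a canonical gla */
    	return true;
    }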
+
static int handle_pml_full(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
@@ -8777,6 +9330,17 @@ fail:
return 1;
}
+static int handle_encls(struct kvm_vcpu *vcpu)
+{
+ /*
+ * SGX virtualization is not yet supported. There is no software
+ * enable bit for SGX, so we have to trap ENCLS and inject a #UD
+ * to prevent the guest from executing ENCLS.
+ */
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -8831,8 +9395,10 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_XSAVES] = handle_xsaves,
[EXIT_REASON_XRSTORS] = handle_xrstors,
[EXIT_REASON_PML_FULL] = handle_pml_full,
+ [EXIT_REASON_INVPCID] = handle_invpcid,
[EXIT_REASON_VMFUNC] = handle_vmfunc,
[EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer,
+ [EXIT_REASON_ENCLS] = handle_encls,
};
static const int kvm_vmx_max_exit_handlers =
@@ -9003,6 +9569,30 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
return false;
}
+static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12, gpa_t bitmap)
+{
+ u32 vmx_instruction_info;
+ unsigned long field;
+ u8 b;
+
+ if (!nested_cpu_has_shadow_vmcs(vmcs12))
+ return true;
+
+ /* Decode instruction info and find the field to access */
+ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+
+ /* Out-of-range fields always cause a VM exit from L2 to L1 */
+ if (field >> 15)
+ return true;
+
+ if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
+ return true;
+
+ return 1 & (b >> (field & 7));
+}
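The bit test above implements the VMREAD/VMWRITE-bitmap check from the SDM: the 15-bit field number selects byte field/8 and bit field%8 of the 4 KiB bitmap, and a set bit means the access exits to L1. A minimal sketch, assuming the bitmap page has already been copied from guest memory:

    /* Minimal sketch over an in-memory copy of the 4 KiB bitmap */
    static bool vmcs_field_intercepted(const u8 *bitmap, unsigned long field)
    {
    	if (field >> 15)		/* out of range: always exit */
    		return true;
    	return bitmap[field / 8] & (1 << (field & 7));
    }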
+
/*
* Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
* should handle it ourselves in L0 (and then continue L2). Only call this
@@ -9087,10 +9677,15 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
+ case EXIT_REASON_VMREAD:
+ return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
+ vmcs12->vmread_bitmap);
+ case EXIT_REASON_VMWRITE:
+ return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
+ vmcs12->vmwrite_bitmap);
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
- case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
- case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
/*
@@ -9174,6 +9769,9 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
case EXIT_REASON_VMFUNC:
/* VM functions are emulated through L2->L0 vmexits. */
return false;
+ case EXIT_REASON_ENCLS:
+ /* SGX is never exposed to L1 */
+ return false;
default:
return true;
}
@@ -9523,6 +10121,79 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
}
}
+/*
+ * Software based L1D cache flush which is used when microcode providing
+ * the cache control MSR is not loaded.
+ *
+ * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
+ * flushing it requires reading 64 KiB because the replacement algorithm
+ * is not exactly LRU. This could be sized at runtime via topology
+ * information, but as all relevant affected CPUs have a 32 KiB L1D cache
+ * there is no point in doing so.
+ */
+#define L1D_CACHE_ORDER 4
+static void *vmx_l1d_flush_pages;
+
+static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+{
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
+
+ /*
+ * This code is only executed when the flush mode is 'cond' or
+ * 'always'.
+ */
+ if (static_branch_likely(&vmx_l1d_flush_cond)) {
+ bool flush_l1d;
+
+ /*
+ * Clear the per-vcpu flush bit, it gets set again
+ * either from vcpu_run() or from one of the unsafe
+ * VMEXIT handlers.
+ */
+ flush_l1d = vcpu->arch.l1tf_flush_l1d;
+ vcpu->arch.l1tf_flush_l1d = false;
+
+ /*
+ * Clear the per-cpu flush bit, it gets set again from
+ * the interrupt handlers.
+ */
+ flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
+ kvm_clear_cpu_l1tf_flush_l1d();
+
+ if (!flush_l1d)
+ return;
+ }
+
+ vcpu->stat.l1d_flush++;
+
+ if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+ return;
+ }
+
+ asm volatile(
+ /* First ensure the pages are in the TLB */
+ "xorl %%eax, %%eax\n"
+ ".Lpopulate_tlb:\n\t"
+ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $4096, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lpopulate_tlb\n\t"
+ "xorl %%eax, %%eax\n\t"
+ "cpuid\n\t"
+ /* Now fill the cache */
+ "xorl %%eax, %%eax\n"
+ ".Lfill_cache:\n"
+ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $64, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lfill_cache\n\t"
+ "lfence\n"
+ :: [flush_pages] "r" (vmx_l1d_flush_pages),
+ [size] "r" (size)
+ : "eax", "ebx", "ecx", "edx");
+}
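For readability, here is a rough C equivalent of the asm sequence above; it is illustrative only, since the real flush must stay in asm so the compiler cannot reorder or elide the reads:

    static void l1d_flush_sw_sketch(volatile char *pages, int size)
    {
    	int i;

    	/* First pass: touch one byte per 4 KiB page to populate the TLB */
    	for (i = 0; i < size; i += 4096)
    		(void)pages[i];
    	/* (the asm executes CPUID here as a serializing barrier) */
    	/* Second pass: one byte per 64-byte line displaces the L1D */
    	for (i = 0; i < size; i += 64)
    		(void)pages[i];
    }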
+
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -9924,7 +10595,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
clear_atomic_switch_msr(vmx, msrs[i].msr);
else
add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
- msrs[i].host);
+ msrs[i].host, false);
}
static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
@@ -9978,15 +10649,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
cr3 = __get_current_cr3_fast();
- if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) {
+ if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
vmcs_writel(HOST_CR3, cr3);
- vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
+ vmx->loaded_vmcs->host_state.cr3 = cr3;
}
cr4 = cr4_read_shadow();
- if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) {
+ if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
vmcs_writel(HOST_CR4, cr4);
- vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
+ vmx->loaded_vmcs->host_state.cr4 = cr4;
}
/* When single-stepping over STI and MOV SS, we must clear the
@@ -10019,6 +10690,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
(unsigned long)&current_evmcs->host_rsp : 0;
+ if (static_branch_unlikely(&vmx_l1d_should_flush))
+ vmx_l1d_flush(vcpu);
+
asm(
/* Store host registers */
"push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -10179,9 +10853,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* The sysexit path does not restore ds/es, so we must set them to
* a reasonable value ourselves.
*
- * We can't defer this to vmx_load_host_state() since that function
- * may be executed in interrupt context, which saves and restore segments
- * around it, nullifying its effect.
+ * We can't defer this to vmx_prepare_switch_to_host() since that
+ * function may be executed in interrupt context, which saves and
+ * restores segments around it, nullifying its effect.
*/
loadsegment(ds, __USER_DS);
loadsegment(es, __USER_DS);
@@ -10242,8 +10916,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
return;
cpu = get_cpu();
- vmx->loaded_vmcs = vmcs;
vmx_vcpu_put(vcpu);
+ vmx->loaded_vmcs = vmcs;
vmx_vcpu_load(vcpu, cpu);
put_cpu();
}
@@ -10346,11 +11020,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_vmcs;
}
- if (nested) {
+ if (nested)
nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
kvm_vcpu_apicv_active(&vmx->vcpu));
- vmx->nested.vpid02 = allocate_vpid();
- }
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
@@ -10367,7 +11039,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
return &vmx->vcpu;
free_vmcs:
- free_vpid(vmx->nested.vpid02);
free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
kfree(vmx->guest_msrs);
@@ -10381,10 +11052,39 @@ free_vcpu:
return ERR_PTR(err);
}
+#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+
static int vmx_vm_init(struct kvm *kvm)
{
+ spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
+
if (!ple_gap)
kvm->arch.pause_in_guest = true;
+
+ if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+ /* 'I explicitly don't care' is set */
+ break;
+ case L1TF_MITIGATION_FLUSH:
+ case L1TF_MITIGATION_FLUSH_NOSMT:
+ case L1TF_MITIGATION_FULL:
+ /*
+ * Warn upon starting the first VM in a potentially
+ * insecure environment.
+ */
+ if (cpu_smt_control == CPU_SMT_ENABLED)
+ pr_warn_once(L1TF_MSG_SMT);
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
+ pr_warn_once(L1TF_MSG_L1D);
+ break;
+ case L1TF_MITIGATION_FULL_FORCE:
+ /* Flush is enforced */
+ break;
+ }
+ }
return 0;
}
@@ -10583,11 +11283,11 @@ static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu)))
return 1;
- kvm_mmu_unload(vcpu);
kvm_init_shadow_ept_mmu(vcpu,
to_vmx(vcpu)->nested.msrs.ept_caps &
VMX_EPT_EXECUTE_ONLY_BIT,
- nested_ept_ad_enabled(vcpu));
+ nested_ept_ad_enabled(vcpu),
+ nested_ept_get_cr3(vcpu));
vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
@@ -10635,9 +11335,9 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12);
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
+static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct page *page;
u64 hpa;
@@ -10878,6 +11578,38 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
return true;
}
+static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ struct vmcs12 *shadow;
+ struct page *page;
+
+ if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
+ vmcs12->vmcs_link_pointer == -1ull)
+ return;
+
+ shadow = get_shadow_vmcs12(vcpu);
+ page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
+
+ memcpy(shadow, kmap(page), VMCS12_SIZE);
+
+ kunmap(page);
+ kvm_release_page_clean(page);
+}
+
+static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
+ vmcs12->vmcs_link_pointer == -1ull)
+ return;
+
+ kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
+ get_shadow_vmcs12(vcpu), VMCS12_SIZE);
+}
+
static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
@@ -10935,11 +11667,12 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
unsigned long count_field,
unsigned long addr_field)
{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
int maxphyaddr;
u64 count, addr;
- if (vmcs12_read_any(vcpu, count_field, &count) ||
- vmcs12_read_any(vcpu, addr_field, &addr)) {
+ if (vmcs12_read_any(vmcs12, count_field, &count) ||
+ vmcs12_read_any(vmcs12, addr_field, &addr)) {
WARN_ON(1);
return -EINVAL;
}
@@ -10989,6 +11722,19 @@ static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
return 0;
}
+static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (!nested_cpu_has_shadow_vmcs(vmcs12))
+ return 0;
+
+ if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
+ !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
+ return -EINVAL;
+
+ return 0;
+}
+
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e)
{
@@ -11138,12 +11884,16 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
return 1;
}
}
-
- vcpu->arch.cr3 = cr3;
- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
}
- kvm_mmu_reset_context(vcpu);
+ if (!nested_ept)
+ kvm_mmu_new_cr3(vcpu, cr3, false);
+
+ vcpu->arch.cr3 = cr3;
+ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+ kvm_init_mmu(vcpu, false);
+
return 0;
}
@@ -11230,7 +11980,8 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
* Set host-state according to L0's settings (vmcs12 is irrelevant here)
* Some constant fields are set here by vmx_set_constant_host_state().
* Other fields are different per CPU, and will be set later when
- * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
+ * vmx_vcpu_load() is called, and when vmx_prepare_switch_to_guest()
+ * is called.
*/
vmx_set_constant_host_state(vmx);
@@ -11238,10 +11989,10 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
* Set the MSR load/store lists to match L0's settings.
*/
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
set_cr4_guest_host_mask(vmx);
@@ -11302,11 +12053,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
- /*
- * Not in vmcs02: GUEST_PML_INDEX, HOST_FS_SELECTOR, HOST_GS_SELECTOR,
- * HOST_FS_BASE, HOST_GS_BASE.
- */
-
if (vmx->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
@@ -11371,6 +12117,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
exec_control |= vmcs12_exec_ctrl;
}
+ /* VMCS shadowing for L2 is emulated for now */
+ exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
+
if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
vmcs_write16(GUEST_INTR_STATUS,
vmcs12->guest_intr_status);
@@ -11383,6 +12132,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
vmcs_write64(APIC_ACCESS_ADDR, -1ull);
+ if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
+ vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
+
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
@@ -11590,6 +12342,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (nested_vmx_check_pml_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
vmx->nested.msrs.procbased_ctls_low,
vmx->nested.msrs.procbased_ctls_high) ||
@@ -11690,6 +12445,33 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
return 0;
}
+static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ int r;
+ struct page *page;
+ struct vmcs12 *shadow;
+
+ if (vmcs12->vmcs_link_pointer == -1ull)
+ return 0;
+
+ if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
+ return -EINVAL;
+
+ page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
+ if (is_error_page(page))
+ return -EINVAL;
+
+ r = 0;
+ shadow = kmap(page);
+ if (shadow->hdr.revision_id != VMCS12_REVISION ||
+ shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
+ r = -EINVAL;
+ kunmap(page);
+ kvm_release_page_clean(page);
+ return r;
+}
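The hdr accesses above rely on the VMCS header layout introduced by this series: a 31-bit revision identifier with the shadow-VMCS indicator in the top bit. A sketch of the assumed definition:

    /* Assumed layout of the vmcs12 header (bit 31 = shadow-VMCS flag) */
    struct vmcs_hdr {
    	u32 revision_id:31;
    	u32 shadow_vmcs:1;
    };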
+
static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
u32 *exit_qual)
{
@@ -11701,8 +12483,7 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
return 1;
- if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS) &&
- vmcs12->vmcs_link_pointer != -1ull) {
+ if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
*exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
return 1;
}
@@ -11749,13 +12530,17 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
return 0;
}
-static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
+/*
+ * If exit_qual is NULL, this is being called from state restore (either RSM
+ * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
+ */
+static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- u32 msr_entry_idx;
- u32 exit_qual;
- int r;
+ bool from_vmentry = !!exit_qual;
+ u32 dummy_exit_qual;
+ int r = 0;
enter_guest_mode(vcpu);
@@ -11769,17 +12554,28 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
vcpu->arch.tsc_offset += vmcs12->tsc_offset;
r = EXIT_REASON_INVALID_STATE;
- if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+ if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual))
goto fail;
- nested_get_vmcs12_pages(vcpu, vmcs12);
+ if (from_vmentry) {
+ nested_get_vmcs12_pages(vcpu);
- r = EXIT_REASON_MSR_LOAD_FAIL;
- msr_entry_idx = nested_vmx_load_msr(vcpu,
- vmcs12->vm_entry_msr_load_addr,
- vmcs12->vm_entry_msr_load_count);
- if (msr_entry_idx)
- goto fail;
+ r = EXIT_REASON_MSR_LOAD_FAIL;
+ *exit_qual = nested_vmx_load_msr(vcpu,
+ vmcs12->vm_entry_msr_load_addr,
+ vmcs12->vm_entry_msr_load_count);
+ if (*exit_qual)
+ goto fail;
+ } else {
+ /*
+ * The MMU is not initialized to point at the right entities yet and
+ * "get pages" would need to read data from the guest (i.e. we will
+ * need to perform gpa to hpa translation). Request a call
+ * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
+ * have already been set at vmentry time and should not be reset.
+ */
+ kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+ }
/*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11794,8 +12590,7 @@ fail:
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
leave_guest_mode(vcpu);
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
- return 1;
+ return r;
}
/*
@@ -11818,6 +12613,17 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmcs12 = get_vmcs12(vcpu);
+ /*
+ * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
+ * that there *is* a valid VMCS pointer, RFLAGS.CF is set
+ * rather than RFLAGS.ZF, and no error number is stored to the
+ * VM-instruction error field.
+ */
+ if (vmcs12->hdr.shadow_vmcs) {
+ nested_vmx_failInvalid(vcpu);
+ goto out;
+ }
+
if (enable_shadow_vmcs)
copy_shadow_to_vmcs12(vmx);
@@ -11872,12 +12678,28 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
*/
vmx->nested.nested_run_pending = 1;
- ret = enter_vmx_non_root_mode(vcpu);
+ ret = enter_vmx_non_root_mode(vcpu, &exit_qual);
if (ret) {
+ nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual);
vmx->nested.nested_run_pending = 0;
- return ret;
+ return 1;
}
+ /* Hide L1D cache contents from the nested guest. */
+ vmx->vcpu.arch.l1tf_flush_l1d = true;
+
+ /*
+ * Must happen outside of enter_vmx_non_root_mode() as it will
+ * also be used as part of restoring nVMX state for
+ * snapshot restore (migration).
+ *
+ * In this flow, it is assumed that the vmcs12 cache was
+ * transferred as part of the captured nVMX state and should
+ * therefore not be read from guest memory (which may not
+ * exist on the destination host yet).
+ */
+ nested_cache_shadow_vmcs12(vcpu, vmcs12);
+
/*
* If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
* by event injection, halt vcpu.
@@ -12387,6 +13209,17 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
exit_qualification);
+ /*
+ * Must happen outside of sync_vmcs12() as it will
+ * also be used to capture vmcs12 cache as part of
+ * capturing nVMX state for snapshot (migration).
+ *
+ * Otherwise, this flush will dirty guest memory at a
+ * point where user-space already assumes it to be
+ * immutable.
+ */
+ nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
+
if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
vmcs12->vm_exit_msr_store_count))
nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
@@ -12398,8 +13231,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmx_segment_cache_clear(vmx);
/* Update any VMCS fields that might have changed while L2 ran */
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (vmx->hv_deadline_tsc == -1)
vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
@@ -12961,7 +13794,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
if (vmx->nested.smm.guest_mode) {
vcpu->arch.hflags &= ~HF_SMM_MASK;
- ret = enter_vmx_non_root_mode(vcpu);
+ ret = enter_vmx_non_root_mode(vcpu, NULL);
vcpu->arch.hflags |= HF_SMM_MASK;
if (ret)
return ret;
@@ -12976,6 +13809,199 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
return 0;
}
+static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state __user *user_kvm_nested_state,
+ u32 user_data_size)
+{
+ struct vcpu_vmx *vmx;
+ struct vmcs12 *vmcs12;
+ struct kvm_nested_state kvm_state = {
+ .flags = 0,
+ .format = 0,
+ .size = sizeof(kvm_state),
+ .vmx.vmxon_pa = -1ull,
+ .vmx.vmcs_pa = -1ull,
+ };
+
+ if (!vcpu)
+ return kvm_state.size + 2 * VMCS12_SIZE;
+
+ vmx = to_vmx(vcpu);
+ vmcs12 = get_vmcs12(vcpu);
+ if (nested_vmx_allowed(vcpu) &&
+ (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
+ kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
+ kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
+
+ if (vmx->nested.current_vmptr != -1ull) {
+ kvm_state.size += VMCS12_SIZE;
+
+ if (is_guest_mode(vcpu) &&
+ nested_cpu_has_shadow_vmcs(vmcs12) &&
+ vmcs12->vmcs_link_pointer != -1ull)
+ kvm_state.size += VMCS12_SIZE;
+ }
+
+ if (vmx->nested.smm.vmxon)
+ kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
+
+ if (vmx->nested.smm.guest_mode)
+ kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
+
+ if (is_guest_mode(vcpu)) {
+ kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
+
+ if (vmx->nested.nested_run_pending)
+ kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
+ }
+ }
+
+ if (user_data_size < kvm_state.size)
+ goto out;
+
+ if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
+ return -EFAULT;
+
+ if (vmx->nested.current_vmptr == -1ull)
+ goto out;
+
+ /*
+ * When running L2, the authoritative vmcs12 state is in the
+ * vmcs02. When running L1, the authoritative vmcs12 state is
+ * in the shadow vmcs linked to vmcs01, unless
+ * sync_shadow_vmcs is set, in which case, the authoritative
+ * vmcs12 state is in the vmcs12 already.
+ */
+ if (is_guest_mode(vcpu))
+ sync_vmcs12(vcpu, vmcs12);
+ else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
+ copy_shadow_to_vmcs12(vmx);
+
+ if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+ return -EFAULT;
+
+ if (nested_cpu_has_shadow_vmcs(vmcs12) &&
+ vmcs12->vmcs_link_pointer != -1ull) {
+ if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
+ get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
+ return -EFAULT;
+ }
+
+out:
+ return kvm_state.size;
+}
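From userspace, the sizing logic above is consumed as a two-step handshake: query the maximum state size via KVM_CAP_NESTED_STATE, then pass a buffer with its size field filled in. A hedged sketch, assuming vm_fd and vcpu_fd are open KVM file descriptors and with error handling elided:

    int max_size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
    struct kvm_nested_state *state = calloc(1, max_size);

    state->size = max_size;
    if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0)
    	err(1, "KVM_GET_NESTED_STATE");
    /* state->data now holds vmcs12, plus shadow vmcs12 when present */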
+
+static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state __user *user_kvm_nested_state,
+ struct kvm_nested_state *kvm_state)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs12 *vmcs12;
+ u32 exit_qual;
+ int ret;
+
+ if (kvm_state->format != 0)
+ return -EINVAL;
+
+ if (!nested_vmx_allowed(vcpu))
+ return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
+
+ if (kvm_state->vmx.vmxon_pa == -1ull) {
+ if (kvm_state->vmx.smm.flags)
+ return -EINVAL;
+
+ if (kvm_state->vmx.vmcs_pa != -1ull)
+ return -EINVAL;
+
+ vmx_leave_nested(vcpu);
+ return 0;
+ }
+
+ if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
+ return -EINVAL;
+
+ if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
+ return -EINVAL;
+
+ if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
+ !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+ return -EINVAL;
+
+ if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+ (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+ return -EINVAL;
+
+ if (kvm_state->vmx.smm.flags &
+ ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
+ return -EINVAL;
+
+ if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+ !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
+ return -EINVAL;
+
+ vmx_leave_nested(vcpu);
+ if (kvm_state->vmx.vmxon_pa == -1ull)
+ return 0;
+
+ vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
+ ret = enter_vmx_operation(vcpu);
+ if (ret)
+ return ret;
+
+ set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+
+ if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
+ vmx->nested.smm.vmxon = true;
+ vmx->nested.vmxon = false;
+
+ if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
+ vmx->nested.smm.guest_mode = true;
+ }
+
+ vmcs12 = get_vmcs12(vcpu);
+ if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
+ return -EFAULT;
+
+ if (vmcs12->hdr.revision_id != VMCS12_REVISION)
+ return -EINVAL;
+
+ if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+ return 0;
+
+ vmx->nested.nested_run_pending =
+ !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
+ if (nested_cpu_has_shadow_vmcs(vmcs12) &&
+ vmcs12->vmcs_link_pointer != -1ull) {
+ struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
+ if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12))
+ return -EINVAL;
+
+ if (copy_from_user(shadow_vmcs12,
+ user_kvm_nested_state->data + VMCS12_SIZE,
+ sizeof(*vmcs12)))
+ return -EFAULT;
+
+ if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
+ !shadow_vmcs12->hdr.shadow_vmcs)
+ return -EINVAL;
+ }
+
+ if (check_vmentry_prereqs(vcpu, vmcs12) ||
+ check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+ return -EINVAL;
+
+ if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
+ vmx->nested.nested_run_pending = 1;
+
+ vmx->nested.dirty_vmcs12 = true;
+ ret = enter_vmx_non_root_mode(vcpu, NULL);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -12995,7 +14021,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.vcpu_free = vmx_free_vcpu,
.vcpu_reset = vmx_vcpu_reset,
- .prepare_guest_switch = vmx_save_host_state,
+ .prepare_guest_switch = vmx_prepare_switch_to_guest,
.vcpu_load = vmx_vcpu_load,
.vcpu_put = vmx_vcpu_put,
@@ -13028,6 +14054,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.set_rflags = vmx_set_rflags,
.tlb_flush = vmx_flush_tlb,
+ .tlb_flush_gva = vmx_flush_tlb_gva,
.run = vmx_vcpu_run,
.handle_exit = vmx_handle_exit,
@@ -13110,12 +14137,61 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.setup_mce = vmx_setup_mce,
+ .get_nested_state = vmx_get_nested_state,
+ .set_nested_state = vmx_set_nested_state,
+ .get_vmcs12_pages = nested_get_vmcs12_pages,
+
.smi_allowed = vmx_smi_allowed,
.pre_enter_smm = vmx_pre_enter_smm,
.pre_leave_smm = vmx_pre_leave_smm,
.enable_smi_window = enable_smi_window,
};
+static void vmx_cleanup_l1d_flush(void)
+{
+ if (vmx_l1d_flush_pages) {
+ free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+ vmx_l1d_flush_pages = NULL;
+ }
+ /* Restore state so sysfs ignores VMX */
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+}
+
+static void vmx_exit(void)
+{
+#ifdef CONFIG_KEXEC_CORE
+ RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+ synchronize_rcu();
+#endif
+
+ kvm_exit();
+
+#if IS_ENABLED(CONFIG_HYPERV)
+ if (static_branch_unlikely(&enable_evmcs)) {
+ int cpu;
+ struct hv_vp_assist_page *vp_ap;
+ /*
+ * Reset everything to support using non-enlightened VMCS
+ * access later (e.g. when we reload the module with
+ * enlightened_vmcs=0)
+ */
+ for_each_online_cpu(cpu) {
+ vp_ap = hv_get_vp_assist_page(cpu);
+
+ if (!vp_ap)
+ continue;
+
+ vp_ap->current_nested_vmcs = 0;
+ vp_ap->enlighten_vmentry = 0;
+ }
+
+ static_branch_disable(&enable_evmcs);
+ }
+#endif
+ vmx_cleanup_l1d_flush();
+}
+module_exit(vmx_exit);
+
static int __init vmx_init(void)
{
int r;
@@ -13150,10 +14226,25 @@ static int __init vmx_init(void)
#endif
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
- __alignof__(struct vcpu_vmx), THIS_MODULE);
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
return r;
+ /*
+ * Must be called after kvm_init() so enable_ept is properly set
+ * up. Pass in the mitigation mode that was stored by the
+ * pre-module-init parameter parser. If no parameter was given, it
+ * will contain 'auto', which is turned into the default 'cond'
+ * mitigation mode.
+ */
+ if (boot_cpu_has(X86_BUG_L1TF)) {
+ r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+ if (r) {
+ vmx_exit();
+ return r;
+ }
+ }
+
#ifdef CONFIG_KEXEC_CORE
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
crash_vmclear_local_loaded_vmcss);
@@ -13162,39 +14253,4 @@ static int __init vmx_init(void)
return 0;
}
-
-static void __exit vmx_exit(void)
-{
-#ifdef CONFIG_KEXEC_CORE
- RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
- synchronize_rcu();
-#endif
-
- kvm_exit();
-
-#if IS_ENABLED(CONFIG_HYPERV)
- if (static_branch_unlikely(&enable_evmcs)) {
- int cpu;
- struct hv_vp_assist_page *vp_ap;
- /*
- * Reset everything to support using non-enlightened VMCS
- * access later (e.g. when we reload the module with
- * enlightened_vmcs=0)
- */
- for_each_online_cpu(cpu) {
- vp_ap = hv_get_vp_assist_page(cpu);
-
- if (!vp_ap)
- continue;
-
- vp_ap->current_nested_vmcs = 0;
- vp_ap->enlighten_vmentry = 0;
- }
-
- static_branch_disable(&enable_evmcs);
- }
-#endif
-}
-
-module_init(vmx_init)
-module_exit(vmx_exit)
+module_init(vmx_init);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0046aa70205a..506bd2b4b8bb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -195,6 +195,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "irq_injections", VCPU_STAT(irq_injections) },
{ "nmi_injections", VCPU_STAT(nmi_injections) },
{ "req_event", VCPU_STAT(req_event) },
+ { "l1d_flush", VCPU_STAT(l1d_flush) },
{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
@@ -847,16 +848,21 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
+ bool skip_tlb_flush = false;
#ifdef CONFIG_X86_64
bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
- if (pcid_enabled)
- cr3 &= ~CR3_PCID_INVD;
+ if (pcid_enabled) {
+ skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
+ cr3 &= ~X86_CR3_PCID_NOFLUSH;
+ }
#endif
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
- kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ if (!skip_tlb_flush) {
+ kvm_mmu_sync_roots(vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
return 0;
}
@@ -867,9 +873,10 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
+ kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush);
vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
- kvm_mmu_new_cr3(vcpu);
+
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
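With CR4.PCIDE set, bit 63 of the value written to CR3 is a hint rather than part of the address: it asks the processor to keep the TLB entries tagged with the new PCID. A conceptual decode of the layout handled above:

    /* Conceptual CR3 decode when CR4.PCIDE = 1 */
    static void decode_cr3_sketch(u64 cr3)
    {
    	u64  pcid    = cr3 & 0xfff;			/* bits 11:0 */
    	bool noflush = cr3 & X86_CR3_PCID_NOFLUSH;	/* bit 63 */
    	u64  base    = cr3 & ~(0xfffULL | X86_CR3_PCID_NOFLUSH);

    	/* base is the physical address of the top-level page table */
    	(void)pcid; (void)noflush; (void)base;
    }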
@@ -1097,15 +1104,41 @@ static u32 msr_based_features[] = {
MSR_F10H_DECFG,
MSR_IA32_UCODE_REV,
+ MSR_IA32_ARCH_CAPABILITIES,
};
static unsigned int num_msr_based_features;
+u64 kvm_get_arch_capabilities(void)
+{
+ u64 data;
+
+ rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);
+
+ /*
+ * If we're doing cache flushes (either "always" or "cond")
+ * we will do one whenever the guest does a vmlaunch/vmresume.
+ * If an outer hypervisor is doing the cache flush for us
+ * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
+ * capability to the guest too, and if EPT is disabled we're not
+ * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will
+ * require a nested hypervisor to do a flush of its own.
+ */
+ if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
+ data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
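The practical effect of advertising ARCH_CAP_SKIP_VMENTRY_L1DFLUSH is that a hypervisor running as a guest can skip its own software L1D flush. A sketch of how a nested hypervisor would consume the bit (l1d_flush_needed is a hypothetical flag):

    u64 caps = 0;

    rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &caps);
    if (caps & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH)
    	l1d_flush_needed = false;	/* outer hypervisor flushes for us */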
+
static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
switch (msr->index) {
+ case MSR_IA32_ARCH_CAPABILITIES:
+ msr->data = kvm_get_arch_capabilities();
+ break;
case MSR_IA32_UCODE_REV:
- rdmsrl(msr->index, msr->data);
+ rdmsrl_safe(msr->index, &msr->data);
break;
default:
if (kvm_x86_ops->get_msr_feature(msr))
@@ -2158,10 +2191,11 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.mcg_status = data;
break;
case MSR_IA32_MCG_CTL:
- if (!(mcg_cap & MCG_CTL_P))
+ if (!(mcg_cap & MCG_CTL_P) &&
+ (data || !msr_info->host_initiated))
return 1;
if (data != 0 && data != ~(u64)0)
- return -1;
+ return 1;
vcpu->arch.mcg_ctl = data;
break;
default:
@@ -2549,7 +2583,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
}
EXPORT_SYMBOL_GPL(kvm_get_msr);
-static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
u64 data;
u64 mcg_cap = vcpu->arch.mcg_cap;
@@ -2564,7 +2598,7 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
data = vcpu->arch.mcg_cap;
break;
case MSR_IA32_MCG_CTL:
- if (!(mcg_cap & MCG_CTL_P))
+ if (!(mcg_cap & MCG_CTL_P) && !host)
return 1;
data = vcpu->arch.mcg_ctl;
break;
@@ -2697,7 +2731,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
- return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
+ return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
+ msr_info->host_initiated);
case MSR_K7_CLK_CTL:
/*
* Provide expected ramp-up count for K7. All other
@@ -2718,7 +2753,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case HV_X64_MSR_TSC_EMULATION_CONTROL:
case HV_X64_MSR_TSC_EMULATION_STATUS:
return kvm_hv_get_msr_common(vcpu,
- msr_info->index, &msr_info->data);
+ msr_info->index, &msr_info->data,
+ msr_info->host_initiated);
break;
case MSR_IA32_BBL_CR_CTL3:
/* This legacy MSR exists but isn't fully documented in current
@@ -2942,6 +2978,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_X2APIC_API:
r = KVM_X2APIC_API_VALID_FLAGS;
break;
+ case KVM_CAP_NESTED_STATE:
+ r = kvm_x86_ops->get_nested_state ?
+ kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
+ break;
default:
break;
}
@@ -3958,6 +3998,56 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
+ case KVM_GET_NESTED_STATE: {
+ struct kvm_nested_state __user *user_kvm_nested_state = argp;
+ u32 user_data_size;
+
+ r = -EINVAL;
+ if (!kvm_x86_ops->get_nested_state)
+ break;
+
+ BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
+ if (get_user(user_data_size, &user_kvm_nested_state->size))
+ return -EFAULT;
+
+ r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
+ user_data_size);
+ if (r < 0)
+ return r;
+
+ if (r > user_data_size) {
+ if (put_user(r, &user_kvm_nested_state->size))
+ return -EFAULT;
+ return -E2BIG;
+ }
+ r = 0;
+ break;
+ }
+ case KVM_SET_NESTED_STATE: {
+ struct kvm_nested_state __user *user_kvm_nested_state = argp;
+ struct kvm_nested_state kvm_state;
+
+ r = -EINVAL;
+ if (!kvm_x86_ops->set_nested_state)
+ break;
+
+ if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
+ return -EFAULT;
+
+ if (kvm_state.size < sizeof(kvm_state))
+ return -EINVAL;
+
+ if (kvm_state.flags &
+ ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
+ return -EINVAL;
+
+ /* nested_run_pending implies guest_mode. */
+ if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
+ return -EINVAL;
+
+ r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
+ break;
+ }
default:
r = -EINVAL;
}
@@ -4874,6 +4964,9 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
unsigned int bytes, struct x86_exception *exception)
{
+ /* kvm_write_guest_virt_system can pull in tons of pages. */
+ vcpu->arch.l1tf_flush_l1d = true;
+
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
}
@@ -6050,6 +6143,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
bool writeback = true;
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
+ vcpu->arch.l1tf_flush_l1d = true;
+
/*
* Clear write_fault_to_shadow_pgtable here to ensure it is
* never reused.
@@ -6471,20 +6566,22 @@ static void kvm_set_mmio_spte_mask(void)
* Set the reserved bits and the present bit of an paging-structure
* entry to generate page fault with PFER.RSV = 1.
*/
- /* Mask the reserved physical address bits. */
- mask = rsvd_bits(maxphyaddr, 51);
+
+ /*
+ * Mask the uppermost physical address bit, which would be reserved as
+ * long as the supported physical address width is less than 52.
+ */
+ mask = 1ull << 51;
/* Set the present bit. */
mask |= 1ull;
-#ifdef CONFIG_X86_64
/*
* If reserved bit is not supported, clear the present bit to disable
* mmio page fault.
*/
- if (maxphyaddr == 52)
+ if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
mask &= ~1ull;
-#endif
kvm_mmu_set_mmio_spte_mask(mask, mask);
}
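Concretely: on a CPU reporting fewer than 52 physical address bits, bit 51 is reserved in every paging-structure entry, so the mask computed above makes MMIO SPTEs both present and reserved, and the resulting fault is unambiguous. Worked example:

    /* maxphyaddr = 46: bit 51 is reserved in paging-structure entries */
    u64 mask = (1ull << 51) | 1ull;	/* reserved bit 51 + present bit 0 */
    /* an SPTE with both bits set faults with PFERR.RSV = 1 -> MMIO */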
@@ -6737,6 +6834,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
case KVM_HC_CLOCK_PAIRING:
ret = kvm_pv_clock_pairing(vcpu, a0, a1);
break;
+ case KVM_HC_SEND_IPI:
+ ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
+ break;
#endif
default:
ret = -KVM_ENOSYS;
@@ -7203,8 +7303,9 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
+int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end,
+ bool blockable)
{
unsigned long apic_address;
@@ -7215,6 +7316,8 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
if (start <= apic_address && apic_address < end)
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+
+ return 0;
}
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
@@ -7255,6 +7358,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
bool req_immediate_exit = false;
if (kvm_request_pending(vcpu)) {
+ if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
+ kvm_x86_ops->get_vmcs12_pages(vcpu);
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
@@ -7270,6 +7375,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
kvm_mmu_sync_roots(vcpu);
+ if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu))
+ kvm_mmu_load_cr3(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
kvm_vcpu_flush_tlb(vcpu, true);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
@@ -7579,6 +7686,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ vcpu->arch.l1tf_flush_l1d = true;
for (;;) {
if (kvm_vcpu_running(vcpu)) {
@@ -7980,6 +8088,10 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+ (sregs->cr4 & X86_CR4_OSXSAVE))
+ return -EINVAL;
+
if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
/*
* When EFER.LME and CR0.PG are set, the processor is in
@@ -8010,10 +8122,6 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
struct desc_ptr dt;
int ret = -EINVAL;
- if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
- (sregs->cr4 & X86_CR4_OSXSAVE))
- goto out;
-
if (kvm_valid_sregs(vcpu, sregs))
goto out;
@@ -8698,6 +8806,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
+ vcpu->arch.l1tf_flush_l1d = true;
kvm_x86_ops->sched_in(vcpu, cpu);
}
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 298ef1479240..3b24dc05251c 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -256,7 +256,7 @@ ENTRY(__memcpy_mcsafe)
/* Copy successful. Return zero */
.L_done_memcpy_trap:
- xorq %rax, %rax
+ xorl %eax, %eax
ret
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 2f3c9196b834..a12afff146d1 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -111,6 +111,8 @@ static struct addr_marker address_markers[] = {
[END_OF_SPACE_NR] = { -1, NULL }
};
+#define INIT_PGD ((pgd_t *) &init_top_pgt)
+
#else /* CONFIG_X86_64 */
enum address_markers_idx {
@@ -121,6 +123,9 @@ enum address_markers_idx {
#ifdef CONFIG_HIGHMEM
PKMAP_BASE_NR,
#endif
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ LDT_NR,
+#endif
CPU_ENTRY_AREA_NR,
FIXADDR_START_NR,
END_OF_SPACE_NR,
@@ -134,11 +139,16 @@ static struct addr_marker address_markers[] = {
#ifdef CONFIG_HIGHMEM
[PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" },
#endif
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ [LDT_NR] = { 0UL, "LDT remap" },
+#endif
[CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" },
[FIXADDR_START_NR] = { 0UL, "Fixmap area" },
[END_OF_SPACE_NR] = { -1, NULL }
};
+#define INIT_PGD (swapper_pg_dir)
+
#endif /* !CONFIG_X86_64 */
/* Multipliers for offsets within the PTEs */
@@ -496,11 +506,7 @@ static inline bool is_hypervisor_range(int idx)
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
bool checkwx, bool dmesg)
{
-#ifdef CONFIG_X86_64
- pgd_t *start = (pgd_t *) &init_top_pgt;
-#else
- pgd_t *start = swapper_pg_dir;
-#endif
+ pgd_t *start = INIT_PGD;
pgprotval_t prot, eff;
int i;
struct pg_state st = {};
@@ -563,12 +569,13 @@ void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
-static void ptdump_walk_user_pgd_level_checkwx(void)
+void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
- pgd_t *pgd = (pgd_t *) &init_top_pgt;
+ pgd_t *pgd = INIT_PGD;
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!(__supported_pte_mask & _PAGE_NX) ||
+ !static_cpu_has(X86_FEATURE_PTI))
return;
pr_info("x86/mm: Checking user space page tables\n");
@@ -580,7 +587,6 @@ static void ptdump_walk_user_pgd_level_checkwx(void)
void ptdump_walk_pgd_level_checkwx(void)
{
ptdump_walk_pgd_level_core(NULL, NULL, true, false);
- ptdump_walk_user_pgd_level_checkwx();
}
static int __init pt_dump_init(void)
@@ -609,6 +615,9 @@ static int __init pt_dump_init(void)
# endif
address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
+# ifdef CONFIG_MODIFY_LDT_SYSCALL
+ address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
+# endif
#endif
return 0;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2aafa6ab6103..b9123c497e0a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -16,6 +16,7 @@
#include <linux/prefetch.h> /* prefetchw */
#include <linux/context_tracking.h> /* exception_enter(), ... */
#include <linux/uaccess.h> /* faulthandler_disabled() */
+#include <linux/mm_types.h>
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/traps.h> /* dotraplinkage, ... */
@@ -317,8 +318,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
- WARN_ON_ONCE(in_nmi());
-
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
@@ -1001,7 +1000,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, u32 *pkey, unsigned int fault)
+ unsigned long address, u32 *pkey, vm_fault_t fault)
{
if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
no_context(regs, error_code, address, 0, 0);
@@ -1215,7 +1214,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
struct vm_area_struct *vma;
struct task_struct *tsk;
struct mm_struct *mm;
- int fault, major = 0;
+ vm_fault_t fault, major = 0;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
u32 pkey;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cee58a972cb2..acfab322fbe0 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -4,6 +4,8 @@
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h> /* for max_low_pfn */
+#include <linux/swapfile.h>
+#include <linux/swapops.h>
#include <asm/set_memory.h>
#include <asm/e820/api.h>
@@ -773,13 +775,44 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
}
}
+/*
+ * begin/end can be in the direct map or the "high kernel mapping"
+ * used for the kernel image only. free_init_pages() will do the
+ * right thing for either kind of address.
+ */
+void free_kernel_image_pages(void *begin, void *end)
+{
+ unsigned long begin_ul = (unsigned long)begin;
+ unsigned long end_ul = (unsigned long)end;
+ unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;
+
+ free_init_pages("unused kernel image", begin_ul, end_ul);
+
+ /*
+ * PTI maps some of the kernel into userspace. For performance,
+ * this includes some kernel areas that do not contain secrets.
+ * Those areas might be adjacent to the parts of the kernel image
+ * being freed, which may contain secrets. Remove the "high kernel
+ * image mapping" for these freed areas, ensuring they are not even
+ * potentially vulnerable to Meltdown regardless of the specific
+ * optimizations PTI is currently using.
+ *
+ * The "noalias" prevents unmapping the direct map alias which is
+ * needed to access the freed pages.
+ *
+ * This is only valid for 64-bit kernels. 32-bit kernels have only one
+ * mapping of the kernel, so unmapping it would make the freed pages
+ * inaccessible.
+ */
+ if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
+ set_memory_np_noalias(begin_ul, len_pages);
+}
+
void __ref free_initmem(void)
{
e820__reallocate_tables();
- free_init_pages("unused kernel",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
+ free_kernel_image_pages(&__init_begin, &__init_end);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -880,3 +913,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
__pte2cachemode_tbl[entry] = cache;
}
+
+#ifdef CONFIG_SWAP
+unsigned long max_swapfile_size(void)
+{
+ unsigned long pages;
+
+ pages = generic_max_swapfile_size();
+
+ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+ unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
+ /*
+ * We encode swap offsets also with 3 bits below those for pfn
+ * which makes the usable limit higher.
+ */
+#if CONFIG_PGTABLE_LEVELS > 2
+ l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
+#endif
+ pages = min_t(unsigned long, l1tf_limit, pages);
+ }
+ return pages;
+}
+#endif
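As a worked example of the clamp above, assuming 46 physical address bits and 4 KiB pages:

    /* Worked example, assuming x86_phys_bits = 46 */
    unsigned long l1tf_limit = 1UL << (46 - 1 - 12);  /* MAX_PA/2 in pages */
    #if CONFIG_PGTABLE_LEVELS > 2
    l1tf_limit <<= 3;	/* 3 spare low offset bits raise the limit x8 */
    #endif
    /* final limit: min(l1tf_limit, generic_max_swapfile_size()) */

So swap offsets are capped at MAX_PA/2 (32 TiB in this example) before the three spare offset bits are applied.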
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a688617c727e..dd519f372169 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1283,20 +1283,10 @@ void mark_rodata_ro(void)
set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
- free_init_pages("unused kernel",
- (unsigned long) __va(__pa_symbol(text_end)),
- (unsigned long) __va(__pa_symbol(rodata_start)));
- free_init_pages("unused kernel",
- (unsigned long) __va(__pa_symbol(rodata_end)),
- (unsigned long) __va(__pa_symbol(_sdata)));
+ free_kernel_image_pages((void *)text_end, (void *)rodata_start);
+ free_kernel_image_pages((void *)rodata_end, (void *)_sdata);
debug_checkwx();
-
- /*
- * Do this after all of the manipulation of the
- * kernel text page tables are complete.
- */
- pti_clone_kernel_text();
}
int kern_addr_valid(unsigned long addr)
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 7c8686709636..79eb55ce69a9 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -126,24 +126,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
+ pmd_t new_pmd;
pmdval_t v = pmd_val(*pmd);
if (clear) {
- *old = v & _PAGE_PRESENT;
- v &= ~_PAGE_PRESENT;
- } else /* presume this has been called with clear==true previously */
- v |= *old;
- set_pmd(pmd, __pmd(v));
+ *old = v;
+ new_pmd = pmd_mknotpresent(*pmd);
+ } else {
+ /* Presume this has been called with clear==true previously */
+ new_pmd = __pmd(*old);
+ }
+ set_pmd(pmd, new_pmd);
}
static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
pteval_t v = pte_val(*pte);
if (clear) {
- *old = v & _PAGE_PRESENT;
- v &= ~_PAGE_PRESENT;
- } else /* presume this has been called with clear==true previously */
- v |= *old;
- set_pte_atomic(pte, __pte(v));
+ *old = v;
+ /* Nothing should care about address */
+ pte_clear(&init_mm, 0, pte);
+ } else {
+ /* Presume this has been called with clear==true previously */
+ set_pte_atomic(pte, __pte(*old));
+ }
}
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 48c591251600..f40ab8185d94 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -240,3 +240,24 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
return phys_addr_valid(addr + count - 1);
}
+
+/*
+ * Only allow root to set high MMIO mappings to PROT_NONE.
+ * This prevents an unprivileged user from setting them to PROT_NONE
+ * and inverting them, so that they then point to valid memory for
+ * L1TF speculation.
+ *
+ * Note: locked-down kernels may want to disable the root override.
+ */
+bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return true;
+ if (!__pte_needs_invert(pgprot_val(prot)))
+ return true;
+ /* If it's real memory always allow */
+ if (pfn_valid(pfn))
+ return true;
+ if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+ return false;
+ return true;
+}
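The __pte_needs_invert() test refers to the L1TF PTE-inversion scheme used elsewhere in this series: a non-present (e.g. PROT_NONE) PTE stores the complement of the PFN, so a speculative walk through it cannot resolve to a valid physical address. Conceptual sketch, not the real mask helpers:

    /* Conceptual: complement the pfn bits of a non-present pte */
    static u64 invert_pfn_sketch(u64 pfn, int phys_bits)
    {
    	u64 pfn_mask = (1ULL << (phys_bits - 12)) - 1;

    	return ~pfn & pfn_mask;
    }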
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 34a2a3bfde9c..b54d52a2d00a 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -61,7 +61,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
eb->nid = nid;
if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
- emu_nid_to_phys[nid] = nid;
+ emu_nid_to_phys[nid] = pb->nid;
pb->start += size;
if (pb->start >= pb->end) {
@@ -198,40 +198,73 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
return end;
}
+static u64 uniform_size(u64 max_addr, u64 base, u64 hole, int nr_nodes)
+{
+ unsigned long max_pfn = PHYS_PFN(max_addr);
+ unsigned long base_pfn = PHYS_PFN(base);
+ unsigned long hole_pfns = PHYS_PFN(hole);
+
+ return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);
+}
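uniform_size() works in whole page frames on purpose, so the per-node division cannot yield sub-page remainders. With hypothetical numbers:

    /* 16 GiB node, no holes, split 4 ways -> exactly 4 GiB per node */
    u64 per_node = uniform_size(16ULL << 30, 0, 0, 4);

In the non-uniform path the hole size is subtracted first, as shown where it is called below.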
+
/*
* Sets up fake nodes of `size' interleaved over physical nodes ranging from
* `addr' to `max_addr'.
*
* Returns zero on success or negative on error.
*/
-static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
struct numa_meminfo *pi,
- u64 addr, u64 max_addr, u64 size)
+ u64 addr, u64 max_addr, u64 size,
+ int nr_nodes, struct numa_memblk *pblk,
+ int nid)
{
nodemask_t physnode_mask = numa_nodes_parsed;
+ int i, ret, uniform = 0;
u64 min_size;
- int nid = 0;
- int i, ret;
- if (!size)
+ if ((!size && !nr_nodes) || (nr_nodes && !pblk))
return -1;
+
/*
- * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
- * increased accordingly if the requested size is too small. This
- * creates a uniform distribution of node sizes across the entire
- * machine (but not necessarily over physical nodes).
+ * In the 'uniform' case split the passed in physical node by
+ * nr_nodes, in the non-uniform case, ignore the passed in
+ * physical block and try to create nodes of at least size
+ * @size.
+ *
+ * In the uniform case, split the nodes strictly by physical
+ * capacity, i.e. ignore holes. In the non-uniform case account
+ * for holes and treat @size as a minimum floor.
*/
- min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
- min_size = max(min_size, FAKE_NODE_MIN_SIZE);
- if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
- min_size = (min_size + FAKE_NODE_MIN_SIZE) &
- FAKE_NODE_MIN_HASH_MASK;
+ if (!nr_nodes)
+ nr_nodes = MAX_NUMNODES;
+ else {
+ nodes_clear(physnode_mask);
+ node_set(pblk->nid, physnode_mask);
+ uniform = 1;
+ }
+
+ if (uniform) {
+ min_size = uniform_size(max_addr, addr, 0, nr_nodes);
+ size = min_size;
+ } else {
+ /*
+ * The limit on emulated nodes is MAX_NUMNODES, so the
+ * size per node is increased accordingly if the
+ * requested size is too small. This creates a uniform
+ * distribution of node sizes across the entire machine
+ * (but not necessarily over physical nodes).
+ */
+ min_size = uniform_size(max_addr, addr,
+ mem_hole_size(addr, max_addr), nr_nodes);
+ }
+ min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
if (size < min_size) {
pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
size >> 20, min_size >> 20);
size = min_size;
}
- size &= FAKE_NODE_MIN_HASH_MASK;
+ size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);
/*
* Fill physical nodes with fake nodes of size until there is no memory
@@ -248,10 +281,14 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
node_clear(i, physnode_mask);
continue;
}
+
start = pi->blk[phys_blk].start;
limit = pi->blk[phys_blk].end;
- end = find_end_of_node(start, limit, size);
+ if (uniform)
+ end = start + size;
+ else
+ end = find_end_of_node(start, limit, size);
/*
* If there won't be at least FAKE_NODE_MIN_SIZE of
* non-reserved memory in ZONE_DMA32 for the next node,
@@ -266,7 +303,8 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
* next node, this one must extend to the end of the
* physical node.
*/
- if (limit - end - mem_hole_size(end, limit) < size)
+ if ((limit - end - mem_hole_size(end, limit) < size)
+ && !uniform)
end = limit;
ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
@@ -276,7 +314,15 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
return ret;
}
}
- return 0;
+ return nid;
+}
+
+static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+ struct numa_meminfo *pi,
+ u64 addr, u64 max_addr, u64 size)
+{
+ return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
+ 0, NULL, NUMA_NO_NODE);
}
int __init setup_emu2phys_nid(int *dfl_phys_nid)
@@ -346,7 +392,28 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
* the fixed node size. Otherwise, if it is just a single number N,
* split the system RAM into N fake nodes.
*/
- if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
+ if (strchr(emu_cmdline, 'U')) {
+ nodemask_t physnode_mask = numa_nodes_parsed;
+ unsigned long n;
+ int nid = 0;
+
+ n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
+ ret = -1;
+ for_each_node_mask(i, physnode_mask) {
+ ret = split_nodes_size_interleave_uniform(&ei, &pi,
+ pi.blk[i].start, pi.blk[i].end, 0,
+ n, &pi.blk[i], nid);
+ if (ret < 0)
+ break;
+ if (ret < n) {
+ pr_info("%s: phys: %d only got %d of %ld nodes, failing\n",
+ __func__, i, ret, n);
+ ret = -1;
+ break;
+ }
+ nid = ret;
+ }
+ } else if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
u64 size;
size = memparse(emu_cmdline, &emu_cmdline);
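The command-line grammar handled above now has three arms: a 'U' suffix asks for N uniform nodes per physical node, an 'M'/'G' suffix gives a fixed node size, and a bare number splits system RAM into that many nodes. A toy userspace parser sketching the same dispatch (simplified; memparse and the kernel's string helpers are replaced with libc calls):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *samples[] = { "8U", "512M", "4G", "16" };

	for (int i = 0; i < 4; i++) {
		const char *s = samples[i];
		char *end;
		unsigned long n = strtoul(s, &end, 0);

		if (strchr(s, 'U'))
			printf("%-5s %lu uniform nodes per physical node\n", s, n);
		else if (*end == 'M' || *end == 'G')
			printf("%-5s fixed node size of %lu %ciB\n", s, n, *end);
		else
			printf("%-5s %lu fake nodes over system RAM\n", s, n);
	}
	return 0;
}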
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3bded76e8d5c..8d6c34fe49be 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -53,6 +53,7 @@ static DEFINE_SPINLOCK(cpa_lock);
#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
+#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */
#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];
@@ -1014,8 +1015,8 @@ static long populate_pmd(struct cpa_data *cpa,
pmd = pmd_offset(pud, start);
- set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
- massage_pgprot(pmd_pgprot)));
+ set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
+ canon_pgprot(pmd_pgprot))));
start += PMD_SIZE;
cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
@@ -1087,8 +1088,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
* Map everything starting from the Gb boundary, possibly with 1G pages
*/
while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
- set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
- massage_pgprot(pud_pgprot)));
+ set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
+ canon_pgprot(pud_pgprot))));
start += PUD_SIZE;
cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
@@ -1486,6 +1487,9 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
/* No alias checking for _NX bit modifications */
checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
+ /* Has caller explicitly disabled alias checking? */
+ if (in_flag & CPA_NO_CHECK_ALIAS)
+ checkalias = 0;
ret = __change_page_attr_set_clr(&cpa, checkalias);
@@ -1772,6 +1776,15 @@ int set_memory_np(unsigned long addr, int numpages)
return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}
+int set_memory_np_noalias(unsigned long addr, int numpages)
+{
+ int cpa_flags = CPA_NO_CHECK_ALIAS;
+
+ return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
+ __pgprot(_PAGE_PRESENT), 0,
+ cpa_flags, NULL);
+}
+
int set_memory_4k(unsigned long addr, int numpages)
{
return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
@@ -1784,6 +1797,12 @@ int set_memory_nonglobal(unsigned long addr, int numpages)
__pgprot(_PAGE_GLOBAL), 0);
}
+int set_memory_global(unsigned long addr, int numpages)
+{
+ return change_page_attr_set(&addr, numpages,
+ __pgprot(_PAGE_GLOBAL), 0);
+}
+
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
struct cpa_data cpa;
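change_page_attr_set_clr() now has two independent reasons to skip the alias walk: the change touches only the NX bit, or the caller passed CPA_NO_CHECK_ALIAS (as the new set_memory_np_noalias() does). A compact model of that decision, with the flag values copied from the diff and PAGE_NX_BIT standing in for _PAGE_NX:

#include <stdio.h>

#define CPA_FLUSHTLB       1
#define CPA_ARRAY          2
#define CPA_PAGES_ARRAY    4
#define CPA_NO_CHECK_ALIAS 8	/* do not search for aliases */

static const unsigned long long PAGE_NX_BIT = 1ULL << 63;

static int want_alias_check(unsigned long long set, unsigned long long clr,
			    int in_flag)
{
	int checkalias = (set | clr) != PAGE_NX_BIT;

	if (in_flag & CPA_NO_CHECK_ALIAS)
		checkalias = 0;
	return checkalias;
}

int main(void)
{
	printf("NX-only change:          %d\n", want_alias_check(PAGE_NX_BIT, 0, 0));
	printf("clear present, noalias:  %d\n", want_alias_check(0, 1, CPA_NO_CHECK_ALIAS));
	printf("clear present, default:  %d\n", want_alias_check(0, 1, 0));
	return 0;
}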
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 47b5951e592b..3ef095c70ae3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -182,6 +182,14 @@ static void pgd_dtor(pgd_t *pgd)
*/
#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
+/*
+ * We allocate separate PMDs for the kernel part of the user page-table
+ * when PTI is enabled. We need them to map the per-process LDT into the
+ * user-space page-table.
+ */
+#define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
+ KERNEL_PGD_PTRS : 0)
+
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
@@ -202,14 +210,14 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS 0
-
+#define PREALLOCATED_USER_PMDS 0
#endif /* CONFIG_X86_PAE */
-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
int i;
- for(i = 0; i < PREALLOCATED_PMDS; i++)
+ for (i = 0; i < count; i++)
if (pmds[i]) {
pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
free_page((unsigned long)pmds[i]);
@@ -217,7 +225,7 @@ static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
}
}
-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
int i;
bool failed = false;
@@ -226,7 +234,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
if (mm == &init_mm)
gfp &= ~__GFP_ACCOUNT;
- for(i = 0; i < PREALLOCATED_PMDS; i++) {
+ for (i = 0; i < count; i++) {
pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
if (!pmd)
failed = true;
@@ -241,7 +249,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
}
if (failed) {
- free_pmds(mm, pmds);
+ free_pmds(mm, pmds, count);
return -ENOMEM;
}
@@ -254,23 +262,38 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
* preallocate which never got a corresponding vma will need to be
* freed manually.
*/
+static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
+{
+ pgd_t pgd = *pgdp;
+
+ if (pgd_val(pgd) != 0) {
+ pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
+ *pgdp = native_make_pgd(0);
+
+ paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+ pmd_free(mm, pmd);
+ mm_dec_nr_pmds(mm);
+ }
+}
+
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
int i;
- for(i = 0; i < PREALLOCATED_PMDS; i++) {
- pgd_t pgd = pgdp[i];
+ for (i = 0; i < PREALLOCATED_PMDS; i++)
+ mop_up_one_pmd(mm, &pgdp[i]);
- if (pgd_val(pgd) != 0) {
- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
- pgdp[i] = native_make_pgd(0);
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
- pmd_free(mm, pmd);
- mm_dec_nr_pmds(mm);
- }
- }
+ pgdp = kernel_to_user_pgdp(pgdp);
+
+ for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
+ mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
+#endif
}
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
@@ -296,6 +319,38 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
}
}
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+ pgd_t *k_pgd, pmd_t *pmds[])
+{
+ pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
+ pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+ p4d_t *u_p4d;
+ pud_t *u_pud;
+ int i;
+
+ u_p4d = p4d_offset(u_pgd, 0);
+ u_pud = pud_offset(u_p4d, 0);
+
+ s_pgd += KERNEL_PGD_BOUNDARY;
+ u_pud += KERNEL_PGD_BOUNDARY;
+
+ for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
+ pmd_t *pmd = pmds[i];
+
+ memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
+ sizeof(pmd_t) * PTRS_PER_PMD);
+
+ pud_populate(mm, u_pud, pmd);
+ }
+
+}
+#else
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+ pgd_t *k_pgd, pmd_t *pmds[])
+{
+}
+#endif
/*
* Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
* assumes that pgd should be in one page.
@@ -329,9 +384,6 @@ static int __init pgd_cache_init(void)
*/
pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
SLAB_PANIC, NULL);
- if (!pgd_cache)
- return -ENOMEM;
-
return 0;
}
core_initcall(pgd_cache_init);
@@ -343,7 +395,8 @@ static inline pgd_t *_pgd_alloc(void)
* We allocate one page for pgd.
*/
if (!SHARED_KERNEL_PMD)
- return (pgd_t *)__get_free_page(PGALLOC_GFP);
+ return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+ PGD_ALLOCATION_ORDER);
/*
* Now PAE kernel is not running as a Xen domain. We can allocate
@@ -355,7 +408,7 @@ static inline pgd_t *_pgd_alloc(void)
static inline void _pgd_free(pgd_t *pgd)
{
if (!SHARED_KERNEL_PMD)
- free_page((unsigned long)pgd);
+ free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
else
kmem_cache_free(pgd_cache, pgd);
}
@@ -375,6 +428,7 @@ static inline void _pgd_free(pgd_t *pgd)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
+ pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
pmd_t *pmds[PREALLOCATED_PMDS];
pgd = _pgd_alloc();
@@ -384,12 +438,15 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
mm->pgd = pgd;
- if (preallocate_pmds(mm, pmds) != 0)
+ if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
goto out_free_pgd;
- if (paravirt_pgd_alloc(mm) != 0)
+ if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
goto out_free_pmds;
+ if (paravirt_pgd_alloc(mm) != 0)
+ goto out_free_user_pmds;
+
/*
* Make sure that pre-populating the pmds is atomic with
* respect to anything walking the pgd_list, so that they
@@ -399,13 +456,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
pgd_ctor(mm, pgd);
pgd_prepopulate_pmd(mm, pgd, pmds);
+ pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
spin_unlock(&pgd_lock);
return pgd;
+out_free_user_pmds:
+ free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
- free_pmds(mm, pmds);
+ free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
_pgd_free(pgd);
out:
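pgd_alloc() now unwinds through a chain of goto labels, freeing in exact reverse order of allocation, with the new user-PMD array slotted between the kernel PMDs and the paravirt hook. A minimal standalone model of that unwind shape (the struct and the allocation sizes are arbitrary):

#include <stdio.h>
#include <stdlib.h>

struct mm_model { void *pgd, *pmds, *u_pmds; };

static int pgd_alloc_model(struct mm_model *mm)
{
	mm->pgd = malloc(4096);
	if (!mm->pgd)
		goto out;
	mm->pmds = malloc(4096);
	if (!mm->pmds)
		goto out_free_pgd;
	mm->u_pmds = malloc(4096);
	if (!mm->u_pmds)
		goto out_free_pmds;
	return 0;			/* caller owns all three */

out_free_pmds:
	free(mm->pmds);
out_free_pgd:
	free(mm->pgd);
out:
	return -1;
}

int main(void)
{
	struct mm_model mm = { 0 };

	if (pgd_alloc_model(&mm) == 0) {
		printf("allocated pgd, pmds and u_pmds\n");
		free(mm.u_pmds);
		free(mm.pmds);
		free(mm.pgd);
	}
	return 0;
}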
@@ -719,28 +779,50 @@ int pmd_clear_huge(pmd_t *pmd)
return 0;
}
+#ifdef CONFIG_X86_64
/**
* pud_free_pmd_page - Clear pud entry and free pmd page.
* @pud: Pointer to a PUD.
+ * @addr: Virtual address associated with pud.
*
- * Context: The pud range has been unmaped and TLB purged.
+ * Context: The pud range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
+ *
+ * NOTE: Callers must allow a single page allocation.
*/
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
- pmd_t *pmd;
+ pmd_t *pmd, *pmd_sv;
+ pte_t *pte;
int i;
if (pud_none(*pud))
return 1;
pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
+ if (!pmd_sv)
+ return 0;
- for (i = 0; i < PTRS_PER_PMD; i++)
- if (!pmd_free_pte_page(&pmd[i]))
- return 0;
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ pmd_sv[i] = pmd[i];
+ if (!pmd_none(pmd[i]))
+ pmd_clear(&pmd[i]);
+ }
pud_clear(pud);
+
+ /* INVLPG to clear all paging-structure caches */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(pmd_sv[i])) {
+ pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
+ free_page((unsigned long)pte);
+ }
+ }
+
+ free_page((unsigned long)pmd_sv);
free_page((unsigned long)pmd);
return 1;
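The rewritten pud_free_pmd_page() is careful about ordering: snapshot the PMD entries, clear them, flush, and only then free the PTE pages recorded in the snapshot, so the hardware paging-structure caches can never reference a freed page. A standalone sketch of that snapshot-clear-flush-free sequence (the flush is a stub; nothing here is a kernel API):

#include <stdlib.h>
#include <string.h>

#define PTRS_PER_PMD 512

struct pmd_entry { void *pte_page; };

static void flush_tlb_stub(void) { /* stands in for the INVLPG flush */ }

static void free_pmd_page_model(struct pmd_entry *pmd)
{
	struct pmd_entry *sv = malloc(sizeof(*sv) * PTRS_PER_PMD);

	if (!sv)
		return;			/* mirrors the "return 0" failure */

	memcpy(sv, pmd, sizeof(*sv) * PTRS_PER_PMD);	/* snapshot */
	memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);	/* clear live entries */

	flush_tlb_stub();		/* flush before any page is freed */

	for (int i = 0; i < PTRS_PER_PMD; i++)
		free(sv[i].pte_page);	/* free(NULL) is a no-op */
	free(sv);
}

int main(void)
{
	struct pmd_entry pmd[PTRS_PER_PMD] = { { malloc(4096) } };

	free_pmd_page_model(pmd);
	return 0;
}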
@@ -749,11 +831,12 @@ int pud_free_pmd_page(pud_t *pud)
/**
* pmd_free_pte_page - Clear pmd entry and free pte page.
* @pmd: Pointer to a PMD.
+ * @addr: Virtual address associated with pmd.
*
- * Context: The pmd range has been unmaped and TLB purged.
+ * Context: The pmd range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
pte_t *pte;
@@ -762,8 +845,30 @@ int pmd_free_pte_page(pmd_t *pmd)
pte = (pte_t *)pmd_page_vaddr(*pmd);
pmd_clear(pmd);
+
+ /* INVLPG to clear all paging-structure caches */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
free_page((unsigned long)pte);
return 1;
}
+
+#else /* !CONFIG_X86_64 */
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ return pud_none(*pud);
+}
+
+/*
+ * Disable free page handling on x86-PAE. This assures that ioremap()
+ * does not update sync'd pmd entries. See vmalloc_sync_one().
+ */
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ return pmd_none(*pmd);
+}
+
+#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 4d418e705878..31341ae7309f 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -45,6 +45,7 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
+#include <asm/sections.h>
#undef pr_fmt
#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
@@ -54,6 +55,16 @@
#define __GFP_NOTRACK 0
#endif
+/*
+ * Define the page-table levels we clone for user-space on 32
+ * and 64 bit.
+ */
+#ifdef CONFIG_X86_64
+#define PTI_LEVEL_KERNEL_IMAGE PTI_CLONE_PMD
+#else
+#define PTI_LEVEL_KERNEL_IMAGE PTI_CLONE_PTE
+#endif
+
static void __init pti_print_if_insecure(const char *reason)
{
if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
@@ -117,7 +128,7 @@ enable:
setup_force_cpu_cap(X86_FEATURE_PTI);
}
-pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
+pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
/*
* Changes to the high (kernel) portion of the kernelmode page
@@ -176,7 +187,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
if (pgd_none(*pgd)) {
unsigned long new_p4d_page = __get_free_page(gfp);
- if (!new_p4d_page)
+ if (WARN_ON_ONCE(!new_p4d_page))
return NULL;
set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
@@ -195,13 +206,17 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
- p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
+ p4d_t *p4d;
pud_t *pud;
+ p4d = pti_user_pagetable_walk_p4d(address);
+ if (!p4d)
+ return NULL;
+
BUILD_BUG_ON(p4d_large(*p4d) != 0);
if (p4d_none(*p4d)) {
unsigned long new_pud_page = __get_free_page(gfp);
- if (!new_pud_page)
+ if (WARN_ON_ONCE(!new_pud_page))
return NULL;
set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
@@ -215,7 +230,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
}
if (pud_none(*pud)) {
unsigned long new_pmd_page = __get_free_page(gfp);
- if (!new_pmd_page)
+ if (WARN_ON_ONCE(!new_pmd_page))
return NULL;
set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
@@ -224,7 +239,6 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
return pmd_offset(pud, address);
}
-#ifdef CONFIG_X86_VSYSCALL_EMULATION
/*
* Walk the shadow copy of the page tables (optionally) trying to allocate
* page table pages on the way down. Does not support large pages.
@@ -237,9 +251,13 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
- pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+ pmd_t *pmd;
pte_t *pte;
+ pmd = pti_user_pagetable_walk_pmd(address);
+ if (!pmd)
+ return NULL;
+
/* We can't do anything sensible if we hit a large mapping. */
if (pmd_large(*pmd)) {
WARN_ON(1);
@@ -262,6 +280,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
return pte;
}
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
pte_t *pte, *target_pte;
@@ -282,8 +301,14 @@ static void __init pti_setup_vsyscall(void)
static void __init pti_setup_vsyscall(void) { }
#endif
+enum pti_clone_level {
+ PTI_CLONE_PMD,
+ PTI_CLONE_PTE,
+};
+
static void
-pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
+pti_clone_pgtable(unsigned long start, unsigned long end,
+ enum pti_clone_level level)
{
unsigned long addr;
@@ -291,59 +316,105 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
* Clone the populated PMDs which cover start to end. These PMD areas
* can have holes.
*/
- for (addr = start; addr < end; addr += PMD_SIZE) {
+ for (addr = start; addr < end;) {
+ pte_t *pte, *target_pte;
pmd_t *pmd, *target_pmd;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
+ /* Overflow check */
+ if (addr < start)
+ break;
+
pgd = pgd_offset_k(addr);
if (WARN_ON(pgd_none(*pgd)))
return;
p4d = p4d_offset(pgd, addr);
if (WARN_ON(p4d_none(*p4d)))
return;
+
pud = pud_offset(p4d, addr);
- if (pud_none(*pud))
+ if (pud_none(*pud)) {
+ addr += PUD_SIZE;
continue;
+ }
+
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ if (pmd_none(*pmd)) {
+ addr += PMD_SIZE;
continue;
+ }
- target_pmd = pti_user_pagetable_walk_pmd(addr);
- if (WARN_ON(!target_pmd))
- return;
-
- /*
- * Only clone present PMDs. This ensures only setting
- * _PAGE_GLOBAL on present PMDs. This should only be
- * called on well-known addresses anyway, so a non-
- * present PMD would be a surprise.
- */
- if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
- return;
-
- /*
- * Setting 'target_pmd' below creates a mapping in both
- * the user and kernel page tables. It is effectively
- * global, so set it as global in both copies. Note:
- * the X86_FEATURE_PGE check is not _required_ because
- * the CPU ignores _PAGE_GLOBAL when PGE is not
- * supported. The check keeps consistentency with
- * code that only set this bit when supported.
- */
- if (boot_cpu_has(X86_FEATURE_PGE))
- *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
-
- /*
- * Copy the PMD. That is, the kernelmode and usermode
- * tables will share the last-level page tables of this
- * address range
- */
- *target_pmd = pmd_clear_flags(*pmd, clear);
+ if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
+ target_pmd = pti_user_pagetable_walk_pmd(addr);
+ if (WARN_ON(!target_pmd))
+ return;
+
+ /*
+ * Only clone present PMDs. This ensures only setting
+ * _PAGE_GLOBAL on present PMDs. This should only be
+ * called on well-known addresses anyway, so a non-
+ * present PMD would be a surprise.
+ */
+ if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
+ return;
+
+ /*
+ * Setting 'target_pmd' below creates a mapping in both
+ * the user and kernel page tables. It is effectively
+ * global, so set it as global in both copies. Note:
+ * the X86_FEATURE_PGE check is not _required_ because
+ * the CPU ignores _PAGE_GLOBAL when PGE is not
+ * supported. The check keeps consistency with
+ * code that only set this bit when supported.
+ */
+ if (boot_cpu_has(X86_FEATURE_PGE))
+ *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
+
+ /*
+ * Copy the PMD. That is, the kernelmode and usermode
+ * tables will share the last-level page tables of this
+ * address range
+ */
+ *target_pmd = *pmd;
+
+ addr += PMD_SIZE;
+
+ } else if (level == PTI_CLONE_PTE) {
+
+ /* Walk the page-table down to the pte level */
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_none(*pte)) {
+ addr += PAGE_SIZE;
+ continue;
+ }
+
+ /* Only clone present PTEs */
+ if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
+ return;
+
+ /* Allocate PTE in the user page-table */
+ target_pte = pti_user_pagetable_walk_pte(addr);
+ if (WARN_ON(!target_pte))
+ return;
+
+ /* Set GLOBAL bit in both PTEs */
+ if (boot_cpu_has(X86_FEATURE_PGE))
+ *pte = pte_set_flags(*pte, _PAGE_GLOBAL);
+
+ /* Clone the PTE */
+ *target_pte = *pte;
+
+ addr += PAGE_SIZE;
+
+ } else {
+ BUG();
+ }
}
}
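pti_clone_pgtable() replaces the old fixed PMD stride with a cursor that advances by whatever it just inspected: a whole PUD for a hole, a PMD for a large or PMD-level clone, a single page at PTE level, with a wraparound check ending the walk. A toy walk using the same stride logic (the hole/PMD/PTE layout is invented):

#include <stdio.h>

#define PAGE_SIZE (1UL << 12)
#define PMD_SIZE  (1UL << 21)
#define PUD_SIZE  (1UL << 30)

/* 0 = pud hole, 1 = pmd-mapped, 2 = pte-mapped; made-up layout */
static int level_at(unsigned long addr)
{
	if (addr < PUD_SIZE)
		return 0;
	return (addr & (1UL << 22)) ? 1 : 2;
}

int main(void)
{
	unsigned long start = 0, end = 2UL * PUD_SIZE;
	unsigned long addr, steps = 0;

	for (addr = start; addr < end; steps++) {
		if (addr < start)
			break;		/* overflow check, as in the diff */
		switch (level_at(addr)) {
		case 0: addr += PUD_SIZE; break;	/* skip the hole */
		case 1: addr += PMD_SIZE; break;	/* cloned a PMD */
		default: addr += PAGE_SIZE; break;	/* cloned a PTE */
		}
	}
	printf("covered 2 GiB in %lu iterations\n", steps);
	return 0;
}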
+#ifdef CONFIG_X86_64
/*
* Clone a single p4d (i.e. a top-level entry on 4-level systems and a
* next-level entry on 5-level systems.
@@ -354,6 +425,9 @@ static void __init pti_clone_p4d(unsigned long addr)
pgd_t *kernel_pgd;
user_p4d = pti_user_pagetable_walk_p4d(addr);
+ if (!user_p4d)
+ return;
+
kernel_pgd = pgd_offset_k(addr);
kernel_p4d = p4d_offset(kernel_pgd, addr);
*user_p4d = *kernel_p4d;
@@ -367,6 +441,25 @@ static void __init pti_clone_user_shared(void)
pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}
+#else /* CONFIG_X86_64 */
+
+/*
+ * On 32 bit PAE systems with 1GB of Kernel address space there is only
+ * one pgd/p4d for the whole kernel. Cloning that would map the whole
+ * address space into the user page-tables, making PTI useless. So clone
+ * the page-table on the PMD level to prevent that.
+ */
+static void __init pti_clone_user_shared(void)
+{
+ unsigned long start, end;
+
+ start = CPU_ENTRY_AREA_BASE;
+ end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
+
+ pti_clone_pgtable(start, end, PTI_CLONE_PMD);
+}
+#endif /* CONFIG_X86_64 */
+
/*
* Clone the ESPFIX P4D into the user space visible page table
*/
@@ -380,11 +473,11 @@ static void __init pti_setup_espfix64(void)
/*
* Clone the populated PMDs of the entry and irqentry text and force it RO.
*/
-static void __init pti_clone_entry_text(void)
+static void pti_clone_entry_text(void)
{
- pti_clone_pmds((unsigned long) __entry_text_start,
- (unsigned long) __irqentry_text_end,
- _PAGE_RW);
+ pti_clone_pgtable((unsigned long) __entry_text_start,
+ (unsigned long) __irqentry_text_end,
+ PTI_CLONE_PMD);
}
/*
@@ -435,10 +528,17 @@ static inline bool pti_kernel_image_global_ok(void)
}
/*
+ * This is the only user for these and it is not arch-generic
+ * like the other set_memory.h functions. Just extern them.
+ */
+extern int set_memory_nonglobal(unsigned long addr, int numpages);
+extern int set_memory_global(unsigned long addr, int numpages);
+
+/*
* For some configurations, map all of kernel text into the user page
* tables. This reduces TLB misses, especially on non-PCID systems.
*/
-void pti_clone_kernel_text(void)
+static void pti_clone_kernel_text(void)
{
/*
* rodata is part of the kernel image and is normally
@@ -446,7 +546,8 @@ void pti_clone_kernel_text(void)
* clone the areas past rodata, they might contain secrets.
*/
unsigned long start = PFN_ALIGN(_text);
- unsigned long end = (unsigned long)__end_rodata_hpage_align;
+ unsigned long end_clone = (unsigned long)__end_rodata_aligned;
+ unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);
if (!pti_kernel_image_global_ok())
return;
@@ -458,14 +559,18 @@ void pti_clone_kernel_text(void)
* pti_set_kernel_image_nonglobal() did to clear the
* global bit.
*/
- pti_clone_pmds(start, end, _PAGE_RW);
+ pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
+
+ /*
+ * pti_clone_pgtable() will set the global bit in any PMDs
+ * that it clones, but we also need to get any PTEs in
+ * the last level for areas that are not huge-page-aligned.
+ */
+
+ /* Set the global bit for normal non-__init kernel text: */
+ set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}
-/*
- * This is the only user for it and it is not arch-generic like
- * the other set_memory.h functions. Just extern it.
- */
-extern int set_memory_nonglobal(unsigned long addr, int numpages);
void pti_set_kernel_image_nonglobal(void)
{
/*
@@ -477,9 +582,11 @@ void pti_set_kernel_image_nonglobal(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
- if (pti_kernel_image_global_ok())
- return;
-
+ /*
+ * This clears _PAGE_GLOBAL from the entire kernel image.
+ * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
+ * areas that are mapped to userspace.
+ */
set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}
@@ -493,6 +600,28 @@ void __init pti_init(void)
pr_info("enabled\n");
+#ifdef CONFIG_X86_32
+ /*
+ * We check for X86_FEATURE_PCID here. But the init-code will
+ * clear the feature flag on 32 bit because the feature is not
+ * supported on 32 bit anyway. To print the warning we need to
+ * check with cpuid directly again.
+ */
+ if (cpuid_ecx(0x1) & BIT(17)) {
+ /* Use printk to work around pr_fmt() */
+ printk(KERN_WARNING "\n");
+ printk(KERN_WARNING "************************************************************\n");
+ printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! **\n");
+ printk(KERN_WARNING "** **\n");
+ printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
+ printk(KERN_WARNING "** Your performance will increase dramatically if you **\n");
+ printk(KERN_WARNING "** switch to a 64-bit kernel! **\n");
+ printk(KERN_WARNING "** **\n");
+ printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! **\n");
+ printk(KERN_WARNING "************************************************************\n");
+ }
+#endif
+
pti_clone_user_shared();
/* Undo all global bits from the init pagetables in head_64.S: */
@@ -502,3 +631,22 @@ void __init pti_init(void)
pti_setup_espfix64();
pti_setup_vsyscall();
}
+
+/*
+ * Finalize the kernel mappings in the userspace page-table. Some of the
+ * mappings for the kernel image might have changed since pti_init()
+ * cloned them. This is because parts of the kernel image have been
+ * mapped RO and/or NX. These changes need to be cloned again to the
+ * userspace page-table.
+ */
+void pti_finalize(void)
+{
+ /*
+ * We need to clone everything (again) that maps parts of the
+ * kernel image.
+ */
+ pti_clone_entry_text();
+ pti_clone_kernel_text();
+
+ debug_checkwx_user();
+}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 6eb1f34c3c85..752dbf4e0e50 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
+#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
@@ -35,7 +36,7 @@
* necessary invalidation by clearing out the 'ctx_id' which
* forces a TLB flush when the context is loaded.
*/
-void clear_asid_other(void)
+static void clear_asid_other(void)
{
u16 asid;
@@ -185,8 +186,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
{
struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+ bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
unsigned cpu = smp_processor_id();
u64 next_tlb_gen;
+ bool need_flush;
+ u16 new_asid;
/*
* NB: The scheduler will call us with prev == next when switching
@@ -240,20 +244,41 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
next->context.ctx_id);
/*
- * We don't currently support having a real mm loaded without
- * our cpu set in mm_cpumask(). We have all the bookkeeping
- * in place to figure out whether we would need to flush
- * if our cpu were cleared in mm_cpumask(), but we don't
- * currently use it.
+ * Even in lazy TLB mode, the CPU should stay set in the
+ * mm_cpumask. The TLB shootdown code can figure out from
+ * cpu_tlbstate.is_lazy whether or not to send an IPI.
*/
if (WARN_ON_ONCE(real_prev != &init_mm &&
!cpumask_test_cpu(cpu, mm_cpumask(next))))
cpumask_set_cpu(cpu, mm_cpumask(next));
- return;
+ /*
+ * If the CPU is not in lazy TLB mode, we are just switching
+ * from one thread in a process to another thread in the same
+ * process. No TLB flush required.
+ */
+ if (!was_lazy)
+ return;
+
+ /*
+ * Read the tlb_gen to check whether a flush is needed.
+ * If the TLB is up to date, just use it.
+ * The barrier synchronizes with the tlb_gen increment in
+ * the TLB shootdown code.
+ */
+ smp_mb();
+ next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+ if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
+ next_tlb_gen)
+ return;
+
+ /*
+ * TLB contents went out of date while we were in lazy
+ * mode. Fall through to the TLB switching code below.
+ */
+ new_asid = prev_asid;
+ need_flush = true;
} else {
- u16 new_asid;
- bool need_flush;
u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
/*
@@ -285,53 +310,60 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
sync_current_stack_to_mm(next);
}
- /* Stop remote flushes for the previous mm */
- VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
- real_prev != &init_mm);
- cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+ /*
+ * Stop remote flushes for the previous mm.
+ * Skip kernel threads; we never send init_mm TLB flushing IPIs,
+ * but the bitmap manipulation can cause cache line contention.
+ */
+ if (real_prev != &init_mm) {
+ VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
+ mm_cpumask(real_prev)));
+ cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+ }
/*
* Start remote flushes and then read tlb_gen.
*/
- cpumask_set_cpu(cpu, mm_cpumask(next));
+ if (next != &init_mm)
+ cpumask_set_cpu(cpu, mm_cpumask(next));
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+ }
- if (need_flush) {
- this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
- this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
- load_new_mm_cr3(next->pgd, new_asid, true);
-
- /*
- * NB: This gets called via leave_mm() in the idle path
- * where RCU functions differently. Tracing normally
- * uses RCU, so we need to use the _rcuidle variant.
- *
- * (There is no good reason for this. The idle code should
- * be rearranged to call this before rcu_idle_enter().)
- */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
- } else {
- /* The new ASID is already up to date. */
- load_new_mm_cr3(next->pgd, new_asid, false);
-
- /* See above wrt _rcuidle. */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
- }
+ if (need_flush) {
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+ load_new_mm_cr3(next->pgd, new_asid, true);
/*
- * Record last user mm's context id, so we can avoid
- * flushing branch buffer with IBPB if we switch back
- * to the same user.
+ * NB: This gets called via leave_mm() in the idle path
+ * where RCU functions differently. Tracing normally
+ * uses RCU, so we need to use the _rcuidle variant.
+ *
+ * (There is no good reason for this. The idle code should
+ * be rearranged to call this before rcu_idle_enter().)
*/
- if (next != &init_mm)
- this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ } else {
+ /* The new ASID is already up to date. */
+ load_new_mm_cr3(next->pgd, new_asid, false);
- this_cpu_write(cpu_tlbstate.loaded_mm, next);
- this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+ /* See above wrt _rcuidle. */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
+ /*
+ * Record last user mm's context id, so we can avoid
+ * flushing branch buffer with IBPB if we switch back
+ * to the same user.
+ */
+ if (next != &init_mm)
+ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
+ this_cpu_write(cpu_tlbstate.loaded_mm, next);
+ this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+
load_mm_cr4(next);
switch_ldt(real_prev, next);
}
@@ -354,20 +386,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return;
- if (tlb_defer_switch_to_init_mm()) {
- /*
- * There's a significant optimization that may be possible
- * here. We have accurate enough TLB flush tracking that we
- * don't need to maintain coherence of TLB per se when we're
- * lazy. We do, however, need to maintain coherence of
- * paging-structure caches. We could, in principle, leave our
- * old mm loaded and only switch to init_mm when
- * tlb_remove_page() happens.
- */
- this_cpu_write(cpu_tlbstate.is_lazy, true);
- } else {
- switch_mm(NULL, &init_mm, NULL);
- }
+ this_cpu_write(cpu_tlbstate.is_lazy, true);
}
/*
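With enter_lazy_tlb() reduced to setting is_lazy, the prev == next path in switch_mm_irqs_off() carries the real logic: a non-lazy same-mm switch needs nothing, while a CPU waking from lazy mode compares its cached tlb_gen against the mm's current generation and keeps the ASID, flushing only if it went stale. A small model of that decision (all state is invented):

#include <stdbool.h>
#include <stdio.h>

static bool need_local_flush(bool was_lazy,
			     unsigned long long cached_gen,
			     unsigned long long mm_gen)
{
	if (!was_lazy)
		return false;		/* thread switch within one mm */
	return cached_gen != mm_gen;	/* TLB went stale while lazy */
}

int main(void)
{
	printf("not lazy:         %d\n", need_local_flush(false, 1, 2));
	printf("lazy, up to date: %d\n", need_local_flush(true, 5, 5));
	printf("lazy, stale:      %d\n", need_local_flush(true, 5, 7));
	return 0;
}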
@@ -454,6 +473,9 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
* paging-structure cache to avoid speculatively reading
* garbage into our TLB. Since switching to init_mm is barely
* slower than a minimal flush, just switch to init_mm.
+ *
+ * This should be rare, with native_flush_tlb_others skipping
+ * IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
return;
@@ -560,6 +582,9 @@ static void flush_tlb_func_remote(void *info)
void native_flush_tlb_others(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
{
+ cpumask_var_t lazymask;
+ unsigned int cpu;
+
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
if (info->end == TLB_FLUSH_ALL)
trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
@@ -583,8 +608,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
* that UV should be updated so that smp_call_function_many(),
* etc, are optimal on UV.
*/
- unsigned int cpu;
-
cpu = smp_processor_id();
cpumask = uv_flush_tlb_others(cpumask, info);
if (cpumask)
@@ -592,8 +615,29 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
(void *)info, 1);
return;
}
- smp_call_function_many(cpumask, flush_tlb_func_remote,
+
+ /*
+ * A temporary cpumask is used in order to skip sending IPIs
+ * to CPUs in lazy TLB state, while keeping them in mm_cpumask(mm).
+ * If the allocation fails, simply IPI every CPU in mm_cpumask.
+ */
+ if (!alloc_cpumask_var(&lazymask, GFP_ATOMIC)) {
+ smp_call_function_many(cpumask, flush_tlb_func_remote,
(void *)info, 1);
+ return;
+ }
+
+ cpumask_copy(lazymask, cpumask);
+
+ for_each_cpu(cpu, lazymask) {
+ if (per_cpu(cpu_tlbstate.is_lazy, cpu))
+ cpumask_clear_cpu(cpu, lazymask);
+ }
+
+ smp_call_function_many(lazymask, flush_tlb_func_remote,
+ (void *)info, 1);
+
+ free_cpumask_var(lazymask);
}
/*
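native_flush_tlb_others() now trims the IPI target mask: copy the mm's cpumask, drop every CPU currently in lazy TLB mode, and fall back to the untrimmed mask only if the temporary cpumask cannot be allocated. The same set arithmetic on a toy 8-CPU bitmap (the mask values are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int mm_mask = 0xB6;	/* CPUs 1,2,4,5,7 run this mm */
	unsigned int lazy    = 0x24;	/* CPUs 2 and 5 are lazy */
	unsigned int ipi_mask = mm_mask & ~lazy;

	for (int cpu = 0; cpu < 8; cpu++)
		if (ipi_mask & (1U << cpu))
			printf("IPI -> cpu %d\n", cpu);	/* 1, 4, 7 */
	return 0;
}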
@@ -646,6 +690,68 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
put_cpu();
}
+void tlb_flush_remove_tables_local(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm &&
+ this_cpu_read(cpu_tlbstate.is_lazy)) {
+ /*
+ * We're in lazy mode. We need to at least flush our
+ * paging-structure cache to avoid speculatively reading
+ * garbage into our TLB. Since switching to init_mm is barely
+ * slower than a minimal flush, just switch to init_mm.
+ */
+ switch_mm_irqs_off(NULL, &init_mm, NULL);
+ }
+}
+
+static void mm_fill_lazy_tlb_cpu_mask(struct mm_struct *mm,
+ struct cpumask *lazy_cpus)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mm_cpumask(mm)) {
+ if (!per_cpu(cpu_tlbstate.is_lazy, cpu))
+ cpumask_set_cpu(cpu, lazy_cpus);
+ }
+}
+
+void tlb_flush_remove_tables(struct mm_struct *mm)
+{
+ int cpu = get_cpu();
+ cpumask_var_t lazy_cpus;
+
+ if (cpumask_any_but(mm_cpumask(mm), cpu) >= nr_cpu_ids) {
+ put_cpu();
+ return;
+ }
+
+ if (!zalloc_cpumask_var(&lazy_cpus, GFP_ATOMIC)) {
+ /*
+ * If the cpumask allocation fails, do a brute force flush
+ * on all the CPUs that have this mm loaded.
+ */
+ smp_call_function_many(mm_cpumask(mm),
+ tlb_flush_remove_tables_local, (void *)mm, 1);
+ put_cpu();
+ return;
+ }
+
+ /*
+ * CPUs with !is_lazy either received a TLB flush IPI while the user
+ * pages in this address range were unmapped, or have context switched
+ * and reloaded %CR3 since then.
+ *
+ * Shootdown IPIs at page table freeing time only need to be sent to
+ * CPUs that may have out of date TLB contents.
+ */
+ mm_fill_lazy_tlb_cpu_mask(mm, lazy_cpus);
+ smp_call_function_many(lazy_cpus,
+ tlb_flush_remove_tables_local, (void *)mm, 1);
+ free_cpumask_var(lazy_cpus);
+ put_cpu();
+}
static void do_flush_tlb_all(void *info)
{
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 55799873ebe5..8f6cc71e0848 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1441,8 +1441,8 @@ static void emit_prologue(u8 **pprog, u32 stack_depth)
/* sub esp,STACK_SIZE */
EMIT2_off32(0x81, 0xEC, STACK_SIZE);
- /* sub ebp,SCRATCH_SIZE+4+12*/
- EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 16);
+ /* sub ebp,SCRATCH_SIZE+12*/
+ EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12);
/* xor ebx,ebx */
EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX));
@@ -1475,8 +1475,8 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth)
/* mov edx,dword ptr [ebp+off]*/
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1]));
- /* add ebp,SCRATCH_SIZE+4+12*/
- EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 16);
+ /* add ebp,SCRATCH_SIZE+12*/
+ EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12);
/* mov ebx,dword ptr [ebp-12]*/
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 563049c483a1..d4ec117c1142 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -22,7 +22,6 @@
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
PCI_PROBE_MMCONF;
-unsigned int pci_early_dump_regs;
static int pci_bf_sort;
int pci_routeirq;
int noioapicquirk;
@@ -599,9 +598,6 @@ char *__init pcibios_setup(char *str)
pci_probe |= PCI_BIG_ROOT_WINDOW;
return NULL;
#endif
- } else if (!strcmp(str, "earlydump")) {
- pci_early_dump_regs = 1;
- return NULL;
} else if (!strcmp(str, "routeirq")) {
pci_routeirq = 1;
return NULL;
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index e5f753cbb1c3..f5fc953e5848 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -57,47 +57,3 @@ int early_pci_allowed(void)
PCI_PROBE_CONF1;
}
-void early_dump_pci_device(u8 bus, u8 slot, u8 func)
-{
- u32 value[256 / 4];
- int i;
-
- pr_info("pci 0000:%02x:%02x.%d config space:\n", bus, slot, func);
-
- for (i = 0; i < 256; i += 4)
- value[i / 4] = read_pci_config(bus, slot, func, i);
-
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, value, 256, false);
-}
-
-void early_dump_pci_devices(void)
-{
- unsigned bus, slot, func;
-
- if (!early_pci_allowed())
- return;
-
- for (bus = 0; bus < 256; bus++) {
- for (slot = 0; slot < 32; slot++) {
- for (func = 0; func < 8; func++) {
- u32 class;
- u8 type;
-
- class = read_pci_config(bus, slot, func,
- PCI_CLASS_REVISION);
- if (class == 0xffffffff)
- continue;
-
- early_dump_pci_device(bus, slot, func);
-
- if (func == 0) {
- type = read_pci_config_byte(bus, slot,
- func,
- PCI_HEADER_TYPE);
- if (!(type & 0x80))
- break;
- }
- }
- }
- }
-}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 77873ce700ae..ee5d08f25ce4 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -417,7 +417,7 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
if (!(md->attribute & EFI_MEMORY_WB))
flags |= _PAGE_PCD;
- if (sev_active())
+ if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
flags |= _PAGE_ENC;
pfn = md->phys_addr >> PAGE_SHIFT;
@@ -636,6 +636,8 @@ void efi_switch_mm(struct mm_struct *mm)
#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);
+static DEFINE_SPINLOCK(efi_runtime_lock);
+
#define runtime_service32(func) \
({ \
u32 table = (u32)(unsigned long)efi.systab; \
@@ -657,17 +659,14 @@ extern efi_status_t efi64_thunk(u32, ...);
#define efi_thunk(f, ...) \
({ \
efi_status_t __s; \
- unsigned long __flags; \
u32 __func; \
\
- local_irq_save(__flags); \
arch_efi_call_virt_setup(); \
\
__func = runtime_service32(f); \
__s = efi64_thunk(__func, __VA_ARGS__); \
\
arch_efi_call_virt_teardown(); \
- local_irq_restore(__flags); \
\
__s; \
})
@@ -702,14 +701,17 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
u32 phys_tm, phys_tc;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_tm = virt_to_phys_or_null(tm);
phys_tc = virt_to_phys_or_null(tc);
status = efi_thunk(get_time, phys_tm, phys_tc);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -719,13 +721,16 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
efi_status_t status;
u32 phys_tm;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_time, phys_tm);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -737,8 +742,10 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
{
efi_status_t status;
u32 phys_enabled, phys_pending, phys_tm;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_enabled = virt_to_phys_or_null(enabled);
phys_pending = virt_to_phys_or_null(pending);
@@ -747,6 +754,7 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
status = efi_thunk(get_wakeup_time, phys_enabled,
phys_pending, phys_tm);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -757,13 +765,16 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
efi_status_t status;
u32 phys_tm;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_wakeup_time, enabled, phys_tm);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -781,6 +792,9 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
efi_status_t status;
u32 phys_name, phys_vendor, phys_attr;
u32 phys_data_size, phys_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_data_size = virt_to_phys_or_null(data_size);
phys_vendor = virt_to_phys_or_null(vendor);
@@ -791,6 +805,8 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
status = efi_thunk(get_variable, phys_name, phys_vendor,
phys_attr, phys_data_size, phys_data);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -800,6 +816,34 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
{
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_vendor = virt_to_phys_or_null(vendor);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+ /* If data_size is > sizeof(u32) we've got problems */
+ status = efi_thunk(set_variable, phys_name, phys_vendor,
+ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data)
+{
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
phys_vendor = virt_to_phys_or_null(vendor);
@@ -809,6 +853,8 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
status = efi_thunk(set_variable, phys_name, phys_vendor,
attr, data_size, phys_data);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
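The nonblocking variant differs from efi_thunk_set_variable() in exactly one line: spin_trylock_irqsave() instead of spin_lock_irqsave(), returning EFI_NOT_READY when the lock is contended rather than spinning in a context that must not block. A userspace model of that pattern, with a pthread mutex standing in for efi_runtime_lock (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t runtime_lock = PTHREAD_MUTEX_INITIALIZER;

enum status { OK = 0, NOT_READY = 1 };

static enum status set_variable_nonblocking_model(void)
{
	if (pthread_mutex_trylock(&runtime_lock))
		return NOT_READY;	/* caller retries from a safe context */
	/* ... the firmware call would go here ... */
	pthread_mutex_unlock(&runtime_lock);
	return OK;
}

int main(void)
{
	pthread_mutex_lock(&runtime_lock);	/* simulate a busy firmware */
	printf("busy: %s\n", set_variable_nonblocking_model() ? "EFI_NOT_READY" : "ok");
	pthread_mutex_unlock(&runtime_lock);
	printf("idle: %s\n", set_variable_nonblocking_model() ? "EFI_NOT_READY" : "ok");
	return 0;
}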
@@ -819,6 +865,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
{
efi_status_t status;
u32 phys_name_size, phys_name, phys_vendor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_name_size = virt_to_phys_or_null(name_size);
phys_vendor = virt_to_phys_or_null(vendor);
@@ -827,6 +876,8 @@ efi_thunk_get_next_variable(unsigned long *name_size,
status = efi_thunk(get_next_variable, phys_name_size,
phys_name, phys_vendor);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -835,10 +886,15 @@ efi_thunk_get_next_high_mono_count(u32 *count)
{
efi_status_t status;
u32 phys_count;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_count = virt_to_phys_or_null(count);
status = efi_thunk(get_next_high_mono_count, phys_count);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -847,10 +903,15 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
unsigned long data_size, efi_char16_t *data)
{
u32 phys_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_data = virt_to_phys_or_null_size(data, data_size);
efi_thunk(reset_system, reset_type, status, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
}
static efi_status_t
@@ -872,10 +933,40 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
{
efi_status_t status;
u32 phys_storage, phys_remaining, phys_max;
+ unsigned long flags;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
+ phys_storage = virt_to_phys_or_null(storage_space);
+ phys_remaining = virt_to_phys_or_null(remaining_space);
+ phys_max = virt_to_phys_or_null(max_variable_size);
+
+ status = efi_thunk(query_variable_info, attr, phys_storage,
+ phys_remaining, phys_max);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+ u32 phys_storage, phys_remaining, phys_max;
+ unsigned long flags;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
+
phys_storage = virt_to_phys_or_null(storage_space);
phys_remaining = virt_to_phys_or_null(remaining_space);
phys_max = virt_to_phys_or_null(max_variable_size);
@@ -883,6 +974,8 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
status = efi_thunk(query_variable_info, attr, phys_storage,
phys_remaining, phys_max);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -908,9 +1001,11 @@ void efi_thunk_runtime_setup(void)
efi.get_variable = efi_thunk_get_variable;
efi.get_next_variable = efi_thunk_get_next_variable;
efi.set_variable = efi_thunk_set_variable;
+ efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
efi.reset_system = efi_thunk_reset_system;
efi.query_variable_info = efi_thunk_query_variable_info;
+ efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
efi.update_capsule = efi_thunk_update_capsule;
efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 36c1f8b9f7e0..844d31cb8a0c 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -105,12 +105,11 @@ early_param("efi_no_storage_paranoia", setup_storage_paranoia);
*/
void efi_delete_dummy_variable(void)
{
- efi.set_variable((efi_char16_t *)efi_dummy_name,
- &EFI_DUMMY_GUID,
- EFI_VARIABLE_NON_VOLATILE |
- EFI_VARIABLE_BOOTSERVICE_ACCESS |
- EFI_VARIABLE_RUNTIME_ACCESS,
- 0, NULL);
+ efi.set_variable_nonblocking((efi_char16_t *)efi_dummy_name,
+ &EFI_DUMMY_GUID,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL);
}
/*
@@ -249,7 +248,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
int num_entries;
void *new;
- if (efi_mem_desc_lookup(addr, &md)) {
+ if (efi_mem_desc_lookup(addr, &md) ||
+ md.type != EFI_BOOT_SERVICES_DATA) {
pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
return;
}
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
index fa021dfab088..5cf886c867c2 100644
--- a/arch/x86/platform/intel-mid/Makefile
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfld.o pwr.o
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o pwr.o
# SFI specific code
ifdef CONFIG_X86_INTEL_MID
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 4f5fa65a1011..2acd6be13375 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -18,6 +18,7 @@
#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>
+#include <asm/hw_irq.h>
#define TANGIER_EXT_TIMER0_MSI 12
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 2ebdf31d9996..56f66eafb94f 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -36,8 +36,6 @@
#include <asm/apb_timer.h>
#include <asm/reboot.h>
-#include "intel_mid_weak_decls.h"
-
/*
* the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
* cmdline option x86_intel_mid_timer can be used to override the configuration
@@ -61,10 +59,6 @@
enum intel_mid_timer_options intel_mid_timer_options;
-/* intel_mid_ops to store sub arch ops */
-static struct intel_mid_ops *intel_mid_ops;
-/* getter function for sub arch ops*/
-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
enum intel_mid_cpu_type __intel_mid_cpu_chip;
EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
@@ -82,11 +76,6 @@ static void intel_mid_reboot(void)
intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
}
-static unsigned long __init intel_mid_calibrate_tsc(void)
-{
- return 0;
-}
-
static void __init intel_mid_setup_bp_timer(void)
{
apbt_time_init();
@@ -133,6 +122,7 @@ static void intel_mid_arch_setup(void)
case 0x3C:
case 0x4A:
__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER;
+ x86_platform.legacy.rtc = 1;
break;
case 0x27:
default:
@@ -140,17 +130,7 @@ static void intel_mid_arch_setup(void)
break;
}
- if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops))
- intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
- else {
- intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
- pr_info("ARCH: Unknown SoC, assuming Penwell!\n");
- }
-
out:
- if (intel_mid_ops->arch_setup)
- intel_mid_ops->arch_setup();
-
/*
* Intel MID platforms are using explicitly defined regulators.
*
@@ -191,7 +171,6 @@ void __init x86_intel_mid_early_setup(void)
x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
- x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
x86_init.pci.arch_init = intel_mid_pci_init;
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
deleted file mode 100644
index 3c1c3866d82b..000000000000
--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * intel_mid_weak_decls.h: Weak declarations of intel-mid.c
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-
-/* For every CPU addition a new get_<cpuname>_ops interface needs
- * to be added.
- */
-extern void *get_penwell_ops(void);
-extern void *get_cloverview_ops(void);
-extern void *get_tangier_ops(void);
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
deleted file mode 100644
index e42978d4deaf..000000000000
--- a/arch/x86/platform/intel-mid/mfld.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * mfld.c: Intel Medfield platform setup code
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-
-#include <asm/apic.h>
-#include <asm/intel-mid.h>
-#include <asm/intel_mid_vrtc.h>
-
-#include "intel_mid_weak_decls.h"
-
-static unsigned long __init mfld_calibrate_tsc(void)
-{
- unsigned long fast_calibrate;
- u32 lo, hi, ratio, fsb;
-
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
- ratio = (hi >> 8) & 0x1f;
- pr_debug("ratio is %d\n", ratio);
- if (!ratio) {
- pr_err("read a zero ratio, should be incorrect!\n");
- pr_err("force tsc ratio to 16 ...\n");
- ratio = 16;
- }
- rdmsr(MSR_FSB_FREQ, lo, hi);
- if ((lo & 0x7) == 0x7)
- fsb = FSB_FREQ_83SKU;
- else
- fsb = FSB_FREQ_100SKU;
- fast_calibrate = ratio * fsb;
- pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
- lapic_timer_frequency = fsb * 1000 / HZ;
-
- /*
- * TSC on Intel Atom SoCs is reliable and of known frequency.
- * See tsc_msr.c for details.
- */
- setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
- setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
-
- return fast_calibrate;
-}
-
-static void __init penwell_arch_setup(void)
-{
- x86_platform.calibrate_tsc = mfld_calibrate_tsc;
-}
-
-static struct intel_mid_ops penwell_ops = {
- .arch_setup = penwell_arch_setup,
-};
-
-void *get_penwell_ops(void)
-{
- return &penwell_ops;
-}
-
-void *get_cloverview_ops(void)
-{
- return &penwell_ops;
-}
diff --git a/arch/x86/platform/intel-mid/mrfld.c b/arch/x86/platform/intel-mid/mrfld.c
deleted file mode 100644
index ae7bdeb0e507..000000000000
--- a/arch/x86/platform/intel-mid/mrfld.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Intel Merrifield platform specific setup code
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-
-#include <asm/apic.h>
-#include <asm/intel-mid.h>
-
-#include "intel_mid_weak_decls.h"
-
-static unsigned long __init tangier_calibrate_tsc(void)
-{
- unsigned long fast_calibrate;
- u32 lo, hi, ratio, fsb, bus_freq;
-
- /* *********************** */
- /* Compute TSC:Ratio * FSB */
- /* *********************** */
-
- /* Compute Ratio */
- rdmsr(MSR_PLATFORM_INFO, lo, hi);
- pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo);
-
- ratio = (lo >> 8) & 0xFF;
- pr_debug("ratio is %d\n", ratio);
- if (!ratio) {
- pr_err("Read a zero ratio, force tsc ratio to 4 ...\n");
- ratio = 4;
- }
-
- /* Compute FSB */
- rdmsr(MSR_FSB_FREQ, lo, hi);
- pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n",
- hi, lo);
-
- bus_freq = lo & 0x7;
- pr_debug("bus_freq = 0x%x\n", bus_freq);
-
- if (bus_freq == 0)
- fsb = FSB_FREQ_100SKU;
- else if (bus_freq == 1)
- fsb = FSB_FREQ_100SKU;
- else if (bus_freq == 2)
- fsb = FSB_FREQ_133SKU;
- else if (bus_freq == 3)
- fsb = FSB_FREQ_167SKU;
- else if (bus_freq == 4)
- fsb = FSB_FREQ_83SKU;
- else if (bus_freq == 5)
- fsb = FSB_FREQ_400SKU;
- else if (bus_freq == 6)
- fsb = FSB_FREQ_267SKU;
- else if (bus_freq == 7)
- fsb = FSB_FREQ_333SKU;
- else {
- BUG();
- pr_err("Invalid bus_freq! Setting to minimal value!\n");
- fsb = FSB_FREQ_100SKU;
- }
-
- /* TSC = FSB Freq * Resolved HFM Ratio */
- fast_calibrate = ratio * fsb;
- pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate);
-
- /* ************************************ */
- /* Calculate Local APIC Timer Frequency */
- /* ************************************ */
- lapic_timer_frequency = (fsb * 1000) / HZ;
-
- pr_debug("Setting lapic_timer_frequency = %d\n",
- lapic_timer_frequency);
-
- /*
- * TSC on Intel Atom SoCs is reliable and of known frequency.
- * See tsc_msr.c for details.
- */
- setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
- setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
-
- return fast_calibrate;
-}
-
-static void __init tangier_arch_setup(void)
-{
- x86_platform.calibrate_tsc = tangier_calibrate_tsc;
- x86_platform.legacy.rtc = 1;
-}
-
-/* tangier arch ops */
-static struct intel_mid_ops tangier_ops = {
- .arch_setup = tangier_arch_setup,
-};
-
-void *get_tangier_ops(void)
-{
- return &tangier_ops;
-}
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index 7c3077e58fa0..f0e920fb98ad 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -311,10 +311,8 @@ static int __init add_xo1_platform_devices(void)
return PTR_ERR(pdev);
pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
- return 0;
+ return PTR_ERR_OR_ZERO(pdev);
}
static int olpc_xo1_ec_probe(struct platform_device *pdev)
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index ca446da48fd2..a4130b84d1ff 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1285,6 +1285,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
struct msg_desc msgdesc;
ack_APIC_irq();
+ kvm_set_cpu_l1tf_flush_l1d();
time_start = get_cycles();
bcp = &per_cpu(bau_control, smp_processor_id());
@@ -1607,8 +1608,6 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
*tunables[cnt].tunp = val;
continue;
}
- if (q == p)
- break;
}
return 0;
}
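
The new kvm_set_cpu_l1tf_flush_l1d() call marks this CPU as needing an L1D
flush before the next KVM VM entry, since an interrupt handler may leave
kernel data in the L1 data cache (L1TF mitigation). The helper is roughly
the following per-cpu flag write (per arch/x86/include/asm/hardirq.h of
this era):

static inline void kvm_set_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
}
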
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 67ccf64c8bd8..f8e3b668d20b 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -233,29 +233,35 @@ struct restore_data_record {
*/
static int get_e820_md5(struct e820_table *table, void *buf)
{
- struct scatterlist sg;
- struct crypto_ahash *tfm;
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
int size;
int ret = 0;
- tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(tfm))
return -ENOMEM;
- {
- AHASH_REQUEST_ON_STACK(req, tfm);
- size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry) * table->nr_entries;
- ahash_request_set_tfm(req, tfm);
- sg_init_one(&sg, (u8 *)table, size);
- ahash_request_set_callback(req, 0, NULL, NULL);
- ahash_request_set_crypt(req, &sg, buf, size);
-
- if (crypto_ahash_digest(req))
- ret = -EINVAL;
- ahash_request_zero(req);
+ desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto free_tfm;
}
- crypto_free_ahash(tfm);
+ desc->tfm = tfm;
+ desc->flags = 0;
+
+ size = offsetof(struct e820_table, entries) +
+ sizeof(struct e820_entry) * table->nr_entries;
+
+ if (crypto_shash_digest(desc, (u8 *)table, size, buf))
+ ret = -EINVAL;
+
+ kzfree(desc);
+
+free_tfm:
+ crypto_free_shash(tfm);
return ret;
}
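
The ahash-to-shash conversion removes the variable-sized
AHASH_REQUEST_ON_STACK allocation in favor of an explicitly sized,
heap-allocated shash_desc. The same synchronous API can also be driven
incrementally when the data is not contiguous; a minimal sketch, assuming
a tfm/desc pair set up exactly as above:

/* Incremental form of the synchronous hash API (sketch). */
static int md5_two_buffers(struct shash_desc *desc,
			   const u8 *a, unsigned int alen,
			   const u8 *b, unsigned int blen, u8 *out)
{
	int ret;

	ret = crypto_shash_init(desc);		/* reset the hash state */
	if (!ret)
		ret = crypto_shash_update(desc, a, alen);
	if (!ret)
		ret = crypto_shash_update(desc, b, blen);
	if (!ret)
		ret = crypto_shash_final(desc, out); /* 16-byte MD5 digest */
	return ret;
}
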
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index ce8da3a0412c..fd369a6e9ff8 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -137,7 +137,7 @@ ENTRY(restore_registers)
/* Saved in save_processor_state. */
lgdt saved_context_gdt_desc(%rax)
- xorq %rax, %rax
+ xorl %eax, %eax
/* tell the hibernation core that we've just restored the memory */
movq %rax, in_suspend(%rip)
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 2e9ee023e6bc..3cf302b26332 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -6,7 +6,7 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
targets += $(purgatory-y)
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
-$(obj)/sha256.o: $(srctree)/lib/sha256.c
+$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
$(call if_changed_rule,cc_o_c)
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
@@ -28,9 +28,8 @@ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
targets += kexec-purgatory.c
-CMD_BIN2C = $(objtree)/scripts/basic/bin2c
quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
+ cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
$(call if_changed,bin2c)
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 220e97841e49..3a6c8ebc8032 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -67,6 +67,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"__tracedata_(start|end)|"
"__(start|stop)_notes|"
"__end_rodata|"
+ "__end_rodata_aligned|"
"__initramfs_start|"
"(jiffies|jiffies_64)|"
#if ELF_BITS == 64
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 9d529f22fd9d..f518b4744ff8 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -1,13 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-mainmenu "User Mode Linux/$(SUBARCH) $(KERNELVERSION) Kernel Configuration"
-
-comment "Compiler: $(CC_VERSION_TEXT)"
-
-source "scripts/Kconfig.include"
-
-source "arch/um/Kconfig.common"
-
-menu "UML-specific options"
menu "Host processor type and features"
@@ -66,9 +57,3 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA
config GENERIC_HWEIGHT
def_bool y
-
-source "arch/um/Kconfig.um"
-
-endmenu
-
-source "arch/um/Kconfig.rest"
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
index 744afdc18cf3..56c44d865f7b 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
if (!FIXADDR_USER_START)
return 0;
- gate_vma.vm_mm = NULL;
+ vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
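
vma_init() replaces the open-coded vm_mm assignment so that every VMA
starts out with a non-NULL vm_ops and an initialized anon_vma_chain;
roughly, from include/linux/mm.h at this point:

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;	/* callers rely on vm_ops != NULL */
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}
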
diff --git a/arch/x86/um/vdso/.gitignore b/arch/x86/um/vdso/.gitignore
index 9cac6d072199..f8b69d84238e 100644
--- a/arch/x86/um/vdso/.gitignore
+++ b/arch/x86/um/vdso/.gitignore
@@ -1,2 +1 @@
-vdso-syms.lds
vdso.lds
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index b2d6967262b2..822ccdba93ad 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -53,22 +53,6 @@ $(vobjs): KBUILD_CFLAGS += $(CFL)
CFLAGS_REMOVE_vdso-note.o = -pg -fprofile-arcs -ftest-coverage
CFLAGS_REMOVE_um_vdso.o = -pg -fprofile-arcs -ftest-coverage
-targets += vdso-syms.lds
-extra-$(VDSO64-y) += vdso-syms.lds
-
-#
-# Match symbols in the DSO that look like VDSO*; produce a file of constants.
-#
-sed-vdsosym := -e 's/^00*/0/' \
- -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
-quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
- $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
-endef
-
-$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
- $(call if_changed,vdsosym)
-
#
# The DSO images are built using a special linker script.
#
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 3b5318505c69..2eeddd814653 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -3,6 +3,7 @@
#endif
#include <linux/cpu.h>
#include <linux/kexec.h>
+#include <linux/slab.h>
#include <xen/features.h>
#include <xen/page.h>
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 8d4e2e1ae60b..ee3b00c7acda 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -119,6 +119,27 @@ static void __init xen_banner(void)
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}
+
+static void __init xen_pv_init_platform(void)
+{
+ set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
+ HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+
+ /* xen clock uses per-cpu vcpu_info, need to init it for boot cpu */
+ xen_vcpu_info_reset(0);
+
+ /* pvclock is in shared info area */
+ xen_init_time_ops();
+}
+
+static void __init xen_pv_guest_late_init(void)
+{
+#ifndef CONFIG_SMP
+ /* Setup shared vcpu info for non-smp configurations */
+ xen_setup_vcpu_info_placement();
+#endif
+}
+
/* Check if running on Xen version (major, minor) or later */
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
@@ -947,34 +968,8 @@ static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
xen_write_msr_safe(msr, low, high);
}
-void xen_setup_shared_info(void)
-{
- set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
-
- HYPERVISOR_shared_info =
- (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
-
- xen_setup_mfn_list_list();
-
- if (system_state == SYSTEM_BOOTING) {
-#ifndef CONFIG_SMP
- /*
- * In UP this is as good a place as any to set up shared info.
- * Limit this to boot only, at restore vcpu setup is done via
- * xen_vcpu_restore().
- */
- xen_setup_vcpu_info_placement();
-#endif
- /*
- * Now that shared info is set up we can start using routines
- * that point to pvclock area.
- */
- xen_init_time_ops();
- }
-}
-
/* This is called once we have the cpu_possible_mask */
-void __ref xen_setup_vcpu_info_placement(void)
+void __init xen_setup_vcpu_info_placement(void)
{
int cpu;
@@ -1207,12 +1202,20 @@ asmlinkage __visible void __init xen_start_kernel(void)
xen_setup_features();
- xen_setup_machphys_mapping();
-
/* Install Xen paravirt ops */
pv_info = xen_info;
pv_init_ops.patch = paravirt_patch_default;
pv_cpu_ops = xen_cpu_ops;
+ xen_init_irq_ops();
+
+ /*
+ * Setup xen_vcpu early because it is needed for
+ * local_irq_disable(), irqs_disabled(), e.g. in printk().
+ *
+ * Don't do the full vcpu_info placement stuff until we have
+ * the cpu_possible_mask and a non-dummy shared_info.
+ */
+ xen_vcpu_info_reset(0);
x86_platform.get_nmi_reason = xen_get_nmi_reason;
@@ -1220,15 +1223,19 @@ asmlinkage __visible void __init xen_start_kernel(void)
x86_init.irqs.intr_mode_init = x86_init_noop;
x86_init.oem.arch_setup = xen_arch_setup;
x86_init.oem.banner = xen_banner;
+ x86_init.hyper.init_platform = xen_pv_init_platform;
+ x86_init.hyper.guest_late_init = xen_pv_guest_late_init;
/*
* Set up some pagetable state before starting to set any ptes.
*/
+ xen_setup_machphys_mapping();
xen_init_mmu_ops();
/* Prevent unwanted bits from being set in PTEs. */
__supported_pte_mask &= ~_PAGE_GLOBAL;
+ __default_kernel_pte_mask &= ~_PAGE_GLOBAL;
/*
* Prevent page tables from being allocated in highmem, even
@@ -1249,20 +1256,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
get_cpu_cap(&boot_cpu_data);
x86_configure_nx();
- xen_init_irq_ops();
+ /* Determine virtual and physical address sizes */
+ get_cpu_address_sizes(&boot_cpu_data);
/* Let's presume PV guests always boot on vCPU with id 0. */
per_cpu(xen_vcpu_id, 0) = 0;
- /*
- * Setup xen_vcpu early because idt_setup_early_handler needs it for
- * local_irq_disable(), irqs_disabled().
- *
- * Don't do the full vcpu_info placement stuff until we have
- * the cpu_possible_mask and a non-dummy shared_info.
- */
- xen_vcpu_info_reset(0);
-
idt_setup_early_handler();
xen_init_capabilities();
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 74179852e46c..7515a19fd324 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
void __init xen_init_irq_ops(void)
{
- /* For PVH we use default pv_irq_ops settings. */
- if (!xen_feature(XENFEAT_hvm_callback_vector))
- pv_irq_ops = xen_irq_ops;
+ pv_irq_ops = xen_irq_ops;
x86_init.irqs.intr_init = xen_init_IRQ;
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2c30cabfda90..52206ad81e4b 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1230,8 +1230,7 @@ static void __init xen_pagetable_p2m_free(void)
 * We round up to the PMD, which means that if anybody at this stage is
 * using the __ka address of xen_start_info or
 * xen_start_info->shared_info they are going to crash. Fortunately
- * we have already revectored in xen_setup_kernel_pagetable and in
- * xen_setup_shared_info.
+ * we have already revectored in xen_setup_kernel_pagetable.
*/
size = roundup(size, PMD_SIZE);
@@ -1292,8 +1291,7 @@ static void __init xen_pagetable_init(void)
/* Remap memory freed due to conflicts with E820 map */
xen_remap_memory();
-
- xen_setup_shared_info();
+ xen_setup_mfn_list_list();
}
static void xen_write_cr2(unsigned long cr2)
{
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index dc502ca8263e..2bce7958ce8b 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -80,9 +80,9 @@ void xen_mc_flush(void)
and just do the call directly. */
mc = &b->entries[0];
- mc->result = privcmd_call(mc->op,
- mc->args[0], mc->args[1], mc->args[2],
- mc->args[3], mc->args[4]);
+ mc->result = xen_single_call(mc->op, mc->args[0], mc->args[1],
+ mc->args[2], mc->args[3],
+ mc->args[4]);
ret = mc->result < 0;
break;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index cd97a62394e7..973f10e05211 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -130,6 +130,10 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
void __init xen_init_spinlocks(void)
{
+ /* Don't need to use pvqspinlock code if there is only 1 vCPU. */
+ if (num_possible_cpus() == 1)
+ xen_pvspin = false;
+
if (!xen_pvspin) {
printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
return;
diff --git a/arch/x86/xen/suspend_pv.c b/arch/x86/xen/suspend_pv.c
index a2e0f110af56..8303b58c79a9 100644
--- a/arch/x86/xen/suspend_pv.c
+++ b/arch/x86/xen/suspend_pv.c
@@ -27,8 +27,9 @@ void xen_pv_pre_suspend(void)
void xen_pv_post_suspend(int suspend_cancelled)
{
xen_build_mfn_list_list();
-
- xen_setup_shared_info();
+ set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
+ HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+ xen_setup_mfn_list_list();
if (suspend_cancelled) {
xen_start_info->store_mfn =
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index e0f1bcf01d63..c84f1e039d84 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -31,6 +31,8 @@
/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP 100000
+static u64 xen_sched_clock_offset __read_mostly;
+
/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
@@ -40,7 +42,7 @@ static unsigned long xen_tsc_khz(void)
return pvclock_tsc_khz(info);
}
-u64 xen_clocksource_read(void)
+static u64 xen_clocksource_read(void)
{
struct pvclock_vcpu_time_info *src;
u64 ret;
@@ -57,6 +59,11 @@ static u64 xen_clocksource_get_cycles(struct clocksource *cs)
return xen_clocksource_read();
}
+static u64 xen_sched_clock(void)
+{
+ return xen_clocksource_read() - xen_sched_clock_offset;
+}
+
static void xen_read_wallclock(struct timespec64 *ts)
{
struct shared_info *s = HYPERVISOR_shared_info;
@@ -367,7 +374,7 @@ void xen_timer_resume(void)
}
static const struct pv_time_ops xen_time_ops __initconst = {
- .sched_clock = xen_clocksource_read,
+ .sched_clock = xen_sched_clock,
.steal_clock = xen_steal_clock,
};
@@ -503,8 +510,9 @@ static void __init xen_time_init(void)
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
-void __ref xen_init_time_ops(void)
+void __init xen_init_time_ops(void)
{
+ xen_sched_clock_offset = xen_clocksource_read();
pv_time_ops = xen_time_ops;
x86_init.timers.timer_init = xen_time_init;
@@ -542,11 +550,11 @@ void __init xen_hvm_init_time_ops(void)
return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
- printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
- "disable pv timer\n");
+ pr_info("Xen doesn't support pvclock on HVM, disable pv timer");
return;
}
+ xen_sched_clock_offset = xen_clocksource_read();
pv_time_ops = xen_time_ops;
x86_init.timers.setup_percpu_clockev = xen_time_init;
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
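
sched_clock() is expected to count from roughly zero at boot, while the
raw Xen pvclock reading reflects host uptime; sampling the clocksource
once at init and subtracting that offset yields a boot-relative clock.
The pattern in isolation (a sketch with hypothetical names read_raw_clock()
and boot_offset, not the Xen code itself):

static u64 boot_offset __read_mostly;

static void __init my_sched_clock_init(void)
{
	boot_offset = read_raw_clock();	/* hypothetical raw clocksource read */
}

static u64 my_sched_clock(void)
{
	return read_raw_clock() - boot_offset;	/* ns since boot */
}
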
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 3b34745d0a52..e78684597f57 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -31,7 +31,6 @@ extern struct shared_info xen_dummy_shared_info;
extern struct shared_info *HYPERVISOR_shared_info;
void xen_setup_mfn_list_list(void);
-void xen_setup_shared_info(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
@@ -68,12 +67,11 @@ void xen_init_irq_ops(void);
void xen_setup_timer(int cpu);
void xen_setup_runstate_info(int cpu);
void xen_teardown_timer(int cpu);
-u64 xen_clocksource_read(void);
void xen_setup_cpu_clockevents(void);
void xen_save_time_memory_area(void);
void xen_restore_time_memory_area(void);
-void __ref xen_init_time_ops(void);
-void __init xen_hvm_init_time_ops(void);
+void xen_init_time_ops(void);
+void xen_hvm_init_time_ops(void);
irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index d575e8701955..04d038f3b6fa 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -4,12 +4,15 @@ config ZONE_DMA
config XTENSA
def_bool y
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
+ select DMA_NONCOHERENT_OPS
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_SHOW
@@ -60,9 +63,6 @@ config HZ
int
default 100
-source "init/Kconfig"
-source "kernel/Kconfig.freezer"
-
config LOCKDEP_SUPPORT
def_bool y
@@ -75,9 +75,6 @@ config TRACE_IRQFLAGS_SUPPORT
config MMU
def_bool n
-config VARIANT_IRQ_SWITCH
- def_bool n
-
config HAVE_XTENSA_GPIO32
def_bool n
@@ -176,8 +173,6 @@ config XTENSA_UNALIGNED_USER
Say Y here to enable unaligned memory access in user space.
-source "kernel/Kconfig.preempt"
-
config HAVE_SMP
bool "System Supports SMP (MX)"
depends on XTENSA_VARIANT_CUSTOM
@@ -249,6 +244,23 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
If in doubt, say Y.
+config MEMMAP_CACHEATTR
+ hex "Cache attributes for the memory address space"
+ depends on !MMU
+ default 0x22222222
+ help
+ These cache attributes are set up for noMMU systems. Each hex digit
+ specifies cache attributes for the corresponding 512MB memory
+ region: bits 0..3 -- for addresses 0x00000000..0x1fffffff,
+ bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on.
+
+	  Cache attribute values are specific to the MMU type, so e.g.
+	  for region protection MMUs: 2 is cache bypass, 4 is WB cached,
+	  1 is WT cached, f is illegal. For the full MMU: bit 0 makes the
+	  region executable, bit 1 makes it writable, and bits 2..3 mean
+	  0: cache bypass, 1: WB cache, 2: WT cache, 3: special (c and e
+	  are illegal, f is reserved).
+
config KSEG_PADDR
hex "Physical address of the KSEG mapping"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
@@ -418,6 +430,10 @@ config XTENSA_PLATFORM_XTFPGA
endchoice
+config PLATFORM_NR_IRQS
+ int
+ default 3 if XTENSA_PLATFORM_XT2000
+ default 0
config XTENSA_CPU_CLOCK
int "CPU clock rate [MHz]"
@@ -455,6 +471,15 @@ config BUILTIN_DTB
string "DTB to build into the kernel image"
depends on OF
+config PARSE_BOOTPARAM
+ bool "Parse bootparam block"
+ default y
+ help
+ Parse parameters passed to the kernel from the bootloader. It may
+	  be disabled if the kernel is known to run without a bootloader.
+
+ If unsure, say Y.
+
config BLK_DEV_SIMDISK
tristate "Host file-based simulated block device support"
default n
@@ -491,8 +516,6 @@ config SIMDISK1_FILENAME
Another simulated disk in a host file for a buildroot-independent
storage.
-source "mm/Kconfig"
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "11"
@@ -513,25 +536,13 @@ config PLATFORM_WANT_DEFAULT_MEM
def_bool n
config DEFAULT_MEM_START
- hex "Physical address of the default memory area start"
- depends on PLATFORM_WANT_DEFAULT_MEM
- default 0x00000000 if MMU
- default 0x60000000 if !MMU
- help
- This is the base address of the default memory area.
- Default memory area has platform-specific meaning, it may be used
- for e.g. early cache initialization.
-
- If unsure, leave the default value here.
-
-config DEFAULT_MEM_SIZE
- hex "Maximal size of the default memory area"
- depends on PLATFORM_WANT_DEFAULT_MEM
- default 0x04000000
+ hex
+ prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM
+ default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM
+ default 0x00000000
help
- This is the size of the default memory area.
- Default memory area has platform-specific meaning, it may be used
- for e.g. early cache initialization.
+ This is the base address used for both PAGE_OFFSET and PHYS_OFFSET
+ in noMMU configurations.
If unsure, leave the default value here.
@@ -567,30 +578,8 @@ config XTFPGA_LCD_8BIT_ACCESS
endmenu
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
menu "Power management options"
source "kernel/power/Kconfig"
endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/xtensa/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
-
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index f64c14adadb3..39de98e20018 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config DEBUG_TLB_SANITY
bool "Debug TLB sanity"
@@ -34,5 +31,3 @@ config S32C1I_SELFTEST
	  It is easy to get the hardware configuration wrong; this test should catch such mistakes early.
Say 'N' on stable hardware.
-
-endmenu
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 53e4178711e6..dc9e0ba7122c 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -30,8 +30,7 @@ Image: boot-elf
zImage: boot-redboot
uImage: $(obj)/uImage
-boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y)) \
- $(addprefix $(obj)/,$(host-progs))
+boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y))
$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index b6aa85328ac0..29c68426ab56 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -15,10 +15,6 @@
*/
#include <asm/bootparam.h>
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/cacheasm.h>
#include <asm/initialize_mmu.h>
#include <asm/vectors.h>
#include <linux/linkage.h>
@@ -33,16 +29,18 @@ _ResetVector:
.begin no-absolute-literals
.literal_position
- .align 4
-RomInitAddr:
#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
- .word CONFIG_KERNEL_LOAD_ADDRESS
+ .literal RomInitAddr, CONFIG_KERNEL_LOAD_ADDRESS
#else
- .word KERNELOFFSET
+ .literal RomInitAddr, KERNELOFFSET
#endif
-RomBootParam:
- .word _bootparam
+#ifndef CONFIG_PARSE_BOOTPARAM
+ .literal RomBootParam, 0
+#else
+ .literal RomBootParam, _bootparam
+
+ .align 4
_bootparam:
.short BP_TAG_FIRST
.short 4
@@ -50,6 +48,7 @@ _bootparam:
.short BP_TAG_LAST
.short 0
.long 0
+#endif
.align 4
_SetupMMU:
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index 624f9b3a3878..f3fc4f970ca8 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -33,13 +33,13 @@ CONFIG_XTENSA_VARIANT_CUSTOM_NAME="de212"
# CONFIG_XTENSA_VARIANT_MMU is not set
CONFIG_XTENSA_UNALIGNED_USER=y
CONFIG_PREEMPT=y
+CONFIG_MEMMAP_CACHEATTR=0xfff2442f
# CONFIG_PCI is not set
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="kc705_nommu"
-CONFIG_DEFAULT_MEM_SIZE=0x10000000
CONFIG_BINFMT_FLAT=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index e5e1e61c538c..82c756431b49 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -3,6 +3,7 @@ generic-y += compat.h
generic-y += device.h
generic-y += div64.h
generic-y += dma-contiguous.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e7a23f2a519a..7de0149e1cf7 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -197,107 +197,9 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc(v) atomic_add(1,(v))
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec(v) atomic_sub(1,(v))
-
-/**
- * atomic_dec_return - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
-
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
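
The derived operations removed here (atomic_inc(), atomic_dec_and_test(),
__atomic_add_unless() and friends) are now provided generically by
<linux/atomic.h> on top of the core ops each architecture defines. The
removed cmpxchg loop survives there as roughly the following fallback:

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!atomic_try_cmpxchg(v, &c, c + a));

	return c;
}
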
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
index 2041abb10a23..34545ecfdd6b 100644
--- a/arch/xtensa/include/asm/cacheasm.h
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -31,16 +31,32 @@
*
*/
- .macro __loop_cache_all ar at insn size line_width
- movi \ar, 0
+ .macro __loop_cache_unroll ar at insn size line_width max_immed
+
+ .if (1 << (\line_width)) > (\max_immed)
+ .set _reps, 1
+ .elseif (2 << (\line_width)) > (\max_immed)
+ .set _reps, 2
+ .else
+ .set _reps, 4
+ .endif
+
+ __loopi \ar, \at, \size, (_reps << (\line_width))
+ .set _index, 0
+ .rep _reps
+ \insn \ar, _index << (\line_width)
+ .set _index, _index + 1
+ .endr
+ __endla \ar, \at, _reps << (\line_width)
+
+ .endm
+
- __loopi \ar, \at, \size, (4 << (\line_width))
- \insn \ar, 0 << (\line_width)
- \insn \ar, 1 << (\line_width)
- \insn \ar, 2 << (\line_width)
- \insn \ar, 3 << (\line_width)
- __endla \ar, \at, 4 << (\line_width)
+ .macro __loop_cache_all ar at insn size line_width max_immed
+
+ movi \ar, 0
+ __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
.endm
@@ -57,14 +73,9 @@
.endm
- .macro __loop_cache_page ar at insn line_width
+ .macro __loop_cache_page ar at insn line_width max_immed
- __loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
- \insn \ar, 0 << (\line_width)
- \insn \ar, 1 << (\line_width)
- \insn \ar, 2 << (\line_width)
- \insn \ar, 3 << (\line_width)
- __endla \ar, \at, 4 << (\line_width)
+ __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
.endm
@@ -72,7 +83,8 @@
.macro ___unlock_dcache_all ar at
#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
- __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+ __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
@@ -81,7 +93,8 @@
.macro ___unlock_icache_all ar at
#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
- __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
+ __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
+ XCHAL_ICACHE_LINEWIDTH 240
#endif
.endm
@@ -90,7 +103,8 @@
.macro ___flush_invalidate_dcache_all ar at
#if XCHAL_DCACHE_SIZE
- __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+ __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
@@ -99,7 +113,8 @@
.macro ___flush_dcache_all ar at
#if XCHAL_DCACHE_SIZE
- __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+ __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
@@ -108,8 +123,8 @@
.macro ___invalidate_dcache_all ar at
#if XCHAL_DCACHE_SIZE
- __loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
- XCHAL_DCACHE_LINEWIDTH
+ __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -118,8 +133,8 @@
.macro ___invalidate_icache_all ar at
#if XCHAL_ICACHE_SIZE
- __loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
- XCHAL_ICACHE_LINEWIDTH
+ __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
+ XCHAL_ICACHE_LINEWIDTH 1020
#endif
.endm
@@ -166,7 +181,7 @@
.macro ___flush_invalidate_dcache_page ar as
#if XCHAL_DCACHE_SIZE
- __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
+ __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -175,7 +190,7 @@
.macro ___flush_dcache_page ar as
#if XCHAL_DCACHE_SIZE
- __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
+ __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -184,7 +199,7 @@
.macro ___invalidate_dcache_page ar as
#if XCHAL_DCACHE_SIZE
- __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
+ __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -193,7 +208,7 @@
.macro ___invalidate_icache_page ar as
#if XCHAL_ICACHE_SIZE
- __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
+ __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
#endif
.endm
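
The new max_immed argument bounds the byte offset that the given cache
instruction can encode in its immediate field (240 for most forms, 1020
for the hit/index forms used above), and __loop_cache_unroll picks the
largest unroll factor whose offsets still fit. The selection logic,
restated in C as a sketch:

/* Sketch of the unroll-factor selection in __loop_cache_unroll. */
static int cache_loop_reps(int line_width, int max_immed)
{
	if ((1 << line_width) > max_immed)
		return 1;	/* even offset 1<<lw would not encode */
	if ((2 << line_width) > max_immed)
		return 2;	/* offsets 0 and 1<<lw */
	return 4;		/* offsets 0..3<<lw, e.g. 0/32/64/96 for lw=5 */
}
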
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
deleted file mode 100644
index 44098800dad7..000000000000
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003 - 2005 Tensilica Inc.
- * Copyright (C) 2015 Cadence Design Systems Inc.
- */
-
-#ifndef _XTENSA_DMA_MAPPING_H
-#define _XTENSA_DMA_MAPPING_H
-
-#include <asm/cache.h>
-#include <asm/io.h>
-
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
-extern const struct dma_map_ops xtensa_dma_map_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &xtensa_dma_map_ops;
-}
-
-#endif /* _XTENSA_DMA_MAPPING_H */
diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h
index dbe3053b284a..9f119c1ca0b5 100644
--- a/arch/xtensa/include/asm/hw_breakpoint.h
+++ b/arch/xtensa/include/asm/hw_breakpoint.h
@@ -30,13 +30,16 @@ struct arch_hw_breakpoint {
u16 type;
};
+struct perf_event_attr;
struct perf_event;
struct pt_regs;
struct task_struct;
int hw_breakpoint_slots(int type);
-int arch_check_bp_in_kernelspace(struct perf_event *bp);
-int arch_validate_hwbkpt_settings(struct perf_event *bp);
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 42410f253597..10e9852b2fb4 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -177,36 +177,36 @@
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
XCHAL_HAVE_SPANNING_WAY */
-#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS && \
- (XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE)
- /* Enable data and instruction cache in the DEFAULT_MEMORY region
- * if the processor has DTLB and ITLB.
- */
+ .endm
+
+ .macro initialize_cacheattr
- movi a5, PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY
+#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
+#if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
+#error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
+#endif
+
+ movi a5, XCHAL_SPANNING_WAY
movi a6, ~_PAGE_ATTRIB_MASK
- movi a7, CA_WRITEBACK
+ movi a4, CONFIG_MEMMAP_CACHEATTR
movi a8, 0x20000000
- movi a9, PLATFORM_DEFAULT_MEM_SIZE
- j 2f
1:
- sub a9, a9, a8
-2:
-#if XCHAL_DCACHE_SIZE
rdtlb1 a3, a5
+ xor a3, a3, a4
and a3, a3, a6
- or a3, a3, a7
+ xor a3, a3, a4
wdtlb a3, a5
-#endif
-#if XCHAL_ICACHE_SIZE
- ritlb1 a4, a5
- and a4, a4, a6
- or a4, a4, a7
- witlb a4, a5
-#endif
+ ritlb1 a3, a5
+ xor a3, a3, a4
+ and a3, a3, a6
+ xor a3, a3, a4
+ witlb a3, a5
+
add a5, a5, a8
- bltu a8, a9, 1b
+ srli a4, a4, 4
+ bgeu a5, a8, 1b
+ isync
#endif
.endm
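
The loop above visits the eight 512MB regions in turn, installing one hex
digit of CONFIG_MEMMAP_CACHEATTR per region (least significant digit
first, shifted out with srli), and the xor/and/xor sequence splices the
attribute bits into the TLB entry without disturbing the address bits.
The equivalent per-region lookup, as a C sketch:

/* Attribute digit for the 512MB region containing vaddr (sketch). */
static unsigned int region_cacheattr(unsigned long vaddr, u32 cacheattr)
{
	unsigned int region = vaddr >> 29;	/* 0..7 */

	return (cacheattr >> (region * 4)) & 0xf;
}

With the nommu_kc705 defconfig value 0xfff2442f used later in this series,
RAM at 0x60000000 (region 3) gets attribute 4, i.e. write-back cached,
while the unpopulated high regions stay at f.
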
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index 19707db966f1..6c6ed23e0c79 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -12,32 +12,17 @@
#define _XTENSA_IRQ_H
#include <linux/init.h>
-#include <platform/hardware.h>
#include <variant/core.h>
-#ifdef CONFIG_VARIANT_IRQ_SWITCH
-#include <variant/irq.h>
+#ifdef CONFIG_PLATFORM_NR_IRQS
+# define PLATFORM_NR_IRQS CONFIG_PLATFORM_NR_IRQS
#else
-static inline void variant_irq_enable(unsigned int irq) { }
-static inline void variant_irq_disable(unsigned int irq) { }
-#endif
-
-#ifndef VARIANT_NR_IRQS
-# define VARIANT_NR_IRQS 0
-#endif
-#ifndef PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS + 1)
#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
-#if VARIANT_NR_IRQS == 0
-static inline void variant_init_irq(void) { }
-#else
-void variant_init_irq(void) __init;
-#endif
-
static __inline__ int irq_canonicalize(int irq)
{
return (irq);
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 2317c835a4db..9c12babc016c 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -63,12 +63,6 @@
#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
#endif
-#else
-
-#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
-#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
-#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
-
#endif
#ifndef CONFIG_KASAN
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 5d69c11c01b8..09c56cba442e 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -14,7 +14,6 @@
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
-#include <platform/hardware.h>
#include <asm/kmem_layout.h>
/*
@@ -31,8 +30,8 @@
#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \
PHYS_PFN(XCHAL_KSEG_SIZE))
#else
-#define PAGE_OFFSET PLATFORM_DEFAULT_MEM_START
-#define PHYS_OFFSET PLATFORM_DEFAULT_MEM_START
+#define PAGE_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
+#define PHYS_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
#endif
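
PAGE_OFFSET and PHYS_OFFSET now come straight from Kconfig via _AC(),
which makes a single constant usable from both C and assembly; from
include/uapi/linux/const.h:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: drop the suffix */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: paste it, e.g. 0x60000000UL */
#endif
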
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 38802259978f..29cfe421cf41 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -66,6 +66,7 @@
#define FIRST_USER_ADDRESS 0UL
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
+#ifdef CONFIG_MMU
/*
* Virtual memory area. We keep a distance to other memory regions to be
* on the safe side. We also use this area for cache aliasing.
@@ -80,6 +81,13 @@
#define TLBTEMP_SIZE ICACHE_WAY_SIZE
#endif
+#else
+
+#define VMALLOC_START __XTENSA_UL_CONST(0)
+#define VMALLOC_END __XTENSA_UL_CONST(0xffffffff)
+
+#endif
+
/*
* For the Xtensa architecture, the PTE layout is as follows:
*
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index f8fbef67bc5f..560483356a06 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -75,4 +75,31 @@ extern void platform_calibrate_ccount (void);
*/
void cpu_reset(void) __attribute__((noreturn));
+/*
+ * Memory caching is platform-dependent in noMMU xtensa configurations.
+ * The following set of functions should be implemented in platform code
+ * in order to enable coherent DMA memory operations when CONFIG_MMU is not
+ * enabled. Default implementations do nothing and issue a warning.
+ */
+
+/*
+ * Check whether p points to cached memory.
+ */
+bool platform_vaddr_cached(const void *p);
+
+/*
+ * Check whether p points to uncached memory.
+ */
+bool platform_vaddr_uncached(const void *p);
+
+/*
+ * Return a pointer to an uncached view of the cached address p.
+ */
+void *platform_vaddr_to_uncached(void *p);
+
+/*
+ * Return a pointer to a cached view of the uncached address p.
+ */
+void *platform_vaddr_to_cached(void *p);
+
#endif /* _XTENSA_PLATFORM_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 5b0027d4ecc0..e4ccb88b7996 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -11,7 +11,6 @@
#define _XTENSA_PROCESSOR_H
#include <variant/core.h>
-#include <platform/hardware.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 65d3da9db19b..7111280c8842 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -19,7 +19,6 @@
#define _XTENSA_VECTORS_H
#include <variant/core.h>
-#include <platform/hardware.h>
#include <asm/kmem_layout.h>
#if XCHAL_HAVE_PTP_MMU
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 75a07b8119a9..1de07a7f7680 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -116,4 +116,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _XTENSA_SOCKET_H */
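
SO_TXTIME (added to every architecture's socket.h in this series) lets a
sender schedule packet departure times: the option takes a struct
sock_txtime, and each packet's transmit time then rides in an SCM_TXTIME
control message on sendmsg(). A hedged userspace sketch, assuming the
matching uapi headers:

#include <time.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>	/* struct sock_txtime */

/* Enable TXTIME on a socket; per-packet u64 nanosecond departure times
 * are then attached as SCM_TXTIME cmsgs on sendmsg(). */
static int enable_txtime(int fd)
{
	struct sock_txtime cfg = {
		.clockid = CLOCK_TAI,	/* app and qdisc must agree on clock */
		.flags = 0,
	};

	return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
}
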
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 9c4e9433e536..2f76118ecf62 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -181,6 +181,8 @@ ENTRY(_startup)
isync
+ initialize_cacheattr
+
#ifdef CONFIG_HAVE_SMP
movi a2, CCON # MX External Register to Configure Cache
movi a3, 1
diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c
index b35656ab7dbd..c2e387c19cda 100644
--- a/arch/xtensa/kernel/hw_breakpoint.c
+++ b/arch/xtensa/kernel/hw_breakpoint.c
@@ -33,14 +33,13 @@ int hw_breakpoint_slots(int type)
}
}
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = bp->attr.bp_len;
+ va = hw->address;
+ len = hw->len;
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -48,50 +47,41 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->type = XTENSA_BREAKPOINT_EXECUTE;
+ hw->type = XTENSA_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->type = XTENSA_BREAKPOINT_LOAD;
+ hw->type = XTENSA_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->type = XTENSA_BREAKPOINT_STORE;
+ hw->type = XTENSA_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
+ hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- info->len = bp->attr.bp_len;
- if (info->len < 1 || info->len > 64 || !is_power_of_2(info->len))
+ hw->len = attr->bp_len;
+ if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
return -EINVAL;
/* Address */
- info->address = bp->attr.bp_addr;
- if (info->address & (info->len - 1))
+ hw->address = attr->bp_addr;
+ if (hw->address & (hw->len - 1))
return -EINVAL;
return 0;
}
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
-{
- int ret;
-
- /* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
- return ret;
-}
-
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data)
{
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 18e4ef34ac45..a48bf2d10ac2 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -158,7 +158,6 @@ void __init init_IRQ(void)
#ifdef CONFIG_SMP
ipi_init();
#endif
- variant_init_irq();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 392b4a80ebc2..1fc138b6bc0a 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -16,26 +16,25 @@
*/
#include <linux/dma-contiguous.h>
+#include <linux/dma-noncoherent.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
+#include <asm/platform.h>
-static void do_cache_op(dma_addr_t dma_handle, size_t size,
+static void do_cache_op(phys_addr_t paddr, size_t size,
void (*fn)(unsigned long, unsigned long))
{
- unsigned long off = dma_handle & (PAGE_SIZE - 1);
- unsigned long pfn = PFN_DOWN(dma_handle);
+ unsigned long off = paddr & (PAGE_SIZE - 1);
+ unsigned long pfn = PFN_DOWN(paddr);
struct page *page = pfn_to_page(pfn);
if (!PageHighMem(page))
- fn((unsigned long)bus_to_virt(dma_handle), size);
+ fn((unsigned long)phys_to_virt(paddr), size);
else
while (size > 0) {
size_t sz = min_t(size_t, size, PAGE_SIZE - off);
@@ -49,14 +48,13 @@ static void do_cache_op(dma_addr_t dma_handle, size_t size,
}
}
-static void xtensa_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
case DMA_FROM_DEVICE:
- do_cache_op(dma_handle, size, __invalidate_dcache_range);
+ do_cache_op(paddr, size, __invalidate_dcache_range);
break;
case DMA_NONE:
@@ -68,15 +66,14 @@ static void xtensa_sync_single_for_cpu(struct device *dev,
}
}
-static void xtensa_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
case DMA_TO_DEVICE:
if (XCHAL_DCACHE_IS_WRITEBACK)
- do_cache_op(dma_handle, size, __flush_dcache_range);
+ do_cache_op(paddr, size, __flush_dcache_range);
break;
case DMA_NONE:
@@ -88,43 +85,66 @@ static void xtensa_sync_single_for_device(struct device *dev,
}
}
-static void xtensa_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+#ifdef CONFIG_MMU
+bool platform_vaddr_cached(const void *p)
{
- struct scatterlist *s;
- int i;
+ unsigned long addr = (unsigned long)p;
- for_each_sg(sg, s, nents, i) {
- xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
- sg_dma_len(s), dir);
- }
+ return addr >= XCHAL_KSEG_CACHED_VADDR &&
+ addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
}
-static void xtensa_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+bool platform_vaddr_uncached(const void *p)
{
- struct scatterlist *s;
- int i;
+ unsigned long addr = (unsigned long)p;
- for_each_sg(sg, s, nents, i) {
- xtensa_sync_single_for_device(dev, sg_dma_address(s),
- sg_dma_len(s), dir);
- }
+ return addr >= XCHAL_KSEG_BYPASS_VADDR &&
+ addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
+}
+
+void *platform_vaddr_to_uncached(void *p)
+{
+ return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
+}
+
+void *platform_vaddr_to_cached(void *p)
+{
+ return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+}
+#else
+bool __attribute__((weak)) platform_vaddr_cached(const void *p)
+{
+ WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+ return true;
+}
+
+bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
+{
+ WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+ return false;
+}
+
+void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
+{
+ WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+ return p;
+}
+
+void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
+{
+ WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+ return p;
}
+#endif
/*
* Note: We assume that the full memory space is always mapped to 'kseg'
* Otherwise we have to use page attributes (not implemented).
*/
-static void *xtensa_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t flag,
- unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t flag, unsigned long attrs)
{
- unsigned long ret;
- unsigned long uncached;
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
@@ -137,7 +157,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
if (gfpflags_allow_blocking(flag))
page = dma_alloc_from_contiguous(dev, count, get_order(size),
- flag);
+ flag & __GFP_NOWARN);
if (!page)
page = alloc_pages(flag, get_order(size));
@@ -147,6 +167,10 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
*handle = phys_to_dma(dev, page_to_phys(page));
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ return page;
+ }
+
#ifdef CONFIG_MMU
if (PageHighMem(page)) {
void *p;
@@ -161,27 +185,21 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
return p;
}
#endif
- ret = (unsigned long)page_address(page);
- BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
- ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
-
- uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
- __invalidate_dcache_range(ret, size);
-
- return (void *)uncached;
+ BUG_ON(!platform_vaddr_cached(page_address(page)));
+ __invalidate_dcache_range((unsigned long)page_address(page), size);
+ return platform_vaddr_to_uncached(page_address(page));
}
-static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long addr = (unsigned long)vaddr;
struct page *page;
- if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
- addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
- addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
- page = virt_to_page(addr);
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ page = vaddr;
+ } else if (platform_vaddr_uncached(vaddr)) {
+ page = virt_to_page(platform_vaddr_to_cached(vaddr));
} else {
#ifdef CONFIG_MMU
dma_common_free_remap(vaddr, size, VM_MAP);
@@ -192,72 +210,3 @@ static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
if (!dma_release_from_contiguous(dev, page, count))
__free_pages(page, get_order(size));
}
-
-static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t dma_handle = page_to_phys(page) + offset;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xtensa_sync_single_for_device(dev, dma_handle, size, dir);
-
- return dma_handle;
-}
-
-static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
-}
-
-static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i) {
- s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
- s->length, dir, attrs);
- }
- return nents;
-}
-
-static void xtensa_unmap_sg(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i) {
- xtensa_unmap_page(dev, sg_dma_address(s),
- sg_dma_len(s), dir, attrs);
- }
-}
-
-int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-const struct dma_map_ops xtensa_dma_map_ops = {
- .alloc = xtensa_dma_alloc,
- .free = xtensa_dma_free,
- .map_page = xtensa_map_page,
- .unmap_page = xtensa_unmap_page,
- .map_sg = xtensa_map_sg,
- .unmap_sg = xtensa_unmap_sg,
- .sync_single_for_cpu = xtensa_sync_single_for_cpu,
- .sync_single_for_device = xtensa_sync_single_for_device,
- .sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
- .sync_sg_for_device = xtensa_sync_sg_for_device,
- .mapping_error = xtensa_dma_mapping_error,
-};
-EXPORT_SYMBOL(xtensa_dma_map_ops);
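
With DMA_NONCOHERENT_OPS selected, the per-arch dma_map_ops table deleted
above is replaced by the generic dma-direct code, which calls the two
arch_sync_dma_for_*() hooks around each mapping. Roughly what the generic
map path does (a sketch, not the exact kernel/dma implementation):

static dma_addr_t map_page_sketch(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, paddr, size, dir);

	return phys_to_dma(dev, paddr);
}
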
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 686a27444bba..351283b60df6 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -47,8 +47,6 @@
#include <asm/smp.h>
#include <asm/sysmem.h>
-#include <platform/hardware.h>
-
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
.orig_x = 0,
@@ -81,6 +79,7 @@ static char __initdata command_line[COMMAND_LINE_SIZE];
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
#endif
+#ifdef CONFIG_PARSE_BOOTPARAM
/*
* Boot parameter parsing.
*
@@ -178,6 +177,13 @@ static int __init parse_bootparam(const bp_tag_t* tag)
return 0;
}
+#else
+static int __init parse_bootparam(const bp_tag_t *tag)
+{
+ pr_info("Ignoring boot parameters at %p\n", tag);
+ return 0;
+}
+#endif
#ifdef CONFIG_OF
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 70b731edc7b8..a1c3edb8ad56 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -20,7 +20,7 @@
#include <asm/vectors.h>
#include <variant/core.h>
-#include <platform/hardware.h>
+
OUTPUT_ARCH(xtensa)
ENTRY(_start)
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index c111a833205a..2ab0e0dcd166 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -42,7 +42,7 @@ void do_page_fault(struct pt_regs *regs)
int code;
int is_write, is_exec;
- int fault;
+ vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
code = SEGV_MAPERR;
diff --git a/arch/xtensa/platforms/iss/include/platform/hardware.h b/arch/xtensa/platforms/iss/include/platform/hardware.h
deleted file mode 100644
index 6930c12adc16..000000000000
--- a/arch/xtensa/platforms/iss/include/platform/hardware.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * include/asm-xtensa/platform-iss/hardware.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 Tensilica Inc.
- */
-
-/*
- * This file contains the default configuration of ISS.
- */
-
-#ifndef _XTENSA_PLATFORM_ISS_HARDWARE_H
-#define _XTENSA_PLATFORM_ISS_HARDWARE_H
-
-/*
- * Memory configuration.
- */
-
-#define PLATFORM_DEFAULT_MEM_START 0x00000000
-#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000
-
-/*
- * Interrupt configuration.
- */
-
-#endif /* _XTENSA_PLATFORM_ISS_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xt2000/include/platform/hardware.h b/arch/xtensa/platforms/xt2000/include/platform/hardware.h
index 886ef156ded3..8e5e0d6a81ec 100644
--- a/arch/xtensa/platforms/xt2000/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xt2000/include/platform/hardware.h
@@ -17,17 +17,6 @@
#include <variant/core.h>
-/*
- * Memory configuration.
- */
-
-#define PLATFORM_DEFAULT_MEM_START 0x00000000
-#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000
-
-/*
- * Number of platform IRQs
- */
-#define PLATFORM_NR_IRQS 3
/*
* On-board components.
*/
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index 1fda7e20dfcb..30d9cb6cf168 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -17,15 +17,6 @@
#ifndef __XTENSA_XTAVNET_HARDWARE_H
#define __XTENSA_XTAVNET_HARDWARE_H
-/* Memory configuration. */
-
-#define PLATFORM_DEFAULT_MEM_START __XTENSA_UL(CONFIG_DEFAULT_MEM_START)
-#define PLATFORM_DEFAULT_MEM_SIZE __XTENSA_UL(CONFIG_DEFAULT_MEM_SIZE)
-
-/* Interrupt configuration. */
-
-#define PLATFORM_NR_IRQS 0
-
/* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_XTENSA_MX
diff --git a/arch/xtensa/variants/test_kc705_be/include/variant/core.h b/arch/xtensa/variants/test_kc705_be/include/variant/core.h
new file mode 100644
index 000000000000..a4ebdf7197d7
--- /dev/null
+++ b/arch/xtensa/variants/test_kc705_be/include/variant/core.h
@@ -0,0 +1,575 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 1 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 1 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* num of contexts */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+
+#define XCHAL_HAVE_FUSION 0 /* Fusion*/
+#define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */
+#define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */
+#define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */
+#define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */
+#define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */
+#define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */
+#define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */
+#define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI2 1 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2_MUL32X24 1 /* HiFi2 and 32x24 MACs */
+#define XCHAL_HAVE_HIFI2EP 1 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0 /* HiFi Mini Audio Engine pkg */
+
+
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector or user floating-point pkg */
+#define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */
+#define XCHAL_HAVE_USER_SPFPU 0 /* user SP floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */
+
+#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */
+#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 0 /* DFPU Coprocessor, single and double precision */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_PDX4 0 /* PDX4 */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+#define XCHAL_HAVE_GRIVPEP 0 /* GRIVPEP is General Release of IVPEP */
+#define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 8 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+#define XCHAL_CLOCK_GATING_GLOBAL 0 /* global clock gating */
+#define XCHAL_CLOCK_GATING_FUNCUNIT 0 /* funct. unit clock gating */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 1100002 /* sw version of this header */
+
+#define XCHAL_CORE_ID "test_kc705_be" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00058D8C /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC1B3FFFF /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x1C858D8C /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX6.0.2" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 260002 /* major*100+minor */
+#define XCHAL_HW_REL_LX6 1
+#define XCHAL_HW_REL_LX6_0 1
+#define XCHAL_HW_REL_LX6_0_2 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2600 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 260002 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2600 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 260002 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 1 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 1 /* dcache pref. castout bufsz */
+#define XCHAL_PREFETCH_ENTRIES 8 /* cache prefetch entries */
+#define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */
+#define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */
+#define XCHAL_HAVE_ICACHE_TEST 1 /* Icache test instructions */
+#define XCHAL_HAVE_DCACHE_TEST 1 /* Dcache test instructions */
+#define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */
+#define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
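+
+/*
+ * Worked example of the size formula above, with this configuration's
+ * values: ways * 2^(linewidth + setwidth) = 4 * 2^(5+7) = 16384 bytes,
+ * matching the 16 KiB XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE.
+ */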
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 8
+#define XCHAL_DCACHE_ACCESS_SIZE 8
+
+#define XCHAL_DCACHE_BANKS 1 /* number of banks */
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+/* Whether MEMCTL register has anything useful */
+#define XCHAL_USE_MEMCTL (((XCHAL_LOOP_BUFFER_SIZE > 0) || \
+ XCHAL_DCACHE_IS_COHERENT || \
+ XCHAL_HAVE_ICACHE_DYN_WAYS || \
+ XCHAL_HAVE_DCACHE_DYN_WAYS) && \
+ (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0))
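+
+/*
+ * For this particular core the expression above folds to 0: the loop
+ * buffer size is 0, the D-cache is not coherent, and neither cache has
+ * dynamic way support, so MEMCTL carries nothing useful here.
+ */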
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 16 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 4 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_MASK 0x00000140
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00008000
+#define XCHAL_INTLEVEL5_MASK 0x00003000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
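+
+/*
+ * Worked example: XCHAL_INTLEVEL2_MASK == 0x00000140 sets bits 6 and 8,
+ * matching XCHAL_INT6_LEVEL and XCHAL_INT8_LEVEL == 2 below.
+ */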
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F01FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F0FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 2
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 5
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 4
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_PROFILING
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F0000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00008000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+#define XCHAL_PROFILING_INTERRUPT 15 /* profiling interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL4_NUM 15
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 2, 3, 5.) */
+
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 5) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 21 /* (intlevel 3) */
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */
+#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */
+#define XCHAL_INT12_EXTNUM 8 /* (intlevel 5) */
+#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */
+#define XCHAL_INT16_EXTNUM 10 /* (intlevel 1) */
+#define XCHAL_INT17_EXTNUM 11 /* (intlevel 1) */
+#define XCHAL_INT18_EXTNUM 12 /* (intlevel 1) */
+#define XCHAL_INT19_EXTNUM 13 /* (intlevel 1) */
+#define XCHAL_INT20_EXTNUM 14 /* (intlevel 1) */
+#define XCHAL_INT21_EXTNUM 15 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
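+
+/*
+ * Note the pattern above: each relocatable vector's VADDR/PADDR is
+ * XCHAL_VECBASE_RESET_VADDR plus that vector's VECOFS, e.g. the user
+ * vector at 0x00002000 + 0x00000340 = 0x00002340.
+ */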
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 1 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 1 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 262144 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 1 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 8 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h b/arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h
new file mode 100644
index 000000000000..b87fe1a5177b
--- /dev/null
+++ b/arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h
@@ -0,0 +1,308 @@
+/*
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+ macros, etc.) for this specific Xtensa processor's TIE extensions
+ and options. It is customized to this Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
+ | ((ccuse) & XTHAL_SAS_ANYCC) \
+ | ((abi) & XTHAL_SAS_ANYABI) )
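+
+/*
+ * Worked example: XTHAL_SAS3(XTHAL_SAS_OPT, XTHAL_SAS_CC, XTHAL_SAS_GLOB)
+ * == 0x0002 | 0x0008 | 0x0040 == 0x004A, i.e. the "optional register,
+ * compiler-used, thread-global" category that the store/load macros
+ * below test first (the THREADPTR register).
+ */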
+
+
+ /*
+ * Macro to store all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger store sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to store. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to store, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any store.
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ rur.THREADPTR \at1 // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ rsr.ACCLO \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.ACCHI \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ rsr.BR \at1 // boolean option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.SCOMPARE1 \at1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rsr.M0 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rsr.M1 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rsr.M2 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ rsr.M3 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+20
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .endif
+ .endm // xchal_ncp_store
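+
+ /*
+ * Illustrative invocation (registers chosen hypothetically): store
+ * all optional state to a 4-byte-aligned save area addressed by a2,
+ * with a3..a6 as temporaries; only a3 is actually clobbered, since
+ * XCHAL_NCP_NUM_ATMPS is 1 for this core (defined below):
+ *
+ * xchal_ncp_store a2, a3, a4, a5, a6
+ */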
+
+ /*
+ * Macro to load all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger load sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to load. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to load, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any load.
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.THREADPTR \at1 // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.ACCLO \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.ACCHI \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.BR \at1 // boolean option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.SCOMPARE1 \at1 // conditional store option
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wsr.M0 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wsr.M1 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wsr.M2 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+20
+ wsr.M3 \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .endif
+ .endm // xchal_ncp_load
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+ /*
+ * Macro to store the state of TIE coprocessor AudioEngineLX.
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 8 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_CP1_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters are the same as for xchal_ncp_store.
+ */
+#define xchal_cp_AudioEngineLX_store xchal_cp1_store
+ .macro xchal_cp1_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Custom caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ rur.AE_OVF_SAR \at1 // ureg 240
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rur.AE_BITHEAD \at1 // ureg 241
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rur.AE_TS_FTS_BU_BP \at1 // ureg 242
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rur.AE_SD_NO \at1 // ureg 243
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rur.AE_CBEGIN0 \at1 // ureg 246
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ rur.AE_CEND0 \at1 // ureg 247
+ s32i \at1, \ptr, .Lxchal_ofs_+20
+ ae_sp24x2s.i aep0, \ptr, .Lxchal_ofs_+24
+ ae_sp24x2s.i aep1, \ptr, .Lxchal_ofs_+32
+ ae_sp24x2s.i aep2, \ptr, .Lxchal_ofs_+40
+ ae_sp24x2s.i aep3, \ptr, .Lxchal_ofs_+48
+ ae_sp24x2s.i aep4, \ptr, .Lxchal_ofs_+56
+ addi \ptr, \ptr, 64
+ ae_sp24x2s.i aep5, \ptr, .Lxchal_ofs_+0
+ ae_sp24x2s.i aep6, \ptr, .Lxchal_ofs_+8
+ ae_sp24x2s.i aep7, \ptr, .Lxchal_ofs_+16
+ ae_sq56s.i aeq0, \ptr, .Lxchal_ofs_+24
+ ae_sq56s.i aeq1, \ptr, .Lxchal_ofs_+32
+ ae_sq56s.i aeq2, \ptr, .Lxchal_ofs_+40
+ ae_sq56s.i aeq3, \ptr, .Lxchal_ofs_+48
+ .set .Lxchal_pofs_, .Lxchal_pofs_ + 64
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 56
+ .elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 120
+ .endif
+ .endm // xchal_cp1_store
+
+ /*
+ * Macro to load the state of TIE coprocessor AudioEngineLX.
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 8 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_CP1_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters are the same as for xchal_ncp_load.
+ */
+#define xchal_cp_AudioEngineLX_load xchal_cp1_load
+ .macro xchal_cp1_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Custom caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.AE_OVF_SAR \at1 // ureg 240
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wur.AE_BITHEAD \at1 // ureg 241
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wur.AE_TS_FTS_BU_BP \at1 // ureg 242
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wur.AE_SD_NO \at1 // ureg 243
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wur.AE_CBEGIN0 \at1 // ureg 246
+ l32i \at1, \ptr, .Lxchal_ofs_+20
+ wur.AE_CEND0 \at1 // ureg 247
+ ae_lp24x2.i aep0, \ptr, .Lxchal_ofs_+24
+ ae_lp24x2.i aep1, \ptr, .Lxchal_ofs_+32
+ ae_lp24x2.i aep2, \ptr, .Lxchal_ofs_+40
+ ae_lp24x2.i aep3, \ptr, .Lxchal_ofs_+48
+ ae_lp24x2.i aep4, \ptr, .Lxchal_ofs_+56
+ addi \ptr, \ptr, 64
+ ae_lp24x2.i aep5, \ptr, .Lxchal_ofs_+0
+ ae_lp24x2.i aep6, \ptr, .Lxchal_ofs_+8
+ ae_lp24x2.i aep7, \ptr, .Lxchal_ofs_+16
+ addi \ptr, \ptr, 24
+ ae_lq56.i aeq0, \ptr, .Lxchal_ofs_+0
+ ae_lq56.i aeq1, \ptr, .Lxchal_ofs_+8
+ ae_lq56.i aeq2, \ptr, .Lxchal_ofs_+16
+ ae_lq56.i aeq3, \ptr, .Lxchal_ofs_+24
+ .set .Lxchal_pofs_, .Lxchal_pofs_ + 88
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 32
+ .elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 120
+ .endif
+ .endm // xchal_cp1_load
+
+#define XCHAL_CP1_NUM_ATMPS 1
+#define XCHAL_SA_NUM_ATMPS 1
+
+ /* Empty macros for unconfigured coprocessors: */
+ .macro xchal_cp0_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp0_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/arch/xtensa/variants/test_kc705_be/include/variant/tie.h b/arch/xtensa/variants/test_kc705_be/include/variant/tie.h
new file mode 100644
index 000000000000..4e1b4baac8e2
--- /dev/null
+++ b/arch/xtensa/variants/test_kc705_be/include/variant/tie.h
@@ -0,0 +1,182 @@
+/*
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+ that extend basic Xtensa core functionality. It is customized to this
+ Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 2 /* number of coprocessors */
+#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x82 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
+
+/* Basic parameters of each coprocessor: */
+#define XCHAL_CP1_NAME "AudioEngineLX"
+#define XCHAL_CP1_IDENT AudioEngineLX
+#define XCHAL_CP1_SA_SIZE 120 /* size of state save area */
+#define XCHAL_CP1_SA_ALIGN 8 /* min alignment of save area */
+#define XCHAL_CP_ID_AUDIOENGINELX 1 /* coprocessor ID (0..7) */
+#define XCHAL_CP7_NAME "XTIOP"
+#define XCHAL_CP7_IDENT XTIOP
+#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
+#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 36
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 160 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 8 /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM 9
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, br, 4, 4, 4,0x0204, sr,4 , 16,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0)
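+
+/*
+ * Sanity check: the 9 entries above, each with asize 4, account for
+ * the 36 bytes declared as XCHAL_NCP_SA_SIZE.
+ */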
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 18
+#define XCHAL_CP1_SA_LIST(s) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_ovf_sar, 8, 4, 4,0x03F0, ur,240, 7,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_bithead, 4, 4, 4,0x03F1, ur,241, 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0,ae_ts_fts_bu_bp, 4, 4, 4,0x03F2, ur,242, 16,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_sd_no, 4, 4, 4,0x03F3, ur,243, 28,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_cbegin0, 4, 4, 4,0x03F6, ur,246, 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_cend0, 4, 4, 4,0x03F7, ur,247, 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep0, 8, 8, 8,0x0060, aep,0 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep1, 8, 8, 8,0x0061, aep,1 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep2, 8, 8, 8,0x0062, aep,2 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep3, 8, 8, 8,0x0063, aep,3 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep4, 8, 8, 8,0x0064, aep,4 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep5, 8, 8, 8,0x0065, aep,5 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep6, 8, 8, 8,0x0066, aep,6 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep7, 8, 8, 8,0x0067, aep,7 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq0, 8, 8, 8,0x0068, aeq,0 , 56,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq1, 8, 8, 8,0x0069, aeq,1 , 56,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq2, 8, 8, 8,0x006A, aeq,2 , 56,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq3, 8, 8, 8,0x006B, aeq,3 , 56,0,0,0)
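+
+/*
+ * Sanity check: 6 user registers at 4 bytes each plus 8 AEP and 4 AEQ
+ * regfile entries at 8 bytes each give 24 + 96 = 120 bytes, the
+ * XCHAL_CP1_SA_SIZE declared above.
+ */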
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,8
+/* Byte length of instruction from its first byte, per FLIX. */
+#define XCHAL_BYTE0_FORMAT_LENGTHS \
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,\
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,\
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,\
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
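+
+/*
+ * The byte table above is the op0 table expanded over the first byte's
+ * most-significant nibble (this is a big-endian core): 0x00-0x7F decode
+ * as 3-byte instructions, 0x80-0xDF as 2-byte (density) instructions,
+ * 0xE0-0xEF as 3-byte, and 0xF0-0xFF as the 8-byte (FLIX) format.
+ */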
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
diff --git a/block/Kconfig b/block/Kconfig
index eb50fd4977c2..1f2469a0123c 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -149,6 +149,18 @@ config BLK_WBT
dynamically on an algorithm loosely based on CoDel, factoring in
the realtime performance of the disk.
+config BLK_CGROUP_IOLATENCY
+ bool "Enable support for latency based cgroup IO protection"
+ depends on BLK_CGROUP=y
+ default n
+ ---help---
+ Enabling this option adds the .latency interface for IO throttling.
+ The IO controller will attempt to maintain average IO latencies below
+ the configured latency target, throttling anybody with a higher latency
+ target than the victimized group.
+
+ Note that this is an experimental interface that may change in the future.
+
config BLK_WBT_SQ
bool "Single queue writeback throttling"
default n
@@ -177,6 +189,10 @@ config BLK_DEBUG_FS
Unless you are building a kernel for a tiny system, you should
say Y here.
+config BLK_DEBUG_FS_ZONED
+ bool
+ default BLK_DEBUG_FS && BLK_DEV_ZONED
+
config BLK_SED_OPAL
bool "Logic for interfacing with Opal enabled SEDs"
---help---
diff --git a/block/Makefile b/block/Makefile
index 6a56303b9925..572b33f32c07 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
genhd.o partition-generic.o ioprio.o \
- badblocks.o partitions/
+ badblocks.o partitions/ blk-rq-qos.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o
@@ -17,6 +17,7 @@ obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
+obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
@@ -34,4 +35,5 @@ obj-$(CONFIG_BLK_MQ_RDMA) += blk-mq-rdma.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
+obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index a9e8633388f4..58c6efa9f9a9 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -913,7 +913,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
if (ret)
return ret;
- return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
+ ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
+ return ret ?: nbytes;
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 495b9ddb3355..653100fb719e 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -187,11 +187,25 @@ static const int bfq_stats_min_budgets = 194;
static const int bfq_default_max_budget = 16 * 1024;
/*
- * Async to sync throughput distribution is controlled as follows:
- * when an async request is served, the entity is charged the number
- * of sectors of the request, multiplied by the factor below
+ * When a sync request is dispatched, the queue that contains that
+ * request, and all the ancestor entities of that queue, are charged
+ * with the number of sectors of the request. In contrast, if the
+ * request is async, then the queue and its ancestor entities are
+ * charged with the number of sectors of the request, multiplied by
+ * the factor below. This throttles the bandwidth for async I/O,
+ * w.r.t. sync I/O, and it is done to counter the tendency of async
+ * writes to steal I/O throughput from reads.
+ *
+ * The current value of this parameter is the result of tuning with
+ * several hardware and software configurations. We tried to find the
+ * lowest value for which writes do not cause noticeable problems for
+ * reads. In fact, the lower this parameter, the more stable the I/O
+ * control, in the following respect. The lower this parameter is,
+ * the less the bandwidth enjoyed by a group decreases
+ * - when the group does writes, w.r.t. when it does reads;
+ * - when other groups do reads, w.r.t. when they do writes.
*/
-static const int bfq_async_charge_factor = 10;
+static const int bfq_async_charge_factor = 3;
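+
+/*
+ * Worked example: with the factor above, an async request of 8 sectors
+ * is charged as 8 * 3 = 24 sectors to its queue and ancestor entities,
+ * while a sync request of the same size is charged exactly 8.
+ */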
/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;
@@ -634,7 +648,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
* The following function returns true if every queue must receive the
* same share of the throughput (this condition is used when deciding
* whether idling may be disabled, see the comments in the function
- * bfq_bfqq_may_idle()).
+ * bfq_better_to_idle()).
*
* Such a scenario occurs when:
* 1) all active queues have the same weight,
@@ -742,8 +756,9 @@ inc_counter:
* See the comments to the function bfq_weights_tree_add() for considerations
* about overhead.
*/
-void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
- struct rb_root *root)
+void __bfq_weights_tree_remove(struct bfq_data *bfqd,
+ struct bfq_entity *entity,
+ struct rb_root *root)
{
if (!entity->weight_counter)
return;
@@ -760,6 +775,43 @@ reset_entity_pointer:
}
/*
+ * Invoke __bfq_weights_tree_remove on bfqq and all its inactive
+ * parent entities.
+ */
+void bfq_weights_tree_remove(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+{
+ struct bfq_entity *entity = bfqq->entity.parent;
+
+ __bfq_weights_tree_remove(bfqd, &bfqq->entity,
+ &bfqd->queue_weights_tree);
+
+ for_each_entity(entity) {
+ struct bfq_sched_data *sd = entity->my_sched_data;
+
+ if (sd->next_in_service || sd->in_service_entity) {
+ /*
+ * entity is still active, because either
+ * next_in_service or in_service_entity is not
+ * NULL (see the comments on the definition of
+ * next_in_service for details on why
+ * in_service_entity must be checked too).
+ *
+ * As a consequence, the weight of entity is
+ * not to be removed. In addition, if entity
+ * is active, then its parent entities are
+ * active as well, and thus their weights are
+ * not to be removed either. In the end, this
+ * loop must stop here.
+ */
+ break;
+ }
+ __bfq_weights_tree_remove(bfqd, entity,
+ &bfqd->group_weights_tree);
+ }
+}
+
+/*
* Return expired entry, or NULL to just start from scratch in rbtree.
*/
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
@@ -815,16 +867,7 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
return blk_rq_sectors(rq);
- /*
- * If there are no weight-raised queues, then amplify service
- * by just the async charge factor; otherwise amplify service
- * by twice the async charge factor, to further reduce latency
- * for weight-raised queues.
- */
- if (bfqq->bfqd->wr_busy_queues == 0)
- return blk_rq_sectors(rq) * bfq_async_charge_factor;
-
- return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
+ return blk_rq_sectors(rq) * bfq_async_charge_factor;
}
/**
@@ -1344,18 +1387,30 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
* remain unchanged after such an expiration, and the
* following statement therefore assigns to
* entity->budget the remaining budget on such an
- * expiration. For clarity, entity->service is not
- * updated on expiration in any case, and, in normal
- * operation, is reset only when bfqq is selected for
- * service (see bfq_get_next_queue).
+ * expiration.
*/
entity->budget = min_t(unsigned long,
bfq_bfqq_budget_left(bfqq),
bfqq->max_budget);
+ /*
+ * At this point, we have used entity->service to get
+ * the budget left (needed for updating
+ * entity->budget). Thus we finally can, and have to,
+ * reset entity->service. The latter must be reset
+ * because bfqq would otherwise be charged again for
+ * the service it has received during its previous
+ * service slot(s).
+ */
+ entity->service = 0;
+
return true;
}
+ /*
+ * We can finally complete expiration by setting entity->service to 0.
+ */
+ entity->service = 0;
entity->budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(bfqq->next_rq, bfqq));
bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
@@ -3233,11 +3288,42 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
ref = bfqq->ref;
__bfq_bfqq_expire(bfqd, bfqq);
+ if (ref == 1) /* bfqq is gone, no more actions on it */
+ return;
+
/* mark bfqq as waiting a request only if a bic still points to it */
- if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
+ if (!bfq_bfqq_busy(bfqq) &&
reason != BFQQE_BUDGET_TIMEOUT &&
- reason != BFQQE_BUDGET_EXHAUSTED)
+ reason != BFQQE_BUDGET_EXHAUSTED) {
bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
+ /*
+ * Not setting service to 0, because, if the next rq
+ * arrives in time, the queue will go on receiving
+ * service with this same budget (as if it never expired)
+ */
+ } else
+ entity->service = 0;
+
+ /*
+ * Reset the received-service counter for every parent entity.
+ * Differently from what happens with bfqq->entity.service,
+ * the resetting of this counter never needs to be postponed
+ * for parent entities. In fact, if bfqq has a
+ * chance to go on being served using the last, partially
+ * consumed budget, bfqq->entity.service needs to be kept,
+ * because if bfqq then actually goes on being served using
+ * the same budget, the last value of bfqq->entity.service is
+ * needed to properly decrement bfqq->entity.budget by the
+ * portion already consumed. In contrast, it is not necessary
+ * to keep entity->service for parent entities too, because
+ * the bubble up of the new value of bfqq->entity.budget will
+ * make sure that the budgets of parent entities are correct,
+ * even in case bfqq and thus parent entities go on receiving
+ * service with the same budget.
+ */
+ entity = entity->parent;
+ for_each_entity(entity)
+ entity->service = 0;
}
/*
@@ -3295,7 +3381,7 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
* issues taken into account are not trivial. We discuss these issues
* individually while introducing the variables.
*/
-static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+static bool bfq_better_to_idle(struct bfq_queue *bfqq)
{
struct bfq_data *bfqd = bfqq->bfqd;
bool rot_without_queueing =
@@ -3528,19 +3614,19 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
}
/*
- * If the in-service queue is empty but the function bfq_bfqq_may_idle
+ * If the in-service queue is empty but the function bfq_better_to_idle
* returns true, then:
* 1) the queue must remain in service and cannot be expired, and
* 2) the device must be idled to wait for the possible arrival of a new
* request for the queue.
- * See the comments on the function bfq_bfqq_may_idle for the reasons
+ * See the comments on the function bfq_better_to_idle for the reasons
* why performing device idling is the best choice to boost the throughput
- * and preserve service guarantees when bfq_bfqq_may_idle itself
+ * and preserve service guarantees when bfq_better_to_idle itself
* returns true.
*/
static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
{
- return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
+ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
}
/*
@@ -3559,8 +3645,14 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
+ /*
+ * Do not expire bfqq for budget timeout if bfqq may be about
+ * to enjoy device idling. The reason why, in this case, we
+ * prevent bfqq from expiring is the same as in the comments
+ * on the case where bfq_bfqq_must_idle() returns true, in
+ * bfq_completed_request().
+ */
if (bfq_may_expire_for_budg_timeout(bfqq) &&
- !bfq_bfqq_wait_request(bfqq) &&
!bfq_bfqq_must_idle(bfqq))
goto expire;
@@ -3620,7 +3712,7 @@ check_queue:
* may idle after their completion, then keep it anyway.
*/
if (bfq_bfqq_wait_request(bfqq) ||
- (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
+ (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
bfqq = NULL;
goto keep_queue;
}
@@ -4582,8 +4674,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
*/
bfqq->budget_timeout = jiffies;
- bfq_weights_tree_remove(bfqd, &bfqq->entity,
- &bfqd->queue_weights_tree);
+ bfq_weights_tree_remove(bfqd, bfqq);
}
now_ns = ktime_get_ns();
@@ -4637,15 +4728,39 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
* or if we want to idle in case it has no pending requests.
*/
if (bfqd->in_service_queue == bfqq) {
- if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
- bfq_arm_slice_timer(bfqd);
+ if (bfq_bfqq_must_idle(bfqq)) {
+ if (bfqq->dispatched == 0)
+ bfq_arm_slice_timer(bfqd);
+ /*
+ * If we get here, we do not expire bfqq, even
+ * if bfqq was in budget timeout or had no
+ * more requests (as controlled in the next
+ * conditional instructions). The reason for
+ * not expiring bfqq is as follows.
+ *
+ * Here bfqq->dispatched > 0 holds, but
+ * bfq_bfqq_must_idle() returned true. This
+ * implies that, even if no request arrives
+ * for bfqq before bfqq->dispatched reaches 0,
+ * bfqq will, however, not be expired on the
+ * completion event that causes bfqq->dispatched
+ * to reach zero. In contrast, on this event,
+ * bfqq will start enjoying device idling
+ * (I/O-dispatch plugging).
+ *
+ * But, if we expired bfqq here, bfqq would
+ * not have the chance to enjoy device idling
+ * when bfqq->dispatched finally reaches
+ * zero. This would expose bfqq to violation
+ * of its reserved service guarantees.
+ */
return;
} else if (bfq_may_expire_for_budg_timeout(bfqq))
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_BUDGET_TIMEOUT);
else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
(bfqq->dispatched == 0 ||
- !bfq_bfqq_may_idle(bfqq)))
+ !bfq_better_to_idle(bfqq)))
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_NO_MORE_REQUESTS);
}
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 0f712e03b035..a8a2e5aca4d4 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -827,8 +827,11 @@ struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
struct rb_root *root);
-void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
- struct rb_root *root);
+void __bfq_weights_tree_remove(struct bfq_data *bfqd,
+ struct bfq_entity *entity,
+ struct rb_root *root);
+void bfq_weights_tree_remove(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bool compensate, enum bfqq_expiration reason);
void bfq_put_queue(struct bfq_queue *bfqq);
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 4498c43245e2..ae52bff43ce4 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -130,10 +130,14 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
if (!change_without_lookup) /* lookup needed */
next_in_service = bfq_lookup_next_entity(sd, expiration);
- if (next_in_service)
- parent_sched_may_change = !sd->next_in_service ||
+ if (next_in_service) {
+ bool new_budget_triggers_change =
bfq_update_parent_budget(next_in_service);
+ parent_sched_may_change = !sd->next_in_service ||
+ new_budget_triggers_change;
+ }
+
sd->next_in_service = next_in_service;
if (!next_in_service)
@@ -499,9 +503,6 @@ static void bfq_active_insert(struct bfq_service_tree *st,
if (bfqq)
list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
- else /* bfq_group */
- bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
-
if (bfqg != bfqd->root_group)
bfqg->active_entities++;
#endif
@@ -601,10 +602,6 @@ static void bfq_active_extract(struct bfq_service_tree *st,
if (bfqq)
list_del(&bfqq->bfqq_list);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
- else /* bfq_group */
- bfq_weights_tree_remove(bfqd, entity,
- &bfqd->group_weights_tree);
-
if (bfqg != bfqd->root_group)
bfqg->active_entities--;
#endif
@@ -799,7 +796,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
if (prev_weight != new_weight) {
root = bfqq ? &bfqd->queue_weights_tree :
&bfqd->group_weights_tree;
- bfq_weights_tree_remove(bfqd, entity, root);
+ __bfq_weights_tree_remove(bfqd, entity, root);
}
entity->weight = new_weight;
/*
@@ -884,15 +881,11 @@ void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
unsigned long time_ms)
{
struct bfq_entity *entity = &bfqq->entity;
- int tot_serv_to_charge = entity->service;
- unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);
-
- if (time_ms > 0 && time_ms < timeout_ms)
- tot_serv_to_charge =
- (bfqd->bfq_max_budget * time_ms) / timeout_ms;
-
- if (tot_serv_to_charge < entity->service)
- tot_serv_to_charge = entity->service;
+ unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout);
+ unsigned long bounded_time_ms = min(time_ms, timeout_ms);
+ int serv_to_charge_for_time =
+ (bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
+ int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service);
/* Increase budget to avoid inconsistencies */
if (tot_serv_to_charge > entity->budget)
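
The new form charges in one expression what the old branches computed: convert the (bounded) occupation time into its budget equivalent, then never charge less than the service actually received. A minimal user-space sketch of the same arithmetic, with illustrative constants standing in for bfqd->bfq_max_budget and bfq_timeout:

#include <stdio.h>

/* Illustrative stand-ins, not the scheduler's real defaults. */
#define MAX_BUDGET 16384 /* service units per full timeout */
#define TIMEOUT_MS 125   /* budget timeout, in milliseconds */

static int serv_to_charge(unsigned long time_ms, int service_received)
{
	unsigned long bounded = time_ms < TIMEOUT_MS ? time_ms : TIMEOUT_MS;
	int for_time = (MAX_BUDGET * bounded) / TIMEOUT_MS;

	/* Never charge less than the service actually received. */
	return for_time > service_received ? for_time : service_received;
}

int main(void)
{
	/* 50ms of occupation, little service: charge the time equivalent. */
	printf("%d\n", serv_to_charge(50, 100));  /* 6553 */
	/* Service already above the time equivalent: charge the service. */
	printf("%d\n", serv_to_charge(50, 9000)); /* 9000 */
	return 0;
}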
@@ -971,7 +964,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
* one of its children receives a new request.
*
* Basically, this function updates the timestamps of entity and
- * inserts entity into its active tree, ater possibly extracting it
+ * inserts entity into its active tree, after possibly extracting it
* from its idle tree.
*/
static void __bfq_activate_entity(struct bfq_entity *entity,
@@ -1015,6 +1008,16 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
entity->on_st = true;
}
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+ struct bfq_data *bfqd = bfqg->bfqd;
+
+ bfq_weights_tree_add(bfqd, entity,
+ &bfqd->group_weights_tree);
+ }
+#endif
+
bfq_update_fin_time_enqueue(entity, st, backshifted);
}
@@ -1542,12 +1545,6 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
sd->in_service_entity = entity;
/*
- * Reset the accumulator of the amount of service that
- * the entity is about to receive.
- */
- entity->service = 0;
-
- /*
* If entity is no longer a candidate for next
* service, then it must be extracted from its active
* tree, so as to make sure that it won't be
@@ -1664,8 +1661,7 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd->busy_queues--;
if (!bfqq->dispatched)
- bfq_weights_tree_remove(bfqd, &bfqq->entity,
- &bfqd->queue_weights_tree);
+ bfq_weights_tree_remove(bfqd, bfqq);
if (bfqq->wr_coeff > 1)
bfqd->wr_busy_queues--;
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index add7c7c85335..67b5fb861a51 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -160,28 +160,6 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL(bio_integrity_add_page);
/**
- * bio_integrity_intervals - Return number of integrity intervals for a bio
- * @bi: blk_integrity profile for device
- * @sectors: Size of the bio in 512-byte sectors
- *
- * Description: The block layer calculates everything in 512 byte
- * sectors but integrity metadata is done in terms of the data integrity
- * interval size of the storage device. Convert the block layer sectors
- * to the appropriate number of integrity intervals.
- */
-static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return sectors >> (bi->interval_exp - 9);
-}
-
-static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
-}
-
-/**
* bio_integrity_process - Process integrity metadata for a bio
* @bio: bio to generate/verify integrity metadata for
* @proc_iter: iterator to process
diff --git a/block/bio.c b/block/bio.c
index 67eff5eddc49..b12966e415d3 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -28,9 +28,11 @@
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
+#include <linux/blk-cgroup.h>
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-rq-qos.h"
/*
* Test patch to inline a certain number of bi_io_vec's inside the bio
@@ -156,7 +158,7 @@ out:
unsigned int bvec_nr_vecs(unsigned short idx)
{
- return bvec_slabs[idx].nr_vecs;
+ return bvec_slabs[--idx].nr_vecs;
}
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
@@ -645,83 +647,6 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
EXPORT_SYMBOL(bio_clone_fast);
/**
- * bio_clone_bioset - clone a bio
- * @bio_src: bio to clone
- * @gfp_mask: allocation priority
- * @bs: bio_set to allocate from
- *
- * Clone bio. Caller will own the returned bio, but not the actual data it
- * points to. Reference count of returned bio will be one.
- */
-struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
- struct bio_set *bs)
-{
- struct bvec_iter iter;
- struct bio_vec bv;
- struct bio *bio;
-
- /*
- * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
- * bio_src->bi_io_vec to bio->bi_io_vec.
- *
- * We can't do that anymore, because:
- *
- * - The point of cloning the biovec is to produce a bio with a biovec
- * the caller can modify: bi_idx and bi_bvec_done should be 0.
- *
- * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
- * we tried to clone the whole thing bio_alloc_bioset() would fail.
- * But the clone should succeed as long as the number of biovecs we
- * actually need to allocate is fewer than BIO_MAX_PAGES.
- *
- * - Lastly, bi_vcnt should not be looked at or relied upon by code
- * that does not own the bio - reason being drivers don't use it for
- * iterating over the biovec anymore, so expecting it to be kept up
- * to date (i.e. for clones that share the parent biovec) is just
- * asking for trouble and would force extra work on
- * __bio_clone_fast() anyways.
- */
-
- bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
- if (!bio)
- return NULL;
- bio->bi_disk = bio_src->bi_disk;
- bio->bi_opf = bio_src->bi_opf;
- bio->bi_write_hint = bio_src->bi_write_hint;
- bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
- bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
-
- switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- case REQ_OP_WRITE_ZEROES:
- break;
- case REQ_OP_WRITE_SAME:
- bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
- break;
- default:
- bio_for_each_segment(bv, bio_src, iter)
- bio->bi_io_vec[bio->bi_vcnt++] = bv;
- break;
- }
-
- if (bio_integrity(bio_src)) {
- int ret;
-
- ret = bio_integrity_clone(bio, bio_src, gfp_mask);
- if (ret < 0) {
- bio_put(bio);
- return NULL;
- }
- }
-
- bio_clone_blkcg_association(bio, bio_src);
-
- return bio;
-}
-EXPORT_SYMBOL(bio_clone_bioset);
-
-/**
* bio_add_pc_page - attempt to add page to bio
* @q: the target queue
* @bio: destination bio
@@ -903,25 +828,27 @@ int bio_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL(bio_add_page);
/**
- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
* @bio: bio to add pages to
* @iter: iov iterator describing the region to be mapped
*
- * Pins as many pages from *iter and appends them to @bio's bvec array. The
+ * Pins pages from *iter and appends them to @bio's bvec array. The
* pages will have to be released using put_page() when done.
+ * For multi-segment *iter, this function only adds pages from the
+ * next non-empty segment of the iov iterator.
*/
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
- unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+ unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
- size_t offset, diff;
+ size_t offset;
ssize_t size;
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
- nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
/*
* Deep magic below: We need to walk the pinned pages backwards
@@ -934,21 +861,46 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
bio->bi_iter.bi_size += size;
bio->bi_vcnt += nr_pages;
- diff = (nr_pages * PAGE_SIZE - offset) - size;
- while (nr_pages--) {
- bv[nr_pages].bv_page = pages[nr_pages];
- bv[nr_pages].bv_len = PAGE_SIZE;
- bv[nr_pages].bv_offset = 0;
+ while (idx--) {
+ bv[idx].bv_page = pages[idx];
+ bv[idx].bv_len = PAGE_SIZE;
+ bv[idx].bv_offset = 0;
}
bv[0].bv_offset += offset;
bv[0].bv_len -= offset;
- if (diff)
- bv[bio->bi_vcnt - 1].bv_len -= diff;
+ bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
iov_iter_advance(iter, size);
return 0;
}
+
+/**
+ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+ * Pins pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
+ * The function tries, but does not guarantee, to pin as many pages as
+ * fit into the bio, or are requested in *iter, whatever is smaller.
+ * If MM encounters an error pinning the requested pages, it stops.
+ * Error is returned only if 0 pages could be pinned.
+ */
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+ unsigned short orig_vcnt = bio->bi_vcnt;
+
+ do {
+ int ret = __bio_iov_iter_get_pages(bio, iter);
+
+ if (unlikely(ret))
+ return bio->bi_vcnt > orig_vcnt ? 0 : ret;
+
+ } while (iov_iter_count(iter) && !bio_full(bio));
+
+ return 0;
+}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
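
The page-trimming arithmetic above is easiest to verify with concrete numbers. A user-space sketch assuming 4 KiB pages, with a plain array standing in for the bvec array:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* iov_iter_get_pages() pinned 5000 bytes starting 3000 bytes
	 * into the first page, so the data spans two pages. */
	unsigned long offset = 3000, size = 5000;
	unsigned long nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long len[2] = { PAGE_SIZE, PAGE_SIZE };

	/* First bvec starts at the offset... */
	len[0] -= offset;
	/* ...and the last bvec drops the unused tail of the last page. */
	len[nr_pages - 1] -= nr_pages * PAGE_SIZE - offset - size;

	printf("nr_pages=%lu len0=%lu len1=%lu total=%lu\n",
	       nr_pages, len[0], len[1], len[0] + len[1]);
	/* nr_pages=2 len0=1096 len1=3904 total=5000 */
	return 0;
}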
static void submit_bio_wait_endio(struct bio *bio)
@@ -1634,10 +1586,8 @@ void bio_set_pages_dirty(struct bio *bio)
int i;
bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
-
- if (page && !PageCompound(page))
- set_page_dirty_lock(page);
+ if (!PageCompound(bvec->bv_page))
+ set_page_dirty_lock(bvec->bv_page);
}
}
EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
@@ -1647,19 +1597,15 @@ static void bio_release_pages(struct bio *bio)
struct bio_vec *bvec;
int i;
- bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
-
- if (page)
- put_page(page);
- }
+ bio_for_each_segment_all(bvec, bio, i)
+ put_page(bvec->bv_page);
}
/*
* bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
* If they are, then fine. If, however, some pages are clean then they must
* have been written out during the direct-IO read. So we take another ref on
- * the BIO and the offending pages and re-dirty the pages in process context.
+ * the BIO and re-dirty the pages in process context.
*
* It is expected that bio_check_pages_dirty() will wholly own the BIO from
* here on. It will run one put_page() against each page and will run one
@@ -1677,78 +1623,70 @@ static struct bio *bio_dirty_list;
*/
static void bio_dirty_fn(struct work_struct *work)
{
- unsigned long flags;
- struct bio *bio;
+ struct bio *bio, *next;
- spin_lock_irqsave(&bio_dirty_lock, flags);
- bio = bio_dirty_list;
+ spin_lock_irq(&bio_dirty_lock);
+ next = bio_dirty_list;
bio_dirty_list = NULL;
- spin_unlock_irqrestore(&bio_dirty_lock, flags);
+ spin_unlock_irq(&bio_dirty_lock);
- while (bio) {
- struct bio *next = bio->bi_private;
+ while ((bio = next) != NULL) {
+ next = bio->bi_private;
bio_set_pages_dirty(bio);
bio_release_pages(bio);
bio_put(bio);
- bio = next;
}
}
void bio_check_pages_dirty(struct bio *bio)
{
struct bio_vec *bvec;
- int nr_clean_pages = 0;
+ unsigned long flags;
int i;
bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
-
- if (PageDirty(page) || PageCompound(page)) {
- put_page(page);
- bvec->bv_page = NULL;
- } else {
- nr_clean_pages++;
- }
+ if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
+ goto defer;
}
- if (nr_clean_pages) {
- unsigned long flags;
-
- spin_lock_irqsave(&bio_dirty_lock, flags);
- bio->bi_private = bio_dirty_list;
- bio_dirty_list = bio;
- spin_unlock_irqrestore(&bio_dirty_lock, flags);
- schedule_work(&bio_dirty_work);
- } else {
- bio_put(bio);
- }
+ bio_release_pages(bio);
+ bio_put(bio);
+ return;
+defer:
+ spin_lock_irqsave(&bio_dirty_lock, flags);
+ bio->bi_private = bio_dirty_list;
+ bio_dirty_list = bio;
+ spin_unlock_irqrestore(&bio_dirty_lock, flags);
+ schedule_work(&bio_dirty_work);
}
EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
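
The reworked paths keep the long-standing shape of this mechanism: bios whose pages are all still dirty are released inline, while any clean page defers the whole bio onto a singly linked list (threaded through ->bi_private) that a worker drains in process context, where set_page_dirty_lock() may sleep. A condensed user-space analogue of that defer-to-worker shape, with a mutex in place of the irq-safe spinlock and a thread in place of the workqueue:

#include <pthread.h>
#include <stdio.h>

struct item {
	struct item *next; /* plays the role of bio->bi_private */
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *deferred; /* plays the role of bio_dirty_list */

/* Worker: detach the whole list under the lock, process it unlocked. */
static void *worker(void *arg)
{
	struct item *it, *next;

	pthread_mutex_lock(&list_lock);
	next = deferred;
	deferred = NULL;
	pthread_mutex_unlock(&list_lock);

	while ((it = next) != NULL) {
		next = it->next;
		printf("processing item %d in process context\n", it->id);
	}
	return NULL;
}

int main(void)
{
	struct item a = { .id = 1 }, b = { .id = 2 };
	pthread_t t;

	/* Producers push onto the head, like bio_check_pages_dirty(). */
	pthread_mutex_lock(&list_lock);
	a.next = deferred; deferred = &a;
	b.next = deferred; deferred = &b;
	pthread_mutex_unlock(&list_lock);

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(&t, NULL);
	return 0;
}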
-void generic_start_io_acct(struct request_queue *q, int rw,
+void generic_start_io_acct(struct request_queue *q, int op,
unsigned long sectors, struct hd_struct *part)
{
+ const int sgrp = op_stat_group(op);
int cpu = part_stat_lock();
part_round_stats(q, cpu, part);
- part_stat_inc(cpu, part, ios[rw]);
- part_stat_add(cpu, part, sectors[rw], sectors);
- part_inc_in_flight(q, part, rw);
+ part_stat_inc(cpu, part, ios[sgrp]);
+ part_stat_add(cpu, part, sectors[sgrp], sectors);
+ part_inc_in_flight(q, part, op_is_write(op));
part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);
-void generic_end_io_acct(struct request_queue *q, int rw,
+void generic_end_io_acct(struct request_queue *q, int req_op,
struct hd_struct *part, unsigned long start_time)
{
unsigned long duration = jiffies - start_time;
+ const int sgrp = op_stat_group(req_op);
int cpu = part_stat_lock();
- part_stat_add(cpu, part, ticks[rw], duration);
+ part_stat_add(cpu, part, ticks[sgrp], duration);
part_round_stats(q, cpu, part);
- part_dec_in_flight(q, part, rw);
+ part_dec_in_flight(q, part, op_is_write(req_op));
part_stat_unlock();
}
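
The switch from rq_data_dir() to op_stat_group() indexes the per-partition counters by a stat group that separates discards from plain writes, while the in-flight counter still only distinguishes reads from writes. A rough user-space model of that mapping; the enum names mirror the kernel's stat groups, but the helper body here is illustrative rather than the real header definition:

#include <stdio.h>
#include <stdbool.h>

enum { STAT_READ, STAT_WRITE, STAT_DISCARD, NR_STAT_GROUPS };

struct part_stats {
	unsigned long long ios[NR_STAT_GROUPS];
};

/* Illustrative stand-in for op_stat_group(). */
static int stat_group(bool is_write, bool is_discard)
{
	if (is_discard)
		return STAT_DISCARD;
	return is_write ? STAT_WRITE : STAT_READ;
}

int main(void)
{
	struct part_stats p = { { 0 } };

	p.ios[stat_group(true, false)]++; /* a plain write */
	p.ios[stat_group(true, true)]++;  /* a discard (also a write op) */

	printf("writes=%llu discards=%llu\n",
	       p.ios[STAT_WRITE], p.ios[STAT_DISCARD]);
	return 0;
}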
@@ -1807,6 +1745,9 @@ again:
if (!bio_integrity_endio(bio))
return;
+ if (bio->bi_disk)
+ rq_qos_done_bio(bio->bi_disk->queue, bio);
+
/*
* Need to have a real endio function for chained bios, otherwise
* various corner cases will break (like stacking block devices that
@@ -1866,6 +1807,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
bio_integrity_trim(split);
bio_advance(bio, split->bi_iter.bi_size);
+ bio->bi_iter.bi_done = 0;
if (bio_flagged(bio, BIO_TRACE_COMPLETION))
bio_set_flag(split, BIO_TRACE_COMPLETION);
@@ -2014,6 +1956,30 @@ EXPORT_SYMBOL(bioset_init_from_src);
#ifdef CONFIG_BLK_CGROUP
+#ifdef CONFIG_MEMCG
+/**
+ * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
+ * @bio: target bio
+ * @page: the page to lookup the blkcg from
+ *
+ * Associate @bio with the blkcg from @page's owning memcg. This works like
+ * every other associate function wrt references.
+ */
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
+{
+ struct cgroup_subsys_state *blkcg_css;
+
+ if (unlikely(bio->bi_css))
+ return -EBUSY;
+ if (!page->mem_cgroup)
+ return 0;
+ blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
+ &io_cgrp_subsys);
+ bio->bi_css = blkcg_css;
+ return 0;
+}
+#endif /* CONFIG_MEMCG */
+
/**
* bio_associate_blkcg - associate a bio with the specified blkcg
* @bio: target bio
@@ -2037,6 +2003,24 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/**
+ * bio_associate_blkg - associate a bio with the specified blkg
+ * @bio: target bio
+ * @blkg: the blkg to associate
+ *
+ * Associate @bio with the blkg specified by @blkg. This is the queue specific
+ * blkcg information associated with the @bio, a reference will be taken on the
+ * @blkg and will be freed when the bio is freed.
+ */
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
+{
+ if (unlikely(bio->bi_blkg))
+ return -EBUSY;
+ blkg_get(blkg);
+ bio->bi_blkg = blkg;
+ return 0;
+}
+
+/**
* bio_disassociate_task - undo bio_associate_current()
* @bio: target bio
*/
@@ -2050,6 +2034,10 @@ void bio_disassociate_task(struct bio *bio)
css_put(bio->bi_css);
bio->bi_css = NULL;
}
+ if (bio->bi_blkg) {
+ blkg_put(bio->bi_blkg);
+ bio->bi_blkg = NULL;
+ }
}
/**
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index eb85cb87c40f..694595b29b8f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -27,6 +27,7 @@
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
+#include <linux/tracehook.h>
#include "blk.h"
#define MAX_KEY_LEN 100
@@ -50,6 +51,8 @@ static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
+static bool blkcg_debug_stats = false;
+
static bool blkcg_policy_enabled(struct request_queue *q,
const struct blkcg_policy *pol)
{
@@ -564,6 +567,7 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
[BLKG_RWSTAT_WRITE] = "Write",
[BLKG_RWSTAT_SYNC] = "Sync",
[BLKG_RWSTAT_ASYNC] = "Async",
+ [BLKG_RWSTAT_DISCARD] = "Discard",
};
const char *dname = blkg_dev_name(pd->blkg);
u64 v;
@@ -577,7 +581,8 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
(unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
- atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
+ atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
+ atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
return v;
}
@@ -954,30 +959,77 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
const char *dname;
+ char *buf;
struct blkg_rwstat rwstat;
- u64 rbytes, wbytes, rios, wios;
+ u64 rbytes, wbytes, rios, wios, dbytes, dios;
+ size_t size = seq_get_buf(sf, &buf), off = 0;
+ int i;
+ bool has_stats = false;
dname = blkg_dev_name(blkg);
if (!dname)
continue;
+ /*
+ * Hooray string manipulation: scnprintf() returns the number of
+ * characters written, NOT INCLUDING the trailing \0. The next
+ * write must start at that \0, so we advance off by exactly the
+ * returned count.
+ */
+ off += scnprintf(buf+off, size-off, "%s ", dname);
+
spin_lock_irq(blkg->q->queue_lock);
rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
offsetof(struct blkcg_gq, stat_bytes));
rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
+ dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
offsetof(struct blkcg_gq, stat_ios));
rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
+ dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
spin_unlock_irq(blkg->q->queue_lock);
- if (rbytes || wbytes || rios || wios)
- seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
- dname, rbytes, wbytes, rios, wios);
+ if (rbytes || wbytes || rios || wios) {
+ has_stats = true;
+ off += scnprintf(buf+off, size-off,
+ "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
+ rbytes, wbytes, rios, wios,
+ dbytes, dios);
+ }
+
+ if (!blkcg_debug_stats)
+ goto next;
+
+ if (atomic_read(&blkg->use_delay)) {
+ has_stats = true;
+ off += scnprintf(buf+off, size-off,
+ " use_delay=%d delay_nsec=%llu",
+ atomic_read(&blkg->use_delay),
+ (unsigned long long)atomic64_read(&blkg->delay_nsec));
+ }
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ size_t written;
+
+ if (!blkg->pd[i] || !pol->pd_stat_fn)
+ continue;
+
+ written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
+ if (written)
+ has_stats = true;
+ off += written;
+ }
+next:
+ if (has_stats) {
+ off += scnprintf(buf+off, size-off, "\n");
+ seq_commit(sf, off);
+ }
}
rcu_read_unlock();
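
This is the usual bounded-append idiom: keep a running offset and always pass buf+off and size-off, so one long line can never overrun the seq_file buffer. A user-space equivalent, with a small stand-in for the kernel's scnprintf() (plain snprintf() can return more than the space available, so it needs clamping):

#include <stdarg.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's scnprintf(): returns the number
 * of characters actually written, never more than size - 1. */
static size_t my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return (size_t)n < size ? (size_t)n : size - 1;
}

int main(void)
{
	char buf[64];
	size_t size = sizeof(buf), off = 0;

	off += my_scnprintf(buf + off, size - off, "%s ", "sda");
	off += my_scnprintf(buf + off, size - off,
			    "rbytes=%llu wbytes=%llu", 4096ULL, 8192ULL);
	printf("%s\n", buf); /* "sda rbytes=4096 wbytes=8192" */
	return 0;
}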
@@ -1191,6 +1243,14 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();
+ ret = blk_iolatency_init(q);
+ if (ret) {
+ spin_lock_irq(q->queue_lock);
+ blkg_destroy_all(q);
+ spin_unlock_irq(q->queue_lock);
+ return ret;
+ }
+
ret = blk_throtl_init(q);
if (ret) {
spin_lock_irq(q->queue_lock);
@@ -1288,6 +1348,13 @@ static void blkcg_bind(struct cgroup_subsys_state *root_css)
mutex_unlock(&blkcg_pol_mutex);
}
+static void blkcg_exit(struct task_struct *tsk)
+{
+ if (tsk->throttle_queue)
+ blk_put_queue(tsk->throttle_queue);
+ tsk->throttle_queue = NULL;
+}
+
struct cgroup_subsys io_cgrp_subsys = {
.css_alloc = blkcg_css_alloc,
.css_offline = blkcg_css_offline,
@@ -1297,6 +1364,7 @@ struct cgroup_subsys io_cgrp_subsys = {
.dfl_cftypes = blkcg_files,
.legacy_cftypes = blkcg_legacy_files,
.legacy_name = "blkio",
+ .exit = blkcg_exit,
#ifdef CONFIG_MEMCG
/*
* This ensures that, if available, memcg is automatically enabled
@@ -1547,3 +1615,209 @@ out_unlock:
mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
+
+/*
+ * Scale the accumulated delay based on how long it has been since we updated
+ * the delay. This is called both when we are adding delay (in case it has
+ * been a while since delay was last added) and when we are checking whether
+ * a task needs to be delayed, to account for delay that has already elapsed.
+ */
+static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
+{
+ u64 old = atomic64_read(&blkg->delay_start);
+
+ /*
+ * We only want to scale down every second. The idea here is that we
+ * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
+ * time window. We only want to throttle tasks for recent delay that
+ * has occurred, in 1-second windows, since that is the maximum amount of
+ * time a task can be throttled for. We save the current delay window in
+ * blkg->last_delay so we know what amount is still left to be charged
+ * to the blkg from this point onward. blkg->last_use keeps track of
+ * the use_delay counter. The idea is if we're unthrottling the blkg we
+ * are ok with whatever is happening now, and we can take away more of
+ * the accumulated delay as we've already throttled enough that
+ * everybody is happy with their IO latencies.
+ */
+ if (time_before64(old + NSEC_PER_SEC, now) &&
+ atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
+ u64 cur = atomic64_read(&blkg->delay_nsec);
+ u64 sub = min_t(u64, blkg->last_delay, now - old);
+ int cur_use = atomic_read(&blkg->use_delay);
+
+ /*
+ * We've been unthrottled, subtract a larger chunk of our
+ * accumulated delay.
+ */
+ if (cur_use < blkg->last_use)
+ sub = max_t(u64, sub, blkg->last_delay >> 1);
+
+ /*
+ * This shouldn't happen, but handle it anyway. Our delay_nsec
+ * should only ever be growing except here where we subtract out
+ * min(last_delay, 1 second), but lord knows bugs happen and I'd
+ * rather not end up with negative numbers.
+ */
+ if (unlikely(cur < sub)) {
+ atomic64_set(&blkg->delay_nsec, 0);
+ blkg->last_delay = 0;
+ } else {
+ atomic64_sub(sub, &blkg->delay_nsec);
+ blkg->last_delay = cur - sub;
+ }
+ blkg->last_use = cur_use;
+ }
+}
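
Concretely, suppose a task is checked 1.5 seconds after delay_start, with delay_nsec at 800ms, last_delay at 600ms and an unchanged use_delay count: sub becomes min(600ms, 1.5s) = 600ms, delay_nsec drops to 200ms, and last_delay records the 200ms still chargeable. A sketch of just that arithmetic:

#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
	unsigned long long elapsed = 1500 * NSEC_PER_MSEC; /* now - old */
	unsigned long long delay   =  800 * NSEC_PER_MSEC; /* delay_nsec */
	unsigned long long last    =  600 * NSEC_PER_MSEC; /* last_delay */

	unsigned long long sub = last < elapsed ? last : elapsed;

	if (delay < sub) {
		delay = 0;
		last = 0;
	} else {
		delay -= sub;
		last = delay; /* what is still left to charge */
	}
	printf("delay=%llums last=%llums\n",
	       delay / NSEC_PER_MSEC, last / NSEC_PER_MSEC); /* 200 200 */
	return 0;
}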
+
+/*
+ * This is called when we want to actually walk up the hierarchy and check to
+ * see if we need to throttle, and then actually throttle if there is some
+ * accumulated delay. This should only be called upon return to user space so
+ * we're not holding some lock that would induce a priority inversion.
+ */
+static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
+{
+ u64 now = ktime_to_ns(ktime_get());
+ u64 exp;
+ u64 delay_nsec = 0;
+ int tok;
+
+ while (blkg->parent) {
+ if (atomic_read(&blkg->use_delay)) {
+ blkcg_scale_delay(blkg, now);
+ delay_nsec = max_t(u64, delay_nsec,
+ atomic64_read(&blkg->delay_nsec));
+ }
+ blkg = blkg->parent;
+ }
+
+ if (!delay_nsec)
+ return;
+
+ /*
+ * Let's not sleep for all eternity if we've amassed a huge delay.
+ * Swapping or metadata IO can accumulate 10's of seconds worth of
+ * delay, and we want userspace to be able to do _something_, so cap the
+ * delay at 250ms. If there are tens of seconds worth of delay then the
+ * task will be delayed for 250ms on every syscall.
+ */
+ delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
+
+ /*
+ * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
+ * that hasn't landed upstream yet. Once that stuff is in place we need
+ * to do a psi_memstall_enter/leave if memdelay is set.
+ */
+
+ exp = ktime_add_ns(now, delay_nsec);
+ tok = io_schedule_prepare();
+ do {
+ __set_current_state(TASK_KILLABLE);
+ if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
+ break;
+ } while (!fatal_signal_pending(current));
+ io_schedule_finish(tok);
+}
+
+/**
+ * blkcg_maybe_throttle_current - throttle the current task if it has been marked
+ *
+ * This is only called if we've been marked with set_notify_resume(). Obviously
+ * we can be set_notify_resume() for reasons other than blkcg throttling, so we
+ * check to see if current->throttle_queue is set and if not this doesn't do
+ * anything. This should only ever be called by the resume code; it's not
+ * meant to be called willy-nilly, as it will actually do the work to
+ * throttle the task if it is set up for throttling.
+ */
+void blkcg_maybe_throttle_current(void)
+{
+ struct request_queue *q = current->throttle_queue;
+ struct cgroup_subsys_state *css;
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+ bool use_memdelay = current->use_memdelay;
+
+ if (!q)
+ return;
+
+ current->throttle_queue = NULL;
+ current->use_memdelay = false;
+
+ rcu_read_lock();
+ css = kthread_blkcg();
+ if (css)
+ blkcg = css_to_blkcg(css);
+ else
+ blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
+
+ if (!blkcg)
+ goto out;
+ blkg = blkg_lookup(blkcg, q);
+ if (!blkg)
+ goto out;
+ blkg = blkg_try_get(blkg);
+ if (!blkg)
+ goto out;
+ rcu_read_unlock();
+
+ blkcg_maybe_throttle_blkg(blkg, use_memdelay);
+ blkg_put(blkg);
+ blk_put_queue(q);
+ return;
+out:
+ rcu_read_unlock();
+ blk_put_queue(q);
+}
+EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);
+
+/**
+ * blkcg_schedule_throttle - this task needs to check for throttling
+ * @q: the request queue IO was submitted on
+ * @use_memdelay: do we charge this to memory delay for PSI
+ *
+ * This is called by the IO controller when we know there's delay accumulated
+ * for the blkg for this task. We do not pass the blkg because there are places
+ * we call this that may not have that information, the swapping code for
+ * instance will only have a request_queue at that point. This sets the
+ * notify_resume for the task to check and see if it requires throttling before
+ * returning to user space.
+ *
+ * We will only schedule once per syscall. You can call this over and over
+ * again and it will only do the check once upon return to user space, and only
+ * throttle once. If the task needs to be throttled again it'll need to be
+ * re-set the next time we see the task.
+ */
+void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
+{
+ if (unlikely(current->flags & PF_KTHREAD))
+ return;
+
+ if (!blk_get_queue(q))
+ return;
+
+ if (current->throttle_queue)
+ blk_put_queue(current->throttle_queue);
+ current->throttle_queue = q;
+ if (use_memdelay)
+ current->use_memdelay = use_memdelay;
+ set_notify_resume(current);
+}
+EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
+
+/**
+ * blkcg_add_delay - add delay to this blkg
+ * @blkg: target blkg
+ * @now: the current time in nanoseconds
+ * @delta: how many nanoseconds of delay to add
+ *
+ * Charge @delta to the blkg's current delay accumulation. This is used to
+ * throttle tasks if an IO controller thinks we need more throttling.
+ */
+void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
+{
+ blkcg_scale_delay(blkg, now);
+ atomic64_add(delta, &blkg->delay_nsec);
+}
+EXPORT_SYMBOL_GPL(blkcg_add_delay);
+
+module_param(blkcg_debug_stats, bool, 0644);
+MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
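
Taken together, the intended calling convention for an IO controller looks roughly like the sketch below. It uses only the two entry points added above; my_controller_note_slow_io and its latency parameters are a hypothetical caller, not code from this series:

/* Sketch of a controller's completion path: 'target' is the latency
 * the controller wants, 'actual' what the IO achieved, 'now' is in
 * nanoseconds, all tracked however the controller sees fit. */
static void my_controller_note_slow_io(struct blkcg_gq *blkg,
				       struct request_queue *q,
				       u64 now, u64 target, u64 actual)
{
	if (actual <= target)
		return;

	/* Charge the overage to the blkg... */
	blkcg_add_delay(blkg, now, actual - target);

	/* ...and make sure the offending task checks for throttling
	 * on its way back to user space. */
	blkcg_schedule_throttle(q, false);
}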
diff --git a/block/blk-core.c b/block/blk-core.c
index f84a9b7b6f5a..dee56c282efb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -42,7 +42,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
-#include "blk-wbt.h"
+#include "blk-rq-qos.h"
#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
@@ -715,6 +715,35 @@ void blk_set_queue_dying(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
+void blk_exit_queue(struct request_queue *q)
+{
+ /*
+ * Since the I/O scheduler exit code may access cgroup information,
+ * perform I/O scheduler exit before disassociating from the block
+ * cgroup controller.
+ */
+ if (q->elevator) {
+ ioc_clear_queue(q);
+ elevator_exit(q, q->elevator);
+ q->elevator = NULL;
+ }
+
+ /*
+ * Remove all references to @q from the block cgroup controller before
+ * restoring @q->queue_lock to avoid that restoring this pointer causes
+ * e.g. blkcg_print_blkgs() to crash.
+ */
+ blkcg_exit_queue(q);
+
+ /*
+ * Since the cgroup code may dereference the @q->backing_dev_info
+ * pointer, only decrease its reference count after having removed the
+ * association with the block cgroup controller.
+ */
+ bdi_put(q->backing_dev_info);
+}
+
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -762,9 +791,13 @@ void blk_cleanup_queue(struct request_queue *q)
* make sure all in-progress dispatch are completed because
* blk_freeze_queue() can only complete all requests, and
* dispatch may still be in-progress since we dispatch requests
- * from more than one contexts
+ * from more than one contexts.
+ *
+ * No need to quiesce queue if it isn't initialized yet since
+ * blk_freeze_queue() should be enough for cases of passthrough
+ * request.
*/
- if (q->mq_ops)
+ if (q->mq_ops && blk_queue_init_done(q))
blk_mq_quiesce_queue(q);
/* for synchronous bio-based driver finish in-flight integrity i/o */
@@ -780,30 +813,7 @@ void blk_cleanup_queue(struct request_queue *q)
*/
WARN_ON_ONCE(q->kobj.state_in_sysfs);
- /*
- * Since the I/O scheduler exit code may access cgroup information,
- * perform I/O scheduler exit before disassociating from the block
- * cgroup controller.
- */
- if (q->elevator) {
- ioc_clear_queue(q);
- elevator_exit(q, q->elevator);
- q->elevator = NULL;
- }
-
- /*
- * Remove all references to @q from the block cgroup controller before
- * restoring @q->queue_lock to avoid that restoring this pointer causes
- * e.g. blkcg_print_blkgs() to crash.
- */
- blkcg_exit_queue(q);
-
- /*
- * Since the cgroup code may dereference the @q->backing_dev_info
- * pointer, only decrease its reference count after having removed the
- * association with the block cgroup controller.
- */
- bdi_put(q->backing_dev_info);
+ blk_exit_queue(q);
if (q->mq_ops)
blk_mq_free_queue(q);
@@ -1026,7 +1036,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, NULL);
- INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
@@ -1180,6 +1189,7 @@ out_exit_flush_rq:
q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
blk_free_flush_queue(q->fq);
+ q->fq = NULL;
return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1641,7 +1651,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
- wbt_requeue(q->rq_wb, rq);
+ rq_qos_requeue(q, rq);
if (rq->rq_flags & RQF_QUEUED)
blk_queue_end_tag(q, rq);
@@ -1748,7 +1758,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);
- wbt_done(q->rq_wb, req);
+ rq_qos_done(q, req);
/*
* Request may not have originated from ll_rw_blk. if not,
@@ -1982,7 +1992,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
int where = ELEVATOR_INSERT_SORT;
struct request *req, *free;
unsigned int request_count = 0;
- unsigned int wb_acct;
/*
* low level driver can indicate that it wants pages above a
@@ -2040,7 +2049,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}
get_rq:
- wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
+ rq_qos_throttle(q, bio, q->queue_lock);
/*
* Grab a free request. This is might sleep but can not fail.
@@ -2050,7 +2059,7 @@ get_rq:
req = get_request(q, bio->bi_opf, bio, 0, GFP_NOIO);
if (IS_ERR(req)) {
blk_queue_exit(q);
- __wbt_done(q->rq_wb, wb_acct);
+ rq_qos_cleanup(q, bio);
if (PTR_ERR(req) == -ENOMEM)
bio->bi_status = BLK_STS_RESOURCE;
else
@@ -2059,7 +2068,7 @@ get_rq:
goto out_unlock;
}
- wbt_track(req, wb_acct);
+ rq_qos_track(q, req, bio);
/*
* After dropping the lock and possibly sleeping here, our request
@@ -2152,14 +2161,17 @@ static inline bool should_fail_request(struct hd_struct *part,
static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
- if (part->policy && op_is_write(bio_op(bio))) {
+ const int op = bio_op(bio);
+
+ if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
char b[BDEVNAME_SIZE];
- printk(KERN_ERR
+ WARN_ONCE(1,
"generic_make_request: Trying to write "
"to read-only block-device %s (partno %d)\n",
bio_devname(bio, b), part->partno);
- return true;
+ /* Older lvm-tools actually trigger this */
+ return false;
}
return false;
@@ -2699,13 +2711,13 @@ EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
void blk_account_io_completion(struct request *req, unsigned int bytes)
{
if (blk_do_io_stat(req)) {
- const int rw = rq_data_dir(req);
+ const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
int cpu;
cpu = part_stat_lock();
part = req->part;
- part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+ part_stat_add(cpu, part, sectors[sgrp], bytes >> 9);
part_stat_unlock();
}
}
@@ -2719,7 +2731,7 @@ void blk_account_io_done(struct request *req, u64 now)
*/
if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
unsigned long duration;
- const int rw = rq_data_dir(req);
+ const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
int cpu;
@@ -2727,10 +2739,10 @@ void blk_account_io_done(struct request *req, u64 now)
cpu = part_stat_lock();
part = req->part;
- part_stat_inc(cpu, part, ios[rw]);
- part_stat_add(cpu, part, ticks[rw], duration);
+ part_stat_inc(cpu, part, ios[sgrp]);
+ part_stat_add(cpu, part, ticks[sgrp], duration);
part_round_stats(req->q, cpu, part);
- part_dec_in_flight(req->q, part, rw);
+ part_dec_in_flight(req->q, part, rq_data_dir(req));
hd_struct_put(part);
part_stat_unlock();
@@ -2750,9 +2762,9 @@ static bool blk_pm_allow_request(struct request *rq)
return rq->rq_flags & RQF_PM;
case RPM_SUSPENDED:
return false;
+ default:
+ return true;
}
-
- return true;
}
#else
static bool blk_pm_allow_request(struct request *rq)
@@ -2979,7 +2991,7 @@ void blk_start_request(struct request *req)
req->throtl_size = blk_rq_sectors(req);
#endif
req->rq_flags |= RQF_STATS;
- wbt_issue(req->q->rq_wb, req);
+ rq_qos_issue(req->q, req);
}
BUG_ON(blk_rq_is_complete(req));
@@ -3052,6 +3064,10 @@ EXPORT_SYMBOL_GPL(blk_steal_bios);
* Passing the result of blk_rq_bytes() as @nr_bytes guarantees
* %false return from this function.
*
+ * Note:
+ * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
+ * blk_rq_bytes() and in blk_update_request().
+ *
* Return:
* %false - this request doesn't have any more data
* %true - this request has more data
@@ -3199,7 +3215,7 @@ void blk_finish_request(struct request *req, blk_status_t error)
blk_account_io_done(req, now);
if (req->end_io) {
- wbt_done(req->q->rq_wb, req);
+ rq_qos_done(q, req);
req->end_io(req, error);
} else {
if (blk_bidi_rq(req))
@@ -3762,9 +3778,11 @@ EXPORT_SYMBOL(blk_finish_plug);
*/
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
- /* not support for RQF_PM and ->rpm_status in blk-mq yet */
- if (q->mq_ops)
+ /* Don't enable runtime PM for blk-mq until it is ready */
+ if (q->mq_ops) {
+ pm_runtime_disable(dev);
return;
+ }
q->dev = dev;
q->rpm_status = RPM_ACTIVE;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index f23311e4b201..01580f88fcb3 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -278,7 +278,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
atomic_set(&ioc->nr_tasks, 1);
atomic_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
- INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
+ INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
INIT_HLIST_HEAD(&ioc->icq_list);
INIT_WORK(&ioc->release_work, ioc_release_fn);
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
new file mode 100644
index 000000000000..19923f8a029d
--- /dev/null
+++ b/block/blk-iolatency.c
@@ -0,0 +1,955 @@
+/*
+ * Block rq-qos base io controller
+ *
+ * This works similar to wbt with a few exceptions
+ *
+ * - It's bio based, so the latency covers the whole block layer in addition to
+ * the actual io.
+ * - We will throttle all IO that comes in here if we need to.
+ * - We use the mean latency over the 100ms window. This is because writes can
+ * be particularly fast, which could give us a false sense of the impact of
+ * other workloads on our protected workload.
+ * - By default there's no throttling, we set the queue_depth to UINT_MAX so
+ * that we can have as many outstanding bio's as we're allowed to. Only at
+ * throttle time do we pay attention to the actual queue depth.
+ *
+ * The hierarchy works like the cpu controller does, we track the latency at
+ * every configured node, and each configured node has its own independent
+ * queue depth. This means that we only care about our latency targets at the
+ * peer level. Some group at the bottom of the hierarchy isn't going to affect
+ * a group at the end of some other path if we're only configured at leaf level.
+ *
+ * Consider the following
+ *
+ * root blkg
+ * / \
+ * fast (target=5ms) slow (target=10ms)
+ * / \ / \
+ * a b normal(15ms) unloved
+ *
+ * "a" and "b" have no target, but their combined io under "fast" cannot exceed
+ * an average latency of 5ms. If it does then we will throttle the "slow"
+ * group. In the case of "normal", if it exceeds its 15ms target, we will
+ * throttle "unloved", but nobody else.
+ *
+ * In this example "fast", "slow", and "normal" will be the only groups actually
+ * accounting their io latencies. We have to walk up the hierarchy to the root
+ * on every submit and complete so we can do the appropriate stat recording and
+ * adjust the queue depth of ourselves if needed.
+ *
+ * There are 2 ways we throttle IO.
+ *
+ * 1) Queue depth throttling. As we throttle down we will adjust the maximum
+ * number of IO's we're allowed to have in flight. This starts at UINT_MAX down
+ * to 1. If the group is only ever submitting IO for itself then this is the
+ * only way we throttle.
+ *
+ * 2) Induced delay throttling. This is for the case that a group is generating
+ * IO that has to be issued by the root cg to avoid priority inversion. So think
+ * REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
+ * of work done for us on behalf of the root cg and are being asked to scale
+ * down more, then we induce a latency at userspace return. We accumulate the
+ * total amount of time we need to be punished by doing
+ *
+ * total_time += min_lat_nsec - actual_io_completion
+ *
+ * and then at throttle time will do
+ *
+ * throttle_time = min(total_time, NSEC_PER_SEC)
+ *
+ * This induced delay will throttle back the activity that is generating the
+ * root cg issued io's, whether that's some metadata intensive operation or the
+ * group is using so much memory that it is pushing us into swap.
+ *
+ * Copyright (C) 2018 Josef Bacik
+ */
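
The punishment bookkeeping described above reduces to two lines of arithmetic. A user-space sketch with made-up latencies (a 2ms target and three fast root-issued completions):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long total_time = 0;
	unsigned long long min_lat_nsec = 2000000; /* 2ms target */

	/* Each root-issued IO that completed faster than the target
	 * adds its headroom to the punishment total. */
	unsigned long long completions[] = { 500000, 800000, 300000 };
	for (int i = 0; i < 3; i++)
		total_time += min_lat_nsec - completions[i];

	/* At throttle time the induced sleep is capped at one second. */
	unsigned long long throttle_time =
		total_time < NSEC_PER_SEC ? total_time : NSEC_PER_SEC;
	printf("throttle for %llu ns\n", throttle_time); /* 4400000 */
	return 0;
}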
+#include <linux/kernel.h>
+#include <linux/blk_types.h>
+#include <linux/backing-dev.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/memcontrol.h>
+#include <linux/sched/loadavg.h>
+#include <linux/sched/signal.h>
+#include <trace/events/block.h>
+#include "blk-rq-qos.h"
+#include "blk-stat.h"
+
+#define DEFAULT_SCALE_COOKIE 1000000U
+
+static struct blkcg_policy blkcg_policy_iolatency;
+struct iolatency_grp;
+
+struct blk_iolatency {
+ struct rq_qos rqos;
+ struct timer_list timer;
+ atomic_t enabled;
+};
+
+static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
+{
+ return container_of(rqos, struct blk_iolatency, rqos);
+}
+
+static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
+{
+ return atomic_read(&blkiolat->enabled) > 0;
+}
+
+struct child_latency_info {
+ spinlock_t lock;
+
+ /* Last time we adjusted the scale of everybody. */
+ u64 last_scale_event;
+
+ /* The latency that we missed. */
+ u64 scale_lat;
+
+ /* Total io's from all of our children for the last summation. */
+ u64 nr_samples;
+
+ /* The guy who actually changed the latency numbers. */
+ struct iolatency_grp *scale_grp;
+
+ /* Cookie to tell if we need to scale up or down. */
+ atomic_t scale_cookie;
+};
+
+struct iolatency_grp {
+ struct blkg_policy_data pd;
+ struct blk_rq_stat __percpu *stats;
+ struct blk_iolatency *blkiolat;
+ struct rq_depth rq_depth;
+ struct rq_wait rq_wait;
+ atomic64_t window_start;
+ atomic_t scale_cookie;
+ u64 min_lat_nsec;
+ u64 cur_win_nsec;
+
+ /* total running average of our io latency. */
+ u64 lat_avg;
+
+ /* Our current number of IO's for the last summation. */
+ u64 nr_samples;
+
+ struct child_latency_info child_lat;
+};
+
+#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
+#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
+/*
+ * These are the constants used to fake the fixed-point moving average
+ * calculation just like load average. The call to CALC_LOAD folds
+ * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
+ * window size is bucketed to try to approximately calculate average
+ * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
+ * elapse immediately. Note, windows only elapse with IO activity. Idle
+ * periods extend the most recent window.
+ */
+#define BLKIOLATENCY_NR_EXP_FACTORS 5
+#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
+ (BLKIOLATENCY_NR_EXP_FACTORS - 1))
+static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
+ 2045, // exp(1/600) - 600 samples
+ 2039, // exp(1/240) - 240 samples
+ 2031, // exp(1/120) - 120 samples
+ 2023, // exp(1/80) - 80 samples
+ 2014, // exp(1/60) - 60 samples
+};
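
One averaging step shows the scale these factors set: with FIXED_1 = 2048 and the exp(1/600) factor, a single window moves the average only about 0.15% of the way toward the new sample. The helper below performs the same fold as the CALC_LOAD macro from <linux/sched/loadavg.h>:

#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1 << FSHIFT) /* 2048 */

/* Same fixed-point fold as the kernel's CALC_LOAD macro. */
static unsigned long long calc_load(unsigned long long load,
				    unsigned long long exp_factor,
				    unsigned long long sample)
{
	load *= exp_factor;
	load += sample * (FIXED_1 - exp_factor);
	return load >> FSHIFT;
}

int main(void)
{
	unsigned long long lat_avg = 1000000; /* 1ms running average */
	unsigned long long sample = 5000000;  /* one slow 5ms window */

	/* With the exp(1/600) factor, one bad window barely moves it. */
	lat_avg = calc_load(lat_avg, 2045, sample);
	printf("lat_avg = %llu ns\n", lat_avg); /* 1005859 */
	return 0;
}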
+
+static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
+}
+
+static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
+{
+ return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
+}
+
+static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
+{
+ return pd_to_blkg(&iolat->pd);
+}
+
+static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
+ wait_queue_entry_t *wait,
+ bool first_block)
+{
+ struct rq_wait *rqw = &iolat->rq_wait;
+
+ if (first_block && waitqueue_active(&rqw->wait) &&
+ rqw->wait.head.next != &wait->entry)
+ return false;
+ return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
+}
+
+static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
+ struct iolatency_grp *iolat,
+ spinlock_t *lock, bool issue_as_root,
+ bool use_memdelay)
+ __releases(lock)
+ __acquires(lock)
+{
+ struct rq_wait *rqw = &iolat->rq_wait;
+ unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
+ DEFINE_WAIT(wait);
+ bool first_block = true;
+
+ if (use_delay)
+ blkcg_schedule_throttle(rqos->q, use_memdelay);
+
+ /*
+ * To avoid priority inversions we want to just take a slot if we are
+ * issuing as root. If we're being killed off there's no point in
+ * delaying things, we may have been killed by OOM so throttling may
+ * make recovery take even longer, so just let the IO's through so the
+ * task can go away.
+ */
+ if (issue_as_root || fatal_signal_pending(current)) {
+ atomic_inc(&rqw->inflight);
+ return;
+ }
+
+ if (iolatency_may_queue(iolat, &wait, first_block))
+ return;
+
+ do {
+ prepare_to_wait_exclusive(&rqw->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ if (iolatency_may_queue(iolat, &wait, first_block))
+ break;
+ first_block = false;
+
+ if (lock) {
+ spin_unlock_irq(lock);
+ io_schedule();
+ spin_lock_irq(lock);
+ } else {
+ io_schedule();
+ }
+ } while (1);
+
+ finish_wait(&rqw->wait, &wait);
+}
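
The loop above is the classic bounded-admission pattern: try to take an in-flight slot; failing that, register as an exclusive waiter and sleep until a completion frees one. A condensed user-space analogue using a mutex and condition variable in place of the waitqueue (the guarded check plays the role of rq_wait_inc_below()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t slot_freed = PTHREAD_COND_INITIALIZER;
static int inflight, max_depth = 2;

/* Block until we may account one more in-flight IO. */
static void throttle_enter(void)
{
	pthread_mutex_lock(&lock);
	while (inflight >= max_depth)     /* iolatency_may_queue() */
		pthread_cond_wait(&slot_freed, &lock);
	inflight++;                       /* rq_wait_inc_below() */
	pthread_mutex_unlock(&lock);
}

/* Completion side: drop the slot and wake one waiter. */
static void throttle_exit(void)
{
	pthread_mutex_lock(&lock);
	inflight--;
	pthread_cond_signal(&slot_freed); /* wake_up(&rqw->wait) */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	throttle_enter();
	throttle_enter();
	throttle_exit(); /* frees a slot for the next submitter */
	printf("inflight=%d\n", inflight); /* 1 */
	return 0;
}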
+
+#define SCALE_DOWN_FACTOR 2
+#define SCALE_UP_FACTOR 4
+
+static inline unsigned long scale_amount(unsigned long qd, bool up)
+{
+ return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
+}
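
With these two factors, downward steps are always four times larger than upward ones; for a device queue depth of 128 that is 32 down versus 8 up. A trivial user-space check of the same computation:

#include <stdio.h>

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static unsigned long scale_amount(unsigned long qd, int up)
{
	unsigned long scale = up ? qd >> SCALE_UP_FACTOR
				 : qd >> SCALE_DOWN_FACTOR;
	return scale > 1 ? scale : 1; /* never scale by zero */
}

int main(void)
{
	printf("up=%lu down=%lu\n",
	       scale_amount(128, 1), scale_amount(128, 0)); /* up=8 down=32 */
	return 0;
}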
+
+/*
+ * We scale the qd down faster than we scale up, so we need to use this helper
+ * to adjust the scale_cookie accordingly so we don't prematurely get
+ * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
+ *
+ * Each group has their own local copy of the last scale cookie they saw, so if
+ * the global scale cookie goes up or down they know which way they need to go
+ * based on their last knowledge of it.
+ */
+static void scale_cookie_change(struct blk_iolatency *blkiolat,
+ struct child_latency_info *lat_info,
+ bool up)
+{
+ unsigned long qd = blk_queue_depth(blkiolat->rqos.q);
+ unsigned long scale = scale_amount(qd, up);
+ unsigned long old = atomic_read(&lat_info->scale_cookie);
+ unsigned long max_scale = qd << 1;
+ unsigned long diff = 0;
+
+ if (old < DEFAULT_SCALE_COOKIE)
+ diff = DEFAULT_SCALE_COOKIE - old;
+
+ if (up) {
+ if (scale + old > DEFAULT_SCALE_COOKIE)
+ atomic_set(&lat_info->scale_cookie,
+ DEFAULT_SCALE_COOKIE);
+ else if (diff > qd)
+ atomic_inc(&lat_info->scale_cookie);
+ else
+ atomic_add(scale, &lat_info->scale_cookie);
+ } else {
+ /*
+ * We don't want to dig a hole so deep that it takes us hours to
+ * dig out of it. Just enough that we don't throttle/unthrottle
+ * with jagged workloads but can still unthrottle once pressure
+ * has sufficiently dissipated.
+ */
+ if (diff > qd) {
+ if (diff < max_scale)
+ atomic_dec(&lat_info->scale_cookie);
+ } else {
+ atomic_sub(scale, &lat_info->scale_cookie);
+ }
+ }
+}
+
+/*
+ * Change the queue depth of the iolatency_grp. We add 1/16th of the
+ * queue depth when scaling up, and halve it when scaling down, so we
+ * don't get wild swings and hopefully dial in to fairer distribution
+ * of the overall queue depth.
+ */
+static void scale_change(struct iolatency_grp *iolat, bool up)
+{
+ unsigned long qd = blk_queue_depth(iolat->blkiolat->rqos.q);
+ unsigned long scale = scale_amount(qd, up);
+ unsigned long old = iolat->rq_depth.max_depth;
+ bool changed = false;
+
+ if (old > qd)
+ old = qd;
+
+ if (up) {
+ if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
+ return;
+
+ if (old < qd) {
+ changed = true;
+ old += scale;
+ old = min(old, qd);
+ iolat->rq_depth.max_depth = old;
+ wake_up_all(&iolat->rq_wait.wait);
+ }
+ } else if (old > 1) {
+ old >>= 1;
+ changed = true;
+ iolat->rq_depth.max_depth = max(old, 1UL);
+ }
+}
+
+/* Check our parent and see if the scale cookie has changed. */
+static void check_scale_change(struct iolatency_grp *iolat)
+{
+ struct iolatency_grp *parent;
+ struct child_latency_info *lat_info;
+ unsigned int cur_cookie;
+ unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
+ u64 scale_lat;
+ unsigned int old;
+ int direction = 0;
+
+ if (lat_to_blkg(iolat)->parent == NULL)
+ return;
+
+ parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
+ if (!parent)
+ return;
+
+ lat_info = &parent->child_lat;
+ cur_cookie = atomic_read(&lat_info->scale_cookie);
+ scale_lat = READ_ONCE(lat_info->scale_lat);
+
+ if (cur_cookie < our_cookie)
+ direction = -1;
+ else if (cur_cookie > our_cookie)
+ direction = 1;
+ else
+ return;
+
+ old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
+
+ /* Somebody beat us to the punch, just bail. */
+ if (old != our_cookie)
+ return;
+
+ if (direction < 0 && iolat->min_lat_nsec) {
+ u64 samples_thresh;
+
+ if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
+ return;
+
+ /*
+ * Sometimes high priority groups are their own worst enemy, so
+ * instead of taking it out on some poor other group that did 5%
+ * or less of the IO's for the last summation just skip this
+ * scale down event.
+ */
+ samples_thresh = lat_info->nr_samples * 5;
+ samples_thresh = div64_u64(samples_thresh, 100);
+ if (iolat->nr_samples <= samples_thresh)
+ return;
+ }
+
+ /* We're as low as we can go. */
+ if (iolat->rq_depth.max_depth == 1 && direction < 0) {
+ blkcg_use_delay(lat_to_blkg(iolat));
+ return;
+ }
+
+ /* We're back to the default cookie, unthrottle all the things. */
+ if (cur_cookie == DEFAULT_SCALE_COOKIE) {
+ blkcg_clear_delay(lat_to_blkg(iolat));
+ iolat->rq_depth.max_depth = UINT_MAX;
+ wake_up_all(&iolat->rq_wait.wait);
+ return;
+ }
+
+ scale_change(iolat, direction > 0);
+}
+
+static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
+ spinlock_t *lock)
+{
+ struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+ struct request_queue *q = rqos->q;
+ bool issue_as_root = bio_issue_as_root_blkg(bio);
+
+ if (!blk_iolatency_enabled(blkiolat))
+ return;
+
+ rcu_read_lock();
+ blkcg = bio_blkcg(bio);
+ bio_associate_blkcg(bio, &blkcg->css);
+ blkg = blkg_lookup(blkcg, q);
+ if (unlikely(!blkg)) {
+ if (!lock)
+ spin_lock_irq(q->queue_lock);
+ blkg = blkg_lookup_create(blkcg, q);
+ if (IS_ERR(blkg))
+ blkg = NULL;
+ if (!lock)
+ spin_unlock_irq(q->queue_lock);
+ }
+ if (!blkg)
+ goto out;
+
+ bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+ bio_associate_blkg(bio, blkg);
+out:
+ rcu_read_unlock();
+ while (blkg && blkg->parent) {
+ struct iolatency_grp *iolat = blkg_to_lat(blkg);
+ if (!iolat) {
+ blkg = blkg->parent;
+ continue;
+ }
+
+ check_scale_change(iolat);
+ __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
+ (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
+ blkg = blkg->parent;
+ }
+ if (!timer_pending(&blkiolat->timer))
+ mod_timer(&blkiolat->timer, jiffies + HZ);
+}
+
+static void iolatency_record_time(struct iolatency_grp *iolat,
+ struct bio_issue *issue, u64 now,
+ bool issue_as_root)
+{
+ struct blk_rq_stat *rq_stat;
+ u64 start = bio_issue_time(issue);
+ u64 req_time;
+
+ /*
+ * Truncate 'now' to the same granularity that the issue time was
+ * truncated to, so the two timestamps are directly comparable.
+ */
+ now = __bio_issue_time(now);
+
+ if (now <= start)
+ return;
+
+ req_time = now - start;
+
+ /*
+ * We don't want to count issue_as_root bio's in the cgroups latency
+ * statistics as it could skew the numbers downwards.
+ */
+ if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
+ u64 sub = iolat->min_lat_nsec;
+ if (req_time < sub)
+ blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
+ return;
+ }
+
+ rq_stat = get_cpu_ptr(iolat->stats);
+ blk_rq_stat_add(rq_stat, req_time);
+ put_cpu_ptr(rq_stat);
+}
+
+#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
+#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
+
+static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
+{
+ struct blkcg_gq *blkg = lat_to_blkg(iolat);
+ struct iolatency_grp *parent;
+ struct child_latency_info *lat_info;
+ struct blk_rq_stat stat;
+ unsigned long flags;
+ int cpu, exp_idx;
+
+ blk_rq_stat_init(&stat);
+ preempt_disable();
+ for_each_online_cpu(cpu) {
+ struct blk_rq_stat *s;
+ s = per_cpu_ptr(iolat->stats, cpu);
+ blk_rq_stat_sum(&stat, s);
+ blk_rq_stat_init(s);
+ }
+ preempt_enable();
+
+ parent = blkg_to_lat(blkg->parent);
+ if (!parent)
+ return;
+
+ lat_info = &parent->child_lat;
+
+ /*
+ * CALC_LOAD takes in a number stored in fixed point representation.
+ * Because we are using this for IO time in ns, the values stored
+ * are significantly larger than the FIXED_1 denominator (2048).
+ * Therefore, rounding errors in the calculation are negligible and
+ * can be ignored.
+ */
+ exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
+ div64_u64(iolat->cur_win_nsec,
+ BLKIOLATENCY_EXP_BUCKET_SIZE));
+ CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat.mean);
+
+ /* Everything is ok and we don't need to adjust the scale. */
+ if (stat.mean <= iolat->min_lat_nsec &&
+ atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
+ return;
+
+ /* Somebody beat us to the punch, just bail. */
+ spin_lock_irqsave(&lat_info->lock, flags);
+ lat_info->nr_samples -= iolat->nr_samples;
+ lat_info->nr_samples += stat.nr_samples;
+ iolat->nr_samples = stat.nr_samples;
+
+ if ((lat_info->last_scale_event >= now ||
+ now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME) &&
+ lat_info->scale_lat <= iolat->min_lat_nsec)
+ goto out;
+
+ if (stat.mean <= iolat->min_lat_nsec &&
+ stat.nr_samples >= BLKIOLATENCY_MIN_GOOD_SAMPLES) {
+ if (lat_info->scale_grp == iolat) {
+ lat_info->last_scale_event = now;
+ scale_cookie_change(iolat->blkiolat, lat_info, true);
+ }
+ } else if (stat.mean > iolat->min_lat_nsec) {
+ lat_info->last_scale_event = now;
+ if (!lat_info->scale_grp ||
+ lat_info->scale_lat > iolat->min_lat_nsec) {
+ WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
+ lat_info->scale_grp = iolat;
+ }
+ scale_cookie_change(iolat->blkiolat, lat_info, false);
+ }
+out:
+ spin_unlock_irqrestore(&lat_info->lock, flags);
+}
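
For reference, CALC_LOAD here is the scheduler's fixed-point decay macro from include/linux/sched/loadavg.h, reused to keep a decaying average of IO latencies. Below is a minimal userspace sketch of the arithmetic, assuming the loadavg constants (FIXED_1 = 2048) and a made-up decay factor of 1884 (~0.92); the real factors come from iolatency_exp_factors and are picked per window size:

#include <stdio.h>
#include <stdint.h>

#define FIXED_1 (1 << 11)			/* 2048: fixed-point "1.0" */
#define CALC_LOAD(load, exp, n) \
	do { \
		(load) *= (exp); \
		(load) += (uint64_t)(n) * (FIXED_1 - (exp)); \
		(load) >>= 11; \
	} while (0)

int main(void)
{
	uint64_t exp = 1884;	/* hypothetical decay factor, ~0.92 */
	uint64_t avg = 0;
	uint64_t samples[] = { 800000, 900000, 5000000, 850000 };	/* ns */

	for (int i = 0; i < 4; i++) {
		CALC_LOAD(avg, exp, samples[i]);
		printf("sample %8llu ns -> avg %7llu ns\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)avg);
	}
	return 0;
}

Because the latency values are in nanoseconds, far larger than FIXED_1, the truncation in the final shift is negligible, which is what the comment above is pointing out.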
+
+static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
+{
+ struct blkcg_gq *blkg;
+ struct rq_wait *rqw;
+ struct iolatency_grp *iolat;
+ u64 window_start;
+ u64 now = ktime_to_ns(ktime_get());
+ bool issue_as_root = bio_issue_as_root_blkg(bio);
+ bool enabled = false;
+
+ blkg = bio->bi_blkg;
+ if (!blkg)
+ return;
+
+ iolat = blkg_to_lat(bio->bi_blkg);
+ if (!iolat)
+ return;
+
+ enabled = blk_iolatency_enabled(iolat->blkiolat);
+ while (blkg && blkg->parent) {
+ iolat = blkg_to_lat(blkg);
+ if (!iolat) {
+ blkg = blkg->parent;
+ continue;
+ }
+ rqw = &iolat->rq_wait;
+
+ atomic_dec(&rqw->inflight);
+ if (!enabled || iolat->min_lat_nsec == 0)
+ goto next;
+ iolatency_record_time(iolat, &bio->bi_issue, now,
+ issue_as_root);
+ window_start = atomic64_read(&iolat->window_start);
+ if (now > window_start &&
+ (now - window_start) >= iolat->cur_win_nsec) {
+ if (atomic64_cmpxchg(&iolat->window_start,
+ window_start, now) == window_start)
+ iolatency_check_latencies(iolat, now);
+ }
+next:
+ wake_up(&rqw->wait);
+ blkg = blkg->parent;
+ }
+}
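
The window rotation just above relies on a single-winner compare-and-swap: many completions may observe that the window has expired, but only the one whose cmpxchg succeeds calls iolatency_check_latencies(). A standalone C11 sketch of that pattern:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Returns true for exactly one caller once the window has expired. */
static bool try_rotate_window(_Atomic uint64_t *window_start,
			      uint64_t now, uint64_t win_len)
{
	uint64_t start = atomic_load(window_start);

	if (now <= start || now - start < win_len)
		return false;
	return atomic_compare_exchange_strong(window_start, &start, now);
}

int main(void)
{
	_Atomic uint64_t win = 0;
	/* Two racers at now=150 with a 100ns window: only one wins. */
	bool a = try_rotate_window(&win, 150, 100);
	bool b = try_rotate_window(&win, 150, 100);

	return !(a ^ b);	/* exit 0 iff exactly one succeeded */
}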
+
+static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
+{
+ struct blkcg_gq *blkg;
+
+ blkg = bio->bi_blkg;
+ while (blkg && blkg->parent) {
+ struct rq_wait *rqw;
+ struct iolatency_grp *iolat;
+
+ iolat = blkg_to_lat(blkg);
+ if (!iolat)
+ goto next;
+
+ rqw = &iolat->rq_wait;
+ atomic_dec(&rqw->inflight);
+ wake_up(&rqw->wait);
+next:
+ blkg = blkg->parent;
+ }
+}
+
+static void blkcg_iolatency_exit(struct rq_qos *rqos)
+{
+ struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+
+ del_timer_sync(&blkiolat->timer);
+ blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
+ kfree(blkiolat);
+}
+
+static struct rq_qos_ops blkcg_iolatency_ops = {
+ .throttle = blkcg_iolatency_throttle,
+ .cleanup = blkcg_iolatency_cleanup,
+ .done_bio = blkcg_iolatency_done_bio,
+ .exit = blkcg_iolatency_exit,
+};
+
+static void blkiolatency_timer_fn(struct timer_list *t)
+{
+ struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
+ struct blkcg_gq *blkg;
+ struct cgroup_subsys_state *pos_css;
+ u64 now = ktime_to_ns(ktime_get());
+
+ rcu_read_lock();
+ blkg_for_each_descendant_pre(blkg, pos_css,
+ blkiolat->rqos.q->root_blkg) {
+ struct iolatency_grp *iolat;
+ struct child_latency_info *lat_info;
+ unsigned long flags;
+ u64 cookie;
+
+ /*
+ * We could be exiting, don't access the pd unless we have a
+ * ref on the blkg.
+ */
+ if (!blkg_try_get(blkg))
+ continue;
+
+ iolat = blkg_to_lat(blkg);
+ if (!iolat)
+ goto next;
+
+ lat_info = &iolat->child_lat;
+ cookie = atomic_read(&lat_info->scale_cookie);
+
+ if (cookie >= DEFAULT_SCALE_COOKIE)
+ goto next;
+
+ spin_lock_irqsave(&lat_info->lock, flags);
+ if (lat_info->last_scale_event >= now)
+ goto next_lock;
+
+ /*
+ * We scaled down but don't have a scale_grp, scale up and carry
+ * on.
+ */
+ if (lat_info->scale_grp == NULL) {
+ scale_cookie_change(iolat->blkiolat, lat_info, true);
+ goto next_lock;
+ }
+
+ /*
+ * It's been 5 seconds since our last scale event, clear the
+ * scale grp in case the group that needed the scale down isn't
+ * doing any IO currently.
+ */
+ if (now - lat_info->last_scale_event >=
+ ((u64)NSEC_PER_SEC * 5))
+ lat_info->scale_grp = NULL;
+next_lock:
+ spin_unlock_irqrestore(&lat_info->lock, flags);
+next:
+ blkg_put(blkg);
+ }
+ rcu_read_unlock();
+}
+
+int blk_iolatency_init(struct request_queue *q)
+{
+ struct blk_iolatency *blkiolat;
+ struct rq_qos *rqos;
+ int ret;
+
+ blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
+ if (!blkiolat)
+ return -ENOMEM;
+
+ rqos = &blkiolat->rqos;
+ rqos->id = RQ_QOS_CGROUP;
+ rqos->ops = &blkcg_iolatency_ops;
+ rqos->q = q;
+
+ rq_qos_add(q, rqos);
+
+ ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
+ if (ret) {
+ rq_qos_del(q, rqos);
+ kfree(blkiolat);
+ return ret;
+ }
+
+ timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
+
+ return 0;
+}
+
+static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+{
+ struct iolatency_grp *iolat = blkg_to_lat(blkg);
+ struct blk_iolatency *blkiolat = iolat->blkiolat;
+ u64 oldval = iolat->min_lat_nsec;
+
+ iolat->min_lat_nsec = val;
+ iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
+ iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
+ BLKIOLATENCY_MAX_WIN_SIZE);
+
+ if (!oldval && val)
+ atomic_inc(&blkiolat->enabled);
+ if (oldval && !val)
+ atomic_dec(&blkiolat->enabled);
+}
+
+static void iolatency_clear_scaling(struct blkcg_gq *blkg)
+{
+ if (blkg->parent) {
+ struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
+ struct child_latency_info *lat_info;
+ if (!iolat)
+ return;
+
+ lat_info = &iolat->child_lat;
+ spin_lock(&lat_info->lock);
+ atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
+ lat_info->last_scale_event = 0;
+ lat_info->scale_grp = NULL;
+ lat_info->scale_lat = 0;
+ spin_unlock(&lat_info->lock);
+ }
+}
+
+static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct blkcg *blkcg = css_to_blkcg(of_css(of));
+ struct blkcg_gq *blkg;
+ struct blk_iolatency *blkiolat;
+ struct blkg_conf_ctx ctx;
+ struct iolatency_grp *iolat;
+ char *p, *tok;
+ u64 lat_val = 0;
+ u64 oldval;
+ int ret;
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
+ if (ret)
+ return ret;
+
+ iolat = blkg_to_lat(ctx.blkg);
+ blkiolat = iolat->blkiolat;
+ p = ctx.body;
+
+ ret = -EINVAL;
+ while ((tok = strsep(&p, " "))) {
+ char key[16];
+ char val[21]; /* 18446744073709551616 */
+
+ if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
+ goto out;
+
+ if (!strcmp(key, "target")) {
+ u64 v;
+
+ if (!strcmp(val, "max"))
+ lat_val = 0;
+ else if (sscanf(val, "%llu", &v) == 1)
+ lat_val = v * NSEC_PER_USEC;
+ else
+ goto out;
+ } else {
+ goto out;
+ }
+ }
+
+ /* Walk up the tree to see if our new val is lower than it should be. */
+ blkg = ctx.blkg;
+ oldval = iolat->min_lat_nsec;
+
+ iolatency_set_min_lat_nsec(blkg, lat_val);
+ if (oldval != iolat->min_lat_nsec) {
+ iolatency_clear_scaling(blkg);
+ }
+
+ ret = 0;
+out:
+ blkg_conf_finish(&ctx);
+ return ret ?: nbytes;
+}
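
Given the parser above, the io.latency interface accepts lines of the form "MAJ:MIN target=<usecs>", with "target=max" clearing the limit. A hedged usage sketch, assuming a hypothetical cgroup path and device 8:0:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical cgroup path and device numbers. */
	const char *path = "/sys/fs/cgroup/mygroup/io.latency";
	const char *cfg = "8:0 target=2000\n";	/* 2ms latency target */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cfg, strlen(cfg)) < 0)
		perror("write");
	close(fd);
	return 0;
}

The value is given in microseconds and converted via NSEC_PER_USEC before being stored in min_lat_nsec.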
+
+static u64 iolatency_prfill_limit(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ const char *dname = blkg_dev_name(pd->blkg);
+
+ if (!dname || !iolat->min_lat_nsec)
+ return 0;
+ seq_printf(sf, "%s target=%llu\n",
+ dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
+ return 0;
+}
+
+static int iolatency_print_limit(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ iolatency_prfill_limit,
+ &blkcg_policy_iolatency, seq_cft(sf)->private, false);
+ return 0;
+}
+
+static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
+ size_t size)
+{
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ unsigned long long avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
+ unsigned long long cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
+
+ if (iolat->rq_depth.max_depth == UINT_MAX)
+ return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
+ avg_lat, cur_win);
+
+ return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
+ iolat->rq_depth.max_depth, avg_lat, cur_win);
+}
+
+
+static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
+{
+ struct iolatency_grp *iolat;
+
+ iolat = kzalloc_node(sizeof(*iolat), gfp, node);
+ if (!iolat)
+ return NULL;
+ iolat->stats = __alloc_percpu_gfp(sizeof(struct blk_rq_stat),
+ __alignof__(struct blk_rq_stat), gfp);
+ if (!iolat->stats) {
+ kfree(iolat);
+ return NULL;
+ }
+ return &iolat->pd;
+}
+
+static void iolatency_pd_init(struct blkg_policy_data *pd)
+{
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ struct blkcg_gq *blkg = lat_to_blkg(iolat);
+ struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
+ struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+ u64 now = ktime_to_ns(ktime_get());
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct blk_rq_stat *stat;
+ stat = per_cpu_ptr(iolat->stats, cpu);
+ blk_rq_stat_init(stat);
+ }
+
+ rq_wait_init(&iolat->rq_wait);
+ spin_lock_init(&iolat->child_lat.lock);
+ iolat->rq_depth.queue_depth = blk_queue_depth(blkg->q);
+ iolat->rq_depth.max_depth = UINT_MAX;
+ iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
+ iolat->blkiolat = blkiolat;
+ iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
+ atomic64_set(&iolat->window_start, now);
+
+ /*
+ * We init things in list order, so the pd for the parent may not be
+ * init'ed yet for whatever reason.
+ */
+ if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
+ struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
+ atomic_set(&iolat->scale_cookie,
+ atomic_read(&parent->child_lat.scale_cookie));
+ } else {
+ atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
+ }
+
+ atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
+}
+
+static void iolatency_pd_offline(struct blkg_policy_data *pd)
+{
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ struct blkcg_gq *blkg = lat_to_blkg(iolat);
+
+ iolatency_set_min_lat_nsec(blkg, 0);
+ iolatency_clear_scaling(blkg);
+}
+
+static void iolatency_pd_free(struct blkg_policy_data *pd)
+{
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ free_percpu(iolat->stats);
+ kfree(iolat);
+}
+
+static struct cftype iolatency_files[] = {
+ {
+ .name = "latency",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = iolatency_print_limit,
+ .write = iolatency_set_limit,
+ },
+ {}
+};
+
+static struct blkcg_policy blkcg_policy_iolatency = {
+ .dfl_cftypes = iolatency_files,
+ .pd_alloc_fn = iolatency_pd_alloc,
+ .pd_init_fn = iolatency_pd_init,
+ .pd_offline_fn = iolatency_pd_offline,
+ .pd_free_fn = iolatency_pd_free,
+ .pd_stat_fn = iolatency_pd_stat,
+};
+
+static int __init iolatency_init(void)
+{
+ return blkcg_policy_register(&blkcg_policy_iolatency);
+}
+
+static void __exit iolatency_exit(void)
+{
+ return blkcg_policy_unregister(&blkcg_policy_iolatency);
+}
+
+module_init(iolatency_init);
+module_exit(iolatency_exit);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 8faa70f26fcd..d1b9dd03da25 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -68,6 +68,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*/
req_sects = min_t(sector_t, nr_sects,
q->limits.max_discard_sectors);
+ if (!req_sects)
+ goto fail;
if (req_sects > UINT_MAX >> 9)
req_sects = UINT_MAX >> 9;
@@ -105,6 +107,14 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*biop = bio;
return 0;
+
+fail:
+ if (bio) {
+ submit_bio_wait(bio);
+ bio_put(bio);
+ }
+ *biop = NULL;
+ return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c
new file mode 100644
index 000000000000..fb2c82c351e4
--- /dev/null
+++ b/block/blk-mq-debugfs-zoned.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Western Digital Corporation or its affiliates.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/blkdev.h>
+#include "blk-mq-debugfs.h"
+
+int queue_zone_wlock_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ unsigned int i;
+
+ if (!q->seq_zones_wlock)
+ return 0;
+
+ for (i = 0; i < q->nr_zones; i++)
+ if (test_bit(i, q->seq_zones_wlock))
+ seq_printf(m, "%u\n", i);
+
+ return 0;
+}
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 1c4532e92938..cb1e6cf7ac48 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -206,21 +206,6 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf,
return count;
}
-static int queue_zone_wlock_show(void *data, struct seq_file *m)
-{
- struct request_queue *q = data;
- unsigned int i;
-
- if (!q->seq_zones_wlock)
- return 0;
-
- for (i = 0; i < blk_queue_nr_zones(q); i++)
- if (test_bit(i, q->seq_zones_wlock))
- seq_printf(m, "%u\n", i);
-
- return 0;
-}
-
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
{ "poll_stat", 0400, queue_poll_stat_show },
{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
@@ -637,6 +622,14 @@ static int hctx_active_show(void *data, struct seq_file *m)
return 0;
}
+static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+
+ seq_printf(m, "%u\n", hctx->dispatch_busy);
+ return 0;
+}
+
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
__acquires(&ctx->lock)
{
@@ -798,6 +791,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
{"queued", 0600, hctx_queued_show, hctx_queued_write},
{"run", 0600, hctx_run_show, hctx_run_write},
{"active", 0400, hctx_active_show},
+ {"dispatch_busy", 0400, hctx_dispatch_busy_show},
{},
};
diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h
index b9d366e57097..a9160be12be0 100644
--- a/block/blk-mq-debugfs.h
+++ b/block/blk-mq-debugfs.h
@@ -80,4 +80,13 @@ static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hc
}
#endif
+#ifdef CONFIG_BLK_DEBUG_FS_ZONED
+int queue_zone_wlock_show(void *data, struct seq_file *m);
+#else
+static inline int queue_zone_wlock_show(void *data, struct seq_file *m)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index e233996bb76f..db644ec624f5 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -17,6 +17,8 @@
#include <linux/pci.h>
#include <linux/module.h>
+#include "blk-mq.h"
+
/**
* blk_mq_pci_map_queues - provide a default queue mapping for PCI device
* @set: tagset to provide the mapping for
@@ -48,8 +50,7 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
fallback:
WARN_ON_ONCE(set->nr_hw_queues > 1);
- for_each_possible_cpu(cpu)
- set->mq_map[cpu] = 0;
+ blk_mq_clear_mq_map(set);
return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 56c493c6cd90..29bfe8017a2d 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -59,29 +59,16 @@ static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
return;
- if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
- struct request_queue *q = hctx->queue;
-
- if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- atomic_inc(&q->shared_hctx_restart);
- } else
- set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- return false;
-
- if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
- struct request_queue *q = hctx->queue;
-
- if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- atomic_dec(&q->shared_hctx_restart);
- } else
- clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ return;
+ clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
- return blk_mq_run_hw_queue(hctx, true);
+ blk_mq_run_hw_queue(hctx, true);
}
/*
@@ -219,15 +206,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
}
} else if (has_sched_dispatch) {
blk_mq_do_dispatch_sched(hctx);
- } else if (q->mq_ops->get_budget) {
- /*
- * If we need to get budget before queuing request, we
- * dequeue request one by one from sw queue for avoiding
- * to mess up I/O merge when dispatch runs out of resource.
- *
- * TODO: get more budgets, and dequeue more requests in
- * one time.
- */
+ } else if (hctx->dispatch_busy) {
+ /* dequeue request one by one from sw queue if queue is busy */
blk_mq_do_dispatch_ctx(hctx);
} else {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
@@ -339,7 +319,8 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
return e->type->ops.mq.bio_merge(hctx, bio);
}
- if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
+ if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+ !list_empty_careful(&ctx->rq_list)) {
/* default per sw-queue merge */
spin_lock(&ctx->lock);
ret = blk_mq_attempt_merge(q, ctx, bio);
@@ -380,68 +361,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
return false;
}
-/**
- * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
- * @pos: loop cursor.
- * @skip: the list element that will not be examined. Iteration starts at
- * @skip->next.
- * @head: head of the list to examine. This list must have at least one
- * element, namely @skip.
- * @member: name of the list_head structure within typeof(*pos).
- */
-#define list_for_each_entry_rcu_rr(pos, skip, head, member) \
- for ((pos) = (skip); \
- (pos = (pos)->member.next != (head) ? list_entry_rcu( \
- (pos)->member.next, typeof(*pos), member) : \
- list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
- (pos) != (skip); )
-
-/*
- * Called after a driver tag has been freed to check whether a hctx needs to
- * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
- * queues in a round-robin fashion if the tag set of @hctx is shared with other
- * hardware queues.
- */
-void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
-{
- struct blk_mq_tags *const tags = hctx->tags;
- struct blk_mq_tag_set *const set = hctx->queue->tag_set;
- struct request_queue *const queue = hctx->queue, *q;
- struct blk_mq_hw_ctx *hctx2;
- unsigned int i, j;
-
- if (set->flags & BLK_MQ_F_TAG_SHARED) {
- /*
- * If this is 0, then we know that no hardware queues
- * have RESTART marked. We're done.
- */
- if (!atomic_read(&queue->shared_hctx_restart))
- return;
-
- rcu_read_lock();
- list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
- tag_set_list) {
- queue_for_each_hw_ctx(q, hctx2, i)
- if (hctx2->tags == tags &&
- blk_mq_sched_restart_hctx(hctx2))
- goto done;
- }
- j = hctx->queue_num + 1;
- for (i = 0; i < queue->nr_hw_queues; i++, j++) {
- if (j == queue->nr_hw_queues)
- j = 0;
- hctx2 = queue->queue_hw_ctx[j];
- if (hctx2->tags == tags &&
- blk_mq_sched_restart_hctx(hctx2))
- break;
- }
-done:
- rcu_read_unlock();
- } else {
- blk_mq_sched_restart_hctx(hctx);
- }
-}
-
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async)
{
@@ -486,8 +405,19 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
if (e && e->type->ops.mq.insert_requests)
e->type->ops.mq.insert_requests(hctx, list, false);
- else
+ else {
+ /*
+ * With no scheduler, try to issue requests directly if the hw
+ * queue isn't busy; this can save us an extra enqueue & dequeue
+ * to the sw queue.
+ */
+ if (!hctx->dispatch_busy && !e && !run_queue_async) {
+ blk_mq_try_issue_list_directly(hctx, list);
+ if (list_empty(list))
+ return;
+ }
blk_mq_insert_requests(hctx, ctx, list);
+ }
blk_mq_run_hw_queue(hctx, run_queue_async);
}
@@ -532,50 +462,6 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q)
blk_mq_sched_free_tags(set, hctx, i);
}
-int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx)
-{
- struct elevator_queue *e = q->elevator;
- int ret;
-
- if (!e)
- return 0;
-
- ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
- if (ret)
- return ret;
-
- if (e->type->ops.mq.init_hctx) {
- ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
- if (ret) {
- blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
- return ret;
- }
- }
-
- blk_mq_debugfs_register_sched_hctx(q, hctx);
-
- return 0;
-}
-
-void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx)
-{
- struct elevator_queue *e = q->elevator;
-
- if (!e)
- return;
-
- blk_mq_debugfs_unregister_sched_hctx(hctx);
-
- if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
- e->type->ops.mq.exit_hctx(hctx, hctx_idx);
- hctx->sched_data = NULL;
- }
-
- blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
-}
-
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
struct blk_mq_hw_ctx *hctx;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 0cb8f938dff9..4e028ee42430 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -28,11 +28,6 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
-int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx);
-void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx);
-
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 09b2ee6694fb..94e1ed667b6e 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -23,6 +23,9 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
/*
* If a previously inactive queue goes active, bump the active user count.
+ * We need to do this before trying to allocate a driver tag, so that
+ * even if we fail to get a tag the first time, the other shared-tag
+ * users can still reserve budget for it.
*/
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
@@ -271,7 +274,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
* test and set the bit before assigning ->rqs[].
*/
rq = tags->rqs[bitnr];
- if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
+ if (rq && blk_mq_request_started(rq))
iter_data->fn(rq, iter_data->data, reserved);
return true;
@@ -317,6 +320,18 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
struct blk_mq_hw_ctx *hctx;
int i;
+ /*
+ * __blk_mq_update_nr_hw_queues updates nr_hw_queues and queue_hw_ctx
+ * after freezing the queue, so we can use q_usage_counter to avoid
+ * racing with it. __blk_mq_update_nr_hw_queues uses synchronize_rcu
+ * to ensure all users have left the critical section below and see a
+ * zeroed q_usage_counter.
+ */
+ rcu_read_lock();
+ if (percpu_ref_is_zero(&q->q_usage_counter)) {
+ rcu_read_unlock();
+ return;
+ }
queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_tags *tags = hctx->tags;
@@ -332,7 +347,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
}
-
+ rcu_read_unlock();
}
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
@@ -399,8 +414,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth <= tags->nr_reserved_tags)
return -EINVAL;
- tdepth -= tags->nr_reserved_tags;
-
/*
* If we are allowed to grow beyond the original size, allocate
* a new set of tags before freeing the old one.
@@ -420,7 +433,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth > 16 * BLKDEV_MAX_RQ)
return -EINVAL;
- new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
+ new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
+ tags->nr_reserved_tags);
if (!new)
return -ENOMEM;
ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
@@ -437,7 +451,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
* Don't need (or can't) update reserved tags here, they
* remain static and should never need resizing.
*/
- sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+ sbitmap_queue_resize(&tags->bitmap_tags,
+ tdepth - tags->nr_reserved_tags);
}
return 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 95919268564b..85a1c1a59c72 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -34,8 +34,8 @@
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
-#include "blk-wbt.h"
#include "blk-mq-sched.h"
+#include "blk-rq-qos.h"
static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
@@ -285,7 +285,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->tag = -1;
rq->internal_tag = tag;
} else {
- if (blk_mq_tag_busy(data->hctx)) {
+ if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
rq_flags = RQF_MQ_INFLIGHT;
atomic_inc(&data->hctx->nr_active);
}
@@ -367,6 +367,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
!(data->flags & BLK_MQ_REQ_RESERVED))
e->type->ops.mq.limit_depth(op, data);
+ } else {
+ blk_mq_tag_busy(data->hctx);
}
tag = blk_mq_get_tag(data);
@@ -504,7 +506,7 @@ void blk_mq_free_request(struct request *rq)
if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
laptop_io_completion(q->backing_dev_info);
- wbt_done(q->rq_wb, rq);
+ rq_qos_done(q, rq);
if (blk_rq_rl(rq))
blk_put_rl(blk_rq_rl(rq));
@@ -527,7 +529,7 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
blk_account_io_done(rq, now);
if (rq->end_io) {
- wbt_done(rq->q->rq_wb, rq);
+ rq_qos_done(rq->q, rq);
rq->end_io(rq, error);
} else {
if (unlikely(blk_bidi_rq(rq)))
@@ -558,10 +560,8 @@ static void __blk_mq_complete_request(struct request *rq)
bool shared = false;
int cpu;
- if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) !=
- MQ_RQ_IN_FLIGHT)
+ if (!blk_mq_mark_complete(rq))
return;
-
if (rq->internal_tag != -1)
blk_mq_sched_completed_request(rq);
@@ -641,7 +641,7 @@ void blk_mq_start_request(struct request *rq)
rq->throtl_size = blk_rq_sectors(rq);
#endif
rq->rq_flags |= RQF_STATS;
- wbt_issue(q->rq_wb, rq);
+ rq_qos_issue(q, rq);
}
WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
@@ -667,7 +667,7 @@ static void __blk_mq_requeue_request(struct request *rq)
blk_mq_put_driver_tag(rq);
trace_block_rq_requeue(q, rq);
- wbt_requeue(q->rq_wb, rq);
+ rq_qos_requeue(q, rq);
if (blk_mq_request_started(rq)) {
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
@@ -964,16 +964,14 @@ static inline unsigned int queued_to_index(unsigned int queued)
return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
-bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
- bool wait)
+bool blk_mq_get_driver_tag(struct request *rq)
{
struct blk_mq_alloc_data data = {
.q = rq->q,
.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
- .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
+ .flags = BLK_MQ_REQ_NOWAIT,
};
-
- might_sleep_if(wait);
+ bool shared;
if (rq->tag != -1)
goto done;
@@ -981,9 +979,10 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
data.flags |= BLK_MQ_REQ_RESERVED;
+ shared = blk_mq_tag_busy(data.hctx);
rq->tag = blk_mq_get_tag(&data);
if (rq->tag >= 0) {
- if (blk_mq_tag_busy(data.hctx)) {
+ if (shared) {
rq->rq_flags |= RQF_MQ_INFLIGHT;
atomic_inc(&data.hctx->nr_active);
}
@@ -991,8 +990,6 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
}
done:
- if (hctx)
- *hctx = data.hctx;
return rq->tag != -1;
}
@@ -1003,7 +1000,10 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
+ spin_lock(&hctx->dispatch_wait_lock);
list_del_init(&wait->entry);
+ spin_unlock(&hctx->dispatch_wait_lock);
+
blk_mq_run_hw_queue(hctx, true);
return 1;
}
@@ -1014,17 +1014,16 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
* restart. For both cases, take care to check the condition again after
* marking us as waiting.
*/
-static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
+static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
- struct blk_mq_hw_ctx *this_hctx = *hctx;
- struct sbq_wait_state *ws;
+ struct wait_queue_head *wq;
wait_queue_entry_t *wait;
bool ret;
- if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
- if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
- set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
+ if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
+ if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+ set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
/*
* It's possible that a tag was freed in the window between the
@@ -1034,30 +1033,35 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
* Don't clear RESTART here, someone else could have set it.
* At most this will cost an extra queue run.
*/
- return blk_mq_get_driver_tag(rq, hctx, false);
+ return blk_mq_get_driver_tag(rq);
}
- wait = &this_hctx->dispatch_wait;
+ wait = &hctx->dispatch_wait;
if (!list_empty_careful(&wait->entry))
return false;
- spin_lock(&this_hctx->lock);
+ wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+
+ spin_lock_irq(&wq->lock);
+ spin_lock(&hctx->dispatch_wait_lock);
if (!list_empty(&wait->entry)) {
- spin_unlock(&this_hctx->lock);
+ spin_unlock(&hctx->dispatch_wait_lock);
+ spin_unlock_irq(&wq->lock);
return false;
}
- ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
- add_wait_queue(&ws->wait, wait);
+ wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue(wq, wait);
/*
* It's possible that a tag was freed in the window between the
* allocation failure and adding the hardware queue to the wait
* queue.
*/
- ret = blk_mq_get_driver_tag(rq, hctx, false);
+ ret = blk_mq_get_driver_tag(rq);
if (!ret) {
- spin_unlock(&this_hctx->lock);
+ spin_unlock(&hctx->dispatch_wait_lock);
+ spin_unlock_irq(&wq->lock);
return false;
}
@@ -1065,14 +1069,42 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
* We got a tag, remove ourselves from the wait queue to ensure
* someone else gets the wakeup.
*/
- spin_lock_irq(&ws->wait.lock);
list_del_init(&wait->entry);
- spin_unlock_irq(&ws->wait.lock);
- spin_unlock(&this_hctx->lock);
+ spin_unlock(&hctx->dispatch_wait_lock);
+ spin_unlock_irq(&wq->lock);
return true;
}
+#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8
+#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4
+/*
+ * Update dispatch busy with the exponential weighted moving average (EWMA):
+ * - EWMA is a simple way to compute a running average value
+ * - weights of 7/8 and 1/8 are applied so the value decays exponentially
+ * - a factor of 4 is applied to avoid results that are too small (0); the
+ * exact factor doesn't matter much since the EWMA decays exponentially
+ */
+static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
+{
+ unsigned int ewma;
+
+ if (hctx->queue->elevator)
+ return;
+
+ ewma = hctx->dispatch_busy;
+
+ if (!ewma && !busy)
+ return;
+
+ ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
+ if (busy)
+ ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
+ ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
+
+ hctx->dispatch_busy = ewma;
+}
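
To make the weighting concrete, here is the same update as a standalone userspace sketch; with these constants a run of busy dispatches saturates the value around 9, and a run of idle ones decays it back to 0:

#include <stdio.h>

#define EWMA_WEIGHT 8
#define EWMA_FACTOR 4

/* Mirrors the arithmetic of blk_mq_update_dispatch_busy() above. */
static unsigned int ewma_update(unsigned int ewma, int busy)
{
	if (!ewma && !busy)
		return 0;
	ewma *= EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << EWMA_FACTOR;
	ewma /= EWMA_WEIGHT;
	return ewma;
}

int main(void)
{
	unsigned int ewma = 0;
	int i;

	for (i = 0; i < 10; i++)	/* a run of busy dispatches... */
		printf("busy: ewma=%u\n", ewma = ewma_update(ewma, 1));
	for (i = 0; i < 10; i++)	/* ...then a run of idle ones */
		printf("idle: ewma=%u\n", ewma = ewma_update(ewma, 0));
	return 0;
}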
+
#define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
/*
@@ -1105,7 +1137,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
break;
- if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+ if (!blk_mq_get_driver_tag(rq)) {
/*
* The initial allocation attempt failed, so we need to
* rerun the hardware queue when a tag is freed. The
@@ -1113,7 +1145,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
* before we add this entry back on the dispatch list,
* we'll re-run it below.
*/
- if (!blk_mq_mark_tag_wait(&hctx, rq)) {
+ if (!blk_mq_mark_tag_wait(hctx, rq)) {
blk_mq_put_dispatch_budget(hctx);
/*
* For non-shared tags, the RESTART check
@@ -1137,7 +1169,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
bd.last = true;
else {
nxt = list_first_entry(list, struct request, queuelist);
- bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
+ bd.last = !blk_mq_get_driver_tag(nxt);
}
ret = q->mq_ops->queue_rq(hctx, &bd);
@@ -1209,8 +1241,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
else if (needs_restart && (ret == BLK_STS_RESOURCE))
blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
+ blk_mq_update_dispatch_busy(hctx, true);
return false;
- }
+ } else
+ blk_mq_update_dispatch_busy(hctx, false);
/*
* If the host/device is unable to accept more work, inform the
@@ -1544,19 +1578,19 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list)
{
+ struct request *rq;
+
/*
* preemption doesn't flush plug list, so it's possible ctx->cpu is
* offline now
*/
- spin_lock(&ctx->lock);
- while (!list_empty(list)) {
- struct request *rq;
-
- rq = list_first_entry(list, struct request, queuelist);
+ list_for_each_entry(rq, list, queuelist) {
BUG_ON(rq->mq_ctx != ctx);
- list_del_init(&rq->queuelist);
- __blk_mq_insert_req_list(hctx, rq, false);
+ trace_block_rq_insert(hctx->queue, rq);
}
+
+ spin_lock(&ctx->lock);
+ list_splice_tail_init(list, &ctx->rq_list);
blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
}
@@ -1659,13 +1693,16 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
ret = q->mq_ops->queue_rq(hctx, &bd);
switch (ret) {
case BLK_STS_OK:
+ blk_mq_update_dispatch_busy(hctx, false);
*cookie = new_cookie;
break;
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
+ blk_mq_update_dispatch_busy(hctx, true);
__blk_mq_requeue_request(rq);
break;
default:
+ blk_mq_update_dispatch_busy(hctx, false);
*cookie = BLK_QC_T_NONE;
break;
}
@@ -1700,7 +1737,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
if (!blk_mq_get_dispatch_budget(hctx))
goto insert;
- if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+ if (!blk_mq_get_driver_tag(rq)) {
blk_mq_put_dispatch_budget(hctx);
goto insert;
}
@@ -1748,6 +1785,27 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq)
return ret;
}
+void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+ struct list_head *list)
+{
+ while (!list_empty(list)) {
+ blk_status_t ret;
+ struct request *rq = list_first_entry(list, struct request,
+ queuelist);
+
+ list_del_init(&rq->queuelist);
+ ret = blk_mq_request_issue_directly(rq);
+ if (ret != BLK_STS_OK) {
+ if (ret == BLK_STS_RESOURCE ||
+ ret == BLK_STS_DEV_RESOURCE) {
+ list_add(&rq->queuelist, list);
+ break;
+ }
+ blk_mq_end_request(rq, ret);
+ }
+ }
+}
+
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
@@ -1758,7 +1816,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
- unsigned int wb_acct;
blk_queue_bounce(q, &bio);
@@ -1774,19 +1831,19 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (blk_mq_sched_bio_merge(q, bio))
return BLK_QC_T_NONE;
- wb_acct = wbt_wait(q->rq_wb, bio, NULL);
+ rq_qos_throttle(q, bio, NULL);
trace_block_getrq(q, bio, bio->bi_opf);
rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
if (unlikely(!rq)) {
- __wbt_done(q->rq_wb, wb_acct);
+ rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
return BLK_QC_T_NONE;
}
- wbt_track(rq, wb_acct);
+ rq_qos_track(q, rq, bio);
cookie = request_to_qc_t(data.hctx, rq);
@@ -1849,7 +1906,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
&cookie);
}
- } else if (q->nr_hw_queues > 1 && is_sync) {
+ } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
+ !data.hctx->dispatch_busy)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
blk_mq_try_issue_directly(data.hctx, rq, &cookie);
@@ -2087,8 +2145,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
if (set->ops->exit_request)
set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
- blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
-
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@@ -2148,6 +2204,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->nr_ctx = 0;
+ spin_lock_init(&hctx->dispatch_wait_lock);
init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
@@ -2155,12 +2212,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
- if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
- goto exit_hctx;
-
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
if (!hctx->fq)
- goto sched_exit_hctx;
+ goto exit_hctx;
if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
goto free_fq;
@@ -2174,8 +2228,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
free_fq:
kfree(hctx->fq);
- sched_exit_hctx:
- blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
exit_hctx:
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@@ -2333,15 +2385,10 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
int i;
queue_for_each_hw_ctx(q, hctx, i) {
- if (shared) {
- if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- atomic_inc(&q->shared_hctx_restart);
+ if (shared)
hctx->flags |= BLK_MQ_F_TAG_SHARED;
- } else {
- if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- atomic_dec(&q->shared_hctx_restart);
+ else
hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
- }
}
}
@@ -2372,7 +2419,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
blk_mq_update_tag_set_depth(set, false);
}
mutex_unlock(&set->tag_list_lock);
- synchronize_rcu();
INIT_LIST_HEAD(&q->tag_set_list);
}
@@ -2687,7 +2733,6 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
if (set->ops->map_queues) {
- int cpu;
/*
* transport .map_queues is usually done in the following
* way:
@@ -2702,8 +2747,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
* killing stale mapping since one CPU may not be mapped
* to any hw queue.
*/
- for_each_possible_cpu(cpu)
- set->mq_map[cpu] = 0;
+ blk_mq_clear_mq_map(set);
return set->ops->map_queues(set);
} else
@@ -2713,7 +2757,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
- * requested depth down, if if it too large. In that case, the set
+ * requested depth down, if it's too large. In that case, the set
* value will be stored in set->queue_depth.
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
@@ -2845,10 +2889,81 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return ret;
}
+/*
+ * request_queue and elevator_type pair.
+ * It is just used by __blk_mq_update_nr_hw_queues to cache
+ * the elevator_type associated with a request_queue.
+ */
+struct blk_mq_qe_pair {
+ struct list_head node;
+ struct request_queue *q;
+ struct elevator_type *type;
+};
+
+/*
+ * Cache the elevator_type in the qe pair list and switch the
+ * io scheduler to 'none'.
+ */
+static bool blk_mq_elv_switch_none(struct list_head *head,
+ struct request_queue *q)
+{
+ struct blk_mq_qe_pair *qe;
+
+ if (!q->elevator)
+ return true;
+
+ qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
+ if (!qe)
+ return false;
+
+ INIT_LIST_HEAD(&qe->node);
+ qe->q = q;
+ qe->type = q->elevator->type;
+ list_add(&qe->node, head);
+
+ mutex_lock(&q->sysfs_lock);
+ /*
+ * After elevator_switch_mq, the previous elevator_queue will be
+ * released by elevator_release. The reference to the io scheduler
+ * module taken by elevator_get will also be put. So we need to take
+ * a reference to the io scheduler module here to prevent it from
+ * being removed.
+ */
+ __module_get(qe->type->elevator_owner);
+ elevator_switch_mq(q, NULL);
+ mutex_unlock(&q->sysfs_lock);
+
+ return true;
+}
+
+static void blk_mq_elv_switch_back(struct list_head *head,
+ struct request_queue *q)
+{
+ struct blk_mq_qe_pair *qe;
+ struct elevator_type *t = NULL;
+
+ list_for_each_entry(qe, head, node)
+ if (qe->q == q) {
+ t = qe->type;
+ break;
+ }
+
+ if (!t)
+ return;
+
+ list_del(&qe->node);
+ kfree(qe);
+
+ mutex_lock(&q->sysfs_lock);
+ elevator_switch_mq(q, t);
+ mutex_unlock(&q->sysfs_lock);
+}
+
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
int nr_hw_queues)
{
struct request_queue *q;
+ LIST_HEAD(head);
lockdep_assert_held(&set->tag_list_lock);
@@ -2859,6 +2974,18 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_freeze_queue(q);
+ /*
+ * Sync with blk_mq_queue_tag_busy_iter.
+ */
+ synchronize_rcu();
+ /*
+ * Switch IO scheduler to 'none', cleaning up the data associated
+ * with the previous scheduler. We will switch back once we are done
+ * updating the new sw to hw queue mappings.
+ */
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ if (!blk_mq_elv_switch_none(&head, q))
+ goto switch_back;
set->nr_hw_queues = nr_hw_queues;
blk_mq_update_queue_map(set);
@@ -2867,6 +2994,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
blk_mq_queue_reinit(q);
}
+switch_back:
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+ blk_mq_elv_switch_back(&head, q);
+
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
}
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 89231e439b2f..9497b47e2526 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -36,8 +36,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
-bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
- bool wait);
+bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start);
@@ -65,6 +64,8 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);
+void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+ struct list_head *list);
/*
* CPU -> queue mappings
@@ -203,4 +204,12 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
__blk_mq_put_driver_tag(hctx, rq);
}
+static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ set->mq_map[cpu] = 0;
+}
+
#endif
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
new file mode 100644
index 000000000000..0005dfd568dd
--- /dev/null
+++ b/block/blk-rq-qos.c
@@ -0,0 +1,194 @@
+#include "blk-rq-qos.h"
+
+/*
+ * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
+ * false if 'v' + 1 would be bigger than 'below'.
+ */
+static bool atomic_inc_below(atomic_t *v, unsigned int below)
+{
+ unsigned int cur = atomic_read(v);
+
+ for (;;) {
+ unsigned int old;
+
+ if (cur >= below)
+ return false;
+ old = atomic_cmpxchg(v, cur, cur + 1);
+ if (old == cur)
+ break;
+ cur = old;
+ }
+
+ return true;
+}
+
+bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
+{
+ return atomic_inc_below(&rq_wait->inflight, limit);
+}
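
The same bounded-increment pattern in portable C11 atomics, for illustration: the loop retries the compare-and-swap until it either sees the limit reached (and fails) or claims a slot:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace analogue of atomic_inc_below(): ++v only while v < below. */
static bool inc_below(atomic_uint *v, unsigned int below)
{
	unsigned int cur = atomic_load(v);

	while (cur < below) {
		/* On failure, 'cur' is reloaded with the current value. */
		if (atomic_compare_exchange_weak(v, &cur, cur + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_uint inflight = 0;
	int granted = 0;

	for (int i = 0; i < 10; i++)
		if (inc_below(&inflight, 4))
			granted++;

	printf("granted %d of 10 (limit 4)\n", granted);
	return 0;
}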
+
+void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
+{
+ struct rq_qos *rqos;
+
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->cleanup)
+ rqos->ops->cleanup(rqos, bio);
+ }
+}
+
+void rq_qos_done(struct request_queue *q, struct request *rq)
+{
+ struct rq_qos *rqos;
+
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->done)
+ rqos->ops->done(rqos, rq);
+ }
+}
+
+void rq_qos_issue(struct request_queue *q, struct request *rq)
+{
+ struct rq_qos *rqos;
+
+ for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->issue)
+ rqos->ops->issue(rqos, rq);
+ }
+}
+
+void rq_qos_requeue(struct request_queue *q, struct request *rq)
+{
+ struct rq_qos *rqos;
+
+ for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->requeue)
+ rqos->ops->requeue(rqos, rq);
+ }
+}
+
+void rq_qos_throttle(struct request_queue *q, struct bio *bio,
+ spinlock_t *lock)
+{
+ struct rq_qos *rqos;
+
+ for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->throttle)
+ rqos->ops->throttle(rqos, bio, lock);
+ }
+}
+
+void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
+{
+ struct rq_qos *rqos;
+
+ for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->track)
+ rqos->ops->track(rqos, rq, bio);
+ }
+}
+
+void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
+{
+ struct rq_qos *rqos;
+
+ for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->done_bio)
+ rqos->ops->done_bio(rqos, bio);
+ }
+}
+
+/*
+ * Return true if we can't increase the depth further by scaling.
+ */
+bool rq_depth_calc_max_depth(struct rq_depth *rqd)
+{
+ unsigned int depth;
+ bool ret = false;
+
+ /*
+ * For QD=1 devices, this is a special case. It's important for those
+ * to have one request ready when one completes, so force a depth of
+ * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
+ * since the device can't have more than that in flight. If we're
+ * scaling down, then keep a setting of 1/1/1.
+ */
+ if (rqd->queue_depth == 1) {
+ if (rqd->scale_step > 0)
+ rqd->max_depth = 1;
+ else {
+ rqd->max_depth = 2;
+ ret = true;
+ }
+ } else {
+ /*
+ * scale_step == 0 is our default state. If we have suffered
+ * latency spikes, step will be > 0, and we shrink the
+ * allowed write depths. If step is < 0, we're only doing
+ * writes, and we allow a temporarily higher depth to
+ * increase performance.
+ */
+ depth = min_t(unsigned int, rqd->default_depth,
+ rqd->queue_depth);
+ if (rqd->scale_step > 0)
+ depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
+ else if (rqd->scale_step < 0) {
+ unsigned int maxd = 3 * rqd->queue_depth / 4;
+
+ depth = 1 + ((depth - 1) << -rqd->scale_step);
+ if (depth > maxd) {
+ depth = maxd;
+ ret = true;
+ }
+ }
+
+ rqd->max_depth = depth;
+ }
+
+ return ret;
+}
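
For intuition, the code above roughly halves the depth per positive scale step and doubles it per negative step, capping growth at 3/4 of the queue depth. A quick sketch of the resulting depths, assuming a hypothetical device with queue_depth == default_depth == 64:

#include <stdio.h>

/* Simplified rq_depth_calc_max_depth() for a queue_depth > 1 device. */
static unsigned int calc_depth(unsigned int depth, int step,
			       unsigned int queue_depth)
{
	if (step > 0)
		depth = 1 + ((depth - 1) >> (step < 31 ? step : 31));
	else if (step < 0) {
		unsigned int maxd = 3 * queue_depth / 4;

		depth = 1 + ((depth - 1) << -step);
		if (depth > maxd)
			depth = maxd;
	}
	return depth;
}

int main(void)
{
	for (int step = -2; step <= 4; step++)
		printf("scale_step=%2d -> max_depth=%u\n",
		       step, calc_depth(64, step, 64));
	return 0;	/* e.g. step 2 -> 16, step -1 -> capped at 48 */
}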
+
+void rq_depth_scale_up(struct rq_depth *rqd)
+{
+ /*
+ * Hit max in previous round, stop here
+ */
+ if (rqd->scaled_max)
+ return;
+
+ rqd->scale_step--;
+
+ rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+}
+
+/*
+ * Scale the depth down. If 'hard_throttle' is set, do it quicker, since we
+ * had a latency violation.
+ */
+void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+{
+ /*
+ * Stop scaling down when we've hit the limit. This also prevents
+ * ->scale_step from going to crazy values, if the device can't
+ * keep up.
+ */
+ if (rqd->max_depth == 1)
+ return;
+
+ if (rqd->scale_step < 0 && hard_throttle)
+ rqd->scale_step = 0;
+ else
+ rqd->scale_step++;
+
+ rqd->scaled_max = false;
+ rq_depth_calc_max_depth(rqd);
+}
+
+void rq_qos_exit(struct request_queue *q)
+{
+ while (q->rq_qos) {
+ struct rq_qos *rqos = q->rq_qos;
+ q->rq_qos = rqos->next;
+ rqos->ops->exit(rqos);
+ }
+}
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
new file mode 100644
index 000000000000..32b02efbfa66
--- /dev/null
+++ b/block/blk-rq-qos.h
@@ -0,0 +1,109 @@
+#ifndef RQ_QOS_H
+#define RQ_QOS_H
+
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blk_types.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+
+enum rq_qos_id {
+ RQ_QOS_WBT,
+ RQ_QOS_CGROUP,
+};
+
+struct rq_wait {
+ wait_queue_head_t wait;
+ atomic_t inflight;
+};
+
+struct rq_qos {
+ struct rq_qos_ops *ops;
+ struct request_queue *q;
+ enum rq_qos_id id;
+ struct rq_qos *next;
+};
+
+struct rq_qos_ops {
+ void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
+ void (*track)(struct rq_qos *, struct request *, struct bio *);
+ void (*issue)(struct rq_qos *, struct request *);
+ void (*requeue)(struct rq_qos *, struct request *);
+ void (*done)(struct rq_qos *, struct request *);
+ void (*done_bio)(struct rq_qos *, struct bio *);
+ void (*cleanup)(struct rq_qos *, struct bio *);
+ void (*exit)(struct rq_qos *);
+};
+
+struct rq_depth {
+ unsigned int max_depth;
+
+ int scale_step;
+ bool scaled_max;
+
+ unsigned int queue_depth;
+ unsigned int default_depth;
+};
+
+static inline struct rq_qos *rq_qos_id(struct request_queue *q,
+ enum rq_qos_id id)
+{
+ struct rq_qos *rqos;
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->id == id)
+ break;
+ }
+ return rqos;
+}
+
+static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
+{
+ return rq_qos_id(q, RQ_QOS_WBT);
+}
+
+static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
+{
+ return rq_qos_id(q, RQ_QOS_CGROUP);
+}
+
+static inline void rq_wait_init(struct rq_wait *rq_wait)
+{
+ atomic_set(&rq_wait->inflight, 0);
+ init_waitqueue_head(&rq_wait->wait);
+}
+
+static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
+{
+ rqos->next = q->rq_qos;
+ q->rq_qos = rqos;
+}
+
+static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
+{
+ struct rq_qos *cur, *prev = NULL;
+ for (cur = q->rq_qos; cur; cur = cur->next) {
+ if (cur == rqos) {
+ if (prev)
+ prev->next = rqos->next;
+ else
+ q->rq_qos = rqos->next;
+ break;
+ }
+ prev = cur;
+ }
+}
+
+bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
+void rq_depth_scale_up(struct rq_depth *rqd);
+void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_calc_max_depth(struct rq_depth *rqd);
+
+void rq_qos_cleanup(struct request_queue *, struct bio *);
+void rq_qos_done(struct request_queue *, struct request *);
+void rq_qos_issue(struct request_queue *, struct request *);
+void rq_qos_requeue(struct request_queue *, struct request *);
+void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
+void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
+void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
+void rq_qos_exit(struct request_queue *);
+#endif
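
To show how the pieces of this header fit together, here is a hedged sketch of a minimal policy, not part of the patch: embed struct rq_qos in a private structure, fill in an ops table, and chain it onto the queue with rq_qos_add(). The core then invokes the non-NULL hooks from the rq_qos_throttle()/rq_qos_done() walkers in blk-rq-qos.c:

#include <linux/slab.h>
#include "blk-rq-qos.h"

/* A do-nothing policy that only counts throttle calls. */
struct noop_qos {
	struct rq_qos rqos;
	atomic_t throttled_bios;
};

static void noop_throttle(struct rq_qos *rqos, struct bio *bio,
			  spinlock_t *lock)
{
	struct noop_qos *nq = container_of(rqos, struct noop_qos, rqos);

	atomic_inc(&nq->throttled_bios);	/* just count, never block */
}

static void noop_exit(struct rq_qos *rqos)
{
	kfree(container_of(rqos, struct noop_qos, rqos));
}

static struct rq_qos_ops noop_ops = {
	.throttle	= noop_throttle,
	.exit		= noop_exit,
};

int noop_qos_init(struct request_queue *q)
{
	struct noop_qos *nq = kzalloc(sizeof(*nq), GFP_KERNEL);

	if (!nq)
		return -ENOMEM;
	/* A real policy would add its own enum rq_qos_id value. */
	nq->rqos.id = RQ_QOS_WBT;
	nq->rqos.ops = &noop_ops;
	nq->rqos.q = q;
	rq_qos_add(q, &nq->rqos);
	return 0;
}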
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d1de71124656..ffd459969689 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -128,7 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
/* Inherit limits from component devices */
lim->max_segments = USHRT_MAX;
- lim->max_discard_segments = 1;
+ lim->max_discard_segments = USHRT_MAX;
lim->max_hw_sectors = UINT_MAX;
lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX;
@@ -875,7 +875,7 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
q->queue_depth = depth;
- wbt_set_queue_depth(q->rq_wb, depth);
+ wbt_set_queue_depth(q, depth);
}
EXPORT_SYMBOL(blk_set_queue_depth);
@@ -900,7 +900,7 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
- wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+ wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 175c143ac5b9..7587b1c3caaf 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -17,7 +17,7 @@ struct blk_queue_stats {
bool enable_accounting;
};
-static void blk_stat_init(struct blk_rq_stat *stat)
+void blk_rq_stat_init(struct blk_rq_stat *stat)
{
stat->min = -1ULL;
stat->max = stat->nr_samples = stat->mean = 0;
@@ -25,7 +25,7 @@ static void blk_stat_init(struct blk_rq_stat *stat)
}
/* src is a per-cpu stat, mean isn't initialized */
-static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
+void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
if (!src->nr_samples)
return;
@@ -39,7 +39,7 @@ static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
dst->nr_samples += src->nr_samples;
}
-static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
+void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
stat->min = min(stat->min, value);
stat->max = max(stat->max, value);
@@ -69,7 +69,7 @@ void blk_stat_add(struct request *rq, u64 now)
continue;
stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
- __blk_stat_add(stat, value);
+ blk_rq_stat_add(stat, value);
put_cpu_ptr(cb->cpu_stat);
}
rcu_read_unlock();
@@ -82,15 +82,15 @@ static void blk_stat_timer_fn(struct timer_list *t)
int cpu;
for (bucket = 0; bucket < cb->buckets; bucket++)
- blk_stat_init(&cb->stat[bucket]);
+ blk_rq_stat_init(&cb->stat[bucket]);
for_each_online_cpu(cpu) {
struct blk_rq_stat *cpu_stat;
cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
for (bucket = 0; bucket < cb->buckets; bucket++) {
- blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
- blk_stat_init(&cpu_stat[bucket]);
+ blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
+ blk_rq_stat_init(&cpu_stat[bucket]);
}
}
@@ -143,7 +143,7 @@ void blk_stat_add_callback(struct request_queue *q,
cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
for (bucket = 0; bucket < cb->buckets; bucket++)
- blk_stat_init(&cpu_stat[bucket]);
+ blk_rq_stat_init(&cpu_stat[bucket]);
}
spin_lock(&q->stats->lock);
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 78399cdde9c9..f4a1568e81a4 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -159,4 +159,8 @@ static inline void blk_stat_activate_msecs(struct blk_stat_callback *cb,
mod_timer(&cb->timer, jiffies + msecs_to_jiffies(msecs));
}
+void blk_rq_stat_add(struct blk_rq_stat *, u64);
+void blk_rq_stat_sum(struct blk_rq_stat *, struct blk_rq_stat *);
+void blk_rq_stat_init(struct blk_rq_stat *);
+
#endif
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 94987b1f69e1..bb109bb0a055 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -422,16 +422,16 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
- if (!q->rq_wb)
+ if (!wbt_rq_qos(q))
return -EINVAL;
- return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
+ return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
size_t count)
{
- struct rq_wb *rwb;
+ struct rq_qos *rqos;
ssize_t ret;
s64 val;
@@ -441,23 +441,21 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
if (val < -1)
return -EINVAL;
- rwb = q->rq_wb;
- if (!rwb) {
+ rqos = wbt_rq_qos(q);
+ if (!rqos) {
ret = wbt_init(q);
if (ret)
return ret;
}
- rwb = q->rq_wb;
if (val == -1)
- rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+ val = wbt_default_latency_nsec(q);
else if (val >= 0)
- rwb->min_lat_nsec = val * 1000ULL;
+ val *= 1000ULL;
- if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
- rwb->enable_state = WBT_STATE_ON_MANUAL;
+ wbt_set_min_lat(q, val);
- wbt_update_limits(rwb);
+ wbt_update_limits(q);
return count;
}
@@ -804,6 +802,21 @@ static void __blk_release_queue(struct work_struct *work)
blk_stat_remove_callback(q, q->poll_cb);
blk_stat_free_callback(q->poll_cb);
+ if (!blk_queue_dead(q)) {
+ /*
+ * Last reference was dropped without having called
+ * blk_cleanup_queue().
+ */
+ WARN_ONCE(blk_queue_init_done(q),
+ "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
+ q);
+ blk_exit_queue(q);
+ }
+
+ WARN(blk_queue_root_blkg(q),
+ "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
+ q);
+
blk_free_queue_stats(q->stats);
blk_exit_rl(q, &q->root_rl);
@@ -964,7 +977,7 @@ void blk_unregister_queue(struct gendisk *disk)
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
- wbt_exit(q);
+ rq_qos_exit(q);
mutex_lock(&q->sysfs_lock);
if (q->request_fn || (q->mq_ops && q->elevator))
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 82282e6fdcf8..a3eede00d302 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -579,8 +579,10 @@ static void blk_throtl_update_limit_valid(struct throtl_data *td)
struct throtl_grp *tg = blkg_to_tg(blkg);
if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
- tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
+ tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
low_valid = true;
+ break;
+ }
}
rcu_read_unlock();
@@ -920,12 +922,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
}
/* Calc approx time to dispatch */
- jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
-
- if (jiffy_wait > jiffy_elapsed)
- jiffy_wait = jiffy_wait - jiffy_elapsed;
- else
- jiffy_wait = 1;
+ jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
if (wait)
*wait = jiffy_wait;
@@ -2132,12 +2129,8 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- if (bio->bi_css) {
- if (bio->bi_cg_private)
- blkg_put(tg_to_blkg(bio->bi_cg_private));
- bio->bi_cg_private = tg;
- blkg_get(tg_to_blkg(tg));
- }
+ if (bio->bi_css)
+ bio_associate_blkg(bio, tg_to_blkg(tg));
bio_issue_init(&bio->bi_issue, bio_sectors(bio));
#endif
}
@@ -2285,6 +2278,7 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns)
void blk_throtl_bio_endio(struct bio *bio)
{
+ struct blkcg_gq *blkg;
struct throtl_grp *tg;
u64 finish_time_ns;
unsigned long finish_time;
@@ -2292,20 +2286,18 @@ void blk_throtl_bio_endio(struct bio *bio)
unsigned long lat;
int rw = bio_data_dir(bio);
- tg = bio->bi_cg_private;
- if (!tg)
+ blkg = bio->bi_blkg;
+ if (!blkg)
return;
- bio->bi_cg_private = NULL;
+ tg = blkg_to_tg(blkg);
finish_time_ns = ktime_get_ns();
tg->last_finish_time = finish_time_ns >> 10;
start_time = bio_issue_time(&bio->bi_issue) >> 10;
finish_time = __bio_issue_time(finish_time_ns) >> 10;
- if (!start_time || finish_time <= start_time) {
- blkg_put(tg_to_blkg(tg));
+ if (!start_time || finish_time <= start_time)
return;
- }
lat = finish_time - start_time;
/* this is only for bio based driver */
@@ -2334,8 +2326,6 @@ void blk_throtl_bio_endio(struct bio *bio)
tg->bio_cnt /= 2;
tg->bad_bio_cnt /= 2;
}
-
- blkg_put(tg_to_blkg(tg));
}
#endif
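bio_associate_blkg() (defined outside this excerpt) moves the reference counting out of the throttling code: the bio keeps one reference on its blkg for its whole lifetime, dropped when the bio is freed, which is why the explicit blkg_get()/blkg_put() pairs disappear from blk_throtl_assoc_bio() and blk_throtl_bio_endio() above. A hypothetical sketch of the helper's contract:

	/* Assumed contract: take a blkg reference and stash it in the bio;
	 * the reference is released when the bio is freed. */
	int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
	{
		if (unlikely(bio->bi_blkg))
			return -EBUSY;
		blkg_get(blkg);
		bio->bi_blkg = blkg;
		return 0;
	}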
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 4f89b28fa652..bb93c7c2b182 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -25,6 +25,7 @@
#include <linux/swap.h>
#include "blk-wbt.h"
+#include "blk-rq-qos.h"
#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>
@@ -78,28 +79,6 @@ static inline bool rwb_enabled(struct rq_wb *rwb)
return rwb && rwb->wb_normal != 0;
}
-/*
- * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
- * false if 'v' + 1 would be bigger than 'below'.
- */
-static bool atomic_inc_below(atomic_t *v, int below)
-{
- int cur = atomic_read(v);
-
- for (;;) {
- int old;
-
- if (cur >= below)
- return false;
- old = atomic_cmpxchg(v, cur, cur + 1);
- if (old == cur)
- break;
- cur = old;
- }
-
- return true;
-}
-
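atomic_inc_below() is not dropped for good: it moves to blk-rq-qos.c (not part of this excerpt), where rq_wait_inc_below(), used further down in __wbt_wait(), presumably wraps the same compare-and-swap loop:

	/* Sketch of the relocated helper and its rq_wait wrapper. */
	static bool atomic_inc_below(atomic_t *v, int below)
	{
		int cur = atomic_read(v);

		for (;;) {
			int old;

			if (cur >= below)
				return false;
			old = atomic_cmpxchg(v, cur, cur + 1);
			if (old == cur)
				break;
			cur = old;
		}
		return true;
	}

	bool rq_wait_inc_below(struct rq_wait *rq_wait, int limit)
	{
		return atomic_inc_below(&rq_wait->inflight, limit);
	}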
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
if (rwb_enabled(rwb)) {
@@ -116,7 +95,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
*/
static bool wb_recent_wait(struct rq_wb *rwb)
{
- struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;
+ struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
return time_before(jiffies, wb->dirty_sleep + HZ);
}
@@ -144,8 +123,9 @@ static void rwb_wake_all(struct rq_wb *rwb)
}
}
-void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
+static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
+ struct rq_wb *rwb = RQWB(rqos);
struct rq_wait *rqw;
int inflight, limit;
@@ -186,7 +166,7 @@ void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
int diff = limit - inflight;
if (!inflight || diff >= rwb->wb_background / 2)
- wake_up_all(&rqw->wait);
+ wake_up(&rqw->wait);
}
}
@@ -194,10 +174,9 @@ void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
 * Called on completion of a request. Note that it's also called when
 * a request is merged, at which point the request gets freed.
*/
-void wbt_done(struct rq_wb *rwb, struct request *rq)
+static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
- if (!rwb)
- return;
+ struct rq_wb *rwb = RQWB(rqos);
if (!wbt_is_tracked(rq)) {
if (rwb->sync_cookie == rq) {
@@ -209,72 +188,11 @@ void wbt_done(struct rq_wb *rwb, struct request *rq)
wb_timestamp(rwb, &rwb->last_comp);
} else {
WARN_ON_ONCE(rq == rwb->sync_cookie);
- __wbt_done(rwb, wbt_flags(rq));
+ __wbt_done(rqos, wbt_flags(rq));
}
wbt_clear_state(rq);
}
-/*
- * Return true, if we can't increase the depth further by scaling
- */
-static bool calc_wb_limits(struct rq_wb *rwb)
-{
- unsigned int depth;
- bool ret = false;
-
- if (!rwb->min_lat_nsec) {
- rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
- return false;
- }
-
- /*
- * For QD=1 devices, this is a special case. It's important for those
- * to have one request ready when one completes, so force a depth of
- * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
- * since the device can't have more than that in flight. If we're
- * scaling down, then keep a setting of 1/1/1.
- */
- if (rwb->queue_depth == 1) {
- if (rwb->scale_step > 0)
- rwb->wb_max = rwb->wb_normal = 1;
- else {
- rwb->wb_max = rwb->wb_normal = 2;
- ret = true;
- }
- rwb->wb_background = 1;
- } else {
- /*
- * scale_step == 0 is our default state. If we have suffered
- * latency spikes, step will be > 0, and we shrink the
- * allowed write depths. If step is < 0, we're only doing
- * writes, and we allow a temporarily higher depth to
- * increase performance.
- */
- depth = min_t(unsigned int, RWB_DEF_DEPTH, rwb->queue_depth);
- if (rwb->scale_step > 0)
- depth = 1 + ((depth - 1) >> min(31, rwb->scale_step));
- else if (rwb->scale_step < 0) {
- unsigned int maxd = 3 * rwb->queue_depth / 4;
-
- depth = 1 + ((depth - 1) << -rwb->scale_step);
- if (depth > maxd) {
- depth = maxd;
- ret = true;
- }
- }
-
- /*
- * Set our max/normal/bg queue depths based on how far
- * we have scaled down (->scale_step).
- */
- rwb->wb_max = depth;
- rwb->wb_normal = (rwb->wb_max + 1) / 2;
- rwb->wb_background = (rwb->wb_max + 3) / 4;
- }
-
- return ret;
-}
-
static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
/*
@@ -307,7 +225,8 @@ enum {
static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
- struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
+ struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+ struct rq_depth *rqd = &rwb->rq_depth;
u64 thislat;
/*
@@ -351,7 +270,7 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
return LAT_EXCEEDED;
}
- if (rwb->scale_step)
+ if (rqd->scale_step)
trace_wbt_stat(bdi, stat);
return LAT_OK;
@@ -359,58 +278,48 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
- struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
+ struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+ struct rq_depth *rqd = &rwb->rq_depth;
- trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
- rwb->wb_background, rwb->wb_normal, rwb->wb_max);
+ trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
+ rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}
-static void scale_up(struct rq_wb *rwb)
+static void calc_wb_limits(struct rq_wb *rwb)
{
- /*
- * Hit max in previous round, stop here
- */
- if (rwb->scaled_max)
- return;
+ if (rwb->min_lat_nsec == 0) {
+ rwb->wb_normal = rwb->wb_background = 0;
+ } else if (rwb->rq_depth.max_depth <= 2) {
+ rwb->wb_normal = rwb->rq_depth.max_depth;
+ rwb->wb_background = 1;
+ } else {
+ rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
+ rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
+ }
+}
- rwb->scale_step--;
+static void scale_up(struct rq_wb *rwb)
+{
+ rq_depth_scale_up(&rwb->rq_depth);
+ calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
-
- rwb->scaled_max = calc_wb_limits(rwb);
-
- rwb_wake_all(rwb);
-
- rwb_trace_step(rwb, "step up");
+ rwb_trace_step(rwb, "scale up");
}
-/*
- * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
- */
static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
- /*
- * Stop scaling down when we've hit the limit. This also prevents
- * ->scale_step from going to crazy values, if the device can't
- * keep up.
- */
- if (rwb->wb_max == 1)
- return;
-
- if (rwb->scale_step < 0 && hard_throttle)
- rwb->scale_step = 0;
- else
- rwb->scale_step++;
-
- rwb->scaled_max = false;
- rwb->unknown_cnt = 0;
+ rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
calc_wb_limits(rwb);
- rwb_trace_step(rwb, "step down");
+ rwb->unknown_cnt = 0;
+ rwb_wake_all(rwb);
+ rwb_trace_step(rwb, "scale down");
}
static void rwb_arm_timer(struct rq_wb *rwb)
{
- if (rwb->scale_step > 0) {
+ struct rq_depth *rqd = &rwb->rq_depth;
+
+ if (rqd->scale_step > 0) {
/*
* We should speed this up, using some variant of a fast
* integer inverse square root calculation. Since we only do
@@ -418,7 +327,7 @@ static void rwb_arm_timer(struct rq_wb *rwb)
* though.
*/
rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
- int_sqrt((rwb->scale_step + 1) << 8));
+ int_sqrt((rqd->scale_step + 1) << 8));
} else {
/*
* For step < 0, we don't want to increase/decrease the
@@ -433,12 +342,13 @@ static void rwb_arm_timer(struct rq_wb *rwb)
static void wb_timer_fn(struct blk_stat_callback *cb)
{
struct rq_wb *rwb = cb->data;
+ struct rq_depth *rqd = &rwb->rq_depth;
unsigned int inflight = wbt_inflight(rwb);
int status;
status = latency_exceeded(rwb, cb->stat);
- trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
+ trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
inflight);
/*
@@ -469,9 +379,9 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
* currently don't have a valid read/write sample. For that
* case, slowly return to center state (step == 0).
*/
- if (rwb->scale_step > 0)
+ if (rqd->scale_step > 0)
scale_up(rwb);
- else if (rwb->scale_step < 0)
+ else if (rqd->scale_step < 0)
scale_down(rwb, false);
break;
default:
@@ -481,19 +391,50 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
/*
* Re-arm timer, if we have IO in flight
*/
- if (rwb->scale_step || inflight)
+ if (rqd->scale_step || inflight)
rwb_arm_timer(rwb);
}
-void wbt_update_limits(struct rq_wb *rwb)
+static void __wbt_update_limits(struct rq_wb *rwb)
{
- rwb->scale_step = 0;
- rwb->scaled_max = false;
+ struct rq_depth *rqd = &rwb->rq_depth;
+
+ rqd->scale_step = 0;
+ rqd->scaled_max = false;
+
+ rq_depth_calc_max_depth(rqd);
calc_wb_limits(rwb);
rwb_wake_all(rwb);
}
+void wbt_update_limits(struct request_queue *q)
+{
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ if (!rqos)
+ return;
+ __wbt_update_limits(RQWB(rqos));
+}
+
+u64 wbt_get_min_lat(struct request_queue *q)
+{
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ if (!rqos)
+ return 0;
+ return RQWB(rqos)->min_lat_nsec;
+}
+
+void wbt_set_min_lat(struct request_queue *q, u64 val)
+{
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ if (!rqos)
+ return;
+ RQWB(rqos)->min_lat_nsec = val;
+ RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
+ __wbt_update_limits(RQWB(rqos));
+}
+
static bool close_io(struct rq_wb *rwb)
{
const unsigned long now = jiffies;
@@ -520,7 +461,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
* IO for a bit.
*/
if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
- limit = rwb->wb_max;
+ limit = rwb->rq_depth.max_depth;
else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
/*
* If less than 100ms since we completed unrelated IO,
@@ -533,30 +474,6 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
return limit;
}
-static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
- wait_queue_entry_t *wait, unsigned long rw)
-{
- /*
- * inc it here even if disabled, since we'll dec it at completion.
- * this only happens if the task was sleeping in __wbt_wait(),
- * and someone turned it off at the same time.
- */
- if (!rwb_enabled(rwb)) {
- atomic_inc(&rqw->inflight);
- return true;
- }
-
- /*
- * If the waitqueue is already active and we are not the next
- * in line to be woken up, wait for our turn.
- */
- if (waitqueue_active(&rqw->wait) &&
- rqw->wait.head.next != &wait->entry)
- return false;
-
- return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
-}
-
/*
* Block if we will exceed our limit, or if we are currently waiting for
* the timer to kick off queuing again.
@@ -567,16 +484,32 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
__acquires(lock)
{
struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
- DEFINE_WAIT(wait);
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * inc it here even if disabled, since we'll dec it at completion.
+ * this only happens if the task was sleeping in __wbt_wait(),
+ * and someone turned it off at the same time.
+ */
+ if (!rwb_enabled(rwb)) {
+ atomic_inc(&rqw->inflight);
+ return;
+ }
- if (may_queue(rwb, rqw, &wait, rw))
+ if (!waitqueue_active(&rqw->wait)
+ && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
return;
+ add_wait_queue_exclusive(&rqw->wait, &wait);
do {
- prepare_to_wait_exclusive(&rqw->wait, &wait,
- TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ if (!rwb_enabled(rwb)) {
+ atomic_inc(&rqw->inflight);
+ break;
+ }
- if (may_queue(rwb, rqw, &wait, rw))
+ if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
break;
if (lock) {
@@ -587,7 +520,8 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
io_schedule();
} while (1);
- finish_wait(&rqw->wait, &wait);
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&rqw->wait, &wait);
}
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
@@ -608,43 +542,68 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
}
}
+static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
+{
+ enum wbt_flags flags = 0;
+
+ if (bio_op(bio) == REQ_OP_READ) {
+ flags = WBT_READ;
+ } else if (wbt_should_throttle(rwb, bio)) {
+ if (current_is_kswapd())
+ flags |= WBT_KSWAPD;
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ flags |= WBT_DISCARD;
+ flags |= WBT_TRACKED;
+ }
+ return flags;
+}
+
+static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
+{
+ struct rq_wb *rwb = RQWB(rqos);
+ enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
+ __wbt_done(rqos, flags);
+}
+
/*
* Returns true if the IO request should be accounted, false if not.
* May sleep, if we have exceeded the writeback limits. Caller can pass
* in an irq held spinlock, if it holds one when calling this function.
* If we do sleep, we'll release and re-grab it.
*/
-enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
{
- enum wbt_flags ret = 0;
+ struct rq_wb *rwb = RQWB(rqos);
+ enum wbt_flags flags;
- if (!rwb_enabled(rwb))
- return 0;
-
- if (bio_op(bio) == REQ_OP_READ)
- ret = WBT_READ;
-
- if (!wbt_should_throttle(rwb, bio)) {
- if (ret & WBT_READ)
+ flags = bio_to_wbt_flags(rwb, bio);
+ if (!(flags & WBT_TRACKED)) {
+ if (flags & WBT_READ)
wb_timestamp(rwb, &rwb->last_issue);
- return ret;
+ return;
}
if (current_is_kswapd())
- ret |= WBT_KSWAPD;
+ flags |= WBT_KSWAPD;
if (bio_op(bio) == REQ_OP_DISCARD)
- ret |= WBT_DISCARD;
+ flags |= WBT_DISCARD;
- __wbt_wait(rwb, ret, bio->bi_opf, lock);
+ __wbt_wait(rwb, flags, bio->bi_opf, lock);
if (!blk_stat_is_active(rwb->cb))
rwb_arm_timer(rwb);
+}
- return ret | WBT_TRACKED;
+static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
+{
+ struct rq_wb *rwb = RQWB(rqos);
+ rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}
-void wbt_issue(struct rq_wb *rwb, struct request *rq)
+void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
+ struct rq_wb *rwb = RQWB(rqos);
+
if (!rwb_enabled(rwb))
return;
@@ -661,8 +620,9 @@ void wbt_issue(struct rq_wb *rwb, struct request *rq)
}
}
-void wbt_requeue(struct rq_wb *rwb, struct request *rq)
+void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
+ struct rq_wb *rwb = RQWB(rqos);
if (!rwb_enabled(rwb))
return;
if (rq == rwb->sync_cookie) {
@@ -671,39 +631,30 @@ void wbt_requeue(struct rq_wb *rwb, struct request *rq)
}
}
-void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
+void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
- if (rwb) {
- rwb->queue_depth = depth;
- wbt_update_limits(rwb);
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ if (rqos) {
+ RQWB(rqos)->rq_depth.queue_depth = depth;
+ __wbt_update_limits(RQWB(rqos));
}
}
-void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
+void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
- if (rwb)
- rwb->wc = write_cache_on;
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ if (rqos)
+ RQWB(rqos)->wc = write_cache_on;
}
/*
- * Disable wbt, if enabled by default.
- */
-void wbt_disable_default(struct request_queue *q)
-{
- struct rq_wb *rwb = q->rq_wb;
-
- if (rwb && rwb->enable_state == WBT_STATE_ON_DEFAULT)
- wbt_exit(q);
-}
-EXPORT_SYMBOL_GPL(wbt_disable_default);
-
-/*
* Enable wbt if defaults are configured that way
*/
void wbt_enable_default(struct request_queue *q)
{
+ struct rq_qos *rqos = wbt_rq_qos(q);
/* Throttling already enabled? */
- if (q->rq_wb)
+ if (rqos)
return;
/* Queue not registered? Maybe shutting down... */
@@ -741,6 +692,42 @@ static int wbt_data_dir(const struct request *rq)
return -1;
}
+static void wbt_exit(struct rq_qos *rqos)
+{
+ struct rq_wb *rwb = RQWB(rqos);
+ struct request_queue *q = rqos->q;
+
+ blk_stat_remove_callback(q, rwb->cb);
+ blk_stat_free_callback(rwb->cb);
+ kfree(rwb);
+}
+
+/*
+ * Disable wbt, if enabled by default.
+ */
+void wbt_disable_default(struct request_queue *q)
+{
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ struct rq_wb *rwb;
+ if (!rqos)
+ return;
+ rwb = RQWB(rqos);
+ if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+ rwb->wb_normal = 0;
+}
+EXPORT_SYMBOL_GPL(wbt_disable_default);
+
+static struct rq_qos_ops wbt_rqos_ops = {
+ .throttle = wbt_wait,
+ .issue = wbt_issue,
+ .track = wbt_track,
+ .requeue = wbt_requeue,
+ .done = wbt_done,
+ .cleanup = wbt_cleanup,
+ .exit = wbt_exit,
+};
+
int wbt_init(struct request_queue *q)
{
struct rq_wb *rwb;
@@ -756,39 +743,29 @@ int wbt_init(struct request_queue *q)
return -ENOMEM;
}
- for (i = 0; i < WBT_NUM_RWQ; i++) {
- atomic_set(&rwb->rq_wait[i].inflight, 0);
- init_waitqueue_head(&rwb->rq_wait[i].wait);
- }
+ for (i = 0; i < WBT_NUM_RWQ; i++)
+ rq_wait_init(&rwb->rq_wait[i]);
+ rwb->rqos.id = RQ_QOS_WBT;
+ rwb->rqos.ops = &wbt_rqos_ops;
+ rwb->rqos.q = q;
rwb->last_comp = rwb->last_issue = jiffies;
- rwb->queue = q;
rwb->win_nsec = RWB_WINDOW_NSEC;
rwb->enable_state = WBT_STATE_ON_DEFAULT;
- wbt_update_limits(rwb);
+ rwb->wc = 1;
+ rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
+ __wbt_update_limits(rwb);
/*
* Assign rwb and add the stats callback.
*/
- q->rq_wb = rwb;
+ rq_qos_add(q, &rwb->rqos);
blk_stat_add_callback(q, rwb->cb);
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
- wbt_set_queue_depth(rwb, blk_queue_depth(q));
- wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+ wbt_set_queue_depth(q, blk_queue_depth(q));
+ wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
return 0;
}
-
-void wbt_exit(struct request_queue *q)
-{
- struct rq_wb *rwb = q->rq_wb;
-
- if (rwb) {
- blk_stat_remove_callback(q, rwb->cb);
- blk_stat_free_callback(rwb->cb);
- q->rq_wb = NULL;
- kfree(rwb);
- }
-}
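With ->exit now a callback, teardown runs through rq_qos_exit() (see the blk-sysfs.c hunk earlier). The dispatch side lives in blk-rq-qos.[ch], which this excerpt does not include; the core presumably walks a per-queue list of policies along these lines:

	/* Hypothetical sketch of the rq_qos dispatch for one of the hooks. */
	void rq_qos_done(struct request_queue *q, struct request *rq)
	{
		struct rq_qos *rqos;

		for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
			if (rqos->ops->done)
				rqos->ops->done(rqos, rq);
		}
	}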
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 300df531d0a6..f47218d5b3b2 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -9,6 +9,7 @@
#include <linux/ktime.h>
#include "blk-stat.h"
+#include "blk-rq-qos.h"
enum wbt_flags {
WBT_TRACKED = 1, /* write, tracked for throttling */
@@ -35,20 +36,12 @@ enum {
WBT_STATE_ON_MANUAL = 2,
};
-struct rq_wait {
- wait_queue_head_t wait;
- atomic_t inflight;
-};
-
struct rq_wb {
/*
* Settings that govern how we throttle
*/
unsigned int wb_background; /* background writeback */
unsigned int wb_normal; /* normal writeback */
- unsigned int wb_max; /* max throughput writeback */
- int scale_step;
- bool scaled_max;
short enable_state; /* WBT_STATE_* */
@@ -67,15 +60,20 @@ struct rq_wb {
void *sync_cookie;
unsigned int wc;
- unsigned int queue_depth;
unsigned long last_issue; /* last non-throttled issue */
unsigned long last_comp; /* last non-throttled comp */
unsigned long min_lat_nsec;
- struct request_queue *queue;
+ struct rq_qos rqos;
struct rq_wait rq_wait[WBT_NUM_RWQ];
+ struct rq_depth rq_depth;
};
+static inline struct rq_wb *RQWB(struct rq_qos *rqos)
+{
+ return container_of(rqos, struct rq_wb, rqos);
+}
+
static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
unsigned int i, ret = 0;
@@ -86,26 +84,19 @@ static inline unsigned int wbt_inflight(struct rq_wb *rwb)
return ret;
}
-#ifdef CONFIG_BLK_WBT
-static inline void wbt_track(struct request *rq, enum wbt_flags flags)
-{
- rq->wbt_flags |= flags;
-}
+#ifdef CONFIG_BLK_WBT
-void __wbt_done(struct rq_wb *, enum wbt_flags);
-void wbt_done(struct rq_wb *, struct request *);
-enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
-void wbt_exit(struct request_queue *);
-void wbt_update_limits(struct rq_wb *);
-void wbt_requeue(struct rq_wb *, struct request *);
-void wbt_issue(struct rq_wb *, struct request *);
+void wbt_update_limits(struct request_queue *);
void wbt_disable_default(struct request_queue *);
void wbt_enable_default(struct request_queue *);
-void wbt_set_queue_depth(struct rq_wb *, unsigned int);
-void wbt_set_write_cache(struct rq_wb *, bool);
+u64 wbt_get_min_lat(struct request_queue *q);
+void wbt_set_min_lat(struct request_queue *q, u64 val);
+
+void wbt_set_queue_depth(struct request_queue *, unsigned int);
+void wbt_set_write_cache(struct request_queue *, bool);
u64 wbt_default_latency_nsec(struct request_queue *);
@@ -114,43 +105,30 @@ u64 wbt_default_latency_nsec(struct request_queue *);
static inline void wbt_track(struct request *rq, enum wbt_flags flags)
{
}
-static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
-{
-}
-static inline void wbt_done(struct rq_wb *rwb, struct request *rq)
-{
-}
-static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
- spinlock_t *lock)
-{
- return 0;
-}
static inline int wbt_init(struct request_queue *q)
{
return -EINVAL;
}
-static inline void wbt_exit(struct request_queue *q)
-{
-}
-static inline void wbt_update_limits(struct rq_wb *rwb)
+static inline void wbt_update_limits(struct request_queue *q)
{
}
-static inline void wbt_requeue(struct rq_wb *rwb, struct request *rq)
+static inline void wbt_disable_default(struct request_queue *q)
{
}
-static inline void wbt_issue(struct rq_wb *rwb, struct request *rq)
+static inline void wbt_enable_default(struct request_queue *q)
{
}
-static inline void wbt_disable_default(struct request_queue *q)
+static inline void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
}
-static inline void wbt_enable_default(struct request_queue *q)
+static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
{
}
-static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
+static inline u64 wbt_get_min_lat(struct request_queue *q)
{
+ return 0;
}
-static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
+static inline void wbt_set_min_lat(struct request_queue *q, u64 val)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
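RQWB() works because struct rq_qos is embedded in struct rq_wb, so the wrapper is recovered from the member pointer via container_of() at no runtime cost. The same pattern in miniature:

	/* Minimal sketch of the embedding pattern behind RQWB(). */
	struct wrapper {
		int private_state;
		struct rq_qos rqos;	/* embedded; this is what the core sees */
	};

	static inline struct wrapper *to_wrapper(struct rq_qos *rqos)
	{
		return container_of(rqos, struct wrapper, rqos);
	}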
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 51000914e23f..c461cf63f1f4 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -200,7 +200,7 @@ int blkdev_report_zones(struct block_device *bdev,
/* Get header in the first page */
ofst = 0;
if (!nr_rep) {
- hdr = (struct blk_zone_report_hdr *) addr;
+ hdr = addr;
nr_rep = hdr->nr_zones;
ofst = sizeof(struct blk_zone_report_hdr);
}
diff --git a/block/blk.h b/block/blk.h
index 8d23aea96ce9..9db4e389582c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -130,6 +130,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
+void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
@@ -233,6 +234,8 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
int elevator_init(struct request_queue *);
int elevator_init_mq(struct request_queue *q);
+int elevator_switch_mq(struct request_queue *q,
+ struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);
@@ -296,7 +299,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
* b) the queue had IO stats enabled when this request was started, and
* c) it's a file system request
*/
-static inline int blk_do_io_stat(struct request *rq)
+static inline bool blk_do_io_stat(struct request *rq)
{
return rq->rq_disk &&
(rq->rq_flags & RQF_IO_STAT) &&
@@ -412,4 +415,10 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
extern void blk_drain_queue(struct request_queue *q);
+#ifdef CONFIG_BLK_CGROUP_IOLATENCY
+extern int blk_iolatency_init(struct request_queue *q);
+#else
+static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
+#endif
+
#endif /* BLK_INTERNAL_H */
diff --git a/block/bounce.c b/block/bounce.c
index fd31347b7836..bc63b3a2d18c 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -195,6 +195,73 @@ static void bounce_end_io_read_isa(struct bio *bio)
__bounce_end_io_read(bio, &isa_page_pool);
}
+static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
+ struct bio_set *bs)
+{
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ struct bio *bio;
+
+ /*
+ * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
+ * bio_src->bi_io_vec to bio->bi_io_vec.
+ *
+ * We can't do that anymore, because:
+ *
+ * - The point of cloning the biovec is to produce a bio with a biovec
+ * the caller can modify: bi_idx and bi_bvec_done should be 0.
+ *
+ * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
+ * we tried to clone the whole thing bio_alloc_bioset() would fail.
+ * But the clone should succeed as long as the number of biovecs we
+ * actually need to allocate is fewer than BIO_MAX_PAGES.
+ *
+ * - Lastly, bi_vcnt should not be looked at or relied upon by code
+ * that does not own the bio - reason being drivers don't use it for
+ * iterating over the biovec anymore, so expecting it to be kept up
+ * to date (i.e. for clones that share the parent biovec) is just
+ * asking for trouble and would force extra work on
+ * __bio_clone_fast() anyways.
+ */
+
+ bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
+ if (!bio)
+ return NULL;
+ bio->bi_disk = bio_src->bi_disk;
+ bio->bi_opf = bio_src->bi_opf;
+ bio->bi_write_hint = bio_src->bi_write_hint;
+ bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
+ bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
+
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
+ break;
+ case REQ_OP_WRITE_SAME:
+ bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
+ break;
+ default:
+ bio_for_each_segment(bv, bio_src, iter)
+ bio->bi_io_vec[bio->bi_vcnt++] = bv;
+ break;
+ }
+
+ if (bio_integrity(bio_src)) {
+ int ret;
+
+ ret = bio_integrity_clone(bio, bio_src, gfp_mask);
+ if (ret < 0) {
+ bio_put(bio);
+ return NULL;
+ }
+ }
+
+ bio_clone_blkcg_association(bio, bio_src);
+
+ return bio;
+}
+
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
mempool_t *pool)
{
@@ -222,7 +289,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
generic_make_request(*bio_orig);
*bio_orig = bio;
}
- bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
+ bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
&bounce_bio_set);
bio_for_each_segment_all(to, bio, i) {
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 9419def8c017..f3501cdaf1a6 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -48,9 +48,8 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
job->request_len = hdr->request_len;
job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
- if (IS_ERR(job->request))
- return PTR_ERR(job->request);
- return 0;
+
+ return PTR_ERR_OR_ZERO(job->request);
}
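For reference, PTR_ERR_OR_ZERO() from <linux/err.h> is exactly the three-line pattern it replaces:

	/* Equivalent open-coded form of the new return statement. */
	if (IS_ERR(job->request))
		return PTR_ERR(job->request);
	return 0;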
static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
diff --git a/block/bsg.c b/block/bsg.c
index 66602c489956..db588add6ba6 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -13,11 +13,9 @@
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
-#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
-#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>
@@ -38,21 +36,10 @@
struct bsg_device {
struct request_queue *queue;
spinlock_t lock;
- struct list_head busy_list;
- struct list_head done_list;
struct hlist_node dev_list;
atomic_t ref_count;
- int queued_cmds;
- int done_cmds;
- wait_queue_head_t wq_done;
- wait_queue_head_t wq_free;
char name[20];
int max_queue;
- unsigned long flags;
-};
-
-enum {
- BSG_F_BLOCK = 1,
};
#define BSG_DEFAULT_CMDS 64
@@ -67,64 +54,6 @@ static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
static struct class *bsg_class;
static int bsg_major;
-static struct kmem_cache *bsg_cmd_cachep;
-
-/*
- * our internal command type
- */
-struct bsg_command {
- struct bsg_device *bd;
- struct list_head list;
- struct request *rq;
- struct bio *bio;
- struct bio *bidi_bio;
- int err;
- struct sg_io_v4 hdr;
-};
-
-static void bsg_free_command(struct bsg_command *bc)
-{
- struct bsg_device *bd = bc->bd;
- unsigned long flags;
-
- kmem_cache_free(bsg_cmd_cachep, bc);
-
- spin_lock_irqsave(&bd->lock, flags);
- bd->queued_cmds--;
- spin_unlock_irqrestore(&bd->lock, flags);
-
- wake_up(&bd->wq_free);
-}
-
-static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
-{
- struct bsg_command *bc = ERR_PTR(-EINVAL);
-
- spin_lock_irq(&bd->lock);
-
- if (bd->queued_cmds >= bd->max_queue)
- goto out;
-
- bd->queued_cmds++;
- spin_unlock_irq(&bd->lock);
-
- bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
- if (unlikely(!bc)) {
- spin_lock_irq(&bd->lock);
- bd->queued_cmds--;
- bc = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- bc->bd = bd;
- INIT_LIST_HEAD(&bc->list);
- bsg_dbg(bd, "returning free cmd %p\n", bc);
- return bc;
-out:
- spin_unlock_irq(&bd->lock);
- return bc;
-}
-
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
@@ -267,8 +196,6 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
} else if (hdr->din_xfer_len) {
ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
hdr->din_xfer_len, GFP_KERNEL);
- } else {
- ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
}
if (ret)
@@ -287,101 +214,6 @@ out:
return ERR_PTR(ret);
}
-/*
- * async completion call-back from the block layer, when scsi/ide/whatever
- * calls end_that_request_last() on a request
- */
-static void bsg_rq_end_io(struct request *rq, blk_status_t status)
-{
- struct bsg_command *bc = rq->end_io_data;
- struct bsg_device *bd = bc->bd;
- unsigned long flags;
-
- bsg_dbg(bd, "finished rq %p bc %p, bio %p\n",
- rq, bc, bc->bio);
-
- bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
-
- spin_lock_irqsave(&bd->lock, flags);
- list_move_tail(&bc->list, &bd->done_list);
- bd->done_cmds++;
- spin_unlock_irqrestore(&bd->lock, flags);
-
- wake_up(&bd->wq_done);
-}
-
-/*
- * do final setup of a 'bc' and submit the matching 'rq' to the block
- * layer for io
- */
-static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
- struct bsg_command *bc, struct request *rq)
-{
- int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));
-
- /*
- * add bc command to busy queue and submit rq for io
- */
- bc->rq = rq;
- bc->bio = rq->bio;
- if (rq->next_rq)
- bc->bidi_bio = rq->next_rq->bio;
- bc->hdr.duration = jiffies;
- spin_lock_irq(&bd->lock);
- list_add_tail(&bc->list, &bd->busy_list);
- spin_unlock_irq(&bd->lock);
-
- bsg_dbg(bd, "queueing rq %p, bc %p\n", rq, bc);
-
- rq->end_io_data = bc;
- blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
-}
-
-static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
-{
- struct bsg_command *bc = NULL;
-
- spin_lock_irq(&bd->lock);
- if (bd->done_cmds) {
- bc = list_first_entry(&bd->done_list, struct bsg_command, list);
- list_del(&bc->list);
- bd->done_cmds--;
- }
- spin_unlock_irq(&bd->lock);
-
- return bc;
-}
-
-/*
- * Get a finished command from the done list
- */
-static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
-{
- struct bsg_command *bc;
- int ret;
-
- do {
- bc = bsg_next_done_cmd(bd);
- if (bc)
- break;
-
- if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
- bc = ERR_PTR(-EAGAIN);
- break;
- }
-
- ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
- if (ret) {
- bc = ERR_PTR(-ERESTARTSYS);
- break;
- }
- } while (1);
-
- bsg_dbg(bd, "returning done %p\n", bc);
-
- return bc;
-}
-
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
struct bio *bio, struct bio *bidi_bio)
{
@@ -400,234 +232,6 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
return ret;
}
-static bool bsg_complete(struct bsg_device *bd)
-{
- bool ret = false;
- bool spin;
-
- do {
- spin_lock_irq(&bd->lock);
-
- BUG_ON(bd->done_cmds > bd->queued_cmds);
-
- /*
- * All commands consumed.
- */
- if (bd->done_cmds == bd->queued_cmds)
- ret = true;
-
- spin = !test_bit(BSG_F_BLOCK, &bd->flags);
-
- spin_unlock_irq(&bd->lock);
- } while (!ret && spin);
-
- return ret;
-}
-
-static int bsg_complete_all_commands(struct bsg_device *bd)
-{
- struct bsg_command *bc;
- int ret, tret;
-
- bsg_dbg(bd, "entered\n");
-
- /*
- * wait for all commands to complete
- */
- io_wait_event(bd->wq_done, bsg_complete(bd));
-
- /*
- * discard done commands
- */
- ret = 0;
- do {
- spin_lock_irq(&bd->lock);
- if (!bd->queued_cmds) {
- spin_unlock_irq(&bd->lock);
- break;
- }
- spin_unlock_irq(&bd->lock);
-
- bc = bsg_get_done_cmd(bd);
- if (IS_ERR(bc))
- break;
-
- tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
- bc->bidi_bio);
- if (!ret)
- ret = tret;
-
- bsg_free_command(bc);
- } while (1);
-
- return ret;
-}
-
-static int
-__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
- const struct iovec *iov, ssize_t *bytes_read)
-{
- struct bsg_command *bc;
- int nr_commands, ret;
-
- if (count % sizeof(struct sg_io_v4))
- return -EINVAL;
-
- ret = 0;
- nr_commands = count / sizeof(struct sg_io_v4);
- while (nr_commands) {
- bc = bsg_get_done_cmd(bd);
- if (IS_ERR(bc)) {
- ret = PTR_ERR(bc);
- break;
- }
-
- /*
- * this is the only case where we need to copy data back
- * after completing the request. so do that here,
- * bsg_complete_work() cannot do that for us
- */
- ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
- bc->bidi_bio);
-
- if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
- ret = -EFAULT;
-
- bsg_free_command(bc);
-
- if (ret)
- break;
-
- buf += sizeof(struct sg_io_v4);
- *bytes_read += sizeof(struct sg_io_v4);
- nr_commands--;
- }
-
- return ret;
-}
-
-static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
-{
- if (file->f_flags & O_NONBLOCK)
- clear_bit(BSG_F_BLOCK, &bd->flags);
- else
- set_bit(BSG_F_BLOCK, &bd->flags);
-}
-
-/*
- * Check if the error is a "real" error that we should return.
- */
-static inline int err_block_err(int ret)
-{
- if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
- return 1;
-
- return 0;
-}
-
-static ssize_t
-bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
-{
- struct bsg_device *bd = file->private_data;
- int ret;
- ssize_t bytes_read;
-
- bsg_dbg(bd, "read %zd bytes\n", count);
-
- bsg_set_block(bd, file);
-
- bytes_read = 0;
- ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
- *ppos = bytes_read;
-
- if (!bytes_read || err_block_err(ret))
- bytes_read = ret;
-
- return bytes_read;
-}
-
-static int __bsg_write(struct bsg_device *bd, const char __user *buf,
- size_t count, ssize_t *bytes_written, fmode_t mode)
-{
- struct bsg_command *bc;
- struct request *rq;
- int ret, nr_commands;
-
- if (count % sizeof(struct sg_io_v4))
- return -EINVAL;
-
- nr_commands = count / sizeof(struct sg_io_v4);
- rq = NULL;
- bc = NULL;
- ret = 0;
- while (nr_commands) {
- struct request_queue *q = bd->queue;
-
- bc = bsg_alloc_command(bd);
- if (IS_ERR(bc)) {
- ret = PTR_ERR(bc);
- bc = NULL;
- break;
- }
-
- if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
- ret = -EFAULT;
- break;
- }
-
- /*
- * get a request, fill in the blanks, and add to request queue
- */
- rq = bsg_map_hdr(bd->queue, &bc->hdr, mode);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- rq = NULL;
- break;
- }
-
- bsg_add_command(bd, q, bc, rq);
- bc = NULL;
- rq = NULL;
- nr_commands--;
- buf += sizeof(struct sg_io_v4);
- *bytes_written += sizeof(struct sg_io_v4);
- }
-
- if (bc)
- bsg_free_command(bc);
-
- return ret;
-}
-
-static ssize_t
-bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
- struct bsg_device *bd = file->private_data;
- ssize_t bytes_written;
- int ret;
-
- bsg_dbg(bd, "write %zd bytes\n", count);
-
- if (unlikely(uaccess_kernel()))
- return -EINVAL;
-
- bsg_set_block(bd, file);
-
- bytes_written = 0;
- ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode);
-
- *ppos = bytes_written;
-
- /*
- * return bytes written on non-fatal errors
- */
- if (!bytes_written || err_block_err(ret))
- bytes_written = ret;
-
- bsg_dbg(bd, "returning %zd\n", bytes_written);
- return bytes_written;
-}
-
static struct bsg_device *bsg_alloc_device(void)
{
struct bsg_device *bd;
@@ -637,29 +241,20 @@ static struct bsg_device *bsg_alloc_device(void)
return NULL;
spin_lock_init(&bd->lock);
-
bd->max_queue = BSG_DEFAULT_CMDS;
-
- INIT_LIST_HEAD(&bd->busy_list);
- INIT_LIST_HEAD(&bd->done_list);
INIT_HLIST_NODE(&bd->dev_list);
-
- init_waitqueue_head(&bd->wq_free);
- init_waitqueue_head(&bd->wq_done);
return bd;
}
static int bsg_put_device(struct bsg_device *bd)
{
- int ret = 0, do_free;
struct request_queue *q = bd->queue;
mutex_lock(&bsg_mutex);
- do_free = atomic_dec_and_test(&bd->ref_count);
- if (!do_free) {
+ if (!atomic_dec_and_test(&bd->ref_count)) {
mutex_unlock(&bsg_mutex);
- goto out;
+ return 0;
}
hlist_del(&bd->dev_list);
@@ -670,20 +265,9 @@ static int bsg_put_device(struct bsg_device *bd)
/*
* close can always block
*/
- set_bit(BSG_F_BLOCK, &bd->flags);
-
- /*
- * correct error detection baddies here again. it's the responsibility
- * of the app to properly reap commands before close() if it wants
- * fool-proof error detection
- */
- ret = bsg_complete_all_commands(bd);
-
kfree(bd);
-out:
- if (do_free)
- blk_put_queue(q);
- return ret;
+ blk_put_queue(q);
+ return 0;
}
static struct bsg_device *bsg_add_device(struct inode *inode,
@@ -706,8 +290,6 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
bd->queue = rq;
- bsg_set_block(bd, file);
-
atomic_set(&bd->ref_count, 1);
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
@@ -781,24 +363,6 @@ static int bsg_release(struct inode *inode, struct file *file)
return bsg_put_device(bd);
}
-static __poll_t bsg_poll(struct file *file, poll_table *wait)
-{
- struct bsg_device *bd = file->private_data;
- __poll_t mask = 0;
-
- poll_wait(file, &bd->wq_done, wait);
- poll_wait(file, &bd->wq_free, wait);
-
- spin_lock_irq(&bd->lock);
- if (!list_empty(&bd->done_list))
- mask |= EPOLLIN | EPOLLRDNORM;
- if (bd->queued_cmds < bd->max_queue)
- mask |= EPOLLOUT;
- spin_unlock_irq(&bd->lock);
-
- return mask;
-}
-
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct bsg_device *bd = file->private_data;
@@ -872,9 +436,6 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
static const struct file_operations bsg_fops = {
- .read = bsg_read,
- .write = bsg_write,
- .poll = bsg_poll,
.open = bsg_open,
.release = bsg_release,
.unlocked_ioctl = bsg_ioctl,
@@ -979,21 +540,12 @@ static int __init bsg_init(void)
int ret, i;
dev_t devid;
- bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
- sizeof(struct bsg_command), 0, 0, NULL);
- if (!bsg_cmd_cachep) {
- printk(KERN_ERR "bsg: failed creating slab cache\n");
- return -ENOMEM;
- }
-
for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
INIT_HLIST_HEAD(&bsg_device_list[i]);
bsg_class = class_create(THIS_MODULE, "bsg");
- if (IS_ERR(bsg_class)) {
- ret = PTR_ERR(bsg_class);
- goto destroy_kmemcache;
- }
+ if (IS_ERR(bsg_class))
+ return PTR_ERR(bsg_class);
bsg_class->devnode = bsg_devnode;
ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
@@ -1014,8 +566,6 @@ unregister_chrdev:
unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
class_destroy(bsg_class);
-destroy_kmemcache:
- kmem_cache_destroy(bsg_cmd_cachep);
return ret;
}
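With read(), write() and poll() removed, a /dev/bsg node only supports the synchronous ioctl path. A hypothetical userspace sketch of what remains:

	#include <sys/ioctl.h>
	#include <scsi/sg.h>
	#include <linux/bsg.h>

	/* Submit one SG_IO v4 command and wait for its completion. */
	static int bsg_submit(int fd, struct sg_io_v4 *hdr)
	{
		return ioctl(fd, SG_IO, hdr);
	}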
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 82b6c27b3245..2eb87444b157 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3666,6 +3666,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
switch (ioprio_class) {
default:
printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+ /* fall through */
case IOPRIO_CLASS_NONE:
/*
* no prio set, inherit CPU scheduling settings
@@ -4735,12 +4736,13 @@ USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct cfq_data *cfqd = e->elevator_data; \
- unsigned int __data; \
+ unsigned int __data, __min = (MIN), __max = (MAX); \
+ \
cfq_var_store(&__data, (page)); \
- if (__data < (MIN)) \
- __data = (MIN); \
- else if (__data > (MAX)) \
- __data = (MAX); \
+ if (__data < __min) \
+ __data = __min; \
+ else if (__data > __max) \
+ __data = __max; \
if (__CONV) \
*(__PTR) = (u64)__data * NSEC_PER_MSEC; \
else \
@@ -4769,12 +4771,13 @@ STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX,
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct cfq_data *cfqd = e->elevator_data; \
- unsigned int __data; \
+ unsigned int __data, __min = (MIN), __max = (MAX); \
+ \
cfq_var_store(&__data, (page)); \
- if (__data < (MIN)) \
- __data = (MIN); \
- else if (__data > (MAX)) \
- __data = (MAX); \
+ if (__data < __min) \
+ __data = __min; \
+ else if (__data > __max) \
+ __data = __max; \
*(__PTR) = (u64)__data * NSEC_PER_USEC; \
return count; \
}
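Capturing MIN and MAX in locals evaluates each macro argument exactly once and makes both comparisons unsigned-vs-unsigned; presumably this also quiets compiler warnings for callers passing constants such as 0 or UINT_MAX. The shape of the fix in isolation:

	/* Sketch: clamp a parsed value using single-evaluation bounds. */
	#define CLAMP_STORE(val, MIN, MAX) do {				\
		unsigned int __min = (MIN), __max = (MAX);		\
								\
		if ((val) < __min)					\
			(val) = __min;					\
		else if ((val) > __max)					\
			(val) = __max;					\
	} while (0)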
diff --git a/block/elevator.c b/block/elevator.c
index fa828b5bfd4b..5ea6e7d600e4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -933,16 +933,13 @@ void elv_unregister(struct elevator_type *e)
}
EXPORT_SYMBOL_GPL(elv_unregister);
-static int elevator_switch_mq(struct request_queue *q,
+int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e)
{
int ret;
lockdep_assert_held(&q->sysfs_lock);
- blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
-
if (q->elevator) {
if (q->elevator->registered)
elv_unregister_queue(q);
@@ -968,8 +965,6 @@ static int elevator_switch_mq(struct request_queue *q,
blk_add_trace_msg(q, "elv switch: none");
out:
- blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
return ret;
}
@@ -1021,8 +1016,17 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
lockdep_assert_held(&q->sysfs_lock);
- if (q->mq_ops)
- return elevator_switch_mq(q, new_e);
+ if (q->mq_ops) {
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
+
+ err = elevator_switch_mq(q, new_e);
+
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
+
+ return err;
+ }
/*
* Turn on BYPASS and drain all requests w/ elevator private data.
diff --git a/block/genhd.c b/block/genhd.c
index f1543a45e73b..8cc719a37b32 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1333,21 +1333,28 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_round_stats(gp->queue, cpu, hd);
part_stat_unlock();
part_in_flight(gp->queue, hd, inflight);
- seq_printf(seqf, "%4d %7d %s %lu %lu %lu "
- "%u %lu %lu %lu %u %u %u %u\n",
+ seq_printf(seqf, "%4d %7d %s "
+ "%lu %lu %lu %u "
+ "%lu %lu %lu %u "
+ "%u %u %u "
+ "%lu %lu %lu %u\n",
MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
disk_name(gp, hd->partno, buf),
- part_stat_read(hd, ios[READ]),
- part_stat_read(hd, merges[READ]),
- part_stat_read(hd, sectors[READ]),
- jiffies_to_msecs(part_stat_read(hd, ticks[READ])),
- part_stat_read(hd, ios[WRITE]),
- part_stat_read(hd, merges[WRITE]),
- part_stat_read(hd, sectors[WRITE]),
- jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])),
+ part_stat_read(hd, ios[STAT_READ]),
+ part_stat_read(hd, merges[STAT_READ]),
+ part_stat_read(hd, sectors[STAT_READ]),
+ jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])),
+ part_stat_read(hd, ios[STAT_WRITE]),
+ part_stat_read(hd, merges[STAT_WRITE]),
+ part_stat_read(hd, sectors[STAT_WRITE]),
+ jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])),
inflight[0],
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
- jiffies_to_msecs(part_stat_read(hd, time_in_queue))
+ jiffies_to_msecs(part_stat_read(hd, time_in_queue)),
+ part_stat_read(hd, ios[STAT_DISCARD]),
+ part_stat_read(hd, merges[STAT_DISCARD]),
+ part_stat_read(hd, sectors[STAT_DISCARD]),
+ jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD]))
);
}
disk_part_iter_exit(&piter);
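The rewritten format string appends a fourth stat group for discards. Field order per line after this change, inferred from the arguments above:

	/* major minor name
	 *   reads:    ios merges sectors ticks(ms)
	 *   writes:   ios merges sectors ticks(ms)
	 *   in_flight io_ticks(ms) time_in_queue(ms)
	 *   discards: ios merges sectors ticks(ms)
	 */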
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 3dcfd4ec0e11..5a8975a1201c 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -130,19 +130,24 @@ ssize_t part_stat_show(struct device *dev,
return sprintf(buf,
"%8lu %8lu %8llu %8u "
"%8lu %8lu %8llu %8u "
- "%8u %8u %8u"
+ "%8u %8u %8u "
+ "%8lu %8lu %8llu %8u"
"\n",
- part_stat_read(p, ios[READ]),
- part_stat_read(p, merges[READ]),
- (unsigned long long)part_stat_read(p, sectors[READ]),
- jiffies_to_msecs(part_stat_read(p, ticks[READ])),
- part_stat_read(p, ios[WRITE]),
- part_stat_read(p, merges[WRITE]),
- (unsigned long long)part_stat_read(p, sectors[WRITE]),
- jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
+ part_stat_read(p, ios[STAT_READ]),
+ part_stat_read(p, merges[STAT_READ]),
+ (unsigned long long)part_stat_read(p, sectors[STAT_READ]),
+ jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])),
+ part_stat_read(p, ios[STAT_WRITE]),
+ part_stat_read(p, merges[STAT_WRITE]),
+ (unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
+ jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])),
inflight[0],
jiffies_to_msecs(part_stat_read(p, io_ticks)),
- jiffies_to_msecs(part_stat_read(p, time_in_queue)));
+ jiffies_to_msecs(part_stat_read(p, time_in_queue)),
+ part_stat_read(p, ios[STAT_DISCARD]),
+ part_stat_read(p, merges[STAT_DISCARD]),
+ (unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
+ jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD])));
}
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
diff --git a/block/partitions/aix.c b/block/partitions/aix.c
index 007f95eea0e1..903f3ed175d0 100644
--- a/block/partitions/aix.c
+++ b/block/partitions/aix.c
@@ -178,7 +178,7 @@ int aix_partition(struct parsed_partitions *state)
u32 vgda_sector = 0;
u32 vgda_len = 0;
int numlvs = 0;
- struct pvd *pvd;
+ struct pvd *pvd = NULL;
struct lv_info {
unsigned short pps_per_lv;
unsigned short pps_found;
@@ -232,10 +232,11 @@ int aix_partition(struct parsed_partitions *state)
if (lvip[i].pps_per_lv)
foundlvs += 1;
}
+ /* pvd loops depend on n[].name and lvip[].pps_per_lv */
+ pvd = alloc_pvd(state, vgda_sector + 17);
}
put_dev_sector(sect);
}
- pvd = alloc_pvd(state, vgda_sector + 17);
if (pvd) {
int numpps = be16_to_cpu(pvd->pp_count);
int psn_part1 = be32_to_cpu(pvd->psn_part1);
@@ -282,10 +283,14 @@ int aix_partition(struct parsed_partitions *state)
next_lp_ix += 1;
}
for (i = 0; i < state->limit; i += 1)
- if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
+ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
+ char tmp[sizeof(n[i].name) + 1]; // null char
+
+ snprintf(tmp, sizeof(tmp), "%s", n[i].name);
pr_warn("partition %s (%u pp's found) is "
"not contiguous\n",
- n[i].name, lvip[i].pps_found);
+ tmp, lvip[i].pps_found);
+ }
kfree(pvd);
}
kfree(n);
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 0417937dfe99..16766f267559 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -830,7 +830,6 @@ static bool ldm_parse_dgr4 (const u8 *buffer, int buflen, struct vblk *vb)
{
char buf[64];
int r_objid, r_name, r_id1, r_id2, len;
- struct vblk_dgrp *dgrp;
BUG_ON (!buffer || !vb);
@@ -853,8 +852,6 @@ static bool ldm_parse_dgr4 (const u8 *buffer, int buflen, struct vblk *vb)
if (len != get_unaligned_be32(buffer + 0x14))
return false;
- dgrp = &vb->vblk.dgrp;
-
ldm_get_vstr (buffer + 0x18 + r_objid, buf, sizeof (buf));
return true;
}
diff --git a/block/t10-pi.c b/block/t10-pi.c
index a98db384048f..62aed77d0bb9 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -184,3 +184,113 @@ const struct blk_integrity_profile t10_pi_type3_ip = {
.verify_fn = t10_pi_type3_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
+
+/**
+ * t10_pi_prepare - prepare PI prior to submitting request to device
+ * @rq: request with PI that should be prepared
+ * @protection_type: PI type (Type 1/Type 2/Type 3)
+ *
+ * For Type 1/Type 2, the virtual start sector is the one that was
+ * originally submitted by the block layer for the ref_tag usage. Due to
+ * partitioning, MD/DM cloning, etc. the actual physical start sector is
+ * likely to be different. Remap protection information to match the
+ * physical LBA.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+void t10_pi_prepare(struct request *rq, u8 protection_type)
+{
+ const int tuple_sz = rq->q->integrity.tuple_size;
+ u32 ref_tag = t10_pi_ref_tag(rq);
+ struct bio *bio;
+
+ if (protection_type == T10_PI_TYPE3_PROTECTION)
+ return;
+
+ __rq_for_each_bio(bio, rq) {
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ u32 virt = bip_get_seed(bip) & 0xffffffff;
+ struct bio_vec iv;
+ struct bvec_iter iter;
+
+ /* Already remapped? */
+ if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
+ break;
+
+ bip_for_each_vec(iv, bip, iter) {
+ void *p, *pmap;
+ unsigned int j;
+
+ pmap = kmap_atomic(iv.bv_page);
+ p = pmap + iv.bv_offset;
+ for (j = 0; j < iv.bv_len; j += tuple_sz) {
+ struct t10_pi_tuple *pi = p;
+
+ if (be32_to_cpu(pi->ref_tag) == virt)
+ pi->ref_tag = cpu_to_be32(ref_tag);
+ virt++;
+ ref_tag++;
+ p += tuple_sz;
+ }
+
+ kunmap_atomic(pmap);
+ }
+
+ bip->bip_flags |= BIP_MAPPED_INTEGRITY;
+ }
+}
+EXPORT_SYMBOL(t10_pi_prepare);
+
+/**
+ * t10_pi_complete - prepare PI prior to returning request to the block layer
+ * @rq: request with PI that should be prepared
+ * @protection_type: PI type (Type 1/Type 2/Type 3)
+ * @intervals: total elements to prepare
+ *
+ * For Type 1/Type 2, the virtual start sector is the one that was
+ * originally submitted by the block layer for the ref_tag usage. Due to
+ * partitioning, MD/DM cloning, etc. the actual physical start sector is
+ * likely to be different. Since the physical start sector was submitted
+ * to the device, we should remap it back to virtual values expected by the
+ * block layer.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+void t10_pi_complete(struct request *rq, u8 protection_type,
+ unsigned int intervals)
+{
+ const int tuple_sz = rq->q->integrity.tuple_size;
+ u32 ref_tag = t10_pi_ref_tag(rq);
+ struct bio *bio;
+
+ if (protection_type == T10_PI_TYPE3_PROTECTION)
+ return;
+
+ __rq_for_each_bio(bio, rq) {
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ u32 virt = bip_get_seed(bip) & 0xffffffff;
+ struct bio_vec iv;
+ struct bvec_iter iter;
+
+ bip_for_each_vec(iv, bip, iter) {
+ void *p, *pmap;
+ unsigned int j;
+
+ pmap = kmap_atomic(iv.bv_page);
+ p = pmap + iv.bv_offset;
+ for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
+ struct t10_pi_tuple *pi = p;
+
+ if (be32_to_cpu(pi->ref_tag) == ref_tag)
+ pi->ref_tag = cpu_to_be32(virt);
+ virt++;
+ ref_tag++;
+ intervals--;
+ p += tuple_sz;
+ }
+
+ kunmap_atomic(pmap);
+ }
+ }
+}
+EXPORT_SYMBOL(t10_pi_complete);
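A hypothetical driver-side sketch of the intended call pattern (the actual call sites, e.g. in the SCSI disk driver, are outside this excerpt; the byte-to-interval conversion via interval_exp is an assumption):

	static void example_issue(struct request *rq)
	{
		/* remap ref tags from virtual to physical LBAs before issue */
		if (blk_integrity_rq(rq))
			t10_pi_prepare(rq, T10_PI_TYPE1_PROTECTION);
	}

	static void example_complete(struct request *rq, unsigned int good_bytes)
	{
		/* remap back only the intervals the device completed */
		if (blk_integrity_rq(rq))
			t10_pi_complete(rq, T10_PI_TYPE1_PROTECTION,
					good_bytes >> rq->q->integrity.interval_exp);
	}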
diff --git a/certs/system_keyring.c b/certs/system_keyring.c
index 6251d1b27f0c..81728717523d 100644
--- a/certs/system_keyring.c
+++ b/certs/system_keyring.c
@@ -15,6 +15,7 @@
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/verification.h>
#include <keys/asymmetric-type.h>
#include <keys/system_keyring.h>
#include <crypto/pkcs7.h>
@@ -230,7 +231,7 @@ int verify_pkcs7_signature(const void *data, size_t len,
if (!trusted_keys) {
trusted_keys = builtin_trusted_keys;
- } else if (trusted_keys == (void *)1UL) {
+ } else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
trusted_keys = secondary_trusted_keys;
#else
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index d880a4897159..8882e90e868e 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
- unsigned int bsize)
+static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
+ unsigned int n)
{
- unsigned int n = bsize;
-
for (;;) {
unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
@@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
n -= len_this_page;
scatterwalk_start(&walk->out, sg_next(walk->out.sg));
}
-
- return bsize;
}
-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
- unsigned int n)
+static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
+ unsigned int n)
{
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
-
- return n;
}
static int ablkcipher_walk_next(struct ablkcipher_request *req,
@@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
struct ablkcipher_walk *walk, int err)
{
struct crypto_tfm *tfm = req->base.tfm;
- unsigned int nbytes = 0;
+ unsigned int n; /* bytes processed */
+ bool more;
- if (likely(err >= 0)) {
- unsigned int n = walk->nbytes - err;
+ if (unlikely(err < 0))
+ goto finish;
- if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
- n = ablkcipher_done_fast(walk, n);
- else if (WARN_ON(err)) {
- err = -EINVAL;
- goto err;
- } else
- n = ablkcipher_done_slow(walk, n);
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
- nbytes = walk->total - n;
- err = 0;
+ if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
+ ablkcipher_done_fast(walk, n);
+ } else {
+ if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
+ err = -EINVAL;
+ goto finish;
+ }
+ ablkcipher_done_slow(walk, n);
}
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
-
-err:
- walk->total = nbytes;
- walk->nbytes = nbytes;
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
- if (nbytes) {
+ if (more) {
crypto_yield(req->base.flags);
return ablkcipher_walk_next(req, walk);
}
-
+ err = 0;
+finish:
+ walk->nbytes = 0;
if (walk->iv != req->info)
memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
kfree(walk->iv_buffer);
-
return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
@@ -373,6 +368,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -447,6 +443,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
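strncpy() leaves the destination unterminated whenever the source is at least as long as the buffer, so both report paths now pin the final byte. The pattern in isolation:

	/* Copy a name into a fixed-size field, guaranteeing NUL termination. */
	static void copy_alg_name(char dst[64], const char *src)
	{
		strncpy(dst, src, 64);
		dst[63] = '\0';	/* strncpy() does not terminate on truncation */
	}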
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 38271303ce16..c22f4414856d 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -429,7 +429,6 @@ static struct aead_alg crypto_aegis128_alg = {
.chunksize = AEGIS_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index 0cc1a7525c85..b6fb21ebdc3e 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -121,7 +121,7 @@ static void crypto_aegis128l_ad(struct aegis_state *state,
(const union aegis_chunk *)src;
while (size >= AEGIS128L_CHUNK_SIZE) {
- crypto_aegis128l_update_a(state, src_chunk);
+ crypto_aegis128l_update_a(state, src_chunk);
size -= AEGIS128L_CHUNK_SIZE;
src_chunk += 1;
@@ -493,7 +493,6 @@ static struct aead_alg crypto_aegis128l_alg = {
.chunksize = AEGIS128L_CHUNK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index a489d741d33a..11f0f8ec9c7c 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -444,7 +444,6 @@ static struct aead_alg crypto_aegis256_alg = {
.chunksize = AEGIS_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 314c52c967e5..b053179e0bc5 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1071,7 +1071,7 @@ __poll_t af_alg_poll(struct file *file, struct socket *sock,
struct af_alg_ctx *ctx = ask->private;
__poll_t mask;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
mask = 0;
if (!ctx->more || ctx->used)
@@ -1155,8 +1155,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
/* make one iovec available as scatterlist */
err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
- if (err < 0)
+ if (err < 0) {
+ rsgl->sg_num_bytes = 0;
return err;
+ }
/* chain the new scatterlist with previous one */
if (areq->last_rsgl)
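The af_alg change fixes a half-initialized-entry bug: on the error path, rsgl->sg_num_bytes was left unset while the shared cleanup code later trusted it. A generic, compilable sketch of the pattern, with made-up names:

#include <stddef.h>

struct entry { size_t nbytes; };

static int make_sg(struct entry *e) { (void)e; return -1; }	/* stub that fails */

static int setup(struct entry *e)
{
	int err = make_sg(e);

	if (err < 0) {
		e->nbytes = 0;	/* fixed: cleanup now sees an empty entry */
		return err;
	}
	return 0;
}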
diff --git a/crypto/api.c b/crypto/api.c
index 0ee632bba064..7aca9f86c5f3 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -229,7 +229,7 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
alg = crypto_alg_lookup(name, type, mask);
- if (!alg) {
+ if (!alg && !(mask & CRYPTO_NOLOAD)) {
request_module("crypto-%s", name);
if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
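With this api.c change, a lookup whose mask carries CRYPTO_NOLOAD no longer triggers request_module(), so a caller can probe for an already-registered algorithm without pulling modules in. A hypothetical caller sketch, assuming the flag is passed through the ordinary type/mask arguments:

struct crypto_shash *tfm;

tfm = crypto_alloc_shash("sha512", 0, CRYPTO_NOLOAD);
if (IS_ERR(tfm))
	pr_info("sha512 not loaded; skipping optional feature\n");
else
	crypto_free_shash(tfm);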
diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
index e284d9cb9237..5b2f6a2b5585 100644
--- a/crypto/asymmetric_keys/pkcs7_key_type.c
+++ b/crypto/asymmetric_keys/pkcs7_key_type.c
@@ -63,7 +63,7 @@ static int pkcs7_preparse(struct key_preparsed_payload *prep)
return verify_pkcs7_signature(NULL, 0,
prep->data, prep->datalen,
- (void *)1UL, usage,
+ VERIFY_USE_SECONDARY_KEYRING, usage,
pkcs7_view_content, prep);
}
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 56bd612927ab..80dc567801ec 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -42,6 +42,8 @@ static struct page *pq_scribble_page;
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
+#define MAX_DISKS 255
+
/**
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
@@ -184,7 +186,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
struct dma_device *device = chan ? chan->device : NULL;
struct dmaengine_unmap_data *unmap = NULL;
- BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
+ BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));
if (device)
unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
@@ -196,7 +198,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
is_dma_pq_aligned(device, offset, 0, len)) {
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = 0;
- unsigned char coefs[src_cnt];
+ unsigned char coefs[MAX_DISKS];
int i, j;
/* run the p+q asynchronously */
@@ -299,11 +301,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx;
- unsigned char coefs[disks-2];
+ unsigned char coefs[MAX_DISKS];
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
struct dmaengine_unmap_data *unmap = NULL;
- BUG_ON(disks < 4);
+ BUG_ON(disks < 4 || disks > MAX_DISKS);
if (device)
unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
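Both async_pq changes replace variable-length arrays with a fixed worst-case bound plus an explicit check, the usual way to make such code -Wvla-clean without changing behaviour. A compilable sketch of the pattern (hypothetical helper, not the kernel function):

#include <assert.h>
#include <string.h>

#define MAX_DISKS 255

static void build_coefs(unsigned char *out, int disks,
			const unsigned char *gfexp)
{
	unsigned char coefs[MAX_DISKS];	/* was: coefs[disks - 2] */
	int i;

	assert(disks >= 4 && disks <= MAX_DISKS);
	for (i = 0; i < disks - 2; i++)
		coefs[i] = gfexp[i];
	memcpy(out, coefs, disks - 2);
}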
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index dad95f45b88f..a5edaabae12a 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -81,11 +81,13 @@ static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, stru
init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
} else {
- struct page *blocks[disks];
+ struct page *blocks[NDISKS];
struct page *dest;
int count = 0;
int i;
+ BUG_ON(disks > NDISKS);
+
/* data+Q failure. Reconstruct data from P,
* then rebuild syndrome
*/
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 01c0d4aa2563..f93abf13b5d4 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -70,19 +70,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
- unsigned int bsize)
+static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
+ unsigned int bsize)
{
u8 *addr;
addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
addr = blkcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize, 1);
- return bsize;
}
-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
- unsigned int n)
+static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
+ unsigned int n)
{
if (walk->flags & BLKCIPHER_WALK_COPY) {
blkcipher_map_dst(walk);
@@ -96,49 +95,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
-
- return n;
}
int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err)
{
- unsigned int nbytes = 0;
+ unsigned int n; /* bytes processed */
+ bool more;
- if (likely(err >= 0)) {
- unsigned int n = walk->nbytes - err;
+ if (unlikely(err < 0))
+ goto finish;
- if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
- n = blkcipher_done_fast(walk, n);
- else if (WARN_ON(err)) {
- err = -EINVAL;
- goto err;
- } else
- n = blkcipher_done_slow(walk, n);
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
- nbytes = walk->total - n;
- err = 0;
+ if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
+ blkcipher_done_fast(walk, n);
+ } else {
+ if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
+ err = -EINVAL;
+ goto finish;
+ }
+ blkcipher_done_slow(walk, n);
}
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
-err:
- walk->total = nbytes;
- walk->nbytes = nbytes;
-
- if (nbytes) {
+ if (more) {
crypto_yield(desc->flags);
return blkcipher_walk_next(desc, walk);
}
-
+ err = 0;
+finish:
+ walk->nbytes = 0;
if (walk->iv != desc->info)
memcpy(desc->info, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
kfree(walk->buffer);
if (walk->page)
free_page((unsigned long)walk->page);
-
return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
@@ -512,6 +510,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 20ff2c746e0b..0959b268966c 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -104,7 +104,6 @@ static struct shash_alg digest_null = {
.final = null_final,
.base = {
.cra_name = "digest_null",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/dh.c b/crypto/dh.c
index 5659fe7f446d..09a44de4209d 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -16,14 +16,16 @@
#include <linux/mpi.h>
struct dh_ctx {
- MPI p;
- MPI g;
- MPI xa;
+ MPI p; /* Value is guaranteed to be set. */
+ MPI q; /* Value is optional. */
+ MPI g; /* Value is guaranteed to be set. */
+ MPI xa; /* Value is guaranteed to be set. */
};
static void dh_clear_ctx(struct dh_ctx *ctx)
{
mpi_free(ctx->p);
+ mpi_free(ctx->q);
mpi_free(ctx->g);
mpi_free(ctx->xa);
memset(ctx, 0, sizeof(*ctx));
@@ -60,6 +62,12 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
if (!ctx->p)
return -EINVAL;
+ if (params->q && params->q_size) {
+ ctx->q = mpi_read_raw_data(params->q, params->q_size);
+ if (!ctx->q)
+ return -EINVAL;
+ }
+
ctx->g = mpi_read_raw_data(params->g, params->g_size);
if (!ctx->g)
return -EINVAL;
@@ -93,6 +101,55 @@ err_clear_ctx:
return -EINVAL;
}
+/*
+ * SP800-56A public key verification:
+ *
+ * * If Q is provided as part of the domain parameters, a full validation
+ * according to SP800-56A section 5.6.2.3.1 is performed.
+ *
+ * * If Q is not provided, a partial validation according to SP800-56A section
+ * 5.6.2.3.2 is performed.
+ */
+static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
+{
+ if (unlikely(!ctx->p))
+ return -EINVAL;
+
+ /*
+ * Step 1: Verify that 2 <= y <= p - 2.
+ *
+ * The upper limit check is actually y < p instead of y < p - 1
+ * as the mpi_sub_ui function is not yet available.
+ */
+ if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0)
+ return -EINVAL;
+
+ /* Step 2: Verify that 1 = y^q mod p */
+ if (ctx->q) {
+ MPI val = mpi_alloc(0);
+ int ret;
+
+ if (!val)
+ return -ENOMEM;
+
+ ret = mpi_powm(val, y, ctx->q, ctx->p);
+
+ if (ret) {
+ mpi_free(val);
+ return ret;
+ }
+
+ ret = mpi_cmp_ui(val, 1);
+
+ mpi_free(val);
+
+ if (ret != 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
@@ -115,6 +172,9 @@ static int dh_compute_value(struct kpp_request *req)
ret = -EINVAL;
goto err_free_val;
}
+ ret = dh_is_pubkey_valid(ctx, base);
+ if (ret)
+ goto err_free_base;
} else {
base = ctx->g;
}
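The subgroup test in dh_is_pubkey_valid() is easiest to see with small numbers. Take p = 23 and q = 11 with generator g = 2 (which has order 11 mod 23): any honest public key y = g^x mod p then satisfies y^q mod p == 1, while values outside the subgroup fail. A toy, plain-integer version of the check (the real code works on MPIs):

#include <stdio.h>

static unsigned int powm(unsigned int b, unsigned int e, unsigned int m)
{
	unsigned int r = 1;

	for (; e; e >>= 1, b = b * b % m)
		if (e & 1)
			r = r * b % m;
	return r;
}

int main(void)
{
	printf("%u\n", powm(8, 11, 23));	/* 8 = 2^3: prints 1, accepted */
	printf("%u\n", powm(5, 11, 23));	/* outside subgroup: prints 22, rejected */
	return 0;
}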
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
index 24fdb2ecaa85..edacda5f6a4d 100644
--- a/crypto/dh_helper.c
+++ b/crypto/dh_helper.c
@@ -14,10 +14,12 @@
#include <crypto/dh.h>
#include <crypto/kpp.h>
-#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
+#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 4 * sizeof(int))
-static inline u8 *dh_pack_data(void *dst, const void *src, size_t size)
+static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src, size_t size)
{
+ if (!dst || size > end - dst)
+ return NULL;
memcpy(dst, src, size);
return dst + size;
}
@@ -30,7 +32,7 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
static inline unsigned int dh_data_size(const struct dh *p)
{
- return p->key_size + p->p_size + p->g_size;
+ return p->key_size + p->p_size + p->q_size + p->g_size;
}
unsigned int crypto_dh_key_len(const struct dh *p)
@@ -42,25 +44,27 @@ EXPORT_SYMBOL_GPL(crypto_dh_key_len);
int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
{
u8 *ptr = buf;
+ u8 * const end = ptr + len;
struct kpp_secret secret = {
.type = CRYPTO_KPP_SECRET_TYPE_DH,
.len = len
};
- if (unlikely(!buf))
+ if (unlikely(!len))
return -EINVAL;
- if (len != crypto_dh_key_len(params))
+ ptr = dh_pack_data(ptr, end, &secret, sizeof(secret));
+ ptr = dh_pack_data(ptr, end, &params->key_size,
+ sizeof(params->key_size));
+ ptr = dh_pack_data(ptr, end, &params->p_size, sizeof(params->p_size));
+ ptr = dh_pack_data(ptr, end, &params->q_size, sizeof(params->q_size));
+ ptr = dh_pack_data(ptr, end, &params->g_size, sizeof(params->g_size));
+ ptr = dh_pack_data(ptr, end, params->key, params->key_size);
+ ptr = dh_pack_data(ptr, end, params->p, params->p_size);
+ ptr = dh_pack_data(ptr, end, params->q, params->q_size);
+ ptr = dh_pack_data(ptr, end, params->g, params->g_size);
+ if (ptr != end)
return -EINVAL;
-
- ptr = dh_pack_data(ptr, &secret, sizeof(secret));
- ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
- ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
- ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
- ptr = dh_pack_data(ptr, params->key, params->key_size);
- ptr = dh_pack_data(ptr, params->p, params->p_size);
- dh_pack_data(ptr, params->g, params->g_size);
-
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
@@ -79,6 +83,7 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
+ ptr = dh_unpack_data(&params->q_size, ptr, sizeof(params->q_size));
ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
if (secret.len != crypto_dh_key_len(params))
return -EINVAL;
@@ -88,7 +93,7 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
* some drivers assume otherwise.
*/
if (params->key_size > params->p_size ||
- params->g_size > params->p_size)
+ params->g_size > params->p_size || params->q_size > params->p_size)
return -EINVAL;
/* Don't allocate memory. Set pointers to data within
@@ -96,7 +101,9 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
*/
params->key = (void *)ptr;
params->p = (void *)(ptr + params->key_size);
- params->g = (void *)(ptr + params->key_size + params->p_size);
+ params->q = (void *)(ptr + params->key_size + params->p_size);
+ params->g = (void *)(ptr + params->key_size + params->p_size +
+ params->q_size);
/*
* Don't permit 'p' to be 0. It's not a prime number, and it's subject
@@ -106,6 +113,10 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
if (memchr_inv(params->p, 0, params->p_size) == NULL)
return -EINVAL;
+ /* It is permissible to not provide Q. */
+ if (params->q_size == 0)
+ params->q = NULL;
+
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
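The new dh_pack_data() signature implements a compact bounds-checking idiom: once a write would overflow, NULL is returned and then propagates through every subsequent call, so a single ptr != end test at the end validates the whole sequence. The idiom in isolation, as a userspace sketch:

#include <string.h>

static unsigned char *pack(unsigned char *dst, const unsigned char *end,
			   const void *src, size_t size)
{
	if (!dst || size > (size_t)(end - dst))
		return NULL;	/* poisons all later calls */
	memcpy(dst, src, size);
	return dst + size;
}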
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 466a112a4446..bc52d9562611 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -261,8 +261,7 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inbuflen,
u8 *outbuf, u32 outlen);
-#define DRBG_CTR_NULL_LEN 128
-#define DRBG_OUTSCRATCHLEN DRBG_CTR_NULL_LEN
+#define DRBG_OUTSCRATCHLEN 256
/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -555,8 +554,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
}
/* 10.2.1.5.2 step 4.1 */
- ret = drbg_kcapi_sym_ctr(drbg, drbg->ctr_null_value, DRBG_CTR_NULL_LEN,
- buf, len);
+ ret = drbg_kcapi_sym_ctr(drbg, NULL, 0, buf, len);
if (ret)
return ret;
@@ -1644,9 +1642,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
skcipher_request_free(drbg->ctr_req);
drbg->ctr_req = NULL;
- kfree(drbg->ctr_null_value_buf);
- drbg->ctr_null_value = NULL;
-
kfree(drbg->outscratchpadbuf);
drbg->outscratchpadbuf = NULL;
@@ -1697,15 +1692,6 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
crypto_req_done, &drbg->ctr_wait);
alignmask = crypto_skcipher_alignmask(sk_tfm);
- drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
- GFP_KERNEL);
- if (!drbg->ctr_null_value_buf) {
- drbg_fini_sym_kernel(drbg);
- return -ENOMEM;
- }
- drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
- alignmask + 1);
-
drbg->outscratchpadbuf = kmalloc(DRBG_OUTSCRATCHLEN + alignmask,
GFP_KERNEL);
if (!drbg->outscratchpadbuf) {
@@ -1715,6 +1701,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf,
alignmask + 1);
+ sg_init_table(&drbg->sg_in, 1);
+ sg_init_one(&drbg->sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+
return alignmask;
}
@@ -1743,17 +1732,25 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inlen,
u8 *outbuf, u32 outlen)
{
- struct scatterlist sg_in, sg_out;
+ struct scatterlist *sg_in = &drbg->sg_in, *sg_out = &drbg->sg_out;
+ u32 scratchpad_use = min_t(u32, outlen, DRBG_OUTSCRATCHLEN);
int ret;
- sg_init_one(&sg_in, inbuf, inlen);
- sg_init_one(&sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+ if (inbuf) {
+ /* Use caller-provided input buffer */
+ sg_set_buf(sg_in, inbuf, inlen);
+ } else {
+ /* Use scratchpad for in-place operation */
+ inlen = scratchpad_use;
+ memset(drbg->outscratchpad, 0, scratchpad_use);
+ sg_set_buf(sg_in, drbg->outscratchpad, scratchpad_use);
+ }
while (outlen) {
u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
/* Output buffer may not be valid for SGL, use scratchpad */
- skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
+ skcipher_request_set_crypt(drbg->ctr_req, sg_in, sg_out,
cryptlen, drbg->V);
ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
&drbg->ctr_wait);
@@ -1763,6 +1760,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
crypto_init_wait(&drbg->ctr_wait);
memcpy(outbuf, drbg->outscratchpad, cryptlen);
+ memzero_explicit(drbg->outscratchpad, cryptlen);
outlen -= cryptlen;
outbuf += cryptlen;
@@ -1770,7 +1768,6 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
ret = 0;
out:
- memzero_explicit(drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
return ret;
}
#endif /* CONFIG_CRYPTO_DRBG_CTR */
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 815541309a95..8facafd67802 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1019,6 +1019,36 @@ out:
return ret;
}
+/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
+static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
+
+ /* Check 1: Verify key is not the zero point. */
+ if (ecc_point_is_zero(pk))
+ return -EINVAL;
+
+ /* Check 2: Verify key is in the range [1, p-1]. */
+ if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
+ return -EINVAL;
+ if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
+ return -EINVAL;
+
+ /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
+ vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */
+ vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */
+ vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */
+ vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */
+ vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
+ vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
+ if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
+ return -EINVAL;
+
+ return 0;
+
+}
+
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
u64 *secret)
@@ -1046,16 +1076,20 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto out;
}
+ ecc_swap_digits(public_key, pk->x, ndigits);
+ ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
+ ret = ecc_is_pubkey_valid_partial(curve, pk);
+ if (ret)
+ goto err_alloc_product;
+
+ ecc_swap_digits(private_key, priv, ndigits);
+
product = ecc_alloc_point(ndigits);
if (!product) {
ret = -ENOMEM;
goto err_alloc_product;
}
- ecc_swap_digits(public_key, pk->x, ndigits);
- ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
- ecc_swap_digits(private_key, priv, ndigits);
-
ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
ecc_swap_digits(product->x, secret, ndigits);
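Check 3 of ecc_is_pubkey_valid_partial() verifies the curve equation itself, which is why the a and b coefficients are added to the curve definitions below. With toy numbers: on y^2 = x^3 + 2x + 2 over GF(17), the point (5, 1) passes (both sides equal 1 mod 17) while (5, 2) fails. A plain-integer sketch:

#include <stdio.h>

static int on_curve(unsigned int x, unsigned int y,
		    unsigned int a, unsigned int b, unsigned int p)
{
	return (y * y) % p == (((x * x % p) * x) + a * x + b) % p;
}

int main(void)
{
	printf("%d %d\n", on_curve(5, 1, 2, 2, 17),	/* 1: on the curve */
			  on_curve(5, 2, 2, 2, 17));	/* 0: rejected */
	return 0;
}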
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index b80f45da829c..336ab1805639 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -13,9 +13,11 @@ struct ecc_curve {
struct ecc_point g;
u64 *p;
u64 *n;
+ u64 *a;
+ u64 *b;
};
-/* NIST P-192 */
+/* NIST P-192: a = p - 3 */
static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
0x188DA80EB03090F6ull };
static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
@@ -24,6 +26,10 @@ static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
0xFFFFFFFFFFFFFFFFull };
static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_a[] = { 0xFFFFFFFFFFFFFFFCull, 0xFFFFFFFFFFFFFFFEull,
+ 0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull,
+ 0x64210519E59C80E7ull };
static struct ecc_curve nist_p192 = {
.name = "nist_192",
.g = {
@@ -32,10 +38,12 @@ static struct ecc_curve nist_p192 = {
.ndigits = 3,
},
.p = nist_p192_p,
- .n = nist_p192_n
+ .n = nist_p192_n,
+ .a = nist_p192_a,
+ .b = nist_p192_b
};
-/* NIST P-256 */
+/* NIST P-256: a = p - 3 */
static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
@@ -44,6 +52,10 @@ static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
0x0000000000000000ull, 0xFFFFFFFF00000001ull };
static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
+static u64 nist_p256_a[] = { 0xFFFFFFFFFFFFFFFCull, 0x00000000FFFFFFFFull,
+ 0x0000000000000000ull, 0xFFFFFFFF00000001ull };
+static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull,
+ 0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull };
static struct ecc_curve nist_p256 = {
.name = "nist_256",
.g = {
@@ -52,7 +64,9 @@ static struct ecc_curve nist_p256 = {
.ndigits = 4,
},
.p = nist_p256_p,
- .n = nist_p256_n
+ .n = nist_p256_n,
+ .a = nist_p256_a,
+ .b = nist_p256_b
};
#endif
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 1bffb3f712dd..d9f192b953b2 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -132,7 +132,6 @@ static struct shash_alg ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-generic",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 954a7064a179..393a782679c7 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -188,7 +188,7 @@ static int post_crypt(struct skcipher_request *req)
if (rctx->dst != sg) {
rctx->dst[0] = *sg;
sg_unmark_end(rctx->dst);
- scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
}
rctx->dst[0].length -= offset - sg->offset;
rctx->dst[0].offset = offset;
@@ -265,7 +265,7 @@ static int pre_crypt(struct skcipher_request *req)
if (rctx->src != sg) {
rctx->src[0] = *sg;
sg_unmark_end(rctx->src);
- scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
}
rctx->src[0].length -= offset - sg->offset;
rctx->src[0].offset = offset;
diff --git a/crypto/md4.c b/crypto/md4.c
index 810fefb0a007..9965ec40d9f9 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -217,7 +217,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct md4_ctx),
.base = {
.cra_name = "md4",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD4_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/md5.c b/crypto/md5.c
index f776ef43d621..94dd78144ba3 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -229,7 +229,6 @@ static struct shash_alg alg = {
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 6180b2557836..d057cf5ac4a8 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -514,7 +514,6 @@ static struct aead_alg crypto_morus1280_alg = {
.chunksize = MORUS1280_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct morus1280_ctx),
.cra_alignmask = 0,
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 5eede3749e64..1ca76e54281b 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -511,7 +511,6 @@ static struct aead_alg crypto_morus640_alg = {
.chunksize = MORUS640_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct morus640_ctx),
.cra_alignmask = 0,
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index b7a3a0613a30..47d3a6b83931 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -279,7 +279,6 @@ static struct shash_alg poly1305_alg = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-generic",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 40e053b97b69..5f4472256e27 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -303,7 +303,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd128_ctx),
.base = {
.cra_name = "rmd128",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD128_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 5f3e6ea35268..737645344d1c 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -347,7 +347,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd160_ctx),
.base = {
.cra_name = "rmd160",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index f50c025cc962..0e9d30676a01 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -49,7 +49,7 @@ struct rmd256_ctx {
static void rmd256_transform(u32 *state, const __le32 *in)
{
- u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
+ u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
/* Initialize left lane */
aa = state[0];
@@ -100,7 +100,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
/* Swap contents of "a" registers */
- tmp = aa; aa = aaa; aaa = tmp;
+ swap(aa, aaa);
/* round 2: left lane */
ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
@@ -139,7 +139,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
/* Swap contents of "b" registers */
- tmp = bb; bb = bbb; bbb = tmp;
+ swap(bb, bbb);
/* round 3: left lane */
ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
@@ -178,7 +178,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
/* Swap contents of "c" registers */
- tmp = cc; cc = ccc; ccc = tmp;
+ swap(cc, ccc);
/* round 4: left lane */
ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
@@ -217,7 +217,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
/* Swap contents of "d" registers */
- tmp = dd; dd = ddd; ddd = tmp;
+ swap(dd, ddd);
/* combine results */
state[0] += aa;
@@ -322,7 +322,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd256_ctx),
.base = {
.cra_name = "rmd256",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index e1315e4869e8..3ae1df5bb48c 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -53,7 +53,7 @@ struct rmd320_ctx {
static void rmd320_transform(u32 *state, const __le32 *in)
{
- u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
+ u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
/* Initialize left lane */
aa = state[0];
@@ -106,7 +106,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
/* Swap contents of "a" registers */
- tmp = aa; aa = aaa; aaa = tmp;
+ swap(aa, aaa);
/* round 2: left lane" */
ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
@@ -145,7 +145,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
/* Swap contents of "b" registers */
- tmp = bb; bb = bbb; bbb = tmp;
+ swap(bb, bbb);
/* round 3: left lane" */
ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
@@ -184,7 +184,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
/* Swap contents of "c" registers */
- tmp = cc; cc = ccc; ccc = tmp;
+ swap(cc, ccc);
/* round 4: left lane" */
ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
@@ -223,7 +223,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
/* Swap contents of "d" registers */
- tmp = dd; dd = ddd; ddd = tmp;
+ swap(dd, ddd);
/* round 5: left lane" */
ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
@@ -262,7 +262,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
/* Swap contents of "e" registers */
- tmp = ee; ee = eee; eee = tmp;
+ swap(ee, eee);
/* combine results */
state[0] += aa;
@@ -371,7 +371,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd320_ctx),
.base = {
.cra_name = "rmd320",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD320_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index c16c94f88733..d0b92c1cd6e9 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -91,7 +91,7 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
sg_init_table(dst, 2);
sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);
- scatterwalk_crypto_chain(dst, sg_next(src), 0, 2);
+ scatterwalk_crypto_chain(dst, sg_next(src), 2);
return dst;
}
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 6877cbb9105f..2af64ef81f40 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -76,7 +76,7 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 8f9c47e1a96e..1e5ba6649e8d 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -271,7 +271,7 @@ static struct shash_alg sha256_algs[2] = { {
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -285,7 +285,7 @@ static struct shash_alg sha256_algs[2] = { {
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7f6735d9003f..7ed98367d4fb 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -250,7 +250,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -261,7 +260,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -272,7 +270,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -283,7 +280,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index eba965d18bfc..4097cd555eb6 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -23,6 +23,28 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
+const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
+ 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38,
+ 0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a,
+ 0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43,
+ 0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda,
+ 0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb,
+ 0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b
+};
+EXPORT_SYMBOL_GPL(sha384_zero_message_hash);
+
+const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = {
+ 0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd,
+ 0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07,
+ 0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc,
+ 0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce,
+ 0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0,
+ 0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f,
+ 0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81,
+ 0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e
+};
+EXPORT_SYMBOL_GPL(sha512_zero_message_hash);
+
static inline u64 Ch(u64 x, u64 y, u64 z)
{
return z ^ (x & (y ^ z));
@@ -171,7 +193,7 @@ static struct shash_alg sha512_algs[2] = { {
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -185,7 +207,7 @@ static struct shash_alg sha512_algs[2] = { {
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
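The exported zero-message digests mirror the existing sha1/sha256 equivalents; the typical consumer is a hardware driver whose engine cannot be fed an empty message. A hypothetical driver sketch (the my_* names and types are made up):

static int my_sha384_final(struct my_hash_ctx *ctx, u8 *out)
{
	if (ctx->total_len == 0) {
		/* engine can't hash 0 bytes; use the known digest */
		memcpy(out, sha384_zero_message_hash, SHA384_DIGEST_SIZE);
		return 0;
	}
	return my_engine_finish(ctx, out);	/* hypothetical helper */
}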
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0fe2a2923ad0..0bd8c6caa498 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
@@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
addr = skcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize,
(walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
- return 0;
}
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
- unsigned int n = walk->nbytes - err;
- unsigned int nbytes;
-
- nbytes = walk->total - n;
-
- if (unlikely(err < 0)) {
- nbytes = 0;
- n = 0;
- } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
- SKCIPHER_WALK_SLOW |
- SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF)))) {
+ unsigned int n; /* bytes processed */
+ bool more;
+
+ if (unlikely(err < 0))
+ goto finish;
+
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
+
+ if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+ SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
unmap_src:
skcipher_unmap_src(walk);
} else if (walk->flags & SKCIPHER_WALK_DIFF) {
@@ -131,28 +132,28 @@ unmap_src:
skcipher_unmap_dst(walk);
} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
err = -EINVAL;
- nbytes = 0;
- } else
- n = skcipher_done_slow(walk, n);
+ goto finish;
+ }
+ skcipher_done_slow(walk, n);
+ goto already_advanced;
}
- if (err > 0)
- err = 0;
-
- walk->total = nbytes;
- walk->nbytes = nbytes;
-
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
+already_advanced:
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
- if (nbytes) {
+ if (more) {
crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
CRYPTO_TFM_REQ_MAY_SLEEP : 0);
return skcipher_walk_next(walk);
}
+ err = 0;
+finish:
+ walk->nbytes = 0;
/* Short-circuit for the common/fast path. */
if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
@@ -387,7 +388,6 @@ set_phys_lowmem:
}
return err;
}
-EXPORT_SYMBOL_GPL(skcipher_walk_next);
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
@@ -399,7 +399,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
unsigned size;
u8 *iv;
- aligned_bs = ALIGN(bs, alignmask);
+ aligned_bs = ALIGN(bs, alignmask + 1);
/* Minimum size to align buffer by alignmask. */
size = alignmask & ~a;
@@ -437,7 +437,6 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
}
walk->page = NULL;
- walk->nbytes = walk->total;
return skcipher_walk_next(walk);
}
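The one-character skcipher_copy_iv() change is a real bug fix: the kernel's ALIGN(x, a) rounds x up to a multiple of the alignment a, so passing the mask instead of mask + 1 under-sizes the aligned block size. A quick userspace demonstration:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* kernel's ALIGN() */

int main(void)
{
	unsigned int bs = 1, alignmask = 3;	/* e.g. a stream cipher */

	printf("%u %u\n", ALIGN(bs, alignmask),		/* 1: wrong */
			  ALIGN(bs, alignmask + 1));	/* 4: fixed */
	return 0;
}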
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9e823d99f095..9a5c60f08aad 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -184,7 +184,6 @@ static struct shash_alg sm3_alg = {
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d5bcdd905007..bdde95e8d369 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -415,12 +415,14 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
}
- if (secs)
+ if (secs) {
ret = test_mb_aead_jiffies(data, enc, *b_size,
secs, num_mb);
- else
+ cond_resched();
+ } else {
ret = test_mb_aead_cycles(data, enc, *b_size,
num_mb);
+ }
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
@@ -660,11 +662,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
*b_size + (enc ? 0 : authsize),
iv);
- if (secs)
+ if (secs) {
ret = test_aead_jiffies(req, enc, *b_size,
secs);
- else
+ cond_resched();
+ } else {
ret = test_aead_cycles(req, enc, *b_size);
+ }
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
@@ -876,11 +880,13 @@ static void test_mb_ahash_speed(const char *algo, unsigned int secs,
i, speed[i].blen, speed[i].plen,
speed[i].blen / speed[i].plen);
- if (secs)
+ if (secs) {
ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
num_mb);
- else
+ cond_resched();
+ } else {
ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
+ }
if (ret) {
@@ -1103,12 +1109,14 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
ahash_request_set_crypt(req, sg, output, speed[i].plen);
- if (secs)
+ if (secs) {
ret = test_ahash_jiffies(req, speed[i].blen,
speed[i].plen, output, secs);
- else
+ cond_resched();
+ } else {
ret = test_ahash_cycles(req, speed[i].blen,
speed[i].plen, output);
+ }
if (ret) {
pr_err("hashing failed ret=%d\n", ret);
@@ -1367,13 +1375,15 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
iv);
}
- if (secs)
+ if (secs) {
ret = test_mb_acipher_jiffies(data, enc,
*b_size, secs,
num_mb);
- else
+ cond_resched();
+ } else {
ret = test_mb_acipher_cycles(data, enc,
*b_size, num_mb);
+ }
if (ret) {
pr_err("%s() failed flags=%x\n", e,
@@ -1581,12 +1591,14 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
- if (secs)
+ if (secs) {
ret = test_acipher_jiffies(req, enc,
*b_size, secs);
- else
+ cond_resched();
+ } else {
ret = test_acipher_cycles(req, enc,
*b_size);
+ }
if (ret) {
pr_err("%s() failed flags=%x\n", e,
@@ -1939,7 +1951,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
break;
case 109:
- ret += tcrypt_test("vmac(aes)");
+ ret += tcrypt_test("vmac64(aes)");
break;
case 111:
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 11e45352fd0b..a1d42245082a 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -259,9 +259,15 @@ out_nostate:
return ret;
}
+enum hash_test {
+ HASH_TEST_DIGEST,
+ HASH_TEST_FINAL,
+ HASH_TEST_FINUP
+};
+
static int __test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template, unsigned int tcount,
- bool use_digest, const int align_offset)
+ enum hash_test test_type, const int align_offset)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
size_t digest_size = crypto_ahash_digestsize(tfm);
@@ -332,14 +338,17 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- if (use_digest) {
+ switch (test_type) {
+ case HASH_TEST_DIGEST:
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- } else {
+ break;
+
+ case HASH_TEST_FINAL:
memset(result, 1, digest_size);
ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
@@ -371,6 +380,29 @@ static int __test_hash(struct crypto_ahash *tfm,
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ break;
+
+ case HASH_TEST_FINUP:
+ memset(result, 1, digest_size);
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
+ ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: final failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ break;
}
if (memcmp(result, template[i].digest,
@@ -383,6 +415,9 @@ static int __test_hash(struct crypto_ahash *tfm,
}
}
+ if (test_type)
+ goto out;
+
j = 0;
for (i = 0; i < tcount; i++) {
/* alignment tests are only done with continuous buffers */
@@ -540,24 +575,24 @@ out_nobuf:
static int test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template,
- unsigned int tcount, bool use_digest)
+ unsigned int tcount, enum hash_test test_type)
{
unsigned int alignmask;
int ret;
- ret = __test_hash(tfm, template, tcount, use_digest, 0);
+ ret = __test_hash(tfm, template, tcount, test_type, 0);
if (ret)
return ret;
/* test unaligned buffers, check with one byte offset */
- ret = __test_hash(tfm, template, tcount, use_digest, 1);
+ ret = __test_hash(tfm, template, tcount, test_type, 1);
if (ret)
return ret;
alignmask = crypto_tfm_alg_alignmask(&tfm->base);
if (alignmask) {
/* Check if alignment mask for tfm is correctly set. */
- ret = __test_hash(tfm, template, tcount, use_digest,
+ ret = __test_hash(tfm, template, tcount, test_type,
alignmask + 1);
if (ret)
return ret;
@@ -1803,9 +1838,11 @@ static int __alg_test_hash(const struct hash_testvec *template,
return PTR_ERR(tfm);
}
- err = test_hash(tfm, template, tcount, true);
+ err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST);
+ if (!err)
+ err = test_hash(tfm, template, tcount, HASH_TEST_FINAL);
if (!err)
- err = test_hash(tfm, template, tcount, false);
+ err = test_hash(tfm, template, tcount, HASH_TEST_FINUP);
crypto_free_ahash(tfm);
return err;
}
@@ -3478,10 +3515,10 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(tgr192_tv_template)
}
}, {
- .alg = "vmac(aes)",
+ .alg = "vmac64(aes)",
.test = alg_test_hash,
.suite = {
- .hash = __VECS(aes_vmac128_tv_template)
+ .hash = __VECS(vmac64_aes_tv_template)
}
}, {
.alg = "wp256",
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index b950aa234e43..173111c70746 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -641,15 +641,17 @@ static const struct kpp_testvec dh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x01\x00" /* type */
- "\x11\x02" /* len */
+ "\x15\x02" /* len */
"\x00\x01\x00\x00" /* key_size */
"\x00\x01\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x01\x00\x00\x00" /* g_size */
#else
"\x00\x01" /* type */
- "\x02\x11" /* len */
+ "\x02\x15" /* len */
"\x00\x00\x01\x00" /* key_size */
"\x00\x00\x01\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x00\x00\x00\x01" /* g_size */
#endif
/* xa */
@@ -739,7 +741,7 @@ static const struct kpp_testvec dh_tv_template[] = {
"\xd3\x34\x49\xad\x64\xa6\xb1\xc0\x59\x28\x75\x60\xa7\x8a\xb0\x11"
"\x56\x89\x42\x74\x11\xf5\xf6\x5e\x6f\x16\x54\x6a\xb1\x76\x4d\x50"
"\x8a\x68\xc1\x5b\x82\xb9\x0d\x00\x32\x50\xed\x88\x87\x48\x92\x17",
- .secret_size = 529,
+ .secret_size = 533,
.b_public_size = 256,
.expected_a_public_size = 256,
.expected_ss_size = 256,
@@ -748,15 +750,17 @@ static const struct kpp_testvec dh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x01\x00" /* type */
- "\x11\x02" /* len */
+ "\x15\x02" /* len */
"\x00\x01\x00\x00" /* key_size */
"\x00\x01\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x01\x00\x00\x00" /* g_size */
#else
"\x00\x01" /* type */
- "\x02\x11" /* len */
+ "\x02\x15" /* len */
"\x00\x00\x01\x00" /* key_size */
"\x00\x00\x01\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x00\x00\x00\x01" /* g_size */
#endif
/* xa */
@@ -846,7 +850,7 @@ static const struct kpp_testvec dh_tv_template[] = {
"\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a"
"\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee"
"\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd",
- .secret_size = 529,
+ .secret_size = 533,
.b_public_size = 256,
.expected_a_public_size = 256,
.expected_ss_size = 256,
@@ -4603,105 +4607,158 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
-static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
- '\x02', '\x03', '\x02', '\x02',
- '\x02', '\x04', '\x01', '\x07',
- '\x04', '\x01', '\x04', '\x03',};
-static const char vmac_string2[128] = {'a', 'b', 'c',};
-static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- };
+static const char vmac64_string1[144] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02',
+ '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',
+};
-static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
- 'i', 'j', 'l', 'm',
- 'o', 'p', 'r', 's',
- 't', 'u', 'w', 'x', 'z'};
+static const char vmac64_string2[144] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'a', 'b', 'c',
+};
-static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
- 'o', 'l', 'k', ']', '%',
- '9', '2', '7', '!', 'A'};
+static const char vmac64_string3[144] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
+ 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
+ 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
+ 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
+ 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
+};
-static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
- 'i', '!', '#', 'w', '0',
- 'z', '/', '4', 'A', 'n'};
+static const char vmac64_string4[33] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm',
+ 'o', 'p', 'r', 's', 't', 'u', 'w', 'x',
+ 'z',
+};
-static const struct hash_testvec aes_vmac128_tv_template[] = {
- {
+static const char vmac64_string5[143] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'r', 'm', 'b', 't', 'c', 'o', 'l', 'k',
+ ']', '%', '9', '2', '7', '!', 'A',
+};
+
+static const char vmac64_string6[145] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'p', 't', '*', '7', 'l', 'i', '!', '#',
+ 'w', '0', 'z', '/', '4', 'A', 'n',
+};
+
+static const struct hash_testvec vmac64_aes_tv_template[] = {
+ { /* draft-krovetz-vmac-01 test vector 1 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghi",
+ .psize = 16,
+ .digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b",
+ }, { /* draft-krovetz-vmac-01 test vector 2 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc",
+ .psize = 19,
+ .digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5",
+ }, { /* draft-krovetz-vmac-01 test vector 3 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
+ .psize = 64,
+ .digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98",
+ }, { /* draft-krovetz-vmac-01 test vector 4 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
+ .psize = 316,
+ .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
+ .tap = { 1, 100, 200, 15 },
+ .np = 4,
+ }, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = NULL,
- .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
- .psize = 0,
.ksize = 16,
+ .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .psize = 16,
+ .digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07",
}, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string1,
- .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
- .psize = 128,
- .ksize = 16,
+ .ksize = 16,
+ .plaintext = vmac64_string1,
+ .psize = sizeof(vmac64_string1),
+ .digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce",
}, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string2,
- .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
- .psize = 128,
- .ksize = 16,
+ .ksize = 16,
+ .plaintext = vmac64_string2,
+ .psize = sizeof(vmac64_string2),
+ .digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9",
}, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string3,
- .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
- .psize = 128,
- .ksize = 16,
+ .ksize = 16,
+ .plaintext = vmac64_string3,
+ .psize = sizeof(vmac64_string3),
+ .digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d",
}, {
.key = "abcdefghijklmnop",
- .plaintext = NULL,
- .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
- .psize = 0,
.ksize = 16,
+ .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .psize = 16,
+ .digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b",
}, {
- .key = "abcdefghijklmnop",
- .plaintext = vmac_string1,
- .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
- .psize = 128,
- .ksize = 16,
- }, {
- .key = "abcdefghijklmnop",
- .plaintext = vmac_string2,
- .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
- .psize = 128,
- .ksize = 16,
- }, {
- .key = "abcdefghijklmnop",
- .plaintext = vmac_string3,
- .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
- .psize = 128,
- .ksize = 16,
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .plaintext = vmac_string4,
- .digest = "\xab\xa5\x0f\xea\x42\x4e\xa1\x5f",
- .psize = sizeof(vmac_string4),
- .ksize = 16,
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .plaintext = vmac_string5,
- .digest = "\x25\x31\x98\xbc\x1d\xe8\x67\x60",
- .psize = sizeof(vmac_string5),
- .ksize = 16,
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .plaintext = vmac_string6,
- .digest = "\xc4\xae\x9b\x47\x95\x65\xeb\x41",
- .psize = sizeof(vmac_string6),
- .ksize = 16,
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = vmac64_string1,
+ .psize = sizeof(vmac64_string1),
+ .digest = "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab",
+ }, {
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = vmac64_string2,
+ .psize = sizeof(vmac64_string2),
+ .digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11",
+ }, {
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = vmac64_string3,
+ .psize = sizeof(vmac64_string3),
+ .digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b",
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .ksize = 16,
+ .plaintext = vmac64_string4,
+ .psize = sizeof(vmac64_string4),
+ .digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab",
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .ksize = 16,
+ .plaintext = vmac64_string5,
+ .psize = sizeof(vmac64_string5),
+ .digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25",
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .ksize = 16,
+ .plaintext = vmac64_string6,
+ .psize = sizeof(vmac64_string6),
+ .digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4",
},
};
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 321bc6ff2a9d..022d3dd76c3b 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -636,7 +636,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr192",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -648,7 +647,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr160",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -660,7 +658,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr128",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/vmac.c b/crypto/vmac.c
index df76a816cfb2..5f436dfdfc61 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -1,6 +1,10 @@
/*
- * Modified to interface to the Linux kernel
+ * VMAC: Message Authentication Code using Universal Hashing
+ *
+ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
+ *
* Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2018, Google Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -16,14 +20,15 @@
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
+/*
+ * Derived from:
+ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ * This implementation is hereby placed in the public domain.
+ * The authors offer no warranty. Use at your own risk.
+ * Last modified: 17 APR 08, 1700 PDT
+ */
+#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
@@ -31,10 +36,42 @@
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
-#include <crypto/vmac.h>
#include <crypto/internal/hash.h>
/*
+ * User definable settings.
+ */
+#define VMAC_TAG_LEN 64
+#define VMAC_KEY_SIZE 128 /* Must be 128, 192 or 256 */
+#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
+#define VMAC_NHBYTES 128 /* Must be 2^i for some 3 < i < 13; standard = 128 */
+#define VMAC_NONCEBYTES 16
+
+/* per-transform (per-key) context */
+struct vmac_tfm_ctx {
+ struct crypto_cipher *cipher;
+ u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+ u64 polykey[2*VMAC_TAG_LEN/64];
+ u64 l3key[2*VMAC_TAG_LEN/64];
+};
+
+/* per-request context */
+struct vmac_desc_ctx {
+ union {
+ u8 partial[VMAC_NHBYTES]; /* partial block */
+ __le64 partial_words[VMAC_NHBYTES / 8];
+ };
+ unsigned int partial_size; /* size of the partial block */
+ bool first_block_processed;
+ u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
+ union {
+ u8 bytes[VMAC_NONCEBYTES];
+ __be64 pads[VMAC_NONCEBYTES / 8];
+ } nonce;
+ unsigned int nonce_size; /* nonce bytes filled so far */
+};
+
+/*
* Constants and masks
*/
#define UINT64_C(x) x##ULL
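The two new context structures codify the split the old code lacked: everything derived from the key is computed once in setkey() and lives in the tfm context, while all per-message state moves to the request context, so one keyed tfm can safely serve concurrent requests. The general shape of the pattern (a sketch, not the vmac definitions):

struct tfm_ctx {		/* per key: filled in setkey(), then read-only */
	u64 derived_subkeys[4];
};

struct desc_ctx {		/* per message: private to one request */
	u64 running_hash[2];
	u8 partial[128];
	unsigned int partial_size;
};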
@@ -318,13 +355,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
} while (0)
#endif
-static void vhash_abort(struct vmac_ctx *ctx)
-{
- ctx->polytmp[0] = ctx->polykey[0] ;
- ctx->polytmp[1] = ctx->polykey[1] ;
- ctx->first_block_processed = 0;
-}
-
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
@@ -364,280 +394,227 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
return rl;
}
-static void vhash_update(const unsigned char *m,
- unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
- struct vmac_ctx *ctx)
+/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
+static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
+ struct vmac_desc_ctx *dctx,
+ const __le64 *mptr, unsigned int blocks)
{
- u64 rh, rl, *mptr;
- const u64 *kptr = (u64 *)ctx->nhkey;
- int i;
- u64 ch, cl;
- u64 pkh = ctx->polykey[0];
- u64 pkl = ctx->polykey[1];
-
- if (!mbytes)
- return;
-
- BUG_ON(mbytes % VMAC_NHBYTES);
-
- mptr = (u64 *)m;
- i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
-
- ch = ctx->polytmp[0];
- cl = ctx->polytmp[1];
-
- if (!ctx->first_block_processed) {
- ctx->first_block_processed = 1;
+ const u64 *kptr = tctx->nhkey;
+ const u64 pkh = tctx->polykey[0];
+ const u64 pkl = tctx->polykey[1];
+ u64 ch = dctx->polytmp[0];
+ u64 cl = dctx->polytmp[1];
+ u64 rh, rl;
+
+ if (!dctx->first_block_processed) {
+ dctx->first_block_processed = true;
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
ADD128(ch, cl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
- i--;
+ blocks--;
}
- while (i--) {
+ while (blocks--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
- ctx->polytmp[0] = ch;
- ctx->polytmp[1] = cl;
+ dctx->polytmp[0] = ch;
+ dctx->polytmp[1] = cl;
}
-static u64 vhash(unsigned char m[], unsigned int mbytes,
- u64 *tagl, struct vmac_ctx *ctx)
+static int vmac_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
{
- u64 rh, rl, *mptr;
- const u64 *kptr = (u64 *)ctx->nhkey;
- int i, remaining;
- u64 ch, cl;
- u64 pkh = ctx->polykey[0];
- u64 pkl = ctx->polykey[1];
-
- mptr = (u64 *)m;
- i = mbytes / VMAC_NHBYTES;
- remaining = mbytes % VMAC_NHBYTES;
-
- if (ctx->first_block_processed) {
- ch = ctx->polytmp[0];
- cl = ctx->polytmp[1];
- } else if (i) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
- ch &= m62;
- ADD128(ch, cl, pkh, pkl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- i--;
- } else if (remaining) {
- nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
- ch &= m62;
- ADD128(ch, cl, pkh, pkl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- goto do_l3;
- } else {/* Empty String */
- ch = pkh; cl = pkl;
- goto do_l3;
- }
-
- while (i--) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- }
- if (remaining) {
- nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- }
-
-do_l3:
- vhash_abort(ctx);
- remaining *= 8;
- return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
-}
+ struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+ __be64 out[2];
+ u8 in[16] = { 0 };
+ unsigned int i;
+ int err;
-static u64 vmac(unsigned char m[], unsigned int mbytes,
- const unsigned char n[16], u64 *tagl,
- struct vmac_ctx_t *ctx)
-{
- u64 *in_n, *out_p;
- u64 p, h;
- int i;
-
- in_n = ctx->__vmac_ctx.cached_nonce;
- out_p = ctx->__vmac_ctx.cached_aes;
-
- i = n[15] & 1;
- if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
- in_n[0] = *(u64 *)(n);
- in_n[1] = *(u64 *)(n+8);
- ((unsigned char *)in_n)[15] &= 0xFE;
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out_p, (unsigned char *)in_n);
-
- ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
+ if (keylen != VMAC_KEY_LEN) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
}
- p = be64_to_cpup(out_p + i);
- h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
- return le64_to_cpu(p + h);
-}
-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
-{
- u64 in[2] = {0}, out[2];
- unsigned i;
- int err = 0;
-
- err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
+ err = crypto_cipher_setkey(tctx->cipher, key, keylen);
if (err)
return err;
/* Fill nh key */
- ((unsigned char *)in)[0] = 0x80;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
- ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
- ((unsigned char *)in)[15] += 1;
+ in[0] = 0x80;
+ for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->nhkey[i] = be64_to_cpu(out[0]);
+ tctx->nhkey[i+1] = be64_to_cpu(out[1]);
+ in[15]++;
}
/* Fill poly key */
- ((unsigned char *)in)[0] = 0xC0;
- in[1] = 0;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.polytmp[i] =
- ctx->__vmac_ctx.polykey[i] =
- be64_to_cpup(out) & mpoly;
- ctx->__vmac_ctx.polytmp[i+1] =
- ctx->__vmac_ctx.polykey[i+1] =
- be64_to_cpup(out+1) & mpoly;
- ((unsigned char *)in)[15] += 1;
+ in[0] = 0xC0;
+ in[15] = 0;
+ for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
+ tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
+ in[15]++;
}
/* Fill ip key */
- ((unsigned char *)in)[0] = 0xE0;
- in[1] = 0;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
+ in[0] = 0xE0;
+ in[15] = 0;
+ for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
do {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
- ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
- ((unsigned char *)in)[15] += 1;
- } while (ctx->__vmac_ctx.l3key[i] >= p64
- || ctx->__vmac_ctx.l3key[i+1] >= p64);
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->l3key[i] = be64_to_cpu(out[0]);
+ tctx->l3key[i+1] = be64_to_cpu(out[1]);
+ in[15]++;
+ } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
}
- /* Invalidate nonce/aes cache and reset other elements */
- ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
- ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
- ctx->__vmac_ctx.first_block_processed = 0;
-
- return err;
+ return 0;
}
-static int vmac_setkey(struct crypto_shash *parent,
- const u8 *key, unsigned int keylen)
+static int vmac_init(struct shash_desc *desc)
{
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
- if (keylen != VMAC_KEY_LEN) {
- crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return vmac_set_key((u8 *)key, ctx);
-}
-
-static int vmac_init(struct shash_desc *pdesc)
-{
+ dctx->partial_size = 0;
+ dctx->first_block_processed = false;
+ memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
+ dctx->nonce_size = 0;
return 0;
}
-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
- unsigned int len)
+static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
- int expand;
- int min;
-
- expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
- VMAC_NHBYTES - ctx->partial_size : 0;
-
- min = len < expand ? len : expand;
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+ unsigned int n;
+
+ /* Nonce is passed as the first VMAC_NONCEBYTES bytes of data */
+ if (dctx->nonce_size < VMAC_NONCEBYTES) {
+ n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
+ memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
+ dctx->nonce_size += n;
+ p += n;
+ len -= n;
+ }
- memcpy(ctx->partial + ctx->partial_size, p, min);
- ctx->partial_size += min;
+ if (dctx->partial_size) {
+ n = min(len, VMAC_NHBYTES - dctx->partial_size);
+ memcpy(&dctx->partial[dctx->partial_size], p, n);
+ dctx->partial_size += n;
+ p += n;
+ len -= n;
+ if (dctx->partial_size == VMAC_NHBYTES) {
+ vhash_blocks(tctx, dctx, dctx->partial_words, 1);
+ dctx->partial_size = 0;
+ }
+ }
- if (len < expand)
- return 0;
+ if (len >= VMAC_NHBYTES) {
+ n = round_down(len, VMAC_NHBYTES);
+ /* TODO: 'p' may be misaligned here */
+ vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
+ p += n;
+ len -= n;
+ }
- vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
- ctx->partial_size = 0;
+ if (len) {
+ memcpy(dctx->partial, p, len);
+ dctx->partial_size = len;
+ }
- len -= expand;
- p += expand;
+ return 0;
+}
- if (len % VMAC_NHBYTES) {
- memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
- len % VMAC_NHBYTES);
- ctx->partial_size = len % VMAC_NHBYTES;
+static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
+ struct vmac_desc_ctx *dctx)
+{
+ unsigned int partial = dctx->partial_size;
+ u64 ch = dctx->polytmp[0];
+ u64 cl = dctx->polytmp[1];
+
+ /* L1 and L2-hash the final block if needed */
+ if (partial) {
+ /* Zero-pad to next 128-bit boundary */
+ unsigned int n = round_up(partial, 16);
+ u64 rh, rl;
+
+ memset(&dctx->partial[partial], 0, n - partial);
+ nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
+ rh &= m62;
+ if (dctx->first_block_processed)
+ poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
+ rh, rl);
+ else
+ ADD128(ch, cl, rh, rl);
}
- vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
-
- return 0;
+ /* L3-hash the 128-bit output of L2-hash */
+ return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}
-static int vmac_final(struct shash_desc *pdesc, u8 *out)
+static int vmac_final(struct shash_desc *desc, u8 *out)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
- vmac_t mac;
- u8 nonce[16] = {};
-
- /* vmac() ends up accessing outside the array bounds that
- * we specify. In appears to access up to the next 2-word
- * boundary. We'll just be uber cautious and zero the
- * unwritten bytes in the buffer.
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+ int index;
+ u64 hash, pad;
+
+ if (dctx->nonce_size != VMAC_NONCEBYTES)
+ return -EINVAL;
+
+ /*
+ * The VMAC specification requires a nonce at least 1 bit shorter than
+ * the block cipher's block length, so we actually only accept a 127-bit
+ * nonce. We define the unused bit to be the first one and require that
+ * it be 0, so the needed prepending of a 0 bit is implicit.
*/
- if (ctx->partial_size) {
- memset(ctx->partial + ctx->partial_size, 0,
- VMAC_NHBYTES - ctx->partial_size);
- }
- mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
- memcpy(out, &mac, sizeof(vmac_t));
- memzero_explicit(&mac, sizeof(vmac_t));
- memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
- ctx->partial_size = 0;
+ if (dctx->nonce.bytes[0] & 0x80)
+ return -EINVAL;
+
+ /* Finish calculating the VHASH of the message */
+ hash = vhash_final(tctx, dctx);
+
+ /* Generate pseudorandom pad by encrypting the nonce */
+ BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
+ index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
+ dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
+ crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
+ dctx->nonce.bytes);
+ pad = be64_to_cpu(dctx->nonce.pads[index]);
+
+ /* The VMAC is the sum of VHASH and the pseudorandom pad */
+ put_unaligned_be64(hash + pad, out);
return 0;
}
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
- struct crypto_cipher *cipher;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- ctx->child = cipher;
+ tctx->cipher = cipher;
return 0;
}
static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->child);
+ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_cipher(tctx->cipher);
}
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -655,7 +632,11 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (IS_ERR(alg))
return PTR_ERR(alg);
- inst = shash_alloc_instance("vmac", alg);
+ err = -EINVAL;
+ if (alg->cra_blocksize != VMAC_NONCEBYTES)
+ goto out_put_alg;
+
+ inst = shash_alloc_instance(tmpl->name, alg);
err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
@@ -670,11 +651,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
- inst->alg.digestsize = sizeof(vmac_t);
- inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
+ inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
inst->alg.base.cra_init = vmac_init_tfm;
inst->alg.base.cra_exit = vmac_exit_tfm;
+ inst->alg.descsize = sizeof(struct vmac_desc_ctx);
+ inst->alg.digestsize = VMAC_TAG_LEN / 8;
inst->alg.init = vmac_init;
inst->alg.update = vmac_update;
inst->alg.final = vmac_final;
@@ -691,8 +673,8 @@ out_put_alg:
return err;
}
-static struct crypto_template vmac_tmpl = {
- .name = "vmac",
+static struct crypto_template vmac64_tmpl = {
+ .name = "vmac64",
.create = vmac_create,
.free = shash_free_instance,
.module = THIS_MODULE,
@@ -700,12 +682,12 @@ static struct crypto_template vmac_tmpl = {
static int __init vmac_module_init(void)
{
- return crypto_register_template(&vmac_tmpl);
+ return crypto_register_template(&vmac64_tmpl);
}
static void __exit vmac_module_exit(void)
{
- crypto_unregister_template(&vmac_tmpl);
+ crypto_unregister_template(&vmac64_tmpl);
}
module_init(vmac_module_init);
@@ -713,4 +695,4 @@ module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
-MODULE_ALIAS_CRYPTO("vmac");
+MODULE_ALIAS_CRYPTO("vmac64");
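
With this rewrite, "vmac64" becomes an ordinary shash whose nonce is simply the first VMAC_NONCEBYTES (16) bytes fed to the update path, as the comment in vmac_update() states. A minimal usage sketch under that assumption (the function and variable names below are illustrative, not from this patch):

	#include <crypto/hash.h>

	static int vmac64_digest_example(const u8 *key,   /* 16-byte AES key */
					 const u8 *nonce, /* 16 bytes, first bit 0 */
					 const u8 *msg, unsigned int len,
					 u8 *tag)         /* 8-byte output */
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_shash_setkey(tfm, key, 16);
		if (!err) {
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			/* nonce first, then the message proper */
			err = crypto_shash_init(desc) ?:
			      crypto_shash_update(desc, nonce, 16) ?:
			      crypto_shash_update(desc, msg, len) ?:
			      crypto_shash_final(desc, tag);
		}
		crypto_free_shash(tfm);
		return err;
	}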
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 7ee5a043a988..149e577fb772 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1127,7 +1127,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp512",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1139,7 +1138,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp384",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1151,7 +1149,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp256",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/xts.c b/crypto/xts.c
index 12284183bd20..ccf55fbb8bc2 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -138,7 +138,7 @@ static int post_crypt(struct skcipher_request *req)
if (rctx->dst != sg) {
rctx->dst[0] = *sg;
sg_unmark_end(rctx->dst);
- scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
}
rctx->dst[0].length -= offset - sg->offset;
rctx->dst[0].offset = offset;
@@ -204,7 +204,7 @@ static int pre_crypt(struct skcipher_request *req)
if (rctx->src != sg) {
rctx->src[0] = *sg;
sg_unmark_end(rctx->src);
- scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
}
rctx->src[0].length -= offset - sg->offset;
rctx->src[0].offset = offset;
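
The two xts.c hunks track an interface change only: scatterwalk_crypto_chain() dropped its unused third argument. For reference, the helper after this change presumably reduces to something like the following sketch of <crypto/scatterwalk.h>:

	static inline void scatterwalk_crypto_chain(struct scatterlist *head,
						    struct scatterlist *sg, int num)
	{
		if (sg)
			sg_chain(head, num, sg);	/* splice the next list in */
		else
			sg_mark_end(head);
	}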
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 95b9ccc08165..ab4d43923c4d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -9,6 +9,8 @@ source "drivers/bus/Kconfig"
source "drivers/connector/Kconfig"
+source "drivers/gnss/Kconfig"
+
source "drivers/mtd/Kconfig"
source "drivers/of/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 24cd47014657..578f469f72fb 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -76,7 +76,7 @@ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
-obj-$(CONFIG_SCSI) += scsi/
+obj-y += scsi/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
@@ -185,3 +185,4 @@ obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
+obj-$(CONFIG_GNSS) += gnss/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b533eeb6139d..4a46344bf0e3 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -5,11 +5,10 @@
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
- depends on !IA64_HP_SIM
- depends on IA64 || X86 || ARM64
+ depends on ARCH_SUPPORTS_ACPI
depends on PCI
select PNP
- default y if (IA64 || X86)
+ default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
Linux requires an ACPI-compliant platform (hardware/firmware),
@@ -41,6 +40,9 @@ menuconfig ACPI
<http://www.acpi.info>
<http://www.uefi.org/acpi/specs>
+config ARCH_SUPPORTS_ACPI
+ bool
+
if ACPI
config ACPI_LEGACY_TABLES_LOOKUP
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f8fecfec5df9..9706613eecf9 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
#define LPSS_GPIODEF0_DMA_LLP BIT(13)
static DEFINE_MUTEX(lpss_iosf_mutex);
+static bool lpss_iosf_d3_entered;
static void lpss_iosf_enter_d3_state(void)
{
@@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void)
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
LPSS_IOSF_GPIODEF0, value1, mask1);
+
+ lpss_iosf_d3_entered = true;
+
exit:
mutex_unlock(&lpss_iosf_mutex);
}
@@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void)
mutex_lock(&lpss_iosf_mutex);
+ if (!lpss_iosf_d3_entered)
+ goto exit;
+
+ lpss_iosf_d3_entered = false;
+
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
LPSS_IOSF_GPIODEF0, value1, mask1);
@@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void)
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
LPSS_IOSF_PMCSR, value2, mask2);
+exit:
mutex_unlock(&lpss_iosf_mutex);
}
-static int acpi_lpss_suspend(struct device *dev, bool runtime)
+static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- bool wakeup = runtime || device_may_wakeup(dev);
int ret;
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime)
* wrong status for devices being about to be powered off. See
* lpss_iosf_enter_d3_state() for further information.
*/
- if ((runtime || !pm_suspend_via_firmware()) &&
+ if (acpi_target_system_state() == ACPI_STATE_S0 &&
lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_enter_d3_state();
return ret;
}
-static int acpi_lpss_resume(struct device *dev, bool runtime)
+static int acpi_lpss_resume(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
@@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime)
* This call is kept first to be in symmetry with
* acpi_lpss_runtime_suspend() one.
*/
- if ((runtime || !pm_resume_via_firmware()) &&
- lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+ if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_exit_d3_state();
ret = acpi_dev_resume(dev);
@@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
return 0;
ret = pm_generic_suspend_late(dev);
- return ret ? ret : acpi_lpss_suspend(dev, false);
+ return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}
static int acpi_lpss_resume_early(struct device *dev)
{
- int ret = acpi_lpss_resume(dev, false);
+ int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
@@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
static int acpi_lpss_runtime_resume(struct device *dev)
{
- int ret = acpi_lpss_resume(dev, true);
+ int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_runtime_resume(dev);
}
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 51c386bf230d..0f28a38a43ea 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -164,8 +164,8 @@ struct acpi_namespace_node {
#define ANOBJ_SUBTREE_HAS_INI 0x10 /* Used to optimize device initialization */
#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
+#define ANOBJ_NODE_EARLY_INIT 0x80 /* acpi_exec only: Node was create via init file (-fi) */
-#define IMPLICIT_EXTERNAL 0x02 /* iASL only: This object created implicitly via External */
#define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */
#define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */
#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 3825df923480..bbb3b4d1e796 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -25,14 +25,15 @@
/* Flags for acpi_ns_lookup, acpi_ns_search_and_enter */
#define ACPI_NS_NO_UPSEARCH 0
-#define ACPI_NS_SEARCH_PARENT 0x01
-#define ACPI_NS_DONT_OPEN_SCOPE 0x02
-#define ACPI_NS_NO_PEER_SEARCH 0x04
-#define ACPI_NS_ERROR_IF_FOUND 0x08
-#define ACPI_NS_PREFIX_IS_SCOPE 0x10
-#define ACPI_NS_EXTERNAL 0x20
-#define ACPI_NS_TEMPORARY 0x40
-#define ACPI_NS_OVERRIDE_IF_FOUND 0x80
+#define ACPI_NS_SEARCH_PARENT 0x0001
+#define ACPI_NS_DONT_OPEN_SCOPE 0x0002
+#define ACPI_NS_NO_PEER_SEARCH 0x0004
+#define ACPI_NS_ERROR_IF_FOUND 0x0008
+#define ACPI_NS_PREFIX_IS_SCOPE 0x0010
+#define ACPI_NS_EXTERNAL 0x0020
+#define ACPI_NS_TEMPORARY 0x0040
+#define ACPI_NS_OVERRIDE_IF_FOUND 0x0080
+#define ACPI_NS_EARLY_INIT 0x0100
/* Flags for acpi_ns_walk_namespace */
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 2733cd4e418c..3374d41582b5 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -180,6 +180,8 @@ char acpi_ut_remove_leading_zeros(char **string);
u8 acpi_ut_detect_hex_prefix(char **string);
+void acpi_ut_remove_hex_prefix(char **string);
+
u8 acpi_ut_detect_octal_prefix(char **string);
/*
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 556ff59bbbfc..3e5f95390f0d 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -763,7 +763,12 @@ acpi_db_command_dispatch(char *input_buffer,
case CMD_DISASSEMBLE:
case CMD_DISASM:
+#ifdef ACPI_DISASSEMBLER
(void)acpi_db_disassemble_method(acpi_gbl_db_args[1]);
+#else
+ acpi_os_printf
+ ("The AML Disassembler is not configured/present\n");
+#endif
break;
case CMD_DUMP:
@@ -872,7 +877,12 @@ acpi_db_command_dispatch(char *input_buffer,
case CMD_LIST:
+#ifdef ACPI_DISASSEMBLER
acpi_db_disassemble_aml(acpi_gbl_db_args[1], op);
+#else
+ acpi_os_printf
+ ("The AML Disassembler is not configured/present\n");
+#endif
break;
case CMD_LOCKS:
diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c
index 9fcecf104ba0..d8b7a0fe92ec 100644
--- a/drivers/acpi/acpica/dbmethod.c
+++ b/drivers/acpi/acpica/dbmethod.c
@@ -216,6 +216,7 @@ cleanup:
acpi_ut_remove_reference(obj_desc);
}
+#ifdef ACPI_DISASSEMBLER
/*******************************************************************************
*
* FUNCTION: acpi_db_disassemble_aml
@@ -242,9 +243,8 @@ void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op)
if (statements) {
num_statements = strtoul(statements, NULL, 0);
}
-#ifdef ACPI_DISASSEMBLER
+
acpi_dm_disassemble(NULL, op, num_statements);
-#endif
}
/*******************************************************************************
@@ -317,8 +317,6 @@ acpi_status acpi_db_disassemble_method(char *name)
walk_state->parse_flags |= ACPI_PARSE_DISASSEMBLE;
status = acpi_ps_parse_aml(walk_state);
-
-#ifdef ACPI_DISASSEMBLER
(void)acpi_dm_parse_deferred_ops(op);
/* Now we can disassemble the method */
@@ -326,7 +324,6 @@ acpi_status acpi_db_disassemble_method(char *name)
acpi_gbl_dm_opt_verbose = FALSE;
acpi_dm_disassemble(NULL, op, 0);
acpi_gbl_dm_opt_verbose = TRUE;
-#endif
acpi_ps_delete_parse_tree(op);
@@ -337,6 +334,7 @@ acpi_status acpi_db_disassemble_method(char *name)
acpi_ut_release_owner_id(&obj_desc->method.owner_id);
return (AE_OK);
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index 4647aa8efecb..f2526726daf6 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -10,6 +10,7 @@
#include "amlcode.h"
#include "acdebug.h"
#include "acinterp.h"
+#include "acparser.h"
#define _COMPONENT ACPI_CA_DEBUGGER
ACPI_MODULE_NAME("dbxface")
@@ -262,10 +263,17 @@ acpi_db_single_step(struct acpi_walk_state *walk_state,
}
}
- /* Now we can display it */
+ /* Now we can disassemble and display it */
#ifdef ACPI_DISASSEMBLER
acpi_dm_disassemble(walk_state, display_op, ACPI_UINT32_MAX);
+#else
+ /*
+ * The AML Disassembler is not configured - at least we can
+ * display the opcode value and name
+ */
+ acpi_os_printf("AML Opcode: %4.4X %s\n", op->common.aml_opcode,
+ acpi_ps_get_opcode_name(op->common.aml_opcode));
#endif
if ((op->common.aml_opcode == AML_IF_OP) ||
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 7c937595dfcb..30fe89545d6a 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -15,6 +15,10 @@
#include "acnamesp.h"
#include "acparser.h"
+#ifdef ACPI_EXEC_APP
+#include "aecommon.h"
+#endif
+
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsfield")
@@ -259,6 +263,13 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
u64 position;
union acpi_parse_object *child;
+#ifdef ACPI_EXEC_APP
+ u64 value = 0;
+ union acpi_operand_object *result_desc;
+ union acpi_operand_object *obj_desc;
+ char *name_path;
+#endif
+
ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
/* First field starts at bit zero */
@@ -391,6 +402,25 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+#ifdef ACPI_EXEC_APP
+ name_path =
+ acpi_ns_get_external_pathname(info->
+ field_node);
+ obj_desc =
+ acpi_ut_create_integer_object
+ (value);
+ if (ACPI_SUCCESS
+ (ae_lookup_init_file_entry
+ (name_path, &value))) {
+ acpi_ex_write_data_to_field
+ (obj_desc,
+ acpi_ns_get_attached_object
+ (info->field_node),
+ &result_desc);
+ }
+ acpi_ut_remove_reference(obj_desc);
+ ACPI_FREE(name_path);
+#endif
}
}
@@ -573,7 +603,9 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
!(walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
flags |= ACPI_NS_TEMPORARY;
}
-
+#ifdef ACPI_EXEC_APP
+ flags |= ACPI_NS_OVERRIDE_IF_FOUND;
+#endif
/*
* Walk the list of entries in the field_list
* Note: field_list can be of zero length. In this case, Arg will be NULL.
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 3de794bcf8fa..69603ba52a3a 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -528,13 +528,18 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
status =
acpi_hw_read(&value64, &acpi_gbl_FADT.xpm2_control_block);
- value = (u32)value64;
+ if (ACPI_SUCCESS(status)) {
+ value = (u32)value64;
+ }
break;
case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
status = acpi_hw_read(&value64, &acpi_gbl_FADT.xpm_timer_block);
- value = (u32)value64;
+ if (ACPI_SUCCESS(status)) {
+ value = (u32)value64;
+ }
+
break;
case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index fc0c2e2328cd..d8b8fc2ff563 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -51,16 +51,18 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
return_ACPI_STATUS(status);
}
- /*
- * 1) Disable all GPEs
- * 2) Enable all wakeup GPEs
- */
+ /* Disable all GPEs */
status = acpi_hw_disable_all_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+ status = acpi_hw_clear_acpi_status();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
acpi_gbl_system_awake_and_running = FALSE;
+ /* Enable all wakeup GPEs */
status = acpi_hw_enable_all_wakeup_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 220a718fbce9..e3f10afde5ff 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -558,6 +558,14 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
(char *)&current_node->name,
current_node));
}
+#ifdef ACPI_EXEC_APP
+ if ((status == AE_ALREADY_EXISTS) &&
+ (this_node->flags & ANOBJ_NODE_EARLY_INIT)) {
+ this_node->flags &= ~ANOBJ_NODE_EARLY_INIT;
+ status = AE_OK;
+ }
+#endif
+
#ifdef ACPI_ASL_COMPILER
/*
* If this ACPI name already exists within the namespace as an
@@ -613,13 +621,6 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Special handling for the last segment (num_segments == 0) */
else {
-#ifdef ACPI_ASL_COMPILER
- if (!acpi_gbl_disasm_flag
- && (this_node->flags & ANOBJ_IS_EXTERNAL)) {
- this_node->flags &= ~IMPLICIT_EXTERNAL;
- }
-#endif
-
/*
* Sanity typecheck of the target object:
*
@@ -683,6 +684,11 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
}
}
}
+#ifdef ACPI_EXEC_APP
+ if (flags & ACPI_NS_EARLY_INIT) {
+ this_node->flags |= ANOBJ_NODE_EARLY_INIT;
+ }
+#endif
*return_node = this_node;
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index e9c9a63bb6a4..f594ab75a5fe 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -381,7 +381,6 @@ acpi_ns_search_and_enter(u32 target_name,
if (flags & ACPI_NS_EXTERNAL ||
(walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
- new_node->flags |= IMPLICIT_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index bc5f05906bd1..34fc2f7476ed 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -22,6 +22,7 @@
#include "acdispat.h"
#include "amlcode.h"
#include "acconvert.h"
+#include "acnamesp.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psloop")
@@ -497,6 +498,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
status =
acpi_ps_create_op(walk_state, aml_op_start, &op);
if (ACPI_FAILURE(status)) {
+ /*
+ * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+ * executing it as a control method. However, if we encounter
+ * an error while creating an op, we should keep trying to
+ * load the table rather than abort the whole table load. Set
+ * the status to AE_OK to proceed.
+ */
+ if ((walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)
+ && status == AE_ALREADY_EXISTS) {
+ status = AE_OK;
+ }
if (status == AE_CTRL_PARSE_CONTINUE) {
continue;
}
@@ -515,12 +528,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- if (walk_state->opcode == AML_SCOPE_OP) {
+ if (acpi_ns_opens_scope
+ (acpi_ps_get_opcode_info
+ (walk_state->opcode)->object_type)) {
/*
- * If the scope op fails to parse, skip the body of the
- * scope op because the parse failure indicates that the
- * device may not exist.
+ * If the scope/device op fails to parse, skip the body of
+ * the scope op because the parse failure indicates that
+ * the device may not exist.
*/
+ ACPI_ERROR((AE_INFO,
+ "Skip parsing opcode %s",
+ acpi_ps_get_opcode_name
+ (walk_state->opcode)));
walk_state->parser_state.aml =
walk_state->aml + 1;
walk_state->parser_state.aml =
@@ -528,8 +547,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
(&walk_state->parser_state);
walk_state->aml =
walk_state->parser_state.aml;
- ACPI_ERROR((AE_INFO,
- "Skipping Scope block"));
}
continue;
@@ -694,6 +711,25 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
acpi_ps_next_parse_state(walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
+ } else
+ if ((walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)
+ && (ACPI_AML_EXCEPTION(status)
+ || status == AE_ALREADY_EXISTS
+ || status == AE_NOT_FOUND)) {
+ /*
+ * ACPI_PARSE_MODULE_LEVEL means that we are
+ * currently loading a table by executing it
+ * as a control method. A failure at this
+ * point means the dispatcher got an error
+ * while trying to execute the Op, but we
+ * should keep trying to load the table
+ * rather than abort; setting the status to
+ * AE_OK continues the table load.
+ */
+ status = AE_OK;
}
}
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 51891f9fb057..862149c8a208 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -516,9 +516,9 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
acpi_tb_check_duplication(table_desc, table_index);
if (ACPI_FAILURE(status)) {
if (status != AE_CTRL_TERMINATE) {
- ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+ ACPI_EXCEPTION((AE_INFO, status,
"%4.4s 0x%8.8X%8.8X"
- " Table is duplicated",
+ " Table is already loaded",
acpi_ut_valid_nameseg
(table_desc->signature.
ascii) ? table_desc->
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 118f3ff1fbb5..8cc4392c61f3 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -355,6 +355,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
u16 original_count;
u16 new_count = 0;
acpi_cpu_flags lock_flags;
+ char *message;
ACPI_FUNCTION_NAME(ut_update_ref_count);
@@ -391,6 +392,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
object, object->common.type,
acpi_ut_get_object_type_name(object),
new_count));
+ message = "Incremement";
break;
case REF_DECREMENT:
@@ -420,6 +422,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
if (new_count == 0) {
acpi_ut_delete_internal_obj(object);
}
+ message = "Decrement";
break;
default:
@@ -436,8 +439,8 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*/
if (new_count > ACPI_MAX_REFERENCE_COUNT) {
ACPI_WARNING((AE_INFO,
- "Large Reference Count (0x%X) in object %p, Type=0x%.2X",
- new_count, object, object->common.type));
+ "Large Reference Count (0x%X) in object %p, Type=0x%.2X Operation=%s",
+ new_count, object, object->common.type, message));
}
}
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 5a64ddaed8a3..e47430272692 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
switch (lookup_status) {
case AE_ALREADY_EXISTS:
- acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
message = "Failure creating";
break;
case AE_NOT_FOUND:
- acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
message = "Could not resolve";
break;
default:
- acpi_os_printf("\n" ACPI_MSG_ERROR);
+ acpi_os_printf(ACPI_MSG_ERROR);
message = "Failure resolving";
break;
}
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 954f8e3e35cd..05ff20049b87 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -231,14 +231,34 @@ char acpi_ut_remove_whitespace(char **string)
u8 acpi_ut_detect_hex_prefix(char **string)
{
+ char *initial_position = *string;
+ acpi_ut_remove_hex_prefix(string);
+ if (*string != initial_position) {
+ return (TRUE); /* String is past leading 0x */
+ }
+
+ return (FALSE); /* Not a hex string */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_hex_prefix
+ *
+ * PARAMETERS: string - Pointer to input ASCII string
+ *
+ * RETURN: none
+ *
+ * DESCRIPTION: Remove a hex "0x" prefix
+ *
+ ******************************************************************************/
+
+void acpi_ut_remove_hex_prefix(char **string)
+{
if ((**string == ACPI_ASCII_ZERO) &&
(tolower((int)*(*string + 1)) == 'x')) {
*string += 2; /* Go past the leading 0x */
- return (TRUE);
}
-
- return (FALSE); /* Not a hex string */
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c
index 8fadad242db6..5fde619a8bbd 100644
--- a/drivers/acpi/acpica/utstrtoul64.c
+++ b/drivers/acpi/acpica/utstrtoul64.c
@@ -218,7 +218,7 @@ u64 acpi_ut_implicit_strtoul64(char *string)
* implicit conversions, and the "0x" prefix is "not allowed".
* However, allow a "0x" prefix as an ACPI extension.
*/
- acpi_ut_detect_hex_prefix(&string);
+ acpi_ut_remove_hex_prefix(&string);
if (!acpi_ut_remove_leading_zeros(&string)) {
return_VALUE(0);
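
Splitting the helpers lets callers strip an optional "0x" without caring whether one was present, while acpi_ut_detect_hex_prefix() keeps its boolean contract. A quick illustration (hypothetical buffer):

	char buf[] = "0x1A";
	char *s = buf;

	acpi_ut_remove_hex_prefix(&s);		/* s now points at "1A" */

	s = buf;
	if (acpi_ut_detect_hex_prefix(&s)) {
		/* prefix found and skipped; parse the rest as hex */
	}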
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 7a3a541046ed..08f26db2da7e 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -947,6 +947,24 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
return 0;
}
+static int rc_dma_get_range(struct device *dev, u64 *size)
+{
+ struct acpi_iort_node *node;
+ struct acpi_iort_root_complex *rc;
+
+ node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
+ iort_match_node_callback, dev);
+ if (!node || node->revision < 1)
+ return -ENODEV;
+
+ rc = (struct acpi_iort_root_complex *)node->node_data;
+
+ *size = rc->memory_address_limit >= 64 ? U64_MAX :
+ 1ULL<<rc->memory_address_limit;
+
+ return 0;
+}
+
/**
* iort_dma_setup() - Set-up device DMA parameters.
*
@@ -960,25 +978,28 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
int ret, msb;
/*
- * Set default coherent_dma_mask to 32 bit. Drivers are expected to
- * setup the correct supported mask.
+ * If @dev is expected to be DMA-capable then the bus code that created
+ * it should have initialised its dma_mask pointer by this point. For
+ * now, we'll continue the legacy behaviour of coercing it to the
+ * coherent mask if not, but we'll no longer do so quietly.
*/
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
-
- /*
- * Set it to coherent_dma_mask by default if the architecture
- * code has not set it.
- */
- if (!dev->dma_mask)
+ if (!dev->dma_mask) {
+ dev_warn(dev, "DMA mask not set\n");
dev->dma_mask = &dev->coherent_dma_mask;
+ }
- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ if (dev->coherent_dma_mask)
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ else
+ size = 1ULL << 32;
- if (dev_is_pci(dev))
+ if (dev_is_pci(dev)) {
ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
- else
+ if (ret == -ENODEV)
+ ret = rc_dma_get_range(dev, &size);
+ } else {
ret = nc_dma_get_range(dev, &size);
+ }
if (!ret) {
msb = fls64(dmaaddr + size - 1);
@@ -993,6 +1014,7 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
* Limit coherent and dma mask based on size
* retrieved from firmware.
*/
+ dev->bus_dma_mask = mask;
dev->coherent_dma_mask = mask;
*dev->dma_mask = mask;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b0113a5802a3..cb97b6105f52 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -23,18 +23,18 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/jiffies.h>
-#include <linux/async.h>
-#include <linux/dmi.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <linux/types.h>
+
#include <asm/unaligned.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
@@ -364,6 +364,20 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
+static enum power_supply_property energy_battery_full_cap_broken_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
/* --------------------------------------------------------------------------
Battery Management
-------------------------------------------------------------------------- */
@@ -577,8 +591,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
(s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
- printk_once(KERN_WARNING FW_BUG
- "battery: (dis)charge rate invalid.\n");
+ pr_warn_once(FW_BUG "battery: (dis)charge rate invalid.\n");
}
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
@@ -717,10 +730,11 @@ void battery_hook_register(struct acpi_battery_hook *hook)
*/
pr_err("extension failed to load: %s", hook->name);
__battery_hook_unregister(hook, 0);
- return;
+ goto end;
}
}
pr_info("new extension: %s\n", hook->name);
+end:
mutex_unlock(&hook_mutex);
}
EXPORT_SYMBOL_GPL(battery_hook_register);
@@ -732,7 +746,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register);
*/
static void battery_hook_add_battery(struct acpi_battery *battery)
{
- struct acpi_battery_hook *hook_node;
+ struct acpi_battery_hook *hook_node, *tmp;
mutex_lock(&hook_mutex);
INIT_LIST_HEAD(&battery->list);
@@ -744,15 +758,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
* when a battery gets hotplugged or initialized
* during the battery module initialization.
*/
- list_for_each_entry(hook_node, &battery_hook_list, list) {
+ list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
if (hook_node->add_battery(battery->bat)) {
/*
* The notification of the extensions has failed, to
* prevent further errors we will unload the extension.
*/
- __battery_hook_unregister(hook_node, 0);
pr_err("error in extension, unloading: %s",
hook_node->name);
+ __battery_hook_unregister(hook_node, 0);
}
}
mutex_unlock(&hook_mutex);
@@ -798,6 +812,11 @@ static int sysfs_add_battery(struct acpi_battery *battery)
battery->bat_desc.properties = charge_battery_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(charge_battery_props);
+ } else if (battery->full_charge_capacity == 0) {
+ battery->bat_desc.properties =
+ energy_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_full_cap_broken_props);
} else {
battery->bat_desc.properties = energy_battery_props;
battery->bat_desc.num_properties =
@@ -917,10 +936,11 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
static int acpi_battery_update(struct acpi_battery *battery, bool resume)
{
- int result, old_present = acpi_battery_present(battery);
- result = acpi_battery_get_status(battery);
+ int result = acpi_battery_get_status(battery);
+
if (result)
return result;
+
if (!acpi_battery_present(battery)) {
sysfs_remove_battery(battery);
battery->update_time = 0;
@@ -930,8 +950,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
if (resume)
return 0;
- if (!battery->update_time ||
- old_present != acpi_battery_present(battery)) {
+ if (!battery->update_time) {
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1020,7 +1039,7 @@ static int acpi_battery_info_proc_show(struct seq_file *seq, void *offset)
acpi_battery_units(battery));
seq_printf(seq, "battery technology: %srechargeable\n",
- (!battery->technology)?"non-":"");
+ battery->technology ? "" : "non-");
if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
seq_printf(seq, "design voltage: unknown\n");
@@ -1111,11 +1130,12 @@ static int acpi_battery_alarm_proc_show(struct seq_file *seq, void *offset)
goto end;
}
seq_printf(seq, "alarm: ");
- if (!battery->alarm)
- seq_printf(seq, "unsupported\n");
- else
+ if (battery->alarm) {
seq_printf(seq, "%u %sh\n", battery->alarm,
acpi_battery_units(battery));
+ } else {
+ seq_printf(seq, "unsupported\n");
+ }
end:
if (result)
seq_printf(seq, "ERROR: Unable to read battery alarm\n");
@@ -1148,9 +1168,9 @@ static ssize_t acpi_battery_write_alarm(struct file *file,
}
result = acpi_battery_set_alarm(battery);
end:
- if (!result)
- return count;
- return result;
+ if (result)
+ return result;
+ return count;
}
static int acpi_battery_alarm_proc_open(struct inode *inode, struct file *file)
@@ -1169,8 +1189,7 @@ static const struct file_operations acpi_battery_alarm_fops = {
static int acpi_battery_add_fs(struct acpi_device *device)
{
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+ pr_warning(PREFIX "Deprecated procfs I/F for battery is loaded, please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
if (!acpi_device_dir(device)) {
acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
acpi_battery_dir);
@@ -1246,7 +1265,9 @@ static int battery_notify(struct notifier_block *nb,
if (!acpi_battery_present(battery))
return 0;
- if (!battery->bat) {
+ if (battery->bat) {
+ acpi_battery_refresh(battery);
+ } else {
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1254,8 +1275,7 @@ static int battery_notify(struct notifier_block *nb,
result = sysfs_add_battery(battery);
if (result)
return result;
- } else
- acpi_battery_refresh(battery);
+ }
acpi_battery_init_alarm(battery);
acpi_battery_get_state(battery);
@@ -1397,7 +1417,7 @@ static int acpi_battery_add(struct acpi_device *device)
}
#endif
- printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+ pr_info(PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
device->status.battery_present ? "present" : "absent");
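
The list_for_each_entry_safe() change above is what lets battery_hook_add_battery() unregister a failing hook while still walking battery_hook_list: __battery_hook_unregister() unlinks the current node, and the _safe variant has already cached its successor. The generic pattern, with illustrative names:

	#include <linux/list.h>

	struct hook { struct list_head list; int (*add)(void *bat); };

	static void walk_and_prune(struct list_head *hooks, void *bat)
	{
		struct hook *h, *tmp;

		/* 'tmp' holds the next entry, so 'h' may be unlinked here */
		list_for_each_entry_safe(h, tmp, hooks, list) {
			if (h->add(bat))
				list_del(&h->list); /* unsafe with plain list_for_each_entry */
		}
	}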
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 84b4a62018eb..292088fcc624 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -66,37 +66,10 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
return 0;
}
#endif
-static int set_gbl_term_list(const struct dmi_system_id *id)
-{
- acpi_gbl_execute_tables_as_methods = 1;
- return 0;
-}
-static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
- /*
- * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C
- * mode.
- * https://bugzilla.kernel.org/show_bug.cgi?id=198515
- */
- {
- .callback = set_gbl_term_list,
- .ident = "Dell Precision M5530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"),
- },
- },
- {
- .callback = set_gbl_term_list,
- .ident = "Dell XPS 15 9570",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
- },
- },
+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
/*
* Invoke DSDT corruption work-around on all Toshiba Satellite.
- * DSDT will be copied to memory.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
*/
{
@@ -110,7 +83,7 @@ static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
{}
};
#else
-static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
{}
};
#endif
@@ -962,7 +935,7 @@ static int acpi_device_probe(struct device *dev)
return 0;
}
-static int acpi_device_remove(struct device * dev)
+static int acpi_device_remove(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = acpi_dev->driver;
@@ -1060,8 +1033,11 @@ void __init acpi_early_init(void)
acpi_permanent_mmap = true;
- /* Check machine-specific quirks */
- dmi_check_system(acpi_quirks_dmi_table);
+ /*
+ * If the machine falls into the DMI check table,
+ * DSDT will be copied to memory
+ */
+ dmi_check_system(dsdt_dmi_table);
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 2345a5ee2dbb..a19ff3977ac4 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) "ACPI: button: " fmt
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -235,9 +236,6 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
button->last_time = ktime_get();
}
- if (state)
- acpi_pm_wakeup_event(&device->dev);
-
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
if (ret == NOTIFY_DONE)
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
@@ -252,7 +250,8 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
return ret;
}
-static int acpi_button_state_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused acpi_button_state_seq_show(struct seq_file *seq,
+ void *offset)
{
struct acpi_device *device = seq->private;
int state;
@@ -366,7 +365,8 @@ int acpi_lid_open(void)
}
EXPORT_SYMBOL(acpi_lid_open);
-static int acpi_lid_update_state(struct acpi_device *device)
+static int acpi_lid_update_state(struct acpi_device *device,
+ bool signal_wakeup)
{
int state;
@@ -374,6 +374,9 @@ static int acpi_lid_update_state(struct acpi_device *device)
if (state < 0)
return state;
+ if (state && signal_wakeup)
+ acpi_pm_wakeup_event(&device->dev);
+
return acpi_lid_notify_state(device, state);
}
@@ -384,7 +387,7 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
(void)acpi_lid_notify_state(device, 1);
break;
case ACPI_BUTTON_LID_INIT_METHOD:
- (void)acpi_lid_update_state(device);
+ (void)acpi_lid_update_state(device, false);
break;
case ACPI_BUTTON_LID_INIT_IGNORE:
default:
@@ -409,7 +412,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
users = button->input->users;
mutex_unlock(&button->input->mutex);
if (users)
- acpi_lid_update_state(device);
+ acpi_lid_update_state(device, true);
} else {
int keycode;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 442a9e24f439..d4e5610e09c5 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2042,7 +2042,21 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
.ident = "Thinkpad X1 Carbon 6th",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
+ },
+ },
+ {
+ .ident = "ThinkPad X1 Carbon 6th",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
+ },
+ },
+ {
+ .ident = "ThinkPad X1 Yoga 3rd",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
},
},
{ },
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index d15814e1727f..7c479002e798 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
const guid_t *guid;
int rc, i;
+ if (cmd_rc)
+ *cmd_rc = -EINVAL;
func = cmd;
if (cmd == ND_CMD_CALL) {
call_pkg = buf;
@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
* If we return an error (like elsewhere) then caller wouldn't
* be able to rely upon data returned to make calculation.
*/
+ if (cmd_rc)
+ *cmd_rc = 0;
return 0;
}
@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
mutex_lock(&acpi_desc->init_mutex);
rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
- work_busy(&acpi_desc->dwork.work)
+ acpi_desc->scrub_busy
&& !acpi_desc->cancel ? "+\n" : "\n");
mutex_unlock(&acpi_desc->init_mutex);
}
@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
return 0;
}
+static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
+{
+ lockdep_assert_held(&acpi_desc->init_mutex);
+
+ acpi_desc->scrub_busy = 1;
+ /* note this should only be set from within the workqueue */
+ if (tmo)
+ acpi_desc->scrub_tmo = tmo;
+ queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+}
+
+static void sched_ars(struct acpi_nfit_desc *acpi_desc)
+{
+ __sched_ars(acpi_desc, 0);
+}
+
+static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
+{
+ lockdep_assert_held(&acpi_desc->init_mutex);
+
+ acpi_desc->scrub_busy = 0;
+ acpi_desc->scrub_count++;
+ if (acpi_desc->scrub_count_state)
+ sysfs_notify_dirent(acpi_desc->scrub_count_state);
+}
+
static void acpi_nfit_scrub(struct work_struct *work)
{
struct acpi_nfit_desc *acpi_desc;
@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
mutex_lock(&acpi_desc->init_mutex);
query_rc = acpi_nfit_query_poison(acpi_desc);
tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
- if (tmo) {
- queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
- acpi_desc->scrub_tmo = tmo;
- } else {
- acpi_desc->scrub_count++;
- if (acpi_desc->scrub_count_state)
- sysfs_notify_dirent(acpi_desc->scrub_count_state);
- }
+ if (tmo)
+ __sched_ars(acpi_desc, tmo);
+ else
+ notify_ars_done(acpi_desc);
memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
mutex_unlock(&acpi_desc->init_mutex);
}
@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
break;
}
- queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+ sched_ars(acpi_desc);
return 0;
}
@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
}
}
if (scheduled) {
- queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+ sched_ars(acpi_desc);
dev_dbg(dev, "ars_scan triggered\n");
}
mutex_unlock(&acpi_desc->init_mutex);
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 7d15856a739f..a97ff42fe311 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
unsigned int max_ars;
unsigned int scrub_count;
unsigned int scrub_mode;
+ unsigned int scrub_busy:1;
unsigned int cancel:1;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index 8a8f43568510..b2a16ed7e81a 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -66,6 +66,14 @@ osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
* be removed if both new and old graphics cards are supported.
*/
{"Linux-Dell-Video", true},
+ /*
+ * Linux-Lenovo-NV-HDMI-Audio is used by the BIOS to power on NVidia's
+ * HDMI audio device, which is turned off for power saving in Windows.
+ * This power-management behavior has been observed on some Lenovo
+ * ThinkPad systems, which cannot output audio via HDMI without this
+ * BIOS workaround.
+ */
+ {"Linux-Lenovo-NV-HDMI-Audio", true},
};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index 7ffa74048107..22c9e374c923 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -25,16 +25,121 @@
#define PMIC_A0LOCK_REG 0xc5
static struct pmic_table power_table[] = {
+/* {
+ .address = 0x00,
+ .reg = ??,
+ .bit = ??,
+ }, ** VSYS */
+ {
+ .address = 0x04,
+ .reg = 0x63,
+ .bit = 0x00,
+ }, /* SYSX -> VSYS_SX */
+ {
+ .address = 0x08,
+ .reg = 0x62,
+ .bit = 0x00,
+ }, /* SYSU -> VSYS_U */
+ {
+ .address = 0x0c,
+ .reg = 0x64,
+ .bit = 0x00,
+ }, /* SYSS -> VSYS_S */
+ {
+ .address = 0x10,
+ .reg = 0x6a,
+ .bit = 0x00,
+ }, /* V50S -> V5P0S */
+ {
+ .address = 0x14,
+ .reg = 0x6b,
+ .bit = 0x00,
+ }, /* HOST -> VHOST, USB2/3 host */
+ {
+ .address = 0x18,
+ .reg = 0x6c,
+ .bit = 0x00,
+ }, /* VBUS -> VBUS, USB2/3 OTG */
+ {
+ .address = 0x1c,
+ .reg = 0x6d,
+ .bit = 0x00,
+ }, /* HDMI -> VHDMI */
+/* {
+ .address = 0x20,
+ .reg = ??,
+ .bit = ??,
+ }, ** S285 */
{
.address = 0x24,
.reg = 0x66,
.bit = 0x00,
- },
+ }, /* X285 -> V2P85SX, camera */
+/* {
+ .address = 0x28,
+ .reg = ??,
+ .bit = ??,
+ }, ** V33A */
+ {
+ .address = 0x2c,
+ .reg = 0x69,
+ .bit = 0x00,
+ }, /* V33S -> V3P3S, display/ssd/audio */
+ {
+ .address = 0x30,
+ .reg = 0x68,
+ .bit = 0x00,
+ }, /* V33U -> V3P3U, SDIO wifi&bt */
+/* {
+ .address = 0x34 .. 0x40,
+ .reg = ??,
+ .bit = ??,
+ }, ** V33I, V18A, REFQ, V12A */
+ {
+ .address = 0x44,
+ .reg = 0x5c,
+ .bit = 0x00,
+ }, /* V18S -> V1P8S, SOC/USB PHY/SIM */
{
.address = 0x48,
.reg = 0x5d,
.bit = 0x00,
- },
+ }, /* V18X -> V1P8SX, eMMC/camera/audio */
+ {
+ .address = 0x4c,
+ .reg = 0x5b,
+ .bit = 0x00,
+ }, /* V18U -> V1P8U, LPDDR */
+ {
+ .address = 0x50,
+ .reg = 0x61,
+ .bit = 0x00,
+ }, /* V12X -> V1P2SX, SOC SFR */
+ {
+ .address = 0x54,
+ .reg = 0x60,
+ .bit = 0x00,
+ }, /* V12S -> V1P2S, MIPI */
+/* {
+ .address = 0x58,
+ .reg = ??,
+ .bit = ??,
+ }, ** V10A */
+ {
+ .address = 0x5c,
+ .reg = 0x56,
+ .bit = 0x00,
+ }, /* V10S -> V1P0S, SOC GFX */
+ {
+ .address = 0x60,
+ .reg = 0x57,
+ .bit = 0x00,
+ }, /* V10X -> V1P0SX, SOC display/DDR IO/PCIe */
+ {
+ .address = 0x64,
+ .reg = 0x59,
+ .bit = 0x00,
+ }, /* V105 -> V1P05S, L2 SRAM */
};
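Each entry above maps a 32-bit field in the PMIC's ACPI power operation region (one field every 4 bytes of address space) to a Crystal Cove register/bit pair; entries whose register is still unknown are kept as comments. A sketch of how such a table is typically searched (helper name hypothetical):

static int pmic_find_reg_bit(const struct pmic_table *table, int count,
			     int address, int *reg, int *bit)
{
	int i;

	for (i = 0; i < count; i++) {
		if (table[i].address == address) {
			*reg = table[i].reg;
			*bit = table[i].bit;
			return 0;
		}
	}

	return -ENOENT;	/* address not handled, e.g. the VSYS slot */
}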
static struct pmic_table thermal_table[] = {
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index e5ea1974d1e3..d1e26cb599bf 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
if (cpu_node) {
cpu_node = acpi_find_processor_package_id(table, cpu_node,
level, flag);
- /* Only the first level has a guaranteed id */
- if (level == 0)
+ /*
+ * As per the specification, if the processor structure represents
+ * an actual processor, then the ACPI processor ID must be valid.
+ * For processor containers, ACPI_PPTT_ACPI_PROCESSOR_ID_VALID
+ * should be set if the UID is valid.
+ */
+ if (level == 0 ||
+ cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
return cpu_node->acpi_processor_id;
return ACPI_PTR_DIFF(cpu_node, table);
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 5815356ea6ad..693cf05b0cc4 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -542,6 +542,23 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
return 0;
}
+static struct fwnode_handle *
+acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname)
+{
+ struct fwnode_handle *child;
+
+ /*
+ * Find first matching named child node of this fwnode.
+ * For ACPI this will be a data only sub-node.
+ */
+ fwnode_for_each_child_node(fwnode, child)
+ if (acpi_data_node_match(child, childname))
+ return child;
+
+ return NULL;
+}
+
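For reference, a caller-side sketch (device variable hypothetical): consumers normally reach this helper through the generic fwnode API, which dispatches to acpi_fwnode_get_named_child_node() for ACPI nodes:

static void example_find_named_child(struct device *dev)
{
	struct fwnode_handle *child;

	/* matches a data-only sub-node such as "port@0" under dev */
	child = fwnode_get_named_child_node(dev_fwnode(dev), "port@0");
	if (child) {
		/* ... use the sub-node ... */
		fwnode_handle_put(child);
	}
}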
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
@@ -579,7 +596,7 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
*/
int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *propname, size_t index, size_t num_args,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
@@ -607,7 +624,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (ret)
return ret == -ENODEV ? -EINVAL : ret;
- args->adev = device;
+ args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
}
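With the switch from acpi_reference_args to fwnode_reference_args, callers get the referenced node back as a generic fwnode instead of a struct acpi_device. A hedged consumer-side sketch (the property name is taken from the graph code later in this patch):

static int example_get_reference(const struct fwnode_handle *fwnode)
{
	struct fwnode_reference_args args;
	int ret;

	ret = acpi_node_get_property_reference(fwnode, "remote-endpoint",
					       0, &args);
	if (ret)
		return ret;

	/* args.fwnode is the referenced node; args.args[] holds integers */
	return args.nargs ? (int)args.args[0] : 0;
}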
@@ -633,6 +650,8 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
u32 nargs, i;
if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ struct fwnode_handle *ref_fwnode;
+
ret = acpi_bus_get_device(element->reference.handle,
&device);
if (ret)
@@ -641,6 +660,19 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
nargs = 0;
element++;
+ /*
+ * Find the referred data extension node under the
+ * referred device node.
+ */
+ for (ref_fwnode = acpi_fwnode_handle(device);
+ element < end && element->type == ACPI_TYPE_STRING;
+ element++) {
+ ref_fwnode = acpi_fwnode_get_named_child_node(
+ ref_fwnode, element->string.pointer);
+ if (!ref_fwnode)
+ return -EINVAL;
+ }
+
/* assume following integer elements are all args */
for (i = 0; element + i < end && i < num_args; i++) {
int type = element[i].type;
@@ -653,11 +685,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
}
- if (nargs > MAX_ACPI_REFERENCE_ARGS)
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
return -EINVAL;
if (idx == index) {
- args->adev = device;
+ args->fwnode = ref_fwnode;
args->nargs = nargs;
for (i = 0; i < nargs; i++)
args->args[i] = element[i].integer.value;
@@ -995,16 +1027,36 @@ struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
return NULL;
}
+/*
+ * Return true if the node is an ACPI graph node. Called on either ports
+ * or endpoints.
+ */
+static bool is_acpi_graph_node(struct fwnode_handle *fwnode,
+ const char *str)
+{
+ unsigned int len = strlen(str);
+ const char *name;
+
+ if (!len || !is_acpi_data_node(fwnode))
+ return false;
+
+ name = to_acpi_data_node(fwnode)->name;
+
+ return (fwnode_property_present(fwnode, "reg") &&
+ !strncmp(name, str, len) && name[len] == '@') ||
+ fwnode_property_present(fwnode, str);
+}
+
/**
* acpi_graph_get_next_endpoint - Get next endpoint ACPI firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
*
* Looks up next endpoint ACPI firmware node below a given @fwnode. Returns
- * %NULL if there is no next endpoint, ERR_PTR() in case of error. In case
- * of success the next endpoint is returned.
+ * %NULL if there is no next endpoint or in case of error. In case of success
+ * the next endpoint is returned.
*/
-struct fwnode_handle *acpi_graph_get_next_endpoint(
+static struct fwnode_handle *acpi_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev)
{
struct fwnode_handle *port = NULL;
@@ -1013,8 +1065,14 @@ struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!prev) {
do {
port = fwnode_get_next_child_node(fwnode, port);
- /* Ports must have port property */
- if (fwnode_property_present(port, "port"))
+ /*
+ * The names of the port nodes begin with "port@"
+ * followed by the number of the port node, and they also
+ * have a "reg" property that holds the same number. For
+ * compatibility reasons a node is also recognised as a
+ * port node from the "port" property.
+ */
+ if (is_acpi_graph_node(port, "port"))
break;
} while (port);
} else {
@@ -1029,15 +1087,19 @@ struct fwnode_handle *acpi_graph_get_next_endpoint(
port = fwnode_get_next_child_node(fwnode, port);
if (!port)
break;
- if (fwnode_property_present(port, "port"))
+ if (is_acpi_graph_node(port, "port"))
endpoint = fwnode_get_next_child_node(port, NULL);
}
- if (endpoint) {
- /* Endpoints must have "endpoint" property */
- if (!fwnode_property_present(endpoint, "endpoint"))
- return ERR_PTR(-EPROTO);
- }
+ /*
+ * The names of the endpoint nodes begin with "endpoint@" followed by
+ * the number of the endpoint node, and they also have a "reg" property
+ * that holds the same number. For compatibility reasons a node is also
+ * recognised as an endpoint node from the "endpoint" property.
+ */
+ if (!is_acpi_graph_node(endpoint, "endpoint"))
+ return NULL;
return endpoint;
}
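Drivers do not normally call this directly; it is reached through the fwnode graph API. An iteration sketch (device variable hypothetical):

static unsigned int example_count_endpoints(struct device *dev)
{
	struct fwnode_handle *ep = NULL;
	unsigned int n = 0;

	/* walks "port@N"/"endpoint@N" nodes via the ops set below */
	while ((ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), ep)))
		n++;

	return n;
}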
@@ -1074,65 +1136,42 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
/**
* acpi_graph_get_remote_endpoint - Parses and returns remote end of an endpoint
* @__fwnode: Endpoint firmware node pointing to a remote device
- * @parent: Firmware node of remote port parent is filled here if not %NULL
- * @port: Firmware node of remote port is filled here if not %NULL
- * @endpoint: Firmware node of remote endpoint is filled here if not %NULL
*
- * Function parses remote end of ACPI firmware remote endpoint and fills in
- * fields requested by the caller. Returns %0 in case of success and
- * negative errno otherwise.
+ * Returns the remote endpoint corresponding to @__fwnode. NULL on error.
*/
-int acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode,
- struct fwnode_handle **parent,
- struct fwnode_handle **port,
- struct fwnode_handle **endpoint)
+static struct fwnode_handle *
+acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
{
struct fwnode_handle *fwnode;
unsigned int port_nr, endpoint_nr;
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
int ret;
memset(&args, 0, sizeof(args));
ret = acpi_node_get_property_reference(__fwnode, "remote-endpoint", 0,
&args);
if (ret)
- return ret;
+ return NULL;
+
+ /* Direct endpoint reference? */
+ if (!is_acpi_device_node(args.fwnode))
+ return args.nargs ? NULL : args.fwnode;
/*
* Always require two arguments with the reference: port and
* endpoint indices.
*/
if (args.nargs != 2)
- return -EPROTO;
+ return NULL;
- fwnode = acpi_fwnode_handle(args.adev);
+ fwnode = args.fwnode;
port_nr = args.args[0];
endpoint_nr = args.args[1];
- if (parent)
- *parent = fwnode;
-
- if (!port && !endpoint)
- return 0;
-
fwnode = acpi_graph_get_child_prop_value(fwnode, "port", port_nr);
- if (!fwnode)
- return -EPROTO;
-
- if (port)
- *port = fwnode;
-
- if (!endpoint)
- return 0;
- fwnode = acpi_graph_get_child_prop_value(fwnode, "endpoint",
- endpoint_nr);
- if (!fwnode)
- return -EPROTO;
-
- *endpoint = fwnode;
-
- return 0;
+ return acpi_graph_get_child_prop_value(fwnode, "endpoint", endpoint_nr);
}
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
@@ -1186,70 +1225,14 @@ acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
val, nval);
}
-static struct fwnode_handle *
-acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
- const char *childname)
-{
- struct fwnode_handle *child;
-
- /*
- * Find first matching named child node of this fwnode.
- * For ACPI this will be a data only sub-node.
- */
- fwnode_for_each_child_node(fwnode, child)
- if (acpi_data_node_match(child, childname))
- return child;
-
- return NULL;
-}
-
static int
acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int args_count, unsigned int index,
struct fwnode_reference_args *args)
{
- struct acpi_reference_args acpi_args;
- unsigned int i;
- int ret;
-
- ret = __acpi_node_get_property_reference(fwnode, prop, index,
- args_count, &acpi_args);
- if (ret < 0)
- return ret;
- if (!args)
- return 0;
-
- args->nargs = acpi_args.nargs;
- args->fwnode = acpi_fwnode_handle(acpi_args.adev);
-
- for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++)
- args->args[i] = i < acpi_args.nargs ? acpi_args.args[i] : 0;
-
- return 0;
-}
-
-static struct fwnode_handle *
-acpi_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle *prev)
-{
- struct fwnode_handle *endpoint;
-
- endpoint = acpi_graph_get_next_endpoint(fwnode, prev);
- if (IS_ERR(endpoint))
- return NULL;
-
- return endpoint;
-}
-
-static struct fwnode_handle *
-acpi_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
-{
- struct fwnode_handle *endpoint = NULL;
-
- acpi_graph_get_remote_endpoint(fwnode, NULL, NULL, &endpoint);
-
- return endpoint;
+ return __acpi_node_get_property_reference(fwnode, prop, index,
+ args_count, args);
}
static struct fwnode_handle *
@@ -1265,8 +1248,10 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
endpoint->local_fwnode = fwnode;
- fwnode_property_read_u32(port_fwnode, "port", &endpoint->port);
- fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
+ if (fwnode_property_read_u32(port_fwnode, "reg", &endpoint->port))
+ fwnode_property_read_u32(port_fwnode, "port", &endpoint->port);
+ if (fwnode_property_read_u32(fwnode, "reg", &endpoint->id))
+ fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
return 0;
}
@@ -1292,9 +1277,9 @@ acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
.get_named_child_node = acpi_fwnode_get_named_child_node, \
.get_reference_args = acpi_fwnode_get_reference_args, \
.graph_get_next_endpoint = \
- acpi_fwnode_graph_get_next_endpoint, \
+ acpi_graph_get_next_endpoint, \
.graph_get_remote_endpoint = \
- acpi_fwnode_graph_get_remote_endpoint, \
+ acpi_graph_get_remote_endpoint, \
.graph_get_port_parent = acpi_fwnode_get_parent, \
.graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, \
}; \
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 970dd87d347c..e1b6231cfa1c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1528,7 +1528,7 @@ static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
static bool acpi_is_indirect_io_slave(struct acpi_device *device)
{
struct acpi_device *parent = device->parent;
- const struct acpi_device_id indirect_io_hosts[] = {
+ static const struct acpi_device_id indirect_io_hosts[] = {
{"HISI0191", 0},
{}
};
@@ -1540,6 +1540,18 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{
struct list_head resource_list;
bool is_serial_bus_slave = false;
+ /*
+ * These devices have multiple I2cSerialBus resources and an i2c-client
+ * must be instantiated for each, each with its own i2c_device_id.
+ * Normally we only instantiate an i2c-client for the first resource,
+ * using the ACPI HID as id. These special cases are handled by the
+ * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows
+ * which i2c_device_id to use for each resource.
+ */
+ static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
+ {"BSG1160", },
+ {}
+ };
if (acpi_is_indirect_io_slave(device))
return true;
@@ -1551,6 +1563,10 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
fwnode_property_present(&device->fwnode, "baud")))
return true;
+ /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */
+ if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids))
+ return false;
+
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list,
acpi_check_serial_bus_slave,
@@ -1612,7 +1628,8 @@ static int acpi_add_single_object(struct acpi_device **child,
* Note this must be done before the get power-/wakeup_dev-flags calls.
*/
if (type == ACPI_BUS_TYPE_DEVICE)
- acpi_bus_get_status(device);
+ if (acpi_bus_get_status(device) < 0)
+ acpi_set_device_status(device, 0);
acpi_bus_get_power_flags(device);
acpi_bus_get_wakeup_device_flags(device);
@@ -1690,7 +1707,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
* acpi_add_single_object updates this once we've an acpi_device
* so that acpi_bus_get_status' quirk handling can be used.
*/
- *sta = 0;
+ *sta = ACPI_STA_DEFAULT;
break;
case ACPI_TYPE_PROCESSOR:
*type = ACPI_BUS_TYPE_PROCESSOR;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 5d0486f1cfcd..754d59f95500 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -338,6 +338,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
},
},
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Asus 1025C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
+ },
+ },
/*
* https://bugzilla.kernel.org/show_bug.cgi?id=189431
* Lenovo G50-45 is a platform later than 2012, but needs nvs memory
@@ -718,9 +726,6 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
-#define ACPI_LPS0_SCREEN_MASK ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON))
-#define ACPI_LPS0_PLATFORM_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
-
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
static char lps0_dsm_func_mask;
@@ -924,17 +929,14 @@ static int lps0_device_attach(struct acpi_device *adev,
if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
char bitmask = *(char *)out_obj->buffer.pointer;
- if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK ||
- (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) {
- lps0_dsm_func_mask = bitmask;
- lps0_device_handle = adev->handle;
- /*
- * Use suspend-to-idle by default if the default
- * suspend mode was not set from the command line.
- */
- if (mem_sleep_default > PM_SUSPEND_MEM)
- mem_sleep_current = PM_SUSPEND_TO_IDLE;
- }
+ lps0_dsm_func_mask = bitmask;
+ lps0_device_handle = adev->handle;
+ /*
+ * Use suspend-to-idle by default if the default
+ * suspend mode was not set from the command line.
+ */
+ if (mem_sleep_default > PM_SUSPEND_MEM)
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
bitmask);
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index ec5b0f190231..06c31ec3cc70 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -62,14 +62,20 @@ static const struct always_present_id always_present_ids[] = {
*/
ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
/*
- * On the Dell Venue 11 Pro 7130 the DSDT hides the touchscreen ACPI
- * device until a certain time after _SB.PCI0.GFX0.LCD.LCD1._ON gets
- * called has passed *and* _STA has been called at least 3 times since.
+ * On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides
+ * the touchscreen ACPI device until a certain time has
+ * passed after _SB.PCI0.GFX0.LCD.LCD1._ON gets called
+ * *and* _STA has been called at least 3 times since.
*/
ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
}),
+ ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"),
+ }),
+
/*
* The GPD win BIOS dated 20170221 has disabled the accelerometer, the
* drivers sometimes cause crashes under Windows and this is how the
@@ -103,13 +109,9 @@ static const struct always_present_id always_present_ids[] = {
bool acpi_device_always_present(struct acpi_device *adev)
{
- u32 *status = (u32 *)&adev->status;
- u32 old_status = *status;
bool ret = false;
unsigned int i;
- /* acpi_match_device_ids checks status, so set it to default */
- *status = ACPI_STA_DEFAULT;
for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) {
if (acpi_match_device_ids(adev, always_present_ids[i].hid))
continue;
@@ -125,15 +127,9 @@ bool acpi_device_always_present(struct acpi_device *adev)
!dmi_check_system(always_present_ids[i].dmi_ids))
continue;
- if (old_status != ACPI_STA_DEFAULT) /* Log only once */
- dev_info(&adev->dev,
- "Device [%s] is in always present list\n",
- adev->pnp.bus_id);
-
ret = true;
break;
}
- *status = old_status;
return ret;
}
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index ee4880bfdcdc..432e9ad77070 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,7 +10,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
- depends on MMU && !M68K
+ depends on MMU
default n
---help---
Binder is used in Android for both communication between processes,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 95283f3bb51c..d58763b6b009 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -51,7 +51,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
@@ -71,8 +70,12 @@
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
#include <uapi/linux/android/binder.h>
+
+#include <asm/cacheflush.h>
+
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -161,13 +164,13 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
#define binder_debug(mask, x...) \
do { \
if (binder_debug_mask & mask) \
- pr_info(x); \
+ pr_info_ratelimited(x); \
} while (0)
#define binder_user_error(x...) \
do { \
if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
- pr_info(x); \
+ pr_info_ratelimited(x); \
if (binder_stop_on_user_error) \
binder_stop_on_user_error = 2; \
} while (0)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2628806c64a2..3f3b7b253445 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -17,7 +17,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
@@ -28,6 +27,8 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
+#include <linux/ratelimit.h>
+#include <asm/cacheflush.h>
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -36,11 +37,12 @@ struct list_lru binder_alloc_lru;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
-static uint32_t binder_alloc_debug_mask;
+static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
module_param_named(debug_mask, binder_alloc_debug_mask,
uint, 0644);
@@ -48,7 +50,7 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
#define binder_alloc_debug(mask, x...) \
do { \
if (binder_alloc_debug_mask & mask) \
- pr_info(x); \
+ pr_info_ratelimited(x); \
} while (0)
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
@@ -152,8 +154,10 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
* free the buffer twice
*/
if (buffer->free_in_progress) {
- pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
- alloc->pid, current->pid, (u64)user_ptr);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid,
+ (u64)user_ptr);
return NULL;
}
buffer->free_in_progress = 1;
@@ -224,8 +228,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
if (!vma && need_mm) {
- pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- alloc->pid);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
goto err_no_vma;
}
@@ -344,8 +349,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
int ret;
if (alloc->vma == NULL) {
- pr_err("%d: binder_alloc_buf, no vma\n",
- alloc->pid);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
return ERR_PTR(-ESRCH);
}
@@ -417,11 +423,14 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
if (buffer_size > largest_free_size)
largest_free_size = buffer_size;
}
- pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
- alloc->pid, size);
- pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
- total_alloc_size, allocated_buffers, largest_alloc_size,
- total_free_size, free_buffers, largest_free_size);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers,
+ largest_alloc_size, total_free_size,
+ free_buffers, largest_free_size);
return ERR_PTR(-ENOSPC);
}
if (n == NULL) {
@@ -731,8 +740,10 @@ err_alloc_pages_failed:
err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
- pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
- alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end,
+ failure_string, ret);
return ret;
}
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 76e3b9c8a8a2..588eb3ec3507 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -248,14 +248,17 @@ DECLARE_EVENT_CLASS(binder_buffer_class,
__field(int, debug_id)
__field(size_t, data_size)
__field(size_t, offsets_size)
+ __field(size_t, extra_buffers_size)
),
TP_fast_assign(
__entry->debug_id = buf->debug_id;
__entry->data_size = buf->data_size;
__entry->offsets_size = buf->offsets_size;
+ __entry->extra_buffers_size = buf->extra_buffers_size;
),
- TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
- __entry->debug_id, __entry->data_size, __entry->offsets_size)
+ TP_printk("transaction=%d data_size=%zd offsets_size=%zd extra_buffers_size=%zd",
+ __entry->debug_id, __entry->data_size, __entry->offsets_size,
+ __entry->extra_buffers_size)
);
DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2b16e7c8fff3..39b181d6bd0d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
- depends on HAS_DMA
depends on ARCH_HIGHBANK || COMPILE_TEST
help
This option enables support for the Calxeda Highbank SoC's
@@ -408,7 +407,6 @@ config SATA_HIGHBANK
config SATA_MV
tristate "Marvell SATA support"
- depends on HAS_DMA
depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
select GENERIC_PHY
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 738fb22978dd..b2b9eba1d214 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
+ { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
return strcmp(buf, dmi->driver_data) < 0;
}
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+ static const struct dmi_system_id sysids[] = {
+ /* Various Lenovo 50 series have LPM issues with older BIOSen */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+ },
+ .driver_data = "20180406", /* 1.31 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+ },
+ .driver_data = "20180420", /* 1.28 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+ },
+ .driver_data = "20180315", /* 1.33 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+ },
+ /*
+ * Note: the date is based on release notes; 2.35 has been
+ * reported to be good, but I've been unable to get hold
+ * of the reporter to obtain the DMI BIOS date.
+ * TODO: fix this.
+ */
+ .driver_data = "20180310", /* 2.35 */
+ },
+ { } /* terminate list */
+ };
+ const struct dmi_system_id *dmi = dmi_first_match(sysids);
+ int year, month, date;
+ char buf[9];
+
+ if (!dmi)
+ return false;
+
+ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+ return strcmp(buf, dmi->driver_data) < 0;
+}
+
static bool ahci_broken_online(struct pci_dev *pdev)
{
#define ENCODE_BUSDEVFN(bus, slot, func) \
@@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"quirky BIOS, skipping spindown on poweroff\n");
}
+ if (ahci_broken_lpm(pdev)) {
+ pi.flags |= ATA_FLAG_NO_LPM;
+ dev_warn(&pdev->dev,
+ "BIOS update required for Link Power Management support\n");
+ }
+
if (ahci_broken_suspend(pdev)) {
hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
dev_warn(&pdev->dev,
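ahci_broken_lpm() relies on zero-padded "YYYYMMDD" strings, so lexicographic order equals chronological order and a plain strcmp() suffices. A standalone sketch of the comparison:

static bool bios_date_before(int year, int month, int date,
			     const char *cutoff)
{
	char buf[9];	/* "YYYYMMDD" plus NUL */

	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
	/* e.g. "20180315" < "20180406" both as strings and as dates */
	return strcmp(buf, cutoff) < 0;
}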
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 0045dacd814b..72d90b4c3aae 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
*
* Return: 0 on success; Error code otherwise.
*/
-int ahci_mvebu_stop_engine(struct ata_port *ap)
+static int ahci_mvebu_stop_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp, port_fbs;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 965842a08743..09620c2ffa0f 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
+#include <linux/nospec.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
+ if (pmp < EM_MAX_SLOTS) {
+ pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
emp = &pp->em_priv[pmp];
- else
+ } else {
return -EINVAL;
+ }
/* mask off the activity bits if we are in sw_activity
* mode, user should turn off sw_activity before setting
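array_index_nospec() clamps an attacker-controlled index so the CPU cannot speculate past the bounds check (Spectre v1). The general pattern, as a sketch over a hypothetical array:

#include <linux/nospec.h>

static int example_read_slot(const int *slots, unsigned int nr_slots,
			     unsigned int idx)
{
	if (idx >= nr_slots)
		return -EINVAL;

	/* clamp idx under speculation only after the check passed */
	idx = array_index_nospec(idx, nr_slots);
	return slots[idx];
}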
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 27d15ed7fa3d..984b37647b2f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev)
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
dev->horkage |= ATA_HORKAGE_NOLPM;
+ if (ap->flags & ATA_FLAG_NO_LPM)
+ dev->horkage |= ATA_HORKAGE_NOLPM;
+
if (dev->horkage & ATA_HORKAGE_NOLPM) {
ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
@@ -6421,6 +6424,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
host->n_tags = ATA_MAX_QUEUE;
host->dev = dev;
host->ops = ops;
+ kref_init(&host->kref);
}
void __ata_port_probe(struct ata_port *ap)
@@ -7388,3 +7392,5 @@ EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
+EXPORT_SYMBOL_GPL(ata_host_get);
+EXPORT_SYMBOL_GPL(ata_host_put);
\ No newline at end of file
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index d5412145d76d..01306c018398 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
struct ata_queued_cmd *qc;
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
- qc = __ata_qc_from_tag(ap, i);
+ ata_qc_for_each_raw(ap, qc, i) {
if (qc->flags & ATA_QCFLAG_ACTIVE &&
qc->scsicmd == scmd)
break;
@@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh);
static int ata_eh_nr_in_flight(struct ata_port *ap)
{
+ struct ata_queued_cmd *qc;
unsigned int tag;
int nr = 0;
/* count only non-internal commands */
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- if (ata_tag_internal(tag))
- continue;
- if (ata_qc_from_tag(ap, tag))
+ ata_qc_for_each(ap, qc, tag) {
+ if (qc)
nr++;
}
@@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
goto out_unlock;
if (cnt == ap->fastdrain_cnt) {
+ struct ata_queued_cmd *qc;
unsigned int tag;
/* No progress during the last interval, tag all
* in-flight qcs as timed out and freeze the port.
*/
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+ ata_qc_for_each(ap, qc, tag) {
if (qc)
qc->err_mask |= AC_ERR_TIMEOUT;
}
@@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
+ struct ata_queued_cmd *qc;
int tag, nr_aborted = 0;
WARN_ON(!ap->ops->error_handler);
@@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
ata_eh_set_pending(ap, 0);
/* include internal tag in iteration */
- for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_with_internal(ap, qc, tag) {
if (qc && (!link || qc->dev->link == link)) {
qc->flags |= ATA_QCFLAG_FAILED;
ata_qc_complete(qc);
@@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
return;
/* has LLDD analyzed already? */
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED))
continue;
@@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_queued_cmd *qc;
struct ata_device *dev;
unsigned int all_err_mask = 0, eflags = 0;
int tag, nr_failed = 0, nr_quiet = 0;
@@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
all_err_mask |= ehc->i.err_mask;
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED) ||
ata_dev_phys_link(qc->dev) != link)
continue;
@@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_queued_cmd *qc;
const char *frozen, *desc;
char tries_buf[6] = "";
int tag, nr_failed = 0;
@@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link)
if (ehc->i.desc[0] != '\0')
desc = ehc->i.desc;
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED) ||
ata_dev_phys_link(qc->dev) != link ||
((qc->flags & ATA_QCFLAG_QUIET) &&
@@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link)
ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+ ata_qc_for_each_raw(ap, qc, tag) {
struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
char data_buf[20] = "";
char cdb_buf[70] = "";
@@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
*/
void ata_eh_finish(struct ata_port *ap)
{
+ struct ata_queued_cmd *qc;
int tag;
/* retry or finish qcs */
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED))
continue;
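The ata_qc_for_each*() iterators used in these hunks are added to <linux/libata.h> by this series. A sketch consistent with the conversions above (ata_qc_for_each_raw() visits every tag via __ata_qc_from_tag(); the others skip or include the internal tag):

#define __ata_qc_for_each(ap, qc, tag, max_tag, fn)		\
	for ((tag) = 0; (tag) < (max_tag) &&			\
	     ({ qc = fn((ap), (tag)); 1; }); (tag)++)

/* all command slots, including empty ones */
#define ata_qc_for_each_raw(ap, qc, tag)			\
	__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)

/* active commands only; qc may still be NULL per tag */
#define ata_qc_for_each(ap, qc, tag)				\
	__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)

/* as above, but also covering the internal (EH) tag */
#define ata_qc_for_each_with_internal(ap, qc, tag)		\
	__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)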
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 6a91d04351d9..8e270962b2f3 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -597,8 +597,9 @@ static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
int rc = 0;
+ u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
u8 scsi_cmd[MAX_COMMAND_SIZE];
- u8 args[4], *argbuf = NULL, *sensebuf = NULL;
+ u8 args[4], *argbuf = NULL;
int argsize = 0;
enum dma_data_direction data_dir;
struct scsi_sense_hdr sshdr;
@@ -610,10 +611,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
if (copy_from_user(args, arg, sizeof(args)))
return -EFAULT;
- sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
- if (!sensebuf)
- return -ENOMEM;
-
+ memset(sensebuf, 0, sizeof(sensebuf));
memset(scsi_cmd, 0, sizeof(scsi_cmd));
if (args[3]) {
@@ -685,7 +683,6 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
&& copy_to_user(arg + sizeof(args), argbuf, argsize))
rc = -EFAULT;
error:
- kfree(sensebuf);
kfree(argbuf);
return rc;
}
@@ -704,8 +701,9 @@ error:
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
{
int rc = 0;
+ u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
u8 scsi_cmd[MAX_COMMAND_SIZE];
- u8 args[7], *sensebuf = NULL;
+ u8 args[7];
struct scsi_sense_hdr sshdr;
int cmd_result;
@@ -715,10 +713,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
if (copy_from_user(args, arg, sizeof(args)))
return -EFAULT;
- sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
- if (!sensebuf)
- return -ENOMEM;
-
+ memset(sensebuf, 0, sizeof(sensebuf));
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
scsi_cmd[1] = (3 << 1); /* Non-data */
@@ -769,7 +764,6 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
}
error:
- kfree(sensebuf);
return rc;
}
@@ -3805,10 +3799,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
*/
goto invalid_param_len;
}
- if (block > dev->n_sectors)
- goto out_of_range;
all = cdb[14] & 0x1;
+ if (all) {
+ /*
+ * Ignore the block address (zone ID) as defined by ZBC.
+ */
+ block = 0;
+ } else if (block >= dev->n_sectors) {
+ /*
+ * Block must be a valid zone ID (a zone start LBA).
+ */
+ fp = 2;
+ goto invalid_fld;
+ }
if (ata_ncq_enabled(qc->dev) &&
ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3837,10 +3841,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
invalid_fld:
ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
return 1;
- out_of_range:
- /* "Logical Block Address out of range" */
- ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
- return 1;
invalid_param_len:
/* "Parameter list length error" */
ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 9e21c49cf6be..f953cb4bb1ba 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -100,8 +100,6 @@ extern int ata_port_probe(struct ata_port *ap);
extern void __ata_port_probe(struct ata_port *ap);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
-extern void ata_host_get(struct ata_host *host);
-extern void ata_host_put(struct ata_host *host);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index d4caa23f5a88..6f0534047c6d 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/libata.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#define DRV_NAME "pata_imx"
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index f6c46e9a4dc0..e8b6a2e464c9 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -25,7 +25,6 @@
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
-#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/completion.h>
@@ -180,8 +179,6 @@ static int pxa_ata_probe(struct platform_device *pdev)
struct resource *irq_res;
struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
struct dma_slave_config config;
- dma_cap_mask_t mask;
- struct pxad_param param;
int ret = 0;
/*
@@ -278,10 +275,6 @@ static int pxa_ata_probe(struct platform_device *pdev)
ap->private_data = data;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- param.prio = PXAD_PRIO_LOWEST;
- param.drcmr = pdata->dma_dreq;
memset(&config, 0, sizeof(config));
config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
@@ -294,8 +287,7 @@ static int pxa_ata_probe(struct platform_device *pdev)
* Request the DMA channel
*/
data->dma_chan =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param, &pdev->dev, "data");
+ dma_request_slave_channel(&pdev->dev, "data");
if (!data->dma_chan)
return -EBUSY;
ret = dmaengine_slave_config(data->dma_chan, &config);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index bb96dc35950d..f5bd44b8bd63 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/libata.h>
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index b8d9cfc60374..4dc528bf8e85 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag,
{
/* We let libATA core do actual (queue) tag allocation */
- /* all non NCQ/queued commands should have tag#0 */
- if (ata_tag_internal(tag)) {
- DPRINTK("mapping internal cmds to tag#0\n");
- return 0;
- }
-
if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
DPRINTK("tag %d invalid : out of range\n", tag);
return 0;
@@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
/* Workaround for data length mismatch errata */
if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- qc = ata_qc_from_tag(ap, tag);
+ ata_qc_for_each_with_internal(ap, qc, tag) {
if (qc && ata_is_atapi(qc->tf.protocol)) {
u32 hcontrol;
/* Set HControl[27] to clear error registers */
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 10ae11aa1926..72c9b922a77b 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
struct nv_adma_port_priv *port0, *port1;
- struct scsi_device *sdev0, *sdev1;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long segment_boundary, flags;
unsigned short sg_tablesize;
@@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
port0 = ap->host->ports[0]->private_data;
port1 = ap->host->ports[1]->private_data;
- sdev0 = ap->host->ports[0]->link.device[0].sdev;
- sdev1 = ap->host->ports[1]->link.device[0].sdev;
if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
(port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
/*
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ff81a576347e..82532c299bb5 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
skb_queue_head_init(&iadev->rx_dma_q);
iadev->rx_free_desc_qhead = NULL;
- iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+ iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
if (!iadev->rx_open) {
printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
dev->number);
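kcalloc(n, size, flags) returns zeroed storage for n elements of size bytes each, so the argument order matters: the old kcalloc(4, iadev->num_vc, ...) allocated 4 * num_vc bytes, which is too small to hold num_vc pointers on 64-bit. A sketch of the corrected allocation:

static struct atm_vcc **example_alloc_rx_open(unsigned int num_vc)
{
	/* num_vc zeroed pointers: sizeof(struct atm_vcc *) * num_vc bytes */
	return kcalloc(num_vc, sizeof(struct atm_vcc *), GFP_KERNEL);
}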
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a8d2eb0ceb8d..e89146ddede6 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1385,14 +1385,12 @@ static void zatm_close(struct atm_vcc *vcc)
static int zatm_open(struct atm_vcc *vcc)
{
- struct zatm_dev *zatm_dev;
struct zatm_vcc *zatm_vcc;
short vpi = vcc->vpi;
int vci = vcc->vci;
int error;
DPRINTK(">zatm_open\n");
- zatm_dev = ZATM_DEV(vcc->dev);
if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
vcc->dev_data = NULL;
if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
@@ -1483,6 +1481,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
+ pool = array_index_nospec(pool,
+ ZATM_LAST_POOL + 1);
if (copy_from_user(&info,
&((struct zatm_pool_req __user *) arg)->info,
sizeof(info))) return -EFAULT;
diff --git a/drivers/auxdisplay/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c
index 296fb30dfa00..dea031484cc4 100644
--- a/drivers/auxdisplay/arm-charlcd.c
+++ b/drivers/auxdisplay/arm-charlcd.c
@@ -331,8 +331,7 @@ out_no_resource:
static int charlcd_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct charlcd *lcd = platform_get_drvdata(pdev);
+ struct charlcd *lcd = dev_get_drvdata(dev);
/* Power the display off */
charlcd_4bit_command(lcd, HD_DISPCTRL);
@@ -341,8 +340,7 @@ static int charlcd_suspend(struct device *dev)
static int charlcd_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct charlcd *lcd = platform_get_drvdata(pdev);
+ struct charlcd *lcd = dev_get_drvdata(dev);
/* Turn the display back on */
charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON);
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 8673fc2b9eb8..81c22d20d9d9 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -99,10 +99,7 @@ static atomic_t charlcd_available = ATOMIC_INIT(1);
/* sleeps that many milliseconds with a reschedule */
static void long_sleep(int ms)
{
- if (in_interrupt())
- mdelay(ms);
- else
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
+ schedule_timeout_interruptible(msecs_to_jiffies(ms));
}
/* turn the backlight on or off */
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 78d8f1986fec..f1a42f0f1ded 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
diff --git a/drivers/base/base.h b/drivers/base/base.h
index a75c3025fb78..7a419a7a6235 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -84,8 +84,6 @@ struct device_private {
#define to_device_private_bus(obj) \
container_of(obj, struct device_private, knode_bus)
-extern int device_private_init(struct device *dev);
-
/* initialisation functions */
extern int devices_init(void);
extern int buses_init(void);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 2880e2ab01f5..5d5b5988e88b 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -74,52 +74,48 @@ static inline int get_cacheinfo_idx(enum cache_type type)
static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
- const __be32 *cache_size;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
- cache_size = of_get_property(np, propname, NULL);
- if (cache_size)
- this_leaf->size = of_read_number(cache_size, 1);
+ if (of_property_read_u32(np, propname, &this_leaf->size))
+ this_leaf->size = 0;
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
struct device_node *np)
{
- const __be32 *line_size;
int i, lim, ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
for (i = 0; i < lim; i++) {
+ int ret;
+ u32 line_size;
const char *propname;
propname = cache_type_info[ct_idx].line_size_props[i];
- line_size = of_get_property(np, propname, NULL);
- if (line_size)
+ ret = of_property_read_u32(np, propname, &line_size);
+ if (!ret) {
+ this_leaf->coherency_line_size = line_size;
break;
+ }
}
-
- if (line_size)
- this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
- const __be32 *nr_sets;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
- nr_sets = of_get_property(np, propname, NULL);
- if (nr_sets)
- this_leaf->number_of_sets = of_read_number(nr_sets, 1);
+ if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
+ this_leaf->number_of_sets = 0;
}
static void cache_associativity(struct cacheinfo *this_leaf)
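of_property_read_u32() reads a 32-bit cell and returns 0 on success, which removes the of_get_property()/of_read_number() pairing used before. A sketch of the pattern ("cache-size" is one of the property names held in cache_type_info):

static u32 example_read_cache_size(struct device_node *np)
{
	u32 size;

	if (of_property_read_u32(np, "cache-size", &size))
		return 0;	/* property missing or malformed */

	return size;
}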
diff --git a/drivers/base/core.c b/drivers/base/core.c
index df3e1a44707a..04bbcd779e11 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -105,7 +105,7 @@ static int device_is_dependent(struct device *dev, void *target)
struct device_link *link;
int ret;
- if (WARN_ON(dev == target))
+ if (dev == target)
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
@@ -113,7 +113,7 @@ static int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (WARN_ON(link->consumer == target))
+ if (link->consumer == target)
return 1;
ret = device_is_dependent(link->consumer, target);
@@ -178,10 +178,10 @@ void device_pm_move_to_tail(struct device *dev)
* of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
* ignored.
*
- * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
- * when the consumer device driver unbinds from it. The combination of both
- * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
- * to be returned.
+ * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed
+ * automatically when the consumer device driver unbinds from it.
+ * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS
+ * set is invalid and will cause NULL to be returned.
*
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
@@ -198,7 +198,8 @@ struct device_link *device_link_add(struct device *consumer,
struct device_link *link;
if (!consumer || !supplier ||
- ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
+ ((flags & DL_FLAG_STATELESS) &&
+ (flags & DL_FLAG_AUTOREMOVE_CONSUMER)))
return NULL;
device_links_write_lock();
@@ -372,6 +373,36 @@ void device_link_del(struct device_link *link)
}
EXPORT_SYMBOL_GPL(device_link_del);
+/**
+ * device_link_remove - remove a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_remove(void *consumer, struct device *supplier)
+{
+ struct device_link *link;
+
+ if (WARN_ON(consumer == supplier))
+ return;
+
+ device_links_write_lock();
+ device_pm_lock();
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node) {
+ if (link->consumer == consumer) {
+ kref_put(&link->kref, __device_link_del);
+ break;
+ }
+ }
+
+ device_pm_unlock();
+ device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_remove);
+
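device_link_remove() lets code that created a link with device_link_add() drop it again by naming the two endpoints, without keeping the struct device_link pointer around. A pairing sketch (devices hypothetical):

static void example_link_lifetime(struct device *consumer,
				  struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	if (!link)
		return;

	/* ... later, possibly from code that never saw 'link' ... */
	device_link_remove(consumer, supplier);
}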
static void device_links_missing_supplier(struct device *dev)
{
struct device_link *link;
@@ -479,7 +510,7 @@ static void __device_links_no_driver(struct device *dev)
if (link->flags & DL_FLAG_STATELESS)
continue;
- if (link->flags & DL_FLAG_AUTOREMOVE)
+ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
kref_put(&link->kref, __device_link_del);
else if (link->status != DL_STATE_SUPPLIER_UNBIND)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
@@ -515,8 +546,18 @@ void device_links_driver_cleanup(struct device *dev)
if (link->flags & DL_FLAG_STATELESS)
continue;
- WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
+ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+
+ /*
+ * autoremove the links between this @dev and its consumer
+ * devices that are not active, i.e. where the link state
+ * has moved to DL_STATE_SUPPLIER_UNBIND.
+ */
+ if (link->status == DL_STATE_SUPPLIER_UNBIND &&
+ link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ kref_put(&link->kref, __device_link_del);
+
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
@@ -866,10 +907,19 @@ static const void *device_namespace(struct kobject *kobj)
return ns;
}
+static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ if (dev->class && dev->class->get_ownership)
+ dev->class->get_ownership(dev, uid, gid);
+}
+
static struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
.namespace = device_namespace,
+ .get_ownership = device_get_ownership,
};
@@ -1597,6 +1647,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
return;
mutex_lock(&gdp_mutex);
+ if (!kobject_has_children(glue_dir))
+ kobject_del(glue_dir);
kobject_put(glue_dir);
mutex_unlock(&gdp_mutex);
}
@@ -1736,7 +1788,7 @@ static void device_remove_sys_dev_entry(struct device *dev)
}
}
-int device_private_init(struct device *dev)
+static int device_private_init(struct device *dev)
{
dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
if (!dev->p)
@@ -2809,6 +2861,9 @@ void device_shutdown(void)
{
struct device *dev, *parent;
+ wait_for_device_probe();
+ device_block_probing();
+
spin_lock(&devices_kset->list_lock);
/*
* Walk the devices list backward, shutting down each in turn.
@@ -3002,12 +3057,12 @@ void func(const struct device *dev, const char *fmt, ...) \
} \
EXPORT_SYMBOL(func);
-define_dev_printk_level(dev_emerg, KERN_EMERG);
-define_dev_printk_level(dev_alert, KERN_ALERT);
-define_dev_printk_level(dev_crit, KERN_CRIT);
-define_dev_printk_level(dev_err, KERN_ERR);
-define_dev_printk_level(dev_warn, KERN_WARNING);
-define_dev_printk_level(dev_notice, KERN_NOTICE);
+define_dev_printk_level(_dev_emerg, KERN_EMERG);
+define_dev_printk_level(_dev_alert, KERN_ALERT);
+define_dev_printk_level(_dev_crit, KERN_CRIT);
+define_dev_printk_level(_dev_err, KERN_ERR);
+define_dev_printk_level(_dev_warn, KERN_WARNING);
+define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
#endif
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 30cc9c877ebb..eb9443d5bae1 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -540,16 +540,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
&dev_attr_spectre_v1.attr,
&dev_attr_spectre_v2.attr,
&dev_attr_spec_store_bypass.attr,
+ &dev_attr_l1tf.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1435d7281c66..edfc9f0b1180 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -16,6 +16,7 @@
* Copyright (c) 2007-2009 Novell Inc.
*/
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -53,6 +54,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static struct dentry *deferred_devices;
static bool initcalls_done;
/*
@@ -63,26 +65,6 @@ static bool initcalls_done;
static bool defer_all_probes;
/*
- * For initcall_debug, show the deferred probes executed in late_initcall
- * processing.
- */
-static void deferred_probe_debug(struct device *dev)
-{
- ktime_t calltime, delta, rettime;
- unsigned long long duration;
-
- printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev),
- task_pid_nr(current));
- calltime = ktime_get();
- bus_probe_device(dev);
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n",
- dev_name(dev), duration);
-}
-
-/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
static void deferred_probe_work_func(struct work_struct *work)
@@ -125,11 +107,7 @@ static void deferred_probe_work_func(struct work_struct *work)
device_pm_move_to_tail(dev);
dev_dbg(dev, "Retrying from deferred list\n");
- if (initcall_debug && !initcalls_done)
- deferred_probe_debug(dev);
- else
- bus_probe_device(dev);
-
+ bus_probe_device(dev);
mutex_lock(&deferred_probe_mutex);
put_device(dev);
@@ -224,6 +202,69 @@ void device_unblock_probing(void)
driver_deferred_probe_trigger();
}
+/*
+ * deferred_devs_show() - Show the devices in the deferred probe pending list.
+ */
+static int deferred_devs_show(struct seq_file *s, void *data)
+{
+ struct device_private *curr;
+
+ mutex_lock(&deferred_probe_mutex);
+
+ list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
+ seq_printf(s, "%s\n", dev_name(curr->device));
+
+ mutex_unlock(&deferred_probe_mutex);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(deferred_devs);
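For reference, DEFINE_SHOW_ATTRIBUTE(deferred_devs) expands to roughly the following single-open seq_file boilerplate (sketch, simplified):

static int deferred_devs_open(struct inode *inode, struct file *file)
{
	return single_open(file, deferred_devs_show, inode->i_private);
}

static const struct file_operations deferred_devs_fops = {
	.owner		= THIS_MODULE,
	.open		= deferred_devs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};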
+
+static int deferred_probe_timeout = -1;
+static int __init deferred_probe_timeout_setup(char *str)
+{
+ deferred_probe_timeout = simple_strtol(str, NULL, 10);
+ return 1;
+}
+__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
+
+/**
+ * driver_deferred_probe_check_state() - Check deferred probe state
+ * @dev: device to check
+ *
+ * Returns -ENODEV if init is done and all built-in drivers have had a chance
+ * to probe (i.e. initcalls are done), -ETIMEDOUT if the deferred probe
+ * timeout has expired, or -EPROBE_DEFER if none of those conditions are met.
+ *
+ * Drivers or subsystems can opt in to calling this function instead of directly
+ * returning -EPROBE_DEFER.
+ */
+int driver_deferred_probe_check_state(struct device *dev)
+{
+ if (initcalls_done) {
+ if (!deferred_probe_timeout) {
+ dev_WARN(dev, "deferred probe timeout, ignoring dependency");
+ return -ETIMEDOUT;
+ }
+ dev_warn(dev, "ignoring dependency for device, assuming no driver");
+ return -ENODEV;
+ }
+ return -EPROBE_DEFER;
+}
+
+static void deferred_probe_timeout_work_func(struct work_struct *work)
+{
+ struct device_private *private, *p;
+
+ deferred_probe_timeout = 0;
+ driver_deferred_probe_trigger();
+ flush_work(&deferred_probe_work);
+
+ list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
+ dev_info(private->device, "deferred probe pending");
+}
+static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
+
/**
* deferred_probe_initcall() - Enable probing of deferred devices
*
@@ -233,15 +274,36 @@ void device_unblock_probing(void)
*/
static int deferred_probe_initcall(void)
{
+ deferred_devices = debugfs_create_file("devices_deferred", 0444, NULL,
+ NULL, &deferred_devs_fops);
+
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
/* Sort as many dependencies as possible before exiting initcalls */
flush_work(&deferred_probe_work);
initcalls_done = true;
+
+ /*
+ * Trigger deferred probe again; this time we won't defer anything
+ * that is optional.
+ */
+ driver_deferred_probe_trigger();
+ flush_work(&deferred_probe_work);
+
+ if (deferred_probe_timeout > 0) {
+ schedule_delayed_work(&deferred_probe_timeout_work,
+ deferred_probe_timeout * HZ);
+ }
return 0;
}
late_initcall(deferred_probe_initcall);
+static void __exit deferred_probe_exit(void)
+{
+ debugfs_remove_recursive(deferred_devices);
+}
+__exitcall(deferred_probe_exit);
+
/**
* device_is_bound() - Check if device is bound to a driver
* @dev: device to check
@@ -434,14 +496,6 @@ re_probe:
goto probe_failed;
}
- /*
- * Ensure devices are listed in devices_kset in correct order
- * It's important to move Dev to the end of devices_kset before
- * calling .probe, because it could be recursive and parent Dev
- * should always go first
- */
- devices_kset_move_last(dev);
-
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)
@@ -527,6 +581,23 @@ done:
return ret;
}
+/*
+ * For initcall_debug, show the driver probe time.
+ */
+static int really_probe_debug(struct device *dev, struct device_driver *drv)
+{
+ ktime_t calltime, delta, rettime;
+ int ret;
+
+ calltime = ktime_get();
+ ret = really_probe(dev, drv);
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
+ dev_name(dev), ret, (s64) ktime_to_us(delta));
+ return ret;
+}
+
/**
* driver_probe_done
* Determine if the probe sequence is finished or not.
@@ -585,7 +656,10 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pm_runtime_get_sync(dev->parent);
pm_runtime_barrier(dev);
- ret = really_probe(dev, drv);
+ if (initcall_debug)
+ ret = really_probe_debug(dev, drv);
+ else
+ ret = really_probe(dev, drv);
pm_request_idle(dev);
if (dev->parent)
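
The new helper is meant to replace open-coded returns of -EPROBE_DEFER in supplier lookup paths. A minimal sketch of a consumer opting in, assuming a hypothetical foo_supplier_available() lookup that is not part of this patch:

	/* Stop deferring on a missing supplier once initcalls are done,
	 * or once a deferred_probe_timeout= window has expired, instead
	 * of deferring forever.
	 */
	static int foo_get_supplier(struct device *dev)
	{
		if (!foo_supplier_available(dev))	/* hypothetical lookup */
			return driver_deferred_probe_check_state(dev);

		return 0;
	}

Before initcalls complete the sketch keeps deferring; afterwards it gives up with -ENODEV, or with -ETIMEDOUT once a deferred_probe_timeout= boot parameter has counted down to zero.
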
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 7f732744f0d3..b5c865fe263b 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -219,11 +219,6 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
-/* Some architectures don't have PAGE_KERNEL_RO */
-#ifndef PAGE_KERNEL_RO
-#define PAGE_KERNEL_RO PAGE_KERNEL
-#endif
-
/* one pages buffer should be mapped/unmapped only once */
static int map_fw_priv_pages(struct fw_priv *fw_priv)
{
@@ -651,6 +646,8 @@ static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
{
+ int ret;
+
if (fw_fallback_config.ignore_sysfs_fallback) {
pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
return false;
@@ -659,6 +656,11 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
if ((opt_flags & FW_OPT_NOFALLBACK))
return false;
+ /* Also permit LSMs and IMA to fail firmware sysfs fallback */
+ ret = security_kernel_load_data(LOADING_FIRMWARE);
+ if (ret < 0)
+ return false;
+
return fw_force_sysfs_fallback(opt_flags);
}
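
The hook consulted here is the kernel_load_data LSM hook introduced in the same series. A hedged sketch of a module-side implementation denying the fallback (mylsm_locked_down is an invented policy flag, and hook registration boilerplate is omitted):

	/* Sketch: an LSM rejecting the firmware sysfs fallback path. */
	static int mylsm_kernel_load_data(enum kernel_load_data_id id)
	{
		if (id == LOADING_FIRMWARE && mylsm_locked_down)
			return -EPERM;	/* makes fw_run_sysfs_fallback() bail */

		return 0;
	}
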
diff --git a/drivers/base/init.c b/drivers/base/init.c
index dd85b05a6a16..908e6520e804 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -30,9 +30,9 @@ void __init driver_init(void)
/* These are also core pieces, but must come after the
* core core pieces.
*/
+ of_core_init();
platform_bus_init();
cpu_dev_init();
memory_dev_init();
container_dev_init();
- of_core_init();
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f5e560188a18..c8a1cb0b6136 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -736,8 +736,6 @@ int hotplug_memory_register(int nid, struct mem_section *section)
mem->section_count++;
}
- if (mem->section_count == sections_per_block)
- ret = register_mem_sect_under_node(mem, nid, false);
out:
mutex_unlock(&mem_sysfs_mutex);
return ret;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index a5e821d09656..1ac4c36e13bb 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -399,18 +399,12 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
/* register memory section under specified node if it spans that node */
-int register_mem_sect_under_node(struct memory_block *mem_blk, int nid,
- bool check_nid)
+int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
{
- int ret;
+ int ret, nid = *(int *)arg;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
- if (!mem_blk)
- return -EFAULT;
-
mem_blk->nid = nid;
- if (!node_online(nid))
- return 0;
sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
@@ -433,7 +427,7 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid,
* case, during hotplug we know that all pages in the memory
* block belong to the same node.
*/
- if (check_nid) {
+ if (system_state == SYSTEM_BOOTING) {
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
@@ -490,41 +484,10 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
return 0;
}
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages,
- bool check_nid)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
{
- unsigned long end_pfn = start_pfn + nr_pages;
- unsigned long pfn;
- struct memory_block *mem_blk = NULL;
- int err = 0;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- unsigned long section_nr = pfn_to_section_nr(pfn);
- struct mem_section *mem_sect;
- int ret;
-
- if (!present_section_nr(section_nr))
- continue;
- mem_sect = __nr_to_section(section_nr);
-
- /* same memblock ? */
- if (mem_blk)
- if ((section_nr >= mem_blk->start_section_nr) &&
- (section_nr <= mem_blk->end_section_nr))
- continue;
-
- mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
-
- ret = register_mem_sect_under_node(mem_blk, nid, check_nid);
- if (!err)
- err = ret;
-
- /* discard ref obtained in find_memory_block() */
- }
-
- if (mem_blk)
- kobject_put(&mem_blk->dev.kobj);
- return err;
+ return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
+ register_mem_sect_under_node);
}
#ifdef CONFIG_HUGETLBFS
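
link_mem_sections() now leans on walk_memory_range() invoking a callback once per memory block in the pfn range. A sketch of another callback with the same shape, assuming the 4.18-era walk_memory_range(start_pfn, end_pfn, arg, func) signature; the counting logic is illustrative only:

	/* Count memory blocks in a pfn range; same callback signature as
	 * register_mem_sect_under_node() above.
	 */
	static int count_mem_blocks(struct memory_block *mem_blk, void *arg)
	{
		int *count = arg;

		(*count)++;
		return 0;	/* a non-zero return aborts the walk */
	}

	/* usage: walk_memory_range(start_pfn, end_pfn, &count, count_mem_blocks); */
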
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index df41b4780b3b..b413951c6abc 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -153,6 +153,23 @@ struct device *dev_pm_domain_attach_by_id(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
/**
+ * dev_pm_domain_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * For a detailed function description, see dev_pm_domain_attach_by_id().
+ */
+struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ char *name)
+{
+ if (dev->pm_domain)
+ return ERR_PTR(-EEXIST);
+
+ return genpd_dev_pm_attach_by_name(dev, name);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
+
+/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
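
A consumer with several PM domains would call the new helper from probe. A minimal sketch, assuming an invented "perf" entry in the device's power-domain-names property:

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *pd_dev;

		pd_dev = dev_pm_domain_attach_by_name(&pdev->dev, "perf");
		if (IS_ERR(pd_dev))
			return PTR_ERR(pd_dev);	/* e.g. -EEXIST: already attached */
		if (!pd_dev)
			return -ENODEV;		/* no matching domain name */

		/* drive runtime PM through the returned virtual device */
		return 0;
	}
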
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index c298de8a8308..4b5714199490 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev)
}
static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
- unsigned int index)
+ unsigned int index, bool power_on)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
@@ -2253,7 +2253,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return -EPROBE_DEFER;
+ return driver_deferred_probe_check_state(dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
@@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- genpd_lock(pd);
- ret = genpd_power_on(pd, 0);
- genpd_unlock(pd);
+ if (power_on) {
+ genpd_lock(pd);
+ ret = genpd_power_on(pd, 0);
+ genpd_unlock(pd);
+ }
if (ret)
genpd_remove_device(pd, dev);
@@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev)
"#power-domain-cells") != 1)
return 0;
- return __genpd_dev_pm_attach(dev, dev->of_node, 0);
+ return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
@@ -2359,19 +2361,43 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
}
/* Try to attach the device to the PM domain at the specified index. */
- ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index);
+ ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
if (ret < 1) {
device_unregister(genpd_dev);
return ret ? ERR_PTR(ret) : NULL;
}
- pm_runtime_set_active(genpd_dev);
pm_runtime_enable(genpd_dev);
+ genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
return genpd_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
+/**
+ * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * Parse device's OF node to find a PM domain specifier using the
+ * power-domain-names DT property. For further description see
+ * genpd_dev_pm_attach_by_id().
+ */
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
+{
+ int index;
+
+ if (!dev->of_node)
+ return NULL;
+
+ index = of_property_match_string(dev->of_node, "power-domain-names",
+ name);
+ if (index < 0)
+ return NULL;
+
+ return genpd_dev_pm_attach_by_id(dev, index);
+}
+
static const struct of_device_id idle_state_match[] = {
{ .compatible = "domain-idle-state", },
{ }
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index aff34c0c2a3e..6ad5ef48b61e 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -45,3 +45,7 @@ config REGMAP_IRQ
config REGMAP_SOUNDWIRE
tristate
depends on SOUNDWIRE_BUS
+
+config REGMAP_SCCB
+ tristate
+ depends on I2C
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 5ed0023fabda..f5b4e8851d00 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
+obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 53785e0e297a..a6bf34d6394e 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -94,10 +94,12 @@ struct regmap {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *rd_noinc_table;
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
@@ -181,6 +183,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg);
bool regmap_readable(struct regmap *map, unsigned int reg);
bool regmap_volatile(struct regmap *map, unsigned int reg);
bool regmap_precious(struct regmap *map, unsigned int reg);
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg);
int _regmap_write(struct regmap *map, unsigned int reg,
unsigned int val);
diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c
new file mode 100644
index 000000000000..597042e2d009
--- /dev/null
+++ b/drivers/base/regmap/regmap-sccb.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+// Register map access API - SCCB support
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "internal.h"
+
+/**
+ * sccb_is_available - Check if the adapter supports SCCB protocol
+ * @adap: I2C adapter
+ *
+ * Return true if the I2C adapter is capable of using SCCB helper functions,
+ * false otherwise.
+ */
+static bool sccb_is_available(struct i2c_adapter *adap)
+{
+ u32 needed_funcs = I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+
+ /*
+ * If we ever want support for hardware doing SCCB natively, we will
+ * introduce a sccb_xfer() callback to struct i2c_algorithm and check
+ * for it here.
+ */
+
+ return (i2c_get_functionality(adap) & needed_funcs) == needed_funcs;
+}
+
+/**
+ * regmap_sccb_read - Read data from SCCB slave device
+ * @context: Device that will be interacted with
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * This executes the 2-phase write transmission cycle that is followed by a
+ * 2-phase read transmission cycle, returning negative errno else zero on
+ * success.
+ */
+static int regmap_sccb_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+ union i2c_smbus_data data;
+
+ i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &data);
+ if (ret < 0)
+ goto out;
+
+ *val = data.byte;
+out:
+ i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
+
+ return ret;
+}
+
+/**
+ * regmap_sccb_write - Write data to SCCB slave device
+ * @context: Device that will be interacted with
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * This executes the SCCB 3-phase write transmission cycle, returning
+ * zero on success or a negative errno on failure.
+ */
+static int regmap_sccb_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_sccb_bus = {
+ .reg_write = regmap_sccb_write,
+ .reg_read = regmap_sccb_read,
+};
+
+static const struct regmap_bus *regmap_get_sccb_bus(struct i2c_client *i2c,
+ const struct regmap_config *config)
+{
+ if (config->val_bits == 8 && config->reg_bits == 8 &&
+ sccb_is_available(i2c->adapter))
+ return &regmap_sccb_bus;
+
+ return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sccb);
+
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb);
+
+MODULE_LICENSE("GPL v2");
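
A hedged sketch of a sensor driver creating an SCCB regmap, assuming the devm_regmap_init_sccb() wrapper that the same series adds to <linux/regmap.h>; the ov_ names are invented:

	static const struct regmap_config ov_regmap_config = {
		.reg_bits = 8,	/* regmap_get_sccb_bus() requires 8-bit ... */
		.val_bits = 8,	/* ... registers and values */
	};

	static int ov_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
	{
		struct regmap *map;

		map = devm_regmap_init_sccb(client, &ov_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map); /* -ENOTSUPP: adapter lacks SCCB */

		/* regmap_read()/regmap_write() now use the 2-/3-phase cycles */
		return 0;
	}
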
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 91d501eda8a9..0968059f1ef5 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -7,33 +7,24 @@
#include "internal.h"
-static int regmap_slimbus_byte_reg_read(void *context, unsigned int reg,
- unsigned int *val)
+static int regmap_slimbus_write(void *context, const void *data, size_t count)
{
struct slim_device *sdev = context;
- int v;
- v = slim_readb(sdev, reg);
-
- if (v < 0)
- return v;
-
- *val = v;
-
- return 0;
+ return slim_write(sdev, *(u16 *)data, count - 2, (u8 *)data + 2);
}
-static int regmap_slimbus_byte_reg_write(void *context, unsigned int reg,
- unsigned int val)
+static int regmap_slimbus_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
struct slim_device *sdev = context;
- return slim_writeb(sdev, reg, val);
+ return slim_read(sdev, *(u16 *)reg, val_size, val);
}
static struct regmap_bus regmap_slimbus_bus = {
- .reg_write = regmap_slimbus_byte_reg_write,
- .reg_read = regmap_slimbus_byte_reg_read,
+ .write = regmap_slimbus_write,
+ .read = regmap_slimbus_read,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
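
With the switch from reg_read/reg_write to raw read/write callbacks, regmap hands the bus a preformatted buffer: a little-endian 16-bit register address followed by the value bytes, which is what the *(u16 *)data and data + 2 arithmetic above unpacks. A sketch of the same decoding, for illustration only:

	/* Buffer layout for a 16-bit-register map:
	 * [reg_lo][reg_hi][val0][val1]...; count includes the 2 header bytes.
	 */
	static void decode_slim_buf(const void *data, size_t count)
	{
		u16 reg = *(const u16 *)data;		/* little-endian reg */
		const u8 *vals = (const u8 *)data + 2;

		pr_debug("reg=0x%04x, %zu value bytes, first=0x%02x\n",
			 reg, count - 2, vals[0]);
	}
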
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 3bc84885eb91..0360a90ad6b6 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -168,6 +168,17 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
return false;
}
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
+{
+ if (map->readable_noinc_reg)
+ return map->readable_noinc_reg(map->dev, reg);
+
+ if (map->rd_noinc_table)
+ return regmap_check_range_table(map, reg, map->rd_noinc_table);
+
+ return true;
+}
+
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
size_t num)
{
@@ -766,10 +777,12 @@ struct regmap *__regmap_init(struct device *dev,
map->rd_table = config->rd_table;
map->volatile_table = config->volatile_table;
map->precious_table = config->precious_table;
+ map->rd_noinc_table = config->rd_noinc_table;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
+ map->readable_noinc_reg = config->readable_noinc_reg;
map->cache_type = config->cache_type;
spin_lock_init(&map->async_lock);
@@ -1285,6 +1298,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
+ map->readable_noinc_reg = config->readable_noinc_reg;
map->cache_type = config->cache_type;
regmap_debugfs_init(map, config->name);
@@ -2564,7 +2578,70 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
EXPORT_SYMBOL_GPL(regmap_raw_read);
/**
- * regmap_field_read() - Read a value to a single register field
+ * regmap_noinc_read() - Read data from a register without incrementing the
+ * register number
+ *
+ * @map: Register map to read from
+ * @reg: Register to read from
+ * @val: Pointer to data buffer
+ * @val_len: Length of output buffer in bytes.
+ *
+ * The regmap API usually assumes that bulk bus read operations will read a
+ * range of registers. Some devices have certain registers for which a read
+ * operation read will read from an internal FIFO.
+ *
+ * The target register must be volatile but registers after it can be
+ * completely unrelated cacheable registers.
+ *
+ * This will attempt multiple reads as required to read val_len bytes.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ size_t read_len;
+ int ret;
+
+ if (!map->bus)
+ return -EINVAL;
+ if (!map->bus->read)
+ return -ENOTSUPP;
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+ if (val_len == 0)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ while (val_len) {
+ if (map->max_raw_read && map->max_raw_read < val_len)
+ read_len = map->max_raw_read;
+ else
+ read_len = val_len;
+ ret = _regmap_raw_read(map, reg, val, read_len);
+ if (ret)
+ goto out_unlock;
+ val = ((u8 *)val) + read_len;
+ val_len -= read_len;
+ }
+
+out_unlock:
+ map->unlock(map->lock_arg);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_noinc_read);
+
+/**
+ * regmap_field_read() - Read a value from a single register field
*
* @field: Register field to read from
* @val: Pointer to store read value
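
A sketch of wiring a FIFO register up to the new noinc interface; the target register must be volatile as well as noinc-readable, and the register address here is invented:

	#define CHIP_FIFO	0x10	/* hypothetical FIFO data register */

	static const struct regmap_range chip_fifo_range[] = {
		regmap_reg_range(CHIP_FIFO, CHIP_FIFO),
	};

	static const struct regmap_access_table chip_fifo_table = {
		.yes_ranges	= chip_fifo_range,
		.n_yes_ranges	= ARRAY_SIZE(chip_fifo_range),
	};

	static const struct regmap_config chip_regmap_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.volatile_table	= &chip_fifo_table,	/* target must be volatile */
		.rd_noinc_table	= &chip_fifo_table,
	};

	/* drain 64 bytes without the register number advancing:
	 * err = regmap_noinc_read(map, CHIP_FIFO, buf, 64);
	 */
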
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index cb0f1aad20b7..b9558ff20830 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -30,6 +30,7 @@ config BCMA_HOST_PCI
config BCMA_HOST_SOC
bool "Support for BCMA in a SoC"
+ depends on HAS_IOMEM
help
Host interface for a Broadcom AXI bus directly mapped into
the memory. This only works with the Broadcom SoCs from the
@@ -61,7 +62,7 @@ config BCMA_DRIVER_PCI_HOSTMODE
config BCMA_DRIVER_MIPS
bool "BCMA Broadcom MIPS core driver"
- depends on MIPS
+ depends on MIPS || COMPILE_TEST
help
Driver for the Broadcom MIPS core attached to Broadcom specific
Advanced Microcontroller Bus.
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f6518067aa7d..581312ac375f 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -21,6 +21,7 @@
#define DAC960_DriverDate "21 Aug 2007"
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
@@ -2427,16 +2428,20 @@ static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T
{
DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
- unsigned char *ReadCacheStatus[] = { "Read Cache Disabled",
- "Read Cache Enabled",
- "Read Ahead Enabled",
- "Intelligent Read Ahead Enabled",
- "-", "-", "-", "-" };
- unsigned char *WriteCacheStatus[] = { "Write Cache Disabled",
- "Logical Device Read Only",
- "Write Cache Enabled",
- "Intelligent Write Cache Enabled",
- "-", "-", "-", "-" };
+ static const unsigned char *ReadCacheStatus[] = {
+ "Read Cache Disabled",
+ "Read Cache Enabled",
+ "Read Ahead Enabled",
+ "Intelligent Read Ahead Enabled",
+ "-", "-", "-", "-"
+ };
+ static const unsigned char *WriteCacheStatus[] = {
+ "Write Cache Disabled",
+ "Logical Device Read Only",
+ "Write Cache Enabled",
+ "Intelligent Write Cache Enabled",
+ "-", "-", "-", "-"
+ };
unsigned char *GeometryTranslation;
if (LogicalDeviceInfo == NULL) continue;
switch (LogicalDeviceInfo->DriveGeometry)
@@ -4338,14 +4343,16 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
{
DAC960_Controller_T *Controller = Command->Controller;
- unsigned char *SenseErrors[] = { "NO SENSE", "RECOVERED ERROR",
- "NOT READY", "MEDIUM ERROR",
- "HARDWARE ERROR", "ILLEGAL REQUEST",
- "UNIT ATTENTION", "DATA PROTECT",
- "BLANK CHECK", "VENDOR-SPECIFIC",
- "COPY ABORTED", "ABORTED COMMAND",
- "EQUAL", "VOLUME OVERFLOW",
- "MISCOMPARE", "RESERVED" };
+ static const unsigned char *SenseErrors[] = {
+ "NO SENSE", "RECOVERED ERROR",
+ "NOT READY", "MEDIUM ERROR",
+ "HARDWARE ERROR", "ILLEGAL REQUEST",
+ "UNIT ATTENTION", "DATA PROTECT",
+ "BLANK CHECK", "VENDOR-SPECIFIC",
+ "COPY ABORTED", "ABORTED COMMAND",
+ "EQUAL", "VOLUME OVERFLOW",
+ "MISCOMPARE", "RESERVED"
+ };
unsigned char *CommandName = "UNKNOWN";
switch (Command->CommandType)
{
@@ -6426,7 +6433,7 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
return true;
}
-static int dac960_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused dac960_proc_show(struct seq_file *m, void *v)
{
unsigned char *StatusMessage = "OK\n";
int ControllerNumber;
@@ -6446,14 +6453,16 @@ static int dac960_proc_show(struct seq_file *m, void *v)
return 0;
}
-static int dac960_initial_status_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused dac960_initial_status_proc_show(struct seq_file *m,
+ void *v)
{
DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer);
return 0;
}
-static int dac960_current_status_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused dac960_current_status_proc_show(struct seq_file *m,
+ void *v)
{
DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private;
unsigned char *StatusMessage =
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ad9b687a236a..d4913516823f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -74,12 +74,12 @@ config AMIGA_Z2RAM
config CDROM
tristate
+ select BLK_SCSI_REQUEST
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
depends on SH_DREAMCAST
select CDROM
- select BLK_SCSI_REQUEST # only for the generic cdrom code
help
A standard SEGA Dreamcast comes with a modified CD ROM drive called a
"GD-ROM" by SEGA to signify it is capable of reading special disks
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index dc061158b403..8566b188368b 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -36,8 +36,11 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
-obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
obj-$(CONFIG_ZRAM) += zram/
+obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
+null_blk-objs := null_blk_main.o
+null_blk-$(CONFIG_BLK_DEV_ZONED) += null_blk_zoned.o
+
skd-y := skd_main.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 096882e54095..136dc507d020 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1137,6 +1137,7 @@ noskb: if (buf)
break;
}
bvcpy(skb, f->buf->bio, f->iter, n);
+ /* fall through */
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
spin_lock_irq(&d->lock);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 697f735b07a4..41060e9cedf2 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -284,8 +284,8 @@ freedev(struct aoedev *d)
e = t + d->ntargets;
for (; t < e && *t; t++)
freetgt(d, *t);
- if (d->bufpool)
- mempool_destroy(d->bufpool);
+
+ mempool_destroy(d->bufpool);
skbpoolfree(d);
minor_free(d->sysminor);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bb976598ee43..df8103dd40ac 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -254,20 +254,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, bool is_write,
+ unsigned int len, unsigned int off, unsigned int op,
sector_t sector)
{
void *mem;
int err = 0;
- if (is_write) {
+ if (op_is_write(op)) {
err = copy_to_brd_setup(brd, sector, len);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (!is_write) {
+ if (!op_is_write(op)) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
@@ -296,7 +296,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
int err;
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(bio_op(bio)), sector);
+ bio_op(bio), sector);
if (err)
goto io_error;
sector += len >> SECTOR_SHIFT;
@@ -310,15 +310,15 @@ io_error:
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
+ struct page *page, unsigned int op)
{
struct brd_device *brd = bdev->bd_disk->private_data;
int err;
if (PageTransHuge(page))
return -ENOTSUPP;
- err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
- page_endio(page, is_write, err);
+ err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
+ page_endio(page, op_is_write(op), err);
return err;
}
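
Passing the raw op through works because the block layer encodes direction in the opcode's low bit, which is all op_is_write() checks:

	/* Write ops carry odd opcodes (REQ_OP_WRITE = 1, REQ_OP_DISCARD = 3,
	 * REQ_OP_WRITE_SAME = 7, ...); reads and the rest are even.
	 */
	static inline bool my_op_is_write(unsigned int op)
	{
		return op & 1;
	}
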
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index bc4ed2ed40a2..e35a234b0a8f 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -55,12 +55,10 @@
# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
-# define __must_hold(x) __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
-# define __must_hold(x)
#endif
/* shared module parameters, defined in drbd_main.c */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a80809bd3057..ef8212a4b73e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2103,14 +2103,10 @@ static void drbd_destroy_mempools(void)
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
- if (drbd_ee_cache)
- kmem_cache_destroy(drbd_ee_cache);
- if (drbd_request_cache)
- kmem_cache_destroy(drbd_request_cache);
- if (drbd_bm_ext_cache)
- kmem_cache_destroy(drbd_bm_ext_cache);
- if (drbd_al_ext_cache)
- kmem_cache_destroy(drbd_al_ext_cache);
+ kmem_cache_destroy(drbd_ee_cache);
+ kmem_cache_destroy(drbd_request_cache);
+ kmem_cache_destroy(drbd_bm_ext_cache);
+ kmem_cache_destroy(drbd_al_ext_cache);
drbd_ee_cache = NULL;
drbd_request_cache = NULL;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index be9450f5ad1c..75f6b47169e6 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2674,8 +2674,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
if (c_min_rate == 0)
return false;
- curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
- (int)part_stat_read(&disk->part0, sectors[1]) -
+ curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
atomic_read(&device->rs_sect_ev);
if (atomic_read(&device->ap_actlog_cnt)
@@ -2790,6 +2789,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
then we would do something smarter here than reading
the block... */
peer_req->flags |= EE_RS_THIN_REQ;
+ /* fall through */
case P_RS_DATA_REQUEST:
peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
@@ -2968,6 +2968,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
/* Else fall through to one of the other strategies... */
drbd_warn(device, "Discard younger/older primary did not find a decision\n"
"Using discard-least-changes instead\n");
+ /* fall through */
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
@@ -2979,6 +2980,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
}
if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
+ /* else: fall through */
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
rv = -1;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index d146fedc38bb..19cac36e9737 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -38,7 +38,7 @@ static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request
{
struct request_queue *q = device->rq_queue;
- generic_start_io_acct(q, bio_data_dir(req->master_bio),
+ generic_start_io_acct(q, bio_op(req->master_bio),
req->i.size >> 9, &device->vdisk->part0);
}
@@ -47,7 +47,7 @@ static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *r
{
struct request_queue *q = device->rq_queue;
- generic_end_io_acct(q, bio_data_dir(req->master_bio),
+ generic_end_io_acct(q, bio_op(req->master_bio),
&device->vdisk->part0, req->start_jif);
}
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1476cb3439f4..b8f77e83d456 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio)
what = COMPLETED_OK;
}
- bio_put(req->private_bio);
req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
/* not req_mod(), we need irqsave here! */
spin_lock_irqsave(&device->resource->req_lock, flags);
@@ -1690,9 +1690,7 @@ void drbd_rs_controller_reset(struct drbd_device *device)
atomic_set(&device->rs_sect_in, 0);
atomic_set(&device->rs_sect_ev, 0);
device->rs_in_flight = 0;
- device->rs_last_events =
- (int)part_stat_read(&disk->part0, sectors[0]) +
- (int)part_stat_read(&disk->part0, sectors[1]);
+ device->rs_last_events = (int)part_stat_read_accum(&disk->part0, sectors);
/* Updating the RCU protected object in place is necessary since
this function gets called from atomic context.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8871b5044d9e..48f622728ce6 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1461,7 +1461,6 @@ static void setup_rw_floppy(void)
int i;
int r;
int flags;
- int dflags;
unsigned long ready_date;
void (*function)(void);
@@ -1485,8 +1484,6 @@ static void setup_rw_floppy(void)
if (fd_wait_for_completion(ready_date, function))
return;
}
- dflags = DRS->flags;
-
if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
setup_DMA();
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d6b6f434fd4b..ea9debf59b22 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -690,7 +690,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
unsigned int arg)
{
struct file *file, *old_file;
- struct inode *inode;
int error;
error = -ENXIO;
@@ -711,7 +710,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (error)
goto out_putf;
- inode = file->f_mapping->host;
old_file = lo->lo_backing_file;
error = -EINVAL;
@@ -1611,8 +1609,10 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_GET_STATUS64:
case LOOP_SET_STATUS64:
arg = (unsigned long) compat_ptr(arg);
+ /* fall through */
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
+ case LOOP_SET_BLOCK_SIZE:
err = lo_ioctl(bdev, mode, cmd, arg);
break;
default:
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index c73626decb46..db253cd5b32a 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2575,8 +2575,7 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
static void mtip_hw_debugfs_exit(struct driver_data *dd)
{
- if (dd->dfs_node)
- debugfs_remove_recursive(dd->dfs_node);
+ debugfs_remove_recursive(dd->dfs_node);
}
/*
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 74a05561b620..3863c00372bb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -112,12 +112,16 @@ struct nbd_device {
struct task_struct *task_setup;
};
+#define NBD_CMD_REQUEUED 1
+
struct nbd_cmd {
struct nbd_device *nbd;
+ struct mutex lock;
int index;
int cookie;
- struct completion send_complete;
blk_status_t status;
+ unsigned long flags;
+ u32 cmd_cookie;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
return disk_to_dev(nbd->disk);
}
+static void nbd_requeue_cmd(struct nbd_cmd *cmd)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
+ blk_mq_requeue_request(req, true);
+}
+
+#define NBD_COOKIE_BITS 32
+
+static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+ u32 tag = blk_mq_unique_tag(req);
+ u64 cookie = cmd->cmd_cookie;
+
+ return (cookie << NBD_COOKIE_BITS) | tag;
+}
+
+static u32 nbd_handle_to_tag(u64 handle)
+{
+ return (u32)handle;
+}
+
+static u32 nbd_handle_to_cookie(u64 handle)
+{
+ return (u32)(handle >> NBD_COOKIE_BITS);
+}
+
static const char *nbdcmd_to_ascii(int cmd)
{
switch (cmd) {
@@ -319,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
config = nbd->config;
+ if (!mutex_trylock(&cmd->lock))
+ return BLK_EH_RESET_TIMER;
+
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),
"Connection timed out, retrying (%d/%d alive)\n",
@@ -343,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
}
- blk_mq_requeue_request(req, true);
+ mutex_unlock(&cmd->lock);
+ nbd_requeue_cmd(cmd);
nbd_config_put(nbd);
return BLK_EH_DONE;
}
@@ -353,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
set_bit(NBD_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR;
+ mutex_unlock(&cmd->lock);
sock_shutdown(nbd);
nbd_config_put(nbd);
done:
@@ -430,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
struct iov_iter from;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
+ u64 handle;
u32 type;
u32 nbd_cmd_flags = 0;
- u32 tag = blk_mq_unique_tag(req);
int sent = nsock->sent, skip = 0;
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
@@ -474,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
goto send_pages;
}
iov_iter_advance(&from, sent);
+ } else {
+ cmd->cmd_cookie++;
}
cmd->index = index;
cmd->cookie = nsock->cookie;
@@ -482,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
- memcpy(request.handle, &tag, sizeof(tag));
+ handle = nbd_cmd_handle(cmd);
+ memcpy(request.handle, &handle, sizeof(handle));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
req, nbdcmd_to_ascii(type),
@@ -500,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->pending = req;
nsock->sent = sent;
}
+ set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -541,6 +583,7 @@ send_pages:
*/
nsock->pending = req;
nsock->sent = sent;
+ set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
@@ -573,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
struct nbd_reply reply;
struct nbd_cmd *cmd;
struct request *req = NULL;
+ u64 handle;
u16 hwq;
u32 tag;
struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
struct iov_iter to;
+ int ret = 0;
reply.magic = 0;
iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
@@ -594,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-EPROTO);
}
- memcpy(&tag, reply.handle, sizeof(u32));
-
+ memcpy(&handle, reply.handle, sizeof(handle));
+ tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
@@ -606,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-ENOENT);
}
cmd = blk_mq_rq_to_pdu(req);
+
+ mutex_lock(&cmd->lock);
+ if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
+ dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
+ req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
+ ret = -ENOENT;
+ goto out;
+ }
+ if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
+ dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
+ req);
+ ret = -ENOENT;
+ goto out;
+ }
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
cmd->status = BLK_STS_IOERR;
- return cmd;
+ goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
@@ -635,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (nbd_disconnected(config) ||
config->num_connections <= 1) {
cmd->status = BLK_STS_IOERR;
- return cmd;
+ goto out;
}
- return ERR_PTR(-EIO);
+ ret = -EIO;
+ goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
req, bvec.bv_len);
}
- } else {
- /* See the comment in nbd_queue_rq. */
- wait_for_completion(&cmd->send_complete);
}
- return cmd;
+out:
+ mutex_unlock(&cmd->lock);
+ return ret ? ERR_PTR(ret) : cmd;
}
static void recv_work(struct work_struct *work)
@@ -805,7 +864,7 @@ again:
*/
blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
- blk_mq_requeue_request(req, true);
+ nbd_requeue_cmd(cmd);
ret = 0;
goto out;
}
@@ -818,7 +877,7 @@ again:
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
- blk_mq_requeue_request(req, true);
+ nbd_requeue_cmd(cmd);
ret = 0;
}
out:
@@ -842,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* that the server is misbehaving (or there was an error) before we're
* done sending everything over the wire.
*/
- init_completion(&cmd->send_complete);
+ mutex_lock(&cmd->lock);
+ clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
/* We can be called directly from the user space process, which means we
* could possibly have signals pending so our sendmsg will fail. In
@@ -854,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = BLK_STS_IOERR;
else if (!ret)
ret = BLK_STS_OK;
- complete(&cmd->send_complete);
+ mutex_unlock(&cmd->lock);
return ret;
}
@@ -1460,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->nbd = set->driver_data;
+ cmd->flags = 0;
+ mutex_init(&cmd->lock);
return 0;
}
@@ -1571,7 +1633,7 @@ static int find_free_cb(int id, void *ptr, void *data)
}
/* Netlink interface. */
-static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
+static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
[NBD_ATTR_INDEX] = { .type = NLA_U32 },
[NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
[NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
@@ -1583,14 +1645,14 @@ static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
[NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
};
-static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
+static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
[NBD_SOCK_FD] = { .type = NLA_U32 },
};
/* We don't use this right now since we don't parse the incoming list, but we
* still want it here so userspace knows what to expect.
*/
-static struct nla_policy __attribute__((unused))
+static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
[NBD_DEVICE_INDEX] = { .type = NLA_U32 },
[NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
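
The 64-bit wire handle now packs a per-command cookie above the blk-mq tag, so a late reply to a command that has since been requeued and reissued no longer matches. A self-contained sketch of the packing, in userspace style for illustration:

	#include <stdint.h>

	#define NBD_COOKIE_BITS 32

	static uint64_t pack_handle(uint32_t cookie, uint32_t tag)
	{
		return ((uint64_t)cookie << NBD_COOKIE_BITS) | tag;
	}

	/* A reissued command bumps its cookie, so a stale reply's handle
	 * decodes to an old cookie and is rejected with -ENOENT above.
	 */
	static int reply_is_stale(uint64_t handle, uint32_t current_cookie)
	{
		return (uint32_t)(handle >> NBD_COOKIE_BITS) != current_cookie;
	}
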
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
new file mode 100644
index 000000000000..d81781f22dba
--- /dev/null
+++ b/drivers/block/null_blk.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BLK_NULL_BLK_H
+#define __BLK_NULL_BLK_H
+
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/hrtimer.h>
+#include <linux/configfs.h>
+#include <linux/badblocks.h>
+#include <linux/fault-inject.h>
+
+struct nullb_cmd {
+ struct list_head list;
+ struct llist_node ll_list;
+ struct __call_single_data csd;
+ struct request *rq;
+ struct bio *bio;
+ unsigned int tag;
+ blk_status_t error;
+ struct nullb_queue *nq;
+ struct hrtimer timer;
+};
+
+struct nullb_queue {
+ unsigned long *tag_map;
+ wait_queue_head_t wait;
+ unsigned int queue_depth;
+ struct nullb_device *dev;
+ unsigned int requeue_selection;
+
+ struct nullb_cmd *cmds;
+};
+
+struct nullb_device {
+ struct nullb *nullb;
+ struct config_item item;
+ struct radix_tree_root data; /* data stored in the disk */
+ struct radix_tree_root cache; /* disk cache data */
+ unsigned long flags; /* device flags */
+ unsigned int curr_cache;
+ struct badblocks badblocks;
+
+ unsigned int nr_zones;
+ struct blk_zone *zones;
+ sector_t zone_size_sects;
+
+ unsigned long size; /* device size in MB */
+ unsigned long completion_nsec; /* time in ns to complete a request */
+ unsigned long cache_size; /* disk cache size in MB */
+ unsigned long zone_size; /* zone size in MB if device is zoned */
+ unsigned int submit_queues; /* number of submission queues */
+ unsigned int home_node; /* home node for the device */
+ unsigned int queue_mode; /* block interface */
+ unsigned int blocksize; /* block size */
+ unsigned int irqmode; /* IRQ completion handler */
+ unsigned int hw_queue_depth; /* queue depth */
+ unsigned int index; /* index of the disk, only valid with a disk */
+ unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
+ bool blocking; /* blocking blk-mq device */
+ bool use_per_node_hctx; /* use per-node allocation for hardware context */
+ bool power; /* power on/off the device */
+ bool memory_backed; /* if data is stored in memory */
+ bool discard; /* if support discard */
+ bool zoned; /* if device is zoned */
+};
+
+struct nullb {
+ struct nullb_device *dev;
+ struct list_head list;
+ unsigned int index;
+ struct request_queue *q;
+ struct gendisk *disk;
+ struct blk_mq_tag_set *tag_set;
+ struct blk_mq_tag_set __tag_set;
+ unsigned int queue_depth;
+ atomic_long_t cur_bytes;
+ struct hrtimer bw_timer;
+ unsigned long cache_flush_pos;
+ spinlock_t lock;
+
+ struct nullb_queue *queues;
+ unsigned int nr_queues;
+ char disk_name[DISK_NAME_LEN];
+};
+
+#ifdef CONFIG_BLK_DEV_ZONED
+int null_zone_init(struct nullb_device *dev);
+void null_zone_exit(struct nullb_device *dev);
+blk_status_t null_zone_report(struct nullb *nullb,
+ struct nullb_cmd *cmd);
+void null_zone_write(struct nullb_cmd *cmd);
+void null_zone_reset(struct nullb_cmd *cmd);
+#else
+static inline int null_zone_init(struct nullb_device *dev)
+{
+ return -EINVAL;
+}
+static inline void null_zone_exit(struct nullb_device *dev) {}
+static inline blk_status_t null_zone_report(struct nullb *nullb,
+ struct nullb_cmd *cmd)
+{
+ return BLK_STS_NOTSUPP;
+}
+static inline void null_zone_write(struct nullb_cmd *cmd) {}
+static inline void null_zone_reset(struct nullb_cmd *cmd) {}
+#endif /* CONFIG_BLK_DEV_ZONED */
+#endif /* __NULL_BLK_H */
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk_main.c
index 042c778e5a4e..6127e3ff7b4b 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk_main.c
@@ -7,14 +7,8 @@
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
-#include <linux/blkdev.h>
#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/blk-mq.h>
-#include <linux/hrtimer.h>
-#include <linux/configfs.h>
-#include <linux/badblocks.h>
-#include <linux/fault-inject.h>
+#include "null_blk.h"
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
@@ -35,28 +29,6 @@ static inline u64 mb_per_tick(int mbps)
return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
-struct nullb_cmd {
- struct list_head list;
- struct llist_node ll_list;
- struct __call_single_data csd;
- struct request *rq;
- struct bio *bio;
- unsigned int tag;
- blk_status_t error;
- struct nullb_queue *nq;
- struct hrtimer timer;
-};
-
-struct nullb_queue {
- unsigned long *tag_map;
- wait_queue_head_t wait;
- unsigned int queue_depth;
- struct nullb_device *dev;
- unsigned int requeue_selection;
-
- struct nullb_cmd *cmds;
-};
-
/*
* Status flags for nullb_device.
*
@@ -92,52 +64,6 @@ struct nullb_page {
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
-struct nullb_device {
- struct nullb *nullb;
- struct config_item item;
- struct radix_tree_root data; /* data stored in the disk */
- struct radix_tree_root cache; /* disk cache data */
- unsigned long flags; /* device flags */
- unsigned int curr_cache;
- struct badblocks badblocks;
-
- unsigned long size; /* device size in MB */
- unsigned long completion_nsec; /* time in ns to complete a request */
- unsigned long cache_size; /* disk cache size in MB */
- unsigned int submit_queues; /* number of submission queues */
- unsigned int home_node; /* home node for the device */
- unsigned int queue_mode; /* block interface */
- unsigned int blocksize; /* block size */
- unsigned int irqmode; /* IRQ completion handler */
- unsigned int hw_queue_depth; /* queue depth */
- unsigned int index; /* index of the disk, only valid with a disk */
- unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
- bool blocking; /* blocking blk-mq device */
- bool use_per_node_hctx; /* use per-node allocation for hardware context */
- bool power; /* power on/off the device */
- bool memory_backed; /* if data is stored in memory */
- bool discard; /* if support discard */
-};
-
-struct nullb {
- struct nullb_device *dev;
- struct list_head list;
- unsigned int index;
- struct request_queue *q;
- struct gendisk *disk;
- struct blk_mq_tag_set *tag_set;
- struct blk_mq_tag_set __tag_set;
- unsigned int queue_depth;
- atomic_long_t cur_bytes;
- struct hrtimer bw_timer;
- unsigned long cache_flush_pos;
- spinlock_t lock;
-
- struct nullb_queue *queues;
- unsigned int nr_queues;
- char disk_name[DISK_NAME_LEN];
-};
-
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
@@ -254,6 +180,14 @@ static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
+static bool g_zoned;
+module_param_named(zoned, g_zoned, bool, S_IRUGO);
+MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");
+
+static unsigned long g_zone_size = 256;
+module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
+MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two. Default: 256");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -357,6 +291,8 @@ NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
+NULLB_DEVICE_ATTR(zoned, bool);
+NULLB_DEVICE_ATTR(zone_size, ulong);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -390,6 +326,7 @@ static ssize_t nullb_device_power_store(struct config_item *item,
null_del_dev(dev->nullb);
mutex_unlock(&lock);
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
}
return count;
@@ -468,6 +405,8 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_mbps,
&nullb_device_attr_cache_size,
&nullb_device_attr_badblocks,
+ &nullb_device_attr_zoned,
+ &nullb_device_attr_zone_size,
NULL,
};
@@ -520,7 +459,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
+ return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -579,6 +518,8 @@ static struct nullb_device *null_alloc_dev(void)
dev->hw_queue_depth = g_hw_queue_depth;
dev->blocking = g_blocking;
dev->use_per_node_hctx = g_use_per_node_hctx;
+ dev->zoned = g_zoned;
+ dev->zone_size = g_zone_size;
return dev;
}
@@ -587,6 +528,7 @@ static void null_free_dev(struct nullb_device *dev)
if (!dev)
return;
+ null_zone_exit(dev);
badblocks_exit(&dev->badblocks);
kfree(dev);
}
@@ -862,7 +804,9 @@ static struct nullb_page *null_lookup_page(struct nullb *nullb,
}
static struct nullb_page *null_insert_page(struct nullb *nullb,
- sector_t sector, bool ignore_cache)
+ sector_t sector, bool ignore_cache)
+ __releases(&nullb->lock)
+ __acquires(&nullb->lock)
{
u64 idx;
struct nullb_page *t_page;
@@ -1219,6 +1163,11 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
struct nullb *nullb = dev->nullb;
int err = 0;
+ if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
+ cmd->error = null_zone_report(nullb, cmd);
+ goto out;
+ }
+
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
struct request *rq = cmd->rq;
@@ -1283,6 +1232,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
}
}
cmd->error = errno_to_blk_status(err);
+
+ if (!cmd->error && dev->zoned) {
+ if (req_op(cmd->rq) == REQ_OP_WRITE)
+ null_zone_write(cmd);
+ else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
+ null_zone_reset(cmd);
+ }
out:
/* Complete IO by inline, softirq or timer */
switch (dev->irqmode) {
@@ -1810,6 +1766,15 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_flush_queueable(nullb->q, true);
}
+ if (dev->zoned) {
+ rv = null_zone_init(dev);
+ if (rv)
+ goto out_cleanup_blk_queue;
+
+ blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
+ nullb->q->limits.zoned = BLK_ZONED_HM;
+ }
+
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
@@ -1828,13 +1793,16 @@ static int null_add_dev(struct nullb_device *dev)
rv = null_gendisk_register(nullb);
if (rv)
- goto out_cleanup_blk_queue;
+ goto out_cleanup_zone;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
return 0;
+out_cleanup_zone:
+ if (dev->zoned)
+ null_zone_exit(dev);
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
@@ -1861,6 +1829,11 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}
+ if (!is_power_of_2(g_zone_size)) {
+ pr_err("null_blk: zone_size must be power-of-two\n");
+ return -EINVAL;
+ }
+
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
new file mode 100644
index 000000000000..a979ca00d7be
--- /dev/null
+++ b/drivers/block/null_blk_zoned.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/vmalloc.h>
+#include "null_blk.h"
+
+/* zone_size in MBs to sectors. */
+#define ZONE_SIZE_SHIFT 11
+
+static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
+{
+ return sect >> ilog2(dev->zone_size_sects);
+}
+
+int null_zone_init(struct nullb_device *dev)
+{
+ sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
+ sector_t sector = 0;
+ unsigned int i;
+
+ if (!is_power_of_2(dev->zone_size)) {
+ pr_err("null_blk: zone_size must be power-of-two\n");
+ return -EINVAL;
+ }
+
+ dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
+ dev->nr_zones = dev_size >>
+ (SECTOR_SHIFT + ilog2(dev->zone_size_sects));
+ dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!dev->zones)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->nr_zones; i++) {
+ struct blk_zone *zone = &dev->zones[i];
+
+ zone->start = zone->wp = sector;
+ zone->len = dev->zone_size_sects;
+ zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ zone->cond = BLK_ZONE_COND_EMPTY;
+
+ sector += dev->zone_size_sects;
+ }
+
+ return 0;
+}
+
+void null_zone_exit(struct nullb_device *dev)
+{
+ kvfree(dev->zones);
+}
+
+static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
+ unsigned int zno, unsigned int nr_zones)
+{
+ struct blk_zone_report_hdr *hdr = NULL;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ void *addr;
+ unsigned int zones_to_cpy;
+
+ bio_for_each_segment(bvec, rq->bio, iter) {
+ addr = kmap_atomic(bvec.bv_page);
+
+ zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
+
+ if (!hdr) {
+ hdr = (struct blk_zone_report_hdr *)addr;
+ hdr->nr_zones = nr_zones;
+ zones_to_cpy--;
+ addr += sizeof(struct blk_zone_report_hdr);
+ }
+
+ zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones);
+
+ memcpy(addr, &dev->zones[zno],
+ zones_to_cpy * sizeof(struct blk_zone));
+
+ kunmap_atomic(addr);
+
+ nr_zones -= zones_to_cpy;
+ zno += zones_to_cpy;
+
+ if (!nr_zones)
+ break;
+ }
+}
+
+blk_status_t null_zone_report(struct nullb *nullb,
+ struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = nullb->dev;
+ struct request *rq = cmd->rq;
+ unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+ unsigned int nr_zones = dev->nr_zones - zno;
+ unsigned int max_zones = (blk_rq_bytes(rq) /
+ sizeof(struct blk_zone)) - 1;
+
+ nr_zones = min_t(unsigned int, nr_zones, max_zones);
+
+ null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
+
+ return BLK_STS_OK;
+}
+
+void null_zone_write(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct request *rq = cmd->rq;
+ sector_t sector = blk_rq_pos(rq);
+ unsigned int rq_sectors = blk_rq_sectors(rq);
+ unsigned int zno = null_zone_no(dev, sector);
+ struct blk_zone *zone = &dev->zones[zno];
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_FULL:
+ /* Cannot write to a full zone */
+ cmd->error = BLK_STS_IOERR;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_IMP_OPEN:
+ /* Writes must be at the write pointer position */
+ if (blk_rq_pos(rq) != zone->wp) {
+ cmd->error = BLK_STS_IOERR;
+ break;
+ }
+
+ if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ zone->wp += rq_sectors;
+ if (zone->wp == zone->start + zone->len)
+ zone->cond = BLK_ZONE_COND_FULL;
+ break;
+ default:
+ /* Invalid zone condition */
+ cmd->error = BLK_STS_IOERR;
+ break;
+ }
+}
+
+void null_zone_reset(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct request *rq = cmd->rq;
+ unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+ struct blk_zone *zone = &dev->zones[zno];
+
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+}
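A minimal user-space sketch (not part of the patch) that exercises the zone-report path implemented above through the BLKREPORTZONE ioctl; the device path and requested zone count are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	unsigned int i, nr = 8;			/* ask for up to 8 zones */
	struct blk_zone_report *rep;
	int fd = open("/dev/nullb0", O_RDONLY);	/* hypothetical device */

	if (fd < 0)
		return 1;

	rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
	if (!rep)
		return 1;
	rep->sector = 0;			/* report from zone 0 */
	rep->nr_zones = nr;

	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		perror("BLKREPORTZONE");
		return 1;
	}

	for (i = 0; i < rep->nr_zones; i++)
		printf("zone %u: start %llu wp %llu cond %u\n", i,
		       (unsigned long long)rep->zones[i].start,
		       (unsigned long long)rep->zones[i].wp,
		       rep->zones[i].cond);
	return 0;
}

Each zone should start in BLK_ZONE_COND_EMPTY with its write pointer equal to its start sector, matching null_zone_init() above.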
diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c
index 4f27e7392e38..f5f63ca2889d 100644
--- a/drivers/block/paride/bpck.c
+++ b/drivers/block/paride/bpck.c
@@ -347,7 +347,7 @@ static int bpck_test_proto( PIA *pi, char * scratch, int verbose )
static void bpck_read_eeprom ( PIA *pi, char * buf )
-{ int i,j,k,n,p,v,f, om, od;
+{ int i, j, k, p, v, f, om, od;
bpck_force_spp(pi);
@@ -356,7 +356,6 @@ static void bpck_read_eeprom ( PIA *pi, char * buf )
bpck_connect(pi);
- n = 0;
WR(4,0);
for (i=0;i<64;i++) {
WR(6,8);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 8961b190e256..7cf947586fe4 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -426,6 +426,7 @@ static void run_fsm(void)
pd_claimed = 1;
if (!pi_schedule_claimed(pi_current, run_fsm))
return;
+ /* fall through */
case 1:
pd_claimed = 2;
pi_current->proto->connect(pi_current);
@@ -445,6 +446,7 @@ static void run_fsm(void)
spin_unlock_irqrestore(&pd_lock, saved_flags);
if (stop)
return;
+ /* fall through */
case Hold:
schedule_fsm();
return;
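Both pd.c hunks above only annotate intentional switch fall-through. A tiny hypothetical sketch of the pattern, which documents intent for readers and satisfies GCC's -Wimplicit-fallthrough comment parsing:

static int fsm_sketch(int state)
{
	int steps = 0;

	switch (state) {
	case 0:
		steps++;		/* deliberately continues into case 1 */
		/* fall through */
	case 1:
		steps++;
		break;
	}
	return steps;
}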
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b3f83cd96f33..6f1d25c1eb64 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -67,7 +67,7 @@
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
-
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "pktcdvd"
@@ -748,13 +748,13 @@ static const char *sense_key_string(__u8 index)
static void pkt_dump_sense(struct pktcdvd_device *pd,
struct packet_command *cgc)
{
- struct request_sense *sense = cgc->sense;
+ struct scsi_sense_hdr *sshdr = cgc->sshdr;
- if (sense)
+ if (sshdr)
pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
CDROM_PACKET_SIZE, cgc->cmd,
- sense->sense_key, sense->asc, sense->ascq,
- sense_key_string(sense->sense_key));
+ sshdr->sense_key, sshdr->asc, sshdr->ascq,
+ sense_key_string(sshdr->sense_key));
else
pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
@@ -787,18 +787,19 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
unsigned write_speed, unsigned read_speed)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
int ret;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_SET_SPEED;
cgc.cmd[2] = (read_speed >> 8) & 0xff;
cgc.cmd[3] = read_speed & 0xff;
cgc.cmd[4] = (write_speed >> 8) & 0xff;
cgc.cmd[5] = write_speed & 0xff;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
pkt_dump_sense(pd, &cgc);
return ret;
@@ -1562,7 +1563,8 @@ static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
cgc.cmd[8] = cgc.buflen = 2;
cgc.quiet = 1;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
return ret;
/* not all drives have the same disc_info length, so requeue
@@ -1591,7 +1593,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type,
cgc.cmd[8] = 8;
cgc.quiet = 1;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
return ret;
cgc.buflen = be16_to_cpu(ti->track_information_length) +
@@ -1612,17 +1615,20 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
__u32 last_track;
int ret = -1;
- if ((ret = pkt_get_disc_info(pd, &di)))
+ ret = pkt_get_disc_info(pd, &di);
+ if (ret)
return ret;
last_track = (di.last_track_msb << 8) | di.last_track_lsb;
- if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
+ ret = pkt_get_track_info(pd, last_track, 1, &ti);
+ if (ret)
return ret;
/* if this track is blank, try the previous. */
if (ti.blank) {
last_track--;
- if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
+ ret = pkt_get_track_info(pd, last_track, 1, &ti);
+ if (ret)
return ret;
}
@@ -1645,7 +1651,7 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
write_param_page *wp;
char buffer[128];
int ret, size;
@@ -1656,8 +1662,9 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
memset(buffer, 0, sizeof(buffer));
init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
- cgc.sense = &sense;
- if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
+ cgc.sshdr = &sshdr;
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+ if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1671,8 +1678,9 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
* now get it all
*/
init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
- cgc.sense = &sense;
- if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
+ cgc.sshdr = &sshdr;
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+ if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1714,7 +1722,8 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
cgc.buflen = cgc.cmd[8] = size;
- if ((ret = pkt_mode_select(pd, &cgc))) {
+ ret = pkt_mode_select(pd, &cgc);
+ if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1819,7 +1828,8 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
memset(&di, 0, sizeof(disc_information));
memset(&ti, 0, sizeof(track_information));
- if ((ret = pkt_get_disc_info(pd, &di))) {
+ ret = pkt_get_disc_info(pd, &di);
+ if (ret) {
pkt_err(pd, "failed get_disc\n");
return ret;
}
@@ -1830,7 +1840,8 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
- if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
+ ret = pkt_get_track_info(pd, track, 1, &ti);
+ if (ret) {
pkt_err(pd, "failed get_track\n");
return ret;
}
@@ -1905,12 +1916,12 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
int set)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
unsigned char buf[64];
int ret;
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.buflen = pd->mode_offset + 12;
/*
@@ -1918,7 +1929,8 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
*/
cgc.quiet = 1;
- if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
+ if (ret)
return ret;
buf[pd->mode_offset + 10] |= (!!set << 2);
@@ -1950,14 +1962,14 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
unsigned *write_speed)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
unsigned char buf[256+18];
unsigned char *cap_buf;
int ret, offset;
cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
if (ret) {
@@ -2011,13 +2023,13 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
unsigned *speed)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
unsigned char buf[64];
unsigned int size, st, sp;
int ret;
init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
cgc.cmd[1] = 2;
cgc.cmd[2] = 4; /* READ ATIP */
@@ -2032,7 +2044,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
size = sizeof(buf);
init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
cgc.cmd[1] = 2;
cgc.cmd[2] = 4;
@@ -2083,17 +2095,18 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
int ret;
pkt_dbg(2, pd, "Performing OPC\n");
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.timeout = 60*HZ;
cgc.cmd[0] = GPCMD_SEND_OPC;
cgc.cmd[1] = 1;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -2103,19 +2116,22 @@ static int pkt_open_write(struct pktcdvd_device *pd)
int ret;
unsigned int write_speed, media_write_speed, read_speed;
- if ((ret = pkt_probe_settings(pd))) {
+ ret = pkt_probe_settings(pd);
+ if (ret) {
pkt_dbg(2, pd, "failed probe\n");
return ret;
}
- if ((ret = pkt_set_write_settings(pd))) {
+ ret = pkt_set_write_settings(pd);
+ if (ret) {
pkt_dbg(1, pd, "failed saving write settings\n");
return -EIO;
}
pkt_write_caching(pd, USE_WCACHING);
- if ((ret = pkt_get_max_speed(pd, &write_speed)))
+ ret = pkt_get_max_speed(pd, &write_speed);
+ if (ret)
write_speed = 16 * 177;
switch (pd->mmc3_profile) {
case 0x13: /* DVD-RW */
@@ -2124,7 +2140,8 @@ static int pkt_open_write(struct pktcdvd_device *pd)
pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
break;
default:
- if ((ret = pkt_media_speed(pd, &media_write_speed)))
+ ret = pkt_media_speed(pd, &media_write_speed);
+ if (ret)
media_write_speed = 16;
write_speed = min(write_speed, media_write_speed * 177);
pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
@@ -2132,14 +2149,16 @@ static int pkt_open_write(struct pktcdvd_device *pd)
}
read_speed = write_speed;
- if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
+ ret = pkt_set_speed(pd, write_speed, read_speed);
+ if (ret) {
pkt_dbg(1, pd, "couldn't set write speed\n");
return -EIO;
}
pd->write_speed = write_speed;
pd->read_speed = read_speed;
- if ((ret = pkt_perform_opc(pd))) {
+ ret = pkt_perform_opc(pd);
+ if (ret) {
pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
}
@@ -2161,10 +2180,12 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
* so bdget() can't fail.
*/
bdget(pd->bdev->bd_dev);
- if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
+ ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd);
+ if (ret)
goto out;
- if ((ret = pkt_get_last_written(pd, &lba))) {
+ ret = pkt_get_last_written(pd, &lba);
+ if (ret) {
pkt_err(pd, "pkt_get_last_written failed\n");
goto out_putdev;
}
@@ -2175,7 +2196,8 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
q = bdev_get_queue(pd->bdev);
if (write) {
- if ((ret = pkt_open_write(pd)))
+ ret = pkt_open_write(pd);
+ if (ret)
goto out_putdev;
/*
* Some CDRW drives can not handle writes larger than one packet,
@@ -2190,7 +2212,8 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
clear_bit(PACKET_WRITABLE, &pd->flags);
}
- if ((ret = pkt_set_segment_merging(pd, q)))
+ ret = pkt_set_segment_merging(pd, q);
+ if (ret)
goto out_putdev;
if (write) {
@@ -2231,6 +2254,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
+
+ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
return pkt_devs[dev_minor];
}
@@ -2715,6 +2740,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->write_congestion_on = write_congestion_on;
pd->write_congestion_off = write_congestion_off;
+ ret = -ENOMEM;
disk = alloc_disk(1);
if (!disk)
goto out_mem;
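The pkt_find_dev_from_minor() hunk above is the canonical Spectre-v1 hardening pattern: bounds-check the untrusted index first, then clamp it with array_index_nospec() so it cannot index past the end of the table under speculative execution. A hypothetical standalone sketch of the same pattern (struct widget and the table are illustrative):

#include <linux/nospec.h>

#define WIDGET_MAX 8

struct widget;
static struct widget *widget_table[WIDGET_MAX];

static struct widget *widget_lookup(unsigned int idx)
{
	if (idx >= WIDGET_MAX)
		return NULL;

	/* Clamp idx so a mispredicted branch cannot index out of bounds */
	idx = array_index_nospec(idx, WIDGET_MAX);
	return widget_table[idx];
}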
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index fa0729c1e776..7915f3b03736 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -61,7 +61,7 @@ static int atomic_inc_return_safe(atomic_t *v)
{
unsigned int counter;
- counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+ counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
if (counter <= (unsigned int)INT_MAX)
return (int)counter;
@@ -181,6 +181,7 @@ struct rbd_image_header {
struct rbd_spec {
u64 pool_id;
const char *pool_name;
+ const char *pool_ns; /* NULL if default, never "" */
const char *image_id;
const char *image_name;
@@ -735,6 +736,7 @@ enum {
Opt_lock_timeout,
Opt_last_int,
/* int args above */
+ Opt_pool_ns,
Opt_last_string,
/* string args above */
Opt_read_only,
@@ -749,6 +751,7 @@ static match_table_t rbd_opts_tokens = {
{Opt_queue_depth, "queue_depth=%d"},
{Opt_lock_timeout, "lock_timeout=%d"},
/* int args above */
+ {Opt_pool_ns, "_pool_ns=%s"},
/* string args above */
{Opt_read_only, "read_only"},
{Opt_read_only, "ro"}, /* Alternate spelling */
@@ -776,9 +779,14 @@ struct rbd_options {
#define RBD_EXCLUSIVE_DEFAULT false
#define RBD_TRIM_DEFAULT true
+struct parse_rbd_opts_ctx {
+ struct rbd_spec *spec;
+ struct rbd_options *opts;
+};
+
static int parse_rbd_opts_token(char *c, void *private)
{
- struct rbd_options *rbd_opts = private;
+ struct parse_rbd_opts_ctx *pctx = private;
substring_t argstr[MAX_OPT_ARGS];
int token, intval, ret;
@@ -786,7 +794,7 @@ static int parse_rbd_opts_token(char *c, void *private)
if (token < Opt_last_int) {
ret = match_int(&argstr[0], &intval);
if (ret < 0) {
- pr_err("bad mount option arg (not int) at '%s'\n", c);
+ pr_err("bad option arg (not int) at '%s'\n", c);
return ret;
}
dout("got int token %d val %d\n", token, intval);
@@ -802,7 +810,7 @@ static int parse_rbd_opts_token(char *c, void *private)
pr_err("queue_depth out of range\n");
return -EINVAL;
}
- rbd_opts->queue_depth = intval;
+ pctx->opts->queue_depth = intval;
break;
case Opt_lock_timeout:
/* 0 is "wait forever" (i.e. infinite timeout) */
@@ -810,22 +818,28 @@ static int parse_rbd_opts_token(char *c, void *private)
pr_err("lock_timeout out of range\n");
return -EINVAL;
}
- rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
+ pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
+ break;
+ case Opt_pool_ns:
+ kfree(pctx->spec->pool_ns);
+ pctx->spec->pool_ns = match_strdup(argstr);
+ if (!pctx->spec->pool_ns)
+ return -ENOMEM;
break;
case Opt_read_only:
- rbd_opts->read_only = true;
+ pctx->opts->read_only = true;
break;
case Opt_read_write:
- rbd_opts->read_only = false;
+ pctx->opts->read_only = false;
break;
case Opt_lock_on_read:
- rbd_opts->lock_on_read = true;
+ pctx->opts->lock_on_read = true;
break;
case Opt_exclusive:
- rbd_opts->exclusive = true;
+ pctx->opts->exclusive = true;
break;
case Opt_notrim:
- rbd_opts->trim = false;
+ pctx->opts->trim = false;
break;
default:
/* libceph prints "bad option" msg */
@@ -1452,7 +1466,7 @@ static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
struct ceph_osd_request *osd_req = obj_request->osd_req;
osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
- ktime_get_real_ts(&osd_req->r_mtime);
+ ktime_get_real_ts64(&osd_req->r_mtime);
osd_req->r_data_offset = obj_request->ex.oe_off;
}
@@ -1475,7 +1489,13 @@ rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
req->r_callback = rbd_osd_req_callback;
req->r_priv = obj_req;
+ /*
+ * Data objects may be stored in a separate pool, but always in
+ * the same namespace in that pool as the header in its pool.
+ */
+ ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
req->r_base_oloc.pool = rbd_dev->layout.pool_id;
+
if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
goto err_req;
@@ -4119,6 +4139,14 @@ static ssize_t rbd_pool_id_show(struct device *dev,
(unsigned long long) rbd_dev->spec->pool_id);
}
+static ssize_t rbd_pool_ns_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+ return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
+}
+
static ssize_t rbd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -4217,6 +4245,7 @@ static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
+static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
@@ -4235,6 +4264,7 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_config_info.attr,
&dev_attr_pool.attr,
&dev_attr_pool_id.attr,
+ &dev_attr_pool_ns.attr,
&dev_attr_name.attr,
&dev_attr_image_id.attr,
&dev_attr_current_snap.attr,
@@ -4295,6 +4325,7 @@ static void rbd_spec_free(struct kref *kref)
struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
kfree(spec->pool_name);
+ kfree(spec->pool_ns);
kfree(spec->image_id);
kfree(spec->image_name);
kfree(spec->snap_name);
@@ -4353,6 +4384,12 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->header.data_pool_id = CEPH_NOPOOL;
ceph_oid_init(&rbd_dev->header_oid);
rbd_dev->header_oloc.pool = spec->pool_id;
+ if (spec->pool_ns) {
+ WARN_ON(!*spec->pool_ns);
+ rbd_dev->header_oloc.pool_ns =
+ ceph_find_or_create_string(spec->pool_ns,
+ strlen(spec->pool_ns));
+ }
mutex_init(&rbd_dev->watch_mutex);
rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
@@ -4633,6 +4670,17 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
parent_spec->pool_id = pool_id;
parent_spec->image_id = image_id;
parent_spec->snap_id = snap_id;
+
+ /* TODO: support cloning across namespaces */
+ if (rbd_dev->spec->pool_ns) {
+ parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns,
+ GFP_KERNEL);
+ if (!parent_spec->pool_ns) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ }
+
rbd_dev->parent_spec = parent_spec;
parent_spec = NULL; /* rbd_dev now owns this */
} else {
@@ -5146,8 +5194,7 @@ static int rbd_add_parse_args(const char *buf,
const char *mon_addrs;
char *snap_name;
size_t mon_addrs_size;
- struct rbd_spec *spec = NULL;
- struct rbd_options *rbd_opts = NULL;
+ struct parse_rbd_opts_ctx pctx = { 0 };
struct ceph_options *copts;
int ret;
@@ -5171,22 +5218,22 @@ static int rbd_add_parse_args(const char *buf,
goto out_err;
}
- spec = rbd_spec_alloc();
- if (!spec)
+ pctx.spec = rbd_spec_alloc();
+ if (!pctx.spec)
goto out_mem;
- spec->pool_name = dup_token(&buf, NULL);
- if (!spec->pool_name)
+ pctx.spec->pool_name = dup_token(&buf, NULL);
+ if (!pctx.spec->pool_name)
goto out_mem;
- if (!*spec->pool_name) {
+ if (!*pctx.spec->pool_name) {
rbd_warn(NULL, "no pool name provided");
goto out_err;
}
- spec->image_name = dup_token(&buf, NULL);
- if (!spec->image_name)
+ pctx.spec->image_name = dup_token(&buf, NULL);
+ if (!pctx.spec->image_name)
goto out_mem;
- if (!*spec->image_name) {
+ if (!*pctx.spec->image_name) {
rbd_warn(NULL, "no image name provided");
goto out_err;
}
@@ -5207,24 +5254,24 @@ static int rbd_add_parse_args(const char *buf,
if (!snap_name)
goto out_mem;
*(snap_name + len) = '\0';
- spec->snap_name = snap_name;
+ pctx.spec->snap_name = snap_name;
/* Initialize all rbd options to the defaults */
- rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
- if (!rbd_opts)
+ pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
+ if (!pctx.opts)
goto out_mem;
- rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
- rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
- rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
- rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
- rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
- rbd_opts->trim = RBD_TRIM_DEFAULT;
+ pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
+ pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
+ pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
+ pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
+ pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
+ pctx.opts->trim = RBD_TRIM_DEFAULT;
copts = ceph_parse_options(options, mon_addrs,
- mon_addrs + mon_addrs_size - 1,
- parse_rbd_opts_token, rbd_opts);
+ mon_addrs + mon_addrs_size - 1,
+ parse_rbd_opts_token, &pctx);
if (IS_ERR(copts)) {
ret = PTR_ERR(copts);
goto out_err;
@@ -5232,15 +5279,15 @@ static int rbd_add_parse_args(const char *buf,
kfree(options);
*ceph_opts = copts;
- *opts = rbd_opts;
- *rbd_spec = spec;
+ *opts = pctx.opts;
+ *rbd_spec = pctx.spec;
return 0;
out_mem:
ret = -ENOMEM;
out_err:
- kfree(rbd_opts);
- rbd_spec_put(spec);
+ kfree(pctx.opts);
+ rbd_spec_put(pctx.spec);
kfree(options);
return ret;
@@ -5586,8 +5633,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
ret = rbd_register_watch(rbd_dev);
if (ret) {
if (ret == -ENOENT)
- pr_info("image %s/%s does not exist\n",
+ pr_info("image %s/%s%s%s does not exist\n",
rbd_dev->spec->pool_name,
+ rbd_dev->spec->pool_ns ?: "",
+ rbd_dev->spec->pool_ns ? "/" : "",
rbd_dev->spec->image_name);
goto err_out_format;
}
@@ -5609,8 +5658,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
ret = rbd_spec_fill_names(rbd_dev);
if (ret) {
if (ret == -ENOENT)
- pr_info("snap %s/%s@%s does not exist\n",
+ pr_info("snap %s/%s%s%s@%s does not exist\n",
rbd_dev->spec->pool_name,
+ rbd_dev->spec->pool_ns ?: "",
+ rbd_dev->spec->pool_ns ? "/" : "",
rbd_dev->spec->image_name,
rbd_dev->spec->snap_name);
goto err_out_probe;
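The rbd changes above thread an optional RADOS pool namespace through option parsing (via the new parse_rbd_opts_ctx), the object locator of every request, the parent spec, and a new pool_ns sysfs attribute. A hypothetical mapping through the sysfs bus interface (monitor address, credentials, and names are placeholders; the trailing '-' requests the image head rather than a snapshot):

    echo '1.2.3.4:6789 name=admin,secret=<key>,_pool_ns=tenant1 mypool myimage -' > /sys/bus/rbd/add

After mapping, /sys/bus/rbd/devices/<id>/pool_ns should read back tenant1, or an empty string for the default namespace.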
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index dddb3f2490b6..1a92f9e65937 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -112,7 +112,7 @@ static const struct block_device_operations rsxx_fops = {
static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
{
- generic_start_io_acct(card->queue, bio_data_dir(bio), bio_sectors(bio),
+ generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
&card->gendisk->part0);
}
@@ -120,8 +120,8 @@ static void disk_stats_complete(struct rsxx_cardinfo *card,
struct bio *bio,
unsigned long start_time)
{
- generic_end_io_acct(card->queue, bio_data_dir(bio),
- &card->gendisk->part0, start_time);
+ generic_end_io_acct(card->queue, bio_op(bio),
+ &card->gendisk->part0, start_time);
}
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index bc7aea6d7b7c..87b9e7fbf062 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -657,8 +657,8 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
if (unlikely(skdev->dbg_level > 1)) {
dev_dbg(&skdev->pdev->dev,
- "skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+ "skreq=%x sksg_list=%p sksg_dma=%pad\n",
+ skreq->id, skreq->sksg_list, &skreq->sksg_dma_address);
for (i = 0; i < n_sg; i++) {
struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
@@ -1190,8 +1190,8 @@ static void skd_send_fitmsg(struct skd_device *skdev,
{
u64 qcmd;
- dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
- skmsg->mb_dma_address, skd_in_flight(skdev));
+ dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n",
+ &skmsg->mb_dma_address, skd_in_flight(skdev));
dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
qcmd = skmsg->mb_dma_address;
@@ -1250,9 +1250,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
}
dev_dbg(&skdev->pdev->dev,
- "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
+ "skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n",
skspcl, skspcl->req.id, skspcl->req.sksg_list,
- skspcl->req.sksg_dma_address);
+ &skspcl->req.sksg_dma_address);
for (i = 0; i < skspcl->req.n_sg; i++) {
struct fit_sg_descriptor *sgd =
&skspcl->req.sksg_list[i];
@@ -2685,8 +2685,8 @@ static int skd_cons_skmsg(struct skd_device *skdev)
WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
(FIT_QCMD_ALIGN - 1),
- "not aligned: msg_buf %p mb_dma_address %#llx\n",
- skmsg->msg_buf, skmsg->mb_dma_address);
+ "not aligned: msg_buf %p mb_dma_address %pad\n",
+ skmsg->msg_buf, &skmsg->mb_dma_address);
memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
}
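All four skd hunks apply the same printk rule: dma_addr_t may be 32 or 64 bits wide depending on the configuration, so it is passed by reference and printed with %pad rather than cast for %llx. A minimal hypothetical sketch:

#include <linux/device.h>
#include <linux/types.h>

static void dma_dbg_sketch(struct device *dev, dma_addr_t handle)
{
	/* %pad dereferences the pointer with the correct width */
	dev_dbg(dev, "mapped at %pad\n", &handle);
}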
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b5cedccb5d7d..8986adab9bf5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -251,14 +251,9 @@ static DEFINE_SPINLOCK(minor_lock);
#define GRANTS_PER_INDIRECT_FRAME \
(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
-#define PSEGS_PER_INDIRECT_FRAME \
- (GRANTS_INDIRECT_FRAME / GRANTS_PSEGS)
-
#define INDIRECT_GREFS(_grants) \
DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
-#define GREFS(_psegs) ((_psegs) * GRANTS_PER_PSEG)
-
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);
@@ -1441,7 +1436,7 @@ static bool blkif_completion(unsigned long *id,
/* Wait the second response if not yet here. */
if (s2->status == REQ_WAITING)
- return 0;
+ return false;
bret->status = blkif_get_final_status(s->status,
s2->status);
@@ -1542,7 +1537,7 @@ static bool blkif_completion(unsigned long *id,
}
}
- return 1;
+ return true;
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7436b2d27fa3..a1d6b5597c17 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
zram->backing_dev = NULL;
zram->old_block_size = 0;
zram->bdev = NULL;
-
+ zram->disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_SYNCHRONOUS_IO;
kvfree(zram->bitmap);
zram->bitmap = NULL;
}
@@ -336,6 +337,7 @@ static ssize_t backing_dev_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
char *file_name;
+ size_t sz;
struct file *backing_dev = NULL;
struct inode *inode;
struct address_space *mapping;
@@ -356,7 +358,11 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- strlcpy(file_name, buf, len);
+ strlcpy(file_name, buf, PATH_MAX);
+ /* ignore trailing newline */
+ sz = strlen(file_name);
+ if (sz > 0 && file_name[sz - 1] == '\n')
+ file_name[sz - 1] = 0x00;
backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
if (IS_ERR(backing_dev)) {
@@ -400,6 +406,18 @@ static ssize_t backing_dev_store(struct device *dev,
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
+ /*
+ * With the writeback feature, zram does asynchronous IO, so it is no
+ * longer a synchronous device; remove the synchronous io flag.
+ * Otherwise, an upper layer (e.g., swap) could wait for IO completion
+ * instead of submit-and-return, which would make the system sluggish.
+ * Furthermore, when the IO function returns (e.g., swap_readpage),
+ * the upper layer expects the IO is done and may free the page, while
+ * the IO is in fact still in flight, eventually causing use-after-free
+ * when the IO really completes.
+ */
+ zram->disk->queue->backing_dev_info->capabilities &=
+ ~BDI_CAP_SYNCHRONOUS_IO;
up_write(&zram->init_lock);
pr_info("setup backing device %s\n", file_name);
@@ -1274,17 +1292,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
* Returns 1 if IO request was successfully submitted.
*/
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, bool is_write, struct bio *bio)
+ int offset, unsigned int op, struct bio *bio)
{
unsigned long start_time = jiffies;
- int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
struct request_queue *q = zram->disk->queue;
int ret;
- generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
- if (!is_write) {
+ if (!op_is_write(op)) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
flush_dcache_page(bvec->bv_page);
@@ -1293,14 +1310,14 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset, bio);
}
- generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);
+ generic_end_io_acct(q, op, &zram->disk->part0, start_time);
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
if (unlikely(ret < 0)) {
- if (!is_write)
+ if (!op_is_write(op))
atomic64_inc(&zram->stats.failed_reads);
else
atomic64_inc(&zram->stats.failed_writes);
@@ -1338,7 +1355,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
unwritten);
if (zram_bvec_rw(zram, &bv, index, offset,
- op_is_write(bio_op(bio)), bio) < 0)
+ bio_op(bio), bio) < 0)
goto out;
bv.bv_offset += bv.bv_len;
@@ -1390,7 +1407,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
+ struct page *page, unsigned int op)
{
int offset, ret;
u32 index;
@@ -1414,7 +1431,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
+ ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
out:
/*
* If I/O fails, just return error(ie, non-zero) without
@@ -1429,7 +1446,7 @@ out:
switch (ret) {
case 0:
- page_endio(page, is_write, 0);
+ page_endio(page, op_is_write(op), 0);
break;
case 1:
ret = 0;
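Besides switching the accounting paths from a read/write boolean to the REQ_OP value, the zram hunks tighten backing_dev_store(): the copy is bounded by PATH_MAX instead of the write length, and a trailing newline is stripped so a plain echo works, e.g. (device names are placeholders):

    echo /dev/sda2 > /sys/block/zram0/backing_dev

The BDI_CAP_SYNCHRONOUS_IO changes are symmetric: the flag is cleared while a writeback backing device is attached and restored by reset_bdev().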
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index f3c643a0473c..2df11cc08a46 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -159,6 +159,7 @@ config BT_HCIUART_LL
config BT_HCIUART_3WIRE
bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
help
The HCI Three-wire UART Transport Layer makes it possible to
use the Bluetooth HCI over a serial port interface. The HCI
@@ -194,6 +195,19 @@ config BT_HCIUART_BCM
Say Y here to compile support for Broadcom protocol.
+config BT_HCIUART_RTL
+ bool "Realtek protocol support"
+ depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
+ depends on GPIOLIB
+ select BT_HCIUART_3WIRE
+ select BT_RTL
+ help
+ The Realtek protocol support enables Bluetooth HCI over the
+ 3-Wire serial port interface for Realtek Bluetooth controllers.
+
+ Say Y here to compile support for the Realtek protocol.
+
config BT_HCIUART_QCA
bool "Qualcomm Atheros protocol support"
depends on BT_HCIUART
@@ -364,6 +378,17 @@ config BT_WILINK
Say Y here to compile support for Texas Instrument's WiLink7 driver
into the kernel or say M to compile it as module (btwilink).
+config BT_MTKUART
+ tristate "MediaTek HCI UART driver"
+ depends on SERIAL_DEV_BUS
+ help
+ MediaTek Bluetooth HCI UART driver.
+ This driver is required if you want to use MediaTek Bluetooth
+ with a serial interface.
+
+ Say Y here to compile support for MediaTek Bluetooth UART devices
+ into the kernel or say M to compile it as module (btmtkuart).
+
config BT_QCOMSMD
tristate "Qualcomm SMD based HCI support"
depends on RPMSG || (COMPILE_TEST && RPMSG=n)
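A hypothetical config fragment enabling the new and amended options above (exact selections depend on the rest of the configuration; non-prompt symbols such as BT_HCIUART_SERDEV are selected by other HCI UART drivers and must also end up satisfied):

    CONFIG_SERIAL_DEV_BUS=y
    CONFIG_GPIOLIB=y
    CONFIG_BT_HCIUART=y
    CONFIG_BT_HCIUART_RTL=y
    CONFIG_BT_MTKUART=m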
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index ec16c55eb6e9..b7e393cfc1e3 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_BT_MTKUART) += btmtkuart.o
obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index ab090a313a5f..0588639b899a 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -490,7 +490,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
count = skb->len;
/* Max HCI frame size seems to be 1511 + 1 */
- nskb = bt_skb_alloc(count + 32, GFP_ATOMIC);
+ nskb = bt_skb_alloc(count + 32, GFP_KERNEL);
if (!nskb) {
BT_ERR("Can't allocate memory for new packet");
return -ENOMEM;
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 82437a69f99c..cc6e56223656 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -565,7 +565,7 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
/* Ericsson baud rate command */
unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 };
- skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_KERNEL);
if (!skb) {
BT_ERR("Can't allocate mem for new packet");
return -1;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index c6f7cc57db14..d1c2adf08576 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -289,7 +289,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb->dev = (void *) hdev;
- urb = usb_alloc_urb(0, GFP_ATOMIC);
+ urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
@@ -298,7 +298,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
switch (hci_skb_pkt_type(skb)) {
case HCI_COMMAND_PKT:
- dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
+ dr = kmalloc(sizeof(*dr), GFP_KERNEL);
if (!dr) {
usb_free_urb(urb);
return -ENOMEM;
@@ -343,7 +343,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
usb_anchor_urb(urb, &data->tx_anchor);
- err = usb_submit_urb(urb, GFP_ATOMIC);
+ err = usb_submit_urb(urb, GFP_KERNEL);
if (err < 0) {
bt_dev_err(hdev, "urb %p submission failed", urb);
kfree(urb->setup_packet);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 888bac49a87b..fb3d03928460 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -718,7 +718,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
}
/* Allocate buffer */
- skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
+ skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_KERNEL);
if (!skb) {
BT_ERR("No free skb");
ret = -ENOMEM;
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
new file mode 100644
index 000000000000..ed2a5c7cb77f
--- /dev/null
+++ b/drivers/bluetooth/btmtkuart.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 MediaTek Inc.
+
+/*
+ * Bluetooth support for MediaTek serial devices
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/serdev.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "h4_recv.h"
+
+#define VERSION "0.1"
+
+#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
+
+#define MTK_STP_TLR_SIZE 2
+
+#define BTMTKUART_TX_STATE_ACTIVE 1
+#define BTMTKUART_TX_STATE_WAKEUP 2
+#define BTMTKUART_TX_WAIT_VND_EVT 3
+
+enum {
+ MTK_WMT_PATCH_DWNLD = 0x1,
+ MTK_WMT_FUNC_CTRL = 0x6,
+ MTK_WMT_RST = 0x7
+};
+
+struct mtk_stp_hdr {
+ u8 prefix;
+ __be16 dlen;
+ u8 cs;
+} __packed;
+
+struct mtk_wmt_hdr {
+ u8 dir;
+ u8 op;
+ __le16 dlen;
+ u8 flag;
+} __packed;
+
+struct mtk_hci_wmt_cmd {
+ struct mtk_wmt_hdr hdr;
+ u8 data[256];
+} __packed;
+
+struct btmtkuart_dev {
+ struct hci_dev *hdev;
+ struct serdev_device *serdev;
+ struct clk *clk;
+
+ struct work_struct tx_work;
+ unsigned long tx_state;
+ struct sk_buff_head txq;
+
+ struct sk_buff *rx_skb;
+
+ u8 stp_pad[6];
+ u8 stp_cursor;
+ u16 stp_dlen;
+};
+
+static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen,
+ const void *param)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct mtk_hci_wmt_cmd wc;
+ struct mtk_wmt_hdr *hdr;
+ u32 hlen;
+ int err;
+
+ hlen = sizeof(*hdr) + plen;
+ if (hlen > 255)
+ return -EINVAL;
+
+ hdr = (struct mtk_wmt_hdr *)&wc;
+ hdr->dir = 1;
+ hdr->op = op;
+ hdr->dlen = cpu_to_le16(plen + 1);
+ hdr->flag = flag;
+ memcpy(wc.data, param, plen);
+
+ set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
+
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ if (err < 0) {
+ clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return err;
+ }
+
+ /* The vendor specific WMT commands are all answered by a vendor
+ * specific event and will not have the Command Status or Command
+ * Complete as with usual HCI command flow control.
+ *
+ * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
+ * state to be cleared. The driver specific event receive routine
+ * will clear that state and with that indicate completion of the
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
+ TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Execution of wmt command interrupted");
+ return err;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mtk_setup_fw(struct hci_dev *hdev)
+{
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err, dlen;
+ u8 flag;
+
+ err = request_firmware(&fw, FIRMWARE_MT7622, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+ /* The size of the patch header is 30 bytes; it should be skipped */
+ if (fw_size < 30) {
+ err = -EINVAL;
+ goto free_fw;
+ }
+
+ fw_size -= 30;
+ fw_ptr += 30;
+ flag = 1;
+
+ while (fw_size > 0) {
+ dlen = min_t(int, 250, fw_size);
+
+ /* Tell device the position in sequence */
+ if (fw_size - dlen <= 0)
+ flag = 3;
+ else if (fw_size < fw->size - 30)
+ flag = 2;
+
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_PATCH_DWNLD, flag, dlen,
+ fw_ptr);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ break;
+ }
+
+ fw_size -= dlen;
+ fw_ptr += dlen;
+ }
+
+free_fw:
+ release_firmware(fw);
+
+ return err;
+}
+
+static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ int err;
+
+ /* Fix up the vendor event id to 0xff (vendor specific) instead
+ * of 0xe4 so that events sent via the monitoring socket can be
+ * parsed properly.
+ */
+ if (hdr->evt == 0xe4)
+ hdr->evt = HCI_EV_VENDOR;
+
+ err = hci_recv_frame(hdev, skb);
+
+ if (hdr->evt == HCI_EV_VENDOR) {
+ if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
+ &bdev->tx_state)) {
+ /* Barrier to sync with other CPUs */
+ smp_mb__after_atomic();
+ wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
+ }
+ }
+
+ return err;
+}
+
+static const struct h4_recv_pkt mtk_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = btmtkuart_recv_event },
+};
+
+static void btmtkuart_tx_work(struct work_struct *work)
+{
+ struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
+ tx_work);
+ struct serdev_device *serdev = bdev->serdev;
+ struct hci_dev *hdev = bdev->hdev;
+
+ while (1) {
+ clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
+
+ while (1) {
+ struct sk_buff *skb = skb_dequeue(&bdev->txq);
+ int len;
+
+ if (!skb)
+ break;
+
+ len = serdev_device_write_buf(serdev, skb->data,
+ skb->len);
+ hdev->stat.byte_tx += len;
+
+ skb_pull(skb, len);
+ if (skb->len > 0) {
+ skb_queue_head(&bdev->txq, skb);
+ break;
+ }
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+ }
+
+ kfree_skb(skb);
+ }
+
+ if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
+ break;
+ }
+
+ clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
+}
+
+static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
+{
+ if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
+ set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
+
+ schedule_work(&bdev->tx_work);
+}
+
+static const unsigned char *
+mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
+ int *sz_h4)
+{
+ struct mtk_stp_hdr *shdr;
+
+ /* The cursor is reset once all the STP packet data has been consumed */
+ if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
+ bdev->stp_cursor = 0;
+
+ /* Fill the pad until all the STP header info is obtained */
+ while (bdev->stp_cursor < 6 && count > 0) {
+ bdev->stp_pad[bdev->stp_cursor] = *data;
+ bdev->stp_cursor++;
+ data++;
+ count--;
+ }
+
+ /* Retrieve the STP info and sanity-check it */
+ if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
+ shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
+ bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;
+
+ /* Resync STP when unexpected data is being read */
+ if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
+ bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
+ shdr->prefix, bdev->stp_dlen);
+ bdev->stp_cursor = 2;
+ bdev->stp_dlen = 0;
+ }
+ }
+
+ /* Quit directly when there is no data left for H4 to process */
+ if (count <= 0)
+ return NULL;
+
+ /* Translate to the amount of data H4 can handle so far */
+ *sz_h4 = min_t(int, count, bdev->stp_dlen);
+
+ /* Update the remaining size of STP packet */
+ bdev->stp_dlen -= *sz_h4;
+
+ /* Data points to STP payload which can be handled by H4 */
+ return data;
+}
+
+static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ const unsigned char *p_left = data, *p_h4;
+ int sz_left = count, sz_h4, adv;
+ int err;
+
+ while (sz_left > 0) {
+ /* The serial data received from MT7622 BT controller is
+ * at all time padded around with the STP header and tailer.
+ *
+ * A full STP packet is looking like
+ * -----------------------------------
+ * | STP header | H:4 | STP tailer |
+ * -----------------------------------
+ * but it doesn't guarantee to contain a full H:4 packet which
+ * means that it's possible for multiple STP packets forms a
+ * full H:4 packet that means extra STP header + length doesn't
+ * indicate a full H:4 frame, things can fragment. Whose length
+ * recorded in STP header just shows up the most length the
+ * H:4 engine can handle currently.
+ */
+
+ p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
+ if (!p_h4)
+ break;
+
+ adv = p_h4 - p_left;
+ sz_left -= adv;
+ p_left += adv;
+
+ bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+ sz_h4, mtk_recv_pkts,
+ ARRAY_SIZE(mtk_recv_pkts));
+ if (IS_ERR(bdev->rx_skb)) {
+ err = PTR_ERR(bdev->rx_skb);
+ bt_dev_err(bdev->hdev,
+ "Frame reassembly failed (%d)", err);
+ bdev->rx_skb = NULL;
+ return err;
+ }
+
+ sz_left -= sz_h4;
+ p_left += sz_h4;
+ }
+
+ return 0;
+}
+
+static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
+ size_t count)
+{
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+ int err;
+
+ err = btmtkuart_recv(bdev->hdev, data, count);
+ if (err < 0)
+ return err;
+
+ bdev->hdev->stat.byte_rx += count;
+
+ return count;
+}
+
+static void btmtkuart_write_wakeup(struct serdev_device *serdev)
+{
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+
+ btmtkuart_tx_wakeup(bdev);
+}
+
+static const struct serdev_device_ops btmtkuart_client_ops = {
+ .receive_buf = btmtkuart_receive_buf,
+ .write_wakeup = btmtkuart_write_wakeup,
+};
+
+static int btmtkuart_open(struct hci_dev *hdev)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct device *dev;
+ int err;
+
+ err = serdev_device_open(bdev->serdev);
+ if (err) {
+ bt_dev_err(hdev, "Unable to open UART device %s",
+ dev_name(&bdev->serdev->dev));
+ goto err_open;
+ }
+
+ bdev->stp_cursor = 2;
+ bdev->stp_dlen = 0;
+
+ dev = &bdev->serdev->dev;
+
+ /* Enable the power domain and clock the device requires */
+ pm_runtime_enable(dev);
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(dev);
+ goto err_disable_rpm;
+ }
+
+ err = clk_prepare_enable(bdev->clk);
+ if (err < 0)
+ goto err_put_rpm;
+
+ return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(dev);
+err_disable_rpm:
+ pm_runtime_disable(dev);
+err_open:
+ return err;
+}
+
+static int btmtkuart_close(struct hci_dev *hdev)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct device *dev = &bdev->serdev->dev;
+
+ /* Shutdown the clock and power domain the device requires */
+ clk_disable_unprepare(bdev->clk);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ serdev_device_close(bdev->serdev);
+
+ return 0;
+}
+
+static int btmtkuart_flush(struct hci_dev *hdev)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+
+ /* Flush any pending characters */
+ serdev_device_write_flush(bdev->serdev);
+ skb_queue_purge(&bdev->txq);
+
+ cancel_work_sync(&bdev->tx_work);
+
+ kfree_skb(bdev->rx_skb);
+ bdev->rx_skb = NULL;
+
+ bdev->stp_cursor = 2;
+ bdev->stp_dlen = 0;
+
+ return 0;
+}
+
+static int btmtkuart_setup(struct hci_dev *hdev)
+{
+ u8 param = 0x1;
+ int err = 0;
+
+ /* Set up the firmware that the device requires */
+ err = mtk_setup_fw(hdev);
+ if (err < 0)
+ return err;
+
+ /* Activate the function the firmware provides */
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_RST, 0x4, 0, NULL);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ return err;
+ }
+
+ /* Enable Bluetooth protocol */
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param),
+ &param);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int btmtkuart_shutdown(struct hci_dev *hdev)
+{
+ u8 param = 0x0;
+ int err;
+
+ /* Disable the device */
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param),
+ &param);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct mtk_stp_hdr *shdr;
+ int err, dlen, type = 0;
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+
+ /* Make sure there is enough room for the STP header and trailer */
+ if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
+ (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
+ err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
+ GFP_ATOMIC);
+ if (err < 0)
+ return err;
+ }
+
+ /* Add the STP header */
+ dlen = skb->len;
+ shdr = skb_push(skb, sizeof(*shdr));
+ shdr->prefix = 0x80;
+ shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
+ shdr->cs = 0; /* MT7622 doesn't care about checksum value */
+
+ /* Add the STP trailer */
+ skb_put_zero(skb, MTK_STP_TLR_SIZE);
+
+ skb_queue_tail(&bdev->txq, skb);
+
+ btmtkuart_tx_wakeup(bdev);
+ return 0;
+}
+
+static int btmtkuart_probe(struct serdev_device *serdev)
+{
+ struct btmtkuart_dev *bdev;
+ struct hci_dev *hdev;
+
+ bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ bdev->clk = devm_clk_get(&serdev->dev, "ref");
+ if (IS_ERR(bdev->clk))
+ return PTR_ERR(bdev->clk);
+
+ bdev->serdev = serdev;
+ serdev_device_set_drvdata(serdev, bdev);
+
+ serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);
+
+ INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
+ skb_queue_head_init(&bdev->txq);
+
+ /* Initialize and register HCI device */
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ dev_err(&serdev->dev, "Can't allocate HCI device\n");
+ return -ENOMEM;
+ }
+
+ bdev->hdev = hdev;
+
+ hdev->bus = HCI_UART;
+ hci_set_drvdata(hdev, bdev);
+
+ hdev->open = btmtkuart_open;
+ hdev->close = btmtkuart_close;
+ hdev->flush = btmtkuart_flush;
+ hdev->setup = btmtkuart_setup;
+ hdev->shutdown = btmtkuart_shutdown;
+ hdev->send = btmtkuart_send_frame;
+ SET_HCIDEV_DEV(hdev, &serdev->dev);
+
+ hdev->manufacturer = 70;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+
+ if (hci_register_dev(hdev) < 0) {
+ dev_err(&serdev->dev, "Can't register HCI device\n");
+ hci_free_dev(hdev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void btmtkuart_remove(struct serdev_device *serdev)
+{
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+ struct hci_dev *hdev = bdev->hdev;
+
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_of_match_table[] = {
+ { .compatible = "mediatek,mt7622-bluetooth"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, mtk_of_match_table);
+#endif
+
+static struct serdev_device_driver btmtkuart_driver = {
+ .probe = btmtkuart_probe,
+ .remove = btmtkuart_remove,
+ .driver = {
+ .name = "btmtkuart",
+ .of_match_table = of_match_ptr(mtk_of_match_table),
+ },
+};
+
+module_serdev_device_driver(btmtkuart_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_MT7622);
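btmtkuart_send_frame() above wraps every outgoing H:4 packet in the MediaTek STP envelope. A small host-side sketch (not part of the driver) of the same framing, assuming type 0 packets as the driver uses:

#include <stdint.h>
#include <string.h>

/* Wrap an H:4 packet in an STP envelope: a 4-byte header carrying the
 * 0x80 prefix, a 12-bit big-endian payload length, and a checksum byte
 * the MT7622 ignores, followed by the payload and a 2-byte trailer.
 * The output buffer must hold len + 6 bytes. */
static size_t stp_wrap(uint8_t *out, const uint8_t *h4, uint16_t len)
{
	out[0] = 0x80;			/* prefix */
	out[1] = (len >> 8) & 0x0f;	/* dlen high nibble, type 0 */
	out[2] = len & 0xff;		/* dlen low byte */
	out[3] = 0;			/* checksum, unused on MT7622 */
	memcpy(&out[4], h4, len);
	memset(&out[4 + len], 0, 2);	/* STP trailer */
	return 4u + len + 2u;
}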
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8219816c54a0..ec9e03a6b778 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -27,7 +27,7 @@
#define VERSION "0.1"
-static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
@@ -35,36 +35,35 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
char cmd;
int err = 0;
- BT_DBG("%s: ROME Patch Version Request", hdev->name);
+ bt_dev_dbg(hdev, "QCA Version Request");
cmd = EDL_PATCH_VER_REQ_CMD;
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
- &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ &cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
- err);
+ bt_dev_err(hdev, "Reading QCA version information failed (%d)",
+ err);
return err;
}
if (skb->len != sizeof(*edl) + sizeof(*ver)) {
- BT_ERR("%s: Version size mismatch len %d", hdev->name,
- skb->len);
+ bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
err = -EILSEQ;
goto out;
}
edl = (struct edl_event_hdr *)(skb->data);
if (!edl) {
- BT_ERR("%s: TLV with no header", hdev->name);
+ bt_dev_err(hdev, "QCA TLV with no header");
err = -EILSEQ;
goto out;
}
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
edl->rtype != EDL_APP_VER_RES_EVT) {
- BT_ERR("%s: Wrong packet received %d %d", hdev->name,
- edl->cresp, edl->rtype);
+ bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
+ edl->rtype);
err = -EIO;
goto out;
}
@@ -76,30 +75,35 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
- /* ROME chipset version can be decided by patch and SoC
+ /* QCA chipset version can be decided by patch and SoC
* version, combination with upper 2 bytes from SoC
* and lower 2 bytes from patch will be used.
*/
- *rome_version = (le32_to_cpu(ver->soc_id) << 16) |
+ *soc_version = (le32_to_cpu(ver->soc_id) << 16) |
(le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+ if (*soc_version == 0)
+ err = -EILSEQ;
out:
kfree_skb(skb);
+ if (err)
+ bt_dev_err(hdev, "QCA Failed to get version (%d)", err);
return err;
}
+EXPORT_SYMBOL_GPL(qca_read_soc_version);
-static int rome_reset(struct hci_dev *hdev)
+static int qca_send_reset(struct hci_dev *hdev)
{
struct sk_buff *skb;
int err;
- BT_DBG("%s: ROME HCI_RESET", hdev->name);
+ bt_dev_dbg(hdev, "QCA HCI_RESET");
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Reset failed (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Reset failed (%d)", err);
return err;
}
@@ -108,7 +112,7 @@ static int rome_reset(struct hci_dev *hdev)
return 0;
}
-static void rome_tlv_check_data(struct rome_config *config,
+static void qca_tlv_check_data(struct rome_config *config,
const struct firmware *fw)
{
const u8 *data;
@@ -207,7 +211,7 @@ static void rome_tlv_check_data(struct rome_config *config,
}
}
-static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
+static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
const u8 *data, enum rome_tlv_dnld_mode mode)
{
struct sk_buff *skb;
@@ -225,22 +229,22 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
cmd);
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
- HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err);
return err;
}
if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
- BT_ERR("%s: TLV response size mismatch", hdev->name);
+ bt_dev_err(hdev, "QCA TLV response size mismatch");
err = -EILSEQ;
goto out;
}
edl = (struct edl_event_hdr *)(skb->data);
if (!edl) {
- BT_ERR("%s: TLV with no header", hdev->name);
+ bt_dev_err(hdev, "TLV with no header");
err = -EILSEQ;
goto out;
}
@@ -249,8 +253,8 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
- BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)",
- hdev->name, edl->cresp, edl->rtype, tlv_resp->result);
+ bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)",
+ edl->cresp, edl->rtype, tlv_resp->result);
err = -EIO;
}
@@ -260,23 +264,23 @@ out:
return err;
}
-static int rome_download_firmware(struct hci_dev *hdev,
+static int qca_download_firmware(struct hci_dev *hdev,
struct rome_config *config)
{
const struct firmware *fw;
const u8 *segment;
int ret, remain, i = 0;
- bt_dev_info(hdev, "ROME Downloading %s", config->fwname);
+ bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
ret = request_firmware(&fw, config->fwname, &hdev->dev);
if (ret) {
- BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
- config->fwname, ret);
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
return ret;
}
- rome_tlv_check_data(config, fw);
+ qca_tlv_check_data(config, fw);
segment = fw->data;
remain = fw->size;
@@ -290,7 +294,7 @@ static int rome_download_firmware(struct hci_dev *hdev,
if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
config->dnld_mode = ROME_SKIP_EVT_NONE;
- ret = rome_tlv_send_segment(hdev, segsize, segment,
+ ret = qca_tlv_send_segment(hdev, segsize, segment,
config->dnld_mode);
if (ret)
break;
@@ -314,11 +318,10 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
cmd[2] = sizeof(bdaddr_t); /* size */
memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
- HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Change address command failed (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "QCA Change address command failed (%d)", err);
return err;
}
@@ -328,57 +331,65 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
-int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver)
{
- u32 rome_ver = 0;
struct rome_config config;
int err;
+ u8 rom_ver;
- BT_DBG("%s: ROME setup on UART", hdev->name);
+ bt_dev_dbg(hdev, "QCA setup on UART");
config.user_baud_rate = baudrate;
- /* Get ROME version information */
- err = rome_patch_ver_req(hdev, &rome_ver);
- if (err < 0 || rome_ver == 0) {
- BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
- return err;
- }
-
- bt_dev_info(hdev, "ROME controller version 0x%08x", rome_ver);
-
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin",
- rome_ver);
- err = rome_download_firmware(hdev, &config);
+ if (soc_type == QCA_WCN3990) {
+ /* Firmware files to download are based on the ROM version. The ROM
+ * version is derived from the low nibble of each of the last two
+ * bytes of soc_ver.
+ */
+ rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
+ (soc_ver & 0x0000000f);
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crbtfw%02x.tlv", rom_ver);
+ } else {
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/rampatch_%08x.bin", soc_ver);
+ }
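For clarity, a stand-alone sketch (user-space C, with a hypothetical soc_ver value) of the ROM-version derivation above: the low nibble of byte 1 becomes the high nibble of rom_ver, and the low nibble of byte 0 becomes its low nibble.

	#include <stdio.h>

	int main(void)
	{
		unsigned int soc_ver = 0x00000302;	/* hypothetical value */
		unsigned char rom_ver;

		/* ((0x302 & 0xf00) >> 4) | (0x302 & 0xf) = 0x30 | 0x02 = 0x32 */
		rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
		printf("qca/crbtfw%02x.tlv\n", rom_ver);	/* qca/crbtfw32.tlv */
		return 0;
	}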
+
+ err = qca_download_firmware(hdev, &config);
if (err < 0) {
- BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
return err;
}
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
- snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
- rome_ver);
- err = rome_download_firmware(hdev, &config);
+ if (soc_type == QCA_WCN3990)
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crnv%02x.bin", rom_ver);
+ else
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/nvm_%08x.bin", soc_ver);
+
+ err = qca_download_firmware(hdev, &config);
if (err < 0) {
- BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
return err;
}
/* Perform HCI reset */
- err = rome_reset(hdev);
+ err = qca_send_reset(hdev);
if (err < 0) {
- BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err);
return err;
}
- bt_dev_info(hdev, "ROME setup on UART is completed");
+ bt_dev_info(hdev, "QCA setup on UART is completed");
return 0;
}
-EXPORT_SYMBOL_GPL(qca_uart_setup_rome);
+EXPORT_SYMBOL_GPL(qca_uart_setup);
MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 13d77fd873b6..0c01f375fe83 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -37,6 +37,9 @@
#define EDL_TAG_ID_HCI (17)
#define EDL_TAG_ID_DEEP_SLEEP (27)
+#define QCA_WCN3990_POWERON_PULSE 0xFC
+#define QCA_WCN3990_POWEROFF_PULSE 0xC0
+
enum qca_bardrate {
QCA_BAUDRATE_115200 = 0,
QCA_BAUDRATE_57600,
@@ -124,10 +127,19 @@ struct tlv_type_hdr {
__u8 data[0];
} __packed;
+enum qca_btsoc_type {
+ QCA_INVALID = -1,
+ QCA_AR3002,
+ QCA_ROME,
+ QCA_WCN3990
+};
+
#if IS_ENABLED(CONFIG_BT_QCA)
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
-int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);
+int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver);
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
#else
@@ -136,7 +148,13 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad
return -EOPNOTSUPP;
}
-static inline int qca_uart_setup_rome(struct hci_dev *hdev, int speed)
+static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 437f080deaab..7f9ea8e4c1b2 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -34,9 +34,12 @@
#define RTL_ROM_LMP_8821A 0x8821
#define RTL_ROM_LMP_8761A 0x8761
#define RTL_ROM_LMP_8822B 0x8822
+#define RTL_CONFIG_MAGIC 0x8723ab55
#define IC_MATCH_FL_LMPSUBV (1 << 0)
#define IC_MATCH_FL_HCIREV (1 << 1)
+#define IC_MATCH_FL_HCIVER (1 << 2)
+#define IC_MATCH_FL_HCIBUS (1 << 3)
#define IC_INFO(lmps, hcir) \
.match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV, \
.lmp_subver = (lmps), \
@@ -46,49 +49,130 @@ struct id_table {
__u16 match_flags;
__u16 lmp_subver;
__u16 hci_rev;
+ __u8 hci_ver;
+ __u8 hci_bus;
bool config_needed;
+ bool has_rom_version;
char *fw_name;
char *cfg_name;
};
+struct btrtl_device_info {
+ const struct id_table *ic_info;
+ u8 rom_version;
+ u8 *fw_data;
+ int fw_len;
+ u8 *cfg_data;
+ int cfg_len;
+};
+
static const struct id_table ic_id_table[] = {
+ { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8723A, 0x0,
+ .config_needed = false,
+ .has_rom_version = false,
+ .fw_name = "rtl_bt/rtl8723a_fw.bin",
+ .cfg_name = NULL },
+
+ { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_3499, 0x0,
+ .config_needed = false,
+ .has_rom_version = false,
+ .fw_name = "rtl_bt/rtl8723a_fw.bin",
+ .cfg_name = NULL },
+
+ /* 8723BS */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
+ IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8723B,
+ .hci_rev = 0xb,
+ .hci_ver = 6,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8723bs_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723bs_config" },
+
/* 8723B */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xb),
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8723b_fw.bin",
- .cfg_name = "rtl_bt/rtl8723b_config.bin" },
+ .cfg_name = "rtl_bt/rtl8723b_config" },
/* 8723D */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xd),
.config_needed = true,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8723d_fw.bin",
- .cfg_name = "rtl_bt/rtl8723d_config.bin" },
+ .cfg_name = "rtl_bt/rtl8723d_config" },
+
+ /* 8723DS */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
+ IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8723B,
+ .hci_rev = 0xd,
+ .hci_ver = 8,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8723ds_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723ds_config" },
/* 8821A */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xa),
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8821a_fw.bin",
- .cfg_name = "rtl_bt/rtl8821a_config.bin" },
+ .cfg_name = "rtl_bt/rtl8821a_config" },
/* 8821C */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xc),
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8821c_fw.bin",
- .cfg_name = "rtl_bt/rtl8821c_config.bin" },
+ .cfg_name = "rtl_bt/rtl8821c_config" },
/* 8761A */
{ IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0,
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8761a_fw.bin",
- .cfg_name = "rtl_bt/rtl8761a_config.bin" },
+ .cfg_name = "rtl_bt/rtl8761a_config" },
/* 8822B */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xb),
.config_needed = true,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8822b_fw.bin",
- .cfg_name = "rtl_bt/rtl8822b_config.bin" },
+ .cfg_name = "rtl_bt/rtl8822b_config" },
};
+static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
+ u8 hci_ver, u8 hci_bus)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) {
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) &&
+ (ic_id_table[i].lmp_subver != lmp_subver))
+ continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) &&
+ (ic_id_table[i].hci_rev != hci_rev))
+ continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) &&
+ (ic_id_table[i].hci_ver != hci_ver))
+ continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) &&
+ (ic_id_table[i].hci_bus != hci_bus))
+ continue;
+
+ break;
+ }
+ if (i >= ARRAY_SIZE(ic_id_table))
+ return NULL;
+
+ return &ic_id_table[i];
+}
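The table above is consumed by flag-gated matching: an entry only constrains the fields whose IC_MATCH_FL_* bit is set, so partially-specified entries act as fallbacks. A minimal user-space sketch of the same idiom (table, flags and values here are illustrative, not the driver's):

	#include <stddef.h>
	#include <stdio.h>

	#define MATCH_A (1 << 0)
	#define MATCH_B (1 << 1)

	struct entry {
		unsigned int match_flags;
		int a, b;
		const char *name;
	};

	static const struct entry table[] = {
		{ MATCH_A | MATCH_B, 1, 2, "exact" },
		{ MATCH_A,           1, 0, "a-only fallback" },
	};

	static const struct entry *match(int a, int b)
	{
		size_t i;

		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
			if ((table[i].match_flags & MATCH_A) && table[i].a != a)
				continue;
			if ((table[i].match_flags & MATCH_B) && table[i].b != b)
				continue;
			return &table[i];	/* first entry with no mismatch */
		}
		return NULL;
	}

	int main(void)
	{
		/* b == 9 fails the fully-specified entry, so the A-only
		 * entry matches instead. */
		printf("%s\n", match(1, 9)->name);	/* "a-only fallback" */
		return 0;
	}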
+
static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
{
struct rtl_rom_version_evt *rom_version;
@@ -97,20 +181,20 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
/* Read RTL ROM version command */
skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: Read ROM version failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ rtl_dev_err(hdev, "Read ROM version failed (%ld)\n",
+ PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != sizeof(*rom_version)) {
- BT_ERR("%s: RTL version event length mismatch", hdev->name);
+ rtl_dev_err(hdev, "RTL version event length mismatch\n");
kfree_skb(skb);
return -EIO;
}
rom_version = (struct rtl_rom_version_evt *)skb->data;
- bt_dev_info(hdev, "rom_version status=%x version=%x",
- rom_version->status, rom_version->version);
+ rtl_dev_info(hdev, "rom_version status=%x version=%x\n",
+ rom_version->status, rom_version->version);
*version = rom_version->version;
@@ -118,16 +202,16 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
return 0;
}
-static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
- const struct firmware *fw,
+static int rtlbt_parse_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
unsigned char **_buf)
{
const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
struct rtl_epatch_header *epatch_info;
unsigned char *buf;
- int i, ret, len;
+ int i, len;
size_t min_size;
- u8 opcode, length, data, rom_version = 0;
+ u8 opcode, length, data;
int project_id = -1;
const unsigned char *fwptr, *chip_id_base;
const unsigned char *patch_length_base, *patch_offset_base;
@@ -146,17 +230,13 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
{ RTL_ROM_LMP_8821A, 10 }, /* 8821C */
};
- ret = rtl_read_rom_version(hdev, &rom_version);
- if (ret)
- return ret;
-
min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
- if (fw->size < min_size)
+ if (btrtl_dev->fw_len < min_size)
return -EINVAL;
- fwptr = fw->data + fw->size - sizeof(extension_sig);
+ fwptr = btrtl_dev->fw_data + btrtl_dev->fw_len - sizeof(extension_sig);
if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
- BT_ERR("%s: extension section signature mismatch", hdev->name);
+ rtl_dev_err(hdev, "extension section signature mismatch\n");
return -EINVAL;
}
@@ -166,7 +246,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
* Once we have that, we double-check that the project_id is suitable
* for the hardware we are working with.
*/
- while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
+ while (fwptr >= btrtl_dev->fw_data + (sizeof(*epatch_info) + 3)) {
opcode = *--fwptr;
length = *--fwptr;
data = *--fwptr;
@@ -177,8 +257,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
break;
if (length == 0) {
- BT_ERR("%s: found instruction with length 0",
- hdev->name);
+ rtl_dev_err(hdev, "found instruction with length 0\n");
return -EINVAL;
}
@@ -191,7 +270,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
}
if (project_id < 0) {
- BT_ERR("%s: failed to find version instruction", hdev->name);
+ rtl_dev_err(hdev, "failed to find version instruction\n");
return -EINVAL;
}
@@ -202,19 +281,21 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
}
if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
- BT_ERR("%s: unknown project id %d", hdev->name, project_id);
+ rtl_dev_err(hdev, "unknown project id %d\n", project_id);
return -EINVAL;
}
- if (lmp_subver != project_id_to_lmp_subver[i].lmp_subver) {
- BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
- project_id_to_lmp_subver[i].lmp_subver, lmp_subver);
+ if (btrtl_dev->ic_info->lmp_subver !=
+ project_id_to_lmp_subver[i].lmp_subver) {
+ rtl_dev_err(hdev, "firmware is for %x but this is a %x\n",
+ project_id_to_lmp_subver[i].lmp_subver,
+ btrtl_dev->ic_info->lmp_subver);
return -EINVAL;
}
- epatch_info = (struct rtl_epatch_header *)fw->data;
+ epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data;
if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
- BT_ERR("%s: bad EPATCH signature", hdev->name);
+ rtl_dev_err(hdev, "bad EPATCH signature\n");
return -EINVAL;
}
@@ -229,16 +310,16 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
* Find the right patch for this chip.
*/
min_size += 8 * num_patches;
- if (fw->size < min_size)
+ if (btrtl_dev->fw_len < min_size)
return -EINVAL;
- chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
+ chip_id_base = btrtl_dev->fw_data + sizeof(struct rtl_epatch_header);
patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
for (i = 0; i < num_patches; i++) {
u16 chip_id = get_unaligned_le16(chip_id_base +
(i * sizeof(u16)));
- if (chip_id == rom_version + 1) {
+ if (chip_id == btrtl_dev->rom_version + 1) {
patch_length = get_unaligned_le16(patch_length_base +
(i * sizeof(u16)));
patch_offset = get_unaligned_le32(patch_offset_base +
@@ -248,21 +329,22 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
}
if (!patch_offset) {
- BT_ERR("%s: didn't find patch for chip id %d",
- hdev->name, rom_version);
+ rtl_dev_err(hdev, "didn't find patch for chip id %d",
+ btrtl_dev->rom_version);
return -EINVAL;
}
BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
min_size = patch_offset + patch_length;
- if (fw->size < min_size)
+ if (btrtl_dev->fw_len < min_size)
return -EINVAL;
/* Copy the firmware into a new buffer and write the version at
* the end.
*/
len = patch_length;
- buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
+ buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
+ GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -301,15 +383,14 @@ static int rtl_download_firmware(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: download fw command failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ rtl_dev_err(hdev, "download fw command failed (%ld)\n",
+ PTR_ERR(skb));
ret = -PTR_ERR(skb);
goto out;
}
if (skb->len != sizeof(struct rtl_download_response)) {
- BT_ERR("%s: download fw event length mismatch",
- hdev->name);
+ rtl_dev_err(hdev, "download fw event length mismatch\n");
kfree_skb(skb);
ret = -EIO;
goto out;
@@ -324,12 +405,12 @@ out:
return ret;
}
-static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
+static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
{
const struct firmware *fw;
int ret;
- bt_dev_info(hdev, "rtl: loading %s", name);
+ rtl_dev_info(hdev, "rtl: loading %s\n", name);
ret = request_firmware(&fw, name, &hdev->dev);
if (ret < 0)
return ret;
@@ -343,96 +424,37 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
return ret;
}
-static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
+static int btrtl_setup_rtl8723a(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
{
- const struct firmware *fw;
- int ret;
-
- bt_dev_info(hdev, "rtl: loading rtl_bt/rtl8723a_fw.bin");
- ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev);
- if (ret < 0) {
- BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
- return ret;
- }
-
- if (fw->size < 8) {
- ret = -EINVAL;
- goto out;
- }
+ if (btrtl_dev->fw_len < 8)
+ return -EINVAL;
/* Check that the firmware doesn't have the epatch signature
* (which is only for RTL8723B and newer).
*/
- if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
- BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
- ret = -EINVAL;
- goto out;
+ if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) {
+ rtl_dev_err(hdev, "unexpected EPATCH signature!\n");
+ return -EINVAL;
}
- ret = rtl_download_firmware(hdev, fw->data, fw->size);
-
-out:
- release_firmware(fw);
- return ret;
+ return rtl_download_firmware(hdev, btrtl_dev->fw_data,
+ btrtl_dev->fw_len);
}
-static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev,
- u16 lmp_subver)
+static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
{
unsigned char *fw_data = NULL;
- const struct firmware *fw;
int ret;
- int cfg_sz;
- u8 *cfg_buff = NULL;
u8 *tbuff;
- char *cfg_name = NULL;
- char *fw_name = NULL;
- int i;
- for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) {
- if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) &&
- (ic_id_table[i].lmp_subver != lmp_subver))
- continue;
- if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) &&
- (ic_id_table[i].hci_rev != hci_rev))
- continue;
-
- break;
- }
-
- if (i >= ARRAY_SIZE(ic_id_table)) {
- BT_ERR("%s: unknown IC info, lmp subver %04x, hci rev %04x",
- hdev->name, lmp_subver, hci_rev);
- return -EINVAL;
- }
-
- cfg_name = ic_id_table[i].cfg_name;
-
- if (cfg_name) {
- cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff);
- if (cfg_sz < 0) {
- cfg_sz = 0;
- if (ic_id_table[i].config_needed)
- BT_ERR("Necessary config file %s not found\n",
- cfg_name);
- }
- } else
- cfg_sz = 0;
-
- fw_name = ic_id_table[i].fw_name;
- bt_dev_info(hdev, "rtl: loading %s", fw_name);
- ret = request_firmware(&fw, fw_name, &hdev->dev);
- if (ret < 0) {
- BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
- goto err_req_fw;
- }
-
- ret = rtlbt_parse_firmware(hdev, lmp_subver, fw, &fw_data);
+ ret = rtlbt_parse_firmware(hdev, btrtl_dev, &fw_data);
if (ret < 0)
goto out;
- if (cfg_sz) {
- tbuff = kzalloc(ret + cfg_sz, GFP_KERNEL);
+ if (btrtl_dev->cfg_len > 0) {
+ tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
if (!tbuff) {
ret = -ENOMEM;
goto out;
@@ -441,22 +463,18 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev,
memcpy(tbuff, fw_data, ret);
kfree(fw_data);
- memcpy(tbuff + ret, cfg_buff, cfg_sz);
- ret += cfg_sz;
+ memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
+ ret += btrtl_dev->cfg_len;
fw_data = tbuff;
}
- bt_dev_info(hdev, "cfg_sz %d, total size %d", cfg_sz, ret);
+ rtl_dev_info(hdev, "cfg_sz %d, total sz %d\n", btrtl_dev->cfg_len, ret);
ret = rtl_download_firmware(hdev, fw_data, ret);
out:
- release_firmware(fw);
kfree(fw_data);
-err_req_fw:
- if (cfg_sz)
- kfree(cfg_buff);
return ret;
}
@@ -467,14 +485,13 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION failed (%ld)\n",
+ PTR_ERR(skb));
return skb;
}
if (skb->len != sizeof(struct hci_rp_read_local_version)) {
- BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
- hdev->name);
+ rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION event length mismatch\n");
kfree_skb(skb);
return ERR_PTR(-EIO);
}
@@ -482,49 +499,264 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
return skb;
}
-int btrtl_setup_realtek(struct hci_dev *hdev)
+void btrtl_free(struct btrtl_device_info *btrtl_dev)
{
+ kfree(btrtl_dev->fw_data);
+ kfree(btrtl_dev->cfg_data);
+ kfree(btrtl_dev);
+}
+EXPORT_SYMBOL_GPL(btrtl_free);
+
+struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ const char *postfix)
+{
+ struct btrtl_device_info *btrtl_dev;
struct sk_buff *skb;
struct hci_rp_read_local_version *resp;
+ char cfg_name[40];
u16 hci_rev, lmp_subver;
+ u8 hci_ver;
+ int ret;
+
+ btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
+ if (!btrtl_dev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
skb = btrtl_read_local_version(hdev);
- if (IS_ERR(skb))
- return -PTR_ERR(skb);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ goto err_free;
+ }
resp = (struct hci_rp_read_local_version *)skb->data;
- bt_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x "
- "lmp_ver=%02x lmp_subver=%04x",
- resp->hci_ver, resp->hci_rev,
- resp->lmp_ver, resp->lmp_subver);
+ rtl_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x\n",
+ resp->hci_ver, resp->hci_rev,
+ resp->lmp_ver, resp->lmp_subver);
+ hci_ver = resp->hci_ver;
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
kfree_skb(skb);
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
+
+ if (!btrtl_dev->ic_info) {
+ rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
+ lmp_subver, hci_rev, hci_ver);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ if (btrtl_dev->ic_info->has_rom_version) {
+ ret = rtl_read_rom_version(hdev, &btrtl_dev->rom_version);
+ if (ret)
+ goto err_free;
+ }
+
+ btrtl_dev->fw_len = rtl_load_file(hdev, btrtl_dev->ic_info->fw_name,
+ &btrtl_dev->fw_data);
+ if (btrtl_dev->fw_len < 0) {
+ rtl_dev_err(hdev, "firmware file %s not found\n",
+ btrtl_dev->ic_info->fw_name);
+ ret = btrtl_dev->fw_len;
+ goto err_free;
+ }
+
+ if (btrtl_dev->ic_info->cfg_name) {
+ if (postfix) {
+ snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin",
+ btrtl_dev->ic_info->cfg_name, postfix);
+ } else {
+ snprintf(cfg_name, sizeof(cfg_name), "%s.bin",
+ btrtl_dev->ic_info->cfg_name);
+ }
+ btrtl_dev->cfg_len = rtl_load_file(hdev, cfg_name,
+ &btrtl_dev->cfg_data);
+ if (btrtl_dev->ic_info->config_needed &&
+ btrtl_dev->cfg_len <= 0) {
+ rtl_dev_err(hdev, "mandatory config file %s not found\n",
+ btrtl_dev->ic_info->cfg_name);
+ ret = btrtl_dev->cfg_len;
+ goto err_free;
+ }
+ }
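As a concrete example of the name construction above (grounded in the ACPI id this patch adds to hci_h5.c): for the 8723BS entry, cfg_name is "rtl_bt/rtl8723bs_config", so with postfix "OBDA8723" the driver requests "rtl_bt/rtl8723bs_config-OBDA8723.bin", and with no postfix it requests "rtl_bt/rtl8723bs_config.bin".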
+
+ return btrtl_dev;
+
+err_free:
+ btrtl_free(btrtl_dev);
+err_alloc:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(btrtl_initialize);
+
+int btrtl_download_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
+{
/* Match a set of subver values that correspond to stock firmware,
* which is not compatible with standard btusb.
* If matched, upload an alternative firmware that does conform to
* standard btusb. Once that firmware is uploaded, the subver changes
* to a different value.
*/
- switch (lmp_subver) {
+ switch (btrtl_dev->ic_info->lmp_subver) {
case RTL_ROM_LMP_8723A:
case RTL_ROM_LMP_3499:
- return btrtl_setup_rtl8723a(hdev);
+ return btrtl_setup_rtl8723a(hdev, btrtl_dev);
case RTL_ROM_LMP_8723B:
case RTL_ROM_LMP_8821A:
case RTL_ROM_LMP_8761A:
case RTL_ROM_LMP_8822B:
- return btrtl_setup_rtl8723b(hdev, hci_rev, lmp_subver);
+ return btrtl_setup_rtl8723b(hdev, btrtl_dev);
default:
- bt_dev_info(hdev, "rtl: assuming no firmware upload needed");
+ rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
return 0;
}
}
+EXPORT_SYMBOL_GPL(btrtl_download_firmware);
+
+int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+ struct btrtl_device_info *btrtl_dev;
+ int ret;
+
+ btrtl_dev = btrtl_initialize(hdev, NULL);
+ if (IS_ERR(btrtl_dev))
+ return PTR_ERR(btrtl_dev);
+
+ ret = btrtl_download_firmware(hdev, btrtl_dev);
+
+ btrtl_free(btrtl_dev);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
+static unsigned int btrtl_convert_baudrate(u32 device_baudrate)
+{
+ switch (device_baudrate) {
+ case 0x0252a00a:
+ return 230400;
+
+ case 0x05f75004:
+ return 921600;
+
+ case 0x00005004:
+ return 1000000;
+
+ case 0x04928002:
+ case 0x01128002:
+ return 1500000;
+
+ case 0x00005002:
+ return 2000000;
+
+ case 0x0000b001:
+ return 2500000;
+
+ case 0x04928001:
+ return 3000000;
+
+ case 0x052a6001:
+ return 3500000;
+
+ case 0x00005001:
+ return 4000000;
+
+ case 0x0252c014:
+ default:
+ return 115200;
+ }
+}
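These magic 32-bit words are the device-side baudrate settings read out of the vendor config file (the entry at offset 0xc, see btrtl_get_uart_settings() below); a config carrying 0x0252c014, for example, leaves both sides at the default 115200.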
+
+int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+ u32 *device_baudrate, bool *flow_control)
+{
+ struct rtl_vendor_config *config;
+ struct rtl_vendor_config_entry *entry;
+ int i, total_data_len;
+ bool found = false;
+
+ total_data_len = btrtl_dev->cfg_len - sizeof(*config);
+ if (total_data_len <= 0) {
+ rtl_dev_warn(hdev, "no config loaded\n");
+ return -EINVAL;
+ }
+
+ config = (struct rtl_vendor_config *)btrtl_dev->cfg_data;
+ if (le32_to_cpu(config->signature) != RTL_CONFIG_MAGIC) {
+ rtl_dev_err(hdev, "invalid config magic\n");
+ return -EINVAL;
+ }
+
+ if (total_data_len < le16_to_cpu(config->total_len)) {
+ rtl_dev_err(hdev, "config is too short\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < total_data_len; ) {
+ entry = ((void *)config->entry) + i;
+
+ switch (le16_to_cpu(entry->offset)) {
+ case 0xc:
+ if (entry->len < sizeof(*device_baudrate)) {
+ rtl_dev_err(hdev, "invalid UART config entry\n");
+ return -EINVAL;
+ }
+
+ *device_baudrate = get_unaligned_le32(entry->data);
+ *controller_baudrate = btrtl_convert_baudrate(
+ *device_baudrate);
+
+ if (entry->len >= 13)
+ *flow_control = !!(entry->data[12] & BIT(2));
+ else
+ *flow_control = false;
+
+ found = true;
+ break;
+
+ default:
+ rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)\n",
+ le16_to_cpu(entry->offset), entry->len);
+ break;
+ }
+
+ i += sizeof(*entry) + entry->len;
+ }
+
+ if (!found) {
+ rtl_dev_err(hdev, "no UART config entry found\n");
+ return -ENOENT;
+ }
+
+ rtl_dev_dbg(hdev, "device baudrate = 0x%08x\n", *device_baudrate);
+ rtl_dev_dbg(hdev, "controller baudrate = %u\n", *controller_baudrate);
+ rtl_dev_dbg(hdev, "flow control %d\n", *flow_control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btrtl_get_uart_settings);
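A minimal user-space sketch of walking the same on-disk layout (a struct rtl_vendor_config header followed by { le16 offset; u8 len; u8 data[len]; } entries); the blob contents here are hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical blob: signature 0x8723ab55 (LE), total_len = 7,
		 * then one entry at offset 0x000c, len 4, carrying the 32-bit
		 * device baudrate word 0x0252c014 (LE). */
		const uint8_t cfg[] = {
			0x55, 0xab, 0x23, 0x87,		/* signature */
			0x07, 0x00,			/* total_len */
			0x0c, 0x00, 0x04,		/* entry: offset, len */
			0x14, 0xc0, 0x52, 0x02,		/* entry data */
		};
		size_t i;

		for (i = 6; i + 3 <= sizeof(cfg); i += 3 + cfg[i + 2]) {
			uint16_t off = cfg[i] | (cfg[i + 1] << 8);
			uint8_t len = cfg[i + 2];

			if (off == 0x0c && len >= 4) {
				uint32_t baud = cfg[i + 3] | (cfg[i + 4] << 8) |
						((uint32_t)cfg[i + 5] << 16) |
						((uint32_t)cfg[i + 6] << 24);
				printf("device baudrate word: 0x%08x\n",
				       (unsigned int)baud);
			}
		}
		return 0;
	}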
+
MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>");
MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("rtl_bt/rtl8723a_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin");
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
index 38ffe4890cd1..f5e36f3993a8 100644
--- a/drivers/bluetooth/btrtl.h
+++ b/drivers/bluetooth/btrtl.h
@@ -17,6 +17,13 @@
#define RTL_FRAG_LEN 252
+#define rtl_dev_err(dev, fmt, ...) bt_dev_err(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_warn(dev, fmt, ...) bt_dev_warn(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_info(dev, fmt, ...) bt_dev_info(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_dbg(dev, fmt, ...) bt_dev_dbg(dev, "RTL: " fmt, ##__VA_ARGS__)
+
+struct btrtl_device_info;
+
struct rtl_download_cmd {
__u8 index;
__u8 data[RTL_FRAG_LEN];
@@ -38,15 +45,61 @@ struct rtl_epatch_header {
__le16 num_patches;
} __packed;
+struct rtl_vendor_config_entry {
+ __le16 offset;
+ __u8 len;
+ __u8 data[0];
+} __packed;
+
+struct rtl_vendor_config {
+ __le32 signature;
+ __le16 total_len;
+ struct rtl_vendor_config_entry entry[0];
+} __packed;
+
#if IS_ENABLED(CONFIG_BT_RTL)
+struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ const char *postfix);
+void btrtl_free(struct btrtl_device_info *btrtl_dev);
+int btrtl_download_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev);
int btrtl_setup_realtek(struct hci_dev *hdev);
+int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+ u32 *device_baudrate, bool *flow_control);
#else
+static inline struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ const char *postfix)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void btrtl_free(struct btrtl_device_info *btrtl_dev)
+{
+}
+
+static inline int btrtl_download_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int btrtl_setup_realtek(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
+static inline int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+ u32 *device_baudrate,
+ bool *flow_control)
+{
+ return -ENOENT;
+}
+
#endif
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f73a27ea28cc..cd2e5cf14ea5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -374,6 +374,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8723DE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
@@ -509,9 +510,10 @@ static inline void btusb_free_frags(struct btusb_data *data)
static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
+ unsigned long flags;
int err = 0;
- spin_lock(&data->rxlock);
+ spin_lock_irqsave(&data->rxlock, flags);
skb = data->evt_skb;
while (count) {
@@ -556,7 +558,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
}
data->evt_skb = skb;
- spin_unlock(&data->rxlock);
+ spin_unlock_irqrestore(&data->rxlock, flags);
return err;
}
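The move to spin_lock_irqsave() here, and in the bulk, isoc and tx-complete paths below, is the usual fix when a lock is taken both from USB completion handlers (which may run in interrupt context) and from process context: with a plain spin_lock(), an interrupt arriving on a CPU that already holds the lock can deadlock.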
@@ -564,9 +566,10 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
+ unsigned long flags;
int err = 0;
- spin_lock(&data->rxlock);
+ spin_lock_irqsave(&data->rxlock, flags);
skb = data->acl_skb;
while (count) {
@@ -613,7 +616,7 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
}
data->acl_skb = skb;
- spin_unlock(&data->rxlock);
+ spin_unlock_irqrestore(&data->rxlock, flags);
return err;
}
@@ -621,9 +624,10 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
+ unsigned long flags;
int err = 0;
- spin_lock(&data->rxlock);
+ spin_lock_irqsave(&data->rxlock, flags);
skb = data->sco_skb;
while (count) {
@@ -668,7 +672,7 @@ static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count)
}
data->sco_skb = skb;
- spin_unlock(&data->rxlock);
+ spin_unlock_irqrestore(&data->rxlock, flags);
return err;
}
@@ -1066,6 +1070,7 @@ static void btusb_tx_complete(struct urb *urb)
struct sk_buff *skb = urb->context;
struct hci_dev *hdev = (struct hci_dev *)skb->dev;
struct btusb_data *data = hci_get_drvdata(hdev);
+ unsigned long flags;
BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
urb->actual_length);
@@ -1079,9 +1084,9 @@ static void btusb_tx_complete(struct urb *urb)
hdev->stat.err_tx++;
done:
- spin_lock(&data->txlock);
+ spin_lock_irqsave(&data->txlock, flags);
data->tx_in_flight--;
- spin_unlock(&data->txlock);
+ spin_unlock_irqrestore(&data->txlock, flags);
kfree(urb->setup_packet);
@@ -1593,13 +1598,13 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
ret = request_firmware(&fw, fwname, &hdev->dev);
if (ret < 0) {
if (ret == -EINVAL) {
- BT_ERR("%s Intel firmware file request failed (%d)",
- hdev->name, ret);
+ bt_dev_err(hdev, "Intel firmware file request failed (%d)",
+ ret);
return NULL;
}
- BT_ERR("%s failed to open Intel firmware file: %s(%d)",
- hdev->name, fwname, ret);
+ bt_dev_err(hdev, "failed to open Intel firmware file: %s (%d)",
+ fwname, ret);
/* If the correct firmware patch file is not found, use the
* default firmware patch file instead
@@ -1607,8 +1612,8 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq",
ver->hw_platform, ver->hw_variant);
if (request_firmware(&fw, fwname, &hdev->dev) < 0) {
- BT_ERR("%s failed to open default Intel fw file: %s",
- hdev->name, fwname);
+ bt_dev_err(hdev, "failed to open default fw file: %s",
+ fwname);
return NULL;
}
}
@@ -1637,7 +1642,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* process.
*/
if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) {
- BT_ERR("%s Intel fw corrupted: invalid cmd read", hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid cmd read");
return -EINVAL;
}
(*fw_ptr)++;
@@ -1651,7 +1656,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* of command parameter. If not, the firmware file is corrupted.
*/
if (remain < cmd->plen) {
- BT_ERR("%s Intel fw corrupted: invalid cmd len", hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid cmd len");
return -EFAULT;
}
@@ -1684,8 +1689,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
remain -= sizeof(*evt);
if (remain < evt->plen) {
- BT_ERR("%s Intel fw corrupted: invalid evt len",
- hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid evt len");
return -EFAULT;
}
@@ -1699,15 +1703,15 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* file is corrupted.
*/
if (!evt || !evt_param || remain < 0) {
- BT_ERR("%s Intel fw corrupted: invalid evt read", hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid evt read");
return -EFAULT;
}
skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen,
cmd_param, evt->evt, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
- hdev->name, cmd->opcode, PTR_ERR(skb));
+ bt_dev_err(hdev, "sending Intel patch command (0x%4.4x) failed (%ld)",
+ cmd->opcode, PTR_ERR(skb));
return PTR_ERR(skb);
}
@@ -1716,15 +1720,15 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* the contents of the event.
*/
if (skb->len != evt->plen) {
- BT_ERR("%s mismatch event length (opcode 0x%4.4x)", hdev->name,
- le16_to_cpu(cmd->opcode));
+ bt_dev_err(hdev, "mismatch event length (opcode 0x%4.4x)",
+ le16_to_cpu(cmd->opcode));
kfree_skb(skb);
return -EFAULT;
}
if (memcmp(skb->data, evt_param, evt->plen)) {
- BT_ERR("%s mismatch event parameter (opcode 0x%4.4x)",
- hdev->name, le16_to_cpu(cmd->opcode));
+ bt_dev_err(hdev, "mismatch event parameter (opcode 0x%4.4x)",
+ le16_to_cpu(cmd->opcode));
kfree_skb(skb);
return -EFAULT;
}
@@ -1753,8 +1757,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
*/
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s sending initial HCI reset command failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "sending initial HCI reset command failed (%ld)",
+ PTR_ERR(skb));
return PTR_ERR(skb);
}
kfree_skb(skb);
@@ -1890,7 +1894,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
struct hci_event_hdr *hdr;
struct hci_ev_cmd_complete *evt;
- skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -2084,8 +2088,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* for now only accept this single value.
*/
if (ver.hw_platform != 0x37) {
- BT_ERR("%s: Unsupported Intel hardware platform (%u)",
- hdev->name, ver.hw_platform);
+ bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
+ ver.hw_platform);
return -EINVAL;
}
@@ -2104,8 +2108,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x14: /* QnJ, IcP */
break;
default:
- BT_ERR("%s: Unsupported Intel hardware variant (%u)",
- hdev->name, ver.hw_variant);
+ bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+ ver.hw_variant);
return -EINVAL;
}
@@ -2134,8 +2138,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* choice is to return an error and abort the device initialization.
*/
if (ver.fw_variant != 0x06) {
- BT_ERR("%s: Unsupported Intel firmware variant (%u)",
- hdev->name, ver.fw_variant);
+ bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
+ ver.fw_variant);
return -ENODEV;
}
@@ -2151,8 +2155,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* that this bootloader does not send them, then abort the setup.
*/
if (params.limited_cce != 0x00) {
- BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
- hdev->name, params.limited_cce);
+ bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
+ params.limited_cce);
return -EINVAL;
}
@@ -2202,14 +2206,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
le16_to_cpu(ver.fw_revision));
break;
default:
- BT_ERR("%s: Unsupported Intel firmware naming", hdev->name);
+ bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
- BT_ERR("%s: Failed to load Intel firmware file (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err);
return err;
}
@@ -2235,13 +2238,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
le16_to_cpu(ver.fw_revision));
break;
default:
- BT_ERR("%s: Unsupported Intel firmware naming", hdev->name);
+ bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
if (fw->size < 644) {
- BT_ERR("%s: Invalid size of firmware file (%zu)",
- hdev->name, fw->size);
+ bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
+ fw->size);
err = -EBADF;
goto done;
}
@@ -2272,18 +2275,18 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
TASK_INTERRUPTIBLE,
msecs_to_jiffies(5000));
if (err == -EINTR) {
- BT_ERR("%s: Firmware loading interrupted", hdev->name);
+ bt_dev_err(hdev, "Firmware loading interrupted");
goto done;
}
if (err) {
- BT_ERR("%s: Firmware loading timeout", hdev->name);
+ bt_dev_err(hdev, "Firmware loading timeout");
err = -ETIMEDOUT;
goto done;
}
if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
- BT_ERR("%s: Firmware loading failed", hdev->name);
+ bt_dev_err(hdev, "Firmware loading failed");
err = -ENOEXEC;
goto done;
}
@@ -2322,12 +2325,12 @@ done:
msecs_to_jiffies(1000));
if (err == -EINTR) {
- BT_ERR("%s: Device boot interrupted", hdev->name);
+ bt_dev_err(hdev, "Device boot interrupted");
return -EINTR;
}
if (err) {
- BT_ERR("%s: Device boot timeout", hdev->name);
+ bt_dev_err(hdev, "Device boot timeout");
return -ETIMEDOUT;
}
@@ -2364,6 +2367,22 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
struct sk_buff *skb;
long ret;
+ /* In the shutdown sequence where Bluetooth is turned off followed
+ * by WiFi being turned off, turning WiFi back on causes an issue
+ * with RF calibration.
+ *
+ * To ensure that any RF activity has been stopped, issue HCI Reset
+ * command to clear all ongoing activity including advertising,
+ * scanning etc.
+ */
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "HCI reset during shutdown failed");
+ return ret;
+ }
+ kfree_skb(skb);
+
/* Some platforms have an issue with BT LED when the interface is
* down or BT radio is turned off, which takes 5 seconds to BT LED
* goes off. This command turns off the BT LED immediately.
@@ -2371,8 +2390,7 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
- BT_ERR("%s: turning off Intel device LED failed (%ld)",
- hdev->name, ret);
+ bt_dev_err(hdev, "turning off Intel device LED failed");
return ret;
}
kfree_skb(skb);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 6a8d0d06aba7..8eede1197cd2 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -21,13 +21,18 @@
*
*/
-#include <linux/kernel.h>
+#include <linux/acpi.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/serdev.h>
#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include "btrtl.h"
#include "hci_uart.h"
#define HCI_3WIRE_ACK_PKT 0
@@ -65,6 +70,9 @@ enum {
};
struct h5 {
+ /* Must be the first member, hci_serdev.c expects this. */
+ struct hci_uart serdev_hu;
+
struct sk_buff_head unack; /* Unack'ed packets queue */
struct sk_buff_head rel; /* Reliable packets queue */
struct sk_buff_head unrel; /* Unreliable packets queue */
@@ -95,6 +103,19 @@ struct h5 {
H5_SLEEPING,
H5_WAKING_UP,
} sleep;
+
+ const struct h5_vnd *vnd;
+ const char *id;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *device_wake_gpio;
+};
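The "must be the first member" comment above relies on the C rule that a pointer to a struct and a pointer to its first member share the same address, which lets the serdev core recover the containing state from the embedded hci_uart. A minimal sketch of the idiom (names here are illustrative, not the driver's):

	#include <stdio.h>

	struct hci_uart_like { int dummy; };	/* stand-in for struct hci_uart */

	struct h5_like {
		struct hci_uart_like serdev_hu;	/* must stay the first member */
		int vendor_state;
	};

	static struct h5_like *to_h5(struct hci_uart_like *hu)
	{
		/* Valid only because serdev_hu is first: both pointers
		 * refer to the same address. */
		return (struct h5_like *)hu;
	}

	int main(void)
	{
		struct h5_like h5 = { .vendor_state = 42 };

		printf("%d\n", to_h5(&h5.serdev_hu)->vendor_state);	/* 42 */
		return 0;
	}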
+
+struct h5_vnd {
+ int (*setup)(struct h5 *h5);
+ void (*open)(struct h5 *h5);
+ void (*close)(struct h5 *h5);
+ const struct acpi_gpio_mapping *acpi_gpio_map;
};
static void h5_reset_rx(struct h5 *h5);
@@ -193,9 +214,13 @@ static int h5_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
- h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
- if (!h5)
- return -ENOMEM;
+ if (hu->serdev) {
+ h5 = serdev_device_get_drvdata(hu->serdev);
+ } else {
+ h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
+ if (!h5)
+ return -ENOMEM;
+ }
hu->priv = h5;
h5->hu = hu;
@@ -210,6 +235,9 @@ static int h5_open(struct hci_uart *hu)
h5->tx_win = H5_TX_WIN_MAX;
+ if (h5->vnd && h5->vnd->open)
+ h5->vnd->open(h5);
+
set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
/* Send initial sync request */
@@ -229,7 +257,21 @@ static int h5_close(struct hci_uart *hu)
skb_queue_purge(&h5->rel);
skb_queue_purge(&h5->unrel);
- kfree(h5);
+ if (h5->vnd && h5->vnd->close)
+ h5->vnd->close(h5);
+
+ if (!hu->serdev)
+ kfree(h5);
+
+ return 0;
+}
+
+static int h5_setup(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (h5->vnd && h5->vnd->setup)
+ return h5->vnd->setup(h5);
return 0;
}
@@ -744,18 +786,172 @@ static const struct hci_uart_proto h5p = {
.name = "Three-wire (H5)",
.open = h5_open,
.close = h5_close,
+ .setup = h5_setup,
.recv = h5_recv,
.enqueue = h5_enqueue,
.dequeue = h5_dequeue,
.flush = h5_flush,
};
+static int h5_serdev_probe(struct serdev_device *serdev)
+{
+ const struct acpi_device_id *match;
+ struct device *dev = &serdev->dev;
+ struct h5 *h5;
+
+ h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
+ if (!h5)
+ return -ENOMEM;
+
+ set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
+
+ h5->hu = &h5->serdev_hu;
+ h5->serdev_hu.serdev = serdev;
+ serdev_device_set_drvdata(serdev, h5);
+
+ if (has_acpi_companion(dev)) {
+ match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!match)
+ return -ENODEV;
+
+ h5->vnd = (const struct h5_vnd *)match->driver_data;
+ h5->id = (char *)match->id;
+
+ if (h5->vnd->acpi_gpio_map)
+ devm_acpi_dev_add_driver_gpios(dev,
+ h5->vnd->acpi_gpio_map);
+ }
+
+ h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(h5->enable_gpio))
+ return PTR_ERR(h5->enable_gpio);
+
+ h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(h5->device_wake_gpio))
+ return PTR_ERR(h5->device_wake_gpio);
+
+ return hci_uart_register_device(&h5->serdev_hu, &h5p);
+}
+
+static void h5_serdev_remove(struct serdev_device *serdev)
+{
+ struct h5 *h5 = serdev_device_get_drvdata(serdev);
+
+ hci_uart_unregister_device(&h5->serdev_hu);
+}
+
+#ifdef CONFIG_BT_HCIUART_RTL
+static int h5_btrtl_setup(struct h5 *h5)
+{
+ struct btrtl_device_info *btrtl_dev;
+ struct sk_buff *skb;
+ __le32 baudrate_data;
+ u32 device_baudrate;
+ unsigned int controller_baudrate;
+ bool flow_control;
+ int err;
+
+ btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
+ if (IS_ERR(btrtl_dev))
+ return PTR_ERR(btrtl_dev);
+
+ err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
+ &controller_baudrate, &device_baudrate,
+ &flow_control);
+ if (err)
+ goto out_free;
+
+ baudrate_data = cpu_to_le32(device_baudrate);
+ skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
+ &baudrate_data, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
+ err = PTR_ERR(skb);
+ goto out_free;
+ } else {
+ kfree_skb(skb);
+ }
+ /* Give the device some time to set up the new baudrate. */
+ usleep_range(10000, 20000);
+
+ serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
+ serdev_device_set_flow_control(h5->hu->serdev, flow_control);
+
+ err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
+ /* Give the device some time before the hci-core sends it a reset */
+ usleep_range(10000, 20000);
+
+out_free:
+ btrtl_free(btrtl_dev);
+
+ return err;
+}
+
+static void h5_btrtl_open(struct h5 *h5)
+{
+ /* Devices always start with these fixed parameters */
+ serdev_device_set_flow_control(h5->hu->serdev, false);
+ serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
+ serdev_device_set_baudrate(h5->hu->serdev, 115200);
+
+ /* The controller needs up to 500ms to wake up */
+ gpiod_set_value_cansleep(h5->enable_gpio, 1);
+ gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
+ msleep(500);
+}
+
+static void h5_btrtl_close(struct h5 *h5)
+{
+ gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
+ gpiod_set_value_cansleep(h5->enable_gpio, 0);
+}
+
+static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
+static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
+static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
+static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
+ { "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
+ { "enable-gpios", &btrtl_enable_gpios, 1 },
+ { "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
+ {},
+};
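For reference, each acpi_gpio_params triplet above is { crs_entry_index, line_index, active_low }: it binds the Nth GpioIo/GpioInt resource from the device's ACPI _CRS to the named connection ID, which is what lets the devm_gpiod_get_optional() calls in h5_serdev_probe() find "enable" and "device-wake" on ACPI systems.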
+
+static struct h5_vnd rtl_vnd = {
+ .setup = h5_btrtl_setup,
+ .open = h5_btrtl_open,
+ .close = h5_btrtl_close,
+ .acpi_gpio_map = acpi_btrtl_gpios,
+};
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id h5_acpi_match[] = {
+#ifdef CONFIG_BT_HCIUART_RTL
+ { "OBDA8723", (kernel_ulong_t)&rtl_vnd },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
+#endif
+
+static struct serdev_device_driver h5_serdev_driver = {
+ .probe = h5_serdev_probe,
+ .remove = h5_serdev_remove,
+ .driver = {
+ .name = "hci_uart_h5",
+ .acpi_match_table = ACPI_PTR(h5_acpi_match),
+ },
+};
+
int __init h5_init(void)
{
+ serdev_device_driver_register(&h5_serdev_driver);
return hci_uart_register_proto(&h5p);
}
int __exit h5_deinit(void)
{
+ serdev_device_driver_unregister(&h5_serdev_driver);
return hci_uart_unregister_proto(&h5p);
}
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 7c166e3b308b..46ace321bf60 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -458,7 +458,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
struct hci_event_hdr *hdr;
struct hci_ev_cmd_complete *evt;
- skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
if (!skb)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 51790dd02afb..e182f6019f68 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -5,7 +5,7 @@
* protocol extension to H4.
*
* Copyright (C) 2007 Texas Instruments, Inc.
- * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -31,9 +31,14 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h>
@@ -119,12 +124,51 @@ struct qca_data {
u64 votes_off;
};
+enum qca_speed_type {
+ QCA_INIT_SPEED = 1,
+ QCA_OPER_SPEED
+};
+
+/*
+ * Voltage regulator information required for configuring the
+ * QCA Bluetooth chipset
+ */
+struct qca_vreg {
+ const char *name;
+ unsigned int min_uV;
+ unsigned int max_uV;
+ unsigned int load_uA;
+};
+
+struct qca_vreg_data {
+ enum qca_btsoc_type soc_type;
+ struct qca_vreg *vregs;
+ size_t num_vregs;
+};
+
+/*
+ * Platform data for the QCA Bluetooth power driver.
+ */
+struct qca_power {
+ struct device *dev;
+ const struct qca_vreg_data *vreg_data;
+ struct regulator_bulk_data *vreg_bulk;
+ bool vregs_on;
+};
+
struct qca_serdev {
struct hci_uart serdev_hu;
struct gpio_desc *bt_en;
struct clk *susclk;
+ enum qca_btsoc_type btsoc_type;
+ struct qca_power *bt_power;
+ u32 init_speed;
+ u32 oper_speed;
};
+static int qca_power_setup(struct hci_uart *hu, bool on);
+static void qca_power_shutdown(struct hci_dev *hdev);
+
static void __serial_clock_on(struct tty_struct *tty)
{
/* TODO: Some chipset requires to enable UART clock on client
@@ -402,10 +446,11 @@ static int qca_open(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
struct qca_data *qca;
+ int ret;
BT_DBG("hu %p qca_open", hu);
- qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
+ qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
if (!qca)
return -ENOMEM;
@@ -453,19 +498,32 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
- timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
- qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
-
- timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
- qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
-
if (hu->serdev) {
serdev_device_open(hu->serdev);
qcadev = serdev_device_get_drvdata(hu->serdev);
- gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ if (qcadev->btsoc_type != QCA_WCN3990) {
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ } else {
+ hu->init_speed = qcadev->init_speed;
+ hu->oper_speed = qcadev->oper_speed;
+ ret = qca_power_setup(hu, true);
+ if (ret) {
+ destroy_workqueue(qca->workqueue);
+ kfree_skb(qca->rx_skb);
+ hu->priv = NULL;
+ kfree(qca);
+ return ret;
+ }
+ }
}
+ timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
+ qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
+
+ timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
+ qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -549,10 +607,13 @@ static int qca_close(struct hci_uart *hu)
qca->hu = NULL;
if (hu->serdev) {
- serdev_device_close(hu->serdev);
-
qcadev = serdev_device_get_drvdata(hu->serdev);
- gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ qca_power_shutdown(hu->hdev);
+ else
+ gpiod_set_value_cansleep(qcadev->bt_en, 0);
+
+ serdev_device_close(hu->serdev);
}
kfree_skb(qca->rx_skb);
@@ -872,6 +933,8 @@ static uint8_t qca_get_baudrate_value(int speed)
return QCA_BAUDRATE_2000000;
case 3000000:
return QCA_BAUDRATE_3000000;
+ case 3200000:
+ return QCA_BAUDRATE_3200000;
case 3500000:
return QCA_BAUDRATE_3500000;
default:
@@ -884,19 +947,27 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
struct sk_buff *skb;
+ struct qca_serdev *qcadev;
u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
- if (baudrate > QCA_BAUDRATE_3000000)
+ if (baudrate > QCA_BAUDRATE_3200000)
return -EINVAL;
cmd[4] = baudrate;
- skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
+ skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
if (!skb) {
bt_dev_err(hdev, "Failed to allocate baudrate packet");
return -ENOMEM;
}
+ /* Disabling hardware flow control is mandatory while
+ * sending a change-baudrate request to the wcn3990 SoC.
+ */
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ hci_uart_set_flow_control(hu, true);
+
/* Assign commands to change baudrate and packet type. */
skb_put_data(skb, cmd, sizeof(cmd));
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
@@ -912,6 +983,9 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
set_current_state(TASK_RUNNING);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ hci_uart_set_flow_control(hu, false);
+
return 0;
}
@@ -923,50 +997,195 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
hci_uart_set_baudrate(hu, speed);
}
+static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ struct sk_buff *skb;
+
+ /* These power pulses are single-byte commands sent to the wcn3990
+ * at a required baudrate. The wcn3990 has an external circuit at
+ * the Tx pin which decodes a pulse sent at a specific baudrate.
+ * For example, the wcn3990 shares an RF COEX antenna and its power
+ * inputs between Wi-Fi and BT, so powering up the supplies alone
+ * will not enable BT until we send a power-on pulse at 115200 bps.
+ * This scheme helps to save power. Disabling hardware flow control
+ * is mandatory while sending power pulses to the SoC.
+ */
+ bt_dev_dbg(hdev, "sending power pulse %02x to SoC", cmd);
+
+ skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hci_uart_set_flow_control(hu, true);
+
+ skb_put_u8(skb, cmd);
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+
+ skb_queue_tail(&qca->txq, skb);
+ hci_uart_tx_wakeup(hu);
+
+ /* Wait 100 us for the SoC to settle down */
+ usleep_range(100, 200);
+ hci_uart_set_flow_control(hu, false);
+
+ return 0;
+}
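For reference, the pulse values are defined in btqca.h earlier in this patch (QCA_WCN3990_POWERON_PULSE is 0xFC, QCA_WCN3990_POWEROFF_PULSE is 0xC0), and qca_wcn3990_init() below shows the intended sequence: drop the host UART to 2400 bps, send the power-off pulse, switch to the init speed, then send the power-on pulse.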
+
+static unsigned int qca_get_speed(struct hci_uart *hu,
+ enum qca_speed_type speed_type)
+{
+ unsigned int speed = 0;
+
+ if (speed_type == QCA_INIT_SPEED) {
+ if (hu->init_speed)
+ speed = hu->init_speed;
+ else if (hu->proto->init_speed)
+ speed = hu->proto->init_speed;
+ } else {
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+ }
+
+ return speed;
+}
+
+static int qca_check_speeds(struct hci_uart *hu)
+{
+ struct qca_serdev *qcadev;
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->btsoc_type == QCA_WCN3990) {
+ if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
+ !qca_get_speed(hu, QCA_OPER_SPEED))
+ return -EINVAL;
+ } else {
+ if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
+ !qca_get_speed(hu, QCA_OPER_SPEED))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
+{
+ unsigned int speed, qca_baudrate;
+ int ret;
+
+ if (speed_type == QCA_INIT_SPEED) {
+ speed = qca_get_speed(hu, QCA_INIT_SPEED);
+ if (speed)
+ host_set_baudrate(hu, speed);
+ } else {
+ speed = qca_get_speed(hu, QCA_OPER_SPEED);
+ if (!speed)
+ return 0;
+
+ qca_baudrate = qca_get_baudrate_value(speed);
+ bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
+ ret = qca_set_baudrate(hu->hdev, qca_baudrate);
+ if (ret)
+ return ret;
+
+ host_set_baudrate(hu, speed);
+ }
+
+ return 0;
+}
+
+static int qca_wcn3990_init(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+ int ret;
+
+ /* Forcefully put the wcn3990 into boot mode. */
+ host_set_baudrate(hu, 2400);
+ ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
+ if (ret)
+ return ret;
+
+ qca_set_speed(hu, QCA_INIT_SPEED);
+ ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWERON_PULSE);
+ if (ret)
+ return ret;
+
+ /* Wait 100 ms for the SoC to boot */
+ msleep(100);
+
+ /* The device is now ready to communicate with the host. To get
+ * the host in sync with the device we need to reopen the port;
+ * without this, RTS and CTS go out of sync.
+ */
+ serdev_device_close(hu->serdev);
+ ret = serdev_device_open(hu->serdev);
+ if (ret) {
+ bt_dev_err(hu->hdev, "failed to open port");
+ return ret;
+ }
+
+ hci_uart_set_flow_control(hu, false);
+
+ return 0;
+}
+
static int qca_setup(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+ struct qca_serdev *qcadev;
int ret;
+ u32 soc_ver = 0;
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
- bt_dev_info(hdev, "ROME setup");
+ ret = qca_check_speeds(hu);
+ if (ret)
+ return ret;
/* Patch downloading has to be done without IBS mode */
clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
- /* Setup initial baudrate */
- speed = 0;
- if (hu->init_speed)
- speed = hu->init_speed;
- else if (hu->proto->init_speed)
- speed = hu->proto->init_speed;
+ if (qcadev->btsoc_type == QCA_WCN3990) {
+ bt_dev_info(hdev, "setting up wcn3990");
+ ret = qca_wcn3990_init(hu);
+ if (ret)
+ return ret;
- if (speed)
- host_set_baudrate(hu, speed);
+ ret = qca_read_soc_version(hdev, &soc_ver);
+ if (ret)
+ return ret;
+ } else {
+ bt_dev_info(hdev, "ROME setup");
+ qca_set_speed(hu, QCA_INIT_SPEED);
+ }
/* Setup user speed if needed */
- speed = 0;
- if (hu->oper_speed)
- speed = hu->oper_speed;
- else if (hu->proto->oper_speed)
- speed = hu->proto->oper_speed;
-
+ speed = qca_get_speed(hu, QCA_OPER_SPEED);
if (speed) {
+ ret = qca_set_speed(hu, QCA_OPER_SPEED);
+ if (ret)
+ return ret;
+
qca_baudrate = qca_get_baudrate_value(speed);
+ }
- bt_dev_info(hdev, "Set UART speed to %d", speed);
- ret = qca_set_baudrate(hdev, qca_baudrate);
- if (ret) {
- bt_dev_err(hdev, "Failed to change the baud rate (%d)",
- ret);
+ if (qcadev->btsoc_type != QCA_WCN3990) {
+ /* Get QCA version information */
+ ret = qca_read_soc_version(hdev, &soc_ver);
+ if (ret)
return ret;
- }
- host_set_baudrate(hu, speed);
}
+ bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
/* Setup patch / NVM configurations */
- ret = qca_uart_setup_rome(hdev, qca_baudrate);
+ ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver);
if (!ret) {
set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
@@ -1002,9 +1221,123 @@ static struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
+static const struct qca_vreg_data qca_soc_data = {
+ .soc_type = QCA_WCN3990,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 1800000, 1900000, 15000 },
+ { "vddxo", 1800000, 1900000, 80000 },
+ { "vddrf", 1300000, 1350000, 300000 },
+ { "vddch0", 3300000, 3400000, 450000 },
+ },
+ .num_vregs = 4,
+};
+
+static void qca_power_shutdown(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ host_set_baudrate(hu, 2400);
+ qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
+ qca_power_setup(hu, false);
+}
+
+static int qca_enable_regulator(struct qca_vreg vregs,
+ struct regulator *regulator)
+{
+ int ret;
+
+ ret = regulator_set_voltage(regulator, vregs.min_uV,
+ vregs.max_uV);
+ if (ret)
+ return ret;
+
+ if (vregs.load_uA)
+ ret = regulator_set_load(regulator,
+ vregs.load_uA);
+
+ if (ret)
+ return ret;
+
+ return regulator_enable(regulator);
+}
+
+static void qca_disable_regulator(struct qca_vreg vregs,
+ struct regulator *regulator)
+{
+ regulator_disable(regulator);
+ regulator_set_voltage(regulator, 0, vregs.max_uV);
+ if (vregs.load_uA)
+ regulator_set_load(regulator, 0);
+}
+
+static int qca_power_setup(struct hci_uart *hu, bool on)
+{
+ struct qca_vreg *vregs;
+ struct regulator_bulk_data *vreg_bulk;
+ struct qca_serdev *qcadev;
+ int i, num_vregs, ret = 0;
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data ||
+ !qcadev->bt_power->vreg_bulk)
+ return -EINVAL;
+
+ vregs = qcadev->bt_power->vreg_data->vregs;
+ vreg_bulk = qcadev->bt_power->vreg_bulk;
+ num_vregs = qcadev->bt_power->vreg_data->num_vregs;
+ BT_DBG("on: %d", on);
+ if (on && !qcadev->bt_power->vregs_on) {
+ for (i = 0; i < num_vregs; i++) {
+ ret = qca_enable_regulator(vregs[i],
+ vreg_bulk[i].consumer);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ BT_ERR("failed to enable regulator:%s", vregs[i].name);
+ /* turn off regulators which are enabled */
+ for (i = i - 1; i >= 0; i--)
+ qca_disable_regulator(vregs[i],
+ vreg_bulk[i].consumer);
+ } else {
+ qcadev->bt_power->vregs_on = true;
+ }
+ } else if (!on && qcadev->bt_power->vregs_on) {
+ /* turn off regulators in reverse order */
+ i = qcadev->bt_power->vreg_data->num_vregs - 1;
+ for ( ; i >= 0; i--)
+ qca_disable_regulator(vregs[i], vreg_bulk[i].consumer);
+
+ qcadev->bt_power->vregs_on = false;
+ }
+
+ return ret;
+}
+
+static int qca_init_regulators(struct qca_power *qca,
+ const struct qca_vreg *vregs, size_t num_vregs)
+{
+ int i;
+
+ qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
+ sizeof(struct regulator_bulk_data),
+ GFP_KERNEL);
+ if (!qca->vreg_bulk)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vregs; i++)
+ qca->vreg_bulk[i].supply = vregs[i].name;
+
+ return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk);
+}
+
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
+ const struct qca_vreg_data *data;
int err;
qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
@@ -1012,47 +1345,84 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
qcadev->serdev_hu.serdev = serdev;
+ data = of_device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
+ if (data && data->soc_type == QCA_WCN3990) {
+ qcadev->btsoc_type = QCA_WCN3990;
+ qcadev->bt_power = devm_kzalloc(&serdev->dev,
+ sizeof(struct qca_power),
+ GFP_KERNEL);
+ if (!qcadev->bt_power)
+ return -ENOMEM;
+
+ qcadev->bt_power->dev = &serdev->dev;
+ qcadev->bt_power->vreg_data = data;
+ err = qca_init_regulators(qcadev->bt_power, data->vregs,
+ data->num_vregs);
+ if (err) {
+ BT_ERR("Failed to init regulators:%d", err);
+ goto out;
+ }
- qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(qcadev->bt_en)) {
- dev_err(&serdev->dev, "failed to acquire enable gpio\n");
- return PTR_ERR(qcadev->bt_en);
- }
+ qcadev->bt_power->vregs_on = false;
- qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
- if (IS_ERR(qcadev->susclk)) {
- dev_err(&serdev->dev, "failed to acquire clk\n");
- return PTR_ERR(qcadev->susclk);
- }
+ device_property_read_u32(&serdev->dev, "max-speed",
+ &qcadev->oper_speed);
+ if (!qcadev->oper_speed)
+ BT_DBG("UART will pick default operating speed");
- err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
- if (err)
- return err;
+ err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
+ if (err) {
+ BT_ERR("wcn3990 serdev registration failed");
+ goto out;
+ }
+ } else {
+ qcadev->btsoc_type = QCA_ROME;
+ qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(qcadev->bt_en)) {
+ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(qcadev->bt_en);
+ }
- err = clk_prepare_enable(qcadev->susclk);
- if (err)
- return err;
+ qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
+ if (IS_ERR(qcadev->susclk)) {
+ dev_err(&serdev->dev, "failed to acquire clk\n");
+ return PTR_ERR(qcadev->susclk);
+ }
- err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
- if (err)
- clk_disable_unprepare(qcadev->susclk);
+ err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(qcadev->susclk);
+ if (err)
+ return err;
+
+ err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
+ if (err)
+ clk_disable_unprepare(qcadev->susclk);
+ }
+
+out:
+ return err;
- return err;
}
static void qca_serdev_remove(struct serdev_device *serdev)
{
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
- hci_uart_unregister_device(&qcadev->serdev_hu);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ qca_power_shutdown(qcadev->serdev_hu.hdev);
+ else
+ clk_disable_unprepare(qcadev->susclk);
- clk_disable_unprepare(qcadev->susclk);
+ hci_uart_unregister_device(&qcadev->serdev_hu);
}
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
+ { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 3d56ebcda720..6a94aa6a22c2 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -45,6 +45,8 @@ static const struct imx_weim_devtype imx51_weim_devtype = {
.cs_stride = 0x18,
};
+#define MAX_CS_REGS_COUNT 6
+
static const struct of_device_id weim_id_table[] = {
/* i.MX1/21 */
{ .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, },
@@ -112,9 +114,12 @@ err:
static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
const struct imx_weim_devtype *devtype)
{
- u32 cs_idx, value[devtype->cs_regs_count];
+ u32 cs_idx, value[MAX_CS_REGS_COUNT];
int i, ret;
+ if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
+ return -EINVAL;
+
/* get the CS index from this child node's "reg" property. */
ret = of_property_read_u32(np, "reg", &cs_idx);
if (ret)
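
The hunk above replaces a variable-length array with a fixed-size buffer plus a runtime guard. The pattern in isolation, as a minimal sketch with hypothetical names (not part of this patch):

	#define EXAMPLE_MAX_REGS	6

	static int example_sum_regs(const u32 *src, unsigned int count, u32 *sum)
	{
		u32 value[EXAMPLE_MAX_REGS];	/* fixed bound instead of a VLA */
		unsigned int i;

		if (WARN_ON(count > EXAMPLE_MAX_REGS))
			return -EINVAL;	/* refuse rather than overrun the stack */

		*sum = 0;
		for (i = 0; i < count; i++) {
			value[i] = src[i];
			*sum += value[i];
		}
		return 0;
	}
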
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a78b8e7085e9..113fc6edb2b0 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -282,6 +282,7 @@
#include <linux/blkdev.h>
#include <linux/times.h>
#include <linux/uaccess.h>
+#include <scsi/scsi_common.h>
#include <scsi/scsi_request.h>
/* used to tell the module to turn on full debugging messages */
@@ -345,10 +346,10 @@ static LIST_HEAD(cdrom_list);
int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
- if (cgc->sense) {
- cgc->sense->sense_key = 0x05;
- cgc->sense->asc = 0x20;
- cgc->sense->ascq = 0x00;
+ if (cgc->sshdr) {
+ cgc->sshdr->sense_key = 0x05;
+ cgc->sshdr->asc = 0x20;
+ cgc->sshdr->ascq = 0x00;
}
cgc->stat = -EIO;
@@ -2222,9 +2223,12 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
blk_execute_rq(q, cdi->disk, rq, 0);
if (scsi_req(rq)->result) {
- struct request_sense *s = req->sense;
+ struct scsi_sense_hdr sshdr;
+
ret = -EIO;
- cdi->last_sense = s->sense_key;
+ scsi_normalize_sense(req->sense, req->sense_len,
+ &sshdr);
+ cdi->last_sense = sshdr.sense_key;
}
if (blk_rq_unmap_user(bio))
@@ -2943,7 +2947,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int cmd)
{
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
struct cdrom_msf msf;
int blocksize = 0, format = 0, lba;
int ret;
@@ -2971,13 +2975,13 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
if (cgc->buffer == NULL)
return -ENOMEM;
- memset(&sense, 0, sizeof(sense));
- cgc->sense = &sense;
+ memset(&sshdr, 0, sizeof(sshdr));
+ cgc->sshdr = &sshdr;
cgc->data_direction = CGC_DATA_READ;
ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
- if (ret && sense.sense_key == 0x05 &&
- sense.asc == 0x20 &&
- sense.ascq == 0x00) {
+ if (ret && sshdr.sense_key == 0x05 &&
+ sshdr.asc == 0x20 &&
+ sshdr.ascq == 0x00) {
/*
* SCSI-II devices are not required to support
* READ_CD, so let's try switching block size
@@ -2986,7 +2990,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
ret = cdrom_switch_blocksize(cdi, blocksize);
if (ret)
goto out;
- cgc->sense = NULL;
+ cgc->sshdr = NULL;
ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
ret |= cdrom_switch_blocksize(cdi, blocksize);
}
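
For reference, a minimal sketch of how a raw sense buffer is folded into the fixed-layout header used above (hypothetical caller; scsi_normalize_sense() is the real helper from <scsi/scsi_common.h>):

	static int example_check_sense(const u8 *sense, int sense_len)
	{
		struct scsi_sense_hdr sshdr;

		if (!scsi_normalize_sense(sense, sense_len, &sshdr))
			return -EIO;	/* short or malformed sense data */

		/* 05/20/00: ILLEGAL REQUEST, INVALID COMMAND OPERATION CODE */
		if (sshdr.sense_key == 0x05 && sshdr.asc == 0x20 &&
		    sshdr.ascq == 0x00)
			return -EINVAL;
		return 0;
	}
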
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 212f447938ae..ce277ee0a28a 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -554,3 +554,17 @@ config ADI
endmenu
+config RANDOM_TRUST_CPU
+ bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+ depends on X86 || S390 || PPC
+ default n
+ help
+ Assume that the CPU manufacturer (e.g., Intel or AMD for RDSEED or
+ RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
+ for the purposes of initializing Linux's CRNG. Since this is not
+ something that can be independently audited, this amounts to trusting
+ that the CPU manufacturer (perhaps with the insistence or mandate
+ of a Nation State's intelligence or law enforcement agencies)
+ has not installed a hidden back door to compromise the CPU's
+ random number generation facilities.
+
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 53fe633df1e8..c9bf2c219841 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -11,7 +11,7 @@
#include "agp.h"
-static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
+static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
dma_addr_t dma_addr;
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index e50c29c97ca7..c69e39fdd02b 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -156,7 +156,7 @@ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
/* Address to map to */
pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
- aperturebase = tmp << 25;
+ aperturebase = (u64)tmp << 25;
aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
enable_gart_translation(hammer, gatt_table);
@@ -277,7 +277,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
nb_order = (nb_order >> 1) & 7;
pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
- nb_aper = nb_base << 25;
+ nb_aper = (u64)nb_base << 25;
/* Northbridge seems to contain crap. Try the AGP bridge. */
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index be426eb2a353..4a22b4b41aef 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -579,7 +579,6 @@ hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
struct hpet_info *info)
{
struct hpet_timer __iomem *timer;
- struct hpet __iomem *hpet;
struct hpets *hpetp;
int err;
unsigned long v;
@@ -591,7 +590,6 @@ hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
case HPET_DPI:
case HPET_IRQFREQ:
timer = devp->hd_timer;
- hpet = devp->hd_hpet;
hpetp = devp->hd_hpets;
break;
case HPET_IE_ON:
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c34b257d852d..dac895dc01b9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -307,19 +307,6 @@ config HW_RANDOM_HISI
If unsure, say Y.
-config HW_RANDOM_MSM
- tristate "Qualcomm SoCs Random Number Generator support"
- depends on HW_RANDOM && ARCH_QCOM
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on Qualcomm SoCs.
-
- To compile this driver as a module, choose M here. the
- module will be called msm-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && ARCH_STI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 533e913c93d1..e35ec3ce3a20 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
-obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 661c82cde0f2..433426242b87 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 1947aed7c044..94235761955c 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -19,6 +19,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 250123bc4905..14730be54edf 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -13,6 +13,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
deleted file mode 100644
index 841fee845ec9..000000000000
--- a/drivers/char/hw_random/msm-rng.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/hw_random.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-/* Device specific register offsets */
-#define PRNG_DATA_OUT 0x0000
-#define PRNG_STATUS 0x0004
-#define PRNG_LFSR_CFG 0x0100
-#define PRNG_CONFIG 0x0104
-
-/* Device specific register masks and config values */
-#define PRNG_LFSR_CFG_MASK 0x0000ffff
-#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
-#define PRNG_CONFIG_HW_ENABLE BIT(1)
-#define PRNG_STATUS_DATA_AVAIL BIT(0)
-
-#define MAX_HW_FIFO_DEPTH 16
-#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4)
-#define WORD_SZ 4
-
-struct msm_rng {
- void __iomem *base;
- struct clk *clk;
- struct hwrng hwrng;
-};
-
-#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng)
-
-static int msm_rng_enable(struct hwrng *hwrng, int enable)
-{
- struct msm_rng *rng = to_msm_rng(hwrng);
- u32 val;
- int ret;
-
- ret = clk_prepare_enable(rng->clk);
- if (ret)
- return ret;
-
- if (enable) {
- /* Enable PRNG only if it is not already enabled */
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- if (val & PRNG_CONFIG_HW_ENABLE)
- goto already_enabled;
-
- val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
- val &= ~PRNG_LFSR_CFG_MASK;
- val |= PRNG_LFSR_CFG_CLOCKS;
- writel(val, rng->base + PRNG_LFSR_CFG);
-
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- val |= PRNG_CONFIG_HW_ENABLE;
- writel(val, rng->base + PRNG_CONFIG);
- } else {
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- val &= ~PRNG_CONFIG_HW_ENABLE;
- writel(val, rng->base + PRNG_CONFIG);
- }
-
-already_enabled:
- clk_disable_unprepare(rng->clk);
- return 0;
-}
-
-static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
-{
- struct msm_rng *rng = to_msm_rng(hwrng);
- size_t currsize = 0;
- u32 *retdata = data;
- size_t maxsize;
- int ret;
- u32 val;
-
- /* calculate max size bytes to transfer back to caller */
- maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
-
- ret = clk_prepare_enable(rng->clk);
- if (ret)
- return ret;
-
- /* read random data from hardware */
- do {
- val = readl_relaxed(rng->base + PRNG_STATUS);
- if (!(val & PRNG_STATUS_DATA_AVAIL))
- break;
-
- val = readl_relaxed(rng->base + PRNG_DATA_OUT);
- if (!val)
- break;
-
- *retdata++ = val;
- currsize += WORD_SZ;
-
- /* make sure we stay on 32bit boundary */
- if ((maxsize - currsize) < WORD_SZ)
- break;
- } while (currsize < maxsize);
-
- clk_disable_unprepare(rng->clk);
-
- return currsize;
-}
-
-static int msm_rng_init(struct hwrng *hwrng)
-{
- return msm_rng_enable(hwrng, 1);
-}
-
-static void msm_rng_cleanup(struct hwrng *hwrng)
-{
- msm_rng_enable(hwrng, 0);
-}
-
-static int msm_rng_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct msm_rng *rng;
- int ret;
-
- rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
- if (!rng)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, rng);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rng->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(rng->base))
- return PTR_ERR(rng->base);
-
- rng->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(rng->clk))
- return PTR_ERR(rng->clk);
-
- rng->hwrng.name = KBUILD_MODNAME,
- rng->hwrng.init = msm_rng_init,
- rng->hwrng.cleanup = msm_rng_cleanup,
- rng->hwrng.read = msm_rng_read,
-
- ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
- if (ret) {
- dev_err(&pdev->dev, "failed to register hwrng\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct of_device_id msm_rng_of_match[] = {
- { .compatible = "qcom,prng", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msm_rng_of_match);
-
-static struct platform_driver msm_rng_driver = {
- .probe = msm_rng_probe,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = of_match_ptr(msm_rng_of_match),
- }
-};
-module_platform_driver(msm_rng_driver);
-
-MODULE_ALIAS("platform:" KBUILD_MODNAME);
-MODULE_AUTHOR("The Linux Foundation");
-MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index 263a5bb8e605..791182aa8e04 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/random.h>
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index ad353be871bf..90ec010bffbd 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi)
return 0;
out_err:
- ipmi_unregister_smi(new_smi->intf);
- new_smi->intf = NULL;
+ if (new_smi->intf) {
+ ipmi_unregister_smi(new_smi->intf);
+ new_smi->intf = NULL;
+ }
kfree(init_name);
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index fbfc05e3f3d1..bb882ab161fe 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
{
unsigned long flags;
- int ret = 0;
+ int ret = -ENODATA;
u8 status;
spin_lock_irqsave(&kcs_bmc->lock, flags);
- if (!kcs_bmc->running) {
- kcs_force_abort(kcs_bmc);
- ret = -ENODEV;
- goto out_unlock;
- }
-
- status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT);
-
- switch (status) {
- case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT:
- kcs_bmc_handle_cmd(kcs_bmc);
- break;
-
- case KCS_STATUS_IBF:
- kcs_bmc_handle_data(kcs_bmc);
- break;
+ status = read_status(kcs_bmc);
+ if (status & KCS_STATUS_IBF) {
+ if (!kcs_bmc->running)
+ kcs_force_abort(kcs_bmc);
+ else if (status & KCS_STATUS_CMD_DAT)
+ kcs_bmc_handle_cmd(kcs_bmc);
+ else
+ kcs_bmc_handle_data(kcs_bmc);
- default:
- ret = -ENODATA;
- break;
+ ret = 0;
}
-out_unlock:
spin_unlock_irqrestore(&kcs_bmc->lock, flags);
return ret;
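
A sketch of how a host-interface front-end would consume this (hypothetical handler, not part of this patch): the new return-value contract — 0 when an IBF event was serviced, -ENODATA otherwise — maps directly onto IRQ_HANDLED/IRQ_NONE.

	#include <linux/interrupt.h>

	static irqreturn_t example_kcs_irq(int irq, void *arg)
	{
		struct kcs_bmc *kcs_bmc = arg;

		/* 0 => an IBF event was serviced, so the IRQ was ours */
		return kcs_bmc_handle_event(kcs_bmc) ? IRQ_NONE : IRQ_HANDLED;
	}
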
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index ffeb60d3434c..7b4e4de778e4 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
#endif
if (vma->vm_flags & VM_SHARED)
return shmem_zero_setup(vma);
+ vma_set_anonymous(vma);
return 0;
}
@@ -765,6 +766,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
switch (orig) {
case SEEK_CUR:
offset += file->f_pos;
+ /* fall through */
case SEEK_SET:
/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
if ((unsigned long long)offset >= -MAX_ERRNO) {
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 370e0a64ead1..a219964cb770 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1748,8 +1748,6 @@ static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data)
static int cm4000_config(struct pcmcia_device * link, int devno)
{
- struct cm4000_dev *dev;
-
link->config_flags |= CONF_AUTO_SET_IO;
/* read the config-tuples */
@@ -1759,8 +1757,6 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
if (pcmcia_enable_device(link))
goto cs_release;
- dev = link->priv;
-
return 0;
cs_release:
diff --git a/drivers/char/random.c b/drivers/char/random.c
index cd888d4ee605..bf5f99fc36f1 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -782,6 +782,7 @@ static void invalidate_batched_entropy(void);
static void crng_initialize(struct crng_state *crng)
{
int i;
+ int arch_init = 1;
unsigned long rv;
memcpy(&crng->state[0], "expand 32-byte k", 16);
@@ -792,10 +793,18 @@ static void crng_initialize(struct crng_state *crng)
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
+ !arch_get_random_long(&rv)) {
rv = random_get_entropy();
+ arch_init = 0;
+ }
crng->state[i] ^= rv;
}
+#ifdef CONFIG_RANDOM_TRUST_CPU
+ if (arch_init) {
+ crng_init = 2;
+ pr_notice("random: crng done (trusting CPU's manufacturer)\n");
+ }
+#endif
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
@@ -1122,8 +1131,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
} sample;
long delta, delta2, delta3;
- preempt_disable();
-
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
@@ -1161,8 +1168,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* and limit entropy estimate to 12 bits.
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
-
- preempt_enable();
}
void add_input_randomness(unsigned int type, unsigned int code,
@@ -1659,6 +1664,21 @@ int wait_for_random_bytes(void)
EXPORT_SYMBOL(wait_for_random_bytes);
/*
+ * Returns whether or not the urandom pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
+ * u64,int,long} family of functions.
+ *
+ * Returns: true if the urandom pool has been seeded.
+ * false if the urandom pool has not been seeded.
+ */
+bool rng_is_initialized(void)
+{
+ return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
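
A sketch of the intended usage (hypothetical caller; wait_for_random_bytes() and get_random_bytes() are the existing APIs): prefer to sleep until the CRNG is seeded, and fall back to an opportunistic check where sleeping is not allowed.

	static int example_get_key(u8 *key, size_t len, bool can_sleep)
	{
		if (can_sleep) {
			int ret = wait_for_random_bytes();

			if (ret)
				return ret;
		} else if (!rng_is_initialized()) {
			return -EAGAIN;	/* CRNG not seeded yet */
		}

		get_random_bytes(key, len);
		return 0;
	}
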
+
+/*
* Add a callback function that will be invoked when the nonblocking
* pool is initialised.
*
@@ -1725,30 +1745,31 @@ EXPORT_SYMBOL(del_random_ready_callback);
* key known by the NSA). So it's useful if we need the speed, but
* only if we're willing to trust the hardware manufacturer not to
* have put in a back door.
+ *
+ * Return number of bytes filled in.
*/
-void get_random_bytes_arch(void *buf, int nbytes)
+int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
+ int left = nbytes;
char *p = buf;
- trace_get_random_bytes_arch(nbytes, _RET_IP_);
- while (nbytes) {
+ trace_get_random_bytes_arch(left, _RET_IP_);
+ while (left) {
unsigned long v;
- int chunk = min(nbytes, (int)sizeof(unsigned long));
+ int chunk = min_t(int, left, sizeof(unsigned long));
if (!arch_get_random_long(&v))
break;
-
+
memcpy(p, &v, chunk);
p += chunk;
- nbytes -= chunk;
+ left -= chunk;
}
- if (nbytes)
- get_random_bytes(p, nbytes);
+ return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);
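
With the new return-value contract, callers must top up any shortfall themselves; a minimal sketch (hypothetical caller, not part of this patch):

	static void example_fill(void *buf, int nbytes)
	{
		u8 *p = buf;
		int got = get_random_bytes_arch(p, nbytes);

		if (got < nbytes)	/* arch RNG may be absent or run dry */
			get_random_bytes(p + got, nbytes - got);
	}
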
-
/*
* init_std_data - initialize pool with system data
*
@@ -1895,14 +1916,22 @@ static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
size_t bytes;
- __u32 buf[16];
+ __u32 t, buf[16];
const char __user *p = buffer;
while (count > 0) {
+ int b, i = 0;
+
bytes = min(count, sizeof(buf));
if (copy_from_user(&buf, p, bytes))
return -EFAULT;
+ for (b = bytes; b > 0; b -= sizeof(__u32), i++) {
+ if (!arch_get_random_int(&t))
+ break;
+ buf[i] ^= t;
+ }
+
count -= bytes;
p += bytes;
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 94fedeeec035..4948c8bda6b1 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -193,14 +193,6 @@ static unsigned long rtc_freq; /* Current periodic IRQ rate */
static unsigned long rtc_irq_data; /* our output to the world */
static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
-#ifdef RTC_IRQ
-/*
- * rtc_task_lock nests inside rtc_lock.
- */
-static DEFINE_SPINLOCK(rtc_task_lock);
-static rtc_task_t *rtc_callback;
-#endif
-
/*
* If this driver ever becomes modularised, it will be really nice
* to make the epoch retain its value across module reload...
@@ -264,11 +256,6 @@ static irqreturn_t rtc_interrupt(int irq, void *dev_id)
spin_unlock(&rtc_lock);
- /* Now do the rest of the actions */
- spin_lock(&rtc_task_lock);
- if (rtc_callback)
- rtc_callback->func(rtc_callback->private_data);
- spin_unlock(&rtc_task_lock);
wake_up_interruptible(&rtc_wait);
kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 0a62c19937b6..46caadca916a 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -81,42 +81,66 @@ void tpm_put_ops(struct tpm_chip *chip)
EXPORT_SYMBOL_GPL(tpm_put_ops);
/**
- * tpm_chip_find_get() - find and reserve a TPM chip
+ * tpm_default_chip() - find a TPM chip and get a reference to it
+ */
+struct tpm_chip *tpm_default_chip(void)
+{
+ struct tpm_chip *chip, *res = NULL;
+ int chip_num = 0;
+ int chip_prev;
+
+ mutex_lock(&idr_lock);
+
+ do {
+ chip_prev = chip_num;
+ chip = idr_get_next(&dev_nums_idr, &chip_num);
+ if (chip) {
+ get_device(&chip->dev);
+ res = chip;
+ break;
+ }
+ } while (chip_prev != chip_num);
+
+ mutex_unlock(&idr_lock);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(tpm_default_chip);
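
A sketch of the expected calling convention (hypothetical caller): the reference handed back by tpm_default_chip() is dropped with put_device() once the chip has been used with the public tpm_* helpers.

	static int example_tpm_seed(u8 *buf, size_t len)
	{
		struct tpm_chip *chip = tpm_default_chip();
		int rc;

		if (!chip)
			return -ENODEV;

		/* returns the number of bytes fetched or -errno */
		rc = tpm_get_random(chip, buf, len);
		put_device(&chip->dev);
		return rc;
	}
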
+
+/**
+ * tpm_find_get_ops() - find and reserve a TPM chip
* @chip: a &struct tpm_chip instance, %NULL for the default chip
*
* Finds a TPM chip and reserves its class device and operations. The chip must
- * be released with tpm_chip_put_ops() after use.
+ * be released with tpm_put_ops() after use.
+ * This function is for internal use only. It supports existing TPM callers
+ * by accepting NULL, but those callers should be converted to pass in a chip
+ * directly.
*
* Return:
* A reserved &struct tpm_chip instance.
* %NULL if a chip is not found.
* %NULL if the chip is not available.
*/
-struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip)
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
{
- struct tpm_chip *res = NULL;
- int chip_num = 0;
- int chip_prev;
-
- mutex_lock(&idr_lock);
+ int rc;
- if (!chip) {
- do {
- chip_prev = chip_num;
- chip = idr_get_next(&dev_nums_idr, &chip_num);
- if (chip && !tpm_try_get_ops(chip)) {
- res = chip;
- break;
- }
- } while (chip_prev != chip_num);
- } else {
+ if (chip) {
if (!tpm_try_get_ops(chip))
- res = chip;
+ return chip;
+ return NULL;
}
- mutex_unlock(&idr_lock);
-
- return res;
+ chip = tpm_default_chip();
+ if (!chip)
+ return NULL;
+ rc = tpm_try_get_ops(chip);
+ /* release additional reference we got from tpm_default_chip() */
+ put_device(&chip->dev);
+ if (rc)
+ return NULL;
+ return chip;
}
/**
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index e32f6e85dc6d..1a803b0cf980 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -29,7 +29,6 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
-#include <linux/pm_runtime.h>
#include <linux/tpm_eventlog.h>
#include "tpm.h"
@@ -369,10 +368,13 @@ err_len:
return -EINVAL;
}
-static int tpm_request_locality(struct tpm_chip *chip)
+static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags)
{
int rc;
+ if (flags & TPM_TRANSMIT_NESTED)
+ return 0;
+
if (!chip->ops->request_locality)
return 0;
@@ -385,10 +387,13 @@ static int tpm_request_locality(struct tpm_chip *chip)
return 0;
}
-static void tpm_relinquish_locality(struct tpm_chip *chip)
+static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags)
{
int rc;
+ if (flags & TPM_TRANSMIT_NESTED)
+ return;
+
if (!chip->ops->relinquish_locality)
return;
@@ -399,6 +404,28 @@ static void tpm_relinquish_locality(struct tpm_chip *chip)
chip->locality = -1;
}
+static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags)
+{
+ if (flags & TPM_TRANSMIT_NESTED)
+ return 0;
+
+ if (!chip->ops->cmd_ready)
+ return 0;
+
+ return chip->ops->cmd_ready(chip);
+}
+
+static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags)
+{
+ if (flags & TPM_TRANSMIT_NESTED)
+ return 0;
+
+ if (!chip->ops->go_idle)
+ return 0;
+
+ return chip->ops->go_idle(chip);
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip,
struct tpm_space *space,
u8 *buf, size_t bufsiz,
@@ -423,7 +450,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
TSS2_RESMGR_TPM_RC_LAYER);
- return bufsiz;
+ return sizeof(*header);
}
if (bufsiz > TPM_BUFSIZE)
@@ -439,24 +466,24 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
return -E2BIG;
}
- if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED))
mutex_lock(&chip->tpm_mutex);
-
if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, true);
/* Store the decision as chip->locality will be changed. */
need_locality = chip->locality == -1;
- if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
- rc = tpm_request_locality(chip);
+ if (need_locality) {
+ rc = tpm_request_locality(chip, flags);
if (rc < 0)
goto out_no_locality;
}
- if (chip->dev.parent)
- pm_runtime_get_sync(chip->dev.parent);
+ rc = tpm_cmd_ready(chip, flags);
+ if (rc)
+ goto out;
rc = tpm2_prepare_space(chip, space, ordinal, buf);
if (rc)
@@ -516,19 +543,22 @@ out_recv:
}
rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
+ if (rc)
+ dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
out:
- if (chip->dev.parent)
- pm_runtime_put_sync(chip->dev.parent);
+ rc = tpm_go_idle(chip, flags);
+ if (rc)
+ goto out;
if (need_locality)
- tpm_relinquish_locality(chip);
+ tpm_relinquish_locality(chip, flags);
out_no_locality:
if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, false);
- if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED))
mutex_unlock(&chip->tpm_mutex);
return rc ? rc : len;
}
@@ -930,7 +960,7 @@ int tpm_is_tpm2(struct tpm_chip *chip)
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -954,7 +984,7 @@ int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
@@ -1013,7 +1043,7 @@ int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
u32 count = 0;
int i;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -1142,7 +1172,7 @@ int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -1262,7 +1292,7 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -1324,7 +1354,7 @@ int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
@@ -1352,7 +1382,7 @@ int tpm_unseal_trusted(struct tpm_chip *chip,
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 4426649e431c..f3501d05264f 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -424,23 +424,24 @@ struct tpm_buf {
u8 *data;
};
-static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
{
struct tpm_input_header *head;
+ head = (struct tpm_input_header *)buf->data;
+ head->tag = cpu_to_be16(tag);
+ head->length = cpu_to_be32(sizeof(*head));
+ head->ordinal = cpu_to_be32(ordinal);
+}
+static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
buf->data_page = alloc_page(GFP_HIGHUSER);
if (!buf->data_page)
return -ENOMEM;
buf->flags = 0;
buf->data = kmap(buf->data_page);
-
- head = (struct tpm_input_header *) buf->data;
-
- head->tag = cpu_to_be16(tag);
- head->length = cpu_to_be32(sizeof(*head));
- head->ordinal = cpu_to_be32(ordinal);
-
+ tpm_buf_reset(buf, tag, ordinal);
return 0;
}
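
A sketch of the split init/reset usage (hypothetical command builder; tpm_buf_append_u16() and tpm_buf_destroy() are the existing helpers in this header): tpm_buf_reset() lets one allocation be reused across retries, as tpm2_get_random() does below.

	static int example_build_getrandom(struct tpm_buf *buf, u16 num_bytes)
	{
		int rc = tpm_buf_init(buf, TPM2_ST_NO_SESSIONS,
				      TPM2_CC_GET_RANDOM);

		if (rc)
			return rc;
		tpm_buf_append_u16(buf, num_bytes);	/* body: requested size */
		/* ... transmit and parse the response, then: */
		tpm_buf_destroy(buf);
		return 0;
	}
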
@@ -511,9 +512,17 @@ extern const struct file_operations tpm_fops;
extern const struct file_operations tpmrm_fops;
extern struct idr dev_nums_idr;
+/**
+ * enum tpm_transmit_flags - flags for tpm_transmit()
+ *
+ * @TPM_TRANSMIT_UNLOCKED: do not lock the chip
+ * @TPM_TRANSMIT_NESTED: discard setup steps (power management,
+ * locality) including locking (i.e. implicit
+ * UNLOCKED)
+ */
enum tpm_transmit_flags {
TPM_TRANSMIT_UNLOCKED = BIT(0),
- TPM_TRANSMIT_RAW = BIT(1),
+ TPM_TRANSMIT_NESTED = BIT(1),
};
ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
@@ -538,7 +547,7 @@ static inline void tpm_msleep(unsigned int delay_msec)
delay_msec * 1000);
};
-struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip);
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
@@ -569,7 +578,7 @@ static inline u32 tpm2_rc_value(u32 rc)
int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count,
struct tpm2_digest *digests);
-int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max);
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
unsigned int flags);
int tpm2_seal_trusted(struct tpm_chip *chip,
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index d31b09099216..c31b490bd41d 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -27,46 +27,6 @@ enum tpm2_session_attributes {
TPM2_SA_CONTINUE_SESSION = BIT(0),
};
-struct tpm2_startup_in {
- __be16 startup_type;
-} __packed;
-
-struct tpm2_get_tpm_pt_in {
- __be32 cap_id;
- __be32 property_id;
- __be32 property_cnt;
-} __packed;
-
-struct tpm2_get_tpm_pt_out {
- u8 more_data;
- __be32 subcap_id;
- __be32 property_cnt;
- __be32 property_id;
- __be32 value;
-} __packed;
-
-struct tpm2_get_random_in {
- __be16 size;
-} __packed;
-
-struct tpm2_get_random_out {
- __be16 size;
- u8 buffer[TPM_MAX_RNG_DATA];
-} __packed;
-
-union tpm2_cmd_params {
- struct tpm2_startup_in startup_in;
- struct tpm2_get_tpm_pt_in get_tpm_pt_in;
- struct tpm2_get_tpm_pt_out get_tpm_pt_out;
- struct tpm2_get_random_in getrandom_in;
- struct tpm2_get_random_out getrandom_out;
-};
-
-struct tpm2_cmd {
- tpm_cmd_header header;
- union tpm2_cmd_params params;
-} __packed;
-
struct tpm2_hash {
unsigned int crypto_id;
unsigned int tpm_id;
@@ -321,82 +281,72 @@ int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count,
}
-#define TPM2_GETRANDOM_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_get_random_in))
-
-static const struct tpm_input_header tpm2_getrandom_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_GETRANDOM_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_GET_RANDOM)
-};
+struct tpm2_get_random_out {
+ __be16 size;
+ u8 buffer[TPM_MAX_RNG_DATA];
+} __packed;
/**
* tpm2_get_random() - get random bytes from the TPM RNG
*
- * @chip: TPM chip to use
- * @out: destination buffer for the random bytes
- * @max: the max number of bytes to write to @out
+ * @chip: a &tpm_chip instance
+ * @dest: destination buffer
+ * @max: the max number of random bytes to pull
*
* Return:
- * Size of the output buffer, or -EIO on error.
+ * number of random bytes fetched on success,
+ * -errno otherwise
*/
-int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max)
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
- struct tpm2_cmd cmd;
- u32 recd, rlength;
- u32 num_bytes;
+ struct tpm2_get_random_out *out;
+ struct tpm_buf buf;
+ u32 recd;
+ u32 num_bytes = max;
int err;
int total = 0;
int retries = 5;
- u8 *dest = out;
+ u8 *dest_ptr = dest;
- num_bytes = min_t(u32, max, sizeof(cmd.params.getrandom_out.buffer));
-
- if (!out || !num_bytes ||
- max > sizeof(cmd.params.getrandom_out.buffer))
+ if (!num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- do {
- cmd.header.in = tpm2_getrandom_header;
- cmd.params.getrandom_in.size = cpu_to_be16(num_bytes);
+ err = tpm_buf_init(&buf, 0, 0);
+ if (err)
+ return err;
- err = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd),
+ do {
+ tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
+ tpm_buf_append_u16(&buf, num_bytes);
+ err = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
offsetof(struct tpm2_get_random_out,
buffer),
0, "attempting get random");
if (err)
- break;
+ goto out;
- recd = min_t(u32, be16_to_cpu(cmd.params.getrandom_out.size),
- num_bytes);
- rlength = be32_to_cpu(cmd.header.out.length);
- if (rlength < offsetof(struct tpm2_get_random_out, buffer) +
- recd)
- return -EFAULT;
- memcpy(dest, cmd.params.getrandom_out.buffer, recd);
+ out = (struct tpm2_get_random_out *)
+ &buf.data[TPM_HEADER_SIZE];
+ recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
+ if (tpm_buf_length(&buf) <
+ offsetof(struct tpm2_get_random_out, buffer) + recd) {
+ err = -EFAULT;
+ goto out;
+ }
+ memcpy(dest_ptr, out->buffer, recd);
- dest += recd;
+ dest_ptr += recd;
total += recd;
num_bytes -= recd;
} while (retries-- && total < max);
+ tpm_buf_destroy(&buf);
return total ? total : -EIO;
+out:
+ tpm_buf_destroy(&buf);
+ return err;
}
-#define TPM2_GET_TPM_PT_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_get_tpm_pt_in))
-
-#define TPM2_GET_TPM_PT_OUT_BODY_SIZE \
- sizeof(struct tpm2_get_tpm_pt_out)
-
-static const struct tpm_input_header tpm2_get_tpm_pt_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_GET_TPM_PT_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_GET_CAPABILITY)
-};
-
/**
* tpm2_flush_context_cmd() - execute a TPM2_FlushContext command
* @chip: TPM chip to use
@@ -471,7 +421,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
{
unsigned int blob_len;
struct tpm_buf buf;
- u32 hash, rlength;
+ u32 hash;
int i;
int rc;
@@ -546,8 +496,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
rc = -E2BIG;
goto out;
}
- rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)->header.out.length);
- if (rlength < TPM_HEADER_SIZE + 4 + blob_len) {
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
rc = -EFAULT;
goto out;
}
@@ -657,7 +606,6 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
u16 data_len;
u8 *data;
int rc;
- u32 rlength;
rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
if (rc)
@@ -685,9 +633,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
goto out;
}
- rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
- ->header.out.length);
- if (rlength < TPM_HEADER_SIZE + 6 + data_len) {
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
rc = -EFAULT;
goto out;
}
@@ -733,69 +679,71 @@ out:
return rc;
}
+struct tpm2_get_cap_out {
+ u8 more_data;
+ __be32 subcap_id;
+ __be32 property_cnt;
+ __be32 property_id;
+ __be32 value;
+} __packed;
+
/**
* tpm2_get_tpm_pt() - get value of a TPM_CAP_TPM_PROPERTIES type property
- * @chip: TPM chip to use.
+ * @chip: a &tpm_chip instance
* @property_id: property ID.
* @value: output variable.
* @desc: passed to tpm_transmit_cmd()
*
- * Return: Same as with tpm_transmit_cmd.
+ * Return:
+ * 0 on success,
+ * -errno or a TPM return code otherwise
*/
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
const char *desc)
{
- struct tpm2_cmd cmd;
+ struct tpm2_get_cap_out *out;
+ struct tpm_buf buf;
int rc;
- cmd.header.in = tpm2_get_tpm_pt_header;
- cmd.params.get_tpm_pt_in.cap_id = cpu_to_be32(TPM2_CAP_TPM_PROPERTIES);
- cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(property_id);
- cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd),
- TPM2_GET_TPM_PT_OUT_BODY_SIZE, 0, desc);
- if (!rc)
- *value = be32_to_cpu(cmd.params.get_tpm_pt_out.value);
-
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ return rc;
+ tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ tpm_buf_append_u32(&buf, property_id);
+ tpm_buf_append_u32(&buf, 1);
+ rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+ if (!rc) {
+ out = (struct tpm2_get_cap_out *)
+ &buf.data[TPM_HEADER_SIZE];
+ *value = be32_to_cpu(out->value);
+ }
+ tpm_buf_destroy(&buf);
return rc;
}
EXPORT_SYMBOL_GPL(tpm2_get_tpm_pt);
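
A usage sketch (hypothetical caller; TPM_PT_TOTAL_COMMANDS is the same property id tpm2_probe() below queries):

	static ssize_t example_read_property(struct tpm_chip *chip)
	{
		ssize_t rc;
		u32 val;

		rc = tpm2_get_tpm_pt(chip, TPM_PT_TOTAL_COMMANDS, &val, NULL);
		if (rc)
			return rc;

		dev_dbg(&chip->dev, "TPM_PT_TOTAL_COMMANDS: %u\n", val);
		return 0;
	}
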
-#define TPM2_SHUTDOWN_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_startup_in))
-
-static const struct tpm_input_header tpm2_shutdown_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_SHUTDOWN_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_SHUTDOWN)
-};
-
/**
- * tpm2_shutdown() - send shutdown command to the TPM chip
+ * tpm2_shutdown() - send a TPM shutdown command
+ *
+ * Sends a TPM shutdown command. The shutdown command is used in call
+ * sites where the system is going down. If it fails, there is not much
+ * that can be done except print an error message.
*
- * @chip: TPM chip to use.
- * @shutdown_type: shutdown type. The value is either
- * TPM_SU_CLEAR or TPM_SU_STATE.
+ * @chip: a &tpm_chip instance
+ * @shutdown_type: TPM_SU_CLEAR or TPM_SU_STATE.
*/
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
{
- struct tpm2_cmd cmd;
+ struct tpm_buf buf;
int rc;
- cmd.header.in = tpm2_shutdown_header;
- cmd.params.startup_in.startup_type = cpu_to_be16(shutdown_type);
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd), 0, 0,
- "stopping the TPM");
-
- /* In places where shutdown command is sent there's no much we can do
- * except print the error code on a system failure.
- */
- if (rc < 0 && rc != -EPIPE)
- dev_warn(&chip->dev, "transmit returned %d while stopping the TPM",
- rc);
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SHUTDOWN);
+ if (rc)
+ return;
+ tpm_buf_append_u16(&buf, shutdown_type);
+ tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+ "stopping the TPM");
+ tpm_buf_destroy(&buf);
}
/*
@@ -863,31 +811,37 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
}
/**
- * tpm2_probe() - probe TPM 2.0
- * @chip: TPM chip to use
+ * tpm2_probe() - probe for the TPM 2.0 protocol
+ * @chip: a &tpm_chip instance
*
- * Return: < 0 error and 0 on success.
+ * Send an idempotent TPM 2.0 command and see whether there is a TPM2 chip at the
+ * other end based on the response tag. The flag TPM_CHIP_FLAG_TPM2 is set by
+ * this function if this is the case.
*
- * Send idempotent TPM 2.0 command and see whether TPM 2.0 chip replied based on
- * the reply tag.
+ * Return:
+ * 0 on success,
+ * -errno otherwise
*/
int tpm2_probe(struct tpm_chip *chip)
{
- struct tpm2_cmd cmd;
+ struct tpm_output_header *out;
+ struct tpm_buf buf;
int rc;
- cmd.header.in = tpm2_get_tpm_pt_header;
- cmd.params.get_tpm_pt_in.cap_id = cpu_to_be32(TPM2_CAP_TPM_PROPERTIES);
- cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(0x100);
- cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd), 0, 0, NULL);
- if (rc < 0)
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
return rc;
-
- if (be16_to_cpu(cmd.header.out.tag) == TPM2_ST_NO_SESSIONS)
- chip->flags |= TPM_CHIP_FLAG_TPM2;
-
+ tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ tpm_buf_append_u32(&buf, TPM_PT_TOTAL_COMMANDS);
+ tpm_buf_append_u32(&buf, 1);
+ rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+ /* We ignore TPM return codes on purpose. */
+ if (rc >= 0) {
+ out = (struct tpm_output_header *)buf.data;
+ if (be16_to_cpu(out->tag) == TPM2_ST_NO_SESSIONS)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ }
+ tpm_buf_destroy(&buf);
return 0;
}
EXPORT_SYMBOL_GPL(tpm2_probe);
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 6122d3276f72..d2e101b32482 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -39,7 +39,7 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
if (space->session_tbl[i])
tpm2_flush_context_cmd(chip, space->session_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_NESTED);
}
}
@@ -84,7 +84,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
tpm_buf_append(&tbuf, &buf[*offset], body_size);
rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4,
- TPM_TRANSMIT_UNLOCKED, NULL);
+ TPM_TRANSMIT_NESTED, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -133,7 +133,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
tpm_buf_append_u32(&tbuf, handle);
rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0,
- TPM_TRANSMIT_UNLOCKED, NULL);
+ TPM_TRANSMIT_NESTED, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -170,7 +170,7 @@ static void tpm2_flush_space(struct tpm_chip *chip)
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i])
tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_NESTED);
tpm2_flush_sessions(chip, space);
}
@@ -377,7 +377,7 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
return 0;
out_no_slots:
- tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_UNLOCKED);
+ tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_NESTED);
dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
phandle);
return -ENOMEM;
@@ -465,7 +465,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
return rc;
tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_NESTED);
space->context_tbl[i] = ~0;
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 34fbc6cb097b..36952ef98f90 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -132,7 +132,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
}
/**
- * crb_go_idle - request tpm crb device to go the idle state
+ * __crb_go_idle - request tpm crb device to go the idle state
*
* @dev: crb device
* @priv: crb private data
@@ -147,7 +147,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
*
* Return: 0 always
*/
-static int crb_go_idle(struct device *dev, struct crb_priv *priv)
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
@@ -163,11 +163,20 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
dev_warn(dev, "goIdle timed out\n");
return -ETIME;
}
+
return 0;
}
+static int crb_go_idle(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_go_idle(dev, priv);
+}
+
/**
- * crb_cmd_ready - request tpm crb device to enter ready state
+ * __crb_cmd_ready - request tpm crb device to enter ready state
*
* @dev: crb device
* @priv: crb private data
@@ -181,7 +190,7 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
*
* Return: 0 on success -ETIME on timeout;
*/
-static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
@@ -200,6 +209,14 @@ static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
return 0;
}
+static int crb_cmd_ready(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_cmd_ready(dev, priv);
+}
+
static int __crb_request_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
@@ -401,6 +418,8 @@ static const struct tpm_class_ops tpm_crb = {
.send = crb_send,
.cancel = crb_cancel,
.req_canceled = crb_req_canceled,
+ .go_idle = crb_go_idle,
+ .cmd_ready = crb_cmd_ready,
.request_locality = crb_request_locality,
.relinquish_locality = crb_relinquish_locality,
.req_complete_mask = CRB_DRV_STS_COMPLETE,
@@ -520,7 +539,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
*/
- ret = crb_cmd_ready(dev, priv);
+ ret = __crb_cmd_ready(dev, priv);
if (ret)
goto out_relinquish_locality;
@@ -565,7 +584,7 @@ out:
if (!ret)
priv->cmd_size = cmd_size;
- crb_go_idle(dev, priv);
+ __crb_go_idle(dev, priv);
out_relinquish_locality:
@@ -628,32 +647,7 @@ static int crb_acpi_add(struct acpi_device *device)
chip->acpi_dev_handle = device->handle;
chip->flags = TPM_CHIP_FLAG_TPM2;
- rc = __crb_request_locality(dev, priv, 0);
- if (rc)
- return rc;
-
- rc = crb_cmd_ready(dev, priv);
- if (rc)
- goto out;
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- rc = tpm_chip_register(chip);
- if (rc) {
- crb_go_idle(dev, priv);
- pm_runtime_put_noidle(dev);
- pm_runtime_disable(dev);
- goto out;
- }
-
- pm_runtime_put_sync(dev);
-
-out:
- __crb_relinquish_locality(dev, priv, 0);
-
- return rc;
+ return tpm_chip_register(chip);
}
static int crb_acpi_remove(struct acpi_device *device)
@@ -663,52 +657,11 @@ static int crb_acpi_remove(struct acpi_device *device)
tpm_chip_unregister(chip);
- pm_runtime_disable(dev);
-
return 0;
}
-static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
-
- return crb_go_idle(dev, priv);
-}
-
-static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
-
- return crb_cmd_ready(dev, priv);
-}
-
-static int __maybe_unused crb_pm_suspend(struct device *dev)
-{
- int ret;
-
- ret = tpm_pm_suspend(dev);
- if (ret)
- return ret;
-
- return crb_pm_runtime_suspend(dev);
-}
-
-static int __maybe_unused crb_pm_resume(struct device *dev)
-{
- int ret;
-
- ret = crb_pm_runtime_resume(dev);
- if (ret)
- return ret;
-
- return tpm_pm_resume(dev);
-}
-
static const struct dev_pm_ops crb_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume)
- SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
};
static const struct acpi_device_id crb_device_ids[] = {
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 6116cd05e228..9086edc9066b 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
/* Lock the adapter for the duration of the whole sequence. */
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
if (tpm_dev.chip_type == SLB9645) {
/* use a combined read for newer chips
@@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
}
out:
- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
@@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* prepend the 'register address' to the buffer */
tpm_dev.buf[0] = addr;
@@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
usleep_range(sleep_low, sleep_hi);
}
- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
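
The replacement pattern in isolation, as a sketch (hypothetical helper; __i2c_transfer() is the real unlocked transfer variant): I2C_LOCK_SEGMENT locks only the bus segment the client sits on, rather than the whole adapter tree behind a mux.

	static int example_locked_xfer(struct i2c_client *client,
				       struct i2c_msg *msgs, int num)
	{
		int ret;

		i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
		ret = __i2c_transfer(client->adapter, msgs, num);
		i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);

		return ret;
	}
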
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 8b46aaa9e049..d2345d9fd7b5 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -875,6 +875,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
chip->acpi_dev_handle = acpi_dev_handle;
#endif
+ chip->hwrng.quality = priv->rng_quality;
+
/* Maximum timeouts */
chip->timeout_a = msecs_to_jiffies(TIS_TIMEOUT_A_MAX);
chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index f6e1dbe212a7..f48125f1e6e0 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -99,6 +99,7 @@ struct tpm_tis_data {
wait_queue_head_t int_queue;
wait_queue_head_t read_queue;
const struct tpm_tis_phy_ops *phy_ops;
+ unsigned short rng_quality;
};
struct tpm_tis_phy_ops {
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 424ff2fde1f2..9914f6973463 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -199,6 +199,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
static int tpm_tis_spi_probe(struct spi_device *dev)
{
struct tpm_tis_spi_phy *phy;
+ int irq;
phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
GFP_KERNEL);
@@ -211,7 +212,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
if (!phy->iobuf)
return -ENOMEM;
- return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
+ /* If the SPI device has an IRQ then use that */
+ if (dev->irq > 0)
+ irq = dev->irq;
+ else
+ irq = -1;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
NULL);
}
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index e4f79f920450..87a0ce47f201 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -418,7 +418,7 @@ static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
proxy_dev->state |= STATE_DRIVER_COMMAND;
rc = tpm_transmit_cmd(chip, NULL, buf.data, tpm_buf_length(&buf), 0,
- TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW,
+ TPM_TRANSMIT_NESTED,
"attempting to set locality");
proxy_dev->state &= ~STATE_DRIVER_COMMAND;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 17084cfcf53e..5b5b5d72eab7 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1309,51 +1309,35 @@ static const struct attribute_group port_attribute_group = {
.attrs = port_sysfs_entries,
};
-static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
- size_t count, loff_t *offp)
+static int debugfs_show(struct seq_file *s, void *data)
{
- struct port *port;
- char *buf;
- ssize_t ret, out_offset, out_count;
+ struct port *port = s->private;
+
+ seq_printf(s, "name: %s\n", port->name ? port->name : "");
+ seq_printf(s, "guest_connected: %d\n", port->guest_connected);
+ seq_printf(s, "host_connected: %d\n", port->host_connected);
+ seq_printf(s, "outvq_full: %d\n", port->outvq_full);
+ seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
+ seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
+ seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
+ seq_printf(s, "is_console: %s\n",
+ is_console_port(port) ? "yes" : "no");
+ seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);
- out_count = 1024;
- buf = kmalloc(out_count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ return 0;
+}
- port = filp->private_data;
- out_offset = 0;
- out_offset += snprintf(buf + out_offset, out_count,
- "name: %s\n", port->name ? port->name : "");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "guest_connected: %d\n", port->guest_connected);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "host_connected: %d\n", port->host_connected);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "outvq_full: %d\n", port->outvq_full);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "bytes_sent: %lu\n", port->stats.bytes_sent);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "bytes_received: %lu\n",
- port->stats.bytes_received);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "bytes_discarded: %lu\n",
- port->stats.bytes_discarded);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "is_console: %s\n",
- is_console_port(port) ? "yes" : "no");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "console_vtermno: %u\n", port->cons.vtermno);
-
- ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
- kfree(buf);
- return ret;
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
}
static const struct file_operations port_debugfs_ops = {
.owner = THIS_MODULE,
- .open = simple_open,
- .read = debugfs_read,
+ .open = debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void set_console_size(struct port *port, u16 rows, u16 cols)
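The debugfs conversion above replaces a hand-rolled snprintf buffer with the seq_file single_open() idiom, which handles allocation, offsets and partial reads for the driver. The same boilerplate in isolation (hypothetical mystats_* names; on newer kernels DEFINE_SHOW_ATTRIBUTE() can generate the open/fops part):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

struct my_state {
	unsigned long events;
};

static int mystats_show(struct seq_file *s, void *unused)
{
	struct my_state *st = s->private;	/* passed in via inode->i_private */

	seq_printf(s, "events: %lu\n", st->events);
	return 0;
}

static int mystats_open(struct inode *inode, struct file *file)
{
	return single_open(file, mystats_show, inode->i_private);
}

static const struct file_operations mystats_fops = {
	.owner = THIS_MODULE,
	.open = mystats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};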
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 721572a8c429..292056bbb30e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -45,6 +45,12 @@ config COMMON_CLK_MAX77686
This driver supports Maxim 77620/77686/77802 crystal oscillator
clock.
+config COMMON_CLK_MAX9485
+ tristate "Maxim 9485 Programmable Clock Generator"
+ depends on I2C
+ help
+ This driver supports the Maxim 9485 Programmable Audio Clock Generator.
+
config COMMON_CLK_RK808
tristate "Clock driver for RK805/RK808/RK818"
depends on MFD_RK808
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index ae40cbe770f0..a84c5573cabe 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
+obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
@@ -96,7 +97,7 @@ obj-$(CONFIG_ARCH_SPRD) += sprd/
obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
-obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/
+obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
diff --git a/drivers/clk/actions/Kconfig b/drivers/clk/actions/Kconfig
index 8854adb37847..dc38c85a4833 100644
--- a/drivers/clk/actions/Kconfig
+++ b/drivers/clk/actions/Kconfig
@@ -1,14 +1,21 @@
config CLK_ACTIONS
bool "Clock driver for Actions Semi SoCs"
depends on ARCH_ACTIONS || COMPILE_TEST
+ select REGMAP_MMIO
default ARCH_ACTIONS
if CLK_ACTIONS
# SoC Drivers
+config CLK_OWL_S700
+ bool "Support for the Actions Semi OWL S700 clocks"
+ depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST
+ default ARM64 && ARCH_ACTIONS
+
config CLK_OWL_S900
bool "Support for the Actions Semi OWL S900 clocks"
depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST
default ARM64 && ARCH_ACTIONS
+
endif
diff --git a/drivers/clk/actions/Makefile b/drivers/clk/actions/Makefile
index 76e431434d10..78c17d56f991 100644
--- a/drivers/clk/actions/Makefile
+++ b/drivers/clk/actions/Makefile
@@ -9,4 +9,5 @@ clk-owl-y += owl-composite.o
clk-owl-y += owl-pll.o
# SoC support
+obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o
obj-$(CONFIG_CLK_OWL_S900) += owl-s900.o
diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c
new file mode 100644
index 000000000000..5e9531392ee5
--- /dev/null
+++ b/drivers/clk/actions/owl-s700.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Actions Semi S700 clock driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Author: Parthiban Nallathambi <pn@denx.de>
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "owl-common.h"
+#include "owl-composite.h"
+#include "owl-divider.h"
+#include "owl-factor.h"
+#include "owl-fixed-factor.h"
+#include "owl-gate.h"
+#include "owl-mux.h"
+#include "owl-pll.h"
+
+#include <dt-bindings/clock/actions,s700-cmu.h>
+
+#define CMU_COREPLL (0x0000)
+#define CMU_DEVPLL (0x0004)
+#define CMU_DDRPLL (0x0008)
+#define CMU_NANDPLL (0x000C)
+#define CMU_DISPLAYPLL (0x0010)
+#define CMU_AUDIOPLL (0x0014)
+#define CMU_TVOUTPLL (0x0018)
+#define CMU_BUSCLK (0x001C)
+#define CMU_SENSORCLK (0x0020)
+#define CMU_LCDCLK (0x0024)
+#define CMU_DSIPLLCLK (0x0028)
+#define CMU_CSICLK (0x002C)
+#define CMU_DECLK (0x0030)
+#define CMU_SICLK (0x0034)
+#define CMU_BUSCLK1 (0x0038)
+#define CMU_HDECLK (0x003C)
+#define CMU_VDECLK (0x0040)
+#define CMU_VCECLK (0x0044)
+#define CMU_NANDCCLK (0x004C)
+#define CMU_SD0CLK (0x0050)
+#define CMU_SD1CLK (0x0054)
+#define CMU_SD2CLK (0x0058)
+#define CMU_UART0CLK (0x005C)
+#define CMU_UART1CLK (0x0060)
+#define CMU_UART2CLK (0x0064)
+#define CMU_UART3CLK (0x0068)
+#define CMU_UART4CLK (0x006C)
+#define CMU_UART5CLK (0x0070)
+#define CMU_UART6CLK (0x0074)
+#define CMU_PWM0CLK (0x0078)
+#define CMU_PWM1CLK (0x007C)
+#define CMU_PWM2CLK (0x0080)
+#define CMU_PWM3CLK (0x0084)
+#define CMU_PWM4CLK (0x0088)
+#define CMU_PWM5CLK (0x008C)
+#define CMU_GPU3DCLK (0x0090)
+#define CMU_CORECTL (0x009C)
+#define CMU_DEVCLKEN0 (0x00A0)
+#define CMU_DEVCLKEN1 (0x00A4)
+#define CMU_DEVRST0 (0x00A8)
+#define CMU_DEVRST1 (0x00AC)
+#define CMU_USBPLL (0x00B0)
+#define CMU_ETHERNETPLL (0x00B4)
+#define CMU_CVBSPLL (0x00B8)
+#define CMU_SSTSCLK (0x00C0)
+
+static struct clk_pll_table clk_audio_pll_table[] = {
+ {0, 45158400}, {1, 49152000},
+ {0, 0},
+};
+
+static struct clk_pll_table clk_cvbs_pll_table[] = {
+ {27, 29 * 12000000}, {28, 30 * 12000000}, {29, 31 * 12000000},
+ {30, 32 * 12000000}, {31, 33 * 12000000}, {32, 34 * 12000000},
+ {33, 35 * 12000000}, {34, 36 * 12000000}, {35, 37 * 12000000},
+ {36, 38 * 12000000}, {37, 39 * 12000000}, {38, 40 * 12000000},
+ {39, 41 * 12000000}, {40, 42 * 12000000}, {41, 43 * 12000000},
+ {42, 44 * 12000000}, {43, 45 * 12000000}, {0, 0},
+};
+
+/* pll clocks */
+static OWL_PLL_NO_PARENT(clk_core_pll, "core_pll", CMU_COREPLL, 12000000, 9, 0, 8, 4, 174, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_dev_pll, "dev_pll", CMU_DEVPLL, 6000000, 8, 0, 8, 8, 126, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_ddr_pll, "ddr_pll", CMU_DDRPLL, 6000000, 8, 0, 8, 2, 180, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_nand_pll, "nand_pll", CMU_NANDPLL, 6000000, 8, 0, 8, 2, 86, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_display_pll, "display_pll", CMU_DISPLAYPLL, 6000000, 8, 0, 8, 2, 140, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_cvbs_pll, "cvbs_pll", CMU_CVBSPLL, 0, 8, 0, 8, 27, 43, clk_cvbs_pll_table, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_audio_pll, "audio_pll", CMU_AUDIOPLL, 0, 4, 0, 1, 0, 0, clk_audio_pll_table, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_ethernet_pll, "ethernet_pll", CMU_ETHERNETPLL, 500000000, 0, 0, 0, 0, 0, NULL, CLK_IGNORE_UNUSED);
+
+static const char *cpu_clk_mux_p[] = {"losc", "hosc", "core_pll", "noc1_clk_div"};
+static const char *dev_clk_p[] = { "hosc", "dev_pll"};
+static const char *noc_clk_mux_p[] = { "dev_clk", "display_pll", "nand_pll", "ddr_pll", "cvbs_pll"};
+
+static const char *csi_clk_mux_p[] = { "display_pll", "dev_clk"};
+static const char *de_clk_mux_p[] = { "display_pll", "dev_clk"};
+static const char *hde_clk_mux_p[] = { "dev_clk", "display_pll", "nand_pll", "ddr_pll"};
+static const char *nand_clk_mux_p[] = { "nand_pll", "display_pll", "dev_clk", "ddr_pll"};
+static const char *sd_clk_mux_p[] = { "dev_clk", "nand_pll", };
+static const char *uart_clk_mux_p[] = { "hosc", "dev_pll"};
+static const char *pwm_clk_mux_p[] = { "losc", "hosc"};
+static const char *gpu_clk_mux_p[] = { "dev_clk", "display_pll", "nand_pll", "ddr_clk", "cvbs_pll"};
+static const char *lcd_clk_mux_p[] = { "display_pll", "dev_clk" };
+static const char *i2s_clk_mux_p[] = { "audio_pll" };
+static const char *sensor_clk_mux_p[] = { "hosc", "si"};
+
+/* mux clocks */
+static OWL_MUX(clk_cpu, "cpu_clk", cpu_clk_mux_p, CMU_BUSCLK, 0, 2, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_dev, "dev_clk", dev_clk_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_noc0_clk_mux, "noc0_clk_mux", noc_clk_mux_p, CMU_BUSCLK, 4, 3, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_noc1_clk_mux, "noc1_clk_mux", noc_clk_mux_p, CMU_BUSCLK1, 4, 3, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_hp_clk_mux, "hp_clk_mux", noc_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
+
+static struct clk_factor_table sd_factor_table[] = {
+ /* bit0 ~ 4 */
+ {0, 1, 1}, {1, 1, 2}, {2, 1, 3}, {3, 1, 4},
+ {4, 1, 5}, {5, 1, 6}, {6, 1, 7}, {7, 1, 8},
+ {8, 1, 9}, {9, 1, 10}, {10, 1, 11}, {11, 1, 12},
+ {12, 1, 13}, {13, 1, 14}, {14, 1, 15}, {15, 1, 16},
+ {16, 1, 17}, {17, 1, 18}, {18, 1, 19}, {19, 1, 20},
+ {20, 1, 21}, {21, 1, 22}, {22, 1, 23}, {23, 1, 24},
+ {24, 1, 25}, {25, 1, 26},
+
+ /* bit8: /128 */
+ {256, 1, 1 * 128}, {257, 1, 2 * 128}, {258, 1, 3 * 128}, {259, 1, 4 * 128},
+ {260, 1, 5 * 128}, {261, 1, 6 * 128}, {262, 1, 7 * 128}, {263, 1, 8 * 128},
+ {264, 1, 9 * 128}, {265, 1, 10 * 128}, {266, 1, 11 * 128}, {267, 1, 12 * 128},
+ {268, 1, 13 * 128}, {269, 1, 14 * 128}, {270, 1, 15 * 128}, {271, 1, 16 * 128},
+ {272, 1, 17 * 128}, {273, 1, 18 * 128}, {274, 1, 19 * 128}, {275, 1, 20 * 128},
+ {276, 1, 21 * 128}, {277, 1, 22 * 128}, {278, 1, 23 * 128}, {279, 1, 24 * 128},
+ {280, 1, 25 * 128}, {281, 1, 26 * 128},
+
+ {0, 0},
+};
+
+static struct clk_factor_table lcd_factor_table[] = {
+ /* bit0 ~ 3 */
+ {0, 1, 1}, {1, 1, 2}, {2, 1, 3}, {3, 1, 4},
+ {4, 1, 5}, {5, 1, 6}, {6, 1, 7}, {7, 1, 8},
+ {8, 1, 9}, {9, 1, 10}, {10, 1, 11}, {11, 1, 12},
+
+ /* bit8: /7 */
+ {256, 1, 1 * 7}, {257, 1, 2 * 7}, {258, 1, 3 * 7}, {259, 1, 4 * 7},
+ {260, 1, 5 * 7}, {261, 1, 6 * 7}, {262, 1, 7 * 7}, {263, 1, 8 * 7},
+ {264, 1, 9 * 7}, {265, 1, 10 * 7}, {266, 1, 11 * 7}, {267, 1, 12 * 7},
+ {0, 0},
+};
+
+static struct clk_div_table hdmia_div_table[] = {
+ {0, 1}, {1, 2}, {2, 3}, {3, 4},
+ {4, 6}, {5, 8}, {6, 12}, {7, 16},
+ {8, 24},
+ {0, 0},
+};
+
+static struct clk_div_table rmii_div_table[] = {
+ {0, 4}, {1, 10},
+};
+
+/* divider clocks */
+static OWL_DIVIDER(clk_noc0, "noc0_clk", "noc0_clk_mux", CMU_BUSCLK, 16, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_noc1, "noc1_clk", "noc1_clk_mux", CMU_BUSCLK1, 16, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_noc1_clk_div, "noc1_clk_div", "noc1_clk", CMU_BUSCLK1, 20, 1, NULL, 0, 0);
+static OWL_DIVIDER(clk_hp_clk_div, "hp_clk_div", "hp_clk_mux", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_ahb, "ahb_clk", "hp_clk_div", CMU_BUSCLK1, 2, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_apb, "apb_clk", "ahb_clk", CMU_BUSCLK1, 14, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_sensor0, "sensor0", "sensor_src", CMU_SENSORCLK, 0, 4, NULL, 0, 0);
+static OWL_DIVIDER(clk_sensor1, "sensor1", "sensor_src", CMU_SENSORCLK, 8, 4, NULL, 0, 0);
+static OWL_DIVIDER(clk_rmii_ref, "rmii_ref", "ethernet_pll", CMU_ETHERNETPLL, 2, 1, rmii_div_table, 0, 0);
+
+static struct clk_factor_table de_factor_table[] = {
+ {0, 1, 1}, {1, 2, 3}, {2, 1, 2}, {3, 2, 5},
+ {4, 1, 3}, {5, 1, 4}, {6, 1, 6}, {7, 1, 8},
+ {8, 1, 12}, {0, 0, 0},
+};
+
+static struct clk_factor_table hde_factor_table[] = {
+ {0, 1, 1}, {1, 2, 3}, {2, 1, 2}, {3, 2, 5},
+ {4, 1, 3}, {5, 1, 4}, {6, 1, 6}, {7, 1, 8},
+ {0, 0, 0},
+};
+
+/* gate clocks */
+static OWL_GATE(clk_gpio, "gpio", "apb_clk", CMU_DEVCLKEN1, 25, 0, 0);
+static OWL_GATE(clk_dmac, "dmac", "hp_clk_div", CMU_DEVCLKEN0, 17, 0, 0);
+static OWL_GATE(clk_timer, "timer", "hosc", CMU_DEVCLKEN1, 22, 0, 0);
+static OWL_GATE_NO_PARENT(clk_dsi, "dsi_clk", CMU_DEVCLKEN0, 2, 0, 0);
+static OWL_GATE_NO_PARENT(clk_tvout, "tvout_clk", CMU_DEVCLKEN0, 3, 0, 0);
+static OWL_GATE_NO_PARENT(clk_hdmi_dev, "hdmi_dev", CMU_DEVCLKEN0, 5, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_480mpll0, "usb3_480mpll0", CMU_USBPLL, 3, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_480mphy0, "usb3_480mphy0", CMU_USBPLL, 2, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_5gphy, "usb3_5gphy", CMU_USBPLL, 1, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_cce, "usb3_cce", CMU_DEVCLKEN0, 25, 0, 0);
+static OWL_GATE(clk_i2c0, "i2c0", "hosc", CMU_DEVCLKEN1, 0, 0, 0);
+static OWL_GATE(clk_i2c1, "i2c1", "hosc", CMU_DEVCLKEN1, 1, 0, 0);
+static OWL_GATE(clk_i2c2, "i2c2", "hosc", CMU_DEVCLKEN1, 2, 0, 0);
+static OWL_GATE(clk_i2c3, "i2c3", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
+static OWL_GATE(clk_spi0, "spi0", "ahb_clk", CMU_DEVCLKEN1, 4, 0, 0);
+static OWL_GATE(clk_spi1, "spi1", "ahb_clk", CMU_DEVCLKEN1, 5, 0, 0);
+static OWL_GATE(clk_spi2, "spi2", "ahb_clk", CMU_DEVCLKEN1, 6, 0, 0);
+static OWL_GATE(clk_spi3, "spi3", "ahb_clk", CMU_DEVCLKEN1, 7, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h0_pllen, "usbh0_pllen", CMU_USBPLL, 12, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h0_phy, "usbh0_phy", CMU_USBPLL, 10, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h0_cce, "usbh0_cce", CMU_DEVCLKEN0, 26, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h1_pllen, "usbh1_pllen", CMU_USBPLL, 13, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h1_phy, "usbh1_phy", CMU_USBPLL, 11, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h1_cce, "usbh1_cce", CMU_DEVCLKEN0, 27, 0, 0);
+static OWL_GATE_NO_PARENT(clk_irc_switch, "irc_switch", CMU_DEVCLKEN1, 15, 0, 0);
+
+/* composite clocks */
+
+static OWL_COMP_DIV(clk_csi, "csi", csi_clk_mux_p,
+ OWL_MUX_HW(CMU_CSICLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 13, 0),
+ OWL_DIVIDER_HW(CMU_CSICLK, 0, 4, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_si, "si", csi_clk_mux_p,
+ OWL_MUX_HW(CMU_SICLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_DIVIDER_HW(CMU_SICLK, 0, 4, 0, NULL),
+ 0);
+
+static OWL_COMP_FACTOR(clk_de, "de", de_clk_mux_p,
+ OWL_MUX_HW(CMU_DECLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 0, 0),
+ OWL_FACTOR_HW(CMU_DECLK, 0, 3, 0, de_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_hde, "hde", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_HDECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 9, 0),
+ OWL_FACTOR_HW(CMU_HDECLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_vde, "vde", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VDECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 10, 0),
+ OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_vce, "vce", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VCECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 11, 0),
+ OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_DIV(clk_nand, "nand", nand_clk_mux_p,
+ OWL_MUX_HW(CMU_NANDCCLK, 8, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 21, 0),
+ OWL_DIVIDER_HW(CMU_NANDCCLK, 0, 3, 0, NULL),
+ CLK_SET_RATE_PARENT);
+
+static OWL_COMP_FACTOR(clk_sd0, "sd0", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD0CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 22, 0),
+ OWL_FACTOR_HW(CMU_SD0CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_sd1, "sd1", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD1CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 23, 0),
+ OWL_FACTOR_HW(CMU_SD1CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_sd2, "sd2", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD2CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 24, 0),
+ OWL_FACTOR_HW(CMU_SD2CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_DIV(clk_uart0, "uart0", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART0CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
+ OWL_DIVIDER_HW(CMU_UART0CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart1, "uart1", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART1CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 9, 0),
+ OWL_DIVIDER_HW(CMU_UART1CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart2, "uart2", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART2CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 10, 0),
+ OWL_DIVIDER_HW(CMU_UART2CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart3, "uart3", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART3CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 11, 0),
+ OWL_DIVIDER_HW(CMU_UART3CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart4, "uart4", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART4CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 12, 0),
+ OWL_DIVIDER_HW(CMU_UART4CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart5, "uart5", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART5CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 13, 0),
+ OWL_DIVIDER_HW(CMU_UART5CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart6, "uart6", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART6CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 14, 0),
+ OWL_DIVIDER_HW(CMU_UART6CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm0, "pwm0", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM0CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 16, 0),
+ OWL_DIVIDER_HW(CMU_PWM0CLK, 0, 10, 0, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(clk_pwm1, "pwm1", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM1CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 17, 0),
+ OWL_DIVIDER_HW(CMU_PWM1CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm2, "pwm2", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM2CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
+ OWL_DIVIDER_HW(CMU_PWM2CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm3, "pwm3", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM3CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
+ OWL_DIVIDER_HW(CMU_PWM3CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm4, "pwm4", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM4CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
+ OWL_DIVIDER_HW(CMU_PWM4CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm5, "pwm5", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM5CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
+ OWL_DIVIDER_HW(CMU_PWM5CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_FACTOR(clk_gpu3d, "gpu3d", gpu_clk_mux_p,
+ OWL_MUX_HW(CMU_GPU3DCLK, 4, 3),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 8, 0),
+ OWL_FACTOR_HW(CMU_GPU3DCLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_lcd, "lcd", lcd_clk_mux_p,
+ OWL_MUX_HW(CMU_LCDCLK, 12, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 1, 0),
+ OWL_FACTOR_HW(CMU_LCDCLK, 0, 9, 0, lcd_factor_table),
+ 0);
+
+static OWL_COMP_DIV(clk_hdmi_audio, "hdmia", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1), /* CMU_AUDIOPLL 24,1 unused */
+ OWL_GATE_HW(CMU_DEVCLKEN1, 28, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 24, 4, 0, hdmia_div_table),
+ 0);
+
+static OWL_COMP_DIV(clk_i2srx, "i2srx", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 27, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 20, 4, 0, hdmia_div_table),
+ 0);
+
+static OWL_COMP_DIV(clk_i2stx, "i2stx", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 26, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 16, 4, 0, hdmia_div_table),
+ 0);
+
+/* for Bluetooth PCM communication */
+static OWL_COMP_FIXED_FACTOR(clk_pcm1, "pcm1", "audio_pll",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 31, 0),
+ 1, 2, 0);
+
+static OWL_COMP_DIV(clk_sensor_src, "sensor_src", sensor_clk_mux_p,
+ OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
+ {0},
+ OWL_DIVIDER_HW(CMU_SENSORCLK, 5, 2, 0, NULL),
+ 0);
+
+static OWL_COMP_FIXED_FACTOR(clk_ethernet, "ethernet", "ethernet_pll",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 23, 0),
+ 1, 20, 0);
+
+static OWL_COMP_DIV_FIXED(clk_thermal_sensor, "thermal_sensor", "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN0, 31, 0),
+ OWL_DIVIDER_HW(CMU_SSTSCLK, 20, 10, 0, NULL),
+ 0);
+
+static struct owl_clk_common *s700_clks[] = {
+ &clk_core_pll.common,
+ &clk_dev_pll.common,
+ &clk_ddr_pll.common,
+ &clk_nand_pll.common,
+ &clk_display_pll.common,
+ &clk_cvbs_pll.common,
+ &clk_audio_pll.common,
+ &clk_ethernet_pll.common,
+ &clk_cpu.common,
+ &clk_dev.common,
+ &clk_ahb.common,
+ &clk_apb.common,
+ &clk_dmac.common,
+ &clk_noc0_clk_mux.common,
+ &clk_noc1_clk_mux.common,
+ &clk_hp_clk_mux.common,
+ &clk_hp_clk_div.common,
+ &clk_noc1_clk_div.common,
+ &clk_noc0.common,
+ &clk_noc1.common,
+ &clk_sensor_src.common,
+ &clk_gpio.common,
+ &clk_timer.common,
+ &clk_dsi.common,
+ &clk_csi.common,
+ &clk_si.common,
+ &clk_de.common,
+ &clk_hde.common,
+ &clk_vde.common,
+ &clk_vce.common,
+ &clk_nand.common,
+ &clk_sd0.common,
+ &clk_sd1.common,
+ &clk_sd2.common,
+ &clk_uart0.common,
+ &clk_uart1.common,
+ &clk_uart2.common,
+ &clk_uart3.common,
+ &clk_uart4.common,
+ &clk_uart5.common,
+ &clk_uart6.common,
+ &clk_pwm0.common,
+ &clk_pwm1.common,
+ &clk_pwm2.common,
+ &clk_pwm3.common,
+ &clk_pwm4.common,
+ &clk_pwm5.common,
+ &clk_gpu3d.common,
+ &clk_i2c0.common,
+ &clk_i2c1.common,
+ &clk_i2c2.common,
+ &clk_i2c3.common,
+ &clk_spi0.common,
+ &clk_spi1.common,
+ &clk_spi2.common,
+ &clk_spi3.common,
+ &clk_usb3_480mpll0.common,
+ &clk_usb3_480mphy0.common,
+ &clk_usb3_5gphy.common,
+ &clk_usb3_cce.common,
+ &clk_lcd.common,
+ &clk_hdmi_audio.common,
+ &clk_i2srx.common,
+ &clk_i2stx.common,
+ &clk_sensor0.common,
+ &clk_sensor1.common,
+ &clk_hdmi_dev.common,
+ &clk_ethernet.common,
+ &clk_rmii_ref.common,
+ &clk_usb2h0_pllen.common,
+ &clk_usb2h0_phy.common,
+ &clk_usb2h0_cce.common,
+ &clk_usb2h1_pllen.common,
+ &clk_usb2h1_phy.common,
+ &clk_usb2h1_cce.common,
+ &clk_tvout.common,
+ &clk_thermal_sensor.common,
+ &clk_irc_switch.common,
+ &clk_pcm1.common,
+};
+
+static struct clk_hw_onecell_data s700_hw_clks = {
+ .hws = {
+ [CLK_CORE_PLL] = &clk_core_pll.common.hw,
+ [CLK_DEV_PLL] = &clk_dev_pll.common.hw,
+ [CLK_DDR_PLL] = &clk_ddr_pll.common.hw,
+ [CLK_NAND_PLL] = &clk_nand_pll.common.hw,
+ [CLK_DISPLAY_PLL] = &clk_display_pll.common.hw,
+ [CLK_CVBS_PLL] = &clk_cvbs_pll.common.hw,
+ [CLK_AUDIO_PLL] = &clk_audio_pll.common.hw,
+ [CLK_ETHERNET_PLL] = &clk_ethernet_pll.common.hw,
+ [CLK_CPU] = &clk_cpu.common.hw,
+ [CLK_DEV] = &clk_dev.common.hw,
+ [CLK_AHB] = &clk_ahb.common.hw,
+ [CLK_APB] = &clk_apb.common.hw,
+ [CLK_DMAC] = &clk_dmac.common.hw,
+ [CLK_NOC0_CLK_MUX] = &clk_noc0_clk_mux.common.hw,
+ [CLK_NOC1_CLK_MUX] = &clk_noc1_clk_mux.common.hw,
+ [CLK_HP_CLK_MUX] = &clk_hp_clk_mux.common.hw,
+ [CLK_HP_CLK_DIV] = &clk_hp_clk_div.common.hw,
+ [CLK_NOC1_CLK_DIV] = &clk_noc1_clk_div.common.hw,
+ [CLK_NOC0] = &clk_noc0.common.hw,
+ [CLK_NOC1] = &clk_noc1.common.hw,
+ [CLK_SENOR_SRC] = &clk_sensor_src.common.hw,
+ [CLK_GPIO] = &clk_gpio.common.hw,
+ [CLK_TIMER] = &clk_timer.common.hw,
+ [CLK_DSI] = &clk_dsi.common.hw,
+ [CLK_CSI] = &clk_csi.common.hw,
+ [CLK_SI] = &clk_si.common.hw,
+ [CLK_DE] = &clk_de.common.hw,
+ [CLK_HDE] = &clk_hde.common.hw,
+ [CLK_VDE] = &clk_vde.common.hw,
+ [CLK_VCE] = &clk_vce.common.hw,
+ [CLK_NAND] = &clk_nand.common.hw,
+ [CLK_SD0] = &clk_sd0.common.hw,
+ [CLK_SD1] = &clk_sd1.common.hw,
+ [CLK_SD2] = &clk_sd2.common.hw,
+ [CLK_UART0] = &clk_uart0.common.hw,
+ [CLK_UART1] = &clk_uart1.common.hw,
+ [CLK_UART2] = &clk_uart2.common.hw,
+ [CLK_UART3] = &clk_uart3.common.hw,
+ [CLK_UART4] = &clk_uart4.common.hw,
+ [CLK_UART5] = &clk_uart5.common.hw,
+ [CLK_UART6] = &clk_uart6.common.hw,
+ [CLK_PWM0] = &clk_pwm0.common.hw,
+ [CLK_PWM1] = &clk_pwm1.common.hw,
+ [CLK_PWM2] = &clk_pwm2.common.hw,
+ [CLK_PWM3] = &clk_pwm3.common.hw,
+ [CLK_PWM4] = &clk_pwm4.common.hw,
+ [CLK_PWM5] = &clk_pwm5.common.hw,
+ [CLK_GPU3D] = &clk_gpu3d.common.hw,
+ [CLK_I2C0] = &clk_i2c0.common.hw,
+ [CLK_I2C1] = &clk_i2c1.common.hw,
+ [CLK_I2C2] = &clk_i2c2.common.hw,
+ [CLK_I2C3] = &clk_i2c3.common.hw,
+ [CLK_SPI0] = &clk_spi0.common.hw,
+ [CLK_SPI1] = &clk_spi1.common.hw,
+ [CLK_SPI2] = &clk_spi2.common.hw,
+ [CLK_SPI3] = &clk_spi3.common.hw,
+ [CLK_USB3_480MPLL0] = &clk_usb3_480mpll0.common.hw,
+ [CLK_USB3_480MPHY0] = &clk_usb3_480mphy0.common.hw,
+ [CLK_USB3_5GPHY] = &clk_usb3_5gphy.common.hw,
+ [CLK_USB3_CCE] = &clk_usb3_cce.common.hw,
+ [CLK_LCD] = &clk_lcd.common.hw,
+ [CLK_HDMI_AUDIO] = &clk_hdmi_audio.common.hw,
+ [CLK_I2SRX] = &clk_i2srx.common.hw,
+ [CLK_I2STX] = &clk_i2stx.common.hw,
+ [CLK_SENSOR0] = &clk_sensor0.common.hw,
+ [CLK_SENSOR1] = &clk_sensor1.common.hw,
+ [CLK_HDMI_DEV] = &clk_hdmi_dev.common.hw,
+ [CLK_ETHERNET] = &clk_ethernet.common.hw,
+ [CLK_RMII_REF] = &clk_rmii_ref.common.hw,
+ [CLK_USB2H0_PLLEN] = &clk_usb2h0_pllen.common.hw,
+ [CLK_USB2H0_PHY] = &clk_usb2h0_phy.common.hw,
+ [CLK_USB2H0_CCE] = &clk_usb2h0_cce.common.hw,
+ [CLK_USB2H1_PLLEN] = &clk_usb2h1_pllen.common.hw,
+ [CLK_USB2H1_PHY] = &clk_usb2h1_phy.common.hw,
+ [CLK_USB2H1_CCE] = &clk_usb2h1_cce.common.hw,
+ [CLK_TVOUT] = &clk_tvout.common.hw,
+ [CLK_THERMAL_SENSOR] = &clk_thermal_sensor.common.hw,
+ [CLK_IRC_SWITCH] = &clk_irc_switch.common.hw,
+ [CLK_PCM1] = &clk_pcm1.common.hw,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static const struct owl_clk_desc s700_clk_desc = {
+ .clks = s700_clks,
+ .num_clks = ARRAY_SIZE(s700_clks),
+
+ .hw_clks = &s700_hw_clks,
+};
+
+static int s700_clk_probe(struct platform_device *pdev)
+{
+ const struct owl_clk_desc *desc;
+
+ desc = &s700_clk_desc;
+ owl_clk_regmap_init(pdev, desc);
+
+ return owl_clk_probe(&pdev->dev, desc->hw_clks);
+}
+
+static const struct of_device_id s700_clk_of_match[] = {
+ { .compatible = "actions,s700-cmu", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver s700_clk_driver = {
+ .probe = s700_clk_probe,
+ .driver = {
+ .name = "s700-cmu",
+ .of_match_table = s700_clk_of_match
+ },
+};
+
+static int __init s700_clk_init(void)
+{
+ return platform_driver_register(&s700_clk_driver);
+}
+core_initcall(s700_clk_init);
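owl-s700.c follows the usual one-cell clock-provider pattern: the s700_clks[] array wires every clock to the CMU regmap, while s700_hw_clks maps the binding indices from actions,s700-cmu.h onto clk_hw pointers for the DT provider. On the consumer side (hypothetical driver, for illustration only), a phandle such as <&cmu CLK_UART0> resolves through that table:

	/* hypothetical consumer probe snippet */
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);	/* ungates e.g. the uart0 composite */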
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 082596f37c1d..facc169ebb68 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_HAVE_AT91_USB_CLK) += clk-usb.o
obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o
obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o
+obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o
diff --git a/drivers/clk/at91/clk-i2s-mux.c b/drivers/clk/at91/clk-i2s-mux.c
new file mode 100644
index 000000000000..f0c3c3079f04
--- /dev/null
+++ b/drivers/clk/at91/clk-i2s-mux.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Microchip Technology Inc,
+ * Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
+ *
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <soc/at91/atmel-sfr.h>
+
+#define I2S_BUS_NR 2
+
+struct clk_i2s_mux {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u8 bus_id;
+};
+
+#define to_clk_i2s_mux(hw) container_of(hw, struct clk_i2s_mux, hw)
+
+static u8 clk_i2s_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_i2s_mux *mux = to_clk_i2s_mux(hw);
+ u32 val;
+
+ regmap_read(mux->regmap, AT91_SFR_I2SCLKSEL, &val);
+
+ return (val & BIT(mux->bus_id)) >> mux->bus_id;
+}
+
+static int clk_i2s_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_i2s_mux *mux = to_clk_i2s_mux(hw);
+
+ return regmap_update_bits(mux->regmap, AT91_SFR_I2SCLKSEL,
+ BIT(mux->bus_id), index << mux->bus_id);
+}
+
+static const struct clk_ops clk_i2s_mux_ops = {
+ .get_parent = clk_i2s_mux_get_parent,
+ .set_parent = clk_i2s_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk_hw * __init
+at91_clk_i2s_mux_register(struct regmap *regmap, const char *name,
+ const char * const *parent_names,
+ unsigned int num_parents, u8 bus_id)
+{
+ struct clk_init_data init = {};
+ struct clk_i2s_mux *i2s_ck;
+ int ret;
+
+ i2s_ck = kzalloc(sizeof(*i2s_ck), GFP_KERNEL);
+ if (!i2s_ck)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_i2s_mux_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ i2s_ck->hw.init = &init;
+ i2s_ck->bus_id = bus_id;
+ i2s_ck->regmap = regmap;
+
+ ret = clk_hw_register(NULL, &i2s_ck->hw);
+ if (ret) {
+ kfree(i2s_ck);
+ return ERR_PTR(ret);
+ }
+
+ return &i2s_ck->hw;
+}
+
+static void __init of_sama5d2_clk_i2s_mux_setup(struct device_node *np)
+{
+ struct regmap *regmap_sfr;
+ u8 bus_id;
+ const char *parent_names[2];
+ struct device_node *i2s_mux_np;
+ struct clk_hw *hw;
+ int ret;
+
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+ if (IS_ERR(regmap_sfr))
+ return;
+
+ for_each_child_of_node(np, i2s_mux_np) {
+ if (of_property_read_u8(i2s_mux_np, "reg", &bus_id))
+ continue;
+
+ if (bus_id >= I2S_BUS_NR)
+ continue;
+
+ ret = of_clk_parent_fill(i2s_mux_np, parent_names, 2);
+ if (ret != 2)
+ continue;
+
+ hw = at91_clk_i2s_mux_register(regmap_sfr, i2s_mux_np->name,
+ parent_names, 2, bus_id);
+ if (IS_ERR(hw))
+ continue;
+
+ of_clk_add_hw_provider(i2s_mux_np, of_clk_hw_simple_get, hw);
+ }
+}
+
+CLK_OF_DECLARE(sama5d2_clk_i2s_mux, "atmel,sama5d2-clk-i2s-mux",
+ of_sama5d2_clk_i2s_mux_setup);
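For a concrete reading of the bit arithmetic above: with bus_id = 1, clk_i2s_mux_get_parent() computes (val & BIT(1)) >> 1, i.e. 0 or 1, and clk_i2s_mux_set_parent(hw, 1) issues regmap_update_bits(regmap, AT91_SFR_I2SCLKSEL, BIT(1), 1 << 1), flipping only that bus's selection bit in the SFR while leaving the other I2S controller's mux untouched.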
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 38b366b00c57..596136793fc4 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -24,7 +24,7 @@
#define ASPEED_MPLL_PARAM 0x20
#define ASPEED_HPLL_PARAM 0x24
#define AST2500_HPLL_BYPASS_EN BIT(20)
-#define AST2400_HPLL_STRAPPED BIT(18)
+#define AST2400_HPLL_PROGRAMMED BIT(18)
#define AST2400_HPLL_BYPASS_EN BIT(17)
#define ASPEED_MISC_CTRL 0x2c
#define UART_DIV13_EN BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
[ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
[ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
- [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
- [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
+ [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
+ [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
[ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
[ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
[ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
@@ -109,7 +109,7 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_RSACLK] = { 24, -1, "rsaclk-gate", NULL, 0 }, /* RSA */
[ASPEED_CLK_GATE_UART3CLK] = { 25, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */
[ASPEED_CLK_GATE_UART4CLK] = { 26, -1, "uart4clk-gate", "uart", 0 }, /* UART4 */
- [ASPEED_CLK_GATE_SDCLKCLK] = { 27, 16, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
+ [ASPEED_CLK_GATE_SDCLK] = { 27, 16, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
[ASPEED_CLK_GATE_LHCCLK] = { 28, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
};
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
u32 clk = BIT(gate->clock_idx);
+ u32 rst = BIT(gate->reset_idx);
u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
u32 reg;
+ /*
+ * If the IP is in reset, treat the clock as not enabled; this
+ * happens with some clocks such as the USB one when
+ * coming from cold reset. Without this, aspeed_clk_enable()
+ * will fail to lift the reset.
+ */
+ if (gate->reset_idx >= 0) {
+ regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
+ if (reg & rst)
+ return 0;
+ }
+
regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
static void __init aspeed_ast2400_cc(struct regmap *map)
{
struct clk_hw *hw;
- u32 val, freq, div;
+ u32 val, div, clkin, hpll;
+ const u16 hpll_rates[][4] = {
+ {384, 360, 336, 408},
+ {400, 375, 350, 425},
+ };
+ int rate;
/*
* CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
* strapping
*/
regmap_read(map, ASPEED_STRAP, &val);
- if (val & CLKIN_25MHZ_EN)
- freq = 25000000;
- else if (val & AST2400_CLK_SOURCE_SEL)
- freq = 48000000;
- else
- freq = 24000000;
- hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
- pr_debug("clkin @%u MHz\n", freq / 1000000);
+ rate = (val >> 8) & 3;
+ if (val & CLKIN_25MHZ_EN) {
+ clkin = 25000000;
+ hpll = hpll_rates[1][rate];
+ } else if (val & AST2400_CLK_SOURCE_SEL) {
+ clkin = 48000000;
+ hpll = hpll_rates[0][rate];
+ } else {
+ clkin = 24000000;
+ hpll = hpll_rates[0][rate];
+ }
+ hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
+ pr_debug("clkin @%u MHz\n", clkin / 1000000);
/*
* High-speed PLL clock derived from the crystal. This is the CPU clock,
- * and we assume that it is enabled
+ * and we assume that it is enabled. It can be configured through the
+ * HPLL_PARAM register, or set to a specified frequency by strapping.
*/
regmap_read(map, ASPEED_HPLL_PARAM, &val);
- WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
- aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
+ if (val & AST2400_HPLL_PROGRAMMED)
+ hw = aspeed_ast2400_calc_pll("hpll", val);
+ else
+ hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
+ hpll * 1000000);
+
+ aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
/*
* Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
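Worked decode of the new fixed-rate fallback: strap bits 9:8 ((val >> 8) & 3) select the column of hpll_rates[][] and CLKIN_25MHZ_EN selects the row, so with a 25 MHz crystal and rate index 1 the fixed HPLL is hpll_rates[1][1] = 375 MHz, where a 24/48 MHz crystal would give 360 MHz. The programmed path via aspeed_ast2400_calc_pll() is only taken when AST2400_HPLL_PROGRAMMED is set in the HPLL parameter register.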
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index a2f8c42e527a..92bc4aca0f95 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CS2000 -- CIRRUS LOGIC Fractional-N Clock Synthesizer & Clock Multiplier
*
* Copyright (C) 2015 Renesas Electronics Corporation
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index a5d402de5584..20724abd38bd 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -177,8 +177,15 @@ static struct clk *_of_fixed_factor_clk_setup(struct device_node *node)
clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
mult, div);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ /*
+ * If parent clock is not registered, registration would fail.
+ * Clear OF_POPULATED flag so that clock registration can be
+ * attempted again from probe function.
+ */
+ of_node_clear_flag(node, OF_POPULATED);
return clk;
+ }
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
if (ret) {
diff --git a/drivers/clk/clk-max9485.c b/drivers/clk/clk-max9485.c
new file mode 100644
index 000000000000..5e80f3d090f3
--- /dev/null
+++ b/drivers/clk/clk-max9485.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+
+#include <dt-bindings/clock/maxim,max9485.h>
+
+#define MAX9485_NUM_CLKS 4
+
+/* This chip has only a single 8-bit register. */
+
+#define MAX9485_FS_12KHZ (0 << 0)
+#define MAX9485_FS_32KHZ (1 << 0)
+#define MAX9485_FS_44_1KHZ (2 << 0)
+#define MAX9485_FS_48KHZ (3 << 0)
+
+#define MAX9485_SCALE_256 (0 << 2)
+#define MAX9485_SCALE_384 (1 << 2)
+#define MAX9485_SCALE_768 (2 << 2)
+
+#define MAX9485_DOUBLE BIT(4)
+#define MAX9485_CLKOUT1_ENABLE BIT(5)
+#define MAX9485_CLKOUT2_ENABLE BIT(6)
+#define MAX9485_MCLK_ENABLE BIT(7)
+#define MAX9485_FREQ_MASK 0x1f
+
+struct max9485_rate {
+ unsigned long out;
+ u8 reg_value;
+};
+
+/*
+ * Ordered by frequency. For frequencies the hardware can generate with
+ * multiple settings, the one with the lowest jitter is listed first.
+ */
+static const struct max9485_rate max9485_rates[] = {
+ { 3072000, MAX9485_FS_12KHZ | MAX9485_SCALE_256 },
+ { 4608000, MAX9485_FS_12KHZ | MAX9485_SCALE_384 },
+ { 8192000, MAX9485_FS_32KHZ | MAX9485_SCALE_256 },
+ { 9216000, MAX9485_FS_12KHZ | MAX9485_SCALE_768 },
+ { 11289600, MAX9485_FS_44_1KHZ | MAX9485_SCALE_256 },
+ { 12288000, MAX9485_FS_48KHZ | MAX9485_SCALE_256 },
+ { 12288000, MAX9485_FS_32KHZ | MAX9485_SCALE_384 },
+ { 16384000, MAX9485_FS_32KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
+ { 16934400, MAX9485_FS_44_1KHZ | MAX9485_SCALE_384 },
+ { 18432000, MAX9485_FS_48KHZ | MAX9485_SCALE_384 },
+ { 22579200, MAX9485_FS_44_1KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
+ { 24576000, MAX9485_FS_48KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
+ { 24576000, MAX9485_FS_32KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
+ { 24576000, MAX9485_FS_32KHZ | MAX9485_SCALE_768 },
+ { 33868800, MAX9485_FS_44_1KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
+ { 33868800, MAX9485_FS_44_1KHZ | MAX9485_SCALE_768 },
+ { 36864000, MAX9485_FS_48KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
+ { 36864000, MAX9485_FS_48KHZ | MAX9485_SCALE_768 },
+ { 49152000, MAX9485_FS_32KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
+ { 67737600, MAX9485_FS_44_1KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
+ { 73728000, MAX9485_FS_48KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
+ { } /* sentinel */
+};
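Each table entry is simply fs times scale, doubled when MAX9485_DOUBLE is set: 44100 x 256 = 11289600 and 48000 x 768 x 2 = 73728000, for example. A throwaway helper expressing the rule (illustration only, not part of the driver):

static unsigned long max9485_rate_of(unsigned long fs, unsigned long scale,
				     bool doubled)
{
	/* e.g. max9485_rate_of(44100, 256, false) == 11289600 */
	return fs * scale * (doubled ? 2 : 1);
}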
+
+struct max9485_driver_data;
+
+struct max9485_clk_hw {
+ struct clk_hw hw;
+ struct clk_init_data init;
+ u8 enable_bit;
+ struct max9485_driver_data *drvdata;
+};
+
+struct max9485_driver_data {
+ struct clk *xclk;
+ struct i2c_client *client;
+ u8 reg_value;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+ struct max9485_clk_hw hw[MAX9485_NUM_CLKS];
+};
+
+static inline struct max9485_clk_hw *to_max9485_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct max9485_clk_hw, hw);
+}
+
+static int max9485_update_bits(struct max9485_driver_data *drvdata,
+ u8 mask, u8 value)
+{
+ int ret;
+
+ drvdata->reg_value &= ~mask;
+ drvdata->reg_value |= value;
+
+ dev_dbg(&drvdata->client->dev,
+ "updating mask 0x%02x value 0x%02x -> 0x%02x\n",
+ mask, value, drvdata->reg_value);
+
+ ret = i2c_master_send(drvdata->client,
+ &drvdata->reg_value,
+ sizeof(drvdata->reg_value));
+
+ return ret < 0 ? ret : 0;
+}
+
+static int max9485_clk_prepare(struct clk_hw *hw)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+
+ return max9485_update_bits(clk_hw->drvdata,
+ clk_hw->enable_bit,
+ clk_hw->enable_bit);
+}
+
+static void max9485_clk_unprepare(struct clk_hw *hw)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+
+ max9485_update_bits(clk_hw->drvdata, clk_hw->enable_bit, 0);
+}
+
+/*
+ * CLKOUT - configurable clock output
+ */
+static int max9485_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+ const struct max9485_rate *entry;
+
+ for (entry = max9485_rates; entry->out != 0; entry++)
+ if (entry->out == rate)
+ break;
+
+ if (entry->out == 0)
+ return -EINVAL;
+
+ return max9485_update_bits(clk_hw->drvdata,
+ MAX9485_FREQ_MASK,
+ entry->reg_value);
+}
+
+static unsigned long max9485_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+ struct max9485_driver_data *drvdata = clk_hw->drvdata;
+ u8 val = drvdata->reg_value & MAX9485_FREQ_MASK;
+ const struct max9485_rate *entry;
+
+ for (entry = max9485_rates; entry->out != 0; entry++)
+ if (val == entry->reg_value)
+ return entry->out;
+
+ return 0;
+}
+
+static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct max9485_rate *curr, *prev = NULL;
+
+ for (curr = max9485_rates; curr->out != 0; curr++) {
+ /* Exact matches */
+ if (curr->out == rate)
+ return rate;
+
+ /*
+ * Find the first entry that has a frequency higher than the
+ * requested one.
+ */
+ if (curr->out > rate) {
+ unsigned int mid;
+
+ /*
+ * If this is the first entry, clamp the value to the
+ * lowest possible frequency.
+ */
+ if (!prev)
+ return curr->out;
+
+ /*
+ * Otherwise, determine whether the previous entry or
+ * current one is closer.
+ */
+ mid = prev->out + ((curr->out - prev->out) / 2);
+
+ return (mid > rate) ? prev->out : curr->out;
+ }
+
+ prev = curr;
+ }
+
+ /* The requested rate exceeds even the highest entry; clamp to it */
+ return prev->out;
+}
+
+struct max9485_clk {
+ const char *name;
+ int parent_index;
+ const struct clk_ops ops;
+ u8 enable_bit;
+};
+
+static const struct max9485_clk max9485_clks[MAX9485_NUM_CLKS] = {
+ [MAX9485_MCLKOUT] = {
+ .name = "mclkout",
+ .parent_index = -1,
+ .enable_bit = MAX9485_MCLK_ENABLE,
+ .ops = {
+ .prepare = max9485_clk_prepare,
+ .unprepare = max9485_clk_unprepare,
+ },
+ },
+ [MAX9485_CLKOUT] = {
+ .name = "clkout",
+ .parent_index = -1,
+ .ops = {
+ .set_rate = max9485_clkout_set_rate,
+ .round_rate = max9485_clkout_round_rate,
+ .recalc_rate = max9485_clkout_recalc_rate,
+ },
+ },
+ [MAX9485_CLKOUT1] = {
+ .name = "clkout1",
+ .parent_index = MAX9485_CLKOUT,
+ .enable_bit = MAX9485_CLKOUT1_ENABLE,
+ .ops = {
+ .prepare = max9485_clk_prepare,
+ .unprepare = max9485_clk_unprepare,
+ },
+ },
+ [MAX9485_CLKOUT2] = {
+ .name = "clkout2",
+ .parent_index = MAX9485_CLKOUT,
+ .enable_bit = MAX9485_CLKOUT2_ENABLE,
+ .ops = {
+ .prepare = max9485_clk_prepare,
+ .unprepare = max9485_clk_unprepare,
+ },
+ },
+};
+
+static struct clk_hw *
+max9485_of_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct max9485_driver_data *drvdata = data;
+ unsigned int idx = clkspec->args[0];
+
+ return &drvdata->hw[idx].hw;
+}
+
+static int max9485_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max9485_driver_data *drvdata;
+ struct device *dev = &client->dev;
+ const char *xclk_name;
+ int i, ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->xclk = devm_clk_get(dev, "xclk");
+ if (IS_ERR(drvdata->xclk))
+ return PTR_ERR(drvdata->xclk);
+
+ xclk_name = __clk_get_name(drvdata->xclk);
+
+ drvdata->supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(drvdata->supply))
+ return PTR_ERR(drvdata->supply);
+
+ ret = regulator_enable(drvdata->supply);
+ if (ret < 0)
+ return ret;
+
+ drvdata->reset_gpio =
+ devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(drvdata->reset_gpio))
+ return PTR_ERR(drvdata->reset_gpio);
+
+ i2c_set_clientdata(client, drvdata);
+ drvdata->client = client;
+
+ ret = i2c_master_recv(drvdata->client, &drvdata->reg_value,
+ sizeof(drvdata->reg_value));
+ if (ret < 0) {
+ dev_warn(dev, "Unable to read device register: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < MAX9485_NUM_CLKS; i++) {
+ int parent_index = max9485_clks[i].parent_index;
+ const char *name;
+
+ if (of_property_read_string_index(dev->of_node,
+ "clock-output-names",
+ i, &name) == 0) {
+ drvdata->hw[i].init.name = name;
+ } else {
+ drvdata->hw[i].init.name = max9485_clks[i].name;
+ }
+
+ drvdata->hw[i].init.ops = &max9485_clks[i].ops;
+ drvdata->hw[i].init.num_parents = 1;
+ drvdata->hw[i].init.flags = 0;
+
+ if (parent_index > 0) {
+ drvdata->hw[i].init.parent_names =
+ &drvdata->hw[parent_index].init.name;
+ drvdata->hw[i].init.flags |= CLK_SET_RATE_PARENT;
+ } else {
+ drvdata->hw[i].init.parent_names = &xclk_name;
+ }
+
+ drvdata->hw[i].enable_bit = max9485_clks[i].enable_bit;
+ drvdata->hw[i].hw.init = &drvdata->hw[i].init;
+ drvdata->hw[i].drvdata = drvdata;
+
+ ret = devm_clk_hw_register(dev, &drvdata->hw[i].hw);
+ if (ret < 0)
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, max9485_of_clk_get, drvdata);
+}
+
+static int __maybe_unused max9485_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max9485_driver_data *drvdata = i2c_get_clientdata(client);
+
+ gpiod_set_value_cansleep(drvdata->reset_gpio, 0);
+
+ return 0;
+}
+
+static int __maybe_unused max9485_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max9485_driver_data *drvdata = i2c_get_clientdata(client);
+ int ret;
+
+ gpiod_set_value_cansleep(drvdata->reset_gpio, 1);
+
+ ret = i2c_master_send(client, &drvdata->reg_value,
+ sizeof(drvdata->reg_value));
+
+ return ret < 0 ? ret : 0;
+}
+
+static const struct dev_pm_ops max9485_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(max9485_suspend, max9485_resume)
+};
+
+static const struct of_device_id max9485_dt_ids[] = {
+ { .compatible = "maxim,max9485", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max9485_dt_ids);
+
+static const struct i2c_device_id max9485_i2c_ids[] = {
+ { .name = "max9485", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max9485_i2c_ids);
+
+static struct i2c_driver max9485_driver = {
+ .driver = {
+ .name = "max9485",
+ .pm = &max9485_pm_ops,
+ .of_match_table = max9485_dt_ids,
+ },
+ .probe = max9485_i2c_probe,
+ .id_table = max9485_i2c_ids,
+};
+module_i2c_driver(max9485_driver);
+
+MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>");
+MODULE_DESCRIPTION("MAX9485 Programmable Audio Clock Generator");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index bb2a6f2f5516..a985bf5e1ac6 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -38,7 +38,6 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- int step;
u64 fmin, fmax, ftmp;
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -60,9 +59,9 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
ftmp = rate - fmin;
ftmp += clk->info->range.step_size - 1; /* to round up */
- step = do_div(ftmp, clk->info->range.step_size);
+ do_div(ftmp, clk->info->range.step_size);
- return step * clk->info->range.step_size + fmin;
+ return ftmp * clk->info->range.step_size + fmin;
}
static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
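The fix above hinges on the do_div() contract: do_div(n, base) divides the 64-bit n in place and returns the 32-bit remainder, so the old code was scaling the remainder instead of the quotient. The same round-up-to-step logic in isolation (sketch):

#include <linux/types.h>
#include <asm/div64.h>

static u64 round_up_to_step(u64 rate, u64 fmin, u32 step)
{
	u64 n = rate - fmin + step - 1;	/* bias so the division rounds up */

	do_div(n, step);	/* quotient stays in n; do_div() returns the remainder */
	return n * step + fmin;	/* smallest step multiple >= rate */
}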
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 09b6718956bd..153b3a2b5857 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -74,6 +74,33 @@ static int si514_enable_output(struct clk_si514 *data, bool enable)
SI514_CONTROL_OE, enable ? SI514_CONTROL_OE : 0);
}
+static int si514_prepare(struct clk_hw *hw)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+
+ return si514_enable_output(data, true);
+}
+
+static void si514_unprepare(struct clk_hw *hw)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+
+ si514_enable_output(data, false);
+}
+
+static int si514_is_prepared(struct clk_hw *hw)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+ unsigned int val;
+ int err;
+
+ err = regmap_read(data->regmap, SI514_REG_CONTROL, &val);
+ if (err < 0)
+ return err;
+
+ return !!(val & SI514_CONTROL_OE);
+}
+
/* Retrieve clock multiplier and dividers from hardware */
static int si514_get_muldiv(struct clk_si514 *data,
struct clk_si514_muldiv *settings)
@@ -235,12 +262,17 @@ static int si514_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_si514 *data = to_clk_si514(hw);
struct clk_si514_muldiv settings;
+ unsigned int old_oe_state;
int err;
err = si514_calc_muldiv(&settings, rate);
if (err)
return err;
+ err = regmap_read(data->regmap, SI514_REG_CONTROL, &old_oe_state);
+ if (err)
+ return err;
+
si514_enable_output(data, false);
err = si514_set_muldiv(data, &settings);
@@ -255,12 +287,16 @@ static int si514_set_rate(struct clk_hw *hw, unsigned long rate,
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
- si514_enable_output(data, true);
+ if (old_oe_state & SI514_CONTROL_OE)
+ si514_enable_output(data, true);
return err;
}
static const struct clk_ops si514_clk_ops = {
+ .prepare = si514_prepare,
+ .unprepare = si514_unprepare,
+ .is_prepared = si514_is_prepared,
.recalc_rate = si514_recalc_rate,
.round_rate = si514_round_rate,
.set_rate = si514_set_rate,
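With .prepare/.unprepare now driving the output-enable bit and set_rate restoring the previous OE state, the output only toggles when a consumer asks for it. From the consumer side (sketch, hypothetical clock handle):

	ret = clk_set_rate(clk, 148500000);	/* reprograms muldiv; OE state preserved */
	if (ret)
		return ret;

	return clk_prepare_enable(clk);	/* .prepare sets SI514_CONTROL_OE */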
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index 1e2a3b8f9454..64e607f3232a 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -86,6 +86,33 @@ static int si544_enable_output(struct clk_si544 *data, bool enable)
SI544_OE_STATE_ODC_OE, enable ? SI544_OE_STATE_ODC_OE : 0);
}
+static int si544_prepare(struct clk_hw *hw)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+
+ return si544_enable_output(data, true);
+}
+
+static void si544_unprepare(struct clk_hw *hw)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+
+ si544_enable_output(data, false);
+}
+
+static int si544_is_prepared(struct clk_hw *hw)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+ unsigned int val;
+ int err;
+
+ err = regmap_read(data->regmap, SI544_REG_OE_STATE, &val);
+ if (err < 0)
+ return err;
+
+ return !!(val & SI544_OE_STATE_ODC_OE);
+}
+
/* Retrieve clock multiplier and dividers from hardware */
static int si544_get_muldiv(struct clk_si544 *data,
struct clk_si544_muldiv *settings)
@@ -273,6 +300,7 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_si544 *data = to_clk_si544(hw);
struct clk_si544_muldiv settings;
+ unsigned int old_oe_state;
int err;
if (!is_valid_frequency(data, rate))
@@ -282,6 +310,10 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
if (err)
return err;
+ err = regmap_read(data->regmap, SI544_REG_OE_STATE, &old_oe_state);
+ if (err)
+ return err;
+
si544_enable_output(data, false);
/* Allow FCAL for this frequency update */
@@ -303,12 +335,16 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
- si544_enable_output(data, true);
+ if (old_oe_state & SI544_OE_STATE_ODC_OE)
+ si544_enable_output(data, true);
return err;
}
static const struct clk_ops si544_clk_ops = {
+ .prepare = si544_prepare,
+ .unprepare = si544_unprepare,
+ .is_prepared = si544_is_prepared,
.recalc_rate = si544_recalc_rate,
.round_rate = si544_round_rate,
.set_rate = si544_set_rate,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9760b526ca31..d31055ae6ec6 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -24,7 +24,6 @@
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
-#include <linux/stringify.h>
#include "clk.h"
@@ -68,6 +67,7 @@ struct clk_core {
unsigned long max_rate;
unsigned long accuracy;
int phase;
+ struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
struct hlist_head clks;
@@ -691,6 +691,9 @@ static void clk_core_unprepare(struct clk_core *core)
"Unpreparing critical %s\n", core->name))
return;
+ if (core->flags & CLK_SET_RATE_GATE)
+ clk_core_rate_unprotect(core);
+
if (--core->prepare_count > 0)
return;
@@ -765,6 +768,16 @@ static int clk_core_prepare(struct clk_core *core)
core->prepare_count++;
+ /*
+ * CLK_SET_RATE_GATE is a special case of clock protection.
+ * Instead of a consumer claiming exclusive rate control, it is
+ * actually the provider which prevents any consumer from making any
+ * operation which could result in a rate change or rate glitch while
+ * the clock is prepared.
+ */
+ if (core->flags & CLK_SET_RATE_GATE)
+ clk_core_rate_protect(core);
+
return 0;
unprepare:
clk_core_unprepare(core->parent);
@@ -1888,9 +1901,6 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
if (clk_core_rate_is_protected(core))
return -EBUSY;
- if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
- return -EBUSY;
-
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(core, req_rate);
if (!top)
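The prepare_count test removed here is superseded by the rate-protection bookkeeping added to clk_core_prepare()/clk_core_unprepare() above: a prepared CLK_SET_RATE_GATE clock now holds a protection reference, so clk_core_set_rate_nolock() rejects it through the clk_core_rate_is_protected() check a few lines up. Observable consumer behaviour (sketch):

	clk_prepare_enable(clk);
	ret = clk_set_rate(clk, rate);	/* -EBUSY while prepared */
	clk_disable_unprepare(clk);
	ret = clk_set_rate(clk, rate);	/* permitted again */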
@@ -2402,6 +2412,172 @@ int clk_get_phase(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_phase);
+static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
+{
+ /* Assume a default value of 50% */
+ core->duty.num = 1;
+ core->duty.den = 2;
+}
+
+static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
+
+static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
+{
+ struct clk_duty *duty = &core->duty;
+ int ret = 0;
+
+ if (!core->ops->get_duty_cycle)
+ return clk_core_update_duty_cycle_parent_nolock(core);
+
+ ret = core->ops->get_duty_cycle(core->hw, duty);
+ if (ret)
+ goto reset;
+
+ /* Don't trust the clock provider too much */
+ if (duty->den == 0 || duty->num > duty->den) {
+ ret = -EINVAL;
+ goto reset;
+ }
+
+ return 0;
+
+reset:
+ clk_core_reset_duty_cycle_nolock(core);
+ return ret;
+}
+
+static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
+{
+ int ret = 0;
+
+ if (core->parent &&
+ core->flags & CLK_DUTY_CYCLE_PARENT) {
+ ret = clk_core_update_duty_cycle_nolock(core->parent);
+ memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
+ } else {
+ clk_core_reset_duty_cycle_nolock(core);
+ }
+
+ return ret;
+}
+
+static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
+ struct clk_duty *duty);
+
+static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
+ struct clk_duty *duty)
+{
+ int ret;
+
+ lockdep_assert_held(&prepare_lock);
+
+ if (clk_core_rate_is_protected(core))
+ return -EBUSY;
+
+ trace_clk_set_duty_cycle(core, duty);
+
+ if (!core->ops->set_duty_cycle)
+ return clk_core_set_duty_cycle_parent_nolock(core, duty);
+
+ ret = core->ops->set_duty_cycle(core->hw, duty);
+ if (!ret)
+ memcpy(&core->duty, duty, sizeof(*duty));
+
+ trace_clk_set_duty_cycle_complete(core, duty);
+
+ return ret;
+}
+
+static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
+ struct clk_duty *duty)
+{
+ int ret = 0;
+
+ if (core->parent &&
+ core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
+ ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
+ memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
+ }
+
+ return ret;
+}
+
+/**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Apply the duty cycle ratio if the ratio is valid and the clock can
+ * perform this operation.
+ *
+ * Returns (0) on success, a negative errno otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
+{
+ int ret;
+ struct clk_duty duty;
+
+ if (!clk)
+ return 0;
+
+ /* sanity check the ratio */
+ if (den == 0 || num > den)
+ return -EINVAL;
+
+ duty.num = num;
+ duty.den = den;
+
+ clk_prepare_lock();
+
+ if (clk->exclusive_count)
+ clk_core_rate_unprotect(clk->core);
+
+ ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
+
+ if (clk->exclusive_count)
+ clk_core_rate_protect(clk->core);
+
+ clk_prepare_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
+
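As a quick reference, here is a hedged consumer-side sketch of the clk_set_duty_cycle() call introduced above (the handle and ratio are illustrative):

#include <linux/clk.h>

/* Illustrative: request a 25% duty cycle (1/4) on a tdm/pwm-like clock. */
static int demo_set_duty(struct clk *clk)
{
	/*
	 * Returns -EINVAL for an invalid ratio (den == 0 or num > den)
	 * and -EBUSY if the clock rate is currently protected.
	 */
	return clk_set_duty_cycle(clk, 1, 4);
}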
+static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
+ unsigned int scale)
+{
+ struct clk_duty *duty = &core->duty;
+ int ret;
+
+ clk_prepare_lock();
+
+ ret = clk_core_update_duty_cycle_nolock(core);
+ if (!ret)
+ ret = mult_frac(scale, duty->num, duty->den);
+
+ clk_prepare_unlock();
+
+ return ret;
+}
+
+/**
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio of a clock node multiplied by the provided
+ * scaling factor, or negative errno on error.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
+{
+ if (!clk)
+ return 0;
+
+ return clk_core_get_scaled_duty_cycle(clk->core, scale);
+}
+EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
+
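And the read side: with scale = 100 the result is a percentage (sketch, hypothetical handle):

#include <linux/clk.h>

/* Illustrative: read the duty cycle back as an integer percentage. */
static int demo_get_duty_percent(struct clk *clk)
{
	/* e.g. 50 for the default 1/2 ratio; negative errno on failure */
	return clk_get_scaled_duty_cycle(clk, 100);
}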
/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
@@ -2455,12 +2631,13 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
if (!c)
return;
- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
+ seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
clk_core_get_rate(c), clk_core_get_accuracy(c),
- clk_core_get_phase(c));
+ clk_core_get_phase(c),
+ clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2482,9 +2659,9 @@ static int clk_summary_show(struct seq_file *s, void *data)
struct clk_core *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
- seq_puts(s, " enable prepare protect \n");
- seq_puts(s, " clock count count count rate accuracy phase\n");
- seq_puts(s, "----------------------------------------------------------------------------------------\n");
+ seq_puts(s, " enable prepare protect duty\n");
+ seq_puts(s, " clock count count count rate accuracy phase cycle\n");
+ seq_puts(s, "---------------------------------------------------------------------------------------------\n");
clk_prepare_lock();
@@ -2511,6 +2688,8 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+ seq_printf(s, "\"duty_cycle\": %u",
+ clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
@@ -2559,7 +2738,7 @@ static const struct {
unsigned long flag;
const char *name;
} clk_flags[] = {
-#define ENTRY(f) { f, __stringify(f) }
+#define ENTRY(f) { f, #f }
ENTRY(CLK_SET_RATE_GATE),
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),
@@ -2572,6 +2751,7 @@ static const struct {
ENTRY(CLK_SET_RATE_UNGATE),
ENTRY(CLK_IS_CRITICAL),
ENTRY(CLK_OPS_PARENT_ENABLE),
+ ENTRY(CLK_DUTY_CYCLE_PARENT),
#undef ENTRY
};
@@ -2610,6 +2790,17 @@ static int possible_parents_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(possible_parents);
+static int clk_duty_cycle_show(struct seq_file *s, void *data)
+{
+ struct clk_core *core = s->private;
+ struct clk_duty *duty = &core->duty;
+
+ seq_printf(s, "%u/%u\n", duty->num, duty->den);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
+
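With the default ratio, reading this new debugfs file returns "1/2"; the num/den pair is printed exactly as cached in the clock's struct clk_duty.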
static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
struct dentry *root;
@@ -2628,6 +2819,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
+ debugfs_create_file("clk_duty_cycle", 0444, root, core,
+ &clk_duty_cycle_fops);
if (core->num_parents > 1)
debugfs_create_file("clk_possible_parents", 0444, root, core,
@@ -2846,6 +3039,11 @@ static int __clk_core_init(struct clk_core *core)
core->phase = 0;
/*
+ * Set clk's duty cycle.
+ */
+ clk_core_update_duty_cycle_nolock(core);
+
+ /*
* Set clk's rate. The preferred method is to use .recalc_rate. For
* simple clocks and lazy developers the default fallback is to use the
* parent's rate. If a clock doesn't have a parent (or is orphaned)
@@ -2934,6 +3132,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
return clk;
}
+/* keep in sync with __clk_put */
void __clk_free_clk(struct clk *clk)
{
clk_prepare_lock();
@@ -3313,6 +3512,7 @@ int __clk_get(struct clk *clk)
return 1;
}
+/* keep in sync with __clk_free_clk */
void __clk_put(struct clk *clk)
{
struct module *owner;
@@ -3346,6 +3546,7 @@ void __clk_put(struct clk *clk)
module_put(owner);
+ kfree_const(clk->con_id);
kfree(clk);
}
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 7513411140b6..9ab3db8b3988 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -35,9 +35,6 @@ static struct clk *__of_clk_get(struct device_node *np, int index,
struct clk *clk;
int rc;
- if (index < 0)
- return ERR_PTR(-EINVAL);
-
rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
&clkspec);
if (rc)
@@ -199,7 +196,7 @@ struct clk *clk_get(struct device *dev, const char *con_id)
const char *dev_id = dev ? dev_name(dev) : NULL;
struct clk *clk;
- if (dev) {
+ if (dev && dev->of_node) {
clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
return clk;
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index aae62a5b8734..d1bbee19ed0f 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
if (IS_ERR(usb1)) {
- if (PTR_ERR(usb0) == -EPROBE_DEFER)
+ if (PTR_ERR(usb1) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",
diff --git a/drivers/clk/davinci/psc-da830.c b/drivers/clk/davinci/psc-da830.c
index 081b039fcb02..6481337382a6 100644
--- a/drivers/clk/davinci/psc-da830.c
+++ b/drivers/clk/davinci/psc-da830.c
@@ -14,6 +14,7 @@
#include "psc.h"
+LPSC_CLKDEV1(aemif_clkdev, NULL, "ti-aemif");
LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
LPSC_CLKDEV1(mmcsd_clkdev, NULL, "da830-mmc.0");
LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
@@ -22,7 +23,7 @@ static const struct davinci_lpsc_clk_info da830_psc0_info[] = {
LPSC(0, 0, tpcc, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
LPSC(1, 0, tptc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
LPSC(2, 0, tptc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(3, 0, aemif, pll0_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(3, 0, aemif, pll0_sysclk3, aemif_clkdev, LPSC_ALWAYS_ENABLED),
LPSC(4, 0, spi0, pll0_sysclk2, spi0_clkdev, 0),
LPSC(5, 0, mmcsd, pll0_sysclk2, mmcsd_clkdev, 0),
LPSC(6, 0, aintc, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
diff --git a/drivers/clk/davinci/psc-da850.c b/drivers/clk/davinci/psc-da850.c
index d196dcbed560..5a18bca464cd 100644
--- a/drivers/clk/davinci/psc-da850.c
+++ b/drivers/clk/davinci/psc-da850.c
@@ -16,8 +16,7 @@
#include "psc.h"
-LPSC_CLKDEV2(emifa_clkdev, NULL, "ti-aemif",
- "aemif", "davinci_nand.0");
+LPSC_CLKDEV1(emifa_clkdev, NULL, "ti-aemif");
LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "da830-mmc.0");
LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
diff --git a/drivers/clk/davinci/psc-dm365.c b/drivers/clk/davinci/psc-dm365.c
index 8c73086cc676..c75424f4ea3b 100644
--- a/drivers/clk/davinci/psc-dm365.c
+++ b/drivers/clk/davinci/psc-dm365.c
@@ -21,7 +21,8 @@ LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "da830-mmc.1");
LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
LPSC_CLKDEV1(spi2_clkdev, NULL, "spi_davinci.2");
-LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
+ NULL, "ti-aemif");
LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "da830-mmc.0");
LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
diff --git a/drivers/clk/davinci/psc-dm644x.c b/drivers/clk/davinci/psc-dm644x.c
index fc0230e3a3d6..0cea6e0bd5f0 100644
--- a/drivers/clk/davinci/psc-dm644x.c
+++ b/drivers/clk/davinci/psc-dm644x.c
@@ -21,7 +21,8 @@ LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
"fck", "davinci_mdio.0");
LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
-LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
+ NULL, "ti-aemif");
LPSC_CLKDEV1(mmcsd_clkdev, NULL, "dm6441-mmc.0");
LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
diff --git a/drivers/clk/davinci/psc-dm646x.c b/drivers/clk/davinci/psc-dm646x.c
index c3f82ed70a80..20012dc7471a 100644
--- a/drivers/clk/davinci/psc-dm646x.c
+++ b/drivers/clk/davinci/psc-dm646x.c
@@ -18,7 +18,8 @@
LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
"fck", "davinci_mdio.0");
-LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
+ NULL, "ti-aemif");
LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index 6a42529d31a9..cc5614567a70 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data;
#ifdef CONFIG_ARCH_DAVINCI_DM355
extern const struct davinci_psc_init_data dm355_psc_init_data;
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM356
+#ifdef CONFIG_ARCH_DAVINCI_DM365
extern const struct davinci_psc_init_data dm365_psc_init_data;
#endif
#ifdef CONFIG_ARCH_DAVINCI_DM644x
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index caa8bd40692c..fc8e782d817b 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/sizes.h>
#include <soc/imx/revision.h>
#include <dt-bindings/clock/imx5-clock.h>
@@ -175,13 +176,13 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_PER_ROOT] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
per_root_sel, ARRAY_SIZE(per_root_sel));
clk[IMX5_CLK_AHB] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
- clk[IMX5_CLK_AHB_MAX] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
- clk[IMX5_CLK_AIPS_TZ1] = imx_clk_gate2("aips_tz1", "ahb", MXC_CCM_CCGR0, 24);
- clk[IMX5_CLK_AIPS_TZ2] = imx_clk_gate2("aips_tz2", "ahb", MXC_CCM_CCGR0, 26);
- clk[IMX5_CLK_TMAX1] = imx_clk_gate2("tmax1", "ahb", MXC_CCM_CCGR1, 0);
- clk[IMX5_CLK_TMAX2] = imx_clk_gate2("tmax2", "ahb", MXC_CCM_CCGR1, 2);
- clk[IMX5_CLK_TMAX3] = imx_clk_gate2("tmax3", "ahb", MXC_CCM_CCGR1, 4);
- clk[IMX5_CLK_SPBA] = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
+ clk[IMX5_CLK_AHB_MAX] = imx_clk_gate2_flags("ahb_max", "ahb", MXC_CCM_CCGR0, 28, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_AIPS_TZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", MXC_CCM_CCGR0, 24, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_AIPS_TZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", MXC_CCM_CCGR0, 26, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_TMAX1] = imx_clk_gate2_flags("tmax1", "ahb", MXC_CCM_CCGR1, 0, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_TMAX2] = imx_clk_gate2_flags("tmax2", "ahb", MXC_CCM_CCGR1, 2, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_TMAX3] = imx_clk_gate2_flags("tmax3", "ahb", MXC_CCM_CCGR1, 4, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_SPBA] = imx_clk_gate2_flags("spba", "ipg", MXC_CCM_CCGR5, 0, CLK_IS_CRITICAL);
clk[IMX5_CLK_IPG] = imx_clk_divider("ipg", "ahb", MXC_CCM_CBCDR, 8, 2);
clk[IMX5_CLK_AXI_A] = imx_clk_divider("axi_a", "main_bus", MXC_CCM_CBCDR, 16, 3);
clk[IMX5_CLK_AXI_B] = imx_clk_divider("axi_b", "main_bus", MXC_CCM_CBCDR, 19, 3);
@@ -252,8 +253,8 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_ECSPI2_PER_GATE] = imx_clk_gate2("ecspi2_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 24);
clk[IMX5_CLK_CSPI_IPG_GATE] = imx_clk_gate2("cspi_ipg_gate", "ipg", MXC_CCM_CCGR4, 26);
clk[IMX5_CLK_SDMA_GATE] = imx_clk_gate2("sdma_gate", "ipg", MXC_CCM_CCGR4, 30);
- clk[IMX5_CLK_EMI_FAST_GATE] = imx_clk_gate2("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14);
- clk[IMX5_CLK_EMI_SLOW_GATE] = imx_clk_gate2("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16);
+ clk[IMX5_CLK_EMI_FAST_GATE] = imx_clk_gate2_flags("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_EMI_SLOW_GATE] = imx_clk_gate2_flags("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16, CLK_IS_CRITICAL);
clk[IMX5_CLK_IPU_SEL] = imx_clk_mux("ipu_sel", MXC_CCM_CBCMR, 6, 2, ipu_sel, ARRAY_SIZE(ipu_sel));
clk[IMX5_CLK_IPU_GATE] = imx_clk_gate2("ipu_gate", "ipu_sel", MXC_CCM_CCGR5, 10);
clk[IMX5_CLK_NFC_GATE] = imx_clk_gate2("nfc_gate", "nfc_podf", MXC_CCM_CCGR5, 20);
@@ -267,7 +268,7 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_VPU_SEL] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
clk[IMX5_CLK_VPU_GATE] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
clk[IMX5_CLK_VPU_REFERENCE_GATE] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
- clk[IMX5_CLK_GPC_DVFS] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);
+ clk[IMX5_CLK_GPC_DVFS] = imx_clk_gate2_flags("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24, CLK_IS_CRITICAL);
clk[IMX5_CLK_SSI_APM] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
clk[IMX5_CLK_SSI1_ROOT_SEL] = imx_clk_mux("ssi1_root_sel", MXC_CCM_CSCMR1, 14, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
@@ -316,21 +317,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
/* move usb phy clk to 24MHz */
clk_set_parent(clk[IMX5_CLK_USB_PHY_SEL], clk[IMX5_CLK_OSC]);
-
- clk_prepare_enable(clk[IMX5_CLK_GPC_DVFS]);
- clk_prepare_enable(clk[IMX5_CLK_AHB_MAX]); /* esdhc3 */
- clk_prepare_enable(clk[IMX5_CLK_AIPS_TZ1]);
- clk_prepare_enable(clk[IMX5_CLK_AIPS_TZ2]); /* fec */
- clk_prepare_enable(clk[IMX5_CLK_SPBA]);
- clk_prepare_enable(clk[IMX5_CLK_EMI_FAST_GATE]); /* fec */
- clk_prepare_enable(clk[IMX5_CLK_EMI_SLOW_GATE]); /* eim */
- clk_prepare_enable(clk[IMX5_CLK_MIPI_HSC1_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_MIPI_HSC2_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_MIPI_ESC_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_MIPI_HSP_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_TMAX1]);
- clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */
- clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */
}
static void __init mx50_clocks_init(struct device_node *np)
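The pattern in this and the following i.MX hunks replaces hand-rolled clk_prepare_enable() loops over "always on" clocks with the CLK_IS_CRITICAL flag, letting the core keep those clocks on from registration. Roughly, the core-side handling looks like the sketch below (paraphrased from __clk_core_init(), not part of this patch):

	/* Paraphrased: critical clocks are prepared and enabled at registration */
	if (core->flags & CLK_IS_CRITICAL) {
		unsigned long flags;

		clk_core_prepare(core);

		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}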
@@ -442,10 +428,10 @@ static void __init mx51_clocks_init(struct device_node *np)
clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
clk[IMX5_CLK_USB_PHY_GATE] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
clk[IMX5_CLK_HSI2C_GATE] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
- clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
- clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
- clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
- clk[IMX5_CLK_MIPI_HSP_GATE] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);
+ clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2_flags("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2_flags("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2_flags("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_MIPI_HSP_GATE] = imx_clk_gate2_flags("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12, CLK_IS_CRITICAL);
clk[IMX5_CLK_SPDIF_XTAL_SEL] = imx_clk_mux("spdif_xtal_sel", MXC_CCM_CSCMR1, 2, 2,
mx51_spdif_xtal_sel, ARRAY_SIZE(mx51_spdif_xtal_sel));
clk[IMX5_CLK_SPDIF1_SEL] = imx_clk_mux("spdif1_sel", MXC_CCM_CSCMR2, 2, 2,
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index b9ea7037e193..8c7c2fcb8d94 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -65,7 +65,7 @@ static const char *ipg_per_sels[] = { "ipg", "osc", };
static const char *ecspi_sels[] = { "pll3_60m", "osc", };
static const char *can_sels[] = { "pll3_60m", "osc", "pll3_80m", };
static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
- "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
+ "video_27m", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
"ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio_div", };
static const char *cko2_sels[] = {
"mmdc_ch0_axi", "mmdc_ch1_axi", "usdhc4", "usdhc1",
@@ -96,12 +96,6 @@ static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
static struct clk *clk[IMX6QDL_CLK_END];
static struct clk_onecell_data clk_data;
-static unsigned int const clks_init_on[] __initconst = {
- IMX6QDL_CLK_MMDC_CH0_AXI,
- IMX6QDL_CLK_ROM,
- IMX6QDL_CLK_ARM,
-};
-
static struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
@@ -417,7 +411,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *anatop_base, *base;
- int i;
int ret;
clk[IMX6QDL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -794,7 +787,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "mlb_podf", base + 0x74, 18);
else
clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "axi", base + 0x74, 18);
- clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20);
+ clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2_flags("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clk[IMX6QDL_CLK_MMDC_CH1_AXI] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22);
clk[IMX6QDL_CLK_OCRAM] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28);
clk[IMX6QDL_CLK_OPENVG_AXI] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30);
@@ -808,7 +801,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPMI_BCH] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26);
clk[IMX6QDL_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
clk[IMX6QDL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
- clk[IMX6QDL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
+ clk[IMX6QDL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
clk[IMX6QDL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clk[IMX6QDL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
@@ -878,9 +871,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
*/
clk_set_parent(clk[IMX6QDL_CLK_ENFC_SEL], clk[IMX6QDL_CLK_PLL2_PFD2_396M]);
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clk[clks_init_on[i]]);
-
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY1_GATE]);
clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY2_GATE]);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 66b1dd1cfad0..eb6bcbf345a3 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -104,10 +104,6 @@ static struct clk_onecell_data clk_data;
static void __iomem *ccm_base;
static void __iomem *anatop_base;
-static const u32 clks_init_on[] __initconst = {
- IMX6SL_CLK_IPG, IMX6SL_CLK_ARM, IMX6SL_CLK_MMDC_ROOT,
-};
-
/*
* ERR005311 CCM: After exit from WAIT mode, unwanted interrupt(s) taken
* during WAIT mode entry process could cause cache memory
@@ -195,7 +191,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
- int i;
int ret;
clks[IMX6SL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -426,13 +421,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
pr_warn("%s: failed to set AHB clock rate %d!\n",
__func__, ret);
- /*
- * Make sure those always on clocks are enabled to maintain the correct
- * usecount and enabling/disabling of parent PLLs.
- */
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clks[clks_init_on[i]]);
-
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
clk_prepare_enable(clks[IMX6SL_CLK_USBPHY1_GATE]);
clk_prepare_enable(clks[IMX6SL_CLK_USBPHY2_GATE]);
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
index 3651c77fbabe..52379ee49aec 100644
--- a/drivers/clk/imx/clk-imx6sll.c
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -92,6 +92,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sll-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!base);
/* Do not bypass PLLs initially */
@@ -253,6 +254,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_DCP] = imx_clk_gate2("dcp", "ahb", base + 0x68, 10);
clks[IMX6SLL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28);
clks[IMX6SLL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
+ clks[IMX6SLL_CLK_GPIO2] = imx_clk_gate2("gpio2", "ipg", base + 0x68, 30);
/* CCGR1 */
clks[IMX6SLL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -267,13 +269,17 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
clks[IMX6SLL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24);
+ clks[IMX6SLL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26);
+ clks[IMX6SLL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30);
/* CCGR2 */
+ clks[IMX6SLL_CLK_GPIO6] = imx_clk_gate2("gpio6", "ipg", base + 0x70, 0);
clks[IMX6SLL_CLK_CSI] = imx_clk_gate2("csi", "axi", base + 0x70, 2);
clks[IMX6SLL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6);
clks[IMX6SLL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8);
clks[IMX6SLL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6SLL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
+ clks[IMX6SLL_CLK_GPIO3] = imx_clk_gate2("gpio3", "ipg", base + 0x70, 26);
clks[IMX6SLL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
clks[IMX6SLL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30);
@@ -283,6 +289,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_EPDC_AXI] = imx_clk_gate2("epdc_aclk", "axi", base + 0x74, 4);
clks[IMX6SLL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_podf", base + 0x74, 4);
clks[IMX6SLL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
+ clks[IMX6SLL_CLK_GPIO4] = imx_clk_gate2("gpio4", "ipg", base + 0x74, 12);
clks[IMX6SLL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
clks[IMX6SLL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clks[IMX6SLL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index 10c771b91ef6..d9f2890ffe62 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -92,14 +92,6 @@ static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
static struct clk *clks[IMX6SX_CLK_CLK_END];
static struct clk_onecell_data clk_data;
-static int const clks_init_on[] __initconst = {
- IMX6SX_CLK_AIPS_TZ1, IMX6SX_CLK_AIPS_TZ2, IMX6SX_CLK_AIPS_TZ3,
- IMX6SX_CLK_IPMUX1, IMX6SX_CLK_IPMUX2, IMX6SX_CLK_IPMUX3,
- IMX6SX_CLK_WAKEUP, IMX6SX_CLK_MMDC_P0_FAST, IMX6SX_CLK_MMDC_P0_IPG,
- IMX6SX_CLK_ROM, IMX6SX_CLK_ARM, IMX6SX_CLK_IPG, IMX6SX_CLK_OCRAM,
- IMX6SX_CLK_PER2_MAIN, IMX6SX_CLK_PERCLK, IMX6SX_CLK_TZASC1,
-};
-
static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
@@ -142,7 +134,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
- int i;
clks[IMX6SX_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -332,7 +323,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_QSPI1_PODF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3);
clks[IMX6SX_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3);
clks[IMX6SX_CLK_LCDIF2_PODF] = imx_clk_divider("lcdif2_podf", "lcdif2_pred", base + 0x1c, 20, 3);
- clks[IMX6SX_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6);
+ clks[IMX6SX_CLK_PERCLK] = imx_clk_divider_flags("perclk", "perclk_sel", base + 0x1c, 0, 6, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_VID_PODF] = imx_clk_divider("vid_podf", "vid_sel", base + 0x20, 24, 2);
clks[IMX6SX_CLK_CAN_PODF] = imx_clk_divider("can_podf", "can_sel", base + 0x20, 2, 6);
clks[IMX6SX_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3);
@@ -380,8 +371,8 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
/* name parent_name reg shift */
/* CCGR0 */
- clks[IMX6SX_CLK_AIPS_TZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
- clks[IMX6SX_CLK_AIPS_TZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
+ clks[IMX6SX_CLK_AIPS_TZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_AIPS_TZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_APBH_DMA] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4);
clks[IMX6SX_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6SX_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
@@ -394,7 +385,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20);
clks[IMX6SX_CLK_DCIC1] = imx_clk_gate2("dcic1", "display_podf", base + 0x68, 24);
clks[IMX6SX_CLK_DCIC2] = imx_clk_gate2("dcic2", "display_podf", base + 0x68, 26);
- clks[IMX6SX_CLK_AIPS_TZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30);
+ clks[IMX6SX_CLK_AIPS_TZ3] = imx_clk_gate2_flags("aips_tz3", "ahb", base + 0x68, 30, CLK_IS_CRITICAL);
/* CCGR1 */
clks[IMX6SX_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -407,10 +398,11 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_ESAI_EXTAL] = imx_clk_gate2_shared("esai_extal", "esai_podf", base + 0x6c, 16, &share_count_esai);
clks[IMX6SX_CLK_ESAI_IPG] = imx_clk_gate2_shared("esai_ipg", "ahb", base + 0x6c, 16, &share_count_esai);
clks[IMX6SX_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai);
- clks[IMX6SX_CLK_WAKEUP] = imx_clk_gate2("wakeup", "ipg", base + 0x6c, 18);
+ clks[IMX6SX_CLK_WAKEUP] = imx_clk_gate2_flags("wakeup", "ipg", base + 0x6c, 18, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_GPT_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x6c, 20);
clks[IMX6SX_CLK_GPT_SERIAL] = imx_clk_gate2("gpt_serial", "perclk", base + 0x6c, 22);
clks[IMX6SX_CLK_GPU] = imx_clk_gate2("gpu", "gpu_core_podf", base + 0x6c, 26);
+ clks[IMX6SX_CLK_OCRAM_S] = imx_clk_gate2("ocram_s", "ahb", base + 0x6c, 28);
clks[IMX6SX_CLK_CANFD] = imx_clk_gate2("canfd", "can_podf", base + 0x6c, 30);
/* CCGR2 */
@@ -420,10 +412,10 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6SX_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
clks[IMX6SX_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif1_podf", base + 0x70, 14);
- clks[IMX6SX_CLK_IPMUX1] = imx_clk_gate2("ipmux1", "ahb", base + 0x70, 16);
- clks[IMX6SX_CLK_IPMUX2] = imx_clk_gate2("ipmux2", "ahb", base + 0x70, 18);
- clks[IMX6SX_CLK_IPMUX3] = imx_clk_gate2("ipmux3", "ahb", base + 0x70, 20);
- clks[IMX6SX_CLK_TZASC1] = imx_clk_gate2("tzasc1", "mmdc_podf", base + 0x70, 22);
+ clks[IMX6SX_CLK_IPMUX1] = imx_clk_gate2_flags("ipmux1", "ahb", base + 0x70, 16, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_IPMUX2] = imx_clk_gate2_flags("ipmux2", "ahb", base + 0x70, 18, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_IPMUX3] = imx_clk_gate2_flags("ipmux3", "ahb", base + 0x70, 20, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_TZASC1] = imx_clk_gate2_flags("tzasc1", "mmdc_podf", base + 0x70, 22, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "display_podf", base + 0x70, 28);
clks[IMX6SX_CLK_PXP_AXI] = imx_clk_gate2("pxp_axi", "display_podf", base + 0x70, 30);
@@ -437,15 +429,15 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12);
clks[IMX6SX_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14);
clks[IMX6SX_CLK_MLB] = imx_clk_gate2("mlb", "ahb", base + 0x74, 18);
- clks[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_gate2("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20);
- clks[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_gate2("mmdc_p0_ipg", "ipg", base + 0x74, 24);
- clks[IMX6SX_CLK_OCRAM] = imx_clk_gate2("ocram", "ocram_podf", base + 0x74, 28);
+ clks[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_gate2_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_OCRAM] = imx_clk_gate2_flags("ocram", "ocram_podf", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
clks[IMX6SX_CLK_PCIE_AXI] = imx_clk_gate2("pcie_axi", "display_podf", base + 0x78, 0);
clks[IMX6SX_CLK_QSPI2] = imx_clk_gate2("qspi2", "qspi2_podf", base + 0x78, 10);
clks[IMX6SX_CLK_PER1_BCH] = imx_clk_gate2("per1_bch", "usdhc3", base + 0x78, 12);
- clks[IMX6SX_CLK_PER2_MAIN] = imx_clk_gate2("per2_main", "ahb", base + 0x78, 14);
+ clks[IMX6SX_CLK_PER2_MAIN] = imx_clk_gate2_flags("per2_main", "ahb", base + 0x78, 14, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16);
clks[IMX6SX_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18);
clks[IMX6SX_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20);
@@ -456,7 +448,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
/* CCGR5 */
- clks[IMX6SX_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
+ clks[IMX6SX_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clks[IMX6SX_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
clks[IMX6SX_CLK_AUDIO] = imx_clk_gate2_shared("audio", "audio_podf", base + 0x7c, 14, &share_count_audio);
@@ -502,9 +494,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clk_data.clk_num = ARRAY_SIZE(clks);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clks[clks_init_on[i]]);
-
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
clk_prepare_enable(clks[IMX6SX_CLK_USBPHY1_GATE]);
clk_prepare_enable(clks[IMX6SX_CLK_USBPHY2_GATE]);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index ba563ba50b40..361b43f9742e 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -79,12 +79,6 @@ static const char *cko_sels[] = { "cko1", "cko2", };
static struct clk *clks[IMX6UL_CLK_END];
static struct clk_onecell_data clk_data;
-static int const clks_init_on[] __initconst = {
- IMX6UL_CLK_AIPSTZ1, IMX6UL_CLK_AIPSTZ2,
- IMX6UL_CLK_AXI, IMX6UL_CLK_ARM, IMX6UL_CLK_ROM,
- IMX6UL_CLK_MMDC_P0_FAST, IMX6UL_CLK_MMDC_P0_IPG,
-};
-
static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
@@ -129,7 +123,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
- int i;
clks[IMX6UL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -142,6 +135,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!base);
clks[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
@@ -336,8 +330,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
/* CCGR0 */
- clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
- clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
+ clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
+ clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4);
clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
@@ -360,6 +354,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
if (clk_on_imx6ull())
clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x80, 18);
+ clks[IMX6UL_CLK_GPIO2] = imx_clk_gate2("gpio2", "ipg", base + 0x68, 30);
/* CCGR1 */
clks[IMX6UL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -376,6 +371,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_GPT1_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
clks[IMX6UL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
clks[IMX6UL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serial", "uart_podf", base + 0x6c, 24);
+ clks[IMX6UL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26);
+ clks[IMX6UL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30);
/* CCGR2 */
if (clk_on_imx6ull()) {
@@ -389,6 +386,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6UL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
clks[IMX6UL_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif_podf", base + 0x70, 14);
+ clks[IMX6UL_CLK_GPIO3] = imx_clk_gate2("gpio3", "ipg", base + 0x70, 26);
clks[IMX6UL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
clks[IMX6UL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30);
@@ -405,11 +403,12 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART6_IPG] = imx_clk_gate2("uart6_ipg", "ipg", base + 0x74, 6);
clks[IMX6UL_CLK_UART6_SERIAL] = imx_clk_gate2("uart6_serial", "uart_podf", base + 0x74, 6);
clks[IMX6UL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
+ clks[IMX6UL_CLK_GPIO4] = imx_clk_gate2("gpio4", "ipg", base + 0x74, 12);
clks[IMX6UL_CLK_QSPI] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14);
clks[IMX6UL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
- clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20);
- clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2("mmdc_p0_ipg", "ipg", base + 0x74, 24);
- clks[IMX6UL_CLK_AXI] = imx_clk_gate("axi", "axi_podf", base + 0x74, 28);
+ clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
+ clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+ clks[IMX6UL_CLK_AXI] = imx_clk_gate_flags("axi", "axi_podf", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
clks[IMX6UL_CLK_PER_BCH] = imx_clk_gate2("per_bch", "bch_podf", base + 0x78, 12);
@@ -423,7 +422,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "bch_podf", base + 0x78, 30);
/* CCGR5 */
- clks[IMX6UL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
+ clks[IMX6UL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clks[IMX6UL_CLK_KPP] = imx_clk_gate2("kpp", "ipg", base + 0x7c, 8);
clks[IMX6UL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10);
@@ -497,10 +496,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_rate(clks[IMX6UL_CLK_ENET2_REF], 50000000);
clk_set_rate(clks[IMX6UL_CLK_CSI], 24000000);
- /* keep all the clks on just for bringup */
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clks[clks_init_on[i]]);
-
if (clk_on_imx6ull())
clk_prepare_enable(clks[IMX6UL_CLK_AIPSTZ3]);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 27217a7ea17e..881b772c4ac9 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -797,6 +797,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate4("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0);
clks[IMX7D_OCOTP_CLK] = imx_clk_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0);
clks[IMX7D_SNVS_CLK] = imx_clk_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0);
+ clks[IMX7D_MU_ROOT_CLK] = imx_clk_gate4("mu_root_clk", "ipg_root_clk", base + 0x4270, 0);
clks[IMX7D_CAAM_CLK] = imx_clk_gate4("caam_clk", "ipg_root_clk", base + 0x4240, 0);
clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4690, 0);
clks[IMX7D_SDMA_CORE_CLK] = imx_clk_gate4("sdma_root_clk", "ahb_root_clk", base + 0x4480, 0);
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 32fcc75f6f77..4479c102e899 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -134,7 +134,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
"i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 31, 1 },
- .div = { CGU_REG_I2SCDR, 0, 1, 8, -1, -1, -1 },
+ .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 },
.gate = { CGU_REG_CLKGR, 6 },
},
@@ -161,7 +161,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
},
[JZ4740_CLK_UDC] = {
- "udc", CGU_CLK_MUX | CGU_CLK_DIV,
+ "udc", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 29, 1 },
.div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 },
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 815659eebea3..efaa70f682b4 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -1,13 +1,19 @@
config COMMON_CLK_AMLOGIC
bool
- depends on OF
depends on ARCH_MESON || COMPILE_TEST
+ select COMMON_CLK_REGMAP_MESON
+
+config COMMON_CLK_AMLOGIC_AUDIO
+ bool
+ depends on ARCH_MESON || COMPILE_TEST
+ select COMMON_CLK_AMLOGIC
config COMMON_CLK_MESON_AO
bool
depends on OF
depends on ARCH_MESON || COMPILE_TEST
select COMMON_CLK_REGMAP_MESON
+ select RESET_CONTROLLER
config COMMON_CLK_REGMAP_MESON
bool
@@ -15,9 +21,8 @@ config COMMON_CLK_REGMAP_MESON
config COMMON_CLK_MESON8B
bool
- depends on COMMON_CLK_AMLOGIC
+ select COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
- select COMMON_CLK_REGMAP_MESON
help
Support for the clock controller on AmLogic S802 (Meson8),
S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you
@@ -25,10 +30,8 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
bool
- depends on COMMON_CLK_AMLOGIC
- select RESET_CONTROLLER
+ select COMMON_CLK_AMLOGIC
select COMMON_CLK_MESON_AO
- select COMMON_CLK_REGMAP_MESON
select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -36,11 +39,18 @@ config COMMON_CLK_GXBB
config COMMON_CLK_AXG
bool
- depends on COMMON_CLK_AMLOGIC
- select RESET_CONTROLLER
+ select COMMON_CLK_AMLOGIC
select COMMON_CLK_MESON_AO
- select COMMON_CLK_REGMAP_MESON
select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
Say Y if you want peripherals and CPU frequency scaling to work.
+
+config COMMON_CLK_AXG_AUDIO
+ tristate "Meson AXG Audio Clock Controller Driver"
+ depends on COMMON_CLK_AXG
+ select COMMON_CLK_AMLOGIC_AUDIO
+ select MFD_SYSCON
+ help
+ Support for the audio clock controller on AmLogic A113D devices,
+ aka axg. Say Y if you want the audio subsystem to work.
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index d0d13aeb369a..72ec8c40d848 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -2,9 +2,11 @@
# Makefile for Meson specific clk
#
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-audio-divider.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-phase.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC_AUDIO) += clk-triphase.o sclk-div.o
obj-$(CONFIG_COMMON_CLK_MESON_AO) += meson-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o
obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
+obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
new file mode 100644
index 000000000000..a0ed41e73bde
--- /dev/null
+++ b/drivers/clk/meson/axg-audio.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "clkc-audio.h"
+#include "axg-audio.h"
+
+#define AXG_MST_IN_COUNT 8
+#define AXG_SLV_SCLK_COUNT 10
+#define AXG_SLV_LRCLK_COUNT 10
+
+#define AXG_AUD_GATE(_name, _reg, _bit, _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct clk_regmap_gate_data){ \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_"#_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_names = (const char *[]){ _pname }, \
+ .num_parents = 1, \
+ .flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
+ }, \
+}
+
+#define AXG_AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pnames, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct clk_regmap_mux_data){ \
+ .offset = (_reg), \
+ .mask = (_mask), \
+ .shift = (_shift), \
+ .flags = (_dflags), \
+ }, \
+ .hw.init = &(struct clk_init_data){ \
+ .name = "axg_"#_name, \
+ .ops = &clk_regmap_mux_ops, \
+ .parent_names = (_pnames), \
+ .num_parents = ARRAY_SIZE(_pnames), \
+ .flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
+ }, \
+}
+
+#define AXG_AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct clk_regmap_div_data){ \
+ .offset = (_reg), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .flags = (_dflags), \
+ }, \
+ .hw.init = &(struct clk_init_data){ \
+ .name = "axg_"#_name, \
+ .ops = &clk_regmap_divider_ops, \
+ .parent_names = (const char *[]) { _pname }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define AXG_PCLK_GATE(_name, _bit) \
+ AXG_AUD_GATE(_name, AUDIO_CLK_GATE_EN, _bit, "axg_audio_pclk", 0)
+
+/* Audio peripheral clocks */
+static AXG_PCLK_GATE(ddr_arb, 0);
+static AXG_PCLK_GATE(pdm, 1);
+static AXG_PCLK_GATE(tdmin_a, 2);
+static AXG_PCLK_GATE(tdmin_b, 3);
+static AXG_PCLK_GATE(tdmin_c, 4);
+static AXG_PCLK_GATE(tdmin_lb, 5);
+static AXG_PCLK_GATE(tdmout_a, 6);
+static AXG_PCLK_GATE(tdmout_b, 7);
+static AXG_PCLK_GATE(tdmout_c, 8);
+static AXG_PCLK_GATE(frddr_a, 9);
+static AXG_PCLK_GATE(frddr_b, 10);
+static AXG_PCLK_GATE(frddr_c, 11);
+static AXG_PCLK_GATE(toddr_a, 12);
+static AXG_PCLK_GATE(toddr_b, 13);
+static AXG_PCLK_GATE(toddr_c, 14);
+static AXG_PCLK_GATE(loopback, 15);
+static AXG_PCLK_GATE(spdifin, 16);
+static AXG_PCLK_GATE(spdifout, 17);
+static AXG_PCLK_GATE(resample, 18);
+static AXG_PCLK_GATE(power_detect, 19);
+
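For readers tracing the macro stack, AXG_PCLK_GATE(pdm, 1) above expands through AXG_AUD_GATE() to roughly the following definition (expansion written out by hand for illustration):

static struct clk_regmap axg_pdm = {
	.data = &(struct clk_regmap_gate_data){
		.offset = AUDIO_CLK_GATE_EN,
		.bit_idx = 1,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_pdm",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "axg_audio_pclk" },
		.num_parents = 1,
		.flags = CLK_DUTY_CYCLE_PARENT,
	},
};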
+/* Audio Master Clocks */
+static const char * const mst_mux_parent_names[] = {
+ "axg_mst_in0", "axg_mst_in1", "axg_mst_in2", "axg_mst_in3",
+ "axg_mst_in4", "axg_mst_in5", "axg_mst_in6", "axg_mst_in7",
+};
+
+#define AXG_MST_MCLK_MUX(_name, _reg) \
+ AXG_AUD_MUX(_name##_sel, _reg, 0x7, 24, CLK_MUX_ROUND_CLOSEST, \
+ mst_mux_parent_names, CLK_SET_RATE_PARENT)
+
+static AXG_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AXG_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AXG_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AXG_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AXG_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AXG_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AXG_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AXG_MST_MCLK_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AXG_MST_MCLK_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+
+#define AXG_MST_MCLK_DIV(_name, _reg) \
+ AXG_AUD_DIV(_name##_div, _reg, 0, 16, CLK_DIVIDER_ROUND_CLOSEST, \
+ "axg_"#_name"_sel", CLK_SET_RATE_PARENT) \
+
+static AXG_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AXG_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AXG_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AXG_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AXG_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AXG_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AXG_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AXG_MST_MCLK_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AXG_MST_MCLK_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+
+#define AXG_MST_MCLK_GATE(_name, _reg) \
+ AXG_AUD_GATE(_name, _reg, 31, "axg_"#_name"_div", \
+ CLK_SET_RATE_PARENT)
+
+static AXG_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AXG_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AXG_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AXG_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AXG_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AXG_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AXG_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AXG_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AXG_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+
+/* Sample Clocks */
+#define AXG_MST_SCLK_PRE_EN(_name, _reg) \
+ AXG_AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
+ "axg_mst_"#_name"_mclk", 0)
+
+static AXG_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \
+ _hi_shift, _hi_width, _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct meson_sclk_div_data) { \
+ .div = { \
+ .reg_off = (_reg), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ }, \
+ .hi = { \
+ .reg_off = (_reg), \
+ .shift = (_hi_shift), \
+ .width = (_hi_width), \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_"#_name, \
+ .ops = &meson_sclk_div_ops, \
+ .parent_names = (const char *[]) { _pname }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define AXG_MST_SCLK_DIV(_name, _reg) \
+ AXG_AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
+ "axg_mst_"#_name"_sclk_pre_en", \
+ CLK_SET_RATE_PARENT)
+
+static AXG_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_MST_SCLK_POST_EN(_name, _reg) \
+ AXG_AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
+ "axg_mst_"#_name"_sclk_div", CLK_SET_RATE_PARENT)
+
+static AXG_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
+ _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct meson_clk_triphase_data) { \
+ .ph0 = { \
+ .reg_off = (_reg), \
+ .shift = (_shift0), \
+ .width = (_width), \
+ }, \
+ .ph1 = { \
+ .reg_off = (_reg), \
+ .shift = (_shift1), \
+ .width = (_width), \
+ }, \
+ .ph2 = { \
+ .reg_off = (_reg), \
+ .shift = (_shift2), \
+ .width = (_width), \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_"#_name, \
+ .ops = &meson_clk_triphase_ops, \
+ .parent_names = (const char *[]) { _pname }, \
+ .num_parents = 1, \
+ .flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
+ }, \
+}
+
+#define AXG_MST_SCLK(_name, _reg) \
+ AXG_AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \
+ "axg_mst_"#_name"_sclk_post_en", CLK_SET_RATE_PARENT)
+
+static AXG_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static AXG_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static AXG_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static AXG_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static AXG_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static AXG_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+#define AXG_MST_LRCLK_DIV(_name, _reg) \
+ AXG_AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \
+ "axg_mst_"#_name"_sclk_post_en", 0) \
+
+static AXG_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_MST_LRCLK(_name, _reg) \
+ AXG_AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \
+ "axg_mst_"#_name"_lrclk_div", CLK_SET_RATE_PARENT)
+
+static AXG_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static AXG_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static AXG_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static AXG_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static AXG_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static AXG_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+static const char * const tdm_sclk_parent_names[] = {
+ "axg_mst_a_sclk", "axg_mst_b_sclk", "axg_mst_c_sclk",
+ "axg_mst_d_sclk", "axg_mst_e_sclk", "axg_mst_f_sclk",
+ "axg_slv_sclk0", "axg_slv_sclk1", "axg_slv_sclk2",
+ "axg_slv_sclk3", "axg_slv_sclk4", "axg_slv_sclk5",
+ "axg_slv_sclk6", "axg_slv_sclk7", "axg_slv_sclk8",
+ "axg_slv_sclk9"
+};
+
+#define AXG_TDM_SCLK_MUX(_name, _reg) \
+ AXG_AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
+ CLK_MUX_ROUND_CLOSEST, \
+ tdm_sclk_parent_names, 0)
+
+static AXG_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AXG_TDM_SCLK_PRE_EN(_name, _reg) \
+ AXG_AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
+ "axg_tdm"#_name"_sclk_sel", CLK_SET_RATE_PARENT)
+
+static AXG_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AXG_TDM_SCLK_POST_EN(_name, _reg) \
+ AXG_AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
+ "axg_tdm"#_name"_sclk_pre_en", CLK_SET_RATE_PARENT)
+
+static AXG_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AXG_TDM_SCLK(_name, _reg) \
+ struct clk_regmap axg_tdm##_name##_sclk = { \
+ .data = &(struct meson_clk_phase_data) { \
+ .ph = { \
+ .reg_off = (_reg), \
+ .shift = 29, \
+ .width = 1, \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_tdm"#_name"_sclk", \
+ .ops = &meson_clk_phase_ops, \
+ .parent_names = (const char *[]) \
+ { "axg_tdm"#_name"_sclk_post_en" }, \
+ .num_parents = 1, \
+ .flags = CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT, \
+ }, \
+}
+
+static AXG_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static const char * const tdm_lrclk_parent_names[] = {
+ "axg_mst_a_lrclk", "axg_mst_b_lrclk", "axg_mst_c_lrclk",
+ "axg_mst_d_lrclk", "axg_mst_e_lrclk", "axg_mst_f_lrclk",
+ "axg_slv_lrclk0", "axg_slv_lrclk1", "axg_slv_lrclk2",
+ "axg_slv_lrclk3", "axg_slv_lrclk4", "axg_slv_lrclk5",
+ "axg_slv_lrclk6", "axg_slv_lrclk7", "axg_slv_lrclk8",
+ "axg_slv_lrclk9"
+};
+
+#define AXG_TDM_LRLCK(_name, _reg) \
+ AXG_AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
+ CLK_MUX_ROUND_CLOSEST, \
+ tdm_lrclk_parent_names, 0)
+
+static AXG_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+/*
+ * Array of all clocks provided by this controller.
+ * The input clocks of the controller will be populated at runtime.
+ */
+static struct clk_hw_onecell_data axg_audio_hw_onecell_data = {
+ .hws = {
+ [AUD_CLKID_DDR_ARB] = &axg_ddr_arb.hw,
+ [AUD_CLKID_PDM] = &axg_pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &axg_tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &axg_tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &axg_tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &axg_tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &axg_tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &axg_tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &axg_tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &axg_frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &axg_frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &axg_frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &axg_toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &axg_toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &axg_toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &axg_loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &axg_spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &axg_spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &axg_resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &axg_power_detect.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &axg_mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &axg_mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &axg_mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &axg_mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &axg_mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &axg_mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &axg_mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &axg_mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &axg_mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &axg_mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &axg_mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &axg_mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &axg_mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &axg_mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &axg_mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &axg_mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &axg_mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &axg_mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &axg_spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &axg_spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &axg_spdifout_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &axg_spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &axg_spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &axg_spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &axg_pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &axg_pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &axg_pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &axg_pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &axg_pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &axg_pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &axg_mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &axg_mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &axg_mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &axg_mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &axg_mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &axg_mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &axg_mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &axg_mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &axg_mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &axg_mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &axg_mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &axg_mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &axg_mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &axg_mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &axg_mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &axg_mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &axg_mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &axg_mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &axg_mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &axg_mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &axg_mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &axg_mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &axg_mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &axg_mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &axg_mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &axg_mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &axg_mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &axg_mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &axg_mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &axg_mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &axg_mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &axg_mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &axg_mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &axg_mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &axg_mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &axg_mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &axg_tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &axg_tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &axg_tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &axg_tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &axg_tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &axg_tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &axg_tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &axg_tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &axg_tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &axg_tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &axg_tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &axg_tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &axg_tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &axg_tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &axg_tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &axg_tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &axg_tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &axg_tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &axg_tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &axg_tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &axg_tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &axg_tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &axg_tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &axg_tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &axg_tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &axg_tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &axg_tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &axg_tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &axg_tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &axg_tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &axg_tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &axg_tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &axg_tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &axg_tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &axg_tdmout_c_lrclk.hw,
+ [NR_CLKS] = NULL,
+ },
+ .num = NR_CLKS,
+};
+
+/* Convenience table to populate regmap in .probe() */
+static struct clk_regmap *const axg_audio_clk_regmaps[] = {
+ &axg_ddr_arb,
+ &axg_pdm,
+ &axg_tdmin_a,
+ &axg_tdmin_b,
+ &axg_tdmin_c,
+ &axg_tdmin_lb,
+ &axg_tdmout_a,
+ &axg_tdmout_b,
+ &axg_tdmout_c,
+ &axg_frddr_a,
+ &axg_frddr_b,
+ &axg_frddr_c,
+ &axg_toddr_a,
+ &axg_toddr_b,
+ &axg_toddr_c,
+ &axg_loopback,
+ &axg_spdifin,
+ &axg_spdifout,
+ &axg_resample,
+ &axg_power_detect,
+ &axg_mst_a_mclk_sel,
+ &axg_mst_b_mclk_sel,
+ &axg_mst_c_mclk_sel,
+ &axg_mst_d_mclk_sel,
+ &axg_mst_e_mclk_sel,
+ &axg_mst_f_mclk_sel,
+ &axg_mst_a_mclk_div,
+ &axg_mst_b_mclk_div,
+ &axg_mst_c_mclk_div,
+ &axg_mst_d_mclk_div,
+ &axg_mst_e_mclk_div,
+ &axg_mst_f_mclk_div,
+ &axg_mst_a_mclk,
+ &axg_mst_b_mclk,
+ &axg_mst_c_mclk,
+ &axg_mst_d_mclk,
+ &axg_mst_e_mclk,
+ &axg_mst_f_mclk,
+ &axg_spdifout_clk_sel,
+ &axg_spdifout_clk_div,
+ &axg_spdifout_clk,
+ &axg_spdifin_clk_sel,
+ &axg_spdifin_clk_div,
+ &axg_spdifin_clk,
+ &axg_pdm_dclk_sel,
+ &axg_pdm_dclk_div,
+ &axg_pdm_dclk,
+ &axg_pdm_sysclk_sel,
+ &axg_pdm_sysclk_div,
+ &axg_pdm_sysclk,
+ &axg_mst_a_sclk_pre_en,
+ &axg_mst_b_sclk_pre_en,
+ &axg_mst_c_sclk_pre_en,
+ &axg_mst_d_sclk_pre_en,
+ &axg_mst_e_sclk_pre_en,
+ &axg_mst_f_sclk_pre_en,
+ &axg_mst_a_sclk_div,
+ &axg_mst_b_sclk_div,
+ &axg_mst_c_sclk_div,
+ &axg_mst_d_sclk_div,
+ &axg_mst_e_sclk_div,
+ &axg_mst_f_sclk_div,
+ &axg_mst_a_sclk_post_en,
+ &axg_mst_b_sclk_post_en,
+ &axg_mst_c_sclk_post_en,
+ &axg_mst_d_sclk_post_en,
+ &axg_mst_e_sclk_post_en,
+ &axg_mst_f_sclk_post_en,
+ &axg_mst_a_sclk,
+ &axg_mst_b_sclk,
+ &axg_mst_c_sclk,
+ &axg_mst_d_sclk,
+ &axg_mst_e_sclk,
+ &axg_mst_f_sclk,
+ &axg_mst_a_lrclk_div,
+ &axg_mst_b_lrclk_div,
+ &axg_mst_c_lrclk_div,
+ &axg_mst_d_lrclk_div,
+ &axg_mst_e_lrclk_div,
+ &axg_mst_f_lrclk_div,
+ &axg_mst_a_lrclk,
+ &axg_mst_b_lrclk,
+ &axg_mst_c_lrclk,
+ &axg_mst_d_lrclk,
+ &axg_mst_e_lrclk,
+ &axg_mst_f_lrclk,
+ &axg_tdmin_a_sclk_sel,
+ &axg_tdmin_b_sclk_sel,
+ &axg_tdmin_c_sclk_sel,
+ &axg_tdmin_lb_sclk_sel,
+ &axg_tdmout_a_sclk_sel,
+ &axg_tdmout_b_sclk_sel,
+ &axg_tdmout_c_sclk_sel,
+ &axg_tdmin_a_sclk_pre_en,
+ &axg_tdmin_b_sclk_pre_en,
+ &axg_tdmin_c_sclk_pre_en,
+ &axg_tdmin_lb_sclk_pre_en,
+ &axg_tdmout_a_sclk_pre_en,
+ &axg_tdmout_b_sclk_pre_en,
+ &axg_tdmout_c_sclk_pre_en,
+ &axg_tdmin_a_sclk_post_en,
+ &axg_tdmin_b_sclk_post_en,
+ &axg_tdmin_c_sclk_post_en,
+ &axg_tdmin_lb_sclk_post_en,
+ &axg_tdmout_a_sclk_post_en,
+ &axg_tdmout_b_sclk_post_en,
+ &axg_tdmout_c_sclk_post_en,
+ &axg_tdmin_a_sclk,
+ &axg_tdmin_b_sclk,
+ &axg_tdmin_c_sclk,
+ &axg_tdmin_lb_sclk,
+ &axg_tdmout_a_sclk,
+ &axg_tdmout_b_sclk,
+ &axg_tdmout_c_sclk,
+ &axg_tdmin_a_lrclk,
+ &axg_tdmin_b_lrclk,
+ &axg_tdmin_c_lrclk,
+ &axg_tdmin_lb_lrclk,
+ &axg_tdmout_a_lrclk,
+ &axg_tdmout_b_lrclk,
+ &axg_tdmout_c_lrclk,
+};
+
+static struct clk *devm_clk_get_enable(struct device *dev, char *id)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s", id);
+ return clk;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to enable %s", id);
+ return ERR_PTR(ret);
+ }
+
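+ /* Make sure the clock is disabled and unprepared when the device detaches */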
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret) {
+ dev_err(dev, "failed to add reset action on %s", id);
+ return ERR_PTR(ret);
+ }
+
+ return clk;
+}
+
+static const struct clk_ops axg_clk_no_ops = {};
+
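+/*
+ * Register a simple pass-through clock with no operations. It is used
+ * to expose the controller input clocks within this clock provider.
+ */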
+static struct clk_hw *axg_clk_hw_register_bypass(struct device *dev,
+ const char *name,
+ const char *parent_name)
+{
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ char *clk_name;
+ int ret;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ clk_name = kasprintf(GFP_KERNEL, "axg_%s", name);
+ if (!clk_name)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = clk_name;
+ init.ops = &axg_clk_no_ops;
+ init.flags = 0;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, hw);
+ kfree(clk_name);
+
+ return ret ? ERR_PTR(ret) : hw;
+}
+
+static int axg_register_clk_hw_input(struct device *dev,
+ const char *name,
+ unsigned int clkid)
+{
+ struct clk *parent_clk = devm_clk_get(dev, name);
+ struct clk_hw *hw = NULL;
+
+ if (IS_ERR(parent_clk)) {
+ int err = PTR_ERR(parent_clk);
+
+ /* It is ok if an input clock is missing */
+ if (err == -ENOENT) {
+ dev_dbg(dev, "%s not provided", name);
+ } else {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s clock", name);
+ return err;
+ }
+ } else {
+ hw = axg_clk_hw_register_bypass(dev, name,
+ __clk_get_name(parent_clk));
+ }
+
+ if (IS_ERR(hw)) {
+ dev_err(dev, "failed to register %s clock", name);
+ return PTR_ERR(hw);
+ }
+
+ axg_audio_hw_onecell_data.hws[clkid] = hw;
+ return 0;
+}
+
+static int axg_register_clk_hw_inputs(struct device *dev,
+ const char *basename,
+ unsigned int count,
+ unsigned int clkid)
+{
+ char *name;
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ name = kasprintf(GFP_KERNEL, "%s%d", basename, i);
+ if (!name)
+ return -ENOMEM;
+
+ ret = axg_register_clk_hw_input(dev, name, clkid + i);
+ kfree(name);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config axg_audio_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = AUDIO_CLK_PDMIN_CTRL1,
+};
+
+static int axg_audio_clkc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *map;
+ struct resource *res;
+ void __iomem *regs;
+ struct clk *clk;
+ struct clk_hw *hw;
+ int ret, i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ map = devm_regmap_init_mmio(dev, regs, &axg_audio_regmap_cfg);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to init regmap: %ld\n", PTR_ERR(map));
+ return PTR_ERR(map);
+ }
+
+ /* Get the mandatory peripheral clock */
+ clk = devm_clk_get_enable(dev, "pclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = device_reset(dev);
+ if (ret) {
+ dev_err(dev, "failed to reset device\n");
+ return ret;
+ }
+
+ /* Register the peripheral input clock */
+ hw = axg_clk_hw_register_bypass(dev, "audio_pclk",
+ __clk_get_name(clk));
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ axg_audio_hw_onecell_data.hws[AUD_CLKID_PCLK] = hw;
+
+ /* Register optional input master clocks */
+ ret = axg_register_clk_hw_inputs(dev, "mst_in",
+ AXG_MST_IN_COUNT,
+ AUD_CLKID_MST0);
+ if (ret)
+ return ret;
+
+ /* Register optional input slave sclks */
+ ret = axg_register_clk_hw_inputs(dev, "slv_sclk",
+ AXG_SLV_SCLK_COUNT,
+ AUD_CLKID_SLV_SCLK0);
+ if (ret)
+ return ret;
+
+ /* Register optional input slave lrclks */
+ ret = axg_register_clk_hw_inputs(dev, "slv_lrclk",
+ AXG_SLV_LRCLK_COUNT,
+ AUD_CLKID_SLV_LRCLK0);
+ if (ret)
+ return ret;
+
+ /* Populate regmap for the regmap backed clocks */
+ for (i = 0; i < ARRAY_SIZE(axg_audio_clk_regmaps); i++)
+ axg_audio_clk_regmaps[i]->map = map;
+
+ /* Take care to skip the registered input clocks */
+ for (i = AUD_CLKID_DDR_ARB; i < axg_audio_hw_onecell_data.num; i++) {
+ hw = axg_audio_hw_onecell_data.hws[i];
+ /* array might be sparse */
+ if (!hw)
+ continue;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "failed to register clock %s\n",
+ hw->init->name);
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &axg_audio_hw_onecell_data);
+}
+
+static const struct of_device_id clkc_match_table[] = {
+ { .compatible = "amlogic,axg-audio-clkc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
+
+static struct platform_driver axg_audio_driver = {
+ .probe = axg_audio_clkc_probe,
+ .driver = {
+ .name = "axg-audio-clkc",
+ .of_match_table = clkc_match_table,
+ },
+};
+module_platform_driver(axg_audio_driver);
+
+MODULE_DESCRIPTION("Amlogic A113x Audio Clock driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
new file mode 100644
index 000000000000..7191b39c9d65
--- /dev/null
+++ b/drivers/clk/meson/axg-audio.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __AXG_AUDIO_CLKC_H
+#define __AXG_AUDIO_CLKC_H
+
+/*
+ * Audio Clock register offsets
+ *
+ * Register offsets from the datasheet must be multiplied by 4 before
+ * adding them to the base address to get the right offset
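+ *
+ * e.g. AUDIO_CLK_PDMIN_CTRL1 is at datasheet offset 0x2c: 0x2c * 4 = 0x0b0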
+ */
+#define AUDIO_CLK_GATE_EN 0x000
+#define AUDIO_MCLK_A_CTRL 0x004
+#define AUDIO_MCLK_B_CTRL 0x008
+#define AUDIO_MCLK_C_CTRL 0x00C
+#define AUDIO_MCLK_D_CTRL 0x010
+#define AUDIO_MCLK_E_CTRL 0x014
+#define AUDIO_MCLK_F_CTRL 0x018
+#define AUDIO_MST_A_SCLK_CTRL0 0x040
+#define AUDIO_MST_A_SCLK_CTRL1 0x044
+#define AUDIO_MST_B_SCLK_CTRL0 0x048
+#define AUDIO_MST_B_SCLK_CTRL1 0x04C
+#define AUDIO_MST_C_SCLK_CTRL0 0x050
+#define AUDIO_MST_C_SCLK_CTRL1 0x054
+#define AUDIO_MST_D_SCLK_CTRL0 0x058
+#define AUDIO_MST_D_SCLK_CTRL1 0x05C
+#define AUDIO_MST_E_SCLK_CTRL0 0x060
+#define AUDIO_MST_E_SCLK_CTRL1 0x064
+#define AUDIO_MST_F_SCLK_CTRL0 0x068
+#define AUDIO_MST_F_SCLK_CTRL1 0x06C
+#define AUDIO_CLK_TDMIN_A_CTRL 0x080
+#define AUDIO_CLK_TDMIN_B_CTRL 0x084
+#define AUDIO_CLK_TDMIN_C_CTRL 0x088
+#define AUDIO_CLK_TDMIN_LB_CTRL 0x08C
+#define AUDIO_CLK_TDMOUT_A_CTRL 0x090
+#define AUDIO_CLK_TDMOUT_B_CTRL 0x094
+#define AUDIO_CLK_TDMOUT_C_CTRL 0x098
+#define AUDIO_CLK_SPDIFIN_CTRL 0x09C
+#define AUDIO_CLK_SPDIFOUT_CTRL 0x0A0
+#define AUDIO_CLK_RESAMPLE_CTRL 0x0A4
+#define AUDIO_CLK_LOCKER_CTRL 0x0A8
+#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
+#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
+
+/*
+ * CLKID index values
+ * These indices are entirely contrived and do not map onto the hardware.
+ */
+
+#define AUD_CLKID_PCLK 0
+#define AUD_CLKID_MST0 1
+#define AUD_CLKID_MST1 2
+#define AUD_CLKID_MST2 3
+#define AUD_CLKID_MST3 4
+#define AUD_CLKID_MST4 5
+#define AUD_CLKID_MST5 6
+#define AUD_CLKID_MST6 7
+#define AUD_CLKID_MST7 8
+#define AUD_CLKID_MST_A_MCLK_SEL 59
+#define AUD_CLKID_MST_B_MCLK_SEL 60
+#define AUD_CLKID_MST_C_MCLK_SEL 61
+#define AUD_CLKID_MST_D_MCLK_SEL 62
+#define AUD_CLKID_MST_E_MCLK_SEL 63
+#define AUD_CLKID_MST_F_MCLK_SEL 64
+#define AUD_CLKID_MST_A_MCLK_DIV 65
+#define AUD_CLKID_MST_B_MCLK_DIV 66
+#define AUD_CLKID_MST_C_MCLK_DIV 67
+#define AUD_CLKID_MST_D_MCLK_DIV 68
+#define AUD_CLKID_MST_E_MCLK_DIV 69
+#define AUD_CLKID_MST_F_MCLK_DIV 70
+#define AUD_CLKID_SPDIFOUT_CLK_SEL 71
+#define AUD_CLKID_SPDIFOUT_CLK_DIV 72
+#define AUD_CLKID_SPDIFIN_CLK_SEL 73
+#define AUD_CLKID_SPDIFIN_CLK_DIV 74
+#define AUD_CLKID_PDM_DCLK_SEL 75
+#define AUD_CLKID_PDM_DCLK_DIV 76
+#define AUD_CLKID_PDM_SYSCLK_SEL 77
+#define AUD_CLKID_PDM_SYSCLK_DIV 78
+#define AUD_CLKID_MST_A_SCLK_PRE_EN 92
+#define AUD_CLKID_MST_B_SCLK_PRE_EN 93
+#define AUD_CLKID_MST_C_SCLK_PRE_EN 94
+#define AUD_CLKID_MST_D_SCLK_PRE_EN 95
+#define AUD_CLKID_MST_E_SCLK_PRE_EN 96
+#define AUD_CLKID_MST_F_SCLK_PRE_EN 97
+#define AUD_CLKID_MST_A_SCLK_DIV 98
+#define AUD_CLKID_MST_B_SCLK_DIV 99
+#define AUD_CLKID_MST_C_SCLK_DIV 100
+#define AUD_CLKID_MST_D_SCLK_DIV 101
+#define AUD_CLKID_MST_E_SCLK_DIV 102
+#define AUD_CLKID_MST_F_SCLK_DIV 103
+#define AUD_CLKID_MST_A_SCLK_POST_EN 104
+#define AUD_CLKID_MST_B_SCLK_POST_EN 105
+#define AUD_CLKID_MST_C_SCLK_POST_EN 106
+#define AUD_CLKID_MST_D_SCLK_POST_EN 107
+#define AUD_CLKID_MST_E_SCLK_POST_EN 108
+#define AUD_CLKID_MST_F_SCLK_POST_EN 109
+#define AUD_CLKID_MST_A_LRCLK_DIV 110
+#define AUD_CLKID_MST_B_LRCLK_DIV 111
+#define AUD_CLKID_MST_C_LRCLK_DIV 112
+#define AUD_CLKID_MST_D_LRCLK_DIV 113
+#define AUD_CLKID_MST_E_LRCLK_DIV 114
+#define AUD_CLKID_MST_F_LRCLK_DIV 115
+#define AUD_CLKID_TDMIN_A_SCLK_PRE_EN 137
+#define AUD_CLKID_TDMIN_B_SCLK_PRE_EN 138
+#define AUD_CLKID_TDMIN_C_SCLK_PRE_EN 139
+#define AUD_CLKID_TDMIN_LB_SCLK_PRE_EN 140
+#define AUD_CLKID_TDMOUT_A_SCLK_PRE_EN 141
+#define AUD_CLKID_TDMOUT_B_SCLK_PRE_EN 142
+#define AUD_CLKID_TDMOUT_C_SCLK_PRE_EN 143
+#define AUD_CLKID_TDMIN_A_SCLK_POST_EN 144
+#define AUD_CLKID_TDMIN_B_SCLK_POST_EN 145
+#define AUD_CLKID_TDMIN_C_SCLK_POST_EN 146
+#define AUD_CLKID_TDMIN_LB_SCLK_POST_EN 147
+#define AUD_CLKID_TDMOUT_A_SCLK_POST_EN 148
+#define AUD_CLKID_TDMOUT_B_SCLK_POST_EN 149
+#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
+
+/* include the CLKIDs which are part of the DT bindings */
+#include <dt-bindings/clock/axg-audio-clkc.h>
+
+#define NR_CLKS 151
+
+#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index bd4dbc696b88..00ce62ad6416 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -12,7 +12,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
@@ -626,6 +625,137 @@ static struct clk_regmap axg_mpll3 = {
},
};
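+/*
+ * With the 24MHz xtal, the entry below is expected to give:
+ * 24MHz * 200 / 3 / 2^1 / 2^3 = 100MHz, assuming the od fields act as
+ * power-of-two post-dividers as on the other Meson PLLs.
+ */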
+static const struct pll_rate_table axg_pcie_pll_rate_table[] = {
+ {
+ .rate = 100000000,
+ .m = 200,
+ .n = 3,
+ .od = 1,
+ .od2 = 3,
+ },
+ { /* sentinel */ },
+};
+
+static const struct reg_sequence axg_pcie_init_regs[] = {
+ { .reg = HHI_PCIE_PLL_CNTL, .def = 0x400106c8 },
+ { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x0084a2aa },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0xb75020be },
+ { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x0a47488e },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0xc000004d },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x00078000 },
+ { .reg = HHI_PCIE_PLL_CNTL6, .def = 0x002323c6 },
+};
+
+static struct clk_regmap axg_pcie_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .od2 = {
+ .reg_off = HHI_PCIE_PLL_CNTL6,
+ .shift = 6,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_PCIE_PLL_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = axg_pcie_pll_rate_table,
+ .init_regs = axg_pcie_init_regs,
+ .init_count = ARRAY_SIZE(axg_pcie_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap axg_pcie_mux = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .mask = 0x1,
+ .shift = 2,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_mux",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "mpll3", "pcie_pll" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_pcie_ref = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .mask = 0x1,
+ .shift = 1,
+ /* skip the parent 0, reserved for debug */
+ .table = (u32[]){ 1 },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_ref",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "pcie_mux" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_pcie_cml_en0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "pcie_cml_en0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "pcie_ref" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_pcie_cml_en1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "pcie_cml_en1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "pcie_ref" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
"xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
@@ -779,6 +909,63 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
},
};
+static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
+ 9, 10, 11, 13, 14, };
+static const char * const gen_clk_parent_names[] = {
+ "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3",
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
+};
+
+static struct clk_regmap axg_gen_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 12,
+ .table = mux_table_gen_clk,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bits 15:12 selects from 15 possible parents:
+ * xtal, [rtc_oscin_i], [sys_cpu_div16], [ddr_dpll_pt],
+ * hifi_pll, mpll0, mpll1, mpll2, mpll3, fdiv4,
+ * fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
+ */
+ .parent_names = gen_clk_parent_names,
+ .num_parents = ARRAY_SIZE(gen_clk_parent_names),
+ },
+};
+
+static struct clk_regmap axg_gen_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .shift = 0,
+ .width = 11,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gen_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_gen_clk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "gen_clk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(axg_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(axg_audio_locker, HHI_GCLK_MPEG0, 2);
@@ -821,6 +1008,7 @@ static MESON_GATE(axg_mmc_pclk, HHI_GCLK_MPEG2, 11);
static MESON_GATE(axg_vpu_intr, HHI_GCLK_MPEG2, 25);
static MESON_GATE(axg_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
static MESON_GATE(axg_gic, HHI_GCLK_MPEG2, 30);
+static MESON_GATE(axg_mipi_enable, HHI_MIPI_CNTL0, 29);
/* Always On (AO) domain gates */
@@ -910,6 +1098,15 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = {
[CLKID_FCLK_DIV4_DIV] = &axg_fclk_div4_div.hw,
[CLKID_FCLK_DIV5_DIV] = &axg_fclk_div5_div.hw,
[CLKID_FCLK_DIV7_DIV] = &axg_fclk_div7_div.hw,
+ [CLKID_PCIE_PLL] = &axg_pcie_pll.hw,
+ [CLKID_PCIE_MUX] = &axg_pcie_mux.hw,
+ [CLKID_PCIE_REF] = &axg_pcie_ref.hw,
+ [CLKID_PCIE_CML_EN0] = &axg_pcie_cml_en0.hw,
+ [CLKID_PCIE_CML_EN1] = &axg_pcie_cml_en1.hw,
+ [CLKID_MIPI_ENABLE] = &axg_mipi_enable.hw,
+ [CLKID_GEN_CLK_SEL] = &axg_gen_clk_sel.hw,
+ [CLKID_GEN_CLK_DIV] = &axg_gen_clk_div.hw,
+ [CLKID_GEN_CLK] = &axg_gen_clk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -988,6 +1185,15 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_fclk_div4,
&axg_fclk_div5,
&axg_fclk_div7,
+ &axg_pcie_pll,
+ &axg_pcie_mux,
+ &axg_pcie_ref,
+ &axg_pcie_cml_en0,
+ &axg_pcie_cml_en1,
+ &axg_mipi_enable,
+ &axg_gen_clk_sel,
+ &axg_gen_clk_div,
+ &axg_gen_clk,
};
static const struct of_device_id clkc_match_table[] = {
@@ -995,49 +1201,17 @@ static const struct of_device_id clkc_match_table[] = {
{}
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static int axg_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
- void __iomem *clk_base = NULL;
struct regmap *map;
int ret, i;
/* Get the hhi system controller node if available */
map = syscon_node_to_regmap(of_get_parent(dev->of_node));
if (IS_ERR(map)) {
- dev_err(dev,
- "failed to get HHI regmap - Trying obsolete regs\n");
-
- /*
- * FIXME: HHI registers should be accessed through
- * the appropriate system controller. This is required because
- * there is more than just clocks in this register space
- *
- * This fallback method is only provided temporarily until
- * all the platform DTs are properly using the syscon node
- */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
-
- clk_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!clk_base) {
- dev_err(dev, "Unable to map clk base\n");
- return -ENXIO;
- }
-
- map = devm_regmap_init_mmio(dev, clk_base,
- &clkc_regmap_config);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ dev_err(dev, "failed to get HHI regmap\n");
+ return PTR_ERR(map);
}
/* Populate regmap for the regmap backed clocks */
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
index b421df6a7ea0..1d04144a1b2c 100644
--- a/drivers/clk/meson/axg.h
+++ b/drivers/clk/meson/axg.h
@@ -16,6 +16,7 @@
* Register offsets from the data sheet must be multiplied by 4 before
* adding them to the base address to get the right value.
*/
+#define HHI_MIPI_CNTL0 0x00
#define HHI_GP0_PLL_CNTL 0x40
#define HHI_GP0_PLL_CNTL2 0x44
#define HHI_GP0_PLL_CNTL3 0x48
@@ -127,8 +128,13 @@
#define CLKID_FCLK_DIV4_DIV 73
#define CLKID_FCLK_DIV5_DIV 74
#define CLKID_FCLK_DIV7_DIV 75
+#define CLKID_PCIE_PLL 76
+#define CLKID_PCIE_MUX 77
+#define CLKID_PCIE_REF 78
+#define CLKID_GEN_CLK_SEL 82
+#define CLKID_GEN_CLK_DIV 83
-#define NR_CLKS 76
+#define NR_CLKS 85
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/axg-clkc.h>
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
deleted file mode 100644
index 58f546e04807..000000000000
--- a/drivers/clk/meson/clk-audio-divider.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2017 AmLogic, Inc.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-/*
- * i2s master clock divider: The algorithm of the generic clk-divider used with
- * a very precise clock parent such as the mpll tends to select a low divider
- * factor. This gives poor results with this particular divider, especially with
- * high frequencies (> 100 MHz)
- *
- * This driver try to select the maximum possible divider with the rate the
- * upstream clock can provide.
- */
-
-#include <linux/clk-provider.h>
-#include "clkc.h"
-
-static inline struct meson_clk_audio_div_data *
-meson_clk_audio_div_data(struct clk_regmap *clk)
-{
- return (struct meson_clk_audio_div_data *)clk->data;
-}
-
-static int _div_round(unsigned long parent_rate, unsigned long rate,
- unsigned long flags)
-{
- if (flags & CLK_DIVIDER_ROUND_CLOSEST)
- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate, rate);
-
- return DIV_ROUND_UP_ULL((u64)parent_rate, rate);
-}
-
-static int _get_val(unsigned long parent_rate, unsigned long rate)
-{
- return DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
-}
-
-static int _valid_divider(unsigned int width, int divider)
-{
- int max_divider = 1 << width;
-
- return clamp(divider, 1, max_divider);
-}
-
-static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
- unsigned long divider;
-
- divider = meson_parm_read(clk->map, &adiv->div);
-
- return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
-}
-
-static long audio_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
- unsigned long max_prate;
- int divider;
-
- if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
- divider = _div_round(*parent_rate, rate, adiv->flags);
- divider = _valid_divider(adiv->div.width, divider);
- return DIV_ROUND_UP_ULL((u64)*parent_rate, divider);
- }
-
- /* Get the maximum parent rate */
- max_prate = clk_hw_round_rate(clk_hw_get_parent(hw), ULONG_MAX);
-
- /* Get the corresponding rounded down divider */
- divider = max_prate / rate;
- divider = _valid_divider(adiv->div.width, divider);
-
- /* Get actual rate of the parent */
- *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
- divider * rate);
-
- return DIV_ROUND_UP_ULL((u64)*parent_rate, divider);
-}
-
-static int audio_divider_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
- int val = _get_val(parent_rate, rate);
-
- meson_parm_write(clk->map, &adiv->div, val);
-
- return 0;
-}
-
-const struct clk_ops meson_clk_audio_divider_ro_ops = {
- .recalc_rate = audio_divider_recalc_rate,
- .round_rate = audio_divider_round_rate,
-};
-
-const struct clk_ops meson_clk_audio_divider_ops = {
- .recalc_rate = audio_divider_recalc_rate,
- .round_rate = audio_divider_round_rate,
- .set_rate = audio_divider_set_rate,
-};
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
new file mode 100644
index 000000000000..cba43748ce3d
--- /dev/null
+++ b/drivers/clk/meson/clk-phase.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include "clkc.h"
+
+#define phase_step(_width) (360 / (1 << (_width)))
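+/* e.g. a 2 bit wide phase field gives steps of phase_step(2) = 90 degrees */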
+
+static inline struct meson_clk_phase_data *
+meson_clk_phase_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_phase_data *)clk->data;
+}
+
+int meson_clk_degrees_from_val(unsigned int val, unsigned int width)
+{
+ return phase_step(width) * val;
+}
+EXPORT_SYMBOL_GPL(meson_clk_degrees_from_val);
+
+unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
+{
+ unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width));
+
+ /*
+ * This last calculation is here for cases when degrees is rounded
+ * to 360, in which case val == (1 << width).
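+ * e.g. width = 2: 350 degrees rounds to val = 4, and 4 % 4 = 0.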
+ */
+ return val % (1 << width);
+}
+EXPORT_SYMBOL_GPL(meson_clk_degrees_to_val);
+
+static int meson_clk_phase_get_phase(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_phase_data *phase = meson_clk_phase_data(clk);
+ unsigned int val;
+
+ val = meson_parm_read(clk->map, &phase->ph);
+
+ return meson_clk_degrees_from_val(val, phase->ph.width);
+}
+
+static int meson_clk_phase_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_phase_data *phase = meson_clk_phase_data(clk);
+ unsigned int val;
+
+ val = meson_clk_degrees_to_val(degrees, phase->ph.width);
+ meson_parm_write(clk->map, &phase->ph, val);
+
+ return 0;
+}
+
+const struct clk_ops meson_clk_phase_ops = {
+ .get_phase = meson_clk_phase_get_phase,
+ .set_phase = meson_clk_phase_set_phase,
+};
+EXPORT_SYMBOL_GPL(meson_clk_phase_ops);
diff --git a/drivers/clk/meson/clk-triphase.c b/drivers/clk/meson/clk-triphase.c
new file mode 100644
index 000000000000..4a59936251e5
--- /dev/null
+++ b/drivers/clk/meson/clk-triphase.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include "clkc-audio.h"
+
+/*
+ * This is a special clock for the audio controller.
+ * The phase of the mst_sclk clock output can be controlled independently
+ * for the outside world (ph0), the tdmout (ph1) and the tdmin (ph2).
+ * Controlling these 3 phases as just one makes things simpler and
+ * gives the same clock view to all the elements on the i2s bus.
+ * If necessary, we can still control the phase in the tdm block,
+ * which makes these independent controls redundant.
+ */
+static inline struct meson_clk_triphase_data *
+meson_clk_triphase_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_triphase_data *)clk->data;
+}
+
+static void meson_clk_triphase_sync(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ /* Get phase 0 and sync it to phase 1 and 2 */
+ val = meson_parm_read(clk->map, &tph->ph0);
+ meson_parm_write(clk->map, &tph->ph1, val);
+ meson_parm_write(clk->map, &tph->ph2, val);
+}
+
+static int meson_clk_triphase_get_phase(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ /* Phases are in sync, reading phase 0 is enough */
+ val = meson_parm_read(clk->map, &tph->ph0);
+
+ return meson_clk_degrees_from_val(val, tph->ph0.width);
+}
+
+static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
+ meson_parm_write(clk->map, &tph->ph0, val);
+ meson_parm_write(clk->map, &tph->ph1, val);
+ meson_parm_write(clk->map, &tph->ph2, val);
+
+ return 0;
+}
+
+const struct clk_ops meson_clk_triphase_ops = {
+ .init = meson_clk_triphase_sync,
+ .get_phase = meson_clk_triphase_get_phase,
+ .set_phase = meson_clk_triphase_set_phase,
+};
+EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
diff --git a/drivers/clk/meson/clkc-audio.h b/drivers/clk/meson/clkc-audio.h
new file mode 100644
index 000000000000..0a7c157ebf81
--- /dev/null
+++ b/drivers/clk/meson/clkc-audio.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLKC_AUDIO_H
+#define __MESON_CLKC_AUDIO_H
+
+#include "clkc.h"
+
+struct meson_clk_triphase_data {
+ struct parm ph0;
+ struct parm ph1;
+ struct parm ph2;
+};
+
+struct meson_sclk_div_data {
+ struct parm div;
+ struct parm hi;
+ unsigned int cached_div;
+ struct clk_duty cached_duty;
+};
+
+extern const struct clk_ops meson_clk_triphase_ops;
+extern const struct clk_ops meson_sclk_div_ops;
+
+#endif /* __MESON_CLKC_AUDIO_H */
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index 2fb084330ee9..24cec16b6038 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -91,11 +91,13 @@ struct meson_clk_mpll_data {
#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
-struct meson_clk_audio_div_data {
- struct parm div;
- u8 flags;
+struct meson_clk_phase_data {
+ struct parm ph;
};
+int meson_clk_degrees_from_val(unsigned int val, unsigned int width);
+unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width);
+
#define MESON_GATE(_name, _reg, _bit) \
struct clk_regmap _name = { \
.data = &(struct clk_regmap_gate_data){ \
@@ -117,7 +119,6 @@ extern const struct clk_ops meson_clk_pll_ops;
extern const struct clk_ops meson_clk_cpu_ops;
extern const struct clk_ops meson_clk_mpll_ro_ops;
extern const struct clk_ops meson_clk_mpll_ops;
-extern const struct clk_ops meson_clk_audio_divider_ro_ops;
-extern const struct clk_ops meson_clk_audio_divider_ops;
+extern const struct clk_ops meson_clk_phase_ops;
#endif /* __CLKC_H */
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 240658404367..86d3ae58e84c 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
@@ -498,6 +497,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div2_div" },
.num_parents = 1,
+ .flags = CLK_IS_CRITICAL,
},
};
@@ -970,28 +970,26 @@ static struct clk_regmap gxbb_cts_amclk_sel = {
.mask = 0x3,
.shift = 9,
.table = (u32[]){ 1, 2, 3 },
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
.num_parents = 3,
- .flags = CLK_SET_RATE_PARENT,
},
};
static struct clk_regmap gxbb_cts_amclk_div = {
- .data = &(struct meson_clk_audio_div_data){
- .div = {
- .reg_off = HHI_AUD_CLK_CNTL,
- .shift = 0,
- .width = 8,
- },
+ .data = &(struct clk_regmap_div_data) {
+ .offset = HHI_AUD_CLK_CNTL,
+ .shift = 0,
+ .width = 8,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_div",
- .ops = &meson_clk_audio_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "cts_amclk_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1018,13 +1016,13 @@ static struct clk_regmap gxbb_cts_mclk_i958_sel = {
.mask = 0x3,
.shift = 25,
.table = (u32[]){ 1, 2, 3 },
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
.num_parents = 3,
- .flags = CLK_SET_RATE_PARENT,
},
};
@@ -1626,6 +1624,63 @@ static struct clk_regmap gxbb_vdec_hevc = {
},
};
+static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
+ 9, 10, 11, 13, 14, };
+static const char * const gen_clk_parent_names[] = {
+ "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2",
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
+};
+
+static struct clk_regmap gxbb_gen_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 12,
+ .table = mux_table_gen_clk,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bits 15:12 selects from 15 possible parents:
+ * xtal, [rtc_oscin_i], [sys_cpu_div16], [ddr_dpll_pt],
+ * vid_pll, vid2_pll (hevc), mpll0, mpll1, mpll2, fdiv4,
+ * fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
+ */
+ .parent_names = gen_clk_parent_names,
+ .num_parents = ARRAY_SIZE(gen_clk_parent_names),
+ },
+};
+
+static struct clk_regmap gxbb_gen_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .shift = 0,
+ .width = 11,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gen_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxbb_gen_clk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "gen_clk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
@@ -1875,6 +1930,9 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_VDEC_HEVC_SEL] = &gxbb_vdec_hevc_sel.hw,
[CLKID_VDEC_HEVC_DIV] = &gxbb_vdec_hevc_div.hw,
[CLKID_VDEC_HEVC] = &gxbb_vdec_hevc.hw,
+ [CLKID_GEN_CLK_SEL] = &gxbb_gen_clk_sel.hw,
+ [CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw,
+ [CLKID_GEN_CLK] = &gxbb_gen_clk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2037,6 +2095,9 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_VDEC_HEVC_SEL] = &gxbb_vdec_hevc_sel.hw,
[CLKID_VDEC_HEVC_DIV] = &gxbb_vdec_hevc_div.hw,
[CLKID_VDEC_HEVC] = &gxbb_vdec_hevc.hw,
+ [CLKID_GEN_CLK_SEL] = &gxbb_gen_clk_sel.hw,
+ [CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw,
+ [CLKID_GEN_CLK] = &gxbb_gen_clk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2201,6 +2262,9 @@ static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_vdec_hevc_sel,
&gxbb_vdec_hevc_div,
&gxbb_vdec_hevc,
+ &gxbb_gen_clk_sel,
+ &gxbb_gen_clk_div,
+ &gxbb_gen_clk,
};
struct clkc_data {
@@ -2227,17 +2291,9 @@ static const struct of_device_id clkc_match_table[] = {
{},
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static int gxbb_clkc_probe(struct platform_device *pdev)
{
const struct clkc_data *clkc_data;
- struct resource *res;
- void __iomem *clk_base;
struct regmap *map;
int ret, i;
struct device *dev = &pdev->dev;
@@ -2249,31 +2305,8 @@ static int gxbb_clkc_probe(struct platform_device *pdev)
/* Get the hhi system controller node if available */
map = syscon_node_to_regmap(of_get_parent(dev->of_node));
if (IS_ERR(map)) {
- dev_err(dev,
- "failed to get HHI regmap - Trying obsolete regs\n");
-
- /*
- * FIXME: HHI registers should be accessed through
- * the appropriate system controller. This is required because
- * there is more than just clocks in this register space
- *
- * This fallback method is only provided temporarily until
- * all the platform DTs are properly using the syscon node
- */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- clk_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!clk_base) {
- dev_err(dev, "Unable to map clk base\n");
- return -ENXIO;
- }
-
- map = devm_regmap_init_mmio(dev, clk_base,
- &clkc_regmap_config);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ dev_err(dev, "failed to get HHI regmap\n");
+ return PTR_ERR(map);
}
/* Populate regmap for the common regmap backed clocks */
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index ec1a812bf1fd..20dfb1daf5b8 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -66,7 +66,6 @@
#define HHI_USB_CLK_CNTL 0x220 /* 0x88 offset in data sheet */
#define HHI_32K_CLK_CNTL 0x224 /* 0x89 offset in data sheet */
#define HHI_GEN_CLK_CNTL 0x228 /* 0x8a offset in data sheet */
-#define HHI_GEN_CLK_CNTL 0x228 /* 0x8a offset in data sheet */
#define HHI_PCM_CLK_CNTL 0x258 /* 0x96 offset in data sheet */
#define HHI_NAND_CLK_CNTL 0x25C /* 0x97 offset in data sheet */
@@ -158,8 +157,10 @@
#define CLKID_VDEC_1_DIV 152
#define CLKID_VDEC_HEVC_SEL 154
#define CLKID_VDEC_HEVC_DIV 155
+#define CLKID_GEN_CLK_SEL 157
+#define CLKID_GEN_CLK_DIV 158
-#define NR_CLKS 157
+#define NR_CLKS 160
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
new file mode 100644
index 000000000000..bc64019b8eeb
--- /dev/null
+++ b/drivers/clk/meson/sclk-div.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * Sample clock generator divider:
+ * This HW divider gates with value 0 but is otherwise a zero-based divider:
+ *
+ * val >= 1
+ * divider = val + 1
+ *
+ * The duty cycle may also be set for the LR clock variant. The duty cycle
+ * ratio is:
+ *
+ * hi = [0 - val]
+ * duty_cycle = (1 + hi) / (1 + val)
+ */
+
+#include "clkc-audio.h"
+
+static inline struct meson_sclk_div_data *
+meson_sclk_div_data(struct clk_regmap *clk)
+{
+ return (struct meson_sclk_div_data *)clk->data;
+}
+
+static int sclk_div_maxval(struct meson_sclk_div_data *sclk)
+{
+ return (1 << sclk->div.width) - 1;
+}
+
+static int sclk_div_maxdiv(struct meson_sclk_div_data *sclk)
+{
+ return sclk_div_maxval(sclk) + 1;
+}
+
+static int sclk_div_getdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, int maxdiv)
+{
+ int div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
+
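+ /* val >= 1, so the effective divider is always at least 2 */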
+ return clamp(div, 2, maxdiv);
+}
+
+static int sclk_div_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate,
+ struct meson_sclk_div_data *sclk)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ int bestdiv = 0, i;
+ unsigned long maxdiv, now, parent_now;
+ unsigned long best = 0, best_parent = 0;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = sclk_div_maxdiv(sclk);
+
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT))
+ return sclk_div_getdiv(hw, rate, *prate, maxdiv);
+
+ /*
+ * The maximum divider we can use without overflowing
+ * unsigned long in rate * i below
+ */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 2; i <= maxdiv; i++) {
+ /*
+ * It is the ideal case if the requested rate can be
+ * divided exactly from the parent clock without changing
+ * the parent rate, so return the divider immediately.
+ */
+ if (rate * i == *prate)
+ return i;
+
+ parent_now = clk_hw_round_rate(parent, rate * i);
+ now = DIV_ROUND_UP_ULL((u64)parent_now, i);
+
+ if (abs(rate - now) < abs(rate - best)) {
+ bestdiv = i;
+ best = now;
+ best_parent = parent_now;
+ }
+ }
+
+ if (!bestdiv)
+ bestdiv = sclk_div_maxdiv(sclk);
+ else
+ *prate = best_parent;
+
+ return bestdiv;
+}
+
+static long sclk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ int div;
+
+ div = sclk_div_bestdiv(hw, rate, prate, sclk);
+
+ return DIV_ROUND_UP_ULL((u64)*prate, div);
+}
+
+static void sclk_apply_ratio(struct clk_regmap *clk,
+ struct meson_sclk_div_data *sclk)
+{
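+ /* Pick hi so that duty_cycle = (hi + 1) / cached_div, per the header comment */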
+ unsigned int hi = DIV_ROUND_CLOSEST(sclk->cached_div *
+ sclk->cached_duty.num,
+ sclk->cached_duty.den);
+
+ if (hi)
+ hi -= 1;
+
+ meson_parm_write(clk->map, &sclk->hi, hi);
+}
+
+static int sclk_div_set_duty_cycle(struct clk_hw *hw,
+ struct clk_duty *duty)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ if (MESON_PARM_APPLICABLE(&sclk->hi)) {
+ memcpy(&sclk->cached_duty, duty, sizeof(*duty));
+ sclk_apply_ratio(clk, sclk);
+ }
+
+ return 0;
+}
+
+static int sclk_div_get_duty_cycle(struct clk_hw *hw,
+ struct clk_duty *duty)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ int hi;
+
+ if (!MESON_PARM_APPLICABLE(&sclk->hi)) {
+ duty->num = 1;
+ duty->den = 2;
+ return 0;
+ }
+
+ hi = meson_parm_read(clk->map, &sclk->hi);
+ duty->num = hi + 1;
+ duty->den = sclk->cached_div;
+ return 0;
+}
+
+static void sclk_apply_divider(struct clk_regmap *clk,
+ struct meson_sclk_div_data *sclk)
+{
+ if (MESON_PARM_APPLICABLE(&sclk->hi))
+ sclk_apply_ratio(clk, sclk);
+
+ meson_parm_write(clk->map, &sclk->div, sclk->cached_div - 1);
+}
+
+static int sclk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ unsigned long maxdiv = sclk_div_maxdiv(sclk);
+
+ sclk->cached_div = sclk_div_getdiv(hw, rate, prate, maxdiv);
+
+ if (clk_hw_is_enabled(hw))
+ sclk_apply_divider(clk, sclk);
+
+ return 0;
+}
+
+static unsigned long sclk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ return DIV_ROUND_UP_ULL((u64)prate, sclk->cached_div);
+}
+
+static int sclk_div_enable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ sclk_apply_divider(clk, sclk);
+
+ return 0;
+}
+
+static void sclk_div_disable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ meson_parm_write(clk->map, &sclk->div, 0);
+}
+
+static int sclk_div_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ if (meson_parm_read(clk->map, &sclk->div))
+ return 1;
+
+ return 0;
+}
+
+static void sclk_div_init(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ unsigned int val;
+
+ val = meson_parm_read(clk->map, &sclk->div);
+
+ /* if the divider is initially disabled, assume max */
+ if (!val)
+ sclk->cached_div = sclk_div_maxdiv(sclk);
+ else
+ sclk->cached_div = val + 1;
+
+ sclk_div_get_duty_cycle(hw, &sclk->cached_duty);
+}
+
+const struct clk_ops meson_sclk_div_ops = {
+ .recalc_rate = sclk_div_recalc_rate,
+ .round_rate = sclk_div_round_rate,
+ .set_rate = sclk_div_set_rate,
+ .enable = sclk_div_enable,
+ .disable = sclk_div_disable,
+ .is_enabled = sclk_div_is_enabled,
+ .get_duty_cycle = sclk_div_get_duty_cycle,
+ .set_duty_cycle = sclk_div_set_duty_cycle,
+ .init = sclk_div_init,
+};
+EXPORT_SYMBOL_GPL(meson_sclk_div_ops);
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 6860bd5a37c5..499f5962c8b0 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Marvell Armada 37xx SoC Peripheral clocks
*
@@ -5,10 +6,6 @@
*
* Gregory CLEMENT <gregory.clement@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2 or later. This program is licensed "as is"
- * without any warranty of any kind, whether express or implied.
- *
* Most of the peripheral clocks can be modelled like this:
* _____ _______ _______
* TBG-A-P --| | | | | | ______
@@ -35,6 +32,7 @@
#define CLK_SEL 0x10
#define CLK_DIS 0x14
+#define ARMADA_37XX_DVFS_LOAD_1 1
#define LOAD_LEVEL_NR 4
#define ARMADA_37XX_NB_L0L1 0x18
@@ -418,7 +416,6 @@ static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
- int num_parents = clk_hw_get_num_parents(hw);
u32 val;
if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
@@ -428,9 +425,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
val &= pm_cpu->mask_mux;
}
- if (val >= num_parents)
- return -EINVAL;
-
return val;
}
@@ -507,6 +501,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
+/*
+ * Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
+ * respectively) to the L0 frequency (1.2 GHz) requires a significant
+ * amount of time to let VDD stabilize to the appropriate
+ * voltage. This amount of time is large enough that it cannot be
+ * covered by the hardware countdown register. Due to this, the CPU
+ * might start operating at L0 before the voltage is stabilized,
+ * leading to CPU stalls.
+ *
+ * To work around this problem, we prevent switching directly from the
+ * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+ * frequency in-between. The sequence therefore becomes:
+ * 1. First switch from L2/L3 (300/200 MHz) to L1 (600 MHz)
+ * 2. Sleep 20 ms to let the VDD voltage stabilize
+ * 3. Then switch from L1 (600 MHz) to L0 (1200 MHz).
+ */
+static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
+{
+ unsigned int cur_level;
+
+ if (rate != 1200 * 1000 * 1000)
+ return;
+
+ regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+ cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+ if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
+ return;
+
+ regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+ ARMADA_37XX_NB_CPU_LOAD_MASK,
+ ARMADA_37XX_DVFS_LOAD_1);
+ msleep(20);
+}
+
static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -537,6 +565,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
*/
reg = ARMADA_37XX_NB_CPU_LOAD;
mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+ clk_pm_cpu_set_rate_wa(rate, base);
+
regmap_update_bits(base, reg, mask, load_level);
return rate;
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index 6416c1f8e632..e88f8e01fe3a 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -292,8 +292,10 @@ static void __init pxa25x_register_plls(void)
{
clk_register_fixed_rate(NULL, "osc_3_6864mhz", NULL,
CLK_GET_RATE_NOCACHE, 3686400);
- clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE, 32768);
+ clkdev_pxa_register(CLK_OSC32k768, "osc_32_768khz", NULL,
+ clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+ CLK_GET_RATE_NOCACHE,
+ 32768));
clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "ppll_95_85mhz", "osc_3_6864mhz",
0, 26, 1);
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 25a30194d27a..d40b63e7bbce 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -314,9 +314,10 @@ static void __init pxa27x_register_plls(void)
clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
CLK_GET_RATE_NOCACHE,
13 * MHz);
- clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE,
- 32768 * KHz);
+ clkdev_pxa_register(CLK_OSC32k768, "osc_32_768khz", NULL,
+ clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+ CLK_GET_RATE_NOCACHE,
+ 32768 * KHz));
clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "ppll_312mhz", "osc_13mhz", 0, 24, 1);
}
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 2d126df2bccd..7aa120c3bd08 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -286,9 +286,10 @@ static void __init pxa3xx_register_plls(void)
clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
CLK_GET_RATE_NOCACHE,
13 * MHz);
- clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE,
- 32768);
+ clkdev_pxa_register(CLK_OSC32k768, "osc_32_768khz", NULL,
+ clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+ CLK_GET_RATE_NOCACHE,
+ 32768));
clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
CLK_GET_RATE_NOCACHE,
120 * MHz);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 9c3480dcc38a..064768699fe7 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -59,6 +59,15 @@ config QCOM_CLK_SMD_RPM
Say Y if you want to support the clocks exposed by the RPM on
platforms such as apq8016, apq8084, msm8974 etc.
+config QCOM_CLK_RPMH
+ tristate "RPMh Clock Driver"
+ depends on COMMON_CLK_QCOM && QCOM_RPMH
+ help
+ RPMh manages shared resources on some Qualcomm Technologies, Inc.
+ SoCs. It accepts requests from other hardware subsystems via RSC.
+ Say Y if you want to support the clocks exposed by RPMh on
+ platforms such as SDM845.
+
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
select QCOM_GDSC
@@ -245,6 +254,16 @@ config SDM_VIDEOCC_845
Say Y if you want to support video devices and functionality such as
video encode and decode.
+config SDM_DISPCC_845
+ tristate "SDM845 Display Clock Controller"
+ select SDM_GCC_845
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc.
+ SDM845 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 762c01137c2f..21a45035930d 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -37,7 +37,9 @@ obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 3c49a60072f1..a91d97cecbad 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index f981b486c468..66755f0f84fc 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved. */
#ifndef __QCOM_CLK_ALPHA_PLL_H__
#define __QCOM_CLK_ALPHA_PLL_H__
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index c58c5538b1b6..bc2205c450b6 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 1702efb1c511..b3561e0a3984 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved. */
#ifndef __QCOM_CLK_BRANCH_H__
#define __QCOM_CLK_BRANCH_H__
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index b209a2fe86b9..dbd5a9e83554 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -7,6 +7,8 @@
#include <linux/clk-provider.h>
#include "clk-regmap.h"
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
struct freq_tbl {
unsigned long freq;
u8 src;
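
Centralizing F() in clk-rcg.h (it is deleted from each individual Qualcomm
driver in the hunks below) keeps the frequency-table encoding in one place:
the third argument is a pre-divider that the hardware expects as 2 * h - 1,
which lets half-integer dividers be written literally. A worked expansion,
using values from the SDM845 tables later in this patch:

	F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0)
		/* => { 171428571, P_GPLL0_OUT_MAIN, 6, 0, 0 }  (2 * 3.5 - 1) */
	F(19200000, P_BI_TCXO, 1, 0, 0)
		/* => { 19200000, P_BI_TCXO, 1, 0, 0 }          (2 * 1 - 1)   */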
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
index 1c856d330733..ce80db27ccf2 100644
--- a/drivers/clk/qcom/clk-regmap.c
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
diff --git a/drivers/clk/qcom/clk-regmap.h b/drivers/clk/qcom/clk-regmap.h
index 90d95cd11ec6..6cfc1bccb255 100644
--- a/drivers/clk/qcom/clk-regmap.h
+++ b/drivers/clk/qcom/clk-regmap.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved. */
#ifndef __QCOM_CLK_REGMAP_H__
#define __QCOM_CLK_REGMAP_H__
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
new file mode 100644
index 000000000000..9f4fc7773fb2
--- /dev/null
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+
+#define CLK_RPMH_ARC_EN_OFFSET 0
+#define CLK_RPMH_VRM_EN_OFFSET 4
+
+/**
+ * struct clk_rpmh - individual rpmh clock data structure
+ * @hw: handle between common and hardware-specific interfaces
+ * @res_name: resource name for the rpmh clock
+ * @div: clock divider to compute the clock rate
+ * @res_addr: base address of the rpmh resource within the RPMh
+ * @res_on_val: rpmh clock enable value
+ * @state: rpmh clock requested state
+ * @aggr_state: rpmh clock aggregated state
+ * @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh
+ * @valid_state_mask: mask to determine the state of the rpmh clock
+ * @dev: device to which it is attached
+ * @peer: pointer to the clock rpmh sibling
+ */
+struct clk_rpmh {
+ struct clk_hw hw;
+ const char *res_name;
+ u8 div;
+ u32 res_addr;
+ u32 res_on_val;
+ u32 state;
+ u32 aggr_state;
+ u32 last_sent_aggr_state;
+ u32 valid_state_mask;
+ struct device *dev;
+ struct clk_rpmh *peer;
+};
+
+struct clk_rpmh_desc {
+ struct clk_hw **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpmh_clk_lock);
+
+#define __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
+ _res_en_offset, _res_on, _div) \
+ static struct clk_rpmh _platform##_##_name_active; \
+ static struct clk_rpmh _platform##_##_name = { \
+ .res_name = _res_name, \
+ .res_addr = _res_en_offset, \
+ .res_on_val = _res_on, \
+ .div = _div, \
+ .peer = &_platform##_##_name_active, \
+ .valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) | \
+ BIT(RPMH_ACTIVE_ONLY_STATE) | \
+ BIT(RPMH_SLEEP_STATE)), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpmh_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpmh _platform##_##_name_active = { \
+ .res_name = _res_name, \
+ .res_addr = _res_en_offset, \
+ .res_on_val = _res_on, \
+ .div = _div, \
+ .peer = &_platform##_##_name, \
+ .valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) | \
+ BIT(RPMH_ACTIVE_ONLY_STATE)), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpmh_ops, \
+ .name = #_name_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_RPMH_ARC(_platform, _name, _name_active, _res_name, \
+ _res_on, _div) \
+ __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
+ CLK_RPMH_ARC_EN_OFFSET, _res_on, _div)
+
+#define DEFINE_CLK_RPMH_VRM(_platform, _name, _name_active, _res_name, \
+ _div) \
+ __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
+ CLK_RPMH_VRM_EN_OFFSET, 1, _div)
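+
+/*
+ * Note the pairing: each DEFINE_CLK_RPMH_* invocation creates two clocks
+ * backed by the same RPMh resource. The plain clock votes in the sleep,
+ * wake and active sets, while its "_ao" (active-only) peer omits
+ * RPMH_SLEEP_STATE from valid_state_mask, so a vote held only through the
+ * active-only clock is dropped whenever the subsystem enters sleep.
+ */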
+
+static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw)
+{
+ return container_of(_hw, struct clk_rpmh, hw);
+}
+
+static inline bool has_state_changed(struct clk_rpmh *c, u32 state)
+{
+ return (c->last_sent_aggr_state & BIT(state))
+ != (c->aggr_state & BIT(state));
+}
+
+static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
+{
+ struct tcs_cmd cmd = { 0 };
+ u32 cmd_state, on_val;
+ enum rpmh_state state = RPMH_SLEEP_STATE;
+ int ret;
+
+ cmd.addr = c->res_addr;
+ cmd_state = c->aggr_state;
+ on_val = c->res_on_val;
+
+ for (; state <= RPMH_ACTIVE_ONLY_STATE; state++) {
+ if (has_state_changed(c, state)) {
+ if (cmd_state & BIT(state))
+ cmd.data = on_val;
+
+ ret = rpmh_write_async(c->dev, state, &cmd, 1);
+ if (ret) {
+ dev_err(c->dev, "set %s state of %s failed: (%d)\n",
+ !state ? "sleep" :
+ state == RPMH_WAKE_ONLY_STATE ?
+ "wake" : "active", c->res_name, ret);
+ return ret;
+ }
+ }
+ }
+
+ c->last_sent_aggr_state = c->aggr_state;
+ c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
+
+ return 0;
+}
+
+/*
+ * Update state and aggregate state values based on enable value.
+ */
+static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c,
+ bool enable)
+{
+ int ret;
+
+ /* Nothing required to be done if already off or on */
+ if (enable == c->state)
+ return 0;
+
+ c->state = enable ? c->valid_state_mask : 0;
+ c->aggr_state = c->state | c->peer->state;
+ c->peer->aggr_state = c->aggr_state;
+
+ ret = clk_rpmh_send_aggregate_command(c);
+ if (!ret)
+ return 0;
+
+ if (ret && enable)
+ c->state = 0;
+ else if (ret)
+ c->state = c->valid_state_mask;
+
+ WARN(1, "clk: %s failed to %s\n", c->res_name,
+ enable ? "enable" : "disable");
+ return ret;
+}
+
+static int clk_rpmh_prepare(struct clk_hw *hw)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+ int ret = 0;
+
+ mutex_lock(&rpmh_clk_lock);
+ ret = clk_rpmh_aggregate_state_send_command(c, true);
+ mutex_unlock(&rpmh_clk_lock);
+
+ return ret;
+};
+
+static void clk_rpmh_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ mutex_lock(&rpmh_clk_lock);
+ clk_rpmh_aggregate_state_send_command(c, false);
+ mutex_unlock(&rpmh_clk_lock);
+};
+
+static unsigned long clk_rpmh_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_rpmh *r = to_clk_rpmh(hw);
+
+ /*
+ * RPMh clocks have a fixed rate. Return static rate.
+ */
+ return prate / r->div;
+}
+
+static const struct clk_ops clk_rpmh_ops = {
+ .prepare = clk_rpmh_prepare,
+ .unprepare = clk_rpmh_unprepare,
+ .recalc_rate = clk_rpmh_recalc_rate,
+};
+
+/* Resource name must match resource id present in cmd-db. */
+DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
+DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
+DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
+DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1);
+DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
+DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
+
+static struct clk_hw *sdm845_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
+ .clks = sdm845_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
+};
+
+static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_rpmh_desc *rpmh = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= rpmh->num_clks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return rpmh->clks[idx];
+}
+
+static int clk_rpmh_probe(struct platform_device *pdev)
+{
+ struct clk_hw **hw_clks;
+ struct clk_rpmh *rpmh_clk;
+ const struct clk_rpmh_desc *desc;
+ int ret, i;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -ENODEV;
+
+ hw_clks = desc->clks;
+
+ for (i = 0; i < desc->num_clks; i++) {
+ u32 res_addr;
+
+ rpmh_clk = to_clk_rpmh(hw_clks[i]);
+ res_addr = cmd_db_read_addr(rpmh_clk->res_name);
+ if (!res_addr) {
+ dev_err(&pdev->dev, "missing RPMh resource address for %s\n",
+ rpmh_clk->res_name);
+ return -ENODEV;
+ }
+ rpmh_clk->res_addr += res_addr;
+ rpmh_clk->dev = &pdev->dev;
+
+ ret = devm_clk_hw_register(&pdev->dev, hw_clks[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ hw_clks[i]->init->name);
+ return ret;
+ }
+ }
+
+ /* typecast to silence compiler warning */
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_rpmh_hw_get,
+ (void *)desc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add clock provider\n");
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "Registered RPMh clocks\n");
+
+ return 0;
+}
+
+static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
+ { }
+};
+MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
+
+static struct platform_driver clk_rpmh_driver = {
+ .probe = clk_rpmh_probe,
+ .driver = {
+ .name = "clk-rpmh",
+ .of_match_table = clk_rpmh_match_table,
+ },
+};
+
+static int __init clk_rpmh_init(void)
+{
+ return platform_driver_register(&clk_rpmh_driver);
+}
+subsys_initcall(clk_rpmh_init);
+
+static void __exit clk_rpmh_exit(void)
+{
+ platform_driver_unregister(&clk_rpmh_driver);
+}
+module_exit(clk_rpmh_exit);
+
+MODULE_DESCRIPTION("QCOM RPMh Clock Driver");
+MODULE_LICENSE("GPL v2");
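
With the provider registered for "qcom,sdm845-rpmh-clk", consumers address
these clocks by the indices from dt-bindings/clock/qcom,rpmh.h and then use
the ordinary clk API; prepare/unprepare are the only meaningful operations,
each turning into an rpmh_write_async() vote as shown above. A minimal
consumer sketch (the "xo" clock-names entry and the function name are
assumed for illustration, not taken from the patch):

	#include <linux/clk.h>
	#include <linux/device.h>

	static int example_probe(struct device *dev)
	{
		struct clk *xo;

		/* resolves to e.g. <&rpmhcc RPMH_CXO_CLK> in the DT */
		xo = devm_clk_get(dev, "xo");
		if (IS_ERR(xo))
			return PTR_ERR(xo);

		/* ends up in clk_rpmh_prepare(), i.e. an RPMh vote */
		return clk_prepare_enable(xo);
	}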
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 39ce64c2783b..db9b2471ac40 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/export.h>
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 00196ee15e73..4aa33ee70bae 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,15 +1,6 @@
-/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved. */
+
#ifndef __QCOM_CLK_COMMON_H__
#define __QCOM_CLK_COMMON_H__
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
new file mode 100644
index 000000000000..0cc4909b5dbe
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_0[] = {
+ "bi_tcxo",
+ "dsi0_phy_pll_out_byteclk",
+ "dsi1_phy_pll_out_byteclk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_2[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_MAIN, 4 },
+ { P_GPLL0_OUT_MAIN_DIV, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_3[] = {
+ "bi_tcxo",
+ "disp_cc_pll0",
+ "gcc_disp_gpll0_clk_src",
+ "gcc_disp_gpll0_div_clk_src",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_4[] = {
+ "bi_tcxo",
+ "dsi0_phy_pll_out_dsiclk",
+ "dsi1_phy_pll_out_dsiclk",
+ "core_bi_pll_test_se",
+};
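+
+/*
+ * Each parent_map row pairs a P_* source with its hardware mux select
+ * value; the companion parent_names array must list the parents in the
+ * same order, because the RCG code indexes both tables together.
+ */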
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x20d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x20ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x2108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x2120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_GPLL0_OUT_MAIN, 7, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x2088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_names = disp_cc_parent_names_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x2058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_names = disp_cc_parent_names_4,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x2070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_names = disp_cc_parent_names_4,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x20a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_names = disp_cc_parent_names_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_names = disp_cc_parent_names_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_axi_clk = {
+ .halt_reg = 0x4008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x20e8,
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_div_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x2030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x2104,
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte1_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte1_div_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_esc0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_esc1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_mdp_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_pclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_pclk1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_rot_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0x5004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0x5008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x3000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *disp_cc_sdm845_clocks[] = {
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AXI_CLK] = &disp_cc_mdss_axi_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] =
+ &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] =
+ &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sdm845_resets[] = {
+ [DISP_CC_MDSS_RSCC_BCR] = { 0x5000 },
+};
+
+static struct gdsc *disp_cc_sdm845_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static const struct regmap_config disp_cc_sdm845_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sdm845_desc = {
+ .config = &disp_cc_sdm845_regmap_config,
+ .clks = disp_cc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sdm845_clocks),
+ .resets = disp_cc_sdm845_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sdm845_resets),
+ .gdscs = disp_cc_sdm845_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sdm845_gdscs),
+};
+
+static const struct of_device_id disp_cc_sdm845_match_table[] = {
+ { .compatible = "qcom,sdm845-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sdm845_match_table);
+
+static int disp_cc_sdm845_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct alpha_pll_config disp_cc_pll0_config = {};
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ disp_cc_pll0_config.l = 0x2c;
+ disp_cc_pll0_config.alpha = 0xcaaa;
+
+ clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+ /* Enable hardware clock gating for DSI and MDP clocks */
+ regmap_update_bits(regmap, 0x8000, 0x7f0, 0x7f0);
+
+ return qcom_cc_really_probe(pdev, &disp_cc_sdm845_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sdm845_driver = {
+ .probe = disp_cc_sdm845_probe,
+ .driver = {
+ .name = "disp_cc-sdm845",
+ .of_match_table = disp_cc_sdm845_match_table,
+ },
+};
+
+static int __init disp_cc_sdm845_init(void)
+{
+ return platform_driver_register(&disp_cc_sdm845_driver);
+}
+subsys_initcall(disp_cc_sdm845_init);
+
+static void __exit disp_cc_sdm845_exit(void)
+{
+ platform_driver_unregister(&disp_cc_sdm845_driver);
+}
+module_exit(disp_cc_sdm845_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI DISPCC SDM845 Driver");
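
The probe routine above programs disp_cc_pll0 with l = 0x2c and
alpha = 0xcaaa. Assuming the Fabia PLL computes its rate as
parent * (l + alpha / 2^16) with a 16-bit fractional field (an assumption
about the PLL, not stated in the patch), that lands on roughly 860 MHz,
which is consistent with the 430 MHz (/2) and 344 MHz (/2.5) entries in the
MDP and rotator frequency tables. A quick back-of-the-envelope check:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t parent = 19200000;		/* bi_tcxo */
		uint64_t l = 0x2c, alpha = 0xcaaa;	/* from probe() */
		uint64_t rate = parent * l + ((parent * alpha) >> 16);

		printf("disp_cc_pll0 ~ %llu Hz\n", (unsigned long long)rate);
		return 0;	/* prints 859999804, i.e. ~860 MHz */
	}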
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 486d9610355c..9c99a71ea71e 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -106,8 +106,6 @@ static const char * const gcc_xo_pcie_sleep[] = {
"sleep_clk_src",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll gpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index 46cb256b4aa2..8902ad42ba87 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -179,8 +179,6 @@ static const char * const gcc_xo_ddr_500_200[] = {
"ddrpllapss",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
F(48000000, P_XO, 1, 0, 0),
F(200000000, P_FEPLL200, 1, 0, 0),
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 28eb200d0f1e..5f61225657ab 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -1220,7 +1220,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1269,7 +1268,6 @@ static struct clk_rcg sdc3_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1353,7 +1351,6 @@ static struct clk_rcg tsif_ref_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 0462f4a8c932..505c6263141d 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -32,8 +32,6 @@
#include "clk-regmap-mux.h"
#include "reset.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_GPLL0,
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
index b99dd406e907..849046fbed6d 100644
--- a/drivers/clk/qcom/gcc-mdm9615.c
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -947,7 +947,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_cxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -996,7 +995,6 @@ static struct clk_rcg sdc2_src = {
.parent_names = gcc_cxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index c347a0d44bc8..7e930e25c79f 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -1558,7 +1558,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1607,7 +1606,6 @@ static struct clk_rcg sdc2_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1656,7 +1654,6 @@ static struct clk_rcg sdc3_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1705,7 +1702,6 @@ static struct clk_rcg sdc4_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1754,7 +1750,6 @@ static struct clk_rcg sdc5_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index d6c7f50ba86a..ac2b0aa1e8b5 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -264,8 +264,6 @@ static const char * const gcc_xo_gpll1_emclk_sleep[] = {
"sleep_clk",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll gpll0 = {
.l_reg = 0x21004,
.m_reg = 0x21008,
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index eb551c75fba6..fd495e0471bb 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -1628,7 +1628,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1677,7 +1676,6 @@ static struct clk_rcg sdc2_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1726,7 +1724,6 @@ static struct clk_rcg sdc3_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1775,7 +1772,6 @@ static struct clk_rcg sdc4_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1824,7 +1820,6 @@ static struct clk_rcg sdc5_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 348e30da4f18..08e2900d172c 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -62,8 +62,6 @@ static const char * const gcc_xo_gpll0_gpll4[] = {
"gpll4_vote",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll gpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 1e38efc37180..53f0f369a33e 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -57,8 +57,6 @@ static const char * const gcc_xo_gpll0_gpll4[] = {
"gpll4",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_fixed_factor xo = {
.mult = 1,
.div = 1,
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9f35b3fe1d97..9a3290fdd01b 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -32,8 +32,6 @@
#include "reset.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_GPLL0,
@@ -2781,6 +2779,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x75018,
.enable_mask = BIT(0),
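
The one-line change above switches the UFS TX symbol clock to
BRANCH_HALT_SKIP, which tells the branch-clock code not to poll the halt
status bit when toggling the clock — presumably because that bit cannot be
trusted while the externally supplied symbol clock is absent (an inference,
not stated in the patch). For reference, the main halt_check modes defined
in clk-branch.h:

	BRANCH_HALT		/* poll the halt bit until the clock runs */
	BRANCH_HALT_VOTED	/* same, for voted (shared) enable bits */
	BRANCH_HALT_DELAY	/* no status bit to check; just delay */
	BRANCH_HALT_SKIP	/* don't check the halt bit at all */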
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 78d87f5c7098..9f0ae403d5f5 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -25,8 +25,6 @@
#include "reset.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_AUD_REF_CLK,
P_CORE_BI_PLL_TEST_SE,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index e78e6f5b99fc..fa1a196350f1 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -25,8 +25,6 @@
#include "gdsc.h"
#include "reset.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_BI_TCXO,
P_AUD_REF_CLK,
@@ -1103,6 +1101,7 @@ static struct clk_branch gcc_camera_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1129,6 +1128,7 @@ static struct clk_branch gcc_camera_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camera_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1270,6 +1270,7 @@ static struct clk_branch gcc_disp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1328,6 +1329,7 @@ static struct clk_branch gcc_disp_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_disp_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1397,6 +1399,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2985,6 +2988,7 @@ static struct clk_branch gcc_video_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_video_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3011,6 +3015,7 @@ static struct clk_branch gcc_video_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_video_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3049,6 +3054,36 @@ static struct clk_branch gcc_vs_ctrl_clk = {
},
};
+static struct clk_branch gcc_cpuss_dvm_bus_clk = {
+ .halt_reg = 0x48190,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48190,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_dvm_bus_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc pcie_0_gdsc = {
.gdscr = 0x6b004,
.pd = {
@@ -3344,6 +3379,8 @@ static struct clk_regmap *gcc_sdm845_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
[GPLL4] = &gpll4.clkr,
+ [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
};
static const struct qcom_reset_map gcc_sdm845_resets[] = {
@@ -3433,10 +3470,6 @@ static int gcc_sdm845_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
- /* Enable CPUSS clocks */
- regmap_update_bits(regmap, 0x48190, BIT(0), 0x1);
- regmap_update_bits(regmap, 0x52004, BIT(22), 0x1);
-
return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
}
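
Rather than force-enabling the CPUSS clocks with raw register writes in
probe() (removed above), the patch models them as ordinary branch clocks
flagged CLK_IS_CRITICAL, the same flag added to the AHB/XO housekeeping
clocks in the earlier hunks. The common framework then owns them; a
paraphrased sketch of what the core does at registration (not copied
verbatim from drivers/clk/clk.c):

	if (core->flags & CLK_IS_CRITICAL) {
		clk_core_prepare(core);	/* taken once at registration... */
		clk_core_enable(core);	/* ...so the enable count never
					 * drops to zero, not even via
					 * clk_disable_unused() */
	}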
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 30777f9f1a43..4ce1d7c88377 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -219,8 +219,6 @@ static const char * const mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
"sleep_clk_src",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll mmpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 715e7cd94125..91818516c3e0 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -184,8 +184,6 @@ static const char * const mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
"dsi1pllbyte",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll mmpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 1a25ee4f3658..7d4ee109435c 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -34,8 +34,6 @@
#include "reset.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_MMPLL0,
@@ -2910,6 +2908,7 @@ static struct gdsc mmagic_bimc_gdsc = {
.name = "mmagic_bimc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = ALWAYS_ON,
};
static struct gdsc mmagic_video_gdsc = {
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 9073b7a710ac..5d6a7724a194 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -18,8 +18,6 @@
#include "clk-pll.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_BI_TCXO,
P_CORE_BI_PLL_TEST_SE,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index f9ba71311727..9022bbe1297e 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -21,6 +21,7 @@ config CLK_RENESAS
select CLK_R8A77980 if ARCH_R8A77980
select CLK_R8A77990 if ARCH_R8A77990
select CLK_R8A77995 if ARCH_R8A77995
+ select CLK_R9A06G032 if ARCH_R9A06G032
select CLK_SH73A0 if ARCH_SH73A0
if CLK_RENESAS
@@ -125,6 +126,11 @@ config CLK_R8A77995
bool "R-Car D3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R9A06G032
+ bool "Renesas R9A06G032 clock driver"
+ help
+ This is a driver for R9A06G032 clocks.
+
config CLK_SH73A0
bool "SH-Mobile AG5 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index fe5bac9215e5..e4aa3d6143d2 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77990) += r8a77990-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
+obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 775b0ceaa337..a85dd50e8911 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -103,6 +103,7 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x26c),
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cr", R8A7795_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
DEF_DIV6P1("canfd", R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
@@ -132,6 +133,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3),
DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3),
DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3),
+ DEF_MOD("sceg-pub", 229, R8A7795_CLK_CR),
DEF_MOD("cmt3", 300, R8A7795_CLK_R),
DEF_MOD("cmt2", 301, R8A7795_CLK_R),
DEF_MOD("cmt1", 302, R8A7795_CLK_R),
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
new file mode 100644
index 000000000000..a0b6ecdc63dd
--- /dev/null
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -0,0 +1,893 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R9A06G032 clock driver
+ *
+ * Copyright (C) 2018 Renesas Electronics Europe Limited
+ *
+ * Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <dt-bindings/clock/r9a06g032-sysctrl.h>
+
+struct r9a06g032_gate {
+ u16 gate, reset, ready, midle,
+ scon, mirack, mistat;
+};
+
+/* This is used to describe a clock for instantiation */
+struct r9a06g032_clkdesc {
+ const char *name;
+ uint32_t type: 3;
+ uint32_t index: 8;
+ uint32_t source : 8; /* source index + 1 (0 == none) */
+ /* these are used to populate the bitsel struct */
+ union {
+ struct r9a06g032_gate gate;
+ /* for dividers */
+ struct {
+ unsigned int div_min : 10, div_max : 10, reg: 10;
+ u16 div_table[4];
+ };
+ /* For fixed-factor ones */
+ struct {
+ u16 div, mul;
+ };
+ unsigned int factor;
+ unsigned int frequency;
+ /* for dual gate */
+ struct {
+ uint16_t group : 1, index: 3;
+ u16 sel, g1, r1, g2, r2;
+ } dual;
+ };
+} __packed;
+
+#define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
+ { .gate = _clk, .reset = _rst, \
+ .ready = _rdy, .midle = _midle, \
+ .scon = _scon, .mirack = _mirack, .mistat = _mistat }
+#define D_GATE(_idx, _n, _src, ...) \
+ { .type = K_GATE, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .gate = I_GATE(__VA_ARGS__), }
+#define D_ROOT(_idx, _n, _mul, _div) \
+ { .type = K_FFC, .index = R9A06G032_##_idx, .name = _n, \
+ .div = _div, .mul = _mul }
+#define D_FFC(_idx, _n, _src, _div) \
+ { .type = K_FFC, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .div = _div, .mul = 1}
+#define D_DIV(_idx, _n, _src, _reg, _min, _max, ...) \
+ { .type = K_DIV, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .reg = _reg, .div_min = _min, .div_max = _max, \
+ .div_table = { __VA_ARGS__ } }
+#define D_UGATE(_idx, _n, _src, _g, _gi, _g1, _r1, _g2, _r2) \
+ { .type = K_DUALGATE, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .dual = { .group = _g, .index = _gi, \
+ .g1 = _g1, .r1 = _r1, .g2 = _g2, .r2 = _r2 }, }
+
+enum { K_GATE = 0, K_FFC, K_DIV, K_BITSEL, K_DUALGATE };
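+
+/*
+ * For instance, D_GATE(CLK_ADC, "clk_adc", DIV_ADC, 0x1ea, 0x1eb, 0, 0,
+ * 0, 0, 0) further down expands to { .type = K_GATE,
+ * .index = R9A06G032_CLK_ADC, .source = 1 + R9A06G032_DIV_ADC,
+ * .name = "clk_adc", .gate = { .gate = 0x1ea, .reset = 0x1eb } }: the
+ * u16 values identify gate/reset control bits in the system controller,
+ * with unused handshake fields left at zero.
+ */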
+
+/* Internal clock IDs */
+#define R9A06G032_CLKOUT 0
+#define R9A06G032_CLKOUT_D10 2
+#define R9A06G032_CLKOUT_D16 3
+#define R9A06G032_CLKOUT_D160 4
+#define R9A06G032_CLKOUT_D1OR2 5
+#define R9A06G032_CLKOUT_D20 6
+#define R9A06G032_CLKOUT_D40 7
+#define R9A06G032_CLKOUT_D5 8
+#define R9A06G032_CLKOUT_D8 9
+#define R9A06G032_DIV_ADC 10
+#define R9A06G032_DIV_I2C 11
+#define R9A06G032_DIV_NAND 12
+#define R9A06G032_DIV_P1_PG 13
+#define R9A06G032_DIV_P2_PG 14
+#define R9A06G032_DIV_P3_PG 15
+#define R9A06G032_DIV_P4_PG 16
+#define R9A06G032_DIV_P5_PG 17
+#define R9A06G032_DIV_P6_PG 18
+#define R9A06G032_DIV_QSPI0 19
+#define R9A06G032_DIV_QSPI1 20
+#define R9A06G032_DIV_REF_SYNC 21
+#define R9A06G032_DIV_SDIO0 22
+#define R9A06G032_DIV_SDIO1 23
+#define R9A06G032_DIV_SWITCH 24
+#define R9A06G032_DIV_UART 25
+#define R9A06G032_DIV_MOTOR 64
+#define R9A06G032_CLK_DDRPHY_PLLCLK_D4 78
+#define R9A06G032_CLK_ECAT100_D4 79
+#define R9A06G032_CLK_HSR100_D2 80
+#define R9A06G032_CLK_REF_SYNC_D4 81
+#define R9A06G032_CLK_REF_SYNC_D8 82
+#define R9A06G032_CLK_SERCOS100_D2 83
+#define R9A06G032_DIV_CA7 84
+
+#define R9A06G032_UART_GROUP_012 154
+#define R9A06G032_UART_GROUP_34567 155
+
+#define R9A06G032_CLOCK_COUNT (R9A06G032_UART_GROUP_34567 + 1)
+
+static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = {
+ D_ROOT(CLKOUT, "clkout", 25, 1),
+ D_ROOT(CLK_PLL_USB, "clk_pll_usb", 12, 10),
+ D_FFC(CLKOUT_D10, "clkout_d10", CLKOUT, 10),
+ D_FFC(CLKOUT_D16, "clkout_d16", CLKOUT, 16),
+ D_FFC(CLKOUT_D160, "clkout_d160", CLKOUT, 160),
+ D_DIV(CLKOUT_D1OR2, "clkout_d1or2", CLKOUT, 0, 1, 2),
+ D_FFC(CLKOUT_D20, "clkout_d20", CLKOUT, 20),
+ D_FFC(CLKOUT_D40, "clkout_d40", CLKOUT, 40),
+ D_FFC(CLKOUT_D5, "clkout_d5", CLKOUT, 5),
+ D_FFC(CLKOUT_D8, "clkout_d8", CLKOUT, 8),
+ D_DIV(DIV_ADC, "div_adc", CLKOUT, 77, 50, 250),
+ D_DIV(DIV_I2C, "div_i2c", CLKOUT, 78, 12, 16),
+ D_DIV(DIV_NAND, "div_nand", CLKOUT, 82, 12, 32),
+ D_DIV(DIV_P1_PG, "div_p1_pg", CLKOUT, 68, 12, 200),
+ D_DIV(DIV_P2_PG, "div_p2_pg", CLKOUT, 62, 12, 128),
+ D_DIV(DIV_P3_PG, "div_p3_pg", CLKOUT, 64, 8, 128),
+ D_DIV(DIV_P4_PG, "div_p4_pg", CLKOUT, 66, 8, 128),
+ D_DIV(DIV_P5_PG, "div_p5_pg", CLKOUT, 71, 10, 40),
+ D_DIV(DIV_P6_PG, "div_p6_pg", CLKOUT, 18, 12, 64),
+ D_DIV(DIV_QSPI0, "div_qspi0", CLKOUT, 73, 3, 7),
+ D_DIV(DIV_QSPI1, "div_qspi1", CLKOUT, 25, 3, 7),
+ D_DIV(DIV_REF_SYNC, "div_ref_sync", CLKOUT, 56, 2, 16, 2, 4, 8, 16),
+ D_DIV(DIV_SDIO0, "div_sdio0", CLKOUT, 74, 20, 128),
+ D_DIV(DIV_SDIO1, "div_sdio1", CLKOUT, 75, 20, 128),
+ D_DIV(DIV_SWITCH, "div_switch", CLKOUT, 37, 5, 40),
+ D_DIV(DIV_UART, "div_uart", CLKOUT, 79, 12, 128),
+ D_GATE(CLK_25_PG4, "clk_25_pg4", CLKOUT_D40, 0x749, 0x74a, 0x74b, 0, 0xae3, 0, 0),
+ D_GATE(CLK_25_PG5, "clk_25_pg5", CLKOUT_D40, 0x74c, 0x74d, 0x74e, 0, 0xae4, 0, 0),
+ D_GATE(CLK_25_PG6, "clk_25_pg6", CLKOUT_D40, 0x74f, 0x750, 0x751, 0, 0xae5, 0, 0),
+ D_GATE(CLK_25_PG7, "clk_25_pg7", CLKOUT_D40, 0x752, 0x753, 0x754, 0, 0xae6, 0, 0),
+ D_GATE(CLK_25_PG8, "clk_25_pg8", CLKOUT_D40, 0x755, 0x756, 0x757, 0, 0xae7, 0, 0),
+ D_GATE(CLK_ADC, "clk_adc", DIV_ADC, 0x1ea, 0x1eb, 0, 0, 0, 0, 0),
+ D_GATE(CLK_ECAT100, "clk_ecat100", CLKOUT_D10, 0x405, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_HSR100, "clk_hsr100", CLKOUT_D10, 0x483, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_I2C0, "clk_i2c0", DIV_I2C, 0x1e6, 0x1e7, 0, 0, 0, 0, 0),
+ D_GATE(CLK_I2C1, "clk_i2c1", DIV_I2C, 0x1e8, 0x1e9, 0, 0, 0, 0, 0),
+ D_GATE(CLK_MII_REF, "clk_mii_ref", CLKOUT_D40, 0x342, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_NAND, "clk_nand", DIV_NAND, 0x284, 0x285, 0, 0, 0, 0, 0),
+ D_GATE(CLK_NOUSBP2_PG6, "clk_nousbp2_pg6", DIV_P2_PG, 0x774, 0x775, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P1_PG2, "clk_p1_pg2", DIV_P1_PG, 0x862, 0x863, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P1_PG3, "clk_p1_pg3", DIV_P1_PG, 0x864, 0x865, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P1_PG4, "clk_p1_pg4", DIV_P1_PG, 0x866, 0x867, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P4_PG3, "clk_p4_pg3", DIV_P4_PG, 0x824, 0x825, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P4_PG4, "clk_p4_pg4", DIV_P4_PG, 0x826, 0x827, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P6_PG1, "clk_p6_pg1", DIV_P6_PG, 0x8a0, 0x8a1, 0x8a2, 0, 0xb60, 0, 0),
+ D_GATE(CLK_P6_PG2, "clk_p6_pg2", DIV_P6_PG, 0x8a3, 0x8a4, 0x8a5, 0, 0xb61, 0, 0),
+ D_GATE(CLK_P6_PG3, "clk_p6_pg3", DIV_P6_PG, 0x8a6, 0x8a7, 0x8a8, 0, 0xb62, 0, 0),
+ D_GATE(CLK_P6_PG4, "clk_p6_pg4", DIV_P6_PG, 0x8a9, 0x8aa, 0x8ab, 0, 0xb63, 0, 0),
+ D_GATE(CLK_QSPI0, "clk_qspi0", DIV_QSPI0, 0x2a4, 0x2a5, 0, 0, 0, 0, 0),
+ D_GATE(CLK_QSPI1, "clk_qspi1", DIV_QSPI1, 0x484, 0x485, 0, 0, 0, 0, 0),
+ D_GATE(CLK_RGMII_REF, "clk_rgmii_ref", CLKOUT_D8, 0x340, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_RMII_REF, "clk_rmii_ref", CLKOUT_D20, 0x341, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SDIO0, "clk_sdio0", DIV_SDIO0, 0x64, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SDIO1, "clk_sdio1", DIV_SDIO1, 0x644, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SERCOS100, "clk_sercos100", CLKOUT_D10, 0x425, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SLCD, "clk_slcd", DIV_P1_PG, 0x860, 0x861, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI0, "clk_spi0", DIV_P3_PG, 0x7e0, 0x7e1, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI1, "clk_spi1", DIV_P3_PG, 0x7e2, 0x7e3, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI2, "clk_spi2", DIV_P3_PG, 0x7e4, 0x7e5, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI3, "clk_spi3", DIV_P3_PG, 0x7e6, 0x7e7, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI4, "clk_spi4", DIV_P4_PG, 0x820, 0x821, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI5, "clk_spi5", DIV_P4_PG, 0x822, 0x823, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SWITCH, "clk_switch", DIV_SWITCH, 0x982, 0x983, 0, 0, 0, 0, 0),
+ D_DIV(DIV_MOTOR, "div_motor", CLKOUT_D5, 84, 2, 8),
+ D_GATE(HCLK_ECAT125, "hclk_ecat125", CLKOUT_D8, 0x400, 0x401, 0, 0x402, 0, 0x440, 0x441),
+ D_GATE(HCLK_PINCONFIG, "hclk_pinconfig", CLKOUT_D40, 0x740, 0x741, 0x742, 0, 0xae0, 0, 0),
+ D_GATE(HCLK_SERCOS, "hclk_sercos", CLKOUT_D10, 0x420, 0x422, 0, 0x421, 0, 0x460, 0x461),
+ D_GATE(HCLK_SGPIO2, "hclk_sgpio2", DIV_P5_PG, 0x8c3, 0x8c4, 0x8c5, 0, 0xb41, 0, 0),
+ D_GATE(HCLK_SGPIO3, "hclk_sgpio3", DIV_P5_PG, 0x8c6, 0x8c7, 0x8c8, 0, 0xb42, 0, 0),
+ D_GATE(HCLK_SGPIO4, "hclk_sgpio4", DIV_P5_PG, 0x8c9, 0x8ca, 0x8cb, 0, 0xb43, 0, 0),
+ D_GATE(HCLK_TIMER0, "hclk_timer0", CLKOUT_D40, 0x743, 0x744, 0x745, 0, 0xae1, 0, 0),
+ D_GATE(HCLK_TIMER1, "hclk_timer1", CLKOUT_D40, 0x746, 0x747, 0x748, 0, 0xae2, 0, 0),
+ D_GATE(HCLK_USBF, "hclk_usbf", CLKOUT_D8, 0xe3, 0, 0, 0xe4, 0, 0x102, 0x103),
+ D_GATE(HCLK_USBH, "hclk_usbh", CLKOUT_D8, 0xe0, 0xe1, 0, 0xe2, 0, 0x100, 0x101),
+ D_GATE(HCLK_USBPM, "hclk_usbpm", CLKOUT_D8, 0xe5, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_48_PG_F, "clk_48_pg_f", CLK_48, 0x78c, 0x78d, 0, 0x78e, 0, 0xb04, 0xb05),
+ D_GATE(CLK_48_PG4, "clk_48_pg4", CLK_48, 0x789, 0x78a, 0x78b, 0, 0xb03, 0, 0),
+ D_FFC(CLK_DDRPHY_PLLCLK_D4, "clk_ddrphy_pllclk_d4", CLK_DDRPHY_PLLCLK, 4),
+ D_FFC(CLK_ECAT100_D4, "clk_ecat100_d4", CLK_ECAT100, 4),
+ D_FFC(CLK_HSR100_D2, "clk_hsr100_d2", CLK_HSR100, 2),
+ D_FFC(CLK_REF_SYNC_D4, "clk_ref_sync_d4", CLK_REF_SYNC, 4),
+ D_FFC(CLK_REF_SYNC_D8, "clk_ref_sync_d8", CLK_REF_SYNC, 8),
+ D_FFC(CLK_SERCOS100_D2, "clk_sercos100_d2", CLK_SERCOS100, 2),
+ D_DIV(DIV_CA7, "div_ca7", CLK_REF_SYNC, 57, 1, 4, 1, 2, 4),
+ D_GATE(HCLK_CAN0, "hclk_can0", CLK_48, 0x783, 0x784, 0x785, 0, 0xb01, 0, 0),
+ D_GATE(HCLK_CAN1, "hclk_can1", CLK_48, 0x786, 0x787, 0x788, 0, 0xb02, 0, 0),
+ D_GATE(HCLK_DELTASIGMA, "hclk_deltasigma", DIV_MOTOR, 0x1ef, 0x1f0, 0x1f1, 0, 0, 0, 0),
+ D_GATE(HCLK_PWMPTO, "hclk_pwmpto", DIV_MOTOR, 0x1ec, 0x1ed, 0x1ee, 0, 0, 0, 0),
+ D_GATE(HCLK_RSV, "hclk_rsv", CLK_48, 0x780, 0x781, 0x782, 0, 0xb00, 0, 0),
+ D_GATE(HCLK_SGPIO0, "hclk_sgpio0", DIV_MOTOR, 0x1e0, 0x1e1, 0x1e2, 0, 0, 0, 0),
+ D_GATE(HCLK_SGPIO1, "hclk_sgpio1", DIV_MOTOR, 0x1e3, 0x1e4, 0x1e5, 0, 0, 0, 0),
+ D_DIV(RTOS_MDC, "rtos_mdc", CLK_REF_SYNC, 100, 80, 640, 80, 160, 320, 640),
+ D_GATE(CLK_CM3, "clk_cm3", CLK_REF_SYNC_D4, 0xba0, 0xba1, 0, 0xba2, 0, 0xbc0, 0xbc1),
+ D_GATE(CLK_DDRC, "clk_ddrc", CLK_DDRPHY_PLLCLK_D4, 0x323, 0x324, 0, 0, 0, 0, 0),
+ D_GATE(CLK_ECAT25, "clk_ecat25", CLK_ECAT100_D4, 0x403, 0x404, 0, 0, 0, 0, 0),
+ D_GATE(CLK_HSR50, "clk_hsr50", CLK_HSR100_D2, 0x484, 0x485, 0, 0, 0, 0, 0),
+ D_GATE(CLK_HW_RTOS, "clk_hw_rtos", CLK_REF_SYNC_D4, 0xc60, 0xc61, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SERCOS50, "clk_sercos50", CLK_SERCOS100_D2, 0x424, 0x423, 0, 0, 0, 0, 0),
+ D_GATE(HCLK_ADC, "hclk_adc", CLK_REF_SYNC_D8, 0x1af, 0x1b0, 0x1b1, 0, 0, 0, 0),
+ D_GATE(HCLK_CM3, "hclk_cm3", CLK_REF_SYNC_D4, 0xc20, 0xc21, 0xc22, 0, 0, 0, 0),
+ D_GATE(HCLK_CRYPTO_EIP150, "hclk_crypto_eip150", CLK_REF_SYNC_D4, 0x123, 0x124, 0x125, 0, 0x142, 0, 0),
+ D_GATE(HCLK_CRYPTO_EIP93, "hclk_crypto_eip93", CLK_REF_SYNC_D4, 0x120, 0x121, 0, 0x122, 0, 0x140, 0x141),
+ D_GATE(HCLK_DDRC, "hclk_ddrc", CLK_REF_SYNC_D4, 0x320, 0x322, 0, 0x321, 0, 0x3a0, 0x3a1),
+ D_GATE(HCLK_DMA0, "hclk_dma0", CLK_REF_SYNC_D4, 0x260, 0x261, 0x262, 0x263, 0x2c0, 0x2c1, 0x2c2),
+ D_GATE(HCLK_DMA1, "hclk_dma1", CLK_REF_SYNC_D4, 0x264, 0x265, 0x266, 0x267, 0x2c3, 0x2c4, 0x2c5),
+ D_GATE(HCLK_GMAC0, "hclk_gmac0", CLK_REF_SYNC_D4, 0x360, 0x361, 0x362, 0x363, 0x3c0, 0x3c1, 0x3c2),
+ D_GATE(HCLK_GMAC1, "hclk_gmac1", CLK_REF_SYNC_D4, 0x380, 0x381, 0x382, 0x383, 0x3e0, 0x3e1, 0x3e2),
+ D_GATE(HCLK_GPIO0, "hclk_gpio0", CLK_REF_SYNC_D4, 0x212, 0x213, 0x214, 0, 0, 0, 0),
+ D_GATE(HCLK_GPIO1, "hclk_gpio1", CLK_REF_SYNC_D4, 0x215, 0x216, 0x217, 0, 0, 0, 0),
+ D_GATE(HCLK_GPIO2, "hclk_gpio2", CLK_REF_SYNC_D4, 0x229, 0x22a, 0x22b, 0, 0, 0, 0),
+ D_GATE(HCLK_HSR, "hclk_hsr", CLK_HSR100_D2, 0x480, 0x482, 0, 0x481, 0, 0x4c0, 0x4c1),
+ D_GATE(HCLK_I2C0, "hclk_i2c0", CLK_REF_SYNC_D8, 0x1a9, 0x1aa, 0x1ab, 0, 0, 0, 0),
+ D_GATE(HCLK_I2C1, "hclk_i2c1", CLK_REF_SYNC_D8, 0x1ac, 0x1ad, 0x1ae, 0, 0, 0, 0),
+ D_GATE(HCLK_LCD, "hclk_lcd", CLK_REF_SYNC_D4, 0x7a0, 0x7a1, 0x7a2, 0, 0xb20, 0, 0),
+ D_GATE(HCLK_MSEBI_M, "hclk_msebi_m", CLK_REF_SYNC_D4, 0x164, 0x165, 0x166, 0, 0x183, 0, 0),
+ D_GATE(HCLK_MSEBI_S, "hclk_msebi_s", CLK_REF_SYNC_D4, 0x160, 0x161, 0x162, 0x163, 0x180, 0x181, 0x182),
+ D_GATE(HCLK_NAND, "hclk_nand", CLK_REF_SYNC_D4, 0x280, 0x281, 0x282, 0x283, 0x2e0, 0x2e1, 0x2e2),
+ D_GATE(HCLK_PG_I, "hclk_pg_i", CLK_REF_SYNC_D4, 0x7ac, 0x7ad, 0, 0x7ae, 0, 0xb24, 0xb25),
+ D_GATE(HCLK_PG19, "hclk_pg19", CLK_REF_SYNC_D4, 0x22c, 0x22d, 0x22e, 0, 0, 0, 0),
+ D_GATE(HCLK_PG20, "hclk_pg20", CLK_REF_SYNC_D4, 0x22f, 0x230, 0x231, 0, 0, 0, 0),
+ D_GATE(HCLK_PG3, "hclk_pg3", CLK_REF_SYNC_D4, 0x7a6, 0x7a7, 0x7a8, 0, 0xb22, 0, 0),
+ D_GATE(HCLK_PG4, "hclk_pg4", CLK_REF_SYNC_D4, 0x7a9, 0x7aa, 0x7ab, 0, 0xb23, 0, 0),
+ D_GATE(HCLK_QSPI0, "hclk_qspi0", CLK_REF_SYNC_D4, 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x300, 0x301, 0x302),
+ D_GATE(HCLK_QSPI1, "hclk_qspi1", CLK_REF_SYNC_D4, 0x480, 0x481, 0x482, 0x483, 0x4c0, 0x4c1, 0x4c2),
+ D_GATE(HCLK_ROM, "hclk_rom", CLK_REF_SYNC_D4, 0xaa0, 0xaa1, 0xaa2, 0, 0xb80, 0, 0),
+ D_GATE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, 0xa00, 0, 0, 0, 0, 0, 0),
+ D_GATE(HCLK_SDIO0, "hclk_sdio0", CLK_REF_SYNC_D4, 0x60, 0x61, 0x62, 0x63, 0x80, 0x81, 0x82),
+ D_GATE(HCLK_SDIO1, "hclk_sdio1", CLK_REF_SYNC_D4, 0x640, 0x641, 0x642, 0x643, 0x660, 0x661, 0x662),
+ D_GATE(HCLK_SEMAP, "hclk_semap", CLK_REF_SYNC_D4, 0x7a3, 0x7a4, 0x7a5, 0, 0xb21, 0, 0),
+ D_GATE(HCLK_SPI0, "hclk_spi0", CLK_REF_SYNC_D4, 0x200, 0x201, 0x202, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI1, "hclk_spi1", CLK_REF_SYNC_D4, 0x203, 0x204, 0x205, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI2, "hclk_spi2", CLK_REF_SYNC_D4, 0x206, 0x207, 0x208, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI3, "hclk_spi3", CLK_REF_SYNC_D4, 0x209, 0x20a, 0x20b, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI4, "hclk_spi4", CLK_REF_SYNC_D4, 0x20c, 0x20d, 0x20e, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI5, "hclk_spi5", CLK_REF_SYNC_D4, 0x20f, 0x210, 0x211, 0, 0, 0, 0),
+ D_GATE(HCLK_SWITCH, "hclk_switch", CLK_REF_SYNC_D4, 0x980, 0, 0x981, 0, 0, 0, 0),
+ D_GATE(HCLK_SWITCH_RG, "hclk_switch_rg", CLK_REF_SYNC_D4, 0xc40, 0xc41, 0xc42, 0, 0, 0, 0),
+ D_GATE(HCLK_UART0, "hclk_uart0", CLK_REF_SYNC_D8, 0x1a0, 0x1a1, 0x1a2, 0, 0, 0, 0),
+ D_GATE(HCLK_UART1, "hclk_uart1", CLK_REF_SYNC_D8, 0x1a3, 0x1a4, 0x1a5, 0, 0, 0, 0),
+ D_GATE(HCLK_UART2, "hclk_uart2", CLK_REF_SYNC_D8, 0x1a6, 0x1a7, 0x1a8, 0, 0, 0, 0),
+ D_GATE(HCLK_UART3, "hclk_uart3", CLK_REF_SYNC_D4, 0x218, 0x219, 0x21a, 0, 0, 0, 0),
+ D_GATE(HCLK_UART4, "hclk_uart4", CLK_REF_SYNC_D4, 0x21b, 0x21c, 0x21d, 0, 0, 0, 0),
+ D_GATE(HCLK_UART5, "hclk_uart5", CLK_REF_SYNC_D4, 0x220, 0x221, 0x222, 0, 0, 0, 0),
+ D_GATE(HCLK_UART6, "hclk_uart6", CLK_REF_SYNC_D4, 0x223, 0x224, 0x225, 0, 0, 0, 0),
+ D_GATE(HCLK_UART7, "hclk_uart7", CLK_REF_SYNC_D4, 0x226, 0x227, 0x228, 0, 0, 0, 0),
+ /*
+ * These are not hardware clocks, but are needed to handle the special
+ * case where we have a 'selector bit' that doesn't just change the
+ * parent for a clock, but also the gate it's supposed to use.
+ */
+ {
+ .index = R9A06G032_UART_GROUP_012,
+ .name = "uart_group_012",
+ .type = K_BITSEL,
+ .source = 1 + R9A06G032_DIV_UART,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
+ .dual.sel = ((0xec / 4) << 5) | 24,
+ .dual.group = 0,
+ },
+ {
+ .index = R9A06G032_UART_GROUP_34567,
+ .name = "uart_group_34567",
+ .type = K_BITSEL,
+ .source = 1 + R9A06G032_DIV_P2_PG,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
+ .dual.sel = ((0x34 / 4) << 5) | 30,
+ .dual.group = 1,
+ },
+ D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
+ D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0, 1, 0x1b6, 0x1b7, 0x1b8, 0x1b9),
+ D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0, 2, 0x1ba, 0x1bb, 0x1bc, 0x1bd),
+ D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1, 0, 0x760, 0x761, 0x762, 0x763),
+ D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1, 1, 0x764, 0x765, 0x766, 0x767),
+ D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1, 2, 0x768, 0x769, 0x76a, 0x76b),
+ D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1, 3, 0x76c, 0x76d, 0x76e, 0x76f),
+ D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1, 4, 0x770, 0x771, 0x772, 0x773),
+};
+
+struct r9a06g032_priv {
+ struct clk_onecell_data data;
+ spinlock_t lock; /* protects concurrent access to gates */
+ void __iomem *reg;
+};
+
+/* register/bit pairs are encoded as a u16 */
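+/*
+ * For example, 0x1b2 decodes to 32-bit word index 0x1b2 >> 5 = 0xd
+ * (byte offset 0x34) and bit 0x1b2 & 0x1f = 18.
+ */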
+static void
+clk_rdesc_set(struct r9a06g032_priv *clocks,
+ u16 one, unsigned int on)
+{
+ u32 __iomem *reg = clocks->reg + (4 * (one >> 5));
+ u32 val = readl(reg);
+
+ val = (val & ~(1U << (one & 0x1f))) | ((!!on) << (one & 0x1f));
+ writel(val, reg);
+}
+
+static int
+clk_rdesc_get(struct r9a06g032_priv *clocks,
+ u16 one)
+{
+ u32 __iomem *reg = clocks->reg + (4 * (one >> 5));
+ u32 val = readl(reg);
+
+ return !!(val & (1U << (one & 0x1f)));
+}
+
+/*
+ * This implements the R9A06G032 clock gate 'driver'. We cannot use the
+ * generic clock gate framework, as the gates on the R9A06G032 have a
+ * special enabling sequence; therefore we use this little proxy.
+ */
+struct r9a06g032_clk_gate {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+
+ struct r9a06g032_gate gate;
+};
+
+#define to_r9a06g032_gate(_hw) container_of(_hw, struct r9a06g032_clk_gate, hw)
+
+static void
+r9a06g032_clk_gate_set(struct r9a06g032_priv *clocks,
+ struct r9a06g032_gate *g, int on)
+{
+ unsigned long flags;
+
+ WARN_ON(!g->gate);
+
+ spin_lock_irqsave(&clocks->lock, flags);
+ clk_rdesc_set(clocks, g->gate, on);
+ /* De-assert reset */
+ if (g->reset)
+ clk_rdesc_set(clocks, g->reset, 1);
+ spin_unlock_irqrestore(&clocks->lock, flags);
+
+ /* Hardware manual recommends 5us delay after enabling clock & reset */
+ udelay(5);
+
+ /*
+ * If the peripheral is memory mapped (i.e. an AXI slave), there is an
+ * associated SLVRDY bit in the System Controller that needs to be set
+ * so that the FlexWAY bus fabric passes on the read/write requests.
+ */
+ if (g->ready || g->midle) {
+ spin_lock_irqsave(&clocks->lock, flags);
+ if (g->ready)
+ clk_rdesc_set(clocks, g->ready, on);
+ /* Clear 'Master Idle Request' bit */
+ if (g->midle)
+ clk_rdesc_set(clocks, g->midle, !on);
+ spin_unlock_irqrestore(&clocks->lock, flags);
+ }
+ /* Note: We don't wait for FlexWAY Socket Connection signal */
+}
+
+static int r9a06g032_clk_gate_enable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_gate *g = to_r9a06g032_gate(hw);
+
+ r9a06g032_clk_gate_set(g->clocks, &g->gate, 1);
+ return 0;
+}
+
+static void r9a06g032_clk_gate_disable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_gate *g = to_r9a06g032_gate(hw);
+
+ r9a06g032_clk_gate_set(g->clocks, &g->gate, 0);
+}
+
+static int r9a06g032_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_gate *g = to_r9a06g032_gate(hw);
+
+ /* if the clock is in reset, the gate may be on yet the clock not running */
+ if (g->gate.reset && !clk_rdesc_get(g->clocks, g->gate.reset))
+ return 0;
+
+ return clk_rdesc_get(g->clocks, g->gate.gate);
+}
+
+static const struct clk_ops r9a06g032_clk_gate_ops = {
+ .enable = r9a06g032_clk_gate_enable,
+ .disable = r9a06g032_clk_gate_disable,
+ .is_enabled = r9a06g032_clk_gate_is_enabled,
+};
+
+static struct clk *
+r9a06g032_register_gate(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc)
+{
+ struct clk *clk;
+ struct r9a06g032_clk_gate *g;
+ struct clk_init_data init;
+
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return NULL;
+
+ init.name = desc->name;
+ init.ops = &r9a06g032_clk_gate_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ g->clocks = clocks;
+ g->index = desc->index;
+ g->gate = desc->gate;
+ g->hw.init = &init;
+
+ /*
+ * Important: some clocks are already in use by the CM3; we have to
+ * assume they are not Linux's to play with, and must not try to
+ * disable them at the end of boot!
+ */
+ if (r9a06g032_clk_gate_is_enabled(&g->hw)) {
+ init.flags |= CLK_IS_CRITICAL;
+ pr_debug("%s was enabled, making read-only\n", desc->name);
+ }
+
+ clk = clk_register(NULL, &g->hw);
+ if (IS_ERR(clk)) {
+ kfree(g);
+ return NULL;
+ }
+ return clk;
+}
+
+struct r9a06g032_clk_div {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+ u16 reg;
+ u16 min, max;
+ u8 table_size;
+ u16 table[8]; /* we know there are no more than 8 */
+};
+
+#define to_r9a06g032_div(_hw) \
+ container_of(_hw, struct r9a06g032_clk_div, hw)
+
+static unsigned long
+r9a06g032_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct r9a06g032_clk_div *clk = to_r9a06g032_div(hw);
+ u32 __iomem *reg = clk->clocks->reg + (4 * clk->reg);
+ u32 div = readl(reg);
+
+ if (div < clk->min)
+ div = clk->min;
+ else if (div > clk->max)
+ div = clk->max;
+ return DIV_ROUND_UP(parent_rate, div);
+}
+
+/*
+ * Attempts to find a divider value within the [min, max] range;
+ * if a table of fixed dividers was specified for this register,
+ * pick the fixed divider that is the closest to the target
+ * frequency
+ */
+static long
+r9a06g032_div_clamp_div(struct r9a06g032_clk_div *clk,
+ unsigned long rate, unsigned long prate)
+{
+ /* + 1 to cope with rates that have the remainder dropped */
+ u32 div = DIV_ROUND_UP(prate, rate + 1);
+ int i;
+
+ if (div <= clk->min)
+ return clk->min;
+ if (div >= clk->max)
+ return clk->max;
+
+ for (i = 0; clk->table_size && i < clk->table_size - 1; i++) {
+ if (div >= clk->table[i] && div <= clk->table[i + 1]) {
+ unsigned long m = rate -
+ DIV_ROUND_UP(prate, clk->table[i]);
+ unsigned long p =
+ DIV_ROUND_UP(prate, clk->table[i + 1]) -
+ rate;
+ /*
+ * select the divider that generates
+ * the value closest to the ideal frequency
+ */
+ div = p >= m ? clk->table[i] : clk->table[i + 1];
+ return div;
+ }
+ }
+ return div;
+}
+
+static long
+r9a06g032_div_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct r9a06g032_clk_div *clk = to_r9a06g032_div(hw);
+ u32 div = DIV_ROUND_UP(*prate, rate);
+
+ pr_devel("%s %pC %ld (prate %ld) (wanted div %u)\n", __func__,
+ hw->clk, rate, *prate, div);
+ pr_devel(" min %d (%ld) max %d (%ld)\n",
+ clk->min, DIV_ROUND_UP(*prate, clk->min),
+ clk->max, DIV_ROUND_UP(*prate, clk->max));
+
+ div = r9a06g032_div_clamp_div(clk, rate, *prate);
+ /*
+ * This is a hack. Currently the serial driver asks for a clock rate
+ * that is 16 times the baud rate -- and that is wildly outside the
+ * range of the UART divider; somehow there is no provision for the
+ * case of 'leave the divider as-is if out of range'.
+ * The serial driver *shouldn't* play with these clocks anyway: there
+ * are several UARTs attached to this divider, and changing it
+ * impacts everyone.
+ */
+ if (clk->index == R9A06G032_DIV_UART) {
+ pr_devel("%s div uart hack!\n", __func__);
+ return clk_get_rate(hw->clk);
+ }
+ pr_devel("%s %pC %ld / %u = %ld\n", __func__, hw->clk,
+ *prate, div, DIV_ROUND_UP(*prate, div));
+ return DIV_ROUND_UP(*prate, div);
+}
+
+static int
+r9a06g032_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct r9a06g032_clk_div *clk = to_r9a06g032_div(hw);
+ /* + 1 to cope with rates that have the remainder dropped */
+ u32 div = DIV_ROUND_UP(parent_rate, rate + 1);
+ u32 __iomem *reg = clk->clocks->reg + (4 * clk->reg);
+
+ pr_devel("%s %pC rate %ld parent %ld div %d\n", __func__, hw->clk,
+ rate, parent_rate, div);
+
+ /*
+ * Bit 31 needs to be written along with the divider value to
+ * latch it. Technically we should wait until it has been
+ * cleared too.
+ * TODO: Find out whether this callback is sleepable, in case
+ * the hardware /does/ require some sort of spinloop here.
+ */
+ writel(div | BIT(31), reg);
+
+ return 0;
+}
+
+static const struct clk_ops r9a06g032_clk_div_ops = {
+ .recalc_rate = r9a06g032_div_recalc_rate,
+ .round_rate = r9a06g032_div_round_rate,
+ .set_rate = r9a06g032_div_set_rate,
+};
+
+static struct clk *
+r9a06g032_register_div(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc)
+{
+ struct r9a06g032_clk_div *div;
+ struct clk *clk;
+ struct clk_init_data init;
+ unsigned int i;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return NULL;
+
+ init.name = desc->name;
+ init.ops = &r9a06g032_clk_div_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ div->clocks = clocks;
+ div->index = desc->index;
+ div->reg = desc->reg;
+ div->hw.init = &init;
+ div->min = desc->div_min;
+ div->max = desc->div_max;
+ /* populate the (optional) table of fixed divider values */
+ for (i = 0; i < ARRAY_SIZE(div->table) &&
+ i < ARRAY_SIZE(desc->div_table) && desc->div_table[i]; i++) {
+ div->table[div->table_size++] = desc->div_table[i];
+ }
+
+ clk = clk_register(NULL, &div->hw);
+ if (IS_ERR(clk)) {
+ kfree(div);
+ return NULL;
+ }
+ return clk;
+}
+
+/*
+ * This clock provider handles the case of the R9A06G032 where you have
+ * peripherals that have two potential clock sources and two gates, one
+ * for each clock source - the active clock source (for all sub-clocks)
+ * is selected by a single bit.
+ * That single bit affects all sub-clocks, and therefore needs to change the
+ * active gate (and turn the other off) and force a recalculation of the rates.
+ *
+ * This implements two clock providers, one 'bitselect' that
+ * handles the switch between both parents, and another 'dualgate'
+ * that knows which gate to poke at, depending on the parent's bit position.
+ */
+struct r9a06g032_clk_bitsel {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+ u16 selector; /* selector register + bit */
+};
+
+#define to_clk_bitselect(_hw) \
+ container_of(_hw, struct r9a06g032_clk_bitsel, hw)
+
+static u8 r9a06g032_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_bitsel *set = to_clk_bitselect(hw);
+
+ return clk_rdesc_get(set->clocks, set->selector);
+}
+
+static int r9a06g032_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct r9a06g032_clk_bitsel *set = to_clk_bitselect(hw);
+
+ /* a single bit in the register selects one of two parent clocks */
+ clk_rdesc_set(set->clocks, set->selector, !!index);
+
+ return 0;
+}
+
+static const struct clk_ops clk_bitselect_ops = {
+ .get_parent = r9a06g032_clk_mux_get_parent,
+ .set_parent = r9a06g032_clk_mux_set_parent,
+};
+
+static struct clk *
+r9a06g032_register_bitsel(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc)
+{
+ struct clk *clk;
+ struct r9a06g032_clk_bitsel *g;
+ struct clk_init_data init;
+ const char *names[2];
+
+ /* allocate the bit selector */
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return NULL;
+
+ names[0] = parent_name;
+ names[1] = "clk_pll_usb";
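+ /* the alternate parent of every bitsel clock is the USB PLL */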
+
+ init.name = desc->name;
+ init.ops = &clk_bitselect_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = names;
+ init.num_parents = 2;
+
+ g->clocks = clocks;
+ g->index = desc->index;
+ g->selector = desc->dual.sel;
+ g->hw.init = &init;
+
+ clk = clk_register(NULL, &g->hw);
+ if (IS_ERR(clk)) {
+ kfree(g);
+ return NULL;
+ }
+ return clk;
+}
+
+struct r9a06g032_clk_dualgate {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+ u16 selector; /* selector register + bit */
+ struct r9a06g032_gate gate[2];
+};
+
+#define to_clk_dualgate(_hw) \
+ container_of(_hw, struct r9a06g032_clk_dualgate, hw)
+
+static int
+r9a06g032_clk_dualgate_setenable(struct r9a06g032_clk_dualgate *g, int enable)
+{
+ u8 sel_bit = clk_rdesc_get(g->clocks, g->selector);
+
+ /* we always turn off the 'other' gate, regardless */
+ r9a06g032_clk_gate_set(g->clocks, &g->gate[!sel_bit], 0);
+ r9a06g032_clk_gate_set(g->clocks, &g->gate[sel_bit], enable);
+
+ return 0;
+}
+
+static int r9a06g032_clk_dualgate_enable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_dualgate *gate = to_clk_dualgate(hw);
+
+ r9a06g032_clk_dualgate_setenable(gate, 1);
+
+ return 0;
+}
+
+static void r9a06g032_clk_dualgate_disable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_dualgate *gate = to_clk_dualgate(hw);
+
+ r9a06g032_clk_dualgate_setenable(gate, 0);
+}
+
+static int r9a06g032_clk_dualgate_is_enabled(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_dualgate *g = to_clk_dualgate(hw);
+ u8 sel_bit = clk_rdesc_get(g->clocks, g->selector);
+
+ return clk_rdesc_get(g->clocks, g->gate[sel_bit].gate);
+}
+
+static const struct clk_ops r9a06g032_clk_dualgate_ops = {
+ .enable = r9a06g032_clk_dualgate_enable,
+ .disable = r9a06g032_clk_dualgate_disable,
+ .is_enabled = r9a06g032_clk_dualgate_is_enabled,
+};
+
+static struct clk *
+r9a06g032_register_dualgate(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc,
+ u16 sel)
+{
+ struct r9a06g032_clk_dualgate *g;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate the gate */
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return NULL;
+ g->clocks = clocks;
+ g->index = desc->index;
+ g->selector = sel;
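+ /*
+ * gate[0]/gate[1] correspond to the two possible parents; the
+ * selector bit decides which one is actually in use.
+ */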
+ g->gate[0].gate = desc->dual.g1;
+ g->gate[0].reset = desc->dual.r1;
+ g->gate[1].gate = desc->dual.g2;
+ g->gate[1].reset = desc->dual.r2;
+
+ init.name = desc->name;
+ init.ops = &r9a06g032_clk_dualgate_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ g->hw.init = &init;
+ /*
+ * Important: some clocks are already in use by the CM3; we have to
+ * assume they are not Linux's to play with, and must not try to
+ * disable them at the end of boot!
+ */
+ if (r9a06g032_clk_dualgate_is_enabled(&g->hw)) {
+ init.flags |= CLK_IS_CRITICAL;
+ pr_debug("%s was enabled, making read-only\n", desc->name);
+ }
+
+ clk = clk_register(NULL, &g->hw);
+ if (IS_ERR(clk)) {
+ kfree(g);
+ return NULL;
+ }
+ return clk;
+}
+
+static void r9a06g032_clocks_del_clk_provider(void *data)
+{
+ of_clk_del_provider(data);
+}
+
+static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct r9a06g032_priv *clocks;
+ struct clk **clks;
+ struct clk *mclk;
+ unsigned int i;
+ u16 uart_group_sel[2];
+ int error;
+
+ clocks = devm_kzalloc(dev, sizeof(*clocks), GFP_KERNEL);
+ clks = devm_kcalloc(dev, R9A06G032_CLOCK_COUNT, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!clocks || !clks)
+ return -ENOMEM;
+
+ spin_lock_init(&clocks->lock);
+
+ clocks->data.clks = clks;
+ clocks->data.clk_num = R9A06G032_CLOCK_COUNT;
+
+ mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(mclk))
+ return PTR_ERR(mclk);
+
+ clocks->reg = of_iomap(np, 0);
+ if (WARN_ON(!clocks->reg))
+ return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(r9a06g032_clocks); ++i) {
+ const struct r9a06g032_clkdesc *d = &r9a06g032_clocks[i];
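+ /*
+ * ->source is a 1-based index into the clocks registered so
+ * far (0 selects the external mclk), so parents must appear
+ * earlier in the table than their children.
+ */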
+ const char *parent_name = d->source ?
+ __clk_get_name(clocks->data.clks[d->source - 1]) :
+ __clk_get_name(mclk);
+ struct clk *clk = NULL;
+
+ switch (d->type) {
+ case K_FFC:
+ clk = clk_register_fixed_factor(NULL, d->name,
+ parent_name, 0,
+ d->mul, d->div);
+ break;
+ case K_GATE:
+ clk = r9a06g032_register_gate(clocks, parent_name, d);
+ break;
+ case K_DIV:
+ clk = r9a06g032_register_div(clocks, parent_name, d);
+ break;
+ case K_BITSEL:
+ /* keep that selector register around */
+ uart_group_sel[d->dual.group] = d->dual.sel;
+ clk = r9a06g032_register_bitsel(clocks, parent_name, d);
+ break;
+ case K_DUALGATE:
+ clk = r9a06g032_register_dualgate(clocks, parent_name,
+ d,
+ uart_group_sel[d->dual.group]);
+ break;
+ }
+ clocks->data.clks[d->index] = clk;
+ }
+ error = of_clk_add_provider(np, of_clk_src_onecell_get, &clocks->data);
+ if (error)
+ return error;
+
+ return devm_add_action_or_reset(dev,
+ r9a06g032_clocks_del_clk_provider, np);
+}
+
+static const struct of_device_id r9a06g032_match[] = {
+ { .compatible = "renesas,r9a06g032-sysctrl" },
+ { }
+};
+
+static struct platform_driver r9a06g032_clock_driver = {
+ .driver = {
+ .name = "renesas,r9a06g032-sysctrl",
+ .of_match_table = r9a06g032_match,
+ },
+};
+
+static int __init r9a06g032_clocks_init(void)
+{
+ return platform_driver_probe(&r9a06g032_clock_driver,
+ r9a06g032_clocks_probe);
+}
+
+subsys_initcall(r9a06g032_clocks_init);
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 98e7b9429b83..ff35ab463a6f 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -6,12 +6,14 @@
obj-y += clk.o
obj-y += clk-pll.o
obj-y += clk-cpu.o
+obj-y += clk-half-divider.o
obj-y += clk-inverter.o
obj-y += clk-mmc-phase.o
obj-y += clk-muxgrf.o
obj-y += clk-ddr.o
obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
+obj-y += clk-px30.o
obj-y += clk-rv1108.o
obj-y += clk-rk3036.o
obj-y += clk-rk3128.o
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
new file mode 100644
index 000000000000..b8da6e799423
--- /dev/null
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include "clk.h"
+
+#define div_mask(width) ((1 << (width)) - 1)
+
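+/*
+ * A candidate divider is "best" either because it is closest in
+ * absolute distance (CLK_DIVIDER_ROUND_CLOSEST), or because it yields
+ * the highest rate that does not exceed the requested one.
+ */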
+static bool _is_best_half_div(unsigned long rate, unsigned long now,
+ unsigned long best, unsigned long flags)
+{
+ if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+ return abs(rate - now) < abs(rate - best);
+
+ return now <= rate && now > best;
+}
+
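+/*
+ * The hardware divides by (n + 1.5), where n is the value of the
+ * register field, i.e. rate = parent_rate * 2 / (2 * n + 3).
+ */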
+static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int val;
+
+ val = clk_readl(divider->reg) >> divider->shift;
+ val &= div_mask(divider->width);
+ val = val * 2 + 3;
+
+ return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val);
+}
+
+static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate, u8 width,
+ unsigned long flags)
+{
+ unsigned int i, bestdiv = 0;
+ unsigned long parent_rate, best = 0, now, maxdiv;
+ unsigned long parent_rate_saved = *best_parent_rate;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = div_mask(width);
+
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
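+ /* Invert rate = parent * 2 / (2 * n + 3) to recover the field n */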
+ parent_rate = *best_parent_rate;
+ bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
+ if (bestdiv < 3)
+ bestdiv = 0;
+ else
+ bestdiv = (bestdiv - 3) / 2;
+ bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
+ return bestdiv;
+ }
+
+ /*
+ * The maximum divider we can use without overflowing
+ * unsigned long in the rate * (i * 2 + 3) computation below
+ */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 0; i <= maxdiv; i++) {
+ if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
+ /*
+ * This is the ideal case: the requested rate can be divided
+ * from the parent clock without needing to change the parent
+ * rate, so return the divider immediately.
+ */
+ *best_parent_rate = parent_rate_saved;
+ return i;
+ }
+ parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ ((u64)rate * (i * 2 + 3)) / 2);
+ now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
+ (i * 2 + 3));
+
+ if (_is_best_half_div(rate, now, best, flags)) {
+ bestdiv = i;
+ best = now;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ if (!bestdiv) {
+ bestdiv = div_mask(width);
+ *best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
+ }
+
+ return bestdiv;
+}
+
+static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int div;
+
+ div = clk_half_divider_bestdiv(hw, rate, prate,
+ divider->width,
+ divider->flags);
+
+ return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
+}
+
+static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int value;
+ unsigned long flags = 0;
+ u32 val;
+
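+ /* Recover the register field n from rate = parent * 2 / (2 * n + 3) */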
+ value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
+ value = (value - 3) / 2;
+ value = min_t(unsigned int, value, div_mask(divider->width));
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+ val = div_mask(divider->width) << (divider->shift + 16);
+ } else {
+ val = clk_readl(divider->reg);
+ val &= ~(div_mask(divider->width) << divider->shift);
+ }
+ val |= value << divider->shift;
+ clk_writel(val, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return 0;
+}
+
+const struct clk_ops clk_half_divider_ops = {
+ .recalc_rate = clk_half_divider_recalc_rate,
+ .round_rate = clk_half_divider_round_rate,
+ .set_rate = clk_half_divider_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_half_divider_ops);
+
+/**
+ * Register a clock branch.
+ * Most clock branches have a form like
+ *
+ * src1 --|--\
+ * |M |--[GATE]-[DIV]-
+ * src2 --|--/
+ *
+ * sometimes without one of those components.
+ */
+struct clk *rockchip_clk_register_halfdiv(const char *name,
+ const char *const *parent_names,
+ u8 num_parents, void __iomem *base,
+ int muxdiv_offset, u8 mux_shift,
+ u8 mux_width, u8 mux_flags,
+ u8 div_shift, u8 div_width,
+ u8 div_flags, int gate_offset,
+ u8 gate_shift, u8 gate_flags,
+ unsigned long flags,
+ spinlock_t *lock)
+{
+ struct clk *clk;
+ struct clk_mux *mux = NULL;
+ struct clk_gate *gate = NULL;
+ struct clk_divider *div = NULL;
+ const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
+ *gate_ops = NULL;
+
+ if (num_parents > 1) {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = base + muxdiv_offset;
+ mux->shift = mux_shift;
+ mux->mask = BIT(mux_width) - 1;
+ mux->flags = mux_flags;
+ mux->lock = lock;
+ mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
+ : &clk_mux_ops;
+ }
+
+ if (gate_offset >= 0) {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto err_gate;
+
+ gate->flags = gate_flags;
+ gate->reg = base + gate_offset;
+ gate->bit_idx = gate_shift;
+ gate->lock = lock;
+ gate_ops = &clk_gate_ops;
+ }
+
+ if (div_width > 0) {
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ goto err_div;
+
+ div->flags = div_flags;
+ div->reg = base + muxdiv_offset;
+ div->shift = div_shift;
+ div->width = div_width;
+ div->lock = lock;
+ div_ops = &clk_half_divider_ops;
+ }
+
+ clk = clk_register_composite(NULL, name, parent_names, num_parents,
+ mux ? &mux->hw : NULL, mux_ops,
+ div ? &div->hw : NULL, div_ops,
+ gate ? &gate->hw : NULL, gate_ops,
+ flags);
+
+ return clk;
+err_div:
+ kfree(gate);
+err_gate:
+ kfree(mux);
+ return ERR_PTR(-ENOMEM);
+}
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
new file mode 100644
index 000000000000..601a77f1af78
--- /dev/null
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -0,0 +1,1039 @@
+/*
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ * Author: Elaine Zhang<zhangqing@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/px30-cru.h>
+#include "clk.h"
+
+#define PX30_GRF_SOC_STATUS0 0x480
+
+enum px30_plls {
+ apll, dpll, cpll, npll, apll_b_h, apll_b_l,
+};
+
+enum px30_pmu_plls {
+ gpll,
+};
+
+static struct rockchip_pll_rate_table px30_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0),
+ RK3036_PLL_RATE(984000000, 1, 82, 2, 1, 1, 0),
+ RK3036_PLL_RATE(960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE(936000000, 1, 78, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(900000000, 4, 300, 2, 1, 1, 0),
+ RK3036_PLL_RATE(888000000, 1, 74, 2, 1, 1, 0),
+ RK3036_PLL_RATE(864000000, 1, 72, 2, 1, 1, 0),
+ RK3036_PLL_RATE(840000000, 1, 70, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(800000000, 6, 400, 2, 1, 1, 0),
+ RK3036_PLL_RATE(700000000, 6, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE(696000000, 1, 58, 2, 1, 1, 0),
+ RK3036_PLL_RATE(624000000, 1, 52, 2, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 75, 3, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 2, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(504000000, 1, 63, 3, 1, 1, 0),
+ RK3036_PLL_RATE(500000000, 6, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 52, 2, 2, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 64, 4, 4, 1, 0),
+ { /* sentinel */ },
+};
+
+#define PX30_DIV_ACLKM_MASK 0x7
+#define PX30_DIV_ACLKM_SHIFT 12
+#define PX30_DIV_PCLK_DBG_MASK 0xf
+#define PX30_DIV_PCLK_DBG_SHIFT 8
+
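+/*
+ * Rockchip "hiword" registers take a write-enable mask in the upper 16
+ * bits: HIWORD_UPDATE(val, mask, shift) sets the value bits and their
+ * write-enable bits in a single 32-bit write.
+ */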
+#define PX30_CLKSEL0(_aclk_core, _pclk_dbg) \
+{ \
+ .reg = PX30_CLKSEL_CON(0), \
+ .val = HIWORD_UPDATE(_aclk_core, PX30_DIV_ACLKM_MASK, \
+ PX30_DIV_ACLKM_SHIFT) | \
+ HIWORD_UPDATE(_pclk_dbg, PX30_DIV_PCLK_DBG_MASK, \
+ PX30_DIV_PCLK_DBG_SHIFT), \
+}
+
+#define PX30_CPUCLK_RATE(_prate, _aclk_core, _pclk_dbg) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ PX30_CLKSEL0(_aclk_core, _pclk_dbg), \
+ }, \
+}
+
+static struct rockchip_cpuclk_rate_table px30_cpuclk_rates[] __initdata = {
+ PX30_CPUCLK_RATE(1608000000, 1, 7),
+ PX30_CPUCLK_RATE(1584000000, 1, 7),
+ PX30_CPUCLK_RATE(1560000000, 1, 7),
+ PX30_CPUCLK_RATE(1536000000, 1, 7),
+ PX30_CPUCLK_RATE(1512000000, 1, 7),
+ PX30_CPUCLK_RATE(1488000000, 1, 5),
+ PX30_CPUCLK_RATE(1464000000, 1, 5),
+ PX30_CPUCLK_RATE(1440000000, 1, 5),
+ PX30_CPUCLK_RATE(1416000000, 1, 5),
+ PX30_CPUCLK_RATE(1392000000, 1, 5),
+ PX30_CPUCLK_RATE(1368000000, 1, 5),
+ PX30_CPUCLK_RATE(1344000000, 1, 5),
+ PX30_CPUCLK_RATE(1320000000, 1, 5),
+ PX30_CPUCLK_RATE(1296000000, 1, 5),
+ PX30_CPUCLK_RATE(1272000000, 1, 5),
+ PX30_CPUCLK_RATE(1248000000, 1, 5),
+ PX30_CPUCLK_RATE(1224000000, 1, 5),
+ PX30_CPUCLK_RATE(1200000000, 1, 5),
+ PX30_CPUCLK_RATE(1104000000, 1, 5),
+ PX30_CPUCLK_RATE(1008000000, 1, 5),
+ PX30_CPUCLK_RATE(912000000, 1, 5),
+ PX30_CPUCLK_RATE(816000000, 1, 3),
+ PX30_CPUCLK_RATE(696000000, 1, 3),
+ PX30_CPUCLK_RATE(600000000, 1, 3),
+ PX30_CPUCLK_RATE(408000000, 1, 1),
+ PX30_CPUCLK_RATE(312000000, 1, 1),
+ PX30_CPUCLK_RATE(216000000, 1, 1),
+ PX30_CPUCLK_RATE(96000000, 1, 1),
+};
+
+static const struct rockchip_cpuclk_reg_data px30_cpuclk_data = {
+ .core_reg = PX30_CLKSEL_CON(0),
+ .div_core_shift = 0,
+ .div_core_mask = 0xf,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 7,
+ .mux_core_mask = 0x1,
+};
+
+PNAME(mux_pll_p) = { "xin24m"};
+PNAME(mux_usb480m_p) = { "xin24m", "usb480m_phy", "clk_rtc32k_pmu" };
+PNAME(mux_armclk_p) = { "apll_core", "gpll_core" };
+PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" };
+PNAME(mux_ddrstdby_p) = { "clk_ddrphy1x", "clk_stdby_2wrap" };
+PNAME(mux_4plls_p) = { "gpll", "dummy_cpll", "usb480m", "npll" };
+PNAME(mux_cpll_npll_p) = { "cpll", "npll" };
+PNAME(mux_npll_cpll_p) = { "npll", "cpll" };
+PNAME(mux_gpll_cpll_p) = { "gpll", "dummy_cpll" };
+PNAME(mux_gpll_npll_p) = { "gpll", "npll" };
+PNAME(mux_gpll_xin24m_p) = { "gpll", "xin24m"};
+PNAME(mux_gpll_cpll_npll_p) = { "gpll", "dummy_cpll", "npll" };
+PNAME(mux_gpll_cpll_npll_xin24m_p) = { "gpll", "dummy_cpll", "npll", "xin24m" };
+PNAME(mux_gpll_xin24m_npll_p) = { "gpll", "xin24m", "npll"};
+PNAME(mux_pdm_p) = { "clk_pdm_src", "clk_pdm_frac" };
+PNAME(mux_i2s0_tx_p) = { "clk_i2s0_tx_src", "clk_i2s0_tx_frac", "mclk_i2s0_tx_in", "xin12m"};
+PNAME(mux_i2s0_rx_p) = { "clk_i2s0_rx_src", "clk_i2s0_rx_frac", "mclk_i2s0_rx_in", "xin12m"};
+PNAME(mux_i2s1_p) = { "clk_i2s1_src", "clk_i2s1_frac", "i2s1_clkin", "xin12m"};
+PNAME(mux_i2s2_p) = { "clk_i2s2_src", "clk_i2s2_frac", "i2s2_clkin", "xin12m"};
+PNAME(mux_i2s0_tx_out_p) = { "clk_i2s0_tx", "xin12m", "clk_i2s0_rx"};
+PNAME(mux_i2s0_rx_out_p) = { "clk_i2s0_rx", "xin12m", "clk_i2s0_tx"};
+PNAME(mux_i2s1_out_p) = { "clk_i2s1", "xin12m"};
+PNAME(mux_i2s2_out_p) = { "clk_i2s2", "xin12m"};
+PNAME(mux_i2s0_tx_rx_p) = { "clk_i2s0_tx_mux", "clk_i2s0_rx_mux"};
+PNAME(mux_i2s0_rx_tx_p) = { "clk_i2s0_rx_mux", "clk_i2s0_tx_mux"};
+PNAME(mux_uart_src_p) = { "gpll", "xin24m", "usb480m", "npll" };
+PNAME(mux_uart1_p) = { "clk_uart1_src", "clk_uart1_np5", "clk_uart1_frac" };
+PNAME(mux_uart2_p) = { "clk_uart2_src", "clk_uart2_np5", "clk_uart2_frac" };
+PNAME(mux_uart3_p) = { "clk_uart3_src", "clk_uart3_np5", "clk_uart3_frac" };
+PNAME(mux_uart4_p) = { "clk_uart4_src", "clk_uart4_np5", "clk_uart4_frac" };
+PNAME(mux_uart5_p) = { "clk_uart5_src", "clk_uart5_np5", "clk_uart5_frac" };
+PNAME(mux_cif_out_p) = { "xin24m", "dummy_cpll", "npll", "usb480m" };
+PNAME(mux_dclk_vopb_p) = { "dclk_vopb_src", "dclk_vopb_frac", "xin24m" };
+PNAME(mux_dclk_vopl_p) = { "dclk_vopl_src", "dclk_vopl_frac", "xin24m" };
+PNAME(mux_gmac_p) = { "clk_gmac_src", "gmac_clkin" };
+PNAME(mux_gmac_rmii_sel_p) = { "clk_gmac_rx_tx_div20", "clk_gmac_rx_tx_div2" };
+PNAME(mux_rtc32k_pmu_p) = { "xin32k", "pmu_pvtm_32k", "clk_rtc32k_frac", };
+PNAME(mux_wifi_pmu_p) = { "xin24m", "clk_wifi_pmu_src" };
+PNAME(mux_uart0_pmu_p) = { "clk_uart0_pmu_src", "clk_uart0_np5", "clk_uart0_frac" };
+PNAME(mux_usbphy_ref_p) = { "xin24m", "clk_ref24m_pmu" };
+PNAME(mux_mipidsiphy_ref_p) = { "xin24m", "clk_ref24m_pmu" };
+PNAME(mux_gpu_p) = { "clk_gpu_div", "clk_gpu_np5" };
+
+static struct rockchip_pll_clock px30_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p,
+ 0, PX30_PLL_CON(0),
+ PX30_MODE_CON, 0, 0, 0, px30_pll_rates),
+ [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p,
+ 0, PX30_PLL_CON(8),
+ PX30_MODE_CON, 4, 1, 0, NULL),
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ 0, PX30_PLL_CON(16),
+ PX30_MODE_CON, 2, 2, 0, px30_pll_rates),
+ [npll] = PLL(pll_rk3328, PLL_NPLL, "npll", mux_pll_p,
+ 0, PX30_PLL_CON(24),
+ PX30_MODE_CON, 6, 4, 0, px30_pll_rates),
+};
+
+static struct rockchip_pll_clock px30_pmu_pll_clks[] __initdata = {
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p, 0, PX30_PMU_PLL_CON(0),
+ PX30_PMU_MODE, 0, 3, 0, px30_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
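+/*
+ * Each fractional clock below is assembled from three branches: an
+ * integer "src" divider, a fractional divider fed from it, and a
+ * "fracmux" that selects between src, frac and any external inputs.
+ */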
+static struct rockchip_clk_branch px30_pdm_fracmux __initdata =
+ MUX(0, "clk_pdm_mux", mux_pdm_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(26), 15, 1, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s0_tx_fracmux __initdata =
+ MUX(0, "clk_i2s0_tx_mux", mux_i2s0_tx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(28), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s0_rx_fracmux __initdata =
+ MUX(0, "clk_i2s0_rx_mux", mux_i2s0_rx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(58), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s1_fracmux __initdata =
+ MUX(0, "clk_i2s1_mux", mux_i2s1_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(30), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s2_fracmux __initdata =
+ MUX(0, "clk_i2s2_mux", mux_i2s2_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(32), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart1_fracmux __initdata =
+ MUX(0, "clk_uart1_mux", mux_uart1_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(35), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart2_fracmux __initdata =
+ MUX(0, "clk_uart2_mux", mux_uart2_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(38), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart3_fracmux __initdata =
+ MUX(0, "clk_uart3_mux", mux_uart3_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(41), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart4_fracmux __initdata =
+ MUX(0, "clk_uart4_mux", mux_uart4_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(44), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart5_fracmux __initdata =
+ MUX(0, "clk_uart5_mux", mux_uart5_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(47), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_dclk_vopb_fracmux __initdata =
+ MUX(0, "dclk_vopb_mux", mux_dclk_vopb_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(5), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_dclk_vopl_fracmux __initdata =
+ MUX(0, "dclk_vopl_mux", mux_dclk_vopl_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(8), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_rtc32k_pmu_fracmux __initdata =
+ MUX(SCLK_RTC32K_PMU, "clk_rtc32k_pmu", mux_rtc32k_pmu_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(0), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart0_pmu_fracmux __initdata =
+ MUX(0, "clk_uart0_pmu_mux", mux_uart0_pmu_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(4), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_clk_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 1
+ */
+
+ MUX(USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ PX30_MODE_CON, 8, 2, MFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ /*
+ * Clock-Architecture Diagram 3
+ */
+
+ /* PD_CORE */
+ GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(0), 8, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ PX30_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE_NOMUX(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(0), 12, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ PX30_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(0, "aclk_core_niu", "aclk_core", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(0, "aclk_core_prf", "aclk_core", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 5, GFLAGS),
+ GATE(0, "pclk_dbg_niu", "pclk_dbg", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(0, "pclk_core_dbg", "pclk_dbg", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(0, "pclk_core_grf", "pclk_dbg", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 6, GFLAGS),
+
+ GATE(0, "clk_jtag", "jtag_clkin", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(SCLK_PVTM, "clk_pvtm", "xin24m", 0,
+ PX30_CLKGATE_CON(17), 4, GFLAGS),
+
+ /* PD_GPU */
+ COMPOSITE_NODIV(0, "clk_gpu_src", mux_4plls_p, 0,
+ PX30_CLKSEL_CON(1), 6, 2, MFLAGS,
+ PX30_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NOMUX(0, "clk_gpu_div", "clk_gpu_src", 0,
+ PX30_CLKSEL_CON(1), 0, 4, DFLAGS,
+ PX30_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_gpu_np5", "clk_gpu_src", 0,
+ PX30_CLKSEL_CON(1), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(0), 9, GFLAGS),
+ COMPOSITE_NODIV(SCLK_GPU, "clk_gpu", mux_gpu_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(1), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE_NOMUX(0, "aclk_gpu", "clk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(1), 13, 2, DFLAGS,
+ PX30_CLKGATE_CON(17), 10, GFLAGS),
+ GATE(0, "aclk_gpu_niu", "aclk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 11, GFLAGS),
+ GATE(0, "aclk_gpu_prf", "aclk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 8, GFLAGS),
+ GATE(0, "pclk_gpu_grf", "aclk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 9, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 4
+ */
+
+ /* PD_DDR */
+ GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 13, GFLAGS),
+ COMPOSITE_NOGATE(SCLK_DDRCLK, "sclk_ddrc", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 7, 1, MFLAGS, 0, 3, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+ COMPOSITE_NOGATE(0, "clk_ddrphy4x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 7, 1, MFLAGS, 0, 3, DFLAGS),
+ FACTOR_GATE(0, "clk_ddrphy1x", "clk_ddrphy4x", CLK_IGNORE_UNUSED, 1, 4,
+ PX30_CLKGATE_CON(0), 14, GFLAGS),
+ FACTOR_GATE(0, "clk_stdby_2wrap", "clk_ddrphy4x", CLK_IGNORE_UNUSED, 1, 4,
+ PX30_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_ddrstdby", mux_ddrstdby_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 4, 1, MFLAGS,
+ PX30_CLKGATE_CON(1), 13, GFLAGS),
+ GATE(0, "aclk_split", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 15, GFLAGS),
+ GATE(0, "clk_msch", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 8, GFLAGS),
+ GATE(0, "aclk_ddrc", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(0, "clk_core_ddrc", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(0, "aclk_cmd_buff", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(0, "clk_ddrmon", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 11, GFLAGS),
+
+ GATE(0, "clk_ddrmon_timer", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 15, GFLAGS),
+
+ COMPOSITE_NOMUX(PCLK_DDR, "pclk_ddr", "gpll", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(0, "pclk_ddrmon", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 10, GFLAGS),
+ GATE(0, "pclk_ddrc", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 7, GFLAGS),
+ GATE(0, "pclk_msch", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 9, GFLAGS),
+ GATE(0, "pclk_stdby", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 12, GFLAGS),
+ GATE(0, "pclk_ddr_grf", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 14, GFLAGS),
+ GATE(0, "pclk_cmdbuff", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 3, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 5
+ */
+
+ /* PD_VI */
+ COMPOSITE(ACLK_VI_PRE, "aclk_vi_pre", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 8, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VI_PRE, "hclk_vi_pre", "aclk_vi_pre", 0,
+ PX30_CLKSEL_CON(11), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(4), 12, GFLAGS),
+ COMPOSITE(SCLK_ISP, "clk_isp", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(12), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 9, GFLAGS),
+ COMPOSITE(SCLK_CIF_OUT, "clk_cif_out", mux_cif_out_p, 0,
+ PX30_CLKSEL_CON(13), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ PX30_CLKGATE_CON(4), 11, GFLAGS),
+ GATE(PCLK_ISP, "pclkin_isp", "ext_pclkin", 0,
+ PX30_CLKGATE_CON(4), 13, GFLAGS),
+ GATE(PCLK_CIF, "pclkin_cif", "ext_pclkin", 0,
+ PX30_CLKGATE_CON(4), 14, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 6
+ */
+
+ /* PD_VO */
+ COMPOSITE(ACLK_VO_PRE, "aclk_vo_pre", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(3), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VO_PRE, "hclk_vo_pre", "aclk_vo_pre", 0,
+ PX30_CLKSEL_CON(3), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(2), 12, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_VO_PRE, "pclk_vo_pre", "aclk_vo_pre", 0,
+ PX30_CLKSEL_CON(3), 12, 4, DFLAGS,
+ PX30_CLKGATE_CON(2), 13, GFLAGS),
+ COMPOSITE(SCLK_RGA_CORE, "clk_rga_core", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(2), 1, GFLAGS),
+
+ COMPOSITE(SCLK_VOPB_PWM, "clk_vopb_pwm", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(7), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE(0, "dclk_vopb_src", mux_cpll_npll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(5), 11, 1, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(2), 2, GFLAGS),
+ COMPOSITE_FRACMUX(0, "dclk_vopb_frac", "dclk_vopb_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(6), 0,
+ PX30_CLKGATE_CON(2), 3, GFLAGS,
+ &px30_dclk_vopb_fracmux),
+ GATE(DCLK_VOPB, "dclk_vopb", "dclk_vopb_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE(0, "dclk_vopl_src", mux_npll_cpll_p, 0,
+ PX30_CLKSEL_CON(8), 11, 1, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_FRACMUX(0, "dclk_vopl_frac", "dclk_vopl_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(9), 0,
+ PX30_CLKGATE_CON(2), 7, GFLAGS,
+ &px30_dclk_vopl_fracmux),
+ GATE(DCLK_VOPL, "dclk_vopl", "dclk_vopl_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(2), 8, GFLAGS),
+
+ /* PD_VPU */
+ COMPOSITE(0, "aclk_vpu_pre", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(10), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 0, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_vpu_pre", "aclk_vpu_pre", 0,
+ PX30_CLKSEL_CON(10), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(SCLK_CORE_VPU, "sclk_core_vpu", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(13), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 1, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 7
+ */
+
+ COMPOSITE_NODIV(ACLK_PERI_SRC, "aclk_peri_src", mux_gpll_cpll_p, 0,
+ PX30_CLKSEL_CON(14), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(5), 7, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_PERI_PRE, "aclk_peri_pre", "aclk_peri_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(14), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(5), 8, GFLAGS),
+ DIV(HCLK_PERI_PRE, "hclk_peri_pre", "aclk_peri_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(14), 8, 5, DFLAGS),
+
+ /* PD_MMC_NAND */
+ GATE(HCLK_MMC_NAND, "hclk_mmc_nand", "hclk_peri_pre", 0,
+ PX30_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE(SCLK_NANDC, "clk_nandc", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(5), 13, GFLAGS),
+
+ COMPOSITE(SCLK_SDIO, "clk_sdio", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(18), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 3, GFLAGS),
+
+ COMPOSITE(SCLK_EMMC, "clk_emmc", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(20), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 6, GFLAGS),
+
+ COMPOSITE(SCLK_SFC, "clk_sfc", mux_gpll_cpll_p, 0,
+ PX30_CLKSEL_CON(22), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(6), 7, GFLAGS),
+
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc",
+ PX30_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc",
+ PX30_SDMMC_CON1, 1),
+
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio",
+ PX30_SDIO_CON0, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio",
+ PX30_SDIO_CON1, 1),
+
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc",
+ PX30_EMMC_CON0, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc",
+ PX30_EMMC_CON1, 1),
+
+ /* PD_SDCARD */
+ GATE(0, "hclk_sdmmc_pre", "hclk_peri_pre", 0,
+ PX30_CLKGATE_CON(6), 12, GFLAGS),
+ COMPOSITE(SCLK_SDMMC, "clk_sdmmc", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(16), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 15, GFLAGS),
+
+ /* PD_USB */
+ GATE(HCLK_USB, "hclk_usb", "hclk_peri_pre", 0,
+ PX30_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(SCLK_OTG_ADP, "clk_otg_adp", "clk_rtc32k_pmu", 0,
+ PX30_CLKGATE_CON(7), 3, GFLAGS),
+
+ /* PD_GMAC */
+ COMPOSITE(SCLK_GMAC_SRC, "clk_gmac_src", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(22), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(7), 11, GFLAGS),
+ MUX(SCLK_GMAC, "clk_gmac", mux_gmac_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(23), 6, 1, MFLAGS),
+ GATE(SCLK_MAC_REF, "clk_mac_ref", "clk_gmac", 0,
+ PX30_CLKGATE_CON(7), 15, GFLAGS),
+ GATE(SCLK_GMAC_RX_TX, "clk_gmac_rx_tx", "clk_gmac", 0,
+ PX30_CLKGATE_CON(7), 13, GFLAGS),
+ FACTOR(0, "clk_gmac_rx_tx_div2", "clk_gmac_rx_tx", 0, 1, 2),
+ FACTOR(0, "clk_gmac_rx_tx_div20", "clk_gmac_rx_tx", 0, 1, 20),
+ MUX(SCLK_GMAC_RMII, "clk_gmac_rmii_sel", mux_gmac_rmii_sel_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(23), 7, 1, MFLAGS),
+
+ GATE(0, "aclk_gmac_pre", "aclk_peri_pre", 0,
+ PX30_CLKGATE_CON(7), 10, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_gmac_pre", "aclk_gmac_pre", 0,
+ PX30_CLKSEL_CON(23), 0, 4, DFLAGS,
+ PX30_CLKGATE_CON(7), 12, GFLAGS),
+
+ COMPOSITE(SCLK_MAC_OUT, "clk_mac_out", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 5, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 8
+ */
+
+ /* PD_BUS */
+ COMPOSITE_NODIV(ACLK_BUS_SRC, "aclk_bus_src", mux_gpll_cpll_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(23), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(8), 6, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_BUS_PRE, "hclk_bus_pre", "aclk_bus_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(24), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 8, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_BUS_PRE, "aclk_bus_pre", "aclk_bus_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(23), 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 7, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_BUS_PRE, "pclk_bus_pre", "aclk_bus_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(24), 8, 2, DFLAGS,
+ PX30_CLKGATE_CON(8), 9, GFLAGS),
+ GATE(0, "pclk_top_pre", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(8), 10, GFLAGS),
+
+ COMPOSITE(0, "clk_pdm_src", mux_gpll_xin24m_npll_p, 0,
+ PX30_CLKSEL_CON(26), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(9), 9, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_pdm_frac", "clk_pdm_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(27), 0,
+ PX30_CLKGATE_CON(9), 10, GFLAGS,
+ &px30_pdm_fracmux),
+ GATE(SCLK_PDM, "clk_pdm", "clk_pdm_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(9), 11, GFLAGS),
+
+ COMPOSITE(0, "clk_i2s0_tx_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(28), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(9), 12, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s0_tx_frac", "clk_i2s0_tx_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(29), 0,
+ PX30_CLKGATE_CON(9), 13, GFLAGS,
+ &px30_i2s0_tx_fracmux),
+ COMPOSITE_NODIV(SCLK_I2S0_TX, "clk_i2s0_tx", mux_i2s0_tx_rx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(28), 12, 1, MFLAGS,
+ PX30_CLKGATE_CON(9), 14, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s0_tx_out_pre", mux_i2s0_tx_out_p, 0,
+ PX30_CLKSEL_CON(28), 14, 2, MFLAGS,
+ PX30_CLKGATE_CON(9), 15, GFLAGS),
+ GATE(SCLK_I2S0_TX_OUT, "clk_i2s0_tx_out", "clk_i2s0_tx_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 8, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(0, "clk_i2s0_rx_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(58), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(17), 0, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s0_rx_frac", "clk_i2s0_rx_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(59), 0,
+ PX30_CLKGATE_CON(17), 1, GFLAGS,
+ &px30_i2s0_rx_fracmux),
+ COMPOSITE_NODIV(SCLK_I2S0_RX, "clk_i2s0_rx", mux_i2s0_rx_tx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(58), 12, 1, MFLAGS,
+ PX30_CLKGATE_CON(17), 2, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s0_rx_out_pre", mux_i2s0_rx_out_p, 0,
+ PX30_CLKSEL_CON(58), 14, 2, MFLAGS,
+ PX30_CLKGATE_CON(17), 3, GFLAGS),
+ GATE(SCLK_I2S0_RX_OUT, "clk_i2s0_rx_out", "clk_i2s0_rx_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 11, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(0, "clk_i2s1_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(30), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(10), 0, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(31), 0,
+ PX30_CLKGATE_CON(10), 1, GFLAGS,
+ &px30_i2s1_fracmux),
+ GATE(SCLK_I2S1, "clk_i2s1", "clk_i2s1_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 2, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s1_out_pre", mux_i2s1_out_p, 0,
+ PX30_CLKSEL_CON(30), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(10), 3, GFLAGS),
+ GATE(SCLK_I2S1_OUT, "clk_i2s1_out", "clk_i2s1_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 9, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(0, "clk_i2s2_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(32), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(10), 4, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(33), 0,
+ PX30_CLKGATE_CON(10), 5, GFLAGS,
+ &px30_i2s2_fracmux),
+ GATE(SCLK_I2S2, "clk_i2s2", "clk_i2s2_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 6, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s2_out_pre", mux_i2s2_out_p, 0,
+ PX30_CLKSEL_CON(32), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(SCLK_I2S2_OUT, "clk_i2s2_out", "clk_i2s2_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 10, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(SCLK_UART1_SRC, "clk_uart1_src", mux_uart_src_p, CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(34), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(10), 12, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart1_np5", "clk_uart1_src", 0,
+ PX30_CLKSEL_CON(35), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(10), 13, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart1_frac", "clk_uart1_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(36), 0,
+ PX30_CLKGATE_CON(10), 14, GFLAGS,
+ &px30_uart1_fracmux),
+ GATE(SCLK_UART1, "clk_uart1", "clk_uart1_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 15, GFLAGS),
+
+ COMPOSITE(SCLK_UART2_SRC, "clk_uart2_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(37), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 0, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart2_np5", "clk_uart2_src", 0,
+ PX30_CLKSEL_CON(38), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 1, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart2_frac", "clk_uart2_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(39), 0,
+ PX30_CLKGATE_CON(11), 2, GFLAGS,
+ &px30_uart2_fracmux),
+ GATE(SCLK_UART2, "clk_uart2", "clk_uart2_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 3, GFLAGS),
+
+ COMPOSITE(0, "clk_uart3_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(40), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 4, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart3_np5", "clk_uart3_src", 0,
+ PX30_CLKSEL_CON(41), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 5, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart3_frac", "clk_uart3_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(42), 0,
+ PX30_CLKGATE_CON(11), 6, GFLAGS,
+ &px30_uart3_fracmux),
+ GATE(SCLK_UART3, "clk_uart3", "clk_uart3_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 7, GFLAGS),
+
+ COMPOSITE(0, "clk_uart4_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(43), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 8, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart4_np5", "clk_uart4_src", 0,
+ PX30_CLKSEL_CON(44), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 9, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart4_frac", "clk_uart4_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(45), 0,
+ PX30_CLKGATE_CON(11), 10, GFLAGS,
+ &px30_uart4_fracmux),
+ GATE(SCLK_UART4, "clk_uart4", "clk_uart4_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 11, GFLAGS),
+
+ COMPOSITE(0, "clk_uart5_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(46), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 12, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart5_np5", "clk_uart5_src", 0,
+ PX30_CLKSEL_CON(47), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 13, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart5_frac", "clk_uart5_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(48), 0,
+ PX30_CLKGATE_CON(11), 14, GFLAGS,
+ &px30_uart5_fracmux),
+ GATE(SCLK_UART5, "clk_uart5", "clk_uart5_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 15, GFLAGS),
+
+ COMPOSITE(SCLK_I2C0, "clk_i2c0", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(49), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 0, GFLAGS),
+ COMPOSITE(SCLK_I2C1, "clk_i2c1", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(49), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 1, GFLAGS),
+ COMPOSITE(SCLK_I2C2, "clk_i2c2", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(50), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 2, GFLAGS),
+ COMPOSITE(SCLK_I2C3, "clk_i2c3", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(50), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 3, GFLAGS),
+ COMPOSITE(SCLK_PWM0, "clk_pwm0", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(52), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 5, GFLAGS),
+ COMPOSITE(SCLK_PWM1, "clk_pwm1", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(52), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 6, GFLAGS),
+ COMPOSITE(SCLK_SPI0, "clk_spi0", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(53), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 7, GFLAGS),
+ COMPOSITE(SCLK_SPI1, "clk_spi1", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(53), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 8, GFLAGS),
+
+ GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 0, GFLAGS),
+ GATE(SCLK_TIMER1, "sclk_timer1", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(SCLK_TIMER2, "sclk_timer2", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 2, GFLAGS),
+ GATE(SCLK_TIMER3, "sclk_timer3", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 3, GFLAGS),
+ GATE(SCLK_TIMER4, "sclk_timer4", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 4, GFLAGS),
+ GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 5, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "xin24m", 0,
+ PX30_CLKSEL_CON(54), 0, 11, DFLAGS,
+ PX30_CLKGATE_CON(12), 9, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_SARADC, "clk_saradc", "xin24m", 0,
+ PX30_CLKSEL_CON(55), 0, 11, DFLAGS,
+ PX30_CLKGATE_CON(12), 10, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_OTP, "clk_otp", "xin24m", 0,
+ PX30_CLKSEL_CON(56), 0, 3, DFLAGS,
+ PX30_CLKGATE_CON(12), 11, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_OTP_USR, "clk_otp_usr", "clk_otp", 0,
+ PX30_CLKSEL_CON(56), 4, 2, DFLAGS,
+ PX30_CLKGATE_CON(13), 6, GFLAGS),
+
+ GATE(0, "clk_cpu_boost", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(12), 12, GFLAGS),
+
+ /* PD_CRYPTO */
+ GATE(0, "aclk_crypto_pre", "aclk_bus_pre", 0,
+ PX30_CLKGATE_CON(8), 12, GFLAGS),
+ GATE(0, "hclk_crypto_pre", "hclk_bus_pre", 0,
+ PX30_CLKGATE_CON(8), 13, GFLAGS),
+ COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 14, GFLAGS),
+ COMPOSITE(SCLK_CRYPTO_APK, "clk_crypto_apk", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(25), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 15, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 9
+ */
+
+ /* PD_BUS_TOP */
+ GATE(0, "pclk_top_niu", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 0, GFLAGS),
+ GATE(0, "pclk_top_cru", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 1, GFLAGS),
+ GATE(PCLK_OTP_PHY, "pclk_otp_phy", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 2, GFLAGS),
+ GATE(0, "pclk_ddrphy", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 3, GFLAGS),
+ GATE(PCLK_MIPIDSIPHY, "pclk_mipidsiphy", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 4, GFLAGS),
+ GATE(PCLK_MIPICSIPHY, "pclk_mipicsiphy", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 5, GFLAGS),
+ GATE(PCLK_USB_GRF, "pclk_usb_grf", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(0, "pclk_cpu_hoost", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 7, GFLAGS),
+
+ /* PD_VI */
+ GATE(0, "aclk_vi_niu", "aclk_vi_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 15, GFLAGS),
+ GATE(ACLK_CIF, "aclk_cif", "aclk_vi_pre", 0, PX30_CLKGATE_CON(5), 1, GFLAGS),
+ GATE(ACLK_ISP, "aclk_isp", "aclk_vi_pre", 0, PX30_CLKGATE_CON(5), 3, GFLAGS),
+ GATE(0, "hclk_vi_niu", "hclk_vi_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(5), 0, GFLAGS),
+ GATE(HCLK_CIF, "hclk_cif", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 2, GFLAGS),
+ GATE(HCLK_ISP, "hclk_isp", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 4, GFLAGS),
+
+ /* PD_VO */
+ GATE(0, "aclk_vo_niu", "aclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 0, GFLAGS),
+ GATE(ACLK_VOPB, "aclk_vopb", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 3, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 7, GFLAGS),
+ GATE(ACLK_VOPL, "aclk_vopl", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 5, GFLAGS),
+
+ GATE(0, "hclk_vo_niu", "hclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 1, GFLAGS),
+ GATE(HCLK_VOPB, "hclk_vopb", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 4, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 8, GFLAGS),
+ GATE(HCLK_VOPL, "hclk_vopl", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 6, GFLAGS),
+
+ GATE(0, "pclk_vo_niu", "pclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 2, GFLAGS),
+ GATE(PCLK_MIPI_DSI, "pclk_mipi_dsi", "pclk_vo_pre", 0, PX30_CLKGATE_CON(3), 9, GFLAGS),
+
+ /* PD_BUS */
+ GATE(0, "aclk_bus_niu", "aclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 8, GFLAGS),
+ GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 11, GFLAGS),
+ GATE(ACLK_GIC, "aclk_gic", "aclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 12, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_pre", 0, PX30_CLKGATE_CON(13), 15, GFLAGS),
+
+ GATE(0, "hclk_bus_niu", "hclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 9, GFLAGS),
+ GATE(0, "hclk_rom", "hclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 14, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 1, GFLAGS),
+ GATE(HCLK_I2S0, "hclk_i2s0", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(HCLK_I2S1, "hclk_i2s1", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(HCLK_I2S2, "hclk_i2s2", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 4, GFLAGS),
+
+ GATE(0, "pclk_bus_niu", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 10, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 0, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 5, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 7, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 9, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 10, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 11, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 12, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 13, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 14, GFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 15, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 0, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 1, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 2, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 3, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 4, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 5, GFLAGS),
+ GATE(PCLK_OTP_NS, "pclk_otp_ns", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 6, GFLAGS),
+ GATE(PCLK_WDT_NS, "pclk_wdt_ns", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 7, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 8, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 9, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 10, GFLAGS),
+ GATE(0, "pclk_grf", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 11, GFLAGS),
+ GATE(0, "pclk_sgrf", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 12, GFLAGS),
+
+ /* PD_VPU */
+ GATE(0, "hclk_vpu_niu", "hclk_vpu_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 7, GFLAGS),
+ GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_pre", 0, PX30_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 5, GFLAGS),
+ GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_pre", 0, PX30_CLKGATE_CON(4), 4, GFLAGS),
+
+ /* PD_CRYPTO */
+ GATE(0, "hclk_crypto_niu", "hclk_crypto_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(9), 3, GFLAGS),
+ GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_crypto_pre", 0, PX30_CLKGATE_CON(9), 5, GFLAGS),
+ GATE(0, "aclk_crypto_niu", "aclk_crypto_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_crypto_pre", 0, PX30_CLKGATE_CON(9), 4, GFLAGS),
+
+ /* PD_SDCARD */
+ GATE(0, "hclk_sdmmc_niu", "hclk_sdmmc_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(7), 0, GFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_sdmmc_pre", 0, PX30_CLKGATE_CON(7), 1, GFLAGS),
+
+ /* PD_PERI */
+ GATE(0, "aclk_peri_niu", "aclk_peri_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(5), 9, GFLAGS),
+
+ /* PD_MMC_NAND */
+ GATE(HCLK_NANDC, "hclk_nandc", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(0, "hclk_mmc_nand_niu", "hclk_mmc_nand", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(6), 8, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(6), 10, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(6), 11, GFLAGS),
+
+ /* PD_USB */
+ GATE(0, "hclk_usb_niu", "hclk_usb", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_OTG, "hclk_otg", "hclk_usb", 0, PX30_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(HCLK_HOST, "hclk_host", "hclk_usb", 0, PX30_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(HCLK_HOST_ARB, "hclk_host_arb", "hclk_usb", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(7), 8, GFLAGS),
+
+ /* PD_GMAC */
+ GATE(0, "aclk_gmac_niu", "aclk_gmac_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(8), 0, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_gmac_pre", 0,
+ PX30_CLKGATE_CON(8), 2, GFLAGS),
+ GATE(0, "pclk_gmac_niu", "pclk_gmac_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(8), 1, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_gmac_pre", 0,
+ PX30_CLKGATE_CON(8), 3, GFLAGS),
+};
+
+static struct rockchip_clk_branch px30_clk_pmu_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 2
+ */
+
+ COMPOSITE_FRACMUX(0, "clk_rtc32k_frac", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_PMU_CLKSEL_CON(1), 0,
+ PX30_PMU_CLKGATE_CON(0), 13, GFLAGS,
+ &px30_rtc32k_pmu_fracmux),
+
+ COMPOSITE_NOMUX(XIN24M_DIV, "xin24m_div", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_PMU_CLKSEL_CON(0), 8, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 12, GFLAGS),
+
+ COMPOSITE_NOMUX(0, "clk_wifi_pmu_src", "gpll", 0,
+ PX30_PMU_CLKSEL_CON(2), 8, 6, DFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 14, GFLAGS),
+ COMPOSITE_NODIV(SCLK_WIFI_PMU, "clk_wifi_pmu", mux_wifi_pmu_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(2), 15, 1, MFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 15, GFLAGS),
+
+ COMPOSITE(0, "clk_uart0_pmu_src", mux_uart_src_p, 0,
+ PX30_PMU_CLKSEL_CON(3), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart0_np5", "clk_uart0_pmu_src", 0,
+ PX30_PMU_CLKSEL_CON(4), 0, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_pmu_src", CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(5), 0,
+ PX30_PMU_CLKGATE_CON(1), 2, GFLAGS,
+ &px30_uart0_pmu_fracmux),
+ GATE(SCLK_UART0_PMU, "clk_uart0_pmu", "clk_uart0_pmu_mux", CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKGATE_CON(1), 3, GFLAGS),
+
+ GATE(SCLK_PVTM_PMU, "clk_pvtm_pmu", "xin24m", 0,
+ PX30_PMU_CLKGATE_CON(1), 4, GFLAGS),
+
+ COMPOSITE_NOMUX(PCLK_PMU_PRE, "pclk_pmu_pre", "gpll", 0,
+ PX30_PMU_CLKSEL_CON(0), 0, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 0, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_REF24M_PMU, "clk_ref24m_pmu", "gpll", 0,
+ PX30_PMU_CLKSEL_CON(2), 0, 6, DFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_NODIV(SCLK_USBPHY_REF, "clk_usbphy_ref", mux_usbphy_ref_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(2), 6, 1, MFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 9, GFLAGS),
+ COMPOSITE_NODIV(SCLK_MIPIDSIPHY_REF, "clk_mipidsiphy_ref", mux_mipidsiphy_ref_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(2), 7, 1, MFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 10, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 9
+ */
+
+ /* PD_PMU */
+ GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(0, "pclk_pmu_sgrf", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(0, "pclk_pmu_grf", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(0, "pclk_pmu_mem", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_GPIO0_PMU, "pclk_gpio0_pmu", "pclk_pmu_pre", 0, PX30_PMU_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(PCLK_UART0_PMU, "pclk_uart0_pmu", "pclk_pmu_pre", 0, PX30_PMU_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(0, "pclk_cru_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 8, GFLAGS),
+};
+
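+/*
+ * Clocks that must never be gated: bus/interconnect roots plus the
+ * console UART; rockchip_clk_protect_critical() keeps them enabled.
+ */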
+static const char *const px30_pmucru_critical_clocks[] __initconst = {
+ "aclk_bus_pre",
+ "pclk_bus_pre",
+ "hclk_bus_pre",
+ "aclk_peri_pre",
+ "hclk_peri_pre",
+ "aclk_gpu_niu",
+ "pclk_top_pre",
+ "pclk_pmu_pre",
+ "hclk_usb_niu",
+ "pll_npll",
+ "usb480m",
+ "clk_uart2",
+ "pclk_uart2",
+};
+
+static void __init px30_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+ struct clk *clk;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ /* aclk_dmac is controlled by sgrf_soc_con1[11]. */
+ clk = clk_register_fixed_factor(NULL, "aclk_dmac", "aclk_bus_pre", 0, 1, 1);
+ if (IS_ERR(clk))
+ pr_warn("%s: could not register clock aclk_dmac: %ld\n",
+ __func__, PTR_ERR(clk));
+ else
+ rockchip_clk_add_lookup(ctx, clk, ACLK_DMAC);
+
+ rockchip_clk_register_plls(ctx, px30_pll_clks,
+ ARRAY_SIZE(px30_pll_clks),
+ PX30_GRF_SOC_STATUS0);
+ rockchip_clk_register_branches(ctx, px30_clk_branches,
+ ARRAY_SIZE(px30_clk_branches));
+
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
+ mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
+ &px30_cpuclk_data, px30_cpuclk_rates,
+ ARRAY_SIZE(px30_cpuclk_rates));
+
+ rockchip_register_softrst(np, 12, reg_base + PX30_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, PX30_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(px30_cru, "rockchip,px30-cru", px30_clk_init);
+
+static void __init px30_pmu_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru pmu region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip pmu clk init failed\n", __func__);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, px30_pmu_pll_clks,
+ ARRAY_SIZE(px30_pmu_pll_clks), PX30_GRF_SOC_STATUS0);
+
+ rockchip_clk_register_branches(ctx, px30_clk_pmu_branches,
+ ARRAY_SIZE(px30_clk_pmu_branches));
+
+ rockchip_clk_protect_critical(px30_pmucru_critical_clocks,
+ ARRAY_SIZE(px30_pmucru_critical_clocks));
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(px30_cru_pmu, "rockchip,px30-pmucru", px30_pmu_clk_init);
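Consumers pick these clock IDs up through the common clock framework; a minimal sketch of a hypothetical user follows, where demo_probe() and the "baudclk" connection name are illustrative only, not part of this patch:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct clk *sclk;
		int ret;

		/* "baudclk" would map to e.g. SCLK_UART2 via the consumer's DT node. */
		sclk = devm_clk_get(&pdev->dev, "baudclk");
		if (IS_ERR(sclk))
			return PTR_ERR(sclk);

		ret = clk_prepare_enable(sclk);
		if (ret)
			return ret;

		/* The fracmux chain lets this hit non-integer divisions of GPLL. */
		return clk_set_rate(sclk, 48000000);
	}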
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index bca10d618f0a..5a628148f3f0 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -631,7 +631,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(31), 0, 2, MFLAGS),
COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT,
- RK3399_CLKSEL_CON(30), 8, 2, MFLAGS,
+ RK3399_CLKSEL_CON(31), 2, 1, MFLAGS,
RK3399_CLKGATE_CON(8), 12, GFLAGS),
/* uart */
@@ -1523,6 +1523,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
"pclk_pmu_src",
"fclk_cm0s_src_pmu",
"clk_timer_src_pmu",
+ "pclk_rkpwm_pmu",
};
static void __init rk3399_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 326b3fa44f5d..c3ad92965823 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -492,6 +492,16 @@ void __init rockchip_clk_register_branches(
list->gate_flags, flags, list->child,
&ctx->lock);
break;
+ case branch_half_divider:
+ clk = rockchip_clk_register_halfdiv(list->name,
+ list->parent_names, list->num_parents,
+ ctx->reg_base, list->muxdiv_offset,
+ list->mux_shift, list->mux_width,
+ list->mux_flags, list->div_shift,
+ list->div_width, list->div_flags,
+ list->gate_offset, list->gate_shift,
+ list->gate_flags, flags, &ctx->lock);
+ break;
case branch_gate:
flags |= CLK_SET_RATE_PARENT;
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index ef601dded32c..6b53fff4cc96 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -34,7 +34,46 @@ struct clk;
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
-/* register positions shared by RV1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */
+/* register positions shared by PX30, RV1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */
+#define BOOST_PLL_H_CON(x) ((x) * 0x4)
+#define BOOST_CLK_CON 0x0008
+#define BOOST_BOOST_CON 0x000c
+#define BOOST_SWITCH_CNT 0x0010
+#define BOOST_HIGH_PERF_CNT0 0x0014
+#define BOOST_HIGH_PERF_CNT1 0x0018
+#define BOOST_STATIS_THRESHOLD 0x001c
+#define BOOST_SHORT_SWITCH_CNT 0x0020
+#define BOOST_SWITCH_THRESHOLD 0x0024
+#define BOOST_FSM_STATUS 0x0028
+#define BOOST_PLL_L_CON(x) ((x) * 0x4 + 0x2c)
+#define BOOST_RECOVERY_MASK 0x1
+#define BOOST_RECOVERY_SHIFT 1
+#define BOOST_SW_CTRL_MASK 0x1
+#define BOOST_SW_CTRL_SHIFT 2
+#define BOOST_LOW_FREQ_EN_MASK 0x1
+#define BOOST_LOW_FREQ_EN_SHIFT 3
+#define BOOST_BUSY_STATE BIT(8)
+
+#define PX30_PLL_CON(x) ((x) * 0x4)
+#define PX30_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define PX30_CLKGATE_CON(x) ((x) * 0x4 + 0x200)
+#define PX30_GLB_SRST_FST 0xb8
+#define PX30_GLB_SRST_SND 0xbc
+#define PX30_SOFTRST_CON(x) ((x) * 0x4 + 0x300)
+#define PX30_MODE_CON 0xa0
+#define PX30_MISC_CON 0xa4
+#define PX30_SDMMC_CON0 0x380
+#define PX30_SDMMC_CON1 0x384
+#define PX30_SDIO_CON0 0x388
+#define PX30_SDIO_CON1 0x38c
+#define PX30_EMMC_CON0 0x390
+#define PX30_EMMC_CON1 0x394
+
+#define PX30_PMU_PLL_CON(x) ((x) * 0x4)
+#define PX30_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x40)
+#define PX30_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x80)
+#define PX30_PMU_MODE 0x0020
+
#define RV1108_PLL_CON(x) ((x) * 0x4)
#define RV1108_CLKSEL_CON(x) ((x) * 0x4 + 0x60)
#define RV1108_CLKGATE_CON(x) ((x) * 0x4 + 0x120)
@@ -354,6 +393,7 @@ enum rockchip_clk_branch_type {
branch_inverter,
branch_factor,
branch_ddrclk,
+ branch_half_divider,
};
struct rockchip_clk_branch {
@@ -684,6 +724,79 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define COMPOSITE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\
+ df, go, gs, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .mux_shift = ms, \
+ .mux_width = mw, \
+ .mux_flags = mf, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = go, \
+ .gate_shift = gs, \
+ .gate_flags = gf, \
+ }
+
+#define COMPOSITE_NOGATE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf, \
+ ds, dw, df) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .mux_shift = ms, \
+ .mux_width = mw, \
+ .mux_flags = mf, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = -1, \
+ }
+
+#define COMPOSITE_NOMUX_HALFDIV(_id, cname, pname, f, mo, ds, dw, df, \
+ go, gs, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = go, \
+ .gate_shift = gs, \
+ .gate_flags = gf, \
+ }
+
+#define DIV_HALF(_id, cname, pname, f, o, s, w, df) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .muxdiv_offset = o, \
+ .div_shift = s, \
+ .div_width = w, \
+ .div_flags = df, \
+ .gate_offset = -1, \
+ }
+
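+/*
+ * For branch_half_divider a register value n selects a divisor of
+ * n + 1.5, i.e. rate = parent_rate * 2 / (2 * n + 3); this is what
+ * rockchip_clk_register_halfdiv() (declared below) is expected to
+ * implement.
+ */
+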
struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
void __iomem *base, unsigned long nr_clks);
void rockchip_clk_of_add_provider(struct device_node *np,
@@ -708,6 +821,17 @@ void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
#define ROCKCHIP_SOFTRST_HIWORD_MASK BIT(0)
+struct clk *rockchip_clk_register_halfdiv(const char *name,
+ const char *const *parent_names,
+ u8 num_parents, void __iomem *base,
+ int muxdiv_offset, u8 mux_shift,
+ u8 mux_width, u8 mux_flags,
+ u8 div_shift, u8 div_width,
+ u8 div_flags, int gate_offset,
+ u8 gate_shift, u8 gate_flags,
+ unsigned long flags,
+ spinlock_t *lock);
+
#ifdef CONFIG_RESET_CONTROLLER
void rockchip_register_softrst(struct device_node *np,
unsigned int num_regs,
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
index d5f1ccb36300..cfaa057035ad 100644
--- a/drivers/clk/samsung/clk-exynos4412-isp.c
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -37,8 +37,6 @@ static const unsigned long exynos4x12_clk_isp_save[] __initconst = {
E4X12_GATE_ISP1,
};
-PNAME(mout_user_aclk400_mcuisp_p4x12) = { "fin_pll", "div_aclk400_mcuisp", };
-
static struct samsung_div_clock exynos4x12_isp_div_clks[] = {
DIV(CLK_ISP_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
DIV(CLK_ISP_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 72714633e39c..5b238fc314ac 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -28,7 +28,7 @@ static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
static const char * const emac_ptp_free_mux[] = {"peri_emac_ptp_clk", "boot_clk"};
static const char * const gpio_db_free_mux[] = {"peri_gpio_db_clk", "boot_clk"};
-static const char * const sdmmc_free_mux[] = {"peri_sdmmc_clk", "boot_clk"};
+static const char * const sdmmc_free_mux[] = {"main_sdmmc_clk", "boot_clk"};
static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"};
static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
@@ -37,6 +37,11 @@ static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
+static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
+ "peri_mpu_base_clk",
+ "osc1", "cb_intosc_hs_div2_clk",
+ "f2s_free_clk"};
+
/* clocks in AO (always on) controller */
static const struct stratix10_pll_clock s10_pll_clks[] = {
{ STRATIX10_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
@@ -57,7 +62,7 @@ static const struct stratix10_perip_c_clock s10_main_perip_c_clks[] = {
};
static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
- { STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ { STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
0, 0x48, 0, 0, 0},
{ STRATIX10_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
0, 0x4C, 0, 0, 0},
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index acaa14cfa25c..49454700f2e5 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,24 +1,24 @@
# SPDX-License-Identifier: GPL-2.0
# Common objects
-lib-$(CONFIG_SUNXI_CCU) += ccu_common.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o
+obj-y += ccu_common.o
+obj-y += ccu_mmc_timing.o
+obj-y += ccu_reset.o
# Base clock types
-lib-$(CONFIG_SUNXI_CCU) += ccu_div.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_frac.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o
+obj-y += ccu_div.o
+obj-y += ccu_frac.o
+obj-y += ccu_gate.o
+obj-y += ccu_mux.o
+obj-y += ccu_mult.o
+obj-y += ccu_phase.o
+obj-y += ccu_sdm.o
# Multi-factor clocks
-lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nkm.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nkmp.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nm.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o
+obj-y += ccu_nk.o
+obj-y += ccu_nkm.o
+obj-y += ccu_nkmp.o
+obj-y += ccu_nm.o
+obj-y += ccu_mp.o
# SoC support
obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
@@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o
-
-# The lib-y file goals is supposed to work only in arch/*/lib or lib/. In our
-# case, we want to use that goal, but even though lib.a will be properly
-# generated, it will not be linked in, eventually resulting in a linker error
-# for missing symbols.
-#
-# We can work around that by explicitly adding lib.a to the obj-y goal. This is
-# an undocumented behaviour, but works well for now.
-obj-$(CONFIG_SUNXI_CCU) += lib.a
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 468d1abaf0ee..bae5ee67a797 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -289,16 +289,13 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.data = &sun8i_v3s_de2_clk_desc,
},
{
+ .compatible = "allwinner,sun50i-a64-de2-clk",
+ .data = &sun50i_a64_de2_clk_desc,
+ },
+ {
.compatible = "allwinner,sun50i-h5-de2-clk",
.data = &sun50i_a64_de2_clk_desc,
},
- /*
- * The Allwinner A64 SoC needs some bit to be poke in syscon to make
- * DE2 really working.
- * So there's currently no A64 compatible here.
- * H5 shares the same reset line with A64, so here H5 is using the
- * clock description of A64.
- */
{ }
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 65ba6455feb7..0f388f6944d5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -66,17 +66,18 @@ static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
CLK_SET_RATE_UNGATE);
/* TODO: The result of N/M is required to be in [8, 25] range. */
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
- "osc24M", 0x0010,
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video0_clk, "pll-video0",
+ "osc24M", 0x0010,
+ 192000000, /* Minimum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
/* TODO: The result of N/M is required to be in [8, 25] range. */
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
@@ -152,17 +153,18 @@ static struct ccu_nk pll_periph1_clk = {
};
/* TODO: The result of N/M is required to be in [8, 25] range. */
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1",
- "osc24M", 0x030,
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video1_clk, "pll-video1",
+ "osc24M", 0x030,
+ 192000000, /* Minimum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static struct ccu_nkm pll_sata_clk = {
.enable = BIT(31),
@@ -654,7 +656,8 @@ static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram",
static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 3, BIT(31), 0);
+ 0x104, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(mp_clk, "mp", de_parents,
0x108, 0, 4, 24, 3, BIT(31), 0);
@@ -666,9 +669,11 @@ static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0", tcon_parents,
static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd1_clk, "tcon-lcd1", tcon_parents,
0x114, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0", tcon_parents,
- 0x118, 0, 4, 24, 3, BIT(31), 0);
+ 0x118, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_tv1_clk, "tcon-tv1", tcon_parents,
- 0x11c, 0, 4, 24, 3, BIT(31), 0);
+ 0x11c, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const deinterlace_parents[] = { "pll-periph0",
"pll-periph1" };
@@ -698,7 +703,8 @@ static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
static const char * const hdmi_parents[] = { "pll-video0", "pll-video1" };
static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents,
- 0x150, 0, 4, 24, 2, BIT(31), 0);
+ 0x150, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M",
0x154, BIT(31), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
index 0db8e1e97af8..db2a1243f9ff 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
@@ -25,7 +25,9 @@
#define CLK_PLL_AUDIO_2X 4
#define CLK_PLL_AUDIO_4X 5
#define CLK_PLL_AUDIO_8X 6
-#define CLK_PLL_VIDEO0 7
+
+/* PLL_VIDEO0 is exported */
+
#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_VE 9
#define CLK_PLL_DDR0 10
@@ -34,7 +36,9 @@
#define CLK_PLL_PERIPH0_2X 13
#define CLK_PLL_PERIPH1 14
#define CLK_PLL_PERIPH1_2X 15
-#define CLK_PLL_VIDEO1 16
+
+/* PLL_VIDEO1 is exported */
+
#define CLK_PLL_VIDEO1_2X 17
#define CLK_PLL_SATA 18
#define CLK_PLL_SATA_OUT 19
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index b71692391bd6..6507acc843c7 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -8,6 +8,7 @@ obj-y += clk-periph-fixed.o
obj-y += clk-periph-gate.o
obj-y += clk-pll.o
obj-y += clk-pll-out.o
+obj-y += clk-sdmmc-mux.o
obj-y += clk-super.o
obj-y += clk-tegra-audio.o
obj-y += clk-tegra-periph.o
@@ -24,3 +25,4 @@ obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
obj-y += cvb.o
obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o
obj-$(CONFIG_CLK_TEGRA_BPMP) += clk-bpmp.o
+obj-y += clk-utils.o
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index a896692b74ec..01dada561c10 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -586,9 +586,15 @@ static struct clk_hw *tegra_bpmp_clk_of_xlate(struct of_phandle_args *clkspec,
unsigned int id = clkspec->args[0], i;
struct tegra_bpmp *bpmp = data;
- for (i = 0; i < bpmp->num_clocks; i++)
- if (bpmp->clocks[i]->id == id)
- return &bpmp->clocks[i]->hw;
+ for (i = 0; i < bpmp->num_clocks; i++) {
+ struct tegra_bpmp_clk *clk = bpmp->clocks[i];
+
+ if (!clk)
+ continue;
+
+ if (clk->id == id)
+ return &clk->hw;
+ }
return NULL;
}
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 16e0aee14773..205fe8ff63f0 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -32,35 +32,15 @@
static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
unsigned long parent_rate)
{
- u64 divider_ux1 = parent_rate;
- u8 flags = divider->flags;
- int mul;
-
- if (!rate)
- return 0;
-
- mul = get_mul(divider);
-
- if (!(flags & TEGRA_DIVIDER_INT))
- divider_ux1 *= mul;
-
- if (flags & TEGRA_DIVIDER_ROUND_UP)
- divider_ux1 += rate - 1;
-
- do_div(divider_ux1, rate);
-
- if (flags & TEGRA_DIVIDER_INT)
- divider_ux1 *= mul;
+ int div;
- divider_ux1 -= mul;
+ div = div_frac_get(rate, parent_rate, divider->width,
+ divider->frac_width, divider->flags);
- if ((s64)divider_ux1 < 0)
+ if (div < 0)
return 0;
- if (divider_ux1 > get_max_div(divider))
- return get_max_div(divider);
-
- return divider_ux1;
+ return div;
}
static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
@@ -194,6 +174,7 @@ static const struct clk_div_table mc_div_table[] = {
struct clk *tegra_clk_register_mc(const char *name, const char *parent_name,
void __iomem *reg, spinlock_t *lock)
{
- return clk_register_divider_table(NULL, name, parent_name, 0, reg,
- 16, 1, 0, mc_div_table, lock);
+ return clk_register_divider_table(NULL, name, parent_name,
+ CLK_IS_CRITICAL, reg, 16, 1, 0,
+ mc_div_table, lock);
}
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 5234acd30e89..0621a3a82ea6 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -132,7 +132,7 @@ static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
timing = tegra->timings + i;
if (timing->rate > req->max_rate) {
- i = min(i, 1);
+ i = max(i, 1);
req->rate = tegra->timings[i - 1].rate;
return 0;
}
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index b616e33c5255..de466b4446da 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -227,13 +227,11 @@ enum clk_id {
tegra_clk_sdmmc1_9,
tegra_clk_sdmmc2,
tegra_clk_sdmmc2_8,
- tegra_clk_sdmmc2_9,
tegra_clk_sdmmc3,
tegra_clk_sdmmc3_8,
tegra_clk_sdmmc3_9,
tegra_clk_sdmmc4,
tegra_clk_sdmmc4_8,
- tegra_clk_sdmmc4_9,
tegra_clk_se,
tegra_clk_soc_therm,
tegra_clk_soc_therm_8,
diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c
new file mode 100644
index 000000000000..473d418533cb
--- /dev/null
+++ b/drivers/clk/tegra/clk-sdmmc-mux.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 NVIDIA CORPORATION. All rights reserved.
+ *
+ * based on clk-mux.c
+ *
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+#define DIV_MASK GENMASK(7, 0)
+#define MUX_SHIFT 29
+#define MUX_MASK GENMASK(MUX_SHIFT + 2, MUX_SHIFT)
+#define SDMMC_MUL 2
+
+#define get_max_div(d) DIV_MASK
+#define get_div_field(val) ((val) & DIV_MASK)
+#define get_mux_field(val) (((val) & MUX_MASK) >> MUX_SHIFT)
+
+static const char * const mux_sdmmc_parents[] = {
+ "pll_p", "pll_c4_out2", "pll_c4_out0", "pll_c4_out1", "clk_m"
+};
+
+static const u8 mux_lj_idx[] = {
+ [0] = 0, [1] = 1, [2] = 2, [3] = 5, [4] = 6
+};
+
+static const u8 mux_non_lj_idx[] = {
+ [0] = 0, [1] = 3, [2] = 7, [3] = 4, [4] = 6
+};
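+
+/*
+ * The hardware parent selector differs between the low-jitter (LJ)
+ * path, used when the divider field is zero, and the divided path:
+ * the two tables above map the common parent index to the per-path
+ * hardware value.
+ */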
+
+static u8 clk_sdmmc_mux_get_parent(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ int num_parents, i;
+ u32 src, val;
+ const u8 *mux_idx;
+
+ num_parents = clk_hw_get_num_parents(hw);
+
+ val = readl_relaxed(sdmmc_mux->reg);
+ src = get_mux_field(val);
+ if (get_div_field(val))
+ mux_idx = mux_non_lj_idx;
+ else
+ mux_idx = mux_lj_idx;
+
+ for (i = 0; i < num_parents; i++) {
+ if (mux_idx[i] == src)
+ return i;
+ }
+
+ WARN(1, "Unknown parent selector %d\n", src);
+
+ return 0;
+}
+
+static int clk_sdmmc_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ u32 val;
+
+ val = readl_relaxed(sdmmc_mux->reg);
+ if (get_div_field(val))
+ index = mux_non_lj_idx[index];
+ else
+ index = mux_lj_idx[index];
+
+ val &= ~MUX_MASK;
+ val |= index << MUX_SHIFT;
+
+ writel(val, sdmmc_mux->reg);
+
+ return 0;
+}
+
+static unsigned long clk_sdmmc_mux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ u32 val;
+ int div;
+ u64 rate = parent_rate;
+
+ val = readl_relaxed(sdmmc_mux->reg);
+ div = get_div_field(val);
+
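+ /*
+ * The 8-bit divider field is 7.1 fixed point with an implicit +1:
+ * a field value n yields a divisor of n / 2 + 1, so the code below
+ * computes parent_rate * 2 / (n + 2), rounded up.
+ */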
+ div += SDMMC_MUL;
+
+ rate *= SDMMC_MUL;
+ rate += div - 1;
+ do_div(rate, div);
+
+ return rate;
+}
+
+static int clk_sdmmc_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ int div;
+ unsigned long output_rate = req->best_parent_rate;
+
+ req->rate = max(req->rate, req->min_rate);
+ req->rate = min(req->rate, req->max_rate);
+
+ if (!req->rate)
+ return output_rate;
+
+ div = div_frac_get(req->rate, output_rate, 8, 1, sdmmc_mux->div_flags);
+ if (div < 0)
+ div = 0;
+
+ if (sdmmc_mux->div_flags & TEGRA_DIVIDER_ROUND_UP)
+ req->rate = DIV_ROUND_UP(output_rate * SDMMC_MUL,
+ div + SDMMC_MUL);
+ else
+ req->rate = output_rate * SDMMC_MUL / (div + SDMMC_MUL);
+
+ return 0;
+}
+
+static int clk_sdmmc_mux_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ int div;
+ unsigned long flags = 0;
+ u32 val;
+ u8 src;
+
+ div = div_frac_get(rate, parent_rate, 8, 1, sdmmc_mux->div_flags);
+ if (div < 0)
+ return div;
+
+ if (sdmmc_mux->lock)
+ spin_lock_irqsave(sdmmc_mux->lock, flags);
+
+ src = clk_sdmmc_mux_get_parent(hw);
+ if (div)
+ src = mux_non_lj_idx[src];
+ else
+ src = mux_lj_idx[src];
+
+ val = src << MUX_SHIFT;
+ val |= div;
+ writel(val, sdmmc_mux->reg);
+ fence_udelay(2, sdmmc_mux->reg);
+
+ if (sdmmc_mux->lock)
+ spin_unlock_irqrestore(sdmmc_mux->lock, flags);
+
+ return 0;
+}
+
+static int clk_sdmmc_mux_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+ struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return gate_ops->is_enabled(gate_hw);
+}
+
+static int clk_sdmmc_mux_enable(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+ struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return gate_ops->enable(gate_hw);
+}
+
+static void clk_sdmmc_mux_disable(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+ struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+ gate_ops->disable(gate_hw);
+}
+
+static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
+ .get_parent = clk_sdmmc_mux_get_parent,
+ .set_parent = clk_sdmmc_mux_set_parent,
+ .determine_rate = clk_sdmmc_mux_determine_rate,
+ .recalc_rate = clk_sdmmc_mux_recalc_rate,
+ .set_rate = clk_sdmmc_mux_set_rate,
+ .is_enabled = clk_sdmmc_mux_is_enabled,
+ .enable = clk_sdmmc_mux_enable,
+ .disable = clk_sdmmc_mux_disable,
+};
+
+struct clk *tegra_clk_register_sdmmc_mux_div(const char *name,
+ void __iomem *clk_base, u32 offset, u32 clk_num, u8 div_flags,
+ unsigned long flags, void *lock)
+{
+ struct clk *clk;
+ struct clk_init_data init;
+ const struct tegra_clk_periph_regs *bank;
+ struct tegra_sdmmc_mux *sdmmc_mux;
+
+ init.ops = &tegra_clk_sdmmc_mux_ops;
+ init.name = name;
+ init.flags = flags;
+ init.parent_names = mux_sdmmc_parents;
+ init.num_parents = ARRAY_SIZE(mux_sdmmc_parents);
+
+ bank = get_reg_bank(clk_num);
+ if (!bank)
+ return ERR_PTR(-EINVAL);
+
+ sdmmc_mux = kzalloc(sizeof(*sdmmc_mux), GFP_KERNEL);
+ if (!sdmmc_mux)
+ return ERR_PTR(-ENOMEM);
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ sdmmc_mux->hw.init = &init;
+ sdmmc_mux->reg = clk_base + offset;
+ sdmmc_mux->lock = lock;
+ sdmmc_mux->gate.clk_base = clk_base;
+ sdmmc_mux->gate.regs = bank;
+ sdmmc_mux->gate.enable_refcnt = periph_clk_enb_refcnt;
+ sdmmc_mux->gate.clk_num = clk_num;
+ sdmmc_mux->gate.flags = TEGRA_PERIPH_ON_APB;
+ sdmmc_mux->div_flags = div_flags;
+ sdmmc_mux->gate_ops = &tegra_clk_periph_gate_ops;
+
+ clk = clk_register(NULL, &sdmmc_mux->hw);
+ if (IS_ERR(clk)) {
+ kfree(sdmmc_mux);
+ return clk;
+ }
+
+ sdmmc_mux->gate.hw.clk = clk;
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 2acba2986bc6..38c4eb28c8bf 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -451,15 +451,6 @@ static u32 mux_pllp_pllc4_out2_pllc4_out1_clkm_pllc4_out0_idx[] = {
[0] = 0, [1] = 3, [2] = 4, [3] = 6, [4] = 7,
};
-static const char *mux_pllp_clkm_pllc4_out2_out1_out0_lj[] = {
- "pll_p",
- "pll_c4_out2", "pll_c4_out0", /* LJ input */
- "pll_c4_out2", "pll_c4_out1",
- "pll_c4_out1", /* LJ input */
- "clk_m", "pll_c4_out0"
-};
-#define mux_pllp_clkm_pllc4_out2_out1_out0_lj_idx NULL
-
static const char *mux_pllp_pllc2_c_c3_clkm[] = {
"pll_p", "pll_c2", "pll_c", "pll_c3", "clk_m"
};
@@ -686,9 +677,7 @@ static struct tegra_periph_init_data periph_clks[] = {
MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3),
MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4),
MUX8("sdmmc1", mux_pllp_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1_9),
- MUX8("sdmmc2", mux_pllp_clkm_pllc4_out2_out1_out0_lj, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2_9),
MUX8("sdmmc3", mux_pllp_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3_9),
- MUX8("sdmmc4", mux_pllp_clkm_pllc4_out2_out1_out0_lj, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4_9),
MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 0c69c7970950..b6cf28ca2ed2 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1267,7 +1267,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_I2S2, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S3, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
- { TEGRA124_CLK_VDE, TEGRA124_CLK_CLK_MAX, 600000000, 0 },
+ { TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_C3, 600000000, 0 },
{ TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1 },
{ TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0 },
{ TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0 },
@@ -1290,6 +1290,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1 },
{ TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1 },
{ TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0 },
+ { TEGRA124_CLK_VIC03, TEGRA124_CLK_PLL_C3, 0, 0 },
/* must be the last entry */
{ TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 5435d01c636a..9eb1cb14fce1 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -44,6 +44,8 @@
#define CLK_SOURCE_EMC 0x19c
#define CLK_SOURCE_SOR1 0x410
#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC4 0x164
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
@@ -2286,11 +2288,9 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_rtc] = { .dt_id = TEGRA210_CLK_RTC, .present = true },
[tegra_clk_timer] = { .dt_id = TEGRA210_CLK_TIMER, .present = true },
[tegra_clk_uarta_8] = { .dt_id = TEGRA210_CLK_UARTA, .present = true },
- [tegra_clk_sdmmc2_9] = { .dt_id = TEGRA210_CLK_SDMMC2, .present = true },
[tegra_clk_i2s1] = { .dt_id = TEGRA210_CLK_I2S1, .present = true },
[tegra_clk_i2c1] = { .dt_id = TEGRA210_CLK_I2C1, .present = true },
[tegra_clk_sdmmc1_9] = { .dt_id = TEGRA210_CLK_SDMMC1, .present = true },
- [tegra_clk_sdmmc4_9] = { .dt_id = TEGRA210_CLK_SDMMC4, .present = true },
[tegra_clk_pwm] = { .dt_id = TEGRA210_CLK_PWM, .present = true },
[tegra_clk_i2s2] = { .dt_id = TEGRA210_CLK_I2S2, .present = true },
[tegra_clk_usbd] = { .dt_id = TEGRA210_CLK_USBD, .present = true },
@@ -3030,6 +3030,16 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
0, NULL);
clks[TEGRA210_CLK_ACLK] = clk;
+ clk = tegra_clk_register_sdmmc_mux_div("sdmmc2", clk_base,
+ CLK_SOURCE_SDMMC2, 9,
+ TEGRA_DIVIDER_ROUND_UP, 0, NULL);
+ clks[TEGRA210_CLK_SDMMC2] = clk;
+
+ clk = tegra_clk_register_sdmmc_mux_div("sdmmc4", clk_base,
+ CLK_SOURCE_SDMMC4, 15,
+ TEGRA_DIVIDER_ROUND_UP, 0, NULL);
+ clks[TEGRA210_CLK_SDMMC4] = clk;
+
for (i = 0; i < ARRAY_SIZE(tegra210_periph); i++) {
struct tegra_periph_init_data *init = &tegra210_periph[i];
struct clk **clkp;
diff --git a/drivers/clk/tegra/clk-utils.c b/drivers/clk/tegra/clk-utils.c
new file mode 100644
index 000000000000..1a5daae4e501
--- /dev/null
+++ b/drivers/clk/tegra/clk-utils.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <asm/div64.h>
+
+#include "clk.h"
+
+#define div_mask(w) ((1 << (w)) - 1)
+
+int div_frac_get(unsigned long rate, unsigned int parent_rate, u8 width,
+ u8 frac_width, u8 flags)
+{
+ u64 divider_ux1 = parent_rate;
+ int mul;
+
+ if (!rate)
+ return 0;
+
+ mul = 1 << frac_width;
+
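+ /*
+ * A register value n encodes a divisor of n / mul + 1, so the exact
+ * setting is parent_rate * mul / rate - mul; compute it in fixed
+ * point with frac_width fractional bits, rounding the divider up
+ * when the resulting rate must not exceed the request.
+ */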
+ if (!(flags & TEGRA_DIVIDER_INT))
+ divider_ux1 *= mul;
+
+ if (flags & TEGRA_DIVIDER_ROUND_UP)
+ divider_ux1 += rate - 1;
+
+ do_div(divider_ux1, rate);
+
+ if (flags & TEGRA_DIVIDER_INT)
+ divider_ux1 *= mul;
+
+ if (divider_ux1 < mul)
+ return 0;
+
+ divider_ux1 -= mul;
+
+ if (divider_ux1 > div_mask(width))
+ return div_mask(width);
+
+ return divider_ux1;
+}
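A worked example of the rounding behavior (illustrative numbers): with parent_rate = 408 MHz, rate = 200 MHz and frac_width = 1 (mul = 2):

	divider_ux1 = 408 * 2 / 200         = 4  ->  n = 2, divisor 2.0, 204.0 MHz
	with TEGRA_DIVIDER_ROUND_UP:
	divider_ux1 = (408 * 2 + 199) / 200 = 5  ->  n = 3, divisor 2.5, 163.2 MHz

i.e. rounding the divider up guarantees the resulting rate never exceeds the requested one.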
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index e1f88463b600..d2c3a010f8e9 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/delay.h>
/**
* struct tegra_clk_sync_source - external clock source from codec
@@ -705,6 +706,32 @@ struct clk *tegra_clk_register_super_clk(const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg, u8 clk_super_flags,
spinlock_t *lock);
+
+/**
+ * struct tegra_sdmmc_mux - switch divider with Low Jitter inputs for SDMMC
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register controlling mux and divider
+ * @flags: hardware-specific flags
+ * @lock: optional register lock
+ * @gate: gate clock
+ * @gate_ops: gate clock ops
+ */
+struct tegra_sdmmc_mux {
+ struct clk_hw hw;
+ void __iomem *reg;
+ spinlock_t *lock;
+ const struct clk_ops *gate_ops;
+ struct tegra_clk_periph_gate gate;
+ u8 div_flags;
+};
+
+#define to_clk_sdmmc_mux(_hw) container_of(_hw, struct tegra_sdmmc_mux, hw)
+
+struct clk *tegra_clk_register_sdmmc_mux_div(const char *name,
+ void __iomem *clk_base, u32 offset, u32 clk_num, u8 div_flags,
+ unsigned long flags, void *lock);
+
/**
* struct clk_init_table - clock initialization table
* @clk_id: clock id as mentioned in device tree bindings
@@ -811,6 +838,9 @@ extern tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
int tegra_pll_wait_for_lock(struct tegra_clk_pll *pll);
u16 tegra_pll_get_fixed_mdiv(struct clk_hw *hw, unsigned long input_rate);
int tegra_pll_p_div_to_hw(struct tegra_clk_pll *pll, u8 p_div);
+int div_frac_get(unsigned long rate, unsigned int parent_rate, u8 width,
+ u8 frac_width, u8 flags);
+
/* Combined read fence with delay */
#define fence_udelay(delay, reg) \
diff --git a/drivers/clk/uniphier/clk-uniphier-peri.c b/drivers/clk/uniphier/clk-uniphier-peri.c
index 521c80e9a06f..89b3ac378b3f 100644
--- a/drivers/clk/uniphier/clk-uniphier-peri.c
+++ b/drivers/clk/uniphier/clk-uniphier-peri.c
@@ -27,6 +27,12 @@
#define UNIPHIER_PERI_CLK_FI2C(idx, ch) \
UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch))
+#define UNIPHIER_PERI_CLK_SCSSI(idx) \
+ UNIPHIER_CLK_GATE("scssi", (idx), "spi", 0x20, 17)
+
+#define UNIPHIER_PERI_CLK_MCSSI(idx) \
+ UNIPHIER_CLK_GATE("mcssi", (idx), "spi", 0x24, 14)
+
const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_UART(0, 0),
UNIPHIER_PERI_CLK_UART(1, 1),
@@ -38,6 +44,7 @@ const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_I2C(6, 2),
UNIPHIER_PERI_CLK_I2C(7, 3),
UNIPHIER_PERI_CLK_I2C(8, 4),
+ UNIPHIER_PERI_CLK_SCSSI(11),
{ /* sentinel */ }
};
@@ -53,5 +60,7 @@ const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_FI2C(8, 4),
UNIPHIER_PERI_CLK_FI2C(9, 5),
UNIPHIER_PERI_CLK_FI2C(10, 6),
+ UNIPHIER_PERI_CLK_SCSSI(11),
+ UNIPHIER_PERI_CLK_MCSSI(12),
{ /* sentinel */ }
};
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 4f5ff9fa11fd..2bb5a8570adc 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -29,18 +29,20 @@
UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 10), \
UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 15)
-/* Denali driver requires clk_x rate (clk: 50MHz, clk_x & ecc_clk: 200MHz) */
#define UNIPHIER_LD4_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 8), \
- UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x2104, 2)
+ UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 1, 32), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x2104, 2)
#define UNIPHIER_PRO5_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 12), \
- UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x2104, 2)
+ UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 1, 48), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x2104, 2)
#define UNIPHIER_LD11_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 10), \
- UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x210c, 0)
+ UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 1, 40), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x210c, 0)
+
+#define UNIPHIER_SYS_CLK_NAND_4X(idx) \
+ UNIPHIER_CLK_FACTOR("nand-4x", (idx), "nand", 4, 1)
#define UNIPHIER_LD11_SYS_CLK_EMMC(idx) \
UNIPHIER_CLK_GATE("emmc", (idx), NULL, 0x210c, 2)
@@ -93,7 +95,9 @@ const struct uniphier_clk_data uniphier_ld4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
@@ -108,7 +112,9 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("gpll", -1, "ref", 10, 1), /* 250 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 8),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 32),
+ UNIPHIER_CLK_FACTOR("spi", 1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_PRO4_SYS_CLK_ETHER(6),
@@ -118,6 +124,9 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = {
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* Ether, SATA, USB3 */
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
+ UNIPHIER_CLK_FACTOR("usb30-hsphy0", 16, "upll", 1, 12),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy0", 17, "ref", 1, 1),
+ UNIPHIER_CLK_FACTOR("usb31-ssphy0", 20, "ref", 1, 1),
UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x2104, 18),
UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x2104, 19),
UNIPHIER_PRO4_SYS_CLK_AIO(40),
@@ -130,7 +139,9 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 20),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
@@ -143,7 +154,9 @@ const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 48),
UNIPHIER_PRO5_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_PRO5_SYS_CLK_SD,
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC */
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* PCIe, USB3 */
@@ -158,7 +171,9 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 96, 1), /* 2400 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 27),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 48),
UNIPHIER_PRO5_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_PRO5_SYS_CLK_SD,
UNIPHIER_PRO4_SYS_CLK_ETHER(6),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC, RLE */
@@ -166,8 +181,11 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
/* The document mentions 0x2104 bit 18, but it is not functional */
- UNIPHIER_CLK_GATE("usb30-phy", 16, NULL, 0x2104, 19),
- UNIPHIER_CLK_GATE("usb31-phy", 20, NULL, 0x2104, 20),
+ UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x2104, 19),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy0", 17, "ref", 1, 1),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy1", 18, "ref", 1, 1),
+ UNIPHIER_CLK_GATE("usb31-hsphy0", 20, NULL, 0x2104, 20),
+ UNIPHIER_CLK_FACTOR("usb31-ssphy0", 21, "ref", 1, 1),
UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x2104, 22),
UNIPHIER_PRO5_SYS_CLK_AIO(40),
{ /* sentinel */ }
@@ -180,7 +198,9 @@ const struct uniphier_clk_data uniphier_ld11_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vspll", -1, "ref", 80, 1), /* 2000 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 40),
UNIPHIER_LD11_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
/* Index 5 reserved for eMMC PHY */
UNIPHIER_LD11_SYS_CLK_ETHER(6),
@@ -213,7 +233,9 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vppll", -1, "ref", 504, 5), /* 2520 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 40),
UNIPHIER_LD11_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
/* Index 5 reserved for eMMC PHY */
UNIPHIER_LD20_SYS_CLK_SD,
@@ -226,8 +248,10 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
* We do not use bit 15 here.
*/
UNIPHIER_CLK_GATE("usb30", 14, NULL, 0x210c, 14),
- UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 12),
- UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 13),
+ UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x210c, 12),
+ UNIPHIER_CLK_GATE("usb30-hsphy1", 17, NULL, 0x210c, 13),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy0", 18, "ref", 1, 1),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy1", 19, "ref", 1, 1),
UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 4),
UNIPHIER_LD11_SYS_CLK_AIO(40),
UNIPHIER_LD11_SYS_CLK_EVEA(41),
@@ -254,19 +278,21 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("s2pll", -1, "ref", 88, 1), /* IPP: 2400 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 40),
UNIPHIER_LD20_SYS_CLK_SD,
UNIPHIER_LD11_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
UNIPHIER_CLK_GATE("ether0", 6, NULL, 0x210c, 9),
UNIPHIER_CLK_GATE("ether1", 7, NULL, 0x210c, 10),
UNIPHIER_CLK_GATE("usb30", 12, NULL, 0x210c, 4), /* =GIO0 */
UNIPHIER_CLK_GATE("usb31-0", 13, NULL, 0x210c, 5), /* =GIO1 */
UNIPHIER_CLK_GATE("usb31-1", 14, NULL, 0x210c, 6), /* =GIO1-1 */
- UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 16),
- UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 18),
- UNIPHIER_CLK_GATE("usb30-phy2", 18, NULL, 0x210c, 20),
- UNIPHIER_CLK_GATE("usb31-phy0", 20, NULL, 0x210c, 17),
- UNIPHIER_CLK_GATE("usb31-phy1", 21, NULL, 0x210c, 19),
+ UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x210c, 16),
+ UNIPHIER_CLK_GATE("usb30-ssphy0", 17, NULL, 0x210c, 18),
+ UNIPHIER_CLK_GATE("usb30-ssphy1", 18, NULL, 0x210c, 20),
+ UNIPHIER_CLK_GATE("usb31-hsphy0", 20, NULL, 0x210c, 17),
+ UNIPHIER_CLK_GATE("usb31-ssphy0", 21, NULL, 0x210c, 19),
UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 3),
UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x210c, 7),
UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x210c, 8),
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index dec0dd88ec15..a11f4ba98b05 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -609,4 +609,15 @@ config ATCPIT100_TIMER
help
This option enables support for the Andestech ATCPIT100 timers.
+config RISCV_TIMER
+ bool "Timer for the RISC-V platform"
+ depends on RISCV
+ default y
+ select TIMER_PROBE
+ select TIMER_OF
+ help
+ This enables the per-hart timer built into all RISC-V systems, which
+ is accessed via both the SBI and the rdcycle instruction. This is
+ required for all RISC-V systems.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 00caf37e52f9..db51b2427e8a 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
-obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
+obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
@@ -78,3 +78,4 @@ obj-$(CONFIG_H8300_TPU) += h8300_tpu.o
obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o
+obj-$(CONFIG_RISCV_TIMER) += riscv_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 57cb2f00fc07..d8c7f5750cdb 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type,
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
clk->rating = 400;
- clk->cpumask = cpu_all_mask;
+ clk->cpumask = cpu_possible_mask;
if (arch_timer_mem_use_virtual) {
clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
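
The cpu_all_mask to cpu_possible_mask change above (and the matching ones in the other clockevent drivers below) narrows the advertised affinity from every CPU id up to NR_CPUS down to the CPUs that can ever be brought up on the running system. A small userspace sketch of the distinction, with invented mask values:

#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	unsigned int all_mask = (1u << NR_CPUS) - 1;	/* every id set */
	unsigned int possible_mask = 0x0f;	/* assumed: 4 possible CPUs */

	printf("all: %d bits, possible: %d bits\n",
	       __builtin_popcount(all_mask),
	       __builtin_popcount(possible_mask));

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		if (possible_mask & (1u << cpu))
			printf("cpu%u may receive this clockevent\n", cpu);
	return 0;
}
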
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
deleted file mode 100644
index f9b724fd9950..000000000000
--- a/drivers/clocksource/mtk_timer.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Mediatek SoCs General-Purpose Timer handling.
- *
- * Copyright (C) 2014 Matthias Brugger
- *
- * Matthias Brugger <matthias.bgg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqreturn.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-#include <linux/slab.h>
-
-#define GPT_IRQ_EN_REG 0x00
-#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
-#define GPT_IRQ_ACK_REG 0x08
-#define GPT_IRQ_ACK(val) BIT((val) - 1)
-
-#define TIMER_CTRL_REG(val) (0x10 * (val))
-#define TIMER_CTRL_OP(val) (((val) & 0x3) << 4)
-#define TIMER_CTRL_OP_ONESHOT (0)
-#define TIMER_CTRL_OP_REPEAT (1)
-#define TIMER_CTRL_OP_FREERUN (3)
-#define TIMER_CTRL_CLEAR (2)
-#define TIMER_CTRL_ENABLE (1)
-#define TIMER_CTRL_DISABLE (0)
-
-#define TIMER_CLK_REG(val) (0x04 + (0x10 * (val)))
-#define TIMER_CLK_SRC(val) (((val) & 0x1) << 4)
-#define TIMER_CLK_SRC_SYS13M (0)
-#define TIMER_CLK_SRC_RTC32K (1)
-#define TIMER_CLK_DIV1 (0x0)
-#define TIMER_CLK_DIV2 (0x1)
-
-#define TIMER_CNT_REG(val) (0x08 + (0x10 * (val)))
-#define TIMER_CMP_REG(val) (0x0C + (0x10 * (val)))
-
-#define GPT_CLK_EVT 1
-#define GPT_CLK_SRC 2
-
-struct mtk_clock_event_device {
- void __iomem *gpt_base;
- u32 ticks_per_jiffy;
- struct clock_event_device dev;
-};
-
-static void __iomem *gpt_sched_reg __read_mostly;
-
-static u64 notrace mtk_read_sched_clock(void)
-{
- return readl_relaxed(gpt_sched_reg);
-}
-
-static inline struct mtk_clock_event_device *to_mtk_clk(
- struct clock_event_device *c)
-{
- return container_of(c, struct mtk_clock_event_device, dev);
-}
-
-static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer)
-{
- u32 val;
-
- val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
- writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base +
- TIMER_CTRL_REG(timer));
-}
-
-static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt,
- unsigned long delay, u8 timer)
-{
- writel(delay, evt->gpt_base + TIMER_CMP_REG(timer));
-}
-
-static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
- bool periodic, u8 timer)
-{
- u32 val;
-
- /* Acknowledge interrupt */
- writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG);
-
- val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
-
- /* Clear 2 bit timer operation mode field */
- val &= ~TIMER_CTRL_OP(0x3);
-
- if (periodic)
- val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT);
- else
- val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT);
-
- writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static int mtk_clkevt_shutdown(struct clock_event_device *clk)
-{
- mtk_clkevt_time_stop(to_mtk_clk(clk), GPT_CLK_EVT);
- return 0;
-}
-
-static int mtk_clkevt_set_periodic(struct clock_event_device *clk)
-{
- struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
- mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
- mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
- mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
- return 0;
-}
-
-static int mtk_clkevt_next_event(unsigned long event,
- struct clock_event_device *clk)
-{
- struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
- mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
- mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT);
- mtk_clkevt_time_start(evt, false, GPT_CLK_EVT);
-
- return 0;
-}
-
-static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
-{
- struct mtk_clock_event_device *evt = dev_id;
-
- /* Acknowledge timer0 irq */
- writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG);
- evt->dev.event_handler(&evt->dev);
-
- return IRQ_HANDLED;
-}
-
-static void
-__init mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
-{
- writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-
- writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1,
- evt->gpt_base + TIMER_CLK_REG(timer));
-
- writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer));
-
- writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
-{
- u32 val;
-
- /* Disable all interrupts */
- writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
-
- /* Acknowledge all spurious pending interrupts */
- writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
-
- val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
- writel(val | GPT_IRQ_ENABLE(timer),
- evt->gpt_base + GPT_IRQ_EN_REG);
-}
-
-static int __init mtk_timer_init(struct device_node *node)
-{
- struct mtk_clock_event_device *evt;
- struct resource res;
- unsigned long rate = 0;
- struct clk *clk;
-
- evt = kzalloc(sizeof(*evt), GFP_KERNEL);
- if (!evt)
- return -ENOMEM;
-
- evt->dev.name = "mtk_tick";
- evt->dev.rating = 300;
- evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- evt->dev.set_state_shutdown = mtk_clkevt_shutdown;
- evt->dev.set_state_periodic = mtk_clkevt_set_periodic;
- evt->dev.set_state_oneshot = mtk_clkevt_shutdown;
- evt->dev.tick_resume = mtk_clkevt_shutdown;
- evt->dev.set_next_event = mtk_clkevt_next_event;
- evt->dev.cpumask = cpu_possible_mask;
-
- evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
- if (IS_ERR(evt->gpt_base)) {
- pr_err("Can't get resource\n");
- goto err_kzalloc;
- }
-
- evt->dev.irq = irq_of_parse_and_map(node, 0);
- if (evt->dev.irq <= 0) {
- pr_err("Can't parse IRQ\n");
- goto err_mem;
- }
-
- clk = of_clk_get(node, 0);
- if (IS_ERR(clk)) {
- pr_err("Can't get timer clock\n");
- goto err_irq;
- }
-
- if (clk_prepare_enable(clk)) {
- pr_err("Can't prepare clock\n");
- goto err_clk_put;
- }
- rate = clk_get_rate(clk);
-
- if (request_irq(evt->dev.irq, mtk_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
- pr_err("failed to setup irq %d\n", evt->dev.irq);
- goto err_clk_disable;
- }
-
- evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
-
- /* Configure clock source */
- mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
- clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
- node->name, rate, 300, 32, clocksource_mmio_readl_up);
- gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC);
- sched_clock_register(mtk_read_sched_clock, 32, rate);
-
- /* Configure clock event */
- mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
- clockevents_config_and_register(&evt->dev, rate, 0x3,
- 0xffffffff);
-
- mtk_timer_enable_irq(evt, GPT_CLK_EVT);
-
- return 0;
-
-err_clk_disable:
- clk_disable_unprepare(clk);
-err_clk_put:
- clk_put(clk);
-err_irq:
- irq_dispose_mapping(evt->dev.irq);
-err_mem:
- iounmap(evt->gpt_base);
- of_address_to_resource(node, 0, &res);
- release_mem_region(res.start, resource_size(&res));
-err_kzalloc:
- kfree(evt);
-
- return -EINVAL;
-}
-TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/riscv_timer.c b/drivers/clocksource/riscv_timer.c
new file mode 100644
index 000000000000..4e8b347e43e2
--- /dev/null
+++ b/drivers/clocksource/riscv_timer.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <asm/sbi.h>
+
+/*
+ * All RISC-V systems have a timer attached to every hart. These timers can be
+ * read by the 'rdcycle' pseudo instruction, and can use the SBI to set up
+ * events. In order to abstract the architecture-specific timer reading and
+ * setting functions away from the clock event insertion code, we provide
+ * function pointers to the clockevent subsystem that perform two basic
+ * operations: rdtime() reads the timer on the current CPU, and
+ * next_event(delta) sets the next timer event to 'delta' cycles in the future.
+ * As the timers are inherently a per-cpu resource, these callbacks perform
+ * operations on the current hart. There is guaranteed to be exactly one timer
+ * per hart on all RISC-V systems.
+ */
+
+static int riscv_clock_next_event(unsigned long delta,
+ struct clock_event_device *ce)
+{
+ csr_set(sie, SIE_STIE);
+ sbi_set_timer(get_cycles64() + delta);
+ return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
+ .name = "riscv_timer_clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 100,
+ .set_next_event = riscv_clock_next_event,
+};
+
+/*
+ * It is guaranteed that all the timers across all the harts are synchronized
+ * within one tick of each other, so while this could technically go
+ * backwards when hopping between CPUs, practically it won't happen.
+ */
+static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
+{
+ return get_cycles64();
+}
+
+static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
+ .name = "riscv_clocksource",
+ .rating = 300,
+ .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .read = riscv_clocksource_rdtime,
+};
+
+static int riscv_timer_starting_cpu(unsigned int cpu)
+{
+ struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
+
+ ce->cpumask = cpumask_of(cpu);
+ clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
+
+ csr_set(sie, SIE_STIE);
+ return 0;
+}
+
+static int riscv_timer_dying_cpu(unsigned int cpu)
+{
+ csr_clear(sie, SIE_STIE);
+ return 0;
+}
+
+/* called directly from the low-level interrupt handler */
+void riscv_timer_interrupt(void)
+{
+ struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
+
+ csr_clear(sie, SIE_STIE);
+ evdev->event_handler(evdev);
+}
+
+static int __init riscv_timer_init_dt(struct device_node *n)
+{
+ int cpu_id = riscv_of_processor_hart(n), error;
+ struct clocksource *cs;
+
+ if (cpu_id != smp_processor_id())
+ return 0;
+
+ cs = per_cpu_ptr(&riscv_clocksource, cpu_id);
+ clocksource_register_hz(cs, riscv_timebase);
+
+ error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
+ "clockevents/riscv/timer:starting",
+ riscv_timer_starting_cpu, riscv_timer_dying_cpu);
+ if (error)
+ pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
+ error, cpu_id);
+ return error;
+}
+
+TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt);
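
Before riscv_clock_next_event() is called, the clockevents core converts the requested nanosecond delay into a cycle delta using the timebase passed to clockevents_config_and_register(). A userspace sketch of that conversion — the 10 MHz timebase and counter value are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t ns_to_cycles(uint64_t ns, uint64_t timebase_hz)
{
	return ns * timebase_hz / NSEC_PER_SEC;
}

int main(void)
{
	uint64_t timebase = 10000000;	/* assumed 10 MHz timebase */
	uint64_t now = 123456789;	/* stand-in for get_cycles64() */
	uint64_t delta = ns_to_cycles(2000000, timebase);	/* 2 ms -> 20000 */

	printf("sbi_set_timer(%llu)\n", (unsigned long long)(now + delta));
	return 0;
}
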
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index c337a8100a7b..aa624885e0e2 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -230,7 +230,7 @@ static int __init tegra20_init_timer(struct device_node *np)
return ret;
}
- tegra_clockevent.cpumask = cpu_all_mask;
+ tegra_clockevent.cpumask = cpu_possible_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_config_and_register(&tegra_clockevent, 1000000,
0x1, 0x1fffffff);
@@ -259,6 +259,6 @@ static int __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
- return register_persistent_clock(NULL, tegra_read_persistent_clock64);
+ return register_persistent_clock(tegra_read_persistent_clock64);
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/timer-atcpit100.c b/drivers/clocksource/timer-atcpit100.c
index 5e23d7b4a722..b4bd2f5b801d 100644
--- a/drivers/clocksource/timer-atcpit100.c
+++ b/drivers/clocksource/timer-atcpit100.c
@@ -185,7 +185,7 @@ static struct timer_of to = {
.set_state_oneshot = atcpit100_clkevt_set_oneshot,
.tick_resume = atcpit100_clkevt_shutdown,
.set_next_event = atcpit100_clkevt_next_event,
- .cpumask = cpu_all_mask,
+ .cpumask = cpu_possible_mask,
},
.of_irq = {
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index 0eee03250cfc..f5b2eda30bf3 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -211,7 +211,7 @@ static int __init keystone_timer_init(struct device_node *np)
event_dev->set_state_shutdown = keystone_shutdown;
event_dev->set_state_periodic = keystone_set_periodic;
event_dev->set_state_oneshot = keystone_shutdown;
- event_dev->cpumask = cpu_all_mask;
+ event_dev->cpumask = cpu_possible_mask;
event_dev->owner = THIS_MODULE;
event_dev->name = TIMER_NAME;
event_dev->irq = irq;
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
new file mode 100644
index 000000000000..eb10321f8517
--- /dev/null
+++ b/drivers/clocksource/timer-mediatek.c
@@ -0,0 +1,328 @@
+/*
+ * Mediatek SoCs General-Purpose Timer handling.
+ *
+ * Copyright (C) 2014 Matthias Brugger
+ *
+ * Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include "timer-of.h"
+
+#define TIMER_CLK_EVT (1)
+#define TIMER_CLK_SRC (2)
+
+#define TIMER_SYNC_TICKS (3)
+
+/* gpt */
+#define GPT_IRQ_EN_REG 0x00
+#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
+#define GPT_IRQ_ACK_REG 0x08
+#define GPT_IRQ_ACK(val) BIT((val) - 1)
+
+#define GPT_CTRL_REG(val) (0x10 * (val))
+#define GPT_CTRL_OP(val) (((val) & 0x3) << 4)
+#define GPT_CTRL_OP_ONESHOT (0)
+#define GPT_CTRL_OP_REPEAT (1)
+#define GPT_CTRL_OP_FREERUN (3)
+#define GPT_CTRL_CLEAR (2)
+#define GPT_CTRL_ENABLE (1)
+#define GPT_CTRL_DISABLE (0)
+
+#define GPT_CLK_REG(val) (0x04 + (0x10 * (val)))
+#define GPT_CLK_SRC(val) (((val) & 0x1) << 4)
+#define GPT_CLK_SRC_SYS13M (0)
+#define GPT_CLK_SRC_RTC32K (1)
+#define GPT_CLK_DIV1 (0x0)
+#define GPT_CLK_DIV2 (0x1)
+
+#define GPT_CNT_REG(val) (0x08 + (0x10 * (val)))
+#define GPT_CMP_REG(val) (0x0C + (0x10 * (val)))
+
+/* system timer */
+#define SYST_BASE (0x40)
+
+#define SYST_CON (SYST_BASE + 0x0)
+#define SYST_VAL (SYST_BASE + 0x4)
+
+#define SYST_CON_REG(to) (timer_of_base(to) + SYST_CON)
+#define SYST_VAL_REG(to) (timer_of_base(to) + SYST_VAL)
+
+/*
+ * SYST_CON_EN: Clock enable. Shall be set to:
+ * - start the timer countdown,
+ * - allow the timeout ticks to be updated,
+ * - allow the interrupt functions to be changed.
+ *
+ * SYST_CON_IRQ_EN: Set to allow interrupt.
+ *
+ * SYST_CON_IRQ_CLR: Set to clear interrupt.
+ */
+#define SYST_CON_EN BIT(0)
+#define SYST_CON_IRQ_EN BIT(1)
+#define SYST_CON_IRQ_CLR BIT(4)
+
+static void __iomem *gpt_sched_reg __read_mostly;
+
+static void mtk_syst_ack_irq(struct timer_of *to)
+{
+ /* Clear and disable interrupt */
+ writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
+}
+
+static irqreturn_t mtk_syst_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ mtk_syst_ack_irq(to);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_syst_clkevt_next_event(unsigned long ticks,
+ struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ /* Enable clock to allow timeout tick update later */
+ writel(SYST_CON_EN, SYST_CON_REG(to));
+
+ /*
+ * Write new timeout ticks. Timer shall start countdown
+ * after timeout ticks are updated.
+ */
+ writel(ticks, SYST_VAL_REG(to));
+
+ /* Enable interrupt */
+ writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON_REG(to));
+
+ return 0;
+}
+
+static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt)
+{
+ /* Disable timer */
+ writel(0, SYST_CON_REG(to_timer_of(clkevt)));
+
+ return 0;
+}
+
+static int mtk_syst_clkevt_resume(struct clock_event_device *clkevt)
+{
+ return mtk_syst_clkevt_shutdown(clkevt);
+}
+
+static int mtk_syst_clkevt_oneshot(struct clock_event_device *clkevt)
+{
+ return 0;
+}
+
+static u64 notrace mtk_gpt_read_sched_clock(void)
+{
+ return readl_relaxed(gpt_sched_reg);
+}
+
+static void mtk_gpt_clkevt_time_stop(struct timer_of *to, u8 timer)
+{
+ u32 val;
+
+ val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+ writel(val & ~GPT_CTRL_ENABLE, timer_of_base(to) +
+ GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_setup(struct timer_of *to,
+ unsigned long delay, u8 timer)
+{
+ writel(delay, timer_of_base(to) + GPT_CMP_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_start(struct timer_of *to,
+ bool periodic, u8 timer)
+{
+ u32 val;
+
+ /* Acknowledge interrupt */
+ writel(GPT_IRQ_ACK(timer), timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+ val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+
+ /* Clear 2 bit timer operation mode field */
+ val &= ~GPT_CTRL_OP(0x3);
+
+ if (periodic)
+ val |= GPT_CTRL_OP(GPT_CTRL_OP_REPEAT);
+ else
+ val |= GPT_CTRL_OP(GPT_CTRL_OP_ONESHOT);
+
+ writel(val | GPT_CTRL_ENABLE | GPT_CTRL_CLEAR,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static int mtk_gpt_clkevt_shutdown(struct clock_event_device *clk)
+{
+ mtk_gpt_clkevt_time_stop(to_timer_of(clk), TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static int mtk_gpt_clkevt_set_periodic(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_setup(to, to->of_clk.period, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_start(to, true, TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static int mtk_gpt_clkevt_next_event(unsigned long event,
+ struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_start(to, false, TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static irqreturn_t mtk_gpt_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ /* Acknowledge timer0 irq */
+ writel(GPT_IRQ_ACK(TIMER_CLK_EVT), timer_of_base(to) + GPT_IRQ_ACK_REG);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static void
+__init mtk_gpt_setup(struct timer_of *to, u8 timer, u8 option)
+{
+ writel(GPT_CTRL_CLEAR | GPT_CTRL_DISABLE,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+
+ writel(GPT_CLK_SRC(GPT_CLK_SRC_SYS13M) | GPT_CLK_DIV1,
+ timer_of_base(to) + GPT_CLK_REG(timer));
+
+ writel(0x0, timer_of_base(to) + GPT_CMP_REG(timer));
+
+ writel(GPT_CTRL_OP(option) | GPT_CTRL_ENABLE,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
+{
+ u32 val;
+
+ /* Disable all interrupts */
+ writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
+
+ /* Acknowledge all spurious pending interrupts */
+ writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+ val = readl(timer_of_base(to) + GPT_IRQ_EN_REG);
+ writel(val | GPT_IRQ_ENABLE(timer),
+ timer_of_base(to) + GPT_IRQ_EN_REG);
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = "mtk-clkevt",
+ .rating = 300,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static int __init mtk_syst_init(struct device_node *node)
+{
+ int ret;
+
+ to.clkevt.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT;
+ to.clkevt.set_state_shutdown = mtk_syst_clkevt_shutdown;
+ to.clkevt.set_state_oneshot = mtk_syst_clkevt_oneshot;
+ to.clkevt.tick_resume = mtk_syst_clkevt_resume;
+ to.clkevt.set_next_event = mtk_syst_clkevt_next_event;
+ to.of_irq.handler = mtk_syst_handler;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ goto err;
+
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+
+ return 0;
+err:
+ timer_of_cleanup(&to);
+ return ret;
+}
+
+static int __init mtk_gpt_init(struct device_node *node)
+{
+ int ret;
+
+ to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ to.clkevt.set_state_shutdown = mtk_gpt_clkevt_shutdown;
+ to.clkevt.set_state_periodic = mtk_gpt_clkevt_set_periodic;
+ to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
+ to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
+ to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
+ to.of_irq.handler = mtk_gpt_interrupt;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ goto err;
+
+ /* Configure clock source */
+ mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
+ clocksource_mmio_init(timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC),
+ node->name, timer_of_rate(&to), 300, 32,
+ clocksource_mmio_readl_up);
+ gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC);
+ sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to));
+
+ /* Configure clock event */
+ mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT);
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+
+ mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
+
+ return 0;
+err:
+ timer_of_cleanup(&to);
+ return ret;
+}
+TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
+TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
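
The SYST one-shot sequence implemented above is enable-clock, write-ticks, enable-irq, with the ack path writing IRQ_CLR together with EN. The following self-contained sketch replays that sequence against a plain array standing in for the MMIO window; offsets and bit values mirror the driver, the "hardware" is obviously simulated:

#include <stdint.h>
#include <stdio.h>

#define SYST_CON	0x40
#define SYST_VAL	0x44
#define SYST_CON_EN	(1u << 0)
#define SYST_CON_IRQ_EN	(1u << 1)
#define SYST_CON_IRQ_CLR (1u << 4)

static uint32_t regs[0x100 / 4];

static void writel(uint32_t val, unsigned int off) { regs[off / 4] = val; }

static void syst_next_event(uint32_t ticks)
{
	writel(SYST_CON_EN, SYST_CON);			/* unlock updates */
	writel(ticks, SYST_VAL);			/* load countdown */
	writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON); /* arm the irq */
}

static void syst_ack_irq(void)
{
	writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON);
}

int main(void)
{
	syst_next_event(13000);		/* ~1 ms at an assumed 13 MHz */
	printf("CON=%#x VAL=%u\n", regs[SYST_CON / 4], regs[SYST_VAL / 4]);
	syst_ack_irq();
	return 0;
}
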
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
index ef9ebeafb3ed..430cb99d8d79 100644
--- a/drivers/clocksource/timer-sprd.c
+++ b/drivers/clocksource/timer-sprd.c
@@ -156,4 +156,54 @@ static int __init sprd_timer_init(struct device_node *np)
return 0;
}
+static struct timer_of suspend_to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
+static u64 sprd_suspend_timer_read(struct clocksource *cs)
+{
+ return ~(u64)readl_relaxed(timer_of_base(&suspend_to) +
+ TIMER_VALUE_SHDW_LO) & cs->mask;
+}
+
+static int sprd_suspend_timer_enable(struct clocksource *cs)
+{
+ sprd_timer_update_counter(timer_of_base(&suspend_to),
+ TIMER_VALUE_LO_MASK);
+ sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE);
+
+ return 0;
+}
+
+static void sprd_suspend_timer_disable(struct clocksource *cs)
+{
+ sprd_timer_disable(timer_of_base(&suspend_to));
+}
+
+static struct clocksource suspend_clocksource = {
+ .name = "sprd_suspend_timer",
+ .rating = 200,
+ .read = sprd_suspend_timer_read,
+ .enable = sprd_suspend_timer_enable,
+ .disable = sprd_suspend_timer_disable,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
+
+static int __init sprd_suspend_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &suspend_to);
+ if (ret)
+ return ret;
+
+ clocksource_register_hz(&suspend_clocksource,
+ timer_of_rate(&suspend_to));
+
+ return 0;
+}
+
TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
+TIMER_OF_DECLARE(sc9860_persistent_timer, "sprd,sc9860-suspend-timer",
+ sprd_suspend_timer_init);
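
The Spreadtrum suspend counter counts down, so sprd_suspend_timer_read() complements the latched shadow value and masks it to 32 bits to hand the clocksource core a monotonically increasing count. A standalone check of that inversion:

#include <stdint.h>
#include <stdio.h>

#define CLOCKSOURCE_MASK(b) (~0ULL >> (64 - (b)))

static uint64_t suspend_read(uint32_t shdw_lo)
{
	return ~(uint64_t)shdw_lo & CLOCKSOURCE_MASK(32);
}

int main(void)
{
	/* At the reload value the down-counter presents an up-count of 0 */
	printf("%llu\n", (unsigned long long)suspend_read(0xffffffff));
	/* One tick later, 0xfffffffe inverts to an up-count of 1 */
	printf("%llu\n", (unsigned long long)suspend_read(0xfffffffe));
	return 0;
}
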
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 880a861ab3c8..29e2e1a78a43 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -78,8 +78,7 @@ static struct ti_32k ti_32k_timer = {
.rating = 250,
.read = ti_32k_read_cycles,
.mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS |
- CLOCK_SOURCE_SUSPEND_NONSTOP,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index a6a0338eea77..f74689334f7c 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -162,7 +162,7 @@ static int __init zevio_timer_add(struct device_node *node)
timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot;
timer->clkevt.tick_resume = zevio_timer_set_oneshot;
timer->clkevt.rating = 200;
- timer->clkevt.cpumask = cpu_all_mask;
+ timer->clkevt.cpumask = cpu_possible_mask;
timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT;
timer->clkevt.irq = irqnr;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index e718b8c69a56..eeb7d31cbda5 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -239,7 +240,7 @@ void cn_del_callback(struct cb_id *id)
}
EXPORT_SYMBOL_GPL(cn_del_callback);
-static int cn_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
{
struct cn_queue_dev *dev = cdev.cbdev;
struct cn_callback_entry *cbq;
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 739da90ff3f6..75491fc841a6 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -51,6 +51,16 @@
#define ARMADA_37XX_DVFS_LOAD_2 2
#define ARMADA_37XX_DVFS_LOAD_3 3
+/* AVS register set */
+#define ARMADA_37XX_AVS_CTL0 0x0
+#define ARMADA_37XX_AVS_ENABLE BIT(30)
+#define ARMADA_37XX_AVS_HIGH_VDD_LIMIT 16
+#define ARMADA_37XX_AVS_LOW_VDD_LIMIT 22
+#define ARMADA_37XX_AVS_VDD_MASK 0x3F
+#define ARMADA_37XX_AVS_CTL2 0x8
+#define ARMADA_37XX_AVS_LOW_VDD_EN BIT(6)
+#define ARMADA_37XX_AVS_VSET(x) (0x1C + 4 * (x))
+
/*
* On Armada 37xx the Power management manages 4 level of CPU load,
* each level can be associated with a CPU clock source, a CPU
@@ -58,6 +68,17 @@
*/
#define LOAD_LEVEL_NR 4
+#define MIN_VOLT_MV 1000
+
+/* AVS value for the corresponding voltage (in mV) */
+static int avs_map[] = {
+ 747, 758, 770, 782, 793, 805, 817, 828, 840, 852, 863, 875, 887, 898,
+ 910, 922, 933, 945, 957, 968, 980, 992, 1003, 1015, 1027, 1038, 1050,
+ 1062, 1073, 1085, 1097, 1108, 1120, 1132, 1143, 1155, 1167, 1178, 1190,
+ 1202, 1213, 1225, 1237, 1248, 1260, 1272, 1283, 1295, 1307, 1318, 1330,
+ 1342
+};
+
struct armada37xx_cpufreq_state {
struct regmap *regmap;
u32 nb_l0l1;
@@ -71,6 +92,7 @@ static struct armada37xx_cpufreq_state *armada37xx_cpufreq_state;
struct armada_37xx_dvfs {
u32 cpu_freq_max;
u8 divider[LOAD_LEVEL_NR];
+ u32 avs[LOAD_LEVEL_NR];
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
@@ -148,6 +170,128 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
clk_set_parent(clk, parent);
}
+/*
+ * Find the Armada 37xx supported AVS value whose voltage is the
+ * closest round-up match to the target voltage.
+ */
+static u32 armada_37xx_avs_val_match(int target_vm)
+{
+ u32 avs;
+
+ /* Find out the round-up closest supported voltage value */
+ for (avs = 0; avs < ARRAY_SIZE(avs_map); avs++)
+ if (avs_map[avs] >= target_vm)
+ break;
+
+ /*
+ * If all supported voltages are smaller than target one,
+ * choose the largest supported voltage
+ */
+ if (avs == ARRAY_SIZE(avs_map))
+ avs = ARRAY_SIZE(avs_map) - 1;
+
+ return avs;
+}
+
+/*
+ * For the Armada 37xx SoC, the L0 (VSET0) VDD AVS value is set to the
+ * SVC revision value, or to a default value when SVC is not supported.
+ * - L0 can be read out from the AVS_CTL0 register, and the L0 voltage
+ * can be looked up in the avs_map mapping table.
+ * - The L1 voltage should be about 100mV lower than the L0 voltage.
+ * - The L2 & L3 voltages should be about 150mV lower than the L0 voltage.
+ * This function calculates the L1, L2 and L3 AVS values dynamically
+ * from the L0 voltage and fills in the whole AVS value table.
+ */
+static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
+ struct armada_37xx_dvfs *dvfs)
+{
+ unsigned int target_vm;
+ int load_level = 0;
+ u32 l0_vdd_min;
+
+ if (base == NULL)
+ return;
+
+ /* Get L0 VDD min value */
+ regmap_read(base, ARMADA_37XX_AVS_CTL0, &l0_vdd_min);
+ l0_vdd_min = (l0_vdd_min >> ARMADA_37XX_AVS_LOW_VDD_LIMIT) &
+ ARMADA_37XX_AVS_VDD_MASK;
+ if (l0_vdd_min >= ARRAY_SIZE(avs_map)) {
+ pr_err("L0 VDD MIN %d is not correct.\n", l0_vdd_min);
+ return;
+ }
+ dvfs->avs[0] = l0_vdd_min;
+
+ if (avs_map[l0_vdd_min] <= MIN_VOLT_MV) {
+ /*
+ * If the L0 voltage is 1000mV or lower, all the remaining VDD
+ * sets are clamped to the 1000mV minimum.
+ */
+ u32 avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV);
+
+ for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
+ dvfs->avs[load_level] = avs_min;
+
+ return;
+ }
+
+ /*
+ * The L1 voltage is the L0 voltage minus 100mV, and it must not
+ * drop below 1000mV.
+ */
+
+ target_vm = avs_map[l0_vdd_min] - 100;
+ target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
+
+ /*
+ * L2 & L3 voltage is equal to L0 voltage - 150mv and it must
+ * be larger than 1000mv
+ */
+ target_vm = avs_map[l0_vdd_min] - 150;
+ target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
+}
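
As a worked example of the round-up match against the avs_map table above, the following standalone program reproduces the lookup: a 1100mV target lands on the 1108mV step, and a target above the table clamps to the last entry.

#include <stdio.h>

static const int avs_map[] = {
	747, 758, 770, 782, 793, 805, 817, 828, 840, 852, 863, 875, 887, 898,
	910, 922, 933, 945, 957, 968, 980, 992, 1003, 1015, 1027, 1038, 1050,
	1062, 1073, 1085, 1097, 1108, 1120, 1132, 1143, 1155, 1167, 1178, 1190,
	1202, 1213, 1225, 1237, 1248, 1260, 1272, 1283, 1295, 1307, 1318, 1330,
	1342
};
#define MAP_SIZE (sizeof(avs_map) / sizeof(avs_map[0]))

static unsigned int avs_val_match(int target_vm)
{
	unsigned int avs;

	/* First supported voltage at or above the target */
	for (avs = 0; avs < MAP_SIZE; avs++)
		if (avs_map[avs] >= target_vm)
			break;
	/* All supported voltages too small: use the largest one */
	if (avs == MAP_SIZE)
		avs = MAP_SIZE - 1;
	return avs;
}

int main(void)
{
	unsigned int avs = avs_val_match(1100);

	printf("1100 mV -> index %u (%d mV)\n", avs, avs_map[avs]);	/* 31, 1108 */
	avs = avs_val_match(2000);
	printf("2000 mV -> index %u (%d mV)\n", avs, avs_map[avs]);	/* clamped */
	return 0;
}
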
+
+static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
+ struct armada_37xx_dvfs *dvfs)
+{
+ unsigned int avs_val = 0, freq;
+ int load_level = 0;
+
+ if (base == NULL)
+ return;
+
+ /* Disable AVS before the configuration */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
+ ARMADA_37XX_AVS_ENABLE, 0);
+
+
+ /* Enable low voltage mode */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL2,
+ ARMADA_37XX_AVS_LOW_VDD_EN,
+ ARMADA_37XX_AVS_LOW_VDD_EN);
+
+
+ for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++) {
+ freq = dvfs->cpu_freq_max / dvfs->divider[load_level];
+
+ avs_val = dvfs->avs[load_level];
+ regmap_update_bits(base, ARMADA_37XX_AVS_VSET(load_level-1),
+ ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
+ ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_LOW_VDD_LIMIT,
+ avs_val << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
+ avs_val << ARMADA_37XX_AVS_LOW_VDD_LIMIT);
+ }
+
+ /* Enable AVS after the configuration */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
+ ARMADA_37XX_AVS_ENABLE,
+ ARMADA_37XX_AVS_ENABLE);
+
+}
+
static void armada37xx_cpufreq_disable_dvfs(struct regmap *base)
{
unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
@@ -216,7 +360,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
struct platform_device *pdev;
unsigned long freq;
unsigned int cur_frequency;
- struct regmap *nb_pm_base;
+ struct regmap *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
struct clk *clk;
@@ -227,6 +371,14 @@ static int __init armada37xx_cpufreq_driver_init(void)
if (IS_ERR(nb_pm_base))
return -ENODEV;
+ avs_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-avs");
+
+ /* If AVS is not present, don't use it but still try to set up DVFS */
+ if (IS_ERR(avs_base)) {
+ pr_info("Syscon failed for Adapting Voltage Scaling: skip it\n");
+ avs_base = NULL;
+ }
/* Before doing any configuration on the DVFS first, disable it */
armada37xx_cpufreq_disable_dvfs(nb_pm_base);
@@ -270,16 +422,21 @@ static int __init armada37xx_cpufreq_driver_init(void)
armada37xx_cpufreq_state->regmap = nb_pm_base;
+ armada37xx_cpufreq_avs_configure(avs_base, dvfs);
+ armada37xx_cpufreq_avs_setup(avs_base, dvfs);
+
armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
clk_put(clk);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
+ unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
freq = cur_frequency / dvfs->divider[load_lvl];
-
- ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
if (ret)
goto remove_opp;
+
+
}
/* Now that everything is setup, enable the DVFS at hardware level */
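
The OPP loop above also has a unit conversion worth noting: avs_map[] holds millivolts while dev_pm_opp_add() takes microvolts, hence the multiplication by 1000. A small sketch of the resulting table, with an assumed 1 GHz base frequency and illustrative divider/AVS values (not the real per-SoC tables):

#include <stdio.h>

int main(void)
{
	unsigned long cur_frequency = 1000000000UL;	/* assumed 1 GHz L0 */
	unsigned int divider[] = { 1, 2, 4, 4 };	/* one per load level */
	int avs_mv[] = { 1108, 1050, 1003, 1003 };	/* e.g. from avs_map */

	for (int lvl = 0; lvl < 4; lvl++) {
		unsigned long freq = cur_frequency / divider[lvl];
		unsigned long u_volt = avs_mv[lvl] * 1000UL;	/* mV -> uV */

		printf("opp: %lu Hz @ %lu uV\n", freq, u_volt);
	}
	return 0;
}
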
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index a9d3eec32795..30f302149730 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -296,10 +296,62 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
+static inline u64 get_delta(u64 t1, u64 t0)
+{
+ if (t1 > t0 || t0 > ~(u32)0)
+ return t1 - t0;
+
+ return (u32)t1 - (u32)t0;
+}
+
+static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu,
+ struct cppc_perf_fb_ctrs fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs fb_ctrs_t1)
+{
+ u64 delta_reference, delta_delivered;
+ u64 reference_perf, delivered_perf;
+
+ reference_perf = fb_ctrs_t0.reference_perf;
+
+ delta_reference = get_delta(fb_ctrs_t1.reference,
+ fb_ctrs_t0.reference);
+ delta_delivered = get_delta(fb_ctrs_t1.delivered,
+ fb_ctrs_t0.delivered);
+
+ /* Check to avoid a divide-by-zero */
+ if (delta_reference || delta_delivered)
+ delivered_perf = (reference_perf * delta_delivered) /
+ delta_reference;
+ else
+ delivered_perf = cpu->perf_ctrls.desired_perf;
+
+ return cppc_cpufreq_perf_to_khz(cpu, delivered_perf);
+}
+
+static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
+{
+ struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
+ struct cppc_cpudata *cpu = all_cpu_data[cpunum];
+ int ret;
+
+ ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
+ if (ret)
+ return ret;
+
+ udelay(2); /* 2usec delay between sampling */
+
+ ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1);
+ if (ret)
+ return ret;
+
+ return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
+}
+
static struct cpufreq_driver cppc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
+ .get = cppc_cpufreq_get_rate,
.init = cppc_cpufreq_cpu_init,
.stop_cpu = cppc_cpufreq_stop_cpu,
.name = "cppc_cpufreq",
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b0dfd3222013..f53fb41efb7b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -923,7 +923,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- cpus_read_lock();
+ /*
+ * cpus_read_trylock() is used here to work around a circular lock
+ * dependency problem with respect to the cpufreq_register_driver().
+ */
+ if (!cpus_read_trylock())
+ return -EBUSY;
if (cpu_online(policy->cpu)) {
down_write(&policy->rwsem);
@@ -2236,6 +2241,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min = new_policy->min;
policy->max = new_policy->max;
+ trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
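
The store() change above swaps an unconditional lock for a trylock so the sysfs write path backs off with -EBUSY instead of deadlocking against the hotplug lock held by cpufreq_register_driver(). A generic userspace sketch of that pattern, with a pthread mutex standing in for the CPU hotplug lock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

static int store(const char *buf)
{
	if (pthread_mutex_trylock(&hotplug_lock))
		return -EBUSY;	/* caller may retry instead of deadlocking */

	printf("applying \"%s\" with the lock held\n", buf);
	pthread_mutex_unlock(&hotplug_lock);
	return 0;
}

int main(void)
{
	return store("schedutil") ? 1 : 0;
}
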
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 1d50e97d49f1..6d53f7d9fc7a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
- struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct policy_dbs_info *policy_dbs;
+
+ /* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
+ mutex_lock(&gov_dbs_data_mutex);
+ policy_dbs = policy->governor_data;
+ if (!policy_dbs)
+ goto out;
mutex_lock(&policy_dbs->update_mutex);
cpufreq_policy_apply_limits(policy);
gov_update_sample_delay(policy_dbs, 0);
-
mutex_unlock(&policy_dbs->update_mutex);
+
+out:
+ mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 8b3c2a79ad6c..b2ff423ad7f8 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -50,6 +51,7 @@ static struct clk_bulk_data clks[] = {
};
static struct device *cpu_dev;
+static struct thermal_cooling_device *cdev;
static bool free_opp;
static struct cpufreq_frequency_table *freq_table;
static unsigned int max_freq;
@@ -191,6 +193,16 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
return 0;
}
+static void imx6q_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ cdev = of_cpufreq_cooling_register(policy);
+
+ if (!cdev)
+ dev_err(cpu_dev,
+ "running cpufreq without cooling device: %ld\n",
+ PTR_ERR(cdev));
+}
+
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
@@ -202,13 +214,22 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
+static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_cooling_unregister(cdev);
+
+ return 0;
+}
+
static struct cpufreq_driver imx6q_cpufreq_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
+ .exit = imx6q_cpufreq_exit,
.name = "imx6q-cpufreq",
+ .ready = imx6q_cpufreq_ready,
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ece120da3353..b6a1aadaff9f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -311,12 +311,20 @@ static DEFINE_MUTEX(intel_pstate_limits_lock);
#ifdef CONFIG_ACPI
-static bool intel_pstate_get_ppc_enable_status(void)
+static bool intel_pstate_acpi_pm_profile_server(void)
{
if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
return true;
+ return false;
+}
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+ if (intel_pstate_acpi_pm_profile_server())
+ return true;
+
return acpi_ppc;
}
@@ -459,6 +467,11 @@ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *pol
static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
+
+static inline bool intel_pstate_acpi_pm_profile_server(void)
+{
+ return false;
+}
#endif
static inline void update_turbo_state(void)
@@ -657,21 +670,18 @@ static ssize_t store_energy_performance_preference(
{
struct cpudata *cpu_data = all_cpu_data[policy->cpu];
char str_preference[21];
- int ret, i = 0;
+ int ret;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
return -EINVAL;
- while (energy_perf_strings[i] != NULL) {
- if (!strcmp(str_preference, energy_perf_strings[i])) {
- intel_pstate_set_energy_pref_index(cpu_data, i);
- return count;
- }
- ++i;
- }
+ ret = match_string(energy_perf_strings, -1, str_preference);
+ if (ret < 0)
+ return ret;
- return -EINVAL;
+ intel_pstate_set_energy_pref_index(cpu_data, ret);
+ return count;
}
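
The hunk above replaces the open-coded scan with match_string(). A userspace re-implementation mirroring its semantics as used here — with n == -1 the array is scanned until a NULL terminator, and the result is the matching index or -EINVAL:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int match_string(const char * const *array, int n, const char *s)
{
	int i;

	for (i = 0; n < 0 || i < n; i++) {
		if (!array[i])
			break;
		if (!strcmp(array[i], s))
			return i;
	}
	return -EINVAL;
}

int main(void)
{
	static const char * const prefs[] = {
		"default", "performance", "balance_performance",
		"balance_power", "power", NULL
	};

	printf("%d\n", match_string(prefs, -1, "power"));	/* 4 */
	printf("%d\n", match_string(prefs, -1, "turbo"));	/* -EINVAL */
	return 0;
}
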
static ssize_t show_energy_performance_preference(
@@ -1841,7 +1851,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_hwp_enable(cpu);
id = x86_match_cpu(intel_pstate_hwp_boost_ids);
- if (id)
+ if (id && intel_pstate_acpi_pm_profile_server())
hwp_boost = true;
}
@@ -1998,7 +2008,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
struct cpudata *cpu)
{
- if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+ if (!hwp_active &&
+ cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_freq) {
pr_debug("policy->max > max non turbo frequency\n");
@@ -2072,6 +2083,15 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+ if (hwp_active) {
+ unsigned int max_freq;
+
+ max_freq = global.turbo_disabled ?
+ cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+ if (max_freq < policy->cpuinfo.max_freq)
+ policy->cpuinfo.max_freq = max_freq;
+ }
+
intel_pstate_init_acpi_perf_limits(policy);
policy->fast_switch_possible = true;
@@ -2394,6 +2414,18 @@ static bool __init intel_pstate_no_acpi_pss(void)
return true;
}
+static bool __init intel_pstate_no_acpi_pcch(void)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, "\\_SB", &handle);
+ if (ACPI_FAILURE(status))
+ return true;
+
+ return !acpi_has_method(handle, "PCCH");
+}
+
static bool __init intel_pstate_has_acpi_ppc(void)
{
int i;
@@ -2453,7 +2485,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
switch (plat_info[idx].data) {
case PSS:
- return intel_pstate_no_acpi_pss();
+ if (!intel_pstate_no_acpi_pss())
+ return false;
+
+ return intel_pstate_no_acpi_pcch();
case PPC:
return intel_pstate_has_acpi_ppc() && !force_load;
}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 3f0ce2ae35ee..099a849396f6 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void)
{
int ret;
+ /* Skip initialization if another cpufreq driver is there. */
+ if (cpufreq_get_current_driver())
+ return 0;
+
if (acpi_disabled)
return 0;
@@ -589,6 +593,15 @@ static int __init pcc_cpufreq_init(void)
return ret;
}
+ if (num_present_cpus() > 4) {
+ pcc_cpufreq_driver.flags |= CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
+ pr_err("%s: Too many CPUs, dynamic performance scaling disabled\n",
+ __func__);
+ pr_err("%s: Try to enable another scaling driver through BIOS settings\n",
+ __func__);
+ pr_err("%s: and complain to the system vendor\n", __func__);
+ }
+
ret = cpufreq_register_driver(&pcc_cpufreq_driver);
return ret;
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 54edaec1e608..bf6519cf64bc 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -758,8 +758,13 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
cur_msec = jiffies_to_msecs(get_jiffies_64());
- spin_lock(&gpstates->gpstate_lock);
freq_data.pstate_id = idx_to_pstate(new_index);
+ if (!gpstates) {
+ freq_data.gpstate_id = freq_data.pstate_id;
+ goto no_gpstate;
+ }
+
+ spin_lock(&gpstates->gpstate_lock);
if (!gpstates->last_sampled_time) {
gpstate_idx = new_index;
@@ -809,6 +814,7 @@ gpstates_done:
spin_unlock(&gpstates->gpstate_lock);
+no_gpstate:
/*
* Use smp_call_function to send IPI and execute the
* mtspr on target CPU. We could do that without IPI
@@ -843,6 +849,13 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
kernfs_put(kn);
}
+ policy->freq_table = powernv_freqs;
+ policy->fast_switch_possible = true;
+
+ if (pvr_version_is(PVR_POWER9))
+ return 0;
+
+ /* Initialise Gpstate ramp-down timer only on POWER8 */
gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
if (!gpstates)
return -ENOMEM;
@@ -857,8 +870,6 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
spin_lock_init(&gpstates->gpstate_lock);
- policy->freq_table = powernv_freqs;
- policy->fast_switch_possible = true;
return 0;
}
@@ -998,7 +1009,8 @@ static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
- del_timer_sync(&gpstates->timer);
+ if (gpstates)
+ del_timer_sync(&gpstates->timer);
}
static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index 29389accf3e9..a1830fa25fc5 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -109,8 +109,9 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
of_node_put(np);
if (IS_ERR(speedbin_nvmem)) {
- dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
- PTR_ERR(speedbin_nvmem));
+ if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
+ dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
+ PTR_ERR(speedbin_nvmem));
return PTR_ERR(speedbin_nvmem);
}
@@ -183,6 +184,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
+ {}
};
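
The `{}` entry added above matters because match tables are scanned until an all-zero sentinel; without it the walk runs off the end of the array. A self-contained sketch of the scan (simplified struct, invented matcher):

#include <stdio.h>
#include <string.h>

struct of_device_id {
	char compatible[32];
};

static const struct of_device_id match_list[] = {
	{ .compatible = "qcom,apq8096" },
	{ .compatible = "qcom,msm8996" },
	{ /* sentinel */ }
};

static const struct of_device_id *of_match(const char *compat)
{
	for (const struct of_device_id *id = match_list;
	     id->compatible[0]; id++)	/* the sentinel stops the walk */
		if (!strcmp(id->compatible, compat))
			return id;
	return NULL;
}

int main(void)
{
	printf("%s\n", of_match("qcom,msm8996") ? "matched" : "no match");
	return 0;
}
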
/*
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index e07bc7ace774..073557f433eb 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -105,7 +105,8 @@ static int __init arm_idle_init_cpu(int cpu)
ret = cpuidle_register_driver(drv);
if (ret) {
- pr_err("Failed to register cpuidle driver\n");
+ if (ret != -EBUSY)
+ pr_err("Failed to register cpuidle driver\n");
goto out_kfree_drv;
}
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index d29e4f041efe..84b1ebe212b3 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -242,6 +242,7 @@ static inline void add_powernv_state(int index, const char *name,
powernv_states[index].target_residency = target_residency;
powernv_states[index].exit_latency = exit_latency;
powernv_states[index].enter = idle_fn;
+ /* For power8 and below psscr_* will be 0 */
stop_psscr_table[index].val = psscr_val;
stop_psscr_table[index].mask = psscr_mask;
}
@@ -263,186 +264,80 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
extern u32 pnv_get_supported_cpuidle_states(void);
static int powernv_add_idle_states(void)
{
- struct device_node *power_mgt;
int nr_idle_states = 1; /* Snooze */
- int dt_idle_states, count;
- u32 latency_ns[CPUIDLE_STATE_MAX];
- u32 residency_ns[CPUIDLE_STATE_MAX];
- u32 flags[CPUIDLE_STATE_MAX];
- u64 psscr_val[CPUIDLE_STATE_MAX];
- u64 psscr_mask[CPUIDLE_STATE_MAX];
- const char *names[CPUIDLE_STATE_MAX];
+ int dt_idle_states;
u32 has_stop_states = 0;
- int i, rc;
+ int i;
u32 supported_flags = pnv_get_supported_cpuidle_states();
/* Currently we have snooze statically defined */
-
- power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
- if (!power_mgt) {
- pr_warn("opal: PowerMgmt Node not found\n");
- goto out;
- }
-
- /* Read values of any property to determine the num of idle states */
- dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
- if (dt_idle_states < 0) {
- pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+ if (nr_pnv_idle_states <= 0) {
+ pr_warn("cpuidle-powernv : Only Snooze is available\n");
goto out;
}
- count = of_property_count_u32_elems(power_mgt,
- "ibm,cpu-idle-state-latencies-ns");
-
- if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags", dt_idle_states,
- "ibm,cpu-idle-state-latencies-ns",
- count) != 0)
- goto out;
-
- count = of_property_count_strings(power_mgt,
- "ibm,cpu-idle-state-names");
- if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags", dt_idle_states,
- "ibm,cpu-idle-state-names",
- count) != 0)
- goto out;
+ /* TODO: Count only states which are eligible for cpuidle */
+ dt_idle_states = nr_pnv_idle_states;
/*
* Since snooze is used as first idle state, max idle states allowed is
* CPUIDLE_STATE_MAX -1
*/
- if (dt_idle_states > CPUIDLE_STATE_MAX - 1) {
+ if (nr_pnv_idle_states > CPUIDLE_STATE_MAX - 1) {
pr_warn("cpuidle-powernv: discovered idle states more than allowed");
dt_idle_states = CPUIDLE_STATE_MAX - 1;
}
- if (of_property_read_u32_array(power_mgt,
- "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
- pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
- goto out;
- }
-
- if (of_property_read_u32_array(power_mgt,
- "ibm,cpu-idle-state-latencies-ns", latency_ns,
- dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
- goto out;
- }
- if (of_property_read_string_array(power_mgt,
- "ibm,cpu-idle-state-names", names, dt_idle_states) < 0) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
- goto out;
- }
-
/*
* If the idle states use stop instruction, probe for psscr values
* and psscr mask which are necessary to specify required stop level.
*/
- has_stop_states = (flags[0] &
+ has_stop_states = (pnv_idle_states[0].flags &
(OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP));
- if (has_stop_states) {
- count = of_property_count_u64_elems(power_mgt,
- "ibm,cpu-idle-state-psscr");
- if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
- dt_idle_states,
- "ibm,cpu-idle-state-psscr",
- count) != 0)
- goto out;
-
- count = of_property_count_u64_elems(power_mgt,
- "ibm,cpu-idle-state-psscr-mask");
- if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
- dt_idle_states,
- "ibm,cpu-idle-state-psscr-mask",
- count) != 0)
- goto out;
-
- if (of_property_read_u64_array(power_mgt,
- "ibm,cpu-idle-state-psscr", psscr_val, dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
- goto out;
- }
-
- if (of_property_read_u64_array(power_mgt,
- "ibm,cpu-idle-state-psscr-mask",
- psscr_mask, dt_idle_states)) {
- pr_warn("cpuidle-powernv:Missing ibm,cpu-idle-state-psscr-mask in DT\n");
- goto out;
- }
- }
-
- count = of_property_count_u32_elems(power_mgt,
- "ibm,cpu-idle-state-residency-ns");
-
- if (count < 0) {
- rc = count;
- } else if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
- dt_idle_states,
- "ibm,cpu-idle-state-residency-ns",
- count) != 0) {
- goto out;
- } else {
- rc = of_property_read_u32_array(power_mgt,
- "ibm,cpu-idle-state-residency-ns",
- residency_ns, dt_idle_states);
- }
for (i = 0; i < dt_idle_states; i++) {
unsigned int exit_latency, target_residency;
bool stops_timebase = false;
+ struct pnv_idle_states_t *state = &pnv_idle_states[i];
/*
* Skip the platform idle state whose flag isn't in
* the supported_cpuidle_states flag mask.
*/
- if ((flags[i] & supported_flags) != flags[i])
+ if ((state->flags & supported_flags) != state->flags)
continue;
/*
* If an idle state has exit latency beyond
* POWERNV_THRESHOLD_LATENCY_NS then don't use it
* in cpu-idle.
*/
- if (latency_ns[i] > POWERNV_THRESHOLD_LATENCY_NS)
+ if (state->latency_ns > POWERNV_THRESHOLD_LATENCY_NS)
continue;
/*
* Firmware passes residency and latency values in ns.
* cpuidle expects it in us.
*/
- exit_latency = DIV_ROUND_UP(latency_ns[i], 1000);
- if (!rc)
- target_residency = DIV_ROUND_UP(residency_ns[i], 1000);
- else
- target_residency = 0;
-
- if (has_stop_states) {
- int err = validate_psscr_val_mask(&psscr_val[i],
- &psscr_mask[i],
- flags[i]);
- if (err) {
- report_invalid_psscr_val(psscr_val[i], err);
+ exit_latency = DIV_ROUND_UP(state->latency_ns, 1000);
+ target_residency = DIV_ROUND_UP(state->residency_ns, 1000);
+
+ if (has_stop_states && !(state->valid))
continue;
- }
- }
- if (flags[i] & OPAL_PM_TIMEBASE_STOP)
+ if (state->flags & OPAL_PM_TIMEBASE_STOP)
stops_timebase = true;
- /*
- * For nap and fastsleep, use default target_residency
- * values if f/w does not expose it.
- */
- if (flags[i] & OPAL_PM_NAP_ENABLED) {
- if (!rc)
- target_residency = 100;
+ if (state->flags & OPAL_PM_NAP_ENABLED) {
/* Add NAP state */
add_powernv_state(nr_idle_states, "Nap",
CPUIDLE_FLAG_NONE, nap_loop,
target_residency, exit_latency, 0, 0);
} else if (has_stop_states && !stops_timebase) {
- add_powernv_state(nr_idle_states, names[i],
+ add_powernv_state(nr_idle_states, state->name,
CPUIDLE_FLAG_NONE, stop_loop,
target_residency, exit_latency,
- psscr_val[i], psscr_mask[i]);
+ state->psscr_val,
+ state->psscr_mask);
}
/*
@@ -450,20 +345,19 @@ static int powernv_add_idle_states(void)
* within this config dependency check.
*/
#ifdef CONFIG_TICK_ONESHOT
- else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
- flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
- if (!rc)
- target_residency = 300000;
+ else if (state->flags & OPAL_PM_SLEEP_ENABLED ||
+ state->flags & OPAL_PM_SLEEP_ENABLED_ER1) {
/* Add FASTSLEEP state */
add_powernv_state(nr_idle_states, "FastSleep",
CPUIDLE_FLAG_TIMER_STOP,
fastsleep_loop,
target_residency, exit_latency, 0, 0);
} else if (has_stop_states && stops_timebase) {
- add_powernv_state(nr_idle_states, names[i],
+ add_powernv_state(nr_idle_states, state->name,
CPUIDLE_FLAG_TIMER_STOP, stop_loop,
target_residency, exit_latency,
- psscr_val[i], psscr_mask[i]);
+ state->psscr_val,
+ state->psscr_mask);
}
#endif
else
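
The powernv hunks above retire five parallel device-tree arrays (flags, latencies, names, psscr values/masks) in favour of the single pnv_idle_states[] table already parsed by the platform code, removing the cross-property size validation. A standalone sketch of the struct-per-state pattern, with hypothetical names and values, not part of the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One record per idle state, replacing the parallel flags[] /
 * latency_ns[] / names[] arrays that had to be size-checked
 * against each other. */
struct idle_state {
	const char *name;
	uint32_t flags;
	uint32_t latency_ns;
	bool valid;
};

#define SUPPORTED_FLAGS 0x3u

int main(void)
{
	struct idle_state states[] = {
		{ "Nap",       0x1, 4000,   true },
		{ "FastSleep", 0x2, 40000,  true },
		{ "Unknown",   0x8, 100000, true },
	};

	for (unsigned int i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
		const struct idle_state *s = &states[i];

		/* same skip rules as the hunk: unsupported flag bits
		 * or invalid states never reach registration */
		if ((s->flags & SUPPORTED_FLAGS) != s->flags || !s->valid)
			continue;
		printf("register %s, exit latency %u us\n", s->name,
		       (unsigned int)((s->latency_ns + 999) / 1000));
	}
	return 0;
}
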
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 1aef60d160eb..110483f0e3fb 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -328,9 +328,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
unsigned int polling_threshold;
/*
- * We want to default to C1 (hlt), not to busy polling
- * unless the timer is happening really really soon, or
- * C1's exit latency exceeds the user configured limit.
+ * Default to a physical idle state, not to busy polling, unless
+ * a timer is going to trigger really really soon.
*/
polling_threshold = max_t(unsigned int, 20, s->target_residency);
if (data->next_timer_us > polling_threshold &&
@@ -349,14 +348,12 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* If the tick is already stopped, the cost of possible short
* idle duration misprediction is much higher, because the CPU
* may be stuck in a shallow idle state for a long time as a
- * result of it. In that case say we might mispredict and try
- * to force the CPU into a state for which we would have stopped
- * the tick, unless a timer is going to expire really soon
- * anyway.
+ * result of it. In that case say we might mispredict and use
+ * the known time till the closest timer event for the idle
+ * state selection.
*/
if (data->predicted_us < TICK_USEC)
- data->predicted_us = min_t(unsigned int, TICK_USEC,
- ktime_to_us(delta_next));
+ data->predicted_us = ktime_to_us(delta_next);
} else {
/*
* Use the performance multiplier and the user-configurable
@@ -381,8 +378,22 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
continue;
if (idx == -1)
idx = i; /* first enabled state */
- if (s->target_residency > data->predicted_us)
- break;
+ if (s->target_residency > data->predicted_us) {
+ if (!tick_nohz_tick_stopped())
+ break;
+
+ /*
+ * If the state selected so far is shallow and this
+ * state's target residency matches the time till the
+ * closest timer event, select this one to avoid getting
+ * stuck in the shallow one for too long.
+ */
+ if (drv->states[idx].target_residency < TICK_USEC &&
+ s->target_residency <= ktime_to_us(delta_next))
+ idx = i;
+
+ goto out;
+ }
if (s->exit_latency > latency_req) {
/*
* If we break out of the loop for latency reasons, use
@@ -403,14 +414,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* Don't stop the tick if the selected state is a polling one or if the
* expected idle duration is shorter than the tick period length.
*/
- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- expected_interval < TICK_USEC) {
+ if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+ expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
unsigned int delta_next_us = ktime_to_us(delta_next);
*stop_tick = false;
- if (!tick_nohz_tick_stopped() && idx > 0 &&
- drv->states[idx].target_residency > delta_next_us) {
+ if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
/*
* The tick is not going to be stopped and the target
* residency of the state to be returned is not within
@@ -418,8 +428,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* tick, so try to correct that.
*/
for (i = idx - 1; i >= 0; i--) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (drv->states[i].disabled ||
+ dev->states_usage[i].disable)
continue;
idx = i;
@@ -429,6 +439,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
}
}
+out:
data->last_state_idx = idx;
return data->last_state_idx;
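
The menu-governor hunks add the tick-already-stopped case: instead of breaking out at the first state too deep for predicted_us, the loop may promote the choice to a state whose residency still fits the known time to the next timer, so the CPU is not parked in a too-shallow state indefinitely. A self-contained sketch of that selection rule (hypothetical residency table; the real code also checks latency limits and disabled states):

#include <stdbool.h>
#include <stdio.h>

#define TICK_USEC 1000u

/* Deepest state whose target residency fits predicted_us; with the
 * tick stopped, a shallow pick may be promoted to the state matching
 * the time to the next timer (delta_next_us). */
static int pick_state(const unsigned int *residency, int n,
		      unsigned int predicted_us,
		      unsigned int delta_next_us, bool tick_stopped)
{
	int idx = -1;

	for (int i = 0; i < n; i++) {
		if (idx == -1)
			idx = i;
		if (residency[i] > predicted_us) {
			if (tick_stopped && residency[idx] < TICK_USEC &&
			    residency[i] <= delta_next_us)
				idx = i;
			break;
		}
		idx = i;
	}
	return idx;
}

int main(void)
{
	unsigned int res[] = { 1, 20, 200, 5000 };

	printf("tick running: state %d\n",
	       pick_state(res, 4, 150, 4000, false));	/* 1 */
	printf("tick stopped: state %d\n",
	       pick_state(res, 4, 150, 6000, true));	/* 2 */
	return 0;
}
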
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43cccf6aff61..a8c4ce07fc9d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -585,6 +585,17 @@ config CRYPTO_DEV_QCE
hardware. To compile this driver as a module, choose M here. The
module will be called qcrypto.
+config CRYPTO_DEV_QCOM_RNG
+ tristate "Qualcomm Random Number Generator Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select CRYPTO_RNG
+ help
+ This driver provides support for the Random Number
+ Generator hardware found on Qualcomm SoCs.
+
+ To compile this driver as a module, choose M here. The
+ module will be called qcom-rng. If unsure, say N.
+
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
depends on PPC64 && VSX
@@ -689,8 +700,10 @@ config CRYPTO_DEV_SAFEXCEL
select CRYPTO_AES
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
select CRYPTO_HASH
select CRYPTO_HMAC
+ select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@@ -746,4 +759,6 @@ config CRYPTO_DEV_CCREE
cryptographic operations on the system REE.
If unsure say Y.
+source "drivers/crypto/hisilicon/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 7ae87b4f6c8d..c23396f32c8a 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
@@ -45,3 +46,4 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
+obj-y += hisilicon/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 05981ccd9901..6eaec9ba0f68 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1132,8 +1132,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1153,8 +1152,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "cfb(aes)",
.cra_driver_name = "cfb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1174,8 +1172,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK |
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1196,8 +1193,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1217,8 +1213,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1237,8 +1232,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ofb(aes)",
.cra_driver_name = "ofb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
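
Many of the remaining hunks in this series repeat one pattern: the CRYPTO_ALG_TYPE_* bits (and explicit cra_type assignments) are dropped from drivers because the type-specific registration API sets them itself. A kernel-context fragment showing what remains (hypothetical driver name; not a standalone program):

/* With crypto_register_skcipher() the framework ORs in
 * CRYPTO_ALG_TYPE_SKCIPHER and sets cra_type, so the driver lists
 * only behavioural flags. Needs <crypto/internal/skcipher.h>. */
static struct skcipher_alg sample_alg = {
	.base = {
		.cra_name        = "cbc(aes)",
		.cra_driver_name = "cbc-aes-sample",	/* hypothetical */
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_ASYNC |
				   CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
	},
	/* .setkey / .encrypt / .decrypt elided */
};
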
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index e66f18a0ddd0..74f083f45e97 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -186,7 +186,10 @@ static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
* always be the same. Use a macro for the key size to avoid unnecessary
* computations.
*/
- copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+ copied = sg_copy_to_buffer(pubkey,
+ sg_nents_for_len(pubkey,
+ ATMEL_ECC_PUBKEY_SIZE),
+ cmd->data, ATMEL_ECC_PUBKEY_SIZE);
if (copied != ATMEL_ECC_PUBKEY_SIZE)
return -EINVAL;
@@ -268,15 +271,17 @@ static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
struct kpp_request *req = areq;
struct atmel_ecdh_ctx *ctx = work_data->ctx;
struct atmel_ecc_cmd *cmd = &work_data->cmd;
- size_t copied;
- size_t n_sz = ctx->n_sz;
+ size_t copied, n_sz;
if (status)
goto free_work_data;
+ /* might want less than we've got */
+ n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+
/* copy the shared secret */
- copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX],
- n_sz);
+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
+ &cmd->data[RSP_DATA_IDX], n_sz);
if (copied != n_sz)
status = -EINVAL;
@@ -440,7 +445,7 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
- size_t copied;
+ size_t copied, nbytes;
int ret = 0;
if (ctx->do_fallback) {
@@ -448,10 +453,14 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
return crypto_kpp_generate_public_key(req);
}
+ /* might want less than we've got */
+ nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
+
/* public key was saved at private key generation */
- copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key,
- ATMEL_ECC_PUBKEY_SIZE);
- if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ copied = sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, nbytes),
+ ctx->public_key, nbytes);
+ if (copied != nbytes)
ret = -EINVAL;
return ret;
@@ -470,6 +479,10 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
return crypto_kpp_compute_shared_secret(req);
}
+ /* the public key must be exactly two coordinates, i.e. one curve point */
+ if (req->src_len != ATMEL_ECC_PUBKEY_SIZE)
+ return -EINVAL;
+
gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
GFP_ATOMIC;
@@ -554,10 +567,6 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
}
crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
-
- dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback)));
-
ctx->fallback = fallback;
return 0;
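
The atmel-ecc hunks fix two copy problems at once: clamp the length to what the caller asked for (req->dst_len) and derive the scatterlist entry count with sg_nents_for_len() instead of hard-coding 1, which truncated fragmented destination buffers. The resulting idiom, as a kernel-context fragment with a hypothetical 'secret' buffer:

size_t n = min_t(size_t, secret_len, req->dst_len);
size_t copied;

/* nents derived from the length: works even when req->dst is
 * split over several scatterlist entries */
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n),
			     secret, n);
if (copied != n)
	return -EINVAL;
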
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 4d43081120db..8a19df2fba6a 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2316,9 +2316,7 @@ struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
goto error;
}
- tfm = crypto_alloc_ahash(name,
- CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(name, 0, 0);
if (IS_ERR(tfm)) {
err = PTR_ERR(tfm);
goto error;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 0fb8bbf41a8d..7f07a5085e9b 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2704,7 +2704,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha1",
.cra_driver_name = "artpec-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2727,7 +2727,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha256",
.cra_driver_name = "artpec-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2751,7 +2751,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "artpec-hmac-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2777,7 +2777,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "sha384",
.cra_driver_name = "artpec-sha384",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2801,7 +2801,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "artpec-hmac-sha384",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2824,7 +2824,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "sha512",
.cra_driver_name = "artpec-sha512",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2848,7 +2848,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "artpec-hmac-sha512",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2867,8 +2867,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "artpec6-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2888,8 +2887,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "artpec6-ctr-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
@@ -2911,8 +2909,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "artpec6-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2933,8 +2930,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "xts(aes)",
.cra_driver_name = "artpec6-xts-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2964,7 +2960,7 @@ static struct aead_alg aead_algos[] = {
.cra_name = "gcm(aes)",
.cra_driver_name = "artpec-gcm-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 309c67c7012f..2d1f1db9f807 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3914,8 +3914,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-iproc",
.cra_blocksize = MD5_BLOCK_WORDS * 4,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.cipher_info = {
@@ -4649,8 +4648,7 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
hash->halg.base.cra_init = ahash_cra_init;
hash->halg.base.cra_exit = generic_cra_exit;
- hash->halg.base.cra_type = &crypto_ahash_type;
- hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
+ hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
hash->halg.statesize = sizeof(struct spu_hash_export_s);
if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@@ -4691,7 +4689,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
INIT_LIST_HEAD(&aead->base.cra_list);
- aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
/* setkey set in alg initialization */
aead->setauthsize = aead_setauthsize;
aead->encrypt = aead_encrypt;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0beb28196e20..43975ab5f09c 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1846,8 +1846,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
- alg->cra_type = &crypto_ahash_type;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
t_alg->alg_type = template->alg_type;
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index df21d996db7e..600336d169a9 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -351,7 +351,7 @@ static int cvm_enc_dec_init(struct crypto_tfm *tfm)
return 0;
}
-struct crypto_alg algs[] = { {
+static struct crypto_alg algs[] = { {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cvm_enc_ctx),
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4fdc921ba611..ebe267379ac9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -148,7 +148,7 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
void *vaddr;
dma_addr_t dma;
- vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_ATOMIC | __GFP_ZERO), &dma);
+ vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
if (!vaddr)
return NULL;
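
The nitrox change swaps GFP_ATOMIC for GFP_KERNEL: crypto_alloc_context() runs in process context where sleeping is allowed, so there is no need to draw on the atomic emergency reserves. The rule in one fragment (hypothetical pool):

/* May-sleep context: use GFP_KERNEL. Reserve GFP_ATOMIC for IRQ or
 * spinlock-held paths; dma_pool_zalloc(pool, GFP_KERNEL, &dma) is
 * the shorter spelling of alloc + __GFP_ZERO. */
vaddr = dma_pool_alloc(pool, GFP_KERNEL | __GFP_ZERO, &dma);
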
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 26687f318de6..3c6fe57f91f8 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -399,13 +399,12 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = AES_BLOCK_SIZE;
base->cra_ctxsize = sizeof(struct ccp_ctx);
base->cra_priority = CCP_CRA_PRIORITY;
- base->cra_type = &crypto_ahash_type;
base->cra_init = ccp_aes_cmac_cra_init;
base->cra_exit = ccp_aes_cmac_cra_exit;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 871c9628a2ee..2ca64bb57d2e 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -497,13 +497,12 @@ static int ccp_register_sha_alg(struct list_head *head,
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = def->block_size;
base->cra_ctxsize = sizeof(struct ccp_ctx);
base->cra_priority = CCP_CRA_PRIORITY;
- base->cra_type = &crypto_ahash_type;
base->cra_init = ccp_sha_cra_init;
base->cra_exit = ccp_sha_cra_exit;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index ff478d826d7d..218739b961fe 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -62,14 +62,14 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
int reg;
/* Read the interrupt status: */
- status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS);
+ status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
/* Check if it is command completion: */
- if (!(status & BIT(PSP_CMD_COMPLETE_REG)))
+ if (!(status & PSP_CMD_COMPLETE))
goto done;
/* Check if it is SEV command completion: */
- reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
if (reg & PSP_CMDRESP_RESP) {
psp->sev_int_rcvd = 1;
wake_up(&psp->sev_int_queue);
@@ -77,17 +77,15 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
done:
/* Clear the interrupt status by writing the same value we read. */
- iowrite32(status, psp->io_regs + PSP_P2CMSG_INTSTS);
+ iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
{
- psp->sev_int_rcvd = 0;
-
wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
- *reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
}
static int sev_cmd_buffer_len(int cmd)
@@ -145,13 +143,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
sev_cmd_buffer_len(cmd), false);
- iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
- iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
+ iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
+
+ psp->sev_int_rcvd = 0;
reg = cmd;
reg <<= PSP_CMDRESP_CMD_SHIFT;
reg |= PSP_CMDRESP_IOC;
- iowrite32(reg, psp->io_regs + PSP_CMDRESP);
+ iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
/* wait for command completion */
sev_wait_cmd_ioc(psp, &reg);
@@ -789,7 +789,7 @@ static int sev_misc_init(struct psp_device *psp)
static int sev_init(struct psp_device *psp)
{
/* Check if device supports SEV feature */
- if (!(ioread32(psp->io_regs + PSP_FEATURE_REG) & 1)) {
+ if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
dev_dbg(psp->dev, "device does not support SEV\n");
return 1;
}
@@ -817,11 +817,11 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- psp->io_regs = sp->io_map + psp->vdata->offset;
+ psp->io_regs = sp->io_map;
/* Disable and clear interrupts until ready */
- iowrite32(0, psp->io_regs + PSP_P2CMSG_INTEN);
- iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTSTS);
+ iowrite32(0, psp->io_regs + psp->vdata->inten_reg);
+ iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg);
/* Request an irq */
ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
@@ -838,7 +838,9 @@ int psp_dev_init(struct sp_device *sp)
sp->set_psp_master_device(sp);
/* Enable interrupt */
- iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
+ iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
+
+ dev_notice(dev, "psp enabled\n");
return 0;
@@ -856,6 +858,9 @@ void psp_dev_destroy(struct sp_device *sp)
{
struct psp_device *psp = sp->psp_data;
+ if (!psp)
+ return;
+
if (psp->sev_misc)
kref_put(&misc_dev->refcount, sev_exit);
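
Note the ordering change in __sev_do_cmd_locked() above: sev_int_rcvd is now cleared before the doorbell write instead of inside sev_wait_cmd_ioc(), so a completion interrupt that fires immediately can no longer be erased before the waiter goes to sleep. The safe shape, extracted from the hunk:

psp->sev_int_rcvd = 0;				/* arm the flag...    */
iowrite32(reg, psp->io_regs +
	       psp->vdata->cmdresp_reg);	/* ...then kick HW    */
wait_event(psp->sev_int_queue,
	   psp->sev_int_rcvd);			/* ...then sleep      */
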
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index c7e9098a233c..8b53a9674ecb 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -30,24 +30,7 @@
#include "sp-dev.h"
-#define PSP_C2PMSG(_num) ((_num) << 2)
-#define PSP_CMDRESP PSP_C2PMSG(32)
-#define PSP_CMDBUFF_ADDR_LO PSP_C2PMSG(56)
-#define PSP_CMDBUFF_ADDR_HI PSP_C2PMSG(57)
-#define PSP_FEATURE_REG PSP_C2PMSG(63)
-
-#define PSP_P2CMSG(_num) ((_num) << 2)
-#define PSP_CMD_COMPLETE_REG 1
-#define PSP_CMD_COMPLETE PSP_P2CMSG(PSP_CMD_COMPLETE_REG)
-
-#define PSP_P2CMSG_INTEN 0x0110
-#define PSP_P2CMSG_INTSTS 0x0114
-
-#define PSP_C2PMSG_ATTR_0 0x0118
-#define PSP_C2PMSG_ATTR_1 0x011c
-#define PSP_C2PMSG_ATTR_2 0x0120
-#define PSP_C2PMSG_ATTR_3 0x0124
-#define PSP_P2CMSG_ATTR_0 0x0128
+#define PSP_CMD_COMPLETE BIT(1)
#define PSP_CMDRESP_CMD_SHIFT 16
#define PSP_CMDRESP_IOC BIT(0)
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index acb197b66ced..14398cad1625 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -44,7 +44,12 @@ struct ccp_vdata {
};
struct psp_vdata {
- const unsigned int offset;
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int feature_reg;
+ const unsigned int inten_reg;
+ const unsigned int intsts_reg;
};
/* Structure to hold SP device data */
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index f5f43c50698a..7da93e9bebed 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -269,38 +269,62 @@ static int sp_pci_resume(struct pci_dev *pdev)
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata psp_entry = {
- .offset = 0x10500,
+static const struct psp_vdata pspv1 = {
+ .cmdresp_reg = 0x10580,
+ .cmdbuff_addr_lo_reg = 0x105e0,
+ .cmdbuff_addr_hi_reg = 0x105e4,
+ .feature_reg = 0x105fc,
+ .inten_reg = 0x10610,
+ .intsts_reg = 0x10614,
+};
+
+static const struct psp_vdata pspv2 = {
+ .cmdresp_reg = 0x10980,
+ .cmdbuff_addr_lo_reg = 0x109e0,
+ .cmdbuff_addr_hi_reg = 0x109e4,
+ .feature_reg = 0x109fc,
+ .inten_reg = 0x10690,
+ .intsts_reg = 0x10694,
};
#endif
static const struct sp_dev_vdata dev_vdata[] = {
- {
+ { /* 0 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv3,
#endif
},
- {
+ { /* 1 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv5a,
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
- .psp_vdata = &psp_entry
+ .psp_vdata = &pspv1,
#endif
},
- {
+ { /* 2 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv5b,
#endif
},
+ { /* 3 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv2,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+ { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
/* Last entry must be zero */
{ 0, }
};
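
The fixed PSP_* register #defines give way to per-generation psp_vdata tables bound through the PCI ID table, so a new device revision becomes a data change rather than a code change. A standalone sketch of the lookup shape (offsets copied from the hunk above):

#include <stdio.h>

/* mirrors struct psp_vdata: one offset table per silicon revision */
struct regmap {
	unsigned int cmdresp;
	unsigned int feature;
};

static const struct regmap v1 = { 0x10580, 0x105fc };
static const struct regmap v2 = { 0x10980, 0x109fc };

int main(void)
{
	/* the PCI ID table binds a device to its map; every register
	 * access then goes through it, so new silicon is only a new
	 * table entry */
	const struct regmap *maps[] = { &v1, &v2 };

	for (int i = 0; i < 2; i++)
		printf("v%d: cmdresp %#x, feature %#x\n",
		       i + 1, maps[i]->cmdresp, maps[i]->feature);
	return 0;
}
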
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 03f4b9fce556..01b82b82f8b8 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -2344,7 +2344,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2364,7 +2363,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2384,7 +2382,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),cbc(aes))",
.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2404,7 +2401,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2424,7 +2420,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(xcbc(aes),cbc(aes))",
.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2444,7 +2439,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2464,7 +2458,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2484,7 +2477,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2504,7 +2496,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "ccm(aes)",
.driver_name = "ccm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_ccm_setauthsize,
@@ -2524,7 +2515,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4309(ccm(aes))",
.driver_name = "rfc4309-ccm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4309_ccm_setkey,
.setauthsize = cc_rfc4309_ccm_setauthsize,
@@ -2544,7 +2534,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "gcm(aes)",
.driver_name = "gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_gcm_setauthsize,
@@ -2564,7 +2553,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4106(gcm(aes))",
.driver_name = "rfc4106-gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4106_gcm_setkey,
.setauthsize = cc_rfc4106_gcm_setauthsize,
@@ -2584,7 +2572,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4543(gcm(aes))",
.driver_name = "rfc4543-gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4543_gcm_setkey,
.setauthsize = cc_rfc4543_gcm_setauthsize,
@@ -2621,8 +2608,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
alg->base.cra_priority = CC_CRA_PRIO;
alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- tmpl->type;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = cc_aead_init;
alg->exit = cc_aead_exit;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index b32577477b4c..dd948e1df9e5 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -454,9 +454,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
- ivsize,
- req_ctx->is_giv ? DMA_BIDIRECTIONAL :
- DMA_TO_DEVICE);
+ ivsize, DMA_TO_DEVICE);
}
/* Release pool */
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -498,9 +496,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
- ivsize,
- req_ctx->is_giv ? DMA_BIDIRECTIONAL :
- DMA_TO_DEVICE);
+ ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d2810c183b73..7623b29911af 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -19,8 +19,6 @@
#define template_skcipher template_u.skcipher
-#define CC_MIN_AES_XTS_SIZE 0x10
-#define CC_MAX_AES_XTS_SIZE 0x2000
struct cc_cipher_handle {
struct list_head alg_list;
};
@@ -98,8 +96,7 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,
case S_DIN_to_AES:
switch (ctx_p->cipher_mode) {
case DRV_CIPHER_XTS:
- if (size >= CC_MIN_AES_XTS_SIZE &&
- size <= CC_MAX_AES_XTS_SIZE &&
+ if (size >= AES_BLOCK_SIZE &&
IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
@@ -593,34 +590,82 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
}
}
+/*
+ * Update a CTR-AES 128 bit counter
+ */
+static void cc_update_ctr(u8 *ctr, unsigned int increment)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ IS_ALIGNED((unsigned long)ctr, 8)) {
+
+ __be64 *high_be = (__be64 *)ctr;
+ __be64 *low_be = high_be + 1;
+ u64 orig_low = __be64_to_cpu(*low_be);
+ u64 new_low = orig_low + (u64)increment;
+
+ *low_be = __cpu_to_be64(new_low);
+
+ if (new_low < orig_low)
+ *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
+ } else {
+ u8 *pos = (ctr + AES_BLOCK_SIZE);
+ u8 val;
+ unsigned int size;
+
+ for (; increment; increment--)
+ for (size = AES_BLOCK_SIZE; size; size--) {
+ val = *--pos + 1;
+ *pos = val;
+ if (val)
+ break;
+ }
+ }
+}
+
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct skcipher_request *req = (struct skcipher_request *)cc_req;
struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+ unsigned int len;
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
- kzfree(req_ctx->iv);
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_CBC:
+ /*
+ * The crypto API expects us to set the req->iv to the last
+ * ciphertext block. For encrypt, simply copy from the result.
+ * For decrypt, we must copy from a saved buffer since this
+ * could be an in-place decryption operation and the src is
+ * lost by this point.
+ */
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ memcpy(req->iv, req_ctx->backup_info, ivsize);
+ kzfree(req_ctx->backup_info);
+ } else if (!err) {
+ len = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req->iv, req->dst, len,
+ ivsize, 0);
+ }
+ break;
- /*
- * The crypto API expects us to set the req->iv to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->iv, req_ctx->backup_info, ivsize);
- kzfree(req_ctx->backup_info);
- } else if (!err) {
- scatterwalk_map_and_copy(req->iv, req->dst,
- (req->cryptlen - ivsize),
- ivsize, 0);
+ case DRV_CIPHER_CTR:
+ /* Compute the counter of the last block */
+ len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
+ cc_update_ctr((u8 *)req->iv, len);
+ break;
+
+ default:
+ break;
}
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ kzfree(req_ctx->iv);
+
skcipher_request_complete(req, err);
}
@@ -639,7 +684,7 @@ static int cc_cipher_process(struct skcipher_request *req,
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
struct cc_crypto_req cc_req = {};
- int rc, cts_restore_flag = 0;
+ int rc;
unsigned int seq_len = 0;
gfp_t flags = cc_gfp_flags(&req->base);
@@ -671,23 +716,10 @@ static int cc_cipher_process(struct skcipher_request *req,
goto exit_process;
}
- /*For CTS in case of data size aligned to 16 use CBC mode*/
- if (((nbytes % AES_BLOCK_SIZE) == 0) &&
- ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
- ctx_p->cipher_mode = DRV_CIPHER_CBC;
- cts_restore_flag = 1;
- }
-
/* Setup request structure */
cc_req.user_cb = (void *)cc_cipher_complete;
cc_req.user_arg = (void *)req;
-#ifdef ENABLE_CYCLE_COUNT
- cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
- STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
-
-#endif
-
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
@@ -708,14 +740,6 @@ static int cc_cipher_process(struct skcipher_request *req,
cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
&seq_len);
- /* do we need to generate IV? */
- if (req_ctx->is_giv) {
- cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
- cc_req.ivgen_dma_addr_len = 1;
- /* set the IV size (8/16 B long)*/
- cc_req.ivgen_size = ivsize;
- }
-
/* STAT_PHASE_3: Lock HW and push sequence */
rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
@@ -728,9 +752,6 @@ static int cc_cipher_process(struct skcipher_request *req,
}
exit_process:
- if (cts_restore_flag)
- ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
-
if (rc != -EINPROGRESS && rc != -EBUSY) {
kzfree(req_ctx->backup_info);
kzfree(req_ctx->iv);
@@ -743,8 +764,7 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
{
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- req_ctx->is_giv = false;
- req_ctx->backup_info = NULL;
+ memset(req_ctx, 0, sizeof(*req_ctx));
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
}
@@ -752,21 +772,28 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
static int cc_cipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
gfp_t flags = cc_gfp_flags(&req->base);
+ unsigned int len;
- /*
- * Allocate and save the last IV sized bytes of the source, which will
- * be lost in case of in-place decryption and might be needed for CTS.
- */
- req_ctx->backup_info = kmalloc(ivsize, flags);
- if (!req_ctx->backup_info)
- return -ENOMEM;
+ memset(req_ctx, 0, sizeof(*req_ctx));
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
+
+ /* Allocate and save the last IV sized bytes of the source,
+ * which will be lost in case of in-place decryption.
+ */
+ req_ctx->backup_info = kzalloc(ivsize, flags);
+ if (!req_ctx->backup_info)
+ return -ENOMEM;
- scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
- (req->cryptlen - ivsize), ivsize, 0);
- req_ctx->is_giv = false;
+ len = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
+ ivsize, 0);
+ }
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
@@ -927,7 +954,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(paes)",
.driver_name = "ecb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -944,7 +970,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(paes)",
.driver_name = "cbc-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -961,7 +986,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ofb(paes)",
.driver_name = "ofb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -975,10 +999,9 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_712,
},
{
- .name = "cts1(cbc(paes))",
- .driver_name = "cts1-cbc-paes-ccree",
+ .name = "cts(cbc(paes))",
+ .driver_name = "cts-cbc-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -995,7 +1018,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ctr(paes)",
.driver_name = "ctr-paes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -1162,7 +1184,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(aes)",
.driver_name = "ecb-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1179,7 +1200,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(aes)",
.driver_name = "cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1196,7 +1216,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1210,10 +1229,9 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_630,
},
{
- .name = "cts1(cbc(aes))",
- .driver_name = "cts1-cbc-aes-ccree",
+ .name = "cts(cbc(aes))",
+ .driver_name = "cts-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1230,7 +1248,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ctr(aes)",
.driver_name = "ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1247,7 +1264,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(des3_ede)",
.driver_name = "cbc-3des-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1264,7 +1280,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(des3_ede)",
.driver_name = "ecb-3des-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1281,7 +1296,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(des)",
.driver_name = "cbc-des-ccree",
.blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1298,7 +1312,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(des)",
.driver_name = "ecb-des-ccree",
.blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1338,8 +1351,7 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
alg->base.cra_init = cc_cipher_init;
alg->base.cra_exit = cc_cipher_exit;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_TYPE_SKCIPHER;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
t_alg->cipher_mode = tmpl->cipher_mode;
t_alg->flow_mode = tmpl->flow_mode;
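
cc_update_ctr() above advances a 128-bit big-endian AES-CTR counter: the fast path adds into the low 64-bit word and propagates a single carry, while the byte-wise fallback handles unaligned buffers. A standalone sketch of the byte-wise carry logic, checkable on any host:

#include <stdint.h>
#include <stdio.h>

#define BLOCK 16

/* Increment a big-endian BLOCK-byte counter by 'inc', rippling the
 * carry up from the last byte — the same fallback cc_update_ctr()
 * takes for unaligned counter buffers. */
static void ctr_add(uint8_t *ctr, unsigned int inc)
{
	while (inc--) {
		for (int i = BLOCK - 1; i >= 0; i--)
			if (++ctr[i])
				break;		/* carry absorbed */
	}
}

int main(void)
{
	uint8_t ctr[BLOCK] = { 0 };

	ctr[BLOCK - 1] = 0xff;
	ctr_add(ctr, 1);			/* carries into byte 14 */

	for (int i = 0; i < BLOCK; i++)
		printf("%02x", ctr[i]);
	printf("\n");				/* ...0100 */
	return 0;
}
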
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 68444cfa936b..4dbc0a1e6d5c 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -22,7 +22,6 @@ struct cipher_req_ctx {
u32 out_mlli_nents;
u8 *backup_info; /*store iv for generated IV flow*/
u8 *iv;
- bool is_giv;
struct mlli_params mlli_params;
};
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index bd974fef05e4..1ff229c2aeab 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -131,8 +131,8 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
}
if (irr) {
- dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
- irr);
+ dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
/* Just warning */
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 95f82b2d1e70..d608a4faf662 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -148,7 +148,6 @@ struct cc_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
- u32 type;
union {
struct skcipher_alg skcipher;
struct aead_alg aead;
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 96ff777474d7..b9313306c36f 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -602,66 +602,7 @@ static int cc_hash_update(struct ahash_request *req)
return rc;
}
-static int cc_hash_finup(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- u32 digestsize = crypto_ahash_digestsize(tfm);
- struct scatterlist *src = req->src;
- unsigned int nbytes = req->nbytes;
- u8 *result = req->result;
- struct device *dev = drvdata_to_dev(ctx->drvdata);
- bool is_hmac = ctx->is_hmac;
- struct cc_crypto_req cc_req = {};
- struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
- unsigned int idx = 0;
- int rc;
- gfp_t flags = cc_gfp_flags(&req->base);
-
- dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
- nbytes);
-
- if (cc_map_req(dev, state, ctx)) {
- dev_err(dev, "map_ahash_source() failed\n");
- return -EINVAL;
- }
-
- if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
- flags)) {
- dev_err(dev, "map_ahash_request_final() failed\n");
- cc_unmap_req(dev, state, ctx);
- return -ENOMEM;
- }
- if (cc_map_result(dev, state, digestsize)) {
- dev_err(dev, "map_ahash_digest() failed\n");
- cc_unmap_hash_request(dev, state, src, true);
- cc_unmap_req(dev, state, ctx);
- return -ENOMEM;
- }
-
- /* Setup request structure */
- cc_req.user_cb = cc_hash_complete;
- cc_req.user_arg = req;
-
- idx = cc_restore_hash(desc, ctx, state, idx);
-
- if (is_hmac)
- idx = cc_fin_hmac(desc, req, idx);
-
- idx = cc_fin_result(desc, req, idx);
-
- rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS && rc != -EBUSY) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- cc_unmap_hash_request(dev, state, src, true);
- cc_unmap_result(dev, state, digestsize, result);
- cc_unmap_req(dev, state, ctx);
- }
- return rc;
-}
-
-static int cc_hash_final(struct ahash_request *req)
+static int cc_do_finup(struct ahash_request *req, bool update)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -678,21 +619,20 @@ static int cc_hash_final(struct ahash_request *req)
int rc;
gfp_t flags = cc_gfp_flags(&req->base);
- dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
- nbytes);
+ dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
+ update ? "finup" : "final", nbytes);
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -EINVAL;
}
- if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
-
if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
cc_unmap_hash_request(dev, state, src, true);
@@ -706,7 +646,7 @@ static int cc_hash_final(struct ahash_request *req)
idx = cc_restore_hash(desc, ctx, state, idx);
- /* "DO-PAD" must be enabled only when writing current length to HW */
+ /* Pad the hash */
hw_desc_init(&desc[idx]);
set_cipher_do(&desc[idx], DO_PAD);
set_cipher_mode(&desc[idx], ctx->hw_mode);
@@ -731,6 +671,17 @@ static int cc_hash_final(struct ahash_request *req)
return rc;
}
+static int cc_hash_finup(struct ahash_request *req)
+{
+ return cc_do_finup(req, true);
+}
+
+static int cc_hash_final(struct ahash_request *req)
+{
+ return cc_do_finup(req, false);
+}
+
static int cc_hash_init(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
@@ -1813,9 +1764,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
alg->cra_exit = cc_cra_exit;
alg->cra_init = cc_cra_init;
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
- alg->cra_type = &crypto_ahash_type;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
t_crypto_alg->hash_mode = template->hash_mode;
t_crypto_alg->hw_mode = template->hw_mode;
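
cc_hash_finup() and cc_hash_final() collapse into cc_do_finup(req, update); the two bodies were identical apart from the update flag forwarded to the request-mapping helper. The deduplication shape, as a trivial standalone sketch:

#include <stdbool.h>
#include <stdio.h>

/* shared body; 'update' is the single knob, forwarded to the
 * request-mapping step exactly as cc_do_finup() forwards it to
 * cc_map_hash_request_final() */
static int do_finup(bool update)
{
	printf("%s: map(update=%d), restore, pad, send\n",
	       update ? "finup" : "final", update);
	return 0;
}

static int hash_finup(void) { return do_finup(true); }
static int hash_final(void) { return do_finup(false); }

int main(void)
{
	return hash_finup() | hash_final();
}
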
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index b916c4eb608c..5c539af8ed60 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -4203,7 +4203,6 @@ static int chcr_unregister_alg(void)
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
-#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
/*
* chcr_register_alg - Register crypto algorithms with kernel framework.
@@ -4237,8 +4236,7 @@ static int chcr_register_alg(void)
break;
case CRYPTO_ALG_TYPE_AEAD:
driver_algs[i].alg.aead.base.cra_flags =
- CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK;
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
driver_algs[i].alg.aead.init = chcr_aead_cra_init;
@@ -4258,10 +4256,9 @@ static int chcr_register_alg(void)
a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
a_hash->halg.base.cra_module = THIS_MODULE;
- a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
+ a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
a_hash->halg.base.cra_alignmask = 0;
a_hash->halg.base.cra_exit = NULL;
- a_hash->halg.base.cra_type = &crypto_ahash_type;
if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
a_hash->halg.base.cra_init = chcr_hmac_cra_init;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 2bb6f0380758..0997e166ea57 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1673,7 +1673,7 @@ static void chtls_timewait(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
tp->rcv_nxt++;
- tp->rx_opt.ts_recent_stamp = get_seconds();
+ tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
tp->srtt_us = 0;
tcp_time_wait(sk, TCP_TIME_WAIT, 0);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 55d50140f9e5..490960755864 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -97,7 +97,7 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
{
return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
- val << bit_pos);
+ (u64)val << bit_pos);
}
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
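
The chtls one-liner casts val before shifting because 'val << bit_pos' is evaluated in 32-bit int arithmetic; for bit positions of 31 and above the value is mangled (or undefined) before the implicit widening to the u64 argument. A standalone demonstration of the correct form:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int val = 1;
	unsigned int bit_pos = 40;

	/*
	 * 'val << bit_pos' would be computed in 32-bit int arithmetic
	 * and is undefined for shifts of 31+ here; the widening to
	 * u64 happens only after the damage is done. Cast first:
	 */
	uint64_t mask = (uint64_t)val << bit_pos;

	printf("%#" PRIx64 "\n", mask);		/* 0x10000000000 */
	return 0;
}
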
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
new file mode 100644
index 000000000000..8ca9c503bcb0
--- /dev/null
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config CRYPTO_DEV_HISI_SEC
+ tristate "Support for Hisilicon SEC crypto block cipher accelerator"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ALGAPI
+ select SG_SPLIT
+ depends on ARM64 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Support for Hisilicon SEC Engine in Hip06 and Hip07
+
+ To compile this as a module, choose M here: the module
+ will be called hisi_sec.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
new file mode 100644
index 000000000000..463f46ace182
--- /dev/null
+++ b/drivers/crypto/hisilicon/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
diff --git a/drivers/crypto/hisilicon/sec/Makefile b/drivers/crypto/hisilicon/sec/Makefile
new file mode 100644
index 000000000000..a55b698e0c27
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += hisi_sec.o
+hisi_sec-y = sec_algs.o sec_drv.o
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
new file mode 100644
index 000000000000..f7d6d690116e
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -0,0 +1,1122 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/skcipher.h>
+#include <crypto/xts.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sec_drv.h"
+
+#define SEC_MAX_CIPHER_KEY 64
+#define SEC_REQ_LIMIT SZ_32M
+
+struct sec_c_alg_cfg {
+ unsigned c_alg : 3;
+ unsigned c_mode : 3;
+ unsigned key_len : 2;
+ unsigned c_width : 2;
+};
+
+static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
+ [SEC_C_DES_ECB_64] = {
+ .c_alg = SEC_C_ALG_DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_DES,
+ },
+ [SEC_C_DES_CBC_64] = {
+ .c_alg = SEC_C_ALG_DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_DES,
+ },
+ [SEC_C_3DES_ECB_192_3KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_3DES_3_KEY,
+ },
+ [SEC_C_3DES_ECB_192_2KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_3DES_2_KEY,
+ },
+ [SEC_C_3DES_CBC_192_3KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_3DES_3_KEY,
+ },
+ [SEC_C_3DES_CBC_192_2KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_3DES_2_KEY,
+ },
+ [SEC_C_AES_ECB_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_ECB_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_ECB_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_CBC_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_CBC_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_CBC_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_CTR_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_CTR_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_CTR_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_XTS_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_XTS,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_XTS_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_XTS,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_NULL] = {
+ },
+};
+
+/*
+ * Mutex protecting the reference count of algorithm providers
+ * (active_devs)
+ */
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
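+/*
+ * Pre-compute the constant BD fields (mode, algorithm, key length and key
+ * address) once per tfm; per-request fields are filled in later when each
+ * element is built.
+ */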
+static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
+ struct sec_bd_info *req,
+ enum sec_cipher_alg alg)
+{
+ const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];
+
+ memset(req, 0, sizeof(*req));
+ req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
+ req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
+ req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
+ req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;
+
+ req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
+ req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
+}
+
+static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
+ const u8 *key,
+ unsigned int keylen,
+ enum sec_cipher_alg alg)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
+ struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->cipher_alg = alg;
+ memcpy(ctx->key, key, keylen);
+ sec_alg_skcipher_init_template(ctx, &ctx->req_template,
+ ctx->cipher_alg);
+}
+
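+/*
+ * Build the hardware scatter gather list: up to SEC_MAX_SGE_NUM entries
+ * per pool element, with elements chained through next_sgl (dma address)
+ * and next (cpu pointer).
+ */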
+static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
+ dma_addr_t *psec_sgl,
+ struct scatterlist *sgl,
+ int count,
+ struct sec_dev_info *info)
+{
+ struct sec_hw_sgl *sgl_current = NULL;
+ struct sec_hw_sgl *sgl_next;
+ dma_addr_t sgl_next_dma, sgl_current_dma;
+ struct scatterlist *sg;
+ int ret, sge_index, i;
+
+ if (!count)
+ return -EINVAL;
+
+ for_each_sg(sgl, sg, count, i) {
+ sge_index = i % SEC_MAX_SGE_NUM;
+ if (sge_index == 0) {
+ sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
+ GFP_KERNEL, &sgl_next_dma);
+ if (!sgl_next) {
+ ret = -ENOMEM;
+ goto err_free_hw_sgls;
+ }
+
+ if (!sgl_current) { /* First one */
+ *psec_sgl = sgl_next_dma;
+ *sec_sgl = sgl_next;
+ } else { /* Chained */
+ sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
+ sgl_current->next_sgl = sgl_next_dma;
+ sgl_current->next = sgl_next;
+ }
+ sgl_current = sgl_next;
+ }
+ sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
+ sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
+ sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
+ }
+ /* A full final element holds SEC_MAX_SGE_NUM entries, not 0 */
+ sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM ?
+ count % SEC_MAX_SGE_NUM : SEC_MAX_SGE_NUM;
+ sgl_current->next_sgl = 0;
+ (*sec_sgl)->entry_sum_in_chain = count;
+
+ return 0;
+
+err_free_hw_sgls:
+ /* Free each element with the dma handle it was allocated with */
+ sgl_current = *sec_sgl;
+ sgl_current_dma = *psec_sgl;
+ while (sgl_current) {
+ sgl_next = sgl_current->next;
+ sgl_next_dma = sgl_current->next_sgl;
+ dma_pool_free(info->hw_sgl_pool, sgl_current, sgl_current_dma);
+ sgl_current = sgl_next;
+ sgl_current_dma = sgl_next_dma;
+ }
+ *psec_sgl = 0;
+
+ return ret;
+}
+
+static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
+ dma_addr_t psec_sgl, struct sec_dev_info *info)
+{
+ struct sec_hw_sgl *sgl_current, *sgl_next;
+ dma_addr_t sgl_next_dma;
+
+ sgl_current = hw_sgl;
+ while (sgl_current) {
+ sgl_next = sgl_current->next;
+ sgl_next_dma = sgl_current->next_sgl;
+
+ /* Free each element with the dma handle it was allocated with */
+ dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
+
+ sgl_current = sgl_next;
+ psec_sgl = sgl_next_dma;
+ }
+}
+
+static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ enum sec_cipher_alg alg)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct device *dev = ctx->queue->dev_info->dev;
+
+ mutex_lock(&ctx->lock);
+ if (ctx->key) {
+ /* rekeying */
+ memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
+ } else {
+ /* new key */
+ ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+ &ctx->pkey, GFP_KERNEL);
+ if (!ctx->key) {
+ mutex_unlock(&ctx->lock);
+ return -ENOMEM;
+ }
+ }
+ mutex_unlock(&ctx->lock);
+ sec_alg_skcipher_init_context(tfm, key, keylen, alg);
+
+ return 0;
+}
+
+static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_ECB_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_ECB_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_ECB_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_CBC_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_CBC_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_CBC_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_CTR_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_CTR_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_CTR_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128 * 2:
+ alg = SEC_C_AES_XTS_128;
+ break;
+ case AES_KEYSIZE_256 * 2:
+ alg = SEC_C_AES_XTS_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
+}
+
+static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
+}
+
+static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE * 3)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen,
+ SEC_C_3DES_ECB_192_3KEY);
+}
+
+static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen,
+ SEC_C_3DES_CBC_192_3KEY);
+}
+
+static void sec_alg_free_el(struct sec_request_el *el,
+ struct sec_dev_info *info)
+{
+ sec_free_hw_sgl(el->out, el->dma_out, info);
+ sec_free_hw_sgl(el->in, el->dma_in, info);
+ kfree(el->sgl_in);
+ kfree(el->sgl_out);
+ kfree(el);
+}
+
+/* queuelock must be held */
+static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
+{
+ struct sec_request_el *el, *temp;
+ int ret = 0;
+
+ mutex_lock(&sec_req->lock);
+ list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
+ /*
+ * Add to the hardware queue only under the following
+ * circumstances:
+ * 1) Software and hardware queues are empty, so there are no
+ *    chain dependencies.
+ * 2) No dependencies as a new IV is supplied - (check the
+ *    software queue is empty to maintain ordering).
+ * 3) No dependencies because the mode does no chaining.
+ *
+ * In all other cases, first insert into the software queue,
+ * which is then drained as requests complete.
+ */
+ if (!queue->havesoftqueue ||
+ (kfifo_is_empty(&queue->softqueue) &&
+ sec_queue_empty(queue))) {
+ ret = sec_queue_send(queue, &el->req, sec_req);
+ if (ret == -EAGAIN) {
+ /*
+ * Wait until we can send, then try again.
+ * Dead code if we get here - it should not happen.
+ */
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+ } else {
+ kfifo_put(&queue->softqueue, el);
+ }
+ }
+err_unlock:
+ mutex_unlock(&sec_req->lock);
+
+ return ret;
+}
+
+static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
+ struct crypto_async_request *req_base)
+{
+ struct skcipher_request *skreq = container_of(req_base,
+ struct skcipher_request,
+ base);
+ struct sec_request *sec_req = skcipher_request_ctx(skreq);
+ struct sec_request *backlog_req;
+ struct sec_request_el *sec_req_el, *nextrequest;
+ struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
+ struct device *dev = ctx->queue->dev_info->dev;
+ int icv_or_skey_en, ret;
+ bool done;
+
+ sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
+ head);
+ icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
+ SEC_BD_W0_ICV_OR_SKEY_EN_S;
+ if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
+ dev_err(dev, "Got an invalid answer %lu %d\n",
+ sec_resp->w1 & SEC_BD_W1_BD_INVALID,
+ icv_or_skey_en);
+ sec_req->err = -EINVAL;
+ /*
+ * We need to muddle on to avoid getting stuck with elements
+ * on the queue. The error will be reported to the requester,
+ * which should be able to handle it appropriately.
+ */
+ }
+
+ mutex_lock(&ctx->queue->queuelock);
+ /* Put the IV in place for chained cases */
+ switch (ctx->cipher_alg) {
+ case SEC_C_AES_CBC_128:
+ case SEC_C_AES_CBC_192:
+ case SEC_C_AES_CBC_256:
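+ /*
+ * For CBC the next IV is the final ciphertext block, so copy it
+ * out of the destination list: dst when DE is set (distinct
+ * destination), otherwise the in-place source list.
+ */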
+ if (sec_req_el->req.w0 & SEC_BD_W0_DE)
+ sg_pcopy_to_buffer(sec_req_el->sgl_out,
+ sg_nents(sec_req_el->sgl_out),
+ skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ sec_req_el->el_length -
+ crypto_skcipher_ivsize(atfm));
+ else
+ sg_pcopy_to_buffer(sec_req_el->sgl_in,
+ sg_nents(sec_req_el->sgl_in),
+ skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ sec_req_el->el_length -
+ crypto_skcipher_ivsize(atfm));
+ /* No need to sync to the device as coherent DMA */
+ break;
+ case SEC_C_AES_CTR_128:
+ case SEC_C_AES_CTR_192:
+ case SEC_C_AES_CTR_256:
+ crypto_inc(skreq->iv, 16);
+ break;
+ default:
+ /* Do not update */
+ break;
+ }
+
+ if (ctx->queue->havesoftqueue &&
+ !kfifo_is_empty(&ctx->queue->softqueue) &&
+ sec_queue_empty(ctx->queue)) {
+ ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
+ if (ret <= 0)
+ dev_err(dev,
+ "Error getting next element from kfifo %d\n",
+ ret);
+ else
+ /* We know there is space so this cannot fail */
+ sec_queue_send(ctx->queue, &nextrequest->req,
+ nextrequest->sec_req);
+ } else if (!list_empty(&ctx->backlog)) {
+ /* Need to verify there is room first */
+ backlog_req = list_first_entry(&ctx->backlog,
+ typeof(*backlog_req),
+ backlog_head);
+ if (sec_queue_can_enqueue(ctx->queue,
+ backlog_req->num_elements) ||
+ (ctx->queue->havesoftqueue &&
+ kfifo_avail(&ctx->queue->softqueue) >
+ backlog_req->num_elements)) {
+ sec_send_request(backlog_req, ctx->queue);
+ backlog_req->req_base->complete(backlog_req->req_base,
+ -EINPROGRESS);
+ list_del(&backlog_req->backlog_head);
+ }
+ }
+ mutex_unlock(&ctx->queue->queuelock);
+
+ mutex_lock(&sec_req->lock);
+ list_del(&sec_req_el->head);
+ mutex_unlock(&sec_req->lock);
+ sec_alg_free_el(sec_req_el, ctx->queue->dev_info);
+
+ /*
+ * Request is done.
+ * The dance with the lock is needed because the completion may
+ * free sec_req (which contains the lock), so it must not be held
+ * when complete() is called.
+ */
+ mutex_lock(&sec_req->lock);
+ done = list_empty(&sec_req->elements);
+ mutex_unlock(&sec_req->lock);
+ if (done) {
+ if (crypto_skcipher_ivsize(atfm)) {
+ dma_unmap_single(dev, sec_req->dma_iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_TO_DEVICE);
+ }
+ dma_unmap_sg(dev, skreq->src, sec_req->len_in,
+ DMA_BIDIRECTIONAL);
+ if (skreq->src != skreq->dst)
+ dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
+ DMA_BIDIRECTIONAL);
+ skreq->base.complete(&skreq->base, sec_req->err);
+ }
+}
+
+void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
+{
+ struct sec_request *sec_req = shadow;
+
+ sec_req->cb(resp, sec_req->req_base);
+}
+
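+/*
+ * Split the request into SEC_REQ_LIMIT sized steps, e.g. a 70MB request
+ * becomes three steps of 32MB, 32MB and 6MB.
+ */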
+static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
+ int *steps)
+{
+ size_t *sizes;
+ int i;
+
+ /* Split into suitably sized blocks */
+ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
+ sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
+ if (!sizes)
+ return -ENOMEM;
+
+ for (i = 0; i < *steps - 1; i++)
+ sizes[i] = SEC_REQ_LIMIT;
+ sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
+ *split_sizes = sizes;
+
+ return 0;
+}
+
+static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
+ int steps, struct scatterlist ***splits,
+ int **splits_nents,
+ int sgl_len_in,
+ struct device *dev)
+{
+ int ret, count;
+
+ count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+ if (!count)
+ return -EINVAL;
+
+ *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
+ if (!*splits) {
+ ret = -ENOMEM;
+ goto err_unmap_sg;
+ }
+ *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
+ if (!*splits_nents) {
+ ret = -ENOMEM;
+ goto err_free_splits;
+ }
+
+ /* Split the now mapped scatterlist into steps pieces of split_sizes bytes */
+ ret = sg_split(sgl, count, 0, steps, split_sizes,
+ *splits, *splits_nents, GFP_KERNEL);
+ if (ret) {
+ ret = -ENOMEM;
+ goto err_free_splits_nents;
+ }
+
+ return 0;
+
+err_free_splits_nents:
+ kfree(*splits_nents);
+err_free_splits:
+ kfree(*splits);
+err_unmap_sg:
+ dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+
+ return ret;
+}
+
+/*
+ * Reverses the sec_map_and_split_sg call for messages not yet added to
+ * the queues.
+ */
+static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
+ struct scatterlist **splits, int *splits_nents,
+ int sgl_len_in, struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < steps; i++)
+ kfree(splits[i]);
+ kfree(splits_nents);
+ kfree(splits);
+
+ dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+}
+
+static struct sec_request_el
+*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
+ int el_size, bool different_dest,
+ struct scatterlist *sgl_in, int n_ents_in,
+ struct scatterlist *sgl_out, int n_ents_out,
+ struct sec_dev_info *info)
+{
+ struct sec_request_el *el;
+ struct sec_bd_info *req;
+ int ret;
+
+ el = kzalloc(sizeof(*el), GFP_KERNEL);
+ if (!el)
+ return ERR_PTR(-ENOMEM);
+ el->el_length = el_size;
+ req = &el->req;
+ memcpy(req, template, sizeof(*req));
+
+ req->w0 &= ~SEC_BD_W0_CIPHER_M;
+ if (encrypt)
+ req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
+ else
+ req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;
+
+ req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
+ req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
+ SEC_BD_W0_C_GRAN_SIZE_19_16_M;
+
+ req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
+ req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
+ SEC_BD_W0_C_GRAN_SIZE_21_20_M;
+
+ /* Writing whole u32 so no need to take care of masking */
+ req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
+ ((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
+ SEC_BD_W2_C_GRAN_SIZE_15_0_M);
+
+ req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
+ req->w1 |= SEC_BD_W1_ADDR_TYPE;
+
+ el->sgl_in = sgl_in;
+
+ ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
+ n_ents_in, info);
+ if (ret)
+ goto err_free_el;
+
+ req->data_addr_lo = lower_32_bits(el->dma_in);
+ req->data_addr_hi = upper_32_bits(el->dma_in);
+
+ if (different_dest) {
+ el->sgl_out = sgl_out;
+ ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
+ el->sgl_out,
+ n_ents_out, info);
+ if (ret)
+ goto err_free_hw_sgl_in;
+
+ req->w0 |= SEC_BD_W0_DE;
+ req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
+ req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
+
+ } else {
+ req->w0 &= ~SEC_BD_W0_DE;
+ req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
+ req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
+ }
+
+ return el;
+
+err_free_hw_sgl_in:
+ sec_free_hw_sgl(el->in, el->dma_in, info);
+err_free_el:
+ kfree(el);
+
+ return ERR_PTR(ret);
+}
+
+static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
+ bool encrypt)
+{
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
+ struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sec_queue *queue = ctx->queue;
+ struct sec_request *sec_req = skcipher_request_ctx(skreq);
+ struct sec_dev_info *info = queue->dev_info;
+ int i, ret, steps;
+ size_t *split_sizes;
+ struct scatterlist **splits_in;
+ struct scatterlist **splits_out = NULL;
+ int *splits_in_nents;
+ int *splits_out_nents = NULL;
+ struct sec_request_el *el, *temp;
+
+ mutex_init(&sec_req->lock);
+ sec_req->req_base = &skreq->base;
+ sec_req->err = 0;
+ /* SGL mapping out here to allow us to break it up as necessary */
+ sec_req->len_in = sg_nents(skreq->src);
+
+ ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
+ &steps);
+ if (ret)
+ return ret;
+ sec_req->num_elements = steps;
+ ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
+ &splits_in_nents, sec_req->len_in,
+ info->dev);
+ if (ret)
+ goto err_free_split_sizes;
+
+ if (skreq->src != skreq->dst) {
+ sec_req->len_out = sg_nents(skreq->dst);
+ ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
+ &splits_out, &splits_out_nents,
+ sec_req->len_out, info->dev);
+ if (ret)
+ goto err_unmap_in_sg;
+ }
+ /* Shared info stored in sec_req - applies to all BDs */
+ sec_req->tfm_ctx = ctx;
+ sec_req->cb = sec_skcipher_alg_callback;
+ INIT_LIST_HEAD(&sec_req->elements);
+
+ /*
+ * Future optimization.
+ * In the chaining case we can't use a dma pool bounce buffer,
+ * but where we know there is no chaining we could.
+ */
+ if (crypto_skcipher_ivsize(atfm)) {
+ sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
+ ret = -ENOMEM;
+ goto err_unmap_out_sg;
+ }
+ }
+
+ /* Set them all up then queue - cleaner error handling. */
+ for (i = 0; i < steps; i++) {
+ /* In the in-place (src == dst) case there are no output splits */
+ struct scatterlist *split = splits_out ? splits_out[i] : NULL;
+ int split_nents = splits_out_nents ? splits_out_nents[i] : 0;
+
+ el = sec_alg_alloc_and_fill_el(&ctx->req_template,
+ encrypt ? 1 : 0,
+ split_sizes[i],
+ skreq->src != skreq->dst,
+ splits_in[i], splits_in_nents[i],
+ split, split_nents, info);
+ if (IS_ERR(el)) {
+ ret = PTR_ERR(el);
+ goto err_free_elements;
+ }
+ el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
+ el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
+ el->sec_req = sec_req;
+ list_add_tail(&el->head, &sec_req->elements);
+ }
+
+ /*
+ * Only attempt to queue if the whole lot can fit in the queue -
+ * we can't successfully clean up after a partial queueing so this
+ * must succeed or fail atomically.
+ *
+ * Big hammer test of both software and hardware queues - could be
+ * more refined but this is unlikely to happen so no need.
+ */
+
+ /* Cleanup - all elements in pointer arrays have been copied */
+ kfree(splits_in_nents);
+ kfree(splits_in);
+ kfree(splits_out_nents);
+ kfree(splits_out);
+ kfree(split_sizes);
+
+ /* Grab a big lock for a long time to avoid concurrency issues */
+ mutex_lock(&queue->queuelock);
+
+ /*
+ * Can go on to queue if we have space in either:
+ * 1) The hardware queue and no software queue
+ * 2) The software queue
+ * AND there is nothing in the backlog. If there is backlog we
+ * have to only queue to the backlog queue and return busy.
+ */
+ if ((!sec_queue_can_enqueue(queue, steps) &&
+ (!queue->havesoftqueue ||
+ kfifo_avail(&queue->softqueue) < steps)) ||
+ !list_empty(&ctx->backlog)) {
+ if (skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+ list_add_tail(&sec_req->backlog_head, &ctx->backlog);
+ mutex_unlock(&queue->queuelock);
+ return -EBUSY;
+ }
+
+ ret = -EBUSY;
+ mutex_unlock(&queue->queuelock);
+ goto err_free_elements;
+ }
+ ret = sec_send_request(sec_req, queue);
+ mutex_unlock(&queue->queuelock);
+ if (ret)
+ goto err_free_elements;
+
+ return -EINPROGRESS;
+
+err_free_elements:
+ list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
+ list_del(&el->head);
+ sec_alg_free_el(el, info);
+ }
+ if (crypto_skcipher_ivsize(atfm))
+ dma_unmap_single(info->dev, sec_req->dma_iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_TO_DEVICE);
+err_unmap_out_sg:
+ if (skreq->src != skreq->dst)
+ sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
+ splits_out_nents, sec_req->len_out,
+ info->dev);
+err_unmap_in_sg:
+ sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
+ sec_req->len_in, info->dev);
+err_free_split_sizes:
+ kfree(split_sizes);
+
+ return ret;
+}
+
+static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
+{
+ return sec_alg_skcipher_crypto(req, true);
+}
+
+static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
+{
+ return sec_alg_skcipher_crypto(req, false);
+}
+
+static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->backlog);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));
+
+ ctx->queue = sec_queue_alloc_start_safe();
+ if (IS_ERR(ctx->queue))
+ return PTR_ERR(ctx->queue);
+
+ mutex_init(&ctx->queue->queuelock);
+ ctx->queue->havesoftqueue = false;
+
+ return 0;
+}
+
+static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct device *dev = ctx->queue->dev_info->dev;
+
+ if (ctx->key) {
+ memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
+ dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
+ ctx->pkey);
+ }
+ sec_queue_stop_release(ctx->queue);
+}
+
+static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ret = sec_alg_skcipher_init(tfm);
+ if (ret)
+ return ret;
+
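+ /*
+ * The soft queue buffers chained (IV dependent) elements that cannot
+ * yet be sent to a non-empty hardware queue.
+ */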
+ INIT_KFIFO(ctx->queue->softqueue);
+ ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
+ if (ret) {
+ sec_alg_skcipher_exit(tfm);
+ return ret;
+ }
+ ctx->queue->havesoftqueue = true;
+
+ return 0;
+}
+
+static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ kfifo_free(&ctx->queue->softqueue);
+ sec_alg_skcipher_exit(tfm);
+}
+
+static struct skcipher_alg sec_algs[] = {
+ {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "hisi_sec_aes_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_aes_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
+ }, {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "hisi_sec_aes_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_aes_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "hisi_sec_aes_ctr",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_aes_ctr,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "hisi_sec_aes_xts",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_aes_xts,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ /* Unable to find any test vectors so untested */
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "hisi_sec_des_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_des_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ }, {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "hisi_sec_des_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_des_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "hisi_sec_3des_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_3des_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "hisi_sec_3des_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_3des_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ }
+};
+
+int sec_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs != 1)
+ goto unlock;
+
+ ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+ if (ret)
+ --active_devs;
+unlock:
+ mutex_unlock(&algs_lock);
+
+ return ret;
+}
+
+void sec_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs != 0)
+ goto unlock;
+ crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+
+unlock:
+ mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
new file mode 100644
index 000000000000..c1ee4e7bf996
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -0,0 +1,1323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Hisilicon SEC units found on Hip06 and Hip07
+ *
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ */
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "sec_drv.h"
+
+#define SEC_QUEUE_AR_FROCE_ALLOC 0
+#define SEC_QUEUE_AR_FROCE_NOALLOC 1
+#define SEC_QUEUE_AR_FROCE_DIS 2
+
+#define SEC_QUEUE_AW_FROCE_ALLOC 0
+#define SEC_QUEUE_AW_FROCE_NOALLOC 1
+#define SEC_QUEUE_AW_FROCE_DIS 2
+
+/* SEC_ALGSUB registers */
+#define SEC_ALGSUB_CLK_EN_REG 0x03b8
+#define SEC_ALGSUB_CLK_DIS_REG 0x03bc
+#define SEC_ALGSUB_CLK_ST_REG 0x535c
+#define SEC_ALGSUB_RST_REQ_REG 0x0aa8
+#define SEC_ALGSUB_RST_DREQ_REG 0x0aac
+#define SEC_ALGSUB_RST_ST_REG 0x5a54
+#define SEC_ALGSUB_RST_ST_IS_RST BIT(0)
+
+#define SEC_ALGSUB_BUILD_RST_REQ_REG 0x0ab8
+#define SEC_ALGSUB_BUILD_RST_DREQ_REG 0x0abc
+#define SEC_ALGSUB_BUILD_RST_ST_REG 0x5a5c
+#define SEC_ALGSUB_BUILD_RST_ST_IS_RST BIT(0)
+
+#define SEC_SAA_BASE 0x00001000UL
+
+/* SEC_SAA registers */
+#define SEC_SAA_CTRL_REG(x) ((x) * SEC_SAA_ADDR_SIZE)
+#define SEC_SAA_CTRL_GET_QM_EN BIT(0)
+
+#define SEC_ST_INTMSK1_REG 0x0200
+#define SEC_ST_RINT1_REG 0x0400
+#define SEC_ST_INTSTS1_REG 0x0600
+#define SEC_BD_MNG_STAT_REG 0x0800
+#define SEC_PARSING_STAT_REG 0x0804
+#define SEC_LOAD_TIME_OUT_CNT_REG 0x0808
+#define SEC_CORE_WORK_TIME_OUT_CNT_REG 0x080c
+#define SEC_BACK_TIME_OUT_CNT_REG 0x0810
+#define SEC_BD1_PARSING_RD_TIME_OUT_CNT_REG 0x0814
+#define SEC_BD1_PARSING_WR_TIME_OUT_CNT_REG 0x0818
+#define SEC_BD2_PARSING_RD_TIME_OUT_CNT_REG 0x081c
+#define SEC_BD2_PARSING_WR_TIME_OUT_CNT_REG 0x0820
+#define SEC_SAA_ACC_REG 0x083c
+#define SEC_BD_NUM_CNT_IN_SEC_REG 0x0858
+#define SEC_LOAD_WORK_TIME_CNT_REG 0x0860
+#define SEC_CORE_WORK_WORK_TIME_CNT_REG 0x0864
+#define SEC_BACK_WORK_TIME_CNT_REG 0x0868
+#define SEC_SAA_IDLE_TIME_CNT_REG 0x086c
+#define SEC_SAA_CLK_CNT_REG 0x0870
+
+/* SEC_COMMON registers */
+#define SEC_CLK_EN_REG 0x0000
+#define SEC_CTRL_REG 0x0004
+
+#define SEC_COMMON_CNT_CLR_CE_REG 0x0008
+#define SEC_COMMON_CNT_CLR_CE_CLEAR BIT(0)
+#define SEC_COMMON_CNT_CLR_CE_SNAP_EN BIT(1)
+
+#define SEC_SECURE_CTRL_REG 0x000c
+#define SEC_AXI_CACHE_CFG_REG 0x0010
+#define SEC_AXI_QOS_CFG_REG 0x0014
+#define SEC_IPV4_MASK_TABLE_REG 0x0020
+#define SEC_IPV6_MASK_TABLE_X_REG(x) (0x0024 + (x) * 4)
+#define SEC_FSM_MAX_CNT_REG 0x0064
+
+#define SEC_CTRL2_REG 0x0068
+#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M GENMASK(3, 0)
+#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S 0
+#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M GENMASK(6, 4)
+#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S 4
+#define SEC_CTRL2_CLK_GATE_EN BIT(7)
+#define SEC_CTRL2_ENDIAN_BD BIT(8)
+#define SEC_CTRL2_ENDIAN_BD_TYPE BIT(9)
+
+#define SEC_CNT_PRECISION_CFG_REG 0x006c
+#define SEC_DEBUG_BD_CFG_REG 0x0070
+#define SEC_DEBUG_BD_CFG_WB_NORMAL BIT(0)
+#define SEC_DEBUG_BD_CFG_WB_EN BIT(1)
+
+#define SEC_Q_SIGHT_SEL 0x0074
+#define SEC_Q_SIGHT_HIS_CLR 0x0078
+#define SEC_Q_VMID_CFG_REG(q) (0x0100 + (q) * 4)
+#define SEC_Q_WEIGHT_CFG_REG(q) (0x200 + (q) * 4)
+#define SEC_STAT_CLR_REG 0x0a00
+#define SEC_SAA_IDLE_CNT_CLR_REG 0x0a04
+#define SEC_QM_CPL_Q_IDBUF_DFX_CFG_REG 0x0b00
+#define SEC_QM_CPL_Q_IDBUF_DFX_RESULT_REG 0x0b04
+#define SEC_QM_BD_DFX_CFG_REG 0x0b08
+#define SEC_QM_BD_DFX_RESULT_REG 0x0b0c
+#define SEC_QM_BDID_DFX_RESULT_REG 0x0b10
+#define SEC_QM_BD_DFIFO_STATUS_REG 0x0b14
+#define SEC_QM_BD_DFX_CFG2_REG 0x0b1c
+#define SEC_QM_BD_DFX_RESULT2_REG 0x0b20
+#define SEC_QM_BD_IDFIFO_STATUS_REG 0x0b18
+#define SEC_QM_BD_DFIFO_STATUS2_REG 0x0b28
+#define SEC_QM_BD_IDFIFO_STATUS2_REG 0x0b2c
+
+#define SEC_HASH_IPV4_MASK 0xfff00000
+#define SEC_MAX_SAA_NUM 0xa
+#define SEC_SAA_ADDR_SIZE 0x1000
+
+#define SEC_Q_INIT_REG 0x0
+#define SEC_Q_INIT_WO_STAT_CLEAR 0x2
+#define SEC_Q_INIT_AND_STAT_CLEAR 0x3
+
+#define SEC_Q_CFG_REG 0x8
+#define SEC_Q_CFG_REORDER BIT(0)
+
+#define SEC_Q_PROC_NUM_CFG_REG 0x10
+#define SEC_QUEUE_ENB_REG 0x18
+
+#define SEC_Q_DEPTH_CFG_REG 0x50
+#define SEC_Q_DEPTH_CFG_DEPTH_M GENMASK(11, 0)
+#define SEC_Q_DEPTH_CFG_DEPTH_S 0
+
+#define SEC_Q_BASE_HADDR_REG 0x54
+#define SEC_Q_BASE_LADDR_REG 0x58
+#define SEC_Q_WR_PTR_REG 0x5c
+#define SEC_Q_OUTORDER_BASE_HADDR_REG 0x60
+#define SEC_Q_OUTORDER_BASE_LADDR_REG 0x64
+#define SEC_Q_OUTORDER_RD_PTR_REG 0x68
+#define SEC_Q_OT_TH_REG 0x6c
+
+#define SEC_Q_ARUSER_CFG_REG 0x70
+#define SEC_Q_ARUSER_CFG_FA BIT(0)
+#define SEC_Q_ARUSER_CFG_FNA BIT(1)
+#define SEC_Q_ARUSER_CFG_RINVLD BIT(2)
+#define SEC_Q_ARUSER_CFG_PKG BIT(3)
+
+#define SEC_Q_AWUSER_CFG_REG 0x74
+#define SEC_Q_AWUSER_CFG_FA BIT(0)
+#define SEC_Q_AWUSER_CFG_FNA BIT(1)
+#define SEC_Q_AWUSER_CFG_PKG BIT(2)
+
+#define SEC_Q_ERR_BASE_HADDR_REG 0x7c
+#define SEC_Q_ERR_BASE_LADDR_REG 0x80
+#define SEC_Q_CFG_VF_NUM_REG 0x84
+#define SEC_Q_SOFT_PROC_PTR_REG 0x88
+#define SEC_Q_FAIL_INT_MSK_REG 0x300
+#define SEC_Q_FLOW_INT_MKS_REG 0x304
+#define SEC_Q_FAIL_RINT_REG 0x400
+#define SEC_Q_FLOW_RINT_REG 0x404
+#define SEC_Q_FAIL_INT_STATUS_REG 0x500
+#define SEC_Q_FLOW_INT_STATUS_REG 0x504
+#define SEC_Q_STATUS_REG 0x600
+#define SEC_Q_RD_PTR_REG 0x604
+#define SEC_Q_PRO_PTR_REG 0x608
+#define SEC_Q_OUTORDER_WR_PTR_REG 0x60c
+#define SEC_Q_OT_CNT_STATUS_REG 0x610
+#define SEC_Q_INORDER_BD_NUM_ST_REG 0x650
+#define SEC_Q_INORDER_GET_FLAG_ST_REG 0x654
+#define SEC_Q_INORDER_ADD_FLAG_ST_REG 0x658
+#define SEC_Q_INORDER_TASK_INT_NUM_LEFT_ST_REG 0x65c
+#define SEC_Q_RD_DONE_PTR_REG 0x660
+#define SEC_Q_CPL_Q_BD_NUM_ST_REG 0x700
+#define SEC_Q_CPL_Q_PTR_ST_REG 0x704
+#define SEC_Q_CPL_Q_H_ADDR_ST_REG 0x708
+#define SEC_Q_CPL_Q_L_ADDR_ST_REG 0x70c
+#define SEC_Q_CPL_TASK_INT_NUM_LEFT_ST_REG 0x710
+#define SEC_Q_WRR_ID_CHECK_REG 0x714
+#define SEC_Q_CPLQ_FULL_CHECK_REG 0x718
+#define SEC_Q_SUCCESS_BD_CNT_REG 0x800
+#define SEC_Q_FAIL_BD_CNT_REG 0x804
+#define SEC_Q_GET_BD_CNT_REG 0x808
+#define SEC_Q_IVLD_CNT_REG 0x80c
+#define SEC_Q_BD_PROC_GET_CNT_REG 0x810
+#define SEC_Q_BD_PROC_DONE_CNT_REG 0x814
+#define SEC_Q_LAT_CLR_REG 0x850
+#define SEC_Q_PKT_LAT_MAX_REG 0x854
+#define SEC_Q_PKT_LAT_AVG_REG 0x858
+#define SEC_Q_PKT_LAT_MIN_REG 0x85c
+#define SEC_Q_ID_CLR_CFG_REG 0x900
+#define SEC_Q_1ST_BD_ERR_ID_REG 0x904
+#define SEC_Q_1ST_AUTH_FAIL_ID_REG 0x908
+#define SEC_Q_1ST_RD_ERR_ID_REG 0x90c
+#define SEC_Q_1ST_ECC2_ERR_ID_REG 0x910
+#define SEC_Q_1ST_IVLD_ID_REG 0x914
+#define SEC_Q_1ST_BD_WR_ERR_ID_REG 0x918
+#define SEC_Q_1ST_ERR_BD_WR_ERR_ID_REG 0x91c
+#define SEC_Q_1ST_BD_MAC_WR_ERR_ID_REG 0x920
+
+struct sec_debug_bd_info {
+#define SEC_DEBUG_BD_INFO_SOFT_ERR_CHECK_M GENMASK(22, 0)
+ u32 soft_err_check;
+#define SEC_DEBUG_BD_INFO_HARD_ERR_CHECK_M GENMASK(9, 0)
+ u32 hard_err_check;
+ u32 icv_mac1st_word;
+#define SEC_DEBUG_BD_INFO_GET_ID_M GENMASK(19, 0)
+ u32 sec_get_id;
+ /* W4---W15 */
+ u32 reserv_left[12];
+};
+
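+/* Completion record the hw writes to the cq ring: queue id and error flag */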
+struct sec_out_bd_info {
+#define SEC_OUT_BD_INFO_Q_ID_M GENMASK(11, 0)
+#define SEC_OUT_BD_INFO_ECC_2BIT_ERR BIT(14)
+ u16 data;
+};
+
+#define SEC_MAX_DEVICES 8
+static struct sec_dev_info *sec_devices[SEC_MAX_DEVICES];
+static DEFINE_MUTEX(sec_id_lock);
+
+static int sec_queue_map_io(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ struct resource *res;
+
+ res = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM,
+ 2 + queue->queue_id);
+ if (!res) {
+ dev_err(dev, "Failed to get queue %d memory resource\n",
+ queue->queue_id);
+ return -ENOMEM;
+ }
+ queue->regs = ioremap(res->start, resource_size(res));
+ if (!queue->regs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_queue_unmap_io(struct sec_queue *queue)
+{
+ iounmap(queue->regs);
+}
+
+static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
+{
+ void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (ar_pkg)
+ regval |= SEC_Q_ARUSER_CFG_PKG;
+ else
+ regval &= ~SEC_Q_ARUSER_CFG_PKG;
+ writel_relaxed(regval, addr);
+
+ return 0;
+}
+
+static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
+{
+ void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (aw_pkg)
+ regval |= SEC_Q_AWUSER_CFG_PKG;
+ else
+ regval &= ~SEC_Q_AWUSER_CFG_PKG;
+ writel_relaxed(regval, addr);
+
+ return 0;
+}
+
+static int sec_clk_en(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ u32 i = 0;
+
+ writel_relaxed(0x7, base + SEC_ALGSUB_CLK_EN_REG);
+ do {
+ usleep_range(1000, 10000);
+ if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0x7)
+ return 0;
+ i++;
+ } while (i < 10);
+ dev_err(info->dev, "sec clock enable fail!\n");
+
+ return -EIO;
+}
+
+static int sec_clk_dis(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ u32 i = 0;
+
+ writel_relaxed(0x7, base + SEC_ALGSUB_CLK_DIS_REG);
+ do {
+ usleep_range(1000, 10000);
+ if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0)
+ return 0;
+ i++;
+ } while (i < 10);
+ dev_err(info->dev, "sec clock disable fail!\n");
+
+ return -EIO;
+}
+
+static int sec_reset_whole_module(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ bool is_reset, b_is_reset;
+ u32 i = 0;
+
+ writel_relaxed(1, base + SEC_ALGSUB_RST_REQ_REG);
+ writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_REQ_REG);
+ while (1) {
+ usleep_range(1000, 10000);
+ is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
+ SEC_ALGSUB_RST_ST_IS_RST;
+ b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
+ SEC_ALGSUB_BUILD_RST_ST_IS_RST;
+ if (is_reset && b_is_reset)
+ break;
+ i++;
+ if (i > 10) {
+ dev_err(info->dev, "Reset req failed\n");
+ return -EIO;
+ }
+ }
+
+ i = 0;
+ writel_relaxed(1, base + SEC_ALGSUB_RST_DREQ_REG);
+ writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_DREQ_REG);
+ while (1) {
+ usleep_range(1000, 10000);
+ is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
+ SEC_ALGSUB_RST_ST_IS_RST;
+ b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
+ SEC_ALGSUB_BUILD_RST_ST_IS_RST;
+ if (!is_reset && !b_is_reset)
+ break;
+
+ i++;
+ if (i > 10) {
+ dev_err(info->dev, "Reset dreq failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static void sec_bd_endian_little(struct sec_dev_info *info)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~(SEC_CTRL2_ENDIAN_BD | SEC_CTRL2_ENDIAN_BD_TYPE);
+ writel_relaxed(regval, addr);
+}
+
+/*
+ * sec_cache_config - configure optimum cache placement
+ */
+static void sec_cache_config(struct sec_dev_info *info)
+{
+ struct iommu_domain *domain;
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL_REG;
+
+ domain = iommu_get_domain_for_dev(info->dev);
+
+ /* Check that translation is occurring */
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
+ writel_relaxed(0x44cf9e, addr);
+ else
+ writel_relaxed(0x4cfd9, addr);
+}
+
+static void sec_data_axiwr_otsd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
+ regval |= (cfg << SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S) &
+ SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_data_axird_otsd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
+ regval |= (cfg << SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S) &
+ SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_clk_gate_en(struct sec_dev_info *info, bool clkgate)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (clkgate)
+ regval |= SEC_CTRL2_CLK_GATE_EN;
+ else
+ regval &= ~SEC_CTRL2_CLK_GATE_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_comm_cnt_cfg(struct sec_dev_info *info, bool clr_ce)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (clr_ce)
+ regval |= SEC_COMMON_CNT_CLR_CE_CLEAR;
+ else
+ regval &= ~SEC_COMMON_CNT_CLR_CE_CLEAR;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_commsnap_en(struct sec_dev_info *info, bool snap_en)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (snap_en)
+ regval |= SEC_COMMON_CNT_CLR_CE_SNAP_EN;
+ else
+ regval &= ~SEC_COMMON_CNT_CLR_CE_SNAP_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
+{
+ void __iomem *base = info->regs[SEC_SAA];
+ int i;
+
+ for (i = 0; i < 10; i++)
+ writel_relaxed(hash_mask[0],
+ base + SEC_IPV6_MASK_TABLE_X_REG(i));
+}
+
+static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
+{
+ if (hash_mask & SEC_HASH_IPV4_MASK) {
+ dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n ");
+ return -EINVAL;
+ }
+
+ writel_relaxed(hash_mask,
+ info->regs[SEC_SAA] + SEC_IPV4_MASK_TABLE_REG);
+
+ return 0;
+}
+
+static void sec_set_dbg_bd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_DEBUG_BD_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ /* Always disable write back of normal bd */
+ regval &= ~SEC_DEBUG_BD_CFG_WB_NORMAL;
+
+ if (cfg)
+ regval &= ~SEC_DEBUG_BD_CFG_WB_EN;
+ else
+ regval |= SEC_DEBUG_BD_CFG_WB_EN;
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_saa_getqm_en(struct sec_dev_info *info, u32 saa_indx, u32 en)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_SAA_BASE +
+ SEC_SAA_CTRL_REG(saa_indx);
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (en)
+ regval |= SEC_SAA_CTRL_GET_QM_EN;
+ else
+ regval &= ~SEC_SAA_CTRL_GET_QM_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_saa_int_mask(struct sec_dev_info *info, u32 saa_indx,
+ u32 saa_int_mask)
+{
+ writel_relaxed(saa_int_mask,
+ info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_ST_INTMSK1_REG +
+ saa_indx * SEC_SAA_ADDR_SIZE);
+}
+
+static void sec_streamid(struct sec_dev_info *info, int i)
+{
+ #define SEC_SID 0x600
+ #define SEC_VMID 0
+
+ writel_relaxed((SEC_VMID | ((SEC_SID & 0xffff) << 8)),
+ info->regs[SEC_SAA] + SEC_Q_VMID_CFG_REG(i));
+}
+
+static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
+{
+ void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (alloc == SEC_QUEUE_AR_FROCE_ALLOC) {
+ regval |= SEC_Q_ARUSER_CFG_FA;
+ regval &= ~SEC_Q_ARUSER_CFG_FNA;
+ } else {
+ regval &= ~SEC_Q_ARUSER_CFG_FA;
+ regval |= SEC_Q_ARUSER_CFG_FNA;
+ }
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
+{
+ void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (alloc == SEC_QUEUE_AW_FROCE_ALLOC) {
+ regval |= SEC_Q_AWUSER_CFG_FA;
+ regval &= ~SEC_Q_AWUSER_CFG_FNA;
+ } else {
+ regval &= ~SEC_Q_AWUSER_CFG_FA;
+ regval |= SEC_Q_AWUSER_CFG_FNA;
+ }
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
+{
+ void __iomem *base = queue->regs;
+ u32 regval;
+
+ regval = readl_relaxed(base + SEC_Q_CFG_REG);
+ if (reorder)
+ regval |= SEC_Q_CFG_REORDER;
+ else
+ regval &= ~SEC_Q_CFG_REORDER;
+ writel_relaxed(regval, base + SEC_Q_CFG_REG);
+}
+
+static void sec_queue_depth(struct sec_queue *queue, u32 depth)
+{
+ void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_Q_DEPTH_CFG_DEPTH_M;
+ regval |= (depth << SEC_Q_DEPTH_CFG_DEPTH_S) & SEC_Q_DEPTH_CFG_DEPTH_M;
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
+}
+
+static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr),
+ queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr),
+ queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
+}
+
+static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr),
+ queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr),
+ queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
+}
+
+static void sec_queue_irq_disable(struct sec_queue *queue)
+{
+ writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
+}
+
+static void sec_queue_irq_enable(struct sec_queue *queue)
+{
+ writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
+}
+
+static void sec_queue_abn_irq_disable(struct sec_queue *queue)
+{
+ writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
+}
+
+static void sec_queue_stop(struct sec_queue *queue)
+{
+ disable_irq(queue->task_irq);
+ sec_queue_irq_disable(queue);
+ writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
+}
+
+static void sec_queue_start(struct sec_queue *queue)
+{
+ sec_queue_irq_enable(queue);
+ enable_irq(queue->task_irq);
+ queue->expected = 0;
+ writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
+ writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
+}
+
+static struct sec_queue *sec_alloc_queue(struct sec_dev_info *info)
+{
+ int i;
+
+ mutex_lock(&info->dev_lock);
+
+ /* Get the first idle queue in the SEC device */
+ for (i = 0; i < SEC_Q_NUM; i++)
+ if (!info->queues[i].in_use) {
+ info->queues[i].in_use = true;
+ info->queues_in_use++;
+ mutex_unlock(&info->dev_lock);
+
+ return &info->queues[i];
+ }
+ mutex_unlock(&info->dev_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int sec_queue_free(struct sec_queue *queue)
+{
+ struct sec_dev_info *info = queue->dev_info;
+
+ if (queue->queue_id >= SEC_Q_NUM) {
+ dev_err(info->dev, "No queue %d\n", queue->queue_id);
+ return -ENODEV;
+ }
+
+ if (!queue->in_use) {
+ dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&info->dev_lock);
+ queue->in_use = false;
+ info->queues_in_use--;
+ mutex_unlock(&info->dev_lock);
+
+ return 0;
+}
+
+static irqreturn_t sec_isr_handle_th(int irq, void *q)
+{
+ sec_queue_irq_disable(q);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t sec_isr_handle(int irq, void *q)
+{
+ struct sec_queue *queue = q;
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+ struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
+ struct sec_out_bd_info *outorder_msg;
+ struct sec_bd_info *msg;
+ u32 ooo_read, ooo_write;
+ void __iomem *base = queue->regs;
+ int q_id;
+
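+ /*
+ * Completions may be reported out of order in the cq ring; mark
+ * each in the unprocessed bitmap and only run callbacks in
+ * submission order, advancing from queue->expected.
+ */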
+ ooo_read = readl(base + SEC_Q_OUTORDER_RD_PTR_REG);
+ ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
+ outorder_msg = cq_ring->vaddr + ooo_read;
+ q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
+ msg = msg_ring->vaddr + q_id;
+
+ while ((ooo_write != ooo_read) && msg->w0 & SEC_BD_W0_DONE) {
+ /*
+ * Must be before callback otherwise blocks adding other chained
+ * elements
+ */
+ set_bit(q_id, queue->unprocessed);
+ if (q_id == queue->expected)
+ while (test_bit(queue->expected, queue->unprocessed)) {
+ clear_bit(queue->expected, queue->unprocessed);
+ msg = msg_ring->vaddr + queue->expected;
+ msg->w0 &= ~SEC_BD_W0_DONE;
+ msg_ring->callback(msg,
+ queue->shadow[queue->expected]);
+ queue->shadow[queue->expected] = NULL;
+ queue->expected = (queue->expected + 1) %
+ SEC_QUEUE_LEN;
+ atomic_dec(&msg_ring->used);
+ }
+
+ ooo_read = (ooo_read + 1) % SEC_QUEUE_LEN;
+ writel(ooo_read, base + SEC_Q_OUTORDER_RD_PTR_REG);
+ ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
+ outorder_msg = cq_ring->vaddr + ooo_read;
+ q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
+ msg = msg_ring->vaddr + q_id;
+ }
+
+ sec_queue_irq_enable(queue);
+
+ return IRQ_HANDLED;
+}
+
+static int sec_queue_irq_init(struct sec_queue *queue)
+{
+ struct sec_dev_info *info = queue->dev_info;
+ int irq = queue->task_irq;
+ int ret;
+
+ ret = request_threaded_irq(irq, sec_isr_handle_th, sec_isr_handle,
+ IRQF_TRIGGER_RISING, queue->name, queue);
+ if (ret) {
+ dev_err(info->dev, "request irq(%d) failed %d\n", irq, ret);
+ return ret;
+ }
+ disable_irq(irq);
+
+ return 0;
+}
+
+static int sec_queue_irq_uninit(struct sec_queue *queue)
+{
+ free_irq(queue->task_irq, queue);
+
+ return 0;
+}
+
+static struct sec_dev_info *sec_device_get(void)
+{
+ struct sec_dev_info *sec_dev = NULL;
+ struct sec_dev_info *this_sec_dev;
+ int least_busy_n = SEC_Q_NUM + 1;
+ int i;
+
+ /* Find which one is least busy and use that first */
+ for (i = 0; i < SEC_MAX_DEVICES; i++) {
+ this_sec_dev = sec_devices[i];
+ if (this_sec_dev &&
+ this_sec_dev->queues_in_use < least_busy_n) {
+ least_busy_n = this_sec_dev->queues_in_use;
+ sec_dev = this_sec_dev;
+ }
+ }
+
+ return sec_dev;
+}
+
+static struct sec_queue *sec_queue_alloc_start(struct sec_dev_info *info)
+{
+ struct sec_queue *queue;
+
+ queue = sec_alloc_queue(info);
+ if (IS_ERR(queue)) {
+ dev_err(info->dev, "alloc sec queue failed! %ld\n",
+ PTR_ERR(queue));
+ return queue;
+ }
+
+ sec_queue_start(queue);
+
+ return queue;
+}
+
+/**
+ * sec_queue_alloc_start_safe() - get a hw queue from an appropriate instance
+ *
+ * This function does extremely simplistic load balancing. It does not take into
+ * account NUMA locality of the accelerator, or which cpu has requested the
+ * queue. Future work may focus on optimizing this in order to improve full
+ * machine throughput.
+ */
+struct sec_queue *sec_queue_alloc_start_safe(void)
+{
+ struct sec_dev_info *info;
+ struct sec_queue *queue = ERR_PTR(-ENODEV);
+
+ mutex_lock(&sec_id_lock);
+ info = sec_device_get();
+ if (!info)
+ goto unlock;
+
+ queue = sec_queue_alloc_start(info);
+
+unlock:
+ mutex_unlock(&sec_id_lock);
+
+ return queue;
+}
+
+/**
+ * sec_queue_stop_release() - free up a hw queue for reuse
+ * @queue: The queue we are done with.
+ *
+ * This will stop the current queue, terminating any transactions
+ * that are in flight, and return it to the pool of available hw queues.
+ */
+int sec_queue_stop_release(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ int ret;
+
+ sec_queue_stop(queue);
+
+ ret = sec_queue_free(queue);
+ if (ret)
+ dev_err(dev, "Releasing queue failed %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * sec_queue_empty() - Is this hardware queue currently empty.
+ * @queue: The queue to check
+ *
+ * We need to know if we have an empty queue for some of the chaining modes
+ * as if it is not empty we may need to hold the message in a software queue
+ * until the hw queue is drained.
+ */
+bool sec_queue_empty(struct sec_queue *queue)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+
+ return !atomic_read(&msg_ring->used);
+}
+
+/**
+ * sec_queue_send() - queue up a single operation in the hw queue
+ * @queue: The queue in which to put the message
+ * @msg: The message
+ * @ctx: Context to be put in the shadow array and passed back to cb on result.
+ *
+ * This function will return -EAGAIN if the queue is currently full.
+ */
+int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+ void __iomem *base = queue->regs;
+ u32 write, read;
+
+ mutex_lock(&msg_ring->lock);
+ read = readl(base + SEC_Q_RD_PTR_REG);
+ write = readl(base + SEC_Q_WR_PTR_REG);
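+ /* write == read is either empty or full; the used count disambiguates */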
+ if (write == read && atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
+ mutex_unlock(&msg_ring->lock);
+ return -EAGAIN;
+ }
+ memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
+ queue->shadow[write] = ctx;
+ write = (write + 1) % SEC_QUEUE_LEN;
+
+ /* Ensure content updated before queue advance */
+ wmb();
+ writel(write, base + SEC_Q_WR_PTR_REG);
+
+ atomic_inc(&msg_ring->used);
+ mutex_unlock(&msg_ring->lock);
+
+ return 0;
+}
+
+bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+
+ return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num;
+}
+
+static void sec_queue_hw_init(struct sec_queue *queue)
+{
+ sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
+ sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC);
+ sec_queue_ar_pkgattr(queue, 1);
+ sec_queue_aw_pkgattr(queue, 1);
+
+ /* Enable out of order queue */
+ sec_queue_reorder(queue, true);
+
+ /* Interrupt after a single complete element */
+ writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);
+
+ sec_queue_depth(queue, SEC_QUEUE_LEN - 1);
+
+ sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);
+
+ sec_queue_outorder_addr(queue, queue->ring_cq.paddr);
+
+ sec_queue_errbase_addr(queue, queue->ring_db.paddr);
+
+ writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);
+
+ sec_queue_abn_irq_disable(queue);
+ sec_queue_irq_disable(queue);
+ writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
+}
+
+static int sec_hw_init(struct sec_dev_info *info)
+{
+ struct iommu_domain *domain;
+ u32 sec_ipv4_mask = 0;
+ u32 sec_ipv6_mask[10] = {};
+ u32 i;
+ int ret;
+
+ domain = iommu_get_domain_for_dev(info->dev);
+
+ /*
+ * Enable all available processing unit clocks.
+ * Only the first cluster is usable with translations.
+ */
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
+ info->num_saas = 5;
+ else
+ info->num_saas = 10;
+
+ writel_relaxed(GENMASK(info->num_saas - 1, 0),
+ info->regs[SEC_SAA] + SEC_CLK_EN_REG);
+
+ /* 32 bit little endian */
+ sec_bd_endian_little(info);
+
+ sec_cache_config(info);
+
+ /* Data axi port write and read outstanding config as per datasheet */
+ sec_data_axiwr_otsd_cfg(info, 0x7);
+ sec_data_axird_otsd_cfg(info, 0x7);
+
+ /* Enable clock gating */
+ sec_clk_gate_en(info, true);
+
+ /* Configure the CNT_CYC register as not clear-on-read */
+ sec_comm_cnt_cfg(info, false);
+
+ /* Leave the common counter snapshot function disabled */
+ sec_commsnap_en(info, false);
+
+ writel_relaxed((u32)~0, info->regs[SEC_SAA] + SEC_FSM_MAX_CNT_REG);
+
+ ret = sec_ipv4_hashmask(info, sec_ipv4_mask);
+ if (ret) {
+ dev_err(info->dev, "Failed to set ipv4 hashmask %d\n", ret);
+ return -EIO;
+ }
+
+ sec_ipv6_hashmask(info, sec_ipv6_mask);
+
+ /* do not use debug bd */
+ sec_set_dbg_bd_cfg(info, 0);
+
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) {
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ sec_streamid(info, i);
+ /* Same QoS for all queues */
+ writel_relaxed(0x3f,
+ info->regs[SEC_SAA] +
+ SEC_Q_WEIGHT_CFG_REG(i));
+ }
+ }
+
+ for (i = 0; i < info->num_saas; i++) {
+ sec_saa_getqm_en(info, i, 1);
+ sec_saa_int_mask(info, i, 0);
+ }
+
+ return 0;
+}
+
+static void sec_hw_exit(struct sec_dev_info *info)
+{
+ int i;
+
+ for (i = 0; i < SEC_MAX_SAA_NUM; i++) {
+ sec_saa_int_mask(info, i, (u32)~0);
+ sec_saa_getqm_en(info, i, 0);
+ }
+}
+
+static void sec_queue_base_init(struct sec_dev_info *info,
+ struct sec_queue *queue, int queue_id)
+{
+ queue->dev_info = info;
+ queue->queue_id = queue_id;
+ snprintf(queue->name, sizeof(queue->name),
+ "%s_%d", dev_name(info->dev), queue->queue_id);
+}
+
+static int sec_map_io(struct sec_dev_info *info, struct platform_device *pdev)
+{
+ struct resource *res;
+ int i;
+
+ for (i = 0; i < SEC_NUM_ADDR_REGIONS; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+
+ if (!res) {
+ dev_err(info->dev, "Memory resource %d not found\n", i);
+ return -EINVAL;
+ }
+
+ info->regs[i] = devm_ioremap(info->dev, res->start,
+ resource_size(res));
+ if (!info->regs[i]) {
+ dev_err(info->dev,
+ "Memory resource %d could not be remapped\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sec_base_init(struct sec_dev_info *info,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ ret = sec_map_io(info, pdev);
+ if (ret)
+ return ret;
+
+ ret = sec_clk_en(info);
+ if (ret)
+ return ret;
+
+ ret = sec_reset_whole_module(info);
+ if (ret)
+ goto sec_clk_disable;
+
+ ret = sec_hw_init(info);
+ if (ret)
+ goto sec_clk_disable;
+
+ return 0;
+
+sec_clk_disable:
+ sec_clk_dis(info);
+
+ return ret;
+}
+
+static void sec_base_exit(struct sec_dev_info *info)
+{
+ sec_hw_exit(info);
+ sec_clk_dis(info);
+}
+
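+/* Per-queue rings, each rounded up to a whole number of pages */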
+#define SEC_Q_CMD_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE)
+#define SEC_Q_CQ_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE)
+#define SEC_Q_DB_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE)
+
+static int sec_queue_res_cfg(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
+ struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
+ struct sec_queue_ring_db *ring_db = &queue->ring_db;
+ int ret;
+
+ ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
+ &ring_cmd->paddr,
+ GFP_KERNEL);
+ if (!ring_cmd->vaddr)
+ return -ENOMEM;
+
+ atomic_set(&ring_cmd->used, 0);
+ mutex_init(&ring_cmd->lock);
+ ring_cmd->callback = sec_alg_callback;
+
+ ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
+ &ring_cq->paddr,
+ GFP_KERNEL);
+ if (!ring_cq->vaddr) {
+ ret = -ENOMEM;
+ goto err_free_ring_cmd;
+ }
+
+ ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
+ &ring_db->paddr,
+ GFP_KERNEL);
+ if (!ring_db->vaddr) {
+ ret = -ENOMEM;
+ goto err_free_ring_cq;
+ }
+ queue->task_irq = platform_get_irq(to_platform_device(dev),
+ queue->queue_id * 2 + 1);
+ if (queue->task_irq <= 0) {
+ ret = -EINVAL;
+ goto err_free_ring_db;
+ }
+
+ return 0;
+
+err_free_ring_db:
+ dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
+ queue->ring_db.paddr);
+err_free_ring_cq:
+ dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
+ queue->ring_cq.paddr);
+err_free_ring_cmd:
+ dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
+ queue->ring_cmd.paddr);
+
+ return ret;
+}
+
+static void sec_queue_free_ring_pages(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+
+ dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
+ queue->ring_db.paddr);
+ dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
+ queue->ring_cq.paddr);
+ dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
+ queue->ring_cmd.paddr);
+}
+
+static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
+ int queue_id)
+{
+ int ret;
+
+ sec_queue_base_init(info, queue, queue_id);
+
+ ret = sec_queue_res_cfg(queue);
+ if (ret)
+ return ret;
+
+ ret = sec_queue_map_io(queue);
+ if (ret) {
+ dev_err(info->dev, "Queue map failed %d\n", ret);
+ sec_queue_free_ring_pages(queue);
+ return ret;
+ }
+
+ sec_queue_hw_init(queue);
+
+ return 0;
+}
+
+static void sec_queue_unconfig(struct sec_dev_info *info,
+ struct sec_queue *queue)
+{
+ sec_queue_unmap_io(queue);
+ sec_queue_free_ring_pages(queue);
+}
+
+static int sec_id_alloc(struct sec_dev_info *info)
+{
+ int ret = 0;
+ int i;
+
+ mutex_lock(&sec_id_lock);
+
+ for (i = 0; i < SEC_MAX_DEVICES; i++)
+ if (!sec_devices[i])
+ break;
+ if (i == SEC_MAX_DEVICES) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ info->sec_id = i;
+ sec_devices[info->sec_id] = info;
+
+unlock:
+ mutex_unlock(&sec_id_lock);
+
+ return ret;
+}
+
+static void sec_id_free(struct sec_dev_info *info)
+{
+ mutex_lock(&sec_id_lock);
+ sec_devices[info->sec_id] = NULL;
+ mutex_unlock(&sec_id_lock);
+}
+
+static int sec_probe(struct platform_device *pdev)
+{
+ struct sec_dev_info *info;
+ struct device *dev = &pdev->dev;
+ int i, j;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(dev, "Failed to set 64 bit dma mask %d\n", ret);
+ return -ENODEV;
+ }
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ mutex_init(&info->dev_lock);
+
+ info->hw_sgl_pool = dmam_pool_create("sgl", dev,
+ sizeof(struct sec_hw_sgl), 64, 0);
+ if (!info->hw_sgl_pool) {
+ dev_err(dev, "Failed to create sec sgl dma pool\n");
+ return -ENOMEM;
+ }
+
+ ret = sec_base_init(info, pdev);
+ if (ret) {
+ dev_err(dev, "Base initialization fail! %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ ret = sec_queue_config(info, &info->queues[i], i);
+ if (ret)
+ goto queues_unconfig;
+
+ ret = sec_queue_irq_init(&info->queues[i]);
+ if (ret) {
+ sec_queue_unconfig(info, &info->queues[i]);
+ goto queues_unconfig;
+ }
+ }
+
+ ret = sec_algs_register();
+ if (ret) {
+ dev_err(dev, "Failed to register algorithms with crypto %d\n",
+ ret);
+ goto queues_unconfig;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ ret = sec_id_alloc(info);
+ if (ret)
+ goto algs_unregister;
+
+ return 0;
+
+algs_unregister:
+ sec_algs_unregister();
+queues_unconfig:
+ for (j = i - 1; j >= 0; j--) {
+ sec_queue_irq_uninit(&info->queues[j]);
+ sec_queue_unconfig(info, &info->queues[j]);
+ }
+ sec_base_exit(info);
+
+ return ret;
+}
+
+static int sec_remove(struct platform_device *pdev)
+{
+ struct sec_dev_info *info = platform_get_drvdata(pdev);
+ int i;
+
+ /* Unexpose as soon as possible, reuse during remove is fine */
+ sec_id_free(info);
+
+ sec_algs_unregister();
+
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ sec_queue_irq_uninit(&info->queues[i]);
+ sec_queue_unconfig(info, &info->queues[i]);
+ }
+
+ sec_base_exit(info);
+
+ return 0;
+}
+
+static const __maybe_unused struct of_device_id sec_match[] = {
+ { .compatible = "hisilicon,hip06-sec" },
+ { .compatible = "hisilicon,hip07-sec" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sec_match);
+
+static const __maybe_unused struct acpi_device_id sec_acpi_match[] = {
+ { "HISI02C1", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
+
+static struct platform_driver sec_driver = {
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .driver = {
+ .name = "hisi_sec_platform_driver",
+ .of_match_table = sec_match,
+ .acpi_match_table = ACPI_PTR(sec_acpi_match),
+ },
+};
+module_platform_driver(sec_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Security Accelerators");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com");
+MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
new file mode 100644
index 000000000000..2d2f186674ba
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -0,0 +1,428 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef _SEC_DRV_H_
+#define _SEC_DRV_H_
+
+#include <crypto/algapi.h>
+#include <linux/kfifo.h>
+
+#define SEC_MAX_SGE_NUM 64
+#define SEC_HW_RING_NUM 3
+
+#define SEC_CMD_RING 0
+#define SEC_OUTORDER_RING 1
+#define SEC_DBG_RING 2
+
+/* A reasonable length to balance memory use against flexibility */
+#define SEC_QUEUE_LEN 512
+
+struct sec_bd_info {
+#define SEC_BD_W0_T_LEN_M GENMASK(4, 0)
+#define SEC_BD_W0_T_LEN_S 0
+
+#define SEC_BD_W0_C_WIDTH_M GENMASK(6, 5)
+#define SEC_BD_W0_C_WIDTH_S 5
+#define SEC_C_WIDTH_AES_128BIT 0
+#define SEC_C_WIDTH_AES_8BIT 1
+#define SEC_C_WIDTH_AES_1BIT 2
+#define SEC_C_WIDTH_DES_64BIT 0
+#define SEC_C_WIDTH_DES_8BIT 1
+#define SEC_C_WIDTH_DES_1BIT 2
+
+#define SEC_BD_W0_C_MODE_M GENMASK(9, 7)
+#define SEC_BD_W0_C_MODE_S 7
+#define SEC_C_MODE_ECB 0
+#define SEC_C_MODE_CBC 1
+#define SEC_C_MODE_CTR 4
+#define SEC_C_MODE_CCM 5
+#define SEC_C_MODE_GCM 6
+#define SEC_C_MODE_XTS 7
+
+#define SEC_BD_W0_SEQ BIT(10)
+#define SEC_BD_W0_DE BIT(11)
+#define SEC_BD_W0_DAT_SKIP_M GENMASK(13, 12)
+#define SEC_BD_W0_DAT_SKIP_S 12
+#define SEC_BD_W0_C_GRAN_SIZE_19_16_M GENMASK(17, 14)
+#define SEC_BD_W0_C_GRAN_SIZE_19_16_S 14
+
+#define SEC_BD_W0_CIPHER_M GENMASK(19, 18)
+#define SEC_BD_W0_CIPHER_S 18
+#define SEC_CIPHER_NULL 0
+#define SEC_CIPHER_ENCRYPT 1
+#define SEC_CIPHER_DECRYPT 2
+
+#define SEC_BD_W0_AUTH_M GENMASK(21, 20)
+#define SEC_BD_W0_AUTH_S 20
+#define SEC_AUTH_NULL 0
+#define SEC_AUTH_MAC 1
+#define SEC_AUTH_VERIF 2
+
+#define SEC_BD_W0_AI_GEN BIT(22)
+#define SEC_BD_W0_CI_GEN BIT(23)
+#define SEC_BD_W0_NO_HPAD BIT(24)
+#define SEC_BD_W0_HM_M GENMASK(26, 25)
+#define SEC_BD_W0_HM_S 25
+#define SEC_BD_W0_ICV_OR_SKEY_EN_M GENMASK(28, 27)
+#define SEC_BD_W0_ICV_OR_SKEY_EN_S 27
+
+/* Multi purpose field - gran size bits for send, flag for recv */
+#define SEC_BD_W0_FLAG_M GENMASK(30, 29)
+#define SEC_BD_W0_C_GRAN_SIZE_21_20_M GENMASK(30, 29)
+#define SEC_BD_W0_FLAG_S 29
+#define SEC_BD_W0_C_GRAN_SIZE_21_20_S 29
+
+#define SEC_BD_W0_DONE BIT(31)
+ u32 w0;
+
+#define SEC_BD_W1_AUTH_GRAN_SIZE_M GENMASK(21, 0)
+#define SEC_BD_W1_AUTH_GRAN_SIZE_S 0
+#define SEC_BD_W1_M_KEY_EN BIT(22)
+#define SEC_BD_W1_BD_INVALID BIT(23)
+#define SEC_BD_W1_ADDR_TYPE BIT(24)
+
+#define SEC_BD_W1_A_ALG_M GENMASK(28, 25)
+#define SEC_BD_W1_A_ALG_S 25
+#define SEC_A_ALG_SHA1 0
+#define SEC_A_ALG_SHA256 1
+#define SEC_A_ALG_MD5 2
+#define SEC_A_ALG_SHA224 3
+#define SEC_A_ALG_HMAC_SHA1 8
+#define SEC_A_ALG_HMAC_SHA224 10
+#define SEC_A_ALG_HMAC_SHA256 11
+#define SEC_A_ALG_HMAC_MD5 12
+#define SEC_A_ALG_AES_XCBC 13
+#define SEC_A_ALG_AES_CMAC 14
+
+#define SEC_BD_W1_C_ALG_M GENMASK(31, 29)
+#define SEC_BD_W1_C_ALG_S 29
+#define SEC_C_ALG_DES 0
+#define SEC_C_ALG_3DES 1
+#define SEC_C_ALG_AES 2
+
+ u32 w1;
+
+#define SEC_BD_W2_C_GRAN_SIZE_15_0_M GENMASK(15, 0)
+#define SEC_BD_W2_C_GRAN_SIZE_15_0_S 0
+#define SEC_BD_W2_GRAN_NUM_M GENMASK(31, 16)
+#define SEC_BD_W2_GRAN_NUM_S 16
+ u32 w2;
+
+#define SEC_BD_W3_AUTH_LEN_OFFSET_M GENMASK(9, 0)
+#define SEC_BD_W3_AUTH_LEN_OFFSET_S 0
+#define SEC_BD_W3_CIPHER_LEN_OFFSET_M GENMASK(19, 10)
+#define SEC_BD_W3_CIPHER_LEN_OFFSET_S 10
+#define SEC_BD_W3_MAC_LEN_M GENMASK(24, 20)
+#define SEC_BD_W3_MAC_LEN_S 20
+#define SEC_BD_W3_A_KEY_LEN_M GENMASK(29, 25)
+#define SEC_BD_W3_A_KEY_LEN_S 25
+#define SEC_BD_W3_C_KEY_LEN_M GENMASK(31, 30)
+#define SEC_BD_W3_C_KEY_LEN_S 30
+#define SEC_KEY_LEN_AES_128 0
+#define SEC_KEY_LEN_AES_192 1
+#define SEC_KEY_LEN_AES_256 2
+#define SEC_KEY_LEN_DES 1
+#define SEC_KEY_LEN_3DES_3_KEY 1
+#define SEC_KEY_LEN_3DES_2_KEY 3
+ u32 w3;
+
+ /* W4,5 */
+ union {
+ u32 authkey_addr_lo;
+ u32 authiv_addr_lo;
+ };
+ union {
+ u32 authkey_addr_hi;
+ u32 authiv_addr_hi;
+ };
+
+ /* W6,7 */
+ u32 cipher_key_addr_lo;
+ u32 cipher_key_addr_hi;
+
+ /* W8,9 */
+ u32 cipher_iv_addr_lo;
+ u32 cipher_iv_addr_hi;
+
+ /* W10,11 */
+ u32 data_addr_lo;
+ u32 data_addr_hi;
+
+ /* W12,13 */
+ u32 mac_addr_lo;
+ u32 mac_addr_hi;
+
+ /* W14,15 */
+ u32 cipher_destin_addr_lo;
+ u32 cipher_destin_addr_hi;
+};
+
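The *_M/*_S pairs above follow a mask-plus-shift convention; a hypothetical pair of accessors (not in this patch) showing how a field is packed into and extracted from a BD word:

/* Sketch only: mask/shift accessors for the w0-w3 fields defined above. */
static inline u32 sec_bd_field_get(u32 word, u32 mask, u32 shift)
{
	return (word & mask) >> shift;
}

static inline void sec_bd_field_set(u32 *word, u32 mask, u32 shift, u32 val)
{
	*word = (*word & ~mask) | ((val << shift) & mask);
}

/*
 * For example, marking a BD as an AES-CBC encrypt operation:
 *   sec_bd_field_set(&bd->w0, SEC_BD_W0_C_MODE_M, SEC_BD_W0_C_MODE_S,
 *                    SEC_C_MODE_CBC);
 *   sec_bd_field_set(&bd->w0, SEC_BD_W0_CIPHER_M, SEC_BD_W0_CIPHER_S,
 *                    SEC_CIPHER_ENCRYPT);
 */
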
+enum sec_mem_region {
+ SEC_COMMON = 0,
+ SEC_SAA,
+ SEC_NUM_ADDR_REGIONS
+};
+
+#define SEC_NAME_SIZE 64
+#define SEC_Q_NUM 16
+
+
+/**
+ * struct sec_queue_ring_cmd - store information about a SEC HW cmd ring
+ * @used: Local counter used to cheaply establish if the ring is empty.
+ * @lock: Protect against simultaneous adjusting of the read and write pointers.
+ * @vaddr: Virtual address of the RAM pages used for the ring.
+ * @paddr: Physical address of the DMA-mapped region of RAM used for the ring.
+ * @callback: Callback function called on a ring element completing.
+ */
+struct sec_queue_ring_cmd {
+ atomic_t used;
+ struct mutex lock;
+ struct sec_bd_info *vaddr;
+ dma_addr_t paddr;
+ void (*callback)(struct sec_bd_info *resp, void *ctx);
+};
+
+struct sec_debug_bd_info;
+struct sec_queue_ring_db {
+ struct sec_debug_bd_info *vaddr;
+ dma_addr_t paddr;
+};
+
+struct sec_out_bd_info;
+struct sec_queue_ring_cq {
+ struct sec_out_bd_info *vaddr;
+ dma_addr_t paddr;
+};
+
+struct sec_dev_info;
+
+enum sec_cipher_alg {
+ SEC_C_DES_ECB_64,
+ SEC_C_DES_CBC_64,
+
+ SEC_C_3DES_ECB_192_3KEY,
+ SEC_C_3DES_ECB_192_2KEY,
+
+ SEC_C_3DES_CBC_192_3KEY,
+ SEC_C_3DES_CBC_192_2KEY,
+
+ SEC_C_AES_ECB_128,
+ SEC_C_AES_ECB_192,
+ SEC_C_AES_ECB_256,
+
+ SEC_C_AES_CBC_128,
+ SEC_C_AES_CBC_192,
+ SEC_C_AES_CBC_256,
+
+ SEC_C_AES_CTR_128,
+ SEC_C_AES_CTR_192,
+ SEC_C_AES_CTR_256,
+
+ SEC_C_AES_XTS_128,
+ SEC_C_AES_XTS_256,
+
+ SEC_C_NULL,
+};
+
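The enum names encode algorithm, mode and key width, so callers can map crypto API parameters straight onto an entry. A hedged sketch - the helper name is hypothetical - for the AES-CBC setkey case:

/* Sketch only: map an AES key length to the CBC entries of the enum. */
static int sec_alg_from_aes_cbc_keylen(unsigned int keylen,
				       enum sec_cipher_alg *alg)
{
	switch (keylen) {
	case 16:
		*alg = SEC_C_AES_CBC_128;
		return 0;
	case 24:
		*alg = SEC_C_AES_CBC_192;
		return 0;
	case 32:
		*alg = SEC_C_AES_CBC_256;
		return 0;
	default:
		return -EINVAL;
	}
}
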
+/**
+ * struct sec_alg_tfm_ctx - hardware specific transformation context
+ * @cipher_alg: Cipher algorithm in use, including the encryption mode.
+ * @key: Key storage if required.
+ * @pkey: DMA address for the key storage.
+ * @req_template: Request template to save time on setup.
+ * @queue: The hardware queue associated with this tfm context.
+ * @lock: Protect key and pkey to ensure they are consistent.
+ * @auth_buf: Current context buffer for auth operations.
+ * @backlog: The backlog queue used for cases where our buffers aren't
+ * large enough.
+ */
+struct sec_alg_tfm_ctx {
+ enum sec_cipher_alg cipher_alg;
+ u8 *key;
+ dma_addr_t pkey;
+ struct sec_bd_info req_template;
+ struct sec_queue *queue;
+ struct mutex lock;
+ u8 *auth_buf;
+ struct list_head backlog;
+};
+
+/**
+ * struct sec_request - data associated with a single crypto request
+ * @elements: List of subparts of this request (hardware size restriction)
+ * @num_elements: The number of subparts (used as an optimization)
+ * @lock: Protect elements of this structure against concurrent change.
+ * @tfm_ctx: hardware specific context.
+ * @len_in: length of in sgl from upper layers
+ * @len_out: length of out sgl from upper layers
+ * @dma_iv: initialization vector - physical address
+ * @err: store used to track errors across subelements of this request.
+ * @req_base: pointer to base element of the associated crypto context.
+ * This is needed to allow shared handling of skcipher, ahash etc.
+ * @cb: completion callback.
+ * @backlog_head: list head to allow backlog maintenance.
+ *
+ * The hardware is limited in the maximum size of data that it can
+ * process from a single BD. Typically this is fairly large (32MB)
+ * but still requires the complexity of splitting the incoming
+ * skreq up into a number of elements complete with appropriate
+ * iv chaining.
+ */
+struct sec_request {
+ struct list_head elements;
+ int num_elements;
+ struct mutex lock;
+ struct sec_alg_tfm_ctx *tfm_ctx;
+ int len_in;
+ int len_out;
+ dma_addr_t dma_iv;
+ int err;
+ struct crypto_async_request *req_base;
+ void (*cb)(struct sec_bd_info *resp, struct crypto_async_request *req);
+ struct list_head backlog_head;
+};
+
+/**
+ * struct sec_request_el - A subpart of a request.
+ * @head: allow us to attach this to the list in the sec_request
+ * @req: hardware block descriptor corresponding to this request subpart
+ * @in: hardware sgl for input - virtual address
+ * @dma_in: hardware sgl for input - physical address
+ * @sgl_in: scatterlist for this request subpart
+ * @out: hardware sgl for output - virtual address
+ * @dma_out: hardware sgl for output - physical address
+ * @sgl_out: scatterlist for this request subpart
+ * @sec_req: The request which this subpart forms a part of
+ * @el_length: Number of bytes in this subpart. Needed to locate
+ * last ivsize chunk for iv chaining.
+ */
+struct sec_request_el {
+ struct list_head head;
+ struct sec_bd_info req;
+ struct sec_hw_sgl *in;
+ dma_addr_t dma_in;
+ struct scatterlist *sgl_in;
+ struct sec_hw_sgl *out;
+ dma_addr_t dma_out;
+ struct scatterlist *sgl_out;
+ struct sec_request *sec_req;
+ size_t el_length;
+};
+
+/**
+ * struct sec_queue - All the information about a HW queue
+ * @dev_info: The parent SEC device to which this queue belongs.
+ * @task_irq: Completion interrupt for the queue.
+ * @name: Human readable queue description, also used as the irq name.
+ * @ring_cmd: The HW command ring associated with this queue.
+ * @ring_cq: The HW completion ring associated with this queue.
+ * @ring_db: The HW debug ring associated with this queue.
+ * @regs: The iomapped device registers.
+ * @queue_id: Index of the queue used for naming and resource selection.
+ * @in_use: Flag to say if the queue is in use.
+ * @expected: The next expected element to finish assuming we were in order.
+ * @unprocessed: A bitmap to track which OoO elements are done but not handled.
+ * @softqueue: A software queue used when chaining requirements prevent direct
+ * use of the hardware queues.
+ * @havesoftqueue: A flag to say we have a softqueue - as we may need one for
+ * the current mode.
+ * @queuelock: Protect the soft queue from concurrent changes, avoiding races
+ * that could lose data.
+ * @shadow: Pointers back to the shadow copy of the hardware ring element,
+ * needed because we can't store any context reference in the bd element.
+ */
+struct sec_queue {
+ struct sec_dev_info *dev_info;
+ int task_irq;
+ char name[SEC_NAME_SIZE];
+ struct sec_queue_ring_cmd ring_cmd;
+ struct sec_queue_ring_cq ring_cq;
+ struct sec_queue_ring_db ring_db;
+ void __iomem *regs;
+ u32 queue_id;
+ bool in_use;
+ int expected;
+
+ DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
+ DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
+ bool havesoftqueue;
+ struct mutex queuelock;
+ void *shadow[SEC_QUEUE_LEN];
+};
+
+/**
+ * struct sec_hw_sge: Track each of the 64 element SEC HW SGL entries
+ * @buf: The IOV dma address for this entry.
+ * @len: Length of this IOV.
+ * @pad: Reserved space.
+ */
+struct sec_hw_sge {
+ dma_addr_t buf;
+ unsigned int len;
+ unsigned int pad;
+};
+
+/**
+ * struct sec_hw_sgl: One hardware SGL entry.
+ * @next_sgl: DMA address of the next SGL in the chain. Null if last.
+ * @entry_sum_in_chain: The full count of SGEs - only matters for first SGL.
+ * @entry_sum_in_sgl: The number of SGEs in this SGL element.
+ * @flag: Unused in skciphers.
+ * @serial_num: Unused in skciphers.
+ * @cpuid: Currently unused.
+ * @data_bytes_in_sgl: Count of bytes from all SGEs in this SGL.
+ * @next: Virtual address used to stash the next sgl - useful in completion.
+ * @reserved: A reserved field not currently used.
+ * @sge_entries: The (up to) 64 Scatter Gather Entries, representing IOVs.
+ * @node: Currently unused.
+ */
+struct sec_hw_sgl {
+ dma_addr_t next_sgl;
+ u16 entry_sum_in_chain;
+ u16 entry_sum_in_sgl;
+ u32 flag;
+ u64 serial_num;
+ u32 cpuid;
+ u32 data_bytes_in_sgl;
+ struct sec_hw_sgl *next;
+ u64 reserved;
+ struct sec_hw_sge sge_entries[SEC_MAX_SGE_NUM];
+ u8 node[16];
+};
+
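A minimal sketch (not driver code; the real SGL construction lives in sec_algs.c) of chaining two of these SGLs from the DMA pool, keeping the hardware-visible next_sgl and the CPU-side next pointer in step; error handling is deliberately simplified:

/* Sketch only: chain two hardware SGLs allocated from the pool. */
static struct sec_hw_sgl *sec_sgl_chain_two(struct dma_pool *pool)
{
	dma_addr_t first_dma, second_dma;
	struct sec_hw_sgl *first, *second;

	first = dma_pool_zalloc(pool, GFP_KERNEL, &first_dma);
	second = dma_pool_zalloc(pool, GFP_KERNEL, &second_dma);
	if (!first || !second)
		return NULL;	/* leaks on partial failure - sketch only */

	first->next_sgl = second_dma;	/* the hardware follows the DMA address */
	first->next = second;		/* the driver follows the virtual one */
	first->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
	first->entry_sum_in_chain = 2 * SEC_MAX_SGE_NUM;
	second->entry_sum_in_sgl = SEC_MAX_SGE_NUM;

	return first;
}
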
+struct dma_pool;
+
+/**
+ * struct sec_dev_info: The full SEC unit comprising queues and processors.
+ * @sec_id: Index used to track which SEC this is when more than one is present.
+ * @num_saas: The number of backend (SAA) processors enabled.
+ * @regs: iomapped register regions shared by whole SEC unit.
+ * @dev_lock: Protects concurrent queue allocation / freeing for the SEC.
+ * @queues_in_use: The number of queues currently in use.
+ * @queues: The 16 queues that this SEC instance provides.
+ * @dev: Device pointer.
+ * @hw_sgl_pool: DMA pool used to minimise mapping for the scatter gather lists.
+ */
+struct sec_dev_info {
+ int sec_id;
+ int num_saas;
+ void __iomem *regs[SEC_NUM_ADDR_REGIONS];
+ struct mutex dev_lock;
+ int queues_in_use;
+ struct sec_queue queues[SEC_Q_NUM];
+ struct device *dev;
+ struct dma_pool *hw_sgl_pool;
+};
+
+int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx);
+bool sec_queue_can_enqueue(struct sec_queue *queue, int num);
+int sec_queue_stop_release(struct sec_queue *queue);
+struct sec_queue *sec_queue_alloc_start_safe(void);
+bool sec_queue_empty(struct sec_queue *queue);
+
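Taken together, these prototypes imply a simple queue lifecycle. A hedged sketch of one send - the helper name is hypothetical, error handling is condensed, the softqueue fallback is elided, and an ERR_PTR return from the alloc is assumed, as its _safe name suggests:

/* Sketch only: one BD submission over the queue API above. */
static int sec_send_one_bd(struct sec_bd_info *bd, void *ctx)
{
	struct sec_queue *queue;
	int ret;

	queue = sec_queue_alloc_start_safe();
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	mutex_lock(&queue->queuelock);
	if (sec_queue_can_enqueue(queue, 1))
		ret = sec_queue_send(queue, bd, ctx);
	else
		ret = -EBUSY;	/* a real caller falls back to the softqueue */
	mutex_unlock(&queue->queuelock);

	sec_queue_stop_release(queue);
	return ret;
}
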
+/* Algorithm specific elements from sec_algs.c */
+void sec_alg_callback(struct sec_bd_info *resp, void *ctx);
+int sec_algs_register(void);
+void sec_algs_unregister(void);
+
+#endif /* _SEC_DRV_H_ */
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4e86f864a952..7e71043457a6 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
@@ -33,7 +30,19 @@ MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
u32 val, htable_offset;
- int i;
+ int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
+
+ if (priv->version == EIP197B) {
+ cs_rc_max = EIP197B_CS_RC_MAX;
+ cs_ht_wc = EIP197B_CS_HT_WC;
+ cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
+ cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
+ } else {
+ cs_rc_max = EIP197D_CS_RC_MAX;
+ cs_ht_wc = EIP197D_CS_HT_WC;
+ cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
+ cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
+ }
/* Enable the record cache memory access */
val = readl(priv->base + EIP197_CS_RAM_CTRL);
@@ -54,7 +63,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Clear all records */
- for (i = 0; i < EIP197_CS_RC_MAX; i++) {
+ for (i = 0; i < cs_rc_max; i++) {
u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
@@ -64,14 +73,14 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
if (i == 0)
val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
- else if (i == EIP197_CS_RC_MAX - 1)
+ else if (i == cs_rc_max - 1)
val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
writel(val, priv->base + offset + sizeof(u32));
}
/* Clear the hash table entries */
- htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
- for (i = 0; i < 64; i++)
+ htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
+ for (i = 0; i < cs_ht_wc; i++)
writel(GENMASK(29, 0),
priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
@@ -82,23 +91,23 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
/* Write head and tail pointers of the record free chain */
val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
- EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
+ EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
writel(val, priv->base + EIP197_TRC_FREECHAIN);
/* Configure the record cache #1 */
- val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
- EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
+ val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
+ EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
writel(val, priv->base + EIP197_TRC_PARAMS2);
/* Configure the record cache #2 */
- val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
+ val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
EIP197_TRC_PARAMS_HTABLE_SZ(2);
writel(val, priv->base + EIP197_TRC_PARAMS);
}
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
- const struct firmware *fw, u32 ctrl,
+ const struct firmware *fw, int pe, u32 ctrl,
u32 prog_en)
{
const u32 *data = (const u32 *)fw->data;
@@ -112,7 +121,7 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
EIP197_PE(priv) + ctrl);
/* Enable access to the program memory */
- writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
+ writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -120,7 +129,7 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
/* Disable access to the program memory */
- writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
/* Release engine from reset */
val = readl(EIP197_PE(priv) + ctrl);
@@ -132,35 +141,62 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
const struct firmware *fw[FW_NB];
- int i, j, ret = 0;
+ char fw_path[31], *dir = NULL;
+ int i, j, ret = 0, pe;
u32 val;
+ switch (priv->version) {
+ case EIP197B:
+ dir = "eip197b";
+ break;
+ case EIP197D:
+ dir = "eip197d";
+ break;
+ default:
+ /* No firmware is required */
+ return 0;
+ }
+
for (i = 0; i < FW_NB; i++) {
- ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+ snprintf(fw_path, sizeof(fw_path), "inside-secure/%s/%s", dir, fw_name[i]);
+ ret = request_firmware(&fw[i], fw_path, priv->dev);
if (ret) {
- dev_err(priv->dev,
- "Failed to request firmware %s (%d)\n",
- fw_name[i], ret);
- goto release_fw;
- }
- }
-
- /* Clear the scratchpad memory */
- val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
- EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
- EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
- EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
- writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
+ if (priv->version != EIP197B)
+ goto release_fw;
- memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
- EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
-
- eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
- EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+ /* Fall back to the old firmware location for the
+ * EIP197b.
+ */
+ ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+ if (ret) {
+ dev_err(priv->dev,
+ "Failed to request firmware %s (%d)\n",
+ fw_name[i], ret);
+ goto release_fw;
+ }
+ }
+ }
- eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
- EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Clear the scratchpad memory */
+ val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+ val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
+ EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
+ EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
+ EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+
+ memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
+ EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+
+ eip197_write_firmware(priv, fw[FW_IFPP], pe,
+ EIP197_PE_ICE_FPP_CTRL(pe),
+ EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+
+ eip197_write_firmware(priv, fw[FW_IPUE], pe,
+ EIP197_PE_ICE_PUE_CTRL(pe),
+ EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+ }
release_fw:
for (j = 0; j < i; j++)
@@ -256,7 +292,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
u32 version, val;
- int i, ret;
+ int i, ret, pe;
/* Determine endianess and configure byte swap */
version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
@@ -267,6 +303,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
+ /* For EIP197 set maximum number of TX commands to 2^5 = 32 */
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
+
writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
/* Configure wr/rd cache values */
@@ -282,82 +322,94 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Clear any pending interrupt */
writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- /* Data Fetch Engine configuration */
-
- /* Reset all DFE threads */
- writel(EIP197_DxE_THR_CTRL_RESET_PE,
- EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
-
- if (priv->version == EIP197) {
- /* Reset HIA input interface arbiter */
- writel(EIP197_HIA_RA_PE_CTRL_RESET,
- EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
- }
-
- /* DMA transfer size to use */
- val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
- val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
- val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
- val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
- writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
-
- /* Leave the DFE threads reset state */
- writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
-
- /* Configure the procesing engine thresholds */
- writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
- EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
- writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
- EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
-
- if (priv->version == EIP197) {
- /* enable HIA input interface arbiter and rings */
- writel(EIP197_HIA_RA_PE_CTRL_EN |
- GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
- }
-
- /* Data Store Engine configuration */
-
- /* Reset all DSE threads */
- writel(EIP197_DxE_THR_CTRL_RESET_PE,
- EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
-
- /* Wait for all DSE threads to complete */
- while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
- GENMASK(15, 12)) != GENMASK(15, 12))
- ;
-
- /* DMA transfer size to use */
- val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
- val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
- val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
- /* FIXME: instability issues can occur for EIP97 but disabling it impact
- * performances.
- */
- if (priv->version == EIP197)
- val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
- writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
+ /* Processing Engine configuration */
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Data Fetch Engine configuration */
- /* Leave the DSE threads reset state */
- writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
+ /* Reset all DFE threads */
+ writel(EIP197_DxE_THR_CTRL_RESET_PE,
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- /* Configure the procesing engine thresholds */
- writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
- EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
+ if (priv->version == EIP197B || priv->version == EIP197D) {
+ /* Reset HIA input interface arbiter */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+ }
- /* Processing Engine configuration */
+ /* DMA transfer size to use */
+ val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
+ val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
+ EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
+ val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+ val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
+ writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
+
+ /* Leave the DFE threads reset state */
+ writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+ /* Configure the processing engine thresholds */
+ writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+ EIP197_PE_IN_xBUF_THRES_MAX(9),
+ EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
+ writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+ EIP197_PE_IN_xBUF_THRES_MAX(7),
+ EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
+
+ if (priv->version == EIP197B || priv->version == EIP197D) {
+ /* enable HIA input interface arbiter and rings */
+ writel(EIP197_HIA_RA_PE_CTRL_EN |
+ GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+ }
- /* H/W capabilities selection */
- val = EIP197_FUNCTION_RSVD;
- val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
- val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
- val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
- val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
- val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
- writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
+ /* Data Store Engine configuration */
+
+ /* Reset all DSE threads */
+ writel(EIP197_DxE_THR_CTRL_RESET_PE,
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+ /* Wait for all DSE threads to complete */
+ while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
+ GENMASK(15, 12)) != GENMASK(15, 12))
+ ;
+
+ /* DMA transfer size to use */
+ val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+ val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+ val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
+ /* FIXME: instability issues can occur for EIP97 but disabling it
+ * impacts performance.
+ */
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+ writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
+
+ /* Leave the DSE threads reset state */
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+ /* Configure the processing engine thresholds */
+ writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
+ EIP197_PE_OUT_DBUF_THRES_MAX(8),
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
+
+ /* Processing Engine configuration */
+
+ /* H/W capabilities selection */
+ val = EIP197_FUNCTION_RSVD;
+ val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
+ val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
+ val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
+ val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
+ val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
+ val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
+ val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
+ val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
+ }
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
@@ -408,18 +460,20 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
- /* Enable command descriptor rings */
- writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Enable command descriptor rings */
+ writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- /* Enable result descriptor rings */
- writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
+ /* Enable result descriptor rings */
+ writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+ }
/* Clear any HIA interrupt */
writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- if (priv->version == EIP197) {
+ if (priv->version == EIP197B || priv->version == EIP197D) {
eip197_trc_cache_init(priv);
ret = eip197_load_firmwares(priv);
@@ -452,7 +506,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
struct safexcel_context *ctx;
- struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
/* If a request wasn't properly dequeued because of a lack of resources,
@@ -476,16 +529,10 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
}
handle_req:
- request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request)
- goto request_failed;
-
ctx = crypto_tfm_ctx(req->tfm);
- ret = ctx->send(req, ring, request, &commands, &results);
- if (ret) {
- kfree(request);
+ ret = ctx->send(req, ring, &commands, &results);
+ if (ret)
goto request_failed;
- }
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -494,14 +541,8 @@ handle_req:
* to the engine because the input data was cached, continue to
* dequeue other requests as this is valid and not an error.
*/
- if (!commands && !results) {
- kfree(request);
+ if (!commands && !results)
continue;
- }
-
- spin_lock_bh(&priv->ring[ring].egress_lock);
- list_add_tail(&request->list, &priv->ring[ring].list);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
cdesc += commands;
rdesc += results;
@@ -519,7 +560,7 @@ finalize:
if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].egress_lock);
+ spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests += nreq;
@@ -528,7 +569,7 @@ finalize:
priv->ring[ring].busy = true;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ spin_unlock_bh(&priv->ring[ring].lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
@@ -560,6 +601,24 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
return -EINVAL;
}
+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc,
+ struct crypto_async_request *req)
+{
+ int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+ priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+ int i = safexcel_ring_first_rdr_index(priv, ring);
+
+ return priv->ring[ring].rdr_req[i];
+}
+
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
struct safexcel_command_desc *cdesc;
@@ -588,21 +647,16 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
- dma_addr_t ctxr_dma, int ring,
- struct safexcel_request *request)
+ dma_addr_t ctxr_dma, int ring)
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
int ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* Prepare command descriptor */
cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
- if (IS_ERR(cdesc)) {
- ret = PTR_ERR(cdesc);
- goto unlock;
- }
+ if (IS_ERR(cdesc))
+ return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
@@ -617,21 +671,20 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
goto cdesc_rollback;
}
- request->req = async;
- goto unlock;
+ safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+ return ret;
cdesc_rollback:
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-unlock:
- spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
int ring)
{
- struct safexcel_request *sreq;
+ struct crypto_async_request *req;
struct safexcel_context *ctx;
int ret, i, nreq, ndesc, tot_descs, handled = 0;
bool should_complete;
@@ -646,28 +699,22 @@ handle_results:
goto requests_left;
for (i = 0; i < nreq; i++) {
- spin_lock_bh(&priv->ring[ring].egress_lock);
- sreq = list_first_entry(&priv->ring[ring].list,
- struct safexcel_request, list);
- list_del(&sreq->list);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
- ctx = crypto_tfm_ctx(sreq->req->tfm);
- ndesc = ctx->handle_result(priv, ring, sreq->req,
+ req = safexcel_rdr_req_get(priv, ring);
+
+ ctx = crypto_tfm_ctx(req->tfm);
+ ndesc = ctx->handle_result(priv, ring, req,
&should_complete, &ret);
if (ndesc < 0) {
- kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
goto acknowledge;
}
if (should_complete) {
local_bh_disable();
- sreq->req->complete(sreq->req, ret);
+ req->complete(req, ret);
local_bh_enable();
}
- kfree(sreq);
tot_descs += ndesc;
handled++;
}
@@ -686,7 +733,7 @@ acknowledge:
goto handle_results;
requests_left:
- spin_lock_bh(&priv->ring[ring].egress_lock);
+ spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests -= handled;
safexcel_try_push_requests(priv, ring);
@@ -694,7 +741,7 @@ requests_left:
if (!priv->ring[ring].requests)
priv->ring[ring].busy = false;
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ spin_unlock_bh(&priv->ring[ring].lock);
}
static void safexcel_dequeue_work(struct work_struct *work)
@@ -785,17 +832,29 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
}
static struct safexcel_alg_template *safexcel_algs[] = {
+ &safexcel_alg_ecb_des,
+ &safexcel_alg_cbc_des,
+ &safexcel_alg_ecb_des3_ede,
+ &safexcel_alg_cbc_des3_ede,
&safexcel_alg_ecb_aes,
&safexcel_alg_cbc_aes,
+ &safexcel_alg_md5,
&safexcel_alg_sha1,
&safexcel_alg_sha224,
&safexcel_alg_sha256,
+ &safexcel_alg_sha384,
+ &safexcel_alg_sha512,
+ &safexcel_alg_hmac_md5,
&safexcel_alg_hmac_sha1,
&safexcel_alg_hmac_sha224,
&safexcel_alg_hmac_sha256,
+ &safexcel_alg_hmac_sha384,
+ &safexcel_alg_hmac_sha512,
&safexcel_alg_authenc_hmac_sha1_cbc_aes,
&safexcel_alg_authenc_hmac_sha224_cbc_aes,
&safexcel_alg_authenc_hmac_sha256_cbc_aes,
+ &safexcel_alg_authenc_hmac_sha384_cbc_aes,
+ &safexcel_alg_authenc_hmac_sha512_cbc_aes,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -805,6 +864,9 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
safexcel_algs[i]->priv = priv;
+ if (!(safexcel_algs[i]->engines & priv->version))
+ continue;
+
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -820,6 +882,9 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
fail:
for (j = 0; j < i; j++) {
+ if (!(safexcel_algs[j]->engines & priv->version))
+ continue;
+
if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -836,6 +901,9 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
int i;
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+ if (!(safexcel_algs[i]->engines & priv->version))
+ continue;
+
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -847,9 +915,21 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
- u32 val, mask;
+ u32 val, mask = 0;
val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+
+ /* Read number of PEs from the engine */
+ switch (priv->version) {
+ case EIP197B:
+ case EIP197D:
+ mask = EIP197_N_PES_MASK;
+ break;
+ default:
+ mask = EIP97_N_PES_MASK;
+ }
+ priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+
val = (val & GENMASK(27, 25)) >> 25;
mask = BIT(val) - 1;
@@ -867,7 +947,9 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
struct safexcel_register_offsets *offsets = &priv->offsets;
- if (priv->version == EIP197) {
+ switch (priv->version) {
+ case EIP197B:
+ case EIP197D:
offsets->hia_aic = EIP197_HIA_AIC_BASE;
offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
@@ -878,7 +960,8 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
offsets->pe = EIP197_PE_BASE;
- } else {
+ break;
+ case EIP97IES:
offsets->hia_aic = EIP97_HIA_AIC_BASE;
offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
@@ -889,6 +972,7 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
offsets->pe = EIP97_PE_BASE;
+ break;
}
}
@@ -906,6 +990,9 @@ static int safexcel_probe(struct platform_device *pdev)
priv->dev = dev;
priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ priv->flags |= EIP197_TRC_CACHE;
+
safexcel_init_register_offsets(priv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -957,6 +1044,13 @@ static int safexcel_probe(struct platform_device *pdev)
safexcel_configure(priv);
+ priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+ GFP_KERNEL);
+ if (!priv->ring) {
+ ret = -ENOMEM;
+ goto err_reg_clk;
+ }
+
for (i = 0; i < priv->config.rings; i++) {
char irq_name[6] = {0}; /* "ringX\0" */
char wq_name[9] = {0}; /* "wq_ringX\0" */
@@ -969,6 +1063,14 @@ static int safexcel_probe(struct platform_device *pdev)
if (ret)
goto err_reg_clk;
+ priv->ring[i].rdr_req = devm_kzalloc(dev,
+ sizeof(*priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+ GFP_KERNEL);
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_reg_clk;
+ }
+
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
if (!ring_irq) {
ret = -ENOMEM;
@@ -1004,9 +1106,7 @@ static int safexcel_probe(struct platform_device *pdev)
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
- INIT_LIST_HEAD(&priv->ring[i].list);
spin_lock_init(&priv->ring[i].lock);
- spin_lock_init(&priv->ring[i].egress_lock);
spin_lock_init(&priv->ring[i].queue_lock);
}
@@ -1034,6 +1134,24 @@ err_core_clk:
return ret;
}
+static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->config.rings; i++) {
+ /* clear any pending interrupt */
+ writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+ writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+ /* Reset the CDR base address */
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+ /* Reset the RDR base address */
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ }
+}
static int safexcel_remove(struct platform_device *pdev)
{
@@ -1041,6 +1159,8 @@ static int safexcel_remove(struct platform_device *pdev)
int i;
safexcel_unregister_algorithms(priv);
+ safexcel_hw_reset_rings(priv);
+
clk_disable_unprepare(priv->clk);
for (i = 0; i < priv->config.rings; i++)
@@ -1051,12 +1171,26 @@ static int safexcel_remove(struct platform_device *pdev)
static const struct of_device_id safexcel_of_match_table[] = {
{
+ .compatible = "inside-secure,safexcel-eip97ies",
+ .data = (void *)EIP97IES,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197b",
+ .data = (void *)EIP197B,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197d",
+ .data = (void *)EIP197D,
+ },
+ {
+ /* Deprecated. Kept for backward compatibility. */
.compatible = "inside-secure,safexcel-eip97",
- .data = (void *)EIP97,
+ .data = (void *)EIP97IES,
},
{
+ /* Deprecated. Kept for backward compatibility. */
.compatible = "inside-secure,safexcel-eip197",
- .data = (void *)EIP197,
+ .data = (void *)EIP197B,
},
{},
};
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 8b3ee9b59f53..65624a81f0fd 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __SAFEXCEL_H__
@@ -95,13 +92,13 @@
#define EIP197_HIA_xDR_STAT 0x003c
/* register offsets */
-#define EIP197_HIA_DFE_CFG 0x0000
-#define EIP197_HIA_DFE_THR_CTRL 0x0000
-#define EIP197_HIA_DFE_THR_STAT 0x0004
-#define EIP197_HIA_DSE_CFG 0x0000
-#define EIP197_HIA_DSE_THR_CTRL 0x0000
-#define EIP197_HIA_DSE_THR_STAT 0x0004
-#define EIP197_HIA_RA_PE_CTRL 0x0010
+#define EIP197_HIA_DFE_CFG(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_CTRL(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_STAT(n) (0x0004 + (128 * (n)))
+#define EIP197_HIA_DSE_CFG(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_CTRL(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_STAT(n) (0x0004 + (128 * (n)))
+#define EIP197_HIA_RA_PE_CTRL(n) (0x0010 + (8 * (n)))
#define EIP197_HIA_RA_PE_STAT 0x0014
#define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000)
#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r))
@@ -114,18 +111,18 @@
#define EIP197_HIA_MST_CTRL 0xfff4
#define EIP197_HIA_OPTIONS 0xfff8
#define EIP197_HIA_VERSION 0xfffc
-#define EIP197_PE_IN_DBUF_THRES 0x0000
-#define EIP197_PE_IN_TBUF_THRES 0x0100
-#define EIP197_PE_ICE_SCRATCH_RAM 0x0800
-#define EIP197_PE_ICE_PUE_CTRL 0x0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04
-#define EIP197_PE_ICE_FPP_CTRL 0x0d80
-#define EIP197_PE_ICE_RAM_CTRL 0x0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN 0x1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008
-#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c
-#define EIP197_PE_OUT_DBUF_THRES 0x1c00
-#define EIP197_PE_OUT_TBUF_THRES 0x1d00
+#define EIP197_PE_IN_DBUF_THRES(n) (0x0000 + (0x2000 * (n)))
+#define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n)))
+#define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n)))
+#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
+#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
+#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
#define EIP197_MST_CTRL 0xfff4
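Each processing engine's register file now sits 0x2000 bytes after the previous one, selected by the new (n) parameter. A standalone illustration of the stride:

#include <stdio.h>

#define EIP197_PE_ICE_RAM_CTRL(n)	(0x0ff0 + (0x2000 * (n)))

int main(void)
{
	printf("PE0: 0x%04x\n", EIP197_PE_ICE_RAM_CTRL(0));	/* 0x0ff0 */
	printf("PE1: 0x%04x\n", EIP197_PE_ICE_RAM_CTRL(1));	/* 0x2ff0 */
	return 0;
}
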
/* EIP197-specific registers, no indirection */
@@ -184,6 +181,11 @@
#define EIP197_HIA_RA_PE_CTRL_RESET BIT(31)
#define EIP197_HIA_RA_PE_CTRL_EN BIT(30)
+/* EIP197_HIA_OPTIONS */
+#define EIP197_N_PES_OFFSET 4
+#define EIP197_N_PES_MASK GENMASK(4, 0)
+#define EIP97_N_PES_MASK GENMASK(2, 0)
+
/* EIP197_HIA_AIC_R_ENABLE_CTRL */
#define EIP197_CDR_IRQ(n) BIT((n) * 2)
#define EIP197_RDR_IRQ(n) BIT((n) * 2 + 1)
@@ -217,6 +219,7 @@
#define WR_CACHE_4BITS (WR_CACHE_3BITS << 1 | BIT(0))
#define EIP197_MST_CTRL_RD_CACHE(n) (((n) & 0xf) << 0)
#define EIP197_MST_CTRL_WD_CACHE(n) (((n) & 0xf) << 4)
+#define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20)
#define EIP197_MST_CTRL_BYTE_SWAP BIT(24)
#define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25)
@@ -287,7 +290,7 @@ struct safexcel_context_record {
u32 control0;
u32 control1;
- __le32 data[24];
+ __le32 data[40];
} __packed;
/* control0 */
@@ -305,14 +308,19 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_NO_FINISH_HASH BIT(5)
#define CONTEXT_CONTROL_SIZE(n) ((n) << 8)
#define CONTEXT_CONTROL_KEY_EN BIT(16)
+#define CONTEXT_CONTROL_CRYPTO_ALG_DES (0x0 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_3DES (0x2 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17)
#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21)
#define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21)
+#define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23)
#define CONTEXT_CONTROL_INV_FR (0x5 << 24)
#define CONTEXT_CONTROL_INV_TR (0x6 << 24)
@@ -327,6 +335,11 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_COUNTER_MODE BIT(10)
#define CONTEXT_CONTROL_HASH_STORE BIT(19)
+/* The hash counter given to the engine in the context has a granularity of
+ * 64 bits.
+ */
+#define EIP197_COUNTER_BLOCK_SIZE 64
+
/* EIP197_CS_RAM_CTRL */
#define EIP197_TRC_ENABLE_0 BIT(4)
#define EIP197_TRC_ENABLE_1 BIT(5)
@@ -349,13 +362,19 @@ struct safexcel_context_record {
#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18)
/* Cache helpers */
-#define EIP197_CS_RC_MAX 52
+#define EIP197B_CS_RC_MAX 52
+#define EIP197D_CS_RC_MAX 96
#define EIP197_CS_RC_SIZE (4 * sizeof(u32))
#define EIP197_CS_RC_NEXT(x) (x)
#define EIP197_CS_RC_PREV(x) ((x) << 10)
#define EIP197_RC_NULL 0x3ff
-#define EIP197_CS_TRC_REC_WC 59
-#define EIP197_CS_TRC_LG_REC_WC 73
+#define EIP197B_CS_TRC_REC_WC 59
+#define EIP197D_CS_TRC_REC_WC 64
+#define EIP197B_CS_TRC_LG_REC_WC 73
+#define EIP197D_CS_TRC_LG_REC_WC 80
+#define EIP197B_CS_HT_WC 64
+#define EIP197D_CS_HT_WC 256
+
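For orientation, the hash-table offset that eip197_trc_cache_init() derives from these constants - cs_rc_max records of EIP197_CS_RC_SIZE (16 bytes) each - worked as a standalone check:

#include <stdio.h>

#define EIP197_CS_RC_SIZE	(4 * sizeof(unsigned int))	/* 16 bytes */

int main(void)
{
	printf("EIP197B htable_offset: %zu\n", 52 * EIP197_CS_RC_SIZE); /* 832 */
	printf("EIP197D htable_offset: %zu\n", 96 * EIP197_CS_RC_SIZE); /* 1536 */
	return 0;
}
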
/* Result data */
struct result_data_desc {
@@ -450,6 +469,7 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
#define EIP197_OPTION_64BIT_CTX BIT(1)
#define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8)
+#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
#define EIP197_TYPE_EXTENDED 0x3
@@ -480,7 +500,7 @@ enum eip197_fw {
FW_NB
};
-struct safexcel_ring {
+struct safexcel_desc_ring {
void *base;
void *base_end;
dma_addr_t base_dma;
@@ -489,8 +509,7 @@ struct safexcel_ring {
void *write;
void *read;
- /* number of elements used in the ring */
- unsigned nr;
+ /* descriptor element offset */
unsigned offset;
};
@@ -500,12 +519,8 @@ enum safexcel_alg_type {
SAFEXCEL_ALG_TYPE_AHASH,
};
-struct safexcel_request {
- struct list_head list;
- struct crypto_async_request *req;
-};
-
struct safexcel_config {
+ u32 pes;
u32 rings;
u32 cd_size;
@@ -521,9 +536,40 @@ struct safexcel_work_data {
int ring;
};
+struct safexcel_ring {
+ spinlock_t lock;
+
+ struct workqueue_struct *workqueue;
+ struct safexcel_work_data work_data;
+
+ /* command/result rings */
+ struct safexcel_desc_ring cdr;
+ struct safexcel_desc_ring rdr;
+
+ /* result ring crypto API request */
+ struct crypto_async_request **rdr_req;
+
+ /* queue */
+ struct crypto_queue queue;
+ spinlock_t queue_lock;
+
+ /* Number of requests in the engine. */
+ int requests;
+
+ /* The ring is currently handling at least one request */
+ bool busy;
+
+ /* Store for current requests when bailing out of the dequeueing
+ * function when not enough resources are available.
+ */
+ struct crypto_async_request *req;
+ struct crypto_async_request *backlog;
+};
+
enum safexcel_eip_version {
- EIP97,
- EIP197,
+ EIP97IES = BIT(0),
+ EIP197B = BIT(1),
+ EIP197D = BIT(2),
};
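Because the version values are now single bits, one alg template can advertise several engines at once, and registration reduces to the bit test seen in the safexcel.c hunks above (safexcel_algs[i]->engines & priv->version). A standalone illustration:

#include <stdio.h>

enum safexcel_eip_version {
	EIP97IES = 1 << 0,
	EIP197B  = 1 << 1,
	EIP197D  = 1 << 2,
};

int main(void)
{
	unsigned int engines = EIP197B | EIP197D;	/* hypothetical template */
	enum safexcel_eip_version version = EIP197D;	/* probed engine */

	printf("registered: %s\n", (engines & version) ? "yes" : "no");
	return 0;
}
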
struct safexcel_register_offsets {
@@ -539,6 +585,10 @@ struct safexcel_register_offsets {
u32 pe;
};
+enum safexcel_flags {
+ EIP197_TRC_CACHE = BIT(0),
+};
+
struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
@@ -548,46 +598,19 @@ struct safexcel_crypto_priv {
enum safexcel_eip_version version;
struct safexcel_register_offsets offsets;
+ u32 flags;
/* context DMA pool */
struct dma_pool *context_pool;
atomic_t ring_used;
- struct {
- spinlock_t lock;
- spinlock_t egress_lock;
-
- struct list_head list;
- struct workqueue_struct *workqueue;
- struct safexcel_work_data work_data;
-
- /* command/result rings */
- struct safexcel_ring cdr;
- struct safexcel_ring rdr;
-
- /* queue */
- struct crypto_queue queue;
- spinlock_t queue_lock;
-
- /* Number of requests in the engine. */
- int requests;
-
- /* The ring is currently handling at least one request */
- bool busy;
-
- /* Store for current requests when bailing out of the dequeueing
- * function when no enough resources are available.
- */
- struct crypto_async_request *req;
- struct crypto_async_request *backlog;
- } ring[EIP197_MAX_RINGS];
+ struct safexcel_ring *ring;
};
struct safexcel_context {
int (*send)(struct crypto_async_request *req, int ring,
- struct safexcel_request *request, int *commands,
- int *results);
+ int *commands, int *results);
int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
struct crypto_async_request *req, bool *complete,
int *ret);
@@ -600,13 +623,13 @@ struct safexcel_context {
};
struct safexcel_ahash_export_state {
- u64 len;
- u64 processed;
+ u64 len[2];
+ u64 processed[2];
u32 digest;
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
- u8 cache[SHA256_BLOCK_SIZE];
+ u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u8 cache[SHA512_BLOCK_SIZE];
};
/*
@@ -617,6 +640,7 @@ struct safexcel_ahash_export_state {
struct safexcel_alg_template {
struct safexcel_crypto_priv *priv;
enum safexcel_alg_type type;
+ u32 engines;
union {
struct skcipher_alg skcipher;
struct aead_alg aead;
@@ -635,16 +659,16 @@ int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
- dma_addr_t ctxr_dma, int ring,
- struct safexcel_request *request);
+ dma_addr_t ctxr_dma, int ring);
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *cdr,
- struct safexcel_ring *rdr);
+ struct safexcel_desc_ring *cdr,
+ struct safexcel_desc_ring *rdr);
int safexcel_select_ring(struct safexcel_crypto_priv *priv);
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring);
+ struct safexcel_desc_ring *ring);
+void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring);
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring);
+ struct safexcel_desc_ring *ring);
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
@@ -655,21 +679,44 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
int ring_id,
bool first, bool last,
dma_addr_t data, u32 len);
+int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+ int ring);
+int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc);
+void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc,
+ struct crypto_async_request *req);
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
void safexcel_inv_complete(struct crypto_async_request *req, int error);
int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
void *istate, void *ostate);
/* available algorithms */
+extern struct safexcel_alg_template safexcel_alg_ecb_des;
+extern struct safexcel_alg_template safexcel_alg_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_ecb_aes;
extern struct safexcel_alg_template safexcel_alg_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_md5;
extern struct safexcel_alg_template safexcel_alg_sha1;
extern struct safexcel_alg_template safexcel_alg_sha224;
extern struct safexcel_alg_template safexcel_alg_sha256;
+extern struct safexcel_alg_template safexcel_alg_sha384;
+extern struct safexcel_alg_template safexcel_alg_sha512;
+extern struct safexcel_alg_template safexcel_alg_hmac_md5;
extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
#endif
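
The new per-template `engines` field lets the registration code skip algorithms the probed engine cannot run. A minimal sketch of that filtering, assuming the engine-version values (EIP97IES, EIP197B, EIP197D) are bit flags and that the templates live in a `safexcel_algs[]` array as elsewhere in the driver; error unwinding is omitted:

static int safexcel_register_algorithms_sketch(struct safexcel_crypto_priv *priv)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		/* Skip templates the probed engine does not support. */
		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		safexcel_algs[i]->priv = priv;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			return ret;
	}

	return 0;
}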
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 6bb60fda2043..3aef1d43e435 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/device.h>
@@ -15,6 +12,7 @@
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
+#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
@@ -27,21 +25,28 @@ enum safexcel_cipher_direction {
SAFEXCEL_DECRYPT,
};
+enum safexcel_cipher_alg {
+ SAFEXCEL_DES,
+ SAFEXCEL_3DES,
+ SAFEXCEL_AES,
+};
+
struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
u32 mode;
+ enum safexcel_cipher_alg alg;
bool aead;
__le32 key[8];
unsigned int key_len;
/* All the below is AEAD specific */
- u32 alg;
+ u32 hash_alg;
u32 state_sz;
- u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_cipher_req {
@@ -57,10 +62,24 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
unsigned offset = 0;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
- offset = AES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ switch (ctx->alg) {
+ case SAFEXCEL_DES:
+ offset = DES_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+ break;
+ case SAFEXCEL_3DES:
+ offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+ break;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ case SAFEXCEL_AES:
+ offset = AES_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ break;
+ }
}
token = (struct safexcel_token *)(cdesc->control_data.token + offset);
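
The `offset` computed above is the cipher block size expressed in 32-bit token words: the IV is copied into the front of the control token and the instruction token starts right behind it. DES and 3DES use 8-byte blocks (8 / 4 = 2 words, hence the 2-token IV command option) while AES uses a 16-byte block (16 / 4 = 4 words, the 4-token option). An illustrative helper capturing that arithmetic (not part of the patch):

static unsigned int safexcel_iv_token_words(enum safexcel_cipher_alg alg)
{
	switch (alg) {
	case SAFEXCEL_DES:
	case SAFEXCEL_3DES:
		return 8 / sizeof(u32);		/* 2 token words */
	case SAFEXCEL_AES:
	default:
		return 16 / sizeof(u32);	/* 4 token words */
	}
}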
@@ -145,7 +164,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
return ret;
}
- if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
ctx->base.needs_inv = true;
@@ -179,12 +198,12 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
goto badkey;
/* Encryption key */
- if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
memcmp(ctx->key, keys.enckey, keys.enckeylen))
ctx->base.needs_inv = true;
/* Auth key */
- switch (ctx->alg) {
+ switch (ctx->hash_alg) {
case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
keys.authkeylen, &istate, &ostate))
@@ -200,6 +219,16 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
keys.authkeylen, &istate, &ostate))
goto badkey;
break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
+ if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
+ if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
default:
dev_err(priv->dev, "aead: unsupported hash algorithm\n");
goto badkey;
@@ -208,7 +237,7 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
CRYPTO_TFM_RES_MASK);
- if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
(memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
memcmp(ctx->opad, ostate.state, ctx->state_sz)))
ctx->base.needs_inv = true;
@@ -258,22 +287,28 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
if (ctx->aead)
cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
- ctx->alg;
-
- switch (ctx->key_len) {
- case AES_KEYSIZE_128:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
- break;
- case AES_KEYSIZE_192:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
- break;
- case AES_KEYSIZE_256:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
- break;
- default:
- dev_err(priv->dev, "aes keysize not supported: %u\n",
- ctx->key_len);
- return -EINVAL;
+ ctx->hash_alg;
+
+ if (ctx->alg == SAFEXCEL_DES) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES;
+ } else if (ctx->alg == SAFEXCEL_3DES) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES;
+ } else if (ctx->alg == SAFEXCEL_AES) {
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
+ break;
+ default:
+ dev_err(priv->dev, "aes keysize not supported: %u\n",
+ ctx->key_len);
+ return -EINVAL;
+ }
}
ctrl_size = ctx->key_len / sizeof(u32);
@@ -298,7 +333,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
do {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
@@ -315,7 +349,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
} while (!rdesc->last_seg);
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (src == dst) {
dma_unmap_sg(priv->dev, src,
@@ -335,8 +368,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
return ndesc;
}
-static int safexcel_aes_send(struct crypto_async_request *base, int ring,
- struct safexcel_request *request,
+static int safexcel_send_req(struct crypto_async_request *base, int ring,
struct safexcel_cipher_req *sreq,
struct scatterlist *src, struct scatterlist *dst,
unsigned int cryptlen, unsigned int assoclen,
@@ -346,7 +378,7 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_command_desc *cdesc;
- struct safexcel_result_desc *rdesc;
+ struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
struct scatterlist *sg;
unsigned int totlen = cryptlen + assoclen;
int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
@@ -386,8 +418,6 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
ctx->opad, ctx->state_sz);
}
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* command descriptors */
for_each_sg(src, sg, nr_src, i) {
int len = sg_dma_len(sg);
@@ -434,12 +464,12 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
ret = PTR_ERR(rdesc);
goto rdesc_rollback;
}
+ if (first)
+ first_rdesc = rdesc;
n_rdesc++;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
- request->req = base;
+ safexcel_rdr_req_set(priv, ring, first_rdesc, base);
*commands = n_cdesc;
*results = n_rdesc;
@@ -452,8 +482,6 @@ cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
if (src == dst) {
dma_unmap_sg(priv->dev, src,
sg_nents_for_len(src, totlen),
@@ -481,7 +509,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
do {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
@@ -491,17 +518,13 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
break;
}
- if (rdesc->result_data.error_code) {
- dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EIO;
- }
+ if (likely(!*ret))
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
} while (!rdesc->last_seg);
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -577,15 +600,13 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
}
static int safexcel_cipher_send_inv(struct crypto_async_request *base,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring,
- request);
+ ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -596,7 +617,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base,
}
static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request,
int *commands, int *results)
{
struct skcipher_request *req = skcipher_request_cast(async);
@@ -605,21 +625,19 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request, commands,
- results);
+ ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
- ret = safexcel_aes_send(async, ring, request, sreq, req->src,
+ ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, req->iv,
commands, results);
return ret;
}
static int safexcel_aead_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request, int *commands,
- int *results)
+ int *commands, int *results)
{
struct aead_request *req = aead_request_cast(async);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -628,14 +646,13 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request, commands,
- results);
+ ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
- ret = safexcel_aes_send(async, ring, request, sreq, req->src,
- req->dst, req->cryptlen, req->assoclen,
+ ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+ req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv,
commands, results);
return ret;
@@ -705,9 +722,10 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
}
-static int safexcel_aes(struct crypto_async_request *base,
+static int safexcel_queue_req(struct crypto_async_request *base,
struct safexcel_cipher_req *sreq,
- enum safexcel_cipher_direction dir, u32 mode)
+ enum safexcel_cipher_direction dir, u32 mode,
+ enum safexcel_cipher_alg alg)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
@@ -715,10 +733,11 @@ static int safexcel_aes(struct crypto_async_request *base,
sreq->needs_inv = false;
sreq->direction = dir;
+ ctx->alg = alg;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (priv->version == EIP197 && ctx->base.needs_inv) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
sreq->needs_inv = true;
ctx->base.needs_inv = false;
}
@@ -745,14 +764,16 @@ static int safexcel_aes(struct crypto_async_request *base,
static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_AES);
}
static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_AES);
}
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
@@ -795,7 +816,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
if (safexcel_cipher_cra_exit(tfm))
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_skcipher_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "skcipher: invalidation error %d\n",
@@ -815,7 +836,7 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
if (safexcel_cipher_cra_exit(tfm))
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_aead_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "aead: invalidation error %d\n",
@@ -828,6 +849,7 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
struct safexcel_alg_template safexcel_alg_ecb_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_ecb_aes_encrypt,
@@ -838,7 +860,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
.cra_name = "ecb(aes)",
.cra_driver_name = "safexcel-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -852,18 +874,21 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_AES);
}
static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_AES);
}
struct safexcel_alg_template safexcel_alg_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_cbc_aes_encrypt,
@@ -875,7 +900,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
.cra_name = "cbc(aes)",
.cra_driver_name = "safexcel-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -887,20 +912,234 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
},
};
+static int safexcel_cbc_des_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_cbc_des_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ if (len != DES_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ /* if context exists and key changed, need to invalidate it */
+ if (ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, len))
+ ctx->base.needs_inv = true;
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+ return 0;
+}
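
In the setkey above, des_ekey() returns 0 when the key expansion detects one of the known weak or semi-weak DES keys, so such keys are rejected only when the caller asked for weak-key checking via CRYPTO_TFM_REQ_WEAK_KEY. For illustration, the classic all-0x01 key is one that would trip this path (hypothetical example, not from the patch):

/* One of the four classic DES weak keys: encryption and decryption
 * collapse to the same operation, so des_ekey() flags it by returning 0.
 */
static const u8 des_weak_key_example[DES_KEY_SIZE] = {
	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
};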
+
+struct safexcel_alg_template safexcel_alg_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des_setkey,
+ .encrypt = safexcel_cbc_des_encrypt,
+ .decrypt = safexcel_cbc_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "safexcel-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_ecb_des_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_ecb_des_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des_setkey,
+ .encrypt = safexcel_ecb_des_encrypt,
+ .decrypt = safexcel_ecb_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "safexcel-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (len != DES3_EDE_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* if context exists and key changed, need to invalidate it */
+ if (ctx->base.ctxr_dma) {
+ if (memcmp(ctx->key, key, len))
+ ctx->base.needs_inv = true;
+ }
+
+ memcpy(ctx->key, key, len);
+
+ ctx->key_len = len;
+
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des3_ede_setkey,
+ .encrypt = safexcel_cbc_des3_ede_encrypt,
+ .decrypt = safexcel_cbc_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "safexcel-cbc-des3_ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_3DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des3_ede_setkey,
+ .encrypt = safexcel_ecb_des3_ede_encrypt,
+ .decrypt = safexcel_ecb_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "safexcel-ecb-des3_ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_encrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
- return safexcel_aes(&req->base, creq, SAFEXCEL_ENCRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT,
+ CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
}
static int safexcel_aead_decrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
- return safexcel_aes(&req->base, creq, SAFEXCEL_DECRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT,
+ CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
}
static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
@@ -926,13 +1165,14 @@ static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -943,7 +1183,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -960,13 +1200,14 @@ static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
ctx->state_sz = SHA256_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -977,7 +1218,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -994,13 +1235,14 @@ static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
ctx->state_sz = SHA256_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -1011,7 +1253,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1022,3 +1264,73 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
},
},
};
+
+static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+ ctx->state_sz = SHA512_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha512_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+ ctx->state_sz = SHA512_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha384_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index c77b0e1655a8..ac9282c1a5ec 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -1,14 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <crypto/hmac.h>
+#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -22,8 +20,8 @@ struct safexcel_ahash_ctx {
u32 alg;
- u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
@@ -38,18 +36,26 @@ struct safexcel_ahash_req {
u32 digest;
u8 state_sz; /* expected state size, only set once */
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+ u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
- u64 len;
- u64 processed;
+ u64 len[2];
+ u64 processed[2];
- u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
dma_addr_t cache_dma;
unsigned int cache_sz;
- u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
+static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
+{
+ if (req->len[1] > req->processed[1])
+ return 0xffffffff - (req->len[0] - req->processed[0]);
+
+ return req->len[0] - req->processed[0];
+}
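
With SHA-384/512 the driver must track message lengths beyond what its previous single counters assumed, so `len` and `processed` become two-word counters: word 0 is a u64 that is allowed to wrap and word 1 counts the wraps, and the helper above derives the not-yet-sent byte count from the two halves. A self-contained sketch of the carry discipline used throughout this patch (plain C types standing in for the kernel's u64):

#include <stdint.h>

struct wide_count {
	uint64_t w[2];	/* w[0]: low 64 bits, w[1]: number of wraps */
};

/* Mirrors "req->len[0] += n; if (req->len[0] < n) req->len[1]++;":
 * after an unsigned overflow the low word ends up smaller than the
 * addend, which is exactly the carry condition.
 */
static void wide_count_add(struct wide_count *c, uint64_t n)
{
	c->w[0] += n;
	if (c->w[0] < n)
		c->w[1]++;
}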
+
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
u32 input_length, u32 result_length)
{
@@ -72,9 +78,9 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
struct safexcel_ahash_req *req,
struct safexcel_command_desc *cdesc,
- unsigned int digestsize,
- unsigned int blocksize)
+ unsigned int digestsize)
{
+ struct safexcel_crypto_priv *priv = ctx->priv;
int i;
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
@@ -82,12 +88,17 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= req->digest;
if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
- if (req->processed) {
- if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+ if (req->processed[0] || req->processed[1]) {
+ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
+ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
} else {
@@ -102,12 +113,28 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
* fields. Do this now as we need it to setup the first command
* descriptor.
*/
- if (req->processed) {
+ if (req->processed[0] || req->processed[1]) {
for (i = 0; i < digestsize / sizeof(u32); i++)
ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
- if (req->finish)
- ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
+ if (req->finish) {
+ u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+ count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
+ req->processed[1]);
+
+ /* This is a hardware limitation, as the
+ * counter must fit into a u32. This represents
+ * a fairly big amount of input data, so we
+ * shouldn't see this.
+ */
+ if (unlikely(count & 0xffff0000)) {
+ dev_warn(priv->dev,
+ "Input data is too big\n");
+ return;
+ }
+
+ ctx->base.ctxr->data[i] = cpu_to_le32(count);
+ }
}
} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
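
The digest count stored in the context is expressed in 64-byte hash blocks rather than bytes; each wrap of the low word of `processed` accounts for a further 0xffffffff / 64 blocks, and the result must still fit the engine's small hardware counter, hence the overflow check above. A standalone restatement of that computation, assuming EIP197_COUNTER_BLOCK_SIZE is 64:

#include <stdint.h>

#define COUNTER_BLOCK_SIZE 64	/* assumed value of EIP197_COUNTER_BLOCK_SIZE */

/* Blocks represented by a split byte counter {lo, wraps}, matching the
 * count computation in safexcel_context_control() above.
 */
static uint64_t processed_blocks(uint64_t lo, uint64_t wraps)
{
	return lo / COUNTER_BLOCK_SIZE +
	       (0xffffffffULL / COUNTER_BLOCK_SIZE) * wraps;
}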
@@ -126,11 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
- int cache_len;
+ u64 cache_len;
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -141,7 +167,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
}
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (sreq->nents) {
dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
@@ -164,7 +189,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
memcpy(areq->result, sreq->state,
crypto_ahash_digestsize(ahash));
- cache_len = sreq->len - sreq->processed;
+ cache_len = safexcel_queued_len(sreq);
if (cache_len)
memcpy(sreq->cache, sreq->cache_next, cache_len);
@@ -174,7 +199,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
}
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
- struct safexcel_request *request,
int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
@@ -185,9 +209,10 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ int i, extra, n_cdesc = 0, ret = 0;
+ u64 queued, len, cache_len;
- queued = len = req->len - req->processed;
+ queued = len = safexcel_queued_len(req);
if (queued <= crypto_ahash_blocksize(ahash))
cache_len = queued;
else
@@ -220,16 +245,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}
}
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* Add a command descriptor for the cached data, if any */
if (cache_len) {
req->cache_dma = dma_map_single(priv->dev, req->cache,
cache_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->dev, req->cache_dma)) {
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ if (dma_mapping_error(priv->dev, req->cache_dma))
return -EINVAL;
- }
req->cache_sz = cache_len;
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
@@ -260,7 +281,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
int sglen = sg_dma_len(sg);
/* Do not overflow the request */
- if (queued - sglen < 0)
+ if (queued < sglen)
sglen = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
@@ -282,8 +303,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
send_command:
/* Setup the context options */
- safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
- crypto_ahash_blocksize(ahash));
+ safexcel_context_control(ctx, req, first_cdesc, req->state_sz);
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
@@ -303,10 +323,11 @@ send_command:
goto unmap_result;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
- req->processed += len;
- request->req = &areq->base;
+ req->processed[0] += len;
+ if (req->processed[0] < len)
+ req->processed[1]++;
*commands = n_cdesc;
*results = 1;
@@ -327,7 +348,6 @@ unmap_cache:
req->cache_sz = 0;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
@@ -335,16 +355,18 @@ static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
unsigned int state_w_sz = req->state_sz / sizeof(u32);
+ u64 processed;
int i;
+ processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+ processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
+
for (i = 0; i < state_w_sz; i++)
if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
return true;
- if (ctx->base.ctxr->data[state_w_sz] !=
- cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
+ if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
return true;
return false;
@@ -363,21 +385,16 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
"hash: invalidate: could not retrieve the result descriptor\n");
*ret = PTR_ERR(rdesc);
- } else if (rdesc->result_data.error_code) {
- dev_err(priv->dev,
- "hash: invalidate: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EINVAL;
+ } else {
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
}
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -413,7 +430,7 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int err;
- BUG_ON(priv->version == EIP97 && req->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
if (req->needs_inv) {
req->needs_inv = false;
@@ -428,15 +445,14 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
}
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
ret = safexcel_invalidate_cache(async, ctx->priv,
- ctx->base.ctxr_dma, ring, request);
+ ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -447,19 +463,17 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
}
static int safexcel_ahash_send(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int ret;
if (req->needs_inv)
- ret = safexcel_ahash_send_inv(async, ring, request,
- commands, results);
+ ret = safexcel_ahash_send_inv(async, ring, commands, results);
else
- ret = safexcel_ahash_send_req(async, ring, request,
- commands, results);
+ ret = safexcel_ahash_send_req(async, ring, commands, results);
+
return ret;
}
@@ -509,17 +523,17 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
- int queued, cache_len;
+ u64 queued, cache_len;
- /* cache_len: everyting accepted by the driver but not sent yet,
- * tot sz handled by update() - last req sz - tot sz handled by send()
- */
- cache_len = req->len - areq->nbytes - req->processed;
/* queued: everything accepted by the driver which will be handled by
* the next send() calls.
* tot sz handled by update() - tot sz handled by send()
*/
- queued = req->len - req->processed;
+ queued = safexcel_queued_len(req);
+ /* cache_len: everything accepted by the driver but not sent yet,
+ * tot sz handled by update() - last req sz - tot sz handled by send()
+ */
+ cache_len = queued - areq->nbytes;
/*
* In case there isn't enough bytes to proceed (less than a
@@ -546,8 +560,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
req->needs_inv = false;
if (ctx->base.ctxr) {
- if (priv->version == EIP197 &&
- !ctx->base.needs_inv && req->processed &&
+ if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
+ (req->processed[0] || req->processed[1]) &&
req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
/* We're still setting needs_inv here, even though it is
* cleared right away, because the needs_inv flag can be
@@ -590,7 +604,9 @@ static int safexcel_ahash_update(struct ahash_request *areq)
if (!areq->nbytes)
return 0;
- req->len += areq->nbytes;
+ req->len[0] += areq->nbytes;
+ if (req->len[0] < areq->nbytes)
+ req->len[1]++;
safexcel_ahash_cache(areq);
@@ -605,7 +621,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
return safexcel_ahash_enqueue(areq);
if (!req->last_req &&
- req->len - req->processed > crypto_ahash_blocksize(ahash))
+ safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
return safexcel_ahash_enqueue(areq);
return 0;
@@ -620,8 +636,11 @@ static int safexcel_ahash_final(struct ahash_request *areq)
req->finish = true;
/* If we have an overall 0 length request */
- if (!(req->len + areq->nbytes)) {
- if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+ if (!req->len[0] && !req->len[1] && !areq->nbytes) {
+ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+ memcpy(areq->result, md5_zero_message_hash,
+ MD5_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
memcpy(areq->result, sha1_zero_message_hash,
SHA1_DIGEST_SIZE);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
@@ -630,6 +649,12 @@ static int safexcel_ahash_final(struct ahash_request *areq)
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
memcpy(areq->result, sha256_zero_message_hash,
SHA256_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
+ memcpy(areq->result, sha384_zero_message_hash,
+ SHA384_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+ memcpy(areq->result, sha512_zero_message_hash,
+ SHA512_DIGEST_SIZE);
return 0;
}
@@ -654,8 +679,10 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_ahash_export_state *export = out;
- export->len = req->len;
- export->processed = req->processed;
+ export->len[0] = req->len[0];
+ export->len[1] = req->len[1];
+ export->processed[0] = req->processed[0];
+ export->processed[1] = req->processed[1];
export->digest = req->digest;
@@ -676,8 +703,10 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
if (ret)
return ret;
- req->len = export->len;
- req->processed = export->processed;
+ req->len[0] = export->len[0];
+ req->len[1] = export->len[1];
+ req->processed[0] = export->processed[0];
+ req->processed[1] = export->processed[1];
req->digest = export->digest;
@@ -743,7 +772,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
if (!ctx->base.ctxr)
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_ahash_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
@@ -755,6 +784,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
struct safexcel_alg_template safexcel_alg_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha1_init,
.update = safexcel_ahash_update,
@@ -908,8 +938,7 @@ int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
u8 *ipad, *opad;
int ret;
- tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(alg, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
@@ -963,7 +992,7 @@ static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- if (priv->version == EIP197 && ctx->base.ctxr) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) {
for (i = 0; i < state_sz / sizeof(u32); i++) {
if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
@@ -988,6 +1017,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha1_init,
.update = safexcel_ahash_update,
@@ -1051,6 +1081,7 @@ static int safexcel_sha256_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_sha256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha256_init,
.update = safexcel_ahash_update,
@@ -1113,6 +1144,7 @@ static int safexcel_sha224_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_sha224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha224_init,
.update = safexcel_ahash_update,
@@ -1168,6 +1200,7 @@ static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha224_init,
.update = safexcel_ahash_update,
@@ -1224,6 +1257,7 @@ static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha256_init,
.update = safexcel_ahash_update,
@@ -1251,3 +1285,375 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
},
},
};
+
+static int safexcel_sha512_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = lower_32_bits(SHA512_H0);
+ req->state[1] = upper_32_bits(SHA512_H0);
+ req->state[2] = lower_32_bits(SHA512_H1);
+ req->state[3] = upper_32_bits(SHA512_H1);
+ req->state[4] = lower_32_bits(SHA512_H2);
+ req->state[5] = upper_32_bits(SHA512_H2);
+ req->state[6] = lower_32_bits(SHA512_H3);
+ req->state[7] = upper_32_bits(SHA512_H3);
+ req->state[8] = lower_32_bits(SHA512_H4);
+ req->state[9] = upper_32_bits(SHA512_H4);
+ req->state[10] = lower_32_bits(SHA512_H5);
+ req->state[11] = upper_32_bits(SHA512_H5);
+ req->state[12] = lower_32_bits(SHA512_H6);
+ req->state[13] = upper_32_bits(SHA512_H6);
+ req->state[14] = lower_32_bits(SHA512_H7);
+ req->state[15] = upper_32_bits(SHA512_H7);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
+
+ return 0;
+}
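
The engine context holds 32-bit words, so each 64-bit SHA-512 initial value is stored as a low/high pair; for instance SHA512_H0 = 0x6a09e667f3bcc908 lands as state[0] = 0xf3bcc908 and state[1] = 0x6a09e667. A tiny standalone illustration of the split performed by lower_32_bits()/upper_32_bits() above:

#include <stdint.h>

/* Example: split_h(0x6a09e667f3bcc908ULL, out) yields
 * out[0] = 0xf3bcc908 (low word), out[1] = 0x6a09e667 (high word).
 */
static void split_h(uint64_t h, uint32_t out[2])
{
	out[0] = (uint32_t)h;		/* lower_32_bits(h) */
	out[1] = (uint32_t)(h >> 32);	/* upper_32_bits(h) */
}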
+
+static int safexcel_sha512_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sha512_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_sha512_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sha512_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "safexcel-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha384_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = lower_32_bits(SHA384_H0);
+ req->state[1] = upper_32_bits(SHA384_H0);
+ req->state[2] = lower_32_bits(SHA384_H1);
+ req->state[3] = upper_32_bits(SHA384_H1);
+ req->state[4] = lower_32_bits(SHA384_H2);
+ req->state[5] = upper_32_bits(SHA384_H2);
+ req->state[6] = lower_32_bits(SHA384_H3);
+ req->state[7] = upper_32_bits(SHA384_H3);
+ req->state[8] = lower_32_bits(SHA384_H4);
+ req->state[9] = upper_32_bits(SHA384_H4);
+ req->state[10] = lower_32_bits(SHA384_H5);
+ req->state[11] = upper_32_bits(SHA384_H5);
+ req->state[12] = lower_32_bits(SHA384_H6);
+ req->state[13] = upper_32_bits(SHA384_H6);
+ req->state[14] = lower_32_bits(SHA384_H7);
+ req->state[15] = upper_32_bits(SHA384_H7);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sha384_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sha384_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_sha384_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sha384_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "safexcel-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
+ SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha512_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha512_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha512_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha512_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha512_digest,
+ .setkey = safexcel_hmac_sha512_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "safexcel-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
+ SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha384_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha384_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha384_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha384_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha384_digest,
+ .setkey = safexcel_hmac_sha384_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "safexcel-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_md5_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = MD5_H0;
+ req->state[1] = MD5_H1;
+ req->state[2] = MD5_H2;
+ req->state[3] = MD5_H3;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = MD5_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_md5_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_md5_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_md5 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_md5_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_md5_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "safexcel-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_md5_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_md5_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
+ MD5_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_md5_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_md5_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_md5 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_md5_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_md5_digest,
+ .setkey = safexcel_hmac_md5_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "safexcel-hmac-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
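
These templates are driven through the generic ahash API. A minimal sketch of a kernel-side caller, assuming only the standard crypto API (the example_ function name and the buffers are illustrative; buffers must be DMA-able, i.e. kmalloc'd, for this driver):

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_hmac_md5(const u8 *key, unsigned int keylen,
			    const u8 *msg, unsigned int msglen, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* Picks "safexcel-hmac-md5" when it is the highest-priority
	 * provider of hmac(md5). */
	tfm = crypto_alloc_ahash("hmac(md5)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	sg_init_one(&sg, msg, msglen);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, msglen);

	/* .digest resolves to init + finup in a single request */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}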
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index c9d2a8716b5b..eb75fa684876 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/dma-mapping.h>
@@ -14,8 +11,8 @@
#include "safexcel.h"
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *cdr,
- struct safexcel_ring *rdr)
+ struct safexcel_desc_ring *cdr,
+ struct safexcel_desc_ring *rdr)
{
cdr->offset = sizeof(u32) * priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
@@ -24,7 +21,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
if (!cdr->base)
return -ENOMEM;
cdr->write = cdr->base;
- cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE;
+ cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
rdr->offset = sizeof(u32) * priv->config.rd_offset;
@@ -34,7 +31,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
if (!rdr->base)
return -ENOMEM;
rdr->write = rdr->base;
- rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE;
+ rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
rdr->read = rdr->base;
return 0;
@@ -46,49 +43,73 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
}
static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
void *ptr = ring->write;
- if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1)
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
- ring->write += ring->offset;
if (ring->write == ring->base_end)
ring->write = ring->base;
+ else
+ ring->write += ring->offset;
- ring->nr++;
return ptr;
}
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
void *ptr = ring->read;
- if (!ring->nr)
+ if (ring->write == ring->read)
return ERR_PTR(-ENOENT);
- ring->read += ring->offset;
if (ring->read == ring->base_end)
ring->read = ring->base;
+ else
+ ring->read += ring->offset;
- ring->nr--;
return ptr;
}
+inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
+ int ring)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return rdr->read;
+}
+
+inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+ int ring)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return (rdr->read - rdr->base) / rdr->offset;
+}
+
+inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return ((void *)rdesc - rdr->base) / rdr->offset;
+}
+
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
- if (!ring->nr)
+ if (ring->write == ring->read)
return;
if (ring->write == ring->base)
- ring->write = ring->base_end - ring->offset;
+ ring->write = ring->base_end;
else
ring->write -= ring->offset;
-
- ring->nr--;
}
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
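
With the nr counter gone, ring occupancy is inferred from the pointers alone, at the cost of one sacrificial slot; base_end now addresses the last descriptor rather than one past it, hence the (EIP197_DEFAULT_RING_SIZE - 1) scaling above. The two predicates, spelled out as hypothetical helpers under the same invariants as the code above:

/* empty: write == read
 * full:  write is exactly one slot behind read, including the
 *        wrapped case where read sits at base and write at base_end
 */
static inline bool desc_ring_empty(struct safexcel_desc_ring *r)
{
	return r->write == r->read;
}

static inline bool desc_ring_full(struct safexcel_desc_ring *r)
{
	return (r->write == r->read - r->offset) ||
	       (r->read == r->base && r->write == r->base_end);
}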
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index e34d80b6b7e5..99ff54cc8a15 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -1183,8 +1183,7 @@ static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
u8 *opad;
int ret;
- tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index b182e941b0cd..ee0404e27a0f 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "mtk-platform.h"
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index ab6235b7ff22..55f34cfc43ff 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1487,8 +1487,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
base->cra_priority = N2_CRA_PRIORITY;
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = tmpl->block_size;
base->cra_ctxsize = sizeof(struct n2_hash_ctx);
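
This is the pattern repeated throughout the series: the CRYPTO_ALG_TYPE_* bits (and cra_type) are now filled in by the type-specific registration helpers such as crypto_register_ahash() and crypto_register_shash(), so drivers declare only behavioural flags. A minimal post-cleanup declaration, with illustrative names:

static struct ahash_alg example_alg = {
	.init	= example_init,
	.update	= example_update,
	.final	= example_final,
	.halg.base = {
		.cra_name	 = "sha256",
		.cra_driver_name = "sha256-example",
		.cra_priority	 = 300,
		/* No CRYPTO_ALG_TYPE_AHASH here: crypto_register_ahash()
		 * sets the type bits and cra_type itself. */
		.cra_flags	 = CRYPTO_ALG_ASYNC |
				   CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize	 = SHA256_BLOCK_SIZE,
		.cra_ctxsize	 = sizeof(struct example_ctx),
		.cra_module	 = THIS_MODULE,
	},
};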
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 36afd6d8753c..c68df7e8bee1 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -24,6 +24,8 @@
#include <asm/icswx.h>
#include <asm/vas.h>
#include <asm/reg.h>
+#include <asm/opal-api.h>
+#include <asm/opal.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
@@ -753,7 +755,7 @@ static int nx842_open_percpu_txwins(void)
}
static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
- int vasid)
+ int vasid, int *ct)
{
struct vas_window *rxwin = NULL;
struct vas_rx_win_attr rxattr;
@@ -837,6 +839,15 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
coproc->vas.id = vasid;
nx842_add_coprocs_list(coproc, chip_id);
+ /*
+ * The (lpid, pid, tid) combination has to be unique for each
+ * coprocessor instance in the system. To guarantee that, skiboot
+ * uses the coprocessor type (such as 842 or GZIP) as the pid and
+ * passes this value to the kernel in the "pid" device-tree
+ * property.
+ */
+ *ct = pid;
+
return 0;
err_out:
@@ -850,6 +861,7 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
struct device_node *dn;
int chip_id, vasid, ret = 0;
int nx_fifo_found = 0;
+ int uninitialized_var(ct);
chip_id = of_get_ibm_chip_id(pn);
if (chip_id < 0) {
@@ -865,7 +877,7 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
for_each_child_of_node(pn, dn) {
if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
- ret = vas_cfg_coproc_info(dn, chip_id, vasid);
+ ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct);
if (ret) {
of_node_put(dn);
return ret;
@@ -876,9 +888,22 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
if (!nx_fifo_found) {
pr_err("NX842 FIFO nodes are missing\n");
- ret = -EINVAL;
+ return -EINVAL;
}
+ /*
+ * Initialize NX instance for both high and normal priority FIFOs.
+ */
+ if (opal_check_token(OPAL_NX_COPROC_INIT)) {
+ ret = opal_nx_coproc_init(chip_id, ct);
+ if (ret) {
+ pr_err("Failed to initialize NX for chip(%d): %d\n",
+ chip_id, ret);
+ ret = opal_error_code(ret);
+ }
+ } else
+ pr_warn("Firmware doesn't support NX initialization\n");
+
return ret;
}
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index c2f7d4befb55..ad3358e74f5c 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -386,7 +386,6 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "xcbc-aes-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index becb738c897b..a6764af83c6d 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -288,7 +288,6 @@ struct shash_alg nx_shash_sha256_alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index b6e183d58d73..92956bc6e45e 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -294,7 +294,6 @@ struct shash_alg nx_shash_sha512_alg = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index d1a1c74fb56a..0641185bd82f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1464,8 +1464,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "sha1",
.cra_driver_name = "omap-sha1",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1487,8 +1486,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "md5",
.cra_driver_name = "omap-md5",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1511,8 +1509,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "omap-hmac-sha1",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1536,8 +1533,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "omap-hmac-md5",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1564,8 +1560,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1586,8 +1581,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1609,8 +1603,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1633,8 +1626,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1659,8 +1651,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1681,8 +1672,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1704,8 +1694,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1728,8 +1717,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 1c6cbda56afe..09d823d36d3a 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
return;
}
+ count -= initial;
+
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
@@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
- : "d"(control_word), "b"(key), "c"(count - initial));
+ : "d"(control_word), "b"(key), "c"(count));
}
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
if (count < cbc_fetch_blocks)
return cbc_crypt(input, output, key, iv, control_word, count);
+ count -= initial;
+
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
@@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
- : "d" (control_word), "b" (key), "c" (count-initial));
+ : "d" (control_word), "b" (key), "c" (count));
return iv;
}
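
Both helpers share this shape: peel off initial blocks in a first rep xcrypt pass (the Nano prefetch workaround), then run the remainder in a second pass over the already-advanced pointers. Subtracting initial from count before any asm executes is what the two hunks above change. A compile-clean schematic, with rep_xcrypt_stub standing in for the inline asm (which advances its "+S"/"+D" operands as it runs):

static void rep_xcrypt_stub(const u8 **in, u8 **out, void *key,
			    u32 cword, size_t nblocks)
{
	/* the real "rep xcrypt*" advances ESI/EDI by 16 per block */
	*in  += nblocks * 16;
	*out += nblocks * 16;
}

static void xcrypt_split(const u8 *in, u8 *out, void *key,
			 u32 cword, size_t count, size_t initial)
{
	count -= initial;	/* remainder handled by the bulk pass */

	if (initial)		/* workaround pass: first few blocks */
		rep_xcrypt_stub(&in, &out, key, cword, initial);

	rep_xcrypt_stub(&in, &out, key, cword, count);
}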
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d32c79328876..21e5cae0a1e0 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -247,8 +247,7 @@ static struct shash_alg sha1_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -271,8 +270,7 @@ static struct shash_alg sha256_alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -484,7 +482,6 @@ static struct shash_alg sha1_alg_nano = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -503,7 +500,6 @@ static struct shash_alg sha256_alg_nano = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index da8a2d3b5e9a..9225d060e18f 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -163,7 +163,7 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
return 0;
set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
+ reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
if (!reset_data)
return -ENOMEM;
reset_data->accel_dev = accel_dev;
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 718b32a3112e..1c3b36b75467 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 53227d70d397..d8a5db11b7ea 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -378,8 +378,7 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
else
return -EINVAL;
- ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
if (IS_ERR(ahash_tfm))
return PTR_ERR(ahash_tfm);
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
new file mode 100644
index 000000000000..e54249ccc009
--- /dev/null
+++ b/drivers/crypto/qcom-rng.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-18 Linaro Limited
+//
+// Based on msm-rng.c and downstream driver
+
+#include <crypto/internal/rng.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT 0x0000
+#define PRNG_STATUS 0x0004
+#define PRNG_LFSR_CFG 0x0100
+#define PRNG_CONFIG 0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK 0x0000ffff
+#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
+#define PRNG_CONFIG_HW_ENABLE BIT(1)
+#define PRNG_STATUS_DATA_AVAIL BIT(0)
+
+#define WORD_SZ 4
+
+struct qcom_rng {
+ struct mutex lock;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int skip_init;
+};
+
+struct qcom_rng_ctx {
+ struct qcom_rng *rng;
+};
+
+static struct qcom_rng *qcom_rng_dev;
+
+static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
+{
+ unsigned int currsize = 0;
+ u32 val;
+
+ /* read random data from hardware */
+ do {
+ val = readl_relaxed(rng->base + PRNG_STATUS);
+ if (!(val & PRNG_STATUS_DATA_AVAIL))
+ break;
+
+ val = readl_relaxed(rng->base + PRNG_DATA_OUT);
+ if (!val)
+ break;
+
+ if ((max - currsize) >= WORD_SZ) {
+ memcpy(data, &val, WORD_SZ);
+ data += WORD_SZ;
+ currsize += WORD_SZ;
+ } else {
+ /* copy only remaining bytes */
+ memcpy(data, &val, max - currsize);
+ break;
+ }
+ } while (currsize < max);
+
+ return currsize;
+}
+
+static int qcom_rng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dstn, unsigned int dlen)
+{
+ struct qcom_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct qcom_rng *rng = ctx->rng;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ mutex_lock(&rng->lock);
+
+ ret = qcom_rng_read(rng, dstn, dlen);
+
+ mutex_unlock(&rng->lock);
+ clk_disable_unprepare(rng->clk);
+
+ return 0;
+}
+
+static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ return 0;
+}
+
+static int qcom_rng_enable(struct qcom_rng *rng)
+{
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ /* Enable PRNG only if it is not already enabled */
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ if (val & PRNG_CONFIG_HW_ENABLE)
+ goto already_enabled;
+
+ val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
+ val &= ~PRNG_LFSR_CFG_MASK;
+ val |= PRNG_LFSR_CFG_CLOCKS;
+ writel(val, rng->base + PRNG_LFSR_CFG);
+
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ val |= PRNG_CONFIG_HW_ENABLE;
+ writel(val, rng->base + PRNG_CONFIG);
+
+already_enabled:
+ clk_disable_unprepare(rng->clk);
+
+ return 0;
+}
+
+static int qcom_rng_init(struct crypto_tfm *tfm)
+{
+ struct qcom_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->rng = qcom_rng_dev;
+
+ if (!ctx->rng->skip_init)
+ return qcom_rng_enable(ctx->rng);
+
+ return 0;
+}
+
+static struct rng_alg qcom_rng_alg = {
+ .generate = qcom_rng_generate,
+ .seed = qcom_rng_seed,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "qcom-rng",
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct qcom_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = qcom_rng_init,
+ }
+};
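
Once registered, the PRNG is reachable through the kernel's crypto_rng API; a minimal consumer sketch (function and buffer names illustrative):

#include <crypto/rng.h>

static int example_get_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	/* "stdrng" resolves to the highest-priority registered RNG;
	 * use the driver name "qcom-rng" to pin this provider. */
	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* seedsize is 0, so an empty reset is sufficient */
	ret = crypto_rng_reset(rng, NULL, 0);
	if (!ret)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}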
+
+static int qcom_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct qcom_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rng);
+ mutex_init(&rng->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rng->base))
+ return PTR_ERR(rng->base);
+
+ /* ACPI systems have clk already on, so skip clk_get */
+ if (!has_acpi_companion(&pdev->dev)) {
+ rng->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(rng->clk))
+ return PTR_ERR(rng->clk);
+ }
+
+
+ rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev);
+
+ qcom_rng_dev = rng;
+ ret = crypto_register_rng(&qcom_rng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "Register crypto rng failed: %d\n", ret);
+ qcom_rng_dev = NULL;
+ }
+
+ return ret;
+}
+
+static int qcom_rng_remove(struct platform_device *pdev)
+{
+ crypto_unregister_rng(&qcom_rng_alg);
+
+ qcom_rng_dev = NULL;
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id qcom_rng_acpi_match[] = {
+ { .id = "QCOM8160", .driver_data = 1 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
+#endif
+
+static const struct of_device_id qcom_rng_of_match[] = {
+ { .compatible = "qcom,prng", .data = (void *)0},
+ { .compatible = "qcom,prng-ee", .data = (void *)1},
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
+
+static struct platform_driver qcom_rng_driver = {
+ .probe = qcom_rng_probe,
+ .remove = qcom_rng_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(qcom_rng_of_match),
+ .acpi_match_table = ACPI_PTR(qcom_rng_acpi_match),
+ }
+};
+module_platform_driver(qcom_rng_driver);
+
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_DESCRIPTION("Qualcomm random number generator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index bf7163042569..faa282074e5a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1765,8 +1765,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "sha1",
.cra_driver_name = "exynos-sha1",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
@@ -1791,8 +1790,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "md5",
.cra_driver_name = "exynos-md5",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
@@ -1817,8 +1815,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "exynos-sha256",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0f2245e1af2b..e7540a5b8197 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1253,8 +1253,7 @@ static struct ahash_alg sha_v3_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sahara-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sahara_ctx),
@@ -1280,8 +1279,7 @@ static struct ahash_alg sha_v4_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sahara-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sahara_ctx),
@@ -1351,7 +1349,7 @@ err_sha_v4_algs:
err_sha_v3_algs:
for (j = 0; j < k; j++)
- crypto_unregister_ahash(&sha_v4_algs[j]);
+ crypto_unregister_ahash(&sha_v3_algs[j]);
err_aes_algs:
for (j = 0; j < i; j++)
@@ -1367,7 +1365,7 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_alg(&aes_algs[i]);
- for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+ for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
crypto_unregister_ahash(&sha_v3_algs[i]);
if (dev->version > SAHARA_VERSION_3)
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index c5d3efc54a4f..23b0b7bd64c7 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/aes.h>
@@ -105,6 +106,7 @@
#define GCM_CTR_INIT 2
#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+#define CRYP_AUTOSUSPEND_DELAY 50
struct stm32_cryp_caps {
bool swap_final;
@@ -519,6 +521,8 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
int ret;
u32 cfg, hw_mode;
+ pm_runtime_get_sync(cryp->dev);
+
/* Disable interrupt */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@@ -638,6 +642,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
free_pages((unsigned long)buf_out, pages);
}
+ pm_runtime_mark_last_busy(cryp->dev);
+ pm_runtime_put_autosuspend(cryp->dev);
+
if (is_gcm(cryp) || is_ccm(cryp)) {
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
cryp->areq = NULL;
@@ -1969,6 +1976,13 @@ static int stm32_cryp_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, CRYP_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
rst = devm_reset_control_get(dev, NULL);
if (!IS_ERR(rst)) {
reset_control_assert(rst);
@@ -2008,6 +2022,8 @@ static int stm32_cryp_probe(struct platform_device *pdev)
dev_info(dev, "Initialized\n");
+ pm_runtime_put_sync(dev);
+
return 0;
err_aead_algs:
@@ -2020,6 +2036,9 @@ err_engine1:
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
clk_disable_unprepare(cryp->clk);
return ret;
@@ -2028,10 +2049,15 @@ err_engine1:
static int stm32_cryp_remove(struct platform_device *pdev)
{
struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+ int ret;
if (!cryp)
return -ENODEV;
+ ret = pm_runtime_get_sync(cryp->dev);
+ if (ret < 0)
+ return ret;
+
crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
@@ -2041,16 +2067,52 @@ static int stm32_cryp_remove(struct platform_device *pdev)
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
+ pm_runtime_disable(cryp->dev);
+ pm_runtime_put_noidle(cryp->dev);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_cryp_runtime_suspend(struct device *dev)
+{
+ struct stm32_cryp *cryp = dev_get_drvdata(dev);
+
clk_disable_unprepare(cryp->clk);
return 0;
}
+static int stm32_cryp_runtime_resume(struct device *dev)
+{
+ struct stm32_cryp *cryp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(cryp->clk);
+ if (ret) {
+ dev_err(cryp->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_cryp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_cryp_runtime_suspend,
+ stm32_cryp_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_cryp_driver = {
.probe = stm32_cryp_probe,
.remove = stm32_cryp_remove,
.driver = {
.name = DRIVER_NAME,
+ .pm = &stm32_cryp_pm_ops,
.of_match_table = stm32_dt_ids,
},
};
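
stm32-hash and stm32_crc32 below get the same treatment, so the shape is worth stating once: probe holds a usage reference across initialization (get_noresume/set_active/enable), releases it with put_sync when done, hardware paths are bracketed by get_sync and mark_last_busy/put_autosuspend, and the runtime callbacks gate only the clock. A skeleton with illustrative names:

static int example_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);	/* clock already on from probe */
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* ... register algorithms, touch hardware freely ... */

	pm_runtime_put_sync(dev);	/* allow autosuspend from now on */
	return 0;
}

static int example_hw_op(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* resume (clock on) if suspended */

	/* ... MMIO access ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev); /* suspend after the delay */
	return 0;
}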
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index cdc96f1bb917..590d7352837e 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/engine.h>
@@ -121,6 +122,8 @@ enum stm32_hash_data_format {
#define HASH_QUEUE_LENGTH 16
#define HASH_DMA_THRESHOLD 50
+#define HASH_AUTOSUSPEND_DELAY 50
+
struct stm32_hash_ctx {
struct crypto_engine_ctx enginectx;
struct stm32_hash_dev *hdev;
@@ -814,12 +817,17 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
rctx->flags |= HASH_FLAGS_ERRORS;
}
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
crypto_finalize_hash_request(hdev->engine, req, err);
}
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
struct stm32_hash_request_ctx *rctx)
{
+ pm_runtime_get_sync(hdev->dev);
+
if (!(HASH_FLAGS_INIT & hdev->flags)) {
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
stm32_hash_write(hdev, HASH_STR, 0);
@@ -967,6 +975,8 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
u32 *preg;
unsigned int i;
+ pm_runtime_get_sync(hdev->dev);
+
while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
cpu_relax();
@@ -982,6 +992,9 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
memcpy(out, rctx, sizeof(*rctx));
return 0;
@@ -1000,6 +1013,8 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
preg = rctx->hw_context;
+ pm_runtime_get_sync(hdev->dev);
+
stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
stm32_hash_write(hdev, HASH_CR, *preg);
@@ -1009,6 +1024,9 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
stm32_hash_write(hdev, HASH_CSR(i), *preg++);
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
kfree(rctx->hw_context);
return 0;
@@ -1132,8 +1150,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "md5",
.cra_driver_name = "stm32-md5",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1159,8 +1176,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "stm32-hmac-md5",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1185,8 +1201,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "sha1",
.cra_driver_name = "stm32-sha1",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1212,8 +1227,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "stm32-hmac-sha1",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1241,8 +1255,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "stm32-sha224",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1268,8 +1281,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "stm32-hmac-sha224",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1294,8 +1306,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "stm32-sha256",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1321,8 +1332,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "stm32-hmac-sha256",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1482,6 +1492,13 @@ static int stm32_hash_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
if (!IS_ERR(hdev->rst)) {
reset_control_assert(hdev->rst);
@@ -1522,6 +1539,8 @@ static int stm32_hash_probe(struct platform_device *pdev)
dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
+ pm_runtime_put_sync(dev);
+
return 0;
err_algs:
@@ -1535,6 +1554,9 @@ err_engine:
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
clk_disable_unprepare(hdev->clk);
return ret;
@@ -1543,11 +1565,16 @@ err_engine:
static int stm32_hash_remove(struct platform_device *pdev)
{
static struct stm32_hash_dev *hdev;
+ int ret;
hdev = platform_get_drvdata(pdev);
if (!hdev)
return -ENODEV;
+ ret = pm_runtime_get_sync(hdev->dev);
+ if (ret < 0)
+ return ret;
+
stm32_hash_unregister_algs(hdev);
crypto_engine_exit(hdev->engine);
@@ -1559,16 +1586,52 @@ static int stm32_hash_remove(struct platform_device *pdev)
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
+ pm_runtime_disable(hdev->dev);
+ pm_runtime_put_noidle(hdev->dev);
+
clk_disable_unprepare(hdev->clk);
return 0;
}
+#ifdef CONFIG_PM
+static int stm32_hash_runtime_suspend(struct device *dev)
+{
+ struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(hdev->clk);
+
+ return 0;
+}
+
+static int stm32_hash_runtime_resume(struct device *dev)
+{
+ struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(hdev->clk);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_hash_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
+ stm32_hash_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_hash_driver = {
.probe = stm32_hash_probe,
.remove = stm32_hash_remove,
.driver = {
.name = "stm32-hash",
+ .pm = &stm32_hash_pm_ops,
.of_match_table = stm32_hash_of_match,
}
};
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 8f09b8430893..29d2095d9dfd 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -6,8 +6,11 @@
#include <linux/bitrev.h>
#include <linux/clk.h>
+#include <linux/crc32poly.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <crypto/internal/hash.h>
@@ -28,9 +31,7 @@
#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
#define CRC_INIT_DEFAULT 0xFFFFFFFF
-/* Polynomial reversed */
-#define POLY_CRC32 0xEDB88320
-#define POLY_CRC32C 0x82F63B78
+#define CRC_AUTOSUSPEND_DELAY 50
struct stm32_crc {
struct list_head list;
@@ -66,7 +67,7 @@ static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
- mctx->poly = POLY_CRC32;
+ mctx->poly = CRC32_POLY_LE;
return 0;
}
@@ -75,7 +76,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
- mctx->poly = POLY_CRC32C;
+ mctx->poly = CRC32C_POLY_LE;
return 0;
}
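
CRC32_POLY_LE and CRC32C_POLY_LE from <linux/crc32poly.h> carry exactly the values the driver used to hard-code: the bit-reversed forms of the classic polynomials 0x04C11DB7 (CRC-32) and 0x1EDC6F41 (CRC-32C). A throwaway self-check of that relationship, using the same bitrev32() helper the driver already relies on:

#include <linux/bitrev.h>
#include <linux/bug.h>
#include <linux/crc32poly.h>

static void crc_poly_self_check(void)
{
	/* the LE ("reversed") constants are bitrev32 of the BE forms */
	BUILD_BUG_ON(CRC32_POLY_LE != 0xEDB88320);
	BUILD_BUG_ON(CRC32C_POLY_LE != 0x82F63B78);
	WARN_ON(bitrev32(0x04C11DB7) != CRC32_POLY_LE);
	WARN_ON(bitrev32(0x1EDC6F41) != CRC32C_POLY_LE);
}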
@@ -106,6 +107,8 @@ static int stm32_crc_init(struct shash_desc *desc)
}
spin_unlock_bh(&crc_list.lock);
+ pm_runtime_get_sync(ctx->crc->dev);
+
/* Reset, set key, poly and configure in bit reverse mode */
writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
@@ -115,6 +118,9 @@ static int stm32_crc_init(struct shash_desc *desc)
ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
ctx->crc->nb_pending_bytes = 0;
+ pm_runtime_mark_last_busy(ctx->crc->dev);
+ pm_runtime_put_autosuspend(ctx->crc->dev);
+
return 0;
}
@@ -126,6 +132,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
u32 *d32;
unsigned int i;
+ pm_runtime_get_sync(crc->dev);
+
if (unlikely(crc->nb_pending_bytes)) {
while (crc->nb_pending_bytes != sizeof(u32) && length) {
/* Fill in pending data */
@@ -149,6 +157,9 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+ pm_runtime_mark_last_busy(crc->dev);
+ pm_runtime_put_autosuspend(crc->dev);
+
/* Check for pending data (non 32 bits) */
length &= 3;
if (likely(!length))
@@ -174,7 +185,7 @@ static int stm32_crc_final(struct shash_desc *desc, u8 *out)
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
/* Send computed CRC */
- put_unaligned_le32(mctx->poly == POLY_CRC32C ?
+ put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
~ctx->partial : ctx->partial, out);
return 0;
@@ -272,6 +283,13 @@ static int stm32_crc_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
platform_set_drvdata(pdev, crc);
spin_lock(&crc_list.lock);
@@ -287,12 +305,18 @@ static int stm32_crc_probe(struct platform_device *pdev)
dev_info(dev, "Initialized\n");
+ pm_runtime_put_sync(dev);
+
return 0;
}
static int stm32_crc_remove(struct platform_device *pdev)
{
struct stm32_crc *crc = platform_get_drvdata(pdev);
+ int ret = pm_runtime_get_sync(crc->dev);
+
+ if (ret < 0)
+ return ret;
spin_lock(&crc_list.lock);
list_del(&crc->list);
@@ -300,11 +324,46 @@ static int stm32_crc_remove(struct platform_device *pdev)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ pm_runtime_disable(crc->dev);
+ pm_runtime_put_noidle(crc->dev);
+
+ clk_disable_unprepare(crc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_crc_runtime_suspend(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+
clk_disable_unprepare(crc->clk);
return 0;
}
+static int stm32_crc_runtime_resume(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(crc->clk);
+ if (ret) {
+ dev_err(crc->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_crc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
+ stm32_crc_runtime_resume, NULL)
+};
+
static const struct of_device_id stm32_dt_ids[] = {
{ .compatible = "st,stm32f7-crc", },
{},
@@ -316,6 +375,7 @@ static struct platform_driver stm32_crc_driver = {
.remove = stm32_crc_remove,
.driver = {
.name = DRIVER_NAME,
+ .pm = &stm32_crc_pm_ops,
.of_match_table = stm32_dt_ids,
},
};
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index a81d89b3b7d8..89adf9e0fed2 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -45,11 +45,9 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "md5-sun4i-ss",
.cra_priority = 300,
.cra_alignmask = 3,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_type = &crypto_ahash_type,
.cra_init = sun4i_hash_crainit
}
}
@@ -73,11 +71,9 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "sha1-sun4i-ss",
.cra_priority = 300,
.cra_alignmask = 3,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_type = &crypto_ahash_type,
.cra_init = sun4i_hash_crainit
}
}
@@ -96,8 +92,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -118,8 +113,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -140,8 +134,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -161,8 +154,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -183,8 +175,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -205,7 +196,6 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index cf14f099ce4a..6988012deca4 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2822,8 +2822,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2838,8 +2837,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2854,8 +2852,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha224",
.cra_driver_name = "sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2870,8 +2867,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2886,8 +2882,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha384",
.cra_driver_name = "sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2902,8 +2897,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha512",
.cra_driver_name = "sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2918,8 +2912,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "hmac-md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2934,8 +2927,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2950,8 +2942,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "hmac-sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2966,8 +2957,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2982,8 +2972,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "hmac-sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2998,8 +2987,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "hmac-sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -3186,7 +3174,6 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
alg->cra_exit = talitos_cra_exit;
- alg->cra_type = &crypto_ahash_type;
t_alg->algt.alg.hash.init = ahash_init;
t_alg->algt.alg.hash.update = ahash_update;
t_alg->algt.alg.hash.final = ahash_final;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index cb31b59c9d53..d2663a4e1f5e 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -20,6 +20,7 @@
#include <linux/irqreturn.h>
#include <linux/klist.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/semaphore.h>
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 2d0a677bcc76..633321a8dd03 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -21,6 +21,7 @@
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
@@ -1524,8 +1525,7 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
.cra_init = hash_cra_init,
@@ -1548,11 +1548,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
@@ -1574,11 +1572,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
@@ -1600,11 +1596,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index af6a908dfa7a..2c573d1aaa64 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -49,12 +49,18 @@ struct virtio_crypto_sym_request {
bool encrypt;
};
+struct virtio_crypto_algo {
+ uint32_t algonum;
+ uint32_t service;
+ unsigned int active_devs;
+ struct crypto_alg algo;
+};
+
/*
* The algs_lock protects the below per-algorithm active_devs counts
* and crypto algorithm registration.
*/
static DEFINE_MUTEX(algs_lock);
-static unsigned int virtio_crypto_active_devs;
static void virtio_crypto_ablkcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
@@ -312,15 +318,21 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
unsigned int keylen)
{
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ uint32_t alg;
int ret;
+ ret = virtio_crypto_alg_validate_key(keylen, &alg);
+ if (ret)
+ return ret;
+
if (!ctx->vcrypto) {
/* New key */
int node = virtio_crypto_get_current_node();
struct virtio_crypto *vcrypto =
- virtcrypto_get_dev_node(node);
+ virtcrypto_get_dev_node(node,
+ VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
if (!vcrypto) {
- pr_err("virtio_crypto: Could not find a virtio device in the system\n");
+ pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
return -ENODEV;
}
@@ -371,12 +383,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
/* Why 3? outhdr + iv + inhdr */
sg_total = src_nents + dst_nents + 3;
- sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_ATOMIC,
+ sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
dev_to_node(&vcrypto->vdev->dev));
if (!sgs)
return -ENOMEM;
- req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
+ req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
dev_to_node(&vcrypto->vdev->dev));
if (!req_data) {
kfree(sgs);
@@ -571,57 +583,85 @@ static void virtio_crypto_ablkcipher_finalize_req(
virtcrypto_clear_request(&vc_sym_req->base);
}
-static struct crypto_alg virtio_crypto_algs[] = { {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "virtio_crypto_aes_cbc",
- .cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = virtio_crypto_ablkcipher_init,
- .cra_exit = virtio_crypto_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = virtio_crypto_ablkcipher_setkey,
- .decrypt = virtio_crypto_ablkcipher_decrypt,
- .encrypt = virtio_crypto_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+static struct virtio_crypto_algo virtio_crypto_algs[] = { {
+ .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
+ .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
+ .algo = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "virtio_crypto_aes_cbc",
+ .cra_priority = 150,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = virtio_crypto_ablkcipher_init,
+ .cra_exit = virtio_crypto_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .setkey = virtio_crypto_ablkcipher_setkey,
+ .decrypt = virtio_crypto_ablkcipher_decrypt,
+ .encrypt = virtio_crypto_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
},
},
} };
-int virtio_crypto_algs_register(void)
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
int ret = 0;
+ int i = 0;
mutex_lock(&algs_lock);
- if (++virtio_crypto_active_devs != 1)
- goto unlock;
- ret = crypto_register_algs(virtio_crypto_algs,
- ARRAY_SIZE(virtio_crypto_algs));
- if (ret)
- virtio_crypto_active_devs--;
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+ uint32_t service = virtio_crypto_algs[i].service;
+ uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+ if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_algs[i].active_devs == 0) {
+ ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+ if (ret)
+ goto unlock;
+ }
+
+ virtio_crypto_algs[i].active_devs++;
+ dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
+ virtio_crypto_algs[i].algo.cra_name);
+ }
unlock:
mutex_unlock(&algs_lock);
return ret;
}
-void virtio_crypto_algs_unregister(void)
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
+ int i = 0;
+
mutex_lock(&algs_lock);
- if (--virtio_crypto_active_devs != 0)
- goto unlock;
- crypto_unregister_algs(virtio_crypto_algs,
- ARRAY_SIZE(virtio_crypto_algs));
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+ uint32_t service = virtio_crypto_algs[i].service;
+ uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+ if (virtio_crypto_algs[i].active_devs == 0 ||
+ !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_algs[i].active_devs == 1)
+ crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+
+ virtio_crypto_algs[i].active_devs--;
+ }
-unlock:
mutex_unlock(&algs_lock);
}
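
The register/unregister rework above is a classic first-user/last-user refcount: each algorithm is registered with the crypto API when the first capable device appears and unregistered when the last one goes away. A generic sketch of the idiom, with hypothetical example_* names:

/* Hypothetical sketch of the per-algorithm refcount idiom used above. */
struct example_alg {
	struct crypto_alg algo;
	unsigned int active_devs;	/* guarded by example_lock */
};

static DEFINE_MUTEX(example_lock);

static int example_alg_get(struct example_alg *a)
{
	int ret = 0;

	mutex_lock(&example_lock);
	if (a->active_devs == 0)
		ret = crypto_register_alg(&a->algo);
	if (!ret)
		a->active_devs++;
	mutex_unlock(&example_lock);
	return ret;
}

static void example_alg_put(struct example_alg *a)
{
	mutex_lock(&example_lock);
	if (--a->active_devs == 0)
		crypto_unregister_alg(&a->algo);
	mutex_unlock(&example_lock);
}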
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 66501a5a2b7b..63ef7f7924ea 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -55,6 +55,20 @@ struct virtio_crypto {
/* Number of queues currently used by the driver */
u32 curr_queue;
+ /*
+ * Specifies the services mask which the device supports,
+ * see VIRTIO_CRYPTO_SERVICE_*
+ */
+ u32 crypto_services;
+
+ /* Detailed algorithms mask */
+ u32 cipher_algo_l;
+ u32 cipher_algo_h;
+ u32 hash_algo;
+ u32 mac_algo_l;
+ u32 mac_algo_h;
+ u32 aead_algo;
+
/* Maximum length of cipher key */
u32 max_cipher_key_len;
/* Maximum length of authenticated key */
@@ -102,7 +116,12 @@ int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
-struct virtio_crypto *virtcrypto_get_dev_node(int node);
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
+ uint32_t service,
+ uint32_t algo);
+struct virtio_crypto *virtcrypto_get_dev_node(int node,
+ uint32_t service,
+ uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
@@ -122,7 +141,7 @@ static inline int virtio_crypto_get_current_node(void)
return node;
}
-int virtio_crypto_algs_register(void);
-void virtio_crypto_algs_unregister(void);
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
#endif /* _VIRTIO_CRYPTO_COMMON_H */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 83326986c113..3c9e120287af 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -146,7 +146,7 @@ static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
if (vi->affinity_hint_set) {
for (i = 0; i < vi->max_data_queues; i++)
- virtqueue_set_affinity(vi->data_vq[i].vq, -1);
+ virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
vi->affinity_hint_set = false;
}
@@ -173,7 +173,7 @@ static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
*
*/
for_each_online_cpu(cpu) {
- virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
+ virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
if (++i >= vcrypto->max_data_queues)
break;
}
@@ -303,6 +303,13 @@ static int virtcrypto_probe(struct virtio_device *vdev)
u32 max_data_queues = 0, max_cipher_key_len = 0;
u32 max_auth_key_len = 0;
u64 max_size = 0;
+ u32 cipher_algo_l = 0;
+ u32 cipher_algo_h = 0;
+ u32 hash_algo = 0;
+ u32 mac_algo_l = 0;
+ u32 mac_algo_h = 0;
+ u32 aead_algo = 0;
+ u32 crypto_services = 0;
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
@@ -339,6 +346,20 @@ static int virtcrypto_probe(struct virtio_device *vdev)
max_auth_key_len, &max_auth_key_len);
virtio_cread(vdev, struct virtio_crypto_config,
max_size, &max_size);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ crypto_services, &crypto_services);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ cipher_algo_l, &cipher_algo_l);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ cipher_algo_h, &cipher_algo_h);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ hash_algo, &hash_algo);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ mac_algo_l, &mac_algo_l);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ mac_algo_h, &mac_algo_h);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ aead_algo, &aead_algo);
/* Add virtio crypto device to global table */
err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -358,6 +379,14 @@ static int virtcrypto_probe(struct virtio_device *vdev)
vcrypto->max_cipher_key_len = max_cipher_key_len;
vcrypto->max_auth_key_len = max_auth_key_len;
vcrypto->max_size = max_size;
+ vcrypto->crypto_services = crypto_services;
+ vcrypto->cipher_algo_l = cipher_algo_l;
+ vcrypto->cipher_algo_h = cipher_algo_h;
+ vcrypto->mac_algo_l = mac_algo_l;
+ vcrypto->mac_algo_h = mac_algo_h;
+ vcrypto->hash_algo = hash_algo;
+ vcrypto->aead_algo = aead_algo;
+
dev_info(&vdev->dev,
"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
index a69ff71de2c4..d70de3a4f7d7 100644
--- a/drivers/crypto/virtio/virtio_crypto_mgr.c
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -181,14 +181,20 @@ int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
/*
* virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
* @node: Node id the driver works.
+ * @service: Crypto service that must be supported by the device
+ * @algo: Algorithm number that must be supported by the device
*
- * Function returns the virtio crypto device used fewest on the node.
+ * Function returns the least-used virtio crypto device on the node
+ * that supports the given crypto service and algorithm.
*
* To be used by virtio crypto device specific drivers.
*
* Return: pointer to vcrypto_dev or NULL if not found.
*/
-struct virtio_crypto *virtcrypto_get_dev_node(int node)
+struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
+ uint32_t algo)
{
struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
unsigned long best = ~0;
@@ -199,7 +205,8 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
dev_to_node(&tmp_dev->vdev->dev) < 0) &&
- virtcrypto_dev_started(tmp_dev)) {
+ virtcrypto_dev_started(tmp_dev) &&
+ virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
ctr = atomic_read(&tmp_dev->ref_count);
if (best > ctr) {
vcrypto_dev = tmp_dev;
@@ -214,7 +221,9 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
/* Get any started device */
list_for_each_entry(tmp_dev,
virtcrypto_devmgr_get_head(), list) {
- if (virtcrypto_dev_started(tmp_dev)) {
+ if (virtcrypto_dev_started(tmp_dev) &&
+ virtcrypto_algo_is_supported(tmp_dev,
+ service, algo)) {
vcrypto_dev = tmp_dev;
break;
}
@@ -240,7 +249,7 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
*/
int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
{
- if (virtio_crypto_algs_register()) {
+ if (virtio_crypto_algs_register(vcrypto)) {
pr_err("virtio_crypto: Failed to register crypto algs\n");
return -EFAULT;
}
@@ -260,5 +269,65 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
*/
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
- virtio_crypto_algs_unregister();
+ virtio_crypto_algs_unregister(vcrypto);
+}
+
+/*
+ * virtcrypto_algo_is_supported()
+ * @vcrypto: Pointer to virtio crypto device.
+ * @service: The bit number of the service to validate.
+ *           See VIRTIO_CRYPTO_SERVICE_*
+ * @algo: The bit number of the algorithm to validate.
+ *
+ * Validate whether the virtio crypto device supports the given service
+ * and algorithm.
+ *
+ * Return: true if the device supports the service and algorithm.
+ */
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
+ uint32_t service,
+ uint32_t algo)
+{
+ uint32_t service_mask = 1u << service;
+ uint32_t algo_mask = 0;
+ bool low = true;
+
+ if (algo > 31) {
+ algo -= 32;
+ low = false;
+ }
+
+ if (!(vcrypto->crypto_services & service_mask))
+ return false;
+
+ switch (service) {
+ case VIRTIO_CRYPTO_SERVICE_CIPHER:
+ if (low)
+ algo_mask = vcrypto->cipher_algo_l;
+ else
+ algo_mask = vcrypto->cipher_algo_h;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_HASH:
+ algo_mask = vcrypto->hash_algo;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_MAC:
+ if (low)
+ algo_mask = vcrypto->mac_algo_l;
+ else
+ algo_mask = vcrypto->mac_algo_h;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_AEAD:
+ algo_mask = vcrypto->aead_algo;
+ break;
+ }
+
+ if (!(algo_mask & (1u << algo)))
+ return false;
+
+ return true;
}
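
The helper above reduces the capability words read from the device to a yes/no answer, and virtcrypto_get_dev_node() now filters on it. A short sketch of the consumer side, as the cbc(aes) init path might use it (constants from the virtio crypto spec; session setup elided):

/* Sketch: pick a backend device that actually advertises AES-CBC.
 * Returns NULL when no started device supports the pair. */
static struct virtio_crypto *pick_aes_cbc_dev(void)
{
	int node = virtio_crypto_get_current_node();

	return virtcrypto_get_dev_node(node,
				       VIRTIO_CRYPTO_SERVICE_CIPHER,
				       VIRTIO_CRYPTO_CIPHER_AES_CBC);
}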
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 1c4b5b889fba..dd8b8716467a 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -215,7 +215,7 @@ struct shash_alg p8_ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "p8_ghash",
.cra_priority = 1000,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct p8_ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index de2f8297a210..0a2acd7993f0 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
/* prevent private mappings from being established */
if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
- dev_info(dev, "%s: %s: fail, attempted private mapping\n",
+ dev_info_ratelimited(dev,
+ "%s: %s: fail, attempted private mapping\n",
current->comm, func);
return -EINVAL;
}
mask = dax_region->align - 1;
if (vma->vm_start & mask || vma->vm_end & mask) {
- dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+ dev_info_ratelimited(dev,
+ "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
current->comm, func, vma->vm_start, vma->vm_end,
mask);
return -EINVAL;
@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
&& (vma->vm_flags & VM_DONTCOPY) == 0) {
- dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+ dev_info_ratelimited(dev,
+ "%s: %s: fail, dax range requires MADV_DONTFORK\n",
current->comm, func);
return -EINVAL;
}
if (!vma_is_dax(vma)) {
- dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
+ dev_info_ratelimited(dev,
+ "%s: %s: fail, vma is not DAX capable\n",
current->comm, func);
return -EINVAL;
}
@@ -470,7 +474,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
return rc;
vma->vm_ops = &dax_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ vma->vm_flags |= VM_HUGEPAGE;
return 0;
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 0b5b3abe054e..4c49bb1330b5 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -604,28 +604,29 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
}
- devfreq->min_freq = find_available_min_freq(devfreq);
- if (!devfreq->min_freq) {
+ devfreq->scaling_min_freq = find_available_min_freq(devfreq);
+ if (!devfreq->scaling_min_freq) {
mutex_unlock(&devfreq->lock);
err = -EINVAL;
goto err_dev;
}
- devfreq->scaling_min_freq = devfreq->min_freq;
+ devfreq->min_freq = devfreq->scaling_min_freq;
- devfreq->max_freq = find_available_max_freq(devfreq);
- if (!devfreq->max_freq) {
+ devfreq->scaling_max_freq = find_available_max_freq(devfreq);
+ if (!devfreq->scaling_max_freq) {
mutex_unlock(&devfreq->lock);
err = -EINVAL;
goto err_dev;
}
- devfreq->scaling_max_freq = devfreq->max_freq;
+ devfreq->max_freq = devfreq->scaling_max_freq;
dev_set_name(&devfreq->dev, "devfreq%d",
atomic_inc_return(&devfreq_no));
err = device_register(&devfreq->dev);
if (err) {
mutex_unlock(&devfreq->lock);
- goto err_dev;
+ put_device(&devfreq->dev);
+ goto err_out;
}
devfreq->trans_table =
@@ -672,6 +673,7 @@ err_init:
mutex_unlock(&devfreq_list_lock);
device_unregister(&devfreq->dev);
+ devfreq = NULL;
err_dev:
if (devfreq)
kfree(devfreq);
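
The error-path change above encodes the driver-core rule that once device_register() has been called, the struct device owns a kobject reference: a failure must be unwound with put_device(), which ends up invoking the release callback, never with a direct kfree(). Setting devfreq to NULL before falling through likewise keeps the old kfree() branch from double-freeing. A generic sketch of the convention, with hypothetical foo_* names:

/* Hypothetical sketch: foo_release() is assumed to kfree() the object. */
static int foo_register(struct foo_device *foo)
{
	int err;

	foo->dev.release = foo_release;
	err = device_register(&foo->dev);
	if (err) {
		/* Not kfree(foo): drop the reference so foo_release() runs. */
		put_device(&foo->dev);
		return err;
	}
	return 0;
}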
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 3cd6a184fe7c..a9c64f0d3284 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -627,11 +627,9 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
size = sizeof(struct devfreq_event_dev *) * info->num_events;
info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!info->edev) {
- dev_err(&pdev->dev,
- "failed to allocate memory devfreq-event devices\n");
+ if (!info->edev)
return -ENOMEM;
- }
+
edev = info->edev;
platform_set_drvdata(pdev, info);
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 5dfbfa3cc878..e795ad2b3f6b 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -68,15 +68,6 @@ struct rk3399_dmcfreq {
struct devfreq_event_dev *edev;
struct mutex lock;
struct dram_timing timing;
-
- /*
- * DDR Converser of Frequency (DCF) is used to implement DDR frequency
- * conversion without the participation of CPU, we will implement and
- * control it in arm trust firmware.
- */
- wait_queue_head_t wait_dcf_queue;
- int irq;
- int wait_dcf_flag;
struct regulator *vdd_center;
unsigned long rate, target_rate;
unsigned long volt, target_volt;
@@ -112,31 +103,22 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
target_volt);
if (err) {
- dev_err(dev, "Cannot to set voltage %lu uV\n",
+ dev_err(dev, "Cannot set voltage %lu uV\n",
target_volt);
goto out;
}
}
- dmcfreq->wait_dcf_flag = 1;
err = clk_set_rate(dmcfreq->dmc_clk, target_rate);
if (err) {
- dev_err(dev, "Cannot to set frequency %lu (%d)\n",
- target_rate, err);
+ dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
+ err);
regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
dmcfreq->volt);
goto out;
}
/*
- * Wait until bcf irq happen, it means freq scaling finish in
- * arm trust firmware, use 100ms as timeout time.
- */
- if (!wait_event_timeout(dmcfreq->wait_dcf_queue,
- !dmcfreq->wait_dcf_flag, HZ / 10))
- dev_warn(dev, "Timeout waiting for dcf interrupt\n");
-
- /*
* Check the dpll rate,
* There are only two results we can get,
* 1. DDR frequency scaling fails, we still get the old rate.
@@ -146,8 +128,8 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
/* If get the incorrect rate, set voltage to old value. */
if (dmcfreq->rate != target_rate) {
- dev_err(dev, "Get wrong ddr frequency, Request frequency %lu,\
- Current frequency %lu\n", target_rate, dmcfreq->rate);
+ dev_err(dev, "Got wrong frequency, Request %lu, Current %lu\n",
+ target_rate, dmcfreq->rate);
regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
dmcfreq->volt);
goto out;
@@ -155,7 +137,7 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
target_volt);
if (err)
- dev_err(dev, "Cannot to set vol %lu uV\n", target_volt);
+ dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
dmcfreq->rate = target_rate;
dmcfreq->volt = target_volt;
@@ -241,22 +223,6 @@ static __maybe_unused int rk3399_dmcfreq_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk3399_dmcfreq_pm, rk3399_dmcfreq_suspend,
rk3399_dmcfreq_resume);
-static irqreturn_t rk3399_dmc_irq(int irq, void *dev_id)
-{
- struct rk3399_dmcfreq *dmcfreq = dev_id;
- struct arm_smccc_res res;
-
- dmcfreq->wait_dcf_flag = 0;
- wake_up(&dmcfreq->wait_dcf_queue);
-
- /* Clear the DCF interrupt */
- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
- ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ,
- 0, 0, 0, 0, &res);
-
- return IRQ_HANDLED;
-}
-
static int of_get_ddr_timings(struct dram_timing *timing,
struct device_node *np)
{
@@ -330,16 +296,10 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct rk3399_dmcfreq *data;
- int ret, irq, index, size;
+ int ret, index, size;
uint32_t *timing;
struct dev_pm_opp *opp;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev,
- "Cannot get the dmc interrupt resource: %d\n", irq);
- return irq;
- }
data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -348,27 +308,22 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->vdd_center = devm_regulator_get(dev, "center");
if (IS_ERR(data->vdd_center)) {
+ if (PTR_ERR(data->vdd_center) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
dev_err(dev, "Cannot get the regulator \"center\"\n");
return PTR_ERR(data->vdd_center);
}
data->dmc_clk = devm_clk_get(dev, "dmc_clk");
if (IS_ERR(data->dmc_clk)) {
+ if (PTR_ERR(data->dmc_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
dev_err(dev, "Cannot get the clk dmc_clk\n");
return PTR_ERR(data->dmc_clk);
};
- data->irq = irq;
- ret = devm_request_irq(dev, irq, rk3399_dmc_irq, 0,
- dev_name(dev), data);
- if (ret) {
- dev_err(dev, "Failed to request dmc irq: %d\n", ret);
- return ret;
- }
-
- init_waitqueue_head(&data->wait_dcf_queue);
- data->wait_dcf_flag = 0;
-
data->edev = devfreq_event_get_edev_by_phandle(dev, 0);
if (IS_ERR(data->edev))
return -EPROBE_DEFER;
@@ -420,8 +375,10 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->rate = clk_get_rate(data->dmc_clk);
opp = devfreq_recommended_opp(dev, &data->rate, 0);
- if (IS_ERR(opp))
- return PTR_ERR(opp);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto err_free_opp;
+ }
data->rate = dev_pm_opp_get_freq(opp);
data->volt = dev_pm_opp_get_voltage(opp);
@@ -433,14 +390,34 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
&rk3399_devfreq_dmc_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
&data->ondemand_data);
- if (IS_ERR(data->devfreq))
- return PTR_ERR(data->devfreq);
+ if (IS_ERR(data->devfreq)) {
+ ret = PTR_ERR(data->devfreq);
+ goto err_free_opp;
+ }
+
devm_devfreq_register_opp_notifier(dev, data->devfreq);
data->dev = dev;
platform_set_drvdata(pdev, data);
return 0;
+
+err_free_opp:
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ return ret;
+}
+
+static int rk3399_dmcfreq_remove(struct platform_device *pdev)
+{
+ struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
+
+ /*
+ * Before removing the opp table we need to unregister the opp notifier.
+ */
+ devm_devfreq_unregister_opp_notifier(dmcfreq->dev, dmcfreq->devfreq);
+ dev_pm_opp_of_remove_table(dmcfreq->dev);
+
+ return 0;
}
static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
@@ -451,6 +428,7 @@ MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
static struct platform_driver rk3399_dmcfreq_driver = {
.probe = rk3399_dmcfreq_probe,
+ .remove = rk3399_dmcfreq_remove,
.driver = {
.name = "rk3399-dmc-freq",
.pm = &rk3399_dmcfreq_pm,
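
The regulator and clock hunks in this probe follow the standard deferral etiquette: -EPROBE_DEFER is passed up silently, because the supplier may simply not have probed yet, and only genuine failures are logged. A condensed sketch of the pattern as a hypothetical helper:

/* Hypothetical helper: get a named clock, staying quiet on deferral. */
static struct clk *foo_get_clk(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk) && PTR_ERR(clk) != -EPROBE_DEFER)
		dev_err(dev, "Cannot get clk \"%s\": %ld\n",
			name, PTR_ERR(clk));

	return clk;	/* caller propagates PTR_ERR(), incl. -EPROBE_DEFER */
}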
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index ae712159246f..c59d2eee5d30 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index d78d5fc173dc..13884474d158 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release
- || !exp_info->ops->map_atomic
|| !exp_info->ops->map
|| !exp_info->ops->mmap)) {
return ERR_PTR(-EINVAL);
@@ -568,7 +567,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
mutex_lock(&dmabuf->lock);
if (dmabuf->ops->attach) {
- ret = dmabuf->ops->attach(dmabuf, dev, attach);
+ ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}
@@ -687,26 +686,14 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
* void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
*
- * There are also atomic variants of these interfaces. Like for kmap they
- * facilitate non-blocking fast-paths. Neither the importer nor the exporter
- * (in the callback) is allowed to block when using these.
- *
- * Interfaces::
- * void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
- * void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
- *
- * For importers all the restrictions of using kmap apply, like the limited
- * supply of kmap_atomic slots. Hence an importer shall only hold onto at
- * max 2 atomic dma_buf kmaps at the same time (in any given process context).
+ * Implementing these functions is optional for exporters; for importers all
+ * the restrictions of using kmap apply.
*
* dma_buf kmap calls outside of the range specified in begin_cpu_access are
* undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
* the partial chunks at the beginning and end but may return stale or bogus
* data outside of the range (in these partial chunks).
*
- * Note that these calls need to always succeed. The exporter needs to
- * complete any preparations that might fail in begin_cpu_access.
- *
* For some cases the overhead of kmap can be too high, a vmap interface
* is introduced. This interface should be used very carefully, as vmalloc
* space is a limited resources on many architectures.
@@ -860,41 +847,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
/**
- * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
- * space. The same restrictions as for kmap_atomic and friends apply.
- * @dmabuf: [in] buffer to map page from.
- * @page_num: [in] page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
-{
- WARN_ON(!dmabuf);
-
- return dmabuf->ops->map_atomic(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
-
-/**
- * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
- * @dmabuf: [in] buffer to unmap page from.
- * @page_num: [in] page in PAGE_SIZE units to unmap.
- * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
- void *vaddr)
-{
- WARN_ON(!dmabuf);
-
- if (dmabuf->ops->unmap_atomic)
- dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
-
-/**
* dma_buf_kmap - Map a page of the buffer object into kernel address space. The
* same restrictions as for kmap and friends apply.
* @dmabuf: [in] buffer to map page from.
@@ -907,6 +859,8 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
WARN_ON(!dmabuf);
+ if (!dmabuf->ops->map)
+ return NULL;
return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);
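
With ->map optional, dma_buf_kmap() can now return NULL, so importers have to check. A minimal importer-side sketch (the error code is illustrative and the begin/end_cpu_access bracketing is elided):

/* Sketch: CPU access to page 0 that tolerates exporters without ->map. */
static int foo_peek_page(struct dma_buf *dmabuf)
{
	void *vaddr = dma_buf_kmap(dmabuf, 0);

	if (!vaddr)		/* exporter does not implement ->map */
		return -ENXIO;

	/* ... read or write the mapped page here ... */

	dma_buf_kunmap(dmabuf, 0, vaddr);
	return 0;
}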
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index dd1edfb27b61..a8c254497251 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -104,7 +104,6 @@ const struct dma_fence_ops dma_fence_array_ops = {
.get_timeline_name = dma_fence_array_get_timeline_name,
.enable_signaling = dma_fence_array_enable_signaling,
.signaled = dma_fence_array_signaled,
- .wait = dma_fence_default_wait,
.release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 4edb9fd3cf47..1551ca7df394 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -39,11 +39,42 @@ EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
/**
+ * DOC: DMA fences overview
+ *
+ * DMA fences, represented by &struct dma_fence, are the kernel internal
+ * synchronization primitive for DMA operations like GPU rendering, video
+ * encoding/decoding, or displaying buffers on a screen.
+ *
+ * A fence is initialized using dma_fence_init() and completed using
+ * dma_fence_signal(). Fences are associated with a context, allocated through
+ * dma_fence_context_alloc(), and all fences on the same context are
+ * fully ordered.
+ *
+ * Since the purpose of fences is to facilitate cross-device and
+ * cross-application synchronization, there are multiple ways to use one:
+ *
+ * - Individual fences can be exposed as a &sync_file, accessed as a file
+ * descriptor from userspace, created by calling sync_file_create(). This is
+ * called explicit fencing, since userspace passes around explicit
+ * synchronization points.
+ *
+ * - Some subsystems also have their own explicit fencing primitives, like
+ * &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
+ * fence to be updated.
+ *
+ * - Then there's also implicit fencing, where the synchronization points are
+ * implicitly passed around as part of shared &dma_buf instances. Such
+ * implicit fences are stored in &struct reservation_object through the
+ * &dma_buf.resv pointer.
+ */
+
+/**
* dma_fence_context_alloc - allocate an array of fence contexts
- * @num: [in] amount of contexts to allocate
+ * @num: amount of contexts to allocate
*
- * This function will return the first index of the number of fences allocated.
- * The fence context is used for setting fence->context to a unique number.
+ * This function will return the first index of the allocated range of
+ * fence contexts. The fence context is used for setting &dma_fence.context
+ * to a unique number by passing the context to dma_fence_init().
*/
u64 dma_fence_context_alloc(unsigned num)
{
@@ -59,10 +90,14 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
- * can only go from unsignaled to signaled state, it will only be effective
- * the first time.
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
*
- * Unlike dma_fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
+ * held.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
*/
int dma_fence_signal_locked(struct dma_fence *fence)
{
@@ -102,8 +137,11 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
- * can only go from unsignaled to signaled state, it will only be effective
- * the first time.
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
*/
int dma_fence_signal(struct dma_fence *fence)
{
@@ -136,9 +174,9 @@ EXPORT_SYMBOL(dma_fence_signal);
/**
* dma_fence_wait_timeout - sleep until the fence gets signaled
* or until timeout elapses
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
* remaining timeout in jiffies on success. Other error values may be
@@ -148,6 +186,8 @@ EXPORT_SYMBOL(dma_fence_signal);
* directly or indirectly (buf-mgr between reservation and committing)
* holds a reference to the fence, otherwise the fence might be
* freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_any_timeout().
*/
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
@@ -158,12 +198,22 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
return -EINVAL;
trace_dma_fence_wait_start(fence);
- ret = fence->ops->wait(fence, intr, timeout);
+ if (fence->ops->wait)
+ ret = fence->ops->wait(fence, intr, timeout);
+ else
+ ret = dma_fence_default_wait(fence, intr, timeout);
trace_dma_fence_wait_end(fence);
return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
+/**
+ * dma_fence_release - default release function for fences
+ * @kref: &dma_fence.refcount
+ *
+ * This is the default release function for &dma_fence. Drivers shouldn't call
+ * this directly, but instead call dma_fence_put().
+ */
void dma_fence_release(struct kref *kref)
{
struct dma_fence *fence =
@@ -181,6 +231,13 @@ void dma_fence_release(struct kref *kref)
}
EXPORT_SYMBOL(dma_fence_release);
+/**
+ * dma_fence_free - default release function for &dma_fence.
+ * @fence: fence to release
+ *
+ * This is the default implementation for &dma_fence_ops.release. It calls
+ * kfree_rcu() on @fence.
+ */
void dma_fence_free(struct dma_fence *fence)
{
kfree_rcu(fence, rcu);
@@ -189,10 +246,11 @@ EXPORT_SYMBOL(dma_fence_free);
/**
* dma_fence_enable_sw_signaling - enable signaling on fence
- * @fence: [in] the fence to enable
+ * @fence: the fence to enable
*
- * this will request for sw signaling to be enabled, to make the fence
- * complete as soon as possible
+ * This will request sw signaling to be enabled, to make the fence
+ * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
+ * internally.
*/
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
@@ -200,7 +258,8 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+ fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
spin_lock_irqsave(fence->lock, flags);
@@ -216,24 +275,24 @@ EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
* dma_fence_add_callback - add a callback to be called when the fence
* is signaled
- * @fence: [in] the fence to wait on
- * @cb: [in] the callback to register
- * @func: [in] the function to call
+ * @fence: the fence to wait on
+ * @cb: the callback to register
+ * @func: the function to call
*
- * cb will be initialized by dma_fence_add_callback, no initialization
+ * @cb will be initialized by dma_fence_add_callback(), no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
* Note that the callback can be called from an atomic context. If
* fence is already signaled, this function will return -ENOENT (and
- * *not* call the callback)
+ * *not* call the callback).
*
* Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to dma_fence_wait, however the caller doesn't need to
- * keep a refcount to fence afterwards: when software access is enabled,
- * the creator of the fence is required to keep the fence alive until
- * after it signals with dma_fence_signal. The callback itself can be called
- * from irq context.
+ * refcount as it does to dma_fence_wait(), however the caller doesn't need to
+ * keep a refcount to the fence after dma_fence_add_callback() has returned:
+ * when software access is enabled, the creator of the fence is required to keep
+ * the fence alive until after it signals with dma_fence_signal(). The callback
+ * itself can be called from irq context.
*
* Returns 0 in case of success, -ENOENT if the fence is already signaled
* and -EINVAL in case of error.
@@ -260,7 +319,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
ret = -ENOENT;
- else if (!was_set) {
+ else if (!was_set && fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
@@ -282,7 +341,7 @@ EXPORT_SYMBOL(dma_fence_add_callback);
/**
* dma_fence_get_status - returns the status upon completion
- * @fence: [in] the dma_fence to query
+ * @fence: the dma_fence to query
*
* This wraps dma_fence_get_status_locked() to return the error status
* condition on a signaled fence. See dma_fence_get_status_locked() for more
@@ -307,8 +366,8 @@ EXPORT_SYMBOL(dma_fence_get_status);
/**
* dma_fence_remove_callback - remove a callback from the signaling list
- * @fence: [in] the fence to wait on
- * @cb: [in] the callback to remove
+ * @fence: the fence to wait on
+ * @cb: the callback to remove
*
* Remove a previously queued callback from the fence. This function returns
* true if the callback is successfully removed, or false if the fence has
@@ -319,6 +378,9 @@ EXPORT_SYMBOL(dma_fence_get_status);
* doing, since deadlocks and race conditions could occur all too easily. For
* this reason, it should only ever be done on hardware lockup recovery,
* with a reference held to the fence.
+ *
+ * Behaviour is undefined if @cb has not been added to @fence using
+ * dma_fence_add_callback() beforehand.
*/
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
@@ -355,9 +417,9 @@ dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
/**
* dma_fence_default_wait - default sleep until the fence gets signaled
* or until timeout elapses
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
* remaining timeout in jiffies on success. If timeout is zero the value one is
@@ -388,7 +450,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
goto out;
- if (!was_set) {
+ if (!was_set && fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
@@ -450,12 +512,12 @@ dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
/**
* dma_fence_wait_any_timeout - sleep until any fence gets signaled
* or until timeout elapses
- * @fences: [in] array of fences to wait on
- * @count: [in] number of fences to wait on
- * @intr: [in] if true, do an interruptible wait
- * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
- * @idx: [out] the first signaled fence index, meaningful only on
- * positive return
+ * @fences: array of fences to wait on
+ * @count: number of fences to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @idx: used to store the first signaled fence index, meaningful only on
+ * positive return
*
* Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
* interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
@@ -464,6 +526,8 @@ dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
* Synchronous waits for the first fence in the array to be signaled. The
* caller needs to hold a reference to all fences in the array, otherwise a
* fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_timeout().
*/
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
@@ -496,11 +560,6 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
for (i = 0; i < count; ++i) {
struct dma_fence *fence = fences[i];
- if (fence->ops->wait != dma_fence_default_wait) {
- ret = -EINVAL;
- goto fence_rm_cb;
- }
-
cb[i].task = current;
if (dma_fence_add_callback(fence, &cb[i].base,
dma_fence_default_wait_cb)) {
@@ -541,27 +600,25 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
* dma_fence_init - Initialize a custom fence.
- * @fence: [in] the fence to initialize
- * @ops: [in] the dma_fence_ops for operations on this fence
- * @lock: [in] the irqsafe spinlock to use for locking this fence
- * @context: [in] the execution context this fence is run on
- * @seqno: [in] a linear increasing sequence number for this context
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
*
* Initializes an allocated fence, the caller doesn't have to keep its
* refcount after committing with this fence, but it will need to hold a
- * refcount again if dma_fence_ops.enable_signaling gets called. This can
- * be used for other implementing other types of fence.
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
*
* context and seqno are used for easy comparison between fences, allowing
- * to check which fence is later by simply using dma_fence_later.
+ * to check which fence is later by simply using dma_fence_later().
*/
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
- BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
- !ops->get_driver_name || !ops->get_timeline_name);
+ BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
kref_init(&fence->refcount);
fence->ops = ops;
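
With the relaxed BUG_ON() above, plus the optional ->wait and ->enable_signaling handling earlier in this file, the smallest valid fence implementation supplies only the two name callbacks; waiting falls back to dma_fence_default_wait() and release to dma_fence_free(). A sketch of such a minimal fence:

static const char *dummy_get_driver_name(struct dma_fence *f)
{
	return "dummy";
}

static const char *dummy_get_timeline_name(struct dma_fence *f)
{
	return "dummy-timeline";
}

/* No .wait, .enable_signaling or .release: the core defaults apply. */
static const struct dma_fence_ops dummy_fence_ops = {
	.get_driver_name = dummy_get_driver_name,
	.get_timeline_name = dummy_get_timeline_name,
};

/* Usage: dma_fence_init(&f, &dummy_fence_ops, &lock,
 *			  dma_fence_context_alloc(1), 1); */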
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 314eb1071cce..6c95f61a32e7 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -46,7 +46,7 @@
* write-side updates.
*/
-DEFINE_WW_CLASS(reservation_ww_class);
+DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
struct lock_class_key reservation_seqcount_class;
@@ -141,6 +141,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
if (signaled) {
RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
} else {
+ BUG_ON(fobj->shared_count >= fobj->shared_max);
RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
fobj->shared_count++;
}
@@ -230,10 +231,9 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
old = reservation_object_get_list(obj);
obj->staged = NULL;
- if (!fobj) {
- BUG_ON(old->shared_count >= old->shared_max);
+ if (!fobj)
reservation_object_add_shared_inplace(obj, old, fence);
- } else
+ else
reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 3d78ca89a605..53c1d6d36a64 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -188,7 +188,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
.get_timeline_name = timeline_fence_get_timeline_name,
.enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
- .wait = dma_fence_default_wait,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ca1680afa20a..dacf3f42426d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -250,6 +250,7 @@ config IMX_SDMA
tristate "i.MX SDMA support"
depends on ARCH_MXC
select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
Support the i.MX SDMA engine. This engine is integrated into
Freescale i.MX25/31/35/51/53/6 chips.
@@ -413,6 +414,14 @@ config NBPFAXI_DMA
help
Support for "Type-AXI" NBPF DMA IPs from Renesas
+config OWL_DMA
+ tristate "Actions Semi Owl SoCs DMA support"
+ depends on ARCH_ACTIONS
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the Actions Semi Owl SoCs DMA controller.
+
config PCH_DMA
tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
depends on PCI && (X86_32 || COMPILE_TEST)
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 203a99d68315..c91702d88b95 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_OWL_DMA) += owl-dma.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 08ba8473a284..272bed6c8ba7 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -500,12 +500,8 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
caps->max_burst = device->max_burst;
caps->residue_granularity = device->residue_granularity;
caps->descriptor_reuse = device->descriptor_reuse;
-
- /*
- * Some devices implement only pause (e.g. to get residuum) but no
- * resume. However cmd_pause is advertised as pause AND resume.
- */
- caps->cmd_pause = !!(device->device_pause && device->device_resume);
+ caps->cmd_pause = !!device->device_pause;
+ caps->cmd_resume = !!device->device_resume;
caps->cmd_terminate = !!device->device_terminate_all;
return 0;
@@ -774,8 +770,14 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
return ERR_PTR(-ENODEV);
chan = __dma_request_channel(mask, NULL, NULL);
- if (!chan)
- chan = ERR_PTR(-ENODEV);
+ if (!chan) {
+ mutex_lock(&dma_list_mutex);
+ if (list_empty(&dma_device_list))
+ chan = ERR_PTR(-EPROBE_DEFER);
+ else
+ chan = ERR_PTR(-ENODEV);
+ mutex_unlock(&dma_list_mutex);
+ }
return chan;
}
@@ -1139,6 +1141,41 @@ void dma_async_device_unregister(struct dma_device *device)
}
EXPORT_SYMBOL(dma_async_device_unregister);
+static void dmam_device_release(struct device *dev, void *res)
+{
+ struct dma_device *device;
+
+ device = *(struct dma_device **)res;
+ dma_async_device_unregister(device);
+}
+
+/**
+ * dmaenginem_async_device_register - managed version of dma_async_device_register()
+ * @device: pointer to &struct dma_device
+ *
+ * The operation is managed and will be undone on driver detach.
+ */
+int dmaenginem_async_device_register(struct dma_device *device)
+{
+ void *p;
+ int ret;
+
+ p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ ret = dma_async_device_register(device);
+ if (!ret) {
+ *(struct dma_device **)p = device;
+ devres_add(device->dev, p);
+ } else {
+ devres_free(p);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dmaenginem_async_device_register);
+
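
The devres wrapper above ties unregistration to driver detach, so remove paths no longer need an explicit dma_async_device_unregister(). A probe-side usage sketch (foo_setup_dma_device() is a hypothetical setup helper):

static int foo_probe(struct platform_device *pdev)
{
	struct dma_device *dma;

	dma = foo_setup_dma_device(pdev);	/* hypothetical */
	if (IS_ERR(dma))
		return PTR_ERR(dma);

	/* Undone automatically when the driver detaches. */
	return dmaenginem_async_device_register(dma);
}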
struct dmaengine_unmap_pool {
struct kmem_cache *cache;
const char *name;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index ec240592f5c8..a15592383d4e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 29d04ca71d52..202ffa9f7611 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -413,6 +413,13 @@ static void hsu_dma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(to_virt_chan(chan));
}
+static void hsu_dma_synchronize(struct dma_chan *chan)
+{
+ struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
+
+ vchan_synchronize(&hsuc->vchan);
+}
+
int hsu_dma_probe(struct hsu_dma_chip *chip)
{
struct hsu_dma *hsu;
@@ -459,6 +466,7 @@ int hsu_dma_probe(struct hsu_dma_chip *chip)
hsu->dma.device_pause = hsu_dma_pause;
hsu->dma.device_resume = hsu_dma_resume;
hsu->dma.device_terminate_all = hsu_dma_terminate_all;
+ hsu->dma.device_synchronize = hsu_dma_synchronize;
hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index e5c911200bdb..1fbf9cb9b742 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -496,6 +496,13 @@ static int idma64_terminate_all(struct dma_chan *chan)
return 0;
}
+static void idma64_synchronize(struct dma_chan *chan)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+ vchan_synchronize(&idma64c->vchan);
+}
+
static int idma64_alloc_chan_resources(struct dma_chan *chan)
{
struct idma64_chan *idma64c = to_idma64_chan(chan);
@@ -583,6 +590,7 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.device_pause = idma64_pause;
idma64->dma.device_resume = idma64_resume;
idma64->dma.device_terminate_all = idma64_terminate_all;
+ idma64->dma.device_synchronize = idma64_synchronize;
idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f077992635c2..b4ec2d20e661 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,6 +24,7 @@
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -41,6 +42,7 @@
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include "dmaengine.h"
+#include "virt-dma.h"
/* SDMA registers */
#define SDMA_H_C0PTR 0x000
@@ -183,6 +185,7 @@
* Mode/Count of data node descriptors - IPCv2
*/
struct sdma_mode_count {
+#define SDMA_BD_MAX_CNT 0xffff
u32 count : 16; /* size of the buffer pointed by this BD */
u32 status : 8; /* E,R,I,C,W,D status bits stored here */
u32 command : 8; /* command mostly used for channel 0 */
@@ -200,9 +203,9 @@ struct sdma_buffer_descriptor {
/**
* struct sdma_channel_control - Channel control Block
*
- * @current_bd_ptr current buffer descriptor processed
- * @base_bd_ptr first element of buffer descriptor array
- * @unused padding. The SDMA engine expects an array of 128 byte
+ * @current_bd_ptr: current buffer descriptor processed
+ * @base_bd_ptr: first element of buffer descriptor array
+ * @unused: padding. The SDMA engine expects an array of 128 byte
* control blocks
*/
struct sdma_channel_control {
@@ -215,10 +218,13 @@ struct sdma_channel_control {
* struct sdma_state_registers - SDMA context for a channel
*
* @pc: program counter
+ * @unused1: unused
* @t: test bit: status of arithmetic & test instruction
* @rpc: return program counter
+ * @unused0: unused
* @sf: source fault while loading data
* @spc: loop start program counter
+ * @unused2: unused
* @df: destination fault while storing data
* @epc: loop end program counter
* @lm: loop mode
@@ -256,6 +262,14 @@ struct sdma_state_registers {
* @dsa: dedicated core source address register
* @ds: dedicated core status register
* @dd: dedicated core data register
+ * @scratch0: 1st word of dedicated ram for context switch
+ * @scratch1: 2nd word of dedicated ram for context switch
+ * @scratch2: 3rd word of dedicated ram for context switch
+ * @scratch3: 4th word of dedicated ram for context switch
+ * @scratch4: 5th word of dedicated ram for context switch
+ * @scratch5: 6th word of dedicated ram for context switch
+ * @scratch6: 7th word of dedicated ram for context switch
+ * @scratch7: 8th word of dedicated ram for context switch
*/
struct sdma_context_data {
struct sdma_state_registers channel_state;
@@ -284,25 +298,67 @@ struct sdma_context_data {
u32 scratch7;
} __attribute__ ((packed));
-#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
struct sdma_engine;
/**
+ * struct sdma_desc - descriptor structure for one transfer
+ * @vd: descriptor for virt dma
+ * @num_bd: number of buffer descriptors currently being handled
+ * @bd_phys: physical address of bd
+ * @buf_tail: ID of the buffer that was processed
+ * @buf_ptail: ID of the previous buffer that was processed
+ * @period_len: period length, used in cyclic.
+ * @chn_real_count: the real count updated from bd->mode.count
+ * @chn_count: the transfer count set
+ * @sdmac: sdma_channel pointer
+ * @bd: pointer to the allocated buffer descriptors
+ */
+struct sdma_desc {
+ struct virt_dma_desc vd;
+ unsigned int num_bd;
+ dma_addr_t bd_phys;
+ unsigned int buf_tail;
+ unsigned int buf_ptail;
+ unsigned int period_len;
+ unsigned int chn_real_count;
+ unsigned int chn_count;
+ struct sdma_channel *sdmac;
+ struct sdma_buffer_descriptor *bd;
+};
+
+/**
* struct sdma_channel - housekeeping for a SDMA channel
*
- * @sdma pointer to the SDMA engine for this channel
- * @channel the channel number, matches dmaengine chan_id + 1
- * @direction transfer type. Needed for setting SDMA script
- * @peripheral_type Peripheral type. Needed for setting SDMA script
- * @event_id0 aka dma request line
- * @event_id1 for channels that use 2 events
- * @word_size peripheral access size
- * @buf_tail ID of the buffer that was processed
- * @buf_ptail ID of the previous buffer that was processed
- * @num_bd max NUM_BD. number of descriptors currently handling
+ * @vc: virt_dma base structure
+ * @desc: sdma description including vd and other special member
+ * @sdma: pointer to the SDMA engine for this channel
+ * @channel: the channel number, matches dmaengine chan_id + 1
+ * @direction: transfer type. Needed for setting SDMA script
+ * @peripheral_type: Peripheral type. Needed for setting SDMA script
+ * @event_id0: aka dma request line
+ * @event_id1: for channels that use 2 events
+ * @word_size: peripheral access size
+ * @pc_from_device: script address for those device_2_memory
+ * @pc_to_device: script address for those memory_2_device
+ * @device_to_device: script address for those device_2_device
+ * @pc_to_pc: script address for those memory_2_memory
+ * @flags: loop mode or not
+ * @per_address: peripheral source or destination address in common case
+ * destination address in p_2_p case
+ * @per_address2: peripheral source address in p_2_p case
+ * @event_mask: event mask used in p_2_p script
+ * @watermark_level: value for gReg[7], some script will extend it from
+ * basic watermark such as p_2_p
+ * @shp_addr: value for gReg[6]
+ * @per_addr: value for gReg[2]
+ * @status: status of dma channel
+ * @data: specific sdma interface structure
+ * @bd_pool: dma_pool for bd
*/
struct sdma_channel {
+ struct virt_dma_chan vc;
+ struct sdma_desc *desc;
struct sdma_engine *sdma;
unsigned int channel;
enum dma_transfer_direction direction;
@@ -310,28 +366,17 @@ struct sdma_channel {
unsigned int event_id0;
unsigned int event_id1;
enum dma_slave_buswidth word_size;
- unsigned int buf_tail;
- unsigned int buf_ptail;
- unsigned int num_bd;
- unsigned int period_len;
- struct sdma_buffer_descriptor *bd;
- dma_addr_t bd_phys;
unsigned int pc_from_device, pc_to_device;
unsigned int device_to_device;
+ unsigned int pc_to_pc;
unsigned long flags;
dma_addr_t per_address, per_address2;
unsigned long event_mask[2];
unsigned long watermark_level;
u32 shp_addr, per_addr;
- struct dma_chan chan;
- spinlock_t lock;
- struct dma_async_tx_descriptor desc;
enum dma_status status;
- unsigned int chn_count;
- unsigned int chn_real_count;
- struct tasklet_struct tasklet;
struct imx_dma_data data;
- bool enabled;
+ struct dma_pool *bd_pool;
};
#define IMX_DMA_SG_LOOP BIT(0)
@@ -346,15 +391,15 @@ struct sdma_channel {
/**
* struct sdma_firmware_header - Layout of the firmware image
*
- * @magic "SDMA"
- * @version_major increased whenever layout of struct sdma_script_start_addrs
- * changes.
- * @version_minor firmware minor version (for binary compatible changes)
- * @script_addrs_start offset of struct sdma_script_start_addrs in this image
- * @num_script_addrs Number of script addresses in this image
- * @ram_code_start offset of SDMA ram image in this firmware image
- * @ram_code_size size of SDMA ram image
- * @script_addrs Stores the start address of the SDMA scripts
+ * @magic: "SDMA"
+ * @version_major: increased whenever layout of struct
+ * sdma_script_start_addrs changes.
+ * @version_minor: firmware minor version (for binary compatible changes)
+ * @script_addrs_start: offset of struct sdma_script_start_addrs in this image
+ * @num_script_addrs: Number of script addresses in this image
+ * @ram_code_start: offset of SDMA ram image in this firmware image
+ * @ram_code_size: size of SDMA ram image
+ * @script_addrs: Stores the start address of the SDMA scripts
* (in SDMA memory space)
*/
struct sdma_firmware_header {
@@ -391,6 +436,8 @@ struct sdma_engine {
u32 spba_start_addr;
u32 spba_end_addr;
unsigned int irq;
+ dma_addr_t bd0_phys;
+ struct sdma_buffer_descriptor *bd0;
};
static struct sdma_driver_data sdma_imx31 = {
@@ -590,14 +637,7 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
- unsigned long flags;
- struct sdma_channel *sdmac = &sdma->channel[channel];
-
writel(BIT(channel), sdma->regs + SDMA_H_START);
-
- spin_lock_irqsave(&sdmac->lock, flags);
- sdmac->enabled = true;
- spin_unlock_irqrestore(&sdmac->lock, flags);
}
/*
@@ -625,7 +665,7 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
u32 address)
{
- struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ struct sdma_buffer_descriptor *bd0 = sdma->bd0;
void *buf_virt;
dma_addr_t buf_phys;
int ret;
@@ -681,26 +721,49 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
writel_relaxed(val, sdma->regs + chnenbl);
}
+static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct sdma_desc, vd.tx);
+}
+
+static void sdma_start_desc(struct sdma_channel *sdmac)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
+ struct sdma_desc *desc;
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+
+ if (!vd) {
+ sdmac->desc = NULL;
+ return;
+ }
+ sdmac->desc = desc = to_sdma_desc(&vd->tx);
+ /*
+ * Do not delete the node in the desc_issued list in cyclic mode, otherwise
+ * the allocated desc will never be freed in vchan_dma_desc_free_list.
+ */
+ if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+ list_del(&vd->node);
+
+ sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+ sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+ sdma_enable_channel(sdma, sdmac->channel);
+}
+
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
struct sdma_buffer_descriptor *bd;
int error = 0;
enum dma_status old_status = sdmac->status;
- unsigned long flags;
-
- spin_lock_irqsave(&sdmac->lock, flags);
- if (!sdmac->enabled) {
- spin_unlock_irqrestore(&sdmac->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&sdmac->lock, flags);
/*
* loop mode. Iterate over descriptors, re-setup them and
* call callback function.
*/
- while (1) {
- bd = &sdmac->bd[sdmac->buf_tail];
+ while (sdmac->desc) {
+ struct sdma_desc *desc = sdmac->desc;
+
+ bd = &desc->bd[desc->buf_tail];
if (bd->mode.status & BD_DONE)
break;
@@ -716,11 +779,11 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
* the number of bytes present in the current buffer descriptor.
*/
- sdmac->chn_real_count = bd->mode.count;
+ desc->chn_real_count = bd->mode.count;
bd->mode.status |= BD_DONE;
- bd->mode.count = sdmac->period_len;
- sdmac->buf_ptail = sdmac->buf_tail;
- sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
+ bd->mode.count = desc->period_len;
+ desc->buf_ptail = desc->buf_tail;
+ desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
/*
* The callback is called from the interrupt context in order
@@ -728,41 +791,38 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
* SDMA transaction status by the time the client tasklet is
* executed.
*/
-
- dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
+ spin_unlock(&sdmac->vc.lock);
+ dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+ spin_lock(&sdmac->vc.lock);
if (error)
sdmac->status = old_status;
}
}
-static void mxc_sdma_handle_channel_normal(unsigned long data)
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
{
struct sdma_channel *sdmac = (struct sdma_channel *) data;
struct sdma_buffer_descriptor *bd;
int i, error = 0;
- sdmac->chn_real_count = 0;
+ sdmac->desc->chn_real_count = 0;
/*
* non loop mode. Iterate over all descriptors, collect
* errors and call callback function
*/
- for (i = 0; i < sdmac->num_bd; i++) {
- bd = &sdmac->bd[i];
+ for (i = 0; i < sdmac->desc->num_bd; i++) {
+ bd = &sdmac->desc->bd[i];
if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
- sdmac->chn_real_count += bd->mode.count;
+ sdmac->desc->chn_real_count += bd->mode.count;
}
if (error)
sdmac->status = DMA_ERROR;
else
sdmac->status = DMA_COMPLETE;
-
- dma_cookie_complete(&sdmac->desc);
-
- dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
}
static irqreturn_t sdma_int_handler(int irq, void *dev_id)
@@ -778,12 +838,21 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
while (stat) {
int channel = fls(stat) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
+ struct sdma_desc *desc;
+
+ spin_lock(&sdmac->vc.lock);
+ desc = sdmac->desc;
+ if (desc) {
+ if (sdmac->flags & IMX_DMA_SG_LOOP) {
+ sdma_update_channel_loop(sdmac);
+ } else {
+ mxc_sdma_handle_channel_normal(sdmac);
+ vchan_cookie_complete(&desc->vd);
+ sdma_start_desc(sdmac);
+ }
+ }
- if (sdmac->flags & IMX_DMA_SG_LOOP)
- sdma_update_channel_loop(sdmac);
- else
- tasklet_schedule(&sdmac->tasklet);
-
+ spin_unlock(&sdmac->vc.lock);
__clear_bit(channel, &stat);
}
@@ -802,14 +871,16 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
* These are needed once we start to support transfers between
* two peripherals or memory-to-memory transfers
*/
- int per_2_per = 0;
+ int per_2_per = 0, emi_2_emi = 0;
sdmac->pc_from_device = 0;
sdmac->pc_to_device = 0;
sdmac->device_to_device = 0;
+ sdmac->pc_to_pc = 0;
switch (peripheral_type) {
case IMX_DMATYPE_MEMORY:
+ emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
break;
case IMX_DMATYPE_DSP:
emi_2_per = sdma->script_addrs->bp_2_ap_addr;
@@ -882,6 +953,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
sdmac->pc_from_device = per_2_emi;
sdmac->pc_to_device = emi_2_per;
sdmac->device_to_device = per_2_per;
+ sdmac->pc_to_pc = emi_2_emi;
}
static int sdma_load_context(struct sdma_channel *sdmac)
@@ -890,7 +962,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
int channel = sdmac->channel;
int load_address;
struct sdma_context_data *context = sdma->context;
- struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ struct sdma_buffer_descriptor *bd0 = sdma->bd0;
int ret;
unsigned long flags;
@@ -898,6 +970,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
load_address = sdmac->pc_from_device;
else if (sdmac->direction == DMA_DEV_TO_DEV)
load_address = sdmac->device_to_device;
+ else if (sdmac->direction == DMA_MEM_TO_MEM)
+ load_address = sdmac->pc_to_pc;
else
load_address = sdmac->pc_to_device;
@@ -939,7 +1013,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
- return container_of(chan, struct sdma_channel, chan);
+ return container_of(chan, struct sdma_channel, vc.chan);
}
static int sdma_disable_channel(struct dma_chan *chan)
@@ -947,21 +1021,25 @@ static int sdma_disable_channel(struct dma_chan *chan)
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
int channel = sdmac->channel;
- unsigned long flags;
writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
sdmac->status = DMA_ERROR;
- spin_lock_irqsave(&sdmac->lock, flags);
- sdmac->enabled = false;
- spin_unlock_irqrestore(&sdmac->lock, flags);
-
return 0;
}
static int sdma_disable_channel_with_delay(struct dma_chan *chan)
{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
sdma_disable_channel(chan);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_get_all_descriptors(&sdmac->vc, &head);
+ sdmac->desc = NULL;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_dma_desc_free_list(&sdmac->vc, &head);
/*
* According to NXP R&D team a delay of one BD SDMA cost time
@@ -1090,52 +1168,81 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
return 0;
}
-static int sdma_request_channel(struct sdma_channel *sdmac)
+static int sdma_request_channel0(struct sdma_engine *sdma)
{
- struct sdma_engine *sdma = sdmac->sdma;
- int channel = sdmac->channel;
int ret = -EBUSY;
- sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
- GFP_KERNEL);
- if (!sdmac->bd) {
+ sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+ GFP_NOWAIT);
+ if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
}
- sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+ sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
+ sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
- sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+ sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
return 0;
out:
return ret;
}
-static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+
+static int sdma_alloc_bd(struct sdma_desc *desc)
{
- unsigned long flags;
- struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
- dma_cookie_t cookie;
+ int ret = 0;
- spin_lock_irqsave(&sdmac->lock, flags);
+ desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
+ &desc->bd_phys);
+ if (!desc->bd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+out:
+ return ret;
+}
- cookie = dma_cookie_assign(tx);
+static void sdma_free_bd(struct sdma_desc *desc)
+{
+ dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+}
- spin_unlock_irqrestore(&sdmac->lock, flags);
+static void sdma_desc_free(struct virt_dma_desc *vd)
+{
+ struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
- return cookie;
+ sdma_free_bd(desc);
+ kfree(desc);
}
static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct imx_dma_data *data = chan->private;
+ struct imx_dma_data mem_data;
int prio, ret;
- if (!data)
- return -EINVAL;
+ /*
+ * MEMCPY clients (e.g. dmatest) may never set up chan->private via a
+ * filter function, so create a local 'struct imx_dma_data mem_data'
+ * for that case. Note that in any slave case you have to set up
+ * chan->private with a 'struct imx_dma_data' in your own filter
+ * function if you request the channel with dma_request_channel()
+ * rather than dma_request_slave_channel(). Otherwise the 'MEMCPY in
+ * case?' message below appears to remind you to correct your filter
+ * function.
+ */
+ if (!data) {
+ dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
+ mem_data.priority = 2;
+ mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
+ mem_data.dma_request = 0;
+ mem_data.dma_request2 = 0;
+ data = &mem_data;
+
+ sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
+ }
switch (data->priority) {
case DMA_PRIO_HIGH:
@@ -1161,18 +1268,13 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
if (ret)
goto disable_clk_ipg;
- ret = sdma_request_channel(sdmac);
- if (ret)
- goto disable_clk_ahb;
-
ret = sdma_set_channel_priority(sdmac, prio);
if (ret)
goto disable_clk_ahb;
- dma_async_tx_descriptor_init(&sdmac->desc, chan);
- sdmac->desc.tx_submit = sdma_tx_submit;
- /* txd.flags will be overwritten in prep funcs */
- sdmac->desc.flags = DMA_CTRL_ACK;
+ sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
+ sizeof(struct sdma_buffer_descriptor),
+ 32, 0);
return 0;
@@ -1188,7 +1290,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- sdma_disable_channel(chan);
+ sdma_disable_channel_with_delay(chan);
if (sdmac->event_id0)
sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1200,10 +1302,105 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
sdma_set_channel_priority(sdmac, 0);
- dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
-
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
+
+ dma_pool_destroy(sdmac->bd_pool);
+ sdmac->bd_pool = NULL;
+}
+
+static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+ enum dma_transfer_direction direction, u32 bds)
+{
+ struct sdma_desc *desc;
+
+ desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
+ if (!desc)
+ goto err_out;
+
+ sdmac->status = DMA_IN_PROGRESS;
+ sdmac->direction = direction;
+ sdmac->flags = 0;
+
+ desc->chn_count = 0;
+ desc->chn_real_count = 0;
+ desc->buf_tail = 0;
+ desc->buf_ptail = 0;
+ desc->sdmac = sdmac;
+ desc->num_bd = bds;
+
+ if (sdma_alloc_bd(desc))
+ goto err_desc_out;
+
+ /* No slave_config is called in the MEMCPY case, so set up ownership here */
+ if (direction == DMA_MEM_TO_MEM)
+ sdma_config_ownership(sdmac, false, true, false);
+
+ if (sdma_load_context(sdmac))
+ goto err_desc_out;
+
+ return desc;
+
+err_desc_out:
+ kfree(desc);
+err_out:
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ size_t count;
+ int i = 0, param;
+ struct sdma_buffer_descriptor *bd;
+ struct sdma_desc *desc;
+
+ if (!chan || !len)
+ return NULL;
+
+ dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
+ &dma_src, &dma_dst, len, channel);
+
+ desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
+ len / SDMA_BD_MAX_CNT + 1);
+ if (!desc)
+ return NULL;
+
+ do {
+ count = min_t(size_t, len, SDMA_BD_MAX_CNT);
+ bd = &desc->bd[i];
+ bd->buffer_addr = dma_src;
+ bd->ext_buffer_addr = dma_dst;
+ bd->mode.count = count;
+ desc->chn_count += count;
+ bd->mode.command = 0;
+
+ dma_src += count;
+ dma_dst += count;
+ len -= count;
+ i++;
+
+ param = BD_DONE | BD_EXTD | BD_CONT;
+ /* last bd */
+ if (!len) {
+ param |= BD_INTR;
+ param |= BD_LAST;
+ param &= ~BD_CONT;
+ }
+
+ dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
+ i, count, bd->buffer_addr,
+ param & BD_WRAP ? "wrap" : "",
+ param & BD_INTR ? " intr" : "");
+
+ bd->mode.status = param;
+ } while (len);
+
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
}
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
@@ -1213,75 +1410,54 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- int ret, i, count;
+ int i, count;
int channel = sdmac->channel;
struct scatterlist *sg;
+ struct sdma_desc *desc;
- if (sdmac->status == DMA_IN_PROGRESS)
- return NULL;
- sdmac->status = DMA_IN_PROGRESS;
-
- sdmac->flags = 0;
-
- sdmac->buf_tail = 0;
- sdmac->buf_ptail = 0;
- sdmac->chn_real_count = 0;
+ desc = sdma_transfer_init(sdmac, direction, sg_len);
+ if (!desc)
+ goto err_out;
dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
sg_len, channel);
- sdmac->direction = direction;
- ret = sdma_load_context(sdmac);
- if (ret)
- goto err_out;
-
- if (sg_len > NUM_BD) {
- dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
- channel, sg_len, NUM_BD);
- ret = -EINVAL;
- goto err_out;
- }
-
- sdmac->chn_count = 0;
for_each_sg(sgl, sg, sg_len, i) {
- struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
bd->buffer_addr = sg->dma_address;
count = sg_dma_len(sg);
- if (count > 0xffff) {
+ if (count > SDMA_BD_MAX_CNT) {
dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
- channel, count, 0xffff);
- ret = -EINVAL;
- goto err_out;
+ channel, count, SDMA_BD_MAX_CNT);
+ goto err_bd_out;
}
bd->mode.count = count;
- sdmac->chn_count += count;
+ desc->chn_count += count;
- if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
- ret = -EINVAL;
- goto err_out;
- }
+ if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
+ goto err_bd_out;
switch (sdmac->word_size) {
case DMA_SLAVE_BUSWIDTH_4_BYTES:
bd->mode.command = 0;
if (count & 3 || sg->dma_address & 3)
- return NULL;
+ goto err_bd_out;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
bd->mode.command = 2;
if (count & 1 || sg->dma_address & 1)
- return NULL;
+ goto err_bd_out;
break;
case DMA_SLAVE_BUSWIDTH_1_BYTE:
bd->mode.command = 1;
break;
default:
- return NULL;
+ goto err_bd_out;
}
param = BD_DONE | BD_EXTD | BD_CONT;
@@ -1300,10 +1476,10 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
bd->mode.status = param;
}
- sdmac->num_bd = sg_len;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
- return &sdmac->desc;
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+ sdma_free_bd(desc);
+ kfree(desc);
err_out:
sdmac->status = DMA_ERROR;
return NULL;
@@ -1318,40 +1494,27 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct sdma_engine *sdma = sdmac->sdma;
int num_periods = buf_len / period_len;
int channel = sdmac->channel;
- int ret, i = 0, buf = 0;
+ int i = 0, buf = 0;
+ struct sdma_desc *desc;
dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
- if (sdmac->status == DMA_IN_PROGRESS)
- return NULL;
-
- sdmac->status = DMA_IN_PROGRESS;
+ desc = sdma_transfer_init(sdmac, direction, num_periods);
+ if (!desc)
+ goto err_out;
- sdmac->buf_tail = 0;
- sdmac->buf_ptail = 0;
- sdmac->chn_real_count = 0;
- sdmac->period_len = period_len;
+ desc->period_len = period_len;
sdmac->flags |= IMX_DMA_SG_LOOP;
- sdmac->direction = direction;
- ret = sdma_load_context(sdmac);
- if (ret)
- goto err_out;
-
- if (num_periods > NUM_BD) {
- dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
- channel, num_periods, NUM_BD);
- goto err_out;
- }
- if (period_len > 0xffff) {
+ if (period_len > SDMA_BD_MAX_CNT) {
dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
- channel, period_len, 0xffff);
- goto err_out;
+ channel, period_len, SDMA_BD_MAX_CNT);
+ goto err_bd_out;
}
while (buf < buf_len) {
- struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
bd->buffer_addr = dma_addr;
@@ -1359,7 +1522,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
bd->mode.count = period_len;
if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
- goto err_out;
+ goto err_bd_out;
if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
bd->mode.command = 0;
else
@@ -1382,10 +1545,10 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
i++;
}
- sdmac->num_bd = num_periods;
- sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
- return &sdmac->desc;
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+ sdma_free_bd(desc);
+ kfree(desc);
err_out:
sdmac->status = DMA_ERROR;
return NULL;
@@ -1424,13 +1587,31 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_desc *desc;
u32 residue;
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
- if (sdmac->flags & IMX_DMA_SG_LOOP)
- residue = (sdmac->num_bd - sdmac->buf_ptail) *
- sdmac->period_len - sdmac->chn_real_count;
- else
- residue = sdmac->chn_count - sdmac->chn_real_count;
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vd = vchan_find_desc(&sdmac->vc, cookie);
+ if (vd) {
+ desc = to_sdma_desc(&vd->tx);
+ if (sdmac->flags & IMX_DMA_SG_LOOP)
+ residue = (desc->num_bd - desc->buf_ptail) *
+ desc->period_len - desc->chn_real_count;
+ else
+ residue = desc->chn_count - desc->chn_real_count;
+ } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
+ residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
+ } else {
+ residue = 0;
+ }
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
residue);
@@ -1441,10 +1622,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
static void sdma_issue_pending(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- struct sdma_engine *sdma = sdmac->sdma;
+ unsigned long flags;
- if (sdmac->status == DMA_IN_PROGRESS)
- sdma_enable_channel(sdma, sdmac->channel);
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
+ sdma_start_desc(sdmac);
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
}
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
@@ -1650,7 +1833,7 @@ static int sdma_init(struct sdma_engine *sdma)
for (i = 0; i < MAX_DMA_CHANNELS; i++)
writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
- ret = sdma_request_channel(&sdma->channel[0]);
+ ret = sdma_request_channel0(sdma);
if (ret)
goto err_dma_alloc;
@@ -1805,6 +1988,7 @@ static int sdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+ dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
INIT_LIST_HEAD(&sdma->dma_device.channels);
/* Initialize channel parameters */
@@ -1812,22 +1996,16 @@ static int sdma_probe(struct platform_device *pdev)
struct sdma_channel *sdmac = &sdma->channel[i];
sdmac->sdma = sdma;
- spin_lock_init(&sdmac->lock);
- sdmac->chan.device = &sdma->dma_device;
- dma_cookie_init(&sdmac->chan);
sdmac->channel = i;
-
- tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
- (unsigned long) sdmac);
+ sdmac->vc.desc_free = sdma_desc_free;
/*
* Add the channel to the DMAC list. Do not add channel 0 though
* because we need it internally in the SDMA driver. This also means
* that channel 0 in dmaengine counting matches sdma channel 1.
*/
if (i)
- list_add_tail(&sdmac->chan.device_node,
- &sdma->dma_device.channels);
+ vchan_init(&sdmac->vc, &sdma->dma_device);
}
ret = sdma_init(sdma);
@@ -1877,9 +2055,10 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
- dma_set_max_seg_size(sdma->dma_device.dev, 65535);
+ dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
platform_set_drvdata(pdev, sdma);
@@ -1932,7 +2111,8 @@ static int sdma_remove(struct platform_device *pdev)
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];
- tasklet_kill(&sdmac->tasklet);
+ tasklet_kill(&sdmac->vc.task);
+ sdma_free_chan_resources(&sdmac->vc.chan);
}
platform_set_drvdata(pdev, NULL);
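
A client-side illustration of the imx-sdma conversion above: once the driver
advertises DMA_MEMCPY, a generic dmaengine client can drive it with the
standard API and no driver-specific calls. A minimal, hypothetical sketch
(example_sdma_memcpy and its synchronous wait are illustrative only, not part
of the patch):

#include <linux/dmaengine.h>

/* Illustrative only: run one memcpy transfer on any DMA_MEMCPY channel */
static int example_sdma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cookie_t cookie;
        dma_cap_mask_t mask;
        int ret = 0;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        /* No filter function is needed just to get a memcpy channel */
        chan = dma_request_channel(mask, NULL, NULL);
        if (!chan)
                return -ENODEV;

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
        if (!tx) {
                ret = -ENOMEM;
                goto out;
        }

        cookie = dmaengine_submit(tx);
        dma_async_issue_pending(chan);

        /* Synchronous wait; real clients would normally use a callback */
        if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
                ret = -EIO;
out:
        dma_release_channel(chan);
        return ret;
}
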
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 8b5b23a8ace9..23fb2fa04000 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -688,6 +688,12 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
u64 phys_complete;
+ /* set the completion address register again */
+ writel(lower_32_bits(ioat_chan->completion_dma),
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+ writel(upper_32_bits(ioat_chan->completion_dma),
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
ioat_quiesce(ioat_chan, 0);
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
__cleanup(ioat_chan, phys_complete);
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index fa31cccbe04f..6bfa217ed6d0 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct k3_dma_dev *d = ofdma->of_dma_data;
unsigned int request = dma_spec->args[0];
- if (request > d->dma_requests)
+ if (request >= d->dma_requests)
return NULL;
return dma_get_slave_channel(&(d->chans[request].vc.chan));
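
The k3dma one-liner above fixes an off-by-one: with d->dma_requests request
lines the valid indices run from 0 to dma_requests - 1, so dma_requests itself
must be rejected. A trivial illustration of the corrected bound:

/* Illustration only: with nr == 4 the valid indices are 0, 1, 2, 3;
 * the old 'request > nr' test wrongly accepted request == 4. */
static bool example_request_in_range(unsigned int request, unsigned int nr)
{
        return request < nr;    /* i.e. reject request >= nr */
}
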
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 68dd79783b54..b76cb17d879c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -470,11 +470,6 @@ static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
mic_dma_chan_mask_intr(ch);
}
-static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
-{
- dma_async_device_unregister(&mic_dma_dev->dma_dev);
-}
-
static int mic_dma_setup_irq(struct mic_dma_chan *ch)
{
ch->cookie =
@@ -630,7 +625,7 @@ static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
&mic_dma_dev->dma_dev.channels);
}
- return dma_async_device_register(&mic_dma_dev->dma_dev);
+ return dmaenginem_async_device_register(&mic_dma_dev->dma_dev);
}
/*
@@ -678,7 +673,6 @@ alloc_error:
static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
{
- mic_dma_unregister_dma_device(mic_dma_dev);
mic_dma_uninit(mic_dma_dev);
kfree(mic_dma_dev);
}
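
The mic_x100 hunks switch to dmaenginem_async_device_register(), the
device-managed variant, which is why the explicit unregister helper is
deleted: the dmaengine core drops the registration automatically when the
underlying device is released. A hypothetical probe tail (example_probe is
illustrative):

#include <linux/dmaengine.h>

/* Illustrative only: no matching unregister call is needed on teardown */
static int example_probe(struct dma_device *dd)
{
        /* ... capabilities and dd->device_* callbacks set up above ... */
        return dmaenginem_async_device_register(dd);
}
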
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index c6589ccf1b9a..8dc0aa4d73ab 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -174,6 +174,7 @@ struct mv_xor_v2_device {
int desc_size;
unsigned int npendings;
unsigned int hw_queue_idx;
+ struct msi_desc *msi_desc;
};
/**
@@ -588,11 +589,9 @@ static void mv_xor_v2_tasklet(unsigned long data)
*/
dma_cookie_complete(&next_pending_sw_desc->async_tx);
- if (next_pending_sw_desc->async_tx.callback)
- next_pending_sw_desc->async_tx.callback(
- next_pending_sw_desc->async_tx.callback_param);
-
dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
+ dmaengine_desc_get_callback_invoke(
+ &next_pending_sw_desc->async_tx, NULL);
}
dma_run_dependencies(&next_pending_sw_desc->async_tx);
@@ -643,9 +642,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
/* write the DESQ address to the DMA engine */
- writel(xor_dev->hw_desq & 0xFFFFFFFF,
+ writel(lower_32_bits(xor_dev->hw_desq),
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
- writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
+ writel(upper_32_bits(xor_dev->hw_desq),
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
/*
@@ -780,6 +779,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
msi_desc = first_msi_entry(&pdev->dev);
if (!msi_desc)
goto free_msi_irqs;
+ xor_dev->msi_desc = msi_desc;
ret = devm_request_irq(&pdev->dev, msi_desc->irq,
mv_xor_v2_interrupt_handler, 0,
@@ -897,8 +897,12 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
xor_dev->hw_desq_virt, xor_dev->hw_desq);
+ devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);
+
platform_msi_domain_free_irqs(&pdev->dev);
+ tasklet_kill(&xor_dev->irq_tasklet);
+
clk_disable_unprepare(xor_dev->clk);
return 0;
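
The mv_xor_v2 register writes above replace open-coded masks with
lower_32_bits()/upper_32_bits(); note that the old high-half mask
0xFFFF00000000 silently dropped bits 48-63 of the descriptor queue address.
An illustrative helper (the register offsets are hypothetical):

#include <linux/io.h>
#include <linux/kernel.h>       /* lower_32_bits() / upper_32_bits() */

#define EXAMPLE_BALR    0x04    /* hypothetical low-half register offset */
#define EXAMPLE_BAHR    0x08    /* hypothetical high-half register offset */

/* Split a 64-bit descriptor-queue address across two 32-bit registers */
static void example_write_desq_base(void __iomem *base, dma_addr_t desq)
{
        writel(lower_32_bits(desq), base + EXAMPLE_BALR);  /* bits 31:0 */
        writel(upper_32_bits(desq), base + EXAMPLE_BAHR);  /* bits 63:32 */
}
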
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 2f9974ddfbb2..8c7b2e8703da 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -479,6 +479,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
default:
pr_warn("%s(): invalid bus width %u\n", __func__, width);
+ /* fall through */
case DMA_SLAVE_BUSWIDTH_1_BYTE:
size = burst;
}
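
The nbpfaxi hunk only adds an annotation: the default case intentionally
continues into the 1-byte case, and the comment documents that for readers and
for compilers building with -Wimplicit-fallthrough. A self-contained sketch of
the pattern (example_xfer_size is illustrative):

#include <linux/printk.h>

/* Illustrative only: the default case deliberately reuses the 1-byte path */
static size_t example_xfer_size(unsigned int width, size_t burst)
{
        size_t size;

        switch (width) {
        case 4:
                size = burst * 4;
                break;
        default:
                pr_warn("invalid bus width %u, assuming 1 byte\n", width);
                /* fall through */
        case 1:
                size = burst;
        }

        return size;
}
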
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
new file mode 100644
index 000000000000..7812a6338acd
--- /dev/null
+++ b/drivers/dma/owl-dma.c
@@ -0,0 +1,971 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Actions Semi Owl SoCs DMA driver
+//
+// Copyright (c) 2014 Actions Semi Inc.
+// Author: David Liu <liuwei@actions-semi.com>
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include "virt-dma.h"
+
+#define OWL_DMA_FRAME_MAX_LENGTH 0xfffff
+
+/* Global DMA Controller Registers */
+#define OWL_DMA_IRQ_PD0 0x00
+#define OWL_DMA_IRQ_PD1 0x04
+#define OWL_DMA_IRQ_PD2 0x08
+#define OWL_DMA_IRQ_PD3 0x0C
+#define OWL_DMA_IRQ_EN0 0x10
+#define OWL_DMA_IRQ_EN1 0x14
+#define OWL_DMA_IRQ_EN2 0x18
+#define OWL_DMA_IRQ_EN3 0x1C
+#define OWL_DMA_SECURE_ACCESS_CTL 0x20
+#define OWL_DMA_NIC_QOS 0x24
+#define OWL_DMA_DBGSEL 0x28
+#define OWL_DMA_IDLE_STAT 0x2C
+
+/* Channel Registers */
+#define OWL_DMA_CHAN_BASE(i) (0x100 + (i) * 0x100)
+#define OWL_DMAX_MODE 0x00
+#define OWL_DMAX_SOURCE 0x04
+#define OWL_DMAX_DESTINATION 0x08
+#define OWL_DMAX_FRAME_LEN 0x0C
+#define OWL_DMAX_FRAME_CNT 0x10
+#define OWL_DMAX_REMAIN_FRAME_CNT 0x14
+#define OWL_DMAX_REMAIN_CNT 0x18
+#define OWL_DMAX_SOURCE_STRIDE 0x1C
+#define OWL_DMAX_DESTINATION_STRIDE 0x20
+#define OWL_DMAX_START 0x24
+#define OWL_DMAX_PAUSE 0x28
+#define OWL_DMAX_CHAINED_CTL 0x2C
+#define OWL_DMAX_CONSTANT 0x30
+#define OWL_DMAX_LINKLIST_CTL 0x34
+#define OWL_DMAX_NEXT_DESCRIPTOR 0x38
+#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM 0x3C
+#define OWL_DMAX_INT_CTL 0x40
+#define OWL_DMAX_INT_STATUS 0x44
+#define OWL_DMAX_CURRENT_SOURCE_POINTER 0x48
+#define OWL_DMAX_CURRENT_DESTINATION_POINTER 0x4C
+
+/* OWL_DMAX_MODE Bits */
+#define OWL_DMA_MODE_TS(x) (((x) & GENMASK(5, 0)) << 0)
+#define OWL_DMA_MODE_ST(x) (((x) & GENMASK(1, 0)) << 8)
+#define OWL_DMA_MODE_ST_DEV OWL_DMA_MODE_ST(0)
+#define OWL_DMA_MODE_ST_DCU OWL_DMA_MODE_ST(2)
+#define OWL_DMA_MODE_ST_SRAM OWL_DMA_MODE_ST(3)
+#define OWL_DMA_MODE_DT(x) (((x) & GENMASK(1, 0)) << 10)
+#define OWL_DMA_MODE_DT_DEV OWL_DMA_MODE_DT(0)
+#define OWL_DMA_MODE_DT_DCU OWL_DMA_MODE_DT(2)
+#define OWL_DMA_MODE_DT_SRAM OWL_DMA_MODE_DT(3)
+#define OWL_DMA_MODE_SAM(x) (((x) & GENMASK(1, 0)) << 16)
+#define OWL_DMA_MODE_SAM_CONST OWL_DMA_MODE_SAM(0)
+#define OWL_DMA_MODE_SAM_INC OWL_DMA_MODE_SAM(1)
+#define OWL_DMA_MODE_SAM_STRIDE OWL_DMA_MODE_SAM(2)
+#define OWL_DMA_MODE_DAM(x) (((x) & GENMASK(1, 0)) << 18)
+#define OWL_DMA_MODE_DAM_CONST OWL_DMA_MODE_DAM(0)
+#define OWL_DMA_MODE_DAM_INC OWL_DMA_MODE_DAM(1)
+#define OWL_DMA_MODE_DAM_STRIDE OWL_DMA_MODE_DAM(2)
+#define OWL_DMA_MODE_PW(x) (((x) & GENMASK(2, 0)) << 20)
+#define OWL_DMA_MODE_CB BIT(23)
+#define OWL_DMA_MODE_NDDBW(x) (((x) & 0x1) << 28)
+#define OWL_DMA_MODE_NDDBW_32BIT OWL_DMA_MODE_NDDBW(0)
+#define OWL_DMA_MODE_NDDBW_8BIT OWL_DMA_MODE_NDDBW(1)
+#define OWL_DMA_MODE_CFE BIT(29)
+#define OWL_DMA_MODE_LME BIT(30)
+#define OWL_DMA_MODE_CME BIT(31)
+
+/* OWL_DMAX_LINKLIST_CTL Bits */
+#define OWL_DMA_LLC_SAV(x) (((x) & GENMASK(1, 0)) << 8)
+#define OWL_DMA_LLC_SAV_INC OWL_DMA_LLC_SAV(0)
+#define OWL_DMA_LLC_SAV_LOAD_NEXT OWL_DMA_LLC_SAV(1)
+#define OWL_DMA_LLC_SAV_LOAD_PREV OWL_DMA_LLC_SAV(2)
+#define OWL_DMA_LLC_DAV(x) (((x) & GENMASK(1, 0)) << 10)
+#define OWL_DMA_LLC_DAV_INC OWL_DMA_LLC_DAV(0)
+#define OWL_DMA_LLC_DAV_LOAD_NEXT OWL_DMA_LLC_DAV(1)
+#define OWL_DMA_LLC_DAV_LOAD_PREV OWL_DMA_LLC_DAV(2)
+#define OWL_DMA_LLC_SUSPEND BIT(16)
+
+/* OWL_DMAX_INT_CTL Bits */
+#define OWL_DMA_INTCTL_BLOCK BIT(0)
+#define OWL_DMA_INTCTL_SUPER_BLOCK BIT(1)
+#define OWL_DMA_INTCTL_FRAME BIT(2)
+#define OWL_DMA_INTCTL_HALF_FRAME BIT(3)
+#define OWL_DMA_INTCTL_LAST_FRAME BIT(4)
+
+/* OWL_DMAX_INT_STATUS Bits */
+#define OWL_DMA_INTSTAT_BLOCK BIT(0)
+#define OWL_DMA_INTSTAT_SUPER_BLOCK BIT(1)
+#define OWL_DMA_INTSTAT_FRAME BIT(2)
+#define OWL_DMA_INTSTAT_HALF_FRAME BIT(3)
+#define OWL_DMA_INTSTAT_LAST_FRAME BIT(4)
+
+/* Extract a 'width'-bit field at bit 'shift' of 'val' and place it at bit 'newshift' */
+#define BIT_FIELD(val, width, shift, newshift) \
+ ((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
+
+/**
+ * struct owl_dma_lli_hw - Hardware link list for dma transfer
+ * @next_lli: physical address of the next link list
+ * @saddr: source physical address
+ * @daddr: destination physical address
+ * @flen: frame length
+ * @fcnt: frame count
+ * @src_stride: source stride
+ * @dst_stride: destination stride
+ * @ctrla: dma_mode and linklist ctrl config
+ * @ctrlb: interrupt config
+ * @const_num: data for constant fill
+ */
+struct owl_dma_lli_hw {
+ u32 next_lli;
+ u32 saddr;
+ u32 daddr;
+ u32 flen:20;
+ u32 fcnt:12;
+ u32 src_stride;
+ u32 dst_stride;
+ u32 ctrla;
+ u32 ctrlb;
+ u32 const_num;
+};
+
+/**
+ * struct owl_dma_lli - Link list for dma transfer
+ * @hw: hardware link list
+ * @phys: physical address of hardware link list
+ * @node: node for txd's lli_list
+ */
+struct owl_dma_lli {
+ struct owl_dma_lli_hw hw;
+ dma_addr_t phys;
+ struct list_head node;
+};
+
+/**
+ * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @lli_list: link list of lli nodes
+ */
+struct owl_dma_txd {
+ struct virt_dma_desc vd;
+ struct list_head lli_list;
+};
+
+/**
+ * struct owl_dma_pchan - Holder for the physical channels
+ * @id: physical index to this channel
+ * @base: virtual memory base for the dma channel
+ * @vchan: the virtual channel currently being served by this physical channel
+ * @lock: a lock to use when altering an instance of this struct
+ */
+struct owl_dma_pchan {
+ u32 id;
+ void __iomem *base;
+ struct owl_dma_vchan *vchan;
+ spinlock_t lock;
+};
+
+/**
+ * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
+ * @vc: wrapped virtual channel
+ * @pchan: the physical channel utilized by this channel
+ * @txd: active transaction on this channel
+ */
+struct owl_dma_vchan {
+ struct virt_dma_chan vc;
+ struct owl_dma_pchan *pchan;
+ struct owl_dma_txd *txd;
+};
+
+/**
+ * struct owl_dma - Holder for the Owl DMA controller
+ * @dma: dma engine for this instance
+ * @base: virtual memory base for the DMA controller
+ * @clk: clock for the DMA controller
+ * @lock: a lock to use when changing DMA controller global registers
+ * @lli_pool: a pool for the LLI descriptors
+ * @irq: interrupt line for the DMA controller
+ * @nr_pchans: the number of physical channels
+ * @pchans: array of data for the physical channels
+ * @nr_vchans: the number of virtual channels
+ * @vchans: array of data for the virtual channels
+ */
+struct owl_dma {
+ struct dma_device dma;
+ void __iomem *base;
+ struct clk *clk;
+ spinlock_t lock;
+ struct dma_pool *lli_pool;
+ int irq;
+
+ unsigned int nr_pchans;
+ struct owl_dma_pchan *pchans;
+
+ unsigned int nr_vchans;
+ struct owl_dma_vchan *vchans;
+};
+
+static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
+ u32 val, bool state)
+{
+ u32 regval;
+
+ regval = readl(pchan->base + reg);
+
+ if (state)
+ regval |= val;
+ else
+ regval &= ~val;
+
+ writel(regval, pchan->base + reg);
+}
+
+static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
+{
+ writel(data, pchan->base + reg);
+}
+
+static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
+{
+ return readl(pchan->base + reg);
+}
+
+static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
+{
+ u32 regval;
+
+ regval = readl(od->base + reg);
+
+ if (state)
+ regval |= val;
+ else
+ regval &= ~val;
+
+ writel(regval, od->base + reg);
+}
+
+static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
+{
+ writel(data, od->base + reg);
+}
+
+static u32 dma_readl(struct owl_dma *od, u32 reg)
+{
+ return readl(od->base + reg);
+}
+
+static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
+{
+ return container_of(dd, struct owl_dma, dma);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
+{
+ return container_of(chan, struct owl_dma_vchan, vc.chan);
+}
+
+static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct owl_dma_txd, vd.tx);
+}
+
+static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
+{
+ u32 ctl;
+
+ ctl = BIT_FIELD(mode, 4, 28, 28) |
+ BIT_FIELD(mode, 8, 16, 20) |
+ BIT_FIELD(mode, 4, 8, 16) |
+ BIT_FIELD(mode, 6, 0, 10) |
+ BIT_FIELD(llc_ctl, 2, 10, 8) |
+ BIT_FIELD(llc_ctl, 2, 8, 6);
+
+ return ctl;
+}
+
+static inline u32 llc_hw_ctrlb(u32 int_ctl)
+{
+ u32 ctl;
+
+ ctl = BIT_FIELD(int_ctl, 7, 0, 18);
+
+ return ctl;
+}
+
+static void owl_dma_free_lli(struct owl_dma *od,
+ struct owl_dma_lli *lli)
+{
+ list_del(&lli->node);
+ dma_pool_free(od->lli_pool, lli, lli->phys);
+}
+
+static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
+{
+ struct owl_dma_lli *lli;
+ dma_addr_t phys;
+
+ lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
+ if (!lli)
+ return NULL;
+
+ INIT_LIST_HEAD(&lli->node);
+ lli->phys = phys;
+
+ return lli;
+}
+
+static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
+ struct owl_dma_lli *prev,
+ struct owl_dma_lli *next)
+{
+ list_add_tail(&next->node, &txd->lli_list);
+
+ if (prev) {
+ prev->hw.next_lli = next->phys;
+ prev->hw.ctrla |= llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
+ }
+
+ return next;
+}
+
+static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
+ struct owl_dma_lli *lli,
+ dma_addr_t src, dma_addr_t dst,
+ u32 len, enum dma_transfer_direction dir)
+{
+ struct owl_dma_lli_hw *hw = &lli->hw;
+ u32 mode;
+
+ mode = OWL_DMA_MODE_PW(0);
+
+ switch (dir) {
+ case DMA_MEM_TO_MEM:
+ mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
+ OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
+ OWL_DMA_MODE_DAM_INC;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw->next_lli = 0; /* One link list by default */
+ hw->saddr = src;
+ hw->daddr = dst;
+
+ hw->fcnt = 1; /* Frame count fixed as 1 */
+ hw->flen = len; /* Max frame length is 1MB */
+ hw->src_stride = 0;
+ hw->dst_stride = 0;
+ hw->ctrla = llc_hw_ctrla(mode,
+ OWL_DMA_LLC_SAV_LOAD_NEXT |
+ OWL_DMA_LLC_DAV_LOAD_NEXT);
+
+ hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
+
+ return 0;
+}
+
+static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
+ struct owl_dma_vchan *vchan)
+{
+ struct owl_dma_pchan *pchan = NULL;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < od->nr_pchans; i++) {
+ pchan = &od->pchans[i];
+
+ spin_lock_irqsave(&pchan->lock, flags);
+ if (!pchan->vchan) {
+ pchan->vchan = vchan;
+ spin_unlock_irqrestore(&pchan->lock, flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&pchan->lock, flags);
+ }
+
+ return pchan;
+}
+
+static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
+{
+ unsigned int val;
+
+ val = dma_readl(od, OWL_DMA_IDLE_STAT);
+
+ return !(val & (1 << pchan->id));
+}
+
+static void owl_dma_terminate_pchan(struct owl_dma *od,
+ struct owl_dma_pchan *pchan)
+{
+ unsigned long flags;
+ u32 irq_pd;
+
+ pchan_writel(pchan, OWL_DMAX_START, 0);
+ pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
+
+ spin_lock_irqsave(&od->lock, flags);
+ dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);
+
+ irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
+ if (irq_pd & (1 << pchan->id)) {
+ dev_warn(od->dma.dev,
+ "terminating pchan %d that still has pending irq\n",
+ pchan->id);
+ dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
+ }
+
+ pchan->vchan = NULL;
+
+ spin_unlock_irqrestore(&od->lock, flags);
+}
+
+static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
+{
+ struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
+ struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
+ struct owl_dma_pchan *pchan = vchan->pchan;
+ struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
+ struct owl_dma_lli *lli;
+ unsigned long flags;
+ u32 int_ctl;
+
+ list_del(&vd->node);
+
+ vchan->txd = txd;
+
+ /* Wait for the channel to become inactive */
+ while (owl_dma_pchan_busy(od, pchan))
+ cpu_relax();
+
+ lli = list_first_entry(&txd->lli_list,
+ struct owl_dma_lli, node);
+
+ int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
+
+ pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
+ pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
+ OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
+ pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
+ pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);
+
+ /* Clear IRQ status for this pchan */
+ pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
+
+ spin_lock_irqsave(&od->lock, flags);
+
+ dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);
+
+ spin_unlock_irqrestore(&od->lock, flags);
+
+ dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);
+
+ /* Start DMA transfer for this pchan */
+ pchan_writel(pchan, OWL_DMAX_START, 0x1);
+
+ return 0;
+}
+
+static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
+{
+ /* Ensure that the physical channel is stopped */
+ owl_dma_terminate_pchan(od, vchan->pchan);
+
+ vchan->pchan = NULL;
+}
+
+static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
+{
+ struct owl_dma *od = dev_id;
+ struct owl_dma_vchan *vchan;
+ struct owl_dma_pchan *pchan;
+ unsigned long pending;
+ int i;
+ unsigned int global_irq_pending, chan_irq_pending;
+
+ spin_lock(&od->lock);
+
+ pending = dma_readl(od, OWL_DMA_IRQ_PD0);
+
+ /* Clear IRQ status for each pchan */
+ for_each_set_bit(i, &pending, od->nr_pchans) {
+ pchan = &od->pchans[i];
+ pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
+ }
+
+ /* Clear pending IRQ */
+ dma_writel(od, OWL_DMA_IRQ_PD0, pending);
+
+ /* Check for missed pending IRQs */
+ for (i = 0; i < od->nr_pchans; i++) {
+ pchan = &od->pchans[i];
+ chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
+ pchan_readl(pchan, OWL_DMAX_INT_STATUS);
+
+ /* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
+ dma_readl(od, OWL_DMA_IRQ_PD0);
+
+ global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);
+
+ if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
+ dev_dbg(od->dma.dev,
+ "global and channel IRQ pending match err\n");
+
+ /* Clear IRQ status for this pchan */
+ pchan_update(pchan, OWL_DMAX_INT_STATUS,
+ 0xff, false);
+
+ /* Update global IRQ pending */
+ pending |= BIT(i);
+ }
+ }
+
+ spin_unlock(&od->lock);
+
+ for_each_set_bit(i, &pending, od->nr_pchans) {
+ struct owl_dma_txd *txd;
+
+ pchan = &od->pchans[i];
+
+ vchan = pchan->vchan;
+ if (!vchan) {
+ dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
+ pchan->id);
+ continue;
+ }
+
+ spin_lock(&vchan->vc.lock);
+
+ txd = vchan->txd;
+ if (txd) {
+ vchan->txd = NULL;
+
+ vchan_cookie_complete(&txd->vd);
+
+ /*
+ * Start the next descriptor (if any),
+ * otherwise free this channel.
+ */
+ if (vchan_next_desc(&vchan->vc))
+ owl_dma_start_next_txd(vchan);
+ else
+ owl_dma_phy_free(od, vchan);
+ }
+
+ spin_unlock(&vchan->vc.lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
+{
+ struct owl_dma_lli *lli, *_lli;
+
+ if (unlikely(!txd))
+ return;
+
+ list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
+ owl_dma_free_lli(od, lli);
+
+ kfree(txd);
+}
+
+static void owl_dma_desc_free(struct virt_dma_desc *vd)
+{
+ struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
+ struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
+
+ owl_dma_free_txd(od, txd);
+}
+
+static int owl_dma_terminate_all(struct dma_chan *chan)
+{
+ struct owl_dma *od = to_owl_dma(chan->device);
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ if (vchan->pchan)
+ owl_dma_phy_free(od, vchan);
+
+ if (vchan->txd) {
+ owl_dma_desc_free(&vchan->txd->vd);
+ vchan->txd = NULL;
+ }
+
+ vchan_get_all_descriptors(&vchan->vc, &head);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ return 0;
+}
+
+static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
+{
+ struct owl_dma_pchan *pchan;
+ struct owl_dma_txd *txd;
+ struct owl_dma_lli *lli;
+ unsigned int next_lli_phy;
+ size_t bytes;
+
+ pchan = vchan->pchan;
+ txd = vchan->txd;
+
+ if (!pchan || !txd)
+ return 0;
+
+ /* Get the remaining byte count of the current node in the link list */
+ bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);
+
+ /* Loop through the preceding nodes to get total remaining bytes */
+ if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
+ next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
+ list_for_each_entry(lli, &txd->lli_list, node) {
+ /* Start from the next active node */
+ if (lli->phys == next_lli_phy) {
+ list_for_each_entry(lli, &txd->lli_list, node)
+ bytes += lli->hw.flen;
+ break;
+ }
+ }
+ }
+
+ return bytes;
+}
+
+static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ struct owl_dma_lli *lli;
+ struct virt_dma_desc *vd;
+ struct owl_dma_txd *txd;
+ enum dma_status ret;
+ unsigned long flags;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(chan, cookie, state);
+ if (ret == DMA_COMPLETE || !state)
+ return ret;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ vd = vchan_find_desc(&vchan->vc, cookie);
+ if (vd) {
+ txd = to_owl_txd(&vd->tx);
+ list_for_each_entry(lli, &txd->lli_list, node)
+ bytes += lli->hw.flen;
+ } else {
+ bytes = owl_dma_getbytes_chan(vchan);
+ }
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ dma_set_residue(state, bytes);
+
+ return ret;
+}
+
+static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
+{
+ struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
+ struct owl_dma_pchan *pchan;
+
+ pchan = owl_dma_get_pchan(od, vchan);
+ if (!pchan)
+ return;
+
+ dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);
+
+ vchan->pchan = pchan;
+ owl_dma_start_next_txd(vchan);
+}
+
+static void owl_dma_issue_pending(struct dma_chan *chan)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+ if (vchan_issue_pending(&vchan->vc)) {
+ if (!vchan->pchan)
+ owl_dma_phy_alloc_and_start(vchan);
+ }
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor
+ *owl_dma_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct owl_dma *od = to_owl_dma(chan->device);
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ struct owl_dma_txd *txd;
+ struct owl_dma_lli *lli, *prev = NULL;
+ size_t offset, bytes;
+ int ret;
+
+ if (!len)
+ return NULL;
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ INIT_LIST_HEAD(&txd->lli_list);
+
+ /* Process the transfer frame by frame */
+ for (offset = 0; offset < len; offset += bytes) {
+ lli = owl_dma_alloc_lli(od);
+ if (!lli) {
+ dev_warn(chan2dev(chan), "failed to allocate lli\n");
+ goto err_txd_free;
+ }
+
+ bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
+
+ ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
+ bytes, DMA_MEM_TO_MEM);
+ if (ret) {
+ dev_warn(chan2dev(chan), "failed to config lli\n");
+ goto err_txd_free;
+ }
+
+ prev = owl_dma_add_lli(txd, prev, lli);
+ }
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+ owl_dma_free_txd(od, txd);
+ return NULL;
+}
+
+static void owl_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+
+ /* Ensure all queued descriptors are freed */
+ vchan_free_chan_resources(&vchan->vc);
+}
+
+static inline void owl_dma_free(struct owl_dma *od)
+{
+ struct owl_dma_vchan *vchan = NULL;
+ struct owl_dma_vchan *next;
+
+ list_for_each_entry_safe(vchan,
+ next, &od->dma.channels, vc.chan.device_node) {
+ list_del(&vchan->vc.chan.device_node);
+ tasklet_kill(&vchan->vc.task);
+ }
+}
+
+static int owl_dma_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct owl_dma *od;
+ struct resource *res;
+ int ret, i, nr_channels, nr_requests;
+
+ od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+ if (!od)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ od->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(od->base))
+ return PTR_ERR(od->base);
+
+ ret = of_property_read_u32(np, "dma-channels", &nr_channels);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get dma-channels\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "dma-requests", &nr_requests);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get dma-requests\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
+ nr_channels, nr_requests);
+
+ od->nr_pchans = nr_channels;
+ od->nr_vchans = nr_requests;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ platform_set_drvdata(pdev, od);
+ spin_lock_init(&od->lock);
+
+ dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
+
+ od->dma.dev = &pdev->dev;
+ od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
+ od->dma.device_tx_status = owl_dma_tx_status;
+ od->dma.device_issue_pending = owl_dma_issue_pending;
+ od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
+ od->dma.device_terminate_all = owl_dma_terminate_all;
+ od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ od->dma.directions = BIT(DMA_MEM_TO_MEM);
+ od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ INIT_LIST_HEAD(&od->dma.channels);
+
+ od->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(od->clk)) {
+ dev_err(&pdev->dev, "unable to get clock\n");
+ return PTR_ERR(od->clk);
+ }
+
+ /*
+ * Even though the DMA controller is capable of generating 4
+ * IRQs for the DMA priority feature, we only use 1 IRQ for
+ * simplicity.
+ */
+ od->irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
+ dev_name(&pdev->dev), od);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request IRQ\n");
+ return ret;
+ }
+
+ /* Init physical channels */
+ od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
+ sizeof(struct owl_dma_pchan), GFP_KERNEL);
+ if (!od->pchans)
+ return -ENOMEM;
+
+ for (i = 0; i < od->nr_pchans; i++) {
+ struct owl_dma_pchan *pchan = &od->pchans[i];
+
+ pchan->id = i;
+ pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
+ }
+
+ /* Init virtual channels */
+ od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
+ sizeof(struct owl_dma_vchan), GFP_KERNEL);
+ if (!od->vchans)
+ return -ENOMEM;
+
+ for (i = 0; i < od->nr_vchans; i++) {
+ struct owl_dma_vchan *vchan = &od->vchans[i];
+
+ vchan->vc.desc_free = owl_dma_desc_free;
+ vchan_init(&vchan->vc, &od->dma);
+ }
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
+ sizeof(struct owl_dma_lli),
+ __alignof__(struct owl_dma_lli),
+ 0);
+ if (!od->lli_pool) {
+ dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ clk_prepare_enable(od->clk);
+
+ ret = dma_async_device_register(&od->dma);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register DMA engine device\n");
+ goto err_pool_free;
+ }
+
+ return 0;
+
+err_pool_free:
+ clk_disable_unprepare(od->clk);
+ dma_pool_destroy(od->lli_pool);
+
+ return ret;
+}
+
+static int owl_dma_remove(struct platform_device *pdev)
+{
+ struct owl_dma *od = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&od->dma);
+
+ /* Mask all interrupts for this execution environment */
+ dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);
+
+ /* Make sure we won't have any further interrupts */
+ devm_free_irq(od->dma.dev, od->irq, od);
+
+ owl_dma_free(od);
+
+ clk_disable_unprepare(od->clk);
+
+ return 0;
+}
+
+static const struct of_device_id owl_dma_match[] = {
+ { .compatible = "actions,s900-dma", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, owl_dma_match);
+
+static struct platform_driver owl_dma_driver = {
+ .probe = owl_dma_probe,
+ .remove = owl_dma_remove,
+ .driver = {
+ .name = "dma-owl",
+ .of_match_table = of_match_ptr(owl_dma_match),
+ },
+};
+
+static int owl_dma_init(void)
+{
+ return platform_driver_register(&owl_dma_driver);
+}
+subsys_initcall(owl_dma_init);
+
+static void __exit owl_dma_exit(void)
+{
+ platform_driver_unregister(&owl_dma_driver);
+}
+module_exit(owl_dma_exit);
+
+MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
+MODULE_LICENSE("GPL");
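
To make the owl-dma BIT_FIELD() repacking concrete: llc_hw_ctrla() uses
BIT_FIELD(mode, 8, 16, 20) to take the 8-bit field at bit 16 of the MODE value
and re-base it at bit 20 of the ctrla word. A worked example (illustration
only, with a local copy of the macro):

#include <linux/bug.h>
#include <linux/types.h>

#define EXAMPLE_BIT_FIELD(val, width, shift, newshift) \
        ((((val) >> (shift)) & ((1U << (width)) - 1)) << (newshift))

static void example_bit_field(void)
{
        u32 mode = 0x00AB0000;  /* bits 23:16 hold 0xAB */

        /* (0x00AB0000 >> 16) & 0xFF == 0xAB; 0xAB << 20 == 0x0AB00000 */
        WARN_ON(EXAMPLE_BIT_FIELD(mode, 8, 16, 20) != 0x0AB00000);
}
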
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index defcdde4d358..88750a34e859 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1046,13 +1046,16 @@ static bool _start(struct pl330_thread *thrd)
if (_state(thrd) == PL330_STATE_KILLING)
UNTIL(thrd, PL330_STATE_STOPPED)
+ /* fall through */
case PL330_STATE_FAULTING:
_stop(thrd);
+ /* fall through */
case PL330_STATE_KILLING:
case PL330_STATE_COMPLETING:
UNTIL(thrd, PL330_STATE_STOPPED)
+ /* fall through */
case PL330_STATE_STOPPED:
return _trigger(thrd);
@@ -1779,8 +1782,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
static void pl330_release_channel(struct pl330_thread *thrd)
{
- struct pl330_dmac *pl330;
-
if (!thrd || thrd->free)
return;
@@ -1789,8 +1790,6 @@ static void pl330_release_channel(struct pl330_thread *thrd)
dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);
- pl330 = thrd->dmac;
-
_free_event(thrd, thrd->ev);
thrd->free = true;
}
@@ -2257,13 +2256,14 @@ static int pl330_terminate_all(struct dma_chan *chan)
pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
+
spin_lock(&pl330->lock);
_stop(pch->thread);
- spin_unlock(&pl330->lock);
-
pch->thread->req[0].desc = NULL;
pch->thread->req[1].desc = NULL;
pch->thread->req_running = -1;
+ spin_unlock(&pl330->lock);
+
power_down = pch->active;
pch->active = false;
@@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
1 : PL330_MAX_BURST);
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index b53fb618bbf6..b31c28b67ad3 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -179,6 +179,8 @@ static unsigned int pxad_drcmr(unsigned int line)
return 0x1000 + line * 4;
}
+bool pxad_filter_fn(struct dma_chan *chan, void *param);
+
/*
* Debug fs
*/
@@ -760,6 +762,8 @@ static void pxad_free_chan_resources(struct dma_chan *dchan)
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
+ chan->drcmr = U32_MAX;
+ chan->prio = PXAD_PRIO_LOWEST;
}
static void pxad_free_desc(struct virt_dma_desc *vd)
@@ -1384,6 +1388,9 @@ static int pxad_init_dmadev(struct platform_device *op,
c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
+
+ c->drcmr = U32_MAX;
+ c->prio = PXAD_PRIO_LOWEST;
c->vc.desc_free = pxad_free_desc;
vchan_init(&c->vc, &pdev->slave);
init_waitqueue_head(&c->wq_state);
@@ -1396,9 +1403,10 @@ static int pxad_probe(struct platform_device *op)
{
struct pxad_device *pdev;
const struct of_device_id *of_id;
+ const struct dma_slave_map *slave_map = NULL;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
struct resource *iores;
- int ret, dma_channels = 0, nb_requestors = 0;
+ int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1429,6 +1437,8 @@ static int pxad_probe(struct platform_device *op)
} else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
nb_requestors = pdata->nb_requestors;
+ slave_map = pdata->slave_map;
+ slave_map_cnt = pdata->slave_map_cnt;
} else {
dma_channels = 32; /* default 32 channel */
}
@@ -1440,6 +1450,9 @@ static int pxad_probe(struct platform_device *op)
pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
+ pdev->slave.filter.map = slave_map;
+ pdev->slave.filter.mapcnt = slave_map_cnt;
+ pdev->slave.filter.fn = pxad_filter_fn;
pdev->slave.copy_align = PDMA_ALIGNMENT;
pdev->slave.src_addr_widths = widths;
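
The pxa_dma change wires a dma_slave_map through platform data so that legacy
(non-DT) clients can use dma_request_chan() instead of open-coding a filter. A
hypothetical board-file sketch (the device name, channel names, and the
example_param payload are made up; the real driver consumes its own
pxad-specific parameter via pxad_filter_fn()):

#include <linux/dmaengine.h>

/* Hypothetical filter payload; the real driver defines its own structure */
struct example_param {
        unsigned int drcmr;
};

static const struct example_param example_uart_rx = { .drcmr = 19 };
static const struct example_param example_uart_tx = { .drcmr = 20 };

static const struct dma_slave_map example_slave_map[] = {
        { "pxa2xx-uart.0", "rx", (void *)&example_uart_rx },
        { "pxa2xx-uart.0", "tx", (void *)&example_uart_tx },
};

With such a table installed through pdata->slave_map/slave_map_cnt, a client
simply calls dma_request_chan(dev, "rx") and the core resolves the channel via
the map and the driver's filter function.
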
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 7056fe7513b4..64744eb88720 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-s3c24xx.h>
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2a2ccd9c78e4..48ee35e2bce6 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen2 DMA Controller Driver
*
* Copyright (C) 2014 Renesas Electronics Inc.
*
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
@@ -431,7 +428,8 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
}
- rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR,
+ chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
}
static int rcar_dmac_init(struct rcar_dmac *dmac)
@@ -761,21 +759,15 @@ static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
dev_err(chan->chan.device->dev, "CHCR DE check error\n");
}
-static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan)
+static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
{
u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
- if (!(chcr & RCAR_DMACHCR_DE))
- return;
-
/* set DE=0 and flush remaining data */
rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
/* make sure all remaining data was flushed */
rcar_dmac_chcr_de_barrier(chan);
-
- /* back DE */
- rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
}
static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
@@ -783,7 +775,8 @@ static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
- RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
+ RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
+ RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
rcar_dmac_chcr_de_barrier(chan);
}
@@ -812,12 +805,7 @@ static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
}
}
-static void rcar_dmac_stop(struct rcar_dmac *dmac)
-{
- rcar_dmac_write(dmac, RCAR_DMAOR, 0);
-}
-
-static void rcar_dmac_abort(struct rcar_dmac *dmac)
+static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
{
unsigned int i;
@@ -826,14 +814,24 @@ static void rcar_dmac_abort(struct rcar_dmac *dmac)
struct rcar_dmac_chan *chan = &dmac->channels[i];
/* Stop and reinitialize the channel. */
- spin_lock(&chan->lock);
+ spin_lock_irq(&chan->lock);
rcar_dmac_chan_halt(chan);
- spin_unlock(&chan->lock);
-
- rcar_dmac_chan_reinit(chan);
+ spin_unlock_irq(&chan->lock);
}
}
+static int rcar_dmac_chan_pause(struct dma_chan *chan)
+{
+ unsigned long flags;
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+ spin_lock_irqsave(&rchan->lock, flags);
+ rcar_dmac_clear_chcr_de(rchan);
+ spin_unlock_irqrestore(&rchan->lock, flags);
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* Descriptors preparation
*/
@@ -1355,9 +1353,6 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
residue += chunk->size;
}
- if (desc->direction == DMA_DEV_TO_MEM)
- rcar_dmac_sync_tcr(chan);
-
/* Add the residue for the current chunk. */
residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
@@ -1522,11 +1517,26 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
struct rcar_dmac_chan *chan = dev;
irqreturn_t ret = IRQ_NONE;
+ bool reinit = false;
u32 chcr;
spin_lock(&chan->lock);
chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+ if (chcr & RCAR_DMACHCR_CAE) {
+ struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);
+
+ /*
+ * We don't need to call rcar_dmac_chan_halt() because the channel
+ * is already stopped in the error case. We need to clear the
+ * register and check the DE bit as recovery.
+ */
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
+ rcar_dmac_chcr_de_barrier(chan);
+ reinit = true;
+ goto spin_lock_end;
+ }
+
if (chcr & RCAR_DMACHCR_TE)
mask |= RCAR_DMACHCR_DE;
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
@@ -1539,8 +1549,16 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
if (chcr & RCAR_DMACHCR_TE)
ret |= rcar_dmac_isr_transfer_end(chan);
+spin_lock_end:
spin_unlock(&chan->lock);
+ if (reinit) {
+ dev_err(chan->chan.device->dev, "Channel Address Error\n");
+
+ rcar_dmac_chan_reinit(chan);
+ ret = IRQ_HANDLED;
+ }
+
return ret;
}
@@ -1597,24 +1615,6 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
return IRQ_HANDLED;
}
-static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
-{
- struct rcar_dmac *dmac = data;
-
- if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
- return IRQ_NONE;
-
- /*
- * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
- * abort transfers on all channels, and reinitialize the DMAC.
- */
- rcar_dmac_stop(dmac);
- rcar_dmac_abort(dmac);
- rcar_dmac_init(dmac);
-
- return IRQ_HANDLED;
-}
-
/* -----------------------------------------------------------------------------
* OF xlate and channel filter
*/
@@ -1784,8 +1784,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
struct rcar_dmac *dmac;
struct resource *mem;
unsigned int i;
- char *irqname;
- int irq;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -1824,17 +1822,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->iomem))
return PTR_ERR(dmac->iomem);
- irq = platform_get_irq_byname(pdev, "error");
- if (irq < 0) {
- dev_err(&pdev->dev, "no error IRQ specified\n");
- return -ENODEV;
- }
-
- irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
- dev_name(dmac->dev));
- if (!irqname)
- return -ENOMEM;
-
/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
@@ -1871,6 +1858,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
engine->device_config = rcar_dmac_device_config;
+ engine->device_pause = rcar_dmac_chan_pause;
engine->device_terminate_all = rcar_dmac_chan_terminate_all;
engine->device_tx_status = rcar_dmac_tx_status;
engine->device_issue_pending = rcar_dmac_issue_pending;
@@ -1885,14 +1873,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
goto error;
}
- ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
- irqname, dmac);
- if (ret) {
- dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
- irq, ret);
- return ret;
- }
-
/* Register the DMAC as a DMA provider for DT. */
ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
NULL);
@@ -1932,7 +1912,7 @@ static void rcar_dmac_shutdown(struct platform_device *pdev)
{
struct rcar_dmac *dmac = platform_get_drvdata(pdev);
- rcar_dmac_stop(dmac);
+ rcar_dmac_stop_all_chan(dmac);
}
static const struct of_device_id rcar_dmac_of_ids[] = {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1bc149af990e..f4edfc56f34e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -555,6 +555,7 @@ struct d40_gen_dmac {
* @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
* later
* @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @regs_interrupt: Scratch space for registers during interrupt.
* @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
* @gen_dmac: the struct for generic registers values to represent u8500/8540
* DMA controller
@@ -592,6 +593,7 @@ struct d40_base {
u32 reg_val_backup[BACKUP_REGS_SZ];
u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
u32 *reg_val_backup_chan;
+ u32 *regs_interrupt;
u16 gcc_pwr_off_mask;
struct d40_gen_dmac gen_dmac;
};
@@ -1637,7 +1639,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
struct d40_chan *d40c;
unsigned long flags;
struct d40_base *base = data;
- u32 regs[base->gen_dmac.il_size];
+ u32 *regs = base->regs_interrupt;
struct d40_interrupt_lookup *il = base->gen_dmac.il;
u32 il_size = base->gen_dmac.il_size;
@@ -3258,13 +3260,22 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (!base->lcla_pool.alloc_map)
goto free_backup_chan;
+ base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
+ sizeof(*base->regs_interrupt),
+ GFP_KERNEL);
+ if (!base->regs_interrupt)
+ goto free_map;
+
base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (base->desc_slab == NULL)
- goto free_map;
+ goto free_regs;
+
return base;
+ free_regs:
+ kfree(base->regs_interrupt);
free_map:
kfree(base->lcla_pool.alloc_map);
free_backup_chan:
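
Editor's note: the ste_dma40 change replaces a variable-length array on the interrupt handler's stack with a buffer allocated once at probe time. A hedged sketch of the pattern, with names shortened:

        /* probe: size the scratch buffer for the worst case, once */
        base->regs_interrupt = kmalloc_array(il_size, sizeof(u32), GFP_KERNEL);
        if (!base->regs_interrupt)
                return -ENOMEM;

        /* IRQ handler: borrow the preallocated buffer instead of a VLA */
        u32 *regs = base->regs_interrupt;
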
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 8c5807362a25..379e8d534e61 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -594,7 +594,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
chan->busy = true;
- dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@@ -693,7 +693,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
- dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
stm32_dma_start_transfer(chan);
}
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 9dc450b7ace6..06dd1725375e 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1170,7 +1170,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
chan->busy = true;
- dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}
static void stm32_mdma_issue_pending(struct dma_chan *c)
@@ -1183,7 +1183,7 @@ static void stm32_mdma_issue_pending(struct dma_chan *c)
if (!vchan_issue_pending(&chan->vchan))
goto end;
- dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
if (!chan->desc && !chan->busy)
stm32_mdma_start_transfer(chan);
@@ -1203,7 +1203,7 @@ static int stm32_mdma_pause(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
if (!ret)
- dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
return ret;
}
@@ -1240,7 +1240,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
return 0;
}
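
Editor's note: the stm32-dma and stm32-mdma hunks all make the same hardening change. %pK honours the kptr_restrict sysctl, so the vchan address is censored for unprivileged readers instead of being printed raw:

        /* %pK output depends on kptr_restrict (0, 1 or 2) */
        dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
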
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 9b5ca8691f27..a4a931ddf6f6 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ if (__dma_omap15xx(od->plat->dma_attr))
+ od->ddev.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ else
+ od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
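
Editor's note: because the OMAP1 (15xx) controller cannot report an in-flight transfer position, the driver now advertises descriptor-level residue granularity on those parts. A client can discover this through the capability API; a hedged sketch, where avoid_midburst_polling() is a hypothetical helper:

        struct dma_slave_caps caps;

        if (!dma_get_slave_caps(chan, &caps) &&
            caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
                avoid_midburst_polling();  /* residue exact only per descriptor */
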
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 27b523530c4a..c12442312595 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -115,6 +115,9 @@
#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
+#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
+#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
+
/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
@@ -340,6 +343,7 @@ struct xilinx_dma_tx_descriptor {
* @start_transfer: Differentiate b/w DMA IP's transfer
* @stop_transfer: Differentiate b/w DMA IP's quiesce
* @tdest: TDEST value for mcdma
+ * @has_vflip: S2MM vertical flip
*/
struct xilinx_dma_chan {
struct xilinx_dma_device *xdev;
@@ -376,6 +380,7 @@ struct xilinx_dma_chan {
void (*start_transfer)(struct xilinx_dma_chan *chan);
int (*stop_transfer)(struct xilinx_dma_chan *chan);
u16 tdest;
+ bool has_vflip;
};
/**
@@ -1092,6 +1097,14 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
desc->async_tx.phys);
/* Configure the hardware using info in the config structure */
+ if (chan->has_vflip) {
+ reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
+ reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
+ reg |= config->vflip_en;
+ dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
+ reg);
+ }
+
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
if (config->frm_cnt_en)
@@ -2105,6 +2118,8 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
}
chan->config.frm_cnt_en = cfg->frm_cnt_en;
+ chan->config.vflip_en = cfg->vflip_en;
+
if (cfg->park)
chan->config.park_frm = cfg->park_frm;
else
@@ -2428,6 +2443,13 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->direction = DMA_DEV_TO_MEM;
chan->id = chan_id;
chan->tdest = chan_id - xdev->nr_channels;
+ chan->has_vflip = of_property_read_bool(node,
+ "xlnx,enable-vert-flip");
+ if (chan->has_vflip) {
+ chan->config.vflip_en = dma_read(chan,
+ XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
+ XILINX_VDMA_ENABLE_VERTICAL_FLIP;
+ }
chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
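
Editor's note: vertical flip is advertised per channel via the new xlnx,enable-vert-flip DT property and controlled at run time through the VDMA config call. A minimal sketch, assuming dchan is an S2MM VDMA channel and the exported xilinx_vdma_channel_set_config() signature:

        #include <linux/dma/xilinx_dma.h>

        struct xilinx_vdma_config cfg = {
                .vflip_en = 1,  /* flip S2MM frames vertically */
        };

        xilinx_vdma_channel_set_config(dchan, &cfg);
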
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index d0d5c4dbe097..5762c3c383f2 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -730,7 +730,8 @@ static int altr_s10_sdram_probe(struct platform_device *pdev)
S10_DDR0_IRQ_MASK)) {
edac_printk(KERN_ERR, EDAC_MC,
"Error clearing SDRAM ECC count\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err2;
}
if (regmap_update_bits(drvdata->mc_vbase, priv->ecc_irq_en_offset,
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 3bb82e511eca..7d3edd713932 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -215,6 +215,7 @@ const char * const edac_mem_types[] = {
[MEM_LRDDR3] = "Load-Reduced-DDR3-RAM",
[MEM_DDR4] = "Unbuffered-DDR4",
[MEM_RDDR4] = "Registered-DDR4",
+ [MEM_LRDDR4] = "Load-Reduced-DDR4-RAM",
[MEM_NVDIMM] = "Non-volatile-RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 7481955160a4..20374b8248f0 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -1075,14 +1075,14 @@ int __init edac_mc_sysfs_init(void)
err = device_add(mci_pdev);
if (err < 0)
- goto out_dev_free;
+ goto out_put_device;
edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
return 0;
- out_dev_free:
- kfree(mci_pdev);
+ out_put_device:
+ put_device(mci_pdev);
out:
return err;
}
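
Editor's note: both EDAC fixes here follow the same driver-model rule: once a device has been through device_initialize(), its memory is owned by the refcount, so error paths must drop the reference instead of calling kfree(). A generic sketch, with dev standing in for the device being registered:

        /* dev was set up with device_initialize() */
        err = device_add(dev);
        if (err) {
                put_device(dev);        /* final ref; ->release() frees dev */
                return err;
        }
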
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8ed4dd9c571b..8e120bf60624 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1177,15 +1177,14 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
rc = device_add(pvt->addrmatch_dev);
if (rc < 0)
- return rc;
+ goto err_put_addrmatch;
if (!pvt->is_registered) {
pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
GFP_KERNEL);
if (!pvt->chancounts_dev) {
- put_device(pvt->addrmatch_dev);
- device_del(pvt->addrmatch_dev);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_del_addrmatch;
}
pvt->chancounts_dev->type = &all_channel_counts_type;
@@ -1199,9 +1198,18 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
rc = device_add(pvt->chancounts_dev);
if (rc < 0)
- return rc;
+ goto err_put_chancounts;
}
return 0;
+
+err_put_chancounts:
+ put_device(pvt->chancounts_dev);
+err_del_addrmatch:
+ device_del(pvt->addrmatch_dev);
+err_put_addrmatch:
+ put_device(pvt->addrmatch_dev);
+
+ return rc;
}
static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
@@ -1211,11 +1219,11 @@ static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
edac_dbg(1, "\n");
if (!pvt->is_registered) {
- put_device(pvt->chancounts_dev);
device_del(pvt->chancounts_dev);
+ put_device(pvt->chancounts_dev);
}
- put_device(pvt->addrmatch_dev);
device_del(pvt->addrmatch_dev);
+ put_device(pvt->addrmatch_dev);
}
/****************************************************************************
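
Editor's note: the i7core teardown hunks also correct the ordering; device_del() must unregister the device before put_device() drops what may be the last reference:

        device_del(dev);        /* unregister from the hierarchy first */
        put_device(dev);        /* then release the reference */
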
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4a89c8093307..07726fb00321 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -352,6 +352,7 @@ struct pci_id_table {
struct sbridge_dev {
struct list_head list;
+ int seg;
u8 bus, mc;
u8 node_id, source_id;
struct pci_dev **pdev;
@@ -729,7 +730,8 @@ static inline int numcol(u32 mtr)
return 1 << cols;
}
-static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus,
+static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
+ int multi_bus,
struct sbridge_dev *prev)
{
struct sbridge_dev *sbridge_dev;
@@ -747,14 +749,15 @@ static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bu
: sbridge_edac_list.next, struct sbridge_dev, list);
list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
- if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom))
+ if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
+ (dom == SOCK || dom == sbridge_dev->dom))
return sbridge_dev;
}
return NULL;
}
-static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
+static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
const struct pci_id_table *table)
{
struct sbridge_dev *sbridge_dev;
@@ -771,6 +774,7 @@ static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
return NULL;
}
+ sbridge_dev->seg = seg;
sbridge_dev->bus = bus;
sbridge_dev->dom = dom;
sbridge_dev->n_devs = table->n_devs_per_imc;
@@ -2246,6 +2250,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
struct sbridge_dev *sbridge_dev = NULL;
const struct pci_id_descr *dev_descr = &table->descr[devno];
struct pci_dev *pdev = NULL;
+ int seg = 0;
u8 bus = 0;
int i = 0;
@@ -2276,10 +2281,12 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
/* End of list, leave */
return -ENODEV;
}
+ seg = pci_domain_nr(pdev->bus);
bus = pdev->bus->number;
next_imc:
- sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
+ sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
+ multi_bus, sbridge_dev);
if (!sbridge_dev) {
/* If the HA1 wasn't found, don't create EDAC second memory controller */
if (dev_descr->dom == IMC1 && devno != 1) {
@@ -2292,7 +2299,7 @@ next_imc:
if (dev_descr->dom == SOCK)
goto out_imc;
- sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table);
+ sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
if (!sbridge_dev) {
pci_dev_put(pdev);
return -ENOMEM;
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 4803c6468bab..c009d94f40c5 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -408,26 +408,29 @@ static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
size_t count, loff_t *ppos)
{
struct thunderx_lmc *lmc = file->private_data;
-
unsigned int cline_size = cache_line_size();
-
- u8 tmp[cline_size];
+ u8 *tmp;
void __iomem *addr;
unsigned int offs, timeout = 100000;
atomic_set(&lmc->ecc_int, 0);
lmc->mem = alloc_pages_node(lmc->node, GFP_KERNEL, 0);
-
if (!lmc->mem)
return -ENOMEM;
+ tmp = kmalloc(cline_size, GFP_KERNEL);
+ if (!tmp) {
+ __free_pages(lmc->mem, 0);
+ return -ENOMEM;
+ }
+
addr = page_address(lmc->mem);
while (!atomic_read(&lmc->ecc_int) && timeout--) {
stop_machine(inject_ecc_fn, lmc, NULL);
- for (offs = 0; offs < PAGE_SIZE; offs += sizeof(tmp)) {
+ for (offs = 0; offs < PAGE_SIZE; offs += cline_size) {
/*
* Do a load from the previously rigged location
* This should generate an error interrupt.
@@ -437,6 +440,7 @@ static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
}
}
+ kfree(tmp);
__free_pages(lmc->mem, 0);
return count;
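
Editor's note: the stride fix in this hunk is easy to miss. Once tmp becomes a pointer, sizeof(tmp) is the pointer size rather than the buffer size, hence the switch to cline_size. In miniature:

        u8 on_stack[64];                        /* sizeof(on_stack) == 64 */
        u8 *on_heap = kmalloc(64, GFP_KERNEL);  /* sizeof(on_heap) == sizeof(u8 *) */
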
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index b7e9ea377d70..5e1dd2772278 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index acaccb128fc4..fd24debe58a3 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -20,7 +20,7 @@
#include <linux/acpi.h>
#include <linux/extcon-provider.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c
index 0aa410836f4e..1335a476bfec 100644
--- a/drivers/extcon/extcon-max3355.c
+++ b/drivers/extcon/extcon-max3355.c
@@ -14,6 +14,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
struct max3355_data {
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index 660bbf163bf5..72bc0f2478e2 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 6721ab01fe7d..43c0a936ab82 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -1,18 +1,8 @@
-/**
- * drivers/extcon/extcon-usbc-cros-ec - ChromeOS Embedded Controller extcon
- *
- * Copyright (C) 2017 Google, Inc
- * Author: Benson Leung <bleung@chromium.org>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// ChromeOS Embedded Controller extcon
+//
+// Copyright (C) 2017 Google, Inc.
+// Author: Benson Leung <bleung@chromium.org>
#include <linux/extcon-provider.h>
#include <linux/kernel.h>
@@ -548,4 +538,4 @@ module_platform_driver(extcon_cros_ec_driver);
MODULE_DESCRIPTION("ChromeOS Embedded Controller extcon driver");
MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index af83ad58819c..b9d27c8fe57e 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -433,8 +433,8 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
return index;
spin_lock_irqsave(&edev->lock, flags);
-
state = !!(edev->state & BIT(index));
+ spin_unlock_irqrestore(&edev->lock, flags);
/*
* Call functions in a raw notifier chain for the specific one
@@ -448,6 +448,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
*/
raw_notifier_call_chain(&edev->nh_all, state, edev);
+ spin_lock_irqsave(&edev->lock, flags);
/* This could be in interrupt handler */
prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
if (!prop_buf) {
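
Editor's note: the reshuffling keeps raw_notifier_call_chain() outside the spinlock, since notifier callbacks may take edev->lock themselves. The resulting flow, roughly:

        spin_lock_irqsave(&edev->lock, flags);
        state = !!(edev->state & BIT(index));
        spin_unlock_irqrestore(&edev->lock, flags);

        raw_notifier_call_chain(&edev->nh_all, state, edev);

        spin_lock_irqsave(&edev->lock, flags);  /* retaken for the sysfs work */
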
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index f0587273940e..d8e185582642 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1205,7 +1205,7 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
struct fw_card *card = client->device->card;
- struct timespec ts = {0, 0};
+ struct timespec64 ts = {0, 0};
u32 cycle_time;
int ret = 0;
@@ -1214,9 +1214,9 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
switch (a->clk_id) {
- case CLOCK_REALTIME: getnstimeofday(&ts); break;
- case CLOCK_MONOTONIC: ktime_get_ts(&ts); break;
- case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
+ case CLOCK_REALTIME: ktime_get_real_ts64(&ts); break;
+ case CLOCK_MONOTONIC: ktime_get_ts64(&ts); break;
+ case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break;
default:
ret = -EINVAL;
}
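
Editor's note: the *_ts64 accessors are drop-in replacements with a 64-bit tv_sec that does not overflow in 2038. Illustrative use, with a standing in for the ioctl argument as above:

        struct timespec64 ts;

        ktime_get_real_ts64(&ts);       /* was getnstimeofday(&ts) */
        a->tv_sec  = ts.tv_sec;         /* 64-bit even on 32-bit targets */
        a->tv_nsec = ts.tv_nsec;
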
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 781a4a337557..d8e159feb573 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -87,6 +87,18 @@ config EFI_RUNTIME_WRAPPERS
config EFI_ARMSTUB
bool
+config EFI_ARMSTUB_DTB_LOADER
+ bool "Enable the DTB loader"
+ depends on EFI_ARMSTUB
+ help
+ Select this config option to add support for the dtb= command
+ line parameter, allowing a device tree blob to be loaded into
+ memory from the EFI System Partition by the stub.
+
+ The device tree is typically provided by the platform or by
+ the bootloader, so this option is mostly for development
+	  purposes.
+
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
depends on EFI_VARS
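
Editor's note: with the new option enabled and Secure Boot disabled, the stub honours a parameter such as dtb=/dtbs/myboard.dtb, resolving the path against the root of the EFI System Partition.
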
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index b5214c143fee..388a929baf95 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -259,7 +259,6 @@ void __init efi_init(void)
reserve_regions();
efi_esrt_init();
- efi_memmap_unmap();
memblock_reserve(params.mmap & PAGE_MASK,
PAGE_ALIGN(params.mmap_size +
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 5889cbea60b8..922cfb813109 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -110,11 +110,20 @@ static int __init arm_enable_runtime_services(void)
{
u64 mapsize;
- if (!efi_enabled(EFI_BOOT)) {
+ if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
pr_info("EFI services will not be available.\n");
return 0;
}
+ efi_memmap_unmap();
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return 0;
+ }
+
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
return 0;
@@ -127,13 +136,6 @@ static int __init arm_enable_runtime_services(void)
pr_info("Remapping and enabling EFI services.\n");
- mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
-
- if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
- pr_err("Failed to remap EFI memory map\n");
- return -ENOMEM;
- }
-
if (!efi_virtmap_init()) {
pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
return -ENOMEM;
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 3bf0dca378a6..a7902fccdcfa 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -48,8 +48,21 @@ u64 cper_next_record_id(void)
{
static atomic64_t seq;
- if (!atomic64_read(&seq))
- atomic64_set(&seq, ((u64)get_seconds()) << 32);
+ if (!atomic64_read(&seq)) {
+ time64_t time = ktime_get_real_seconds();
+
+ /*
+ * This code is unlikely to still be needed in year 2106,
+ * but just in case, let's use a few more bits for timestamps
+ * after y2038 to be sure they keep increasing monotonically
+ * for the next few hundred years...
+ */
+ if (time < 0x80000000)
+ atomic64_set(&seq, (u64)time << 32);
+ else
+ atomic64_set(&seq, 0x8000000000000000ull |
+ (u64)time << 24);
+ }
return atomic64_inc_return(&seq);
}
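
Editor's note: a sketch of the resulting record-ID layout:

        /*
         * before ~2038:  [ 0 | seconds (31 bits) | counter (32 bits) ]
         * after  ~2038:  [ 1 | seconds (39 bits) | counter (24 bits) ]
         *
         * atomic64_inc_return() supplies the low-order counter bits, so
         * IDs keep increasing across both encodings.
         */
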
@@ -459,7 +472,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
else
goto err_section_too_small;
#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
- } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: ARM processor error\n", newpfx);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 232f4915223b..2a29dd9c986d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -82,8 +82,11 @@ struct mm_struct efi_mm = {
.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+ .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
};
+struct workqueue_struct *efi_rts_wq;
+
static bool disable_runtime;
static int __init setup_noefi(char *arg)
{
@@ -337,6 +340,18 @@ static int __init efisubsys_init(void)
if (!efi_enabled(EFI_BOOT))
return 0;
+ /*
+ * Since we process only one efi_runtime_service() at a time, an
+ * ordered workqueue (which creates only one execution context)
+ * should suffice for all our needs.
+ */
+ efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+ if (!efi_rts_wq) {
+ pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return 0;
+ }
+
/* We register the efi directory at /sys/firmware/efi */
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
@@ -388,7 +403,7 @@ subsys_initcall(efisubsys_init);
* and if so, populate the supplied memory descriptor with the appropriate
* data.
*/
-int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
efi_memory_desc_t *md;
@@ -406,12 +421,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
u64 size;
u64 end;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_DATA &&
- md->type != EFI_RUNTIME_SERVICES_DATA) {
- continue;
- }
-
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
if (phys_addr >= md->phys_addr && phys_addr < end) {
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 1ab80e06e7c5..5d06bd247d07 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -250,7 +250,10 @@ void __init efi_esrt_init(void)
return;
rc = efi_mem_desc_lookup(efi.esrt, &md);
- if (rc < 0) {
+ if (rc < 0 ||
+ (!(md.attribute & EFI_MEMORY_RUNTIME) &&
+ md.type != EFI_BOOT_SERVICES_DATA &&
+ md.type != EFI_RUNTIME_SERVICES_DATA)) {
pr_warn("ESRT header is not in the memory map.\n");
return;
}
@@ -326,7 +329,8 @@ void __init efi_esrt_init(void)
end = esrt_data + size;
pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
- efi_mem_reserve(esrt_data, esrt_data_size);
+ if (md.type == EFI_BOOT_SERVICES_DATA)
+ efi_mem_reserve(esrt_data, esrt_data_size);
pr_debug("esrt-init: loaded.\n");
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index a34e9290a699..14c40a7750d1 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,10 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
+# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+# disable the stackleak plugin
+cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
+ $(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
@@ -20,7 +23,8 @@ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
- $(call cc-option,-fno-stack-protector)
+ $(call cc-option,-fno-stack-protector) \
+ -D__DISABLE_EXPORTS
GCOV_PROFILE := n
KASAN_SANITIZE := n
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 01a9d78ee415..6920033de6d4 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -40,31 +40,6 @@
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
- void *__image, void **__fh)
-{
- efi_file_io_interface_t *io;
- efi_loaded_image_t *image = __image;
- efi_file_handle_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- void *handle = (void *)(unsigned long)image->device_handle;
-
- status = sys_table_arg->boottime->handle_protocol(handle,
- &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
- return status;
- }
-
- status = io->open_volume(io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to open volume\n");
-
- *__fh = fh;
- return status;
-}
-
void efi_char16_printk(efi_system_table_t *sys_table_arg,
efi_char16_t *str)
{
@@ -202,9 +177,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
* 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
* boot is enabled if we can't determine its state.
*/
- if (secure_boot != efi_secureboot_mode_disabled &&
- strstr(cmdline_ptr, "dtb=")) {
- pr_efi(sys_table, "Ignoring DTB from command line.\n");
+ if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+ secure_boot != efi_secureboot_mode_disabled) {
+ if (strstr(cmdline_ptr, "dtb="))
+ pr_efi(sys_table, "Ignoring DTB from command line.\n");
} else {
status = handle_cmdline_files(sys_table, image, cmdline_ptr,
"dtb=",
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 50a9cab5a834..e94975f4655b 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -413,6 +413,34 @@ static efi_status_t efi_file_close(void *handle)
return efi_call_proto(efi_file_handle, close, handle);
}
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image,
+ efi_file_handle_t **__fh)
+{
+ efi_file_io_interface_t *io;
+ efi_file_handle_t *fh;
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_status_t status;
+ void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
+ device_handle,
+ image);
+
+ status = efi_call_early(handle_protocol, handle,
+ &fs_proto, (void **)&io);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+ return status;
+ }
+
+ status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+ if (status != EFI_SUCCESS)
+ efi_printk(sys_table_arg, "Failed to open volume\n");
+ else
+ *__fh = fh;
+
+ return status;
+}
+
/*
* Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
* option, e.g. efi=nochunk.
@@ -563,8 +591,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
/* Only open the volume once. */
if (!i) {
- status = efi_open_volume(sys_table_arg, image,
- (void **)&fh);
+ status = efi_open_volume(sys_table_arg, image, &fh);
if (status != EFI_SUCCESS)
goto free_files;
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index f59564b72ddc..32799cf039ef 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -36,9 +36,6 @@ extern int __pure is_quiet(void);
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
- void **__fh);
-
unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index ae54870b2788..aa66cbf23512 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -1,6 +1,15 @@
/*
* runtime-wrappers.c - Runtime Services function call wrappers
*
+ * Implementation summary:
+ * -----------------------
+ * 1. When user/kernel thread requests to execute efi_runtime_service(),
+ * enqueue work to efi_rts_wq.
+ * 2. Caller thread waits for completion until the work is finished
+ * because it's dependent on the return status and execution of
+ * efi_runtime_service().
+ * For instance, get_variable() and get_next_variable().
+ *
* Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* Split off from arch/x86/platform/efi/efi.c
@@ -22,6 +31,9 @@
#include <linux/mutex.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
#include <asm/efi.h>
/*
@@ -33,6 +45,76 @@
#define __efi_call_virt(f, args...) \
__efi_call_virt_pointer(efi.systab->runtime, f, args)
+/* efi_runtime_service() function identifiers */
+enum efi_rts_ids {
+ GET_TIME,
+ SET_TIME,
+ GET_WAKEUP_TIME,
+ SET_WAKEUP_TIME,
+ GET_VARIABLE,
+ GET_NEXT_VARIABLE,
+ SET_VARIABLE,
+ QUERY_VARIABLE_INFO,
+ GET_NEXT_HIGH_MONO_COUNT,
+ UPDATE_CAPSULE,
+ QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work: Details of EFI Runtime Service work
+ * @arg<1-5>: EFI Runtime Service function arguments
+ * @status: Status of executing EFI Runtime Service
+ * @efi_rts_id: EFI Runtime Service function identifier
+ * @efi_rts_comp: Struct used for handling completions
+ */
+struct efi_runtime_work {
+ void *arg1;
+ void *arg2;
+ void *arg3;
+ void *arg4;
+ void *arg5;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+};
+
+/*
+ * efi_queue_work: Queue efi_runtime_service() and wait until it's done
+ * @rts: efi_runtime_service() function identifier
+ * @rts_arg<1-5>: efi_runtime_service() function arguments
+ *
+ * Accesses to efi_runtime_services() are serialized by a binary
+ * semaphore (efi_runtime_lock) and the caller waits until the work is
+ * finished, hence _only_ one work item is queued at a time.
+ */
+#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5) \
+({ \
+ struct efi_runtime_work efi_rts_work; \
+ efi_rts_work.status = EFI_ABORTED; \
+ \
+ init_completion(&efi_rts_work.efi_rts_comp); \
+ INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \
+ efi_rts_work.arg1 = _arg1; \
+ efi_rts_work.arg2 = _arg2; \
+ efi_rts_work.arg3 = _arg3; \
+ efi_rts_work.arg4 = _arg4; \
+ efi_rts_work.arg5 = _arg5; \
+ efi_rts_work.efi_rts_id = _rts; \
+ \
+ /* \
+ * queue_work() returns 0 if the work was already on a queue; \
+ * _ideally_ this should never happen. \
+ */ \
+ if (queue_work(efi_rts_wq, &efi_rts_work.work)) \
+ wait_for_completion(&efi_rts_work.efi_rts_comp); \
+ else \
+ pr_err("Failed to queue work to efi_rts_wq.\n"); \
+ \
+ efi_rts_work.status; \
+})
+
void efi_call_virt_check_flags(unsigned long flags, const char *call)
{
unsigned long cur_flags, mismatch;
@@ -90,13 +172,98 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
*/
static DEFINE_SEMAPHORE(efi_runtime_lock);
+/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ *
+ * Conventions followed by efi_call_rts() when unpacking efi_runtime_work:
+ * 1. If argument was a pointer, recast it from void pointer to original
+ * pointer type.
+ * 2. If argument was a value, recast it from void pointer to original
+ * pointer type and dereference it.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+ struct efi_runtime_work *efi_rts_work;
+ void *arg1, *arg2, *arg3, *arg4, *arg5;
+ efi_status_t status = EFI_NOT_FOUND;
+
+ efi_rts_work = container_of(work, struct efi_runtime_work, work);
+ arg1 = efi_rts_work->arg1;
+ arg2 = efi_rts_work->arg2;
+ arg3 = efi_rts_work->arg3;
+ arg4 = efi_rts_work->arg4;
+ arg5 = efi_rts_work->arg5;
+
+ switch (efi_rts_work->efi_rts_id) {
+ case GET_TIME:
+ status = efi_call_virt(get_time, (efi_time_t *)arg1,
+ (efi_time_cap_t *)arg2);
+ break;
+ case SET_TIME:
+ status = efi_call_virt(set_time, (efi_time_t *)arg1);
+ break;
+ case GET_WAKEUP_TIME:
+ status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
+ (efi_bool_t *)arg2, (efi_time_t *)arg3);
+ break;
+ case SET_WAKEUP_TIME:
+ status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
+ (efi_time_t *)arg2);
+ break;
+ case GET_VARIABLE:
+ status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
+ (efi_guid_t *)arg2, (u32 *)arg3,
+ (unsigned long *)arg4, (void *)arg5);
+ break;
+ case GET_NEXT_VARIABLE:
+ status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
+ (efi_char16_t *)arg2,
+ (efi_guid_t *)arg3);
+ break;
+ case SET_VARIABLE:
+ status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
+ (efi_guid_t *)arg2, *(u32 *)arg3,
+ *(unsigned long *)arg4, (void *)arg5);
+ break;
+ case QUERY_VARIABLE_INFO:
+ status = efi_call_virt(query_variable_info, *(u32 *)arg1,
+ (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+ break;
+ case GET_NEXT_HIGH_MONO_COUNT:
+ status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+ break;
+ case UPDATE_CAPSULE:
+ status = efi_call_virt(update_capsule,
+ (efi_capsule_header_t **)arg1,
+ *(unsigned long *)arg2,
+ *(unsigned long *)arg3);
+ break;
+ case QUERY_CAPSULE_CAPS:
+ status = efi_call_virt(query_capsule_caps,
+ (efi_capsule_header_t **)arg1,
+ *(unsigned long *)arg2, (u64 *)arg3,
+ (int *)arg4);
+ break;
+ default:
+ /*
+ * Ideally, we should never reach here because a caller of this
+ * function should have put the right efi_runtime_service()
+ * function identifier into efi_rts_work->efi_rts_id
+ */
+ pr_err("Requested executing invalid EFI Runtime Service.\n");
+ }
+ efi_rts_work->status = status;
+ complete(&efi_rts_work->efi_rts_comp);
+}
+
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_time, tm, tc);
+ status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -107,7 +274,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_time, tm);
+ status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -120,7 +287,8 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
+ status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+ NULL);
up(&efi_runtime_lock);
return status;
}
@@ -131,7 +299,8 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_wakeup_time, enabled, tm);
+ status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+ NULL);
up(&efi_runtime_lock);
return status;
}
@@ -146,8 +315,8 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_variable, name, vendor, attr, data_size,
- data);
+ status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+ data);
up(&efi_runtime_lock);
return status;
}
@@ -160,7 +329,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_next_variable, name_size, name, vendor);
+ status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -175,8 +345,8 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_variable, name, vendor, attr, data_size,
- data);
+ status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+ data);
up(&efi_runtime_lock);
return status;
}
@@ -210,8 +380,8 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(query_variable_info, attr, storage_space,
- remaining_space, max_variable_size);
+ status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+ remaining_space, max_variable_size, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -242,7 +412,8 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_next_high_mono_count, count);
+ status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -272,7 +443,8 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(update_capsule, capsules, count, sg_list);
+ status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -289,8 +461,8 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
- reset_type);
+ status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+ max_size, reset_type, NULL);
up(&efi_runtime_lock);
return status;
}
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index e9db895916c3..1aa67bb5d8c0 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
memunmap(sec->baseaddr);
+ sec->enabled = false;
}
return 0;
@@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr)
ret = vpd_section_init("rw", &rw_vpd,
physaddr + sizeof(struct vpd_cbmem) +
header.ro_size, header.rw_size);
- if (ret)
+ if (ret) {
+ vpd_section_destroy(&ro_vpd);
return ret;
+ }
}
return 0;
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 14fedbeca724..039e0f91dba8 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -28,6 +28,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/slab.h>
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index ee9c5420c47f..1ebcef4bab5b 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -130,4 +130,72 @@ config OF_FPGA_REGION
Support for loading FPGA images by applying a Device Tree
overlay.
+config FPGA_DFL
+ tristate "FPGA Device Feature List (DFL) support"
+ select FPGA_BRIDGE
+ select FPGA_REGION
+ help
+ Device Feature List (DFL) defines a feature list structure that
+ creates a linked list of feature headers within the MMIO space
+ to provide an extensible way of adding features for FPGA.
+	  The driver can walk through the feature headers to enumerate feature
+ devices (e.g. FPGA Management Engine, Port and Accelerator
+ Function Unit) and their private features for target FPGA devices.
+
+ Select this option to enable common support for Field-Programmable
+ Gate Array (FPGA) solutions which implement Device Feature List.
+ It provides enumeration APIs and feature device infrastructure.
+
+config FPGA_DFL_FME
+ tristate "FPGA DFL FME Driver"
+ depends on FPGA_DFL
+ help
+ The FPGA Management Engine (FME) is a feature device implemented
+ under Device Feature List (DFL) framework. Select this option to
+ enable the platform device driver for FME which implements all
+ FPGA platform level management features. There shall be one FME
+ per DFL based FPGA device.
+
+config FPGA_DFL_FME_MGR
+ tristate "FPGA DFL FME Manager Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Manager driver for FPGA Management Engine.
+
+config FPGA_DFL_FME_BRIDGE
+ tristate "FPGA DFL FME Bridge Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Bridge driver for FPGA Management Engine.
+
+config FPGA_DFL_FME_REGION
+ tristate "FPGA DFL FME Region Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Region driver for FPGA Management Engine.
+
+config FPGA_DFL_AFU
+ tristate "FPGA DFL AFU Driver"
+ depends on FPGA_DFL
+ help
+ This is the driver for FPGA Accelerated Function Unit (AFU) which
+ implements AFU and Port management features. A User AFU connects
+ to the FPGA infrastructure via a Port. There may be more than one
+ Port/AFU per DFL based FPGA device.
+
+config FPGA_DFL_PCI
+ tristate "FPGA DFL PCIe Device Driver"
+ depends on PCI && FPGA_DFL
+ help
+ Select this option to enable PCIe driver for PCIe-based
+ Field-Programmable Gate Array (FPGA) solutions which implement
+ the Device Feature List (DFL). This driver provides interfaces
+ for userspace applications to configure, enumerate, open and access
+	  FPGA accelerators on the FPGA DFL devices, and enables system level
+ management functions such as FPGA partial reconfiguration, power
+ management and virtualization with DFL framework and DFL feature
+ device drivers.
+
+ To compile this as a module, choose M here.
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index f9803dad6919..7a2d73ba7122 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -28,3 +28,17 @@ obj-$(CONFIG_XILINX_PR_DECOUPLER) += xilinx-pr-decoupler.o
# High Level Interfaces
obj-$(CONFIG_FPGA_REGION) += fpga-region.o
obj-$(CONFIG_OF_FPGA_REGION) += of-fpga-region.o
+
+# FPGA Device Feature List Support
+obj-$(CONFIG_FPGA_DFL) += dfl.o
+obj-$(CONFIG_FPGA_DFL_FME) += dfl-fme.o
+obj-$(CONFIG_FPGA_DFL_FME_MGR) += dfl-fme-mgr.o
+obj-$(CONFIG_FPGA_DFL_FME_BRIDGE) += dfl-fme-br.o
+obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
+obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
+
+dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o
+dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
+
+# Drivers for FPGAs which implement DFL
+obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index dd4edd8f22ce..7fa793672a7a 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev,
mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
&altera_cvp_ops, conf);
- if (!mgr)
- return -ENOMEM;
+ if (!mgr) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
pci_set_drvdata(pdev, mgr);
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
new file mode 100644
index 000000000000..0e81d33af856
--- /dev/null
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include "dfl-afu.h"
+
+static void put_all_pages(struct page **pages, int npages)
+{
+ int i;
+
+ for (i = 0; i < npages; i++)
+ if (pages[i])
+ put_page(pages[i]);
+}
+
+void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+
+ afu->dma_regions = RB_ROOT;
+}
+
+/**
+ * afu_dma_adjust_locked_vm - adjust locked memory
+ * @dev: port device
+ * @npages: number of pages
+ * @incr: increase or decrease locked memory
+ *
+ * Increase or decrease the locked memory size by @npages.
+ *
+ * Return 0 on success.
+ * Return -ENOMEM if the locked memory size is over the limit and the
+ * caller lacks CAP_IPC_LOCK.
+ */
+static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
+{
+ unsigned long locked, lock_limit;
+ int ret = 0;
+
+ /* the task is exiting. */
+ if (!current->mm)
+ return 0;
+
+ down_write(&current->mm->mmap_sem);
+
+ if (incr) {
+ locked = current->mm->locked_vm + npages;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ ret = -ENOMEM;
+ else
+ current->mm->locked_vm += npages;
+ } else {
+ if (WARN_ON_ONCE(npages > current->mm->locked_vm))
+ npages = current->mm->locked_vm;
+ current->mm->locked_vm -= npages;
+ }
+
+ dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
+ incr ? '+' : '-', npages << PAGE_SHIFT,
+ current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
+ ret ? "- exceeded" : "");
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+/**
+ * afu_dma_pin_pages - pin pages of given dma memory region
+ * @pdata: feature device platform data
+ * @region: dma memory region to be pinned
+ *
+ * Pin all the pages of given dfl_afu_dma_region.
+ * Return 0 for success or negative error code.
+ */
+static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ int npages = region->length >> PAGE_SHIFT;
+ struct device *dev = &pdata->dev->dev;
+ int ret, pinned;
+
+ ret = afu_dma_adjust_locked_vm(dev, npages, true);
+ if (ret)
+ return ret;
+
+ region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!region->pages) {
+ ret = -ENOMEM;
+ goto unlock_vm;
+ }
+
+ pinned = get_user_pages_fast(region->user_addr, npages, 1,
+ region->pages);
+ if (pinned < 0) {
+ ret = pinned;
+ goto put_pages;
+ } else if (pinned != npages) {
+ ret = -EFAULT;
+ goto free_pages;
+ }
+
+ dev_dbg(dev, "%d pages pinned\n", pinned);
+
+ return 0;
+
+put_pages:
+ put_all_pages(region->pages, pinned);
+free_pages:
+ kfree(region->pages);
+unlock_vm:
+ afu_dma_adjust_locked_vm(dev, npages, false);
+ return ret;
+}
+
+/**
+ * afu_dma_unpin_pages - unpin pages of given dma memory region
+ * @pdata: feature device platform data
+ * @region: dma memory region to be unpinned
+ *
+ * Unpin all the pages of the given dfl_afu_dma_region.
+ */
+static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ long npages = region->length >> PAGE_SHIFT;
+ struct device *dev = &pdata->dev->dev;
+
+ put_all_pages(region->pages, npages);
+ kfree(region->pages);
+ afu_dma_adjust_locked_vm(dev, npages, false);
+
+ dev_dbg(dev, "%ld pages unpinned\n", npages);
+}
+
+/**
+ * afu_dma_check_continuous_pages - check if pages are continuous
+ * @region: dma memory region
+ *
+ * Return true if the pages of the given dma memory region have continuous
+ * physical addresses, otherwise return false.
+ */
+static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
+{
+ int npages = region->length >> PAGE_SHIFT;
+ int i;
+
+ for (i = 0; i < npages - 1; i++)
+ if (page_to_pfn(region->pages[i]) + 1 !=
+ page_to_pfn(region->pages[i + 1]))
+ return false;
+
+ return true;
+}
+
+/**
+ * dma_region_check_iova - check if memory area is fully contained in the region
+ * @region: dma memory region
+ * @iova: address of the dma memory area
+ * @size: size of the dma memory area
+ *
+ * Compare the dma memory area defined by @iova and @size with given dma region.
+ * Return true if memory area is fully contained in the region, otherwise false.
+ */
+static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
+ u64 iova, u64 size)
+{
+ if (!size && region->iova != iova)
+ return false;
+
+ return (region->iova <= iova) &&
+ (region->length + region->iova >= iova + size);
+}
+
+/**
+ * afu_dma_region_add - add given dma region to rbtree
+ * @pdata: feature device platform data
+ * @region: dma region to be added
+ *
+ * Return 0 for success, -EEXIST if dma region has already been added.
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node **new, *parent = NULL;
+
+ dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ new = &afu->dma_regions.rb_node;
+
+ while (*new) {
+ struct dfl_afu_dma_region *this;
+
+ this = container_of(*new, struct dfl_afu_dma_region, node);
+
+ parent = *new;
+
+ if (dma_region_check_iova(this, region->iova, region->length))
+ return -EEXIST;
+
+ if (region->iova < this->iova)
+ new = &((*new)->rb_left);
+ else if (region->iova > this->iova)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&region->node, parent, new);
+ rb_insert_color(&region->node, &afu->dma_regions);
+
+ return 0;
+}
+
+/**
+ * afu_dma_region_remove - remove given dma region from rbtree
+ * @pdata: feature device platform data
+ * @region: dma region to be removed
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ struct dfl_afu *afu;
+
+ dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ afu = dfl_fpga_pdata_get_private(pdata);
+ rb_erase(&region->node, &afu->dma_regions);
+}
+
+/**
+ * afu_dma_region_destroy - destroy all regions in rbtree
+ * @pdata: feature device platform data
+ *
+ * Needs to be called with pdata->lock held.
+ */
+void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node *node = rb_first(&afu->dma_regions);
+ struct dfl_afu_dma_region *region;
+
+ while (node) {
+ region = container_of(node, struct dfl_afu_dma_region, node);
+
+ dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ rb_erase(node, &afu->dma_regions);
+
+ if (region->iova)
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length,
+ DMA_BIDIRECTIONAL);
+
+ if (region->pages)
+ afu_dma_unpin_pages(pdata, region);
+
+ node = rb_next(node);
+ kfree(region);
+ }
+}
+
+/**
+ * afu_dma_region_find - find the dma region from rbtree based on iova and size
+ * @pdata: feature device platform data
+ * @iova: address of the dma memory area
+ * @size: size of the dma memory area
+ *
+ * It finds the dma region from the rbtree based on @iova and @size:
+ * - if @size == 0, it finds the dma region which starts from @iova
+ * - otherwise, it finds the dma region which fully contains
+ * [@iova, @iova+size)
+ * If nothing is matched returns NULL.
+ *
+ * Needs to be called with pdata->lock held.
+ */
+struct dfl_afu_dma_region *
+afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node *node = afu->dma_regions.rb_node;
+ struct device *dev = &pdata->dev->dev;
+
+ while (node) {
+ struct dfl_afu_dma_region *region;
+
+ region = container_of(node, struct dfl_afu_dma_region, node);
+
+ if (dma_region_check_iova(region, iova, size)) {
+ dev_dbg(dev, "find region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+ return region;
+ }
+
+ if (iova < region->iova)
+ node = node->rb_left;
+ else if (iova > region->iova)
+ node = node->rb_right;
+ else
+ /* the iova region is not fully covered. */
+ break;
+ }
+
+ dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
+ (unsigned long long)iova, (unsigned long long)size);
+
+ return NULL;
+}
+
+/**
+ * afu_dma_region_find_iova - find the dma region from rbtree by iova
+ * @pdata: feature device platform data
+ * @iova: address of the dma region
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static struct dfl_afu_dma_region *
+afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
+{
+ return afu_dma_region_find(pdata, iova, 0);
+}
+
+/**
+ * afu_dma_map_region - map memory region for dma
+ * @pdata: feature device platform data
+ * @user_addr: address of the memory region
+ * @length: size of the memory region
+ * @iova: pointer of iova address
+ *
+ * Map memory region defined by @user_addr and @length, and return dma address
+ * of the memory region via @iova.
+ * Return 0 for success, otherwise error code.
+ */
+int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+ u64 user_addr, u64 length, u64 *iova)
+{
+ struct dfl_afu_dma_region *region;
+ int ret;
+
+ /*
+	 * Check inputs: only accept a page-aligned user memory region with
+	 * a valid length.
+ */
+ if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
+ return -EINVAL;
+
+ /* Check overflow */
+ if (user_addr + length < user_addr)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE, (void __user *)(unsigned long)user_addr,
+ length))
+ return -EINVAL;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->user_addr = user_addr;
+ region->length = length;
+
+ /* Pin the user memory region */
+ ret = afu_dma_pin_pages(pdata, region);
+ if (ret) {
+ dev_err(&pdata->dev->dev, "failed to pin memory region\n");
+ goto free_region;
+ }
+
+	/* Only accept continuous pages; otherwise return an error */
+ if (!afu_dma_check_continuous_pages(region)) {
+ dev_err(&pdata->dev->dev, "pages are not contiguous\n");
+ ret = -EINVAL;
+ goto unpin_pages;
+ }
+
+ /* Pages are contiguous, so start the DMA mapping */
+ region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
+ region->pages[0], 0,
+ region->length,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdata->dev->dev, region->iova)) {
+ dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ ret = -EFAULT;
+ goto unpin_pages;
+ }
+
+ *iova = region->iova;
+
+ mutex_lock(&pdata->lock);
+ ret = afu_dma_region_add(pdata, region);
+ mutex_unlock(&pdata->lock);
+ if (ret) {
+ dev_err(&pdata->dev->dev, "failed to add dma region\n");
+ goto unmap_dma;
+ }
+
+ return 0;
+
+unmap_dma:
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length, DMA_BIDIRECTIONAL);
+unpin_pages:
+ afu_dma_unpin_pages(pdata, region);
+free_region:
+ kfree(region);
+ return ret;
+}
+
+/**
+ * afu_dma_unmap_region - unmap dma memory region
+ * @pdata: feature device platform data
+ * @iova: dma address of the region
+ *
+ * Unmap dma memory region based on @iova.
+ * Return 0 for success, otherwise error code.
+ */
+int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
+{
+ struct dfl_afu_dma_region *region;
+
+ mutex_lock(&pdata->lock);
+ region = afu_dma_region_find_iova(pdata, iova);
+ if (!region) {
+ mutex_unlock(&pdata->lock);
+ return -EINVAL;
+ }
+
+ if (region->in_use) {
+ mutex_unlock(&pdata->lock);
+ return -EBUSY;
+ }
+
+ afu_dma_region_remove(pdata, region);
+ mutex_unlock(&pdata->lock);
+
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length, DMA_BIDIRECTIONAL);
+ afu_dma_unpin_pages(pdata, region);
+ kfree(region);
+
+ return 0;
+}
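For illustration, a minimal userspace sketch of the map/unmap flow above, using the uapi definitions from <linux/fpga-dfl.h> as exercised by this patch; the device node name is hypothetical, and since the driver also requires the pinned pages to be physically contiguous, a single-page buffer is used:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/fpga-dfl.h>

    int main(void)
    {
            struct dfl_fpga_port_dma_map map;
            size_t len = 4096;
            void *buf;
            int fd;

            fd = open("/dev/dfl-port.0", O_RDWR);   /* hypothetical node */
            if (fd < 0)
                    return 1;

            /* The driver only accepts a page-aligned address and length. */
            buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED)
                    return 1;

            memset(&map, 0, sizeof(map));
            map.argsz = sizeof(map);
            map.user_addr = (unsigned long)buf;
            map.length = len;

            if (ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map) == 0) {
                    struct dfl_fpga_port_dma_unmap unmap;

                    printf("iova = 0x%llx\n", (unsigned long long)map.iova);

                    memset(&unmap, 0, sizeof(unmap));
                    unmap.argsz = sizeof(unmap);
                    unmap.iova = map.iova;
                    ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
            }

            munmap(buf, len);
            close(fd);
            return 0;
    }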
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
new file mode 100644
index 000000000000..02baa6a227c0
--- /dev/null
+++ b/drivers/fpga/dfl-afu-main.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl-afu.h"
+
+/**
+ * port_enable - enable a port
+ * @pdev: port platform device.
+ *
+ * Enable the port by clearing the port soft reset bit, which is set by
+ * default. The AFU is unable to respond to any MMIO access while in reset.
+ * port_enable() should only be used after port_disable().
+ */
+static void port_enable(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ u64 v;
+
+ WARN_ON(!pdata->disable_count);
+
+ if (--pdata->disable_count != 0)
+ return;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ /* Clear port soft reset */
+ v = readq(base + PORT_HDR_CTRL);
+ v &= ~PORT_CTRL_SFTRST;
+ writeq(v, base + PORT_HDR_CTRL);
+}
+
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
+/**
+ * port_disable - disable a port
+ * @pdev: port platform device.
+ *
+ * Disable the port by setting the port soft reset bit, which puts the
+ * port into reset.
+ */
+static int port_disable(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ u64 v;
+
+ if (pdata->disable_count++ != 0)
+ return 0;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ /* Set port soft reset */
+ v = readq(base + PORT_HDR_CTRL);
+ v |= PORT_CTRL_SFTRST;
+ writeq(v, base + PORT_HDR_CTRL);
+
+ /*
+ * HW sets the ack bit to 1 when all outstanding requests have been drained
+ * on this port and the minimum soft reset pulse width has elapsed.
+ * The driver polls the soft reset ack bit to determine if the reset is
+ * done by HW.
+ */
+ if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST_ACK,
+ RST_POLL_INVL, RST_POLL_TIMEOUT)) {
+ dev_err(&pdev->dev, "timeout, fail to reset device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
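port_disable() and port_enable() form a reference-counted pair: the soft reset is asserted on the first disable and released only when every disabler has re-enabled the port. A hypothetical nested caller, error handling elided:

    port_disable(pdev);  /* disable_count 0 -> 1, soft reset asserted */
    port_disable(pdev);  /* disable_count 1 -> 2, port already in reset */

    port_enable(pdev);   /* disable_count 2 -> 1, still in reset */
    port_enable(pdev);   /* disable_count 1 -> 0, soft reset cleared */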
+/*
+ * This function resets the FPGA Port and its accelerator (AFU) via
+ * port_disable() and port_enable() (set the port soft reset bit and then
+ * clear it). Userspace can do a Port reset at any time, e.g. during DMA or
+ * Partial Reconfiguration. It should never cause any system level issue,
+ * only functional failure (e.g. DMA or PR operation failure), which is
+ * recoverable.
+ *
+ * Note: the accelerator (AFU) is not accessible when its port is in reset
+ * (disabled). Any attempt at MMIO access to the AFU while in reset will
+ * result in errors reported via the port error reporting sub feature (if
+ * present).
+ */
+static int __port_reset(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = port_disable(pdev);
+ if (!ret)
+ port_enable(pdev);
+
+ return ret;
+}
+
+static int port_reset(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ mutex_lock(&pdata->lock);
+ ret = __port_reset(pdev);
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static int port_get_id(struct platform_device *pdev)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
+}
+
+static ssize_t
+id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int id = port_get_id(to_platform_device(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", id);
+}
+static DEVICE_ATTR_RO(id);
+
+static const struct attribute *port_hdr_attrs[] = {
+ &dev_attr_id.attr,
+ NULL,
+};
+
+static int port_hdr_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "PORT HDR Init.\n");
+
+ port_reset(pdev);
+
+ return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs);
+}
+
+static void port_hdr_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "PORT HDR UInit.\n");
+
+ sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs);
+}
+
+static long
+port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case DFL_FPGA_PORT_RESET:
+ if (!arg)
+ ret = port_reset(pdev);
+ else
+ ret = -EINVAL;
+ break;
+ default:
+ dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static const struct dfl_feature_ops port_hdr_ops = {
+ .init = port_hdr_init,
+ .uinit = port_hdr_uinit,
+ .ioctl = port_hdr_ioctl,
+};
+
+static ssize_t
+afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 guidl, guidh;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
+
+ mutex_lock(&pdata->lock);
+ if (pdata->disable_count) {
+ mutex_unlock(&pdata->lock);
+ return -EBUSY;
+ }
+
+ guidl = readq(base + GUID_L);
+ guidh = readq(base + GUID_H);
+ mutex_unlock(&pdata->lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
+}
+static DEVICE_ATTR_RO(afu_id);
+
+static const struct attribute *port_afu_attrs[] = {
+ &dev_attr_afu_id.attr,
+ NULL
+};
+
+static int port_afu_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct resource *res = &pdev->resource[feature->resource_index];
+ int ret;
+
+ dev_dbg(&pdev->dev, "PORT AFU Init.\n");
+
+ ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ DFL_PORT_REGION_INDEX_AFU, resource_size(res),
+ res->start, DFL_PORT_REGION_READ |
+ DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP);
+ if (ret)
+ return ret;
+
+ return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs);
+}
+
+static void port_afu_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "PORT AFU UInit.\n");
+
+ sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs);
+}
+
+static const struct dfl_feature_ops port_afu_ops = {
+ .init = port_afu_init,
+ .uinit = port_afu_uinit,
+};
+
+static struct dfl_feature_driver port_feature_drvs[] = {
+ {
+ .id = PORT_FEATURE_ID_HEADER,
+ .ops = &port_hdr_ops,
+ },
+ {
+ .id = PORT_FEATURE_ID_AFU,
+ .ops = &port_afu_ops,
+ },
+ {
+ .ops = NULL,
+ }
+};
+
+static int afu_open(struct inode *inode, struct file *filp)
+{
+ struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
+ struct dfl_feature_platform_data *pdata;
+ int ret;
+
+ pdata = dev_get_platdata(&fdev->dev);
+ if (WARN_ON(!pdata))
+ return -ENODEV;
+
+ ret = dfl_feature_dev_use_begin(pdata);
+ if (ret)
+ return ret;
+
+ dev_dbg(&fdev->dev, "Device File Open\n");
+ filp->private_data = fdev;
+
+ return 0;
+}
+
+static int afu_release(struct inode *inode, struct file *filp)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+
+ dev_dbg(&pdev->dev, "Device File Release\n");
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ __port_reset(pdev);
+ afu_dma_region_destroy(pdata);
+ mutex_unlock(&pdata->lock);
+
+ dfl_feature_dev_use_end(pdata);
+
+ return 0;
+}
+
+static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ /* No extension support for now */
+ return 0;
+}
+
+static long
+afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_info info;
+ struct dfl_afu *afu;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);
+
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ info.flags = 0;
+ info.num_regions = afu->num_regions;
+ info.num_umsgs = afu->num_umsgs;
+ mutex_unlock(&pdata->lock);
+
+ if (copy_to_user(arg, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
+ void __user *arg)
+{
+ struct dfl_fpga_port_region_info rinfo;
+ struct dfl_afu_mmio_region region;
+ unsigned long minsz;
+ long ret;
+
+ minsz = offsetofend(struct dfl_fpga_port_region_info, offset);
+
+ if (copy_from_user(&rinfo, arg, minsz))
+ return -EFAULT;
+
+ if (rinfo.argsz < minsz || rinfo.padding)
+ return -EINVAL;
+
+ ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
+ if (ret)
+ return ret;
+
+ rinfo.flags = region.flags;
+ rinfo.size = region.size;
+ rinfo.offset = region.offset;
+
+ if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long
+afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_dma_map map;
+ unsigned long minsz;
+ long ret;
+
+ minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);
+
+ if (copy_from_user(&map, arg, minsz))
+ return -EFAULT;
+
+ if (map.argsz < minsz || map.flags)
+ return -EINVAL;
+
+ ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(arg, &map, sizeof(map))) {
+ afu_dma_unmap_region(pdata, map.iova);
+ return -EFAULT;
+ }
+
+ dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
+ (unsigned long long)map.user_addr,
+ (unsigned long long)map.length,
+ (unsigned long long)map.iova);
+
+ return 0;
+}
+
+static long
+afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_dma_unmap unmap;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);
+
+ if (copy_from_user(&unmap, arg, minsz))
+ return -EFAULT;
+
+ if (unmap.argsz < minsz || unmap.flags)
+ return -EINVAL;
+
+ return afu_dma_unmap_region(pdata, unmap.iova);
+}
+
+static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+ struct dfl_feature *f;
+ long ret;
+
+ dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ switch (cmd) {
+ case DFL_FPGA_GET_API_VERSION:
+ return DFL_FPGA_API_VERSION;
+ case DFL_FPGA_CHECK_EXTENSION:
+ return afu_ioctl_check_extension(pdata, arg);
+ case DFL_FPGA_PORT_GET_INFO:
+ return afu_ioctl_get_info(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_GET_REGION_INFO:
+ return afu_ioctl_get_region_info(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_DMA_MAP:
+ return afu_ioctl_dma_map(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_DMA_UNMAP:
+ return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
+ default:
+ /*
+ * Let the sub-feature's ioctl function handle the cmd.
+ * A sub-feature's ioctl returns -ENODEV when the cmd is
+ * not handled by that sub-feature, and returns 0 or
+ * another error code if the cmd is handled.
+ */
+ dfl_fpga_dev_for_each_feature(pdata, f)
+ if (f->ops && f->ops->ioctl) {
+ ret = f->ops->ioctl(pdev, f, cmd, arg);
+ if (ret != -ENODEV)
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
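A minimal sketch of a sub-feature ioctl that honors the dispatch contract described above: it returns -ENODEV for commands it does not own, so afu_ioctl() keeps probing the remaining sub-features. The feature command is hypothetical:

    static long port_foo_ioctl(struct platform_device *pdev,
                               struct dfl_feature *feature,
                               unsigned int cmd, unsigned long arg)
    {
            switch (cmd) {
            case DFL_FPGA_PORT_FOO_GET:     /* hypothetical command */
                    return 0;               /* handled: 0 or a real -errno */
            default:
                    return -ENODEV;         /* not ours, let dispatch continue */
            }
    }

    static const struct dfl_feature_ops port_foo_ops = {
            .ioctl = port_foo_ioctl,
    };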
+
+static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+ u64 size = vma->vm_end - vma->vm_start;
+ struct dfl_afu_mmio_region region;
+ u64 offset;
+ int ret;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
+ if (ret)
+ return ret;
+
+ if (!(region.flags & DFL_PORT_REGION_MMAP))
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
+ return -EPERM;
+
+ if ((vma->vm_flags & VM_WRITE) &&
+ !(region.flags & DFL_PORT_REGION_WRITE))
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ return remap_pfn_range(vma, vma->vm_start,
+ (region.phys + (offset - region.offset)) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+}
+
+static const struct file_operations afu_fops = {
+ .owner = THIS_MODULE,
+ .open = afu_open,
+ .release = afu_release,
+ .unlocked_ioctl = afu_ioctl,
+ .mmap = afu_mmap,
+};
+
+static int afu_dev_init(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_afu *afu;
+
+ afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
+ if (!afu)
+ return -ENOMEM;
+
+ afu->pdata = pdata;
+
+ mutex_lock(&pdata->lock);
+ dfl_fpga_pdata_set_private(pdata, afu);
+ afu_mmio_region_init(pdata);
+ afu_dma_region_init(pdata);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static int afu_dev_destroy(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_afu *afu;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ afu_mmio_region_destroy(pdata);
+ afu_dma_region_destroy(pdata);
+ dfl_fpga_pdata_set_private(pdata, NULL);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static int port_enable_set(struct platform_device *pdev, bool enable)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ if (enable)
+ port_enable(pdev);
+ else
+ ret = port_disable(pdev);
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static struct dfl_fpga_port_ops afu_port_ops = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .owner = THIS_MODULE,
+ .get_id = port_get_id,
+ .enable_set = port_enable_set,
+};
+
+static int afu_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ ret = afu_dev_init(pdev);
+ if (ret)
+ goto exit;
+
+ ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
+ if (ret)
+ goto dev_destroy;
+
+ ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
+ if (ret) {
+ dfl_fpga_dev_feature_uinit(pdev);
+ goto dev_destroy;
+ }
+
+ return 0;
+
+dev_destroy:
+ afu_dev_destroy(pdev);
+exit:
+ return ret;
+}
+
+static int afu_remove(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ dfl_fpga_dev_ops_unregister(pdev);
+ dfl_fpga_dev_feature_uinit(pdev);
+ afu_dev_destroy(pdev);
+
+ return 0;
+}
+
+static struct platform_driver afu_driver = {
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
+ },
+ .probe = afu_probe,
+ .remove = afu_remove,
+};
+
+static int __init afu_init(void)
+{
+ int ret;
+
+ dfl_fpga_port_ops_add(&afu_port_ops);
+
+ ret = platform_driver_register(&afu_driver);
+ if (ret)
+ dfl_fpga_port_ops_del(&afu_port_ops);
+
+ return ret;
+}
+
+static void __exit afu_exit(void)
+{
+ platform_driver_unregister(&afu_driver);
+
+ dfl_fpga_port_ops_del(&afu_port_ops);
+}
+
+module_init(afu_init);
+module_exit(afu_exit);
+
+MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-port");
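A userspace sketch of the region-query plus mmap path implemented by afu_ioctl_get_region_info() and afu_mmap() above, assuming the uapi definitions from <linux/fpga-dfl.h>:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/fpga-dfl.h>

    static volatile uint64_t *map_afu_mmio(int fd, uint64_t *size)
    {
            struct dfl_fpga_port_region_info rinfo;
            void *p;

            memset(&rinfo, 0, sizeof(rinfo));
            rinfo.argsz = sizeof(rinfo);
            rinfo.index = DFL_PORT_REGION_INDEX_AFU;

            if (ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, &rinfo))
                    return NULL;

            if (!(rinfo.flags & DFL_PORT_REGION_MMAP))
                    return NULL;

            /* rinfo.offset is the region's offset within the device fd. */
            p = mmap(NULL, rinfo.size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, rinfo.offset);
            if (p == MAP_FAILED)
                    return NULL;

            *size = rinfo.size;
            return (volatile uint64_t *)p;
    }

afu_mmap() rejects mappings without VM_SHARED, so MAP_SHARED is required here.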
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
new file mode 100644
index 000000000000..0804b7a0c298
--- /dev/null
+++ b/drivers/fpga/dfl-afu-region.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) MMIO Region Management
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+#include "dfl-afu.h"
+
+/**
+ * afu_mmio_region_init - init function for afu mmio region support
+ * @pdata: afu platform device's pdata.
+ */
+void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+
+ INIT_LIST_HEAD(&afu->regions);
+}
+
+#define for_each_region(region, afu) \
+ list_for_each_entry((region), &(afu)->regions, node)
+
+static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
+ u32 region_index)
+{
+ struct dfl_afu_mmio_region *region;
+
+ for_each_region(region, afu)
+ if (region->index == region_index)
+ return region;
+
+ return NULL;
+}
+
+/**
+ * afu_mmio_region_add - add a mmio region to given feature dev.
+ *
+ * @pdata: afu platform device's pdata.
+ * @region_index: region index.
+ * @region_size: region size.
+ * @phys: region's physical address.
+ * @flags: region flags (access permission).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+ u32 region_index, u64 region_size, u64 phys, u32 flags)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->index = region_index;
+ region->size = region_size;
+ region->phys = phys;
+ region->flags = flags;
+
+ mutex_lock(&pdata->lock);
+
+ afu = dfl_fpga_pdata_get_private(pdata);
+
+ /* check if @region_index already exists */
+ if (get_region_by_index(afu, region_index)) {
+ mutex_unlock(&pdata->lock);
+ ret = -EEXIST;
+ goto exit;
+ }
+
+ region_size = PAGE_ALIGN(region_size);
+ region->offset = afu->region_cur_offset;
+ list_add(&region->node, &afu->regions);
+
+ afu->region_cur_offset += region_size;
+ afu->num_regions++;
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+
+exit:
+ devm_kfree(&pdata->dev->dev, region);
+ return ret;
+}
+
+/**
+ * afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
+ * @pdata: afu platform device's pdata.
+ */
+void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu_mmio_region *tmp, *region;
+
+ list_for_each_entry_safe(region, tmp, &afu->regions, node)
+ devm_kfree(&pdata->dev->dev, region);
+}
+
+/**
+ * afu_mmio_region_get_by_index - find an afu region by index.
+ * @pdata: afu platform device's pdata.
+ * @region_index: region index.
+ * @pregion: ptr to region for result.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+ u32 region_index,
+ struct dfl_afu_mmio_region *pregion)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ region = get_region_by_index(afu, region_index);
+ if (!region) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ *pregion = *region;
+exit:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+/**
+ * afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
+ *
+ * @pdata: afu platform device's pdata.
+ * @offset: region offset from start of the device fd.
+ * @size: region size.
+ * @pregion: ptr to region for result.
+ *
+ * Find the region which fully contains the region described by input
+ * parameters (offset and size) from the feature dev's region linked list.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+ u64 offset, u64 size,
+ struct dfl_afu_mmio_region *pregion)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ for_each_region(region, afu)
+ if (region->offset <= offset &&
+ region->offset + region->size >= offset + size) {
+ *pregion = *region;
+ goto exit;
+ }
+ ret = -EINVAL;
+exit:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
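Regions are laid out back to back at page-aligned offsets within the device fd: region->offset takes the current region_cur_offset, which then advances by PAGE_ALIGN(region_size). For example, with 4 KiB pages, adding region A of size 0x1000 and then region B of size 0x2800 places A at offset 0x0 and B at offset 0x1000, leaving region_cur_offset at 0x4000 for the next region; afu_mmio_region_get_by_offset() later resolves mmap offsets against exactly this layout.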
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
new file mode 100644
index 000000000000..0c7630ae3cda
--- /dev/null
+++ b/drivers/fpga/dfl-afu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Accelerated Function Unit (AFU) Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_AFU_H
+#define __DFL_AFU_H
+
+#include <linux/mm.h>
+
+#include "dfl.h"
+
+/**
+ * struct dfl_afu_mmio_region - afu mmio region data structure
+ *
+ * @index: region index.
+ * @flags: region flags (access permission).
+ * @size: region size.
+ * @offset: region offset from start of the device fd.
+ * @phys: region's physical address.
+ * @node: node to add to afu feature dev's region list.
+ */
+struct dfl_afu_mmio_region {
+ u32 index;
+ u32 flags;
+ u64 size;
+ u64 offset;
+ u64 phys;
+ struct list_head node;
+};
+
+/**
+ * struct dfl_afu_dma_region - afu DMA region data structure
+ *
+ * @user_addr: region userspace virtual address.
+ * @length: region length.
+ * @iova: region IO virtual address.
+ * @pages: ptr to pages of this region.
+ * @node: rb tree node.
+ * @in_use: flag to indicate if this region is in_use.
+ */
+struct dfl_afu_dma_region {
+ u64 user_addr;
+ u64 length;
+ u64 iova;
+ struct page **pages;
+ struct rb_node node;
+ bool in_use;
+};
+
+/**
+ * struct dfl_afu - afu device data structure
+ *
+ * @region_cur_offset: current region offset from start of the device fd.
+ * @num_regions: num of mmio regions.
+ * @regions: the mmio region linked list of this afu feature device.
+ * @dma_regions: root of dma regions rb tree.
+ * @num_umsgs: num of umsgs.
+ * @pdata: afu platform device's pdata.
+ */
+struct dfl_afu {
+ u64 region_cur_offset;
+ int num_regions;
+ u8 num_umsgs;
+ struct list_head regions;
+ struct rb_root dma_regions;
+
+ struct dfl_feature_platform_data *pdata;
+};
+
+void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
+int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+ u32 region_index, u64 region_size, u64 phys, u32 flags);
+void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
+int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+ u32 region_index,
+ struct dfl_afu_mmio_region *pregion);
+int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+ u64 offset, u64 size,
+ struct dfl_afu_mmio_region *pregion);
+void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
+void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
+int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+ u64 user_addr, u64 length, u64 *iova);
+int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
+struct dfl_afu_dma_region *
+afu_dma_region_find(struct dfl_feature_platform_data *pdata,
+ u64 iova, u64 size);
+#endif /* __DFL_AFU_H */
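The dma_regions rbtree is keyed by iova, as the traversal in afu_dma_region_find() implies. A minimal sketch of a matching insert helper (simplified: unlike afu_dma_region_add() earlier in this patch, it only rejects duplicate start addresses, not overlapping regions):

    static int dma_region_insert(struct dfl_afu *afu,
                                 struct dfl_afu_dma_region *region)
    {
            struct rb_node **new = &afu->dma_regions.rb_node;
            struct rb_node *parent = NULL;

            while (*new) {
                    struct dfl_afu_dma_region *this;

                    this = container_of(*new, struct dfl_afu_dma_region, node);
                    parent = *new;

                    if (region->iova < this->iova)
                            new = &(*new)->rb_left;
                    else if (region->iova > this->iova)
                            new = &(*new)->rb_right;
                    else
                            return -EEXIST;
            }

            rb_link_node(&region->node, parent, new);
            rb_insert_color(&region->node, &afu->dma_regions);

            return 0;
    }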
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
new file mode 100644
index 000000000000..7cc041def8b3
--- /dev/null
+++ b/drivers/fpga/dfl-fme-br.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Bridge Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#include "dfl.h"
+#include "dfl-fme-pr.h"
+
+struct fme_br_priv {
+ struct dfl_fme_br_pdata *pdata;
+ struct dfl_fpga_port_ops *port_ops;
+ struct platform_device *port_pdev;
+};
+
+static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ struct fme_br_priv *priv = bridge->priv;
+ struct platform_device *port_pdev;
+ struct dfl_fpga_port_ops *ops;
+
+ if (!priv->port_pdev) {
+ port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
+ &priv->pdata->port_id,
+ dfl_fpga_check_port_id);
+ if (!port_pdev)
+ return -ENODEV;
+
+ priv->port_pdev = port_pdev;
+ }
+
+ if (priv->port_pdev && !priv->port_ops) {
+ ops = dfl_fpga_port_ops_get(priv->port_pdev);
+ if (!ops || !ops->enable_set)
+ return -ENOENT;
+
+ priv->port_ops = ops;
+ }
+
+ return priv->port_ops->enable_set(priv->port_pdev, enable);
+}
+
+static const struct fpga_bridge_ops fme_bridge_ops = {
+ .enable_set = fme_bridge_enable_set,
+};
+
+static int fme_br_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fme_br_priv *priv;
+ struct fpga_bridge *br;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdata = dev_get_platdata(dev);
+
+ br = fpga_bridge_create(dev, "DFL FPGA FME Bridge",
+ &fme_bridge_ops, priv);
+ if (!br)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, br);
+
+ ret = fpga_bridge_register(br);
+ if (ret)
+ fpga_bridge_free(br);
+
+ return ret;
+}
+
+static int fme_br_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *br = platform_get_drvdata(pdev);
+ struct fme_br_priv *priv = br->priv;
+
+ fpga_bridge_unregister(br);
+
+ if (priv->port_pdev)
+ put_device(&priv->port_pdev->dev);
+ if (priv->port_ops)
+ dfl_fpga_port_ops_put(priv->port_ops);
+
+ return 0;
+}
+
+static struct platform_driver fme_br_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_BRIDGE,
+ },
+ .probe = fme_br_probe,
+ .remove = fme_br_remove,
+};
+
+module_platform_driver(fme_br_driver);
+
+MODULE_DESCRIPTION("FPGA Bridge for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-bridge");
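fme_bridge_enable_set() resolves the port device and its ops lazily on first use; the FPGA region core then toggles this bridge around reprogramming. A simplified sketch of that sequence as fpga_region_program_fpga() performs it, using the fpga-bridge and fpga-mgr APIs of this kernel:

    /* Inside fpga_region_program_fpga(), roughly: */
    ret = fpga_bridges_disable(&region->bridge_list);   /* enable_set(.., false): port into reset */
    if (!ret)
            ret = fpga_mgr_load(region->mgr, region->info); /* push the PR bitstream */
    if (!ret)
            ret = fpga_bridges_enable(&region->bridge_list); /* enable_set(.., true): port out of reset */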
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
new file mode 100644
index 000000000000..086ad2420ade
--- /dev/null
+++ b/drivers/fpga/dfl-fme-main.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+
+static ssize_t ports_num_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
+}
+static DEVICE_ATTR_RO(ports_num);
+
+/*
+ * Bitstream (static FPGA region) identifier number. It contains the
+ * detailed version and other information of this static FPGA region.
+ */
+static ssize_t bitstream_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_BITSTREAM_ID);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
+}
+static DEVICE_ATTR_RO(bitstream_id);
+
+/*
+ * Bitstream (static FPGA region) meta data. It contains the synthesis
+ * date, seed and other information of this static FPGA region.
+ */
+static ssize_t bitstream_metadata_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_BITSTREAM_MD);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
+}
+static DEVICE_ATTR_RO(bitstream_metadata);
+
+static const struct attribute *fme_hdr_attrs[] = {
+ &dev_attr_ports_num.attr,
+ &dev_attr_bitstream_id.attr,
+ &dev_attr_bitstream_metadata.attr,
+ NULL,
+};
+
+static int fme_hdr_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ void __iomem *base = feature->ioaddr;
+ int ret;
+
+ dev_dbg(&pdev->dev, "FME HDR Init.\n");
+ dev_dbg(&pdev->dev, "FME cap %llx.\n",
+ (unsigned long long)readq(base + FME_HDR_CAP));
+
+ ret = sysfs_create_files(&pdev->dev.kobj, fme_hdr_attrs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void fme_hdr_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "FME HDR UInit.\n");
+ sysfs_remove_files(&pdev->dev.kobj, fme_hdr_attrs);
+}
+
+static const struct dfl_feature_ops fme_hdr_ops = {
+ .init = fme_hdr_init,
+ .uinit = fme_hdr_uinit,
+};
+
+static struct dfl_feature_driver fme_feature_drvs[] = {
+ {
+ .id = FME_FEATURE_ID_HEADER,
+ .ops = &fme_hdr_ops,
+ },
+ {
+ .id = FME_FEATURE_ID_PR_MGMT,
+ .ops = &pr_mgmt_ops,
+ },
+ {
+ .ops = NULL,
+ },
+};
+
+static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ /* No extension support for now */
+ return 0;
+}
+
+static int fme_open(struct inode *inode, struct file *filp)
+{
+ struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ int ret;
+
+ if (WARN_ON(!pdata))
+ return -ENODEV;
+
+ ret = dfl_feature_dev_use_begin(pdata);
+ if (ret)
+ return ret;
+
+ dev_dbg(&fdev->dev, "Device File Open\n");
+ filp->private_data = pdata;
+
+ return 0;
+}
+
+static int fme_release(struct inode *inode, struct file *filp)
+{
+ struct dfl_feature_platform_data *pdata = filp->private_data;
+ struct platform_device *pdev = pdata->dev;
+
+ dev_dbg(&pdev->dev, "Device File Release\n");
+ dfl_feature_dev_use_end(pdata);
+
+ return 0;
+}
+
+static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = filp->private_data;
+ struct platform_device *pdev = pdata->dev;
+ struct dfl_feature *f;
+ long ret;
+
+ dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
+
+ switch (cmd) {
+ case DFL_FPGA_GET_API_VERSION:
+ return DFL_FPGA_API_VERSION;
+ case DFL_FPGA_CHECK_EXTENSION:
+ return fme_ioctl_check_extension(pdata, arg);
+ default:
+ /*
+ * Let the sub-feature's ioctl function handle the cmd.
+ * A sub-feature's ioctl returns -ENODEV when the cmd is
+ * not handled by that sub-feature, and returns 0 or
+ * another error code if the cmd is handled.
+ */
+ dfl_fpga_dev_for_each_feature(pdata, f) {
+ if (f->ops && f->ops->ioctl) {
+ ret = f->ops->ioctl(pdev, f, cmd, arg);
+ if (ret != -ENODEV)
+ return ret;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int fme_dev_init(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme *fme;
+
+ fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
+ if (!fme)
+ return -ENOMEM;
+
+ fme->pdata = pdata;
+
+ mutex_lock(&pdata->lock);
+ dfl_fpga_pdata_set_private(pdata, fme);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static void fme_dev_destroy(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme *fme;
+
+ mutex_lock(&pdata->lock);
+ fme = dfl_fpga_pdata_get_private(pdata);
+ dfl_fpga_pdata_set_private(pdata, NULL);
+ mutex_unlock(&pdata->lock);
+}
+
+static const struct file_operations fme_fops = {
+ .owner = THIS_MODULE,
+ .open = fme_open,
+ .release = fme_release,
+ .unlocked_ioctl = fme_ioctl,
+};
+
+static int fme_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = fme_dev_init(pdev);
+ if (ret)
+ goto exit;
+
+ ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
+ if (ret)
+ goto dev_destroy;
+
+ ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
+ if (ret)
+ goto feature_uinit;
+
+ return 0;
+
+feature_uinit:
+ dfl_fpga_dev_feature_uinit(pdev);
+dev_destroy:
+ fme_dev_destroy(pdev);
+exit:
+ return ret;
+}
+
+static int fme_remove(struct platform_device *pdev)
+{
+ dfl_fpga_dev_ops_unregister(pdev);
+ dfl_fpga_dev_feature_uinit(pdev);
+ fme_dev_destroy(pdev);
+
+ return 0;
+}
+
+static struct platform_driver fme_driver = {
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_FME,
+ },
+ .probe = fme_probe,
+ .remove = fme_remove,
+};
+
+module_platform_driver(fme_driver);
+
+MODULE_DESCRIPTION("FPGA Management Engine driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme");
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
new file mode 100644
index 000000000000..b5ef405b6d88
--- /dev/null
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Manager Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Christopher Rauer <christopher.rauer@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/fpga/fpga-mgr.h>
+
+#include "dfl-fme-pr.h"
+
+/* FME Partial Reconfiguration Sub Feature Register Set */
+#define FME_PR_DFH 0x0
+#define FME_PR_CTRL 0x8
+#define FME_PR_STS 0x10
+#define FME_PR_DATA 0x18
+#define FME_PR_ERR 0x20
+#define FME_PR_INTFC_ID_H 0xA8
+#define FME_PR_INTFC_ID_L 0xB0
+
+/* FME PR Control Register Bitfield */
+#define FME_PR_CTRL_PR_RST BIT_ULL(0) /* Reset PR engine */
+#define FME_PR_CTRL_PR_RSTACK BIT_ULL(4) /* Ack for PR engine reset */
+#define FME_PR_CTRL_PR_RGN_ID GENMASK_ULL(9, 7) /* PR Region ID */
+#define FME_PR_CTRL_PR_START BIT_ULL(12) /* Start to request PR service */
+#define FME_PR_CTRL_PR_COMPLETE BIT_ULL(13) /* PR data push completion */
+
+/* FME PR Status Register Bitfield */
+/* Number of available entries in HW queue inside the PR engine. */
+#define FME_PR_STS_PR_CREDIT GENMASK_ULL(8, 0)
+#define FME_PR_STS_PR_STS BIT_ULL(16) /* PR operation status */
+#define FME_PR_STS_PR_STS_IDLE 0
+#define FME_PR_STS_PR_CTRLR_STS GENMASK_ULL(22, 20) /* Controller status */
+#define FME_PR_STS_PR_HOST_STS GENMASK_ULL(27, 24) /* PR host status */
+
+/* FME PR Data Register Bitfield */
+/* PR data from the raw-binary file. */
+#define FME_PR_DATA_PR_DATA_RAW GENMASK_ULL(31, 0)
+
+/* FME PR Error Register */
+/* PR Operation errors detected. */
+#define FME_PR_ERR_OPERATION_ERR BIT_ULL(0)
+/* CRC error detected. */
+#define FME_PR_ERR_CRC_ERR BIT_ULL(1)
+/* Incompatible PR bitstream detected. */
+#define FME_PR_ERR_INCOMPATIBLE_BS BIT_ULL(2)
+/* PR data push protocol violated. */
+#define FME_PR_ERR_PROTOCOL_ERR BIT_ULL(3)
+/* PR data fifo overflow error detected */
+#define FME_PR_ERR_FIFO_OVERFLOW BIT_ULL(4)
+
+#define PR_WAIT_TIMEOUT 8000000
+#define PR_HOST_STATUS_IDLE 0
+
+struct fme_mgr_priv {
+ void __iomem *ioaddr;
+ u64 pr_error;
+};
+
+static u64 pr_error_to_mgr_status(u64 err)
+{
+ u64 status = 0;
+
+ if (err & FME_PR_ERR_OPERATION_ERR)
+ status |= FPGA_MGR_STATUS_OPERATION_ERR;
+ if (err & FME_PR_ERR_CRC_ERR)
+ status |= FPGA_MGR_STATUS_CRC_ERR;
+ if (err & FME_PR_ERR_INCOMPATIBLE_BS)
+ status |= FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR;
+ if (err & FME_PR_ERR_PROTOCOL_ERR)
+ status |= FPGA_MGR_STATUS_IP_PROTOCOL_ERR;
+ if (err & FME_PR_ERR_FIFO_OVERFLOW)
+ status |= FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR;
+
+ return status;
+}
+
+static u64 fme_mgr_pr_error_handle(void __iomem *fme_pr)
+{
+ u64 pr_status, pr_error;
+
+ pr_status = readq(fme_pr + FME_PR_STS);
+ if (!(pr_status & FME_PR_STS_PR_STS))
+ return 0;
+
+ pr_error = readq(fme_pr + FME_PR_ERR);
+ writeq(pr_error, fme_pr + FME_PR_ERR);
+
+ return pr_error;
+}
+
+static int fme_mgr_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl, pr_status;
+
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ dev_err(dev, "only supports partial reconfiguration.\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "resetting PR before initiated PR\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_RST;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ if (readq_poll_timeout(fme_pr + FME_PR_CTRL, pr_ctrl,
+ pr_ctrl & FME_PR_CTRL_PR_RSTACK, 1,
+ PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Reset ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl &= ~FME_PR_CTRL_PR_RST;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev,
+ "waiting for PR resource in HW to be initialized and ready\n");
+
+ if (readq_poll_timeout(fme_pr + FME_PR_STS, pr_status,
+ (pr_status & FME_PR_STS_PR_STS) ==
+ FME_PR_STS_PR_STS_IDLE, 1, PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Status timeout\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "check and clear previous PR error\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ if (priv->pr_error)
+ dev_dbg(dev, "previous PR error detected %llx\n",
+ (unsigned long long)priv->pr_error);
+
+ dev_dbg(dev, "set PR port ID\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl &= ~FME_PR_CTRL_PR_RGN_ID;
+ pr_ctrl |= FIELD_PREP(FME_PR_CTRL_PR_RGN_ID, info->region_id);
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ return 0;
+}
+
+static int fme_mgr_write(struct fpga_manager *mgr,
+ const char *buf, size_t count)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl, pr_status, pr_data;
+ int delay = 0, pr_credit, i = 0;
+
+ dev_dbg(dev, "start request\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_START;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev, "pushing data from bitstream to HW\n");
+
+ /*
+ * The driver can push data to the PR hardware via the PR_DATA register
+ * once the HW has enough pr_credit (> 1); pr_credit decreases by one
+ * for every 32-bit PR data write to the PR_DATA register. If
+ * pr_credit <= 1, the driver must poll until the hardware frees up
+ * enough pr_credit.
+ */
+ pr_status = readq(fme_pr + FME_PR_STS);
+ pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status);
+
+ while (count > 0) {
+ while (pr_credit <= 1) {
+ if (delay++ > PR_WAIT_TIMEOUT) {
+ dev_err(dev, "PR_CREDIT timeout\n");
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+
+ pr_status = readq(fme_pr + FME_PR_STS);
+ pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status);
+ }
+
+ if (count < 4) {
+ dev_err(dev, "Invaild PR bitstream size\n");
+ return -EINVAL;
+ }
+
+ pr_data = 0;
+ pr_data |= FIELD_PREP(FME_PR_DATA_PR_DATA_RAW,
+ *(((u32 *)buf) + i));
+ writeq(pr_data, fme_pr + FME_PR_DATA);
+ count -= 4;
+ pr_credit--;
+ i++;
+ }
+
+ return 0;
+}
+
+static int fme_mgr_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl;
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_COMPLETE;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev, "green bitstream push complete\n");
+ dev_dbg(dev, "waiting for HW to release PR resource\n");
+
+ if (readq_poll_timeout(fme_pr + FME_PR_CTRL, pr_ctrl,
+ !(pr_ctrl & FME_PR_CTRL_PR_START), 1,
+ PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Completion ACK timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "PR operation complete, checking status\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ if (priv->pr_error) {
+ dev_dbg(dev, "PR error detected %llx\n",
+ (unsigned long long)priv->pr_error);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "PR done successfully\n");
+
+ return 0;
+}
+
+static enum fpga_mgr_states fme_mgr_state(struct fpga_manager *mgr)
+{
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static u64 fme_mgr_status(struct fpga_manager *mgr)
+{
+ struct fme_mgr_priv *priv = mgr->priv;
+
+ return pr_error_to_mgr_status(priv->pr_error);
+}
+
+static const struct fpga_manager_ops fme_mgr_ops = {
+ .write_init = fme_mgr_write_init,
+ .write = fme_mgr_write,
+ .write_complete = fme_mgr_write_complete,
+ .state = fme_mgr_state,
+ .status = fme_mgr_status,
+};
+
+static void fme_mgr_get_compat_id(void __iomem *fme_pr,
+ struct fpga_compat_id *id)
+{
+ id->id_l = readq(fme_pr + FME_PR_INTFC_ID_L);
+ id->id_h = readq(fme_pr + FME_PR_INTFC_ID_H);
+}
+
+static int fme_mgr_probe(struct platform_device *pdev)
+{
+ struct dfl_fme_mgr_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct fpga_compat_id *compat_id;
+ struct device *dev = &pdev->dev;
+ struct fme_mgr_priv *priv;
+ struct fpga_manager *mgr;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (pdata->ioaddr)
+ priv->ioaddr = pdata->ioaddr;
+
+ if (!priv->ioaddr) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->ioaddr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ioaddr))
+ return PTR_ERR(priv->ioaddr);
+ }
+
+ compat_id = devm_kzalloc(dev, sizeof(*compat_id), GFP_KERNEL);
+ if (!compat_id)
+ return -ENOMEM;
+
+ fme_mgr_get_compat_id(priv->ioaddr, compat_id);
+
+ mgr = fpga_mgr_create(dev, "DFL FME FPGA Manager",
+ &fme_mgr_ops, priv);
+ if (!mgr)
+ return -ENOMEM;
+
+ mgr->compat_id = compat_id;
+ platform_set_drvdata(pdev, mgr);
+
+ ret = fpga_mgr_register(mgr);
+ if (ret)
+ fpga_mgr_free(mgr);
+
+ return ret;
+}
+
+static int fme_mgr_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+
+ fpga_mgr_unregister(mgr);
+
+ return 0;
+}
+
+static struct platform_driver fme_mgr_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_MGR,
+ },
+ .probe = fme_mgr_probe,
+ .remove = fme_mgr_remove,
+};
+
+module_platform_driver(fme_mgr_driver);
+
+MODULE_DESCRIPTION("FPGA Manager for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-mgr");
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
new file mode 100644
index 000000000000..fc9fd2d0482f
--- /dev/null
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME) Partial Reconfiguration
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Christopher Rauer <christopher.rauer@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+#include "dfl-fme-pr.h"
+
+static struct dfl_fme_region *
+dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
+{
+ struct dfl_fme_region *fme_region;
+
+ list_for_each_entry(fme_region, &fme->region_list, node)
+ if (fme_region->port_id == port_id)
+ return fme_region;
+
+ return NULL;
+}
+
+static int dfl_fme_region_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
+{
+ struct dfl_fme_region *fme_region;
+ struct fpga_region *region;
+
+ fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
+ if (!fme_region)
+ return NULL;
+
+ region = fpga_region_class_find(NULL, &fme_region->region->dev,
+ dfl_fme_region_match);
+ if (!region)
+ return NULL;
+
+ return region;
+}
+
+static int fme_pr(struct platform_device *pdev, unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __user *argp = (void __user *)arg;
+ struct dfl_fpga_fme_port_pr port_pr;
+ struct fpga_image_info *info;
+ struct fpga_region *region;
+ void __iomem *fme_hdr;
+ struct dfl_fme *fme;
+ unsigned long minsz;
+ void *buf = NULL;
+ int ret = 0;
+ u64 v;
+
+ minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);
+
+ if (copy_from_user(&port_pr, argp, minsz))
+ return -EFAULT;
+
+ if (port_pr.argsz < minsz || port_pr.flags)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(port_pr.buffer_size, 4))
+ return -EINVAL;
+
+ /* get fme header region */
+ fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
+ FME_FEATURE_ID_HEADER);
+
+ /* check port id */
+ v = readq(fme_hdr + FME_HDR_CAP);
+ if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
+ dev_dbg(&pdev->dev, "port number more than maximum\n");
+ return -EINVAL;
+ }
+
+ if (!access_ok(VERIFY_READ,
+ (void __user *)(unsigned long)port_pr.buffer_address,
+ port_pr.buffer_size))
+ return -EFAULT;
+
+ buf = vmalloc(port_pr.buffer_size);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf,
+ (void __user *)(unsigned long)port_pr.buffer_address,
+ port_pr.buffer_size)) {
+ ret = -EFAULT;
+ goto free_exit;
+ }
+
+ /* prepare fpga_image_info for PR */
+ info = fpga_image_info_alloc(&pdev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto free_exit;
+ }
+
+ info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+ mutex_lock(&pdata->lock);
+ fme = dfl_fpga_pdata_get_private(pdata);
+ /* fme device has been unregistered. */
+ if (!fme) {
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ region = dfl_fme_region_find(fme, port_pr.port_id);
+ if (!region) {
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ fpga_image_info_free(region->info);
+
+ info->buf = buf;
+ info->count = port_pr.buffer_size;
+ info->region_id = port_pr.port_id;
+ region->info = info;
+
+ ret = fpga_region_program_fpga(region);
+
+ /*
+ * This allows userspace to reset the PR region's logic by disabling
+ * and re-enabling the bridge to clear things out between acceleration
+ * runs, so there is no need to hold the bridges after partial
+ * reconfiguration.
+ */
+ if (region->get_bridges)
+ fpga_bridges_put(&region->bridge_list);
+
+ put_device(&region->dev);
+unlock_exit:
+ mutex_unlock(&pdata->lock);
+free_exit:
+ vfree(buf);
+ if (copy_to_user((void __user *)arg, &port_pr, minsz))
+ return -EFAULT;
+
+ return ret;
+}
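A userspace sketch of driving fme_pr() above through DFL_FPGA_FME_PORT_PR, assuming the uapi definitions from <linux/fpga-dfl.h>; the device node name is hypothetical, and buffer_size must be 4-byte aligned as checked above:

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fpga-dfl.h>

    static int program_port(int port_id, const void *bitstream, uint32_t len)
    {
            struct dfl_fpga_fme_port_pr pr;
            int fd, ret;

            fd = open("/dev/dfl-fme.0", O_RDWR);    /* hypothetical node */
            if (fd < 0)
                    return -1;

            memset(&pr, 0, sizeof(pr));
            pr.argsz = sizeof(pr);
            pr.port_id = port_id;
            pr.buffer_size = len;                   /* must be 4-byte aligned */
            pr.buffer_address = (uintptr_t)bitstream;

            ret = ioctl(fd, DFL_FPGA_FME_PORT_PR, &pr);
            close(fd);
            return ret;
    }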
+
+/**
+ * dfl_fme_create_mgr - create fpga mgr platform device as child device
+ *
+ * @pdata: fme platform_device's pdata
+ * @feature: the PR management sub feature with the mapped PR registers
+ *
+ * Return: mgr platform device if successful, and error code otherwise.
+ */
+static struct platform_device *
+dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
+ struct dfl_feature *feature)
+{
+ struct platform_device *mgr, *fme = pdata->dev;
+ struct dfl_fme_mgr_pdata mgr_pdata;
+ int ret = -ENOMEM;
+
+ if (!feature->ioaddr)
+ return ERR_PTR(-ENODEV);
+
+ mgr_pdata.ioaddr = feature->ioaddr;
+
+ /*
+ * Each FME has only one fpga-mgr, so allocate platform device using
+ * the same FME platform device id.
+ */
+ mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
+ if (!mgr)
+ return ERR_PTR(ret);
+
+ mgr->dev.parent = &fme->dev;
+
+ ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
+ if (ret)
+ goto create_mgr_err;
+
+ ret = platform_device_add(mgr);
+ if (ret)
+ goto create_mgr_err;
+
+ return mgr;
+
+create_mgr_err:
+ platform_device_put(mgr);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_mgr - destroy fpga mgr platform device
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+
+ platform_device_unregister(priv->mgr);
+}
+
+/**
+ * dfl_fme_create_bridge - create fme fpga bridge platform device as child
+ *
+ * @pdata: fme platform device's pdata
+ * @port_id: port id for the bridge to be created.
+ *
+ * Return: bridge platform device if successful, and error code otherwise.
+ */
+static struct dfl_fme_bridge *
+dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
+{
+ struct device *dev = &pdata->dev->dev;
+ struct dfl_fme_br_pdata br_pdata;
+ struct dfl_fme_bridge *fme_br;
+ int ret = -ENOMEM;
+
+ fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
+ if (!fme_br)
+ return ERR_PTR(ret);
+
+ br_pdata.cdev = pdata->dfl_cdev;
+ br_pdata.port_id = port_id;
+
+ fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
+ PLATFORM_DEVID_AUTO);
+ if (!fme_br->br)
+ return ERR_PTR(ret);
+
+ fme_br->br->dev.parent = dev;
+
+ ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
+ if (ret)
+ goto create_br_err;
+
+ ret = platform_device_add(fme_br->br);
+ if (ret)
+ goto create_br_err;
+
+ return fme_br;
+
+create_br_err:
+ platform_device_put(fme_br->br);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_bridge - destroy fpga bridge platform device
+ * @fme_br: fme bridge to destroy
+ */
+static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
+{
+ platform_device_unregister(fme_br->br);
+}
+
+/**
+ * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme_bridge *fbridge, *tmp;
+
+ list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
+ list_del(&fbridge->node);
+ dfl_fme_destroy_bridge(fbridge);
+ }
+}
+
+/**
+ * dfl_fme_create_region - create fpga region platform device as child
+ *
+ * @pdata: fme platform device's pdata
+ * @mgr: mgr platform device needed for region
+ * @br: br platform device needed for region
+ * @port_id: port id
+ *
+ * Return: fme region if successful, and error code otherwise.
+ */
+static struct dfl_fme_region *
+dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
+ struct platform_device *mgr,
+ struct platform_device *br, int port_id)
+{
+ struct dfl_fme_region_pdata region_pdata;
+ struct device *dev = &pdata->dev->dev;
+ struct dfl_fme_region *fme_region;
+ int ret = -ENOMEM;
+
+ fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
+ if (!fme_region)
+ return ERR_PTR(ret);
+
+ region_pdata.mgr = mgr;
+ region_pdata.br = br;
+
+ /*
+ * Each FPGA device may have more than one port, so allocate platform
+ * device using the same port platform device id.
+ */
+ fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
+ if (!fme_region->region)
+ return ERR_PTR(ret);
+
+ fme_region->region->dev.parent = dev;
+
+ ret = platform_device_add_data(fme_region->region, &region_pdata,
+ sizeof(region_pdata));
+ if (ret)
+ goto create_region_err;
+
+ ret = platform_device_add(fme_region->region);
+ if (ret)
+ goto create_region_err;
+
+ fme_region->port_id = port_id;
+
+ return fme_region;
+
+create_region_err:
+ platform_device_put(fme_region->region);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_region - destroy fme region
+ * @fme_region: fme region to destroy
+ */
+static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
+{
+ platform_device_unregister(fme_region->region);
+}
+
+/**
+ * dfl_fme_destroy_regions - destroy all fme regions
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme_region *fme_region, *tmp;
+
+ list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
+ list_del(&fme_region->node);
+ dfl_fme_destroy_region(fme_region);
+ }
+}
+
+static int pr_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme_region *fme_region;
+ struct dfl_fme_bridge *fme_br;
+ struct platform_device *mgr;
+ struct dfl_fme *priv;
+ void __iomem *fme_hdr;
+ int ret = -ENODEV, i = 0;
+ u64 fme_cap, port_offset;
+
+ fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
+ FME_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ priv = dfl_fpga_pdata_get_private(pdata);
+
+ /* Initialize the region and bridge sub device list */
+ INIT_LIST_HEAD(&priv->region_list);
+ INIT_LIST_HEAD(&priv->bridge_list);
+
+ /* Create fpga mgr platform device */
+ mgr = dfl_fme_create_mgr(pdata, feature);
+ if (IS_ERR(mgr)) {
+ dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
+ goto unlock;
+ }
+
+ priv->mgr = mgr;
+
+ /* Read capability register to check number of regions and bridges */
+ fme_cap = readq(fme_hdr + FME_HDR_CAP);
+ for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
+ port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
+ if (!(port_offset & FME_PORT_OFST_IMP))
+ continue;
+
+ /* Create bridge for each port */
+ fme_br = dfl_fme_create_bridge(pdata, i);
+ if (IS_ERR(fme_br)) {
+ ret = PTR_ERR(fme_br);
+ goto destroy_region;
+ }
+
+ list_add(&fme_br->node, &priv->bridge_list);
+
+ /* Create region for each port */
+ fme_region = dfl_fme_create_region(pdata, mgr,
+ fme_br->br, i);
+ if (IS_ERR(fme_region)) {
+ ret = PTR_ERR(fme_region);
+ goto destroy_region;
+ }
+
+ list_add(&fme_region->node, &priv->region_list);
+ }
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+
+destroy_region:
+ dfl_fme_destroy_regions(pdata);
+ dfl_fme_destroy_bridges(pdata);
+ dfl_fme_destroy_mgr(pdata);
+unlock:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+static void pr_mgmt_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme *priv;
+
+ mutex_lock(&pdata->lock);
+ priv = dfl_fpga_pdata_get_private(pdata);
+
+ dfl_fme_destroy_regions(pdata);
+ dfl_fme_destroy_bridges(pdata);
+ dfl_fme_destroy_mgr(pdata);
+ mutex_unlock(&pdata->lock);
+}
+
+static long fme_pr_ioctl(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case DFL_FPGA_FME_PORT_PR:
+ ret = fme_pr(pdev, arg);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+const struct dfl_feature_ops pr_mgmt_ops = {
+ .init = pr_mgmt_init,
+ .uinit = pr_mgmt_uinit,
+ .ioctl = fme_pr_ioctl,
+};
diff --git a/drivers/fpga/dfl-fme-pr.h b/drivers/fpga/dfl-fme-pr.h
new file mode 100644
index 000000000000..096a699089d3
--- /dev/null
+++ b/drivers/fpga/dfl-fme-pr.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Management Engine (FME) Partial Reconfiguration Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_FME_PR_H
+#define __DFL_FME_PR_H
+
+#include <linux/platform_device.h>
+
+/**
+ * struct dfl_fme_region - FME fpga region data structure
+ *
+ * @region: platform device of the FPGA region.
+ * @node: used to link fme_region to a list.
+ * @port_id: indicates which port this region is connected to.
+ */
+struct dfl_fme_region {
+ struct platform_device *region;
+ struct list_head node;
+ int port_id;
+};
+
+/**
+ * struct dfl_fme_region_pdata - platform data for FME region platform device.
+ *
+ * @mgr: platform device of the FPGA manager.
+ * @br: platform device of the FPGA bridge.
+ * @region_id: region id (same as port_id).
+ */
+struct dfl_fme_region_pdata {
+ struct platform_device *mgr;
+ struct platform_device *br;
+ int region_id;
+};
+
+/**
+ * struct dfl_fme_bridge - FME fpga bridge data structure
+ *
+ * @br: platform device of the FPGA bridge.
+ * @node: used to link fme_bridge to a list.
+ */
+struct dfl_fme_bridge {
+ struct platform_device *br;
+ struct list_head node;
+};
+
+/**
+ * struct dfl_fme_br_pdata - platform data for FME bridge platform device.
+ *
+ * @cdev: container device.
+ * @port_id: port id.
+ */
+struct dfl_fme_br_pdata {
+ struct dfl_fpga_cdev *cdev;
+ int port_id;
+};
+
+/**
+ * struct dfl_fme_mgr_pdata - platform data for FME manager platform device.
+ *
+ * @ioaddr: mapped io address for FME manager platform device.
+ */
+struct dfl_fme_mgr_pdata {
+ void __iomem *ioaddr;
+};
+
+#define DFL_FPGA_FME_MGR "dfl-fme-mgr"
+#define DFL_FPGA_FME_BRIDGE "dfl-fme-bridge"
+#define DFL_FPGA_FME_REGION "dfl-fme-region"
+
+#endif /* __DFL_FME_PR_H */
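
As a hedged sketch of how dfl_fme_region_pdata travels to the region driver, the FME PR code could register the "dfl-fme-region" sub device roughly as below; example_create_region, mgr_pdev, br_pdev and port_id are illustrative names, and platform_device_register_data() (which copies the pdata) is one of several ways to attach it.

static struct platform_device *
example_create_region(struct device *parent, struct platform_device *mgr_pdev,
		      struct platform_device *br_pdev, int port_id)
{
	struct dfl_fme_region_pdata pdata = {
		.mgr = mgr_pdev,	/* FPGA manager platform device */
		.br = br_pdev,		/* FPGA bridge platform device */
		.region_id = port_id,	/* region id mirrors the port id */
	};

	/* PLATFORM_DEVID_AUTO lets the platform bus pick a unique id */
	return platform_device_register_data(parent, DFL_FPGA_FME_REGION,
					     PLATFORM_DEVID_AUTO, &pdata,
					     sizeof(pdata));
}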
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
new file mode 100644
index 000000000000..0b7e19c27c6d
--- /dev/null
+++ b/drivers/fpga/dfl-fme-region.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Region Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fpga/fpga-region.h>
+
+#include "dfl-fme-pr.h"
+
+static int fme_region_get_bridges(struct fpga_region *region)
+{
+ struct dfl_fme_region_pdata *pdata = region->priv;
+ struct device *dev = &pdata->br->dev;
+
+ return fpga_bridge_get_to_list(dev, region->info, &region->bridge_list);
+}
+
+static int fme_region_probe(struct platform_device *pdev)
+{
+ struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct fpga_region *region;
+ struct fpga_manager *mgr;
+ int ret;
+
+ mgr = fpga_mgr_get(&pdata->mgr->dev);
+ if (IS_ERR(mgr))
+ return -EPROBE_DEFER;
+
+ region = fpga_region_create(dev, mgr, fme_region_get_bridges);
+ if (!region) {
+ ret = -ENOMEM;
+ goto eprobe_mgr_put;
+ }
+
+ region->priv = pdata;
+ region->compat_id = mgr->compat_id;
+ platform_set_drvdata(pdev, region);
+
+ ret = fpga_region_register(region);
+ if (ret)
+ goto region_free;
+
+ dev_dbg(dev, "DFL FME FPGA Region probed\n");
+
+ return 0;
+
+region_free:
+ fpga_region_free(region);
+eprobe_mgr_put:
+ fpga_mgr_put(mgr);
+ return ret;
+}
+
+static int fme_region_remove(struct platform_device *pdev)
+{
+ struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+
+ fpga_region_unregister(region);
+ fpga_mgr_put(region->mgr);
+
+ return 0;
+}
+
+static struct platform_driver fme_region_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_REGION,
+ },
+ .probe = fme_region_probe,
+ .remove = fme_region_remove,
+};
+
+module_platform_driver(fme_region_driver);
+
+MODULE_DESCRIPTION("FPGA Region for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-region");
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
new file mode 100644
index 000000000000..5394a216c5c0
--- /dev/null
+++ b/drivers/fpga/dfl-fme.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Management Engine (FME) Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_FME_H
+#define __DFL_FME_H
+
+/**
+ * struct dfl_fme - dfl fme private data
+ *
+ * @mgr: FME's FPGA manager platform device.
+ * @region_list: linked list of FME's FPGA regions.
+ * @bridge_list: linked list of FME's FPGA bridges.
+ * @pdata: fme platform device's pdata.
+ */
+struct dfl_fme {
+ struct platform_device *mgr;
+ struct list_head region_list;
+ struct list_head bridge_list;
+ struct dfl_feature_platform_data *pdata;
+};
+
+extern const struct dfl_feature_ops pr_mgmt_ops;
+
+#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
new file mode 100644
index 000000000000..66b5720582bb
--- /dev/null
+++ b/drivers/fpga/dfl-pci.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Device Feature List (DFL) PCIe device
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Zhang Yi <Yi.Z.Zhang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/aer.h>
+
+#include "dfl.h"
+
+#define DRV_VERSION "0.8"
+#define DRV_NAME "dfl-pci"
+
+struct cci_drvdata {
+ struct dfl_fpga_cdev *cdev; /* container device */
+};
+
+static void __iomem *cci_pci_ioremap_bar(struct pci_dev *pcidev, int bar)
+{
+ if (pcim_iomap_regions(pcidev, BIT(bar), DRV_NAME))
+ return NULL;
+
+ return pcim_iomap_table(pcidev)[bar];
+}
+
+/* PCI Device ID */
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+/* VF Device */
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+
+static struct pci_device_id cci_pcie_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
+
+static int cci_init_drvdata(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ pci_set_drvdata(pcidev, drvdata);
+
+ return 0;
+}
+
+static void cci_remove_feature_devs(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+
+ /* remove all children feature devices */
+ dfl_fpga_feature_devs_remove(drvdata->cdev);
+}
+
+/* enumerate feature devices under pci device */
+static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ struct dfl_fpga_enum_info *info;
+ struct dfl_fpga_cdev *cdev;
+ resource_size_t start, len;
+ int port_num, bar, i, ret = 0;
+ void __iomem *base;
+ u32 offset;
+ u64 v;
+
+ /* allocate enumeration info via pci_dev */
+ info = dfl_fpga_enum_info_alloc(&pcidev->dev);
+ if (!info)
+ return -ENOMEM;
+
+ /* start to find Device Feature List from Bar 0 */
+ base = cci_pci_ioremap_bar(pcidev, 0);
+ if (!base) {
+ ret = -ENOMEM;
+ goto enum_info_free_exit;
+ }
+
+ /*
+ * A PF device has an FME and Ports/AFUs, while a VF device has only
+ * one Port/AFU. Check them and add the related "Device Feature List"
+ * info for the next step of enumeration.
+ */
+ if (dfl_feature_is_fme(base)) {
+ start = pci_resource_start(pcidev, 0);
+ len = pci_resource_len(pcidev, 0);
+
+ dfl_fpga_enum_info_add_dfl(info, start, len, base);
+
+ /*
+ * find more Device Feature Lists (e.g. Ports) per the information
+ * indicated by the FME module.
+ */
+ v = readq(base + FME_HDR_CAP);
+ port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);
+
+ WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);
+
+ for (i = 0; i < port_num; i++) {
+ v = readq(base + FME_HDR_PORT_OFST(i));
+
+ /* skip ports which are not implemented. */
+ if (!(v & FME_PORT_OFST_IMP))
+ continue;
+
+ /*
+ * add the Port's Device Feature List information for the
+ * next step of enumeration.
+ */
+ bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
+ offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
+ base = cci_pci_ioremap_bar(pcidev, bar);
+ if (!base)
+ continue;
+
+ start = pci_resource_start(pcidev, bar) + offset;
+ len = pci_resource_len(pcidev, bar) - offset;
+
+ dfl_fpga_enum_info_add_dfl(info, start, len,
+ base + offset);
+ }
+ } else if (dfl_feature_is_port(base)) {
+ start = pci_resource_start(pcidev, 0);
+ len = pci_resource_len(pcidev, 0);
+
+ dfl_fpga_enum_info_add_dfl(info, start, len, base);
+ } else {
+ ret = -ENODEV;
+ goto enum_info_free_exit;
+ }
+
+ /* start enumeration with prepared enumeration information */
+ cdev = dfl_fpga_feature_devs_enumerate(info);
+ if (IS_ERR(cdev)) {
+ dev_err(&pcidev->dev, "Enumeration failure\n");
+ ret = PTR_ERR(cdev);
+ goto enum_info_free_exit;
+ }
+
+ drvdata->cdev = cdev;
+
+enum_info_free_exit:
+ dfl_fpga_enum_info_free(info);
+
+ return ret;
+}
+
+static
+int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
+{
+ int ret;
+
+ ret = pcim_enable_device(pcidev);
+ if (ret < 0) {
+ dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
+ return ret;
+ }
+
+ ret = pci_enable_pcie_error_reporting(pcidev);
+ if (ret && ret != -EINVAL)
+ dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);
+
+ pci_set_master(pcidev);
+
+ if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+ if (ret)
+ goto disable_error_report_exit;
+ } else if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
+ if (ret)
+ goto disable_error_report_exit;
+ } else {
+ ret = -EIO;
+ dev_err(&pcidev->dev, "No suitable DMA support available.\n");
+ goto disable_error_report_exit;
+ }
+
+ ret = cci_init_drvdata(pcidev);
+ if (ret) {
+ dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
+ goto disable_error_report_exit;
+ }
+
+ ret = cci_enumerate_feature_devs(pcidev);
+ if (ret) {
+ dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
+ goto disable_error_report_exit;
+ }
+
+ return ret;
+
+disable_error_report_exit:
+ pci_disable_pcie_error_reporting(pcidev);
+ return ret;
+}
+
+static void cci_pci_remove(struct pci_dev *pcidev)
+{
+ cci_remove_feature_devs(pcidev);
+ pci_disable_pcie_error_reporting(pcidev);
+}
+
+static struct pci_driver cci_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cci_pcie_id_tbl,
+ .probe = cci_pci_probe,
+ .remove = cci_pci_remove,
+};
+
+module_pci_driver(cci_pci_driver);
+
+MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
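
The 64-then-32 bit DMA mask fallback in cci_pci_probe() above is the classic pattern; as a hedged sketch, on kernels that provide dma_set_mask_and_coherent() the same logic collapses to the helper below (example_set_dma_mask is an illustrative name).

#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct pci_dev *pcidev)
{
	int ret;

	/* try 64-bit DMA first, then fall back to 32-bit */
	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
	return ret;
}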
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
new file mode 100644
index 000000000000..a9b521bccb06
--- /dev/null
+++ b/drivers/fpga/dfl.c
@@ -0,0 +1,1044 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Device Feature List (DFL) Support
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Zhang Yi <yi.z.zhang@intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+#include <linux/module.h>
+
+#include "dfl.h"
+
+static DEFINE_MUTEX(dfl_id_mutex);
+
+/*
+ * when adding support for a new feature dev in the DFL framework, it's
+ * required to add a new item in enum dfl_id_type and provide related
+ * information in the dfl_devs table below, which is indexed by dfl_id_type,
+ * e.g. the name string used for platform device creation (define name
+ * strings in dfl.h, as they could be reused by platform device drivers).
+ *
+ * if the new feature dev needs chardev support, it's required to add a new
+ * item in the dfl_chrdevs table and configure dfl_devs[i].devt_type as an
+ * index into the dfl_chrdevs table. If no chardev support is needed, just
+ * set devt_type to an invalid index (DFL_FPGA_DEVT_MAX).
+ */
+enum dfl_id_type {
+ FME_ID, /* fme id allocation and mapping */
+ PORT_ID, /* port id allocation and mapping */
+ DFL_ID_MAX,
+};
+
+enum dfl_fpga_devt_type {
+ DFL_FPGA_DEVT_FME,
+ DFL_FPGA_DEVT_PORT,
+ DFL_FPGA_DEVT_MAX,
+};
+
+/**
+ * dfl_dev_info - dfl feature device information.
+ * @name: name string of the feature platform device.
+ * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
+ * @id: idr id of the feature dev.
+ * @devt_type: index to dfl_chrdevs[].
+ */
+struct dfl_dev_info {
+ const char *name;
+ u32 dfh_id;
+ struct idr id;
+ enum dfl_fpga_devt_type devt_type;
+};
+
+/* it is indexed by dfl_id_type */
+static struct dfl_dev_info dfl_devs[] = {
+ {.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
+ .devt_type = DFL_FPGA_DEVT_FME},
+ {.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
+ .devt_type = DFL_FPGA_DEVT_PORT},
+};
+
+/**
+ * dfl_chardev_info - chardev information of dfl feature device
+ * @name: name string of the char device.
+ * @devt: devt of the char device.
+ */
+struct dfl_chardev_info {
+ const char *name;
+ dev_t devt;
+};
+
+/* indexed by enum dfl_fpga_devt_type */
+static struct dfl_chardev_info dfl_chrdevs[] = {
+ {.name = DFL_FPGA_FEATURE_DEV_FME},
+ {.name = DFL_FPGA_FEATURE_DEV_PORT},
+};
+
+static void dfl_ids_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ idr_init(&dfl_devs[i].id);
+}
+
+static void dfl_ids_destroy(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ idr_destroy(&dfl_devs[i].id);
+}
+
+static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
+{
+ int id;
+
+ WARN_ON(type >= DFL_ID_MAX);
+ mutex_lock(&dfl_id_mutex);
+ id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
+ mutex_unlock(&dfl_id_mutex);
+
+ return id;
+}
+
+static void dfl_id_free(enum dfl_id_type type, int id)
+{
+ WARN_ON(type >= DFL_ID_MAX);
+ mutex_lock(&dfl_id_mutex);
+ idr_remove(&dfl_devs[type].id, id);
+ mutex_unlock(&dfl_id_mutex);
+}
+
+static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ if (!strcmp(dfl_devs[i].name, pdev->name))
+ return i;
+
+ return DFL_ID_MAX;
+}
+
+static enum dfl_id_type dfh_id_to_type(u32 id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ if (dfl_devs[i].dfh_id == id)
+ return i;
+
+ return DFL_ID_MAX;
+}
+
+/*
+ * introduce a global port_ops list; it allows port drivers to register
+ * their ops in this list, so other feature devices (e.g. FME) can use the
+ * port functions even when the related port platform device is hidden. One
+ * example is the virtualization case of a PCIe-based FPGA DFL device: when
+ * SRIOV is enabled, the port (and its AFU) is turned into a VF and the port
+ * platform device is hidden from the system, but the port must still be
+ * accessed to finish the FPGA reconfiguration function in FME.
+ */
+
+static DEFINE_MUTEX(dfl_port_ops_mutex);
+static LIST_HEAD(dfl_port_ops_list);
+
+/**
+ * dfl_fpga_port_ops_get - get matched port ops from the global list
+ * @pdev: platform device to match with associated port ops.
+ * Return: matched port ops on success, NULL otherwise.
+ *
+ * Please note that you must call dfl_fpga_port_ops_put() after using the port_ops.
+ */
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
+{
+ struct dfl_fpga_port_ops *ops = NULL;
+
+ mutex_lock(&dfl_port_ops_mutex);
+ if (list_empty(&dfl_port_ops_list))
+ goto done;
+
+ list_for_each_entry(ops, &dfl_port_ops_list, node) {
+ /* match port_ops using the name of platform device */
+ if (!strcmp(pdev->name, ops->name)) {
+ if (!try_module_get(ops->owner))
+ ops = NULL;
+ goto done;
+ }
+ }
+
+ ops = NULL;
+done:
+ mutex_unlock(&dfl_port_ops_mutex);
+ return ops;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
+
+/**
+ * dfl_fpga_port_ops_put - put port ops
+ * @ops: port ops.
+ */
+void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
+{
+ if (ops && ops->owner)
+ module_put(ops->owner);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
+
+/**
+ * dfl_fpga_port_ops_add - add port_ops to global list
+ * @ops: port ops to add.
+ */
+void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
+{
+ mutex_lock(&dfl_port_ops_mutex);
+ list_add_tail(&ops->node, &dfl_port_ops_list);
+ mutex_unlock(&dfl_port_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
+
+/**
+ * dfl_fpga_port_ops_del - remove port_ops from global list
+ * @ops: port ops to del.
+ */
+void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
+{
+ mutex_lock(&dfl_port_ops_mutex);
+ list_del(&ops->node);
+ mutex_unlock(&dfl_port_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
+
+/**
+ * dfl_fpga_check_port_id - check the port id
+ * @pdev: port platform device.
+ * @pport_id: port id to compare.
+ *
+ * Return: 1 if the port device matches the given port id, otherwise 0.
+ */
+int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
+{
+ struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev);
+ int port_id;
+
+ if (!port_ops || !port_ops->get_id)
+ return 0;
+
+ port_id = port_ops->get_id(pdev);
+ dfl_fpga_port_ops_put(port_ops);
+
+ return port_id == *(int *)pport_id;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
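
To illustrate the producer side (a hedged sketch, not from the patch): a port driver would register its ops roughly as below. example_port_ops and example_port_get_id are hypothetical names; a real get_id callback reads the port id from hardware, e.g. the PORT_CAP_PORT_NUM field of PORT_HDR_CAP.

static int example_port_get_id(struct platform_device *pdev)
{
	/* placeholder: a real driver reads PORT_HDR_CAP here */
	return pdev->id;
}

static struct dfl_fpga_port_ops example_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = example_port_get_id,
};

/* module init: dfl_fpga_port_ops_add(&example_port_ops); */
/* module exit: dfl_fpga_port_ops_del(&example_port_ops); */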
+
+/**
+ * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
+ * @pdev: feature device.
+ */
+void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature *feature;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (feature->ops) {
+ feature->ops->uinit(pdev, feature);
+ feature->ops = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
+
+static int dfl_feature_instance_init(struct platform_device *pdev,
+ struct dfl_feature_platform_data *pdata,
+ struct dfl_feature *feature,
+ struct dfl_feature_driver *drv)
+{
+ int ret;
+
+ ret = drv->ops->init(pdev, feature);
+ if (ret)
+ return ret;
+
+ feature->ops = drv->ops;
+
+ return ret;
+}
+
+/**
+ * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
+ * @pdev: feature device.
+ * @feature_drvs: drvs for sub features.
+ *
+ * This function will match sub features with given feature drvs list and
+ * use matched drv to init related sub feature.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_dev_feature_init(struct platform_device *pdev,
+ struct dfl_feature_driver *feature_drvs)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_driver *drv = feature_drvs;
+ struct dfl_feature *feature;
+ int ret;
+
+ while (drv->ops) {
+ dfl_fpga_dev_for_each_feature(pdata, feature) {
+ /* match feature and drv using id */
+ if (feature->id == drv->id) {
+ ret = dfl_feature_instance_init(pdev, pdata,
+ feature, drv);
+ if (ret)
+ goto exit;
+ }
+ }
+ drv++;
+ }
+
+ return 0;
+exit:
+ dfl_fpga_dev_feature_uinit(pdev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
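
A hedged usage sketch: a feature device driver pairs these calls in its probe and remove paths. example_probe and example_remove are assumed names, and fme_feature_drvs is a NULL-ops-terminated dfl_feature_driver table as sketched earlier.

static int example_probe(struct platform_device *pdev)
{
	/* matches each sub feature against the driver table by id */
	return dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
}

static int example_remove(struct platform_device *pdev)
{
	dfl_fpga_dev_feature_uinit(pdev);
	return 0;
}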
+
+static void dfl_chardev_uinit(void)
+{
+ int i;
+
+ for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
+ if (MAJOR(dfl_chrdevs[i].devt)) {
+ unregister_chrdev_region(dfl_chrdevs[i].devt,
+ MINORMASK);
+ dfl_chrdevs[i].devt = MKDEV(0, 0);
+ }
+}
+
+static int dfl_chardev_init(void)
+{
+ int i, ret;
+
+ for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
+ ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, MINORMASK,
+ dfl_chrdevs[i].name);
+ if (ret)
+ goto exit;
+ }
+
+ return 0;
+
+exit:
+ dfl_chardev_uinit();
+ return ret;
+}
+
+static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
+{
+ if (type >= DFL_FPGA_DEVT_MAX)
+ return 0;
+
+ return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
+}
+
+/**
+ * dfl_fpga_dev_ops_register - register cdev ops for feature dev
+ *
+ * @pdev: feature dev.
+ * @fops: file operations for feature dev's cdev.
+ * @owner: owning module/driver.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_dev_ops_register(struct platform_device *pdev,
+ const struct file_operations *fops,
+ struct module *owner)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ cdev_init(&pdata->cdev, fops);
+ pdata->cdev.owner = owner;
+
+ /*
+ * set the parent to the feature device so that its refcount is
+ * decreased only after the last refcount of the cdev is gone;
+ * that makes sure the feature device stays valid during the
+ * device file's life-cycle.
+ */
+ pdata->cdev.kobj.parent = &pdev->dev.kobj;
+
+ return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
+
+/**
+ * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
+ * @pdev: feature dev.
+ */
+void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ cdev_del(&pdata->cdev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
+
+/**
+ * struct build_feature_devs_info - info collected during feature dev build.
+ *
+ * @dev: device to enumerate.
+ * @cdev: the container device for all feature devices.
+ * @feature_dev: current feature device.
+ * @ioaddr: header register region address of feature device in enumeration.
+ * @sub_features: linked list of sub features for the feature device in enumeration.
+ * @feature_num: number of sub features for feature device in enumeration.
+ */
+struct build_feature_devs_info {
+ struct device *dev;
+ struct dfl_fpga_cdev *cdev;
+ struct platform_device *feature_dev;
+ void __iomem *ioaddr;
+ struct list_head sub_features;
+ int feature_num;
+};
+
+/**
+ * struct dfl_feature_info - sub feature info collected during feature dev build
+ *
+ * @fid: id of this sub feature.
+ * @mmio_res: mmio resource of this sub feature.
+ * @ioaddr: mapped base address of mmio resource.
+ * @node: node in sub_features linked list.
+ */
+struct dfl_feature_info {
+ u64 fid;
+ struct resource mmio_res;
+ void __iomem *ioaddr;
+ struct list_head node;
+};
+
+static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
+ struct platform_device *port)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
+
+ mutex_lock(&cdev->lock);
+ list_add(&pdata->node, &cdev->port_dev_list);
+ get_device(&pdata->dev->dev);
+ mutex_unlock(&cdev->lock);
+}
+
+/*
+ * register the current feature device; it is called when we need to switch
+ * to parsing another feature, or when we have parsed all features on the
+ * given device feature list.
+ */
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct platform_device *fdev = binfo->feature_dev;
+ struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_info *finfo, *p;
+ int ret, index = 0;
+
+ if (!fdev)
+ return 0;
+
+ /*
+ * we do not need to care for the memory which is associated with
+ * the platform device. After calling platform_device_unregister(),
+ * it will be automatically freed by device's release() callback,
+ * platform_device_release().
+ */
+ pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->dev = fdev;
+ pdata->num = binfo->feature_num;
+ pdata->dfl_cdev = binfo->cdev;
+ mutex_init(&pdata->lock);
+
+ /*
+ * the count should be initialized to 0 to make sure
+ * __fpga_port_enable() following __fpga_port_disable()
+ * works properly for the port device,
+ * and it should always be 0 for the fme device.
+ */
+ WARN_ON(pdata->disable_count);
+
+ fdev->dev.platform_data = pdata;
+
+ /* each sub feature has one MMIO resource */
+ fdev->num_resources = binfo->feature_num;
+ fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
+ GFP_KERNEL);
+ if (!fdev->resource)
+ return -ENOMEM;
+
+ /* fill features and resource information for feature dev */
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ struct dfl_feature *feature = &pdata->features[index];
+
+ /* save resource information for each feature */
+ feature->id = finfo->fid;
+ feature->resource_index = index;
+ feature->ioaddr = finfo->ioaddr;
+ fdev->resource[index++] = finfo->mmio_res;
+
+ list_del(&finfo->node);
+ kfree(finfo);
+ }
+
+ ret = platform_device_add(binfo->feature_dev);
+ if (!ret) {
+ if (feature_dev_id_type(binfo->feature_dev) == PORT_ID)
+ dfl_fpga_cdev_add_port_dev(binfo->cdev,
+ binfo->feature_dev);
+ else
+ binfo->cdev->fme_dev =
+ get_device(&binfo->feature_dev->dev);
+ /*
+ * reset it to avoid build_info_free() freeing their resource.
+ *
+ * The resource of successfully registered feature devices
+ * will be freed by platform_device_unregister(). See the
+ * comments in build_info_create_dev().
+ */
+ binfo->feature_dev = NULL;
+ }
+
+ return ret;
+}
+
+static int
+build_info_create_dev(struct build_feature_devs_info *binfo,
+ enum dfl_id_type type, void __iomem *ioaddr)
+{
+ struct platform_device *fdev;
+ int ret;
+
+ if (type >= DFL_ID_MAX)
+ return -EINVAL;
+
+ /* we will create a new device, commit current device first */
+ ret = build_info_commit_dev(binfo);
+ if (ret)
+ return ret;
+
+ /*
+ * we use -ENODEV as the initialization indicator, which indicates
+ * whether the id needs to be reclaimed
+ */
+ fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
+ if (!fdev)
+ return -ENOMEM;
+
+ binfo->feature_dev = fdev;
+ binfo->feature_num = 0;
+ binfo->ioaddr = ioaddr;
+ INIT_LIST_HEAD(&binfo->sub_features);
+
+ fdev->id = dfl_id_alloc(type, &fdev->dev);
+ if (fdev->id < 0)
+ return fdev->id;
+
+ fdev->dev.parent = &binfo->cdev->region->dev;
+ fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
+
+ return 0;
+}
+
+static void build_info_free(struct build_feature_devs_info *binfo)
+{
+ struct dfl_feature_info *finfo, *p;
+
+ /*
+ * it is a valid id, free it. See comments in
+ * build_info_create_dev()
+ */
+ if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
+ dfl_id_free(feature_dev_id_type(binfo->feature_dev),
+ binfo->feature_dev->id);
+
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ list_del(&finfo->node);
+ kfree(finfo);
+ }
+ }
+
+ platform_device_put(binfo->feature_dev);
+
+ devm_kfree(binfo->dev, binfo);
+}
+
+static inline u32 feature_size(void __iomem *start)
+{
+ u64 v = readq(start + DFH);
+ u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
+ /* workaround for private features with invalid size, use 4K instead */
+ return ofst ? ofst : 4096;
+}
+
+static u64 feature_id(void __iomem *start)
+{
+ u64 v = readq(start + DFH);
+ u16 id = FIELD_GET(DFH_ID, v);
+ u8 type = FIELD_GET(DFH_TYPE, v);
+
+ if (type == DFH_TYPE_FIU)
+ return FEATURE_ID_FIU_HEADER;
+ else if (type == DFH_TYPE_PRIVATE)
+ return id;
+ else if (type == DFH_TYPE_AFU)
+ return FEATURE_ID_AFU;
+
+ WARN_ON(1);
+ return 0;
+}
+
+/*
+ * when creating sub feature instances, private features don't need to
+ * provide the resource size and feature id, as they can be read from the
+ * DFH register. For the afu sub feature, its register region only contains
+ * user defined registers, so never trust any information from it; just use
+ * the resource size information provided by its parent FIU.
+ */
+static int
+create_feature_instance(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
+ resource_size_t size, u64 fid)
+{
+ struct dfl_feature_info *finfo;
+
+ /* read feature size and id if inputs are invalid */
+ size = size ? size : feature_size(dfl->ioaddr + ofst);
+ fid = fid ? fid : feature_id(dfl->ioaddr + ofst);
+
+ if (dfl->len - ofst < size)
+ return -EINVAL;
+
+ finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
+ if (!finfo)
+ return -ENOMEM;
+
+ finfo->fid = fid;
+ finfo->mmio_res.start = dfl->start + ofst;
+ finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
+ finfo->mmio_res.flags = IORESOURCE_MEM;
+ finfo->ioaddr = dfl->ioaddr + ofst;
+
+ list_add_tail(&finfo->node, &binfo->sub_features);
+ binfo->feature_num++;
+
+ return 0;
+}
+
+static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
+ u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
+
+ WARN_ON(!size);
+
+ return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
+}
+
+static int parse_feature_afu(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ if (!binfo->feature_dev) {
+ dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
+ return -EINVAL;
+ }
+
+ switch (feature_dev_id_type(binfo->feature_dev)) {
+ case PORT_ID:
+ return parse_feature_port_afu(binfo, dfl, ofst);
+ default:
+ dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
+ binfo->feature_dev->name);
+ }
+
+ return 0;
+}
+
+static int parse_feature_fiu(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ u32 id, offset;
+ u64 v;
+ int ret = 0;
+
+ v = readq(dfl->ioaddr + ofst + DFH);
+ id = FIELD_GET(DFH_ID, v);
+
+ /* create platform device for dfl feature dev */
+ ret = build_info_create_dev(binfo, dfh_id_to_type(id),
+ dfl->ioaddr + ofst);
+ if (ret)
+ return ret;
+
+ ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
+ if (ret)
+ return ret;
+ /*
+ * find and parse FIU's child AFU via its NEXT_AFU register.
+ * please note that only Port has valid NEXT_AFU pointer per spec.
+ */
+ v = readq(dfl->ioaddr + ofst + NEXT_AFU);
+
+ offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
+ if (offset)
+ return parse_feature_afu(binfo, dfl, ofst + offset);
+
+ dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);
+
+ return ret;
+}
+
+static int parse_feature_private(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ if (!binfo->feature_dev) {
+ dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
+ (unsigned long long)feature_id(dfl->ioaddr + ofst));
+ return -EINVAL;
+ }
+
+ return create_feature_instance(binfo, dfl, ofst, 0, 0);
+}
+
+/**
+ * parse_feature - parse a feature on given device feature list
+ *
+ * @binfo: build feature devices information.
+ * @dfl: device feature list to parse
+ * @ofst: offset to feature header on this device feature list
+ */
+static int parse_feature(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
+{
+ u64 v;
+ u32 type;
+
+ v = readq(dfl->ioaddr + ofst + DFH);
+ type = FIELD_GET(DFH_TYPE, v);
+
+ switch (type) {
+ case DFH_TYPE_AFU:
+ return parse_feature_afu(binfo, dfl, ofst);
+ case DFH_TYPE_PRIVATE:
+ return parse_feature_private(binfo, dfl, ofst);
+ case DFH_TYPE_FIU:
+ return parse_feature_fiu(binfo, dfl, ofst);
+ default:
+ dev_info(binfo->dev,
+ "Feature Type %x is not supported.\n", type);
+ }
+
+ return 0;
+}
+
+static int parse_feature_list(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl)
+{
+ void __iomem *start = dfl->ioaddr;
+ void __iomem *end = dfl->ioaddr + dfl->len;
+ int ret = 0;
+ u32 ofst = 0;
+ u64 v;
+
+ /* walk through the device feature list via DFH's next DFH pointer. */
+ for (; start < end; start += ofst) {
+ if (end - start < DFH_SIZE) {
+ dev_err(binfo->dev, "The region is too small to contain a feature.\n");
+ return -EINVAL;
+ }
+
+ ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
+ if (ret)
+ return ret;
+
+ v = readq(start + DFH);
+ ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
+
+ /* stop parsing if EOL(End of List) is set or offset is 0 */
+ if ((v & DFH_EOL) || !ofst)
+ break;
+ }
+
+ /* commit current feature device when reach the end of list */
+ return build_info_commit_dev(binfo);
+}
+
+struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
+{
+ struct dfl_fpga_enum_info *info;
+
+ get_device(dev);
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ put_device(dev);
+ return NULL;
+ }
+
+ info->dev = dev;
+ INIT_LIST_HEAD(&info->dfls);
+
+ return info;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
+
+void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
+{
+ struct dfl_fpga_enum_dfl *tmp, *dfl;
+ struct device *dev;
+
+ if (!info)
+ return;
+
+ dev = info->dev;
+
+ /* remove all device feature lists in the list. */
+ list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
+ list_del(&dfl->node);
+ devm_kfree(dev, dfl);
+ }
+
+ devm_kfree(dev, info);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
+
+/**
+ * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
+ *
+ * @info: ptr to dfl_fpga_enum_info
+ * @start: mmio resource address of the device feature list.
+ * @len: mmio resource length of the device feature list.
+ * @ioaddr: mapped mmio resource address of the device feature list.
+ *
+ * One FPGA device may have one or more Device Feature Lists (DFLs); use this
+ * function to add the information of each DFL to a common data structure for
+ * the next step of enumeration.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
+ resource_size_t start, resource_size_t len,
+ void __iomem *ioaddr)
+{
+ struct dfl_fpga_enum_dfl *dfl;
+
+ dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
+ if (!dfl)
+ return -ENOMEM;
+
+ dfl->start = start;
+ dfl->len = len;
+ dfl->ioaddr = ioaddr;
+
+ list_add_tail(&dfl->node, &info->dfls);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
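
Taken together with dfl_fpga_feature_devs_enumerate() below, the API follows an alloc / add / enumerate / free sequence, mirroring what dfl-pci.c does. A hedged sketch (example_enumerate and its parameters are illustrative):

static struct dfl_fpga_cdev *example_enumerate(struct device *dev,
					       resource_size_t start,
					       resource_size_t len,
					       void __iomem *base)
{
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;

	info = dfl_fpga_enum_info_alloc(dev);
	if (!info)
		return ERR_PTR(-ENOMEM);

	dfl_fpga_enum_info_add_dfl(info, start, len, base);
	cdev = dfl_fpga_feature_devs_enumerate(info);

	/* the enum info is only needed during enumeration itself */
	dfl_fpga_enum_info_free(info);

	return cdev;
}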
+
+static int remove_feature_dev(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ enum dfl_id_type type = feature_dev_id_type(pdev);
+ int id = pdev->id;
+
+ platform_device_unregister(pdev);
+
+ dfl_id_free(type, id);
+
+ return 0;
+}
+
+static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
+{
+ device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
+}
+
+/**
+ * dfl_fpga_feature_devs_enumerate - enumerate feature devices
+ * @info: information for enumeration.
+ *
+ * This function creates a container device (base FPGA region), enumerates
+ * feature devices based on the enumeration info and creates platform devices
+ * under the container device.
+ *
+ * Return: dfl_fpga_cdev struct on success, -errno on failure
+ */
+struct dfl_fpga_cdev *
+dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
+{
+ struct build_feature_devs_info *binfo;
+ struct dfl_fpga_enum_dfl *dfl;
+ struct dfl_fpga_cdev *cdev;
+ int ret = 0;
+
+ if (!info->dev)
+ return ERR_PTR(-ENODEV);
+
+ cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return ERR_PTR(-ENOMEM);
+
+ cdev->region = fpga_region_create(info->dev, NULL, NULL);
+ if (!cdev->region) {
+ ret = -ENOMEM;
+ goto free_cdev_exit;
+ }
+
+ cdev->parent = info->dev;
+ mutex_init(&cdev->lock);
+ INIT_LIST_HEAD(&cdev->port_dev_list);
+
+ ret = fpga_region_register(cdev->region);
+ if (ret)
+ goto free_region_exit;
+
+ /* create and init build info for enumeration */
+ binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
+ if (!binfo) {
+ ret = -ENOMEM;
+ goto unregister_region_exit;
+ }
+
+ binfo->dev = info->dev;
+ binfo->cdev = cdev;
+
+ /*
+ * start enumeration for all feature devices based on Device Feature
+ * Lists.
+ */
+ list_for_each_entry(dfl, &info->dfls, node) {
+ ret = parse_feature_list(binfo, dfl);
+ if (ret) {
+ remove_feature_devs(cdev);
+ build_info_free(binfo);
+ goto unregister_region_exit;
+ }
+ }
+
+ build_info_free(binfo);
+
+ return cdev;
+
+unregister_region_exit:
+ fpga_region_unregister(cdev->region);
+free_region_exit:
+ fpga_region_free(cdev->region);
+free_cdev_exit:
+ devm_kfree(info->dev, cdev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
+
+/**
+ * dfl_fpga_feature_devs_remove - remove all feature devices
+ * @cdev: fpga container device.
+ *
+ * Remove the container device and all feature devices under the given
+ * container device.
+ */
+void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
+{
+ struct dfl_feature_platform_data *pdata, *ptmp;
+
+ remove_feature_devs(cdev);
+
+ mutex_lock(&cdev->lock);
+ if (cdev->fme_dev) {
+ /* the fme should be unregistered. */
+ WARN_ON(device_is_registered(cdev->fme_dev));
+ put_device(cdev->fme_dev);
+ }
+
+ list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
+ struct platform_device *port_dev = pdata->dev;
+
+ /* the port should be unregistered. */
+ WARN_ON(device_is_registered(&port_dev->dev));
+ list_del(&pdata->node);
+ put_device(&port_dev->dev);
+ }
+ mutex_unlock(&cdev->lock);
+
+ fpga_region_unregister(cdev->region);
+ devm_kfree(cdev->parent, cdev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
+
+/**
+ * __dfl_fpga_cdev_find_port - find a port under given container device
+ *
+ * @cdev: container device
+ * @data: data passed to match function
+ * @match: match function used to find specific port from the port device list
+ *
+ * Find a port device under container device. This function needs to be
+ * invoked with lock held.
+ *
+ * Return: pointer to port's platform device if successful, NULL otherwise.
+ *
+ * NOTE: you will need to drop the device reference with put_device() after use.
+ */
+struct platform_device *
+__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *))
+{
+ struct dfl_feature_platform_data *pdata;
+ struct platform_device *port_dev;
+
+ list_for_each_entry(pdata, &cdev->port_dev_list, node) {
+ port_dev = pdata->dev;
+
+ if (match(port_dev, data) && get_device(&port_dev->dev))
+ return port_dev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
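
For example (a hedged sketch; example_find_port is an assumed name), a caller can look up a port by id by combining the locked wrapper dfl_fpga_cdev_find_port() from dfl.h with the dfl_fpga_check_port_id() match function defined earlier in this file, remembering to put_device() the result after use.

static struct platform_device *example_find_port(struct dfl_fpga_cdev *cdev,
						 int port_id)
{
	/* the wrapper takes cdev->lock around the list walk */
	return dfl_fpga_cdev_find_port(cdev, &port_id,
				       dfl_fpga_check_port_id);
}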
+
+static int __init dfl_fpga_init(void)
+{
+ int ret;
+
+ dfl_ids_init();
+
+ ret = dfl_chardev_init();
+ if (ret)
+ dfl_ids_destroy();
+
+ return ret;
+}
+
+static void __exit dfl_fpga_exit(void)
+{
+ dfl_chardev_uinit();
+ dfl_ids_destroy();
+}
+
+module_init(dfl_fpga_init);
+module_exit(dfl_fpga_exit);
+
+MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
new file mode 100644
index 000000000000..a8b869e9e5b7
--- /dev/null
+++ b/drivers/fpga/dfl.h
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Driver Header File for FPGA Device Feature List (DFL) Support
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Zhang Yi <yi.z.zhang@intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+
+#ifndef __FPGA_DFL_H
+#define __FPGA_DFL_H
+
+#include <linux/bitfield.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/fpga/fpga-region.h>
+
+/* maximum supported number of ports */
+#define MAX_DFL_FPGA_PORT_NUM 4
+/* plus one for fme device */
+#define MAX_DFL_FEATURE_DEV_NUM (MAX_DFL_FPGA_PORT_NUM + 1)
+
+/* Reserved 0x0 for Header Group Register and 0xff for AFU */
+#define FEATURE_ID_FIU_HEADER 0x0
+#define FEATURE_ID_AFU 0xff
+
+#define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+#define FME_FEATURE_ID_THERMAL_MGMT 0x1
+#define FME_FEATURE_ID_POWER_MGMT 0x2
+#define FME_FEATURE_ID_GLOBAL_IPERF 0x3
+#define FME_FEATURE_ID_GLOBAL_ERR 0x4
+#define FME_FEATURE_ID_PR_MGMT 0x5
+#define FME_FEATURE_ID_HSSI 0x6
+#define FME_FEATURE_ID_GLOBAL_DPERF 0x7
+
+#define PORT_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+#define PORT_FEATURE_ID_AFU FEATURE_ID_AFU
+#define PORT_FEATURE_ID_ERROR 0x10
+#define PORT_FEATURE_ID_UMSG 0x11
+#define PORT_FEATURE_ID_UINT 0x12
+#define PORT_FEATURE_ID_STP 0x13
+
+/*
+ * Device Feature Header Register Set
+ *
+ * For FIUs, they all have DFH + GUID + NEXT_AFU as common header registers.
+ * For AFUs, they have DFH + GUID as common header registers.
+ * For private features, they only have DFH register as common header.
+ */
+#define DFH 0x0
+#define GUID_L 0x8
+#define GUID_H 0x10
+#define NEXT_AFU 0x18
+
+#define DFH_SIZE 0x8
+
+/* Device Feature Header Register Bitfield */
+#define DFH_ID GENMASK_ULL(11, 0) /* Feature ID */
+#define DFH_ID_FIU_FME 0
+#define DFH_ID_FIU_PORT 1
+#define DFH_REVISION GENMASK_ULL(15, 12) /* Feature revision */
+#define DFH_NEXT_HDR_OFST GENMASK_ULL(39, 16) /* Offset to next DFH */
+#define DFH_EOL BIT_ULL(40) /* End of list */
+#define DFH_TYPE GENMASK_ULL(63, 60) /* Feature type */
+#define DFH_TYPE_AFU 1
+#define DFH_TYPE_PRIVATE 3
+#define DFH_TYPE_FIU 4
+
+/* Next AFU Register Bitfield */
+#define NEXT_AFU_NEXT_DFH_OFST GENMASK_ULL(23, 0) /* Offset to next AFU */
+
+/* FME Header Register Set */
+#define FME_HDR_DFH DFH
+#define FME_HDR_GUID_L GUID_L
+#define FME_HDR_GUID_H GUID_H
+#define FME_HDR_NEXT_AFU NEXT_AFU
+#define FME_HDR_CAP 0x30
+#define FME_HDR_PORT_OFST(n) (0x38 + ((n) * 0x8))
+#define FME_HDR_BITSTREAM_ID 0x60
+#define FME_HDR_BITSTREAM_MD 0x68
+
+/* FME Fab Capability Register Bitfield */
+#define FME_CAP_FABRIC_VERID GENMASK_ULL(7, 0) /* Fabric version ID */
+#define FME_CAP_SOCKET_ID BIT_ULL(8) /* Socket ID */
+#define FME_CAP_PCIE0_LINK_AVL BIT_ULL(12) /* PCIE0 Link */
+#define FME_CAP_PCIE1_LINK_AVL BIT_ULL(13) /* PCIE1 Link */
+#define FME_CAP_COHR_LINK_AVL BIT_ULL(14) /* Coherent Link */
+#define FME_CAP_IOMMU_AVL BIT_ULL(16) /* IOMMU available */
+#define FME_CAP_NUM_PORTS GENMASK_ULL(19, 17) /* Number of ports */
+#define FME_CAP_ADDR_WIDTH GENMASK_ULL(29, 24) /* Address bus width */
+#define FME_CAP_CACHE_SIZE GENMASK_ULL(43, 32) /* cache size in KB */
+#define FME_CAP_CACHE_ASSOC GENMASK_ULL(47, 44) /* Associativity */
+
+/* FME Port Offset Register Bitfield */
+/* Offset to port device feature header */
+#define FME_PORT_OFST_DFH_OFST GENMASK_ULL(23, 0)
+/* PCI Bar ID for this port */
+#define FME_PORT_OFST_BAR_ID GENMASK_ULL(34, 32)
+/* AFU MMIO access permission. 1 - VF, 0 - PF. */
+#define FME_PORT_OFST_ACC_CTRL BIT_ULL(55)
+#define FME_PORT_OFST_ACC_PF 0
+#define FME_PORT_OFST_ACC_VF 1
+#define FME_PORT_OFST_IMP BIT_ULL(60)
+
+/* PORT Header Register Set */
+#define PORT_HDR_DFH DFH
+#define PORT_HDR_GUID_L GUID_L
+#define PORT_HDR_GUID_H GUID_H
+#define PORT_HDR_NEXT_AFU NEXT_AFU
+#define PORT_HDR_CAP 0x30
+#define PORT_HDR_CTRL 0x38
+
+/* Port Capability Register Bitfield */
+#define PORT_CAP_PORT_NUM GENMASK_ULL(1, 0) /* ID of this port */
+#define PORT_CAP_MMIO_SIZE GENMASK_ULL(23, 8) /* MMIO size in KB */
+#define PORT_CAP_SUPP_INT_NUM GENMASK_ULL(35, 32) /* Interrupts num */
+
+/* Port Control Register Bitfield */
+#define PORT_CTRL_SFTRST BIT_ULL(0) /* Port soft reset */
+/* Latency tolerance reporting. '1' >= 40us, '0' < 40us.*/
+#define PORT_CTRL_LATENCY BIT_ULL(2)
+#define PORT_CTRL_SFTRST_ACK BIT_ULL(4) /* HW ack for reset */
+/**
+ * struct dfl_fpga_port_ops - port ops
+ *
+ * @name: name of this port ops, to match with port platform device.
+ * @owner: pointer to the module which owns this port ops.
+ * @node: node to link port ops to global list.
+ * @get_id: get port id from hardware.
+ * @enable_set: enable/disable the port.
+ */
+struct dfl_fpga_port_ops {
+ const char *name;
+ struct module *owner;
+ struct list_head node;
+ int (*get_id)(struct platform_device *pdev);
+ int (*enable_set)(struct platform_device *pdev, bool enable);
+};
+
+void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
+void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
+void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
+int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
+
+/**
+ * struct dfl_feature_driver - sub feature's driver
+ *
+ * @id: sub feature id.
+ * @ops: ops of this sub feature.
+ */
+struct dfl_feature_driver {
+ u64 id;
+ const struct dfl_feature_ops *ops;
+};
+
+/**
+ * struct dfl_feature - sub feature of the feature devices
+ *
+ * @id: sub feature id.
+ * @resource_index: each sub feature has one mmio resource for its registers.
+ * this index is used to find its mmio resource from the
+ * feature dev (platform device)'s resources.
+ * @ioaddr: mapped mmio resource address.
+ * @ops: ops of this sub feature.
+ */
+struct dfl_feature {
+ u64 id;
+ int resource_index;
+ void __iomem *ioaddr;
+ const struct dfl_feature_ops *ops;
+};
+
+#define DEV_STATUS_IN_USE 0
+
+/**
+ * struct dfl_feature_platform_data - platform data for feature devices
+ *
+ * @node: node to link feature devs to container device's port_dev_list.
+ * @lock: mutex to protect platform data.
+ * @cdev: cdev of feature dev.
+ * @dev: ptr to platform device linked with this platform data.
+ * @dfl_cdev: ptr to container device.
+ * @disable_count: count for port disable.
+ * @num: number of sub features.
+ * @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
+ * @private: ptr to feature dev private data.
+ * @features: sub features of this feature dev.
+ */
+struct dfl_feature_platform_data {
+ struct list_head node;
+ struct mutex lock;
+ struct cdev cdev;
+ struct platform_device *dev;
+ struct dfl_fpga_cdev *dfl_cdev;
+ unsigned int disable_count;
+ unsigned long dev_status;
+ void *private;
+ int num;
+ struct dfl_feature features[0];
+};
+
+static inline
+int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata)
+{
+ /* Test and set IN_USE flags to ensure file is exclusively used */
+ if (test_and_set_bit_lock(DEV_STATUS_IN_USE, &pdata->dev_status))
+ return -EBUSY;
+
+ return 0;
+}
+
+static inline
+void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
+{
+ clear_bit_unlock(DEV_STATUS_IN_USE, &pdata->dev_status);
+}
+
+static inline
+void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
+ void *private)
+{
+ pdata->private = private;
+}
+
+static inline
+void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->private;
+}
+
+struct dfl_feature_ops {
+ int (*init)(struct platform_device *pdev, struct dfl_feature *feature);
+ void (*uinit)(struct platform_device *pdev,
+ struct dfl_feature *feature);
+ long (*ioctl)(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg);
+};
+
+#define DFL_FPGA_FEATURE_DEV_FME "dfl-fme"
+#define DFL_FPGA_FEATURE_DEV_PORT "dfl-port"
+
+static inline int dfl_feature_platform_data_size(const int num)
+{
+ return sizeof(struct dfl_feature_platform_data) +
+ num * sizeof(struct dfl_feature);
+}
+
+void dfl_fpga_dev_feature_uinit(struct platform_device *pdev);
+int dfl_fpga_dev_feature_init(struct platform_device *pdev,
+ struct dfl_feature_driver *feature_drvs);
+
+int dfl_fpga_dev_ops_register(struct platform_device *pdev,
+ const struct file_operations *fops,
+ struct module *owner);
+void dfl_fpga_dev_ops_unregister(struct platform_device *pdev);
+
+static inline
+struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
+{
+ struct dfl_feature_platform_data *pdata;
+
+ pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
+ cdev);
+ return pdata->dev;
+}
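
As a hedged sketch of how these helpers combine in a feature device's file operations (example_open and example_release are assumed names): the open handler claims exclusive use of the device file and the release handler drops it.

static int example_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata =
		dev_get_platdata(&fdev->dev);
	int ret;

	/* fails with -EBUSY if the device file is already in use */
	ret = dfl_feature_dev_use_begin(pdata);
	if (ret)
		return ret;

	filp->private_data = pdata;
	return 0;
}

static int example_release(struct inode *inode, struct file *filp)
{
	struct dfl_feature_platform_data *pdata = filp->private_data;

	dfl_feature_dev_use_end(pdata);
	return 0;
}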
+
+#define dfl_fpga_dev_for_each_feature(pdata, feature) \
+ for ((feature) = (pdata)->features; \
+ (feature) < (pdata)->features + (pdata)->num; (feature)++)
+
+static inline
+struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u64 id)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature *feature;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (feature->id == id)
+ return feature;
+
+ return NULL;
+}
+
+static inline
+void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u64 id)
+{
+ struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
+
+ if (feature && feature->ioaddr)
+ return feature->ioaddr;
+
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline bool is_dfl_feature_present(struct device *dev, u64 id)
+{
+ return !!dfl_get_feature_ioaddr_by_id(dev, id);
+}
+
+static inline
+struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->dev->dev.parent->parent;
+}
+
+static inline bool dfl_feature_is_fme(void __iomem *base)
+{
+ u64 v = readq(base + DFH);
+
+ return (FIELD_GET(DFH_TYPE, v) == DFH_TYPE_FIU) &&
+ (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_FME);
+}
+
+static inline bool dfl_feature_is_port(void __iomem *base)
+{
+ u64 v = readq(base + DFH);
+
+ return (FIELD_GET(DFH_TYPE, v) == DFH_TYPE_FIU) &&
+ (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT);
+}
+
+/**
+ * struct dfl_fpga_enum_info - DFL FPGA enumeration information
+ *
+ * @dev: parent device.
+ * @dfls: list of device feature lists.
+ */
+struct dfl_fpga_enum_info {
+ struct device *dev;
+ struct list_head dfls;
+};
+
+/**
+ * struct dfl_fpga_enum_dfl - DFL FPGA enumeration device feature list info
+ *
+ * @start: base address of this device feature list.
+ * @len: size of this device feature list.
+ * @ioaddr: mapped base address of this device feature list.
+ * @node: node in list of device feature lists.
+ */
+struct dfl_fpga_enum_dfl {
+ resource_size_t start;
+ resource_size_t len;
+
+ void __iomem *ioaddr;
+
+ struct list_head node;
+};
+
+struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev);
+int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
+ resource_size_t start, resource_size_t len,
+ void __iomem *ioaddr);
+void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
+
+/**
+ * struct dfl_fpga_cdev - container device of DFL based FPGA
+ *
+ * @parent: parent device of this container device.
+ * @region: base fpga region.
+ * @fme_dev: FME feature device under this container device.
+ * @lock: mutex lock to protect the port device list.
+ * @port_dev_list: list of all port feature devices under this container device.
+ */
+struct dfl_fpga_cdev {
+ struct device *parent;
+ struct fpga_region *region;
+ struct device *fme_dev;
+ struct mutex lock;
+ struct list_head port_dev_list;
+};
+
+struct dfl_fpga_cdev *
+dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
+void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);
+
+/*
+ * need to drop the device reference with put_device() after use port platform
+ * device returned by __dfl_fpga_cdev_find_port and dfl_fpga_cdev_find_port
+ * functions.
+ */
+struct platform_device *
+__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *));
+
+static inline struct platform_device *
+dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *))
+{
+ struct platform_device *pdev;
+
+ mutex_lock(&cdev->lock);
+ pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
+ mutex_unlock(&cdev->lock);
+
+ return pdev;
+}
+#endif /* __FPGA_DFL_H */
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index c1564cf827fe..a41b07e37884 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -406,12 +406,40 @@ static ssize_t state_show(struct device *dev,
return sprintf(buf, "%s\n", state_str[mgr->state]);
}
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ u64 status;
+ int len = 0;
+
+ if (!mgr->mops->status)
+ return -ENOENT;
+
+ status = mgr->mops->status(mgr);
+
+ if (status & FPGA_MGR_STATUS_OPERATION_ERR)
+ len += sprintf(buf + len, "reconfig operation error\n");
+ if (status & FPGA_MGR_STATUS_CRC_ERR)
+ len += sprintf(buf + len, "reconfig CRC error\n");
+ if (status & FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR)
+ len += sprintf(buf + len, "reconfig incompatible image\n");
+ if (status & FPGA_MGR_STATUS_IP_PROTOCOL_ERR)
+ len += sprintf(buf + len, "reconfig IP protocol error\n");
+ if (status & FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR)
+ len += sprintf(buf + len, "reconfig fifo overflow error\n");
+
+ return len;
+}
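
On the producer side (a hedged sketch; example_mgr_status and example_mops are assumed names), a low-level FPGA manager driver reports these bits through the new status callback in its fpga_manager_ops:

static u64 example_mgr_status(struct fpga_manager *mgr)
{
	/* for illustration, unconditionally report a CRC error */
	return FPGA_MGR_STATUS_CRC_ERR;
}

static const struct fpga_manager_ops example_mops = {
	.status = example_mgr_status,
	/* .write_init / .write / .write_complete elided in this sketch */
};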
+
static DEVICE_ATTR_RO(name);
static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RO(status);
static struct attribute *fpga_mgr_attrs[] = {
&dev_attr_name.attr,
&dev_attr_state.attr,
+ &dev_attr_status.attr,
NULL,
};
ATTRIBUTE_GROUPS(fpga_mgr);
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 6d214d75c7be..0d65220d5ec5 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -158,6 +158,27 @@ err_put_region:
}
EXPORT_SYMBOL_GPL(fpga_region_program_fpga);
+static ssize_t compat_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_region *region = to_fpga_region(dev);
+
+ if (!region->compat_id)
+ return -ENOENT;
+
+ return sprintf(buf, "%016llx%016llx\n",
+ (unsigned long long)region->compat_id->id_h,
+ (unsigned long long)region->compat_id->id_l);
+}
+
+static DEVICE_ATTR_RO(compat_id);
+
+static struct attribute *fpga_region_attrs[] = {
+ &dev_attr_compat_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fpga_region);
+
/**
* fpga_region_create - alloc and init a struct fpga_region
* @dev: device parent
@@ -258,6 +279,7 @@ static int __init fpga_region_init(void)
if (IS_ERR(fpga_region_class))
return PTR_ERR(fpga_region_class);
+ fpga_region_class->dev_groups = fpga_region_groups;
fpga_region_class->dev_release = fpga_region_dev_release;
return 0;
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index a326ed663d3c..af3a20dd5aa4 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -12,6 +12,21 @@ menuconfig FSI
if FSI
+config FSI_NEW_DEV_NODE
+ bool "Create '/dev/fsi' directory for char devices"
+ default n
+ ---help---
+ This option causes char devices created for FSI devices to be
+ located under a common /dev/fsi/ directory. Set to N unless your
+ userspace has been updated to handle the new location.
+
+ Additionally, it also causes the char device names to be offset
+ by one so that chip 0 will have /dev/scom1 and chip 1 /dev/scom2
+ to match old userspace expectations.
+
+ New userspace will use udev rules to generate predictable access
+ symlinks in /dev/fsi/by-path when this option is enabled.
+
config FSI_MASTER_GPIO
tristate "GPIO-based FSI master"
depends on GPIOLIB
@@ -27,9 +42,26 @@ config FSI_MASTER_HUB
allow chaining of FSI links to an arbitrary depth. This allows for
a high target device fanout.
+config FSI_MASTER_AST_CF
+ tristate "FSI master based on Aspeed ColdFire coprocessor"
+ depends on GPIOLIB
+ depends on GPIO_ASPEED
+ ---help---
+ This option enables an FSI master using the AST2400 and AST2500 GPIO
+ lines driven by the internal ColdFire coprocessor. This requires
+ the corresponding machine specific ColdFire firmware to be available.
+
config FSI_SCOM
tristate "SCOM FSI client device driver"
---help---
This option enables an FSI based SCOM device driver.
+config FSI_SBEFIFO
+ tristate "SBEFIFO FSI client device driver"
+ depends on OF_ADDRESS
+ ---help---
+ This option enables an FSI based SBEFIFO device driver. The SBEFIFO is
+ a pipe-like FSI device for communicating with the self boot engine
+ (SBE) on POWER processors.
+
endif
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
index 65eb99dfafdb..a50d6ce22fb3 100644
--- a/drivers/fsi/Makefile
+++ b/drivers/fsi/Makefile
@@ -2,4 +2,6 @@
obj-$(CONFIG_FSI) += fsi-core.o
obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
+obj-$(CONFIG_FSI_MASTER_AST_CF) += fsi-master-ast-cf.o
obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
+obj-$(CONFIG_FSI_SBEFIFO) += fsi-sbefifo.o
diff --git a/drivers/fsi/cf-fsi-fw.h b/drivers/fsi/cf-fsi-fw.h
new file mode 100644
index 000000000000..712df0461911
--- /dev/null
+++ b/drivers/fsi/cf-fsi-fw.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __CF_FSI_FW_H
+#define __CF_FSI_FW_H
+
+/*
+ * uCode file layout
+ *
+ * 0000...03ff : m68k exception vectors
+ * 0400...04ff : Header info & boot config block
+ * 0500....... : Code & stack
+ */
+
+/*
+ * Header info & boot config area
+ *
+ * The Header info is built into the ucode and provides version and
+ * platform information.
+ *
+ * The Boot config needs to be adjusted by the ARM prior to starting
+ * the ucode if the Command/Status area isn't at 0x320000 in CF space
+ * (i.e. the beginning of SRAM).
+ */
+
+#define HDR_OFFSET 0x400
+
+/* Info: Signature & version */
+#define HDR_SYS_SIG 0x00 /* 2 bytes system signature */
+#define SYS_SIG_SHARED 0x5348
+#define SYS_SIG_SPLIT 0x5350
+#define HDR_FW_VERS 0x02 /* 2 bytes Major.Minor */
+#define HDR_API_VERS 0x04 /* 2 bytes Major.Minor */
+#define API_VERSION_MAJ 2 /* Current version */
+#define API_VERSION_MIN 1
+#define HDR_FW_OPTIONS 0x08 /* 4 bytes option flags */
+#define FW_OPTION_TRACE_EN 0x00000001 /* FW tracing enabled */
+#define FW_OPTION_CONT_CLOCK 0x00000002 /* Continuous clocking supported */
+#define HDR_FW_SIZE 0x10 /* 4 bytes size for combo image */
+
+/* Boot Config: Address of Command/Status area */
+#define HDR_CMD_STAT_AREA 0x80 /* 4 bytes CF address */
+#define HDR_FW_CONTROL 0x84 /* 4 bytes control flags */
+#define FW_CONTROL_CONT_CLOCK 0x00000002 /* Continuous clocking enabled */
+#define FW_CONTROL_DUMMY_RD 0x00000004 /* Extra dummy read (AST2400) */
+#define FW_CONTROL_USE_STOP 0x00000008 /* Use STOP instructions */
+#define HDR_CLOCK_GPIO_VADDR 0x90 /* 2 bytes offset from GPIO base */
+#define HDR_CLOCK_GPIO_DADDR 0x92 /* 2 bytes offset from GPIO base */
+#define HDR_DATA_GPIO_VADDR 0x94 /* 2 bytes offset from GPIO base */
+#define HDR_DATA_GPIO_DADDR 0x96 /* 2 bytes offset from GPIO base */
+#define HDR_TRANS_GPIO_VADDR 0x98 /* 2 bytes offset from GPIO base */
+#define HDR_TRANS_GPIO_DADDR 0x9a /* 2 bytes offset from GPIO base */
+#define HDR_CLOCK_GPIO_BIT 0x9c /* 1 byte bit number */
+#define HDR_DATA_GPIO_BIT 0x9d /* 1 byte bit number */
+#define HDR_TRANS_GPIO_BIT 0x9e /* 1 byte bit number */
+
+/*
+ * Command/Status area layout: Main part
+ */
+
+/* Command/Status register:
+ *
+ * +---------------------------+
+ * | STAT | RLEN | CLEN | CMD |
+ * | 8 | 8 | 8 | 8 |
+ * +---------------------------+
+ * | | | |
+ * status | | |
+ * Response len | |
+ * (in bits) | |
+ * | |
+ * Command len |
+ * (in bits) |
+ * |
+ * Command code
+ *
+ * Due to the big-endian layout, a single-byte read of the register
+ * returns the status byte
+ */
+#define CMD_STAT_REG 0x00
+#define CMD_REG_CMD_MASK 0x000000ff
+#define CMD_REG_CMD_SHIFT 0
+#define CMD_NONE 0x00
+#define CMD_COMMAND 0x01
+#define CMD_BREAK 0x02
+#define CMD_IDLE_CLOCKS 0x03 /* clen = #clocks */
+#define CMD_INVALID 0xff
+#define CMD_REG_CLEN_MASK 0x0000ff00
+#define CMD_REG_CLEN_SHIFT 8
+#define CMD_REG_RLEN_MASK 0x00ff0000
+#define CMD_REG_RLEN_SHIFT 16
+#define CMD_REG_STAT_MASK 0xff000000
+#define CMD_REG_STAT_SHIFT 24
+#define STAT_WORKING 0x00
+#define STAT_COMPLETE 0x01
+#define STAT_ERR_INVAL_CMD 0x80
+#define STAT_ERR_INVAL_IRQ 0x81
+#define STAT_ERR_MTOE 0x82
+
+/* Response tag & CRC */
+#define STAT_RTAG 0x04
+
+/* Response CRC */
+#define STAT_RCRC 0x05
+
+/* Echo and Send delay */
+#define ECHO_DLY_REG 0x08
+#define SEND_DLY_REG 0x09
+
+/* Command data area
+ *
+ * Last byte of message must be left aligned
+ */
+#define CMD_DATA 0x10 /* 64 bit of data */
+
+/* Response data area, right aligned, unused top bits are 1 */
+#define RSP_DATA 0x20 /* 32 bit of data */
+
+/* Misc */
+#define INT_CNT 0x30 /* 32-bit interrupt count */
+#define BAD_INT_VEC 0x34 /* 32-bit bad interrupt vector # */
+#define CF_STARTED 0x38 /* byte, set to -1 when copro started */
+#define CLK_CNT 0x3c /* 32-bit, clock count (debug only) */
+
+/*
+ * SRAM layout: GPIO arbitration part
+ */
+#define ARB_REG 0x40
+#define ARB_ARM_REQ 0x01
+#define ARB_ARM_ACK 0x02
+
+/* Misc2 */
+#define CF_RESET_D0 0x50
+#define CF_RESET_D1 0x54
+#define BAD_INT_S0 0x58
+#define BAD_INT_S1 0x5c
+#define STOP_CNT 0x60
+
+/* Internal */
+
+/*
+ * SRAM layout: Trace buffer (debug builds only)
+ */
+#define TRACEBUF 0x100
+#define TR_CLKOBIT0 0xc0
+#define TR_CLKOBIT1 0xc1
+#define TR_CLKOSTART 0x82
+#define TR_OLEN 0x83 /* + len */
+#define TR_CLKZ 0x84 /* + count */
+#define TR_CLKWSTART 0x85
+#define TR_CLKTAG 0x86 /* + tag */
+#define TR_CLKDATA 0x87 /* + len */
+#define TR_CLKCRC 0x88 /* + raw crc */
+#define TR_CLKIBIT0 0x90
+#define TR_CLKIBIT1 0x91
+#define TR_END 0xff
+
+#endif /* __CF_FSI_FW_H */
+
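To illustrate how the command/status word above is assembled, here is a sketch mirroring the logic of send_request() in fsi-master-ast-cf.c further down in this patch (the helper name is hypothetical):

	/* Pack a command of 'clen' message bits expecting 'rlen' response
	 * bits; the ColdFire overwrites the top (STAT) byte as it works.
	 */
	static inline uint32_t pack_copro_cmd(uint8_t clen, uint8_t rlen)
	{
		return CMD_COMMAND
			| ((uint32_t)clen << CMD_REG_CLEN_SHIFT)
			| ((uint32_t)rlen << CMD_REG_RLEN_SHIFT);
	}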
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 4c03d6933646..2c31563fdcae 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -11,6 +11,11 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
+ * TODO:
+ * - Rework topology
+ * - s/chip_id/chip_loc
+ * - s/cfam/chip (cfam_id -> chip_id etc...)
*/
#include <linux/crc4.h>
@@ -21,6 +26,9 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
#include "fsi-master.h"
@@ -78,9 +86,15 @@ static DEFINE_IDA(master_ida);
struct fsi_slave {
struct device dev;
struct fsi_master *master;
- int id;
- int link;
+ struct cdev cdev;
+ int cdev_idx;
+ int id; /* FSI address */
+ int link; /* FSI link# */
+ u32 cfam_id;
+ int chip_id;
uint32_t size; /* size of slave address space */
+ u8 t_send_delay;
+ u8 t_echo_delay;
};
#define to_fsi_master(d) container_of(d, struct fsi_master, dev)
@@ -89,6 +103,13 @@ struct fsi_slave {
static const int slave_retries = 2;
static int discard_errors;
+static dev_t fsi_base_dev;
+static DEFINE_IDA(fsi_minor_ida);
+#define FSI_CHAR_MAX_DEVICES 0x1000
+
+/* Legacy /dev numbering: 4 devices per chip, 16 chips */
+#define FSI_CHAR_LEGACY_TOP 64
+
static int fsi_master_read(struct fsi_master *master, int link,
uint8_t slave_id, uint32_t addr, void *val, size_t size);
static int fsi_master_write(struct fsi_master *master, int link,
@@ -190,7 +211,7 @@ static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
{
struct fsi_master *master = slave->master;
- uint32_t irq, stat;
+ __be32 irq, stat;
int rc, link;
uint8_t id;
@@ -215,7 +236,53 @@ static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
&irq, sizeof(irq));
}
-static int fsi_slave_set_smode(struct fsi_master *master, int link, int id);
+/* Encode slave local bus echo delay */
+static inline uint32_t fsi_smode_echodly(int x)
+{
+ return (x & FSI_SMODE_ED_MASK) << FSI_SMODE_ED_SHIFT;
+}
+
+/* Encode slave local bus send delay */
+static inline uint32_t fsi_smode_senddly(int x)
+{
+ return (x & FSI_SMODE_SD_MASK) << FSI_SMODE_SD_SHIFT;
+}
+
+/* Encode slave local bus clock rate ratio */
+static inline uint32_t fsi_smode_lbcrr(int x)
+{
+ return (x & FSI_SMODE_LBCRR_MASK) << FSI_SMODE_LBCRR_SHIFT;
+}
+
+/* Encode slave ID */
+static inline uint32_t fsi_smode_sid(int x)
+{
+ return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
+}
+
+static uint32_t fsi_slave_smode(int id, u8 t_senddly, u8 t_echodly)
+{
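+ /*
+ * Note: both delays are programmed minus one, so the 16/16 defaults
+ * set by fsi_slave_init() go on the wire as 0xf/0xf, matching the
+ * fixed values the previous code used.
+ */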
+ return FSI_SMODE_WSC | FSI_SMODE_ECRC
+ | fsi_smode_sid(id)
+ | fsi_smode_echodly(t_echodly - 1) | fsi_smode_senddly(t_senddly - 1)
+ | fsi_smode_lbcrr(0x8);
+}
+
+static int fsi_slave_set_smode(struct fsi_slave *slave)
+{
+ uint32_t smode;
+ __be32 data;
+
+ /* Set our smode register. Since only slave ID 0 is currently
+ * supported per link, this also enables extended slave addressing.
+ */
+ smode = fsi_slave_smode(slave->id, slave->t_send_delay, slave->t_echo_delay);
+ data = cpu_to_be32(smode);
+
+ return fsi_master_write(slave->master, slave->link, slave->id,
+ FSI_SLAVE_BASE + FSI_SMODE,
+ &data, sizeof(data));
+}
static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
uint32_t addr, size_t size)
@@ -223,7 +290,7 @@ static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
struct fsi_master *master = slave->master;
int rc, link;
uint32_t reg;
- uint8_t id;
+ uint8_t id, send_delay, echo_delay;
if (discard_errors)
return -1;
@@ -254,15 +321,26 @@ static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
}
}
+ send_delay = slave->t_send_delay;
+ echo_delay = slave->t_echo_delay;
+
/* getting serious, reset the slave via BREAK */
rc = fsi_master_break(master, link);
if (rc)
return rc;
- rc = fsi_slave_set_smode(master, link, id);
+ slave->t_send_delay = send_delay;
+ slave->t_echo_delay = echo_delay;
+
+ rc = fsi_slave_set_smode(slave);
if (rc)
return rc;
+ if (master->link_config)
+ master->link_config(master, link,
+ slave->t_send_delay,
+ slave->t_echo_delay);
+
return fsi_slave_report_and_clear_errors(slave);
}
@@ -390,7 +468,6 @@ static struct device_node *fsi_device_find_of_node(struct fsi_device *dev)
static int fsi_slave_scan(struct fsi_slave *slave)
{
uint32_t engine_addr;
- uint32_t conf;
int rc, i;
/*
@@ -404,15 +481,17 @@ static int fsi_slave_scan(struct fsi_slave *slave)
for (i = 2; i < engine_page_size / sizeof(uint32_t); i++) {
uint8_t slots, version, type, crc;
struct fsi_device *dev;
+ uint32_t conf;
+ __be32 data;
- rc = fsi_slave_read(slave, (i + 1) * sizeof(conf),
- &conf, sizeof(conf));
+ rc = fsi_slave_read(slave, (i + 1) * sizeof(data),
+ &data, sizeof(data));
if (rc) {
dev_warn(&slave->dev,
"error reading slave registers\n");
return -1;
}
- conf = be32_to_cpu(conf);
+ conf = be32_to_cpu(data);
crc = crc4(0, conf, 32);
if (crc) {
@@ -539,79 +618,11 @@ static const struct bin_attribute fsi_slave_raw_attr = {
.write = fsi_slave_sysfs_raw_write,
};
-static ssize_t fsi_slave_sysfs_term_write(struct file *file,
- struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
- struct fsi_master *master = slave->master;
-
- if (!master->term)
- return -ENODEV;
-
- master->term(master, slave->link, slave->id);
- return count;
-}
-
-static const struct bin_attribute fsi_slave_term_attr = {
- .attr = {
- .name = "term",
- .mode = 0200,
- },
- .size = 0,
- .write = fsi_slave_sysfs_term_write,
-};
-
-/* Encode slave local bus echo delay */
-static inline uint32_t fsi_smode_echodly(int x)
-{
- return (x & FSI_SMODE_ED_MASK) << FSI_SMODE_ED_SHIFT;
-}
-
-/* Encode slave local bus send delay */
-static inline uint32_t fsi_smode_senddly(int x)
-{
- return (x & FSI_SMODE_SD_MASK) << FSI_SMODE_SD_SHIFT;
-}
-
-/* Encode slave local bus clock rate ratio */
-static inline uint32_t fsi_smode_lbcrr(int x)
-{
- return (x & FSI_SMODE_LBCRR_MASK) << FSI_SMODE_LBCRR_SHIFT;
-}
-
-/* Encode slave ID */
-static inline uint32_t fsi_smode_sid(int x)
-{
- return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
-}
-
-static uint32_t fsi_slave_smode(int id)
-{
- return FSI_SMODE_WSC | FSI_SMODE_ECRC
- | fsi_smode_sid(id)
- | fsi_smode_echodly(0xf) | fsi_smode_senddly(0xf)
- | fsi_smode_lbcrr(0x8);
-}
-
-static int fsi_slave_set_smode(struct fsi_master *master, int link, int id)
-{
- uint32_t smode;
-
- /* set our smode register with the slave ID field to 0; this enables
- * extended slave addressing
- */
- smode = fsi_slave_smode(id);
- smode = cpu_to_be32(smode);
-
- return fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SMODE,
- &smode, sizeof(smode));
-}
-
static void fsi_slave_release(struct device *dev)
{
struct fsi_slave *slave = to_fsi_slave(dev);
+ fsi_free_minor(slave->dev.devt);
of_node_put(dev->of_node);
kfree(slave);
}
@@ -659,11 +670,303 @@ static struct device_node *fsi_slave_find_of_node(struct fsi_master *master,
return NULL;
}
+static ssize_t cfam_read(struct file *filep, char __user *buf, size_t count,
+ loff_t *offset)
+{
+ struct fsi_slave *slave = filep->private_data;
+ size_t total_len, read_len;
+ loff_t off = *offset;
+ ssize_t rc;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+ return -EINVAL;
+
+ for (total_len = 0; total_len < count; total_len += read_len) {
+ __be32 data;
+
+ read_len = min_t(size_t, count - total_len, 4);
+ read_len -= off & 0x3;
+
+ rc = fsi_slave_read(slave, off, &data, read_len);
+ if (rc)
+ goto fail;
+ rc = copy_to_user(buf + total_len, &data, read_len);
+ if (rc) {
+ rc = -EFAULT;
+ goto fail;
+ }
+ off += read_len;
+ }
+ rc = count;
+ fail:
+ *offset = off;
+ return rc;
+}
+
+static ssize_t cfam_write(struct file *filep, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct fsi_slave *slave = filep->private_data;
+ size_t total_len, write_len;
+ loff_t off = *offset;
+ ssize_t rc;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+ return -EINVAL;
+
+ for (total_len = 0; total_len < count; total_len += write_len) {
+ __be32 data;
+
+ write_len = min_t(size_t, count - total_len, 4);
+ write_len -= off & 0x3;
+
+ rc = copy_from_user(&data, buf + total_len, write_len);
+ if (rc) {
+ rc = -EFAULT;
+ goto fail;
+ }
+ rc = fsi_slave_write(slave, off, &data, write_len);
+ if (rc)
+ goto fail;
+ off += write_len;
+ }
+ rc = count;
+ fail:
+ *offset = off;
+ return rc;
+}
+
+static loff_t cfam_llseek(struct file *file, loff_t offset, int whence)
+{
+ switch (whence) {
+ case SEEK_CUR:
+ break;
+ case SEEK_SET:
+ file->f_pos = offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return offset;
+}
+
+static int cfam_open(struct inode *inode, struct file *file)
+{
+ struct fsi_slave *slave = container_of(inode->i_cdev, struct fsi_slave, cdev);
+
+ file->private_data = slave;
+
+ return 0;
+}
+
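+/*
+ * Usage sketch (assuming legacy naming, where chip 0 is cfam1):
+ *
+ *	uint32_t data;
+ *	int fd = open("/dev/cfam1", O_RDONLY);
+ *	pread(fd, &data, 4, 0);    (CFAM config word, BE byte order)
+ *
+ * cfam_read()/cfam_write() above split accesses into at most 4-byte,
+ * naturally aligned FSI transfers.
+ */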
+static const struct file_operations cfam_fops = {
+ .owner = THIS_MODULE,
+ .open = cfam_open,
+ .llseek = cfam_llseek,
+ .read = cfam_read,
+ .write = cfam_write,
+};
+
+static ssize_t send_term_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+ struct fsi_master *master = slave->master;
+
+ if (!master->term)
+ return -ENODEV;
+
+ master->term(master, slave->link, slave->id);
+ return count;
+}
+
+static DEVICE_ATTR_WO(send_term);
+
+static ssize_t slave_send_echo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+ return sprintf(buf, "%u\n", slave->t_send_delay);
+}
+
+static ssize_t slave_send_echo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+ struct fsi_master *master = slave->master;
+ unsigned long val;
+ int rc;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val < 1 || val > 16)
+ return -EINVAL;
+
+ if (!master->link_config)
+ return -ENXIO;
+
+ /* Current HW mandates that send and echo delay are identical */
+ slave->t_send_delay = val;
+ slave->t_echo_delay = val;
+
+ rc = fsi_slave_set_smode(slave);
+ if (rc < 0)
+ return rc;
+ if (master->link_config)
+ master->link_config(master, slave->link,
+ slave->t_send_delay,
+ slave->t_echo_delay);
+
+ return count;
+}
+
+static DEVICE_ATTR(send_echo_delays, 0600,
+ slave_send_echo_show, slave_send_echo_store);
+
+static ssize_t chip_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+ return sprintf(buf, "%d\n", slave->chip_id);
+}
+
+static DEVICE_ATTR_RO(chip_id);
+
+static ssize_t cfam_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+ return sprintf(buf, "0x%x\n", slave->cfam_id);
+}
+
+static DEVICE_ATTR_RO(cfam_id);
+
+static struct attribute *cfam_attr[] = {
+ &dev_attr_send_echo_delays.attr,
+ &dev_attr_chip_id.attr,
+ &dev_attr_cfam_id.attr,
+ &dev_attr_send_term.attr,
+ NULL,
+};
+
+static const struct attribute_group cfam_attr_group = {
+ .attrs = cfam_attr,
+};
+
+static const struct attribute_group *cfam_attr_groups[] = {
+ &cfam_attr_group,
+ NULL,
+};
+
+static char *cfam_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+ return kasprintf(GFP_KERNEL, "fsi/cfam%d", slave->cdev_idx);
+#else
+ return kasprintf(GFP_KERNEL, "cfam%d", slave->cdev_idx);
+#endif
+}
+
+static const struct device_type cfam_type = {
+ .name = "cfam",
+ .devnode = cfam_devnode,
+ .groups = cfam_attr_groups
+};
+
+static char *fsi_cdev_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+ return kasprintf(GFP_KERNEL, "fsi/%s", dev_name(dev));
+#else
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+#endif
+}
+
+const struct device_type fsi_cdev_type = {
+ .name = "fsi-cdev",
+ .devnode = fsi_cdev_devnode,
+};
+EXPORT_SYMBOL_GPL(fsi_cdev_type);
+
+/* Backward compatible /dev/ numbering in "old style" mode */
+static int fsi_adjust_index(int index)
+{
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+ return index;
+#else
+ return index + 1;
+#endif
+}
+
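+/*
+ * Worked example: chip_id 3 with type 0 first tries the legacy minor
+ * (3 << 4) | 0 = 48; if it's free, the chardev index becomes 3 (or 4
+ * with legacy naming, see fsi_adjust_index()). Otherwise a dynamic
+ * minor at or above FSI_CHAR_LEGACY_TOP is handed out.
+ */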
+static int __fsi_get_new_minor(struct fsi_slave *slave, enum fsi_dev_type type,
+ dev_t *out_dev, int *out_index)
+{
+ int cid = slave->chip_id;
+ int id;
+
+ /* Check if we qualify for legacy numbering */
+ if (cid >= 0 && cid < 16 && type < 4) {
+ /* Try reserving the legacy number */
+ id = (cid << 4) | type;
+ id = ida_simple_get(&fsi_minor_ida, id, id + 1, GFP_KERNEL);
+ if (id >= 0) {
+ *out_index = fsi_adjust_index(cid);
+ *out_dev = fsi_base_dev + id;
+ return 0;
+ }
+ /* Other failure */
+ if (id != -ENOSPC)
+ return id;
+ /* Fallback to non-legacy allocation */
+ }
+ id = ida_simple_get(&fsi_minor_ida, FSI_CHAR_LEGACY_TOP,
+ FSI_CHAR_MAX_DEVICES, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ *out_index = fsi_adjust_index(id);
+ *out_dev = fsi_base_dev + id;
+ return 0;
+}
+
+int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type,
+ dev_t *out_dev, int *out_index)
+{
+ return __fsi_get_new_minor(fdev->slave, type, out_dev, out_index);
+}
+EXPORT_SYMBOL_GPL(fsi_get_new_minor);
+
+void fsi_free_minor(dev_t dev)
+{
+ ida_simple_remove(&fsi_minor_ida, MINOR(dev));
+}
+EXPORT_SYMBOL_GPL(fsi_free_minor);
+
static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
{
- uint32_t chip_id, llmode;
+ uint32_t cfam_id;
struct fsi_slave *slave;
uint8_t crc;
+ __be32 data, llmode;
int rc;
/* Currently, we only support single slaves on a link, and use the
@@ -672,31 +975,23 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
if (id != 0)
return -EINVAL;
- rc = fsi_master_read(master, link, id, 0, &chip_id, sizeof(chip_id));
+ rc = fsi_master_read(master, link, id, 0, &data, sizeof(data));
if (rc) {
dev_dbg(&master->dev, "can't read slave %02x:%02x %d\n",
link, id, rc);
return -ENODEV;
}
- chip_id = be32_to_cpu(chip_id);
+ cfam_id = be32_to_cpu(data);
- crc = crc4(0, chip_id, 32);
+ crc = crc4(0, cfam_id, 32);
if (crc) {
- dev_warn(&master->dev, "slave %02x:%02x invalid chip id CRC!\n",
+ dev_warn(&master->dev, "slave %02x:%02x invalid cfam id CRC!\n",
link, id);
return -EIO;
}
dev_dbg(&master->dev, "fsi: found chip %08x at %02x:%02x:%02x\n",
- chip_id, master->idx, link, id);
-
- rc = fsi_slave_set_smode(master, link, id);
- if (rc) {
- dev_warn(&master->dev,
- "can't set smode on slave:%02x:%02x %d\n",
- link, id, rc);
- return -ENODEV;
- }
+ cfam_id, master->idx, link, id);
/* If we're behind a master that doesn't provide a self-running bus
* clock, put the slave into async mode
@@ -719,30 +1014,61 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
if (!slave)
return -ENOMEM;
- slave->master = master;
+ dev_set_name(&slave->dev, "slave@%02x:%02x", link, id);
+ slave->dev.type = &cfam_type;
slave->dev.parent = &master->dev;
slave->dev.of_node = fsi_slave_find_of_node(master, link, id);
slave->dev.release = fsi_slave_release;
+ device_initialize(&slave->dev);
+ slave->cfam_id = cfam_id;
+ slave->master = master;
slave->link = link;
slave->id = id;
slave->size = FSI_SLAVE_SIZE_23b;
+ slave->t_send_delay = 16;
+ slave->t_echo_delay = 16;
+
+ /* Get chip ID if any */
+ slave->chip_id = -1;
+ if (slave->dev.of_node) {
+ uint32_t prop;
+ if (!of_property_read_u32(slave->dev.of_node, "chip-id", &prop))
+ slave->chip_id = prop;
- dev_set_name(&slave->dev, "slave@%02x:%02x", link, id);
- rc = device_register(&slave->dev);
- if (rc < 0) {
- dev_warn(&master->dev, "failed to create slave device: %d\n",
- rc);
- put_device(&slave->dev);
- return rc;
}
+ /* Allocate a minor in the FSI space */
+ rc = __fsi_get_new_minor(slave, fsi_dev_cfam, &slave->dev.devt,
+ &slave->cdev_idx);
+ if (rc)
+ goto err_free;
+
+ /* Create chardev for userspace access */
+ cdev_init(&slave->cdev, &cfam_fops);
+ rc = cdev_device_add(&slave->cdev, &slave->dev);
+ if (rc) {
+ dev_err(&slave->dev, "Error %d creating slave device\n", rc);
+ goto err_free;
+ }
+
+ rc = fsi_slave_set_smode(slave);
+ if (rc) {
+ dev_warn(&master->dev,
+ "can't set smode on slave:%02x:%02x %d\n",
+ link, id, rc);
+ cdev_device_del(&slave->cdev, &slave->dev);
+ goto err_free;
+ }
+ if (master->link_config)
+ master->link_config(master, link,
+ slave->t_send_delay,
+ slave->t_echo_delay);
+
+ /* Legacy raw file -> to be removed */
rc = device_create_bin_file(&slave->dev, &fsi_slave_raw_attr);
if (rc)
dev_warn(&slave->dev, "failed to create raw attr: %d\n", rc);
- rc = device_create_bin_file(&slave->dev, &fsi_slave_term_attr);
- if (rc)
- dev_warn(&slave->dev, "failed to create term attr: %d\n", rc);
rc = fsi_slave_scan(slave);
if (rc)
@@ -750,6 +1076,10 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
rc);
return rc;
+
+ err_free:
+ put_device(&slave->dev);
+ return rc;
}
/* FSI master support */
@@ -814,12 +1144,16 @@ static int fsi_master_link_enable(struct fsi_master *master, int link)
*/
static int fsi_master_break(struct fsi_master *master, int link)
{
+ int rc = 0;
+
trace_fsi_master_break(master, link);
if (master->send_break)
- return master->send_break(master, link);
+ rc = master->send_break(master, link);
+ if (master->link_config)
+ master->link_config(master, link, 16, 16);
- return 0;
+ return rc;
}
static int fsi_master_scan(struct fsi_master *master)
@@ -854,8 +1188,11 @@ static int fsi_slave_remove_device(struct device *dev, void *arg)
static int fsi_master_remove_slave(struct device *dev, void *arg)
{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
device_for_each_child(dev, NULL, fsi_slave_remove_device);
- device_unregister(dev);
+ cdev_device_del(&slave->cdev, &slave->dev);
+ put_device(dev);
return 0;
}
@@ -866,8 +1203,14 @@ static void fsi_master_unscan(struct fsi_master *master)
int fsi_master_rescan(struct fsi_master *master)
{
+ int rc;
+
+ mutex_lock(&master->scan_lock);
fsi_master_unscan(master);
- return fsi_master_scan(master);
+ rc = fsi_master_scan(master);
+ mutex_unlock(&master->scan_lock);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(fsi_master_rescan);
@@ -903,9 +1246,7 @@ int fsi_master_register(struct fsi_master *master)
int rc;
struct device_node *np;
- if (!master)
- return -EINVAL;
-
+ mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
dev_set_name(&master->dev, "fsi%d", master->idx);
@@ -917,21 +1258,24 @@ int fsi_master_register(struct fsi_master *master)
rc = device_create_file(&master->dev, &dev_attr_rescan);
if (rc) {
- device_unregister(&master->dev);
+ device_del(&master->dev);
ida_simple_remove(&master_ida, master->idx);
return rc;
}
rc = device_create_file(&master->dev, &dev_attr_break);
if (rc) {
- device_unregister(&master->dev);
+ device_del(&master->dev);
ida_simple_remove(&master_ida, master->idx);
return rc;
}
np = dev_of_node(&master->dev);
- if (!of_property_read_bool(np, "no-scan-on-init"))
+ if (!of_property_read_bool(np, "no-scan-on-init")) {
+ mutex_lock(&master->scan_lock);
fsi_master_scan(master);
+ mutex_unlock(&master->scan_lock);
+ }
return 0;
}
@@ -944,7 +1288,9 @@ void fsi_master_unregister(struct fsi_master *master)
master->idx = -1;
}
+ mutex_lock(&master->scan_lock);
fsi_master_unscan(master);
+ mutex_unlock(&master->scan_lock);
device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(fsi_master_unregister);
@@ -996,13 +1342,27 @@ EXPORT_SYMBOL_GPL(fsi_bus_type);
static int __init fsi_init(void)
{
- return bus_register(&fsi_bus_type);
+ int rc;
+
+ rc = alloc_chrdev_region(&fsi_base_dev, 0, FSI_CHAR_MAX_DEVICES, "fsi");
+ if (rc)
+ return rc;
+ rc = bus_register(&fsi_bus_type);
+ if (rc)
+ goto fail_bus;
+ return 0;
+
+ fail_bus:
+ unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
+ return rc;
}
postcore_initcall(fsi_init);
static void fsi_exit(void)
{
bus_unregister(&fsi_bus_type);
+ unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
+ ida_destroy(&fsi_minor_ida);
}
module_exit(fsi_exit);
module_param(discard_errors, int, 0664);
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
new file mode 100644
index 000000000000..04d10ea8d343
--- /dev/null
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -0,0 +1,1440 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corp
+/*
+ * An FSI master controller, using the AST2400/AST2500 GPIO lines
+ * bit-banged by the internal ColdFire coprocessor
+ */
+
+#include <linux/crc4.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/firmware.h>
+#include <linux/gpio/aspeed.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/genalloc.h>
+
+#include "fsi-master.h"
+#include "cf-fsi-fw.h"
+
+#define FW_FILE_NAME "cf-fsi-fw.bin"
+
+/* Common SCU based coprocessor control registers */
+#define SCU_COPRO_CTRL 0x100
+#define SCU_COPRO_RESET 0x00000002
+#define SCU_COPRO_CLK_EN 0x00000001
+
+/* AST2500 specific ones */
+#define SCU_2500_COPRO_SEG0 0x104
+#define SCU_2500_COPRO_SEG1 0x108
+#define SCU_2500_COPRO_SEG2 0x10c
+#define SCU_2500_COPRO_SEG3 0x110
+#define SCU_2500_COPRO_SEG4 0x114
+#define SCU_2500_COPRO_SEG5 0x118
+#define SCU_2500_COPRO_SEG6 0x11c
+#define SCU_2500_COPRO_SEG7 0x120
+#define SCU_2500_COPRO_SEG8 0x124
+#define SCU_2500_COPRO_SEG_SWAP 0x00000001
+#define SCU_2500_COPRO_CACHE_CTL 0x128
+#define SCU_2500_COPRO_CACHE_EN 0x00000001
+#define SCU_2500_COPRO_SEG0_CACHE_EN 0x00000002
+#define SCU_2500_COPRO_SEG1_CACHE_EN 0x00000004
+#define SCU_2500_COPRO_SEG2_CACHE_EN 0x00000008
+#define SCU_2500_COPRO_SEG3_CACHE_EN 0x00000010
+#define SCU_2500_COPRO_SEG4_CACHE_EN 0x00000020
+#define SCU_2500_COPRO_SEG5_CACHE_EN 0x00000040
+#define SCU_2500_COPRO_SEG6_CACHE_EN 0x00000080
+#define SCU_2500_COPRO_SEG7_CACHE_EN 0x00000100
+#define SCU_2500_COPRO_SEG8_CACHE_EN 0x00000200
+
+#define SCU_2400_COPRO_SEG0 0x104
+#define SCU_2400_COPRO_SEG2 0x108
+#define SCU_2400_COPRO_SEG4 0x10c
+#define SCU_2400_COPRO_SEG6 0x110
+#define SCU_2400_COPRO_SEG8 0x114
+#define SCU_2400_COPRO_SEG_SWAP 0x80000000
+#define SCU_2400_COPRO_CACHE_CTL 0x118
+#define SCU_2400_COPRO_CACHE_EN 0x00000001
+#define SCU_2400_COPRO_SEG0_CACHE_EN 0x00000002
+#define SCU_2400_COPRO_SEG2_CACHE_EN 0x00000004
+#define SCU_2400_COPRO_SEG4_CACHE_EN 0x00000008
+#define SCU_2400_COPRO_SEG6_CACHE_EN 0x00000010
+#define SCU_2400_COPRO_SEG8_CACHE_EN 0x00000020
+
+/* CVIC registers */
+#define CVIC_EN_REG 0x10
+#define CVIC_TRIG_REG 0x18
+
+/*
+ * System register base address (needed for configuring the
+ * ColdFire maps)
+ */
+#define SYSREG_BASE 0x1e600000
+
+/* Amount of SRAM required */
+#define SRAM_SIZE 0x1000
+
+#define LAST_ADDR_INVALID 0x1
+
+struct fsi_master_acf {
+ struct fsi_master master;
+ struct device *dev;
+ struct regmap *scu;
+ struct mutex lock; /* mutex for command ordering */
+ struct gpio_desc *gpio_clk;
+ struct gpio_desc *gpio_data;
+ struct gpio_desc *gpio_trans; /* Voltage translator */
+ struct gpio_desc *gpio_enable; /* FSI enable */
+ struct gpio_desc *gpio_mux; /* Mux control */
+ uint16_t gpio_clk_vreg;
+ uint16_t gpio_clk_dreg;
+ uint16_t gpio_dat_vreg;
+ uint16_t gpio_dat_dreg;
+ uint16_t gpio_tra_vreg;
+ uint16_t gpio_tra_dreg;
+ uint8_t gpio_clk_bit;
+ uint8_t gpio_dat_bit;
+ uint8_t gpio_tra_bit;
+ uint32_t cf_mem_addr;
+ size_t cf_mem_size;
+ void __iomem *cf_mem;
+ void __iomem *cvic;
+ struct gen_pool *sram_pool;
+ void __iomem *sram;
+ bool is_ast2500;
+ bool external_mode;
+ bool trace_enabled;
+ uint32_t last_addr;
+ uint8_t t_send_delay;
+ uint8_t t_echo_delay;
+ uint32_t cvic_sw_irq;
+};
+#define to_fsi_master_acf(m) container_of(m, struct fsi_master_acf, master)
+
+struct fsi_msg {
+ uint64_t msg;
+ uint8_t bits;
+};
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_ast_cf.h>
+
+static void msg_push_bits(struct fsi_msg *msg, uint64_t data, int bits)
+{
+ msg->msg <<= bits;
+ msg->msg |= data & ((1ull << bits) - 1);
+ msg->bits += bits;
+}
+
+static void msg_push_crc(struct fsi_msg *msg)
+{
+ uint8_t crc;
+ int top;
+
+ top = msg->bits & 0x3;
+
+ /* start bit, and any non-aligned top bits */
+ crc = crc4(0, 1 << top | msg->msg >> (msg->bits - top), top + 1);
+
+ /* aligned bits */
+ crc = crc4(crc, msg->msg, msg->bits - top);
+
+ msg_push_bits(msg, crc, 4);
+}
+
+static void msg_finish_cmd(struct fsi_msg *cmd)
+{
+ /* Left align message */
+ cmd->msg <<= (64 - cmd->bits);
+}
+
+static bool check_same_address(struct fsi_master_acf *master, int id,
+ uint32_t addr)
+{
+ /* this will also handle LAST_ADDR_INVALID */
+ return master->last_addr == (((id & 0x3) << 21) | (addr & ~0x3));
+}
+
+static bool check_relative_address(struct fsi_master_acf *master, int id,
+ uint32_t addr, uint32_t *rel_addrp)
+{
+ uint32_t last_addr = master->last_addr;
+ int32_t rel_addr;
+
+ if (last_addr == LAST_ADDR_INVALID)
+ return false;
+
+ /* We may be in 23-bit addressing mode, which uses the id as the
+ * top two address bits. So, if we're referencing a different ID,
+ * use absolute addresses.
+ */
+ if (((last_addr >> 21) & 0x3) != id)
+ return false;
+
+ /* remove the top two bits from any 23-bit addressing */
+ last_addr &= (1 << 21) - 1;
+
+ /* We know that the addresses are limited to 21 bits, so this won't
+ * overflow the signed rel_addr */
+ rel_addr = addr - last_addr;
+ if (rel_addr > 255 || rel_addr < -256)
+ return false;
+
+ *rel_addrp = (uint32_t)rel_addr;
+
+ return true;
+}
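+
+/*
+ * Worked example (assumed values): after an access by id 0 at address
+ * 0x1000, last_addr = (0 << 21) | 0x1000. A following access at 0x1040
+ * gives rel_addr = 0x40, within the 9-bit signed window (-256..255),
+ * so the shorter relative form is used.
+ */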
+
+static void last_address_update(struct fsi_master_acf *master,
+ int id, bool valid, uint32_t addr)
+{
+ if (!valid)
+ master->last_addr = LAST_ADDR_INVALID;
+ else
+ master->last_addr = ((id & 0x3) << 21) | (addr & ~0x3);
+}
+
+/*
+ * Encode an Absolute/Relative/Same Address command
+ */
+static void build_ar_command(struct fsi_master_acf *master,
+ struct fsi_msg *cmd, uint8_t id,
+ uint32_t addr, size_t size,
+ const void *data)
+{
+ int i, addr_bits, opcode_bits;
+ bool write = !!data;
+ uint8_t ds, opcode;
+ uint32_t rel_addr;
+
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ /* we have 21 bits of address max */
+ addr &= ((1 << 21) - 1);
+
+ /* cmd opcodes are variable length - SAME_AR is only two bits */
+ opcode_bits = 3;
+
+ if (check_same_address(master, id, addr)) {
+ /* we still address the byte offset within the word */
+ addr_bits = 2;
+ opcode_bits = 2;
+ opcode = FSI_CMD_SAME_AR;
+ trace_fsi_master_acf_cmd_same_addr(master);
+
+ } else if (check_relative_address(master, id, addr, &rel_addr)) {
+ /* 8 bits plus sign */
+ addr_bits = 9;
+ addr = rel_addr;
+ opcode = FSI_CMD_REL_AR;
+ trace_fsi_master_acf_cmd_rel_addr(master, rel_addr);
+
+ } else {
+ addr_bits = 21;
+ opcode = FSI_CMD_ABS_AR;
+ trace_fsi_master_acf_cmd_abs_addr(master, addr);
+ }
+
+ /*
+ * The read/write size is encoded in the lower bits of the address
+ * (as it must be naturally-aligned), and the following ds bit.
+ *
+ * size addr:1 addr:0 ds
+ * 1 x x 0
+ * 2 x 0 1
+ * 4 0 1 1
+ *
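+ * Worked example: a 4-byte access at 0x1004 stays aligned
+ * (0x1004 & ~3), then addr |= 1 and ds = 1, which the slave
+ * decodes per the table as a 4-byte transfer.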
+ */
+ ds = size > 1 ? 1 : 0;
+ addr &= ~(size - 1);
+ if (size == 4)
+ addr |= 1;
+
+ msg_push_bits(cmd, id, 2);
+ msg_push_bits(cmd, opcode, opcode_bits);
+ msg_push_bits(cmd, write ? 0 : 1, 1);
+ msg_push_bits(cmd, addr, addr_bits);
+ msg_push_bits(cmd, ds, 1);
+ for (i = 0; write && i < size; i++)
+ msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
+
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static void build_dpoll_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_DPOLL, 3);
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static void build_epoll_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_EPOLL, 3);
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static void build_term_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_TERM, 6);
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static int do_copro_command(struct fsi_master_acf *master, uint32_t op)
+{
+ uint32_t timeout = 10000000;
+ uint8_t stat;
+
+ trace_fsi_master_acf_copro_command(master, op);
+
+ /* Send command */
+ iowrite32be(op, master->sram + CMD_STAT_REG);
+
+ /* Ring doorbell if any */
+ if (master->cvic)
+ iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+ /* Wait for status to indicate completion (or error) */
+ do {
+ if (timeout-- == 0) {
+ dev_warn(master->dev,
+ "Timeout waiting for coprocessor completion\n");
+ return -ETIMEDOUT;
+ }
+ stat = ioread8(master->sram + CMD_STAT_REG);
+ } while (stat < STAT_COMPLETE || stat == 0xff);
+
+ if (stat == STAT_COMPLETE)
+ return 0;
+ switch (stat) {
+ case STAT_ERR_INVAL_CMD:
+ return -EINVAL;
+ case STAT_ERR_INVAL_IRQ:
+ return -EIO;
+ case STAT_ERR_MTOE:
+ return -ESHUTDOWN;
+ }
+ return -ENXIO;
+}
+
+static int clock_zeros(struct fsi_master_acf *master, int count)
+{
+ while (count) {
+ int rc, lcnt = min(count, 255);
+
+ rc = do_copro_command(master,
+ CMD_IDLE_CLOCKS | (lcnt << CMD_REG_CLEN_SHIFT));
+ if (rc)
+ return rc;
+ count -= lcnt;
+ }
+ return 0;
+}
+
+static int send_request(struct fsi_master_acf *master, struct fsi_msg *cmd,
+ unsigned int resp_bits)
+{
+ uint32_t op;
+
+ trace_fsi_master_acf_send_request(master, cmd, resp_bits);
+
+ /* Store message into SRAM */
+ iowrite32be((cmd->msg >> 32), master->sram + CMD_DATA);
+ iowrite32be((cmd->msg & 0xffffffff), master->sram + CMD_DATA + 4);
+
+ op = CMD_COMMAND;
+ op |= cmd->bits << CMD_REG_CLEN_SHIFT;
+ if (resp_bits)
+ op |= resp_bits << CMD_REG_RLEN_SHIFT;
+
+ return do_copro_command(master, op);
+}
+
+static int read_copro_response(struct fsi_master_acf *master, uint8_t size,
+ uint32_t *response, u8 *tag)
+{
+ uint8_t rtag = ioread8(master->sram + STAT_RTAG) & 0xf;
+ uint8_t rcrc = ioread8(master->sram + STAT_RCRC) & 0xf;
+ uint32_t rdata = 0;
+ uint32_t crc;
+ uint8_t ack;
+
+ *tag = ack = rtag & 3;
+
+ /* we have a whole message now; check CRC */
+ crc = crc4(0, 1, 1);
+ crc = crc4(crc, rtag, 4);
+ if (ack == FSI_RESP_ACK && size) {
+ rdata = ioread32be(master->sram + RSP_DATA);
+ crc = crc4(crc, rdata, size);
+ if (response)
+ *response = rdata;
+ }
+ crc = crc4(crc, rcrc, 4);
+
+ trace_fsi_master_acf_copro_response(master, rtag, rcrc, rdata, crc == 0);
+
+ if (crc) {
+ /*
+ * Check if it's all 1's or all 0's, that probably means
+ * the host is off
+ */
+ if ((rtag == 0xf && rcrc == 0xf) || (rtag == 0 && rcrc == 0))
+ return -ENODEV;
+ dev_dbg(master->dev, "Bad response CRC!\n");
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int send_term(struct fsi_master_acf *master, uint8_t slave)
+{
+ struct fsi_msg cmd;
+ uint8_t tag;
+ int rc;
+
+ build_term_command(&cmd, slave);
+
+ rc = send_request(master, &cmd, 0);
+ if (rc) {
+ dev_warn(master->dev, "Error %d sending term\n", rc);
+ return rc;
+ }
+
+ rc = read_copro_response(master, 0, NULL, &tag);
+ if (rc < 0) {
+ dev_err(master->dev,
+ "TERM failed; lost communication with slave\n");
+ return -EIO;
+ } else if (tag != FSI_RESP_ACK) {
+ dev_err(master->dev, "TERM failed; response %d\n", tag);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void dump_ucode_trace(struct fsi_master_acf *master)
+{
+ char trbuf[52];
+ char *p;
+ int i;
+
+ dev_dbg(master->dev,
+ "CMDSTAT:%08x RTAG=%02x RCRC=%02x RDATA=%02x #INT=%08x\n",
+ ioread32be(master->sram + CMD_STAT_REG),
+ ioread8(master->sram + STAT_RTAG),
+ ioread8(master->sram + STAT_RCRC),
+ ioread32be(master->sram + RSP_DATA),
+ ioread32be(master->sram + INT_CNT));
+
+ for (i = 0; i < 512; i++) {
+ uint8_t v;
+ if ((i % 16) == 0)
+ p = trbuf;
+ v = ioread8(master->sram + TRACEBUF + i);
+ p += sprintf(p, "%02x ", v);
+ if (((i % 16) == 15) || v == TR_END)
+ dev_dbg(master->dev, "%s\n", trbuf);
+ if (v == TR_END)
+ break;
+ }
+}
+
+static int handle_response(struct fsi_master_acf *master,
+ uint8_t slave, uint8_t size, void *data)
+{
+ int busy_count = 0, rc;
+ int crc_err_retries = 0;
+ struct fsi_msg cmd;
+ uint32_t response;
+ uint8_t tag;
+retry:
+ rc = read_copro_response(master, size, &response, &tag);
+
+ /* Handle retries on CRC errors */
+ if (rc == -EAGAIN) {
+ /* Too many retries ? */
+ if (crc_err_retries++ > FSI_CRC_ERR_RETRIES) {
+ /*
+ * Pass it up as a -EIO otherwise upper level will retry
+ * the whole command which isn't what we want here.
+ */
+ rc = -EIO;
+ goto bail;
+ }
+ trace_fsi_master_acf_crc_rsp_error(master, crc_err_retries);
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = clock_zeros(master, FSI_MASTER_EPOLL_CLOCKS);
+ if (rc) {
+ dev_warn(master->dev,
+ "Error %d clocking zeros for E_POLL\n", rc);
+ return rc;
+ }
+ build_epoll_command(&cmd, slave);
+ rc = send_request(master, &cmd, size);
+ if (rc) {
+ dev_warn(master->dev, "Error %d sending E_POLL\n", rc);
+ return -EIO;
+ }
+ goto retry;
+ }
+ if (rc)
+ return rc;
+
+ switch (tag) {
+ case FSI_RESP_ACK:
+ if (size && data) {
+ if (size == 32)
+ *(__be32 *)data = cpu_to_be32(response);
+ else if (size == 16)
+ *(__be16 *)data = cpu_to_be16(response);
+ else
+ *(u8 *)data = response;
+ }
+ break;
+ case FSI_RESP_BUSY:
+ /*
+ * It's necessary to clock the slave before issuing a
+ * d-poll, which isn't indicated in the hardware protocol
+ * spec: fewer than 20 clocks cause the slave to hang,
+ * 21 work fine.
+ */
+ dev_dbg(master->dev, "Busy, retrying...\n");
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
+ if (rc) {
+ dev_warn(master->dev,
+ "Error %d clocking zeros for D_POLL\n", rc);
+ break;
+ }
+ if (busy_count++ < FSI_MASTER_MAX_BUSY) {
+ build_dpoll_command(&cmd, slave);
+ rc = send_request(master, &cmd, size);
+ if (rc) {
+ dev_warn(master->dev, "Error %d sending D_POLL\n", rc);
+ break;
+ }
+ goto retry;
+ }
+ dev_dbg(master->dev,
+ "ERR slave is stuck in busy state, issuing TERM\n");
+ send_term(master, slave);
+ rc = -EIO;
+ break;
+
+ case FSI_RESP_ERRA:
+ dev_dbg(master->dev, "ERRA received\n");
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = -EIO;
+ break;
+ case FSI_RESP_ERRC:
+ dev_dbg(master->dev, "ERRC received\n");
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = -EAGAIN;
+ break;
+ }
+ bail:
+ if (busy_count > 0) {
+ trace_fsi_master_acf_poll_response_busy(master, busy_count);
+ }
+
+ return rc;
+}
+
+static int fsi_master_acf_xfer(struct fsi_master_acf *master, uint8_t slave,
+ struct fsi_msg *cmd, size_t resp_len, void *resp)
+{
+ int rc = -EAGAIN, retries = 0;
+
+ resp_len <<= 3;
+ while ((retries++) < FSI_CRC_ERR_RETRIES) {
+ rc = send_request(master, cmd, resp_len);
+ if (rc) {
+ if (rc != -ESHUTDOWN)
+ dev_warn(master->dev, "Error %d sending command\n", rc);
+ break;
+ }
+ rc = handle_response(master, slave, resp_len, resp);
+ if (rc != -EAGAIN)
+ break;
+ rc = -EIO;
+ dev_dbg(master->dev, "ECRC retry %d\n", retries);
+
+ /* Pace it a bit before retry */
+ msleep(1);
+ }
+
+ return rc;
+}
+
+static int fsi_master_acf_read(struct fsi_master *_master, int link,
+ uint8_t id, uint32_t addr, void *val,
+ size_t size)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ struct fsi_msg cmd;
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ dev_dbg(master->dev, "read id %d addr %x size %zd\n", id, addr, size);
+ build_ar_command(master, &cmd, id, addr, size, NULL);
+ rc = fsi_master_acf_xfer(master, id, &cmd, size, val);
+ last_address_update(master, id, rc == 0, addr);
+ if (rc)
+ dev_dbg(master->dev, "read id %d addr 0x%08x err: %d\n",
+ id, addr, rc);
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_write(struct fsi_master *_master, int link,
+ uint8_t id, uint32_t addr, const void *val,
+ size_t size)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ struct fsi_msg cmd;
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ build_ar_command(master, &cmd, id, addr, size, val);
+ dev_dbg(master->dev, "write id %d addr %x size %zd raw_data: %08x\n",
+ id, addr, size, *(uint32_t *)val);
+ rc = fsi_master_acf_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, rc == 0, addr);
+ if (rc)
+ dev_dbg(master->dev, "write id %d addr 0x%08x err: %d\n",
+ id, addr, rc);
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_term(struct fsi_master *_master,
+ int link, uint8_t id)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ struct fsi_msg cmd;
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ build_term_command(&cmd, id);
+ dev_dbg(master->dev, "term id %d\n", id);
+ rc = fsi_master_acf_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, false, 0);
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_break(struct fsi_master *_master, int link)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ if (master->external_mode) {
+ mutex_unlock(&master->lock);
+ return -EBUSY;
+ }
+ dev_dbg(master->dev, "sending BREAK\n");
+ rc = do_copro_command(master, CMD_BREAK);
+ last_address_update(master, 0, false, 0);
+ mutex_unlock(&master->lock);
+
+ /* Wait for logic reset to take effect */
+ udelay(200);
+
+ return rc;
+}
+
+static void reset_cf(struct fsi_master_acf *master)
+{
+ regmap_write(master->scu, SCU_COPRO_CTRL, SCU_COPRO_RESET);
+ usleep_range(20, 20);
+ regmap_write(master->scu, SCU_COPRO_CTRL, 0);
+ usleep_range(20, 20);
+}
+
+static void start_cf(struct fsi_master_acf *master)
+{
+ regmap_write(master->scu, SCU_COPRO_CTRL, SCU_COPRO_CLK_EN);
+}
+
+static void setup_ast2500_cf_maps(struct fsi_master_acf *master)
+{
+ /*
+ * Note about byteswap setting: the bus is wired backwards,
+ * so setting the byteswap bit actually makes the ColdFire
+ * work "normally" for a BE processor, ie, put the MSB in
+ * the lowest address byte.
+ *
+ * We thus need to set the bit for our main memory which
+ * contains our program code. We create two mappings for
+ * the register, one with each setting.
+ *
+ * Segments 2 and 3 have a "swapped" mapping (BE)
+ * and 6 and 7 have a non-swapped mapping (LE) which allows
+ * us to avoid byteswapping register accesses since the
+ * registers are all LE.
+ */
+
+ /* Setup segment 0 to our memory region */
+ regmap_write(master->scu, SCU_2500_COPRO_SEG0, master->cf_mem_addr |
+ SCU_2500_COPRO_SEG_SWAP);
+
+ /* Segments 2 and 3 to sysregs with byteswap (for SRAM) */
+ regmap_write(master->scu, SCU_2500_COPRO_SEG2, SYSREG_BASE |
+ SCU_2500_COPRO_SEG_SWAP);
+ regmap_write(master->scu, SCU_2500_COPRO_SEG3, SYSREG_BASE | 0x100000 |
+ SCU_2500_COPRO_SEG_SWAP);
+
+ /* And segment 6 and 7 to sysregs no byteswap */
+ regmap_write(master->scu, SCU_2500_COPRO_SEG6, SYSREG_BASE);
+ regmap_write(master->scu, SCU_2500_COPRO_SEG7, SYSREG_BASE | 0x100000);
+
+ /* Memory cachable, regs and SRAM not cachable */
+ regmap_write(master->scu, SCU_2500_COPRO_CACHE_CTL,
+ SCU_2500_COPRO_SEG0_CACHE_EN | SCU_2500_COPRO_CACHE_EN);
+}
+
+static void setup_ast2400_cf_maps(struct fsi_master_acf *master)
+{
+ /* Setup segment 0 to our memory region */
+ regmap_write(master->scu, SCU_2400_COPRO_SEG0, master->cf_mem_addr |
+ SCU_2400_COPRO_SEG_SWAP);
+
+ /* Segment 2 to sysregs with byteswap (for SRAM) */
+ regmap_write(master->scu, SCU_2400_COPRO_SEG2, SYSREG_BASE |
+ SCU_2400_COPRO_SEG_SWAP);
+
+ /* And segment 6 to sysregs no byteswap */
+ regmap_write(master->scu, SCU_2400_COPRO_SEG6, SYSREG_BASE);
+
+ /* Memory cachable, regs and SRAM not cachable */
+ regmap_write(master->scu, SCU_2400_COPRO_CACHE_CTL,
+ SCU_2400_COPRO_SEG0_CACHE_EN | SCU_2400_COPRO_CACHE_EN);
+}
+
+static void setup_common_fw_config(struct fsi_master_acf *master,
+ void __iomem *base)
+{
+ iowrite16be(master->gpio_clk_vreg, base + HDR_CLOCK_GPIO_VADDR);
+ iowrite16be(master->gpio_clk_dreg, base + HDR_CLOCK_GPIO_DADDR);
+ iowrite16be(master->gpio_dat_vreg, base + HDR_DATA_GPIO_VADDR);
+ iowrite16be(master->gpio_dat_dreg, base + HDR_DATA_GPIO_DADDR);
+ iowrite16be(master->gpio_tra_vreg, base + HDR_TRANS_GPIO_VADDR);
+ iowrite16be(master->gpio_tra_dreg, base + HDR_TRANS_GPIO_DADDR);
+ iowrite8(master->gpio_clk_bit, base + HDR_CLOCK_GPIO_BIT);
+ iowrite8(master->gpio_dat_bit, base + HDR_DATA_GPIO_BIT);
+ iowrite8(master->gpio_tra_bit, base + HDR_TRANS_GPIO_BIT);
+}
+
+static void setup_ast2500_fw_config(struct fsi_master_acf *master)
+{
+ void __iomem *base = master->cf_mem + HDR_OFFSET;
+
+ setup_common_fw_config(master, base);
+ iowrite32be(FW_CONTROL_USE_STOP, base + HDR_FW_CONTROL);
+}
+
+static void setup_ast2400_fw_config(struct fsi_master_acf *master)
+{
+ void __iomem *base = master->cf_mem + HDR_OFFSET;
+
+ setup_common_fw_config(master, base);
+ iowrite32be(FW_CONTROL_CONT_CLOCK|FW_CONTROL_DUMMY_RD, base + HDR_FW_CONTROL);
+}
+
+static int setup_gpios_for_copro(struct fsi_master_acf *master)
+{
+ int rc;
+
+ /* These aren't under ColdFire control, just set them up appropriately */
+ gpiod_direction_output(master->gpio_mux, 1);
+ gpiod_direction_output(master->gpio_enable, 1);
+
+ /* Those are under ColdFire control, let it configure them */
+ rc = aspeed_gpio_copro_grab_gpio(master->gpio_clk, &master->gpio_clk_vreg,
+ &master->gpio_clk_dreg, &master->gpio_clk_bit);
+ if (rc) {
+ dev_err(master->dev, "failed to assign clock gpio to coprocessor\n");
+ return rc;
+ }
+ rc = aspeed_gpio_copro_grab_gpio(master->gpio_data, &master->gpio_dat_vreg,
+ &master->gpio_dat_dreg, &master->gpio_dat_bit);
+ if (rc) {
+ dev_err(master->dev, "failed to assign data gpio to coprocessor\n");
+ aspeed_gpio_copro_release_gpio(master->gpio_clk);
+ return rc;
+ }
+ rc = aspeed_gpio_copro_grab_gpio(master->gpio_trans, &master->gpio_tra_vreg,
+ &master->gpio_tra_dreg, &master->gpio_tra_bit);
+ if (rc) {
+ dev_err(master->dev, "failed to assign trans gpio to coprocessor\n");
+ aspeed_gpio_copro_release_gpio(master->gpio_clk);
+ aspeed_gpio_copro_release_gpio(master->gpio_data);
+ return rc;
+ }
+ return 0;
+}
+
+static void release_copro_gpios(struct fsi_master_acf *master)
+{
+ aspeed_gpio_copro_release_gpio(master->gpio_clk);
+ aspeed_gpio_copro_release_gpio(master->gpio_data);
+ aspeed_gpio_copro_release_gpio(master->gpio_trans);
+}
+
+static int load_copro_firmware(struct fsi_master_acf *master)
+{
+ const struct firmware *fw;
+ uint16_t sig = 0, wanted_sig;
+ const u8 *data;
+ size_t size = 0;
+ int rc;
+
+ /* Get the binary */
+ rc = request_firmware(&fw, FW_FILE_NAME, master->dev);
+ if (rc) {
+ dev_err(master->dev, "Error %d loading firmware '%s'!\n",
+ rc, FW_FILE_NAME);
+ return rc;
+ }
+
+ /* Which image do we want? (shared vs. split clock/data GPIOs) */
+ if (master->gpio_clk_vreg == master->gpio_dat_vreg)
+ wanted_sig = SYS_SIG_SHARED;
+ else
+ wanted_sig = SYS_SIG_SPLIT;
+ dev_dbg(master->dev, "Looking for image sig %04x\n", wanted_sig);
+
+ /* Try to find it */
+ for (data = fw->data; data < (fw->data + fw->size);) {
+ sig = be16_to_cpup((__be16 *)(data + HDR_OFFSET + HDR_SYS_SIG));
+ size = be32_to_cpup((__be32 *)(data + HDR_OFFSET + HDR_FW_SIZE));
+ if (sig == wanted_sig)
+ break;
+ data += size;
+ }
+ if (sig != wanted_sig) {
+ dev_err(master->dev, "Failed to locate image sig %04x in FW blob\n",
+ wanted_sig);
+ rc = -ENODEV;
+ goto release_fw;
+ }
+ if (size > master->cf_mem_size) {
+ dev_err(master->dev, "FW size (%zd) bigger than memory reserve (%zd)\n",
+ size, master->cf_mem_size);
+ rc = -ENOMEM;
+ } else {
+ memcpy_toio(master->cf_mem, data, size);
+ }
+
+release_fw:
+ release_firmware(fw);
+ return rc;
+}
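+
+/*
+ * Note on the blob layout: the firmware file may concatenate several
+ * images ("combo" file), each carrying its signature and total size
+ * in its header (HDR_SYS_SIG/HDR_FW_SIZE); that's what lets the
+ * search loop above hop from one image to the next.
+ */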
+
+static int check_firmware_image(struct fsi_master_acf *master)
+{
+ uint32_t fw_vers, fw_api, fw_options;
+
+ fw_vers = ioread16be(master->cf_mem + HDR_OFFSET + HDR_FW_VERS);
+ fw_api = ioread16be(master->cf_mem + HDR_OFFSET + HDR_API_VERS);
+ fw_options = ioread32be(master->cf_mem + HDR_OFFSET + HDR_FW_OPTIONS);
+ master->trace_enabled = !!(fw_options & FW_OPTION_TRACE_EN);
+
+ /* Check version and signature */
+ dev_info(master->dev, "ColdFire initialized, firmware v%d API v%d.%d (trace %s)\n",
+ fw_vers, fw_api >> 8, fw_api & 0xff,
+ master->trace_enabled ? "enabled" : "disabled");
+
+ if ((fw_api >> 8) != API_VERSION_MAJ) {
+ dev_err(master->dev, "Unsupported coprocessor API version!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int copro_enable_sw_irq(struct fsi_master_acf *master)
+{
+ int timeout;
+ uint32_t val;
+
+ /*
+ * Enable coprocessor interrupt input. I've had problems getting the
+ * value to stick, so try in a loop
+ */
+ for (timeout = 0; timeout < 10; timeout++) {
+ iowrite32(0x2, master->cvic + CVIC_EN_REG);
+ val = ioread32(master->cvic + CVIC_EN_REG);
+ if (val & 2)
+ break;
+ msleep(1);
+ }
+ if (!(val & 2)) {
+ dev_err(master->dev, "Failed to enable coprocessor interrupt!\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int fsi_master_acf_setup(struct fsi_master_acf *master)
+{
+ int timeout, rc;
+ uint32_t val;
+
+ /* Make sure the ColdFire is stopped */
+ reset_cf(master);
+
+ /*
+ * Clear SRAM. This needs to happen before we setup the GPIOs
+ * as we might start trying to arbitrate as soon as that happens.
+ */
+ memset_io(master->sram, 0, SRAM_SIZE);
+
+ /* Configure GPIOs */
+ rc = setup_gpios_for_copro(master);
+ if (rc)
+ return rc;
+
+ /* Load the firmware into the reserved memory */
+ rc = load_copro_firmware(master);
+ if (rc)
+ return rc;
+
+ /* Read signature and check versions */
+ rc = check_firmware_image(master);
+ if (rc)
+ return rc;
+
+ /* Setup ColdFire memory map */
+ if (master->is_ast2500) {
+ setup_ast2500_cf_maps(master);
+ setup_ast2500_fw_config(master);
+ } else {
+ setup_ast2400_cf_maps(master);
+ setup_ast2400_fw_config(master);
+ }
+
+ /* Start the ColdFire */
+ start_cf(master);
+
+ /* Wait for the copro to flag itself as started, which signals
+ * that its initialization is complete
+ */
+ for (timeout = 0; timeout < 10; timeout++) {
+ val = ioread8(master->sram + CF_STARTED);
+ if (val)
+ break;
+ msleep(1);
+ }
+ if (!val) {
+ dev_err(master->dev, "Coprocessor startup timeout!\n");
+ rc = -ENODEV;
+ goto err;
+ }
+
+ /* Configure echo & send delay */
+ iowrite8(master->t_send_delay, master->sram + SEND_DLY_REG);
+ iowrite8(master->t_echo_delay, master->sram + ECHO_DLY_REG);
+
+ /* Enable SW interrupt to copro if any */
+ if (master->cvic) {
+ rc = copro_enable_sw_irq(master);
+ if (rc)
+ goto err;
+ }
+ return 0;
+ err:
+ /* An error occurred, don't leave the coprocessor running */
+ reset_cf(master);
+
+ /* Release the GPIOs */
+ release_copro_gpios(master);
+
+ return rc;
+}
+
+
+static void fsi_master_acf_terminate(struct fsi_master_acf *master)
+{
+ unsigned long flags;
+
+ /*
+ * A GPIO arbitration request could come in while this is
+ * happening. To avoid problems, we disable interrupts so it
+ * cannot preempt us on this CPU.
+ */
+
+ local_irq_save(flags);
+
+ /* Stop the coprocessor */
+ reset_cf(master);
+
+ /* We mark the copro not-started */
+ iowrite32(0, master->sram + CF_STARTED);
+
+ /* We mark the ARB register as having given up arbitration to
+ * deal with a potential race with the arbitration request
+ */
+ iowrite8(ARB_ARM_ACK, master->sram + ARB_REG);
+
+ local_irq_restore(flags);
+
+ /* Return the GPIOs to the ARM */
+ release_copro_gpios(master);
+}
+
+static void fsi_master_acf_setup_external(struct fsi_master_acf *master)
+{
+ /* Setup GPIOs for external FSI master (FSP box) */
+ gpiod_direction_output(master->gpio_mux, 0);
+ gpiod_direction_output(master->gpio_trans, 0);
+ gpiod_direction_output(master->gpio_enable, 1);
+ gpiod_direction_input(master->gpio_clk);
+ gpiod_direction_input(master->gpio_data);
+}
+
+static int fsi_master_acf_link_enable(struct fsi_master *_master, int link)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ int rc = -EBUSY;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ if (!master->external_mode) {
+ gpiod_set_value(master->gpio_enable, 1);
+ rc = 0;
+ }
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_link_config(struct fsi_master *_master, int link,
+ u8 t_send_delay, u8 t_echo_delay)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ master->t_send_delay = t_send_delay;
+ master->t_echo_delay = t_echo_delay;
+ dev_dbg(master->dev, "Changing delays: send=%d echo=%d\n",
+ t_send_delay, t_echo_delay);
+ iowrite8(master->t_send_delay, master->sram + SEND_DLY_REG);
+ iowrite8(master->t_echo_delay, master->sram + ECHO_DLY_REG);
+ mutex_unlock(&master->lock);
+
+ return 0;
+}
+
+static ssize_t external_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fsi_master_acf *master = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%u\n",
+ master->external_mode ? 1 : 0);
+}
+
+static ssize_t external_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fsi_master_acf *master = dev_get_drvdata(dev);
+ unsigned long val;
+ bool external_mode;
+ int err;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ external_mode = !!val;
+
+ mutex_lock(&master->lock);
+
+ if (external_mode == master->external_mode) {
+ mutex_unlock(&master->lock);
+ return count;
+ }
+
+ master->external_mode = external_mode;
+ if (master->external_mode) {
+ fsi_master_acf_terminate(master);
+ fsi_master_acf_setup_external(master);
+ } else
+ fsi_master_acf_setup(master);
+
+ mutex_unlock(&master->lock);
+
+ fsi_master_rescan(&master->master);
+
+ return count;
+}
+
+static DEVICE_ATTR(external_mode, 0664,
+ external_mode_show, external_mode_store);
+
+static int fsi_master_acf_gpio_request(void *data)
+{
+ struct fsi_master_acf *master = data;
+ int timeout;
+ u8 val;
+
+ /* Note: This doesn't require holding our mutex */
+
+ /* Write request */
+ iowrite8(ARB_ARM_REQ, master->sram + ARB_REG);
+
+ /*
+ * There is a race (which does happen at boot time) when we get an
+ * arbitration request as we are either about to or just starting
+ * the coprocessor.
+ *
+ * To handle it, we first check whether the copro reports itself
+ * as started. If not, we check whether it is clocked via the SCU.
+ *
+ * If it's not started, we can basically just assume we have
+ * arbitration and return. Otherwise, we wait normally, expecting
+ * the arbitration to eventually complete.
+ */
+ if (ioread32(master->sram + CF_STARTED) == 0) {
+ unsigned int reg = 0;
+
+ regmap_read(master->scu, SCU_COPRO_CTRL, &reg);
+ if (!(reg & SCU_COPRO_CLK_EN))
+ return 0;
+ }
+
+ /* Ring doorbell if any */
+ if (master->cvic)
+ iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+ for (timeout = 0; timeout < 10000; timeout++) {
+ val = ioread8(master->sram + ARB_REG);
+ if (val != ARB_ARM_REQ)
+ break;
+ udelay(1);
+ }
+
+ /* If it failed, override anyway */
+ if (val != ARB_ARM_ACK)
+ dev_warn(master->dev, "GPIO request arbitration timeout\n");
+
+ return 0;
+}
+
+static int fsi_master_acf_gpio_release(void *data)
+{
+ struct fsi_master_acf *master = data;
+
+ /* Write release */
+ iowrite8(0, master->sram + ARB_REG);
+
+ /* Ring doorbell if any */
+ if (master->cvic)
+ iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+ return 0;
+}
+
+static void fsi_master_acf_release(struct device *dev)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(dev_to_fsi_master(dev));
+
+ /* Cleanup, stop coprocessor */
+ mutex_lock(&master->lock);
+ fsi_master_acf_terminate(master);
+ aspeed_gpio_copro_set_ops(NULL, NULL);
+ mutex_unlock(&master->lock);
+
+ /* Free resources */
+ gen_pool_free(master->sram_pool, (unsigned long)master->sram, SRAM_SIZE);
+ of_node_put(dev_of_node(master->dev));
+
+ kfree(master);
+}
+
+static const struct aspeed_gpio_copro_ops fsi_master_acf_gpio_ops = {
+ .request_access = fsi_master_acf_gpio_request,
+ .release_access = fsi_master_acf_gpio_release,
+};
+
+static int fsi_master_acf_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *mnode = dev_of_node(&pdev->dev);
+ struct genpool_data_fixed gpdf;
+ struct fsi_master_acf *master;
+ struct gpio_desc *gpio;
+ struct resource res;
+ uint32_t cf_mem_align;
+ int rc;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->dev = &pdev->dev;
+ master->master.dev.parent = master->dev;
+ master->last_addr = LAST_ADDR_INVALID;
+
+ /* AST2400 vs. AST2500 */
+ master->is_ast2500 = of_device_is_compatible(mnode, "aspeed,ast2500-cf-fsi-master");
+
+ /* Grab the SCU, we'll need to access it to configure the coprocessor */
+ if (master->is_ast2500)
+ master->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
+ else
+ master->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2400-scu");
+ if (IS_ERR(master->scu)) {
+ dev_err(&pdev->dev, "failed to find SCU regmap\n");
+ rc = PTR_ERR(master->scu);
+ goto err_free;
+ }
+
+ /* Grab all the GPIOs we need */
+ gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get clock gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_clk = gpio;
+
+ gpio = devm_gpiod_get(&pdev->dev, "data", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get data gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_data = gpio;
+
+ /* Optional GPIOs */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get trans gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_trans = gpio;
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get enable gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_enable = gpio;
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get mux gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_mux = gpio;
+
+ /* Grab the reserved memory region (use the DMA API instead?) */
+ np = of_parse_phandle(mnode, "memory-region", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "Didn't find reserved memory\n");
+ rc = -EINVAL;
+ goto err_free;
+ }
+ rc = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (rc) {
+ dev_err(&pdev->dev, "Couldn't address to resource for reserved memory\n");
+ rc = -ENOMEM;
+ goto err_free;
+ }
+ master->cf_mem_size = resource_size(&res);
+ master->cf_mem_addr = (uint32_t)res.start;
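+ /* Coprocessor memory must be 1MB-aligned on the AST2500, 2MB on the AST2400 */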
+ cf_mem_align = master->is_ast2500 ? 0x00100000 : 0x00200000;
+ if (master->cf_mem_addr & (cf_mem_align - 1)) {
+ dev_err(&pdev->dev, "Reserved memory has insufficient alignment\n");
+ rc = -ENOMEM;
+ goto err_free;
+ }
+ master->cf_mem = devm_ioremap_resource(&pdev->dev, &res);
+ if (IS_ERR(master->cf_mem)) {
+ rc = PTR_ERR(master->cf_mem);
+ dev_err(&pdev->dev, "Error %d mapping coldfire memory\n", rc);
+ goto err_free;
+ }
+ dev_dbg(&pdev->dev, "DRAM allocation @%x\n", master->cf_mem_addr);
+
+ /* AST2500 has a SW interrupt to the coprocessor */
+ if (master->is_ast2500) {
+ /* Grab the CVIC (ColdFire interrupts controller) */
+ np = of_parse_phandle(mnode, "aspeed,cvic", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "Didn't find CVIC\n");
+ rc = -EINVAL;
+ goto err_free;
+ }
+ master->cvic = devm_of_iomap(&pdev->dev, np, 0, NULL);
+ if (IS_ERR(master->cvic)) {
+ rc = PTR_ERR(master->cvic);
+ dev_err(&pdev->dev, "Error %d mapping CVIC\n", rc);
+ goto err_free;
+ }
+ rc = of_property_read_u32(np, "copro-sw-interrupts",
+ &master->cvic_sw_irq);
+ if (rc) {
+ dev_err(&pdev->dev, "Can't find coprocessor SW interrupt\n");
+ goto err_free;
+ }
+ }
+
+ /* Grab the SRAM */
+ master->sram_pool = of_gen_pool_get(dev_of_node(&pdev->dev), "aspeed,sram", 0);
+ if (!master->sram_pool) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "Can't find sram pool\n");
+ goto err_free;
+ }
+
+ /* Current microcode only deals with fixed location in SRAM */
+ gpdf.offset = 0;
+ master->sram = (void __iomem *)gen_pool_alloc_algo(master->sram_pool, SRAM_SIZE,
+ gen_pool_fixed_alloc, &gpdf);
+ if (!master->sram) {
+ rc = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate sram from pool\n");
+ goto err_free;
+ }
+ dev_dbg(&pdev->dev, "SRAM allocation @%lx\n",
+ (unsigned long)gen_pool_virt_to_phys(master->sram_pool,
+ (unsigned long)master->sram));
+
+ /*
+ * Hookup with the GPIO driver for arbitration of GPIO banks
+ * ownership.
+ */
+ aspeed_gpio_copro_set_ops(&fsi_master_acf_gpio_ops, master);
+
+ /* Default FSI command delays */
+ master->t_send_delay = FSI_SEND_DELAY_CLOCKS;
+ master->t_echo_delay = FSI_ECHO_DELAY_CLOCKS;
+ master->master.n_links = 1;
+ if (master->is_ast2500)
+ master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
+ master->master.read = fsi_master_acf_read;
+ master->master.write = fsi_master_acf_write;
+ master->master.term = fsi_master_acf_term;
+ master->master.send_break = fsi_master_acf_break;
+ master->master.link_enable = fsi_master_acf_link_enable;
+ master->master.link_config = fsi_master_acf_link_config;
+ master->master.dev.of_node = of_node_get(dev_of_node(master->dev));
+ master->master.dev.release = fsi_master_acf_release;
+ platform_set_drvdata(pdev, master);
+ mutex_init(&master->lock);
+
+ mutex_lock(&master->lock);
+ rc = fsi_master_acf_setup(master);
+ mutex_unlock(&master->lock);
+ if (rc)
+ goto release_of_dev;
+
+ rc = device_create_file(&pdev->dev, &dev_attr_external_mode);
+ if (rc)
+ goto stop_copro;
+
+ rc = fsi_master_register(&master->master);
+ if (!rc)
+ return 0;
+
+ device_remove_file(master->dev, &dev_attr_external_mode);
+ put_device(&master->master.dev);
+ return rc;
+
+ stop_copro:
+ fsi_master_acf_terminate(master);
+ release_of_dev:
+ aspeed_gpio_copro_set_ops(NULL, NULL);
+ gen_pool_free(master->sram_pool, (unsigned long)master->sram, SRAM_SIZE);
+ of_node_put(dev_of_node(master->dev));
+ err_free:
+ kfree(master);
+ return rc;
+}
+
+
+static int fsi_master_acf_remove(struct platform_device *pdev)
+{
+ struct fsi_master_acf *master = platform_get_drvdata(pdev);
+
+ device_remove_file(master->dev, &dev_attr_external_mode);
+
+ fsi_master_unregister(&master->master);
+
+ return 0;
+}
+
+static const struct of_device_id fsi_master_acf_match[] = {
+ { .compatible = "aspeed,ast2400-cf-fsi-master" },
+ { .compatible = "aspeed,ast2500-cf-fsi-master" },
+ { },
+};
+
+static struct platform_driver fsi_master_acf = {
+ .driver = {
+ .name = "fsi-master-acf",
+ .of_match_table = fsi_master_acf_match,
+ },
+ .probe = fsi_master_acf_probe,
+ .remove = fsi_master_acf_remove,
+};
+
+module_platform_driver(fsi_master_acf);
+MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
index 3f487449a277..4eb3a766fd4a 100644
--- a/drivers/fsi/fsi-master-gpio.c
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -8,59 +8,31 @@
#include <linux/fsi.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
+#include <linux/irqflags.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include "fsi-master.h"
#define FSI_GPIO_STD_DLY 1 /* Standard pin delay in nS */
-#define FSI_ECHO_DELAY_CLOCKS 16 /* Number clocks for echo delay */
-#define FSI_PRE_BREAK_CLOCKS 50 /* Number clocks to prep for break */
-#define FSI_BREAK_CLOCKS 256 /* Number of clocks to issue break */
-#define FSI_POST_BREAK_CLOCKS 16000 /* Number clocks to set up cfam */
-#define FSI_INIT_CLOCKS 5000 /* Clock out any old data */
-#define FSI_GPIO_STD_DELAY 10 /* Standard GPIO delay in nS */
- /* todo: adjust down as low as */
- /* possible or eliminate */
-#define FSI_GPIO_CMD_DPOLL 0x2
-#define FSI_GPIO_CMD_TERM 0x3f
-#define FSI_GPIO_CMD_ABS_AR 0x4
-
-#define FSI_GPIO_DPOLL_CLOCKS 100 /* < 21 will cause slave to hang */
-
-/* Bus errors */
-#define FSI_GPIO_ERR_BUSY 1 /* Slave stuck in busy state */
-#define FSI_GPIO_RESP_ERRA 2 /* Any (misc) Error */
-#define FSI_GPIO_RESP_ERRC 3 /* Slave reports master CRC error */
-#define FSI_GPIO_MTOE 4 /* Master time out error */
-#define FSI_GPIO_CRC_INVAL 5 /* Master reports slave CRC error */
-
-/* Normal slave responses */
-#define FSI_GPIO_RESP_BUSY 1
-#define FSI_GPIO_RESP_ACK 0
-#define FSI_GPIO_RESP_ACKD 4
-
-#define FSI_GPIO_MAX_BUSY 100
-#define FSI_GPIO_MTOE_COUNT 1000
-#define FSI_GPIO_DRAIN_BITS 20
-#define FSI_GPIO_CRC_SIZE 4
-#define FSI_GPIO_MSG_ID_SIZE 2
-#define FSI_GPIO_MSG_RESPID_SIZE 2
-#define FSI_GPIO_PRIME_SLAVE_CLOCKS 100
+#define LAST_ADDR_INVALID 0x1
struct fsi_master_gpio {
struct fsi_master master;
struct device *dev;
- spinlock_t cmd_lock; /* Lock for commands */
+ struct mutex cmd_lock; /* mutex for command ordering */
struct gpio_desc *gpio_clk;
struct gpio_desc *gpio_data;
struct gpio_desc *gpio_trans; /* Voltage translator */
struct gpio_desc *gpio_enable; /* FSI enable */
struct gpio_desc *gpio_mux; /* Mux control */
bool external_mode;
+ bool no_delays;
+ uint32_t last_addr;
+ uint8_t t_send_delay;
+ uint8_t t_echo_delay;
};
#define CREATE_TRACE_POINTS
@@ -78,19 +50,31 @@ static void clock_toggle(struct fsi_master_gpio *master, int count)
int i;
for (i = 0; i < count; i++) {
- ndelay(FSI_GPIO_STD_DLY);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
gpiod_set_value(master->gpio_clk, 0);
- ndelay(FSI_GPIO_STD_DLY);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
gpiod_set_value(master->gpio_clk, 1);
}
}
-static int sda_in(struct fsi_master_gpio *master)
+static int sda_clock_in(struct fsi_master_gpio *master)
{
int in;
- ndelay(FSI_GPIO_STD_DLY);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
+ gpiod_set_value(master->gpio_clk, 0);
+
+ /* Dummy read to feed the synchronizers */
+ gpiod_get_value(master->gpio_data);
+
+ /* Actual data read */
in = gpiod_get_value(master->gpio_data);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
+ gpiod_set_value(master->gpio_clk, 1);
return in ? 1 : 0;
}
@@ -113,10 +97,17 @@ static void set_sda_output(struct fsi_master_gpio *master, int value)
static void clock_zeros(struct fsi_master_gpio *master, int count)
{
+ trace_fsi_master_gpio_clock_zeros(master, count);
set_sda_output(master, 1);
clock_toggle(master, count);
}
+static void echo_delay(struct fsi_master_gpio *master)
+{
+ clock_zeros(master, master->t_echo_delay);
+}
+
+
static void serial_in(struct fsi_master_gpio *master, struct fsi_gpio_msg *msg,
uint8_t num_bits)
{
@@ -125,8 +116,7 @@ static void serial_in(struct fsi_master_gpio *master, struct fsi_gpio_msg *msg,
set_sda_input(master);
for (bit = 0; bit < num_bits; bit++) {
- clock_toggle(master, 1);
- in_bit = sda_in(master);
+ in_bit = sda_clock_in(master);
msg->msg <<= 1;
msg->msg |= ~in_bit & 0x1; /* Data is active low */
}
@@ -191,22 +181,92 @@ static void msg_push_crc(struct fsi_gpio_msg *msg)
msg_push_bits(msg, crc, 4);
}
+static bool check_same_address(struct fsi_master_gpio *master, int id,
+ uint32_t addr)
+{
+ /* this will also handle LAST_ADDR_INVALID */
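+ /* last_addr packs the 2-bit slave id above the 21-bit word address */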
+ return master->last_addr == (((id & 0x3) << 21) | (addr & ~0x3));
+}
+
+static bool check_relative_address(struct fsi_master_gpio *master, int id,
+ uint32_t addr, uint32_t *rel_addrp)
+{
+ uint32_t last_addr = master->last_addr;
+ int32_t rel_addr;
+
+ if (last_addr == LAST_ADDR_INVALID)
+ return false;
+
+ /* We may be in 23-bit addressing mode, which uses the id as the
+ * top two address bits. So, if we're referencing a different ID,
+ * use absolute addresses.
+ */
+ if (((last_addr >> 21) & 0x3) != id)
+ return false;
+
+ /* remove the top two bits from any 23-bit addressing */
+ last_addr &= (1 << 21) - 1;
+
+ /* We know that the addresses are limited to 21 bits, so this won't
+ * overflow the signed rel_addr */
+ rel_addr = addr - last_addr;
+ if (rel_addr > 255 || rel_addr < -256)
+ return false;
+
+ *rel_addrp = (uint32_t)rel_addr;
+
+ return true;
+}
+
+static void last_address_update(struct fsi_master_gpio *master,
+ int id, bool valid, uint32_t addr)
+{
+ if (!valid)
+ master->last_addr = LAST_ADDR_INVALID;
+ else
+ master->last_addr = ((id & 0x3) << 21) | (addr & ~0x3);
+}
+
/*
- * Encode an Absolute Address command
+ * Encode an Absolute/Relative/Same Address command
*/
-static void build_abs_ar_command(struct fsi_gpio_msg *cmd,
- uint8_t id, uint32_t addr, size_t size, const void *data)
+static void build_ar_command(struct fsi_master_gpio *master,
+ struct fsi_gpio_msg *cmd, uint8_t id,
+ uint32_t addr, size_t size, const void *data)
{
+ int i, addr_bits, opcode_bits;
bool write = !!data;
- uint8_t ds;
- int i;
+ uint8_t ds, opcode;
+ uint32_t rel_addr;
cmd->bits = 0;
cmd->msg = 0;
- msg_push_bits(cmd, id, 2);
- msg_push_bits(cmd, FSI_GPIO_CMD_ABS_AR, 3);
- msg_push_bits(cmd, write ? 0 : 1, 1);
+ /* we have 21 bits of address max */
+ addr &= ((1 << 21) - 1);
+
+ /* cmd opcodes are variable length - SAME_AR is only two bits */
+ opcode_bits = 3;
+
+ if (check_same_address(master, id, addr)) {
+ /* we still address the byte offset within the word */
+ addr_bits = 2;
+ opcode_bits = 2;
+ opcode = FSI_CMD_SAME_AR;
+ trace_fsi_master_gpio_cmd_same_addr(master);
+
+ } else if (check_relative_address(master, id, addr, &rel_addr)) {
+ /* 8 bits plus sign */
+ addr_bits = 9;
+ addr = rel_addr;
+ opcode = FSI_CMD_REL_AR;
+ trace_fsi_master_gpio_cmd_rel_addr(master, rel_addr);
+
+ } else {
+ addr_bits = 21;
+ opcode = FSI_CMD_ABS_AR;
+ trace_fsi_master_gpio_cmd_abs_addr(master, addr);
+ }
/*
* The read/write size is encoded in the lower bits of the address
@@ -223,7 +283,10 @@ static void build_abs_ar_command(struct fsi_gpio_msg *cmd,
if (size == 4)
addr |= 1;
- msg_push_bits(cmd, addr & ((1 << 21) - 1), 21);
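+ /* Serialize the frame: 2-bit slave id, variable-length opcode, r/w bit, then the address */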
+ msg_push_bits(cmd, id, 2);
+ msg_push_bits(cmd, opcode, opcode_bits);
+ msg_push_bits(cmd, write ? 0 : 1, 1);
+ msg_push_bits(cmd, addr, addr_bits);
msg_push_bits(cmd, ds, 1);
for (i = 0; write && i < size; i++)
msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
@@ -237,14 +300,18 @@ static void build_dpoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
- msg_push_bits(cmd, FSI_GPIO_CMD_DPOLL, 3);
+ msg_push_bits(cmd, FSI_CMD_DPOLL, 3);
msg_push_crc(cmd);
}
-static void echo_delay(struct fsi_master_gpio *master)
+static void build_epoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
{
- set_sda_output(master, 1);
- clock_toggle(master, FSI_ECHO_DELAY_CLOCKS);
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_EPOLL, 3);
+ msg_push_crc(cmd);
}
static void build_term_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
@@ -253,40 +320,40 @@ static void build_term_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
- msg_push_bits(cmd, FSI_GPIO_CMD_TERM, 6);
+ msg_push_bits(cmd, FSI_CMD_TERM, 6);
msg_push_crc(cmd);
}
/*
- * Store information on master errors so handler can detect and clean
- * up the bus
+ * Note: callers rely specifically on this returning -EAGAIN for
+ * a CRC error detected in the response. Use a different error code
+ * for other situations. It will be converted to something else
+ * higher up the stack before it reaches userspace.
*/
-static void fsi_master_gpio_error(struct fsi_master_gpio *master, int error)
-{
-
-}
-
static int read_one_response(struct fsi_master_gpio *master,
uint8_t data_size, struct fsi_gpio_msg *msgp, uint8_t *tagp)
{
struct fsi_gpio_msg msg;
- uint8_t id, tag;
+ unsigned long flags;
uint32_t crc;
+ uint8_t tag;
int i;
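+ /* The bit-banged response is timing-sensitive; keep interrupts off until it is fully clocked in */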
+ local_irq_save(flags);
+
/* wait for the start bit */
- for (i = 0; i < FSI_GPIO_MTOE_COUNT; i++) {
+ for (i = 0; i < FSI_MASTER_MTOE_COUNT; i++) {
msg.bits = 0;
msg.msg = 0;
serial_in(master, &msg, 1);
if (msg.msg)
break;
}
- if (i == FSI_GPIO_MTOE_COUNT) {
+ if (i == FSI_MASTER_MTOE_COUNT) {
dev_dbg(master->dev,
"Master time out waiting for response\n");
- fsi_master_gpio_error(master, FSI_GPIO_MTOE);
- return -EIO;
+ local_irq_restore(flags);
+ return -ETIMEDOUT;
}
msg.bits = 0;
@@ -295,23 +362,27 @@ static int read_one_response(struct fsi_master_gpio *master,
/* Read slave ID & response tag */
serial_in(master, &msg, 4);
- id = (msg.msg >> FSI_GPIO_MSG_RESPID_SIZE) & 0x3;
tag = msg.msg & 0x3;
/* If we have an ACK and we're expecting data, clock the data in too */
- if (tag == FSI_GPIO_RESP_ACK && data_size)
+ if (tag == FSI_RESP_ACK && data_size)
serial_in(master, &msg, data_size * 8);
/* read CRC */
- serial_in(master, &msg, FSI_GPIO_CRC_SIZE);
+ serial_in(master, &msg, FSI_CRC_SIZE);
+
+ local_irq_restore(flags);
/* we have a whole message now; check CRC */
crc = crc4(0, 1, 1);
crc = crc4(crc, msg.msg, msg.bits);
if (crc) {
- dev_dbg(master->dev, "ERR response CRC\n");
- fsi_master_gpio_error(master, FSI_GPIO_CRC_INVAL);
- return -EIO;
+ /* Check if it's all 1's, that probably means the host is off */
+ if (((~msg.msg) & ((1ull << msg.bits) - 1)) == 0)
+ return -ENODEV;
+ dev_dbg(master->dev, "ERR response CRC msg: 0x%016llx (%d bits)\n",
+ msg.msg, msg.bits);
+ return -EAGAIN;
}
if (msgp)
@@ -325,19 +396,23 @@ static int read_one_response(struct fsi_master_gpio *master,
static int issue_term(struct fsi_master_gpio *master, uint8_t slave)
{
struct fsi_gpio_msg cmd;
+ unsigned long flags;
uint8_t tag;
int rc;
build_term_command(&cmd, slave);
+
+ local_irq_save(flags);
serial_out(master, &cmd);
echo_delay(master);
+ local_irq_restore(flags);
rc = read_one_response(master, 0, NULL, &tag);
if (rc < 0) {
dev_err(master->dev,
"TERM failed; lost communication with slave\n");
return -EIO;
- } else if (tag != FSI_GPIO_RESP_ACK) {
+ } else if (tag != FSI_RESP_ACK) {
dev_err(master->dev, "TERM failed; response %d\n", tag);
return -EIO;
}
@@ -350,16 +425,39 @@ static int poll_for_response(struct fsi_master_gpio *master,
{
struct fsi_gpio_msg response, cmd;
int busy_count = 0, rc, i;
+ unsigned long flags;
uint8_t tag;
uint8_t *data_byte = data;
-
+ int crc_err_retries = 0;
retry:
rc = read_one_response(master, size, &response, &tag);
- if (rc)
- return rc;
+
+ /* Handle retries on CRC errors */
+ if (rc == -EAGAIN) {
+ /* Too many retries ? */
+ if (crc_err_retries++ > FSI_CRC_ERR_RETRIES) {
+ /*
+ * Pass it up as -EIO; otherwise the upper level will retry
+ * the whole command, which isn't what we want here.
+ */
+ rc = -EIO;
+ goto fail;
+ }
+ dev_dbg(master->dev,
+ "CRC error retry %d\n", crc_err_retries);
+ trace_fsi_master_gpio_crc_rsp_error(master);
+ build_epoll_command(&cmd, slave);
+ local_irq_save(flags);
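+ /* As with D_POLL, give the slave some clocks before issuing E_POLL */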
+ clock_zeros(master, FSI_MASTER_EPOLL_CLOCKS);
+ serial_out(master, &cmd);
+ echo_delay(master);
+ local_irq_restore(flags);
+ goto retry;
+ } else if (rc)
+ goto fail;
switch (tag) {
- case FSI_GPIO_RESP_ACK:
+ case FSI_RESP_ACK:
if (size && data) {
uint64_t val = response.msg;
/* clear crc & mask */
@@ -372,57 +470,89 @@ retry:
}
}
break;
- case FSI_GPIO_RESP_BUSY:
+ case FSI_RESP_BUSY:
/*
* It's necessary to clock the slave before issuing
* d-poll, not indicated in the hardware protocol
* spec. < 20 clocks causes slave to hang, 21 ok.
*/
- clock_zeros(master, FSI_GPIO_DPOLL_CLOCKS);
- if (busy_count++ < FSI_GPIO_MAX_BUSY) {
+ if (busy_count++ < FSI_MASTER_MAX_BUSY) {
build_dpoll_command(&cmd, slave);
+ local_irq_save(flags);
+ clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
serial_out(master, &cmd);
echo_delay(master);
+ local_irq_restore(flags);
goto retry;
}
dev_warn(master->dev,
"ERR slave is stuck in busy state, issuing TERM\n");
+ local_irq_save(flags);
+ clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
+ local_irq_restore(flags);
issue_term(master, slave);
rc = -EIO;
break;
- case FSI_GPIO_RESP_ERRA:
- case FSI_GPIO_RESP_ERRC:
- dev_dbg(master->dev, "ERR%c received: 0x%x\n",
- tag == FSI_GPIO_RESP_ERRA ? 'A' : 'C',
- (int)response.msg);
- fsi_master_gpio_error(master, response.msg);
+ case FSI_RESP_ERRA:
+ dev_dbg(master->dev, "ERRA received: 0x%x\n", (int)response.msg);
rc = -EIO;
break;
+ case FSI_RESP_ERRC:
+ dev_dbg(master->dev, "ERRC received: 0x%x\n", (int)response.msg);
+ trace_fsi_master_gpio_crc_cmd_error(master);
+ rc = -EAGAIN;
+ break;
}
- /* Clock the slave enough to be ready for next operation */
- clock_zeros(master, FSI_GPIO_PRIME_SLAVE_CLOCKS);
+ if (busy_count > 0)
+ trace_fsi_master_gpio_poll_response_busy(master, busy_count);
+ fail:
+ /*
+ * tSendDelay clocks, avoids signal reflections when switching
+ * from receive of response back to send of data.
+ */
+ local_irq_save(flags);
+ clock_zeros(master, master->t_send_delay);
+ local_irq_restore(flags);
+
return rc;
}
-static int fsi_master_gpio_xfer(struct fsi_master_gpio *master, uint8_t slave,
- struct fsi_gpio_msg *cmd, size_t resp_len, void *resp)
+static int send_request(struct fsi_master_gpio *master,
+ struct fsi_gpio_msg *cmd)
{
unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&master->cmd_lock, flags);
- if (master->external_mode) {
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ if (master->external_mode)
return -EBUSY;
- }
+ local_irq_save(flags);
serial_out(master, cmd);
echo_delay(master);
- rc = poll_for_response(master, slave, resp_len, resp);
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int fsi_master_gpio_xfer(struct fsi_master_gpio *master, uint8_t slave,
+ struct fsi_gpio_msg *cmd, size_t resp_len, void *resp)
+{
+ int rc = -EAGAIN, retries = 0;
+
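+ /* Retry the whole command when the slave reports a CRC error (ECRC) on what it received */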
+ while ((retries++) < FSI_CRC_ERR_RETRIES) {
+ rc = send_request(master, cmd);
+ if (rc)
+ break;
+ rc = poll_for_response(master, slave, resp_len, resp);
+ if (rc != -EAGAIN)
+ break;
+ rc = -EIO;
+ dev_warn(master->dev, "ECRC retry %d\n", retries);
+
+ /* Pace it a bit before retry */
+ msleep(1);
+ }
return rc;
}
@@ -432,12 +562,18 @@ static int fsi_master_gpio_read(struct fsi_master *_master, int link,
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
+ int rc;
if (link != 0)
return -ENODEV;
- build_abs_ar_command(&cmd, id, addr, size, NULL);
- return fsi_master_gpio_xfer(master, id, &cmd, size, val);
+ mutex_lock(&master->cmd_lock);
+ build_ar_command(master, &cmd, id, addr, size, NULL);
+ rc = fsi_master_gpio_xfer(master, id, &cmd, size, val);
+ last_address_update(master, id, rc == 0, addr);
+ mutex_unlock(&master->cmd_lock);
+
+ return rc;
}
static int fsi_master_gpio_write(struct fsi_master *_master, int link,
@@ -445,12 +581,18 @@ static int fsi_master_gpio_write(struct fsi_master *_master, int link,
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
+ int rc;
if (link != 0)
return -ENODEV;
- build_abs_ar_command(&cmd, id, addr, size, val);
- return fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ mutex_lock(&master->cmd_lock);
+ build_ar_command(master, &cmd, id, addr, size, val);
+ rc = fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, rc == 0, addr);
+ mutex_unlock(&master->cmd_lock);
+
+ return rc;
}
static int fsi_master_gpio_term(struct fsi_master *_master,
@@ -458,12 +600,18 @@ static int fsi_master_gpio_term(struct fsi_master *_master,
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
+ int rc;
if (link != 0)
return -ENODEV;
+ mutex_lock(&master->cmd_lock);
build_term_command(&cmd, id);
- return fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ rc = fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, false, 0);
+ mutex_unlock(&master->cmd_lock);
+
+ return rc;
}
static int fsi_master_gpio_break(struct fsi_master *_master, int link)
@@ -476,11 +624,14 @@ static int fsi_master_gpio_break(struct fsi_master *_master, int link)
trace_fsi_master_gpio_break(master);
- spin_lock_irqsave(&master->cmd_lock, flags);
+ mutex_lock(&master->cmd_lock);
if (master->external_mode) {
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ mutex_unlock(&master->cmd_lock);
return -EBUSY;
}
+
+ local_irq_save(flags);
+
set_sda_output(master, 1);
sda_out(master, 1);
clock_toggle(master, FSI_PRE_BREAK_CLOCKS);
@@ -489,7 +640,11 @@ static int fsi_master_gpio_break(struct fsi_master *_master, int link)
echo_delay(master);
sda_out(master, 1);
clock_toggle(master, FSI_POST_BREAK_CLOCKS);
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+
+ local_irq_restore(flags);
+
+ last_address_update(master, 0, false, 0);
+ mutex_unlock(&master->cmd_lock);
/* Wait for logic reset to take effect */
udelay(200);
@@ -499,6 +654,8 @@ static int fsi_master_gpio_break(struct fsi_master *_master, int link)
static void fsi_master_gpio_init(struct fsi_master_gpio *master)
{
+ unsigned long flags;
+
gpiod_direction_output(master->gpio_mux, 1);
gpiod_direction_output(master->gpio_trans, 1);
gpiod_direction_output(master->gpio_enable, 1);
@@ -506,7 +663,9 @@ static void fsi_master_gpio_init(struct fsi_master_gpio *master)
gpiod_direction_output(master->gpio_data, 1);
/* todo: evaluate if clocks can be reduced */
+ local_irq_save(flags);
clock_zeros(master, FSI_INIT_CLOCKS);
+ local_irq_restore(flags);
}
static void fsi_master_gpio_init_external(struct fsi_master_gpio *master)
@@ -521,22 +680,37 @@ static void fsi_master_gpio_init_external(struct fsi_master_gpio *master)
static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
- unsigned long flags;
int rc = -EBUSY;
if (link != 0)
return -ENODEV;
- spin_lock_irqsave(&master->cmd_lock, flags);
+ mutex_lock(&master->cmd_lock);
if (!master->external_mode) {
gpiod_set_value(master->gpio_enable, 1);
rc = 0;
}
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ mutex_unlock(&master->cmd_lock);
return rc;
}
+static int fsi_master_gpio_link_config(struct fsi_master *_master, int link,
+ u8 t_send_delay, u8 t_echo_delay)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->cmd_lock);
+ master->t_send_delay = t_send_delay;
+ master->t_echo_delay = t_echo_delay;
+ mutex_unlock(&master->cmd_lock);
+
+ return 0;
+}
+
static ssize_t external_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -550,7 +724,7 @@ static ssize_t external_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fsi_master_gpio *master = dev_get_drvdata(dev);
- unsigned long flags, val;
+ unsigned long val;
bool external_mode;
int err;
@@ -560,10 +734,10 @@ static ssize_t external_mode_store(struct device *dev,
external_mode = !!val;
- spin_lock_irqsave(&master->cmd_lock, flags);
+ mutex_lock(&master->cmd_lock);
if (external_mode == master->external_mode) {
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ mutex_unlock(&master->cmd_lock);
return count;
}
@@ -572,7 +746,8 @@ static ssize_t external_mode_store(struct device *dev,
fsi_master_gpio_init_external(master);
else
fsi_master_gpio_init(master);
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+
+ mutex_unlock(&master->cmd_lock);
fsi_master_rescan(&master->master);
@@ -582,31 +757,44 @@ static ssize_t external_mode_store(struct device *dev,
static DEVICE_ATTR(external_mode, 0664,
external_mode_show, external_mode_store);
+static void fsi_master_gpio_release(struct device *dev)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(dev_to_fsi_master(dev));
+
+ of_node_put(dev_of_node(master->dev));
+
+ kfree(master);
+}
+
static int fsi_master_gpio_probe(struct platform_device *pdev)
{
struct fsi_master_gpio *master;
struct gpio_desc *gpio;
int rc;
- master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
master->dev = &pdev->dev;
master->master.dev.parent = master->dev;
master->master.dev.of_node = of_node_get(dev_of_node(master->dev));
+ master->master.dev.release = fsi_master_gpio_release;
+ master->last_addr = LAST_ADDR_INVALID;
gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get clock gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_clk = gpio;
gpio = devm_gpiod_get(&pdev->dev, "data", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get data gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_data = gpio;
@@ -614,24 +802,38 @@ static int fsi_master_gpio_probe(struct platform_device *pdev)
gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get trans gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_trans = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get enable gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_enable = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get mux gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_mux = gpio;
+ /*
+ * Check if the GPIO block is slow enough that no extra delays
+ * are necessary. This improves performance on the ast2500 by
+ * an order of magnitude.
+ */
+ master->no_delays = device_property_present(&pdev->dev, "no-gpio-delays");
+
+ /* Default FSI command delays */
+ master->t_send_delay = FSI_SEND_DELAY_CLOCKS;
+ master->t_echo_delay = FSI_ECHO_DELAY_CLOCKS;
+
master->master.n_links = 1;
master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
master->master.read = fsi_master_gpio_read;
@@ -639,34 +841,37 @@ static int fsi_master_gpio_probe(struct platform_device *pdev)
master->master.term = fsi_master_gpio_term;
master->master.send_break = fsi_master_gpio_break;
master->master.link_enable = fsi_master_gpio_link_enable;
+ master->master.link_config = fsi_master_gpio_link_config;
platform_set_drvdata(pdev, master);
- spin_lock_init(&master->cmd_lock);
+ mutex_init(&master->cmd_lock);
fsi_master_gpio_init(master);
rc = device_create_file(&pdev->dev, &dev_attr_external_mode);
if (rc)
- return rc;
+ goto err_free;
- return fsi_master_register(&master->master);
+ rc = fsi_master_register(&master->master);
+ if (rc) {
+ device_remove_file(&pdev->dev, &dev_attr_external_mode);
+ put_device(&master->master.dev);
+ return rc;
+ }
+ return 0;
+ err_free:
+ kfree(master);
+ return rc;
}
+
static int fsi_master_gpio_remove(struct platform_device *pdev)
{
struct fsi_master_gpio *master = platform_get_drvdata(pdev);
- devm_gpiod_put(&pdev->dev, master->gpio_clk);
- devm_gpiod_put(&pdev->dev, master->gpio_data);
- if (master->gpio_trans)
- devm_gpiod_put(&pdev->dev, master->gpio_trans);
- if (master->gpio_enable)
- devm_gpiod_put(&pdev->dev, master->gpio_enable);
- if (master->gpio_mux)
- devm_gpiod_put(&pdev->dev, master->gpio_mux);
- fsi_master_unregister(&master->master);
+ device_remove_file(&pdev->dev, &dev_attr_external_mode);
- of_node_put(master->master.dev.of_node);
+ fsi_master_unregister(&master->master);
return 0;
}
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
index 5885fc4a1ef0..b3c1e9debcf2 100644
--- a/drivers/fsi/fsi-master-hub.c
+++ b/drivers/fsi/fsi-master-hub.c
@@ -122,7 +122,8 @@ static int hub_master_write(struct fsi_master *master, int link,
static int hub_master_break(struct fsi_master *master, int link)
{
- uint32_t addr, cmd;
+ uint32_t addr;
+ __be32 cmd;
addr = 0x4;
cmd = cpu_to_be32(0xc0de0000);
@@ -205,7 +206,7 @@ static int hub_master_init(struct fsi_master_hub *hub)
if (rc)
return rc;
- reg = ~0;
+ reg = cpu_to_be32(~0);
rc = fsi_device_write(dev, FSI_MSENP0, &reg, sizeof(reg));
if (rc)
return rc;
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index ee0b46086026..040a7d4cf717 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -18,7 +18,41 @@
#define DRIVERS_FSI_MASTER_H
#include <linux/device.h>
+#include <linux/mutex.h>
+/* Various protocol delays */
+#define FSI_ECHO_DELAY_CLOCKS 16 /* Number clocks for echo delay */
+#define FSI_SEND_DELAY_CLOCKS 16 /* Number clocks for send delay */
+#define FSI_PRE_BREAK_CLOCKS 50 /* Number clocks to prep for break */
+#define FSI_BREAK_CLOCKS 256 /* Number of clocks to issue break */
+#define FSI_POST_BREAK_CLOCKS 16000 /* Number clocks to set up cfam */
+#define FSI_INIT_CLOCKS 5000 /* Clock out any old data */
+#define FSI_MASTER_DPOLL_CLOCKS 50 /* < 21 will cause slave to hang */
+#define FSI_MASTER_EPOLL_CLOCKS 50 /* Number of clocks for E_POLL retry */
+
+/* Various retry maximums */
+#define FSI_CRC_ERR_RETRIES 10
+#define FSI_MASTER_MAX_BUSY 200
+#define FSI_MASTER_MTOE_COUNT 1000
+
+/* Command encodings */
+#define FSI_CMD_DPOLL 0x2
+#define FSI_CMD_EPOLL 0x3
+#define FSI_CMD_TERM 0x3f
+#define FSI_CMD_ABS_AR 0x4
+#define FSI_CMD_REL_AR 0x5
+#define FSI_CMD_SAME_AR 0x3 /* but only a 2-bit opcode... */
+
+/* Slave responses */
+#define FSI_RESP_ACK 0 /* Success */
+#define FSI_RESP_BUSY 1 /* Slave busy */
+#define FSI_RESP_ERRA 2 /* Any (misc) Error */
+#define FSI_RESP_ERRC 3 /* Slave reports master CRC error */
+
+/* Misc */
+#define FSI_CRC_SIZE 4
+
+/* fsi-master definition and flags */
#define FSI_MASTER_FLAG_SWCLOCK 0x1
struct fsi_master {
@@ -26,6 +60,7 @@ struct fsi_master {
int idx;
int n_links;
int flags;
+ struct mutex scan_lock;
int (*read)(struct fsi_master *, int link, uint8_t id,
uint32_t addr, void *val, size_t size);
int (*write)(struct fsi_master *, int link, uint8_t id,
@@ -33,6 +68,8 @@ struct fsi_master {
int (*term)(struct fsi_master *, int link, uint8_t id);
int (*send_break)(struct fsi_master *, int link);
int (*link_enable)(struct fsi_master *, int link);
+ int (*link_config)(struct fsi_master *, int link,
+ u8 t_send_delay, u8 t_echo_delay);
};
#define dev_to_fsi_master(d) container_of(d, struct fsi_master, dev)
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
new file mode 100644
index 000000000000..ae861342626e
--- /dev/null
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -0,0 +1,1066 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) IBM Corporation 2017
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsi.h>
+#include <linux/fsi-sbefifo.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+/*
+ * The SBEFIFO is a pipe-like FSI device for communicating with
+ * the self boot engine on POWER processors.
+ */
+
+#define DEVICE_NAME "sbefifo"
+#define FSI_ENGID_SBE 0x22
+
+/*
+ * Register layout
+ */
+
+/* Register banks */
+#define SBEFIFO_UP 0x00 /* FSI -> Host */
+#define SBEFIFO_DOWN 0x40 /* Host -> FSI */
+
+/* Per-bank registers */
+#define SBEFIFO_FIFO 0x00 /* The FIFO itself */
+#define SBEFIFO_STS 0x04 /* Status register */
+#define SBEFIFO_STS_PARITY_ERR 0x20000000
+#define SBEFIFO_STS_RESET_REQ 0x02000000
+#define SBEFIFO_STS_GOT_EOT 0x00800000
+#define SBEFIFO_STS_MAX_XFER_LIMIT 0x00400000
+#define SBEFIFO_STS_FULL 0x00200000
+#define SBEFIFO_STS_EMPTY 0x00100000
+#define SBEFIFO_STS_ECNT_MASK 0x000f0000
+#define SBEFIFO_STS_ECNT_SHIFT 16
+#define SBEFIFO_STS_VALID_MASK 0x0000ff00
+#define SBEFIFO_STS_VALID_SHIFT 8
+#define SBEFIFO_STS_EOT_MASK 0x000000ff
+#define SBEFIFO_STS_EOT_SHIFT 0
+#define SBEFIFO_EOT_RAISE 0x08 /* (Up only) Set End Of Transfer */
+#define SBEFIFO_REQ_RESET 0x0C /* (Up only) Reset Request */
+#define SBEFIFO_PERFORM_RESET 0x10 /* (Down only) Perform Reset */
+#define SBEFIFO_EOT_ACK 0x14 /* (Down only) Acknowledge EOT */
+#define SBEFIFO_DOWN_MAX 0x18 /* (Down only) Max transfer */
+
+/* CFAM GP Mailbox SelfBoot Message register */
+#define CFAM_GP_MBOX_SBM_ADDR 0x2824 /* Converted 0x2809 */
+
+#define CFAM_SBM_SBE_BOOTED 0x80000000
+#define CFAM_SBM_SBE_ASYNC_FFDC 0x40000000
+#define CFAM_SBM_SBE_STATE_MASK 0x00f00000
+#define CFAM_SBM_SBE_STATE_SHIFT 20
+
+enum sbe_state
+{
+ SBE_STATE_UNKNOWN = 0x0, // Unknown, initial state
+ SBE_STATE_IPLING = 0x1, // IPL'ing - autonomous mode (transient)
+ SBE_STATE_ISTEP = 0x2, // ISTEP - Running IPL by steps (transient)
+ SBE_STATE_MPIPL = 0x3, // MPIPL
+ SBE_STATE_RUNTIME = 0x4, // SBE Runtime
+ SBE_STATE_DMT = 0x5, // Dead Man Timer State (transient)
+ SBE_STATE_DUMP = 0x6, // Dumping
+ SBE_STATE_FAILURE = 0x7, // Internal SBE failure
+ SBE_STATE_QUIESCE = 0x8, // Final state - needs SBE reset to get out
+};
+
+/* FIFO depth */
+#define SBEFIFO_FIFO_DEPTH 8
+
+/* Helpers */
+#define sbefifo_empty(sts) ((sts) & SBEFIFO_STS_EMPTY)
+#define sbefifo_full(sts) ((sts) & SBEFIFO_STS_FULL)
+#define sbefifo_parity_err(sts) ((sts) & SBEFIFO_STS_PARITY_ERR)
+#define sbefifo_populated(sts) (((sts) & SBEFIFO_STS_ECNT_MASK) >> SBEFIFO_STS_ECNT_SHIFT)
+#define sbefifo_vacant(sts) (SBEFIFO_FIFO_DEPTH - sbefifo_populated(sts))
+#define sbefifo_eot_set(sts) (((sts) & SBEFIFO_STS_EOT_MASK) >> SBEFIFO_STS_EOT_SHIFT)
+
+/* Reset request timeout in ms */
+#define SBEFIFO_RESET_TIMEOUT 10000
+
+/* Timeouts for commands in ms */
+#define SBEFIFO_TIMEOUT_START_CMD 10000
+#define SBEFIFO_TIMEOUT_IN_CMD 1000
+#define SBEFIFO_TIMEOUT_START_RSP 10000
+#define SBEFIFO_TIMEOUT_IN_RSP 1000
+
+/* Other constants */
+#define SBEFIFO_MAX_USER_CMD_LEN (0x100000 + PAGE_SIZE)
+#define SBEFIFO_RESET_MAGIC 0x52534554 /* "RSET" */
+
+struct sbefifo {
+ uint32_t magic;
+#define SBEFIFO_MAGIC 0x53424546 /* "SBEF" */
+ struct fsi_device *fsi_dev;
+ struct device dev;
+ struct cdev cdev;
+ struct mutex lock;
+ bool broken;
+ bool dead;
+ bool async_ffdc;
+};
+
+struct sbefifo_user {
+ struct sbefifo *sbefifo;
+ struct mutex file_lock;
+ void *cmd_page;
+ void *pending_cmd;
+ size_t pending_len;
+};
+
+static DEFINE_MUTEX(sbefifo_ffdc_mutex);
+
+
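+/*
+ * Each FFDC package starts with three header words: a signature
+ * (0xFFDC in the top half, data length in words in the bottom half),
+ * the originating command, and a response code, followed by the data
+ * words themselves.
+ */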
+static void __sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
+ size_t ffdc_sz, bool internal)
+{
+ int pack = 0;
+#define FFDC_LSIZE 60
+ static char ffdc_line[FFDC_LSIZE];
+ char *p = ffdc_line;
+
+ while (ffdc_sz) {
+ u32 w0, w1, w2, i;
+ if (ffdc_sz < 3) {
+ dev_err(dev, "SBE invalid FFDC package size %zd\n", ffdc_sz);
+ return;
+ }
+ w0 = be32_to_cpu(*(ffdc++));
+ w1 = be32_to_cpu(*(ffdc++));
+ w2 = be32_to_cpu(*(ffdc++));
+ ffdc_sz -= 3;
+ if ((w0 >> 16) != 0xFFDC) {
+ dev_err(dev, "SBE invalid FFDC package signature %08x %08x %08x\n",
+ w0, w1, w2);
+ break;
+ }
+ w0 &= 0xffff;
+ if (w0 > ffdc_sz) {
+ dev_err(dev, "SBE FFDC package len %d words but only %zd remaining\n",
+ w0, ffdc_sz);
+ w0 = ffdc_sz;
+ break;
+ }
+ if (internal) {
+ dev_warn(dev, "+---- SBE FFDC package %d for async err -----+\n",
+ pack++);
+ } else {
+ dev_warn(dev, "+---- SBE FFDC package %d for cmd %02x:%02x -----+\n",
+ pack++, (w1 >> 8) & 0xff, w1 & 0xff);
+ }
+ dev_warn(dev, "| Response code: %08x |\n", w2);
+ dev_warn(dev, "|-------------------------------------------|\n");
+ for (i = 0; i < w0; i++) {
+ if ((i & 3) == 0) {
+ p = ffdc_line;
+ p += sprintf(p, "| %04x:", i << 4);
+ }
+ p += sprintf(p, " %08x", be32_to_cpu(*(ffdc++)));
+ ffdc_sz--;
+ if ((i & 3) == 3 || i == (w0 - 1)) {
+ while ((i & 3) < 3) {
+ p += sprintf(p, " ");
+ i++;
+ }
+ dev_warn(dev, "%s |\n", ffdc_line);
+ }
+ }
+ dev_warn(dev, "+-------------------------------------------+\n");
+ }
+}
+
+static void sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
+ size_t ffdc_sz, bool internal)
+{
+ mutex_lock(&sbefifo_ffdc_mutex);
+ __sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, internal);
+ mutex_unlock(&sbefifo_ffdc_mutex);
+}
+
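+/*
+ * A response ends with a status block: a magic word (0xC0DE in the top
+ * half, the command in the bottom half), a status word, optional FFDC
+ * words, and finally a "distance" word giving the offset in words from
+ * the end of the response back to the magic word.
+ */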
+int sbefifo_parse_status(struct device *dev, u16 cmd, __be32 *response,
+ size_t resp_len, size_t *data_len)
+{
+ u32 dh, s0, s1;
+ size_t ffdc_sz;
+
+ if (resp_len < 3) {
+ pr_debug("sbefifo: cmd %04x, response too small: %zd\n",
+ cmd, resp_len);
+ return -ENXIO;
+ }
+ dh = be32_to_cpu(response[resp_len - 1]);
+ if (dh > resp_len || dh < 3) {
+ dev_err(dev, "SBE cmd %02x:%02x status offset out of range: %d/%zd\n",
+ cmd >> 8, cmd & 0xff, dh, resp_len);
+ return -ENXIO;
+ }
+ s0 = be32_to_cpu(response[resp_len - dh]);
+ s1 = be32_to_cpu(response[resp_len - dh + 1]);
+ if (((s0 >> 16) != 0xC0DE) || ((s0 & 0xffff) != cmd)) {
+ dev_err(dev, "SBE cmd %02x:%02x, status signature invalid: 0x%08x 0x%08x\n",
+ cmd >> 8, cmd & 0xff, s0, s1);
+ return -ENXIO;
+ }
+ if (s1 != 0) {
+ ffdc_sz = dh - 3;
+ dev_warn(dev, "SBE error cmd %02x:%02x status=%04x:%04x\n",
+ cmd >> 8, cmd & 0xff, s1 >> 16, s1 & 0xffff);
+ if (ffdc_sz)
+ sbefifo_dump_ffdc(dev, &response[resp_len - dh + 2],
+ ffdc_sz, false);
+ }
+ if (data_len)
+ *data_len = resp_len - dh;
+
+ /*
+ * Primary status doesn't have the top bit set, so it can't be confused
+ * with Linux negative error codes; return the status word whole.
+ */
+ return s1;
+}
+EXPORT_SYMBOL_GPL(sbefifo_parse_status);
+
+static int sbefifo_regr(struct sbefifo *sbefifo, int reg, u32 *word)
+{
+ __be32 raw_word;
+ int rc;
+
+ rc = fsi_device_read(sbefifo->fsi_dev, reg, &raw_word,
+ sizeof(raw_word));
+ if (rc)
+ return rc;
+
+ *word = be32_to_cpu(raw_word);
+
+ return 0;
+}
+
+static int sbefifo_regw(struct sbefifo *sbefifo, int reg, u32 word)
+{
+ __be32 raw_word = cpu_to_be32(word);
+
+ return fsi_device_write(sbefifo->fsi_dev, reg, &raw_word,
+ sizeof(raw_word));
+}
+
+static int sbefifo_check_sbe_state(struct sbefifo *sbefifo)
+{
+ __be32 raw_word;
+ u32 sbm;
+ int rc;
+
+ rc = fsi_slave_read(sbefifo->fsi_dev->slave, CFAM_GP_MBOX_SBM_ADDR,
+ &raw_word, sizeof(raw_word));
+ if (rc)
+ return rc;
+ sbm = be32_to_cpu(raw_word);
+
+ /* Has the SBE booted at all? */
+ if (!(sbm & CFAM_SBM_SBE_BOOTED))
+ return -ESHUTDOWN;
+
+ /* Check its state */
+ switch ((sbm & CFAM_SBM_SBE_STATE_MASK) >> CFAM_SBM_SBE_STATE_SHIFT) {
+ case SBE_STATE_UNKNOWN:
+ return -ESHUTDOWN;
+ case SBE_STATE_IPLING:
+ case SBE_STATE_ISTEP:
+ case SBE_STATE_MPIPL:
+ case SBE_STATE_DMT:
+ return -EBUSY;
+ case SBE_STATE_RUNTIME:
+ case SBE_STATE_DUMP: /* Not sure about that one */
+ break;
+ case SBE_STATE_FAILURE:
+ case SBE_STATE_QUIESCE:
+ return -ESHUTDOWN;
+ }
+
+ /* Is there async FFDC available? Remember it */
+ if (sbm & CFAM_SBM_SBE_ASYNC_FFDC)
+ sbefifo->async_ffdc = true;
+
+ return 0;
+}
+
+/* Don't flip endianness of data to/from FIFO, just pass through. */
+static int sbefifo_down_read(struct sbefifo *sbefifo, __be32 *word)
+{
+ return fsi_device_read(sbefifo->fsi_dev, SBEFIFO_DOWN, word,
+ sizeof(*word));
+}
+
+static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
+{
+ return fsi_device_write(sbefifo->fsi_dev, SBEFIFO_UP, &word,
+ sizeof(word));
+}
+
+static int sbefifo_request_reset(struct sbefifo *sbefifo)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ u32 status, timeout;
+ int rc;
+
+ dev_dbg(dev, "Requesting FIFO reset\n");
+
+ /* Mark broken first, will be cleared if reset succeeds */
+ sbefifo->broken = true;
+
+ /* Send reset request */
+ rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_REQ_RESET, 1);
+ if (rc) {
+ dev_err(dev, "Sending reset request failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Wait for it to complete */
+ for (timeout = 0; timeout < SBEFIFO_RESET_TIMEOUT; timeout++) {
+ rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
+ if (rc) {
+ dev_err(dev, "Failed to read UP fifo status during reset"
+ " , rc=%d\n", rc);
+ return rc;
+ }
+
+ if (!(status & SBEFIFO_STS_RESET_REQ)) {
+ dev_dbg(dev, "FIFO reset done\n");
+ sbefifo->broken = false;
+ return 0;
+ }
+
+ msleep(1);
+ }
+ dev_err(dev, "FIFO reset timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ u32 up_status, down_status;
+ bool need_reset = false;
+ int rc;
+
+ rc = sbefifo_check_sbe_state(sbefifo);
+ if (rc) {
+ dev_dbg(dev, "SBE state=%d\n", rc);
+ return rc;
+ }
+
+ /* If broken, we don't need to look at status, go straight to reset */
+ if (sbefifo->broken)
+ goto do_reset;
+
+ rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &up_status);
+ if (rc) {
+ dev_err(dev, "Cleanup: Reading UP status failed, rc=%d\n", rc);
+
+ /* Will try reset again on next attempt at using it */
+ sbefifo->broken = true;
+ return rc;
+ }
+
+ rc = sbefifo_regr(sbefifo, SBEFIFO_DOWN | SBEFIFO_STS, &down_status);
+ if (rc) {
+ dev_err(dev, "Cleanup: Reading DOWN status failed, rc=%d\n", rc);
+
+ /* Will try reset again on next attempt at using it */
+ sbefifo->broken = true;
+ return rc;
+ }
+
+ /* Does the FIFO already contain a reset request from the SBE? */
+ if (down_status & SBEFIFO_STS_RESET_REQ) {
+ dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
+ rc = sbefifo_regw(sbefifo, SBEFIFO_UP, SBEFIFO_PERFORM_RESET);
+ if (rc) {
+ sbefifo->broken = true;
+ dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
+ return rc;
+ }
+ sbefifo->broken = false;
+ return 0;
+ }
+
+ /* Parity error on either FIFO ? */
+ if ((up_status | down_status) & SBEFIFO_STS_PARITY_ERR)
+ need_reset = true;
+
+ /* Either FIFO not empty ? */
+ if (!((up_status & down_status) & SBEFIFO_STS_EMPTY))
+ need_reset = true;
+
+ if (!need_reset)
+ return 0;
+
+ dev_info(dev, "Cleanup: FIFO not clean (up=0x%08x down=0x%08x)\n",
+ up_status, down_status);
+
+ do_reset:
+
+ /* Mark broken, will be cleared if/when reset succeeds */
+ return sbefifo_request_reset(sbefifo);
+}
+
+static int sbefifo_wait(struct sbefifo *sbefifo, bool up,
+ u32 *status, unsigned long timeout)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ unsigned long end_time;
+ bool ready = false;
+ u32 addr, sts = 0;
+ int rc;
+
+ dev_vdbg(dev, "Wait on %s fifo...\n", up ? "up" : "down");
+
+ addr = (up ? SBEFIFO_UP : SBEFIFO_DOWN) | SBEFIFO_STS;
+
+ end_time = jiffies + timeout;
+ while (!time_after(jiffies, end_time)) {
+ cond_resched();
+ rc = sbefifo_regr(sbefifo, addr, &sts);
+ if (rc < 0) {
+ dev_err(dev, "FSI error %d reading status register\n", rc);
+ return rc;
+ }
+ if (!up && sbefifo_parity_err(sts)) {
+ dev_err(dev, "Parity error in DOWN FIFO\n");
+ return -ENXIO;
+ }
+ ready = !(up ? sbefifo_full(sts) : sbefifo_empty(sts));
+ if (ready)
+ break;
+ }
+ if (!ready) {
+ dev_err(dev, "%s FIFO Timeout ! status=%08x\n", up ? "UP" : "DOWN", sts);
+ return -ETIMEDOUT;
+ }
+ dev_vdbg(dev, "End of wait status: %08x\n", sts);
+
+ *status = sts;
+
+ return 0;
+}
+
+static int sbefifo_send_command(struct sbefifo *sbefifo,
+ const __be32 *command, size_t cmd_len)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ size_t len, chunk, vacant = 0, remaining = cmd_len;
+ unsigned long timeout;
+ u32 status;
+ int rc;
+
+ dev_vdbg(dev, "sending command (%zd words, cmd=%04x)\n",
+ cmd_len, be32_to_cpu(command[1]));
+
+ /* As long as there's something to send */
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_CMD);
+ while (remaining) {
+ /* Wait for room in the FIFO */
+ rc = sbefifo_wait(sbefifo, true, &status, timeout);
+ if (rc < 0)
+ return rc;
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_CMD);
+
+ vacant = sbefifo_vacant(status);
+ len = chunk = min(vacant, remaining);
+
+ dev_vdbg(dev, " status=%08x vacant=%zd chunk=%zd\n",
+ status, vacant, chunk);
+
+ /* Write as much as we can */
+ while (len--) {
+ rc = sbefifo_up_write(sbefifo, *(command++));
+ if (rc) {
+ dev_err(dev, "FSI error %d writing UP FIFO\n", rc);
+ return rc;
+ }
+ }
+ remaining -= chunk;
+ vacant -= chunk;
+ }
+
+ /* If there's no room left, wait for some to write EOT */
+ if (!vacant) {
+ rc = sbefifo_wait(sbefifo, true, &status, timeout);
+ if (rc)
+ return rc;
+ }
+
+ /* Send an EOT */
+ rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_EOT_RAISE, 0);
+ if (rc)
+ dev_err(dev, "FSI error %d writing EOT\n", rc);
+ return rc;
+}
+
+static int sbefifo_read_response(struct sbefifo *sbefifo, struct iov_iter *response)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ u32 status, eot_set;
+ unsigned long timeout;
+ bool overflow = false;
+ __be32 data;
+ size_t len;
+ int rc;
+
+ dev_vdbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));
+
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_RSP);
+ for (;;) {
+ /* Grab FIFO status (this will handle parity errors) */
+ rc = sbefifo_wait(sbefifo, false, &status, timeout);
+ if (rc < 0)
+ return rc;
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_RSP);
+
+ /* Decode status */
+ len = sbefifo_populated(status);
+ eot_set = sbefifo_eot_set(status);
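+ /* eot_set has one flag bit per FIFO entry in this chunk, MSB first */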
+
+ dev_vdbg(dev, " chunk size %zd eot_set=0x%x\n", len, eot_set);
+
+ /* Go through the chunk */
+ while(len--) {
+ /* Read the data */
+ rc = sbefifo_down_read(sbefifo, &data);
+ if (rc < 0)
+ return rc;
+
+ /* Was it an EOT? */
+ if (eot_set & 0x80) {
+ /*
+ * There should be nothing else in the FIFO,
+ * if there is, mark broken, this will force
+ * a reset on next use, but don't fail the
+ * command.
+ */
+ if (len) {
+ dev_warn(dev, "FIFO read hit"
+ " EOT with still %zd data\n",
+ len);
+ sbefifo->broken = true;
+ }
+
+ /* We are done */
+ rc = sbefifo_regw(sbefifo,
+ SBEFIFO_DOWN | SBEFIFO_EOT_ACK, 0);
+
+ /*
+ * If that write fails, still complete the request but mark
+ * the fifo as broken for a subsequent reset (not much else
+ * we can do here).
+ */
+ if (rc) {
+ dev_err(dev, "FSI error %d ack'ing EOT\n", rc);
+ sbefifo->broken = true;
+ }
+
+ /* Tell whether we overflowed */
+ return overflow ? -EOVERFLOW : 0;
+ }
+
+ /* Store it if there is room */
+ if (iov_iter_count(response) >= sizeof(__be32)) {
+ if (copy_to_iter(&data, sizeof(__be32), response) < sizeof(__be32))
+ return -EFAULT;
+ } else {
+ dev_vdbg(dev, "Response overflowed !\n");
+
+ overflow = true;
+ }
+
+ /* Next EOT bit */
+ eot_set <<= 1;
+ }
+ }
+ /* Shouldn't happen */
+ return -EIO;
+}
+
+static int sbefifo_do_command(struct sbefifo *sbefifo,
+ const __be32 *command, size_t cmd_len,
+ struct iov_iter *response)
+{
+ /* Try sending the command */
+ int rc = sbefifo_send_command(sbefifo, command, cmd_len);
+ if (rc)
+ return rc;
+
+ /* Now, get the response */
+ return sbefifo_read_response(sbefifo, response);
+}
+
+static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ struct iov_iter ffdc_iter;
+ struct kvec ffdc_iov;
+ __be32 *ffdc;
+ size_t ffdc_sz;
+ __be32 cmd[2];
+ int rc;
+
+ sbefifo->async_ffdc = false;
+ ffdc = vmalloc(SBEFIFO_MAX_FFDC_SIZE);
+ if (!ffdc) {
+ dev_err(dev, "Failed to allocate SBE FFDC buffer\n");
+ return;
+ }
+ ffdc_iov.iov_base = ffdc;
+ ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
+ iov_iter_kvec(&ffdc_iter, WRITE | ITER_KVEC, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
+ cmd[0] = cpu_to_be32(2);
+ cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
+ rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);
+ if (rc != 0) {
+ dev_err(dev, "Error %d retrieving SBE FFDC\n", rc);
+ goto bail;
+ }
+ ffdc_sz = SBEFIFO_MAX_FFDC_SIZE - iov_iter_count(&ffdc_iter);
+ ffdc_sz /= sizeof(__be32);
+ rc = sbefifo_parse_status(dev, SBEFIFO_CMD_GET_SBE_FFDC, ffdc,
+ ffdc_sz, &ffdc_sz);
+ if (rc != 0) {
+ dev_err(dev, "Error %d decoding SBE FFDC\n", rc);
+ goto bail;
+ }
+ if (ffdc_sz > 0)
+ sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, true);
+ bail:
+ vfree(ffdc);
+
+}
+
+static int __sbefifo_submit(struct sbefifo *sbefifo,
+ const __be32 *command, size_t cmd_len,
+ struct iov_iter *response)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ int rc;
+
+ if (sbefifo->dead)
+ return -ENODEV;
+
+ if (cmd_len < 2 || be32_to_cpu(command[0]) != cmd_len) {
+ dev_vdbg(dev, "Invalid command len %zd (header: %d)\n",
+ cmd_len, be32_to_cpu(command[0]));
+ return -EINVAL;
+ }
+
+ /* First ensure the HW is in a clean state */
+ rc = sbefifo_cleanup_hw(sbefifo);
+ if (rc)
+ return rc;
+
+ /* Look for async FFDC first if any */
+ if (sbefifo->async_ffdc)
+ sbefifo_collect_async_ffdc(sbefifo);
+
+ rc = sbefifo_do_command(sbefifo, command, cmd_len, response);
+ if (rc != 0 && rc != -EOVERFLOW)
+ goto fail;
+ return rc;
+ fail:
+ /*
+ * On failure, attempt a reset. Ignore the result, it will mark
+ * the fifo broken if the reset fails
+ */
+ sbefifo_request_reset(sbefifo);
+
+ /* Return original error */
+ return rc;
+}
+
+/**
+ * sbefifo_submit() - Submit an SBE FIFO command and receive a response
+ * @dev: The sbefifo device
+ * @command: The raw command data
+ * @cmd_len: The command size (in 32-bit words)
+ * @response: The output response buffer
+ * @resp_len: In: Response buffer size, Out: Response size
+ *
+ * This will perform the entire operation. If the response buffer
+ * overflows, this returns -EOVERFLOW.
+ */
+int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
+ __be32 *response, size_t *resp_len)
+{
+ struct sbefifo *sbefifo;
+ struct iov_iter resp_iter;
+ struct kvec resp_iov;
+ size_t rbytes;
+ int rc;
+
+ if (!dev)
+ return -ENODEV;
+ sbefifo = dev_get_drvdata(dev);
+ if (!sbefifo)
+ return -ENODEV;
+ if (WARN_ON_ONCE(sbefifo->magic != SBEFIFO_MAGIC))
+ return -ENODEV;
+ if (!resp_len || !command || !response)
+ return -EINVAL;
+
+ /* Prepare iov iterator */
+ rbytes = (*resp_len) * sizeof(__be32);
+ resp_iov.iov_base = response;
+ resp_iov.iov_len = rbytes;
+ iov_iter_kvec(&resp_iter, WRITE | ITER_KVEC, &resp_iov, 1, rbytes);
+
+ /* Perform the command */
+ mutex_lock(&sbefifo->lock);
+ rc = __sbefifo_submit(sbefifo, command, cmd_len, &resp_iter);
+ mutex_unlock(&sbefifo->lock);
+
+ /* Extract the response length */
+ rbytes -= iov_iter_count(&resp_iter);
+ *resp_len = rbytes / sizeof(__be32);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sbefifo_submit);
+
+/*
+ * Char device interface
+ */
+
+static void sbefifo_release_command(struct sbefifo_user *user)
+{
+ if (is_vmalloc_addr(user->pending_cmd))
+ vfree(user->pending_cmd);
+ user->pending_cmd = NULL;
+ user->pending_len = 0;
+}
+
+static int sbefifo_user_open(struct inode *inode, struct file *file)
+{
+ struct sbefifo *sbefifo = container_of(inode->i_cdev, struct sbefifo, cdev);
+ struct sbefifo_user *user;
+
+ user = kzalloc(sizeof(struct sbefifo_user), GFP_KERNEL);
+ if (!user)
+ return -ENOMEM;
+
+ file->private_data = user;
+ user->sbefifo = sbefifo;
+ user->cmd_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!user->cmd_page) {
+ kfree(user);
+ return -ENOMEM;
+ }
+ mutex_init(&user->file_lock);
+
+ return 0;
+}
+
+static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ struct sbefifo_user *user = file->private_data;
+ struct sbefifo *sbefifo;
+ struct iov_iter resp_iter;
+ struct iovec resp_iov;
+ size_t cmd_len;
+ int rc;
+
+ if (!user)
+ return -EINVAL;
+ sbefifo = user->sbefifo;
+ if (len & 3)
+ return -EINVAL;
+
+ mutex_lock(&user->file_lock);
+
+ /* Cronus relies on -EAGAIN after a short read */
+ if (user->pending_len == 0) {
+ rc = -EAGAIN;
+ goto bail;
+ }
+ if (user->pending_len < 8) {
+ rc = -EINVAL;
+ goto bail;
+ }
+ cmd_len = user->pending_len >> 2;
+
+ /* Prepare iov iterator */
+ resp_iov.iov_base = buf;
+ resp_iov.iov_len = len;
+ iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len);
+
+ /* Perform the command */
+ mutex_lock(&sbefifo->lock);
+ rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter);
+ mutex_unlock(&sbefifo->lock);
+ if (rc < 0)
+ goto bail;
+
+ /* Extract the response length */
+ rc = len - iov_iter_count(&resp_iter);
+ bail:
+ sbefifo_release_command(user);
+ mutex_unlock(&user->file_lock);
+ return rc;
+}
+
+static ssize_t sbefifo_user_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *offset)
+{
+ struct sbefifo_user *user = file->private_data;
+ struct sbefifo *sbefifo;
+ int rc = len;
+
+ if (!user)
+ return -EINVAL;
+ sbefifo = user->sbefifo;
+ if (len > SBEFIFO_MAX_USER_CMD_LEN)
+ return -EINVAL;
+ if (len & 3)
+ return -EINVAL;
+
+ mutex_lock(&user->file_lock);
+
+ /* Can we use the pre-allocated buffer? If not, allocate */
+ if (len <= PAGE_SIZE)
+ user->pending_cmd = user->cmd_page;
+ else
+ user->pending_cmd = vmalloc(len);
+ if (!user->pending_cmd) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+
+ /* Copy the command into the staging buffer */
+ if (copy_from_user(user->pending_cmd, buf, len)) {
+ rc = -EFAULT;
+ goto bail;
+ }
+
+ /* Check for the magic reset command */
+ if (len == 4 && be32_to_cpu(*(__be32 *)user->pending_cmd) ==
+ SBEFIFO_RESET_MAGIC) {
+
+ /* Clear out any pending command */
+ user->pending_len = 0;
+
+ /* Trigger reset request */
+ mutex_lock(&sbefifo->lock);
+ rc = sbefifo_request_reset(user->sbefifo);
+ mutex_unlock(&sbefifo->lock);
+ if (rc == 0)
+ rc = 4;
+ goto bail;
+ }
+
+ /* Update the staging buffer size */
+ user->pending_len = len;
+ bail:
+ if (!user->pending_len)
+ sbefifo_release_command(user);
+
+ mutex_unlock(&user->file_lock);
+
+ /* And that's it; we'll issue the command on the next read */
+ return rc;
+}
+
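+/*
+ * The resulting userspace protocol is thus: write() the whole command
+ * (a multiple of 4 bytes), then read() to execute it and fetch the
+ * response, e.g. (sketch):
+ *
+ *    write(fd, cmd, cmd_bytes);
+ *    n = read(fd, rsp, sizeof(rsp));
+ *
+ * A read() with no staged command fails with EAGAIN, and writing the
+ * single SBEFIFO_RESET_MAGIC word requests a FIFO reset instead of
+ * staging a command.
+ */
+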
+static int sbefifo_user_release(struct inode *inode, struct file *file)
+{
+ struct sbefifo_user *user = file->private_data;
+
+ if (!user)
+ return -EINVAL;
+
+ sbefifo_release_command(user);
+ free_page((unsigned long)user->cmd_page);
+ kfree(user);
+
+ return 0;
+}
+
+static const struct file_operations sbefifo_fops = {
+ .owner = THIS_MODULE,
+ .open = sbefifo_user_open,
+ .read = sbefifo_user_read,
+ .write = sbefifo_user_write,
+ .release = sbefifo_user_release,
+};
+
+static void sbefifo_free(struct device *dev)
+{
+ struct sbefifo *sbefifo = container_of(dev, struct sbefifo, dev);
+
+ put_device(&sbefifo->fsi_dev->dev);
+ kfree(sbefifo);
+}
+
+/*
+ * Probe/remove
+ */
+
+static int sbefifo_probe(struct device *dev)
+{
+ struct fsi_device *fsi_dev = to_fsi_dev(dev);
+ struct sbefifo *sbefifo;
+ struct device_node *np;
+ struct platform_device *child;
+ char child_name[32];
+ int rc, didx, child_idx = 0;
+
+ dev_dbg(dev, "Found sbefifo device\n");
+
+ sbefifo = kzalloc(sizeof(*sbefifo), GFP_KERNEL);
+ if (!sbefifo)
+ return -ENOMEM;
+
+ /* Grab a reference to the device (the parent of our cdev); we'll drop it later */
+ if (!get_device(dev)) {
+ kfree(sbefifo);
+ return -ENODEV;
+ }
+
+ sbefifo->magic = SBEFIFO_MAGIC;
+ sbefifo->fsi_dev = fsi_dev;
+ dev_set_drvdata(dev, sbefifo);
+ mutex_init(&sbefifo->lock);
+
+ /*
+ * Try cleaning up the FIFO. If this fails, we still register the
+ * driver and will try cleaning things up again on the next access.
+ */
+ rc = sbefifo_cleanup_hw(sbefifo);
+ if (rc && rc != -ESHUTDOWN)
+ dev_err(dev, "Initial HW cleanup failed, will retry later\n");
+
+ /* Create chardev for userspace access */
+ sbefifo->dev.type = &fsi_cdev_type;
+ sbefifo->dev.parent = dev;
+ sbefifo->dev.release = sbefifo_free;
+ device_initialize(&sbefifo->dev);
+
+ /* Allocate a minor in the FSI space */
+ rc = fsi_get_new_minor(fsi_dev, fsi_dev_sbefifo, &sbefifo->dev.devt, &didx);
+ if (rc)
+ goto err;
+
+ dev_set_name(&sbefifo->dev, "sbefifo%d", didx);
+ cdev_init(&sbefifo->cdev, &sbefifo_fops);
+ rc = cdev_device_add(&sbefifo->cdev, &sbefifo->dev);
+ if (rc) {
+ dev_err(dev, "Error %d creating char device %s\n",
+ rc, dev_name(&sbefifo->dev));
+ goto err_free_minor;
+ }
+
+ /* Create platform devs for dts child nodes (occ, etc) */
+ for_each_available_child_of_node(dev->of_node, np) {
+ snprintf(child_name, sizeof(child_name), "%s-dev%d",
+ dev_name(&sbefifo->dev), child_idx++);
+ child = of_platform_device_create(np, child_name, dev);
+ if (!child)
+ dev_warn(dev, "failed to create child %s dev\n",
+ child_name);
+ }
+
+ return 0;
+ err_free_minor:
+ fsi_free_minor(sbefifo->dev.devt);
+ err:
+ put_device(&sbefifo->dev);
+ return rc;
+}
+
+static int sbefifo_unregister_child(struct device *dev, void *data)
+{
+ struct platform_device *child = to_platform_device(dev);
+
+ of_device_unregister(child);
+ if (dev->of_node)
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+
+ return 0;
+}
+
+static int sbefifo_remove(struct device *dev)
+{
+ struct sbefifo *sbefifo = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "Removing sbefifo device...\n");
+
+ mutex_lock(&sbefifo->lock);
+ sbefifo->dead = true;
+ mutex_unlock(&sbefifo->lock);
+
+ cdev_device_del(&sbefifo->cdev, &sbefifo->dev);
+ fsi_free_minor(sbefifo->dev.devt);
+ device_for_each_child(dev, NULL, sbefifo_unregister_child);
+ put_device(&sbefifo->dev);
+
+ return 0;
+}
+
+static struct fsi_device_id sbefifo_ids[] = {
+ {
+ .engine_type = FSI_ENGID_SBE,
+ .version = FSI_VERSION_ANY,
+ },
+ { 0 }
+};
+
+static struct fsi_driver sbefifo_drv = {
+ .id_table = sbefifo_ids,
+ .drv = {
+ .name = DEVICE_NAME,
+ .bus = &fsi_bus_type,
+ .probe = sbefifo_probe,
+ .remove = sbefifo_remove,
+ }
+};
+
+static int sbefifo_init(void)
+{
+ return fsi_driver_register(&sbefifo_drv);
+}
+
+static void sbefifo_exit(void)
+{
+ fsi_driver_unregister(&sbefifo_drv);
+}
+
+module_init(sbefifo_init);
+module_exit(sbefifo_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Brad Bishop <bradleyb@fuzziesquirrel.com>");
+MODULE_AUTHOR("Eddie James <eajames@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("Linux device interface to the POWER Self Boot Engine");
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index e13353a2fd7c..df94021dd9d1 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -20,42 +20,73 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <linux/miscdevice.h>
+#include <linux/cdev.h>
#include <linux/list.h>
-#include <linux/idr.h>
-#define FSI_ENGID_SCOM 0x5
+#include <uapi/linux/fsi.h>
-#define SCOM_FSI2PIB_DELAY 50
+#define FSI_ENGID_SCOM 0x5
/* SCOM engine register set */
#define SCOM_DATA0_REG 0x00
#define SCOM_DATA1_REG 0x04
#define SCOM_CMD_REG 0x08
-#define SCOM_RESET_REG 0x1C
+#define SCOM_FSI2PIB_RESET_REG 0x18
+#define SCOM_STATUS_REG 0x1C /* Read */
+#define SCOM_PIB_RESET_REG 0x1C /* Write */
-#define SCOM_RESET_CMD 0x80000000
+/* Command register */
#define SCOM_WRITE_CMD 0x80000000
+#define SCOM_READ_CMD 0x00000000
+
+/* Status register bits */
+#define SCOM_STATUS_ERR_SUMMARY 0x80000000
+#define SCOM_STATUS_PROTECTION 0x01000000
+#define SCOM_STATUS_PARITY 0x04000000
+#define SCOM_STATUS_PIB_ABORT 0x00100000
+#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
+#define SCOM_STATUS_PIB_RESP_SHIFT 12
+
+#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \
+ SCOM_STATUS_PROTECTION | \
+ SCOM_STATUS_PARITY | \
+ SCOM_STATUS_PIB_ABORT | \
+ SCOM_STATUS_PIB_RESP_MASK)
+/* SCOM address encodings */
+#define XSCOM_ADDR_IND_FLAG BIT_ULL(63)
+#define XSCOM_ADDR_INF_FORM1 BIT_ULL(60)
+
+/* SCOM indirect stuff */
+#define XSCOM_ADDR_DIRECT_PART 0x7fffffffull
+#define XSCOM_ADDR_INDIRECT_PART 0x000fffff00000000ull
+#define XSCOM_DATA_IND_READ BIT_ULL(63)
+#define XSCOM_DATA_IND_COMPLETE BIT_ULL(31)
+#define XSCOM_DATA_IND_ERR_MASK 0x70000000ull
+#define XSCOM_DATA_IND_ERR_SHIFT 28
+#define XSCOM_DATA_IND_DATA 0x0000ffffull
+#define XSCOM_DATA_IND_FORM1_DATA 0x000fffffffffffffull
+#define XSCOM_ADDR_FORM1_LOW 0x000ffffffffull
+#define XSCOM_ADDR_FORM1_HI 0xfff00000000ull
+#define XSCOM_ADDR_FORM1_HI_SHIFT 20
+
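+/*
+ * A form0 indirect address thus packs, per the masks above: the
+ * indirect flag in bit 63, the form selector in bit 60, the indirect
+ * portion in bits 51:32 and the direct register address in bits 30:0.
+ */
+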
+/* Retries */
+#define SCOM_MAX_RETRIES 100 /* Retries on busy */
+#define SCOM_MAX_IND_RETRIES 10 /* Retries indirect not ready */
struct scom_device {
struct list_head link;
struct fsi_device *fsi_dev;
- struct miscdevice mdev;
- char name[32];
- int idx;
+ struct device dev;
+ struct cdev cdev;
+ struct mutex lock;
+ bool dead;
};
-#define to_scom_dev(x) container_of((x), struct scom_device, mdev)
-
-static struct list_head scom_devices;
-
-static DEFINE_IDA(scom_ida);
-
-static int put_scom(struct scom_device *scom_dev, uint64_t value,
- uint32_t addr)
+static int __put_scom(struct scom_device *scom_dev, uint64_t value,
+ uint32_t addr, uint32_t *status)
{
+ __be32 data, raw_status;
int rc;
- uint32_t data;
data = cpu_to_be32((value >> 32) & 0xffffffff);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
@@ -70,53 +101,286 @@ static int put_scom(struct scom_device *scom_dev, uint64_t value,
return rc;
data = cpu_to_be32(SCOM_WRITE_CMD | addr);
- return fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
+ rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
sizeof(uint32_t));
+ if (rc)
+ return rc;
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_STATUS_REG, &raw_status,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
+ *status = be32_to_cpu(raw_status);
+
+ return 0;
}
-static int get_scom(struct scom_device *scom_dev, uint64_t *value,
- uint32_t addr)
+static int __get_scom(struct scom_device *scom_dev, uint64_t *value,
+ uint32_t addr, uint32_t *status)
{
- uint32_t result, data;
+ __be32 data, raw_status;
int rc;
+
*value = 0ULL;
- data = cpu_to_be32(addr);
+ data = cpu_to_be32(SCOM_READ_CMD | addr);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_STATUS_REG, &raw_status,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
- rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA0_REG, &result,
+ /*
+ * Read the data registers even on error, so we don't have
+ * to interpret the status register here.
+ */
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
-
- *value |= (uint64_t)cpu_to_be32(result) << 32;
- rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA1_REG, &result,
+ *value |= (uint64_t)be32_to_cpu(data) << 32;
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA1_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
+ *value |= be32_to_cpu(data);
+ *status = be32_to_cpu(raw_status);
- *value |= cpu_to_be32(result);
+ return rc;
+}
+
+static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value,
+ uint64_t addr, uint32_t *status)
+{
+ uint64_t ind_data, ind_addr;
+ int rc, retries, err = 0;
+
+ if (value & ~XSCOM_DATA_IND_DATA)
+ return -EINVAL;
+
+ ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
+ ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | value;
+ rc = __put_scom(scom, ind_data, ind_addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
+ rc = __get_scom(scom, &ind_data, addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
+ *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
+ if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
+ return 0;
+
+ msleep(1);
+ }
+ return rc;
+}
+
+static int put_indirect_scom_form1(struct scom_device *scom, uint64_t value,
+ uint64_t addr, uint32_t *status)
+{
+ uint64_t ind_data, ind_addr;
+
+ if (value & ~XSCOM_DATA_IND_FORM1_DATA)
+ return -EINVAL;
+ ind_addr = addr & XSCOM_ADDR_FORM1_LOW;
+ ind_data = value | (addr & XSCOM_ADDR_FORM1_HI) << XSCOM_ADDR_FORM1_HI_SHIFT;
+ return __put_scom(scom, ind_data, ind_addr, status);
+}
+
+static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value,
+ uint64_t addr, uint32_t *status)
+{
+ uint64_t ind_data, ind_addr;
+ int rc, retries, err = 0;
+
+ ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
+ ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | XSCOM_DATA_IND_READ;
+ rc = __put_scom(scom, ind_data, ind_addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
+ rc = __get_scom(scom, &ind_data, addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
+ *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
+ *value = ind_data & XSCOM_DATA_IND_DATA;
+
+ if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
+ return 0;
+
+ msleep(1);
+ }
+ return rc;
+}
+
+static int raw_put_scom(struct scom_device *scom, uint64_t value,
+ uint64_t addr, uint32_t *status)
+{
+ if (addr & XSCOM_ADDR_IND_FLAG) {
+ if (addr & XSCOM_ADDR_INF_FORM1)
+ return put_indirect_scom_form1(scom, value, addr, status);
+ else
+ return put_indirect_scom_form0(scom, value, addr, status);
+ } else
+ return __put_scom(scom, value, addr, status);
+}
+
+static int raw_get_scom(struct scom_device *scom, uint64_t *value,
+ uint64_t addr, uint32_t *status)
+{
+ if (addr & XSCOM_ADDR_IND_FLAG) {
+ if (addr & XSCOM_ADDR_INF_FORM1)
+ return -ENXIO;
+ return get_indirect_scom_form0(scom, value, addr, status);
+ } else
+ return __get_scom(scom, value, addr, status);
+}
+
+static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
+{
+ uint32_t dummy = -1;
+
+ if (status & SCOM_STATUS_PROTECTION)
+ return -EPERM;
+ if (status & SCOM_STATUS_PARITY) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ return -EIO;
+ }
+ /* Return -EBUSY on PIB abort to force a retry */
+ if (status & SCOM_STATUS_PIB_ABORT)
+ return -EBUSY;
+ if (status & SCOM_STATUS_ERR_SUMMARY) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ return -EIO;
+ }
return 0;
}
+static int handle_pib_status(struct scom_device *scom, uint8_t status)
+{
+ uint32_t dummy = -1;
+
+ if (status == SCOM_PIB_SUCCESS)
+ return 0;
+ if (status == SCOM_PIB_BLOCKED)
+ return -EBUSY;
+
+ /* Reset the bridge */
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+
+ switch (status) {
+ case SCOM_PIB_OFFLINE:
+ return -ENODEV;
+ case SCOM_PIB_BAD_ADDR:
+ return -ENXIO;
+ case SCOM_PIB_TIMEOUT:
+ return -ETIMEDOUT;
+ case SCOM_PIB_PARTIAL:
+ case SCOM_PIB_CLK_ERR:
+ case SCOM_PIB_PARITY_ERR:
+ default:
+ return -EIO;
+ }
+}
+
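+/*
+ * Both accessors below retry up to SCOM_MAX_RETRIES times: a raw FSI
+ * failure gets one bridge reset and another attempt, while -EBUSY
+ * (PIB abort or blocked) is retried after a 1 ms sleep.
+ */
+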
+static int put_scom(struct scom_device *scom, uint64_t value,
+ uint64_t addr)
+{
+ uint32_t status, dummy = -1;
+ int rc, retries;
+
+ for (retries = 0; retries < SCOM_MAX_RETRIES; retries++) {
+ rc = raw_put_scom(scom, value, addr, &status);
+ if (rc) {
+ /* Try resetting the bridge if FSI fails */
+ if (rc != -ENODEV && retries == 0) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG,
+ &dummy, sizeof(uint32_t));
+ rc = -EBUSY;
+ } else
+ return rc;
+ } else
+ rc = handle_fsi2pib_status(scom, status);
+ if (rc && rc != -EBUSY)
+ break;
+ if (rc == 0) {
+ rc = handle_pib_status(scom,
+ (status & SCOM_STATUS_PIB_RESP_MASK)
+ >> SCOM_STATUS_PIB_RESP_SHIFT);
+ if (rc && rc != -EBUSY)
+ break;
+ }
+ if (rc == 0)
+ break;
+ msleep(1);
+ }
+ return rc;
+}
+
+static int get_scom(struct scom_device *scom, uint64_t *value,
+ uint64_t addr)
+{
+ uint32_t status, dummy = -1;
+ int rc, retries;
+
+ for (retries = 0; retries < SCOM_MAX_RETRIES; retries++) {
+ rc = raw_get_scom(scom, value, addr, &status);
+ if (rc) {
+ /* Try resetting the bridge if FSI fails */
+ if (rc != -ENODEV && retries == 0) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG,
+ &dummy, sizeof(uint32_t));
+ rc = -EBUSY;
+ } else
+ return rc;
+ } else
+ rc = handle_fsi2pib_status(scom, status);
+ if (rc && rc != -EBUSY)
+ break;
+ if (rc == 0) {
+ rc = handle_pib_status(scom,
+ (status & SCOM_STATUS_PIB_RESP_MASK)
+ >> SCOM_STATUS_PIB_RESP_SHIFT);
+ if (rc && rc != -EBUSY)
+ break;
+ }
+ if (rc == 0)
+ break;
+ msleep(1);
+ }
+ return rc;
+}
+
static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
- loff_t *offset)
+ loff_t *offset)
{
- int rc;
- struct miscdevice *mdev =
- (struct miscdevice *)filep->private_data;
- struct scom_device *scom = to_scom_dev(mdev);
+ struct scom_device *scom = filep->private_data;
struct device *dev = &scom->fsi_dev->dev;
uint64_t val;
+ int rc;
if (len != sizeof(uint64_t))
return -EINVAL;
- rc = get_scom(scom, &val, *offset);
+ mutex_lock(&scom->lock);
+ if (scom->dead)
+ rc = -ENODEV;
+ else
+ rc = get_scom(scom, &val, *offset);
+ mutex_unlock(&scom->lock);
if (rc) {
dev_dbg(dev, "get_scom fail:%d\n", rc);
return rc;
@@ -130,11 +394,10 @@ static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
}
static ssize_t scom_write(struct file *filep, const char __user *buf,
- size_t len, loff_t *offset)
+ size_t len, loff_t *offset)
{
int rc;
- struct miscdevice *mdev = filep->private_data;
- struct scom_device *scom = to_scom_dev(mdev);
+ struct scom_device *scom = filep->private_data;
struct device *dev = &scom->fsi_dev->dev;
uint64_t val;
@@ -147,7 +410,12 @@ static ssize_t scom_write(struct file *filep, const char __user *buf,
return -EINVAL;
}
- rc = put_scom(scom, val, *offset);
+ mutex_lock(&scom->lock);
+ if (scom->dead)
+ rc = -ENODEV;
+ else
+ rc = put_scom(scom, val, *offset);
+ mutex_unlock(&scom->lock);
if (rc) {
dev_dbg(dev, "put_scom failed with:%d\n", rc);
return rc;
@@ -171,50 +439,205 @@ static loff_t scom_llseek(struct file *file, loff_t offset, int whence)
return offset;
}
+static void raw_convert_status(struct scom_access *acc, uint32_t status)
+{
+ acc->pib_status = (status & SCOM_STATUS_PIB_RESP_MASK) >>
+ SCOM_STATUS_PIB_RESP_SHIFT;
+ acc->intf_errors = 0;
+
+ if (status & SCOM_STATUS_PROTECTION)
+ acc->intf_errors |= SCOM_INTF_ERR_PROTECTION;
+ else if (status & SCOM_STATUS_PARITY)
+ acc->intf_errors |= SCOM_INTF_ERR_PARITY;
+ else if (status & SCOM_STATUS_PIB_ABORT)
+ acc->intf_errors |= SCOM_INTF_ERR_ABORT;
+ else if (status & SCOM_STATUS_ERR_SUMMARY)
+ acc->intf_errors |= SCOM_INTF_ERR_UNKNOWN;
+}
+
+static int scom_raw_read(struct scom_device *scom, void __user *argp)
+{
+ struct scom_access acc;
+ uint32_t status;
+ int rc;
+
+ if (copy_from_user(&acc, argp, sizeof(struct scom_access)))
+ return -EFAULT;
+
+ rc = raw_get_scom(scom, &acc.data, acc.addr, &status);
+ if (rc)
+ return rc;
+ raw_convert_status(&acc, status);
+ if (copy_to_user(argp, &acc, sizeof(struct scom_access)))
+ return -EFAULT;
+ return 0;
+}
+
+static int scom_raw_write(struct scom_device *scom, void __user *argp)
+{
+ u64 prev_data, mask, data;
+ struct scom_access acc;
+ uint32_t status;
+ int rc;
+
+ if (copy_from_user(&acc, argp, sizeof(struct scom_access)))
+ return -EFAULT;
+
+ if (acc.mask) {
+ rc = raw_get_scom(scom, &prev_data, acc.addr, &status);
+ if (rc)
+ return rc;
+ if (status & SCOM_STATUS_ANY_ERR)
+ goto fail;
+ mask = acc.mask;
+ } else {
+ prev_data = mask = -1ull;
+ }
+ data = (prev_data & ~mask) | (acc.data & mask);
+ rc = raw_put_scom(scom, data, acc.addr, &status);
+ if (rc)
+ return rc;
+ fail:
+ raw_convert_status(&acc, status);
+ if (copy_to_user(argp, &acc, sizeof(struct scom_access)))
+ return -EFAULT;
+ return 0;
+}
+
+static int scom_reset(struct scom_device *scom, void __user *argp)
+{
+ uint32_t flags, dummy = -1;
+ int rc = 0;
+
+ if (get_user(flags, (__u32 __user *)argp))
+ return -EFAULT;
+ if (flags & SCOM_RESET_PIB)
+ rc = fsi_device_write(scom->fsi_dev, SCOM_PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ if (!rc && (flags & (SCOM_RESET_PIB | SCOM_RESET_INTF)))
+ rc = fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ return rc;
+}
+
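+/*
+ * A userspace reset request might look like (illustrative):
+ *
+ *    __u32 flags = SCOM_RESET_PIB | SCOM_RESET_INTF;
+ *
+ *    ioctl(fd, FSI_SCOM_RESET, &flags);
+ */
+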
+static int scom_check(struct scom_device *scom, void __user *argp)
+{
+ /* Still need to find out how to get "protected" */
+ return put_user(SCOM_CHECK_SUPPORTED, (__u32 __user *)argp);
+}
+
+static long scom_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct scom_device *scom = file->private_data;
+ void __user *argp = (void __user *)arg;
+ int rc = -ENOTTY;
+
+ mutex_lock(&scom->lock);
+ if (scom->dead) {
+ mutex_unlock(&scom->lock);
+ return -ENODEV;
+ }
+ switch (cmd) {
+ case FSI_SCOM_CHECK:
+ rc = scom_check(scom, argp);
+ break;
+ case FSI_SCOM_READ:
+ rc = scom_raw_read(scom, argp);
+ break;
+ case FSI_SCOM_WRITE:
+ rc = scom_raw_write(scom, argp);
+ break;
+ case FSI_SCOM_RESET:
+ rc = scom_reset(scom, argp);
+ break;
+ }
+ mutex_unlock(&scom->lock);
+ return rc;
+}
+
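+/*
+ * Userspace reaches these through ioctl(2) on the scom chardev, e.g.
+ * (an illustrative sketch; the address is made up):
+ *
+ *    struct scom_access acc = { .addr = 0xf000f };
+ *
+ *    if (ioctl(fd, FSI_SCOM_READ, &acc) == 0)
+ *            printf("%016llx pib=%d\n",
+ *                   (unsigned long long)acc.data, acc.pib_status);
+ */
+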
+static int scom_open(struct inode *inode, struct file *file)
+{
+ struct scom_device *scom = container_of(inode->i_cdev, struct scom_device, cdev);
+
+ file->private_data = scom;
+
+ return 0;
+}
+
static const struct file_operations scom_fops = {
- .owner = THIS_MODULE,
- .llseek = scom_llseek,
- .read = scom_read,
- .write = scom_write,
+ .owner = THIS_MODULE,
+ .open = scom_open,
+ .llseek = scom_llseek,
+ .read = scom_read,
+ .write = scom_write,
+ .unlocked_ioctl = scom_ioctl,
};
+static void scom_free(struct device *dev)
+{
+ struct scom_device *scom = container_of(dev, struct scom_device, dev);
+
+ put_device(&scom->fsi_dev->dev);
+ kfree(scom);
+}
+
static int scom_probe(struct device *dev)
{
- uint32_t data;
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct scom_device *scom;
+ int rc, didx;
- scom = devm_kzalloc(dev, sizeof(*scom), GFP_KERNEL);
+ scom = kzalloc(sizeof(*scom), GFP_KERNEL);
if (!scom)
return -ENOMEM;
+ dev_set_drvdata(dev, scom);
+ mutex_init(&scom->lock);
- scom->idx = ida_simple_get(&scom_ida, 1, INT_MAX, GFP_KERNEL);
- snprintf(scom->name, sizeof(scom->name), "scom%d", scom->idx);
+ /* Grab a reference to the device (the parent of our cdev); we'll drop it later */
+ if (!get_device(dev)) {
+ kfree(scom);
+ return -ENODEV;
+ }
scom->fsi_dev = fsi_dev;
- scom->mdev.minor = MISC_DYNAMIC_MINOR;
- scom->mdev.fops = &scom_fops;
- scom->mdev.name = scom->name;
- scom->mdev.parent = dev;
- list_add(&scom->link, &scom_devices);
- data = cpu_to_be32(SCOM_RESET_CMD);
- fsi_device_write(fsi_dev, SCOM_RESET_REG, &data, sizeof(uint32_t));
+ /* Create chardev for userspace access */
+ scom->dev.type = &fsi_cdev_type;
+ scom->dev.parent = dev;
+ scom->dev.release = scom_free;
+ device_initialize(&scom->dev);
+
+ /* Allocate a minor in the FSI space */
+ rc = fsi_get_new_minor(fsi_dev, fsi_dev_scom, &scom->dev.devt, &didx);
+ if (rc)
+ goto err;
+
+ dev_set_name(&scom->dev, "scom%d", didx);
+ cdev_init(&scom->cdev, &scom_fops);
+ rc = cdev_device_add(&scom->cdev, &scom->dev);
+ if (rc) {
+ dev_err(dev, "Error %d creating char device %s\n",
+ rc, dev_name(&scom->dev));
+ goto err_free_minor;
+ }
- return misc_register(&scom->mdev);
+ return 0;
+ err_free_minor:
+ fsi_free_minor(scom->dev.devt);
+ err:
+ put_device(&scom->dev);
+ return rc;
}
static int scom_remove(struct device *dev)
{
- struct scom_device *scom, *scom_tmp;
- struct fsi_device *fsi_dev = to_fsi_dev(dev);
+ struct scom_device *scom = dev_get_drvdata(dev);
- list_for_each_entry_safe(scom, scom_tmp, &scom_devices, link) {
- if (scom->fsi_dev == fsi_dev) {
- list_del(&scom->link);
- ida_simple_remove(&scom_ida, scom->idx);
- misc_deregister(&scom->mdev);
- }
- }
+ mutex_lock(&scom->lock);
+ scom->dead = true;
+ mutex_unlock(&scom->lock);
+ cdev_device_del(&scom->cdev, &scom->dev);
+ fsi_free_minor(scom->dev.devt);
+ put_device(&scom->dev);
return 0;
}
@@ -239,20 +662,11 @@ static struct fsi_driver scom_drv = {
static int scom_init(void)
{
- INIT_LIST_HEAD(&scom_devices);
return fsi_driver_register(&scom_drv);
}
static void scom_exit(void)
{
- struct list_head *pos;
- struct scom_device *scom;
-
- list_for_each(pos, &scom_devices) {
- scom = list_entry(pos, struct scom_device, link);
- misc_deregister(&scom->mdev);
- devm_kfree(&scom->fsi_dev->dev, scom);
- }
fsi_driver_unregister(&scom_drv);
}
diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig
new file mode 100644
index 000000000000..6abc88514512
--- /dev/null
+++ b/drivers/gnss/Kconfig
@@ -0,0 +1,43 @@
+#
+# GNSS receiver configuration
+#
+
+menuconfig GNSS
+ tristate "GNSS receiver support"
+ ---help---
+ Say Y here if you have a GNSS receiver (e.g. a GPS receiver).
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss.
+
+if GNSS
+
+config GNSS_SERIAL
+ tristate
+
+config GNSS_SIRF_SERIAL
+ tristate "SiRFstar GNSS receiver support"
+ depends on SERIAL_DEV_BUS
+ ---help---
+ Say Y here if you have a SiRFstar-based GNSS receiver which uses a
+ serial interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss-sirf.
+
+ If unsure, say N.
+
+config GNSS_UBX_SERIAL
+ tristate "u-blox GNSS receiver support"
+ depends on SERIAL_DEV_BUS
+ select GNSS_SERIAL
+ ---help---
+ Say Y here if you have a u-blox GNSS receiver which uses a serial
+ interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss-ubx.
+
+ If unsure, say N.
+
+endif # GNSS
diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile
new file mode 100644
index 000000000000..5cf0ebe0330a
--- /dev/null
+++ b/drivers/gnss/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the GNSS subsystem.
+#
+
+obj-$(CONFIG_GNSS) += gnss.o
+gnss-y := core.o
+
+obj-$(CONFIG_GNSS_SERIAL) += gnss-serial.o
+gnss-serial-y := serial.o
+
+obj-$(CONFIG_GNSS_SIRF_SERIAL) += gnss-sirf.o
+gnss-sirf-y := sirf.o
+
+obj-$(CONFIG_GNSS_UBX_SERIAL) += gnss-ubx.o
+gnss-ubx-y := ubx.o
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
new file mode 100644
index 000000000000..4291a0dd22aa
--- /dev/null
+++ b/drivers/gnss/core.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GNSS receiver core
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cdev.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gnss.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#define GNSS_FLAG_HAS_WRITE_RAW BIT(0)
+
+#define GNSS_MINORS 16
+
+static DEFINE_IDA(gnss_minors);
+static dev_t gnss_first;
+
+/* FIFO size must be a power of two */
+#define GNSS_READ_FIFO_SIZE 4096
+#define GNSS_WRITE_BUF_SIZE 1024
+
+#define to_gnss_device(d) container_of((d), struct gnss_device, dev)
+
+static int gnss_open(struct inode *inode, struct file *file)
+{
+ struct gnss_device *gdev;
+ int ret = 0;
+
+ gdev = container_of(inode->i_cdev, struct gnss_device, cdev);
+
+ get_device(&gdev->dev);
+
+ nonseekable_open(inode, file);
+ file->private_data = gdev;
+
+ down_write(&gdev->rwsem);
+ if (gdev->disconnected) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (gdev->count++ == 0) {
+ ret = gdev->ops->open(gdev);
+ if (ret)
+ gdev->count--;
+ }
+unlock:
+ up_write(&gdev->rwsem);
+
+ if (ret)
+ put_device(&gdev->dev);
+
+ return ret;
+}
+
+static int gnss_release(struct inode *inode, struct file *file)
+{
+ struct gnss_device *gdev = file->private_data;
+
+ down_write(&gdev->rwsem);
+ if (gdev->disconnected)
+ goto unlock;
+
+ if (--gdev->count == 0) {
+ gdev->ops->close(gdev);
+ kfifo_reset(&gdev->read_fifo);
+ }
+unlock:
+ up_write(&gdev->rwsem);
+
+ put_device(&gdev->dev);
+
+ return 0;
+}
+
+static ssize_t gnss_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct gnss_device *gdev = file->private_data;
+ unsigned int copied;
+ int ret;
+
+ mutex_lock(&gdev->read_mutex);
+ while (kfifo_is_empty(&gdev->read_fifo)) {
+ mutex_unlock(&gdev->read_mutex);
+
+ if (gdev->disconnected)
+ return 0;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(gdev->read_queue,
+ gdev->disconnected ||
+ !kfifo_is_empty(&gdev->read_fifo));
+ if (ret)
+ return -ERESTARTSYS;
+
+ mutex_lock(&gdev->read_mutex);
+ }
+
+ ret = kfifo_to_user(&gdev->read_fifo, buf, count, &copied);
+ if (ret == 0)
+ ret = copied;
+
+ mutex_unlock(&gdev->read_mutex);
+
+ return ret;
+}
+
+static ssize_t gnss_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct gnss_device *gdev = file->private_data;
+ size_t written = 0;
+ int ret;
+
+ if (gdev->disconnected)
+ return -EIO;
+
+ if (!count)
+ return 0;
+
+ if (!(gdev->flags & GNSS_FLAG_HAS_WRITE_RAW))
+ return -EIO;
+
+ /* Ignoring O_NONBLOCK, write_raw() is synchronous. */
+
+ ret = mutex_lock_interruptible(&gdev->write_mutex);
+ if (ret)
+ return -ERESTARTSYS;
+
+ for (;;) {
+ size_t n = count - written;
+
+ if (n > GNSS_WRITE_BUF_SIZE)
+ n = GNSS_WRITE_BUF_SIZE;
+
+ if (copy_from_user(gdev->write_buf, buf, n)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ /*
+ * Assumes write_raw can always accept GNSS_WRITE_BUF_SIZE
+ * bytes.
+ *
+ * FIXME: revisit
+ */
+ down_read(&gdev->rwsem);
+ if (!gdev->disconnected)
+ ret = gdev->ops->write_raw(gdev, gdev->write_buf, n);
+ else
+ ret = -EIO;
+ up_read(&gdev->rwsem);
+
+ if (ret < 0)
+ break;
+
+ written += ret;
+ buf += ret;
+
+ if (written == count)
+ break;
+ }
+
+ if (written)
+ ret = written;
+out_unlock:
+ mutex_unlock(&gdev->write_mutex);
+
+ return ret;
+}
+
+static __poll_t gnss_poll(struct file *file, poll_table *wait)
+{
+ struct gnss_device *gdev = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &gdev->read_queue, wait);
+
+ if (!kfifo_is_empty(&gdev->read_fifo))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (gdev->disconnected)
+ mask |= EPOLLHUP;
+
+ return mask;
+}
+
+static const struct file_operations gnss_fops = {
+ .owner = THIS_MODULE,
+ .open = gnss_open,
+ .release = gnss_release,
+ .read = gnss_read,
+ .write = gnss_write,
+ .poll = gnss_poll,
+ .llseek = no_llseek,
+};
+
+static struct class *gnss_class;
+
+static void gnss_device_release(struct device *dev)
+{
+ struct gnss_device *gdev = to_gnss_device(dev);
+
+ kfree(gdev->write_buf);
+ kfifo_free(&gdev->read_fifo);
+ ida_simple_remove(&gnss_minors, gdev->id);
+ kfree(gdev);
+}
+
+struct gnss_device *gnss_allocate_device(struct device *parent)
+{
+ struct gnss_device *gdev;
+ struct device *dev;
+ int id;
+ int ret;
+
+ gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
+ return NULL;
+
+ id = ida_simple_get(&gnss_minors, 0, GNSS_MINORS, GFP_KERNEL);
+ if (id < 0) {
+ kfree(gdev);
+ return NULL;
+ }
+
+ gdev->id = id;
+
+ dev = &gdev->dev;
+ device_initialize(dev);
+ dev->devt = gnss_first + id;
+ dev->class = gnss_class;
+ dev->parent = parent;
+ dev->release = gnss_device_release;
+ dev_set_drvdata(dev, gdev);
+ dev_set_name(dev, "gnss%d", id);
+
+ init_rwsem(&gdev->rwsem);
+ mutex_init(&gdev->read_mutex);
+ mutex_init(&gdev->write_mutex);
+ init_waitqueue_head(&gdev->read_queue);
+
+ ret = kfifo_alloc(&gdev->read_fifo, GNSS_READ_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ goto err_put_device;
+
+ gdev->write_buf = kzalloc(GNSS_WRITE_BUF_SIZE, GFP_KERNEL);
+ if (!gdev->write_buf)
+ goto err_put_device;
+
+ cdev_init(&gdev->cdev, &gnss_fops);
+ gdev->cdev.owner = THIS_MODULE;
+
+ return gdev;
+
+err_put_device:
+ put_device(dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gnss_allocate_device);
+
+void gnss_put_device(struct gnss_device *gdev)
+{
+ put_device(&gdev->dev);
+}
+EXPORT_SYMBOL_GPL(gnss_put_device);
+
+int gnss_register_device(struct gnss_device *gdev)
+{
+ int ret;
+
+ /* Set a flag which can be accessed without holding the rwsem. */
+ if (gdev->ops->write_raw != NULL)
+ gdev->flags |= GNSS_FLAG_HAS_WRITE_RAW;
+
+ ret = cdev_device_add(&gdev->cdev, &gdev->dev);
+ if (ret) {
+ dev_err(&gdev->dev, "failed to add device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnss_register_device);
+
+void gnss_deregister_device(struct gnss_device *gdev)
+{
+ down_write(&gdev->rwsem);
+ gdev->disconnected = true;
+ if (gdev->count) {
+ wake_up_interruptible(&gdev->read_queue);
+ gdev->ops->close(gdev);
+ }
+ up_write(&gdev->rwsem);
+
+ cdev_device_del(&gdev->cdev, &gdev->dev);
+}
+EXPORT_SYMBOL_GPL(gnss_deregister_device);
+
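+/*
+ * A minimal driver pairs the calls above as follows (sketch; my_ops
+ * stands in for the driver's gnss_operations table):
+ *
+ *    gdev = gnss_allocate_device(parent);
+ *    gdev->ops = &my_ops;
+ *    ret = gnss_register_device(gdev);
+ *    ...
+ *    gnss_deregister_device(gdev);
+ *    gnss_put_device(gdev);
+ */
+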
+/*
+ * Caller guarantees serialisation.
+ *
+ * Must not be called for a closed device.
+ */
+int gnss_insert_raw(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count)
+{
+ int ret;
+
+ ret = kfifo_in(&gdev->read_fifo, buf, count);
+
+ wake_up_interruptible(&gdev->read_queue);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnss_insert_raw);
+
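+/*
+ * Subdrivers feed received data in from their receive path, e.g. a
+ * serdev receive_buf() callback (sketch; my_data() is a placeholder
+ * accessor):
+ *
+ *    static int my_receive_buf(struct serdev_device *serdev,
+ *                              const unsigned char *buf, size_t count)
+ *    {
+ *            return gnss_insert_raw(my_data(serdev)->gdev, buf, count);
+ *    }
+ */
+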
+static const char * const gnss_type_names[GNSS_TYPE_COUNT] = {
+ [GNSS_TYPE_NMEA] = "NMEA",
+ [GNSS_TYPE_SIRF] = "SiRF",
+ [GNSS_TYPE_UBX] = "UBX",
+};
+
+static const char *gnss_type_name(struct gnss_device *gdev)
+{
+ const char *name = NULL;
+
+ if (gdev->type < GNSS_TYPE_COUNT)
+ name = gnss_type_names[gdev->type];
+
+ if (!name)
+ dev_WARN(&gdev->dev, "type name not defined\n");
+
+ return name;
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct gnss_device *gdev = to_gnss_device(dev);
+
+ return sprintf(buf, "%s\n", gnss_type_name(gdev));
+}
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *gnss_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gnss);
+
+static int gnss_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gnss_device *gdev = to_gnss_device(dev);
+ int ret;
+
+ ret = add_uevent_var(env, "GNSS_TYPE=%s", gnss_type_name(gdev));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __init gnss_module_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&gnss_first, 0, GNSS_MINORS, "gnss");
+ if (ret < 0) {
+ pr_err("failed to allocate device numbers: %d\n", ret);
+ return ret;
+ }
+
+ gnss_class = class_create(THIS_MODULE, "gnss");
+ if (IS_ERR(gnss_class)) {
+ ret = PTR_ERR(gnss_class);
+ pr_err("failed to create class: %d\n", ret);
+ goto err_unregister_chrdev;
+ }
+
+ gnss_class->dev_groups = gnss_groups;
+ gnss_class->dev_uevent = gnss_uevent;
+
+ pr_info("GNSS driver registered with major %d\n", MAJOR(gnss_first));
+
+ return 0;
+
+err_unregister_chrdev:
+ unregister_chrdev_region(gnss_first, GNSS_MINORS);
+
+ return ret;
+}
+module_init(gnss_module_init);
+
+static void __exit gnss_module_exit(void)
+{
+ class_destroy(gnss_class);
+ unregister_chrdev_region(gnss_first, GNSS_MINORS);
+ ida_destroy(&gnss_minors);
+}
+module_exit(gnss_module_exit);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("GNSS receiver core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
new file mode 100644
index 000000000000..b01ba4438501
--- /dev/null
+++ b/drivers/gnss/serial.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic serial GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+
+#include "serial.h"
+
+static int gnss_serial_open(struct gnss_device *gdev)
+{
+ struct gnss_serial *gserial = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = gserial->serdev;
+ int ret;
+
+ ret = serdev_device_open(serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, gserial->speed);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = pm_runtime_get_sync(&serdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&serdev->dev);
+ goto err_close;
+ }
+
+ return 0;
+
+err_close:
+ serdev_device_close(serdev);
+
+ return ret;
+}
+
+static void gnss_serial_close(struct gnss_device *gdev)
+{
+ struct gnss_serial *gserial = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = gserial->serdev;
+
+ serdev_device_close(serdev);
+
+ pm_runtime_put(&serdev->dev);
+}
+
+static int gnss_serial_write_raw(struct gnss_device *gdev,
+ const unsigned char *buf, size_t count)
+{
+ struct gnss_serial *gserial = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = gserial->serdev;
+ int ret;
+
+ /* write is only buffered synchronously */
+ ret = serdev_device_write(serdev, buf, count, 0);
+ if (ret < 0)
+ return ret;
+
+ /* FIXME: determine if interrupted? */
+ serdev_device_wait_until_sent(serdev, 0);
+
+ return count;
+}
+
+static const struct gnss_operations gnss_serial_gnss_ops = {
+ .open = gnss_serial_open,
+ .close = gnss_serial_close,
+ .write_raw = gnss_serial_write_raw,
+};
+
+static int gnss_serial_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct gnss_device *gdev = gserial->gdev;
+
+ return gnss_insert_raw(gdev, buf, count);
+}
+
+static const struct serdev_device_ops gnss_serial_serdev_ops = {
+ .receive_buf = gnss_serial_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int gnss_serial_set_power(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state)
+{
+ if (!gserial->ops || !gserial->ops->set_power)
+ return 0;
+
+ return gserial->ops->set_power(gserial, state);
+}
+
+/*
+ * FIXME: need to provide subdriver defaults or separate dt parsing from
+ * allocation.
+ */
+static int gnss_serial_parse_dt(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct device_node *node = serdev->dev.of_node;
+ u32 speed = 4800;
+
+ of_property_read_u32(node, "current-speed", &speed);
+
+ gserial->speed = speed;
+
+ return 0;
+}
+
+struct gnss_serial *gnss_serial_allocate(struct serdev_device *serdev,
+ size_t data_size)
+{
+ struct gnss_serial *gserial;
+ struct gnss_device *gdev;
+ int ret;
+
+ gserial = kzalloc(sizeof(*gserial) + data_size, GFP_KERNEL);
+ if (!gserial)
+ return ERR_PTR(-ENOMEM);
+
+ gdev = gnss_allocate_device(&serdev->dev);
+ if (!gdev) {
+ ret = -ENOMEM;
+ goto err_free_gserial;
+ }
+
+ gdev->ops = &gnss_serial_gnss_ops;
+ gnss_set_drvdata(gdev, gserial);
+
+ gserial->serdev = serdev;
+ gserial->gdev = gdev;
+
+ serdev_device_set_drvdata(serdev, gserial);
+ serdev_device_set_client_ops(serdev, &gnss_serial_serdev_ops);
+
+ ret = gnss_serial_parse_dt(serdev);
+ if (ret)
+ goto err_put_device;
+
+ return gserial;
+
+err_put_device:
+ gnss_put_device(gserial->gdev);
+err_free_gserial:
+ kfree(gserial);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(gnss_serial_allocate);
+
+void gnss_serial_free(struct gnss_serial *gserial)
+{
+ gnss_put_device(gserial->gdev);
+ kfree(gserial);
+}
+EXPORT_SYMBOL_GPL(gnss_serial_free);
+
+int gnss_serial_register(struct gnss_serial *gserial)
+{
+ struct serdev_device *serdev = gserial->serdev;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_PM)) {
+ pm_runtime_enable(&serdev->dev);
+ } else {
+ ret = gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = gnss_register_device(gserial->gdev);
+ if (ret)
+ goto err_disable_rpm;
+
+ return 0;
+
+err_disable_rpm:
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(&serdev->dev);
+ else
+ gnss_serial_set_power(gserial, GNSS_SERIAL_OFF);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnss_serial_register);
+
+void gnss_serial_deregister(struct gnss_serial *gserial)
+{
+ struct serdev_device *serdev = gserial->serdev;
+
+ gnss_deregister_device(gserial->gdev);
+
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(&serdev->dev);
+ else
+ gnss_serial_set_power(gserial, GNSS_SERIAL_OFF);
+}
+EXPORT_SYMBOL_GPL(gnss_serial_deregister);
+
+#ifdef CONFIG_PM
+static int gnss_serial_runtime_suspend(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+
+ return gnss_serial_set_power(gserial, GNSS_SERIAL_STANDBY);
+}
+
+static int gnss_serial_runtime_resume(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+
+ return gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
+}
+#endif /* CONFIG_PM */
+
+static int gnss_serial_prepare(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 1;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gnss_serial_suspend(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+ int ret = 0;
+
+ /*
+ * FIXME: serdev currently lacks support for managing the underlying
+ * device's wakeup settings. A workaround would be to close the serdev
+ * device here if it is open.
+ */
+
+ if (!pm_runtime_suspended(dev))
+ ret = gnss_serial_set_power(gserial, GNSS_SERIAL_STANDBY);
+
+ return ret;
+}
+
+static int gnss_serial_resume(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (!pm_runtime_suspended(dev))
+ ret = gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+const struct dev_pm_ops gnss_serial_pm_ops = {
+ .prepare = gnss_serial_prepare,
+ SET_SYSTEM_SLEEP_PM_OPS(gnss_serial_suspend, gnss_serial_resume)
+ SET_RUNTIME_PM_OPS(gnss_serial_runtime_suspend, gnss_serial_runtime_resume, NULL)
+};
+EXPORT_SYMBOL_GPL(gnss_serial_pm_ops);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("Generic serial GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/serial.h b/drivers/gnss/serial.h
new file mode 100644
index 000000000000..980ffdc86c2a
--- /dev/null
+++ b/drivers/gnss/serial.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Generic serial GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#ifndef _LINUX_GNSS_SERIAL_H
+#define _LINUX_GNSS_SERIAL_H
+
+#include <asm/termbits.h>
+#include <linux/pm.h>
+
+struct gnss_serial {
+ struct serdev_device *serdev;
+ struct gnss_device *gdev;
+ speed_t speed;
+ const struct gnss_serial_ops *ops;
+ unsigned long drvdata[0];
+};
+
+enum gnss_serial_pm_state {
+ GNSS_SERIAL_OFF,
+ GNSS_SERIAL_ACTIVE,
+ GNSS_SERIAL_STANDBY,
+};
+
+struct gnss_serial_ops {
+ int (*set_power)(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state);
+};
+
+extern const struct dev_pm_ops gnss_serial_pm_ops;
+
+struct gnss_serial *gnss_serial_allocate(struct serdev_device *serdev,
+ size_t data_size);
+void gnss_serial_free(struct gnss_serial *gserial);
+
+int gnss_serial_register(struct gnss_serial *gserial);
+void gnss_serial_deregister(struct gnss_serial *gserial);
+
+static inline void *gnss_serial_get_drvdata(struct gnss_serial *gserial)
+{
+ return gserial->drvdata;
+}
+
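+/*
+ * Subdrivers embed their private state via data_size and retrieve it
+ * with the accessor above, e.g. (sketch; my_serial_ops is a
+ * placeholder):
+ *
+ *    gserial = gnss_serial_allocate(serdev, sizeof(*data));
+ *    gserial->ops = &my_serial_ops;
+ *    data = gnss_serial_get_drvdata(gserial);
+ */
+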
+#endif /* _LINUX_GNSS_SERIAL_H */
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
new file mode 100644
index 000000000000..79cb98950013
--- /dev/null
+++ b/drivers/gnss/sirf.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiRFstar GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#define SIRF_BOOT_DELAY 500
+#define SIRF_ON_OFF_PULSE_TIME 100
+#define SIRF_ACTIVATE_TIMEOUT 200
+#define SIRF_HIBERNATE_TIMEOUT 200
+
+struct sirf_data {
+ struct gnss_device *gdev;
+ struct serdev_device *serdev;
+ speed_t speed;
+ struct regulator *vcc;
+ struct gpio_desc *on_off;
+ struct gpio_desc *wakeup;
+ int irq;
+ bool active;
+ wait_queue_head_t power_wait;
+};
+
+static int sirf_open(struct gnss_device *gdev)
+{
+ struct sirf_data *data = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = data->serdev;
+ int ret;
+
+ ret = serdev_device_open(serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, data->speed);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = pm_runtime_get_sync(&serdev->dev);
+ if (ret < 0) {
+ dev_err(&gdev->dev, "failed to runtime resume: %d\n", ret);
+ pm_runtime_put_noidle(&serdev->dev);
+ goto err_close;
+ }
+
+ return 0;
+
+err_close:
+ serdev_device_close(serdev);
+
+ return ret;
+}
+
+static void sirf_close(struct gnss_device *gdev)
+{
+ struct sirf_data *data = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = data->serdev;
+
+ serdev_device_close(serdev);
+
+ pm_runtime_put(&serdev->dev);
+}
+
+static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count)
+{
+ struct sirf_data *data = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = data->serdev;
+ int ret;
+
+ /* write is only buffered synchronously */
+ ret = serdev_device_write(serdev, buf, count, 0);
+ if (ret < 0)
+ return ret;
+
+ /* FIXME: determine if interrupted? */
+ serdev_device_wait_until_sent(serdev, 0);
+
+ return count;
+}
+
+static const struct gnss_operations sirf_gnss_ops = {
+ .open = sirf_open,
+ .close = sirf_close,
+ .write_raw = sirf_write_raw,
+};
+
+static int sirf_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count)
+{
+ struct sirf_data *data = serdev_device_get_drvdata(serdev);
+ struct gnss_device *gdev = data->gdev;
+
+ return gnss_insert_raw(gdev, buf, count);
+}
+
+static const struct serdev_device_ops sirf_serdev_ops = {
+ .receive_buf = sirf_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static irqreturn_t sirf_wakeup_handler(int irq, void *dev_id)
+{
+ struct sirf_data *data = dev_id;
+ struct device *dev = &data->serdev->dev;
+ int ret;
+
+ ret = gpiod_get_value_cansleep(data->wakeup);
+ dev_dbg(dev, "%s - wakeup = %d\n", __func__, ret);
+ if (ret < 0)
+ goto out;
+
+ data->active = !!ret;
+ wake_up_interruptible(&data->power_wait);
+out:
+ return IRQ_HANDLED;
+}
+
+static int sirf_wait_for_power_state(struct sirf_data *data, bool active,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_interruptible_timeout(data->power_wait,
+ data->active == active, msecs_to_jiffies(timeout));
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0) {
+ dev_warn(&data->serdev->dev, "timeout waiting for active state = %d\n",
+ active);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void sirf_pulse_on_off(struct sirf_data *data)
+{
+ gpiod_set_value_cansleep(data->on_off, 1);
+ msleep(SIRF_ON_OFF_PULSE_TIME);
+ gpiod_set_value_cansleep(data->on_off, 0);
+}
+
+static int sirf_set_active(struct sirf_data *data, bool active)
+{
+ unsigned long timeout;
+ int retries = 3;
+ int ret;
+
+ if (active)
+ timeout = SIRF_ACTIVATE_TIMEOUT;
+ else
+ timeout = SIRF_HIBERNATE_TIMEOUT;
+
+ while (retries-- > 0) {
+ sirf_pulse_on_off(data);
+ ret = sirf_wait_for_power_state(data, active, timeout);
+ if (ret < 0) {
+ if (ret == -ETIMEDOUT)
+ continue;
+
+ return ret;
+ }
+
+ break;
+ }
+
+ if (retries < 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
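+/*
+ * Each ON_OFF pulse toggles the receiver between active and hibernate;
+ * the WAKEUP line reflects the resulting state, which the wakeup
+ * interrupt handler latches into data->active for sirf_set_active()
+ * to wait on.
+ */
+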
+static int sirf_runtime_suspend(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+
+ if (!data->on_off)
+ return regulator_disable(data->vcc);
+
+ return sirf_set_active(data, false);
+}
+
+static int sirf_runtime_resume(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+
+ if (!data->on_off)
+ return regulator_enable(data->vcc);
+
+ return sirf_set_active(data, true);
+}
+
+static int __maybe_unused sirf_suspend(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (!pm_runtime_suspended(dev))
+ ret = sirf_runtime_suspend(dev);
+
+ if (data->wakeup)
+ disable_irq(data->irq);
+
+ return ret;
+}
+
+static int __maybe_unused sirf_resume(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (data->wakeup)
+ enable_irq(data->irq);
+
+ if (!pm_runtime_suspended(dev))
+ ret = sirf_runtime_resume(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sirf_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sirf_suspend, sirf_resume)
+ SET_RUNTIME_PM_OPS(sirf_runtime_suspend, sirf_runtime_resume, NULL)
+};
+
+static int sirf_parse_dt(struct serdev_device *serdev)
+{
+ struct sirf_data *data = serdev_device_get_drvdata(serdev);
+ struct device_node *node = serdev->dev.of_node;
+ u32 speed = 9600;
+
+ of_property_read_u32(node, "current-speed", &speed);
+
+ data->speed = speed;
+
+ return 0;
+}
+
+static int sirf_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct gnss_device *gdev;
+ struct sirf_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ gdev = gnss_allocate_device(dev);
+ if (!gdev)
+ return -ENOMEM;
+
+ gdev->type = GNSS_TYPE_SIRF;
+ gdev->ops = &sirf_gnss_ops;
+ gnss_set_drvdata(gdev, data);
+
+ data->serdev = serdev;
+ data->gdev = gdev;
+
+ init_waitqueue_head(&data->power_wait);
+
+ serdev_device_set_drvdata(serdev, data);
+ serdev_device_set_client_ops(serdev, &sirf_serdev_ops);
+
+ ret = sirf_parse_dt(serdev);
+ if (ret)
+ goto err_put_device;
+
+ data->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ ret = PTR_ERR(data->vcc);
+ goto err_put_device;
+ }
+
+ data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(data->on_off)) {
+ ret = PTR_ERR(data->on_off);
+ goto err_put_device;
+ }
+
+ if (data->on_off) {
+ data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
+ GPIOD_IN);
+ if (IS_ERR(data->wakeup)) {
+ ret = PTR_ERR(data->wakeup);
+ goto err_put_device;
+ }
+
+ /*
+ * Configurations where WAKEUP has been left unconnected are
+ * currently not supported.
+ */
+ if (!data->wakeup) {
+ dev_err(dev, "no wakeup gpio specified\n");
+ ret = -ENODEV;
+ goto err_put_device;
+ }
+ }
+
+ if (data->wakeup) {
+ ret = gpiod_to_irq(data->wakeup);
+ if (ret < 0)
+ goto err_put_device;
+
+ data->irq = ret;
+
+ ret = devm_request_threaded_irq(dev, data->irq, NULL,
+ sirf_wakeup_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "wakeup", data);
+ if (ret)
+ goto err_put_device;
+ }
+
+ if (data->on_off) {
+ ret = regulator_enable(data->vcc);
+ if (ret)
+ goto err_put_device;
+
+ /* Wait for chip to boot into hibernate mode */
+ msleep(SIRF_BOOT_DELAY);
+ }
+
+ if (IS_ENABLED(CONFIG_PM)) {
+ pm_runtime_set_suspended(dev); /* clear runtime_error flag */
+ pm_runtime_enable(dev);
+ } else {
+ ret = sirf_runtime_resume(dev);
+ if (ret < 0)
+ goto err_disable_vcc;
+ }
+
+ ret = gnss_register_device(gdev);
+ if (ret)
+ goto err_disable_rpm;
+
+ return 0;
+
+err_disable_rpm:
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(dev);
+ else
+ sirf_runtime_suspend(dev);
+err_disable_vcc:
+ if (data->on_off)
+ regulator_disable(data->vcc);
+err_put_device:
+ gnss_put_device(data->gdev);
+
+ return ret;
+}
+
+static void sirf_remove(struct serdev_device *serdev)
+{
+ struct sirf_data *data = serdev_device_get_drvdata(serdev);
+
+ gnss_deregister_device(data->gdev);
+
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(&serdev->dev);
+ else
+ sirf_runtime_suspend(&serdev->dev);
+
+ if (data->on_off)
+ regulator_disable(data->vcc);
+
+ gnss_put_device(data->gdev);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sirf_of_match[] = {
+ { .compatible = "fastrax,uc430" },
+ { .compatible = "linx,r4" },
+ { .compatible = "wi2wi,w2sg0008i" },
+ { .compatible = "wi2wi,w2sg0084i" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sirf_of_match);
+#endif
+
+static struct serdev_device_driver sirf_driver = {
+ .driver = {
+ .name = "gnss-sirf",
+ .of_match_table = of_match_ptr(sirf_of_match),
+ .pm = &sirf_pm_ops,
+ },
+ .probe = sirf_probe,
+ .remove = sirf_remove,
+};
+module_serdev_device_driver(sirf_driver);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("SiRFstar GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
new file mode 100644
index 000000000000..12568aebb7f6
--- /dev/null
+++ b/drivers/gnss/ubx.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * u-blox GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serdev.h>
+
+#include "serial.h"
+
+struct ubx_data {
+ struct regulator *v_bckp;
+ struct regulator *vcc;
+};
+
+static int ubx_set_active(struct gnss_serial *gserial)
+{
+ struct ubx_data *data = gnss_serial_get_drvdata(gserial);
+ int ret;
+
+ ret = regulator_enable(data->vcc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ubx_set_standby(struct gnss_serial *gserial)
+{
+ struct ubx_data *data = gnss_serial_get_drvdata(gserial);
+ int ret;
+
+ ret = regulator_disable(data->vcc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ubx_set_power(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state)
+{
+ switch (state) {
+ case GNSS_SERIAL_ACTIVE:
+ return ubx_set_active(gserial);
+ case GNSS_SERIAL_OFF:
+ case GNSS_SERIAL_STANDBY:
+ return ubx_set_standby(gserial);
+ }
+
+ return -EINVAL;
+}
+
+static const struct gnss_serial_ops ubx_gserial_ops = {
+ .set_power = ubx_set_power,
+};
+
+static int ubx_probe(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial;
+ struct ubx_data *data;
+ int ret;
+
+ gserial = gnss_serial_allocate(serdev, sizeof(*data));
+ if (IS_ERR(gserial)) {
+ ret = PTR_ERR(gserial);
+ return ret;
+ }
+
+ gserial->ops = &ubx_gserial_ops;
+
+ gserial->gdev->type = GNSS_TYPE_UBX;
+
+ data = gnss_serial_get_drvdata(gserial);
+
+ data->vcc = devm_regulator_get(&serdev->dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ ret = PTR_ERR(data->vcc);
+ goto err_free_gserial;
+ }
+
+ data->v_bckp = devm_regulator_get_optional(&serdev->dev, "v-bckp");
+ if (IS_ERR(data->v_bckp)) {
+ ret = PTR_ERR(data->v_bckp);
+ if (ret == -ENODEV)
+ data->v_bckp = NULL;
+ else
+ goto err_free_gserial;
+ }
+
+ if (data->v_bckp) {
+ ret = regulator_enable(data->v_bckp);
+ if (ret)
+ goto err_free_gserial;
+ }
+
+ ret = gnss_serial_register(gserial);
+ if (ret)
+ goto err_disable_v_bckp;
+
+ return 0;
+
+err_disable_v_bckp:
+ if (data->v_bckp)
+ regulator_disable(data->v_bckp);
+err_free_gserial:
+ gnss_serial_free(gserial);
+
+ return ret;
+}
+
+static void ubx_remove(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct ubx_data *data = gnss_serial_get_drvdata(gserial);
+
+ gnss_serial_deregister(gserial);
+ if (data->v_bckp)
+ regulator_disable(data->v_bckp);
+ gnss_serial_free(gserial);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ubx_of_match[] = {
+ { .compatible = "u-blox,neo-8" },
+ { .compatible = "u-blox,neo-m8" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ubx_of_match);
+#endif
+
+static struct serdev_device_driver ubx_driver = {
+ .driver = {
+ .name = "gnss-ubx",
+ .of_match_table = of_match_ptr(ubx_of_match),
+ .pm = &gnss_serial_pm_ops,
+ },
+ .probe = ubx_probe,
+ .remove = ubx_remove,
+};
+module_serdev_device_driver(ubx_driver);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("u-blox GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 71c0ab46f216..4f52c3a8ec99 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -359,6 +359,15 @@ config GPIO_MPC8XXX
Say Y here if you're going to use hardware that connects to the
MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
+config GPIO_MT7621
+ bool "Mediatek MT7621 GPIO Support"
+ depends on SOC_MT7620 || SOC_MT7621 || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support the Mediatek MT7621 SoC GPIO device.
+
config GPIO_MVEBU
def_bool y
depends on PLAT_ORION || ARCH_MVEBU
@@ -684,7 +693,8 @@ config GPIO_IT87
Say yes here to support GPIO functionality of IT87xx Super I/O chips.
This driver is tested with ITE IT8728 and IT8732 Super I/O chips, and
- supports the IT8761E, IT8620E and IT8628E Super I/O chip as well.
+ supports the IT8761E, IT8613, IT8620E, and IT8628E Super I/O chips as
+ well.
To compile this driver as a module, choose M here: the module will
be called gpio_it87
@@ -1039,6 +1049,12 @@ config GPIO_LP87565
This driver can also be built as a module. If so, the module will be
called gpio-lp87565.
+config GPIO_MADERA
+ tristate "Cirrus Logic Madera class codecs"
+ depends on PINCTRL_MADERA
+ help
+ Support for GPIOs on Cirrus Logic Madera class codecs.
+
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 1324c8f966a7..c256aff66a65 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
+obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
+obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index b31ae16170e7..2342e154029b 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -12,6 +12,7 @@
#include <asm/div64.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/aspeed.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -22,6 +23,15 @@
#include <linux/spinlock.h>
#include <linux/string.h>
+/*
+ * These two headers aren't meant to be used by GPIO drivers. We need
+ * them to access gpio_chip_hwgpio(), which is required to implement
+ * the Aspeed-specific API that lets the coprocessor request access to
+ * some GPIOs and arbitrates between the coprocessor and the ARM.
+ */
+#include <linux/gpio/consumer.h>
+#include "gpiolib.h"
+
struct aspeed_bank_props {
unsigned int bank;
u32 input;
@@ -56,83 +66,130 @@ struct aspeed_gpio {
struct clk *clk;
u32 *dcache;
+ u8 *cf_copro_bankmap;
};
struct aspeed_gpio_bank {
- uint16_t val_regs;
+ uint16_t val_regs; /* +0: Rd: read input value, Wr: set write latch
+ * +4: Rd/Wr: Direction (0=in, 1=out)
+ */
+ uint16_t rdata_reg; /* Rd: read write latch, Wr: <none> */
uint16_t irq_regs;
uint16_t debounce_regs;
uint16_t tolerance_regs;
+ uint16_t cmdsrc_regs;
const char names[4][3];
};
+/*
+ * Note: The "value" register returns the input value sampled on the
+ * line even when the GPIO is configured as an output. Since
+ * that input goes through synchronizers, writing and then reading
+ * back may not return the written value right away.
+ *
+ * The "rdata" register returns the content of the write latch
+ * and thus can be used to read back what was last written
+ * reliably.
+ */
+
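This is why the probe path and aspeed_gpio_copro_request() below refresh gpio->dcache from reg_rdata rather than reg_val. A minimal sketch of the resulting read-modify-write pattern, mirroring __aspeed_gpio_set() in this patch (illustrative only, not part of the commit):

    /* Update one output line without reading the "value" register,
     * whose contents lag behind due to the input synchronizers; the
     * cached write latch (gpio->dcache) is the source of truth. */
    reg = gpio->dcache[GPIO_BANK(offset)];   /* last latched value */
    if (val)
            reg |= GPIO_BIT(offset);
    else
            reg &= ~GPIO_BIT(offset);
    iowrite32(reg, bank_reg(gpio, bank, reg_val));
    gpio->dcache[GPIO_BANK(offset)] = reg;   /* keep the cache coherent */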
static const int debounce_timers[4] = { 0x00, 0x50, 0x54, 0x58 };
+static const struct aspeed_gpio_copro_ops *copro_ops;
+static void *copro_data;
+
static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
{
.val_regs = 0x0000,
+ .rdata_reg = 0x00c0,
.irq_regs = 0x0008,
.debounce_regs = 0x0040,
.tolerance_regs = 0x001c,
+ .cmdsrc_regs = 0x0060,
.names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
+ .rdata_reg = 0x00c4,
.irq_regs = 0x0028,
.debounce_regs = 0x0048,
.tolerance_regs = 0x003c,
+ .cmdsrc_regs = 0x0068,
.names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
+ .rdata_reg = 0x00c8,
.irq_regs = 0x0098,
.debounce_regs = 0x00b0,
.tolerance_regs = 0x00ac,
+ .cmdsrc_regs = 0x0090,
.names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
+ .rdata_reg = 0x00cc,
.irq_regs = 0x00e8,
.debounce_regs = 0x0100,
.tolerance_regs = 0x00fc,
+ .cmdsrc_regs = 0x00e0,
.names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
+ .rdata_reg = 0x00d0,
.irq_regs = 0x0118,
.debounce_regs = 0x0130,
.tolerance_regs = 0x012c,
+ .cmdsrc_regs = 0x0110,
.names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
+ .rdata_reg = 0x00d4,
.irq_regs = 0x0148,
.debounce_regs = 0x0160,
.tolerance_regs = 0x015c,
+ .cmdsrc_regs = 0x0140,
.names = { "U", "V", "W", "X" },
},
{
.val_regs = 0x01E0,
+ .rdata_reg = 0x00d8,
.irq_regs = 0x0178,
.debounce_regs = 0x0190,
.tolerance_regs = 0x018c,
+ .cmdsrc_regs = 0x0170,
.names = { "Y", "Z", "AA", "AB" },
},
{
.val_regs = 0x01e8,
+ .rdata_reg = 0x00dc,
.irq_regs = 0x01a8,
.debounce_regs = 0x01c0,
.tolerance_regs = 0x01bc,
+ .cmdsrc_regs = 0x01a0,
.names = { "AC", "", "", "" },
},
};
-#define GPIO_BANK(x) ((x) >> 5)
-#define GPIO_OFFSET(x) ((x) & 0x1f)
-#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
+enum aspeed_gpio_reg {
+ reg_val,
+ reg_rdata,
+ reg_dir,
+ reg_irq_enable,
+ reg_irq_type0,
+ reg_irq_type1,
+ reg_irq_type2,
+ reg_irq_status,
+ reg_debounce_sel1,
+ reg_debounce_sel2,
+ reg_tolerance,
+ reg_cmdsrc0,
+ reg_cmdsrc1,
+};
-#define GPIO_DATA 0x00
-#define GPIO_DIR 0x04
+#define GPIO_VAL_VALUE 0x00
+#define GPIO_VAL_DIR 0x04
#define GPIO_IRQ_ENABLE 0x00
#define GPIO_IRQ_TYPE0 0x04
@@ -143,6 +200,53 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
#define GPIO_DEBOUNCE_SEL1 0x00
#define GPIO_DEBOUNCE_SEL2 0x04
+#define GPIO_CMDSRC_0 0x00
+#define GPIO_CMDSRC_1 0x04
+#define GPIO_CMDSRC_ARM 0
+#define GPIO_CMDSRC_LPC 1
+#define GPIO_CMDSRC_COLDFIRE 2
+#define GPIO_CMDSRC_RESERVED 3
+
+/* This will be resolved at compile time */
+static inline void __iomem *bank_reg(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ const enum aspeed_gpio_reg reg)
+{
+ switch (reg) {
+ case reg_val:
+ return gpio->base + bank->val_regs + GPIO_VAL_VALUE;
+ case reg_rdata:
+ return gpio->base + bank->rdata_reg;
+ case reg_dir:
+ return gpio->base + bank->val_regs + GPIO_VAL_DIR;
+ case reg_irq_enable:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_ENABLE;
+ case reg_irq_type0:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE0;
+ case reg_irq_type1:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE1;
+ case reg_irq_type2:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE2;
+ case reg_irq_status:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
+ case reg_debounce_sel1:
+ return gpio->base + bank->debounce_regs + GPIO_DEBOUNCE_SEL1;
+ case reg_debounce_sel2:
+ return gpio->base + bank->debounce_regs + GPIO_DEBOUNCE_SEL2;
+ case reg_tolerance:
+ return gpio->base + bank->tolerance_regs;
+ case reg_cmdsrc0:
+ return gpio->base + bank->cmdsrc_regs + GPIO_CMDSRC_0;
+ case reg_cmdsrc1:
+ return gpio->base + bank->cmdsrc_regs + GPIO_CMDSRC_1;
+ }
+ BUG();
+}
+
+#define GPIO_BANK(x) ((x) >> 5)
+#define GPIO_OFFSET(x) ((x) & 0x1f)
+#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
+
#define _GPIO_SET_DEBOUNCE(t, o, i) ((!!((t) & BIT(i))) << GPIO_OFFSET(o))
#define GPIO_SET_DEBOUNCE1(t, o) _GPIO_SET_DEBOUNCE(t, o, 1)
#define GPIO_SET_DEBOUNCE2(t, o) _GPIO_SET_DEBOUNCE(t, o, 0)
@@ -201,18 +305,80 @@ static inline bool have_output(struct aspeed_gpio *gpio, unsigned int offset)
return !props || (props->output & GPIO_BIT(offset));
}
-static void __iomem *bank_val_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- unsigned int reg)
+static void aspeed_gpio_change_cmd_source(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ int bindex, int cmdsrc)
{
- return gpio->base + bank->val_regs + reg;
+ void __iomem *c0 = bank_reg(gpio, bank, reg_cmdsrc0);
+ void __iomem *c1 = bank_reg(gpio, bank, reg_cmdsrc1);
+ u32 bit, reg;
+
+ /*
+ * Each register controls 4 banks, so take the bottom 2
+ * bits of the bank index, and use them to select the
+ * right control bit (0, 8, 16 or 24).
+ */
+ bit = BIT((bindex & 3) << 3);
+
+ /* Source 1 first to avoid illegal 11 combination */
+ reg = ioread32(c1);
+ if (cmdsrc & 2)
+ reg |= bit;
+ else
+ reg &= ~bit;
+ iowrite32(reg, c1);
+
+ /* Then Source 0 */
+ reg = ioread32(c0);
+ if (cmdsrc & 1)
+ reg |= bit;
+ else
+ reg &= ~bit;
+ iowrite32(reg, c0);
}
-static void __iomem *bank_irq_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- unsigned int reg)
+static bool aspeed_gpio_copro_request(struct aspeed_gpio *gpio,
+ unsigned int offset)
{
- return gpio->base + bank->irq_regs + reg;
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+
+ if (!copro_ops || !gpio->cf_copro_bankmap)
+ return false;
+ if (!gpio->cf_copro_bankmap[offset >> 3])
+ return false;
+ if (!copro_ops->request_access)
+ return false;
+
+ /* Pause the coprocessor */
+ copro_ops->request_access(copro_data);
+
+ /* Change command source back to ARM */
+ aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3, GPIO_CMDSRC_ARM);
+
+ /* Update cache */
+ gpio->dcache[GPIO_BANK(offset)] = ioread32(bank_reg(gpio, bank, reg_rdata));
+
+ return true;
+}
+
+static void aspeed_gpio_copro_release(struct aspeed_gpio *gpio,
+ unsigned int offset)
+{
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+
+ if (!copro_ops || !gpio->cf_copro_bankmap)
+ return;
+ if (!gpio->cf_copro_bankmap[offset >> 3])
+ return;
+ if (!copro_ops->release_access)
+ return;
+
+ /* Change command source back to ColdFire */
+ aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3,
+ GPIO_CMDSRC_COLDFIRE);
+
+ /* Restart the coprocessor */
+ copro_ops->release_access(copro_data);
}
static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -220,8 +386,7 @@ static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset)
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
- return !!(ioread32(bank_val_reg(gpio, bank, GPIO_DATA))
- & GPIO_BIT(offset));
+ return !!(ioread32(bank_reg(gpio, bank, reg_val)) & GPIO_BIT(offset));
}
static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
@@ -232,7 +397,7 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
void __iomem *addr;
u32 reg;
- addr = bank_val_reg(gpio, bank, GPIO_DATA);
+ addr = bank_reg(gpio, bank, reg_val);
reg = gpio->dcache[GPIO_BANK(offset)];
if (val)
@@ -249,11 +414,15 @@ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
unsigned long flags;
+ bool copro;
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
}
@@ -261,7 +430,9 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = bank_reg(gpio, bank, reg_dir);
unsigned long flags;
+ bool copro;
u32 reg;
if (!have_input(gpio, offset))
@@ -269,8 +440,13 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
spin_lock_irqsave(&gpio->lock, flags);
- reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
- iowrite32(reg & ~GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
+ reg = ioread32(addr);
+ reg &= ~GPIO_BIT(offset);
+
+ copro = aspeed_gpio_copro_request(gpio, offset);
+ iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
@@ -282,7 +458,9 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = bank_reg(gpio, bank, reg_dir);
unsigned long flags;
+ bool copro;
u32 reg;
if (!have_output(gpio, offset))
@@ -290,10 +468,15 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
spin_lock_irqsave(&gpio->lock, flags);
+ reg = ioread32(addr);
+ reg |= GPIO_BIT(offset);
+
+ copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
- reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
- iowrite32(reg | GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
+ iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
@@ -314,7 +497,7 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
spin_lock_irqsave(&gpio->lock, flags);
- val = ioread32(bank_val_reg(gpio, bank, GPIO_DIR)) & GPIO_BIT(offset);
+ val = ioread32(bank_reg(gpio, bank, reg_dir)) & GPIO_BIT(offset);
spin_unlock_irqrestore(&gpio->lock, flags);
@@ -323,24 +506,23 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
}
static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
- struct aspeed_gpio **gpio,
- const struct aspeed_gpio_bank **bank,
- u32 *bit)
+ struct aspeed_gpio **gpio,
+ const struct aspeed_gpio_bank **bank,
+ u32 *bit, int *offset)
{
- int offset;
struct aspeed_gpio *internal;
- offset = irqd_to_hwirq(d);
+ *offset = irqd_to_hwirq(d);
internal = irq_data_get_irq_chip_data(d);
/* This might be a bit of a questionable place to check */
- if (!have_irq(internal, offset))
+ if (!have_irq(internal, *offset))
return -ENOTSUPP;
*gpio = internal;
- *bank = to_bank(offset);
- *bit = GPIO_BIT(offset);
+ *bank = to_bank(*offset);
+ *bit = GPIO_BIT(*offset);
return 0;
}
@@ -351,17 +533,23 @@ static void aspeed_gpio_irq_ack(struct irq_data *d)
struct aspeed_gpio *gpio;
unsigned long flags;
void __iomem *status_addr;
+ int rc, offset;
+ bool copro;
u32 bit;
- int rc;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
if (rc)
return;
- status_addr = bank_irq_reg(gpio, bank, GPIO_IRQ_STATUS);
+ status_addr = bank_reg(gpio, bank, reg_irq_status);
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
+
iowrite32(bit, status_addr);
+
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
}
@@ -372,15 +560,17 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
unsigned long flags;
u32 reg, bit;
void __iomem *addr;
- int rc;
+ int rc, offset;
+ bool copro;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
if (rc)
return;
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_ENABLE);
+ addr = bank_reg(gpio, bank, reg_irq_enable);
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
reg = ioread32(addr);
if (set)
@@ -389,6 +579,8 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
reg &= ~bit;
iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
}
@@ -413,9 +605,10 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
struct aspeed_gpio *gpio;
unsigned long flags;
void __iomem *addr;
- int rc;
+ int rc, offset;
+ bool copro;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
if (rc)
return -EINVAL;
@@ -441,22 +634,25 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
}
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE0);
+ addr = bank_reg(gpio, bank, reg_irq_type0);
reg = ioread32(addr);
reg = (reg & ~bit) | type0;
iowrite32(reg, addr);
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE1);
+ addr = bank_reg(gpio, bank, reg_irq_type1);
reg = ioread32(addr);
reg = (reg & ~bit) | type1;
iowrite32(reg, addr);
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE2);
+ addr = bank_reg(gpio, bank, reg_irq_type2);
reg = ioread32(addr);
reg = (reg & ~bit) | type2;
iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
irq_set_handler_locked(d, handler);
@@ -477,7 +673,7 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
for (i = 0; i < ARRAY_SIZE(aspeed_gpio_banks); i++) {
const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
- reg = ioread32(bank_irq_reg(data, bank, GPIO_IRQ_STATUS));
+ reg = ioread32(bank_reg(data, bank, reg_irq_status));
for_each_set_bit(p, &reg, 32) {
girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
@@ -549,21 +745,27 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
unsigned int offset, bool enable)
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- const struct aspeed_gpio_bank *bank;
unsigned long flags;
+ void __iomem *treg;
+ bool copro;
u32 val;
- bank = to_bank(offset);
+ treg = bank_reg(gpio, to_bank(offset), reg_tolerance);
spin_lock_irqsave(&gpio->lock, flags);
- val = readl(gpio->base + bank->tolerance_regs);
+ copro = aspeed_gpio_copro_request(gpio, offset);
+
+ val = readl(treg);
if (enable)
val |= GPIO_BIT(offset);
else
val &= ~GPIO_BIT(offset);
- writel(val, gpio->base + bank->tolerance_regs);
+ writel(val, treg);
+
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
@@ -582,13 +784,6 @@ static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
pinctrl_gpio_free(chip->base + offset);
}
-static inline void __iomem *bank_debounce_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- unsigned int reg)
-{
- return gpio->base + bank->debounce_regs + reg;
-}
-
static int usecs_to_cycles(struct aspeed_gpio *gpio, unsigned long usecs,
u32 *cycles)
{
@@ -666,11 +861,14 @@ static void configure_timer(struct aspeed_gpio *gpio, unsigned int offset,
void __iomem *addr;
u32 val;
- addr = bank_debounce_reg(gpio, bank, GPIO_DEBOUNCE_SEL1);
+ /* Note: the debounce timer isn't under the control of the command
+ * source registers, so there is no need to sync with the coprocessor
+ */
+ addr = bank_reg(gpio, bank, reg_debounce_sel1);
val = ioread32(addr);
iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE1(timer, offset), addr);
- addr = bank_debounce_reg(gpio, bank, GPIO_DEBOUNCE_SEL2);
+ addr = bank_reg(gpio, bank, reg_debounce_sel2);
val = ioread32(addr);
iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE2(timer, offset), addr);
}
@@ -812,6 +1010,111 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return -ENOTSUPP;
}
+/**
+ * aspeed_gpio_copro_set_ops - Sets the callbacks used for handshaking with
+ * the coprocessor for shared GPIO banks
+ * @ops: The callbacks
+ * @data: Pointer passed back to the callbacks
+ */
+int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data)
+{
+ copro_data = data;
+ copro_ops = ops;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(aspeed_gpio_copro_set_ops);
+
+/**
+ * aspeed_gpio_copro_grab_gpio - Mark a GPIO used by the coprocessor. The entire
+ * bank gets marked and any access from the ARM will
+ * result in handshaking via callbacks.
+ * @desc: The GPIO to be marked
+ * @vreg_offset: If non-NULL, returns the value register offset in the GPIO space
+ * @dreg_offset: If non-NULL, returns the data latch register offset in the GPIO space
+ * @bit: If non-NULL, returns the bit number of the GPIO in the registers
+ */
+int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
+ u16 *vreg_offset, u16 *dreg_offset, u8 *bit)
+{
+ struct gpio_chip *chip = gpiod_to_chip(desc);
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned long flags;
+
+ if (!gpio->cf_copro_bankmap)
+ gpio->cf_copro_bankmap = kzalloc(gpio->config->nr_gpios >> 3, GFP_KERNEL);
+ if (!gpio->cf_copro_bankmap)
+ return -ENOMEM;
+ if (offset < 0 || offset >= gpio->config->nr_gpios)
+ return -EINVAL;
+ bindex = offset >> 3;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ /* Sanity check, this shouldn't happen */
+ if (gpio->cf_copro_bankmap[bindex] == 0xff) {
+ rc = -EIO;
+ goto bail;
+ }
+ gpio->cf_copro_bankmap[bindex]++;
+
+ /* Switch command source */
+ if (gpio->cf_copro_bankmap[bindex] == 1)
+ aspeed_gpio_change_cmd_source(gpio, bank, bindex,
+ GPIO_CMDSRC_COLDFIRE);
+
+ if (vreg_offset)
+ *vreg_offset = bank->val_regs;
+ if (dreg_offset)
+ *dreg_offset = bank->rdata_reg;
+ if (bit)
+ *bit = GPIO_OFFSET(offset);
+ bail:
+ spin_unlock_irqrestore(&gpio->lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio);
+
+/**
+ * aspeed_gpio_copro_release_gpio - Unmark a GPIO used by the coprocessor.
+ * @desc: The GPIO to be unmarked
+ */
+int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
+{
+ struct gpio_chip *chip = gpiod_to_chip(desc);
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned long flags;
+
+ if (!gpio->cf_copro_bankmap)
+ return -ENXIO;
+
+ if (offset < 0 || offset >= gpio->config->nr_gpios)
+ return -EINVAL;
+ bindex = offset >> 3;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ /* Sanity check, this shouldn't happen */
+ if (gpio->cf_copro_bankmap[bindex] == 0) {
+ rc = -EIO;
+ goto bail;
+ }
+ gpio->cf_copro_bankmap[bindex]--;
+
+ /* Switch command source */
+ if (gpio->cf_copro_bankmap[bindex] == 0)
+ aspeed_gpio_change_cmd_source(gpio, bank, bindex,
+ GPIO_CMDSRC_ARM);
+ bail:
+ spin_unlock_irqrestore(&gpio->lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio);
+
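Taken together, these three exports define the handshake contract with the coprocessor driver. A hedged sketch of a client (all my_* names are hypothetical, and the callback signatures are inferred from the request_access()/release_access() calls above; only the aspeed_gpio_copro_* calls come from this patch):

    static void my_pause(void *data)
    {
            /* stop the ColdFire coprocessor here */
    }

    static void my_resume(void *data)
    {
            /* restart the ColdFire coprocessor here */
    }

    static const struct aspeed_gpio_copro_ops my_ops = {
            .request_access = my_pause,
            .release_access = my_resume,
    };

    static int my_claim_line(struct gpio_desc *desc)
    {
            u16 vreg, dreg;
            u8 bit;
            int rc;

            aspeed_gpio_copro_set_ops(&my_ops, NULL);
            rc = aspeed_gpio_copro_grab_gpio(desc, &vreg, &dreg, &bit);
            if (rc)
                    return rc;
            /* hand vreg/dreg/bit to the coprocessor firmware ... */
            return 0;
    }

On teardown such a client would presumably call aspeed_gpio_copro_release_gpio(desc) and then drop its callbacks with aspeed_gpio_copro_set_ops(NULL, NULL), which the NULL checks in the request/release helpers above tolerate.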
/*
* Any banks not specified in a struct aspeed_bank_props array are assumed to
* have the properties:
@@ -902,11 +1205,18 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
if (!gpio->dcache)
return -ENOMEM;
- /* Populate it with initial values read from the HW */
+ /*
+ * Populate it with initial values read from the HW and switch
+ * all command sources to the ARM by default
+ */
for (i = 0; i < banks; i++) {
const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
- gpio->dcache[i] = ioread32(gpio->base + bank->val_regs +
- GPIO_DATA);
+ void __iomem *addr = bank_reg(gpio, bank, reg_rdata);
+ gpio->dcache[i] = ioread32(addr);
+ aspeed_gpio_change_cmd_source(gpio, bank, 0, GPIO_CMDSRC_ARM);
+ aspeed_gpio_change_cmd_source(gpio, bank, 1, GPIO_CMDSRC_ARM);
+ aspeed_gpio_change_cmd_source(gpio, bank, 2, GPIO_CMDSRC_ARM);
+ aspeed_gpio_change_cmd_source(gpio, bank, 3, GPIO_CMDSRC_ARM);
}
rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 684e9d6d6623..0a553d676042 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -51,7 +51,7 @@ static u32 ath79_gpio_read(struct ath79_gpio_ctrl *ctrl, unsigned reg)
static void ath79_gpio_write(struct ath79_gpio_ctrl *ctrl,
unsigned reg, u32 val)
{
- return writel(val, ctrl->base + reg);
+ writel(val, ctrl->base + reg);
}
static bool ath79_gpio_update_bits(
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 00272fa7cc4f..d0707fc23afd 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -485,12 +485,14 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ int ret;
- if (gpiochip_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq)) {
+ ret = gpiochip_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq);
+ if (ret) {
dev_err(kona_gpio->gpio_chip.parent,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
- return -EINVAL;
+ return ret;
}
return 0;
}
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 035a454eca43..a5ece8ea79bc 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -167,8 +167,8 @@ of_err:
static int davinci_gpio_probe(struct platform_device *pdev)
{
static int ctrl_num, bank_base;
- int gpio, bank, ret = 0;
- unsigned ngpio, nbank;
+ int gpio, bank, i, ret = 0;
+ unsigned int ngpio, nbank, nirq;
struct davinci_gpio_controller *chips;
struct davinci_gpio_platform_data *pdata;
struct device *dev = &pdev->dev;
@@ -197,6 +197,16 @@ static int davinci_gpio_probe(struct platform_device *pdev)
if (WARN_ON(ARCH_NR_GPIOS < ngpio))
ngpio = ARCH_NR_GPIOS;
+ /*
+ * If there are unbanked interrupts, the number of interrupts equals
+ * the number of unbanked GPIOs; otherwise all interrupts are banked
+ * and their number equals the number of banks, each serving 16 GPIOs
+ * (e.g. 144 banked GPIOs need DIV_ROUND_UP(144, 16) = 9 interrupts).
+ */
+ if (pdata->gpio_unbanked)
+ nirq = pdata->gpio_unbanked;
+ else
+ nirq = DIV_ROUND_UP(ngpio, 16);
+
nbank = DIV_ROUND_UP(ngpio, 32);
chips = devm_kcalloc(dev,
nbank, sizeof(struct davinci_gpio_controller),
@@ -209,6 +219,15 @@ static int davinci_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio_base))
return PTR_ERR(gpio_base);
+ for (i = 0; i < nirq; i++) {
+ chips->irqs[i] = platform_get_irq(pdev, i);
+ if (chips->irqs[i] < 0) {
+ dev_info(dev, "IRQ not populated, err = %d\n",
+ chips->irqs[i]);
+ return chips->irqs[i];
+ }
+ }
+
snprintf(label, MAX_LABEL_SIZE, "davinci_gpio.%d", ctrl_num++);
chips->chip.label = devm_kstrdup(dev, label, GFP_KERNEL);
if (!chips->chip.label)
@@ -377,7 +396,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
* can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
*/
if (offset < d->gpio_unbanked)
- return d->base_irq + offset;
+ return d->irqs[offset];
else
return -ENODEV;
}
@@ -386,11 +405,18 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
{
struct davinci_gpio_controller *d;
struct davinci_gpio_regs __iomem *g;
- u32 mask;
+ u32 mask, i;
d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
g = (struct davinci_gpio_regs __iomem *)d->regs[0];
- mask = __gpio_mask(data->irq - d->base_irq);
+ for (i = 0; i < MAX_INT_PER_BANK; i++)
+ if (data->irq == d->irqs[i])
+ break;
+
+ if (i == MAX_INT_PER_BANK)
+ return -EINVAL;
+
+ mask = __gpio_mask(i);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
@@ -459,9 +485,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
int ret;
struct clk *clk;
u32 binten = 0;
- unsigned ngpio, bank_irq;
+ unsigned ngpio;
struct device *dev = &pdev->dev;
- struct resource *res;
struct davinci_gpio_controller *chips = platform_get_drvdata(pdev);
struct davinci_gpio_platform_data *pdata = dev->platform_data;
struct davinci_gpio_regs __iomem *g;
@@ -481,24 +506,13 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
gpio_get_irq_chip = (gpio_get_irq_chip_cb_t)match->data;
ngpio = pdata->ngpio;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "Invalid IRQ resource\n");
- return -EBUSY;
- }
-
- bank_irq = res->start;
-
- if (!bank_irq) {
- dev_err(dev, "Invalid IRQ resource\n");
- return -ENODEV;
- }
clk = devm_clk_get(dev, "gpio");
if (IS_ERR(clk)) {
dev_err(dev, "Error %ld getting gpio clock\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
+
ret = clk_prepare_enable(clk);
if (ret)
return ret;
@@ -538,12 +552,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
if (pdata->gpio_unbanked) {
/* pass "bank 0" GPIO IRQs to AINTC */
chips->chip.to_irq = gpio_to_irq_unbanked;
- chips->base_irq = bank_irq;
chips->gpio_unbanked = pdata->gpio_unbanked;
binten = GENMASK(pdata->gpio_unbanked / 16, 0);
/* AINTC handles mask/unmask; GPIO handles triggering */
- irq = bank_irq;
+ irq = chips->irqs[0];
irq_chip = gpio_get_irq_chip(irq);
irq_chip->name = "GPIO-AINTC";
irq_chip->irq_set_type = gpio_irq_type_unbanked;
@@ -554,10 +567,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
writel_relaxed(~0, &g->set_rising);
/* set the direct IRQs up to use that irqchip */
- for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) {
- irq_set_chip(irq, irq_chip);
- irq_set_handler_data(irq, chips);
- irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
+ for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++) {
+ irq_set_chip(chips->irqs[gpio], irq_chip);
+ irq_set_handler_data(chips->irqs[gpio], chips);
+ irq_set_status_flags(chips->irqs[gpio],
+ IRQ_TYPE_EDGE_BOTH);
}
goto done;
@@ -567,7 +581,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
* Or, AINTC can handle IRQs for banks of 16 GPIO IRQs, which we
* then chain through our own handler.
*/
- for (gpio = 0, bank = 0; gpio < ngpio; bank++, bank_irq++, gpio += 16) {
+ for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 16) {
/* disabled by default, enabled only as needed
* There are register sets for 32 GPIOs. 2 banks of 16
* GPIOs are covered by each set of registers hence divide by 2
@@ -594,8 +608,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
irqdata->bank_num = bank;
irqdata->chip = chips;
- irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler,
- irqdata);
+ irq_set_chained_handler_and_data(chips->irqs[bank],
+ gpio_irq_handler, irqdata);
binten |= BIT(bank);
}
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 7a2de3de6571..28da700f5f52 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -255,11 +255,13 @@ static int dwapb_irq_reqres(struct irq_data *d)
struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = igc->private;
struct gpio_chip *gc = &gpio->ports[0].gc;
+ int ret;
- if (gpiochip_lock_as_irq(gc, irqd_to_hwirq(d))) {
+ ret = gpiochip_lock_as_irq(gc, irqd_to_hwirq(d));
+ if (ret) {
dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
- return -EINVAL;
+ return ret;
}
return 0;
}
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 2b466b80e70a..982e699a5b81 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -101,12 +101,14 @@ static void em_gio_irq_enable(struct irq_data *d)
static int em_gio_irq_reqres(struct irq_data *d)
{
struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
+ int ret;
- if (gpiochip_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d))) {
+ ret = gpiochip_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
+ if (ret) {
dev_err(p->gpio_chip.parent,
"unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
- return -EINVAL;
+ return ret;
}
return 0;
}
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index 7cad14d3f127..389ecd8b7d26 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -35,12 +35,15 @@
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
+#define IT8613_ID 0x8613
#define IT8620_ID 0x8620
#define IT8628_ID 0x8628
+#define IT8718_ID 0x8718
#define IT8728_ID 0x8728
#define IT8732_ID 0x8732
#define IT8761_ID 0x8761
#define IT8772_ID 0x8772
+#define IT8786_ID 0x8786
/* IO Ports */
#define REG 0x2e
@@ -306,6 +309,14 @@ static int __init it87_gpio_init(void)
it87_gpio->chip = it87_template_chip;
switch (chip_type) {
+ case IT8613_ID:
+ gpio_ba_reg = 0x62;
+ it87_gpio->io_size = 8; /* it8613 only needs 6, use 8 for alignment */
+ it87_gpio->output_base = 0xc8;
+ it87_gpio->simple_base = 0xc0;
+ it87_gpio->simple_size = 6;
+ it87_gpio->chip.ngpio = 64; /* has 48, use 64 for convenient calc */
+ break;
case IT8620_ID:
case IT8628_ID:
gpio_ba_reg = 0x62;
@@ -314,9 +325,11 @@ static int __init it87_gpio_init(void)
it87_gpio->simple_size = 0;
it87_gpio->chip.ngpio = 64;
break;
+ case IT8718_ID:
case IT8728_ID:
case IT8732_ID:
case IT8772_ID:
+ case IT8786_ID:
gpio_ba_reg = 0x62;
it87_gpio->io_size = 8;
it87_gpio->output_base = 0xc8;
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
new file mode 100644
index 000000000000..7ba68d1a0932
--- /dev/null
+++ b/drivers/gpio/gpio-madera.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GPIO support for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/pdata.h>
+#include <linux/mfd/madera/registers.h>
+
+struct madera_gpio {
+ struct madera *madera;
+ /* storage space for the gpio_chip we're using */
+ struct gpio_chip gpio_chip;
+};
+
+static int madera_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
+ struct madera *madera = madera_gpio->madera;
+ unsigned int reg_offset = 2 * offset;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(madera->regmap, MADERA_GPIO1_CTRL_2 + reg_offset,
+ &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & MADERA_GP1_DIR_MASK);
+}
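Worth spelling out the arithmetic used throughout this file: each Madera GPIO line owns a pair of control registers two addresses apart, hence reg_offset = 2 * offset; GPIOn_CTRL_2 (read here) carries the direction bit, while GPIOn_CTRL_1 (read in madera_gpio_get() below) carries the level bit.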
+
+static int madera_gpio_direction_in(struct gpio_chip *chip, unsigned int offset)
+{
+ struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
+ struct madera *madera = madera_gpio->madera;
+ unsigned int reg_offset = 2 * offset;
+
+ return regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_2 + reg_offset,
+ MADERA_GP1_DIR_MASK, MADERA_GP1_DIR);
+}
+
+static int madera_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
+ struct madera *madera = madera_gpio->madera;
+ unsigned int reg_offset = 2 * offset;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(madera->regmap, MADERA_GPIO1_CTRL_1 + reg_offset,
+ &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & MADERA_GP1_LVL_MASK);
+}
+
+static int madera_gpio_direction_out(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
+ struct madera *madera = madera_gpio->madera;
+ unsigned int reg_offset = 2 * offset;
+ unsigned int reg_val = value ? MADERA_GP1_LVL : 0;
+ int ret;
+
+ ret = regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_2 + reg_offset,
+ MADERA_GP1_DIR_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ return regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_1 + reg_offset,
+ MADERA_GP1_LVL_MASK, reg_val);
+}
+
+static void madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
+ struct madera *madera = madera_gpio->madera;
+ unsigned int reg_offset = 2 * offset;
+ unsigned int reg_val = value ? MADERA_GP1_LVL : 0;
+ int ret;
+
+ ret = regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_1 + reg_offset,
+ MADERA_GP1_LVL_MASK, reg_val);
+
+ /* set() doesn't return an error so log a warning */
+ if (ret)
+ dev_warn(madera->dev, "Failed to write to 0x%x (%d)\n",
+ MADERA_GPIO1_CTRL_1 + reg_offset, ret);
+}
+
+static struct gpio_chip madera_gpio_chip = {
+ .label = "madera",
+ .owner = THIS_MODULE,
+ .request = gpiochip_generic_request,
+ .free = gpiochip_generic_free,
+ .get_direction = madera_gpio_get_direction,
+ .direction_input = madera_gpio_direction_in,
+ .get = madera_gpio_get,
+ .direction_output = madera_gpio_direction_out,
+ .set = madera_gpio_set,
+ .set_config = gpiochip_generic_config,
+ .can_sleep = true,
+};
+
+static int madera_gpio_probe(struct platform_device *pdev)
+{
+ struct madera *madera = dev_get_drvdata(pdev->dev.parent);
+ struct madera_pdata *pdata = dev_get_platdata(madera->dev);
+ struct madera_gpio *madera_gpio;
+ int ret;
+
+ madera_gpio = devm_kzalloc(&pdev->dev, sizeof(*madera_gpio),
+ GFP_KERNEL);
+ if (!madera_gpio)
+ return -ENOMEM;
+
+ madera_gpio->madera = madera;
+
+ /* Construct suitable gpio_chip from the template in madera_gpio_chip */
+ madera_gpio->gpio_chip = madera_gpio_chip;
+ madera_gpio->gpio_chip.parent = pdev->dev.parent;
+
+ switch (madera->type) {
+ case CS47L35:
+ madera_gpio->gpio_chip.ngpio = CS47L35_NUM_GPIOS;
+ break;
+ case CS47L85:
+ case WM1840:
+ madera_gpio->gpio_chip.ngpio = CS47L85_NUM_GPIOS;
+ break;
+ case CS47L90:
+ case CS47L91:
+ madera_gpio->gpio_chip.ngpio = CS47L90_NUM_GPIOS;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown chip variant %d\n", madera->type);
+ return -EINVAL;
+ }
+
+ /* We want to be usable on systems that don't use devicetree or acpi */
+ if (pdata && pdata->gpio_base)
+ madera_gpio->gpio_chip.base = pdata->gpio_base;
+ else
+ madera_gpio->gpio_chip.base = -1;
+
+ ret = devm_gpiochip_add_data(&pdev->dev,
+ &madera_gpio->gpio_chip,
+ madera_gpio);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * This is part of a composite MFD device that can only be used with
+ * the corresponding pinctrl driver. On all supported silicon the
+ * GPIO-to-pinctrl mapping is fixed in hardware, so we register it
+ * explicitly instead of requiring a redundant gpio-ranges in the
+ * devicetree.
+ * In any case we also want to work on systems that don't use
+ * devicetree or ACPI.
+ */
+ ret = gpiochip_add_pin_range(&madera_gpio->gpio_chip, "madera-pinctrl",
+ 0, 0, madera_gpio->gpio_chip.ngpio);
+ if (ret) {
+ dev_dbg(&pdev->dev, "Failed to add pin range (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver madera_gpio_driver = {
+ .driver = {
+ .name = "madera-gpio",
+ },
+ .probe = madera_gpio_probe,
+};
+
+module_platform_driver(madera_gpio_driver);
+
+MODULE_SOFTDEP("pre: pinctrl-madera");
+MODULE_DESCRIPTION("GPIO interface for Madera codecs");
+MODULE_AUTHOR("Nariman Poushin <nariman@opensource.cirrus.com>");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:madera-gpio");
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 9d8bcc69f245..f03cb0ba7726 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -653,6 +653,12 @@ static int max732x_probe(struct i2c_client *client,
chip->client_group_a = client;
if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_b);
+ if (!c) {
+ dev_err(&client->dev,
+ "Failed to allocate I2C device\n");
+ ret = -ENODEV;
+ goto out_failed;
+ }
chip->client_group_b = chip->client_dummy = c;
}
break;
@@ -660,6 +666,12 @@ static int max732x_probe(struct i2c_client *client,
chip->client_group_b = client;
if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_a);
+ if (!c) {
+ dev_err(&client->dev,
+ "Failed to allocate I2C device\n");
+ ret = -ENODEV;
+ goto out_failed;
+ }
chip->client_group_a = chip->client_dummy = c;
}
break;
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index e1037582e34d..b2635326546e 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -56,9 +56,9 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
rnd = fls(debounce) - 1;
if (rnd && (debounce & BIT(rnd - 1)))
- debounce = round_up(debounce, MEN_Z127_DB_MIN_US);
+ debounce = roundup(debounce, MEN_Z127_DB_MIN_US);
else
- debounce = round_down(debounce, MEN_Z127_DB_MIN_US);
+ debounce = rounddown(debounce, MEN_Z127_DB_MIN_US);
if (debounce > MEN_Z127_DB_MAX_US)
debounce = MEN_Z127_DB_MAX_US;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index b23d9a36be1f..51c7d1b84c2e 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -496,9 +496,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
return 0;
err_gpiochip_add:
+ chip = chip_save;
while (--i >= 0) {
- chip--;
gpiochip_remove(&chip->gpio);
+ chip++;
}
kfree(chip_save);
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 7b14d6280e44..935292a30c99 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -136,8 +136,20 @@ static unsigned long bgpio_line2mask(struct gpio_chip *gc, unsigned int line)
static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long pinmask = bgpio_line2mask(gc, gpio);
+ bool dir = !!(gc->bgpio_dir & pinmask);
- if (gc->bgpio_dir & pinmask)
+ /*
+ * If the direction is OUT we read the value from the SET
+ * register, and if the direction is IN we read the value
+ * from the DAT register.
+ *
+ * If the direction bits are inverted, naturally this gets
+ * inverted too.
+ */
+ if (gc->bgpio_dir_inverted)
+ dir = !dir;
+
+ if (dir)
return !!(gc->read_reg(gc->reg_set) & pinmask);
else
return !!(gc->read_reg(gc->reg_dat) & pinmask);
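Spelled out: with a normal direction register a set bit marks the line as an output, so its value is read back from the SET register; with an inverted (dirin-style) register a set bit marks it as an input, so the DAT register is used instead. Either way, outputs are read from SET and inputs from DAT.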
@@ -157,8 +169,13 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
*bits &= ~*mask;
/* Exploit the fact that we know which directions are set */
- set_mask = *mask & gc->bgpio_dir;
- get_mask = *mask & ~gc->bgpio_dir;
+ if (gc->bgpio_dir_inverted) {
+ set_mask = *mask & ~gc->bgpio_dir;
+ get_mask = *mask & gc->bgpio_dir;
+ } else {
+ set_mask = *mask & gc->bgpio_dir;
+ get_mask = *mask & ~gc->bgpio_dir;
+ }
if (set_mask)
*bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -359,7 +376,10 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ if (gc->bgpio_dir_inverted)
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
+ else
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -370,7 +390,10 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
+ /* Return 0 if output, 1 if input */
- return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
+ if (gc->bgpio_dir_inverted)
+ return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
+ else
+ return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
}
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -381,37 +404,10 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
-
- spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
- return 0;
-}
-
-static int bgpio_dir_in_inv(struct gpio_chip *gc, unsigned int gpio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
-
- spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
- return 0;
-}
-
-static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- unsigned long flags;
-
- gc->set(gc, gpio, val);
-
- spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ if (gc->bgpio_dir_inverted)
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ else
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -419,12 +415,6 @@ static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
-static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
-{
- /* Return 0 if output, 1 if input */
- return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
-}
-
static int bgpio_setup_accessors(struct device *dev,
struct gpio_chip *gc,
bool byte_be)
@@ -560,9 +550,10 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
gc->get_direction = bgpio_get_dir;
} else if (dirin) {
gc->reg_dir = dirin;
- gc->direction_output = bgpio_dir_out_inv;
- gc->direction_input = bgpio_dir_in_inv;
- gc->get_direction = bgpio_get_dir_inv;
+ gc->direction_output = bgpio_dir_out;
+ gc->direction_input = bgpio_dir_in;
+ gc->get_direction = bgpio_get_dir;
+ gc->bgpio_dir_inverted = true;
} else {
if (flags & BGPIOF_NO_OUTPUT)
gc->direction_output = bgpio_dir_out_err;
@@ -582,6 +573,33 @@ static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
return -EINVAL;
}
+/**
+ * bgpio_init() - Initialize generic GPIO accessor functions
+ * @gc: the GPIO chip to set up
+ * @dev: the parent device of the new GPIO chip (compulsory)
+ * @sz: the size (width) of the MMIO registers in bytes, typically 1, 2 or 4
+ * @dat: MMIO address of the register to READ the value of the GPIO lines; a 1
+ * in the corresponding bit of this register is expected to mean the
+ * line is asserted
+ * @set: MMIO address of the register to SET the value of the GPIO lines;
+ * writing a line's bit as 1 in this register is expected to drive the
+ * GPIO line high.
+ * @clr: MMIO address of the register to CLEAR the value of the GPIO lines;
+ * writing a line's bit as 1 in this register is expected to drive the
+ * GPIO line low. This address may be left NULL, in which case the SET
+ * register will be assumed to also clear the GPIO lines, by actively
+ * writing the line with 0.
+ * @dirout: MMIO address for the register to set the line as OUTPUT. It is assumed
+ * that setting a line to 1 in this register will turn that line into an
+ * output line. Conversely, setting the line to 0 will turn that line into
+ * an input. Either this or @dirin can be defined, but never both.
+ * @dirin: MMIO address for the register to set this line as INPUT. It is assumed
+ * that setting a line to 1 in this register will turn that line into an
+ * input line. Conversely, setting the line to 0 will turn that line into
+ * an output. Either this or @dirout can be defined, but never both.
+ * @flags: Different flags that will affect the behaviour of the device, such as
+ * endianness etc.
+ */
int bgpio_init(struct gpio_chip *gc, struct device *dev,
unsigned long sz, void __iomem *dat, void __iomem *set,
void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
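For orientation, a minimal caller sketch matching this signature; the register offsets here are made up, while mediatek_gpio_bank_probe() later in this patch is a real in-tree caller:

    /* Hypothetical 32-bit-wide MMIO GPIO block. */
    ret = bgpio_init(&gc, dev, 4,
                     base + 0x00,  /* dat: read line values */
                     base + 0x04,  /* set: write 1 to drive high */
                     base + 0x08,  /* clr: write 1 to drive low */
                     base + 0x0c,  /* dirout: 1 = output */
                     NULL,         /* dirin: never both with dirout */
                     0);           /* flags */
    if (ret)
            return ret;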
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
new file mode 100644
index 000000000000..d72af6f6cdbd
--- /dev/null
+++ b/drivers/gpio/gpio-mt7621.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define MTK_BANK_CNT 3
+#define MTK_BANK_WIDTH 32
+
+#define GPIO_BANK_STRIDE 0x04
+#define GPIO_REG_CTRL 0x00
+#define GPIO_REG_POL 0x10
+#define GPIO_REG_DATA 0x20
+#define GPIO_REG_DSET 0x30
+#define GPIO_REG_DCLR 0x40
+#define GPIO_REG_REDGE 0x50
+#define GPIO_REG_FEDGE 0x60
+#define GPIO_REG_HLVL 0x70
+#define GPIO_REG_LLVL 0x80
+#define GPIO_REG_STAT 0x90
+#define GPIO_REG_EDGE 0xA0
+
+struct mtk_gc {
+ struct gpio_chip chip;
+ spinlock_t lock;
+ int bank;
+ u32 rising;
+ u32 falling;
+ u32 hlevel;
+ u32 llevel;
+};
+
+/**
+ * struct mtk - state container for the platform driver's data.
+ * It holds three separate gpio_chips, each one with its
+ * own irq_chip.
+ * @dev: device instance
+ * @base: memory base address
+ * @gpio_irq: irq number from the device tree
+ * @gc_map: array of the gpio chips
+ */
+struct mtk {
+ struct device *dev;
+ void __iomem *base;
+ int gpio_irq;
+ struct mtk_gc gc_map[MTK_BANK_CNT];
+};
+
+static inline struct mtk_gc *
+to_mediatek_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct mtk_gc, chip);
+}
+
+static inline void
+mtk_gpio_w32(struct mtk_gc *rg, u32 offset, u32 val)
+{
+ struct gpio_chip *gc = &rg->chip;
+ struct mtk *mtk = gpiochip_get_data(gc);
+
+ offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
+ gc->write_reg(mtk->base + offset, val);
+}
+
+static inline u32
+mtk_gpio_r32(struct mtk_gc *rg, u32 offset)
+{
+ struct gpio_chip *gc = &rg->chip;
+ struct mtk *mtk = gpiochip_get_data(gc);
+
+ offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
+ return gc->read_reg(mtk->base + offset);
+}
+
+static irqreturn_t
+mediatek_gpio_irq_handler(int irq, void *data)
+{
+ struct gpio_chip *gc = data;
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long pending;
+ int bit;
+
+ pending = mtk_gpio_r32(rg, GPIO_REG_STAT);
+
+ for_each_set_bit(bit, &pending, MTK_BANK_WIDTH) {
+ u32 map = irq_find_mapping(gc->irq.domain, bit);
+
+ generic_handle_irq(map);
+ mtk_gpio_w32(rg, GPIO_REG_STAT, BIT(bit));
+ ret |= IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static void
+mediatek_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 rise, fall, high, low;
+
+ spin_lock_irqsave(&rg->lock, flags);
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+ high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
+ low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise | (BIT(pin) & rg->rising));
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (BIT(pin) & rg->falling));
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high | (BIT(pin) & rg->hlevel));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low | (BIT(pin) & rg->llevel));
+ spin_unlock_irqrestore(&rg->lock, flags);
+}
+
+static void
+mediatek_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 rise, fall, high, low;
+
+ spin_lock_irqsave(&rg->lock, flags);
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+ high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
+ low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
+ spin_unlock_irqrestore(&rg->lock, flags);
+}
+
+static int
+mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ int pin = d->hwirq;
+ u32 mask = BIT(pin);
+
+ if (type == IRQ_TYPE_PROBE) {
+ if ((rg->rising | rg->falling |
+ rg->hlevel | rg->llevel) & mask)
+ return 0;
+
+ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
+ }
+
+ rg->rising &= ~mask;
+ rg->falling &= ~mask;
+ rg->hlevel &= ~mask;
+ rg->llevel &= ~mask;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ rg->rising |= mask;
+ rg->falling |= mask;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ rg->rising |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ rg->falling |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ rg->hlevel |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ rg->llevel |= mask;
+ break;
+ }
+
+ return 0;
+}
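Note that the set_type handler above only records the requested trigger in the per-bank rising/falling/hlevel/llevel masks; the hardware edge/level registers are written only when the interrupt is unmasked, which is why mediatek_gpio_irq_unmask() ANDs BIT(pin) against these cached words.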
+
+static struct irq_chip mediatek_gpio_irq_chip = {
+ .irq_unmask = mediatek_gpio_irq_unmask,
+ .irq_mask = mediatek_gpio_irq_mask,
+ .irq_mask_ack = mediatek_gpio_irq_mask,
+ .irq_set_type = mediatek_gpio_irq_type,
+};
+
+static int
+mediatek_gpio_xlate(struct gpio_chip *chip,
+ const struct of_phandle_args *spec, u32 *flags)
+{
+ int gpio = spec->args[0];
+ struct mtk_gc *rg = to_mediatek_gpio(chip);
+
+ if (rg->bank != gpio / MTK_BANK_WIDTH)
+ return -EINVAL;
+
+ if (flags)
+ *flags = spec->args[1];
+
+ return gpio % MTK_BANK_WIDTH;
+}
+
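As a worked example of the translation above (numbers mine, not from the patch): DT line 40 with MTK_BANK_WIDTH = 32 belongs to bank 40 / 32 = 1, so the bank-1 chip accepts it and returns offset 40 % 32 = 8, while the bank-0 and bank-2 chips reject the specifier with -EINVAL.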
+static int
+mediatek_gpio_bank_probe(struct device *dev,
+ struct device_node *node, int bank)
+{
+ struct mtk *mtk = dev_get_drvdata(dev);
+ struct mtk_gc *rg;
+ void __iomem *dat, *set, *ctrl, *diro;
+ int ret;
+
+ rg = &mtk->gc_map[bank];
+ memset(rg, 0, sizeof(*rg));
+
+ spin_lock_init(&rg->lock);
+ rg->chip.of_node = node;
+ rg->bank = bank;
+
+ dat = mtk->base + GPIO_REG_DATA + (rg->bank * GPIO_BANK_STRIDE);
+ set = mtk->base + GPIO_REG_DSET + (rg->bank * GPIO_BANK_STRIDE);
+ ctrl = mtk->base + GPIO_REG_DCLR + (rg->bank * GPIO_BANK_STRIDE);
+ diro = mtk->base + GPIO_REG_CTRL + (rg->bank * GPIO_BANK_STRIDE);
+
+ ret = bgpio_init(&rg->chip, dev, 4,
+ dat, set, ctrl, diro, NULL, 0);
+ if (ret) {
+ dev_err(dev, "bgpio_init() failed\n");
+ return ret;
+ }
+
+ rg->chip.of_gpio_n_cells = 2;
+ rg->chip.of_xlate = mediatek_gpio_xlate;
+ rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
+ dev_name(dev), bank);
+
+ ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
+ if (ret < 0) {
+ dev_err(dev, "Could not register gpio %d, ret=%d\n",
+ rg->chip.ngpio, ret);
+ return ret;
+ }
+
+ if (mtk->gpio_irq) {
+ /*
+ * Manually request the irq here instead of passing
+ * a flow-handler to gpiochip_set_chained_irqchip,
+ * because the irq is shared.
+ */
+ ret = devm_request_irq(dev, mtk->gpio_irq,
+ mediatek_gpio_irq_handler, IRQF_SHARED,
+ rg->chip.label, &rg->chip);
+
+ if (ret) {
+ dev_err(dev, "Error requesting IRQ %d: %d\n",
+ mtk->gpio_irq, ret);
+ return ret;
+ }
+
+ ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
+ 0, handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "failed to add gpiochip_irqchip\n");
+ return ret;
+ }
+
+ gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
+ mtk->gpio_irq, NULL);
+ }
+
+ /* set polarity to low for all gpios */
+ mtk_gpio_w32(rg, GPIO_REG_POL, 0);
+
+ dev_info(dev, "registering %d gpios\n", rg->chip.ngpio);
+
+ return 0;
+}
+
+static int
+mediatek_gpio_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mtk *mtk;
+ int i;
+
+ mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
+ if (!mtk)
+ return -ENOMEM;
+
+ mtk->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mtk->base))
+ return PTR_ERR(mtk->base);
+
+ mtk->gpio_irq = irq_of_parse_and_map(np, 0);
+ mtk->dev = dev;
+ platform_set_drvdata(pdev, mtk);
+ mediatek_gpio_irq_chip.name = dev_name(dev);
+
+ for (i = 0; i < MTK_BANK_CNT; i++)
+ mediatek_gpio_bank_probe(dev, np, i);
+
+ return 0;
+}
+
+static const struct of_device_id mediatek_gpio_match[] = {
+ { .compatible = "mediatek,mt7621-gpio" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mediatek_gpio_match);
+
+static struct platform_driver mediatek_gpio_driver = {
+ .probe = mediatek_gpio_probe,
+ .driver = {
+ .name = "mt7621_gpio",
+ .of_match_table = mediatek_gpio_match,
+ },
+};
+
+builtin_platform_driver(mediatek_gpio_driver);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 2f2829966d4c..995cf0b9e0b1 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -45,6 +45,15 @@ struct mxc_gpio_hwdata {
unsigned fall_edge;
};
+struct mxc_gpio_reg_saved {
+ u32 icr1;
+ u32 icr2;
+ u32 imr;
+ u32 gdir;
+ u32 edge_sel;
+ u32 dr;
+};
+
struct mxc_gpio_port {
struct list_head node;
void __iomem *base;
@@ -55,6 +64,8 @@ struct mxc_gpio_port {
struct gpio_chip gc;
struct device *dev;
u32 both_edges;
+ struct mxc_gpio_reg_saved gpio_saved_reg;
+ bool power_off;
};
static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = {
@@ -143,6 +154,7 @@ static const struct of_device_id mxc_gpio_dt_ids[] = {
{ .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], },
{ .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], },
{ .compatible = "fsl,imx35-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
+ { .compatible = "fsl,imx7d-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
{ /* sentinel */ }
};
@@ -434,6 +446,9 @@ static int mxc_gpio_probe(struct platform_device *pdev)
return err;
}
+ if (of_device_is_compatible(np, "fsl,imx7d-gpio"))
+ port->power_off = true;
+
/* disable the interrupt and clear the status */
writel(0, port->base + GPIO_IMR);
writel(~0, port->base + GPIO_ISR);
@@ -497,6 +512,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
list_add_tail(&port->node, &mxc_gpio_ports);
+ platform_set_drvdata(pdev, port);
+
return 0;
out_irqdomain_remove:
@@ -507,11 +524,67 @@ out_bgio:
return err;
}
+static void mxc_gpio_save_regs(struct mxc_gpio_port *port)
+{
+ if (!port->power_off)
+ return;
+
+ port->gpio_saved_reg.icr1 = readl(port->base + GPIO_ICR1);
+ port->gpio_saved_reg.icr2 = readl(port->base + GPIO_ICR2);
+ port->gpio_saved_reg.imr = readl(port->base + GPIO_IMR);
+ port->gpio_saved_reg.gdir = readl(port->base + GPIO_GDIR);
+ port->gpio_saved_reg.edge_sel = readl(port->base + GPIO_EDGE_SEL);
+ port->gpio_saved_reg.dr = readl(port->base + GPIO_DR);
+}
+
+static void mxc_gpio_restore_regs(struct mxc_gpio_port *port)
+{
+ if (!port->power_off)
+ return;
+
+ writel(port->gpio_saved_reg.icr1, port->base + GPIO_ICR1);
+ writel(port->gpio_saved_reg.icr2, port->base + GPIO_ICR2);
+ writel(port->gpio_saved_reg.imr, port->base + GPIO_IMR);
+ writel(port->gpio_saved_reg.gdir, port->base + GPIO_GDIR);
+ writel(port->gpio_saved_reg.edge_sel, port->base + GPIO_EDGE_SEL);
+ writel(port->gpio_saved_reg.dr, port->base + GPIO_DR);
+}
+
+static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mxc_gpio_port *port = platform_get_drvdata(pdev);
+
+ mxc_gpio_save_regs(port);
+ clk_disable_unprepare(port->clk);
+
+ return 0;
+}
+
+static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mxc_gpio_port *port = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret)
+ return ret;
+ mxc_gpio_restore_regs(port);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
+};
+
static struct platform_driver mxc_gpio_driver = {
.driver = {
.name = "gpio-mxc",
.of_match_table = mxc_gpio_dt_ids,
.suppress_bind_attrs = true,
+ .pm = &mxc_gpio_dev_pm_ops,
},
.probe = mxc_gpio_probe,
.id_table = mxc_gpio_devtype,
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index e2831ee70cdc..df30490da820 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -126,8 +126,7 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
else
writel(pin_mask, pin_addr + MXS_CLR);
- writel(pin_mask,
- port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
+ writel(pin_mask, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
return 0;
}
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index d1afedf4dcbf..e81008678a38 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -77,6 +77,8 @@ struct gpio_bank {
bool workaround_enabled;
void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
+ void (*set_dataout_multiple)(struct gpio_bank *bank,
+ unsigned long *mask, unsigned long *bits);
int (*get_context_loss_count)(struct device *dev);
struct omap_gpio_reg_offs *regs;
@@ -161,6 +163,51 @@ static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
return (readl_relaxed(reg) & (BIT(offset))) != 0;
}
+/* set multiple data out values using dedicate set/clear register */
+static void omap_set_gpio_dataout_reg_multiple(struct gpio_bank *bank,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ void __iomem *reg = bank->base;
+ u32 l;
+
+ l = *bits & *mask;
+ writel_relaxed(l, reg + bank->regs->set_dataout);
+ bank->context.dataout |= l;
+
+ l = ~*bits & *mask;
+ writel_relaxed(l, reg + bank->regs->clr_dataout);
+ bank->context.dataout &= ~l;
+}
+
+/* set multiple data out values using mask register */
+static void omap_set_gpio_dataout_mask_multiple(struct gpio_bank *bank,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ void __iomem *reg = bank->base + bank->regs->dataout;
+ u32 l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
+
+ writel_relaxed(l, reg);
+ bank->context.dataout = l;
+}
+
+static unsigned long omap_get_gpio_datain_multiple(struct gpio_bank *bank,
+ unsigned long *mask)
+{
+ void __iomem *reg = bank->base + bank->regs->datain;
+
+ return readl_relaxed(reg) & *mask;
+}
+
+static unsigned long omap_get_gpio_dataout_multiple(struct gpio_bank *bank,
+ unsigned long *mask)
+{
+ void __iomem *reg = bank->base + bank->regs->dataout;
+
+ return readl_relaxed(reg) & *mask;
+}
+
static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
int l = readl_relaxed(base + reg);
@@ -968,6 +1015,26 @@ static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
return 0;
}
+static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_bank *bank = gpiochip_get_data(chip);
+ void __iomem *reg = bank->base + bank->regs->direction;
+ unsigned long in = readl_relaxed(reg), l;
+
+ *bits = 0;
+
+ l = in & *mask;
+ if (l)
+ *bits |= omap_get_gpio_datain_multiple(bank, &l);
+
+ l = ~in & *mask;
+ if (l)
+ *bits |= omap_get_gpio_dataout_multiple(bank, &l);
+
+ return 0;
+}
+
static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
unsigned debounce)
{
@@ -1012,6 +1079,17 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
+static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ bank->set_dataout_multiple(bank, mask, bits);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+}
+
/*---------------------------------------------------------------------*/
static void omap_gpio_show_rev(struct gpio_bank *bank)
@@ -1073,9 +1151,11 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
bank->chip.get_direction = omap_gpio_get_direction;
bank->chip.direction_input = omap_gpio_input;
bank->chip.get = omap_gpio_get;
+ bank->chip.get_multiple = omap_gpio_get_multiple;
bank->chip.direction_output = omap_gpio_output;
bank->chip.set_config = omap_gpio_set_config;
bank->chip.set = omap_gpio_set;
+ bank->chip.set_multiple = omap_gpio_set_multiple;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
if (bank->regs->wkup_en)
@@ -1209,10 +1289,14 @@ static int omap_gpio_probe(struct platform_device *pdev)
pdata->get_context_loss_count;
}
- if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ if (bank->regs->set_dataout && bank->regs->clr_dataout) {
bank->set_dataout = omap_set_gpio_dataout_reg;
- else
+ bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
+ } else {
bank->set_dataout = omap_set_gpio_dataout_mask;
+ bank->set_dataout_multiple =
+ omap_set_gpio_dataout_mask_multiple;
+ }
raw_spin_lock_init(&bank->lock);
raw_spin_lock_init(&bank->wa_lock);
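The get_multiple/set_multiple contract that the OMAP hooks above implement: *mask selects the lines the caller cares about, *bits carries (or returns) their values, both indexed by hardware offset. A minimal sketch for a hypothetical chip with one data register of at most 32 lines (bar_* names are illustrative, not part of the patch):

#include <linux/gpio/driver.h>
#include <linux/io.h>

static void bar_set_multiple(struct gpio_chip *gc, unsigned long *mask,
			     unsigned long *bits)
{
	void __iomem *dat = gpiochip_get_data(gc);	/* driver data = reg base */
	u32 val = readl(dat);

	val = (val & ~*mask) | (*bits & *mask);	/* touch only masked lines */
	writel(val, dat);
}

static int bar_get_multiple(struct gpio_chip *gc, unsigned long *mask,
			    unsigned long *bits)
{
	void __iomem *dat = gpiochip_get_data(gc);

	*bits = readl(dat) & *mask;
	return 0;
}

The OMAP version splits the set path in two because some variants have dedicated set/clear registers (no read-modify-write needed) while others only have a plain data-out register.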
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index c55ad157e820..023a32cfac42 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -708,7 +708,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
- if (irq_base != -1 && (chip->driver_data & PCA_INT))
+ if (client->irq && irq_base != -1 && (chip->driver_data & PCA_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index f5545049c187..f809a5a8e9eb 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -12,6 +12,8 @@
* GNU General Public License version 2 for more details.
*/
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
@@ -90,6 +92,25 @@ static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
return (gpio->buffer[offset / 8] >> (offset % 8)) & 0x1;
}
+static int pisosr_gpio_get_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct pisosr_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int nbytes = DIV_ROUND_UP(chip->ngpio, 8);
+ unsigned int i, j;
+
+ pisosr_gpio_refresh(gpio);
+
+ bitmap_zero(bits, chip->ngpio);
+ for (i = 0; i < nbytes; i++) {
+ j = i / sizeof(unsigned long);
+ bits[j] |= ((unsigned long) gpio->buffer[i])
+ << (8 * (i % sizeof(unsigned long)));
+ }
+
+ return 0;
+}
+
static const struct gpio_chip template_chip = {
.label = "pisosr-gpio",
.owner = THIS_MODULE,
@@ -97,6 +118,7 @@ static const struct gpio_chip template_chip = {
.direction_input = pisosr_gpio_direction_input,
.direction_output = pisosr_gpio_direction_output,
.get = pisosr_gpio_get,
+ .get_multiple = pisosr_gpio_get_multiple,
.base = -1,
.ngpio = DEFAULT_NGPIO,
.can_sleep = true,
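The pisosr loop above packs an array of 8-bit shift-register reads into the unsigned long bitmap gpiolib expects. A standalone demonstration of that packing, compilable in userspace:

#include <stdio.h>

int main(void)
{
	unsigned char buffer[] = { 0xA5, 0x3C };	/* two 8-bit reads */
	unsigned long bits[1] = { 0 };
	unsigned int i, j, nbytes = sizeof(buffer);

	for (i = 0; i < nbytes; i++) {
		j = i / sizeof(unsigned long);
		bits[j] |= (unsigned long)buffer[i]
			   << (8 * (i % sizeof(unsigned long)));
	}
	printf("bits[0] = 0x%lx\n", bits[0]);	/* prints 0x3ca5 */
	return 0;
}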
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 1e66f808051c..c18712dabf93 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -241,6 +241,17 @@ int pxa_irq_to_gpio(int irq)
return irq_gpio0;
}
+static bool pxa_gpio_has_pinctrl(void)
+{
+ switch (gpio_type) {
+ case PXA3XX_GPIO:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct pxa_gpio_chip *pchip = chip_to_pxachip(chip);
@@ -255,9 +266,11 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
int ret;
- ret = pinctrl_gpio_direction_input(chip->base + offset);
- if (!ret)
- return 0;
+ if (pxa_gpio_has_pinctrl()) {
+ ret = pinctrl_gpio_direction_input(chip->base + offset);
+ if (!ret)
+ return 0;
+ }
spin_lock_irqsave(&gpio_lock, flags);
@@ -282,9 +295,11 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
- ret = pinctrl_gpio_direction_output(chip->base + offset);
- if (ret)
- return ret;
+ if (pxa_gpio_has_pinctrl()) {
+ ret = pinctrl_gpio_direction_output(chip->base + offset);
+ if (ret)
+ return ret;
+ }
spin_lock_irqsave(&gpio_lock, flags);
@@ -348,8 +363,12 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
- pchip->chip.request = gpiochip_generic_request;
- pchip->chip.free = gpiochip_generic_free;
+
+ if (pxa_gpio_has_pinctrl()) {
+ pchip->chip.request = gpiochip_generic_request;
+ pchip->chip.free = gpiochip_generic_free;
+ }
+
#ifdef CONFIG_OF_GPIO
pchip->chip.of_node = np;
pchip->chip.of_xlate = pxa_gpio_of_xlate;
@@ -607,7 +626,7 @@ static int pxa_gpio_probe(struct platform_device *pdev)
struct pxa_gpio_platform_data *info;
void __iomem *gpio_reg_base;
int gpio, ret;
- int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
+ int irq0 = 0, irq1 = 0, irq_mux;
pchip = devm_kzalloc(&pdev->dev, sizeof(*pchip), GFP_KERNEL);
if (!pchip)
@@ -646,14 +665,13 @@ static int pxa_gpio_probe(struct platform_device *pdev)
pchip->irq0 = irq0;
pchip->irq1 = irq1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
gpio_reg_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!gpio_reg_base)
return -EINVAL;
- if (irq0 > 0)
- gpio_offset = 2;
-
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
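The added NULL check on the MEM resource above avoids dereferencing a missing resource. For reference, the devm helper folds that check in; a sketch of the equivalent, more compact form (not what the patch does, just the common idiom):

gpio_reg_base = devm_ioremap_resource(&pdev->dev,
		platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (IS_ERR(gpio_reg_base))
	return PTR_ERR(gpio_reg_base);	/* -EINVAL if the resource is absent */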
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 3b4dc1a9a68d..a499c633a6c5 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/rc5t583.h>
struct rc5t583_gpio {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 350390c0b290..55cc61086d99 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -15,7 +15,7 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -278,6 +278,13 @@ static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
pm_runtime_put(&p->pdev->dev);
}
+static int gpio_rcar_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpio_rcar_priv *p = gpiochip_get_data(chip);
+
+ return !(gpio_rcar_read(p, INOUTSEL) & BIT(offset));
+}
+
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
{
gpio_rcar_config_general_input_output_mode(chip, offset, false);
@@ -461,6 +468,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip = &p->gpio_chip;
gpio_chip->request = gpio_rcar_request;
gpio_chip->free = gpio_rcar_free;
+ gpio_chip->get_direction = gpio_rcar_get_direction;
gpio_chip->direction_input = gpio_rcar_direction_input;
gpio_chip->get = gpio_rcar_get;
gpio_chip->direction_output = gpio_rcar_direction_output;
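The ->get_direction() convention the rcar hook follows: nonzero means input, zero means output. A sketch for any register where a set bit selects output (baz_* is hypothetical):

#include <linux/gpio/driver.h>
#include <linux/bits.h>
#include <linux/io.h>

static int baz_get_direction(struct gpio_chip *gc, unsigned int offset)
{
	u32 outsel = readl(gpiochip_get_data(gc));	/* hypothetical OUTSEL reg */

	return !(outsel & BIT(offset));	/* 1 = input, 0 = output */
}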
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index cbf0f9e6465b..2938217566d3 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -25,7 +25,7 @@
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/rdc321x.h>
#include <linux/slab.h>
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 249f433aa62d..986eb3b231ac 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 545004445846..e9878f6ede67 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -26,8 +26,7 @@
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/pci_ids.h>
-
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#define GEN 0x00
#define GIO 0x04
@@ -138,6 +137,13 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
return 0;
}
+static int sch_gpio_get_direction(struct gpio_chip *gc, unsigned gpio_num)
+{
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+
+ return sch_gpio_reg_get(sch, gpio_num, GIO);
+}
+
static const struct gpio_chip sch_gpio_chip = {
.label = "sch_gpio",
.owner = THIS_MODULE,
@@ -145,6 +151,7 @@ static const struct gpio_chip sch_gpio_chip = {
.get = sch_gpio_get,
.direction_output = sch_gpio_direction_out,
.set = sch_gpio_set,
+ .get_direction = sch_gpio_get_direction,
};
static int sch_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index b96990c262a1..5497f0a88cf0 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -17,16 +17,15 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/io.h>
#define DRV_NAME "gpio-sch311x"
-#define SCH311X_GPIO_CONF_OUT 0x00
-#define SCH311X_GPIO_CONF_IN 0x01
-#define SCH311X_GPIO_CONF_INVERT 0x02
-#define SCH311X_GPIO_CONF_OPEN_DRAIN 0x80
+#define SCH311X_GPIO_CONF_DIR BIT(0)
+#define SCH311X_GPIO_CONF_INVERT BIT(1)
+#define SCH311X_GPIO_CONF_OPEN_DRAIN BIT(7)
#define SIO_CONFIG_KEY_ENTER 0x55
#define SIO_CONFIG_KEY_EXIT 0xaa
@@ -163,7 +162,7 @@ static void sch311x_gpio_free(struct gpio_chip *chip, unsigned offset)
static int sch311x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
- unsigned char data;
+ u8 data;
spin_lock(&block->lock);
data = inb(block->runtime_reg + block->data_reg);
@@ -175,7 +174,7 @@ static int sch311x_gpio_get(struct gpio_chip *chip, unsigned offset)
static void __sch311x_gpio_set(struct sch311x_gpio_block *block,
unsigned offset, int value)
{
- unsigned char data = inb(block->runtime_reg + block->data_reg);
+ u8 data = inb(block->runtime_reg + block->data_reg);
if (value)
data |= BIT(offset);
else
@@ -196,10 +195,12 @@ static void sch311x_gpio_set(struct gpio_chip *chip, unsigned offset,
static int sch311x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ u8 data;
spin_lock(&block->lock);
- outb(SCH311X_GPIO_CONF_IN, block->runtime_reg +
- block->config_regs[offset]);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data |= SCH311X_GPIO_CONF_DIR;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
spin_unlock(&block->lock);
return 0;
@@ -209,18 +210,59 @@ static int sch311x_gpio_direction_out(struct gpio_chip *chip, unsigned offset,
int value)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ u8 data;
spin_lock(&block->lock);
- outb(SCH311X_GPIO_CONF_OUT, block->runtime_reg +
- block->config_regs[offset]);
-
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data &= ~SCH311X_GPIO_CONF_DIR;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
__sch311x_gpio_set(block, offset, value);
spin_unlock(&block->lock);
return 0;
}
+static int sch311x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ u8 data;
+
+ spin_lock(&block->lock);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ spin_unlock(&block->lock);
+
+ return !!(data & SCH311X_GPIO_CONF_DIR);
+}
+
+static int sch311x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
+{
+ struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ enum pin_config_param param = pinconf_to_config_param(config);
+ u8 data;
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ spin_lock(&block->lock);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data |= SCH311X_GPIO_CONF_OPEN_DRAIN;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
+ spin_unlock(&block->lock);
+ return 0;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ spin_lock(&block->lock);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data &= ~SCH311X_GPIO_CONF_OPEN_DRAIN;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
+ spin_unlock(&block->lock);
+ return 0;
+ default:
+ break;
+ }
+ return -ENOTSUPP;
+}
+
static int sch311x_gpio_probe(struct platform_device *pdev)
{
struct sch311x_pdev_data *pdata = dev_get_platdata(&pdev->dev);
@@ -253,6 +295,8 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
block->chip.free = sch311x_gpio_free;
block->chip.direction_input = sch311x_gpio_direction_in;
block->chip.direction_output = sch311x_gpio_direction_out;
+ block->chip.get_direction = sch311x_gpio_get_direction;
+ block->chip.set_config = sch311x_gpio_set_config;
block->chip.get = sch311x_gpio_get;
block->chip.set = sch311x_gpio_set;
block->chip.ngpio = 8;
@@ -309,7 +353,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
{
int err = 0, reg;
unsigned short base_addr;
- unsigned char dev_id;
+ u8 dev_id;
err = sch311x_sio_enter(sio_config_port);
if (err)
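The new sch311x ->set_config() above follows the standard dispatch pattern: the packed pinconf value is decoded with pinconf_to_config_param(), the driver handles the parameters it can, and everything else returns -ENOTSUPP. A stripped-down sketch of that skeleton (qux_* names and the register handling are illustrative):

#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/errno.h>

static int qux_set_config(struct gpio_chip *gc, unsigned int offset,
			  unsigned long config)
{
	switch (pinconf_to_config_param(config)) {
	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
		/* set the per-line open-drain bit here */
		return 0;
	case PIN_CONFIG_DRIVE_PUSH_PULL:
		/* clear it here */
		return 0;
	default:
		return -ENOTSUPP;	/* let the core report unsupported params */
	}
}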
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 22267479ba68..ee3039f091f4 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -10,7 +10,7 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 407359da08f9..2283c869ad5d 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -23,7 +23,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pci.h>
@@ -58,16 +59,6 @@ struct gsta_gpio {
unsigned irq_type[GSTA_NR_GPIO];
};
-static inline struct gsta_regs __iomem *__regs(struct gsta_gpio *chip, int nr)
-{
- return chip->regs[nr / GSTA_GPIO_PER_BLOCK];
-}
-
-static inline u32 __bit(int nr)
-{
- return 1U << (nr % GSTA_GPIO_PER_BLOCK);
-}
-
/*
* gpio methods
*/
@@ -75,8 +66,8 @@ static inline u32 __bit(int nr)
static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
if (val)
writel(bit, &regs->dats);
@@ -87,8 +78,8 @@ static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
return !!(readl(&regs->dat) & bit);
}
@@ -97,8 +88,8 @@ static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
int val)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
writel(bit, &regs->dirs);
/* Data register after direction, otherwise pullup/down is selected */
@@ -112,8 +103,8 @@ static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
writel(bit, &regs->dirc);
return 0;
@@ -165,9 +156,9 @@ static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */
*/
static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg)
{
- struct gsta_regs __iomem *regs = __regs(chip, nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
unsigned long flags;
- u32 bit = __bit(nr);
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
u32 val;
int err = 0;
@@ -234,8 +225,8 @@ static void gsta_irq_disable(struct irq_data *data)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct gsta_gpio *chip = gc->private;
int nr = data->irq - chip->irq_base;
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
u32 val;
unsigned long flags;
@@ -257,8 +248,8 @@ static void gsta_irq_enable(struct irq_data *data)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct gsta_gpio *chip = gc->private;
int nr = data->irq - chip->irq_base;
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
u32 val;
int type;
unsigned long flags;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 8d6a5a7e612d..65a2315f1673 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -8,7 +8,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/mfd/stmpe.h>
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index c07385b71403..19972084c45b 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -13,9 +13,8 @@
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/mutex.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
-#include <linux/of_gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -91,6 +90,20 @@ struct xway_stp {
};
/**
+ * xway_stp_get() - gpio_chip->get - get gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ *
+ * Gets the shadow value.
+ */
+static int xway_stp_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct xway_stp *chip = gpiochip_get_data(gc);
+
+ return (xway_stp_r32(chip->virt, XWAY_STP_CPU0) & BIT(gpio));
+}
+
+/**
* xway_stp_set() - gpio_chip->set - set gpios.
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
@@ -215,6 +228,7 @@ static int xway_stp_probe(struct platform_device *pdev)
chip->gc.parent = &pdev->dev;
chip->gc.label = "stp-xway";
chip->gc.direction_output = xway_stp_dir_out;
+ chip->gc.get = xway_stp_get;
chip->gc.set = xway_stp_set;
chip->gc.request = xway_stp_request;
chip->gc.base = -1;
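Note that the new xway_stp_get() returns the raw masked bit rather than a normalized 0/1; gpiolib only tests the return value for nonzero, so this works, though !! normalization is the more common style. Sketch of the normalized variant (same behaviour, reusing the driver's own helpers):

static int xway_stp_get_normalized(struct gpio_chip *gc, unsigned int gpio)
{
	struct xway_stp *chip = gpiochip_get_data(gc);

	return !!(xway_stp_r32(chip->virt, XWAY_STP_CPU0) & BIT(gpio));
}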
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 8b0a69c5ba88..87c18a544513 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -10,7 +10,7 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -135,6 +135,33 @@ static const struct syscon_gpio_data clps711x_mctrl_gpio = {
.dat_bit_offset = 0x40 * 8 + 8,
};
+static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
+ unsigned int offs;
+ u8 bit;
+ u32 data;
+ int ret;
+
+ offs = priv->dreg_offset + priv->data->dat_bit_offset + offset;
+ bit = offs % SYSCON_REG_BITS;
+ data = (val ? BIT(bit) : 0) | BIT(bit + 16);
+ ret = regmap_write(priv->syscon,
+ (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
+ data);
+ if (ret < 0)
+ dev_err(chip->parent, "gpio write failed, ret=%d\n", ret);
+}
+
+static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = {
+ /* RK3328 GPIO_MUTE is an output only pin at GRF_SOC_CON10[1] */
+ .flags = GPIO_SYSCON_FEAT_OUT,
+ .bit_count = 1,
+ .dat_bit_offset = 0x0428 * 8 + 1,
+ .set = rockchip_gpio_set,
+};
+
#define KEYSTONE_LOCK_BIT BIT(0)
static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
@@ -175,6 +202,10 @@ static const struct of_device_id syscon_gpio_ids[] = {
.compatible = "ti,keystone-dsp-gpio",
.data = &keystone_dsp_gpio,
},
+ {
+ .compatible = "rockchip,rk3328-grf-gpio",
+ .data = &rockchip_rk3328_gpio_mute,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
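The BIT(bit) | BIT(bit + 16) construction in rockchip_gpio_set() encodes the Rockchip GRF write-mask convention: bits [31:16] of a register enable writes to the corresponding bits [15:0], so a single regmap_write() can flip one bit without a read-modify-write cycle. A sketch, assuming a hypothetical grf regmap and GRF_SOC_CON10 offset symbol:

#include <linux/regmap.h>
#include <linux/bits.h>

/* Update only bit 1 of GRF_SOC_CON10 to 'val', no RMW needed. */
u32 data = (val ? BIT(1) : 0) | BIT(1 + 16);
regmap_write(grf, GRF_SOC_CON10, data);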
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index ac6f2a9841e5..a12cd0b5c972 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -22,7 +22,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -30,7 +30,6 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 94396caaca75..47dbd19751d0 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -22,7 +22,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -207,7 +207,7 @@ static int tegra_gpio_get_direction(struct gpio_chip *chip,
oe = tegra_gpio_readl(tgi, GPIO_OE(tgi, offset));
- return (oe & pin_mask) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
+ return !(oe & pin_mask);
}
static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -323,13 +323,6 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- ret = gpiochip_lock_as_irq(&tgi->gc, gpio);
- if (ret) {
- dev_err(tgi->dev,
- "unable to lock Tegra GPIO %u as IRQ\n", gpio);
- return ret;
- }
-
spin_lock_irqsave(&bank->lvl_lock[port], flags);
val = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
@@ -342,6 +335,14 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, gpio), gpio, 0);
tegra_gpio_enable(tgi, gpio);
+ ret = gpiochip_lock_as_irq(&tgi->gc, gpio);
+ if (ret) {
+ dev_err(tgi->dev,
+ "unable to lock Tegra GPIO %u as IRQ\n", gpio);
+ tegra_gpio_disable(tgi, gpio);
+ return ret;
+ }
+
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
@@ -550,13 +551,6 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
-/*
- * This lock class tells lockdep that GPIO irqs are in a different category
- * than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpio_lock_class;
-static struct lock_class_key gpio_request_class;
-
static int tegra_gpio_probe(struct platform_device *pdev)
{
struct tegra_gpio_info *tgi;
@@ -661,8 +655,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank = &tgi->bank_info[GPIO_BANK(gpio)];
- irq_set_lockdep_class(irq, &gpio_lock_class,
- &gpio_request_class);
irq_set_chip_data(irq, bank);
irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
@@ -720,4 +712,4 @@ static int __init tegra_gpio_init(void)
{
return platform_driver_register(&tegra_gpio_driver);
}
-postcore_initcall(tegra_gpio_init);
+subsys_initcall(tegra_gpio_init);
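The reorder in tegra_gpio_irq_set_type() above matters because gpiochip_lock_as_irq() refuses lines that look like outputs, so the hardware must be switched to input before locking, and the enable has to be rolled back if locking fails. The generic shape of that configure-then-lock pattern (foo_hw_* helpers are hypothetical):

static int foo_setup_irq_line(struct gpio_chip *gc, unsigned int offset)
{
	int ret;

	foo_hw_set_input(gc, offset);		/* direction first */
	foo_hw_enable(gc, offset);

	ret = gpiochip_lock_as_irq(gc, offset);
	if (ret) {
		foo_hw_disable(gc, offset);	/* unwind on failure */
		return ret;
	}
	return 0;
}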
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 7f1aa4c21e0d..9d0292c8a199 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
+#include <dt-bindings/gpio/tegra194-gpio.h>
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
#define TEGRA186_GPIO_ENABLE_CONFIG_ENABLE BIT(0)
@@ -593,6 +594,73 @@ static const struct tegra_gpio_soc tegra186_aon_soc = {
.name = "tegra186-gpio-aon",
};
+#define TEGRA194_MAIN_GPIO_PORT(port, base, count, controller) \
+ [TEGRA194_MAIN_GPIO_PORT_##port] = { \
+ .name = #port, \
+ .offset = base, \
+ .pins = count, \
+ .irq = controller, \
+ }
+
+static const struct tegra_gpio_port tegra194_main_ports[] = {
+ TEGRA194_MAIN_GPIO_PORT( A, 0x1400, 8, 1),
+ TEGRA194_MAIN_GPIO_PORT( B, 0x4e00, 2, 4),
+ TEGRA194_MAIN_GPIO_PORT( C, 0x4600, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( D, 0x4800, 4, 4),
+ TEGRA194_MAIN_GPIO_PORT( E, 0x4a00, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( F, 0x4c00, 6, 4),
+ TEGRA194_MAIN_GPIO_PORT( G, 0x4000, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( H, 0x4200, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( I, 0x4400, 5, 4),
+ TEGRA194_MAIN_GPIO_PORT( J, 0x5200, 6, 5),
+ TEGRA194_MAIN_GPIO_PORT( K, 0x3000, 8, 3),
+ TEGRA194_MAIN_GPIO_PORT( L, 0x3200, 4, 3),
+ TEGRA194_MAIN_GPIO_PORT( M, 0x2600, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( N, 0x2800, 3, 2),
+ TEGRA194_MAIN_GPIO_PORT( O, 0x5000, 6, 5),
+ TEGRA194_MAIN_GPIO_PORT( P, 0x2a00, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( Q, 0x2c00, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( R, 0x2e00, 6, 2),
+ TEGRA194_MAIN_GPIO_PORT( S, 0x3600, 8, 3),
+ TEGRA194_MAIN_GPIO_PORT( T, 0x3800, 8, 3),
+ TEGRA194_MAIN_GPIO_PORT( U, 0x3a00, 1, 3),
+ TEGRA194_MAIN_GPIO_PORT( V, 0x1000, 8, 1),
+ TEGRA194_MAIN_GPIO_PORT( W, 0x1200, 2, 1),
+ TEGRA194_MAIN_GPIO_PORT( X, 0x2000, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( Y, 0x2200, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( Z, 0x2400, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT(FF, 0x3400, 2, 3),
+ TEGRA194_MAIN_GPIO_PORT(GG, 0x0000, 2, 0)
+};
+
+static const struct tegra_gpio_soc tegra194_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra194_main_ports),
+ .ports = tegra194_main_ports,
+ .name = "tegra194-gpio",
+};
+
+#define TEGRA194_AON_GPIO_PORT(port, base, count, controller) \
+ [TEGRA194_AON_GPIO_PORT_##port] = { \
+ .name = #port, \
+ .offset = base, \
+ .pins = count, \
+ .irq = controller, \
+ }
+
+static const struct tegra_gpio_port tegra194_aon_ports[] = {
+ TEGRA194_AON_GPIO_PORT(AA, 0x0600, 8, 0),
+ TEGRA194_AON_GPIO_PORT(BB, 0x0800, 4, 0),
+ TEGRA194_AON_GPIO_PORT(CC, 0x0200, 8, 0),
+ TEGRA194_AON_GPIO_PORT(DD, 0x0400, 3, 0),
+ TEGRA194_AON_GPIO_PORT(EE, 0x0000, 7, 0)
+};
+
+static const struct tegra_gpio_soc tegra194_aon_soc = {
+ .num_ports = ARRAY_SIZE(tegra194_aon_ports),
+ .ports = tegra194_aon_ports,
+ .name = "tegra194-gpio-aon",
+};
+
static const struct of_device_id tegra186_gpio_of_match[] = {
{
.compatible = "nvidia,tegra186-gpio",
@@ -601,6 +669,12 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
.compatible = "nvidia,tegra186-gpio-aon",
.data = &tegra186_aon_soc
}, {
+ .compatible = "nvidia,tegra194-gpio",
+ .data = &tegra194_main_soc
+ }, {
+ .compatible = "nvidia,tegra194-gpio-aon",
+ .data = &tegra194_aon_soc
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 6520a8475910..314e300d6ba3 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -22,7 +22,7 @@
*/
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/io.h>
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index d3cf9502e7e7..7fdac9060979 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
fwspec.param_count = 2;
fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
- fwspec.param[1] = IRQ_TYPE_NONE;
+ /*
+ * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
+ * temporarily; ->irq_set_type() will override it later anyway.
+ */
+ fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
return irq_create_fwspec_mapping(&fwspec);
}
@@ -306,8 +310,7 @@ static int uniphier_gpio_irq_domain_activate(struct irq_domain *domain,
struct uniphier_gpio_priv *priv = domain->host_data;
struct gpio_chip *chip = &priv->chip;
- gpiochip_lock_as_irq(chip, data->hwirq + UNIPHIER_GPIO_IRQ_OFFSET);
- return 0;
+ return gpiochip_lock_as_irq(chip, data->hwirq + UNIPHIER_GPIO_IRQ_OFFSET);
}
static void uniphier_gpio_irq_domain_deactivate(struct irq_domain *domain,
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index ac8deb01f6f6..027699cec911 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -138,10 +138,16 @@ static void unmask_giuint_low(struct irq_data *d)
static unsigned int startup_giuint(struct irq_data *data)
{
- if (gpiochip_lock_as_irq(&vr41xx_gpio_chip, data->hwirq))
+ int ret;
+
+ ret = gpiochip_lock_as_irq(&vr41xx_gpio_chip, irqd_to_hwirq(data));
+ if (ret) {
dev_err(vr41xx_gpio_chip.parent,
"unable to lock HW IRQ %lu for IRQ\n",
data->hwirq);
+ return ret;
+ }
+
/* Satisfy the .enable semantics by unmasking the line */
unmask_giuint_low(data);
return 0;
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index acd59113e08b..2eb76f35aa7e 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -143,12 +143,14 @@ static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
{
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
+ int ret;
- if (gpiochip_lock_as_irq(&priv->gc, gpio)) {
+ ret = gpiochip_lock_as_irq(&priv->gc, gpio);
+ if (ret) {
dev_err(priv->gc.parent,
"Unable to configure XGene GPIO standby pin %d as IRQ\n",
gpio);
- return -ENOSPC;
+ return ret;
}
xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index e8ec0e33a0a9..8f24478cc18b 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -20,7 +20,7 @@
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
/* Register Offset Definitions */
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index e2232cbcec8b..c48ed9d89ff5 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -25,6 +25,7 @@
struct acpi_gpio_event {
struct list_head node;
+ struct list_head initial_sync_list;
acpi_handle handle;
unsigned int pin;
unsigned int irq;
@@ -50,6 +51,9 @@ struct acpi_gpio_chip {
struct list_head events;
};
+static LIST_HEAD(acpi_gpio_initial_sync_list);
+static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
if (!gc->parent)
@@ -85,6 +89,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
return gpiochip_get_desc(chip, pin);
}
+static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
+{
+ mutex_lock(&acpi_gpio_initial_sync_list_lock);
+ list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
+ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
+static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
+{
+ mutex_lock(&acpi_gpio_initial_sync_list_lock);
+ if (!list_empty(&event->initial_sync_list))
+ list_del_init(&event->initial_sync_list);
+ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
{
struct acpi_gpio_event *event = data;
@@ -136,7 +155,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
irq_handler_t handler = NULL;
struct gpio_desc *desc;
unsigned long irqflags;
- int ret, pin, irq;
+ int ret, pin, irq, value;
if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK;
@@ -167,6 +186,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc);
+ value = gpiod_get_value(desc);
+
ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -208,6 +229,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
event->irq = irq;
event->pin = pin;
event->desc = desc;
+ INIT_LIST_HEAD(&event->initial_sync_list);
ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
"ACPI:Event", event);
@@ -222,6 +244,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
enable_irq_wake(irq);
list_add_tail(&event->node, &acpi_gpio->events);
+
+ /*
+ * Make sure we trigger the initial state of the IRQ when using RISING
+ * or FALLING. Note we run the handlers on late_init, the AML code
+ * may refer to OperationRegions from other (builtin) drivers which
+ * may be probed after us.
+ */
+ if (handler == acpi_gpio_irq_handler &&
+ (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+ ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
+ acpi_gpio_add_to_initial_sync_list(event);
+
return AE_OK;
fail_free_event:
@@ -294,6 +328,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc;
+ acpi_gpio_del_from_initial_sync_list(event);
+
if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
disable_irq_wake(event->irq);
@@ -353,7 +389,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dev_remove_driver_gpios);
static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
const char *name, int index,
- struct acpi_reference_args *args,
+ struct fwnode_reference_args *args,
unsigned int *quirks)
{
const struct acpi_gpio_mapping *gm;
@@ -365,7 +401,7 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
const struct acpi_gpio_params *par = gm->data + index;
- args->adev = adev;
+ args->fwnode = acpi_fwnode_handle(adev);
args->args[0] = par->crs_entry_index;
args->args[1] = par->line_index;
args->args[2] = par->active_low;
@@ -528,7 +564,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
const char *propname, int index,
struct acpi_gpio_lookup *lookup)
{
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
unsigned int quirks = 0;
int ret;
@@ -549,6 +585,8 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* The property was found and resolved, so need to lookup the GPIO based
* on returned args.
*/
+ if (!to_acpi_device_node(args.fwnode))
+ return -EINVAL;
if (args.nargs != 3)
return -EPROTO;
@@ -556,8 +594,9 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
lookup->pin_index = args.args[1];
lookup->active_low = !!args.args[2];
- lookup->info.adev = args.adev;
+ lookup->info.adev = to_acpi_device_node(args.fwnode);
lookup->info.quirks = quirks;
+
return 0;
}
@@ -1158,3 +1197,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
return con_id == NULL;
}
+
+/* Sync the initial state of handlers after all builtin drivers have probed */
+static int acpi_gpio_initial_sync(void)
+{
+ struct acpi_gpio_event *event, *ep;
+
+ mutex_lock(&acpi_gpio_initial_sync_list_lock);
+ list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
+ initial_sync_list) {
+ acpi_evaluate_object(event->handle, NULL, NULL, NULL);
+ list_del_init(&event->initial_sync_list);
+ }
+ mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+ return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_initial_sync);
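The ACPI change above is an instance of a drain-once-at-late-init pattern: a module-global list under a mutex collects deferred work, and a late_initcall_sync drains it after every builtin driver has probed; list_del_init() keeps removal idempotent against the teardown path. A generic sketch (foo_* hypothetical):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/init.h>

struct foo_event { struct list_head node; };

static LIST_HEAD(foo_pending);
static DEFINE_MUTEX(foo_pending_lock);

static int foo_drain_pending(void)
{
	struct foo_event *e, *tmp;

	mutex_lock(&foo_pending_lock);
	list_for_each_entry_safe(e, tmp, &foo_pending, node) {
		/* run the deferred work for e here */
		list_del_init(&e->node);	/* safe against a second del */
	}
	mutex_unlock(&foo_pending_lock);
	return 0;
}
late_initcall_sync(foo_drain_pending);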
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 28d968088131..a4f1157d6aa0 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* Note that active low is the default.
*/
if (IS_ENABLED(CONFIG_REGULATOR) &&
- (of_device_is_compatible(np, "reg-fixed-voltage") ||
+ (of_device_is_compatible(np, "regulator-fixed") ||
+ of_device_is_compatible(np, "reg-fixed-voltage") ||
of_device_is_compatible(np, "regulator-gpio"))) {
/*
* The regulator GPIO handles are specified such that the
@@ -620,9 +621,6 @@ int of_gpiochip_add(struct gpio_chip *chip)
{
int status;
- if ((!chip->of_node) && (chip->parent))
- chip->of_node = chip->parent->of_node;
-
if (!chip->of_node)
return 0;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e11a3bb03820..e8f8a1999393 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -431,7 +431,7 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
int i;
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
- /* TODO: check if descriptors are really input */
+ /* NOTE: It's ok to read values of output lines. */
int ret = gpiod_get_array_value_complex(false,
true,
lh->numdescs,
@@ -449,7 +449,13 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
return 0;
} else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) {
- /* TODO: check if descriptors are really output */
+ /*
+ * All line descriptors were created at once with the same
+ * flags so just check if the first one is really output.
+ */
+ if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
+ return -EPERM;
+
if (copy_from_user(&ghd, ip, sizeof(ghd)))
return -EFAULT;
@@ -1256,6 +1262,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
/* If the gpiochip has an assigned OF node this takes precedence */
if (chip->of_node)
gdev->dev.of_node = chip->of_node;
+ else
+ chip->of_node = gdev->dev.of_node;
#endif
gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
@@ -1408,9 +1416,9 @@ err_free_descs:
err_free_gdev:
ida_simple_remove(&gpio_ida, gdev->id);
/* failures here can mean systems won't boot... */
- pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
+ pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
gdev->base, gdev->base + gdev->ngpio - 1,
- chip->label ? : "generic");
+ chip->label ? : "generic", status);
kfree(gdev);
return status;
}
@@ -1664,8 +1672,7 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
if (parent_handler) {
if (gpiochip->can_sleep) {
chip_err(gpiochip,
- "you cannot have chained interrupts on a "
- "chip that may sleep\n");
+ "you cannot have chained interrupts on a chip that may sleep\n");
return;
}
/*
@@ -1800,16 +1807,18 @@ static const struct irq_domain_ops gpiochip_domain_ops = {
static int gpiochip_irq_reqres(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ int ret;
if (!try_module_get(chip->gpiodev->owner))
return -ENODEV;
- if (gpiochip_lock_as_irq(chip, d->hwirq)) {
+ ret = gpiochip_lock_as_irq(chip, d->hwirq);
+ if (ret) {
chip_err(chip,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
module_put(chip->gpiodev->owner);
- return -EINVAL;
+ return ret;
}
return 0;
}
@@ -1850,8 +1859,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
return 0;
if (gpiochip->irq.parent_handler && gpiochip->can_sleep) {
- chip_err(gpiochip, "you cannot have chained interrupts on a "
- "chip that may sleep\n");
+ chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n");
return -EINVAL;
}
@@ -2259,6 +2267,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
struct gpio_chip *chip = desc->gdev->chip;
int status;
unsigned long flags;
+ unsigned offset;
spin_lock_irqsave(&gpio_lock, flags);
@@ -2277,7 +2286,11 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (chip->request) {
/* chip->request may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
- status = chip->request(chip, gpio_chip_hwgpio(desc));
+ offset = gpio_chip_hwgpio(desc);
+ if (gpiochip_line_is_valid(chip, offset))
+ status = chip->request(chip, offset);
+ else
+ status = -EINVAL;
spin_lock_irqsave(&gpio_lock, flags);
if (status < 0) {
@@ -3194,6 +3207,19 @@ int gpiod_cansleep(const struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_cansleep);
/**
+ * gpiod_set_consumer_name() - set the consumer name for the descriptor
+ * @desc: gpio to set the consumer name on
+ * @name: the new consumer name
+ */
+void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+{
+ VALIDATE_DESC_VOID(desc);
+ /* Just overwrite whatever the previous name was */
+ desc->label = name;
+}
+EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
+
+/**
* gpiod_to_irq() - return the IRQ corresponding to a GPIO
* @desc: gpio whose IRQ will be returned (already requested)
*
@@ -3249,18 +3275,19 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
* behind our back
*/
if (!chip->can_sleep && chip->get_direction) {
- int dir = chip->get_direction(chip, offset);
+ int dir = gpiod_get_direction(desc);
- if (dir)
- clear_bit(FLAG_IS_OUT, &desc->flags);
- else
- set_bit(FLAG_IS_OUT, &desc->flags);
+ if (dir < 0) {
+ chip_err(chip, "%s: cannot get GPIO direction\n",
+ __func__);
+ return dir;
+ }
}
if (test_bit(FLAG_IS_OUT, &desc->flags)) {
chip_err(chip,
- "%s: tried to flag a GPIO set as output for IRQ\n",
- __func__);
+ "%s: tried to flag a GPIO set as output for IRQ\n",
+ __func__);
return -EIO;
}
@@ -3639,9 +3666,16 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
chip = find_chip_by_name(p->chip_label);
if (!chip) {
- dev_err(dev, "cannot find GPIO chip %s\n",
- p->chip_label);
- return ERR_PTR(-ENODEV);
+ /*
+ * As the lookup table indicates a chip with
+ * p->chip_label should exist, assume it may
+ * still appear later and let the interested
+ * consumer be probed again or let the Deferred
+ * Probe infrastructure handle the error.
+ */
+ dev_warn(dev, "cannot find GPIO chip %s, deferring\n",
+ p->chip_label);
+ return ERR_PTR(-EPROBE_DEFER);
}
if (chip->ngpio <= p->chip_hwnum) {
@@ -4215,7 +4249,7 @@ static int __init gpiolib_dev_init(void)
int ret;
/* Register GPIO sysfs bus */
- ret = bus_register(&gpio_bus_type);
+ ret = bus_register(&gpio_bus_type);
if (ret < 0) {
pr_err("gpiolib: could not register GPIO bus type\n");
return ret;
@@ -4259,9 +4293,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s",
gpio, gdesc->name ? gdesc->name : "", gdesc->label,
is_out ? "out" : "in ",
- chip->get
- ? (chip->get(chip, i) ? "hi" : "lo")
- : "? ",
+ chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? ",
is_irq ? "IRQ" : " ");
seq_printf(s, "\n");
}
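The new gpiod_set_consumer_name() simply relabels a requested descriptor, which shows up in debugfs and line listings. A sketch of typical consumer usage (the "reset" con_id and foo_* naming are illustrative):

#include <linux/gpio/consumer.h>

static int foo_probe_reset(struct device *dev)
{
	struct gpio_desc *desc;

	desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	gpiod_set_consumer_name(desc, "foo-reset");
	return 0;
}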
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 1a8e20363861..a7e49fef73d4 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -92,7 +92,7 @@ struct acpi_gpio_info {
};
/* gpio suffixes used for ACPI and device tree lookup */
-static const char * const gpio_suffixes[] = { "gpios", "gpio" };
+static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
#ifdef CONFIG_OF_GPIO
struct gpio_desc *of_find_gpio(struct device *dev,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2a72d2feb76d..cb88528e7b10 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -122,6 +122,16 @@ config DRM_LOAD_EDID_FIRMWARE
default case is N. Details and instructions how to build your own
EDID data are given in Documentation/EDID/HOWTO.txt.
+config DRM_DP_CEC
+ bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+ select CEC_CORE
+ help
+ Choose this option if you want to enable HDMI CEC support for
+ DisplayPort/USB-C to HDMI adapters.
+
+ Note: not all adapters support this feature, and even for those
+ that do, the CEC pin is often not hooked up.
+
config DRM_TTM
tristate
depends on DRM && MMU
@@ -213,6 +223,17 @@ config DRM_VGEM
as used by Mesa's software renderer for enhanced performance.
If M is selected the module will be called vgem.
+config DRM_VKMS
+ tristate "Virtual KMS (EXPERIMENTAL)"
+ depends on DRM
+ select DRM_KMS_HELPER
+ default n
+ help
+ Virtual Kernel Mode-Setting (VKMS) is used for testing or for
+ running GPUs in headless machines. Choose this option to get
+ a VKMS.
+
+ If M is selected the module will be called vkms.
source "drivers/gpu/drm/exynos/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ef9f3dab287f..a6771cef85e2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
- drm_syncobj.o drm_lease.o
+ drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
@@ -41,6 +41,7 @@ drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
+drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
@@ -69,6 +70,7 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_VGEM) += vgem/
+obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 06192698bd96..5b393622f592 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -136,6 +136,7 @@
#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
#define GENERIC_OBJECT_ID_MXM_OPM 0x03
#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
+#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
/****************************************************/
/* Graphics Object ENUM ID Definition */
@@ -714,6 +715,13 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a59c07590cee..447c4c7a36d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -73,6 +73,8 @@
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
+#include "amdgpu_job.h"
+#include "amdgpu_bo_list.h"
/*
* Modules parameters.
@@ -105,11 +107,8 @@ extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_dc;
-extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
-extern int amdgpu_no_evict;
-extern int amdgpu_direct_gma_size;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
@@ -190,6 +189,7 @@ struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
+struct amdgpu_atif;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -599,17 +599,6 @@ struct amdgpu_ib {
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job);
-
-void amdgpu_job_free_resources(struct amdgpu_job *job);
-void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct drm_sched_entity *entity, void *owner,
- struct dma_fence **f);
-
/*
* Queue manager
*/
@@ -683,8 +672,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
@@ -702,37 +691,6 @@ struct amdgpu_fpriv {
};
/*
- * residency list
- */
-struct amdgpu_bo_list_entry {
- struct amdgpu_bo *robj;
- struct ttm_validate_buffer tv;
- struct amdgpu_bo_va *bo_va;
- uint32_t priority;
- struct page **user_pages;
- int user_invalidated;
-};
-
-struct amdgpu_bo_list {
- struct mutex lock;
- struct rcu_head rhead;
- struct kref refcount;
- struct amdgpu_bo *gds_obj;
- struct amdgpu_bo *gws_obj;
- struct amdgpu_bo *oa_obj;
- unsigned first_userptr;
- unsigned num_entries;
- struct amdgpu_bo_list_entry *array;
-};
-
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
- struct list_head *validated);
-void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
-
-/*
* GFX stuff
*/
#include "clearstate_defs.h"
@@ -930,6 +888,11 @@ struct amdgpu_ngg {
bool init;
};
+struct sq_work {
+ struct work_struct work;
+ unsigned ih_data;
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gfx_config config;
@@ -968,6 +931,10 @@ struct amdgpu_gfx {
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
struct amdgpu_irq_src priv_inst_irq;
+ struct amdgpu_irq_src cp_ecc_error_irq;
+ struct amdgpu_irq_src sq_irq;
+ struct sq_work sq_work;
+
/* gfx status */
uint32_t gfx_current_status;
/* ce ram size*/
@@ -1019,6 +986,7 @@ struct amdgpu_cs_parser {
/* scheduler job object */
struct amdgpu_job *job;
+ struct amdgpu_ring *ring;
/* buffer objects */
struct ww_acquire_ctx ticket;
@@ -1040,40 +1008,6 @@ struct amdgpu_cs_parser {
struct drm_syncobj **post_dep_syncobjs;
};
-#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
-#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is first presented in belonging context */
-#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
-
-struct amdgpu_job {
- struct drm_sched_job base;
- struct amdgpu_device *adev;
- struct amdgpu_vm *vm;
- struct amdgpu_ring *ring;
- struct amdgpu_sync sync;
- struct amdgpu_sync sched_sync;
- struct amdgpu_ib *ibs;
- struct dma_fence *fence; /* the hw fence */
- uint32_t preamble_status;
- uint32_t num_ibs;
- void *owner;
- uint64_t fence_ctx; /* the fence_context this job uses */
- bool vm_needs_flush;
- uint64_t vm_pd_addr;
- unsigned vmid;
- unsigned pasid;
- uint32_t gds_base, gds_size;
- uint32_t gws_base, gws_size;
- uint32_t oa_base, oa_size;
- uint32_t vram_lost_counter;
-
- /* user fence handling */
- uint64_t uf_addr;
- uint64_t uf_sequence;
-
-};
-#define to_amdgpu_job(sched_job) \
- container_of((sched_job), struct amdgpu_job, base)
-
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
uint32_t ib_idx, int idx)
{
@@ -1269,43 +1203,6 @@ struct amdgpu_vram_scratch {
/*
* ACPI
*/
-struct amdgpu_atif_notification_cfg {
- bool enabled;
- int command_code;
-};
-
-struct amdgpu_atif_notifications {
- bool display_switch;
- bool expansion_mode_change;
- bool thermal_state;
- bool forced_power_state;
- bool system_power_state;
- bool display_conf_change;
- bool px_gfx_switch;
- bool brightness_change;
- bool dgpu_display_event;
-};
-
-struct amdgpu_atif_functions {
- bool system_params;
- bool sbios_requests;
- bool select_active_disp;
- bool lid_state;
- bool get_tv_standard;
- bool set_tv_standard;
- bool get_panel_expansion_mode;
- bool set_panel_expansion_mode;
- bool temperature_change;
- bool graphics_device_types;
-};
-
-struct amdgpu_atif {
- struct amdgpu_atif_notifications notifications;
- struct amdgpu_atif_functions functions;
- struct amdgpu_atif_notification_cfg notification_cfg;
- struct amdgpu_encoder *encoder_for_bl;
-};
-
struct amdgpu_atcs_functions {
bool get_ext_state;
bool pcie_perf_req;
@@ -1425,6 +1322,7 @@ enum amd_hw_ip_block_type {
PWR_HWIP,
NBIF_HWIP,
THM_HWIP,
+ CLK_HWIP,
MAX_HWIP
};
@@ -1466,7 +1364,7 @@ struct amdgpu_device {
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
- struct amdgpu_atif atif;
+ struct amdgpu_atif *atif;
struct amdgpu_atcs atcs;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
@@ -1615,9 +1513,9 @@ struct amdgpu_device {
DECLARE_HASHTABLE(mn_hash, 7);
/* tracking pinned memory */
- u64 vram_pin_size;
- u64 invisible_pin_size;
- u64 gart_pin_size;
+ atomic64_t vram_pin_size;
+ atomic64_t visible_pin_size;
+ atomic64_t gart_pin_size;
/* amdkfd interface */
struct kfd_dev *kfd;
@@ -1812,6 +1710,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
+#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
@@ -1865,8 +1764,6 @@ void amdgpu_display_update_priority(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_device_vram_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc, u64 base);
void amdgpu_device_gart_location(struct amdgpu_device *adev,
@@ -1894,6 +1791,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
+#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void);
+#else
+static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
+#endif
+
/*
* KMS
*/
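
The pin-size accounting above moves from plain u64 fields to atomic64_t (and invisible_pin_size becomes visible_pin_size), so concurrent pin/unpin paths can update the counters without sharing a lock. A minimal userspace C11 analog of that pattern, not the kernel atomic64_t API itself:

#include <stdatomic.h>
#include <stdio.h>

/* Analog of atomic64_t vram_pin_size: many threads may fetch-add or
 * fetch-sub concurrently; readers use a plain atomic load. */
static atomic_llong vram_pin_size;

static void pin(long long bytes)   { atomic_fetch_add(&vram_pin_size, bytes); }
static void unpin(long long bytes) { atomic_fetch_sub(&vram_pin_size, bytes); }

int main(void)
{
	pin(4096);
	pin(8192);
	unpin(4096);
	printf("pinned: %lld\n", (long long)atomic_load(&vram_pin_size));
	return 0;
}
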
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index f4c474a95875..71efcf38f11b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -57,6 +57,10 @@
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
+#define ACP_BT_PLAY_REGS_START 0x14970
+#define ACP_BT_PLAY_REGS_END 0x14a24
+#define ACP_BT_COMP1_REG_OFFSET 0xac
+#define ACP_BT_COMP2_REG_OFFSET 0xa8
#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
@@ -77,7 +81,7 @@
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
#define ACP_TIMEOUT_LOOP 0x000000FF
-#define ACP_DEVS 3
+#define ACP_DEVS 4
#define ACP_SRC_ID 162
enum {
@@ -316,14 +320,13 @@ static int acp_hw_init(void *handle)
if (adev->acp.acp_cell == NULL)
return -ENOMEM;
- adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);
-
+ adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
if (adev->acp.acp_res == NULL) {
kfree(adev->acp.acp_cell);
return -ENOMEM;
}
- i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
+ i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
if (i2s_pdata == NULL) {
kfree(adev->acp.acp_res);
kfree(adev->acp.acp_cell);
@@ -358,6 +361,20 @@ static int acp_hw_init(void *handle)
i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
+ i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ break;
+ }
+
+ i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
+ i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
+ i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
+ i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
+
adev->acp.acp_res[0].name = "acp2x_dma";
adev->acp.acp_res[0].flags = IORESOURCE_MEM;
adev->acp.acp_res[0].start = acp_base;
@@ -373,13 +390,18 @@ static int acp_hw_init(void *handle)
adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
- adev->acp.acp_res[3].name = "acp2x_dma_irq";
- adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
- adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
- adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
+ adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
+ adev->acp.acp_res[3].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
+ adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
+
+ adev->acp.acp_res[4].name = "acp2x_dma_irq";
+ adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
+ adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
+ adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
- adev->acp.acp_cell[0].num_resources = 4;
+ adev->acp.acp_cell[0].num_resources = 5;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
@@ -396,6 +418,12 @@ static int acp_hw_init(void *handle)
adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
+ adev->acp.acp_cell[3].name = "designware-i2s";
+ adev->acp.acp_cell[3].num_resources = 1;
+ adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
+ adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
+ adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
+
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
ACP_DEVS);
if (r)
@@ -451,7 +479,6 @@ static int acp_hw_init(void *handle)
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
-
return 0;
}
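
The ACP hunks above grow the resource array from 4 to 5 entries and the platform-data array from 2 to 3 for the new BT I2S cell, and bump ACP_DEVS from 3 to 4 to match; these counts must move together or the later indexed writes overrun their kcalloc allocations. A hedged sketch of a compile-time guard for that kind of coupling (hypothetical names, not code from this driver):

#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define ACP_DEVS 4

struct cell { const char *name; };

/* One cell per exposed device, as in acp_hw_init() above; the static
 * assert catches a count that drifts from the table at compile time. */
static const struct cell acp_cells[ACP_DEVS] = {
	{ "acp_audio_dma" },
	{ "designware-i2s" },
	{ "designware-i2s" },
	{ "designware-i2s" },
};

_Static_assert(ARRAY_SIZE(acp_cells) == ACP_DEVS, "cell count mismatch");

int main(void) { return 0; }
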
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 8fa850a070e0..353993218f21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -34,6 +34,45 @@
#include "amd_acpi.h"
#include "atom.h"
+struct amdgpu_atif_notification_cfg {
+ bool enabled;
+ int command_code;
+};
+
+struct amdgpu_atif_notifications {
+ bool display_switch;
+ bool expansion_mode_change;
+ bool thermal_state;
+ bool forced_power_state;
+ bool system_power_state;
+ bool display_conf_change;
+ bool px_gfx_switch;
+ bool brightness_change;
+ bool dgpu_display_event;
+};
+
+struct amdgpu_atif_functions {
+ bool system_params;
+ bool sbios_requests;
+ bool select_active_disp;
+ bool lid_state;
+ bool get_tv_standard;
+ bool set_tv_standard;
+ bool get_panel_expansion_mode;
+ bool set_panel_expansion_mode;
+ bool temperature_change;
+ bool graphics_device_types;
+};
+
+struct amdgpu_atif {
+ acpi_handle handle;
+
+ struct amdgpu_atif_notifications notifications;
+ struct amdgpu_atif_functions functions;
+ struct amdgpu_atif_notification_cfg notification_cfg;
+ struct amdgpu_encoder *encoder_for_bl;
+};
+
/* Call the ATIF method
*/
/**
@@ -46,8 +85,9 @@
* Executes the requested ATIF function (all asics).
* Returns a pointer to the acpi output buffer.
*/
-static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
- struct acpi_buffer *params)
+static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+ int function,
+ struct acpi_buffer *params)
{
acpi_status status;
union acpi_object atif_arg_elements[2];
@@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
atif_arg_elements[1].integer.value = 0;
}
- status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+ status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
+ &buffer);
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
* (all asics).
* returns 0 on success, error on failure.
*/
-static int amdgpu_atif_verify_interface(acpi_handle handle,
- struct amdgpu_atif *atif)
+static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
{
union acpi_object *info;
struct atif_verify_interface output;
size_t size;
int err = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
@@ -176,6 +216,35 @@ out:
return err;
}
+static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
+{
+ acpi_handle handle = NULL;
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+ acpi_status status;
+
+ /* On PX/HG systems, ATIF and ATPX are in the iGPU's namespace; on
+ * dGPU-only systems, ATIF is in the dGPU's namespace.
+ */
+ status = acpi_get_handle(dhandle, "ATIF", &handle);
+ if (ACPI_SUCCESS(status))
+ goto out;
+
+ if (amdgpu_has_atpx()) {
+ status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
+ &handle);
+ if (ACPI_SUCCESS(status))
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("No ATIF handle found\n");
+ return NULL;
+out:
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+ return handle;
+}
+
/**
* amdgpu_atif_get_notification_params - determine notify configuration
*
@@ -188,15 +257,16 @@ out:
* where n is specified in the result if a notifier is used.
* Returns 0 on success, error on failure.
*/
-static int amdgpu_atif_get_notification_params(acpi_handle handle,
- struct amdgpu_atif_notification_cfg *n)
+static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
{
union acpi_object *info;
+ struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
struct atif_system_params params;
size_t size;
int err = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
+ NULL);
if (!info) {
err = -EIO;
goto out;
@@ -250,14 +320,15 @@ out:
* (all asics).
* Returns 0 on success, error on failure.
*/
-static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
- struct atif_sbios_requests *req)
+static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
+ struct atif_sbios_requests *req)
{
union acpi_object *info;
size_t size;
int count = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
+ NULL);
if (!info)
return -EIO;
@@ -290,11 +361,9 @@ out:
* Returns NOTIFY code
*/
static int amdgpu_atif_handler(struct amdgpu_device *adev,
- struct acpi_bus_event *event)
+ struct acpi_bus_event *event)
{
- struct amdgpu_atif *atif = &adev->atif;
- struct atif_sbios_requests req;
- acpi_handle handle;
+ struct amdgpu_atif *atif = adev->atif;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -303,48 +372,54 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
- if (!atif->notification_cfg.enabled ||
+ if (!atif ||
+ !atif->notification_cfg.enabled ||
event->type != atif->notification_cfg.command_code)
/* Not our event */
return NOTIFY_DONE;
- /* Check pending SBIOS requests */
- handle = ACPI_HANDLE(&adev->pdev->dev);
- count = amdgpu_atif_get_sbios_requests(handle, &req);
+ if (atif->functions.sbios_requests) {
+ struct atif_sbios_requests req;
- if (count <= 0)
- return NOTIFY_DONE;
+ /* Check pending SBIOS requests */
+ count = amdgpu_atif_get_sbios_requests(atif, &req);
- DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+ if (count <= 0)
+ return NOTIFY_DONE;
- if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
- struct amdgpu_encoder *enc = atif->encoder_for_bl;
+ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
- if (enc) {
- struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+ /* todo: add DC handling */
+ if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+ !amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_encoder *enc = atif->encoder_for_bl;
- DRM_DEBUG_DRIVER("Changing brightness to %d\n",
- req.backlight_level);
+ if (enc) {
+ struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+
+ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+ req.backlight_level);
- amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+ amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
- backlight_force_update(dig->bl_dev,
- BACKLIGHT_UPDATE_HOTKEY);
+ backlight_force_update(dig->bl_dev,
+ BACKLIGHT_UPDATE_HOTKEY);
#endif
+ }
}
- }
- if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
- if ((adev->flags & AMD_IS_PX) &&
- amdgpu_atpx_dgpu_req_power_for_displays()) {
- pm_runtime_get_sync(adev->ddev->dev);
- /* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(adev->ddev);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+ if ((adev->flags & AMD_IS_PX) &&
+ amdgpu_atpx_dgpu_req_power_for_displays()) {
+ pm_runtime_get_sync(adev->ddev->dev);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(adev->ddev);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ }
}
+ /* TODO: check other events */
}
- /* TODO: check other events */
/* We've handled the event, stop the notifier chain. The ACPI interface
* overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
@@ -641,8 +716,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
*/
int amdgpu_acpi_init(struct amdgpu_device *adev)
{
- acpi_handle handle;
- struct amdgpu_atif *atif = &adev->atif;
+ acpi_handle handle, atif_handle;
+ struct amdgpu_atif *atif;
struct amdgpu_atcs *atcs = &adev->atcs;
int ret;
@@ -658,12 +733,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
}
+ /* Probe for ATIF, and initialize it if found */
+ atif_handle = amdgpu_atif_probe_handle(handle);
+ if (!atif_handle)
+ goto out;
+
+ atif = kzalloc(sizeof(*atif), GFP_KERNEL);
+ if (!atif) {
+ DRM_WARN("Not enough memory to initialize ATIF\n");
+ goto out;
+ }
+ atif->handle = atif_handle;
+
/* Call the ATIF method */
- ret = amdgpu_atif_verify_interface(handle, atif);
+ ret = amdgpu_atif_verify_interface(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+ kfree(atif);
goto out;
}
+ adev->atif = atif;
if (atif->notifications.brightness_change) {
struct drm_encoder *tmp;
@@ -693,8 +782,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
if (atif->functions.system_params) {
- ret = amdgpu_atif_get_notification_params(handle,
- &atif->notification_cfg);
+ ret = amdgpu_atif_get_notification_params(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
ret);
@@ -720,4 +808,6 @@ out:
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
+ if (adev->atif)
+ kfree(adev->atif);
}
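
The ACPI rework above turns adev->atif from an embedded struct into a pointer that is allocated only when an ATIF handle is actually found, so every consumer NULL-checks it first. A runnable userspace sketch of that optional-feature shape (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct atif { int command_code; };
struct device { struct atif *atif; };	/* NULL when the feature is absent */

static int probe_atif(struct device *dev, int found)
{
	if (!found)
		return 0;			/* absence is not an error */
	dev->atif = calloc(1, sizeof(*dev->atif));
	return dev->atif ? 0 : -1;
}

static void handle_event(struct device *dev)
{
	if (!dev->atif)				/* every consumer checks first */
		return;
	printf("cmd %d\n", dev->atif->command_code);
}

int main(void)
{
	struct device dev = { 0 };

	probe_atif(&dev, 0);
	handle_event(&dev);
	free(dev.atif);		/* free(NULL) is a no-op, like kfree(NULL) */
	return 0;
}
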
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 305143fcc1ce..f8bbbb3a9504 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -243,6 +243,33 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
return r;
}
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd)
+ r = kgd2kfd->pre_reset(adev->kfd);
+
+ return r;
+}
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd)
+ r = kgd2kfd->post_reset(adev->kfd);
+
+ return r;
+}
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ amdgpu_device_gpu_recover(adev, NULL, false);
+}
+
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr)
@@ -251,7 +278,6 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
struct amdgpu_bo *bo = NULL;
struct amdgpu_bo_param bp;
int r;
- uint64_t gpu_addr_tmp = 0;
void *cpu_ptr_tmp = NULL;
memset(&bp, 0, sizeof(bp));
@@ -275,13 +301,18 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
goto allocate_mem_reserve_bo_failed;
}
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
- &gpu_addr_tmp);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r) {
dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
goto allocate_mem_pin_bo_failed;
}
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", bo);
+ goto allocate_mem_kmap_bo_failed;
+ }
+
r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
if (r) {
dev_err(adev->dev,
@@ -290,7 +321,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
}
*mem_obj = bo;
- *gpu_addr = gpu_addr_tmp;
+ *gpu_addr = amdgpu_bo_gpu_offset(bo);
*cpu_ptr = cpu_ptr_tmp;
amdgpu_bo_unreserve(bo);
@@ -457,6 +488,14 @@ err:
return ret;
}
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+}
+
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
if (adev->kfd) {
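
alloc_gtt_mem() above now pins without receiving an address, binds the BO through amdgpu_ttm_alloc_gart(), and only then queries amdgpu_bo_gpu_offset(). A condensed sketch of that sequence, assuming kernel context (the calls are the ones used above; the unwinding here is illustrative):

/* Pin a BO into GTT and return its GPU address in three explicit
 * steps, mirroring the reworked alloc_gtt_mem() above. */
static int pin_and_map(struct amdgpu_bo *bo, u64 *gpu_addr)
{
	int r;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;

	r = amdgpu_ttm_alloc_gart(&bo->tbo);	/* explicit GART binding */
	if (r) {
		amdgpu_bo_unpin(bo);
		return r;
	}

	*gpu_addr = amdgpu_bo_gpu_offset(bo);	/* valid only after binding */
	return 0;
}
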
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a8418a3f4e9d..2f379c183ed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -119,6 +119,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
@@ -126,6 +127,12 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+
/* Shared API */
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
@@ -183,6 +190,9 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence **ef);
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *info);
+
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 2c14025e5e76..574c1181ae9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -173,7 +173,5 @@ static const struct dma_fence_ops amdkfd_fence_ops = {
.get_driver_name = amdkfd_fence_get_driver_name,
.get_timeline_name = amdkfd_fence_get_timeline_name,
.enable_signaling = amdkfd_fence_enable_signaling,
- .signaled = NULL,
- .wait = dma_fence_default_wait,
.release = amdkfd_fence_release,
};
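
Dropping the explicit .signaled = NULL and .wait = dma_fence_default_wait entries above relies on the dma_fence core treating a NULL callback as "use the default". A minimal userspace sketch of that dispatch-with-fallback idiom, not the dma_fence API itself:

#include <stdio.h>

struct fence;

struct fence_ops {
	int (*wait)(struct fence *f);	/* optional: NULL means default */
};

struct fence { const struct fence_ops *ops; };

static int default_wait(struct fence *f) { (void)f; return 0; }

static int fence_wait(struct fence *f)
{
	/* The core supplies the fallback, so implementations can simply
	 * leave the slot unset instead of naming the default. */
	return f->ops->wait ? f->ops->wait(f) : default_wait(f);
}

int main(void)
{
	static const struct fence_ops ops = { 0 };
	struct fence f = { &ops };

	printf("%d\n", fence_wait(&f));
	return 0;
}
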
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea79908dac4c..ea3f698aef5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -145,6 +145,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint32_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
@@ -216,6 +217,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+ .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -571,6 +576,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
unsigned long flags, end_jiffies;
int retry;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -882,6 +890,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
unsigned int tmp;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
@@ -911,3 +922,19 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
RREG32(mmVM_INVALIDATE_RESPONSE);
return 0;
}
+
+ /**
+ * read_vmid_from_vmfault_reg - read the vmid from the VM fault status register
+ *
+ * @kgd: kgd device pointer
+ *
+ * Returns the vmid field of VM_CONTEXT1_PROTECTION_FAULT_STATUS (CIK).
+ */
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+
+ return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+}
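
read_vmid_from_vmfault_reg() above is a plain mask-and-shift field extraction wrapped by REG_GET_FIELD(). A runnable sketch of what that expands to, with a stand-in field layout rather than the real CIK register header values:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for VM_CONTEXT1_PROTECTION_FAULT_STATUS__VMID: assume the
 * vmid occupies four bits starting at bit 25. */
#define VMID_SHIFT 25
#define VMID_MASK  (0xFu << VMID_SHIFT)

static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;	/* what REG_GET_FIELD() boils down to */
}

int main(void)
{
	uint32_t status = 7u << VMID_SHIFT;	/* pretend register read */

	printf("vmid = %u\n", get_field(status, VMID_MASK, VMID_SHIFT));
	return 0;
}
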
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19dd665e7307..f6e53e9352bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -176,6 +176,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -568,6 +571,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
int retry;
struct vi_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
@@ -844,6 +850,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
unsigned int tmp;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 1db60aa5b7f0..8efedfcb9dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -213,6 +213,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
@@ -679,6 +681,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v9_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
@@ -866,6 +871,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
if (ring->ready)
return invalidate_tlbs_with_kiq(adev, pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ff8fd75f7ca5..8a707d8bbb1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -334,7 +334,7 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
"Called with userptr BO"))
return -EINVAL;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -622,7 +622,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
- amdgpu_ttm_placement_from_domain(bo, mem->domain);
+ amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
pr_err("%s: failed to validate BO\n", __func__);
@@ -1587,7 +1587,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
goto bo_reserve_failed;
}
- ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (ret) {
pr_err("Failed to pin bo. ret %d\n", ret);
goto pin_failed;
@@ -1621,6 +1621,20 @@ bo_reserve_failed:
return ret;
}
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *mem)
+{
+ struct amdgpu_device *adev;
+
+ adev = (struct amdgpu_device *)kgd;
+ if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+ *mem = *adev->gmc.vm_fault_info;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ }
+ return 0;
+}
+
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1680,7 +1694,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (amdgpu_bo_reserve(bo, true))
return -EAGAIN;
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (ret) {
@@ -1824,7 +1838,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
if (mem->user_pages[0]) {
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
mem->user_pages);
- amdgpu_ttm_placement_from_domain(bo, mem->domain);
+ amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret) {
pr_err("%s: failed to validate BO\n", __func__);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index daa06e7c5bb7..a028661d9e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -32,7 +32,7 @@ struct amdgpu_atpx_functions {
bool switch_start;
bool switch_end;
bool disp_connectors_mapping;
- bool disp_detetion_ports;
+ bool disp_detection_ports;
};
struct amdgpu_atpx {
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
}
+#if defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void) {
+ return amdgpu_atpx_priv.dhandle;
+}
+#endif
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -156,7 +162,7 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
- f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+ f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
}
/**
@@ -569,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
};
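
The quirk addition above appends one more vendor/device/subvendor/subdevice tuple; the table is walked until the all-zero sentinel. A runnable sketch of that lookup shape (the tuple is the one added above, the flag name is a stand-in):

#include <stdint.h>
#include <stdio.h>

struct px_quirk { uint16_t vid, did, svid, sdid; uint32_t flags; };

#define FORCE_ATPX 1

static const struct px_quirk quirks[] = {
	{ 0x1002, 0x6900, 0x1025, 0x125A, FORCE_ATPX },
	{ 0, 0, 0, 0, 0 },	/* zero sentinel terminates the walk */
};

static uint32_t quirk_flags(uint16_t vid, uint16_t did,
			    uint16_t svid, uint16_t sdid)
{
	const struct px_quirk *q;

	for (q = quirks; q->vid; q++)
		if (q->vid == vid && q->did == did &&
		    q->svid == svid && q->sdid == sdid)
			return q->flags;
	return 0;
}

int main(void)
{
	printf("%u\n", quirk_flags(0x1002, 0x6900, 0x1025, 0x125A));
	return 0;
}
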
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 19cfff31f2e1..3079ea8523c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -95,11 +95,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
r = amdgpu_bo_reserve(sobj, false);
if (unlikely(r != 0))
goto out_cleanup;
- r = amdgpu_bo_pin(sobj, sdomain, &saddr);
+ r = amdgpu_bo_pin(sobj, sdomain);
+ if (r) {
+ amdgpu_bo_unreserve(sobj);
+ goto out_cleanup;
+ }
+ r = amdgpu_ttm_alloc_gart(&sobj->tbo);
amdgpu_bo_unreserve(sobj);
if (r) {
goto out_cleanup;
}
+ saddr = amdgpu_bo_gpu_offset(sobj);
bp.domain = ddomain;
r = amdgpu_bo_create(adev, &bp, &dobj);
if (r) {
@@ -108,11 +114,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
r = amdgpu_bo_reserve(dobj, false);
if (unlikely(r != 0))
goto out_cleanup;
- r = amdgpu_bo_pin(dobj, ddomain, &daddr);
+ r = amdgpu_bo_pin(dobj, ddomain);
+ if (r) {
+ amdgpu_bo_unreserve(dobj);
+ goto out_cleanup;
+ }
+ r = amdgpu_ttm_alloc_gart(&dobj->tbo);
amdgpu_bo_unreserve(dobj);
if (r) {
goto out_cleanup;
}
+ daddr = amdgpu_bo_gpu_offset(dobj);
if (adev->mman.buffer_funcs) {
time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
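
With pinning and GART binding now separate steps, both run under the BO reservation and every failure path must drop that reservation exactly once before unwinding. A runnable sketch of the under-lock two-step used above (stand-in functions, not the amdgpu API):

#include <stdio.h>

static int locked;

static void reserve(void)   { locked = 1; }
static void unreserve(void) { locked = 0; }

static int pin(void)        { return 0; }	/* amdgpu_bo_pin stand-in */
static int alloc_gart(void) { return 0; }	/* amdgpu_ttm_alloc_gart stand-in */

static int setup(void)
{
	int r;

	reserve();
	r = pin();
	if (r) {
		unreserve();	/* drop the lock before bailing out */
		return r;
	}
	r = alloc_gart();
	unreserve();		/* success and failure share this unlock */
	return r;
}

int main(void)
{
	printf("%d (locked=%d)\n", setup(), locked);
	return 0;
}
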
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 92be7f6de197..d472a2c8399f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -35,92 +35,53 @@
#define AMDGPU_BO_LIST_MAX_PRIORITY 32u
#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_bo_list *list,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries);
+static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
+{
+ struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
+ rhead);
+
+ kvfree(list);
+}
-static void amdgpu_bo_list_release_rcu(struct kref *ref)
+static void amdgpu_bo_list_free(struct kref *ref)
{
- unsigned i;
struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
refcount);
+ struct amdgpu_bo_list_entry *e;
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
+ amdgpu_bo_list_for_each_entry(e, list)
+ amdgpu_bo_unref(&e->robj);
- mutex_destroy(&list->lock);
- kvfree(list->array);
- kfree_rcu(list, rhead);
+ call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}
-static int amdgpu_bo_list_create(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries,
- int *id)
+int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ unsigned num_entries, struct amdgpu_bo_list **result)
{
- int r;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ unsigned last_entry = 0, first_userptr = num_entries;
+ struct amdgpu_bo_list_entry *array;
struct amdgpu_bo_list *list;
+ uint64_t total_size = 0;
+ size_t size;
+ unsigned i;
+ int r;
+
+ if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry))
+ return -EINVAL;
- list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+ size = sizeof(struct amdgpu_bo_list);
+ size += num_entries * sizeof(struct amdgpu_bo_list_entry);
+ list = kvmalloc(size, GFP_KERNEL);
if (!list)
return -ENOMEM;
- /* initialize bo list*/
- mutex_init(&list->lock);
kref_init(&list->refcount);
- r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
- if (r) {
- kfree(list);
- return r;
- }
-
- /* idr alloc should be called only after initialization of bo list. */
- mutex_lock(&fpriv->bo_list_lock);
- r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
- mutex_unlock(&fpriv->bo_list_lock);
- if (r < 0) {
- amdgpu_bo_list_free(list);
- return r;
- }
- *id = r;
-
- return 0;
-}
-
-static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
-{
- struct amdgpu_bo_list *list;
-
- mutex_lock(&fpriv->bo_list_lock);
- list = idr_remove(&fpriv->bo_list_handles, id);
- mutex_unlock(&fpriv->bo_list_lock);
- if (list)
- kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_bo_list *list,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries)
-{
- struct amdgpu_bo_list_entry *array;
- struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
- struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
- struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
-
- unsigned last_entry = 0, first_userptr = num_entries;
- unsigned i;
- int r;
- unsigned long total_size = 0;
+ list->gds_obj = adev->gds.gds_gfx_bo;
+ list->gws_obj = adev->gds.gws_gfx_bo;
+ list->oa_obj = adev->gds.oa_gfx_bo;
- array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
- if (!array)
- return -ENOMEM;
+ array = amdgpu_bo_list_array_entry(list, 0);
memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
for (i = 0; i < num_entries; ++i) {
@@ -157,59 +118,56 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->tv.shared = !entry->robj->prime_shared_count;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
- gds_obj = entry->robj;
+ list->gds_obj = entry->robj;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
- gws_obj = entry->robj;
+ list->gws_obj = entry->robj;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
- oa_obj = entry->robj;
+ list->oa_obj = entry->robj;
total_size += amdgpu_bo_size(entry->robj);
trace_amdgpu_bo_list_set(list, entry->robj);
}
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
-
- kvfree(list->array);
-
- list->gds_obj = gds_obj;
- list->gws_obj = gws_obj;
- list->oa_obj = oa_obj;
list->first_userptr = first_userptr;
- list->array = array;
list->num_entries = num_entries;
trace_amdgpu_cs_bo_status(list->num_entries, total_size);
+
+ *result = list;
return 0;
error_free:
while (i--)
amdgpu_bo_unref(&array[i].robj);
- kvfree(array);
+ kvfree(list);
return r;
+
}
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
+static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
- struct amdgpu_bo_list *result;
+ struct amdgpu_bo_list *list;
+
+ mutex_lock(&fpriv->bo_list_lock);
+ list = idr_remove(&fpriv->bo_list_handles, id);
+ mutex_unlock(&fpriv->bo_list_lock);
+ if (list)
+ kref_put(&list->refcount, amdgpu_bo_list_free);
+}
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ struct amdgpu_bo_list **result)
+{
rcu_read_lock();
- result = idr_find(&fpriv->bo_list_handles, id);
+ *result = idr_find(&fpriv->bo_list_handles, id);
- if (result) {
- if (kref_get_unless_zero(&result->refcount)) {
- rcu_read_unlock();
- mutex_lock(&result->lock);
- } else {
- rcu_read_unlock();
- result = NULL;
- }
- } else {
+ if (*result && kref_get_unless_zero(&(*result)->refcount)) {
rcu_read_unlock();
+ return 0;
}
- return result;
+ rcu_read_unlock();
+ return -ENOENT;
}
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
@@ -220,6 +178,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
* concatenated in descending order.
*/
struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+ struct amdgpu_bo_list_entry *e;
unsigned i;
for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
@@ -230,14 +189,13 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
* in the list, the sort mustn't change the ordering of buffers
* with the same priority, i.e. it must be stable.
*/
- for (i = 0; i < list->num_entries; i++) {
- unsigned priority = list->array[i].priority;
+ amdgpu_bo_list_for_each_entry(e, list) {
+ unsigned priority = e->priority;
- if (!list->array[i].robj->parent)
- list_add_tail(&list->array[i].tv.head,
- &bucket[priority]);
+ if (!e->robj->parent)
+ list_add_tail(&e->tv.head, &bucket[priority]);
- list->array[i].user_pages = NULL;
+ e->user_pages = NULL;
}
/* Connect the sorted buckets in the output list. */
@@ -247,71 +205,82 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
- mutex_unlock(&list->lock);
- kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
-{
- unsigned i;
-
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
-
- mutex_destroy(&list->lock);
- kvfree(list->array);
- kfree(list);
+ kref_put(&list->refcount, amdgpu_bo_list_free);
}
-int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param)
{
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
-
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
- union drm_amdgpu_bo_list *args = data;
- uint32_t handle = args->in.list_handle;
- const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
-
struct drm_amdgpu_bo_list_entry *info;
- struct amdgpu_bo_list *list;
-
int r;
- info = kvmalloc_array(args->in.bo_number,
- sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
+ info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
if (!info)
return -ENOMEM;
/* copy the handle array from userspace to a kernel buffer */
r = -EFAULT;
- if (likely(info_size == args->in.bo_info_size)) {
- unsigned long bytes = args->in.bo_number *
- args->in.bo_info_size;
+ if (likely(info_size == in->bo_info_size)) {
+ unsigned long bytes = in->bo_number *
+ in->bo_info_size;
if (copy_from_user(info, uptr, bytes))
goto error_free;
} else {
- unsigned long bytes = min(args->in.bo_info_size, info_size);
+ unsigned long bytes = min(in->bo_info_size, info_size);
unsigned i;
- memset(info, 0, args->in.bo_number * info_size);
- for (i = 0; i < args->in.bo_number; ++i) {
+ memset(info, 0, in->bo_number * info_size);
+ for (i = 0; i < in->bo_number; ++i) {
if (copy_from_user(&info[i], uptr, bytes))
goto error_free;
- uptr += args->in.bo_info_size;
+ uptr += in->bo_info_size;
}
}
+ *info_param = info;
+ return 0;
+
+error_free:
+ kvfree(info);
+ return r;
+}
+
+int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ union drm_amdgpu_bo_list *args = data;
+ uint32_t handle = args->in.list_handle;
+ struct drm_amdgpu_bo_list_entry *info = NULL;
+ struct amdgpu_bo_list *list, *old;
+ int r;
+
+ r = amdgpu_bo_create_list_entry_array(&args->in, &info);
+ if (r)
+ goto error_free;
+
switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE:
r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
- &handle);
+ &list);
if (r)
goto error_free;
+
+ mutex_lock(&fpriv->bo_list_lock);
+ r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
+ mutex_unlock(&fpriv->bo_list_lock);
+ if (r < 0) {
+ amdgpu_bo_list_put(list);
+ return r;
+ }
+
+ handle = r;
break;
case AMDGPU_BO_LIST_OP_DESTROY:
@@ -320,17 +289,22 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
break;
case AMDGPU_BO_LIST_OP_UPDATE:
- r = -ENOENT;
- list = amdgpu_bo_list_get(fpriv, handle);
- if (!list)
+ r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
+ &list);
+ if (r)
goto error_free;
- r = amdgpu_bo_list_set(adev, filp, list, info,
- args->in.bo_number);
- amdgpu_bo_list_put(list);
- if (r)
+ mutex_lock(&fpriv->bo_list_lock);
+ old = idr_replace(&fpriv->bo_list_handles, list, handle);
+ mutex_unlock(&fpriv->bo_list_lock);
+
+ if (IS_ERR(old)) {
+ amdgpu_bo_list_put(list);
+ r = PTR_ERR(old);
goto error_free;
+ }
+ amdgpu_bo_list_put(old);
break;
default:
@@ -345,6 +319,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
return 0;
error_free:
- kvfree(info);
+ if (info)
+ kvfree(info);
return r;
}
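
The bo_list rework above pairs kref with RCU: lookups find the object under rcu_read_lock() and keep it only if kref_get_unless_zero() succeeds, while the final kref_put() defers the kvfree() past a grace period via call_rcu(). A condensed kernel-context sketch of that pairing, built from the calls used above (not standalone-buildable):

static void bo_list_free_rcu(struct rcu_head *rcu)
{
	/* Runs after a grace period, when no reader can still see it. */
	kvfree(container_of(rcu, struct amdgpu_bo_list, rhead));
}

static void bo_list_free(struct kref *ref)
{
	struct amdgpu_bo_list *list =
		container_of(ref, struct amdgpu_bo_list, refcount);

	call_rcu(&list->rhead, bo_list_free_rcu);
}

static struct amdgpu_bo_list *bo_list_lookup(struct idr *handles, int id)
{
	struct amdgpu_bo_list *list;

	rcu_read_lock();
	list = idr_find(handles, id);
	if (list && !kref_get_unless_zero(&list->refcount))
		list = NULL;		/* raced with the final kref_put */
	rcu_read_unlock();
	return list;
}
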
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
new file mode 100644
index 000000000000..61b089768e1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_BO_LIST_H__
+#define __AMDGPU_BO_LIST_H__
+
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/amdgpu_drm.h>
+
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_bo_va;
+struct amdgpu_fpriv;
+
+struct amdgpu_bo_list_entry {
+ struct amdgpu_bo *robj;
+ struct ttm_validate_buffer tv;
+ struct amdgpu_bo_va *bo_va;
+ uint32_t priority;
+ struct page **user_pages;
+ int user_invalidated;
+};
+
+struct amdgpu_bo_list {
+ struct rcu_head rhead;
+ struct kref refcount;
+ struct amdgpu_bo *gds_obj;
+ struct amdgpu_bo *gws_obj;
+ struct amdgpu_bo *oa_obj;
+ unsigned first_userptr;
+ unsigned num_entries;
+};
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ struct amdgpu_bo_list **result);
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ struct list_head *validated);
+void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ unsigned num_entries,
+ struct amdgpu_bo_list **list);
+
+static inline struct amdgpu_bo_list_entry *
+amdgpu_bo_list_array_entry(struct amdgpu_bo_list *list, unsigned index)
+{
+ struct amdgpu_bo_list_entry *array = (void *)&list[1];
+
+ return &array[index];
+}
+
+#define amdgpu_bo_list_for_each_entry(e, list) \
+ for (e = amdgpu_bo_list_array_entry(list, 0); \
+ e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+ ++e)
+
+#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
+ for (e = amdgpu_bo_list_array_entry(list, (list)->first_userptr); \
+ e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+ ++e)
+
+#endif
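
amdgpu_bo_list_array_entry() above finds the entry array directly behind the header because amdgpu_bo_list_create() sizes one allocation for both. A runnable userspace sketch of that single-allocation trailing-array layout (hypothetical struct names):

#include <stdio.h>
#include <stdlib.h>

struct entry  { int priority; };
struct header { unsigned int num_entries; };	/* entries follow in memory */

/* Mirrors amdgpu_bo_list_array_entry(): &hdr[1] is the first byte
 * past the header, which is where the array was allocated. */
static struct entry *array_entry(struct header *hdr, unsigned int i)
{
	return &((struct entry *)&hdr[1])[i];
}

int main(void)
{
	unsigned int n = 3, i;
	struct header *hdr = malloc(sizeof(*hdr) + n * sizeof(struct entry));

	if (!hdr)
		return 1;
	hdr->num_entries = n;
	for (i = 0; i < n; i++)
		array_entry(hdr, i)->priority = i;
	for (i = 0; i < n; i++)
		printf("%d\n", array_entry(hdr, i)->priority);
	free(hdr);			/* one free releases header and array */
	return 0;
}
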
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index e950730f1933..693ec5ea4950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -314,17 +314,17 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
- strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+ strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
} else {
- strcpy(fw_name, "radeon/bonaire_smc.bin");
+ strcpy(fw_name, "amdgpu/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
- strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+ strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
} else {
- strcpy(fw_name, "radeon/hawaii_smc.bin");
+ strcpy(fw_name, "amdgpu/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8e66851eb427..c770d73352a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -212,30 +212,21 @@ static void
amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
enum drm_connector_status status)
{
- struct drm_encoder *best_encoder = NULL;
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *best_encoder;
+ struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
int i;
best_encoder = connector_funcs->best_encoder(connector);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
connected = false;
amdgpu_atombios_encoder_set_bios_scratch_regs(connector, encoder, connected);
-
}
}
@@ -246,17 +237,11 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
+
return NULL;
}
@@ -349,22 +334,24 @@ static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
int ret;
if (amdgpu_connector->edid) {
- drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
+ drm_connector_update_edid_property(connector, amdgpu_connector->edid);
ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
return ret;
}
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
return 0;
}
static struct drm_encoder *
amdgpu_connector_best_single_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
+
+ /* pick the first one */
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -985,9 +972,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- struct drm_encoder *encoder = NULL;
const struct drm_encoder_helper_funcs *encoder_funcs;
- int i, r;
+ int r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1077,14 +1063,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (amdgpu_connector->dac_load_detect) {
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ struct drm_encoder *encoder;
+ int i;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1132,18 +1114,11 @@ exit:
static struct drm_encoder *
amdgpu_connector_dvi_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (amdgpu_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1158,8 +1133,9 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -1296,15 +1272,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
struct amdgpu_encoder *amdgpu_encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
@@ -1326,14 +1294,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
int i;
bool found = false;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
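
The connector hunks above replace open-coded walks of connector->encoder_ids[] with drm_connector_for_each_possible_encoder(), which hides the "stop at the first zero id" rule inside the macro. A runnable sketch of that macro shape (not the DRM implementation):

#include <stdio.h>

#define MAX_ENCODER 3

struct connector { int encoder_ids[MAX_ENCODER]; };

/* Iterate valid ids; the zero id acts as the list terminator, just
 * like the open-coded loops being removed above. */
#define for_each_possible_encoder(c, id, i)				\
	for ((i) = 0;							\
	     (i) < MAX_ENCODER && ((id) = (c)->encoder_ids[(i)]) != 0;	\
	     (i)++)

int main(void)
{
	struct connector c = { { 5, 9, 0 } };
	int id, i;

	for_each_possible_encoder(&c, id, i)
		printf("encoder id %d\n", id);
	return 0;
}
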
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82312a7bc6ad..502b94fb116a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -31,6 +31,7 @@
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_gmc.h"
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *data,
@@ -65,11 +66,35 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
return 0;
}
-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_bo_list_in *data)
+{
+ int r;
+ struct drm_amdgpu_bo_list_entry *info = NULL;
+
+ r = amdgpu_bo_create_list_entry_array(data, &info);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
+ &p->bo_list);
+ if (r)
+ goto error_free;
+
+ kvfree(info);
+ return 0;
+
+error_free:
+ if (info)
+ kvfree(info);
+
+ return r;
+}
+
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
uint64_t *chunk_array;
unsigned size, num_ibs = 0;
@@ -163,6 +188,19 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
break;
+ case AMDGPU_CHUNK_ID_BO_HANDLES:
+ size = sizeof(struct drm_amdgpu_bo_list_in);
+ if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
+ ret = -EINVAL;
+ goto free_partial_kdata;
+ }
+
+ ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+ if (ret)
+ goto free_partial_kdata;
+
+ break;
+
case AMDGPU_CHUNK_ID_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
@@ -186,6 +224,10 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (p->uf_entry.robj)
p->job->uf_addr = uf_offset;
kfree(chunk_array);
+
+ /* Use this opportunity to fill in task info for the vm */
+ amdgpu_vm_set_task_info(vm);
+
return 0;
free_all_kdata:
@@ -257,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
return;
}
- total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
+ total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
@@ -302,7 +344,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
/* Do the same for visible VRAM if half of it is free */
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
u64 total_vis_vram = adev->gmc.visible_vram_size;
u64 used_vis_vram =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@@ -359,7 +401,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
* to move it. Don't move anything if the threshold is zero.
*/
if (p->bytes_moved < p->bytes_moved_threshold) {
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
(bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
/* And don't move a CPU_ACCESS_REQUIRED BO to limited
* visible VRAM if we've depleted our allowance to do
@@ -377,11 +419,11 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
}
retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
p->bytes_moved += ctx.bytes_moved;
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
amdgpu_bo_in_cpu_visible_vram(bo))
p->bytes_moved_vis += ctx.bytes_moved;
@@ -434,9 +476,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
update_bytes_moved_vis =
- adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
- amdgpu_bo_in_cpu_visible_vram(bo);
- amdgpu_ttm_placement_from_domain(bo, other);
+ !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ amdgpu_bo_in_cpu_visible_vram(bo);
+ amdgpu_bo_placement_from_domain(bo, other);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
p->bytes_moved += ctx.bytes_moved;
if (update_bytes_moved_vis)
@@ -490,8 +532,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
/* Check if we have user pages and nobody bound the BO already */
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
lobj->user_pages) {
- amdgpu_ttm_placement_from_domain(bo,
- AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(bo,
+ AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return r;
@@ -519,23 +561,38 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- unsigned i, tries = 10;
struct amdgpu_bo *gds;
struct amdgpu_bo *gws;
struct amdgpu_bo *oa;
+ unsigned tries = 10;
int r;
INIT_LIST_HEAD(&p->validated);
- p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
- if (p->bo_list) {
- amdgpu_bo_list_get_list(p->bo_list, &p->validated);
- if (p->bo_list->first_userptr != p->bo_list->num_entries)
- p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+ /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
+ if (cs->in.bo_list_handle) {
+ if (p->bo_list)
+ return -EINVAL;
+
+ r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
+ &p->bo_list);
+ if (r)
+ return r;
+ } else if (!p->bo_list) {
+ /* Create an empty bo_list when no handle is provided */
+ r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
+ &p->bo_list);
+ if (r)
+ return r;
}
+ amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ if (p->bo_list->first_userptr != p->bo_list->num_entries)
+ p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
@@ -544,7 +601,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
while (1) {
struct list_head need_pages;
- unsigned i;
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
@@ -554,17 +610,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_free_pages;
}
- /* Without a BO list we don't have userptr BOs */
- if (!p->bo_list)
- break;
-
INIT_LIST_HEAD(&need_pages);
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- struct amdgpu_bo *bo;
-
- e = &p->bo_list->array[i];
- bo = e->robj;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
@@ -656,23 +704,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis);
- if (p->bo_list) {
- struct amdgpu_vm *vm = &fpriv->vm;
- unsigned i;
-
- gds = p->bo_list->gds_obj;
- gws = p->bo_list->gws_obj;
- oa = p->bo_list->oa_obj;
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+ gds = p->bo_list->gds_obj;
+ gws = p->bo_list->gws_obj;
+ oa = p->bo_list->oa_obj;
- p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
- }
- } else {
- gds = p->adev->gds.gds_gfx_bo;
- gws = p->adev->gds.gws_gfx_bo;
- oa = p->adev->gds.oa_gfx_bo;
- }
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -700,18 +737,13 @@ error_validate:
error_free_pages:
- if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- e = &p->bo_list->array[i];
-
- if (!e->user_pages)
- continue;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ if (!e->user_pages)
+ continue;
- release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages);
- kvfree(e->user_pages);
- }
+ release_pages(e->user_pages,
+ e->robj->tbo.ttm->num_pages);
+ kvfree(e->user_pages);
}
return r;
@@ -773,12 +805,13 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
- struct amdgpu_device *adev = p->adev;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_device *adev = p->adev;
struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_list_entry *e;
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
- int i, r;
+ int r;
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
@@ -808,29 +841,26 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r;
}
- if (p->bo_list) {
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct dma_fence *f;
-
- /* ignore duplicates */
- bo = p->bo_list->array[i].robj;
- if (!bo)
- continue;
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct dma_fence *f;
- bo_va = p->bo_list->array[i].bo_va;
- if (bo_va == NULL)
- continue;
+ /* ignore duplicates */
+ bo = e->robj;
+ if (!bo)
+ continue;
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r)
- return r;
+ bo_va = e->bo_va;
+ if (bo_va == NULL)
+ continue;
- f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
- if (r)
- return r;
- }
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r)
+ return r;
+ f = bo_va->last_pt_update;
+ r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+ if (r)
+ return r;
}
r = amdgpu_vm_handle_moved(adev, vm);
@@ -845,15 +875,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- if (amdgpu_vm_debug && p->bo_list) {
+ if (amdgpu_vm_debug) {
/* Invalidate all BOs to test for userspace bugs */
- for (i = 0; i < p->bo_list->num_entries; i++) {
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
/* ignore duplicates */
- bo = p->bo_list->array[i].robj;
- if (!bo)
+ if (!e->robj)
continue;
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(adev, e->robj, false);
}
}
@@ -865,11 +894,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_ring *ring = p->job->ring;
+ struct amdgpu_ring *ring = p->ring;
int r;
/* Only for UVD/VCE VM emulation */
- if (p->job->ring->funcs->parse_cs) {
+ if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
unsigned i, j;
for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -910,12 +939,20 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += va_start - offset;
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
-
- r = amdgpu_ring_parse_cs(ring, p, j);
- if (r)
- return r;
+ if (p->ring->funcs->parse_cs) {
+ memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, j);
+ if (r)
+ return r;
+ } else {
+ ib->ptr = (uint32_t *)kptr;
+ r = amdgpu_ring_patch_cs_in_place(ring, p, j);
+ amdgpu_bo_kunmap(aobj);
+ if (r)
+ return r;
+ }
j++;
}
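The split above distinguishes two per-ring hooks: parse_cs validates a kernel copy of the IB, while patch_cs_in_place maps the user buffer and fixes it up directly, skipping the memcpy. Presumably both share the parser-based signature already used via amdgpu_ring_parse_cs(); a sketch of the assumed amdgpu_ring_funcs members:

    /* assumed callbacks in struct amdgpu_ring_funcs; a ring implements
     * at most one of the two */
    int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
    int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);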
@@ -927,6 +964,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
r = amdgpu_bo_vm_update_pte(p);
if (r)
return r;
+
+ r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+ if (r)
+ return r;
}
return amdgpu_cs_sync_rings(p);
@@ -979,10 +1020,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
}
}
- if (parser->job->ring && parser->job->ring != ring)
+ if (parser->ring && parser->ring != ring)
return -EINVAL;
- parser->job->ring = ring;
+ parser->ring = ring;
r = amdgpu_ib_get(adev, vm,
ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
@@ -1001,11 +1042,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_addr && (
- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+ parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
- return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
+ return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
}
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1156,31 +1197,30 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
- struct amdgpu_ring *ring = p->job->ring;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_ring *ring = p->ring;
struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ enum drm_sched_priority priority;
+ struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
- unsigned i;
uint64_t seq;
int r;
amdgpu_mn_lock(p->mn);
- if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
- if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
- amdgpu_mn_unlock(p->mn);
- return -ERESTARTSYS;
- }
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ amdgpu_mn_unlock(p->mn);
+ return -ERESTARTSYS;
}
}
job = p->job;
p->job = NULL;
- r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+ r = drm_sched_job_init(&job->base, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
@@ -1188,7 +1228,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
job->owner = p->filp;
- job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
@@ -1206,11 +1245,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->uf_sequence = seq;
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(job->ring, job->base.s_priority);
trace_amdgpu_cs_ioctl(job);
+ amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
+ priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
+ ring = to_amdgpu_ring(entity->rq->sched);
+ amdgpu_ring_priority_get(ring, priority);
+
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
@@ -1601,7 +1644,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c5bb36275e93..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
if (ring == &adev->gfx.kiq.ring)
continue;
- r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
- rq, &ctx->guilty);
+ r = drm_sched_entity_init(&ctx->rings[i].entity,
+ &rq, 1, &ctx->guilty);
if (r)
goto failed;
}
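These hunks track a DRM GPU scheduler API rework: an entity is now initialized with an array of candidate run queues rather than a single scheduler, drm_sched_job_init() derives the scheduler from the entity, and teardown is split into flush/fini/destroy. That is also why amdgpu_cs_submit() above re-reads the ring from entity->rq->sched after pushing the job. A sketch of the prototypes this patch appears to assume; treat the exact signatures as inferred, not quoted:

    int  drm_sched_entity_init(struct drm_sched_entity *entity,
                               struct drm_sched_rq **rq_list,
                               unsigned int num_rq_list,
                               atomic_t *guilty);
    long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
    void drm_sched_entity_fini(struct drm_sched_entity *entity);
    void drm_sched_entity_destroy(struct drm_sched_entity *entity);
    int  drm_sched_job_init(struct drm_sched_job *job,
                            struct drm_sched_entity *entity,
                            void *owner);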
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
failed:
for (j = 0; j < i; j++)
- drm_sched_entity_fini(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
+ drm_sched_entity_destroy(&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ drm_sched_entity_destroy(&ctx->rings[i].entity);
}
amdgpu_ctx_fini(ref);
@@ -444,34 +442,36 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
idr_init(&mgr->ctx_handles);
}
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id, i;
+ long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
idp = &mgr->ctx_handles;
+ mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
- if (!ctx->adev)
+ if (!ctx->adev) {
+ mutex_unlock(&mgr->lock);
return;
+ }
for (i = 0; i < ctx->adev->num_rings; i++) {
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- if (kref_read(&ctx->refcount) == 1)
- drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
- else
- DRM_ERROR("ctx %p is still alive\n", ctx);
+ max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+ max_wait);
}
}
+ mutex_unlock(&mgr->lock);
}
-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
@@ -490,8 +490,7 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
continue;
if (kref_read(&ctx->refcount) == 1)
- drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ drm_sched_entity_fini(&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
}
@@ -504,7 +503,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
struct idr *idp;
uint32_t id;
- amdgpu_ctx_mgr_entity_cleanup(mgr);
+ amdgpu_ctx_mgr_entity_fini(mgr);
idp = &mgr->ctx_handles;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6e5284e6c028..1e66dfd0e39c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
@@ -675,17 +676,15 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GART location
*
* @adev: amdgpu device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
*
- * Function will place try to place GTT before or after VRAM.
+ * Function will try to place GART before or after VRAM.
*
- * If GTT size is bigger than space left then we ajust GTT size.
+ * If GART size is bigger than space left then we adjust GART size.
* Thus function will never fail.
- *
- * FIXME: when reducing GTT size align new size on power of 2.
*/
void amdgpu_device_gart_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc)
@@ -698,13 +697,13 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
size_bf = mc->vram_start;
if (size_bf > size_af) {
if (mc->gart_size > size_bf) {
- dev_warn(adev->dev, "limiting GTT\n");
+ dev_warn(adev->dev, "limiting GART\n");
mc->gart_size = size_bf;
}
mc->gart_start = 0;
} else {
if (mc->gart_size > size_af) {
- dev_warn(adev->dev, "limiting GTT\n");
+ dev_warn(adev->dev, "limiting GART\n");
mc->gart_size = size_af;
}
/* VCE doesn't like it when BOs cross a 4GB segment, so align
@@ -713,7 +712,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
}
mc->gart_end = mc->gart_start + mc->gart_size - 1;
- dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
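A worked example of the placement logic, with illustrative numbers that are not taken from the patch:

    /* Assume VRAM occupies [0x0, 0xFFFFFFFF] (4 GiB at offset 0) and the
     * requested gart_size is 512 MiB. Then size_bf = mc->vram_start = 0,
     * so the GART is placed after VRAM:
     *   gart_start = ALIGN(0xFFFFFFFF + 1, 4 GiB) = 0x100000000
     *   gart_end   = gart_start + gart_size - 1  = 0x11FFFFFFF
     * and the log line reads:
     *   "GART: 512M 0x0000000100000000 - 0x000000011FFFFFFF"
     */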
@@ -1077,7 +1076,7 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
/**
* amdgpu_device_ip_set_clockgating_state - set the CG state
*
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
* @state: clockgating state (gate or ungate)
*
@@ -1111,7 +1110,7 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
/**
* amdgpu_device_ip_set_powergating_state - set the PG state
*
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
* @state: powergating state (gate or ungate)
*
@@ -1222,7 +1221,7 @@ bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
* amdgpu_device_ip_get_ip_block - get a hw IP pointer
*
* @adev: amdgpu_device pointer
- * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
*
* Returns a pointer to the hardware IP block structure
* if it exists for the asic, otherwise NULL.
@@ -1708,10 +1707,6 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
if (amdgpu_emu_mode == 1)
return 0;
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
-
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -1731,17 +1726,34 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
}
}
- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
- /* enable gfx powergating */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
- /* enable gfxoff */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- AMD_PG_STATE_GATE);
- }
+ return 0;
+}
+static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
+{
+ int i = 0, r;
+
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ /* skip PG for UVD/VCE/VCN, it's handled specially */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->funcs->set_powergating_state) {
+ /* enable powergating to save power */
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ AMD_PG_STATE_GATE);
+ if (r) {
+ DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ }
+ }
return 0;
}
@@ -1775,6 +1787,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
}
}
+ amdgpu_device_ip_late_set_cg_state(adev);
+ amdgpu_device_ip_late_set_pg_state(adev);
+
queue_delayed_work(system_wq, &adev->late_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
@@ -1813,6 +1828,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
@@ -1901,11 +1918,15 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, late_init_work.work);
- amdgpu_device_ip_late_set_cg_state(adev);
+ int r;
+
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
}
/**
- * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
*
* @adev: amdgpu_device pointer
*
@@ -1915,18 +1936,60 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
if (amdgpu_sriov_vf(adev))
amdgpu_virt_request_full_gpu(adev, false);
- /* ungate SMC block powergating */
- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- AMD_CG_STATE_UNGATE);
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ /* displays are handled separately */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+ /* ungate blocks so that suspend can properly shut them down */
+ if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_ERROR("suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs (phase 2). The list of all the
+ * hardware IPs that make up the asic is walked, clockgating is disabled
+ * and the suspend callbacks are run for every IP except the displays,
+ * which were already handled in phase 1. suspend puts the hardware and
+ * software state in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
/* ungate SMC block first */
r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
@@ -1935,9 +1998,16 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
}
+ /* call smu to disable the gfx off feature first on suspend */
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
+ /* displays are handled in phase1 */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+ continue;
/* ungate blocks so that suspend can properly shut them down */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
@@ -1963,6 +2033,29 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_suspend_phase2(adev);
+
+ return r;
+}
+
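Taken together with the amdgpu_device_suspend() changes further down, the phase split yields the following ordering; this is a condensed view of those hunks, not a verbatim quote:

    amdgpu_device_ip_suspend_phase1(adev);  /* quiesce displays (DCE) first */
    amdgpu_bo_evict_vram(adev);             /* evict while displays are down */
    amdgpu_fence_driver_suspend(adev);
    amdgpu_device_ip_suspend_phase2(adev);  /* suspend all remaining IPs */
    amdgpu_bo_evict_vram(adev);             /* catch e.g. the GART page table */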
static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -1985,7 +2078,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+ DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2020,7 +2113,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+ DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2181,7 +2274,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
#endif
return amdgpu_dc != 0;
@@ -2210,7 +2303,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
* amdgpu_device_init - initialize the driver
*
* @adev: amdgpu_device pointer
- * @pdev: drm dev pointer
+ * @ddev: drm dev pointer
* @pdev: pci dev pointer
* @flags: driver flags
*
@@ -2301,6 +2394,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->late_init_work,
amdgpu_device_ip_late_init_func_handler);
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+
/* Registers mapping */
/* TODO: block userspace mapping of io register */
if (adev->asic_type >= CHIP_BONAIRE) {
@@ -2581,8 +2676,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
/**
* amdgpu_device_suspend - initiate device suspend
*
- * @pdev: drm dev pointer
- * @state: suspend state
+ * @dev: drm dev pointer
+ * @suspend: suspend state
+ * @fbcon: notify the fbdev of suspend
*
* Puts the hw in the suspend state (all asics).
* Returns 0 for success or an error on failure.
@@ -2606,6 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
+ if (fbcon)
+ amdgpu_fbdev_set_suspend(adev, 1);
+
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
@@ -2613,44 +2712,46 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
drm_modeset_unlock_all(dev);
- }
-
- amdgpu_amdkfd_suspend(adev);
-
- /* unpin the front buffers and cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct amdgpu_bo *robj;
-
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
+ /* unpin the front buffers and cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct amdgpu_bo *robj;
+
+ if (amdgpu_crtc->cursor_bo) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
}
- }
- if (fb == NULL || fb->obj[0] == NULL) {
- continue;
- }
- robj = gem_to_amdgpu_bo(fb->obj[0]);
- /* don't unpin kernel fb objects */
- if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
- r = amdgpu_bo_reserve(robj, true);
- if (r == 0) {
- amdgpu_bo_unpin(robj);
- amdgpu_bo_unreserve(robj);
+ if (fb == NULL || fb->obj[0] == NULL) {
+ continue;
+ }
+ robj = gem_to_amdgpu_bo(fb->obj[0]);
+ /* don't unpin kernel fb objects */
+ if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
+ }
}
}
}
+
+ amdgpu_amdkfd_suspend(adev);
+
+ r = amdgpu_device_ip_suspend_phase1(adev);
+
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
- r = amdgpu_device_ip_suspend(adev);
+ r = amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
@@ -2669,18 +2770,15 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
DRM_ERROR("amdgpu asic reset failed\n");
}
- if (fbcon) {
- console_lock();
- amdgpu_fbdev_set_suspend(adev, 1);
- console_unlock();
- }
return 0;
}
/**
* amdgpu_device_resume - initiate device resume
*
- * @pdev: drm dev pointer
+ * @dev: drm dev pointer
+ * @resume: resume state
+ * @fbcon: notify the fbdev of resume
*
* Bring the hw back to operating state (all asics).
* Returns 0 for success or an error on failure.
@@ -2696,15 +2794,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (fbcon)
- console_lock();
-
if (resume) {
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
r = pci_enable_device(dev->pdev);
if (r)
- goto unlock;
+ return r;
}
/* post card */
@@ -2717,29 +2812,30 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
r = amdgpu_device_ip_resume(adev);
if (r) {
DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
- goto unlock;
+ return r;
}
amdgpu_fence_driver_resume(adev);
r = amdgpu_device_ip_late_init(adev);
if (r)
- goto unlock;
-
- /* pin cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- r = amdgpu_bo_pin(aobj,
- AMDGPU_GEM_DOMAIN_VRAM,
- &amdgpu_crtc->cursor_addr);
- if (r != 0)
- DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
- amdgpu_bo_unreserve(aobj);
+ return r;
+
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->cursor_bo) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r != 0)
+ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
}
}
}
@@ -2747,6 +2843,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
return r;
+ /* Make sure the IB tests have completed */
+ flush_delayed_work(&adev->late_init_work);
+
/* blat the mode back in */
if (fbcon) {
if (!amdgpu_device_has_dc_support(adev)) {
@@ -2760,6 +2859,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
drm_modeset_unlock_all(dev);
}
+ amdgpu_fbdev_set_suspend(adev, 0);
}
drm_kms_helper_poll_enable(dev);
@@ -2783,15 +2883,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
-
- if (fbcon)
- amdgpu_fbdev_set_suspend(adev, 0);
-
-unlock:
- if (fbcon)
- console_unlock();
-
- return r;
+ return 0;
}
/**
@@ -3016,7 +3108,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
long tmo;
if (amdgpu_sriov_runtime(adev))
- tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
+ tmo = msecs_to_jiffies(8000);
else
tmo = msecs_to_jiffies(100);
@@ -3068,7 +3160,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
* @adev: amdgpu device pointer
*
* attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
*/
static int amdgpu_device_reset(struct amdgpu_device *adev)
{
@@ -3143,9 +3235,10 @@ out:
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
* @adev: amdgpu device pointer
+ * @from_hypervisor: request from hypervisor
*
* do VF FLR and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded otherwise failed
*/
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
bool from_hypervisor)
@@ -3190,7 +3283,7 @@ error:
*
* @adev: amdgpu device pointer
* @job: which job trigger hang
- * @force forces reset regardless of amdgpu_gpu_recovery
+ * @force: forces reset regardless of amdgpu_gpu_recovery
*
* Attempt to reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
@@ -3217,6 +3310,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
atomic_inc(&adev->gpu_reset_counter);
adev->in_gpu_reset = 1;
+ /* Block kfd */
+ amdgpu_amdkfd_pre_reset(adev);
+
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -3229,10 +3325,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
kthread_park(ring->sched.thread);
- if (job && job->ring->idx != i)
+ if (job && job->base.sched == &ring->sched)
continue;
- drm_sched_hw_job_reset(&ring->sched, &job->base);
+ drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
@@ -3253,7 +3349,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* or all rings (in the case @job is NULL)
* after above amdgpu_reset accomplished
*/
- if ((!job || job->ring->idx == i) && !r)
+ if ((!job || job->base.sched == &ring->sched) && !r)
drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
@@ -3270,9 +3366,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
- dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+ dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
}
+ /* unlock kfd */
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
@@ -3290,8 +3388,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
- u32 mask;
- int ret;
+ struct pci_dev *pdev;
+ enum pci_bus_speed speed_cap;
+ enum pcie_link_width link_width;
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -3309,27 +3408,61 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
if (adev->pm.pcie_gen_mask == 0) {
- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
- if (!ret) {
- adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ /* asic caps */
+ pdev = adev->pdev;
+ speed_cap = pcie_get_speed_cap(pdev);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
- if (mask & DRM_PCIE_SPEED_25)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
- if (mask & DRM_PCIE_SPEED_50)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
- if (mask & DRM_PCIE_SPEED_80)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
} else {
- adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+ if (speed_cap == PCIE_SPEED_16_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
+ else if (speed_cap == PCIE_SPEED_8_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ else
+ adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
+ }
+ /* platform caps */
+ pdev = adev->ddev->pdev->bus->self;
+ speed_cap = pcie_get_speed_cap(pdev);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ } else {
+ if (speed_cap == PCIE_SPEED_16_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
+ else if (speed_cap == PCIE_SPEED_8_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ else
+ adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+
}
}
if (adev->pm.pcie_mlw_mask == 0) {
- ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
- if (!ret) {
- switch (mask) {
- case 32:
+ pdev = adev->ddev->pdev->bus->self;
+ link_width = pcie_get_width_cap(pdev);
+ if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
+ } else {
+ switch (link_width) {
+ case PCIE_LNK_X32:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
@@ -3338,7 +3471,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 16:
+ case PCIE_LNK_X16:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
@@ -3346,36 +3479,34 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 12:
+ case PCIE_LNK_X12:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 8:
+ case PCIE_LNK_X8:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 4:
+ case PCIE_LNK_X4:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 2:
+ case PCIE_LNK_X2:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 1:
+ case PCIE_LNK_X1:
adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
}
- } else {
- adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
}
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 76ee8e04ff11..6748cd7fc129 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -157,7 +157,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
struct amdgpu_bo *new_abo;
unsigned long flags;
u64 tiling_flags;
- u64 base;
int i, r;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -189,12 +188,18 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
+ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
if (unlikely(r != 0)) {
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
}
+ r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("%p bind failed\n", new_abo);
+ goto unpin;
+ }
+
r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
@@ -206,7 +211,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
amdgpu_bo_unreserve(new_abo);
- work->base = base;
+ work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 77ad59ade85c..1c4595562f8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -28,6 +28,7 @@
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
+#include "amd_pcie.h"
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
@@ -936,9 +937,11 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
case AMDGPU_PCIE_GEN3:
return AMDGPU_PCIE_GEN3;
default:
- if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+ if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
+ (default_gen == AMDGPU_PCIE_GEN3))
return AMDGPU_PCIE_GEN3;
- else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+ else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
+ (default_gen == AMDGPU_PCIE_GEN2))
return AMDGPU_PCIE_GEN2;
else
return AMDGPU_PCIE_GEN1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index dd6203a0a6b7..ff24e1cc5b65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -287,12 +287,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_force_performance_level(adev, l) \
((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
-#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
-
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
@@ -347,6 +341,10 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
+#define amdgpu_dpm_set_powergating_by_smu(adev, block_type, gate) \
+ ((adev)->powerplay.pp_funcs->set_powergating_by_smu(\
+ (adev)->powerplay.pp_handle, block_type, gate))
+
#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
((adev)->powerplay.pp_funcs->get_power_profile_mode(\
(adev)->powerplay.pp_handle, buf))
@@ -359,10 +357,6 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
(adev)->powerplay.pp_handle, type, parameter, size))
-#define amdgpu_dpm_set_mmhub_powergating_by_smu(adev) \
- ((adev)->powerplay.pp_funcs->set_mmhub_powergating_by_smu( \
- (adev)->powerplay.pp_handle))
-
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@@ -402,7 +396,6 @@ struct amdgpu_dpm {
u32 tdp_adjustment;
u16 load_line_slope;
bool power_control;
- bool ac_power;
/* special states active */
bool thermal_active;
bool uvd_active;
@@ -439,6 +432,7 @@ struct amdgpu_pm {
struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
uint32_t smu_prv_buffer_size;
struct amdgpu_bo *smu_prv_buffer;
+ bool ac_power;
};
#define R600_SSTU_DFLT 0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b0bf2f24da48..8843a06360fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1,10 +1,3 @@
-/**
- * \file amdgpu_drv.c
- * AMD Amdgpu driver
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
/*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -76,9 +69,10 @@
* - 3.24.0 - Add high priority compute support for gfx9
* - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
* - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
+ * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 26
+#define KMS_DRIVER_MINOR 27
#define KMS_DRIVER_PATCHLEVEL 0
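The 3.27.0 bump advertises the AMDGPU_CHUNK_ID_BO_HANDLES chunk consumed by the amdgpu_cs_parser_bos() changes above, where p->bo_list may arrive pre-assigned. A hypothetical userspace sketch; the struct and field names are assumed from the uapi header of this era rather than quoted from this patch, and num_bos/bo_entries are caller-provided:

    struct drm_amdgpu_bo_list_in list_in = {
            .bo_number    = num_bos,
            .bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry),
            .bo_info_ptr  = (uintptr_t)bo_entries,
    };
    struct drm_amdgpu_cs_chunk chunk = {
            .chunk_id   = AMDGPU_CHUNK_ID_BO_HANDLES,
            .length_dw  = sizeof(list_in) / 4,
            .chunk_data = (uintptr_t)&list_in,
    };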
int amdgpu_vram_limit = 0;
@@ -110,11 +104,8 @@ int amdgpu_vram_page_split = 512;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
int amdgpu_dc = -1;
-int amdgpu_dc_log = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
-int amdgpu_no_evict = 0;
-int amdgpu_direct_gma_size = 0;
uint amdgpu_pcie_gen_cap = 0;
uint amdgpu_pcie_lane_cap = 0;
uint amdgpu_cg_mask = 0xffffffff;
@@ -122,7 +113,8 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */
+/* OverDrive (bit 14), gfxoff (bit 15) and stutter mode (bit 17) disabled by default */
+uint amdgpu_pp_feature_mask = 0xfffd3fff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -135,163 +127,368 @@ int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
+/**
+ * DOC: vramlimit (int)
+ * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
+ */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
+/**
+ * DOC: vis_vramlimit (int)
+ * Restrict the amount of CPU visible VRAM in MiB for testing. The default is 0 (Use full CPU visible VRAM).
+ */
MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
+/**
+ * DOC: gartsize (uint)
+ * Restrict the size of GART in MiB (32, 64, etc.) for testing. The default is -1 (the size depends on the asic).
+ */
MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
+/**
+ * DOC: gttsize (int)
+ * Restrict the size of the GTT domain in MiB for testing. The default is -1 (the VRAM size if 3 GB < VRAM < 3/4 of
+ * system RAM, otherwise 3/4 of system RAM).
+ */
MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
+/**
+ * DOC: moverate (int)
+ * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
+ */
MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
module_param_named(moverate, amdgpu_moverate, int, 0600);
+/**
+ * DOC: benchmark (int)
+ * Run benchmarks. The default is 0 (Skip benchmarks).
+ */
MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
+/**
+ * DOC: test (int)
+ * Test BO GTT->VRAM and VRAM->GTT GPU copies. The default is 0 (skip the test; set 1 to run it).
+ */
MODULE_PARM_DESC(test, "Run tests");
module_param_named(test, amdgpu_testing, int, 0444);
+/**
+ * DOC: audio (int)
+ * Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (enabled); set 0 to disable it.
+ */
MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);
+/**
+ * DOC: disp_priority (int)
+ * Set display priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
+ */
MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
+/**
+ * DOC: hw_i2c (int)
+ * To enable hw i2c engine. Only affects non-DC display handling. The default is 0 (Disabled).
+ */
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
+/**
+ * DOC: pcie_gen2 (int)
+ * To disable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
+/**
+ * DOC: msi (int)
+ * To disable Message Signaled Interrupts (MSI) functionality (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
+/**
+ * DOC: lockup_timeout (int)
+ * Set GPU scheduler timeout value in ms. Value 0 is invalidated, will be adjusted to 10000.
+ * Negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET). The default is 10000.
+ */
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)");
module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
+/**
+ * DOC: dpm (int)
+ * Override for dynamic power management setting (1 = enable, 0 = disable). The default is -1 (auto).
+ */
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, amdgpu_dpm, int, 0444);
+/**
+ * DOC: fw_load_type (int)
+ * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
+ */
MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);
+/**
+ * DOC: aspm (int)
+ * To disable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, amdgpu_aspm, int, 0444);
+/**
+ * DOC: runpm (int)
+ * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
+ * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ */
MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
+/**
+ * DOC: ip_block_mask (uint)
+ * Override what IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
+ * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
+ * some IPs or may include multiple instances of an IP so the ordering varies from asic to asic. See the driver output in
+ * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
+ */
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+/**
+ * DOC: bapm (int)
+ * Bidirectional Application Power Management (BAPM) is used to dynamically share TDP between the CPU and GPU. Set value 0 to disable it.
+ * The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, amdgpu_bapm, int, 0444);
+/**
+ * DOC: deep_color (int)
+ * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, amdgpu_deep_color, int, 0444);
+/**
+ * DOC: vm_size (int)
+ * Override the size of the GPU's per client virtual address space in GiB. The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);
+/**
+ * DOC: vm_fragment_size (int)
+ * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
+/**
+ * DOC: vm_block_size (int)
+ * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
+/**
+ * DOC: vm_fault_stop (int)
+ * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
+ */
MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
+/**
+ * DOC: vm_debug (int)
+ * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (Disabled).
+ */
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+/**
+ * DOC: vm_update_mode (int)
+ * Override the VM update mode: update by CPU (0 = never, 1 = graphics only, 2 = compute only, 3 = both). The default
+ * is -1 (compute VM tables are updated by the CPU only on large BAR (LB) systems, otherwise never).
+ */
MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
+/**
+ * DOC: vram_page_split (int)
+ * Override the number of pages after which VRAM allocations are split (-1 = disable). The default is 512.
+ */
MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)");
module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+/**
+ * DOC: exp_hw_support (int)
+ * Enable experimental hw support (1 = enable). The default is 0 (disabled).
+ */
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+/**
+ * DOC: dc (int)
+ * Disable/Enable Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);
-MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty");
-module_param_named(dc_log, amdgpu_dc_log, int, 0444);
-
+/**
+ * DOC: sched_jobs (int)
+ * Override the max number of jobs supported in the sw queue. The default is 32.
+ */
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
+/**
+ * DOC: sched_hw_submission (int)
+ * Override the max number of HW submissions. The default is 2.
+ */
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+/**
+ * DOC: ppfeaturemask (uint)
+ * Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable power features.
+ */
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
-MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
-module_param_named(no_evict, amdgpu_no_evict, int, 0444);
-
-MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
-module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
-
+/**
+ * DOC: pcie_gen_cap (uint)
+ * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
+/**
+ * DOC: pcie_lane_cap (uint)
+ * Override PCIE lanes capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+/**
+ * DOC: cg_mask (uint)
+ * Override Clockgating features enabled on GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+/**
+ * DOC: pg_mask (uint)
+ * Override Powergating features enabled on GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+/**
+ * DOC: sdma_phase_quantum (uint)
+ * Override SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
+ */
MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
+/**
+ * DOC: disable_cu (charp)
+ * Set to disable CUs (the value is formatted as se.sh.cu,...). The default is NULL.
+ */
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+/**
+ * DOC: virtual_display (charp)
+ * Set to enable the virtual display feature. This feature provides virtual display hardware on headless boards
+ * or in virtualized environments. The value is set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x: the pci address of
+ * the device plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
+ * device at 26:00.0. The default is NULL.
+ */
MODULE_PARM_DESC(virtual_display,
"Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+/**
+ * DOC: ngg (int)
+ * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
+ */
MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
module_param_named(ngg, amdgpu_ngg, int, 0444);
+/**
+ * DOC: prim_buf_per_se (int)
+ * Override the size of the Primitive Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
+/**
+ * DOC: pos_buf_per_se (int)
+ * Override the size of the Position Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
+/**
+ * DOC: cntl_sb_buf_per_se (int)
+ * Override the size of the Control Sideband per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
+/**
+ * DOC: param_buf_per_se (int)
+ * Override the size of the Off-Chip Parameter Cache per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
+/**
+ * DOC: job_hang_limit (int)
+ * Set how long a job is allowed to hang before it is dropped. The default is 0.
+ */
MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
+/**
+ * DOC: lbpw (int)
+ * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+/**
+ * DOC: gpu_recovery (int)
+ * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
+ */
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+/**
+ * DOC: emu_mode (int)
+ * Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
+/**
+ * DOC: si_support (int)
+ * Set SI support driver. This parameter only takes effect when CONFIG_DRM_AMDGPU_SI is set. For SI asics, when the
+ * radeon driver is also enabled, set value 0 to use the radeon driver or value 1 to use the amdgpu driver. The default
+ * is to use the radeon driver when it is available, otherwise the amdgpu driver.
+ */
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -305,6 +502,12 @@ MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)")
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif
+/**
+ * DOC: cik_support (int)
+ * Set CIK support driver. This parameter only takes effect when CONFIG_DRM_AMDGPU_CIK is set. For CIK asics, when the
+ * radeon driver is also enabled, set value 0 to use the radeon driver or value 1 to use the amdgpu driver. The default
+ * is to use the radeon driver when it is available, otherwise the amdgpu driver.
+ */
#ifdef CONFIG_DRM_AMDGPU_CIK
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -318,6 +521,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
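/*
 * Illustrative example (an assumption, not part of this patch): to drive an
 * SI or CIK card with amdgpu instead of radeon, one would typically boot with
 * kernel command-line options along the lines of
 *
 *     amdgpu.si_support=1 radeon.si_support=0
 *     amdgpu.cik_support=1 radeon.cik_support=0
 *
 * assuming the radeon module exposes matching si_support/cik_support
 * parameters.
 */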
+/**
+ * DOC: smu_memory_pool_size (uint)
+ * Reserve GTT memory for SMU debug use; set to 0 to disable. The reserved size is value * 256 MiB,
+ * e.g. 0x1 = 256 MiB, 0x2 = 512 MiB, 0x4 = 1 GiB, 0x8 = 2 GiB. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(smu_memory_pool_size,
"reserve gtt for smu debug usage, 0 = disable,"
"0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
@@ -664,7 +872,7 @@ retry_init:
err_pci:
pci_disable_device(pdev);
err_free:
- drm_dev_unref(dev);
+ drm_dev_put(dev);
return ret;
}
@@ -674,7 +882,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -855,9 +1063,21 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
.runtime_idle = amdgpu_pmops_runtime_idle,
};
+static int amdgpu_flush(struct file *f, fl_owner_t id)
+{
+ struct drm_file *file_priv = f->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+ amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);
+
+ return 0;
+}
+
+
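+/*
+ * Editorial note (an assumption about intent): unlike .release, which runs
+ * only when the last reference to the file is dropped, the .flush callback
+ * runs on every close() of a file descriptor, so flushing the context
+ * manager entities here cleans up a process's pending jobs even while other
+ * references to the file remain.
+ */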
static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
+ .flush = amdgpu_flush,
.release = drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
.mmap = amdgpu_mmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 94138abe093b..ae8fac34f7a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -46,7 +46,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->devices & amdgpu_connector->devices) {
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector);
adev->mode_info.bl_encoder = amdgpu_encoder;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index bc5fd8ebab5d..69c5d22f29bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED,
- true, NULL, &gobj);
+ ttm_bo_type_kernel, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM;
@@ -168,11 +168,19 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
}
- ret = amdgpu_bo_pin(abo, domain, NULL);
+ ret = amdgpu_bo_pin(abo, domain);
if (ret) {
amdgpu_bo_unreserve(abo);
goto out_unref;
}
+
+ ret = amdgpu_ttm_alloc_gart(&abo->tbo);
+ if (ret) {
+ amdgpu_bo_unreserve(abo);
+ dev_err(adev->dev, "%p bind failed\n", abo);
+ goto out_unref;
+ }
+
ret = amdgpu_bo_kmap(abo, NULL);
amdgpu_bo_unreserve(abo);
if (ret) {
@@ -365,8 +373,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
if (adev->mode_info.rfbdev)
- drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
- state);
+ drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
+ state);
}
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index e74d620d9699..7056925eb386 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -646,7 +646,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index dd11b7313ca0..a54d5655a191 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -143,14 +143,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
*/
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
- uint64_t gpu_addr;
int r;
r = amdgpu_bo_reserve(adev->gart.robj, false);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin(adev->gart.robj,
- AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+ r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
if (r) {
amdgpu_bo_unreserve(adev->gart.robj);
return r;
@@ -159,7 +157,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
if (r)
amdgpu_bo_unpin(adev->gart.robj);
amdgpu_bo_unreserve(adev->gart.robj);
- adev->gart.table_addr = gpu_addr;
+ adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
return r;
}
@@ -234,7 +232,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = NULL;
@@ -243,7 +241,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
if (!adev->gart.ptr)
continue;
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
@@ -282,7 +280,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
for (i = 0; i < pages; i++) {
page_base = dma_addr[i];
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
@@ -319,7 +317,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++)
adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 456295c00291..9f9e9dc87da1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -37,6 +37,8 @@ struct amdgpu_bo;
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
+#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
+
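+/*
+ * Worked example (illustrative): AMDGPU_GPU_PAGE_SIZE is fixed at 4096, so
+ * with the common CPU PAGE_SIZE of 4096 the macro evaluates to 1, while a
+ * kernel built with 64 KiB pages gives 16, i.e. one CPU page backs 16 GPU
+ * pages.
+ */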
struct amdgpu_gart {
u64 table_addr;
struct amdgpu_bo *robj;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5fb156a01774..71792d820ae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -265,7 +265,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
(u32)(0xffffffff & args->in.domains),
- flags, false, resv, &gobj);
+ flags, ttm_bo_type_device, resv, &gobj);
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -317,7 +317,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
- 0, 0, NULL, &gobj);
+ 0, ttm_bo_type_device, NULL, &gobj);
if (r)
return r;
@@ -344,7 +344,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto free_pages;
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (r)
@@ -510,7 +510,6 @@ out:
* @adev: amdgpu_device pointer
* @vm: vm to update
* @bo_va: bo_va to update
- * @list: validation list
* @operation: map, unmap or clear
*
* Update the bo_va directly after setting its address. Errors are not
@@ -519,7 +518,6 @@ out:
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo_va *bo_va,
- struct list_head *list,
uint32_t operation)
{
int r;
@@ -612,7 +610,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
tv.bo = &abo->tbo;
- tv.shared = false;
+ tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
list_add(&tv.head, &list);
} else {
gobj = NULL;
@@ -673,7 +671,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
break;
}
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
+ amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
args->operation);
error_backoff:
@@ -768,7 +766,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
amdgpu_display_supported_domains(adev));
r = amdgpu_gem_object_create(adev, args->size, 0, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- false, NULL, &gobj);
+ ttm_bo_type_device, NULL, &gobj);
if (r)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 893c2490b783..bb5a47a45790 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -105,8 +105,25 @@ struct amdgpu_gmc {
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
bool translate_further;
+ struct kfd_vm_fault_info *vm_fault_info;
+ atomic_t vm_fault_info_updated;
const struct amdgpu_gmc_funcs *gmc_funcs;
};
+/**
+ * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+ *
+ * @gmc: amdgpu_gmc structure pointer
+ *
+ * Returns:
+ * True if full VRAM is visible through the BAR
+ */
+static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
+{
+ WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);
+
+ return (gmc->real_vram_size == gmc->visible_vram_size);
+}
+
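+/*
+ * Hypothetical caller sketch (not part of this patch): the helper lets
+ * callers skip CPU-visible placement handling when the whole of VRAM is
+ * CPU-accessible, e.g.
+ *
+ *	if (!amdgpu_gmc_vram_full_visible(&adev->gmc))
+ *		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ */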
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index f70eeed9ed76..5518e623fed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -139,7 +139,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* ring tests don't use a job */
if (job) {
vm = job->vm;
- fence_ctx = job->fence_ctx;
+ fence_ctx = job->base.s_fence->scheduled.context;
} else {
vm = NULL;
fence_ctx = 0;
@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+ /* wrap the last IB with fence */
+ if (job && job->uf_addr) {
+ amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+ fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+ }
+
r = amdgpu_fence_emit(ring, f, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->insert_end)
ring->funcs->insert_end(ring);
- /* wrap the last IB with fence */
- if (job && job->uf_addr) {
- amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
- fence_flags | AMDGPU_FENCE_FLAG_64BIT);
- }
-
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
@@ -353,7 +353,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
tmo = tmo_mm;
else
tmo = tmo_gfx;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index a1c78f90eadf..3a072a7a39f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -578,11 +578,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
-
- adev->vm_manager.fence_context =
- dma_fence_context_alloc(AMDGPU_MAX_RINGS);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- adev->vm_manager.seqno[i] = 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 3a5ca462abf0..1abf5b5bac9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -25,6 +25,23 @@
* Alex Deucher
* Jerome Glisse
*/
+
+/**
+ * DOC: Interrupt Handling
+ *
+ * Interrupts generated within GPU hardware raise interrupt requests that are
+ * passed to the amdgpu IRQ handler, which is responsible for detecting the
+ * source and type of the interrupt and dispatching the matching handlers. If
+ * handling an interrupt requires calling kernel functions that may sleep,
+ * processing is dispatched to work handlers.
+ *
+ * MSI support is enabled unless it has been disabled via the msi module
+ * parameter.
+ *
+ * For GPU interrupt sources that may be driven by another driver, IRQ domain
+ * support is used (with mapping between virtual and hardware IRQs).
+ */
+
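+/*
+ * Illustrative sketch (an assumption, not part of this patch): an IP block
+ * typically registers its interrupt source once and then enables the types
+ * it needs, along the lines of
+ *
+ *	r = amdgpu_irq_add_id(adev, client_id, src_id, &ip->irq);
+ *	if (!r)
+ *		r = amdgpu_irq_get(adev, &ip->irq, type);
+ */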
#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -43,19 +60,21 @@
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
/**
- * amdgpu_hotplug_work_func - display hotplug work handler
+ * amdgpu_hotplug_work_func - work handler for display hotplug event
*
- * @work: work struct
+ * @work: work struct pointer
*
- * This is the hot plug event work handler (all asics).
- * The work gets scheduled from the irq handler if there
- * was a hot plug interrupt. It walks the connector table
- * and calls the hotplug handler for each one, then sends
- * a drm hotplug event to alert userspace.
+ * This is the hotplug event work handler (all ASICs).
+ * The work gets scheduled from the IRQ handler if there
+ * was a hotplug interrupt. It walks through the connector table
+ * and calls hotplug handler for each connector. After this, it sends
+ * a DRM hotplug event to alert userspace.
+ *
+ * This design approach is required in order to defer hotplug event handling
+ * from the IRQ handler to a work handler because the hotplug handler has to
+ * use mutexes, which cannot be locked in an IRQ handler (since &mutex_lock may
+ * sleep).
*/
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
@@ -74,13 +93,12 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
}
/**
- * amdgpu_irq_reset_work_func - execute gpu reset
+ * amdgpu_irq_reset_work_func - execute GPU reset
*
- * @work: work struct
+ * @work: work struct pointer
*
- * Execute scheduled gpu reset (cayman+).
- * This function is called when the irq handler
- * thinks we need a gpu reset.
+ * Execute scheduled GPU reset (Cayman+).
+ * This function is called when the IRQ handler thinks we need a GPU reset.
*/
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
@@ -91,7 +109,13 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
amdgpu_device_gpu_recover(adev, NULL, false);
}
-/* Disable *all* interrupts */
+/**
+ * amdgpu_irq_disable_all - disable *all* interrupts
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Disable all types of interrupts from all sources.
+ */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
unsigned long irqflags;
@@ -123,11 +147,15 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_handler - irq handler
+ * amdgpu_irq_handler - IRQ handler
+ *
+ * @irq: IRQ number (unused)
+ * @arg: pointer to DRM device
*
- * @int irq, void *arg: args
+ * IRQ handler for amdgpu driver (all ASICs).
*
- * This is the irq handler for the amdgpu driver (all asics).
+ * Returns:
+ * result of handling the IRQ, as defined by &irqreturn_t
*/
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
@@ -142,18 +170,18 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
}
/**
- * amdgpu_msi_ok - asic specific msi checks
+ * amdgpu_msi_ok - check whether MSI functionality is enabled
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu device pointer (unused)
+ *
+ * Checks whether MSI functionality has been disabled via module parameter
+ * (all ASICs).
*
- * Handles asic specific MSI checks to determine if
- * MSIs should be enabled on a particular chip (all asics).
- * Returns true if MSIs should be enabled, false if MSIs
- * should not be enabled.
+ * Returns:
+ * *true* if MSIs are allowed to be enabled or *false* otherwise
*/
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
- /* force MSI on */
if (amdgpu_msi == 1)
return true;
else if (amdgpu_msi == 0)
@@ -163,12 +191,15 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_init - init driver interrupt info
+ * amdgpu_irq_init - initialize interrupt handling
*
* @adev: amdgpu device pointer
*
- * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
- * Returns 0 for success, error for failure.
+ * Sets up work functions for hotplug and reset interrupts, enables MSI
+ * functionality, initializes vblank, hotplug and reset interrupt handling.
+ *
+ * Returns:
+ * 0 on success or error code on failure
*/
int amdgpu_irq_init(struct amdgpu_device *adev)
{
@@ -176,7 +207,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
spin_lock_init(&adev->irq.lock);
- /* enable msi */
+ /* Enable MSI if not disabled by module parameter */
adev->irq.msi_enabled = false;
if (amdgpu_msi_ok(adev)) {
@@ -189,7 +220,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (!amdgpu_device_has_dc_support(adev)) {
if (!adev->enable_virtual_display)
- /* Disable vblank irqs aggressively for power-saving */
+ /* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev->ddev->vblank_disable_immediate = true;
@@ -197,7 +228,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r)
return r;
- /* pre DCE11 */
+ /* Pre-DCE11 */
INIT_WORK(&adev->hotplug_work,
amdgpu_hotplug_work_func);
}
@@ -220,11 +251,13 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_fini - tear down driver interrupt info
+ * amdgpu_irq_fini - shut down interrupt handling
*
* @adev: amdgpu device pointer
*
- * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ * Tears down work functions for hotplug and reset interrupts, disables MSI
+ * functionality, shuts down vblank, hotplug and reset interrupt handling,
+ * turns off interrupts from all sources (all ASICs).
*/
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
@@ -264,12 +297,17 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_add_id - register irq source
+ * amdgpu_irq_add_id - register IRQ source
*
* @adev: amdgpu device pointer
- * @src_id: source id for this source
- * @source: irq source
+ * @client_id: client id
+ * @src_id: source id
+ * @source: IRQ source pointer
+ *
+ * Registers IRQ source on a client.
*
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_add_id(struct amdgpu_device *adev,
unsigned client_id, unsigned src_id,
@@ -312,12 +350,12 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
}
/**
- * amdgpu_irq_dispatch - dispatch irq to IP blocks
+ * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
*
* @adev: amdgpu device pointer
- * @entry: interrupt vector
+ * @entry: interrupt vector pointer
*
- * Dispatches the irq to the different IP blocks
+ * Dispatches IRQ to IP blocks.
*/
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
@@ -361,13 +399,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
}
/**
- * amdgpu_irq_update - update hw interrupt state
+ * amdgpu_irq_update - update hardware interrupt state
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to update
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Updates the interrupt state for a specific src (all asics).
+ * Updates interrupt state for the specific source (all ASICs).
*/
int amdgpu_irq_update(struct amdgpu_device *adev,
struct amdgpu_irq_src *src, unsigned type)
@@ -378,7 +416,7 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
spin_lock_irqsave(&adev->irq.lock, irqflags);
- /* we need to determine after taking the lock, otherwise
+ /* We need to determine after taking the lock, otherwise
we might disable just enabled interrupts again */
if (amdgpu_irq_enabled(adev, src, type))
state = AMDGPU_IRQ_STATE_ENABLE;
@@ -390,6 +428,14 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
return r;
}
+/**
+ * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Updates state of all types of interrupts on all sources on resume after
+ * reset.
+ */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
int i, j, k;
@@ -413,10 +459,13 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
* amdgpu_irq_get - enable interrupt
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to enable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Enables the interrupt type for a specific src (all asics).
+ * Enables specified type of interrupt on the specified source (all ASICs).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
@@ -440,10 +489,13 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
* amdgpu_irq_put - disable interrupt
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to disable
- * @type: type of interrupt you want to disable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
+ *
+ * Disables specified type of interrupt on the specified source (all ASICs).
*
- * Disables the interrupt type for a specific src (all asics).
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
@@ -464,12 +516,17 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
}
/**
- * amdgpu_irq_enabled - test if irq is enabled or not
+ * amdgpu_irq_enabled - check whether interrupt is enabled or not
*
* @adev: amdgpu device pointer
- * @idx: interrupt src you want to test
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Tests if the given interrupt source is enabled or not
+ * Checks whether the given type of interrupt is enabled on the given source.
+ *
+ * Returns:
+ * *true* if interrupt is enabled, *false* if interrupt is disabled or on
+ * invalid parameters
*/
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
@@ -486,7 +543,7 @@ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
return !!atomic_read(&src->enabled_types[type]);
}
-/* gen irq */
+/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
/* XXX */
@@ -497,12 +554,26 @@ static void amdgpu_irq_unmask(struct irq_data *irqd)
/* XXX */
}
+/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
.name = "amdgpu-ih",
.irq_mask = amdgpu_irq_mask,
.irq_unmask = amdgpu_irq_unmask,
};
+/**
+ * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
+ *
+ * @d: amdgpu IRQ domain pointer (unused)
+ * @irq: virtual IRQ number
+ * @hwirq: hardware IRQ number
+ *
+ * The current implementation assigns a simple interrupt handler to the given
+ * virtual IRQ.
+ *
+ * Returns:
+ * 0 on success or error code otherwise
+ */
static int amdgpu_irqdomain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
@@ -514,17 +585,21 @@ static int amdgpu_irqdomain_map(struct irq_domain *d,
return 0;
}
+/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
.map = amdgpu_irqdomain_map,
};
/**
- * amdgpu_irq_add_domain - create a linear irq domain
+ * amdgpu_irq_add_domain - create a linear IRQ domain
*
* @adev: amdgpu device pointer
*
- * Create an irq domain for GPU interrupt sources
+ * Creates an IRQ domain for GPU interrupt sources
* that may be driven by another driver (e.g., ACP).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
@@ -539,11 +614,11 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_remove_domain - remove the irq domain
+ * amdgpu_irq_remove_domain - remove the IRQ domain
*
* @adev: amdgpu device pointer
*
- * Remove the irq domain for GPU interrupt sources
+ * Removes the IRQ domain for GPU interrupt sources
* that may be driven by another driver (e.g., ACP).
*/
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
@@ -555,16 +630,17 @@ void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
- * Linux irq
+ * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
*
* @adev: amdgpu device pointer
* @src_id: IH source id
*
- * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
+ * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
* Use this for components that generate a GPU interrupt, but are driven
* by a different driver (e.g., ACP).
- * Returns the Linux irq.
+ *
+ * Returns:
+ * The Linux IRQ number
*/
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 2bd56760c744..391e2f7c03aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -30,14 +30,14 @@
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
- DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
- job->base.sched->name,
- atomic_read(&job->ring->fence_drv.last_seq),
- job->ring->fence_drv.sync_seq);
+ DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
- amdgpu_device_gpu_recover(job->adev, job, false);
+ amdgpu_device_gpu_recover(ring->adev, job, false);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
if (!*job)
return -ENOMEM;
- (*job)->adev = adev;
+ /*
+ * Initialize the scheduler to at least some ring so that we always
+ * have a pointer to adev.
+ */
+ (*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct dma_fence *f;
unsigned i;
@@ -93,14 +98,15 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(job->adev, &job->ibs[i], f);
+ amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
- amdgpu_ring_priority_put(job->ring, s_job->s_priority);
+ amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
@@ -117,50 +123,68 @@ void amdgpu_job_free(struct amdgpu_job *job)
kfree(job);
}
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct drm_sched_entity *entity, void *owner,
- struct dma_fence **f)
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f)
{
+ enum drm_sched_priority priority;
+ struct amdgpu_ring *ring;
int r;
- job->ring = ring;
if (!f)
return -EINVAL;
- r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
+ r = drm_sched_job_init(&job->base, entity, owner);
if (r)
return r;
job->owner = owner;
- job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+ priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
+ ring = to_amdgpu_ring(entity->rq->sched);
+ amdgpu_ring_priority_get(ring, priority);
+
+ return 0;
+}
+
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct dma_fence **fence)
+{
+ int r;
+
+ job->base.sched = &ring->sched;
+	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
+	if (r)
+		return r;
+
+	job->fence = dma_fence_get(*fence);
+
+ amdgpu_job_free(job);
return 0;
}
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
+ struct dma_fence *fence;
bool explicit = false;
int r;
- struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
+ fence = amdgpu_sync_get_fence(&job->sync, &explicit);
if (fence && explicit) {
if (drm_sched_dependency_optimized(fence, s_entity)) {
- r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+ r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+ fence, false);
if (r)
- DRM_ERROR("Error adding fence to sync (%d)\n", r);
+ DRM_ERROR("Error adding fence (%d)\n", r);
}
}
while (fence == NULL && vm && !job->vmid) {
- struct amdgpu_ring *ring = job->ring;
-
r = amdgpu_vmid_grab(vm, ring, &job->sync,
&job->base.s_fence->finished,
job);
@@ -175,30 +199,25 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
struct dma_fence *fence = NULL, *finished;
- struct amdgpu_device *adev;
struct amdgpu_job *job;
int r;
- if (!sched_job) {
- DRM_ERROR("job is null\n");
- return NULL;
- }
job = to_amdgpu_job(sched_job);
finished = &job->base.s_fence->finished;
- adev = job->adev;
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
- if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+ if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
if (finished->error < 0) {
DRM_INFO("Skip scheduling IBs!\n");
} else {
- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
new file mode 100644
index 000000000000..57cfe78a262b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_JOB_H__
+#define __AMDGPU_JOB_H__
+
+/* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0)
+/* bit set means preamble IB is first presented in belonging context */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1)
+/* bit set means context switch occurred */
+#define AMDGPU_HAVE_CTX_SWITCH (1 << 2)
+
+#define to_amdgpu_job(sched_job) \
+ container_of((sched_job), struct amdgpu_job, base)
+
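+/*
+ * Illustrative note: because &struct amdgpu_job embeds its &struct
+ * drm_sched_job as the member "base", scheduler callbacks can recover the
+ * containing job from the drm_sched_job pointer they are handed, e.g.
+ *
+ *	struct amdgpu_job *job = to_amdgpu_job(sched_job);
+ */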
+struct amdgpu_fence;
+
+struct amdgpu_job {
+ struct drm_sched_job base;
+ struct amdgpu_vm *vm;
+ struct amdgpu_sync sync;
+ struct amdgpu_sync sched_sync;
+ struct amdgpu_ib *ibs;
+ struct dma_fence *fence; /* the hw fence */
+ uint32_t preamble_status;
+ uint32_t num_ibs;
+ void *owner;
+ bool vm_needs_flush;
+ uint64_t vm_pd_addr;
+ unsigned vmid;
+ unsigned pasid;
+ uint32_t gds_base, gds_size;
+ uint32_t gws_base, gws_size;
+ uint32_t oa_base, oa_size;
+ uint32_t vram_lost_counter;
+
+ /* user fence handling */
+ uint64_t uf_addr;
+ uint64_t uf_sequence;
+
+};
+
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+ struct amdgpu_job **job, struct amdgpu_vm *vm);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+ struct amdgpu_job **job);
+
+void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_free(struct amdgpu_job *job);
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f);
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct dma_fence **fence);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 91517b166a3b..bd98cc5fb97b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -328,61 +328,71 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ring_mask |= adev->gfx.gfx_ring[i].ready << i;
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ring_mask |= adev->gfx.compute_ring[i].ready << i;
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 1;
+ ring_mask |= adev->sdma.instance[i].ring.ready << i;
+ ib_start_alignment = 256;
+ ib_size_alignment = 4;
break;
case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD;
- for (i = 0; i < adev->uvd.num_uvd_inst; i++)
- ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 16;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+ ring_mask |= adev->uvd.inst[i].ring.ready;
+ }
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ring_mask |= adev->vce.ring[i].ready << i;
+ ib_start_alignment = 4;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD;
- for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- ring_mask |=
- ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
- (j + i * adev->uvd.num_enc_rings));
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 1;
+ ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+ }
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCN_DEC:
type = AMD_IP_BLOCK_TYPE_VCN;
- ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ring_mask = adev->vcn.ring_dec.ready;
+ ib_start_alignment = 16;
ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->vcn.num_enc_rings; i++)
- ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ring_mask |= adev->vcn.ring_enc[i].ready << i;
+ ib_start_alignment = 64;
ib_size_alignment = 1;
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ ring_mask = adev->vcn.ring_jpeg.ready;
+ ib_start_alignment = 16;
+ ib_size_alignment = 16;
+ break;
default:
return -EINVAL;
}
@@ -427,6 +437,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_HW_IP_VCN_DEC:
case AMDGPU_HW_IP_VCN_ENC:
+ case AMDGPU_HW_IP_VCN_JPEG:
type = AMD_IP_BLOCK_TYPE_VCN;
break;
default:
@@ -494,13 +505,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_VRAM_GTT: {
struct drm_amdgpu_info_vram_gtt vram_gtt;
- vram_gtt.vram_size = adev->gmc.real_vram_size;
- vram_gtt.vram_size -= adev->vram_pin_size;
- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
- vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
+ vram_gtt.vram_size = adev->gmc.real_vram_size -
+ atomic64_read(&adev->vram_pin_size);
+ vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE;
- vram_gtt.gtt_size -= adev->gart_pin_size;
+ vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
@@ -509,17 +520,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
- mem.vram.usable_heap_size =
- adev->gmc.real_vram_size - adev->vram_pin_size;
+ mem.vram.usable_heap_size = adev->gmc.real_vram_size -
+ atomic64_read(&adev->vram_pin_size);
mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
adev->gmc.visible_vram_size;
- mem.cpu_accessible_vram.usable_heap_size =
- adev->gmc.visible_vram_size -
- (adev->vram_pin_size - adev->invisible_pin_size);
+ mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size);
mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
@@ -527,8 +537,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
mem.gtt.total_heap_size *= PAGE_SIZE;
- mem.gtt.usable_heap_size = mem.gtt.total_heap_size
- - adev->gart_pin_size;
+ mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
+ atomic64_read(&adev->gart_pin_size);
mem.gtt.heap_usage =
amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
@@ -930,7 +940,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
return;
pm_runtime_get_sync(dev->dev);
- amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
if (adev->asic_type != CHIP_RAVEN) {
amdgpu_uvd_free_handles(adev, file_priv);
@@ -958,7 +967,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unref(&pd);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
- amdgpu_bo_list_free(list);
+ amdgpu_bo_list_put(list);
idr_destroy(&fpriv->bo_list_handles);
mutex_destroy(&fpriv->bo_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 83e344fbb50a..e55508b39496 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -28,6 +28,21 @@
* Christian König <christian.koenig@amd.com>
*/
+/**
+ * DOC: MMU Notifier
+ *
+ * For coherent userptr handling the driver registers an MMU notifier to be
+ * informed about updates on the page tables of a process.
+ *
+ * When somebody tries to invalidate the page tables we block the update until
+ * all operations on the pages in question are completed, then those pages are
+ * marked as accessed and also dirty if it wasn't a read-only access.
+ *
+ * New command submissions using the userptrs in question are delayed until all
+ * page table invalidations are completed and we once more see a coherent process
+ * address space.
+ */
+
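+/*
+ * Rough lifecycle sketch (illustrative, based on the functions below):
+ *
+ *	amdgpu_mn_register(bo, addr);	// track the BO's address range
+ *	// ... command submissions use the userptr pages ...
+ *	// invalidate_range_start: block until BO operations finish and
+ *	// mark the pages accessed/dirty; new submissions wait for range_end
+ *	amdgpu_mn_unregister(bo);	// stop tracking
+ */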
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
@@ -38,6 +53,22 @@
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
+/**
+ * struct amdgpu_mn
+ *
+ * @adev: amdgpu device pointer
+ * @mm: process address space
+ * @mn: MMU notifier structure
+ * @type: type of MMU notifier
+ * @work: destruction work item
+ * @node: hash table node to find structure by adev and mn
+ * @lock: rw semaphore protecting the notifier nodes
+ * @objects: interval tree containing amdgpu_mn_nodes
+ * @read_lock: mutex for recursive locking of @lock
+ * @recursion: depth of recursion
+ *
+ * Data for each amdgpu device and process address space.
+ */
struct amdgpu_mn {
/* constant after initialisation */
struct amdgpu_device *adev;
@@ -58,13 +89,21 @@ struct amdgpu_mn {
atomic_t recursion;
};
+/**
+ * struct amdgpu_mn_node
+ *
+ * @it: interval node defining start-last of the affected address range
+ * @bos: list of all BOs in the affected address range
+ *
+ * Manages all BOs which are affected of a certain range of address space.
+ */
struct amdgpu_mn_node {
struct interval_tree_node it;
struct list_head bos;
};
/**
- * amdgpu_mn_destroy - destroy the rmn
+ * amdgpu_mn_destroy - destroy the MMU notifier
*
 * @work: previously scheduled work item
*
@@ -72,47 +111,50 @@ struct amdgpu_mn_node {
*/
static void amdgpu_mn_destroy(struct work_struct *work)
{
- struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
- struct amdgpu_device *adev = rmn->adev;
+ struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
+ struct amdgpu_device *adev = amn->adev;
struct amdgpu_mn_node *node, *next_node;
struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock);
- down_write(&rmn->lock);
- hash_del(&rmn->node);
+ down_write(&amn->lock);
+ hash_del(&amn->node);
rbtree_postorder_for_each_entry_safe(node, next_node,
- &rmn->objects.rb_root, it.rb) {
+ &amn->objects.rb_root, it.rb) {
list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
bo->mn = NULL;
list_del_init(&bo->mn_list);
}
kfree(node);
}
- up_write(&rmn->lock);
+ up_write(&amn->lock);
mutex_unlock(&adev->mn_lock);
- mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
- kfree(rmn);
+ mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
+ kfree(amn);
}
/**
* amdgpu_mn_release - callback to notify about mm destruction
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
*
 * Schedule a work item to lazily destroy our notifier.
*/
static void amdgpu_mn_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
- INIT_WORK(&rmn->work, amdgpu_mn_destroy);
- schedule_work(&rmn->work);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+
+ INIT_WORK(&amn->work, amdgpu_mn_destroy);
+ schedule_work(&amn->work);
}
/**
- * amdgpu_mn_lock - take the write side lock for this mn
+ * amdgpu_mn_lock - take the write side lock for this notifier
+ *
+ * @mn: our notifier
*/
void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
@@ -121,7 +163,9 @@ void amdgpu_mn_lock(struct amdgpu_mn *mn)
}
/**
- * amdgpu_mn_unlock - drop the write side lock for this mn
+ * amdgpu_mn_unlock - drop the write side lock for this notifier
+ *
+ * @mn: our notifier
*/
void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
@@ -130,40 +174,44 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
}
/**
- * amdgpu_mn_read_lock - take the rmn read lock
+ * amdgpu_mn_read_lock - take the read side lock for this notifier
*
- * @rmn: our notifier
- *
- * Take the rmn read side lock.
+ * @amn: our notifier
*/
-static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
{
- mutex_lock(&rmn->read_lock);
- if (atomic_inc_return(&rmn->recursion) == 1)
- down_read_non_owner(&rmn->lock);
- mutex_unlock(&rmn->read_lock);
+ if (blockable)
+ mutex_lock(&amn->read_lock);
+ else if (!mutex_trylock(&amn->read_lock))
+ return -EAGAIN;
+
+ if (atomic_inc_return(&amn->recursion) == 1)
+ down_read_non_owner(&amn->lock);
+ mutex_unlock(&amn->read_lock);
+
+ return 0;
}
/**
- * amdgpu_mn_read_unlock - drop the rmn read lock
+ * amdgpu_mn_read_unlock - drop the read side lock for this notifier
*
- * @rmn: our notifier
- *
- * Drop the rmn read side lock.
+ * @amn: our notifier
*/
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
{
- if (atomic_dec_return(&rmn->recursion) == 0)
- up_read_non_owner(&rmn->lock);
+ if (atomic_dec_return(&amn->recursion) == 0)
+ up_read_non_owner(&amn->lock);
}
/**
* amdgpu_mn_invalidate_node - unmap all BOs of a node
*
* @node: the node with the BOs to unmap
+ * @start: start of address range affected
+ * @end: end of address range affected
*
- * We block for all BOs and unmap them by move them
- * into system domain again.
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
*/
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
unsigned long start,
@@ -190,42 +238,54 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
* amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
- * We block for all BOs between start and end to be idle and
- * unmap them by move them into system domain again.
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
*/
-static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
/* notification is exclusive, but interval is inclusive */
end -= 1;
- amdgpu_mn_read_lock(rmn);
+ /* TODO we should be able to split locking for interval tree and
+ * amdgpu_mn_invalidate_node
+ */
+ if (amdgpu_mn_read_lock(amn, blockable))
+ return -EAGAIN;
- it = interval_tree_iter_first(&rmn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
+ if (!blockable) {
+ amdgpu_mn_read_unlock(amn);
+ return -EAGAIN;
+ }
+
node = container_of(it, struct amdgpu_mn_node, it);
it = interval_tree_iter_next(it, start, end);
amdgpu_mn_invalidate_node(node, start, end);
}
+
+ return 0;
}
/**
* amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
@@ -233,24 +293,31 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
* necessitates evicting all user-mode queues of the process. The BOs
 * are restored in amdgpu_mn_invalidate_range_end_hsa.
*/
-static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
/* notification is exclusive, but interval is inclusive */
end -= 1;
- amdgpu_mn_read_lock(rmn);
+ if (amdgpu_mn_read_lock(amn, blockable))
+ return -EAGAIN;
- it = interval_tree_iter_first(&rmn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
+ if (!blockable) {
+ amdgpu_mn_read_unlock(amn);
+ return -EAGAIN;
+ }
+
node = container_of(it, struct amdgpu_mn_node, it);
it = interval_tree_iter_next(it, start, end);
@@ -262,13 +329,15 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
amdgpu_amdkfd_evict_userptr(mem, mm);
}
}
+
+ return 0;
}
/**
* amdgpu_mn_invalidate_range_end - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
@@ -279,9 +348,9 @@ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
- amdgpu_mn_read_unlock(rmn);
+ amdgpu_mn_read_unlock(amn);
}
static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
@@ -315,7 +384,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
enum amdgpu_mn_type type)
{
struct mm_struct *mm = current->mm;
- struct amdgpu_mn *rmn;
+ struct amdgpu_mn *amn;
unsigned long key = AMDGPU_MN_KEY(mm, type);
int r;
@@ -325,41 +394,41 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
return ERR_PTR(-EINTR);
}
- hash_for_each_possible(adev->mn_hash, rmn, node, key)
- if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
+ hash_for_each_possible(adev->mn_hash, amn, node, key)
+ if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
goto release_locks;
- rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
- if (!rmn) {
- rmn = ERR_PTR(-ENOMEM);
+ amn = kzalloc(sizeof(*amn), GFP_KERNEL);
+ if (!amn) {
+ amn = ERR_PTR(-ENOMEM);
goto release_locks;
}
- rmn->adev = adev;
- rmn->mm = mm;
- init_rwsem(&rmn->lock);
- rmn->type = type;
- rmn->mn.ops = &amdgpu_mn_ops[type];
- rmn->objects = RB_ROOT_CACHED;
- mutex_init(&rmn->read_lock);
- atomic_set(&rmn->recursion, 0);
+ amn->adev = adev;
+ amn->mm = mm;
+ init_rwsem(&amn->lock);
+ amn->type = type;
+ amn->mn.ops = &amdgpu_mn_ops[type];
+ amn->objects = RB_ROOT_CACHED;
+ mutex_init(&amn->read_lock);
+ atomic_set(&amn->recursion, 0);
- r = __mmu_notifier_register(&rmn->mn, mm);
+ r = __mmu_notifier_register(&amn->mn, mm);
if (r)
- goto free_rmn;
+ goto free_amn;
- hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
+ hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
release_locks:
up_write(&mm->mmap_sem);
mutex_unlock(&adev->mn_lock);
- return rmn;
+ return amn;
-free_rmn:
+free_amn:
up_write(&mm->mmap_sem);
mutex_unlock(&adev->mn_lock);
- kfree(rmn);
+ kfree(amn);
return ERR_PTR(r);
}
@@ -379,14 +448,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
enum amdgpu_mn_type type =
bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
- struct amdgpu_mn *rmn;
+ struct amdgpu_mn *amn;
struct amdgpu_mn_node *node = NULL, *new_node;
struct list_head bos;
struct interval_tree_node *it;
- rmn = amdgpu_mn_get(adev, type);
- if (IS_ERR(rmn))
- return PTR_ERR(rmn);
+ amn = amdgpu_mn_get(adev, type);
+ if (IS_ERR(amn))
+ return PTR_ERR(amn);
new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node)
@@ -394,12 +463,12 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos);
- down_write(&rmn->lock);
+ down_write(&amn->lock);
- while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+ while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
kfree(node);
node = container_of(it, struct amdgpu_mn_node, it);
- interval_tree_remove(&node->it, &rmn->objects);
+ interval_tree_remove(&node->it, &amn->objects);
addr = min(it->start, addr);
end = max(it->last, end);
list_splice(&node->bos, &bos);
@@ -410,7 +479,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
else
kfree(new_node);
- bo->mn = rmn;
+ bo->mn = amn;
node->it.start = addr;
node->it.last = end;
@@ -418,9 +487,9 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
list_splice(&bos, &node->bos);
list_add(&bo->mn_list, &node->bos);
- interval_tree_insert(&node->it, &rmn->objects);
+ interval_tree_insert(&node->it, &amn->objects);
- up_write(&rmn->lock);
+ up_write(&amn->lock);
return 0;
}
@@ -435,18 +504,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_mn *rmn;
+ struct amdgpu_mn *amn;
struct list_head *head;
mutex_lock(&adev->mn_lock);
- rmn = bo->mn;
- if (rmn == NULL) {
+ amn = bo->mn;
+ if (amn == NULL) {
mutex_unlock(&adev->mn_lock);
return;
}
- down_write(&rmn->lock);
+ down_write(&amn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
@@ -456,12 +525,13 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
if (list_empty(head)) {
struct amdgpu_mn_node *node;
+
node = container_of(head, struct amdgpu_mn_node, bos);
- interval_tree_remove(&node->it, &rmn->objects);
+ interval_tree_remove(&node->it, &amn->objects);
kfree(node);
}
- up_write(&rmn->lock);
+ up_write(&amn->lock);
mutex_unlock(&adev->mn_lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3526efa8960e..b0e14a3d54ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -38,7 +38,20 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
-static bool amdgpu_need_backup(struct amdgpu_device *adev)
+/**
+ * DOC: amdgpu_object
+ *
+ * This defines the interfaces to operate on an &amdgpu_bo buffer object which
+ * represents memory used by driver (VRAM, system memory, etc.). The driver
+ * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
+ * to create/destroy/set buffer object which are then managed by the kernel TTM
+ * memory manager.
+ * The interfaces are also used internally by kernel clients, including gfx,
+ * uvd, etc. for kernel managed allocations used by the GPU.
+ *
+ */
+
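+/*
+ * Illustrative sketch (an assumption; signature as used elsewhere in this
+ * patch): kernel clients usually allocate through the create_kernel helper,
+ * e.g.
+ *
+ *	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ *				    AMDGPU_GEM_DOMAIN_VRAM,
+ *				    &bo, &gpu_addr, &cpu_addr);
+ */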
+static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
@@ -50,11 +63,35 @@ static bool amdgpu_need_backup(struct amdgpu_device *adev)
return true;
}
-static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+/**
+ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
+ *
+ * @bo: &amdgpu_bo buffer object
+ *
+ * This function is called when a BO stops being pinned, and updates the
+ * &amdgpu_device pin_size values accordingly.
+ */
+static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
+ atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
+ &adev->visible_pin_size);
+ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
+ }
+}
+
+static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ if (bo->pin_count > 0)
+ amdgpu_bo_subtract_pin_size(bo);
+
if (bo->kfd_bo)
amdgpu_amdkfd_unreserve_system_memory_limit(bo);
@@ -73,14 +110,32 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+/**
+ * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * @bo: buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * an &amdgpu_bo.
+ *
+ * Returns:
+ * true if the object belongs to &amdgpu_bo, false if not.
+ */
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
- if (bo->destroy == &amdgpu_ttm_bo_destroy)
+ if (bo->destroy == &amdgpu_bo_destroy)
return true;
return false;
}
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+/**
+ * amdgpu_bo_placement_from_domain - set buffer's placement
+ * @abo: &amdgpu_bo buffer object whose placement is to be set
+ * @domain: requested domain
+ *
+ * Sets buffer's placement according to requested domain and the buffer's
+ * flags.
+ */
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct ttm_placement *placement = &abo->placement;
@@ -161,6 +216,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
+
placement->num_placement = c;
placement->placement = places;
@@ -184,7 +241,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
*
* Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
*
- * Returns 0 on success, negative error code otherwise.
+ * Returns:
+ * 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
@@ -220,22 +278,33 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
goto error_free;
}
- r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+ r = amdgpu_bo_pin(*bo_ptr, domain);
if (r) {
dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
goto error_unreserve;
}
+ r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
+ goto error_unpin;
+ }
+
+ if (gpu_addr)
+ *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
+
if (cpu_addr) {
r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
- goto error_unreserve;
+ goto error_unpin;
}
}
return 0;
+error_unpin:
+ amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
amdgpu_bo_unreserve(*bo_ptr);
@@ -261,7 +330,8 @@ error_free:
*
* Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
*
- * Returns 0 on success, negative error code otherwise.
+ * Returns:
+ * 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
@@ -285,6 +355,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
+ * @gpu_addr: pointer to where the BO's GPU memory space address was stored
+ * @cpu_addr: pointer to where the BO's CPU memory space address was stored
*
* Unmaps and unpins a BO for kernel internal use.
*/
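Together with amdgpu_bo_create_reserved() above, the usual pairing is roughly the following sketch (size and domain are illustrative; adev is assumed valid):

    struct amdgpu_bo *bo = NULL;
    u64 gpu_addr;
    void *cpu_ptr;
    int r;

    /* creates, pins, binds to GART and kmaps the BO in one call */
    r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_GTT,
                                &bo, &gpu_addr, &cpu_ptr);
    if (r)
            return r;
    /* ... use cpu_ptr / gpu_addr ... */
    amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);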
@@ -418,17 +490,17 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
#endif
bo->tbo.bdev = &adev->mman.bdev;
- amdgpu_ttm_placement_from_domain(bo, bp->domain);
+ amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
&bo->placement, page_align, &ctx, acc_size,
- NULL, bp->resv, &amdgpu_ttm_bo_destroy);
+ NULL, bp->resv, &amdgpu_bo_destroy);
if (unlikely(r != 0))
return r;
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
@@ -498,6 +570,20 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}
+/**
+ * amdgpu_bo_create - create an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @bo_ptr: pointer to the buffer object pointer
+ *
+ * Creates an &amdgpu_bo buffer object; and if requested, also creates a
+ * shadow object.
+ * The shadow object is used to back up the original buffer object, and is
+ * always placed in GTT.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr)
@@ -510,7 +596,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (r)
return r;
- if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+ if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
if (!bp->resv)
WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
NULL));
@@ -527,6 +613,21 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
}
+/**
+ * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: &amdgpu_bo buffer to be backed up
+ * @resv: reservation object with embedded fence
+ * @fence: dma_fence associated with the operation
+ * @direct: whether to submit the job directly
+ *
+ * Copies an &amdgpu_bo buffer object to its shadow object.
+ * Not used for now.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
@@ -559,6 +660,18 @@ err:
return r;
}
+/**
+ * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
+ * @bo: pointer to the buffer object
+ *
+ * Sets placement according to domain, and changes the placement and caching
+ * policy of the buffer object accordingly.
+ * This is used for validating shadow bos. It calls ttm_bo_validate() to
+ * make sure the buffer is resident where it needs to be.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
@@ -571,7 +684,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
domain = bo->preferred_domains;
retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
@@ -581,6 +694,22 @@ retry:
return r;
}
+/**
+ * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: &amdgpu_bo buffer to be restored
+ * @resv: reservation object with embedded fence
+ * @fence: dma_fence associated with the operation
+ * @direct: whether to submit the job directly
+ *
+ * Copies a buffer object's shadow content back to the object.
+ * This is used for recovering a buffer from its shadow in case of a gpu
+ * reset where vram context may be lost.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
@@ -613,6 +742,17 @@ err:
return r;
}
+/**
+ * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be mapped
+ * @ptr: kernel virtual address to be returned
+ *
+ * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
+ * amdgpu_bo_kptr() to get the kernel virtual address.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
void *kptr;
@@ -643,6 +783,15 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
+/**
+ * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
+ *
+ * Returns:
+ * the virtual address of a buffer object area.
+ */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
bool is_iomem;
@@ -650,21 +799,42 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}
+/**
+ * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unmapped
+ *
+ * Unmaps a kernel map set up by amdgpu_bo_kmap().
+ */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
if (bo->kmap.bo)
ttm_bo_kunmap(&bo->kmap);
}
+/**
+ * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * References the contained &ttm_buffer_object.
+ *
+ * Returns:
+ * a refcounted pointer to the &amdgpu_bo buffer object.
+ */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
if (bo == NULL)
return NULL;
- ttm_bo_reference(&bo->tbo);
+ ttm_bo_get(&bo->tbo);
return bo;
}
+/**
+ * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Unreferences the contained &ttm_buffer_object and clears the pointer.
+ */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
struct ttm_buffer_object *tbo;
@@ -673,14 +843,34 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
return;
tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ ttm_bo_put(tbo);
+ *bo = NULL;
}
+/**
+ * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ * @min_offset: the start of requested address range
+ * @max_offset: the end of requested address range
+ *
+ * Pins the buffer object according to requested domain and address range. If
+ * the memory is unbound gart memory, binds the pages into gart table. Adjusts
+ * pin_count and pin_size accordingly.
+ *
+ * Pinning means to lock pages in memory along with keeping them at a fixed
+ * offset. It is required when a buffer cannot be moved, for example, when
+ * a display buffer is being scanned out.
+ *
+ * Compared with amdgpu_bo_pin(), this function gives more flexibility on
+ * where to pin a buffer if there are specific restrictions on where a buffer
+ * must be located.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset,
- u64 *gpu_addr)
+ u64 min_offset, u64 max_offset)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { false, false };
@@ -712,8 +902,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
if (max_offset != 0) {
u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
@@ -728,7 +916,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
unsigned fpfn, lpfn;
@@ -749,33 +937,48 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
goto error;
}
- r = amdgpu_ttm_alloc_gart(&bo->tbo);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p bind failed\n", bo);
- goto error;
- }
-
bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- adev->vram_pin_size += amdgpu_bo_size(bo);
- adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
+ atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
+ atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
+ &adev->visible_pin_size);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- adev->gart_pin_size += amdgpu_bo_size(bo);
+ atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
error:
return r;
}
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+/**
+ * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ *
+ * A simple wrapper to amdgpu_bo_pin_restricted().
+ * Provides a simpler API for buffers that do not have any strict restrictions
+ * on where a buffer must be located.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
- return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
+ return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
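Since amdgpu_bo_pin() no longer returns the GPU address, callers that need it query amdgpu_bo_gpu_offset() once pinning has succeeded. A sketch of the updated convention (mirroring the amdgpu_test.c conversion later in this series; the VRAM domain is illustrative):

    r = amdgpu_bo_reserve(bo, false);
    if (r)
            return r;
    r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
    if (!r)
            gpu_addr = amdgpu_bo_gpu_offset(bo);  /* query after pinning */
    amdgpu_bo_unreserve(bo);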
+/**
+ * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unpinned
+ *
+ * Decreases the pin_count, and clears the flags if pin_count reaches 0.
+ * Changes placement and pin size accordingly.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -790,12 +993,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->pin_count)
return 0;
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- adev->vram_pin_size -= amdgpu_bo_size(bo);
- adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- adev->gart_pin_size -= amdgpu_bo_size(bo);
- }
+ amdgpu_bo_subtract_pin_size(bo);
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
@@ -808,6 +1006,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
return r;
}
+/**
+ * amdgpu_bo_evict_vram - evict VRAM buffers
+ * @adev: amdgpu device object
+ *
+ * Evicts all VRAM buffers on the lru list of the memory type.
+ * Mainly used for evicting vram at suspend time.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
@@ -830,6 +1038,15 @@ static const char *amdgpu_vram_names[] = {
"DDR4",
};
+/**
+ * amdgpu_bo_init - initialize memory manager
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
/* reserve PAT memory space to WC for VRAM */
@@ -847,6 +1064,16 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
return amdgpu_ttm_init(adev);
}
+/**
+ * amdgpu_bo_late_init - late init
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_late_init() to free resources used earlier during
+ * initialization.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
amdgpu_ttm_late_init(adev);
@@ -854,6 +1081,12 @@ int amdgpu_bo_late_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_bo_fini - tear down memory manager
+ * @adev: amdgpu device object
+ *
+ * Reverses amdgpu_bo_init() to tear down memory manager.
+ */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
@@ -861,12 +1094,33 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
+/**
+ * amdgpu_bo_fbdev_mmap - mmap fbdev memory
+ * @bo: &amdgpu_bo buffer object
+ * @vma: vma as input from the fbdev mmap method
+ *
+ * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
struct vm_area_struct *vma)
{
return ttm_fbdev_mmap(vma, &bo->tbo);
}
+/**
+ * amdgpu_bo_set_tiling_flags - set tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: new flags
+ *
+ * Sets the buffer object's tiling flags to the new value. Used by the GEM
+ * ioctl or kernel driver to set the tiling flags on a buffer.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -879,6 +1133,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
return 0;
}
+/**
+ * amdgpu_bo_get_tiling_flags - get tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: returned flags
+ *
+ * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
+ * query the tiling flags of a buffer.
+ */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
lockdep_assert_held(&bo->tbo.resv->lock.base);
@@ -887,6 +1149,19 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
*tiling_flags = bo->tiling_flags;
}
+/**
+ * amdgpu_bo_set_metadata - set metadata
+ * @bo: &amdgpu_bo buffer object
+ * @metadata: new metadata
+ * @metadata_size: size of the new metadata
+ * @flags: flags of the new metadata
+ *
+ * Sets buffer object's metadata, its size and flags.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
uint32_t metadata_size, uint64_t flags)
{
@@ -916,6 +1191,21 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
return 0;
}
+/**
+ * amdgpu_bo_get_metadata - get metadata
+ * @bo: &amdgpu_bo buffer object
+ * @buffer: returned metadata
+ * @buffer_size: size of the buffer
+ * @metadata_size: size of the returned metadata
+ * @flags: flags of the returned metadata
+ *
+ * Gets buffer object's metadata, its size and flags. buffer_size shall not be
+ * less than metadata_size.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags)
@@ -939,6 +1229,16 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
return 0;
}
+/**
+ * amdgpu_bo_move_notify - notification about a memory move
+ * @bo: pointer to a buffer object
+ * @evict: if this move is evicting the buffer from the graphics address space
+ * @new_mem: new information of the buffer object
+ *
+ * Marks the corresponding &amdgpu_bo buffer object as invalid; also performs
+ * bookkeeping.
+ * TTM driver callback which is called when ttm moves a buffer.
+ */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_mem_reg *new_mem)
@@ -947,7 +1247,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
abo = ttm_to_amdgpu_bo(bo);
@@ -964,9 +1264,20 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
/* move_notify is called before move happens */
- trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+ trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
+/**
+ * amdgpu_bo_fault_reserve_notify - notification about a memory fault
+ * @bo: pointer to a buffer object
+ *
+ * Notifies the driver that we are taking a fault on this BO and have
+ * reserved it; also performs bookkeeping.
+ * TTM driver callback for dealing with vm faults.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
@@ -975,7 +1286,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
unsigned long offset, size;
int r;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
return 0;
abo = ttm_to_amdgpu_bo(bo);
@@ -997,8 +1308,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
/* Avoid costly evictions; only set GTT as a busy placement */
abo->placement.num_busy_placement = 1;
@@ -1040,10 +1351,11 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
- * Returns current GPU offset of the object.
- *
* Note: object should either be pinned or reserved when calling this
* function, it might be useful to add check for this for debugging.
+ *
+ * Returns:
+ * current GPU offset of the object.
*/
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
@@ -1059,6 +1371,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
return bo->tbo.offset;
}
+/**
+ * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
+ * @adev: amdgpu device object
+ * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
+ *
+ * Returns:
+ * Which of the allowed domains is preferred for pinning the BO for scanout.
+ */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
uint32_t domain)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 731748033878..18945dd6982d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -32,6 +32,7 @@
#include "amdgpu.h"
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+#define AMDGPU_BO_MAX_PLACEMENTS 3
struct amdgpu_bo_param {
unsigned long size;
@@ -77,7 +78,7 @@ struct amdgpu_bo {
/* Protected by tbo.reserved */
u32 preferred_domains;
u32 allowed_domains;
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -234,6 +235,9 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr);
@@ -252,10 +256,9 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset,
- u64 *gpu_addr);
+ u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b455da487782..8f98629fbe59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -31,7 +31,7 @@
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-
+#include <linux/nospec.h>
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -68,11 +68,11 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
if (power_supply_is_system_supplied() > 0)
- adev->pm.dpm.ac_power = true;
+ adev->pm.ac_power = true;
else
- adev->pm.dpm.ac_power = false;
+ adev->pm.ac_power = false;
if (adev->powerplay.pp_funcs->enable_bapm)
- amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
+ amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex);
}
}
@@ -80,12 +80,15 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
/**
* DOC: power_dpm_state
*
- * This is a legacy interface and is only provided for backwards compatibility.
- * The amdgpu driver provides a sysfs API for adjusting certain power
- * related parameters. The file power_dpm_state is used for this.
+ * The power_dpm_state file is a legacy interface and is only provided for
+ * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
+ * certain power related parameters. The file power_dpm_state is used for this.
* It accepts the following arguments:
+ *
* - battery
+ *
* - balanced
+ *
* - performance
*
* battery
@@ -169,14 +172,21 @@ fail:
* The amdgpu driver provides a sysfs API for adjusting certain power
* related parameters. The file power_dpm_force_performance_level is
* used for this. It accepts the following arguments:
+ *
* - auto
+ *
* - low
+ *
* - high
+ *
* - manual
- * - GPU fan
+ *
* - profile_standard
+ *
* - profile_min_sclk
+ *
* - profile_min_mclk
+ *
* - profile_peak
*
* auto
@@ -393,6 +403,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
count = -EINVAL;
goto fail;
}
+ idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
amdgpu_dpm_get_pp_num_states(adev, &data);
state = data.states[idx];
@@ -463,8 +474,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* this.
*
* Reading the file will display:
+ *
* - a list of engine clock levels and voltages labeled OD_SCLK
+ *
* - a list of memory clock levels and voltages labeled OD_MCLK
+ *
* - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
*
* To manually adjust these settings, first select manual using
@@ -593,40 +607,59 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
return snprintf(buf, PAGE_SIZE, "\n");
}
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+/*
+ * Worst case: 32 bits individually specified, in octal at 12 characters
+ * per line (+1 for \n).
+ */
+#define AMDGPU_MASK_BUF_MAX (32 * 13)
+
+static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
- uint32_t mask = 0;
char *sub_str = NULL;
char *tmp;
- char buf_cpy[count];
+ char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
const char delimiter[3] = {' ', '\n', '\0'};
+ size_t bytes;
- memcpy(buf_cpy, buf, count+1);
+ *mask = 0;
+
+ bytes = min(count, sizeof(buf_cpy) - 1);
+ memcpy(buf_cpy, buf, bytes);
+ buf_cpy[bytes] = '\0';
tmp = buf_cpy;
while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
+ sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
-
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
+ if (ret)
+ return -EINVAL;
+ *mask |= 1 << level;
} else
break;
}
+
+ return 0;
+}
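The helper turns a whitespace-separated list of level indices into a bitmask; for illustration, a hypothetical call (not from the patch):

    uint32_t mask;

    /* "0 2 3\n" parses to (1 << 0) | (1 << 2) | (1 << 3) == 0x0000000d */
    if (amdgpu_read_mask("0 2 3\n", 6, &mask))
            return -EINVAL;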
+
+static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ uint32_t mask = 0;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
-fail:
return count;
}
@@ -651,32 +684,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int ret;
- long level;
uint32_t mask = 0;
- char *sub_str = NULL;
- char *tmp;
- char buf_cpy[count];
- const char delimiter[3] = {' ', '\n', '\0'};
- memcpy(buf_cpy, buf, count+1);
- tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
- if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
- } else
- break;
- }
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
-fail:
return count;
}
@@ -701,33 +717,15 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int ret;
- long level;
uint32_t mask = 0;
- char *sub_str = NULL;
- char *tmp;
- char buf_cpy[count];
- const char delimiter[3] = {' ', '\n', '\0'};
- memcpy(buf_cpy, buf, count+1);
- tmp = buf_cpy;
-
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
- if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
- } else
- break;
- }
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
-fail:
return count;
}
@@ -905,6 +903,36 @@ fail:
return -EINVAL;
}
+/**
+ * DOC: busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the GPU
+ * is as a percentage. The file gpu_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int r, value, size = sizeof(value);
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* read the IP busy sensor */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
+ (void *)&value, &size);
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
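From userspace the attribute reads like any other sysfs file; a minimal sketch (the card0 path is the typical location, not guaranteed on every system):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[16] = {0};
            int fd = open("/sys/class/drm/card0/device/gpu_busy_percent",
                          O_RDONLY);

            if (fd < 0)
                    return 1;
            if (read(fd, buf, sizeof(buf) - 1) > 0)
                    printf("GPU load: %s", buf);  /* e.g. "42\n" */
            close(fd);
            return 0;
    }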
+
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
amdgpu_get_dpm_forced_performance_level,
@@ -938,6 +966,8 @@ static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
amdgpu_get_pp_od_clk_voltage,
amdgpu_set_pp_od_clk_voltage);
+static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
+ amdgpu_get_busy_percent, NULL);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -1156,7 +1186,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
int r, size = sizeof(vddnb);
/* only APUs have vddnb */
- if (adev->flags & AMD_IS_APU)
+ if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
/* Can't get voltage when the card is off */
@@ -1285,35 +1315,51 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
* DOC: hwmon
*
* The amdgpu driver exposes the following sensor interfaces:
+ *
* - GPU temperature (via the on-die sensor)
+ *
* - GPU voltage
+ *
* - Northbridge voltage (APUs only)
+ *
* - GPU power
+ *
* - GPU fan
*
* hwmon interfaces for GPU temperature:
+ *
* - temp1_input: the on die GPU temperature in millidegrees Celsius
+ *
* - temp1_crit: temperature critical max value in millidegrees Celsius
+ *
* - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
*
* hwmon interfaces for GPU voltage:
+ *
* - in0_input: the voltage on the GPU in millivolts
+ *
* - in1_input: the voltage on the Northbridge in millivolts
*
* hwmon interfaces for GPU power:
+ *
* - power1_average: average power used by the GPU in microWatts
+ *
* - power1_cap_min: minimum cap supported in microWatts
+ *
* - power1_cap_max: maximum cap supported in microWatts
+ *
* - power1_cap: selected power cap in microWatts
*
* hwmon interfaces for GPU fan:
+ *
* - pwm1: pulse width modulation fan level (0-255)
- * - pwm1_enable: pulse width modulation fan control method
- * 0: no fan speed control
- * 1: manual fan speed control using pwm interface
- * 2: automatic fan speed control
+ *
+ * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
+ *
* - pwm1_min: pulse width modulation fan control minimum level (0)
+ *
* - pwm1_max: pulse width modulation fan control maximum level (255)
+ *
* - fan1_input: fan speed in RPM
*
* You can use hwmon tools like sensors to view this information on your system.
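The units above follow the usual hwmon conventions (millidegrees, millivolts, microwatts); a small sketch of the conversions a consumer applies (the readings are illustrative):

    #include <stdio.h>

    int main(void)
    {
            int temp1_input = 45000;        /* millidegrees Celsius */
            int power1_average = 38000000;  /* microwatts */

            printf("temp:  %.3f C\n", temp1_input / 1000.0);
            printf("power: %.6f W\n", power1_average / 1e6);
            return 0;
    }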
@@ -1668,10 +1714,10 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->powergate_uvd) {
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable UVD */
mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_uvd(adev, !enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
mutex_unlock(&adev->pm.mutex);
} else {
if (enable) {
@@ -1690,10 +1736,10 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->powergate_vce) {
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable VCE */
mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_vce(adev, !enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
mutex_unlock(&adev->pm.mutex);
} else {
if (enable) {
@@ -1825,6 +1871,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"pp_od_clk_voltage\n");
return ret;
}
+ ret = device_create_file(adev->dev,
+ &dev_attr_gpu_busy_percent);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "gpu_busy_level\n");
+ return ret;
+ }
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1860,6 +1913,7 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
&dev_attr_pp_power_profile_mode);
device_remove_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
+ device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -1878,11 +1932,19 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
+ mutex_lock(&adev->pm.mutex);
+ /* update battery/ac status */
+ if (power_supply_is_system_supplied() > 0)
+ adev->pm.ac_power = true;
+ else
+ adev->pm.ac_power = false;
+ mutex_unlock(&adev->pm.mutex);
+
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
@@ -1898,14 +1960,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
} else {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
- /* update battery/ac status */
- if (power_supply_is_system_supplied() > 0)
- adev->pm.dpm.ac_power = true;
- else
- adev->pm.dpm.ac_power = false;
-
amdgpu_dpm_change_power_state_locked(adev);
-
mutex_unlock(&adev->pm.mutex);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 4683626b065f..1c5d97f4b4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -23,6 +23,14 @@
*
* Authors: Alex Deucher
*/
+
+/**
+ * DOC: PRIME Buffer Sharing
+ *
+ * The following callback implementations are used for :ref:`sharing GEM buffer
+ * objects between different devices via PRIME <prime_buffer_sharing>`.
+ */
+
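For orientation, the userspace side of this sharing typically goes through the libdrm PRIME helpers; a hedged sketch (the two device fds and the GEM handle are assumed to come from the caller):

    #include <stdint.h>
    #include <xf86drm.h>

    /* export a GEM handle on one device as a dma-buf fd and import it on
     * another device, yielding a new handle there */
    static int share_bo(int exporter_fd, uint32_t handle, int importer_fd,
                        uint32_t *imported_handle)
    {
            int prime_fd, r;

            r = drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC,
                                   &prime_fd);
            if (r)
                    return r;
            return drmPrimeFDToHandle(importer_fd, prime_fd, imported_handle);
    }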
#include <drm/drmP.h>
#include "amdgpu.h"
@@ -32,6 +40,14 @@
static const struct dma_buf_ops amdgpu_dmabuf_ops;
+/**
+ * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
+ * implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * A scatter/gather table for the pinned pages of the buffer object's memory.
+ */
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -40,6 +56,15 @@ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
+/**
+ * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
+ * @obj: GEM buffer object
+ *
+ * Sets up an in-kernel virtual mapping of the buffer object's memory.
+ *
+ * Returns:
+ * The virtual address of the mapping or an error pointer.
+ */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -53,6 +78,13 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
return bo->dma_buf_vmap.virtual;
}
+/**
+ * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
+ * @obj: GEM buffer object
+ * @vaddr: virtual address (unused)
+ *
+ * Tears down the in-kernel virtual mapping of the buffer object's memory.
+ */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -60,6 +92,17 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
+/**
+ * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
+ * @obj: GEM buffer object
+ * @vma: virtual memory area
+ *
+ * Sets up a userspace mapping of the buffer object's memory in the given
+ * virtual memory area.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -94,6 +137,19 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma
return ret;
}
+/**
+ * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
+ * implementation
+ * @dev: DRM device
+ * @attach: DMA-buf attachment
+ * @sg: Scatter/gather table
+ *
+ * Import shared DMA buffer memory exported by another device.
+ *
+ * Returns:
+ * A new GEM buffer object of the given DRM device, representing the memory
+ * described by the given DMA-buf attachment and scatter/gather table.
+ */
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -132,8 +188,19 @@ error:
return ERR_PTR(ret);
}
+/**
+ * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
+ * @dma_buf: shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * Makes sure that the shared DMA buffer can be accessed by the target device.
+ * For now, simply pins it to the GTT domain, where it should be accessible by
+ * all DMA devices.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
- struct device *target_dev,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
@@ -141,7 +208,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
- r = drm_gem_map_attach(dma_buf, target_dev, attach);
+ r = drm_gem_map_attach(dma_buf, attach);
if (r)
return r;
@@ -165,7 +232,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
}
/* pin buffer into GTT */
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error_unreserve;
@@ -181,6 +248,14 @@ error_detach:
return r;
}
+/**
+ * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
+ * @dma_buf: shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * This is called when a shared DMA buffer no longer needs to be accessible by
+ * the other device. For now, simply unpins the buffer from GTT.
+ */
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
@@ -202,6 +277,13 @@ error:
drm_gem_map_detach(dma_buf, attach);
}
+/**
+ * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * The buffer object's reservation object.
+ */
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -209,6 +291,18 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
return bo->tbo.resv;
}
+/**
+ * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
+ * @dma_buf: shared DMA buffer
+ * @direction: direction of DMA transfer
+ *
+ * This is called before CPU access to the shared DMA buffer's memory. If it's
+ * a read access, the buffer is moved to the GTT domain if possible, for optimal
+ * CPU read performance.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction direction)
{
@@ -229,7 +323,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
@@ -245,14 +339,24 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
.map = drm_gem_dmabuf_kmap,
- .map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
- .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
};
+/**
+ * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
+ * @dev: DRM device
+ * @gobj: GEM buffer object
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+ * The main work is done by the &drm_gem_prime_export helper, which in turn
+ * uses &amdgpu_gem_prime_res_obj.
+ *
+ * Returns:
+ * Shared DMA buffer representing the GEM buffer object from the given device.
+ */
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags)
@@ -273,6 +377,17 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
return buf;
}
+/**
+ * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
+ * @dev: DRM device
+ * @dma_buf: Shared DMA buffer
+ *
+ * The main work is done by the &drm_gem_prime_import helper, which in turn
+ * uses &amdgpu_gem_prime_import_sg_table.
+ *
+ * Returns:
+ * GEM buffer object representing the shared DMA buffer for the given device.
+ */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9f1a5bd39ae8..5b39d1399630 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -131,6 +131,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
msleep(1);
}
+ if (ucode) {
+ ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+ ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+ }
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 8af16e81c7d4..a172bba32b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -66,8 +66,6 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
u32 ring,
struct amdgpu_ring **out_ring)
{
- u32 instance;
-
switch (mapper->hw_ip) {
case AMDGPU_HW_IP_GFX:
*out_ring = &adev->gfx.gfx_ring[ring];
@@ -79,16 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
*out_ring = &adev->sdma.instance[ring].ring;
break;
case AMDGPU_HW_IP_UVD:
- instance = ring;
- *out_ring = &adev->uvd.inst[instance].ring;
+ *out_ring = &adev->uvd.inst[0].ring;
break;
case AMDGPU_HW_IP_VCE:
*out_ring = &adev->vce.ring[ring];
break;
case AMDGPU_HW_IP_UVD_ENC:
- instance = ring / adev->uvd.num_enc_rings;
- *out_ring =
- &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
+ *out_ring = &adev->uvd.inst[0].ring_enc[ring];
break;
case AMDGPU_HW_IP_VCN_DEC:
*out_ring = &adev->vcn.ring_dec;
@@ -96,6 +91,9 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCN_ENC:
*out_ring = &adev->vcn.ring_enc[ring];
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ *out_ring = &adev->vcn.ring_jpeg;
+ break;
default:
*out_ring = NULL;
DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
@@ -216,7 +214,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring)
{
- int r, ip_num_rings;
+ int i, r, ip_num_rings = 0;
struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
if (!adev || !mgr || !out_ring)
@@ -245,14 +243,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
ip_num_rings = adev->sdma.num_instances;
break;
case AMDGPU_HW_IP_UVD:
- ip_num_rings = adev->uvd.num_uvd_inst;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (!(adev->uvd.harvest_config & (1 << i)))
+ ip_num_rings++;
+ }
break;
case AMDGPU_HW_IP_VCE:
ip_num_rings = adev->vce.num_rings;
break;
case AMDGPU_HW_IP_UVD_ENC:
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (!(adev->uvd.harvest_config & (1 << i)))
+ ip_num_rings++;
+ }
ip_num_rings =
- adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
+ adev->uvd.num_enc_rings * ip_num_rings;
break;
case AMDGPU_HW_IP_VCN_DEC:
ip_num_rings = 1;
@@ -260,6 +265,9 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCN_ENC:
ip_num_rings = adev->vcn.num_enc_rings;
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ ip_num_rings = 1;
+ break;
default:
DRM_DEBUG("unknown ip type: %d\n", hw_ip);
return -EINVAL;
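A concrete reading of the harvest-aware counting above (an illustration, not code from the patch):

    /* num_uvd_inst == 2, harvest_config == 0x2: instance 1 is harvested */
    uint32_t harvest_config = 0x2;
    int i, ip_num_rings = 0;

    for (i = 0; i < 2; i++)
            if (!(harvest_config & (1 << i)))
                    ip_num_rings++;  /* ends up as 1, not 2 */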
@@ -287,6 +295,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
case AMDGPU_HW_IP_UVD_ENC:
case AMDGPU_HW_IP_VCN_DEC:
case AMDGPU_HW_IP_VCN_ENC:
+ case AMDGPU_HW_IP_VCN_JPEG:
r = amdgpu_identity_map(adev, mapper, ring, out_ring);
break;
case AMDGPU_HW_IP_DMA:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index c6850b629d0e..93794a85f83d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -211,7 +211,8 @@ void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
if (!ring->funcs->set_priority)
return;
- atomic_inc(&ring->num_jobs[priority]);
+ if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
+ return;
mutex_lock(&ring->priority_mutex);
if (priority <= ring->priority)
@@ -304,7 +305,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
0xffffffffffffffff : ring->buf_mask;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 1513124c5659..d242b9a51e90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -44,6 +44,8 @@
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
+#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+
enum amdgpu_ring_type {
AMDGPU_RING_TYPE_GFX,
AMDGPU_RING_TYPE_COMPUTE,
@@ -53,7 +55,8 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_KIQ,
AMDGPU_RING_TYPE_UVD_ENC,
AMDGPU_RING_TYPE_VCN_DEC,
- AMDGPU_RING_TYPE_VCN_ENC
+ AMDGPU_RING_TYPE_VCN_ENC,
+ AMDGPU_RING_TYPE_VCN_JPEG
};
struct amdgpu_device;
@@ -112,6 +115,7 @@ struct amdgpu_ring_funcs {
u32 nop;
bool support_64bit_ptrs;
unsigned vmhub;
+ unsigned extra_dw;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -119,6 +123,7 @@ struct amdgpu_ring_funcs {
void (*set_wptr)(struct amdgpu_ring *ring);
/* validating and patching of IBs */
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
/* constants to calculate how many DW are needed for an emit */
unsigned emit_frame_size;
unsigned emit_ib_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index e3878256743a..8904e62dca7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2009 VMware, Inc.
*
@@ -75,11 +76,12 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(vram_obj, false);
if (unlikely(r != 0))
goto out_unref;
- r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+ r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
if (r) {
DRM_ERROR("Failed to pin VRAM object\n");
goto out_unres;
}
+ vram_addr = amdgpu_bo_gpu_offset(vram_obj);
for (i = 0; i < n; i++) {
void *gtt_map, *vram_map;
void **gart_start, **gart_end;
@@ -96,11 +98,17 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(gtt_obj[i], false);
if (unlikely(r != 0))
goto out_lclean_unref;
- r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
+ r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
if (r) {
DRM_ERROR("Failed to pin GTT object %d\n", i);
goto out_lclean_unres;
}
+ r = amdgpu_ttm_alloc_gart(&gtt_obj[i]->tbo);
+ if (r) {
+ DRM_ERROR("%p bind failed\n", gtt_obj[i]);
+ goto out_lclean_unpin;
+ }
+ gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index e96e26d3f3b0..7206a0025b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign(
__entry->bo_list = p->bo_list;
- __entry->ring = p->job->ring->idx;
+ __entry->ring = p->ring->idx;
__entry->dw = p->job->ibs[i].length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- p->job->ring);
+ p->ring);
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = job->ring->name;
+ __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = job->ring->name;
+ __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -314,6 +314,11 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
TP_ARGS(mapping)
);
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
+ TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+ TP_ARGS(mapping)
+);
+
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags),
@@ -436,7 +441,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
__entry->total_bo, __entry->total_size)
);
-TRACE_EVENT(amdgpu_ttm_bo_move,
+TRACE_EVENT(amdgpu_bo_move,
TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
TP_ARGS(bo, new_placement, old_placement),
TP_STRUCT__entry(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e93a0a237dc3..fcf421263fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -92,11 +92,9 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
}
/**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference
- * structures.
+ * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
*
- * @adev: AMDGPU device for which the global structures need to be
- * registered.
+ * @adev: AMDGPU device for which the global structures need to be registered.
*
* This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
* during bring up.
@@ -104,8 +102,6 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
int r;
/* ensure reference is false in case init fails */
@@ -138,21 +134,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
- ring = adev->mman.buffer_funcs_ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
- rq, NULL);
- if (r) {
- DRM_ERROR("Failed setting up TTM BO move run queue.\n");
- goto error_entity;
- }
-
adev->mman.mem_global_referenced = true;
return 0;
-error_entity:
- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
@@ -162,8 +147,6 @@ error_mem:
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
if (adev->mman.mem_global_referenced) {
- drm_sched_entity_fini(adev->mman.entity.sched,
- &adev->mman.entity);
mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
drm_global_item_unref(&adev->mman.mem_global_ref);
@@ -177,13 +160,12 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
}
/**
- * amdgpu_init_mem_type - Initialize a memory manager for a specific
- * type of memory request.
+ * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
+ * memory request.
*
- * @bdev: The TTM BO device object (contains a reference to
- * amdgpu_device)
- * @type: The type of memory requested
- * @man:
+ * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
+ * @type: The type of memory requested
+ * @man: The memory type manager for each domain
*
* This is called by ttm_bo_init_mm() when a buffer object is being
* initialized.
@@ -263,7 +245,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
/* Object isn't an AMDGPU object so ignore */
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
+ if (!amdgpu_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
placement->busy_placement = &placements;
placement->num_placement = 1;
@@ -276,8 +258,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
if (!adev->mman.buffer_funcs_enabled) {
/* Move to system memory */
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
- } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
@@ -286,7 +268,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* BO will be evicted to GTT rather than causing other
* BOs to be evicted from VRAM
*/
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT);
abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
abo->placements[0].lpfn = 0;
@@ -294,12 +276,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo->placement.num_busy_placement = 1;
} else {
/* Move to GTT memory */
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
}
break;
case TTM_PL_TT:
default:
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
}
*placement = abo->placement;
}
@@ -307,8 +289,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
/**
* amdgpu_verify_access - Verify access for a mmap call
*
- * @bo: The buffer object to map
- * @filp: The file pointer from the process performing the mmap
+ * @bo: The buffer object to map
+ * @filp: The file pointer from the process performing the mmap
*
* This is called by ttm_bo_mmap() to verify whether a process
* has the right to mmap a BO to their process space.
@@ -333,11 +315,10 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
/**
* amdgpu_move_null - Register memory for a buffer object
*
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
*
- * Assign the memory from new_mem to the memory of the buffer object
- * bo.
+ * Assign the memory from new_mem to the memory of the buffer object bo.
*/
static void amdgpu_move_null(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
@@ -350,8 +331,12 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT
- * buffer.
+ * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
+ *
+ * @bo: The buffer object the memory node belongs to.
+ * @mm_node: Memory manager node for drm allocator.
+ * @mem: The region where the bo resides.
+ *
*/
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
struct drm_mm_node *mm_node,
@@ -367,10 +352,12 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node
- * corresponding to @offset. It also modifies
- * the offset to be within the drm_mm_node
- * returned
+ * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
+ * @offset. It also modifies the offset to be within the drm_mm_node returned
+ *
+ * @mem: The region where the bo resides.
+ * @offset: The offset to look up; adjusted on return to be relative to the
+ * returned drm_mm_node.
+ *
*/
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
unsigned long *offset)
@@ -512,8 +499,8 @@ error:
/**
* amdgpu_move_blit - Copy an entire buffer to another buffer
*
- * This is a helper called by amdgpu_bo_move() and
- * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
+ * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
+ * help move buffers to and from VRAM.
*/
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
@@ -595,7 +582,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
}
/* blit VRAM to GTT */
- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -647,7 +634,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
}
/* copy to VRAM */
- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -809,8 +796,8 @@ struct amdgpu_ttm_tt {
};
/**
- * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to
- * by a USERPTR pointer to memory
+ * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
+ * pointer to memory
*
* Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
* This provides a wrapper around the get_user_pages() call to provide
@@ -833,8 +820,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
down_read(&mm->mmap_sem);
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
- /* check that we only use anonymous memory
- to prevent problems with writeback */
+ /*
+ * check that we only use anonymous memory to prevent problems
+ * with writeback
+ */
unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
struct vm_area_struct *vma;
@@ -885,10 +874,9 @@ release_pages:
}
/**
- * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages
- * as necessary.
+ * amdgpu_ttm_tt_set_user_pages - Copy new pages in, releasing old pages as
+ * necessary.
*
- * Called by amdgpu_cs_list_validate(). This creates the page list
+ * Called by amdgpu_cs_list_validate(). This creates the page list
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
@@ -930,8 +918,7 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the
- * user pages
+ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
*
* Called by amdgpu_ttm_backend_bind()
**/
@@ -1310,8 +1297,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
- * for the current task
+ * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
+ * task
*
* @ttm: The ttm_tt object to bind this userptr object to
* @addr: The address in the current tasks VM space to use
@@ -1361,9 +1348,8 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays
- * inside an address range for the
- * current task.
+ * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
+ * address range for the current task.
*
*/
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
@@ -1401,8 +1387,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
}
/**
- * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
- * invalidated?
+ * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
*/
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated)
@@ -1415,10 +1400,8 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
}
/**
- * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
- * ttm_tt object been invalidated
- * since the last time they've
- * been set?
+ * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
+ * been invalidated since the last time they've been set?
*/
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
@@ -1474,13 +1457,12 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
}
/**
- * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict
- * a buffer object.
+ * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
+ * object.
*
- * Return true if eviction is sensible. Called by
- * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
- * which tries to evict buffer objects until it can find space
- * for a new object and by ttm_bo_force_list_clean() which is
+ * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
+ * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
+ * it can find space for a new object and by ttm_bo_force_list_clean() which is
* used to clean out a memory space.
*/
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
@@ -1530,8 +1512,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_ttm_access_memory - Read or Write memory that backs a
- * buffer object.
+ * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
*
* @bo: The buffer object to read/write
* @offset: Offset into buffer object
@@ -1695,7 +1676,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
AMDGPU_GEM_DOMAIN_VRAM,
adev->fw_vram_usage.start_offset,
(adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size), NULL);
+ adev->fw_vram_usage.size));
if (r)
goto error_pin;
r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
@@ -1719,8 +1700,8 @@ error_create:
return r;
}
/**
- * amdgpu_ttm_init - Init the memory management (ttm) as well as
- * various gtt/vram related fields.
+ * amdgpu_ttm_init - Init the memory management (ttm) as well as various
+ * gtt/vram related fields.
*
* This initializes all of the memory space pools that the TTM layer
* will need such as the GTT space (system memory mapped to the device),
@@ -1871,8 +1852,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_ttm_late_init - Handle any late initialization for
- * amdgpu_ttm
+ * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
*/
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
@@ -1921,10 +1901,30 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
uint64_t size;
+ int r;
- if (!adev->mman.initialized || adev->in_gpu_reset)
+ if (!adev->mman.initialized || adev->in_gpu_reset ||
+ adev->mman.buffer_funcs_enabled == enable)
return;
+ if (enable) {
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+
+ ring = adev->mman.buffer_funcs_ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
+ r);
+ return;
+ }
+ } else {
+ drm_sched_entity_destroy(&adev->mman.entity);
+ dma_fence_put(man->move);
+ man->move = NULL;
+ }
+
/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
if (enable)
size = adev->gmc.real_vram_size;
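Worth noting for readers tracking the API migration: drm_sched_entity_init() changed signature underneath this patch. The entity no longer binds to a single scheduler; it takes an array of candidate run queues plus a count. A side-by-side sketch reconstructed from the minus and plus lines in this file (illustrative, not the scheduler header's authoritative prototype):

    /* old: entity tied to one scheduler's run queue */
    r = drm_sched_entity_init(&ring->sched, &adev->mman.entity, rq, NULL);

    /* new: entity selects from an array of run queues */
    struct drm_sched_rq *rq =
            &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
    r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);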
@@ -2002,7 +2002,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
if (r)
goto error_free;
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
if (r)
goto error_free;
@@ -2071,24 +2071,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- if (direct_submit) {
- r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
- NULL, fence);
- job->fence = dma_fence_get(*fence);
- if (r)
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ if (direct_submit)
+ r = amdgpu_job_submit_direct(job, ring, fence);
+ else
+ r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
- if (r)
- goto error_free;
- }
+ if (r)
+ goto error_free;
return r;
error_free:
amdgpu_job_free(job);
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
return r;
}
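amdgpu_job_submit_direct() folds away the open-coded direct path shown in the minus lines above. A hedged reconstruction of what the removed code did, taken from those lines rather than from the helper's actual body:

    /* roughly the pattern the new helper replaces */
    r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
    job->fence = dma_fence_get(*fence);
    if (r)
            DRM_ERROR("Error scheduling IBs (%d)\n", r);
    amdgpu_job_free(job);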
@@ -2171,7 +2166,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e5da4654b630..8b3cc6687769 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -73,7 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 08e38579af24..bdc472b6e641 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -194,6 +194,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_SMC,
AMDGPU_UCODE_ID_UVD,
AMDGPU_UCODE_ID_VCE,
+ AMDGPU_UCODE_ID_VCN,
AMDGPU_UCODE_ID_MAXIMUM,
};
@@ -226,6 +227,9 @@ struct amdgpu_firmware_info {
void *kaddr;
/* ucode_size_bytes */
uint32_t ucode_size;
+ /* starting tmr mc address */
+ uint32_t tmr_mc_addr_lo;
+ uint32_t tmr_mc_addr_hi;
};
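The new tmr_mc_addr_lo/hi pair carries one 64-bit TMR MC address split across two 32-bit fields, the usual convention when a value must pass through 32-bit registers or descriptor words. A minimal, self-contained illustration (the variable names and the sample address are mine, not the driver's):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t tmr_mc_addr = 0x0000008012345000ULL;  /* made-up address */
        uint32_t lo = (uint32_t)(tmr_mc_addr & 0xffffffffULL);
        uint32_t hi = (uint32_t)(tmr_mc_addr >> 32);

        /* the consumer side reassembles the 64-bit value */
        uint64_t back = ((uint64_t)hi << 32) | lo;

        printf("lo=0x%08x hi=0x%08x back=0x%016llx\n",
               lo, hi, (unsigned long long)back);
        return 0;
}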
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 3ff08e326838..e5a6db6beab7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -53,11 +53,11 @@
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
-#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
-#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
-#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
-#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
+#define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin"
+#define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin"
+#define FIRMWARE_KAVERI "amdgpu/kaveri_uvd.bin"
+#define FIRMWARE_HAWAII "amdgpu/hawaii_uvd.bin"
+#define FIRMWARE_MULLINS "amdgpu/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
@@ -122,12 +122,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
- unsigned version_major, version_minor, family_id;
+ unsigned family_id;
int i, j, r;
INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
@@ -208,29 +206,46 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
- version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
- version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
- version_major, version_minor, family_id);
-
- /*
- * Limit the number of UVD handles depending on microcode major
- * and minor versions. The firmware version which has 40 UVD
- * instances support is 1.80. So all subsequent versions should
- * also have the same support.
- */
- if ((version_major > 0x01) ||
- ((version_major == 0x01) && (version_minor >= 0x50)))
- adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
- adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
- (family_id << 8));
+ if (adev->asic_type < CHIP_VEGA20) {
+ unsigned version_major, version_minor;
- if ((adev->asic_type == CHIP_POLARIS10 ||
- adev->asic_type == CHIP_POLARIS11) &&
- (adev->uvd.fw_version < FW_1_66_16))
- DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
- version_major, version_minor);
+ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ version_major, version_minor, family_id);
+
+ /*
+ * Limit the number of UVD handles depending on microcode major
+ * and minor versions. The firmware version which has 40 UVD
+ * instances support is 1.80. So all subsequent versions should
+ * also have the same support.
+ */
+ if ((version_major > 0x01) ||
+ ((version_major == 0x01) && (version_minor >= 0x50)))
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+ (family_id << 8));
+
+ if ((adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS11) &&
+ (adev->uvd.fw_version < FW_1_66_16))
+ DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+ version_major, version_minor);
+ } else {
+ unsigned int enc_major, enc_minor, dec_minor;
+
+ dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
+ enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
+ DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
+ enc_major, enc_minor, dec_minor, family_id);
+
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+ adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
+ }
bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
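On the Vega20 branch above, the single ucode_version word packs four fields. A self-contained demonstration of the decode, using the exact shifts and masks from the hunk (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* example header word; field layout as in the Vega20 branch */
        uint32_t ucode_version = 0x40000a2aU;

        unsigned family_id = ucode_version & 0xff;
        unsigned dec_minor = (ucode_version >> 8) & 0xff;
        unsigned enc_minor = (ucode_version >> 24) & 0x3f;
        unsigned enc_major = (ucode_version >> 30) & 0x3;

        printf("ENC: %u.%u DEC: .%u Family ID: %u\n",
               enc_major, enc_minor, dec_minor, family_id);
        return 0;
}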
@@ -238,7 +253,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
@@ -246,21 +262,13 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
}
+ }
- ring = &adev->uvd.inst[j].ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
- rq, NULL);
- if (r != 0) {
- DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
- return r;
- }
-
- for (i = 0; i < adev->uvd.max_handles; ++i) {
- atomic_set(&adev->uvd.inst[j].handles[i], 0);
- adev->uvd.inst[j].filp[i] = NULL;
- }
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+ atomic_set(&adev->uvd.handles[i], 0);
+ adev->uvd.filp[i] = NULL;
}
+
/* from uvd v5.0 HW addressing capacity increased to 64 bits */
if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
@@ -289,10 +297,12 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
- for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
- kfree(adev->uvd.inst[j].saved_bo);
+ drm_sched_entity_destroy(&adev->uvd.entity);
- drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
+ kvfree(adev->uvd.inst[j].saved_bo);
amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr,
@@ -308,6 +318,29 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_uvd_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+ int r;
+
+ ring = &adev->uvd.inst[0].ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD kernel entity.\n");
+ return r;
+ }
+
+ return 0;
+}
+
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
unsigned size;
@@ -316,24 +349,26 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
cancel_delayed_work_sync(&adev->uvd.idle_work);
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+ if (atomic_read(&adev->uvd.handles[i]))
+ break;
+
+ if (i == adev->uvd.max_handles)
+ return 0;
+ }
+
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
if (adev->uvd.inst[j].vcpu_bo == NULL)
continue;
- /* only valid for physical mode */
- if (adev->asic_type < CHIP_POLARIS10) {
- for (i = 0; i < adev->uvd.max_handles; ++i)
- if (atomic_read(&adev->uvd.inst[j].handles[i]))
- break;
-
- if (i == adev->uvd.max_handles)
- continue;
- }
-
size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
ptr = adev->uvd.inst[j].cpu_addr;
- adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
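The switch from kmalloc() to kvmalloc() matters because the VCPU BO backup can be large: kvmalloc() tries a physically contiguous allocation first and falls back to vmalloc(), and kvfree() releases either kind. A minimal sketch of the pairing as used here (assuming size comes from amdgpu_bo_size()):

    void *saved = kvmalloc(size, GFP_KERNEL);  /* may fall back to vmalloc */
    if (!saved)
            return -ENOMEM;
    /* ... copy the VCPU BO contents into the backup ... */
    kvfree(saved);  /* correct for both allocation paths */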
@@ -349,6 +384,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
if (adev->uvd.inst[i].vcpu_bo == NULL)
return -EINVAL;
@@ -357,7 +394,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
if (adev->uvd.inst[i].saved_bo != NULL) {
memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
- kfree(adev->uvd.inst[i].saved_bo);
+ kvfree(adev->uvd.inst[i].saved_bo);
adev->uvd.inst[i].saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
@@ -381,30 +418,27 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
- struct amdgpu_ring *ring;
- int i, j, r;
-
- for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
- ring = &adev->uvd.inst[j].ring;
+ struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
+ int i, r;
- for (i = 0; i < adev->uvd.max_handles; ++i) {
- uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
- if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
- struct dma_fence *fence;
-
- r = amdgpu_uvd_get_destroy_msg(ring, handle,
- false, &fence);
- if (r) {
- DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
- continue;
- }
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+ uint32_t handle = atomic_read(&adev->uvd.handles[i]);
- dma_fence_wait(fence, false);
- dma_fence_put(fence);
+ if (handle != 0 && adev->uvd.filp[i] == filp) {
+ struct dma_fence *fence;
- adev->uvd.inst[j].filp[i] = NULL;
- atomic_set(&adev->uvd.inst[j].handles[i], 0);
+ r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
+ &fence);
+ if (r) {
+ DRM_ERROR("Error destroying UVD %d!\n", r);
+ continue;
}
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+ adev->uvd.filp[i] = NULL;
+ atomic_set(&adev->uvd.handles[i], 0);
}
}
}
@@ -459,7 +493,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
if (cmd == 0x0 || cmd == 0x3) {
/* yes, force it into VRAM */
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
}
amdgpu_uvd_force_into_uvd_segment(bo);
@@ -679,16 +713,15 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
void *ptr;
long r;
int i;
- uint32_t ip_instance = ctx->parser->job->ring->me;
if (offset & 0x3F) {
- DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
+ DRM_ERROR("UVD messages must be 64 byte aligned!\n");
return -EINVAL;
}
r = amdgpu_bo_kmap(bo, &ptr);
if (r) {
- DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
+ DRM_ERROR("Failed mapping the UVD) message (%ld)!\n", r);
return r;
}
@@ -698,7 +731,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
handle = msg[2];
if (handle == 0) {
- DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
+ DRM_ERROR("Invalid UVD handle!\n");
return -EINVAL;
}
@@ -709,18 +742,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* try to alloc a new handle */
for (i = 0; i < adev->uvd.max_handles; ++i) {
- if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
- DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
+ if (atomic_read(&adev->uvd.handles[i]) == handle) {
+ DRM_ERROR(")Handle 0x%x already in use!\n",
+ handle);
return -EINVAL;
}
- if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
- adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
+ if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+ adev->uvd.filp[i] = ctx->parser->filp;
return 0;
}
}
- DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
+ DRM_ERROR("No more free UVD handles!\n");
return -ENOSPC;
case 1:
@@ -732,27 +766,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* validate the handle */
for (i = 0; i < adev->uvd.max_handles; ++i) {
- if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
- if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
- DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
+ if (atomic_read(&adev->uvd.handles[i]) == handle) {
+ if (adev->uvd.filp[i] != ctx->parser->filp) {
+ DRM_ERROR("UVD handle collision detected!\n");
return -EINVAL;
}
return 0;
}
}
- DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
+ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
return -ENOENT;
case 2:
/* it's a destroy msg, free the handle */
for (i = 0; i < adev->uvd.max_handles; ++i)
- atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
+ atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
amdgpu_bo_kunmap(bo);
return 0;
default:
- DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
return -EINVAL;
}
BUG();
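The allocation branch above claims a free slot with atomic_cmpxchg(&adev->uvd.handles[i], 0, handle), which either installs the handle or fails without taking a lock. A self-contained userspace analogue using C11 atomics (illustrative only, not driver code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

static _Atomic uint32_t handles[MAX_HANDLES];

/* claim the first free slot; returns the slot index or -1 if full */
static int claim_handle(uint32_t handle)
{
        for (int i = 0; i < MAX_HANDLES; i++) {
                uint32_t expected = 0;

                if (atomic_compare_exchange_strong(&handles[i], &expected,
                                                   handle))
                        return i;
        }
        return -1;
}

int main(void)
{
        printf("slot=%d\n", claim_handle(0xcafe));  /* prints slot=0 */
        printf("slot=%d\n", claim_handle(0xbeef));  /* prints slot=1 */
        return 0;
}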
@@ -1000,7 +1034,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (!ring->adev->uvd.address_64_bit) {
struct ttm_operation_ctx ctx = { true, false };
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
@@ -1045,19 +1079,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r < 0)
goto err_free;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
-
- amdgpu_job_free(job);
} else {
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r)
goto err_free;
- r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
+ r = amdgpu_job_submit(job, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err_free;
@@ -1149,6 +1180,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
unsigned fences = 0, i, j;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
@@ -1259,7 +1292,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
* necessarily linear. So we need to count
* all non-zero handles.
*/
- if (atomic_read(&adev->uvd.inst->handles[i]))
+ if (atomic_read(&adev->uvd.handles[i]))
used_handles++;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 8b23a1b00c76..a3ab1a41060f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -42,30 +42,34 @@ struct amdgpu_uvd_inst {
void *cpu_addr;
uint64_t gpu_addr;
void *saved_bo;
- atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
- struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct amdgpu_ring ring;
struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src irq;
- struct drm_sched_entity entity;
- struct drm_sched_entity entity_enc;
uint32_t srbm_soft_reset;
};
+#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
+#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
+
struct amdgpu_uvd {
const struct firmware *fw; /* UVD firmware */
unsigned fw_version;
unsigned max_handles;
unsigned num_enc_rings;
- uint8_t num_uvd_inst;
+ uint8_t num_uvd_inst;
bool address_64_bit;
bool use_ctx_buf;
- struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+ struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_sched_entity entity;
struct delayed_work idle_work;
+ unsigned harvest_config;
};
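The new harvest_config field is a per-instance disable mask: bit j set means UVD instance j was fused off and must be skipped, which is exactly what the added "if (adev->uvd.harvest_config & (1 << j)) continue;" checks in the .c hunks do. A tiny self-contained illustration of the pattern:

#include <stdio.h>

#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)

int main(void)
{
        unsigned harvest_config = AMDGPU_UVD_HARVEST_UVD1; /* inst 1 fused off */
        int num_uvd_inst = 2;

        for (int j = 0; j < num_uvd_inst; j++) {
                if (harvest_config & (1 << j))
                        continue;  /* skip the harvested instance */
                printf("initializing UVD instance %d\n", j);
        }
        return 0;
}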
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 23d960ec1cf2..0cc5190f4f36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -40,11 +40,11 @@
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin"
-#define FIRMWARE_KABINI "radeon/kabini_vce.bin"
-#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin"
-#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin"
-#define FIRMWARE_MULLINS "radeon/mullins_vce.bin"
+#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
+#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
+#define FIRMWARE_KAVERI "amdgpu/kaveri_vce.bin"
+#define FIRMWARE_HAWAII "amdgpu/hawaii_vce.bin"
+#define FIRMWARE_MULLINS "amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
@@ -90,8 +90,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
*/
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned ucode_version, version_major, version_minor, binary_id;
@@ -188,15 +186,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return r;
}
- ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
- rq, NULL);
- if (r != 0) {
- DRM_ERROR("Failed setting up VCE run queue.\n");
- return r;
- }
-
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
atomic_set(&adev->vce.handles[i], 0);
adev->vce.filp[i] = NULL;
@@ -222,7 +211,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
- drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+ drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
@@ -237,6 +226,29 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
}
/**
+ * amdgpu_vce_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_vce_entity_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+ int r;
+
+ ring = &adev->vce.ring[0];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCE run queue.\n");
+ return r;
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_vce_suspend - unpin VCE fw memory
*
* @adev: amdgpu_device pointer
@@ -470,12 +482,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -532,19 +542,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
- if (r)
- goto err;
-
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ if (direct)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err;
- }
+ if (r)
+ goto err;
if (fence)
*fence = dma_fence_get(f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 71781267ee4c..a1f209eed4c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -55,6 +55,7 @@ struct amdgpu_vce {
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
+int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 1b4ad9b2a755..fd654a4406db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -111,9 +111,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
version_major, version_minor, family_id);
}
- bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
- + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ AMDGPU_VCN_SESSION_SIZE * 40;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
@@ -129,7 +130,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i;
- kfree(adev->vcn.saved_bo);
+ kvfree(adev->vcn.saved_bo);
amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr,
@@ -140,6 +141,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
+ amdgpu_ring_fini(&adev->vcn.ring_jpeg);
+
release_firmware(adev->vcn.fw);
return 0;
@@ -158,7 +161,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
size = amdgpu_bo_size(adev->vcn.vcpu_bo);
ptr = adev->vcn.cpu_addr;
- adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->vcn.saved_bo)
return -ENOMEM;
@@ -180,18 +183,20 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
if (adev->vcn.saved_bo != NULL) {
memcpy_toio(ptr, adev->vcn.saved_bo, size);
- kfree(adev->vcn.saved_bo);
+ kvfree(adev->vcn.saved_bo);
adev->vcn.saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
unsigned offset;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
- size -= le32_to_cpu(hdr->ucode_size_bytes);
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ }
memset_io(ptr, 0, size);
}
@@ -209,6 +214,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
}
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
+
if (fences == 0) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, false);
@@ -225,7 +232,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (set_clocks && adev->pm.dpm_enabled) {
+ if (set_clocks) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, true);
else
@@ -304,13 +311,10 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
}
ib->length_dw = 16;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
- amdgpu_job_free(job);
-
amdgpu_bo_fence(bo, f, false);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
@@ -495,12 +499,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -549,12 +551,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -597,3 +597,127 @@ error:
dma_fence_put(fence);
return r;
}
+
+int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ const unsigned ib_size_dw = 16;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+
+ ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
+ ib->ptr[1] = 0xDEADBEEF;
+ for (i = 2; i < 16; i += 2) {
+ ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ib->ptr[i+1] = 0;
+ }
+ ib->length_dw = 16;
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ struct dma_fence *fence = NULL;
+ long r = 0;
+
+ r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+ goto error;
+ }
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ goto error;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto error;
+ } else
+ r = 0;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout)
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else {
+ DRM_ERROR("ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+
+ dma_fence_put(fence);
+
+error:
+ return r;
+}
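These helpers presumably get wired into the JPEG ring's function table so the common selftest machinery can invoke them; a hedged sketch under the assumption that the JPEG ring uses the same .test_ring/.test_ib hooks as the existing dec/enc rings (the struct name here is made up):

    static const struct amdgpu_ring_funcs vcn_jpeg_ring_funcs_sketch = {
            /* ... */
            .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
            .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
            /* ... */
    };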
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 773010b9ff15..0b0b8638d73f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -66,6 +66,7 @@ struct amdgpu_vcn {
const struct firmware *fw; /* VCN firmware */
struct amdgpu_ring ring_dec;
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+ struct amdgpu_ring ring_jpeg;
struct amdgpu_irq_src irq;
unsigned num_enc_rings;
};
@@ -83,4 +84,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index edf16b2b957a..ece0ac703e27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -33,9 +33,11 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_gmc.h"
-/*
- * GPUVM
+/**
+ * DOC: GPUVM
+ *
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
* for the entire GPU, there are multiple VM page tables active
@@ -63,37 +65,84 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
#undef START
#undef LAST
-/* Local structure. Encapsulate some VM table update parameters to reduce
+/**
+ * struct amdgpu_pte_update_params - Local structure
+ *
+ * Encapsulate some VM table update parameters to reduce
* the number of function parameters
+ *
*/
struct amdgpu_pte_update_params {
- /* amdgpu device we do this update for */
+
+ /**
+ * @adev: amdgpu device we do this update for
+ */
struct amdgpu_device *adev;
- /* optional amdgpu_vm we do this update for */
+
+ /**
+ * @vm: optional amdgpu_vm we do this update for
+ */
struct amdgpu_vm *vm;
- /* address where to copy page table entries from */
+
+ /**
+ * @src: address where to copy page table entries from
+ */
uint64_t src;
- /* indirect buffer to fill with commands */
+
+ /**
+ * @ib: indirect buffer to fill with commands
+ */
struct amdgpu_ib *ib;
- /* Function which actually does the update */
+
+ /**
+ * @func: Function which actually does the update
+ */
void (*func)(struct amdgpu_pte_update_params *params,
struct amdgpu_bo *bo, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags);
- /* The next two are used during VM update by CPU
- * DMA addresses to use for mapping
- * Kernel pointer of PD/PT BO that needs to be updated
+ /**
+ * @pages_addr:
+ *
+ * DMA addresses to use for mapping, used during VM update by CPU
*/
dma_addr_t *pages_addr;
+
+ /**
+ * @kptr:
+ *
+ * Kernel pointer of PD/PT BO that needs to be updated,
+ * used during VM update by CPU
+ */
void *kptr;
};
-/* Helper to disable partial resident texture feature from a fence callback */
+/**
+ * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
+ */
struct amdgpu_prt_cb {
+
+ /**
+ * @adev: amdgpu device
+ */
struct amdgpu_device *adev;
+
+ /**
+ * @cb: callback
+ */
struct dma_fence_cb cb;
};
+/**
+ * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
+ *
+ * @base: base structure for tracking BO usage in a VM
+ * @vm: vm to which bo is to be added
+ * @bo: amdgpu buffer object
+ *
+ * Initialize a bo_va_base structure and add it to the appropriate lists
+ *
+ */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
@@ -107,6 +156,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
return;
list_add_tail(&base->bo_list, &bo->va);
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ list_move(&base->vm_status, &vm->relocated);
+
if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
return;
@@ -126,8 +178,10 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
* amdgpu_vm_level_shift - return the addr shift for each level
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Returns the number of bits the pfn needs to be right shifted for a level.
+ * Returns:
+ * The number of bits the pfn needs to be right shifted for a level.
*/
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
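To make the new @level parameter concrete: with the common GFX9 layout, each directory level resolves 9 more address bits and the page table level needs no shift at all, since the pfn is already in GPU pages. A self-contained worked example under that assumption (the real function also honors a configurable block size):

#include <stdio.h>

/*
 * Assumed layout: 9-bit block size, levels 0..2 are page directories
 * (PDB2..PDB0) and level 3 is the page table (PTB).
 */
static unsigned level_shift(unsigned level)
{
        return level == 3 ? 0 : 9 * (3 - level);
}

int main(void)
{
        for (unsigned level = 0; level < 4; level++)
                printf("level %u: pfn >> %u\n", level, level_shift(level));
        return 0;  /* prints shifts 27, 18, 9, 0 */
}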
@@ -155,8 +209,10 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
* amdgpu_vm_num_entries - return the number of entries in a PD/PT
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Calculate the number of entries in a page directory or page table.
+ * Returns:
+ * The number of entries in a page directory or page table.
*/
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
unsigned level)
@@ -179,8 +235,10 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
* amdgpu_vm_bo_size - returns the size of the BOs in bytes
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Calculate the size of the BO for a page directory or page table in bytes.
+ * Returns:
+ * The size of the BO for a page directory or page table in bytes.
*/
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
@@ -218,6 +276,9 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
* @param: parameter for the validation callback
*
* Validate the page table BOs on command submission if necessary.
+ *
+ * Returns:
+ * Validation result.
*/
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*validate)(void *p, struct amdgpu_bo *bo),
@@ -273,6 +334,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @vm: VM to check
*
* Check if all VM PDs/PTs are ready for updates
+ *
+ * Returns:
+ * True if eviction list is empty.
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
@@ -283,10 +347,15 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* amdgpu_vm_clear_bo - initially clear the PDs/PTs
*
* @adev: amdgpu_device pointer
+ * @vm: VM to clear BO from
* @bo: BO to clear
* @level: level this BO is at
+ * @pte_support_ats: indicate ATS support from PTE
*
* Root PD needs to be reserved when calling this.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm, struct amdgpu_bo *bo,
@@ -318,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ats_entries = 0;
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
@@ -356,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
+ &fence);
if (r)
goto error_free;
@@ -382,10 +451,16 @@ error:
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @parent: parent PT
* @saddr: start of the address range
* @eaddr: end of the address range
+ * @level: VMPT level
+ * @ats: indicate ATS support from PTE
*
* Make sure the page directories and page tables are allocated
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -420,11 +495,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
eaddr = eaddr & ((1 << shift) - 1);
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ if (vm->root.base.bo->shadow)
+ flags |= AMDGPU_GEM_CREATE_SHADOW;
if (vm->use_cpu_for_update)
flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
else
- flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW);
+ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
@@ -468,7 +544,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- list_move(&entry->base.vm_status, &vm->relocated);
}
if (level < AMDGPU_VM_PTB) {
@@ -494,6 +569,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
* @size: Size from start address we need.
*
* Make sure the page tables are allocated.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -559,6 +637,15 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
}
}
+/**
+ * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
+ *
+ * @ring: ring on which the job will be submitted
+ * @job: job to submit
+ *
+ * Returns:
+ * True if sync is needed.
+ */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job)
{
@@ -586,19 +673,17 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
return vm_flush_needed || gds_switch_needed;
}
-static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
-{
- return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
-}
-
/**
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
- * @vmid: vmid number to use
- * @pd_addr: address of the page directory
+ * @job: related job
+ * @need_pipe_sync: is pipe sync needed
*
* Emit a VM flush when it is necessary.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
@@ -706,6 +791,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
* Returns the found bo_va or NULL if none is found
*
* Object has to be reserved!
+ *
+ * Returns:
+ * Found bo_va or NULL.
*/
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
@@ -787,7 +875,10 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
* @addr: the unmapped addr
*
* Look up the physical address of the page that the pte resolves
- * to and return the pointer for the page table entry.
+ * to.
+ *
+ * Returns:
+ * The pointer for the page table entry.
*/
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
@@ -840,6 +931,17 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
}
}
+
+/**
+ * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
+ * @owner: fence owner
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *owner)
{
@@ -893,7 +995,10 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
/*
* amdgpu_vm_invalidate_level - mark all PD levels as invalid
*
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
* @parent: parent PD
+ * @level: VMPT level
*
* Mark all PD level as invalid after an error.
*/
@@ -928,7 +1033,9 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
* @vm: requested vm
*
* Makes sure all directories are up to date.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
@@ -978,7 +1085,7 @@ restart:
struct amdgpu_vm_bo_base,
vm_status);
bo_base->moved = false;
- list_move(&bo_base->vm_status, &vm->idle);
+ list_del_init(&bo_base->vm_status);
bo = bo_base->bo->parent;
if (!bo)
@@ -1007,15 +1114,15 @@ restart:
struct amdgpu_ring *ring;
struct dma_fence *fence;
- ring = container_of(vm->entity.sched, struct amdgpu_ring,
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
sched);
amdgpu_ring_pad_ib(ring, params.ib);
amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
AMDGPU_FENCE_OWNER_VM, false);
WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
+ &fence);
if (r)
goto error;
@@ -1115,14 +1222,15 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
* @params: see amdgpu_pte_update_params definition
- * @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to, the next dst inside the function
* @flags: mapping flags
*
* Update the page tables in the range @start - @end.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
@@ -1176,7 +1284,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
* @end: last PTE to handle
* @dst: addr those PTEs should point to
* @flags: hw mapping flags
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
@@ -1248,7 +1358,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* @fence: optional resulting fence
*
* Fill in the page table entries between @start and @last.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
@@ -1292,7 +1404,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
addr, flags);
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
nptes = last - start + 1;
@@ -1324,7 +1436,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
ndw += ncmds * 10;
/* extra commands for begin/end fragments */
- ndw += 2 * 10 * adev->vm_manager.fragment_size;
+ if (vm->root.base.bo->shadow)
+ ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
+ else
+ ndw += 2 * 10 * adev->vm_manager.fragment_size;
params.func = amdgpu_vm_do_set_ptes;
}
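The doubling above accounts for shadow page tables: when the root BO has a shadow, every PDE/PTE write is mirrored into the shadow copy, so the worst-case command-stream estimate for begin/end fragments doubles as well. A compact restatement of the sizing, assuming the ~10 dwords per command figure used elsewhere in this function:

    /* 2 fragments (begin/end) * ~10 dwords, mirrored when shadowed */
    ndw += 2 * 10 * adev->vm_manager.fragment_size *
           (vm->root.base.bo->shadow ? 2 : 1);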
@@ -1371,8 +1486,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_ring_pad_ib(ring, params.ib);
WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error_free;
@@ -1400,7 +1514,9 @@ error_free:
*
* Split the mapping into smaller chunks so that each update fits
* into a SDMA IB.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
@@ -1453,7 +1569,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (nodes) {
addr = nodes->start << PAGE_SHIFT;
max_entries = (nodes->size - pfn) *
- (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
} else {
addr = 0;
max_entries = S64_MAX;
@@ -1464,7 +1580,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
max_entries = min(max_entries, 16ull * 1024ull);
for (count = 1;
- count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
++count) {
uint64_t idx = pfn + count;
@@ -1478,7 +1594,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
dma_addr = pages_addr;
} else {
addr = pages_addr[pfn];
- max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
} else if (flags & AMDGPU_PTE_VALID) {
@@ -1493,7 +1609,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
- pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
if (nodes && nodes->size == pfn) {
pfn = 0;
++nodes;
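AMDGPU_GPU_PAGES_IN_CPU_PAGE simply names the old PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE expression: 1 on 4 KiB-page hosts and 16 on 64 KiB-page hosts such as some POWER configurations. A self-contained illustration of the conversions in this hunk (the constants are assumptions for the example):

#include <stdint.h>
#include <stdio.h>

#define AMDGPU_GPU_PAGE_SIZE 4096
#define CPU_PAGE_SIZE 65536  /* e.g. a 64 KiB-page host */
#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (CPU_PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)

int main(void)
{
        uint64_t node_cpu_pages = 3;  /* size of a drm_mm_node in CPU pages */

        printf("node covers %llu GPU page table entries\n",
               (unsigned long long)(node_cpu_pages *
                                    AMDGPU_GPU_PAGES_IN_CPU_PAGE));
        return 0;
}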
@@ -1513,7 +1629,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
* @clear: if true clear the entries
*
* Fill in the page table entries for @bo_va.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
@@ -1529,18 +1647,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
uint64_t flags;
int r;
- if (clear || !bo_va->base.bo) {
+ if (clear || !bo) {
mem = NULL;
nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
- mem = &bo_va->base.bo->tbo.mem;
+ mem = &bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT) {
- ttm = container_of(bo_va->base.bo->tbo.ttm,
- struct ttm_dma_tt, ttm);
+ ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
exclusive = reservation_object_get_excl(bo->tbo.resv);
@@ -1608,6 +1725,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
/**
* amdgpu_vm_update_prt_state - update the global PRT state
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
@@ -1622,6 +1741,8 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_get - add a PRT user
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
@@ -1634,6 +1755,8 @@ static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_put - drop a PRT user
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
@@ -1643,6 +1766,9 @@ static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_cb - callback for updating the PRT status
+ *
+ * @fence: fence for the callback
+ * @_cb: the callback function
*/
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
@@ -1654,6 +1780,9 @@ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
/**
* amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ *
+ * @adev: amdgpu_device pointer
+ * @fence: fence for the callback
*/
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
struct dma_fence *fence)
@@ -1745,9 +1874,11 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
* or if an error occurred)
*
* Make sure all freed BOs are cleared in the PT.
- * Returns 0 for success.
- *
* PTs have to be reserved and mutex must be locked!
+ *
+ * Returns:
+ * 0 for success.
+ *
*/
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -1792,10 +1923,11 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @sync: sync object to add fences to
*
* Make sure all BOs which are moved are updated in the PTs.
- * Returns 0 for success.
+ *
+ * Returns:
+ * 0 for success.
*
* PTs have to be reserved!
*/
@@ -1850,7 +1982,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
*
* Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm.
- * Returns newly added bo_va or NULL for failure
+ *
+ * Returns:
+ * Newly added bo_va or NULL for failure
*
* Object has to be reserved!
*/
@@ -1913,10 +2047,13 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
* @bo_va: bo_va to store the address
* @saddr: where to map the BO
* @offset: requested offset in the BO
+ * @size: BO size in bytes
* @flags: attributes of pages (read/write/valid/etc.)
*
 * Add a mapping of the BO at the specified addr into the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -1974,11 +2111,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
* @bo_va: bo_va to store the address
* @saddr: where to map the BO
* @offset: requested offset in the BO
+ * @size: BO size in bytes
* @flags: attributes of pages (read/write/valid/etc.)
*
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
* mappings as we do so.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -2035,7 +2175,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 * @saddr: where the BO is mapped
*
 * Remove a mapping of the BO at the specified addr from the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -2089,7 +2231,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
* @size: size of the range
*
* Remove all mappings in a range, split them as appropriate.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -2186,8 +2330,13 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
* amdgpu_vm_bo_lookup_mapping - find mapping by address
*
* @vm: the requested VM
+ * @addr: the address
*
 * Find a mapping by its address.
+ *
+ * Returns:
+ * The amdgpu_bo_va_mapping matching addr, or NULL
+ *
*/
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr)
@@ -2196,6 +2345,35 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
}
/**
+ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
+ *
+ * @vm: the requested vm
+ * @ticket: CS ticket
+ *
+ * Trace all mappings of BOs reserved during a command submission.
+ */
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+
+ if (!trace_amdgpu_vm_bo_cs_enabled())
+ return;
+
+ for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
+ mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
+ if (mapping->bo_va && mapping->bo_va->base.bo) {
+ struct amdgpu_bo *bo;
+
+ bo = mapping->bo_va->base.bo;
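+			/* skip BOs whose reservation is not held under this CS ticket */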
+ if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+ continue;
+ }
+
+ trace_amdgpu_vm_bo_cs(mapping);
+ }
+}
+
+/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
*
* @adev: amdgpu_device pointer
@@ -2239,8 +2417,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
* @adev: amdgpu_device pointer
- * @vm: requested vm
* @bo: amdgpu buffer object
+ * @evicted: whether the BO is evicted
*
* Mark @bo as invalid.
*/
@@ -2280,6 +2458,14 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
}
+/**
+ * amdgpu_vm_get_block_size - calculate VM page table size as power of two
+ *
+ * @vm_size: VM size
+ *
+ * Returns:
+ * VM page table size as a power of two
+ */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
/* Total bits covered by PD + PTs */
@@ -2298,6 +2484,10 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
*
* @adev: amdgpu_device pointer
* @vm_size: the default vm size if it's set auto
+ * @fragment_size_default: Default PTE fragment size
+ * @max_level: max VMPT level
+ * @max_bits: max address space size in bits
+ *
*/
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
uint32_t fragment_size_default, unsigned max_level,
@@ -2365,8 +2555,12 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
* @adev: amdgpu_device pointer
* @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
+ * @pasid: Process address space identifier
*
* Init @vm fields.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid)
@@ -2398,8 +2592,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ring_instance %= adev->vm_manager.vm_pte_num_rings;
ring = adev->vm_manager.vm_pte_rings[ring_instance];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&ring->sched, &vm->entity,
- rq, NULL);
+ r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
if (r)
return r;
@@ -2417,14 +2610,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+ WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
vm->last_update = NULL;
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
if (vm->use_cpu_for_update)
flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- else
+ else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
flags |= AMDGPU_GEM_CREATE_SHADOW;
size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
@@ -2479,7 +2672,7 @@ error_free_root:
vm->root.base.bo = NULL;
error_free_sched_entity:
- drm_sched_entity_fini(&ring->sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
return r;
}
@@ -2487,6 +2680,9 @@ error_free_sched_entity:
/**
* amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
*
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
* This only works on GFX VMs that don't have any BOs added and no
* page tables allocated yet.
*
@@ -2496,10 +2692,10 @@ error_free_sched_entity:
* - pasid (old PASID is released, because compute manages its own PASIDs)
*
* Reinitializes the page directory to reflect the changed ATS
- * setting. May leave behind an unused shadow BO for the page
- * directory when switching from SDMA updates to CPU updates.
+ * setting.
*
- * Returns 0 for success, -errno for errors.
+ * Returns:
+ * 0 for success, -errno for errors.
*/
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
@@ -2533,7 +2729,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pte_support_ats = pte_support_ats;
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+ WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->pasid) {
@@ -2546,6 +2742,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pasid = 0;
}
+ /* Free the shadow bo for compute VM */
+ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+
error:
amdgpu_bo_unreserve(vm->root.base.bo);
return r;
@@ -2612,7 +2811,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2654,8 +2853,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
 * @pasid: PASID to identify the VM
*
- * This function is expected to be called in interrupt context. Returns
- * true if there was fault credit, false otherwise
+ * This function is expected to be called in interrupt context.
+ *
+ * Returns:
+ * True if there was fault credit, false otherwise
*/
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
unsigned int pasid)
@@ -2709,7 +2910,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
#ifdef CONFIG_X86_64
if (amdgpu_vm_update_mode == -1) {
- if (amdgpu_vm_is_large_bar(adev))
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
adev->vm_manager.vm_update_mode =
AMDGPU_VM_USE_CPU_FOR_COMPUTE;
else
@@ -2739,6 +2940,16 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
amdgpu_vmid_mgr_fini(adev);
}
+/**
+ * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_vm
+ * @filp: drm file pointer
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
union drm_amdgpu_vm *args = data;
@@ -2762,3 +2973,42 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return 0;
}
+
+/**
+ * amdgpu_vm_get_task_info - Extracts task info for a PASID.
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID identifier for VM
+ * @task_info: task_info to fill.
+ */
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+ struct amdgpu_task_info *task_info)
+{
+ struct amdgpu_vm *vm;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm)
+ *task_info = vm->task_info;
+
+ spin_unlock(&adev->vm_manager.pasid_lock);
+}
+
+/**
+ * amdgpu_vm_set_task_info - Sets VMs task info.
+ *
+ * @vm: vm for which to set the info
+ */
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
+{
+ if (!vm->task_info.pid) {
+ vm->task_info.pid = current->pid;
+ get_task_comm(vm->task_info.task_name, current);
+
+ if (current->group_leader->mm == current->mm) {
+ vm->task_info.tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info.process_name, current->group_leader);
+ }
+ }
+}
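
For orientation, the helper above records the submitting thread once per VM and never overwrites it, so later fault reports name the original owner. A rough userspace analogue of that fill-once pattern (POSIX assumed; the fixed names are placeholders):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#define TASK_COMM_LEN 16

struct task_info {
	char process_name[TASK_COMM_LEN];
	char task_name[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
};

/* fill the info only once, mirroring the "if (!vm->task_info.pid)" guard */
static void set_task_info(struct task_info *ti)
{
	if (!ti->pid) {
		ti->pid = (pid_t)syscall(SYS_gettid);	/* thread id */
		ti->tgid = getpid();			/* process id */
		strncpy(ti->task_name, "worker", TASK_COMM_LEN - 1);
		strncpy(ti->process_name, "app", TASK_COMM_LEN - 1);
	}
}

int main(void)
{
	struct task_info ti = { 0 };

	set_task_info(&ti);
	printf("%s/%s pid=%d tgid=%d\n", ti.process_name, ti.task_name,
	       (int)ti.pid, (int)ti.tgid);
	return 0;
}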
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 061b99a18cb8..67a15d439ac0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -164,6 +164,14 @@ struct amdgpu_vm_pt {
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
+
+struct amdgpu_task_info {
+ char process_name[TASK_COMM_LEN];
+ char task_name[TASK_COMM_LEN];
+ pid_t pid;
+ pid_t tgid;
+};
+
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
@@ -215,6 +223,9 @@ struct amdgpu_vm {
/* Valid while the PD is reserved or fenced */
uint64_t pd_phys_addr;
+
+ /* Some basic info about the task */
+ struct amdgpu_task_info task_info;
};
struct amdgpu_vm_manager {
@@ -307,6 +318,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr);
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
@@ -317,4 +329,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+ struct amdgpu_task_info *task_info);
+
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index b6333f92ba45..9cfa8a9ada92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,33 +97,29 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
}
/**
- * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
*
* @bo: &amdgpu_bo buffer object (must be in VRAM)
*
* Returns:
- * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
*/
-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_mem_reg *mem = &bo->tbo.mem;
struct drm_mm_node *nodes = mem->mm_node;
unsigned pages = mem->num_pages;
- u64 usage = 0;
+ u64 usage;
- if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
- return 0;
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+ return amdgpu_bo_size(bo);
if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
- return amdgpu_bo_size(bo);
+ return 0;
- while (nodes && pages) {
- usage += nodes->size << PAGE_SHIFT;
- usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
- pages -= nodes->size;
- ++nodes;
- }
+ for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
+ usage += amdgpu_vram_mgr_vis_size(adev, nodes);
return usage;
}
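
The rewritten loop above counts visible bytes directly instead of deriving them by subtraction. A small userspace sketch of the same accounting, with a simplified stand-in for amdgpu_vram_mgr_vis_size() (node sizes in pages and simple window clipping are assumptions):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

struct mm_node { uint64_t start, size; };	/* both in pages, simplified */

/* visible bytes of one node, clipped against the visible window */
static uint64_t vis_size(uint64_t visible_pages, const struct mm_node *n)
{
	uint64_t end = n->start + n->size;
	uint64_t vis_end = end < visible_pages ? end : visible_pages;

	if (n->start >= visible_pages)
		return 0;
	return (vis_end - n->start) << PAGE_SHIFT;
}

int main(void)
{
	struct mm_node nodes[] = { { 0, 64 }, { 1000, 128 } };
	uint64_t visible_pages = 256, usage = 0;

	/* sum visible bytes directly, as the rewritten loop does */
	for (unsigned i = 0; i < 2; i++)
		usage += vis_size(visible_pages, &nodes[i]);

	printf("visible usage: %llu bytes\n", (unsigned long long)usage);
	return 0;
}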
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 7fbad2f5f0bd..d2469453dca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -49,10 +49,10 @@
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
-MODULE_FIRMWARE("radeon/bonaire_smc.bin");
-MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -951,12 +951,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
else
pi->battery_state = false;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
- if (adev->pm.dpm.ac_power == false) {
+ if (adev->pm.ac_power == false) {
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].mclk > max_limits->mclk)
ps->performance_levels[i].mclk = max_limits->mclk;
@@ -4078,7 +4078,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4127,7 +4127,7 @@ static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4160,7 +4160,7 @@ static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4191,7 +4191,7 @@ static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -5815,7 +5815,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err)
goto out;
@@ -5846,8 +5846,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
adev->pm.dpm.priv = pi;
pi->sys_pcie_mask =
- (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
- CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
@@ -6767,6 +6766,19 @@ static int ci_dpm_read_sensor(void *handle, int idx,
}
}
+static int ci_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+{
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ ci_dpm_powergate_uvd(handle, gate);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init,
@@ -6804,7 +6816,7 @@ static const struct amd_pm_funcs ci_dpm_funcs = {
.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
.force_performance_level = &ci_dpm_force_performance_level,
.vblank_too_short = &ci_dpm_vblank_too_short,
- .powergate_uvd = &ci_dpm_powergate_uvd,
+ .set_powergating_by_smu = &ci_set_powergating_by_smu,
.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 8ff4c60d1b59..78ab939ae5d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1476,7 +1476,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
- mdelay(100);
+ msleep(100);
/* linkctl */
pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
@@ -2003,9 +2003,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
if (amdgpu_dpm == -1)
- amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
- else
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -2024,9 +2024,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
if (amdgpu_dpm == -1)
- amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
- else
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index a7576255cc30..d0fa2aac2388 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -54,16 +54,16 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(void *handle);
-MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
-MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
@@ -132,9 +132,9 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
if (err)
goto out;
@@ -177,9 +177,8 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+ return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
}
/**
@@ -192,9 +191,8 @@ static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
(lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}
@@ -248,7 +246,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
u32 ref_and_mask;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
else
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -1290,8 +1288,10 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
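
Caching the instance index in ring->me at init time lets the hot paths above index sdma_offsets[] directly instead of comparing ring pointers on every register access. A toy illustration of that init-once pattern:

#include <stdio.h>

struct ring_sketch { int me; };			/* cached instance index */
struct sdma_instance_sketch { struct ring_sketch ring; };

int main(void)
{
	struct sdma_instance_sketch inst[2];

	/* set once at init time, as cik_sdma_set_ring_funcs() now does */
	for (int i = 0; i < 2; i++)
		inst[i].ring.me = i;

	/* later accesses index register offsets directly, no pointer compare */
	struct ring_sketch *ring = &inst[1].ring;
	printf("use sdma_offsets[%d]\n", ring->me);
	return 0;
}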
static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index ada241bfeee9..308f9f238bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -41,6 +41,8 @@
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1855,15 +1857,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2370,13 +2371,14 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v10_0_lock_cursor(crtc, true);
@@ -2737,14 +2739,14 @@ static int dce_v10_0_sw_init(void *handle)
return r;
}
- for (i = 8; i < 20; i += 2) {
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
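
The same two-step change repeats in the dce_v11_0, dce_v6_0 and dce_v8_0 hunks below: amdgpu_bo_pin() no longer reports the GPU address, so callers query amdgpu_bo_gpu_offset() after a successful pin, on both the atomic and non-atomic paths. A toy model of the split API (every name here is a stand-in, not a driver function):

#include <stdio.h>

struct toy_bo {
	int pin_count;
	unsigned long long gpu_addr;
};

static int toy_bo_pin(struct toy_bo *bo)
{
	if (!bo->pin_count++)
		bo->gpu_addr = 0x8000000ULL;	/* placement chosen at pin time */
	return 0;
}

static unsigned long long toy_bo_gpu_offset(const struct toy_bo *bo)
{
	return bo->gpu_addr;			/* valid while pin_count > 0 */
}

int main(void)
{
	struct toy_bo bo = { 0 };
	unsigned long long fb_location;

	if (toy_bo_pin(&bo))
		return 1;
	fb_location = toy_bo_gpu_offset(&bo);	/* both paths query here now */
	printf("fb_location = 0x%llx\n", fb_location);
	return 0;
}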
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index a5b96eac3033..76dfb76f7900 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -41,6 +41,8 @@
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1897,15 +1899,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2449,13 +2450,14 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v11_0_lock_cursor(crtc, true);
@@ -2858,14 +2860,14 @@ static int dce_v11_0_sw_init(void *handle)
return r;
}
- for (i = 8; i < 20; i += 2) {
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 394cc1e8fe20..c9adc627305d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1811,15 +1811,14 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2263,13 +2262,14 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v6_0_lock_cursor(crtc, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c9b9ab8f1b05..50cd03beac7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1786,15 +1786,14 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2274,13 +2273,14 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v8_0_lock_cursor(crtc, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index dbf2ccd0c744..15257634a53a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -36,6 +36,7 @@
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "dce_virtual.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
@@ -269,25 +270,18 @@ static int dce_virtual_early_init(void *handle)
static struct drm_encoder *
dce_virtual_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
return encoder;
}
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -378,7 +372,7 @@ static int dce_virtual_sw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
if (r)
return r;
@@ -634,7 +628,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
drm_connector_register(connector);
/* link them */
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index cd6bf291a853..de184a886057 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -44,30 +44,30 @@ static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
-MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
-MODULE_FIRMWARE("radeon/tahiti_me.bin");
-MODULE_FIRMWARE("radeon/tahiti_ce.bin");
-MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
-
-MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
-MODULE_FIRMWARE("radeon/pitcairn_me.bin");
-MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
-MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
-
-MODULE_FIRMWARE("radeon/verde_pfp.bin");
-MODULE_FIRMWARE("radeon/verde_me.bin");
-MODULE_FIRMWARE("radeon/verde_ce.bin");
-MODULE_FIRMWARE("radeon/verde_rlc.bin");
-
-MODULE_FIRMWARE("radeon/oland_pfp.bin");
-MODULE_FIRMWARE("radeon/oland_me.bin");
-MODULE_FIRMWARE("radeon/oland_ce.bin");
-MODULE_FIRMWARE("radeon/oland_rlc.bin");
-
-MODULE_FIRMWARE("radeon/hainan_pfp.bin");
-MODULE_FIRMWARE("radeon/hainan_me.bin");
-MODULE_FIRMWARE("radeon/hainan_ce.bin");
-MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
+MODULE_FIRMWARE("amdgpu/verde_me.bin");
+MODULE_FIRMWARE("amdgpu/verde_ce.bin");
+MODULE_FIRMWARE("amdgpu/verde_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
+MODULE_FIRMWARE("amdgpu/oland_me.bin");
+MODULE_FIRMWARE("amdgpu/oland_ce.bin");
+MODULE_FIRMWARE("amdgpu/oland_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
+MODULE_FIRMWARE("amdgpu/hainan_me.bin");
+MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
+MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
@@ -335,7 +335,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -346,7 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -357,7 +357,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -368,7 +368,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 42b6144c1fd5..95452c5a9df6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -57,36 +57,36 @@ static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
-MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
-MODULE_FIRMWARE("radeon/bonaire_me.bin");
-MODULE_FIRMWARE("radeon/bonaire_ce.bin");
-MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
-MODULE_FIRMWARE("radeon/bonaire_mec.bin");
-
-MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
-MODULE_FIRMWARE("radeon/hawaii_me.bin");
-MODULE_FIRMWARE("radeon/hawaii_ce.bin");
-MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
-MODULE_FIRMWARE("radeon/hawaii_mec.bin");
-
-MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
-MODULE_FIRMWARE("radeon/kaveri_me.bin");
-MODULE_FIRMWARE("radeon/kaveri_ce.bin");
-MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
-MODULE_FIRMWARE("radeon/kaveri_mec.bin");
-MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
-
-MODULE_FIRMWARE("radeon/kabini_pfp.bin");
-MODULE_FIRMWARE("radeon/kabini_me.bin");
-MODULE_FIRMWARE("radeon/kabini_ce.bin");
-MODULE_FIRMWARE("radeon/kabini_rlc.bin");
-MODULE_FIRMWARE("radeon/kabini_mec.bin");
-
-MODULE_FIRMWARE("radeon/mullins_pfp.bin");
-MODULE_FIRMWARE("radeon/mullins_me.bin");
-MODULE_FIRMWARE("radeon/mullins_ce.bin");
-MODULE_FIRMWARE("radeon/mullins_rlc.bin");
-MODULE_FIRMWARE("radeon/mullins_mec.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
+
+MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
+MODULE_FIRMWARE("amdgpu/kabini_me.bin");
+MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
+MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
+MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
+MODULE_FIRMWARE("amdgpu/mullins_me.bin");
+MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
+MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
+MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
@@ -925,7 +925,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -933,7 +933,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -941,7 +941,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -949,7 +949,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -958,7 +958,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
goto out;
if (adev->asic_type == CHIP_KAVERI) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -967,7 +967,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 818874b13c99..5cd45210113f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -51,6 +51,8 @@
#include "smu/smu_7_1_3_d.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
#define GFX8_NUM_GFX_RINGS 1
#define GFX8_MEC_HPD_SIZE 2048
@@ -704,6 +706,17 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
};
+
+static const char * const sq_edc_source_names[] = {
+ "SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
+ "SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
+ "SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
+ "SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
+ "SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
+ "SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
+ "SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
+};
+
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
@@ -866,26 +879,32 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- uint32_t scratch;
- uint32_t tmp = 0;
+
+ unsigned int index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
long r;
- r = amdgpu_gfx_scratch_get(adev, &scratch);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
- WREG32(scratch, 0xCAFEDEAD);
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
- ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
- ib.ptr[2] = 0xDEADBEEF;
- ib.length_dw = 3;
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
@@ -900,20 +919,21 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
- tmp = RREG32(scratch);
+
+ tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF) {
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ DRM_ERROR("ib test on ring %d failed\n", ring->idx);
r = -EINVAL;
}
+
err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err1:
- amdgpu_gfx_scratch_free(adev, scratch);
+ amdgpu_device_wb_free(adev, index);
return r;
}
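
The rewritten IB test polls a writeback (WB) slot in system memory instead of a scratch register, so it also exercises the GPU's memory path. A userspace sketch of the 5-dword WRITE_DATA packet it builds (the PACKET3/WRITE_DATA encodings below restate the driver's definitions from memory and should be treated as assumptions):

#include <stdio.h>
#include <stdint.h>

#define PACKET3(op, n)	((3u << 30) | (((op) & 0xFF) << 8) | ((n) & 0x3FFF))
#define PACKET3_WRITE_DATA	0x37
#define WRITE_DATA_DST_SEL(x)	((x) << 8)	/* 5 = memory */
#define WR_CONFIRM		(1u << 20)

int main(void)
{
	uint64_t gpu_addr = 0x100000;	/* writeback slot address, assumed */
	uint32_t ib[5];

	ib[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib[2] = (uint32_t)gpu_addr;		/* lower 32 bits */
	ib[3] = (uint32_t)(gpu_addr >> 32);	/* upper 32 bits */
	ib[4] = 0xDEADBEEF;			/* value polled back from the slot */

	for (int i = 0; i < 5; i++)
		printf("ib[%d] = 0x%08x\n", i, ib[i]);
	return 0;
}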
@@ -1999,6 +2019,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
return 0;
}
+static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
+
static int gfx_v8_0_sw_init(void *handle)
{
int i, j, k, r, ring_id;
@@ -2027,27 +2049,43 @@ static int gfx_v8_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
+ /* Add CP EDC/ECC irq */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
+ /* SQ interrupts. */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
+ &adev->gfx.sq_irq);
+ if (r) {
+ DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
+ return r;
+ }
+
+ INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
+
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
gfx_v8_0_scratch_init(adev);
@@ -5111,6 +5149,10 @@ static int gfx_v8_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+
+ amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
+
/* disable KCQ to avoid CPC touch memory not valid anymore */
for (i = 0; i < adev->gfx.num_compute_rings; i++)
gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
@@ -5542,9 +5584,19 @@ static int gfx_v8_0_late_init(void *handle)
if (r)
return r;
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r) {
+ DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
+ return r;
+ }
+
+ r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
+ if (r) {
+ DRM_ERROR(
+ "amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
+ r);
+ return r;
+ }
return 0;
}
@@ -5552,14 +5604,12 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- if ((adev->asic_type == CHIP_POLARIS11) ||
+ if (((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM))
+ (adev->asic_type == CHIP_VEGAM)) &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
/* Send msg to SMU via Powerplay */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- enable ?
- AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
@@ -6787,6 +6837,77 @@ static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ int enable_flag;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ enable_flag = 0;
+ break;
+
+ case AMDGPU_IRQ_STATE_ENABLE:
+ enable_flag = 1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+
+ return 0;
+}
+
+static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ int enable_flag;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ enable_flag = 1;
+ break;
+
+ case AMDGPU_IRQ_STATE_ENABLE:
+ enable_flag = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
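+	/* STALL=1 suppresses SQ interrupt messages, hence the inverted flag */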
+ WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
+ enable_flag);
+
+ return 0;
+}
+
static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -6837,6 +6958,114 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("CP EDC/ECC error detected.");
+ return 0;
+}
+
+static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
+{
+ u32 enc, se_id, sh_id, cu_id;
+ char type[20];
+ int sq_edc_source = -1;
+
+ enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
+ se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
+
+ switch (enc) {
+ case 0:
+ DRM_INFO("SQ general purpose intr detected:"
+ "se_id %d, immed_overflow %d, host_reg_overflow %d,"
+ "host_cmd_overflow %d, cmd_timestamp %d,"
+ "reg_timestamp %d, thread_trace_buff_full %d,"
+ "wlt %d, thread_trace %d.\n",
+ se_id,
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
+ );
+ break;
+ case 1:
+ case 2:
+
+ cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
+ sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
+
+		/*
+		 * This function can be called either directly from the ISR
+		 * or from the deferred work handler; only in the latter case
+		 * (in_task()) is it safe to take grbm_idx_mutex and read the
+		 * SQ_EDC_INFO register.
+		 */
+ if (in_task()) {
+ mutex_lock(&adev->grbm_idx_mutex);
+ gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
+
+ sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
+
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (enc == 1)
+ sprintf(type, "instruction intr");
+ else
+ sprintf(type, "EDC/ECC error");
+
+ DRM_INFO(
+ "SQ %s detected: "
+ "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
+ "trap %s, sq_ed_info.source %s.\n",
+ type, se_id, sh_id, cu_id,
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
+ (sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
+ );
+ break;
+ default:
+ DRM_ERROR("SQ invalid encoding type\n.");
+ }
+}
+
+static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
+{
+
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
+ struct sq_work *sq_work = container_of(work, struct sq_work, work);
+
+ gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
+}
+
+static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned ih_data = entry->src_data[0];
+
+ /*
+ * Try to submit work so SQ_EDC_INFO can be accessed from
+ * BH. If previous work submission hasn't finished yet
+ * just print whatever info is possible directly from the ISR.
+ */
+ if (work_pending(&adev->gfx.sq_work.work)) {
+ gfx_v8_0_parse_sq_irq(adev, ih_data);
+ } else {
+ adev->gfx.sq_work.ih_data = ih_data;
+ schedule_work(&adev->gfx.sq_work.work);
+ }
+
+ return 0;
+}
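
gfx_v8_0_sq_irq() defers the expensive register read to a workqueue and falls back to inline parsing only when the previous work item is still pending. A stripped-down, kernel-style sketch of that handoff (all identifiers here are hypothetical, not part of the patch):

#include <linux/workqueue.h>
#include <linux/printk.h>

struct sq_work_sketch {
	struct work_struct work;
	unsigned int ih_data;
};

static struct sq_work_sketch sq_sketch;

static void sq_sketch_func(struct work_struct *work)
{
	struct sq_work_sketch *sq =
		container_of(work, struct sq_work_sketch, work);

	/* process context: sleeping locks and register reads are safe */
	pr_info("deferred SQ data 0x%x\n", sq->ih_data);
}

/* one-time setup, e.g. from sw_init */
static void sq_sketch_init(void)
{
	INIT_WORK(&sq_sketch.work, sq_sketch_func);
}

/* hot path, called from the interrupt handler */
static void sq_sketch_irq(unsigned int ih_data)
{
	if (work_pending(&sq_sketch.work)) {
		/* previous event still queued: degrade gracefully inline */
		pr_info("inline SQ data 0x%x\n", ih_data);
	} else {
		sq_sketch.ih_data = ih_data;
		schedule_work(&sq_sketch.work);
	}
}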
+
static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned int type,
@@ -7037,6 +7266,16 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
.process = gfx_v8_0_kiq_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
+ .set = gfx_v8_0_set_cp_ecc_int_state,
+ .process = gfx_v8_0_cp_ecc_error_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
+ .set = gfx_v8_0_set_sq_int_state,
+ .process = gfx_v8_0_sq_irq,
+};
+
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -7050,6 +7289,12 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
+
+ adev->gfx.cp_ecc_error_irq.num_types = 1;
+ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
+
+ adev->gfx.sq_irq.num_types = 1;
+ adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
}
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index a69153435ea7..ef00d14f8645 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -38,6 +38,8 @@
#include "clearstate_gfx9.h"
#include "v9_structs.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -102,11 +104,22 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -648,7 +661,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- if (adev->gfx.rlc.is_rlc_v2_1) {
+ if (adev->gfx.rlc.is_rlc_v2_1 &&
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
+ adev->gfx.rlc.save_restore_list_srm_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
info->fw = adev->gfx.rlc_fw;
@@ -943,6 +959,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
dst_ptr = adev->gfx.rlc.cs_ptr;
gfx_v9_0_get_csb_buffer(adev, dst_ptr);
amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
@@ -971,6 +988,39 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return 0;
}
+static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (!r)
+ adev->gfx.rlc.clear_state_gpu_addr =
+ amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ return r;
+}
+
+static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!adev->gfx.rlc.clear_state_obj)
+ return;
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+ if (likely(r == 0)) {
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+}
+
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1451,23 +1501,23 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -2148,8 +2198,16 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
{
- if (!adev->gfx.rlc.is_rlc_v2_1)
- return;
+ gfx_v9_0_init_csb(adev);
+
+ /*
+ * The RLC save/restore list works from v2_1 on,
+ * and it's needed by the gfxoff feature.
+ */
+ if (adev->gfx.rlc.is_rlc_v2_1) {
+ gfx_v9_1_init_rlc_save_restore_list(adev);
+ gfx_v9_0_enable_save_restore_machine(adev);
+ }
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
@@ -2157,10 +2215,6 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS)) {
- gfx_v9_0_init_csb(adev);
- gfx_v9_1_init_rlc_save_restore_list(adev);
- gfx_v9_0_enable_save_restore_machine(adev);
-
WREG32(mmRLC_JUMP_TABLE_RESTORE,
adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v9_0_init_gfx_power_gating(adev);
@@ -2252,9 +2306,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
- /* disable PG */
- WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
-
gfx_v9_0_rlc_reset(adev);
gfx_v9_0_init_pg(adev);
@@ -3116,6 +3167,10 @@ static int gfx_v9_0_hw_init(void *handle)
gfx_v9_0_gpu_init(adev);
+ r = gfx_v9_0_csb_vram_pin(adev);
+ if (r)
+ return r;
+
r = gfx_v9_0_rlc_resume(adev);
if (r)
return r;
@@ -3224,6 +3279,8 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_cp_enable(adev, false);
gfx_v9_0_rlc_stop(adev);
+ gfx_v9_0_csb_vram_unpin(adev);
+
return 0;
}
@@ -3433,7 +3490,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
@@ -3510,8 +3567,11 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+
+ if (adev->asic_type != CHIP_VEGA12)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
+
+ data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
@@ -3541,11 +3601,15 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
} else {
/* 1 - MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
+
+ if (adev->asic_type != CHIP_VEGA12)
+ data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
+
+ data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -3581,9 +3645,11 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
/* update CGCG and CGLS override bits */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
- /* enable 3Dcgcg FSM(0x0020003f) */
+
+ /* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
- data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+
+ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3630,9 +3696,10 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
- /* enable cgcg FSM(0x0020003F) */
+ /* enable cgcg FSM(0x0000363F) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
- data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+
+ data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3714,6 +3781,15 @@ static int gfx_v9_0_set_powergating_state(void *handle,
/* update mgcg state */
gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
+
+ /* set gfx off through smu */
+ if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
+ break;
+ case CHIP_VEGA12:
+ /* set gfx off through smu */
+ if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
break;
default:
break;
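
The new gfx_v9_0_csb_vram_pin()/gfx_v9_0_csb_vram_unpin() pair moves the clear-state buffer pin out of rlc_init and into the hw_init/hw_fini path, so the BO stays evictable while the block is down. A minimal sketch of the resulting lifecycle, using only the helpers added above:

	/* Sketch: CSB pin lifecycle after this change. */
	static int example_gfx_hw_init(struct amdgpu_device *adev)
	{
		int r;

		r = gfx_v9_0_csb_vram_pin(adev);   /* pins BO, caches clear_state_gpu_addr */
		if (r)
			return r;
		return gfx_v9_0_rlc_resume(adev);  /* RLC consumes the cached address */
	}

	static int example_gfx_hw_fini(struct amdgpu_device *adev)
	{
		gfx_v9_0_cp_enable(adev, false);
		gfx_v9_0_rlc_stop(adev);
		gfx_v9_0_csb_vram_unpin(adev);     /* BO may now be evicted on suspend */
		return 0;
	}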
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 79f9ac29019b..75317f283c69 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -41,11 +41,11 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);
-MODULE_FIRMWARE("radeon/tahiti_mc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
-MODULE_FIRMWARE("radeon/verde_mc.bin");
-MODULE_FIRMWARE("radeon/oland_mc.bin");
-MODULE_FIRMWARE("radeon/si58_mc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
+MODULE_FIRMWARE("amdgpu/verde_mc.bin");
+MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -134,9 +134,9 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
is_58_fw = true;
if (is_58_fw)
- snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
else
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 7147bfe25a23..36dc367c4b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -28,6 +28,7 @@
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"
@@ -43,12 +44,14 @@
#include "amdgpu_atombios.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);
-MODULE_FIRMWARE("radeon/bonaire_mc.bin");
-MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
static const u32 golden_settings_iceland_a11[] =
@@ -147,10 +150,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- if (adev->asic_type == CHIP_TOPAZ)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
if (err)
@@ -999,11 +999,11 @@ static int gmc_v7_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1079,6 +1079,12 @@ static int gmc_v7_0_sw_init(void *handle)
adev->vm_manager.vram_base_offset = 0;
}
+ adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
return 0;
}
@@ -1088,6 +1094,7 @@ static int gmc_v7_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
+ kfree(adev->gmc.vm_fault_info);
gmc_v7_0_gart_fini(adev);
amdgpu_bo_fini(adev);
release_firmware(adev->gmc.fw);
@@ -1277,7 +1284,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u32 addr, status, mc_client;
+ u32 addr, status, mc_client, vmid;
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
@@ -1302,6 +1309,29 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
entry->pasid);
}
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+ && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
+
+ info->vmid = vmid;
+ info->mc_id = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
+ info->status = status;
+ info->page_addr = addr;
+ info->prot_valid = protections & 0x7 ? true : false;
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
return 0;
}
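
The handler above publishes at most one fault snapshot at a time: it fills *vm_fault_info, issues mb() so the stores are ordered, then raises vm_fault_info_updated. A hypothetical consumer (the real reader lives in amdkfd and is not part of this patch) would mirror that order:

	/* Hypothetical consumer sketch; the actual reader is in amdkfd. */
	static bool example_take_vm_fault(struct amdgpu_device *adev,
					  struct kfd_vm_fault_info *out)
	{
		if (!atomic_read(&adev->gmc.vm_fault_info_updated))
			return false;            /* nothing published yet */
		*out = *adev->gmc.vm_fault_info; /* copy the snapshot */
		mb();                            /* order the copy before re-arming */
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
		return true;
	}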
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1edbe6b477b5..70fc97b59b4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
@@ -44,6 +45,7 @@
#include "amdgpu_atombios.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1101,11 +1103,11 @@ static int gmc_v8_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1181,6 +1183,12 @@ static int gmc_v8_0_sw_init(void *handle)
adev->vm_manager.vram_base_offset = 0;
}
+ adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
return 0;
}
@@ -1190,6 +1198,7 @@ static int gmc_v8_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
+ kfree(adev->gmc.vm_fault_info);
gmc_v8_0_gart_fini(adev);
amdgpu_bo_fini(adev);
release_firmware(adev->gmc.fw);
@@ -1425,7 +1434,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u32 addr, status, mc_client;
+ u32 addr, status, mc_client, vmid;
if (amdgpu_sriov_vf(adev)) {
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
@@ -1447,8 +1456,13 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
gmc_v8_0_set_fault_enable_default(adev, false);
if (printk_ratelimit()) {
- dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
- entry->src_id, entry->src_data[0]);
+ struct amdgpu_task_info task_info = { 0 };
+
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
+ entry->src_id, entry->src_data[0], task_info.process_name,
+ task_info.tgid, task_info.task_name, task_info.pid);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1457,6 +1471,29 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
entry->pasid);
}
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+ && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
+
+ info->vmid = vmid;
+ info->mc_id = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
+ info->status = status;
+ info->page_addr = addr;
+ info->prot_valid = protections & 0x7 ? true : false;
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3c0a85d4e4ab..399a5db27649 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -43,6 +43,8 @@
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
+#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -257,12 +259,17 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
}
if (printk_ratelimit()) {
+ struct amdgpu_task_info task_info = { 0 };
+
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
dev_err(adev->dev,
- "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
+ "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d\n)\n",
entry->vmid_src ? "mmhub" : "gfxhub",
entry->src_id, entry->ring_id, entry->vmid,
- entry->pasid);
- dev_err(adev->dev, " at page 0x%016llx from %d\n",
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+ dev_err(adev->dev, " at address 0x%016llx from %d\n",
addr, entry->client_id);
if (!amdgpu_sriov_vf(adev))
dev_err(adev->dev,
@@ -872,9 +879,9 @@ static int gmc_v9_0_sw_init(void *handle)
}
/* This interrupt is VMC page fault. */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7a1e77c93bf1..3f57f6463dc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1921,7 +1921,7 @@ static int kv_dpm_set_power_state(void *handle)
int ret;
if (pi->bapm_enable) {
- ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
+ ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
if (ret) {
DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
return ret;
@@ -3306,6 +3306,19 @@ static int kv_dpm_read_sensor(void *handle, int idx,
}
}
+static int kv_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+{
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ kv_dpm_powergate_uvd(handle, gate);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
@@ -3342,7 +3355,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = {
.print_power_state = &kv_dpm_print_power_state,
.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
.force_performance_level = &kv_dpm_force_performance_level,
- .powergate_uvd = &kv_dpm_powergate_uvd,
+ .set_powergating_by_smu = kv_set_powergating_by_smu,
.enable_bapm = &kv_dpm_enable_bapm,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.check_state_equal = kv_check_state_equal,
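
With powergate_uvd folded into set_powergating_by_smu, callers gate UVD through the generic block-type entry point rather than a dedicated hook; a call like the sketch below lands in kv_set_powergating_by_smu()'s switch:

	/* Sketch: gating UVD through the generic entry point added above. */
	if (adev->powerplay.pp_funcs->set_powergating_by_smu)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, true);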
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3d53c4413f13..e70a0d4d6db4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -471,8 +471,8 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
RENG_EXECUTE_ON_REG_UPDATE, 1);
WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
- if (adev->powerplay.pp_funcs->set_mmhub_powergating_by_smu)
- amdgpu_dpm_set_mmhub_powergating_by_smu(adev);
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
} else {
pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 0ff136d02d9b..02be34e72ed9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -88,6 +88,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
case AMDGPU_UCODE_ID_VCE:
*type = GFX_FW_TYPE_VCE;
break;
+ case AMDGPU_UCODE_ID_VCN:
+ *type = GFX_FW_TYPE_VCN;
+ break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index c7190c39c4f5..15ae4bc9c072 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -44,6 +44,8 @@
#include "iceland_sdma_pkt_open.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -202,8 +204,7 @@ static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
- u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+ u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
return wptr;
}
@@ -218,9 +219,8 @@ static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
}
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@@ -273,7 +273,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -898,7 +898,7 @@ static int sdma_v2_4_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -910,7 +910,7 @@ static int sdma_v2_4_sw_init(void *handle)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1213,8 +1213,10 @@ static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
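
Each ring now carries its instance index in ring->me, assigned once in set_ring_funcs before any ring I/O can happen, so the hot rptr/wptr and HDP-flush paths index by it directly instead of re-deriving the instance via pointer comparison. The invariant, in sketch form:

	/* Sketch: ring->me must be set before the first register access. */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
		adev->sdma.instance[i].ring.me = i; /* read by get/set_wptr, hdp_flush */
	}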
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index aa9ab299fd32..1e07ff274d73 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -44,6 +44,8 @@
#include "tonga_sdma_pkt_open.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -365,9 +367,7 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
/* XXX check if swapping is necessary on BE */
wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
} else {
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
- wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+ wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
}
return wptr;
@@ -394,9 +394,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
} else {
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
}
}
@@ -450,7 +448,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -1179,7 +1177,7 @@ static int sdma_v3_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -1191,7 +1189,7 @@ static int sdma_v3_0_sw_init(void *handle)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1655,8 +1653,10 @@ static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ca53b3fba422..e7ca4623cfb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -38,6 +38,9 @@
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"
+#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
+#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
@@ -296,13 +299,12 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
u32 lowbit, highbit;
- int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
- highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
+ lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
+ highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
- me, highbit, lowbit);
+ ring->me, highbit, lowbit);
wptr = highbit;
wptr = wptr << 32;
wptr |= lowbit;
@@ -339,17 +341,15 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
DRM_DEBUG("Not using doorbell -- "
"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
"mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- me,
+ ring->me,
lower_32_bits(ring->wptr << 2),
- me,
+ ring->me,
upper_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
}
}
@@ -430,7 +430,7 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
else
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
@@ -1228,13 +1228,13 @@ static int sdma_v4_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -1651,8 +1651,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 5c97a3671726..db327b412562 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -56,16 +56,16 @@
#define BIOS_SCRATCH_4 0x5cd
-MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
-MODULE_FIRMWARE("radeon/verde_smc.bin");
-MODULE_FIRMWARE("radeon/verde_k_smc.bin");
-MODULE_FIRMWARE("radeon/oland_smc.bin");
-MODULE_FIRMWARE("radeon/oland_k_smc.bin");
-MODULE_FIRMWARE("radeon/hainan_smc.bin");
-MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
-MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/verde_smc.bin");
+MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/oland_smc.bin");
+MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
static const struct amd_pm_funcs si_dpm_funcs;
@@ -3480,7 +3480,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
disable_sclk_switching = true;
}
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -3489,7 +3489,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
}
- if (adev->pm.dpm.ac_power == false) {
+ if (adev->pm.ac_power == false) {
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].mclk > max_limits->mclk)
ps->performance_levels[i].mclk = max_limits->mclk;
@@ -7318,8 +7318,7 @@ static int si_dpm_init(struct amdgpu_device *adev)
pi = &eg_pi->rv7xx;
si_pi->sys_pcie_mask =
- (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
- CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
@@ -7667,7 +7666,7 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index 8dc29107228f..edfe50821cd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -53,6 +53,29 @@
#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
+#define PACKETJ_CONDITION_CHECK0 0
+#define PACKETJ_CONDITION_CHECK1 1
+#define PACKETJ_CONDITION_CHECK2 2
+#define PACKETJ_CONDITION_CHECK3 3
+#define PACKETJ_CONDITION_CHECK4 4
+#define PACKETJ_CONDITION_CHECK5 5
+#define PACKETJ_CONDITION_CHECK6 6
+#define PACKETJ_CONDITION_CHECK7 7
+
+#define PACKETJ_TYPE0 0
+#define PACKETJ_TYPE1 1
+#define PACKETJ_TYPE2 2
+#define PACKETJ_TYPE3 3
+#define PACKETJ_TYPE4 4
+#define PACKETJ_TYPE5 5
+#define PACKETJ_TYPE6 6
+#define PACKETJ_TYPE7 7
+
+#define PACKETJ(reg, r, cond, type) ((reg & 0x3FFFF) | \
+ ((r & 0x3F) << 18) | \
+ ((cond & 0xF) << 24) | \
+ ((type & 0xF) << 28))
+
/* Packet 3 types */
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
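
PACKETJ() packs a JPEG-engine command header into one dword: bits 17:0 carry the register offset, 23:18 the r field, 27:24 the condition, 31:28 the packet type. A worked example, using a made-up register offset:

	/* 0x8004 is a placeholder offset, not a real register. */
	u32 hdr0 = PACKETJ(0x8004, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0);
	/* = (0x8004 & 0x3FFFF) | (0 << 18) | (0 << 24) | (0 << 28) = 0x00008004 */

	u32 hdr6 = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
	/* = 6 << 28 = 0x60000000 */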
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 6fed3d7797a8..8a926d1df939 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -123,6 +123,10 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_entity_init(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 341ee6d55ce8..50248059412e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -35,6 +35,7 @@
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -104,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
int r;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
@@ -119,6 +120,10 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_entity_init(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index bfddf97dd13e..6ae82cc2e55e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -36,6 +36,7 @@
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
@@ -247,12 +248,10 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -311,19 +310,13 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
- if (r)
- goto err;
-
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ if (direct)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err;
- }
+ if (r)
+ goto err;
if (fence)
*fence = dma_fence_get(f);
@@ -400,14 +393,14 @@ static int uvd_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
/* UVD ENC TRAP */
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
if (r)
return r;
}
@@ -425,16 +418,6 @@ static int uvd_v6_0_sw_init(void *handle)
adev->uvd.num_enc_rings = 0;
DRM_INFO("UVD ENC is disabled\n");
- } else {
- struct drm_sched_rq *rq;
- ring = &adev->uvd.inst->ring_enc[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
- rq, NULL);
- if (r) {
- DRM_ERROR("Failed setting up UVD ENC run queue.\n");
- return r;
- }
}
r = amdgpu_uvd_resume(adev);
@@ -457,6 +440,8 @@ static int uvd_v6_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_entity_init(adev);
+
return r;
}
@@ -470,8 +455,6 @@ static int uvd_v6_0_sw_fini(void *handle)
return r;
if (uvd_v6_0_enc_support(adev)) {
- drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
-
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
}
@@ -1569,7 +1552,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
- .nop = PACKET0(mmUVD_NO_OP, 0),
.support_64bit_ptrs = false,
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
@@ -1587,7 +1569,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = uvd_v6_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
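
The refactor above collapses the open-coded amdgpu_ib_schedule() + dma_fence_get() + amdgpu_job_free() sequence into amdgpu_job_submit_direct(), leaving two clearly separated submission paths; in sketch form, assuming the same helpers:

	/* Sketch: the two submission paths after the refactor. */
	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);  /* bypasses the scheduler */
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;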
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 57d32f21b3a6..9b7f8469bc5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -39,6 +39,13 @@
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
+#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
+
+#define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7
+#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1
+//UVD_PG0_CC_UVD_HARVESTING
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
#define UVD7_MAX_HW_INSTANCES_VEGA20 2
@@ -249,12 +256,10 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -312,19 +317,13 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
- if (r)
- goto err;
-
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ if (direct)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err;
- }
+ if (r)
+ goto err;
if (fence)
*fence = dma_fence_get(f);
@@ -377,10 +376,25 @@ error:
static int uvd_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_VEGA20)
+
+ if (adev->asic_type == CHIP_VEGA20) {
+ u32 harvest;
+ int i;
+
adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
- else
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
+ if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
+ adev->uvd.harvest_config |= 1 << i;
+ }
+ }
+ if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
+ AMDGPU_UVD_HARVEST_UVD1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
+ } else {
adev->uvd.num_uvd_inst = 1;
+ }
if (amdgpu_sriov_vf(adev))
adev->uvd.num_enc_rings = 1;
@@ -396,19 +410,21 @@ static int uvd_v7_0_early_init(void *handle)
static int uvd_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+
int i, j, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq);
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
if (r)
return r;
/* UVD ENC TRAP */
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq);
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
if (r)
return r;
}
@@ -428,22 +444,13 @@ static int uvd_v7_0_sw_init(void *handle)
DRM_INFO("PSP loading UVD firmware\n");
}
- for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
- ring = &adev->uvd.inst[j].ring_enc[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
- rq, NULL);
- if (r) {
- DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
- return r;
- }
- }
-
r = amdgpu_uvd_resume(adev);
if (r)
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
sprintf(ring->name, "uvd<%d>", j);
@@ -472,6 +479,10 @@ static int uvd_v7_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_entity_init(adev);
+ if (r)
+ return r;
+
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
@@ -491,8 +502,8 @@ static int uvd_v7_0_sw_fini(void *handle)
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
- drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
-
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
@@ -521,6 +532,8 @@ static int uvd_v7_0_hw_init(void *handle)
goto done;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
ring = &adev->uvd.inst[j].ring;
if (!amdgpu_sriov_vf(adev)) {
@@ -600,8 +613,11 @@ static int uvd_v7_0_hw_fini(void *handle)
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
- for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].ring.ready = false;
+ }
return 0;
}
@@ -644,6 +660,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
@@ -716,6 +734,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
adev->uvd.inst[i].ring_enc[0].wptr = 0;
@@ -772,6 +792,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
init_table += header->uvd_table_offset;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
ring = &adev->uvd.inst[i].ring;
ring->wptr = 0;
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
@@ -911,6 +933,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
int i, j, k, r;
for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+ if (adev->uvd.harvest_config & (1 << k))
+ continue;
/* disable DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
@@ -923,6 +947,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
uvd_v7_0_mc_resume(adev);
for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+ if (adev->uvd.harvest_config & (1 << k))
+ continue;
ring = &adev->uvd.inst[k].ring;
/* disable clock gating */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
@@ -1090,6 +1116,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
uint8_t i = 0;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
/* force RBC into idle state */
WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
@@ -1227,6 +1255,34 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
}
/**
+ * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
+ *
+ * @p: the CS parser with the IBs
+ * @ib_idx: which IB to patch
+ *
+ */
+static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ uint32_t ib_idx)
+{
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ unsigned i;
+
+ /* No patching necessary for the first instance */
+ if (!p->ring->me)
+ return 0;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+
+ reg -= p->adev->reg_offset[UVD_HWIP][0][1];
+ reg += p->adev->reg_offset[UVD_HWIP][1][1];
+
+ amdgpu_set_ib_value(p, ib_idx, i, reg);
+ }
+ return 0;
+}
+
+/**
* uvd_v7_0_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
@@ -1718,6 +1774,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
+ .patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
.emit_frame_size =
6 + /* hdp invalidate */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
@@ -1777,6 +1834,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
adev->uvd.inst[i].ring.me = i;
DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
@@ -1788,6 +1847,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
int i, j;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
adev->uvd.inst[j].ring_enc[i].me = j;
@@ -1807,6 +1868,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
}
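
Every per-instance loop in this file now opens with the same harvest guard, so fused-off engines are never programmed; the representative shape:

	/* Representative loop shape used throughout the hunks above. */
	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;   /* instance i is fused off -- skip it */
		/* program only live instance i */
	}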
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 47f70827195b..7eaa54ba016b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -56,7 +56,7 @@ static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(mmVCE_RB_RPTR);
else
return RREG32(mmVCE_RB_RPTR2);
@@ -73,7 +73,7 @@ static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(mmVCE_RB_WPTR);
else
return RREG32(mmVCE_RB_WPTR2);
@@ -90,7 +90,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
else
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
@@ -439,6 +439,8 @@ static int vce_v2_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_vce_entity_init(adev);
+
return r;
}
@@ -627,8 +629,10 @@ static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
+ adev->vce.ring[i].me = i;
+ }
}
static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 0999c843f623..c8390f9adfd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -39,6 +39,7 @@
#include "smu/smu_7_1_2_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
@@ -86,9 +87,9 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
v = RREG32(mmVCE_RB_RPTR);
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
v = RREG32(mmVCE_RB_RPTR2);
else
v = RREG32(mmVCE_RB_RPTR3);
@@ -118,9 +119,9 @@ static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
v = RREG32(mmVCE_RB_WPTR);
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
v = RREG32(mmVCE_RB_WPTR2);
else
v = RREG32(mmVCE_RB_WPTR3);
@@ -149,9 +150,9 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
else
WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
@@ -422,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
int r, i;
/* VCE */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
if (r)
return r;
@@ -447,6 +448,8 @@ static int vce_v3_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_vce_entity_init(adev);
+
return r;
}
@@ -900,7 +903,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.emit_frame_size =
4 + /* vce_v3_0_emit_pipeline_sync */
6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
- .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -924,7 +927,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
6 + /* vce_v3_0_emit_vm_flush */
4 + /* vce_v3_0_emit_pipeline_sync */
6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
- .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+ .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
.emit_ib = vce_v3_0_ring_emit_ib,
.emit_vm_flush = vce_v3_0_emit_vm_flush,
.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -942,12 +945,16 @@ static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
if (adev->asic_type >= CHIP_STONEY) {
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+ adev->vce.ring[i].me = i;
+ }
DRM_INFO("VCE enabled in VM mode\n");
} else {
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+ adev->vce.ring[i].me = i;
+ }
DRM_INFO("VCE enabled in physical mode\n");
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 8fd1b742985a..2e4d1b5f6243 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -39,6 +39,8 @@
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
+#include "ivsrcid/vce/irqsrcs_vce_4_0.h"
+
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V4_0_FW_SIZE (384 * 1024)
@@ -60,9 +62,9 @@ static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
else
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
@@ -82,9 +84,9 @@ static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
else
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
@@ -108,10 +110,10 @@ static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
return;
}
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
lower_32_bits(ring->wptr));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
lower_32_bits(ring->wptr));
else
@@ -417,6 +419,7 @@ static int vce_v4_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
+
unsigned size;
int r, i;
@@ -436,7 +439,7 @@ static int vce_v4_0_sw_init(void *handle)
const struct common_firmware_header *hdr;
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
- adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->vce.saved_bo)
return -ENOMEM;
@@ -472,6 +475,11 @@ static int vce_v4_0_sw_init(void *handle)
return r;
}
+
+ r = amdgpu_vce_entity_init(adev);
+ if (r)
+ return r;
+
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
@@ -488,7 +496,7 @@ static int vce_v4_0_sw_fini(void *handle)
amdgpu_virt_free_mm_table(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- kfree(adev->vce.saved_bo);
+ kvfree(adev->vce.saved_bo);
adev->vce.saved_bo = NULL;
}
@@ -1088,8 +1096,10 @@ static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
+ adev->vce.ring[i].me = i;
+ }
DRM_INFO("VCE enabled in VM mode\n");
}
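
saved_bo can be as large as the whole VCE VCPU BO, so a plain kmalloc() may fail to find contiguous pages; kvmalloc() falls back to vmalloc() for large sizes and must be paired with kvfree(), as the sw_fini hunk above now does:

	/* Sketch: size-tolerant allocation, paired release. */
	void *buf = kvmalloc(size, GFP_KERNEL);  /* may be vmalloc-backed */
	if (!buf)
		return -ENOMEM;
	/* use buf */
	kvfree(buf);                             /* never kfree() here */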
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 29684c3ea4ef..072371ef5975 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -35,10 +35,14 @@
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
+
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
/**
* vcn_v1_0_early_init - set function pointers
@@ -55,6 +59,7 @@ static int vcn_v1_0_early_init(void *handle)
vcn_v1_0_set_dec_ring_funcs(adev);
vcn_v1_0_set_enc_ring_funcs(adev);
+ vcn_v1_0_set_jpeg_ring_funcs(adev);
vcn_v1_0_set_irq_funcs(adev);
return 0;
@@ -74,22 +79,37 @@ static int vcn_v1_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCN DEC TRAP */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
if (r)
return r;
/* VCN ENC TRAP */
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
&adev->vcn.irq);
if (r)
return r;
}
+ /* VCN JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
+ if (r)
+ return r;
+
r = amdgpu_vcn_sw_init(adev);
if (r)
return r;
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ const struct common_firmware_header *hdr;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+ DRM_INFO("PSP loading VCN firmware\n");
+ }
+
r = amdgpu_vcn_resume(adev);
if (r)
return r;
@@ -108,6 +128,12 @@ static int vcn_v1_0_sw_init(void *handle)
return r;
}
+ ring = &adev->vcn.ring_jpeg;
+ sprintf(ring->name, "vcn_jpeg");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+ if (r)
+ return r;
+
return r;
}
@@ -162,6 +188,14 @@ static int vcn_v1_0_hw_init(void *handle)
}
}
+ ring = &adev->vcn.ring_jpeg;
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
+
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully.\n");
@@ -241,26 +275,38 @@ static int vcn_v1_0_resume(void *handle)
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
-
- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ uint32_t offset;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ offset = size;
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + size));
+ lower_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + size));
+ upper_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
@@ -578,12 +624,12 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
/* disable byte swapping */
lmi_swap_cntl = 0;
- vcn_v1_0_mc_resume(adev);
-
vcn_1_0_disable_static_power_gating(adev);
/* disable clock gating */
vcn_v1_0_disable_clock_gating(adev);
+ vcn_v1_0_mc_resume(adev);
+
/* disable interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
~UVD_MASTINT_EN__VCPU_EN_MASK);
@@ -729,6 +775,22 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ ring = &adev->vcn.ring_jpeg;
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+
+ /* initialize wptr */
+ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+
+ /* copy patch commands to the jpeg ring */
+ vcn_v1_0_jpeg_ring_set_patch_ring(ring,
+ (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+
return 0;
}
@@ -1126,6 +1188,383 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
+
+/**
+ * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address the fence value is written to
+ * @seq: sequence number to write
+ * @flags: fence flags
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0xffffffff);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x3fbc);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x1);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vmid, bool ctx_switch)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, ib->length_dw);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+ amdgpu_ring_write(ring, 0x2);
+}
+
+static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val,
+ uint32_t mask)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, val);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE3));
+ }
+ amdgpu_ring_write(ring, mask);
+}
+
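
The same pair of address-range tests recurs below in vcn_v1_0_jpeg_ring_emit_wreg() and vcn_v1_0_jpeg_ring_patch_wreg(). A hedged sketch of the predicate factored out (the helper name is invented for illustration):

#include <linux/types.h>

/* Registers in these two UVD apertures cannot be reached through a
 * direct offset and must be written via the JRBC external-register
 * window instead. */
static inline bool vcn_v1_0_jpeg_reg_is_windowed(uint32_t reg_offset)
{
	return (reg_offset >= 0x1f800 && reg_offset <= 0x21fff) ||
	       (reg_offset >= 0x1e000 && reg_offset <= 0x1e1ff);
}
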
+static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ uint32_t data0, data1, mask;
+
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+ /* wait for register write */
+ data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ }
+ amdgpu_ring_write(ring, val);
+}
+
+static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+
+ WARN_ON(ring->wptr % 2 || count % 2);
+
+ for (i = 0; i < count / 2; i++) {
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
+static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+{
+ struct amdgpu_device *adev = ring->adev;
+ ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ ring->ring[(*ptr)++] = 0;
+ ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
+ } else {
+ ring->ring[(*ptr)++] = reg_offset;
+ ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
+ }
+ ring->ring[(*ptr)++] = val;
+}
+
+static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ uint32_t reg, reg_offset, val, mask, i;
+
+ // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
+ reg_offset = (reg << 2);
+ val = lower_32_bits(ring->gpu_addr);
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
+ reg_offset = (reg << 2);
+ val = upper_32_bits(ring->gpu_addr);
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 3rd to 5th: issue MEM_READ commands
+ for (i = 0; i <= 2; i++) {
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
+ ring->ring[ptr++] = 0;
+ }
+
+ // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x13;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 7th: program mmUVD_JRBC_RB_REF_DATA
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
+ reg_offset = (reg << 2);
+ val = 0x1;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x1;
+ mask = 0x1;
+
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
+ ring->ring[ptr++] = 0x01400200;
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
+ ring->ring[ptr++] = val;
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ ring->ring[ptr++] = 0;
+ ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
+ } else {
+ ring->ring[ptr++] = reg_offset;
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
+ }
+ ring->ring[ptr++] = mask;
+
+ // 9th to 21st: insert no-op
+ for (i = 0; i <= 12; i++) {
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ring->ring[ptr++] = 0;
+ }
+
+ // 22nd: reset mmUVD_JRBC_RB_RPTR
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
+ reg_offset = (reg << 2);
+ val = 0;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x12;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+}
+
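
For context on where these patch commands live: vcn_v1_0_start() above passes ring->wptr + ring->max_dw * amdgpu_sched_hw_submission as the starting ptr, i.e. the commands sit one full submission window past the area normal submissions can reach, in the extra_dw tail declared in the ring funcs below. A hedged sketch of that arithmetic, using only names that appear in this file:

/* The patch commands start one full submission window past the ring's
 * normal command area, so regular submissions never overwrite them. */
static uint32_t jpeg_patch_cmd_offset(struct amdgpu_ring *ring)
{
	return lower_32_bits(ring->wptr) +
	       ring->max_dw * amdgpu_sched_hw_submission;
}
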
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1150,6 +1589,9 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
case 120:
amdgpu_fence_process(&adev->vcn.ring_enc[1]);
break;
+ case 126:
+ amdgpu_fence_process(&adev->vcn.ring_jpeg);
+ break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -1273,6 +1715,39 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .nop = PACKET0(0x81ff, 0),
+ .support_64bit_ptrs = false,
+ .vmhub = AMDGPU_MMHUB,
+ .extra_dw = 64,
+ .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
+ .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
+ .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
+ .emit_frame_size =
+ 6 + 6 + /* hdp invalidate / flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
+ 14 + 14 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
+ 6,
+ .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
+ .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
+ .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
+ .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
+ .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
+ .insert_nop = vcn_v1_0_jpeg_ring_nop,
+ .insert_start = vcn_v1_0_jpeg_ring_insert_start,
+ .insert_end = vcn_v1_0_jpeg_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
+ .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
+};
+
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
@@ -1289,6 +1764,12 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
DRM_INFO("VCN encode is enabled in VM mode\n");
}
+static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
+ DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
+}
+
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
.set = vcn_v1_0_set_interrupt_state,
.process = vcn_v1_0_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 45aafca7f315..c5c9b2bc190d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -51,6 +51,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 4ac1288ab7df..42c8ad105b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1363,11 +1363,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
- pp_support_state = AMD_CG_SUPPORT_MC_LS;
+ pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
- pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
+ pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
@@ -1382,11 +1382,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
- pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
+ pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
- pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
+ pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
@@ -1401,11 +1401,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
- pp_support_state = AMD_CG_SUPPORT_HDP_LS;
+ pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
- pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
+ pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
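
All three hunks above fix the same copy-paste slip: pp_support_state was being loaded with AMD_CG_SUPPORT_* bit values instead of the PP_STATE_SUPPORT_* values the SMU interface expects. A sketch of the repeated pattern pulled into one helper (the function is illustrative, not part of vi.c):

static void vi_fill_pp_state(uint32_t cg_flags, uint32_t ls_flag,
			     uint32_t mgcg_flag,
			     uint32_t *pp_support_state, uint32_t *pp_state)
{
	*pp_support_state = 0;
	*pp_state = 0;
	if (cg_flags & ls_flag) {
		*pp_support_state |= PP_STATE_SUPPORT_LS;
		*pp_state |= PP_STATE_LS;
	}
	if (cg_flags & mgcg_flag) {
		*pp_support_state |= PP_STATE_SUPPORT_CG;
		*pp_state |= PP_STATE_CG;
	}
}
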
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 49df6c791cfc..5d2475d5392c 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -25,12 +25,39 @@
#include "cik_int.h"
static bool cik_event_interrupt_isr(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry)
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
{
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
+ const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
unsigned int vmid, pasid;
+ /* This workaround is needed because of a HW/FW limitation on Hawaii:
+ * VMID and PASID are not written into the ih_ring_entry
+ */
+ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+ dev->device_info->asic_family == CHIP_HAWAII) {
+ struct cik_ih_ring_entry *tmp_ihre =
+ (struct cik_ih_ring_entry *)patched_ihre;
+
+ *patched_flag = true;
+ *tmp_ihre = *ihre;
+
+ vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
+ pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+
+ tmp_ihre->ring_id &= 0x000000ff;
+ tmp_ihre->ring_id |= vmid << 8;
+ tmp_ihre->ring_id |= pasid << 16;
+
+ return (pasid != 0) &&
+ vmid >= dev->vm_info.first_vmid_kfd &&
+ vmid <= dev->vm_info.last_vmid_kfd;
+ }
+
/* Only handle interrupts from KFD VMIDs */
vmid = (ihre->ring_id & 0x0000ff00) >> 8;
if (vmid < dev->vm_info.first_vmid_kfd ||
@@ -48,18 +75,19 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
- ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE;
+ ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
+ ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT;
}
static void cik_event_interrupt_wq(struct kfd_dev *dev,
const uint32_t *ih_ring_entry)
{
- unsigned int pasid;
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
uint32_t context_id = ihre->data & 0xfffffff;
-
- pasid = (ihre->ring_id & 0xffff0000) >> 16;
+ unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
+ unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (pasid == 0)
return;
@@ -72,6 +100,22 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
+ else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+ struct kfd_vm_fault_info info;
+
+ kfd_process_vm_fault(dev->dqm, pasid);
+
+ memset(&info, 0, sizeof(info));
+ dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+ if (!info.page_addr && !info.status)
+ return;
+
+ if (info.vmid == vmid)
+ kfd_signal_vm_fault_event(dev, pasid, &info);
+ else
+ kfd_signal_vm_fault_event(dev, pasid, NULL);
+ }
}
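
Both functions above depend on the same ring_id layout, which the Hawaii workaround has to rebuild by hand. A hedged sketch of that layout, derived only from the masks and shifts used in this file (helper names are invented):

#include <linux/types.h>

/* ring_id layout as used here: [7:0] ring, [15:8] vmid, [31:16] pasid */
static inline u32 cik_pack_ring_id(u32 ring, u32 vmid, u32 pasid)
{
	return (ring & 0xff) | ((vmid & 0xff) << 8) | ((pasid & 0xffff) << 16);
}

static inline u32 cik_ring_id_to_vmid(u32 ring_id)
{
	return (ring_id & 0x0000ff00) >> 8;
}

static inline u32 cik_ring_id_to_pasid(u32 ring_id)
{
	return (ring_id & 0xffff0000) >> 16;
}
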
const struct kfd_event_interrupt_class event_interrupt_class_cik = {
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
index 109298b9d507..76f8677a7926 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_int.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
@@ -20,8 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef HSA_RADEON_CIK_INT_H_INCLUDED
-#define HSA_RADEON_CIK_INT_H_INCLUDED
+#ifndef CIK_INT_H_INCLUDED
+#define CIK_INT_H_INCLUDED
#include <linux/types.h>
@@ -34,9 +34,10 @@ struct cik_ih_ring_entry {
#define CIK_INTSRC_CP_END_OF_PIPE 0xB5
#define CIK_INTSRC_CP_BAD_OPCODE 0xB7
-#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
#define CIK_INTSRC_SDMA_TRAP 0xE0
#define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
+#define CIK_INTSRC_GFX_PAGE_INV_FAULT 0x92
+#define CIK_INTSRC_GFX_MEM_PROT_FAULT 0x93
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index f68aef02fc1f..3621efbd5759 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -21,18 +21,21 @@
*/
static const uint32_t cwsr_trap_gfx8_hex[] = {
- 0xbf820001, 0xbf820125,
+ 0xbf820001, 0xbf82012b,
0xb8f4f802, 0x89748674,
0xb8f5f803, 0x8675ff75,
- 0x00000400, 0xbf850011,
+ 0x00000400, 0xbf850017,
0xc00a1e37, 0x00000000,
0xbf8c007f, 0x87777978,
- 0xbf840002, 0xb974f802,
- 0xbe801d78, 0xb8f5f803,
- 0x8675ff75, 0x000001ff,
- 0xbf850002, 0x80708470,
- 0x82718071, 0x8671ff71,
- 0x0000ffff, 0xb974f802,
+ 0xbf840005, 0x8f728374,
+ 0xb972e0c2, 0xbf800002,
+ 0xb9740002, 0xbe801d78,
+ 0xb8f5f803, 0x8675ff75,
+ 0x000001ff, 0xbf850002,
+ 0x80708470, 0x82718071,
+ 0x8671ff71, 0x0000ffff,
+ 0x8f728374, 0xb972e0c2,
+ 0xbf800002, 0xb9740002,
0xbe801f70, 0xb8f5f803,
0x8675ff75, 0x00000100,
0xbf840006, 0xbefa0080,
@@ -168,7 +171,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x807c847c, 0x806eff6e,
0x00000400, 0xbf0a757c,
0xbf85ffef, 0xbf9c0000,
- 0xbf8200ca, 0xbef8007e,
+ 0xbf8200cd, 0xbef8007e,
0x8679ff7f, 0x0000ffff,
0x8779ff79, 0x00040000,
0xbefa0080, 0xbefb00ff,
@@ -268,16 +271,18 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x8f739773, 0xb976f807,
0x8671ff71, 0x0000ffff,
0x86fe7e7e, 0x86ea6a6a,
- 0xb974f802, 0xbf8a0000,
- 0x95807370, 0xbf810000,
+ 0x8f768374, 0xb976e0c2,
+ 0xbf800002, 0xb9740002,
+ 0xbf8a0000, 0x95807370,
+ 0xbf810000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf82015a,
+ 0xbf820001, 0xbf82015d,
0xb8f8f802, 0x89788678,
0xb8f1f803, 0x866eff71,
- 0x00000400, 0xbf850034,
+ 0x00000400, 0xbf850037,
0x866eff71, 0x00000800,
0xbf850003, 0x866eff71,
0x00000100, 0xbf840008,
@@ -303,258 +308,261 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x8f6e8b77, 0x866eff6e,
0x001f8000, 0xb96ef807,
0x86fe7e7e, 0x86ea6a6a,
- 0xb978f802, 0xbe801f6c,
- 0x866dff6d, 0x0000ffff,
- 0xbef00080, 0xb9700283,
- 0xb8f02407, 0x8e709c70,
- 0x876d706d, 0xb8f003c7,
- 0x8e709b70, 0x876d706d,
- 0xb8f0f807, 0x8670ff70,
- 0x00007fff, 0xb970f807,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x87708478, 0xb970f802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8f11605,
- 0x80718171, 0x8e718671,
- 0x80707170, 0x80707e70,
- 0x8271807f, 0x8671ff71,
- 0x0000ffff, 0xc0471cb8,
- 0x00000040, 0xbf8cc07f,
- 0xc04b1d38, 0x00000048,
- 0xbf8cc07f, 0xc0431e78,
- 0x00000058, 0xbf8cc07f,
- 0xc0471eb8, 0x0000005c,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x8670ff7f,
- 0x08000000, 0x8f708370,
- 0x87777077, 0x8670ff7f,
- 0x70000000, 0x8f708170,
- 0x87777077, 0xbefb007c,
- 0xbefa0080, 0xb8fa2a05,
- 0x807a817a, 0x8e7a8a7a,
- 0xb8f01605, 0x80708170,
- 0x8e708670, 0x807a707a,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xbefe007c,
- 0xbefc007a, 0xc0611efa,
- 0x0000007c, 0xbf8cc07f,
- 0x807a847a, 0xbefc007e,
+ 0x8f6e8378, 0xb96ee0c2,
+ 0xbf800002, 0xb9780002,
+ 0xbe801f6c, 0x866dff6d,
+ 0x0000ffff, 0xbef00080,
+ 0xb9700283, 0xb8f02407,
+ 0x8e709c70, 0x876d706d,
+ 0xb8f003c7, 0x8e709b70,
+ 0x876d706d, 0xb8f0f807,
+ 0x8670ff70, 0x00007fff,
+ 0xb970f807, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x87708478,
+ 0xb970f802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8f11605, 0x80718171,
+ 0x8e718671, 0x80707170,
+ 0x80707e70, 0x8271807f,
+ 0x8671ff71, 0x0000ffff,
+ 0xc0471cb8, 0x00000040,
+ 0xbf8cc07f, 0xc04b1d38,
+ 0x00000048, 0xbf8cc07f,
+ 0xc0431e78, 0x00000058,
+ 0xbf8cc07f, 0xc0471eb8,
+ 0x0000005c, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x8670ff7f, 0x08000000,
+ 0x8f708370, 0x87777077,
+ 0x8670ff7f, 0x70000000,
+ 0x8f708170, 0x87777077,
+ 0xbefb007c, 0xbefa0080,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0xb8f01605,
+ 0x80708170, 0x8e708670,
+ 0x807a707a, 0xbef60084,
+ 0xbef600ff, 0x01000000,
0xbefe007c, 0xbefc007a,
- 0xc0611b3a, 0x0000007c,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611b7a,
+ 0xbefc007a, 0xc0611b3a,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611bba, 0x0000007c,
+ 0xc0611b7a, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611bfa,
+ 0xbefc007a, 0xc0611bba,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611e3a, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0xb8f1f803,
- 0xbefe007c, 0xbefc007a,
- 0xc0611c7a, 0x0000007c,
+ 0xc0611bfa, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611a3a,
+ 0xbefc007a, 0xc0611e3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xb8f1f803, 0xbefe007c,
+ 0xbefc007a, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611a7a, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0xb8fbf801,
- 0xbefe007c, 0xbefc007a,
- 0xc0611efa, 0x0000007c,
+ 0xc0611a3a, 0x0000007c,
0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0x8670ff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f70, 0xb8fa2a05,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc007a, 0xc0611a7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xb8fbf801, 0xbefe007c,
+ 0xbefc007a, 0xc0611efa,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0x8670ff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f70,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0xb8f11605,
+ 0x80718171, 0x8e718471,
+ 0x8e768271, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747a74, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a717c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbefa0080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0xe0724000,
+ 0x7a1d0000, 0xe0724100,
+ 0x7a1d0100, 0xe0724200,
+ 0x7a1d0200, 0xe0724300,
+ 0x7a1d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8f14306,
+ 0x8671c171, 0xbf84002c,
+ 0xbf8a0000, 0x8670ff6f,
+ 0x04000000, 0xbf840028,
+ 0x8e718671, 0x8e718271,
+ 0xbef60071, 0xb8fa2a05,
0x807a817a, 0x8e7a8a7a,
- 0xb8f11605, 0x80718171,
- 0x8e718471, 0x8e768271,
+ 0xb8f01605, 0x80708170,
+ 0x8e708670, 0x807a707a,
+ 0x807aff7a, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747a74,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a717c, 0xbf85ffe7,
- 0xbef40172, 0xbefa0080,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x7a1d0002, 0x68040702,
+ 0xd0c9006a, 0x0000e302,
+ 0xbf87fff7, 0xbef70000,
+ 0xbefa00ff, 0x00000400,
0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
+ 0xb8f12a05, 0x80718171,
+ 0x8e718271, 0x8e768871,
0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a717c,
+ 0xbf840015, 0xbf11017c,
+ 0x8071ff71, 0x00001000,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x7a1d0000,
0xe0724100, 0x7a1d0100,
0xe0724200, 0x7a1d0200,
0xe0724300, 0x7a1d0300,
+ 0x807c847c, 0x807aff7a,
+ 0x00000400, 0xbf0a717c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbf8200dc, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x08000000, 0x8f6e836e,
+ 0x87776e77, 0x866eff7f,
+ 0x70000000, 0x8f6e816e,
+ 0x87776e77, 0x866eff7f,
+ 0x04000000, 0xbf84001e,
0xbefe00c1, 0xbeff00c1,
- 0xb8f14306, 0x8671c171,
- 0xbf84002c, 0xbf8a0000,
- 0x8670ff6f, 0x04000000,
- 0xbf840028, 0x8e718671,
- 0x8e718271, 0xbef60071,
- 0xb8fa2a05, 0x807a817a,
- 0x8e7a8a7a, 0xb8f01605,
- 0x80708170, 0x8e708670,
- 0x807a707a, 0x807aff7a,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf840019, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
0x00000080, 0xbef600ff,
0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x7a1d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000e302, 0xbf87fff7,
- 0xbef70000, 0xbefa00ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8f12a05,
- 0x80718171, 0x8e718271,
- 0x8e768871, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a717c, 0xbf840015,
- 0xbf11017c, 0x8071ff71,
- 0x00001000, 0x7e000300,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbef80080, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x8e76886f, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0x806fff6f, 0x00008000,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x7a1d0000, 0xe0724100,
- 0x7a1d0100, 0xe0724200,
- 0x7a1d0200, 0xe0724300,
- 0x7a1d0300, 0x807c847c,
- 0x807aff7a, 0x00000400,
- 0xbf0a717c, 0xbf85ffef,
- 0xbf9c0000, 0xbf8200d9,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x08000000,
- 0x8f6e836e, 0x87776e77,
- 0x866eff7f, 0x70000000,
- 0x8f6e816e, 0x87776e77,
- 0x866eff7f, 0x04000000,
- 0xbf84001e, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf840019,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xb8f82a05,
0x80788178, 0x8e788a78,
0xb8ee1605, 0x806e816e,
0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbef80080,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8ef2a05, 0x806f816f,
- 0x8e6f826f, 0x8e76886f,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
- 0xbf11087c, 0x806fff6f,
- 0x00008000, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
+ 0x80786e78, 0xbef60084,
0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe007a,
- 0xbeff007b, 0x866f71ff,
- 0x000003ff, 0xb96f4803,
- 0x866f71ff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2a05,
- 0x806e816e, 0x8e6e8a6e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc0071cb7, 0x00000040,
- 0xc00b1d37, 0x00000048,
- 0xc0031e77, 0x00000058,
- 0xc0071eb7, 0x0000005c,
- 0xbf8cc07f, 0x866fff6d,
- 0xf0000000, 0x8f6f9c6f,
- 0x8e6f906f, 0xbeee0080,
- 0x876e6f6e, 0x866fff6d,
- 0x08000000, 0x8f6f9b6f,
- 0x8e6f8f6f, 0x876e6f6e,
- 0x866fff70, 0x00800000,
- 0x8f6f976f, 0xb96ef807,
- 0x866dff6d, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0xb970f802, 0xbf8a0000,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe007a, 0xbeff007b,
+ 0x866f71ff, 0x000003ff,
+ 0xb96f4803, 0x866f71ff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2a05, 0x806e816e,
+ 0x8e6e8a6e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc0071cb7,
+ 0x00000040, 0xc00b1d37,
+ 0x00000048, 0xc0031e77,
+ 0x00000058, 0xc0071eb7,
+ 0x0000005c, 0xbf8cc07f,
+ 0x866fff6d, 0xf0000000,
+ 0x8f6f9c6f, 0x8e6f906f,
+ 0xbeee0080, 0x876e6f6e,
+ 0x866fff6d, 0x08000000,
+ 0x8f6f9b6f, 0x8e6f8f6f,
+ 0x876e6f6e, 0x866fff70,
+ 0x00800000, 0x8f6f976f,
+ 0xb96ef807, 0x866dff6d,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8370,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9700002, 0xbf8a0000,
0x95806f6c, 0xbf810000,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
index a2a04bb64096..abe1a5da29fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -251,7 +255,7 @@ if (!EMU_RUN_HACK)
s_waitcnt lgkmcnt(0)
s_or_b32 ttmp7, ttmp8, ttmp9
s_cbranch_scc0 L_NO_NEXT_TRAP //next level trap handler not been set
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
L_NO_NEXT_TRAP:
@@ -262,7 +266,7 @@ L_NO_NEXT_TRAP:
s_addc_u32 ttmp1, ttmp1, 0
L_EXCP_CASE:
s_and_b32 ttmp1, ttmp1, 0xFFFF
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
s_rfe_b64 [ttmp0, ttmp1]
end
// ********* End handling of non-CWSR traps *******************
@@ -1053,7 +1057,7 @@ end
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
@@ -1134,3 +1138,11 @@ end
function get_hwreg_size_bytes
return 128 //HWREG size 128 bytes
end
+
+function set_status_without_spi_prio(status, tmp)
+ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+end
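
set_status_without_spi_prio() (added here and, below, to the gfx9 handler) restores STATUS in two writes so that bits [2:1], SPI_PRIO, are never overwritten. A C model of the net effect, assuming the SQ_WAVE_STATUS_SPI_PRIO_MASK value defined earlier in this file:

#include <stdint.h>

#define SQ_WAVE_STATUS_SPI_PRIO_MASK 0x00000006

/* Every bit of the saved status is restored except SPI_PRIO, which
 * keeps whatever value the scheduler last wrote to the live register. */
static uint32_t status_without_spi_prio(uint32_t live, uint32_t saved)
{
	return (live & SQ_WAVE_STATUS_SPI_PRIO_MASK) |
	       (saved & ~SQ_WAVE_STATUS_SPI_PRIO_MASK);
}
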
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 998be96be736..0bb9c577b3a2 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -317,7 +321,7 @@ L_EXCP_CASE:
// Restore SQ_WAVE_STATUS.
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status
+ set_status_without_spi_prio(s_save_status, ttmp2)
s_rfe_b64 [ttmp0, ttmp1]
end
@@ -1120,7 +1124,7 @@ end
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
@@ -1212,3 +1216,11 @@ function ack_sqc_store_workaround
s_waitcnt lgkmcnt(0)
end
end
+
+function set_status_without_spi_prio(status, tmp)
+ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f64c5551cdba..297b36c26a05 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -122,6 +122,9 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
+ if (kfd_is_locked())
+ return -EAGAIN;
+
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
@@ -389,6 +392,61 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
return retval;
}
+static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
+ void *data)
+{
+ int retval;
+ const int max_num_cus = 1024;
+ struct kfd_ioctl_set_cu_mask_args *args = data;
+ struct queue_properties properties;
+ uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
+ size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
+
+ if ((args->num_cu_mask % 32) != 0) {
+ pr_debug("num_cu_mask 0x%x must be a multiple of 32",
+ args->num_cu_mask);
+ return -EINVAL;
+ }
+
+ properties.cu_mask_count = args->num_cu_mask;
+ if (properties.cu_mask_count == 0) {
+ pr_debug("CU mask cannot be 0");
+ return -EINVAL;
+ }
+
+ /* To prevent an unreasonably large CU mask size, set an arbitrary
+ * limit of max_num_cus bits. Any CU mask bits past that limit are
+ * dropped, and only the first max_num_cus bits are used.
+ */
+ if (properties.cu_mask_count > max_num_cus) {
+ pr_debug("CU mask cannot be greater than 1024 bits");
+ properties.cu_mask_count = max_num_cus;
+ cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
+ }
+
+ properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
+ if (!properties.cu_mask)
+ return -ENOMEM;
+
+ retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
+ if (retval) {
+ pr_debug("Could not copy CU mask from userspace");
+ kfree(properties.cu_mask);
+ return -EFAULT;
+ }
+
+ mutex_lock(&p->mutex);
+
+ retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
+
+ mutex_unlock(&p->mutex);
+
+ if (retval)
+ kfree(properties.cu_mask);
+
+ return retval;
+}
+
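
For context, a user-space sketch of driving the new ioctl. It assumes struct kfd_ioctl_set_cu_mask_args carries the queue_id, num_cu_mask and cu_mask_ptr fields used above, and that the caller already holds an open /dev/kfd file descriptor:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int set_queue_cu_mask(int kfd_fd, uint32_t queue_id)
{
	uint32_t mask[2];		/* 64 bits: num_cu_mask must be a multiple of 32 */
	struct kfd_ioctl_set_cu_mask_args args;

	memset(mask, 0xff, sizeof(mask));	/* enable the first 64 CUs */
	memset(&args, 0, sizeof(args));
	args.queue_id = queue_id;
	args.num_cu_mask = 64;
	args.cu_mask_ptr = (uintptr_t)mask;

	return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
}
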
static int kfd_ioctl_set_memory_policy(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -754,7 +812,6 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
{
struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
- struct timespec64 time;
dev = kfd_device_by_id(args->gpu_id);
if (dev)
@@ -766,11 +823,8 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
args->gpu_clock_counter = 0;
/* No access to rdtsc. Using raw monotonic time */
- getrawmonotonic64(&time);
- args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
-
- get_monotonic_boottime64(&time);
- args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
+ args->cpu_clock_counter = ktime_get_raw_ns();
+ args->system_clock_counter = ktime_get_boot_ns();
/* Since the counter is in nanoseconds, we use 1GHz frequency */
args->system_clock_freq = 1000000000;
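
The replacement collapses two timespec64 round-trips into the direct nanosecond accessors. A minimal sketch of what each one measures (kernel-side, assuming <linux/ktime.h>; the wrapper function is illustrative):

#include <linux/ktime.h>

static void fill_clock_counters(u64 *cpu_ns, u64 *system_ns)
{
	/* CLOCK_MONOTONIC_RAW: free-running, never slewed by NTP */
	*cpu_ns = ktime_get_raw_ns();
	/* boottime clock: monotonic, but keeps counting across suspend */
	*system_ns = ktime_get_boot_ns();
}
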
@@ -1558,6 +1612,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
kfd_ioctl_unmap_memory_from_gpu, 0),
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
+ kfd_ioctl_set_cu_mask, 0),
+
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 296b3f230280..ee4996029a86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -189,6 +189,21 @@ static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
return 0;
}
+static struct kfd_mem_properties *
+find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
+ struct kfd_topology_device *dev)
+{
+ struct kfd_mem_properties *props;
+
+ list_for_each_entry(props, &dev->mem_props, list) {
+ if (props->heap_type == heap_type
+ && props->flags == flags
+ && props->width == width)
+ return props;
+ }
+
+ return NULL;
+}
/* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
 * correct topology device present in the device_list
*/
@@ -197,36 +212,56 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
{
struct kfd_mem_properties *props;
struct kfd_topology_device *dev;
+ uint32_t heap_type;
+ uint64_t size_in_bytes;
+ uint32_t flags = 0;
+ uint32_t width;
pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
mem->proximity_domain);
list_for_each_entry(dev, device_list, list) {
if (mem->proximity_domain == dev->proximity_domain) {
- props = kfd_alloc_struct(props);
- if (!props)
- return -ENOMEM;
-
/* We're on GPU node */
if (dev->node_props.cpu_cores_count == 0) {
/* APU */
if (mem->visibility_type == 0)
- props->heap_type =
+ heap_type =
HSA_MEM_HEAP_TYPE_FB_PRIVATE;
/* dGPU */
else
- props->heap_type = mem->visibility_type;
+ heap_type = mem->visibility_type;
} else
- props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
+ heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
- props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
+ flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
- props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
+ flags |= HSA_MEM_FLAGS_NON_VOLATILE;
- props->size_in_bytes =
+ size_in_bytes =
((uint64_t)mem->length_high << 32) +
mem->length_low;
- props->width = mem->width;
+ width = mem->width;
+
+ /* Multiple banks of the same type are aggregated into
+ * one. User mode doesn't care about multiple physical
+ * memory segments. It's managed as a single virtual
+ * heap for user mode.
+ */
+ props = find_subtype_mem(heap_type, flags, width, dev);
+ if (props) {
+ props->size_in_bytes += size_in_bytes;
+ break;
+ }
+
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+
+ props->heap_type = heap_type;
+ props->flags = flags;
+ props->size_in_bytes = size_in_bytes;
+ props->width = width;
dev->node_props.mem_banks_count++;
list_add_tail(&props->list, &dev->mem_props);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index afb26f205d29..a3441b0e385b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -38,7 +38,6 @@
#include "kfd_dbgmgr.h"
#include "kfd_dbgdev.h"
#include "kfd_device_queue_manager.h"
-#include "../../radeon/cik_reg.h"
static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
index 03424c20920c..0619c777b47e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
@@ -60,6 +60,9 @@ enum {
SH_REG_SIZE = SH_REG_END - SH_REG_BASE
};
+/* SQ_CMD definitions */
+#define SQ_CMD 0x8DEC
+
enum SQ_IND_CMD_CMD {
SQ_IND_CMD_CMD_NULL = 0x00000000,
SQ_IND_CMD_CMD_HALT = 0x00000001,
@@ -190,4 +193,38 @@ union ULARGE_INTEGER {
void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
enum DBGDEV_TYPE type);
+union TCP_WATCH_CNTL_BITS {
+ struct {
+ uint32_t mask:24;
+ uint32_t vmid:4;
+ uint32_t atc:1;
+ uint32_t mode:2;
+ uint32_t valid:1;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+enum {
+ ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+ ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+ ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+ /* extend the mask to 26 bits in order to match the low address field */
+ ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+ ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+enum {
+ MAX_TRAPID = 8, /* 3 bits in the bitfield. */
+ MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+ ADDRESS_WATCH_REG_ADDR_HI = 0,
+ ADDRESS_WATCH_REG_ADDR_LO,
+ ADDRESS_WATCH_REG_CNTL,
+ ADDRESS_WATCH_REG_MAX
+};
+
#endif /* KFD_DBGDEV_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 4bd6ebfaf425..ab37d36d9cd6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -21,6 +21,8 @@
*/
#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
#include "kfd_priv.h"
static struct dentry *debugfs_root;
@@ -32,6 +34,38 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, show, NULL);
}
+static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
+ const char __user *user_buf, size_t size, loff_t *ppos)
+{
+ struct kfd_dev *dev;
+ char tmp[16];
+ uint32_t gpu_id;
+ int ret = -EINVAL;
+
+ memset(tmp, 0, 16);
+ if (size >= 16) {
+ pr_err("Invalid input for gpu id.\n");
+ goto out;
+ }
+ if (copy_from_user(tmp, user_buf, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (kstrtou32(tmp, 10, &gpu_id)) {
+ pr_err("Invalid input for gpu id.\n");
+ goto out;
+ }
+ dev = kfd_device_by_id(gpu_id);
+ if (dev) {
+ kfd_debugfs_hang_hws(dev);
+ ret = size;
+ } else
+ pr_err("Cannot find device %d.\n", gpu_id);
+
+out:
+ return ret;
+}
+
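
A hypothetical user-space trigger for the new file. It assumes debugfs is mounted at /sys/kernel/debug and that hang_hws sits in the kfd directory created by kfd_debugfs_init() below; the gpu_id string is parsed as decimal by the write handler above:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *gpu_id = "1002";	/* a gpu_id taken from the KFD topology */
	int fd = open("/sys/kernel/debug/kfd/hang_hws", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, gpu_id, strlen(gpu_id)) < 0)	/* must stay under 16 bytes */
		perror("write");
	close(fd);
	return 0;
}
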
static const struct file_operations kfd_debugfs_fops = {
.owner = THIS_MODULE,
.open = kfd_debugfs_open,
@@ -40,6 +74,15 @@ static const struct file_operations kfd_debugfs_fops = {
.release = single_release,
};
+static const struct file_operations kfd_debugfs_hang_hws_fops = {
+ .owner = THIS_MODULE,
+ .open = kfd_debugfs_open,
+ .read = seq_read,
+ .write = kfd_debugfs_hang_hws_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void kfd_debugfs_init(void)
{
struct dentry *ent;
@@ -65,6 +108,11 @@ void kfd_debugfs_init(void)
ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device,
&kfd_debugfs_fops);
+
+ ent = debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+ NULL,
+ &kfd_debugfs_hang_hws_fops);
+
if (!ent)
pr_warn("Failed to create rls in kfd debugfs\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7ee6cec2c060..1b048715ab8a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -30,7 +30,13 @@
#include "kfd_iommu.h"
#define MQD_SIZE_ALIGNED 768
-static atomic_t kfd_device_suspended = ATOMIC_INIT(0);
+
+/*
+ * kfd_locked is used to lock the kfd driver during suspend or reset.
+ * Once locked, the kfd driver will stop any further GPU execution,
+ * and process creation (open) will return -EAGAIN.
+ */
+static atomic_t kfd_locked = ATOMIC_INIT(0);
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
@@ -46,6 +52,7 @@ static const struct kfd_device_info kaveri_device_info = {
.supports_cwsr = false,
.needs_iommu_device = true,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info carrizo_device_info = {
@@ -61,6 +68,22 @@ static const struct kfd_device_info carrizo_device_info = {
.supports_cwsr = true,
.needs_iommu_device = true,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+};
+
+static const struct kfd_device_info raven_device_info = {
+ .asic_family = CHIP_RAVEN,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = true,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 1,
};
#endif
@@ -77,6 +100,7 @@ static const struct kfd_device_info hawaii_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info tonga_device_info = {
@@ -91,6 +115,7 @@ static const struct kfd_device_info tonga_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info tonga_vf_device_info = {
@@ -105,6 +130,7 @@ static const struct kfd_device_info tonga_vf_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info fiji_device_info = {
@@ -119,6 +145,7 @@ static const struct kfd_device_info fiji_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info fiji_vf_device_info = {
@@ -133,6 +160,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
@@ -148,6 +176,7 @@ static const struct kfd_device_info polaris10_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info polaris10_vf_device_info = {
@@ -162,6 +191,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info polaris11_device_info = {
@@ -176,6 +206,7 @@ static const struct kfd_device_info polaris11_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info vega10_device_info = {
@@ -190,6 +221,7 @@ static const struct kfd_device_info vega10_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info vega10_vf_device_info = {
@@ -204,6 +236,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
@@ -241,6 +274,7 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x9875, &carrizo_device_info }, /* Carrizo */
{ 0x9876, &carrizo_device_info }, /* Carrizo */
{ 0x9877, &carrizo_device_info }, /* Carrizo */
+ { 0x15DD, &raven_device_info }, /* Raven */
#endif
{ 0x67A0, &hawaii_device_info }, /* Hawaii */
{ 0x67A1, &hawaii_device_info }, /* Hawaii */
@@ -514,13 +548,54 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfree(kfd);
}
+int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+ if (!kfd->init_complete)
+ return 0;
+ kgd2kfd_suspend(kfd);
+
+ /* Hold dqm->lock to prevent further execution */
+ dqm_lock(kfd->dqm);
+
+ kfd_signal_reset_event(kfd);
+ return 0;
+}
+
+/*
+ * FIXME: KFD won't be able to resume existing processes for now.
+ * We will keep all existing processes in an evicted state and
+ * wait for them to be terminated.
+ */
+
+int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+ int ret, count;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ dqm_unlock(kfd->dqm);
+
+ ret = kfd_resume(kfd);
+ if (ret)
+ return ret;
+ count = atomic_dec_return(&kfd_locked);
+ WARN_ONCE(count != 0, "KFD reset ref. error");
+ return 0;
+}
+
+bool kfd_is_locked(void)
+{
+ return (atomic_read(&kfd_locked) > 0);
+}
+
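
Taken together with kgd2kfd_pre_reset() above, the intended call order from the amdgpu reset handler is roughly the following (an illustrative sketch; the real caller lives in amdgpu, not in this patch):

static void gpu_reset_flow_sketch(struct kfd_dev *kfd)
{
        kgd2kfd_pre_reset(kfd);  /* suspend processes, take dqm->lock */

        /* ... amdgpu performs the actual hardware reset here ... */

        kgd2kfd_post_reset(kfd); /* drop dqm->lock, resume, unlock kfd */
}
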
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return;
/* For first KFD device suspend all the KFD processes */
- if (atomic_inc_return(&kfd_device_suspended) == 1)
+ if (atomic_inc_return(&kfd_locked) == 1)
kfd_suspend_all_processes();
kfd->dqm->ops.stop(kfd->dqm);
@@ -539,7 +614,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
if (ret)
return ret;
- count = atomic_dec_return(&kfd_device_suspended);
+ count = atomic_dec_return(&kfd_locked);
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
ret = kfd_resume_all_processes();
@@ -577,14 +652,24 @@ dqm_start_error:
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
+ uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
+ bool is_patched = false;
+
if (!kfd->init_complete)
return;
+ if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
+ dev_err_once(kfd_device, "Ring entry too small\n");
+ return;
+ }
+
spin_lock(&kfd->interrupt_lock);
if (kfd->interrupts_active
- && interrupt_is_wanted(kfd, ih_ring_entry)
- && enqueue_ih_ring_entry(kfd, ih_ring_entry))
+ && interrupt_is_wanted(kfd, ih_ring_entry,
+ patched_ihre, &is_patched)
+ && enqueue_ih_ring_entry(kfd,
+ is_patched ? patched_ihre : ih_ring_entry))
queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock(&kfd->interrupt_lock);
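
This establishes a contract with the per-ASIC ISRs: an ISR may copy the ring entry into patched_ihre, modify it, and set the flag so the copy is enqueued instead of the original. A minimal sketch, with a hypothetical one-dword fixup (compare the event_interrupt_isr_v9 signature change below):

static bool isr_patch_sketch(struct kfd_dev *dev,
                             const uint32_t *ih_ring_entry,
                             uint32_t *patched_ihre, bool *patched_flag)
{
        memcpy(patched_ihre, ih_ring_entry,
               dev->device_info->ih_ring_entry_size);
        patched_ihre[3] = 0x1234;       /* hypothetical fixup of one dword */
        *patched_flag = true;           /* enqueue the patched copy */
        return true;                    /* interrupt is wanted */
}
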
@@ -739,8 +824,8 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
return -ENOMEM;
- *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
- if ((*mem_obj) == NULL)
+ *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+ if (!(*mem_obj))
return -ENOMEM;
pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
@@ -857,3 +942,26 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
kfree(mem_obj);
return 0;
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* This function will send a packet to the HIQ to hang the HWS,
+ * which will trigger a GPU reset and bring the HWS back to a normal state
+ */
+int kfd_debugfs_hang_hws(struct kfd_dev *dev)
+{
+ int r = 0;
+
+ if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
+ pr_err("HWS is not enabled");
+ return -EINVAL;
+ }
+
+ r = pm_debugfs_hang_hws(&dev->dqm->packets);
+ if (!r)
+ r = dqm_debugfs_execute_queues(dev->dqm);
+
+ return r;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 668ad07ebe1f..ec0d62a16e53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -61,6 +61,8 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id);
+static void kfd_process_hw_exception(struct work_struct *work);
+
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
@@ -99,6 +101,17 @@ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
return dqm->dev->shared_resources.num_pipe_per_mec;
}
+static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
+{
+ return dqm->dev->device_info->num_sdma_engines;
+}
+
+unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
+{
+ return dqm->dev->device_info->num_sdma_engines
+ * KFD_SDMA_QUEUES_PER_ENGINE;
+}
+
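
A quick worked example of the sizing this enables: with num_sdma_engines = 2 and KFD_SDMA_QUEUES_PER_ENGINE = 2, get_num_sdma_queues() returns 4, so the allocation bitmap initialized later in this patch becomes 0xF; on Raven (one SDMA engine) the same expression yields 0x3:

        dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
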
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -240,7 +253,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
print_queue(q);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
@@ -297,7 +310,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -346,10 +359,10 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (!mqd)
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (!mqd_mgr)
return -ENOMEM;
retval = allocate_hqd(dqm, q);
@@ -360,7 +373,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (retval)
goto out_deallocate_hqd;
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
@@ -374,15 +387,15 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (!q->properties.is_active)
return 0;
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
- q->process->mm);
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+ &q->properties, q->process->mm);
if (retval)
goto out_uninit_mqd;
return 0;
out_uninit_mqd:
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_hqd:
@@ -399,11 +412,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd)
+ if (!mqd_mgr)
return -ENOMEM;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -420,14 +433,14 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
q->pipe, q->queue);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
list_del(&q->list);
if (list_empty(&qpd->queues_list)) {
@@ -457,9 +470,9 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
{
int retval;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -467,19 +480,19 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
bool prev_active = false;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
pdd = kfd_get_process_device_data(q->device, q->process);
if (!pdd) {
retval = -ENODEV;
goto out_unlock;
}
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out_unlock;
}
@@ -506,7 +519,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
} else if (prev_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval) {
@@ -515,7 +528,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
}
}
- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+ retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
/*
* check active state vs. the previous state and modify
@@ -533,44 +546,44 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA))
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
&q->properties, q->process->mm);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
static struct mqd_manager *get_mqd_manager(
struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
pr_debug("mqd type %d\n", type);
- mqd = dqm->mqds[type];
- if (!mqd) {
- mqd = mqd_manager_init(type, dqm->dev);
- if (!mqd)
+ mqd_mgr = dqm->mqd_mgrs[type];
+ if (!mqd_mgr) {
+ mqd_mgr = mqd_manager_init(type, dqm->dev);
+ if (!mqd_mgr)
pr_err("mqd manager is NULL");
- dqm->mqds[type] = mqd;
+ dqm->mqd_mgrs[type] = mqd_mgr;
}
- return mqd;
+ return mqd_mgr;
}
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->evicted++ > 0) /* already evicted, do nothing */
goto out;
@@ -582,16 +595,16 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_active)
continue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) { /* should not be here */
+ if (!mqd_mgr) { /* should not be here */
pr_err("Cannot evict queue, mqd mgr is NULL\n");
retval = -ENOMEM;
goto out;
}
q->properties.is_evicted = true;
q->properties.is_active = false;
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval)
@@ -600,7 +613,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
}
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -611,7 +624,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->evicted++ > 0) /* already evicted, do nothing */
goto out;
@@ -633,7 +646,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -641,7 +654,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
uint32_t pd_base;
int retval = 0;
@@ -650,7 +663,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
goto out;
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
@@ -677,16 +690,16 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_evicted)
continue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) { /* should not be here */
+ if (!mqd_mgr) { /* should not be here */
pr_err("Cannot restore queue, mqd mgr is NULL\n");
retval = -ENOMEM;
goto out;
}
q->properties.is_evicted = false;
q->properties.is_active = true;
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties,
q->process->mm);
if (retval)
@@ -695,7 +708,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
}
qpd->evicted = 0;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -711,7 +724,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
goto out;
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
@@ -739,7 +752,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
if (!retval)
qpd->evicted = 0;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -761,7 +774,7 @@ static int register_process(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_add(&n->list, &dqm->queues);
/* Update PD Base in QPD */
@@ -769,9 +782,10 @@ static int register_process(struct device_queue_manager *dqm,
retval = dqm->asic_ops.update_qpd(dqm, qpd);
- dqm->processes_count++;
+ if (dqm->processes_count++ == 0)
+ dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -786,20 +800,22 @@ static int unregister_process(struct device_queue_manager *dqm,
list_empty(&qpd->queues_list) ? "empty" : "not empty");
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_for_each_entry_safe(cur, next, &dqm->queues, list) {
if (qpd == cur->qpd) {
list_del(&cur->list);
kfree(cur);
- dqm->processes_count--;
+ if (--dqm->processes_count == 0)
+ dqm->dev->kfd2kgd->set_compute_idle(
+ dqm->dev->kgd, true);
goto out;
}
}
/* qpd not found in dqm list */
retval = 1;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -838,7 +854,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
if (!dqm->allocated_queues)
return -ENOMEM;
- mutex_init(&dqm->lock);
+ mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0;
@@ -853,7 +869,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
}
dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
- dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
return 0;
}
@@ -866,8 +882,8 @@ static void uninitialize(struct device_queue_manager *dqm)
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
- kfree(dqm->mqds[i]);
- mutex_destroy(&dqm->lock);
+ kfree(dqm->mqd_mgrs[i]);
+ mutex_destroy(&dqm->lock_hidden);
kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
@@ -901,7 +917,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id)
{
- if (sdma_queue_id >= CIK_SDMA_QUEUES)
+ if (sdma_queue_id >= get_num_sdma_queues(dqm))
return;
dqm->sdma_bitmap |= (1 << sdma_queue_id);
}
@@ -910,19 +926,19 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
int retval;
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
- if (!mqd)
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+ if (!mqd_mgr)
return -ENOMEM;
retval = allocate_sdma_queue(dqm, &q->sdma_id);
if (retval)
return retval;
- q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
- q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ q->properties.sdma_queue_id = q->sdma_id / get_num_sdma_engines(dqm);
+ q->properties.sdma_engine_id = q->sdma_id % get_num_sdma_engines(dqm);
retval = allocate_doorbell(qpd, q);
if (retval)
@@ -933,19 +949,20 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
- retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
+ NULL);
if (retval)
goto out_uninit_mqd;
return 0;
out_uninit_mqd:
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
@@ -1003,12 +1020,14 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
{
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
- mutex_init(&dqm->lock);
+ mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->processes_count = 0;
dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
- dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+
+ INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
return 0;
}
@@ -1041,9 +1060,11 @@ static int start_cpsch(struct device_queue_manager *dqm)
init_interrupts(dqm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
+ /* Clear the hang status when the driver tries to start the HW scheduler */
+ dqm->is_hws_hang = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return 0;
fail_allocate_vidmem:
@@ -1055,9 +1076,9 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets);
@@ -1069,11 +1090,11 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return -EPERM;
}
@@ -1089,7 +1110,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count++;
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return 0;
}
@@ -1098,7 +1119,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_del(&kq->list);
dqm->queue_count--;
qpd->is_debug = false;
@@ -1110,18 +1131,18 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
@@ -1135,19 +1156,19 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (retval)
goto out_unlock;
q->properties.sdma_queue_id =
- q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
+ q->sdma_id / get_num_sdma_engines(dqm);
q->properties.sdma_engine_id =
- q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ q->sdma_id % get_num_sdma_engines(dqm);
}
retval = allocate_doorbell(qpd, q);
if (retval)
goto out_deallocate_sdma_queue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out_deallocate_doorbell;
}
@@ -1164,7 +1185,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
q->properties.tba_addr = qpd->tba_addr;
q->properties.tma_addr = qpd->tma_addr;
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
@@ -1188,7 +1209,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
out_deallocate_doorbell:
@@ -1197,7 +1218,8 @@ out_deallocate_sdma_queue:
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q->sdma_id);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
+
return retval;
}
@@ -1210,6 +1232,13 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
while (*fence_addr != fence_value) {
if (time_after(jiffies, end_jiffies)) {
pr_err("qcm fence wait loop timeout expired\n");
+ /* In the HWS case, this is used to halt the driver thread
+ * so as not to mess up CP states before taking
+ * scandumps for FW debugging.
+ */
+ while (halt_if_hws_hang)
+ schedule();
+
return -ETIME;
}
schedule();
@@ -1254,6 +1283,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
{
int retval = 0;
+ if (dqm->is_hws_hang)
+ return -EIO;
if (!dqm->active_runlist)
return retval;
@@ -1292,9 +1323,13 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
{
int retval;
+ if (dqm->is_hws_hang)
+ return -EIO;
retval = unmap_queues_cpsch(dqm, filter, filter_param);
if (retval) {
pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+ dqm->is_hws_hang = true;
+ schedule_work(&dqm->hw_exception_work);
return retval;
}
@@ -1306,7 +1341,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
bool preempt_all_queues;
preempt_all_queues = false;
@@ -1314,7 +1349,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
retval = 0;
/* remove queue from list to prevent rescheduling after preemption */
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->is_debug) {
/*
@@ -1326,9 +1361,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
}
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto failed;
}
@@ -1350,7 +1385,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = true;
}
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
/*
* Unconditionally decrement this counter, regardless of the queue's
@@ -1360,14 +1395,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
failed:
failed_try_destroy_debugged_queue:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1391,7 +1426,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
if (!dqm->asic_ops.set_cache_memory_policy)
return retval;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (alternate_aperture_size == 0) {
/* base > limit disables APE1 */
@@ -1437,7 +1472,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
qpd->sh_mem_ape1_limit);
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1468,7 +1503,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct device_process_node *cur, *next_dpn;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
/* Clear all user mode queues */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
@@ -1489,7 +1524,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
}
}
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1500,14 +1535,14 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
int retval;
struct queue *q, *next;
struct kernel_queue *kq, *kq_next;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
enum kfd_unmap_queues_filter filter =
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
@@ -1542,7 +1577,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
}
retval = execute_queues_cpsch(dqm, filter, 0);
- if (retval || qpd->reset_wavefronts) {
+ if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
qpd->reset_wavefronts = false;
@@ -1550,19 +1585,19 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* lastly, free mqd resources */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out;
}
list_del(&q->list);
qpd->queue_count--;
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
}
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1683,6 +1718,30 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
kfree(dqm);
}
+int kfd_process_vm_fault(struct device_queue_manager *dqm,
+ unsigned int pasid)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ int ret = 0;
+
+ if (!p)
+ return -EINVAL;
+ pdd = kfd_get_process_device_data(dqm->dev, p);
+ if (pdd)
+ ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
+ kfd_unref_process(p);
+
+ return ret;
+}
+
+static void kfd_process_hw_exception(struct work_struct *work)
+{
+ struct device_queue_manager *dqm = container_of(work,
+ struct device_queue_manager, hw_exception_work);
+ dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+}
+
#if defined(CONFIG_DEBUG_FS)
static void seq_reg_dump(struct seq_file *m,
@@ -1746,8 +1805,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
- for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+ for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) {
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
dqm->dev->kgd, pipe, queue, &dump, &n_regs);
if (r)
@@ -1764,4 +1823,16 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
return r;
}
+int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
+{
+ int r = 0;
+
+ dqm_lock(dqm);
+ dqm->active_runlist = true;
+ r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ dqm_unlock(dqm);
+
+ return r;
+}
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 59a6b1956932..00da3169a004 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -26,15 +26,14 @@
#include <linux/rwsem.h>
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/sched/mm.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#define KFD_UNMAP_LATENCY_MS (4000)
#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
-
-#define CIK_SDMA_QUEUES (4)
-#define CIK_SDMA_QUEUES_PER_ENGINE (2)
-#define CIK_SDMA_ENGINE_NUM (2)
+#define KFD_SDMA_QUEUES_PER_ENGINE (2)
struct device_process_node {
struct qcm_process_device *qpd;
@@ -170,11 +169,12 @@ struct device_queue_manager {
struct device_queue_manager_ops ops;
struct device_queue_manager_asic_ops asic_ops;
- struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
+ struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
struct packet_manager packets;
struct kfd_dev *dev;
- struct mutex lock;
+ struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
struct list_head queues;
+ unsigned int saved_flags;
unsigned int processes_count;
unsigned int queue_count;
unsigned int sdma_queue_count;
@@ -190,6 +190,10 @@ struct device_queue_manager {
struct kfd_mem_obj *fence_mem;
bool active_runlist;
int sched_policy;
+
+ /* hw exception */
+ bool is_hws_hang;
+ struct work_struct hw_exception_work;
};
void device_queue_manager_init_cik(
@@ -207,6 +211,7 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
unsigned int get_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
+unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
@@ -219,4 +224,19 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
return (pdd->lds_base >> 60) & 0x0E;
}
+/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void dqm_lock(struct device_queue_manager *dqm)
+{
+ mutex_lock(&dqm->lock_hidden);
+ dqm->saved_flags = memalloc_nofs_save();
+}
+static inline void dqm_unlock(struct device_queue_manager *dqm)
+{
+ memalloc_nofs_restore(dqm->saved_flags);
+ mutex_unlock(&dqm->lock_hidden);
+}
+
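
A usage sketch of the pair (the function name is hypothetical): dqm_lock() enters a memalloc_nofs section, so any allocation made while the lock is held is implicitly GFP_NOFS. This is what makes the GFP_NOIO to GFP_KERNEL conversions elsewhere in this patch safe with respect to MMU notifiers:

static void dqm_lock_usage_sketch(struct device_queue_manager *dqm)
{
        dqm_lock(dqm);
        /* kzalloc(..., GFP_KERNEL) here cannot recurse into FS reclaim */
        dqm_unlock(dqm);
}
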
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 79e5bcf6367c..417515332c35 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -60,7 +60,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config =
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (vega10_noretry &&
+ if (noretry &&
!dqm->dev->device_info->needs_iommu_device)
qpd->sh_mem_config |=
1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index c3744d89352c..ebe79bf00145 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -188,9 +188,9 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
*doorbell_off = kfd->doorbell_id_offset + inx;
pr_debug("Get kernel queue doorbell\n"
- " doorbell offset == 0x%08X\n"
- " kernel address == %p\n",
- *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
+ " doorbell offset == 0x%08X\n"
+ " doorbell index == 0x%x\n",
+ *doorbell_off, inx);
return kfd->doorbell_kernel_ptr + inx;
}
@@ -199,7 +199,8 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{
unsigned int inx;
- inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
+ * sizeof(u32) / kfd->device_info->doorbell_size;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_available_index);
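
A quick sanity check of the new index arithmetic, assuming 8-byte doorbells as on SOC15 parts (doorbell_size == 8): db_addr - doorbell_kernel_ptr counts u32 slots, so u32 slot 4 sits 16 bytes into the page and maps to doorbell index (4 * 4) / 8 == 2, the bit that must be cleared in doorbell_available_index.
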
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 5562e94e786a..e9f0e0a1b41c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -850,6 +850,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
ev->memory_exception_data = *ev_data;
}
+ if (type == KFD_EVENT_TYPE_MEMORY) {
+ dev_warn(kfd_device,
+ "Sending SIGSEGV to HSA Process with PID %d ",
+ p->lead_thread->pid);
+ send_sig(SIGSEGV, p->lead_thread, 0);
+ }
+
/* Send SIGTERM if no event of type "type" has been found */
if (send_signal) {
if (send_sigterm) {
@@ -904,34 +911,41 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memory_exception_data.failure.NotPresent = 1;
memory_exception_data.failure.NoExecute = 0;
memory_exception_data.failure.ReadOnly = 0;
- if (vma) {
- if (vma->vm_start > address) {
- memory_exception_data.failure.NotPresent = 1;
- memory_exception_data.failure.NoExecute = 0;
+ if (vma && address >= vma->vm_start) {
+ memory_exception_data.failure.NotPresent = 0;
+
+ if (is_write_requested && !(vma->vm_flags & VM_WRITE))
+ memory_exception_data.failure.ReadOnly = 1;
+ else
memory_exception_data.failure.ReadOnly = 0;
- } else {
- memory_exception_data.failure.NotPresent = 0;
- if (is_write_requested && !(vma->vm_flags & VM_WRITE))
- memory_exception_data.failure.ReadOnly = 1;
- else
- memory_exception_data.failure.ReadOnly = 0;
- if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
- memory_exception_data.failure.NoExecute = 1;
- else
- memory_exception_data.failure.NoExecute = 0;
- }
+
+ if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
+ memory_exception_data.failure.NoExecute = 1;
+ else
+ memory_exception_data.failure.NoExecute = 0;
}
up_read(&mm->mmap_sem);
mmput(mm);
- mutex_lock(&p->event_mutex);
+ pr_debug("notpresent %d, noexecute %d, readonly %d\n",
+ memory_exception_data.failure.NotPresent,
+ memory_exception_data.failure.NoExecute,
+ memory_exception_data.failure.ReadOnly);
- /* Lookup events by type and signal them */
- lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
- &memory_exception_data);
+ /* Workaround on Raven to not kill the process when memory is freed
+ * before IOMMU is able to finish processing all the excessive PPRs
+ */
+ if (dev->device_info->asic_family != CHIP_RAVEN) {
+ mutex_lock(&p->event_mutex);
+
+ /* Lookup events by type and signal them */
+ lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
+ &memory_exception_data);
+
+ mutex_unlock(&p->event_mutex);
+ }
- mutex_unlock(&p->event_mutex);
kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */
@@ -956,3 +970,67 @@ void kfd_signal_hw_exception_event(unsigned int pasid)
mutex_unlock(&p->event_mutex);
kfd_unref_process(p);
}
+
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info)
+{
+ struct kfd_event *ev;
+ uint32_t id;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_hsa_memory_exception_data memory_exception_data;
+
+ if (!p)
+ return; /* Presumably process exited. */
+ memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+ memory_exception_data.gpu_id = dev->id;
+ memory_exception_data.failure.imprecise = 1;
+ /* Set failure reason */
+ if (info) {
+ memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
+ memory_exception_data.failure.NotPresent =
+ info->prot_valid ? 1 : 0;
+ memory_exception_data.failure.NoExecute =
+ info->prot_exec ? 1 : 0;
+ memory_exception_data.failure.ReadOnly =
+ info->prot_write ? 1 : 0;
+ memory_exception_data.failure.imprecise = 0;
+ }
+ mutex_lock(&p->event_mutex);
+
+ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_for_each_entry_continue(&p->event_idr, ev, id)
+ if (ev->type == KFD_EVENT_TYPE_MEMORY) {
+ ev->memory_exception_data = memory_exception_data;
+ set_event(ev);
+ }
+
+ mutex_unlock(&p->event_mutex);
+ kfd_unref_process(p);
+}
+
+void kfd_signal_reset_event(struct kfd_dev *dev)
+{
+ struct kfd_hsa_hw_exception_data hw_exception_data;
+ struct kfd_process *p;
+ struct kfd_event *ev;
+ unsigned int temp;
+ uint32_t id, idx;
+
+ /* Whole-GPU reset, caused by a GPU hang; memory contents are lost */
+ memset(&hw_exception_data, 0, sizeof(hw_exception_data));
+ hw_exception_data.gpu_id = dev->id;
+ hw_exception_data.memory_lost = 1;
+
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->event_mutex);
+ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_for_each_entry_continue(&p->event_idr, ev, id)
+ if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+ ev->hw_exception_data = hw_exception_data;
+ set_event(ev);
+ }
+ mutex_unlock(&p->event_mutex);
+ }
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index abca5bfebbff..c7ac6c73af86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -66,6 +66,7 @@ struct kfd_event {
/* type specific data */
union {
struct kfd_hsa_memory_exception_data memory_exception_data;
+ struct kfd_hsa_hw_exception_data hw_exception_data;
};
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 37029baa3346..f836897bbf58 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -26,7 +26,9 @@
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry)
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
{
uint16_t source_id, client_id, pasid, vmid;
const uint32_t *data = ih_ring_entry;
@@ -57,7 +59,9 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
source_id == SOC15_INTSRC_SDMA_TRAP ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
- source_id == SOC15_INTSRC_CP_BAD_OPCODE;
+ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+ client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_UTCL2;
}
static void event_interrupt_wq_v9(struct kfd_dev *dev,
@@ -82,7 +86,19 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
kfd_signal_hw_exception_event(pasid);
else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
- /* TODO */
+ struct kfd_vm_fault_info info = {0};
+ uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+
+ info.vmid = vmid;
+ info.mc_id = client_id;
+ info.page_addr = ih_ring_entry[4] |
+ (uint64_t)(ih_ring_entry[5] & 0xf) << 32;
+ info.prot_valid = ring_id & 0x08;
+ info.prot_read = ring_id & 0x10;
+ info.prot_write = ring_id & 0x20;
+
+ kfd_process_vm_fault(dev->dqm, pasid);
+ kfd_signal_vm_fault_event(dev, pasid, &info);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index db6d9336b80d..c56ac47cd318 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -151,13 +151,15 @@ static void interrupt_wq(struct work_struct *work)
ih_ring_entry);
}
-bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
+bool interrupt_is_wanted(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre, bool *flag)
{
/* integer and bitwise OR so there is no boolean short-circuiting */
unsigned int wanted = 0;
wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
- ih_ring_entry);
+ ih_ring_entry, patched_ihre, flag);
return wanted != 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index c71817963eea..7a61f38c09e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -190,7 +190,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
{
struct kfd_dev *dev;
- dev_warn(kfd_device,
+ dev_warn_ratelimited(kfd_device,
"Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
PCI_BUS_NUM(pdev->devfn),
PCI_SLOT(pdev->devfn),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 476951d8c91c..9f84b4d9fb88 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -59,7 +59,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
switch (type) {
case KFD_QUEUE_TYPE_DIQ:
case KFD_QUEUE_TYPE_HIQ:
- kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
+ kq->mqd_mgr = dev->dqm->ops.get_mqd_manager(dev->dqm,
KFD_MQD_TYPE_HIQ);
break;
default:
@@ -67,7 +67,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
return false;
}
- if (!kq->mqd)
+ if (!kq->mqd_mgr)
return false;
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
@@ -123,6 +123,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
+ prop.cu_mask = NULL;
if (init_queue(&kq->queue, &prop) != 0)
goto err_init_queue;
@@ -130,7 +131,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->queue->device = dev;
kq->queue->process = kfd_get_process(current);
- retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
+ retval = kq->mqd_mgr->init_mqd(kq->mqd_mgr, &kq->queue->mqd,
&kq->queue->mqd_mem_obj,
&kq->queue->gart_mqd_addr,
&kq->queue->properties);
@@ -142,9 +143,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
pr_debug("Assigning hiq to hqd\n");
kq->queue->pipe = KFD_CIK_HIQ_PIPE;
kq->queue->queue = KFD_CIK_HIQ_QUEUE;
- kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
- kq->queue->queue, &kq->queue->properties,
- NULL);
+ kq->mqd_mgr->load_mqd(kq->mqd_mgr, kq->queue->mqd,
+ kq->queue->pipe, kq->queue->queue,
+ &kq->queue->properties, NULL);
} else {
/* allocate fence for DIQ */
@@ -182,7 +183,7 @@ err_get_kernel_doorbell:
static void uninitialize(struct kernel_queue *kq)
{
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
- kq->mqd->destroy_mqd(kq->mqd,
+ kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
@@ -191,7 +192,8 @@ static void uninitialize(struct kernel_queue *kq)
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
- kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
+ kq->mqd_mgr->uninit_mqd(kq->mqd_mgr, kq->queue->mqd,
+ kq->queue->mqd_mem_obj);
kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 97aff2041a5d..a7116a939029 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -70,7 +70,7 @@ struct kernel_queue {
/* data */
struct kfd_dev *dev;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct queue *queue;
uint64_t pending_wptr64;
uint32_t pending_wptr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 76bf2dc8aec4..6e1f5c7c2d4b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -47,6 +47,8 @@ static const struct kgd2kfd_calls kgd2kfd = {
.resume_mm = kgd2kfd_resume_mm,
.schedule_evict_and_restore_process =
kgd2kfd_schedule_evict_and_restore_process,
+ .pre_reset = kgd2kfd_pre_reset,
+ .post_reset = kgd2kfd_post_reset,
};
int sched_policy = KFD_SCHED_POLICY_HWS;
@@ -61,7 +63,7 @@ MODULE_PARM_DESC(hws_max_conc_proc,
int cwsr_enable = 1;
module_param(cwsr_enable, int, 0444);
-MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = off, 1 = on (default))");
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);
@@ -83,13 +85,19 @@ module_param(ignore_crat, int, 0444);
MODULE_PARM_DESC(ignore_crat,
"Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
-int vega10_noretry;
-module_param_named(noretry, vega10_noretry, int, 0644);
+int noretry;
+module_param(noretry, int, 0644);
MODULE_PARM_DESC(noretry,
- "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)");
+ "Set sh_mem_config.retry_disable on GFXv9+ dGPUs (0 = retry enabled (default), 1 = retry disabled)");
+
+int halt_if_hws_hang;
+module_param(halt_if_hws_hang, int, 0644);
+MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+
static int amdkfd_init_completed;
+
int kgd2kfd_init(unsigned int interface_version,
const struct kgd2kfd_calls **g2f)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 4b8eb506642b..3bc25ab84f34 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -21,7 +21,7 @@
*
*/
-#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
@@ -48,3 +48,42 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
return NULL;
}
+
+void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ const uint32_t *cu_mask, uint32_t cu_mask_count,
+ uint32_t *se_mask)
+{
+ struct kfd_cu_info cu_info;
+ uint32_t cu_per_sh[4] = {0};
+ int i, se, cu = 0;
+
+ mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
+
+ if (cu_mask_count > cu_info.cu_active_number)
+ cu_mask_count = cu_info.cu_active_number;
+
+ for (se = 0; se < cu_info.num_shader_engines; se++)
+ for (i = 0; i < 4; i++)
+ cu_per_sh[se] += hweight32(cu_info.cu_bitmap[se][i]);
+
+ /* Symmetrically map cu_mask to all SEs:
+ * cu_mask[0] bit0 -> se_mask[0] bit0;
+ * cu_mask[0] bit1 -> se_mask[1] bit0;
+ * ... (if # SE is 4)
+ * cu_mask[0] bit4 -> se_mask[0] bit1;
+ * ...
+ */
+ se = 0;
+ for (i = 0; i < cu_mask_count; i++) {
+ if (cu_mask[i / 32] & (1 << (i % 32)))
+ se_mask[se] |= 1 << cu;
+
+ do {
+ se++;
+ if (se == cu_info.num_shader_engines) {
+ se = 0;
+ cu++;
+ }
+ } while (cu >= cu_per_sh[se] && cu < 32);
+ }
+}
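
A worked example of the round-robin mapping, assuming four shader engines with at least two active CUs each: if the first eight bits of cu_mask are set, bits 0-3 select CU0 of SE0-SE3 and bits 4-7 select CU1 of SE0-SE3, so each se_mask[i] ends up as 0x3, matching the comment above.
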
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 8972bcfbf701..4e84052d4e21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -93,4 +93,8 @@ struct mqd_manager {
struct kfd_dev *dev;
};
+void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ const uint32_t *cu_mask, uint32_t cu_mask_count,
+ uint32_t *se_mask);
+
#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 06eaa218eba6..47243165a082 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -41,6 +41,31 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
return (struct cik_sdma_rlc_registers *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("Update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -196,6 +221,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -408,7 +435,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 684054ff02cd..f5fc3675f21e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -41,6 +41,31 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct v9_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -55,7 +80,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
* instead of sub-allocation function.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+ *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!*mqd_mem_obj)
return -ENOMEM;
retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
@@ -198,6 +223,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -393,7 +420,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 481307b8b4db..b81fda3754da 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -43,6 +43,31 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct vi_sdma_mqd *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct vi_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("Update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -196,6 +221,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -394,7 +421,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index c317feb43f69..1092631765cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -418,4 +418,30 @@ out:
return 0;
}
+int pm_debugfs_hang_hws(struct packet_manager *pm)
+{
+ uint32_t *buffer, size;
+ int r = 0;
+
+ size = pm->pmf->query_status_size;
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+ if (!buffer) {
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(buffer, 0x55, size);
+ pm->priv_queue->ops.submit_packet(pm->priv_queue);
+
+ pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
+ buffer[0], buffer[1], buffer[2], buffer[3],
+ buffer[4], buffer[5], buffer[6]);
+out:
+ mutex_unlock(&pm->lock);
+ return r;
+}
+
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 5e3990bb4c4b..f971710f1c91 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -73,7 +73,7 @@
/*
* When working with cp scheduler we should assign the HIQ manually or via
- * the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot
+ * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
* definitions for Kaveri. In Kaveri only the first ME queues participates
* in the cp scheduling taking that in mind we set the HIQ slot in the
* second ME.
@@ -142,7 +142,12 @@ extern int ignore_crat;
/*
* Set sh_mem_config.retry_disable on Vega10
*/
-extern int vega10_noretry;
+extern int noretry;
+
+/*
+ * Halt if HWS hang is detected
+ */
+extern int halt_if_hws_hang;
/**
* enum kfd_sched_policy
@@ -180,9 +185,10 @@ enum cache_policy {
struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry);
+ const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
+ bool *patched_flag);
void (*interrupt_wq)(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry);
+ const uint32_t *ih_ring_entry);
};
struct kfd_device_info {
@@ -197,6 +203,7 @@ struct kfd_device_info {
bool supports_cwsr;
bool needs_iommu_device;
bool needs_pci_atomics;
+ unsigned int num_sdma_engines;
};
struct kfd_mem_obj {
@@ -415,6 +422,9 @@ struct queue_properties {
uint32_t ctl_stack_size;
uint64_t tba_addr;
uint64_t tma_addr;
+ /* Relevant for CU */
+ uint32_t cu_mask_count; /* Must be a multiple of 32 */
+ uint32_t *cu_mask;
};
/**
@@ -806,12 +816,18 @@ int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);
+bool interrupt_is_wanted(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre, bool *flag);
/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);
+/* GPU reset */
+int kgd2kfd_pre_reset(struct kfd_dev *kfd);
+int kgd2kfd_post_reset(struct kfd_dev *kfd);
+
/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);
@@ -838,6 +854,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
+int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
struct process_queue_node {
@@ -858,6 +875,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
+int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
unsigned int qid);
@@ -964,10 +983,17 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info);
+
+void kfd_signal_reset_event(struct kfd_dev *dev);
+
void kfd_flush_tlb(struct kfd_process_device *pdd);
int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+bool kfd_is_locked(void);
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
@@ -980,6 +1006,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);
+int kfd_debugfs_hang_hws(struct kfd_dev *dev);
+int pm_debugfs_hang_hws(struct packet_manager *pm);
+int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
+
#else
static inline void kfd_debugfs_init(void) {}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 1d80b4f7c681..4694386cc623 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -244,6 +244,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
return ERR_PTR(-EINVAL);
process = find_process(thread);
+ if (!process)
+ return ERR_PTR(-EINVAL);
return process;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index d65ce0436b31..c8cad9c078ae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -186,8 +186,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
- if (dev->dqm->queue_count >=
- CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+ if (dev->dqm->queue_count >= get_num_sdma_queues(dev->dqm)) {
pr_err("Over-subscription is not allowed for SDMA.\n");
retval = -EPERM;
goto err_create_queue;
@@ -209,7 +208,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
- pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+ pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
}
@@ -326,6 +325,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (retval != -ETIME)
goto err_destroy_queue;
}
+ kfree(pqn->q->properties.cu_mask);
+ pqn->q->properties.cu_mask = NULL;
uninit_queue(pqn->q);
}
@@ -366,6 +367,34 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
return 0;
}
+int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p)
+{
+ int retval;
+ struct process_queue_node *pqn;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (!pqn) {
+ pr_debug("No queue %d exists for update operation\n", qid);
+ return -EFAULT;
+ }
+
+ /* Free the old CU mask memory if it is already allocated, then
+ * allocate memory for the new CU mask.
+ */
+ kfree(pqn->q->properties.cu_mask);
+
+ pqn->q->properties.cu_mask_count = p->cu_mask_count;
+ pqn->q->properties.cu_mask = p->cu_mask;
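+ /* the queue now owns the caller-allocated cu_mask; it is freed in pqm_destroy_queue() */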
+
+ retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
+ pqn->q);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
struct kernel_queue *pqm_get_kernel_queue(
struct process_queue_manager *pqm,
unsigned int qid)
@@ -387,7 +416,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
struct process_queue_node *pqn;
struct queue *q;
enum KFD_MQD_TYPE mqd_type;
- struct mqd_manager *mqd_manager;
+ struct mqd_manager *mqd_mgr;
int r = 0;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
@@ -410,11 +439,11 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
q->properties.type, q->device->id);
continue;
}
- mqd_manager = q->device->dqm->ops.get_mqd_manager(
+ mqd_mgr = q->device->dqm->ops.get_mqd_manager(
q->device->dqm, mqd_type);
} else if (pqn->kq) {
q = pqn->kq->queue;
- mqd_manager = pqn->kq->mqd;
+ mqd_mgr = pqn->kq->mqd_mgr;
switch (q->properties.type) {
case KFD_QUEUE_TYPE_DIQ:
seq_printf(m, " DIQ on device %x\n",
@@ -434,7 +463,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
continue;
}
- r = mqd_manager->debugfs_show_mqd(m, q->mqd);
+ r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
if (r != 0)
break;
}
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index d5d4586e6176..325083b0297e 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -9,23 +9,6 @@ config DRM_AMD_DC
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
-config DRM_AMD_DC_FBC
- bool "AMD FBC - Enable Frame Buffer Compression"
- depends on DRM_AMD_DC
- help
- Choose this option if you want to use frame buffer compression
- support.
- This is a power optimisation feature, check its availability
- on your hardware before enabling this option.
-
-
-config DRM_AMD_DC_DCN1_0
- bool "DCN 1.0 Raven family"
- depends on DRM_AMD_DC && X86
- help
- Choose this option if you want to have
- RV family for display engine
-
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
index 357d59648401..a8a6c106e8c7 100644
--- a/drivers/gpu/drm/amd/display/TODO
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -97,10 +97,10 @@ share it with drivers. But that's a very long term goal, and by far not just an
issue with DC - other drivers, especially around DP sink handling, are equally
guilty.
-19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
-stuff just isn't up to the challenges either. We need to figure out something
-that integrates better with DRM and linux debug printing, while not being
-useless with filtering output. dynamic debug printing might be an option.
+19. DONE - The DC logger is still a rather sore thing, but I know that the
+DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
+something that integrates better with DRM and linux debug printing, while not
+being useless with filtering output. dynamic debug printing might be an option.
20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
retimer that we need to program to pass PHY compliance. Currently that's
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index af16973f2c41..94911871eb9b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -28,11 +28,11 @@
AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
endif
ifneq ($(CONFIG_DEBUG_FS),)
-AMDGPUDM += amdgpu_dm_crc.o
+AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
endif
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3a8d6356afc2..34f34823bab5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -39,6 +39,9 @@
#include "dm_helpers.h"
#include "dm_services_types.h"
#include "amdgpu_dm_mst_types.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -57,7 +60,7 @@
#include "modules/inc/mod_freesync.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "ivsrcid/irqsrcs_dcn_1_0.h"
#include "dcn/dcn_1_0_offset.h"
@@ -347,7 +350,6 @@ static void hotplug_notify_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-#if defined(CONFIG_DRM_AMD_DC_FBC)
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
@@ -388,7 +390,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
}
}
-#endif
/* Init display KMS
@@ -902,14 +903,14 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
(struct edid *) sink->dc_edid.raw_edid;
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
aconnector->edid);
}
amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
} else {
amdgpu_dm_remove_sink_from_freesync_module(connector);
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
@@ -1040,7 +1041,7 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link->type != dc_connection_mst_branch)
mutex_lock(&aconnector->hpd_lock);
- if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+ if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
@@ -1191,7 +1192,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
return 0;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
@@ -1319,7 +1320,12 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
- return bd->props.brightness;
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int ret = dc_link_get_backlight_level(dm->backlight_link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return bd->props.brightness;
+ return ret;
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
@@ -1334,6 +1340,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
struct backlight_properties props = { 0 };
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -1525,16 +1532,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
- /*
- * Temporary disable until pplib/smu interaction is implemented
- */
- dm->dc->debug.disable_stutter = true;
break;
#endif
default:
@@ -1542,6 +1545,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+ dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
+
return 0;
fail:
kfree(aencoder);
@@ -1573,18 +1579,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
/* TODO: implement later */
}
-static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
- u8 level)
-{
- /* TODO: translate amdgpu_encoder to display_index and call DAL */
-}
-
-static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
-{
- /* TODO: translate amdgpu_encoder to display_index and call DAL */
- return 0;
-}
-
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -1613,10 +1607,8 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
static const struct amdgpu_display_funcs dm_display_funcs = {
.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
- .backlight_set_level =
- dm_set_backlight_level,/* called unconditionally */
- .backlight_get_level =
- dm_get_backlight_level,/* called unconditionally */
+ .backlight_set_level = NULL, /* never called for DC */
+ .backlight_get_level = NULL, /* never called for DC */
.hpd_sense = NULL,/* called unconditionally */
.hpd_set_polarity = NULL, /* called unconditionally */
.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
@@ -1724,7 +1716,7 @@ static int dm_early_init(void *handle)
adev->mode_info.num_dig = 6;
adev->mode_info.plane_type = dm_plane_type_default;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
@@ -2123,13 +2115,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
- int32_t width = mode_in->crtc_hdisplay * 9;
- int32_t height = mode_in->crtc_vdisplay * 16;
-
- if ((width - height) < 10 && (width - height) > -10)
- return ASPECT_RATIO_16_9;
- else
- return ASPECT_RATIO_4_3;
+ /* 1-1 mapping, since both enums follow the HDMI spec. */
+ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
static enum dc_color_space
@@ -2175,6 +2162,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
return color_space;
}
+static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
+{
+ if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+ return;
+
+ timing_out->display_color_depth--;
+}
+
+static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
+{
+ int normalized_clk;
+
+ if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+ return;
+ do {
+ normalized_clk = timing_out->pix_clk_khz;
+ /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ normalized_clk /= 2;
+ /* Adjusting pix clock following on HDMI spec based on colour depth */
+ switch (timing_out->display_color_depth) {
+ case COLOR_DEPTH_101010:
+ normalized_clk = (normalized_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_clk = (normalized_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_clk = (normalized_clk * 48) / 24;
+ break;
+ default:
+ return;
+ }
+ if (normalized_clk <= info->max_tmds_clock)
+ return;
+ reduce_mode_colour_depth(timing_out);
+
+ } while (timing_out->display_color_depth > COLOR_DEPTH_888);
+
+}
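+
+/* Worked example (hypothetical numbers): a 600000 kHz pixel clock at
+ * COLOR_DEPTH_101010 normalizes to 600000 * 30 / 24 = 750000 kHz; if the
+ * sink's max_tmds_clock is lower than that, the depth is stepped down
+ * toward COLOR_DEPTH_888 and the loop retries.
+ */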
/*****************************************************************************/
static void
@@ -2183,6 +2210,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
const struct drm_connector *connector)
{
struct dc_crtc_timing *timing_out = &stream->timing;
+ const struct drm_display_info *info = &connector->display_info;
memset(timing_out, 0, sizeof(struct dc_crtc_timing));
@@ -2191,8 +2219,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
timing_out->v_border_top = 0;
timing_out->v_border_bottom = 0;
/* TODO: un-hardcode */
-
- if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ if (drm_mode_is_420_only(info, mode_in)
+ && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
else
@@ -2228,6 +2258,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ adjust_colour_depth_from_display_info(timing_out, info);
}
static void fill_audio_info(struct audio_info *audio_info,
@@ -3048,15 +3080,25 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
- r = amdgpu_bo_pin(rbo, domain, &afb->address);
- amdgpu_bo_unreserve(rbo);
-
+ r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ amdgpu_bo_unreserve(rbo);
return r;
}
+ r = amdgpu_ttm_alloc_gart(&rbo->tbo);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ DRM_ERROR("%p bind failed\n", rbo);
+ return r;
+ }
+ amdgpu_bo_unreserve(rbo);
+
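+ /* the GPU address is only valid once the BO is pinned and GART-bound */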
+ afb->address = amdgpu_bo_gpu_offset(rbo);
+
amdgpu_bo_ref(rbo);
if (dm_plane_state_new->dc_state &&
@@ -3426,12 +3468,15 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
struct edid *edid = amdgpu_dm_connector->edid;
encoder = helper->best_encoder(connector);
- amdgpu_dm_connector_ddc_get_modes(connector, edid);
- amdgpu_dm_connector_add_common_modes(encoder, connector);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (!edid || !drm_edid_is_valid(edid)) {
+ drm_add_modes_noedid(connector, 640, 480);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ }
amdgpu_dm_fbc_init(connector);
-#endif
+
return amdgpu_dm_connector->num_modes;
}
@@ -3450,7 +3495,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.stereo_allowed = false;
aconnector->base.dpms = DRM_MODE_DPMS_OFF;
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
-
mutex_init(&aconnector->hpd_lock);
/* configure support HPD hot plug connector_>polled default value is 0
@@ -3459,9 +3503,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
switch (connector_type) {
case DRM_MODE_CONNECTOR_HDMIA:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@ -3614,10 +3662,17 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link,
link_index);
- drm_mode_connector_attach_encoder(
+ drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
drm_connector_register(&aconnector->base);
+#if defined(CONFIG_DEBUG_FS)
+ res = connector_debugfs_init(aconnector);
+ if (res) {
+ DRM_ERROR("Failed to create debugfs for connector");
+ goto out_free;
+ }
+#endif
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -3914,8 +3969,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
/* Flip */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- /* update crtc fb */
- crtc->primary->fb = fb;
WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
WARN_ON(!acrtc_state->stream);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d5aa89ad5571..a29dc35954c9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -72,13 +72,11 @@ struct irq_list_head {
struct work_struct work;
};
-#if defined(CONFIG_DRM_AMD_DC_FBC)
struct dm_comressor_info {
void *cpu_addr;
struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr;
};
-#endif
struct amdgpu_display_manager {
@@ -129,9 +127,8 @@ struct amdgpu_display_manager {
* Caches device atomic state for suspend/resume
*/
struct drm_atomic_state *cached_state;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
struct dm_comressor_info compressor;
-#endif
};
struct amdgpu_dm_connector {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index b329393307e5..326f6fb7e0bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -231,18 +231,21 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
* preparation for hardware commit. If no lut is specified by user, we default
* to SRGB degamma.
*
- * Currently, we only support degamma bypass, or preprogrammed SRGB degamma.
- * Programmable degamma is not supported, and an attempt to do so will return
- * -EINVAL.
+ * We support degamma bypass, predefined SRGB, and custom degamma.
*
* RETURNS:
- * 0 on success, -EINVAL if custom degamma curve is given.
+ * 0 on success
+ * -EINVAL if crtc_state has a degamma_lut of invalid size
+ * -ENOMEM if gamma allocation fails
*/
int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
struct dc_plane_state *dc_plane_state)
{
struct drm_property_blob *blob = crtc_state->degamma_lut;
struct drm_color_lut *lut;
+ uint32_t lut_size;
+ struct dc_gamma *gamma;
+ bool ret;
if (!blob) {
/* Default to SRGB */
@@ -258,11 +261,30 @@ int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
return 0;
}
- /* Otherwise, assume SRGB, since programmable degamma is not
- * supported.
- */
- dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
- return -EINVAL;
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ lut_size = blob->length / sizeof(struct drm_color_lut);
+ gamma->num_entries = lut_size;
+ if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES) {
+ gamma->type = GAMMA_CUSTOM;
+ } else {
+ dc_gamma_release(&gamma);
+ return -EINVAL;
+ }
+
+ __drm_lut_to_dc_gamma(lut, gamma, false);
+
+ dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ ret = mod_color_calculate_degamma_params(dc_plane_state->in_transfer_func, gamma, true);
+ dc_gamma_release(&gamma);
+ if (!ret) {
+ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ DRM_ERROR("Out of memory when calculating degamma params\n");
+ return -ENOMEM;
+ }
+
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 52f2c01349e3..9bfb040352e9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -98,10 +98,16 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
*/
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
- struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
- struct dc_stream_state *stream_state = crtc_state->stream;
+ struct dm_crtc_state *crtc_state;
+ struct dc_stream_state *stream_state;
uint32_t crcs[3];
+ if (crtc == NULL)
+ return;
+
+ crtc_state = to_dm_crtc_state(crtc->state);
+ stream_state = crtc_state->stream;
+
/* Early return if CRC capture is not enabled. */
if (!crtc_state->crc_enabled)
return;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
new file mode 100644
index 000000000000..0d9e410ca01e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/debugfs.h>
+
+#include "dc.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_debugfs.h"
+
+/* function description
+ * get/set DP configuration: lane_count, link_rate, spread_spectrum
+ *
+ * valid lane count values: 1, 2, 4
+ * valid link rate values:
+ * 06h = 1.62Gbps per lane
+ * 0Ah = 2.7Gbps per lane
+ * 0Ch = 3.24Gbps per lane
+ * 14h = 5.4Gbps per lane
+ * 1Eh = 8.1Gbps per lane
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
+ *
+ * --- to get the dp configuration
+ *
+ * cat link_settings
+ *
+ * It will list the current, verified, reported and preferred dp configuration.
+ * current -- for the current video mode
+ * verified --- maximum configuration that passed link training
+ * reported --- DP rx reported caps (DPCD registers 0, 1, 2)
+ * preferred --- user-forced settings
+ *
+ * --- set (or force) the dp configuration
+ *
+ * echo <lane_count> <link_rate> > link_settings
+ *
+ * for example, to force 4 lanes at 2.7 Gbps per lane:
+ * echo 4 0xa > link_settings
+ *
+ * spread_spectrum cannot be changed dynamically.
+ *
+ * If an invalid lane count or link rate is forced, no HW programming will be
+ * done. Check the link settings after a force operation to see whether the
+ * HW was programmed:
+ *
+ * cat link_settings
+ *
+ * and compare the current and preferred settings.
+ *
+ */
+static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ const uint32_t rd_buf_size = 100;
+ uint32_t result = 0;
+ int r;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ /* bound each write by the remaining space; snprintf NUL-terminates */
+ rd_buf_ptr += snprintf(rd_buf_ptr, rd_buf_size - (rd_buf_ptr - rd_buf),
+ "Current: %d %d %d ",
+ link->cur_link_settings.lane_count,
+ link->cur_link_settings.link_rate,
+ link->cur_link_settings.link_spread);
+
+ rd_buf_ptr += snprintf(rd_buf_ptr, rd_buf_size - (rd_buf_ptr - rd_buf),
+ "Verified: %d %d %d ",
+ link->verified_link_cap.lane_count,
+ link->verified_link_cap.link_rate,
+ link->verified_link_cap.link_spread);
+
+ rd_buf_ptr += snprintf(rd_buf_ptr, rd_buf_size - (rd_buf_ptr - rd_buf),
+ "Reported: %d %d %d ",
+ link->reported_link_cap.lane_count,
+ link->reported_link_cap.link_rate,
+ link->reported_link_cap.link_spread);
+
+ snprintf(rd_buf_ptr, rd_buf_size - (rd_buf_ptr - rd_buf),
+ "Preferred: %d %d %d\n",
+ link->preferred_link_setting.lane_count,
+ link->preferred_link_setting.link_rate,
+ link->preferred_link_setting.link_spread);
+
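+ /* copy the formatted buffer back to user space one byte at a time */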
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf); /* don't leak the read buffer on fault */
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
+ struct dc_link_settings prefer_link_settings;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ const uint32_t wr_buf_size = 40;
+ int r;
+ int bytes_from_user;
+ char *sub_str;
+ /* 0: lane_count; 1: link_rate */
+ uint8_t param_index = 0;
+ long param[2];
+ const char delimiter[3] = {' ', '\n', '\0'};
+ bool valid_input = false;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOMEM;
+ wr_buf_ptr = wr_buf;
+
+ /* copy at most wr_buf_size - 1 bytes so the zeroed buffer stays
+ * NUL-terminated for the parsing below
+ */
+ r = copy_from_user(wr_buf_ptr, buf,
+ min_t(size_t, wr_buf_size - 1, size));
+
+ /* r is the number of bytes that could not be copied */
+ if (r) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data could not be read\n");
+ return -EINVAL;
+ }
+
+ bytes_from_user = min_t(size_t, wr_buf_size - 1, size);
+
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+
+ while ((*wr_buf_ptr != '\0') && (param_index < 2)) {
+
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+ }
+
+ switch (param[0]) {
+ case LANE_COUNT_ONE:
+ case LANE_COUNT_TWO:
+ case LANE_COUNT_FOUR:
+ valid_input = true;
+ break;
+ default:
+ break;
+ }
+
+ switch (param[1]) {
+ case LINK_RATE_LOW:
+ case LINK_RATE_HIGH:
+ case LINK_RATE_RBR2:
+ case LINK_RATE_HIGH2:
+ case LINK_RATE_HIGH3:
+ valid_input = true;
+ break;
+ default:
+ break;
+ }
+
+ if (!valid_input) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n");
+ return bytes_from_user;
+ }
+
+ /* save user force lane_count, link_rate to preferred settings
+ * spread spectrum will not be changed
+ */
+ prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+ prefer_link_settings.lane_count = param[0];
+ prefer_link_settings.link_rate = param[1];
+
+ dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
+
+ kfree(wr_buf);
+ return bytes_from_user;
+}
+
+/* function: get current DP PHY settings: voltage swing, pre-emphasis,
+ * post-cursor2 (defined by VESA DP specification)
+ *
+ * valid values
+ * voltage swing: 0,1,2,3
+ * pre-emphasis : 0,1,2,3
+ * post cursor2 : 0,1,2,3
+ *
+ *
+ * how to use this debugfs
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * there will be directories like DP-1, DP-2, DP-3, etc. for each DP display
+ *
+ * To figure out which DP-x maps to the display to be checked,
+ * cd DP-x
+ * ls -ll
+ * There should be debugfs files, like link_settings and phy_settings.
+ * cat link_settings
+ * and use lane_count and link_rate to figure out which DP-x matches the
+ * display to be worked on
+ *
+ * To get current DP PHY settings,
+ * cat phy_settings
+ *
+ * To change DP PHY settings,
+ * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
+ * for example, to change voltage swing to 2, pre-emphasis to 3, post_cursor2
+ * to 0:
+ * echo 2 3 0 > phy_settings
+ *
+ * To check whether the change was applied, get the current phy settings by
+ * cat phy_settings
+ *
+ * In case invalid values are set by user, like
+ * echo 1 4 0 > phy_settings
+ *
+ * HW will NOT be programmed by these settings.
+ * cat phy_settings will show the previous valid settings.
+ */
+static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *rd_buf = NULL;
+ const uint32_t rd_buf_size = 20;
+ uint32_t result = 0;
+ int r;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ if (!rd_buf)
+ return -ENOMEM;
+
+ snprintf(rd_buf, rd_buf_size, " %d %d %d ",
+ link->cur_lane_setting.VOLTAGE_SWING,
+ link->cur_lane_setting.PRE_EMPHASIS,
+ link->cur_lane_setting.POST_CURSOR2);
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user((*(rd_buf + result)), buf);
+ if (r) {
+ kfree(rd_buf); /* don't leak the read buffer on fault */
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_size = 40;
+ int r;
+ int bytes_from_user;
+ char *sub_str;
+ uint8_t param_index = 0;
+ long param[3];
+ const char delimiter[3] = {' ', '\n', '\0'};
+ bool use_prefer_link_setting;
+ struct link_training_settings link_lane_settings;
+
+ if (size == 0)
+ return 0;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOMEM;
+ wr_buf_ptr = wr_buf;
+
+ /* copy at most wr_buf_size - 1 bytes so the zeroed buffer stays
+ * NUL-terminated for the parsing below
+ */
+ r = copy_from_user(wr_buf_ptr, buf,
+ min_t(size_t, wr_buf_size - 1, size));
+
+ /* r is the number of bytes that could not be copied */
+ if (r) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data could not be read\n");
+ return 0;
+ }
+
+ bytes_from_user = min_t(size_t, wr_buf_size - 1, size);
+
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+
+ while ((*wr_buf_ptr != '\0') && (param_index < 3)) {
+
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+ }
+
+ if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
+ (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
+ (param[2] > POST_CURSOR2_MAX_LEVEL)) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Input No HW will be programmed\n");
+ return bytes_from_user;
+ }
+
+ /* get link settings: lane count, link rate */
+ use_prefer_link_setting =
+ ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
+ (link->test_pattern_enabled));
+
+ memset(&link_lane_settings, 0, sizeof(link_lane_settings));
+
+ if (use_prefer_link_setting) {
+ link_lane_settings.link_settings.lane_count =
+ link->preferred_link_setting.lane_count;
+ link_lane_settings.link_settings.link_rate =
+ link->preferred_link_setting.link_rate;
+ link_lane_settings.link_settings.link_spread =
+ link->preferred_link_setting.link_spread;
+ } else {
+ link_lane_settings.link_settings.lane_count =
+ link->cur_link_settings.lane_count;
+ link_lane_settings.link_settings.link_rate =
+ link->cur_link_settings.link_rate;
+ link_lane_settings.link_settings.link_spread =
+ link->cur_link_settings.link_spread;
+ }
+
+ /* apply phy settings from user */
+ for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
+ link_lane_settings.lane_settings[r].VOLTAGE_SWING =
+ (enum dc_voltage_swing) (param[0]);
+ link_lane_settings.lane_settings[r].PRE_EMPHASIS =
+ (enum dc_pre_emphasis) (param[1]);
+ link_lane_settings.lane_settings[r].POST_CURSOR2 =
+ (enum dc_post_cursor2) (param[2]);
+ }
+
+ /* program ASIC registers and DPCD registers */
+ dc_link_set_drive_settings(dc, &link_lane_settings, link);
+
+ kfree(wr_buf);
+ return bytes_from_user;
+}
+
+/* function description
+ *
+ * set PHY layer or Link layer test pattern
+ * PHY test pattern is used for PHY SI check.
+ * Link layer test will not affect PHY SI.
+ *
+ * Reset Test Pattern:
+ * 0 = DP_TEST_PATTERN_VIDEO_MODE
+ *
+ * PHY test pattern supported:
+ * 1 = DP_TEST_PATTERN_D102
+ * 2 = DP_TEST_PATTERN_SYMBOL_ERROR
+ * 3 = DP_TEST_PATTERN_PRBS7
+ * 4 = DP_TEST_PATTERN_80BIT_CUSTOM
+ * 5 = DP_TEST_PATTERN_CP2520_1
+ * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
+ * 7 = DP_TEST_PATTERN_CP2520_3
+ *
+ * DP PHY Link Training Patterns
+ * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
+ * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
+ * a = DP_TEST_PATTERN_TRAINING_PATTERN3
+ * b = DP_TEST_PATTERN_TRAINING_PATTERN4
+ *
+ * DP Link Layer Test pattern
+ * c = DP_TEST_PATTERN_COLOR_SQUARES
+ * d = DP_TEST_PATTERN_COLOR_SQUARES_CEA
+ * e = DP_TEST_PATTERN_VERTICAL_BARS
+ * f = DP_TEST_PATTERN_HORIZONTAL_BARS
+ * 10= DP_TEST_PATTERN_COLOR_RAMP
+ *
+ * debugfs test_pattern is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * --- set test pattern
+ * echo <test pattern #> > test_pattern
+ *
+ * If test pattern # is not supported, NO HW programming will be done.
+ * For DP_TEST_PATTERN_80BIT_CUSTOM, 10 extra bytes of data are needed
+ * for the user pattern; the 10 input bytes are separated by spaces
+ *
+ * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > test_pattern
+ *
+ * --- reset test pattern
+ * echo 0 > test_pattern
+ *
+ * --- HPD detection is disabled when a PHY test pattern is set
+ *
+ * when a PHY test pattern (pattern # within [1,7]) is set, the HPD pin of the
+ * HW ASIC is disabled. The user can unplug the DP display from the DP
+ * connector and attach a scope to check the test pattern PHY SI.
+ * If the scope needs to be unplugged and the DP display plugged back in,
+ * do the steps below:
+ * echo 0 > test_pattern
+ * unplug scope
+ * plug DP display.
+ *
+ * "echo 0 > test_pattern" re-enables the HPD pin so that the video sw
+ * driver can detect "unplug scope" and "plug DP display"
+ */
+static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_size = 100;
+ uint32_t wr_buf_count = 0;
+ int r;
+ int bytes_from_user;
+ char *sub_str = NULL;
+ uint8_t param_index = 0;
+ uint8_t param_nums = 0;
+ long param[11] = {0x0};
+ const char delimiter[3] = {' ', '\n', '\0'};
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ bool disable_hpd = false;
+ bool valid_test_pattern = false;
+ /* init with default 80bit custom pattern */
+ uint8_t custom_pattern[10] = {
+ 0x1f, 0x7c, 0xf0, 0xc1, 0x07,
+ 0x1f, 0x7c, 0xf0, 0xc1, 0x07
+ };
+ struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct link_training_settings link_training_settings;
+ int i;
+
+ if (size == 0)
+ return 0;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOMEM;
+ wr_buf_ptr = wr_buf;
+
+ /* copy at most wr_buf_size - 1 bytes so the zeroed buffer stays
+ * NUL-terminated for the parsing below
+ */
+ r = copy_from_user(wr_buf_ptr, buf,
+ min_t(size_t, wr_buf_size - 1, size));
+
+ /* r is the number of bytes that could not be copied */
+ if (r) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data could not be read\n");
+ return 0;
+ }
+
+ bytes_from_user = min_t(size_t, wr_buf_size - 1, size);
+
+ /* check number of parameters. isspace cannot tell a space from a '\n' */
+ while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
+ /* skip space*/
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+
+ /* skip non-space*/
+ while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ param_nums++;
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+ }
+
+ /* max 11 parameters */
+ if (param_nums > 11)
+ param_nums = 11;
+
+ wr_buf_ptr = wr_buf; /* reset buf pointer */
+ wr_buf_count = 0; /* number of char already checked */
+
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ while (param_index < param_nums) {
+ /* after strsep, wr_buf_ptr will be moved to after space */
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ }
+
+ test_pattern = param[0];
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ valid_test_pattern = true;
+ break;
+
+ case DP_TEST_PATTERN_D102:
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ case DP_TEST_PATTERN_PRBS7:
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
+ case DP_TEST_PATTERN_TRAINING_PATTERN4:
+ disable_hpd = true;
+ valid_test_pattern = true;
+ break;
+
+ default:
+ valid_test_pattern = false;
+ test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ break;
+ }
+
+ if (!valid_test_pattern) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
+ return bytes_from_user;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
+ for (i = 0; i < 10; i++) {
+ if ((uint8_t) param[i + 1] != 0x0)
+ break;
+ }
+
+ if (i < 10) {
+ /* a non-zero byte was supplied: use the user pattern, not the default */
+ for (i = 0; i < 10; i++)
+ custom_pattern[i] = (uint8_t) param[i + 1];
+ }
+ }
+
+ /* Usage: set a DP physical test pattern using debugfs with a normal DP
+ * panel attached, then unplug the panel and connect a scope to measure.
+ * Normal video mode and test patterns generated from the CRTC are
+ * visible to the user, so do not disable HPD for them.
+ * Video Mode is also set to clear the test pattern, so enable HPD
+ * because it might have been disabled after a test pattern was set.
+ * AUX depends on HPD (sequence dependent, do not move!)
+ */
+ if (!disable_hpd)
+ dc_link_enable_hpd(link);
+
+ prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
+ prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
+ prefer_link_settings.link_spread = link->verified_link_cap.link_spread;
+
+ cur_link_settings.lane_count = link->cur_link_settings.lane_count;
+ cur_link_settings.link_rate = link->cur_link_settings.link_rate;
+ cur_link_settings.link_spread = link->cur_link_settings.link_spread;
+
+ link_training_settings.link_settings = cur_link_settings;
+
+
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
+ prefer_link_settings.link_rate != LINK_RATE_UNKNOWN &&
+ (prefer_link_settings.lane_count != cur_link_settings.lane_count ||
+ prefer_link_settings.link_rate != cur_link_settings.link_rate))
+ link_training_settings.link_settings = prefer_link_settings;
+ }
+
+ for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
+ link_training_settings.lane_settings[i] = link->cur_lane_setting;
+
+ dc_link_set_test_pattern(
+ link,
+ test_pattern,
+ &link_training_settings,
+ custom_pattern,
+ 10);
+
+ /* Usage: Set a DP physical test pattern with a normal DP panel attached,
+ * then unplug the panel and connect a scope to measure the DP PHY signal.
+ * The HPD interrupt must be disabled so the SW driver does not disable the
+ * DP output. This is done after the test pattern is set.
+ */
+ if (valid_test_pattern && disable_hpd)
+ dc_link_disable_hpd(link);
+
+ kfree(wr_buf);
+
+ return bytes_from_user;
+}
+
+static const struct file_operations dp_link_settings_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_link_settings_read,
+ .write = dp_link_settings_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_phy_settings_debugfs_fop = {
+ .owner = THIS_MODULE,
+ .read = dp_phy_settings_read,
+ .write = dp_phy_settings_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_phy_test_pattern_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_phy_test_pattern_debugfs_write,
+ .llseek = default_llseek
+};
+
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+} dp_debugfs_entries[] = {
+ {"link_settings", &dp_link_settings_debugfs_fops},
+ {"phy_settings", &dp_phy_settings_debugfs_fop},
+ {"test_pattern", &dp_phy_test_pattern_fops}
+};
+
+int connector_debugfs_init(struct amdgpu_dm_connector *connector)
+{
+ int i;
+ struct dentry *ent, *dir = connector->base.debugfs_entry;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
+ ent = debugfs_create_file(dp_debugfs_entries[i].name,
+ 0644,
+ dir,
+ connector,
+ dp_debugfs_entries[i].fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ }
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index b43315cc5d58..d9ed1b2aa811 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,18 +19,16 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: AMD
+ *
*/
-#ifndef PP_POWERSOURCE_H
-#define PP_POWERSOURCE_H
+#ifndef __AMDGPU_DM_DEBUGFS_H__
+#define __AMDGPU_DM_DEBUGFS_H__
-enum pp_power_source {
- PP_PowerSource_AC = 0,
- PP_PowerSource_DC,
- PP_PowerSource_LimitedPower,
- PP_PowerSource_LimitedPower_2,
- PP_PowerSource_Max
-};
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+int connector_debugfs_init(struct amdgpu_dm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index ec304b1a5973..8403b6a9a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -169,6 +169,11 @@ static void get_payload_table(
mutex_unlock(&mst_mgr->payload_lock);
}
+void dm_helpers_dp_update_branch_info(
+ struct dc_context *ctx,
+ const struct dc_link *link)
+{}
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -454,6 +459,22 @@ bool dm_helpers_submit_i2c(
return result;
}
+bool dm_helpers_is_dp_sink_present(struct dc_link *link)
+{
+ bool dp_sink_present;
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ BUG_ON("Failed to found connector for link!");
+ return true;
+ }
+
+ mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
+ dp_sink_present = dc_link_is_dp_sink_present(link);
+ mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
+ return dp_sink_present;
+}
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
@@ -497,6 +518,34 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
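+ /* DP compliance (assumed): if the sink requested an EDID read test,
+ * ack it by writing back the checksum of the last EDID block.
+ */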
+ if (link->aux_mode) {
+ union test_request test_request = { {0} };
+ union test_response test_response = { {0} };
+
+ dm_helpers_dp_read_dpcd(ctx,
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+
+ if (!test_request.bits.EDID_READ)
+ return edid_status;
+
+ test_response.bits.EDID_CHECKSUM_WRITE = 1;
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_EDID_CHECKSUM,
+ &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
+ 1);
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+
+ }
return edid_status;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 4304d9e408b8..9a300732ba37 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -80,55 +80,72 @@ static void log_dpcd(uint8_t type,
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
- enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
- I2C_MOT_TRUE : I2C_MOT_FALSE;
- enum ddc_result res;
- uint32_t read_bytes = msg->size;
+ ssize_t result = 0;
+ enum i2caux_transaction_action action;
+ enum aux_transaction_type type;
if (WARN_ON(msg->size > 16))
return -E2BIG;
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
- res = dal_ddc_service_read_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- false,
- I2C_MOT_UNDEF,
- msg->address,
- msg->buffer,
- msg->size,
- &read_bytes);
+ type = AUX_TRANSACTION_TYPE_DP;
+ action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+
+ result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
break;
case DP_AUX_NATIVE_WRITE:
- res = dal_ddc_service_write_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- false,
- I2C_MOT_UNDEF,
- msg->address,
- msg->buffer,
- msg->size);
+ type = AUX_TRANSACTION_TYPE_DP;
+ action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+
+ dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ result = msg->size;
break;
case DP_AUX_I2C_READ:
- res = dal_ddc_service_read_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- true,
- mot,
- msg->address,
- msg->buffer,
- msg->size,
- &read_bytes);
+ type = AUX_TRANSACTION_TYPE_I2C;
+ if (msg->request & DP_AUX_I2C_MOT)
+ action = I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
+ else
+ action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
+
+ result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
break;
case DP_AUX_I2C_WRITE:
- res = dal_ddc_service_write_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- true,
- mot,
- msg->address,
- msg->buffer,
- msg->size);
+ type = AUX_TRANSACTION_TYPE_I2C;
+ if (msg->request & DP_AUX_I2C_MOT)
+ action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
+ else
+ action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+
+ dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ result = msg->size;
break;
default:
- return 0;
+ return -EINVAL;
}
#ifdef TRACE_DPCD
@@ -139,9 +156,10 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
r == DDC_RESULT_SUCESSFULL);
#endif
- if (res != DDC_RESULT_SUCESSFULL)
- return -EIO;
- return read_bytes;
+ if (result < 0) /* DC doesn't know about kernel error codes */
+ result = -EIO;
+
+ return result;
}
static enum drm_connector_status
@@ -233,7 +251,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
if (!edid) {
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
&aconnector->base,
NULL);
return ret;
@@ -261,7 +279,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
connector, aconnector->edid);
}
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
&aconnector->base, aconnector->edid);
ret = drm_add_edid_modes(connector, aconnector->edid);
@@ -345,7 +363,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector, connector->base.id, aconnector->mst_port);
aconnector->port = port;
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
drm_connector_list_iter_end(&conn_iter);
aconnector->mst_connected = true;
@@ -393,7 +411,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
dev->mode_config.tile_property,
0);
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
/*
* Initialize connector state before adding the connectror to drm and
@@ -441,7 +459,7 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
{
mutex_lock(&connector->dev->mode_config.mutex);
- drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+ drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
mutex_unlock(&connector->dev->mode_config.mutex);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
new file mode 100644
index 000000000000..fbe878ae1e8c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_pm.h"
+#include "dm_pp_smu.h"
+
+
+bool dm_pp_apply_display_requirements(
+ const struct dc_context *ctx,
+ const struct dm_pp_display_configuration *pp_display_cfg)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ int i;
+
+ if (adev->pm.dpm_enabled) {
+
+ memset(&adev->pm.pm_display_cfg, 0,
+ sizeof(adev->pm.pm_display_cfg));
+
+ adev->pm.pm_display_cfg.cpu_cc6_disable =
+ pp_display_cfg->cpu_cc6_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_disable =
+ pp_display_cfg->cpu_pstate_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+ pp_display_cfg->cpu_pstate_separation_time;
+
+ adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+ pp_display_cfg->nb_pstate_switch_disable;
+
+ adev->pm.pm_display_cfg.num_display =
+ pp_display_cfg->display_count;
+ adev->pm.pm_display_cfg.num_path_including_non_display =
+ pp_display_cfg->display_count;
+
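+ /* powerplay clock fields below are in 10 kHz units, hence the /10 */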
+ adev->pm.pm_display_cfg.min_core_set_clock =
+ pp_display_cfg->min_engine_clock_khz/10;
+ adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_mem_set_clock =
+ pp_display_cfg->min_memory_clock_khz/10;
+
+ adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_dcef_set_clk =
+ pp_display_cfg->min_dcfclock_khz/10;
+
+ adev->pm.pm_display_cfg.multi_monitor_in_sync =
+ pp_display_cfg->all_displays_in_sync;
+ adev->pm.pm_display_cfg.min_vblank_time =
+ pp_display_cfg->avail_mclk_switch_time_us;
+
+ adev->pm.pm_display_cfg.display_clk =
+ pp_display_cfg->disp_clk_khz/10;
+
+ adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+
+ adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+ adev->pm.pm_display_cfg.line_time_in_us =
+ pp_display_cfg->line_time_in_us;
+
+ adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+ adev->pm.pm_display_cfg.crossfire_display_index = -1;
+ adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
+ for (i = 0; i < pp_display_cfg->display_count; i++) {
+ const struct dm_pp_single_disp_config *dc_cfg =
+ &pp_display_cfg->disp_configs[i];
+ adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ }
+
+ /* TODO: complete implementation of
+ * pp_display_configuration_change().
+ * Follow example of:
+ * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+ * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+
+ /* TODO: replace by a separate call to 'apply display cfg'? */
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+ return true;
+}
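Side note on units: the string of /10 conversions above reflects that powerplay's pm_display_cfg stores clocks in 10 kHz units while DM works in kHz. A minimal sketch of that convention (the helper name is illustrative, not part of the driver):

#include <linux/types.h>

/* DM passes kHz; powerplay's pm_display_cfg wants 10 kHz units. */
static inline u32 khz_to_10khz(u32 khz)
{
	return khz / 10;
}

/* e.g. min_engine_clock_khz = 300000 (300 MHz) is stored as 30000 */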
+
+static void get_default_clock_levels(
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *clks)
+{
+ uint32_t disp_clks_in_khz[6] = {
+ 300000, 400000, 496560, 626090, 685720, 757900 };
+ uint32_t sclks_in_khz[6] = {
+ 300000, 360000, 423530, 514290, 626090, 720000 };
+ uint32_t mclks_in_khz[2] = { 333000, 800000 };
+
+ switch (clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, disp_clks_in_khz,
+ sizeof(disp_clks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, sclks_in_khz,
+ sizeof(sclks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ clks->num_levels = 2;
+ memmove(clks->clocks_in_khz, mclks_in_khz,
+ sizeof(mclks_in_khz));
+ break;
+ default:
+ clks->num_levels = 0;
+ break;
+ }
+}
+
+static enum amd_pp_clock_type dc_to_pp_clock_type(
+ enum dm_pp_clock_type dm_pp_clk_type)
+{
+ enum amd_pp_clock_type amd_pp_clk_type = 0;
+
+ switch (dm_pp_clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ amd_pp_clk_type = amd_pp_disp_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ amd_pp_clk_type = amd_pp_sys_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ amd_pp_clk_type = amd_pp_mem_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DCEFCLK:
+ amd_pp_clk_type = amd_pp_dcef_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DCFCLK:
+ amd_pp_clk_type = amd_pp_dcf_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ amd_pp_clk_type = amd_pp_pixel_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_FCLK:
+ amd_pp_clk_type = amd_pp_f_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ amd_pp_clk_type = amd_pp_phy_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DPPCLK:
+ amd_pp_clk_type = amd_pp_dpp_clock;
+ break;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+ dm_pp_clk_type);
+ break;
+ }
+
+ return amd_pp_clk_type;
+}
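This translator sits on every path where DM forwards a DC clock query or request to powerplay; a quick hedged usage sketch:

enum amd_pp_clock_type pp_type;

pp_type = dc_to_pp_clock_type(DM_PP_CLOCK_TYPE_FCLK);
/* pp_type == amd_pp_f_clock; an unrecognized input logs a DRM_ERROR
 * and falls through with the zero-initialized value */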
+
+static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
+ enum PP_DAL_POWERLEVEL max_clocks_state)
+{
+ switch (max_clocks_state) {
+ case PP_DAL_POWERLEVEL_0:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
+ case PP_DAL_POWERLEVEL_1:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
+ case PP_DAL_POWERLEVEL_2:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
+ case PP_DAL_POWERLEVEL_3:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
+ case PP_DAL_POWERLEVEL_4:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
+ case PP_DAL_POWERLEVEL_5:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
+ case PP_DAL_POWERLEVEL_6:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
+ case PP_DAL_POWERLEVEL_7:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
+ max_clocks_state);
+ return DM_PP_CLOCKS_STATE_INVALID;
+ }
+}
+
+static void pp_to_dc_clock_levels(
+ const struct amd_pp_clocks *pp_clks,
+ struct dm_pp_clock_levels *dc_clks,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->count,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ dc_clks->num_levels = pp_clks->count;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+ dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
+ }
+}
+
+static void pp_to_dc_clock_levels_with_latency(
+ const struct pp_clock_levels_with_latency *pp_clks,
+ struct dm_pp_clock_levels_with_latency *clk_level_info,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->num_levels,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ clk_level_info->num_levels = pp_clks->num_levels;
+
+ DRM_DEBUG("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+ DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
+ }
+}
+
+static void pp_to_dc_clock_levels_with_voltage(
+ const struct pp_clock_levels_with_voltage *pp_clks,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->num_levels,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ clk_level_info->num_levels = pp_clks->num_levels;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
+ }
+}
+
+bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *dc_clks)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct amd_pp_clocks pp_clks = { 0 };
+ struct amd_pp_simple_clock_info validation_clks = { 0 };
+ uint32_t i;
+
+ if (adev->powerplay.pp_funcs->get_clock_by_type) {
+ if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
+ dc_to_pp_clock_type(clk_type), &pp_clks)) {
+ /* Error in pplib. Provide default values. */
+ get_default_clock_levels(clk_type, dc_clks);
+ return true;
+ }
+ }
+
+ pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
+ pp_handle, &validation_clks)) {
+ /* Error in pplib. Provide default values. */
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
+ }
+
+ DRM_INFO("DM_PPLIB: Validation clocks:\n");
+ DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
+ validation_clks.engine_max_clock);
+ DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
+ validation_clks.memory_max_clock);
+ DRM_INFO("DM_PPLIB: level : %d\n",
+ validation_clks.level);
+
+ /* Translate from units of 10 kHz to kHz. */
+ validation_clks.engine_max_clock *= 10;
+ validation_clks.memory_max_clock *= 10;
+
+ /* Determine the highest non-boosted level from the Validation Clocks */
+ if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+ /* This clock is higher than the validation clock.
+ * That means the previous one is the highest
+ * non-boosted one. */
+ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+ DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
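The two trimming loops above implement a single rule: keep every level at or below the validation maximum, but never drop below one level. A self-contained sketch under that reading (the function name is made up):

#include <linux/types.h>

/* Keep levels at or below max_khz; always keep at least one level. */
static u32 trim_boosted_levels(const u32 *clocks_in_khz, u32 num_levels,
			       u32 max_khz)
{
	u32 i;

	for (i = 0; i < num_levels; i++) {
		if (clocks_in_khz[i] > max_khz)
			return i > 0 ? i : 1;
	}
	return num_levels;
}

/* e.g. {300000, 600000, 720000} with max_khz = 650000 keeps 2 levels */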
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct pp_clock_levels_with_latency pp_clks = { 0 };
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
+ return false;
+
+ if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks))
+ return false;
+
+ pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
+
+ return true;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct pp_clock_levels_with_voltage pp_clk_info = {0};
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clk_info))
+ return false;
+
+ pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
+
+ return true;
+}
+
+bool dm_pp_notify_wm_clock_changes(
+ const struct dc_context *ctx,
+ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_power_level_change_request(
+ const struct dc_context *ctx,
+ struct dm_pp_power_level_change_request *level_change_req)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct pp_display_clock_request pp_clock_request = {0};
+ int ret = 0;
+
+ pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
+ pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
+
+ if (!pp_clock_request.clock_type)
+ return false;
+
+ if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+ ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
+ adev->powerplay.pp_handle,
+ &pp_clock_request);
+ if (ret)
+ return false;
+ return true;
+}
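A rough caller-side sketch (the clock value is invented and a struct dc *dc is assumed in scope):

struct dm_pp_clock_for_voltage_req req = {
	.clk_type = DM_PP_CLOCK_TYPE_DCFCLK,
	.clocks_in_khz = 600000,	/* 600 MHz, made-up value */
};

if (!dm_pp_apply_clock_for_voltage_request(dc->ctx, &req))
	DRM_ERROR("clock-for-voltage request failed\n");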
+
+bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct amd_pp_clock_info pp_clk_info = {0};
+ int ret = 0;
+
+ if (adev->powerplay.pp_funcs->get_current_clocks)
+ ret = adev->powerplay.pp_funcs->get_current_clocks(
+ adev->powerplay.pp_handle,
+ &pp_clk_info);
+ if (ret)
+ return false;
+
+ static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
+ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
+ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
+
+ return true;
+}
+
+void pp_rv_set_display_requirement(struct pp_smu *pp,
+ struct pp_smu_display_requirement_rv *req)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->display_configuration_changed)
+ return;
+
+ amdgpu_dpm_display_configuration_changed(adev);
+}
+
+void pp_rv_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
+ int32_t i;
+
+ wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+ wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ if (ranges->reader_wm_sets[i].wm_inst > 3)
+ wm_dce_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_dce_clocks[i].wm_set_id =
+ ranges->reader_wm_sets[i].wm_inst;
+ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].max_drain_clk_khz;
+ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].min_drain_clk_khz;
+ wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].max_fill_clk_khz;
+ wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].min_fill_clk_khz;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ if (ranges->writer_wm_sets[i].wm_inst > 3)
+ wm_soc_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_soc_clocks[i].wm_set_id =
+ ranges->writer_wm_sets[i].wm_inst;
+ wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_khz;
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_khz;
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].max_drain_clk_khz;
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].min_drain_clk_khz;
+ }
+
+ if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges)
+ return;
+
+ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
+}
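Each reader (DMIF) set maps drain clocks to DCFCLK bounds and fill clocks to memory-clock bounds; writer (MCIF) sets mirror that with SOCCLK. One worked reader entry, assuming pp_smu_wm_set_range is the element type behind reader_wm_sets[] (values invented):

struct pp_smu_wm_set_range r = {
	.wm_inst = 1,			/* <= 3, so wm_set_id stays 1 */
	.min_drain_clk_khz = 400000,	/* -> wm_min_dcfclk_clk_in_khz */
	.max_drain_clk_khz = 600000,	/* -> wm_max_dcfclk_clk_in_khz */
	.min_fill_clk_khz = 800000,	/* -> wm_min_mem_clk_in_khz */
	.max_fill_clk_khz = 1600000,	/* -> wm_max_mem_clk_in_khz */
};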
+
+void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
+ return;
+
+ pp_funcs->notify_smu_enable_pwe(pp_handle);
+}
+
+void dm_pp_get_funcs_rv(
+ struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs)
+{
+ funcs->pp_smu.ctx = ctx;
+ funcs->set_display_requirement = pp_rv_set_display_requirement;
+ funcs->set_wm_ranges = pp_rv_set_wm_ranges;
+ funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+}
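DC's Raven path consumes the filled-in table through these hooks; a hedged sketch of the consumer side (struct dc *dc assumed in scope):

struct pp_smu_funcs_rv funcs = {};

dm_pp_get_funcs_rv(dc->ctx, &funcs);

if (funcs.set_pme_wa_enable)
	funcs.set_pme_wa_enable(&funcs.pp_smu);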
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 5a3346124a01..516795342dd2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -35,13 +35,7 @@
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
-unsigned long long dm_get_timestamp(struct dc_context *ctx)
-{
- struct timespec64 time;
- getrawmonotonic64(&time);
- return timespec64_to_ns(&time);
-}
unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
unsigned long long current_time_stamp,
@@ -80,326 +74,3 @@ bool dm_read_persistent_data(struct dc_context *ctx,
/**** power component interfaces ****/
-bool dm_pp_apply_display_requirements(
- const struct dc_context *ctx,
- const struct dm_pp_display_configuration *pp_display_cfg)
-{
- struct amdgpu_device *adev = ctx->driver_context;
-
- if (adev->pm.dpm_enabled) {
-
- memset(&adev->pm.pm_display_cfg, 0,
- sizeof(adev->pm.pm_display_cfg));
-
- adev->pm.pm_display_cfg.cpu_cc6_disable =
- pp_display_cfg->cpu_cc6_disable;
-
- adev->pm.pm_display_cfg.cpu_pstate_disable =
- pp_display_cfg->cpu_pstate_disable;
-
- adev->pm.pm_display_cfg.cpu_pstate_separation_time =
- pp_display_cfg->cpu_pstate_separation_time;
-
- adev->pm.pm_display_cfg.nb_pstate_switch_disable =
- pp_display_cfg->nb_pstate_switch_disable;
-
- adev->pm.pm_display_cfg.num_display =
- pp_display_cfg->display_count;
- adev->pm.pm_display_cfg.num_path_including_non_display =
- pp_display_cfg->display_count;
-
- adev->pm.pm_display_cfg.min_core_set_clock =
- pp_display_cfg->min_engine_clock_khz/10;
- adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
- pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
- adev->pm.pm_display_cfg.min_mem_set_clock =
- pp_display_cfg->min_memory_clock_khz/10;
-
- adev->pm.pm_display_cfg.multi_monitor_in_sync =
- pp_display_cfg->all_displays_in_sync;
- adev->pm.pm_display_cfg.min_vblank_time =
- pp_display_cfg->avail_mclk_switch_time_us;
-
- adev->pm.pm_display_cfg.display_clk =
- pp_display_cfg->disp_clk_khz/10;
-
- adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
-
- adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
- adev->pm.pm_display_cfg.line_time_in_us =
- pp_display_cfg->line_time_in_us;
-
- adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
- adev->pm.pm_display_cfg.crossfire_display_index = -1;
- adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
-
- /* TODO: complete implementation of
- * pp_display_configuration_change().
- * Follow example of:
- * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
- * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
-
- /* TODO: replace by a separate call to 'apply display cfg'? */
- amdgpu_pm_compute_clocks(adev);
- }
-
- return true;
-}
-
-static void get_default_clock_levels(
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels *clks)
-{
- uint32_t disp_clks_in_khz[6] = {
- 300000, 400000, 496560, 626090, 685720, 757900 };
- uint32_t sclks_in_khz[6] = {
- 300000, 360000, 423530, 514290, 626090, 720000 };
- uint32_t mclks_in_khz[2] = { 333000, 800000 };
-
- switch (clk_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- clks->num_levels = 6;
- memmove(clks->clocks_in_khz, disp_clks_in_khz,
- sizeof(disp_clks_in_khz));
- break;
- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
- clks->num_levels = 6;
- memmove(clks->clocks_in_khz, sclks_in_khz,
- sizeof(sclks_in_khz));
- break;
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- clks->num_levels = 2;
- memmove(clks->clocks_in_khz, mclks_in_khz,
- sizeof(mclks_in_khz));
- break;
- default:
- clks->num_levels = 0;
- break;
- }
-}
-
-static enum amd_pp_clock_type dc_to_pp_clock_type(
- enum dm_pp_clock_type dm_pp_clk_type)
-{
- enum amd_pp_clock_type amd_pp_clk_type = 0;
-
- switch (dm_pp_clk_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- amd_pp_clk_type = amd_pp_disp_clock;
- break;
- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
- amd_pp_clk_type = amd_pp_sys_clock;
- break;
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- amd_pp_clk_type = amd_pp_mem_clock;
- break;
- default:
- DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
- dm_pp_clk_type);
- break;
- }
-
- return amd_pp_clk_type;
-}
-
-static void pp_to_dc_clock_levels(
- const struct amd_pp_clocks *pp_clks,
- struct dm_pp_clock_levels *dc_clks,
- enum dm_pp_clock_type dc_clk_type)
-{
- uint32_t i;
-
- if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
- DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
- pp_clks->count,
- DM_PP_MAX_CLOCK_LEVELS);
-
- dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
- } else
- dc_clks->num_levels = pp_clks->count;
-
- DRM_INFO("DM_PPLIB: values for %s clock\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
-
- for (i = 0; i < dc_clks->num_levels; i++) {
- DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
- /* translate 10kHz to kHz */
- dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
- }
-}
-
-static void pp_to_dc_clock_levels_with_latency(
- const struct pp_clock_levels_with_latency *pp_clks,
- struct dm_pp_clock_levels_with_latency *clk_level_info,
- enum dm_pp_clock_type dc_clk_type)
-{
- uint32_t i;
-
- if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
- DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
- pp_clks->num_levels,
- DM_PP_MAX_CLOCK_LEVELS);
-
- clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
- } else
- clk_level_info->num_levels = pp_clks->num_levels;
-
- DRM_DEBUG("DM_PPLIB: values for %s clock\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
-
- for (i = 0; i < clk_level_info->num_levels; i++) {
- DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
- clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
- clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
- }
-}
-
-bool dm_pp_get_clock_levels_by_type(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels *dc_clks)
-{
- struct amdgpu_device *adev = ctx->driver_context;
- void *pp_handle = adev->powerplay.pp_handle;
- struct amd_pp_clocks pp_clks = { 0 };
- struct amd_pp_simple_clock_info validation_clks = { 0 };
- uint32_t i;
-
- if (adev->powerplay.pp_funcs->get_clock_by_type) {
- if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
- dc_to_pp_clock_type(clk_type), &pp_clks)) {
- /* Error in pplib. Provide default values. */
- get_default_clock_levels(clk_type, dc_clks);
- return true;
- }
- }
-
- pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
-
- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
- pp_handle, &validation_clks)) {
- /* Error in pplib. Provide default values. */
- DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
- validation_clks.engine_max_clock = 72000;
- validation_clks.memory_max_clock = 80000;
- validation_clks.level = 0;
- }
- }
-
- DRM_INFO("DM_PPLIB: Validation clocks:\n");
- DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
- validation_clks.engine_max_clock);
- DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
- validation_clks.memory_max_clock);
- DRM_INFO("DM_PPLIB: level : %d\n",
- validation_clks.level);
-
- /* Translate 10 kHz to kHz. */
- validation_clks.engine_max_clock *= 10;
- validation_clks.memory_max_clock *= 10;
-
- /* Determine the highest non-boosted level from the Validation Clocks */
- if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
- for (i = 0; i < dc_clks->num_levels; i++) {
- if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
- /* This clock is higher the validation clock.
- * Than means the previous one is the highest
- * non-boosted one. */
- DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
- dc_clks->num_levels, i);
- dc_clks->num_levels = i > 0 ? i : 1;
- break;
- }
- }
- } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
- for (i = 0; i < dc_clks->num_levels; i++) {
- if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
- DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
- dc_clks->num_levels, i);
- dc_clks->num_levels = i > 0 ? i : 1;
- break;
- }
- }
- }
-
- return true;
-}
-
-bool dm_pp_get_clock_levels_by_type_with_latency(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels_with_latency *clk_level_info)
-{
- struct amdgpu_device *adev = ctx->driver_context;
- void *pp_handle = adev->powerplay.pp_handle;
- struct pp_clock_levels_with_latency pp_clks = { 0 };
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
- return false;
-
- if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
- dc_to_pp_clock_type(clk_type),
- &pp_clks))
- return false;
-
- pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
-
- return true;
-}
-
-bool dm_pp_get_clock_levels_by_type_with_voltage(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels_with_voltage *clk_level_info)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_notify_wm_clock_changes(
- const struct dc_context *ctx,
- struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_apply_power_level_change_request(
- const struct dc_context *ctx,
- struct dm_pp_power_level_change_request *level_change_req)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_apply_clock_for_voltage_request(
- const struct dc_context *ctx,
- struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_get_static_clocks(
- const struct dc_context *ctx,
- struct dm_pp_static_clock_info *static_clk_info)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-void dm_pp_get_funcs_rv(
- struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs)
-{}
-
-/**** end of power component interfaces ****/
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index aed538a4d1ba..532a515fda9a 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -25,7 +25,7 @@
DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
DC_LIBS += dcn10 dml
endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index b49ea96b5dae..a50a76471107 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -25,7 +25,7 @@
# subcomponents.
BASICS = conversion.o fixpt31_32.o \
- logger.o log_helpers.o vector.o
+ log_helpers.o vector.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
index 021451549ff7..26583f346c39 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -28,75 +28,12 @@
#include "include/logger_interface.h"
#include "dm_helpers.h"
-#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
-
-struct dc_signal_type_info {
- enum signal_type type;
- char name[MAX_NAME_LEN];
-};
-
-static const struct dc_signal_type_info signal_type_info_tbl[] = {
- {SIGNAL_TYPE_NONE, "NC"},
- {SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
- {SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
- {SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
- {SIGNAL_TYPE_LVDS, "LVDS"},
- {SIGNAL_TYPE_RGB, "VGA"},
- {SIGNAL_TYPE_DISPLAY_PORT, "DP"},
- {SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
- {SIGNAL_TYPE_EDP, "eDP"},
- {SIGNAL_TYPE_VIRTUAL, "Virtual"}
-};
-
-void dc_conn_log(struct dc_context *ctx,
- const struct dc_link *link,
- uint8_t *hex_data,
- int hex_data_count,
- enum dc_log_type event,
- const char *msg,
- ...)
+void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
{
int i;
- va_list args;
- struct log_entry entry = { 0 };
- enum signal_type signal;
-
- if (link->local_sink)
- signal = link->local_sink->sink_signal;
- else
- signal = link->connector_signal;
-
- if (link->type == dc_connection_mst_branch)
- signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
-
- dm_logger_open(ctx->logger, &entry, event);
-
- for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
- if (signal == signal_type_info_tbl[i].type)
- break;
-
- if (i == NUM_ELEMENTS(signal_type_info_tbl))
- goto fail;
-
- dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
- signal_type_info_tbl[i].name,
- link->link_index);
-
- va_start(args, msg);
- dm_logger_append_va(&entry, msg, args);
-
- if (entry.buf_offset > 0 &&
- entry.buf[entry.buf_offset - 1] == '\n')
- entry.buf_offset--;
if (hex_data)
for (i = 0; i < hex_data_count; i++)
- dm_logger_append(&entry, "%2.2X ", hex_data[i]);
-
- dm_logger_append(&entry, "^\n");
-
-fail:
- dm_logger_close(&entry);
-
- va_end(args);
+ DC_LOG_DEBUG("%2.2X ", hex_data[i]);
}
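Callers hand this helper a raw DPCD/EDID byte run; for instance (bytes invented):

static const u8 dpcd[] = { 0x11, 0x0a, 0x84, 0x01 };

dc_conn_log_hex_linux(dpcd, ARRAY_SIZE(dpcd));
/* logs "11 " "0A " "84 " "01 " through DC_LOG_DEBUG */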
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
deleted file mode 100644
index 0866874ae8c6..000000000000
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "dm_services.h"
-#include "include/logger_interface.h"
-#include "logger.h"
-
-
-#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
-
-static const struct dc_log_type_info log_type_info_tbl[] = {
- {LOG_ERROR, "Error"},
- {LOG_WARNING, "Warning"},
- {LOG_DEBUG, "Debug"},
- {LOG_DC, "DC_Interface"},
- {LOG_SURFACE, "Surface"},
- {LOG_HW_HOTPLUG, "HW_Hotplug"},
- {LOG_HW_LINK_TRAINING, "HW_LKTN"},
- {LOG_HW_SET_MODE, "HW_Mode"},
- {LOG_HW_RESUME_S3, "HW_Resume"},
- {LOG_HW_AUDIO, "HW_Audio"},
- {LOG_HW_HPD_IRQ, "HW_HPDIRQ"},
- {LOG_MST, "MST"},
- {LOG_SCALER, "Scaler"},
- {LOG_BIOS, "BIOS"},
- {LOG_BANDWIDTH_CALCS, "BWCalcs"},
- {LOG_BANDWIDTH_VALIDATION, "BWValidation"},
- {LOG_I2C_AUX, "I2C_AUX"},
- {LOG_SYNC, "Sync"},
- {LOG_BACKLIGHT, "Backlight"},
- {LOG_FEATURE_OVERRIDE, "Override"},
- {LOG_DETECTION_EDID_PARSER, "Edid"},
- {LOG_DETECTION_DP_CAPS, "DP_Caps"},
- {LOG_RESOURCE, "Resource"},
- {LOG_DML, "DML"},
- {LOG_EVENT_MODE_SET, "Mode"},
- {LOG_EVENT_DETECTION, "Detect"},
- {LOG_EVENT_LINK_TRAINING, "LKTN"},
- {LOG_EVENT_LINK_LOSS, "LinkLoss"},
- {LOG_EVENT_UNDERFLOW, "Underflow"},
- {LOG_IF_TRACE, "InterfaceTrace"},
- {LOG_DTN, "DTN"},
- {LOG_DISPLAYSTATS, "DisplayStats"}
-};
-
-
-/* ----------- Object init and destruction ----------- */
-static bool construct(struct dc_context *ctx, struct dal_logger *logger,
- uint32_t log_mask)
-{
- /* malloc buffer and init offsets */
- logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
- logger->log_buffer = kcalloc(logger->log_buffer_size, sizeof(char),
- GFP_KERNEL);
- if (!logger->log_buffer)
- return false;
-
- /* Initialize both offsets to start of buffer (empty) */
- logger->buffer_read_offset = 0;
- logger->buffer_write_offset = 0;
-
- logger->open_count = 0;
-
- logger->flags.bits.ENABLE_CONSOLE = 1;
- logger->flags.bits.ENABLE_BUFFER = 0;
-
- logger->ctx = ctx;
-
- logger->mask = log_mask;
-
- return true;
-}
-
-static void destruct(struct dal_logger *logger)
-{
- if (logger->log_buffer) {
- kfree(logger->log_buffer);
- logger->log_buffer = NULL;
- }
-}
-
-struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
-{
- /* malloc struct */
- struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
- GFP_KERNEL);
-
- if (!logger)
- return NULL;
- if (!construct(ctx, logger, log_mask)) {
- kfree(logger);
- return NULL;
- }
-
- return logger;
-}
-
-uint32_t dal_logger_destroy(struct dal_logger **logger)
-{
- if (logger == NULL || *logger == NULL)
- return 1;
- destruct(*logger);
- kfree(*logger);
- *logger = NULL;
-
- return 0;
-}
-
-/* ------------------------------------------------------------------------ */
-
-
-static bool dal_logger_should_log(
- struct dal_logger *logger,
- enum dc_log_type log_type)
-{
- if (logger->mask & (1 << log_type))
- return true;
-
- return false;
-}
-
-static void log_to_debug_console(struct log_entry *entry)
-{
- struct dal_logger *logger = entry->logger;
-
- if (logger->flags.bits.ENABLE_CONSOLE == 0)
- return;
-
- if (entry->buf_offset) {
- switch (entry->type) {
- case LOG_ERROR:
- dm_error("%s", entry->buf);
- break;
- default:
- dm_output_to_console("%s", entry->buf);
- break;
- }
- }
-}
-
-/* Print everything unread existing in log_buffer to debug console*/
-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
-{
- char *string_start = &logger->log_buffer[logger->buffer_read_offset];
-
- if (should_warn)
- dm_output_to_console(
- "---------------- FLUSHING LOG BUFFER ----------------\n");
- while (logger->buffer_read_offset < logger->buffer_write_offset) {
-
- if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
- dm_output_to_console("%s", string_start);
- string_start = logger->log_buffer + logger->buffer_read_offset + 1;
- }
- logger->buffer_read_offset++;
- }
- if (should_warn)
- dm_output_to_console(
- "-------------- END FLUSHING LOG BUFFER --------------\n\n");
-}
-
-static void log_to_internal_buffer(struct log_entry *entry)
-{
-
- uint32_t size = entry->buf_offset;
- struct dal_logger *logger = entry->logger;
-
- if (logger->flags.bits.ENABLE_BUFFER == 0)
- return;
-
- if (logger->log_buffer == NULL)
- return;
-
- if (size > 0 && size < logger->log_buffer_size) {
-
- int buffer_space = logger->log_buffer_size -
- logger->buffer_write_offset;
-
- if (logger->buffer_write_offset == logger->buffer_read_offset) {
- /* Buffer is empty, start writing at beginning */
- buffer_space = logger->log_buffer_size;
- logger->buffer_write_offset = 0;
- logger->buffer_read_offset = 0;
- }
-
- if (buffer_space > size) {
- /* No wrap around, copy 'size' bytes
- * from 'entry->buf' to 'log_buffer'
- */
- memmove(logger->log_buffer +
- logger->buffer_write_offset,
- entry->buf, size);
- logger->buffer_write_offset += size;
-
- } else {
- /* Not enough room remaining, we should flush
- * existing logs */
-
- /* Flush existing unread logs to console */
- dm_logger_flush_buffer(logger, true);
-
- /* Start writing to beginning of buffer */
- memmove(logger->log_buffer, entry->buf, size);
- logger->buffer_write_offset = size;
- logger->buffer_read_offset = 0;
- }
-
- }
-}
-
-static void log_heading(struct log_entry *entry)
-{
- int j;
-
- for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
-
- const struct dc_log_type_info *info = &log_type_info_tbl[j];
-
- if (info->type == entry->type)
- dm_logger_append(entry, "[%s]\t", info->name);
- }
-}
-
-static void append_entry(
- struct log_entry *entry,
- char *buffer,
- uint32_t buf_size)
-{
- if (!entry->buf ||
- entry->buf_offset + buf_size > entry->max_buf_bytes
- ) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- /* Todo: check if off by 1 byte due to \0 anywhere */
- memmove(entry->buf + entry->buf_offset, buffer, buf_size);
- entry->buf_offset += buf_size;
-}
-
-/* ------------------------------------------------------------------------ */
-
-/* Warning: Be careful that 'msg' is null terminated and the total size is
- * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
- */
-void dm_logger_write(
- struct dal_logger *logger,
- enum dc_log_type log_type,
- const char *msg,
- ...)
-{
- if (logger && dal_logger_should_log(logger, log_type)) {
- uint32_t size;
- va_list args;
- char buffer[LOG_MAX_LINE_SIZE];
- struct log_entry entry;
-
- va_start(args, msg);
-
- entry.logger = logger;
-
- entry.buf = buffer;
-
- entry.buf_offset = 0;
- entry.max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
-
- entry.type = log_type;
-
- log_heading(&entry);
-
- size = dm_log_to_buffer(
- buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
-
- buffer[entry.buf_offset + size] = '\0';
- entry.buf_offset += size + 1;
-
- /* --Flush log_entry buffer-- */
- /* print to kernel console */
- log_to_debug_console(&entry);
- /* log internally for dsat */
- log_to_internal_buffer(&entry);
-
- va_end(args);
- }
-}
-
-/* Same as dm_logger_write, except without open() and close(), which must
- * be done separately.
- */
-void dm_logger_append(
- struct log_entry *entry,
- const char *msg,
- ...)
-{
- va_list args;
-
- va_start(args, msg);
- dm_logger_append_va(entry, msg, args);
- va_end(args);
-}
-
-void dm_logger_append_va(
- struct log_entry *entry,
- const char *msg,
- va_list args)
-{
- struct dal_logger *logger;
-
- if (!entry) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- logger = entry->logger;
-
- if (logger && logger->open_count > 0 &&
- dal_logger_should_log(logger, entry->type)) {
-
- uint32_t size;
- char buffer[LOG_MAX_LINE_SIZE];
-
- size = dm_log_to_buffer(
- buffer, LOG_MAX_LINE_SIZE, msg, args);
-
- if (size < LOG_MAX_LINE_SIZE - 1) {
- append_entry(entry, buffer, size);
- } else {
- append_entry(entry, "LOG_ERROR, line too long\n", 27);
- }
- }
-}
-
-void dm_logger_open(
- struct dal_logger *logger,
- struct log_entry *entry, /* out */
- enum dc_log_type log_type)
-{
- if (!entry) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- entry->type = log_type;
- entry->logger = logger;
-
- entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE,
- GFP_KERNEL);
-
- entry->buf_offset = 0;
- entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
-
- logger->open_count++;
-
- log_heading(entry);
-}
-
-void dm_logger_close(struct log_entry *entry)
-{
- struct dal_logger *logger = entry->logger;
-
- if (logger && logger->open_count > 0) {
- logger->open_count--;
- } else {
- BREAK_TO_DEBUGGER();
- goto cleanup;
- }
-
- /* --Flush log_entry buffer-- */
- /* print to kernel console */
- log_to_debug_console(entry);
- /* log internally for dsat */
- log_to_internal_buffer(entry);
-
- /* TODO: Write end heading */
-
-cleanup:
- if (entry->buf) {
- kfree(entry->buf);
- entry->buf = NULL;
- entry->buf_offset = 0;
- entry->max_buf_bytes = 0;
- }
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index c7f0b27e457e..be8a2494355a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -3762,6 +3762,200 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
+static enum bp_result update_slot_layout_info(
+ struct dc_bios *dcb,
+ unsigned int i,
+ struct slot_layout_info *slot_layout_info,
+ unsigned int record_offset)
+{
+ unsigned int j;
+ struct bios_parser *bp;
+ ATOM_BRACKET_LAYOUT_RECORD *record;
+ ATOM_COMMON_RECORD_HEADER *record_header;
+ enum bp_result result = BP_RESULT_NORECORD;
+
+ bp = BP_FROM_DCB(dcb);
+ record = NULL;
+ record_header = NULL;
+
+ for (;;) {
+
+ record_header = (ATOM_COMMON_RECORD_HEADER *)
+ GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
+ if (record_header == NULL) {
+ result = BP_RESULT_BADBIOSTABLE;
+ break;
+ }
+
+ /* the end of the list */
+ if (record_header->ucRecordType == 0xff ||
+ record_header->ucRecordSize == 0) {
+ break;
+ }
+
+ if (record_header->ucRecordType ==
+ ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
+ sizeof(ATOM_BRACKET_LAYOUT_RECORD)
+ <= record_header->ucRecordSize) {
+ record = (ATOM_BRACKET_LAYOUT_RECORD *)
+ (record_header);
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ record_offset += record_header->ucRecordSize;
+ }
+
+ /* return if the record was not found */
+ if (result != BP_RESULT_OK)
+ return result;
+
+ /* get slot sizes */
+ slot_layout_info->length = record->ucLength;
+ slot_layout_info->width = record->ucWidth;
+
+ /* get info for each connector in the slot */
+ slot_layout_info->num_of_connectors = record->ucConnNum;
+ for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
+ slot_layout_info->connectors[j].connector_type =
+ (enum connector_layout_type)
+ (record->asConnInfo[j].ucConnectorType);
+ switch (record->asConnInfo[j].ucConnectorType) {
+ case CONNECTOR_TYPE_DVI_D:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DVI_D;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DVI;
+ break;
+
+ case CONNECTOR_TYPE_HDMI:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_HDMI;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_HDMI;
+ break;
+
+ case CONNECTOR_TYPE_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DP;
+ break;
+
+ case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_MINI_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_MINI_DP;
+ break;
+
+ default:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_UNKNOWN;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_UNKNOWN;
+ }
+
+ slot_layout_info->connectors[j].position =
+ record->asConnInfo[j].ucPosition;
+ slot_layout_info->connectors[j].connector_id =
+ object_id_from_bios_object_id(
+ record->asConnInfo[j].usConnectorObjectId);
+ }
+ return result;
+}
+
+static enum bp_result get_bracket_layout_record(
+ struct dc_bios *dcb,
+ unsigned int bracket_layout_id,
+ struct slot_layout_info *slot_layout_info)
+{
+ unsigned int i;
+ unsigned int record_offset;
+ struct bios_parser *bp;
+ enum bp_result result;
+ ATOM_OBJECT *object;
+ ATOM_OBJECT_TABLE *object_table;
+ unsigned int genericTableOffset;
+
+ bp = BP_FROM_DCB(dcb);
+ object = NULL;
+ if (slot_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+
+ genericTableOffset = bp->object_info_tbl_offset +
+ bp->object_info_tbl.v1_3->usMiscObjectTableOffset;
+ object_table = (ATOM_OBJECT_TABLE *)
+ GET_IMAGE(ATOM_OBJECT_TABLE, genericTableOffset);
+ if (!object_table)
+ return BP_RESULT_FAILURE;
+
+ result = BP_RESULT_NORECORD;
+ for (i = 0; i < object_table->ucNumberOfObjects; ++i) {
+
+ if (bracket_layout_id ==
+ object_table->asObjects[i].usObjectID) {
+
+ object = &object_table->asObjects[i];
+ record_offset = object->usRecordOffset +
+ bp->object_info_tbl_offset;
+
+ result = update_slot_layout_info(dcb, i,
+ slot_layout_info, record_offset);
+ break;
+ }
+ }
+ return result;
+}
+
+static enum bp_result bios_get_board_layout_info(
+ struct dc_bios *dcb,
+ struct board_layout_info *board_layout_info)
+{
+ unsigned int i;
+ struct bios_parser *bp;
+ enum bp_result record_result;
+
+ const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
+ 0, 0
+ };
+
+ bp = BP_FROM_DCB(dcb);
+ if (board_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+
+ board_layout_info->num_of_slots = 0;
+
+ for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+ record_result = get_bracket_layout_record(dcb,
+ slot_index_to_vbios_id[i],
+ &board_layout_info->slots[i]);
+
+ if (record_result == BP_RESULT_NORECORD && i > 0)
+ break; /* no more slots present in bios */
+ else if (record_result != BP_RESULT_OK)
+ return record_result; /* fail */
+
+ ++board_layout_info->num_of_slots;
+ }
+
+ /* all data is valid */
+ board_layout_info->is_number_of_slots_valid = 1;
+ board_layout_info->is_slots_size_valid = 1;
+ board_layout_info->is_connector_offsets_valid = 1;
+ board_layout_info->is_connector_lengths_valid = 1;
+
+ return BP_RESULT_OK;
+}
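Through the vbios function table extended below, a consumer could query the layout roughly as follows (error handling trimmed; dcb is assumed to be the struct dc_bios pointer):

struct board_layout_info layout = {};

if (dcb->funcs->get_board_layout_info &&
    dcb->funcs->get_board_layout_info(dcb, &layout) == BP_RESULT_OK)
	DRM_INFO("vbios reports %u bracket slot(s)\n",
		 layout.num_of_slots);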
+
/******************************************************************************/
static const struct dc_vbios_funcs vbios_funcs = {
@@ -3836,6 +4030,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
.post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */
.bios_parser_destroy = bios_parser_destroy,
+
+ .get_board_layout_info = bios_get_board_layout_info,
};
static bool bios_parser_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index b8cef7af3c4a..eab007e1793c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -43,6 +43,29 @@
#include "bios_parser_interface.h"
#include "bios_parser_common.h"
+
+/* Temporarily add in defines until ObjectID.h patch is updated in a few days */
+#ifndef GENERIC_OBJECT_ID_BRACKET_LAYOUT
+#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
+#endif /* GENERIC_OBJECT_ID_BRACKET_LAYOUT */
+
+#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 */
+
+#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 */
+
+#define DC_LOGGER \
+ bp->base.ctx->logger
+
#define LAST_RECORD_TYPE 0xff
#define SMU9_SYSPLL0_ID 0
@@ -86,7 +109,6 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
-
static void destruct(struct bios_parser *bp)
{
kfree(bp->base.bios_local_image);
@@ -656,7 +678,7 @@ static enum bp_result bios_parser_get_gpio_pin_info(
return BP_RESULT_BADBIOSTABLE;
if (sizeof(struct atom_common_table_header) +
- sizeof(struct atom_gpio_pin_lut_v2_1)
+ sizeof(struct atom_gpio_pin_assignment)
> le16_to_cpu(header->table_header.structuresize))
return BP_RESULT_BADBIOSTABLE;
@@ -1854,6 +1876,198 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
+static enum bp_result update_slot_layout_info(
+ struct dc_bios *dcb,
+ unsigned int i,
+ struct slot_layout_info *slot_layout_info)
+{
+ unsigned int record_offset;
+ unsigned int j;
+ struct atom_display_object_path_v2 *object;
+ struct atom_bracket_layout_record *record;
+ struct atom_common_record_header *record_header;
+ enum bp_result result;
+ struct bios_parser *bp;
+ struct object_info_table *tbl;
+ struct display_object_info_table_v1_4 *v1_4;
+
+ record = NULL;
+ record_header = NULL;
+ result = BP_RESULT_NORECORD;
+
+ bp = BP_FROM_DCB(dcb);
+ tbl = &bp->object_info_tbl;
+ v1_4 = tbl->v1_4;
+
+ object = &v1_4->display_path[i];
+ record_offset = (unsigned int)
+ (object->disp_recordoffset) +
+ (unsigned int)(bp->object_info_tbl_offset);
+
+ for (;;) {
+
+ record_header = (struct atom_common_record_header *)
+ GET_IMAGE(struct atom_common_record_header,
+ record_offset);
+ if (record_header == NULL) {
+ result = BP_RESULT_BADBIOSTABLE;
+ break;
+ }
+
+ /* the end of the list */
+ if (record_header->record_type == 0xff ||
+ record_header->record_size == 0) {
+ break;
+ }
+
+ if (record_header->record_type ==
+ ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
+ sizeof(struct atom_bracket_layout_record)
+ <= record_header->record_size) {
+ record = (struct atom_bracket_layout_record *)
+ (record_header);
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ record_offset += record_header->record_size;
+ }
+
+ /* return if the record was not found */
+ if (result != BP_RESULT_OK)
+ return result;
+
+ /* get slot sizes */
+ slot_layout_info->length = record->bracketlen;
+ slot_layout_info->width = record->bracketwidth;
+
+ /* get info for each connector in the slot */
+ slot_layout_info->num_of_connectors = record->conn_num;
+ for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
+ slot_layout_info->connectors[j].connector_type =
+ (enum connector_layout_type)
+ (record->conn_info[j].connector_type);
+ switch (record->conn_info[j].connector_type) {
+ case CONNECTOR_TYPE_DVI_D:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DVI_D;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DVI;
+ break;
+
+ case CONNECTOR_TYPE_HDMI:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_HDMI;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_HDMI;
+ break;
+
+ case CONNECTOR_TYPE_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DP;
+ break;
+
+ case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_MINI_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_MINI_DP;
+ break;
+
+ default:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_UNKNOWN;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_UNKNOWN;
+ }
+
+ slot_layout_info->connectors[j].position =
+ record->conn_info[j].position;
+ slot_layout_info->connectors[j].connector_id =
+ object_id_from_bios_object_id(
+ record->conn_info[j].connectorobjid);
+ }
+ return result;
+}
+
+static enum bp_result get_bracket_layout_record(
+ struct dc_bios *dcb,
+ unsigned int bracket_layout_id,
+ struct slot_layout_info *slot_layout_info)
+{
+ unsigned int i;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result;
+ struct object_info_table *tbl;
+ struct display_object_info_table_v1_4 *v1_4;
+
+ if (slot_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+ tbl = &bp->object_info_tbl;
+ v1_4 = tbl->v1_4;
+
+ result = BP_RESULT_NORECORD;
+ for (i = 0; i < v1_4->number_of_path; ++i) {
+
+ if (bracket_layout_id ==
+ v1_4->display_path[i].display_objid) {
+ result = update_slot_layout_info(dcb, i,
+ slot_layout_info);
+ break;
+ }
+ }
+ return result;
+}
+
+static enum bp_result bios_get_board_layout_info(
+ struct dc_bios *dcb,
+ struct board_layout_info *board_layout_info)
+{
+ unsigned int i;
+ struct bios_parser *bp;
+ enum bp_result record_result;
+
+ const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
+ 0, 0
+ };
+
+ bp = BP_FROM_DCB(dcb);
+ if (board_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+
+ board_layout_info->num_of_slots = 0;
+
+ for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+ record_result = get_bracket_layout_record(dcb,
+ slot_index_to_vbios_id[i],
+ &board_layout_info->slots[i]);
+
+ if (record_result == BP_RESULT_NORECORD && i > 0)
+ break; /* no more slots present in bios */
+ else if (record_result != BP_RESULT_OK)
+ return record_result; /* fail */
+
+ ++board_layout_info->num_of_slots;
+ }
+
+ /* all data is valid */
+ board_layout_info->is_number_of_slots_valid = 1;
+ board_layout_info->is_slots_size_valid = 1;
+ board_layout_info->is_connector_offsets_valid = 1;
+ board_layout_info->is_connector_lengths_valid = 1;
+
+ return BP_RESULT_OK;
+}
+
static const struct dc_vbios_funcs vbios_funcs = {
.get_connectors_number = bios_parser_get_connectors_number,
@@ -1925,6 +2139,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = firmware_parser_destroy,
.get_smu_clock_info = bios_parser_get_smu_clock_info,
+
+ .get_board_layout_info = bios_get_board_layout_info,
};
static bool bios_parser_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 651e1fd4622f..a558bfaa0c46 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -808,6 +808,24 @@ static enum bp_result transmitter_control_v1_5(
* (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
* LVDS mode: usPixelClock = pixel clock
*/
+ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24);
+ break;
+ case COLOR_DEPTH_121212:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24);
+ break;
+ case COLOR_DEPTH_161616:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24);
+ break;
+ default:
+ break;
+ }
+ }
if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
result = BP_RESULT_OK;
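The multipliers follow HDMI deep-color TMDS rules: the symbol clock scales by bpp/24 relative to the 24 bpp pixel clock. The same arithmetic as a sketch, assuming the table's usual 10 kHz units (helper name invented):

/* TMDS symbol clock for HDMI deep color: pixel clock * bpp / 24 */
static u32 hdmi_deep_color_sym_clk(u32 pix_clk_10khz, u32 bpp)
{
	return pix_clk_10khz * bpp / 24;
}

/* e.g. 14850 (148.5 MHz in 10 kHz units) at 36 bpp -> 22275 (222.75 MHz) */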
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 752b08a42d3e..2b5dc499a35e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -59,36 +59,7 @@
bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname))
-static void init_dig_encoder_control(struct bios_parser *bp);
-static void init_transmitter_control(struct bios_parser *bp);
-static void init_set_pixel_clock(struct bios_parser *bp);
-static void init_set_crtc_timing(struct bios_parser *bp);
-
-static void init_select_crtc_source(struct bios_parser *bp);
-static void init_enable_crtc(struct bios_parser *bp);
-
-static void init_external_encoder_control(struct bios_parser *bp);
-static void init_enable_disp_power_gating(struct bios_parser *bp);
-static void init_set_dce_clock(struct bios_parser *bp);
-static void init_get_smu_clock_info(struct bios_parser *bp);
-
-void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
-{
- init_dig_encoder_control(bp);
- init_transmitter_control(bp);
- init_set_pixel_clock(bp);
-
- init_set_crtc_timing(bp);
-
- init_select_crtc_source(bp);
- init_enable_crtc(bp);
-
- init_external_encoder_control(bp);
- init_enable_disp_power_gating(bp);
- init_set_dce_clock(bp);
- init_get_smu_clock_info(bp);
-}
static uint32_t bios_cmd_table_para_revision(void *dev,
uint32_t index)
@@ -829,3 +800,20 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
return 0;
}
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+ init_dig_encoder_control(bp);
+ init_transmitter_control(bp);
+ init_set_pixel_clock(bp);
+
+ init_set_crtc_timing(bp);
+
+ init_select_crtc_source(bp);
+ init_enable_crtc(bp);
+
+ init_external_encoder_control(bp);
+ init_enable_disp_power_gating(bp);
+ init_set_dce_clock(bp);
+ init_get_smu_clock_info(bp);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index bbbcef566c55..770ff89ba7e1 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -55,7 +55,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_11_22:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 95f332ee3e7e..416500e51b8d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -38,7 +38,7 @@ CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
index fc3f98fb09ea..62435bfc274d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
+++ b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
@@ -25,10 +25,9 @@
#ifndef _CALCS_CALCS_LOGGER_H_
#define _CALCS_CALCS_LOGGER_H_
-#define DC_LOGGER \
- logger
+#define DC_LOGGER ctx->logger
-static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)
+static void print_bw_calcs_dceip(struct dc_context *ctx, const struct bw_calcs_dceip *dceip)
{
DC_LOG_BANDWIDTH_CALCS("#####################################################################");
@@ -122,7 +121,7 @@ static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calc
}
-static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)
+static void print_bw_calcs_vbios(struct dc_context *ctx, const struct bw_calcs_vbios *vbios)
{
DC_LOG_BANDWIDTH_CALCS("#####################################################################");
@@ -181,7 +180,7 @@ static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calc
}
-static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data)
+static void print_bw_calcs_data(struct dc_context *ctx, struct bw_calcs_data *data)
{
int i, j, k;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 2c4e8f0cb2dc..160d11a15eac 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3010,9 +3010,9 @@ bool bw_calcs(struct dc_context *ctx,
struct bw_fixed low_yclk = vbios->low_yclk;
if (ctx->dc->debug.bandwidth_calcs_trace) {
- print_bw_calcs_dceip(ctx->logger, dceip);
- print_bw_calcs_vbios(ctx->logger, vbios);
- print_bw_calcs_data(ctx->logger, data);
+ print_bw_calcs_dceip(ctx, dceip);
+ print_bw_calcs_vbios(ctx, vbios);
+ print_bw_calcs_data(ctx, data);
}
calculate_bandwidth(dceip, vbios, data);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 49a4ea45466d..bd039322f697 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -31,6 +31,8 @@
#include "resource.h"
#include "dcn10/dcn10_resource.h"
+#include "dcn10/dcn10_hubbub.h"
+
#include "dcn_calc_math.h"
#define DC_LOGGER \
@@ -248,7 +250,24 @@ static void pipe_ctx_to_e2e_pipe_params (
else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
input->src.is_hsplit = true;
- input->src.dcc = pipe->plane_state->dcc.enable;
+ if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) {
+ /*
+ * this method requires us to always re-calculate the watermarks
+ * whenever DCC changes between flips.
+ */
+ input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0;
+ } else {
+ /*
+ * allows us to disable DCC on the fly without re-calculating the WM
+ *
+ * the extra overhead for DCC is quite small: for 1080p, the WM without
+ * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
+ */
+ unsigned int bpe;
+
+ input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs->
+ dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0;
+ }
input->src.dcc_rate = 1;
input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch;
input->src.source_scan = dm_horz;
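
The two DCC policies above reduce to a single predicate. A sketch, not part of the patch, built only from the fields and the HUBBUB capability query visible in the hunk:

/* what the watermark input assumes about DCC under each policy */
static bool wm_input_assumes_dcc(const struct dc *dc,
		const struct dc_plane_state *plane_state)
{
	unsigned int bpe; /* bytes per element, ignored here */

	if (dc->debug.optimized_watermark)
		/* trust live DCC state; WM must be recalculated on DCC flips */
		return plane_state->dcc.enable;

	/* conservative: assume DCC for any format the HUBBUB can compress */
	return dc->res_pool->hubbub->funcs->dcc_support_pixel_format(
			plane_state->format, &bpe);
}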
@@ -423,6 +442,10 @@ static void dcn_bw_calc_rq_dlg_ttu(
int total_flip_bytes = 0;
int i;
+ memset(dlg_regs, 0, sizeof(*dlg_regs));
+ memset(ttu_regs, 0, sizeof(*ttu_regs));
+ memset(rq_regs, 0, sizeof(*rq_regs));
+
for (i = 0; i < number_of_planes; i++) {
total_active_bw += v->read_bandwidth[i];
total_prefetch_bw += v->prefetch_bandwidth[i];
@@ -501,6 +524,7 @@ static void split_stream_across_pipes(
resource_build_scaling_params(secondary_pipe);
}
+#if 0
static void calc_wm_sets_and_perf_params(
struct dc_state *context,
struct dcn_bw_internal_vars *v)
@@ -582,6 +606,7 @@ static void calc_wm_sets_and_perf_params(
if (v->voltage_level >= 3)
context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
}
+#endif
static bool dcn_bw_apply_registry_override(struct dc *dc)
{
@@ -651,7 +676,7 @@ static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
}
static void hack_bounding_box(struct dcn_bw_internal_vars *v,
- struct dc_debug *dbg,
+ struct dc_debug_options *dbg,
struct dc_state *context)
{
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
@@ -883,7 +908,26 @@ bool dcn_validate_bandwidth(
ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
|| v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
}
- v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+
+ if (dc->debug.optimized_watermark) {
+ /*
+ * this method requires us to always re-calculate the watermarks
+ * whenever DCC changes between flips.
+ */
+ v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+ } else {
+ /*
+ * allows us to disable DCC on the fly without re-calculating the WM
+ *
+ * the extra overhead for DCC is quite small: for 1080p, the WM without
+ * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
+ */
+ unsigned int bpe;
+
+ v->dcc_enable[input_idx] = dc->res_pool->hubbub->funcs->dcc_support_pixel_format(
+ pipe->plane_state->format, &bpe) ? dcn_bw_yes : dcn_bw_no;
+ }
+
v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
pipe->plane_state->format);
v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
@@ -976,43 +1020,60 @@ bool dcn_validate_bandwidth(
bw_consumed = v->fabric_and_dram_bandwidth;
display_pipe_configuration(v);
- calc_wm_sets_and_perf_params(context, v);
- context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ /*calc_wm_sets_and_perf_params(context, v);*/
+ /* Only one watermark set is used by DCN since no noticeable
+ * performance improvement was measured and due to hw bug DEGVIDCN10-254.
+ */
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
+ context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+ context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+
+ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
- context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
- context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
- context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000);
+ context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
- context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+ context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
- if (context->bw.dcn.calc_clk.dispclk_khz <
+ if (context->bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
- context->bw.dcn.calc_clk.dispclk_khz =
+ context->bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}
- context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
-
+ context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
+ context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}
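
A quick sanity check on the fclk conversion near the top of this hunk (numbers invented; the per-channel factor is whatever ddr4_dram_factor_single_Channel evaluates to):

/*
 * fclk_khz = bw_consumed * 1000000 / (factor * channels)
 *
 * e.g. bw_consumed = 25.6 GB/s, factor = 16, channels = 2:
 *      25.6 * 1000000 / 32 = 800000 kHz = 800 MHz
 *
 * with those assumed values the divisor matches the 32 that the
 * vmin0p65 special case hard-codes.
 */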
@@ -1225,27 +1286,27 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
- struct clocks_value *clocks)
+ struct dc_clocks *clocks)
{
unsigned vdd_level, vdd_level_temp;
unsigned dcf_clk;
/*find a common supported voltage level*/
vdd_level = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->dcfclock_in_khz);
+ dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclock_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz);
/* find the dcfclk corresponding to that level */
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
@@ -1331,21 +1392,14 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
{
struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
struct pp_smu_wm_range_sets ranges = {0};
- int max_fclk_khz, nom_fclk_khz, mid_fclk_khz, min_fclk_khz;
- int max_dcfclk_khz, min_dcfclk_khz;
- int socclk_khz;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
- unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
if (!pp->set_wm_ranges)
return;
kernel_fpu_begin();
- max_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000000 / factor;
- nom_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000000 / factor;
- mid_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000000 / factor;
min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
- max_dcfclk_khz = dc->dcn_soc->dcfclkv_max0p9 * 1000;
min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
socclk_khz = dc->dcn_soc->socclk * 1000;
kernel_fpu_end();
@@ -1353,105 +1407,46 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
* Memory clock member variables for Watermarks calculations for each
- * Watermark Set
+ * Watermark Set. Only one watermark set for dcn1 due to hw bug DEGVIDCN10-254.
*/
/* SOCCLK does not affect anything but writeback for DCN so for now we don't
* care what the value is, hence min to overdrive level
*/
- ranges.num_reader_wm_sets = WM_COUNT;
- ranges.num_writer_wm_sets = WM_COUNT;
+ ranges.num_reader_wm_sets = WM_SET_COUNT;
+ ranges.num_writer_wm_sets = WM_SET_COUNT;
ranges.reader_wm_sets[0].wm_inst = WM_A;
ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
- ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[0].max_fill_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
ranges.writer_wm_sets[0].wm_inst = WM_A;
ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
- ranges.writer_wm_sets[0].max_drain_clk_khz = min_fclk_khz;
-
- ranges.reader_wm_sets[1].wm_inst = WM_B;
- ranges.reader_wm_sets[1].min_drain_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[1].max_drain_clk_khz = max_dcfclk_khz;
- ranges.reader_wm_sets[1].min_fill_clk_khz = mid_fclk_khz;
- ranges.reader_wm_sets[1].max_fill_clk_khz = mid_fclk_khz;
- ranges.writer_wm_sets[1].wm_inst = WM_B;
- ranges.writer_wm_sets[1].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[1].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[1].min_drain_clk_khz = mid_fclk_khz;
- ranges.writer_wm_sets[1].max_drain_clk_khz = mid_fclk_khz;
-
-
- ranges.reader_wm_sets[2].wm_inst = WM_C;
- ranges.reader_wm_sets[2].min_drain_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[2].max_drain_clk_khz = max_dcfclk_khz;
- ranges.reader_wm_sets[2].min_fill_clk_khz = nom_fclk_khz;
- ranges.reader_wm_sets[2].max_fill_clk_khz = nom_fclk_khz;
- ranges.writer_wm_sets[2].wm_inst = WM_C;
- ranges.writer_wm_sets[2].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[2].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[2].min_drain_clk_khz = nom_fclk_khz;
- ranges.writer_wm_sets[2].max_drain_clk_khz = nom_fclk_khz;
-
- ranges.reader_wm_sets[3].wm_inst = WM_D;
- ranges.reader_wm_sets[3].min_drain_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[3].max_drain_clk_khz = max_dcfclk_khz;
- ranges.reader_wm_sets[3].min_fill_clk_khz = max_fclk_khz;
- ranges.reader_wm_sets[3].max_fill_clk_khz = max_fclk_khz;
- ranges.writer_wm_sets[3].wm_inst = WM_D;
- ranges.writer_wm_sets[3].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[3].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[3].min_drain_clk_khz = max_fclk_khz;
- ranges.writer_wm_sets[3].max_drain_clk_khz = max_fclk_khz;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
ranges.reader_wm_sets[0].wm_inst = WM_A;
ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[0].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
- ranges.reader_wm_sets[0].max_fill_clk_khz = 800000;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
ranges.writer_wm_sets[0].wm_inst = WM_A;
ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[0].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
- ranges.writer_wm_sets[0].max_drain_clk_khz = 800000;
-
- ranges.reader_wm_sets[1].wm_inst = WM_B;
- ranges.reader_wm_sets[1].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[1].max_drain_clk_khz = 654000;
- ranges.reader_wm_sets[1].min_fill_clk_khz = 933000;
- ranges.reader_wm_sets[1].max_fill_clk_khz = 933000;
- ranges.writer_wm_sets[1].wm_inst = WM_B;
- ranges.writer_wm_sets[1].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[1].max_fill_clk_khz = 757000;
- ranges.writer_wm_sets[1].min_drain_clk_khz = 933000;
- ranges.writer_wm_sets[1].max_drain_clk_khz = 933000;
-
-
- ranges.reader_wm_sets[2].wm_inst = WM_C;
- ranges.reader_wm_sets[2].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[2].max_drain_clk_khz = 654000;
- ranges.reader_wm_sets[2].min_fill_clk_khz = 1067000;
- ranges.reader_wm_sets[2].max_fill_clk_khz = 1067000;
- ranges.writer_wm_sets[2].wm_inst = WM_C;
- ranges.writer_wm_sets[2].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[2].max_fill_clk_khz = 757000;
- ranges.writer_wm_sets[2].min_drain_clk_khz = 1067000;
- ranges.writer_wm_sets[2].max_drain_clk_khz = 1067000;
-
- ranges.reader_wm_sets[3].wm_inst = WM_D;
- ranges.reader_wm_sets[3].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[3].max_drain_clk_khz = 654000;
- ranges.reader_wm_sets[3].min_fill_clk_khz = 1200000;
- ranges.reader_wm_sets[3].max_fill_clk_khz = 1200000;
- ranges.writer_wm_sets[3].wm_inst = WM_D;
- ranges.writer_wm_sets[3].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[3].max_fill_clk_khz = 757000;
- ranges.writer_wm_sets[3].min_drain_clk_khz = 1200000;
- ranges.writer_wm_sets[3].max_drain_clk_khz = 1200000;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
}
+ ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
+ ranges.reader_wm_sets[1].wm_inst = WM_B;
+
+ ranges.reader_wm_sets[2] = ranges.writer_wm_sets[0];
+ ranges.reader_wm_sets[2].wm_inst = WM_C;
+
+ ranges.reader_wm_sets[3] = ranges.writer_wm_sets[0];
+ ranges.reader_wm_sets[3].wm_inst = WM_D;
+
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
pp->set_wm_ranges(&pp->pp_smu, &ranges);
}
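
The net effect of the rewrite above is one fully populated set A with its ranges opened to the overdrive bound, cloned into the remaining instances. A sketch of the reader-set fill, assuming "drain" is the consuming clock (DCFCLK for readers) and "fill" the producing one (FCLK):

static void open_reader_set_full_range(struct pp_smu_wm_range_sets *ranges,
		int min_dcfclk_khz, int min_fclk_khz, int overdrive)
{
	ranges->reader_wm_sets[0].wm_inst = WM_A;
	ranges->reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
	ranges->reader_wm_sets[0].max_drain_clk_khz = overdrive;
	ranges->reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
	ranges->reader_wm_sets[0].max_fill_clk_khz = overdrive;
}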
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 644b2187507b..733ac224e7fd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -169,6 +169,22 @@ failed_alloc:
return false;
}
+/**
+ *****************************************************************************
+ * Function: dc_stream_adjust_vmin_vmax
+ *
+ * @brief
+ * Looks up the pipe context of dc_stream_state and updates the
+ * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
+ * Rate, a power-saving feature that reduces the panel refresh rate
+ * while the screen is static
+ *
+ * @param [in] dc: dc reference
+ * @param [in] streams: array of streams to adjust
+ * @param [in] num_streams: number of entries in the streams array
+ * @param [in] vmin: updated value for vertical_total_min
+ * @param [in] vmax: updated value for vertical_total_max
+ *****************************************************************************
+ */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state **streams, int num_streams,
int vmin, int vmax)
@@ -368,6 +384,71 @@ void dc_stream_set_static_screen_events(struct dc *dc,
dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link)
+{
+
+ int i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i] == link)
+ break;
+ }
+
+ if (i >= dc->link_count)
+ ASSERT_CRITICAL(false);
+
+ dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+void dc_link_perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ int i;
+
+ for (i = 0; i < dc->link_count; i++)
+ dc_link_dp_perform_link_training(
+ dc->links[i],
+ link_setting,
+ skip_video_pattern);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ link->preferred_link_setting = *link_setting;
+ dp_retrain_link_dp_test(link, link_setting, false);
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_enable_hpd(link);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_disable_hpd(link);
+}
+
+
+void dc_link_set_test_pattern(struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ if (link != NULL)
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ p_link_settings,
+ p_custom_pattern,
+ cust_pattern_size);
+}
+
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
@@ -386,9 +467,6 @@ static void destruct(struct dc *dc)
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
- if (dc->ctx->logger)
- dal_logger_destroy(&dc->ctx->logger);
-
kfree(dc->ctx);
dc->ctx = NULL;
@@ -398,7 +476,7 @@ static void destruct(struct dc *dc)
kfree(dc->bw_dceip);
dc->bw_dceip = NULL;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
kfree(dc->dcn_soc);
dc->dcn_soc = NULL;
@@ -411,11 +489,10 @@ static void destruct(struct dc *dc)
static bool construct(struct dc *dc,
const struct dc_init_data *init_params)
{
- struct dal_logger *logger;
struct dc_context *dc_ctx;
struct bw_calcs_dceip *dc_dceip;
struct bw_calcs_vbios *dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
#endif
@@ -437,7 +514,7 @@ static bool construct(struct dc *dc,
}
dc->bw_vbios = dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
if (!dcn_soc) {
dm_error("%s: failed to create dcn_soc\n", __func__);
@@ -465,6 +542,7 @@ static bool construct(struct dc *dc,
dc_ctx->driver_context = init_params->driver;
dc_ctx->dc = dc;
dc_ctx->asic_id = init_params->asic_id;
+ dc_ctx->dc_sink_id_count = 0;
dc->ctx = dc_ctx;
dc->current_state = dc_create_state();
@@ -475,14 +553,7 @@ static bool construct(struct dc *dc,
}
/* Create logger */
- logger = dal_logger_create(dc_ctx, init_params->log_mask);
- if (!logger) {
- /* can *not* call logger. call base driver 'print error' */
- dm_error("%s: failed to create Logger!\n", __func__);
- goto fail;
- }
- dc_ctx->logger = logger;
dc_ctx->dce_environment = init_params->dce_environment;
dc_version = resource_parse_asic_id(init_params->asic_id);
@@ -901,9 +972,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
for (i = 0; i < context->stream_count; i++) {
struct dc_stream_state *stream = context->streams[i];
- dc_stream_log(stream,
- dc->ctx->logger,
- LOG_DC);
+ dc_stream_log(dc, stream);
}
result = dc_commit_state_no_check(dc, context);
@@ -927,12 +996,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->optimized_required = false;
- /* 3rd param should be true, temp w/a for RV*/
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
-#else
dc->hwss.set_bandwidth(dc, context, true);
-#endif
return true;
}
@@ -1548,7 +1612,7 @@ struct dc_sink *dc_link_add_remote_sink(
struct dc_sink *dc_sink;
enum dc_edid_status edid_status;
- if (len > MAX_EDID_BUFFER_SIZE) {
+ if (len > DC_MAX_EDID_BUFFER_SIZE) {
dm_error("Max EDID buffer size breached!\n");
return NULL;
}
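
The dc_link_* wrappers added above are thin forwarding shims over the dc_link_dp_* layer. A hypothetical caller forcing a link down to RBR x1 (scenario invented; enum values from dc_dp_types.h):

struct dc_link_settings forced = {
	.lane_count = LANE_COUNT_ONE,
	.link_rate = LINK_RATE_LOW,	/* 1.62 Gbps per lane */
	.link_spread = LINK_SPREAD_DISABLED,
};

/* stores the override, then retrains via dp_retrain_link_dp_test() */
dc_link_set_preferred_link_settings(dc, &forced, link);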
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 267c76766dea..caece7c13bc6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -348,23 +348,23 @@ void context_clock_trace(
struct dc *dc,
struct dc_state *context)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.dppclk_khz,
- context->bw.dcn.calc_clk.dcfclk_khz,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.socclk_khz);
+ context->bw.dcn.clk.dispclk_khz,
+ context->bw.dcn.clk.dppclk_khz,
+ context->bw.dcn.clk.dcfclk_khz,
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.clk.fclk_khz,
+ context->bw.dcn.clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.dppclk_khz,
- context->bw.dcn.calc_clk.dcfclk_khz,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.socclk_khz);
+ context->bw.dcn.clk.dispclk_khz,
+ context->bw.dcn.clk.dppclk_khz,
+ context->bw.dcn.clk.dcfclk_khz,
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.clk.fclk_khz,
+ context->bw.dcn.clk.socclk_khz);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 2fa521812d23..326b3e99b7e4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -33,6 +33,7 @@
#include "dc_link_dp.h"
#include "dc_link_ddc.h"
#include "link_hwss.h"
+#include "opp.h"
#include "link_encoder.h"
#include "hw_sequencer.h"
@@ -59,7 +60,14 @@
enum {
LINK_RATE_REF_FREQ_IN_MHZ = 27,
- PEAK_FACTOR_X1000 = 1006
+ PEAK_FACTOR_X1000 = 1006,
+ /*
+ * Some receivers fail to train on the first try and are good
+ * on subsequent tries. 2 retries should be plenty. If we
+ * don't have a successful training then we don't expect to
+ * ever get one.
+ */
+ LINK_TRAINING_MAX_VERIFY_RETRY = 2
};
/*******************************************************************************
@@ -312,7 +320,7 @@ static enum signal_type get_basic_signal_type(
* @brief
* Check whether there is a dongle on DP connector
*/
-static bool is_dp_sink_present(struct dc_link *link)
+bool dc_link_is_dp_sink_present(struct dc_link *link)
{
enum gpio_result gpio_result;
uint32_t clock_pin = 0;
@@ -405,7 +413,7 @@ static enum signal_type link_detect_sink(
* we assume signal is DVI; it could be corrected
* to HDMI after dongle detection
*/
- if (!is_dp_sink_present(link))
+ if (!dm_helpers_is_dp_sink_present(link))
result = SIGNAL_TYPE_DVI_SINGLE_LINK;
}
}
@@ -497,6 +505,10 @@ static bool detect_dp(
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
+ dal_ddc_service_set_transaction_type(
+ link->ddc,
+ sink_caps->transaction_type);
+
/*
* This call will initiate MST topology discovery. Which
* will detect MST ports and add new DRM connector DRM
@@ -524,6 +536,10 @@ static bool detect_dp(
if (reason == DETECT_REASON_BOOT)
boot = true;
+ dm_helpers_dp_update_branch_info(
+ link->ctx,
+ link);
+
if (!dm_helpers_dp_mst_start_top_mgr(
link->ctx,
link, boot)) {
@@ -728,6 +744,18 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
break;
case EDID_NO_RESPONSE:
DC_LOG_ERROR("No EDID read.\n");
+
+ /*
+ * Abort detection for non-DP connectors if we have
+ * no EDID
+ *
+ * DP needs to report as connected if HPD is high
+ * even if we have no EDID in order to go to
+ * fail-safe mode
+ */
+ if (dc_is_hdmi_signal(link->connector_signal) ||
+ dc_is_dvi_signal(link->connector_signal))
+ return false;
default:
break;
}
@@ -736,30 +764,41 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
- // If both edid and dpcd are the same, then discard new sink and revert back to original sink
- if ((same_edid) && (same_dpcd)) {
- link_disconnect_remap(prev_sink, link);
- sink = prev_sink;
- prev_sink = NULL;
- } else {
- if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type ==
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
- /*
- * TODO debug why Dell 2413 doesn't like
- * two link trainings
- */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
+ reason != DETECT_REASON_HPDRX) {
+ /*
+ * TODO debug why Dell 2413 doesn't like
+ * two link trainings
+ */
+
+ /* deal with non-mst cases */
+ for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+ int fail_count = 0;
+
+ dp_verify_link_cap(link,
+ &link->reported_link_cap,
+ &fail_count);
- /* deal with non-mst cases */
- dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+ if (fail_count == 0)
+ break;
}
- /* HDMI-DVI Dongle */
- if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
- !sink->edid_caps.edid_hdmi)
- sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ } else {
+ /* If the EDID is the same, discard the new sink and revert to the original sink */
+ if (same_edid) {
+ link_disconnect_remap(prev_sink, link);
+ sink = prev_sink;
+ prev_sink = NULL;
+
+ }
}
+ /* HDMI-DVI Dongle */
+ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+ !sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
/* Connectivity log: detection */
for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
@@ -1000,6 +1039,9 @@ static bool construct(
link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+ if (dc_ctx->dc_bios->integrated_info)
+ link->dp_ss_off = !!dc_ctx->dc_bios->integrated_info->dp_ss_control;
+
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
__func__, init_params->connector_index,
@@ -1284,29 +1326,15 @@ static enum dc_status enable_link_dp(
max_link_rate = LINK_RATE_HIGH3;
if (link_settings.link_rate == max_link_rate) {
- if (state->dis_clk->funcs->set_min_clocks_state) {
- if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL)
- state->dis_clk->funcs->set_min_clocks_state(
- state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
- } else {
- uint32_t dp_phyclk_in_khz;
- const struct clocks_value clocks_value =
- state->dis_clk->cur_clocks_value;
-
- /* 27mhz = 27000000hz= 27000khz */
- dp_phyclk_in_khz = link_settings.link_rate * 27000;
-
- if (((clocks_value.max_non_dp_phyclk_in_khz != 0) &&
- (dp_phyclk_in_khz > clocks_value.max_non_dp_phyclk_in_khz)) ||
- (dp_phyclk_in_khz > clocks_value.max_dp_phyclk_in_khz)) {
- state->dis_clk->funcs->apply_clock_voltage_request(
- state->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
- dp_phyclk_in_khz,
- false,
- true);
- }
- }
+ struct dc_clocks clocks = state->bw.dcn.clk;
+
+ /* dce/dcn compat, do not update dispclk */
+ clocks.dispclk_khz = 0;
+ /* 27 MHz = 27000000 Hz = 27000 kHz */
+ clocks.phyclk_khz = link_settings.link_rate * 27000;
+
+ state->dis_clk->funcs->update_clocks(
+ state->dis_clk, &clocks, false);
}
dp_enable_link_phy(
@@ -1784,6 +1812,8 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
+ if (stream->phy_pix_clk == 0)
+ stream->phy_pix_clk = stream->timing.pix_clk_khz;
if (stream->phy_pix_clk > 340000)
is_over_340mhz = true;
@@ -1861,28 +1891,6 @@ static enum dc_status enable_link(
break;
}
- if (pipe_ctx->stream_res.audio && status == DC_OK) {
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
- /* notify audio driver for audio modes of monitor */
- struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
- unsigned int i, num_audio = 1;
- for (i = 0; i < MAX_PIPES; i++) {
- /*current_state not updated yet*/
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
- num_audio++;
- }
-
- pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
-
- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
- /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
- /* un-mute audio */
- /* TODO: audio should be per stream rather than per link */
- pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
- pipe_ctx->stream_res.stream_enc, false);
- }
-
return status;
}
@@ -2023,6 +2031,15 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
}
+int dc_link_get_backlight_level(const struct dc_link *link)
+{
+ struct abm *abm = link->ctx->dc->res_pool->abm;
+
+ if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
+ return DC_ERROR_UNEXPECTED;
+
+ return (int) abm->funcs->get_current_backlight_8_bit(abm);
+}
bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream)
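
For symmetry with dc_link_set_backlight_level(), a hypothetical consumer of the new getter; report_brightness() is a placeholder for the DM-side user, not a real function:

int level = dc_link_get_backlight_level(link);

/* DC_ERROR_UNEXPECTED doubles as the "no ABM" sentinel, as in the getter */
if (level != DC_ERROR_UNEXPECTED)
	report_brightness(level); /* hypothetical consumer */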
@@ -2415,10 +2432,13 @@ void core_link_enable_stream(
}
}
+ core_dc->hwss.enable_audio_stream(pipe_ctx);
+
/* turn off otg test pattern if enable */
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- COLOR_DEPTH_UNDEFINED);
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
core_dc->hwss.enable_stream(pipe_ctx);
@@ -2453,6 +2473,22 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
core_dc->hwss.set_avmute(pipe_ctx, enable);
}
+/**
+ *****************************************************************************
+ * Function: dc_link_enable_hpd_filter
+ *
+ * @brief
+ * If enable is true, programs the HPD filter on the associated HPD
+ * line using delay_on_disconnect/delay_on_connect values dependent on
+ * link->connector_signal
+ *
+ * If enable is false, programs the HPD filter on the associated HPD
+ * line with no delays on connect or disconnect
+ *
+ * @param [in] link: pointer to the dc link
+ * @param [in] enable: boolean specifying whether to enable the HPD filter
+ *****************************************************************************
+ */
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
{
struct gpio *hpd;
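
One convention in the enable_link_dp() hunk earlier in this file is easy to miss: a zeroed field in struct dc_clocks asks update_clocks() to leave that clock alone. A condensed sketch of the call, with the unit math spelled out:

struct dc_clocks req = state->bw.dcn.clk;

req.dispclk_khz = 0;	/* dce/dcn compat: do not touch DISPCLK */
req.phyclk_khz = link_settings.link_rate * 27000;
	/* link_rate is in 0.27 GHz units, e.g. LINK_RATE_LOW (0x06)
	 * yields 6 * 27000 = 162000 kHz */
state->dis_clk->funcs->update_clocks(state->dis_clk, &req, false);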
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index ae48d603ebd6..8def0d9fa0ff 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -33,6 +33,7 @@
#include "include/vector.h"
#include "core_types.h"
#include "dc_link_ddc.h"
+#include "aux_engine.h"
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
@@ -629,83 +630,61 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-enum ddc_result dal_ddc_service_read_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- uint8_t *data,
- uint32_t len,
- uint32_t *read)
+int dc_link_aux_transfer(struct ddc_service *ddc,
+ unsigned int address,
+ uint8_t *reply,
+ void *buffer,
+ unsigned int size,
+ enum aux_transaction_type type,
+ enum i2caux_transaction_action action)
{
- struct aux_payload read_payload = {
- .i2c_over_aux = i2c,
- .write = false,
- .address = address,
- .length = len,
- .data = data,
- };
- struct aux_command command = {
- .payloads = &read_payload,
- .number_of_payloads = 1,
- .defer_delay = 0,
- .max_defer_write_retry = 0,
- .mot = mot
- };
-
- *read = 0;
-
- if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
- BREAK_TO_DEBUGGER();
- return DDC_RESULT_FAILED_INVALID_OPERATION;
- }
+ struct ddc *ddc_pin = ddc->ddc_pin;
+ struct aux_engine *aux_engine;
+ enum aux_channel_operation_result operation_result;
+ struct aux_request_transaction_data aux_req;
+ struct aux_reply_transaction_data aux_rep;
+ uint8_t returned_bytes = 0;
+ int res = -1;
+ uint32_t status;
- if (dal_i2caux_submit_aux_command(
- ddc->ctx->i2caux,
- ddc->ddc_pin,
- &command)) {
- *read = command.payloads->length;
- return DDC_RESULT_SUCESSFULL;
- }
+ memset(&aux_req, 0, sizeof(aux_req));
+ memset(&aux_rep, 0, sizeof(aux_rep));
- return DDC_RESULT_FAILED_OPERATION;
-}
+ aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ aux_engine->funcs->acquire(aux_engine, ddc_pin);
-enum ddc_result dal_ddc_service_write_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- const uint8_t *data,
- uint32_t len)
-{
- struct aux_payload write_payload = {
- .i2c_over_aux = i2c,
- .write = true,
- .address = address,
- .length = len,
- .data = (uint8_t *)data,
- };
- struct aux_command command = {
- .payloads = &write_payload,
- .number_of_payloads = 1,
- .defer_delay = 0,
- .max_defer_write_retry = 0,
- .mot = mot
- };
-
- if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
- BREAK_TO_DEBUGGER();
- return DDC_RESULT_FAILED_INVALID_OPERATION;
- }
+ aux_req.type = type;
+ aux_req.action = action;
+
+ aux_req.address = address;
+ aux_req.delay = 0;
+ aux_req.length = size;
+ aux_req.data = buffer;
- if (dal_i2caux_submit_aux_command(
- ddc->ctx->i2caux,
- ddc->ddc_pin,
- &command))
- return DDC_RESULT_SUCESSFULL;
+ aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
+ operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
- return DDC_RESULT_FAILED_OPERATION;
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ res = returned_bytes;
+
+ if (res <= size && res >= 0)
+ res = aux_engine->funcs->read_channel_reply(aux_engine, size,
+ buffer, reply,
+ &status);
+
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ res = 0;
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ res = -1;
+ break;
+ }
+ aux_engine->funcs->release_engine(aux_engine);
+ return res;
}
/*test only function*/
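
dal_ddc_service_{read,write}_dpcd_data give way to the single raw entry point above. A hypothetical native-AUX read through it; the enum names are taken from aux_engine.h/i2caux_interface.h, and the return semantics follow the switch above (byte count on success, 0 on HPD disconnect, -1 otherwise):

uint8_t reply = 0;
uint8_t power_state = 0;

int got = dc_link_aux_transfer(link->ddc, DP_SET_POWER,
		&reply, &power_state, sizeof(power_state),
		AUX_TRANSACTION_TYPE_DP,
		I2CAUX_TRANSACTION_ACTION_DP_READ);

if (got == sizeof(power_state))
	/* power_state now holds DPCD 0x600; reply holds the AUX reply code */;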
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 7857cb42b3e6..a7553b6d59c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3,6 +3,7 @@
#include "dc.h"
#include "dc_link_dp.h"
#include "dm_helpers.h"
+#include "opp.h"
#include "inc/core_types.h"
#include "link_hwss.h"
@@ -38,7 +39,7 @@ static bool decide_fallback_link_setting(
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
enum link_training_result training_result);
-static struct dc_link_settings get_common_supported_link_settings (
+static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b);
@@ -93,8 +94,8 @@ static void dpcd_set_link_settings(
uint8_t rate = (uint8_t)
(lt_settings->link_settings.link_rate);
- union down_spread_ctrl downspread = {{0}};
- union lane_count_set lane_count_set = {{0}};
+ union down_spread_ctrl downspread = { {0} };
+ union lane_count_set lane_count_set = { {0} };
uint8_t link_set_buffer[2];
downspread.raw = (uint8_t)
@@ -164,11 +165,11 @@ static void dpcd_set_lt_pattern_and_lane_settings(
const struct link_training_settings *lt_settings,
enum hw_dp_training_pattern pattern)
{
- union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+ union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
const uint32_t dpcd_base_lt_offset =
DP_TRAINING_PATTERN_SET;
uint8_t dpcd_lt_buffer[5] = {0};
- union dpcd_training_pattern dpcd_pattern = {{0}};
+ union dpcd_training_pattern dpcd_pattern = { {0} };
uint32_t lane;
uint32_t size_in_bytes;
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
@@ -232,7 +233,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
link,
DP_TRAINING_PATTERN_SET,
&dpcd_pattern.raw,
- sizeof(dpcd_pattern.raw) );
+ sizeof(dpcd_pattern.raw));
core_link_write_dpcd(
link,
@@ -246,7 +247,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
link,
dpcd_base_lt_offset,
dpcd_lt_buffer,
- size_in_bytes + sizeof(dpcd_pattern.raw) );
+ size_in_bytes + sizeof(dpcd_pattern.raw));
link->cur_lane_setting = lt_settings->lane_settings[0];
}
@@ -428,8 +429,8 @@ static void get_lane_status_and_drive_settings(
struct link_training_settings *req_settings)
{
uint8_t dpcd_buf[6] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
- struct link_training_settings request_settings = {{0}};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ struct link_training_settings request_settings = { {0} };
uint32_t lane;
memset(req_settings, '\0', sizeof(struct link_training_settings));
@@ -651,7 +652,7 @@ static bool perform_post_lt_adj_req_sequence(
if (req_drv_setting_changed) {
update_drive_settings(
- lt_settings,req_settings);
+ lt_settings, req_settings);
dc_link_dp_set_drive_settings(link,
lt_settings);
@@ -724,8 +725,8 @@ static enum link_training_result perform_channel_equalization_sequence(
enum hw_dp_training_pattern hw_tr_pattern;
uint32_t retries_ch_eq;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = {{0}};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
hw_tr_pattern = get_supported_tp(link);
@@ -952,7 +953,10 @@ enum link_training_result dc_link_dp_perform_link_training(
* LINK_SPREAD_05_DOWNSPREAD_30KHZ :
* LINK_SPREAD_DISABLED;
*/
- lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+ if (link->dp_ss_off)
+ lt_settings.link_settings.link_spread = LINK_SPREAD_DISABLED;
+ else
+ lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
/* 1. set link rate, lane count and spread*/
dpcd_set_link_settings(link, &lt_settings);
@@ -1027,6 +1031,9 @@ enum link_training_result dc_link_dp_perform_link_training(
lt_settings.lane_settings[0].VOLTAGE_SWING,
lt_settings.lane_settings[0].PRE_EMPHASIS);
+ if (status != LINK_TRAINING_SUCCESS)
+ link->ctx->dc->debug_data.ltFailCount++;
+
return status;
}
@@ -1082,9 +1089,10 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting)
+ struct dc_link_settings *known_limit_link_setting,
+ int *fail_count)
{
struct dc_link_settings max_link_cap = {0};
struct dc_link_settings cur_link_setting = {0};
@@ -1097,6 +1105,11 @@ bool dp_hbr_verify_link_cap(
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
enum link_training_result status;
+ if (link->dc->debug.skip_detection_link_training) {
+ link->verified_link_cap = *known_limit_link_setting;
+ return true;
+ }
+
success = false;
skip_link_training = false;
@@ -1151,6 +1164,8 @@ bool dp_hbr_verify_link_cap(
skip_video_pattern);
if (status == LINK_TRAINING_SUCCESS)
success = true;
+ else
+ (*fail_count)++;
}
if (success)
@@ -1182,7 +1197,7 @@ bool dp_hbr_verify_link_cap(
return success;
}
-static struct dc_link_settings get_common_supported_link_settings (
+static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b)
{
@@ -1428,6 +1443,7 @@ static uint32_t bandwidth_in_kbps_from_link_settings(
uint32_t lane_count = link_setting->lane_count;
uint32_t kbps = link_rate_in_kbps;
+
kbps *= lane_count;
kbps *= 8; /* 8 bits per byte*/
@@ -1445,9 +1461,9 @@ bool dp_validate_mode_timing(
const struct dc_link_settings *link_setting;
/*always DP fail safe mode*/
- if (timing->pix_clk_khz == (uint32_t)25175 &&
- timing->h_addressable == (uint32_t)640 &&
- timing->v_addressable == (uint32_t)480)
+ if (timing->pix_clk_khz == (uint32_t) 25175 &&
+ timing->h_addressable == (uint32_t) 640 &&
+ timing->v_addressable == (uint32_t) 480)
return true;
/* We always use verified link settings */
@@ -1647,22 +1663,26 @@ static enum dc_status read_hpd_rx_irq_data(
irq_data->raw,
sizeof(union hpd_irq_data));
else {
- /* Read 2 bytes at this location,... */
+ /* Read 14 bytes in a single read and then copy only the required fields.
+ * This is more efficient than doing it in two separate AUX reads. */
+
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
retval = core_link_read_dpcd(
link,
DP_SINK_COUNT_ESI,
- irq_data->raw,
- 2);
+ tmp,
+ sizeof(tmp));
if (retval != DC_OK)
return retval;
- /* ... then read remaining 4 at the other location */
- retval = core_link_read_dpcd(
- link,
- DP_LANE0_1_STATUS_ESI,
- &irq_data->raw[2],
- 4);
+ irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
}
return retval;
@@ -1767,12 +1787,10 @@ static void dp_test_send_link_training(struct dc_link *link)
dp_retrain_link_dp_test(link, &link_settings, false);
}
-/* TODO hbr2 compliance eye output is unstable
+/* TODO Raven hbr2 compliance eye output is unstable
* (toggling on and off) with debugger break
* This causes intermittent PHY automation failure
* Need to look into the root cause */
-static uint8_t force_tps4_for_cp2520 = 1;
-
static void dp_test_send_phy_test_pattern(struct dc_link *link)
{
union phy_test_pattern dpcd_test_pattern;
@@ -1832,13 +1850,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
break;
case PHY_TEST_PATTERN_CP2520_1:
/* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (force_tps4_for_cp2520 == 1) ?
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
DP_TEST_PATTERN_TRAINING_PATTERN4 :
DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
break;
case PHY_TEST_PATTERN_CP2520_2:
/* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (force_tps4_for_cp2520 == 1) ?
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
DP_TEST_PATTERN_TRAINING_PATTERN4 :
DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
break;
@@ -1991,12 +2009,16 @@ static void handle_automated_test(struct dc_link *link)
sizeof(test_response));
}
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
{
- union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+ union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
union device_service_irq device_service_clear = { { 0 } };
- enum dc_status result = DDC_RESULT_UNKNOWN;
+ enum dc_status result;
+
bool status = false;
+
+ if (out_link_loss)
+ *out_link_loss = false;
/* For use cases related to down stream connection status change,
* PSR and device auto test, refer to function handle_sst_hpd_irq
* in DAL2.1*/
@@ -2071,6 +2093,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
true, LINK_TRAINING_ATTEMPTS);
status = false;
+ if (out_link_loss)
+ *out_link_loss = true;
}
if (link->type == dc_connection_active_dongle &&
@@ -2257,6 +2281,11 @@ static void get_active_converter_info(
link->dpcd_caps.branch_hw_revision =
dp_hw_fw_revision.ieee_hw_rev;
+
+ memmove(
+ link->dpcd_caps.branch_fw_revision,
+ dp_hw_fw_revision.ieee_fw_rev,
+ sizeof(dp_hw_fw_revision.ieee_fw_rev));
}
}
@@ -2305,12 +2334,14 @@ static bool retrieve_link_cap(struct dc_link *link)
{
uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+ struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t read_dpcd_retry_cnt = 3;
int i;
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(&down_strm_port_count,
@@ -2391,6 +2422,36 @@ static bool retrieve_link_cap(struct dc_link *link)
&link->dpcd_caps.sink_count.raw,
sizeof(link->dpcd_caps.sink_count.raw));
+ /* read sink ieee oui */
+ core_link_read_dpcd(link,
+ DP_SINK_OUI,
+ (uint8_t *)(&sink_id),
+ sizeof(sink_id));
+
+ link->dpcd_caps.sink_dev_id =
+ (sink_id.ieee_oui[0] << 16) +
+ (sink_id.ieee_oui[1] << 8) +
+ (sink_id.ieee_oui[2]);
+
+ memmove(
+ link->dpcd_caps.sink_dev_id_str,
+ sink_id.ieee_device_id,
+ sizeof(sink_id.ieee_device_id));
+
+ core_link_read_dpcd(
+ link,
+ DP_SINK_HW_REVISION_START,
+ (uint8_t *)&dp_hw_fw_revision,
+ sizeof(dp_hw_fw_revision));
+
+ link->dpcd_caps.sink_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
+
+ memmove(
+ link->dpcd_caps.sink_fw_revision,
+ dp_hw_fw_revision.ieee_fw_rev,
+ sizeof(dp_hw_fw_revision.ieee_fw_rev));
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -2495,8 +2556,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
pipe_ctx->stream->bit_depth_params = params;
pipe_ctx->stream_res.opp->funcs->
opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
-
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
}
break;
@@ -2508,8 +2569,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
pipe_ctx->stream->bit_depth_params = params;
pipe_ctx->stream_res.opp->funcs->
opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
-
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
color_depth);
}
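
The consolidated ESI read in this file relies on the sink-event registers being contiguous in DPCD; spelled out with the drm_dp_helper.h addresses (worth double-checking against the header):

/*
 * DP_SINK_COUNT_ESI                 0x2002 -> tmp[0]
 * DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 -> tmp[1]
 * DP_LANE0_1_STATUS_ESI             0x200c -> tmp[10]
 * DP_LANE2_3_STATUS_ESI             0x200d -> tmp[11]
 * DP_LANE_ALIGN_STATUS_UPDATED_ESI  0x200e -> tmp[12]
 * DP_SINK_STATUS_ESI                0x200f -> tmp[13]
 *
 * sizeof(tmp) = 0x200f - 0x2002 + 1 = 14 bytes: one AUX transaction
 * instead of the previous 2-byte + 4-byte pair.
 */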
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 751f3ac9d921..1644f2a946b0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -41,7 +41,7 @@
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/dcn10_resource.h"
#endif
#include "dce120/dce120_resource.h"
@@ -85,7 +85,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_AI:
dc_version = DCE_VERSION_12_0;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case FAMILY_RV:
dc_version = DCN_VERSION_1_0;
break;
@@ -136,7 +136,7 @@ struct resource_pool *dc_create_resource_pool(
num_virtual_links, dc);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
res_pool = dcn10_create_resource_pool(
num_virtual_links, dc);
@@ -268,24 +268,30 @@ bool resource_construct(
return true;
}
+static int find_matching_clock_source(
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i;
+
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] == clock_source)
+ return i;
+ }
+ return -1;
+}
void resource_unreference_clock_source(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct clock_source *clock_source)
{
- int i;
-
- for (i = 0; i < pool->clk_src_count; i++) {
- if (pool->clock_sources[i] != clock_source)
- continue;
+ int i = find_matching_clock_source(pool, clock_source);
+ if (i > -1)
res_ctx->clock_source_ref_count[i]--;
- break;
- }
-
if (pool->dp_clock_source == clock_source)
res_ctx->dp_clock_source_ref_count--;
}
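
All three ref-count helpers now funnel through find_matching_clock_source(), and resource_get_clock_source_reference(), added just below, exposes the count. A hypothetical use; the power-down policy is invented for illustration:

int refs = resource_get_clock_source_reference(res_ctx, pool, clock_source);

/* -1: not in this pool; 0: unreferenced, eligible for power-down */
if (refs == 0)
	power_down_pll(clock_source); /* hypothetical helper */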
@@ -295,19 +301,31 @@ void resource_reference_clock_source(
const struct resource_pool *pool,
struct clock_source *clock_source)
{
- int i;
- for (i = 0; i < pool->clk_src_count; i++) {
- if (pool->clock_sources[i] != clock_source)
- continue;
+ int i = find_matching_clock_source(pool, clock_source);
+ if (i > -1)
res_ctx->clock_source_ref_count[i]++;
- break;
- }
if (pool->dp_clock_source == clock_source)
res_ctx->dp_clock_source_ref_count++;
}
+int resource_get_clock_source_reference(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i = find_matching_clock_source(pool, clock_source);
+
+ if (i > -1)
+ return res_ctx->clock_source_ref_count[i];
+
+ if (pool->dp_clock_source == clock_source)
+ return res_ctx->dp_clock_source_ref_count;
+
+ return -1;
+}
+
bool resource_are_streams_timing_synchronizable(
struct dc_stream_state *stream1,
struct dc_stream_state *stream2)
@@ -330,6 +348,9 @@ bool resource_are_streams_timing_synchronizable(
!= stream2->timing.pix_clk_khz)
return false;
+ if (stream1->clamping.c_depth != stream2->clamping.c_depth)
+ return false;
+
if (stream1->phy_pix_clk != stream2->phy_pix_clk
&& (!dc_is_dp_signal(stream1->signal)
|| !dc_is_dp_signal(stream2->signal)))
@@ -337,6 +358,20 @@ bool resource_are_streams_timing_synchronizable(
return true;
}
+static bool is_dp_and_hdmi_sharable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2)
+{
+ if (stream1->ctx->dc->caps.disable_dp_clk_share)
+ return false;
+
+ if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
+ stream2->clamping.c_depth != COLOR_DEPTH_888)
+ return false;
+
+ return true;
+
+}
static bool is_sharable_clk_src(
const struct pipe_ctx *pipe_with_clk_src,
@@ -348,15 +383,18 @@ static bool is_sharable_clk_src(
if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
return false;
- if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
+ if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) ||
+ (dc_is_dp_signal(pipe->stream->signal) &&
+ !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream,
+ pipe->stream)))
return false;
if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
- && dc_is_dvi_signal(pipe->stream->signal))
+ && dc_is_dual_link_signal(pipe->stream->signal))
return false;
if (dc_is_hdmi_signal(pipe->stream->signal)
- && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
+ && dc_is_dual_link_signal(pipe_with_clk_src->stream->signal))
return false;
if (!resource_are_streams_timing_synchronizable(
@@ -522,13 +560,12 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
}
}
-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
struct rect surf_src = plane_state->src_rect;
struct rect surf_clip = plane_state->clip_rect;
- int recout_full_x, recout_full_y;
bool pri_split = pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
bool sec_split = pipe_ctx->top_pipe &&
@@ -597,20 +634,22 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
}
}
/* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
- * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
- * ratio)
+ * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
+ * ratio)
*/
- recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
* stream->dst.width / stream->src.width -
surf_src.x * plane_state->dst_rect.width / surf_src.width
* stream->dst.width / stream->src.width;
- recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
surf_src.y * plane_state->dst_rect.height / surf_src.height
* stream->dst.height / stream->src.height;
- recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - recout_full_x;
- recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - recout_full_y;
+ recout_full->width = plane_state->dst_rect.width
+ * stream->dst.width / stream->src.width;
+ recout_full->height = plane_state->dst_rect.height
+ * stream->dst.height / stream->src.height;
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
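
A worked example of the recout_full math above (all values invented): with stream->src = {0,0,1920,1080}, stream->dst = {0,0,3840,2160}, plane dst_rect = {100,0,1920,1080} and surf_src = {0,0,960,540}:

/*
 * recout_full->x     = 0 + (100 - 0) * 3840 / 1920 - 0 = 200
 * recout_full->width = 1920 * 3840 / 1920              = 3840
 *
 * i.e. the unclipped recout is the plane's dst rect scaled by the
 * stream's own dst/src ratio and shifted into stream dst space.
 */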
@@ -662,7 +701,7 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
}
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
{
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = pipe_ctx->plane_state->src_rect;
@@ -680,15 +719,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
flip_vert_scan_dir = true;
else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
flip_horz_scan_dir = true;
- if (pipe_ctx->plane_state->horizontal_mirror)
- flip_horz_scan_dir = !flip_horz_scan_dir;
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
rect_swap_helper(&src);
rect_swap_helper(&data->viewport_c);
rect_swap_helper(&data->viewport);
- }
+ } else if (pipe_ctx->plane_state->horizontal_mirror)
+ flip_horz_scan_dir = !flip_horz_scan_dir;
/*
* Init calculated according to formula:
@@ -708,127 +746,286 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
+ if (!flip_horz_scan_dir) {
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h, data->ratios.horz));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ int vp_clip = (src.x + src.width) / vpc_div -
+ data->viewport_c.width - data->viewport_c.x;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.x) {
+ int int_part;
+
+ data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+ data->ratios.horz, data->recout.x - recout_full->x));
+ int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
+ if (int_part < data->taps.h_taps) {
+ int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+ (data->taps.h_taps - int_part) : data->viewport.x;
+ data->viewport.x -= int_adj;
+ data->viewport.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps) {
+ data->viewport.x += int_part - data->taps.h_taps;
+ data->viewport.width -= int_part - data->taps.h_taps;
+ int_part = data->taps.h_taps;
+ }
+ data->inits.h.value &= 0xffffffff;
+ data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+ }
+
+ if (data->viewport_c.x) {
+ int int_part;
+
+ data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+ data->ratios.horz_c, data->recout.x - recout_full->x));
+ int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
+ if (int_part < data->taps.h_taps_c) {
+ int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+ (data->taps.h_taps_c - int_part) : data->viewport_c.x;
+ data->viewport_c.x -= int_adj;
+ data->viewport_c.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps_c) {
+ data->viewport_c.x += int_part - data->taps.h_taps_c;
+ data->viewport_c.width -= int_part - data->taps.h_taps_c;
+ int_part = data->taps.h_taps_c;
+ }
+ data->inits.h_c.value &= 0xffffffff;
+ data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ }
+ } else {
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.x) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h, data->ratios.horz));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
+ data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
+ }
+ if (data->viewport_c.x) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+ data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+ }
- /* Adjust for viewport end clip-off */
- if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
- int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
- int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
- int vp_clip = (src.x + src.width) / vpc_div -
- data->viewport_c.width - data->viewport_c.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
- int vp_clip = (src.y + src.height) / vpc_div -
- data->viewport_c.height - data->viewport_c.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
- }
-
- /* Adjust for non-0 viewport offset */
- if (data->viewport.x && !flip_horz_scan_dir) {
- int int_part;
-
- data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
- data->ratios.horz, recout_skip->width));
- int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
- if (int_part < data->taps.h_taps) {
- int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
- (data->taps.h_taps - int_part) : data->viewport.x;
- data->viewport.x -= int_adj;
- data->viewport.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps) {
- data->viewport.x += int_part - data->taps.h_taps;
- data->viewport.width -= int_part - data->taps.h_taps;
- int_part = data->taps.h_taps;
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ int int_part;
+ int end_offset = src.x + src.width
+ - data->viewport.x - data->viewport.width;
+
+ /*
+ * this is the init value if the vp had no offset; keep in mind this is
+ * measured from the right side of the vp due to scan direction
+ */
+ data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+ data->ratios.horz, data->recout.x - recout_full->x));
+ /*
+ * this is the difference between the first pixel of the viewport available
+ * to read and the init position, taking into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.h) - end_offset;
+ if (int_part < data->taps.h_taps) {
+ int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
+ (data->taps.h_taps - int_part) : end_offset;
+ data->viewport.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps) {
+ data->viewport.width += int_part - data->taps.h_taps;
+ int_part = data->taps.h_taps;
+ }
+ data->inits.h.value &= 0xffffffff;
+ data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
}
- data->inits.h.value &= 0xffffffff;
- data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
- }
-
- if (data->viewport_c.x && !flip_horz_scan_dir) {
- int int_part;
-
- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
- data->ratios.horz_c, recout_skip->width));
- int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
- if (int_part < data->taps.h_taps_c) {
- int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
- (data->taps.h_taps_c - int_part) : data->viewport_c.x;
- data->viewport_c.x -= int_adj;
- data->viewport_c.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps_c) {
- data->viewport_c.x += int_part - data->taps.h_taps_c;
- data->viewport_c.width -= int_part - data->taps.h_taps_c;
- int_part = data->taps.h_taps_c;
+
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ int int_part;
+ int end_offset = (src.x + src.width) / vpc_div
+ - data->viewport_c.x - data->viewport_c.width;
+
+ /*
+ * this is the init value if the vp had no offset; keep in mind this is
+ * measured from the right side of the vp due to scan direction
+ */
+ data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+ data->ratios.horz_c, data->recout.x - recout_full->x));
+ /*
+ * this is the difference between the first pixel of the viewport available
+ * to read and the init position, taking into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
+ if (int_part < data->taps.h_taps_c) {
+ int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
+ (data->taps.h_taps_c - int_part) : end_offset;
+ data->viewport_c.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps_c) {
+ data->viewport_c.width += int_part - data->taps.h_taps_c;
+ int_part = data->taps.h_taps_c;
+ }
+ data->inits.h_c.value &= 0xffffffff;
+ data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ }
+
+ }
+ if (!flip_vert_scan_dir) {
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+ int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
}
- data->inits.h_c.value &= 0xffffffff;
- data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
- }
-
- if (data->viewport.y && !flip_vert_scan_dir) {
- int int_part;
-
- data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
- data->ratios.vert, recout_skip->height));
- int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
- if (int_part < data->taps.v_taps) {
- int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
- (data->taps.v_taps - int_part) : data->viewport.y;
- data->viewport.y -= int_adj;
- data->viewport.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps) {
- data->viewport.y += int_part - data->taps.v_taps;
- data->viewport.height -= int_part - data->taps.v_taps;
- int_part = data->taps.v_taps;
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+ int vp_clip = (src.y + src.height) / vpc_div -
+ data->viewport_c.height - data->viewport_c.y;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
}
- data->inits.v.value &= 0xffffffff;
- data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
- }
-
- if (data->viewport_c.y && !flip_vert_scan_dir) {
- int int_part;
-
- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
- data->ratios.vert_c, recout_skip->height));
- int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
- if (int_part < data->taps.v_taps_c) {
- int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
- (data->taps.v_taps_c - int_part) : data->viewport_c.y;
- data->viewport_c.y -= int_adj;
- data->viewport_c.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps_c) {
- data->viewport_c.y += int_part - data->taps.v_taps_c;
- data->viewport_c.height -= int_part - data->taps.v_taps_c;
- int_part = data->taps.v_taps_c;
+
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.y) {
+ int int_part;
+
+ data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+ data->ratios.vert, data->recout.y - recout_full->y));
+ int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
+ if (int_part < data->taps.v_taps) {
+ int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+ (data->taps.v_taps - int_part) : data->viewport.y;
+ data->viewport.y -= int_adj;
+ data->viewport.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps) {
+ data->viewport.y += int_part - data->taps.v_taps;
+ data->viewport.height -= int_part - data->taps.v_taps;
+ int_part = data->taps.v_taps;
+ }
+ data->inits.v.value &= 0xffffffff;
+ data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+ }
+
+ if (data->viewport_c.y) {
+ int int_part;
+
+ data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+ data->ratios.vert_c, data->recout.y - recout_full->y));
+ int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
+ if (int_part < data->taps.v_taps_c) {
+ int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+ (data->taps.v_taps_c - int_part) : data->viewport_c.y;
+ data->viewport_c.y -= int_adj;
+ data->viewport_c.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps_c) {
+ data->viewport_c.y += int_part - data->taps.v_taps_c;
+ data->viewport_c.height -= int_part - data->taps.v_taps_c;
+ int_part = data->taps.v_taps_c;
+ }
+ data->inits.v_c.value &= 0xffffffff;
+ data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
+ }
+ } else {
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.y) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
+ data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
+ }
+ if (data->viewport_c.y) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
+ data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
+ }
+
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+ int int_part;
+ int end_offset = src.y + src.height
+ - data->viewport.y - data->viewport.height;
+
+ /*
+ * this is the init value if the vp had no offset; keep in mind this is
+ * measured from the bottom of the vp due to scan direction
+ */
+ data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+ data->ratios.vert, data->recout.y - recout_full->y));
+ /*
+ * this is the difference between the first pixel of the viewport available
+ * to read and the init position, taking into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.v) - end_offset;
+ if (int_part < data->taps.v_taps) {
+ int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
+ (data->taps.v_taps - int_part) : end_offset;
+ data->viewport.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps) {
+ data->viewport.height += int_part - data->taps.v_taps;
+ int_part = data->taps.v_taps;
+ }
+ data->inits.v.value &= 0xffffffff;
+ data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+ }
+
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+ int int_part;
+ int end_offset = (src.y + src.height) / vpc_div
+ - data->viewport_c.y - data->viewport_c.height;
+
+ /*
+ * this is the init value if the vp had no offset; keep in mind this is
+ * measured from the bottom of the vp due to scan direction
+ */
+ data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+ data->ratios.vert_c, data->recout.y - recout_full->y));
+ /*
+ * this is the difference between the first pixel of the viewport available
+ * to read and the init position, taking into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
+ if (int_part < data->taps.v_taps_c) {
+ int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
+ (data->taps.v_taps_c - int_part) : end_offset;
+ data->viewport_c.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps_c) {
+ data->viewport_c.height += int_part - data->taps.v_taps_c;
+ int_part = data->taps.v_taps_c;
+ }
+ data->inits.v_c.value &= 0xffffffff;
+ data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
}
- data->inits.v_c.value &= 0xffffffff;
- data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
}
/* Interlaced inits based on final vert inits */
@@ -846,7 +1043,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
- struct view recout_skip = { 0 };
+ struct rect recout_full = { 0 };
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
@@ -866,7 +1063,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
return false;
- calculate_recout(pipe_ctx, &recout_skip);
+ calculate_recout(pipe_ctx, &recout_full);
/**
* Setting line buffer pixel depth to 24bpp yields banding
@@ -910,7 +1107,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (res)
/* May need to re-check lb size after this in some obscure scenario */
- calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
+ calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
DC_LOG_SCALER(
"%s: Viewport:\nheight:%d width:%d x:%d "
@@ -1054,7 +1251,7 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
static int acquire_first_split_pipe(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1125,7 +1322,7 @@ bool dc_add_plane_to_context(
free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (!free_pipe) {
int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
if (pipe_idx >= 0)
@@ -1546,8 +1743,8 @@ enum dc_status dc_add_stream_to_ctx(
struct dc_context *dc_ctx = dc->ctx;
enum dc_status res;
- if (new_ctx->stream_count >= dc->res_pool->pipe_count) {
- DC_ERROR("Max streams reached, can add stream %p !\n", stream);
+ if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
+ DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
return DC_ERROR_UNEXPECTED;
}
@@ -1723,7 +1920,7 @@ enum dc_status resource_map_pool_resources(
/* acquire new resources */
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
if (pipe_idx < 0)
pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
#endif
@@ -1789,7 +1986,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->dis_clk = dc->res_pool->display_clock;
+ dst_ctx->dis_clk = dc->res_pool->dccg;
}
enum dc_status dc_validate_global_state(
@@ -2347,7 +2544,8 @@ static void set_hdr_static_info_packet(
{
/* HDR Static Metadata info packet for HDR10 */
- if (!stream->hdr_static_metadata.valid)
+ if (!stream->hdr_static_metadata.valid ||
+ stream->use_dynamic_meta)
return;
*info_packet = stream->hdr_static_metadata;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index 25fae38409ab..9971b515c3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -53,6 +53,10 @@ static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init
sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
sink->converter_disable_audio = init_params->converter_disable_audio;
sink->dc_container_id = NULL;
+ sink->sink_id = init_params->link->ctx->dc_sink_id_count;
+ // increment dc_sink_id_count because we don't want two sinks with the
+ // same ID (unless they are actually the same sink)
+ init_params->link->ctx->dc_sink_id_count++;
return true;
}
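The sink ID assignment above is a plain monotonically increasing counter kept on the dc_context. A minimal sketch of the same pattern, assuming only a context struct with a counter field (the names here are illustrative, not from the driver):

	struct example_ctx {
		uint32_t next_id;
	};

	static uint32_t alloc_sink_id(struct example_ctx *ctx)
	{
		/* hand out the current value, then bump it so no two
		 * sinks created on this context share an ID */
		return ctx->next_id++;
	}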
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 3732a1de9d6c..fdcc8ab19bf3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -30,6 +30,8 @@
#include "ipp.h"
#include "timing_generator.h"
+#define DC_LOGGER dc->ctx->logger
+
/*******************************************************************************
* Private functions
******************************************************************************/
@@ -212,6 +214,8 @@ bool dc_stream_set_cursor_attributes(
}
core_dc->hwss.set_cursor_attribute(pipe_ctx);
+ if (core_dc->hwss.set_cursor_sdr_white_level)
+ core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (pipe_to_program)
@@ -317,16 +321,10 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
return ret;
}
-
-void dc_stream_log(
- const struct dc_stream_state *stream,
- struct dal_logger *dm_logger,
- enum dc_log_type log_type)
+void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
{
-
- dm_logger_write(dm_logger,
- log_type,
- "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
+ DC_LOG_DC(
+ "core_stream 0x%p: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
stream,
stream->src.x,
stream->src.y,
@@ -337,21 +335,18 @@ void dc_stream_log(
stream->dst.width,
stream->dst.height,
stream->output_color_space);
- dm_logger_write(dm_logger,
- log_type,
+ DC_LOG_DC(
"\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
stream->timing.pix_clk_khz,
stream->timing.h_total,
stream->timing.v_total,
stream->timing.pixel_encoding,
stream->timing.display_color_depth);
- dm_logger_write(dm_logger,
- log_type,
+ DC_LOG_DC(
"\tsink name: %s, serial: %d\n",
stream->sink->edid_caps.display_name,
stream->sink->edid_caps.serial_number);
- dm_logger_write(dm_logger,
- log_type,
+ DC_LOG_DC(
"\tlink: %d\n",
stream->sink->link->link_index);
}
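With the new signature, callers pass the dc instance instead of a logger and log type; DC_LOG_DC picks the logger up from dc->ctx. A call site would now look like this (sketch; LOG_DC stands in for whichever dc_log_type the caller previously passed):

	/* before: dc_stream_log(stream, dm_logger, LOG_DC); */
	dc_stream_log(dc, stream);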
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 68a71adeb12e..8fb3aefd195c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -84,6 +84,17 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
return plane_state;
}
+/**
+ * dc_plane_get_status() - Look up the pipe context of plane_state, update
+ * the pending status of that pipe context, and return plane_state->status.
+ *
+ * @plane_state: pointer to the plane_state to get the status of
+ */
const struct dc_plane_status *dc_plane_get_status(
const struct dc_plane_state *plane_state)
{
@@ -181,7 +192,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
kref_put(&tf->refcount, dc_transfer_func_free);
}
-struct dc_transfer_func *dc_create_transfer_func()
+struct dc_transfer_func *dc_create_transfer_func(void)
{
struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
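The change from () to (void) matters in C: an empty parameter list declares a function taking an unspecified number of arguments, while (void) declares a true prototype taking none, letting the compiler reject stray arguments. A standalone illustration (editor's sketch):

	struct dc_transfer_func *dc_create_transfer_func(void);

	void example(void)
	{
		/* with "(void)" the call below is a compile error;
		 * with "()" older C standards accept it silently */
		/* dc_create_transfer_func(42); */
		struct dc_transfer_func *tf = dc_create_transfer_func();

		(void)tf;
	}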
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 9cfde0ccf4e9..e2f033d420a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.44"
+#define DC_VER "3.1.59"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -68,6 +68,7 @@ struct dc_caps {
uint32_t max_planes;
uint32_t max_downscale_ratio;
uint32_t i2c_speed_in_khz;
+ uint32_t dmdata_alloc_size;
unsigned int max_cursor_size;
unsigned int max_video_width;
int linear_pitch_alignment;
@@ -76,6 +77,9 @@ struct dc_caps {
bool is_apu;
bool dual_link_dvi;
bool post_blend_color_processing;
+ bool force_dp_tps4_for_cp2520;
+ bool disable_dp_clk_share;
+ bool psp_setup_panel_mode;
};
struct dc_dcc_surface_param {
@@ -168,6 +172,12 @@ struct dc_config {
bool disable_disp_pll_sharing;
};
+enum visual_confirm {
+ VISUAL_CONFIRM_DISABLE = 0,
+ VISUAL_CONFIRM_SURFACE = 1,
+ VISUAL_CONFIRM_HDR = 2,
+};
+
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@@ -185,6 +195,10 @@ enum wm_report_mode {
WM_REPORT_OVERRIDE = 1,
};
+/*
+ * For any clocks that may differ per pipe,
+ * only the max is stored in this structure
+ */
struct dc_clocks {
int dispclk_khz;
int max_supported_dppclk_khz;
@@ -193,10 +207,11 @@ struct dc_clocks {
int socclk_khz;
int dcfclk_deep_sleep_khz;
int fclk_khz;
+ int phyclk_khz;
};
-struct dc_debug {
- bool surface_visual_confirm;
+struct dc_debug_options {
+ enum visual_confirm visual_confirm;
bool sanity_checks;
bool max_disp_clk;
bool surface_trace;
@@ -227,6 +242,7 @@ struct dc_debug {
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
+ bool optimized_watermark;
int always_scale;
bool disable_pplib_clock_request;
bool disable_clock_gate;
@@ -242,8 +258,19 @@ struct dc_debug {
bool always_use_regamma;
bool p010_mpo_support;
bool recovery_enabled;
+ bool avoid_vbios_exec_table;
+ bool scl_reset_length10;
+ bool hdmi20_disable;
+ bool skip_detection_link_training;
+};
+struct dc_debug_data {
+ uint32_t ltFailCount;
+ uint32_t i2cErrorCount;
+ uint32_t auxErrorCount;
};
+
+
struct dc_state;
struct resource_pool;
struct dce_hwseq;
@@ -252,8 +279,7 @@ struct dc {
struct dc_caps caps;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
- struct dc_debug debug;
-
+ struct dc_debug_options debug;
struct dc_context *ctx;
uint8_t link_count;
@@ -268,7 +294,7 @@ struct dc {
/* Inputs into BW and WM calculations. */
struct bw_calcs_dceip *bw_dceip;
struct bw_calcs_vbios *bw_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
struct display_mode_lib dml;
@@ -288,9 +314,9 @@ struct dc {
bool apply_edp_fast_boot_optimization;
/* FBC compressor */
-#if defined(CONFIG_DRM_AMD_DC_FBC)
struct compressor *fbc_compressor;
-#endif
+
+ struct dc_debug_data debug_data;
};
enum frame_buffer_mode {
@@ -358,6 +384,7 @@ enum dc_transfer_func_type {
TF_TYPE_PREDEFINED,
TF_TYPE_DISTRIBUTED_POINTS,
TF_TYPE_BYPASS,
+ TF_TYPE_HWPWL
};
struct dc_transfer_func_distributed_points {
@@ -377,16 +404,22 @@ enum dc_transfer_func_predefined {
TRANSFER_FUNCTION_PQ,
TRANSFER_FUNCTION_LINEAR,
TRANSFER_FUNCTION_UNITY,
+ TRANSFER_FUNCTION_HLG,
+ TRANSFER_FUNCTION_HLG12,
+ TRANSFER_FUNCTION_GAMMA22
};
struct dc_transfer_func {
struct kref refcount;
- struct dc_transfer_func_distributed_points tf_pts;
enum dc_transfer_func_type type;
enum dc_transfer_func_predefined tf;
/* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
uint32_t sdr_ref_white_level;
struct dc_context *ctx;
+ union {
+ struct pwl_params pwl;
+ struct dc_transfer_func_distributed_points tf_pts;
+ };
};
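Since a transfer function is either a PWL or a set of distributed points, the two large members now share storage in a union, and the type field says which one is live. A sketch of the access pattern this implies (hypothetical helper, not from the patch):

	static struct dc_transfer_func_distributed_points *
	example_get_points(struct dc_transfer_func *tf)
	{
		/* only valid while type says the union holds tf_pts */
		if (tf->type == TF_TYPE_DISTRIBUTED_POINTS)
			return &tf->tf_pts;
		return NULL;	/* TF_TYPE_HWPWL uses tf->pwl instead */
	}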
/*
@@ -616,9 +649,14 @@ struct dpcd_caps {
struct dc_dongle_caps dongle_caps;
uint32_t sink_dev_id;
+ int8_t sink_dev_id_str[6];
+ int8_t sink_hw_revision;
+ int8_t sink_fw_revision[2];
+
uint32_t branch_dev_id;
int8_t branch_dev_name[6];
int8_t branch_hw_revision;
+ int8_t branch_fw_revision[2];
bool allow_invalid_MSA_timing_param;
bool panel_mode_edp;
@@ -661,9 +699,13 @@ struct dc_sink {
struct dc_link *link;
struct dc_context *ctx;
+ uint32_t sink_id;
+
/* private to dc_sink.c */
+ // refcount must be the last member in dc_sink, since we want the
+ // sink structure to be logically cloneable up to (but not including)
+ // refcount
struct kref refcount;
-
};
void dc_sink_retain(struct dc_sink *sink);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index d9b84ec7954c..90082bab71f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -198,6 +198,10 @@ struct dc_vbios_funcs {
void (*post_init)(struct dc_bios *bios);
void (*bios_parser_destroy)(struct dc_bios **dcb);
+
+ enum bp_result (*get_board_layout_info)(
+ struct dc_bios *dcb,
+ struct board_layout_info *board_layout_info);
};
struct bios_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index e1affeb5cc51..05c8c31d8b31 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -25,6 +25,65 @@
#ifndef DC_DDC_TYPES_H_
#define DC_DDC_TYPES_H_
+enum aux_transaction_type {
+ AUX_TRANSACTION_TYPE_DP,
+ AUX_TRANSACTION_TYPE_I2C
+};
+
+
+enum i2caux_transaction_action {
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
+
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
+
+ I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
+ I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
+};
+
+enum aux_channel_operation_result {
+ AUX_CHANNEL_OPERATION_SUCCEEDED,
+ AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
+ AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
+ AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+};
+
+
+struct aux_request_transaction_data {
+ enum aux_transaction_type type;
+ enum i2caux_transaction_action action;
+ /* 20-bit AUX channel transaction address */
+ uint32_t address;
+ /* delay, in 100-microsecond units */
+ uint8_t delay;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum aux_transaction_reply {
+ AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
+ AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+
+ AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
+ AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
+
+ AUX_TRANSACTION_REPLY_HPD_DISCON = 0x40,
+
+ AUX_TRANSACTION_REPLY_INVALID = 0xFF
+};
+
+struct aux_reply_transaction_data {
+ enum aux_transaction_reply status;
+ uint32_t length;
+ uint8_t *data;
+};
+
struct i2c_payload {
bool write;
uint8_t address;
@@ -109,7 +168,7 @@ struct ddc_service {
uint32_t address;
uint32_t edid_buf_len;
- uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+ uint8_t edid_buf[DC_MAX_EDID_BUFFER_SIZE];
};
#endif /* DC_DDC_TYPES_H_ */
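A DPCD read over AUX, expressed with the request structure defined above (editor's sketch; the buffer, size, and address are illustrative):

	uint8_t buf[16];
	struct aux_request_transaction_data req = {
		.type    = AUX_TRANSACTION_TYPE_DP,
		.action  = I2CAUX_TRANSACTION_ACTION_DP_READ,
		.address = 0x00000,	/* 20-bit DPCD address */
		.delay   = 0,		/* in 100-microsecond units */
		.length  = sizeof(buf),
		.data    = buf,
	};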
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 90bccd5ccaa2..da93ab43f2d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -430,7 +430,7 @@ union test_request {
struct {
uint8_t LINK_TRAINING :1;
uint8_t LINK_TEST_PATTRN :1;
- uint8_t EDID_REAT :1;
+ uint8_t EDID_READ :1;
uint8_t PHY_TEST_PATTERN :1;
uint8_t AUDIO_TEST_PATTERN :1;
uint8_t RESERVED :1;
@@ -443,7 +443,8 @@ union test_response {
struct {
uint8_t ACK :1;
uint8_t NO_ACK :1;
- uint8_t RESERVED :6;
+ uint8_t EDID_CHECKSUM_WRITE:1;
+ uint8_t RESERVED :5;
} bits;
uint8_t raw;
};
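The new EDID_CHECKSUM_WRITE bit narrows RESERVED from 6 bits to 5; a source acknowledging an EDID-read test request sets it together with ACK. Writing the response byte through the union (editor's sketch):

	union test_response resp = { .raw = 0 };

	resp.bits.ACK = 1;
	resp.bits.EDID_CHECKSUM_WRITE = 1;	/* for the EDID_READ test */
	/* resp.raw now holds the byte to write back to TEST_RESPONSE */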
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index bd0fda0ceb91..e68077e65565 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -255,3 +255,54 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
return reg_val;
}
+
+void generic_write_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t data)
+{
+ dm_write_reg(ctx, addr_index, index);
+ dm_write_reg(ctx, addr_data, data);
+}
+
+uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index)
+{
+ uint32_t value = 0;
+
+ dm_write_reg(ctx, addr_index, index);
+ value = dm_read_reg(ctx, addr_data);
+
+ return value;
+}
+
+
+uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ uint32_t shift, mask, field_value;
+ int i = 1;
+
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+ i++;
+ }
+
+ generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
+ va_end(ap);
+
+ return reg_val;
+}
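Usage of the indirect-register helpers above follows the usual index/data pair pattern: latch the target register by writing its index, then read or write through the data port. A read-modify-write of one field might look like this (editor's sketch; ADDR_INDEX, ADDR_DATA, REG_IDX, and the FIELD_* macros are placeholders, not driver symbols):

	uint32_t val;

	/* read the current register value through the index/data pair */
	val = generic_read_indirect_reg(ctx, ADDR_INDEX, ADDR_DATA, REG_IDX);

	/* update a single field in place and write it back
	 * (n = 1, so one shift/mask/value triple follows) */
	val = generic_indirect_reg_update_ex(ctx, ADDR_INDEX, ADDR_DATA,
			REG_IDX, val, 1,
			FIELD_SHIFT, FIELD_MASK, field_value);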
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index b1f70579d61b..b789cb2b354b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -192,13 +192,14 @@ enum surface_pixel_format {
/* swapped & float */
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
-
+ SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
+ SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
SURFACE_PIXEL_FORMAT_INVALID
/*grow 444 video here if necessary */
@@ -403,9 +404,11 @@ struct dc_cursor_position {
struct dc_cursor_mi_param {
unsigned int pixel_clk_khz;
unsigned int ref_clk_khz;
- unsigned int viewport_x_start;
- unsigned int viewport_width;
+ struct rect viewport;
struct fixed31_32 h_scale_ratio;
+ struct fixed31_32 v_scale_ratio;
+ enum dc_rotation_angle rotation;
+ bool mirror;
};
/* IPP related types */
@@ -414,6 +417,7 @@ enum {
GAMMA_RGB_256_ENTRIES = 256,
GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
GAMMA_CS_TFM_1D_ENTRIES = 4096,
+ GAMMA_CUSTOM_ENTRIES = 4096,
GAMMA_MAX_ENTRIES = 4096
};
@@ -421,6 +425,7 @@ enum dc_gamma_type {
GAMMA_RGB_256 = 1,
GAMMA_RGB_FLOAT_1024 = 2,
GAMMA_CS_TFM_1D = 3,
+ GAMMA_CUSTOM = 4,
};
struct dc_csc_transform {
@@ -489,6 +494,7 @@ struct dc_cursor_attributes {
uint32_t height;
enum dc_cursor_color_format color_format;
+ uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
/* In case we support HW Cursor rotation in the future */
enum dc_rotation_angle rotation_angle;
@@ -496,6 +502,11 @@ struct dc_cursor_attributes {
union dc_cursor_attribute_flags attribute_flags;
};
+struct dpp_cursor_attributes {
+ int bias;
+ int scale;
+};
+
/* OPP */
enum dc_color_space {
@@ -567,25 +578,25 @@ struct scaling_taps {
};
enum dc_timing_standard {
- TIMING_STANDARD_UNDEFINED,
- TIMING_STANDARD_DMT,
- TIMING_STANDARD_GTF,
- TIMING_STANDARD_CVT,
- TIMING_STANDARD_CVT_RB,
- TIMING_STANDARD_CEA770,
- TIMING_STANDARD_CEA861,
- TIMING_STANDARD_HDMI,
- TIMING_STANDARD_TV_NTSC,
- TIMING_STANDARD_TV_NTSC_J,
- TIMING_STANDARD_TV_PAL,
- TIMING_STANDARD_TV_PAL_M,
- TIMING_STANDARD_TV_PAL_CN,
- TIMING_STANDARD_TV_SECAM,
- TIMING_STANDARD_EXPLICIT,
+ DC_TIMING_STANDARD_UNDEFINED,
+ DC_TIMING_STANDARD_DMT,
+ DC_TIMING_STANDARD_GTF,
+ DC_TIMING_STANDARD_CVT,
+ DC_TIMING_STANDARD_CVT_RB,
+ DC_TIMING_STANDARD_CEA770,
+ DC_TIMING_STANDARD_CEA861,
+ DC_TIMING_STANDARD_HDMI,
+ DC_TIMING_STANDARD_TV_NTSC,
+ DC_TIMING_STANDARD_TV_NTSC_J,
+ DC_TIMING_STANDARD_TV_PAL,
+ DC_TIMING_STANDARD_TV_PAL_M,
+ DC_TIMING_STANDARD_TV_PAL_CN,
+ DC_TIMING_STANDARD_TV_SECAM,
+ DC_TIMING_STANDARD_EXPLICIT,
/*!< For explicit timings from EDID, VBIOS, etc.*/
- TIMING_STANDARD_USER_OVERRIDE,
+ DC_TIMING_STANDARD_USER_OVERRIDE,
/*!< For mode timing override by user*/
- TIMING_STANDARD_MAX
+ DC_TIMING_STANDARD_MAX
};
enum dc_color_depth {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 8a716baa1203..d43cefbc43d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -73,6 +73,7 @@ struct dc_link {
enum dc_irq_source irq_source_hpd;
enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
bool is_hpd_filter_disabled;
+ bool dp_ss_off;
/* caps is the same as reported_link_cap. link_traing use
* reported_link_cap. Will clean up. TODO
@@ -141,6 +142,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream);
+int dc_link_get_backlight_level(const struct dc_link *dc_link);
+
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
@@ -172,7 +175,7 @@ bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
* false - no change in Downstream port status. No further action required
* from DM. */
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
- union hpd_irq_data *hpd_irq_dpcd_data);
+ union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
struct dc_sink_init_data;
@@ -210,10 +213,29 @@ bool dc_link_dp_set_test_pattern(
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
+bool dc_link_is_dp_sink_present(struct dc_link *link);
+
/*
* DPCD access interfaces
*/
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link);
+void dc_link_perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+void dc_link_enable_hpd(const struct dc_link *link);
+void dc_link_disable_hpd(const struct dc_link *link);
+void dc_link_set_test_pattern(struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index d7e6d53bb383..cbfe418006cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -59,6 +59,9 @@ struct dc_stream_state {
struct freesync_context freesync_ctx;
struct dc_info_packet hdr_static_metadata;
+ PHYSICAL_ADDRESS_LOC dmdata_address;
+ bool use_dynamic_meta;
+
struct dc_transfer_func *out_transfer_func;
struct colorspace_transform gamut_remap_matrix;
struct dc_csc_transform csc_color_matrix;
@@ -97,6 +100,7 @@ struct dc_stream_state {
struct dc_cursor_attributes cursor_attributes;
struct dc_cursor_position cursor_position;
+ uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
/* from stream struct */
struct kref refcount;
@@ -144,10 +148,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
/*
* Log the current stream state.
*/
-void dc_stream_log(
- const struct dc_stream_state *stream,
- struct dal_logger *dc_logger,
- enum dc_log_type log_type);
+void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream);
uint8_t dc_get_current_stream_count(struct dc *dc);
struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
@@ -255,6 +256,7 @@ bool dc_stream_set_cursor_position(
struct dc_stream_state *stream,
const struct dc_cursor_position *position);
+
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state **stream,
int num_streams,
@@ -299,9 +301,4 @@ bool dc_stream_get_crtc_position(struct dc *dc,
unsigned int *v_pos,
unsigned int *nom_v_pos);
-void dc_stream_set_static_screen_events(struct dc *dc,
- struct dc_stream_state **stream,
- int num_streams,
- const struct dc_static_screen_events *events);
-
#endif /* DC_STREAM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 76df2534c4a4..8c6eb78b0c3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -77,8 +77,6 @@ struct dc_context {
struct dc *dc;
void *driver_context; /* e.g. amdgpu_device */
-
- struct dal_logger *logger;
void *cgs_device;
enum dce_environment dce_environment;
@@ -92,13 +90,12 @@ struct dc_context {
bool created_bios;
struct gpio_service *gpio_service;
struct i2caux *i2caux;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ uint32_t dc_sink_id_count;
uint64_t fbc_gpu_addr;
-#endif
};
-#define MAX_EDID_BUFFER_SIZE 512
+#define DC_MAX_EDID_BUFFER_SIZE 512
#define EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
@@ -137,13 +134,13 @@ enum plane_stereo_format {
*/
enum dc_edid_connector_type {
- EDID_CONNECTOR_UNKNOWN = 0,
- EDID_CONNECTOR_ANALOG = 1,
- EDID_CONNECTOR_DIGITAL = 10,
- EDID_CONNECTOR_DVI = 11,
- EDID_CONNECTOR_HDMIA = 12,
- EDID_CONNECTOR_MDDI = 14,
- EDID_CONNECTOR_DISPLAYPORT = 15
+ DC_EDID_CONNECTOR_UNKNOWN = 0,
+ DC_EDID_CONNECTOR_ANALOG = 1,
+ DC_EDID_CONNECTOR_DIGITAL = 10,
+ DC_EDID_CONNECTOR_DVI = 11,
+ DC_EDID_CONNECTOR_HDMIA = 12,
+ DC_EDID_CONNECTOR_MDDI = 14,
+ DC_EDID_CONNECTOR_DISPLAYPORT = 15
};
enum dc_edid_status {
@@ -169,7 +166,7 @@ struct dc_cea_audio_mode {
struct dc_edid {
uint32_t length;
- uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
+ uint8_t raw_edid[DC_MAX_EDID_BUFFER_SIZE];
};
/* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
@@ -195,6 +192,7 @@ union display_content_support {
struct dc_panel_patch {
unsigned int dppowerup_delay;
+ unsigned int extra_t12_ms;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 11401fd8e535..825537bd4545 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
new file mode 100644
index 000000000000..3f5b2e6f7553
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -0,0 +1,937 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_aux.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define CTX \
+ aux110->base.ctx
+#define REG(reg_name)\
+ (aux110->regs->reg_name)
+
+#define DC_LOGGER \
+ engine->ctx->logger
+
+#include "reg_helper.h"
+
+#define FROM_AUX_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine_dce110, base)
+
+#define FROM_ENGINE(ptr) \
+ FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
+
+#define FROM_AUX_ENGINE_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine, base)
+enum {
+ AUX_INVALID_REPLY_RETRY_COUNTER = 1,
+ AUX_TIMED_OUT_RETRY_COUNTER = 2,
+ AUX_DEFER_RETRY_COUNTER = 6
+};
+static void release_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ dal_ddc_close(engine->ddc);
+
+ engine->ddc = NULL;
+
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+}
+
+#define SW_CAN_ACCESS_AUX 1
+#define DMCU_CAN_ACCESS_AUX 2
+
+static bool is_engine_available(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field != DMCU_CAN_ACCESS_AUX);
+}
+static bool acquire_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+ if (field == DMCU_CAN_ACCESS_AUX)
+ return false;
+ /* enable AUX before requesting SW access to AUX */
+ value = REG_READ(AUX_CONTROL);
+ field = get_reg_field_value(value,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (field == 0) {
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (REG(AUX_RESET_MASK)) {
+ /* reset the DP_AUX block as part of the enable sequence */
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_RESET);
+ }
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ if (REG(AUX_RESET_MASK)) {
+ /* poll HW to make sure the reset is done */
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
+ 1, 11);
+
+ set_reg_field_value(
+ value,
+ 0,
+ AUX_CONTROL,
+ AUX_RESET);
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
+ 1, 11);
+ }
+ } /*if (field)*/
+
+ /* request SW to access AUX */
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
+
+ value = REG_READ(AUX_ARB_CONTROL);
+ field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field == SW_CAN_ACCESS_AUX);
+}
+
+#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
+ ((command) | ((0xF0000 & (address)) >> 16))
+
+#define COMPOSE_AUX_SW_DATA_8_15(address) \
+ ((0xFF00 & (address)) >> 8)
+
+#define COMPOSE_AUX_SW_DATA_0_7(address) \
+ (0xFF & (address))
+
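/*
 * Editor's note (worked example, not part of the patch): for a DP read
 * (action 0x90) of AUX address 0x00205, the three header bytes are
 *   byte 0: 0x90 | ((0xF0000 & 0x00205) >> 16) = 0x90
 *   byte 1: (0xFF00 & 0x00205) >> 8            = 0x02
 *   byte 2:  0xFF  & 0x00205                   = 0x05
 */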
+static void submit_channel_request(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t value;
+ uint32_t length;
+
+ bool is_write =
+ ((request->type == AUX_TRANSACTION_TYPE_DP) &&
+ (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
+ ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
+ ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+ if (REG(AUXN_IMPCAL)) {
+ /* clear_aux_error */
+ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ /* force_default_calibrate */
+ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ AUXN_IMPCAL_ENABLE, 1,
+ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+
+ /* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP toggles OVERRIDE? */
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+ 1,
+ 0);
+ }
+ /* set the delay and the number of bytes to write */
+
+ /* The length includes
+ * the 4-bit header and the 20-bit address
+ * (that is, 3 bytes).
+ * If the requested length is non-zero,
+ * an additional byte specifying the length is required.
+ */
+
+ length = request->length ? 4 : 3;
+ if (is_write)
+ length += request->length;
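/*
 * Editor's note (worked example, not part of the patch): a 2-byte DPCD
 * write sends length = 4 (header + length byte) + 2 (payload) = 6 bytes,
 * while a 2-byte read sends only the 4 header bytes (3 if the requested
 * length is zero).
 */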
+
+ REG_UPDATE_2(AUX_SW_CONTROL,
+ AUX_SW_START_DELAY, request->delay,
+ AUX_SW_WR_BYTES, length);
+
+ /* program action and address and payload data (if 'is_write') */
+ value = REG_UPDATE_4(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_DATA_RW, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
+
+ value = REG_SET_2(AUX_SW_DATA, value,
+ AUX_SW_AUTOINCREMENT_DISABLE, 0,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
+
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
+
+ if (request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->length - 1);
+ }
+
+ if (is_write) {
+ /* Load the HW buffer with the data to be sent.
+ * This is relevant for write operations.
+ * For reads, the received data will be
+ * processed in process_channel_reply().
+ */
+ uint32_t i = 0;
+
+ while (i < request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->data[i]);
+
+ ++i;
+ }
+ }
+
+ REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+}
+
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+ uint8_t *buffer, uint8_t *reply_result,
+ uint32_t *sw_status)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t bytes_replied;
+ uint32_t reply_result_32;
+
+ *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+ &bytes_replied);
+
+ /* In case HPD is LOW, exit AUX transaction */
+ if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return -1;
+
+ /* Need at least the status byte */
+ if (!bytes_replied)
+ return -1;
+
+ REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA_RW, 1);
+
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+ reply_result_32 = reply_result_32 >> 4;
+ *reply_result = (uint8_t)reply_result_32;
+
+ if (reply_result_32 == 0) { /* ACK */
+ uint32_t i = 0;
+
+ /* First byte was already used to get the command status */
+ --bytes_replied;
+
+ /* Do not overflow buffer */
+ if (bytes_replied > size)
+ return -1;
+
+ while (i < bytes_replied) {
+ uint32_t aux_sw_data_val;
+
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+ buffer[i] = aux_sw_data_val;
+ ++i;
+ }
+
+ return i;
+ }
+
+ return 0;
+}
+
+static void process_channel_reply(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply)
+{
+ int bytes_replied;
+ uint8_t reply_result;
+ uint32_t sw_status;
+
+ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+ &reply_result, &sw_status);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+ return;
+ }
+
+ if (bytes_replied < 0) {
+ /* Need to handle an error case...
+ * Ideally the upper-layer function won't call this function if
+ * the number of bytes in the reply was 0, because in that case
+ * an error was surely asserted that should have been handled
+ * for the hot-plug case; this can happen
+ */
+ if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ ASSERT_CRITICAL(false);
+ return;
+ }
+ } else {
+
+ switch (reply_result) {
+ case 0: /* ACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+ break;
+ case 1: /* NACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
+ break;
+ case 2: /* DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
+ break;
+ case 4: /* AUX ACK / I2C NACK */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
+ break;
+ case 8: /* AUX ACK / I2C DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
+ break;
+ default:
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ }
+ }
+}
+
+static enum aux_channel_operation_result get_channel_status(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value;
+
+ if (returned_bytes == NULL) {
+ /* caller passed a NULL pointer */
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
+ }
+ *returned_bytes = 0;
+
+ /* poll to make sure that SW_DONE is asserted */
+ value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ 10, aux110->timeout_period/10);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
+ /* Note that the following bits are set in 'status.bits'
+ * during CTS 4.2.1.2 (FW 3.3.1):
+ * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
+ * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
+ *
+ * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
+ * HW debugging bit and should be ignored.
+ */
+ if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
+ if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+
+ else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
+ (value &
+ AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+
+ *returned_bytes = get_reg_field_value(value,
+ AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT);
+
+ if (*returned_bytes == 0)
+ return
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+ else {
+ *returned_bytes -= 1;
+ return AUX_CHANNEL_OPERATION_SUCCEEDED;
+ }
+ } else {
+ /* time_elapsed >= aux_engine->timeout_period;
+ * AUX_SW_DONE was not asserted within the timeout period
+ */
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+ }
+}
+static void process_read_reply(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->defer_retry_aux = 0;
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else if (ctx->returned_byte < ctx->current_read_length) {
+ ctx->current_read_length -= ctx->returned_byte;
+
+ ctx->offset += ctx->returned_byte;
+
+ ++ctx->invalid_reply_retry_aux_on_ack;
+
+ if (ctx->invalid_reply_retry_aux_on_ack >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ }
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->transaction_complete = true;
+ ctx->operation_succeeded = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static void process_read_request(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else {
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->buffer;
+
+ process_read_reply(engine, ctx);
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here
+ */
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static bool read_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct read_command_context ctx;
+
+ ctx.buffer = request->payload.data;
+ ctx.current_read_length = request->payload.length;
+ ctx.offset = 0;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.invalid_reply_retry_aux_on_ack = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such a case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ do {
+ memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
+
+ ctx.request.data = ctx.buffer + ctx.offset;
+ ctx.request.length = ctx.current_read_length;
+
+ process_read_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+ ctx.operation_succeeded);
+ }
+
+ return ctx.operation_succeeded;
+}
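A caller drives read_command() with an i2caux_transaction_request describing the payload; a minimal DPCD read sketch, with field values illustrative and taken from the address-space handling above:

	uint8_t data[1] = { 0 };
	struct i2caux_transaction_request req = {
		.payload = {
			.address_space =
				I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD,
			.address = 0x00000,
			.data = data,
			.length = sizeof(data),
		},
	};

	if (read_command(engine, &req, false)) {
		/* req.status holds the final transaction status */
	}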
+
+static void process_write_reply(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->operation_succeeded = true;
+
+ if (ctx->returned_byte) {
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ctx->current_write_length = 0;
+
+ ++ctx->ack_m_retry;
+
+ if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(300);
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->defer_retry_aux = 0;
+ ctx->ack_m_retry = 0;
+ ctx->transaction_complete = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+ ctx->current_write_length = 0;
+
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static void process_write_request(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->reply_data;
+
+ process_write_reply(engine, ctx);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here
+ */
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static bool write_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct write_command_context ctx;
+
+ ctx.mot = middle_of_transaction;
+ ctx.buffer = request->payload.data;
+ ctx.current_write_length = request->payload.length;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.ack_m_retry = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such a case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ ctx.max_defer_retry =
+ (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
+ engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
+
+ do {
+ ctx.request.data = ctx.buffer;
+ ctx.request.length = ctx.current_write_length;
+
+ process_write_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+ ctx.operation_succeeded);
+ }
+
+ return ctx.operation_succeeded;
+}
+static bool end_of_transaction_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request)
+{
+ struct i2caux_transaction_request dummy_request;
+ uint8_t dummy_data;
+
+ /* [tcheng] We only need to send the stop (read with MOT = 0)
+ * for I2C-over-Aux, not native AUX
+ */
+
+ if (request->payload.address_space !=
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
+ return false;
+
+ dummy_request.operation = request->operation;
+ dummy_request.payload.address_space = request->payload.address_space;
+ dummy_request.payload.address = request->payload.address;
+
+ /*
+ * Add a dummy byte due to some receiver quirk
+ * where one byte is sent along with MOT = 0.
+ * Ideally this should be 0.
+ */
+
+ dummy_request.payload.length = 0;
+ dummy_request.payload.data = &dummy_data;
+
+ if (request->operation == I2CAUX_TRANSACTION_READ)
+ return read_command(engine, &dummy_request, false);
+ else
+ return write_command(engine, &dummy_request, false);
+
+ /* according to Syed, DoDummyMOT is not needed now */
+}
+static bool submit_request(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+
+ bool result;
+ bool mot_used = true;
+
+ switch (request->operation) {
+ case I2CAUX_TRANSACTION_READ:
+ result = read_command(engine, request, mot_used);
+ break;
+ case I2CAUX_TRANSACTION_WRITE:
+ result = write_command(engine, request, mot_used);
+ break;
+ default:
+ result = false;
+ }
+
+ /* [tcheng]
+ * We need to send a stop for the last transaction to free up the AUX;
+ * if the above command fails, this becomes the last transaction.
+ */
+
+ if (!middle_of_transaction || !result)
+ end_of_transaction_command(engine, request);
+
+ /* mask AUX interrupt */
+
+ return result;
+}
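
For readers new to I2C-over-AUX: submit_request() keeps MOT (Middle-Of-Transaction) asserted on every segment and only sends the stop, via end_of_transaction_command(), when the caller marks the final segment or a command fails. Below is a minimal sketch (not part of this patch) of how a caller might chain a one-byte offset write and a block read; the EDID-style device at 8-bit address 0xA0 and the helper name are illustrative assumptions.

static bool read_edid_block_sketch(struct aux_engine *engine)
{
	uint8_t offset = 0;	/* illustrative: read from EDID offset 0 */
	uint8_t edid[128];
	struct i2caux_transaction_request req = { 0 };

	req.operation = I2CAUX_TRANSACTION_WRITE;
	req.payload.address_space = I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
	req.payload.address = 0xA0;	/* 8-bit address; engine shifts it */
	req.payload.data = &offset;
	req.payload.length = 1;

	/* middle_of_transaction = true keeps the I2C transaction open */
	if (!submit_request(engine, &req, true))
		return false;

	req.operation = I2CAUX_TRANSACTION_READ;
	req.payload.data = edid;
	req.payload.length = sizeof(edid);

	/* final segment: the stop (MOT = 0) is sent after this returns */
	return submit_request(engine, &req, false);
}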
+enum i2caux_engine_type get_engine_type(
+ const struct aux_engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_AUX;
+}
+
+static bool acquire(
+ struct aux_engine *engine,
+ struct ddc *ddc)
+{
+
+ enum gpio_result result;
+
+ if (engine->funcs->is_engine_available) {
+ /* check whether SW could use the engine */
+ if (!engine->funcs->is_engine_available(engine))
+ return false;
+ }
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ if (!engine->funcs->acquire_engine(engine)) {
+ dal_ddc_close(ddc);
+ return false;
+ }
+
+ engine->ddc = ddc;
+
+ return true;
+}
+
+static const struct aux_engine_funcs aux_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .read_channel_reply = read_channel_reply,
+ .get_channel_status = get_channel_status,
+ .is_engine_available = is_engine_available,
+ .release_engine = release_engine,
+ .destroy_engine = dce110_engine_destroy,
+ .submit_request = submit_request,
+ .get_engine_type = get_engine_type,
+ .acquire = acquire,
+};
+
+void dce110_engine_destroy(struct aux_engine **engine)
+{
+
+ struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine);
+
+ kfree(engine110);
+ *engine = NULL;
+
+}
+struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ uint32_t timeout_period,
+ const struct dce110_aux_registers *regs)
+{
+ aux_engine110->base.ddc = NULL;
+ aux_engine110->base.ctx = ctx;
+ aux_engine110->base.delay = 0;
+ aux_engine110->base.max_defer_write_retry = 0;
+ aux_engine110->base.funcs = &aux_engine_funcs;
+ aux_engine110->base.inst = inst;
+ aux_engine110->timeout_period = timeout_period;
+ aux_engine110->regs = regs;
+
+ return &aux_engine110->base;
+}
+
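A hedged sketch of how a resource file might allocate and construct one engine instance. The wrapper name and register-table wiring are assumptions; the timeout constants come from dce_aux.h below, giving the 4 * 400us = 1600us SW timeout discussed there.

static struct aux_engine *create_aux_engine_sketch(
	struct dc_context *ctx,
	uint32_t inst,
	const struct dce110_aux_registers *regs)
{
	struct aux_engine_dce110 *eng = kzalloc(sizeof(*eng), GFP_KERNEL);

	if (!eng)
		return NULL;

	/* SW timeout = SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD */
	return dce110_aux_engine_construct(eng, ctx, inst,
			SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
			regs);
}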
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
new file mode 100644
index 000000000000..f7caab85dc80
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_DCE110_H__
+#define __DAL_AUX_ENGINE_DCE110_H__
+#include "aux_engine.h"
+
+#define AUX_COMMON_REG_LIST(id)\
+ SRI(AUX_CONTROL, DP_AUX, id), \
+ SRI(AUX_ARB_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_DATA, DP_AUX, id), \
+ SRI(AUX_SW_CONTROL, DP_AUX, id), \
+ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_STATUS, DP_AUX, id), \
+ SR(AUXN_IMPCAL), \
+ SR(AUXP_IMPCAL)
+
+struct dce110_aux_registers {
+ uint32_t AUX_CONTROL;
+ uint32_t AUX_ARB_CONTROL;
+ uint32_t AUX_SW_DATA;
+ uint32_t AUX_SW_CONTROL;
+ uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_SW_STATUS;
+ uint32_t AUXN_IMPCAL;
+ uint32_t AUXP_IMPCAL;
+
+ uint32_t AUX_RESET_MASK;
+};
+
+enum { /* This is the timeout as defined in DP 1.2a,
+ * 2.3.4 "Detailed uPacket TX AUX CH State Description".
+ */
+ AUX_TIMEOUT_PERIOD = 400,
+
+ /* Ideally, the SW timeout should be just above 550usec
+ * which is programmed in HW.
+ * But the SW timeout of 600usec is not reliable,
+ * because on some systems, delay_in_microseconds()
+ * returns faster than it should.
+ * EPR #379763: by trial-and-error on different systems,
+ * 700usec is the minimum reliable SW timeout for polling
+ * the AUX_SW_STATUS.AUX_SW_DONE bit.
+ * This timeout expires *only* when there is
+ * AUX Error or AUX Timeout conditions - not during normal operation.
+ * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
+ * at most within ~240usec. That means,
+ * increasing this timeout will not affect normal operation,
+ * and we'll timeout after
+ * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+ * This timeout is especially important for
+ * resume from S3 and CTS.
+ */
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+};
+struct aux_engine_dce110 {
+ struct aux_engine base;
+ const struct dce110_aux_registers *regs;
+ struct {
+ uint32_t aux_control;
+ uint32_t aux_arb_control;
+ uint32_t aux_sw_data;
+ uint32_t aux_sw_control;
+ uint32_t aux_interrupt_control;
+ uint32_t aux_sw_status;
+ } addr;
+ uint32_t timeout_period;
+};
+
+struct aux_engine_dce110_init_data {
+ uint32_t engine_id;
+ uint32_t timeout_period;
+ struct dc_context *ctx;
+ const struct dce110_aux_registers *regs;
+};
+
+struct aux_engine *dce110_aux_engine_construct(
+ struct aux_engine_dce110 *aux_engine110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ uint32_t timeout_period,
+ const struct dce110_aux_registers *regs);
+
+void dce110_engine_destroy(struct aux_engine **engine);
+
+bool dce110_aux_engine_acquire(
+ struct aux_engine *aux_engine,
+ struct ddc *ddc);
+#endif
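A sketch of the usual pattern for expanding AUX_COMMON_REG_LIST() in a resource file, assuming the SRI()/SR() base-address macros that resource files define locally; the macro/array names here are illustrative only.

#define aux_engine_regs(id)\
[id] = {\
	AUX_COMMON_REG_LIST(id), \
	.AUX_RESET_MASK = 0 \
}

static const struct dce110_aux_registers aux_engine_regs[] = {
	aux_engine_regs(0),
	aux_engine_regs(1),
	aux_engine_regs(2)
};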
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 88b09dd758ba..439dcf3b596c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -133,7 +133,7 @@ static bool calculate_fb_and_fractional_fb_divider(
uint64_t feedback_divider;
feedback_divider =
- (uint64_t)(target_pix_clk_khz * ref_divider * post_divider);
+ (uint64_t)target_pix_clk_khz * ref_divider * post_divider;
feedback_divider *= 10;
/* additional factor, since we divide by 10 afterwards */
feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
@@ -145,8 +145,8 @@ static bool calculate_fb_and_fractional_fb_divider(
* of fractional feedback decimal point and the fractional FB Divider precision
* is 2 then the equation becomes (ullfeedbackDivider + 5*100) / (10*100))*/
- feedback_divider += (uint64_t)
- (5 * calc_pll_cs->fract_fb_divider_precision_factor);
+ feedback_divider += 5ULL *
+ calc_pll_cs->fract_fb_divider_precision_factor;
feedback_divider =
div_u64(feedback_divider,
calc_pll_cs->fract_fb_divider_precision_factor * 10);
@@ -203,8 +203,8 @@ static bool calc_fb_divider_checking_tolerance(
&fract_feedback_divider);
/*Actual calculated value*/
- actual_calc_clk_khz = (uint64_t)(feedback_divider *
- calc_pll_cs->fract_fb_divider_factor) +
+ actual_calc_clk_khz = (uint64_t)feedback_divider *
+ calc_pll_cs->fract_fb_divider_factor +
fract_feedback_divider;
actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
actual_calc_clk_khz =
@@ -592,7 +592,7 @@ static uint32_t dce110_get_pix_clk_dividers(
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
case DCE_VERSION_12_0:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
#endif
@@ -909,7 +909,7 @@ static bool dce110_program_pix_clk(
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
struct bp_pixel_clock_parameters bp_pc_params = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
unsigned dp_dto_ref_kHz = 700000;
@@ -982,7 +982,7 @@ static bool dce110_program_pix_clk(
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
case DCE_VERSION_12_0:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index c45e2f76189e..801bb65707b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -55,7 +55,7 @@
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 8a581c67bf2d..684da3db7568 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -30,7 +30,7 @@
#include "bios_parser_interface.h"
#include "dc.h"
#include "dmcu.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn_calcs.h"
#endif
#include "core_types.h"
@@ -38,7 +38,7 @@
#include "dal_asic_id.h"
#define TO_DCE_CLOCKS(clocks)\
- container_of(clocks, struct dce_disp_clk, base)
+ container_of(clocks, struct dce_dccg, base)
#define REG(reg) \
(clk_dce->regs->reg)
@@ -101,99 +101,78 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStatePerformance*/
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
-/* Starting point for each divider range.*/
-enum dce_divider_range_start {
- DIVIDER_RANGE_01_START = 200, /* 2.00*/
- DIVIDER_RANGE_02_START = 1600, /* 16.00*/
- DIVIDER_RANGE_03_START = 3200, /* 32.00*/
- DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
+/* Starting DID for each range */
+enum dentist_base_divider_id {
+ DENTIST_BASE_DID_1 = 0x08,
+ DENTIST_BASE_DID_2 = 0x40,
+ DENTIST_BASE_DID_3 = 0x60,
+ DENTIST_MAX_DID = 0x80
};
-/* Ranges for divider identifiers (Divider ID or DID)
- mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER*/
-enum dce_divider_id_register_setting {
- DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
- DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
- DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
- DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
+/* Starting point and step size for each divider range.*/
+enum dentist_divider_range {
+ DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
+ DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
+ DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
+ DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
+ DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+ DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
+ DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
};
-/* Step size between each divider within a range.
- Incrementing the DENTIST_DISPCLK_WDIVIDER by one
- will increment the divider by this much.*/
-enum dce_divider_range_step_size {
- DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
- DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
- DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
-};
-
-static bool dce_divider_range_construct(
- struct dce_divider_range *div_range,
- int range_start,
- int range_step,
- int did_min,
- int did_max)
+static int dentist_get_divider_from_did(int did)
{
- div_range->div_range_start = range_start;
- div_range->div_range_step = range_step;
- div_range->did_min = did_min;
- div_range->did_max = did_max;
-
- if (div_range->div_range_step == 0) {
- div_range->div_range_step = 1;
- /*div_range_step cannot be zero*/
- BREAK_TO_DEBUGGER();
+ if (did < DENTIST_BASE_DID_1)
+ did = DENTIST_BASE_DID_1;
+ if (did > DENTIST_MAX_DID)
+ did = DENTIST_MAX_DID;
+
+ if (did < DENTIST_BASE_DID_2) {
+ return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+ * (did - DENTIST_BASE_DID_1);
+ } else if (did < DENTIST_BASE_DID_3) {
+ return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+ * (did - DENTIST_BASE_DID_2);
+ } else {
+ return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+ * (did - DENTIST_BASE_DID_3);
}
- /* Calculate this based on the other inputs.*/
- /* See DividerRange.h for explanation of */
- /* the relationship between divider id (DID) and a divider.*/
- /* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID)*/
- /* Maximum divider identified in this range =
- * (Number of Divider IDs)*Step size between dividers
- * + The start of this range.*/
- div_range->div_range_end = (did_max - did_min) * range_step
- + range_start;
- return true;
-}
-
-static int dce_divider_range_calc_divider(
- struct dce_divider_range *div_range,
- int did)
-{
- /* Is this DID within our range?*/
- if ((did < div_range->did_min) || (did >= div_range->did_max))
- return INVALID_DIVIDER;
-
- return ((did - div_range->did_min) * div_range->div_range_step)
- + div_range->div_range_start;
-
}
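A worked check of the DID decoding above, as a sketch: DID 0x41 lands in range 2, giving a raw value of 64 + 2 * 1 = 66, i.e. an actual divider of 66 / DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 16.5, consistent with the 16.00-start / 0.50-step comments in the enum. The helper name is hypothetical.

static void dentist_divider_example(void)
{
	/* range 2: 64 + 2 * (0x41 - 0x40) = 66; 66 / 4 = divider 16.5 */
	ASSERT(dentist_get_divider_from_did(0x41) == 66);

	/* out-of-range DIDs are clamped to the table edges */
	ASSERT(dentist_get_divider_from_did(0x00) ==
	       dentist_get_divider_from_did(DENTIST_BASE_DID_1));
}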
-static int dce_divider_range_get_divider(
- struct dce_divider_range *div_range,
- int ranges_num,
- int did)
+/* SW will adjust the DP REF Clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread,
+ * for all of these cases:
+ * - SS enabled on the DP Ref clock and HW de-spreading enabled with SW
+ *   calculations for DS_INCR/DS_MODULO (planned to be the default case)
+ * - SS enabled on the DP Ref clock and HW de-spreading enabled with HW
+ *   calculations (not planned to be used, but the average clock should
+ *   still be valid)
+ * - SS enabled on the DP Ref clock and HW de-spreading disabled
+ *   (should not be the case with CIK); then SW should program all rates
+ *   generated according to the average value (as with previous ASICs)
+ */
+static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
{
- int div = INVALID_DIVIDER;
- int i;
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+ dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed31_32 adj_dp_ref_clk_khz;
- for (i = 0; i < ranges_num; i++) {
- /* Calculate divider with given divider ID*/
- div = dce_divider_range_calc_divider(&div_range[i], did);
- /* Found a valid return divider*/
- if (div != INVALID_DIVIDER)
- break;
+ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
}
- return div;
+ return dp_ref_clk_khz;
}
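A numeric sketch of the spread-spectrum adjustment, assuming illustrative BIOS values (percentage 35, divider 1000); the /200 appears to fold the percent-to-fraction conversion and the half-spread averaging into one step, though the patch does not spell that out. The helper name is hypothetical.

static int dp_ref_ss_example(struct dce_dccg *clk_dce)
{
	/* ss = (35 / 1000) / 200 = 0.000175
	 * adjusted = (1 - 0.000175) * 600000 ~= 599895 kHz
	 */
	clk_dce->ss_on_dprefclk = true;
	clk_dce->dprefclk_ss_percentage = 35;
	clk_dce->dprefclk_ss_divider = 1000;

	return dccg_adjust_dp_ref_freq_for_ss(clk_dce, 600000);
}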
-static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
+static int dce_get_dp_ref_freq_khz(struct dccg *clk)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
int dprefclk_wdivider;
int dprefclk_src_sel;
int dp_ref_clk_khz = 600000;
- int target_div = INVALID_DIVIDER;
+ int target_div;
/* ASSERT DP Reference Clock source is from DFS*/
REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
@@ -204,80 +183,27 @@ static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
 /* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider */
- target_div = dce_divider_range_get_divider(
- clk_dce->divider_ranges,
- DIVIDER_RANGE_MAX,
- dprefclk_wdivider);
-
- if (target_div != INVALID_DIVIDER) {
- /* Calculate the current DFS clock, in kHz.*/
- dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
- * clk_dce->dentist_vco_freq_khz) / target_div;
- }
+ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
- /* SW will adjust DP REF Clock average value for all purposes
- * (DP DTO / DP Audio DTO and DP GTC)
- if clock is spread for all cases:
- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
- calculations (not planned to be used, but average clock should still
- be valid)
- -if SS enabled on DP Ref clock and HW de-spreading disabled
- (should not be case with CIK) then SW should program all rates
- generated according to average value (case as with previous ASICs)
- */
- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
- dc_fixpt_from_fraction(
- clk_dce->dprefclk_ss_percentage,
- clk_dce->dprefclk_ss_divider), 200);
- struct fixed31_32 adj_dp_ref_clk_khz;
+ /* Calculate the current DFS clock, in kHz.*/
+ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_dce->dentist_vco_freq_khz) / target_div;
- ss_percentage = dc_fixpt_sub(dc_fixpt_one,
- ss_percentage);
- adj_dp_ref_clk_khz =
- dc_fixpt_mul_int(
- ss_percentage,
- dp_ref_clk_khz);
- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
- }
-
- return dp_ref_clk_khz;
+ return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
}
-/* TODO: This is DCN DPREFCLK: it could be program by DENTIST by VBIOS
- * or CLK0_CLK11 by SMU. For DCE120, it is wlays 600Mhz. Will re-visit
- * clock implementation
- */
-static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
- int dp_ref_clk_khz = 600000;
-
- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
- dc_fixpt_from_fraction(
- clk_dce->dprefclk_ss_percentage,
- clk_dce->dprefclk_ss_divider), 200);
- struct fixed31_32 adj_dp_ref_clk_khz;
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- ss_percentage = dc_fixpt_sub(dc_fixpt_one,
- ss_percentage);
- adj_dp_ref_clk_khz =
- dc_fixpt_mul_int(
- ss_percentage,
- dp_ref_clk_khz);
- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
- }
-
- return dp_ref_clk_khz;
+ return dccg_adjust_dp_ref_freq_for_ss(clk_dce, 600000);
}
+
static enum dm_pp_clocks_state dce_get_required_clocks_state(
- struct display_clock *clk,
- struct state_dependent_clocks *req_clocks)
+ struct dccg *clk,
+ struct dc_clocks *req_clocks)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
int i;
enum dm_pp_clocks_state low_req_clk;
@@ -286,53 +212,30 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
* all required clocks
*/
for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
- if (req_clocks->display_clk_khz >
+ if (req_clocks->dispclk_khz >
clk_dce->max_clks_by_state[i].display_clk_khz
- || req_clocks->pixel_clk_khz >
+ || req_clocks->phyclk_khz >
clk_dce->max_clks_by_state[i].pixel_clk_khz)
break;
low_req_clk = i + 1;
if (low_req_clk > clk->max_clks_state) {
- DC_LOG_WARNING("%s: clocks unsupported disp_clk %d pix_clk %d",
- __func__,
- req_clocks->display_clk_khz,
- req_clocks->pixel_clk_khz);
- low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ /* use the max clock state for a high phyclk; only invalid when the display clock is exceeded */
+ if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
+ < req_clocks->dispclk_khz)
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ else
+ low_req_clk = clk->max_clks_state;
}
return low_req_clk;
}
-static bool dce_clock_set_min_clocks_state(
- struct display_clock *clk,
- enum dm_pp_clocks_state clocks_state)
-{
- struct dm_pp_power_level_change_request level_change_req = {
- clocks_state };
-
- if (clocks_state > clk->max_clks_state) {
- /*Requested state exceeds max supported state.*/
- DC_LOG_WARNING("Requested state exceeds max supported state");
- return false;
- } else if (clocks_state == clk->cur_min_clks_state) {
- /*if we're trying to set the same state, we can just return
- * since nothing needs to be done*/
- return true;
- }
-
- /* get max clock state from PPLIB */
- if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
- clk->cur_min_clks_state = clocks_state;
-
- return true;
-}
-
static int dce_set_clock(
- struct display_clock *clk,
+ struct dccg *clk,
int requested_clk_khz)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
struct dc_bios *bp = clk->ctx->dc_bios;
int actual_clock = requested_clk_khz;
@@ -364,10 +267,10 @@ static int dce_set_clock(
}
static int dce_psr_set_clock(
- struct display_clock *clk,
+ struct dccg *clk,
int requested_clk_khz)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
struct dc_context *ctx = clk_dce->base.ctx;
struct dc *core_dc = ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
@@ -380,10 +283,10 @@ static int dce_psr_set_clock(
}
static int dce112_set_clock(
- struct display_clock *clk,
+ struct dccg *clk,
int requested_clk_khz)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk->ctx->dc_bios;
struct dc *core_dc = clk->ctx->dc;
@@ -432,9 +335,9 @@ static int dce112_set_clock(
return actual_clock;
}
-static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
+static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
{
- struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+ struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
struct integrated_info info = { { { 0 } } };
struct dc_firmware_info fw_info = { { 0 } };
@@ -488,11 +391,9 @@ static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
clk_dce->dfs_bypass_enabled = true;
-
- clk_dce->use_max_disp_clk = debug->max_disp_clk;
}
-static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
{
struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
int ss_info_num = bp->funcs->get_ss_entry_number(
@@ -548,139 +449,265 @@ static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
}
}
-static bool dce_apply_clock_voltage_request(
- struct display_clock *clk,
- enum dm_pp_clock_type clocks_type,
- int clocks_in_khz,
- bool pre_mode_set,
- bool update_dp_phyclk)
+static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+{
+ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
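should_set_clock() encodes the whole update policy in one line: raising a clock is always applied, lowering only when the caller says it is safe. A quick illustrative check, as a sketch with hypothetical values:

static void should_set_clock_example(void)
{
	ASSERT(should_set_clock(false, 600000, 400000));  /* raise: yes      */
	ASSERT(!should_set_clock(false, 300000, 400000)); /* lower: deferred */
	ASSERT(should_set_clock(true, 300000, 400000));   /* lower: allowed  */
	ASSERT(!should_set_clock(true, 400000, 400000));  /* equal: no-op    */
}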
+
+static void dce12_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
{
- bool send_request = false;
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- case DM_PP_CLOCK_TYPE_PIXELCLK:
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- break;
- default:
- BREAK_TO_DEBUGGER();
- return false;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+ new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
}
- clock_voltage_req.clk_type = clocks_type;
- clock_voltage_req.clocks_in_khz = clocks_in_khz;
-
- /* to pplib */
- if (pre_mode_set) {
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- if (clocks_in_khz > clk->cur_clocks_value.dispclk_in_khz) {
- clk->cur_clocks_value.dispclk_notify_pplib_done = true;
- send_request = true;
- } else
- clk->cur_clocks_value.dispclk_notify_pplib_done = false;
- /* no matter incrase or decrase clock, update current clock value */
- clk->cur_clocks_value.dispclk_in_khz = clocks_in_khz;
- break;
- case DM_PP_CLOCK_TYPE_PIXELCLK:
- if (clocks_in_khz > clk->cur_clocks_value.max_pixelclk_in_khz) {
- clk->cur_clocks_value.pixelclk_notify_pplib_done = true;
- send_request = true;
- } else
- clk->cur_clocks_value.pixelclk_notify_pplib_done = false;
- /* no matter incrase or decrase clock, update current clock value */
- clk->cur_clocks_value.max_pixelclk_in_khz = clocks_in_khz;
- break;
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- if (clocks_in_khz > clk->cur_clocks_value.max_non_dp_phyclk_in_khz) {
- clk->cur_clocks_value.phyclk_notigy_pplib_done = true;
- send_request = true;
- } else
- clk->cur_clocks_value.phyclk_notigy_pplib_done = false;
- /* no matter incrase or decrase clock, update current clock value */
- clk->cur_clocks_value.max_non_dp_phyclk_in_khz = clocks_in_khz;
- break;
- default:
- ASSERT(0);
- break;
- }
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
+ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+}
+
+#ifdef CONFIG_X86
+static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
+{
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
+ int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+ bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
+
+ /* increasing clock: current div is 0, requested div is 1 */
+ if (dispclk_increase) {
+ /* already divided by 2, no need to reach target clk with 2 steps */
+ if (cur_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ /* request disp clk is lower than maximum supported dpp clk,
+ * no need to reach target clk with two steps.
+ */
+ if (new_clocks->dispclk_khz <= disp_clk_threshold)
+ return new_clocks->dispclk_khz;
+
+ /* requested dpp clk is not divided by 2, still within threshold */
+ if (!request_dpp_div)
+ return new_clocks->dispclk_khz;
} else {
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- if (!clk->cur_clocks_value.dispclk_notify_pplib_done)
- send_request = true;
- break;
- case DM_PP_CLOCK_TYPE_PIXELCLK:
- if (!clk->cur_clocks_value.pixelclk_notify_pplib_done)
- send_request = true;
- break;
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- if (!clk->cur_clocks_value.phyclk_notigy_pplib_done)
- send_request = true;
- break;
- default:
- ASSERT(0);
- break;
- }
+ /* decreasing clock: current dppclk is divided by 2,
+ * requested dppclk is not divided by 2.
+ */
+
+ /* current dpp clk not divided by 2, no need to ramp*/
+ if (!cur_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ /* current disp clk is lower than current maximum dpp clk,
+ * no need to ramp
+ */
+ if (dccg->clks.dispclk_khz <= disp_clk_threshold)
+ return new_clocks->dispclk_khz;
+
+ /* request dpp clk need to be divided by 2 */
+ if (request_dpp_div)
+ return new_clocks->dispclk_khz;
}
- if (send_request) {
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (clk->ctx->dce_version >= DCN_VERSION_1_0) {
- struct dc *core_dc = clk->ctx->dc;
- /*use dcfclk request voltage*/
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock_voltage_req.clocks_in_khz =
- dcn_find_dcfclk_suits_all(core_dc, &clk->cur_clocks_value);
- }
+
+ return disp_clk_threshold;
+}
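A hedged walk-through of the threshold logic with illustrative numbers: going from 400 MHz (divider off) to dispclk 900 MHz / dppclk 450 MHz with max_supported_dppclk of 600 MHz, the function returns the 600 MHz threshold so the ramp below can flip the divider mid-way. The example function name and frequencies are assumptions.

static void dppclk_ramp_example(struct dccg *dccg)
{
	struct dc_clocks clks = { 0 };

	/* pretend current state: dispclk 400 MHz, DPP divider off */
	dccg->clks.dispclk_khz = 400000;
	dccg->clks.dppclk_khz = 400000;

	clks.dispclk_khz = 900000;		/* target dispclk        */
	clks.dppclk_khz = 450000;		/* target dppclk (div 2) */
	clks.max_supported_dppclk_khz = 600000;

	/* stop at the 600 MHz threshold, flip the divider, then 900 MHz */
	ASSERT(dcn1_determine_dppclk_threshold(dccg, &clks) == 600000);
}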
+
+static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
+{
+ struct dc *dc = dccg->ctx->dc;
+ int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ int i;
+
+ /* set disp clk to dpp clk threshold */
+ dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
+
+ /* update request dpp clk division option */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+ pipe_ctx->plane_res.dpp,
+ request_dpp_div,
+ true);
+ }
+
+ /* If target clk not same as dppclk threshold, set to target clock */
+ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
+ dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+}
+
+static void dcn1_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+{
+ struct dc *dc = dccg->ctx->dc;
+ struct pp_smu_display_requirement_rv *smu_req_cur =
+ &dc->res_pool->pp_smu_req;
+ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ bool send_request_to_increase = false;
+ bool send_request_to_lower = false;
+
+ if (new_clocks->phyclk_khz)
+ smu_req.display_count = 1;
+ else
+ smu_req.display_count = 0;
+
+ if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
+ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
+ || new_clocks->fclk_khz > dccg->clks.fclk_khz
+ || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
+ send_request_to_increase = true;
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+ dccg->clks.fclk_khz = new_clocks->fclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
+ smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+ dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
+ dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
+
+ send_request_to_lower = true;
+ }
+
+ /* make sure dcfclk is raised before dppclk so we
+ * have enough voltage to run the dpp clk
+ */
+ if (send_request_to_increase) {
+ /* use dcfclk to request voltage */
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+ }
+
+ /* dcn1 dppclk is tied to dispclk */
+ /* also program dispclk when unchanged (==) as a w/a for sleep/resume clock ramping issues */
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
+ || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
+ dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (!send_request_to_increase && send_request_to_lower) {
+ /* use dcfclk to request voltage */
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+ }
+
+
+ *smu_req_cur = smu_req;
+}
#endif
- dm_pp_apply_clock_for_voltage_request(
- clk->ctx, &clock_voltage_req);
+
+static void dce_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+{
+ struct dm_pp_power_level_change_request level_change_req;
+
+ level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > dccg->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+ dccg->cur_min_clks_state = level_change_req.power_level;
}
- if (update_dp_phyclk && (clocks_in_khz >
- clk->cur_clocks_value.max_dp_phyclk_in_khz))
- clk->cur_clocks_value.max_dp_phyclk_in_khz = clocks_in_khz;
- return true;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ }
}
+#ifdef CONFIG_X86
+static const struct display_clock_funcs dcn1_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dcn1_update_clocks
+};
+#endif
static const struct display_clock_funcs dce120_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
- .apply_clock_voltage_request = dce_apply_clock_voltage_request,
- .set_clock = dce112_set_clock
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dce12_update_clocks
};
static const struct display_clock_funcs dce112_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
- .get_required_clocks_state = dce_get_required_clocks_state,
- .set_min_clocks_state = dce_clock_set_min_clocks_state,
- .set_clock = dce112_set_clock
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dce_update_clocks
};
static const struct display_clock_funcs dce110_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
- .get_required_clocks_state = dce_get_required_clocks_state,
- .set_min_clocks_state = dce_clock_set_min_clocks_state,
- .set_clock = dce_psr_set_clock
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce_psr_set_clock,
+ .update_clocks = dce_update_clocks
};
static const struct display_clock_funcs dce_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
- .get_required_clocks_state = dce_get_required_clocks_state,
- .set_min_clocks_state = dce_clock_set_min_clocks_state,
- .set_clock = dce_set_clock
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce_set_clock,
+ .update_clocks = dce_update_clocks
};
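With the old apply_clock_voltage_request()/set_min_clocks_state pair folded into update_clocks(), a caller now fills a dc_clocks snapshot and lets the dccg decide what to program. A minimal sketch, with an assumed caller name and illustrative frequencies:

static void program_clocks_sketch(struct dccg *dccg, bool safe_to_lower)
{
	struct dc_clocks new_clocks = { 0 };

	new_clocks.dispclk_khz = 600000;	/* illustrative targets */
	new_clocks.phyclk_khz = 270000;

	/* the per-family funcs table picks dce/dce12/dcn1 behavior */
	dccg->funcs->update_clocks(dccg, &new_clocks, safe_to_lower);
}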
-static void dce_disp_clk_construct(
- struct dce_disp_clk *clk_dce,
+static void dce_dccg_construct(
+ struct dce_dccg *clk_dce,
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct display_clock *base = &clk_dce->base;
+ struct dccg *base = &clk_dce->base;
base->ctx = ctx;
base->funcs = &dce_funcs;
@@ -700,34 +727,15 @@ static void dce_disp_clk_construct(
dce_clock_read_integrated_info(clk_dce);
dce_clock_read_ss_info(clk_dce);
-
- dce_divider_range_construct(
- &clk_dce->divider_ranges[DIVIDER_RANGE_01],
- DIVIDER_RANGE_01_START,
- DIVIDER_RANGE_01_STEP_SIZE,
- DIVIDER_RANGE_01_BASE_DIVIDER_ID,
- DIVIDER_RANGE_02_BASE_DIVIDER_ID);
- dce_divider_range_construct(
- &clk_dce->divider_ranges[DIVIDER_RANGE_02],
- DIVIDER_RANGE_02_START,
- DIVIDER_RANGE_02_STEP_SIZE,
- DIVIDER_RANGE_02_BASE_DIVIDER_ID,
- DIVIDER_RANGE_03_BASE_DIVIDER_ID);
- dce_divider_range_construct(
- &clk_dce->divider_ranges[DIVIDER_RANGE_03],
- DIVIDER_RANGE_03_START,
- DIVIDER_RANGE_03_STEP_SIZE,
- DIVIDER_RANGE_03_BASE_DIVIDER_ID,
- DIVIDER_RANGE_MAX_DIVIDER_ID);
}
-struct display_clock *dce_disp_clk_create(
+struct dccg *dce_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -738,19 +746,19 @@ struct display_clock *dce_disp_clk_create(
dce80_max_clks_by_state,
sizeof(dce80_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, regs, clk_shift, clk_mask);
return &clk_dce->base;
}
-struct display_clock *dce110_disp_clk_create(
+struct dccg *dce110_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -761,7 +769,7 @@ struct display_clock *dce110_disp_clk_create(
dce110_max_clks_by_state,
sizeof(dce110_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, regs, clk_shift, clk_mask);
clk_dce->base.funcs = &dce110_funcs;
@@ -769,13 +777,13 @@ struct display_clock *dce110_disp_clk_create(
return &clk_dce->base;
}
-struct display_clock *dce112_disp_clk_create(
+struct dccg *dce112_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -786,7 +794,7 @@ struct display_clock *dce112_disp_clk_create(
dce112_max_clks_by_state,
sizeof(dce112_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, regs, clk_shift, clk_mask);
clk_dce->base.funcs = &dce112_funcs;
@@ -794,10 +802,9 @@ struct display_clock *dce112_disp_clk_create(
return &clk_dce->base;
}
-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+struct dccg *dce120_dccg_create(struct dc_context *ctx)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
- struct dm_pp_clock_levels_with_voltage clk_level_info = {0};
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -808,28 +815,59 @@ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
dce120_max_clks_by_state,
sizeof(dce120_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, NULL, NULL, NULL);
clk_dce->base.funcs = &dce120_funcs;
- /* new in dce120 */
- if (!ctx->dc->debug.disable_pplib_clock_request &&
- dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
- && clk_level_info.num_levels)
- clk_dce->max_displ_clk_in_khz =
- clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
- else
- clk_dce->max_displ_clk_in_khz = 1133000;
+ return &clk_dce->base;
+}
+
+#ifdef CONFIG_X86
+struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+{
+ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dc_bios *bp = ctx->dc_bios;
+ struct dc_firmware_info fw_info = { { 0 } };
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ clk_dce->base.ctx = ctx;
+ clk_dce->base.funcs = &dcn1_funcs;
+
+ clk_dce->dfs_bypass_disp_clk = 0;
+
+ clk_dce->dprefclk_ss_percentage = 0;
+ clk_dce->dprefclk_ss_divider = 1000;
+ clk_dce->ss_on_dprefclk = false;
+
+ if (bp->integrated_info)
+ clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0)
+ clk_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_dce->dfs_bypass_enabled = true;
+
+ dce_clock_read_ss_info(clk_dce);
return &clk_dce->base;
}
+#endif
-void dce_disp_clk_destroy(struct display_clock **disp_clk)
+void dce_dccg_destroy(struct dccg **dccg)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
kfree(clk_dce);
- *disp_clk = NULL;
+ *dccg = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
index 0e717e0dc8f0..e5e44adc6c27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
@@ -33,6 +33,9 @@
.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
.DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+#define CLK_COMMON_REG_LIST_DCN_BASE() \
+ SR(DENTIST_DISPCLK_CNTL)
+
#define CLK_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -40,58 +43,37 @@
CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+
#define CLK_REG_FIELD_LIST(type) \
type DPREFCLK_SRC_SEL; \
- type DENTIST_DPREFCLK_WDIVIDER;
+ type DENTIST_DPREFCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_CHG_DONE;
-struct dce_disp_clk_shift {
+struct dccg_shift {
CLK_REG_FIELD_LIST(uint8_t)
};
-struct dce_disp_clk_mask {
+struct dccg_mask {
CLK_REG_FIELD_LIST(uint32_t)
};
-struct dce_disp_clk_registers {
+struct dccg_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
};
-/* Array identifiers and count for the divider ranges.*/
-enum dce_divider_range_count {
- DIVIDER_RANGE_01 = 0,
- DIVIDER_RANGE_02,
- DIVIDER_RANGE_03,
- DIVIDER_RANGE_MAX /* == 3*/
-};
-
-enum dce_divider_error_types {
- INVALID_DID = 0,
- INVALID_DIVIDER = 1
-};
-
-struct dce_divider_range {
- int div_range_start;
- /* The end of this range of dividers.*/
- int div_range_end;
- /* The distance between each divider in this range.*/
- int div_range_step;
- /* The divider id for the lowest divider.*/
- int did_min;
- /* The divider id for the highest divider.*/
- int did_max;
-};
-
-struct dce_disp_clk {
- struct display_clock base;
- const struct dce_disp_clk_registers *regs;
- const struct dce_disp_clk_shift *clk_shift;
- const struct dce_disp_clk_mask *clk_mask;
+struct dce_dccg {
+ struct dccg base;
+ const struct dccg_registers *regs;
+ const struct dccg_shift *clk_shift;
+ const struct dccg_mask *clk_mask;
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
- struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
- bool use_max_disp_clk;
int dentist_vco_freq_khz;
/* Cache the status of DFS-bypass feature*/
@@ -106,32 +88,33 @@ struct dce_disp_clk {
int dprefclk_ss_percentage;
/* DPREFCLK SS percentage Divider (100 or 1000) */
int dprefclk_ss_divider;
-
- /* max disp_clk from PPLIB for max validation display clock*/
- int max_displ_clk_in_khz;
};
-struct display_clock *dce_disp_clk_create(
+struct dccg *dce_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask);
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask);
-struct display_clock *dce110_disp_clk_create(
+struct dccg *dce110_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask);
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask);
-struct display_clock *dce112_disp_clk_create(
+struct dccg *dce112_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask);
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask);
+
+struct dccg *dce120_dccg_create(struct dc_context *ctx);
-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
+#ifdef CONFIG_X86
+struct dccg *dcn1_dccg_create(struct dc_context *ctx);
+#endif
-void dce_disp_clk_destroy(struct display_clock **disp_clk);
+void dce_dccg_destroy(struct dccg **dccg);
#endif /* _DCE_CLOCKS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a576b8bbb3cd..ca7989e4932b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
}
}
-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
}
static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
@@ -314,7 +316,7 @@ static void dce_get_psr_wait_loop(
return;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
static void dcn10_get_dmcu_state(struct dmcu *dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
@@ -545,24 +547,25 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
* least a few frames. Should never hit the max retry assert below.
*/
if (wait == true) {
- for (retryCount = 0; retryCount <= 1000; retryCount++) {
- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
- if (enable) {
- if (psr_state != 0)
- break;
- } else {
- if (psr_state == 0)
- break;
+ for (retryCount = 0; retryCount <= 1000; retryCount++) {
+ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(500);
}
- udelay(500);
- }
- /* assert if max retry hit */
- ASSERT(retryCount <= 1000);
+ /* assert if max retry hit */
+ if (retryCount >= 1000)
+ ASSERT(0);
}
}
-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
/* If microcontroller is not running, do nothing */
if (dmcu->dmcu_state != DMCU_RUNNING)
- return;
+ return false;
link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
psr_context->psrExitLinkTrainingRequired);
@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ return true;
}
static void dcn10_psr_wait_loop(
@@ -735,7 +743,7 @@ static const struct dmcu_funcs dce_funcs = {
.is_dmcu_initialized = dce_is_dmcu_initialized
};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
static const struct dmcu_funcs dcn10_funcs = {
.dmcu_init = dcn10_dmcu_init,
.load_iram = dcn10_dmcu_load_iram,
@@ -787,7 +795,7 @@ struct dmcu *dce_dmcu_create(
return &dmcu_dce->base;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
struct dmcu *dcn10_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 057b8afd74bc..64dc75378541 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -147,6 +147,7 @@
SR(DCCG_GATE_DISABLE_CNTL2), \
SR(DCFCLK_CNTL),\
SR(DCFCLK_CNTL), \
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
/* todo: get these from GVM instead of reading registers ourselves */\
MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\
@@ -249,7 +250,6 @@ struct dce_hwseq_registers {
uint32_t DISPCLK_FREQ_CHANGE_CNTL;
uint32_t RBBMIF_TIMEOUT_DIS;
uint32_t RBBMIF_TIMEOUT_DIS_2;
- uint32_t DENTIST_DISPCLK_CNTL;
uint32_t DCHUBBUB_CRC_CTRL;
uint32_t DPP_TOP0_DPP_CRC_CTRL;
uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
@@ -276,6 +276,8 @@ struct dce_hwseq_registers {
uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
+ uint32_t AZALIA_AUDIO_DTO;
+ uint32_t AZALIA_CONTROLLER_CLOCK_GATING;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -362,7 +364,8 @@ struct dce_hwseq_registers {
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
- HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
+ HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh), \
+ HWS_SF(, DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -496,14 +499,13 @@ struct dce_hwseq_registers {
type DOMAIN7_PGFSM_PWR_STATUS; \
type DCFCLK_GATE_DIS; \
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
- type DENTIST_DPPCLK_WDIVIDER; \
- type DENTIST_DISPCLK_WDIVIDER; \
type VGA_TEST_ENABLE; \
type VGA_TEST_RENDER_START; \
type D1VGA_MODE_ENABLE; \
type D2VGA_MODE_ENABLE; \
type D3VGA_MODE_ENABLE; \
- type D4VGA_MODE_ENABLE;
+ type D4VGA_MODE_ENABLE; \
+ type AZALIA_AUDIO_DTO_MODULE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index dbe3b26b6d9e..eff7d22d78fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -256,6 +256,11 @@ static void setup_panel_mode(
enum dp_panel_mode panel_mode)
{
uint32_t value;
+ struct dc_context *ctx = enc110->base.ctx;
+
+ /* if PSP sets the panel mode, DAL should not program it */
+ if (ctx->dc->caps.psp_setup_panel_mode)
+ return;
ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
@@ -646,6 +651,9 @@ static bool dce110_link_encoder_validate_hdmi_output(
if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
adjusted_pix_clk_khz >= 300000)
return false;
+ if (enc110->base.ctx->dc->debug.hdmi20_disable &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
return true;
}
@@ -773,6 +781,9 @@ void dce110_link_encoder_construct(
__func__,
result);
}
+ if (enc110->base.ctx->dc->debug.hdmi20_disable) {
+ enc110->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
}
bool dce110_link_encoder_validate_output_with_stream(
@@ -919,7 +930,7 @@ void dce110_link_encoder_enable_tmds_output(
enum bp_result result;
/* Enable the PHY */
-
+ cntl.connector_obj_id = enc110->base.connector;
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc110->base.transmitter;
@@ -961,7 +972,7 @@ void dce110_link_encoder_enable_dp_output(
* We need to set number of lanes manually.
*/
configure_encoder(enc110, link_settings);
-
+ cntl.connector_obj_id = enc110->base.connector;
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc110->base.transmitter;
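Both the TMDS and DP enable paths now fill in cntl.connector_obj_id before issuing TRANSMITTER_CONTROL_ENABLE, and the hdmi20_disable debug option is honored in two places: it clamps HDMI_6GB_EN at construct time and rejects YCbCr 4:2:0 timings during validation. A hedged sketch of that validation order (standalone helper; parameter names are illustrative):

/* Reject first on missing HDMI 2.0 bandwidth, then on the debug
 * override: YCbCr 4:2:0 is an HDMI 2.0 feature, so disabling HDMI 2.0
 * must also reject 4:2:0 timings.
 */
static bool hdmi_output_ok(bool hdmi_6gb_en, bool hdmi20_disable,
			   int adjusted_pix_clk_khz, bool is_ycbcr420)
{
	if (!hdmi_6gb_en && adjusted_pix_clk_khz >= 300000)
		return false;
	if (hdmi20_disable && is_ycbcr420)
		return false;
	return true;
}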
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index b235a75355b8..85686d917636 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -729,7 +729,7 @@ static bool dce_mi_program_surface_flip_and_addr(
return true;
}
-static struct mem_input_funcs dce_mi_funcs = {
+static const struct mem_input_funcs dce_mi_funcs = {
.mem_input_program_display_marks = dce_mi_program_display_marks,
.allocate_mem_input = dce_mi_allocate_dmif,
.free_mem_input = dce_mi_free_dmif,
@@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = {
.mem_input_is_flip_pending = dce_mi_is_flip_pending
};
+static const struct mem_input_funcs dce112_mi_funcs = {
+ .mem_input_program_display_marks = dce112_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mi_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mi_program_surface_config,
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+static const struct mem_input_funcs dce120_mi_funcs = {
+ .mem_input_program_display_marks = dce120_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mi_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mi_program_surface_config,
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
void dce_mem_input_construct(
struct dce_mem_input *dce_mi,
@@ -769,7 +792,7 @@ void dce112_mem_input_construct(
const struct dce_mem_input_mask *mi_mask)
{
dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
- dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
+ dce_mi->base.funcs = &dce112_mi_funcs;
}
void dce120_mem_input_construct(
@@ -781,5 +804,5 @@ void dce120_mem_input_construct(
const struct dce_mem_input_mask *mi_mask)
{
dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
- dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+ dce_mi->base.funcs = &dce120_mi_funcs;
}
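The dce112/dce120 constructors above no longer patch a single shared mem_input_funcs table at runtime; each variant now points at its own const table. A sketch of the pattern and why it matters (types are illustrative): mutating a shared vtable from one instance silently changes behavior for every other instance and prevents the table from living in read-only memory.

struct ops {
	void (*program_marks)(void);
};

static void marks_dce112(void)
{
	/* variant-specific implementation */
}

static const struct ops dce112_ops = {
	.program_marks = marks_dce112,
};

struct base {
	const struct ops *funcs;
};

static void construct_dce112(struct base *b)
{
	b->funcs = &dce112_ops;	/* swap the whole table; mutate nothing */
}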
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index c0e813c7ddd4..b139b4017820 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -135,7 +135,7 @@ static void dce110_update_generic_info_packet(
AFMT_GENERIC0_UPDATE, (packet_index == 0),
AFMT_GENERIC2_UPDATE, (packet_index == 2));
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (REG(AFMT_VBI_PACKET_CONTROL1)) {
switch (packet_index) {
case 0:
@@ -229,7 +229,7 @@ static void dce110_update_hdmi_info_packet(
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case 4:
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
@@ -274,7 +274,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
uint32_t h_active_start;
uint32_t v_active_start;
uint32_t misc0 = 0;
@@ -289,11 +289,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (REG(DP_DB_CNTL))
- REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
-#endif
-
/* set pixel encoding */
switch (crtc_timing->pixel_encoding) {
case PIXEL_ENCODING_YCBCR422:
@@ -322,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (enc110->se_mask->DP_VID_N_MUL)
REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
#endif
@@ -333,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (REG(DP_MSA_MISC))
misc1 = REG_READ(DP_MSA_MISC);
#endif
@@ -367,7 +362,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
/* set dynamic range and YCbCr range */
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_666:
colorimetry_bpc = 0;
@@ -446,7 +441,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
DP_DYN_RANGE, dynamic_range_rgb,
DP_YCBCR_RANGE, dynamic_range_ycbcr);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (REG(DP_MSA_COLORIMETRY))
REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
@@ -481,7 +476,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
crtc_timing->v_front_porch;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
/* start at beginning of left border */
if (REG(DP_MSA_TIMING_PARAM2))
REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
@@ -756,7 +751,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (enc110->se_mask->HDMI_DB_DISABLE) {
/* for bring-up, disable DP double buffering; TODO */
if (REG(HDMI_DB_CONTROL))
@@ -794,7 +789,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
/* stop generic packets 2 & 3 on HDMI */
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index a02e719d7794..ab63d0d0304c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -155,7 +155,7 @@ static void program_overscan(
int overscan_bottom = data->v_active
- data->recout.y - data->recout.height;
- if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
overscan_bottom += 2;
overscan_right += 2;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 41f83ecd7469..74c05e878807 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -125,17 +125,50 @@ static void dce100_pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
}
+/* Unit: in_khz. Before mode set, get the pixel clock from the context;
+ * the ASIC registers may not be programmed yet.
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ uint32_t max_pix_clk = 0;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ /* do not check the underlay */
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+ max_pix_clk =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+ }
+ return max_pix_clk;
+}
+
void dce100_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
{
- if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dce.dispclk_khz * 115 / 100);
- dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
- }
+ struct dc_clocks req_clks;
+
+ req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &req_clks,
+ decrease_allowed);
+
dce100_pplib_apply_display_requirements(dc, context);
}
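dce100_set_bandwidth no longer programs the display clock directly: it builds a clock request (display clock with a 15% safety margin, phy clock from the fastest active path), parks the watermarks at safe values, and hands the decision to the DCCG's update_clocks, which also receives decrease_allowed. A sketch of the request construction (struct and names are illustrative):

struct clk_req {
	int dispclk_khz;
	int phyclk_khz;
};

static struct clk_req build_clk_request(int dispclk_khz, int max_pix_clk_khz)
{
	struct clk_req r;

	r.dispclk_khz = dispclk_khz * 115 / 100;	/* +15% margin */
	r.phyclk_khz = max_pix_clk_khz;			/* fastest path */
	return r;
}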
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 38ec0d609297..3f76e6019546 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -52,6 +52,7 @@
#include "dce/dce_10_0_sh_mask.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -135,15 +136,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -279,7 +280,20 @@ static const struct dce_opp_shift opp_shift = {
static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
#define audio_regs(id)\
[id] = {\
@@ -572,6 +586,23 @@ struct output_pixel_processor *dce100_opp_create(
return &opp->base;
}
+struct aux_engine *dce100_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce100_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -624,6 +655,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -644,8 +679,8 @@ static void destruct(struct dce110_resource_pool *pool)
dce_aud_destroy(&pool->base.audios[i]);
}
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);
@@ -678,9 +713,22 @@ bool dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
{
- /* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ int i;
+ bool at_least_one_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ at_least_one_pipe = true;
+ }
+
+ if (at_least_one_pipe) {
+ /* TODO: implement when needed; for now hardcode the max values */
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ } else {
+ context->bw.dce.dispclk_khz = 0;
+ context->bw.dce.yclk_khz = 0;
+ }
return true;
}
@@ -817,11 +865,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -851,7 +899,7 @@ static bool construct(
* max_clock_state
*/
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
struct irq_service_init_data init_data;
@@ -871,7 +919,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
-
+ dc->caps.disable_dp_clk_share = true;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
dce100_timing_generator_create(
@@ -915,6 +963,13 @@ static bool construct(
"DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+ pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
dc->caps.max_planes = pool->base.pipe_count;
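dce100_validate_bandwidth now distinguishes the idle case: the hardcoded maximum clocks are only requested when at least one pipe carries a stream, otherwise both clocks are reported as zero so power management can drop them. A compact sketch of the same decision (yclk shown before the MEMORY_TYPE_MULTIPLIER is applied; names are illustrative):

static void pick_validation_clocks(const bool *pipe_has_stream,
				   int pipe_count, int *dispclk_khz,
				   int *yclk_khz)
{
	bool active = false;
	int i;

	for (i = 0; i < pipe_count; i++)
		active |= pipe_has_stream[i];

	*dispclk_khz = active ? 681000 : 0;	/* max value or idle */
	*yclk_khz = active ? 250000 : 0;	/* pre-multiplier */
}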
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index e2994d337044..1f7f25013217 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed(
struct dce110_compressor *cp110,
bool enabled)
{
- uint8_t counter = 0;
+ uint32_t counter = 0;
uint32_t addr = mmFBC_STATUS;
uint32_t value;
@@ -158,7 +158,7 @@ static void wait_for_fbc_state_changed(
counter++;
}
- if (counter == 10) {
+ if (counter == 1000) {
DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
__func__);
} else {
@@ -551,9 +551,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
compressor->base.lpt_channels_num = 0;
compressor->base.attached_inst = 0;
compressor->base.is_enabled = false;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
compressor->base.funcs = &dce110_compressor_funcs;
-#endif
}
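The counter in wait_for_fbc_state_changed is widened from uint8_t to uint32_t together with the new 1000-iteration bound: a uint8_t counter wraps at 256, so a check against 1000 could never fire and the timeout warning would become unreachable. A generic sketch of the bounded-poll shape, assuming a hypothetical done callback:

static bool wait_until_done(bool (*done)(void *ctx), void *ctx)
{
	uint32_t counter;

	for (counter = 0; counter < 1000; counter++) {
		if (done(ctx))
			return true;
		udelay(10);	/* illustrative per-sample delay */
	}
	return false;		/* caller logs the timeout */
}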
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index c29052b6da5a..5450d4d38e8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -34,9 +34,7 @@
#include "dce/dce_hwseq.h"
#include "gpio_service_interface.h"
-#if defined(CONFIG_DRM_AMD_DC_FBC)
#include "dce110_compressor.h"
-#endif
#include "bios/bios_parser_helper.h"
#include "timing_generator.h"
@@ -667,16 +665,25 @@ static enum dc_status bios_parser_crtc_source_select(
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
{
+ bool is_hdmi;
+ bool is_dp;
+
ASSERT(pipe_ctx->stream);
if (pipe_ctx->stream_res.stream_enc == NULL)
return; /* this is not root pipe */
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ is_hdmi = dc_is_hdmi_signal(pipe_ctx->stream->signal);
+ is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
+
+ if (!is_hdmi && !is_dp)
+ return;
+
+ if (is_hdmi)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ else
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -857,17 +864,22 @@ void hwss_edp_power_control(
if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx);
unsigned long long duration_in_ms =
- dm_get_elapse_time_in_ns(
+ div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000));
+ link->link_trace.time_stamp.edp_poweroff), 1000000);
unsigned long long wait_time_ms = 0;
/* max 500ms from LCDVDD off to on */
+ unsigned long long edp_poweroff_time_ms = 500;
+
+ if (link->local_sink != NULL)
+ edp_poweroff_time_ms =
+ 500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
if (link->link_trace.time_stamp.edp_poweroff == 0)
- wait_time_ms = 500;
- else if (duration_in_ms < 500)
- wait_time_ms = 500 - duration_in_ms;
+ wait_time_ms = edp_poweroff_time_ms;
+ else if (duration_in_ms < edp_poweroff_time_ms)
+ wait_time_ms = edp_poweroff_time_ms - duration_in_ms;
if (wait_time_ms) {
msleep(wait_time_ms);
@@ -972,19 +984,35 @@ void hwss_edp_backlight_control(
edp_receiver_ready_T9(link);
}
-void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->sink->link;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ /* notify the audio driver of the monitor's audio modes */
+ struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+ unsigned int i, num_audio = 1;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
- pipe_ctx->stream_res.stream_enc);
+ if (pipe_ctx->stream_res.audio) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ /* current_state not updated yet */
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
+ num_audio++;
+ }
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
- pipe_ctx->stream_res.stream_enc);
+ pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+
+ if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ /* this is the first audio stream; apply the PME w/a to wake AZ from D3 */
+ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ /* un-mute audio */
+ /* TODO: audio should be per stream rather than per link */
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, false);
+ }
+}
+
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
@@ -1015,7 +1043,23 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
* stream->stream_engine_id);
*/
}
+}
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ dc->hwss.disable_audio_stream(pipe_ctx, option);
link->link_enc->funcs->connect_dig_be_to_fe(
link->link_enc,
@@ -1206,13 +1250,13 @@ static void program_scaler(const struct dc *dc,
{
struct tg_color color = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
/* TOFPGA */
if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
return;
#endif
- if (dc->debug.surface_visual_confirm)
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
get_surface_visual_confirm_color(pipe_ctx, &color);
else
color_space_to_black_color(dc,
@@ -1298,6 +1342,30 @@ static enum dc_status apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
pipe_ctx[pipe_ctx->pipe_idx];
+ if (pipe_ctx->stream_res.audio != NULL) {
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info,
+ &audio_output.crtc_info);
+
+ pipe_ctx->stream_res.audio->funcs->az_configure(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &pipe_ctx->stream->audio_info);
+ }
+
/* */
dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
@@ -1412,7 +1480,7 @@ static void power_down_controllers(struct dc *dc)
{
int i;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
dc->res_pool->timing_generators[i]->funcs->disable_crtc(
dc->res_pool->timing_generators[i]);
}
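power_down_controllers and the VGA-disable loop below now iterate over timing_generator_count instead of pipe_count: the two counts can differ on hardware with an underlay pipe, which has no CRTC of its own, so bounding on pipe_count would index past the TG array. Sketch of the corrected loop (struct is illustrative):

struct tg {
	void (*disable_crtc)(struct tg *tg);
};

static void disable_all_crtcs(struct tg **tgs, int tg_count)
{
	int i;

	for (i = 0; i < tg_count; i++)	/* TG count, not pipe count */
		tgs[i]->disable_crtc(tgs[i]);
}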
@@ -1441,10 +1509,8 @@ static void power_down_all_hw_blocks(struct dc *dc)
power_down_clock_sources(dc);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
if (dc->fbc_compressor)
dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-#endif
}
static void disable_vga_and_power_gate_all_controllers(
@@ -1454,12 +1520,13 @@ static void disable_vga_and_power_gate_all_controllers(
struct timing_generator *tg;
struct dc_context *ctx = dc->ctx;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
tg = dc->res_pool->timing_generators[i];
if (tg->funcs->disable_vga)
tg->funcs->disable_vga(tg);
-
+ }
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
/* Enable CLOCK gating for each pipe BEFORE controller
* powergating. */
enable_display_pipe_clock_gating(ctx,
@@ -1602,7 +1669,7 @@ static void dce110_set_displaymarks(
}
}
-static void set_safe_displaymarks(
+void dce110_set_safe_displaymarks(
struct resource_context *res_ctx,
const struct resource_pool *pool)
{
@@ -1686,9 +1753,7 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
if (events->force_trigger)
value |= 0x1;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
value |= 0x84;
-#endif
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
@@ -1696,23 +1761,15 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
}
/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet.
- * TODO: after mode set, pre_mode_set = false,
- * may read PLL register to get pixel clock
+ * may not be programmed yet
*/
static uint32_t get_max_pixel_clock_for_all_paths(
struct dc *dc,
- struct dc_state *context,
- bool pre_mode_set)
+ struct dc_state *context)
{
uint32_t max_pix_clk = 0;
int i;
- if (!pre_mode_set) {
- /* TODO: read ASIC register to get pixel clock */
- ASSERT(0);
- }
-
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -1728,97 +1785,10 @@ static uint32_t get_max_pixel_clock_for_all_paths(
pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
}
- if (max_pix_clk == 0)
- ASSERT(0);
-
return max_pix_clk;
}
/*
- * Find clock state based on clock requested. if clock value is 0, simply
- * set clock state as requested without finding clock state by clock value
- */
-
-static void apply_min_clocks(
- struct dc *dc,
- struct dc_state *context,
- enum dm_pp_clocks_state *clocks_state,
- bool pre_mode_set)
-{
- struct state_dependent_clocks req_clocks = {0};
-
- if (!pre_mode_set) {
- /* set clock_state without verification */
- if (context->dis_clk->funcs->set_min_clocks_state) {
- context->dis_clk->funcs->set_min_clocks_state(
- context->dis_clk, *clocks_state);
- return;
- }
-
- /* TODO: This is incorrect. Figure out how to fix. */
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAY_CLK,
- context->dis_clk->cur_clocks_value.dispclk_in_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_PIXELCLK,
- context->dis_clk->cur_clocks_value.max_pixelclk_in_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
- context->dis_clk->cur_clocks_value.max_non_dp_phyclk_in_khz,
- pre_mode_set,
- false);
- return;
- }
-
- /* get the required state based on state dependent clocks:
- * display clock and pixel clock
- */
- req_clocks.display_clk_khz = context->bw.dce.dispclk_khz;
-
- req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
- dc, context, true);
-
- if (context->dis_clk->funcs->get_required_clocks_state) {
- *clocks_state = context->dis_clk->funcs->get_required_clocks_state(
- context->dis_clk, &req_clocks);
- context->dis_clk->funcs->set_min_clocks_state(
- context->dis_clk, *clocks_state);
- } else {
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAY_CLK,
- req_clocks.display_clk_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_PIXELCLK,
- req_clocks.pixel_clk_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
- req_clocks.pixel_clk_khz,
- pre_mode_set,
- false);
- }
-}
-
-#if defined(CONFIG_DRM_AMD_DC_FBC)
-
-/*
* Check if FBC can be enabled
*/
static bool should_enable_fbc(struct dc *dc,
@@ -1896,7 +1866,6 @@ static void enable_fbc(struct dc *dc,
compr->funcs->enable_fbc(compr, &params);
}
}
-#endif
static void dce110_reset_hw_ctx_wrap(
struct dc *dc,
@@ -1939,7 +1908,9 @@ static void dce110_reset_hw_ctx_wrap(
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
- if (old_clk)
+ if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx,
+ dc->res_pool,
+ old_clk))
old_clk->funcs->cs_power_down(old_clk);
dc->hwss.disable_plane(dc, pipe_ctx_old);
@@ -1949,97 +1920,12 @@ static void dce110_reset_hw_ctx_wrap(
}
}
-
-enum dc_status dce110_apply_ctx_to_hw(
+static void dce110_setup_audio_dto(
struct dc *dc,
struct dc_state *context)
{
- struct dc_bios *dcb = dc->ctx->dc_bios;
- enum dc_status status;
int i;
- enum dm_pp_clocks_state clocks_state = DM_PP_CLOCKS_STATE_INVALID;
-
- /* Reset old context */
- /* look up the targets that have been removed since last commit */
- dc->hwss.reset_hw_ctx_wrap(dc, context);
-
- /* Skip applying if no targets */
- if (context->stream_count <= 0)
- return DC_OK;
-
- /* Apply new context */
- dcb->funcs->set_scratch_critical_state(dcb, true);
- /* below is for real asic only */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx_old =
- &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream == pipe_ctx_old->stream) {
- if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
- dce_crtc_switch_to_clk_src(dc->hwseq,
- pipe_ctx->clock_source, i);
- continue;
- }
-
- dc->hwss.enable_display_power_gating(
- dc, i, dc->ctx->dc_bios,
- PIPE_GATING_CONTROL_DISABLE);
- }
-
- set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
-#if defined(CONFIG_DRM_AMD_DC_FBC)
- if (dc->fbc_compressor)
- dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-#endif
- /*TODO: when pplib works*/
- apply_min_clocks(dc, context, &clocks_state, true);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
- if (context->bw.dcn.calc_clk.fclk_khz
- > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
- struct dm_pp_clock_for_voltage_req clock;
-
- clock.clk_type = DM_PP_CLOCK_TYPE_FCLK;
- clock.clocks_in_khz = context->bw.dcn.calc_clk.fclk_khz;
- dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
- dc->current_state->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
- context->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
- }
- if (context->bw.dcn.calc_clk.dcfclk_khz
- > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
- struct dm_pp_clock_for_voltage_req clock;
-
- clock.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock.clocks_in_khz = context->bw.dcn.calc_clk.dcfclk_khz;
- dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
- dc->current_state->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
- context->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
- }
- if (context->bw.dcn.calc_clk.dispclk_khz
- > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dcn.calc_clk.dispclk_khz);
- dc->current_state->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- context->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- }
- } else
-#endif
- if (context->bw.dce.dispclk_khz
- > dc->current_state->bw.dce.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dce.dispclk_khz * 115 / 100);
- }
 /* Program the audio wall clock. Use HDMI as the clock source if HDMI
  * audio is active; otherwise use DP. First loop to find any HDMI
  * audio and, if none is found, loop to find DP audio
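That wall-clock source selection is now owned by dce110_setup_audio_dto, which dce110_apply_ctx_to_hw calls after the old inline clock programming was dropped. A hedged sketch of the selection order (arrays and names are illustrative):

static int pick_audio_dto_source(const bool *is_hdmi,
				 const bool *has_audio, int count)
{
	int i, dp_candidate = -1;

	for (i = 0; i < count; i++) {
		if (!has_audio[i])
			continue;
		if (is_hdmi[i])
			return i;		/* HDMI wins immediately */
		if (dp_candidate < 0)
			dp_candidate = i;	/* first DP as fallback */
	}
	return dp_candidate;			/* -1: no audio at all */
}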
@@ -2113,6 +1999,52 @@ enum dc_status dce110_apply_ctx_to_hw(
}
}
}
+}
+
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status status;
+ int i;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+ dc->hwss.reset_hw_ctx_wrap(dc, context);
+
+ /* Skip applying if no targets */
+ if (context->stream_count <= 0)
+ return DC_OK;
+
+ /* Apply new context */
+ dcb->funcs->set_scratch_critical_state(dcb, true);
+
+ /* below is for real asic only */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream) {
+ if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
+ dce_crtc_switch_to_clk_src(dc->hwseq,
+ pipe_ctx->clock_source, i);
+ continue;
+ }
+
+ dc->hwss.enable_display_power_gating(
+ dc, i, dc->ctx->dc_bios,
+ PIPE_GATING_CONTROL_DISABLE);
+ }
+
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
+ dce110_setup_audio_dto(dc, context);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx_old =
@@ -2131,31 +2063,6 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->top_pipe)
continue;
- if (context->res_ctx.pipe_ctx[i].stream_res.audio != NULL) {
-
- struct audio_output audio_output;
-
- build_audio_output(context, pipe_ctx, &audio_output);
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.audio->inst,
- &pipe_ctx->stream->audio_info);
- else
- pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.audio->inst,
- &pipe_ctx->stream->audio_info,
- &audio_output.crtc_info);
-
- pipe_ctx->stream_res.audio->funcs->az_configure(
- pipe_ctx->stream_res.audio,
- pipe_ctx->stream->signal,
- &audio_output.crtc_info,
- &pipe_ctx->stream->audio_info);
- }
-
status = apply_single_controller_ctx_to_hw(
pipe_ctx,
context,
@@ -2165,17 +2072,11 @@ enum dc_status dce110_apply_ctx_to_hw(
return status;
}
- /* to save power */
- apply_min_clocks(dc, context, &clocks_state, false);
-
dcb->funcs->set_scratch_critical_state(dcb, false);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
if (dc->fbc_compressor)
enable_fbc(dc, context);
-#endif
-
return DC_OK;
}
@@ -2490,10 +2391,9 @@ static void init_hw(struct dc *dc)
abm->funcs->init_backlight(abm);
abm->funcs->abm_init(abm);
}
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
if (dc->fbc_compressor)
dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
-#endif
}
@@ -2632,7 +2532,7 @@ static void pplib_apply_display_requirements(
/* TODO: dce11.2*/
pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
- pp_display_cfg->disp_clk_khz = context->bw.dce.dispclk_khz;
+ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
@@ -2654,20 +2554,25 @@ static void pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
}
-static void dce110_set_bandwidth(
+void dce110_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
{
- dce110_set_displaymarks(dc, context);
+ struct dc_clocks req_clks;
- if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dce.dispclk_khz * 115 / 100);
- dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
- }
+ req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
+ if (decrease_allowed)
+ dce110_set_displaymarks(dc, context);
+ else
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &req_clks,
+ decrease_allowed);
pplib_apply_display_requirements(dc, context);
}
@@ -2679,9 +2584,7 @@ static void dce110_program_front_end_for_pipe(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
-#endif
unsigned int i;
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2722,7 +2625,6 @@ static void dce110_program_front_end_for_pipe(
program_scaler(dc, pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
/* FBC is not applicable on the underlay pipe */
if (dc->fbc_compressor && old_pipe->stream &&
pipe_ctx->pipe_idx != underlay_idx) {
@@ -2731,7 +2633,6 @@ static void dce110_program_front_end_for_pipe(
else
enable_fbc(dc, dc->current_state);
}
-#endif
mi->funcs->mem_input_program_surface_config(
mi,
@@ -2907,9 +2808,11 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
- .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
- .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
- .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ .viewport = pipe_ctx->plane_res.scl_data.viewport,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+ .rotation = pipe_ctx->plane_state->rotation,
+ .mirror = pipe_ctx->plane_state->horizontal_mirror
};
if (pipe_ctx->plane_state->address.type
@@ -2968,6 +2871,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.disable_stream = dce110_disable_stream,
.unblank_stream = dce110_unblank_stream,
.blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
.enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
.enable_display_power_gating = dce110_enable_display_power_gating,
.disable_plane = dce110_power_down_fe,
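One fix in this file is easy to miss: the eDP power-up path now converts the elapsed time from ns to ms correctly. The old code divided the stored power-off timestamp before computing the elapsed time, corrupting the duration; the new code computes the elapsed ns first and divides the difference. Panels can also stretch the 500 ms minimum via an extra T12 value from the EDID caps. A sketch of the corrected arithmetic, using the kernel's div64_u64 (the helper name edp_wait_ms is illustrative):

static unsigned long long edp_wait_ms(unsigned long long off_ts_ns,
				      unsigned long long now_ns,
				      unsigned long long extra_t12_ms)
{
	unsigned long long min_ms = 500 + extra_t12_ms;
	unsigned long long elapsed_ms;

	if (off_ts_ns == 0)
		return min_ms;	/* power-off was never recorded */

	elapsed_ms = div64_u64(now_ns - off_ts_ns, 1000000);
	return elapsed_ms < min_ms ? min_ms - elapsed_ms : 0;
}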
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 5d7e9f516827..e4c5db75c4c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -49,6 +49,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
+
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option);
+
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
@@ -56,10 +60,19 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
void dce110_power_down(struct dc *dc);
+void dce110_set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
+void dce110_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed);
+
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 0564c8e31252..9b9fc3d96c07 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -1011,7 +1011,7 @@ void dce110_free_mem_input_v(
{
}
-static struct mem_input_funcs dce110_mem_input_v_funcs = {
+static const struct mem_input_funcs dce110_mem_input_v_funcs = {
.mem_input_program_display_marks =
dce_mem_input_v_program_display_marks,
.mem_input_program_chroma_display_marks =
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index ee33786bdef6..e5e9e92521e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -49,14 +49,14 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#define DC_LOGGER \
dc->ctx->logger
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
#include "dce110/dce110_compressor.h"
-#endif
#include "reg_helper.h"
@@ -147,15 +147,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -307,6 +307,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -589,6 +604,23 @@ static struct output_pixel_processor *dce110_opp_create(
return &opp->base;
}
+struct aux_engine *dce110_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce110_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -652,6 +684,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -680,8 +716,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -795,43 +831,38 @@ static bool dce110_validate_bandwidth(
if (memcmp(&dc->current_state->bw.dce,
&context->bw.dce, sizeof(context->bw.dce))) {
- struct log_entry log_entry;
- dm_logger_open(
- dc->ctx->logger,
- &log_entry,
- LOG_BANDWIDTH_CALCS);
- dm_logger_append(&log_entry, "%s: finish,\n"
+
+ DC_LOG_BANDWIDTH_CALCS(
+ "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n"
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
+ ,
__func__,
context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
context->bw.dce.urgent_wm_ns[0].b_mark,
context->bw.dce.urgent_wm_ns[0].a_mark,
context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark,
context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
context->bw.dce.urgent_wm_ns[1].b_mark,
context->bw.dce.urgent_wm_ns[1].a_mark,
context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark,
context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
context->bw.dce.urgent_wm_ns[2].b_mark,
context->bw.dce.urgent_wm_ns[2].a_mark,
context->bw.dce.stutter_exit_wm_ns[2].b_mark,
context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable);
- dm_logger_append(&log_entry,
- "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
- "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.stutter_mode_enable,
context->bw.dce.cpuc_state_change_enable,
context->bw.dce.cpup_state_change_enable,
context->bw.dce.nbp_state_change_enable,
@@ -841,7 +872,6 @@ static bool dce110_validate_bandwidth(
context->bw.dce.sclk_deep_sleep_khz,
context->bw.dce.yclk_khz,
context->bw.dce.blackout_recovery_time_us);
- dm_logger_close(&log_entry);
}
return result;
}
@@ -1180,11 +1210,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce110_disp_clk_create(ctx,
+ pool->base.dccg = dce110_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1214,7 +1244,7 @@ static bool construct(
* max_clock_state
*/
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1265,14 +1295,18 @@ static bool construct(
"DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+
+ pool->base.engines[i] = dce110_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
-#if defined(CONFIG_DRM_AMD_DC_FBC)
dc->fbc_compressor = dce110_compressor_create(ctx);
-
-
-#endif
if (!underlay_create(ctx, &pool->base))
goto res_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index a7dce060204f..aa8d6b10d2c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -235,7 +235,7 @@ static void program_overscan(
int overscan_right = data->h_active - data->recout.x - data->recout.width;
int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
- if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
overscan_bottom += 2;
overscan_right += 2;
}
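The surface_visual_confirm bool has been replaced by a visual_confirm enum across the transform code: overscan is padded whenever any confirm mode is active (!= DISABLE), while program_scaler specifically checks VISUAL_CONFIRM_SURFACE for the color overlay. A sketch of the enum-based check (enumerators beyond the two visible in the hunks are assumptions):

enum visual_confirm {
	VISUAL_CONFIRM_DISABLE = 0,
	VISUAL_CONFIRM_SURFACE,	/* per-surface confirm colors */
};

static int extra_overscan_px(enum visual_confirm vc)
{
	/* any active confirm mode pads the recout by two pixels */
	return vc != VISUAL_CONFIRM_DISABLE ? 2 : 0;
}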
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 00c0a1ef15eb..288129343c77 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -49,6 +49,7 @@
#include "dce112/dce112_hw_sequencer.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "reg_helper.h"
@@ -146,15 +147,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -314,6 +315,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -588,6 +604,23 @@ struct output_pixel_processor *dce112_opp_create(
return &opp->base;
}
+struct aux_engine *dce112_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce112_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -625,6 +658,9 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.opps[i] != NULL)
dce110_opp_destroy(&pool->base.opps[i]);
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
if (pool->base.transforms[i] != NULL)
dce112_transform_destroy(&pool->base.transforms[i]);
@@ -640,6 +676,7 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -668,8 +705,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -744,43 +781,38 @@ bool dce112_validate_bandwidth(
if (memcmp(&dc->current_state->bw.dce,
&context->bw.dce, sizeof(context->bw.dce))) {
- struct log_entry log_entry;
- dm_logger_open(
- dc->ctx->logger,
- &log_entry,
- LOG_BANDWIDTH_CALCS);
- dm_logger_append(&log_entry, "%s: finish,\n"
+
+ DC_LOG_BANDWIDTH_CALCS(
+ "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n"
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ "stutMark_b: %d stutMark_a: %d\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
+ ,
__func__,
context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
context->bw.dce.urgent_wm_ns[0].b_mark,
context->bw.dce.urgent_wm_ns[0].a_mark,
context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark,
context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
context->bw.dce.urgent_wm_ns[1].b_mark,
context->bw.dce.urgent_wm_ns[1].a_mark,
context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark,
context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
context->bw.dce.urgent_wm_ns[2].b_mark,
context->bw.dce.urgent_wm_ns[2].a_mark,
context->bw.dce.stutter_exit_wm_ns[2].b_mark,
context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable);
- dm_logger_append(&log_entry,
- "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
- "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.stutter_mode_enable,
context->bw.dce.cpuc_state_change_enable,
context->bw.dce.cpup_state_change_enable,
context->bw.dce.nbp_state_change_enable,
@@ -790,7 +822,6 @@ bool dce112_validate_bandwidth(
context->bw.dce.sclk_deep_sleep_khz,
context->bw.dce.yclk_khz,
context->bw.dce.blackout_recovery_time_us);
- dm_logger_close(&log_entry);
}
return result;
}
@@ -1000,7 +1031,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -1010,7 +1041,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -1020,7 +1051,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
@@ -1030,7 +1061,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
@@ -1124,11 +1155,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce112_disp_clk_create(ctx,
+ pool->base.dccg = dce112_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1158,7 +1189,7 @@ static bool construct(
* max_clock_state
*/
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1214,6 +1245,13 @@ static bool construct(
"DC:failed to create output pixel processor!\n");
goto res_create_fail;
}
+ pool->base.engines[i] = dce112_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
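The wm_min_memg_clk_in_khz fields renamed throughout bw_calcs_data_update_from_pplib describe a 2x2 grid of watermark ranges: low/high engine clock crossed with low/high memory clock, with 5 GHz standing in as an open upper bound to cover Overdrive. A sketch of how the four ranges are laid out (struct and parameters are illustrative):

struct wm_range {
	int eng_min, eng_max;
	int mem_min, mem_max;
};

static void fill_wm_ranges(struct wm_range r[4], int eng_lo, int eng_split,
			   int mem_lo, int mem_split)
{
	r[0] = (struct wm_range){ eng_lo, eng_split - 1, mem_lo, mem_split - 1 };
	r[1] = (struct wm_range){ eng_split, 5000000, mem_lo, mem_split - 1 };
	r[2] = (struct wm_range){ eng_lo, eng_split - 1, mem_split, 5000000 };
	r[3] = (struct wm_range){ eng_split, 5000000, mem_split, 5000000 };
}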
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index e96ff86d2fc3..5853522a6182 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,7 +244,16 @@ static void dce120_update_dchub(
dh_data->dchub_info_valid = false;
}
+static void dce120_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ if (context->stream_count <= 0)
+ return;
+ dce110_set_bandwidth(dc, context, decrease_allowed);
+}
void dce120_hw_sequencer_construct(struct dc *dc)
{
@@ -254,5 +263,6 @@ void dce120_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
+ dc->hwss.set_bandwidth = dce120_set_bandwidth;
}
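dce120_set_bandwidth is a thin guard around the now-exported dce110_set_bandwidth: it bails out when the context has no streams, then delegates. A sketch of the wrapper pattern (names are illustrative):

static void set_bandwidth_guarded(int stream_count, void (*base_impl)(void))
{
	if (stream_count <= 0)
		return;		/* nothing to program; leave HW alone */
	base_impl();
}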
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 2d58daccc005..d43f37d99c7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -53,6 +53,7 @@
#include "dce/dce_hwseq.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
@@ -297,6 +298,20 @@ static const struct dce_opp_shift opp_shift = {
static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
};
+ #define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
#define audio_regs(id)\
[id] = {\
@@ -361,6 +376,22 @@ struct output_pixel_processor *dce120_opp_create(
ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
+struct aux_engine *dce120_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
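The aux_engine_regs(id) macro above expands to a C99 designated initializer, so one macro both computes each instance's register addresses (via AUX_COMMON_REG_LIST) and places the entry at index id of the table. A compilable sketch of the idiom, with hypothetical register contents in place of the real offsets:

#include <stdint.h>

struct engine_regs {
	uint32_t base;		/* per-instance register base */
	uint32_t reset_mask;	/* kept 0, as in the patch */
};

#define ENGINE_REGS(id) \
	[id] = { .base = 0x1000 + 0x100 * (id), .reset_mask = 0 }

static const struct engine_regs engine_regs_table[] = {
	ENGINE_REGS(0),
	ENGINE_REGS(1),
	ENGINE_REGS(2),
};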
static const struct bios_registers bios_regs = {
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
@@ -373,7 +404,7 @@ static const struct resource_caps res_cap = {
.num_pll = 6,
};
-static const struct dc_debug debug_defaults = {
+static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
@@ -467,6 +498,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.audio_count; i++) {
@@ -494,8 +529,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
}
static void read_dce_straps(
@@ -775,7 +810,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -785,7 +820,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -795,7 +830,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
@@ -805,7 +840,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
@@ -848,6 +883,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
+ dc->caps.psp_setup_panel_mode = true;
dc->debug = debug_defaults;
@@ -894,11 +930,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce120_disp_clk_create(ctx);
- if (pool->base.display_clock == NULL) {
+ pool->base.dccg = dce120_dccg_create(ctx);
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
- goto disp_clk_create_fail;
+ goto dccg_create_fail;
}
pool->base.dmcu = dce_dmcu_create(ctx,
@@ -984,6 +1020,13 @@ static bool construct(
dm_error(
"DC: failed to create output pixel processor!\n");
}
+ pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
/* check next valid pipe */
j++;
@@ -1011,7 +1054,7 @@ static bool construct(
irqs_create_fail:
controller_create_fail:
-disp_clk_create_fail:
+dccg_create_fail:
clk_src_create_fail:
res_create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 2ea490f8482e..04b866f0fa1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -772,7 +772,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
CRTC_REG_SET(
CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
- CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
if (enable_blanking)
CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 48a068964722..604c62969ead 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -54,6 +54,7 @@
#include "reg_helper.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
/* TODO remove this include */
@@ -153,15 +154,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -298,6 +299,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -448,6 +464,23 @@ static struct output_pixel_processor *dce80_opp_create(
return &opp->base;
}
+struct aux_engine *dce80_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
static struct stream_encoder *dce80_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
@@ -655,6 +688,9 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -683,8 +719,8 @@ static void destruct(struct dce110_resource_pool *pool)
}
}
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -822,11 +858,11 @@ static bool dce80_construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -852,7 +888,7 @@ static bool dce80_construct(
goto res_create_fail;
}
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -899,9 +935,18 @@ static bool dce80_construct(
dm_error("DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+
+ pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
@@ -1006,11 +1051,11 @@ static bool dce81_construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1037,7 +1082,7 @@ static bool dce81_construct(
}
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1087,6 +1132,7 @@ static bool dce81_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
@@ -1187,11 +1233,11 @@ static bool dce83_construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1218,7 +1264,7 @@ static bool dce83_construct(
}
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1268,6 +1314,7 @@ static bool dce83_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index c69fa4bfab0a..bf8b68f8db4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -145,10 +145,10 @@ static bool dpp_get_optimal_number_of_taps(
pixel_width = scl_data->viewport.width;
/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
- if (scl_data->viewport.width != scl_data->h_active &&
- scl_data->viewport.height != scl_data->v_active &&
+ if (scl_data->format == PIXEL_FORMAT_FP16 &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
- scl_data->format == PIXEL_FORMAT_FP16)
+ scl_data->ratios.horz.value != dc_fixpt_one.value &&
+ scl_data->ratios.vert.value != dc_fixpt_one.value)
return false;
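The rewritten condition rejects a mode only when it would actually scale: an FP16 surface on DSCL hardware that processes data in fixed format passes as long as either ratio is exactly 1.0 in the driver's 31.32 fixed-point representation (dc_fixpt_one). A standalone illustration of the unity test, assuming the usual 1.0 == 1ULL << 32 encoding:

#include <stdbool.h>
#include <stdint.h>

#define FIXPT_ONE (1ULL << 32)	/* 1.0 in 31.32 fixed point */

static bool fp16_scaling_rejected(uint64_t horz_ratio, uint64_t vert_ratio)
{
	/* mirrors the new check: both ratios must differ from 1.0 to reject */
	return horz_ratio != FIXPT_ONE && vert_ratio != FIXPT_ONE;
}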
if (scl_data->viewport.width > scl_data->h_active &&
@@ -445,10 +445,10 @@ void dpp1_set_cursor_position(
uint32_t width)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
uint32_t cur_en = pos->enable ? 1 : 0;
- if (src_x_offset >= (int)param->viewport_width)
+ if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
if (src_x_offset + (int)width <= 0)
@@ -459,6 +459,18 @@ void dpp1_set_cursor_position(
}
+void dpp1_cnv_set_optional_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dpp_cursor_attributes *attr)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (attr) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ }
+}
+
void dpp1_dppclk_control(
struct dpp *dpp_base,
bool dppclk_div,
@@ -499,6 +511,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_full_bypass = dpp1_full_bypass,
.set_cursor_attributes = dpp1_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index e862cafa6501..e2889e61b18c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -119,6 +119,7 @@
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
+ SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
SRI(DPP_CONTROL, DPP_TOP, id), \
SRI(CM_HDR_MULT_COEF, CM, id)
@@ -324,6 +325,8 @@
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \
TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
@@ -1076,7 +1079,9 @@
type CUR0_COLOR1; \
type DPPCLK_RATE_CONTROL; \
type DPP_CLOCK_ENABLE; \
- type CM_HDR_MULT_COEF;
+ type CM_HDR_MULT_COEF; \
+ type CUR0_FP_BIAS; \
+ type CUR0_FP_SCALE;
struct dcn_dpp_shift {
TF_REG_FIELD_LIST(uint8_t)
@@ -1329,7 +1334,8 @@ struct dcn_dpp_mask {
uint32_t CURSOR0_COLOR0; \
uint32_t CURSOR0_COLOR1; \
uint32_t DPP_CONTROL; \
- uint32_t CM_HDR_MULT_COEF;
+ uint32_t CM_HDR_MULT_COEF; \
+ uint32_t CURSOR0_FP_SCALE_BIAS;
struct dcn_dpp_registers {
DPP_COMMON_REG_VARIABLE_LIST
@@ -1370,6 +1376,10 @@ void dpp1_set_cursor_position(
const struct dc_cursor_mi_param *param,
uint32_t width);
+void dpp1_cnv_set_optional_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dpp_cursor_attributes *attr);
+
bool dpp1_dscl_is_lb_conf_valid(
int ceil_vratio,
int num_partitions,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index f862fd148cca..4a863a5dab41 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -621,6 +621,10 @@ static void dpp1_dscl_set_manual_ratio_init(
static void dpp1_dscl_set_recout(
struct dcn10_dpp *dpp, const struct rect *recout)
{
+ int visual_confirm_on = 0;
+
+ if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
+ visual_confirm_on = 1;
+
REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT */
RECOUT_START_X, recout->x,
@@ -632,8 +636,7 @@ static void dpp1_dscl_set_recout(
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
RECOUT_HEIGHT, recout->height
- - dpp->base.ctx->dc->debug.surface_visual_confirm * 4 *
- (dpp->base.inst + 1));
+ - visual_confirm_on * 4 * (dpp->base.inst + 1));
}
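With visual confirm enabled, each pipe's RECOUT is shortened by a pipe-dependent number of lines (4 for instance 0, 8 for instance 1, and so on), so the pipes underneath show through as stacked colored bands at the bottom of the screen. The arithmetic, as a trivial standalone helper:

static int visible_recout_height(int recout_height, int pipe_inst,
		int visual_confirm_on)
{
	/* pipe 0 gives up 4 lines, pipe 1 gives up 8, and so on */
	return recout_height - visual_confirm_on * 4 * (pipe_inst + 1);
}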
/* Main function to program scaler and line buffer in manual scaling mode */
@@ -655,6 +658,12 @@ void dpp1_dscl_set_scaler_manual_scale(
dpp->scl_data = *scl_data;
+ /* Autocal off */
+ REG_SET_3(DSCL_AUTOCAL, 0,
+ AUTOCAL_MODE, AUTOCAL_MODE_OFF,
+ AUTOCAL_NUM_PIPE, 0,
+ AUTOCAL_PIPE_ID, 0);
+
/* Recout */
dpp1_dscl_set_recout(dpp, &scl_data->recout);
@@ -678,12 +687,6 @@ void dpp1_dscl_set_scaler_manual_scale(
if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
return;
- /* Autocal off */
- REG_SET_3(DSCL_AUTOCAL, 0,
- AUTOCAL_MODE, AUTOCAL_MODE_OFF,
- AUTOCAL_NUM_PIPE, 0,
- AUTOCAL_PIPE_ID, 0);
-
/* Black offsets */
if (ycbcr)
REG_SET_2(SCL_BLACK_OFFSET, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 943143efbb82..1ea91e153d3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -190,10 +190,17 @@ static uint32_t convert_and_clamp(
}
+void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
+{
+ REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
+}
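hubbub1_wm_change_req_wa() uses REG_UPDATE_SEQ to write the WATERMARK_CHANGE_REQUEST field twice, 0 then 1, so the hardware sees a clean rising edge even if the bit was already set. A sketch of the pulse, with a plain variable standing in for the MMIO register:

#include <stdint.h>

static volatile uint32_t wm_change_cntl;	/* stand-in for the real register */

static void pulse_watermark_change_request(void)
{
	wm_change_cntl = 0;	/* clear first, in case a request is pending */
	wm_change_cntl = 1;	/* 0 -> 1 edge latches the new watermarks */
}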
+
void hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
- unsigned int refclk_mhz)
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
{
uint32_t force_en = hubbub->ctx->dc->debug.disable_stutter ? 1 : 0;
/*
@@ -202,191 +209,259 @@ void hubbub1_program_watermarks(
*/
uint32_t prog_wm_value;
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
/* Repeat for watermark set A, B, C and D. */
/* clock state A */
- prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
-
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.urgent_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->a.urgent_ns > hubbub->watermarks.a.urgent_ns) {
+ hubbub->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
- prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.urgent_ns, prog_wm_value);
+ }
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub->watermarks.a.pte_meta_urgent_ns) {
+ hubbub->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+ }
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.a.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.a.cstate_pstate.cstate_exit_ns =
+ watermarks->a.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
+ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.a.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.a.cstate_pstate.pstate_change_ns =
+ watermarks->a.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.cstate_exit_ns,
+ watermarks->a.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
}
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
-
-
/* clock state B */
- prog_wm_value = convert_and_clamp(
- watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.urgent_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->b.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->b.urgent_ns > hubbub->watermarks.b.urgent_ns) {
+ hubbub->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.urgent_ns, prog_wm_value);
+ }
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub->watermarks.b.pte_meta_urgent_ns) {
+ hubbub->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_B calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+ }
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.b.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.b.cstate_pstate.cstate_exit_ns =
+ watermarks->b.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
+ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.b.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.b.cstate_pstate.pstate_change_ns =
+ watermarks->b.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.cstate_exit_ns,
+ watermarks->b.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
}
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
-
/* clock state C */
- prog_wm_value = convert_and_clamp(
- watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.urgent_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->c.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->c.urgent_ns > hubbub->watermarks.c.urgent_ns) {
+ hubbub->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.urgent_ns, prog_wm_value);
+ }
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub->watermarks.c.pte_meta_urgent_ns) {
+ hubbub->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_C calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+ }
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.c.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.c.cstate_pstate.cstate_exit_ns =
+ watermarks->c.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
+ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.c.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.c.cstate_pstate.pstate_change_ns =
+ watermarks->c.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.cstate_exit_ns,
+ watermarks->c.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
}
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
-
/* clock state D */
- prog_wm_value = convert_and_clamp(
- watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.urgent_ns, prog_wm_value);
-
- prog_wm_value = convert_and_clamp(
- watermarks->d.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.pte_meta_urgent_ns, prog_wm_value);
-
-
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->d.urgent_ns > hubbub->watermarks.d.urgent_ns) {
+ hubbub->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.urgent_ns, prog_wm_value);
+ }
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.cstate_exit_ns,
+ if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub->watermarks.d.pte_meta_urgent_ns) {
+ hubbub->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ watermarks->d.pte_meta_urgent_ns, prog_wm_value);
}
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.d.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.d.cstate_pstate.cstate_exit_ns =
+ watermarks->d.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+ if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.d.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.d.cstate_pstate.pstate_change_ns =
+ watermarks->d.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
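Every field in the reworked hubbub1_program_watermarks() follows the same safe_to_lower pattern: the last programmed value is cached in hubbub->watermarks, increases are applied immediately, and decreases are applied only once the caller declares it safe. Distilled into one standalone helper (reg_write() is a stub standing in for REG_WRITE):

#include <stdbool.h>
#include <stdint.h>

static void reg_write(uint32_t value)
{
	/* the MMIO write would go here */
	(void)value;
}

static void program_watermark(uint32_t *cached_ns, uint32_t requested_ns,
		bool safe_to_lower)
{
	/*
	 * Raising a watermark is always safe; lowering one is deferred
	 * until the display can tolerate the smaller value.
	 */
	if (safe_to_lower || requested_ns > *cached_ns) {
		*cached_ns = requested_ns;
		reg_write(*cached_ns);
	}
}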
@@ -408,6 +483,11 @@ void hubbub1_update_dchub(
struct hubbub *hubbub,
struct dchub_init_data *dh_data)
{
+ if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
+ ASSERT(false);
+ /* should not come here */
+ return;
+ }
/* TODO: port code from dal2 */
switch (dh_data->fb_mode) {
case FRAME_BUFFER_MODE_ZFB_ONLY:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 6315a0e6b0d6..d6e596eef4c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -185,6 +185,7 @@ struct hubbub {
const struct dcn_hubbub_shift *shifts;
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
+ struct dcn_watermark_set watermarks;
};
void hubbub1_update_dchub(
@@ -194,10 +195,13 @@ void hubbub1_update_dchub(
bool hubbub1_verify_allow_pstate_change_high(
struct hubbub *hubbub);
+void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
+
void hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
- unsigned int refclk_mhz);
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
void hubbub1_toggle_watermark_change_req(
struct hubbub *hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index c28085be39ff..2138cd3c5d1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -152,21 +152,19 @@ void hubp1_program_tiling(
PIPE_ALIGNED, info->gfx9.pipe_aligned);
}
-void hubp1_program_size_and_rotation(
+void hubp1_program_size(
struct hubp *hubp,
- enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
- struct dc_plane_dcc_param *dcc,
- bool horizontal_mirror)
+ struct dc_plane_dcc_param *dcc)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
+ uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
/* Program data and meta surface pitch (calculation from addrlib)
* 444 or 420 luma
*/
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) {
ASSERT(plane_size->video.chroma_pitch != 0);
/* Chroma pitch zero can cause system hang! */
@@ -192,13 +190,22 @@ void hubp1_program_size_and_rotation(
if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
+}
+
+void hubp1_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ uint32_t mirror;
+
if (horizontal_mirror)
mirror = 1;
else
mirror = 0;
-
/* Program rotation angle and horz mirror - no mirror */
if (rotation == ROTATION_ANGLE_0)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
@@ -287,6 +294,10 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 66);
break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 12);
+ break;
default:
BREAK_TO_DEBUGGER();
break;
@@ -450,9 +461,6 @@ bool hubp1_program_surface_flip_and_addr(
hubp->request_address = *address;
- if (flip_immediate)
- hubp->current_address = *address;
-
return true;
}
@@ -481,8 +489,8 @@ void hubp1_program_surface_config(
{
hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
hubp1_program_tiling(hubp, tiling_info, format);
- hubp1_program_size_and_rotation(
- hubp, rotation, format, plane_size, dcc, horizontal_mirror);
+ hubp1_program_size(hubp, format, plane_size, dcc);
+ hubp1_program_rotation(hubp, rotation, horizontal_mirror);
hubp1_program_pixel_format(hubp, format);
}
@@ -688,7 +696,6 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
return true;
- hubp->current_address = hubp->request_address;
return false;
}
@@ -1061,9 +1068,11 @@ void hubp1_cursor_set_position(
const struct dc_cursor_mi_param *param)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
+ int x_hotspot = pos->x_hotspot;
+ int y_hotspot = pos->y_hotspot;
+ uint32_t dst_x_offset;
uint32_t cur_en = pos->enable ? 1 : 0;
- uint32_t dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
/*
* Guard against cursor_set_position() being called with invalid
@@ -1075,6 +1084,18 @@ void hubp1_cursor_set_position(
if (hubp->curs_attr.address.quad_part == 0)
return;
+ if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
+ src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
+ y_hotspot = pos->x_hotspot;
+ x_hotspot = pos->y_hotspot;
+ }
+
+ if (param->mirror) {
+ x_hotspot = param->viewport.width - x_hotspot;
+ src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ }
+
+ dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
dst_x_offset *= param->ref_clk_khz;
dst_x_offset /= param->pixel_clk_khz;
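The two lines above rescale the cursor's x offset from the pixel-clock domain into reference-clock cycles before it is written to CURSOR_DST_OFFSET. For example, with src_x_offset = 200, ref_clk_khz = 100000 and pixel_clk_khz = 400000 the programmed offset is 200 * 100000 / 400000 = 50. As a standalone helper:

#include <stdint.h>

static uint32_t cursor_dst_x_offset(int src_x_offset, uint32_t ref_clk_khz,
		uint32_t pixel_clk_khz)
{
	uint32_t dst = (src_x_offset >= 0) ? src_x_offset : 0;

	dst *= ref_clk_khz;	/* e.g. 200 * 100000 */
	dst /= pixel_clk_khz;	/* / 400000 == 50 ref-clock cycles */
	return dst;
}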
@@ -1085,7 +1106,7 @@ void hubp1_cursor_set_position(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
- if (src_x_offset >= (int)param->viewport_width)
+ if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
if (src_x_offset + (int)hubp->curs_attr.width <= 0)
@@ -1102,8 +1123,8 @@ void hubp1_cursor_set_position(
CURSOR_Y_POSITION, pos->y);
REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ CURSOR_HOT_SPOT_X, x_hotspot,
+ CURSOR_HOT_SPOT_Y, y_hotspot);
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
@@ -1125,7 +1146,7 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
-static struct hubp_funcs dcn10_hubp_funcs = {
+static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
.hubp_program_surface_config =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index d901d5092969..f689feace82d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -268,8 +268,6 @@
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
- HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
- HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
@@ -388,6 +386,8 @@
#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
HUBP_MASK_SH_LIST_DCN(mask_sh),\
HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
@@ -679,12 +679,15 @@ void hubp1_program_pixel_format(
struct hubp *hubp,
enum surface_pixel_format format);
-void hubp1_program_size_and_rotation(
+void hubp1_program_size(
struct hubp *hubp,
- enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
- struct dc_plane_dcc_param *dcc,
+ struct dc_plane_dcc_param *dcc);
+
+void hubp1_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
bool horizontal_mirror);
void hubp1_program_tiling(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index f8e0576af6e0..cfcc54f2ce65 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -337,13 +337,13 @@ void dcn10_log_hw_state(struct dc *dc)
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
- dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
- dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.calc_clk.dispclk_khz,
- dc->current_state->bw.dcn.calc_clk.dppclk_khz,
- dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz,
- dc->current_state->bw.dcn.calc_clk.fclk_khz,
- dc->current_state->bw.dcn.calc_clk.socclk_khz);
+ dc->current_state->bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
+ dc->current_state->bw.dcn.clk.fclk_khz,
+ dc->current_state->bw.dcn.clk.socclk_khz);
log_mpc_crc(dc);
@@ -415,6 +415,8 @@ static void dpp_pg_control(
if (hws->ctx->dc->debug.disable_dpp_power_gate)
return;
+ if (REG(DOMAIN1_PG_CONFIG) == 0)
+ return;
switch (dpp_inst) {
case 0: /* DPP0 */
@@ -465,6 +467,8 @@ static void hubp_pg_control(
if (hws->ctx->dc->debug.disable_hubp_power_gate)
return;
+ if (REG(DOMAIN0_PG_CONFIG) == 0)
+ return;
switch (hubp_inst) {
case 0: /* DCHUBP0 */
@@ -719,19 +723,7 @@ static void reset_back_end_for_pipe(
if (!pipe_ctx->stream->dpms_off)
core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
else if (pipe_ctx->stream_res.audio) {
- /*
- * if stream is already disabled outside of commit streams path,
- * audio disable was skipped. Need to do it here
- */
- pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
-
- if (dc->caps.dynamic_audio == true) {
- /*we have to dynamic arbitrate the audio endpoints*/
- pipe_ctx->stream_res.audio = NULL;
- /*we free the resource, need reset is_audio_acquired*/
- update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
- }
-
+ dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
}
}
@@ -842,7 +834,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
}
-static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
static bool should_log_hw_state; /* prevent hw state log by default */
@@ -877,7 +869,8 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
return;
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
- opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ if (opp != NULL)
+ opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true;
@@ -1022,7 +1015,7 @@ static void dcn10_init_hw(struct dc *dc)
/* Reset all MPCC muxes */
dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = dc->res_pool->hubps[i];
@@ -1096,6 +1089,8 @@ static void dcn10_init_hw(struct dc *dc)
}
enable_power_gating_plane(dc->hwseq, true);
+
+ memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
}
static void reset_hw_ctx_wrap(
@@ -1164,12 +1159,19 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
if (plane_state == NULL)
return;
+
addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
+
pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
pipe_ctx->plane_res.hubp,
&plane_state->address,
plane_state->flip_immediate);
+
plane_state->status.requested_address = plane_state->address;
+
+ if (plane_state->flip_immediate)
+ plane_state->status.current_address = plane_state->address;
+
if (addr_patched)
pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
@@ -1213,8 +1215,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
} else if (tf->type == TF_TYPE_BYPASS) {
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
} else {
- /*TF_TYPE_DISTRIBUTED_POINTS*/
- result = false;
+ cm_helper_translate_curve_to_degamma_hw_format(tf,
+ &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
+ &dpp_base->degamma_params);
+ result = true;
}
return result;
@@ -1355,10 +1360,11 @@ static void dcn10_enable_per_frame_crtc_position_reset(
DC_SYNC_INFO("Setting up\n");
for (i = 0; i < group_size; i++)
- grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
- grouped_pipes[i]->stream_res.tg,
- grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
- &grouped_pipes[i]->stream->triggered_crtc_reset);
+ if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
+ grouped_pipes[i]->stream_res.tg,
+ grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+ &grouped_pipes[i]->stream->triggered_crtc_reset);
DC_SYNC_INFO("Waiting for trigger\n");
@@ -1774,6 +1780,43 @@ static void dcn10_get_surface_visual_confirm_color(
}
}
+static void dcn10_get_hdr_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ // Determine the overscan color based on the top-most (desktop) plane's context
+ struct pipe_ctx *top_pipe_ctx = pipe_ctx;
+
+ while (top_pipe_ctx->top_pipe != NULL)
+ top_pipe_ctx = top_pipe_ctx->top_pipe;
+
+ switch (top_pipe_ctx->plane_res.scl_data.format) {
+ case PIXEL_FORMAT_ARGB2101010:
+ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
+ /* HDR10, ARGB2101010 - set border color to red */
+ color->color_r_cr = color_value;
+ }
+ break;
+ case PIXEL_FORMAT_FP16:
+ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
+ /* HDR10, FP16 - set border color to blue */
+ color->color_b_cb = color_value;
+ } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ /* FreeSync 2 HDR - set border color to green */
+ color->color_g_y = color_value;
+ }
+ break;
+ default:
+ /* SDR - set border color to gray */
+ color->color_r_cr = color_value/2;
+ color->color_b_cb = color_value/2;
+ color->color_g_y = color_value/2;
+ break;
+ }
+}
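Read top to bottom, the switch above maps the top-most plane's format and output transfer function to a border color as follows:

	ARGB2101010 + TRANSFER_FUNCTION_UNITY	-> red	 (HDR10, fixed point)
	FP16 + TRANSFER_FUNCTION_PQ		-> blue	 (HDR10, floating point)
	FP16 + TRANSFER_FUNCTION_GAMMA22	-> green (FreeSync 2 HDR)
	any other combination			-> gray	 (SDR)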
+
static uint16_t fixed_point_to_int_frac(
struct fixed31_32 arg,
uint8_t integer_bits,
@@ -1854,11 +1897,10 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
-
-static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct mpcc_blnd_cfg blnd_cfg;
+ struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
int mpcc_id;
struct mpcc *new_mpcc;
@@ -1869,13 +1911,17 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
/* TODO: proper fix once fpga works */
- if (dc->debug.surface_visual_confirm)
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
+ dcn10_get_hdr_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
dcn10_get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
- else
+ } else {
color_space_to_black_color(
- dc, pipe_ctx->stream->output_color_space,
- &blnd_cfg.black_color);
+ dc, pipe_ctx->stream->output_color_space,
+ &blnd_cfg.black_color);
+ }
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
@@ -1964,18 +2010,17 @@ static void update_dchubp_dpp(
* divided by 2
*/
if (plane_state->update_flags.bits.full_update) {
- bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
- context->bw.dcn.cur_clk.dispclk_khz / 2;
+ bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+ dc->res_pool->dccg->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
dpp,
should_divided_by_2,
true);
- dc->current_state->bw.dcn.cur_clk.dppclk_khz =
- should_divided_by_2 ?
- context->bw.dcn.cur_clk.dispclk_khz / 2 :
- context->bw.dcn.cur_clk.dispclk_khz;
+ dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
+ dc->res_pool->dccg->clks.dispclk_khz / 2 :
+ dc->res_pool->dccg->clks.dispclk_khz;
}
/* TODO: need an input parameter to tell which OTG the current DCHUB pipe is tied to
@@ -2001,7 +2046,7 @@ static void update_dchubp_dpp(
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change)
- update_mpcc(dc, pipe_ctx);
+ dc->hwss.update_mpcc(dc, pipe_ctx);
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
@@ -2063,12 +2108,13 @@ static void update_dchubp_dpp(
static void dcn10_blank_pixel_data(
struct dc *dc,
- struct stream_resource *stream_res,
- struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
bool blank)
{
enum dc_color_space color_space;
struct tg_color black_color = {0};
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+ struct dc_stream_state *stream = pipe_ctx->stream;
/* program otg blank color */
color_space = stream->output_color_space;
@@ -2110,6 +2156,33 @@ static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, hw_mult);
}
+void dcn10_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ if (pipe_ctx->plane_state->update_flags.bits.full_update)
+ dcn10_enable_plane(dc, pipe_ctx, context);
+
+ update_dchubp_dpp(dc, pipe_ctx, context);
+
+ set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->plane_state->update_flags.bits.full_update ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish
+ * only do gamma programming for full update.
+ * TODO: This can be further optimized/cleaned up
+ * Always call this for now since it does memcmp inside before
+ * doing heavy calculation and programming
+ */
+ if (pipe_ctx->plane_state->update_flags.bits.full_update)
+ dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+}
+
static void program_all_pipe_in_tree(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -2127,31 +2200,12 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg);
- dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
- pipe_ctx->stream, blank);
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+
}
if (pipe_ctx->plane_state != NULL) {
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dcn10_enable_plane(dc, pipe_ctx, context);
-
- update_dchubp_dpp(dc, pipe_ctx, context);
-
- set_hdr_multiplier(pipe_ctx);
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
-
- /* dcn10_translate_regamma_to_hw_format takes 750us to finish
- * only do gamma programming for full update.
- * TODO: This can be further optimized/cleaned up
- * Always call this for now since it does memcmp inside before
- * doing heavy calculation and programming
- */
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+ dcn10_program_pipe(dc, pipe_ctx, context);
}
if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) {
@@ -2165,12 +2219,12 @@ static void dcn10_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
- pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
- pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
- pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
+ pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+ pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
+ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
@@ -2232,8 +2286,6 @@ static void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
bool removed_pipe[4] = { false };
- unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
- bool program_water_mark = false;
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2247,7 +2299,7 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes == 0) {
/* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
+ dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
}
/* Disconnect unused mpcc */
@@ -2278,11 +2330,10 @@ static void dcn10_apply_ctx_for_surface(
old_pipe_ctx->plane_state &&
old_pipe_ctx->stream_res.tg == tg) {
- hwss1_plane_atomic_disconnect(dc, old_pipe_ctx);
+ dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
removed_pipe[i] = true;
- DC_LOG_DC(
- "Reset mpcc for pipe %d\n",
+ DC_LOG_DC("Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx);
}
}
@@ -2295,248 +2346,41 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == stream &&
- pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.full_update)
- program_water_mark = true;
-
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
if (removed_pipe[i])
- dcn10_disable_plane(dc, old_pipe_ctx);
- }
-
- if (program_water_mark) {
- if (dc->debug.sanity_checks) {
- /* pstate stuck check after watermark update */
- dcn10_verify_allow_pstate_change_high(dc);
- }
-
- /* watermark is for all pipes */
- hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks, ref_clk_mhz);
+ dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
- if (dc->debug.sanity_checks) {
- /* pstate stuck check after watermark update */
- dcn10_verify_allow_pstate_change_high(dc);
- }
- }
-/* DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
- "\n============== Watermark parameters ==============\n"
- "a.urgent_ns: %d \n"
- "a.cstate_enter_plus_exit: %d \n"
- "a.cstate_exit: %d \n"
- "a.pstate_change: %d \n"
- "a.pte_meta_urgent: %d \n"
- "b.urgent_ns: %d \n"
- "b.cstate_enter_plus_exit: %d \n"
- "b.cstate_exit: %d \n"
- "b.pstate_change: %d \n"
- "b.pte_meta_urgent: %d \n",
- context->bw.dcn.watermarks.a.urgent_ns,
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns,
- context->bw.dcn.watermarks.b.urgent_ns,
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.b.pte_meta_urgent_ns
- );
- DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
- "\nc.urgent_ns: %d \n"
- "c.cstate_enter_plus_exit: %d \n"
- "c.cstate_exit: %d \n"
- "c.pstate_change: %d \n"
- "c.pte_meta_urgent: %d \n"
- "d.urgent_ns: %d \n"
- "d.cstate_enter_plus_exit: %d \n"
- "d.cstate_exit: %d \n"
- "d.pstate_change: %d \n"
- "d.pte_meta_urgent: %d \n"
- "========================================================\n",
- context->bw.dcn.watermarks.c.urgent_ns,
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.c.pte_meta_urgent_ns,
- context->bw.dcn.watermarks.d.urgent_ns,
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.d.pte_meta_urgent_ns
- );
-*/
-}
-
-static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
-{
- return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
-}
-
-static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
-{
- bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.calc_clk.dppclk_khz;
- bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.cur_clk.dispclk_khz;
- int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
- bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
- context->bw.dcn.cur_clk.dppclk_khz;
-
- /* increase clock, looking for div is 0 for current, request div is 1*/
- if (dispclk_increase) {
- /* already divided by 2, no need to reach target clk with 2 steps*/
- if (cur_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* request disp clk is lower than maximum supported dpp clk,
- * no need to reach target clk with two steps.
- */
- if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* target dpp clk not request divided by 2, still within threshold */
- if (!request_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- } else {
- /* decrease clock, looking for current dppclk divided by 2,
- * request dppclk not divided by 2.
- */
-
- /* current dpp clk not divided by 2, no need to ramp*/
- if (!cur_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* current disp clk is lower than current maximum dpp clk,
- * no need to ramp
- */
- if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* request dpp clk need to be divided by 2 */
- if (request_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
- }
-
- return disp_clk_threshold;
-}
-
-static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
-{
- int i;
- bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.calc_clk.dppclk_khz;
-
- int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
-
- /* set disp clk to dpp clk threshold */
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- dispclk_to_dpp_threshold);
-
- /* update request dpp clk division option */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx->plane_state)
- continue;
-
- pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
- pipe_ctx->plane_res.dpp,
- request_dpp_div,
- true);
- }
-
- /* If target clk not same as dppclk threshold, set to target clock */
- if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dcn.calc_clk.dispclk_khz);
- }
-
- context->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- context->bw.dcn.cur_clk.dppclk_khz =
- context->bw.dcn.calc_clk.dppclk_khz;
- context->bw.dcn.cur_clk.max_supported_dppclk_khz =
- context->bw.dcn.calc_clk.max_supported_dppclk_khz;
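+ /* DEGVIDCN10-254: force a watermark change request as a workaround */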
+ if (dc->hwseq->wa.DEGVIDCN10_254)
+ hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
static void dcn10_set_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool decrease_allowed)
+ bool safe_to_lower)
{
- struct pp_smu_display_requirement_rv *smu_req_cur =
- &dc->res_pool->pp_smu_req;
- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-
- if (dc->debug.sanity_checks) {
+ if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
- }
-
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- return;
-
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.dcfclk_khz,
- dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
- context->bw.dcn.cur_clk.dcfclk_khz =
- context->bw.dcn.calc_clk.dcfclk_khz;
- smu_req.hard_min_dcefclk_khz =
- context->bw.dcn.calc_clk.dcfclk_khz;
- }
-
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
- context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
- }
-
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.fclk_khz,
- dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
- context->bw.dcn.cur_clk.fclk_khz =
- context->bw.dcn.calc_clk.fclk_khz;
- smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
- }
-
- smu_req.display_count = context->stream_count;
-
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- *smu_req_cur = smu_req;
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
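+ /* With no streams the PHY clock requirement drops to zero */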
+ if (context->stream_count == 0)
+ context->bw.dcn.clk.phyclk_khz = 0;
- /* make sure dcf clk is before dpp clk to
- * make sure we have enough voltage to run dpp clk
- */
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.dispclk_khz,
- dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
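+ /* DCCG programs the new clock state; clocks are lowered only when safe_to_lower */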
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &context->bw.dcn.clk,
+ safe_to_lower);
- ramp_up_dispclk_with_dpp(dc, context);
+ dcn10_pplib_apply_display_requirements(dc, context);
}
- dcn10_pplib_apply_display_requirements(dc, context);
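+ /* Watermarks are global to all pipes; ref clock is converted from kHz to MHz */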
+ hubbub1_program_watermarks(dc->res_pool->hubbub,
+ &context->bw.dcn.watermarks,
+ dc->res_pool->ref_clock_inKhz / 1000,
+ true);
- if (dc->debug.sanity_checks) {
+ if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
- }
-
- /* need to fix this function. not doing the right thing here */
}
static void set_drr(struct pipe_ctx **pipe_ctx,
@@ -2701,16 +2545,20 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ bool flip_pending;
if (plane_state == NULL)
return;
- plane_state->status.is_flip_pending =
- pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
+ flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
pipe_ctx->plane_res.hubp);
- plane_state->status.current_address = pipe_ctx->plane_res.hubp->current_address;
- if (pipe_ctx->plane_res.hubp->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ plane_state->status.is_flip_pending = flip_pending;
+
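+ /* Once the flip has completed, the requested address is the one being scanned out */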
+ if (!flip_pending)
+ plane_state->status.current_address = plane_state->status.requested_address;
+
+ if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
tg->funcs->is_stereo_left_eye) {
plane_state->status.is_right_eye =
!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
@@ -2719,8 +2567,14 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
- if (hws->ctx->dc->res_pool->hubbub != NULL)
- hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
+ if (hws->ctx->dc->res_pool->hubbub != NULL) {
+ struct hubp *hubp = hws->ctx->dc->res_pool->hubps[0];
+
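+ /* Prefer the per-HUBP hook when the ASIC provides one */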
+ if (hubp->funcs->hubp_update_dchub)
+ hubp->funcs->hubp_update_dchub(hubp, dh_data);
+ else
+ hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
+ }
}
static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
@@ -2731,9 +2585,11 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
- .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
- .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
- .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ .viewport = pipe_ctx->plane_res.scl_data.viewport,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+ .rotation = pipe_ctx->plane_state->rotation,
+ .mirror = pipe_ctx->plane_state->horizontal_mirror
};
if (pipe_ctx->plane_state->address.type
@@ -2757,6 +2613,33 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, attributes->color_format);
}
+static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
+ struct fixed31_32 multiplier;
+ struct dpp_cursor_attributes opt_attr = { 0 };
+ uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
+ struct custom_float_format fmt;
+
+ if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
+ return;
+
+ fmt.exponenta_bits = 5;
+ fmt.mantissa_bits = 10;
+ fmt.sign = true;
+
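+ /* White levels above the 80-nit SDR reference get a proportional boost */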
+ if (sdr_white_level > 80) {
+ multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
+ convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
+ }
+
+ opt_attr.scale = hw_scale;
+ opt_attr.bias = 0;
+
+ pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
+ pipe_ctx->plane_res.dpp, &opt_attr);
+}
+
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_csc_matrix = program_csc_matrix,
@@ -2764,7 +2647,9 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
.update_plane_addr = dcn10_update_plane_addr,
+ .plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
.update_dchub = dcn10_update_dchub,
+ .update_mpcc = dcn10_update_mpcc,
.update_pending_status = dcn10_update_pending_status,
.set_input_transfer_func = dcn10_set_input_transfer_func,
.set_output_transfer_func = dcn10_set_output_transfer_func,
@@ -2778,6 +2663,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_stream = dce110_disable_stream,
.unblank_stream = dce110_unblank_stream,
.blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.disable_plane = dcn10_disable_plane,
.blank_pixel_data = dcn10_blank_pixel_data,
@@ -2800,7 +2687,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.edp_power_control = hwss_edp_power_control,
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
- .set_cursor_attribute = dcn10_set_cursor_attribute
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 44f734b73f9e..7139fb73e966 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -39,4 +39,11 @@ bool is_rgb_cspace(enum dc_color_space output_color_space);
void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+
+void dcn10_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 21fa40ac0786..6f675206a136 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -65,11 +65,6 @@ enum {
DP_MST_UPDATE_MAX_RETRY = 50
};
-
-
-static void aux_initialize(struct dcn10_link_encoder *enc10);
-
-
static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
@@ -445,12 +440,11 @@ static uint8_t get_frontend_source(
}
}
-static void configure_encoder(
+void configure_encoder(
struct dcn10_link_encoder *enc10,
const struct dc_link_settings *link_settings)
{
/* set number of lanes */
-
REG_SET(DP_CONFIG, 0,
DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
@@ -602,6 +596,9 @@ static bool dcn10_link_encoder_validate_hdmi_output(
if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
adjusted_pix_clk_khz >= 300000)
return false;
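+ /* YCbCr 4:2:0 requires HDMI 2.0; reject it when HDMI 2.0 is disabled */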
+ if (enc10->base.ctx->dc->debug.hdmi20_disable &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
return true;
}
@@ -734,6 +731,9 @@ void dcn10_link_encoder_construct(
__func__,
result);
}
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
}
bool dcn10_link_encoder_validate_output_with_stream(
@@ -812,7 +812,7 @@ void dcn10_link_encoder_hw_init(
ASSERT(result == BP_RESULT_OK);
}
- aux_initialize(enc10);
+ dcn10_aux_initialize(enc10);
/* reinitialize HPD.
* hpd_initialize() will pass DIG_FE id to HW context.
@@ -995,6 +995,8 @@ void dcn10_link_encoder_disable_output(
if (!dcn10_is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
+ /* In DP_Alt_No_Connect case, the DIG is already turned off
+ * after executing the PHY w/a sequence; do not touch the PHY any more. */
return;
}
/* Power-down RX and disable GPU PHY should be paired.
@@ -1347,8 +1349,7 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
FN(reg, f1), v1,\
FN(reg, f2), v2)
-static void aux_initialize(
- struct dcn10_link_encoder *enc10)
+void dcn10_aux_initialize(struct dcn10_link_encoder *enc10)
{
enum hpd_source_id hpd_source = enc10->base.hpd_source;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 2a97cdb2cfbb..49ead12b2532 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -42,6 +42,7 @@
#define LE_DCN_COMMON_REG_LIST(id) \
SRI(DIG_BE_CNTL, DIG, id), \
SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
SRI(DP_CONFIG, DP, id), \
SRI(DP_DPHY_CNTL, DP, id), \
SRI(DP_DPHY_PRBS_CNTL, DP, id), \
@@ -64,6 +65,7 @@
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
+
#define LE_DCN10_REG_LIST(id)\
LE_DCN_COMMON_REG_LIST(id)
@@ -100,6 +102,7 @@ struct dcn10_link_enc_registers {
uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
+ uint32_t TMDS_CTL_BITS;
};
#define LE_SF(reg_name, field_name, post_fix)\
@@ -110,6 +113,7 @@ struct dcn10_link_enc_registers {
LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
+ LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\
@@ -198,10 +202,11 @@ struct dcn10_link_enc_registers {
type DP_MSE_SAT_SLOT_COUNT3;\
type DP_MSE_SAT_UPDATE;\
type DP_MSE_16_MTP_KEEPOUT;\
+ type DC_HPD_EN;\
+ type TMDS_CTL0;\
type AUX_HPD_SEL;\
type AUX_LS_READ_EN;\
- type AUX_RX_RECEIVE_WINDOW;\
- type DC_HPD_EN
+ type AUX_RX_RECEIVE_WINDOW
struct dcn10_link_enc_shift {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
@@ -266,6 +271,10 @@ void dcn10_link_encoder_setup(
struct link_encoder *enc,
enum signal_type signal);
+void configure_encoder(
+ struct dcn10_link_encoder *enc10,
+ const struct dc_link_settings *link_settings);
+
/* enables TMDS PHY output */
/* TODO: still need depth or just pass in adjusted pixel clock? */
void dcn10_link_encoder_enable_tmds_output(
@@ -327,4 +336,6 @@ void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
bool dcn10_is_dig_enabled(struct link_encoder *enc);
+void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
+
#endif /* __DC_LINK_ENCODER__DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 9ca51ae46de7..958994edf2c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -428,7 +428,7 @@ void mpc1_read_mpcc_state(
MPCC_BUSY, &s->busy);
}
-const struct mpc_funcs dcn10_mpc_funcs = {
+static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 77a1a9d541a4..ab958cff3b76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -385,7 +385,7 @@ void opp1_destroy(struct output_pixel_processor **opp)
*opp = NULL;
}
-static struct opp_funcs dcn10_opp_funcs = {
+static const struct opp_funcs dcn10_opp_funcs = {
.opp_set_dyn_expansion = opp1_set_dyn_expansion,
.opp_program_fmt = opp1_program_fmt,
.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index f2fbce0e3fc5..411f89218e01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -1257,6 +1257,37 @@ void optc1_read_otg_state(struct optc *optc1,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
}
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+ uint32_t *otg_active_width,
+ uint32_t *otg_active_height)
+{
+ uint32_t otg_enabled;
+ uint32_t v_blank_start;
+ uint32_t v_blank_end;
+ uint32_t h_blank_start;
+ uint32_t h_blank_end;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+
+ REG_GET(OTG_CONTROL,
+ OTG_MASTER_EN, &otg_enabled);
+
+ if (otg_enabled == 0)
+ return false;
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &v_blank_start,
+ OTG_V_BLANK_END, &v_blank_end);
+
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &h_blank_start,
+ OTG_H_BLANK_END, &h_blank_end);
+
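+ /* Active size spans from blank end to blank start */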
+ *otg_active_width = h_blank_start - h_blank_end;
+ *otg_active_height = v_blank_start - v_blank_end;
+ return true;
+}
+
void optc1_clear_optc_underflow(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -1293,6 +1324,72 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
return (underflow_occurred == 1);
}
+bool optc1_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ /* Cannot configure crc on a CRTC that is disabled */
+ if (!optc1_is_tg_enabled(optc))
+ return false;
+
+ REG_WRITE(OTG_CRC_CNTL, 0);
+
+ if (!params->enable)
+ return true;
+
+ /* Program frame boundaries */
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set CRC mode and selection, and enable. Only using CRC0 */
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+
+ return true;
+}
+
+bool optc1_get_crc(struct timing_generator *optc,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+{
+ uint32_t field = 0;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field);
+
+ /* Early return if CRC is not enabled for this CRTC */
+ if (!field)
+ return false;
+
+ REG_GET_2(OTG_CRC0_DATA_RG,
+ CRC0_R_CR, r_cr,
+ CRC0_G_Y, g_y);
+
+ REG_GET(OTG_CRC0_DATA_B,
+ CRC0_B_CB, b_cb);
+
+ return true;
+}
+
static const struct timing_generator_funcs dcn10_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -1305,6 +1402,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.get_position = optc1_get_position,
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not need for FPGA */
.wait_for_state = optc1_wait_for_state,
@@ -1328,6 +1426,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.is_tg_enabled = optc1_is_tg_enabled,
.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
.clear_optc_underflow = optc1_clear_optc_underflow,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc1_configure_crc,
};
void dcn10_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index c62052f46460..c1b114209fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -75,7 +75,14 @@
SRI(CONTROL, VTG, inst),\
SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\
- SRI(OTG_GSL_CONTROL, OTG, inst)
+ SRI(OTG_GSL_CONTROL, OTG, inst),\
+ SRI(OTG_CRC_CNTL, OTG, inst),\
+ SRI(OTG_CRC0_DATA_RG, OTG, inst),\
+ SRI(OTG_CRC0_DATA_B, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst)
#define TG_COMMON_REG_LIST_DCN1_0(inst) \
TG_COMMON_REG_LIST_DCN(inst),\
@@ -138,6 +145,13 @@ struct dcn_optc_registers {
uint32_t OTG_GSL_WINDOW_X;
uint32_t OTG_GSL_WINDOW_Y;
uint32_t OTG_VUPDATE_KEEPOUT;
+ uint32_t OTG_CRC_CNTL;
+ uint32_t OTG_CRC0_DATA_RG;
+ uint32_t OTG_CRC0_DATA_B;
+ uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
+ uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
+ uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
+ uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -232,7 +246,21 @@ struct dcn_optc_registers {
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
- SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh)
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh)
#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
@@ -363,7 +391,22 @@ struct dcn_optc_registers {
type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
- type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
+ type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\
+ type OTG_CRC_CONT_EN;\
+ type OTG_CRC0_SELECT;\
+ type OTG_CRC_EN;\
+ type CRC0_R_CR;\
+ type CRC0_G_Y;\
+ type CRC0_B_CB;\
+ type OTG_CRC0_WINDOWA_X_START;\
+ type OTG_CRC0_WINDOWA_X_END;\
+ type OTG_CRC0_WINDOWA_Y_START;\
+ type OTG_CRC0_WINDOWA_Y_END;\
+ type OTG_CRC0_WINDOWB_X_START;\
+ type OTG_CRC0_WINDOWB_X_END;\
+ type OTG_CRC0_WINDOWB_Y_START;\
+ type OTG_CRC0_WINDOWB_Y_END;
+
#define TG_REG_FIELD_LIST(type) \
TG_REG_FIELD_LIST_DCN1_0(type)
@@ -507,4 +550,19 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+ uint32_t *otg_active_width,
+ uint32_t *otg_active_height);
+
+void optc1_enable_crtc_reset(
+ struct timing_generator *optc,
+ int source_tg_inst,
+ struct crtc_trigger_info *crtc_tp);
+
+bool optc1_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params);
+
+bool optc1_get_crc(struct timing_generator *optc,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index df5cb2d1d164..6b44ed3697a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -64,6 +64,69 @@
#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
+
+const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = 164,
+ .dpte_buffer_size_in_pte_reqs = 42,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .pte_chunk_size_kbytes = 2,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 2,
+ .line_buffer_size_bits = 589824,
+ .max_line_buffer_lines = 12,
+ .IsLineBufferBppFixed = 0,
+ .LineBufferFixedBpp = -1,
+ .writeback_luma_buffer_size_kbytes = 12,
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .max_num_dpp = 4,
+ .max_num_wb = 2,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 4,
+ .max_vscl_ratio = 4,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.10,
+ .min_vblank_lines = 14,
+ .dppclk_delay_subtotal = 90,
+ .dispclk_delay_subtotal = 42,
+ .dcfclk_cstate_latency = 10,
+ .max_inter_dcn_tile_repeaters = 8,
+ .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
+ .bug_forcing_LC_req_same_size_fixed = 0,
+};
+
+const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .urgent_latency_us = 4.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 80.0,
+ .max_request_size_bytes = 256,
+ .downspread_percent = 0.5,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 128,
+ .urgent_out_of_order_return_per_channel_bytes = 256,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 2,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 17.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+};
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -294,6 +357,21 @@ static const struct dcn10_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN10(_MASK),
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN10(id),\
@@ -417,13 +495,14 @@ static const struct dce110_clk_src_mask cs_mask = {
static const struct resource_caps res_cap = {
.num_timing_generator = 4,
+ .num_opp = 4,
.num_video_plane = 4,
.num_audio = 4,
.num_stream_encoder = 4,
.num_pll = 4,
};
-static const struct dc_debug debug_defaults_drv = {
+static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
.force_abm_enable = false,
@@ -436,7 +515,7 @@ static const struct dc_debug debug_defaults_drv = {
*/
.min_disp_clk_khz = 100000,
- .disable_pplib_clock_request = true,
+ .disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
@@ -451,7 +530,7 @@ static const struct dc_debug debug_defaults_drv = {
.max_downscale_src_width = 3840,
};
-static const struct dc_debug debug_defaults_diags = {
+static const struct dc_debug_options debug_defaults_diags = {
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = true,
@@ -515,6 +594,23 @@ static struct output_pixel_processor *dcn10_opp_create(
return &opp->base;
}
+struct aux_engine *dcn10_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
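+ /* Reuse the DCE110 AUX engine with an extended SW timeout */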
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
{
struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
@@ -680,6 +776,7 @@ static struct dce_hwseq *dcn10_hwseq_create(
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN10_253 = true;
hws->wa.false_optc_underflow = true;
+ hws->wa.DEGVIDCN10_254 = true;
}
return hws;
}
@@ -762,6 +859,9 @@ static void destruct(struct dcn10_resource_pool *pool)
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++)
@@ -790,8 +890,8 @@ static void destruct(struct dcn10_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
kfree(pool->base.pp_smu);
}
@@ -971,11 +1071,11 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
return DC_OK;
}
-static struct dc_cap_funcs cap_funcs = {
+static const struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
-static struct resource_funcs dcn10_res_pool_funcs = {
+static const struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
.validate_bandwidth = dcn_validate_bandwidth,
@@ -1027,6 +1127,8 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
+ /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
+ dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1070,8 +1172,8 @@ static bool construct(
}
}
- pool->base.display_clock = dce120_disp_clk_create(ctx);
- if (pool->base.display_clock == NULL) {
+ pool->base.dccg = dcn1_dccg_create(ctx);
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto fail;
@@ -1191,6 +1293,14 @@ static bool construct(
goto fail;
}
+ pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto fail;
+ }
+
/* check next valid pipe */
j++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index c928ee4cd382..6f9078f3c4d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -257,20 +257,18 @@ void enc1_stream_encoder_dp_set_stream_attribute(
uint8_t colorimetry_bpc;
uint8_t dynamic_range_rgb = 0; /*full range*/
uint8_t dynamic_range_ycbcr = 1; /*bt709*/
+ uint8_t dp_pixel_encoding = 0;
+ uint8_t dp_component_depth = 0;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
-
/* set pixel encoding */
switch (crtc_timing->pixel_encoding) {
case PIXEL_ENCODING_YCBCR422:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_YCBCR422);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR422;
break;
case PIXEL_ENCODING_YCBCR444:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_YCBCR444);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR444;
if (crtc_timing->flags.Y_ONLY)
if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
@@ -278,8 +276,8 @@ void enc1_stream_encoder_dp_set_stream_attribute(
* Color depth of Y-only could be
* 8, 10, 12, 16 bits
*/
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_Y_ONLY);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_Y_ONLY;
+
/* Note: DP_MSA_MISC1 bit 7 is the indicator
* of Y-only mode.
* This bit is set in HW if register
@@ -287,48 +285,55 @@ void enc1_stream_encoder_dp_set_stream_attribute(
*/
break;
case PIXEL_ENCODING_YCBCR420:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_YCBCR420);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
break;
default:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_RGB444);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
break;
}
misc1 = REG_READ(DP_MSA_MISC);
+ /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
+ * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the
+ * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
+ * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
+ */
+ if ((crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+ (output_color_space == COLOR_SPACE_2020_YCBCR) ||
+ (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
+ (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
+ misc1 = misc1 | 0x40;
+ else
+ misc1 = misc1 & ~0x40;
/* set color depth */
-
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_666:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- 0);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
break;
case COLOR_DEPTH_888:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_8BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_8BPC;
break;
case COLOR_DEPTH_101010:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_10BPC);
-
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_10BPC;
break;
case COLOR_DEPTH_121212:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_12BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_12BPC;
break;
case COLOR_DEPTH_161616:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_16BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_16BPC;
break;
default:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_6BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
break;
}
+ /* Set DP pixel encoding and component depth */
+ REG_UPDATE_2(DP_PIXEL_FORMAT,
+ DP_PIXEL_ENCODING, dp_pixel_encoding,
+ DP_COMPONENT_DEPTH, dp_component_depth);
+
/* set dynamic range and YCbCr range */
switch (crtc_timing->display_color_depth) {
@@ -354,7 +359,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
switch (output_color_space) {
case COLOR_SPACE_SRGB:
- misc0 = misc0 | 0x0;
misc1 = misc1 & ~0x80; /* bit7 = 0*/
dynamic_range_rgb = 0; /*full range*/
break;
@@ -1087,27 +1091,6 @@ static union audio_cea_channels speakers_to_channels(
return cea_channels;
}
-static uint32_t calc_max_audio_packets_per_line(
- const struct audio_crtc_info *crtc_info)
-{
- uint32_t max_packets_per_line;
-
- max_packets_per_line =
- crtc_info->h_total - crtc_info->h_active;
-
- if (crtc_info->pixel_repetition)
- max_packets_per_line *= crtc_info->pixel_repetition;
-
- /* for other hdmi features */
- max_packets_per_line -= 58;
- /* for Control Period */
- max_packets_per_line -= 16;
- /* Number of Audio Packets per Line */
- max_packets_per_line /= 32;
-
- return max_packets_per_line;
-}
-
static void get_audio_clock_info(
enum dc_color_depth color_depth,
uint32_t crtc_pixel_clock_in_khz,
@@ -1201,16 +1184,9 @@ static void enc1_se_setup_hdmi_audio(
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
struct audio_clock_info audio_clock_info = {0};
- uint32_t max_packets_per_line;
-
- /* For now still do calculation, although this field is ignored when
- * above HDMI_PACKET_GEN_VERSION set to 1
- */
- max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
/* HDMI_AUDIO_PACKET_CONTROL */
- REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
- HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+ REG_UPDATE(HDMI_AUDIO_PACKET_CONTROL,
HDMI_AUDIO_DELAY_EN, 1);
/* AFMT_AUDIO_PACKET_CONTROL */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 034369fbb9e2..5d4527d03045 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -40,6 +40,14 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
const struct dc_edid *edid,
struct dc_edid_caps *edid_caps);
+
+/*
+ * Update DP branch info
+ */
+void dm_helpers_dp_update_branch_info(
+ struct dc_context *ctx,
+ const struct dc_link *link);
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -103,6 +111,9 @@ bool dm_helpers_submit_i2c(
const struct dc_link *link,
struct i2c_command *cmd);
+bool dm_helpers_is_dp_sink_present(
+ struct dc_link *link);
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index eac4bfe12257..58ed2055ef9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -40,7 +40,7 @@ enum wm_set_id {
WM_B,
WM_C,
WM_D,
- WM_COUNT,
+ WM_SET_COUNT,
};
struct pp_smu_wm_set_range {
@@ -53,10 +53,10 @@ struct pp_smu_wm_set_range {
struct pp_smu_wm_range_sets {
uint32_t num_reader_wm_sets;
- struct pp_smu_wm_set_range reader_wm_sets[WM_COUNT];
+ struct pp_smu_wm_set_range reader_wm_sets[WM_SET_COUNT];
uint32_t num_writer_wm_sets;
- struct pp_smu_wm_set_range writer_wm_sets[WM_COUNT];
+ struct pp_smu_wm_set_range writer_wm_sets[WM_SET_COUNT];
};
struct pp_smu_display_requirement_rv {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 4ff9b2bba178..eb5ab3978e84 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -339,7 +339,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
#define dm_log_to_buffer(buffer, size, fmt, args)\
vsnprintf(buffer, size, fmt, args)
-unsigned long long dm_get_timestamp(struct dc_context *ctx);
+static inline unsigned long long dm_get_timestamp(struct dc_context *ctx)
+{
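+ /* Raw monotonic clock, in nanoseconds */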
+ return ktime_get_raw_ns();
+}
unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
unsigned long long current_time_stamp,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index ab8c77d4e6df..2b83f922ac02 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -137,7 +137,7 @@ struct dm_pp_clock_range_for_wm_set {
enum dm_pp_wm_set_id wm_set_id;
uint32_t wm_min_eng_clk_in_khz;
uint32_t wm_max_eng_clk_in_khz;
- uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_min_mem_clk_in_khz;
uint32_t wm_max_mem_clk_in_khz;
};
@@ -150,7 +150,7 @@ struct dm_pp_clock_range_for_dmif_wm_set_soc15 {
enum dm_pp_wm_set_id wm_set_id;
uint32_t wm_min_dcfclk_clk_in_khz;
uint32_t wm_max_dcfclk_clk_in_khz;
- uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_min_mem_clk_in_khz;
uint32_t wm_max_mem_clk_in_khz;
};
@@ -158,7 +158,7 @@ struct dm_pp_clock_range_for_mcif_wm_set_soc15 {
enum dm_pp_wm_set_id wm_set_id;
uint32_t wm_min_socclk_clk_in_khz;
uint32_t wm_max_socclk_clk_in_khz;
- uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_min_mem_clk_in_khz;
uint32_t wm_max_mem_clk_in_khz;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index f83a608f93e9..d97ca6528f9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -36,11 +36,10 @@ CFLAGS_display_mode_lib.o := $(dml_ccflags)
CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags)
-CFLAGS_soc_bounding_box.o := $(dml_ccflags)
CFLAGS_dml_common_defs.o := $(dml_ccflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
- soc_bounding_box.o dml_common_defs.o
+ dml_common_defs.o
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index fd9d97aab071..dddeb0d4db8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,67 +26,8 @@
#include "display_mode_lib.h"
#include "dc_features.h"
-static const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = 164,
- .dpte_buffer_size_in_pte_reqs = 42,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .pte_enable = 1,
- .pte_chunk_size_kbytes = 2,
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 2,
- .line_buffer_size_bits = 589824,
- .max_line_buffer_lines = 12,
- .IsLineBufferBppFixed = 0,
- .LineBufferFixedBpp = -1,
- .writeback_luma_buffer_size_kbytes = 12,
- .writeback_chroma_buffer_size_kbytes = 8,
- .max_num_dpp = 4,
- .max_num_wb = 2,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 4,
- .max_vscl_ratio = 4,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.10,
- .min_vblank_lines = 14,
- .dppclk_delay_subtotal = 90,
- .dispclk_delay_subtotal = 42,
- .dcfclk_cstate_latency = 10,
- .max_inter_dcn_tile_repeaters = 8,
- .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
- .bug_forcing_LC_req_same_size_fixed = 0,
-};
-
-static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .urgent_latency_us = 4.0,
- .writeback_latency_us = 12.0,
- .ideal_dram_bw_after_urgent_percent = 80.0,
- .max_request_size_bytes = 256,
- .downspread_percent = 0.5,
- .dram_page_open_time_ns = 50.0,
- .dram_rw_turnaround_time_ns = 17.5,
- .dram_return_buffer_per_channel_bytes = 8192,
- .round_trip_ping_latency_dcfclk_cycles = 128,
- .urgent_out_of_order_return_per_channel_bytes = 256,
- .channel_interleave_bytes = 256,
- .num_banks = 8,
- .num_chans = 2,
- .vmm_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 17.0,
- .writeback_dram_clock_change_latency_us = 23.0,
- .return_bus_width_bytes = 64,
-};
+extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;
+extern const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 3c2abcb8a1b0..635206248889 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -27,7 +27,6 @@
#include "dml_common_defs.h"
-#include "soc_bounding_box.h"
#include "dml1_display_rq_dlg_calc.h"
enum dml_project {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 7fa0375939ae..cbafce649e33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -64,10 +64,9 @@ struct _vcs_dpi_voltage_scaling_st {
double dscclk_mhz;
double dcfclk_mhz;
double socclk_mhz;
- double dram_speed_mhz;
+ double dram_speed_mts;
double fabricclk_mhz;
double dispclk_mhz;
- double dram_bw_per_chan_gbps;
double phyclk_mhz;
double dppclk_mhz;
};
@@ -112,6 +111,8 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_bus_transport_time_us;
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
+ double max_hscl_ratio;
+ double max_vscl_ratio;
struct _vcs_dpi_voltage_scaling_st clock_limits[7];
};
@@ -304,6 +305,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned char otg_inst;
unsigned char odm_split_cnt;
unsigned char odm_combine;
+ unsigned char use_maximum_vstartup;
};
struct _vcs_dpi_display_pipe_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
deleted file mode 100644
index 324239c77958..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "soc_bounding_box.h"
-#include "display_mode_lib.h"
-#include "dc_features.h"
-
-#include "dml_inline_defs.h"
-
-/*
- * NOTE:
- * This file is gcc-parseable HW gospel, coming straight from HW engineers.
- *
- * It doesn't adhere to Linux kernel style and sometimes will do things in odd
- * ways. Unless there is something clearly wrong with it the code should
- * remain as-is as it provides us with a guarantee from HW that it is correct.
- */
-
-void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
-{
- to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
- to_box->sr_exit_time_us = from_box->sr_exit_time_us;
- to_box->sr_enter_plus_exit_time_us = from_box->sr_enter_plus_exit_time_us;
- to_box->urgent_latency_us = from_box->urgent_latency_us;
- to_box->writeback_latency_us = from_box->writeback_latency_us;
-}
-
-voltage_scaling_st dml_socbb_voltage_scaling(
- const soc_bounding_box_st *soc,
- enum voltage_state voltage)
-{
- const voltage_scaling_st *voltage_state;
- const voltage_scaling_st * const voltage_end = soc->clock_limits + DC__VOLTAGE_STATES;
-
- for (voltage_state = soc->clock_limits;
- voltage_state < voltage_end && voltage_state->state != voltage;
- voltage_state++) {
- }
-
- if (voltage_state < voltage_end)
- return *voltage_state;
- return soc->clock_limits[DC__VOLTAGE_STATES - 1];
-}
-
-double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage)
-{
- double return_bw;
-
- voltage_scaling_st state = dml_socbb_voltage_scaling(box, voltage);
-
- return_bw = dml_min((double) box->return_bus_width_bytes * state.dcfclk_mhz,
- state.dram_bw_per_chan_gbps * 1000.0 * (double) box->num_chans
- * box->ideal_dram_bw_after_urgent_percent / 100.0);
-
- return_bw = dml_min((double) box->return_bus_width_bytes * state.fabricclk_mhz, return_bw);
-
- return return_bw;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index 562ee189d780..b9d9930a4974 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -61,7 +61,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index 9c4a56c738c0..bf40725f982f 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -82,13 +82,16 @@
DDC_GPIO_I2C_REG_LIST(cd),\
.ddc_setup = 0
-#define DDC_MASK_SH_LIST(mask_sh) \
+#define DDC_MASK_SH_LIST_COMMON(mask_sh) \
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_MODE, mask_sh),\
SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1DATA_PD_EN, mask_sh),\
SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1CLK_PD_EN, mask_sh),\
- SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh)
+
+#define DDC_MASK_SH_LIST(mask_sh) \
+ DDC_MASK_SH_LIST_COMMON(mask_sh),\
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index ab5483c0c502..f20161c5706d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -375,6 +375,7 @@ struct gpio *dal_gpio_create_irq(
case GPIO_ID_GPIO_PAD:
break;
default:
+ id = GPIO_ID_HPD;
ASSERT_CRITICAL(false);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 0caee3523017..83df779984e5 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -43,7 +43,7 @@
#include "dce80/hw_factory_dce80.h"
#include "dce110/hw_factory_dce110.h"
#include "dce120/hw_factory_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/hw_factory_dcn10.h"
#endif
@@ -81,7 +81,7 @@ bool dal_hw_factory_init(
case DCE_VERSION_12_0:
dal_hw_factory_dce120_init(factory);
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
dal_hw_factory_dcn10_init(factory);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 55c707488541..e7541310480b 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -43,7 +43,7 @@
#include "dce80/hw_translate_dce80.h"
#include "dce110/hw_translate_dce110.h"
#include "dce120/hw_translate_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/hw_translate_dcn10.h"
#endif
@@ -78,7 +78,7 @@ bool dal_hw_translate_init(
case DCE_VERSION_12_0:
dal_hw_translate_dce120_init(translate);
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
dal_hw_translate_dcn10_init(translate);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
index 352885cb4d07..a851d07f0190 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
@@ -71,7 +71,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
###############################################################################
# DCN 1.0 family
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
I2CAUX_DCN1 = i2caux_dcn10.o
AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1))
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
index bb526ad326e5..0afd2fa57bbe 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
@@ -128,8 +128,20 @@ static void process_read_reply(
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
+ } else if (ctx->returned_byte < ctx->current_read_length) {
+ ctx->current_read_length -= ctx->returned_byte;
+
+ ctx->offset += ctx->returned_byte;
+
+ ++ctx->invalid_reply_retry_aux_on_ack;
+
+ if (ctx->invalid_reply_retry_aux_on_ack >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ }
} else {
- ctx->current_read_length = ctx->returned_byte;
ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
ctx->transaction_complete = true;
ctx->operation_succeeded = true;
@@ -157,6 +169,10 @@ static void process_read_reply(
ctx->operation_succeeded = false;
}
break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
@@ -215,6 +231,10 @@ static void process_read_request(
* so we should not wait here */
}
break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
@@ -282,7 +302,6 @@ static bool read_command(
ctx.operation_succeeded);
}
- request->payload.length = ctx.reply.length;
return ctx.operation_succeeded;
}
@@ -370,6 +389,10 @@ static void process_write_reply(
ctx->operation_succeeded = false;
}
break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
@@ -422,6 +445,10 @@ static void process_write_request(
* so we should not wait here */
}
break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
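Reviewer note: the short-read branch added to process_read_reply() above retries the remainder of the transfer instead of truncating it, and gives up only after repeated ACKed-but-short replies. A minimal standalone sketch of that accounting, assuming a retry cap of 6 (the actual AUX_INVALID_REPLY_RETRY_COUNTER value is not shown in this hunk; field names follow the patch):

#include <stdbool.h>
#include <stdint.h>

#define AUX_INVALID_REPLY_RETRY_COUNTER 6	/* assumed value */

struct read_ctx {
	uint32_t current_read_length;	/* bytes still expected */
	uint32_t offset;		/* where the next chunk lands */
	uint32_t retries_on_ack;	/* invalid_reply_retry_aux_on_ack */
	bool failed;
};

/* Returns true while another partial read should be issued. */
static bool account_short_read(struct read_ctx *ctx, uint32_t returned)
{
	if (returned >= ctx->current_read_length)
		return false;			/* transfer complete */

	/* Shrink the window to the missing bytes and step past
	 * the bytes that did arrive. */
	ctx->current_read_length -= returned;
	ctx->offset += returned;

	if (++ctx->retries_on_ack > AUX_INVALID_REPLY_RETRY_COUNTER) {
		ctx->failed = true;		/* protocol error */
		return false;
	}
	return true;				/* retry the remainder */
}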
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
index 8e71324ccb10..c33a2898d967 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
@@ -26,46 +26,7 @@
#ifndef __DAL_AUX_ENGINE_H__
#define __DAL_AUX_ENGINE_H__
-enum aux_transaction_type {
- AUX_TRANSACTION_TYPE_DP,
- AUX_TRANSACTION_TYPE_I2C
-};
-
-struct aux_request_transaction_data {
- enum aux_transaction_type type;
- enum i2caux_transaction_action action;
- /* 20-bit AUX channel transaction address */
- uint32_t address;
- /* delay, in 100-microsecond units */
- uint8_t delay;
- uint32_t length;
- uint8_t *data;
-};
-
-enum aux_transaction_reply {
- AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
- AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
- AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
-
- AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
- AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
- AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
-
- AUX_TRANSACTION_REPLY_INVALID = 0xFF
-};
-
-struct aux_reply_transaction_data {
- enum aux_transaction_reply status;
- uint32_t length;
- uint8_t *data;
-};
-
-enum aux_channel_operation_result {
- AUX_CHANNEL_OPERATION_SUCCEEDED,
- AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
- AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
- AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
-};
+#include "dc_ddc_types.h"
struct aux_engine;
@@ -83,6 +44,12 @@ struct aux_engine_funcs {
void (*process_channel_reply)(
struct aux_engine *engine,
struct aux_reply_transaction_data *reply);
+ int (*read_channel_reply)(
+ struct aux_engine *engine,
+ uint32_t size,
+ uint8_t *buffer,
+ uint8_t *reply_result,
+ uint32_t *sw_status);
enum aux_channel_operation_result (*get_channel_status)(
struct aux_engine *engine,
uint8_t *returned_bytes);
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
index e8d3781deaed..8b704ab0471c 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
@@ -97,6 +97,7 @@ struct i2caux *dal_i2caux_dce100_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce100_aux_regs),
dce100_aux_regs,
dce100_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
index 5f47f6c007ac..ae5caa97caca 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -198,27 +198,27 @@ static void submit_channel_request(
((request->type == AUX_TRANSACTION_TYPE_I2C) &&
((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
(request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+ if (REG(AUXN_IMPCAL)) {
+ /* clear_aux_error */
+ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+ 1,
+ 0);
- /* clear_aux_error */
- REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
- 1,
- 0);
-
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
- 1,
- 0);
-
- /* force_default_calibrate */
- REG_UPDATE_1BY1_2(AUXN_IMPCAL,
- AUXN_IMPCAL_ENABLE, 1,
- AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+ 1,
+ 0);
- /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
+ /* force_default_calibrate */
+ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ AUXN_IMPCAL_ENABLE, 1,
+ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
- 1,
- 0);
+ /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+ 1,
+ 0);
+ }
/* set the delay and the number of bytes to write */
/* The length include
@@ -275,55 +275,92 @@ static void submit_channel_request(
REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
}
-static void process_channel_reply(
- struct aux_engine *engine,
- struct aux_reply_transaction_data *reply)
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+ uint8_t *buffer, uint8_t *reply_result,
+ uint32_t *sw_status)
{
struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t bytes_replied;
+ uint32_t reply_result_32;
- /* Need to do a read to get the number of bytes to process
- * Alternatively, this information can be passed -
- * but that causes coupling which isn't good either. */
+ *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+ &bytes_replied);
- uint32_t bytes_replied;
- uint32_t value;
+ /* In case HPD is LOW, exit AUX transaction */
+ if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return -1;
- value = REG_GET(AUX_SW_STATUS,
- AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
+ /* Need at least the status byte */
+ if (!bytes_replied)
+ return -1;
- if (bytes_replied) {
- uint32_t reply_result;
+ REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA_RW, 1);
- REG_UPDATE_1BY1_3(AUX_SW_DATA,
- AUX_SW_INDEX, 0,
- AUX_SW_AUTOINCREMENT_DISABLE, 1,
- AUX_SW_DATA_RW, 1);
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+ reply_result_32 = reply_result_32 >> 4;
+ *reply_result = (uint8_t)reply_result_32;
- REG_GET(AUX_SW_DATA,
- AUX_SW_DATA, &reply_result);
+ if (reply_result_32 == 0) { /* ACK */
+ uint32_t i = 0;
- reply_result = reply_result >> 4;
+ /* First byte was already used to get the command status */
+ --bytes_replied;
- switch (reply_result) {
- case 0: /* ACK */ {
- uint32_t i = 0;
+ /* Do not overflow buffer */
+ if (bytes_replied > size)
+ return -1;
- /* first byte was already used
- * to get the command status */
- --bytes_replied;
+ while (i < bytes_replied) {
+ uint32_t aux_sw_data_val;
- while (i < bytes_replied) {
- uint32_t aux_sw_data_val;
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+ buffer[i] = aux_sw_data_val;
+ ++i;
+ }
- REG_GET(AUX_SW_DATA,
- AUX_SW_DATA, &aux_sw_data_val);
+ return i;
+ }
- reply->data[i] = aux_sw_data_val;
- ++i;
- }
+ return 0;
+}
- reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+static void process_channel_reply(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply)
+{
+ int bytes_replied;
+ uint8_t reply_result;
+ uint32_t sw_status;
+
+ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+ &reply_result, &sw_status);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+ return;
+ }
+
+ if (bytes_replied < 0) {
+ /* Need to handle an error case...
+ * Ideally, the upper-layer function won't call this function if
+ * the number of bytes in the reply was 0, because an error was
+ * surely asserted that should have been handled already; in the
+ * hot-plug case this can still happen.
+ */
+ if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ ASSERT_CRITICAL(false);
+ return;
}
+ } else {
+
+ switch (reply_result) {
+ case 0: /* ACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
break;
case 1: /* NACK */
reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
@@ -340,15 +377,6 @@ static void process_channel_reply(
default:
reply->status = AUX_TRANSACTION_REPLY_INVALID;
}
- } else {
- /* Need to handle an error case...
- * hopefully, upper layer function won't call this function
- * if the number of bytes in the reply was 0
- * because there was surely an error that was asserted
- * that should have been handled
- * for hot plug case, this could happens*/
- if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
- ASSERT_CRITICAL(false);
}
}
@@ -371,6 +399,10 @@ static enum aux_channel_operation_result get_channel_status(
value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
10, aux110->timeout_period/10);
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
/* Note that the following bits are set in 'status.bits'
* during CTS 4.2.1.2 (FW 3.3.1):
* AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
@@ -402,10 +434,10 @@ static enum aux_channel_operation_result get_channel_status(
return AUX_CHANNEL_OPERATION_SUCCEEDED;
}
} else {
- /*time_elapsed >= aux_engine->timeout_period */
- if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
- ASSERT_CRITICAL(false);
-
+ /* time_elapsed >= aux_engine->timeout_period;
+ * the HPD_DISCON case was already returned above, so a timeout
+ * here is a genuine failure
+ */
+ ASSERT_CRITICAL(false);
return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
}
}
@@ -415,6 +447,7 @@ static const struct aux_engine_funcs aux_engine_funcs = {
.acquire_engine = acquire_engine,
.submit_channel_request = submit_channel_request,
.process_channel_reply = process_channel_reply,
+ .read_channel_reply = read_channel_reply,
.get_channel_status = get_channel_status,
.is_engine_available = is_engine_available,
};
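Reviewer note: read_channel_reply() now returns the payload byte count (>= 0) or -1 on failure, reporting the raw AUX reply code and the SW status through out-parameters, while process_channel_reply() becomes a thin translator on top of it. A hypothetical caller, to illustrate the contract (this fragment leans on the kernel types above and is a sketch, not code from the patch):

static bool fetch_reply_sketch(struct aux_engine *engine)
{
	uint8_t buf[16];
	uint8_t reply_result;
	uint32_t sw_status;
	int n;

	n = engine->funcs->read_channel_reply(engine, sizeof(buf), buf,
					      &reply_result, &sw_status);

	if (sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)
		return false;	/* sink unplugged mid-transaction: abort */
	if (n < 0)
		return false;	/* no status byte, or reply overflowed buf */

	/* reply_result == 0 is an AUX ACK: n payload bytes are in buf */
	return reply_result == 0;
}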
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
index b7256f595052..9cbe1a7a6bcb 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -62,12 +62,7 @@ enum dc_i2c_arbitration {
DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
};
-enum {
- /* No timeout in HW
- * (timeout implemented in SW by querying status) */
- I2C_SETUP_TIME_LIMIT = 255,
- I2C_HW_BUFFER_SIZE = 538
-};
+
/*
* @brief
@@ -152,6 +147,11 @@ static bool setup_engine(
struct i2c_engine *i2c_engine)
{
struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+ uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
+ uint32_t reset_length = 0;
+
+ if (hw_engine->base.base.setup_limit != 0)
+ i2c_setup_limit = hw_engine->base.base.setup_limit;
/* Program pin select */
REG_UPDATE_6(
@@ -164,11 +164,15 @@ static bool setup_engine(
DC_I2C_DDC_SELECT, hw_engine->engine_id);
/* Program time limit */
- REG_UPDATE_N(
- SETUP, 2,
- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), I2C_SETUP_TIME_LIMIT,
- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-
+ if (hw_engine->base.base.send_reset_length == 0) {
+ /*pre-dcn*/
+ REG_UPDATE_N(
+ SETUP, 2,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
+ } else {
+ reset_length = hw_engine->base.base.send_reset_length;
+ }
/* Program HW priority
* set to High - interrupt software I2C at any time
* Enable restart of SW I2C that was interrupted by HW
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
index 5bb04085f670..fea2946906ed 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
@@ -192,6 +192,7 @@ struct i2c_hw_engine_dce110 {
/* number of pending transactions (before GO) */
uint32_t transaction_count;
uint32_t engine_keep_power_up_count;
+ uint32_t i2_setup_time_limit;
};
struct i2c_hw_engine_dce110_create_arg {
@@ -207,4 +208,11 @@ struct i2c_hw_engine_dce110_create_arg {
struct i2c_engine *dal_i2c_hw_engine_dce110_create(
const struct i2c_hw_engine_dce110_create_arg *arg);
+enum {
+ I2C_SETUP_TIME_LIMIT_DCE = 255,
+ I2C_SETUP_TIME_LIMIT_DCN = 3,
+ I2C_HW_BUFFER_SIZE = 538,
+ I2C_SEND_RESET_LENGTH_9 = 9,
+ I2C_SEND_RESET_LENGTH_10 = 10,
+};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
index 2a047f8ca0e9..1d748ac1d6d6 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
@@ -43,6 +43,9 @@
#include "i2c_sw_engine_dce110.h"
#include "i2c_hw_engine_dce110.h"
#include "aux_engine_dce110.h"
+#include "../../dc.h"
+#include "dc_types.h"
+
/*
* Post-requisites: headers required by this unit
@@ -199,6 +202,7 @@ static const struct dce110_i2c_hw_engine_mask i2c_mask = {
void dal_i2caux_dce110_construct(
struct i2caux_dce110 *i2caux_dce110,
struct dc_context *ctx,
+ unsigned int num_i2caux_inst,
const struct dce110_aux_registers aux_regs[],
const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
const struct dce110_i2c_hw_engine_shift *i2c_shift,
@@ -249,9 +253,22 @@ void dal_i2caux_dce110_construct(
base->i2c_hw_engines[line_id] =
dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
-
+ if (base->i2c_hw_engines[line_id] != NULL) {
+ switch (ctx->dce_version) {
+ case DCN_VERSION_1_0:
+ base->i2c_hw_engines[line_id]->setup_limit =
+ I2C_SETUP_TIME_LIMIT_DCN;
+ base->i2c_hw_engines[line_id]->send_reset_length = 0;
+ break;
+ default:
+ base->i2c_hw_engines[line_id]->setup_limit =
+ I2C_SETUP_TIME_LIMIT_DCE;
+ base->i2c_hw_engines[line_id]->send_reset_length = 0;
+ break;
+ }
+ }
++i;
- } while (i < ARRAY_SIZE(hw_ddc_lines));
+ } while (i < num_i2caux_inst);
/* Create AUX engines for all lines which has assisted HW AUX
* 'i' (loop counter) used as DDC/AUX engine_id */
@@ -272,7 +289,7 @@ void dal_i2caux_dce110_construct(
dal_aux_engine_dce110_create(&aux_init_data);
++i;
- } while (i < ARRAY_SIZE(hw_aux_lines));
+ } while (i < num_i2caux_inst);
/*TODO Generic I2C SW and HW*/
}
@@ -303,6 +320,7 @@ struct i2caux *dal_i2caux_dce110_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce110_aux_regs),
dce110_aux_regs,
i2c_hw_engine_regs,
&i2c_shift,
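Reviewer note: the construct path now stamps every HW I2C engine with a per-ASIC setup limit, which setup_engine() in i2c_hw_engine_dce110.c prefers over the I2C_SETUP_TIME_LIMIT_DCE default. Condensed, the selection is just (constants from the patch; why DCN needs 3 rather than 255 is not stated here, presumably a difference in timer granularity):

static uint32_t pick_setup_limit(enum dce_version v)
{
	return (v == DCN_VERSION_1_0) ? I2C_SETUP_TIME_LIMIT_DCN  /* 3 */
				      : I2C_SETUP_TIME_LIMIT_DCE; /* 255 */
}

Note that both branches currently set send_reset_length to 0, so the reset_length path in setup_engine() stays dormant for now.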
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
index 1b1f71c60ac9..d3d8cc58666a 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
@@ -45,6 +45,7 @@ struct i2caux *dal_i2caux_dce110_create(
void dal_i2caux_dce110_construct(
struct i2caux_dce110 *i2caux_dce110,
struct dc_context *ctx,
+ unsigned int num_i2caux_inst,
const struct dce110_aux_registers *aux_regs,
const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
const struct dce110_i2c_hw_engine_shift *i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
index dafc1a727f7f..a9db04738724 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
@@ -93,6 +93,7 @@ static void construct(
{
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce112_aux_regs),
dce112_aux_regs,
dce112_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
index 0e7b18260027..6a4f344c1db4 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dce120_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce120_aux_regs),
dce120_aux_regs,
dce120_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
index e44a8901f38b..a59c1f50c1e8 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dcn10_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dcn10_aux_regs),
dcn10_aux_regs,
dcn10_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
index 33de8a8834dc..b16fb1ff687d 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
@@ -26,6 +26,8 @@
#ifndef __DAL_ENGINE_H__
#define __DAL_ENGINE_H__
+#include "dc_ddc_types.h"
+
enum i2caux_transaction_operation {
I2CAUX_TRANSACTION_READ,
I2CAUX_TRANSACTION_WRITE
@@ -53,7 +55,8 @@ enum i2caux_transaction_status {
I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
- I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+ I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
};
struct i2caux_transaction_request {
@@ -75,19 +78,6 @@ enum i2c_default_speed {
I2CAUX_DEFAULT_I2C_SW_SPEED = 50
};
-enum i2caux_transaction_action {
- I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
- I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
- I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
-
- I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
- I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
- I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
-
- I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
- I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
-};
-
struct engine;
struct engine_funcs {
@@ -106,6 +96,7 @@ struct engine_funcs {
struct engine {
const struct engine_funcs *funcs;
+ uint32_t inst;
struct ddc *ddc;
struct dc_context *ctx;
};
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
index 58fc0f25eceb..ded6ea34b714 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
@@ -86,6 +86,8 @@ struct i2c_engine {
struct engine base;
const struct i2c_engine_funcs *funcs;
uint32_t timeout_delay;
+ uint32_t setup_limit;
+ uint32_t send_reset_length;
};
void dal_i2c_engine_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index 14dc8c94d862..f7ed355fc84f 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -59,7 +59,7 @@
#include "dce120/i2caux_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/i2caux_dcn10.h"
#endif
@@ -91,7 +91,7 @@ struct i2caux *dal_i2caux_create(
return dal_i2caux_dce100_create(ctx);
case DCE_VERSION_12_0:
return dal_i2caux_dce120_create(ctx);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
return dal_i2caux_dcn10_create(ctx);
#endif
@@ -254,7 +254,6 @@ bool dal_i2caux_submit_aux_command(
break;
}
- cmd->payloads->length = request.payload.length;
++index_of_payload;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index a94942d4e66b..9f33306f9014 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -33,7 +33,7 @@
#include "dc_bios_types.h"
#include "mem_input.h"
#include "hubp.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "mpc.h"
#endif
@@ -138,7 +138,7 @@ struct resource_pool {
struct output_pixel_processor *opps[MAX_PIPES];
struct timing_generator *timing_generators[MAX_PIPES];
struct stream_encoder *stream_enc[MAX_PIPES * 2];
-
+ struct aux_engine *engines[MAX_PIPES];
struct hubbub *hubbub;
struct mpc *mpc;
struct pp_smu_funcs_rv *pp_smu;
@@ -162,7 +162,7 @@ struct resource_pool {
unsigned int audio_count;
struct audio_support audio_support;
- struct display_clock *display_clock;
+ struct dccg *dccg;
struct irq_service *irqs;
struct abm *abm;
@@ -221,7 +221,7 @@ struct pipe_ctx {
struct pipe_ctx *top_pipe;
struct pipe_ctx *bottom_pipe;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
struct _vcs_dpi_display_dlg_regs_st dlg_regs;
struct _vcs_dpi_display_ttu_regs_st ttu_regs;
struct _vcs_dpi_display_rq_regs_st rq_regs;
@@ -255,8 +255,7 @@ struct dce_bw_output {
};
struct dcn_bw_output {
- struct dc_clocks cur_clk;
- struct dc_clocks calc_clk;
+ struct dc_clocks clk;
struct dcn_watermark_set watermarks;
};
@@ -277,11 +276,11 @@ struct dc_state {
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
- struct display_clock *dis_clk;
+ struct dccg *dis_clk;
struct kref refcount;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 30b3a08b91be..538b83303b86 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,22 +102,13 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-enum ddc_result dal_ddc_service_read_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- uint8_t *data,
- uint32_t len,
- uint32_t *read);
-
-enum ddc_result dal_ddc_service_write_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- const uint8_t *data,
- uint32_t len);
+int dc_link_aux_transfer(struct ddc_service *ddc,
+ unsigned int address,
+ uint8_t *reply,
+ void *buffer,
+ unsigned int size,
+ enum aux_transaction_type type,
+ enum i2caux_transaction_action action);
void dal_ddc_service_write_scdc_data(
struct ddc_service *ddc_service,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 2f783c650084..a37255c757e0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -33,9 +33,10 @@ struct dc_link;
struct dc_stream_state;
struct dc_link_settings;
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting);
+ struct dc_link_settings *known_limit_link_setting,
+ int *fail_count);
bool dp_validate_mode_timing(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 132d18d4b293..ddbb673caa08 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -625,7 +625,7 @@ bool dcn_validate_bandwidth(
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
- struct clocks_value *clocks);
+ struct dc_clocks *clocks);
void dcn_bw_update_from_pplib(struct dc *dc);
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
new file mode 100644
index 000000000000..e79cd4e92919
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_H__
+#define __DAL_AUX_ENGINE_H__
+
+#include "dc_ddc_types.h"
+#include "include/i2caux_interface.h"
+
+enum i2caux_transaction_operation {
+ I2CAUX_TRANSACTION_READ,
+ I2CAUX_TRANSACTION_WRITE
+};
+
+enum i2caux_transaction_address_space {
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+ enum i2caux_transaction_address_space address_space;
+ uint32_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+ I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+ I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
+};
+
+struct i2caux_transaction_request {
+ enum i2caux_transaction_operation operation;
+ struct i2caux_transaction_payload payload;
+ enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+ I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+ I2CAUX_ENGINE_TYPE_AUX,
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+ I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+ I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+};
+
+union aux_config;
+
+struct aux_engine {
+ uint32_t inst;
+ struct ddc *ddc;
+ struct dc_context *ctx;
+ const struct aux_engine_funcs *funcs;
+ /* following values are expressed in milliseconds */
+ uint32_t delay;
+ uint32_t max_defer_write_retry;
+ bool acquire_reset;
+};
+
+struct read_command_context {
+ uint8_t *buffer;
+ uint32_t current_read_length;
+ uint32_t offset;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t invalid_reply_retry_aux_on_ack;
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+struct write_command_context {
+ bool mot;
+
+ uint8_t *buffer;
+ uint32_t current_write_length;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t max_defer_retry;
+ uint32_t ack_m_retry;
+
+ uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+
+struct aux_engine_funcs {
+ void (*destroy)(
+ struct aux_engine **ptr);
+ bool (*acquire_engine)(
+ struct aux_engine *engine);
+ void (*configure)(
+ struct aux_engine *engine,
+ union aux_config cfg);
+ void (*submit_channel_request)(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request);
+ void (*process_channel_reply)(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply);
+ int (*read_channel_reply)(
+ struct aux_engine *engine,
+ uint32_t size,
+ uint8_t *buffer,
+ uint8_t *reply_result,
+ uint32_t *sw_status);
+ enum aux_channel_operation_result (*get_channel_status)(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes);
+ bool (*is_engine_available)(struct aux_engine *engine);
+ enum i2caux_engine_type (*get_engine_type)(
+ const struct aux_engine *engine);
+ bool (*acquire)(
+ struct aux_engine *engine,
+ struct ddc *ddc);
+ bool (*submit_request)(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction);
+ void (*release_engine)(
+ struct aux_engine *engine);
+ void (*destroy_engine)(
+ struct aux_engine **engine);
+};
+#endif
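Reviewer note: this new inc/hw/aux_engine.h clones the transaction types out of i2caux so that a resource pool can own struct aux_engine instances directly (see the engines[MAX_PIPES] member added to struct resource_pool further down). A hypothetical DPCD write through the ops table, to show the intended call shape (the helper itself is a sketch; only the types and funcs come from this header):

static bool aux_write_dpcd_sketch(struct aux_engine *engine,
				  struct ddc *ddc,
				  uint32_t address,
				  uint8_t *data, uint32_t len)
{
	struct i2caux_transaction_request req = {
		.operation = I2CAUX_TRANSACTION_WRITE,
		.payload = {
			.address_space =
				I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD,
			.address = address,
			.length = len,
			.data = data,
		},
	};
	bool ok = false;

	if (engine->funcs->acquire(engine, ddc)) {
		/* middle_of_transaction = false: end of the burst */
		ok = engine->funcs->submit_request(engine, &req, false);
		engine->funcs->release_engine(engine);
	}
	return ok;
}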
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
index f5f69cd81f6f..3c7ccb68ecdb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
@@ -27,23 +27,7 @@
#define __DISPLAY_CLOCK_H__
#include "dm_services_types.h"
-
-
-struct clocks_value {
- int dispclk_in_khz;
- int max_pixelclk_in_khz;
- int max_non_dp_phyclk_in_khz;
- int max_dp_phyclk_in_khz;
- bool dispclk_notify_pplib_done;
- bool pixelclk_notify_pplib_done;
- bool phyclk_notigy_pplib_done;
- int dcfclock_in_khz;
- int dppclk_in_khz;
- int mclk_in_khz;
- int phyclk_in_khz;
- int common_vdd_level;
-};
-
+#include "dc.h"
/* Structure containing all state-dependent clocks
* (dependent on "enum clocks_state") */
@@ -52,34 +36,23 @@ struct state_dependent_clocks {
int pixel_clk_khz;
};
-struct display_clock {
+struct dccg {
struct dc_context *ctx;
const struct display_clock_funcs *funcs;
enum dm_pp_clocks_state max_clks_state;
enum dm_pp_clocks_state cur_min_clks_state;
- struct clocks_value cur_clocks_value;
+ struct dc_clocks clks;
};
struct display_clock_funcs {
- int (*set_clock)(struct display_clock *disp_clk,
+ void (*update_clocks)(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower);
+ int (*set_dispclk)(struct dccg *dccg,
int requested_clock_khz);
- enum dm_pp_clocks_state (*get_required_clocks_state)(
- struct display_clock *disp_clk,
- struct state_dependent_clocks *req_clocks);
-
- bool (*set_min_clocks_state)(struct display_clock *disp_clk,
- enum dm_pp_clocks_state dm_pp_clocks_state);
-
- int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
-
- bool (*apply_clock_voltage_request)(
- struct display_clock *disp_clk,
- enum dm_pp_clock_type clocks_type,
- int clocks_in_khz,
- bool pre_mode_set,
- bool update_dp_phyclk);
+ int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
};
#endif /* __DISPLAY_CLOCK_H__ */
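Reviewer note: folding the old per-request vtable into update_clocks(dccg, new_clocks, safe_to_lower) centralizes the usual raise-early/lower-late policy: callers pass safe_to_lower = false before a mode change (clocks may only rise) and true once the new configuration is committed. A sketch of the idiom for a single clock (whether the driver factors it into a helper like this is not shown in this hunk; dispclk_khz is assumed to be a dc_clocks field):

/* Raise eagerly; lower only when the caller says it is safe. */
static bool should_set_clock(bool safe_to_lower, int new_khz, int cur_khz)
{
	return new_khz > cur_khz || (safe_to_lower && new_khz < cur_khz);
}

/* ...inside a hypothetical update_clocks() implementation: */
if (should_set_clock(safe_to_lower,
		     new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
	dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
	dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
}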
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index de60f940030d..4550747fb61c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -48,7 +48,7 @@ struct dmcu_funcs {
const char *src,
unsigned int bytes);
void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
- void (*setup_psr)(struct dmcu *dmcu,
+ bool (*setup_psr)(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context);
void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 582458f028f8..74ad94b0e4f0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -151,6 +151,9 @@ struct dpp_funcs {
void (*dpp_set_hdr_multiplier)(
struct dpp *dpp_base,
uint32_t multiplier);
+ void (*set_optional_cursor_attributes)(
+ struct dpp *dpp_base,
+ struct dpp_cursor_attributes *attr);
void (*dpp_dppclk_control)(
struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 97df82cddf82..4f3f9e68ccfa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -43,10 +43,9 @@ enum cursor_lines_per_chunk {
};
struct hubp {
- struct hubp_funcs *funcs;
+ const struct hubp_funcs *funcs;
struct dc_context *ctx;
struct dc_plane_address request_address;
- struct dc_plane_address current_address;
int inst;
/* run time states */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 47f1dc5a43b7..da89c2edb07c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -64,7 +64,7 @@ struct stutter_modes {
};
struct mem_input {
- struct mem_input_funcs *funcs;
+ const struct mem_input_funcs *funcs;
struct dc_context *ctx;
struct dc_plane_address request_address;
struct dc_plane_address current_address;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 69cb0a105300..af700c7dac50 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -156,6 +156,9 @@ struct timing_generator_funcs {
uint32_t *v_blank_end,
uint32_t *h_position,
uint32_t *v_position);
+ bool (*get_otg_active_size)(struct timing_generator *optc,
+ uint32_t *otg_active_width,
+ uint32_t *otg_active_height);
void (*set_early_control)(struct timing_generator *tg,
uint32_t early_cntl);
void (*wait_for_state)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 63fc6c499789..a14ce4de80b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -44,6 +44,7 @@ struct dce_hwseq_wa {
bool blnd_crtc_trigger;
bool DEGVIDCN10_253;
bool false_optc_underflow;
+ bool DEGVIDCN10_254;
};
struct hwseq_wa_state {
@@ -101,10 +102,18 @@ struct hw_sequencer_funcs {
const struct dc *dc,
struct pipe_ctx *pipe_ctx);
+ void (*plane_atomic_disconnect)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
void (*update_dchub)(
struct dce_hwseq *hws,
struct dchub_init_data *dh_data);
+ void (*update_mpcc)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
void (*update_pending_status)(
struct pipe_ctx *pipe_ctx);
@@ -154,20 +163,24 @@ struct hw_sequencer_funcs {
struct dc_link_settings *link_settings);
void (*blank_stream)(struct pipe_ctx *pipe_ctx);
+
+ void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
+
+ void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx, int option);
+
void (*pipe_control_lock)(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
void (*blank_pixel_data)(
struct dc *dc,
- struct stream_resource *stream_res,
- struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
bool blank);
void (*set_bandwidth)(
struct dc *dc,
struct dc_state *context,
- bool decrease_allowed);
+ bool safe_to_lower);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
int vmin, int vmax);
@@ -210,6 +223,7 @@ struct hw_sequencer_funcs {
void (*set_cursor_position)(struct pipe_ctx *pipe);
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+ void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 3306e7b0b3e3..cf5a84b9e27c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -445,4 +445,50 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
+
+
+/* indirect register access */
+
+#define IX_REG_SET_N(index_reg_name, data_reg_name, index, n, initial_val, ...) \
+ generic_indirect_reg_update_ex(CTX, \
+ REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
+ initial_val, \
+ n, __VA_ARGS__)
+
+#define IX_REG_SET_2(index_reg_name, data_reg_name, index, init_value, f1, v1, f2, v2) \
+ IX_REG_SET_N(index_reg_name, data_reg_name, index, 2, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+
+#define IX_REG_READ(index_reg_name, data_reg_name, index) \
+ generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index))
+
+
+
+#define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) \
+ generic_indirect_reg_update_ex(CTX, \
+ REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
+ IX_REG_READ(index_reg_name, data_reg_name, index), \
+ n, __VA_ARGS__)
+
+#define IX_REG_UPDATE_2(index_reg_name, data_reg_name, index, f1, v1, f2, v2) \
+ IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, 2,\
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+void generic_write_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t data);
+
+uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index);
+
+uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
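Reviewer note: these indirect-access helpers mirror the existing REG_SET/REG_UPDATE family; the index register selects which internal register the data register aliases, and generic_indirect_reg_update_ex() performs the field merge. Hypothetical usage, assuming a block whose register list provides CURSOR_INDEX/CURSOR_DATA, an IND_REG(CURSOR0_CONTROL) entry, and the usual REG()/FN() environment (all names illustrative, none from the patch):

/* Write two fields, starting from a known initial value (no read). */
IX_REG_SET_2(CURSOR_INDEX, CURSOR_DATA, CURSOR0_CONTROL, 0,
	     CURSOR_ENABLE, 1,
	     CURSOR_MODE, 2);

/* Read-modify-write variant: fetches the current value via
 * IX_REG_READ before merging the fields. */
IX_REG_UPDATE_2(CURSOR_INDEX, CURSOR_DATA, CURSOR0_CONTROL,
		CURSOR_ENABLE, 0,
		CURSOR_MODE, 0);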
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 640a647f4611..5b321008b0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -38,6 +38,7 @@ enum dce_version resource_parse_asic_id(
struct resource_caps {
int num_timing_generator;
+ int num_opp;
int num_video_plane;
int num_audio;
int num_stream_encoder;
@@ -102,6 +103,11 @@ void resource_reference_clock_source(
const struct resource_pool *pool,
struct clock_source *clock_source);
+int resource_get_clock_source_reference(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source);
+
bool resource_are_streams_timing_synchronizable(
struct dc_stream_state *stream1,
struct dc_stream_state *stream2);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 498515aad4a5..a76ee600ecee 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -60,7 +60,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
IRQ_DCN1 = irq_service_dcn10.o
AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index dcdfa0f01551..ae3fd0a235ba 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -36,7 +36,7 @@
#include "dce120/irq_service_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/irq_service_dcn10.h"
#endif
@@ -78,7 +78,7 @@ const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
- if (source > DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
+ if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
return NULL;
return &irq_service->info[source];
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index a407892905af..c9fce9066ad8 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -48,7 +48,7 @@
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include <asm/fpu/api.h>
#endif
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 019e7a095ea1..d968956a10cd 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -40,7 +40,8 @@ enum ddc_result {
DDC_RESULT_FAILED_INCOMPLETE,
DDC_RESULT_FAILED_OPERATION,
DDC_RESULT_FAILED_INVALID_OPERATION,
- DDC_RESULT_FAILED_BUFFER_OVERFLOW
+ DDC_RESULT_FAILED_BUFFER_OVERFLOW,
+ DDC_RESULT_FAILED_HPD_DISCON
};
enum ddc_service_type {
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index d8e52e3b8e3c..1c66166d0a94 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -27,6 +27,9 @@
#define __DAL_DPCD_DEFS_H__
#include <drm/drm_dp_helper.h>
+#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_SINK_HW_REVISION_START 0x409
+#endif
enum dpcd_revision {
DPCD_REV_10 = 0x10,
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index a981b3e99ab3..52a73332befb 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -26,6 +26,13 @@
#ifndef __DAL_FIXED31_32_H__
#define __DAL_FIXED31_32_H__
+#ifndef LLONG_MAX
+#define LLONG_MAX 9223372036854775807ll
+#endif
+#ifndef LLONG_MIN
+#define LLONG_MIN (-LLONG_MAX - 1ll)
+#endif
+
#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
#ifndef LLONG_MIN
#define LLONG_MIN (1LL<<63)
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 36bbad594267..f312834fef50 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -395,6 +395,8 @@ struct integrated_info {
struct i2c_reg_info dp3_ext_hdmi_reg_settings[9];
unsigned char dp3_ext_hdmi_6g_reg_num;
struct i2c_reg_info dp3_ext_hdmi_6g_reg_settings[3];
+ /* V11 */
+ uint32_t dp_ss_control;
};
/**
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
index 2941b882b0b6..58bb42ed85ca 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
@@ -37,6 +37,10 @@
* ********************************************************************
*/
+#define MAX_CONNECTOR_NUMBER_PER_SLOT (16)
+#define MAX_BOARD_SLOTS (4)
+#define INVALID_CONNECTOR_INDEX ((unsigned int)(-1))
+
/* HPD unit id - HW direct translation */
enum hpd_source_id {
HPD_SOURCEID1 = 0,
@@ -136,5 +140,47 @@ enum sync_source {
SYNC_SOURCE_DUAL_GPU_PIN
};
+/* connector sizes in millimeters - from BiosParserTypes.hpp */
+#define CONNECTOR_SIZE_DVI 40
+#define CONNECTOR_SIZE_VGA 32
+#define CONNECTOR_SIZE_HDMI 16
+#define CONNECTOR_SIZE_DP 16
+#define CONNECTOR_SIZE_MINI_DP 9
+#define CONNECTOR_SIZE_UNKNOWN 30
+
+enum connector_layout_type {
+ CONNECTOR_LAYOUT_TYPE_UNKNOWN,
+ CONNECTOR_LAYOUT_TYPE_DVI_D,
+ CONNECTOR_LAYOUT_TYPE_DVI_I,
+ CONNECTOR_LAYOUT_TYPE_VGA,
+ CONNECTOR_LAYOUT_TYPE_HDMI,
+ CONNECTOR_LAYOUT_TYPE_DP,
+ CONNECTOR_LAYOUT_TYPE_MINI_DP,
+};
+struct connector_layout_info {
+ struct graphics_object_id connector_id;
+ enum connector_layout_type connector_type;
+ unsigned int length;
+ unsigned int position; /* offset in mm from right side of the board */
+};
+
+/* length and width in mm */
+struct slot_layout_info {
+ unsigned int length;
+ unsigned int width;
+ unsigned int num_of_connectors;
+ struct connector_layout_info connectors[MAX_CONNECTOR_NUMBER_PER_SLOT];
+};
+
+struct board_layout_info {
+ unsigned int num_of_slots;
+ /* indicates valid information in bracket layout structure. */
+ unsigned int is_number_of_slots_valid : 1;
+ unsigned int is_slots_size_valid : 1;
+ unsigned int is_connector_offsets_valid : 1;
+ unsigned int is_connector_lengths_valid : 1;
+
+ struct slot_layout_info slots[MAX_BOARD_SLOTS];
+};
#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index c4197432eb7c..33b3d755fe65 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -197,6 +197,11 @@ enum transmitter_color_depth {
TRANSMITTER_COLOR_DEPTH_48 /* 16 bits */
};
+enum dp_alt_mode {
+ DP_Alt_mode__Unknown = 0,
+ DP_Alt_mode__Connect,
+ DP_Alt_mode__NoConnect,
+};
/*
*****************************************************************************
* graphics_object_id struct
@@ -287,4 +292,15 @@ static inline enum engine_id dal_graphics_object_id_get_engine_id(
return (enum engine_id) id.id;
return ENGINE_ID_UNKNOWN;
}
+
+static inline bool dal_graphics_object_id_equal(
+ struct graphics_object_id id_1,
+ struct graphics_object_id id_2)
+{
+ if ((id_1.id == id_2.id) && (id_1.enum_id == id_2.enum_id) &&
+ (id_1.type == id_2.type)) {
+ return true;
+ }
+ return false;
+}
#endif
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index dc98d6d4b2bd..e3c79616682d 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,47 +40,7 @@ struct dc_state;
*
*/
-struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask);
-
-uint32_t dal_logger_destroy(struct dal_logger **logger);
-
-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn);
-
-void dm_logger_write(
- struct dal_logger *logger,
- enum dc_log_type log_type,
- const char *msg,
- ...);
-
-void dm_logger_append(
- struct log_entry *entry,
- const char *msg,
- ...);
-
-void dm_logger_append_va(
- struct log_entry *entry,
- const char *msg,
- va_list args);
-
-void dm_logger_open(
- struct dal_logger *logger,
- struct log_entry *entry,
- enum dc_log_type log_type);
-
-void dm_logger_close(struct log_entry *entry);
-
-void dc_conn_log(struct dc_context *ctx,
- const struct dc_link *link,
- uint8_t *hex_data,
- int hex_data_count,
- enum dc_log_type event,
- const char *msg,
- ...);
-
-void logger_write(struct dal_logger *logger,
- enum dc_log_type log_type,
- const char *msg,
- void *paralist);
+void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
void pre_surface_trace(
struct dc *dc,
@@ -106,28 +66,31 @@ void context_clock_trace(
* marked by this macro.
* Note that the message will be printed exactly once for every function
* it is used in order to avoid repeating of the same message. */
+
#define DAL_LOGGER_NOT_IMPL(fmt, ...) \
-{ \
- static bool print_not_impl = true; \
-\
- if (print_not_impl == true) { \
- print_not_impl = false; \
- dm_logger_write(ctx->logger, LOG_WARNING, \
- "DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
- } \
-}
+ do { \
+ static bool print_not_impl = true; \
+ if (print_not_impl == true) { \
+ print_not_impl = false; \
+ DRM_WARN("DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
+ } \
+ } while (0)
/******************************************************************************
* Convenience macros to save on typing.
*****************************************************************************/
#define DC_ERROR(...) \
- dm_logger_write(dc_ctx->logger, LOG_ERROR, \
- __VA_ARGS__)
+ do { \
+ (void)(dc_ctx); \
+ DC_LOG_ERROR(__VA_ARGS__); \
+ } while (0)
#define DC_SYNC_INFO(...) \
- dm_logger_write(dc_ctx->logger, LOG_SYNC, \
- __VA_ARGS__)
+ do { \
+ (void)(dc_ctx); \
+ DC_LOG_SYNC(__VA_ARGS__); \
+ } while (0)
/* Connectivity log format:
* [time stamp] [drm] [Major_minor] [connector name] message.....
@@ -137,20 +100,30 @@ void context_clock_trace(
*/
#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
- dc_conn_log(link->ctx, link, hex_data, hex_len, \
- LOG_EVENT_DETECTION, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ dc_conn_log_hex_linux(hex_data, hex_len); \
+ DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
+ } while (0)
#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
- dc_conn_log(link->ctx, link, hex_data, hex_len, \
- LOG_EVENT_LINK_LOSS, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ dc_conn_log_hex_linux(hex_data, hex_len); \
+ DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
+ } while (0)
#define CONN_MSG_LT(link, ...) \
- dc_conn_log(link->ctx, link, NULL, 0, \
- LOG_EVENT_LINK_TRAINING, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ DC_LOG_EVENT_LINK_TRAINING(__VA_ARGS__); \
+ } while (0)
#define CONN_MSG_MODE(link, ...) \
- dc_conn_log(link->ctx, link, NULL, 0, \
- LOG_EVENT_MODE_SET, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ DC_LOG_EVENT_MODE_SET(__VA_ARGS__); \
+ } while (0)
/*
* Display Test Next logging
@@ -165,38 +138,21 @@ void context_clock_trace(
dm_dtn_log_end(dc_ctx)
#define PERFORMANCE_TRACE_START() \
- unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx); \
- unsigned long long perf_trc_start_log_msk = dc->ctx->logger->mask; \
- unsigned int perf_trc_start_log_flags = dc->ctx->logger->flags.value; \
- if (dc->debug.performance_trace) {\
- dm_logger_flush_buffer(dc->ctx->logger, false);\
- dc->ctx->logger->mask = 1<<LOG_PERF_TRACE;\
- dc->ctx->logger->flags.bits.ENABLE_CONSOLE = 0;\
- dc->ctx->logger->flags.bits.ENABLE_BUFFER = 1;\
- }
-
-#define PERFORMANCE_TRACE_END() do {\
- unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx);\
- if (dc->debug.performance_trace) {\
- dm_logger_write(dc->ctx->logger, \
- LOG_PERF_TRACE, \
- "%s duration: %d ticks\n", __func__,\
+ unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx)
+
+#define PERFORMANCE_TRACE_END() \
+ do { \
+ unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx); \
+ if (dc->debug.performance_trace) { \
+ DC_LOG_PERF_TRACE("%s duration: %lld ticks\n", __func__, \
perf_trc_end_stmp - perf_trc_start_stmp); \
- if (perf_trc_start_log_msk != 1<<LOG_PERF_TRACE) {\
- dc->ctx->logger->mask = perf_trc_start_log_msk;\
- dc->ctx->logger->flags.value = perf_trc_start_log_flags;\
- dm_logger_flush_buffer(dc->ctx->logger, false);\
} \
- } \
-} while (0)
+ } while (0)
-#define DISPLAY_STATS_BEGIN(entry) \
- dm_logger_open(dc->ctx->logger, &entry, LOG_DISPLAYSTATS)
+#define DISPLAY_STATS_BEGIN(entry) (void)(entry)
-#define DISPLAY_STATS(msg, ...) \
- dm_logger_append(&log_entry, msg, ##__VA_ARGS__)
+#define DISPLAY_STATS(msg, ...) DC_LOG_PERF_TRACE(msg, __VA_ARGS__)
-#define DISPLAY_STATS_END(entry) \
- dm_logger_close(&entry)
+#define DISPLAY_STATS_END(entry) (void)(entry)
#endif /* __DAL_LOGGER_INTERFACE_H__ */
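Reviewer note: rewrapping these statement macros in do { ... } while (0) is the standard fix that lets them expand as exactly one statement; the old bare-brace form breaks under a brace-less if/else. A generic illustration (log_one/count_one are made-up helpers, not code from the patch):

#define LOG_BAD(x)  { log_one(x); count_one(x); }		/* old style */
#define LOG_GOOD(x) do { log_one(x); count_one(x); } while (0)

if (err)
	LOG_GOOD(err);	/* one statement: the else below still binds */
else
	recover();

/* With LOG_BAD, the ';' after the expanded block would terminate the
 * if, leaving the else orphaned -- a compile error. */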
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 0a540b9897a6..ad3695e67b76 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -138,63 +138,4 @@ enum dc_log_type {
(1 << LOG_HW_AUDIO)| \
(1 << LOG_BANDWIDTH_CALCS)*/
-union logger_flags {
- struct {
- uint32_t ENABLE_CONSOLE:1; /* Print to console */
- uint32_t ENABLE_BUFFER:1; /* Print to buffer */
- uint32_t RESERVED:30;
- } bits;
- uint32_t value;
-};
-
-struct log_entry {
- struct dal_logger *logger;
- enum dc_log_type type;
-
- char *buf;
- uint32_t buf_offset;
- uint32_t max_buf_bytes;
-};
-
-/**
-* Structure for enumerating log types
-*/
-struct dc_log_type_info {
- enum dc_log_type type;
- char name[MAX_NAME_LEN];
-};
-
-/* Structure for keeping track of offsets, buffer, etc */
-
-#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
-
-/*Connectivity log needs to output EDID, which needs at lease 256x3 bytes,
- * change log line size to 896 to meet the request.
- */
-#define LOG_MAX_LINE_SIZE 896
-
-struct dal_logger {
-
- /* How far into the circular buffer has been read by dsat
- * Read offset should never cross write offset. Write \0's to
- * read data just to be sure?
- */
- uint32_t buffer_read_offset;
-
- /* How far into the circular buffer we have written
- * Write offset should never cross read offset
- */
- uint32_t buffer_write_offset;
-
- uint32_t open_count;
-
- char *log_buffer; /* Pointer to malloc'ed buffer */
- uint32_t log_buffer_size; /* Size of circular buffer */
-
- uint32_t mask; /*array of masks for major elements*/
-
- union logger_flags flags;
- struct dc_context *ctx;
-};
-
#endif /* __DAL_LOGGER_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index eee0dfad6962..bf29733958c3 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -131,6 +131,63 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
dc_fixpt_div(dc_fixpt_one, m1));
}
+
+/* degamma: nonlinear to linear */
+static void compute_hlg_oetf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+{
+ struct fixed31_32 a;
+ struct fixed31_32 b;
+ struct fixed31_32 c;
+ struct fixed31_32 threshold;
+ struct fixed31_32 reference_white_level;
+
+ a = dc_fixpt_from_fraction(17883277, 100000000);
+ if (is_light0_12) {
+ /*light 0-12*/
+ b = dc_fixpt_from_fraction(28466892, 100000000);
+ c = dc_fixpt_from_fraction(55991073, 100000000);
+ threshold = dc_fixpt_one;
+ reference_white_level = dc_fixpt_half;
+ } else {
+ /*light 0-1*/
+ b = dc_fixpt_from_fraction(2372241, 100000000);
+ c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
+ threshold = dc_fixpt_from_fraction(1, 12);
+ reference_white_level = dc_fixpt_pow(dc_fixpt_from_fraction(3, 1), dc_fixpt_half);
+ }
+ if (dc_fixpt_lt(threshold, in_x))
+ *out_y = dc_fixpt_add(c, dc_fixpt_mul(a, dc_fixpt_log(dc_fixpt_sub(in_x, b))));
+ else
+ *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_half), reference_white_level);
+}
+
+/* regamma: linear to nonlinear */
+static void compute_hlg_eotf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+{
+ struct fixed31_32 a;
+ struct fixed31_32 b;
+ struct fixed31_32 c;
+ struct fixed31_32 reference_white_level;
+
+ a = dc_fixpt_from_fraction(17883277, 100000000);
+ if (is_light0_12) {
+ /*light 0-12*/
+ b = dc_fixpt_from_fraction(28466892, 100000000);
+ c = dc_fixpt_from_fraction(55991073, 100000000);
+ reference_white_level = dc_fixpt_from_fraction(4, 1);
+ } else {
+ /*light 0-1*/
+ b = dc_fixpt_from_fraction(2372241, 100000000);
+ c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
+ reference_white_level = dc_fixpt_from_fraction(1, 3);
+ }
+ if (dc_fixpt_lt(dc_fixpt_half, in_x))
+ *out_y = dc_fixpt_add(dc_fixpt_exp(dc_fixpt_div(dc_fixpt_sub(in_x, c), a)), b);
+ else
+ *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_from_fraction(2, 1)), reference_white_level);
+}
+
+
/* one-time pre-compute PQ values - only for sdr_white_level 80 */
void precompute_pq(void)
{
@@ -691,6 +748,48 @@ static void build_degamma(struct pwl_float_data_ex *curve,
}
}
+static void build_hlg_degamma(struct pwl_float_data_ex *degamma,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x, bool is_light0_12)
+{
+ uint32_t i;
+
+ struct pwl_float_data_ex *rgb = degamma;
+ const struct hw_x_point *coord_x = coordinate_x;
+
+ i = 0;
+
+ while (i != hw_points_num + 1) {
+ compute_hlg_oetf(coord_x->x, is_light0_12, &rgb->r);
+ rgb->g = rgb->r;
+ rgb->b = rgb->r;
+ ++coord_x;
+ ++rgb;
+ ++i;
+ }
+}
+
+static void build_hlg_regamma(struct pwl_float_data_ex *regamma,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x, bool is_light0_12)
+{
+ uint32_t i;
+
+ struct pwl_float_data_ex *rgb = regamma;
+ const struct hw_x_point *coord_x = coordinate_x;
+
+ i = 0;
+
+ while (i != hw_points_num + 1) {
+ compute_hlg_eotf(coord_x->x, is_light0_12, &rgb->r);
+ rgb->g = rgb->r;
+ rgb->b = rgb->r;
+ ++coord_x;
+ ++rgb;
+ ++i;
+ }
+}
+
static void scale_gamma(struct pwl_float_data *pwl_rgb,
const struct dc_gamma *ramp,
struct dividers dividers)
@@ -898,7 +997,9 @@ static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
* norm_y = 4095*regamma_y, and index is just truncating to nearest integer
* lut1 = lut1D[index], lut2 = lut1D[index+1]
*
- *adjustedY is then linearly interpolating regamma Y between lut1 and lut2
+ * adjustedY is then the linear interpolation of regamma Y between lut1 and lut2
+ *
+ * Custom degamma on Linux uses the same interpolation math, so it is handled here
*/
static void apply_lut_1d(
const struct dc_gamma *ramp,
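The index/lut1/lut2 construction in the comment above is easier to follow in plain floating point. A minimal sketch of the same interpolation, assuming a 4096-entry LUT and ignoring the driver's fixed31_32 types:

/* Plain-float sketch of the lut1/lut2 interpolation described above;
 * the driver performs the same steps in fixed31_32 arithmetic. */
static float lut_interp(const float *lut1d, int lut_size, float regamma_y)
{
        float norm_y = (lut_size - 1) * regamma_y; /* e.g. 4095 * regamma_y */
        int index = (int)norm_y;                   /* truncate to an index */
        float frac = norm_y - (float)index;

        if (index >= lut_size - 1)
                return lut1d[lut_size - 1];

        /* adjustedY: linear interpolation between lut1 and lut2 */
        return lut1d[index] + frac * (lut1d[index + 1] - lut1d[index]);
}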
@@ -919,7 +1020,7 @@ static void apply_lut_1d(
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
- if (ramp->type != GAMMA_CS_TFM_1D)
+ if (ramp->type != GAMMA_CS_TFM_1D && ramp->type != GAMMA_CUSTOM)
return; // this is not expected
for (i = 0; i < num_hw_points; i++) {
@@ -1537,7 +1638,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
coordinates_x, axix_x, curve,
MAX_HW_POINTS, tf_pts,
- mapUserRamp);
+ mapUserRamp && ramp->type != GAMMA_CUSTOM);
+ if (ramp->type == GAMMA_CUSTOM)
+ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
@@ -1622,6 +1725,25 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
ret = true;
kvfree(rgb_regamma);
+ } else if (trans == TRANSFER_FUNCTION_HLG ||
+ trans == TRANSFER_FUNCTION_HLG12) {
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
+ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+
+ build_hlg_regamma(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ trans == TRANSFER_FUNCTION_HLG12);
+ for (i = 0; i <= MAX_HW_POINTS; i++) {
+ points->red[i] = rgb_regamma[i].r;
+ points->green[i] = rgb_regamma[i].g;
+ points->blue[i] = rgb_regamma[i].b;
+ }
+ ret = true;
+ kvfree(rgb_regamma);
}
rgb_regamma_alloc_fail:
return ret;
@@ -1682,6 +1804,25 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
ret = true;
kvfree(rgb_degamma);
+ } else if (trans == TRANSFER_FUNCTION_HLG ||
+ trans == TRANSFER_FUNCTION_HLG12) {
+ rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_degamma),
+ GFP_KERNEL);
+ if (!rgb_degamma)
+ goto rgb_degamma_alloc_fail;
+
+ build_hlg_degamma(rgb_degamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ trans == TRANSFER_FUNCTION_HLG12);
+ for (i = 0; i <= MAX_HW_POINTS; i++) {
+ points->red[i] = rgb_degamma[i].r;
+ points->green[i] = rgb_degamma[i].g;
+ points->blue[i] = rgb_degamma[i].b;
+ }
+ ret = true;
+ kvfree(rgb_degamma);
}
points->end_exponent = 0;
points->x_point_at_y1_red = 1;
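For reference, the BT.2100 HLG curves that the new compute_hlg_oetf()/compute_hlg_eotf() implement in fixed31_32 arithmetic look like this in plain doubles. The driver's light 0-1 variant folds the 12x substitution into its b and c constants (0.02372241 = b/12, 1.00429347 = c + a*ln(12)), so the constants differ but the curve is the same. A float sketch, not driver code:

#include <math.h>

static const double a = 0.17883277, b = 0.28466892, c = 0.55991073;

/* BT.2100 HLG OETF: scene-linear E in [0,1] -> non-linear signal E' */
static double hlg_oetf(double e)
{
        return (e <= 1.0 / 12.0) ? sqrt(3.0 * e)
                                 : a * log(12.0 * e - b) + c;
}

/* Inverse OETF: non-linear signal E' -> scene-linear E */
static double hlg_inverse_oetf(double ep)
{
        return (ep <= 0.5) ? (ep * ep) / 3.0
                           : (exp((ep - c) / a) + b) / 12.0;
}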
diff --git a/drivers/gpu/drm/amd/display/modules/color/luts_1d.h b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
new file mode 100644
index 000000000000..66b1fad572ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef LUTS_1D_H
+#define LUTS_1D_H
+
+#include "hw_shared.h"
+
+struct point_config {
+ uint32_t custom_float_x;
+ uint32_t custom_float_y;
+ uint32_t custom_float_slope;
+};
+
+struct lut_point {
+ uint32_t red;
+ uint32_t green;
+ uint32_t blue;
+ uint32_t delta_red;
+ uint32_t delta_green;
+ uint32_t delta_blue;
+};
+
+struct pwl_1dlut_parameter {
+ struct gamma_curve arr_curve_points[34];
+ struct point_config arr_points[2];
+ struct lut_point rgb_resulted[256];
+ uint32_t hw_points_num;
+};
+#endif // LUTS_1D_H
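A hedged sketch of how a caller might fill the new pwl_1dlut_parameter with an identity ramp. The delta_* fields are assumed to carry the step to the following entry (the usual piecewise-linear encoding for hardware interpolation); real code would additionally pack the raw values through the custom-float converters:

static void fill_identity_1dlut(struct pwl_1dlut_parameter *p)
{
        int i;

        p->hw_points_num = 256;
        for (i = 0; i < 256; i++) {
                uint32_t v = (i * 0xFFFF) / 255; /* 16-bit identity ramp */

                p->rgb_resulted[i].red = v;
                p->rgb_resulted[i].green = v;
                p->rgb_resulted[i].blue = v;
        }
        /* per-segment deltas; the last point has no following segment */
        for (i = 0; i < 255; i++) {
                p->rgb_resulted[i].delta_red =
                        p->rgb_resulted[i + 1].red - p->rgb_resulted[i].red;
                p->rgb_resulted[i].delta_green =
                        p->rgb_resulted[i + 1].green - p->rgb_resulted[i].green;
                p->rgb_resulted[i].delta_blue =
                        p->rgb_resulted[i + 1].blue - p->rgb_resulted[i].blue;
        }
}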
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 710852ad03f3..3d4c1b1ab8c4 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -29,7 +29,7 @@
#include "core_types.h"
#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
-#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000001
+#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000000
#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
@@ -238,7 +238,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
for (int i = 0; i < core_stats->entry_id; i++) {
if (event_index < core_stats->event_index &&
i == events[event_index].entry_id) {
- DISPLAY_STATS("%s\n", events[event_index].event_string);
+ DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
event_index++;
} else if (time_index < core_stats->index &&
i == time[time_index].entry_id) {
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 5eb895fd98bf..9cb9ceb4d74d 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -27,6 +27,7 @@
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00010000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00020000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00040000
+#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00080000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK 0xFFFF0000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT 16
@@ -34,6 +35,7 @@
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00000001
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00000002
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00000004
+#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00000008
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
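A small sketch of how these caps words are meant to be consumed, with the new GEN4 bits following the existing pattern (helper names hypothetical):

static bool cail_pcie_supports_gen4(uint32_t caps)
{
        return (caps & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) != 0;
}

static uint32_t cail_platform_speed_bits(uint32_t caps)
{
        /* platform link-speed support lives in the upper half-word */
        return (caps & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
               CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
}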
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index b178176b72ac..265621d8945c 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -128,47 +128,57 @@ enum PP_FEATURE_MASK {
PP_OVERDRIVE_MASK = 0x4000,
PP_GFXOFF_MASK = 0x8000,
PP_ACG_MASK = 0x10000,
+ PP_STUTTER_MODE = 0x20000,
};
+/**
+ * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
+ */
struct amd_ip_funcs {
- /* Name of IP block */
+ /** @name: Name of IP block */
char *name;
- /* sets up early driver state (pre sw_init), does not configure hw - Optional */
+ /**
+ * @early_init:
+ *
+ * sets up early driver state (pre sw_init),
+ * does not configure hw - Optional
+ */
int (*early_init)(void *handle);
- /* sets up late driver/hw state (post hw_init) - Optional */
+ /** @late_init: sets up late driver/hw state (post hw_init) - Optional */
int (*late_init)(void *handle);
- /* sets up driver state, does not configure hw */
+ /** @sw_init: sets up driver state, does not configure hw */
int (*sw_init)(void *handle);
- /* tears down driver state, does not configure hw */
+ /** @sw_fini: tears down driver state, does not configure hw */
int (*sw_fini)(void *handle);
- /* sets up the hw state */
+ /** @hw_init: sets up the hw state */
int (*hw_init)(void *handle);
- /* tears down the hw state */
+ /** @hw_fini: tears down the hw state */
int (*hw_fini)(void *handle);
+ /** @late_fini: final cleanup */
void (*late_fini)(void *handle);
- /* handles IP specific hw/sw changes for suspend */
+ /** @suspend: handles IP specific hw/sw changes for suspend */
int (*suspend)(void *handle);
- /* handles IP specific hw/sw changes for resume */
+ /** @resume: handles IP specific hw/sw changes for resume */
int (*resume)(void *handle);
- /* returns current IP block idle status */
+ /** @is_idle: returns current IP block idle status */
bool (*is_idle)(void *handle);
- /* poll for idle */
+ /** @wait_for_idle: poll for idle */
int (*wait_for_idle)(void *handle);
- /* check soft reset the IP block */
+ /** @check_soft_reset: check soft reset the IP block */
bool (*check_soft_reset)(void *handle);
- /* pre soft reset the IP block */
+ /** @pre_soft_reset: pre soft reset the IP block */
int (*pre_soft_reset)(void *handle);
- /* soft reset the IP block */
+ /** @soft_reset: soft reset the IP block */
int (*soft_reset)(void *handle);
- /* post soft reset the IP block */
+ /** @post_soft_reset: post soft reset the IP block */
int (*post_soft_reset)(void *handle);
- /* enable/disable cg for the IP block */
+ /** @set_clockgating_state: enable/disable cg for the IP block */
int (*set_clockgating_state)(void *handle,
enum amd_clockgating_state state);
- /* enable/disable pg for the IP block */
+ /** @set_powergating_state: enable/disable pg for the IP block */
int (*set_powergating_state)(void *handle,
enum amd_powergating_state state);
- /* get current clockgating status */
+ /** @get_clockgating_state: get current clockgating status */
void (*get_clockgating_state)(void *handle, u32 *flags);
};
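To make the newly documented hooks concrete, here is a minimal sketch of an IP block table using designated initializers, the way amdgpu IP blocks are typically wired up (all foo_* names hypothetical; hooks left unset stay NULL and are treated as optional):

static int foo_early_init(void *handle) { return 0; }
static int foo_sw_init(void *handle) { return 0; }
static int foo_sw_fini(void *handle) { return 0; }
static int foo_hw_init(void *handle) { return 0; }
static int foo_hw_fini(void *handle) { return 0; }
static bool foo_is_idle(void *handle) { return true; }

static const struct amd_ip_funcs foo_ip_funcs = {
        .name = "foo",
        .early_init = foo_early_init,
        .sw_init = foo_sw_init,
        .sw_fini = foo_sw_fini,
        .hw_init = foo_hw_init,
        .hw_fini = foo_hw_fini,
        .is_idle = foo_is_idle,
};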
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
index 18a32477ed1d..fe0cbaade3c3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
@@ -89,6 +89,8 @@
#define mmUVD_JPEG_RB_SIZE_BASE_IDX 1
#define mmUVD_JPEG_ADDR_CONFIG 0x021f
#define mmUVD_JPEG_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_JPEG_PITCH 0x0222
+#define mmUVD_JPEG_PITCH_BASE_IDX 1
#define mmUVD_JPEG_GPCOM_CMD 0x022c
#define mmUVD_JPEG_GPCOM_CMD_BASE_IDX 1
#define mmUVD_JPEG_GPCOM_DATA0 0x022d
@@ -203,6 +205,8 @@
#define mmUVD_RB_WPTR4_BASE_IDX 1
#define mmUVD_JRBC_RB_RPTR 0x0457
#define mmUVD_JRBC_RB_RPTR_BASE_IDX 1
+#define mmUVD_LMI_JPEG_VMID 0x045d
+#define mmUVD_LMI_JPEG_VMID_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x045e
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x045f
@@ -231,6 +235,8 @@
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_JRBC_IB_VMID 0x0507
#define mmUVD_LMI_JRBC_IB_VMID_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_VMID 0x0508
+#define mmUVD_LMI_JRBC_RB_VMID_BASE_IDX 1
#define mmUVD_JRBC_RB_WPTR 0x0509
#define mmUVD_JRBC_RB_WPTR_BASE_IDX 1
#define mmUVD_JRBC_RB_CNTL 0x050a
@@ -239,6 +245,20 @@
#define mmUVD_JRBC_IB_SIZE_BASE_IDX 1
#define mmUVD_JRBC_LMI_SWAP_CNTL 0x050d
#define mmUVD_JRBC_LMI_SWAP_CNTL_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x050e
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x050f
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW 0x0510
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH 0x0511
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_JRBC_RB_REF_DATA 0x0512
+#define mmUVD_JRBC_RB_REF_DATA_BASE_IDX 1
+#define mmUVD_JRBC_RB_COND_RD_TIMER 0x0513
+#define mmUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 1
+#define mmUVD_JRBC_EXTERNAL_REG_BASE 0x0517
+#define mmUVD_JRBC_EXTERNAL_REG_BASE_BASE_IDX 1
#define mmUVD_JRBC_SOFT_RESET 0x0519
#define mmUVD_JRBC_SOFT_RESET_BASE_IDX 1
#define mmUVD_JRBC_STATUS 0x051a
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 092d800b703a..4bc118df3bc4 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1074,7 +1074,7 @@ struct atom_integrated_system_info_v1_11
uint16_t dpphy_override; // bit vector, enum of atom_sysinfo_dpphy_override_def
uint16_t lvds_misc; // enum of atom_sys_info_lvds_misc_def
uint16_t backlight_pwm_hz; // pwm frequency in hz
- uint8_t memorytype; // enum of atom_sys_mem_type
+ uint8_t memorytype; // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
uint8_t umachannelnumber; // number of memory channels
uint8_t pwr_on_digon_to_de; /* all pwr sequence numbers below are in uint of 4ms */
uint8_t pwr_on_de_to_vary_bl;
@@ -1084,18 +1084,25 @@ struct atom_integrated_system_info_v1_11
uint8_t pwr_on_vary_bl_to_blon;
uint8_t pwr_down_bloff_to_vary_bloff;
uint8_t min_allowed_bl_level;
+ uint8_t htc_hyst_limit;
+ uint8_t htc_tmp_limit;
+ uint8_t reserved1;
+ uint8_t reserved2;
struct atom_external_display_connection_info extdispconninfo;
struct atom_14nm_dpphy_dvihdmi_tuningset dvi_tuningset;
struct atom_14nm_dpphy_dvihdmi_tuningset hdmi_tuningset;
struct atom_14nm_dpphy_dvihdmi_tuningset hdmi6g_tuningset;
- struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
- struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
+ struct atom_14nm_dpphy_dp_tuningset dp_tuningset; // rbr 1.62G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset; // HBR3 dp tuning set
struct atom_camera_data camera_info;
struct atom_hdmi_retimer_redriver_set dp0_retimer_set; //for DP0
struct atom_hdmi_retimer_redriver_set dp1_retimer_set; //for DP1
struct atom_hdmi_retimer_redriver_set dp2_retimer_set; //for DP2
struct atom_hdmi_retimer_redriver_set dp3_retimer_set; //for DP3
- uint32_t reserved[108];
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr_tuningset; //hbr 2.7G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr2_tuningset; //hbr2 5.4G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset edp_tuningset; //edp tuning set
+ uint32_t reserved[66];
};
@@ -1433,7 +1440,10 @@ struct atom_smc_dpm_info_v4_1
uint8_t acggfxclkspreadpercent;
uint16_t acggfxclkspreadfreq;
- uint32_t boardreserved[10];
+ uint8_t Vr2_I2C_address;
+ uint8_t padding_vr2[3];
+
+ uint32_t boardreserved[9];
};
/*
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index 7852952d1fde..1d93a0c574c9 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -23,6 +23,8 @@
#ifndef _DM_PP_INTERFACE_
#define _DM_PP_INTERFACE_
+#include "dm_services_types.h"
+
#define PP_MAX_CLOCK_LEVELS 16
enum amd_pp_display_config_type{
@@ -189,39 +191,4 @@ struct pp_display_clock_request {
uint32_t clock_freq_in_khz;
};
-#define PP_MAX_WM_SETS 4
-
-enum pp_wm_set_id {
- DC_WM_SET_A = 0,
- DC_WM_SET_B,
- DC_WM_SET_C,
- DC_WM_SET_D,
- DC_WM_SET_INVALID = 0xffff,
-};
-
-struct pp_wm_set_with_dmif_clock_range_soc15 {
- enum pp_wm_set_id wm_set_id;
- uint32_t wm_min_dcefclk_in_khz;
- uint32_t wm_max_dcefclk_in_khz;
- uint32_t wm_min_memclk_in_khz;
- uint32_t wm_max_memclk_in_khz;
-};
-
-struct pp_wm_set_with_mcif_clock_range_soc15 {
- enum pp_wm_set_id wm_set_id;
- uint32_t wm_min_socclk_in_khz;
- uint32_t wm_max_socclk_in_khz;
- uint32_t wm_min_memclk_in_khz;
- uint32_t wm_max_memclk_in_khz;
-};
-
-struct pp_wm_sets_with_clock_ranges_soc15 {
- uint32_t num_wm_sets_dmif;
- uint32_t num_wm_sets_mcif;
- struct pp_wm_set_with_dmif_clock_range_soc15
- wm_sets_dmif[PP_MAX_WM_SETS];
- struct pp_wm_set_with_mcif_clock_range_soc15
- wm_sets_mcif[PP_MAX_WM_SETS];
-};
-
#endif /* _DM_PP_INTERFACE_ */
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
new file mode 100644
index 000000000000..36306c57a2b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_GFX_9_0_H__
+#define __IRQSRCS_GFX_9_0_H__
+
+
+#define GFX_9_0__SRCID__CP_RB_INTERRUPT_PKT 176 /* B0 CP_INTERRUPT pkt in RB */
+#define GFX_9_0__SRCID__CP_IB1_INTERRUPT_PKT 177 /* B1 CP_INTERRUPT pkt in IB1 */
+#define GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT 178 /* B2 CP_INTERRUPT pkt in IB2 */
+#define GFX_9_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 /* B4 PM4 Pkt Rsvd Bits Error */
+#define GFX_9_0__SRCID__CP_EOP_INTERRUPT 181 /* B5 End-of-Pipe Interrupt */
+#define GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR 183 /* B7 Bad Opcode Error */
+#define GFX_9_0__SRCID__CP_PRIV_REG_FAULT 184 /* B8 Privileged Register Fault */
+#define GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT 185 /* B9 Privileged Instr Fault */
+#define GFX_9_0__SRCID__CP_WAIT_MEM_SEM_FAULT 186 /* BA Wait Memory Semaphore Fault (Synchronization Object Fault) */
+#define GFX_9_0__SRCID__CP_CTX_EMPTY_INTERRUPT 187 /* BB Context Empty Interrupt */
+#define GFX_9_0__SRCID__CP_CTX_BUSY_INTERRUPT 188 /* BC Context Busy Interrupt */
+#define GFX_9_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT 192 /* C0 CP.ME Wait_Reg_Mem Poll Timeout */
+#define GFX_9_0__SRCID__CP_SIG_INCOMPLETE 193 /* C1 "Surface Probe Fault Signal Incomplete" */
+#define GFX_9_0__SRCID__CP_PREEMPT_ACK 194 /* C2 Preemption Acknowledge */
+#define GFX_9_0__SRCID__CP_GPF 195 /* C3 General Protection Fault (GPF) */
+#define GFX_9_0__SRCID__CP_GDS_ALLOC_ERROR 196 /* C4 GDS Alloc Error */
+#define GFX_9_0__SRCID__CP_ECC_ERROR 197 /* C5 ECC Error */
+#define GFX_9_0__SRCID__CP_COMPUTE_QUERY_STATUS 199 /* C7 Compute query status */
+#define GFX_9_0__SRCID__CP_VM_DOORBELL 200 /* C8 Unattached VM Doorbell Received */
+#define GFX_9_0__SRCID__CP_FUE_ERROR 201 /* C9 ECC FUE Error */
+#define GFX_9_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT 202 /* CA Streaming Perf Monitor Interrupt */
+#define GFX_9_0__SRCID__GRBM_RD_TIMEOUT_ERROR 232 /* E8 GRBM read timeout error */
+#define GFX_9_0__SRCID__GRBM_REG_GUI_IDLE 233 /* E9 Register GUI Idle */
+#define GFX_9_0__SRCID__SQ_INTERRUPT_ID 239 /* EF SQ Interrupt (ttrace wrap, errors) */
+
+#endif /* __IRQSRCS_GFX_9_0_H__ */
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
index c6b6f97de9de..aaed7f59e0e2 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
@@ -198,4 +198,102 @@
#define VISLANDS30_IV_SRCID_HPD_RX_F 42 // 0x2a
#define VISLANDS30_IV_EXTID_HPD_RX_F 11
+#define VISLANDS30_IV_SRCID_GPIO_19 0x00000053 /* 83 */
+
+#define VISLANDS30_IV_SRCID_SRBM_READ_TIMEOUT_ERR 0x00000060 /* 96 */
+#define VISLANDS30_IV_SRCID_SRBM_CTX_SWITCH 0x00000061 /* 97 */
+
+#define VISLANDS30_IV_SRBM_REG_ACCESS_ERROR 0x00000062 /* 98 */
+
+
+#define VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP 0x00000077 /* 119 */
+#define VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE 0x0000007c /* 124 */
+
+#define VISLANDS30_IV_SRCID_BIF_PF_VF_MSGBUF_VALID 0x00000087 /* 135 */
+
+#define VISLANDS30_IV_SRCID_BIF_VF_PF_MSGBUF_ACK 0x0000008a /* 138 */
+
+#define VISLANDS30_IV_SRCID_SYS_PAGE_INV_FAULT 0x0000008c /* 140 */
+#define VISLANDS30_IV_SRCID_SYS_MEM_PROT_FAULT 0x0000008d /* 141 */
+
+#define VISLANDS30_IV_SRCID_SEM_PAGE_INV_FAULT 0x00000090 /* 144 */
+#define VISLANDS30_IV_SRCID_SEM_MEM_PROT_FAULT 0x00000091 /* 145 */
+
+#define VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT 0x00000092 /* 146 */
+#define VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT 0x00000093 /* 147 */
+
+#define VISLANDS30_IV_SRCID_ACP 0x000000a2 /* 162 */
+
+#define VISLANDS30_IV_SRCID_VCE_TRAP 0x000000a7 /* 167 */
+#define VISLANDS30_IV_EXTID_VCE_TRAP_GENERAL_PURPOSE 0
+#define VISLANDS30_IV_EXTID_VCE_TRAP_LOW_LATENCY 1
+#define VISLANDS30_IV_EXTID_VCE_TRAP_REAL_TIME 2
+
+#define VISLANDS30_IV_SRCID_CP_INT_RB 0x000000b0 /* 176 */
+#define VISLANDS30_IV_SRCID_CP_INT_IB1 0x000000b1 /* 177 */
+#define VISLANDS30_IV_SRCID_CP_INT_IB2 0x000000b2 /* 178 */
+#define VISLANDS30_IV_SRCID_CP_PM4_RES_BITS_ERR 0x000000b4 /* 180 */
+#define VISLANDS30_IV_SRCID_CP_END_OF_PIPE 0x000000b5 /* 181 */
+#define VISLANDS30_IV_SRCID_CP_BAD_OPCODE 0x000000b7 /* 183 */
+#define VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT 0x000000b8 /* 184 */
+#define VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT 0x000000b9 /* 185 */
+#define VISLANDS30_IV_SRCID_CP_WAIT_MEM_SEM_FAULT 0x000000ba /* 186 */
+#define VISLANDS30_IV_SRCID_CP_GUI_IDLE 0x000000bb /* 187 */
+#define VISLANDS30_IV_SRCID_CP_GUI_BUSY 0x000000bc /* 188 */
+
+#define VISLANDS30_IV_SRCID_CP_COMPUTE_QUERY_STATUS 0x000000bf /* 191 */
+#define VISLANDS30_IV_SRCID_CP_ECC_ERROR 0x000000c5 /* 197 */
+
+#define CARRIZO_IV_SRCID_CP_COMPUTE_QUERY_STATUS 0x000000c7 /* 199 */
+
+#define VISLANDS30_IV_SRCID_CP_WAIT_REG_MEM_POLL_TIMEOUT 0x000000c0 /* 192 */
+#define VISLANDS30_IV_SRCID_CP_SEM_SIG_INCOMPL 0x000000c1 /* 193 */
+#define VISLANDS30_IV_SRCID_CP_PREEMPT_ACK 0x000000c2 /* 194 */
+#define VISLANDS30_IV_SRCID_CP_GENERAL_PROT_FAULT 0x000000c3 /* 195 */
+#define VISLANDS30_IV_SRCID_CP_GDS_ALLOC_ERROR 0x000000c4 /* 196 */
+#define VISLANDS30_IV_SRCID_CP_ECC_ERROR 0x000000c5 /* 197 */
+
+#define VISLANDS30_IV_SRCID_RLC_STRM_PERF_MONITOR 0x000000ca /* 202 */
+
+#define VISLANDS30_IV_SDMA_ATOMIC_SRC_ID 0x000000da /* 218 */
+
+#define VISLANDS30_IV_SRCID_SDMA_ECC_ERROR 0x000000dc /* 220 */
+
+#define VISLANDS30_IV_SRCID_SDMA_TRAP 0x000000e0 /* 224 */
+#define VISLANDS30_IV_SRCID_SDMA_SEM_INCOMPLETE 0x000000e1 /* 225 */
+#define VISLANDS30_IV_SRCID_SDMA_SEM_WAIT 0x000000e2 /* 226 */
+
+
+#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER 0x000000e5 /* 229 */
+
+#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH 0x000000e6 /* 230 */
+#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW 0x000000e7 /* 231 */
+
+#define VISLANDS30_IV_SRCID_GRBM_READ_TIMEOUT_ERR 0x000000e8 /* 232 */
+#define VISLANDS30_IV_SRCID_GRBM_REG_GUI_IDLE 0x000000e9 /* 233 */
+
+#define VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG 0x000000ef /* 239 */
+
+#define VISLANDS30_IV_SRCID_SDMA_PREEMPT 0x000000f0 /* 240 */
+#define VISLANDS30_IV_SRCID_SDMA_VM_HOLE 0x000000f2 /* 242 */
+#define VISLANDS30_IV_SRCID_SDMA_CTXEMPTY 0x000000f3 /* 243 */
+#define VISLANDS30_IV_SRCID_SDMA_DOORBELL_INVALID 0x000000f4 /* 244 */
+#define VISLANDS30_IV_SRCID_SDMA_FROZEN 0x000000f5 /* 245 */
+#define VISLANDS30_IV_SRCID_SDMA_POLL_TIMEOUT 0x000000f6 /* 246 */
+#define VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE 0x000000f7 /* 247 */
+
+#define VISLANDS30_IV_SRCID_CG_THERMAL_TRIG 0x000000f8 /* 248 */
+
+#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER_TRIGGER 0x000000fd /* 253 */
+
+/* These are not "real" source ids defined by HW */
+#define VISLANDS30_IV_SRCID_VM_CONTEXT_ALL 0x00000100 /* 256 */
+#define VISLANDS30_IV_EXTID_VM_CONTEXT0_ALL 0
+#define VISLANDS30_IV_EXTID_VM_CONTEXT1_ALL 1
+
+
+/* IV Extended IDs */
+#define VISLANDS30_IV_EXTID_NONE 0x00000000
+#define VISLANDS30_IV_EXTID_INVALID 0xffffffff
+
#endif // _IVSRCID_VISLANDS30_H_
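A sketch of the dispatch these source ids feed: a simplified IH handler keying off the new defines (real code walks struct amdgpu_iv_entry; only the src_id switch is shown):

static void handle_iv_src_id(uint32_t src_id)
{
        switch (src_id) {
        case VISLANDS30_IV_SRCID_CP_END_OF_PIPE:
                /* fence / end-of-pipe processing */
                break;
        case VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT:
        case VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT:
                /* GPU VM fault reporting */
                break;
        case VISLANDS30_IV_SRCID_SDMA_TRAP:
                /* SDMA ring processing */
                break;
        default:
                break;
        }
}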
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
new file mode 100644
index 000000000000..802413832fe8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_SDMA0_4_0_H__
+#define __IRQSRCS_SDMA0_4_0_H__
+
+#define SDMA0_4_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 /* 0xD9 SDMA atomic*_rtn ops complete */
+#define SDMA0_4_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 /* 0xDA SDMA atomic CMPSWAP loop timeout */
+#define SDMA0_4_0__SRCID__SDMA_IB_PREEMPT 219 /* 0xDB sdma mid-command buffer preempt interrupt */
+#define SDMA0_4_0__SRCID__SDMA_ECC 220 /* 0xDC ECC Error */
+#define SDMA0_4_0__SRCID__SDMA_PAGE_FAULT 221 /* 0xDD Page Fault Error from UTCL2 when nack=3 */
+#define SDMA0_4_0__SRCID__SDMA_PAGE_NULL 222 /* 0xDE Page Null from UTCL2 when nack=2 */
+#define SDMA0_4_0__SRCID__SDMA_XNACK 223 /* 0xDF Page retry timeout after UTCL2 return nack=1 */
+#define SDMA0_4_0__SRCID__SDMA_TRAP 224 /* 0xE0 Trap */
+#define SDMA0_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 225 /* 0xE1 GPF (Sem incomplete timeout) */
+#define SDMA0_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 226 /* 0xE2 Semaphore wait fail timeout */
+#define SDMA0_4_0__SRCID__SDMA_SRAM_ECC 228 /* 0xE4 SRAM ECC Error */
+#define SDMA0_4_0__SRCID__SDMA_PREEMPT 240 /* 0xF0 SDMA New Run List */
+#define SDMA0_4_0__SRCID__SDMA_VM_HOLE 242 /* 0xF2 MC or SEM address in VM hole */
+#define SDMA0_4_0__SRCID__SDMA_CTXEMPTY 243 /* 0xF3 Context Empty */
+#define SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID 244 /* 0xF4 Doorbell BE invalid */
+#define SDMA0_4_0__SRCID__SDMA_FROZEN 245 /* 0xF5 SDMA Frozen */
+#define SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT 246 /* 0xF6 SRBM read poll timeout */
+#define SDMA0_4_0__SRCID__SDMA_SRBMWRITE 247 /* 0xF7 SRBM write Protection */
+
+#endif /* __IRQSRCS_SDMA0_4_0_H__ */
+
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
new file mode 100644
index 000000000000..d12a35619f9a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_SDMA1_4_0_H__
+#define __IRQSRCS_SDMA1_4_0_H__
+
+#define SDMA1_4_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 /* 0xD9 SDMA atomic*_rtn ops complete */
+#define SDMA1_4_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 /* 0xDA SDMA atomic CMPSWAP loop timeout */
+#define SDMA1_4_0__SRCID__SDMA_IB_PREEMPT 219 /* 0xDB sdma mid-command buffer preempt interrupt */
+#define SDMA1_4_0__SRCID__SDMA_ECC 220 /* 0xDC ECC Error */
+#define SDMA1_4_0__SRCID__SDMA_PAGE_FAULT 221 /* 0xDD Page Fault Error from UTCL2 when nack=3 */
+#define SDMA1_4_0__SRCID__SDMA_PAGE_NULL 222 /* 0xDE Page Null from UTCL2 when nack=2 */
+#define SDMA1_4_0__SRCID__SDMA_XNACK 223 /* 0xDF Page retry timeout after UTCL2 return nack=1 */
+#define SDMA1_4_0__SRCID__SDMA_TRAP 224 /* 0xE0 Trap */
+#define SDMA1_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 225 /* 0xE1 GPF (Sem incomplete timeout) */
+#define SDMA1_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 226 /* 0xE2 Semaphore wait fail timeout */
+#define SDMA1_4_0__SRCID__SDMA_SRAM_ECC 228 /* 0xE4 SRAM ECC Error */
+#define SDMA1_4_0__SRCID__SDMA_PREEMPT 240 /* 0xF0 SDMA New Run List */
+#define SDMA1_4_0__SRCID__SDMA_VM_HOLE 242 /* 0xF2 MC or SEM address in VM hole */
+#define SDMA1_4_0__SRCID__SDMA_CTXEMPTY 243 /* 0xF3 Context Empty */
+#define SDMA1_4_0__SRCID__SDMA_DOORBELL_INVALID 244 /* 0xF4 Doorbell BE invalid */
+#define SDMA1_4_0__SRCID__SDMA_FROZEN 245 /* 0xF5 SDMA Frozen */
+#define SDMA1_4_0__SRCID__SDMA_POLL_TIMEOUT 246 /* 0xF6 SRBM read poll timeout */
+#define SDMA1_4_0__SRCID__SDMA_SRBMWRITE 247 /* 0xF7 SRBM write Protection */
+
+#endif /* __IRQSRCS_SDMA1_4_0_H__ */
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
index 7a65206a6d21..02bab4673cd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
@@ -23,13 +23,10 @@
*
*/
-#ifndef __SOC_BOUNDING_BOX_H__
-#define __SOC_BOUNDING_BOX_H__
+#ifndef __IRQSRCS_SMUIO_9_0_H__
+#define __IRQSRCS_SMUIO_9_0_H__
-#include "dml_common_defs.h"
+#define SMUIO_9_0__SRCID__SMUIO_GPIO19 83 /* GPIO19 interrupt */
-void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box);
-voltage_scaling_st dml_socbb_voltage_scaling(const soc_bounding_box_st *box, enum voltage_state voltage);
-double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage);
+#endif /* __IRQSRCS_SMUIO_9_0_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
new file mode 100644
index 000000000000..5218bc53fb2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_THM_9_0_H__
+#define __IRQSRCS_THM_9_0_H__
+
+#define THM_9_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
+#define THM_9_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+
+#endif /* __IRQSRCS_THM_9_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
new file mode 100644
index 000000000000..fb041aee6c66
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_UVD_7_0_H__
+#define __IRQSRCS_UVD_7_0_H__
+
+#define UVD_7_0__SRCID__UVD_ENC_GEN_PURP 119
+#define UVD_7_0__SRCID__UVD_ENC_LOW_LATENCY 120
+#define UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 /* UVD system message interrupt */
+
+#endif /* __IRQSRCS_UVD_7_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
new file mode 100644
index 000000000000..3440bab565af
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VCE_4_0_H__
+#define __IRQSRCS_VCE_4_0_H__
+
+#define VCE_4_0__CTXID__VCE_TRAP_GENERAL_PURPOSE 0
+#define VCE_4_0__CTXID__VCE_TRAP_LOW_LATENCY 1
+#define VCE_4_0__CTXID__VCE_TRAP_REAL_TIME 2
+
+#endif /* __IRQSRCS_VCE_4_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
new file mode 100644
index 000000000000..e5951709bfc3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VCN_1_0_H__
+#define __IRQSRCS_VCN_1_0_H__
+
+#define VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE 119 /* 0x77 Encoder General Purpose */
+#define VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY 120 /* 0x78 Encoder Low Latency */
+#define VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 /* 0x7c UVD system message interrupt */
+
+#endif /* __IRQSRCS_VCN_1_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
new file mode 100644
index 000000000000..d130936c9989
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VMC_1_0_H__
+#define __IRQSRCS_VMC_1_0_H__
+
+
+#define VMC_1_0__SRCID__VM_FAULT 0
+#define VMC_1_0__SRCID__VM_CONTEXT0_ALL 256
+#define VMC_1_0__SRCID__VM_CONTEXT1_ALL 257
+
+#define UTCL2_1_0__SRCID__FAULT 0 /* UTC L2 has encountered a fault or retry scenario */
+
+
+#endif /* __IRQSRCS_VMC_1_0_H__ */
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 5733fbee07f7..14391b06080c 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -47,6 +47,17 @@ enum kfd_preempt_type {
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};
+struct kfd_vm_fault_info {
+ uint64_t page_addr;
+ uint32_t vmid;
+ uint32_t mc_id;
+ uint32_t status;
+ bool prot_valid;
+ bool prot_read;
+ bool prot_write;
+ bool prot_exec;
+};
+
struct kfd_cu_info {
uint32_t num_shader_engines;
uint32_t num_shader_arrays_per_engine;
@@ -259,6 +270,21 @@ struct tile_config {
* IB to the corresponding ring (ring type). The IB is executed with the
* specified VMID in a user mode context.
*
+ * @get_vm_fault_info: Return information about a recent VM fault on
+ * GFXv7 and v8. If multiple VM faults occurred since the last call of
+ * this function, it will return information about the first of those
+ * faults. On GFXv9 VM fault information is fully contained in the IH
+ * packet and this function is not needed.
+ *
+ * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
+ * IH ring entry. This function allows the KFD ISR to get the VMID
+ * from the fault status register as early as possible.
+ *
+ * @gpu_recover: lets kgd reset the gpu after kfd detects a CPC hang
+ *
+ * @set_compute_idle: Indicates that compute is idle on a device. This
+ * can be used to change power profiles depending on compute activity.
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -374,6 +400,14 @@ struct kfd2kgd_calls {
int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
+
+ int (*get_vm_fault_info)(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *info);
+ uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
+
+ void (*gpu_recover)(struct kgd_dev *kgd);
+
+ void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
};
/**
@@ -399,6 +433,10 @@ struct kfd2kgd_calls {
* @schedule_evict_and_restore_process: Schedules work queue that will prepare
* for safe eviction of KFD BOs that belong to the specified process.
*
+ * @pre_reset: Notifies amdkfd that amdgpu is about to reset the gpu
+ *
+ * @post_reset: Notifies amdkfd that amdgpu successfully reset the gpu
+ *
* This structure contains function callback pointers so the kgd driver
* will notify to the amdkfd about certain status changes.
*
@@ -417,6 +455,8 @@ struct kgd2kfd_calls {
int (*resume_mm)(struct mm_struct *mm);
int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
struct dma_fence *fence);
+ int (*pre_reset)(struct kfd_dev *kfd);
+ int (*post_reset)(struct kfd_dev *kfd);
};
int kgd2kfd_init(unsigned interface_version,
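Older kgd implementations will not provide the new hooks, so callers should treat them as optional. A hedged sketch of the expected calling pattern (0-on-success return convention assumed):

struct kfd_vm_fault_info info;

if (kfd2kgd->get_vm_fault_info &&
    kfd2kgd->get_vm_fault_info(kgd, &info) == 0)
        pr_debug("vm fault at page 0x%llx, vmid %u, mc_id %u\n",
                 info.page_addr, info.vmid, info.mc_id);

if (kfd2kgd->set_compute_idle)
        kfd2kgd->set_compute_idle(kgd, true);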
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 06f08f34a110..6a41b81c7325 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -192,7 +192,6 @@ struct amd_pp_simple_clock_info;
struct amd_pp_display_configuration;
struct amd_pp_clock_info;
struct pp_display_clock_request;
-struct pp_wm_sets_with_clock_ranges_soc15;
struct pp_clock_levels_with_voltage;
struct pp_clock_levels_with_latency;
struct amd_pp_clocks;
@@ -232,16 +231,19 @@ struct amd_pm_funcs {
void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
/* export to amdgpu */
- void (*powergate_uvd)(void *handle, bool gate);
- void (*powergate_vce)(void *handle, bool gate);
struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state);
int (*load_firmware)(void *handle);
int (*wait_for_fw_loading_complete)(void *handle);
+ int (*set_powergating_by_smu)(void *handle,
+ uint32_t block_type, bool gate);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
int (*set_power_limit)(void *handle, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
+ int (*get_power_profile_mode)(void *handle, char *buf);
+ int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+ int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
/* export to DC */
u32 (*get_sclk)(void *handle, bool low);
u32 (*get_mclk)(void *handle, bool low);
@@ -261,15 +263,12 @@ struct amd_pm_funcs {
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
int (*set_watermarks_for_clocks_ranges)(void *handle,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ void *clock_ranges);
int (*display_clock_voltage_request)(void *handle,
struct pp_display_clock_request *clock);
int (*get_display_mode_validation_clocks)(void *handle,
struct amd_pp_simple_clock_info *clocks);
- int (*get_power_profile_mode)(void *handle, char *buf);
- int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
- int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
- int (*set_mmhub_powergating_by_smu)(void *handle);
+ int (*notify_smu_enable_pwe)(void *handle);
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d567be49c31b..7a646f94b478 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -221,29 +221,7 @@ static int pp_sw_reset(void *handle)
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = handle;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- int ret;
-
- if (!hwmgr || !hwmgr->pm_en)
- return 0;
-
- if (hwmgr->hwmgr_func->gfx_off_control) {
- /* Enable/disable GFX off through SMU */
- ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
- state == AMD_PG_STATE_GATE);
- if (ret)
- pr_err("gfx off control failed!\n");
- }
-
- if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
- pr_debug("%s was not implemented.\n", __func__);
- return 0;
- }
-
- /* Enable/disable GFX per cu powergating through SMU */
- return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
- state == AMD_PG_STATE_GATE);
+ return 0;
}
static int pp_suspend(void *handle)
@@ -1020,7 +998,7 @@ static int pp_get_display_power_level(void *handle,
static int pp_get_current_clocks(void *handle,
struct amd_pp_clock_info *clocks)
{
- struct amd_pp_simple_clock_info simple_clocks;
+ struct amd_pp_simple_clock_info simple_clocks = { 0 };
struct pp_clock_info hw_clocks;
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
@@ -1056,7 +1034,10 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
- clocks->max_clocks_state = simple_clocks.level;
+ if (simple_clocks.level == 0)
+ clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
+ else
+ clocks->max_clocks_state = simple_clocks.level;
if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
@@ -1118,17 +1099,17 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
}
static int pp_set_watermarks_for_clocks_ranges(void *handle,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
+ if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
- wm_with_clock_ranges);
+ clock_ranges);
mutex_unlock(&hwmgr->smu_lock);
return ret;
@@ -1159,6 +1140,8 @@ static int pp_get_display_mode_validation_clocks(void *handle,
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
+ clocks->level = PP_DAL_POWERLEVEL_7;
+
mutex_lock(&hwmgr->smu_lock);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
@@ -1168,19 +1151,78 @@ static int pp_get_display_mode_validation_clocks(void *handle,
return ret;
}
-static int pp_set_mmhub_powergating_by_smu(void *handle)
+static int pp_dpm_powergate_mmhub(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
+ if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
+}
+
+static int pp_dpm_powergate_gfx(void *handle, bool gate)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
+
+ if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
+ return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
+}
+
+static int pp_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+{
+ int ret = 0;
+
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ case AMD_IP_BLOCK_TYPE_VCN:
+ pp_dpm_powergate_uvd(handle, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ pp_dpm_powergate_vce(handle, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_GMC:
+ pp_dpm_powergate_mmhub(handle);
+ break;
+ case AMD_IP_BLOCK_TYPE_GFX:
+ ret = pp_dpm_powergate_gfx(handle, gate);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int pp_notify_smu_enable_pwe(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
}
static const struct amd_pm_funcs pp_dpm_funcs = {
@@ -1189,8 +1231,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.force_performance_level = pp_dpm_force_performance_level,
.get_performance_level = pp_dpm_get_performance_level,
.get_current_power_state = pp_dpm_get_current_power_state,
- .powergate_vce = pp_dpm_powergate_vce,
- .powergate_uvd = pp_dpm_powergate_uvd,
.dispatch_tasks = pp_dpm_dispatch_tasks,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
@@ -1210,6 +1250,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_vce_clock_state = pp_dpm_get_vce_clock_state,
.switch_power_profile = pp_dpm_switch_power_profile,
.set_clockgating_by_smu = pp_set_clockgating_by_smu,
+ .set_powergating_by_smu = pp_set_powergating_by_smu,
.get_power_profile_mode = pp_get_power_profile_mode,
.set_power_profile_mode = pp_set_power_profile_mode,
.odn_edit_dpm_table = pp_odn_edit_dpm_table,
@@ -1227,5 +1268,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
.display_clock_voltage_request = pp_display_clock_voltage_request,
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
- .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
+ .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
};
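A sketch of how amdgpu reaches the new unified entry point through the amd_pm_funcs table, replacing the removed powergate_uvd()/powergate_vce() hooks (pointer plumbing via adev->powerplay assumed):

const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;

if (funcs->set_powergating_by_smu)
        funcs->set_powergating_by_smu(pp_handle,
                                      AMD_IP_BLOCK_TYPE_UVD, true);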
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index a0bb921fac22..53207e76b0f3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -435,7 +435,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
PHM_FUNC_CHECK(hwmgr);
@@ -443,7 +443,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
return -EINVAL;
return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
- wm_with_clock_ranges);
+ clock_ranges);
}
int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index e63bc47dc715..8994aa5c8cf8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -81,7 +81,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
return -EINVAL;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
- hwmgr->power_source = PP_PowerSource_AC;
hwmgr->pp_table_version = PP_TABLE_V1;
hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -148,10 +147,10 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_AI:
- hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
case CHIP_VEGA20:
+ hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
break;
@@ -236,6 +235,11 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
goto err1;
+ /* make sure dc limits are valid */
+ if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ hwmgr->dyn_state.max_clock_voltage_on_dc =
+ hwmgr->dyn_state.max_clock_voltage_on_ac;
ret = psm_init_power_state_table(hwmgr);
if (ret)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 7047e29755c3..01dc46dc9c8a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1544,14 +1544,14 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
switch (hwmgr->chip_id) {
case CHIP_TONGA:
case CHIP_FIJI:
- *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4);
- *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4);
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
return;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
- *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100);
- *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100);
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
return;
default:
break;
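The ppatomctrl.c hunk above is a genuine endianness fix: on big-endian hosts the byte swap must happen before any arithmetic. A sketch of the difference:

uint32_t raw = profile->ulMaxVddc;     /* stored little-endian in VBIOS */

uint32_t wrong = le32_to_cpu(raw / 4); /* divides the still-swapped word */
uint32_t right = le32_to_cpu(raw) / 4; /* swap first, then scale */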
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 5325661fedff..d27c1c9df286 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
return 0;
}
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+ struct atom_firmware_info_v3_2 *fw_info)
+{
+ uint32_t frequency = 0;
+
+ boot_values->ulRevision = fw_info->firmware_revision;
+ boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
+ boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
+ boot_values->usVddc = fw_info->bootup_vddc_mv;
+ boot_values->usVddci = fw_info->bootup_vddci_mv;
+ boot_values->usMvddc = fw_info->bootup_mvddc_mv;
+ boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
+ boot_values->ucCoolingID = fw_info->coolingsolution_id;
+ boot_values->ulSocClk = 0;
+ boot_values->ulDCEFClk = 0;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+ boot_values->ulSocClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+ boot_values->ulDCEFClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+ boot_values->ulEClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+ boot_values->ulVClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+ boot_values->ulDClk = frequency;
+}
+
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+ struct atom_firmware_info_v3_1 *fw_info)
+{
+ uint32_t frequency = 0;
+
+ boot_values->ulRevision = fw_info->firmware_revision;
+ boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
+ boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
+ boot_values->usVddc = fw_info->bootup_vddc_mv;
+ boot_values->usVddci = fw_info->bootup_vddci_mv;
+ boot_values->usMvddc = fw_info->bootup_mvddc_mv;
+ boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
+ boot_values->ucCoolingID = fw_info->coolingsolution_id;
+ boot_values->ulSocClk = 0;
+ boot_values->ulDCEFClk = 0;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+ boot_values->ulSocClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+ boot_values->ulDCEFClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+ boot_values->ulEClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+ boot_values->ulVClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+ boot_values->ulDClk = frequency;
+}
+
int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values)
{
- struct atom_firmware_info_v3_1 *info = NULL;
+ struct atom_firmware_info_v3_2 *fwinfo_3_2;
+ struct atom_firmware_info_v3_1 *fwinfo_3_1;
+ struct atom_common_table_header *info = NULL;
uint16_t ix;
ix = GetIndexIntoMasterDataTable(firmwareinfo);
- info = (struct atom_firmware_info_v3_1 *)
+ info = (struct atom_common_table_header *)
smu_atom_get_data_table(hwmgr->adev,
ix, NULL, NULL, NULL);
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- boot_values->ulRevision = info->firmware_revision;
- boot_values->ulGfxClk = info->bootup_sclk_in10khz;
- boot_values->ulUClk = info->bootup_mclk_in10khz;
- boot_values->usVddc = info->bootup_vddc_mv;
- boot_values->usVddci = info->bootup_vddci_mv;
- boot_values->usMvddc = info->bootup_mvddc_mv;
- boot_values->usVddGfx = info->bootup_vddgfx_mv;
- boot_values->ucCoolingID = info->coolingsolution_id;
- boot_values->ulSocClk = 0;
- boot_values->ulDCEFClk = 0;
+ if ((info->format_revision == 3) && (info->content_revision == 2)) {
+ fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
+ pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
+ boot_values, fwinfo_3_2);
+ } else if ((info->format_revision == 3) && (info->content_revision == 1)) {
+ fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
+ pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
+ boot_values, fwinfo_3_1);
+ } else {
+ pr_info("Fw info table revision does not match!");
+ return -EINVAL;
+ }
return 0;
}
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
+ param->Vr2_I2C_address = info->Vr2_I2C_address;
+
return 0;
}
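
The refactored pp_atomfwctrl_get_vbios_bootup_values() above first reads the table as the shared atom_common_table_header, then casts to the matching firmware-info layout once the revision pair is known. A compact sketch of that dispatch pattern, with made-up struct layouts (the real atom_firmware_info_v3_x structs are far larger):

#include <stdint.h>
#include <stdio.h>

/* Every ATOM data table starts with this common header. */
struct common_header {
	uint8_t format_revision;
	uint8_t content_revision;
};

struct fw_info_v3_1 { struct common_header h; uint32_t bootup_sclk; };
struct fw_info_v3_2 { struct common_header h; uint32_t bootup_sclk; uint32_t extra; };

/* Dispatch on the header revisions before trusting the layout. */
static int read_bootup_sclk(const struct common_header *h, uint32_t *sclk)
{
	if (h->format_revision == 3 && h->content_revision == 2) {
		*sclk = ((const struct fw_info_v3_2 *)h)->bootup_sclk;
		return 0;
	}
	if (h->format_revision == 3 && h->content_revision == 1) {
		*sclk = ((const struct fw_info_v3_1 *)h)->bootup_sclk;
		return 0;
	}
	return -1;	/* unknown revision: refuse to guess */
}

int main(void)
{
	struct fw_info_v3_2 fw = { .h = { 3, 2 }, .bootup_sclk = 85200 };
	uint32_t sclk;

	if (!read_bootup_sclk(&fw.h, &sclk))
		printf("bootup sclk: %u (10 kHz units)\n", sclk);
	return 0;
}
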
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe10aa4db5e6..22e21668c93a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
uint32_t ulUClk;
uint32_t ulSocClk;
uint32_t ulDCEFClk;
+ uint32_t ulEClk;
+ uint32_t ulVClk;
+ uint32_t ulDClk;
uint16_t usVddc;
uint16_t usVddci;
uint16_t usMvddc;
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
uint8_t acggfxclkspreadenabled;
uint8_t acggfxclkspreadpercent;
uint16_t acggfxclkspreadfreq;
+
+ uint8_t Vr2_I2C_address;
};
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 35bd9870ab10..4e1fd5393845 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -183,10 +183,10 @@ static int get_vddc_lookup_table(
ATOM_Tonga_Voltage_Lookup_Record,
entries, vddc_lookup_pp_tables, i);
record->us_calculated = 0;
- record->us_vdd = atom_record->usVdd;
- record->us_cac_low = atom_record->usCACLow;
- record->us_cac_mid = atom_record->usCACMid;
- record->us_cac_high = atom_record->usCACHigh;
+ record->us_vdd = le16_to_cpu(atom_record->usVdd);
+ record->us_cac_low = le16_to_cpu(atom_record->usCACLow);
+ record->us_cac_mid = le16_to_cpu(atom_record->usCACMid);
+ record->us_cac_high = le16_to_cpu(atom_record->usCACHigh);
}
*lookup_table = table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index d4bc83e81389..a63e00653324 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -993,7 +993,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = latency_required ?
smu10_get_mem_latency(hwmgr,
pclk_vol_table->entries[i].clk) :
@@ -1044,7 +1044,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
clocks->num_levels++;
}
@@ -1108,9 +1108,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
}
static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct smu10_hwmgr *data = hwmgr->backend;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
Watermarks_t *table = &(data->water_marks_table);
int result = 0;
@@ -1126,7 +1127,7 @@ static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
}
-static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
+static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}
@@ -1182,10 +1183,11 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.asic_setup = smu10_setup_asic_task,
.power_state_set = smu10_set_power_state_tasks,
.dynamic_state_management_disable = smu10_disable_dpm_tasks,
- .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+ .powergate_mmhub = smu10_powergate_mmhub,
.smus_notify_pwe = smu10_smus_notify_pwe,
.gfx_off_control = smu10_gfx_off_control,
.display_clock_voltage_request = smu10_display_clock_voltage_request,
+ .powergate_gfx = smu10_gfx_off_control,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
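
The clocks_in_khz changes in this file (and the matching ones in smu7, smu8, and vega10 below) all stem from the same unit mismatch: the dependency tables evidently store clocks in 10 kHz units while the pp interface reports kHz, hence the * 10. A sketch of the conversion with hypothetical table entries:

#include <stdint.h>
#include <stdio.h>

#define MAX_LEVELS 8

struct clock_level { uint32_t clocks_in_khz; };

/* Table entries are stored in 10 kHz units; the API reports kHz. */
static void fill_levels_khz(const uint32_t *table_10khz, unsigned int count,
			    struct clock_level *out)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		out[i].clocks_in_khz = table_10khz[i] * 10;
}

int main(void)
{
	uint32_t table[] = { 30000, 60000, 85200 };	/* 300, 600, 852 MHz */
	struct clock_level levels[MAX_LEVELS];
	int i;

	fill_levels_khz(table, 3, levels);
	for (i = 0; i < 3; i++)
		printf("level %d: %u kHz\n", i, levels[i].clocks_in_khz);
	return 0;
}
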
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 6d72a5600917..683b29a99366 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -39,13 +39,6 @@ static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
PPSMC_MSG_VCEDPM_Disable);
}
-static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr, enable ?
- PPSMC_MSG_SAMUDPM_Enable :
- PPSMC_MSG_SAMUDPM_Disable);
-}
-
static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
if (!bgate)
@@ -60,13 +53,6 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
}
-static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- if (!bgate)
- smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
- return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
@@ -107,35 +93,15 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SAMPowerOFF);
- return 0;
-}
-
-static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SAMPowerON);
- return 0;
-}
-
int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = false;
data->vce_power_gated = false;
- data->samu_power_gated = false;
smu7_powerup_uvd(hwmgr);
smu7_powerup_vce(hwmgr);
- smu7_powerup_samu(hwmgr);
return 0;
}
@@ -195,26 +161,6 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
}
}
-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->samu_power_gated == bgate)
- return 0;
-
- data->samu_power_gated = bgate;
-
- if (bgate) {
- smu7_update_samu_dpm(hwmgr, true);
- smu7_powerdown_samu(hwmgr);
- } else {
- smu7_powerup_samu(hwmgr);
- smu7_update_samu_dpm(hwmgr, false);
- }
-
- return 0;
-}
-
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id)
{
@@ -470,7 +416,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
* Powerplay will only control the static per CU Power Gating.
* Dynamic per CU Power Gating will be done in gfx.
*/
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
{
struct amdgpu_device *adev = hwmgr->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 1ddce023218a..fc8f8a6acc72 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -29,11 +29,10 @@
void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id);
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index f8e866ceda02..052e60dfaf9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,8 @@
#include "processpptables.h"
#include "pp_thermal.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
@@ -885,6 +887,60 @@ static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
data->odn_dpm_table.max_vddc = max_vddc;
}
+static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i;
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+
+ if (table_info == NULL)
+ return;
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
+ data->dpm_table.sclk_table.dpm_levels[i].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ break;
+ }
+ }
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
+ data->dpm_table.mclk_table.dpm_levels[i].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ break;
+ }
+ }
+
+ dep_table = table_info->vdd_dep_on_mclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
+
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+ return;
+ }
+ }
+
+ dep_table = table_info->vdd_dep_on_sclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+ return;
+ }
+ }
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+ data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+ }
+}
+
static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -904,10 +960,13 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
/* initialize ODN table */
if (hwmgr->od_enabled) {
- smu7_setup_voltage_range_from_vbios(hwmgr);
- smu7_odn_initial_default_setting(hwmgr);
+ if (data->odn_dpm_table.max_vddc) {
+ smu7_check_dpm_table_updated(hwmgr);
+ } else {
+ smu7_setup_voltage_range_from_vbios(hwmgr);
+ smu7_odn_initial_default_setting(hwmgr);
+ }
}
-
return 0;
}
@@ -1521,7 +1580,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->current_profile_setting.sclk_up_hyst = 0;
data->current_profile_setting.sclk_down_hyst = 100;
data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
- data->current_profile_setting.bupdate_sclk = 1;
+ data->current_profile_setting.bupdate_mclk = 1;
data->current_profile_setting.mclk_up_hyst = 0;
data->current_profile_setting.mclk_down_hyst = 100;
data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
@@ -2820,7 +2879,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *request_ps,
const struct pp_power_state *current_ps)
{
-
+ struct amdgpu_device *adev = hwmgr->adev;
struct smu7_power_state *smu7_ps =
cast_phw_smu7_power_state(&request_ps->hardware);
uint32_t sclk;
@@ -2843,12 +2902,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
"VI should always have 2 performance levels",
);
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+ max_limits = adev->pm.ac_power ?
&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
&(hwmgr->dyn_state.max_clock_voltage_on_dc);
/* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
+ if (!adev->pm.ac_power) {
for (i = 0; i < smu7_ps->performance_level_count; i++) {
if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
@@ -3126,7 +3185,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
state_entry->ucPCIEGenLow);
performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
+ state_entry->ucPCIELaneLow);
performance_level = &(smu7_power_state->performance_levels
[smu7_power_state->performance_level_count++]);
@@ -3717,8 +3776,9 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dpm_table->count; i++) {
- if ((dpm_table->dpm_levels[i].value < low_limit)
- || (dpm_table->dpm_levels[i].value > high_limit))
+ /* skip the trim if OD is enabled */
+ if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+ || dpm_table->dpm_levels[i].value > high_limit))
dpm_table->dpm_levels[i].enabled = false;
else
dpm_table->dpm_levels[i].enabled = true;
@@ -3762,10 +3822,8 @@ static int smu7_generate_dpm_level_enable_mask(
const struct smu7_power_state *smu7_ps =
cast_const_phw_smu7_power_state(states->pnew_state);
- /*skip the trim if od is enabled*/
- if (!hwmgr->od_enabled)
- result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+ result = smu7_trim_dpm_states(hwmgr, smu7_ps);
if (result)
return result;
@@ -4049,17 +4107,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
AMDGPU_IH_CLIENTID_LEGACY,
- 230,
+ VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
source);
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
AMDGPU_IH_CLIENTID_LEGACY,
- 231,
+ VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
source);
/* Register CTF(GPIO_19) interrupt */
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
AMDGPU_IH_CLIENTID_LEGACY,
- 83,
+ VISLANDS30_IV_SRCID_GPIO_19,
source);
return 0;
@@ -4244,7 +4302,6 @@ static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
data->uvd_power_gated = false;
data->vce_power_gated = false;
- data->samu_power_gated = false;
return 0;
}
@@ -4555,12 +4612,12 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
return -EINVAL;
dep_sclk_table = table_info->vdd_dep_on_sclk;
for (i = 0; i < dep_sclk_table->count; i++)
- clocks->clock[i] = dep_sclk_table->entries[i].clk;
+ clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
clocks->count = dep_sclk_table->count;
} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
for (i = 0; i < sclk_table->count; i++)
- clocks->clock[i] = sclk_table->entries[i].clk;
+ clocks->clock[i] = sclk_table->entries[i].clk * 10;
clocks->count = sclk_table->count;
}
@@ -4592,7 +4649,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
return -EINVAL;
dep_mclk_table = table_info->vdd_dep_on_mclk;
for (i = 0; i < dep_mclk_table->count; i++) {
- clocks->clock[i] = dep_mclk_table->entries[i].clk;
+ clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
clocks->latency[i] = smu7_get_mem_latency(hwmgr,
dep_mclk_table->entries[i].clk);
}
@@ -4600,7 +4657,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
for (i = 0; i < mclk_table->count; i++)
- clocks->clock[i] = mclk_table->entries[i].clk;
+ clocks->clock[i] = mclk_table->entries[i].clk * 10;
clocks->count = mclk_table->count;
}
return 0;
@@ -4739,60 +4796,6 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
return true;
}
-static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i;
-
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
- struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
-
- if (table_info == NULL)
- return;
-
- for (i=0; i<data->dpm_table.sclk_table.count; i++) {
- if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
- data->dpm_table.sclk_table.dpm_levels[i].value) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- break;
- }
- }
-
- for (i=0; i<data->dpm_table.mclk_table.count; i++) {
- if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
- data->dpm_table.mclk_table.dpm_levels[i].value) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- break;
- }
- }
-
- dep_table = table_info->vdd_dep_on_mclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
-
- for (i=0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
- return;
- }
- }
-
- dep_table = table_info->vdd_dep_on_sclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
- for (i=0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
- return;
- }
- }
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
- data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
- }
-}
-
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
@@ -5043,7 +5046,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_fan_control_mode = smu7_get_fan_control_mode,
.force_clock_level = smu7_force_clock_level,
.print_clock_levels = smu7_print_clock_levels,
- .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
+ .powergate_gfx = smu7_powergate_gfx,
.get_sclk_od = smu7_get_sclk_od,
.set_sclk_od = smu7_set_sclk_od,
.get_mclk_od = smu7_get_mclk_od,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index c91e75db6a8e..3784ce6e50ab 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -310,7 +310,6 @@ struct smu7_hwmgr {
/* ---- Power Gating States ---- */
bool uvd_power_gated;
bool vce_power_gated;
- bool samu_power_gated;
bool need_long_memory_training;
/* Application power optimization parameters */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index c952845833d7..5e19f5977eb1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 50690c72b2ea..0adfc5392cd3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
return 0;
}
+/* convert from 8-bit VID to real voltage in mV*4 */
static uint32_t smu8_convert_8Bit_index_to_voltage(
struct pp_hwmgr *hwmgr, uint16_t voltage)
{
@@ -1604,17 +1605,17 @@ static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
switch (type) {
case amd_pp_disp_clock:
for (i = 0; i < clocks->count; i++)
- clocks->clock[i] = data->sys_info.display_clock[i];
+ clocks->clock[i] = data->sys_info.display_clock[i] * 10;
break;
case amd_pp_sys_clock:
table = hwmgr->dyn_state.vddc_dependency_on_sclk;
for (i = 0; i < clocks->count; i++)
- clocks->clock[i] = table->entries[i].clk;
+ clocks->clock[i] = table->entries[i].clk * 10;
break;
case amd_pp_mem_clock:
clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
for (i = 0; i < clocks->count; i++)
- clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
+ clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
break;
default:
return -1;
@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_VDDNB:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
- vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
+ vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
*((uint32_t *)value) = vddnb;
return 0;
case AMDGPU_PP_SENSOR_VDDGFX:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
- vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+ vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
*((uint32_t *)value) = vddgfx;
return 0;
case AMDGPU_PP_SENSOR_UVD_VCLK:
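
The new comment above spells out why the sensor paths now divide by 4: the helper returns voltage in quarter-millivolt units. A standalone sketch of the readout, assuming the SVI2-style mapping mV*4 = 6200 - 25 * vid (the diff does not show the helper body, so that formula is an assumption of this sketch):

#include <stdint.h>
#include <stdio.h>

/*
 * Returns voltage in quarter-millivolt units (mV * 4), matching the
 * comment in the hunk above. The exact formula is assumed here.
 */
static uint32_t vid_to_mv_times4(uint8_t vid)
{
	return 6200 - (uint32_t)vid * 25;
}

int main(void)
{
	uint8_t vid = 0x40;	/* example VID read from the CURRENTVID register */
	uint32_t mv = vid_to_mv_times4(vid) / 4;	/* scale to real mV */

	printf("vddnb = %u mV\n", mv);
	return 0;
}
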
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 93a3d022ba47..2aab1b475945 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -25,6 +25,9 @@
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "atom.h"
+#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
+#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
uint8_t convert_to_vid(uint16_t vddc)
{
@@ -543,17 +546,17 @@ int phm_irq_process(struct amdgpu_device *adev,
uint32_t src_id = entry->src_id;
if (client_id == AMDGPU_IH_CLIENTID_LEGACY) {
- if (src_id == 230)
+ if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
- else if (src_id == 231)
+ else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
- else if (src_id == 83)
+ else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
@@ -594,17 +597,17 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_THM,
- 0,
+ THM_9_0__SRCID__THM_DIG_THERM_L2H,
source);
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_THM,
- 1,
+ THM_9_0__SRCID__THM_DIG_THERM_H2L,
source);
/* Register CTF(GPIO_19) interrupt */
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_ROM_SMUIO,
- 83,
+ SMUIO_9_0__SRCID__SMUIO_GPIO19,
source);
return 0;
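
phm_irq_process() above and smu9_register_irq_handlers() below both trade bare interrupt numbers for the named source-ID constants pulled in at the top of the file, so registration and dispatch can no longer drift apart. A toy dispatcher showing the same pattern, using the legacy numeric values from this diff as placeholders:

#include <stdint.h>
#include <stdio.h>

/* Placeholder source IDs; the kernel takes these from the ivsrcid headers. */
enum {
	SRCID_THERMAL_LOW_TO_HIGH = 230,
	SRCID_THERMAL_HIGH_TO_LOW = 231,
	SRCID_GPIO_19             = 83,
};

/* Dispatch on the named constant instead of a bare magic number. */
static void irq_process(uint32_t src_id)
{
	switch (src_id) {
	case SRCID_THERMAL_LOW_TO_HIGH:
		printf("GPU over temperature range detected\n");
		break;
	case SRCID_THERMAL_HIGH_TO_LOW:
		printf("GPU under temperature range detected\n");
		break;
	case SRCID_GPIO_19:
		printf("GPU critical temperature fault detected\n");
		break;
	}
}

int main(void)
{
	irq_process(SRCID_GPIO_19);
	return 0;
}
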
@@ -652,7 +655,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
}
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
uint32_t i;
struct watermarks *table = wt_table;
@@ -660,49 +663,49 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
if (!table || !wm_with_clock_ranges)
return -EINVAL;
- if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)
+ if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
return -EINVAL;
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+ for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
table->WatermarkRow[1][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
100);
table->WatermarkRow[1][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+ wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
}
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+ for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
table->WatermarkRow[0][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+ wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
return 0;
}
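
smu_set_watermarks_for_clocks_ranges() above repacks DM's per-range watermarks into the SMU table, converting the kHz inputs into the MHz units the firmware rows hold (hence the / 1000) and bounding each set list at four entries. A condensed sketch of one row of that repacking, with simplified types in place of the real Watermarks_t layout:

#include <stdint.h>
#include <stdio.h>

struct wm_range { uint32_t min_clk_khz, max_clk_khz; uint8_t set_id; };
struct wm_row   { uint16_t min_mhz, max_mhz; uint8_t set_id; };

/* Repack DM watermark ranges (kHz) into firmware rows (MHz), max 4 sets. */
static int pack_wm_rows(const struct wm_range *in, uint32_t n, struct wm_row *out)
{
	uint32_t i;

	if (n > 4)
		return -1;	/* the firmware table only has four rows per type */

	for (i = 0; i < n; i++) {
		out[i].min_mhz = (uint16_t)(in[i].min_clk_khz / 1000);
		out[i].max_mhz = (uint16_t)(in[i].max_clk_khz / 1000);
		out[i].set_id  = in[i].set_id;
	}
	return 0;
}

int main(void)
{
	struct wm_range in[2] = {
		{ 300000, 600000, 0 },	/* 300-600 MHz */
		{ 600000, 900000, 1 },	/* 600-900 MHz */
	};
	struct wm_row out[4];

	if (!pack_wm_rows(in, 2, out))
		printf("set %u: %u-%u MHz\n", out[0].set_id, out[0].min_mhz, out[0].max_mhz);
	return 0;
}
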
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index 916cc01e7652..5454289d5226 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -107,7 +107,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 05e680d55dbb..fb86c24394ff 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -55,12 +55,6 @@
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
-#define MEM_FREQ_LOW_LATENCY 25000
-#define MEM_FREQ_HIGH_LATENCY 80000
-#define MEM_LATENCY_HIGH 245
-#define MEM_LATENCY_LOW 35
-#define MEM_LATENCY_ERR 0xFFFF
-
#define mmDF_CS_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
@@ -295,7 +289,15 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
+ struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
uint32_t i;
+ int result;
+
+ result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
+ if (!result) {
+ data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
+ data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
+ }
od_lookup_table = &odn_table->vddc_lookup_table;
vddc_lookup_table = table_info->vddc_lookup_table;
@@ -2078,9 +2080,6 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_AVFS].supported) {
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
- data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
- data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
-
pp_table->MinVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
pp_table->MaxVoltageVid = (uint8_t)
@@ -2414,6 +2413,40 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
return result;
}
+static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+ uint32_t i;
+
+ dep_table = table_info->vdd_dep_on_mclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
+
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+ return;
+ }
+ }
+
+ dep_table = table_info->vdd_dep_on_sclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+ return;
+ }
+ }
+
+ if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+ data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+ }
+}
+
/**
* Initializes the SMC table and uploads it
*
@@ -2430,6 +2463,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_voltage_table voltage_table;
struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
+ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
result = vega10_setup_default_dpm_tables(hwmgr);
PP_ASSERT_WITH_CODE(!result,
@@ -2437,8 +2471,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
return result);
/* initialize ODN table */
- if (hwmgr->od_enabled)
- vega10_odn_initial_default_setting(hwmgr);
+ if (hwmgr->od_enabled) {
+ if (odn_table->max_vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+ vega10_check_dpm_table_updated(hwmgr);
+ } else {
+ vega10_odn_initial_default_setting(hwmgr);
+ }
+ }
pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
VOLTAGE_OBJ_SVID2, &voltage_table);
@@ -2861,11 +2901,6 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
vega10_enable_disable_PCC_limit_feature(hwmgr, true);
- if ((hwmgr->smu_version == 0x001c2c00) ||
- (hwmgr->smu_version == 0x001c2d00))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
-
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
@@ -3061,6 +3096,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *request_ps,
const struct pp_power_state *current_ps)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct vega10_power_state *vega10_ps =
cast_phw_vega10_power_state(&request_ps->hardware);
uint32_t sclk;
@@ -3086,12 +3122,12 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (vega10_ps->performance_level_count != 2)
pr_info("VI should always have 2 performance levels");
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+ max_limits = adev->pm.ac_power ?
&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
&(hwmgr->dyn_state.max_clock_voltage_on_dc);
/* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
+ if (!adev->pm.ac_power) {
for (i = 0; i < vega10_ps->performance_level_count; i++) {
if (vega10_ps->performance_levels[i].mem_clock >
max_limits->mclk)
@@ -3181,7 +3217,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
/* Find the lowest MCLK frequency that is within
* the tolerable latency defined in DAL
*/
- latency = 0;
+ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
for (i = 0; i < data->mclk_latency_table.count; i++) {
if ((data->mclk_latency_table.entries[i].latency <= latency) &&
(data->mclk_latency_table.entries[i].frequency >=
@@ -3223,10 +3259,25 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
{
int result = 0;
struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_dpm_table *dpm_table = &data->dpm_table;
+ struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
+ struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
+ int count;
if (!data->need_update_dpm_table)
return 0;
+ if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+ for (count = 0; count < dpm_table->gfx_table.count; count++)
+ dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+ }
+
+ odn_clk_table = &odn_table->vdd_dep_on_mclk;
+ if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+ for (count = 0; count < dpm_table->mem_table.count; count++)
+ dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+ }
+
if (data->need_update_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
@@ -3674,7 +3725,7 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 0 : 1);
+ has_disp ? 1 : 0);
}
int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3749,7 +3800,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
uint32_t i;
struct pp_display_clock_request clock_req;
- if (hwmgr->display_config->num_display > 1)
+ if ((hwmgr->display_config->num_display > 1) &&
+ !hwmgr->display_config->multi_monitor_in_sync &&
+ !hwmgr->display_config->nb_pstate_switch_disable)
vega10_notify_smc_display_change(hwmgr, false);
else
vega10_notify_smc_display_change(hwmgr, true);
@@ -3765,7 +3818,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (i < dpm_table->count) {
clock_req.clock_type = amd_pp_dcef_clock;
- clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
+ clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -4022,28 +4075,17 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
table_info->vdd_dep_on_sclk;
uint32_t i;
+ clocks->num_levels = 0;
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
clocks->data[clocks->num_levels].clocks_in_khz =
- dep_table->entries[i].clk;
+ dep_table->entries[i].clk * 10;
clocks->num_levels++;
}
}
}
-static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
- uint32_t clock)
-{
- if (clock >= MEM_FREQ_LOW_LATENCY &&
- clock < MEM_FREQ_HIGH_LATENCY)
- return MEM_LATENCY_HIGH;
- else if (clock >= MEM_FREQ_HIGH_LATENCY)
- return MEM_LATENCY_LOW;
- else
- return MEM_LATENCY_ERR;
-}
-
static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
@@ -4052,26 +4094,22 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_mclk;
struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t j = 0;
uint32_t i;
- clocks->num_levels = 0;
- data->mclk_latency_table.count = 0;
-
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
- clocks->data[clocks->num_levels].clocks_in_khz =
- data->mclk_latency_table.entries
- [data->mclk_latency_table.count].frequency =
- dep_table->entries[i].clk;
- clocks->data[clocks->num_levels].latency_in_us =
- data->mclk_latency_table.entries
- [data->mclk_latency_table.count].latency =
- vega10_get_mem_latency(hwmgr,
- dep_table->entries[i].clk);
- clocks->num_levels++;
- data->mclk_latency_table.count++;
+
+ clocks->data[j].clocks_in_khz =
+ dep_table->entries[i].clk * 10;
+ data->mclk_latency_table.entries[j].frequency =
+ dep_table->entries[i].clk;
+ clocks->data[j].latency_in_us =
+ data->mclk_latency_table.entries[j].latency = 25;
+ j++;
}
}
+ clocks->num_levels = data->mclk_latency_table.count = j;
}
static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
@@ -4084,7 +4122,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
@@ -4100,7 +4138,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
@@ -4160,7 +4198,7 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
entries[dep_table->entries[i].vddInd].us_vdd);
clocks->num_levels++;
@@ -4173,9 +4211,10 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_range)
{
struct vega10_hwmgr *data = hwmgr->backend;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
int result = 0;
@@ -4695,40 +4734,6 @@ static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
return true;
}
-static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
-{
- struct vega10_hwmgr *data = hwmgr->backend;
- struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
- struct phm_ppt_v2_information *table_info = hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
- struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
- uint32_t i;
-
- dep_table = table_info->vdd_dep_on_mclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
-
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
- return;
- }
- }
-
- dep_table = table_info->vdd_dep_on_sclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
- return;
- }
- }
-
- if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
- data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
- }
-}
-
static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type)
{
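
vega10_check_dpm_table_updated(), moved earlier in this file so vega10_init_smc_table() can call it, reduces to a dirty-flag computation: any user-edited voltage marks both the voltage and the owning clock domain, and a pending voltage edit is finally converted into a rebuild of both clock tables. A standalone sketch of that flag propagation, with hypothetical flag values standing in for the kernel's DPMTABLE_* bits:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag values; the kernel defines its own DPMTABLE_* bits. */
#define OD_UPDATE_SCLK 0x1
#define OD_UPDATE_MCLK 0x2
#define OD_UPDATE_VDDC 0x4

/* Compare stock vs. user (ODN) voltages for one clock domain. */
static uint32_t check_dep_table(const uint16_t *vddc, const uint16_t *odn_vddc,
				unsigned int n, uint32_t clk_flag)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (vddc[i] != odn_vddc[i])
			return OD_UPDATE_VDDC | clk_flag;
	return 0;
}

int main(void)
{
	uint16_t stock[3] = { 900, 950, 1000 };
	uint16_t odn[3]   = { 900, 975, 1000 };	/* user raised level 1 */
	uint32_t flags;

	flags = check_dep_table(stock, odn, 3, OD_UPDATE_SCLK);

	/* A VDDC edit forces both clock tables to be repopulated. */
	if (flags & OD_UPDATE_VDDC) {
		flags &= ~OD_UPDATE_VDDC;
		flags |= OD_UPDATE_SCLK | OD_UPDATE_MCLK;
	}
	printf("flags=0x%x\n", flags);
	return 0;
}
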
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index aadd6cbc7e85..339820da9e6a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -370,7 +370,6 @@ struct vega10_hwmgr {
/* ---- Power Gating States ---- */
bool uvd_power_gated;
bool vce_power_gated;
- bool samu_power_gated;
bool need_long_memory_training;
/* Internal settings to apply the application power optimization parameters */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 782e2098824d..0789d64246ca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disallowed_features = 0x0;
data->registry_data.od_state_in_dc_support = 0;
+ data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
data->registry_data.log_avfs_param = 0;
@@ -422,6 +423,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+ if (hwmgr->feature_mask & PP_GFXOFF_MASK)
+ data->gfxoff_controlled_by_driver = true;
+ else
+ data->gfxoff_controlled_by_driver = false;
+
return result;
}
@@ -453,43 +459,36 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
*/
static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
{
- dpm_state->soft_min_level = 0xff;
- dpm_state->soft_max_level = 0xff;
- dpm_state->hard_min_level = 0xff;
- dpm_state->hard_max_level = 0xff;
+ dpm_state->soft_min_level = 0x0;
+ dpm_state->soft_max_level = 0xffff;
+ dpm_state->hard_min_level = 0x0;
+ dpm_state->hard_max_level = 0xffff;
}
-static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
- PPCLK_e clkID, uint32_t *num_dpm_level)
+static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+ PPCLK_e clk_id, uint32_t *num_of_levels)
{
- int result;
- /*
- * SMU expects the Clock ID to be in the top 16 bits.
- * Lower 16 bits specify the level however 0xFF is a
- * special argument the returns the total number of levels
- */
- PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0,
- "[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!",
- return -EINVAL);
-
- result = vega12_read_arg_from_smc(hwmgr, num_dpm_level);
+ int ret = 0;
- PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER,
- "[GetNumberDPMLevel] Number of DPM levels is greater than limit",
- return -EINVAL);
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | 0xFF));
+ PP_ASSERT_WITH_CODE(!ret,
+ "[GetNumOfDpmLevel] failed to get dpm levels!",
+ return ret);
- PP_ASSERT_WITH_CODE(*num_dpm_level != 0,
- "[GetNumberDPMLevel] Number of CLK Levels is zero!",
- return -EINVAL);
+ *num_of_levels = smum_get_argument(hwmgr);
+ PP_ASSERT_WITH_CODE(*num_of_levels > 0,
+ "[GetNumOfDpmLevel] number of clk levels is invalid!",
+ return -EINVAL);
- return result;
+ return ret;
}
static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
PPCLK_e clkID, uint32_t index, uint32_t *clock)
{
- int result;
+ int result = 0;
/*
* SMU expects the Clock ID to be in the top 16 bits.
@@ -500,15 +499,36 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
"[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
return -EINVAL);
- result = vega12_read_arg_from_smc(hwmgr, clock);
-
- PP_ASSERT_WITH_CODE(*clock != 0,
- "[GetDPMFrequencyByIndex] Failed to get dpm frequency by index.!",
- return -EINVAL);
+ *clock = smum_get_argument(hwmgr);
return result;
}
+static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
+ struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
+{
+ int ret = 0;
+ uint32_t i, num_of_levels, clk;
+
+ ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupSingleDpmTable] failed to get clk levels!",
+ return ret);
+
+ dpm_table->count = num_of_levels;
+
+ for (i = 0; i < num_of_levels; i++) {
+ ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupSingleDpmTable] failed to get clk of specific level!",
+ return ret);
+ dpm_table->dpm_levels[i].value = clk;
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ return ret;
+}
+
/*
* This function initializes all DPM state tables
* for SMU based on the dependency table.
@@ -519,224 +539,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
*/
static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
- uint32_t num_levels, i, clock;
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
-
struct vega12_single_dpm_table *dpm_table;
+ int ret = 0;
memset(&data->dpm_table, 0, sizeof(data->dpm_table));
- /* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */
+ /* socclk */
dpm_table = &(data->dpm_table.soc_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_SOCCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* gfxclk */
dpm_table = &(data->dpm_table.gfx_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_GFXCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
- /* Initialize Mclk DPM table based on allow Mclk values */
- dpm_table = &(data->dpm_table.mem_table);
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_UCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ /* memclk */
+ dpm_table = &(data->dpm_table.mem_table);
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* eclk */
dpm_table = &(data->dpm_table.eclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_ECLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* vclk */
dpm_table = &(data->dpm_table.vclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_VCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* dclk */
dpm_table = &(data->dpm_table.dclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
- /* Assume there is no headless Vega12 for now */
+ /* dcefclk */
dpm_table = &(data->dpm_table.dcef_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_DCEFCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DCEFCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* pixclk */
dpm_table = &(data->dpm_table.pixel_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_PIXCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_PIXCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* dispclk */
dpm_table = &(data->dpm_table.display_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_DISPCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DISPCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* phyclk */
dpm_table = &(data->dpm_table.phy_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_PHYCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_PHYCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* save a copy of the default DPM table */
@@ -803,6 +735,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+ data->vbios_boot_state.eclock = boot_up_values.ulEClk;
+ data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+ data->vbios_boot_state.vclock = boot_up_values.ulVClk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
@@ -844,6 +779,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
return 0;
}
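+/*
+ * Seed the UVD/VCE powergate flags from the feature enablement state so
+ * the powergate callbacks below can skip requests that would not change
+ * anything.
+ */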
+static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->uvd_power_gated = true;
+ data->vce_power_gated = true;
+
+ if (data->smu_features[GNLD_DPM_UVD].enabled)
+ data->uvd_power_gated = false;
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled)
+ data->vce_power_gated = false;
+}
+
static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
@@ -862,12 +812,11 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
data->smu_features[i].enabled = enabled;
data->smu_features[i].supported = enabled;
- PP_ASSERT(
- !data->smu_features[i].allowed || enabled,
- "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
}
}
+ vega12_init_powergate_state(hwmgr);
+
return 0;
}
@@ -923,6 +872,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
return result;
}
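+/*
+ * The SMU frequency-query messages take the clock ID packed into the
+ * upper 16 bits of the parameter and return the frequency (in MHz, as
+ * the callers here assume) through the argument register read back by
+ * smum_get_argument().
+ */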
+static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
+ PPCLK_e clkid, struct vega12_clock_range *clock)
+{
+ /* AC Max */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get max ac clock from SMC!",
+ return -EINVAL);
+ clock->ACMax = smum_get_argument(hwmgr);
+
+ /* AC Min */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get min ac clock from SMC!",
+ return -EINVAL);
+ clock->ACMin = smum_get_argument(hwmgr);
+
+ /* DC Max */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get max dc clock from SMC!",
+ return -EINVAL);
+ clock->DCMax = smum_get_argument(hwmgr);
+
+ return 0;
+}
+
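+/*
+ * Cache the AC/DC limits of every clock once at DPM-enable time so that
+ * later range queries can be served from clk_range[] without another
+ * trip to the SMU.
+ */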
+static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t i;
+
+ for (i = 0; i < PPCLK_COUNT; i++)
+ PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
+ i, &(data->clk_range[i])),
+ "Failed to get clk range from SMC!",
+ return -EINVAL);
+
+ return 0;
+}
+
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -950,6 +941,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to power control set level!",
result = tmp_result);
+ result = vega12_get_all_clock_ranges(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to get all clock ranges!",
+ return result);
+
result = vega12_odn_initialize_default_settings(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to power control set level!",
@@ -978,76 +974,172 @@ static uint32_t vega12_find_lowest_dpm_level(
break;
}
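+ /* no enabled level found: fall back to level 0 and re-enable it */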
+ if (i >= table->count) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
return i;
}
static uint32_t vega12_find_highest_dpm_level(
struct vega12_single_dpm_table *table)
{
- uint32_t i = 0;
+ int32_t i = 0;
+ PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
+ "[FindHighestDPMLevel] DPM Table has too many entries!",
+ return MAX_REGULAR_DPM_NUMBER - 1);
- if (table->count <= MAX_REGULAR_DPM_NUMBER) {
- for (i = table->count; i > 0; i--) {
- if (table->dpm_levels[i - 1].enabled)
- return i - 1;
- }
- } else {
- pr_info("DPM Table Has Too Many Entries!");
- return MAX_REGULAR_DPM_NUMBER - 1;
+ for (i = table->count - 1; i >= 0; i--) {
+ if (table->dpm_levels[i].enabled)
+ break;
}
- return i;
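+ /* same fallback as the lowest-level search: re-enable level 0 */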
+ if (i < 0) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return (uint32_t)i;
}
static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = hwmgr->backend;
- if (data->smc_state_table.gfx_boot_level !=
- data->dpm_table.gfx_table.dpm_state.soft_min_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinByFreq,
- PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
- data->dpm_table.gfx_table.dpm_state.soft_min_level =
- data->smc_state_table.gfx_boot_level;
+ uint32_t min_freq;
+ int ret = 0;
+
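+ /* SetSoftMinByFreq packs the clock ID into the high word and the
+ * target frequency (MHz) into the low 16 bits.
+ */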
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min gfxclk !",
+ return ret);
}
- if (data->smc_state_table.mem_boot_level !=
- data->dpm_table.mem_table.dpm_state.soft_min_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinByFreq,
- PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
- data->dpm_table.mem_table.dpm_state.soft_min_level =
- data->smc_state_table.mem_boot_level;
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min memclk !",
+ return ret);
+
+ min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set hard min memclk !",
+ return ret);
}
- return 0;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min vclk!",
+ return ret);
+
+ min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min dclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min eclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min socclk!",
+ return ret);
+ }
+
+ return ret;
}
static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = hwmgr->backend;
- if (data->smc_state_table.gfx_max_level !=
- data->dpm_table.gfx_table.dpm_state.soft_max_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMaxByFreq,
- /* plus the vale by 1 to align the resolution */
- PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
- data->dpm_table.gfx_table.dpm_state.soft_max_level =
- data->smc_state_table.gfx_max_level;
+ uint32_t max_freq;
+ int ret = 0;
+
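+ /* same (clkid << 16) | MHz packing as the soft-min path above */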
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max gfxclk!",
+ return ret);
}
- if (data->smc_state_table.mem_max_level !=
- data->dpm_table.mem_table.dpm_state.soft_max_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMaxByFreq,
- /* plus the vale by 1 to align the resolution */
- PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
- data->dpm_table.mem_table.dpm_state.soft_max_level =
- data->smc_state_table.mem_max_level;
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max memclk!",
+ return ret);
}
- return 0;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max vclk!",
+ return ret);
+
+ max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max dclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max eclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max socclk!",
+ return ret);
+ }
+
+ return ret;
}
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
@@ -1123,7 +1215,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
"Failed to get current package power!",
return -EINVAL);
- vega12_read_arg_from_smc(hwmgr, &value);
+ value = smum_get_argument(hwmgr);
/* power value is an integer */
*query = value << 8;
#endif
@@ -1136,14 +1228,11 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
*gfx_freq = 0;
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(
- vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0,
- "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed",
- return -1);
+ return -EINVAL);
+ gfx_clk = smum_get_argument(hwmgr);
*gfx_freq = gfx_clk * 100;
@@ -1159,11 +1248,8 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(
- vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0,
- "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed",
- return -1);
+ return -EINVAL);
+ mem_clk = smum_get_argument(hwmgr);
*mclk_freq = mem_clk * 100;
@@ -1180,16 +1266,12 @@ static int vega12_get_current_activity_percent(
#if 0
ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
if (!ret) {
- ret = vega12_read_arg_from_smc(hwmgr, &current_activity);
- if (!ret) {
- if (current_activity > 100) {
- PP_ASSERT(false,
- "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
- current_activity = 100;
- }
- } else
+ current_activity = smum_get_argument(hwmgr);
+ if (current_activity > 100) {
PP_ASSERT(false,
- "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!");
+ "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
+ current_activity = 100;
+ }
} else
PP_ASSERT(false,
"[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
@@ -1252,7 +1334,7 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 0 : 1);
+ has_disp ? 1 : 0);
return 0;
}
@@ -1270,7 +1352,6 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
switch (clk_type) {
case amd_pp_dcef_clock:
- clk_freq = clock_req->clock_freq_in_khz / 100;
clk_select = PPCLK_DCEFCLK;
break;
case amd_pp_disp_clock:
@@ -1306,9 +1387,10 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
(struct vega12_hwmgr *)(hwmgr->backend);
struct PP_Clocks min_clocks = {0};
struct pp_display_clock_request clock_req;
- uint32_t clk_request;
- if (hwmgr->display_config->num_display > 1)
+ if ((hwmgr->display_config->num_display > 1) &&
+ !hwmgr->display_config->multi_monitor_in_sync &&
+ !hwmgr->display_config->nb_pstate_switch_disable)
vega12_notify_smc_display_change(hwmgr, false);
else
vega12_notify_smc_display_change(hwmgr, true);
@@ -1319,7 +1401,7 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
clock_req.clock_type = amd_pp_dcef_clock;
- clock_req.clock_freq_in_khz = min_clocks.dcefClock;
+ clock_req.clock_freq_in_khz = min_clocks.dcefClock / 10;
if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
if (data->smu_features[GNLD_DS_DCEFCLK].supported)
PP_ASSERT_WITH_CODE(
@@ -1333,15 +1415,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
}
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
- clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
- "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
- return -1);
- data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
- }
-
return 0;
}
@@ -1350,12 +1423,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
- data->smc_state_table.gfx_boot_level =
- data->smc_state_table.gfx_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- data->smc_state_table.mem_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+ uint32_t soft_level;
+
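+ /* soft limits now hold frequencies rather than level indices */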
+ soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
+
+ soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_level].value;
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload boot level to highest!",
@@ -1372,13 +1452,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t soft_level;
+
+ soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
- data->smc_state_table.gfx_boot_level =
- data->smc_state_table.gfx_max_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- data->smc_state_table.mem_max_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+ soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_level].value;
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload boot level to highest!",
@@ -1394,17 +1480,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
- struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- data->smc_state_table.gfx_boot_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.gfx_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
- data->smc_state_table.mem_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
-
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload DPM Bootup Levels!",
return -1);
@@ -1412,22 +1487,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
"Failed to upload DPM Max Levels!",
return -1);
+
return 0;
}
-#if 0
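+/*
+ * Map a forced-level profiling request onto per-clock level masks,
+ * sourced from the live DPM tables rather than the pptable
+ * voltage-dependency tables.
+ */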
static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
- struct phm_ppt_v2_information *table_info =
- (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
+ struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
+ struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
- if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
- table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
- table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+ *sclk_mask = 0;
+ *mclk_mask = 0;
+ *soc_mask = 0;
+
+ if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
+ soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
*sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
- *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
*mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
+ *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -1435,13 +1516,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
*mclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
- *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
- *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
- *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+ *sclk_mask = gfx_dpm_table->count - 1;
+ *mclk_mask = mem_dpm_table->count - 1;
+ *soc_mask = soc_dpm_table->count - 1;
}
+
return 0;
}
-#endif
static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
@@ -1465,11 +1546,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
-#if 0
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
-#endif
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1485,27 +1564,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-#if 0
ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
if (ret)
return ret;
- vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
- vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
-#endif
+ vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
+ vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
-#if 0
- if (!ret) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
- }
-#endif
+
return ret;
}
@@ -1539,24 +1609,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
PPCLK_e clock_select,
bool max)
{
- int result;
- *clock = 0;
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
- if (max) {
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
- "[GetClockRanges] Failed to get max clock from SMC!",
- return -1);
- result = vega12_read_arg_from_smc(hwmgr, clock);
- } else {
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
- "[GetClockRanges] Failed to get min clock from SMC!",
- return -1);
- result = vega12_read_arg_from_smc(hwmgr, clock);
- }
+ if (max)
+ *clock = data->clk_range[clock_select].ACMax;
+ else
+ *clock = data->clk_range[clock_select].ACMin;
- return result;
+ return 0;
}
static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
@@ -1571,12 +1631,12 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
return -1;
dpm_table = &(data->dpm_table.gfx_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
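+ /* level values are in MHz; report kHz as the interface expects */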
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1603,13 +1663,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
return -1;
dpm_table = &(data->dpm_table.mem_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
- clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
-
+ clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
+ data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
clocks->data[i].latency_in_us =
data->mclk_latency_table.entries[i].latency =
vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
@@ -1633,12 +1692,12 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
dpm_table = &(data->dpm_table.dcef_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1661,12 +1720,12 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
dpm_table = &(data->dpm_table.soc_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1713,99 +1772,69 @@ static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
- int result = 0;
- uint32_t i;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
if (!data->registry_data.disable_water_mark &&
data->smu_features[GNLD_DPM_DCEFCLK].supported &&
data->smu_features[GNLD_DPM_SOCCLK].supported) {
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
- table->WatermarkRow[WM_DCEFCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
- }
-
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
- table->WatermarkRow[WM_SOCCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MinUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
- }
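+ /* the shared helper fills both the DCEFCLK and SOCCLK watermark
+ * rows from the ranges handed over by the display core
+ */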
+ smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
data->water_marks_bitmap |= WaterMarksExist;
data->water_marks_bitmap &= ~WaterMarksLoaded;
}
- return result;
+ return 0;
}
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
- return -EINVAL;
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
switch (type) {
case PP_SCLK:
- data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
- data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ ret = vega12_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
- return -EINVAL);
+ return ret);
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ ret = vega12_upload_dpm_max_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
- return -EINVAL);
+ return ret);
break;
case PP_MCLK:
- data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
- data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+ ret = vega12_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
- return -EINVAL);
+ return ret);
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ ret = vega12_upload_dpm_max_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
- return -EINVAL);
+ return ret);
break;
@@ -1838,8 +1867,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 100,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
case PP_MCLK:
@@ -1854,8 +1883,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 100,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
case PP_PCIE:
@@ -1867,6 +1896,205 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
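+/*
+ * Recompute the soft/hard limits of every clock domain from the current
+ * display configuration: full range by default, pinned to the UMD pstate
+ * level when that platform cap is set, and the UCLK hard minimum raised
+ * to honour DAL's request or to park memory at its top level when mclk
+ * switching must be avoided.
+ */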
+static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *dpm_table;
+ bool vblank_too_short = false;
+ bool disable_mclk_switching;
+ uint32_t i, latency;
+
+ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
+ vblank_too_short;
+ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+
+ /* gfxclk */
+ dpm_table = &(data->dpm_table.gfx_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* memclk */
+ dpm_table = &(data->dpm_table.mem_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* honour DAL's UCLK Hardmin */
+ if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
+ dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
+
+ /* Hardmin is dependent on displayconfig */
+ if (disable_mclk_switching) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
+ if (data->mclk_latency_table.entries[i].latency <= latency) {
+ if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
+ break;
+ }
+ }
+ }
+ }
+
+ if (hwmgr->display_config->nb_pstate_switch_disable)
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ /* vclk */
+ dpm_table = &(data->dpm_table.vclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* dclk */
+ dpm_table = &(data->dpm_table.dclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* socclk */
+ dpm_table = &(data->dpm_table.soc_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* eclk */
+ dpm_table = &(data->dpm_table.eclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ return 0;
+}
+
+static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
+ struct vega12_single_dpm_table *dpm_table)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ PP_ASSERT_WITH_CODE(dpm_table->count > 0,
+ "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
+ "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
+ return -EINVAL);
+
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level)),
+ "[SetUclkToHighestDpmLevel] Set hard min uclk failed!",
+ return ret);
+ }
+
+ return ret;
+}
+
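+/*
+ * Before a display configuration change, report zero displays and pin
+ * UCLK at its highest level so memory bandwidth cannot dip while the
+ * new configuration is being applied.
+ */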
+static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_NumOfDisplays, 0);
+
+ ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
+ &data->dpm_table.mem_table);
+
+ return ret;
+}
+
static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
@@ -1911,6 +2139,9 @@ static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ if (data->vce_power_gated == bgate)
+ return;
+
data->vce_power_gated = bgate;
vega12_enable_disable_vce_dpm(hwmgr, !bgate);
}
@@ -1919,6 +2150,9 @@ static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ if (data->uvd_power_gated == bgate)
+ return;
+
data->uvd_power_gated = bgate;
vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
}
@@ -2086,6 +2320,38 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
return 0;
}
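+/*
+ * GFXOFF is only toggled when the driver, rather than the SMU, owns the
+ * feature (gfxoff_controlled_by_driver); otherwise these are no-ops.
+ */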
+static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (data->gfxoff_controlled_by_driver)
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
+
+ return ret;
+}
+
+static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (data->gfxoff_controlled_by_driver)
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
+
+ return ret;
+}
+
+static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
+{
+ if (enable)
+ return vega12_enable_gfx_off(hwmgr);
+ else
+ return vega12_disable_gfx_off(hwmgr);
+}
+
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.backend_init = vega12_hwmgr_backend_init,
.backend_fini = vega12_hwmgr_backend_fini,
@@ -2113,6 +2379,10 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.display_clock_voltage_request = vega12_display_clock_voltage_request,
.force_clock_level = vega12_force_clock_level,
.print_clock_levels = vega12_print_clock_levels,
+ .apply_clocks_adjust_rules =
+ vega12_apply_clocks_adjust_rules,
+ .pre_display_config_changed =
+ vega12_pre_display_configuration_changed_task,
.display_config_changed = vega12_display_configuration_changed_task,
.powergate_uvd = vega12_power_gate_uvd,
.powergate_vce = vega12_power_gate_vce,
@@ -2131,6 +2401,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.get_thermal_temperature_range = vega12_get_thermal_temperature_range,
.register_irq_handlers = smu9_register_irq_handlers,
.start_thermal_controller = vega12_start_thermal_controller,
+ .powergate_gfx = vega12_gfx_off_control,
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index e81ded1ec198..b3e424d28994 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
uint32_t mem_clock;
uint32_t soc_clock;
uint32_t dcef_clock;
+ uint32_t eclock;
+ uint32_t dclock;
+ uint32_t vclock;
};
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
@@ -301,6 +304,12 @@ struct vega12_odn_fan_table {
bool force_fan_pwm;
};
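+/* per-clock limits cached from the SMU; only ACMax/ACMin/DCMax are queried */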
+struct vega12_clock_range {
+ uint32_t ACMax;
+ uint32_t ACMin;
+ uint32_t DCMax;
+};
+
struct vega12_hwmgr {
struct vega12_dpm_table dpm_table;
struct vega12_dpm_table golden_dpm_table;
@@ -382,6 +391,11 @@ struct vega12_hwmgr {
uint32_t smu_version;
struct smu_features smu_features[GNLD_FEATURES_MAX];
struct vega12_smc_state_table smc_state_table;
+
+ struct vega12_clock_range clk_range[PPCLK_COUNT];
+
+ /* ---- Gfxoff ---- */
+ bool gfxoff_controlled_by_driver;
};
#define VEGA12_DPM2_NEAR_TDP_DEC 10
@@ -432,6 +446,8 @@ struct vega12_hwmgr {
#define VEGA12_UMD_PSTATE_GFXCLK_LEVEL 0x3
#define VEGA12_UMD_PSTATE_SOCCLK_LEVEL 0x3
#define VEGA12_UMD_PSTATE_MCLK_LEVEL 0x2
+#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL 0x3
+#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL 0x3
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index 888ddca902d8..cb3a5b1737c8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -224,11 +224,9 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
- /* 0xFFFF will disable the ACG feature */
- if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
- ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
- ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
- }
+ ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index cfd9e6ccb790..904eb2c9155b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -34,11 +34,9 @@ static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentRpm),
"Attempt to get current RPM from SMC Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!vega12_read_arg_from_smc(hwmgr,
- current_rpm),
- "Attempt to read current RPM from SMC Failed!",
- return -1);
+ return -EINVAL);
+ *current_rpm = smum_get_argument(hwmgr);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index a202247c9894..429c9c4322da 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -455,7 +455,7 @@ extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ void *clock_ranges);
extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index b99fb8ac822c..d3d96260f440 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -26,7 +26,6 @@
#include <linux/seq_file.h>
#include "amd_powerplay.h"
#include "hardwaremanager.h"
-#include "pp_power_source.h"
#include "hwmgr_ppt.h"
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
@@ -195,7 +194,7 @@ struct pp_smumgr_func {
int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr);
int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr,
uint32_t firmware);
- int (*get_argument)(struct pp_hwmgr *hwmgr);
+ uint32_t (*get_argument)(struct pp_hwmgr *hwmgr);
int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg);
int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
@@ -294,8 +293,7 @@ struct pp_hwmgr_func {
int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
- int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges);
int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
@@ -303,7 +301,7 @@ struct pp_hwmgr_func {
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
- int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+ int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable);
int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
@@ -328,7 +326,7 @@ struct pp_hwmgr_func {
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size);
int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
- int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
+ int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
};
@@ -741,7 +739,6 @@ struct pp_hwmgr {
const struct pp_table_func *pptable_func;
struct pp_power_state *ps;
- enum pp_power_source power_source;
uint32_t num_ps;
struct pp_thermal_controller_info thermal_controller;
bool fan_ctrl_is_in_default_mode;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 6c22ed9249bf..82550a8a3a3f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -29,7 +29,6 @@
enum SMU_TABLE {
SMU_UVD_TABLE = 0,
SMU_VCE_TABLE,
- SMU_SAMU_TABLE,
SMU_BIF_TABLE,
};
@@ -47,7 +46,6 @@ enum SMU_MEMBER {
UcodeLoadStatus,
UvdBootLevel,
VceBootLevel,
- SamuBootLevel,
LowSclkInterruptThreshold,
DRAM_LOG_ADDR_H,
DRAM_LOG_ADDR_L,
@@ -82,7 +80,7 @@ enum SMU10_TABLE_ID {
SMU10_CLOCKTABLE,
};
-extern int smum_get_argument(struct pp_hwmgr *hwmgr);
+extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index 2f8a3b983cce..b6ffd08784e7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -412,10 +412,10 @@ typedef struct {
QuadraticInt_t ReservedEquation2;
QuadraticInt_t ReservedEquation3;
- uint16_t MinVoltageUlvGfx;
- uint16_t MinVoltageUlvSoc;
+ uint16_t MinVoltageUlvGfx;
+ uint16_t MinVoltageUlvSoc;
- uint32_t Reserved[14];
+ uint32_t Reserved[14];
@@ -483,9 +483,9 @@ typedef struct {
uint8_t padding8_4;
- uint8_t PllGfxclkSpreadEnabled;
- uint8_t PllGfxclkSpreadPercent;
- uint16_t PllGfxclkSpreadFreq;
+ uint8_t PllGfxclkSpreadEnabled;
+ uint8_t PllGfxclkSpreadPercent;
+ uint16_t PllGfxclkSpreadFreq;
uint8_t UclkSpreadEnabled;
uint8_t UclkSpreadPercent;
@@ -495,11 +495,14 @@ typedef struct {
uint8_t SocclkSpreadPercent;
uint16_t SocclkSpreadFreq;
- uint8_t AcgGfxclkSpreadEnabled;
- uint8_t AcgGfxclkSpreadPercent;
- uint16_t AcgGfxclkSpreadFreq;
+ uint8_t AcgGfxclkSpreadEnabled;
+ uint8_t AcgGfxclkSpreadPercent;
+ uint16_t AcgGfxclkSpreadFreq;
- uint32_t BoardReserved[10];
+ uint8_t Vr2_I2C_address;
+ uint8_t padding_vr2[3];
+
+ uint32_t BoardReserved[9];
uint32_t MmHubPadding[7];
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 0a200406a1ec..8d557accaef2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -26,7 +26,7 @@
SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
polaris10_smumgr.o iceland_smumgr.o \
smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
- vega12_smumgr.o vegam_smumgr.o
+ vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 2d4ec8ac3a08..fbe3ef4ee45c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -1614,37 +1614,6 @@ static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU7_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_samu_clock_voltage_dependency_table *samu_table =
- hwmgr->dyn_state.samu_clock_voltage_dependency_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(samu_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
- table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
- table->SamuLevel[count].MinPhases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int ci_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
@@ -2026,10 +1995,6 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize ACP Level!", return result);
- result = ci_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
/* need to populate the ARB settings for the initial state. */
result = ci_program_memory_timing_parameters(hwmgr);
@@ -2881,6 +2846,89 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
return 0;
}
+static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_hwmgr *data = hwmgr->backend;
+ struct ci_smumgr *smu_data = hwmgr->smu_backend;
+ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
+ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
+ int32_t i;
+
+ if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ else
+ smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
+ UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
+
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
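+	/* Walk from the highest UVD level down, enabling each level whose
+	 * voltage the current power source can supply; in a profiling mode
+	 * (or with UVD DPM disabled) only the top level is considered. */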
+ for (i = uvd_table->count - 1; i >= 0; i--) {
+ if (uvd_table->entries[i].v <= max_vddc)
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+ if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
+ break;
+ }
+ ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
+ return 0;
+}
+
+static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_hwmgr *data = hwmgr->backend;
+ struct phm_vce_clock_voltage_dependency_table *vce_table =
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
+ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
+ int32_t i;
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
+ VceBootLevel, 0); /* temporarily hard-coded to level 0; VCE can set the minimum evclk */
+
+ data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+
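+	/* Same walk as for UVD: enable voltage-compatible VCE levels,
+	 * stopping after the top level in profiling modes. */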
+ for (i = vce_table->count - 1; i >= 0; i--) {
+ if (vce_table->entries[i].v <= max_vddc)
+ data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+ if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
+ break;
+ }
+ ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.vce_dpm_enable_mask);
+
+ return 0;
+}
+
+static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ ci_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ ci_update_vce_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
const struct pp_smumgr_func ci_smu_funcs = {
.smu_init = ci_smu_init,
.smu_fini = ci_smu_fini,
@@ -2903,4 +2951,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
.initialize_mc_reg_table = ci_initialize_mc_reg_table,
.is_dpm_running = ci_is_dpm_running,
.update_dpm_settings = ci_update_dpm_settings,
+ .update_smc_table = ci_update_smc_table,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 53df9405f43a..18048f8e2f13 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -1503,44 +1503,6 @@ static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
int32_t eng_clock, int32_t mem_clock,
struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2028,10 +1990,6 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize ACP Level!", return result);
- result = fiji_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point
* (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2378,8 +2336,6 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
}
@@ -2478,33 +2434,6 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
switch (type) {
@@ -2514,9 +2443,6 @@ static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
fiji_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- fiji_update_samu_smc_table(hwmgr);
- break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 415f691c3fa9..9299b93aa09a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -1578,12 +1578,6 @@ static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- return 0;
-}
-
static int iceland_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
@@ -1992,10 +1986,6 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize ACP Level!", return result;);
- result = iceland_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result;);
-
/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
/* need to populate the ARB settings for the initial state. */
result = iceland_program_memory_timing_parameters(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index a8c6524f07e4..1276f168ff68 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1204,7 +1204,6 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
(struct phm_ppt_v1_information *)(hwmgr->pptable);
SMIO_Pattern vol_level;
uint32_t mvdd;
- uint16_t us_mvdd;
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
@@ -1255,16 +1254,11 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
"in Clock Dependency Table",
);
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!polaris10_populate_mvdd_value(hwmgr,
+ if (!((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled)))
+ polaris10_populate_mvdd_value(hwmgr,
data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
+ &vol_level);
if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
@@ -1337,55 +1331,6 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
return result;
}
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
int32_t eng_clock, int32_t mem_clock,
SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -1566,7 +1511,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ uint8_t i, stretch_amount, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1617,11 +1562,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
/* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
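+	/* Only stretch amounts 1-5 are valid; anything else disables the
+	 * clock stretcher feature below. */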
+ if (stretch_amount == 0 || stretch_amount > 5) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
@@ -1865,10 +1806,6 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize VCE Level!", return result);
- result = polaris10_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point
* (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2222,34 +2159,6 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-
static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
@@ -2276,9 +2185,6 @@ static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
polaris10_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- polaris10_update_samu_smc_table(hwmgr);
- break;
case SMU_BIF_TABLE:
polaris10_update_bif_smc_table(hwmgr);
default:
@@ -2357,8 +2263,6 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 0a563f6fe9ea..bb07d43f3874 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -68,7 +68,7 @@ static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
return 0;
}
-static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
+static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index d644a9bb9078..a029e47c2319 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -379,8 +379,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_to_load;
- int result = 0;
- struct SMU_DRAMData_TOC *toc;
+ int r = 0;
if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
@@ -421,49 +420,62 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ UCODE_ID_CP_MEC_JT2_MASK;
}
- toc = (struct SMU_DRAMData_TOC *)smu_data->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
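+	/* Build the firmware TOC only once and cache it for later reloads. */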
+ if (!smu_data->toc) {
+ struct SMU_DRAMData_TOC *toc;
+
+ smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL);
+ if (!smu_data->toc)
+ return -ENOMEM;
+ toc = smu_data->toc;
+ toc->num_entries = 0;
+ toc->structure_version = 1;
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- if (!hwmgr->not_vf)
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ if (!hwmgr->not_vf)
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
-
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ }
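+	/* Copy the cached TOC into the SMU-visible header buffer. */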
+ memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
+ sizeof(struct SMU_DRAMData_TOC));
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
pr_err("Fail to Request SMU Load uCode");
- return result;
+ return r;
+
+failed:
+ kfree(smu_data->toc);
+ smu_data->toc = NULL;
+ return r;
}
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
@@ -570,7 +582,6 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
int smu7_init(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data;
- uint64_t mc_addr = 0;
int r;
/* Allocate memory for backend private data */
smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
@@ -584,15 +595,12 @@ int smu7_init(struct pp_hwmgr *hwmgr)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&smu_data->header_buffer.handle,
- &mc_addr,
+ &smu_data->header_buffer.mc_addr,
&smu_data->header_buffer.kaddr);
if (r)
return -EINVAL;
- smu_data->header = smu_data->header_buffer.kaddr;
- smu_data->header_buffer.mc_addr = mc_addr;
-
if (!hwmgr->not_vf)
return 0;
@@ -602,7 +610,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&smu_data->smu_buffer.handle,
- &mc_addr,
+ &smu_data->smu_buffer.mc_addr,
&smu_data->smu_buffer.kaddr);
if (r) {
@@ -611,7 +619,6 @@ int smu7_init(struct pp_hwmgr *hwmgr)
&smu_data->header_buffer.kaddr);
return -EINVAL;
}
- smu_data->smu_buffer.mc_addr = mc_addr;
if (smum_is_hw_avfs_present(hwmgr))
hwmgr->avfs_supported = true;
@@ -633,6 +640,9 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
&smu_data->smu_buffer.mc_addr,
&smu_data->smu_buffer.kaddr);
+
+ kfree(smu_data->toc);
+ smu_data->toc = NULL;
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 39c9bfda0ab4..01f0538fba6b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -37,10 +37,9 @@ struct smu7_buffer_entry {
};
struct smu7_smumgr {
- uint8_t *header;
- uint8_t *mec_image;
struct smu7_buffer_entry smu_buffer;
struct smu7_buffer_entry header_buffer;
+ struct SMU_DRAMData_TOC *toc;
uint32_t soft_regs_start;
uint32_t dpm_table_start;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index c861d3023474..f7e3bc22bb93 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -52,10 +52,10 @@ static const enum smu8_scratch_entry firmware_list[] = {
SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
-static int smu8_get_argument(struct pp_hwmgr *hwmgr)
+static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
{
if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
+ return 0;
return cgs_read_register(hwmgr->device,
mmSMU_MP1_SRBM2P_ARG_0);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
new file mode 100644
index 000000000000..079fc8e8f709
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smumgr.h"
+#include "vega10_inc.h"
+#include "soc15_common.h"
+#include "pp_debug.h"
+
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+#define smnMP1_FIRMWARE_FLAGS 0x3010028
+
+bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t mp1_fw_flags;
+
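+	/* Select the MP1 firmware-flags word via the indirect PCIE index/data pair. */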
+ WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
+ (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
+
+ mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
+
+ if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
+ return true;
+
+ return false;
+}
+
+/*
+ * Check if the SMC has responded to the previous message.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return TRUE if the SMC has responded, FALSE otherwise.
+ */
+static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t reg;
+ uint32_t ret;
+
+ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+
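+	/* Poll C2PMSG_90 until the SMC posts a non-zero response. */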
+ ret = phm_wait_for_register_unequal(hwmgr, reg,
+ 0, MP1_C2PMSG_90__CONTENT_MASK);
+
+ if (ret)
+ pr_err("No response from smu\n");
+
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+/*
+ * Send a message to the SMC, and do not wait for its response.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return Always returns 0.
+ */
+static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+ uint16_t msg)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+
+ return 0;
+}
+
+/*
+ * Send a message to the SMC, and wait for its response.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return Always returns 0.
+ */
+int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t ret;
+
+ smu9_wait_for_response(hwmgr);
+
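+	/* Clear the response register before issuing the new message. */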
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ ret = smu9_wait_for_response(hwmgr);
+ if (ret != 1)
+ pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
+
+ return 0;
+}
+
+/*
+ * Send a message to the SMC with a parameter.
+ * @param hwmgr: the address of the powerplay hardware manager.
+ * @param msg: the message to send.
+ * @param parameter: the parameter to send.
+ * @return Always returns 0.
+ */
+int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t ret;
+
+ smu9_wait_for_response(hwmgr);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
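+	/* Pass the argument through the C2PMSG_82 scratch register. */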
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+
+ smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ ret = smu9_wait_for_response(hwmgr);
+ if (ret != 1)
+ pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
+
+ return 0;
+}
+
+uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
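+	/* The SMC returns its result in the same C2PMSG_82 scratch register. */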
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
new file mode 100644
index 000000000000..1462279ca128
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _SMU9_SMUMANAGER_H_
+#define _SMU9_SMUMANAGER_H_
+
+bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter);
+uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c9837935f0f5..99d5e4f98f49 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -96,7 +96,7 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
return 0;
}
-int smum_get_argument(struct pp_hwmgr *hwmgr)
+uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr_funcs->get_argument)
return hwmgr->smumgr_funcs->get_argument(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 782b19fc2e70..7dabc6c456e1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -1443,51 +1443,6 @@ static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t) (mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->SamuLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->SamuLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->SamuLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- }
-
- return result;
-}
-
static int tonga_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
@@ -2323,10 +2278,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize ACP Level !", return result);
- result = tonga_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize SAMU Level !", return result);
-
/* Since only the initial state is completely set up at this
* point (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2673,8 +2624,6 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
}
@@ -2773,32 +2722,6 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
switch (type) {
@@ -2808,9 +2731,6 @@ static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
tonga_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- tonga_update_samu_smc_table(hwmgr);
- break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index e84669c448a3..5d19115f410c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -28,142 +28,11 @@
#include "vega10_hwmgr.h"
#include "vega10_ppsmc.h"
#include "smu9_driver_if.h"
+#include "smu9_smumgr.h"
#include "ppatomctrl.h"
#include "pp_debug.h"
-#define AVFS_EN_MSB 1568
-#define AVFS_EN_LSB 1568
-
-/* Microcode file is stored in this buffer */
-#define BUFFER_SIZE 80000
-#define MAX_STRING_SIZE 15
-#define BUFFER_SIZETWO 131072 /* 128 *1024 */
-
-/* MP Apertures */
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS 0x3010028
-#define smnMP0_FW_INTF 0x3010104
-#define smnMP1_PUB_CTRL 0x3010b14
-
-static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t mp1_fw_flags;
-
- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
-
- if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
- return true;
-
- return false;
-}
-
-/*
- * Check if SMC has responded to previous message.
- *
- * @param smumgr the address of the powerplay hardware manager.
- * @return TRUE SMC has responded, FALSE otherwise.
- */
-static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
- uint32_t ret;
-
- reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-
- ret = phm_wait_for_register_unequal(hwmgr, reg,
- 0, MP1_C2PMSG_90__CONTENT_MASK);
-
- if (ret)
- pr_err("No response from smu\n");
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
-}
-
-/*
- * Send a message to the SMC, and do not wait for its response.
- * @param smumgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC, and wait for its response.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t ret;
-
- vega10_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- ret = vega10_wait_for_response(hwmgr);
- if (ret != 1)
- pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC with parameter
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return Always return 0.
- */
-static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t ret;
-
- vega10_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
-
- vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- ret = vega10_wait_for_response(hwmgr);
- if (ret != 1)
- pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
-
- return 0;
-}
-
-static int vega10_get_argument(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-}
-
static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
@@ -175,13 +44,13 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id);
@@ -206,13 +75,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id);
@@ -225,8 +94,8 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
- *features_enabled = vega10_get_argument(hwmgr);
+ smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
+ *features_enabled = smu9_get_argument(hwmgr);
return 0;
}
@@ -248,10 +117,10 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
struct vega10_smumgr *priv = hwmgr->smu_backend;
if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
}
@@ -265,11 +134,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
uint32_t dev_id;
uint32_t rev_id;
- PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr,
+ PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- smc_driver_if_version = vega10_get_argument(hwmgr);
+ smc_driver_if_version = smu9_get_argument(hwmgr);
dev_id = adev->pdev->device;
rev_id = adev->pdev->revision;
@@ -441,7 +310,7 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
static int vega10_start_smu(struct pp_hwmgr *hwmgr)
{
- if (!vega10_is_smc_ram_running(hwmgr))
+ if (!smu9_is_smc_ram_running(hwmgr))
return -EINVAL;
PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
@@ -453,7 +322,8 @@ static int vega10_start_smu(struct pp_hwmgr *hwmgr)
return 0;
}
-static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
+static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
+ uint16_t table_id, bool rw)
{
int ret;
@@ -470,11 +340,11 @@ const struct pp_smumgr_func vega10_smu_funcs = {
.smu_fini = &vega10_smu_fini,
.start_smu = &vega10_start_smu,
.request_smu_load_specific_fw = NULL,
- .send_msg_to_smc = &vega10_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter,
+ .send_msg_to_smc = &smu9_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.is_dpm_running = vega10_is_dpm_running,
- .get_argument = vega10_get_argument,
+ .get_argument = smu9_get_argument,
.smc_table_manager = vega10_smc_table_manager,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 7d9b40e8b1bf..7f0e2109f40d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -24,157 +24,14 @@
#include "smumgr.h"
#include "vega12_inc.h"
#include "soc15_common.h"
+#include "smu9_smumgr.h"
#include "vega12_smumgr.h"
#include "vega12_ppsmc.h"
#include "vega12/smu9_driver_if.h"
-
#include "ppatomctrl.h"
#include "pp_debug.h"
-/* MP Apertures */
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS 0x3010028
-#define smnMP0_FW_INTF 0x3010104
-#define smnMP1_PUB_CTRL 0x3010b14
-
-static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t mp1_fw_flags;
-
- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
-
- if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
- MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
- return true;
-
- return false;
-}
-
-/*
- * Check if SMC has responded to previous message.
- *
- * @param smumgr the address of the powerplay hardware manager.
- * @return TRUE SMC has responded, FALSE otherwise.
- */
-static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-
- phm_wait_for_register_unequal(hwmgr, reg,
- 0, MP1_C2PMSG_90__CONTENT_MASK);
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
-}
-
-/*
- * Send a message to the SMC, and do not wait for its response.
- * @param smumgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC, and wait for its response.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- vega12_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- if (vega12_wait_for_response(hwmgr) != 1)
- pr_err("Failed to send message: 0x%x\n", msg);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC with parameter
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return Always return 0.
- */
-int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- vega12_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
-
- vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- if (vega12_wait_for_response(hwmgr) != 1)
- pr_err("Failed to send message: 0x%x\n", msg);
-
- return 0;
-}
-
-
-/*
- * Send a message to the SMC with parameter, do not wait for response
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return The response that came from the SMC.
- */
-int vega12_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter);
-
- return vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-}
-
-/*
- * Retrieve an argument from SMC.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param arg pointer to store the argument from SMC.
- * @return Always return 0.
- */
-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-
- return 0;
-}
-
/*
* Copy table from SMC into driver FB
* @param hwmgr the address of the HW manager
@@ -192,16 +49,16 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -234,17 +91,17 @@ int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -262,20 +119,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
if (enable) {
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
"[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
"[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
return -EINVAL);
} else {
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
"[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
"[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
return -EINVAL);
@@ -292,22 +149,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
"[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
- &smc_features_low) == 0,
- "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
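+	/* smu9_get_argument() reads the scratch register directly and cannot
+	 * fail, so no error check is needed. */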
+ smc_features_low = smu9_get_argument(hwmgr);
+
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
"[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
- &smc_features_high) == 0,
- "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
- return -EINVAL);
+ smc_features_high = smu9_get_argument(hwmgr);
*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -333,39 +185,16 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
(struct vega12_smumgr *)(hwmgr->smu_backend);
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
- if (!vega12_send_msg_to_smc_with_parameter(hwmgr,
+ if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
- vega12_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
}
return 0;
}
-#if 0 /* tentatively remove */
-static int vega12_verify_smc_interface(struct pp_hwmgr *hwmgr)
-{
- uint32_t smc_driver_if_version;
-
- PP_ASSERT_WITH_CODE(!vega12_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion),
- "Attempt to get SMC IF Version Number Failed!",
- return -EINVAL);
- vega12_read_arg_from_smc(hwmgr, &smc_driver_if_version);
-
- if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
- pr_err("Your firmware(0x%x) doesn't match \
- SMU9_DRIVER_IF_VERSION(0x%x). \
- Please update your firmware!\n",
- smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
- return -EINVAL;
- }
-
- return 0;
-}
-#endif
-
static int vega12_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega12_smumgr *priv;
@@ -513,16 +342,10 @@ static int vega12_smu_fini(struct pp_hwmgr *hwmgr)
static int vega12_start_smu(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(vega12_is_smc_ram_running(hwmgr),
+ PP_ASSERT_WITH_CODE(smu9_is_smc_ram_running(hwmgr),
"SMC is not running!",
return -EINVAL);
-#if 0 /* tentatively remove */
- PP_ASSERT_WITH_CODE(!vega12_verify_smc_interface(hwmgr),
- "Failed to verify SMC interface!",
- return -EINVAL);
-#endif
-
vega12_set_tools_address(hwmgr);
return 0;
@@ -533,9 +356,10 @@ const struct pp_smumgr_func vega12_smu_funcs = {
.smu_fini = &vega12_smu_fini,
.start_smu = &vega12_start_smu,
.request_smu_load_specific_fw = NULL,
- .send_msg_to_smc = &vega12_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &vega12_send_msg_to_smc_with_parameter,
+ .send_msg_to_smc = &smu9_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.is_dpm_running = vega12_is_dpm_running,
+ .get_argument = smu9_get_argument,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
index 2810d387b611..b285cbc04019 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
@@ -48,7 +48,6 @@ struct vega12_smumgr {
#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
#define SMU_FEATURES_HIGH_SHIFT 32
-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 2de48959ac93..57420d7caa4e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -393,34 +393,6 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-
static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
{
struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
@@ -447,9 +419,6 @@ static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
vegam_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- vegam_update_samu_smc_table(hwmgr);
- break;
case SMU_BIF_TABLE:
vegam_update_bif_smc_table(hwmgr);
break;
@@ -1281,54 +1250,6 @@ static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU75_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
int32_t eng_clock, int32_t mem_clock,
SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2062,10 +1983,6 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize VCE Level!", return result);
- result = vegam_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point
* (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2273,8 +2190,6 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
}
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 16903dc7fe0d..965cda48dc13 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -136,9 +136,6 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- if (!crtc->primary->fb)
- return;
-
clk_disable_unprepare(arcpgu->clk);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
@@ -189,7 +186,7 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
static void arc_pgu_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
index b8f6f9a5dfbe..68629e614990 100644
--- a/drivers/gpu/drm/arc/arcpgu_sim.c
+++ b/drivers/gpu/drm/arc/arcpgu_sim.c
@@ -99,7 +99,7 @@ int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
goto error_encoder_cleanup;
}
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
dev_err(drm->dev, "could not attach connector to encoder\n");
drm_connector_unregister(connector);
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
index bb8b158ff90d..3bf31d1a4722 100644
--- a/drivers/gpu/drm/arm/Makefile
+++ b/drivers/gpu/drm/arm/Makefile
@@ -1,4 +1,5 @@
hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
+mali-dp-y += malidp_mw.o
obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index cf5cbd63ecdf..e4d67b70244d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -229,6 +229,8 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ int i;
+ struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
u32 src_h = state->src_h >> 16;
@@ -238,20 +240,17 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- if (!state->fb || !state->crtc)
- return 0;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
- if (!crtc_state) {
- DRM_DEBUG_KMS("Invalid crtc state\n");
- return -EINVAL;
+ for_each_new_crtc_in_state(state->state, crtc, crtc_state, i) {
+ /* we cannot disable the plane while the CRTC is active */
+ if (!state->fb && crtc_state->active)
+ return -EINVAL;
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
}
- return drm_atomic_helper_check_plane_state(state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ return 0;
}
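The reworked check iterates every new CRTC state in the commit and refuses to disable the plane (state->fb == NULL) while its CRTC is still active. The core rule, reduced to plain C with illustrative stand-ins for the DRM state objects:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_crtc_state  { bool active; };
struct fake_plane_state {
	const void *fb;                  /* NULL when the plane is being disabled */
	struct fake_crtc_state *crtcs;   /* new CRTC states in this commit */
	size_t num_crtcs;
};

/* A plane may not be disabled while a CRTC using it stays active. */
static int check_plane(const struct fake_plane_state *state)
{
	for (size_t i = 0; i < state->num_crtcs; i++)
		if (!state->fb && state->crtcs[i].active)
			return -EINVAL;
	return 0;
}

int main(void)
{
	struct fake_crtc_state crtc = { .active = true };
	struct fake_plane_state disable = { .fb = NULL, .crtcs = &crtc, .num_crtcs = 1 };

	printf("disable while active -> %d\n", check_plane(&disable));
	return 0;
}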
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@@ -280,16 +279,10 @@ static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
.atomic_update = hdlcd_plane_atomic_update,
};
-static void hdlcd_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_helper_disable(plane);
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs hdlcd_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = hdlcd_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -334,10 +327,8 @@ int hdlcd_setup_crtc(struct drm_device *drm)
ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL,
&hdlcd_crtc_funcs, NULL);
- if (ret) {
- hdlcd_plane_destroy(primary);
+ if (ret)
return ret;
- }
drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs);
return 0;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index feaa8bc3d7b7..0ed1cde98cf8 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -27,6 +27,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
#include "hdlcd_drv.h"
@@ -100,16 +101,9 @@ setup_fail:
return ret;
}
-static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
-{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
-}
-
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = hdlcd_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -124,13 +118,6 @@ static void hdlcd_setup_mode_config(struct drm_device *drm)
drm->mode_config.funcs = &hdlcd_mode_config_funcs;
}
-static void hdlcd_lastclose(struct drm_device *drm)
-{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(hdlcd->fbdev);
-}
-
static irqreturn_t hdlcd_irq(int irq, void *arg)
{
struct drm_device *drm = arg;
@@ -246,7 +233,7 @@ static struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = hdlcd_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = hdlcd_irq,
.irq_preinstall = hdlcd_irq_preinstall,
.irq_postinstall = hdlcd_irq_postinstall,
@@ -321,14 +308,9 @@ static int hdlcd_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- hdlcd->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_connector);
-
- if (IS_ERR(hdlcd->fbdev)) {
- ret = PTR_ERR(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
+ ret = drm_fb_cma_fbdev_init(drm, 32, 0);
+ if (ret)
goto err_fbdev;
- }
ret = drm_dev_register(drm, 0);
if (ret)
@@ -337,15 +319,13 @@ static int hdlcd_drm_bind(struct device *dev)
return 0;
err_register:
- if (hdlcd->fbdev) {
- drm_fbdev_cma_fini(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
err_fbdev:
drm_kms_helper_poll_fini(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
err_unload:
of_node_put(hdlcd->crtc.port);
@@ -366,23 +346,23 @@ static void hdlcd_drm_unbind(struct device *dev)
struct hdlcd_drm_private *hdlcd = drm->dev_private;
drm_dev_unregister(drm);
- if (hdlcd->fbdev) {
- drm_fbdev_cma_fini(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
component_unbind_all(dev, drm);
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
- pm_runtime_get_sync(drm->dev);
+ pm_runtime_get_sync(dev);
+ drm_crtc_vblank_off(&hdlcd->crtc);
drm_irq_uninstall(drm);
- pm_runtime_put_sync(drm->dev);
- pm_runtime_disable(drm->dev);
- of_reserved_mem_device_release(drm->dev);
+ drm_atomic_helper_shutdown(drm);
+ pm_runtime_put(dev);
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ of_reserved_mem_device_release(dev);
drm_mode_config_cleanup(drm);
- drm_dev_put(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
+ drm_dev_put(drm);
}
static const struct component_master_ops hdlcd_master_ops = {
@@ -427,35 +407,15 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match);
static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
-
- if (!hdlcd)
- return 0;
- drm_kms_helper_poll_disable(drm);
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
-
- hdlcd->state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(hdlcd->state)) {
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(hdlcd->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm);
}
static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
-
- if (!hdlcd)
- return 0;
- drm_atomic_helper_resume(drm, hdlcd->state);
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
- drm_kms_helper_poll_enable(drm);
+ drm_mode_config_helper_resume(drm);
return 0;
}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index 56f34dfff640..fd438d177b64 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -9,10 +9,8 @@
struct hdlcd_drm_private {
void __iomem *mmio;
struct clk *clk;
- struct drm_fbdev_cma *fbdev;
struct drm_crtc crtc;
struct drm_plane *plane;
- struct drm_atomic_state *state;
#ifdef CONFIG_DEBUG_FS
atomic_t buffer_underrun_count;
atomic_t bus_error_count;
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index fcc62bc60f6a..ef44202fb43f 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -411,6 +411,16 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
}
}
+ /* If only the writeback routing has changed, we don't need a modeset */
+ if (state->connectors_changed) {
+ u32 old_mask = crtc->state->connector_mask;
+ u32 new_mask = state->connector_mask;
+
+ if ((old_mask ^ new_mask) ==
+ (1 << drm_connector_index(&malidp->mw_connector.base)))
+ state->connectors_changed = false;
+ }
+
ret = malidp_crtc_atomic_check_gamma(crtc, state);
ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state);
ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 0a788d76ed5f..08b5bb219816 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -17,6 +17,7 @@
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/pm_runtime.h>
+#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -31,6 +32,7 @@
#include <drm/drm_of.h>
#include "malidp_drv.h"
+#include "malidp_mw.h"
#include "malidp_regs.h"
#include "malidp_hw.h"
@@ -170,14 +172,15 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
int ret;
- atomic_set(&malidp->config_valid, 0);
- hwdev->hw->set_config_valid(hwdev);
+ hwdev->hw->set_config_valid(hwdev, 1);
/* don't wait for config_valid flag if we are in config mode */
- if (hwdev->hw->in_config_mode(hwdev))
+ if (hwdev->hw->in_config_mode(hwdev)) {
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
return 0;
+ }
ret = wait_event_interruptible_timeout(malidp->wq,
- atomic_read(&malidp->config_valid) == 1,
+ atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE,
msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
return (ret > 0) ? 0 : -ETIMEDOUT;
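malidp_set_and_wait_config_valid() is a flag-plus-waitqueue handshake: the commit path arms config_valid with a sentinel, the IRQ handler flips it to DONE, and the waiter gives up after a timeout. A userspace analogue of wait_event_interruptible_timeout() built on a mutex and condition variable (all names here are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <time.h>

enum { CONFIG_START = 0xd0, CONFIG_DONE = 1 };

static atomic_int config_valid = CONFIG_START;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* IRQ-handler analogue: mark the update as latched and wake the waiter. */
static void signal_config_done(void)
{
	pthread_mutex_lock(&lock);
	atomic_store(&config_valid, CONFIG_DONE);
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

/* Commit-path analogue of wait_event_interruptible_timeout(). */
static int wait_config_done(int timeout_ms)
{
	struct timespec ts;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (atomic_load(&config_valid) != CONFIG_DONE && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);

	return ret == ETIMEDOUT ? -ETIMEDOUT : 0;
}

int main(void)
{
	signal_config_done();                 /* pretend the IRQ already fired */
	return wait_config_done(10) ? 1 : 0;
}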
@@ -216,12 +219,20 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *drm = state->dev;
+ struct malidp_drm *malidp = drm->dev_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
pm_runtime_get_sync(drm->dev);
+ /*
+ * set config_valid to a special value to let IRQ handlers
+ * know that we are updating registers
+ */
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);
+ malidp->dev->hw->set_config_valid(malidp->dev, 0);
+
drm_atomic_helper_commit_modeset_disables(drm, state);
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
@@ -230,7 +241,9 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
malidp_atomic_commit_se_config(crtc, old_crtc_state);
}
- drm_atomic_helper_commit_planes(drm, state, 0);
+ drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ malidp_mw_atomic_commit(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
@@ -268,12 +281,18 @@ static int malidp_init(struct drm_device *drm)
drm->mode_config.helper_private = &malidp_mode_config_helpers;
ret = malidp_crtc_init(drm);
- if (ret) {
- drm_mode_config_cleanup(drm);
- return ret;
- }
+ if (ret)
+ goto crtc_fail;
+
+ ret = malidp_mw_connector_init(drm);
+ if (ret)
+ goto crtc_fail;
return 0;
+
+crtc_fail:
+ drm_mode_config_cleanup(drm);
+ return ret;
}
static void malidp_fini(struct drm_device *drm)
@@ -285,6 +304,8 @@ static int malidp_irq_init(struct platform_device *pdev)
{
int irq_de, irq_se, ret = 0;
struct drm_device *drm = dev_get_drvdata(&pdev->dev);
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
/* fetch the interrupts from DT */
irq_de = platform_get_irq_byname(pdev, "DE");
@@ -304,7 +325,7 @@ static int malidp_irq_init(struct platform_device *pdev)
ret = malidp_se_irq_init(drm, irq_se);
if (ret) {
- malidp_de_irq_fini(drm);
+ malidp_de_irq_fini(hwdev);
return ret;
}
@@ -326,6 +347,106 @@ static int malidp_dumb_create(struct drm_file *file_priv,
return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
}
+#ifdef CONFIG_DEBUG_FS
+
+static void malidp_error_stats_init(struct malidp_error_stats *error_stats)
+{
+ error_stats->num_errors = 0;
+ error_stats->last_error_status = 0;
+ error_stats->last_error_vblank = -1;
+}
+
+void malidp_error(struct malidp_drm *malidp,
+ struct malidp_error_stats *error_stats, u32 status,
+ u64 vblank)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&malidp->errors_lock, irqflags);
+ error_stats->last_error_status = status;
+ error_stats->last_error_vblank = vblank;
+ error_stats->num_errors++;
+ spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+}
+
+void malidp_error_stats_dump(const char *prefix,
+ struct malidp_error_stats error_stats,
+ struct seq_file *m)
+{
+ seq_printf(m, "[%s] num_errors : %d\n", prefix,
+ error_stats.num_errors);
+ seq_printf(m, "[%s] last_error_status : 0x%08x\n", prefix,
+ error_stats.last_error_status);
+ seq_printf(m, "[%s] last_error_vblank : %lld\n", prefix,
+ error_stats.last_error_vblank);
+}
+
+static int malidp_show_stats(struct seq_file *m, void *arg)
+{
+ struct drm_device *drm = m->private;
+ struct malidp_drm *malidp = drm->dev_private;
+ unsigned long irqflags;
+ struct malidp_error_stats de_errors, se_errors;
+
+ spin_lock_irqsave(&malidp->errors_lock, irqflags);
+ de_errors = malidp->de_errors;
+ se_errors = malidp->se_errors;
+ spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+ malidp_error_stats_dump("DE", de_errors, m);
+ malidp_error_stats_dump("SE", se_errors, m);
+ return 0;
+}
+
+static int malidp_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, malidp_show_stats, inode->i_private);
+}
+
+static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *drm = m->private;
+ struct malidp_drm *malidp = drm->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&malidp->errors_lock, irqflags);
+ malidp_error_stats_init(&malidp->de_errors);
+ malidp_error_stats_init(&malidp->se_errors);
+ spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+ return len;
+}
+
+static const struct file_operations malidp_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = malidp_debugfs_open,
+ .read = seq_read,
+ .write = malidp_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int malidp_debugfs_init(struct drm_minor *minor)
+{
+ struct malidp_drm *malidp = minor->dev->dev_private;
+ struct dentry *dentry = NULL;
+
+ malidp_error_stats_init(&malidp->de_errors);
+ malidp_error_stats_init(&malidp->se_errors);
+ spin_lock_init(&malidp->errors_lock);
+ dentry = debugfs_create_file("debug",
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root, minor->dev,
+ &malidp_debugfs_fops);
+ if (!dentry) {
+ DRM_ERROR("Cannot create debug file\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
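malidp_show_stats() copies both structures under the spinlock and only then formats them, keeping the critical section tiny and never calling seq_printf() with the lock held. The same snapshot idiom in portable C (illustrative types, pthread spinlock standing in for the kernel one):

#include <pthread.h>
#include <stdio.h>

struct error_stats { int num_errors; unsigned int status; long long vblank; };

static struct error_stats de_errors = { 3, 0x10000, 42 };
static pthread_spinlock_t errors_lock;

/* Copy under the lock, print outside it: the IRQ-updated structure is
 * never observed half-written, and no I/O happens with the lock held. */
static void dump_stats(void)
{
	struct error_stats snap;

	pthread_spin_lock(&errors_lock);
	snap = de_errors;
	pthread_spin_unlock(&errors_lock);

	printf("num_errors: %d status: 0x%08x vblank: %lld\n",
	       snap.num_errors, snap.status, snap.vblank);
}

int main(void)
{
	pthread_spin_init(&errors_lock, PTHREAD_PROCESS_PRIVATE);
	dump_stats();
	return 0;
}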
+
static struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
DRIVER_PRIME,
@@ -342,6 +463,9 @@ static struct drm_driver malidp_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = malidp_debugfs_init,
+#endif
.fops = &fops,
.name = "mali-dp",
.desc = "ARM Mali Display Processor driver",
@@ -458,6 +582,8 @@ static int malidp_runtime_pm_suspend(struct device *dev)
/* we can only suspend if the hardware is in config mode */
WARN_ON(!hwdev->hw->in_config_mode(hwdev));
+ malidp_se_irq_fini(hwdev);
+ malidp_de_irq_fini(hwdev);
hwdev->pm_suspended = true;
clk_disable_unprepare(hwdev->mclk);
clk_disable_unprepare(hwdev->aclk);
@@ -476,6 +602,8 @@ static int malidp_runtime_pm_resume(struct device *dev)
clk_prepare_enable(hwdev->aclk);
clk_prepare_enable(hwdev->mclk);
hwdev->pm_suspended = false;
+ malidp_de_irq_hw_init(hwdev);
+ malidp_se_irq_hw_init(hwdev);
return 0;
}
@@ -488,6 +616,7 @@ static int malidp_bind(struct device *dev)
struct malidp_hw_device *hwdev;
struct platform_device *pdev = to_platform_device(dev);
struct of_device_id const *dev_id;
+ struct drm_encoder *encoder;
/* number of lines for the R, G and B output */
u8 output_width[MAX_OUTPUT_CHANNELS];
int ret = 0, i;
@@ -587,8 +716,9 @@ static int malidp_bind(struct device *dev)
for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
out_depth = (out_depth << 8) | (output_width[i] & 0xf);
malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
+ hwdev->output_color_depth = out_depth;
- atomic_set(&malidp->config_valid, 0);
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_INIT);
init_waitqueue_head(&malidp->wq);
ret = malidp_init(drm);
@@ -608,6 +738,15 @@ static int malidp_bind(struct device *dev)
goto bind_fail;
}
+ /* We expect a maximum of two encoders: one for the actual
+ * display and a virtual one for the writeback connector
+ */
+ WARN_ON(drm->mode_config.num_encoder > 2);
+ list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
+ encoder->possible_clones =
+ (1 << drm->mode_config.num_encoder) - 1;
+ }
+
ret = malidp_irq_init(pdev);
if (ret < 0)
goto irq_init_fail;
@@ -641,8 +780,8 @@ register_fail:
fbdev_fail:
pm_runtime_get_sync(dev);
vblank_fail:
- malidp_se_irq_fini(drm);
- malidp_de_irq_fini(drm);
+ malidp_se_irq_fini(hwdev);
+ malidp_de_irq_fini(hwdev);
drm->irq_enabled = false;
irq_init_fail:
drm_atomic_helper_shutdown(drm);
@@ -672,14 +811,15 @@ static void malidp_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
drm_dev_unregister(drm);
drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
drm_crtc_vblank_off(&malidp->crtc);
- malidp_se_irq_fini(drm);
- malidp_de_irq_fini(drm);
+ malidp_se_irq_fini(hwdev);
+ malidp_de_irq_fini(hwdev);
drm->irq_enabled = false;
drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
@@ -752,8 +892,25 @@ static int __maybe_unused malidp_pm_resume(struct device *dev)
return 0;
}
+static int __maybe_unused malidp_pm_suspend_late(struct device *dev)
+{
+ if (!pm_runtime_status_suspended(dev)) {
+ malidp_runtime_pm_suspend(dev);
+ pm_runtime_set_suspended(dev);
+ }
+ return 0;
+}
+
+static int __maybe_unused malidp_pm_resume_early(struct device *dev)
+{
+ malidp_runtime_pm_resume(dev);
+ pm_runtime_set_active(dev);
+ return 0;
+}
+
static const struct dev_pm_ops malidp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume) \
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend_late, malidp_pm_resume_early) \
SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
};
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index c70989b93387..e3eb0cb1f385 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -13,18 +13,38 @@
#ifndef __MALIDP_DRV_H__
#define __MALIDP_DRV_H__
+#include <drm/drm_writeback.h>
+#include <drm/drm_encoder.h>
#include <linux/mutex.h>
#include <linux/wait.h>
+#include <linux/spinlock.h>
#include <drm/drmP.h>
#include "malidp_hw.h"
+#define MALIDP_CONFIG_VALID_INIT 0
+#define MALIDP_CONFIG_VALID_DONE 1
+#define MALIDP_CONFIG_START 0xd0
+
+struct malidp_error_stats {
+ s32 num_errors;
+ u32 last_error_status;
+ s64 last_error_vblank;
+};
+
struct malidp_drm {
struct malidp_hw_device *dev;
struct drm_crtc crtc;
+ struct drm_writeback_connector mw_connector;
wait_queue_head_t wq;
struct drm_pending_vblank_event *event;
atomic_t config_valid;
u32 core_id;
+#ifdef CONFIG_DEBUG_FS
+ struct malidp_error_stats de_errors;
+ struct malidp_error_stats se_errors;
+ /* Protects error stats */
+ spinlock_t errors_lock;
+#endif
};
#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
@@ -62,6 +82,12 @@ struct malidp_crtc_state {
int malidp_de_planes_init(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
+#ifdef CONFIG_DEBUG_FS
+void malidp_error(struct malidp_drm *malidp,
+ struct malidp_error_stats *error_stats, u32 status,
+ u64 vblank);
+#endif
+
/* often used combination of rotational bits */
#define MALIDP_ROTATED_MASK (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270)
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 069783e715f1..c94a4422e0e9 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -21,15 +21,24 @@
#include "malidp_drv.h"
#include "malidp_hw.h"
+#include "malidp_mw.h"
+
+enum {
+ MW_NOT_ENABLED = 0, /* SE writeback not enabled */
+ MW_ONESHOT, /* SE in one-shot mode for writeback */
+ MW_START, /* SE started writeback */
+ MW_RESTART, /* SE will start another writeback after this one */
+ MW_STOP, /* SE needs to stop after this writeback */
+};
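The DP500 has no hardware one-shot writeback mode, so the driver emulates it with this small state machine driven by enable, disable and end-of-write events. A standalone sketch of the transitions (event names are illustrative; the real driver spreads them across enable_memwrite(), disable_memwrite() and the SE IRQ handler):

#include <stdio.h>

enum mw_state { MW_NOT_ENABLED, MW_ONESHOT, MW_START, MW_RESTART, MW_STOP };
enum mw_event { EV_ENABLE, EV_DISABLE, EV_EOW };

static enum mw_state mw_step(enum mw_state s, enum mw_event ev)
{
	switch (ev) {
	case EV_ENABLE:   /* enabling while busy becomes a restart */
		return (s == MW_NOT_ENABLED) ? MW_START : MW_RESTART;
	case EV_DISABLE:  /* disabling while busy becomes a deferred stop */
		return (s == MW_START || s == MW_RESTART) ? MW_STOP : s;
	case EV_EOW:      /* end of write: retire or rearm the job */
		switch (s) {
		case MW_STOP:    return MW_NOT_ENABLED;
		case MW_RESTART: return MW_START;  /* next frame writes again */
		case MW_START:   return MW_STOP;   /* emulate one-shot: stop after one frame */
		default:         return s;
		}
	}
	return s;
}

int main(void)
{
	enum mw_state s = MW_NOT_ENABLED;

	s = mw_step(s, EV_ENABLE);   /* -> MW_START */
	s = mw_step(s, EV_EOW);      /* -> MW_STOP (one frame written) */
	s = mw_step(s, EV_EOW);      /* -> MW_NOT_ENABLED */
	printf("final state: %d\n", s);
	return 0;
}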
static const struct malidp_format_id malidp500_de_formats[] = {
/* fourcc, layers supporting the format, internal id */
- { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 },
- { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 },
+ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 0 },
+ { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 1 },
{ DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 },
{ DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 },
- { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 4 },
- { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 5 },
+ { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 4 },
+ { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 5 },
{ DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 },
{ DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 },
{ DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 },
@@ -38,7 +47,7 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 },
{ DRM_FORMAT_UYVY, DE_VIDEO1, 12 },
{ DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
- { DRM_FORMAT_NV12, DE_VIDEO1, 14 },
+ { DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 },
{ DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
};
@@ -47,27 +56,27 @@ static const struct malidp_format_id malidp500_de_formats[] = {
#define MALIDP_COMMON_FORMATS \
/* fourcc, layers supporting the format, internal id */ \
- { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \
- { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \
- { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \
- { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \
+ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \
+ { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 1) }, \
+ { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 2) }, \
+ { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 3) }, \
{ DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \
{ DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \
{ DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \
{ DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \
- { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \
- { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \
- { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \
- { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \
- { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \
- { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \
+ { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 0) }, \
+ { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 1) }, \
+ { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 2) }, \
+ { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 3) }, \
+ { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 0) }, \
+ { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 1) }, \
{ DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \
{ DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
{ DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
- { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
+ { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \
{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
static const struct malidp_format_id malidp550_de_formats[] = {
@@ -223,15 +232,20 @@ static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
return false;
}
-static void malidp500_set_config_valid(struct malidp_hw_device *hwdev)
+static void malidp500_set_config_valid(struct malidp_hw_device *hwdev, u8 value)
{
- malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+ if (value)
+ malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+ else
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
}
static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
{
u32 val = 0;
+ malidp_hw_write(hwdev, hwdev->output_color_depth,
+ hwdev->hw->map.out_depth_base);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL);
if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
val |= MALIDP500_HSYNCPOL;
@@ -368,6 +382,55 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
return ret;
}
+static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
+ dma_addr_t *addrs, s32 *pitches,
+ int num_planes, u16 w, u16 h, u32 fmt_id)
+{
+ u32 base = MALIDP500_SE_MEMWRITE_BASE;
+ u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ /* enable the scaling engine block */
+ malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC);
+
+ /* restart the writeback if already enabled */
+ if (hwdev->mw_state != MW_NOT_ENABLED)
+ hwdev->mw_state = MW_RESTART;
+ else
+ hwdev->mw_state = MW_START;
+
+ malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT);
+ switch (num_planes) {
+ case 2:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
+ /* fall through */
+ case 1:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
+ break;
+ default:
+ WARN(1, "Invalid number of planes");
+ }
+
+ malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
+ MALIDP500_SE_MEMWRITE_OUT_SIZE);
+ malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
+
+ return 0;
+}
+
+static void malidp500_disable_memwrite(struct malidp_hw_device *hwdev)
+{
+ u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ if (hwdev->mw_state == MW_START || hwdev->mw_state == MW_RESTART)
+ hwdev->mw_state = MW_STOP;
+ malidp_hw_clearbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
+ malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC);
+}
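Both enable_memwrite implementations program plane 2 first and fall through into the plane 1 case, so a single switch covers one- and two-plane formats. The same intentional fall-through shape in isolation, with register writes replaced by prints:

#include <stdint.h>
#include <stdio.h>

/* Program up to two planes; case 2 deliberately falls through into
 * case 1 so the plane 1 registers are always written. */
static void program_planes(const uint64_t *addrs, const int32_t *pitches,
			   int num_planes)
{
	switch (num_planes) {
	case 2:
		printf("P2 addr=0x%llx stride=%d\n",
		       (unsigned long long)addrs[1], pitches[1]);
		/* fall through */
	case 1:
		printf("P1 addr=0x%llx stride=%d\n",
		       (unsigned long long)addrs[0], pitches[0]);
		break;
	default:
		fprintf(stderr, "invalid number of planes\n");
	}
}

int main(void)
{
	uint64_t addrs[2] = { 0x80000000ULL, 0x80100000ULL };
	int32_t pitches[2] = { 1920, 1920 };

	program_planes(addrs, pitches, 2);   /* NV12-style two-plane buffer */
	return 0;
}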
+
static int malidp550_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -447,15 +510,20 @@ static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
return false;
}
-static void malidp550_set_config_valid(struct malidp_hw_device *hwdev)
+static void malidp550_set_config_valid(struct malidp_hw_device *hwdev, u8 value)
{
- malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+ if (value)
+ malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+ else
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
}
static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
{
u32 val = MALIDP_DE_DEFAULT_PREFETCH_START;
+ malidp_hw_write(hwdev, hwdev->output_color_depth,
+ hwdev->hw->map.out_depth_base);
malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL);
/*
* Mali-DP550 and Mali-DP650 encode the background color like this:
@@ -588,6 +656,51 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
return ret;
}
+static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
+ dma_addr_t *addrs, s32 *pitches,
+ int num_planes, u16 w, u16 h, u32 fmt_id)
+{
+ u32 base = MALIDP550_SE_MEMWRITE_BASE;
+ u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ /* enable the scaling engine block */
+ malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC);
+
+ hwdev->mw_state = MW_ONESHOT;
+
+ malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT);
+ switch (num_planes) {
+ case 2:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
+ /* fall through */
+ case 1:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
+ break;
+ default:
+ WARN(1, "Invalid number of planes");
+ }
+
+ malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
+ MALIDP550_SE_MEMWRITE_OUT_SIZE);
+ malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
+ MALIDP550_SE_CONTROL);
+
+ return 0;
+}
+
+static void malidp550_disable_memwrite(struct malidp_hw_device *hwdev)
+{
+ u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ malidp_hw_clearbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
+ MALIDP550_SE_CONTROL);
+ malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC);
+}
+
static int malidp650_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -632,11 +745,18 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
MALIDP500_DE_IRQ_VSYNC |
MALIDP500_DE_IRQ_GLOBAL,
.vsync_irq = MALIDP500_DE_IRQ_VSYNC,
+ .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP500_DE_IRQ_AXI_ERR |
+ MALIDP500_DE_IRQ_SATURATION,
},
.se_irq_map = {
.irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
+ MALIDP500_SE_IRQ_CONF_VALID |
MALIDP500_SE_IRQ_GLOBAL,
- .vsync_irq = 0,
+ .vsync_irq = MALIDP500_SE_IRQ_CONF_VALID,
+ .err_mask = MALIDP500_SE_IRQ_INIT_BUSY |
+ MALIDP500_SE_IRQ_AXI_ERROR |
+ MALIDP500_SE_IRQ_OVERRUN,
},
.dc_irq_map = {
.irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
@@ -655,6 +775,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.rotmem_required = malidp500_rotmem_required,
.se_set_scaling_coeffs = malidp500_se_set_scaling_coeffs,
.se_calc_mclk = malidp500_se_calc_mclk,
+ .enable_memwrite = malidp500_enable_memwrite,
+ .disable_memwrite = malidp500_disable_memwrite,
.features = MALIDP_DEVICE_LV_HAS_3_STRIDES,
},
[MALIDP_550] = {
@@ -670,13 +792,20 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.irq_mask = MALIDP_DE_IRQ_UNDERRUN |
MALIDP550_DE_IRQ_VSYNC,
.vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+ .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP550_DE_IRQ_SATURATION |
+ MALIDP550_DE_IRQ_AXI_ERR,
},
.se_irq_map = {
- .irq_mask = MALIDP550_SE_IRQ_EOW |
- MALIDP550_SE_IRQ_AXI_ERR,
+ .irq_mask = MALIDP550_SE_IRQ_EOW,
+ .vsync_irq = MALIDP550_SE_IRQ_EOW,
+ .err_mask = MALIDP550_SE_IRQ_AXI_ERR |
+ MALIDP550_SE_IRQ_OVR |
+ MALIDP550_SE_IRQ_IBSY,
},
.dc_irq_map = {
- .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+ .irq_mask = MALIDP550_DC_IRQ_CONF_VALID |
+ MALIDP550_DC_IRQ_SE,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
.pixel_formats = malidp550_de_formats,
@@ -692,6 +821,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.rotmem_required = malidp550_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
+ .enable_memwrite = malidp550_enable_memwrite,
+ .disable_memwrite = malidp550_disable_memwrite,
.features = 0,
},
[MALIDP_650] = {
@@ -708,13 +839,25 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
MALIDP650_DE_IRQ_DRIFT |
MALIDP550_DE_IRQ_VSYNC,
.vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+ .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP650_DE_IRQ_DRIFT |
+ MALIDP550_DE_IRQ_SATURATION |
+ MALIDP550_DE_IRQ_AXI_ERR |
+ MALIDP650_DE_IRQ_ACEV1 |
+ MALIDP650_DE_IRQ_ACEV2 |
+ MALIDP650_DE_IRQ_ACEG |
+ MALIDP650_DE_IRQ_AXIEP,
},
.se_irq_map = {
- .irq_mask = MALIDP550_SE_IRQ_EOW |
- MALIDP550_SE_IRQ_AXI_ERR,
+ .irq_mask = MALIDP550_SE_IRQ_EOW,
+ .vsync_irq = MALIDP550_SE_IRQ_EOW,
+ .err_mask = MALIDP550_SE_IRQ_AXI_ERR |
+ MALIDP550_SE_IRQ_OVR |
+ MALIDP550_SE_IRQ_IBSY,
},
.dc_irq_map = {
- .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+ .irq_mask = MALIDP550_DC_IRQ_CONF_VALID |
+ MALIDP550_DC_IRQ_SE,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
.pixel_formats = malidp550_de_formats,
@@ -730,6 +873,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.rotmem_required = malidp550_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
+ .enable_memwrite = malidp550_enable_memwrite,
+ .disable_memwrite = malidp550_disable_memwrite,
.features = 0,
},
};
@@ -791,7 +936,7 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
malidp->event = NULL;
spin_unlock(&drm->event_lock);
}
- atomic_set(&malidp->config_valid, 1);
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
ret = IRQ_WAKE_THREAD;
}
@@ -800,10 +945,17 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
return ret;
mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
- status &= mask;
+ /* keep the status of the enabled interrupts, plus the error bits */
+ status &= (mask | de->err_mask);
if ((status & de->vsync_irq) && malidp->crtc.enabled)
drm_crtc_handle_vblank(&malidp->crtc);
+#ifdef CONFIG_DEBUG_FS
+ if (status & de->err_mask) {
+ malidp_error(malidp, &malidp->de_errors, status,
+ drm_crtc_vblank_count(&malidp->crtc));
+ }
+#endif
malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
return (ret == IRQ_NONE) ? IRQ_HANDLED : ret;
@@ -819,6 +971,23 @@ static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev)
+{
+ /* ensure interrupts are disabled */
+ malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+ malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+
+ /* first enable the DC block IRQs */
+ malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
+ hwdev->hw->map.dc_irq_map.irq_mask);
+
+ /* now enable the DE block IRQs */
+ malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->hw->map.de_irq_map.irq_mask);
+}
+
int malidp_de_irq_init(struct drm_device *drm, int irq)
{
struct malidp_drm *malidp = drm->dev_private;
@@ -839,22 +1008,13 @@ int malidp_de_irq_init(struct drm_device *drm, int irq)
return ret;
}
- /* first enable the DC block IRQs */
- malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
- hwdev->hw->map.dc_irq_map.irq_mask);
-
- /* now enable the DE block IRQs */
- malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->hw->map.de_irq_map.irq_mask);
+ malidp_de_irq_hw_init(hwdev);
return 0;
}
-void malidp_de_irq_fini(struct drm_device *drm)
+void malidp_de_irq_fini(struct malidp_hw_device *hwdev)
{
- struct malidp_drm *malidp = drm->dev_private;
- struct malidp_hw_device *hwdev = malidp->dev;
-
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
hwdev->hw->map.de_irq_map.irq_mask);
malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
@@ -879,19 +1039,61 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
return IRQ_NONE;
status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
- if (!(status & se->irq_mask))
+ if (!(status & (se->irq_mask | se->err_mask)))
return IRQ_NONE;
+#ifdef CONFIG_DEBUG_FS
+ if (status & se->err_mask)
+ malidp_error(malidp, &malidp->se_errors, status,
+ drm_crtc_vblank_count(&malidp->crtc));
+#endif
mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
- status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
status &= mask;
- /* ToDo: status decoding and firing up of VSYNC and page flip events */
+
+ if (status & se->vsync_irq) {
+ switch (hwdev->mw_state) {
+ case MW_ONESHOT:
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+ break;
+ case MW_STOP:
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+ /* disable writeback after stop */
+ hwdev->mw_state = MW_NOT_ENABLED;
+ break;
+ case MW_RESTART:
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+ /* fall through to a new start */
+ case MW_START:
+ /* writeback started, need to emulate one-shot mode */
+ hw->disable_memwrite(hwdev);
+ /*
+ * only set config_valid HW bit if there is no other update
+ * in progress or if we raced ahead of the DE IRQ handler
+ * and config_valid flag will not be update until later
+ */
+ status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+ if ((atomic_read(&malidp->config_valid) != MALIDP_CONFIG_START) ||
+ (status & hw->map.dc_irq_map.vsync_irq))
+ hw->set_config_valid(hwdev, 1);
+ break;
+ }
+ }
malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status);
return IRQ_HANDLED;
}
+void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev)
+{
+ /* ensure interrupts are disabled */
+ malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+
+ malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
+ hwdev->hw->map.se_irq_map.irq_mask);
+}
+
static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg)
{
return IRQ_HANDLED;
@@ -915,17 +1117,14 @@ int malidp_se_irq_init(struct drm_device *drm, int irq)
return ret;
}
- malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
- hwdev->hw->map.se_irq_map.irq_mask);
+ hwdev->mw_state = MW_NOT_ENABLED;
+ malidp_se_irq_hw_init(hwdev);
return 0;
}
-void malidp_se_irq_fini(struct drm_device *drm)
+void malidp_se_irq_fini(struct malidp_hw_device *hwdev)
{
- struct malidp_drm *malidp = drm->dev_private;
- struct malidp_hw_device *hwdev = malidp->dev;
-
malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
hwdev->hw->map.se_irq_map.irq_mask);
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index b5dd6c73ec9f..ad2e96915d44 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -33,6 +33,7 @@ enum {
DE_GRAPHICS2 = BIT(2), /* used only in DP500 */
DE_VIDEO2 = BIT(3),
DE_SMART = BIT(4),
+ SE_MEMWRITE = BIT(5),
};
struct malidp_format_id {
@@ -52,6 +53,7 @@ struct malidp_format_id {
struct malidp_irq_map {
u32 irq_mask; /* mask of IRQs that can be enabled in the block */
u32 vsync_irq; /* IRQ bit used for signaling during VSYNC */
+ u32 err_mask; /* mask of bits that represent errors */
};
struct malidp_layer {
@@ -151,12 +153,13 @@ struct malidp_hw {
bool (*in_config_mode)(struct malidp_hw_device *hwdev);
/*
- * Set configuration valid flag for hardware parameters that can
- * be changed outside the configuration mode. Hardware will use
- * the new settings when config valid is set after the end of the
- * current buffer scanout
+ * Set or clear the configuration valid flag for hardware parameters
+ * that can be changed outside the configuration mode. Hardware will
+ * apply the new settings when config valid is set, after the end of
+ * the current buffer scanout, and will ignore any new values for
+ * those parameters while the config valid flag is cleared
*/
- void (*set_config_valid)(struct malidp_hw_device *hwdev);
+ void (*set_config_valid)(struct malidp_hw_device *hwdev, u8 value);
/*
* Set a new mode in hardware. Requires the hardware to be in
@@ -177,6 +180,23 @@ struct malidp_hw {
long (*se_calc_mclk)(struct malidp_hw_device *hwdev,
struct malidp_se_config *se_config,
struct videomode *vm);
+ /*
+ * Enable writing to memory the content of the next frame
+ * @param hwdev - malidp_hw_device structure containing the HW description
+ * @param addrs - array of addresses for each plane
+ * @param pitches - array of pitches for each plane
+ * @param num_planes - number of planes to be written
+ * @param w - width of the output frame
+ * @param h - height of the output frame
+ * @param fmt_id - internal format ID of output buffer
+ */
+ int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
+ s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+
+ /*
+ * Disable the writing to memory of the next frame's content.
+ */
+ void (*disable_memwrite)(struct malidp_hw_device *hwdev);
u8 features;
};
@@ -210,10 +230,14 @@ struct malidp_hw_device {
u8 min_line_size;
u16 max_line_size;
+ u32 output_color_depth;
/* track the device PM state */
bool pm_suspended;
+ /* track the SE memory writeback state */
+ u8 mw_state;
+
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
};
@@ -279,9 +303,11 @@ static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev,
}
int malidp_de_irq_init(struct drm_device *drm, int irq);
-void malidp_de_irq_fini(struct drm_device *drm);
+void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev);
+void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev);
+void malidp_de_irq_fini(struct malidp_hw_device *hwdev);
int malidp_se_irq_init(struct drm_device *drm, int irq);
-void malidp_se_irq_fini(struct drm_device *drm);
+void malidp_se_irq_fini(struct malidp_hw_device *hwdev);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
u8 layer_id, u32 format);
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
new file mode 100644
index 000000000000..ba6ae66387c9
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * ARM Mali DP Writeback connector implementation
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drmP.h>
+#include <drm/drm_writeback.h>
+
+#include "malidp_drv.h"
+#include "malidp_hw.h"
+#include "malidp_mw.h"
+
+#define to_mw_state(_state) (struct malidp_mw_connector_state *)(_state)
+
+struct malidp_mw_connector_state {
+ struct drm_connector_state base;
+ dma_addr_t addrs[2];
+ s32 pitches[2];
+ u8 format;
+ u8 n_planes;
+};
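The driver-private connector state embeds the core state as its first member, which is what makes the plain cast in to_mw_state() legal. The pattern in miniature, with the cast spelled out via offsetof (types here are illustrative):

#include <stddef.h>
#include <stdio.h>

struct base_state { int dummy; };

struct priv_state {
	struct base_state base;   /* must remain the first member */
	int format;
};

/* Equivalent of to_mw_state(): recover the wrapper from a base pointer. */
#define to_priv(b) \
	((struct priv_state *)((char *)(b) - offsetof(struct priv_state, base)))

int main(void)
{
	struct priv_state s = { .format = 14 };
	struct base_state *b = &s.base;

	printf("format=%d\n", to_priv(b)->format);
	return 0;
}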
+
+static int malidp_mw_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static enum drm_mode_status
+malidp_mw_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ int w = mode->hdisplay, h = mode->vdisplay;
+
+ if ((w < mode_config->min_width) || (w > mode_config->max_width))
+ return MODE_BAD_HVALUE;
+
+ if ((h < mode_config->min_height) || (h > mode_config->max_height))
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
+}
+
+const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+ .get_modes = malidp_mw_connector_get_modes,
+ .mode_valid = malidp_mw_connector_mode_valid,
+};
+
+static void malidp_mw_connector_reset(struct drm_connector *connector)
+{
+ struct malidp_mw_connector_state *mw_state =
+ kzalloc(sizeof(*mw_state), GFP_KERNEL);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(connector->state);
+ __drm_atomic_helper_connector_reset(connector, &mw_state->base);
+}
+
+static enum drm_connector_status
+malidp_mw_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void malidp_mw_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_state *
+malidp_mw_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct malidp_mw_connector_state *mw_state;
+
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ mw_state = kzalloc(sizeof(*mw_state), GFP_KERNEL);
+ if (!mw_state)
+ return NULL;
+
+ /* No need to preserve any of our driver-local data */
+ __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
+
+ return &mw_state->base;
+}
+
+static const struct drm_connector_funcs malidp_mw_connector_funcs = {
+ .reset = malidp_mw_connector_reset,
+ .detect = malidp_mw_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = malidp_mw_connector_destroy,
+ .atomic_duplicate_state = malidp_mw_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int
+malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct malidp_mw_connector_state *mw_state = to_mw_state(conn_state);
+ struct malidp_drm *malidp = encoder->dev->dev_private;
+ struct drm_framebuffer *fb;
+ int i, n_planes;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+ if ((fb->width != crtc_state->mode.hdisplay) ||
+ (fb->height != crtc_state->mode.vdisplay)) {
+ DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+ fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ mw_state->format =
+ malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
+ fb->format->format);
+ if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
+ return -EINVAL;
+ }
+
+ n_planes = drm_format_num_planes(fb->format->format);
+ for (i = 0; i < n_planes; i++) {
+ struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, i);
+ /* memory write buffers are never rotated */
+ u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 0);
+
+ if (fb->pitches[i] & (alignment - 1)) {
+ DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
+ fb->pitches[i], i);
+ return -EINVAL;
+ }
+ mw_state->pitches[i] = fb->pitches[i];
+ mw_state->addrs[i] = obj->paddr + fb->offsets[i];
+ }
+ mw_state->n_planes = n_planes;
+
+ return 0;
+}
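The pitch validation relies on the alignment being a power of two, so fb->pitches[i] & (alignment - 1) extracts the misaligned remainder without a division. As a standalone check (the helper name is hypothetical, not driver API):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* True when pitch is a multiple of alignment; alignment must be 2^n. */
static bool pitch_aligned(uint32_t pitch, uint32_t alignment)
{
	assert(alignment && (alignment & (alignment - 1)) == 0);
	return (pitch & (alignment - 1)) == 0;
}

int main(void)
{
	assert(pitch_aligned(7680, 64));    /* 1920 * 4 bytes, 64-byte aligned */
	assert(!pitch_aligned(7682, 64));
	return 0;
}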
+
+static const struct drm_encoder_helper_funcs malidp_mw_encoder_helper_funcs = {
+ .atomic_check = malidp_mw_encoder_atomic_check,
+};
+
+static u32 *get_writeback_formats(struct malidp_drm *malidp, int *n_formats)
+{
+ const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
+ u32 *formats;
+ int n, i;
+
+ formats = kcalloc(map->n_pixel_formats, sizeof(*formats),
+ GFP_KERNEL);
+ if (!formats)
+ return NULL;
+
+ for (n = 0, i = 0; i < map->n_pixel_formats; i++) {
+ if (map->pixel_formats[i].layer & SE_MEMWRITE)
+ formats[n++] = map->pixel_formats[i].format;
+ }
+
+ *n_formats = n;
+
+ return formats;
+}
+
+int malidp_mw_connector_init(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ u32 *formats;
+ int ret, n_formats;
+
+ if (!malidp->dev->hw->enable_memwrite)
+ return 0;
+
+ malidp->mw_connector.encoder.possible_crtcs = 1 << drm_crtc_index(&malidp->crtc);
+ drm_connector_helper_add(&malidp->mw_connector.base,
+ &malidp_mw_connector_helper_funcs);
+
+ formats = get_writeback_formats(malidp, &n_formats);
+ if (!formats)
+ return -ENOMEM;
+
+ ret = drm_writeback_connector_init(drm, &malidp->mw_connector,
+ &malidp_mw_connector_funcs,
+ &malidp_mw_encoder_helper_funcs,
+ formats, n_formats);
+ kfree(formats);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void malidp_mw_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *old_state)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct drm_writeback_connector *mw_conn = &malidp->mw_connector;
+ struct drm_connector_state *conn_state = mw_conn->base.state;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_mw_connector_state *mw_state;
+
+ if (!conn_state)
+ return;
+
+ mw_state = to_mw_state(conn_state);
+
+ if (conn_state->writeback_job && conn_state->writeback_job->fb) {
+ struct drm_framebuffer *fb = conn_state->writeback_job->fb;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev,
+ "Enable memwrite %ux%u:%d %pad fmt: %u\n",
+ fb->width, fb->height,
+ mw_state->pitches[0],
+ &mw_state->addrs[0],
+ mw_state->format);
+
+ drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
+ conn_state->writeback_job = NULL;
+
+ hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
+ mw_state->pitches, mw_state->n_planes,
+ fb->width, fb->height, mw_state->format);
+ } else {
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
+ hwdev->hw->disable_memwrite(hwdev);
+ }
+}
diff --git a/drivers/gpu/drm/arm/malidp_mw.h b/drivers/gpu/drm/arm/malidp_mw.h
new file mode 100644
index 000000000000..19a007676a1d
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_mw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ */
+
+#ifndef __MALIDP_MW_H__
+#define __MALIDP_MW_H__
+
+int malidp_mw_connector_init(struct drm_device *drm);
+void malidp_mw_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *old_state);
+#endif
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 149024fb4432..3579d36b2a71 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -53,6 +53,8 @@
#define MALIDP550_DE_IRQ_AXI_ERR (1 << 16)
#define MALIDP550_SE_IRQ_EOW (1 << 0)
#define MALIDP550_SE_IRQ_AXI_ERR (1 << 16)
+#define MALIDP550_SE_IRQ_OVR (1 << 17)
+#define MALIDP550_SE_IRQ_IBSY (1 << 18)
#define MALIDP550_DC_IRQ_CONF_VALID (1 << 0)
#define MALIDP550_DC_IRQ_CONF_MODE (1 << 4)
#define MALIDP550_DC_IRQ_CONF_ACTIVE (1 << 16)
@@ -60,12 +62,18 @@
#define MALIDP550_DC_IRQ_SE (1 << 24)
#define MALIDP650_DE_IRQ_DRIFT (1 << 4)
+#define MALIDP650_DE_IRQ_ACEV1 (1 << 17)
+#define MALIDP650_DE_IRQ_ACEV2 (1 << 18)
+#define MALIDP650_DE_IRQ_ACEG (1 << 19)
+#define MALIDP650_DE_IRQ_AXIEP (1 << 28)
/* bit masks that are common between products */
#define MALIDP_CFG_VALID (1 << 0)
#define MALIDP_DISP_FUNC_GAMMA (1 << 0)
#define MALIDP_DISP_FUNC_CADJ (1 << 4)
#define MALIDP_DISP_FUNC_ILACED (1 << 8)
+#define MALIDP_SCALE_ENGINE_EN (1 << 16)
+#define MALIDP_SE_MEMWRITE_EN (2 << 5)
/* register offsets for IRQ management */
#define MALIDP_REG_STATUS 0x00000
@@ -153,6 +161,16 @@
(((x) & MALIDP_SE_ENH_LIMIT_MASK) << 16)
#define MALIDP_SE_ENH_COEFF0 0x04
+
+/* register offsets relative to MALIDP5x0_SE_MEMWRITE_BASE */
+#define MALIDP_MW_FORMAT 0x00000
+#define MALIDP_MW_P1_STRIDE 0x00004
+#define MALIDP_MW_P2_STRIDE 0x00008
+#define MALIDP_MW_P1_PTR_LOW 0x0000c
+#define MALIDP_MW_P1_PTR_HIGH 0x00010
+#define MALIDP_MW_P2_PTR_LOW 0x0002c
+#define MALIDP_MW_P2_PTR_HIGH 0x00030
+
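The P1/P2 pointer pairs above split a (possibly wider than 32-bit) DMA address across two registers. A minimal sketch of programming plane 1's pointer, assuming the driver's malidp_hw_write() accessor and the per-product memwrite base offset:

	/* sketch: split a dma_addr_t across the LOW/HIGH pointer registers;
	 * 'base' would be MALIDP500_SE_MEMWRITE_BASE or the DP550 equivalent */
	malidp_hw_write(hwdev, lower_32_bits(addr), base + MALIDP_MW_P1_PTR_LOW);
	malidp_hw_write(hwdev, upper_32_bits(addr), base + MALIDP_MW_P1_PTR_HIGH);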
/* register offsets and bits specific to DP500 */
#define MALIDP500_ADDR_SPACE_SIZE 0x01000
#define MALIDP500_DC_BASE 0x00000
@@ -186,7 +204,8 @@
#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
#define MALIDP500_SE_BASE 0x00c00
#define MALIDP500_SE_CONTROL 0x00c0c
-#define MALIDP500_SE_PTR_BASE 0x00e0c
+#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
+#define MALIDP500_SE_MEMWRITE_BASE 0x00e00
#define MALIDP500_DC_IRQ_BASE 0x00f00
#define MALIDP500_CONFIG_VALID 0x00f00
#define MALIDP500_CONFIG_ID 0x00fd4
@@ -217,6 +236,9 @@
#define MALIDP550_DE_PERF_BASE 0x00500
#define MALIDP550_SE_BASE 0x08000
#define MALIDP550_SE_CONTROL 0x08010
+#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7)
+#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
+#define MALIDP550_SE_MEMWRITE_BASE 0x08100
#define MALIDP550_DC_BASE 0x0c000
#define MALIDP550_DC_CONTROL 0x0c010
#define MALIDP550_DC_CONFIG_REQ (1 << 16)
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index ecf25cf9f9f5..9bc3c3213724 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
- armada_gem.o armada_overlay.o armada_trace.o
+ armada_gem.o armada_overlay.o armada_plane.o armada_trace.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index 41a784f5a5e6..2f7c048c5361 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -27,6 +27,10 @@ static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
/* Lower the watermark so as to eliminate jitter at higher bandwidths */
armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+ /* Initialise SPU register */
+ writel_relaxed(ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+ dcrtc->base + LCD_SPU_ADV_REG);
+
return 0;
}
@@ -75,9 +79,27 @@ static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
return 0;
}
+static void armada510_crtc_disable(struct armada_crtc *dcrtc)
+{
+ if (!IS_ERR(dcrtc->clk)) {
+ clk_disable_unprepare(dcrtc->clk);
+ dcrtc->clk = ERR_PTR(-EINVAL);
+ }
+}
+
+static void armada510_crtc_enable(struct armada_crtc *dcrtc,
+ const struct drm_display_mode *mode)
+{
+ if (IS_ERR(dcrtc->clk)) {
+ dcrtc->clk = dcrtc->extclk[0];
+ WARN_ON(clk_prepare_enable(dcrtc->clk));
+ }
+}
+
const struct armada_variant armada510_ops = {
.has_spu_adv_reg = true,
- .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
.init = armada510_crtc_init,
.compute_clock = armada510_crtc_compute_clock,
+ .disable = armada510_crtc_disable,
+ .enable = armada510_crtc_enable,
};
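The enable/disable hooks above use dcrtc->clk itself as state: ERR_PTR(-EINVAL) marks the clock as unclaimed, while a valid pointer marks it prepared and enabled. The pattern in isolation:

	/* sketch: ERR_PTR sentinel tracking whether the CRTC clock is claimed */
	if (IS_ERR(dcrtc->clk)) {		/* enable: claim and start it */
		dcrtc->clk = dcrtc->extclk[0];
		WARN_ON(clk_prepare_enable(dcrtc->clk));
	}
	/* ... */
	if (!IS_ERR(dcrtc->clk)) {		/* disable: stop and mark unclaimed */
		clk_disable_unprepare(dcrtc->clk);
		dcrtc->clk = ERR_PTR(-EINVAL);
	}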
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 03eeee11dd5b..da9360688b55 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -11,6 +11,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -19,33 +20,9 @@
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
+#include "armada_plane.h"
#include "armada_trace.h"
-enum csc_mode {
- CSC_AUTO = 0,
- CSC_YUV_CCIR601 = 1,
- CSC_YUV_CCIR709 = 2,
- CSC_RGB_COMPUTER = 1,
- CSC_RGB_STUDIO = 2,
-};
-
-static const uint32_t armada_primary_formats[] = {
- DRM_FORMAT_UYVY,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_BGR565,
-};
-
/*
* A note about interlacing. Let's consider HDMI 1920x1080i.
* The timing parameters we have from X are:
@@ -115,15 +92,13 @@ armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
}
}
-#define dpms_blanked(dpms) ((dpms) != DRM_MODE_DPMS_ON)
-
-static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc, bool enable)
{
uint32_t dumb_ctrl;
dumb_ctrl = dcrtc->cfg_dumb_ctrl;
- if (!dpms_blanked(dcrtc->dpms))
+ if (enable)
dumb_ctrl |= CFG_DUMB_ENA;
/*
@@ -132,295 +107,26 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
* force LCD_D[23:0] to output blank color, overriding the GPIO or
* SPI usage. So leave it as-is unless in DUMB24_RGB888_0 mode.
*/
- if (dpms_blanked(dcrtc->dpms) &&
- (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+ if (!enable && (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
dumb_ctrl &= ~DUMB_MASK;
dumb_ctrl |= DUMB_BLANK;
}
- /*
- * The documentation doesn't indicate what the normal state of
- * the sync signals are. Sebastian Hesselbart kindly probed
- * these signals on his board to determine their state.
- *
- * The non-inverted state of the sync signals is active high.
- * Setting these bits makes the appropriate signal active low.
- */
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
- dumb_ctrl |= CFG_INV_CSYNC;
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
- dumb_ctrl |= CFG_INV_HSYNC;
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
- dumb_ctrl |= CFG_INV_VSYNC;
-
- if (dcrtc->dumb_ctrl != dumb_ctrl) {
- dcrtc->dumb_ctrl = dumb_ctrl;
- writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
- }
-}
-
-void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
- int x, int y)
-{
- const struct drm_format_info *format = fb->format;
- unsigned int num_planes = format->num_planes;
- u32 addr = drm_fb_obj(fb)->dev_addr;
- int i;
-
- if (num_planes > 3)
- num_planes = 3;
-
- addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
- x * format->cpp[0];
-
- y /= format->vsub;
- x /= format->hsub;
-
- for (i = 1; i < num_planes; i++)
- addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
- x * format->cpp[i];
- for (; i < 3; i++)
- addrs[i] = 0;
-}
-
-static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
- int x, int y, struct armada_regs *regs, bool interlaced)
-{
- unsigned pitch = fb->pitches[0];
- u32 addrs[3], addr_odd, addr_even;
- unsigned i = 0;
-
- DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
- pitch, x, y, fb->format->cpp[0] * 8);
-
- armada_drm_plane_calc_addrs(addrs, fb, x, y);
-
- addr_odd = addr_even = addrs[0];
-
- if (interlaced) {
- addr_even += pitch;
- pitch *= 2;
- }
-
- /* write offset, base, and pitch */
- armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
- armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
- armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
-
- return i;
-}
-
-static void armada_drm_plane_work_call(struct armada_crtc *dcrtc,
- struct armada_plane_work *work,
- void (*fn)(struct armada_crtc *, struct armada_plane_work *))
-{
- struct armada_plane *dplane = drm_to_armada_plane(work->plane);
- struct drm_pending_vblank_event *event;
- struct drm_framebuffer *fb;
-
- if (fn)
- fn(dcrtc, work);
- drm_crtc_vblank_put(&dcrtc->crtc);
-
- event = work->event;
- fb = work->old_fb;
- if (event || fb) {
- struct drm_device *dev = dcrtc->crtc.dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (event)
- drm_crtc_send_vblank_event(&dcrtc->crtc, event);
- if (fb)
- __armada_drm_queue_unref_work(dev, fb);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
- if (work->need_kfree)
- kfree(work);
-
- wake_up(&dplane->frame_wait);
+ armada_updatel(dumb_ctrl,
+ ~(CFG_INV_CSYNC | CFG_INV_HSYNC | CFG_INV_VSYNC),
+ dcrtc->base + LCD_SPU_DUMB_CTRL);
}
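armada_updatel() used above is the driver's read-modify-write register accessor: it clears the mask bits, ORs in the new value, and only writes back when the contents actually change. A sketch of that behaviour:

	/* sketch of the armada_updatel() read-modify-write semantics */
	static inline void armada_updatel(u32 val, u32 mask, void __iomem *ptr)
	{
		u32 ov, v;

		ov = v = readl_relaxed(ptr);
		v = (v & ~mask) | val;
		if (ov != v)
			writel_relaxed(v, ptr);	/* skip the write if unchanged */
	}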
-static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
- struct drm_plane *plane)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_plane_work *work = xchg(&dplane->work, NULL);
-
- /* Handle any pending frame work. */
- if (work)
- armada_drm_plane_work_call(dcrtc, work, work->fn);
-}
-
-int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- struct armada_plane *plane = drm_to_armada_plane(work->plane);
- int ret;
-
- ret = drm_crtc_vblank_get(&dcrtc->crtc);
- if (ret)
- return ret;
-
- ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
- if (ret)
- drm_crtc_vblank_put(&dcrtc->crtc);
-
- return ret;
-}
-
-int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout)
-{
- return wait_event_timeout(plane->frame_wait, !plane->work, timeout);
-}
-
-void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
- struct armada_plane *dplane)
-{
- struct armada_plane_work *work = xchg(&dplane->work, NULL);
-
- if (work)
- armada_drm_plane_work_call(dcrtc, work, work->cancel);
-}
-
-static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static void armada_drm_crtc_complete_disable_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- unsigned long flags;
-
- if (dcrtc->plane == work->plane)
- dcrtc->plane = NULL;
-
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static struct armada_plane_work *
-armada_drm_crtc_alloc_plane_work(struct drm_plane *plane)
-{
- struct armada_plane_work *work;
- int i = 0;
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (!work)
- return NULL;
-
- work->plane = plane;
- work->fn = armada_drm_crtc_complete_frame_work;
- work->need_kfree = true;
- armada_reg_queue_end(work->regs, i);
-
- return work;
-}
-
-static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
- struct drm_framebuffer *fb, bool force)
-{
- struct armada_plane_work *work;
-
- if (!fb)
- return;
-
- if (force) {
- /* Display is disabled, so just drop the old fb */
- drm_framebuffer_put(fb);
- return;
- }
-
- work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
- if (work) {
- work->old_fb = fb;
-
- if (armada_drm_plane_work_queue(dcrtc, work) == 0)
- return;
-
- kfree(work);
- }
-
- /*
- * Oops - just drop the reference immediately and hope for
- * the best. The worst that will happen is the buffer gets
- * reused before it has finished being displayed.
- */
- drm_framebuffer_put(fb);
-}
-
-static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
-{
- /*
- * Tell the DRM core that vblank IRQs aren't going to happen for
- * a while. This cleans up any pending vblank events for us.
- */
- drm_crtc_vblank_off(&dcrtc->crtc);
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
-}
-
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-
- if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
- if (dpms_blanked(dpms))
- armada_drm_vblank_off(dcrtc);
- else if (!IS_ERR(dcrtc->clk))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
- dcrtc->dpms = dpms;
- armada_drm_crtc_update(dcrtc);
- if (!dpms_blanked(dpms))
- drm_crtc_vblank_on(&dcrtc->crtc);
- else if (!IS_ERR(dcrtc->clk))
- clk_disable_unprepare(dcrtc->clk);
- } else if (dcrtc->dpms != dpms) {
- dcrtc->dpms = dpms;
- }
-}
-
-/*
- * Prepare for a mode set. Turn off overlay to ensure that we don't end
- * up with the overlay size being bigger than the active screen size.
- * We rely upon X refreshing this state after the mode set has completed.
- *
- * The mode_config.mutex will be held for this call
- */
-static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct drm_plane *plane;
-
- /*
- * If we have an overlay plane associated with this CRTC, disable
- * it before the modeset to avoid its coordinates being outside
- * the new mode parameters.
- */
- plane = dcrtc->plane;
- if (plane) {
- drm_plane_force_disable(plane);
- WARN_ON(!armada_drm_plane_work_wait(drm_to_armada_plane(plane),
- HZ));
- }
-}
-
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+static void armada_drm_crtc_queue_state_event(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_pending_vblank_event *event;
- if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
- dcrtc->dpms = DRM_MODE_DPMS_ON;
- armada_drm_crtc_update(dcrtc);
+ /* If we have an event, we need vblank events enabled */
+ event = xchg(&crtc->state->event, NULL);
+ if (event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ dcrtc->event = event;
}
}
@@ -465,8 +171,8 @@ static void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
+ struct drm_pending_vblank_event *event;
void __iomem *base = dcrtc->base;
- struct drm_plane *ovl_plane;
if (stat & DMA_FF_UNDERFLOW)
DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
@@ -476,10 +182,6 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
if (stat & VSYNC_IRQ)
drm_crtc_handle_vblank(&dcrtc->crtc);
- ovl_plane = dcrtc->plane;
- if (ovl_plane)
- armada_drm_plane_work_run(dcrtc, ovl_plane);
-
spin_lock(&dcrtc->irq_lock);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
@@ -495,22 +197,35 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
writel_relaxed(val, base + LCD_SPU_ADV_REG);
}
- if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
- writel_relaxed(dcrtc->cursor_hw_pos,
- base + LCD_SPU_HWC_OVSA_HPXL_VLN);
- writel_relaxed(dcrtc->cursor_hw_sz,
- base + LCD_SPU_HWC_HPXL_VLN);
- armada_updatel(CFG_HWC_ENA,
- CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
- base + LCD_SPU_DMA_CTRL0);
- dcrtc->cursor_update = false;
+ if (stat & dcrtc->irq_ena & DUMB_FRAMEDONE) {
+ if (dcrtc->update_pending) {
+ armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
+ dcrtc->update_pending = false;
+ }
+ if (dcrtc->cursor_update) {
+ writel_relaxed(dcrtc->cursor_hw_pos,
+ base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+ writel_relaxed(dcrtc->cursor_hw_sz,
+ base + LCD_SPU_HWC_HPXL_VLN);
+ armada_updatel(CFG_HWC_ENA,
+ CFG_HWC_ENA | CFG_HWC_1BITMOD |
+ CFG_HWC_1BITENA,
+ base + LCD_SPU_DMA_CTRL0);
+ dcrtc->cursor_update = false;
+ }
armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
}
-
spin_unlock(&dcrtc->irq_lock);
- if (stat & GRA_FRAME_IRQ)
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
+ if (stat & VSYNC_IRQ && !dcrtc->update_pending) {
+ event = xchg(&dcrtc->event, NULL);
+ if (event) {
+ spin_lock(&dcrtc->crtc.dev->event_lock);
+ drm_crtc_send_vblank_event(&dcrtc->crtc, event);
+ spin_unlock(&dcrtc->crtc.dev->event_lock);
+ drm_crtc_vblank_put(&dcrtc->crtc);
+ }
+ }
}
static irqreturn_t armada_drm_irq(int irq, void *arg)
@@ -519,8 +234,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
/*
- * This is rediculous - rather than writing bits to clear, we
- * have to set the actual status register value. This is racy.
+ * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
+ * is set. Writing has some other effect to acknowledge the IRQ -
+ * without this, we only get a single IRQ.
*/
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
@@ -536,107 +252,16 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
return IRQ_NONE;
}
-static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
-{
- struct drm_display_mode *adj = &dcrtc->crtc.mode;
- uint32_t val = 0;
-
- if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
- val |= CFG_CSC_YUV_CCIR709;
- if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
- val |= CFG_CSC_RGB_STUDIO;
-
- /*
- * In auto mode, set the colorimetry, based upon the HDMI spec.
- * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
- * ITU601. It may be more appropriate to set this depending on
- * the source - but what if the graphic frame is YUV and the
- * video frame is RGB?
- */
- if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
- !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
- (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
- if (dcrtc->csc_yuv_mode == CSC_AUTO)
- val |= CFG_CSC_YUV_CCIR709;
- }
-
- /*
- * We assume we're connected to a TV-like device, so the YUV->RGB
- * conversion should produce a limited range. We should set this
- * depending on the connectors attached to this CRTC, and what
- * kind of device they report being connected.
- */
- if (dcrtc->csc_rgb_mode == CSC_AUTO)
- val |= CFG_CSC_RGB_STUDIO;
-
- return val;
-}
-
-static void armada_drm_gra_plane_regs(struct armada_regs *regs,
- struct drm_framebuffer *fb, struct armada_plane_state *state,
- int x, int y, bool interlaced)
-{
- unsigned int i;
- u32 ctrl0;
-
- i = armada_drm_crtc_calc_fb(fb, x, y, regs, interlaced);
- armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
- armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
- armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
-
- ctrl0 = state->ctrl0;
- if (interlaced)
- ctrl0 |= CFG_GRA_FTOGGLE;
-
- armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
- CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
- CFG_SWAPYU | CFG_YUV2RGB) |
- CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
- CFG_GRA_HSMOOTH | CFG_GRA_ENA,
- LCD_SPU_DMA_CTRL0);
- armada_reg_queue_end(regs, i);
-}
-
-static void armada_drm_primary_set(struct drm_crtc *crtc,
- struct drm_plane *plane, int x, int y)
-{
- struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_regs regs[8];
- bool interlaced = dcrtc->interlaced;
-
- armada_drm_gra_plane_regs(regs, plane->fb, state, x, y, interlaced);
- armada_drm_crtc_update_regs(dcrtc, regs);
-}
-
/* The mode_config.mutex will be held for this call */
-static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode, struct drm_display_mode *adj,
- int x, int y, struct drm_framebuffer *old_fb)
+static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
+ struct drm_display_mode *adj = &crtc->state->adjusted_mode;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_regs regs[17];
uint32_t lm, rm, tm, bm, val, sclk;
unsigned long flags;
unsigned i;
- bool interlaced;
-
- drm_framebuffer_get(crtc->primary->fb);
-
- interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
-
- val = CFG_GRA_ENA;
- val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
- val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
-
- if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
-
- drm_to_armada_plane(crtc->primary)->state.ctrl0 = val;
- drm_to_armada_plane(crtc->primary)->state.src_hw =
- drm_to_armada_plane(crtc->primary)->state.dst_hw =
- adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
- drm_to_armada_plane(crtc->primary)->state.dst_yx = 0;
+ bool interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
i = 0;
rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
@@ -644,35 +269,15 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
tm = adj->crtc_vtotal - adj->crtc_vsync_end;
- DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
- adj->crtc_hdisplay,
- adj->crtc_hsync_start,
- adj->crtc_hsync_end,
- adj->crtc_htotal, lm, rm);
- DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
- adj->crtc_vdisplay,
- adj->crtc_vsync_start,
- adj->crtc_vsync_end,
- adj->crtc_vtotal, tm, bm);
-
- /* Wait for pending flips to complete */
- armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
- MAX_SCHEDULE_TIMEOUT);
-
- drm_crtc_vblank_off(crtc);
-
- val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
- if (val != dcrtc->dumb_ctrl) {
- dcrtc->dumb_ctrl = val;
- writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
- }
-
- /*
- * If we are blanked, we would have disabled the clock. Re-enable
- * it so that compute_clock() does the right thing.
- */
- if (!IS_ERR(dcrtc->clk) && dpms_blanked(dcrtc->dpms))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
+ DRM_DEBUG_KMS("[CRTC:%d:%s] mode " DRM_MODE_FMT "\n",
+ crtc->base.id, crtc->name,
+ adj->base.id, adj->name, adj->vrefresh, adj->clock,
+ adj->crtc_hdisplay, adj->crtc_hsync_start,
+ adj->crtc_hsync_end, adj->crtc_htotal,
+ adj->crtc_vdisplay, adj->crtc_vsync_start,
+ adj->crtc_vsync_end, adj->crtc_vtotal,
+ adj->type, adj->flags);
+ DRM_DEBUG_KMS("lm %d rm %d tm %d bm %d\n", lm, rm, tm, bm);
/* Now compute the divider for real */
dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
@@ -689,25 +294,20 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
spin_lock_irqsave(&dcrtc->irq_lock, flags);
- /* Ensure graphic fifo is enabled */
- armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
-
/* Even interlaced/progressive frame */
dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
adj->crtc_htotal;
dcrtc->v[1].spu_v_porch = tm << 16 | bm;
val = adj->crtc_hsync_start;
- dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- dcrtc->variant->spu_adv_reg;
+ dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
if (interlaced) {
/* Odd interlaced frame */
+ val -= adj->crtc_htotal / 2;
+ dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
(1 << 16);
dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
- val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
- dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- dcrtc->variant->spu_adv_reg;
} else {
dcrtc->v[0] = dcrtc->v[1];
}
@@ -720,77 +320,136 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
LCD_SPUT_V_H_TOTAL);
- if (dcrtc->variant->has_spu_adv_reg) {
+ if (dcrtc->variant->has_spu_adv_reg)
armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
- }
val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
- val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
- armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+ /*
+ * The documentation doesn't indicate what the normal state of
+ * the sync signals are. Sebastian Hesselbart kindly probed
+ * these signals on his board to determine their state.
+ *
+ * The non-inverted state of the sync signals is active high.
+ * Setting these bits makes the appropriate signal active low.
+ */
+ val = 0;
+ if (adj->flags & DRM_MODE_FLAG_NCSYNC)
+ val |= CFG_INV_CSYNC;
+ if (adj->flags & DRM_MODE_FLAG_NHSYNC)
+ val |= CFG_INV_HSYNC;
+ if (adj->flags & DRM_MODE_FLAG_NVSYNC)
+ val |= CFG_INV_VSYNC;
+ armada_reg_queue_mod(regs, i, val, CFG_INV_CSYNC | CFG_INV_HSYNC |
+ CFG_INV_VSYNC, LCD_SPU_DUMB_CTRL);
armada_reg_queue_end(regs, i);
armada_drm_crtc_update_regs(dcrtc, regs);
-
- armada_drm_primary_set(crtc, crtc->primary, x, y);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
- armada_drm_crtc_update(dcrtc);
+static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- drm_crtc_vblank_on(crtc);
- armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- return 0;
+ dcrtc->regs_idx = 0;
+ dcrtc->regs = dcrtc->atomic_regs;
}
-/* The mode_config.mutex will be held for this call */
-static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_regs regs[4];
- unsigned i;
- i = armada_drm_crtc_calc_fb(crtc->primary->fb, crtc->x, crtc->y, regs,
- dcrtc->interlaced);
- armada_reg_queue_end(regs, i);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+
+ armada_reg_queue_end(dcrtc->regs, dcrtc->regs_idx);
+
+ /*
+ * If we aren't doing a full modeset, then we need to queue
+ * the event here.
+ */
+ if (!drm_atomic_crtc_needs_modeset(crtc->state)) {
+ dcrtc->update_pending = true;
+ armada_drm_crtc_queue_state_event(crtc);
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ } else {
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ }
+}
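atomic_flush() above chooses between two ways of applying the queued register writes: when the CRTC stays live (no full modeset), the writes are deferred so the frame-done interrupt latches them between frames; on a full modeset that interrupt cannot be relied upon, so the writes are applied immediately under the IRQ lock. Condensed from the function above:

	/* condensed: how atomic_flush applies the queued register writes */
	if (!drm_atomic_crtc_needs_modeset(crtc->state)) {
		dcrtc->update_pending = true;	/* latch at next frame-done IRQ */
		armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
	} else {
		armada_drm_crtc_update_regs(dcrtc, dcrtc->regs); /* write now */
	}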
- /* Wait for pending flips to complete */
- armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
- MAX_SCHEDULE_TIMEOUT);
+static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_pending_vblank_event *event;
- /* Take a reference to the new fb as we're using it */
- drm_framebuffer_get(crtc->primary->fb);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- /* Update the base in the CRTC */
- armada_drm_crtc_update_regs(dcrtc, regs);
+ drm_crtc_vblank_off(crtc);
+ armada_drm_crtc_update(dcrtc, false);
- /* Drop our previously held reference */
- armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+ if (!crtc->state->active) {
+ /*
+ * This modeset will be leaving the CRTC disabled, so
+ * call the backend to disable upstream clocks etc.
+ */
+ if (dcrtc->variant->disable)
+ dcrtc->variant->disable(dcrtc);
- return 0;
+ /*
+ * We will not receive any further vblank events.
+ * Send the flip_done event manually.
+ */
+ event = crtc->state->event;
+ crtc->state->event = NULL;
+ if (event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+ }
}
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
- armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+
+ if (!old_state->active) {
+ /*
+ * This modeset is enabling the CRTC after it has been
+ * disabled. Reverse the ->disable call made in
+ * atomic_disable().
+ */
+ if (dcrtc->variant->enable)
+ dcrtc->variant->enable(dcrtc, &crtc->state->adjusted_mode);
+ }
+ armada_drm_crtc_update(dcrtc, true);
+ drm_crtc_vblank_on(crtc);
- /* Disable our primary plane when we disable the CRTC. */
- crtc->primary->funcs->disable_plane(crtc->primary, NULL);
+ armada_drm_crtc_queue_state_event(crtc);
}
static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
- .dpms = armada_drm_crtc_dpms,
- .prepare = armada_drm_crtc_prepare,
- .commit = armada_drm_crtc_commit,
.mode_fixup = armada_drm_crtc_mode_fixup,
- .mode_set = armada_drm_crtc_mode_set,
- .mode_set_base = armada_drm_crtc_mode_set_base,
- .disable = armada_drm_crtc_disable,
+ .mode_set_nofb = armada_drm_crtc_mode_set_nofb,
+ .atomic_begin = armada_drm_crtc_atomic_begin,
+ .atomic_flush = armada_drm_crtc_atomic_flush,
+ .atomic_disable = armada_drm_crtc_atomic_disable,
+ .atomic_enable = armada_drm_crtc_atomic_enable,
};
static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
@@ -883,7 +542,6 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
if (!dcrtc->cursor_obj || !h || !w) {
spin_lock_irq(&dcrtc->irq_lock);
- armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
dcrtc->cursor_update = false;
armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
spin_unlock_irq(&dcrtc->irq_lock);
@@ -907,7 +565,6 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
spin_lock_irq(&dcrtc->irq_lock);
- armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
dcrtc->cursor_update = false;
armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
spin_unlock_irq(&dcrtc->irq_lock);
@@ -1015,8 +672,8 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
priv->dcrtc[dcrtc->num] = NULL;
drm_crtc_cleanup(&dcrtc->crtc);
- if (!IS_ERR(dcrtc->clk))
- clk_disable_unprepare(dcrtc->clk);
+ if (dcrtc->variant->disable)
+ dcrtc->variant->disable(dcrtc);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
@@ -1025,361 +682,51 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
kfree(dcrtc);
}
-/*
- * The mode_config lock is held here, to prevent races between this
- * and a mode_set.
- */
-static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- unsigned i;
- int ret;
-
- /* We don't support changing the pixel format */
- if (fb->format != crtc->primary->fb->format)
- return -EINVAL;
-
- work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
- if (!work)
- return -ENOMEM;
-
- work->event = event;
- work->old_fb = dcrtc->crtc.primary->fb;
-
- i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
- dcrtc->interlaced);
- armada_reg_queue_end(work->regs, i);
-
- /*
- * Ensure that we hold a reference on the new framebuffer.
- * This has to match the behaviour in mode_set.
- */
- drm_framebuffer_get(fb);
-
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret) {
- /* Undo our reference above */
- drm_framebuffer_put(fb);
- kfree(work);
- return ret;
- }
-
- /*
- * Don't take a reference on the new framebuffer;
- * drm_mode_page_flip_ioctl() has already grabbed a reference and
- * will _not_ drop that reference on successful return from this
- * function. Simply mark this new framebuffer as the current one.
- */
- dcrtc->crtc.primary->fb = fb;
-
- /*
- * Finally, if the display is blanked, we won't receive an
- * interrupt, so complete it now.
- */
- if (dpms_blanked(dcrtc->dpms))
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
-
- return 0;
-}
-
-static int
-armada_drm_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property, uint64_t val)
-{
- struct armada_private *priv = crtc->dev->dev_private;
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- bool update_csc = false;
-
- if (property == priv->csc_yuv_prop) {
- dcrtc->csc_yuv_mode = val;
- update_csc = true;
- } else if (property == priv->csc_rgb_prop) {
- dcrtc->csc_rgb_mode = val;
- update_csc = true;
- }
-
- if (update_csc) {
- uint32_t val;
-
- val = dcrtc->spu_iopad_ctrl |
- armada_drm_crtc_calculate_csc(dcrtc);
- writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
- }
-
- return 0;
-}
-
/* These are called under the vbl_lock. */
static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ unsigned long flags;
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
return 0;
}
static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ unsigned long flags;
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
static const struct drm_crtc_funcs armada_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
.cursor_set = armada_drm_crtc_cursor_set,
.cursor_move = armada_drm_crtc_cursor_move,
.destroy = armada_drm_crtc_destroy,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = armada_drm_crtc_page_flip,
- .set_property = armada_drm_crtc_set_property,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = armada_drm_crtc_enable_vblank,
.disable_vblank = armada_drm_crtc_disable_vblank,
};
-static void armada_drm_primary_update_state(struct drm_plane_state *state,
- struct armada_regs *regs)
-{
- struct armada_plane *dplane = drm_to_armada_plane(state->plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(state->crtc);
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
- bool was_disabled;
- unsigned int idx = 0;
- u32 val;
-
- val = CFG_GRA_FMT(dfb->fmt) | CFG_GRA_MOD(dfb->mod);
- if (dfb->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
- if (state->visible)
- val |= CFG_GRA_ENA;
- if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
- val |= CFG_GRA_HSMOOTH;
-
- was_disabled = !(dplane->state.ctrl0 & CFG_GRA_ENA);
- if (was_disabled)
- armada_reg_queue_mod(regs, idx,
- 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
-
- dplane->state.ctrl0 = val;
- dplane->state.src_hw = (drm_rect_height(&state->src) & 0xffff0000) |
- drm_rect_width(&state->src) >> 16;
- dplane->state.dst_hw = drm_rect_height(&state->dst) << 16 |
- drm_rect_width(&state->dst);
- dplane->state.dst_yx = state->dst.y1 << 16 | state->dst.x1;
-
- armada_drm_gra_plane_regs(regs + idx, &dfb->fb, &dplane->state,
- state->src.x1 >> 16, state->src.y1 >> 16,
- dcrtc->interlaced);
-
- dplane->state.vsync_update = !was_disabled;
- dplane->state.changed = true;
-}
-
-static int armada_drm_primary_update(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- struct drm_plane_state state = {
- .plane = plane,
- .crtc = crtc,
- .fb = fb,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .rotation = DRM_MODE_ROTATE_0,
- };
- struct drm_crtc_state crtc_state = {
- .crtc = crtc,
- .enable = crtc->enabled,
- .mode = crtc->mode,
- };
- int ret;
-
- ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
- INT_MAX, true, false);
- if (ret)
- return ret;
-
- work = &dplane->works[dplane->next_work];
- work->fn = armada_drm_crtc_complete_frame_work;
-
- if (plane->fb != fb) {
- /*
- * Take a reference on the new framebuffer - we want to
- * hold on to it while the hardware is displaying it.
- */
- drm_framebuffer_reference(fb);
-
- work->old_fb = plane->fb;
- } else {
- work->old_fb = NULL;
- }
-
- armada_drm_primary_update_state(&state, work->regs);
-
- if (!dplane->state.changed)
- return 0;
-
- /* Wait for pending work to complete */
- if (armada_drm_plane_work_wait(dplane, HZ / 10) == 0)
- armada_drm_plane_work_cancel(dcrtc, dplane);
-
- if (!dplane->state.vsync_update) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- return 0;
- }
-
- /* Queue it for update on the next interrupt if we are enabled */
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- }
-
- dplane->next_work = !dplane->next_work;
-
- return 0;
-}
-
-int armada_drm_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_crtc *dcrtc;
- struct armada_plane_work *work;
- unsigned int idx = 0;
- u32 sram_para1, enable_mask;
-
- if (!plane->crtc)
- return 0;
-
- /*
- * Arrange to power down most RAMs and FIFOs if this is the primary
- * plane, otherwise just the YUV FIFOs for the overlay plane.
- */
- if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
- CFG_PDWN32x32 | CFG_PDWN64x66;
- enable_mask = CFG_GRA_ENA;
- } else {
- sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
- enable_mask = CFG_DMA_ENA;
- }
-
- dplane->state.ctrl0 &= ~enable_mask;
-
- dcrtc = drm_to_armada_crtc(plane->crtc);
-
- /*
- * Try to disable the plane and drop our ref on the framebuffer
- * at the next frame update. If we fail for any reason, disable
- * the plane immediately.
- */
- work = &dplane->works[dplane->next_work];
- work->fn = armada_drm_crtc_complete_disable_work;
- work->cancel = armada_drm_crtc_complete_disable_work;
- work->old_fb = plane->fb;
-
- armada_reg_queue_mod(work->regs, idx,
- 0, enable_mask, LCD_SPU_DMA_CTRL0);
- armada_reg_queue_mod(work->regs, idx,
- sram_para1, 0, LCD_SPU_SRAM_PARA1);
- armada_reg_queue_end(work->regs, idx);
-
- /* Wait for any preceding work to complete, but don't wedge */
- if (WARN_ON(!armada_drm_plane_work_wait(dplane, HZ)))
- armada_drm_plane_work_cancel(dcrtc, dplane);
-
- if (armada_drm_plane_work_queue(dcrtc, work)) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- }
-
- dplane->next_work = !dplane->next_work;
-
- return 0;
-}
-
-static const struct drm_plane_funcs armada_primary_plane_funcs = {
- .update_plane = armada_drm_primary_update,
- .disable_plane = armada_drm_plane_disable,
- .destroy = drm_primary_helper_destroy,
-};
-
-int armada_drm_plane_init(struct armada_plane *plane)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(plane->works); i++)
- plane->works[i].plane = &plane->base;
-
- init_waitqueue_head(&plane->frame_wait);
-
- return 0;
-}
-
-static const struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
- { CSC_AUTO, "Auto" },
- { CSC_YUV_CCIR601, "CCIR601" },
- { CSC_YUV_CCIR709, "CCIR709" },
-};
-
-static const struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
- { CSC_AUTO, "Auto" },
- { CSC_RGB_COMPUTER, "Computer system" },
- { CSC_RGB_STUDIO, "Studio" },
-};
-
-static int armada_drm_crtc_create_properties(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
-
- if (priv->csc_yuv_prop)
- return 0;
-
- priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
- "CSC_YUV", armada_drm_csc_yuv_enum_list,
- ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
- priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
- "CSC_RGB", armada_drm_csc_rgb_enum_list,
- ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
-
- if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
- return -ENOMEM;
-
- return 0;
-}
-
static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
struct resource *res, int irq, const struct armada_variant *variant,
struct device_node *port)
{
struct armada_private *priv = drm->dev_private;
struct armada_crtc *dcrtc;
- struct armada_plane *primary;
+ struct drm_plane *primary;
void __iomem *base;
int ret;
- ret = armada_drm_crtc_create_properties(drm);
- if (ret)
- return ret;
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1397,8 +744,6 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
dcrtc->base = base;
dcrtc->num = drm->mode_config.num_crtc;
dcrtc->clk = ERR_PTR(-EINVAL);
- dcrtc->csc_yuv_mode = CSC_AUTO;
- dcrtc->csc_rgb_mode = CSC_AUTO;
dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
spin_lock_init(&dcrtc->irq_lock);
@@ -1415,6 +760,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
@@ -1441,39 +787,23 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
goto err_crtc;
}
- ret = armada_drm_plane_init(primary);
- if (ret) {
- kfree(primary);
- goto err_crtc;
- }
-
- ret = drm_universal_plane_init(drm, &primary->base, 0,
- &armada_primary_plane_funcs,
- armada_primary_formats,
- ARRAY_SIZE(armada_primary_formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ ret = armada_drm_primary_plane_init(drm, primary);
if (ret) {
kfree(primary);
goto err_crtc;
}
- ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
+ ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, primary, NULL,
&armada_crtc_funcs, NULL);
if (ret)
goto err_crtc_init;
drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
- drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
- dcrtc->csc_yuv_mode);
- drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
- dcrtc->csc_rgb_mode);
-
return armada_overlay_plane_create(drm, 1 << dcrtc->num);
err_crtc_init:
- primary->base.funcs->destroy(&primary->base);
+ primary->funcs->destroy(primary);
err_crtc:
kfree(dcrtc);
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 445829b8877a..7ebd337b60af 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -32,49 +32,8 @@ struct armada_regs {
armada_reg_queue_mod(_r, _i, 0, 0, ~0)
struct armada_crtc;
-struct armada_plane;
struct armada_variant;
-struct armada_plane_work {
- void (*fn)(struct armada_crtc *, struct armada_plane_work *);
- void (*cancel)(struct armada_crtc *, struct armada_plane_work *);
- bool need_kfree;
- struct drm_plane *plane;
- struct drm_framebuffer *old_fb;
- struct drm_pending_vblank_event *event;
- struct armada_regs regs[14];
-};
-
-struct armada_plane_state {
- u16 src_x;
- u16 src_y;
- u32 src_hw;
- u32 dst_hw;
- u32 dst_yx;
- u32 ctrl0;
- bool changed;
- bool vsync_update;
-};
-
-struct armada_plane {
- struct drm_plane base;
- wait_queue_head_t frame_wait;
- bool next_work;
- struct armada_plane_work works[2];
- struct armada_plane_work *work;
- struct armada_plane_state state;
-};
-#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
-
-int armada_drm_plane_init(struct armada_plane *plane);
-int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane_work *work);
-int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
-void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
- struct armada_plane *plane);
-void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
- int x, int y);
-
struct armada_crtc {
struct drm_crtc crtc;
const struct armada_variant *variant;
@@ -89,10 +48,6 @@ struct armada_crtc {
} v[2];
bool interlaced;
bool cursor_update;
- uint8_t csc_yuv_mode;
- uint8_t csc_rgb_mode;
-
- struct drm_plane *plane;
struct armada_gem_object *cursor_obj;
int cursor_x;
@@ -102,21 +57,22 @@ struct armada_crtc {
uint32_t cursor_w;
uint32_t cursor_h;
- int dpms;
uint32_t cfg_dumb_ctrl;
- uint32_t dumb_ctrl;
uint32_t spu_iopad_ctrl;
spinlock_t irq_lock;
uint32_t irq_ena;
+
+ bool update_pending;
+ struct drm_pending_vblank_event *event;
+ struct armada_regs atomic_regs[32];
+ struct armada_regs *regs;
+ unsigned int regs_idx;
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
-int armada_drm_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
-
extern struct platform_driver armada_lcd_platform_driver;
#endif
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index cc4c557c9f66..f09083ff15d3 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -42,11 +42,12 @@ struct armada_private;
struct armada_variant {
bool has_spu_adv_reg;
- uint32_t spu_adv_reg;
int (*init)(struct armada_crtc *, struct device *);
int (*compute_clock)(struct armada_crtc *,
const struct drm_display_mode *,
uint32_t *);
+ void (*disable)(struct armada_crtc *);
+ void (*enable)(struct armada_crtc *, const struct drm_display_mode *);
};
/* Variant ops */
@@ -54,14 +55,10 @@ extern const struct armada_variant armada510_ops;
struct armada_private {
struct drm_device drm;
- struct work_struct fb_unref_work;
- DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
struct armada_crtc *dcrtc[2];
struct drm_mm linear; /* protected by linear_lock */
struct mutex linear_lock;
- struct drm_property *csc_yuv_prop;
- struct drm_property *csc_rgb_prop;
struct drm_property *colorkey_prop;
struct drm_property *colorkey_min_prop;
struct drm_property *colorkey_max_prop;
@@ -76,13 +73,6 @@ struct armada_private {
#endif
};
-void __armada_drm_queue_unref_work(struct drm_device *,
- struct drm_framebuffer *);
-void armada_drm_queue_unref_work(struct drm_device *,
- struct drm_framebuffer *);
-
-extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
-
int armada_fbdev_init(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4b11b6b52f1d..fa31589b4fc0 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -9,46 +9,18 @@
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_gem.h"
+#include "armada_fb.h"
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
-static void armada_drm_unref_work(struct work_struct *work)
-{
- struct armada_private *priv =
- container_of(work, struct armada_private, fb_unref_work);
- struct drm_framebuffer *fb;
-
- while (kfifo_get(&priv->fb_unref, &fb))
- drm_framebuffer_put(fb);
-}
-
-/* Must be called with dev->event_lock held */
-void __armada_drm_queue_unref_work(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- struct armada_private *priv = dev->dev_private;
-
- WARN_ON(!kfifo_put(&priv->fb_unref, fb));
- schedule_work(&priv->fb_unref_work);
-}
-
-void armada_drm_queue_unref_work(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- __armada_drm_queue_unref_work(dev, fb);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
@@ -72,11 +44,18 @@ static struct drm_driver armada_drm_driver = {
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME,
+ DRIVER_PRIME | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
+static const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+ .fb_create = armada_fb_create,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
static int armada_drm_bind(struct device *dev)
{
struct armada_private *priv;
@@ -109,7 +88,7 @@ static int armada_drm_bind(struct device *dev)
/*
* The drm_device structure must be at the start of
- * armada_private for drm_dev_unref() to work correctly.
+ * armada_private for drm_dev_put() to work correctly.
*/
BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
@@ -125,9 +104,6 @@ static int armada_drm_bind(struct device *dev)
dev_set_drvdata(dev, &priv->drm);
- INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
- INIT_KFIFO(priv->fb_unref);
-
/* Mode setting support */
drm_mode_config_init(&priv->drm);
priv->drm.mode_config.min_width = 320;
@@ -155,6 +131,8 @@ static int armada_drm_bind(struct device *dev)
priv->drm.irq_enabled = true;
+ drm_mode_config_reset(&priv->drm);
+
ret = armada_fbdev_init(&priv->drm);
if (ret)
goto err_comp;
@@ -179,8 +157,7 @@ static int armada_drm_bind(struct device *dev)
err_kms:
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- drm_dev_unref(&priv->drm);
+ drm_dev_put(&priv->drm);
return ret;
}
@@ -198,9 +175,8 @@ static void armada_drm_unbind(struct device *dev)
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- drm_dev_unref(&priv->drm);
+ drm_dev_put(&priv->drm);
}
static int compare_of(struct device *dev, void *data)
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index ac92bce07ecd..6bd638a54579 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -7,30 +7,15 @@
*/
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
-static void armada_fb_destroy(struct drm_framebuffer *fb)
-{
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
-
- drm_framebuffer_cleanup(&dfb->fb);
- drm_gem_object_put_unlocked(&dfb->obj->obj);
- kfree(dfb);
-}
-
-static int armada_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *dfile, unsigned int *handle)
-{
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
- return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
-}
-
static const struct drm_framebuffer_funcs armada_fb_funcs = {
- .destroy = armada_fb_destroy,
- .create_handle = armada_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
@@ -78,7 +63,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
dfb->fmt = format;
dfb->mod = config;
- dfb->obj = obj;
+ dfb->fb.obj[0] = &obj->obj;
drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
@@ -99,7 +84,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
return dfb;
}
-static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
{
struct armada_gem_object *obj;
@@ -153,8 +138,3 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
return ERR_PTR(ret);
}
-
-const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
- .fb_create = armada_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
-};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index 48073c4f54d8..476daad0a36a 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -10,15 +10,15 @@
struct armada_framebuffer {
struct drm_framebuffer fb;
- struct armada_gem_object *obj;
uint8_t fmt;
uint8_t mod;
};
#define drm_fb_to_armada_fb(dfb) \
container_of(dfb, struct armada_framebuffer, fb)
-#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
-
+struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+ struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode);
#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 2a59db0994b2..8d23700848df 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -24,7 +24,7 @@ static /*const*/ struct fb_ops armada_fb_ops = {
.fb_imageblit = drm_fb_helper_cfb_imageblit,
};
-static int armada_fb_create(struct drm_fb_helper *fbh,
+static int armada_fbdev_create(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbh->dev;
@@ -108,7 +108,7 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
int ret = 0;
if (!fbh->fb) {
- ret = armada_fb_create(fbh, sizes);
+ ret = armada_fbdev_create(fbh, sizes);
if (ret == 0)
ret = 1;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index a97f509743a5..892c1d9304bb 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -13,25 +13,14 @@
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
-static int armada_gem_vm_fault(struct vm_fault *vmf)
+static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
struct drm_gem_object *gobj = vmf->vma->vm_private_data;
struct armada_gem_object *obj = drm_to_armada_gem(gobj);
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
- int ret;
pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
- ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
-
- switch (ret) {
- case 0:
- case -EBUSY:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
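The conversion above replaces the hand-rolled errno-to-fault-code mapping with vmf_insert_pfn(), which returns a vm_fault_t directly; the removed switch was roughly equivalent to:

	/* the mapping vmf_insert_pfn() now performs internally (sketch) */
	static vm_fault_t errno_to_fault(int err)
	{
		switch (err) {
		case 0:
		case -EBUSY:
			return VM_FAULT_NOPAGE;
		case -ENOMEM:
			return VM_FAULT_OOM;
		default:
			return VM_FAULT_SIGBUS;
		}
	}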
const struct vm_operations_struct armada_gem_vm_ops = {
@@ -490,8 +479,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map_atomic = armada_gem_dmabuf_no_kmap,
- .unmap_atomic = armada_gem_dmabuf_no_kunmap,
.map = armada_gem_dmabuf_no_kmap,
.unmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
index 27319a8335e2..277580b36758 100644
--- a/drivers/gpu/drm/armada/armada_hw.h
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -160,6 +160,7 @@ enum {
CFG_ALPHAM_GRA = 0x1 << 16,
CFG_ALPHAM_CFG = 0x2 << 16,
CFG_ALPHA_MASK = 0xff << 8,
+#define CFG_ALPHA(x) ((x) << 8)
CFG_PIXCMD_MASK = 0xff,
};
@@ -315,4 +316,19 @@ enum {
PWRDN_IRQ_LEVEL = 1 << 0,
};
+static inline u32 armada_rect_hw_fp(struct drm_rect *r)
+{
+ return (drm_rect_height(r) & 0xffff0000) | drm_rect_width(r) >> 16;
+}
+
+static inline u32 armada_rect_hw(struct drm_rect *r)
+{
+ return drm_rect_height(r) << 16 | (drm_rect_width(r) & 0x0000ffff);
+}
+
+static inline u32 armada_rect_yx(struct drm_rect *r)
+{
+ return (r)->y1 << 16 | ((r)->x1 & 0x0000ffff);
+}
+
#endif
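The three helpers above pack a drm_rect into the hardware's size/position register layout. Typical use against an atomic plane state, where src is in 16.16 fixed point and dst is in whole pixels (a sketch, not taken verbatim from the driver):

	/* sketch: packing plane coordinates for the LCD registers */
	u32 src_hw = armada_rect_hw_fp(&state->src);	/* h << 16 | w, 16.16 src */
	u32 dst_hw = armada_rect_hw(&state->dst);	/* h << 16 | w, pixels */
	u32 dst_yx = armada_rect_yx(&state->dst);	/* y1 << 16 | x1 */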
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index c391955009d6..eb7dfb65ef47 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,346 +7,468 @@
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/armada_drm.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
-#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
+#include "armada_plane.h"
#include "armada_trace.h"
-struct armada_ovl_plane_properties {
- uint32_t colorkey_yr;
- uint32_t colorkey_ug;
- uint32_t colorkey_vb;
-#define K2R(val) (((val) >> 0) & 0xff)
-#define K2G(val) (((val) >> 8) & 0xff)
-#define K2B(val) (((val) >> 16) & 0xff)
- int16_t brightness;
- uint16_t contrast;
- uint16_t saturation;
- uint32_t colorkey_mode;
-};
-
-struct armada_ovl_plane {
- struct armada_plane base;
- struct armada_ovl_plane_properties prop;
+#define DEFAULT_BRIGHTNESS 0
+#define DEFAULT_CONTRAST 0x4000
+#define DEFAULT_SATURATION 0x4000
+#define DEFAULT_ENCODING DRM_COLOR_YCBCR_BT601
+
+struct armada_overlay_state {
+ struct drm_plane_state base;
+ u32 colorkey_yr;
+ u32 colorkey_ug;
+ u32 colorkey_vb;
+ u32 colorkey_mode;
+ u32 colorkey_enable;
+ s16 brightness;
+ u16 contrast;
+ u16 saturation;
};
-#define drm_to_armada_ovl_plane(p) \
- container_of(p, struct armada_ovl_plane, base.base)
-
+#define drm_to_overlay_state(s) \
+ container_of(s, struct armada_overlay_state, base)
-static void
-armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
- struct armada_crtc *dcrtc)
+static inline u32 armada_spu_contrast(struct drm_plane_state *state)
{
- writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
- writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
- writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+ return drm_to_overlay_state(state)->brightness << 16 |
+ drm_to_overlay_state(state)->contrast;
+}
- writel_relaxed(prop->brightness << 16 | prop->contrast,
- dcrtc->base + LCD_SPU_CONTRAST);
+static inline u32 armada_spu_saturation(struct drm_plane_state *state)
+{
/* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
- writel_relaxed(prop->saturation << 16,
- dcrtc->base + LCD_SPU_SATURATION);
- writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
-
- spin_lock_irq(&dcrtc->irq_lock);
- armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
- CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
- dcrtc->base + LCD_SPU_DMA_CTRL1);
+ return drm_to_overlay_state(state)->saturation << 16;
+}
- armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
- spin_unlock_irq(&dcrtc->irq_lock);
+static inline u32 armada_csc(struct drm_plane_state *state)
+{
+ /*
+ * The CFG_CSC_RGB_* settings control the output of the colour space
+ * converter, setting the range of output values it produces. Since
+ * we will be blending with the full-range graphics, we need to
+ * produce full-range RGB output from the conversion.
+ */
+ return CFG_CSC_RGB_COMPUTER |
+ (state->color_encoding == DRM_COLOR_YCBCR_BT709 ?
+ CFG_CSC_YUV_CCIR709 : CFG_CSC_YUV_CCIR601);
}
/* === Plane support === */
-static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
+static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- unsigned long flags;
+ struct drm_plane_state *state = plane->state;
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx;
+ u32 cfg, cfg_mask, val;
- trace_armada_ovl_plane_work(&dcrtc->crtc, work->plane);
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static void armada_ovl_plane_update_state(struct drm_plane_state *state,
- struct armada_regs *regs)
-{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(state->plane);
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
- const struct drm_format_info *format;
- unsigned int idx = 0;
- bool fb_changed;
- u32 val, ctrl0;
- u16 src_x, src_y;
+ if (!state->fb || WARN_ON(!state->crtc))
+ return;
- ctrl0 = CFG_DMA_FMT(dfb->fmt) | CFG_DMA_MOD(dfb->mod) | CFG_CBSH_ENA;
- if (state->visible)
- ctrl0 |= CFG_DMA_ENA;
- if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
- ctrl0 |= CFG_DMA_HSMOOTH;
+ DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
+ plane->base.id, plane->name,
+ state->crtc->base.id, state->crtc->name,
+ state->fb->base.id,
+ old_state->visible, state->visible);
- /*
- * Shifting a YUV packed format image by one pixel causes the U/V
- * planes to swap. Compensate for it by also toggling the UV swap.
- */
- format = dfb->fb.format;
- if (format->num_planes == 1 && state->src.x1 >> 16 & (format->hsub - 1))
- ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+ dcrtc = drm_to_armada_crtc(state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
- if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
- /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+ idx = 0;
+ if (!old_state->visible && state->visible)
armada_reg_queue_mod(regs, idx,
0, CFG_PDWN16x66 | CFG_PDWN32x66,
LCD_SPU_SRAM_PARA1);
- }
-
- fb_changed = dplane->base.base.fb != &dfb->fb ||
- dplane->base.state.src_x != state->src.x1 >> 16 ||
- dplane->base.state.src_y != state->src.y1 >> 16;
-
- dplane->base.state.vsync_update = fb_changed;
-
+ val = armada_rect_hw_fp(&state->src);
+ if (armada_rect_hw_fp(&old_state->src) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_HPXL_VLN);
+ val = armada_rect_yx(&state->dst);
+ if (armada_rect_yx(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN);
+ val = armada_rect_hw(&state->dst);
+ if (armada_rect_hw(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DZM_HPXL_VLN);
/* FIXME: overlay on an interlaced display */
- if (fb_changed) {
- u32 addrs[3];
-
- dplane->base.state.src_y = src_y = state->src.y1 >> 16;
- dplane->base.state.src_x = src_x = state->src.x1 >> 16;
+ if (old_state->src.x1 != state->src.x1 ||
+ old_state->src.y1 != state->src.y1 ||
+ old_state->fb != state->fb) {
+ const struct drm_format_info *format;
+ u16 src_x, pitches[3];
+ u32 addrs[2][3];
- armada_drm_plane_calc_addrs(addrs, &dfb->fb, src_x, src_y);
+ armada_drm_plane_calc(state, addrs, pitches, false);
- armada_reg_queue_set(regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[0][0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[0][1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[0][2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[1][0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[1][1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[1][2],
LCD_SPU_DMA_START_ADDR_V1);
- val = dfb->fb.pitches[0] << 16 | dfb->fb.pitches[0];
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_PITCH_YC);
- val = dfb->fb.pitches[1] << 16 | dfb->fb.pitches[2];
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_PITCH_UV);
- }
+ val = pitches[0] << 16 | pitches[0];
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_YC);
+ val = pitches[1] << 16 | pitches[2];
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_UV);
- val = (drm_rect_height(&state->src) & 0xffff0000) |
- drm_rect_width(&state->src) >> 16;
- if (dplane->base.state.src_hw != val) {
- dplane->base.state.src_hw = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_HPXL_VLN);
- }
+ cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
+ CFG_DMA_MOD(drm_fb_to_armada_fb(state->fb)->mod) |
+ CFG_CBSH_ENA;
+ if (state->visible)
+ cfg |= CFG_DMA_ENA;
- val = drm_rect_height(&state->dst) << 16 | drm_rect_width(&state->dst);
- if (dplane->base.state.dst_hw != val) {
- dplane->base.state.dst_hw = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DZM_HPXL_VLN);
+ /*
+ * Shifting a YUV packed format image by one pixel causes the
+ * U/V planes to swap. Compensate for it by also toggling
+ * the UV swap.
+ */
+ format = state->fb->format;
+ src_x = state->src.x1 >> 16;
+ if (format->num_planes == 1 && src_x & (format->hsub - 1))
+ cfg ^= CFG_DMA_MOD(CFG_SWAPUV);
+ cfg_mask = CFG_CBSH_ENA | CFG_DMAFORMAT |
+ CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_DMA_FTOGGLE | CFG_DMA_TSTMODE |
+ CFG_DMA_ENA;
+ } else if (old_state->visible != state->visible) {
+ cfg = state->visible ? CFG_DMA_ENA : 0;
+ cfg_mask = CFG_DMA_ENA;
+ } else {
+ cfg = cfg_mask = 0;
}
-
- val = state->dst.y1 << 16 | state->dst.x1;
- if (dplane->base.state.dst_yx != val) {
- dplane->base.state.dst_yx = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_OVSA_HPXL_VLN);
+ if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
+ drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+ cfg_mask |= CFG_DMA_HSMOOTH;
+ if (drm_rect_width(&state->src) >> 16 !=
+ drm_rect_width(&state->dst))
+ cfg |= CFG_DMA_HSMOOTH;
}
- if (dplane->base.state.ctrl0 != ctrl0) {
- dplane->base.state.ctrl0 = ctrl0;
- armada_reg_queue_mod(regs, idx, ctrl0,
- CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
- CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
- CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
- CFG_YUV2RGB) | CFG_DMA_ENA,
- LCD_SPU_DMA_CTRL0);
- dplane->base.state.vsync_update = true;
- }
+ if (cfg_mask)
+ armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
+ LCD_SPU_DMA_CTRL0);
+
+ val = armada_spu_contrast(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_spu_contrast(old_state) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_CONTRAST);
+ val = armada_spu_saturation(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_spu_saturation(old_state) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_SATURATION);
+ if (!old_state->visible && state->visible)
+ armada_reg_queue_set(regs, idx, 0x00002000, LCD_SPU_CBSH_HUE);
+ val = armada_csc(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_csc(old_state) != val)
+ armada_reg_queue_mod(regs, idx, val, CFG_CSC_MASK,
+ LCD_SPU_IOPAD_CONTROL);
+ val = drm_to_overlay_state(state)->colorkey_yr;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_yr != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_Y);
+ val = drm_to_overlay_state(state)->colorkey_ug;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_ug != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_U);
+ val = drm_to_overlay_state(state)->colorkey_vb;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_vb != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_V);
+ val = drm_to_overlay_state(state)->colorkey_mode;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_mode != val)
+ armada_reg_queue_mod(regs, idx, val, CFG_CKMODE_MASK |
+ CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+ LCD_SPU_DMA_CTRL1);
+ val = drm_to_overlay_state(state)->colorkey_enable;
+ if (((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_enable != val) &&
+ dcrtc->variant->has_spu_adv_reg)
+ armada_reg_queue_mod(regs, idx, val, ADV_GRACOLORKEY |
+ ADV_VIDCOLORKEY, LCD_SPU_ADV_REG);
+
+ dcrtc->regs_idx += idx;
+}
+
+static void armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx = 0;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!old_state->crtc)
+ return;
- dplane->base.state.changed = idx != 0;
+ DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->crtc->base.id, old_state->crtc->name,
+ old_state->fb->base.id);
- armada_reg_queue_end(regs, idx);
+ dcrtc = drm_to_armada_crtc(old_state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ /* Disable plane and power down the YUV FIFOs */
+ armada_reg_queue_mod(regs, idx, 0, CFG_DMA_ENA, LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_mod(regs, idx, CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+ LCD_SPU_SRAM_PARA1);
+
+ dcrtc->regs_idx += idx;
}
+static const struct drm_plane_helper_funcs armada_overlay_plane_helper_funcs = {
+ .prepare_fb = armada_drm_plane_prepare_fb,
+ .cleanup_fb = armada_drm_plane_cleanup_fb,
+ .atomic_check = armada_drm_plane_atomic_check,
+ .atomic_update = armada_drm_overlay_plane_atomic_update,
+ .atomic_disable = armada_drm_overlay_plane_atomic_disable,
+};
+
static int
-armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+armada_overlay_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- struct drm_plane_state state = {
- .plane = plane,
- .crtc = crtc,
- .fb = fb,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .rotation = DRM_MODE_ROTATE_0,
- };
- struct drm_crtc_state crtc_state = {
- .crtc = crtc,
- .enable = crtc->enabled,
- .mode = crtc->mode,
- };
- int ret;
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
trace_armada_ovl_plane_update(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
- ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
- INT_MAX, true, false);
- if (ret)
- return ret;
-
- work = &dplane->base.works[dplane->base.next_work];
-
- if (plane->fb != fb) {
- /*
- * Take a reference on the new framebuffer - we want to
- * hold on to it while the hardware is displaying it.
- */
- drm_framebuffer_reference(fb);
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
- work->old_fb = plane->fb;
- } else {
- work->old_fb = NULL;
+ state->acquire_ctx = ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
}
- armada_ovl_plane_update_state(&state, work->regs);
-
- if (!dplane->base.state.changed)
- return 0;
-
- /* Wait for pending work to complete */
- if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
- armada_drm_plane_work_cancel(dcrtc, &dplane->base);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_h = crtc_h;
+ plane_state->crtc_w = crtc_w;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_h = src_h;
+ plane_state->src_w = src_w;
+
+ ret = drm_atomic_nonblocking_commit(state);
+fail:
+ drm_atomic_state_put(state);
+ return ret;
+}
- /* Just updating the position/size? */
- if (!dplane->base.state.vsync_update) {
- armada_ovl_plane_work(dcrtc, work);
- return 0;
- }
+static void armada_ovl_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_cleanup(plane);
+ kfree(plane);
+}
- if (!dcrtc->plane) {
- dcrtc->plane = plane;
- armada_ovl_update_attr(&dplane->prop, dcrtc);
+static void armada_overlay_reset(struct drm_plane *plane)
+{
+ struct armada_overlay_state *state;
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+ kfree(plane->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->base.plane = plane;
+ state->base.color_encoding = DEFAULT_ENCODING;
+ state->base.color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+ state->base.rotation = DRM_MODE_ROTATE_0;
+ state->colorkey_yr = 0xfefefe00;
+ state->colorkey_ug = 0x01010100;
+ state->colorkey_vb = 0x01010100;
+ state->colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ state->colorkey_enable = ADV_GRACOLORKEY;
+ state->brightness = DEFAULT_BRIGHTNESS;
+ state->contrast = DEFAULT_CONTRAST;
+ state->saturation = DEFAULT_SATURATION;
}
-
- /* Queue it for update on the next interrupt if we are enabled */
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret)
- DRM_ERROR("failed to queue plane work: %d\n", ret);
-
- dplane->base.next_work = !dplane->base.next_work;
-
- return 0;
+ plane->state = &state->base;
}
-static void armada_ovl_plane_destroy(struct drm_plane *plane)
+struct drm_plane_state *
+armada_overlay_duplicate_state(struct drm_plane *plane)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
+ struct armada_overlay_state *state;
- drm_plane_cleanup(plane);
+ if (WARN_ON(!plane->state))
+ return NULL;
- kfree(dplane);
+ state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+ return &state->base;
}
-static int armada_ovl_plane_set_property(struct drm_plane *plane,
- struct drm_property *property, uint64_t val)
+static int armada_overlay_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
{
struct armada_private *priv = plane->dev->dev_private;
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- bool update_attr = false;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
if (property == priv->colorkey_prop) {
#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
- dplane->prop.colorkey_yr = CCC(K2R(val));
- dplane->prop.colorkey_ug = CCC(K2G(val));
- dplane->prop.colorkey_vb = CCC(K2B(val));
+ drm_to_overlay_state(state)->colorkey_yr = CCC(K2R(val));
+ drm_to_overlay_state(state)->colorkey_ug = CCC(K2G(val));
+ drm_to_overlay_state(state)->colorkey_vb = CCC(K2B(val));
#undef CCC
- update_attr = true;
} else if (property == priv->colorkey_min_prop) {
- dplane->prop.colorkey_yr &= ~0x00ff0000;
- dplane->prop.colorkey_yr |= K2R(val) << 16;
- dplane->prop.colorkey_ug &= ~0x00ff0000;
- dplane->prop.colorkey_ug |= K2G(val) << 16;
- dplane->prop.colorkey_vb &= ~0x00ff0000;
- dplane->prop.colorkey_vb |= K2B(val) << 16;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 16;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 16;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 16;
} else if (property == priv->colorkey_max_prop) {
- dplane->prop.colorkey_yr &= ~0xff000000;
- dplane->prop.colorkey_yr |= K2R(val) << 24;
- dplane->prop.colorkey_ug &= ~0xff000000;
- dplane->prop.colorkey_ug |= K2G(val) << 24;
- dplane->prop.colorkey_vb &= ~0xff000000;
- dplane->prop.colorkey_vb |= K2B(val) << 24;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 24;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 24;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 24;
} else if (property == priv->colorkey_val_prop) {
- dplane->prop.colorkey_yr &= ~0x0000ff00;
- dplane->prop.colorkey_yr |= K2R(val) << 8;
- dplane->prop.colorkey_ug &= ~0x0000ff00;
- dplane->prop.colorkey_ug |= K2G(val) << 8;
- dplane->prop.colorkey_vb &= ~0x0000ff00;
- dplane->prop.colorkey_vb |= K2B(val) << 8;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 8;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 8;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 8;
} else if (property == priv->colorkey_alpha_prop) {
- dplane->prop.colorkey_yr &= ~0x000000ff;
- dplane->prop.colorkey_yr |= K2R(val);
- dplane->prop.colorkey_ug &= ~0x000000ff;
- dplane->prop.colorkey_ug |= K2G(val);
- dplane->prop.colorkey_vb &= ~0x000000ff;
- dplane->prop.colorkey_vb |= K2B(val);
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val);
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val);
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val);
} else if (property == priv->colorkey_mode_prop) {
- dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
- dplane->prop.colorkey_mode |= CFG_CKMODE(val);
- update_attr = true;
+ if (val == CKMODE_DISABLE) {
+ drm_to_overlay_state(state)->colorkey_mode =
+ CFG_CKMODE(CKMODE_DISABLE) |
+ CFG_ALPHAM_CFG | CFG_ALPHA(255);
+ drm_to_overlay_state(state)->colorkey_enable = 0;
+ } else {
+ drm_to_overlay_state(state)->colorkey_mode =
+ CFG_CKMODE(val) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ drm_to_overlay_state(state)->colorkey_enable =
+ ADV_GRACOLORKEY;
+ }
} else if (property == priv->brightness_prop) {
- dplane->prop.brightness = val - 256;
- update_attr = true;
+ drm_to_overlay_state(state)->brightness = val - 256;
} else if (property == priv->contrast_prop) {
- dplane->prop.contrast = val;
- update_attr = true;
+ drm_to_overlay_state(state)->contrast = val;
} else if (property == priv->saturation_prop) {
- dplane->prop.saturation = val;
- update_attr = true;
+ drm_to_overlay_state(state)->saturation = val;
+ } else {
+ return -EINVAL;
}
+ return 0;
+}
- if (update_attr && dplane->base.base.crtc)
- armada_ovl_update_attr(&dplane->prop,
- drm_to_armada_crtc(dplane->base.base.crtc));
+static int armada_overlay_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state, struct drm_property *property,
+ uint64_t *val)
+{
+ struct armada_private *priv = plane->dev->dev_private;
+#define C2K(c,s) (((c) >> (s)) & 0xff)
+#define R2BGR(r,g,b,s) (C2K(r,s) << 0 | C2K(g,s) << 8 | C2K(b,s) << 16)
+ if (property == priv->colorkey_prop) {
+ /* Do best-efforts here for this property */
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 16);
+ /* If min != max, or min != val, error out */
+ if (*val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 24) ||
+ *val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 8))
+ return -EINVAL;
+ } else if (property == priv->colorkey_min_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 16);
+ } else if (property == priv->colorkey_max_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 24);
+ } else if (property == priv->colorkey_val_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 8);
+ } else if (property == priv->colorkey_alpha_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 0);
+ } else if (property == priv->colorkey_mode_prop) {
+ *val = (drm_to_overlay_state(state)->colorkey_mode &
+ CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK);
+ } else if (property == priv->brightness_prop) {
+ *val = drm_to_overlay_state(state)->brightness + 256;
+ } else if (property == priv->contrast_prop) {
+ *val = drm_to_overlay_state(state)->contrast;
+ } else if (property == priv->saturation_prop) {
+ *val = drm_to_overlay_state(state)->saturation;
+ } else {
+ return -EINVAL;
+ }
return 0;
}
static const struct drm_plane_funcs armada_ovl_plane_funcs = {
- .update_plane = armada_ovl_plane_update,
- .disable_plane = armada_drm_plane_disable,
+ .update_plane = armada_overlay_plane_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = armada_ovl_plane_destroy,
- .set_property = armada_ovl_plane_set_property,
+ .reset = armada_overlay_reset,
+ .atomic_duplicate_state = armada_overlay_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_set_property = armada_overlay_set_property,
+ .atomic_get_property = armada_overlay_get_property,
};
static const uint32_t armada_ovl_formats[] = {
@@ -419,46 +541,31 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
{
struct armada_private *priv = dev->dev_private;
struct drm_mode_object *mobj;
- struct armada_ovl_plane *dplane;
+ struct drm_plane *overlay;
int ret;
ret = armada_overlay_create_properties(dev);
if (ret)
return ret;
- dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
- if (!dplane)
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
return -ENOMEM;
- ret = armada_drm_plane_init(&dplane->base);
- if (ret) {
- kfree(dplane);
- return ret;
- }
-
- dplane->base.works[0].fn = armada_ovl_plane_work;
- dplane->base.works[1].fn = armada_ovl_plane_work;
+ drm_plane_helper_add(overlay, &armada_overlay_plane_helper_funcs);
- ret = drm_universal_plane_init(dev, &dplane->base.base, crtcs,
+ ret = drm_universal_plane_init(dev, overlay, crtcs,
&armada_ovl_plane_funcs,
armada_ovl_formats,
ARRAY_SIZE(armada_ovl_formats),
NULL,
DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
- kfree(dplane);
+ kfree(overlay);
return ret;
}
- dplane->prop.colorkey_yr = 0xfefefe00;
- dplane->prop.colorkey_ug = 0x01010100;
- dplane->prop.colorkey_vb = 0x01010100;
- dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
- dplane->prop.brightness = 0;
- dplane->prop.contrast = 0x4000;
- dplane->prop.saturation = 0x4000;
-
- mobj = &dplane->base.base.base;
+ mobj = &overlay->base;
drm_object_attach_property(mobj, priv->colorkey_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_min_prop,
@@ -471,11 +578,19 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
0x000000);
drm_object_attach_property(mobj, priv->colorkey_mode_prop,
CKMODE_RGB);
- drm_object_attach_property(mobj, priv->brightness_prop, 256);
+ drm_object_attach_property(mobj, priv->brightness_prop,
+ 256 + DEFAULT_BRIGHTNESS);
drm_object_attach_property(mobj, priv->contrast_prop,
- dplane->prop.contrast);
+ DEFAULT_CONTRAST);
drm_object_attach_property(mobj, priv->saturation_prop,
- dplane->prop.saturation);
+ DEFAULT_SATURATION);
- return 0;
+ ret = drm_plane_create_color_properties(overlay,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+ DEFAULT_ENCODING,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ return ret;
}
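
[Note: a recurring pattern in the atomic_update above: each register value is derived from both old and new plane state, and a write is queued only when the value changed or when the plane just became visible, so a freshly enabled plane gets a full programming pass. Distilled into a hedged sketch using the armada_hw.h definitions above; queue_if_changed is illustrative and not in the patch.]

static unsigned int queue_if_changed(struct armada_regs *regs,
				     struct drm_plane_state *old_state,
				     struct drm_plane_state *state,
				     u32 (*derive)(struct drm_plane_state *),
				     unsigned int reg)
{
	unsigned int idx = 0;
	u32 val = derive(state);

	/* Reprogram on change, or unconditionally on a 0->1 visible edge. */
	if ((!old_state->visible && state->visible) ||
	    derive(old_state) != val)
		armada_reg_queue_set(regs, idx, val, reg);

	return idx;	/* number of register writes queued: 0 or 1 */
}
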
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
new file mode 100644
index 000000000000..9f36423dd394
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include "armada_plane.h"
+#include "armada_trace.h"
+
+static const uint32_t armada_primary_formats[] = {
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+};
+
+void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
+ u16 pitches[3], bool interlaced)
+{
+ struct drm_framebuffer *fb = state->fb;
+ const struct drm_format_info *format = fb->format;
+ unsigned int num_planes = format->num_planes;
+ unsigned int x = state->src.x1 >> 16;
+ unsigned int y = state->src.y1 >> 16;
+ u32 addr = drm_fb_obj(fb)->dev_addr;
+ int i;
+
+ DRM_DEBUG_KMS("pitch %u x %d y %d bpp %d\n",
+ fb->pitches[0], x, y, format->cpp[0] * 8);
+
+ if (num_planes > 3)
+ num_planes = 3;
+
+ addrs[0][0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+ x * format->cpp[0];
+ pitches[0] = fb->pitches[0];
+
+ y /= format->vsub;
+ x /= format->hsub;
+
+ for (i = 1; i < num_planes; i++) {
+ addrs[0][i] = addr + fb->offsets[i] + y * fb->pitches[i] +
+ x * format->cpp[i];
+ pitches[i] = fb->pitches[i];
+ }
+ for (; i < 3; i++) {
+ addrs[0][i] = 0;
+ pitches[i] = 0;
+ }
+ if (interlaced) {
+ for (i = 0; i < 3; i++) {
+ addrs[1][i] = addrs[0][i] + pitches[i];
+ pitches[i] *= 2;
+ }
+ } else {
+ for (i = 0; i < 3; i++)
+ addrs[1][i] = addrs[0][i];
+ }
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_plane_state *state,
+ struct armada_regs *regs, bool interlaced)
+{
+ u16 pitches[3];
+ u32 addrs[2][3];
+ unsigned i = 0;
+
+ armada_drm_plane_calc(state, addrs, pitches, interlaced);
+
+ /* write offset, base, and pitch */
+ armada_reg_queue_set(regs, i, addrs[0][0], LCD_CFG_GRA_START_ADDR0);
+ armada_reg_queue_set(regs, i, addrs[1][0], LCD_CFG_GRA_START_ADDR1);
+ armada_reg_queue_mod(regs, i, pitches[0], 0xffff, LCD_CFG_GRA_PITCH);
+
+ return i;
+}
+
+int armada_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n",
+ plane->base.id, plane->name,
+ state->fb ? state->fb->base.id : 0);
+
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ if (state->fb)
+ drm_framebuffer_get(state->fb);
+ return 0;
+}
+
+void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->fb ? old_state->fb->base.id : 0);
+
+ if (old_state->fb)
+ drm_framebuffer_put(old_state->fb);
+}
+
+int armada_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb && !WARN_ON(!state->crtc)) {
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+
+ if (state->state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ else
+ crtc_state = crtc->state;
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ 0, INT_MAX,
+ true, false);
+ } else {
+ state->visible = false;
+ }
+ return 0;
+}
+
+static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ u32 cfg, cfg_mask, val;
+ unsigned int idx;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!state->fb || WARN_ON(!state->crtc))
+ return;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
+ plane->base.id, plane->name,
+ state->crtc->base.id, state->crtc->name,
+ state->fb->base.id,
+ old_state->visible, state->visible);
+
+ dcrtc = drm_to_armada_crtc(state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ idx = 0;
+ if (!old_state->visible && state->visible) {
+ val = CFG_PDWN64x66;
+ if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+ val |= CFG_PDWN256x24;
+ armada_reg_queue_mod(regs, idx, 0, val, LCD_SPU_SRAM_PARA1);
+ }
+ val = armada_rect_hw_fp(&state->src);
+ if (armada_rect_hw_fp(&old_state->src) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_HPXL_VLN);
+ val = armada_rect_yx(&state->dst);
+ if (armada_rect_yx(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_OVSA_HPXL_VLN);
+ val = armada_rect_hw(&state->dst);
+ if (armada_rect_hw(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GZM_HPXL_VLN);
+ if (old_state->src.x1 != state->src.x1 ||
+ old_state->src.y1 != state->src.y1 ||
+ old_state->fb != state->fb ||
+ state->crtc->state->mode_changed) {
+ idx += armada_drm_crtc_calc_fb(state, regs + idx,
+ dcrtc->interlaced);
+ }
+ if (old_state->fb != state->fb ||
+ state->crtc->state->mode_changed) {
+ cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
+ CFG_GRA_MOD(drm_fb_to_armada_fb(state->fb)->mod);
+ if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+ cfg |= CFG_PALETTE_ENA;
+ if (state->visible)
+ cfg |= CFG_GRA_ENA;
+ if (dcrtc->interlaced)
+ cfg |= CFG_GRA_FTOGGLE;
+ cfg_mask = CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
+ CFG_GRA_ENA;
+ } else if (old_state->visible != state->visible) {
+ cfg = state->visible ? CFG_GRA_ENA : 0;
+ cfg_mask = CFG_GRA_ENA;
+ } else {
+ cfg = cfg_mask = 0;
+ }
+ if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
+ drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+ cfg_mask |= CFG_GRA_HSMOOTH;
+ if (drm_rect_width(&state->src) >> 16 !=
+ drm_rect_width(&state->dst))
+ cfg |= CFG_GRA_HSMOOTH;
+ }
+
+ if (cfg_mask)
+ armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
+ LCD_SPU_DMA_CTRL0);
+
+ dcrtc->regs_idx += idx;
+}
+
+static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx = 0;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!old_state->crtc)
+ return;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->crtc->base.id, old_state->crtc->name,
+ old_state->fb->base.id);
+
+ dcrtc = drm_to_armada_crtc(old_state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ /* Disable plane and power down most RAMs and FIFOs */
+ armada_reg_queue_mod(regs, idx, 0, CFG_GRA_ENA, LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_mod(regs, idx, CFG_PDWN256x32 | CFG_PDWN256x24 |
+ CFG_PDWN256x8 | CFG_PDWN32x32 | CFG_PDWN64x66,
+ 0, LCD_SPU_SRAM_PARA1);
+
+ dcrtc->regs_idx += idx;
+}
+
+static const struct drm_plane_helper_funcs armada_primary_plane_helper_funcs = {
+ .prepare_fb = armada_drm_plane_prepare_fb,
+ .cleanup_fb = armada_drm_plane_cleanup_fb,
+ .atomic_check = armada_drm_plane_atomic_check,
+ .atomic_update = armada_drm_primary_plane_atomic_update,
+ .atomic_disable = armada_drm_primary_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs armada_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_primary_helper_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+int armada_drm_primary_plane_init(struct drm_device *drm,
+ struct drm_plane *primary)
+{
+ int ret;
+
+ drm_plane_helper_add(primary, &armada_primary_plane_helper_funcs);
+
+ ret = drm_universal_plane_init(drm, primary, 0,
+ &armada_primary_plane_funcs,
+ armada_primary_formats,
+ ARRAY_SIZE(armada_primary_formats),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+
+ return ret;
+}
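
[Note: the address arithmetic in armada_drm_plane_calc() above reduces to start = base + offset + y*pitch + x*cpp per colour plane, with x/y scaled down by the chroma subsampling factors for planes 1..2; for interlace, a second set of addresses starts one line down and the pitches double. A plain-C restatement under those assumptions, with illustrative types:]

#include <stdint.h>

struct fmt { int cpp[3]; int hsub, vsub; int num_planes; };

static void calc_addrs(uint32_t base, const uint32_t offsets[3],
		       const uint16_t fb_pitches[3], const struct fmt *f,
		       unsigned int x, unsigned int y, int interlaced,
		       uint32_t addrs[2][3], uint16_t pitches[3])
{
	int i;

	for (i = 0; i < f->num_planes && i < 3; i++) {
		/* Chroma planes address subsampled samples, not pixels. */
		unsigned int px = i ? x / f->hsub : x;
		unsigned int py = i ? y / f->vsub : y;

		addrs[0][i] = base + offsets[i] + py * fb_pitches[i] +
			      px * f->cpp[i];
		pitches[i] = fb_pitches[i];
	}
	for (; i < 3; i++) {
		addrs[0][i] = 0;
		pitches[i] = 0;
	}
	for (i = 0; i < 3; i++) {
		/* Field 1 starts one line below field 0 when interlaced. */
		addrs[1][i] = addrs[0][i] + (interlaced ? pitches[i] : 0);
		if (interlaced)
			pitches[i] *= 2;
	}
}
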
diff --git a/drivers/gpu/drm/armada/armada_plane.h b/drivers/gpu/drm/armada/armada_plane.h
new file mode 100644
index 000000000000..ff4281ba7fad
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_plane.h
@@ -0,0 +1,15 @@
+#ifndef ARMADA_PLANE_H
+#define ARMADA_PLANE_H
+
+void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
+ u16 pitches[3], bool interlaced);
+int armada_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+int armada_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state);
+int armada_drm_primary_plane_init(struct drm_device *drm,
+ struct drm_plane *primary);
+
+#endif
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 036dff8a1f33..5e77d456d9bb 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -790,12 +790,12 @@ static int ast_get_modes(struct drm_connector *connector)
if (!flags)
edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+ drm_connector_update_edid_property(&ast_connector->base, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
return ret;
} else
- drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+ drm_connector_update_edid_property(&ast_connector->base, NULL);
return 0;
}
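
[Note: drm_connector_update_edid_property() above is the shortened name of drm_mode_connector_update_edid_property(); the surrounding code is the standard get_modes flow: fetch EDID over DDC, publish (or clear) the blob on the connector, convert its timings to modes, free the copy. A hedged sketch of that flow with an illustrative function name:]

#include <linux/slab.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

static int my_get_modes(struct drm_connector *connector,
			struct i2c_adapter *ddc)
{
	struct edid *edid = drm_get_edid(connector, ddc);
	int count = 0;

	/* Publish the blob (NULL clears a stale one on disconnect). */
	drm_connector_update_edid_property(connector, edid);
	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);	/* the parsed modes don't reference this buffer */
	}
	return count;
}
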
@@ -900,7 +900,7 @@ static int ast_connector_init(struct drm_device *dev)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
ast_connector->i2c = ast_i2c_create(dev);
if (!ast_connector->i2c)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index c1ea5c36b006..843cac222e60 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -681,6 +681,7 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
drm_fb_cma_fbdev_fini(dev);
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
+ drm_atomic_helper_shutdown(dev);
drm_mode_config_cleanup(dev);
pm_runtime_get_sync(dev->dev);
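
[Note: the one-line atmel-hlcdc change above slots drm_atomic_helper_shutdown() into unload so every CRTC and plane is disabled while the driver is still functional, before drm_mode_config_cleanup() frees the KMS objects. The teardown ordering, sketched as a hypothetical unload body:]

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

static void my_unload(struct drm_device *dev)
{
	drm_kms_helper_poll_fini(dev);		/* stop HPD polling first */
	drm_atomic_helper_shutdown(dev);	/* disable all outputs */
	drm_mode_config_cleanup(dev);		/* now safe to free KMS state */
}
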
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 47e0992f3908..04440064b9b7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -412,9 +412,10 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg);
}
-static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane)
+static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_plane_state *state)
{
- struct drm_crtc *crtc = plane->base.crtc;
+ struct drm_crtc *crtc = state->base.crtc;
struct drm_color_lut *lut;
int idx;
@@ -779,7 +780,7 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
atmel_hlcdc_plane_update_pos_and_size(plane, state);
atmel_hlcdc_plane_update_general_settings(plane, state);
atmel_hlcdc_plane_update_format(plane, state);
- atmel_hlcdc_plane_update_clut(plane);
+ atmel_hlcdc_plane_update_clut(plane, state);
atmel_hlcdc_plane_update_buffers(plane, state);
atmel_hlcdc_plane_update_disc_area(plane, state);
@@ -816,16 +817,6 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
}
-static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
-{
- struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-
- if (plane->base.fb)
- drm_framebuffer_put(plane->base.fb);
-
- drm_plane_cleanup(p);
-}
-
static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
@@ -1002,7 +993,7 @@ static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
static const struct drm_plane_funcs layer_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = atmel_hlcdc_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = atmel_hlcdc_plane_reset,
.atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
.atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 233980a78591..ca5a9afdd5cf 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -259,7 +259,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs_crtc_init(bochs->dev);
bochs_encoder_init(bochs->dev);
bochs_connector_init(bochs->dev);
- drm_mode_connector_attach_encoder(&bochs->connector,
+ drm_connector_attach_encoder(&bochs->connector,
&bochs->encoder);
return 0;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index fa2c7997e2fd..bf6cad6c9178 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -82,9 +82,11 @@ config DRM_PARADE_PS8622
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
- depends on OF && RC_CORE
+ depends on OF
select DRM_KMS_HELPER
imply EXTCON
+ select INPUT
+ select RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 73021b388e12..85c2d407a52e 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -429,6 +429,18 @@ static void adv7511_hpd_work(struct work_struct *work)
else
status = connector_status_disconnected;
+ /*
+ * The bridge resets its registers on unplug. So when we get a plug
+ * event and we're already supposed to be powered, cycle the bridge to
+ * restore its state.
+ */
+ if (status == connector_status_connected &&
+ adv7511->connector.status == connector_status_disconnected &&
+ adv7511->powered) {
+ regcache_mark_dirty(adv7511->regmap);
+ adv7511_power_on(adv7511);
+ }
+
if (adv7511->connector.status != status) {
adv7511->connector.status = status;
if (status == connector_status_disconnected)
@@ -601,7 +613,7 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
__adv7511_power_off(adv7511);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
@@ -860,7 +872,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
}
drm_connector_helper_add(&adv->connector,
&adv7511_connector_helper_funcs);
- drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+ drm_connector_attach_encoder(&adv->connector, bridge->encoder);
if (adv->type == ADV7533)
ret = adv7533_attach_dsi(adv);
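
[Note: the adv7511 hunk above relies on regmap's register cache to survive the bridge's self-reset on unplug: marking the cache dirty makes the next sync rewrite the whole configuration. The generic pattern, sketched with illustrative names; the real driver performs the resync inside adv7511_power_on().]

#include <linux/regmap.h>

static int my_bridge_power_on(struct regmap *regmap)
{
	/* ... enable supplies, deassert reset ... */

	/*
	 * Replay every cached register that differs from the hardware
	 * defaults; after a reset that is the whole configuration.
	 */
	return regcache_sync(regmap);
}

static void my_bridge_handle_replug(struct regmap *regmap)
{
	/* Hardware reset: the cache no longer matches the device. */
	regcache_mark_dirty(regmap);
	my_bridge_power_on(regmap);
}
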
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index b49043866be6..f8433c93f463 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -969,8 +969,8 @@ static int anx78xx_get_modes(struct drm_connector *connector)
goto unlock;
}
- err = drm_mode_connector_update_edid_property(connector,
- anx78xx->edid);
+ err = drm_connector_update_edid_property(connector,
+ anx78xx->edid);
if (err) {
DRM_ERROR("Failed to update EDID property: %d\n", err);
goto unlock;
@@ -1048,8 +1048,8 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge)
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
- err = drm_mode_connector_attach_encoder(&anx78xx->connector,
- bridge->encoder);
+ err = drm_connector_attach_encoder(&anx78xx->connector,
+ bridge->encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 2bcbfadb6ac5..d68986cea132 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1119,8 +1119,8 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &dp->aux.ddc);
pm_runtime_put(dp->dev);
if (edid) {
- drm_mode_connector_update_edid_property(&dp->connector,
- edid);
+ drm_connector_update_edid_property(&dp->connector,
+ edid);
num_modes += drm_add_edid_modes(&dp->connector, edid);
kfree(edid);
}
@@ -1210,7 +1210,7 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
drm_connector_helper_add(connector,
&analogix_dp_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
}
/*
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index c255fc3e1be5..ce9496d13986 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1152,7 +1152,7 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
np = of_node_get(dev->dev.of_node);
panel = of_drm_find_panel(np);
- if (panel) {
+ if (!IS_ERR(panel)) {
bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
} else {
bridge = of_drm_find_bridge(dev->dev.of_node);
@@ -1337,7 +1337,7 @@ static const struct mipi_dsi_host_ops cdns_dsi_ops = {
.transfer = cdns_dsi_transfer,
};
-static int cdns_dsi_resume(struct device *dev)
+static int __maybe_unused cdns_dsi_resume(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);
@@ -1350,7 +1350,7 @@ static int cdns_dsi_resume(struct device *dev)
return 0;
}
-static int cdns_dsi_suspend(struct device *dev)
+static int __maybe_unused cdns_dsi_suspend(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 9837c8d69e69..9b706789a341 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -55,7 +55,7 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
goto fallback;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
return ret;
@@ -122,7 +122,7 @@ static int dumb_vga_attach(struct drm_bridge *bridge)
return ret;
}
- drm_mode_connector_attach_encoder(&vga->connector,
+ drm_connector_attach_encoder(&vga->connector,
bridge->encoder);
return 0;
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 75b0d3f6e4de..f56c92f7af7c 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -68,9 +68,9 @@ static int lvds_encoder_probe(struct platform_device *pdev)
panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
- if (!panel) {
+ if (IS_ERR(panel)) {
dev_dbg(&pdev->dev, "panel not found, deferring probe\n");
- return -EPROBE_DEFER;
+ return PTR_ERR(panel);
}
lvds_encoder->panel_bridge =
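
[Note: both panel hunks above (cdns-dsi and lvds-encoder) adapt to of_drm_find_panel() returning an ERR_PTR() — typically ERR_PTR(-EPROBE_DEFER) — instead of NULL, so callers test IS_ERR() and propagate PTR_ERR() rather than hard-coding the deferral. A sketch of the updated calling convention, with an illustrative wrapper:]

#include <linux/err.h>
#include <drm/drm_panel.h>

static int my_find_panel(const struct device_node *np,
			 struct drm_panel **out)
{
	struct drm_panel *panel = of_drm_find_panel(np);

	if (IS_ERR(panel))
		return PTR_ERR(panel);	/* usually -EPROBE_DEFER */

	*out = panel;
	return 0;
}
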
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 7ccadba7c98c..2136c97aeb8e 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -152,7 +152,7 @@ static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
ge_b850v3_lvds_ptr->edid = (struct edid *)stdp2690_get_edid(client);
if (ge_b850v3_lvds_ptr->edid) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
ge_b850v3_lvds_ptr->edid);
num_modes = drm_add_edid_modes(connector,
ge_b850v3_lvds_ptr->edid);
@@ -241,7 +241,7 @@ static int ge_b850v3_lvds_attach(struct drm_bridge *bridge)
return ret;
}
- ret = drm_mode_connector_attach_encoder(connector, bridge->encoder);
+ ret = drm_connector_attach_encoder(connector, bridge->encoder);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index d64a3283822a..a3e817abace1 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -222,7 +222,7 @@ static int ptn3460_get_modes(struct drm_connector *connector)
}
ptn_bridge->edid = (struct edid *)edid;
- drm_mode_connector_update_edid_property(connector, ptn_bridge->edid);
+ drm_connector_update_edid_property(connector, ptn_bridge->edid);
num_modes = drm_add_edid_modes(connector, ptn_bridge->edid);
@@ -265,7 +265,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge)
drm_connector_helper_add(&ptn_bridge->connector,
&ptn3460_connector_helper_funcs);
drm_connector_register(&ptn_bridge->connector);
- drm_mode_connector_attach_encoder(&ptn_bridge->connector,
+ drm_connector_attach_encoder(&ptn_bridge->connector,
bridge->encoder);
if (ptn_bridge->panel)
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 6d99d4a3beb3..7cbaba213ef6 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -79,7 +79,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge)
return ret;
}
- drm_mode_connector_attach_encoder(&panel_bridge->connector,
+ drm_connector_attach_encoder(&panel_bridge->connector,
bridge->encoder);
ret = drm_panel_attach(panel_bridge->panel, &panel_bridge->connector);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 81198f5e9afa..7334d1b62b71 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -503,7 +503,7 @@ static int ps8622_attach(struct drm_bridge *bridge)
drm_connector_helper_add(&ps8622->connector,
&ps8622_connector_helper_funcs);
drm_connector_register(&ps8622->connector);
- drm_mode_connector_attach_encoder(&ps8622->connector,
+ drm_connector_attach_encoder(&ps8622->connector,
bridge->encoder);
if (ps8622->panel)
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 60373d7eb220..e59a13542333 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -170,7 +170,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
return ret;
edid = drm_get_edid(connector, sii902x->i2c->adapter);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
num = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -324,7 +324,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge)
else
sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
- drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 250effa0e6b8..a6e8f4591e63 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -14,6 +14,7 @@
#include <drm/bridge/mhl.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -72,9 +73,7 @@ struct sii8620 {
struct regulator_bulk_data supplies[2];
struct mutex lock; /* context lock, protects fields below */
int error;
- int pixel_clock;
unsigned int use_packed_pixel:1;
- int video_code;
enum sii8620_mode mode;
enum sii8620_sink_type sink_type;
u8 cbus_status;
@@ -82,7 +81,6 @@ struct sii8620 {
u8 xstat[MHL_XDS_SIZE];
u8 devcap[MHL_DCAP_SIZE];
u8 xdevcap[MHL_XDC_SIZE];
- u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
bool feature_complete;
bool devcap_read;
bool sink_detected;
@@ -1017,21 +1015,36 @@ static void sii8620_stop_video(struct sii8620 *ctx)
static void sii8620_set_format(struct sii8620 *ctx)
{
+ u8 out_fmt;
+
if (sii8620_is_mhl3(ctx)) {
sii8620_setbits(ctx, REG_M3_P0CTRL,
BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
ctx->use_packed_pixel ? ~0 : 0);
} else {
+ if (ctx->use_packed_pixel) {
+ sii8620_write_seq_static(ctx,
+ REG_VID_MODE, BIT_VID_MODE_M1080P,
+ REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
+ REG_MHLTX_CTL6, 0x60
+ );
+ } else {
sii8620_write_seq_static(ctx,
REG_VID_MODE, 0,
REG_MHL_TOP_CTL, 1,
REG_MHLTX_CTL6, 0xa0
);
+ }
}
+ if (ctx->use_packed_pixel)
+ out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
+ else
+ out_fmt = VAL_TPI_FORMAT(RGB, FULL);
+
sii8620_write_seq(ctx,
REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
- REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+ REG_TPI_OUTPUT, out_fmt,
);
}
@@ -1082,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
return frm_len;
}
-static void sii8620_set_infoframes(struct sii8620 *ctx)
+static void sii8620_set_infoframes(struct sii8620 *ctx,
+ struct drm_display_mode *mode)
{
struct mhl3_infoframe mhl_frm;
union hdmi_infoframe frm;
u8 buf[31];
int ret;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+ mode,
+ true);
+ if (ctx->use_packed_pixel)
+ frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+
+ if (!ret)
+ ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+ if (ret > 0)
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+
if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
sii8620_write(ctx, REG_TPI_SC,
BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
- sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
- ARRAY_SIZE(ctx->avif) - 3);
sii8620_write(ctx, REG_PKT_FILTER_0,
BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1102,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
return;
}
- ret = hdmi_avi_infoframe_init(&frm.avi);
- frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
- frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
- frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
- frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
- frm.avi.video_code = ctx->video_code;
- if (!ret)
- ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
- if (ret > 0)
- sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
sii8620_write(ctx, REG_PKT_FILTER_0,
BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1131,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
static void sii8620_start_video(struct sii8620 *ctx)
{
+ struct drm_display_mode *mode =
+ &ctx->bridge.encoder->crtc->state->adjusted_mode;
+
if (!sii8620_is_mhl3(ctx))
sii8620_stop_video(ctx);
@@ -1149,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx)
sii8620_set_format(ctx);
if (!sii8620_is_mhl3(ctx)) {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+ u8 link_mode = MHL_DST_LM_PATH_ENABLED;
+
+ if (ctx->use_packed_pixel)
+ link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+ else
+ link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
sii8620_set_auto_zone(ctx);
} else {
static const struct {
@@ -1167,7 +1189,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
};
u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
- int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+ int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
int i;
for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
@@ -1196,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
clk_spec[i].link_rate);
}
- sii8620_set_infoframes(ctx);
+ sii8620_set_infoframes(ctx, mode);
}
static void sii8620_disable_hpd(struct sii8620 *ctx)
@@ -1661,14 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx)
static void sii8620_status_changed_path(struct sii8620 *ctx)
{
- if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL
- | MHL_DST_LM_PATH_ENABLED);
- } else {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL);
- }
+ u8 link_mode;
+
+ if (ctx->use_packed_pixel)
+ link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+ else
+ link_mode = MHL_DST_LM_CLK_MODE_NORMAL;
+
+ if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+ link_mode |= MHL_DST_LM_PATH_ENABLED;
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ link_mode);
}
static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
@@ -2242,8 +2268,6 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
mutex_lock(&ctx->lock);
ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
- ctx->video_code = drm_match_cea_mode(adjusted_mode);
- ctx->pixel_clock = adjusted_mode->clock;
mutex_unlock(&ctx->lock);
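
[Note: the sii8620 changes above derive everything from the adjusted mode at start-video time instead of caching pixel_clock/video_code. The MHL3 link clock is the pixel clock times bytes per pixel — 3 for 24-bit RGB, 2 for packed-pixel YCbCr 4:2:2 — matched against the driver's link-rate table. The selection logic, distilled; the rate encodings below are illustrative, not the driver's constants.]

struct clk_rate {
	int max_clk;	/* highest link clock this rate supports, kHz */
	int link_rate;
};

static int pick_link_rate(int mode_clock_khz, int use_packed_pixel)
{
	static const struct clk_rate spec[] = {
		{ 150000, 15 },	/* 1.5 Gbps */
		{ 300000, 30 },	/* 3.0 Gbps */
		{ 600000, 60 },	/* 6.0 Gbps */
	};
	/* Packed pixel halves the per-pixel payload from 3 bytes to 2. */
	int clk = mode_clock_khz * (use_packed_pixel ? 2 : 3);
	int i;

	for (i = 0; i < (int)(sizeof(spec) / sizeof(spec[0])) - 1; i++)
		if (clk <= spec[i].max_clk)
			break;
	return spec[i].link_rate;	/* first rate with enough headroom */
}
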
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 3c136f2b954f..5971976284bf 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1922,7 +1922,7 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -1974,7 +1974,7 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
drm_connector_init(bridge->dev, connector, &dw_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 0fd9cf27542c..8e28e738cb52 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1140,7 +1140,7 @@ static int tc_connector_get_modes(struct drm_connector *connector)
if (!edid)
return 0;
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
return count;
@@ -1195,7 +1195,7 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
drm_display_info_set_bus_formats(&tc->connector.display_info,
&bus_format, 1);
- drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
+ drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index acb857030951..c3e32138c6bb 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -62,7 +62,7 @@ static int tfp410_get_modes(struct drm_connector *connector)
goto fallback;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
return drm_add_edid_modes(connector, edid);
fallback:
@@ -132,7 +132,7 @@ static int tfp410_attach(struct drm_bridge *bridge)
return ret;
}
- drm_mode_connector_attach_encoder(&dvi->connector,
+ drm_connector_attach_encoder(&dvi->connector,
bridge->encoder);
return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index be2d7e488062..ce9db7aab225 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -92,7 +92,6 @@
#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
struct cirrus_crtc {
struct drm_crtc base;
@@ -117,11 +116,6 @@ struct cirrus_connector {
struct drm_connector base;
};
-struct cirrus_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
struct cirrus_mc {
resource_size_t vram_size;
resource_size_t vram_base;
@@ -152,7 +146,7 @@ struct cirrus_device {
struct cirrus_fbdev {
struct drm_fb_helper helper;
- struct cirrus_framebuffer gfb;
+ struct drm_framebuffer gfb;
void *sysram;
int size;
int x1, y1, x2, y2; /* dirty rect */
@@ -198,7 +192,7 @@ int cirrus_dumb_create(struct drm_file *file,
struct drm_mode_create_dumb *args);
int cirrus_framebuffer_init(struct drm_device *dev,
- struct cirrus_framebuffer *gfb,
+ struct drm_framebuffer *gfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 32fbfba2c623..b643ac92801c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
struct drm_gem_object *obj;
struct cirrus_bo *bo;
int src_offset, dst_offset;
- int bpp = afbdev->gfb.base.format->cpp[0];
+ int bpp = afbdev->gfb.format->cpp[0];
int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
- obj = afbdev->gfb.obj;
+ obj = afbdev->gfb.obj[0];
bo = gem_to_cirrus_bo(obj);
/*
@@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
}
for (i = y; i < y + height; i++) {
/* assume equal stride for now */
- src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+ src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
}
@@ -204,7 +204,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
gfbdev->sysram = sysram;
gfbdev->size = size;
- fb = &gfbdev->gfb.base;
+ fb = &gfbdev->gfb;
if (!fb) {
DRM_INFO("fb is NULL\n");
return -EINVAL;
@@ -246,19 +246,19 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
- struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+ struct drm_framebuffer *gfb = &gfbdev->gfb;
drm_fb_helper_unregister_fbi(&gfbdev->helper);
- if (gfb->obj) {
- drm_gem_object_put_unlocked(gfb->obj);
- gfb->obj = NULL;
+ if (gfb->obj[0]) {
+ drm_gem_object_put_unlocked(gfb->obj[0]);
+ gfb->obj[0] = NULL;
}
vfree(gfbdev->sysram);
drm_fb_helper_fini(&gfbdev->helper);
- drm_framebuffer_unregister_private(&gfb->base);
- drm_framebuffer_cleanup(&gfb->base);
+ drm_framebuffer_unregister_private(gfb);
+ drm_framebuffer_cleanup(gfb);
return 0;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 26df1e8cd490..60d54e10a34d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -10,42 +10,25 @@
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "cirrus_drv.h"
-static int cirrus_create_handle(struct drm_framebuffer *fb,
- struct drm_file* file_priv,
- unsigned int* handle)
-{
- struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
- return drm_gem_handle_create(file_priv, cirrus_fb->obj, handle);
-}
-
-static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
- drm_gem_object_put_unlocked(cirrus_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
- .create_handle = cirrus_create_handle,
- .destroy = cirrus_user_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
int cirrus_framebuffer_init(struct drm_device *dev,
- struct cirrus_framebuffer *gfb,
+ struct drm_framebuffer *gfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
- drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
- gfb->obj = obj;
- ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+ drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
+ gfb->obj[0] = obj;
+ ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
@@ -60,7 +43,7 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
{
struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
- struct cirrus_framebuffer *cirrus_fb;
+ struct drm_framebuffer *fb;
u32 bpp;
int ret;
@@ -74,19 +57,19 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
if (obj == NULL)
return ERR_PTR(-ENOENT);
- cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
- if (!cirrus_fb) {
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb) {
drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
- ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+ ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
if (ret) {
drm_gem_object_put_unlocked(obj);
- kfree(cirrus_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return &cirrus_fb->base;
+ return fb;
}
static const struct drm_mode_config_funcs cirrus_mode_funcs = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index c91b9b054e3f..336bfda40125 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -101,17 +101,13 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
int x, int y, int atomic)
{
struct cirrus_device *cdev = crtc->dev->dev_private;
- struct drm_gem_object *obj;
- struct cirrus_framebuffer *cirrus_fb;
struct cirrus_bo *bo;
int ret;
u64 gpu_addr;
/* push the previous fb to system ram */
if (!atomic && fb) {
- cirrus_fb = to_cirrus_framebuffer(fb);
- obj = cirrus_fb->obj;
- bo = gem_to_cirrus_bo(obj);
+ bo = gem_to_cirrus_bo(fb->obj[0]);
ret = cirrus_bo_reserve(bo, false);
if (ret)
return ret;
@@ -119,9 +115,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
cirrus_bo_unreserve(bo);
}
- cirrus_fb = to_cirrus_framebuffer(crtc->primary->fb);
- obj = cirrus_fb->obj;
- bo = gem_to_cirrus_bo(obj);
+ bo = gem_to_cirrus_bo(crtc->primary->fb->obj[0]);
ret = cirrus_bo_reserve(bo, false);
if (ret)
@@ -133,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
return ret;
}
- if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+ if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
/* if pushing console in kmap it */
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret)
@@ -536,7 +530,7 @@ int cirrus_modeset_init(struct cirrus_device *cdev)
return -1;
}
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
ret = cirrus_fbdev_init(cdev);
if (ret) {
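The cirrus conversion boils down to a generic pattern: keep the GEM object in &drm_framebuffer.obj[0] and let the drm_gem_fb_* helpers implement create_handle and destroy, so no driver-private framebuffer struct is needed. A condensed sketch of that pattern, using only calls that appear in the hunks above:

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

static const struct drm_framebuffer_funcs example_fb_funcs = {
	.create_handle = drm_gem_fb_create_handle,	/* reads fb->obj[0] */
	.destroy       = drm_gem_fb_destroy,		/* puts fb->obj[0] */
};

static int example_framebuffer_init(struct drm_device *dev,
				    struct drm_framebuffer *fb,
				    const struct drm_mode_fb_cmd2 *mode_cmd,
				    struct drm_gem_object *obj)
{
	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	fb->obj[0] = obj;	/* the generic helpers expect the GEM object here */
	return drm_framebuffer_init(dev, fb, &example_fb_funcs);
}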
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 895741e9cd7d..3eb061e11e2e 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
+#include <drm/drm_writeback.h>
#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
@@ -325,6 +326,35 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
return fence_ptr;
}
+static int set_out_fence_for_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ s32 __user *fence_ptr)
+{
+ unsigned int index = drm_connector_index(connector);
+
+ if (!fence_ptr)
+ return 0;
+
+ if (put_user(-1, fence_ptr))
+ return -EFAULT;
+
+ state->connectors[index].out_fence_ptr = fence_ptr;
+
+ return 0;
+}
+
+static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ unsigned int index = drm_connector_index(connector);
+ s32 __user *fence_ptr;
+
+ fence_ptr = state->connectors[index].out_fence_ptr;
+ state->connectors[index].out_fence_ptr = NULL;
+
+ return fence_ptr;
+}
+
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
@@ -339,6 +369,7 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
const struct drm_display_mode *mode)
{
+ struct drm_crtc *crtc = state->crtc;
struct drm_mode_modeinfo umode;
/* Early return for no change. */
@@ -359,13 +390,13 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
drm_mode_copy(&state->mode, mode);
state->enable = true;
- DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
- mode->name, state);
+ DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+ mode->name, crtc->base.id, crtc->name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
- DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
- state);
+ DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+ crtc->base.id, crtc->name, state);
}
return 0;
@@ -388,6 +419,8 @@ EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
struct drm_property_blob *blob)
{
+ struct drm_crtc *crtc = state->crtc;
+
if (blob == state->mode_blob)
return 0;
@@ -397,19 +430,34 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
memset(&state->mode, 0, sizeof(state->mode));
if (blob) {
- if (blob->length != sizeof(struct drm_mode_modeinfo) ||
- drm_mode_convert_umode(state->crtc->dev, &state->mode,
- blob->data))
+ int ret;
+
+ if (blob->length != sizeof(struct drm_mode_modeinfo)) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
+ crtc->base.id, crtc->name,
+ blob->length);
return -EINVAL;
+ }
+
+ ret = drm_mode_convert_umode(crtc->dev,
+ &state->mode, blob->data);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
+ crtc->base.id, crtc->name,
+ ret, drm_get_mode_status_name(state->mode.status));
+ drm_mode_debug_printmodeline(&state->mode);
+ return -EINVAL;
+ }
state->mode_blob = drm_property_blob_get(blob);
state->enable = true;
- DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
- state->mode.name, state);
+ DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+ state->mode.name, crtc->base.id, crtc->name,
+ state);
} else {
state->enable = false;
- DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
- state);
+ DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+ crtc->base.id, crtc->name, state);
}
return 0;
@@ -539,10 +587,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
return -EFAULT;
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
- } else if (crtc->funcs->atomic_set_property)
+ } else if (crtc->funcs->atomic_set_property) {
return crtc->funcs->atomic_set_property(crtc, state, property, val);
- else
+ } else {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
+ crtc->base.id, crtc->name,
+ property->base.id, property->name);
return -EINVAL;
+ }
return 0;
}
@@ -677,6 +729,51 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_connector_check - check connector state
+ * @connector: connector to check
+ * @state: connector state to check
+ *
+ * Provides core sanity checks for connector state.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int drm_atomic_connector_check(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_writeback_job *writeback_job = state->writeback_job;
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
+ return 0;
+
+ if (writeback_job->fb && !state->crtc) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ if (state->crtc)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+
+ if (writeback_job->fb && !crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
+ connector->base.id, connector->name,
+ state->crtc->base.id);
+ return -EINVAL;
+ }
+
+ if (writeback_job->out_fence && !writeback_job->fb) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
* @plane: plane to get state object for
@@ -700,6 +797,11 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
WARN_ON(!state->acquire_ctx);
+ /* the legacy pointers should never be set */
+ WARN_ON(plane->fb);
+ WARN_ON(plane->old_fb);
+ WARN_ON(plane->crtc);
+
plane_state = drm_atomic_get_existing_plane_state(state, plane);
if (plane_state)
return plane_state;
@@ -794,8 +896,11 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (property == plane->alpha_property) {
state->alpha = val;
} else if (property == plane->rotation_property) {
- if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
+ if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
+ plane->base.id, plane->name, val);
return -EINVAL;
+ }
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
@@ -807,6 +912,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return plane->funcs->atomic_set_property(plane, state,
property, val);
} else {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
+ plane->base.id, plane->name,
+ property->base.id, property->name);
return -EINVAL;
}
@@ -914,10 +1022,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* either *both* CRTC and FB must be set, or neither */
if (state->crtc && !state->fb) {
- DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
+ plane->base.id, plane->name);
return -EINVAL;
} else if (state->fb && !state->crtc) {
- DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -927,7 +1037,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
- DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
+ DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
+ state->crtc->base.id, state->crtc->name,
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -936,7 +1048,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
- DRM_DEBUG_ATOMIC("Invalid pixel format %s, modifier 0x%llx\n",
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
+ plane->base.id, plane->name,
drm_get_format_name(state->fb->format->format,
&format_name),
state->fb->modifier);
@@ -948,7 +1061,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
state->crtc_h > INT_MAX ||
state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
- DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane->base.id, plane->name,
state->crtc_w, state->crtc_h,
state->crtc_x, state->crtc_y);
return -ERANGE;
@@ -962,8 +1076,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->src_x > fb_width - state->src_w ||
state->src_h > fb_height ||
state->src_y > fb_height - state->src_h) {
- DRM_DEBUG_ATOMIC("Invalid source coordinates "
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+ plane->base.id, plane->name,
state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
@@ -996,6 +1111,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
drm_printf(p, "\trotation=%x\n", state->rotation);
+ drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
drm_printf(p, "\tcolor-encoding=%s\n",
drm_get_color_encoding_name(state->color_encoding));
drm_printf(p, "\tcolor-range=%s\n",
@@ -1120,6 +1236,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
state->private_objs[index].old_state = obj->state;
state->private_objs[index].new_state = obj_state;
state->private_objs[index].ptr = obj;
+ obj_state->state = state;
state->num_private_objs = num_objs;
@@ -1278,6 +1395,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->link_status = val;
} else if (property == config->aspect_ratio_property) {
state->picture_aspect_ratio = val;
+ } else if (property == config->content_type_property) {
+ state->content_type = val;
} else if (property == connector->scaling_mode_property) {
state->scaling_mode = val;
} else if (property == connector->content_protection_property) {
@@ -1286,10 +1405,24 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
+ } else if (property == config->writeback_fb_id_property) {
+ struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+ int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
+ if (fb)
+ drm_framebuffer_put(fb);
+ return ret;
+ } else if (property == config->writeback_out_fence_ptr_property) {
+ s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+ return set_out_fence_for_connector(state->state, connector,
+ fence_ptr);
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
} else {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
+ connector->base.id, connector->name,
+ property->base.id, property->name);
return -EINVAL;
}
@@ -1304,6 +1437,10 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ if (state->writeback_job && state->writeback_job->fb)
+ drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);
+
if (connector->funcs->atomic_print_state)
connector->funcs->atomic_print_state(p, state);
}
@@ -1363,10 +1500,17 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->link_status;
} else if (property == config->aspect_ratio_property) {
*val = state->picture_aspect_ratio;
+ } else if (property == config->content_type_property) {
+ *val = state->content_type;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
} else if (property == connector->content_protection_property) {
*val = state->content_protection;
+ } else if (property == config->writeback_fb_id_property) {
+ /* Writeback framebuffer is one-shot, write and forget */
+ *val = 0;
+ } else if (property == config->writeback_out_fence_ptr_property) {
+ *val = 0;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
@@ -1442,7 +1586,7 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
if (WARN_ON(IS_ERR(crtc_state)))
return PTR_ERR(crtc_state);
- crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
+ crtc_state->plane_mask &= ~drm_plane_mask(plane);
}
plane_state->crtc = crtc;
@@ -1452,15 +1596,16 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- crtc_state->plane_mask |= (1 << drm_plane_index(plane));
+ crtc_state->plane_mask |= drm_plane_mask(plane);
}
if (crtc)
- DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
- plane_state, crtc->base.id, crtc->name);
+ DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
+ plane->base.id, plane->name, plane_state,
+ crtc->base.id, crtc->name);
else
- DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
- plane_state);
+ DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
+ plane->base.id, plane->name, plane_state);
return 0;
}
@@ -1480,12 +1625,15 @@ void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb)
{
+ struct drm_plane *plane = plane_state->plane;
+
if (fb)
- DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
- fb->base.id, plane_state);
- else
- DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
+ fb->base.id, plane->base.id, plane->name,
plane_state);
+ else
+ DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
+ plane->base.id, plane->name, plane_state);
drm_framebuffer_assign(&plane_state->fb, fb);
}
@@ -1546,6 +1694,7 @@ int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc)
{
+ struct drm_connector *connector = conn_state->connector;
struct drm_crtc_state *crtc_state;
if (conn_state->crtc == crtc)
@@ -1556,7 +1705,7 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
conn_state->crtc);
crtc_state->connector_mask &=
- ~(1 << drm_connector_index(conn_state->connector));
+ ~drm_connector_mask(conn_state->connector);
drm_connector_put(conn_state->connector);
conn_state->crtc = NULL;
@@ -1568,15 +1717,17 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
return PTR_ERR(crtc_state);
crtc_state->connector_mask |=
- 1 << drm_connector_index(conn_state->connector);
+ drm_connector_mask(conn_state->connector);
drm_connector_get(conn_state->connector);
conn_state->crtc = crtc;
- DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
+ DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
+ connector->base.id, connector->name,
conn_state, crtc->base.id, crtc->name);
} else {
- DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
+ DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
+ connector->base.id, connector->name,
conn_state);
}
@@ -1584,6 +1735,70 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
+/*
+ * drm_atomic_get_writeback_job - return or allocate a writeback job
+ * @conn_state: Connector state to get the job for
+ *
+ * Writeback jobs have a different lifetime to the atomic state they are
+ * associated with. This convenience function takes care of allocating a job
+ * if there isn't yet one associated with the connector state, otherwise
+ * it just returns the existing job.
+ *
+ * Returns: The writeback job for the given connector state
+ */
+static struct drm_writeback_job *
+drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
+{
+ WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+
+ if (!conn_state->writeback_job)
+ conn_state->writeback_job =
+ kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
+
+ return conn_state->writeback_job;
+}
+
+/**
+ * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
+ * @conn_state: atomic state object for the connector
+ * @fb: fb to use for the connector
+ *
+ * This is used to set the framebuffer for a writeback connector, which outputs
+ * to a buffer instead of an actual physical connector.
+ * Changing the assigned framebuffer requires us to grab a reference to the new
+ * fb and drop the reference to the old fb, if there is one. This function
+ * takes care of all these details besides updating the pointer in the
+ * state object itself.
+ *
+ * Note: The only way conn_state can already have an fb set is if the commit
+ * sets the property more than once.
+ *
+ * See also: drm_writeback_connector_init()
+ *
+ * Returns: 0 on success
+ */
+int drm_atomic_set_writeback_fb_for_connector(
+ struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb)
+{
+ struct drm_writeback_job *job =
+ drm_atomic_get_writeback_job(conn_state);
+ if (!job)
+ return -ENOMEM;
+
+ drm_framebuffer_assign(&job->fb, fb);
+
+ if (fb)
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
+ fb->base.id, conn_state);
+ else
+ DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
+ conn_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
+
/**
* drm_atomic_add_affected_connectors - add connectors for crtc
* @state: atomic state
@@ -1629,7 +1844,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
*/
drm_connector_list_iter_begin(state->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
- if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector))))
+ if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
continue;
conn_state = drm_atomic_get_connector_state(state, connector);
@@ -1672,6 +1887,9 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+ DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
+ crtc->base.id, crtc->name, state);
+
drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
@@ -1702,6 +1920,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_plane_state *plane_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
@@ -1724,6 +1944,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
+ ret = drm_atomic_connector_check(conn, conn_state);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
+ conn->base.id, conn->name);
+ return ret;
+ }
+ }
+
if (config->funcs->atomic_check) {
ret = config->funcs->atomic_check(state->dev, state);
@@ -2048,45 +2277,6 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
/**
- * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
- *
- * @dev: drm device to check.
- * @plane_mask: plane mask for planes that were updated.
- * @ret: return value, can be -EDEADLK for a retry.
- *
- * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
- * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
- * is a common operation for each atomic update, so this call is split off as a
- * helper.
- */
-void drm_atomic_clean_old_fb(struct drm_device *dev,
- unsigned plane_mask,
- int ret)
-{
- struct drm_plane *plane;
-
- /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
- * locks (ie. while it is still safe to deref plane->state). We
- * need to do this here because the driver entry points cannot
- * distinguish between legacy and atomic ioctls.
- */
- drm_for_each_plane_mask(plane, dev, plane_mask) {
- if (ret == 0) {
- struct drm_framebuffer *new_fb = plane->state->fb;
- if (new_fb)
- drm_framebuffer_get(new_fb);
- plane->fb = new_fb;
- plane->crtc = plane->state->crtc;
-
- if (plane->old_fb)
- drm_framebuffer_put(plane->old_fb);
- }
- plane->old_fb = NULL;
- }
-}
-EXPORT_SYMBOL(drm_atomic_clean_old_fb);
-
-/**
* DOC: explicit fencing properties
*
* Explicit fencing allows userspace to control the buffer synchronization
@@ -2161,7 +2351,7 @@ static int setup_out_fence(struct drm_out_fence_state *fence_state,
return 0;
}
-static int prepare_crtc_signaling(struct drm_device *dev,
+static int prepare_signaling(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_mode_atomic *arg,
struct drm_file *file_priv,
@@ -2170,6 +2360,8 @@ static int prepare_crtc_signaling(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
int i, c = 0, ret;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
@@ -2235,6 +2427,45 @@ static int prepare_crtc_signaling(struct drm_device *dev,
c++;
}
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
+ struct drm_writeback_connector *wb_conn;
+ struct drm_writeback_job *job;
+ struct drm_out_fence_state *f;
+ struct dma_fence *fence;
+ s32 __user *fence_ptr;
+
+ fence_ptr = get_out_fence_for_connector(state, conn);
+ if (!fence_ptr)
+ continue;
+
+ job = drm_atomic_get_writeback_job(conn_state);
+ if (!job)
+ return -ENOMEM;
+
+ f = krealloc(*fence_state, sizeof(**fence_state) *
+ (*num_fences + 1), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ memset(&f[*num_fences], 0, sizeof(*f));
+
+ f[*num_fences].out_fence_ptr = fence_ptr;
+ *fence_state = f;
+
+ wb_conn = drm_connector_to_writeback(conn);
+ fence = drm_writeback_get_out_fence(wb_conn);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = setup_out_fence(&f[(*num_fences)++], fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+
+ job->out_fence = fence;
+ }
+
/*
* Having this flag means user mode pends on event which will never
* reach due to lack of at least one CRTC for signaling
@@ -2245,11 +2476,11 @@ static int prepare_crtc_signaling(struct drm_device *dev,
return 0;
}
-static void complete_crtc_signaling(struct drm_device *dev,
- struct drm_atomic_state *state,
- struct drm_out_fence_state *fence_state,
- unsigned int num_fences,
- bool install_fds)
+static void complete_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_out_fence_state *fence_state,
+ unsigned int num_fences,
+ bool install_fds)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -2306,9 +2537,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
unsigned int copied_objs, copied_props;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
- struct drm_plane *plane;
struct drm_out_fence_state *fence_state;
- unsigned plane_mask;
int ret = 0;
unsigned int i, j, num_fences;
@@ -2348,7 +2577,6 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
retry:
- plane_mask = 0;
copied_objs = 0;
copied_props = 0;
fence_state = NULL;
@@ -2419,17 +2647,11 @@ retry:
copied_props++;
}
- if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
- !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
- plane = obj_to_plane(obj);
- plane_mask |= (1 << drm_plane_index(plane));
- plane->old_fb = plane->fb;
- }
drm_mode_object_put(obj);
}
- ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
- &num_fences);
+ ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
+ &num_fences);
if (ret)
goto out;
@@ -2445,9 +2667,7 @@ retry:
}
out:
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
- complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
+ complete_signaling(dev, state, fence_state, num_fences, !ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
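The new connector out-fence plumbing mirrors the existing CRTC OUT_FENCE_PTR semantics: userspace hands in a pointer to an s32, the kernel pre-writes -1, and a fence fd is installed there on a successful commit. A hedged userspace sketch against libdrm, assuming the two writeback property IDs were already discovered via drmModeObjectGetProperties():

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int example_queue_writeback(int fd, uint32_t conn_id,
			    uint32_t wb_fb_prop, uint32_t wb_fence_prop,
			    uint32_t fb_id)
{
	int32_t out_fence = -1;	/* the kernel pre-writes -1, then the fd */
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, conn_id, wb_fb_prop, fb_id);
	drmModeAtomicAddProperty(req, conn_id, wb_fence_prop,
				 (uint64_t)(uintptr_t)&out_fence);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);

	/* on success, out_fence holds an fd that signals once the writeback
	 * of fb_id has completed */
	return ret ? ret : out_fence;
}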
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 130da5195f3b..80be74df7ba6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,6 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_writeback.h>
#include <linux/dma-fence.h>
#include "drm_crtc_helper_internal.h"
@@ -120,7 +121,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
new_encoder = drm_atomic_helper_best_encoder(connector);
if (new_encoder) {
- if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
+ if (encoder_mask & drm_encoder_mask(new_encoder)) {
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
new_encoder->base.id, new_encoder->name,
connector->base.id, connector->name);
@@ -128,7 +129,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
return -EINVAL;
}
- encoder_mask |= 1 << drm_encoder_index(new_encoder);
+ encoder_mask |= drm_encoder_mask(new_encoder);
}
}
@@ -154,7 +155,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
continue;
encoder = connector->state->best_encoder;
- if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
+ if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
continue;
if (!disable_conflicting_encoders) {
@@ -222,7 +223,7 @@ set_best_encoder(struct drm_atomic_state *state,
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
crtc_state->encoder_mask &=
- ~(1 << drm_encoder_index(conn_state->best_encoder));
+ ~drm_encoder_mask(conn_state->best_encoder);
}
}
@@ -233,7 +234,7 @@ set_best_encoder(struct drm_atomic_state *state,
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
crtc_state->encoder_mask |=
- 1 << drm_encoder_index(encoder);
+ drm_encoder_mask(encoder);
}
}
@@ -644,7 +645,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (ret)
return ret;
- connectors_mask += BIT(i);
+ connectors_mask |= BIT(i);
}
/*
@@ -1172,6 +1173,27 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
+static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ const struct drm_connector_helper_funcs *funcs;
+
+ funcs = connector->helper_private;
+ if (!funcs->atomic_commit)
+ continue;
+
+ if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
+ WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+ funcs->atomic_commit(connector, new_conn_state);
+ }
+ }
+}
+
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
@@ -1251,6 +1273,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_bridge_enable(encoder->bridge);
}
+
+ drm_atomic_helper_commit_writebacks(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
@@ -1426,6 +1450,8 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_fake_vblank(old_state);
+
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -1455,6 +1481,8 @@ void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_planes(dev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_fake_vblank(old_state);
+
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -1510,8 +1538,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane *plane = NULL;
+ struct drm_plane_state *old_plane_state = NULL;
+ struct drm_plane_state *new_plane_state = NULL;
const struct drm_plane_helper_funcs *funcs;
int i, n_planes = 0;
@@ -1527,7 +1556,8 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
if (n_planes != 1)
return -EINVAL;
- if (!new_plane_state->crtc)
+ if (!new_plane_state->crtc ||
+ old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
funcs = plane->helper_private;
@@ -2030,6 +2060,45 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
/**
+ * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
+ * @old_state: atomic state object with old state structures
+ *
+ * This function walks all CRTCs and fakes VBLANK events on those with
+ * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
+ * The primary users are writeback connectors working in oneshot mode: they
+ * only fire a VBLANK event when a job is queued, so any change to the
+ * pipeline that does not touch the connector would otherwise lead to
+ * timeouts when calling drm_atomic_helper_wait_for_vblanks() or
+ * drm_atomic_helper_wait_for_flip_done().
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ unsigned long flags;
+
+ if (!new_crtc_state->no_vblank)
+ continue;
+
+ spin_lock_irqsave(&old_state->dev->event_lock, flags);
+ if (new_crtc_state->event) {
+ drm_crtc_send_vblank_event(crtc,
+ new_crtc_state->event);
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
+
+/**
* drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
* @old_state: atomic state object with old state structures
*
@@ -2320,11 +2389,13 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_crtc *crtc = old_crtc_state->crtc;
struct drm_atomic_state *old_state = old_crtc_state->state;
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(old_state, crtc);
struct drm_plane *plane;
unsigned plane_mask;
plane_mask = old_crtc_state->plane_mask;
- plane_mask |= crtc->state->plane_mask;
+ plane_mask |= new_crtc_state->plane_mask;
crtc_funcs = crtc->helper_private;
if (crtc_funcs && crtc_funcs->atomic_begin)
@@ -2333,6 +2404,8 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(old_state, plane);
+ struct drm_plane_state *new_plane_state =
+ drm_atomic_get_new_plane_state(old_state, plane);
const struct drm_plane_helper_funcs *plane_funcs;
plane_funcs = plane->helper_private;
@@ -2340,13 +2413,14 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
if (!old_plane_state || !plane_funcs)
continue;
- WARN_ON(plane->state->crtc && plane->state->crtc != crtc);
+ WARN_ON(new_plane_state->crtc &&
+ new_plane_state->crtc != crtc);
- if (drm_atomic_plane_disabling(old_plane_state, plane->state) &&
+ if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
plane_funcs->atomic_disable)
plane_funcs->atomic_disable(plane, old_plane_state);
- else if (plane->state->crtc ||
- drm_atomic_plane_disabling(old_plane_state, plane->state))
+ else if (new_plane_state->crtc ||
+ drm_atomic_plane_disabling(old_plane_state, new_plane_state))
plane_funcs->atomic_update(plane, old_plane_state);
}
@@ -2795,7 +2869,7 @@ static int update_output_state(struct drm_atomic_state *state,
* resets the "link-status" property to GOOD, to force any link
* re-training. The SETCRTC ioctl does not define whether an update does
* need a full modeset or just a plane update, hence we're allowed to do
- * that. See also drm_mode_connector_set_link_status_property().
+ * that. See also drm_connector_set_link_status_property().
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
@@ -2914,7 +2988,6 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- unsigned plane_mask = 0;
int ret, i;
state = drm_atomic_state_alloc(dev);
@@ -2957,17 +3030,10 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
goto free;
drm_atomic_set_fb_for_plane(plane_state, NULL);
-
- if (clean_old_fbs) {
- plane->old_fb = plane->fb;
- plane_mask |= BIT(drm_plane_index(plane));
- }
}
ret = drm_atomic_commit(state);
free:
- if (plane_mask)
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
drm_atomic_state_put(state);
return ret;
}
@@ -3129,13 +3195,8 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
state->acquire_ctx = ctx;
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- WARN_ON(plane->crtc != new_plane_state->crtc);
- WARN_ON(plane->fb != new_plane_state->fb);
- WARN_ON(plane->old_fb);
-
+ for_each_new_plane_in_state(state, plane, new_plane_state, i)
state->planes[i].old_state = plane->state;
- }
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
state->crtcs[i].old_state = crtc->state;
@@ -3660,6 +3721,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
if (state->crtc)
drm_connector_get(connector);
state->commit = NULL;
+
+ /* Don't copy over a writeback job, they are used only once */
+ state->writeback_job = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
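Together, the fake-vblank helper and the new connector atomic_commit hook let a pipeline without real scanout complete atomic commits. A sketch under the assumption of a hardware writeback engine; the example_* names are hypothetical, and the queue call follows the initial writeback API, which takes the job pointer directly:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_writeback.h>

/* A CRTC that feeds only a writeback engine has no real scanout, hence no
 * real vblank; flagging no_vblank makes drm_atomic_helper_fake_vblank()
 * complete the pageflip event at commit time. */
static int example_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_crtc_state *state)
{
	state->no_vblank = true;
	return 0;
}

static void example_wb_atomic_commit(struct drm_connector *conn,
				     struct drm_connector_state *state)
{
	struct drm_writeback_connector *wb_conn =
		drm_connector_to_writeback(conn);

	/* program the hardware from state->writeback_job->fb here, then
	 * hand the job to the core; the queue owns it from now on */
	drm_writeback_queue_job(wb_conn, state->writeback_job);
	state->writeback_job = NULL;
}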
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
new file mode 100644
index 000000000000..baff50a4c234
--- /dev/null
+++ b/drivers/gpu/drm/drm_client.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Noralf Trønnes
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include <drm/drm_client.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * This library provides support for in-kernel clients such as fbdev and
+ * bootsplash. Currently it is only partially implemented, just enough to
+ * support fbdev.
+ *
+ * Only GEM drivers that provide a GEM-based dumb buffer with a virtual
+ * address are supported.
+ */
+
+static int drm_client_open(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_file *file;
+
+ file = drm_file_alloc(dev->primary);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&dev->filelist_mutex);
+ list_add(&file->lhead, &dev->filelist_internal);
+ mutex_unlock(&dev->filelist_mutex);
+
+ client->file = file;
+
+ return 0;
+}
+
+static void drm_client_close(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+
+ mutex_lock(&dev->filelist_mutex);
+ list_del(&client->file->lhead);
+ mutex_unlock(&dev->filelist_mutex);
+
+ drm_file_free(client->file);
+}
+
+/**
+ * drm_client_new - Create a DRM client
+ * @dev: DRM device
+ * @client: DRM client
+ * @name: Client name
+ * @funcs: DRM client functions (optional)
+ *
+ * The caller needs to hold a reference on @dev before calling this function.
+ * The client is freed when the &drm_device is unregistered. See drm_client_release().
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs)
+{
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) ||
+ !dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
+ return -ENOTSUPP;
+
+ if (funcs && !try_module_get(funcs->owner))
+ return -ENODEV;
+
+ client->dev = dev;
+ client->name = name;
+ client->funcs = funcs;
+
+ ret = drm_client_open(client);
+ if (ret)
+ goto err_put_module;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_add(&client->list, &dev->clientlist);
+ mutex_unlock(&dev->clientlist_mutex);
+
+ drm_dev_get(dev);
+
+ return 0;
+
+err_put_module:
+ if (funcs)
+ module_put(funcs->owner);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_client_new);
+
+/**
+ * drm_client_release - Release DRM client resources
+ * @client: DRM client
+ *
+ * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
+ *
+ * This function should only be called from the unregister callback. An
+ * exception is fbdev, which cannot free the buffer while userspace still has
+ * open file descriptors.
+ *
+ * Note:
+ * Clients cannot initiate a release by themselves. This is done to keep the code simple.
+ * The driver has to be unloaded before the client can be unloaded.
+ */
+void drm_client_release(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+
+ DRM_DEV_DEBUG_KMS(dev->dev, "%s\n", client->name);
+
+ drm_client_close(client);
+ drm_dev_put(dev);
+ if (client->funcs)
+ module_put(client->funcs->owner);
+}
+EXPORT_SYMBOL(drm_client_release);
+
+void drm_client_dev_unregister(struct drm_device *dev)
+{
+ struct drm_client_dev *client, *tmp;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
+ list_del(&client->list);
+ if (client->funcs && client->funcs->unregister) {
+ client->funcs->unregister(client);
+ } else {
+ drm_client_release(client);
+ kfree(client);
+ }
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+
+/**
+ * drm_client_dev_hotplug - Send hotplug event to clients
+ * @dev: DRM device
+ *
+ * This function calls the &drm_client_funcs.hotplug callback on the attached clients.
+ *
+ * drm_kms_helper_hotplug_event() calls this function, so drivers that use it
+ * don't need to call this function themselves.
+ */
+void drm_client_dev_hotplug(struct drm_device *dev)
+{
+ struct drm_client_dev *client;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list) {
+ if (!client->funcs || !client->funcs->hotplug)
+ continue;
+
+ ret = client->funcs->hotplug(client);
+ DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_dev_hotplug);
+
+void drm_client_dev_restore(struct drm_device *dev)
+{
+ struct drm_client_dev *client;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list) {
+ if (!client->funcs || !client->funcs->restore)
+ continue;
+
+ ret = client->funcs->restore(client);
+ DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+ if (!ret) /* The first one to return zero gets the privilege to restore */
+ break;
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+
+static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
+{
+ struct drm_device *dev = buffer->client->dev;
+
+ if (buffer->vaddr && dev->driver->gem_prime_vunmap)
+ dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr);
+
+ if (buffer->gem)
+ drm_gem_object_put_unlocked(buffer->gem);
+
+ if (buffer->handle)
+ drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
+
+ kfree(buffer);
+}
+
+static struct drm_client_buffer *
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+{
+ struct drm_mode_create_dumb dumb_args = { };
+ struct drm_device *dev = client->dev;
+ struct drm_client_buffer *buffer;
+ struct drm_gem_object *obj;
+ void *vaddr;
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ buffer->client = client;
+
+ dumb_args.width = width;
+ dumb_args.height = height;
+ dumb_args.bpp = drm_format_plane_cpp(format, 0) * 8;
+ ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
+ if (ret)
+ goto err_delete;
+
+ buffer->handle = dumb_args.handle;
+ buffer->pitch = dumb_args.pitch;
+
+ obj = drm_gem_object_lookup(client->file, dumb_args.handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto err_delete;
+ }
+
+ buffer->gem = obj;
+
+ /*
+ * FIXME: The dependency on GEM here isn't required, we could
+ * convert the driver handle to a dma-buf instead and use the
+ * backend-agnostic dma-buf vmap support. This would
+ * require that the handle2fd prime ioctl is reworked to pull the
+ * fd_install step out of the driver backend hooks, to make that
+ * final step optional for internal users.
+ */
+ vaddr = dev->driver->gem_prime_vmap(obj);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err_delete;
+ }
+
+ buffer->vaddr = vaddr;
+
+ return buffer;
+
+err_delete:
+ drm_client_buffer_delete(buffer);
+
+ return ERR_PTR(ret);
+}
+
+static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
+{
+ int ret;
+
+ if (!buffer->fb)
+ return;
+
+ ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
+ if (ret)
+ DRM_DEV_ERROR(buffer->client->dev->dev,
+ "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
+
+ buffer->fb = NULL;
+}
+
+static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
+ u32 width, u32 height, u32 format)
+{
+ struct drm_client_dev *client = buffer->client;
+ struct drm_mode_fb_cmd fb_req = { };
+ const struct drm_format_info *info;
+ int ret;
+
+ info = drm_format_info(format);
+ fb_req.bpp = info->cpp[0] * 8;
+ fb_req.depth = info->depth;
+ fb_req.width = width;
+ fb_req.height = height;
+ fb_req.handle = buffer->handle;
+ fb_req.pitch = buffer->pitch;
+
+ ret = drm_mode_addfb(client->dev, &fb_req, client->file);
+ if (ret)
+ return ret;
+
+ buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id);
+ if (WARN_ON(!buffer->fb))
+ return -ENOENT;
+
+ /* drop the reference we picked up in framebuffer lookup */
+ drm_framebuffer_put(buffer->fb);
+
+ strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN);
+
+ return 0;
+}
+
+/**
+ * drm_client_framebuffer_create - Create a client framebuffer
+ * @client: DRM client
+ * @width: Framebuffer width
+ * @height: Framebuffer height
+ * @format: Buffer format
+ *
+ * This function creates a &drm_client_buffer which consists of a
+ * &drm_framebuffer backed by a dumb buffer.
+ * Call drm_client_framebuffer_delete() to free the buffer.
+ *
+ * Returns:
+ * Pointer to a client buffer or an error pointer on failure.
+ */
+struct drm_client_buffer *
+drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+{
+ struct drm_client_buffer *buffer;
+ int ret;
+
+ buffer = drm_client_buffer_create(client, width, height, format);
+ if (IS_ERR(buffer))
+ return buffer;
+
+ ret = drm_client_buffer_addfb(buffer, width, height, format);
+ if (ret) {
+ drm_client_buffer_delete(buffer);
+ return ERR_PTR(ret);
+ }
+
+ return buffer;
+}
+EXPORT_SYMBOL(drm_client_framebuffer_create);
+
+/**
+ * drm_client_framebuffer_delete - Delete a client framebuffer
+ * @buffer: DRM client buffer (can be NULL)
+ */
+void drm_client_framebuffer_delete(struct drm_client_buffer *buffer)
+{
+ if (!buffer)
+ return;
+
+ drm_client_buffer_rmfb(buffer);
+ drm_client_buffer_delete(buffer);
+}
+EXPORT_SYMBOL(drm_client_framebuffer_delete);
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_client_dev *client;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list)
+ drm_printf(&p, "%s\n", client->name);
+ mutex_unlock(&dev->clientlist_mutex);
+
+ return 0;
+}
+
+static const struct drm_info_list drm_client_debugfs_list[] = {
+ { "internal_clients", drm_client_debugfs_internal_clients, 0 },
+};
+
+int drm_client_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(drm_client_debugfs_list,
+ ARRAY_SIZE(drm_client_debugfs_list),
+ minor->debugfs_root, minor);
+}
+#endif
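A minimal sketch of how an in-kernel client might sit on top of this library. The example_* names are hypothetical; error handling leans on the core, which releases and kfree()s funcs-less clients at device unregister:

#include <linux/err.h>
#include <linux/slab.h>

#include <drm/drm_client.h>
#include <drm/drm_fourcc.h>

struct example_client {
	struct drm_client_dev client;	/* must stay first: the core kfree()s
					 * this pointer on unregister when no
					 * funcs are supplied */
	struct drm_client_buffer *buffer;
};

static int example_client_setup(struct drm_device *dev)
{
	struct example_client *ec;
	int ret;

	ec = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec)
		return -ENOMEM;

	/* registers the client and opens an internal drm_file */
	ret = drm_client_new(dev, &ec->client, "example", NULL);
	if (ret) {
		kfree(ec);
		return ret;
	}

	/* dumb buffer + framebuffer + kernel virtual address in one call;
	 * on error the registered client is torn down at device unregister */
	ec->buffer = drm_client_framebuffer_create(&ec->client, 1024, 768,
						   DRM_FORMAT_XRGB8888);
	if (IS_ERR(ec->buffer))
		return PTR_ERR(ec->buffer);

	/* draw into ec->buffer->vaddr; attach ec->buffer->fb to a plane
	 * through the usual modeset paths */
	return 0;
}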
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9b9ba5d5ec0c..6011d769d50b 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -48,7 +48,7 @@
*
* Connectors must be attached to an encoder to be used. For devices that map
* connectors to encoders 1:1, the connector should be attached at
- * initialization time with a call to drm_mode_connector_attach_encoder(). The
+ * initialization time with a call to drm_connector_attach_encoder(). The
* driver must also set the &drm_connector.encoder field to point to the
* attached encoder.
*
@@ -87,6 +87,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
+ { DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
};
void drm_connector_ida_init(void)
@@ -195,6 +196,10 @@ int drm_connector_init(struct drm_device *dev,
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
ret = __drm_mode_object_add(dev, &connector->base,
DRM_MODE_OBJECT_CONNECTOR,
false, drm_connector_free);
@@ -249,7 +254,8 @@ int drm_connector_init(struct drm_device *dev,
config->num_connector++;
spin_unlock_irq(&config->connector_list_lock);
- if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
+ connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
drm_object_attach_property(&connector->base,
config->edid_property,
0);
@@ -285,7 +291,7 @@ out_put:
EXPORT_SYMBOL(drm_connector_init);
/**
- * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * drm_connector_attach_encoder - attach a connector to an encoder
* @connector: connector to attach
* @encoder: encoder to attach @connector to
*
@@ -296,8 +302,8 @@ EXPORT_SYMBOL(drm_connector_init);
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
+int drm_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
{
int i;
@@ -315,7 +321,7 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
if (WARN_ON(connector->encoder))
return -EINVAL;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) {
if (connector->encoder_ids[i] == 0) {
connector->encoder_ids[i] = encoder->base.id;
return 0;
@@ -323,7 +329,30 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
}
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+EXPORT_SYMBOL(drm_connector_attach_encoder);
+
+/**
+ * drm_connector_has_possible_encoder - check if the connector and encoder are associated with each other
+ * @connector: the connector
+ * @encoder: the encoder
+ *
+ * Returns:
+ * True if @encoder is one of the possible encoders for @connector.
+ */
+bool drm_connector_has_possible_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ struct drm_encoder *enc;
+ int i;
+
+ drm_connector_for_each_possible_encoder(connector, enc, i) {
+ if (enc == encoder)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_connector_has_possible_encoder);
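+
+/*
+ * Usage sketch (hypothetical driver code, not part of this patch): a
+ * driver's &drm_connector_helper_funcs.best_encoder hook could use the new
+ * helper to pick the first encoder actually wired to the connector.
+ * "foo_best_encoder" is an illustrative name; assumes <drm/drm_encoder.h>
+ * for drm_for_each_encoder().
+ */
+static struct drm_encoder *foo_best_encoder(struct drm_connector *connector)
+{
+        struct drm_encoder *encoder;
+
+        drm_for_each_encoder(encoder, connector->dev) {
+                if (drm_connector_has_possible_encoder(connector, encoder))
+                        return encoder;
+        }
+
+        return NULL;
+}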
static void drm_mode_remove(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -577,7 +606,7 @@ __drm_connector_put_safe(struct drm_connector *conn)
/**
* drm_connector_list_iter_next - return next connector
- * @iter: connectr_list iterator
+ * @iter: connector_list iterator
*
* Returns the next connector for @iter, or NULL when the list walk has
* completed.
@@ -720,6 +749,14 @@ static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
};
+static const struct drm_prop_enum_list drm_content_type_enum_list[] = {
+ { DRM_MODE_CONTENT_TYPE_NO_DATA, "No Data" },
+ { DRM_MODE_CONTENT_TYPE_GRAPHICS, "Graphics" },
+ { DRM_MODE_CONTENT_TYPE_PHOTO, "Photo" },
+ { DRM_MODE_CONTENT_TYPE_CINEMA, "Cinema" },
+ { DRM_MODE_CONTENT_TYPE_GAME, "Game" },
+};
+
static const struct drm_prop_enum_list drm_panel_orientation_enum_list[] = {
{ DRM_MODE_PANEL_ORIENTATION_NORMAL, "Normal" },
{ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, "Upside Down" },
@@ -777,7 +814,7 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* Blob property which contains the current EDID read from the sink. This
* is useful to parse sink identification information like vendor, model
* and serial. Drivers should update this property by calling
- * drm_mode_connector_update_edid_property(), usually after having parsed
+ * drm_connector_update_edid_property(), usually after having parsed
* the EDID using drm_add_edid_modes(). Userspace cannot change this
* property.
* DPMS:
@@ -815,7 +852,7 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* PATH:
* Connector path property to identify how this sink is physically
* connected. Used by DP MST. This should be set by calling
- * drm_mode_connector_set_path_property(), in the case of DP MST with the
+ * drm_connector_set_path_property(), in the case of DP MST with the
* path property the MST manager created. Userspace cannot change this
* property.
* TILE:
@@ -826,14 +863,14 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* are not gen-locked. Note that for tiled panels which are genlocked, like
* dual-link LVDS or dual-link DSI, the driver should try to not expose the
* tiling and virtualize both &drm_crtc and &drm_plane if needed. Drivers
- * should update this value using drm_mode_connector_set_tile_property().
+ * should update this value using drm_connector_set_tile_property().
* Userspace cannot change this property.
* link-status:
* Connector link-status property to indicate the status of link. The
* default value of link-status is "GOOD". If something fails during or
* after modeset, the kernel driver may set this to "BAD" and issue a
* hotplug uevent. Drivers should update this value using
- * drm_mode_connector_set_link_status_property().
+ * drm_connector_set_link_status_property().
* non_desktop:
* Indicates the output should be ignored for purposes of displaying a
* standard desktop environment or console. This is most likely because
@@ -997,6 +1034,82 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
/**
+ * DOC: HDMI connector properties
+ *
+ * content type (HDMI specific):
+ * Indicates the content type setting to be used in HDMI infoframes to
+ * signal the content type to the external device, so that it adjusts its
+ * display settings accordingly.
+ *
+ * The value of this property can be one of the following:
+ *
+ * No Data:
+ * Content type is unknown
+ * Graphics:
+ * Content type is graphics
+ * Photo:
+ * Content type is photo
+ * Cinema:
+ * Content type is cinema
+ * Game:
+ * Content type is game
+ *
+ * Drivers can set up this property by calling
+ * drm_connector_attach_content_type_property(). Decoding to
+ * infoframe values is done through drm_hdmi_avi_infoframe_content_type().
+ */
+
+/**
+ * drm_connector_attach_content_type_property - attach content-type property
+ * @connector: connector to attach content type property on.
+ *
+ * Called by a driver the first time an HDMI connector is created.
+ */
+int drm_connector_attach_content_type_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_content_type_property(connector->dev))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.content_type_property,
+ DRM_MODE_CONTENT_TYPE_NO_DATA);
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_content_type_property);
+
+/**
+ * drm_hdmi_avi_infoframe_content_type() - fill the HDMI AVI infoframe
+ * content type information, based on the corresponding DRM property.
+ * @frame: HDMI AVI infoframe
+ * @conn_state: DRM display connector state
+ */
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ switch (conn_state->content_type) {
+ case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ break;
+ case DRM_MODE_CONTENT_TYPE_CINEMA:
+ frame->content_type = HDMI_CONTENT_TYPE_CINEMA;
+ break;
+ case DRM_MODE_CONTENT_TYPE_GAME:
+ frame->content_type = HDMI_CONTENT_TYPE_GAME;
+ break;
+ case DRM_MODE_CONTENT_TYPE_PHOTO:
+ frame->content_type = HDMI_CONTENT_TYPE_PHOTO;
+ break;
+ default:
+ /* Graphics is the default (0) */
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ }
+
+ frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
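+
+/*
+ * Usage sketch (hypothetical driver glue, illustrative "foo_" names only):
+ * attach the property once at connector creation, then fold the user's
+ * choice into the AVI infoframe on every modeset. Assumes the existing
+ * drm_hdmi_avi_infoframe_from_display_mode() helper.
+ */
+static int foo_connector_init(struct drm_connector *connector)
+{
+        /* exposes "content type" to userspace, defaulting to No Data */
+        return drm_connector_attach_content_type_property(connector);
+}
+
+static void foo_fill_avi_infoframe(struct hdmi_avi_infoframe *frame,
+                                   const struct drm_display_mode *mode,
+                                   const struct drm_connector_state *conn_state)
+{
+        drm_hdmi_avi_infoframe_from_display_mode(frame, mode, false);
+        /* translates the property value into the ITC/CN infoframe bits */
+        drm_hdmi_avi_infoframe_content_type(frame, conn_state);
+}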
+
+/**
* drm_create_tv_properties - create TV specific connector properties
* @dev: DRM device
* @num_modes: number of different TV formats (modes) supported
@@ -1261,6 +1374,33 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
/**
+ * drm_mode_create_content_type_property - create content type property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it is needed; the resulting property
+ * must then be attached to the desired connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_content_type_property(struct drm_device *dev)
+{
+ if (dev->mode_config.content_type_property)
+ return 0;
+
+ dev->mode_config.content_type_property =
+ drm_property_create_enum(dev, 0, "content type",
+ drm_content_type_enum_list,
+ ARRAY_SIZE(drm_content_type_enum_list));
+
+ if (dev->mode_config.content_type_property == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_content_type_property);
+
+/**
 * drm_mode_create_suggested_offset_properties - create suggested offset properties
* @dev: DRM device
*
@@ -1285,7 +1425,7 @@ int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
/**
- * drm_mode_connector_set_path_property - set tile property on connector
+ * drm_connector_set_path_property - set path property on connector
* @connector: connector to set property on.
* @path: path to use for property; must not be NULL.
*
@@ -1297,8 +1437,8 @@ EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
- const char *path)
+int drm_connector_set_path_property(struct drm_connector *connector,
+ const char *path)
{
struct drm_device *dev = connector->dev;
int ret;
@@ -1311,10 +1451,10 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
dev->mode_config.path_property);
return ret;
}
-EXPORT_SYMBOL(drm_mode_connector_set_path_property);
+EXPORT_SYMBOL(drm_connector_set_path_property);
/**
- * drm_mode_connector_set_tile_property - set tile property on connector
+ * drm_connector_set_tile_property - set tile property on connector
* @connector: connector to set property on.
*
* This looks up the tile information for a connector, and creates a
@@ -1324,7 +1464,7 @@ EXPORT_SYMBOL(drm_mode_connector_set_path_property);
* Returns:
* Zero on success, errno on failure.
*/
-int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+int drm_connector_set_tile_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
char tile[256];
@@ -1354,10 +1494,10 @@ int drm_mode_connector_set_tile_property(struct drm_connector *connector)
dev->mode_config.tile_property);
return ret;
}
-EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+EXPORT_SYMBOL(drm_connector_set_tile_property);
/**
- * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * drm_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
* @edid: new value of the edid property
*
@@ -1367,8 +1507,8 @@ EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- const struct edid *edid)
+int drm_connector_update_edid_property(struct drm_connector *connector,
+ const struct edid *edid)
{
struct drm_device *dev = connector->dev;
size_t size = 0;
@@ -1406,10 +1546,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
dev->mode_config.edid_property);
return ret;
}
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+EXPORT_SYMBOL(drm_connector_update_edid_property);
/**
- * drm_mode_connector_set_link_status_property - Set link status property of a connector
+ * drm_connector_set_link_status_property - Set link status property of a connector
* @connector: drm connector
* @link_status: new value of link status property (0: Good, 1: Bad)
*
@@ -1427,8 +1567,8 @@ EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
* it is not limited to DP or link training. For example, if we implement
* asynchronous setcrtc, this property can be used to report any failures in that.
*/
-void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
- uint64_t link_status)
+void drm_connector_set_link_status_property(struct drm_connector *connector,
+ uint64_t link_status)
{
struct drm_device *dev = connector->dev;
@@ -1436,7 +1576,7 @@ void drm_mode_connector_set_link_status_property(struct drm_connector *connector
connector->state->link_status = link_status;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
-EXPORT_SYMBOL(drm_mode_connector_set_link_status_property);
+EXPORT_SYMBOL(drm_connector_set_link_status_property);
/**
* drm_connector_init_panel_orientation_property -
@@ -1489,7 +1629,7 @@ int drm_connector_init_panel_orientation_property(
}
EXPORT_SYMBOL(drm_connector_init_panel_orientation_property);
-int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
@@ -1507,8 +1647,8 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
struct drm_mode_connector_set_property *conn_set_prop = data;
struct drm_mode_obj_set_property obj_set_prop = {
@@ -1589,22 +1729,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector)
return -ENOENT;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
- if (connector->encoder_ids[i] != 0)
- encoders_count++;
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ encoders_count++;
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- if (put_user(connector->encoder_ids[i],
- encoder_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ if (put_user(encoder->base.id, encoder_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
out_resp->count_encoders = encoders_count;
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 3c4000facb36..f973d287696a 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -372,7 +372,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
ctx->handle = drm_legacy_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle == -1) {
+ if (ctx->handle < 0) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 98a36e6c69ad..bae43938c8f6 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -225,16 +225,9 @@ static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
return crtc->timeline_name;
}
-static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static const struct dma_fence_ops drm_crtc_fence_ops = {
.get_driver_name = drm_crtc_fence_get_driver_name,
.get_timeline_name = drm_crtc_fence_get_timeline_name,
- .enable_signaling = drm_crtc_fence_enable_signaling,
- .wait = dma_fence_default_wait,
};
struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
@@ -286,6 +279,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (WARN_ON(config->num_crtc >= 32))
return -EINVAL;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
crtc->dev = dev;
crtc->funcs = funcs;
@@ -325,9 +322,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->primary = primary;
crtc->cursor = cursor;
if (primary && !primary->possible_crtcs)
- primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+ primary->possible_crtcs = drm_crtc_mask(crtc);
if (cursor && !cursor->possible_crtcs)
- cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
+ cursor->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_crtc_crc_init(crtc);
if (ret) {
@@ -464,32 +461,42 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
struct drm_crtc *tmp;
int ret;
+ WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev));
+
/*
* NOTE: ->set_config can also disable other crtcs (if we steal all
* connectors from it), hence we need to refcount the fbs across all
* crtcs. Atomic modeset will have saner semantics ...
*/
- drm_for_each_crtc(tmp, crtc->dev)
- tmp->primary->old_fb = tmp->primary->fb;
+ drm_for_each_crtc(tmp, crtc->dev) {
+ struct drm_plane *plane = tmp->primary;
+
+ plane->old_fb = plane->fb;
+ }
fb = set->fb;
ret = crtc->funcs->set_config(set, ctx);
if (ret == 0) {
- crtc->primary->crtc = fb ? crtc : NULL;
- crtc->primary->fb = fb;
+ struct drm_plane *plane = crtc->primary;
+
+ plane->crtc = fb ? crtc : NULL;
+ plane->fb = fb;
}
drm_for_each_crtc(tmp, crtc->dev) {
- if (tmp->primary->fb)
- drm_framebuffer_get(tmp->primary->fb);
- if (tmp->primary->old_fb)
- drm_framebuffer_put(tmp->primary->old_fb);
- tmp->primary->old_fb = NULL;
+ struct drm_plane *plane = tmp->primary;
+
+ if (plane->fb)
+ drm_framebuffer_get(plane->fb);
+ if (plane->old_fb)
+ drm_framebuffer_put(plane->old_fb);
+ plane->old_fb = NULL;
}
return ret;
}
+
/**
* drm_mode_set_config_internal - helper to call &drm_mode_config_funcs.set_config
* @set: modeset config to set
@@ -640,7 +647,9 @@ retry:
ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
if (ret) {
- DRM_DEBUG_KMS("Invalid mode\n");
+ DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n",
+ ret, drm_get_mode_status_name(mode->status));
+ drm_mode_debug_printmodeline(mode);
goto out;
}
@@ -732,7 +741,11 @@ retry:
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.fb = fb;
- ret = __drm_mode_set_config_internal(&set, &ctx);
+
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = crtc->funcs->set_config(&set, &ctx);
+ else
+ ret = __drm_mode_set_config_internal(&set, &ctx);
out:
if (fb)
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 5d307b23a4e6..b61322763394 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -56,12 +56,21 @@ int drm_mode_setcrtc(struct drm_device *dev,
int drm_modeset_register_all(struct drm_device *dev);
void drm_modeset_unregister_all(struct drm_device *dev);
+/* drm_modes.c */
+const char *drm_get_mode_status_name(enum drm_mode_status status);
+
/* IOCTLs */
int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
/* drm_dumb_buffers.c */
+int drm_mode_create_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ struct drm_file *file_priv);
+int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+ struct drm_file *file_priv);
+
/* IOCTLs */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@@ -139,7 +148,7 @@ void drm_connector_ida_init(void);
void drm_connector_ida_destroy(void);
void drm_connector_unregister_all(struct drm_device *dev);
int drm_connector_register_all(struct drm_device *dev);
-int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value);
int drm_connector_create_standard_properties(struct drm_device *dev);
@@ -147,8 +156,8 @@ const char *drm_get_connector_force_name(enum drm_connector_force force);
void drm_connector_free_work_fn(struct work_struct *work);
/* IOCTL */
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+int drm_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_getconnector(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@@ -163,14 +172,19 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
const struct drm_framebuffer *fb);
void drm_fb_release(struct drm_file *file_priv);
+int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
+ struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
+ struct drm_file *file_priv);
+
/* IOCTL */
-int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+int drm_mode_addfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+int drm_mode_rmfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index b2482818fee8..6f28fe58f169 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <drm/drm_client.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
#include <drm/drm_atomic.h>
@@ -164,6 +165,12 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
DRM_ERROR("Failed to create framebuffer debugfs file\n");
return ret;
}
+
+ ret = drm_client_debugfs_init(minor);
+ if (ret) {
+ DRM_ERROR("Failed to create client debugfs file\n");
+ return ret;
+ }
}
if (dev->driver->debugfs_init) {
@@ -307,13 +314,13 @@ static ssize_t edid_write(struct file *file, const char __user *ubuf,
if (len == 5 && !strncmp(buf, "reset", 5)) {
connector->override_edid = false;
- ret = drm_mode_connector_update_edid_property(connector, NULL);
+ ret = drm_connector_update_edid_property(connector, NULL);
} else if (len < EDID_LENGTH ||
EDID_LENGTH * (1 + edid->extensions) > len)
ret = -EINVAL;
else {
connector->override_edid = false;
- ret = drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_connector_update_edid_property(connector, edid);
if (!ret)
connector->override_edid = true;
}
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 9f8312137cad..99961192bf03 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -139,6 +139,7 @@ static int crtc_crc_data_count(struct drm_crtc_crc *crc)
static void crtc_crc_cleanup(struct drm_crtc_crc *crc)
{
kfree(crc->entries);
+ crc->overflow = false;
crc->entries = NULL;
crc->head = 0;
crc->tail = 0;
@@ -391,8 +392,14 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
tail = crc->tail;
if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
+ bool was_overflow = crc->overflow;
+
+ crc->overflow = true;
spin_unlock(&crc->lock);
- DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+
+ if (!was_overflow)
+ DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+
return -ENOBUFS;
}
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
new file mode 100644
index 000000000000..988513346e9c
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DisplayPort CEC-Tunneling-over-AUX support
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <drm/drm_dp_helper.h>
+#include <media/cec.h>
+
+/*
+ * Unfortunately it turns out that we have a chicken-and-egg situation
+ * here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
+ * have a converter chip that supports CEC-Tunneling-over-AUX (usually the
+ * Parade PS176), but they do not wire up the CEC pin, thus making CEC
+ * useless.
+ *
+ * Sadly there is no way for this driver to know this. What happens is
+ * that a /dev/cecX device is created that is isolated and unable to see
+ * any of the other CEC devices. Quite literally the CEC wire is cut
+ * (or in this case, never connected in the first place).
+ *
+ * The reason so few adapters support this is that this tunneling protocol
+ * was never supported by any OS. So there was no easy way of testing it,
+ * and no incentive to correctly wire up the CEC pin.
+ *
+ * Hopefully by creating this driver it will be easier for vendors to
+ * finally fix their adapters and test the CEC functionality.
+ *
+ * I keep a list of known working adapters here:
+ *
+ * https://hverkuil.home.xs4all.nl/cec-status.txt
+ *
+ * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
+ * and is not yet listed there.
+ *
+ * Note that the current implementation does not support CEC over an MST hub.
+ * As far as I can see there is no mechanism defined in the DisplayPort
+ * standard to transport CEC interrupts over an MST device. It might be
+ * possible to do this through polling, but I have not been able to get that
+ * to work.
+ */
+
+/**
+ * DOC: dp cec helpers
+ *
+ * These functions take care of supporting the CEC-Tunneling-over-AUX
+ * feature of DisplayPort-to-HDMI adapters.
+ */
+
+/*
+ * When the EDID is unset because the HPD went low, the CEC DPCD registers
+ * typically can no longer be read (true for a DP-to-HDMI adapter since it is
+ * powered by the HPD). However, some displays toggle the HPD off and on for a
+ * short period for one reason or another, and that would cause the CEC adapter
+ * to be removed and added again, even though nothing else changed.
+ *
+ * This module parameter sets a delay in seconds before the CEC adapter is
+ * actually unregistered. Only if the HPD does not return within that time will
+ * the CEC adapter be unregistered.
+ *
+ * If it is set to a value >= NEVER_UNREG_DELAY, then the CEC adapter will never
+ * be unregistered for as long as the connector remains registered.
+ *
+ * If it is set to 0, then the CEC adapter will be unregistered immediately as
+ * soon as the HPD disappears.
+ *
+ * The default is one second to prevent short HPD glitches from unregistering
+ * the CEC adapter.
+ *
+ * Note that for integrated HDMI branch devices that support CEC the DPCD
+ * registers remain available even if the HPD goes low since it is not powered
+ * by the HPD. In that case the CEC adapter will never be unregistered during
+ * the lifetime of the connector. At least, this is the theory since I do not
+ * have hardware with an integrated HDMI branch device that supports CEC.
+ */
+#define NEVER_UNREG_DELAY 1000
+static unsigned int drm_dp_cec_unregister_delay = 1;
+module_param(drm_dp_cec_unregister_delay, uint, 0600);
+MODULE_PARM_DESC(drm_dp_cec_unregister_delay,
+ "CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister");
+
+static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
+ ssize_t err = 0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ return (enable && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ /* Bit 15 (logical address 15) should always be set */
+ u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST;
+ u8 mask[2];
+ ssize_t err;
+
+ if (addr != CEC_LOG_ADDR_INVALID)
+ la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
+ mask[0] = la_mask & 0xff;
+ mask[1] = la_mask >> 8;
+ err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+ return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ unsigned int retries = min(5, attempts - 1);
+ ssize_t err;
+
+ err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
+ msg->msg, msg->len);
+ if (err < 0)
+ return err;
+
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
+ (msg->len - 1) | (retries << 4) |
+ DP_CEC_TX_MESSAGE_SEND);
+ return err < 0 ? err : 0;
+}
+
+static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ ssize_t err;
+ u8 val;
+
+ if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
+ if (err >= 0) {
+ if (enable)
+ val |= DP_CEC_SNOOPING_ENABLE;
+ else
+ val &= ~DP_CEC_SNOOPING_ENABLE;
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ }
+ return (enable && err < 0) ? err : 0;
+}
+
+static void drm_dp_cec_adap_status(struct cec_adapter *adap,
+ struct seq_file *file)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ struct drm_dp_desc desc;
+ struct drm_dp_dpcd_ident *id = &desc.ident;
+
+ if (drm_dp_read_desc(aux, &desc, true))
+ return;
+ seq_printf(file, "OUI: %*phD\n",
+ (int)sizeof(id->oui), id->oui);
+ seq_printf(file, "ID: %*pE\n",
+ (int)strnlen(id->device_id, sizeof(id->device_id)),
+ id->device_id);
+ seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf);
+ /*
+ * Show this both in decimal and hex: at least one vendor
+ * always reports this in hex.
+ */
+ seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n",
+ id->sw_major_rev, id->sw_minor_rev,
+ id->sw_major_rev, id->sw_minor_rev);
+}
+
+static const struct cec_adap_ops drm_dp_cec_adap_ops = {
+ .adap_enable = drm_dp_cec_adap_enable,
+ .adap_log_addr = drm_dp_cec_adap_log_addr,
+ .adap_transmit = drm_dp_cec_adap_transmit,
+ .adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable,
+ .adap_status = drm_dp_cec_adap_status,
+};
+
+static int drm_dp_cec_received(struct drm_dp_aux *aux)
+{
+ struct cec_adapter *adap = aux->cec.adap;
+ struct cec_msg msg;
+ u8 rx_msg_info;
+ ssize_t err;
+
+ err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+ if (err < 0)
+ return err;
+
+ if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED))
+ return 0;
+
+ msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
+ err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+ if (err < 0)
+ return err;
+
+ cec_received_msg(adap, &msg);
+ return 0;
+}
+
+static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
+{
+ struct cec_adapter *adap = aux->cec.adap;
+ u8 flags;
+
+ if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+ return;
+
+ if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
+ drm_dp_cec_received(aux);
+
+ if (flags & DP_CEC_TX_MESSAGE_SENT)
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
+ else if (flags & DP_CEC_TX_LINE_ERROR)
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR |
+ CEC_TX_STATUS_MAX_RETRIES);
+ else if (flags &
+ (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES);
+ drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+}
+
+/**
+ * drm_dp_cec_irq() - handle CEC interrupt, if any
+ * @aux: DisplayPort AUX channel
+ *
+ * Should be called when handling an IRQ_HPD request. If CEC-tunneling-over-AUX
+ * is present, then it will check for a CEC_IRQ and handle it accordingly.
+ */
+void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+ u8 cec_irq;
+ int ret;
+
+ mutex_lock(&aux->cec.lock);
+ if (!aux->cec.adap)
+ goto unlock;
+
+ ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+ &cec_irq);
+ if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
+ goto unlock;
+
+ drm_dp_cec_handle_irq(aux);
+ drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_irq);
+
+static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
+{
+ u8 cap = 0;
+
+ if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+ !(cap & DP_CEC_TUNNELING_CAPABLE))
+ return false;
+ if (cec_cap)
+ *cec_cap = cap;
+ return true;
+}
+
+/*
+ * Called if the HPD was low for more than drm_dp_cec_unregister_delay
+ * seconds. This unregisters the CEC adapter.
+ */
+static void drm_dp_cec_unregister_work(struct work_struct *work)
+{
+ struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
+ cec.unregister_work.work);
+
+ mutex_lock(&aux->cec.lock);
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ mutex_unlock(&aux->cec.lock);
+}
+
+/*
+ * A new EDID is set. If there is no CEC adapter, then create one. If
+ * there was a CEC adapter, then check if the CEC adapter properties
+ * were unchanged and just update the CEC physical address. Otherwise
+ * unregister the old CEC adapter and create a new one.
+ */
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
+{
+ u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
+ unsigned int num_las = 1;
+ u8 cap;
+
+#ifndef CONFIG_MEDIA_CEC_RC
+ /*
+ * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
+ * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
+ *
+ * Do this here as well to ensure the tests against cec_caps are
+ * correct.
+ */
+ cec_caps &= ~CEC_CAP_RC;
+#endif
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+ mutex_lock(&aux->cec.lock);
+ if (!drm_dp_cec_cap(aux, &cap)) {
+ /* CEC is not supported, unregister any existing adapter */
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ goto unlock;
+ }
+
+ if (cap & DP_CEC_SNOOPING_CAPABLE)
+ cec_caps |= CEC_CAP_MONITOR_ALL;
+ if (cap & DP_CEC_MULTIPLE_LA_CAPABLE)
+ num_las = CEC_MAX_LOG_ADDRS;
+
+ if (aux->cec.adap) {
+ if (aux->cec.adap->capabilities == cec_caps &&
+ aux->cec.adap->available_log_addrs == num_las) {
+ /* Unchanged, so just set the phys addr */
+ cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+ goto unlock;
+ }
+ /*
+ * The capabilities changed, so unregister the old
+ * adapter first.
+ */
+ cec_unregister_adapter(aux->cec.adap);
+ }
+
+ /* Create a new adapter */
+ aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
+ aux, aux->cec.name, cec_caps,
+ num_las);
+ if (IS_ERR(aux->cec.adap)) {
+ aux->cec.adap = NULL;
+ goto unlock;
+ }
+ if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
+ cec_delete_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ } else {
+ /*
+ * Update the phys addr for the new CEC adapter. When called
+ * from drm_dp_cec_register_connector(), edid == NULL, so in
+ * that case the phys addr is just invalidated.
+ */
+ cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+ }
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_set_edid);
+
+/*
+ * The EDID disappeared (likely because of the HPD going down).
+ */
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+ mutex_lock(&aux->cec.lock);
+ if (!aux->cec.adap)
+ goto unlock;
+
+ cec_phys_addr_invalidate(aux->cec.adap);
+ /*
+ * We're done if we want to keep the CEC device
+ * (drm_dp_cec_unregister_delay is >= NEVER_UNREG_DELAY) or if the
+ * DPCD still indicates the CEC capability (expected for an integrated
+ * HDMI branch device).
+ */
+ if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY &&
+ !drm_dp_cec_cap(aux, NULL)) {
+ /*
+ * Unregister the CEC adapter after drm_dp_cec_unregister_delay
+ * seconds. This is to debounce short HPD off-and-on cycles from
+ * displays.
+ */
+ schedule_delayed_work(&aux->cec.unregister_work,
+ drm_dp_cec_unregister_delay * HZ);
+ }
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_unset_edid);
+
+/**
+ * drm_dp_cec_register_connector() - register a new connector
+ * @aux: DisplayPort AUX channel
+ * @name: name of the CEC device
+ * @parent: parent device
+ *
+ * A new connector was registered with associated CEC adapter name and
+ * CEC adapter parent device. After registering the name and parent
+ * drm_dp_cec_set_edid() is called to check if the connector supports
+ * CEC and to register a CEC adapter if that is the case.
+ */
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+ struct device *parent)
+{
+ WARN_ON(aux->cec.adap);
+ aux->cec.name = name;
+ aux->cec.parent = parent;
+ INIT_DELAYED_WORK(&aux->cec.unregister_work,
+ drm_dp_cec_unregister_work);
+
+ drm_dp_cec_set_edid(aux, NULL);
+}
+EXPORT_SYMBOL(drm_dp_cec_register_connector);
+
+/**
+ * drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+ if (!aux->cec.adap)
+ return;
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+}
+EXPORT_SYMBOL(drm_dp_cec_unregister_connector);
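+
+/*
+ * Usage sketch (hypothetical driver wiring; "foo_dp" and the foo_*
+ * functions are illustrative only): register the CEC adapter alongside the
+ * connector and feed it the hotplug/EDID events.
+ */
+struct foo_dp {                         /* hypothetical driver state */
+        struct drm_dp_aux aux;
+        struct drm_connector connector;
+};
+
+static void foo_dp_connector_register(struct foo_dp *foo)
+{
+        drm_dp_cec_register_connector(&foo->aux, foo->connector.name,
+                                      foo->connector.dev->dev);
+}
+
+static void foo_dp_hpd_changed(struct foo_dp *foo, const struct edid *edid)
+{
+        if (edid)
+                drm_dp_cec_set_edid(&foo->aux, edid);   /* HPD high, EDID read */
+        else
+                drm_dp_cec_unset_edid(&foo->aux);       /* HPD low */
+}
+
+/* A driver would additionally call drm_dp_cec_irq(&foo->aux) from its
+ * IRQ_HPD handler and drm_dp_cec_unregister_connector(&foo->aux) at
+ * connector teardown. */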
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a7ba602a43a8..0cccbcb2d03e 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -185,6 +185,20 @@ EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
#define AUX_RETRY_INTERVAL 500 /* us */
+static inline void
+drm_dp_dump_access(const struct drm_dp_aux *aux,
+ u8 request, uint offset, void *buffer, int ret)
+{
+ const char *arrow = request == DP_AUX_NATIVE_READ ? "->" : "<-";
+
+ if (ret > 0)
+ drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d) %*ph\n",
+ aux->name, offset, arrow, ret, min(ret, 20), buffer);
+ else
+ drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d)\n",
+ aux->name, offset, arrow, ret);
+}
+
/**
* DOC: dp helpers
*
@@ -288,10 +302,14 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
1);
if (ret != 1)
- return ret;
+ goto out;
- return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
- size);
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
+ size);
+
+out:
+ drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
+ return ret;
}
EXPORT_SYMBOL(drm_dp_dpcd_read);
@@ -312,8 +330,12 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
- return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
- size);
+ int ret;
+
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
+ size);
+ drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret);
+ return ret;
}
EXPORT_SYMBOL(drm_dp_dpcd_write);
@@ -1087,6 +1109,7 @@ static void drm_dp_aux_crc_work(struct work_struct *work)
void drm_dp_aux_init(struct drm_dp_aux *aux)
{
mutex_init(&aux->hw_mutex);
+ mutex_init(&aux->cec.lock);
INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
aux->ddc.algo = &drm_dp_i2c_algo;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 658830620ca3..7780567aa669 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1215,7 +1215,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->pdt == DP_PEER_DEVICE_SST_SINK) &&
port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
- drm_mode_connector_set_tile_property(port->connector);
+ drm_connector_set_tile_property(port->connector);
}
(*mstb->mgr->cbs->register_connector)(port->connector);
}
@@ -2559,7 +2559,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
edid = drm_edid_duplicate(port->cached_edid);
else {
edid = drm_get_edid(connector, &port->aux.ddc);
- drm_mode_connector_set_tile_property(connector);
+ drm_connector_set_tile_property(connector);
}
port->has_audio = drm_detect_monitor_audio(edid);
drm_dp_put_port(port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7af748ed1c58..ea4941da9b27 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/srcu.h>
+#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drmP.h>
@@ -53,13 +54,14 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
-"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
-"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
-"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
-"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
-"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
-"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
-"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)");
+"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
+"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
+"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
+"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
+"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
+"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
+"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
module_param_named(debug, drm_debug, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
@@ -505,6 +507,8 @@ int drm_dev_init(struct drm_device *dev,
dev->driver = driver;
INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->filelist_internal);
+ INIT_LIST_HEAD(&dev->clientlist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
@@ -514,6 +518,7 @@ int drm_dev_init(struct drm_device *dev,
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
+ mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->master_mutex);
@@ -569,6 +574,7 @@ err_minors:
err_free:
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
return ret;
@@ -603,6 +609,7 @@ void drm_dev_fini(struct drm_device *dev)
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
kfree(dev->unique);
@@ -858,6 +865,8 @@ void drm_dev_unregister(struct drm_device *dev)
dev->registered = false;
+ drm_client_dev_unregister(dev);
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_unregister_all(dev);
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 9e2ae02f31e0..81dfdd33753a 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -53,10 +53,10 @@
* a hardware-specific ioctl to allocate suitable buffer objects.
*/
-int drm_mode_create_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_create_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ struct drm_file *file_priv)
{
- struct drm_mode_create_dumb *args = data;
u32 cpp, stride, size;
if (!dev->driver->dumb_create)
@@ -92,6 +92,12 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_create(file_priv, dev, args);
}
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ return drm_mode_create_dumb(dev, data, file_priv);
+}
+
/**
* drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
* @dev: DRM device
@@ -123,17 +129,22 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
&args->offset);
}
-int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+ struct drm_file *file_priv)
{
- struct drm_mode_destroy_dumb *args = data;
-
if (!dev->driver->dumb_create)
return -ENOSYS;
if (dev->driver->dumb_destroy)
- return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+ return dev->driver->dumb_destroy(file_priv, dev, handle);
else
- return drm_gem_dumb_destroy(file_priv, dev, args->handle);
+ return drm_gem_dumb_destroy(file_priv, dev, handle);
}
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ return drm_mode_destroy_dumb(dev, args->handle, file_priv);
+}
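+
+/*
+ * Usage sketch (hypothetical in-kernel caller): with the ioctl wrappers
+ * split out, in-kernel users such as the new DRM client code can allocate
+ * and free a dumb buffer directly; "foo_alloc_dumb" and the chosen
+ * dimensions are illustrative only.
+ */
+static int foo_alloc_dumb(struct drm_device *dev, struct drm_file *file_priv)
+{
+        struct drm_mode_create_dumb args = {
+                .width = 1024,
+                .height = 768,
+                .bpp = 32,
+        };
+        int ret;
+
+        ret = drm_mode_create_dumb(dev, &args, file_priv);
+        if (ret)
+                return ret;
+
+        /* ... use args.handle, args.pitch and args.size ... */
+
+        return drm_mode_destroy_dumb(dev, args.handle, file_priv);
+}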
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a5808382bdf0..5dc742b27ca0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -163,8 +163,9 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
- /* HTC Vive VR Headset */
+ /* HTC Vive and Vive Pro VR Headsets */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+ { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
/* Oculus Rift DK1, DK2, and CV1 VR Headsets */
{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
@@ -687,562 +688,562 @@ static const struct minimode extra_modes[] = {
static const struct drm_display_mode edid_cea_modes[] = {
/* 0 - dummy, VICs start at 1 */
{ },
- /* 1 - 640x480@60Hz */
+ /* 1 - 640x480@60Hz 4:3 */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 2 - 720x480@60Hz */
+ /* 2 - 720x480@60Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 3 - 720x480@60Hz */
+ /* 3 - 720x480@60Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 4 - 1280x720@60Hz */
+ /* 4 - 1280x720@60Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 5 - 1920x1080i@60Hz */
+ /* 5 - 1920x1080i@60Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 6 - 720(1440)x480i@60Hz */
+ /* 6 - 720(1440)x480i@60Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 7 - 720(1440)x480i@60Hz */
+ /* 7 - 720(1440)x480i@60Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 8 - 720(1440)x240@60Hz */
+ /* 8 - 720(1440)x240@60Hz 4:3 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 9 - 720(1440)x240@60Hz */
+ /* 9 - 720(1440)x240@60Hz 16:9 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 10 - 2880x480i@60Hz */
+ /* 10 - 2880x480i@60Hz 4:3 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 11 - 2880x480i@60Hz */
+ /* 11 - 2880x480i@60Hz 16:9 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 12 - 2880x240@60Hz */
+ /* 12 - 2880x240@60Hz 4:3 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 13 - 2880x240@60Hz */
+ /* 13 - 2880x240@60Hz 16:9 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 14 - 1440x480@60Hz */
+ /* 14 - 1440x480@60Hz 4:3 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 15 - 1440x480@60Hz */
+ /* 15 - 1440x480@60Hz 16:9 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 16 - 1920x1080@60Hz */
+ /* 16 - 1920x1080@60Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 17 - 720x576@50Hz */
+ /* 17 - 720x576@50Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 18 - 720x576@50Hz */
+ /* 18 - 720x576@50Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 19 - 1280x720@50Hz */
+ /* 19 - 1280x720@50Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 20 - 1920x1080i@50Hz */
+ /* 20 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 21 - 720(1440)x576i@50Hz */
+ /* 21 - 720(1440)x576i@50Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 22 - 720(1440)x576i@50Hz */
+ /* 22 - 720(1440)x576i@50Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 23 - 720(1440)x288@50Hz */
+ /* 23 - 720(1440)x288@50Hz 4:3 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 24 - 720(1440)x288@50Hz */
+ /* 24 - 720(1440)x288@50Hz 16:9 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 25 - 2880x576i@50Hz */
+ /* 25 - 2880x576i@50Hz 4:3 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 26 - 2880x576i@50Hz */
+ /* 26 - 2880x576i@50Hz 16:9 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 27 - 2880x288@50Hz */
+ /* 27 - 2880x288@50Hz 4:3 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 28 - 2880x288@50Hz */
+ /* 28 - 2880x288@50Hz 16:9 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 29 - 1440x576@50Hz */
+ /* 29 - 1440x576@50Hz 4:3 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 30 - 1440x576@50Hz */
+ /* 30 - 1440x576@50Hz 16:9 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 31 - 1920x1080@50Hz */
+ /* 31 - 1920x1080@50Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 32 - 1920x1080@24Hz */
+ /* 32 - 1920x1080@24Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 33 - 1920x1080@25Hz */
+ /* 33 - 1920x1080@25Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 34 - 1920x1080@30Hz */
+ /* 34 - 1920x1080@30Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 35 - 2880x480@60Hz */
+ /* 35 - 2880x480@60Hz 4:3 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 36 - 2880x480@60Hz */
+ /* 36 - 2880x480@60Hz 16:9 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 37 - 2880x576@50Hz */
+ /* 37 - 2880x576@50Hz 4:3 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 38 - 2880x576@50Hz */
+ /* 38 - 2880x576@50Hz 16:9 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 39 - 1920x1080i@50Hz */
+ /* 39 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 40 - 1920x1080i@100Hz */
+ /* 40 - 1920x1080i@100Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 41 - 1280x720@100Hz */
+ /* 41 - 1280x720@100Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 42 - 720x576@100Hz */
+ /* 42 - 720x576@100Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 43 - 720x576@100Hz */
+ /* 43 - 720x576@100Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 44 - 720(1440)x576i@100Hz */
+ /* 44 - 720(1440)x576i@100Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 45 - 720(1440)x576i@100Hz */
+ /* 45 - 720(1440)x576i@100Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 46 - 1920x1080i@120Hz */
+ /* 46 - 1920x1080i@120Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 47 - 1280x720@120Hz */
+ /* 47 - 1280x720@120Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 48 - 720x480@120Hz */
+ /* 48 - 720x480@120Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 49 - 720x480@120Hz */
+ /* 49 - 720x480@120Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 50 - 720(1440)x480i@120Hz */
+ /* 50 - 720(1440)x480i@120Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 51 - 720(1440)x480i@120Hz */
+ /* 51 - 720(1440)x480i@120Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 52 - 720x576@200Hz */
+ /* 52 - 720x576@200Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 53 - 720x576@200Hz */
+ /* 53 - 720x576@200Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 54 - 720(1440)x576i@200Hz */
+ /* 54 - 720(1440)x576i@200Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 55 - 720(1440)x576i@200Hz */
+ /* 55 - 720(1440)x576i@200Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 56 - 720x480@240Hz */
+ /* 56 - 720x480@240Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 57 - 720x480@240Hz */
+ /* 57 - 720x480@240Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 58 - 720(1440)x480i@240Hz */
+ /* 58 - 720(1440)x480i@240Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 59 - 720(1440)x480i@240Hz */
+ /* 59 - 720(1440)x480i@240Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 60 - 1280x720@24Hz */
+ /* 60 - 1280x720@24Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 61 - 1280x720@25Hz */
+ /* 61 - 1280x720@25Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 62 - 1280x720@30Hz */
+ /* 62 - 1280x720@30Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 63 - 1920x1080@120Hz */
+ /* 63 - 1920x1080@120Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 64 - 1920x1080@100Hz */
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 64 - 1920x1080@100Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 65 - 1280x720@24Hz */
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 65 - 1280x720@24Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 66 - 1280x720@25Hz */
+ /* 66 - 1280x720@25Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 67 - 1280x720@30Hz */
+ /* 67 - 1280x720@30Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 68 - 1280x720@50Hz */
+ /* 68 - 1280x720@50Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 69 - 1280x720@60Hz */
+ /* 69 - 1280x720@60Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 70 - 1280x720@100Hz */
+ /* 70 - 1280x720@100Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 71 - 1280x720@120Hz */
+ /* 71 - 1280x720@120Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 72 - 1920x1080@24Hz */
+ /* 72 - 1920x1080@24Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 73 - 1920x1080@25Hz */
+ /* 73 - 1920x1080@25Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 74 - 1920x1080@30Hz */
+ /* 74 - 1920x1080@30Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 75 - 1920x1080@50Hz */
+ /* 75 - 1920x1080@50Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 76 - 1920x1080@60Hz */
+ /* 76 - 1920x1080@60Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 77 - 1920x1080@100Hz */
+ /* 77 - 1920x1080@100Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 78 - 1920x1080@120Hz */
+ /* 78 - 1920x1080@120Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 79 - 1680x720@24Hz */
+ /* 79 - 1680x720@24Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 80 - 1680x720@25Hz */
+ /* 80 - 1680x720@25Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
2948, 3168, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 81 - 1680x720@30Hz */
+ /* 81 - 1680x720@30Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
2420, 2640, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 82 - 1680x720@50Hz */
+ /* 82 - 1680x720@50Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 83 - 1680x720@60Hz */
+ /* 83 - 1680x720@60Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 84 - 1680x720@100Hz */
+ /* 84 - 1680x720@100Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 85 - 1680x720@120Hz */
+ /* 85 - 1680x720@120Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 86 - 2560x1080@24Hz */
+ /* 86 - 2560x1080@24Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 87 - 2560x1080@25Hz */
+ /* 87 - 2560x1080@25Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 88 - 2560x1080@30Hz */
+ /* 88 - 2560x1080@30Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 89 - 2560x1080@50Hz */
+ /* 89 - 2560x1080@50Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 90 - 2560x1080@60Hz */
+ /* 90 - 2560x1080@60Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 91 - 2560x1080@100Hz */
+ /* 91 - 2560x1080@100Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 92 - 2560x1080@120Hz */
+ /* 92 - 2560x1080@120Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 93 - 3840x2160p@24Hz 16:9 */
+ /* 93 - 3840x2160@24Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 94 - 3840x2160p@25Hz 16:9 */
+ /* 94 - 3840x2160@25Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 95 - 3840x2160p@30Hz 16:9 */
+ /* 95 - 3840x2160@30Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 96 - 3840x2160p@50Hz 16:9 */
+ /* 96 - 3840x2160@50Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 97 - 3840x2160p@60Hz 16:9 */
+ /* 97 - 3840x2160@60Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 98 - 4096x2160p@24Hz 256:135 */
+ /* 98 - 4096x2160@24Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 99 - 4096x2160p@25Hz 256:135 */
+ /* 99 - 4096x2160@25Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 100 - 4096x2160p@30Hz 256:135 */
+ /* 100 - 4096x2160@30Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 101 - 4096x2160p@50Hz 256:135 */
+ /* 101 - 4096x2160@50Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 102 - 4096x2160p@60Hz 256:135 */
+ /* 102 - 4096x2160@60Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 103 - 3840x2160p@24Hz 64:27 */
+ /* 103 - 3840x2160@24Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 104 - 3840x2160p@25Hz 64:27 */
+ /* 104 - 3840x2160@25Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 105 - 3840x2160p@30Hz 64:27 */
+ /* 105 - 3840x2160@30Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 106 - 3840x2160p@50Hz 64:27 */
+ /* 106 - 3840x2160@50Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 107 - 3840x2160p@60Hz 64:27 */
+ /* 107 - 3840x2160@60Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
@@ -4874,6 +4875,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
/*
+ * As some drivers don't support atomic, we can't use connector state.
+ * So just initialize the frame with default values, the same way
+ * it's done with the other properties here.
+ */
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ frame->itc = 0;
+
+ /*
* Populate picture aspect ratio from either
* user input (if specified) or from the CEA mode list.
*/
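
For drivers that build the AVI infoframe by hand, the defaults above land in the frame before any aspect-ratio handling. A minimal call-site sketch, assuming the three-argument form of drm_hdmi_avi_infoframe_from_display_mode() from this kernel vintage; foo_hdmi_write_infoframe() is a hypothetical driver helper::

    static void foo_hdmi_set_avi_infoframe(struct foo_hdmi *hdmi,
                                           const struct drm_display_mode *mode)
    {
        struct hdmi_avi_infoframe frame;
        u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
        ssize_t len;

        /* content_type/itc now start from the defaults added above */
        if (drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false) < 0)
            return;

        len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
        if (len < 0)
            return;

        foo_hdmi_write_infoframe(hdmi, buf, len); /* hypothetical */
    }
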
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 186d00adfb5f..9da36a6271d3 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,6 +18,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_client.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
@@ -26,11 +27,8 @@
#include <drm/drm_print.h>
#include <linux/module.h>
-#define DEFAULT_FBDEFIO_DELAY_MS 50
-
struct drm_fbdev_cma {
struct drm_fb_helper fb_helper;
- const struct drm_framebuffer_funcs *fb_funcs;
};
/**
@@ -44,36 +42,6 @@ struct drm_fbdev_cma {
*
* An fbdev framebuffer backed by cma is also available by calling
* drm_fb_cma_fbdev_init(). drm_fb_cma_fbdev_fini() tears it down.
- * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
- * set up automatically. &drm_framebuffer_funcs.dirty is called by
- * drm_fb_helper_deferred_io() in process context (&struct delayed_work).
- *
- * Example fbdev deferred io code::
- *
- * static int driver_fb_dirty(struct drm_framebuffer *fb,
- * struct drm_file *file_priv,
- * unsigned flags, unsigned color,
- * struct drm_clip_rect *clips,
- * unsigned num_clips)
- * {
- * struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
- * ... push changes ...
- * return 0;
- * }
- *
- * static struct drm_framebuffer_funcs driver_fb_funcs = {
- * .destroy = drm_gem_fb_destroy,
- * .create_handle = drm_gem_fb_create_handle,
- * .dirty = driver_fb_dirty,
- * };
- *
- * Initialize::
- *
- * fbdev = drm_fb_cma_fbdev_init_with_funcs(dev, 16,
- * dev->mode_config.num_crtc,
- * dev->mode_config.num_connector,
- * &driver_fb_funcs);
- *
*/
static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
@@ -131,236 +99,28 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
-static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- return dma_mmap_writecombine(info->device, vma, info->screen_base,
- info->fix.smem_start, info->fix.smem_len);
-}
-
-static struct fb_ops drm_fbdev_cma_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_mmap = drm_fb_cma_mmap,
-};
-
-static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- fb_deferred_io_mmap(info, vma);
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- return 0;
-}
-
-static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
- struct drm_gem_cma_object *cma_obj)
-{
- struct fb_deferred_io *fbdefio;
- struct fb_ops *fbops;
-
- /*
- * Per device structures are needed because:
- * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
- * fbdefio: individual delays
- */
- fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
- fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
- if (!fbdefio || !fbops) {
- kfree(fbdefio);
- kfree(fbops);
- return -ENOMEM;
- }
-
- /* can't be offset from vaddr since dirty() uses cma_obj */
- fbi->screen_buffer = cma_obj->vaddr;
- /* fb_deferred_io_fault() needs a physical address */
- fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
-
- *fbops = *fbi->fbops;
- fbi->fbops = fbops;
-
- fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
- fbdefio->deferred_io = drm_fb_helper_deferred_io;
- fbi->fbdefio = fbdefio;
- fb_deferred_io_init(fbi);
- fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
-
- return 0;
-}
-
-static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
-{
- if (!fbi->fbdefio)
- return;
-
- fb_deferred_io_cleanup(fbi);
- kfree(fbi->fbdefio);
- kfree(fbi->fbops);
-}
-
-static int
-drm_fbdev_cma_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
- struct drm_device *dev = helper->dev;
- struct drm_gem_cma_object *obj;
- struct drm_framebuffer *fb;
- unsigned int bytes_per_pixel;
- unsigned long offset;
- struct fb_info *fbi;
- size_t size;
- int ret;
-
- DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
-
- bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
- size = sizes->surface_width * sizes->surface_height * bytes_per_pixel;
- obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(obj))
- return -ENOMEM;
-
- fbi = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(fbi)) {
- ret = PTR_ERR(fbi);
- goto err_gem_free_object;
- }
-
- fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base,
- fbdev_cma->fb_funcs);
- if (IS_ERR(fb)) {
- dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
- ret = PTR_ERR(fb);
- goto err_fb_info_destroy;
- }
-
- helper->fb = fb;
-
- fbi->par = helper;
- fbi->flags = FBINFO_FLAG_DEFAULT;
- fbi->fbops = &drm_fbdev_cma_ops;
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
-
- offset = fbi->var.xoffset * bytes_per_pixel;
- offset += fbi->var.yoffset * fb->pitches[0];
-
- dev->mode_config.fb_base = (resource_size_t)obj->paddr;
- fbi->screen_base = obj->vaddr + offset;
- fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
- fbi->screen_size = size;
- fbi->fix.smem_len = size;
-
- if (fb->funcs->dirty) {
- ret = drm_fbdev_cma_defio_init(fbi, obj);
- if (ret)
- goto err_cma_destroy;
- }
-
- return 0;
-
-err_cma_destroy:
- drm_framebuffer_remove(fb);
-err_fb_info_destroy:
- drm_fb_helper_fini(helper);
-err_gem_free_object:
- drm_gem_object_put_unlocked(&obj->base);
- return ret;
-}
-
-static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
- .fb_probe = drm_fbdev_cma_create,
-};
-
/**
- * drm_fb_cma_fbdev_init_with_funcs() - Allocate and initialize fbdev emulation
+ * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device.
* @dev->mode_config.preferred_depth is used if this is zero.
* @max_conn_count: Maximum number of connectors.
* @dev->mode_config.num_connector is used if this is zero.
- * @funcs: Framebuffer functions, in particular a custom dirty() callback.
- * Can be NULL.
*
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs)
+int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
+ unsigned int max_conn_count)
{
struct drm_fbdev_cma *fbdev_cma;
- struct drm_fb_helper *fb_helper;
- int ret;
-
- if (!preferred_bpp)
- preferred_bpp = dev->mode_config.preferred_depth;
- if (!preferred_bpp)
- preferred_bpp = 32;
-
- if (!max_conn_count)
- max_conn_count = dev->mode_config.num_connector;
-
- fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
- if (!fbdev_cma)
- return -ENOMEM;
- fbdev_cma->fb_funcs = funcs;
- fb_helper = &fbdev_cma->fb_helper;
-
- drm_fb_helper_prepare(dev, fb_helper, &drm_fb_cma_helper_funcs);
-
- ret = drm_fb_helper_init(dev, fb_helper, max_conn_count);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "Failed to initialize fbdev helper.\n");
- goto err_free;
- }
-
- ret = drm_fb_helper_single_add_all_connectors(fb_helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "Failed to add connectors.\n");
- goto err_drm_fb_helper_fini;
- }
-
- ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "Failed to set fbdev configuration.\n");
- goto err_drm_fb_helper_fini;
- }
+ /* dev->fb_helper will indirectly point to fbdev_cma after this call */
+ fbdev_cma = drm_fbdev_cma_init(dev, preferred_bpp, max_conn_count);
+ if (IS_ERR(fbdev_cma))
+ return PTR_ERR(fbdev_cma);
return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_free:
- kfree(fbdev_cma);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init_with_funcs);
-
-/**
- * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device.
- * @dev->mode_config.preferred_depth is used if this is zero.
- * @max_conn_count: Maximum number of connectors.
- * @dev->mode_config.num_connector is used if this is zero.
- *
- * Returns:
- * Zero on success or negative error code on failure.
- */
-int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
- unsigned int max_conn_count)
-{
- return drm_fb_cma_fbdev_init_with_funcs(dev, preferred_bpp,
- max_conn_count, NULL);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
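
With the custom-funcs variant gone, adoption is a single init/fini pair around the device lifetime. A minimal sketch with a hypothetical foo driver; passing zero for the connector count falls back to the mode_config default documented above::

    static int foo_load(struct drm_device *dev)
    {
        int ret;

        ret = drm_fb_cma_fbdev_init(dev, 32, 0);
        if (ret)
            DRM_DEV_ERROR(dev->dev, "fbdev init failed: %d\n", ret);

        return 0; /* fbdev emulation failure need not be fatal */
    }

    static void foo_unload(struct drm_device *dev)
    {
        drm_fb_cma_fbdev_fini(dev);
    }
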
@@ -370,104 +130,54 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
*/
void drm_fb_cma_fbdev_fini(struct drm_device *dev)
{
- struct drm_fb_helper *fb_helper = dev->fb_helper;
-
- if (!fb_helper)
- return;
-
- /* Unregister if it hasn't been done already */
- if (fb_helper->fbdev && fb_helper->fbdev->dev)
- drm_fb_helper_unregister_fbi(fb_helper);
-
- if (fb_helper->fbdev)
- drm_fbdev_cma_defio_fini(fb_helper->fbdev);
-
- if (fb_helper->fb)
- drm_framebuffer_remove(fb_helper->fb);
-
- drm_fb_helper_fini(fb_helper);
- kfree(to_fbdev_cma(fb_helper));
+ if (dev->fb_helper)
+ drm_fbdev_cma_fini(to_fbdev_cma(dev->fb_helper));
}
EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_fini);
+static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+ .fb_probe = drm_fb_helper_generic_probe,
+};
+
/**
- * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
+ * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device
* @max_conn_count: Maximum number of connectors
- * @funcs: fb helper functions, in particular a custom dirty() callback
*
* Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
*/
-struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs)
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int max_conn_count)
{
struct drm_fbdev_cma *fbdev_cma;
- struct drm_fb_helper *helper;
+ struct drm_fb_helper *fb_helper;
int ret;
fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
- if (!fbdev_cma) {
- dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+ if (!fbdev_cma)
return ERR_PTR(-ENOMEM);
- }
- fbdev_cma->fb_funcs = funcs;
- helper = &fbdev_cma->fb_helper;
-
- drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
+ fb_helper = &fbdev_cma->fb_helper;
- ret = drm_fb_helper_init(dev, helper, max_conn_count);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+ ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+ if (ret)
goto err_free;
- }
-
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to add connectors.\n");
- goto err_drm_fb_helper_fini;
-
- }
- ret = drm_fb_helper_initial_config(helper, preferred_bpp);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to set initial hw configuration.\n");
- goto err_drm_fb_helper_fini;
- }
+ ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_cma_helper_funcs,
+ preferred_bpp, max_conn_count);
+ if (ret)
+ goto err_client_put;
return fbdev_cma;
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(helper);
+err_client_put:
+ drm_client_release(&fb_helper->client);
err_free:
kfree(fbdev_cma);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
-
-static const struct drm_framebuffer_funcs drm_fb_cma_funcs = {
- .destroy = drm_gem_fb_destroy,
- .create_handle = drm_gem_fb_create_handle,
-};
-
-/**
- * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device
- * @max_conn_count: Maximum number of connectors
- *
- * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
- */
-struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count)
-{
- return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
- max_conn_count,
- &drm_fb_cma_funcs);
-}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
/**
@@ -477,14 +187,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
- if (fbdev_cma->fb_helper.fbdev)
- drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
-
- if (fbdev_cma->fb_helper.fb)
- drm_framebuffer_remove(fbdev_cma->fb_helper.fb);
-
- drm_fb_helper_fini(&fbdev_cma->fb_helper);
- kfree(fbdev_cma);
+ /* All resources have now been freed by drm_fbdev_fb_destroy() */
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2ee1eaa66188..4b0dd20bccb8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
+#include <linux/dma-buf.h>
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
@@ -66,6 +67,9 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
+ * Drivers that support a dumb buffer with a virtual address and mmap support
+ * should try out the generic fbdev emulation using drm_fbdev_generic_setup().
+ *
* Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
* down by calling drm_fb_helper_fbdev_teardown().
*
@@ -368,7 +372,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
struct drm_plane *plane;
struct drm_atomic_state *state;
int i, ret;
- unsigned int plane_mask;
struct drm_modeset_acquire_ctx ctx;
drm_modeset_acquire_init(&ctx, 0);
@@ -381,7 +384,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
state->acquire_ctx = &ctx;
retry:
- plane_mask = 0;
drm_for_each_plane(plane, dev) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -391,9 +393,6 @@ retry:
plane_state->rotation = DRM_MODE_ROTATE_0;
- plane->old_fb = plane->fb;
- plane_mask |= 1 << drm_plane_index(plane);
-
/* disable non-primary: */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
continue;
@@ -430,8 +429,6 @@ retry:
ret = drm_atomic_commit(state);
out_state:
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
if (ret == -EDEADLK)
goto backoff;
@@ -745,6 +742,24 @@ static void drm_fb_helper_resume_worker(struct work_struct *work)
console_unlock();
}
+static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp;
+ void *src = fb_helper->fbdev->screen_buffer + offset;
+ void *dst = fb_helper->buffer->vaddr + offset;
+ size_t len = (clip->x2 - clip->x1) * cpp;
+ unsigned int y;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ memcpy(dst, src, len);
+ src += fb->pitches[0];
+ dst += fb->pitches[0];
+ }
+}
+
static void drm_fb_helper_dirty_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
@@ -760,8 +775,12 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
spin_unlock_irqrestore(&helper->dirty_lock, flags);
/* call dirty callback only when it has been really touched */
- if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
+ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
+ /* Generic fbdev uses a shadow buffer */
+ if (helper->buffer)
+ drm_fb_helper_dirty_blit_real(helper, &clip_copy);
helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ }
}
/**
@@ -1164,7 +1183,7 @@ EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
* @info: fbdev registered by the helper
* @rect: info about rectangle to fill
*
- * A wrapper around cfb_imageblit implemented by fbdev core
+ * A wrapper around cfb_fillrect implemented by fbdev core
*/
void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
@@ -2330,6 +2349,20 @@ retry:
return true;
}
+static bool connector_has_possible_crtc(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ int i;
+
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ if (encoder->possible_crtcs & drm_crtc_mask(crtc))
+ return true;
+ }
+
+ return false;
+}
+
static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **best_crtcs,
struct drm_display_mode **modes,
@@ -2338,7 +2371,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int c, o;
struct drm_connector *connector;
const struct drm_connector_helper_funcs *connector_funcs;
- struct drm_encoder *encoder;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
@@ -2370,27 +2402,14 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
connector_funcs = connector->helper_private;
/*
- * If the DRM device implements atomic hooks and ->best_encoder() is
- * NULL we fallback to the default drm_atomic_helper_best_encoder()
- * helper.
- */
- if (drm_drv_uses_atomic_modeset(fb_helper->dev) &&
- !connector_funcs->best_encoder)
- encoder = drm_atomic_helper_best_encoder(connector);
- else
- encoder = connector_funcs->best_encoder(connector);
-
- if (!encoder)
- goto out;
-
- /*
* select a crtc for this connector and then attempt to configure
* remaining connectors
*/
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
- if ((encoder->possible_crtcs & (1 << c)) == 0)
+ if (!connector_has_possible_crtc(connector,
+ crtc->mode_set.crtc))
continue;
for (o = 0; o < n; o++)
@@ -2417,7 +2436,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
sizeof(struct drm_fb_helper_crtc *));
}
}
-out:
+
kfree(crtcs);
return best_score;
}
@@ -2928,6 +2947,294 @@ void drm_fb_helper_output_poll_changed(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_fb_helper_output_poll_changed);
+/* @user: 1=userspace, 0=fbcon */
+static int drm_fbdev_fb_open(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (!try_module_get(fb_helper->dev->driver->fops->owner))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int drm_fbdev_fb_release(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ module_put(fb_helper->dev->driver->fops->owner);
+
+ return 0;
+}
+
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
+ * unregister_framebuffer() or fb_release().
+ */
+static void drm_fbdev_fb_destroy(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct fb_info *fbi = fb_helper->fbdev;
+ struct fb_ops *fbops = NULL;
+ void *shadow = NULL;
+
+ if (fbi->fbdefio) {
+ fb_deferred_io_cleanup(fbi);
+ shadow = fbi->screen_buffer;
+ fbops = fbi->fbops;
+ }
+
+ drm_fb_helper_fini(fb_helper);
+
+ if (shadow) {
+ vfree(shadow);
+ kfree(fbops);
+ }
+
+ drm_client_framebuffer_delete(fb_helper->buffer);
+ /*
+ * FIXME:
+ * Remove conditional when all CMA drivers have been moved over to using
+ * drm_fbdev_generic_setup().
+ */
+ if (fb_helper->client.funcs) {
+ drm_client_release(&fb_helper->client);
+ kfree(fb_helper);
+ }
+}
+
+static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (fb_helper->dev->driver->gem_prime_mmap)
+ return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
+ else
+ return -ENODEV;
+}
+
+static struct fb_ops drm_fbdev_fb_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_open = drm_fbdev_fb_open,
+ .fb_release = drm_fbdev_fb_release,
+ .fb_destroy = drm_fbdev_fb_destroy,
+ .fb_mmap = drm_fbdev_fb_mmap,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+};
+
+static struct fb_deferred_io drm_fbdev_defio = {
+ .delay = HZ / 20,
+ .deferred_io = drm_fb_helper_deferred_io,
+};
+
+/**
+ * drm_fb_helper_generic_probe - Generic fbdev emulation probe helper
+ * @fb_helper: fbdev helper structure
+ * @sizes: describes fbdev size and scanout surface size
+ *
+ * This function uses the client API to create a framebuffer backed by a dumb buffer.
+ *
+ * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect,
+ * fb_copyarea, fb_imageblit.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_client_buffer *buffer;
+ struct drm_framebuffer *fb;
+ struct fb_info *fbi;
+ u32 format;
+ int ret;
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+ buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ sizes->surface_height, format);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ fb_helper->buffer = buffer;
+ fb_helper->fb = buffer->fb;
+ fb = buffer->fb;
+
+ fbi = drm_fb_helper_alloc_fbi(fb_helper);
+ if (IS_ERR(fbi)) {
+ ret = PTR_ERR(fbi);
+ goto err_free_buffer;
+ }
+
+ fbi->par = fb_helper;
+ fbi->fbops = &drm_fbdev_fb_ops;
+ fbi->screen_size = fb->height * fb->pitches[0];
+ fbi->fix.smem_len = fbi->screen_size;
+ fbi->screen_buffer = buffer->vaddr;
+ strcpy(fbi->fix.id, "DRM emulated");
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_var(fbi, fb_helper, sizes->fb_width, sizes->fb_height);
+
+ if (fb->funcs->dirty) {
+ struct fb_ops *fbops;
+ void *shadow;
+
+ /*
+ * fb_deferred_io_cleanup() clears &fbops->fb_mmap so a per
+ * instance version is necessary.
+ */
+ fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
+ shadow = vzalloc(fbi->screen_size);
+ if (!fbops || !shadow) {
+ kfree(fbops);
+ vfree(shadow);
+ ret = -ENOMEM;
+ goto err_fb_info_destroy;
+ }
+
+ *fbops = *fbi->fbops;
+ fbi->fbops = fbops;
+ fbi->screen_buffer = shadow;
+ fbi->fbdefio = &drm_fbdev_defio;
+
+ fb_deferred_io_init(fbi);
+ }
+
+ return 0;
+
+err_fb_info_destroy:
+ drm_fb_helper_fini(fb_helper);
+err_free_buffer:
+ drm_client_framebuffer_delete(buffer);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_generic_probe);
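
Drivers that manage their own &drm_fb_helper can point fb_probe at this helper directly, mirroring what drm_fbdev_cma_init() above now does. A minimal sketch; the foo_ names are placeholders::

    static const struct drm_fb_helper_funcs foo_fb_helper_funcs = {
        .fb_probe = drm_fb_helper_generic_probe,
    };

    /* during driver setup, with fb_helper already allocated: */
    ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &foo_fb_helper_funcs,
                                    preferred_bpp, 0);
    if (ret)
        goto err;
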
+
+static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
+ .fb_probe = drm_fb_helper_generic_probe,
+};
+
+static void drm_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->fbdev) {
+ drm_fb_helper_unregister_fbi(fb_helper);
+ /* drm_fbdev_fb_destroy() takes care of cleanup */
+ return;
+ }
+
+ /* Did drm_fb_helper_fbdev_setup() run? */
+ if (fb_helper->dev)
+ drm_fb_helper_fini(fb_helper);
+
+ drm_client_release(client);
+ kfree(fb_helper);
+}
+
+static int drm_fbdev_client_restore(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+
+ return 0;
+}
+
+static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ /* If drm_fb_helper_fbdev_setup() failed, we only try once */
+ if (!fb_helper->dev && fb_helper->funcs)
+ return 0;
+
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
+
+ if (!dev->mode_config.num_connector)
+ return 0;
+
+ ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs,
+ fb_helper->preferred_bpp, 0);
+ if (ret) {
+ fb_helper->dev = NULL;
+ fb_helper->fbdev = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct drm_client_funcs drm_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = drm_fbdev_client_unregister,
+ .restore = drm_fbdev_client_restore,
+ .hotplug = drm_fbdev_client_hotplug,
+};
+
+/**
+ * drm_fbdev_generic_setup() - Setup generic fbdev emulation
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ *
+ * This function sets up generic fbdev emulation for drivers that support
+ * dumb buffers with a virtual address and that can be mmap'ed.
+ *
+ * Restore, hotplug events and teardown are all taken care of. Drivers that do
+ * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
+ * Simple drivers might use drm_mode_config_helper_suspend().
+ *
+ * Drivers that set the dirty callback on their framebuffer will get a shadow
+ * fbdev buffer that is blitted onto the real buffer. This is done in order to
+ * make deferred I/O work with all kinds of buffers.
+ *
+ * This function is safe to call even when there are no connectors present.
+ * Setup will be retried on the next hotplug event.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ if (!drm_fbdev_emulation)
+ return 0;
+
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return -ENOMEM;
+
+ ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+ if (ret) {
+ kfree(fb_helper);
+ return ret;
+ }
+
+ fb_helper->preferred_bpp = preferred_bpp;
+
+ drm_fbdev_client_hotplug(&fb_helper->client);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fbdev_generic_setup);
+
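
Taken together with the client hooks above, driver-side adoption reduces to one call after registration. A minimal sketch; the foo_ names and the elided device setup are placeholders::

    static int foo_probe(struct platform_device *pdev)
    {
        struct drm_device *drm;
        int ret;

        /* ... allocate and initialize the DRM device ... */

        ret = drm_dev_register(drm, 0);
        if (ret)
            return ret;

        /* safe with zero connectors; setup is retried on hotplug */
        drm_fbdev_generic_setup(drm, 32);

        return 0;
    }
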
/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 6d9b9453707c..ffa8dc35515f 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <drm/drm_client.h>
#include <drm/drm_file.h>
#include <drm/drmP.h>
@@ -101,6 +102,166 @@ DEFINE_MUTEX(drm_global_mutex);
static int drm_open_helper(struct file *filp, struct drm_minor *minor);
+/**
+ * drm_file_alloc - allocate file context
+ * @minor: minor to allocate on
+ *
+ * This allocates a new DRM file context. It is not linked into any context and
+ * can be used by the caller freely. Note that the context keeps a pointer to
+ * @minor, so it must be freed before @minor is.
+ *
+ * RETURNS:
+ * Pointer to newly allocated context, ERR_PTR on failure.
+ */
+struct drm_file *drm_file_alloc(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct drm_file *file;
+ int ret;
+
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
+ if (!file)
+ return ERR_PTR(-ENOMEM);
+
+ file->pid = get_pid(task_pid(current));
+ file->minor = minor;
+
+ /* for compatibility root is always authenticated */
+ file->authenticated = capable(CAP_SYS_ADMIN);
+ file->lock_count = 0;
+
+ INIT_LIST_HEAD(&file->lhead);
+ INIT_LIST_HEAD(&file->fbs);
+ mutex_init(&file->fbs_lock);
+ INIT_LIST_HEAD(&file->blobs);
+ INIT_LIST_HEAD(&file->pending_event_list);
+ INIT_LIST_HEAD(&file->event_list);
+ init_waitqueue_head(&file->event_wait);
+ file->event_space = 4096; /* set aside 4k for event buffer */
+
+ mutex_init(&file->event_read_lock);
+
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_open(dev, file);
+
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_open(file);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_init_file_private(&file->prime);
+
+ if (dev->driver->open) {
+ ret = dev->driver->open(dev, file);
+ if (ret < 0)
+ goto out_prime_destroy;
+ }
+
+ return file;
+
+out_prime_destroy:
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file->prime);
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_release(file);
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_release(dev, file);
+ put_pid(file->pid);
+ kfree(file);
+
+ return ERR_PTR(ret);
+}
+
+static void drm_events_release(struct drm_file *file_priv)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e, *et;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* Unlink pending events */
+ list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
+ pending_link) {
+ list_del(&e->pending_link);
+ e->file_priv = NULL;
+ }
+
+ /* Remove unconsumed events */
+ list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
+ list_del(&e->link);
+ kfree(e);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * drm_file_free - free file context
+ * @file: context to free, or NULL
+ *
+ * This destroys and deallocates a DRM file context previously allocated via
+ * drm_file_alloc(). The caller must make sure to unlink it from any contexts
+ * before calling this.
+ *
+ * If NULL is passed, this is a no-op.
+ */
+void drm_file_free(struct drm_file *file)
+{
+ struct drm_device *dev;
+
+ if (!file)
+ return;
+
+ dev = file->minor->dev;
+
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file->minor->kdev->devt),
+ dev->open_count);
+
+ if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
+ dev->driver->preclose)
+ dev->driver->preclose(dev, file);
+
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
+ drm_legacy_lock_release(dev, file->filp);
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ drm_legacy_reclaim_buffers(dev, file);
+
+ drm_events_release(file);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_fb_release(file);
+ drm_property_destroy_user_blobs(dev, file);
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_release(file);
+
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_release(dev, file);
+
+ drm_legacy_ctxbitmap_flush(dev, file);
+
+ if (drm_is_primary_client(file))
+ drm_master_release(file);
+
+ if (dev->driver->postclose)
+ dev->driver->postclose(dev, file);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file->prime);
+
+ WARN_ON(!list_empty(&file->event_list));
+
+ put_pid(file->pid);
+ kfree(file);
+}
+
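
The alloc/free pair gives in-kernel users a file context without going through open(2). A minimal pairing sketch, with the unlinking required by the kernel-doc above left elided::

    static int foo_with_file_context(struct drm_minor *minor)
    {
        struct drm_file *file;

        file = drm_file_alloc(minor);
        if (IS_ERR(file))
            return PTR_ERR(file);

        /* ... use the context; unlink it from any lists before freeing ... */

        drm_file_free(file);
        return 0;
    }
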
static int drm_setup(struct drm_device * dev)
{
int ret;
@@ -207,52 +368,22 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- filp->private_data = priv;
- filp->f_mode |= FMODE_UNSIGNED_OFFSET;
- priv->filp = filp;
- priv->pid = get_pid(task_pid(current));
- priv->minor = minor;
-
- /* for compatibility root is always authenticated */
- priv->authenticated = capable(CAP_SYS_ADMIN);
- priv->lock_count = 0;
-
- INIT_LIST_HEAD(&priv->lhead);
- INIT_LIST_HEAD(&priv->fbs);
- mutex_init(&priv->fbs_lock);
- INIT_LIST_HEAD(&priv->blobs);
- INIT_LIST_HEAD(&priv->pending_event_list);
- INIT_LIST_HEAD(&priv->event_list);
- init_waitqueue_head(&priv->event_wait);
- priv->event_space = 4096; /* set aside 4k for event buffer */
-
- mutex_init(&priv->event_read_lock);
-
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_open(dev, priv);
-
- if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- drm_syncobj_open(priv);
-
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_init_file_private(&priv->prime);
-
- if (dev->driver->open) {
- ret = dev->driver->open(dev, priv);
- if (ret < 0)
- goto out_prime_destroy;
- }
+ priv = drm_file_alloc(minor);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
if (drm_is_primary_client(priv)) {
ret = drm_master_open(priv);
- if (ret)
- goto out_close;
+ if (ret) {
+ drm_file_free(priv);
+ return ret;
+ }
}
+ filp->private_data = priv;
+ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+ priv->filp = filp;
+
mutex_lock(&dev->filelist_mutex);
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->filelist_mutex);
@@ -278,45 +409,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
#endif
return 0;
-
-out_close:
- if (dev->driver->postclose)
- dev->driver->postclose(dev, priv);
-out_prime_destroy:
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_destroy_file_private(&priv->prime);
- if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- drm_syncobj_release(priv);
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_release(dev, priv);
- put_pid(priv->pid);
- kfree(priv);
- filp->private_data = NULL;
- return ret;
-}
-
-static void drm_events_release(struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_pending_event *e, *et;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- /* Unlink pending events */
- list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
- pending_link) {
- list_del(&e->pending_link);
- e->file_priv = NULL;
- }
-
- /* Remove unconsumed events */
- list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
- list_del(&e->link);
- kfree(e);
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
static void drm_legacy_dev_reinit(struct drm_device *dev)
@@ -353,6 +445,8 @@ void drm_lastclose(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_dev_reinit(dev);
+
+ drm_client_dev_restore(dev);
}
/**
@@ -383,57 +477,7 @@ int drm_release(struct inode *inode, struct file *filp)
list_del(&file_priv->lhead);
mutex_unlock(&dev->filelist_mutex);
- if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
- dev->driver->preclose)
- dev->driver->preclose(dev, file_priv);
-
- /* ========================================================
- * Begin inline drm_release
- */
-
- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- dev->open_count);
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_legacy_lock_release(dev, filp);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- drm_legacy_reclaim_buffers(dev, file_priv);
-
- drm_events_release(file_priv);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_fb_release(file_priv);
- drm_property_destroy_user_blobs(dev, file_priv);
- }
-
- if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- drm_syncobj_release(file_priv);
-
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_release(dev, file_priv);
-
- drm_legacy_ctxbitmap_flush(dev, file_priv);
-
- if (drm_is_primary_client(file_priv))
- drm_master_release(file_priv);
-
- if (dev->driver->postclose)
- dev->driver->postclose(dev, file_priv);
-
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_destroy_file_private(&file_priv->prime);
-
- WARN_ON(!list_empty(&file_priv->event_list));
-
- put_pid(file_priv->pid);
- kfree(file_priv);
-
- /* ========================================================
- * End inline drm_release
- */
+ drm_file_free(file_priv);
if (!--dev->open_count) {
drm_lastclose(dev);
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 5ca6395cd4d3..35c1e2742c27 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -152,27 +152,27 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_XBGR8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
- { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
- { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
- { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
- { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
- { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
};
unsigned int i;
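The new .is_yuv flag lets callers test a single bit instead of switching on
fourcc values. A minimal sketch of the per-plane size math these fields
enable, assuming a plain linear NV12 layout (nv12_linear_size is a
hypothetical helper, not part of this patch):

#include <drm/drm_fourcc.h>

static unsigned int nv12_linear_size(unsigned int width, unsigned int height)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
	unsigned int pitch0 = width * info->cpp[0];                /* luma */
	unsigned int pitch1 = (width / info->hsub) * info->cpp[1]; /* chroma */

	WARN_ON(!info->is_yuv); /* set by the table change above */
	return pitch0 * height + pitch1 * (height / info->vsub);
}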
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index bfedceff87bb..781af1d42d76 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -95,21 +95,20 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
+ * @or: pointer to request structure
+ * @file_priv: drm file
*
* Add a new FB to the specified CRTC, given a user request. This is the
* original addfb ioctl which only supported RGB formats.
*
- * Called by the user via ioctl.
+ * Called by the user via ioctl, or by an in-kernel client.
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
+ struct drm_file *file_priv)
{
- struct drm_mode_fb_cmd *or = data;
struct drm_mode_fb_cmd2 r = {};
int ret;
@@ -134,6 +133,12 @@ int drm_mode_addfb(struct drm_device *dev,
return 0;
}
+int drm_mode_addfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ return drm_mode_addfb(dev, data, file_priv);
+}
+
static int fb_plane_width(int width,
const struct drm_format_info *format, int plane)
{
@@ -367,29 +372,28 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w)
/**
* drm_mode_rmfb - remove an FB from the configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
+ * @dev: drm device
+ * @fb_id: id of framebuffer to remove
+ * @file_priv: drm file
*
- * Remove the FB specified by the user.
+ * Remove the specified FB.
*
- * Called by the user via ioctl.
+ * Called by the user via ioctl, or by an in-kernel client.
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
+ struct drm_file *file_priv)
{
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fbl = NULL;
- uint32_t *id = data;
int found = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- fb = drm_framebuffer_lookup(dev, file_priv, *id);
+ fb = drm_framebuffer_lookup(dev, file_priv, fb_id);
if (!fb)
return -ENOENT;
@@ -435,6 +439,14 @@ fail_unref:
return -ENOENT;
}
+int drm_mode_rmfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ uint32_t *fb_id = data;
+
+ return drm_mode_rmfb(dev, *fb_id, file_priv);
+}
+
/**
* drm_mode_getfb - get FB info
* @dev: drm device for the ioctl
@@ -835,9 +847,7 @@ retry:
if (ret)
goto unlock;
- plane_mask |= BIT(drm_plane_index(plane));
-
- plane->old_fb = plane->fb;
+ plane_mask |= drm_plane_mask(plane);
}
/* This list is only filled when disable_crtcs is set. */
@@ -852,9 +862,6 @@ retry:
ret = drm_atomic_commit(state);
unlock:
- if (plane_mask)
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
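With the request passed directly instead of through the ioctl's void pointer,
an in-kernel client can add and remove framebuffers without faking an ioctl
call. A sketch, where handle is a hypothetical GEM handle already owned by
file_priv:

struct drm_mode_fb_cmd cmd = {
	.width  = 1024,
	.height = 768,
	.pitch  = 1024 * 4,
	.bpp    = 32,
	.depth  = 24,
	.handle = handle,	/* hypothetical GEM handle */
};
int ret = drm_mode_addfb(dev, &cmd, file_priv);

if (!ret) {
	/* ... scan out cmd.fb_id ... */
	drm_mode_rmfb(dev, cmd.fb_id, file_priv);
}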
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4a16d7b26c89..bf90625df3c5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1036,6 +1036,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return -EACCES;
}
+ if (node->readonly) {
+ if (vma->vm_flags & VM_WRITE) {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
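Seen from userspace, the new check behaves as sketched below; drm_fd and
mmap_offset stand in for a real device fd and the object's fake mmap offset:

/* A writable mapping of a read-only GEM object is now rejected. */
void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		 drm_fd, mmap_offset);
/* map == MAP_FAILED with errno EINVAL when node->readonly is set. */

map = mmap(NULL, size, PROT_READ, MAP_SHARED, drm_fd, mmap_offset);
/* Succeeds; with VM_MAYWRITE cleared, a later
 * mprotect(map, size, PROT_READ | PROT_WRITE) cannot upgrade it. */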
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index acfbc0641a06..2810d4131411 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -253,7 +253,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct dma_buf *dma_buf;
struct dma_fence *fence;
- if (plane->state->fb == state->fb || !state->fb)
+ if (!state->fb)
return 0;
dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index b2dc21e33ae0..5799e2782dd1 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b72242e93ea4..40179c5fc6b8 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -26,6 +26,8 @@
/* drm_file.c */
extern struct mutex drm_global_mutex;
+struct drm_file *drm_file_alloc(struct drm_minor *minor);
+void drm_file_free(struct drm_file *file);
void drm_lastclose(struct drm_device *dev);
/* drm_pci.c */
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 0d4cfb232576..ea10e9a26aad 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -334,6 +334,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->aspect_ratio_allowed = req->value;
break;
+ case DRM_CLIENT_CAP_WRITEBACK_CONNECTORS:
+ if (!file_priv->atomic)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->writeback_connectors = req->value;
+ break;
default:
return -EINVAL;
}
@@ -634,12 +641,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED),
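The ordering the new client cap enforces (atomic first) is visible from
userspace. A libdrm sketch:

/* Opt in to writeback connectors; atomic must be enabled first. */
if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1))
	return -1;
if (drmSetClientCap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1))
	return -1;	/* old kernel, or atomic not enabled */
/* drmModeGetResources() will now list writeback connectors too. */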
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 50c73c0a20b9..b54fb78a283c 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
/* Clone the lessor file to create a new file for us */
DRM_DEBUG_LEASE("Allocating lease file\n");
- path_get(&lessor_file->f_path);
- lessee_file = alloc_file(&lessor_file->f_path,
- lessor_file->f_mode,
- fops_get(lessor_file->f_inode->i_fop));
-
+ lessee_file = file_clone_open(lessor_file);
if (IS_ERR(lessee_file)) {
ret = PTR_ERR(lessee_file);
goto out_lessee;
}
- /* Initialize the new file for DRM */
- DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
- ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
- if (ret)
- goto out_lessee_file;
-
lessee_priv = lessee_file->private_data;
-
/* Change the file to a master one */
drm_master_put(&lessee_priv->master);
lessee_priv->master = lessee;
@@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
return 0;
-out_lessee_file:
- fput(lessee_file);
-
out_lessee:
drm_master_put(&lessee);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index bc73b7f5b9fc..80b75501f5c6 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -392,6 +392,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
case MIPI_DSI_DCS_READ:
+ case MIPI_DSI_DCS_COMPRESSION_MODE:
case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
return true;
}
@@ -410,6 +411,7 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
bool mipi_dsi_packet_format_is_long(u8 type)
{
switch (type) {
+ case MIPI_DSI_PPS_LONG_WRITE:
case MIPI_DSI_NULL_PACKET:
case MIPI_DSI_BLANKING_PACKET:
case MIPI_DSI_GENERIC_LONG_WRITE:
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 3166026a1874..3cc5fbd78ee2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -239,6 +239,32 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
+static u64 rb_to_hole_size(struct rb_node *rb)
+{
+ return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
+static void insert_hole_size(struct rb_root_cached *root,
+ struct drm_mm_node *node)
+{
+ struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
+ u64 x = node->hole_size;
+ bool first = true;
+
+ while (*link) {
+ rb = *link;
+ if (x > rb_to_hole_size(rb)) {
+ link = &rb->rb_left;
+ } else {
+ link = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb_hole_size, rb, link);
+ rb_insert_color_cached(&node->rb_hole_size, root, first);
+}
+
static void add_hole(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
@@ -247,7 +273,7 @@ static void add_hole(struct drm_mm_node *node)
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
- RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+ insert_hole_size(&mm->holes_size, node);
RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
list_add(&node->hole_stack, &mm->hole_stack);
@@ -258,7 +284,7 @@ static void rm_hole(struct drm_mm_node *node)
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
list_del(&node->hole_stack);
- rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+ rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
node->hole_size = 0;
@@ -282,38 +308,39 @@ static inline u64 rb_hole_size(struct rb_node *rb)
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
- struct rb_node *best = NULL;
- struct rb_node **link = &mm->holes_size.rb_node;
+ struct rb_node *rb = mm->holes_size.rb_root.rb_node;
+ struct drm_mm_node *best = NULL;
- while (*link) {
- struct rb_node *rb = *link;
+ do {
+ struct drm_mm_node *node =
+ rb_entry(rb, struct drm_mm_node, rb_hole_size);
- if (size <= rb_hole_size(rb)) {
- link = &rb->rb_left;
- best = rb;
+ if (size <= node->hole_size) {
+ best = node;
+ rb = rb->rb_right;
} else {
- link = &rb->rb_right;
+ rb = rb->rb_left;
}
- }
+ } while (rb);
- return rb_hole_size_to_node(best);
+ return best;
}
static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
+ struct rb_node *rb = mm->holes_addr.rb_node;
struct drm_mm_node *node = NULL;
- struct rb_node **link = &mm->holes_addr.rb_node;
- while (*link) {
+ while (rb) {
u64 hole_start;
- node = rb_hole_addr_to_node(*link);
+ node = rb_hole_addr_to_node(rb);
hole_start = __drm_mm_hole_node_start(node);
if (addr < hole_start)
- link = &node->rb_hole_addr.rb_left;
+ rb = node->rb_hole_addr.rb_left;
else if (addr > hole_start + node->hole_size)
- link = &node->rb_hole_addr.rb_right;
+ rb = node->rb_hole_addr.rb_right;
else
break;
}
@@ -326,9 +353,6 @@ first_hole(struct drm_mm *mm,
u64 start, u64 end, u64 size,
enum drm_mm_insert_mode mode)
{
- if (RB_EMPTY_ROOT(&mm->holes_size))
- return NULL;
-
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
@@ -355,7 +379,7 @@ next_hole(struct drm_mm *mm,
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
- return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+ return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
case DRM_MM_INSERT_LOW:
return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
@@ -426,6 +450,11 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_reserve_node);
+static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
+{
+ return rb ? rb_to_hole_size(rb) : 0;
+}
+
/**
* drm_mm_insert_node_in_range - ranged search for space and insert @node
* @mm: drm_mm to allocate from
@@ -451,18 +480,26 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
{
struct drm_mm_node *hole;
u64 remainder_mask;
+ bool once;
DRM_MM_BUG_ON(range_start >= range_end);
if (unlikely(size == 0 || range_end - range_start < size))
return -ENOSPC;
+ if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
+ return -ENOSPC;
+
if (alignment <= 1)
alignment = 0;
+ once = mode & DRM_MM_INSERT_ONCE;
+ mode &= ~DRM_MM_INSERT_ONCE;
+
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
- for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
- hole = next_hole(mm, hole, mode)) {
+ for (hole = first_hole(mm, range_start, range_end, size, mode);
+ hole;
+ hole = once ? NULL : next_hole(mm, hole, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
@@ -587,9 +624,9 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
- rb_replace_node(&old->rb_hole_size,
- &new->rb_hole_size,
- &mm->holes_size);
+ rb_replace_node_cached(&old->rb_hole_size,
+ &new->rb_hole_size,
+ &mm->holes_size);
rb_replace_node(&old->rb_hole_addr,
&new->rb_hole_addr,
&mm->holes_addr);
@@ -885,7 +922,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
INIT_LIST_HEAD(&mm->hole_stack);
mm->interval_tree = RB_ROOT_CACHED;
- mm->holes_size = RB_ROOT;
+ mm->holes_size = RB_ROOT_CACHED;
mm->holes_addr = RB_ROOT;
/* Clever trick to avoid a special case in the free hole tracking. */
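Keeping the holes largest-first in a cached rbtree makes rb_first_cached()
the biggest free hole, which is what turns the early -ENOSPC test above into
an O(1) check. It also pairs with the DRM_MM_INSERT_ONCE handling added
here; a sketch of a caller that probes only the single best candidate:

struct drm_mm_node node = {};
int err = drm_mm_insert_node_in_range(&mm, &node, size, 0, 0,
				      0, U64_MAX,
				      DRM_MM_INSERT_BEST |
				      DRM_MM_INSERT_ONCE);
if (err == -ENOSPC) {
	/* Either no hole is large enough at all (caught by the cached
	 * maximum), or the one best-fit candidate failed, e.g. on
	 * alignment; fall back to eviction. */
}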
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index e5c653357024..21e353bd3948 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -145,6 +145,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
+ /* only expose writeback connectors if userspace understands them */
+ if (!file_priv->writeback_connectors &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
+ continue;
+
if (drm_lease_held(file_priv, connector->base.id)) {
if (count < card_res->count_connectors &&
put_user(connector->base.id, connector_id + count)) {
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index ce4d2fb32810..fcb0ab0abb75 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -433,8 +433,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
drm_modeset_lock_all(dev);
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
- ret = drm_mode_connector_set_obj_prop(obj, prop,
- prop_value);
+ ret = drm_connector_set_obj_prop(obj, prop, prop_value);
break;
case DRM_MODE_OBJECT_CRTC:
ret = drm_mode_crtc_set_obj_prop(obj, prop, prop_value);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c78ca0e84ffd..02db9ac82d7a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -659,10 +659,12 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
* drm_bus_flags_from_videomode - extract information about pixelclk and
* DE polarity from videomode and store it in a separate variable
* @vm: videomode structure to use
- * @bus_flags: information about pixelclk and DE polarity will be stored here
+ * @bus_flags: information about pixelclk, sync and DE polarity will be stored
+ * here
*
- * Sets DRM_BUS_FLAG_DE_(LOW|HIGH) and DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE
- * in @bus_flags according to DISPLAY_FLAGS found in @vm
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE and
+ * DRM_BUS_FLAG_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
+ * found in @vm
*/
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
{
@@ -672,6 +674,11 @@ void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
*bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
+ *bus_flags |= DRM_BUS_FLAG_SYNC_POSEDGE;
+ if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE)
+ *bus_flags |= DRM_BUS_FLAG_SYNC_NEGEDGE;
+
if (vm->flags & DISPLAY_FLAGS_DE_LOW)
*bus_flags |= DRM_BUS_FLAG_DE_LOW;
if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
@@ -684,7 +691,7 @@ EXPORT_SYMBOL_GPL(drm_bus_flags_from_videomode);
* of_get_drm_display_mode - get a drm_display_mode from devicetree
* @np: device_node with the timing specification
* @dmode: will be set to the return value
- * @bus_flags: information about pixelclk and DE polarity
+ * @bus_flags: information about pixelclk, sync and DE polarity
* @index: index into the list of display timings in devicetree
*
* This function is expensive and should only be used if only one mode is to be
@@ -1257,7 +1264,7 @@ static const char * const drm_mode_status_names[] = {
#undef MODE_STATUS
-static const char *drm_get_mode_status_name(enum drm_mode_status status)
+const char *drm_get_mode_status_name(enum drm_mode_status status)
{
int index = status + 3;
@@ -1346,7 +1353,7 @@ void drm_mode_sort(struct list_head *mode_list)
EXPORT_SYMBOL(drm_mode_sort);
/**
- * drm_mode_connector_list_update - update the mode list for the connector
+ * drm_connector_list_update - update the mode list for the connector
* @connector: the connector to update
*
* This moves the modes from the @connector probed_modes list
@@ -1356,7 +1363,7 @@ EXPORT_SYMBOL(drm_mode_sort);
* This is just a helper function; it doesn't validate any modes itself and also
* doesn't prune any invalid modes. Callers need to do that themselves.
*/
-void drm_mode_connector_list_update(struct drm_connector *connector)
+void drm_connector_list_update(struct drm_connector *connector)
{
struct drm_display_mode *pmode, *pt;
@@ -1405,7 +1412,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
}
}
}
-EXPORT_SYMBOL(drm_mode_connector_list_update);
+EXPORT_SYMBOL(drm_connector_list_update);
/**
* drm_mode_parse_command_line_for_connector - parse command line modeline for connector
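A sketch of the typical caller, a panel or bridge driver turning a DT timing
into bus polarity flags; of_get_videomode() and the node np come from the
surrounding probe code and are assumptions here:

struct videomode vm;
u32 bus_flags = 0;
int ret = of_get_videomode(np, &vm, OF_USE_NATIVE_MODE);

if (!ret)
	drm_bus_flags_from_videomode(&vm, &bus_flags);
/* bus_flags now also carries DRM_BUS_FLAG_SYNC_(POS|NEG)EDGE, in
 * addition to the DE and PIXDATA bits it carried before. */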
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 1fe122461298..2763a5ec845b 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -9,21 +9,28 @@
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
+/**
+ * DOC: overview
+ *
+ * A set of helper functions to aid DRM drivers in parsing standard DT
+ * properties.
+ */
+
static void drm_release_of(struct device *dev, void *data)
{
of_node_put(data);
}
/**
- * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
+ * drm_of_crtc_port_mask - find the mask of a registered CRTC by port OF node
* @dev: DRM device
* @port: port OF node
*
* Given a port OF node, return the possible mask of the corresponding
* CRTC within a device's list of CRTCs. Returns zero if not found.
*/
-static uint32_t drm_crtc_port_mask(struct drm_device *dev,
- struct device_node *port)
+uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port)
{
unsigned int index = 0;
struct drm_crtc *tmp;
@@ -37,6 +44,7 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
return 0;
}
+EXPORT_SYMBOL(drm_of_crtc_port_mask);
/**
* drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
@@ -62,7 +70,7 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
return 0;
}
- possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
+ possible_crtcs |= drm_of_crtc_port_mask(dev, remote_port);
of_node_put(remote_port);
}
@@ -93,7 +101,7 @@ EXPORT_SYMBOL_GPL(drm_of_component_match_add);
* drm_of_component_probe - Generic probe function for a component based master
* @dev: master device containing the OF node
* @compare_of: compare function used for matching components
- * @master_ops: component master ops to be used
+ * @m_ops: component master ops to be used
*
* Parse the platform device OF node and bind all the components associated
* with the master. Interface ports are added before the encoders in order to
@@ -238,10 +246,17 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
if (!remote)
return -ENODEV;
+ if (!of_device_is_available(remote)) {
+ of_node_put(remote);
+ return -ENODEV;
+ }
+
if (panel) {
*panel = of_drm_find_panel(remote);
- if (*panel)
+ if (!IS_ERR(*panel))
ret = 0;
+ else
+ *panel = NULL;
}
/* No panel found yet, check for a bridge next. */
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 308d442a531b..b902361dee6e 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/module.h>
+#include <drm/drm_device.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@@ -94,6 +95,9 @@ EXPORT_SYMBOL(drm_panel_remove);
*
* An error is returned if the panel is already attached to another connector.
*
+ * When unloading, the driver should detach from the panel by calling
+ * drm_panel_detach().
+ *
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
@@ -101,6 +105,13 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
if (panel->connector)
return -EBUSY;
+ panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
+ if (!panel->link) {
+ dev_err(panel->dev, "failed to link panel to %s\n",
+ dev_name(connector->dev->dev));
+ return -EINVAL;
+ }
+
panel->connector = connector;
panel->drm = connector->dev;
@@ -115,10 +126,15 @@ EXPORT_SYMBOL(drm_panel_attach);
* Detaches a panel from the connector it is attached to. If a panel is not
* attached to any connector this is effectively a no-op.
*
+ * This function should not be called by the panel device itself. It
+ * is only for the drm device that called drm_panel_attach().
+ *
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_detach(struct drm_panel *panel)
{
+ device_link_del(panel->link);
+
panel->connector = NULL;
panel->drm = NULL;
@@ -135,12 +151,19 @@ EXPORT_SYMBOL(drm_panel_detach);
* tree node. If a matching panel is found, return a pointer to it.
*
* Return: A pointer to the panel registered for the specified device tree
- * node or NULL if no panel matching the device tree node can be found.
+ * node or an ERR_PTR() if no panel matching the device tree node can be found.
+ * Possible error codes returned by this function:
+ * - EPROBE_DEFER: the panel device has not been probed yet, and the caller
+ * should retry later
+ * - ENODEV: the device is not available (status != "okay" or "ok")
*/
struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
struct drm_panel *panel;
+ if (!of_device_is_available(np))
+ return ERR_PTR(-ENODEV);
+
mutex_lock(&panel_lock);
list_for_each_entry(panel, &panel_list, list) {
@@ -151,7 +174,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np)
}
mutex_unlock(&panel_lock);
- return NULL;
+ return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL(of_drm_find_panel);
#endif
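Every caller now has to switch from a NULL check to ERR_PTR handling; the
canonical pattern:

struct drm_panel *panel = of_drm_find_panel(panel_np);

if (IS_ERR(panel))
	return PTR_ERR(panel);	/* -EPROBE_DEFER: panel not probed yet;
				 * -ENODEV: node disabled in the DT */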
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 4db9c515b74f..896e42a34895 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -326,64 +326,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
}
EXPORT_SYMBOL(drm_legacy_pci_init);
-int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
-{
- struct pci_dev *root;
- u32 lnkcap, lnkcap2;
-
- *mask = 0;
- if (!dev->pdev)
- return -EINVAL;
-
- root = dev->pdev->bus->self;
-
- /* we've been informed via and serverworks don't make the cut */
- if (root->vendor == PCI_VENDOR_ID_VIA ||
- root->vendor == PCI_VENDOR_ID_SERVERWORKS)
- return -EINVAL;
-
- pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
- pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
-
- if (lnkcap2) { /* PCIe r3.0-compliant */
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
- *mask |= DRM_PCIE_SPEED_25;
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
- *mask |= DRM_PCIE_SPEED_50;
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
- *mask |= DRM_PCIE_SPEED_80;
- } else { /* pre-r3.0 */
- if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
- *mask |= DRM_PCIE_SPEED_25;
- if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
- *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
- }
-
- DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
- return 0;
-}
-EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
-
-int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw)
-{
- struct pci_dev *root;
- u32 lnkcap;
-
- *mlw = 0;
- if (!dev->pdev)
- return -EINVAL;
-
- root = dev->pdev->bus->self;
-
- pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
-
- *mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
-
- DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap);
- return 0;
-}
-EXPORT_SYMBOL(drm_pcie_get_max_link_width);
-
#else
void drm_pci_agp_destroy(struct drm_device *dev) {}
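These wrappers duplicated logic the PCI core exposes on its own; presumably
callers are expected to query it directly now. A hedged sketch using the
generic PCI API (pcie_get_speed_cap() and pcie_get_width_cap() are core PCI
functions, not part of this diff):

enum pci_bus_speed speed = pcie_get_speed_cap(pdev->bus->self);
enum pcie_link_width width = pcie_get_width_cap(pdev->bus->self);
bool gen3_capable = speed >= PCIE_SPEED_8_0GT; /* root port speed cap */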
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 035054455301..6153cbda239f 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -177,6 +177,10 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (WARN_ON(config->num_total_plane >= 32))
return -EINVAL;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
return ret;
@@ -561,19 +565,66 @@ int drm_plane_check_pixel_format(struct drm_plane *plane,
if (i == plane->format_count)
return -EINVAL;
- if (!plane->modifier_count)
- return 0;
+ if (plane->funcs->format_mod_supported) {
+ if (!plane->funcs->format_mod_supported(plane, format, modifier))
+ return -EINVAL;
+ } else {
+ if (!plane->modifier_count)
+ return 0;
- for (i = 0; i < plane->modifier_count; i++) {
- if (modifier == plane->modifiers[i])
- break;
+ for (i = 0; i < plane->modifier_count; i++) {
+ if (modifier == plane->modifiers[i])
+ break;
+ }
+ if (i == plane->modifier_count)
+ return -EINVAL;
}
- if (i == plane->modifier_count)
- return -EINVAL;
- if (plane->funcs->format_mod_supported &&
- !plane->funcs->format_mod_supported(plane, format, modifier))
+ return 0;
+}
+
+static int __setplane_check(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ int ret;
+
+ /* Check whether this plane is usable on this CRTC */
+ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
+ DRM_DEBUG_KMS("Invalid crtc for plane\n");
return -EINVAL;
+ }
+
+ /* Check whether this plane supports the fb pixel format. */
+ ret = drm_plane_check_pixel_format(plane, fb->format->format,
+ fb->modifier);
+ if (ret) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
+ drm_get_format_name(fb->format->format,
+ &format_name),
+ fb->modifier);
+ return ret;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (crtc_w > INT_MAX ||
+ crtc_x > INT_MAX - (int32_t) crtc_w ||
+ crtc_h > INT_MAX ||
+ crtc_y > INT_MAX - (int32_t) crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ crtc_w, crtc_h, crtc_x, crtc_y);
+ return -ERANGE;
+ }
+
+ ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+ if (ret)
+ return ret;
return 0;
}
@@ -598,6 +649,8 @@ static int __setplane_internal(struct drm_plane *plane,
{
int ret = 0;
+ WARN_ON(drm_drv_uses_atomic_modeset(plane->dev));
+
/* No fb means shut it down */
if (!fb) {
plane->old_fb = plane->fb;
@@ -611,37 +664,9 @@ static int __setplane_internal(struct drm_plane *plane,
goto out;
}
- /* Check whether this plane is usable on this CRTC */
- if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
- DRM_DEBUG_KMS("Invalid crtc for plane\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, fb->format->format,
- fb->modifier);
- if (ret) {
- struct drm_format_name_buf format_name;
- DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
- drm_get_format_name(fb->format->format,
- &format_name),
- fb->modifier);
- goto out;
- }
-
- /* Give drivers some help against integer overflows */
- if (crtc_w > INT_MAX ||
- crtc_x > INT_MAX - (int32_t) crtc_w ||
- crtc_h > INT_MAX ||
- crtc_y > INT_MAX - (int32_t) crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- crtc_w, crtc_h, crtc_x, crtc_y);
- ret = -ERANGE;
- goto out;
- }
-
- ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+ ret = __setplane_check(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
if (ret)
goto out;
@@ -665,6 +690,41 @@ out:
return ret;
}
+static int __setplane_atomic(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ int ret;
+
+ WARN_ON(!drm_drv_uses_atomic_modeset(plane->dev));
+
+ /* No fb means shut it down */
+ if (!fb)
+ return plane->funcs->disable_plane(plane, ctx);
+
+ /*
+ * FIXME: This is redundant with drm_atomic_plane_check(),
+ * but the legacy cursor/"async" .update_plane() tricks
+ * don't call that so we still need this here. Should remove
+ * this when all .update_plane() implementations have been
+ * fixed to call drm_atomic_plane_check().
+ */
+ ret = __setplane_check(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+ if (ret)
+ return ret;
+
+ return plane->funcs->update_plane(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, ctx);
+}
+
static int setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -682,9 +742,15 @@ retry:
ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
if (ret)
goto fail;
- ret = __setplane_internal(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h, &ctx);
+
+ if (drm_drv_uses_atomic_modeset(plane->dev))
+ ret = __setplane_atomic(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, &ctx);
+ else
+ ret = __setplane_internal(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, &ctx);
fail:
if (ret == -EDEADLK) {
@@ -816,9 +882,14 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
src_h = fb->height << 16;
}
- ret = __setplane_internal(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- 0, 0, src_w, src_h, ctx);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = __setplane_atomic(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h, ctx);
+ else
+ ret = __setplane_internal(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h, ctx);
if (fb)
drm_framebuffer_put(fb);
@@ -1092,8 +1163,10 @@ retry:
/* Keep the old fb, don't unref it. */
plane->old_fb = NULL;
} else {
- plane->fb = fb;
- drm_framebuffer_get(fb);
+ if (!plane->state) {
+ plane->fb = fb;
+ drm_framebuffer_get(fb);
+ }
}
out:
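Note the semantics change above: when a driver implements
.format_mod_supported, that callback is now authoritative and the static
plane->modifiers[] list is bypassed entirely. A hypothetical implementation:

static bool my_plane_format_mod_supported(struct drm_plane *plane,
					  u32 format, u64 modifier)
{
	/* linear always works for the formats this plane advertises */
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	/* hypothetical policy: tiled scanout only for 32bpp RGB */
	return modifier == I915_FORMAT_MOD_X_TILED &&
	       format == DRM_FORMAT_XRGB8888;
}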
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index f88f68161519..621f17643bb0 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -440,6 +440,7 @@ out:
* @src_y: y offset of @fb for panning
* @src_w: width of source rectangle in @fb
* @src_h: height of source rectangle in @fb
+ * @ctx: lock acquire context, not used here
*
* Provides a default plane update handler using the atomic plane update
* functions. It is fully left to the driver to check plane constraints and
@@ -455,7 +456,8 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane_state *plane_state;
@@ -489,6 +491,7 @@ EXPORT_SYMBOL(drm_plane_helper_update);
/**
* drm_plane_helper_disable() - Transitional helper for plane disable
* @plane: plane to disable
+ * @ctx: lock acquire context, not used here
*
* Provides a default plane disable handler using the atomic plane update
* functions. It is fully left to the driver to check plane constraints and
@@ -499,9 +502,11 @@ EXPORT_SYMBOL(drm_plane_helper_update);
* RETURNS:
* Zero on success, error code on failure
*/
-int drm_plane_helper_disable(struct drm_plane *plane)
+int drm_plane_helper_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane_state *plane_state;
+ struct drm_framebuffer *old_fb;
/* crtc helpers love to call disable functions for already disabled hw
* functions. So cope with that. */
@@ -521,8 +526,9 @@ int drm_plane_helper_disable(struct drm_plane *plane)
plane_state->plane = plane;
plane_state->crtc = NULL;
+ old_fb = plane_state->fb;
drm_atomic_set_fb_for_plane(plane_state, NULL);
- return drm_plane_helper_commit(plane, plane_state, plane->fb);
+ return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_plane_helper_disable);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 397b46b33739..186db2e4c57a 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -186,7 +186,6 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
/**
* drm_gem_map_attach - dma_buf attach implementation for GEM
* @dma_buf: buffer to attach device to
- * @target_dev: not used
* @attach: buffer attachment data
*
* Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
@@ -195,7 +194,7 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
*
* Returns 0 on success, negative error code on failure.
*/
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_prime_attachment *prime_attach;
@@ -435,35 +434,6 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
/**
- * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
- */
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
-
-/**
- * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
- */
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
-
-/**
* drm_gem_dmabuf_kmap - map implementation for GEM
* @dma_buf: buffer to be mapped
* @page_num: page number within the buffer
@@ -520,9 +490,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = drm_gem_dmabuf_kmap,
- .map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
- .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index b25f98f33f6c..0e7fc3e7dfb4 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -30,6 +30,100 @@
#include <drm/drmP.h>
#include <drm/drm_print.h>
+void __drm_puts_coredump(struct drm_printer *p, const char *str)
+{
+ struct drm_print_iterator *iterator = p->arg;
+ ssize_t len;
+
+ if (!iterator->remain)
+ return;
+
+ if (iterator->offset < iterator->start) {
+ ssize_t copy;
+
+ len = strlen(str);
+
+ if (iterator->offset + len <= iterator->start) {
+ iterator->offset += len;
+ return;
+ }
+
+ copy = len - (iterator->start - iterator->offset);
+
+ if (copy > iterator->remain)
+ copy = iterator->remain;
+
+ /* Copy out the bit of the string that we need */
+ memcpy(iterator->data,
+ str + (iterator->start - iterator->offset), copy);
+
+ iterator->offset = iterator->start + copy;
+ iterator->remain -= copy;
+ } else {
+ ssize_t pos = iterator->offset - iterator->start;
+
+ len = min_t(ssize_t, strlen(str), iterator->remain);
+
+ memcpy(iterator->data + pos, str, len);
+
+ iterator->offset += len;
+ iterator->remain -= len;
+ }
+}
+EXPORT_SYMBOL(__drm_puts_coredump);
+
+void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf)
+{
+ struct drm_print_iterator *iterator = p->arg;
+ size_t len;
+ char *buf;
+
+ if (!iterator->remain)
+ return;
+
+ /* Figure out how big the string will be */
+ len = snprintf(NULL, 0, "%pV", vaf);
+
+ /* This is the easiest path, we've already advanced beyond the offset */
+ if (iterator->offset + len <= iterator->start) {
+ iterator->offset += len;
+ return;
+ }
+
+ /* Then check if we can directly copy into the target buffer */
+ if ((iterator->offset >= iterator->start) && (len < iterator->remain)) {
+ ssize_t pos = iterator->offset - iterator->start;
+
+ snprintf(((char *) iterator->data) + pos,
+ iterator->remain, "%pV", vaf);
+
+ iterator->offset += len;
+ iterator->remain -= len;
+
+ return;
+ }
+
+ /*
+ * Finally, hit the slow path and make a temporary string to copy over
+ * using __drm_puts_coredump()
+ */
+ buf = kmalloc(len + 1, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!buf)
+ return;
+
+ snprintf(buf, len + 1, "%pV", vaf);
+ __drm_puts_coredump(p, (const char *) buf);
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(__drm_printfn_coredump);
+
+void __drm_puts_seq_file(struct drm_printer *p, const char *str)
+{
+ seq_puts(p->arg, str);
+}
+EXPORT_SYMBOL(__drm_puts_seq_file);
+
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
{
seq_printf(p->arg, "%pV", vaf);
@@ -49,6 +143,23 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
EXPORT_SYMBOL(__drm_printfn_debug);
/**
+ * drm_puts - print a const string to a &drm_printer stream
+ * @p: the &drm printer
+ * @str: const string
+ *
+ * Allow &drm_printer types that have a dedicated constant-string handler
+ * to use it directly, falling back to drm_printf() otherwise.
+ */
+void drm_puts(struct drm_printer *p, const char *str)
+{
+ if (p->puts)
+ p->puts(p, str);
+ else
+ drm_printf(p, "%s", str);
+}
+EXPORT_SYMBOL(drm_puts);
+
+/**
* drm_printf - print to a &drm_printer stream
* @p: the &drm_printer
* @f: format string
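The offset/start/remain bookkeeping exists so a device-coredump read() can be
rendered in arbitrary chunks without buffering the whole dump. A sketch of
the intended consumer, assuming the drm_coredump_printer() constructor from
the same series:

static ssize_t my_coredump_read(char *buffer, loff_t offset, size_t count)
{
	struct drm_print_iterator iter = {
		.data   = buffer,
		.start  = offset,
		.remain = count,
	};
	struct drm_printer p = drm_coredump_printer(&iter); /* assumed */

	drm_puts(&p, "---\n");	/* constant strings skip vsnprintf */
	drm_printf(&p, "time: %lld\n", (long long)ktime_get_seconds());
	return count - iter.remain;
}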
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 527743394150..a1bb157bfdfa 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -33,6 +33,7 @@
#include <linux/moduleparam.h>
#include <drm/drmP.h>
+#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
@@ -88,9 +89,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- uint32_t *ids = connector->encoder_ids;
enum drm_mode_status ret = MODE_OK;
- unsigned int i;
+ struct drm_encoder *encoder;
+ int i;
/* Step 1: Validate against connector */
ret = drm_connector_mode_valid(connector, mode);
@@ -98,13 +99,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
return ret;
/* Step 2: Validate against encoders and crtcs */
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct drm_encoder *encoder = drm_encoder_find(dev, NULL, ids[i]);
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
struct drm_crtc *crtc;
- if (!encoder)
- continue;
-
ret = drm_encoder_mode_valid(encoder, mode);
if (ret != MODE_OK) {
/* No point in continuing for crtc check as this encoder
@@ -363,7 +360,7 @@ EXPORT_SYMBOL(drm_helper_probe_detect);
* using the VESA GTF/CVT formulas.
*
* 3. Modes are moved from the probed_modes list to the modes list. Potential
- * duplicates are merged together (see drm_mode_connector_list_update()).
+ * duplicates are merged together (see drm_connector_list_update()).
* After this step the probed_modes list will be empty again.
*
* 4. Any non-stale mode on the modes list then undergoes validation
@@ -475,7 +472,7 @@ retry:
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, connector->name);
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
verbose_prune = false;
goto prune;
}
@@ -488,7 +485,7 @@ retry:
if (count == 0)
goto prune;
- drm_mode_connector_list_update(connector);
+ drm_connector_list_update(connector);
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
@@ -563,6 +560,8 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev)
drm_sysfs_hotplug_event(dev);
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
+
+ drm_client_dev_hotplug(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 1f8031e30f53..cdb10f885a4f 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref)
drm_mode_object_unregister(blob->dev, &blob->base);
- kfree(blob);
+ kvfree(blob);
}
/**
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
- blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+ blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
if (!blob)
return ERR_PTR(-ENOMEM);
@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
true, drm_property_free_blob);
if (ret) {
- kfree(blob);
+ kvfree(blob);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 7a00455ca568..51fa978f0d23 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -52,7 +52,7 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
bool has_primary = state->plane_mask &
- BIT(drm_plane_index(crtc->primary));
+ drm_plane_mask(crtc->primary);
/* We always want to have an active plane with an active CRTC */
if (has_primary != state->enable)
@@ -281,13 +281,13 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
if (ret)
return ret;
- encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret || !connector)
return ret;
- return drm_mode_connector_attach_encoder(connector, encoder);
+ return drm_connector_attach_encoder(connector, encoder);
}
EXPORT_SYMBOL(drm_simple_display_pipe_init);
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index d4f4ce484529..adb3cb27d31e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -207,7 +207,6 @@ static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
.get_driver_name = drm_syncobj_null_fence_get_name,
.get_timeline_name = drm_syncobj_null_fence_get_name,
.enable_signaling = drm_syncobj_null_fence_enable_signaling,
- .wait = dma_fence_default_wait,
.release = NULL,
};
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 2660543ad86a..c3301046dfaa 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -100,7 +100,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
* map, get the page, increment the use count and return it.
*/
#if IS_ENABLED(CONFIG_AGP)
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
@@ -173,7 +173,7 @@ vm_fault_error:
return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
@@ -189,7 +189,7 @@ static int drm_vm_fault(struct vm_fault *vmf)
* Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
-static int drm_vm_shm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
@@ -291,7 +291,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
*
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
-static int drm_vm_dma_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
@@ -326,7 +326,7 @@ static int drm_vm_dma_fault(struct vm_fault *vmf)
*
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
-static int drm_vm_sg_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 23c749c05b5a..a6b2fe36b025 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* Copyright (c) 2012 David Airlie <airlied@linux.ie>
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
new file mode 100644
index 000000000000..c20e6fe00cb3
--- /dev/null
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_property.h>
+#include <drm/drm_writeback.h>
+#include <drm/drmP.h>
+#include <linux/dma-fence.h>
+
+/**
+ * DOC: overview
+ *
+ * Writeback connectors are used to expose hardware which can write the output
+ * from a CRTC to a memory buffer. They are used and act similarly to other
+ * types of connectors, with some important differences:
+ *
+ * * Writeback connectors don't provide a way to output visually to the user.
+ *
+ * * Writeback connectors are visible to userspace only when the client sets
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS.
+ *
+ * * Writeback connectors don't have EDID.
+ *
+ * A framebuffer may only be attached to a writeback connector when the
+ * connector is attached to a CRTC. The WRITEBACK_FB_ID property which sets the
+ * framebuffer applies only to a single commit (see below). A framebuffer may
+ * not be attached while the CRTC is off.
+ *
+ * Unlike with planes, when a writeback framebuffer is removed by userspace DRM
+ * makes no attempt to remove it from active use by the connector. This is
+ * because no method is provided to abort a writeback operation, and in any
+ * case making a new commit whilst a writeback is ongoing is undefined (see
+ * WRITEBACK_OUT_FENCE_PTR below). As soon as the current writeback is finished,
+ * the framebuffer will automatically no longer be in active use. As it will
+ * also have already been removed from the framebuffer list, there will be no
+ * way for any userspace application to retrieve a reference to it in the
+ * intervening period.
+ *
+ * Writeback connectors have some additional properties, which userspace
+ * can use to query and control them:
+ *
+ * "WRITEBACK_FB_ID":
+ * Write-only object property storing a DRM_MODE_OBJECT_FB: it stores the
+ * framebuffer to be written by the writeback connector. This property is
+ * similar to the FB_ID property on planes, but will always read as zero
+ * and is not preserved across commits.
+ * Userspace must set this property to an output buffer every time it
+ * wishes the buffer to get filled.
+ *
+ * "WRITEBACK_PIXEL_FORMATS":
+ * Immutable blob property to store the supported pixel formats table. The
+ * data is an array of u32 DRM_FORMAT_* fourcc values.
+ * Userspace can use this blob to find out what pixel formats are supported
+ * by the connector's writeback engine.
+ *
+ * "WRITEBACK_OUT_FENCE_PTR":
+ * Userspace can use this property to provide a pointer for the kernel to
+ * fill with a sync_file file descriptor, which will signal once the
+ * writeback is finished. The value should be the address of a 32-bit
+ * signed integer, cast to a u64.
+ * Userspace should wait for this fence to signal before making another
+ * commit affecting any of the same CRTCs, Planes or Connectors.
+ * **Failure to do so will result in undefined behaviour.**
+ * For this reason it is strongly recommended that all userspace
+ * applications making use of writeback connectors *always* retrieve an
+ * out-fence for the commit and use it appropriately.
+ * From userspace, this property will always read as zero.
+ */
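To illustrate the three properties, a userspace sketch with libdrm's atomic
API; wb_conn_id, prop_fb_id and prop_out_fence stand in for ids discovered
via drmModeObjectGetProperties():

int32_t out_fence = -1;
drmModeAtomicReq *req = drmModeAtomicAlloc();

drmModeAtomicAddProperty(req, wb_conn_id, prop_fb_id, fb_id);
drmModeAtomicAddProperty(req, wb_conn_id, prop_out_fence,
			 (uint64_t)(uintptr_t)&out_fence);
drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
drmModeAtomicFree(req);

/* out_fence now holds a sync_file fd; wait on it before the next
 * commit touching the same CRTC, planes or connector. */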
+
+#define fence_to_wb_connector(x) container_of(x->lock, \
+ struct drm_writeback_connector, \
+ fence_lock)
+
+static const char *drm_writeback_fence_get_driver_name(struct dma_fence *fence)
+{
+ struct drm_writeback_connector *wb_connector =
+ fence_to_wb_connector(fence);
+
+ return wb_connector->base.dev->driver->name;
+}
+
+static const char *
+drm_writeback_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct drm_writeback_connector *wb_connector =
+ fence_to_wb_connector(fence);
+
+ return wb_connector->timeline_name;
+}
+
+static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static const struct dma_fence_ops drm_writeback_fence_ops = {
+ .get_driver_name = drm_writeback_fence_get_driver_name,
+ .get_timeline_name = drm_writeback_fence_get_timeline_name,
+ .enable_signaling = drm_writeback_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+};
+
+static int create_writeback_properties(struct drm_device *dev)
+{
+ struct drm_property *prop;
+
+ if (!dev->mode_config.writeback_fb_id_property) {
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "WRITEBACK_FB_ID",
+ DRM_MODE_OBJECT_FB);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_fb_id_property = prop;
+ }
+
+ if (!dev->mode_config.writeback_pixel_formats_property) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_ATOMIC |
+ DRM_MODE_PROP_IMMUTABLE,
+ "WRITEBACK_PIXEL_FORMATS", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_pixel_formats_property = prop;
+ }
+
+ if (!dev->mode_config.writeback_out_fence_ptr_property) {
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "WRITEBACK_OUT_FENCE_PTR", 0,
+ U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_out_fence_ptr_property = prop;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs drm_writeback_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/**
+ * drm_writeback_connector_init - Initialize a writeback connector and its properties
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @con_funcs: Connector funcs vtable
+ * @enc_helper_funcs: Encoder helper funcs vtable to be used by the internal encoder
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values. It will also create an internal encoder associated with the
+ * drm_writeback_connector and set it to use the @enc_helper_funcs vtable for
+ * the encoder helper.
+ *
+ * Drivers should always use this function instead of drm_connector_init() to
+ * set up writeback connectors.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ const struct drm_encoder_helper_funcs *enc_helper_funcs,
+ const u32 *formats, int n_formats)
+{
+ struct drm_property_blob *blob;
+ struct drm_connector *connector = &wb_connector->base;
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret = create_writeback_properties(dev);
+
+ if (ret != 0)
+ return ret;
+
+ blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
+ formats);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
+ ret = drm_encoder_init(dev, &wb_connector->encoder,
+ &drm_writeback_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ goto fail;
+
+ connector->interlace_allowed = 0;
+
+ ret = drm_connector_init(dev, connector, con_funcs,
+ DRM_MODE_CONNECTOR_WRITEBACK);
+ if (ret)
+ goto connector_fail;
+
+ ret = drm_connector_attach_encoder(connector,
+ &wb_connector->encoder);
+ if (ret)
+ goto attach_fail;
+
+ INIT_LIST_HEAD(&wb_connector->job_queue);
+ spin_lock_init(&wb_connector->job_lock);
+
+ wb_connector->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&wb_connector->fence_lock);
+ snprintf(wb_connector->timeline_name,
+ sizeof(wb_connector->timeline_name),
+ "CONNECTOR:%d-%s", connector->base.id, connector->name);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_out_fence_ptr_property, 0);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_fb_id_property, 0);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_pixel_formats_property,
+ blob->base.id);
+ wb_connector->pixel_formats_blob_ptr = blob;
+
+ return 0;
+
+attach_fail:
+ drm_connector_cleanup(connector);
+connector_fail:
+ drm_encoder_cleanup(&wb_connector->encoder);
+fail:
+ drm_property_blob_put(blob);
+ return ret;
+}
+EXPORT_SYMBOL(drm_writeback_connector_init);
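
On the driver side, the intended call pattern is a single call at connector setup time. A minimal in-kernel sketch follows; everything with a foo_ prefix (device structure, connector funcs, encoder helper funcs, hardware names) is hypothetical and not part of this patch:

    /* Formats the writeback engine can emit, advertised via the blob. */
    static const u32 foo_wb_formats[] = {
            DRM_FORMAT_XRGB8888,
            DRM_FORMAT_RGB565,
    };

    static int foo_writeback_setup(struct foo_device *foo)
    {
            /*
             * Creates the shared writeback properties on first use,
             * registers the connector as DRM_MODE_CONNECTOR_WRITEBACK and
             * attaches the internal virtual encoder.
             */
            return drm_writeback_connector_init(&foo->drm,
                                                &foo->wb_connector,
                                                &foo_wb_connector_funcs,
                                                &foo_wb_encoder_helper_funcs,
                                                foo_wb_formats,
                                                ARRAY_SIZE(foo_wb_formats));
    }
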
+
+/**
+ * drm_writeback_queue_job - Queue a writeback job for later signalling
+ * @wb_connector: The writeback connector to queue a job on
+ * @job: The job to queue
+ *
+ * This function adds a job to the job_queue for a writeback connector. It
+ * takes ownership of the writeback job: any other references to the job
+ * must be dropped after calling this function.
+ *
+ * Drivers must ensure that for a given writeback connector, jobs are queued in
+ * exactly the same order as they will be completed by the hardware (and
+ * signaled via drm_writeback_signal_completion).
+ *
+ * For every call to drm_writeback_queue_job() there must be exactly one call to
+ * drm_writeback_signal_completion().
+ *
+ * See also: drm_writeback_signal_completion()
+ */
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ list_add_tail(&job->list_entry, &wb_connector->job_queue);
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+}
+EXPORT_SYMBOL(drm_writeback_queue_job);
+
+/*
+ * @cleanup_work: deferred cleanup of a writeback job
+ *
+ * The job cannot be cleaned up directly in drm_writeback_signal_completion,
+ * because it may be called in interrupt context. Dropping the framebuffer
+ * reference can sleep, and so the cleanup is deferred to a workqueue.
+ */
+static void cleanup_work(struct work_struct *work)
+{
+ struct drm_writeback_job *job = container_of(work,
+ struct drm_writeback_job,
+ cleanup_work);
+ drm_framebuffer_put(job->fb);
+ kfree(job);
+}
+
+
+/**
+ * drm_writeback_signal_completion - Signal the completion of a writeback job
+ * @wb_connector: The writeback connector whose job is complete
+ * @status: Status code to set in the writeback out_fence (0 for success)
+ *
+ * Drivers should call this to signal the completion of a previously queued
+ * writeback job. It should be called as soon as possible after the hardware
+ * has finished writing, and may be called from interrupt context.
+ * It is the driver's responsibility to ensure that for a given connector, the
+ * hardware completes writeback jobs in the same order as they are queued.
+ *
+ * Unless the driver holds its own reference to the framebuffer, the
+ * framebuffer must not be accessed after calling this function.
+ *
+ * See also: drm_writeback_queue_job()
+ */
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+ int status)
+{
+ unsigned long flags;
+ struct drm_writeback_job *job;
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ job = list_first_entry_or_null(&wb_connector->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+ if (job) {
+ list_del(&job->list_entry);
+ if (job->out_fence) {
+ if (status)
+ dma_fence_set_error(job->out_fence, status);
+ dma_fence_signal(job->out_fence);
+ dma_fence_put(job->out_fence);
+ }
+ }
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+
+ if (WARN_ON(!job))
+ return;
+
+ INIT_WORK(&job->cleanup_work, cleanup_work);
+ queue_work(system_long_wq, &job->cleanup_work);
+}
+EXPORT_SYMBOL(drm_writeback_signal_completion);
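
Taken together, drm_writeback_queue_job() and drm_writeback_signal_completion() form a strict one-to-one, in-order pairing. A sketch of how a driver might honour that contract; the foo_ names, including foo_hw_start_writeback(), are hypothetical:

    /* Called from the driver's atomic commit path. */
    static void foo_wb_start(struct foo_device *foo,
                             struct drm_writeback_job *job)
    {
            /* The core owns the job from here on. */
            drm_writeback_queue_job(&foo->wb_connector, job);
            foo_hw_start_writeback(foo);    /* assumed hardware kick-off */
    }

    /* Writeback-done interrupt: exactly one signal per queued job. */
    static irqreturn_t foo_wb_irq(int irq, void *data)
    {
            struct foo_device *foo = data;

            drm_writeback_signal_completion(&foo->wb_connector, 0);
            return IRQ_HANDLED;
    }
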
+
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector)
+{
+ struct dma_fence *fence;
+
+ if (WARN_ON(wb_connector->base.connector_type !=
+ DRM_MODE_CONNECTOR_WRITEBACK))
+ return NULL;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ dma_fence_init(fence, &drm_writeback_fence_ops,
+ &wb_connector->fence_lock, wb_connector->fence_context,
+ ++wb_connector->fence_seqno);
+
+ return fence;
+}
+EXPORT_SYMBOL(drm_writeback_get_out_fence);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index e5013a999147..9b2720b41571 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
+ struct drm_sched_rq *rq;
if (gpu) {
- drm_sched_entity_init(&gpu->sched,
- &ctx->sched_entity[i],
- &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
- NULL);
+ rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ drm_sched_entity_init(&ctx->sched_entity[i],
+ &rq, 1, NULL);
}
}
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
gpu->lastctx = NULL;
mutex_unlock(&gpu->lock);
- drm_sched_entity_fini(&gpu->sched,
- &ctx->sched_entity[i]);
+ drm_sched_entity_destroy(&ctx->sched_entity[i]);
}
}
@@ -631,8 +630,11 @@ static struct platform_driver etnaviv_platform_driver = {
},
};
+static struct platform_device *etnaviv_drm;
+
static int __init etnaviv_init(void)
{
+ struct platform_device *pdev;
int ret;
struct device_node *np;
@@ -644,7 +646,7 @@ static int __init etnaviv_init(void)
ret = platform_driver_register(&etnaviv_platform_driver);
if (ret != 0)
- platform_driver_unregister(&etnaviv_gpu_driver);
+ goto unregister_gpu_driver;
/*
* If the DT contains at least one available GPU device, instantiate
@@ -653,20 +655,33 @@ static int __init etnaviv_init(void)
for_each_compatible_node(np, NULL, "vivante,gc") {
if (!of_device_is_available(np))
continue;
-
- platform_device_register_simple("etnaviv", -1, NULL, 0);
+ pdev = platform_device_register_simple("etnaviv", -1,
+ NULL, 0);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ of_node_put(np);
+ goto unregister_platform_driver;
+ }
+ etnaviv_drm = pdev;
of_node_put(np);
break;
}
+ return 0;
+
+unregister_platform_driver:
+ platform_driver_unregister(&etnaviv_platform_driver);
+unregister_gpu_driver:
+ platform_driver_unregister(&etnaviv_gpu_driver);
return ret;
}
module_init(etnaviv_init);
static void __exit etnaviv_exit(void)
{
- platform_driver_unregister(&etnaviv_gpu_driver);
+ platform_device_unregister(etnaviv_drm);
platform_driver_unregister(&etnaviv_platform_driver);
+ platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index d36c7bbe66db..8d02d1b7dcf5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -18,6 +18,7 @@
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/sizes.h>
+#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -53,7 +54,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int etnaviv_gem_fault(struct vm_fault *vmf);
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 209ef1274b80..1fa74226db91 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -169,31 +169,30 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return obj->ops->mmap(obj, vma);
}
-int etnaviv_gem_fault(struct vm_fault *vmf)
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct page **pages, *page;
pgoff_t pgoff;
- int ret;
+ int err;
/*
* Make sure we don't parallel update on a fault, nor move or remove
- * something from beneath our feet. Note that vm_insert_page() is
+ * something from beneath our feet. Note that vmf_insert_page() is
* specifically coded to take care of this, so we don't have to.
*/
- ret = mutex_lock_interruptible(&etnaviv_obj->lock);
- if (ret)
- goto out;
-
+ err = mutex_lock_interruptible(&etnaviv_obj->lock);
+ if (err)
+ return VM_FAULT_NOPAGE;
/* make sure we have pages attached now */
pages = etnaviv_gem_get_pages(etnaviv_obj);
mutex_unlock(&etnaviv_obj->lock);
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto out;
+ err = PTR_ERR(pages);
+ return vmf_error(err);
}
/* We don't use vmf->pgoff since that has the fake offset: */
@@ -204,25 +203,7 @@ int etnaviv_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
- ret = vm_insert_page(vma, vmf->address, page);
-
-out:
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_page(vma, vmf->address, page);
}
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 46ecd3e66ac9..983e67f19e45 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -388,9 +388,9 @@ static void submit_cleanup(struct kref *kref)
dma_fence_put(submit->in_fence);
if (submit->out_fence) {
/* first remove from IDR, so fence can not be found anymore */
- mutex_lock(&submit->gpu->fence_idr_lock);
+ mutex_lock(&submit->gpu->fence_lock);
idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
- mutex_unlock(&submit->gpu->fence_idr_lock);
+ mutex_unlock(&submit->gpu->fence_lock);
dma_fence_put(submit->out_fence);
}
kfree(submit->pmrs);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 686f6552db48..f225fbc6edd2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -799,6 +799,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
free_buffer:
etnaviv_cmdbuf_free(&gpu->buffer);
+ gpu->buffer.suballoc = NULL;
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
@@ -1027,11 +1028,6 @@ static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
return dev_name(f->gpu->dev);
}
-static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
@@ -1049,9 +1045,7 @@ static void etnaviv_fence_release(struct dma_fence *fence)
static const struct dma_fence_ops etnaviv_fence_ops = {
.get_driver_name = etnaviv_fence_get_driver_name,
.get_timeline_name = etnaviv_fence_get_timeline_name,
- .enable_signaling = etnaviv_fence_enable_signaling,
.signaled = etnaviv_fence_signaled,
- .wait = dma_fence_default_wait,
.release = etnaviv_fence_release,
};
@@ -1733,7 +1727,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- mutex_init(&gpu->fence_idr_lock);
+ mutex_init(&gpu->fence_lock);
/* Map registers: */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index dd430f0f8ff5..9a75a6937268 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -118,7 +118,7 @@ struct etnaviv_gpu {
u32 idle_mask;
/* Fencing support */
- struct mutex fence_idr_lock;
+ struct mutex fence_lock;
struct idr fence_idr;
u32 next_fence;
u32 active_fence;
@@ -131,6 +131,9 @@ struct etnaviv_gpu {
struct work_struct sync_point_work;
int sync_point_event;
+ /* hang detection */
+ u32 hangcheck_dma_addr;
+
void __iomem *mmio;
int irq;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 71fbc1f96cb6..f1c88d8ad5ba 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -119,8 +119,7 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
- u32 *p;
- int ret, i;
+ int ret;
/* allocate scratch page */
etnaviv_domain->base.bad_page_cpu =
@@ -131,9 +130,9 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
ret = -ENOMEM;
goto fail_mem;
}
- p = etnaviv_domain->base.bad_page_cpu;
- for (i = 0; i < SZ_4K / 4; i++)
- *p++ = 0xdead55aa;
+
+ memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
+ SZ_4K / sizeof(u32));
etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
SZ_4K, &etnaviv_domain->pta_dma,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a74eb57af15b..69e9b431bf1f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -10,6 +10,7 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
+#include "state.xml.h"
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct etnaviv_gpu *gpu = submit->gpu;
+ u32 dma_addr;
+ int change;
+
+ /*
+ * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+ */
+ if (fence_completed(gpu, submit->out_fence->seqno))
+ return;
+
+ /*
+ * If the GPU is still making forward progress on the front-end (which
+ * should never loop) we shift out the timeout to give it a chance to
+ * finish the job.
+ */
+ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+ change = dma_addr - gpu->hangcheck_dma_addr;
+ if (change < 0 || change > 16) {
+ gpu->hangcheck_dma_addr = dma_addr;
+ schedule_delayed_work(&sched_job->work_tdr,
+ sched_job->sched->timeout);
+ return;
+ }
/* block scheduler */
kthread_park(gpu->sched.thread);
@@ -116,28 +140,38 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
struct etnaviv_gem_submit *submit)
{
- int ret;
+ int ret = 0;
- ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
- sched_entity, submit->cmdbuf.ctx);
+ /*
+ * Hold the fence lock across the whole operation to avoid jobs being
+ * pushed out of order with regard to their sched fence seqnos as
+ * allocated in drm_sched_job_init.
+ */
+ mutex_lock(&submit->gpu->fence_lock);
+
+ ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+ submit->cmdbuf.ctx);
if (ret)
- return ret;
+ goto out_unlock;
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
- mutex_lock(&submit->gpu->fence_idr_lock);
submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
submit->out_fence, 0,
INT_MAX, GFP_KERNEL);
- mutex_unlock(&submit->gpu->fence_idr_lock);
- if (submit->out_fence_id < 0)
- return -ENOMEM;
+ if (submit->out_fence_id < 0) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
drm_sched_entity_push_job(&submit->sched_job, sched_entity);
- return 0;
+out_unlock:
+ mutex_unlock(&submit->gpu->fence_lock);
+
+ return ret;
}
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 3b323f1e0475..2ad146bbf4f5 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -4,7 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
- exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
+ exynos_drm_gem.o exynos_drm_plane.o
exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 82c95c34447f..94529aa82339 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
unsigned long val;
val = readl(ctx->addr + DECON_WINCONx(win));
- val &= ~WINCONx_BPPMODE_MASK;
+ val &= WINCONx_ENWIN_F;
switch (fb->format->format) {
case DRM_FORMAT_XRGB1555:
@@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->addr + DECON_VIDOSDxB(win));
}
- val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
- VIDOSD_Wx_ALPHA_B_F(0x0);
+ val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
+ VIDOSD_Wx_ALPHA_B_F(0xff);
writel(val, ctx->addr + DECON_VIDOSDxC(win));
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -673,6 +673,8 @@ err:
static const struct dev_pm_ops exynos5433_decon_pm_ops = {
SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 3931d5e33fe0..88cbd000eb09 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -832,6 +832,8 @@ static int exynos7_decon_resume(struct device *dev)
static const struct dev_pm_ops exynos7_decon_pm_ops = {
SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver decon_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 86330f396784..c8449ae4f4fe 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/of_graph.h>
#include <linux/component.h>
+#include <linux/pm_runtime.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
@@ -232,9 +233,11 @@ static int exynos_dp_probe(struct platform_device *pdev)
np = of_parse_phandle(dev->of_node, "panel", 0);
if (np) {
dp->plat_data.panel = of_drm_find_panel(np);
+
of_node_put(np);
- if (!dp->plat_data.panel)
- return -EPROBE_DEFER;
+ if (IS_ERR(dp->plat_data.panel))
+ return PTR_ERR(dp->plat_data.panel);
+
goto out;
}
@@ -276,6 +279,8 @@ static int exynos_dp_resume(struct device *dev)
static const struct dev_pm_ops exynos_dp_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id exynos_dp_match[] = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
deleted file mode 100644
index b0c0621fcdf7..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/* exynos_drm_core.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_crtc.h"
-
-static LIST_HEAD(exynos_drm_subdrv_list);
-
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
-{
- if (!subdrv)
- return -EINVAL;
-
- list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
-
- return 0;
-}
-
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
-{
- if (!subdrv)
- return -EINVAL;
-
- list_del(&subdrv->list);
-
- return 0;
-}
-
-int exynos_drm_device_subdrv_probe(struct drm_device *dev)
-{
- struct exynos_drm_subdrv *subdrv, *n;
- int err;
-
- if (!dev)
- return -EINVAL;
-
- list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
- if (subdrv->probe) {
- subdrv->drm_dev = dev;
-
- /*
- * this probe callback would be called by sub driver
- * after setting of all resources to this sub driver,
- * such as clock, irq and register map are done.
- */
- err = subdrv->probe(dev, subdrv->dev);
- if (err) {
- DRM_DEBUG("exynos drm subdrv probe failed.\n");
- list_del(&subdrv->list);
- continue;
- }
- }
- }
-
- return 0;
-}
-
-int exynos_drm_device_subdrv_remove(struct drm_device *dev)
-{
- struct exynos_drm_subdrv *subdrv;
-
- if (!dev) {
- WARN(1, "Unexpected drm device unregister!\n");
- return -EINVAL;
- }
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->remove)
- subdrv->remove(dev, subdrv->dev);
- }
-
- return 0;
-}
-
-int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
-{
- struct exynos_drm_subdrv *subdrv;
- int ret;
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->open) {
- ret = subdrv->open(dev, subdrv->dev, file);
- if (ret)
- goto err;
- }
- }
-
- return 0;
-
-err:
- list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->close)
- subdrv->close(dev, subdrv->dev, file);
- }
- return ret;
-}
-
-void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
-{
- struct exynos_drm_subdrv *subdrv;
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->close)
- subdrv->close(dev, subdrv->dev, file);
- }
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 66945e0dc57f..2f0babb67c51 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -113,7 +113,7 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
}
drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -240,8 +240,8 @@ struct drm_encoder *exynos_dpi_probe(struct device *dev)
if (ctx->panel_node) {
ctx->panel = of_drm_find_panel(ctx->panel_node);
- if (!ctx->panel)
- return ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ctx->panel))
+ return ERR_CAST(ctx->panel);
}
return &ctx->encoder;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a81b4a5e24a7..b599f74692e5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -55,8 +55,7 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
return -ENOMEM;
file->driver_priv = file_priv;
-
- ret = exynos_drm_subdrv_open(dev, file);
+ ret = g2d_open(dev, file);
if (ret)
goto err_file_priv_free;
@@ -70,7 +69,7 @@ err_file_priv_free:
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
- exynos_drm_subdrv_close(dev, file);
+ g2d_close(dev, file);
kfree(file->driver_priv);
file->driver_priv = NULL;
}
@@ -147,13 +146,12 @@ static struct drm_driver exynos_drm_driver = {
.minor = DRIVER_MINOR,
};
-#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct exynos_drm_private *private;
- if (pm_runtime_suspended(dev) || !drm_dev)
+ if (!drm_dev)
return 0;
private = drm_dev->dev_private;
@@ -170,25 +168,23 @@ static int exynos_drm_suspend(struct device *dev)
return 0;
}
-static int exynos_drm_resume(struct device *dev)
+static void exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct exynos_drm_private *private;
- if (pm_runtime_suspended(dev) || !drm_dev)
- return 0;
+ if (!drm_dev)
+ return;
private = drm_dev->dev_private;
drm_atomic_helper_resume(drm_dev, private->suspend_state);
exynos_drm_fbdev_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
-
- return 0;
}
-#endif
static const struct dev_pm_ops exynos_drm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
+ .prepare = exynos_drm_suspend,
+ .complete = exynos_drm_resume,
};
/* forward declaration */
@@ -240,6 +236,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
}, {
DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE,
@@ -376,11 +373,6 @@ static int exynos_drm_bind(struct device *dev)
if (ret)
goto err_unbind_all;
- /* Probe non kms sub drivers and virtual display driver. */
- ret = exynos_drm_device_subdrv_probe(drm);
- if (ret)
- goto err_unbind_all;
-
drm_mode_config_reset(drm);
/*
@@ -411,7 +403,6 @@ err_cleanup_fbdev:
exynos_drm_fbdev_fini(drm);
err_cleanup_poll:
drm_kms_helper_poll_fini(drm);
- exynos_drm_device_subdrv_remove(drm);
err_unbind_all:
component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
@@ -420,7 +411,7 @@ err_mode_config_cleanup:
err_free_private:
kfree(private);
err_free_drm:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -431,8 +422,6 @@ static void exynos_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
- exynos_drm_device_subdrv_remove(drm);
-
exynos_drm_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
@@ -444,7 +433,7 @@ static void exynos_drm_unbind(struct device *dev)
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops exynos_drm_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 0f6d079a55c9..c737c4bd2c19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -179,17 +179,13 @@ static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
crtc->pipe_clk->enable(crtc->pipe_clk, enable);
}
-struct exynos_drm_g2d_private {
- struct device *dev;
+struct drm_exynos_file_private {
+ /* for g2d api */
struct list_head inuse_cmdlist;
struct list_head event_list;
struct list_head userptr_list;
};
-struct drm_exynos_file_private {
- struct exynos_drm_g2d_private *g2d_priv;
-};
-
/*
* Exynos drm private structure.
*
@@ -201,6 +197,7 @@ struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
struct drm_atomic_state *suspend_state;
+ struct device *g2d_dev;
struct device *dma_dev;
void *mapping;
@@ -217,44 +214,6 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
return priv->dma_dev;
}
-/*
- * Exynos drm sub driver structure.
- *
- * @list: sub driver has its own list object to register to exynos drm driver.
- * @dev: pointer to device object for subdrv device driver.
- * @drm_dev: pointer to drm_device and this pointer would be set
- * when sub driver calls exynos_drm_subdrv_register().
- * @probe: this callback would be called by exynos drm driver after
- * subdrv is registered to it.
- * @remove: this callback is used to release resources created
- * by probe callback.
- * @open: this would be called with drm device file open.
- * @close: this would be called with drm device file close.
- */
-struct exynos_drm_subdrv {
- struct list_head list;
- struct device *dev;
- struct drm_device *drm_dev;
-
- int (*probe)(struct drm_device *drm_dev, struct device *dev);
- void (*remove)(struct drm_device *drm_dev, struct device *dev);
- int (*open)(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file);
- void (*close)(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file);
-};
-
- /* This function would be called by non kms drivers such as g2d and ipp. */
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
-
-/* this function removes subdrv list from exynos drm driver */
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
-
-int exynos_drm_device_subdrv_probe(struct drm_device *dev);
-int exynos_drm_device_subdrv_remove(struct drm_device *dev);
-int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
-void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
-
#ifdef CONFIG_DRM_EXYNOS_DPI
struct drm_encoder *exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6d29777884f9..781b82c2c579 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1479,7 +1479,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -1519,6 +1519,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
dsi->panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(dsi->panel))
+ dsi->panel = NULL;
+
if (dsi->panel) {
drm_panel_attach(dsi->panel, &dsi->connector);
dsi->connector.status = connector_status_connected;
@@ -1860,6 +1863,8 @@ err_clk:
static const struct dev_pm_ops exynos_dsi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver dsi_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 7fcc1a7ab1a0..9f52382e19ee 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -101,7 +101,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
{
const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
- struct drm_gem_object *obj;
struct drm_framebuffer *fb;
int i;
int ret;
@@ -112,15 +111,14 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
unsigned long size = height * mode_cmd->pitches[i] +
mode_cmd->offsets[i];
- obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
- if (!obj) {
+ exynos_gem[i] = exynos_drm_gem_get(file_priv,
+ mode_cmd->handles[i]);
+ if (!exynos_gem[i]) {
DRM_ERROR("failed to lookup gem object\n");
ret = -ENOENT;
goto err;
}
- exynos_gem[i] = to_exynos_gem(obj);
-
if (size > exynos_gem[i]->size) {
i++;
ret = -EINVAL;
@@ -138,7 +136,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
err:
while (i--)
- drm_gem_object_unreference_unlocked(&exynos_gem[i]->base);
+ exynos_drm_gem_put(exynos_gem[i]);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 6127ef25acd6..e8d0670bb5f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
static void fimc_set_window(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, h1, h2, v1, v2;
/* cropped image */
h1 = buf->rect.x;
- h2 = buf->buf.width - buf->rect.w - buf->rect.x;
+ h2 = real_width - buf->rect.w - buf->rect.x;
v1 = buf->rect.y;
v2 = buf->buf.height - buf->rect.h - buf->rect.y;
DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
- buf->buf.width, buf->buf.height);
+ real_width, buf->buf.height);
DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
/*
@@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx,
static void fimc_src_set_size(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+ DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
/* original size */
- cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) |
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
@@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx,
* for now, we support only ITU601 8 bit mode
*/
cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
- EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) |
EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
@@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
static void fimc_dst_set_size(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, cfg_ext;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+ DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
/* original size */
- cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) |
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 01b1570d0c3a..b7f56935a46b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1192,6 +1192,8 @@ static int exynos_fimd_resume(struct device *dev)
static const struct dev_pm_ops exynos_fimd_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver fimd_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f68ef1b3a28c..f2481a2014bb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -190,7 +191,7 @@ struct g2d_buf_desc {
struct g2d_buf_info {
unsigned int map_nr;
enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
- unsigned long handles[MAX_REG_TYPE_NR];
+ void *obj[MAX_REG_TYPE_NR];
unsigned int types[MAX_REG_TYPE_NR];
struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
};
@@ -237,7 +238,7 @@ struct g2d_data {
int irq;
struct workqueue_struct *g2d_workq;
struct work_struct runqueue_work;
- struct exynos_drm_subdrv subdrv;
+ struct drm_device *drm_dev;
unsigned long flags;
/* cmdlist */
@@ -268,14 +269,13 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
struct g2d_buf_info *buf_info;
g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
- g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev),
G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL,
g2d->cmdlist_dma_attrs);
@@ -308,7 +308,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
+ dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
return ret;
@@ -316,12 +316,10 @@ err:
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
-
kfree(g2d->cmdlist_node);
if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
- dma_free_attrs(to_dma_dev(subdrv->drm_dev),
+ dma_free_attrs(to_dma_dev(g2d->drm_dev),
G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
@@ -355,32 +353,31 @@ static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
mutex_unlock(&g2d->cmdlist_mutex);
}
-static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
+static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv,
struct g2d_cmdlist_node *node)
{
struct g2d_cmdlist_node *lnode;
- if (list_empty(&g2d_priv->inuse_cmdlist))
+ if (list_empty(&file_priv->inuse_cmdlist))
goto add_to_list;
/* this links to base address of new cmdlist */
- lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
+ lnode = list_entry(file_priv->inuse_cmdlist.prev,
struct g2d_cmdlist_node, list);
lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
add_to_list:
- list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
+ list_add_tail(&node->list, &file_priv->inuse_cmdlist);
if (node->event)
- list_add_tail(&node->event->base.link, &g2d_priv->event_list);
+ list_add_tail(&node->event->base.link, &file_priv->event_list);
}
-static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
- unsigned long obj,
+static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
+ void *obj,
bool force)
{
- struct g2d_cmdlist_userptr *g2d_userptr =
- (struct g2d_cmdlist_userptr *)obj;
+ struct g2d_cmdlist_userptr *g2d_userptr = obj;
struct page **pages;
if (!obj)
@@ -398,7 +395,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
return;
out:
- dma_unmap_sg(to_dma_dev(drm_dev), g2d_userptr->sgt->sgl,
+ dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl,
g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
pages = frame_vector_pages(g2d_userptr->vec);
@@ -419,16 +416,14 @@ out:
kfree(g2d_userptr);
}
-static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
unsigned long userptr,
unsigned long size,
struct drm_file *filp,
- unsigned long *obj)
+ void **obj)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr;
- struct g2d_data *g2d;
struct sg_table *sgt;
unsigned long start, end;
unsigned int npages, offset;
@@ -439,10 +434,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
return ERR_PTR(-EINVAL);
}
- g2d = dev_get_drvdata(g2d_priv->dev);
-
/* check if userptr already exists in userptr_list. */
- list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+ list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) {
if (g2d_userptr->userptr == userptr) {
/*
* also check size because there could be same address
@@ -450,7 +443,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
*/
if (g2d_userptr->size == size) {
atomic_inc(&g2d_userptr->refcount);
- *obj = (unsigned long)g2d_userptr;
+ *obj = g2d_userptr;
return &g2d_userptr->dma_addr;
}
@@ -517,7 +510,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->sgt = sgt;
- if (!dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents,
+ if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL)) {
DRM_ERROR("failed to map sgt with dma region.\n");
ret = -ENOMEM;
@@ -527,14 +520,14 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
g2d_userptr->userptr = userptr;
- list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+ list_add_tail(&g2d_userptr->list, &file_priv->userptr_list);
if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
g2d->current_pool += npages << PAGE_SHIFT;
g2d_userptr->in_pool = true;
}
- *obj = (unsigned long)g2d_userptr;
+ *obj = g2d_userptr;
return &g2d_userptr->dma_addr;
@@ -556,19 +549,14 @@ err_free:
return ERR_PTR(ret);
}
-static void g2d_userptr_free_all(struct drm_device *drm_dev,
- struct g2d_data *g2d,
- struct drm_file *filp)
+static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr, *n;
- list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+ list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list)
if (g2d_userptr->in_pool)
- g2d_userptr_put_dma_addr(drm_dev,
- (unsigned long)g2d_userptr,
- true);
+ g2d_userptr_put_dma_addr(g2d, g2d_userptr, true);
g2d->current_pool = 0;
}
@@ -723,26 +711,23 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
buf_desc = &buf_info->descs[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
- unsigned long size;
+ struct exynos_drm_gem *exynos_gem;
- size = exynos_drm_gem_get_size(drm_dev, handle, file);
- if (!size) {
+ exynos_gem = exynos_drm_gem_get(file, handle);
+ if (!exynos_gem) {
ret = -EFAULT;
goto err;
}
- if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
- size)) {
+ if (!g2d_check_buf_desc_is_valid(buf_desc,
+ reg_type, exynos_gem->size)) {
+ exynos_drm_gem_put(exynos_gem);
ret = -EFAULT;
goto err;
}
- addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
- file);
- if (IS_ERR(addr)) {
- ret = -EFAULT;
- goto err;
- }
+ addr = &exynos_gem->dma_addr;
+ buf_info->obj[reg_type] = exynos_gem;
} else {
struct drm_exynos_g2d_userptr g2d_userptr;
@@ -758,11 +743,11 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
goto err;
}
- addr = g2d_userptr_get_dma_addr(drm_dev,
+ addr = g2d_userptr_get_dma_addr(g2d,
g2d_userptr.userptr,
g2d_userptr.size,
file,
- &handle);
+ &buf_info->obj[reg_type]);
if (IS_ERR(addr)) {
ret = -EFAULT;
goto err;
@@ -771,7 +756,6 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
cmdlist->data[reg_pos + 1] = *addr;
buf_info->reg_types[i] = reg_type;
- buf_info->handles[reg_type] = handle;
}
return 0;
@@ -785,29 +769,26 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
struct drm_file *filp)
{
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
struct g2d_buf_info *buf_info = &node->buf_info;
int i;
for (i = 0; i < buf_info->map_nr; i++) {
struct g2d_buf_desc *buf_desc;
enum g2d_reg_type reg_type;
- unsigned long handle;
+ void *obj;
reg_type = buf_info->reg_types[i];
buf_desc = &buf_info->descs[reg_type];
- handle = buf_info->handles[reg_type];
+ obj = buf_info->obj[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM)
- exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
- filp);
+ exynos_drm_gem_put(obj);
else
- g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
- false);
+ g2d_userptr_put_dma_addr(g2d, obj, false);
buf_info->reg_types[i] = REG_TYPE_NONE;
- buf_info->handles[reg_type] = 0;
+ buf_info->obj[reg_type] = NULL;
buf_info->types[reg_type] = 0;
memset(buf_desc, 0x00, sizeof(*buf_desc));
}
@@ -922,7 +903,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
- struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+ struct drm_device *drm_dev = g2d->drm_dev;
struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
struct drm_exynos_pending_g2d_event *e;
struct timespec64 now;
@@ -1031,7 +1012,7 @@ out:
mutex_unlock(&g2d->runqueue_mutex);
}
-static int g2d_check_reg_offset(struct device *dev,
+static int g2d_check_reg_offset(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
@@ -1131,7 +1112,7 @@ static int g2d_check_reg_offset(struct device *dev,
return 0;
err:
- dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
+ dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
@@ -1139,23 +1120,8 @@ err:
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
struct drm_exynos_g2d_get_ver *ver = data;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
ver->major = G2D_HW_MAJOR_VER;
ver->minor = G2D_HW_MINOR_VER;
@@ -1166,9 +1132,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_set_cmdlist *req = data;
struct drm_exynos_g2d_cmd *cmd;
struct drm_exynos_pending_g2d_event *e;
@@ -1177,17 +1142,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
int size;
int ret;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
node = g2d_get_cmdlist(g2d);
if (!node)
return -ENOMEM;
@@ -1199,7 +1153,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
*/
if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
- dev_err(dev, "number of submitted G2D commands exceeds limit\n");
+ dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n");
return -EINVAL;
}
@@ -1267,7 +1221,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
*/
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
- dev_err(dev, "cmdlist size is too big\n");
+ dev_err(g2d->dev, "cmdlist size is too big\n");
ret = -EINVAL;
goto err_free_event;
}
@@ -1282,7 +1236,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
- ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
+ ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
@@ -1301,7 +1255,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_buf_nr * 2;
- ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
+ ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
@@ -1319,7 +1273,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
/* tail */
cmdlist->data[cmdlist->last] = 0;
- g2d_add_cmdlist_to_inuse(g2d_priv, node);
+ g2d_add_cmdlist_to_inuse(file_priv, node);
return 0;
@@ -1337,25 +1291,13 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_exec *req = data;
struct g2d_runqueue_node *runqueue_node;
struct list_head *run_cmdlist;
struct list_head *event_list;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
if (!runqueue_node)
return -ENOMEM;
@@ -1367,11 +1309,11 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
init_completion(&runqueue_node->complete);
runqueue_node->async = req->async;
- list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
- list_splice_init(&g2d_priv->event_list, event_list);
+ list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist);
+ list_splice_init(&file_priv->event_list, event_list);
if (list_empty(run_cmdlist)) {
- dev_err(dev, "there is no inuse cmdlist\n");
+ dev_err(g2d->dev, "there is no inuse cmdlist\n");
kmem_cache_free(g2d->runqueue_slab, runqueue_node);
return -EPERM;
}
@@ -1395,71 +1337,28 @@ out:
return 0;
}
-static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
-{
- struct g2d_data *g2d;
- int ret;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
- /* allocate dma-aware cmdlist buffer. */
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0) {
- dev_err(dev, "cmdlist init failed\n");
- return ret;
- }
-
- ret = drm_iommu_attach_device(drm_dev, dev);
- if (ret < 0) {
- dev_err(dev, "failed to enable iommu.\n");
- g2d_fini_cmdlist(g2d);
- }
-
- return ret;
-
-}
-
-static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- drm_iommu_detach_device(drm_dev, dev);
-}
-
-static int g2d_open(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
+int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv;
-
- g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
- if (!g2d_priv)
- return -ENOMEM;
- g2d_priv->dev = dev;
- file_priv->g2d_priv = g2d_priv;
-
- INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
- INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->userptr_list);
+ INIT_LIST_HEAD(&file_priv->inuse_cmdlist);
+ INIT_LIST_HEAD(&file_priv->event_list);
+ INIT_LIST_HEAD(&file_priv->userptr_list);
return 0;
}
-static void g2d_close(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
+void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
struct g2d_data *g2d;
struct g2d_cmdlist_node *node, *n;
- if (!dev)
+ if (!priv->g2d_dev)
return;
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return;
+ g2d = dev_get_drvdata(priv->g2d_dev);
/* Remove the runqueue nodes that belong to us. */
mutex_lock(&g2d->runqueue_mutex);
@@ -1480,24 +1379,70 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
* Properly unmap these buffers here.
*/
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+ list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
}
mutex_unlock(&g2d->cmdlist_mutex);
/* release all g2d_userptr in pool. */
- g2d_userptr_free_all(drm_dev, g2d, file);
+ g2d_userptr_free_all(g2d, file);
+}
+
+static int g2d_bind(struct device *dev, struct device *master, void *data)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ int ret;
+
+ g2d->drm_dev = drm_dev;
- kfree(file_priv->g2d_priv);
+ /* allocate dma-aware cmdlist buffer. */
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0) {
+ dev_err(dev, "cmdlist init failed\n");
+ return ret;
+ }
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable iommu.\n");
+ g2d_fini_cmdlist(g2d);
+ return ret;
+ }
+ priv->g2d_dev = dev;
+
+ dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n",
+ G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
+ return 0;
+}
+
+static void g2d_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ /* Suspend operation and wait for engine idle. */
+ set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+ g2d_wait_finish(g2d, NULL);
+ priv->g2d_dev = NULL;
+
+ cancel_work_sync(&g2d->runqueue_work);
+ drm_iommu_detach_device(g2d->drm_dev, dev);
}
+static const struct component_ops g2d_component_ops = {
+ .bind = g2d_bind,
+ .unbind = g2d_unbind,
+};
+
static int g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct g2d_data *g2d;
- struct exynos_drm_subdrv *subdrv;
int ret;
g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
@@ -1564,22 +1509,12 @@ static int g2d_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, g2d);
- subdrv = &g2d->subdrv;
- subdrv->dev = dev;
- subdrv->probe = g2d_subdrv_probe;
- subdrv->remove = g2d_subdrv_remove;
- subdrv->open = g2d_open;
- subdrv->close = g2d_close;
-
- ret = exynos_drm_subdrv_register(subdrv);
+ ret = component_add(dev, &g2d_component_ops);
if (ret < 0) {
dev_err(dev, "failed to register drm g2d device\n");
goto err_put_clk;
}
- dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed.\n",
- G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
-
return 0;
err_put_clk:
@@ -1595,12 +1530,7 @@ static int g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
- /* Suspend operation and wait for engine idle. */
- set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
- g2d_wait_finish(g2d, NULL);
-
- cancel_work_sync(&g2d->runqueue_work);
- exynos_drm_subdrv_unregister(&g2d->subdrv);
+ component_del(&pdev->dev, &g2d_component_ops);
/* There should be no locking needed here. */
g2d_remove_runqueue_nodes(g2d, NULL);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
index 1a9c7ca8c15b..287b2ed8f178 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -14,6 +14,9 @@ extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+extern int g2d_open(struct drm_device *drm_dev, struct drm_file *file);
+extern void g2d_close(struct drm_device *drm_dev, struct drm_file *file);
#else
static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -33,4 +36,12 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
{
return -ENODEV;
}
+
+static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+{
+ return 0;
+}
+
+static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+{ }
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 6e1494fa71b4..34ace85feb68 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
@@ -171,26 +171,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
kfree(exynos_gem);
}
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv)
-{
- struct exynos_drm_gem *exynos_gem;
- struct drm_gem_object *obj;
-
- obj = drm_gem_object_lookup(file_priv, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return 0;
- }
-
- exynos_gem = to_exynos_gem(obj);
-
- drm_gem_object_unreference_unlocked(obj);
-
- return exynos_gem->size;
-}
-
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
@@ -299,43 +279,15 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp)
-{
- struct exynos_drm_gem *exynos_gem;
- struct drm_gem_object *obj;
-
- obj = drm_gem_object_lookup(filp, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return ERR_PTR(-EINVAL);
- }
-
- exynos_gem = to_exynos_gem(obj);
-
- return &exynos_gem->dma_addr;
-}
-
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp)
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+ unsigned int gem_handle)
{
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(filp, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return;
- }
-
- drm_gem_object_unreference_unlocked(obj);
-
- /*
- * decrease obj->refcount one more time because we has already
- * increased it at exynos_drm_gem_get_dma_addr().
- */
- drm_gem_object_unreference_unlocked(obj);
+ if (!obj)
+ return NULL;
+ return to_exynos_gem(obj);
}
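exynos_drm_gem_get() returns the object with the lookup reference still held, so every caller owns a put. A hedged usage sketch (handle name and error style assumed):

struct exynos_drm_gem *gem;

gem = exynos_drm_gem_get(filp, handle);
if (!gem)
	return -ENOENT;			/* no such handle */

/* ... use gem->dma_addr, gem->size ... */

exynos_drm_gem_put(gem);		/* drop the lookup reference */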
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
@@ -383,7 +335,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
args->flags = exynos_gem->flags;
args->size = exynos_gem->size;
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 9057d7f1d6ed..d46a62c30812 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -77,32 +77,26 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/*
- * get dma address from gem handle and this function could be used for
+ * get exynos drm object from gem handle; this function could be used for
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp);
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+ unsigned int gem_handle);
/*
- * put dma address from gem handle and this function could be used for
- * other drivers such as 2d/3d acceleration drivers.
- * with this function call, gem object reference count would be decreased.
+ * put exynos drm object acquired from exynos_drm_gem_get(),
+ * gem object reference count would be decreased.
*/
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp);
+static inline void exynos_drm_gem_put(struct exynos_drm_gem *exynos_gem)
+{
+ drm_gem_object_put_unlocked(&exynos_gem->base);
+}
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-/* get buffer size to gem handle. */
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv);
-
/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 35ac66730563..7ba414b52faa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
GSC_IN_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV61:
- cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
break;
case DRM_FORMAT_YUV422:
cfg |= GSC_IN_YUV422_3P;
break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_IN_YUV420_3P;
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
break;
}
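The corrected cases pair each two-plane fourcc with both its chroma order and its subsampling, which the old fall-throughs conflated. An illustrative summary table (not part of the patch):

/* 2-plane YUV fourccs handled above, summarized */
static const struct { u32 fourcc; bool cr_first; bool yuv422; } nv_map[] = {
	{ DRM_FORMAT_NV12, false, false },	/* 4:2:0, Cb then Cr */
	{ DRM_FORMAT_NV21, true,  false },	/* 4:2:0, Cr then Cb */
	{ DRM_FORMAT_NV16, false, true  },	/* 4:2:2, Cb then Cr */
	{ DRM_FORMAT_NV61, true,  true  },	/* 4:2:2, Cr then Cb */
};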
@@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
switch (degree) {
case DRM_MODE_ROTATE_0:
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg |= GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_90:
cfg |= GSC_IN_ROT_90;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg |= GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_180:
cfg |= GSC_IN_ROT_180;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg &= ~GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_270:
cfg |= GSC_IN_ROT_270;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg &= ~GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~GSC_IN_ROT_YFLIP;
break;
}
@@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx,
cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
GSC_SRCIMG_WIDTH_MASK);
- cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) |
+ cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
GSC_SRCIMG_HEIGHT(buf->buf.height));
gsc_write(cfg, GSC_SRCIMG_SIZE);
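Deriving the register value from the pitch makes it describe the real buffer layout rather than the crop. A worked example under assumed values:

/*
 * Sketch: XRGB8888 has cpp[0] == 4. A 1920-pixel crop inside a buffer
 * with an 8192-byte stride must program 8192 / 4 == 2048 as the
 * hardware image width, not 1920.
 */
unsigned int hw_width = buf->buf.pitch[0] / buf->format->cpp[0];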
@@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
GSC_OUT_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV61:
cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
break;
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
+ break;
case DRM_FORMAT_YUV422:
+ cfg |= GSC_OUT_YUV422_3P;
+ break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_OUT_YUV420_3P;
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
- GSC_OUT_YUV420_2P);
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
break;
}
@@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx,
/* original size */
cfg = gsc_read(GSC_DSTIMG_SIZE);
cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
- cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) |
+ cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
GSC_DSTIMG_HEIGHT(buf->buf.height);
gsc_write(cfg, GSC_DSTIMG_SIZE);
@@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
};
static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
- { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) },
+ { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 26374e58c557..23226a0212e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -345,39 +345,18 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
int ret = 0;
int i;
- /* basic checks */
- if (buf->buf.width == 0 || buf->buf.height == 0)
- return -EINVAL;
- buf->format = drm_format_info(buf->buf.fourcc);
- for (i = 0; i < buf->format->num_planes; i++) {
- unsigned int width = (i == 0) ? buf->buf.width :
- DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
-
- if (buf->buf.pitch[i] == 0)
- buf->buf.pitch[i] = width * buf->format->cpp[i];
- if (buf->buf.pitch[i] < width * buf->format->cpp[i])
- return -EINVAL;
- if (!buf->buf.gem_id[i])
- return -ENOENT;
- }
-
- /* pitch for additional planes must match */
- if (buf->format->num_planes > 2 &&
- buf->buf.pitch[1] != buf->buf.pitch[2])
- return -EINVAL;
-
/* get GEM buffers and check their size */
for (i = 0; i < buf->format->num_planes; i++) {
unsigned int height = (i == 0) ? buf->buf.height :
DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
unsigned long size = height * buf->buf.pitch[i];
- struct drm_gem_object *obj = drm_gem_object_lookup(filp,
+ struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
buf->buf.gem_id[i]);
- if (!obj) {
+ if (!gem) {
ret = -ENOENT;
goto gem_free;
}
- buf->exynos_gem[i] = to_exynos_gem(obj);
+ buf->exynos_gem[i] = gem;
if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
i++;
@@ -391,7 +370,7 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
return 0;
gem_free:
while (i--) {
- drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+ exynos_drm_gem_put(buf->exynos_gem[i]);
buf->exynos_gem[i] = NULL;
}
return ret;
@@ -404,7 +383,7 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
if (!buf->exynos_gem[0])
return;
for (i = 0; i < buf->format->num_planes; i++)
- drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+ exynos_drm_gem_put(buf->exynos_gem[i]);
}
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
@@ -428,7 +407,7 @@ enum drm_ipp_size_id {
IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};
-static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
+static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
[IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
[IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
@@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
struct drm_ipp_limit l;
struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
+ int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
if (!limits)
return 0;
__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
- if (!__size_limit_check(buf->buf.width, &l.h) ||
+ if (!__size_limit_check(real_width, &l.h) ||
!__size_limit_check(buf->buf.height, &l.v))
return -EINVAL;
@@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits(
return 0;
}
+static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
+ struct exynos_drm_ipp_buffer *buf,
+ struct exynos_drm_ipp_buffer *src,
+ struct exynos_drm_ipp_buffer *dst,
+ bool rotate, bool swap)
+{
+ const struct exynos_drm_ipp_formats *fmt;
+ int ret, i;
+
+ fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
+ buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
+ DRM_EXYNOS_IPP_FORMAT_DESTINATION);
+ if (!fmt) {
+ DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
+ buf == src ? "src" : "dst");
+ return -EINVAL;
+ }
+
+ /* basic checks */
+ if (buf->buf.width == 0 || buf->buf.height == 0)
+ return -EINVAL;
+
+ buf->format = drm_format_info(buf->buf.fourcc);
+ for (i = 0; i < buf->format->num_planes; i++) {
+ unsigned int width = (i == 0) ? buf->buf.width :
+ DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
+
+ if (buf->buf.pitch[i] == 0)
+ buf->buf.pitch[i] = width * buf->format->cpp[i];
+ if (buf->buf.pitch[i] < width * buf->format->cpp[i])
+ return -EINVAL;
+ if (!buf->buf.gem_id[i])
+ return -ENOENT;
+ }
+
+ /* pitch for additional planes must match */
+ if (buf->format->num_planes > 2 &&
+ buf->buf.pitch[1] != buf->buf.pitch[2])
+ return -EINVAL;
+
+ /* check driver limits */
+ ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
+ fmt->num_limits,
+ rotate,
+ buf == dst ? swap : false);
+ if (ret)
+ return ret;
+ ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
+ fmt->limits,
+ fmt->num_limits, swap);
+ return ret;
+}
+
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
struct exynos_drm_ipp *ipp = task->ipp;
- const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
unsigned int rotation = task->transform.rotation;
int ret = 0;
@@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
return -EINVAL;
}
- src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
- DRM_EXYNOS_IPP_FORMAT_SOURCE);
- if (!src_fmt) {
- DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
- return -EINVAL;
- }
- ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
- src_fmt->num_limits,
- rotate, false);
- if (ret)
- return ret;
- ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
- src_fmt->limits,
- src_fmt->num_limits, swap);
+ ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
if (ret)
return ret;
- dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
- DRM_EXYNOS_IPP_FORMAT_DESTINATION);
- if (!dst_fmt) {
- DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
- return -EINVAL;
- }
- ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
- dst_fmt->num_limits,
- false, swap);
- if (ret)
- return ret;
- ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
- dst_fmt->limits,
- dst_fmt->num_limits, swap);
+ ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 2174814273e2..2fd299a58297 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -367,6 +367,8 @@ static int exynos_mic_resume(struct device *dev)
static const struct dev_pm_ops exynos_mic_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_mic_suspend, exynos_mic_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
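The same shape is added to the hdmi and mixer pm_ops further down: the runtime-PM callbacks are reused for system sleep via the force helpers. Spelled out as a sketch (foo_* callbacks assumed):

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	/* reuse the runtime-PM callbacks for system suspend/resume */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};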
static int exynos_mic_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 38a2a7f1204b..dba29aec59b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
if (plane->state) {
exynos_state = to_exynos_plane_state(plane->state);
if (exynos_state->base.fb)
- drm_framebuffer_unreference(exynos_state->base.fb);
+ drm_framebuffer_put(exynos_state->base.fb);
kfree(exynos_state);
plane->state = NULL;
}
@@ -263,8 +263,6 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
if (!state->crtc)
return;
- plane->crtc = state->crtc;
-
if (exynos_crtc->ops->update_plane)
exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 1a76dd3d52e1..a820a68429b9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot,
val &= ~ROT_CONTROL_FLIP_MASK;
if (rotation & DRM_MODE_REFLECT_X)
- val |= ROT_CONTROL_FLIP_HORIZONTAL;
- if (rotation & DRM_MODE_REFLECT_Y)
val |= ROT_CONTROL_FLIP_VERTICAL;
+ if (rotation & DRM_MODE_REFLECT_Y)
+ val |= ROT_CONTROL_FLIP_HORIZONTAL;
val &= ~ROT_CONTROL_ROT_MASK;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 91d4382343d0..0ddb6eec7b11 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -30,6 +30,7 @@
#define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset))
#define SCALER_MAX_CLK 4
#define SCALER_AUTOSUSPEND_DELAY 2000
+#define SCALER_RESET_WAIT_RETRIES 100
struct scaler_data {
const char *clk_name[SCALER_MAX_CLK];
@@ -51,9 +52,9 @@ struct scaler_context {
static u32 scaler_get_format(u32 drm_fmt)
{
switch (drm_fmt) {
- case DRM_FORMAT_NV21:
- return SCALER_YUV420_2P_UV;
case DRM_FORMAT_NV12:
+ return SCALER_YUV420_2P_UV;
+ case DRM_FORMAT_NV21:
return SCALER_YUV420_2P_VU;
case DRM_FORMAT_YUV420:
return SCALER_YUV420_3P;
@@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt)
return SCALER_YUV422_1P_UYVY;
case DRM_FORMAT_YVYU:
return SCALER_YUV422_1P_YVYU;
- case DRM_FORMAT_NV61:
- return SCALER_YUV422_2P_UV;
case DRM_FORMAT_NV16:
+ return SCALER_YUV422_2P_UV;
+ case DRM_FORMAT_NV61:
return SCALER_YUV422_2P_VU;
case DRM_FORMAT_YUV422:
return SCALER_YUV422_3P;
- case DRM_FORMAT_NV42:
- return SCALER_YUV444_2P_UV;
case DRM_FORMAT_NV24:
+ return SCALER_YUV444_2P_UV;
+ case DRM_FORMAT_NV42:
return SCALER_YUV444_2P_VU;
case DRM_FORMAT_YUV444:
return SCALER_YUV444_3P;
@@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt)
return 0;
}
+static inline int scaler_reset(struct scaler_context *scaler)
+{
+ int retry = SCALER_RESET_WAIT_RETRIES;
+
+ scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
+ do {
+ cpu_relax();
+ } while (--retry > 1 &&
+ scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
+ do {
+ cpu_relax();
+ scaler_write(1, SCALER_INT_EN);
+ } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+
+ return retry ? 0 : -EIO;
+}
+
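If this path were allowed to sleep, the first wait could use the generic helper from <linux/iopoll.h> instead of the open-coded cpu_relax() loop; a sketch only, not what the patch does:

u32 cfg;
int ret;

/* poll SCALER_CFG every 10us, give up after 1ms */
ret = readl_poll_timeout(scaler->regs + SCALER_CFG, cfg,
			 !(cfg & SCALER_CFG_SOFT_RESET), 10, 1000);
if (ret)
	return ret;			/* -ETIMEDOUT */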
static inline void scaler_enable_int(struct scaler_context *scaler)
{
u32 val;
@@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
- scaler->task = task;
-
pm_runtime_get_sync(scaler->dev);
+ if (scaler_reset(scaler)) {
+ pm_runtime_put(scaler->dev);
+ return -EIO;
+ }
+
+ scaler->task = task;
scaler_set_src_fmt(scaler, src_fmt);
scaler_set_src_base(scaler, &task->src);
@@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler)
static inline u32 scaler_get_int_status(struct scaler_context *scaler)
{
- return scaler_read(SCALER_INT_STATUS);
+ u32 val = scaler_read(SCALER_INT_STATUS);
+
+ scaler_write(val, SCALER_INT_STATUS);
+
+ return val;
}
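Writing the just-read value back is the usual write-one-to-clear acknowledgment: the status register latches events until each set bit is written. The generic handler shape (illustrative foo_* names and FOO_INT_STATUS register):

static irqreturn_t foo_irq(int irq, void *arg)
{
	struct foo *ctx = arg;
	u32 status = readl(ctx->regs + FOO_INT_STATUS);

	if (!status)
		return IRQ_NONE;
	writel(status, ctx->regs + FOO_INT_STATUS);	/* ack what we saw */
	/* ... handle the latched bits ... */
	return IRQ_HANDLED;
}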
static inline int scaler_task_done(u32 val)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e6b0940b1ac2..19697c1362d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -319,7 +319,7 @@ static int vidi_get_modes(struct drm_connector *connector)
return -ENOMEM;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
return drm_add_edid_modes(connector, edid);
}
@@ -344,7 +344,7 @@ static int vidi_create_connector(struct drm_encoder *encoder)
}
drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index db91932550cf..2092a650df7d 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -888,7 +888,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
edid->width_cm, edid->height_cm);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -951,7 +951,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
}
drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (hdata->bridge) {
ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
@@ -2093,6 +2093,8 @@ static int __maybe_unused exynos_hdmi_resume(struct device *dev)
static const struct dev_pm_ops exynos_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver hdmi_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 272c79f5f5bf..ffbf4a950f69 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -837,8 +837,6 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
struct drm_device *drm_dev)
{
int ret;
- struct exynos_drm_private *priv;
- priv = drm_dev->dev_private;
mixer_ctx->drm_dev = drm_dev;
@@ -1271,6 +1269,8 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev)
static const struct dev_pm_ops exynos_mixer_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver mixer_driver = {
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 4704a993cbb7..16b39734115c 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -138,6 +138,7 @@
#define GSC_OUT_YUV420_3P (3 << 4)
#define GSC_OUT_YUV422_1P (4 << 4)
#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV422_3P (6 << 4)
#define GSC_OUT_YUV444 (7 << 4)
#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
#define GSC_OUT_TILE_C_16x8 (0 << 2)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index c54806d08dd7..2298ed2a9e1c 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -117,7 +117,7 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
if (ret < 0)
goto err_cleanup;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
goto err_sysfs;
@@ -148,8 +148,9 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
if (panel_node) {
fsl_dev->connector.panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
- if (!fsl_dev->connector.panel)
- return -EPROBE_DEFER;
+ if (IS_ERR(fsl_dev->connector.panel))
+ return PTR_ERR(fsl_dev->connector.panel);
+
return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
}
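of_drm_find_panel() changed from returning NULL to returning ERR_PTR() codes, so callers now propagate the error instead of hard-coding -EPROBE_DEFER. The caller shape (sketch):

struct drm_panel *panel = of_drm_find_panel(np);

if (IS_ERR(panel))
	return PTR_ERR(panel);	/* e.g. -EPROBE_DEFER or -ENODEV */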
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index c51d9259c7a7..204c8e452eb7 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -251,7 +251,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
if (!fb)
return;
- offset = psbfb->gtt->offset;
+ offset = to_gtt_range(fb->obj[0])->offset;
stride = fb->pitches[0];
switch (fb->format->depth) {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 5ea785f07ba8..90ed20083009 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1770,7 +1770,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index f0878998526a..4e4e4a66eaee 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -216,7 +216,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index cb0a2ae916e0..2f00a37684a2 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -33,6 +33,7 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
@@ -40,14 +41,9 @@
#include "framebuffer.h"
#include "gtt.h"
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle);
-
static const struct drm_framebuffer_funcs psb_fb_funcs = {
- .destroy = psb_user_framebuffer_destroy,
- .create_handle = psb_user_framebuffer_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
@@ -96,17 +92,18 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
struct psb_fbdev *fbdev = info->par;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
+ struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
/*
* We have to poke our nose in here. The core fb code assumes
* panning is part of the hardware that can be invoked before
* the actual fb is mapped. In our case that isn't quite true.
*/
- if (psbfb->gtt->npage) {
+ if (gtt->npage) {
/* GTT roll shifts in 4K pages, we need to shift the right
number of pages */
int pages = info->fix.line_length >> 12;
- psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+ psb_gtt_roll(dev, gtt, var->yoffset * pages);
}
return 0;
}
@@ -117,13 +114,14 @@ static int psbfb_vm_fault(struct vm_fault *vmf)
struct psb_framebuffer *psbfb = vma->vm_private_data;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
int page_num;
int i;
unsigned long address;
int ret;
unsigned long pfn;
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
- psbfb->gtt->offset;
+ gtt->offset;
page_num = vma_pages(vma);
address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
@@ -246,7 +244,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
return -EINVAL;
drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
- fb->gtt = gt;
+ fb->base.obj[0] = &gt->gem;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
@@ -518,8 +516,8 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
drm_framebuffer_unregister_private(&psbfb->base);
drm_framebuffer_cleanup(&psbfb->base);
- if (psbfb->gtt)
- drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
+ if (psbfb->base.obj[0])
+ drm_gem_object_put_unlocked(psbfb->base.obj[0]);
return 0;
}
@@ -576,44 +574,6 @@ static void psb_fbdev_fini(struct drm_device *dev)
dev_priv->fbdev = NULL;
}
-/**
- * psb_user_framebuffer_create_handle - add hamdle to a framebuffer
- * @fb: framebuffer
- * @file_priv: our DRM file
- * @handle: returned handle
- *
- * Our framebuffer object is a GTT range which also contains a GEM
- * object. We need to turn it into a handle for userspace. GEM will do
- * the work for us
- */
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct gtt_range *r = psbfb->gtt;
- return drm_gem_handle_create(file_priv, &r->gem, handle);
-}
-
-/**
- * psb_user_framebuffer_destroy - destruct user created fb
- * @fb: framebuffer
- *
- * User framebuffers are backed by GEM objects so all we have to do is
- * clean up a bit and drop the reference, GEM will handle the fallout
- */
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct gtt_range *r = psbfb->gtt;
-
- /* Let DRM do its clean up */
- drm_framebuffer_cleanup(fb);
- /* We are no longer using the resource in GEM */
- drm_gem_object_unreference_unlocked(&r->gem);
- kfree(fb);
-}
-
static const struct drm_mode_config_funcs psb_mode_funcs = {
.fb_create = psb_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 395f20b07aab..23dc3c5f8f0d 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -31,7 +31,6 @@ struct psb_framebuffer {
struct drm_framebuffer base;
struct address_space *addr_space;
struct fb_info *fbdev;
- struct gtt_range *gtt;
};
struct psb_fbdev {
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 131239759a75..913bf4c256fa 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -93,7 +93,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret;
}
/* We have the initial and handle reference but need only one now */
- drm_gem_object_unreference_unlocked(&r->gem);
+ drm_gem_object_put_unlocked(&r->gem);
*handlep = handle;
return 0;
}
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index f3c48a2be71b..09c1161a7ac6 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -60,7 +60,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *gtt;
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -76,12 +76,14 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
goto gma_pipe_cleaner;
}
+ gtt = to_gtt_range(fb->obj[0]);
+
/* We are displaying this buffer, make sure it is actually loaded
into the GTT */
- ret = psb_gtt_pin(psbfb->gtt);
+ ret = psb_gtt_pin(gtt);
if (ret < 0)
goto gma_pipe_set_base_exit;
- start = psbfb->gtt->offset;
+ start = gtt->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
@@ -129,7 +131,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
gma_pipe_cleaner:
/* If there was a previous display we can now unpin it */
if (old_fb)
- psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+ psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));
gma_pipe_set_base_exit:
gma_power_end(dev);
@@ -353,7 +355,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
gt = container_of(gma_crtc->cursor_obj,
struct gtt_range, gem);
psb_gtt_unpin(gt);
- drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
+ drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
gma_crtc->cursor_obj = NULL;
}
return 0;
@@ -429,7 +431,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
if (gma_crtc->cursor_obj) {
gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
psb_gtt_unpin(gt);
- drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
+ drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
}
gma_crtc->cursor_obj = obj;
@@ -437,7 +439,7 @@ unlock:
return ret;
unref_cursor:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -491,7 +493,7 @@ void gma_crtc_disable(struct drm_crtc *crtc)
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
- gt = to_psb_fb(crtc->primary->fb)->gtt;
+ gt = to_gtt_range(crtc->primary->fb->obj[0]);
psb_gtt_unpin(gt);
}
}
@@ -663,7 +665,7 @@ void gma_connector_attach_encoder(struct gma_connector *connector,
struct gma_encoder *encoder)
{
connector->encoder = encoder;
- drm_mode_connector_attach_encoder(&connector->base,
+ drm_connector_attach_encoder(&connector->base,
&encoder->base);
}
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index cdbb350c9d5d..cb0c3a2a1fd4 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -53,6 +53,8 @@ struct gtt_range {
int roll; /* Roll applied to the GTT entries */
};
+#define to_gtt_range(x) container_of(x, struct gtt_range, gem)
+
extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
const char *name, int backed,
u32 align);
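to_gtt_range() is a plain container_of() downcast: given the GEM object embedded in a gtt_range, it recovers the wrapper. Illustration of the call sites introduced above:

/* fb->obj[0] is known to be embedded in a struct gtt_range */
struct gtt_range *gt = to_gtt_range(fb->obj[0]);

start = gt->offset;			/* instead of psbfb->gtt->offset */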
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 978ae4b25e82..e0ccf1d19a4d 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -34,7 +34,7 @@ struct vbt_header {
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
-} __attribute__((packed));
+} __packed;
struct bdb_header {
@@ -61,7 +61,7 @@ struct vbios_data {
u8 rsvd4; /* popup memory size */
u8 resize_pci_bios;
u8 rsvd5; /* is crt already on ddc2 */
-} __attribute__((packed));
+} __packed;
/*
* There are several types of BIOS data blocks (BDBs), each block has
@@ -133,7 +133,7 @@ struct bdb_general_features {
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 rsvd11:3; /* finish byte */
-} __attribute__((packed));
+} __packed;
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
@@ -213,7 +213,7 @@ struct child_device_config {
u8 dvo2_wiring;
u16 extended_type;
u8 dvo_function;
-} __attribute__((packed));
+} __packed;
struct bdb_general_definitions {
@@ -256,7 +256,7 @@ struct bdb_lvds_options {
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_backlight {
u8 type:2;
@@ -268,7 +268,7 @@ struct bdb_lvds_backlight {
u8 i2caddr;
u8 brightnesscmd;
/*FIXME: more...*/
-} __attribute__((packed));
+} __packed;
/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
@@ -278,12 +278,12 @@ struct bdb_lvds_lfp_data_ptr {
u8 dvo_table_size;
u16 panel_pnp_id_offset;
u8 pnp_table_size;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
struct bdb_lvds_lfp_data_ptr ptr[16];
-} __attribute__((packed));
+} __packed;
/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
@@ -300,7 +300,7 @@ struct lvds_fp_timing {
u32 pfit_reg;
u32 pfit_reg_val;
u16 terminator;
-} __attribute__((packed));
+} __packed;
struct lvds_dvo_timing {
u16 clock; /**< In 10khz */
@@ -328,7 +328,7 @@ struct lvds_dvo_timing {
u8 vsync_positive:1;
u8 hsync_positive:1;
u8 rsvd2:1;
-} __attribute__((packed));
+} __packed;
struct lvds_pnp_id {
u16 mfg_name;
@@ -336,17 +336,17 @@ struct lvds_pnp_id {
u32 serial;
u8 mfg_week;
u8 mfg_year;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
struct lvds_pnp_id pnp_id;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data {
struct bdb_lvds_lfp_data_entry data[16];
-} __attribute__((packed));
+} __packed;
struct aimdb_header {
char signature[16];
@@ -354,12 +354,12 @@ struct aimdb_header {
u16 aimdb_version;
u16 aimdb_header_size;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct aimdb_block {
u8 aimdb_id;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct vch_panel_data {
u16 fp_timing_offset;
@@ -370,12 +370,12 @@ struct vch_panel_data {
u8 text_fitting_size;
u16 graphics_fitting_offset;
u8 graphics_fitting_size;
-} __attribute__((packed));
+} __packed;
struct vch_bdb_22 {
struct aimdb_block aimdb_block;
struct vch_panel_data panels[16];
-} __attribute__((packed));
+} __packed;
struct bdb_sdvo_lvds_options {
u8 panel_backlight;
@@ -391,7 +391,7 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_2;
u8 panel_misc_bits_3;
u8 panel_misc_bits_4;
-} __attribute__((packed));
+} __packed;
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
@@ -436,7 +436,7 @@ struct bdb_driver_features {
u8 hdmi_termination;
u8 custom_vbt_version;
-} __attribute__((packed));
+} __packed;
#define EDP_18BPP 0
#define EDP_24BPP 1
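__packed is the kernel's spelling of __attribute__((packed)), defined in the compiler attribute headers; the object layout is identical. Example:

struct fw_record {
	u8  id;
	u32 offset;	/* deliberately unaligned; layout fixed by firmware */
} __packed;		/* same layout as __attribute__((packed)) */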
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index a05c020602bd..d0bf5a1e94e8 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -999,7 +999,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
p_funcs->encoder_helper_funcs);
/*attach to given connector*/
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
/*set possible crtcs and clones*/
if (dsi_connector->pipe) {
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 5c066448be5b..2b9fa0163dea 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -167,7 +167,6 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -196,7 +195,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (!gma_power_begin(dev, true))
return 0;
- start = psbfb->gtt->offset;
+ start = to_gtt_range(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 0fff269d3fe6..1b7fd6a9d8a5 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -600,7 +600,6 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -617,7 +616,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
- start = psbfb->gtt->offset;
+ start = to_gtt_range(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 78566a80ad25..c6d72de1c054 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -578,7 +578,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
}
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
}
return ret;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index e6943fef0611..83babb815a5d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -376,7 +376,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
* preferred mode is the right one.
*/
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
kfree(edid);
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index e5360726d80b..fb4da3cd6681 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -66,7 +66,7 @@ int psb_intel_ddc_get_modes(struct drm_connector *connector,
edid = drm_get_edid(connector, adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index f2ee6aa10afa..dd3cec0e3190 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -429,13 +429,20 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
+#define MAX_ARG_LEN 32
+
static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- u8 buf[args_len*2 + 2], status;
- struct i2c_msg msgs[args_len + 3];
+ u8 buf[MAX_ARG_LEN*2 + 2], status;
+ struct i2c_msg msgs[MAX_ARG_LEN + 3];
int i, ret;
+ if (args_len > MAX_ARG_LEN) {
+ DRM_ERROR("Need to increase arg length\n");
+ return false;
+ }
+
psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
@@ -1465,7 +1472,7 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
if (connector_is_digital == monitor_is_digital) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
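The same VLA-removal shape appears in tda9950.c and tda998x_drv.c below: a fixed worst-case buffer plus an explicit length guard replaces the variable-length array. A generic sketch (FOO_HEADER and foo_xfer() are placeholders):

#define FOO_MAX_LEN 32

static int foo_send(struct foo *ctx, const u8 *p, int len)
{
	u8 buf[FOO_MAX_LEN + 1];	/* worst case, sized at build time */

	if (WARN_ON(len > FOO_MAX_LEN))
		return -EINVAL;

	buf[0] = FOO_HEADER;		/* placeholder header byte */
	memcpy(buf + 1, p, len);
	return foo_xfer(ctx, buf, len + 1);	/* hypothetical transfer */
}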
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index d2f4749ebf8d..744956cea749 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -133,7 +133,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
}
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 2269be91f3e1..bb774202a5a1 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -859,7 +859,6 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
if (src_w != crtc_w || src_h != crtc_h) {
- DRM_ERROR("Scale not support!!!\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 3f7396caad48..5d2f0d548469 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -76,9 +76,12 @@ struct tda9950_priv {
static int tda9950_write_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
{
struct i2c_msg msg;
- u8 buf[cnt + 1];
+ u8 buf[CEC_MAX_MSG_SIZE + 3];
int ret;
+ if (WARN_ON(cnt > sizeof(buf) - 1))
+ return -EINVAL;
+
buf[0] = addr;
memcpy(buf + 1, p, cnt);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 6ebd8842dbcc..a7c39f39793f 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -69,6 +69,7 @@ struct tda998x_priv {
bool edid_delay_active;
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct drm_connector connector;
struct tda998x_audio_port audio_port[2];
@@ -79,9 +80,10 @@ struct tda998x_priv {
#define conn_to_tda998x_priv(x) \
container_of(x, struct tda998x_priv, connector)
-
#define enc_to_tda998x_priv(x) \
container_of(x, struct tda998x_priv, encoder)
+#define bridge_to_tda998x_priv(x) \
+ container_of(x, struct tda998x_priv, bridge)
/* The TDA9988 series of devices use a paged register scheme.. to simplify
* things we encode the page # in upper bits of the register #. To read/
@@ -589,13 +591,22 @@ out:
return ret;
}
+#define MAX_WRITE_RANGE_BUF 32
+
static void
reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
{
struct i2c_client *client = priv->hdmi;
- u8 buf[cnt+1];
+ /* This is the maximum size of the buffer passed in */
+ u8 buf[MAX_WRITE_RANGE_BUF + 1];
int ret;
+ if (cnt > MAX_WRITE_RANGE_BUF) {
+ dev_err(&client->dev, "Fixed write buffer too small (%d)\n",
+ MAX_WRITE_RANGE_BUF);
+ return;
+ }
+
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
@@ -753,7 +764,7 @@ static void tda998x_detect_work(struct work_struct *work)
{
struct tda998x_priv *priv =
container_of(work, struct tda998x_priv, detect_work);
- struct drm_device *dev = priv->encoder.dev;
+ struct drm_device *dev = priv->connector.dev;
if (dev)
drm_kms_helper_hotplug_event(dev);
@@ -805,7 +816,7 @@ static void
tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
union hdmi_infoframe *frame)
{
- u8 buf[32];
+ u8 buf[MAX_WRITE_RANGE_BUF];
ssize_t len;
len = hdmi_infoframe_pack(frame, buf, sizeof(buf));
@@ -1095,29 +1106,6 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
/* DRM connector functions */
-static int tda998x_connector_fill_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- int ret;
-
- mutex_lock(&priv->audio_mutex);
- ret = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
-
- if (connector->edid_blob_ptr) {
- struct edid *edid = (void *)connector->edid_blob_ptr->data;
-
- cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
-
- priv->sink_has_audio = drm_detect_monitor_audio(edid);
- } else {
- priv->sink_has_audio = false;
- }
- mutex_unlock(&priv->audio_mutex);
-
- return ret;
-}
-
static enum drm_connector_status
tda998x_connector_detect(struct drm_connector *connector, bool force)
{
@@ -1136,7 +1124,7 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs tda998x_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .fill_modes = tda998x_connector_fill_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.detect = tda998x_connector_detect,
.destroy = tda998x_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1234,41 +1222,30 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
return 0;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
+ cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
+
+ mutex_lock(&priv->audio_mutex);
n = drm_add_edid_modes(connector, edid);
+ priv->sink_has_audio = drm_detect_monitor_audio(edid);
+ mutex_unlock(&priv->audio_mutex);
kfree(edid);
return n;
}
-static enum drm_mode_status tda998x_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* TDA19988 dotclock can go up to 165MHz */
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
-
- if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
- return MODE_CLOCK_HIGH;
- if (mode->htotal >= BIT(13))
- return MODE_BAD_HVALUE;
- if (mode->vtotal >= BIT(11))
- return MODE_BAD_VVALUE;
- return MODE_OK;
-}
-
static struct drm_encoder *
tda998x_connector_best_encoder(struct drm_connector *connector)
{
struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- return &priv->encoder;
+ return priv->bridge.encoder;
}
static
const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
.get_modes = tda998x_connector_get_modes,
- .mode_valid = tda998x_connector_mode_valid,
.best_encoder = tda998x_connector_best_encoder,
};
@@ -1292,25 +1269,48 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
if (ret)
return ret;
- drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+ drm_connector_attach_encoder(&priv->connector,
+ priv->bridge.encoder);
return 0;
}
-/* DRM encoder functions */
+/* DRM bridge functions */
-static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+static int tda998x_bridge_attach(struct drm_bridge *bridge)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
- bool on;
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
- /* we only care about on or off: */
- on = mode == DRM_MODE_DPMS_ON;
+ return tda998x_connector_init(priv, bridge->dev);
+}
- if (on == priv->is_on)
- return;
+static void tda998x_bridge_detach(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
- if (on) {
+ drm_connector_cleanup(&priv->connector);
+}
+
+static enum drm_mode_status tda998x_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ /* TDA19988 dotclock can go up to 165MHz */
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
+ return MODE_CLOCK_HIGH;
+ if (mode->htotal >= BIT(13))
+ return MODE_BAD_HVALUE;
+ if (mode->vtotal >= BIT(11))
+ return MODE_BAD_VVALUE;
+ return MODE_OK;
+}
+
+static void tda998x_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (!priv->is_on) {
/* enable video ports, audio will be enabled later */
reg_write(priv, REG_ENA_VP_0, 0xff);
reg_write(priv, REG_ENA_VP_1, 0xff);
@@ -1321,7 +1321,14 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
priv->is_on = true;
- } else {
+ }
+}
+
+static void tda998x_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (priv->is_on) {
/* disable video ports */
reg_write(priv, REG_ENA_VP_0, 0x00);
reg_write(priv, REG_ENA_VP_1, 0x00);
@@ -1331,12 +1338,12 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void
-tda998x_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+ unsigned long tmds_clock;
u16 ref_pix, ref_line, n_pix, n_line;
u16 hs_pix_s, hs_pix_e;
u16 vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
@@ -1407,12 +1414,19 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
(mode->vsync_end - mode->vsync_start)/2;
}
- div = 148500 / mode->clock;
- if (div != 0) {
- div--;
- if (div > 3)
- div = 3;
- }
+ tmds_clock = mode->clock;
+
+ /*
+ * The divisor is power-of-2. The TDA9983B datasheet gives
+ * this as ranges of Msample/s, which is 10x the TMDS clock:
+ * 0 - 800 to 1500 Msample/s
+ * 1 - 400 to 800 Msample/s
+ * 2 - 200 to 400 Msample/s
+ * 3 - as 2 above
+ */
+ for (div = 0; div < 3; div++)
+ if (80000 >> div <= tmds_clock)
+ break;
mutex_lock(&priv->audio_mutex);
@@ -1543,26 +1557,14 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
mutex_unlock(&priv->audio_mutex);
}
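Worked example for the divisor loop above (sketch):

/*
 *   148500 kHz (1080p60): 80000 >> 0 == 80000 <= 148500 -> div = 0
 *    25200 kHz:           fails 80000 and 40000, but
 *                         80000 >> 2 == 20000 <= 25200  -> div = 2
 *   below 20000 kHz:      no match, loop runs out       -> div = 3
 */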
-static void tda998x_destroy(struct tda998x_priv *priv)
-{
- /* disable all IRQs and free the IRQ handler */
- cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
- reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-
- if (priv->audio_pdev)
- platform_device_unregister(priv->audio_pdev);
-
- if (priv->hdmi->irq)
- free_irq(priv->hdmi->irq, priv);
-
- del_timer_sync(&priv->edid_delay_timer);
- cancel_work_sync(&priv->detect_work);
-
- i2c_unregister_device(priv->cec);
-
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
-}
+static const struct drm_bridge_funcs tda998x_bridge_funcs = {
+ .attach = tda998x_bridge_attach,
+ .detach = tda998x_bridge_detach,
+ .mode_valid = tda998x_bridge_mode_valid,
+ .disable = tda998x_bridge_disable,
+ .mode_set = tda998x_bridge_mode_set,
+ .enable = tda998x_bridge_enable,
+};
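With the funcs table registered via drm_bridge_add(), a master driver picks the bridge up in the usual way; a hedged sketch of the consumer side (not part of this patch):

struct drm_bridge *bridge = of_drm_find_bridge(remote_node);

if (!bridge)
	return -EPROBE_DEFER;		/* tda998x not probed yet */

ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret)
	return ret;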
/* I2C driver functions */
@@ -1608,16 +1610,69 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
return 0;
}
-static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
+static void tda998x_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+ priv->audio_params = p->audio_params;
+}
+
+static void tda998x_destroy(struct device *dev)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+ drm_bridge_remove(&priv->bridge);
+
+ /* disable all IRQs and free the IRQ handler */
+ cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
+ reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ if (priv->audio_pdev)
+ platform_device_unregister(priv->audio_pdev);
+
+ if (priv->hdmi->irq)
+ free_irq(priv->hdmi->irq, priv);
+
+ del_timer_sync(&priv->edid_delay_timer);
+ cancel_work_sync(&priv->detect_work);
+
+ i2c_unregister_device(priv->cec);
+
+ if (priv->cec_notify)
+ cec_notifier_put(priv->cec_notify);
+}
+
+static int tda998x_create(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
struct device_node *np = client->dev.of_node;
struct i2c_board_info cec_info;
+ struct tda998x_priv *priv;
u32 video;
int rev_lo, rev_hi, ret;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
mutex_init(&priv->mutex); /* protect the page access */
mutex_init(&priv->audio_mutex); /* protect access from audio thread */
mutex_init(&priv->edid_mutex);
+ INIT_LIST_HEAD(&priv->bridge.list);
init_waitqueue_head(&priv->edid_delay_waitq);
timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
INIT_WORK(&priv->detect_work, tda998x_detect_work);
@@ -1640,13 +1695,13 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* read version: */
rev_lo = reg_read(priv, REG_VERSION_LSB);
if (rev_lo < 0) {
- dev_err(&client->dev, "failed to read version: %d\n", rev_lo);
+ dev_err(dev, "failed to read version: %d\n", rev_lo);
return rev_lo;
}
rev_hi = reg_read(priv, REG_VERSION_MSB);
if (rev_hi < 0) {
- dev_err(&client->dev, "failed to read version: %d\n", rev_hi);
+ dev_err(dev, "failed to read version: %d\n", rev_hi);
return rev_hi;
}
@@ -1657,20 +1712,19 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
switch (priv->rev) {
case TDA9989N2:
- dev_info(&client->dev, "found TDA9989 n2");
+ dev_info(dev, "found TDA9989 n2");
break;
case TDA19989:
- dev_info(&client->dev, "found TDA19989");
+ dev_info(dev, "found TDA19989");
break;
case TDA19989N2:
- dev_info(&client->dev, "found TDA19989 n2");
+ dev_info(dev, "found TDA19989 n2");
break;
case TDA19988:
- dev_info(&client->dev, "found TDA19988");
+ dev_info(dev, "found TDA19988");
break;
default:
- dev_err(&client->dev, "found unsupported device: %04x\n",
- priv->rev);
+ dev_err(dev, "found unsupported device: %04x\n", priv->rev);
return -ENXIO;
}
@@ -1713,8 +1767,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
tda998x_irq_thread, irq_flags,
"tda998x", priv);
if (ret) {
- dev_err(&client->dev,
- "failed to request IRQ#%u: %d\n",
+ dev_err(dev, "failed to request IRQ#%u: %d\n",
client->irq, ret);
goto err_irq;
}
@@ -1723,13 +1776,13 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
}
- priv->cec_notify = cec_notifier_get(&client->dev);
+ priv->cec_notify = cec_notifier_get(dev);
if (!priv->cec_notify) {
ret = -ENOMEM;
goto fail;
}
- priv->cec_glue.parent = &client->dev;
+ priv->cec_glue.parent = dev;
priv->cec_glue.data = priv;
priv->cec_glue.init = tda998x_cec_hook_init;
priv->cec_glue.exit = tda998x_cec_hook_exit;
@@ -1759,61 +1812,44 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* enable EDID read irq: */
reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
- if (!np)
- return 0; /* non-DT */
+ if (np) {
+ /* get the device tree parameters */
+ ret = of_property_read_u32(np, "video-ports", &video);
+ if (ret == 0) {
+ priv->vip_cntrl_0 = video >> 16;
+ priv->vip_cntrl_1 = video >> 8;
+ priv->vip_cntrl_2 = video;
+ }
+
+ ret = tda998x_get_audio_ports(priv, np);
+ if (ret)
+ goto fail;
- /* get the device tree parameters */
- ret = of_property_read_u32(np, "video-ports", &video);
- if (ret == 0) {
- priv->vip_cntrl_0 = video >> 16;
- priv->vip_cntrl_1 = video >> 8;
- priv->vip_cntrl_2 = video;
+ if (priv->audio_port[0].format != AFMT_UNUSED)
+ tda998x_audio_codec_init(priv, &client->dev);
+ } else if (dev->platform_data) {
+ tda998x_set_config(priv, dev->platform_data);
}
- ret = tda998x_get_audio_ports(priv, np);
- if (ret)
- goto fail;
+ priv->bridge.funcs = &tda998x_bridge_funcs;
+#ifdef CONFIG_OF
+ priv->bridge.of_node = dev->of_node;
+#endif
- if (priv->audio_port[0].format != AFMT_UNUSED)
- tda998x_audio_codec_init(priv, &client->dev);
+ drm_bridge_add(&priv->bridge);
return 0;
fail:
- /* if encoder_init fails, the encoder slave is never registered,
- * so cleanup here:
- */
- i2c_unregister_device(priv->cec);
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
- if (client->irq)
- free_irq(client->irq, priv);
+ tda998x_destroy(dev);
err_irq:
return ret;
}
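
For the DT path above, the three VIP control bytes arrive packed into a single 32-bit "video-ports" property and are split by the shifts shown; a worked example with a hypothetical property value (the vip_cntrl_* fields are u8, so each shift truncates to one byte):

	/* video-ports = <0x230145>; would split as:
	 *   vip_cntrl_0 = 0x23   (video >> 16)
	 *   vip_cntrl_1 = 0x01   (video >> 8)
	 *   vip_cntrl_2 = 0x45   (video)
	 */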
-static void tda998x_encoder_prepare(struct drm_encoder *encoder)
-{
- tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void tda998x_encoder_commit(struct drm_encoder *encoder)
-{
- tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
- .dpms = tda998x_encoder_dpms,
- .prepare = tda998x_encoder_prepare,
- .commit = tda998x_encoder_commit,
- .mode_set = tda998x_encoder_mode_set,
-};
+/* DRM encoder functions */
static void tda998x_encoder_destroy(struct drm_encoder *encoder)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
-
- tda998x_destroy(priv);
drm_encoder_cleanup(encoder);
}
@@ -1821,40 +1857,12 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
.destroy = tda998x_encoder_destroy,
};
-static void tda998x_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
-{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-
- priv->audio_params = p->audio_params;
-}
-
-static int tda998x_bind(struct device *dev, struct device *master, void *data)
+static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
{
- struct tda998x_encoder_params *params = dev->platform_data;
- struct i2c_client *client = to_i2c_client(dev);
- struct drm_device *drm = data;
- struct tda998x_priv *priv;
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
u32 crtcs = 0;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- dev_set_drvdata(dev, priv);
-
if (dev->of_node)
crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -1866,40 +1874,36 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
priv->encoder.possible_crtcs = crtcs;
- ret = tda998x_create(client, priv);
- if (ret)
- return ret;
-
- if (!dev->of_node && params)
- tda998x_set_config(priv, params);
-
- drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
goto err_encoder;
- ret = tda998x_connector_init(priv, drm);
+ ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
if (ret)
- goto err_connector;
+ goto err_bridge;
return 0;
-err_connector:
+err_bridge:
drm_encoder_cleanup(&priv->encoder);
err_encoder:
- tda998x_destroy(priv);
return ret;
}
+static int tda998x_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+
+ return tda998x_encoder_init(dev, drm);
+}
+
static void tda998x_unbind(struct device *dev, struct device *master,
void *data)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
- drm_connector_cleanup(&priv->connector);
drm_encoder_cleanup(&priv->encoder);
- tda998x_destroy(priv);
}
static const struct component_ops tda998x_ops = {
@@ -1910,16 +1914,27 @@ static const struct component_ops tda998x_ops = {
static int
tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ int ret;
+
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_warn(&client->dev, "adapter does not support I2C\n");
return -EIO;
}
- return component_add(&client->dev, &tda998x_ops);
+
+ ret = tda998x_create(&client->dev);
+ if (ret)
+ return ret;
+
+ ret = component_add(&client->dev, &tda998x_ops);
+ if (ret)
+ tda998x_destroy(&client->dev);
+ return ret;
}
static int tda998x_remove(struct i2c_client *client)
{
component_del(&client->dev, &tda998x_ops);
+ tda998x_destroy(&client->dev);
return 0;
}
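
With this split, the I2C probe owns the hardware lifetime (tda998x_create()/tda998x_destroy()) while the component bind only creates the encoder and attaches the already-registered bridge. A minimal sketch of how a DRM master driver would consume the component after this change; the master-side names here are hypothetical:

	static int example_master_bind(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);

		/* Invokes tda998x_bind() for each matched component, which
		 * now only calls tda998x_encoder_init(): encoder creation
		 * plus drm_bridge_attach(); no hardware setup happens here. */
		return component_bind_all(dev, drm);
	}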
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 576a417690d4..3b378936f575 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -934,7 +934,7 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
DRM_DEBUG("idx %d used %d discard %d\n",
vertex->idx, vertex->used, vertex->discard);
- if (vertex->idx < 0 || vertex->idx > dma->buf_count)
+ if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
return -EINVAL;
i810_dma_dispatch_vertex(dev,
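
The i810 change fixes the classic inclusive-upper-bound error: dma->buf_count buffers occupy indices 0..buf_count-1, so an index equal to buf_count must be rejected. A standalone sketch of the corrected check, outside the driver:

	#include <stdbool.h>

	/* Valid indices into an array of 'count' buffers are 0..count-1;
	 * the old '> count' test let idx == count slip through and read
	 * one element past the end of the array. */
	static bool idx_is_valid(int idx, int count)
	{
		return idx >= 0 && idx < count;
	}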
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index dfd95889f4b7..33a458b7f1fc 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -23,6 +23,8 @@ config DRM_I915
select SYNC_FILE
select IOSF_MBI
select CRC32
+ select SND_HDA_I915 if SND_HDA_CORE
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 9de8b1c51a5c..459f8f88a34c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -51,6 +51,18 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_ERRLOG_GEM
+ bool "Insert extra logging (very verbose) for common GEM errors"
+ default n
+ depends on DRM_I915_DEBUG_GEM
+ help
+ Enable additional logging that may help track down the cause of
+ principally userspace errors.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_TRACE_GEM
bool "Insert extra ftrace output from the GEM internals"
depends on DRM_I915_DEBUG_GEM
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 4c6adae23e18..5794f102f9b8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -135,15 +135,14 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
+ icl_dsi.o \
intel_crt.o \
intel_ddi.o \
intel_dp_aux_backlight.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
- intel_dsi.o \
intel_dsi_dcs_backlight.o \
- intel_dsi_pll.o \
intel_dsi_vbt.o \
intel_dvo.o \
intel_hdmi.o \
@@ -152,7 +151,9 @@ i915-y += dvo_ch7017.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
- intel_tv.o
+ intel_tv.o \
+ vlv_dsi.o \
+ vlv_dsi_pll.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 80b3e16cf48c..caac9942e1e3 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,7 +159,7 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
- uint8_t dummy;
+ u8 dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -186,7 +186,7 @@ static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
- uint8_t buf[2] = { addr, val };
+ u8 buf[2] = { addr, val };
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -258,11 +258,11 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
- uint8_t outputs_enable, lvds_control_2, lvds_power_down;
- uint8_t horizontal_active_pixel_input;
- uint8_t horizontal_active_pixel_output, vertical_active_line_output;
- uint8_t active_input_line_output;
+ u8 lvds_pll_feedback_div, lvds_pll_vco_control;
+ u8 outputs_enable, lvds_control_2, lvds_power_down;
+ u8 horizontal_active_pixel_input;
+ u8 horizontal_active_pixel_output, vertical_active_line_output;
+ u8 active_input_line_output;
DRM_DEBUG_KMS("Registers before mode setting\n");
ch7017_dump_regs(dvo);
@@ -333,7 +333,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
/* set the CH7017 power state */
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
- uint8_t val;
+ u8 val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
@@ -361,7 +361,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
@@ -373,7 +373,7 @@ static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
#define DUMP(reg) \
do { \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 7aeeffd2428b..397ac5233726 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -85,7 +85,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
static struct ch7xxx_id_struct {
- uint8_t vid;
+ u8 vid;
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
@@ -96,7 +96,7 @@ static struct ch7xxx_id_struct {
};
static struct ch7xxx_did_struct {
- uint8_t did;
+ u8 did;
char *name;
} ch7xxx_dids[] = {
{ CH7xxx_DID, "CH7XXX" },
@@ -107,7 +107,7 @@ struct ch7xxx_priv {
bool quiet;
};
-static char *ch7xxx_get_id(uint8_t vid)
+static char *ch7xxx_get_id(u8 vid)
{
int i;
@@ -119,7 +119,7 @@ static char *ch7xxx_get_id(uint8_t vid)
return NULL;
}
-static char *ch7xxx_get_did(uint8_t did)
+static char *ch7xxx_get_did(u8 did)
{
int i;
@@ -132,7 +132,7 @@ static char *ch7xxx_get_did(uint8_t did)
}
/** Reads an 8 bit register */
-static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -170,11 +170,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
}
/** Writes an 8 bit register */
-static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -201,7 +201,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
- uint8_t vendor, device;
+ u8 vendor, device;
char *name, *devid;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
@@ -244,7 +244,7 @@ out:
static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
{
- uint8_t cdet, orig_pm, pm;
+ u8 cdet, orig_pm, pm;
ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
@@ -276,7 +276,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- uint8_t tvco, tpcp, tpd, tlpf, idf;
+ u8 tvco, tpcp, tpd, tlpf, idf;
if (mode->clock <= 65000) {
tvco = 0x23;
@@ -336,7 +336,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
- uint8_t val;
+ u8 val;
if ((i % 8) == 0)
DRM_DEBUG_KMS("\n %02X: ", i);
ch7xxx_readb(dvo, i, &val);
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index c73aff163908..24278cc49090 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -161,7 +161,7 @@
* instead. The following list contains all registers that
* require saving.
*/
-static const uint16_t backup_addresses[] = {
+static const u16 backup_addresses[] = {
0x11, 0x12,
0x18, 0x19, 0x1a, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
@@ -174,11 +174,11 @@ static const uint16_t backup_addresses[] = {
struct ivch_priv {
bool quiet;
- uint16_t width, height;
+ u16 width, height;
/* Register backup */
- uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
+ u16 reg_backup[ARRAY_SIZE(backup_addresses)];
};
@@ -188,7 +188,7 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
*
* Each of the 256 registers are 16 bits long.
*/
-static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -231,7 +231,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
}
/* Writes a 16-bit register on the ivch */
-static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -263,7 +263,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
struct ivch_priv *priv;
- uint16_t temp;
+ u16 temp;
int i;
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
@@ -342,7 +342,7 @@ static void ivch_reset(struct intel_dvo_device *dvo)
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
- uint16_t vr01, vr30, backlight;
+ u16 vr01, vr30, backlight;
ivch_reset(dvo);
@@ -379,7 +379,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
- uint16_t vr01;
+ u16 vr01;
ivch_reset(dvo);
@@ -398,9 +398,9 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *adjusted_mode)
{
struct ivch_priv *priv = dvo->dev_priv;
- uint16_t vr40 = 0;
- uint16_t vr01 = 0;
- uint16_t vr10;
+ u16 vr40 = 0;
+ u16 vr01 = 0;
+ u16 vr10;
ivch_reset(dvo);
@@ -416,7 +416,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
mode->vdisplay != adjusted_mode->crtc_vdisplay) {
- uint16_t x_ratio, y_ratio;
+ u16 x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
vr40 |= VR40_CLOCK_GATING_ENABLE;
@@ -438,7 +438,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
static void ivch_dump_regs(struct intel_dvo_device *dvo)
{
- uint16_t val;
+ u16 val;
ivch_read(dvo, VR00, &val);
DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 2379c33cfe51..c584e01dc8dc 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -191,8 +191,8 @@ enum {
};
struct ns2501_reg {
- uint8_t offset;
- uint8_t value;
+ u8 offset;
+ u8 value;
};
/*
@@ -202,23 +202,23 @@ struct ns2501_reg {
* read all this with a grain of salt.
*/
struct ns2501_configuration {
- uint8_t sync; /* configuration of the C0 register */
- uint8_t conf; /* configuration register 8 */
- uint8_t syncb; /* configuration register 41 */
- uint8_t dither; /* configuration of the dithering */
- uint8_t pll_a; /* PLL configuration, register A, 1B */
- uint16_t pll_b; /* PLL configuration, register B, 1C/1D */
- uint16_t hstart; /* horizontal start, registers C1/C2 */
- uint16_t hstop; /* horizontal total, registers C3/C4 */
- uint16_t vstart; /* vertical start, registers C5/C6 */
- uint16_t vstop; /* vertical total, registers C7/C8 */
- uint16_t vsync; /* manual vertical sync start, 80/81 */
- uint16_t vtotal; /* number of lines generated, 82/83 */
- uint16_t hpos; /* horizontal position + 256, 98/99 */
- uint16_t vpos; /* vertical position, 8e/8f */
- uint16_t voffs; /* vertical output offset, 9c/9d */
- uint16_t hscale; /* horizontal scaling factor, b8/b9 */
- uint16_t vscale; /* vertical scaling factor, 10/11 */
+ u8 sync; /* configuration of the C0 register */
+ u8 conf; /* configuration register 8 */
+ u8 syncb; /* configuration register 41 */
+ u8 dither; /* configuration of the dithering */
+ u8 pll_a; /* PLL configuration, register A, 1B */
+ u16 pll_b; /* PLL configuration, register B, 1C/1D */
+ u16 hstart; /* horizontal start, registers C1/C2 */
+ u16 hstop; /* horizontal total, registers C3/C4 */
+ u16 vstart; /* vertical start, registers C5/C6 */
+ u16 vstop; /* vertical total, registers C7/C8 */
+ u16 vsync; /* manual vertical sync start, 80/81 */
+ u16 vtotal; /* number of lines generated, 82/83 */
+ u16 hpos; /* horizontal position + 256, 98/99 */
+ u16 vpos; /* vertical position, 8e/8f */
+ u16 voffs; /* vertical output offset, 9c/9d */
+ u16 hscale; /* horizontal scaling factor, b8/b9 */
+ u16 vscale; /* vertical scaling factor, 10/11 */
};
/*
@@ -389,7 +389,7 @@ struct ns2501_priv {
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
-static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -434,11 +434,11 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
-static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 1c1a0674dbab..4ae5d8fd9ff0 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -65,7 +65,7 @@ struct sil164_priv {
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
-static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -102,11 +102,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
return false;
}
-static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -173,7 +173,7 @@ out:
static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
{
- uint8_t reg9;
+ u8 reg9;
sil164_readb(dvo, SIL164_REG9, &reg9);
@@ -243,7 +243,7 @@ static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 31e181da93db..d603bc2f2506 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -90,7 +90,7 @@ struct tfp410_priv {
bool quiet;
};
-static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -127,11 +127,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
return false;
}
-static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -155,7 +155,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
{
- uint8_t ch1, ch2;
+ u8 ch1, ch2;
if (tfp410_readb(dvo, addr+0, &ch1) &&
tfp410_readb(dvo, addr+1, &ch2))
@@ -203,7 +203,7 @@ out:
static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
{
enum drm_connector_status ret = connector_status_disconnected;
- uint8_t ctl2;
+ u8 ctl2;
if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
if (ctl2 & TFP410_CTL_2_RSEN)
@@ -236,7 +236,7 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
/* set the tfp410 power state */
static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
{
- uint8_t ctl1;
+ u8 ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
@@ -251,7 +251,7 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
{
- uint8_t ctl1;
+ u8 ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return false;
@@ -264,7 +264,7 @@ static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val, val2;
+ u8 val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7c9ec4f4f36c..fe754022e356 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+ ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
@@ -131,7 +131,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
assert_rpm_wakelock_held(dev_priv);
- if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+ if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b51c05d03f14..a614db310ea2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -172,6 +172,7 @@ struct decode_info {
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
+#define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5)
#define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
@@ -862,6 +863,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
+ u32 ctx_sr_ctl;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -872,7 +874,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
- return 0;
+ return -EBADRQC;
}
if (is_shadowed_mmio(offset)) {
@@ -894,6 +896,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
+ /* TODO
+ * Right now only scan LRI command on KBL and in inhibit context.
+ * It's good enough to support initializing mmio by lri command in
+ * vgpu inhibit context on KBL.
+ */
+ if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+ intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
+ intel_gvt_hypervisor_read_gpa(s->vgpu,
+ s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+ /* check inhibit context */
+ if (ctx_sr_ctl & 1) {
+ u32 data = cmd_val(s, index + 1);
+
+ if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+ intel_vgpu_mask_mmio_write(vgpu,
+ offset, &data, 4);
+ else
+ vgpu_vreg(vgpu, offset) = data;
+ }
+ }
+
/* TODO: Update the global mask if this MMIO is a masked-MMIO */
intel_gvt_mmio_set_cmd_accessed(gvt, offset);
return 0;
@@ -1256,7 +1280,9 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1284,7 +1310,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1308,7 +1336,9 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv))
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1317,26 +1347,14 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
static int check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv)
- || IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv))
- return gen8_check_mi_display_flip(s, info);
- return -ENODEV;
+ return gen8_check_mi_display_flip(s, info);
}
static int update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv)
- || IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv))
- return gen8_update_plane_mmio_from_mi_display_flip(s, info);
- return -ENODEV;
+ return gen8_update_plane_mmio_from_mi_display_flip(s, info);
}
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
@@ -1615,15 +1633,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
*/
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
- struct intel_gvt *gvt = s->vgpu->gvt;
-
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- /* BDW decides privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
+ /* Decide privilege based on address space */
+ if (cmd_val(s, 0) & (1 << 8) &&
!(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
- return 0;
- }
+ return 0;
return 1;
}
@@ -2349,6 +2362,9 @@ static struct cmd_info cmd_info[] = {
{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
0, 16, NULL},
+ {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6d8180e8d1e2..3019dbc39aef 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -171,6 +171,29 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
+ if (IS_BROXTON(dev_priv)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
+ BXT_DE_PORT_HP_DDIB |
+ BXT_DE_PORT_HP_DDIC);
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIA;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIB;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIC;
+ }
+
+ return;
+ }
+
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
@@ -196,7 +219,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -216,7 +239,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -236,7 +259,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -273,8 +296,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
for_each_pipe(dev_priv, pipe) {
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
- vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
}
vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
@@ -337,26 +360,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
int pipe, id;
+ bool found = false;
- if (WARN_ON(!mutex_is_locked(&gvt->lock)))
- return;
-
+ mutex_lock(&gvt->lock);
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- if (pipe_is_enabled(vgpu, pipe))
- goto out;
+ if (pipe_is_enabled(vgpu, pipe)) {
+ found = true;
+ break;
+ }
}
+ if (found)
+ break;
}
/* all the pipes are disabled */
- hrtimer_cancel(&irq->vblank_timer.timer);
- return;
-
-out:
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
-
+ if (!found)
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ else
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+ mutex_unlock(&gvt->lock);
}
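
Note that the locking contract flips here: the helper used to assert that the caller held gvt->lock, while it now takes the lock itself. A sketch of the calling convention before and after (a caller still holding gvt->lock would now deadlock on this non-recursive mutex):

	/* Old contract: the caller locked around the call. */
	mutex_lock(&gvt->lock);
	intel_gvt_check_vblank_emulation(gvt);
	mutex_unlock(&gvt->lock);

	/* New contract: call with gvt->lock NOT held; the helper
	 * acquires and releases it internally. */
	intel_gvt_check_vblank_emulation(gvt);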
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
@@ -393,8 +418,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
{
int pipe;
+ mutex_lock(&vgpu->vgpu_lock);
for_each_pipe(vgpu->gvt->dev_priv, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -409,11 +436,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
struct intel_vgpu *vgpu;
int id;
- if (WARN_ON(!mutex_is_locked(&gvt->lock)))
- return;
-
+ mutex_lock(&gvt->lock);
for_each_active_vgpu(gvt, vgpu, id)
emulate_vblank(vgpu);
+ mutex_unlock(&gvt->lock);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6f4f8e941fc2..6e3f56684f4e 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -164,7 +164,9 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
@@ -192,6 +194,14 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
return obj;
}
+static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
+{
+ return c && c->x_hot <= c->width && c->y_hot <= c->height;
+}
+
static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu *vgpu,
struct intel_vgpu_fb_info *info,
@@ -229,12 +239,14 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->x_pos = c.x_pos;
info->y_pos = c.y_pos;
- /* The invalid cursor hotspot value is delivered to host
- * until we find a way to get the cursor hotspot info of
- * guest OS.
- */
- info->x_hot = UINT_MAX;
- info->y_hot = UINT_MAX;
+ if (validate_hotspot(&c)) {
+ info->x_hot = c.x_hot;
+ info->y_hot = c.y_hot;
+ } else {
+ info->x_hot = UINT_MAX;
+ info->y_hot = UINT_MAX;
+ }
+
info->size = (((info->stride * c.height * c.bpp) / 8)
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else {
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index f61337632969..4b98539025c5 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -77,6 +77,20 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
return chr;
}
+static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
+{
+ int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+ int port = -EINVAL;
+
+ if (port_select == 1)
+ port = PORT_B;
+ else if (port_select == 2)
+ port = PORT_C;
+ else if (port_select == 3)
+ port = PORT_D;
+ return port;
+}
+
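
The Broxton pin-select values map directly onto ports B/C/D with no PCH-style indirection, which is why a dedicated helper is added instead of extending get_port_from_gmbus0() below. A switch-based equivalent (sketch only; the patch's if/else chain behaves identically):

	static inline int bxt_get_port_from_gmbus0_alt(u32 gmbus0)
	{
		switch (gmbus0 & _GMBUS_PIN_SEL_MASK) {
		case 1: return PORT_B;
		case 2: return PORT_C;
		case 3: return PORT_D;
		default: return -EINVAL;
		}
	}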
static inline int get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
@@ -105,6 +119,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -116,7 +131,10 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (pin_select == 0)
return 0;
- port = get_port_from_gmbus0(pin_select);
+ if (IS_BROXTON(dev_priv))
+ port = bxt_get_port_from_gmbus0(pin_select);
+ else
+ port = get_port_from_gmbus0(pin_select);
if (WARN_ON(port < 0))
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 427e40e64d41..714d709829a2 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -146,14 +146,11 @@ struct execlist_ring_context {
u32 nop4;
u32 lri_cmd_2;
struct execlist_mmio_pair ctx_timestamp;
- struct execlist_mmio_pair pdp3_UDW;
- struct execlist_mmio_pair pdp3_LDW;
- struct execlist_mmio_pair pdp2_UDW;
- struct execlist_mmio_pair pdp2_LDW;
- struct execlist_mmio_pair pdp1_UDW;
- struct execlist_mmio_pair pdp1_LDW;
- struct execlist_mmio_pair pdp0_UDW;
- struct execlist_mmio_pair pdp0_LDW;
+ /*
+ * pdps[8]={ pdp3_UDW, pdp3_LDW, pdp2_UDW, pdp2_LDW,
+ * pdp1_UDW, pdp1_LDW, pdp0_UDW, pdp0_LDW}
+ */
+ struct execlist_mmio_pair pdps[8];
};
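
Collapsing the eight named pairs into pdps[8] lets callers compute the slot for PDP n instead of switching over field names; per the ordering in the comment, a small indexing helper (hypothetical name, not from the patch) would be:

	/* pdps[] runs pdp3_UDW, pdp3_LDW, ..., pdp0_UDW, pdp0_LDW, so the
	 * UDW half of PDP n (n in 0..3) sits at (3 - n) * 2 and the LDW
	 * half right after it. */
	static inline struct execlist_mmio_pair *
	pdp_udw(struct execlist_ring_context *ctx, int n)
	{
		return &ctx->pdps[(3 - n) * 2];
	}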
struct intel_vgpu_elsp_dwords {
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 1c120683e958..face664be3e8 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -36,6 +36,7 @@
#include <uapi/drm/drm_fourcc.h>
#include "i915_drv.h"
#include "gvt.h"
+#include "i915_pvinfo.h"
#define PRIMARY_FORMAT_NUM 16
struct pixel_format {
@@ -150,7 +151,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
switch (tiled) {
case PLANE_CTL_TILED_LINEAR:
stride = stride_reg * 64;
@@ -214,7 +217,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (!plane->enabled)
return -ENODEV;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
_PLANE_CTL_TILED_SHIFT;
fmt = skl_format_to_drm(
@@ -256,7 +261,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
- (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+ (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) ?
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);
@@ -300,16 +307,16 @@ static int cursor_mode_to_drm(int mode)
int cursor_pixel_formats_index = 4;
switch (mode) {
- case CURSOR_MODE_128_ARGB_AX:
+ case MCURSOR_MODE_128_ARGB_AX:
cursor_pixel_formats_index = 0;
break;
- case CURSOR_MODE_256_ARGB_AX:
+ case MCURSOR_MODE_256_ARGB_AX:
cursor_pixel_formats_index = 1;
break;
- case CURSOR_MODE_64_ARGB_AX:
+ case MCURSOR_MODE_64_ARGB_AX:
cursor_pixel_formats_index = 2;
break;
- case CURSOR_MODE_64_32B_AX:
+ case MCURSOR_MODE_64_32B_AX:
cursor_pixel_formats_index = 3;
break;
@@ -342,8 +349,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
return -ENODEV;
val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
- mode = val & CURSOR_MODE;
- plane->enabled = (mode != CURSOR_MODE_DISABLE);
+ mode = val & MCURSOR_MODE;
+ plane->enabled = (mode != MCURSOR_MODE_DISABLE);
if (!plane->enabled)
return -ENODEV;
@@ -384,6 +391,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
+ plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
+ plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index a73e1d418c22..4ac18b447247 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -162,7 +162,7 @@ static int verify_firmware(struct intel_gvt *gvt,
h = (struct gvt_firmware_header *)fw->data;
- crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ crc32_start = offsetofend(struct gvt_firmware_header, crc32);
mem = fw->data + crc32_start;
#define VERIFY(s, a, b) do { \
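
offsetofend() computes the offset just past a member, so for the u32 crc32 field the result is identical to the old open-coded '+ 4'; the cleanup only removes the magic number. A sketch of the equivalence, assuming the stock definition in include/linux/stddef.h:

	/* offsetofend(TYPE, MEMBER) expands to roughly:
	 *   offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)
	 * With a u32 crc32 member, sizeof(...) == 4, so crc32_start is
	 * unchanged; the CRC is still computed over everything that
	 * follows the crc32 field itself. */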
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 23296547da95..00aad8164dec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -216,16 +216,22 @@ static struct gtt_type_table_entry gtt_type_table[] = {
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ /* We treat the IPS bit as 'PSE' at the PTE level. */
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
- GTT_TYPE_INVALID),
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
- GTT_TYPE_INVALID),
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_PT,
@@ -339,8 +345,14 @@ static inline int gtt_set_entry64(void *pt,
#define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
+#define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
+#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
+#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
+
+#define GTT_64K_PTE_STRIDE 16
+
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
unsigned long pfn;
@@ -349,6 +361,8 @@ static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
+ else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
+ pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
else
pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
return pfn;
@@ -362,6 +376,9 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
e->val64 &= ~ADDR_2M_MASK;
pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
+ } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
+ e->val64 &= ~ADDR_64K_MASK;
+ pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
} else {
e->val64 &= ~ADDR_4K_MASK;
pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
@@ -372,16 +389,41 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
- /* Entry doesn't have PSE bit. */
- if (get_pse_type(e->type) == GTT_TYPE_INVALID)
- return false;
+ return !!(e->val64 & _PAGE_PSE);
+}
- e->type = get_entry_type(e->type);
- if (!(e->val64 & _PAGE_PSE))
+static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
+{
+ if (gen8_gtt_test_pse(e)) {
+ switch (e->type) {
+ case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ e->val64 &= ~_PAGE_PSE;
+ e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
+ break;
+ case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+ e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
+ e->val64 &= ~_PAGE_PSE;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ }
+}
+
+static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
+{
+ if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
return false;
- e->type = get_pse_type(e->type);
- return true;
+ return !!(e->val64 & GEN8_PDE_IPS_64K);
+}
+
+static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
+{
+ if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
+ return;
+
+ e->val64 &= ~GEN8_PDE_IPS_64K;
}
static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
@@ -408,6 +450,21 @@ static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
e->val64 |= _PAGE_PRESENT;
}
+static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
+}
+
+static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
+}
+
+static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
+}
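
These three helpers keep a software-only marker in the shadow PTE: the GTT_SPTE_FLAG_MASK definition above reserves bits 52..62, which the GPU's page walker apparently ignores on these parts, and the "this entry came from a split 64K page" flag lives at bit 52. A round-trip sketch:

	u64 spte = 0;

	spte |= GTT_SPTE_FLAG_64K_SPLITED;	/* mark as split */
	/* ... later: (spte & GTT_SPTE_FLAG_64K_SPLITED) != 0 ... */
	spte &= ~GTT_SPTE_FLAG_64K_SPLITED;	/* clear on teardown */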
+
/*
* Per-platform GMA routines.
*/
@@ -440,6 +497,12 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
.set_present = gtt_entry_set_present,
.test_present = gen8_gtt_test_present,
.test_pse = gen8_gtt_test_pse,
+ .clear_pse = gen8_gtt_clear_pse,
+ .clear_ips = gen8_gtt_clear_ips,
+ .test_ips = gen8_gtt_test_ips,
+ .clear_64k_splited = gen8_gtt_clear_64k_splited,
+ .set_64k_splited = gen8_gtt_set_64k_splited,
+ .test_64k_splited = gen8_gtt_test_64k_splited,
.get_pfn = gen8_gtt_get_pfn,
.set_pfn = gen8_gtt_set_pfn,
};
@@ -453,6 +516,27 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
+/* Update the entry type per the PSE and IPS bits. */
+static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
+ struct intel_gvt_gtt_entry *entry, bool ips)
+{
+ switch (entry->type) {
+ case GTT_TYPE_PPGTT_PDE_ENTRY:
+ case GTT_TYPE_PPGTT_PDP_ENTRY:
+ if (pte_ops->test_pse(entry))
+ entry->type = get_pse_type(entry->type);
+ break;
+ case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+ if (ips)
+ entry->type = get_pse_type(entry->type);
+ break;
+ default:
+ GEM_BUG_ON(!gtt_type_is_entry(entry->type));
+ }
+
+ GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
+}
+
/*
* MM helpers.
*/
@@ -468,8 +552,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
mm->ppgtt_mm.shadow_pdps,
entry, index, false, 0, mm->vgpu);
-
- pte_ops->test_pse(entry);
+ update_entry_type_for_real(pte_ops, entry, false);
}
static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
@@ -574,7 +657,8 @@ static inline int ppgtt_spt_get_entry(
if (ret)
return ret;
- ops->test_pse(e);
+ update_entry_type_for_real(ops, e, guest ?
+ spt->guest_page.pde_ips : false);
gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
type, e->type, index, e->val64);
@@ -653,10 +737,12 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
- if (spt->guest_page.oos_page)
- detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
+ if (spt->guest_page.gfn) {
+ if (spt->guest_page.oos_page)
+ detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
- intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+ intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+ }
list_del_init(&spt->post_shadow_list);
free_spt(spt);
@@ -717,8 +803,9 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
+/* Allocate a shadow page table without a guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
- struct intel_vgpu *vgpu, int type, unsigned long gfn)
+ struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -753,26 +840,12 @@ retry:
spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
- /*
- * Init guest_page.
- */
- spt->guest_page.type = type;
- spt->guest_page.gfn = gfn;
-
- ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
- ppgtt_write_protection_handler, spt);
- if (ret)
- goto err_unmap_dma;
-
ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
if (ret)
- goto err_unreg_page_track;
+ goto err_unmap_dma;
- trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
return spt;
-err_unreg_page_track:
- intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
err_unmap_dma:
dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
@@ -780,6 +853,37 @@ err_free_spt:
return ERR_PTR(ret);
}
+/* Allocate a shadow page table associated with a specific gfn. */
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
+ struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+ unsigned long gfn, bool guest_pde_ips)
+{
+ struct intel_vgpu_ppgtt_spt *spt;
+ int ret;
+
+ spt = ppgtt_alloc_spt(vgpu, type);
+ if (IS_ERR(spt))
+ return spt;
+
+ /*
+ * Init guest_page.
+ */
+ ret = intel_vgpu_register_page_track(vgpu, gfn,
+ ppgtt_write_protection_handler, spt);
+ if (ret) {
+ ppgtt_free_spt(spt);
+ return ERR_PTR(ret);
+ }
+
+ spt->guest_page.type = type;
+ spt->guest_page.gfn = gfn;
+ spt->guest_page.pde_ips = guest_pde_ips;
+
+ trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+
+ return spt;
+}
+
#define pt_entry_size_shift(spt) \
((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
@@ -787,24 +891,38 @@ err_free_spt:
(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
#define for_each_present_guest_entry(spt, e, i) \
- for (i = 0; i < pt_entries(spt); i++) \
+ for (i = 0; i < pt_entries(spt); \
+ i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
if (!ppgtt_get_guest_entry(spt, e, i) && \
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
#define for_each_present_shadow_entry(spt, e, i) \
- for (i = 0; i < pt_entries(spt); i++) \
+ for (i = 0; i < pt_entries(spt); \
+ i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
if (!ppgtt_get_shadow_entry(spt, e, i) && \
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
-static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
+#define for_each_shadow_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); \
+ i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
+ if (!ppgtt_get_shadow_entry(spt, e, i))
+
+static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
int v = atomic_read(&spt->refcount);
trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
-
atomic_inc(&spt->refcount);
}
+static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+ return atomic_dec_return(&spt->refcount);
+}
+
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
@@ -843,7 +961,8 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
pfn = ops->get_pfn(entry);
type = spt->shadow_page.type;
- if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
+ /* Uninitialized spte or unshadowed spte. */
+ if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
return;
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
@@ -855,14 +974,11 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
struct intel_gvt_gtt_entry e;
unsigned long index;
int ret;
- int v = atomic_read(&spt->refcount);
trace_spt_change(spt->vgpu->id, "die", spt,
spt->guest_page.gfn, spt->shadow_page.type);
- trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
-
- if (atomic_dec_return(&spt->refcount) > 0)
+ if (ppgtt_put_spt(spt) > 0)
return 0;
for_each_present_shadow_entry(spt, &e, index) {
@@ -871,9 +987,15 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
gvt_vdbg_mm("invalidate 4K entry\n");
ppgtt_invalidate_pte(spt, &e);
break;
+ case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+ /* We don't set up 64K shadow entries so far. */
+ WARN(1, "suspicious 64K gtt entry\n");
+ continue;
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ gvt_vdbg_mm("invalidate 2M entry\n");
+ continue;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
- WARN(1, "GVT doesn't support 2M/1GB page\n");
+ WARN(1, "GVT doesn't support 1GB page\n");
continue;
case GTT_TYPE_PPGTT_PML4_ENTRY:
case GTT_TYPE_PPGTT_PDP_ENTRY:
@@ -899,6 +1021,22 @@ fail:
return ret;
}
+static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
+ u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
+ GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+ return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ /* 64K paging is only controlled by the IPS bit in the PTE now. */
+ return true;
+ } else
+ return false;
+}
+
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
@@ -906,35 +1044,54 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *spt = NULL;
+ bool ips = false;
int ret;
GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
+ if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+ ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
+
spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
- if (spt)
+ if (spt) {
ppgtt_get_spt(spt);
- else {
+
+ if (ips != spt->guest_page.pde_ips) {
+ spt->guest_page.pde_ips = ips;
+
+ gvt_dbg_mm("reshadow PDE since ips changed\n");
+ clear_page(spt->shadow_page.vaddr);
+ ret = ppgtt_populate_spt(spt);
+ if (ret) {
+ ppgtt_put_spt(spt);
+ goto err;
+ }
+ }
+ } else {
int type = get_next_pt_type(we->type);
- spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
+ spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
if (IS_ERR(spt)) {
ret = PTR_ERR(spt);
- goto fail;
+ goto err;
}
ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
if (ret)
- goto fail;
+ goto err_free_spt;
ret = ppgtt_populate_spt(spt);
if (ret)
- goto fail;
+ goto err_free_spt;
trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
spt->shadow_page.type);
}
return spt;
-fail:
+
+err_free_spt:
+ ppgtt_free_spt(spt);
+err:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type);
return ERR_PTR(ret);
@@ -948,16 +1105,118 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
se->type = ge->type;
se->val64 = ge->val64;
+ /* Because we always split 64KB pages, clear IPS in the shadow PDE. */
+ if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+ ops->clear_ips(se);
+
ops->set_pfn(se, s->shadow_page.mfn);
}
+/*
+ * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
+ * are not met, negative on error.
+ */
+static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+ struct intel_gvt_gtt_entry *entry)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long pfn;
+
+ if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+ return 0;
+
+ pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
+ if (pfn == INTEL_GVT_INVALID_ADDR)
+ return -EINVAL;
+
+ return PageTransHuge(pfn_to_page(pfn));
+}
+
+static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+ struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+ struct intel_gvt_gtt_entry *se)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *sub_spt;
+ struct intel_gvt_gtt_entry sub_se;
+ unsigned long start_gfn;
+ dma_addr_t dma_addr;
+ unsigned long sub_index;
+ int ret;
+
+ gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
+
+ start_gfn = ops->get_pfn(se);
+
+ sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
+ if (IS_ERR(sub_spt))
+ return PTR_ERR(sub_spt);
+
+ for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+ start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+ if (ret) {
+ ppgtt_invalidate_spt(spt);
+ return ret;
+ }
+ sub_se.val64 = se->val64;
+
+ /* Copy the PAT field from PDE. */
+ sub_se.val64 &= ~_PAGE_PAT;
+ sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
+
+ ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
+ ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
+ }
+
+ /* Clear dirty field. */
+ se->val64 &= ~_PAGE_DIRTY;
+
+ ops->clear_pse(se);
+ ops->clear_ips(se);
+ ops->set_pfn(se, sub_spt->shadow_page.mfn);
+ ppgtt_set_shadow_entry(spt, se, index);
+ return 0;
+}
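
The PAT copy inside the loop is a bit relocation: in a huge-page entry the PAT selector sits higher than in a 4K PTE, so shifting the masked value right by 5 drops it into the 4K slot. Assuming the usual x86 encodings, the arithmetic works out as:

	/* _PAGE_PAT_LARGE = 1 << 12  (PAT bit in a 2M/1G entry)
	 * _PAGE_PAT       = 1 << 7   (PAT bit in a 4K PTE)
	 * so (se->val64 & _PAGE_PAT_LARGE) >> 5 moves bit 12 to bit 7. */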
+
+static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
+ struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+ struct intel_gvt_gtt_entry *se)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry entry = *se;
+ unsigned long start_gfn;
+ dma_addr_t dma_addr;
+ int i, ret;
+
+ gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
+
+ GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
+
+ start_gfn = ops->get_pfn(se);
+
+ entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
+ ops->set_64k_splited(&entry);
+
+ for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+ start_gfn + i, PAGE_SIZE, &dma_addr);
+ if (ret)
+ return ret;
+
+ ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
+ ppgtt_set_shadow_entry(spt, &entry, index + i);
+ }
+ return 0;
+}
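
Since one guest 64K PTE covers sixteen 4K frames, the split consumes sixteen consecutive shadow slots per guest entry, which is also why the iteration macros earlier advance by GTT_64K_PTE_STRIDE. A worked example of the index math:

	/* A 512-entry PT in 64K mode has 32 usable guest slots:
	 * 0, 16, 32, ..., 496 (hence the GEM_BUG_ON(index % 16)).
	 * Guest slot 32 covers gfns start_gfn..start_gfn + 15 and
	 * populates shadow slots 32..47 with 4K entries. */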
+
static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
struct intel_gvt_gtt_entry *ge)
{
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry se = *ge;
- unsigned long gfn;
+ unsigned long gfn, page_size = PAGE_SIZE;
dma_addr_t dma_addr;
int ret;
@@ -970,16 +1229,33 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
gvt_vdbg_mm("shadow 4K gtt entry\n");
break;
+ case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+ gvt_vdbg_mm("shadow 64K gtt entry\n");
+ /*
+ * The layout of a 64K page is special: the page size is
+ * controlled by the upper PDE. To keep it simple, we always
+ * split a 64K page into smaller 4K pages in the shadow PT.
+ */
+ return split_64KB_gtt_entry(vgpu, spt, index, &se);
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ gvt_vdbg_mm("shadow 2M gtt entry\n");
+ ret = is_2MB_gtt_possible(vgpu, ge);
+ if (ret == 0)
+ return split_2MB_gtt_entry(vgpu, spt, index, &se);
+ else if (ret < 0)
+ return ret;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+ break;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
- gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+ gvt_vgpu_err("GVT doesn't support 1GB entry\n");
return -EINVAL;
default:
GEM_BUG_ON(1);
 }
/* direct shadow */
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
+ &dma_addr);
if (ret)
return -ENXIO;
@@ -1062,8 +1338,12 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
ret = ppgtt_invalidate_spt(s);
if (ret)
goto fail;
- } else
+ } else {
+ /* We don't set up 64K shadow entries so far. */
+ WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+ "suspicious 64K entry\n");
ppgtt_invalidate_pte(spt, se);
+ }
return 0;
fail:
@@ -1286,7 +1566,7 @@ static int ppgtt_handle_guest_write_page_table(
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry old_se;
int new_present;
- int ret;
+ int i, ret;
new_present = ops->test_present(we);
@@ -1308,8 +1588,27 @@ static int ppgtt_handle_guest_write_page_table(
goto fail;
if (!new_present) {
- ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
- ppgtt_set_shadow_entry(spt, &old_se, index);
+ /* For 64KB split entries, we need to clear them all. */
+ if (ops->test_64k_splited(&old_se) &&
+ !(index % GTT_64K_PTE_STRIDE)) {
+ gvt_vdbg_mm("remove splited 64K shadow entries\n");
+ for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+ ops->clear_64k_splited(&old_se);
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index + i);
+ }
+ } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
+ old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+ ops->clear_pse(&old_se);
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index);
+ } else {
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index);
+ }
}
return 0;
@@ -1391,7 +1690,17 @@ static int ppgtt_handle_guest_write_page_table_bytes(
ppgtt_get_guest_entry(spt, &we, index);
- ops->test_pse(&we);
+ /*
+ * For a page table that holds 64K gtt entries, only PTE#0, PTE#16,
+ * PTE#32, ... PTE#496 are used. Writes to the unused PTEs should be
+ * ignored.
+ */
+ if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
+ (index % GTT_64K_PTE_STRIDE)) {
+ gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
+ index);
+ return 0;
+ }
if (bytes == info->gtt_entry_size) {
ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
@@ -1592,6 +1901,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
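+ /* -1UL means no partial GGTT entry write is pending. */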
+ mm->ggtt_mm.last_partial_off = -1UL;
return mm;
}
@@ -1616,6 +1926,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
+ mm->ggtt_mm.last_partial_off = -1UL;
}
vgpu_free_mm(mm);
@@ -1868,6 +2179,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
+ /* If the ggtt entry size is 8 bytes and it is split into two 4-byte
+ * writes, we assume the two writes are consecutive.
+ * Otherwise, we abort and report an error.
+ */
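+ /* For example, an 8-byte entry at offset 0x100000 may arrive as a
+ * 4-byte write at 0x100000 (cached below) followed by a 4-byte
+ * write at 0x100004, which completes the entry.
+ */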
+ if (bytes < info->gtt_entry_size) {
+ if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+ /* the first partial part */
+ ggtt_mm->ggtt_mm.last_partial_off = off;
+ ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+ return 0;
+ } else if ((g_gtt_index ==
+ (ggtt_mm->ggtt_mm.last_partial_off >>
+ info->gtt_entry_size_shift)) &&
+ (off != ggtt_mm->ggtt_mm.last_partial_off)) {
+ /* the second partial part */
+
+ int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+ (info->gtt_entry_size - 1);
+
+ memcpy((void *)&e.val64 + last_off,
+ (void *)&ggtt_mm->ggtt_mm.last_partial_data +
+ last_off, bytes);
+
+ ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+ } else {
+ int last_offset;
+
+ gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+ ggtt_mm->ggtt_mm.last_partial_off, off,
+ bytes, info->gtt_entry_size);
+
+ /* set host ggtt entry to scratch page and clear
+ * virtual ggtt entry as not present for the last
+ * partially written offset
+ */
+ last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+ (~(info->gtt_entry_size - 1));
+
+ ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+ ggtt_invalidate_pte(vgpu, &m);
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+ ops->clear_present(&m);
+ ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+ ggtt_invalidate(gvt->dev_priv);
+
+ ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+ ops->clear_present(&e);
+ ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+
+ ggtt_mm->ggtt_mm.last_partial_off = off;
+ ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+
+ return 0;
+ }
+ }
+
if (ops->test_present(&e)) {
gfn = ops->get_pfn(&e);
m = e;
@@ -1881,7 +2248,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
}
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
- &dma_addr);
+ PAGE_SIZE, &dma_addr);
if (ret) {
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
@@ -1973,7 +2340,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt itself
 * is GTT_TYPE_PPGTT_PTE_PT, fully filled with the scratch page mfn.
*/
- if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+ if (type > GTT_TYPE_PPGTT_PTE_PT) {
struct intel_gvt_gtt_entry se;
memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
@@ -2257,13 +2624,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
gvt_dbg_core("init gtt\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
- gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
- } else {
- return -ENODEV;
- }
+ gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+ gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
page = (void *)get_zeroed_page(GFP_KERNEL);
if (!page) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 3792f2b7f4ff..7a9b36176efb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -63,6 +63,12 @@ struct intel_gvt_gtt_pte_ops {
void (*clear_present)(struct intel_gvt_gtt_entry *e);
void (*set_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+ void (*clear_pse)(struct intel_gvt_gtt_entry *e);
+ bool (*test_ips)(struct intel_gvt_gtt_entry *e);
+ void (*clear_ips)(struct intel_gvt_gtt_entry *e);
+ bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
+ void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
+ void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};
@@ -95,6 +101,7 @@ typedef enum {
GTT_TYPE_GGTT_PTE,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY,
GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PTE_1G_ENTRY,
@@ -150,6 +157,8 @@ struct intel_vgpu_mm {
} ppgtt_mm;
struct {
void *virtual_ggtt;
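+ /* track a pending partial (4-byte) write to an 8-byte GGTT entry */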
+ unsigned long last_partial_off;
+ u64 last_partial_data;
} ggtt_mm;
};
};
@@ -220,6 +229,7 @@ struct intel_vgpu_ppgtt_spt {
struct {
intel_gvt_gtt_type_t type;
+ bool pde_ips; /* for 64KB PTEs */
void *vaddr;
struct page *page;
unsigned long mfn;
@@ -227,6 +237,7 @@ struct intel_vgpu_ppgtt_spt {
struct {
intel_gvt_gtt_type_t type;
+ bool pde_ips; /* for 64KB PTEs */
unsigned long gfn;
unsigned long write_cnt;
struct intel_vgpu_oos_page *oos_page;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 61bd14fcb649..46c8b720e336 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -176,6 +176,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
+ .vgpu_release = intel_gvt_release_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
@@ -238,18 +239,15 @@ static void init_device_info(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- info->max_support_vgpus = 8;
- info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
- info->mmio_size = 2 * 1024 * 1024;
- info->mmio_bar = 0;
- info->gtt_start_offset = 8 * 1024 * 1024;
- info->gtt_entry_size = 8;
- info->gtt_entry_size_shift = 3;
- info->gmadr_bytes_in_cmd = 8;
- info->max_surface_size = 36 * 1024 * 1024;
- }
+ info->max_support_vgpus = 8;
+ info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
+ info->mmio_size = 2 * 1024 * 1024;
+ info->mmio_bar = 0;
+ info->gtt_start_offset = 8 * 1024 * 1024;
+ info->gtt_entry_size = 8;
+ info->gtt_entry_size_shift = 3;
+ info->gmadr_bytes_in_cmd = 8;
+ info->max_surface_size = 36 * 1024 * 1024;
info->msi_cap_offset = pdev->msi_cap;
}
@@ -271,11 +269,8 @@ static int gvt_service_thread(void *data)
continue;
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
- (void *)&gvt->service_request)) {
- mutex_lock(&gvt->lock);
+ (void *)&gvt->service_request))
intel_gvt_emulate_vblank(gvt);
- mutex_unlock(&gvt->lock);
- }
if (test_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request) ||
@@ -321,6 +316,11 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!gvt))
return;
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+ intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+ intel_gvt_clean_vgpu_types(gvt);
+
intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
@@ -328,17 +328,10 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
- intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
-
- intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
- intel_gvt_clean_vgpu_types(gvt);
-
+ intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
- intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -379,6 +372,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
idr_init(&gvt->vgpu_idr);
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
+ mutex_init(&gvt->sched_lock);
gvt->dev_priv = dev_priv;
init_device_info(gvt);
@@ -473,3 +467,7 @@ out_clean_idr:
kfree(gvt);
return ret;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+MODULE_SOFTDEP("pre: kvmgt");
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 05d15a095310..31f6cdbe5c42 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -170,12 +170,18 @@ struct intel_vgpu_submission {
struct intel_vgpu {
struct intel_gvt *gvt;
+ struct mutex vgpu_lock;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
+
+ /* Both sched_data and sched_ctl can be seen as part of the global gvt
+ * scheduler structure. So the two vgpu fields below are protected
+ * by sched_lock, not vgpu_lock.
+ */
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
@@ -268,6 +274,8 @@ struct intel_gvt_mmio {
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX (1 << 7)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
@@ -294,7 +302,13 @@ struct intel_vgpu_type {
};
struct intel_gvt {
+ /* GVT scope lock, protects GVT itself and all resources currently
+ * not yet protected by the more specific locks (vgpu and scheduler locks).
+ */
struct mutex lock;
+ /* scheduler scope lock, protects gvt and vgpu scheduling-related data */
+ struct mutex sched_lock;
+
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
@@ -314,6 +328,10 @@ struct intel_gvt {
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
+
+ /* service_request is always used in bit operations; always access it
+ * with atomic bit ops so that the big gvt lock is not needed.
+ */
unsigned long service_request;
struct {
@@ -361,9 +379,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
#define gvt_ggtt_sz(gvt) \
- ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+ ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
@@ -468,6 +486,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
@@ -545,7 +564,8 @@ struct intel_gvt_ops {
unsigned int);
struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
struct intel_vgpu_type *);
- void (*vgpu_destroy)(struct intel_vgpu *);
+ void (*vgpu_destroy)(struct intel_vgpu *vgpu);
+ void (*vgpu_release)(struct intel_vgpu *vgpu);
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
@@ -639,6 +659,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
+/**
+ * intel_gvt_mmio_is_in_ctx - check if an MMIO has the in-context mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO has an in-context mask, false otherwise.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mark an MMIO as saved/restored in context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
+
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bcbc47a88a70..7a58ca555197 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_SKL;
else if (IS_KABYLAKE(gvt->dev_priv))
return D_KBL;
+ else if (IS_BROXTON(gvt->dev_priv))
+ return D_BXT;
return 0;
}
@@ -208,6 +210,31 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
return 0;
}
+static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+ if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+ if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
+ gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
+ else if (!ips)
+ gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
+ else {
+ /* All engines must be enabled together for a vGPU,
+ * since we don't know which engine the ppgtt will
+ * bind to when shadowing.
+ */
+ gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
+ ips);
+ return -EINVAL;
+ }
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
@@ -255,7 +282,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_BROXTON(vgpu->gvt->dev_priv)) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -316,6 +344,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
}
+ /* vgpu_lock is already held by the mmio r/w emulation */
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
/* sw will wait for the device to ack the reset request */
@@ -420,7 +449,10 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
else
vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+ /* vgpu_lock is already held by the mmio r/w emulation */
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_check_vblank_emulation(vgpu->gvt);
+ mutex_lock(&vgpu->vgpu_lock);
return 0;
}
@@ -857,7 +889,8 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
data = vgpu_vreg(vgpu, offset);
if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv))
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_BROXTON(vgpu->gvt->dev_priv))
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
@@ -1209,8 +1242,8 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
ret = handle_g2v_notification(vgpu, data);
break;
/* add xhot and yhot to handled list to avoid error log */
- case 0x78830:
- case 0x78834:
+ case _vgtif_reg(cursor_x_hot):
+ case _vgtif_reg(cursor_y_hot):
case _vgtif_reg(pdp[0].lo):
case _vgtif_reg(pdp[0].hi):
case _vgtif_reg(pdp[1].lo):
@@ -1369,6 +1402,16 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
+ } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ /*
+ * "Read memory latency" command on gen9.
+ * The memory latency values below are read
+ * from a Broxton MRB.
+ */
+ if (!*data0)
+ *data0 = 0x16080707;
+ else
+ *data0 = 0x16161616;
}
break;
case SKL_PCODE_CDCLK_CONTROL:
@@ -1426,8 +1469,11 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
{
u32 v = *(u32 *)p_data;
- v &= (1 << 31) | (1 << 29) | (1 << 9) |
- (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+ if (IS_BROXTON(vgpu->gvt->dev_priv))
+ v &= (1 << 31) | (1 << 29);
+ else
+ v &= (1 << 31) | (1 << 29) | (1 << 9) |
+ (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
v |= (v >> 1);
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
@@ -1447,6 +1493,109 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
+static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & BXT_DE_PLL_PLL_ENABLE)
+ v |= BXT_DE_PLL_LOCK;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & PORT_PLL_ENABLE)
+ v |= PORT_PLL_LOCK;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+ u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
+
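+ /* Releasing the common lane reset reports the lanes of all DDI
+ * ports as enabled in the per-port PHY_CTL registers.
+ */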
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = vgpu_vreg(vgpu, offset);
+
+ v &= ~UNIQUE_TRANGE_EN_METHOD;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
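+ /* A GRP register write is broadcast to the per-lane LN01/LN23
+ * copies, which live at fixed offsets below the GRP register.
+ */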
+ if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
+ vgpu_vreg(vgpu, offset - 0x600) = v;
+ vgpu_vreg(vgpu, offset - 0x800) = v;
+ } else {
+ vgpu_vreg(vgpu, offset - 0x400) = v;
+ vgpu_vreg(vgpu, offset - 0x600) = v;
+ }
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
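+ /* Bits 0 and 1 request power-on for DPIO PHY0/PHY1; emulate the
+ * PHYs coming up by latching PHY_POWER_GOOD in the matching
+ * CL1CM_DW0 register.
+ */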
+ if (v & BIT(0)) {
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+ ~PHY_RESERVED;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
+ PHY_POWER_GOOD;
+ }
+
+ if (v & BIT(1)) {
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+ ~PHY_RESERVED;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
+ PHY_POWER_GOOD;
+ }
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ vgpu_vreg(vgpu, offset) = 0;
+ return 0;
+}
+
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -1657,7 +1806,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
+ gamw_echo_dev_rw_ia_write);
+
MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
@@ -2670,17 +2821,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
- MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
+ MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
+ MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2805,53 +2956,57 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
- MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
- MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
- MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ NULL, NULL);
+ MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
/* TRTT */
- MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
- MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+ NULL, gen9_trtte_write);
+ MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
- MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
- MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
@@ -2869,11 +3024,188 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_D(_MMIO(0x4ab8), D_KBL);
- MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
+ MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+
+ return 0;
+}
+
+static int init_bxt_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
+
+ MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
+ MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
+ MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
+ MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
+ MMIO_D(ERROR_GEN6, D_BXT);
+ MMIO_D(DONE_REG, D_BXT);
+ MMIO_D(EIR, D_BXT);
+ MMIO_D(PGTBL_ER, D_BXT);
+ MMIO_D(_MMIO(0x4194), D_BXT);
+ MMIO_D(_MMIO(0x4294), D_BXT);
+ MMIO_D(_MMIO(0x4494), D_BXT);
+
+ MMIO_RING_D(RING_PSMI_CTL, D_BXT);
+ MMIO_RING_D(RING_DMA_FADD, D_BXT);
+ MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
+ MMIO_RING_D(RING_IPEHR, D_BXT);
+ MMIO_RING_D(RING_INSTPS, D_BXT);
+ MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
+ MMIO_RING_D(RING_BBSTATE, D_BXT);
+ MMIO_RING_D(RING_IPEIR, D_BXT);
+
+ MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
+
+ MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
+ MMIO_D(BXT_RP_STATE_CAP, D_BXT);
+ MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
+ NULL, bxt_phy_ctl_family_write);
+ MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
+ NULL, bxt_phy_ctl_family_write);
+ MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
+ MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
+ MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
+ NULL, bxt_port_pll_enable_write);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
+ NULL, bxt_port_pll_enable_write);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
+ bxt_port_pll_enable_write);
+
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
+
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
+
+ MMIO_D(BXT_DE_PLL_CTL, D_BXT);
+ MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
+ MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
+ MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
+
+ MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
+
+ MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+ MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+
+ MMIO_D(RC6_CTX_BASE, D_BXT);
+
+ MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
+ MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
+ MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
+ MMIO_D(GEN6_GFXPAUSE, D_BXT);
+ MMIO_D(GEN8_L3SQCREG1, D_BXT);
+
+ MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2965,6 +3297,16 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
+ } else if (IS_BROXTON(dev_priv)) {
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_skl_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_bxt_mmio_info(gvt);
+ if (ret)
+ goto err;
}
gvt->mmio.mmio_block = mmio_blocks;
@@ -3046,6 +3388,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
+ * intel_vgpu_mask_mmio_write - write mask register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 mask, old_vreg;
+
+ old_vreg = vgpu_vreg(vgpu, offset);
+ write_vreg(vgpu, offset, p_data, bytes);
+ mask = vgpu_vreg(vgpu, offset) >> 16;
+ vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+ (vgpu_vreg(vgpu, offset) & mask);
+
+ return 0;
+}
+
+/**
* intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
* force-nopriv register
*
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index f6dd9f717888..5af11cf1b482 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -53,7 +53,7 @@ struct intel_gvt_mpt {
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
- dma_addr_t *dma_addr);
+ unsigned long size, dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 7a041b368f68..5daa23ae566b 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -350,7 +350,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
clear_bits |= (1 << bit);
}
- WARN_ON(!up_irq_info);
+ if (WARN_ON(!up_irq_info))
+ return;
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
@@ -580,7 +581,9 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
+ } else if (IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)
+ || IS_BROXTON(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,14 +693,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
gvt_dbg_core("init irq framework\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- irq->ops = &gen8_irq_ops;
- irq->irq_map = gen8_irq_map;
- } else {
- WARN_ON(1);
- return -ENODEV;
- }
+ irq->ops = &gen8_irq_ops;
+ irq->irq_map = gen8_irq_map;
/* common event initialization */
init_events(irq);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index df4e4a07db3d..a45f46d8537f 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -43,6 +43,8 @@
#include <linux/mdev.h>
#include <linux/debugfs.h>
+#include <linux/nospec.h>
+
#include "i915_drv.h"
#include "gvt.h"
@@ -94,6 +96,7 @@ struct gvt_dma {
struct rb_node dma_addr_node;
gfn_t gfn;
dma_addr_t dma_addr;
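+ /* mapping size in bytes: 4K, or 2M when backed by a huge page */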
+ unsigned long size;
struct kref ref;
};
@@ -106,36 +109,90 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
-static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
- dma_addr_t *dma_addr)
+static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- struct page *page;
- unsigned long pfn;
+ int total_pages;
+ int npage;
int ret;
- /* Pin the page first. */
- ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
- if (ret != 1) {
- gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
- gfn, ret);
- return -EINVAL;
+ total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+
+ for (npage = 0; npage < total_pages; npage++) {
+ unsigned long cur_gfn = gfn + npage;
+
+ ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
+ WARN_ON(ret != 1);
}
+}
- if (!pfn_valid(pfn)) {
- gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
- vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
- return -EINVAL;
+/* Pin a normal or compound guest page for dma. */
+static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long size, struct page **page)
+{
+ unsigned long base_pfn = 0;
+ int total_pages;
+ int npage;
+ int ret;
+
+ total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+ /*
+ * We pin the pages one-by-one to avoid allocating a big array
+ * on the stack to hold the pfns.
+ */
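+ /*
+ * For a 2M huge page this pins the 512 covering 4K pages, and the
+ * loop below verifies that they are physically contiguous.
+ */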
+ for (npage = 0; npage < total_pages; npage++) {
+ unsigned long cur_gfn = gfn + npage;
+ unsigned long pfn;
+
+ ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (ret != 1) {
+ gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
+ cur_gfn, ret);
+ goto err;
+ }
+
+ if (!pfn_valid(pfn)) {
+ gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
+ npage++;
+ ret = -EFAULT;
+ goto err;
+ }
+
+ if (npage == 0)
+ base_pfn = pfn;
+ else if (base_pfn + npage != pfn) {
+ gvt_vgpu_err("The pages are not continuous\n");
+ ret = -EINVAL;
+ npage++;
+ goto err;
+ }
}
+ *page = pfn_to_page(base_pfn);
+ return 0;
+err:
+ gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
+ return ret;
+}
+
+static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ dma_addr_t *dma_addr, unsigned long size)
+{
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct page *page = NULL;
+ int ret;
+
+ ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
+ if (ret)
+ return ret;
+
/* Setup DMA mapping. */
- page = pfn_to_page(pfn);
- *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr)) {
- gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
- vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
+ gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
+ page_to_pfn(page), ret);
+ gvt_unpin_guest_page(vgpu, gfn, size);
return -ENOMEM;
}
@@ -143,14 +200,12 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
}
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, unsigned long size)
{
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- int ret;
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
- WARN_ON(ret != 1);
+ dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+ gvt_unpin_guest_page(vgpu, gfn, size);
}
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
@@ -191,7 +246,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
}
static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, unsigned long size)
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
@@ -203,6 +258,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
new->vgpu = vgpu;
new->gfn = gfn;
new->dma_addr = dma_addr;
+ new->size = size;
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
@@ -260,7 +316,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
- gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
+ gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
mutex_unlock(&vgpu->vdev.cache_lock);
}
@@ -515,7 +571,8 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
if (!entry)
continue;
- gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
+ gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
mutex_unlock(&vgpu->vdev.cache_lock);
@@ -611,7 +668,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
return;
- intel_gvt_ops->vgpu_deactivate(vgpu);
+ intel_gvt_ops->vgpu_release(vgpu);
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
@@ -1084,7 +1141,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct vfio_region_info info;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- int i, ret;
+ unsigned int i;
+ int ret;
struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
size_t size;
int nr_areas = 1;
@@ -1169,6 +1227,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (info.index >= VFIO_PCI_NUM_REGIONS +
vgpu->vdev.num_regions)
return -EINVAL;
+ info.index =
+ array_index_nospec(info.index,
+ VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
@@ -1195,11 +1257,13 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
&sparse->header, sizeof(*sparse) +
(sparse->nr_areas *
sizeof(*sparse->areas)));
- kfree(sparse);
- if (ret)
+ if (ret) {
+ kfree(sparse);
return ret;
+ }
break;
default:
+ kfree(sparse);
return -EINVAL;
}
}
@@ -1215,6 +1279,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
sizeof(info), caps.buf,
caps.size)) {
kfree(caps.buf);
+ kfree(sparse);
return -EFAULT;
}
info.cap_offset = sizeof(info);
@@ -1223,6 +1288,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
kfree(caps.buf);
}
+ kfree(sparse);
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
@@ -1560,7 +1626,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
- mutex_init(&vgpu->dmabuf_lock);
init_completion(&vgpu->vblank_done);
info->track_node.track_write = kvmgt_page_track_write;
@@ -1648,7 +1713,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
}
int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
- dma_addr_t *dma_addr)
+ unsigned long size, dma_addr_t *dma_addr)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
@@ -1665,11 +1730,11 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
entry = __gvt_cache_find_gfn(info->vgpu, gfn);
if (!entry) {
- ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else {
@@ -1681,7 +1746,7 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
return 0;
err_unmap:
- gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+ gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
mutex_unlock(&info->vgpu->vdev.cache_lock);
return ret;
@@ -1691,7 +1756,8 @@ static void __gvt_dma_release(struct kref *ref)
{
struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
- gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
+ gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
__gvt_cache_remove_entry(entry->vgpu, entry);
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index b31eb36fc102..994366035364 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -67,7 +67,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
return;
gvt = vgpu->gvt;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (reg_is_mmio(gvt, offset)) {
if (read)
@@ -85,7 +85,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
memcpy(pt, p_data, bytes);
}
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -109,7 +109,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
return 0;
}
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -156,7 +156,7 @@ err:
gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
offset, bytes);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -182,7 +182,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
return 0;
}
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -220,7 +220,7 @@ err:
gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
bytes);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 71b620875943..1ffc69eba30e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -42,15 +42,16 @@ struct intel_vgpu;
#define D_BDW (1 << 0)
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
+#define D_BXT (1 << 3)
-#define D_GEN9PLUS (D_SKL | D_KBL)
-#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
+#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
-#define D_SKL_PLUS (D_SKL | D_KBL)
-#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
+#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
#define D_PRE_SKL (D_BDW)
-#define D_ALL (D_BDW | D_SKL | D_KBL)
+#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT)
typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
@@ -98,4 +99,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read);
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 0f949554d118..42e1e6bdcc2c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -364,7 +364,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
+ if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
@@ -401,7 +402,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -446,9 +447,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
#define CTX_CONTEXT_CONTROL_VAL 0x03
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+bool is_inhibit_context(struct intel_context *ce)
{
- u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
+ const u32 *reg_state = ce->lrc_reg_state;
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
@@ -467,7 +468,9 @@ static void switch_mmio(struct intel_vgpu *pre,
u32 old_v, new_v;
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv))
switch_mocs(pre, next, ring_id);
for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -479,7 +482,8 @@ static void switch_mmio(struct intel_vgpu *pre,
 * state image on kabylake, it's initialized by an lri command and
 * saved or restored together with the context.
*/
- if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ && mmio->in_context)
continue;
// save
@@ -501,7 +505,7 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(s->shadow_ctx, ring_id))
+ !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
continue;
if (mmio->mask)
@@ -574,14 +578,18 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ if (IS_SKYLAKE(gvt->dev_priv) ||
+ IS_KABYLAKE(gvt->dev_priv) ||
+ IS_BROXTON(gvt->dev_priv))
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
else
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->in_context)
+ if (mmio->in_context) {
gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 0439eb8057a8..5c3b9ff9f96a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+bool is_inhibit_context(struct intel_context *ce);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 32ffcd566cdd..67f19992b226 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -230,17 +230,18 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
/**
* intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
* @vgpu: a vGPU
- * @gpfn: guest pfn
+ * @gfn: guest pfn
+ * @size: page size
* @dma_addr: retrieve allocated dma addr
*
* Returns:
* 0 on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_dma_map_guest_page(
- struct intel_vgpu *vgpu, unsigned long gfn,
+ struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
dma_addr_t *dma_addr)
{
- return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
+ return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
dma_addr);
}
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index 53e2bd79c97d..256d0db8bbb1 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -157,11 +157,10 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes)
{
- struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_page_track *page_track;
int ret = 0;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
if (!page_track) {
@@ -179,6 +178,6 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
}
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index d053cbe1dc94..09d7bb72b4ff 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
ktime_t cur_time;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
cur_time = ktime_get();
if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
tbs_sched_func(sched_data);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
}
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
+ int ret;
+
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops = &tbs_schedule_ops;
+ ret = gvt->scheduler.sched_ops->init(gvt);
+ mutex_unlock(&gvt->sched_lock);
- return gvt->scheduler.sched_ops->init(gvt);
+ return ret;
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops->clean(gvt);
+ mutex_unlock(&gvt->sched_lock);
}
+/* For the per-vgpu scheduler policy there are two per-vgpu data items:
+ * sched_data and sched_ctl. We treat both as part of the global
+ * scheduler, protected by gvt->sched_lock. Callers must decide
+ * whether vgpu_lock should additionally be held outside.
+ */
+
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
- return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ int ret;
+
+ mutex_lock(&vgpu->gvt->sched_lock);
+ ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
+
+ return ret;
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->gvt->sched_lock);
vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+ mutex_lock(&vgpu->gvt->sched_lock);
if (!vgpu_data->active) {
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+ mutex_unlock(&gvt->sched_lock);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+ mutex_lock(&vgpu->gvt->sched_lock);
scheduler->sched_ops->stop_schedule(vgpu);
if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
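
The hunks above split the old single gvt->lock into a finer hierarchy: gvt->lock for global structures, sched_lock for scheduler state, and a per-vgpu vgpu_lock. Where two locks are needed (see complete_current_workload later in this series), they are always taken in the same order, vgpu_lock first, then sched_lock. The following is a minimal pthreads sketch of that ordering discipline; all names are illustrative, not the kernel's.

/* Consistent lock ordering avoids ABBA deadlock between the per-device
 * lock and the scheduler lock. Build with: cc -pthread lock_order.c
 */
#include <pthread.h>
#include <stdio.h>

struct sched_state { pthread_mutex_t sched_lock; int current; };
struct vdev { pthread_mutex_t vgpu_lock; int id; int active; };

static struct sched_state sched_st = { PTHREAD_MUTEX_INITIALIZER, -1 };

/* Touches only per-device state: vgpu_lock alone suffices. */
static void vdev_deactivate(struct vdev *v)
{
	pthread_mutex_lock(&v->vgpu_lock);
	v->active = 0;
	pthread_mutex_unlock(&v->vgpu_lock);
}

/* Touches per-device *and* scheduler state: take vgpu_lock, then
 * sched_lock -- the same order every caller uses. */
static void vdev_complete(struct vdev *v)
{
	pthread_mutex_lock(&v->vgpu_lock);
	pthread_mutex_lock(&sched_st.sched_lock);
	if (sched_st.current == v->id)
		sched_st.current = -1;
	pthread_mutex_unlock(&sched_st.sched_lock);
	pthread_mutex_unlock(&v->vgpu_lock);
}

int main(void)
{
	struct vdev v = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };

	vdev_complete(&v);
	vdev_deactivate(&v);
	printf("done, active=%d\n", v.active);
	return 0;
}
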
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c2d183b91500..43aa058e29fc 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -45,20 +45,16 @@ static void set_context_pdp_root_pointer(
struct execlist_ring_context *ring_context,
u32 pdp[8])
{
- struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
int i;
for (i = 0; i < 8; i++)
- pdp_pair[i].val = pdp[7 - i];
+ ring_context->pdps[i].val = pdp[7 - i];
}
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
@@ -128,9 +124,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
@@ -205,7 +200,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
static inline bool is_gvt_request(struct i915_request *req)
{
- return i915_gem_context_force_single_submission(req->ctx);
+ return i915_gem_context_force_single_submission(req->gem_context);
}
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -280,10 +275,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static void shadow_context_descriptor_update(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc = 0;
desc = ce->lrc_desc;
@@ -292,7 +285,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
* like GEN8_CTX_* cached in desc_template
*/
desc &= U64_MAX << 12;
- desc |= ctx->desc_template & ((1ULL << 12) - 1);
+ desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
ce->lrc_desc = desc;
}
@@ -300,12 +293,12 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_request *req = workload->req;
void *shadow_ring_buffer_va;
u32 *cs;
- struct i915_request *req = workload->req;
- if (IS_KABYLAKE(req->i915) &&
- is_inhibit_context(req->ctx, req->engine->id))
+ if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+ && is_inhibit_context(req->hw_context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/* allocate shadow ring buffer */
@@ -353,92 +346,67 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ring_id = workload->ring_id;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_ring *ring;
+ struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+ struct intel_context *ce;
+ struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (workload->shadowed)
+ if (workload->req)
return 0;
+ /* Pin the shadow context by gvt even though it will also be pinned
+ * when i915 allocates the request. That is because gvt updates the
+ * guest context from the shadow context when the workload completes,
+ * and at that moment i915 may already have unpinned the shadow
+ * context, leaving the shadow_ctx pages invalid. So gvt needs its
+ * own pin. After updating the guest context, gvt can unpin the
+ * shadow_ctx safely.
+ */
+ ce = intel_context_pin(shadow_ctx, engine);
+ if (IS_ERR(ce)) {
+ gvt_vgpu_err("fail to pin shadow context\n");
+ return PTR_ERR(ce);
+ }
+
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(shadow_ctx,
- dev_priv->engine[ring_id]);
+ if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(ce);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
- goto err_scan;
+ goto err_unpin;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
- goto err_scan;
+ goto err_shadow;
}
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = intel_context_pin(shadow_ctx, engine);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
+ rq = i915_request_alloc(engine, shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
goto err_shadow;
}
+ workload->req = i915_request_get(rq);
ret = populate_shadow_context(workload);
if (ret)
- goto err_unpin;
- workload->shadowed = true;
- return 0;
+ goto err_req;
-err_unpin:
- intel_context_unpin(shadow_ctx, engine);
+ return 0;
+err_req:
+ rq = fetch_and_zero(&workload->req);
+ i915_request_put(rq);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
- return ret;
-}
-
-static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
-{
- int ring_id = workload->ring_id;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct i915_request *rq;
- struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- int ret;
-
- rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto err_unpin;
- }
-
- gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
- workload->req = i915_request_get(rq);
- ret = copy_workload_to_ring_buffer(workload);
- if (ret)
- goto err_unpin;
- return 0;
-
err_unpin:
- intel_context_unpin(shadow_ctx, engine);
- release_shadow_wa_ctx(&workload->wa_ctx);
+ intel_context_unpin(ce);
return ret;
}
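
The new err_req path detaches workload->req with fetch_and_zero() before dropping the reference, so a later completion path cannot double-put it. A self-contained sketch of that idiom follows, patterned on the i915 helper (a plain read-and-clear, not an atomic operation); the request type here is a stand-in.

/* fetch_and_zero(): return the stored pointer and clear the slot, so
 * exactly one code path ends up owning (and releasing) the reference.
 */
#include <stdio.h>
#include <stdlib.h>

#define fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __v = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	__v;					\
})

struct request { int refcount; };

static void request_put(struct request *rq)
{
	if (rq && --rq->refcount == 0)
		free(rq);
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));

	req->refcount = 1;

	/* Hand off ownership: 'req' is NULL afterwards, so a second
	 * cleanup pass that also fetches-and-zeroes puts nothing. */
	request_put(fetch_and_zero(&req));
	request_put(fetch_and_zero(&req));	/* harmless no-op */

	printf("req=%p\n", (void *)req);
	return 0;
}
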
@@ -508,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
i915_gem_obj_finish_shmem_access(bb->obj);
bb->accessing = false;
- i915_vma_move_to_active(bb->vma, workload->req, 0);
+ ret = i915_vma_move_to_active(bb->vma,
+ workload->req,
+ 0);
+ if (ret)
+ goto err;
}
}
return 0;
@@ -517,21 +489,13 @@ err:
return ret;
}
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- struct intel_vgpu_workload *workload = container_of(wa_ctx,
- struct intel_vgpu_workload,
- wa_ctx);
- int ring_id = workload->ring_id;
- struct intel_vgpu_submission *s = &workload->vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
- struct execlist_ring_context *shadow_ring_context;
- struct page *page;
-
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ struct intel_vgpu_workload *workload =
+ container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
+ struct i915_request *rq = workload->req;
+ struct execlist_ring_context *shadow_ring_context =
+ (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -539,9 +503,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
- kunmap_atomic(shadow_ring_context);
- return 0;
}
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
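
The rewritten update_wa_ctx_2_shadow_ctx() recovers the owning workload from the embedded wa_ctx member via container_of(). A minimal, self-contained sketch of that pattern is below; the struct layouts are invented stand-ins.

/* container_of(): recover the outer structure from a pointer to one of
 * its members, by subtracting the member's offset.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct shadow_wa_ctx { unsigned long shadow_gma; };

struct workload {
	int ring_id;
	struct shadow_wa_ctx wa_ctx;	/* embedded member */
};

static void handle_wa_ctx(struct shadow_wa_ctx *wa_ctx)
{
	struct workload *w = container_of(wa_ctx, struct workload, wa_ctx);

	printf("wa_ctx belongs to workload on ring %d\n", w->ring_id);
}

int main(void)
{
	struct workload w = { .ring_id = 0, .wa_ctx = { 0xdead000 } };

	handle_wa_ctx(&w.wa_ctx);	/* callee sees only the member */
	return 0;
}
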
@@ -633,7 +594,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
goto err_unpin_mm;
}
- ret = intel_gvt_generate_request(workload);
+ ret = copy_workload_to_ring_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to generate request\n");
goto err_unpin_mm;
@@ -670,16 +631,14 @@ err_unpin_mm:
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- int ret = 0;
+ int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
+ mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
@@ -687,10 +646,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
goto out;
ret = prepare_workload(workload);
- if (ret) {
- intel_context_unpin(shadow_ctx, engine);
- goto out;
- }
out:
if (ret)
@@ -704,6 +659,7 @@ out:
}
mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -713,7 +669,7 @@ static struct intel_vgpu_workload *pick_next_workload(
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
/*
* no current vgpu / will be scheduled out / no workload
@@ -759,33 +715,29 @@ static struct intel_vgpu_workload *pick_next_workload(
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
return workload;
}
static void update_guest_context(struct intel_vgpu_workload *workload)
{
+ struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- int ring_id = workload->ring_id;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
- gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+ gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
+ workload->ctx_desc.lrca);
+ context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
context_page_num = 19;
i = 2;
@@ -832,7 +784,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
kunmap(page);
}
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -858,19 +811,17 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_request *rq = workload->req;
int event;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->sched_lock);
/* For the workload w/ request, needs to wait for the context
* switch to make sure request is completed.
* For the workload w/o request, directly complete the workload.
*/
- if (workload->req) {
- struct drm_i915_private *dev_priv =
- workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine =
- dev_priv->engine[workload->ring_id];
+ if (rq) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
@@ -886,8 +837,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- i915_request_put(fetch_and_zero(&workload->req));
-
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
update_guest_context(workload);
@@ -896,10 +845,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
- mutex_lock(&dev_priv->drm.struct_mutex);
+
/* unpin shadow ctx as the shadow_ctx update is done */
- intel_context_unpin(s->shadow_ctx, engine);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&rq->i915->drm.struct_mutex);
+ intel_context_unpin(rq->hw_context);
+ mutex_unlock(&rq->i915->drm.struct_mutex);
+
+ i915_request_put(fetch_and_zero(&workload->req));
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -928,7 +880,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
- clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
}
workload->complete(workload);
@@ -939,7 +891,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
if (gvt->scheduler.need_reschedule)
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
struct workload_thread_param {
@@ -957,7 +910,8 @@ static int workload_thread(void *priv)
struct intel_vgpu *vgpu = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv);
+ || IS_KABYLAKE(gvt->dev_priv)
+ || IS_BROXTON(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
@@ -991,9 +945,7 @@ static int workload_thread(void *priv)
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
- mutex_lock(&gvt->lock);
ret = dispatch_workload(workload);
- mutex_unlock(&gvt->lock);
if (ret) {
vgpu = workload->vgpu;
@@ -1130,7 +1082,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
if (!s->active)
return;
- clean_workloads(vgpu, engine_mask);
+ intel_vgpu_clean_workloads(vgpu, engine_mask);
s->ops->reset(vgpu, engine_mask);
}
@@ -1270,7 +1222,6 @@ alloc_workload(struct intel_vgpu *vgpu)
atomic_set(&workload->shadow_ctx_active, 0);
workload->status = -EINPROGRESS;
- workload->shadowed = false;
workload->vgpu = vgpu;
return workload;
@@ -1285,7 +1236,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
u64 gpa;
int i;
- gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+ gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
for (i = 0; i < 8; i++)
intel_gvt_hypervisor_read_gpa(vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 6c644782193e..ca5529d0e48e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
- bool shadowed;
int status;
struct intel_vgpu_mm *shadow_mm;
@@ -159,4 +158,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+ unsigned long engine_mask);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 572a18c2bfb5..a4e8e3cf74fd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -46,6 +46,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
@@ -58,6 +59,9 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+ vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
+ vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;
+
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
@@ -218,27 +222,43 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
* @vgpu: virtual GPU
*
* This function is called when user wants to deactivate a virtual GPU.
- * All virtual GPU runtime information will be destroyed.
+ * The virtual GPU will be stopped.
*
*/
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
- struct intel_gvt *gvt = vgpu->gvt;
-
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
vgpu->active = false;
if (atomic_read(&vgpu->submission.running_workload_num)) {
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
}
intel_vgpu_stop_schedule(vgpu);
- intel_vgpu_dmabuf_cleanup(vgpu);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
+}
+
+/**
+ * intel_gvt_release_vgpu - release a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to release a virtual GPU.
+ * The virtual GPU will be stopped and all runtime information will be
+ * destroyed.
+ *
+ */
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
+{
+ intel_gvt_deactivate_vgpu(vgpu);
+
+ mutex_lock(&vgpu->vgpu_lock);
+ intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
+ intel_vgpu_dmabuf_cleanup(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -252,14 +272,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
WARN(vgpu->active, "vGPU is still active!\n");
intel_gvt_debugfs_remove_vgpu(vgpu);
- idr_remove(&gvt->vgpu_idr, vgpu->id);
- if (idr_is_empty(&gvt->vgpu_idr))
- intel_gvt_clean_irq(gvt);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
@@ -269,10 +286,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
- vfree(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->lock);
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+ if (idr_is_empty(&gvt->vgpu_idr))
+ intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);
mutex_unlock(&gvt->lock);
+
+ vfree(vgpu);
}
#define IDLE_VGPU_IDR 0
@@ -298,6 +321,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
vgpu->id = IDLE_VGPU_IDR;
vgpu->gvt = gvt;
+ mutex_init(&vgpu->vgpu_lock);
for (i = 0; i < I915_NUM_ENGINES; i++)
INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
@@ -324,7 +348,10 @@ out_free_vgpu:
*/
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->vgpu_lock);
intel_vgpu_clean_sched_policy(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+
vfree(vgpu);
}
@@ -342,8 +369,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (!vgpu)
return ERR_PTR(-ENOMEM);
- mutex_lock(&gvt->lock);
-
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
@@ -353,6 +378,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
+ mutex_init(&vgpu->vgpu_lock);
+ mutex_init(&vgpu->dmabuf_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
@@ -400,8 +427,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- mutex_unlock(&gvt->lock);
-
return vgpu;
out_clean_sched_policy:
@@ -424,7 +449,6 @@ out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
vfree(vgpu);
- mutex_unlock(&gvt->lock);
return ERR_PTR(ret);
}
@@ -456,12 +480,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
+ mutex_lock(&gvt->lock);
vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (IS_ERR(vgpu))
- return vgpu;
-
- /* calculate left instance change for types */
- intel_gvt_update_vgpu_types(gvt);
+ if (!IS_ERR(vgpu))
+ /* calculate left instance change for types */
+ intel_gvt_update_vgpu_types(gvt);
+ mutex_unlock(&gvt->lock);
return vgpu;
}
@@ -473,7 +497,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* @engine_mask: engines to reset for GT reset
*
* This function is called when user wants to reset a virtual GPU through
- * device model reset or GT reset. The caller should hold the gvt lock.
+ * device model reset or GT reset. The caller should hold the vgpu lock.
*
* vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
* the whole vGPU to default state as when it is created. This vGPU function
@@ -513,9 +537,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
* scheduler when the reset is triggered by current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
}
intel_vgpu_reset_submission(vgpu, resetting_eng);
@@ -555,7 +579,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
intel_gvt_reset_vgpu_locked(vgpu, true, 0);
- mutex_unlock(&vgpu->gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 13e7b9e4a6e6..f9ce35da4123 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
- if (ppgtt->base.file != stats->file_priv)
+ if (ppgtt->vm.file != stats->file_priv)
continue;
}
@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
dpy_count, dpy_size);
seq_printf(m, "%llu [%pa] gtt total\n",
- ggtt->base.total, &ggtt->mappable_end);
+ ggtt->vm.total, &ggtt->mappable_end);
seq_printf(m, "Supported page sizes: %s\n",
stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
buf, sizeof(buf)));
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct i915_request,
client_link);
rcu_read_lock();
- task = pid_task(request && request->ctx->pid ?
- request->ctx->pid : file->pid,
+ task = pid_task(request && request->gem_context->pid ?
+ request->gem_context->pid : file->pid,
PIDTYPE_PID);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
@@ -1162,19 +1162,28 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- pm_ier = I915_READ(GEN6_PMIER);
- pm_imr = I915_READ(GEN6_PMIMR);
- pm_isr = I915_READ(GEN6_PMISR);
- pm_iir = I915_READ(GEN6_PMIIR);
- pm_mask = I915_READ(GEN6_PMINTRMSK);
- } else {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+ /*
+ * The equivalents of the PM ISR and IIR cannot be read
+ * without affecting the current state of the system.
+ */
+ pm_isr = 0;
+ pm_iir = 0;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
pm_ier = I915_READ(GEN8_GT_IER(2));
pm_imr = I915_READ(GEN8_GT_IMR(2));
pm_isr = I915_READ(GEN8_GT_ISR(2));
pm_iir = I915_READ(GEN8_GT_IIR(2));
- pm_mask = I915_READ(GEN6_PMINTRMSK);
+ } else {
+ pm_ier = I915_READ(GEN6_PMIER);
+ pm_imr = I915_READ(GEN6_PMIMR);
+ pm_isr = I915_READ(GEN6_PMISR);
+ pm_iir = I915_READ(GEN6_PMIIR);
}
+ pm_mask = I915_READ(GEN6_PMINTRMSK);
+
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -1182,8 +1191,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
- seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
- pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+
+ seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ pm_ier, pm_imr, pm_mask);
+ if (INTEL_GEN(dev_priv) <= 10)
+ seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+ pm_isr, pm_iir);
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1205,7 +1218,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
seq_printf(m, "RP PREV UP: %d (%dus)\n",
rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
- seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
+ seq_printf(m, "Up threshold: %d%%\n",
+ rps->power.up_threshold);
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
@@ -1213,7 +1227,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
- seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
+ seq_printf(m, "Down threshold: %d%%\n",
+ rps->power.down_threshold);
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
@@ -1346,11 +1361,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno, seqno[id],
intel_engine_last_submit(engine));
- seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
+ seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)),
- yesno(engine->hangcheck.stalled));
+ yesno(engine->hangcheck.stalled),
+ yesno(engine->hangcheck.wedged));
spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
@@ -1645,11 +1661,6 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
else
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
- if (fbc->work.scheduled)
- seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
- fbc->work.scheduled_vblank,
- drm_crtc_vblank_count(&fbc->crtc->base));
-
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
@@ -1895,7 +1906,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.format->cpp[0] * 8,
fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
- describe_obj(m, fbdev_fb->obj);
+ describe_obj(m, intel_fb_obj(&fbdev_fb->base));
seq_putc(m, '\n');
}
#endif
@@ -1913,7 +1924,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.format->cpp[0] * 8,
fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
- describe_obj(m, fb->obj);
+ describe_obj(m, intel_fb_obj(&fb->base));
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
@@ -2209,6 +2220,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
+ seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
@@ -2252,13 +2264,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
- rps_power_to_str(rps->power));
+ rps_power_to_str(rps->power.mode));
seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
rpup && rpupei ? 100 * rpup / rpupei : 0,
- rps->up_threshold);
+ rps->power.up_threshold);
seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
- rps->down_threshold);
+ rps->power.down_threshold);
} else {
seq_puts(m, "\nRPS Autotuning inactive\n");
}
@@ -2523,7 +2535,7 @@ static int i915_guc_log_level_get(void *data, u64 *val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- *val = intel_guc_log_level_get(&dev_priv->guc.log);
+ *val = intel_guc_log_get_level(&dev_priv->guc.log);
return 0;
}
@@ -2535,7 +2547,7 @@ static int i915_guc_log_level_set(void *data, u64 val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- return intel_guc_log_level_set(&dev_priv->guc.log, val);
+ return intel_guc_log_set_level(&dev_priv->guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2583,31 +2595,9 @@ static const struct file_operations i915_guc_log_relay_fops = {
.release = i915_guc_log_relay_release,
};
-static const char *psr2_live_status(u32 val)
-{
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON"
- };
-
- val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
- if (val < ARRAY_SIZE(live_status))
- return live_status[val];
-
- return "unknown";
-}
-
-static const char *psr_sink_status(u8 val)
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
+ u8 val;
static const char * const sink_status[] = {
"inactive",
"transition to active, capture and display",
@@ -2616,22 +2606,94 @@ static const char *psr_sink_status(u8 val)
"transition to inactive, capture and display, timing re-sync",
"reserved",
"reserved",
- "sink internal error"
+ "sink internal error",
};
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp =
+ enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ int ret;
- val &= DP_PSR_SINK_STATE_MASK;
- if (val < ARRAY_SIZE(sink_status))
- return sink_status[val];
+ if (!CAN_PSR(dev_priv)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
- return "unknown";
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+ if (ret == 1) {
+ const char *str = "unknown";
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+ } else {
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+ u32 val, psr_status;
+
+ if (dev_priv->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ psr_status = I915_READ(EDP_PSR2_STATUS);
+ val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
+ EDP_PSR2_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status)) {
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+ psr_status, live_status[val]);
+ return;
+ }
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ psr_status = I915_READ(EDP_PSR_STATUS);
+ val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status)) {
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+ psr_status, live_status[val]);
+ return;
+ }
+ }
+
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 psrperf = 0;
- u32 stat[3];
- enum pipe pipe;
bool enabled = false;
bool sink_support;
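
psr_source_status() above decodes a hardware status register by masking out the state field, shifting it down, and indexing a string table with a bounds check. Here is a self-contained sketch of that decoding, with an invented register layout purely for illustration.

/* Decode a packed state field into a human-readable name, falling back
 * to "unknown" for out-of-range values.
 */
#include <stdio.h>

#define STATUS_STATE_MASK	(0xfu << 28)
#define STATUS_STATE_SHIFT	28

static const char * const live_status[] = {
	"IDLE", "CAPTURE", "SLEEP", "DEEP_SLEEP",
};

static const char *decode_state(unsigned int reg)
{
	unsigned int val = (reg & STATUS_STATE_MASK) >> STATUS_STATE_SHIFT;

	if (val < sizeof(live_status) / sizeof(live_status[0]))
		return live_status[val];
	return "unknown";
}

int main(void)
{
	unsigned int reg = 2u << STATUS_STATE_SHIFT;	/* fake readout */

	printf("Source PSR status: 0x%x [%s]\n", reg, decode_state(reg));
	return 0;
}
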
@@ -2649,50 +2711,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
dev_priv->psr.busy_frontbuffer_bits);
- seq_printf(m, "Re-enable work scheduled: %s\n",
- yesno(work_busy(&dev_priv->psr.work.work)));
-
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled)
- enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
- else
- enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
- } else {
- for_each_pipe(dev_priv, pipe) {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
- enum intel_display_power_domain power_domain;
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain))
- continue;
-
- stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- enabled = true;
-
- intel_display_power_put(dev_priv, power_domain);
- }
- }
+ if (dev_priv->psr.psr2_enabled)
+ enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+ else
+ enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
seq_printf(m, "Main link in standby mode: %s\n",
yesno(dev_priv->psr.link_standby));
- seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
-
- if (!HAS_DDI(dev_priv))
- for_each_pipe(dev_priv, pipe) {
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- seq_printf(m, " pipe %c", pipe_name(pipe));
- }
- seq_puts(m, "\n");
+ seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
/*
- * VLV/CHV PSR has no kind of performance counter
 * SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2701,21 +2731,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
- if (dev_priv->psr.psr2_enabled) {
- u32 psr2 = I915_READ(EDP_PSR2_STATUS);
-
- seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
- psr2, psr2_live_status(psr2));
- }
- if (dev_priv->psr.enabled) {
- struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
- u8 val;
-
- if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
- psr_sink_status(val));
- }
+ psr_source_status(dev_priv, m);
mutex_unlock(&dev_priv->psr.lock);
if (READ_ONCE(dev_priv->psr.debug)) {
@@ -2762,86 +2779,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
i915_edp_psr_debug_get, i915_edp_psr_debug_set,
"%llu\n");
-static int i915_sink_crc(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp = NULL;
- struct drm_modeset_acquire_ctx ctx;
- int ret;
- u8 crc[6];
-
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
- drm_connector_list_iter_begin(dev, &conn_iter);
-
- for_each_intel_connector_iter(connector, &conn_iter) {
- struct drm_crtc *crtc;
- struct drm_connector_state *state;
- struct intel_crtc_state *crtc_state;
-
- if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
- continue;
-
-retry:
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
- if (ret)
- goto err;
-
- state = connector->base.state;
- if (!state->best_encoder)
- continue;
-
- crtc = state->crtc;
- ret = drm_modeset_lock(&crtc->mutex, &ctx);
- if (ret)
- goto err;
-
- crtc_state = to_intel_crtc_state(crtc->state);
- if (!crtc_state->base.active)
- continue;
-
- /*
- * We need to wait for all crtc updates to complete, to make
- * sure any pending modesets and plane updates are completed.
- */
- if (crtc_state->base.commit) {
- ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
-
- if (ret)
- goto err;
- }
-
- intel_dp = enc_to_intel_dp(state->best_encoder);
-
- ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
- if (ret)
- goto err;
-
- seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
- crc[0], crc[1], crc[2],
- crc[3], crc[4], crc[5]);
- goto out;
-
-err:
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
- goto out;
- }
- ret = -ENODEV;
-out:
- drm_connector_list_iter_end(&conn_iter);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-}
-
static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3398,28 +3335,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_workarounds *workarounds = &dev_priv->workarounds;
+ struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
int i;
- intel_runtime_pm_get(dev_priv);
-
- seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for (i = 0; i < workarounds->count; ++i) {
- i915_reg_t addr;
- u32 mask, value, read;
- bool ok;
-
- addr = workarounds->reg[i].addr;
- mask = workarounds->reg[i].mask;
- value = workarounds->reg[i].value;
- read = I915_READ(addr);
- ok = (value & mask) == (read & mask);
- seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
- i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
- }
-
- intel_runtime_pm_put(dev_priv);
+ seq_printf(m, "Workarounds applied: %d\n", wa->count);
+ for (i = 0; i < wa->count; ++i)
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+ wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
return 0;
}
@@ -4121,7 +4043,8 @@ fault_irq_set(struct drm_i915_private *i915,
err = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
- I915_WAIT_INTERRUPTIBLE);
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unlock;
@@ -4226,7 +4149,8 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (val & DROP_RETIRE)
i915_retire_requests(dev_priv);
@@ -4245,8 +4169,13 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(dev_priv);
fs_reclaim_release(GFP_KERNEL);
- if (val & DROP_IDLE)
- drain_delayed_work(&dev_priv->gt.idle_work);
+ if (val & DROP_IDLE) {
+ do {
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ flush_delayed_work(&dev_priv->gt.retire_work);
+ drain_delayed_work(&dev_priv->gt.idle_work);
+ } while (READ_ONCE(dev_priv->gt.awake));
+ }
if (val & DROP_FREED)
i915_gem_drain_freed_objects(dev_priv);
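
The new DROP_IDLE branch keeps flushing retire work while requests remain, then drains the idle work, repeating until the device reports it is no longer awake. A tiny sketch of that flush-until-quiescent loop follows; the helpers below only simulate the state machine and are not the kernel's.

/* Retry loop: retire outstanding work, then try to park, until idle. */
#include <stdbool.h>
#include <stdio.h>

static int active_requests = 3;
static bool awake = true;

static void flush_retire_work(void)
{
	if (active_requests > 0)
		active_requests--;	/* retire one completed request */
}

static void drain_idle_work(void)
{
	if (active_requests == 0)
		awake = false;		/* nothing left: park the device */
}

int main(void)
{
	/* Mirrors: do { flush retire; drain idle; } while (awake); */
	do {
		if (active_requests)
			flush_retire_work();
		drain_idle_work();
	} while (awake);

	printf("idle, active_requests=%d\n", active_requests);
	return 0;
}
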
@@ -4795,7 +4724,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
- {"i915_sink_crc_eDP1", i915_sink_crc, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
@@ -4829,7 +4757,6 @@ static const struct i915_debugfs_files {
#endif
{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
{"i915_next_seqno", &i915_next_seqno_fops},
- {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -4849,7 +4776,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
struct dentry *ent;
- int ret, i;
+ int i;
ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
minor->debugfs_root, to_i915(minor->dev),
@@ -4857,10 +4784,6 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
if (!ent)
return -ENOMEM;
- ret = intel_pipe_crc_create(minor);
- if (ret)
- return ret;
-
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
ent = debugfs_create_file(i915_debugfs_files[i].name,
S_IRUGO | S_IWUSR,
@@ -4982,9 +4905,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
debugfs_create_file("i915_dpcd", S_IRUGO, root,
connector, &i915_dpcd_fops);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
connector, &i915_panel_fops);
+ debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+ connector, &i915_psr_sink_status_fops);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9c449b8d8eab..f8cfd16be534 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,18 @@ bool __i915_inject_load_failure(const char *func, int line)
if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
i915_modparams.inject_load_failure, func, line);
+ i915_modparams.inject_load_failure = 0;
return true;
}
return false;
}
+
+bool i915_error_injected(void)
+{
+ return i915_load_fail_count && !i915_modparams.inject_load_failure;
+}
+
#endif
#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
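
The hunk above makes failure injection one-shot (the module parameter is zeroed once it fires) and adds i915_error_injected() so later error handling can tell a deliberate failure from a real one. Below is a hedged, self-contained sketch of that checkpoint pattern; the names and the driver flow are illustrative only.

/* Fail at the Nth checkpoint, exactly once, and remember that we did. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int inject_load_failure = 3;	/* fail at checkpoint 3 */
static unsigned int load_fail_count;

static bool inject_failure(const char *func, int line)
{
	if (!inject_load_failure)
		return false;
	if (++load_fail_count == inject_load_failure) {
		printf("injecting failure at checkpoint %u [%s:%d]\n",
		       load_fail_count, func, line);
		inject_load_failure = 0;	/* one-shot */
		return true;
	}
	return false;
}

static bool error_injected(void)
{
	/* Fired and then cleared: a later error is the injected one. */
	return load_fail_count && !inject_load_failure;
}

#define INJECT() inject_failure(__func__, __LINE__)

int main(void)
{
	for (int step = 1; step <= 5; step++)
		if (INJECT())
			printf("step %d failed (injected=%d)\n",
			       step, error_injected());
	return 0;
}
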
@@ -97,8 +104,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
vaf.fmt = fmt;
vaf.va = &args;
- dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
- __builtin_return_address(0), &vaf);
+ if (is_error)
+ dev_printk(level, kdev, "%pV", &vaf);
+ else
+ dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
if (is_error && !shown_bug_once) {
/*
@@ -110,25 +122,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
dev_notice(kdev, "%s", FDO_BUG_MSG);
shown_bug_once = true;
}
-
- va_end(args);
-}
-
-static bool i915_error_injected(struct drm_i915_private *dev_priv)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
- return i915_modparams.inject_load_failure &&
- i915_load_fail_count == i915_modparams.inject_load_failure;
-#else
- return false;
-#endif
}
-#define i915_load_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, \
- i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
- fmt, ##__VA_ARGS__)
-
/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
@@ -233,6 +228,8 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
if (id)
DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -246,14 +243,6 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
struct pci_dev *pch = NULL;
- /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
- * (which really amounts to a PCH but no South Display).
- */
- if (INTEL_INFO(dev_priv)->num_pipes == 0) {
- dev_priv->pch_type = PCH_NOP;
- return;
- }
-
/*
* The reason to probe ISA bridge instead of Dev31:Fun0 is to
* make graphics device passthrough work easy for VMM, that only
@@ -282,18 +271,28 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
pch->subsystem_device)) {
id = intel_virt_detect_pch(dev_priv);
- if (id) {
- pch_type = intel_pch_type(dev_priv, id);
- if (WARN_ON(pch_type == PCH_NONE))
- pch_type = PCH_NOP;
- } else {
- pch_type = PCH_NOP;
- }
+ pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (WARN_ON(id && pch_type == PCH_NONE))
+ id = 0;
+
dev_priv->pch_type = pch_type;
dev_priv->pch_id = id;
break;
}
}
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+ DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+ dev_priv->pch_type = PCH_NOP;
+ dev_priv->pch_id = 0;
+ }
+
if (!pch)
DRM_DEBUG_KMS("No PCH found.\n");
@@ -634,26 +633,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static void i915_gem_fini(struct drm_i915_private *dev_priv)
-{
- /* Flush any outstanding unpin_work. */
- i915_gem_drain_workqueue(dev_priv);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uc_fini_hw(dev_priv);
- intel_uc_fini(dev_priv);
- i915_gem_cleanup_engines(dev_priv);
- i915_gem_contexts_fini(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- intel_uc_fini_misc(dev_priv);
- i915_gem_cleanup_userptr(dev_priv);
-
- i915_gem_drain_freed_objects(dev_priv);
-
- WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -703,7 +682,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init(dev_priv);
if (ret)
- goto cleanup_irq;
+ goto cleanup_modeset;
intel_setup_overlay(dev_priv);
@@ -723,6 +702,8 @@ cleanup_gem:
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev_priv);
+cleanup_modeset:
+ intel_modeset_cleanup(dev);
cleanup_irq:
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev_priv);
@@ -919,7 +900,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->uncore.lock);
mutex_init(&dev_priv->sb_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
@@ -1173,8 +1153,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
- intel_opregion_setup(dev_priv);
-
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1189,6 +1167,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* get lost on g4x as well, and interrupt delivery seems to stay
* properly dead afterwards. So we'll just disable them for all
* pre-gen5 chipsets.
+ *
+ * DP AUX and GMBUS IRQs on gen4 seem to be able to generate legacy
+ * interrupts even when in MSI mode. This results in spurious
+ * interrupt warnings if the legacy IRQ number is shared with another
+ * device. The kernel then disables that interrupt source and so
+ * prevents the other device from working properly.
*/
if (INTEL_GEN(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
@@ -1197,10 +1181,16 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = intel_gvt_init(dev_priv);
if (ret)
- goto err_ggtt;
+ goto err_msi;
+
+ intel_opregion_setup(dev_priv);
return 0;
+err_msi:
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
i915_ggtt_cleanup_hw(dev_priv);
err_perf:
@@ -1433,6 +1423,7 @@ out_fini:
drm_dev_fini(&dev_priv->drm);
out_free:
kfree(dev_priv);
+ pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -1553,17 +1544,30 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
return false;
}
+static int i915_drm_prepare(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ int err;
+
+ /*
+ * NB intel_display_suspend() may issue new requests after we've
+ * ostensibly marked the GPU as ready-to-sleep here. We need to
+ * split out that work and pull it forward so that after this point,
+ * the GPU is not woken again.
+ */
+ err = i915_gem_suspend(i915);
+ if (err)
+ dev_err(&i915->drm.pdev->dev,
+ "GEM idle failed, suspend/resume might fail\n");
+
+ return err;
+}
+
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
pci_power_t opregion_target_state;
- int error;
-
- /* ignore lid events during suspend */
- mutex_lock(&dev_priv->modeset_restore_lock);
- dev_priv->modeset_restore = MODESET_SUSPENDED;
- mutex_unlock(&dev_priv->modeset_restore_lock);
disable_rpm_wakeref_asserts(dev_priv);
@@ -1575,16 +1579,9 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(pdev);
- error = i915_gem_suspend(dev_priv);
- if (error) {
- dev_err(&pdev->dev,
- "GEM idle failed, resume might fail\n");
- goto out;
- }
-
intel_display_suspend(dev);
- intel_dp_mst_suspend(dev);
+ intel_dp_mst_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
@@ -1600,7 +1597,6 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
- intel_uncore_suspend(dev_priv);
intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1609,10 +1605,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_csr_ucode_suspend(dev_priv);
-out:
enable_rpm_wakeref_asserts(dev_priv);
- return error;
+ return 0;
}
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
@@ -1623,7 +1618,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
+ i915_gem_suspend_late(dev_priv);
+
intel_display_set_init_power(dev_priv, false);
+ intel_uncore_suspend(dev_priv);
/*
* In case of firmware assisted context save/restore don't manually
@@ -1710,6 +1708,8 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(dev_priv);
intel_sanitize_gt_powersave(dev_priv);
+ i915_gem_sanitize(dev_priv);
+
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
@@ -1746,7 +1746,7 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_dp_mst_resume(dev);
+ intel_dp_mst_resume(dev_priv);
intel_display_resume(dev);
@@ -1764,10 +1764,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
- mutex_lock(&dev_priv->modeset_restore_lock);
- dev_priv->modeset_restore = MODESET_DONE;
- mutex_unlock(&dev_priv->modeset_restore_lock);
-
intel_opregion_notify_adapter(dev_priv, PCI_D0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1851,7 +1847,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
else
intel_display_set_init_power(dev_priv, true);
- i915_gem_sanitize(dev_priv);
+ intel_engines_sanitize(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2081,6 +2077,22 @@ out:
return ret;
}
+static int i915_pm_prepare(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ if (!dev) {
+ dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_prepare(dev);
+}
+
static int i915_pm_suspend(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2731,6 +2743,7 @@ const struct dev_pm_ops i915_pm_ops = {
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
*/
+ .prepare = i915_pm_prepare,
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52f3b91d14fd..4aca5344863d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -40,6 +40,7 @@
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
@@ -85,8 +86,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180514"
-#define DRIVER_TIMESTAMP 1526300884
+#define DRIVER_DATE "20180719"
+#define DRIVER_TIMESTAMP 1532015279
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -107,13 +108,24 @@
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
__i915_inject_load_failure(__func__, __LINE__)
+
+bool i915_error_injected(void);
+
#else
+
#define i915_inject_load_failure() false
+#define i915_error_injected() false
+
#endif
+#define i915_load_error(i915, fmt, ...) \
+ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
+
typedef struct {
uint32_t val;
} uint_fixed_16_16_t;
@@ -287,7 +299,6 @@ struct i915_hotplug {
u32 event_bits;
struct delayed_work reenable_work;
- struct intel_digital_port *irq_port[I915_MAX_PORTS];
u32 long_port_mask;
u32 short_port_mask;
struct work_struct dig_port_work;
@@ -500,6 +511,7 @@ struct intel_fbc {
bool enabled;
bool active;
+ bool flip_pending;
bool underrun_detected;
struct work_struct underrun_work;
@@ -567,12 +579,6 @@ struct intel_fbc {
unsigned int gen9_wa_cfb_stride;
} params;
- struct intel_fbc_work {
- bool scheduled;
- u64 scheduled_vblank;
- struct work_struct work;
- } work;
-
const char *no_fbc_reason;
};
@@ -608,26 +614,17 @@ struct i915_psr {
bool sink_support;
struct intel_dp *enabled;
bool active;
- struct delayed_work work;
+ struct work_struct work;
unsigned busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
bool colorimetry_support;
bool alpm;
- bool has_hw_tracking;
bool psr2_enabled;
u8 sink_sync_latency;
bool debug;
ktime_t last_entry_attempt;
ktime_t last_exit;
-
- void (*enable_source)(struct intel_dp *,
- const struct intel_crtc_state *);
- void (*disable_source)(struct intel_dp *,
- const struct intel_crtc_state *);
- void (*enable_sink)(struct intel_dp *);
- void (*activate)(struct intel_dp *);
- void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};
enum intel_pch {
@@ -639,7 +636,7 @@ enum intel_pch {
PCH_KBP, /* Kaby Lake PCH */
PCH_CNP, /* Cannon Lake PCH */
PCH_ICP, /* Ice Lake PCH */
- PCH_NOP,
+ PCH_NOP, /* PCH without south display */
};
enum intel_sbi_destination {
@@ -652,6 +649,7 @@ enum intel_sbi_destination {
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
+#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
struct intel_fbdev;
struct intel_fbc_work;
@@ -781,11 +779,17 @@ struct intel_rps {
u8 rp0_freq; /* Non-overclocked max frequency. */
u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
- u8 up_threshold; /* Current %busy required to uplock */
- u8 down_threshold; /* Current %busy required to downclock */
-
int last_adj;
- enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+ struct {
+ struct mutex mutex;
+
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+ unsigned int interactive;
+
+ u8 up_threshold; /* Current %busy required to upclock */
+ u8 down_threshold; /* Current %busy required to downclock */
+ } power;
bool enabled;
atomic_t num_waiters;
@@ -954,7 +958,7 @@ struct i915_gem_mm {
/**
* Small stash of WC pages
*/
- struct pagevec wc_stash;
+ struct pagestash wc_stash;
/**
* tmpfs instance used for shmem backed objects
@@ -1002,16 +1006,13 @@ struct i915_gem_mm {
#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
-enum modeset_restore {
- MODESET_ON_LID_OPEN,
- MODESET_DONE,
- MODESET_SUSPENDED,
-};
+#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
#define DDC_PIN_B 0x05
@@ -1056,9 +1057,9 @@ struct intel_vbt_data {
/* Feature bits */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
+ unsigned int int_lvds_support:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
unsigned int panel_type:4;
@@ -1074,7 +1075,6 @@ struct intel_vbt_data {
int vswing;
bool low_vswing;
bool initialized;
- bool support;
int bpp;
struct edp_power_seq pps;
} edp;
@@ -1085,8 +1085,8 @@ struct intel_vbt_data {
bool require_aux_wakeup;
int idle_frames;
enum psr_lines_to_wait lines_to_wait;
- int tp1_wakeup_time;
- int tp2_tp3_wakeup_time;
+ int tp1_wakeup_time_us;
+ int tp2_tp3_wakeup_time_us;
} psr;
struct {
@@ -1271,20 +1271,11 @@ enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_MAX,
};
-struct intel_pipe_crc_entry {
- uint32_t frame;
- uint32_t crc[5];
-};
-
#define INTEL_PIPE_CRC_ENTRIES_NR 128
struct intel_pipe_crc {
spinlock_t lock;
- bool opened; /* exclusive access to the result file */
- struct intel_pipe_crc_entry *entries;
- enum intel_pipe_crc_source source;
- int head, tail;
- wait_queue_head_t wq;
int skipped;
+ enum intel_pipe_crc_source source;
};
struct i915_frontbuffer_tracking {
@@ -1299,7 +1290,7 @@ struct i915_frontbuffer_tracking {
};
struct i915_wa_reg {
- i915_reg_t addr;
+ u32 addr;
u32 value;
/* bitmask representing WA bits */
u32 mask;
@@ -1739,12 +1730,9 @@ struct drm_i915_private {
unsigned long quirks;
- enum modeset_restore modeset_restore;
- struct mutex modeset_restore_lock;
struct drm_atomic_state *modeset_restore_state;
struct drm_modeset_acquire_ctx reset_ctx;
- struct list_head vm_list; /* Global list of all address spaces */
struct i915_ggtt ggtt; /* VM representing the global address space */
struct i915_gem_mm mm;
@@ -1850,6 +1838,7 @@ struct drm_i915_private {
*/
struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
} contexts;
@@ -1957,7 +1946,9 @@ struct drm_i915_private {
*/
struct i915_perf_stream *exclusive_stream;
+ struct intel_context *pinned_ctx;
u32 specific_ctx_id;
+ u32 specific_ctx_id_mask;
struct hrtimer poll_check_timer;
wait_queue_head_t poll_wq;
@@ -2310,6 +2301,7 @@ intel_info(const struct drm_i915_private *dev_priv)
}
#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
@@ -2562,17 +2554,10 @@ intel_info(const struct drm_i915_private *dev_priv)
(IS_CANNONLAKE(dev_priv) || \
IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
-/*
- * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
- * even when in MSI mode. This results in spurious interrupt warnings if the
- * legacy irq no. is shared with another device. The kernel then disables that
- * interrupt source and so prevents the other device from working properly.
- *
- * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
- * interrupts.
- */
-#define HAS_AUX_IRQ(dev_priv) true
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
+#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
+ IS_GEMINILAKE(dev_priv) || \
+ IS_KABYLAKE(dev_priv))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -2747,14 +2732,14 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
int intel_engines_init(struct drm_i915_private *dev_priv);
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
+
/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
- enum hpd_pin pin);
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
@@ -3101,9 +3086,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
}
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -3168,12 +3150,14 @@ void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
+void i915_gem_fini(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags);
+ unsigned int flags, long timeout);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_fault *vmf);
+vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
@@ -3212,7 +3196,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
- return container_of(vm, struct i915_hw_ppgtt, base);
+ return container_of(vm, struct i915_hw_ppgtt, vm);
}
/* i915_gem_fence_reg.c */
@@ -3319,7 +3303,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3444,6 +3428,8 @@ extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
+ bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
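A hedged sketch of the new interactive hint (the call site and helper are hypothetical; the real callers are not in this hunk): display paths can bracket user-visible work so the rps.power thresholds favour responsiveness while the counter is non-zero.

	/* Hypothetical caller, e.g. around a fbdev or atomic update */
	intel_rps_mark_interactive(i915, true);
	err = do_user_visible_update(i915);	/* hypothetical helper */
	intel_rps_mark_interactive(i915, false);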
@@ -3677,14 +3663,6 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
-static inline unsigned long
-timespec_to_jiffies_timeout(const struct timespec *value)
-{
- unsigned long j = timespec_to_jiffies(value);
-
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
/*
* If you need to wait X milliseconds between events A and B, but event B
* doesn't happen exactly after event A, you record the timestamp (jiffies) of
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d44ad7bc1e94..fcc73a6ab503 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+ return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -139,6 +139,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static u32 __i915_gem_park(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
@@ -181,6 +183,8 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
void i915_gem_park(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
@@ -193,6 +197,8 @@ void i915_gem_park(struct drm_i915_private *i915)
void i915_gem_unpark(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->gt.active_requests);
@@ -243,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma;
u64 pinned;
- pinned = ggtt->base.reserved;
+ pinned = ggtt->vm.reserved;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
- list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = ggtt->base.total;
+ args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -796,7 +802,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
* that was!).
*/
- wmb();
+ i915_gem_chipset_flush(dev_priv);
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
@@ -831,6 +837,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
}
break;
+ case I915_GEM_DOMAIN_WC:
+ wmb();
+ break;
+
case I915_GEM_DOMAIN_CPU:
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
break;
@@ -1217,9 +1227,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
wmb();
} else {
page_base += offset & PAGE_MASK;
@@ -1240,8 +1250,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
- ggtt->base.clear_range(&ggtt->base,
- node.start, node.size);
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
@@ -1420,9 +1429,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
wmb(); /* flush modifications to the GGTT (insert_page) */
} else {
page_base += offset & PAGE_MASK;
@@ -1449,8 +1458,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
- ggtt->base.clear_range(&ggtt->base,
- node.start, node.size);
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
@@ -1619,6 +1627,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto err;
}
+ /* Writes not allowed into this read-only object */
+ if (i915_gem_object_is_readonly(obj)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
ret = -ENODEV;
@@ -1991,9 +2005,9 @@ compute_partial_view(struct drm_i915_gem_object *obj,
* The current feature set supported by i915_gem_fault() and thus GTT mmaps
* is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
*/
-int i915_gem_fault(struct vm_fault *vmf)
+vm_fault_t i915_gem_fault(struct vm_fault *vmf)
{
-#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
struct drm_device *dev = obj->base.dev;
@@ -2002,9 +2016,12 @@ int i915_gem_fault(struct vm_fault *vmf)
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
struct i915_vma *vma;
pgoff_t page_offset;
- unsigned int flags;
int ret;
+ /* Sanity check that we allow writing into this object */
+ if (i915_gem_object_is_readonly(obj) && write)
+ return VM_FAULT_SIGBUS;
+
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
@@ -2038,27 +2055,34 @@ int i915_gem_fault(struct vm_fault *vmf)
goto err_unlock;
}
- /* If the object is smaller than a couple of partial vma, it is
- * not worth only creating a single partial vma - we may as well
- * clear enough space for the full object.
- */
- flags = PIN_MAPPABLE;
- if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
- flags |= PIN_NONBLOCK | PIN_NONFAULT;
/* Now pin it into the GTT as needed */
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK |
+ PIN_NONFAULT);
if (IS_ERR(vma)) {
/* Use a partial view if it is bigger than available space */
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
+ unsigned int flags;
+
+ flags = PIN_MAPPABLE;
+ if (view.type == I915_GGTT_VIEW_NORMAL)
+ flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
- /* Userspace is now writing through an untracked VMA, abandon
+ /*
+ * Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
*/
obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ if (IS_ERR(vma) && !view.type) {
+ flags = PIN_MAPPABLE;
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ }
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
@@ -2108,10 +2132,9 @@ err:
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = VM_FAULT_SIGBUS;
- break;
- }
+ if (!i915_terminally_wedged(&dev_priv->gpu_error))
+ return VM_FAULT_SIGBUS;
+ /* else: fall through */
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
@@ -2126,21 +2149,16 @@ err:
* EBUSY is ok: this just means that another thread
* already did the job.
*/
- ret = VM_FAULT_NOPAGE;
- break;
+ return VM_FAULT_NOPAGE;
case -ENOMEM:
- ret = VM_FAULT_OOM;
- break;
+ return VM_FAULT_OOM;
case -ENOSPC:
case -EFAULT:
- ret = VM_FAULT_SIGBUS;
- break;
+ return VM_FAULT_SIGBUS;
default:
WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
- ret = VM_FAULT_SIGBUS;
- break;
+ return VM_FAULT_SIGBUS;
}
- return ret;
}
static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
@@ -2259,7 +2277,9 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
/* Attempt to reap some mmap space from dead objects */
do {
- err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+ err = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
break;
@@ -2404,29 +2424,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
rcu_read_unlock();
}
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
+static struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
- if (i915_gem_object_has_pinned_pages(obj))
- return;
-
- GEM_BUG_ON(obj->bind_count);
- if (!i915_gem_object_has_pages(obj))
- return;
-
- /* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
- goto unlock;
-
- /* ->put_pages might need to allocate memory for the bit17 swizzle
- * array, hence protect them from being reaped by removing them from gtt
- * lists early. */
pages = fetch_and_zero(&obj->mm.pages);
- GEM_BUG_ON(!pages);
+ if (!pages)
+ return NULL;
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
@@ -2445,12 +2451,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
}
__i915_gem_object_reset_page_iter(obj);
+ obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+ return pages;
+}
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ return;
+
+ GEM_BUG_ON(obj->bind_count);
+ if (!i915_gem_object_has_pages(obj))
+ return;
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ goto unlock;
+
+ /*
+ * ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early.
+ */
+ pages = __i915_gem_object_unset_pages(obj);
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
- obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
unlock:
mutex_unlock(&obj->mm.lock);
}
@@ -2968,16 +2999,16 @@ static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
- DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
- ctx->name, atomic_read(&ctx->guilty_count),
- score, yesno(banned && bannable));
-
/* Cool contexts don't accumulate client ban score */
if (!bannable)
return;
- if (banned)
+ if (banned) {
+ DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count),
+ score);
i915_gem_context_set_banned(ctx);
+ }
if (!IS_ERR_OR_NULL(ctx->file_priv))
i915_gem_client_mark_guilty(ctx->file_priv, ctx);
@@ -3025,7 +3056,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
- struct i915_request *request = NULL;
+ struct i915_request *request;
/*
* During the reset sequence, we must prevent the engine from
@@ -3036,52 +3067,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
*/
intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
- /*
- * Prevent the signaler thread from updating the request
- * state (by calling dma_fence_signal) as we are processing
- * the reset. The write from the GPU of the seqno is
- * asynchronous and the signaler thread may see a different
- * value to us and declare the request complete, even though
- * the reset routine have picked that request as the active
- * (incomplete) request. This conflict is not handled
- * gracefully!
- */
- kthread_park(engine->breadcrumbs.signaler);
-
- /*
- * Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its execlists->tasklet *just* as we are
- * calling engine->init_hw() and also writing the ELSP.
- * Turning off the execlists->tasklet until the reset is over
- * prevents the race.
- *
- * Note that this needs to be a single atomic operation on the
- * tasklet (flush existing tasks, prevent new tasks) to prevent
- * a race between reset and set-wedged. It is not, so we do the best
- * we can atm and make sure we don't lock the machine up in the more
- * common case of recursively being called from set-wedged from inside
- * i915_reset.
- */
- if (!atomic_read(&engine->execlists.tasklet.count))
- tasklet_kill(&engine->execlists.tasklet);
- tasklet_disable(&engine->execlists.tasklet);
-
- /*
- * We're using worker to queue preemption requests from the tasklet in
- * GuC submission mode.
- * Even though tasklet was disabled, we may still have a worker queued.
- * Let's make sure that all workers scheduled before disabling the
- * tasklet are completed before continuing with the reset.
- */
- if (engine->i915->guc.preempt_wq)
- flush_workqueue(engine->i915->guc.preempt_wq);
-
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- request = i915_gem_find_active_request(engine);
+ request = engine->reset.prepare(engine);
if (request && request->fence.error == -EIO)
request = ERR_PTR(-EIO); /* Previous reset failed! */
@@ -3111,43 +3097,24 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
return err;
}
-static void skip_request(struct i915_request *request)
-{
- void *vaddr = request->ring->vaddr;
- u32 head;
-
- /* As this request likely depends on state from the lost
- * context, clear out all the user operations leaving the
- * breadcrumb at the end (so we get the fence notifications).
- */
- head = request->head;
- if (request->postfix < head) {
- memset(vaddr + head, 0, request->ring->size - head);
- head = 0;
- }
- memset(vaddr + head, 0, request->postfix - head);
-
- dma_fence_set_error(&request->fence, -EIO);
-}
-
static void engine_skip_context(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct i915_gem_context *hung_ctx = request->ctx;
+ struct i915_gem_context *hung_ctx = request->gem_context;
struct i915_timeline *timeline = request->timeline;
unsigned long flags;
GEM_BUG_ON(timeline == &engine->timeline);
spin_lock_irqsave(&engine->timeline.lock, flags);
- spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
+ spin_lock(&timeline->lock);
list_for_each_entry_continue(request, &engine->timeline.requests, link)
- if (request->ctx == hung_ctx)
- skip_request(request);
+ if (request->gem_context == hung_ctx)
+ i915_request_skip(request, -EIO);
list_for_each_entry(request, &timeline->requests, link)
- skip_request(request);
+ i915_request_skip(request, -EIO);
spin_unlock(&timeline->lock);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -3189,11 +3156,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
}
if (stalled) {
- i915_gem_context_mark_guilty(request->ctx);
- skip_request(request);
+ i915_gem_context_mark_guilty(request->gem_context);
+ i915_request_skip(request, -EIO);
/* If this context is now banned, skip all pending requests. */
- if (i915_gem_context_is_banned(request->ctx))
+ if (i915_gem_context_is_banned(request->gem_context))
engine_skip_context(request);
} else {
/*
@@ -3203,15 +3170,17 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
*/
request = i915_gem_find_active_request(engine);
if (request) {
- i915_gem_context_mark_innocent(request->ctx);
+ unsigned long flags;
+
+ i915_gem_context_mark_innocent(request->gem_context);
dma_fence_set_error(&request->fence, -EAGAIN);
/* Rewind the engine to replay the incomplete rq */
- spin_lock_irq(&engine->timeline.lock);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
request = list_prev_entry(request, link);
if (&request->link == &engine->timeline.requests)
request = NULL;
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
}
@@ -3232,13 +3201,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
if (request)
request = i915_gem_reset_request(engine, request, stalled);
- if (request) {
- DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->global_seqno);
- }
-
/* Setup the CS to resume from the breadcrumb of the hung request */
- engine->reset_hw(engine, request);
+ engine->reset.reset(engine, request);
}
void i915_gem_reset(struct drm_i915_private *dev_priv,
@@ -3252,14 +3216,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct i915_gem_context *ctx;
+ struct intel_context *ce;
i915_gem_reset_engine(engine,
engine->hangcheck.active_request,
stalled_mask & ENGINE_MASK(id));
- ctx = fetch_and_zero(&engine->last_retired_context);
- if (ctx)
- intel_context_unpin(ctx, engine);
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
/*
* Ostensibly, we always want a context loaded for powersaving,
@@ -3277,7 +3241,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
rq = i915_request_alloc(engine,
dev_priv->kernel_context);
if (!IS_ERR(rq))
- __i915_request_add(rq, false);
+ i915_request_add(rq);
}
}
@@ -3286,8 +3250,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
- tasklet_enable(&engine->execlists.tasklet);
- kthread_unpark(engine->breadcrumbs.signaler);
+ engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}
@@ -3565,6 +3528,22 @@ new_requests_since_last_retire(const struct drm_i915_private *i915)
work_pending(&i915->gt.idle_work.work));
}
+static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return;
+
+ GEM_BUG_ON(i915->gt.active_requests);
+ for_each_engine(engine, i915, id) {
+ GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
+ GEM_BUG_ON(engine->last_retired_context !=
+ to_intel_context(i915->kernel_context, engine));
+ }
+}
+
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
@@ -3576,6 +3555,24 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ return;
+
+ /*
+ * Flush out the last user context, leaving only the pinned
+ * kernel context resident. When we are idling on the kernel_context,
+ * no more new requests (with a context switch) are emitted and we
+ * can finally rest. A consequence is that the idle work handler is
+ * always called at least twice before idling (and if the system is
+ * idle that implies a round trip through the retire worker).
+ */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_switch_to_kernel_context(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
+ READ_ONCE(dev_priv->gt.active_requests));
+
/*
* Wait for last execlists context complete, but bail out in case a
* new request is submitted. As we don't trust the hardware, we
@@ -3609,6 +3606,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
epoch = __i915_gem_park(dev_priv);
+ assert_kernel_context_is_current(dev_priv);
+
rearm_hangcheck = false;
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3755,9 +3754,31 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
return ret;
}
-static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
+static long wait_for_timeline(struct i915_timeline *tl,
+ unsigned int flags, long timeout)
{
- return i915_gem_active_wait(&tl->last_request, flags);
+ struct i915_request *rq;
+
+ rq = i915_gem_active_get_unlocked(&tl->last_request);
+ if (!rq)
+ return timeout;
+
+ /*
+ * "Race-to-idle".
+ *
+ * Switching to the kernel context is often used as a synchronous
+ * step prior to idling, e.g. in suspend for flushing all
+ * current operations to memory before sleeping. These we
+ * want to complete as quickly as possible to avoid prolonged
+ * stalls, so allow the gpu to boost to maximum clocks.
+ */
+ if (flags & I915_WAIT_FOR_IDLE_BOOST)
+ gen6_rps_boost(rq, NULL);
+
+ timeout = i915_request_wait(rq, flags, timeout);
+ i915_request_put(rq);
+
+ return timeout;
}
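The long return value threads one jiffies budget across several waits: each call consumes part of it and hands back the remainder, or a negative error. A hedged sketch of the idiom (timeline variables and error values are assumptions):

	long budget = HZ;	/* illustrative one second budget */

	budget = wait_for_timeline(tl_a, flags, budget);
	if (budget < 0)
		return budget;	/* e.g. -ETIME or -ERESTARTSYS */

	/* whatever time remains bounds the next wait */
	budget = wait_for_timeline(tl_b, flags, budget);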
static int wait_for_engines(struct drm_i915_private *i915)
@@ -3773,8 +3794,13 @@ static int wait_for_engines(struct drm_i915_private *i915)
return 0;
}
-int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915,
+ unsigned int flags, long timeout)
{
+ GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
+ flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
+ timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
+
/* If the device is asleep, we have no requests outstanding */
if (!READ_ONCE(i915->gt.awake))
return 0;
@@ -3786,26 +3812,31 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
lockdep_assert_held(&i915->drm.struct_mutex);
list_for_each_entry(tl, &i915->gt.timelines, link) {
- err = wait_for_timeline(tl, flags);
- if (err)
- return err;
+ timeout = wait_for_timeline(tl, flags, timeout);
+ if (timeout < 0)
+ return timeout;
}
- i915_retire_requests(i915);
- return wait_for_engines(i915);
+ err = wait_for_engines(i915);
+ if (err)
+ return err;
+
+ i915_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
} else {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err;
for_each_engine(engine, i915, id) {
- err = wait_for_timeline(&engine->timeline, flags);
- if (err)
- return err;
- }
+ struct i915_timeline *tl = &engine->timeline;
- return 0;
+ timeout = wait_for_timeline(tl, flags, timeout);
+ if (timeout < 0)
+ return timeout;
+ }
}
+
+ return 0;
}
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
@@ -4379,7 +4410,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 flags)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_address_space *vm = &dev_priv->ggtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct i915_vma *vma;
int ret;
@@ -4967,25 +4998,25 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+void i915_gem_sanitize(struct drm_i915_private *i915)
{
- struct i915_gem_context *kernel_context = i915->kernel_context;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ int err;
- for_each_engine(engine, i915, id) {
- GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
- GEM_BUG_ON(engine->last_retired_context != kernel_context);
- }
-}
+ GEM_TRACE("\n");
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
- if (i915_terminally_wedged(&i915->gpu_error)) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_get(i915);
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+ /*
+ * As we have just resumed the machine and woken the device up from
+ * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+ * back to defaults, recovering from whatever wedged state we left it
+ * in and so worth trying to use the device once more.
+ */
+ if (i915_terminally_wedged(&i915->gpu_error))
i915_gem_unset_wedged(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- }
/*
* If we inherit context state from the BIOS or earlier occupants
@@ -4995,60 +5026,94 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
+ err = -ENODEV;
if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
- WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+ err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+ if (!err)
+ intel_engines_sanitize(i915);
+
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_runtime_pm_put(i915);
+
+ i915_gem_contexts_lost(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
}
-int i915_gem_suspend(struct drm_i915_private *dev_priv)
+int i915_gem_suspend(struct drm_i915_private *i915)
{
- struct drm_device *dev = &dev_priv->drm;
int ret;
- intel_runtime_pm_get(dev_priv);
- intel_suspend_gt_powersave(dev_priv);
+ GEM_TRACE("\n");
- mutex_lock(&dev->struct_mutex);
+ intel_runtime_pm_get(i915);
+ intel_suspend_gt_powersave(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
- /* We have to flush all the executing contexts to main memory so
+ /*
+ * We have to flush all the executing contexts to main memory so
* that they can be saved in the hibernation image. To ensure the last
* context image is coherent, we have to switch away from it. That
- * leaves the dev_priv->kernel_context still active when
+ * leaves the i915->kernel_context still active when
* we actually suspend, and its image in memory may not match the GPU
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = i915_gem_switch_to_kernel_context(dev_priv);
+ if (!i915_terminally_wedged(&i915->gpu_error)) {
+ ret = i915_gem_switch_to_kernel_context(i915);
if (ret)
goto err_unlock;
- ret = i915_gem_wait_for_idle(dev_priv,
+ ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST,
+ MAX_SCHEDULE_TIMEOUT);
if (ret && ret != -EIO)
goto err_unlock;
- assert_kernel_context_is_current(dev_priv);
+ assert_kernel_context_is_current(i915);
}
- i915_gem_contexts_lost(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ i915_retire_requests(i915); /* ensure we flush after wedging */
+
+ mutex_unlock(&i915->drm.struct_mutex);
- intel_uc_suspend(dev_priv);
+ intel_uc_suspend(i915);
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+ cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+ cancel_delayed_work_sync(&i915->gt.retire_work);
- /* As the idle_work is rearming if it detects a race, play safe and
+ /*
+ * As the idle_work will rearm itself if it detects a race, play safe and
* repeat the flush until it is definitely idle.
*/
- drain_delayed_work(&dev_priv->gt.idle_work);
+ drain_delayed_work(&i915->gt.idle_work);
- /* Assert that we sucessfully flushed all the work and
+ /*
+ * Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
- WARN_ON(dev_priv->gt.awake);
- if (WARN_ON(!intel_engines_are_idle(dev_priv)))
- i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
+ WARN_ON(i915->gt.awake);
+ if (WARN_ON(!intel_engines_are_idle(i915)))
+ i915_gem_set_wedged(i915); /* no hope, discard everything */
+
+ intel_runtime_pm_put(i915);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ intel_runtime_pm_put(i915);
+ return ret;
+}
+
+void i915_gem_suspend_late(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct list_head *phases[] = {
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
+ NULL
+ }, **phase;
/*
* Neither the BIOS, ourselves nor any other kernel
@@ -5069,20 +5134,22 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
- intel_uc_sanitize(dev_priv);
- i915_gem_sanitize(dev_priv);
- intel_runtime_pm_put(dev_priv);
- return 0;
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
- return ret;
+ intel_uc_sanitize(i915);
+ i915_gem_sanitize(i915);
}
void i915_gem_resume(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
@@ -5256,8 +5323,18 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
/* Only when the HW is re-initialised, can we replay the requests */
ret = __i915_gem_restart_engines(dev_priv);
+ if (ret)
+ goto cleanup_uc;
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ return 0;
+
+cleanup_uc:
+ intel_uc_fini_hw(dev_priv);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
return ret;
}
@@ -5294,7 +5371,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (engine->init_context)
err = engine->init_context(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
if (err)
goto err_active;
}
@@ -5303,9 +5380,11 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (err)
goto err_active;
- err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
- if (err)
+ if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
+ i915_gem_set_wedged(i915);
+ err = -EIO; /* Caller will declare us wedged */
goto err_active;
+ }
assert_kernel_context_is_current(i915);
@@ -5368,7 +5447,9 @@ err_active:
if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
goto out_ctx;
- if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
+ if (WARN_ON(i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT)))
goto out_ctx;
i915_gem_contexts_lost(i915);
@@ -5379,12 +5460,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
- /*
- * We need to fallback to 4K pages since gvt gtt handling doesn't
- * support huge page entries - we will need to check either hypervisor
- * mm can support huge guest page or just do emulation in gvt.
- */
- if (intel_vgpu_active(dev_priv))
+ /* We need to fallback to 4K pages if host doesn't support huge gtt. */
+ if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
@@ -5402,13 +5479,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- ret = intel_wopcm_init(&dev_priv->wopcm);
+ ret = intel_uc_init_misc(dev_priv);
if (ret)
return ret;
- ret = intel_uc_init_misc(dev_priv);
+ ret = intel_wopcm_init(&dev_priv->wopcm);
if (ret)
- return ret;
+ goto err_uc_misc;
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
@@ -5484,8 +5561,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* driver doesn't explode during runtime.
*/
err_init_hw:
- i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
- i915_gem_contexts_lost(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ WARN_ON(i915_gem_suspend(dev_priv));
+ i915_gem_suspend_late(dev_priv);
+
+ i915_gem_drain_workqueue(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
err_uc_init:
intel_uc_fini(dev_priv);
@@ -5502,6 +5585,7 @@ err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
+err_uc_misc:
intel_uc_fini_misc(dev_priv);
if (ret != -EIO)
@@ -5514,7 +5598,8 @@ err_unlock:
* for all other failure, such as an allocation failure, bail.
*/
if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+ i915_load_error(dev_priv,
+ "Failed to initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(dev_priv);
}
ret = 0;
@@ -5524,6 +5609,28 @@ err_unlock:
return ret;
}
+void i915_gem_fini(struct drm_i915_private *dev_priv)
+{
+ i915_gem_suspend_late(dev_priv);
+
+ /* Flush any outstanding unpin_work. */
+ i915_gem_drain_workqueue(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_hw(dev_priv);
+ intel_uc_fini(dev_priv);
+ i915_gem_cleanup_engines(dev_priv);
+ i915_gem_contexts_fini(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_uc_fini_misc(dev_priv);
+ i915_gem_cleanup_userptr(dev_priv);
+
+ i915_gem_drain_freed_objects(dev_priv);
+
+ WARN_ON(!list_empty(&dev_priv->contexts.list));
+}
+
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
i915_gem_sanitize(i915);
@@ -5688,16 +5795,17 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv)
return 0;
}
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+int i915_gem_freeze_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
struct list_head *phases[] = {
- &dev_priv->mm.unbound_list,
- &dev_priv->mm.bound_list,
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
NULL
- }, **p;
+ }, **phase;
- /* Called just before we write the hibernation image.
+ /*
+ * Called just before we write the hibernation image.
*
* We need to update the domain tracking to reflect that the CPU
* will be accessing all the pages to create and restore from the
@@ -5711,15 +5819,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
* the objects as well, see i915_gem_freeze()
*/
- i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
- i915_gem_drain_freed_objects(dev_priv);
+ i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
+ i915_gem_drain_freed_objects(i915);
- spin_lock(&dev_priv->mm.obj_lock);
- for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, mm.link)
- __start_cpu_write(obj);
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
}
- spin_unlock(&dev_priv->mm.obj_lock);
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
@@ -6039,16 +6147,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock;
}
- pages = fetch_and_zero(&obj->mm.pages);
- if (pages) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- __i915_gem_object_reset_page_iter(obj);
-
- spin_lock(&i915->mm.obj_lock);
- list_del(&obj->mm.link);
- spin_unlock(&i915->mm.obj_lock);
- }
+ pages = __i915_gem_object_unset_pages(obj);
obj->ops = &i915_gem_phys_ops;
@@ -6066,7 +6165,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
err_xfer:
obj->ops = &i915_gem_object_ops;
- obj->mm.pages = pages;
+ if (!IS_ERR_OR_NULL(pages)) {
+ unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+ }
err_unlock:
mutex_unlock(&obj->mm.lock);
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 525920404ede..e46592956872 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -26,6 +26,7 @@
#define __I915_GEM_H__
#include <linux/bug.h>
+#include <linux/interrupt.h>
struct drm_i915_private;
@@ -62,9 +63,12 @@ struct drm_i915_private;
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP_ON(expr) \
+ do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
#define GEM_TRACE_DUMP() do { } while (0)
+#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
#define I915_NUM_ENGINES 8
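A hedged example of the conditional dump helper (the call site is illustrative): with CONFIG_DRM_I915_TRACE_GEM it dumps the ftrace buffer only when the expression is true, and otherwise BUILD_BUG_ON_INVALID() still type-checks the expression at zero runtime cost.

	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ);
	GEM_TRACE_DUMP_ON(err);	/* dump the trace buffer only on failure */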
@@ -72,4 +76,21 @@ struct drm_i915_private;
void i915_gem_park(struct drm_i915_private *i915);
void i915_gem_unpark(struct drm_i915_private *i915);
+static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
+{
+ if (atomic_inc_return(&t->count) == 1)
+ tasklet_unlock_wait(t);
+}
+
+static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
+{
+ if (atomic_dec_return(&t->count) == 0)
+ tasklet_kill(t);
+}
+
+static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
+{
+ return !atomic_read(&t->count);
+}
+
#endif /* __I915_GEM_H__ */
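A hedged sketch of how the once-only tasklet helpers are meant to pair across a reset window (the wrapper functions are illustrative): the first disable waits out a concurrently running tasklet, nested disables only bump the count, and the matching enable kills any run scheduled in between.

	static void example_reset_prepare(struct intel_engine_cs *engine)
	{
		/* First caller waits for a concurrent run to complete */
		__tasklet_disable_sync_once(&engine->execlists.tasklet);
	}

	static void example_reset_finish(struct intel_engine_cs *engine)
	{
		/* Last caller flushes anything scheduled while disabled */
		__tasklet_enable_sync_once(&engine->execlists.tasklet);
		GEM_BUG_ON(!__tasklet_is_enabled(&engine->execlists.tasklet));
	}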
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 060335d3d9e0..b10770cfccd2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -127,14 +127,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
- if (!ce->state)
- continue;
-
- WARN_ON(ce->pin_count);
- if (ce->ring)
- intel_ring_free(ce->ring);
-
- __i915_gem_object_release_unless_active(ce->state->obj);
+ if (ce->ops)
+ ce->ops->destroy(ce);
}
kfree(ctx->name);
@@ -203,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
if (ctx->ppgtt)
- i915_ppgtt_close(&ctx->ppgtt->base);
+ i915_ppgtt_close(&ctx->ppgtt->vm);
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
@@ -214,10 +208,19 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
int ret;
unsigned int max;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
max = GEN11_MAX_CONTEXT_HW_ID;
- else
- max = MAX_CONTEXT_HW_ID;
+ } else {
+ /*
+ * When using GuC in proxy submission, GuC consumes the
+ * highest bit in the context id to indicate proxy submission.
+ */
+ if (USES_GUC_SUBMISSION(dev_priv))
+ max = MAX_GUC_CONTEXT_HW_ID;
+ else
+ max = MAX_CONTEXT_HW_ID;
+ }
+
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, max, GFP_KERNEL);
@@ -246,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -266,6 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
+ unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -283,6 +287,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->i915 = dev_priv;
ctx->sched.priority = I915_PRIORITY_NORMAL;
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+ struct intel_context *ce = &ctx->__engine[n];
+
+ ce->gem_context = ctx;
+ }
+
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
@@ -364,7 +374,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
if (USES_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
+ ppgtt = i915_ppgtt_create(dev_priv, file_priv);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -502,8 +512,8 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
}
DRM_DEBUG_DRIVER("%s context support initialized\n",
- dev_priv->engine[RCS]->context_size ? "logical" :
- "fake");
+ DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+ "logical" : "fake");
return 0;
}
@@ -514,16 +524,8 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv, id) {
- engine->legacy_active_context = NULL;
- engine->legacy_active_ppgtt = NULL;
-
- if (!engine->last_retired_context)
- continue;
-
- intel_context_unpin(engine->last_retired_context, engine);
- engine->last_retired_context = NULL;
- }
+ for_each_engine(engine, dev_priv, id)
+ intel_engine_lost_context(engine);
}
void i915_gem_contexts_fini(struct drm_i915_private *i915)
@@ -583,68 +585,122 @@ last_request_on_engine(struct i915_timeline *timeline,
{
struct i915_request *rq;
- if (timeline == &engine->timeline)
- return NULL;
+ GEM_BUG_ON(timeline == &engine->timeline);
rq = i915_gem_active_raw(&timeline->last_request,
&engine->i915->drm.struct_mutex);
- if (rq && rq->engine == engine)
+ if (rq && rq->engine == engine) {
+ GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
+ timeline->name, engine->name,
+ rq->fence.context, rq->fence.seqno);
+ GEM_BUG_ON(rq->timeline != timeline);
return rq;
+ }
return NULL;
}
-static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
{
- struct i915_timeline *timeline;
+ struct drm_i915_private *i915 = engine->i915;
+ const struct intel_context * const ce =
+ to_intel_context(i915->kernel_context, engine);
+ struct i915_timeline *barrier = ce->ring->timeline;
+ struct intel_ring *ring;
+ bool any_active = false;
- list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
- if (last_request_on_engine(timeline, engine))
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
+ struct i915_request *rq;
+
+ rq = last_request_on_engine(ring->timeline, engine);
+ if (!rq)
+ continue;
+
+ any_active = true;
+
+ if (rq->hw_context == ce)
+ continue;
+
+ /*
+ * Was this request submitted after the previous
+ * switch-to-kernel-context?
+ */
+ if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
+ GEM_TRACE("%s needs barrier for %llx:%d\n",
+ ring->timeline->name,
+ rq->fence.context,
+ rq->fence.seqno);
return false;
+ }
+
+ GEM_TRACE("%s has barrier after %llx:%d\n",
+ ring->timeline->name,
+ rq->fence.context,
+ rq->fence.seqno);
}
- return intel_engine_has_kernel_context(engine);
+ /*
+ * If any other timeline was still active and behind the last barrier,
+ * then our last switch-to-kernel-context must still be queued and
+ * will run last (leaving the engine in the kernel context when it
+ * eventually idles).
+ */
+ if (any_active)
+ return true;
+
+ /* The engine is idle; check that it is idling in the kernel context. */
+ return engine->last_retired_context == ce;
}
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
- struct i915_timeline *timeline;
enum intel_engine_id id;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
- i915_retire_requests(dev_priv);
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915->kernel_context);
+
+ i915_retire_requests(i915);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, i915, id) {
+ struct intel_ring *ring;
struct i915_request *rq;
- if (engine_has_idle_kernel_context(engine))
+ GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
+ if (engine_has_kernel_context_barrier(engine))
continue;
- rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ GEM_TRACE("emit barrier on %s\n", engine->name);
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
/* Queue this switch after all other activity */
- list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+ list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
struct i915_request *prev;
- prev = last_request_on_engine(timeline, engine);
- if (prev)
- i915_sw_fence_await_sw_fence_gfp(&rq->submit,
- &prev->submit,
- I915_FENCE_GFP);
+ prev = last_request_on_engine(ring->timeline, engine);
+ if (!prev)
+ continue;
+
+ if (prev->gem_context == i915->kernel_context)
+ continue;
+
+ GEM_TRACE("add barrier on %s for %llx:%d\n",
+ engine->name,
+ prev->fence.context,
+ prev->fence.seqno);
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ &prev->submit,
+ I915_FENCE_GFP);
+ i915_timeline_sync_set(rq->timeline, &prev->fence);
}
- /*
- * Force a flush after the switch to ensure that all rendering
- * and operations prior to switching to the kernel context hits
- * memory. This should be guaranteed by the previous request,
- * but an extra layer of paranoia before we declare the system
- * idle (on suspend etc) is advisable!
- */
- __i915_request_add(rq, true);
+ i915_request_add(rq);
}
return 0;
@@ -664,7 +720,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct i915_gem_context *ctx;
int ret;
- if (!dev_priv->engine[RCS]->context_size)
+ if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
return -ENODEV;
if (args->pad != 0)
@@ -747,11 +803,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
- args->value = ctx->ppgtt->base.total;
+ args->value = ctx->ppgtt->vm.total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+ args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
else
- args->value = to_i915(dev)->ggtt.base.total;
+ args->value = to_i915(dev)->ggtt.vm.total;
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
args->value = i915_gem_context_no_error_capture(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ace3b129c189..b116e4942c10 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -30,6 +30,7 @@
#include <linux/radix-tree.h>
#include "i915_gem.h"
+#include "i915_scheduler.h"
struct pid;
@@ -45,6 +46,13 @@ struct intel_ring;
#define DEFAULT_CONTEXT_HANDLE 0
+struct intel_context;
+
+struct intel_context_ops {
+ void (*unpin)(struct intel_context *ce);
+ void (*destroy)(struct intel_context *ce);
+};
+
/**
* struct i915_gem_context - client state
*
@@ -144,11 +152,14 @@ struct i915_gem_context {
/** engine: per-engine logical HW state */
struct intel_context {
+ struct i915_gem_context *gem_context;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
+
+ const struct intel_context_ops *ops;
} __engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
@@ -263,25 +274,26 @@ to_intel_context(struct i915_gem_context *ctx,
return &ctx->__engine[engine->id];
}
-static inline struct intel_ring *
+static inline struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
return engine->context_pin(engine, ctx);
}
-static inline void __intel_context_pin(struct i915_gem_context *ctx,
- const struct intel_engine_cs *engine)
+static inline void __intel_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
GEM_BUG_ON(!ce->pin_count);
ce->pin_count++;
}
-static inline void intel_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static inline void intel_context_unpin(struct intel_context *ce)
{
- engine->context_unpin(engine, ctx);
+ GEM_BUG_ON(!ce->pin_count);
+ if (--ce->pin_count)
+ return;
+
+ GEM_BUG_ON(!ce->ops);
+ ce->ops->unpin(ce);
}
/* i915_gem_context.c */
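A hedged sketch of the refcounted pin API above (error handling trimmed; ctx and engine are assumed to be valid): pinning now hands back the intel_context itself, extra references use __intel_context_pin(), and only the final unpin reaches the backend's ops->unpin().

	struct intel_context *ce;

	ce = intel_context_pin(ctx, engine);	/* pin_count: 0 -> 1 */
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	__intel_context_pin(ce);		/* pin_count: 1 -> 2 */

	intel_context_unpin(ce);		/* pin_count: 2 -> 1 */
	intel_context_unpin(ce);		/* 1 -> 0: calls ce->ops->unpin(ce) */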
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 69a7aec49e84..82e2ca17a441 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
i915_gem_object_unpin_map(obj);
}
-static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = i915_gem_dmabuf_kmap,
- .map_atomic = i915_gem_dmabuf_kmap_atomic,
.unmap = i915_gem_dmabuf_kunmap,
- .unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 54814a196ee4..02b83a5ed96c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -69,7 +69,8 @@ static int ggtt_flush(struct drm_i915_private *i915)
err = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 22df17c8ca9b..3f0c612d42e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -66,6 +66,15 @@ enum {
#define __I915_EXEC_ILLEGAL_FLAGS \
(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)
+/* Catch emission of unexpected errors for CI! */
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+#undef EINVAL
+#define EINVAL ({ \
+ DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
+ 22; \
+})
+#endif
+
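The block above hijacks the EINVAL symbol under CONFIG_DRM_I915_DEBUG_GEM: a GNU statement expression logs the exact function and line before evaluating to 22 (EINVAL's value on Linux), so CI logs reveal which of the many execbuf validation checks rejected a submission. A self-contained userspace demo of the same trick (requires GCC or Clang for statement expressions):

#include <stdio.h>

#undef EINVAL
#define EINVAL ({ \
	fprintf(stderr, "EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})

static int check_flags(unsigned int flags)
{
	if (flags & ~0xffu)
		return -EINVAL;	/* logs its origin before yielding -22 */
	return 0;
}

int main(void)
{
	return check_flags(~0u) ? 1 : 0;
}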
/**
* DOC: User command execution
*
@@ -534,7 +543,8 @@ eb_add_vma(struct i915_execbuffer *eb,
* paranoia do it everywhere.
*/
if (i == batch_idx) {
- if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+ if (entry->relocation_count &&
+ !(eb->flags[i] & EXEC_OBJECT_PINNED))
eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
@@ -723,7 +733,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->ctx = ctx;
- eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
+ eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
eb->context_flags = 0;
if (ctx->flags & CONTEXT_NO_ZEROMAP)
@@ -921,7 +931,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
i915_gem_object_unpin_map(cache->rq->batch->obj);
i915_gem_chipset_flush(cache->rq->i915);
- __i915_request_add(cache->rq, true);
+ i915_request_add(cache->rq);
cache->rq = NULL;
}
@@ -948,9 +958,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
if (cache->node.allocated) {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
- ggtt->base.clear_range(&ggtt->base,
- cache->node.start,
- cache->node.size);
+ ggtt->vm.clear_range(&ggtt->vm,
+ cache->node.start,
+ cache->node.size);
drm_mm_remove_node(&cache->node);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -1021,7 +1031,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
- (&ggtt->base.mm, &cache->node,
+ (&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -1042,9 +1052,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, page),
- offset, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, page),
+ offset, I915_CACHE_NONE, 0);
} else {
offset += page << PAGE_SHIFT;
}
@@ -1155,18 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_request;
GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
- i915_vma_move_to_active(batch, rq, 0);
- reservation_object_lock(batch->resv, NULL);
- reservation_object_add_excl_fence(batch->resv, &rq->fence);
- reservation_object_unlock(batch->resv);
- i915_vma_unpin(batch);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto skip_request;
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- reservation_object_lock(vma->resv, NULL);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
- reservation_object_unlock(vma->resv);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
rq->batch = batch;
+ i915_vma_unpin(batch);
cache->rq = rq;
cache->rq_cmd = cmd;
@@ -1175,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
/* Return with batch mapping (cmd) still pinned */
return 0;
+skip_request:
+ i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
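The new skip_request label reflects an invariant of the request machinery: once a request has been allocated it must always be submitted and retired, so a mid-construction failure poisons it rather than freeing it. A userspace sketch of that unwind shape (request_skip/request_add here are illustrative stand-ins):

#include <errno.h>
#include <stdio.h>

struct request { int error; };

static void request_skip(struct request *rq, int err) { rq->error = err; }
static void request_add(struct request *rq)
{
	printf("submitted%s\n", rq->error ? " (payload skipped)" : "");
}

static int build_and_submit(int fail)
{
	struct request rq = { 0 };
	int err = 0;

	if (fail) {
		err = -ENOSPC;		/* e.g. move_to_active() failed */
		goto skip_request;
	}

	request_add(&rq);		/* normal path */
	return 0;

skip_request:
	request_skip(&rq, err);		/* poison: payload becomes a no-op */
	request_add(&rq);		/* but it is still queued and retired */
	return err;
}

int main(void) { return build_and_submit(1) == -ENOSPC ? 0 : 1; }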
@@ -1761,25 +1771,6 @@ slow:
return eb_relocate_slow(eb);
}
-static void eb_export_fence(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
-{
- struct reservation_object *resv = vma->resv;
-
- /*
- * Ignore errors from failing to allocate the new fence, we can't
- * handle an error right now. Worst case should be missed
- * synchronisation leading to rendering corruption.
- */
- reservation_object_lock(resv, NULL);
- if (flags & EXEC_OBJECT_WRITE)
- reservation_object_add_excl_fence(resv, &rq->fence);
- else if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &rq->fence);
- reservation_object_unlock(resv);
-}
-
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
@@ -1833,8 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
unsigned int flags = eb->flags[i];
struct i915_vma *vma = eb->vma[i];
- i915_vma_move_to_active(vma, eb->request, flags);
- eb_export_fence(vma, eb->request, flags);
+ err = i915_vma_move_to_active(vma, eb->request, flags);
+ if (unlikely(err)) {
+ i915_request_skip(eb->request, err);
+ return err;
+ }
__eb_unreserve_vma(vma, flags);
vma->exec_flags = NULL;
@@ -1874,45 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
return true;
}
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- const unsigned int idx = rq->engine->id;
-
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
- /*
- * Add a reference if we're newly entering the active list.
- * The order in which we add operations to the retirement queue is
- * vital here: mark_active adds to the start of the callback list,
- * such that subsequent callbacks are called first. Therefore we
- * add the active reference first and queue for it to be dropped
- * *last*.
- */
- if (!i915_vma_is_active(vma))
- obj->active_count++;
- i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], rq);
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
-
- obj->write_domain = 0;
- if (flags & EXEC_OBJECT_WRITE) {
- obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
- if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- i915_gem_active_set(&obj->frontbuffer_write, rq);
-
- obj->read_domains = 0;
- }
- obj->read_domains |= I915_GEM_GPU_DOMAINS;
-
- if (flags & EXEC_OBJECT_NEEDS_FENCE)
- i915_gem_active_set(&vma->last_fence, rq);
-}
-
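The comment deleted above carries the key ordering argument: retirement callbacks are pushed onto the head of a list and therefore run in reverse registration order, so the "drop the active reference" callback is registered first in order to run last. The same idea in a few lines of userspace C (illustrative, not the i915 lists):

#include <stdio.h>

#define NCB 4

static void (*cbs[NCB])(void);
static int ncbs;

/* Push to the head: the most recent registration runs first. */
static void add_callback(void (*fn)(void))
{
	for (int i = ncbs++; i > 0; i--)
		cbs[i] = cbs[i - 1];
	cbs[0] = fn;
}

static void drop_ref(void)    { printf("drop active reference (last)\n"); }
static void clear_fence(void) { printf("clear fence tracking (first)\n"); }

int main(void)
{
	add_callback(drop_ref);		/* registered first ... */
	add_callback(clear_fence);	/* ... registered second */

	for (int i = 0; i < ncbs; i++)
		cbs[i]();		/* clear_fence runs, then drop_ref */
	return 0;
}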
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
u32 *cs;
@@ -2438,7 +2393,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
- __i915_request_add(eb.request, err == 0);
+ i915_request_add(eb.request);
add_to_client(eb.request, file);
if (fences)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 996ab2ad6c45..f00c7fbef79e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -42,7 +42,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
-#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
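The gfp policy shifts from "DMA-capable" to "may fail quietly": __GFP_RETRY_MAYFAIL retries a few times but returns NULL rather than invoking the OOM killer, and __GFP_NOWARN suppresses the allocation-failure splat, leaving the caller to propagate -ENOMEM. A userspace stand-in for that call-site discipline:

#include <errno.h>
#include <stdlib.h>

/* kernel: kmalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) */
static void *alloc_allow_fail(size_t sz)
{
	return malloc(sz);
}

static int make_page_table(void **out)
{
	void *pt = alloc_allow_fail(4096);

	if (!pt)
		return -ENOMEM;	/* caller unwinds; no OOM-killer escalation */
	*out = pt;
	return 0;
}

int main(void)
{
	void *pt;
	int err = make_page_table(&pt);

	if (!err)
		free(pt);
	return err ? 1 : 0;
}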
/**
* DOC: Global GTT views
@@ -195,18 +195,18 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
u32 unused)
{
u32 pte_flags;
- int ret;
+ int err;
if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
- ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
- vma->size);
- if (ret)
- return ret;
+ err = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start, vma->size);
+ if (err)
+ return err;
}
- /* Currently applicable only to VLV */
+ /* Applicable to VLV and gen8+ */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
@@ -244,10 +244,13 @@ static void clear_pages(struct i915_vma *vma)
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ u32 flags)
{
- gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
- pte |= addr;
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
switch (level) {
case I915_CACHE_NONE:
@@ -375,37 +378,70 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
+static void stash_init(struct pagestash *stash)
+{
+ pagevec_init(&stash->pvec);
+ spin_lock_init(&stash->lock);
+}
+
+static struct page *stash_pop_page(struct pagestash *stash)
+{
+ struct page *page = NULL;
+
+ spin_lock(&stash->lock);
+ if (likely(stash->pvec.nr))
+ page = stash->pvec.pages[--stash->pvec.nr];
+ spin_unlock(&stash->lock);
+
+ return page;
+}
+
+static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
+{
+ int nr;
+
+ spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
+
+ nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
+ memcpy(stash->pvec.pages + stash->pvec.nr,
+ pvec->pages + pvec->nr - nr,
+ sizeof(pvec->pages[0]) * nr);
+ stash->pvec.nr += nr;
+
+ spin_unlock(&stash->lock);
+
+ pvec->nr -= nr;
+}
+
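These helpers package the free-page cache behind its own spinlock so it no longer depends on struct_mutex; the nested-lock annotation in stash_push_pagevec() covers the case where the per-VM stash lock is already held while pushing into the global WC stash. A userspace model of the same structure, using a pthread spinlock (names and sizes are illustrative):

#include <pthread.h>
#include <string.h>

#define STASH_SIZE 15	/* same order of magnitude as a kernel pagevec */

struct stash {
	pthread_spinlock_t lock;
	int nr;
	void *slots[STASH_SIZE];
};

void stash_setup(struct stash *s)
{
	s->nr = 0;
	pthread_spin_init(&s->lock, PTHREAD_PROCESS_PRIVATE);
}

void *stash_pop(struct stash *s)
{
	void *p = NULL;

	pthread_spin_lock(&s->lock);
	if (s->nr)
		p = s->slots[--s->nr];
	pthread_spin_unlock(&s->lock);
	return p;
}

/* Take as many entries as fit from the tail of the caller's array;
 * whatever does not fit stays with the caller, exactly as the patch
 * leaves the remainder in pvec->nr. */
void stash_push_many(struct stash *s, void **pages, int *nr)
{
	int take;

	pthread_spin_lock(&s->lock);
	take = STASH_SIZE - s->nr;
	if (take > *nr)
		take = *nr;
	memcpy(&s->slots[s->nr], &pages[*nr - take], take * sizeof(*pages));
	s->nr += take;
	pthread_spin_unlock(&s->lock);

	*nr -= take;	/* leftovers stay with the caller */
}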
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
- struct pagevec *pvec = &vm->free_pages;
- struct pagevec stash;
+ struct pagevec stack;
+ struct page *page;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
- if (likely(pvec->nr))
- return pvec->pages[--pvec->nr];
+ page = stash_pop_page(&vm->free_pages);
+ if (page)
+ return page;
if (!vm->pt_kmap_wc)
return alloc_page(gfp);
- /* A placeholder for a specific mutex to guard the WC stash */
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
-
/* Look in our global stash of WC pages... */
- pvec = &vm->i915->mm.wc_stash;
- if (likely(pvec->nr))
- return pvec->pages[--pvec->nr];
+ page = stash_pop_page(&vm->i915->mm.wc_stash);
+ if (page)
+ return page;
/*
- * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
+ * Otherwise batch allocate pages to amortize cost of set_pages_wc.
*
* We have to be careful as page allocation may trigger the shrinker
* (via direct reclaim) which will fill up the WC stash underneath us.
* So we add our WB pages into a temporary pvec on the stack and merge
* them into the WC stash after all the allocations are complete.
*/
- pagevec_init(&stash);
+ pagevec_init(&stack);
do {
struct page *page;
@@ -413,59 +449,67 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
break;
- stash.pages[stash.nr++] = page;
- } while (stash.nr < pagevec_space(pvec));
+ stack.pages[stack.nr++] = page;
+ } while (pagevec_space(&stack));
- if (stash.nr) {
- int nr = min_t(int, stash.nr, pagevec_space(pvec));
- struct page **pages = stash.pages + stash.nr - nr;
+ if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
+ page = stack.pages[--stack.nr];
- if (nr && !set_pages_array_wc(pages, nr)) {
- memcpy(pvec->pages + pvec->nr,
- pages, sizeof(pages[0]) * nr);
- pvec->nr += nr;
- stash.nr -= nr;
- }
+ /* Merge spare WC pages to the global stash */
+ stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
- pagevec_release(&stash);
+ /* Push any surplus WC pages onto the local VM stash */
+ if (stack.nr)
+ stash_push_pagevec(&vm->free_pages, &stack);
}
- return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
+ /* Return unwanted leftovers */
+ if (unlikely(stack.nr)) {
+ WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
+ __pagevec_release(&stack);
+ }
+
+ return page;
}
static void vm_free_pages_release(struct i915_address_space *vm,
bool immediate)
{
- struct pagevec *pvec = &vm->free_pages;
+ struct pagevec *pvec = &vm->free_pages.pvec;
+ struct pagevec stack;
+ lockdep_assert_held(&vm->free_pages.lock);
GEM_BUG_ON(!pagevec_count(pvec));
if (vm->pt_kmap_wc) {
- struct pagevec *stash = &vm->i915->mm.wc_stash;
-
- /* When we use WC, first fill up the global stash and then
+ /*
+ * When we use WC, first fill up the global stash and then
* only if full immediately free the overflow.
*/
+ stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
- if (pagevec_space(stash)) {
- do {
- stash->pages[stash->nr++] =
- pvec->pages[--pvec->nr];
- if (!pvec->nr)
- return;
- } while (pagevec_space(stash));
-
- /* As we have made some room in the VM's free_pages,
- * we can wait for it to fill again. Unless we are
- * inside i915_address_space_fini() and must
- * immediately release the pages!
- */
- if (!immediate)
- return;
- }
+ /*
+ * As we have made some room in the VM's free_pages,
+ * we can wait for it to fill again. Unless we are
+ * inside i915_address_space_fini() and must
+ * immediately release the pages!
+ */
+ if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
+ return;
+
+ /*
+ * We have to drop the lock to allow ourselves to sleep,
+ * so take a copy of the pvec and clear the stash for
+ * others to use it as we sleep.
+ */
+ stack = *pvec;
+ pagevec_reinit(pvec);
+ spin_unlock(&vm->free_pages.lock);
+ pvec = &stack;
set_pages_array_wb(pvec->pages, pvec->nr);
+
+ spin_lock(&vm->free_pages.lock);
}
__pagevec_release(pvec);
@@ -481,20 +525,60 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
* unconditional might_sleep() for everybody.
*/
might_sleep();
- if (!pagevec_add(&vm->free_pages, page))
+ spin_lock(&vm->free_pages.lock);
+ if (!pagevec_add(&vm->free_pages.pvec, page))
vm_free_pages_release(vm, false);
+ spin_unlock(&vm->free_pages.lock);
+}
+
+static void i915_address_space_init(struct i915_address_space *vm,
+ struct drm_i915_private *dev_priv)
+{
+ /*
+ * The vm->mutex must be reclaim safe (for use in the shrinker).
+ * Do a dummy acquire now under fs_reclaim so that any allocation
+ * attempt holding the lock is immediately reported by lockdep.
+ */
+ mutex_init(&vm->mutex);
+ i915_gem_shrinker_taints_mutex(&vm->mutex);
+
+ GEM_BUG_ON(!vm->total);
+ drm_mm_init(&vm->mm, 0, vm->total);
+ vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+ stash_init(&vm->free_pages);
+
+ INIT_LIST_HEAD(&vm->active_list);
+ INIT_LIST_HEAD(&vm->inactive_list);
+ INIT_LIST_HEAD(&vm->unbound_list);
+}
+
+static void i915_address_space_fini(struct i915_address_space *vm)
+{
+ spin_lock(&vm->free_pages.lock);
+ if (pagevec_count(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, true);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+ spin_unlock(&vm->free_pages.lock);
+
+ drm_mm_takedown(&vm->mm);
+
+ mutex_destroy(&vm->mutex);
}
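i915_gem_shrinker_taints_mutex() itself is not shown in this patch; a plausible shape for it, following the comment above, is a dummy acquisition under fs_reclaim so lockdep learns up front that the mutex nests inside reclaim and will then flag any allocation attempted while it is held (kernel-style sketch, assuming CONFIG_LOCKDEP):

#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>	/* fs_reclaim_acquire/release */

static void shrinker_taints_mutex(struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);	/* pretend we are in direct reclaim */
	mutex_lock(mutex);		/* records reclaim -> mutex ordering */
	mutex_unlock(mutex);
	fs_reclaim_release(GFP_KERNEL);
}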
static int __setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
gfp_t gfp)
{
- p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
+ p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
if (unlikely(!p->page))
return -ENOMEM;
- p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ p->daddr = dma_map_page_attrs(vm->dma,
+ p->page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
vm_free_page(vm, p->page);
return -ENOMEM;
@@ -506,7 +590,7 @@ static int __setup_page_dma(struct i915_address_space *vm,
static int setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p)
{
- return __setup_page_dma(vm, p, I915_GFP_DMA);
+ return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}
static void cleanup_page_dma(struct i915_address_space *vm,
@@ -520,8 +604,8 @@ static void cleanup_page_dma(struct i915_address_space *vm,
#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
-#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
-#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
+#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
+#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
static void fill_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
@@ -575,8 +659,11 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
goto skip;
- addr = dma_map_page(vm->dma, page, 0, size,
- PCI_DMA_BIDIRECTIONAL);
+ addr = dma_map_page_attrs(vm->dma,
+ page, 0, size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
if (unlikely(dma_mapping_error(vm->dma, addr)))
goto free_page;
@@ -614,7 +701,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
struct i915_page_table *pt;
- pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
+ pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
@@ -637,21 +724,20 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
fill_px(vm, pt,
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}
-static void gen6_initialize_pt(struct i915_address_space *vm,
+static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
struct i915_page_table *pt)
{
- fill32_px(vm, pt,
- vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+ fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
struct i915_page_directory *pd;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
+ pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
@@ -685,7 +771,7 @@ static int __pdp_init(struct i915_address_space *vm,
const unsigned int pdpes = i915_pdpes_per_pdp(vm);
pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
- GFP_KERNEL | __GFP_NOWARN);
+ I915_GFP_ALLOW_FAIL);
if (unlikely(!pdp->page_directory))
return -ENOMEM;
@@ -765,53 +851,6 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
-/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct i915_request *rq,
- unsigned entry,
- dma_addr_t addr)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- BUG_ON(entry >= 4);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
- *cs++ = upper_32_bits(addr);
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
- *cs++ = lower_32_bits(addr);
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- int i, ret;
-
- for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- ret = gen8_write_pdp(rq, i, pd_daddr);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
-}
-
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
@@ -819,7 +858,7 @@ static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -833,7 +872,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -1005,14 +1044,15 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
struct i915_page_directory_pointer *pdp,
struct sgt_dma *iter,
struct gen8_insert_pte *idx,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct i915_page_directory *pd;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
gen8_pte_t *vaddr;
bool ret;
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
pd = pdp->page_directory[idx->pdpe];
vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
do {
@@ -1043,7 +1083,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
break;
}
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
pd = pdp->page_directory[idx->pdpe];
}
@@ -1059,14 +1099,14 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
- cache_level);
+ cache_level, flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
@@ -1074,9 +1114,10 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
struct i915_page_directory_pointer **pdps,
struct sgt_dma *iter,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
u64 start = vma->node.start;
dma_addr_t rem = iter->sg->length;
@@ -1192,19 +1233,21 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+ gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+ flags);
} else {
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
- &iter, &idx, cache_level))
+ &iter, &idx, cache_level,
+ flags))
GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -1229,7 +1272,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
@@ -1272,7 +1315,7 @@ free_scratch_page:
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct drm_i915_private *dev_priv = vm->i915;
enum vgt_g2v_type msg;
int i;
@@ -1333,13 +1376,13 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
int i;
for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
+ if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
continue;
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
}
- cleanup_px(&ppgtt->base, &ppgtt->pml4);
+ cleanup_px(&ppgtt->vm, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1353,7 +1396,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (use_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
gen8_free_scratch(vm);
}
@@ -1489,7 +1532,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
gen8_pte_t scratch_pte,
struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory *pd;
u32 pdpe;
@@ -1499,7 +1542,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
u64 pd_start = start;
u32 pde;
- if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
+ if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
continue;
seq_printf(m, "\tPDPE #%d\n", pdpe);
@@ -1507,7 +1550,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
u32 pte;
gen8_pte_t *pt_vaddr;
- if (pd->page_table[pde] == ppgtt->base.scratch_pt)
+ if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
continue;
pt_vaddr = kmap_atomic_px(pt);
@@ -1540,10 +1583,10 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
- u64 start = 0, length = ppgtt->base.total;
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ u64 start = 0, length = ppgtt->vm.total;
if (use_4lvl(vm)) {
u64 pml4e;
@@ -1551,7 +1594,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
struct i915_page_directory_pointer *pdp;
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
+ if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
continue;
seq_printf(m, " PML4E #%llu\n", pml4e);
@@ -1564,10 +1607,10 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
struct i915_page_directory *pd;
- u64 start = 0, length = ppgtt->base.total;
+ u64 start = 0, length = ppgtt->vm.total;
u64 from = start;
unsigned int pdpe;
@@ -1601,211 +1644,153 @@ unwind:
* space.
*
*/
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = vm->i915;
- int ret;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ppgtt->ref);
+
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
1ULL << 48 :
1ULL << 32;
+ /*
+ * From bdw, there is support for read-only pages in the PPGTT.
+ *
+ * XXX GVT is not honouring the lack of RW in the PTE bits.
+ */
+ ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+
+ i915_address_space_init(&ppgtt->vm, i915);
+
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
*/
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
- ppgtt->base.pt_kmap_wc = true;
+ if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
+ ppgtt->vm.pt_kmap_wc = true;
- ret = gen8_init_scratch(&ppgtt->base);
- if (ret) {
- ppgtt->base.total = 0;
- return ret;
- }
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_free;
- if (use_4lvl(vm)) {
- ret = setup_px(&ppgtt->base, &ppgtt->pml4);
- if (ret)
- goto free_scratch;
+ if (use_4lvl(&ppgtt->vm)) {
+ err = setup_px(&ppgtt->vm, &ppgtt->pml4);
+ if (err)
+ goto err_scratch;
- gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
+ gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
- ppgtt->switch_mm = gen8_mm_switch_4lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
} else {
- ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
- if (ret)
- goto free_scratch;
+ err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
+ if (err)
+ goto err_scratch;
- if (intel_vgpu_active(dev_priv)) {
- ret = gen8_preallocate_top_level_pdp(ppgtt);
- if (ret) {
+ if (intel_vgpu_active(i915)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err) {
__pdp_fini(&ppgtt->pdp);
- goto free_scratch;
+ goto err_scratch;
}
}
- ppgtt->switch_mm = gen8_mm_switch_3lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
}
- if (intel_vgpu_active(dev_priv))
+ if (intel_vgpu_active(i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
ppgtt->debug_dump = gen8_dump_ppgtt;
- return 0;
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
-free_scratch:
- gen8_free_scratch(&ppgtt->base);
- return ret;
+ return ppgtt;
+
+err_scratch:
+ gen8_free_scratch(&ppgtt->vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
}
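gen8_ppgtt_init() becomes a true constructor: it allocates, initializes, and unwinds internally, returning the object or an ERR_PTR-encoded errno instead of mutating a caller-allocated struct. The convention in miniature, with userspace re-implementations of the ERR_PTR helpers:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct ppgtt { void *scratch; };

static struct ppgtt *ppgtt_create(void)
{
	struct ppgtt *ppgtt = calloc(1, sizeof(*ppgtt));

	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt->scratch = malloc(4096);
	if (!ppgtt->scratch)
		goto err_free;		/* unwind in reverse order of setup */

	return ppgtt;

err_free:
	free(ppgtt);
	return ERR_PTR(-ENOMEM);
}

int main(void)
{
	struct ppgtt *p = ppgtt_create();

	if (IS_ERR(p))
		return (int)-PTR_ERR(p);
	free(p->scratch);
	free(p);
	return 0;
}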
-static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct i915_page_table *unused;
- gen6_pte_t scratch_pte;
- u32 pd_entry, pte, pde;
- u32 start = 0, length = ppgtt->base.total;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+ struct i915_page_table *pt;
+ u32 pte, pde;
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
+ gen6_for_all_pdes(pt, &base->pd, pde) {
+ gen6_pte_t *vaddr;
+
+ if (pt == base->vm.scratch_pt)
+ continue;
+
+ if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+ u32 expected =
+ GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
+ GEN6_PDE_VALID;
+ u32 pd_entry = readl(ppgtt->pd_addr + pde);
+
+ if (pd_entry != expected)
+ seq_printf(m,
+ "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+ pde,
+ pd_entry,
+ expected);
+
+ seq_printf(m, "\tPDE: %x\n", pd_entry);
+ }
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
- u32 expected;
- gen6_pte_t *pt_vaddr;
- const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
- pd_entry = readl(ppgtt->pd_addr + pde);
- expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
-
- if (pd_entry != expected)
- seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
- pde,
- pd_entry,
- expected);
- seq_printf(m, "\tPDE: %x\n", pd_entry);
-
- pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
-
- for (pte = 0; pte < GEN6_PTES; pte+=4) {
- unsigned long va =
- (pde * PAGE_SIZE * GEN6_PTES) +
- (pte * PAGE_SIZE);
+ vaddr = kmap_atomic_px(base->pd.page_table[pde]);
+ for (pte = 0; pte < GEN6_PTES; pte += 4) {
int i;
- bool found = false;
+
for (i = 0; i < 4; i++)
- if (pt_vaddr[pte + i] != scratch_pte)
- found = true;
- if (!found)
+ if (vaddr[pte + i] != scratch_pte)
+ break;
+ if (i == 4)
continue;
- seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+ seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+ pde, pte,
+ (pde * GEN6_PTES + pte) * PAGE_SIZE);
for (i = 0; i < 4; i++) {
- if (pt_vaddr[pte + i] != scratch_pte)
- seq_printf(m, " %08x", pt_vaddr[pte + i]);
+ if (vaddr[pte + i] != scratch_pte)
+ seq_printf(m, " %08x", vaddr[pte + i]);
else
- seq_puts(m, " SCRATCH ");
+ seq_puts(m, " SCRATCH");
}
seq_puts(m, "\n");
}
- kunmap_atomic(pt_vaddr);
+ kunmap_atomic(vaddr);
}
}
/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
+static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
const unsigned int pde,
const struct i915_page_table *pt)
{
/* Caller needs to make sure the write completes if necessary */
- writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
- ppgtt->pd_addr + pde);
-}
-
-/* Write all the page tables found in the ppgtt structure to incrementing page
- * directories. */
-static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
- u32 start, u32 length)
-{
- struct i915_page_table *pt;
- unsigned int pde;
-
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
- gen6_write_pde(ppgtt, pde, pt);
-
- mark_tlbs_dirty(ppgtt);
- wmb();
-}
-
-static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
-{
- GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
- return ppgtt->pd.base.ggtt_offset << 10;
-}
-
-static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- /* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(2);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
- *cs++ = PP_DIR_DCLV_2G;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = get_pd_offset(ppgtt);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- /* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(2);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
- *cs++ = PP_DIR_DCLV_2G;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = get_pd_offset(ppgtt);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- struct drm_i915_private *dev_priv = rq->i915;
-
- I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
- return 0;
+ iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+ ppgtt->pd_addr + pde);
}
static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
@@ -1867,22 +1852,30 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
unsigned int first_entry = start >> PAGE_SHIFT;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length >> PAGE_SHIFT;
- gen6_pte_t scratch_pte =
- vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
while (num_entries) {
- struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
- unsigned int end = min(pte + num_entries, GEN6_PTES);
+ struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
+ const unsigned int end = min(pte + num_entries, GEN6_PTES);
+ const unsigned int count = end - pte;
gen6_pte_t *vaddr;
- num_entries -= end - pte;
+ GEM_BUG_ON(pt == vm->scratch_pt);
+
+ num_entries -= count;
+
+ GEM_BUG_ON(count > pt->used_ptes);
+ pt->used_ptes -= count;
+ if (!pt->used_ptes)
+ ppgtt->scan_for_unused_pt = true;
- /* Note that the hw doesn't support removing PDE on the fly
+ /*
+ * Note that the hw doesn't support removing PDE on the fly
* (they are cached inside the context with no means to
* invalidate the cache), so we can only reset the PTE
* entries back to scratch.
@@ -1911,6 +1904,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
+ GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
+
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -1939,218 +1934,280 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
static int gen6_alloc_va_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct i915_page_table *pt;
u64 from = start;
unsigned int pde;
bool flush = false;
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
+ gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
+ const unsigned int count = gen6_pte_count(start, length);
+
if (pt == vm->scratch_pt) {
pt = alloc_pt(vm);
if (IS_ERR(pt))
goto unwind_out;
- gen6_initialize_pt(vm, pt);
- ppgtt->pd.page_table[pde] = pt;
- gen6_write_pde(ppgtt, pde, pt);
- flush = true;
+ gen6_initialize_pt(ppgtt, pt);
+ ppgtt->base.pd.page_table[pde] = pt;
+
+ if (i915_vma_is_bound(ppgtt->vma,
+ I915_VMA_GLOBAL_BIND)) {
+ gen6_write_pde(ppgtt, pde, pt);
+ flush = true;
+ }
+
+ GEM_BUG_ON(pt->used_ptes);
}
+
+ pt->used_ptes += count;
}
if (flush) {
- mark_tlbs_dirty(ppgtt);
- wmb();
+ mark_tlbs_dirty(&ppgtt->base);
+ gen6_ggtt_invalidate(ppgtt->base.vm.i915);
}
return 0;
unwind_out:
- gen6_ppgtt_clear_range(vm, from, start);
+ gen6_ppgtt_clear_range(vm, from, start - from);
return -ENOMEM;
}
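Together with the clear_range() hunk earlier, this adds population counting to gen6 page tables: allocation bumps used_ptes by gen6_pte_count() and clearing decrements it, so a table that empties can be flagged (scan_for_unused_pt) and reclaimed at unbind. The bookkeeping in isolation (illustrative sizes, not the hardware constants):

#include <assert.h>
#include <stdbool.h>

#define NPDE  512
#define NPTES 1024

struct pt { unsigned int used_ptes; };

struct ppgtt {
	struct pt tables[NPDE];
	bool scan_for_unused_pt;
};

void va_alloc(struct ppgtt *p, unsigned int pde, unsigned int count)
{
	p->tables[pde].used_ptes += count;
	assert(p->tables[pde].used_ptes <= NPTES);
}

void va_clear(struct ppgtt *p, unsigned int pde, unsigned int count)
{
	struct pt *pt = &p->tables[pde];

	assert(count <= pt->used_ptes);
	pt->used_ptes -= count;
	if (!pt->used_ptes)
		p->scan_for_unused_pt = true;	/* reclaim it at unbind */
}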
-static int gen6_init_scratch(struct i915_address_space *vm)
+static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
{
+ struct i915_address_space * const vm = &ppgtt->base.vm;
+ struct i915_page_table *unused;
+ u32 pde;
int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
+ ppgtt->scratch_pte =
+ vm->pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_NONE, PTE_READ_ONLY);
+
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
cleanup_scratch_page(vm);
return PTR_ERR(vm->scratch_pt);
}
- gen6_initialize_pt(vm, vm->scratch_pt);
+ gen6_initialize_pt(ppgtt, vm->scratch_pt);
+ gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
+ ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
return 0;
}
-static void gen6_free_scratch(struct i915_address_space *vm)
+static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
{
free_pt(vm, vm->scratch_pt);
cleanup_scratch_page(vm);
}
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory *pd = &ppgtt->pd;
struct i915_page_table *pt;
u32 pde;
- drm_mm_remove_node(&ppgtt->node);
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ if (pt != ppgtt->base.vm.scratch_pt)
+ free_pt(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- gen6_for_all_pdes(pt, pd, pde)
- if (pt != vm->scratch_pt)
- free_pt(vm, pt);
+ i915_vma_destroy(ppgtt->vma);
- gen6_free_scratch(vm);
+ gen6_ppgtt_free_pd(ppgtt);
+ gen6_ppgtt_free_scratch(vm);
}
-static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
+static int pd_vma_set_pages(struct i915_vma *vma)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
+ vma->pages = ERR_PTR(-ENODEV);
+ return 0;
+}
- /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
- * allocator works in address space sizes, so it's multiplied by page
- * size. We allocate at the top of the GTT to avoid fragmentation.
- */
- BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
+static void pd_vma_clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
- ret = gen6_init_scratch(vm);
- if (ret)
- return ret;
+ vma->pages = NULL;
+}
- ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
- GEN6_PD_SIZE, GEN6_PD_ALIGN,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->base.total,
- PIN_HIGH);
- if (ret)
- goto err_out;
+static int pd_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct gen6_hw_ppgtt *ppgtt = vma->private;
+ u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
+ struct i915_page_table *pt;
+ unsigned int pde;
- if (ppgtt->node.start < ggtt->mappable_end)
- DRM_DEBUG("Forced to use aperture for PDEs\n");
+ ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
- ppgtt->pd.base.ggtt_offset =
- ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ gen6_write_pde(ppgtt, pde, pt);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
- ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
+ mark_tlbs_dirty(&ppgtt->base);
+ gen6_ggtt_invalidate(ppgtt->base.vm.i915);
return 0;
-
-err_out:
- gen6_free_scratch(vm);
- return ret;
}
-static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+static void pd_vma_unbind(struct i915_vma *vma)
{
- return gen6_ppgtt_allocate_page_directories(ppgtt);
-}
+ struct gen6_hw_ppgtt *ppgtt = vma->private;
+ struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
+ struct i915_page_table *pt;
+ unsigned int pde;
-static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
- u64 start, u64 length)
-{
- struct i915_page_table *unused;
- u32 pde;
+ if (!ppgtt->scan_for_unused_pt)
+ return;
+
+ /* Free all no longer used page tables */
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
+ if (pt->used_ptes || pt == scratch_pt)
+ continue;
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
- ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
+ free_pt(&ppgtt->base.vm, pt);
+ ppgtt->base.pd.page_table[pde] = scratch_pt;
+ }
+
+ ppgtt->scan_for_unused_pt = false;
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
-{
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
+static const struct i915_vma_ops pd_vma_ops = {
+ .set_pages = pd_vma_set_pages,
+ .clear_pages = pd_vma_clear_pages,
+ .bind_vma = pd_vma_bind,
+ .unbind_vma = pd_vma_unbind,
+};
- ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
- ppgtt->switch_mm = gen6_mm_switch;
- else if (IS_HASWELL(dev_priv))
- ppgtt->switch_mm = hsw_mm_switch;
- else if (IS_GEN7(dev_priv))
- ppgtt->switch_mm = gen7_mm_switch;
- else
- BUG();
+static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
+{
+ struct drm_i915_private *i915 = ppgtt->base.vm.i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_vma *vma;
- ret = gen6_ppgtt_alloc(ppgtt);
- if (ret)
- return ret;
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(size > ggtt->vm.total);
- ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+ vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
- gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+ init_request_active(&vma->last_fence, NULL);
- ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
- if (ret) {
- gen6_ppgtt_cleanup(&ppgtt->base);
- return ret;
- }
+ vma->vm = &ggtt->vm;
+ vma->ops = &pd_vma_ops;
+ vma->private = ppgtt;
- ppgtt->base.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
- ppgtt->base.cleanup = gen6_ppgtt_cleanup;
- ppgtt->debug_dump = gen6_dump_ppgtt;
+ vma->active = RB_ROOT;
- DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
- ppgtt->node.size >> 20,
- ppgtt->node.start / PAGE_SIZE);
+ vma->size = size;
+ vma->fence_size = size;
+ vma->flags = I915_VMA_GGTT;
+ vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
- DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
- ppgtt->pd.base.ggtt_offset << 10);
+ INIT_LIST_HEAD(&vma->obj_link);
+ list_add(&vma->vm_link, &vma->vm->unbound_list);
- return 0;
+ return vma;
}
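pd_vma_create() manufactures an object-less VMA whose private ops redirect bind/unbind to page-directory maintenance, letting the ordinary GGTT pin/evict machinery place the PD: bind rewrites every PDE for the new GGTT offset, unbind frees emptied page tables. The vtable shape in a compilable userspace sketch (types are illustrative, not the i915 definitions):

#include <stdio.h>

struct vma;

struct vma_ops {
	int  (*bind)(struct vma *vma);
	void (*unbind)(struct vma *vma);
};

struct vma {
	const struct vma_ops *ops;
	void *private;			/* here: the gen6 ppgtt */
	unsigned long ggtt_offset;
};

static int pd_bind(struct vma *vma)
{
	printf("rewrite PDEs relative to ggtt+%#lx\n", vma->ggtt_offset);
	return 0;
}

static void pd_unbind(struct vma *vma)
{
	printf("scan ggtt+%#lx PD for empty page tables\n", vma->ggtt_offset);
}

static const struct vma_ops pd_vma_ops = {
	.bind	= pd_bind,
	.unbind	= pd_unbind,
};

int main(void)
{
	struct vma pd = { .ops = &pd_vma_ops, .ggtt_offset = 0x100000 };

	pd.ops->bind(&pd);	/* normally called by the pin machinery */
	pd.ops->unbind(&pd);
	return 0;
}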
-static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_private *dev_priv)
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
- ppgtt->base.i915 = dev_priv;
- ppgtt->base.dma = &dev_priv->drm.pdev->dev;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- if (INTEL_GEN(dev_priv) < 8)
- return gen6_ppgtt_init(ppgtt);
- else
- return gen8_ppgtt_init(ppgtt);
+ /*
+ * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
+ * which will be pinned into every active context.
+ * (When vma->pin_count becomes atomic, I expect we will naturally
+ * need a larger, unpacked, type and kill this redundancy.)
+ */
+ if (ppgtt->pin_count++)
+ return 0;
+
+ /*
+ * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+ return i915_vma_pin(ppgtt->vma,
+ 0, GEN6_PD_ALIGN,
+ PIN_GLOBAL | PIN_HIGH);
}
-static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv,
- const char *name)
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
{
- drm_mm_init(&vm->mm, 0, vm->total);
- vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- INIT_LIST_HEAD(&vm->active_list);
- INIT_LIST_HEAD(&vm->inactive_list);
- INIT_LIST_HEAD(&vm->unbound_list);
+ GEM_BUG_ON(!ppgtt->pin_count);
+ if (--ppgtt->pin_count)
+ return;
- list_add_tail(&vm->global_link, &dev_priv->vm_list);
- pagevec_init(&vm->free_pages);
+ i915_vma_unpin(ppgtt->vma);
}
-static void i915_address_space_fini(struct i915_address_space *vm)
+static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
- if (pagevec_count(&vm->free_pages))
- vm_free_pages_release(vm, true);
+ struct i915_ggtt * const ggtt = &i915->ggtt;
+ struct gen6_hw_ppgtt *ppgtt;
+ int err;
- drm_mm_takedown(&vm->mm);
- list_del(&vm->global_link);
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ppgtt->base.ref);
+
+ ppgtt->base.vm.i915 = i915;
+ ppgtt->base.vm.dma = &i915->drm.pdev->dev;
+
+ ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+
+ i915_address_space_init(&ppgtt->base.vm, i915);
+
+ ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+ ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.debug_dump = gen6_dump_ppgtt;
+
+ ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
+
+ ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+
+ err = gen6_ppgtt_init_scratch(ppgtt);
+ if (err)
+ goto err_free;
+
+ ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
+ if (IS_ERR(ppgtt->vma)) {
+ err = PTR_ERR(ppgtt->vma);
+ goto err_scratch;
+ }
+
+ return &ppgtt->base;
+
+err_scratch:
+ gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
}
static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
@@ -2212,29 +2269,28 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
}
+static struct i915_hw_ppgtt *
+__hw_ppgtt_create(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) < 8)
+ return gen6_ppgtt_create(i915);
+ else
+ return gen8_ppgtt_create(i915);
+}
+
struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv,
- const char *name)
+i915_ppgtt_create(struct drm_i915_private *i915,
+ struct drm_i915_file_private *fpriv)
{
struct i915_hw_ppgtt *ppgtt;
- int ret;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
- ret = __hw_ppgtt_init(ppgtt, dev_priv);
- if (ret) {
- kfree(ppgtt);
- return ERR_PTR(ret);
- }
+ ppgtt = __hw_ppgtt_create(i915);
+ if (IS_ERR(ppgtt))
+ return ppgtt;
- kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv, name);
- ppgtt->base.file = fpriv;
+ ppgtt->vm.file = fpriv;
- trace_i915_ppgtt_create(&ppgtt->base);
+ trace_i915_ppgtt_create(&ppgtt->vm);
return ppgtt;
}
@@ -2268,16 +2324,16 @@ void i915_ppgtt_release(struct kref *kref)
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
- trace_i915_ppgtt_release(&ppgtt->base);
+ trace_i915_ppgtt_release(&ppgtt->vm);
- ppgtt_destroy_vma(&ppgtt->base);
+ ppgtt_destroy_vma(&ppgtt->vm);
- GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
- GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
- GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
- ppgtt->base.cleanup(&ppgtt->base);
- i915_address_space_fini(&ppgtt->base);
+ ppgtt->vm.cleanup(&ppgtt->vm);
+ i915_address_space_fini(&ppgtt->vm);
kfree(ppgtt);
}
@@ -2373,7 +2429,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
i915_ggtt_invalidate(dev_priv);
}
@@ -2419,7 +2475,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
- gen8_set_pte(pte, gen8_pte_encode(addr, level));
+ gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
ggtt->invalidate(vm->i915);
}
@@ -2427,14 +2483,19 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
- u32 unused)
+ u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
dma_addr_t addr;
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
gtt_entries += vma->node.start >> PAGE_SHIFT;
for_each_sgt_dma(addr, sgt_iter, vma->pages)
@@ -2500,7 +2561,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2561,13 +2622,14 @@ struct insert_entries {
struct i915_address_space *vm;
struct i915_vma *vma;
enum i915_cache_level level;
+ u32 flags;
};
static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
+ gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
@@ -2576,9 +2638,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
- u32 unused)
+ u32 flags)
{
- struct insert_entries arg = { vm, vma, level };
+ struct insert_entries arg = { vm, vma, level, flags };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
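stop_machine() hands its callback a single void *, so the new flags value rides along in the argument struct, as the hunk above shows. The packing pattern on its own (userspace stand-in; the stop_machine() call is indicated in a comment):

struct insert_args {
	void *vm;
	void *vma;
	int level;
	unsigned int flags;	/* newly threaded through by this patch */
};

static int insert_cb(void *data)
{
	struct insert_args *a = data;

	/* ... perform the GGTT update using a->flags while all other
	 * CPUs are held off ... */
	(void)a;
	return 0;
}

int insert_serialized(void *vm, void *vma, int level, unsigned int flags)
{
	struct insert_args a = { vm, vma, level, flags };

	/* kernel: return stop_machine(insert_cb, &a, NULL); */
	return insert_cb(&a);
}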
@@ -2669,9 +2731,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
- /* Currently applicable only to VLV */
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
- if (obj->gt_ro)
+ if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915);
@@ -2709,23 +2771,22 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
/* Currently applicable only to VLV */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
- if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
- appgtt->base.allocate_va_range) {
- ret = appgtt->base.allocate_va_range(&appgtt->base,
- vma->node.start,
- vma->size);
+ if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ ret = appgtt->vm.allocate_va_range(&appgtt->vm,
+ vma->node.start,
+ vma->size);
if (ret)
return ret;
}
- appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
- pte_flags);
+ appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
+ pte_flags);
}
if (flags & I915_VMA_GLOBAL_BIND) {
@@ -2748,7 +2809,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
- struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
+ struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
vm->clear_range(vm, vma->node.start, vma->size);
}
@@ -2762,7 +2823,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, 0)) {
+ if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -2811,34 +2872,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
struct i915_hw_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
+ ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
- if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
+ if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
err = -ENODEV;
goto err_ppgtt;
}
- if (ppgtt->base.allocate_va_range) {
- /* Note we only pre-allocate as far as the end of the global
- * GTT. On 48b / 4-level page-tables, the difference is very,
- * very significant! We have to preallocate as GVT/vgpu does
- * not like the page directory disappearing.
- */
- err = ppgtt->base.allocate_va_range(&ppgtt->base,
- 0, ggtt->base.total);
- if (err)
- goto err_ppgtt;
- }
+ /*
+ * Note we only pre-allocate as far as the end of the global
+ * GTT. On 48b / 4-level page-tables, the difference is very,
+ * very significant! We have to preallocate as GVT/vgpu does
+ * not like the page directory disappearing.
+ */
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
+ if (err)
+ goto err_ppgtt;
i915->mm.aliasing_ppgtt = ppgtt;
- GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
- ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+ GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+ ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
- GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
- ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
+ GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+ ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
return 0;
@@ -2858,8 +2917,8 @@ void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
i915_ppgtt_put(ppgtt);
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2883,7 +2942,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -2891,16 +2950,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt->base.clear_range(&ggtt->base, hole_start,
- hole_end - hole_start);
+ ggtt->vm.clear_range(&ggtt->vm, hole_start,
+ hole_end - hole_start);
}
/* And finally clear the reserved guard page */
- ggtt->base.clear_range(&ggtt->base,
- ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
ret = i915_gem_init_aliasing_ppgtt(dev_priv);
@@ -2925,30 +2983,26 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
struct i915_vma *vma, *vn;
struct pagevec *pvec;
- ggtt->base.closed = true;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
- list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
- WARN_ON(i915_vma_unbind(vma));
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- i915_gem_cleanup_stolen(&dev_priv->drm);
+ ggtt->vm.closed = true;
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_fini_aliasing_ppgtt(dev_priv);
+ GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
+ WARN_ON(i915_vma_unbind(vma));
+
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
- if (drm_mm_initialized(&ggtt->base.mm)) {
+ if (drm_mm_initialized(&ggtt->vm.mm)) {
intel_vgt_deballoon(dev_priv);
- i915_address_space_fini(&ggtt->base);
+ i915_address_space_fini(&ggtt->vm);
}
- ggtt->base.cleanup(&ggtt->base);
+ ggtt->vm.cleanup(&ggtt->vm);
- pvec = &dev_priv->mm.wc_stash;
+ pvec = &dev_priv->mm.wc_stash.pvec;
if (pvec->nr) {
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
@@ -2958,6 +3012,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
arch_phys_wc_del(ggtt->mtrr);
io_mapping_fini(&ggtt->iomap);
+
+ i915_gem_cleanup_stolen(&dev_priv->drm);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2996,7 +3052,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
phys_addr_t phys_addr;
int ret;
@@ -3020,7 +3076,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
+ ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -3326,7 +3382,7 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3350,29 +3406,30 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
size = gen8_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- ggtt->base.cleanup = gen6_gmch_remove;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.insert_page = gen8_ggtt_insert_page;
- ggtt->base.clear_range = nop_clear_range;
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->base.clear_range = gen8_ggtt_clear_range;
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
- ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
- ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->base.clear_range != nop_clear_range)
- ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->vm.clear_range != nop_clear_range)
+ ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
}
ggtt->invalidate = gen6_ggtt_invalidate;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
setup_private_pat(dev_priv);
return ggtt_probe_common(ggtt, size);
@@ -3380,7 +3437,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3407,29 +3464,30 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
- ggtt->base.clear_range = gen6_ggtt_clear_range;
- ggtt->base.insert_page = gen6_ggtt_insert_page;
- ggtt->base.insert_entries = gen6_ggtt_insert_entries;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = gen6_gmch_remove;
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
if (HAS_EDRAM(dev_priv))
- ggtt->base.pte_encode = iris_pte_encode;
+ ggtt->vm.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev_priv))
- ggtt->base.pte_encode = hsw_pte_encode;
+ ggtt->vm.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev_priv))
- ggtt->base.pte_encode = byt_pte_encode;
+ ggtt->vm.pte_encode = byt_pte_encode;
else if (INTEL_GEN(dev_priv) >= 7)
- ggtt->base.pte_encode = ivb_pte_encode;
+ ggtt->vm.pte_encode = ivb_pte_encode;
else
- ggtt->base.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
return ggtt_probe_common(ggtt, size);
}
@@ -3441,7 +3499,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
phys_addr_t gmadr_base;
int ret;
@@ -3451,26 +3509,25 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
return -EIO;
}
- intel_gtt_get(&ggtt->base.total,
- &gmadr_base,
- &ggtt->mappable_end);
+ intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
ggtt->gmadr =
(struct resource) DEFINE_RES_MEM(gmadr_base,
ggtt->mappable_end);
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
- ggtt->base.insert_page = i915_ggtt_insert_page;
- ggtt->base.insert_entries = i915_ggtt_insert_entries;
- ggtt->base.clear_range = i915_ggtt_clear_range;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = i915_gmch_remove;
+ ggtt->vm.insert_page = i915_ggtt_insert_page;
+ ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+ ggtt->vm.clear_range = i915_ggtt_clear_range;
+ ggtt->vm.cleanup = i915_gmch_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3486,8 +3543,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- ggtt->base.i915 = dev_priv;
- ggtt->base.dma = &dev_priv->drm.pdev->dev;
+ ggtt->vm.i915 = dev_priv;
+ ggtt->vm.dma = &dev_priv->drm.pdev->dev;
if (INTEL_GEN(dev_priv) <= 5)
ret = i915_gmch_probe(ggtt);
@@ -3504,27 +3561,29 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* restriction!
*/
if (USES_GUC(dev_priv)) {
- ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
- ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+ ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
- if ((ggtt->base.total - 1) >> 32) {
+ if ((ggtt->vm.total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
- ggtt->base.total >> 20);
- ggtt->base.total = 1ULL << 32;
- ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+ ggtt->vm.total >> 20);
+ ggtt->vm.total = 1ULL << 32;
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
- if (ggtt->mappable_end > ggtt->base.total) {
+ if (ggtt->mappable_end > ggtt->vm.total) {
DRM_ERROR("mappable aperture extends past end of GGTT,"
" aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->base.total);
- ggtt->mappable_end = ggtt->base.total;
+ &ggtt->mappable_end, ggtt->vm.total);
+ ggtt->mappable_end = ggtt->vm.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("DSM size = %lluM\n",
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
@@ -3543,7 +3602,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- INIT_LIST_HEAD(&dev_priv->vm_list);
+ stash_init(&dev_priv->mm.wc_stash);
/* Note that we use page colouring to enforce a guard page at the
* end of the address space. This is required as the CS may prefetch
@@ -3551,9 +3610,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* and beyond the end of the GTT if we do not provide a guard.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_address_space_init(&ggtt->base, dev_priv, "[global]");
+ i915_address_space_init(&ggtt->vm, dev_priv);
+
+ /* Only VLV supports read-only GGTT mappings */
+ ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+
if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
- ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+ ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
@@ -3576,7 +3639,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
out_gtt_cleanup:
- ggtt->base.cleanup(&ggtt->base);
+ ggtt->vm.cleanup(&ggtt->vm);
return ret;
}
@@ -3610,34 +3673,35 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *obj, *on;
+ struct i915_vma *vma, *vn;
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+ ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
- bool ggtt_bound = false;
- struct i915_vma *vma;
+ GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
- for_each_ggtt_vma(vma, obj) {
- if (!i915_vma_unbind(vma))
- continue;
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+ continue;
- WARN_ON(i915_vma_bind(vma, obj->cache_level,
- PIN_UPDATE));
- ggtt_bound = true;
- }
+ if (!i915_vma_unbind(vma))
+ continue;
- if (ggtt_bound)
+ WARN_ON(i915_vma_bind(vma,
+ obj ? obj->cache_level : 0,
+ PIN_UPDATE));
+ if (obj)
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
- ggtt->base.closed = false;
+ ggtt->vm.closed = false;
+ i915_ggtt_invalidate(dev_priv);
if (INTEL_GEN(dev_priv) >= 8) {
struct intel_ppat *ppat = &dev_priv->ppat;
@@ -3646,23 +3710,6 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
dev_priv->ppat.update_hw(dev_priv);
return;
}
-
- if (USES_PPGTT(dev_priv)) {
- struct i915_address_space *vm;
-
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- struct i915_hw_ppgtt *ppgtt;
-
- if (i915_is_ggtt(vm))
- ppgtt = dev_priv->mm.aliasing_ppgtt;
- else
- ppgtt = i915_vm_to_ppgtt(vm);
-
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
- }
- }
-
- i915_ggtt_invalidate(dev_priv);
}
static struct scatterlist *
@@ -3880,7 +3927,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
@@ -3977,7 +4024,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
@@ -3988,7 +4035,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
mode = DRM_MM_INSERT_BEST;
if (flags & PIN_HIGH)
- mode = DRM_MM_INSERT_HIGH;
+ mode = DRM_MM_INSERT_HIGHEST;
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
@@ -4008,6 +4055,15 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
+ if (mode & DRM_MM_INSERT_ONCE) {
+ err = drm_mm_insert_node_in_range(&vm->mm, node,
+ size, alignment, color,
+ start, end,
+ DRM_MM_INSERT_BEST);
+ if (err != -ENOSPC)
+ return err;
+ }
+
if (flags & PIN_NOEVICT)
return -ENOSPC;
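
For context on the retry above: in include/drm/drm_mm.h, DRM_MM_INSERT_HIGHEST is DRM_MM_INSERT_HIGH combined with DRM_MM_INSERT_ONCE, i.e. it probes only the single topmost hole. The new block therefore gives PIN_HIGH a second, exhaustive pass before we fall through to eviction. A minimal sketch of the resulting policy (helper name ours; the flags and drm_mm_insert_node_in_range() are the real DRM API):

/* Sketch only, not part of the patch. */
static int insert_high_then_best(struct drm_mm *mm, struct drm_mm_node *node,
				 u64 size, u64 align, unsigned long color,
				 u64 start, u64 end)
{
	int err;

	/* Try only the topmost hole, keeping the top of the GTT compact. */
	err = drm_mm_insert_node_in_range(mm, node, size, align, color,
					  start, end, DRM_MM_INSERT_HIGHEST);
	if (err != -ENOSPC)
		return err;

	/* Otherwise fall back to a best-fit scan over every hole. */
	return drm_mm_insert_node_in_range(mm, node, size, align, color,
					   start, end, DRM_MM_INSERT_BEST);
}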
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index aec4f73574f4..2a116a91420b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -58,6 +58,7 @@
struct drm_i915_file_private;
struct drm_i915_fence_reg;
+struct i915_vma;
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
@@ -65,7 +66,7 @@ typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t;
-#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -254,6 +255,26 @@ struct i915_pml4 {
struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};
+struct i915_vma_ops {
+ /* Map an object into an address space with the given cache flags. */
+ int (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ /*
+ * Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page.
+ */
+ void (*unbind_vma)(struct i915_vma *vma);
+
+ int (*set_pages)(struct i915_vma *vma);
+ void (*clear_pages)(struct i915_vma *vma);
+};
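
The four vma vfuncs move off i915_address_space into this dedicated table. An illustrative dispatch only (the series may equally cache a pointer to the table on the vma itself); vma->vm is the existing back-pointer from i915_vma:

/* Illustrative, not from the patch: reaching the ops via the vm. */
static int example_vma_bind(struct i915_vma *vma,
			    enum i915_cache_level cache_level, u32 flags)
{
	return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
}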
+
+struct pagestash {
+ spinlock_t lock;
+ struct pagevec pvec;
+};
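
pagestash pairs the old free_pages pagevec with its own spinlock; stash_init(), called from i915_ggtt_init_hw() below, is not shown in this hunk. A sketch consistent with the fields above:

/* Sketch of what stash_init() is expected to do, given the fields. */
static void stash_init(struct pagestash *stash)
{
	spin_lock_init(&stash->lock);
	pagevec_init(&stash->pvec);
}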
+
struct i915_address_space {
struct drm_mm mm;
struct drm_i915_private *i915;
@@ -267,12 +288,13 @@ struct i915_address_space {
* assign blame.
*/
struct drm_i915_file_private *file;
- struct list_head global_link;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
bool closed;
+ struct mutex mutex; /* protects vma and our lists */
+
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -308,8 +330,13 @@ struct i915_address_space {
*/
struct list_head unbound_list;
- struct pagevec free_pages;
- bool pt_kmap_wc;
+ struct pagestash free_pages;
+
+ /* Some systems require uncached updates of the page directories */
+ bool pt_kmap_wc:1;
+
+ /* Some systems support read-only mappings for GGTT and/or PPGTT */
+ bool has_read_only:1;
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
@@ -331,15 +358,8 @@ struct i915_address_space {
enum i915_cache_level cache_level,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
- /** Unmap an object from an address space. This usually consists of
- * setting the valid PTE entries to a reserved scratch page. */
- void (*unbind_vma)(struct i915_vma *vma);
- /* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
- int (*set_pages)(struct i915_vma *vma);
- void (*clear_pages)(struct i915_vma *vma);
+
+ struct i915_vma_ops vma_ops;
I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
I915_SELFTEST_DECLARE(bool scrub_64K);
@@ -367,7 +387,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
* the spec.
*/
struct i915_ggtt {
- struct i915_address_space base;
+ struct i915_address_space vm;
struct io_mapping iomap; /* Mapping to our CPU mappable region */
struct resource gmadr; /* GMADR resource */
@@ -385,9 +405,9 @@ struct i915_ggtt {
};
struct i915_hw_ppgtt {
- struct i915_address_space base;
+ struct i915_address_space vm;
struct kref ref;
- struct drm_mm_node node;
+
unsigned long pd_dirty_rings;
union {
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
@@ -395,13 +415,28 @@ struct i915_hw_ppgtt {
struct i915_page_directory pd; /* GEN6-7 */
};
+ void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+struct gen6_hw_ppgtt {
+ struct i915_hw_ppgtt base;
+
+ struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
+ gen6_pte_t scratch_pte;
- int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq);
- void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+ unsigned int pin_count;
+ bool scan_for_unused_pt;
};
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
+
+static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
+{
+ BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base));
+ return __to_gen6_ppgtt(base);
+}
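
Since the BUILD_BUG_ON pins base as the first member, the downcast is a zero-cost pointer conversion. A hypothetical caller:

/* Hypothetical: gen6-only state (vma, pd_addr, pin_count, ...) hangs
 * off the subclass while generic code keeps passing i915_hw_ppgtt. */
static void example_pin(struct i915_hw_ppgtt *base)
{
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);

	ppgtt->pin_count++;
}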
+
/*
* gen6_for_each_pde() iterates over every pde from start until start+length.
* If start and start+length are not perfectly divisible, the macro will round
@@ -440,8 +475,8 @@ static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
const u64 mask = ~((1ULL << pde_shift) - 1);
u64 end;
- WARN_ON(length == 0);
- WARN_ON(offset_in_page(addr|length));
+ GEM_BUG_ON(length == 0);
+ GEM_BUG_ON(offset_in_page(addr | length));
end = addr + length;
@@ -543,7 +578,7 @@ static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, base);
+ return container_of(vm, struct i915_ggtt, vm);
}
#define INTEL_MAX_PPAT_ENTRIES 8
@@ -591,8 +626,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv,
- const char *name);
+ struct drm_i915_file_private *fpriv);
void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
@@ -605,6 +639,9 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 54f00b350779..83e5e01fa9ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -141,7 +141,6 @@ struct drm_i915_gem_object {
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
*/
- unsigned long gt_ro:1;
unsigned int cache_level:3;
unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
@@ -268,7 +267,6 @@ struct drm_i915_gem_object {
union {
struct i915_gem_userptr {
uintptr_t ptr;
- unsigned read_only :1;
struct i915_mm_struct *mm;
struct i915_mmu_object *mmu_object;
@@ -337,26 +335,17 @@ __attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
- drm_gem_object_reference(&obj->base);
+ drm_gem_object_get(&obj->base);
return obj;
}
-__deprecated
-extern void drm_gem_object_reference(struct drm_gem_object *);
-
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
- __drm_gem_object_unreference(&obj->base);
+ __drm_gem_object_put(&obj->base);
}
-__deprecated
-extern void drm_gem_object_unreference(struct drm_gem_object *);
-
-__deprecated
-extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
reservation_object_lock(obj->resv, NULL);
@@ -367,6 +356,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
reservation_object_unlock(obj->resv);
}
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+ obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+ return obj->base.vma_node.readonly;
+}
+
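
The read-only flag now lives on the GEM object's vma_node, so mmap and pin paths can honour it as well as the PTE writers. A sketch of the kind of check this enables (error code and call site assumed, not quoted from this series):

/* Sketch: reject writable access to objects marked read-only. */
static int example_write_check(const struct drm_i915_gem_object *obj,
			       bool write)
{
	return (write && i915_gem_object_is_readonly(obj)) ? -EFAULT : 0;
}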
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 1036e8686916..90baf9086d0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
if (IS_ERR(so.obj))
return PTR_ERR(so.obj);
- so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+ so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma);
goto err_obj;
@@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
goto err_unpin;
}
- i915_vma_move_to_active(so.vma, rq, 0);
+ err = i915_vma_move_to_active(so.vma, rq, 0);
err_unpin:
i915_vma_unpin(so.vma);
err_vma:
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 5757fb7c4b5a..ea90d3a0d511 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -23,6 +23,7 @@
*/
#include <linux/oom.h>
+#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -172,7 +173,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
* we will free as much as we can and hope to get a second chance.
*/
if (flags & I915_SHRINK_ACTIVE)
- i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
trace_i915_gem_shrink(i915, target, flags);
i915_retire_requests(i915);
@@ -392,7 +395,8 @@ shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
do {
- if (i915_gem_wait_for_idle(i915, 0) == 0 &&
+ if (i915_gem_wait_for_idle(i915,
+ 0, MAX_SCHEDULE_TIMEOUT) == 0 &&
shrinker_lock(i915, unlock))
break;
@@ -466,7 +470,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
- ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
@@ -480,7 +486,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
- &i915->ggtt.base.inactive_list, vm_link) {
+ &i915->ggtt.vm.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count;
@@ -526,3 +532,14 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
unregister_shrinker(&i915->mm.shrinker);
}
+
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ mutex_lock(mutex);
+ mutex_unlock(mutex);
+ fs_reclaim_release(GFP_KERNEL);
+}
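
This primes lockdep: by taking the mutex once inside a simulated fs_reclaim section, any later inversion against direct reclaim is reported at init time instead of on a rare OOM path. Presumed call-site pattern (the real caller is elsewhere in the series):

/* Presumed usage: taint immediately after init, before first use. */
mutex_init(&vm->mutex);
i915_gem_shrinker_taints_mutex(&vm->mutex);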
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ad949cc30928..53440bf87650 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -254,6 +254,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
default:
MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+ /* fall through */
case GEN7_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
break;
@@ -343,6 +344,35 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
+static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ resource_size_t *base,
+ resource_size_t *size)
+{
+ u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED);
+
+ DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+
+ *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
+
+ switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+ case GEN8_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_2M:
+ *size = 2 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_4M:
+ *size = 4 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_8M:
+ *size = 8 * 1024 * 1024;
+ break;
+ default:
+ *size = 8 * 1024 * 1024;
+ MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
+ }
+}
+
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
resource_size_t reserved_base, stolen_top;
@@ -399,7 +429,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
gen7_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
- default:
+ case 8:
+ case 9:
+ case 10:
if (IS_LP(dev_priv))
chv_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
@@ -407,6 +439,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
bdw_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
+ case 11:
+ default:
+ icl_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ break;
}
/*
@@ -642,7 +679,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret)
goto err;
- vma = i915_vma_instance(obj, &ggtt->base, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_pages;
@@ -653,7 +690,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
- ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
@@ -666,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 854bd51b9478..2c9b284036d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -112,10 +112,11 @@ static void del_object(struct i915_mmu_object *mo)
mo->attached = false;
}
-static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
@@ -124,7 +125,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
LIST_HEAD(cancelled);
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
- return;
+ return 0;
/* interval ranges are inclusive, but invalidate range is exclusive */
end--;
@@ -132,6 +133,10 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
spin_lock(&mn->lock);
it = interval_tree_iter_first(&mn->objects, start, end);
while (it) {
+ if (!blockable) {
+ spin_unlock(&mn->lock);
+ return -EAGAIN;
+ }
/* The mmu_object is released late when destroying the
* GEM object so it is entirely possible to gain a
* reference on an object in the process of being freed
@@ -154,6 +159,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
if (!list_empty(&cancelled))
flush_workqueue(mn->wq);
+
+ return 0;
}
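
The return-type change follows the mmu_notifier rework that allows invalidate_range_start to fail. The contract, summarised (core mm, not this driver):

/*
 * blockable == true  -> the callback may sleep and must return 0.
 * blockable == false -> non-sleeping context (e.g. the OOM reaper);
 *                       return -EAGAIN instead of blocking, as the
 *                       early bail-out above does.
 */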
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -507,7 +514,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
struct mm_struct *mm = obj->userptr.mm->mm;
unsigned int flags = 0;
- if (!obj->userptr.read_only)
+ if (!i915_gem_object_is_readonly(obj))
flags |= FOLL_WRITE;
ret = -EFAULT;
@@ -643,7 +650,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (pvec) /* defer to worker if malloc fails */
pinned = __get_user_pages_fast(obj->userptr.ptr,
num_pages,
- !obj->userptr.read_only,
+ !i915_gem_object_is_readonly(obj),
pvec);
}
@@ -789,10 +796,15 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
- /* On almost all of the current hw, we cannot tell the GPU that a
- * page is readonly, so this is just a placeholder in the uAPI.
+ struct i915_hw_ppgtt *ppgtt;
+
+ /*
+ * On almost all of the older hw, we cannot tell the GPU that
+ * a page is readonly.
*/
- return -ENODEV;
+ ppgtt = dev_priv->kernel_context->ppgtt;
+ if (!ppgtt || !ppgtt->vm.has_read_only)
+ return -ENODEV;
}
obj = i915_gem_object_alloc(dev_priv);
@@ -806,7 +818,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
- obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+ if (args->flags & I915_USERPTR_READ_ONLY)
+ i915_gem_object_set_readonly(obj);
/* And keep a pointer to the current->mm for resolving the user pages
* at binding. This means that we need to hook into the mmu_notifier
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index df234dc23274..f7f2aa71d8d9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -31,6 +31,7 @@
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include <drm/drm_print.h>
+#include <linux/ascii85.h>
#include "i915_gpu_error.h"
#include "i915_drv.h"
@@ -335,21 +336,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
struct drm_i915_error_buffer *err,
int count)
{
- int i;
-
err_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x [ ",
+ err_printf(m, " %08x_%08x %8u %02x %02x %02x",
upper_32_bits(err->gtt_offset),
lower_32_bits(err->gtt_offset),
err->size,
err->read_domains,
- err->write_domain);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- err_printf(m, "%02x ", err->rseqno[i]);
-
- err_printf(m, "] %02x", err->wseqno);
+ err->write_domain,
+ err->wseqno);
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
@@ -522,35 +518,12 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-static int
-ascii85_encode_len(int len)
-{
- return DIV_ROUND_UP(len, 4);
-}
-
-static bool
-ascii85_encode(u32 in, char *out)
-{
- int i;
-
- if (in == 0)
- return false;
-
- out[5] = '\0';
- for (i = 5; i--; ) {
- out[i] = '!' + in % 85;
- in /= 85;
- }
-
- return true;
-}
-
static void print_error_obj(struct drm_i915_error_state_buf *m,
struct intel_engine_cs *engine,
const char *name,
struct drm_i915_error_object *obj)
{
- char out[6];
+ char out[ASCII85_BUFSZ];
int page;
if (!obj)
@@ -572,12 +545,8 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
len -= obj->unused;
len = ascii85_encode_len(len);
- for (i = 0; i < len; i++) {
- if (ascii85_encode(obj->pages[page][i], out))
- err_puts(m, out);
- else
- err_puts(m, "z");
- }
+ for (i = 0; i < len; i++)
+ err_puts(m, ascii85_encode(obj->pages[page][i], out));
}
err_puts(m, "\n");
}
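
The open-coded encoder removed above is replaced by the shared <linux/ascii85.h> helpers. For reference, they behave like this (paraphrased from the header; ASCII85_BUFSZ is 6):

static inline long ascii85_encode_len(long len)
{
	return DIV_ROUND_UP(len, 4);
}

static inline const char *ascii85_encode(u32 in, char *out)
{
	int i;

	if (in == 0)		/* all-zero words get the short form */
		return "z";

	out[5] = '\0';
	for (i = 5; i--; ) {
		out[i] = '!' + in % 85;
		in /= 85;
	}

	return out;
}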
@@ -973,8 +942,7 @@ i915_error_object_create(struct drm_i915_private *i915,
void __iomem *s;
int ret;
- ggtt->base.insert_page(&ggtt->base, dma, slot,
- I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
@@ -993,7 +961,7 @@ unwind:
out:
compress_fini(&compress, dst);
- ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
}
@@ -1022,13 +990,10 @@ static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- int i;
err->size = obj->base.size;
err->name = obj->base.name;
- for (i = 0; i < I915_NUM_ENGINES; i++)
- err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
err->engine = __active_get_engine_id(&obj->frontbuffer_write);
@@ -1051,6 +1016,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
int i = 0;
list_for_each_entry(vma, head, vm_link) {
+ if (!vma->obj)
+ continue;
+
if (pinned_only && !i915_vma_is_pinned(vma))
continue;
@@ -1287,9 +1255,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
static void record_request(struct i915_request *request,
struct drm_i915_error_request *erq)
{
- erq->context = request->ctx->hw_id;
+ struct i915_gem_context *ctx = request->gem_context;
+
+ erq->context = ctx->hw_id;
erq->sched_attr = request->sched.attr;
- erq->ban_score = atomic_read(&request->ctx->ban_score);
+ erq->ban_score = atomic_read(&ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
@@ -1297,7 +1267,7 @@ static void record_request(struct i915_request *request,
erq->tail = request->tail;
rcu_read_lock();
- erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+ erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
rcu_read_unlock();
}
@@ -1461,12 +1431,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
request = i915_gem_find_active_request(engine);
if (request) {
+ struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
- ee->vm = request->ctx->ppgtt ?
- &request->ctx->ppgtt->base : &ggtt->base;
+ ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
- record_context(&ee->context, request->ctx);
+ record_context(&ee->context, ctx);
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
@@ -1483,11 +1453,10 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->ctx =
i915_error_object_create(i915,
- to_intel_context(request->ctx,
- engine)->state);
+ request->hw_context->state);
error->simulated |=
- i915_gem_context_no_error_capture(request->ctx);
+ i915_gem_context_no_error_capture(ctx);
ee->rq_head = request->head;
ee->rq_post = request->postfix;
@@ -1563,17 +1532,17 @@ static void capture_active_buffers(struct i915_gpu_state *error)
static void capture_pinned_buffers(struct i915_gpu_state *error)
{
- struct i915_address_space *vm = &error->i915->ggtt.base;
+ struct i915_address_space *vm = &error->i915->ggtt.vm;
struct drm_i915_error_buffer *bo;
struct i915_vma *vma;
int count_inactive, count_active;
count_inactive = 0;
- list_for_each_entry(vma, &vm->active_list, vm_link)
+ list_for_each_entry(vma, &vm->inactive_list, vm_link)
count_inactive++;
count_active = 0;
- list_for_each_entry(vma, &vm->inactive_list, vm_link)
+ list_for_each_entry(vma, &vm->active_list, vm_link)
count_active++;
bo = NULL;
@@ -1667,7 +1636,16 @@ static void capture_reg_state(struct i915_gpu_state *error)
}
/* 4: Everything else */
- if (INTEL_GEN(dev_priv) >= 8) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ error->ier = I915_READ(GEN8_DE_MISC_IER);
+ error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
+ error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
+ error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
+ error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
+ error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
+ error->ngtier = 6;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index dac0f8c4c1cf..f893a4e8b783 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -58,7 +58,7 @@ struct i915_gpu_state {
u32 eir;
u32 pgtbl_er;
u32 ier;
- u32 gtier[4], ngtier;
+ u32 gtier[6], ngtier;
u32 ccid;
u32 derrmr;
u32 forcewake;
@@ -177,7 +177,7 @@ struct i915_gpu_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 rseqno[I915_NUM_ENGINES], wseqno;
+ u32 wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4a02747ac658..90628a47ae17 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -115,6 +115,22 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
+static const u32 hpd_gen11[HPD_NUM_PINS] = {
+ [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
+ [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
+ [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
+ [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
+};
+
+static const u32 hpd_icp[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
+ [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
+ [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
+ [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
+ [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
+ [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
+};
+
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
@@ -247,9 +263,9 @@ static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit);
-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
- const unsigned int bank,
- const unsigned int bit)
+static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+ const unsigned int bank,
+ const unsigned int bit)
{
void __iomem * const regs = i915->regs;
u32 dw;
@@ -1138,21 +1154,21 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
static void notify_ring(struct intel_engine_cs *engine)
{
+ const u32 seqno = intel_engine_get_seqno(engine);
struct i915_request *rq = NULL;
+ struct task_struct *tsk = NULL;
struct intel_wait *wait;
- if (!engine->breadcrumbs.irq_armed)
+ if (unlikely(!engine->breadcrumbs.irq_armed))
return;
- atomic_inc(&engine->irq_count);
- set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ rcu_read_lock();
spin_lock(&engine->breadcrumbs.irq_lock);
wait = engine->breadcrumbs.irq_wait;
if (wait) {
- bool wakeup = engine->irq_seqno_barrier;
-
- /* We use a callback from the dma-fence to submit
+ /*
+ * We use a callback from the dma-fence to submit
* requests after waiting on our own requests. To
* ensure minimum delay in queuing the next request to
* hardware, signal the fence now rather than wait for
@@ -1163,19 +1179,26 @@ static void notify_ring(struct intel_engine_cs *engine)
* and to handle coalescing of multiple seqno updates
* and many waiters.
*/
- if (i915_seqno_passed(intel_engine_get_seqno(engine),
- wait->seqno)) {
+ if (i915_seqno_passed(seqno, wait->seqno)) {
struct i915_request *waiter = wait->request;
- wakeup = true;
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ if (waiter &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&waiter->fence.flags) &&
intel_wait_check_request(wait, waiter))
rq = i915_request_get(waiter);
+
+ tsk = wait->tsk;
+ } else {
+ if (engine->irq_seqno_barrier &&
+ i915_seqno_passed(seqno, wait->seqno - 1)) {
+ set_bit(ENGINE_IRQ_BREADCRUMB,
+ &engine->irq_posted);
+ tsk = wait->tsk;
+ }
}
- if (wakeup)
- wake_up_process(wait->tsk);
+ engine->breadcrumbs.irq_count++;
} else {
if (engine->breadcrumbs.irq_armed)
__intel_engine_disarm_breadcrumbs(engine);
@@ -1183,11 +1206,19 @@ static void notify_ring(struct intel_engine_cs *engine)
spin_unlock(&engine->breadcrumbs.irq_lock);
if (rq) {
- dma_fence_signal(&rq->fence);
+ spin_lock(&rq->lock);
+ dma_fence_signal_locked(&rq->fence);
GEM_BUG_ON(!i915_request_completed(rq));
+ spin_unlock(&rq->lock);
+
i915_request_put(rq);
}
+ if (tsk && tsk->state & TASK_NORMAL)
+ wake_up_process(tsk);
+
+ rcu_read_unlock();
+
trace_intel_engine_notify(engine, wait);
}
@@ -1234,9 +1265,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
c0 = max(render, media);
c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
- if (c0 > time * rps->up_threshold)
+ if (c0 > time * rps->power.up_threshold)
events = GEN6_PM_RP_UP_THRESHOLD;
- else if (c0 < time * rps->down_threshold)
+ else if (c0 < time * rps->power.down_threshold)
events = GEN6_PM_RP_DOWN_THRESHOLD;
}
@@ -1462,14 +1493,10 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
bool tasklet = false;
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
- if (READ_ONCE(engine->execlists.active))
- tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted);
- }
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+ tasklet = true;
if (iir & GT_RENDER_USER_INTERRUPT) {
notify_ring(engine);
@@ -1477,7 +1504,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
}
if (tasklet)
- tasklet_hi_schedule(&execlists->tasklet);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
@@ -1549,78 +1576,122 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
}
}
-static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
+static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_C:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_D:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_E:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_F:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
+ default:
+ return false;
+ }
+}
+
+static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_A:
return val & PORTA_HOTPLUG_LONG_DETECT;
- case PORT_B:
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
+static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_A:
+ return val & ICP_DDIA_HPD_LONG_DETECT;
+ case HPD_PORT_B:
+ return val & ICP_DDIB_HPD_LONG_DETECT;
+ default:
+ return false;
+ }
+}
+
+static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_C:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_D:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_E:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_F:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
+ default:
+ return false;
+ }
+}
+
+static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_E:
+ switch (pin) {
+ case HPD_PORT_E:
return val & PORTE_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool spt_port_hotplug_long_detect(enum port port, u32 val)
+static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_A:
return val & PORTA_HOTPLUG_LONG_DETECT;
- case PORT_B:
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
+static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_A:
return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool pch_port_hotplug_long_detect(enum port port, u32 val)
+static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_B:
+ switch (pin) {
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
+static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_B:
+ switch (pin) {
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_INT_LONG_PULSE;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_INT_LONG_PULSE;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_INT_LONG_PULSE;
default:
return false;
@@ -1638,27 +1709,22 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
u32 *pin_mask, u32 *long_mask,
u32 hotplug_trigger, u32 dig_hotplug_reg,
const u32 hpd[HPD_NUM_PINS],
- bool long_pulse_detect(enum port port, u32 val))
+ bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
- enum port port;
- int i;
+ enum hpd_pin pin;
- for_each_hpd_pin(i) {
- if ((hpd[i] & hotplug_trigger) == 0)
+ for_each_hpd_pin(pin) {
+ if ((hpd[pin] & hotplug_trigger) == 0)
continue;
- *pin_mask |= BIT(i);
-
- port = intel_hpd_pin_to_port(dev_priv, i);
- if (port == PORT_NONE)
- continue;
+ *pin_mask |= BIT(pin);
- if (long_pulse_detect(port, dig_hotplug_reg))
- *long_mask |= BIT(i);
+ if (long_pulse_detect(pin, dig_hotplug_reg))
+ *long_mask |= BIT(pin);
}
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask);
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
@@ -1680,69 +1746,34 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
uint32_t crc4)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_pipe_crc_entry *entry;
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct drm_driver *driver = dev_priv->drm.driver;
uint32_t crcs[5];
- int head, tail;
spin_lock(&pipe_crc->lock);
- if (pipe_crc->source && !crtc->base.crc.opened) {
- if (!pipe_crc->entries) {
- spin_unlock(&pipe_crc->lock);
- DRM_DEBUG_KMS("spurious interrupt\n");
- return;
- }
-
- head = pipe_crc->head;
- tail = pipe_crc->tail;
-
- if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
- spin_unlock(&pipe_crc->lock);
- DRM_ERROR("CRC buffer overflowing\n");
- return;
- }
-
- entry = &pipe_crc->entries[head];
-
- entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
- entry->crc[0] = crc0;
- entry->crc[1] = crc1;
- entry->crc[2] = crc2;
- entry->crc[3] = crc3;
- entry->crc[4] = crc4;
-
- head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
- pipe_crc->head = head;
-
- spin_unlock(&pipe_crc->lock);
-
- wake_up_interruptible(&pipe_crc->wq);
- } else {
- /*
- * For some not yet identified reason, the first CRC is
- * bonkers. So let's just wait for the next vblank and read
- * out the buggy result.
- *
- * On GEN8+ sometimes the second CRC is bonkers as well, so
- * don't trust that one either.
- */
- if (pipe_crc->skipped <= 0 ||
- (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
- pipe_crc->skipped++;
- spin_unlock(&pipe_crc->lock);
- return;
- }
+ /*
+ * For some not yet identified reason, the first CRC is
+ * bonkers. So let's just wait for the next vblank and read
+ * out the buggy result.
+ *
+ * On GEN8+ sometimes the second CRC is bonkers as well, so
+ * don't trust that one either.
+ */
+ if (pipe_crc->skipped <= 0 ||
+ (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
- crcs[0] = crc0;
- crcs[1] = crc1;
- crcs[2] = crc2;
- crcs[3] = crc3;
- crcs[4] = crc4;
- drm_crtc_add_crc_entry(&crtc->base, true,
- drm_crtc_accurate_vblank_count(&crtc->base),
- crcs);
+ return;
}
+ spin_unlock(&pipe_crc->lock);
+
+ crcs[0] = crc0;
+ crcs[1] = crc1;
+ crcs[2] = crc2;
+ crcs[3] = crc3;
+ crcs[4] = crc4;
+ drm_crtc_add_crc_entry(&crtc->base, true,
+ drm_crtc_accurate_vblank_count(&crtc->base),
+ crcs);
}
#else
static inline void
@@ -1998,10 +2029,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 hotplug_status = 0, hotplug_status_mask;
+ int i;
+
+ if (IS_G4X(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
+ DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
+ else
+ hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
+
+ /*
+ * We absolutely have to clear all the pending interrupt
+ * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
+ * interrupt bit won't have an edge, and the i965/g4x
+ * edge triggered IIR will not notice that an interrupt
+ * is still pending. We can't use PORT_HOTPLUG_EN to
+ * guarantee the edge as the act of toggling the enable
+ * bits can itself generate a new hotplug interrupt :(
+ */
+ for (i = 0; i < 10; i++) {
+ u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+
+ if (tmp == 0)
+ return hotplug_status;
- if (hotplug_status)
+ hotplug_status |= tmp;
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ }
+
+ WARN_ONCE(1,
+ "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+ I915_READ(PORT_HOTPLUG_STAT));
return hotplug_status;
}
@@ -2108,7 +2167,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
- POSTING_READ(VLV_MASTER_IER);
if (gt_iir)
snb_gt_irq_handler(dev_priv, gt_iir);
@@ -2193,7 +2251,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
@@ -2362,6 +2419,43 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+{
+ u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ u32 pin_mask = 0, long_mask = 0;
+
+ if (ddi_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
+ I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ ddi_hotplug_trigger,
+ dig_hotplug_reg, hpd_icp,
+ icp_ddi_port_hotplug_long_detect);
+ }
+
+ if (tc_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
+ I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ tc_hotplug_trigger,
+ dig_hotplug_reg, hpd_icp,
+ icp_tc_port_hotplug_long_detect);
+ }
+
+ if (pin_mask)
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+
+ if (pch_iir & SDE_GMBUS_ICP)
+ gmbus_irq_handler(dev_priv);
+}
+
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
@@ -2525,7 +2619,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
@@ -2535,7 +2628,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (!HAS_PCH_NOP(dev_priv)) {
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
- POSTING_READ(SDEIER);
}
/* Find, clear, then process each source of interrupt */
@@ -2570,11 +2662,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
}
I915_WRITE(DEIER, de_ier);
- POSTING_READ(DEIER);
- if (!HAS_PCH_NOP(dev_priv)) {
+ if (!HAS_PCH_NOP(dev_priv))
I915_WRITE(SDEIER, sde_ier);
- POSTING_READ(SDEIER);
- }
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
enable_rpm_wakeref_asserts(dev_priv);
@@ -2598,6 +2687,40 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
+static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+{
+ u32 pin_mask = 0, long_mask = 0;
+ u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
+ u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
+
+ if (trigger_tc) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
+ dig_hotplug_reg, hpd_gen11,
+ gen11_port_hotplug_long_detect);
+ }
+
+ if (trigger_tbt) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
+ dig_hotplug_reg, hpd_gen11,
+ gen11_port_hotplug_long_detect);
+ }
+
+ if (pin_mask)
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ else
+ DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2633,6 +2756,17 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
}
+ if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ iir = I915_READ(GEN11_DE_HPD_IIR);
+ if (iir) {
+ I915_WRITE(GEN11_DE_HPD_IIR, iir);
+ ret = IRQ_HANDLED;
+ gen11_hpd_irq_handler(dev_priv, iir);
+ } else {
+ DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
+ }
+ }
+
if (master_ctl & GEN8_DE_PORT_IRQ) {
iir = I915_READ(GEN8_DE_PORT_IIR);
if (iir) {
@@ -2648,7 +2782,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ tmp_mask |= ICL_AUX_CHANNEL_E;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 11)
tmp_mask |= CNL_AUX_CHANNEL_F;
if (iir & tmp_mask) {
@@ -2732,8 +2870,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ if (HAS_PCH_ICP(dev_priv))
+ icp_irq_handler(dev_priv, iir);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) ||
+ HAS_PCH_CNP(dev_priv))
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@@ -2950,11 +3091,44 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
spin_unlock(&i915->irq_lock);
}
+static void
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
+ u32 *iir)
+{
+ void __iomem * const regs = dev_priv->regs;
+
+ if (!(master_ctl & GEN11_GU_MISC_IRQ))
+ return;
+
+ *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+ if (likely(*iir))
+ raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+}
+
+static void
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
+ const u32 master_ctl, const u32 iir)
+{
+ if (!(master_ctl & GEN11_GU_MISC_IRQ))
+ return;
+
+ if (unlikely(!iir)) {
+ DRM_ERROR("GU_MISC iir blank!\n");
+ return;
+ }
+
+ if (iir & GEN11_GU_MISC_GSE)
+ intel_opregion_asle_intr(dev_priv);
+ else
+ DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
+}
+
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
void __iomem * const regs = i915->regs;
u32 master_ctl;
+ u32 gu_misc_iir;
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
@@ -2983,9 +3157,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(i915);
}
+ gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+
/* Acknowledge and enable interrupts. */
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+ gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+
return IRQ_HANDLED;
}
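Note the ordering gen11_irq_handler() ends up with: the GU_MISC IIR is acked while the master interrupt is still gated, the master bit is re-enabled immediately afterwards, and only then does the (potentially slower) handling run. A minimal sketch of that split, with hypothetical ack()/handle()/write_reg() helpers:

    /* With the master bit gated: latch and clear the IIR (cheap). */
    ack(master_ctl, &iir);
    /* Re-enable the master interrupt as early as possible. */
    write_reg(MSTR_IRQ, MASTER_IRQ | master_ctl);
    /* Do the actual, possibly slower, processing afterwards. */
    handle(master_ctl, iir);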
@@ -3061,7 +3239,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
*/
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
- I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
}
}
@@ -3472,7 +3650,12 @@ static void gen11_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(GEN8_DE_PORT_);
GEN3_IRQ_RESET(GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(GEN11_DE_HPD_);
+ GEN3_IRQ_RESET(GEN11_GU_MISC_);
GEN3_IRQ_RESET(GEN8_PCU_);
+
+ if (HAS_PCH_ICP(dev_priv))
+ GEN3_IRQ_RESET(SDE);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3589,6 +3772,73 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
+static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug;
+
+ hotplug = I915_READ(SHOTPLUG_CTL_DDI);
+ hotplug |= ICP_DDIA_HPD_ENABLE |
+ ICP_DDIB_HPD_ENABLE;
+ I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+
+ hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
+ ICP_TC_HPD_ENABLE(PORT_TC2) |
+ ICP_TC_HPD_ENABLE(PORT_TC3) |
+ ICP_TC_HPD_ENABLE(PORT_TC4);
+ I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+}
+
+static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ icp_hpd_detection_setup(dev_priv);
+}
+
+static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug;
+
+ hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+ I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+
+ hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+ I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+}
+
+static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+ u32 val;
+
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
+ hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
+
+ val = I915_READ(GEN11_DE_HPD_IMR);
+ val &= ~hotplug_irqs;
+ I915_WRITE(GEN11_DE_HPD_IMR, val);
+ POSTING_READ(GEN11_DE_HPD_IMR);
+
+ gen11_hpd_detection_setup(dev_priv);
+
+ if (HAS_PCH_ICP(dev_priv))
+ icp_hpd_irq_setup(dev_priv);
+}
+
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 val, hotplug;
@@ -3915,9 +4165,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
- u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR;
+ u32 de_misc_masked = GEN8_DE_EDP_PSR;
enum pipe pipe;
+ if (INTEL_GEN(dev_priv) <= 10)
+ de_misc_masked |= GEN8_DE_MISC_GSE;
+
if (INTEL_GEN(dev_priv) >= 9) {
de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
@@ -3928,7 +4181,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ de_port_masked |= ICL_AUX_CHANNEL_E;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
de_port_masked |= CNL_AUX_CHANNEL_F;
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3956,10 +4212,18 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
- if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11) {
+ u32 de_hpd_masked = 0;
+ u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
+ GEN11_DE_TBT_HOTPLUG_MASK;
+
+ GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
+ gen11_hpd_detection_setup(dev_priv);
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_hpd_detection_setup(dev_priv);
- else if (IS_BROADWELL(dev_priv))
+ } else if (IS_BROADWELL(dev_priv)) {
ilk_hpd_detection_setup(dev_priv);
+ }
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4008,13 +4272,34 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}
+static void icp_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 mask = SDE_GMBUS_ICP;
+
+ WARN_ON(I915_READ(SDEIER) != 0);
+ I915_WRITE(SDEIER, 0xffffffff);
+ POSTING_READ(SDEIER);
+
+ gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ I915_WRITE(SDEIMR, ~mask);
+
+ icp_hpd_detection_setup(dev_priv);
+}
+
static int gen11_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gu_misc_masked = GEN11_GU_MISC_GSE;
+
+ if (HAS_PCH_ICP(dev_priv))
+ icp_irq_postinstall(dev);
gen11_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
+ GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
@@ -4062,11 +4347,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
@@ -4081,6 +4368,81 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
+static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
+ u16 *eir, u16 *eir_stuck)
+{
+ u16 emr;
+
+ *eir = I915_READ16(EIR);
+
+ if (*eir)
+ I915_WRITE16(EIR, *eir);
+
+ *eir_stuck = I915_READ16(EIR);
+ if (*eir_stuck == 0)
+ return;
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits. Otherwise the edge triggered
+ * IIR on i965/g4x wouldn't notice that an interrupt
+ * is still pending. Also some EIR bits can't be
+ * cleared except by handling the underlying error
+ * (or by a GPU reset) so we mask any bit that
+ * remains set.
+ */
+ emr = I915_READ16(EMR);
+ I915_WRITE16(EMR, 0xffff);
+ I915_WRITE16(EMR, emr | *eir_stuck);
+}
+
+static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
+ u16 eir, u16 eir_stuck)
+{
+ DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
+
+ if (eir_stuck)
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
+}
+
+static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
+ u32 *eir, u32 *eir_stuck)
+{
+ u32 emr;
+
+ *eir = I915_READ(EIR);
+
+ I915_WRITE(EIR, *eir);
+
+ *eir_stuck = I915_READ(EIR);
+ if (*eir_stuck == 0)
+ return;
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits. Otherwise the edge triggered
+ * IIR on i965/g4x wouldn't notice that an interrupt
+ * is still pending. Also some EIR bits can't be
+ * cleared except by handling the underlying error
+ * (or by a GPU reset) so we mask any bit that
+ * remains set.
+ */
+ emr = I915_READ(EMR);
+ I915_WRITE(EMR, 0xffffffff);
+ I915_WRITE(EMR, emr | *eir_stuck);
+}
+
+static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
+ u32 eir, u32 eir_stuck)
+{
+ DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
+
+ if (eir_stuck)
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
+}
+
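The comment block repeated in both ack helpers describes an edge-forcing trick that a worked example may make concrete. Assume, purely for illustration, that the clearing write left bit 2 of EIR stuck:

    u16 emr = I915_READ16(EMR);      /* previous mask, say 0x0000          */
    I915_WRITE16(EMR, 0xffff);       /* mask everything: the internal      */
                                     /* error signal drops to 0            */
    I915_WRITE16(EMR, emr | 0x0004); /* unmask all but the stuck bit; the  */
                                     /* next error yields a fresh 0->1     */
                                     /* edge and hence a new IIR assertion */

The stuck bit stays masked, so it can no longer hold the master error signal high and swallow later edges.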
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -4095,6 +4457,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u16 eir = 0, eir_stuck = 0;
u16 iir;
iir = I915_READ16(IIR);
@@ -4107,13 +4470,16 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE16(IIR, iir);
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
@@ -4151,12 +4517,14 @@ static int i915_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask =
~(I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev_priv)) {
@@ -4194,6 +4562,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 eir = 0, eir_stuck = 0;
u32 hotplug_status = 0;
u32 iir;
@@ -4211,13 +4580,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -4271,14 +4643,14 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (IS_G4X(dev_priv))
@@ -4338,6 +4710,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 eir = 0, eir_stuck = 0;
u32 hotplug_status = 0;
u32 iir;
@@ -4354,6 +4727,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
@@ -4362,8 +4738,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev_priv->engine[VCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -4478,7 +4854,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen11_irq_reset;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
} else if (INTEL_GEN(dev_priv) >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 66ea3552c63e..295e981e4a39 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -44,10 +44,6 @@ i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
-i915_param_named_unsafe(panel_ignore_lid, int, 0600,
- "Override lid status (0=autodetect, 1=autodetect disabled [default], "
- "-1=force lid closed, -2=force lid open)");
-
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
@@ -92,7 +88,7 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400,
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
- "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+ "(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
i915_param_named_unsafe(alpha_support, bool, 0400,
@@ -130,9 +126,6 @@ i915_param_named_unsafe(invert_brightness, int, 0600,
i915_param_named(disable_display, bool, 0400,
"Disable display (default: false)");
-i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
- "Enable command parsing (true=enabled [default], false=disabled)");
-
i915_param_named(mmio_debug, int, 0600,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6684025b7af8..6c4d4a21474b 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -36,7 +36,6 @@ struct drm_printer;
#define I915_PARAMS_FOR_EACH(param) \
param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \
- param(int, panel_ignore_lid, 1) \
param(int, lvds_channel_mode, 0) \
param(int, panel_use_ssc, -1) \
param(int, vbt_sdvo_panel_type, -1) \
@@ -58,7 +57,6 @@ struct drm_printer;
param(unsigned int, inject_load_failure, 0) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
- param(bool, enable_cmd_parser, true) \
param(bool, enable_hangcheck, true) \
param(bool, fastboot, false) \
param(bool, prefault_disable, false) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 4364922e935d..6a4d1388ad2d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -340,7 +340,6 @@ static const struct intel_device_info intel_valleyview_info = {
GEN(7),
.is_lp = 1,
.num_pipes = 2,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_gmch_display = 1,
@@ -433,7 +432,6 @@ static const struct intel_device_info intel_cherryview_info = {
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
.has_rc6 = 1,
@@ -659,12 +657,15 @@ static const struct pci_device_id pciidlist[] = {
INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+ INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+ INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
{0, 0, 0}
@@ -673,10 +674,16 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static void i915_pci_remove(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_device *dev;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev) /* driver load aborted, nothing to clean up */
+ return;
i915_driver_unload(dev);
drm_dev_put(dev);
+
+ pci_set_drvdata(pdev, NULL);
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -711,6 +718,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
+ if (i915_inject_load_failure()) {
+ i915_pci_remove(pdev);
+ return -ENODEV;
+ }
+
err = i915_live_selftests(pdev);
if (err) {
i915_pci_remove(pdev);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 019bd2d073ad..6bf10952c724 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -315,7 +315,7 @@ static u32 i915_oa_max_sample_rate = 100000;
* code assumes all reports have a power-of-two size and ~(size - 1) can
* be used as a mask to align the OA tail pointer.
*/
-static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A13] = { 0, 64 },
[I915_OA_FORMAT_A29] = { 1, 128 },
[I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
@@ -326,7 +326,7 @@ static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
};
-static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A12] = { 0, 64 },
[I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
@@ -737,12 +737,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
continue;
}
- /*
- * XXX: Just keep the lower 21 bits for now since I'm not
- * entirely sure if the HW touches any of the higher bits in
- * this field
- */
- ctx_id = report32[2] & 0x1fffff;
+ ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
/*
* Squash whatever is in the CTX_ID field if it's marked as
@@ -1203,6 +1198,33 @@ static int i915_oa_read(struct i915_perf_stream *stream,
return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}
+static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx)
+{
+ struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_context *ce;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(&i915->drm);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * As the ID is the gtt offset of the context's vma, we
+ * pin the vma to ensure the ID remains fixed.
+ *
+ * NB: implied RCS engine...
+ */
+ ce = intel_context_pin(ctx, engine);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ce))
+ return ce;
+
+ i915->perf.oa.pinned_ctx = ce;
+
+ return ce;
+}
+
/**
* oa_get_render_ctx_id - determine and hold ctx hw id
* @stream: An i915-perf stream opened for OA metrics
@@ -1215,40 +1237,76 @@ static int i915_oa_read(struct i915_perf_stream *stream,
*/
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct drm_i915_private *i915 = stream->dev_priv;
+ struct intel_context *ce;
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
- } else {
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
- struct intel_ring *ring;
- int ret;
-
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
+ ce = oa_pin_context(i915, stream->ctx);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+ switch (INTEL_GEN(i915)) {
+ case 7: {
/*
- * As the ID is the gtt offset of the context's vma we
- * pin the vma to ensure the ID remains fixed.
- *
- * NB: implied RCS engine...
+ * On Haswell we don't do any post processing of the reports
+ * and don't need to use the mask.
*/
- ring = intel_context_pin(stream->ctx, engine);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (IS_ERR(ring))
- return PTR_ERR(ring);
+ i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
+ i915->perf.oa.specific_ctx_id_mask = 0;
+ break;
+ }
+ case 8:
+ case 9:
+ case 10:
+ if (USES_GUC_SUBMISSION(i915)) {
+ /*
+ * When using GuC, the context descriptor we write in
+ * i915 is read by GuC and rewritten before it's
+ * actually written into the hardware. The LRCA is
+ * what is put into the context id field of the
+ * context descriptor by GuC. Because it's aligned to
+ * a page, the lower 12 bits are always 0 and
+ * dropped by GuC. They won't be part of the context
+ * ID in the OA reports, so squash those lower bits.
+ */
+ i915->perf.oa.specific_ctx_id =
+ lower_32_bits(ce->lrc_desc) >> 12;
- /*
- * Explicitly track the ID (instead of calling
- * i915_ggtt_offset() on the fly) considering the difference
- * with gen8+ and execlists
- */
- dev_priv->perf.oa.specific_ctx_id =
- i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
+ /*
+ * GuC uses the top bit to signal proxy submission, so
+ * ignore that bit.
+ */
+ i915->perf.oa.specific_ctx_id_mask =
+ (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+ } else {
+ i915->perf.oa.specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ i915->perf.oa.specific_ctx_id =
+ upper_32_bits(ce->lrc_desc);
+ i915->perf.oa.specific_ctx_id &=
+ i915->perf.oa.specific_ctx_id_mask;
+ }
+ break;
+
+ case 11: {
+ i915->perf.oa.specific_ctx_id_mask =
+ ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
+ ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
+ ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
+ i915->perf.oa.specific_ctx_id &=
+ i915->perf.oa.specific_ctx_id_mask;
+ break;
+ }
+
+ default:
+ MISSING_CASE(INTEL_GEN(i915));
}
+ DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+ i915->perf.oa.specific_ctx_id,
+ i915->perf.oa.specific_ctx_id_mask);
+
return 0;
}
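The GuC branch above shifts the LRCA right by 12 before using it as the context ID; since the context image is page aligned, only guaranteed-zero bits are discarded. Worked through with an assumed LRCA (the value is made up for illustration):

    u32 lrca   = 0x00fed000;        /* page-aligned GGTT offset           */
    u32 ctx_id = lrca >> 12;        /* 0x00000fed, as GuC reports it      */
    u32 mask   = (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
                                    /* top bit excluded: GuC uses it to   */
                                    /* signal proxy submission            */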
@@ -1262,17 +1320,15 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_context *ce;
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- } else {
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+ dev_priv->perf.oa.specific_ctx_id_mask = 0;
+ ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+ if (ce) {
mutex_lock(&dev_priv->drm.struct_mutex);
-
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- intel_context_unpin(stream->ctx, engine);
-
+ intel_context_unpin(ce);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
}
@@ -1780,7 +1836,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* So far the best way to work around this issue seems to be draining
* the GPU from any submitted work.
*/
- ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
+ ret = i915_gem_wait_for_idle(dev_priv,
+ wait_flags,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index dc87797db500..d6c8f8fdfda5 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -4,6 +4,7 @@
* Copyright © 2017-2018 Intel Corporation
*/
+#include <linux/irq.h>
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
#include "i915_drv.h"
@@ -127,6 +128,7 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
i915->pmu.timer_enabled = true;
+ i915->pmu.timer_last = ktime_get();
hrtimer_start_range_ns(&i915->pmu.timer,
ns_to_ktime(PERIOD), 0,
HRTIMER_MODE_REL_PINNED);
@@ -155,12 +157,13 @@ static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
}
static void
-update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+add_sample(struct i915_pmu_sample *sample, u32 val)
{
- sample->cur += mul_u32_u32(val, unit);
+ sample->cur += val;
}
-static void engines_sample(struct drm_i915_private *dev_priv)
+static void
+engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -182,8 +185,9 @@ static void engines_sample(struct drm_i915_private *dev_priv)
val = !i915_seqno_passed(current_seqno, last_seqno);
- update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
- PERIOD, val);
+ if (val)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+ period_ns);
if (val && (engine->pmu.enable &
(BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
@@ -194,11 +198,13 @@ static void engines_sample(struct drm_i915_private *dev_priv)
val = 0;
}
- update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
- PERIOD, !!(val & RING_WAIT));
+ if (val & RING_WAIT)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+ period_ns);
- update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
- PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+ if (val & RING_WAIT_SEMAPHORE)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+ period_ns);
}
if (fw)
@@ -207,7 +213,14 @@ static void engines_sample(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
-static void frequency_sample(struct drm_i915_private *dev_priv)
+static void
+add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
+{
+ sample->cur += mul_u32_u32(val, mul);
+}
+
+static void
+frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
if (dev_priv->pmu.enable &
config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
@@ -221,15 +234,17 @@ static void frequency_sample(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
- update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
- 1, intel_gpu_freq(dev_priv, val));
+ add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+ intel_gpu_freq(dev_priv, val),
+ period_ns / 1000);
}
if (dev_priv->pmu.enable &
config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
- update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq));
+ add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
+ intel_gpu_freq(dev_priv,
+ dev_priv->gt_pm.rps.cur_freq),
+ period_ns / 1000);
}
}
@@ -237,14 +252,27 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
+ unsigned int period_ns;
+ ktime_t now;
if (!READ_ONCE(i915->pmu.timer_enabled))
return HRTIMER_NORESTART;
- engines_sample(i915);
- frequency_sample(i915);
+ now = ktime_get();
+ period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
+ i915->pmu.timer_last = now;
+
+ /*
+ * Strictly speaking the passed-in period may not be 100% accurate for
+ * all internal calculations, since some amount of time can be spent on
+ * grabbing the forcewake. However, the potential error from timer
+ * callback delay greatly dominates this, so we keep it simple.
+ */
+ engines_sample(i915, period_ns);
+ frequency_sample(i915, period_ns);
+
+ hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
- hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
return HRTIMER_RESTART;
}
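By timestamping each callback and sampling with the measured delta, the accounting stays correct even when the hrtimer fires late. For example, if the timer was programmed for the nominal PERIOD (5 ms at the default 200 Hz sampling rate) but the callback actually ran 7 ms after the previous one:

    period_ns = ktime_to_ns(ktime_sub(now, timer_last)); /* 7000000, not 5000000 */
    if (engine_was_busy)
            sample->cur += period_ns;  /* credited busy time tracks wall time */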
@@ -519,12 +547,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
case I915_PMU_ACTUAL_FREQUENCY:
val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
- FREQUENCY);
+ USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_REQUESTED_FREQUENCY:
val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
- FREQUENCY);
+ USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
val = count_interrupts(i915);
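The unit bookkeeping behind the two changes above: add_sample_mult() accumulates MHz multiplied by microseconds, so dividing the counter by USEC_PER_SEC on the read side yields an average frequency in MHz. Worked through for a GPU pinned at 300 MHz over one accumulated second of samples:

    u64 cur = 0;
    cur += mul_u32_u32(300, 1000000);      /* 300 MHz * 1000000 us         */
    /* on the event read side: */
    u64 avg = div_u64(cur, USEC_PER_SEC);  /* 300000000 / 1000000 == 300   */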
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 2ba735299f7c..7f164ca3db12 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -65,6 +65,14 @@ struct i915_pmu {
* event types.
*/
u64 enable;
+
+ /**
+ * @timer_last:
+ *
+ * Timestamp of the previous timer invocation.
+ */
+ ktime_t timer_last;
+
/**
* @enable_count: Reference counts for the enabled events.
*
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 195203f298df..eeaa3d506d95 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -54,6 +54,7 @@ enum vgt_g2v_type {
*/
#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
#define VGT_CAPS_HWSP_EMULATION BIT(3)
+#define VGT_CAPS_HUGE_GTT BIT(4)
struct vgt_if {
u64 magic; /* VGT_MAGIC */
@@ -93,7 +94,10 @@ struct vgt_if {
u32 rsv5[4];
u32 g2v_notify;
- u32 rsv6[7];
+ u32 rsv6[5];
+
+ u32 cursor_x_hot;
+ u32 cursor_y_hot;
struct {
u32 lo;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7720569f2024..08ec7446282e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -139,23 +139,40 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}
+/*
+ * Given the first two numbers __a and __b of arbitrarily many evenly spaced
+ * numbers, pick the 0-based __index'th value.
+ *
+ * Always prefer this over _PICK() if the numbers are evenly spaced.
+ */
+#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
+
+/*
+ * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
+ *
+ * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
+ */
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
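A usage example may help here; the addresses below are made up but evenly spaced, as the _PICK_EVEN() comment requires:

    _PICK_EVEN(2, 0x70000, 0x71000)   /* == 0x70000 + 2 * 0x1000 == 0x72000 */
    _PICK(1, 0x1000, 0x1234, 0x2000)  /* irregular spacing: == 0x1234       */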
-#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+/*
+ * Named helper wrappers around _PICK_EVEN() and _PICK().
+ */
+#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PIPE(plane, a, b)
+#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a)))
+#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
+#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
@@ -164,7 +181,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
BUILD_BUG_ON_MSG((value) & ~(mask), \
"Incorrect value for mask"); \
- (mask) << 16 | (value); })
+ __MASKED_FIELD(mask, value); })
#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
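For the masked registers these macros serve, the upper 16 bits of a write select which of the lower 16 bits take effect. For example:

    _MASKED_BIT_ENABLE(1 << 3)   /* == (1 << 19) | (1 << 3) == 0x00080008 */
    _MASKED_BIT_DISABLE(1 << 3)  /* == (1 << 19)            == 0x00080000 */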
@@ -270,19 +287,19 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
-#define ILK_GRDOM_FULL (0<<1)
-#define ILK_GRDOM_RENDER (1<<1)
-#define ILK_GRDOM_MEDIA (3<<1)
-#define ILK_GRDOM_MASK (3<<1)
-#define ILK_GRDOM_RESET_ENABLE (1<<0)
+#define ILK_GRDOM_FULL (0 << 1)
+#define ILK_GRDOM_RENDER (1 << 1)
+#define ILK_GRDOM_MEDIA (3 << 1)
+#define ILK_GRDOM_MASK (3 << 1)
+#define ILK_GRDOM_RESET_ENABLE (1 << 0)
#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
-#define GEN6_MBC_SNPCR_MASK (3<<21)
-#define GEN6_MBC_SNPCR_MAX (0<<21)
-#define GEN6_MBC_SNPCR_MED (1<<21)
-#define GEN6_MBC_SNPCR_LOW (2<<21)
-#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+#define GEN6_MBC_SNPCR_MASK (3 << 21)
+#define GEN6_MBC_SNPCR_MAX (0 << 21)
+#define GEN6_MBC_SNPCR_MED (1 << 21)
+#define GEN6_MBC_SNPCR_LOW (2 << 21)
+#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */
#define VLV_G3DCTL _MMIO(0x9024)
#define VLV_GSCKGCTL _MMIO(0x9028)
@@ -314,13 +331,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GRDOM_VECS (1 << 13)
#define GEN11_GRDOM_VECS2 (1 << 14)
-#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base+0x228)
-#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base+0x518)
-#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base+0x220)
+#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
+#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
+#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220)
#define PP_DIR_DCLV_2G 0xffffffff
-#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8)
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
#define GEN8_RPCS_ENABLE (1 << 31)
@@ -358,25 +375,25 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13)
#define GAM_ECOCHK _MMIO(0x4090)
-#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
-#define ECOCHK_SNB_BIT (1<<10)
-#define ECOCHK_DIS_TLB (1<<8)
-#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
-#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
-#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
-#define ECOCHK_PPGTT_GFDT_IVB (0x1<<4)
-#define ECOCHK_PPGTT_LLC_IVB (0x1<<3)
-#define ECOCHK_PPGTT_UC_HSW (0x1<<3)
-#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
-#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+#define BDW_DISABLE_HDC_INVALIDATION (1 << 25)
+#define ECOCHK_SNB_BIT (1 << 10)
+#define ECOCHK_DIS_TLB (1 << 8)
+#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6)
+#define ECOCHK_PPGTT_CACHE64B (0x3 << 3)
+#define ECOCHK_PPGTT_CACHE4B (0x0 << 3)
+#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4)
+#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3)
+#define ECOCHK_PPGTT_UC_HSW (0x1 << 3)
+#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
+#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
#define GAC_ECO_BITS _MMIO(0x14090)
-#define ECOBITS_SNB_BIT (1<<13)
-#define ECOBITS_PPGTT_CACHE64B (3<<8)
-#define ECOBITS_PPGTT_CACHE4B (0<<8)
+#define ECOBITS_SNB_BIT (1 << 13)
+#define ECOBITS_PPGTT_CACHE64B (3 << 8)
+#define ECOBITS_PPGTT_CACHE4B (0 << 8)
#define GAB_CTL _MMIO(0x24000)
-#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0)
#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
@@ -395,6 +412,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_STOLEN_RESERVED_4M (2 << 7)
#define GEN8_STOLEN_RESERVED_8M (3 << 7)
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
+#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20)
/* VGA stuff */
@@ -404,15 +422,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _VGA_MSR_WRITE _MMIO(0x3c2)
#define VGA_MSR_WRITE 0x3c2
#define VGA_MSR_READ 0x3cc
-#define VGA_MSR_MEM_EN (1<<1)
-#define VGA_MSR_CGA_MODE (1<<0)
+#define VGA_MSR_MEM_EN (1 << 1)
+#define VGA_MSR_CGA_MODE (1 << 0)
#define VGA_SR_INDEX 0x3c4
#define SR01 1
#define VGA_SR_DATA 0x3c5
#define VGA_AR_INDEX 0x3c0
-#define VGA_AR_VID_EN (1<<5)
+#define VGA_AR_VID_EN (1 << 5)
#define VGA_AR_DATA_WRITE 0x3c0
#define VGA_AR_DATA_READ 0x3c1
@@ -445,8 +463,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)
#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
-#define LOWER_SLICE_ENABLED (1<<0)
-#define LOWER_SLICE_DISABLED (0<<0)
+#define LOWER_SLICE_ENABLED (1 << 0)
+#define LOWER_SLICE_DISABLED (0 << 0)
/*
* Registers used only by the command parser
@@ -504,47 +522,47 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OACONTROL_CTX_MASK 0xFFFFF000
#define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F
#define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6
-#define GEN7_OACONTROL_TIMER_ENABLE (1<<5)
-#define GEN7_OACONTROL_FORMAT_A13 (0<<2)
-#define GEN7_OACONTROL_FORMAT_A29 (1<<2)
-#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2<<2)
-#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3<<2)
-#define GEN7_OACONTROL_FORMAT_B4_C8 (4<<2)
-#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5<<2)
-#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6<<2)
-#define GEN7_OACONTROL_FORMAT_C4_B8 (7<<2)
+#define GEN7_OACONTROL_TIMER_ENABLE (1 << 5)
+#define GEN7_OACONTROL_FORMAT_A13 (0 << 2)
+#define GEN7_OACONTROL_FORMAT_A29 (1 << 2)
+#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2 << 2)
+#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3 << 2)
+#define GEN7_OACONTROL_FORMAT_B4_C8 (4 << 2)
+#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5 << 2)
+#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6 << 2)
+#define GEN7_OACONTROL_FORMAT_C4_B8 (7 << 2)
#define GEN7_OACONTROL_FORMAT_SHIFT 2
-#define GEN7_OACONTROL_PER_CTX_ENABLE (1<<1)
-#define GEN7_OACONTROL_ENABLE (1<<0)
+#define GEN7_OACONTROL_PER_CTX_ENABLE (1 << 1)
+#define GEN7_OACONTROL_ENABLE (1 << 0)
#define GEN8_OACTXID _MMIO(0x2364)
#define GEN8_OA_DEBUG _MMIO(0x2B04)
-#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1<<5)
-#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1<<6)
-#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1<<2)
-#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1<<1)
+#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5)
+#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6)
+#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2)
+#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1)
#define GEN8_OACONTROL _MMIO(0x2B00)
-#define GEN8_OA_REPORT_FORMAT_A12 (0<<2)
-#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2)
-#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5<<2)
-#define GEN8_OA_REPORT_FORMAT_C4_B8 (7<<2)
+#define GEN8_OA_REPORT_FORMAT_A12 (0 << 2)
+#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2 << 2)
+#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5 << 2)
+#define GEN8_OA_REPORT_FORMAT_C4_B8 (7 << 2)
#define GEN8_OA_REPORT_FORMAT_SHIFT 2
-#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1<<1)
-#define GEN8_OA_COUNTER_ENABLE (1<<0)
+#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1 << 1)
+#define GEN8_OA_COUNTER_ENABLE (1 << 0)
#define GEN8_OACTXCONTROL _MMIO(0x2360)
#define GEN8_OA_TIMER_PERIOD_MASK 0x3F
#define GEN8_OA_TIMER_PERIOD_SHIFT 2
-#define GEN8_OA_TIMER_ENABLE (1<<1)
-#define GEN8_OA_COUNTER_RESUME (1<<0)
+#define GEN8_OA_TIMER_ENABLE (1 << 1)
+#define GEN8_OA_COUNTER_RESUME (1 << 0)
#define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */
-#define GEN7_OABUFFER_OVERRUN_DISABLE (1<<3)
-#define GEN7_OABUFFER_EDGE_TRIGGER (1<<2)
-#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1)
-#define GEN7_OABUFFER_RESUME (1<<0)
+#define GEN7_OABUFFER_OVERRUN_DISABLE (1 << 3)
+#define GEN7_OABUFFER_EDGE_TRIGGER (1 << 2)
+#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1 << 1)
+#define GEN7_OABUFFER_RESUME (1 << 0)
#define GEN8_OABUFFER_UDW _MMIO(0x23b4)
#define GEN8_OABUFFER _MMIO(0x2b14)
@@ -552,33 +570,33 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OASTATUS1 _MMIO(0x2364)
#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0
-#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1<<2)
-#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1<<1)
-#define GEN7_OASTATUS1_REPORT_LOST (1<<0)
+#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1 << 2)
+#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1 << 1)
+#define GEN7_OASTATUS1_REPORT_LOST (1 << 0)
#define GEN7_OASTATUS2 _MMIO(0x2368)
#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0
#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */
#define GEN8_OASTATUS _MMIO(0x2b08)
-#define GEN8_OASTATUS_OVERRUN_STATUS (1<<3)
-#define GEN8_OASTATUS_COUNTER_OVERFLOW (1<<2)
-#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1<<1)
-#define GEN8_OASTATUS_REPORT_LOST (1<<0)
+#define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3)
+#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2)
+#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1)
+#define GEN8_OASTATUS_REPORT_LOST (1 << 0)
#define GEN8_OAHEADPTR _MMIO(0x2B0C)
#define GEN8_OAHEADPTR_MASK 0xffffffc0
#define GEN8_OATAILPTR _MMIO(0x2B10)
#define GEN8_OATAILPTR_MASK 0xffffffc0
-#define OABUFFER_SIZE_128K (0<<3)
-#define OABUFFER_SIZE_256K (1<<3)
-#define OABUFFER_SIZE_512K (2<<3)
-#define OABUFFER_SIZE_1M (3<<3)
-#define OABUFFER_SIZE_2M (4<<3)
-#define OABUFFER_SIZE_4M (5<<3)
-#define OABUFFER_SIZE_8M (6<<3)
-#define OABUFFER_SIZE_16M (7<<3)
+#define OABUFFER_SIZE_128K (0 << 3)
+#define OABUFFER_SIZE_256K (1 << 3)
+#define OABUFFER_SIZE_512K (2 << 3)
+#define OABUFFER_SIZE_1M (3 << 3)
+#define OABUFFER_SIZE_2M (4 << 3)
+#define OABUFFER_SIZE_4M (5 << 3)
+#define OABUFFER_SIZE_8M (6 << 3)
+#define OABUFFER_SIZE_16M (7 << 3)
/*
* Flexible, Aggregate EU Counter Registers.
@@ -601,35 +619,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OASTARTTRIG1_THRESHOLD_MASK 0xffff
#define OASTARTTRIG2 _MMIO(0x2714)
-#define OASTARTTRIG2_INVERT_A_0 (1<<0)
-#define OASTARTTRIG2_INVERT_A_1 (1<<1)
-#define OASTARTTRIG2_INVERT_A_2 (1<<2)
-#define OASTARTTRIG2_INVERT_A_3 (1<<3)
-#define OASTARTTRIG2_INVERT_A_4 (1<<4)
-#define OASTARTTRIG2_INVERT_A_5 (1<<5)
-#define OASTARTTRIG2_INVERT_A_6 (1<<6)
-#define OASTARTTRIG2_INVERT_A_7 (1<<7)
-#define OASTARTTRIG2_INVERT_A_8 (1<<8)
-#define OASTARTTRIG2_INVERT_A_9 (1<<9)
-#define OASTARTTRIG2_INVERT_A_10 (1<<10)
-#define OASTARTTRIG2_INVERT_A_11 (1<<11)
-#define OASTARTTRIG2_INVERT_A_12 (1<<12)
-#define OASTARTTRIG2_INVERT_A_13 (1<<13)
-#define OASTARTTRIG2_INVERT_A_14 (1<<14)
-#define OASTARTTRIG2_INVERT_A_15 (1<<15)
-#define OASTARTTRIG2_INVERT_B_0 (1<<16)
-#define OASTARTTRIG2_INVERT_B_1 (1<<17)
-#define OASTARTTRIG2_INVERT_B_2 (1<<18)
-#define OASTARTTRIG2_INVERT_B_3 (1<<19)
-#define OASTARTTRIG2_INVERT_C_0 (1<<20)
-#define OASTARTTRIG2_INVERT_C_1 (1<<21)
-#define OASTARTTRIG2_INVERT_D_0 (1<<22)
-#define OASTARTTRIG2_THRESHOLD_ENABLE (1<<23)
-#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1<<24)
-#define OASTARTTRIG2_EVENT_SELECT_0 (1<<28)
-#define OASTARTTRIG2_EVENT_SELECT_1 (1<<29)
-#define OASTARTTRIG2_EVENT_SELECT_2 (1<<30)
-#define OASTARTTRIG2_EVENT_SELECT_3 (1<<31)
+#define OASTARTTRIG2_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG2_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG2_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG2_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG2_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG2_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG2_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG2_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG2_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG2_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG2_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG2_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG2_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG2_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG2_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG2_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG2_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG2_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG2_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG2_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG2_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG2_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG2_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG2_THRESHOLD_ENABLE (1 << 23)
+#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1 << 24)
+#define OASTARTTRIG2_EVENT_SELECT_0 (1 << 28)
+#define OASTARTTRIG2_EVENT_SELECT_1 (1 << 29)
+#define OASTARTTRIG2_EVENT_SELECT_2 (1 << 30)
+#define OASTARTTRIG2_EVENT_SELECT_3 (1 << 31)
#define OASTARTTRIG3 _MMIO(0x2718)
#define OASTARTTRIG3_NOA_SELECT_MASK 0xf
@@ -658,35 +676,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OASTARTTRIG5_THRESHOLD_MASK 0xffff
#define OASTARTTRIG6 _MMIO(0x2724)
-#define OASTARTTRIG6_INVERT_A_0 (1<<0)
-#define OASTARTTRIG6_INVERT_A_1 (1<<1)
-#define OASTARTTRIG6_INVERT_A_2 (1<<2)
-#define OASTARTTRIG6_INVERT_A_3 (1<<3)
-#define OASTARTTRIG6_INVERT_A_4 (1<<4)
-#define OASTARTTRIG6_INVERT_A_5 (1<<5)
-#define OASTARTTRIG6_INVERT_A_6 (1<<6)
-#define OASTARTTRIG6_INVERT_A_7 (1<<7)
-#define OASTARTTRIG6_INVERT_A_8 (1<<8)
-#define OASTARTTRIG6_INVERT_A_9 (1<<9)
-#define OASTARTTRIG6_INVERT_A_10 (1<<10)
-#define OASTARTTRIG6_INVERT_A_11 (1<<11)
-#define OASTARTTRIG6_INVERT_A_12 (1<<12)
-#define OASTARTTRIG6_INVERT_A_13 (1<<13)
-#define OASTARTTRIG6_INVERT_A_14 (1<<14)
-#define OASTARTTRIG6_INVERT_A_15 (1<<15)
-#define OASTARTTRIG6_INVERT_B_0 (1<<16)
-#define OASTARTTRIG6_INVERT_B_1 (1<<17)
-#define OASTARTTRIG6_INVERT_B_2 (1<<18)
-#define OASTARTTRIG6_INVERT_B_3 (1<<19)
-#define OASTARTTRIG6_INVERT_C_0 (1<<20)
-#define OASTARTTRIG6_INVERT_C_1 (1<<21)
-#define OASTARTTRIG6_INVERT_D_0 (1<<22)
-#define OASTARTTRIG6_THRESHOLD_ENABLE (1<<23)
-#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1<<24)
-#define OASTARTTRIG6_EVENT_SELECT_4 (1<<28)
-#define OASTARTTRIG6_EVENT_SELECT_5 (1<<29)
-#define OASTARTTRIG6_EVENT_SELECT_6 (1<<30)
-#define OASTARTTRIG6_EVENT_SELECT_7 (1<<31)
+#define OASTARTTRIG6_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG6_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG6_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG6_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG6_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG6_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG6_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG6_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG6_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG6_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG6_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG6_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG6_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG6_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG6_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG6_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG6_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG6_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG6_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG6_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG6_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG6_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG6_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG6_THRESHOLD_ENABLE (1 << 23)
+#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1 << 24)
+#define OASTARTTRIG6_EVENT_SELECT_4 (1 << 28)
+#define OASTARTTRIG6_EVENT_SELECT_5 (1 << 29)
+#define OASTARTTRIG6_EVENT_SELECT_6 (1 << 30)
+#define OASTARTTRIG6_EVENT_SELECT_7 (1 << 31)
#define OASTARTTRIG7 _MMIO(0x2728)
#define OASTARTTRIG7_NOA_SELECT_MASK 0xf
@@ -715,31 +733,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG2 _MMIO(0x2744)
-#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
-#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
-#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
-#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
-#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
-#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
-#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
-#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
-#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
-#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
-#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
-#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
-#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
-#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
-#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
-#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
-#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
-#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
-#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG2_INVERT_A_0 (1 << 0)
+#define OAREPORTTRIG2_INVERT_A_1 (1 << 1)
+#define OAREPORTTRIG2_INVERT_A_2 (1 << 2)
+#define OAREPORTTRIG2_INVERT_A_3 (1 << 3)
+#define OAREPORTTRIG2_INVERT_A_4 (1 << 4)
+#define OAREPORTTRIG2_INVERT_A_5 (1 << 5)
+#define OAREPORTTRIG2_INVERT_A_6 (1 << 6)
+#define OAREPORTTRIG2_INVERT_A_7 (1 << 7)
+#define OAREPORTTRIG2_INVERT_A_8 (1 << 8)
+#define OAREPORTTRIG2_INVERT_A_9 (1 << 9)
+#define OAREPORTTRIG2_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG2_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG2_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG2_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG2_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG2_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG2_INVERT_B_0 (1 << 16)
+#define OAREPORTTRIG2_INVERT_B_1 (1 << 17)
+#define OAREPORTTRIG2_INVERT_B_2 (1 << 18)
+#define OAREPORTTRIG2_INVERT_B_3 (1 << 19)
+#define OAREPORTTRIG2_INVERT_C_0 (1 << 20)
+#define OAREPORTTRIG2_INVERT_C_1 (1 << 21)
+#define OAREPORTTRIG2_INVERT_D_0 (1 << 22)
+#define OAREPORTTRIG2_THRESHOLD_ENABLE (1 << 23)
+#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1 << 31)
#define OAREPORTTRIG3 _MMIO(0x2748)
#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
@@ -768,31 +786,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG6 _MMIO(0x2754)
-#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
-#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
-#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
-#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
-#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
-#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
-#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
-#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
-#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
-#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
-#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
-#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
-#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
-#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
-#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
-#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
-#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
-#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
-#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG6_INVERT_A_0 (1 << 0)
+#define OAREPORTTRIG6_INVERT_A_1 (1 << 1)
+#define OAREPORTTRIG6_INVERT_A_2 (1 << 2)
+#define OAREPORTTRIG6_INVERT_A_3 (1 << 3)
+#define OAREPORTTRIG6_INVERT_A_4 (1 << 4)
+#define OAREPORTTRIG6_INVERT_A_5 (1 << 5)
+#define OAREPORTTRIG6_INVERT_A_6 (1 << 6)
+#define OAREPORTTRIG6_INVERT_A_7 (1 << 7)
+#define OAREPORTTRIG6_INVERT_A_8 (1 << 8)
+#define OAREPORTTRIG6_INVERT_A_9 (1 << 9)
+#define OAREPORTTRIG6_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG6_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG6_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG6_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG6_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG6_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG6_INVERT_B_0 (1 << 16)
+#define OAREPORTTRIG6_INVERT_B_1 (1 << 17)
+#define OAREPORTTRIG6_INVERT_B_2 (1 << 18)
+#define OAREPORTTRIG6_INVERT_B_3 (1 << 19)
+#define OAREPORTTRIG6_INVERT_C_0 (1 << 20)
+#define OAREPORTTRIG6_INVERT_C_1 (1 << 21)
+#define OAREPORTTRIG6_INVERT_D_0 (1 << 22)
+#define OAREPORTTRIG6_THRESHOLD_ENABLE (1 << 23)
+#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1 << 31)
#define OAREPORTTRIG7 _MMIO(0x2758)
#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
@@ -828,9 +846,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC_COMPARE_VALUE_MASK 0xffff
#define OACEC_COMPARE_VALUE_SHIFT 3
-#define OACEC_SELECT_NOA (0<<19)
-#define OACEC_SELECT_PREV (1<<19)
-#define OACEC_SELECT_BOOLEAN (2<<19)
+#define OACEC_SELECT_NOA (0 << 19)
+#define OACEC_SELECT_PREV (1 << 19)
+#define OACEC_SELECT_BOOLEAN (2 << 19)
/* CECX_1 */
#define OACEC_MASK_MASK 0xffff
@@ -948,9 +966,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Reset registers
*/
#define DEBUG_RESET_I830 _MMIO(0x6070)
-#define DEBUG_RESET_FULL (1<<7)
-#define DEBUG_RESET_RENDER (1<<8)
-#define DEBUG_RESET_DISPLAY (1<<9)
+#define DEBUG_RESET_FULL (1 << 7)
+#define DEBUG_RESET_RENDER (1 << 8)
+#define DEBUG_RESET_DISPLAY (1 << 9)
/*
* IOSF sideband
@@ -961,7 +979,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IOSF_PORT_SHIFT 8
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
-#define IOSF_SB_BUSY (1<<0)
+#define IOSF_SB_BUSY (1 << 0)
#define IOSF_PORT_BUNIT 0x03
#define IOSF_PORT_PUNIT 0x04
#define IOSF_PORT_NC 0x11
@@ -1044,13 +1062,13 @@ enum i915_power_well_id {
/*
* HSW/BDW
- * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+ * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
*/
HSW_DISP_PW_GLOBAL = 15,
/*
* GEN9+
- * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+ * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
*/
SKL_DISP_PW_MISC_IO = 0,
SKL_DISP_PW_DDI_A_E,
@@ -1074,17 +1092,54 @@ enum i915_power_well_id {
SKL_DISP_PW_2,
/* - custom power wells */
- SKL_DISP_PW_DC_OFF,
BXT_DPIO_CMN_A,
BXT_DPIO_CMN_BC,
- GLK_DPIO_CMN_C, /* 19 */
+ GLK_DPIO_CMN_C, /* 18 */
+
+ /*
+ * GEN11+
+ * - _HSW_PWR_WELL_CTL1-4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_1 = 0,
+ ICL_DISP_PW_2,
+ ICL_DISP_PW_3,
+ ICL_DISP_PW_4,
+
+ /*
+ * - _HSW_PWR_WELL_CTL_AUX1/2/4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_AUX_A = 16,
+ ICL_DISP_PW_AUX_B,
+ ICL_DISP_PW_AUX_C,
+ ICL_DISP_PW_AUX_D,
+ ICL_DISP_PW_AUX_E,
+ ICL_DISP_PW_AUX_F,
+
+ ICL_DISP_PW_AUX_TBT1 = 24,
+ ICL_DISP_PW_AUX_TBT2,
+ ICL_DISP_PW_AUX_TBT3,
+ ICL_DISP_PW_AUX_TBT4,
+
+ /*
+ * - _HSW_PWR_WELL_CTL_DDI1/2/4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_DDI_A = 32,
+ ICL_DISP_PW_DDI_B,
+ ICL_DISP_PW_DDI_C,
+ ICL_DISP_PW_DDI_D,
+ ICL_DISP_PW_DDI_E,
+ ICL_DISP_PW_DDI_F, /* 37 */
/*
* Multiple platforms.
* Must start following the highest ID of any platform.
* - custom power wells
*/
- I915_DISP_PW_ALWAYS_ON = 20,
+ SKL_DISP_PW_DC_OFF = 38,
+ I915_DISP_PW_ALWAYS_ON,
};
#define PUNIT_REG_PWRGT_CTRL 0x60
@@ -1098,8 +1153,8 @@ enum i915_power_well_id {
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
-#define GPLLENABLE (1<<4)
-#define GENFREQSTATUS (1<<0)
+#define GPLLENABLE (1 << 4)
+#define GENFREQSTATUS (1 << 0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
@@ -1141,11 +1196,11 @@ enum i915_power_well_id {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
-#define VLV_TURBO_SOC_OVERRIDE 0x04
-#define VLV_OVERRIDE_EN 1
-#define VLV_SOC_TDP_EN (1 << 1)
-#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
-#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
+#define VLV_TURBO_SOC_OVERRIDE 0x04
+#define VLV_OVERRIDE_EN 1
+#define VLV_SOC_TDP_EN (1 << 1)
+#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
+#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
@@ -1194,10 +1249,10 @@ enum i915_power_well_id {
#define DPIO_DEVFN 0
#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
-#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
-#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
-#define DPIO_SFR_BYPASS (1<<1)
-#define DPIO_CMNRST (1<<0)
+#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1 << 1)
+#define DPIO_CMNRST (1 << 0)
#define DPIO_PHY(pipe) ((pipe) >> 1)
#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy])
@@ -1215,7 +1270,7 @@ enum i915_power_well_id {
#define DPIO_P1_SHIFT (21) /* 3 bits */
#define DPIO_P2_SHIFT (16) /* 5 bits */
#define DPIO_N_SHIFT (12) /* 4 bits */
-#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_ENABLE_CALIBRATION (1 << 11)
#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
#define DPIO_M2DIV_MASK 0xff
#define _VLV_PLL_DW3_CH1 0x802c
@@ -1264,10 +1319,10 @@ enum i915_power_well_id {
#define _VLV_PCS_DW0_CH0 0x8200
#define _VLV_PCS_DW0_CH1 0x8400
-#define DPIO_PCS_TX_LANE2_RESET (1<<16)
-#define DPIO_PCS_TX_LANE1_RESET (1<<7)
-#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
+#define DPIO_PCS_TX_LANE2_RESET (1 << 16)
+#define DPIO_PCS_TX_LANE1_RESET (1 << 7)
+#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1 << 3)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
#define _VLV_PCS01_DW0_CH0 0x200
@@ -1279,11 +1334,11 @@ enum i915_power_well_id {
#define _VLV_PCS_DW1_CH0 0x8204
#define _VLV_PCS_DW1_CH1 0x8404
-#define CHV_PCS_REQ_SOFTRESET_EN (1<<23)
-#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
-#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
+#define CHV_PCS_REQ_SOFTRESET_EN (1 << 23)
+#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1 << 22)
+#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
-#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
+#define DPIO_PCS_CLK_SOFT_RESET (1 << 5)
#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
#define _VLV_PCS01_DW1_CH0 0x204
@@ -1308,12 +1363,12 @@ enum i915_power_well_id {
#define _VLV_PCS_DW9_CH0 0x8224
#define _VLV_PCS_DW9_CH1 0x8424
-#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
-#define DPIO_PCS_TX2MARGIN_000 (0<<13)
-#define DPIO_PCS_TX2MARGIN_101 (1<<13)
-#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
-#define DPIO_PCS_TX1MARGIN_000 (0<<10)
-#define DPIO_PCS_TX1MARGIN_101 (1<<10)
+#define DPIO_PCS_TX2MARGIN_MASK (0x7 << 13)
+#define DPIO_PCS_TX2MARGIN_000 (0 << 13)
+#define DPIO_PCS_TX2MARGIN_101 (1 << 13)
+#define DPIO_PCS_TX1MARGIN_MASK (0x7 << 10)
+#define DPIO_PCS_TX1MARGIN_000 (0 << 10)
+#define DPIO_PCS_TX1MARGIN_101 (1 << 10)
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
#define _VLV_PCS01_DW9_CH0 0x224
@@ -1325,14 +1380,14 @@ enum i915_power_well_id {
#define _CHV_PCS_DW10_CH0 0x8228
#define _CHV_PCS_DW10_CH1 0x8428
-#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
-#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
-#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
-#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
-#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
-#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
-#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
-#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
+#define DPIO_PCS_SWING_CALC_TX0_TX2 (1 << 30)
+#define DPIO_PCS_SWING_CALC_TX1_TX3 (1 << 31)
+#define DPIO_PCS_TX2DEEMP_MASK (0xf << 24)
+#define DPIO_PCS_TX2DEEMP_9P5 (0 << 24)
+#define DPIO_PCS_TX2DEEMP_6P0 (2 << 24)
+#define DPIO_PCS_TX1DEEMP_MASK (0xf << 16)
+#define DPIO_PCS_TX1DEEMP_9P5 (0 << 16)
+#define DPIO_PCS_TX1DEEMP_6P0 (2 << 16)
#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
#define _VLV_PCS01_DW10_CH0 0x0228
@@ -1344,10 +1399,10 @@ enum i915_power_well_id {
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
-#define DPIO_TX2_STAGGER_MASK(x) ((x)<<24)
-#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
-#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
+#define DPIO_TX2_STAGGER_MASK(x) ((x) << 24)
+#define DPIO_LANEDESKEW_STRAP_OVRD (1 << 3)
+#define DPIO_LEFT_TXFIFO_RST_MASTER (1 << 1)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0)
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
#define _VLV_PCS01_DW11_CH0 0x022c
@@ -1366,11 +1421,11 @@ enum i915_power_well_id {
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
-#define DPIO_TX2_STAGGER_MULT(x) ((x)<<20)
-#define DPIO_TX1_STAGGER_MULT(x) ((x)<<16)
-#define DPIO_TX1_STAGGER_MASK(x) ((x)<<8)
-#define DPIO_LANESTAGGER_STRAP_OVRD (1<<6)
-#define DPIO_LANESTAGGER_STRAP(x) ((x)<<0)
+#define DPIO_TX2_STAGGER_MULT(x) ((x) << 20)
+#define DPIO_TX1_STAGGER_MULT(x) ((x) << 16)
+#define DPIO_TX1_STAGGER_MASK(x) ((x) << 8)
+#define DPIO_LANESTAGGER_STRAP_OVRD (1 << 6)
+#define DPIO_LANESTAGGER_STRAP(x) ((x) << 0)
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
#define _VLV_PCS_DW14_CH0 0x8238
@@ -1391,7 +1446,7 @@ enum i915_power_well_id {
#define _VLV_TX_DW3_CH0 0x828c
#define _VLV_TX_DW3_CH1 0x848c
/* The following bit for CHV phy */
-#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
+#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1 << 27)
#define DPIO_SWING_MARGIN101_SHIFT 16
#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
@@ -1410,7 +1465,7 @@ enum i915_power_well_id {
#define _VLV_TX_DW5_CH0 0x8294
#define _VLV_TX_DW5_CH1 0x8494
-#define DPIO_TX_OCALINIT_EN (1<<31)
+#define DPIO_TX_OCALINIT_EN (1 << 31)
#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
#define _VLV_TX_DW11_CH0 0x82ac
@@ -1640,10 +1695,10 @@ enum i915_power_well_id {
#define PORT_PLL_LOCK_THRESHOLD_SHIFT 1
#define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
/* PORT_PLL_10_A */
-#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
+#define PORT_PLL_DCO_AMP_OVR_EN_H (1 << 27)
#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
-#define PORT_PLL_DCO_AMP(x) ((x)<<10)
+#define PORT_PLL_DCO_AMP(x) ((x) << 10)
#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
_PORT_PLL_0_B, \
_PORT_PLL_0_C)
@@ -1666,6 +1721,26 @@ enum i915_power_well_id {
#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
_ICL_PORT_CL_DW5_B)
+#define _CNL_PORT_CL_DW10_A 0x162028
+#define _ICL_PORT_CL_DW10_B 0x6c028
+#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \
+ _CNL_PORT_CL_DW10_A, \
+ _ICL_PORT_CL_DW10_B)
+#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
+#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
+#define PWR_UP_ALL_LANES (0x0 << 4)
+#define PWR_DOWN_LN_3_2_1 (0xe << 4)
+#define PWR_DOWN_LN_3_2 (0xc << 4)
+#define PWR_DOWN_LN_3 (0x8 << 4)
+#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
+#define PWR_DOWN_LN_1_0 (0x3 << 4)
+#define PWR_DOWN_LN_1 (0x2 << 4)
+#define PWR_DOWN_LN_3_1 (0xa << 4)
+#define PWR_DOWN_LN_3_1_0 (0xb << 4)
+#define PWR_DOWN_LN_MASK (0xf << 4)
+#define PWR_DOWN_LN_SHIFT 4
+
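
Sketch of the usual read-modify-write of the new 4-bit lane power-down field (the helper is hypothetical; the mask/value constants are copied from the defines above):

#include <stdint.h>

#define PWR_DOWN_LN_MASK	(0xf << 4)
#define PWR_DOWN_LN_3_2		(0xc << 4)

static uint32_t set_lane_power_down(uint32_t dw10)
{
	dw10 &= ~PWR_DOWN_LN_MASK;	/* clear bits 7:4 */
	dw10 |= PWR_DOWN_LN_3_2;	/* power down lanes 3 and 2 */
	return dw10;
}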
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
@@ -1678,6 +1753,13 @@ enum i915_power_well_id {
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
+#define _ICL_PORT_CL_DW12_A 0x162030
+#define _ICL_PORT_CL_DW12_B 0x6C030
+#define ICL_LANE_ENABLE_AUX (1 << 0)
+#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \
+ _ICL_PORT_CL_DW12_A, \
+ _ICL_PORT_CL_DW12_B)
+
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
@@ -1715,16 +1797,22 @@ enum i915_power_well_id {
_CNL_PORT_PCS_DW1_LN0_D, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_F))
+
#define _ICL_PORT_PCS_DW1_GRP_A 0x162604
#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604
#define _ICL_PORT_PCS_DW1_LN0_A 0x162804
#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804
+#define _ICL_PORT_PCS_DW1_AUX_A 0x162304
+#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304
#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\
_ICL_PORT_PCS_DW1_GRP_A, \
_ICL_PORT_PCS_DW1_GRP_B)
#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_PCS_DW1_LN0_A, \
_ICL_PORT_PCS_DW1_LN0_B)
+#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_PCS_DW1_AUX_A, \
+ _ICL_PORT_PCS_DW1_AUX_B)
#define COMMON_KEEPER_EN (1 << 26)
/* CNL Port TX registers */
@@ -1745,7 +1833,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_D_GRP_OFFSET, \
_CNL_PORT_TX_AE_GRP_OFFSET, \
_CNL_PORT_TX_F_GRP_OFFSET) + \
- 4*(dw))
+ 4 * (dw))
#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1753,7 +1841,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_D_LN0_OFFSET, \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_F_LN0_OFFSET) + \
- 4*(dw))
+ 4 * (dw))
#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
@@ -1761,16 +1849,23 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_DW2_GRP_B 0x6C688
#define _ICL_PORT_TX_DW2_LN0_A 0x162888
#define _ICL_PORT_TX_DW2_LN0_B 0x6C888
+#define _ICL_PORT_TX_DW2_AUX_A 0x162388
+#define _ICL_PORT_TX_DW2_AUX_B 0x6c388
#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW2_GRP_A, \
_ICL_PORT_TX_DW2_GRP_B)
#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW2_LN0_A, \
_ICL_PORT_TX_DW2_LN0_B)
+#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW2_AUX_A, \
+ _ICL_PORT_TX_DW2_AUX_B)
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
#define SWING_SEL_LOWER_MASK (0x7 << 11)
+#define FRC_LATENCY_OPTIM_MASK (0x7 << 8)
+#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8)
#define RCOMP_SCALAR(x) ((x) << 0)
#define RCOMP_SCALAR_MASK (0xFF << 0)
@@ -1779,21 +1874,26 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
- (ln * (_CNL_PORT_TX_DW4_LN1_AE - \
+ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define _ICL_PORT_TX_DW4_GRP_A 0x162690
#define _ICL_PORT_TX_DW4_GRP_B 0x6C690
#define _ICL_PORT_TX_DW4_LN0_A 0x162890
#define _ICL_PORT_TX_DW4_LN1_A 0x162990
#define _ICL_PORT_TX_DW4_LN0_B 0x6C890
+#define _ICL_PORT_TX_DW4_AUX_A 0x162390
+#define _ICL_PORT_TX_DW4_AUX_B 0x6c390
#define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW4_GRP_A, \
_ICL_PORT_TX_DW4_GRP_B)
#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \
_ICL_PORT_TX_DW4_LN0_A, \
_ICL_PORT_TX_DW4_LN0_B) + \
- (ln * (_ICL_PORT_TX_DW4_LN1_A - \
- _ICL_PORT_TX_DW4_LN0_A)))
+ ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
+ _ICL_PORT_TX_DW4_LN0_A)))
+#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW4_AUX_A, \
+ _ICL_PORT_TX_DW4_AUX_B)
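
A sketch of the per-lane addressing pattern used by ICL_PORT_TX_DW4_LN() above: consecutive lanes sit 0x100 apart, so lane n lives at LN0 + n * (LN1 - LN0). Offsets are local copies of the defines; the loop itself is illustrative.

#include <stdio.h>

#define ICL_PORT_TX_DW4_LN0_A	0x162890
#define ICL_PORT_TX_DW4_LN1_A	0x162990

int main(void)
{
	int stride = ICL_PORT_TX_DW4_LN1_A - ICL_PORT_TX_DW4_LN0_A;

	for (int ln = 0; ln < 4; ln++)
		printf("lane %d DW4 at 0x%x\n",
		       ln, ICL_PORT_TX_DW4_LN0_A + ln * stride);
	return 0;
}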
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1808,12 +1908,17 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_DW5_GRP_B 0x6C694
#define _ICL_PORT_TX_DW5_LN0_A 0x162894
#define _ICL_PORT_TX_DW5_LN0_B 0x6C894
+#define _ICL_PORT_TX_DW5_AUX_A 0x162394
+#define _ICL_PORT_TX_DW5_AUX_B 0x6c394
#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW5_GRP_A, \
_ICL_PORT_TX_DW5_GRP_B)
#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW5_LN0_A, \
_ICL_PORT_TX_DW5_LN0_B)
+#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW5_AUX_A, \
+ _ICL_PORT_TX_DW5_AUX_B)
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -1990,6 +2095,11 @@ enum i915_power_well_id {
_ICL_PORT_COMP_DW10_A, \
_ICL_PORT_COMP_DW10_B)
+/* ICL PHY DFLEX registers */
+#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n)))
+#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n)))
+
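
Sketch of the nibble-per-Type-C-connector layout encoded by the DFLEXDPMLE1 macros just added (the update helper is hypothetical; only the mask math comes from the header):

#include <stdint.h>

#define DFLEXDPMLE1_DPMLETC_MASK(n)	(0xf << (4 * (n)))
#define DFLEXDPMLE1_DPMLETC(n, x)	((x) << (4 * (n)))

/* grant main-link lanes 1:0 (value 0x3) to Type-C connector n */
static uint32_t dflex_assign_ml10(uint32_t val, int n)
{
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(n);
	return val | DFLEXDPMLE1_DPMLETC(n, 0x3);
}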
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
@@ -2134,8 +2244,8 @@ enum i915_power_well_id {
/* SKL balance leg register */
#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
/* I_boost values */
-#define BALANCE_LEG_SHIFT(port) (8+3*(port))
-#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
+#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
+#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
/* Balance leg disable bits */
#define BALANCE_LEG_DISABLE_SHIFT 23
#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
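
A quick check of the balance-leg field layout above: each port owns a 3-bit I_boost field starting at bit 8 + 3 * port. Standalone sketch:

#include <stdio.h>

#define BALANCE_LEG_SHIFT(port)	(8 + 3 * (port))

int main(void)
{
	for (int port = 0; port < 5; port++)
		printf("port %c: bits %d-%d\n", 'A' + port,
		       BALANCE_LEG_SHIFT(port), BALANCE_LEG_SHIFT(port) + 2);
	return 0;
}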
@@ -2155,10 +2265,10 @@ enum i915_power_well_id {
#define I830_FENCE_TILING_Y_SHIFT 12
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
-#define I830_FENCE_REG_VALID (1<<0)
+#define I830_FENCE_REG_VALID (1 << 0)
#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
-#define I830_FENCE_MAX_SIZE_VAL (1<<8)
+#define I830_FENCE_MAX_SIZE_VAL (1 << 8)
#define I915_FENCE_START_MASK 0x0ff00000
#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
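
The ffs()-based fence encoding above stores the power-of-two size exponent in bits 10:8; a sketch using the gen3 variant (1 MiB granularity):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)

int main(void)
{
	/* a 4 MiB fence: ffs(4) - 1 == 2, placed at bits 10:8 -> 0x200 */
	printf("0x%x\n", I915_FENCE_SIZE_BITS(4u << 20));
	return 0;
}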
@@ -2167,7 +2277,7 @@ enum i915_power_well_id {
#define FENCE_REG_965_HI(i) _MMIO(0x03000 + (i) * 8 + 4)
#define I965_FENCE_PITCH_SHIFT 2
#define I965_FENCE_TILING_Y_SHIFT 1
-#define I965_FENCE_REG_VALID (1<<0)
+#define I965_FENCE_REG_VALID (1 << 0)
#define I965_FENCE_MAX_PITCH_VAL 0x0400
#define FENCE_REG_GEN6_LO(i) _MMIO(0x100000 + (i) * 8)
@@ -2190,13 +2300,13 @@ enum i915_power_well_id {
#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER _MMIO(0x02024)
-#define PRB0_BASE (0x2030-0x30)
-#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
-#define PRB2_BASE (0x2050-0x30) /* gen3 */
-#define SRB0_BASE (0x2100-0x30) /* gen2 */
-#define SRB1_BASE (0x2110-0x30) /* gen2 */
-#define SRB2_BASE (0x2120-0x30) /* 830 */
-#define SRB3_BASE (0x2130-0x30) /* 830 */
+#define PRB0_BASE (0x2030 - 0x30)
+#define PRB1_BASE (0x2040 - 0x30) /* 830,gen3 */
+#define PRB2_BASE (0x2050 - 0x30) /* gen3 */
+#define SRB0_BASE (0x2100 - 0x30) /* gen2 */
+#define SRB1_BASE (0x2110 - 0x30) /* gen2 */
+#define SRB2_BASE (0x2120 - 0x30) /* 830 */
+#define SRB3_BASE (0x2130 - 0x30) /* 830 */
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
@@ -2209,14 +2319,14 @@ enum i915_power_well_id {
#define GEN11_VEBOX_RING_BASE 0x1c8000
#define GEN11_VEBOX2_RING_BASE 0x1d8000
#define BLT_RING_BASE 0x22000
-#define RING_TAIL(base) _MMIO((base)+0x30)
-#define RING_HEAD(base) _MMIO((base)+0x34)
-#define RING_START(base) _MMIO((base)+0x38)
-#define RING_CTL(base) _MMIO((base)+0x3c)
+#define RING_TAIL(base) _MMIO((base) + 0x30)
+#define RING_HEAD(base) _MMIO((base) + 0x34)
+#define RING_START(base) _MMIO((base) + 0x38)
+#define RING_CTL(base) _MMIO((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_SYNC_0(base) _MMIO((base)+0x40)
-#define RING_SYNC_1(base) _MMIO((base)+0x44)
-#define RING_SYNC_2(base) _MMIO((base)+0x48)
+#define RING_SYNC_0(base) _MMIO((base) + 0x40)
+#define RING_SYNC_1(base) _MMIO((base) + 0x44)
+#define RING_SYNC_2(base) _MMIO((base) + 0x48)
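
The RING_*() conversions above all follow one pattern: a per-engine MMIO base plus a fixed per-register offset. A standalone sketch, with _MMIO() reduced to plain arithmetic for illustration:

#include <stdio.h>

#define RENDER_RING_BASE	0x02000
#define GEN6_BSD_RING_BASE	0x12000

#define RING_TAIL(base)	((base) + 0x30)
#define RING_HEAD(base)	((base) + 0x34)

int main(void)
{
	printf("render TAIL 0x%x, bsd HEAD 0x%x\n",
	       RING_TAIL(RENDER_RING_BASE), RING_HEAD(GEN6_BSD_RING_BASE));
	return 0;
}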
#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
@@ -2230,21 +2340,22 @@ enum i915_power_well_id {
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC INVALID_MMIO_REG
-#define RING_PSMI_CTL(base) _MMIO((base)+0x50)
-#define RING_MAX_IDLE(base) _MMIO((base)+0x54)
-#define RING_HWS_PGA(base) _MMIO((base)+0x80)
-#define RING_HWS_PGA_GEN6(base) _MMIO((base)+0x2080)
-#define RING_RESET_CTL(base) _MMIO((base)+0xd0)
+#define RING_PSMI_CTL(base) _MMIO((base) + 0x50)
+#define RING_MAX_IDLE(base) _MMIO((base) + 0x54)
+#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
+#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
+#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
#define RESET_CTL_REQUEST_RESET (1 << 0)
#define RESET_CTL_READY_TO_RESET (1 << 1)
+#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
#define GTT_CACHE_EN_ALL 0xF0007FFF
#define GEN7_WR_WATERMARK _MMIO(0x4028)
#define GEN7_GFX_PRIO_CTRL _MMIO(0x402C)
#define ARB_MODE _MMIO(0x4030)
-#define ARB_MODE_SWIZZLE_SNB (1<<4)
-#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define ARB_MODE_SWIZZLE_SNB (1 << 4)
+#define ARB_MODE_SWIZZLE_IVB (1 << 5)
#define GEN7_GFX_PEND_TLB0 _MMIO(0x4034)
#define GEN7_GFX_PEND_TLB1 _MMIO(0x4038)
/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
@@ -2254,30 +2365,30 @@ enum i915_power_well_id {
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
#define GAMTARBMODE _MMIO(0x04a08)
-#define ARB_MODE_BWGTLB_DISABLE (1<<9)
-#define ARB_MODE_SWIZZLE_BDW (1<<1)
+#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
+#define ARB_MODE_SWIZZLE_BDW (1 << 1)
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
-#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
+#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id)
#define GEN8_RING_FAULT_REG _MMIO(0x4094)
#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
-#define RING_FAULT_GTTSEL_MASK (1<<11)
+#define RING_FAULT_GTTSEL_MASK (1 << 11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
-#define RING_FAULT_VALID (1<<0)
+#define RING_FAULT_VALID (1 << 0)
#define DONE_REG _MMIO(0x40b0)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
-#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
-#define RING_ACTHD(base) _MMIO((base)+0x74)
-#define RING_ACTHD_UDW(base) _MMIO((base)+0x5c)
-#define RING_NOPID(base) _MMIO((base)+0x94)
-#define RING_IMR(base) _MMIO((base)+0xa8)
-#define RING_HWSTAM(base) _MMIO((base)+0x98)
-#define RING_TIMESTAMP(base) _MMIO((base)+0x358)
-#define RING_TIMESTAMP_UDW(base) _MMIO((base)+0x358 + 4)
+#define RING_ACTHD(base) _MMIO((base) + 0x74)
+#define RING_ACTHD_UDW(base) _MMIO((base) + 0x5c)
+#define RING_NOPID(base) _MMIO((base) + 0x94)
+#define RING_IMR(base) _MMIO((base) + 0xa8)
+#define RING_HWSTAM(base) _MMIO((base) + 0x98)
+#define RING_TIMESTAMP(base) _MMIO((base) + 0x358)
+#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -2290,24 +2401,25 @@ enum i915_power_well_id {
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
-#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
-#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
-#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
-#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
+#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
#define RING_MAX_NONPRIV_SLOTS 12
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
-#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
+#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18)
#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
-#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
-#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24)
+#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
+#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
#if 0
#define PRB0_TAIL _MMIO(0x2030)
@@ -2333,19 +2445,19 @@ enum i915_power_well_id {
#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
-#define RING_IPEIR(base) _MMIO((base)+0x64)
-#define RING_IPEHR(base) _MMIO((base)+0x68)
+#define RING_IPEIR(base) _MMIO((base) + 0x64)
+#define RING_IPEHR(base) _MMIO((base) + 0x68)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
* The GEN2 counterpart of this register is GEN2_INSTDONE.
*/
-#define RING_INSTDONE(base) _MMIO((base)+0x6c)
-#define RING_INSTPS(base) _MMIO((base)+0x70)
-#define RING_DMA_FADD(base) _MMIO((base)+0x78)
-#define RING_DMA_FADD_UDW(base) _MMIO((base)+0x60) /* gen8+ */
-#define RING_INSTPM(base) _MMIO((base)+0xc0)
-#define RING_MI_MODE(base) _MMIO((base)+0x9c)
+#define RING_INSTDONE(base) _MMIO((base) + 0x6c)
+#define RING_INSTPS(base) _MMIO((base) + 0x70)
+#define RING_DMA_FADD(base) _MMIO((base) + 0x78)
+#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */
+#define RING_INSTPM(base) _MMIO((base) + 0xc0)
+#define RING_MI_MODE(base) _MMIO((base) + 0x9c)
#define INSTPS _MMIO(0x2070) /* 965+ only */
#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
#define ACTHD_I965 _MMIO(0x2074)
@@ -2353,37 +2465,37 @@ enum i915_power_well_id {
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
-#define PWRCTX_EN (1<<0)
+#define PWRCTX_EN (1 << 0)
#define IPEIR _MMIO(0x2088)
#define IPEHR _MMIO(0x208c)
#define GEN2_INSTDONE _MMIO(0x2090)
#define NOPID _MMIO(0x2094)
#define HWSTAM _MMIO(0x2098)
#define DMA_FADD_I8XX _MMIO(0x20d0)
-#define RING_BBSTATE(base) _MMIO((base)+0x110)
+#define RING_BBSTATE(base) _MMIO((base) + 0x110)
#define RING_BB_PPGTT (1 << 5)
-#define RING_SBBADDR(base) _MMIO((base)+0x114) /* hsw+ */
-#define RING_SBBSTATE(base) _MMIO((base)+0x118) /* hsw+ */
-#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */
-#define RING_BBADDR(base) _MMIO((base)+0x140)
-#define RING_BBADDR_UDW(base) _MMIO((base)+0x168) /* gen8+ */
-#define RING_BB_PER_CTX_PTR(base) _MMIO((base)+0x1c0) /* gen8+ */
-#define RING_INDIRECT_CTX(base) _MMIO((base)+0x1c4) /* gen8+ */
-#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */
-#define RING_CTX_TIMESTAMP(base) _MMIO((base)+0x3a8) /* gen8+ */
+#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */
+#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */
+#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
+#define RING_BBADDR(base) _MMIO((base) + 0x140)
+#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */
+#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
+#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
+#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
+#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */
#define ERROR_GEN6 _MMIO(0x40a0)
#define GEN7_ERR_INT _MMIO(0x44040)
-#define ERR_INT_POISON (1<<31)
-#define ERR_INT_MMIO_UNCLAIMED (1<<13)
-#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
-#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
-#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
-#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
-#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
-#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + (pipe)*3))
-#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
-#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
+#define ERR_INT_POISON (1 << 31)
+#define ERR_INT_MMIO_UNCLAIMED (1 << 13)
+#define ERR_INT_PIPE_CRC_DONE_C (1 << 8)
+#define ERR_INT_FIFO_UNDERRUN_C (1 << 6)
+#define ERR_INT_PIPE_CRC_DONE_B (1 << 5)
+#define ERR_INT_FIFO_UNDERRUN_B (1 << 3)
+#define ERR_INT_PIPE_CRC_DONE_A (1 << 2)
+#define ERR_INT_PIPE_CRC_DONE(pipe) (1 << (2 + (pipe) * 3))
+#define ERR_INT_FIFO_UNDERRUN_A (1 << 0)
+#define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
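
The per-pipe ERR_INT macros pack three bits per pipe; a sketch verifying the spacing (pipe A underrun at bit 0, CRC-done at bit 2, then +3 per pipe):

#include <stdio.h>

int main(void)
{
	for (int pipe = 0; pipe < 3; pipe++)
		printf("pipe %c: underrun bit %d, crc-done bit %d\n",
		       'A' + pipe, pipe * 3, 2 + pipe * 3);
	return 0;
}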
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
@@ -2391,7 +2503,7 @@ enum i915_power_well_id {
#define FAULT_GTT_SEL (1 << 4)
#define FPGA_DBG _MMIO(0x42300)
-#define FPGA_DBG_RM_NOCLAIM (1<<31)
+#define FPGA_DBG_RM_NOCLAIM (1 << 31)
#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
#define CLAIM_ER_CLR (1 << 31)
@@ -2400,22 +2512,22 @@ enum i915_power_well_id {
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
-#define DERRMR_PIPEA_SCANLINE (1<<0)
-#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
-#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
-#define DERRMR_PIPEA_VBLANK (1<<3)
-#define DERRMR_PIPEA_HBLANK (1<<5)
-#define DERRMR_PIPEB_SCANLINE (1<<8)
-#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
-#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
-#define DERRMR_PIPEB_VBLANK (1<<11)
-#define DERRMR_PIPEB_HBLANK (1<<13)
+#define DERRMR_PIPEA_SCANLINE (1 << 0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
+#define DERRMR_PIPEA_VBLANK (1 << 3)
+#define DERRMR_PIPEA_HBLANK (1 << 5)
+#define DERRMR_PIPEB_SCANLINE (1 << 8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
+#define DERRMR_PIPEB_VBLANK (1 << 11)
+#define DERRMR_PIPEB_HBLANK (1 << 13)
/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
-#define DERRMR_PIPEC_SCANLINE (1<<14)
-#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
-#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
-#define DERRMR_PIPEC_VBLANK (1<<21)
-#define DERRMR_PIPEC_HBLANK (1<<22)
+#define DERRMR_PIPEC_SCANLINE (1 << 14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
+#define DERRMR_PIPEC_VBLANK (1 << 21)
+#define DERRMR_PIPEC_HBLANK (1 << 22)
/* GM45+ chicken bits -- debug workaround bits that may be required
@@ -2439,7 +2551,7 @@ enum i915_power_well_id {
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
-#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
+#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */
#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
#define MI_MODE _MMIO(0x209c)
@@ -2478,22 +2590,22 @@ enum i915_power_well_id {
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
-#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c)
-#define GFX_RUN_LIST_ENABLE (1<<15)
-#define GFX_INTERRUPT_STEERING (1<<14)
-#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
-#define GFX_SURFACE_FAULT_ENABLE (1<<12)
-#define GFX_REPLAY_MODE (1<<11)
-#define GFX_PSMI_GRANULARITY (1<<10)
-#define GFX_PPGTT_ENABLE (1<<9)
-#define GEN8_GFX_PPGTT_48B (1<<7)
-
-#define GFX_FORWARD_VBLANK_MASK (3<<5)
-#define GFX_FORWARD_VBLANK_NEVER (0<<5)
-#define GFX_FORWARD_VBLANK_ALWAYS (1<<5)
-#define GFX_FORWARD_VBLANK_COND (2<<5)
-
-#define GEN11_GFX_DISABLE_LEGACY_MODE (1<<3)
+#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base + 0x29c)
+#define GFX_RUN_LIST_ENABLE (1 << 15)
+#define GFX_INTERRUPT_STEERING (1 << 14)
+#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13)
+#define GFX_SURFACE_FAULT_ENABLE (1 << 12)
+#define GFX_REPLAY_MODE (1 << 11)
+#define GFX_PSMI_GRANULARITY (1 << 10)
+#define GFX_PPGTT_ENABLE (1 << 9)
+#define GEN8_GFX_PPGTT_48B (1 << 7)
+
+#define GFX_FORWARD_VBLANK_MASK (3 << 5)
+#define GFX_FORWARD_VBLANK_NEVER (0 << 5)
+#define GFX_FORWARD_VBLANK_ALWAYS (1 << 5)
+#define GFX_FORWARD_VBLANK_COND (2 << 5)
+
+#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3)
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
@@ -2507,8 +2619,8 @@ enum i915_power_well_id {
#define IMR _MMIO(0x20a8)
#define ISR _MMIO(0x20ac)
#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
-#define GINT_DIS (1<<22)
-#define GCFG_DIS (1<<8)
+#define GINT_DIS (1 << 22)
+#define GCFG_DIS (1 << 8)
#define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064)
#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
@@ -2518,35 +2630,35 @@ enum i915_power_well_id {
#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
#define VLV_PCBR_ADDR_SHIFT 12
-#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
+#define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
#define ESR _MMIO(0x20b8)
-#define GM45_ERROR_PAGE_TABLE (1<<5)
-#define GM45_ERROR_MEM_PRIV (1<<4)
-#define I915_ERROR_PAGE_TABLE (1<<4)
-#define GM45_ERROR_CP_PRIV (1<<3)
-#define I915_ERROR_MEMORY_REFRESH (1<<1)
-#define I915_ERROR_INSTRUCTION (1<<0)
+#define GM45_ERROR_PAGE_TABLE (1 << 5)
+#define GM45_ERROR_MEM_PRIV (1 << 4)
+#define I915_ERROR_PAGE_TABLE (1 << 4)
+#define GM45_ERROR_CP_PRIV (1 << 3)
+#define I915_ERROR_MEMORY_REFRESH (1 << 1)
+#define I915_ERROR_INSTRUCTION (1 << 0)
#define INSTPM _MMIO(0x20c0)
-#define INSTPM_SELF_EN (1<<12) /* 915GM only */
-#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
+#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
+#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
-#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
-#define INSTPM_TLB_INVALIDATE (1<<9)
-#define INSTPM_SYNC_FLUSH (1<<5)
+#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1 << 9)
+#define INSTPM_SYNC_FLUSH (1 << 5)
#define ACTHD _MMIO(0x20c8)
#define MEM_MODE _MMIO(0x20cc)
-#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
-#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
-#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
+#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
+#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
+#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* 85x only */
#define FW_BLC _MMIO(0x20d8)
#define FW_BLC2 _MMIO(0x20dc)
#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
-#define FW_BLC_SELF_EN_MASK (1<<31)
-#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
-#define FW_BLC_SELF_EN (1<<15) /* 945 only */
+#define FW_BLC_SELF_EN_MASK (1 << 31)
+#define FW_BLC_SELF_FIFO_MASK (1 << 16) /* 945 only */
+#define FW_BLC_SELF_EN (1 << 15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -2645,37 +2757,37 @@ enum i915_power_well_id {
#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */
#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
-#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
-#define CM0_IZ_OPT_DISABLE (1<<6)
-#define CM0_ZR_OPT_DISABLE (1<<5)
-#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
-#define CM0_DEPTH_EVICT_DISABLE (1<<4)
-#define CM0_COLOR_EVICT_DISABLE (1<<3)
-#define CM0_DEPTH_WRITE_DISABLE (1<<1)
-#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
+#define CM0_IZ_OPT_DISABLE (1 << 6)
+#define CM0_ZR_OPT_DISABLE (1 << 5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
+#define CM0_DEPTH_EVICT_DISABLE (1 << 4)
+#define CM0_COLOR_EVICT_DISABLE (1 << 3)
+#define CM0_DEPTH_WRITE_DISABLE (1 << 1)
+#define CM0_RC_OP_FLUSH_DISABLE (1 << 0)
#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
-#define GFX_FLSH_CNTL_EN (1<<0)
+#define GFX_FLSH_CNTL_EN (1 << 0)
#define ECOSKPD _MMIO(0x21d0)
-#define ECO_GATING_CX_ONLY (1<<3)
-#define ECO_FLIP_DONE (1<<0)
+#define ECO_GATING_CX_ONLY (1 << 3)
+#define ECO_FLIP_DONE (1 << 0)
#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
-#define RC_OP_FLUSH_ENABLE (1<<0)
-#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
+#define RC_OP_FLUSH_ENABLE (1 << 0)
+#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
-#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
-#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
-#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0)
#define GEN6_BLITTER_LOCK_SHIFT 16
-#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+#define GEN6_BLITTER_FBC_NOTIFY (1 << 3)
#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
-#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
+#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10)
#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
@@ -2714,6 +2826,10 @@ enum i915_power_well_id {
#define GEN10_F2_SS_DIS_SHIFT 18
#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+#define GEN10_MIRROR_FUSE3 _MMIO(0x9118)
+#define GEN10_L3BANK_PAIR_COUNT 4
+#define GEN10_L3BANK_MASK 0x0F
+
#define GEN8_EU_DISABLE0 _MMIO(0x9134)
#define GEN8_EU_DIS0_S0_MASK 0xffffff
#define GEN8_EU_DIS0_S1_SHIFT 24
@@ -2727,7 +2843,7 @@ enum i915_power_well_id {
#define GEN8_EU_DISABLE2 _MMIO(0x913c)
#define GEN8_EU_DIS2_S2_MASK 0xff
-#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4)
+#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
#define GEN10_EU_DISABLE3 _MMIO(0x9140)
#define GEN10_EU_DIS_SS_MASK 0xff
@@ -2784,44 +2900,43 @@ enum i915_power_well_id {
(IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
/* These are all the "old" interrupts */
-#define ILK_BSD_USER_INTERRUPT (1<<5)
-
-#define I915_PM_INTERRUPT (1<<31)
-#define I915_ISP_INTERRUPT (1<<22)
-#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
-#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
-#define I915_MIPIC_INTERRUPT (1<<19)
-#define I915_MIPIA_INTERRUPT (1<<18)
-#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
-#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
-#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1<<16)
-#define I915_MASTER_ERROR_INTERRUPT (1<<15)
-#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1<<14)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
-#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1<<13)
-#define I915_HWB_OOM_INTERRUPT (1<<13)
-#define I915_LPE_PIPE_C_INTERRUPT (1<<12)
-#define I915_SYNC_STATUS_INTERRUPT (1<<12)
-#define I915_MISC_INTERRUPT (1<<11)
-#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
-#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1<<10)
-#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
-#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1<<9)
-#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
-#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1<<8)
-#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
-#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
-#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
-#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
-#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
-#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1<<3)
-#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1<<2)
-#define I915_DEBUG_INTERRUPT (1<<2)
-#define I915_WINVALID_INTERRUPT (1<<1)
-#define I915_USER_INTERRUPT (1<<1)
-#define I915_ASLE_INTERRUPT (1<<0)
-#define I915_BSD_USER_INTERRUPT (1<<25)
+#define ILK_BSD_USER_INTERRUPT (1 << 5)
+
+#define I915_PM_INTERRUPT (1 << 31)
+#define I915_ISP_INTERRUPT (1 << 22)
+#define I915_LPE_PIPE_B_INTERRUPT (1 << 21)
+#define I915_LPE_PIPE_A_INTERRUPT (1 << 20)
+#define I915_MIPIC_INTERRUPT (1 << 19)
+#define I915_MIPIA_INTERRUPT (1 << 18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 18)
+#define I915_DISPLAY_PORT_INTERRUPT (1 << 17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16)
+#define I915_MASTER_ERROR_INTERRUPT (1 << 15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13)
+#define I915_HWB_OOM_INTERRUPT (1 << 13)
+#define I915_LPE_PIPE_C_INTERRUPT (1 << 12)
+#define I915_SYNC_STATUS_INTERRUPT (1 << 12)
+#define I915_MISC_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1 << 9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1 << 9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1 << 7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1 << 6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1 << 5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1 << 4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1 << 3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1 << 2)
+#define I915_DEBUG_INTERRUPT (1 << 2)
+#define I915_WINVALID_INTERRUPT (1 << 1)
+#define I915_USER_INTERRUPT (1 << 1)
+#define I915_ASLE_INTERRUPT (1 << 0)
+#define I915_BSD_USER_INTERRUPT (1 << 25)
#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000)
#define I915_HDMI_LPE_AUDIO_SIZE 0x1000
@@ -2844,19 +2959,19 @@ enum i915_power_well_id {
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
-#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
-#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
-#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
-#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_TS_SCHED_HS1 (0x5 << 16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3 << 16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1 << 16)
+#define GEN7_FF_TS_SCHED_HW (0x0 << 16) /* Default */
#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
-#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
-#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
-#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
-#define GEN7_FF_VS_SCHED_HW (0x0<<12)
-#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
-#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
-#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
-#define GEN7_FF_DS_SCHED_HW (0x0<<4)
+#define GEN7_FF_VS_SCHED_HS1 (0x5 << 12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3 << 12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1 << 12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0 << 12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5 << 4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3 << 4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1 << 4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0 << 4)
/*
* Framebuffer compression (915+ only)
@@ -2865,51 +2980,51 @@ enum i915_power_well_id {
#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
#define FBC_CONTROL _MMIO(0x3208)
-#define FBC_CTL_EN (1<<31)
-#define FBC_CTL_PERIODIC (1<<30)
+#define FBC_CTL_EN (1 << 31)
+#define FBC_CTL_PERIODIC (1 << 30)
#define FBC_CTL_INTERVAL_SHIFT (16)
-#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
-#define FBC_CTL_C3_IDLE (1<<13)
+#define FBC_CTL_UNCOMPRESSIBLE (1 << 14)
+#define FBC_CTL_C3_IDLE (1 << 13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO_SHIFT (0)
#define FBC_COMMAND _MMIO(0x320c)
-#define FBC_CMD_COMPRESS (1<<0)
+#define FBC_CMD_COMPRESS (1 << 0)
#define FBC_STATUS _MMIO(0x3210)
-#define FBC_STAT_COMPRESSING (1<<31)
-#define FBC_STAT_COMPRESSED (1<<30)
-#define FBC_STAT_MODIFIED (1<<29)
+#define FBC_STAT_COMPRESSING (1 << 31)
+#define FBC_STAT_COMPRESSED (1 << 30)
+#define FBC_STAT_MODIFIED (1 << 29)
#define FBC_STAT_CURRENT_LINE_SHIFT (0)
#define FBC_CONTROL2 _MMIO(0x3214)
-#define FBC_CTL_FENCE_DBL (0<<4)
-#define FBC_CTL_IDLE_IMM (0<<2)
-#define FBC_CTL_IDLE_FULL (1<<2)
-#define FBC_CTL_IDLE_LINE (2<<2)
-#define FBC_CTL_IDLE_DEBUG (3<<2)
-#define FBC_CTL_CPU_FENCE (1<<1)
-#define FBC_CTL_PLANE(plane) ((plane)<<0)
+#define FBC_CTL_FENCE_DBL (0 << 4)
+#define FBC_CTL_IDLE_IMM (0 << 2)
+#define FBC_CTL_IDLE_FULL (1 << 2)
+#define FBC_CTL_IDLE_LINE (2 << 2)
+#define FBC_CTL_IDLE_DEBUG (3 << 2)
+#define FBC_CTL_CPU_FENCE (1 << 1)
+#define FBC_CTL_PLANE(plane) ((plane) << 0)
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
#define FBC_LL_SIZE (1536)
#define FBC_LLC_READ_CTRL _MMIO(0x9044)
-#define FBC_LLC_FULLY_OPEN (1<<30)
+#define FBC_LLC_FULLY_OPEN (1 << 30)
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE _MMIO(0x3200)
#define DPFC_CONTROL _MMIO(0x3208)
-#define DPFC_CTL_EN (1<<31)
-#define DPFC_CTL_PLANE(plane) ((plane)<<30)
-#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
-#define DPFC_CTL_FENCE_EN (1<<29)
-#define IVB_DPFC_CTL_FENCE_EN (1<<28)
-#define DPFC_CTL_PERSISTENT_MODE (1<<25)
-#define DPFC_SR_EN (1<<10)
-#define DPFC_CTL_LIMIT_1X (0<<6)
-#define DPFC_CTL_LIMIT_2X (1<<6)
-#define DPFC_CTL_LIMIT_4X (2<<6)
+#define DPFC_CTL_EN (1 << 31)
+#define DPFC_CTL_PLANE(plane) ((plane) << 30)
+#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29)
+#define DPFC_CTL_FENCE_EN (1 << 29)
+#define IVB_DPFC_CTL_FENCE_EN (1 << 28)
+#define DPFC_CTL_PERSISTENT_MODE (1 << 25)
+#define DPFC_SR_EN (1 << 10)
+#define DPFC_CTL_LIMIT_1X (0 << 6)
+#define DPFC_CTL_LIMIT_2X (1 << 6)
+#define DPFC_CTL_LIMIT_4X (2 << 6)
#define DPFC_RECOMP_CTL _MMIO(0x320c)
-#define DPFC_RECOMP_STALL_EN (1<<27)
+#define DPFC_RECOMP_STALL_EN (1 << 27)
#define DPFC_RECOMP_STALL_WM_SHIFT (16)
#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
@@ -2922,12 +3037,12 @@ enum i915_power_well_id {
#define DPFC_STATUS2 _MMIO(0x3214)
#define DPFC_FENCE_YOFF _MMIO(0x3218)
#define DPFC_CHICKEN _MMIO(0x3224)
-#define DPFC_HT_MODIFY (1<<31)
+#define DPFC_HT_MODIFY (1 << 31)
/* Framebuffer compression for Ironlake */
#define ILK_DPFC_CB_BASE _MMIO(0x43200)
#define ILK_DPFC_CONTROL _MMIO(0x43208)
-#define FBC_CTL_FALSE_COLOR (1<<10)
+#define FBC_CTL_FALSE_COLOR (1 << 10)
/* Bits 28-8 are reserved */
#define DPFC_RESERVED (0x1FFFFF00)
#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
@@ -2938,15 +3053,15 @@ enum i915_power_well_id {
#define BDW_FBC_COMP_SEG_MASK 0xfff
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
-#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
-#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
+#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
+#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
-#define ILK_FBC_RT_VALID (1<<0)
-#define SNB_FBC_FRONT_BUFFER (1<<1)
+#define ILK_FBC_RT_VALID (1 << 0)
+#define SNB_FBC_FRONT_BUFFER (1 << 1)
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
-#define ILK_FBCQ_DIS (1<<22)
-#define ILK_PABSTRETCH_DIS (1<<21)
+#define ILK_FBCQ_DIS (1 << 22)
+#define ILK_PABSTRETCH_DIS (1 << 21)
/*
@@ -2955,7 +3070,7 @@ enum i915_power_well_id {
* The following two registers are of type GTTMMADR
*/
#define SNB_DPFC_CTL_SA _MMIO(0x100100)
-#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define SNB_CPU_FENCE_ENABLE (1 << 29)
#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
/* Framebuffer compression for Ivybridge */
@@ -2965,8 +3080,8 @@ enum i915_power_well_id {
#define IPS_ENABLE (1 << 31)
#define MSG_FBC_REND_STATE _MMIO(0x50380)
-#define FBC_REND_NUKE (1<<2)
-#define FBC_REND_CACHE_CLEAN (1<<1)
+#define FBC_REND_NUKE (1 << 2)
+#define FBC_REND_CACHE_CLEAN (1 << 1)
/*
* GPIO regs
@@ -2979,6 +3094,10 @@ enum i915_power_well_id {
#define GPIOF _MMIO(0x5024)
#define GPIOG _MMIO(0x5028)
#define GPIOH _MMIO(0x502c)
+#define GPIOJ _MMIO(0x5034)
+#define GPIOK _MMIO(0x5038)
+#define GPIOL _MMIO(0x503C)
+#define GPIOM _MMIO(0x5040)
# define GPIO_CLOCK_DIR_MASK (1 << 0)
# define GPIO_CLOCK_DIR_IN (0 << 1)
# define GPIO_CLOCK_DIR_OUT (1 << 1)
@@ -2995,12 +3114,13 @@ enum i915_power_well_id {
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
-#define GMBUS_AKSV_SELECT (1<<11)
-#define GMBUS_RATE_100KHZ (0<<8)
-#define GMBUS_RATE_50KHZ (1<<8)
-#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
-#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
-#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_AKSV_SELECT (1 << 11)
+#define GMBUS_RATE_100KHZ (0 << 8)
+#define GMBUS_RATE_50KHZ (1 << 8)
+#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
#define GMBUS_PIN_DISABLED 0
#define GMBUS_PIN_SSC 1
#define GMBUS_PIN_VGADDC 2
@@ -3021,36 +3141,37 @@ enum i915_power_well_id {
#define GMBUS_NUM_PINS 13 /* including 0 */
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
-#define GMBUS_SW_CLR_INT (1<<31)
-#define GMBUS_SW_RDY (1<<30)
-#define GMBUS_ENT (1<<29) /* enable timeout */
-#define GMBUS_CYCLE_NONE (0<<25)
-#define GMBUS_CYCLE_WAIT (1<<25)
-#define GMBUS_CYCLE_INDEX (2<<25)
-#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_SW_CLR_INT (1 << 31)
+#define GMBUS_SW_RDY (1 << 30)
+#define GMBUS_ENT (1 << 29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0 << 25)
+#define GMBUS_CYCLE_WAIT (1 << 25)
+#define GMBUS_CYCLE_INDEX (2 << 25)
+#define GMBUS_CYCLE_STOP (4 << 25)
#define GMBUS_BYTE_COUNT_SHIFT 16
#define GMBUS_BYTE_COUNT_MAX 256U
+#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
#define GMBUS_SLAVE_INDEX_SHIFT 8
#define GMBUS_SLAVE_ADDR_SHIFT 1
-#define GMBUS_SLAVE_READ (1<<0)
-#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS_SLAVE_READ (1 << 0)
+#define GMBUS_SLAVE_WRITE (0 << 0)
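
Sketch of how the GMBUS1 fields above compose into one command word, loosely mirroring how the driver programs a wait-cycle read with stop (the helper and its name are hypothetical; constants are copied from the defines):

#include <stdint.h>

#define GMBUS_SW_RDY		(1u << 30)
#define GMBUS_CYCLE_WAIT	(1u << 25)
#define GMBUS_CYCLE_STOP	(4u << 25)
#define GMBUS_BYTE_COUNT_SHIFT	16
#define GMBUS_SLAVE_ADDR_SHIFT	1
#define GMBUS_SLAVE_READ	(1u << 0)

static uint32_t gmbus1_read_cmd(uint8_t addr7, uint16_t len)
{
	return GMBUS_SW_RDY | GMBUS_CYCLE_WAIT | GMBUS_CYCLE_STOP |
	       ((uint32_t)len << GMBUS_BYTE_COUNT_SHIFT) |
	       ((uint32_t)addr7 << GMBUS_SLAVE_ADDR_SHIFT) |
	       GMBUS_SLAVE_READ;
}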
#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
-#define GMBUS_INUSE (1<<15)
-#define GMBUS_HW_WAIT_PHASE (1<<14)
-#define GMBUS_STALL_TIMEOUT (1<<13)
-#define GMBUS_INT (1<<12)
-#define GMBUS_HW_RDY (1<<11)
-#define GMBUS_SATOER (1<<10)
-#define GMBUS_ACTIVE (1<<9)
+#define GMBUS_INUSE (1 << 15)
+#define GMBUS_HW_WAIT_PHASE (1 << 14)
+#define GMBUS_STALL_TIMEOUT (1 << 13)
+#define GMBUS_INT (1 << 12)
+#define GMBUS_HW_RDY (1 << 11)
+#define GMBUS_SATOER (1 << 10)
+#define GMBUS_ACTIVE (1 << 9)
#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
-#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
-#define GMBUS_NAK_EN (1<<3)
-#define GMBUS_IDLE_EN (1<<2)
-#define GMBUS_HW_WAIT_EN (1<<1)
-#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
+#define GMBUS_NAK_EN (1 << 3)
+#define GMBUS_IDLE_EN (1 << 2)
+#define GMBUS_HW_WAIT_EN (1 << 1)
+#define GMBUS_HW_RDY_EN (1 << 0)
#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
-#define GMBUS_2BYTE_INDEX_EN (1<<31)
+#define GMBUS_2BYTE_INDEX_EN (1 << 31)
/*
* Clock control & power management
@@ -3088,10 +3209,10 @@ enum i915_power_well_id {
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define DPLL_LOCK_VLV (1<<15)
-#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
-#define DPLL_INTEGRATED_REF_CLK_VLV (1<<13)
-#define DPLL_SSC_REF_CLK_CHV (1<<13)
+#define DPLL_LOCK_VLV (1 << 15)
+#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
+#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
+#define DPLL_SSC_REF_CLK_CHV (1 << 13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
@@ -3101,20 +3222,20 @@ enum i915_power_well_id {
#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
-#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
+#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
#define PHY_LDO_DELAY_0NS 0x0
#define PHY_LDO_DELAY_200NS 0x1
#define PHY_LDO_DELAY_600NS 0x2
-#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23))
-#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8*(phy)+4*(ch)+11))
+#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
+#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
#define PHY_CH_SU_PSR 0x1
#define PHY_CH_DEEP_PSR 0x7
-#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
+#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
-#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
-#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
-#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))
+#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
+#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
+#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
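
A sketch of the per-PHY/per-channel bit placement in DISPLAY_PHY_CONTROL (macros copied from above; the printout is illustrative):

#include <stdio.h>

#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)	(1 << (2 * (phy) + (ch) + 27))
#define PHY_CH_POWER_MODE(mode, phy, ch)	((mode) << (6 * (phy) + 3 * (ch) + 2))

int main(void)
{
	/* PHY0 channel 1: override-enable lands at bit 28, and its
	 * 3-bit power mode field (here PHY_CH_DEEP_PSR, 0x7) at bit 5 */
	printf("ovrd_en 0x%08x, deep-psr 0x%08x\n",
	       PHY_CH_POWER_DOWN_OVRD_EN(0, 1),
	       PHY_CH_POWER_MODE(0x7, 0, 1));
	return 0;
}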
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -3135,7 +3256,7 @@ enum i915_power_well_id {
/* Ironlake */
# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
-# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
@@ -3224,10 +3345,10 @@ enum i915_power_well_id {
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define D_STATE _MMIO(0x6104)
-#define DSTATE_GFX_RESET_I830 (1<<6)
-#define DSTATE_PLL_D3_OFF (1<<3)
-#define DSTATE_GFX_CLOCK_GATING (1<<1)
-#define DSTATE_DOT_CLOCK_GATING (1<<0)
+#define DSTATE_GFX_RESET_I830 (1 << 6)
+#define DSTATE_PLL_D3_OFF (1 << 3)
+#define DSTATE_GFX_CLOCK_GATING (1 << 1)
+#define DSTATE_DOT_CLOCK_GATING (1 << 0)
#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
@@ -3344,7 +3465,7 @@ enum i915_power_well_id {
#define DEUC _MMIO(0x6214) /* CRL only */
#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
-#define FW_CSPWRDWNEN (1<<15)
+#define FW_CSPWRDWNEN (1 << 15)
#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
@@ -3469,7 +3590,7 @@ enum i915_power_well_id {
#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
#define TSC1 _MMIO(0x11001)
-#define TSE (1<<0)
+#define TSE (1 << 0)
#define TR1 _MMIO(0x11006)
#define TSFS _MMIO(0x11020)
#define TSFS_SLOPE_MASK 0x0000ff00
@@ -3515,23 +3636,23 @@ enum i915_power_well_id {
#define MEMCTL_CMD_CHVID 3
#define MEMCTL_CMD_VMMOFF 4
#define MEMCTL_CMD_VMMON 5
-#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
+#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears
when command complete */
#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
#define MEMCTL_FREQ_SHIFT 8
-#define MEMCTL_SFCAVM (1<<7)
+#define MEMCTL_SFCAVM (1 << 7)
#define MEMCTL_TGT_VID_MASK 0x007f
#define MEMIHYST _MMIO(0x1117c)
#define MEMINTREN _MMIO(0x11180) /* 16 bits */
-#define MEMINT_RSEXIT_EN (1<<8)
-#define MEMINT_CX_SUPR_EN (1<<7)
-#define MEMINT_CONT_BUSY_EN (1<<6)
-#define MEMINT_AVG_BUSY_EN (1<<5)
-#define MEMINT_EVAL_CHG_EN (1<<4)
-#define MEMINT_MON_IDLE_EN (1<<3)
-#define MEMINT_UP_EVAL_EN (1<<2)
-#define MEMINT_DOWN_EVAL_EN (1<<1)
-#define MEMINT_SW_CMD_EN (1<<0)
+#define MEMINT_RSEXIT_EN (1 << 8)
+#define MEMINT_CX_SUPR_EN (1 << 7)
+#define MEMINT_CONT_BUSY_EN (1 << 6)
+#define MEMINT_AVG_BUSY_EN (1 << 5)
+#define MEMINT_EVAL_CHG_EN (1 << 4)
+#define MEMINT_MON_IDLE_EN (1 << 3)
+#define MEMINT_UP_EVAL_EN (1 << 2)
+#define MEMINT_DOWN_EVAL_EN (1 << 1)
+#define MEMINT_SW_CMD_EN (1 << 0)
#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
#define MEM_RSEXIT_MASK 0xc000
#define MEM_RSEXIT_SHIFT 14
@@ -3553,26 +3674,26 @@ enum i915_power_well_id {
#define MEM_INT_STEER_SMI 2
#define MEM_INT_STEER_SCI 3
#define MEMINTRSTS _MMIO(0x11184)
-#define MEMINT_RSEXIT (1<<7)
-#define MEMINT_CONT_BUSY (1<<6)
-#define MEMINT_AVG_BUSY (1<<5)
-#define MEMINT_EVAL_CHG (1<<4)
-#define MEMINT_MON_IDLE (1<<3)
-#define MEMINT_UP_EVAL (1<<2)
-#define MEMINT_DOWN_EVAL (1<<1)
-#define MEMINT_SW_CMD (1<<0)
+#define MEMINT_RSEXIT (1 << 7)
+#define MEMINT_CONT_BUSY (1 << 6)
+#define MEMINT_AVG_BUSY (1 << 5)
+#define MEMINT_EVAL_CHG (1 << 4)
+#define MEMINT_MON_IDLE (1 << 3)
+#define MEMINT_UP_EVAL (1 << 2)
+#define MEMINT_DOWN_EVAL (1 << 1)
+#define MEMINT_SW_CMD (1 << 0)
#define MEMMODECTL _MMIO(0x11190)
-#define MEMMODE_BOOST_EN (1<<31)
+#define MEMMODE_BOOST_EN (1 << 31)
#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
#define MEMMODE_BOOST_FREQ_SHIFT 24
#define MEMMODE_IDLE_MODE_MASK 0x00030000
#define MEMMODE_IDLE_MODE_SHIFT 16
#define MEMMODE_IDLE_MODE_EVAL 0
#define MEMMODE_IDLE_MODE_CONT 1
-#define MEMMODE_HWIDLE_EN (1<<15)
-#define MEMMODE_SWMODE_EN (1<<14)
-#define MEMMODE_RCLK_GATE (1<<13)
-#define MEMMODE_HW_UPDATE (1<<12)
+#define MEMMODE_HWIDLE_EN (1 << 15)
+#define MEMMODE_SWMODE_EN (1 << 14)
+#define MEMMODE_RCLK_GATE (1 << 13)
+#define MEMMODE_HW_UPDATE (1 << 12)
#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
#define MEMMODE_FSTART_SHIFT 8
#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
@@ -3586,8 +3707,8 @@ enum i915_power_well_id {
#define SWMEMCMD_TARVID (3 << 13)
#define SWMEMCMD_VRM_OFF (4 << 13)
#define SWMEMCMD_VRM_ON (5 << 13)
-#define CMDSTS (1<<12)
-#define SFCAVM (1<<11)
+#define CMDSTS (1 << 12)
+#define SFCAVM (1 << 11)
#define SWFREQ_MASK 0x0380 /* P0-7 */
#define SWFREQ_SHIFT 7
#define TARVID_MASK 0x001f
@@ -3596,49 +3717,49 @@ enum i915_power_well_id {
#define RCUPEI _MMIO(0x111b0)
#define RCDNEI _MMIO(0x111b4)
#define RSTDBYCTL _MMIO(0x111b8)
-#define RS1EN (1<<31)
-#define RS2EN (1<<30)
-#define RS3EN (1<<29)
-#define D3RS3EN (1<<28) /* Display D3 imlies RS3 */
-#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
-#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
-#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
-#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
-#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
-#define RSX_STATUS_MASK (7<<20)
-#define RSX_STATUS_ON (0<<20)
-#define RSX_STATUS_RC1 (1<<20)
-#define RSX_STATUS_RC1E (2<<20)
-#define RSX_STATUS_RS1 (3<<20)
-#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
-#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
-#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
-#define RSX_STATUS_RSVD2 (7<<20)
-#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
-#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
-#define JRSC (1<<17) /* rsx coupled to cpu c-state */
-#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
-#define RS1CONTSAV_MASK (3<<14)
-#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
-#define RS1CONTSAV_RSVD (1<<14)
-#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
-#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
-#define NORMSLEXLAT_MASK (3<<12)
-#define SLOW_RS123 (0<<12)
-#define SLOW_RS23 (1<<12)
-#define SLOW_RS3 (2<<12)
-#define NORMAL_RS123 (3<<12)
-#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
-#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
-#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
-#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
-#define RS_CSTATE_MASK (3<<4)
-#define RS_CSTATE_C367_RS1 (0<<4)
-#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
-#define RS_CSTATE_RSVD (2<<4)
-#define RS_CSTATE_C367_RS2 (3<<4)
-#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
-#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
+#define RS1EN (1 << 31)
+#define RS2EN (1 << 30)
+#define RS3EN (1 << 29)
+#define D3RS3EN (1 << 28) /* Display D3 implies RS3 */
+#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */
+#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7 << 20)
+#define RSX_STATUS_ON (0 << 20)
+#define RSX_STATUS_RC1 (1 << 20)
+#define RSX_STATUS_RC1E (2 << 20)
+#define RSX_STATUS_RS1 (3 << 20)
+#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7 << 20)
+#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */
+#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1 << 17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3 << 14)
+#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1 << 14)
+#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3 << 12)
+#define SLOW_RS123 (0 << 12)
+#define SLOW_RS23 (1 << 12)
+#define SLOW_RS3 (2 << 12)
+#define NORMAL_RS123 (3 << 12)
+#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */
+#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3 << 4)
+#define RS_CSTATE_C367_RS1 (0 << 4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
+#define RS_CSTATE_RSVD (2 << 4)
+#define RS_CSTATE_C367_RS2 (3 << 4)
+#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */
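/*
 * Editor's sketch (not part of this patch): decoding the RSx status field
 * above. The standby control register these bits belong to (RSTDBYCTL,
 * defined just above this hunk) and the driver's I915_READ() MMIO helper
 * are assumed here:
 *
 *	u32 rstdby = I915_READ(RSTDBYCTL);
 *	if ((rstdby & RSX_STATUS_MASK) == RSX_STATUS_RS2)
 *		; // render standby 2 (aka rc6) is active
 */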
#define VIDCTL _MMIO(0x111c0)
#define VIDSTS _MMIO(0x111c8)
#define VIDSTART _MMIO(0x111cc) /* 8 bits */
@@ -3647,7 +3768,7 @@ enum i915_power_well_id {
#define MEMSTAT_VID_SHIFT 8
#define MEMSTAT_PSTATE_MASK 0x00f8
#define MEMSTAT_PSTATE_SHIFT 3
-#define MEMSTAT_MON_ACTV (1<<2)
+#define MEMSTAT_MON_ACTV (1 << 2)
#define MEMSTAT_SRC_CTL_MASK 0x0003
#define MEMSTAT_SRC_CTL_CORE 0
#define MEMSTAT_SRC_CTL_TRB 1
@@ -3656,7 +3777,7 @@ enum i915_power_well_id {
#define RCPREVBSYTUPAVG _MMIO(0x113b8)
#define RCPREVBSYTDNAVG _MMIO(0x113bc)
#define PMMISC _MMIO(0x11214)
-#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
+#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */
#define SDEW _MMIO(0x1124c)
#define CSIEW0 _MMIO(0x11250)
#define CSIEW1 _MMIO(0x11254)
@@ -3673,8 +3794,8 @@ enum i915_power_well_id {
#define RPPREVBSYTUPAVG _MMIO(0x113b8)
#define RPPREVBSYTDNAVG _MMIO(0x113bc)
#define ECR _MMIO(0x11600)
-#define ECR_GPFE (1<<31)
-#define ECR_IMONE (1<<30)
+#define ECR_GPFE (1 << 31)
+#define ECR_IMONE (1 << 30)
#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
#define OGW0 _MMIO(0x11608)
#define OGW1 _MMIO(0x1160c)
@@ -3781,11 +3902,11 @@ enum {
FAULT_AND_CONTINUE /* Unsupported */
};
-#define GEN8_CTX_VALID (1<<0)
-#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
-#define GEN8_CTX_FORCE_RESTORE (1<<2)
-#define GEN8_CTX_L3LLC_COHERENT (1<<5)
-#define GEN8_CTX_PRIVILEGE (1<<8)
+#define GEN8_CTX_VALID (1 << 0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
+#define GEN8_CTX_FORCE_RESTORE (1 << 2)
+#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
+#define GEN8_CTX_PRIVILEGE (1 << 8)
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ID_SHIFT 32
@@ -3807,7 +3928,7 @@ enum {
#define OVADD _MMIO(0x30000)
#define DOVSTA _MMIO(0x30008)
-#define OC_BUF (0x3<<20)
+#define OC_BUF (0x3 << 20)
#define OGAMC5 _MMIO(0x30010)
#define OGAMC4 _MMIO(0x30014)
#define OGAMC3 _MMIO(0x30018)
@@ -3975,64 +4096,65 @@ enum {
/* VLV eDP PSR registers */
#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
-#define VLV_EDP_PSR_ENABLE (1<<0)
-#define VLV_EDP_PSR_RESET (1<<1)
-#define VLV_EDP_PSR_MODE_MASK (7<<2)
-#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3)
-#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2)
-#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7)
-#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8)
-#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9)
-#define VLV_EDP_PSR_DBL_FRAME (1<<10)
-#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
+#define VLV_EDP_PSR_ENABLE (1 << 0)
+#define VLV_EDP_PSR_RESET (1 << 1)
+#define VLV_EDP_PSR_MODE_MASK (7 << 2)
+#define VLV_EDP_PSR_MODE_HW_TIMER (1 << 3)
+#define VLV_EDP_PSR_MODE_SW_TIMER (1 << 2)
+#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1 << 7)
+#define VLV_EDP_PSR_ACTIVE_ENTRY (1 << 8)
+#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1 << 9)
+#define VLV_EDP_PSR_DBL_FRAME (1 << 10)
+#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff << 16)
#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
-#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
-#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
-#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
+#define VLV_EDP_PSR_SDP_FREQ_MASK (3 << 30)
+#define VLV_EDP_PSR_SDP_FREQ_ONCE (1 << 31)
+#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1 << 30)
#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
-#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3)
+#define VLV_EDP_PSR_LAST_STATE_MASK (7 << 3)
#define VLV_EDP_PSR_CURR_STATE_MASK 7
-#define VLV_EDP_PSR_DISABLED (0<<0)
-#define VLV_EDP_PSR_INACTIVE (1<<0)
-#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0)
-#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0)
-#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
-#define VLV_EDP_PSR_EXIT (5<<0)
-#define VLV_EDP_PSR_IN_TRANS (1<<7)
+#define VLV_EDP_PSR_DISABLED (0 << 0)
+#define VLV_EDP_PSR_INACTIVE (1 << 0)
+#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2 << 0)
+#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3 << 0)
+#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4 << 0)
+#define VLV_EDP_PSR_EXIT (5 << 0)
+#define VLV_EDP_PSR_IN_TRANS (1 << 7)
#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
/* HSW+ eDP PSR registers */
#define HSW_EDP_PSR_BASE 0x64800
#define BDW_EDP_PSR_BASE 0x6f800
#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
-#define EDP_PSR_ENABLE (1<<31)
-#define BDW_PSR_SINGLE_FRAME (1<<30)
-#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1<<29) /* SW can't modify */
-#define EDP_PSR_LINK_STANDBY (1<<27)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
+#define EDP_PSR_ENABLE (1 << 31)
+#define BDW_PSR_SINGLE_FRAME (1 << 30)
+#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
+#define EDP_PSR_LINK_STANDBY (1 << 27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3 << 25)
#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
-#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
-#define EDP_PSR_TP1_TP2_SEL (0<<11)
-#define EDP_PSR_TP1_TP3_SEL (1<<11)
-#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
-#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
-#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
-#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
-#define EDP_PSR_TP1_TIME_500us (0<<4)
-#define EDP_PSR_TP1_TIME_100us (1<<4)
-#define EDP_PSR_TP1_TIME_2500us (2<<4)
-#define EDP_PSR_TP1_TIME_0us (3<<4)
+#define EDP_PSR_SKIP_AUX_EXIT (1 << 12)
+#define EDP_PSR_TP1_TP2_SEL (0 << 11)
+#define EDP_PSR_TP1_TP3_SEL (1 << 11)
+#define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */
+#define EDP_PSR_TP2_TP3_TIME_500us (0 << 8)
+#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8)
+#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8)
+#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8)
+#define EDP_PSR_TP1_TIME_500us (0 << 4)
+#define EDP_PSR_TP1_TIME_100us (1 << 4)
+#define EDP_PSR_TP1_TIME_2500us (2 << 4)
+#define EDP_PSR_TP1_TIME_0us (3 << 4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
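/*
 * Editor's sketch (assumed usage, not from this patch): composing a PSR1
 * control value from the fields above. 'idle_frames' is a hypothetical
 * local and I915_WRITE() the driver's usual MMIO write helper:
 *
 *	u32 val = EDP_PSR_ENABLE | EDP_PSR_TP1_TIME_100us |
 *		  EDP_PSR_TP2_TP3_TIME_100us |
 *		  (idle_frames << EDP_PSR_IDLE_FRAME_SHIFT);
 *	I915_WRITE(EDP_PSR_CTL, val);
 */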
/* Bspec claims those aren't shifted but stay at 0x64800 */
@@ -4052,55 +4174,56 @@ enum {
#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
-#define EDP_PSR_STATUS_STATE_MASK (7<<29)
-#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
-#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
-#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
-#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
-#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
-#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
-#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
-#define EDP_PSR_STATUS_LINK_MASK (3<<26)
-#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
-#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
-#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
+#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
+#define EDP_PSR_STATUS_STATE_SHIFT 29
+#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
+#define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29)
+#define EDP_PSR_STATUS_STATE_SRDENT (2 << 29)
+#define EDP_PSR_STATUS_STATE_BUFOFF (3 << 29)
+#define EDP_PSR_STATUS_STATE_BUFON (4 << 29)
+#define EDP_PSR_STATUS_STATE_AUXACK (5 << 29)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK (6 << 29)
+#define EDP_PSR_STATUS_LINK_MASK (3 << 26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF (0 << 26)
+#define EDP_PSR_STATUS_LINK_FULL_ON (1 << 26)
+#define EDP_PSR_STATUS_LINK_STANDBY (2 << 26)
#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
#define EDP_PSR_STATUS_COUNT_SHIFT 16
#define EDP_PSR_STATUS_COUNT_MASK 0xf
-#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
-#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
-#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
-#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
-#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
+#define EDP_PSR_STATUS_AUX_ERROR (1 << 15)
+#define EDP_PSR_STATUS_AUX_SENDING (1 << 12)
+#define EDP_PSR_STATUS_SENDING_IDLE (1 << 9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1 << 8)
+#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
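/*
 * Editor's sketch: the EDP_PSR_STATUS_STATE_SHIFT added above pairs with
 * the existing mask, so the hardware state can be extracted as a number
 * instead of being compared against each *_STATE_* value in turn:
 *
 *	u32 state = (I915_READ(EDP_PSR_STATUS) & EDP_PSR_STATUS_STATE_MASK)
 *		    >> EDP_PSR_STATUS_STATE_SHIFT;
 */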
#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
-#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
-#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
-#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
-#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
-#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16)
-#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */
+#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
+#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
+#define EDP_PSR_DEBUG_MASK_HPD (1 << 25)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16)
+#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
#define EDP_PSR2_CTL _MMIO(0x6f900)
-#define EDP_PSR2_ENABLE (1<<31)
-#define EDP_SU_TRACK_ENABLE (1<<30)
-#define EDP_Y_COORDINATE_VALID (1<<26) /* GLK and CNL+ */
-#define EDP_Y_COORDINATE_ENABLE (1<<25) /* GLK and CNL+ */
-#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
-#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
-#define EDP_PSR2_TP2_TIME_500 (0<<8)
-#define EDP_PSR2_TP2_TIME_100 (1<<8)
-#define EDP_PSR2_TP2_TIME_2500 (2<<8)
-#define EDP_PSR2_TP2_TIME_50 (3<<8)
-#define EDP_PSR2_TP2_TIME_MASK (3<<8)
+#define EDP_PSR2_ENABLE (1 << 31)
+#define EDP_SU_TRACK_ENABLE (1 << 30)
+#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
+#define EDP_Y_COORDINATE_ENABLE (1 << 25) /* GLK and CNL+ */
+#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
+#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
+#define EDP_PSR2_TP2_TIME_500us (0 << 8)
+#define EDP_PSR2_TP2_TIME_100us (1 << 8)
+#define EDP_PSR2_TP2_TIME_2500us (2 << 8)
+#define EDP_PSR2_TP2_TIME_50us (3 << 8)
+#define EDP_PSR2_TP2_TIME_MASK (3 << 8)
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
-#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
-#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
+#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4)
+#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
#define EDP_PSR2_IDLE_FRAME_MASK 0xf
#define EDP_PSR2_IDLE_FRAME_SHIFT 0
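/*
 * Editor's sketch (assumed usage): the parameterized macros above place a
 * raw value at the right offset; callers are expected to keep it within the
 * corresponding *_MASK. 'frames' and 'idle_frames' are hypothetical locals:
 *
 *	u32 val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE |
 *		  EDP_PSR2_TP2_TIME_100us |
 *		  EDP_PSR2_FRAME_BEFORE_SU(frames) |
 *		  (idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT);
 */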
@@ -4128,7 +4251,7 @@ enum {
#define PSR_EVENT_PSR_DISABLE (1 << 0)
#define EDP_PSR2_STATUS _MMIO(0x6f940)
-#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
+#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
/* VGA port control */
@@ -4136,47 +4259,48 @@ enum {
#define PCH_ADPA _MMIO(0xe1100)
#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
-#define ADPA_DAC_ENABLE (1<<31)
+#define ADPA_DAC_ENABLE (1 << 31)
#define ADPA_DAC_DISABLE 0
-#define ADPA_PIPE_SELECT_MASK (1<<30)
-#define ADPA_PIPE_A_SELECT 0
-#define ADPA_PIPE_B_SELECT (1<<30)
-#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
-/* CPT uses bits 29:30 for pch transcoder select */
+#define ADPA_PIPE_SEL_SHIFT 30
+#define ADPA_PIPE_SEL_MASK (1 << 30)
+#define ADPA_PIPE_SEL(pipe) ((pipe) << 30)
+#define ADPA_PIPE_SEL_SHIFT_CPT 29
+#define ADPA_PIPE_SEL_MASK_CPT (3 << 29)
+#define ADPA_PIPE_SEL_CPT(pipe) ((pipe) << 29)
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2 << 24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1 << 23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0 << 22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1 << 22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0 << 21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1 << 21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0 << 20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1 << 20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3 << 18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0 << 17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1 << 17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1 << 16)
+#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_VSYNC_CNTL_DISABLE (1 << 10)
#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_HSYNC_CNTL_DISABLE (1 << 11)
#define ADPA_HSYNC_CNTL_ENABLE 0
-#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
#define ADPA_VSYNC_ACTIVE_LOW 0
-#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
#define ADPA_HSYNC_ACTIVE_LOW 0
-#define ADPA_DPMS_MASK (~(3<<10))
-#define ADPA_DPMS_ON (0<<10)
-#define ADPA_DPMS_SUSPEND (1<<10)
-#define ADPA_DPMS_STANDBY (2<<10)
-#define ADPA_DPMS_OFF (3<<10)
+#define ADPA_DPMS_MASK (~(3 << 10))
+#define ADPA_DPMS_ON (0 << 10)
+#define ADPA_DPMS_SUSPEND (1 << 10)
+#define ADPA_DPMS_STANDBY (2 << 10)
+#define ADPA_DPMS_OFF (3 << 10)
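/*
 * Editor's sketch: the SHIFT/MASK/SEL() triplets introduced above are meant
 * for read-modify-write of the pipe (on CPT, the PCH transcoder) select
 * field. 'pipe' and the I915_READ/I915_WRITE helpers are assumed from the
 * calling driver code:
 *
 *	u32 adpa = I915_READ(PCH_ADPA);
 *	adpa &= ~ADPA_PIPE_SEL_MASK_CPT;
 *	adpa |= ADPA_PIPE_SEL_CPT(pipe);
 *	I915_WRITE(PCH_ADPA, adpa);
 */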
/* Hotplug control (945+ only) */
@@ -4301,9 +4425,9 @@ enum {
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_PIPE_SEL_SHIFT 30
#define SDVO_PIPE_SEL_MASK (1 << 30)
-#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
/*
@@ -4343,12 +4467,14 @@ enum {
#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
/* Gen 6 (CPT) SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+#define SDVO_PIPE_SEL_SHIFT_CPT 29
#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
/* CHV SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+#define SDVO_PIPE_SEL_SHIFT_CHV 24
#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
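/*
 * Editor's sketch: the new *_SHIFT definitions enable the reverse direction
 * as well, i.e. recovering the pipe from a register snapshot ('val' is an
 * assumed u32 read of the port register):
 *
 *	enum pipe pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >>
 *			 SDVO_PIPE_SEL_SHIFT_CPT;
 */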
/* DVO port control */
@@ -4359,7 +4485,9 @@ enum {
#define _DVOC 0x61160
#define DVOC _MMIO(_DVOC)
#define DVO_ENABLE (1 << 31)
-#define DVO_PIPE_B_SELECT (1 << 30)
+#define DVO_PIPE_SEL_SHIFT 30
+#define DVO_PIPE_SEL_MASK (1 << 30)
+#define DVO_PIPE_SEL(pipe) ((pipe) << 30)
#define DVO_PIPE_STALL_UNUSED (0 << 28)
#define DVO_PIPE_STALL (1 << 28)
#define DVO_PIPE_STALL_TV (2 << 28)
@@ -4381,7 +4509,7 @@ enum {
#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
-#define DVO_PRESERVE_MASK (0x7<<24)
+#define DVO_PRESERVE_MASK (0x7 << 24)
#define DVOA_SRCDIM _MMIO(0x61124)
#define DVOB_SRCDIM _MMIO(0x61144)
#define DVOC_SRCDIM _MMIO(0x61164)
@@ -4396,9 +4524,12 @@ enum {
*/
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
-#define LVDS_PIPEB_SELECT (1 << 30)
-#define LVDS_PIPE_MASK (1 << 30)
-#define LVDS_PIPE(pipe) ((pipe) << 30)
+#define LVDS_PIPE_SEL_SHIFT 30
+#define LVDS_PIPE_SEL_MASK (1 << 30)
+#define LVDS_PIPE_SEL(pipe) ((pipe) << 30)
+#define LVDS_PIPE_SEL_SHIFT_CPT 29
+#define LVDS_PIPE_SEL_MASK_CPT (3 << 29)
+#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -4471,6 +4602,16 @@ enum {
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+#define DRM_DIP_ENABLE (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 26)
+#define VSC_SELECT_SHIFT 26
+#define VSC_DIP_HW_HEA_DATA (0 << 26)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 26)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 26)
+#define VSC_DIP_SW_HEA_DATA (3 << 26)
+#define VDIP_ENABLE_PPS (1 << 24)
+
/* Panel power sequencing */
#define PPS_BASE 0x61200
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
@@ -4695,7 +4836,9 @@ enum {
/* Enables the TV encoder */
# define TV_ENC_ENABLE (1 << 31)
/* Sources the TV encoder input from pipe B instead of A. */
-# define TV_ENC_PIPEB_SELECT (1 << 30)
+# define TV_ENC_PIPE_SEL_SHIFT 30
+# define TV_ENC_PIPE_SEL_MASK (1 << 30)
+# define TV_ENC_PIPE_SEL(pipe) ((pipe) << 30)
/* Outputs composite video (DAC A only) */
# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
/* Outputs SVideo video (DAC B/C) */
@@ -5177,10 +5320,15 @@ enum {
#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
#define DP_PORT_EN (1 << 31)
-#define DP_PIPEB_SELECT (1 << 30)
-#define DP_PIPE_MASK (1 << 30)
-#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16)
-#define DP_PIPE_MASK_CHV (3 << 16)
+#define DP_PIPE_SEL_SHIFT 30
+#define DP_PIPE_SEL_MASK (1 << 30)
+#define DP_PIPE_SEL(pipe) ((pipe) << 30)
+#define DP_PIPE_SEL_SHIFT_IVB 29
+#define DP_PIPE_SEL_MASK_IVB (3 << 29)
+#define DP_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define DP_PIPE_SEL_SHIFT_CHV 16
+#define DP_PIPE_SEL_MASK_CHV (3 << 16)
+#define DP_PIPE_SEL_CHV(pipe) ((pipe) << 16)
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -5287,6 +5435,13 @@ enum {
#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
+#define _DPE_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64410)
+#define _DPE_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64414)
+#define _DPE_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64418)
+#define _DPE_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6441c)
+#define _DPE_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64420)
+#define _DPE_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64424)
+
#define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510)
#define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514)
#define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518)
@@ -5342,7 +5497,7 @@ enum {
#define _PIPEB_DATA_M_G4X 0x71050
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
-#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+#define TU_SIZE(x) (((x) - 1) << 25) /* default size 64 */
#define TU_SIZE_SHIFT 25
#define TU_SIZE_MASK (0x3f << 25)
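/*
 * Editor's note: TU_SIZE() stores size minus one, so the default TU of 64
 * is TU_SIZE(64) = 63 << 25 = 0x7e000000, which is exactly TU_SIZE_MASK.
 */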
@@ -5384,18 +5539,18 @@ enum {
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
-#define PIPECONF_ENABLE (1<<31)
+#define PIPECONF_ENABLE (1 << 31)
#define PIPECONF_DISABLE 0
-#define PIPECONF_DOUBLE_WIDE (1<<30)
-#define I965_PIPECONF_ACTIVE (1<<30)
-#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
-#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define PIPECONF_DOUBLE_WIDE (1 << 30)
+#define I965_PIPECONF_ACTIVE (1 << 30)
+#define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */
+#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
-#define PIPECONF_PIPE_LOCKED (1<<25)
+#define PIPECONF_PIPE_LOCKED (1 << 25)
#define PIPECONF_PALETTE 0
-#define PIPECONF_GAMMA (1<<24)
-#define PIPECONF_FORCE_BORDER (1<<25)
+#define PIPECONF_GAMMA (1 << 24)
+#define PIPECONF_FORCE_BORDER (1 << 25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
@@ -5414,67 +5569,67 @@ enum {
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
-#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_CXSR_DOWNCLOCK (1 << 16)
#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
-#define PIPECONF_8BPC (0<<5)
-#define PIPECONF_10BPC (1<<5)
-#define PIPECONF_6BPC (2<<5)
-#define PIPECONF_12BPC (3<<5)
-#define PIPECONF_DITHER_EN (1<<4)
+#define PIPECONF_8BPC (0 << 5)
+#define PIPECONF_10BPC (1 << 5)
+#define PIPECONF_6BPC (2 << 5)
+#define PIPECONF_12BPC (3 << 5)
+#define PIPECONF_DITHER_EN (1 << 4)
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
-#define PIPECONF_DITHER_TYPE_SP (0<<2)
-#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
-#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
-#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define PIPECONF_DITHER_TYPE_SP (0 << 2)
+#define PIPECONF_DITHER_TYPE_ST1 (1 << 2)
+#define PIPECONF_DITHER_TYPE_ST2 (2 << 2)
+#define PIPECONF_DITHER_TYPE_TEMP (3 << 2)
#define _PIPEASTAT 0x70024
-#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
-#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
-#define PIPE_CRC_DONE_ENABLE (1UL<<28)
-#define PERF_COUNTER2_INTERRUPT_EN (1UL<<27)
-#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
-#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
-#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
-#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
-#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
-#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
-#define PERF_COUNTER_INTERRUPT_EN (1UL<<19)
-#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL<<17)
-#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
-#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15)
-#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
-#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-#define PERF_COUNTER2_INTERRUPT_STATUS (1UL<<11)
-#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
-#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
-#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
-#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-#define PIPE_DPST_EVENT_STATUS (1UL<<7)
-#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
-#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
-#define PERF_COUNTER_INTERRUPT_STATUS (1UL<<3)
-#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL<<1)
-#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
-#define PIPE_HBLANK_INT_STATUS (1UL<<0)
-#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
+#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
+#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
+#define PIPE_CRC_DONE_ENABLE (1UL << 28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
+#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
+#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
+#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_HBLANK_INT_STATUS (1UL << 0)
+#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
@@ -5503,67 +5658,67 @@ enum {
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
-#define PIPEMISC_YUV420_ENABLE (1<<27)
-#define PIPEMISC_YUV420_MODE_FULL_BLEND (1<<26)
-#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1<<11)
-#define PIPEMISC_DITHER_BPC_MASK (7<<5)
-#define PIPEMISC_DITHER_8_BPC (0<<5)
-#define PIPEMISC_DITHER_10_BPC (1<<5)
-#define PIPEMISC_DITHER_6_BPC (2<<5)
-#define PIPEMISC_DITHER_12_BPC (3<<5)
-#define PIPEMISC_DITHER_ENABLE (1<<4)
-#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
-#define PIPEMISC_DITHER_TYPE_SP (0<<2)
+#define PIPEMISC_YUV420_ENABLE (1 << 27)
+#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
+#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
+#define PIPEMISC_DITHER_8_BPC (0 << 5)
+#define PIPEMISC_DITHER_10_BPC (1 << 5)
+#define PIPEMISC_DITHER_6_BPC (2 << 5)
+#define PIPEMISC_DITHER_12_BPC (3 << 5)
+#define PIPEMISC_DITHER_ENABLE (1 << 4)
+#define PIPEMISC_DITHER_TYPE_MASK (3 << 2)
+#define PIPEMISC_DITHER_TYPE_SP (0 << 2)
#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
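/*
 * Editor's sketch (assumed usage): selecting 6 bpc spatial dithering on a
 * pipe with the fields above:
 *
 *	u32 val = PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_6_BPC |
 *		  PIPEMISC_DITHER_TYPE_SP;
 *	I915_WRITE(PIPEMISC(pipe), val);
 */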
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
-#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
-#define PIPEB_HLINE_INT_EN (1<<28)
-#define PIPEB_VBLANK_INT_EN (1<<27)
-#define SPRITED_FLIP_DONE_INT_EN (1<<26)
-#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
-#define PLANEB_FLIP_DONE_INT_EN (1<<24)
-#define PIPE_PSR_INT_EN (1<<22)
-#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
-#define PIPEA_HLINE_INT_EN (1<<20)
-#define PIPEA_VBLANK_INT_EN (1<<19)
-#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
-#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
-#define PLANEA_FLIPDONE_INT_EN (1<<16)
-#define PIPEC_LINE_COMPARE_INT_EN (1<<13)
-#define PIPEC_HLINE_INT_EN (1<<12)
-#define PIPEC_VBLANK_INT_EN (1<<11)
-#define SPRITEF_FLIPDONE_INT_EN (1<<10)
-#define SPRITEE_FLIPDONE_INT_EN (1<<9)
-#define PLANEC_FLIPDONE_INT_EN (1<<8)
+#define PIPEB_LINE_COMPARE_INT_EN (1 << 29)
+#define PIPEB_HLINE_INT_EN (1 << 28)
+#define PIPEB_VBLANK_INT_EN (1 << 27)
+#define SPRITED_FLIP_DONE_INT_EN (1 << 26)
+#define SPRITEC_FLIP_DONE_INT_EN (1 << 25)
+#define PLANEB_FLIP_DONE_INT_EN (1 << 24)
+#define PIPE_PSR_INT_EN (1 << 22)
+#define PIPEA_LINE_COMPARE_INT_EN (1 << 21)
+#define PIPEA_HLINE_INT_EN (1 << 20)
+#define PIPEA_VBLANK_INT_EN (1 << 19)
+#define SPRITEB_FLIP_DONE_INT_EN (1 << 18)
+#define SPRITEA_FLIP_DONE_INT_EN (1 << 17)
+#define PLANEA_FLIPDONE_INT_EN (1 << 16)
+#define PIPEC_LINE_COMPARE_INT_EN (1 << 13)
+#define PIPEC_HLINE_INT_EN (1 << 12)
+#define PIPEC_VBLANK_INT_EN (1 << 11)
+#define SPRITEF_FLIPDONE_INT_EN (1 << 10)
+#define SPRITEE_FLIPDONE_INT_EN (1 << 9)
+#define PLANEC_FLIPDONE_INT_EN (1 << 8)
#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
-#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
-#define PLANEC_INVALID_GTT_INT_EN (1<<25)
-#define CURSORC_INVALID_GTT_INT_EN (1<<24)
-#define CURSORB_INVALID_GTT_INT_EN (1<<23)
-#define CURSORA_INVALID_GTT_INT_EN (1<<22)
-#define SPRITED_INVALID_GTT_INT_EN (1<<21)
-#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
-#define PLANEB_INVALID_GTT_INT_EN (1<<19)
-#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
-#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
-#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define SPRITEF_INVALID_GTT_INT_EN (1 << 27)
+#define SPRITEE_INVALID_GTT_INT_EN (1 << 26)
+#define PLANEC_INVALID_GTT_INT_EN (1 << 25)
+#define CURSORC_INVALID_GTT_INT_EN (1 << 24)
+#define CURSORB_INVALID_GTT_INT_EN (1 << 23)
+#define CURSORA_INVALID_GTT_INT_EN (1 << 22)
+#define SPRITED_INVALID_GTT_INT_EN (1 << 21)
+#define SPRITEC_INVALID_GTT_INT_EN (1 << 20)
+#define PLANEB_INVALID_GTT_INT_EN (1 << 19)
+#define SPRITEB_INVALID_GTT_INT_EN (1 << 18)
+#define SPRITEA_INVALID_GTT_INT_EN (1 << 17)
+#define PLANEA_INVALID_GTT_INT_EN (1 << 16)
#define DPINVGTT_EN_MASK 0xff0000
#define DPINVGTT_EN_MASK_CHV 0xfff0000
-#define SPRITEF_INVALID_GTT_STATUS (1<<11)
-#define SPRITEE_INVALID_GTT_STATUS (1<<10)
-#define PLANEC_INVALID_GTT_STATUS (1<<9)
-#define CURSORC_INVALID_GTT_STATUS (1<<8)
-#define CURSORB_INVALID_GTT_STATUS (1<<7)
-#define CURSORA_INVALID_GTT_STATUS (1<<6)
-#define SPRITED_INVALID_GTT_STATUS (1<<5)
-#define SPRITEC_INVALID_GTT_STATUS (1<<4)
-#define PLANEB_INVALID_GTT_STATUS (1<<3)
-#define SPRITEB_INVALID_GTT_STATUS (1<<2)
-#define SPRITEA_INVALID_GTT_STATUS (1<<1)
-#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define SPRITEF_INVALID_GTT_STATUS (1 << 11)
+#define SPRITEE_INVALID_GTT_STATUS (1 << 10)
+#define PLANEC_INVALID_GTT_STATUS (1 << 9)
+#define CURSORC_INVALID_GTT_STATUS (1 << 8)
+#define CURSORB_INVALID_GTT_STATUS (1 << 7)
+#define CURSORA_INVALID_GTT_STATUS (1 << 6)
+#define SPRITED_INVALID_GTT_STATUS (1 << 5)
+#define SPRITEC_INVALID_GTT_STATUS (1 << 4)
+#define PLANEB_INVALID_GTT_STATUS (1 << 3)
+#define SPRITEB_INVALID_GTT_STATUS (1 << 2)
+#define SPRITEA_INVALID_GTT_STATUS (1 << 1)
+#define PLANEA_INVALID_GTT_STATUS (1 << 0)
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
@@ -5604,149 +5759,149 @@ enum {
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
-#define DSPFW_SR_MASK (0x1ff<<23)
+#define DSPFW_SR_MASK (0x1ff << 23)
#define DSPFW_CURSORB_SHIFT 16
-#define DSPFW_CURSORB_MASK (0x3f<<16)
+#define DSPFW_CURSORB_MASK (0x3f << 16)
#define DSPFW_PLANEB_SHIFT 8
-#define DSPFW_PLANEB_MASK (0x7f<<8)
-#define DSPFW_PLANEB_MASK_VLV (0xff<<8) /* vlv/chv */
+#define DSPFW_PLANEB_MASK (0x7f << 8)
+#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
#define DSPFW_PLANEA_SHIFT 0
-#define DSPFW_PLANEA_MASK (0x7f<<0)
-#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
+#define DSPFW_PLANEA_MASK (0x7f << 0)
+#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
-#define DSPFW_FBC_SR_EN (1<<31) /* g4x */
+#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
#define DSPFW_FBC_SR_SHIFT 28
-#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
+#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
#define DSPFW_FBC_HPLL_SR_SHIFT 24
-#define DSPFW_FBC_HPLL_SR_MASK (0xf<<24) /* g4x */
+#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
#define DSPFW_SPRITEB_SHIFT (16)
-#define DSPFW_SPRITEB_MASK (0x7f<<16) /* g4x */
-#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
+#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
+#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
#define DSPFW_CURSORA_SHIFT 8
-#define DSPFW_CURSORA_MASK (0x3f<<8)
+#define DSPFW_CURSORA_MASK (0x3f << 8)
#define DSPFW_PLANEC_OLD_SHIFT 0
-#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */
+#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
#define DSPFW_SPRITEA_SHIFT 0
-#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
-#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
+#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
+#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
-#define DSPFW_HPLL_SR_EN (1<<31)
-#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_HPLL_SR_EN (1 << 31)
+#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
#define DSPFW_CURSOR_SR_SHIFT 24
-#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
#define DSPFW_HPLL_CURSOR_SHIFT 16
-#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
#define DSPFW_HPLL_SR_SHIFT 0
-#define DSPFW_HPLL_SR_MASK (0x1ff<<0)
+#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
/* vlv/chv */
#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
#define DSPFW_SPRITEB_WM1_SHIFT 16
-#define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
+#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
#define DSPFW_CURSORA_WM1_SHIFT 8
-#define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
+#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
#define DSPFW_SPRITEA_WM1_SHIFT 0
-#define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
+#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
#define DSPFW_PLANEB_WM1_SHIFT 24
-#define DSPFW_PLANEB_WM1_MASK (0xff<<24)
+#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
#define DSPFW_PLANEA_WM1_SHIFT 16
-#define DSPFW_PLANEA_WM1_MASK (0xff<<16)
+#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
#define DSPFW_CURSORB_WM1_SHIFT 8
-#define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
+#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
#define DSPFW_CURSOR_SR_WM1_SHIFT 0
-#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
+#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
#define DSPFW_SR_WM1_SHIFT 0
-#define DSPFW_SR_WM1_MASK (0x1ff<<0)
+#define DSPFW_SR_WM1_MASK (0x1ff << 0)
#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
#define DSPFW_SPRITED_WM1_SHIFT 24
-#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
+#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK_VLV (0xff<<16)
+#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
#define DSPFW_SPRITEC_WM1_SHIFT 8
-#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
+#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
+#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
-#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
+#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK_VLV (0xff<<16)
+#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
#define DSPFW_SPRITEE_WM1_SHIFT 8
-#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
+#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
+#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
-#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
+#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK_VLV (0xff<<16)
+#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
#define DSPFW_CURSORC_WM1_SHIFT 8
-#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
+#define DSPFW_CURSORC_WM1_MASK (0x3f << 16)
#define DSPFW_CURSORC_SHIFT 0
-#define DSPFW_CURSORC_MASK (0x3f<<0)
+#define DSPFW_CURSORC_MASK (0x3f << 0)
/* vlv/chv high order bits */
#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_HI_SHIFT 23
-#define DSPFW_SPRITEF_HI_MASK (1<<23)
+#define DSPFW_SPRITEF_HI_MASK (1 << 23)
#define DSPFW_SPRITEE_HI_SHIFT 22
-#define DSPFW_SPRITEE_HI_MASK (1<<22)
+#define DSPFW_SPRITEE_HI_MASK (1 << 22)
#define DSPFW_PLANEC_HI_SHIFT 21
-#define DSPFW_PLANEC_HI_MASK (1<<21)
+#define DSPFW_PLANEC_HI_MASK (1 << 21)
#define DSPFW_SPRITED_HI_SHIFT 20
-#define DSPFW_SPRITED_HI_MASK (1<<20)
+#define DSPFW_SPRITED_HI_MASK (1 << 20)
#define DSPFW_SPRITEC_HI_SHIFT 16
-#define DSPFW_SPRITEC_HI_MASK (1<<16)
+#define DSPFW_SPRITEC_HI_MASK (1 << 16)
#define DSPFW_PLANEB_HI_SHIFT 12
-#define DSPFW_PLANEB_HI_MASK (1<<12)
+#define DSPFW_PLANEB_HI_MASK (1 << 12)
#define DSPFW_SPRITEB_HI_SHIFT 8
-#define DSPFW_SPRITEB_HI_MASK (1<<8)
+#define DSPFW_SPRITEB_HI_MASK (1 << 8)
#define DSPFW_SPRITEA_HI_SHIFT 4
-#define DSPFW_SPRITEA_HI_MASK (1<<4)
+#define DSPFW_SPRITEA_HI_MASK (1 << 4)
#define DSPFW_PLANEA_HI_SHIFT 0
-#define DSPFW_PLANEA_HI_MASK (1<<0)
+#define DSPFW_PLANEA_HI_MASK (1 << 0)
#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
-#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
+#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
-#define DSPFW_SPRITEE_WM1_HI_MASK (1<<22)
+#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
#define DSPFW_PLANEC_WM1_HI_SHIFT 21
-#define DSPFW_PLANEC_WM1_HI_MASK (1<<21)
+#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
#define DSPFW_SPRITED_WM1_HI_SHIFT 20
-#define DSPFW_SPRITED_WM1_HI_MASK (1<<20)
+#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
-#define DSPFW_SPRITEC_WM1_HI_MASK (1<<16)
+#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
#define DSPFW_PLANEB_WM1_HI_SHIFT 12
-#define DSPFW_PLANEB_WM1_HI_MASK (1<<12)
+#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
-#define DSPFW_SPRITEB_WM1_HI_MASK (1<<8)
+#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
-#define DSPFW_SPRITEA_WM1_HI_MASK (1<<4)
+#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
#define DSPFW_PLANEA_WM1_HI_SHIFT 0
-#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
+#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
/* drain latency register values*/
#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
+#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
#define DDL_PLANE_SHIFT 0
-#define DDL_PRECISION_HIGH (1<<7)
-#define DDL_PRECISION_LOW (0<<7)
+#define DDL_PRECISION_HIGH (1 << 7)
+#define DDL_PRECISION_LOW (0 << 7)
#define DRAIN_LATENCY_MASK 0x7f
#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
-#define CBR_PND_DEADLINE_DISABLE (1<<31)
-#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
+#define CBR_PND_DEADLINE_DISABLE (1 << 31)
+#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define CBR_DPLLBMD_PIPE(pipe) (1<<(7+(pipe)*11)) /* pipes B and C */
+#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
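/*
 * Editor's note: CBR_DPLLBMD_PIPE() spaces the per-pipe bits 11 apart
 * starting at bit 7, so PIPE_B (1) maps to bit 18 and PIPE_C (2) to bit 29;
 * PIPE_A has no bit here, hence the "pipes B and C" comment.
 */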
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -5818,32 +5973,32 @@ enum {
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK _MMIO(0x45100)
-#define WM0_PIPE_PLANE_MASK (0xffff<<16)
+#define WM0_PIPE_PLANE_MASK (0xffff << 16)
#define WM0_PIPE_PLANE_SHIFT 16
-#define WM0_PIPE_SPRITE_MASK (0xff<<8)
+#define WM0_PIPE_SPRITE_MASK (0xff << 8)
#define WM0_PIPE_SPRITE_SHIFT 8
#define WM0_PIPE_CURSOR_MASK (0xff)
#define WM0_PIPEB_ILK _MMIO(0x45104)
#define WM0_PIPEC_IVB _MMIO(0x45200)
#define WM1_LP_ILK _MMIO(0x45108)
-#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_SR_EN (1 << 31)
#define WM1_LP_LATENCY_SHIFT 24
-#define WM1_LP_LATENCY_MASK (0x7f<<24)
-#define WM1_LP_FBC_MASK (0xf<<20)
+#define WM1_LP_LATENCY_MASK (0x7f << 24)
+#define WM1_LP_FBC_MASK (0xf << 20)
#define WM1_LP_FBC_SHIFT 20
#define WM1_LP_FBC_SHIFT_BDW 19
-#define WM1_LP_SR_MASK (0x7ff<<8)
+#define WM1_LP_SR_MASK (0x7ff << 8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0xff)
#define WM2_LP_ILK _MMIO(0x4510c)
-#define WM2_LP_EN (1<<31)
+#define WM2_LP_EN (1 << 31)
#define WM3_LP_ILK _MMIO(0x45110)
-#define WM3_LP_EN (1<<31)
+#define WM3_LP_EN (1 << 31)
#define WM1S_LP_ILK _MMIO(0x45120)
#define WM2S_LP_IVB _MMIO(0x45124)
#define WM3S_LP_IVB _MMIO(0x45128)
-#define WM1S_LP_EN (1<<31)
+#define WM1S_LP_EN (1 << 31)
#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
(WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
@@ -5900,8 +6055,7 @@ enum {
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_SHIFT 28
-#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
-#define CURSOR_PIPE_CSC_ENABLE (1<<24)
+#define CURSOR_STRIDE(x) ((ffs(x) - 9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
@@ -5910,18 +6064,21 @@ enum {
#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT)
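/*
 * Editor's note: CURSOR_STRIDE() relies on the stride being a power of two;
 * ffs(256) = 9, so a 256-byte stride encodes as 0, 512 as 1, 1k as 2 and
 * 2k as 3 in the two-bit field at CURSOR_STRIDE_SHIFT.
 */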
/* New style CUR*CNTR flags */
-#define CURSOR_MODE 0x27
-#define CURSOR_MODE_DISABLE 0x00
-#define CURSOR_MODE_128_32B_AX 0x02
-#define CURSOR_MODE_256_32B_AX 0x03
-#define CURSOR_MODE_64_32B_AX 0x07
-#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
-#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
-#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_MODE 0x27
+#define MCURSOR_MODE_DISABLE 0x00
+#define MCURSOR_MODE_128_32B_AX 0x02
+#define MCURSOR_MODE_256_32B_AX 0x03
+#define MCURSOR_MODE_64_32B_AX 0x07
+#define MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX)
+#define MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX)
+#define MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX)
+#define MCURSOR_PIPE_SELECT_MASK (0x3 << 28)
+#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define CURSOR_ROTATE_180 (1<<15)
-#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
+#define MCURSOR_PIPE_CSC_ENABLE (1 << 24)
+#define MCURSOR_ROTATE_180 (1 << 15)
+#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
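/*
 * Editor's sketch (assumed usage): with the CURSOR_* -> MCURSOR_* rename
 * above, a 64x64 ARGB cursor on a given pipe would be programmed roughly as:
 *
 *	u32 cntl = MCURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE |
 *		   MCURSOR_PIPE_SELECT(pipe);
 */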
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
@@ -5958,41 +6115,41 @@ enum {
/* Display A control */
#define _DSPACNTR 0x70180
-#define DISPLAY_PLANE_ENABLE (1<<31)
+#define DISPLAY_PLANE_ENABLE (1 << 31)
#define DISPLAY_PLANE_DISABLE 0
-#define DISPPLANE_GAMMA_ENABLE (1<<30)
+#define DISPPLANE_GAMMA_ENABLE (1 << 30)
#define DISPPLANE_GAMMA_DISABLE 0
-#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
-#define DISPPLANE_YUV422 (0x0<<26)
-#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_BGRA555 (0x3<<26)
-#define DISPPLANE_BGRX555 (0x4<<26)
-#define DISPPLANE_BGRX565 (0x5<<26)
-#define DISPPLANE_BGRX888 (0x6<<26)
-#define DISPPLANE_BGRA888 (0x7<<26)
-#define DISPPLANE_RGBX101010 (0x8<<26)
-#define DISPPLANE_RGBA101010 (0x9<<26)
-#define DISPPLANE_BGRX101010 (0xa<<26)
-#define DISPPLANE_RGBX161616 (0xc<<26)
-#define DISPPLANE_RGBX888 (0xe<<26)
-#define DISPPLANE_RGBA888 (0xf<<26)
-#define DISPPLANE_STEREO_ENABLE (1<<25)
+#define DISPPLANE_PIXFORMAT_MASK (0xf << 26)
+#define DISPPLANE_YUV422 (0x0 << 26)
+#define DISPPLANE_8BPP (0x2 << 26)
+#define DISPPLANE_BGRA555 (0x3 << 26)
+#define DISPPLANE_BGRX555 (0x4 << 26)
+#define DISPPLANE_BGRX565 (0x5 << 26)
+#define DISPPLANE_BGRX888 (0x6 << 26)
+#define DISPPLANE_BGRA888 (0x7 << 26)
+#define DISPPLANE_RGBX101010 (0x8 << 26)
+#define DISPPLANE_RGBA101010 (0x9 << 26)
+#define DISPPLANE_BGRX101010 (0xa << 26)
+#define DISPPLANE_RGBX161616 (0xc << 26)
+#define DISPPLANE_RGBX888 (0xe << 26)
+#define DISPPLANE_RGBA888 (0xf << 26)
+#define DISPPLANE_STEREO_ENABLE (1 << 25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
+#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24)
#define DISPPLANE_SEL_PIPE_SHIFT 24
-#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
-#define DISPPLANE_SEL_PIPE(pipe) ((pipe)<<DISPPLANE_SEL_PIPE_SHIFT)
-#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SRC_KEY_ENABLE (1 << 22)
#define DISPPLANE_SRC_KEY_DISABLE 0
-#define DISPPLANE_LINE_DOUBLE (1<<20)
+#define DISPPLANE_LINE_DOUBLE (1 << 20)
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
-#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_ALPHA_PREMULTIPLY (1<<16) /* CHV pipe B */
-#define DISPPLANE_ROTATE_180 (1<<15)
-#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
-#define DISPPLANE_TILED (1<<10)
-#define DISPPLANE_MIRROR (1<<8) /* CHV pipe B */
+#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18)
+#define DISPPLANE_ALPHA_PREMULTIPLY (1 << 16) /* CHV pipe B */
+#define DISPPLANE_ROTATE_180 (1 << 15)
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */
+#define DISPPLANE_TILED (1 << 10)
+#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
@@ -6015,15 +6172,15 @@ enum {
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
-#define CHV_BLEND_LEGACY (0<<30)
-#define CHV_BLEND_ANDROID (1<<30)
-#define CHV_BLEND_MPO (2<<30)
-#define CHV_BLEND_MASK (3<<30)
+#define CHV_BLEND_LEGACY (0 << 30)
+#define CHV_BLEND_ANDROID (1 << 30)
+#define CHV_BLEND_MPO (2 << 30)
+#define CHV_BLEND_MASK (3 << 30)
#define _CHV_CANVAS_A 0x60a04
#define _PRIMPOS_A 0x60a08
#define _PRIMSIZE_A 0x60a0c
#define _PRIMCNSTALPHA_A 0x60a10
-#define PRIM_CONST_ALPHA_ENABLE (1<<31)
+#define PRIM_CONST_ALPHA_ENABLE (1 << 31)
#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A)
#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
@@ -6033,8 +6190,8 @@ enum {
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
-#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
-#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
+#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
/*
* VBIOS flags
@@ -6064,7 +6221,7 @@ enum {
/* Display B control */
#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180)
-#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
@@ -6079,27 +6236,27 @@ enum {
/* Sprite A control */
#define _DVSACNTR 0x72180
-#define DVS_ENABLE (1<<31)
-#define DVS_GAMMA_ENABLE (1<<30)
-#define DVS_YUV_RANGE_CORRECTION_DISABLE (1<<27)
-#define DVS_PIXFORMAT_MASK (3<<25)
-#define DVS_FORMAT_YUV422 (0<<25)
-#define DVS_FORMAT_RGBX101010 (1<<25)
-#define DVS_FORMAT_RGBX888 (2<<25)
-#define DVS_FORMAT_RGBX161616 (3<<25)
-#define DVS_PIPE_CSC_ENABLE (1<<24)
-#define DVS_SOURCE_KEY (1<<22)
-#define DVS_RGB_ORDER_XBGR (1<<20)
-#define DVS_YUV_FORMAT_BT709 (1<<18)
-#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
-#define DVS_YUV_ORDER_YUYV (0<<16)
-#define DVS_YUV_ORDER_UYVY (1<<16)
-#define DVS_YUV_ORDER_YVYU (2<<16)
-#define DVS_YUV_ORDER_VYUY (3<<16)
-#define DVS_ROTATE_180 (1<<15)
-#define DVS_DEST_KEY (1<<2)
-#define DVS_TRICKLE_FEED_DISABLE (1<<14)
-#define DVS_TILED (1<<10)
+#define DVS_ENABLE (1 << 31)
+#define DVS_GAMMA_ENABLE (1 << 30)
+#define DVS_YUV_RANGE_CORRECTION_DISABLE (1 << 27)
+#define DVS_PIXFORMAT_MASK (3 << 25)
+#define DVS_FORMAT_YUV422 (0 << 25)
+#define DVS_FORMAT_RGBX101010 (1 << 25)
+#define DVS_FORMAT_RGBX888 (2 << 25)
+#define DVS_FORMAT_RGBX161616 (3 << 25)
+#define DVS_PIPE_CSC_ENABLE (1 << 24)
+#define DVS_SOURCE_KEY (1 << 22)
+#define DVS_RGB_ORDER_XBGR (1 << 20)
+#define DVS_YUV_FORMAT_BT709 (1 << 18)
+#define DVS_YUV_BYTE_ORDER_MASK (3 << 16)
+#define DVS_YUV_ORDER_YUYV (0 << 16)
+#define DVS_YUV_ORDER_UYVY (1 << 16)
+#define DVS_YUV_ORDER_YVYU (2 << 16)
+#define DVS_YUV_ORDER_VYUY (3 << 16)
+#define DVS_ROTATE_180 (1 << 15)
+#define DVS_DEST_KEY (1 << 2)
+#define DVS_TRICKLE_FEED_DISABLE (1 << 14)
+#define DVS_TILED (1 << 10)
#define _DVSALINOFF 0x72184
#define _DVSASTRIDE 0x72188
#define _DVSAPOS 0x7218c
@@ -6111,13 +6268,13 @@ enum {
#define _DVSATILEOFF 0x721a4
#define _DVSASURFLIVE 0x721ac
#define _DVSASCALE 0x72204
-#define DVS_SCALE_ENABLE (1<<31)
-#define DVS_FILTER_MASK (3<<29)
-#define DVS_FILTER_MEDIUM (0<<29)
-#define DVS_FILTER_ENHANCING (1<<29)
-#define DVS_FILTER_SOFTENING (2<<29)
-#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define DVS_SCALE_ENABLE (1 << 31)
+#define DVS_FILTER_MASK (3 << 29)
+#define DVS_FILTER_MEDIUM (0 << 29)
+#define DVS_FILTER_ENHANCING (1 << 29)
+#define DVS_FILTER_SOFTENING (2 << 29)
+#define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _DVSAGAMC 0x72300
#define _DVSBCNTR 0x73180
@@ -6148,31 +6305,31 @@ enum {
#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define _SPRA_CTL 0x70280
-#define SPRITE_ENABLE (1<<31)
-#define SPRITE_GAMMA_ENABLE (1<<30)
-#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1<<28)
-#define SPRITE_PIXFORMAT_MASK (7<<25)
-#define SPRITE_FORMAT_YUV422 (0<<25)
-#define SPRITE_FORMAT_RGBX101010 (1<<25)
-#define SPRITE_FORMAT_RGBX888 (2<<25)
-#define SPRITE_FORMAT_RGBX161616 (3<<25)
-#define SPRITE_FORMAT_YUV444 (4<<25)
-#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
-#define SPRITE_PIPE_CSC_ENABLE (1<<24)
-#define SPRITE_SOURCE_KEY (1<<22)
-#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
-#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
-#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
-#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
-#define SPRITE_YUV_ORDER_YUYV (0<<16)
-#define SPRITE_YUV_ORDER_UYVY (1<<16)
-#define SPRITE_YUV_ORDER_YVYU (2<<16)
-#define SPRITE_YUV_ORDER_VYUY (3<<16)
-#define SPRITE_ROTATE_180 (1<<15)
-#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
-#define SPRITE_INT_GAMMA_ENABLE (1<<13)
-#define SPRITE_TILED (1<<10)
-#define SPRITE_DEST_KEY (1<<2)
+#define SPRITE_ENABLE (1 << 31)
+#define SPRITE_GAMMA_ENABLE (1 << 30)
+#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
+#define SPRITE_PIXFORMAT_MASK (7 << 25)
+#define SPRITE_FORMAT_YUV422 (0 << 25)
+#define SPRITE_FORMAT_RGBX101010 (1 << 25)
+#define SPRITE_FORMAT_RGBX888 (2 << 25)
+#define SPRITE_FORMAT_RGBX161616 (3 << 25)
+#define SPRITE_FORMAT_YUV444 (4 << 25)
+#define SPRITE_FORMAT_XR_BGR101010 (5 << 25) /* Extended range */
+#define SPRITE_PIPE_CSC_ENABLE (1 << 24)
+#define SPRITE_SOURCE_KEY (1 << 22)
+#define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19)
+#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */
+#define SPRITE_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SPRITE_YUV_ORDER_YUYV (0 << 16)
+#define SPRITE_YUV_ORDER_UYVY (1 << 16)
+#define SPRITE_YUV_ORDER_YVYU (2 << 16)
+#define SPRITE_YUV_ORDER_VYUY (3 << 16)
+#define SPRITE_ROTATE_180 (1 << 15)
+#define SPRITE_TRICKLE_FEED_DISABLE (1 << 14)
+#define SPRITE_INT_GAMMA_ENABLE (1 << 13)
+#define SPRITE_TILED (1 << 10)
+#define SPRITE_DEST_KEY (1 << 2)
#define _SPRA_LINOFF 0x70284
#define _SPRA_STRIDE 0x70288
#define _SPRA_POS 0x7028c
@@ -6185,13 +6342,13 @@ enum {
#define _SPRA_OFFSET 0x702a4
#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
-#define SPRITE_SCALE_ENABLE (1<<31)
-#define SPRITE_FILTER_MASK (3<<29)
-#define SPRITE_FILTER_MEDIUM (0<<29)
-#define SPRITE_FILTER_ENHANCING (1<<29)
-#define SPRITE_FILTER_SOFTENING (2<<29)
-#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
+#define SPRITE_SCALE_ENABLE (1 << 31)
+#define SPRITE_FILTER_MASK (3 << 29)
+#define SPRITE_FILTER_MEDIUM (0 << 29)
+#define SPRITE_FILTER_ENHANCING (1 << 29)
+#define SPRITE_FILTER_SOFTENING (2 << 29)
+#define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _SPRA_GAMC 0x70400
#define _SPRB_CTL 0x71280
@@ -6225,28 +6382,28 @@ enum {
#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
-#define SP_ENABLE (1<<31)
-#define SP_GAMMA_ENABLE (1<<30)
-#define SP_PIXFORMAT_MASK (0xf<<26)
-#define SP_FORMAT_YUV422 (0<<26)
-#define SP_FORMAT_BGR565 (5<<26)
-#define SP_FORMAT_BGRX8888 (6<<26)
-#define SP_FORMAT_BGRA8888 (7<<26)
-#define SP_FORMAT_RGBX1010102 (8<<26)
-#define SP_FORMAT_RGBA1010102 (9<<26)
-#define SP_FORMAT_RGBX8888 (0xe<<26)
-#define SP_FORMAT_RGBA8888 (0xf<<26)
-#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */
-#define SP_SOURCE_KEY (1<<22)
-#define SP_YUV_FORMAT_BT709 (1<<18)
-#define SP_YUV_BYTE_ORDER_MASK (3<<16)
-#define SP_YUV_ORDER_YUYV (0<<16)
-#define SP_YUV_ORDER_UYVY (1<<16)
-#define SP_YUV_ORDER_YVYU (2<<16)
-#define SP_YUV_ORDER_VYUY (3<<16)
-#define SP_ROTATE_180 (1<<15)
-#define SP_TILED (1<<10)
-#define SP_MIRROR (1<<8) /* CHV pipe B */
+#define SP_ENABLE (1 << 31)
+#define SP_GAMMA_ENABLE (1 << 30)
+#define SP_PIXFORMAT_MASK (0xf << 26)
+#define SP_FORMAT_YUV422 (0 << 26)
+#define SP_FORMAT_BGR565 (5 << 26)
+#define SP_FORMAT_BGRX8888 (6 << 26)
+#define SP_FORMAT_BGRA8888 (7 << 26)
+#define SP_FORMAT_RGBX1010102 (8 << 26)
+#define SP_FORMAT_RGBA1010102 (9 << 26)
+#define SP_FORMAT_RGBX8888 (0xe << 26)
+#define SP_FORMAT_RGBA8888 (0xf << 26)
+#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */
+#define SP_SOURCE_KEY (1 << 22)
+#define SP_YUV_FORMAT_BT709 (1 << 18)
+#define SP_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SP_YUV_ORDER_YUYV (0 << 16)
+#define SP_YUV_ORDER_UYVY (1 << 16)
+#define SP_YUV_ORDER_YVYU (2 << 16)
+#define SP_YUV_ORDER_VYUY (3 << 16)
+#define SP_ROTATE_180 (1 << 15)
+#define SP_TILED (1 << 10)
+#define SP_MIRROR (1 << 8) /* CHV pipe B */
#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
@@ -6257,7 +6414,7 @@ enum {
#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
-#define SP_CONST_ALPHA_ENABLE (1<<31)
+#define SP_CONST_ALPHA_ENABLE (1 << 31)
#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
#define SP_CONTRAST(x) ((x) << 18) /* u3.6 */
#define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */
@@ -6349,40 +6506,40 @@ enum {
* correctly map to the same formats in ICL, as long as bit 23 is set to 0
*/
#define PLANE_CTL_FORMAT_MASK (0xf << 24)
-#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
-#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
-#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
-#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
-#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
-#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
-#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
-#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
+#define PLANE_CTL_FORMAT_YUV422 (0 << 24)
+#define PLANE_CTL_FORMAT_NV12 (1 << 24)
+#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24)
+#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24)
+#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
+#define PLANE_CTL_FORMAT_AYUV (8 << 24)
+#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
+#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
-#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
-#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
+#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21)
+#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
#define PLANE_CTL_ORDER_BGRX (0 << 20)
#define PLANE_CTL_ORDER_RGBX (1 << 20)
#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
-#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
-#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
-#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
-#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
+#define PLANE_CTL_YUV422_YUYV (0 << 16)
+#define PLANE_CTL_YUV422_UYVY (1 << 16)
+#define PLANE_CTL_YUV422_YVYU (2 << 16)
+#define PLANE_CTL_YUV422_VYUY (3 << 16)
#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
-#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
-#define PLANE_CTL_TILED_X ( 1 << 10)
-#define PLANE_CTL_TILED_Y ( 4 << 10)
-#define PLANE_CTL_TILED_YF ( 5 << 10)
-#define PLANE_CTL_FLIP_HORIZONTAL ( 1 << 8)
+#define PLANE_CTL_TILED_LINEAR (0 << 10)
+#define PLANE_CTL_TILED_X (1 << 10)
+#define PLANE_CTL_TILED_Y (4 << 10)
+#define PLANE_CTL_TILED_YF (5 << 10)
+#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8)
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
-#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
-#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
-#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
+#define PLANE_CTL_ALPHA_DISABLE (0 << 4)
+#define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4)
+#define PLANE_CTL_ALPHA_HW_PREMULTIPLY (3 << 4)
#define PLANE_CTL_ROTATE_MASK 0x3
#define PLANE_CTL_ROTATE_0 0x0
#define PLANE_CTL_ROTATE_90 0x1
@@ -6610,7 +6767,7 @@ enum {
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define FDI_PLL_FREQ_CTL _MMIO(0x46030)
-#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
+#define FDI_PLL_FREQ_CHANGE_REQUEST (1 << 24)
#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
@@ -6659,14 +6816,14 @@ enum {
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
-#define PF_ENABLE (1<<31)
-#define PF_PIPE_SEL_MASK_IVB (3<<29)
-#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
-#define PF_FILTER_MASK (3<<23)
-#define PF_FILTER_PROGRAMMED (0<<23)
-#define PF_FILTER_MED_3x3 (1<<23)
-#define PF_FILTER_EDGE_ENHANCE (2<<23)
-#define PF_FILTER_EDGE_SOFTEN (3<<23)
+#define PF_ENABLE (1 << 31)
+#define PF_PIPE_SEL_MASK_IVB (3 << 29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define PF_FILTER_MASK (3 << 23)
+#define PF_FILTER_PROGRAMMED (0 << 23)
+#define PF_FILTER_MED_3x3 (1 << 23)
+#define PF_FILTER_EDGE_ENHANCE (2 << 23)
+#define PF_FILTER_EDGE_SOFTEN (3 << 23)
#define _PFA_WIN_SZ 0x68074
#define _PFB_WIN_SZ 0x68874
#define _PFA_WIN_POS 0x68070
@@ -6684,7 +6841,7 @@ enum {
#define _PSA_CTL 0x68180
#define _PSB_CTL 0x68980
-#define PS_ENABLE (1<<31)
+#define PS_ENABLE (1 << 31)
#define _PSA_WIN_SZ 0x68174
#define _PSB_WIN_SZ 0x68974
#define _PSA_WIN_POS 0x68170
@@ -6769,6 +6926,10 @@ enum {
#define _PS_VPHASE_1B 0x68988
#define _PS_VPHASE_2B 0x68A88
#define _PS_VPHASE_1C 0x69188
+#define PS_Y_PHASE(x) ((x) << 16)
+#define PS_UV_RGB_PHASE(x) ((x) << 0)
+#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
+#define PS_PHASE_TRIP (1 << 0)
#define _PS_HPHASE_1A 0x68194
#define _PS_HPHASE_2A 0x68294
@@ -6782,7 +6943,7 @@ enum {
#define _PS_ECC_STAT_2B 0x68AD0
#define _PS_ECC_STAT_1C 0x691D0
-#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
+#define _ID(id, a, b) _PICK_EVEN(id, a, b)
#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
_ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
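The _ID change above is mechanical: assuming _PICK_EVEN carries the usual i915 helper definition, it expands to exactly the arithmetic that was open-coded before, so SKL_PS_CTRL and friends resolve to the same offsets. A minimal sketch of the equivalence (the _PICK_EVEN body here is an assumption, copied from how the helper is commonly defined):

	/* assumed helper definition */
	#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))

	/*
	 * _ID(0, _PS_1A_CTRL, _PS_2A_CTRL) -> _PS_1A_CTRL
	 * _ID(1, _PS_1A_CTRL, _PS_2A_CTRL) -> _PS_1A_CTRL + 1 * (_PS_2A_CTRL - _PS_1A_CTRL)
	 *                                  == _PS_2A_CTRL
	 */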
@@ -6863,37 +7024,37 @@ enum {
#define DE_PIPEB_CRC_DONE (1 << 10)
#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
#define DE_PIPEA_VBLANK (1 << 7)
-#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
+#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8 * (pipe)))
#define DE_PIPEA_EVEN_FIELD (1 << 6)
#define DE_PIPEA_ODD_FIELD (1 << 5)
#define DE_PIPEA_LINE_COMPARE (1 << 4)
#define DE_PIPEA_VSYNC (1 << 3)
#define DE_PIPEA_CRC_DONE (1 << 2)
-#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe)))
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
-#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
+#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
/* More Ivybridge lolz */
-#define DE_ERR_INT_IVB (1<<30)
-#define DE_GSE_IVB (1<<29)
-#define DE_PCH_EVENT_IVB (1<<28)
-#define DE_DP_A_HOTPLUG_IVB (1<<27)
-#define DE_AUX_CHANNEL_A_IVB (1<<26)
-#define DE_EDP_PSR_INT_HSW (1<<19)
-#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
-#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
-#define DE_PIPEC_VBLANK_IVB (1<<10)
-#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
-#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
-#define DE_PIPEB_VBLANK_IVB (1<<5)
-#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
-#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
-#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
-#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define DE_ERR_INT_IVB (1 << 30)
+#define DE_GSE_IVB (1 << 29)
+#define DE_PCH_EVENT_IVB (1 << 28)
+#define DE_DP_A_HOTPLUG_IVB (1 << 27)
+#define DE_AUX_CHANNEL_A_IVB (1 << 26)
+#define DE_EDP_PSR_INT_HSW (1 << 19)
+#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
+#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
+#define DE_PIPEC_VBLANK_IVB (1 << 10)
+#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
+#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
+#define DE_PIPEB_VBLANK_IVB (1 << 5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
+#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
+#define DE_PIPEA_VBLANK_IVB (1 << 0)
#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
-#define MASTER_INTERRUPT_ENABLE (1<<31)
+#define MASTER_INTERRUPT_ENABLE (1 << 31)
#define DEISR _MMIO(0x44000)
#define DEIMR _MMIO(0x44004)
@@ -6906,37 +7067,37 @@ enum {
#define GTIER _MMIO(0x4401c)
#define GEN8_MASTER_IRQ _MMIO(0x44200)
-#define GEN8_MASTER_IRQ_CONTROL (1<<31)
-#define GEN8_PCU_IRQ (1<<30)
-#define GEN8_DE_PCH_IRQ (1<<23)
-#define GEN8_DE_MISC_IRQ (1<<22)
-#define GEN8_DE_PORT_IRQ (1<<20)
-#define GEN8_DE_PIPE_C_IRQ (1<<18)
-#define GEN8_DE_PIPE_B_IRQ (1<<17)
-#define GEN8_DE_PIPE_A_IRQ (1<<16)
-#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
-#define GEN8_GT_VECS_IRQ (1<<6)
-#define GEN8_GT_GUC_IRQ (1<<5)
-#define GEN8_GT_PM_IRQ (1<<4)
-#define GEN8_GT_VCS2_IRQ (1<<3)
-#define GEN8_GT_VCS1_IRQ (1<<2)
-#define GEN8_GT_BCS_IRQ (1<<1)
-#define GEN8_GT_RCS_IRQ (1<<0)
+#define GEN8_MASTER_IRQ_CONTROL (1 << 31)
+#define GEN8_PCU_IRQ (1 << 30)
+#define GEN8_DE_PCH_IRQ (1 << 23)
+#define GEN8_DE_MISC_IRQ (1 << 22)
+#define GEN8_DE_PORT_IRQ (1 << 20)
+#define GEN8_DE_PIPE_C_IRQ (1 << 18)
+#define GEN8_DE_PIPE_B_IRQ (1 << 17)
+#define GEN8_DE_PIPE_A_IRQ (1 << 16)
+#define GEN8_DE_PIPE_IRQ(pipe) (1 << (16 + (pipe)))
+#define GEN8_GT_VECS_IRQ (1 << 6)
+#define GEN8_GT_GUC_IRQ (1 << 5)
+#define GEN8_GT_PM_IRQ (1 << 4)
+#define GEN8_GT_VCS2_IRQ (1 << 3)
+#define GEN8_GT_VCS1_IRQ (1 << 2)
+#define GEN8_GT_BCS_IRQ (1 << 1)
+#define GEN8_GT_RCS_IRQ (1 << 0)
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
-#define GEN9_GUC_TO_HOST_INT_EVENT (1<<31)
-#define GEN9_GUC_EXEC_ERROR_EVENT (1<<30)
-#define GEN9_GUC_DISPLAY_EVENT (1<<29)
-#define GEN9_GUC_SEMA_SIGNAL_EVENT (1<<28)
-#define GEN9_GUC_IOMMU_MSG_EVENT (1<<27)
-#define GEN9_GUC_DB_RING_EVENT (1<<26)
-#define GEN9_GUC_DMA_DONE_EVENT (1<<25)
-#define GEN9_GUC_FATAL_ERROR_EVENT (1<<24)
-#define GEN9_GUC_NOTIFICATION_EVENT (1<<23)
+#define GEN9_GUC_TO_HOST_INT_EVENT (1 << 31)
+#define GEN9_GUC_EXEC_ERROR_EVENT (1 << 30)
+#define GEN9_GUC_DISPLAY_EVENT (1 << 29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT (1 << 28)
+#define GEN9_GUC_IOMMU_MSG_EVENT (1 << 27)
+#define GEN9_GUC_DB_RING_EVENT (1 << 26)
+#define GEN9_GUC_DMA_DONE_EVENT (1 << 25)
+#define GEN9_GUC_FATAL_ERROR_EVENT (1 << 24)
+#define GEN9_GUC_NOTIFICATION_EVENT (1 << 23)
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
@@ -6985,6 +7146,7 @@ enum {
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
#define GEN8_DE_PORT_IIR _MMIO(0x44448)
#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define ICL_AUX_CHANNEL_E (1 << 29)
#define CNL_AUX_CHANNEL_F (1 << 28)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
@@ -7011,9 +7173,16 @@ enum {
#define GEN8_PCU_IIR _MMIO(0x444e8)
#define GEN8_PCU_IER _MMIO(0x444ec)
+#define GEN11_GU_MISC_ISR _MMIO(0x444f0)
+#define GEN11_GU_MISC_IMR _MMIO(0x444f4)
+#define GEN11_GU_MISC_IIR _MMIO(0x444f8)
+#define GEN11_GU_MISC_IER _MMIO(0x444fc)
+#define GEN11_GU_MISC_GSE (1 << 27)
+
#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
#define GEN11_MASTER_IRQ (1 << 31)
#define GEN11_PCU_IRQ (1 << 30)
+#define GEN11_GU_MISC_IRQ (1 << 29)
#define GEN11_DISPLAY_IRQ (1 << 16)
#define GEN11_GT_DW_IRQ(x) (1 << (x))
#define GEN11_GT_DW1_IRQ (1 << 1)
@@ -7024,11 +7193,40 @@ enum {
#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
#define GEN11_DE_PCH_IRQ (1 << 23)
#define GEN11_DE_MISC_IRQ (1 << 22)
+#define GEN11_DE_HPD_IRQ (1 << 21)
#define GEN11_DE_PORT_IRQ (1 << 20)
#define GEN11_DE_PIPE_C (1 << 18)
#define GEN11_DE_PIPE_B (1 << 17)
#define GEN11_DE_PIPE_A (1 << 16)
+#define GEN11_DE_HPD_ISR _MMIO(0x44470)
+#define GEN11_DE_HPD_IMR _MMIO(0x44474)
+#define GEN11_DE_HPD_IIR _MMIO(0x44478)
+#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN11_TC4_HOTPLUG (1 << 19)
+#define GEN11_TC3_HOTPLUG (1 << 18)
+#define GEN11_TC2_HOTPLUG (1 << 17)
+#define GEN11_TC1_HOTPLUG (1 << 16)
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \
+ GEN11_TC3_HOTPLUG | \
+ GEN11_TC2_HOTPLUG | \
+ GEN11_TC1_HOTPLUG)
+#define GEN11_TBT4_HOTPLUG (1 << 3)
+#define GEN11_TBT3_HOTPLUG (1 << 2)
+#define GEN11_TBT2_HOTPLUG (1 << 1)
+#define GEN11_TBT1_HOTPLUG (1 << 0)
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \
+ GEN11_TBT3_HOTPLUG | \
+ GEN11_TBT2_HOTPLUG | \
+ GEN11_TBT1_HOTPLUG)
+
+#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
+#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
+#define GEN11_HOTPLUG_CTL_ENABLE(tc_port) (8 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_NO_DETECT(tc_port) (0 << (tc_port) * 4)
+
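As a rough illustration of how the new GEN11 hotplug bits fit together (not code from this patch), a handler would read GEN11_DE_HPD_IIR, split the pending bits with the two masks above, and ack them. I915_READ/I915_WRITE stand in for whatever MMIO accessors the surrounding code uses, and the two handle_* helpers are hypothetical:

	u32 iir = I915_READ(GEN11_DE_HPD_IIR);
	u32 tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;

	if (tc)
		gen11_handle_tc_hotplug(tc);	/* hypothetical helper */
	if (tbt)
		gen11_handle_tbt_hotplug(tbt);	/* hypothetical helper */

	I915_WRITE(GEN11_DE_HPD_IIR, iir);	/* IIR bits are write-1-to-clear */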
#define GEN11_GT_INTR_DW0 _MMIO(0x190018)
#define GEN11_CSME (31)
#define GEN11_GUNIT (28)
@@ -7043,7 +7241,7 @@ enum {
#define GEN11_VECS(x) (31 - (x))
#define GEN11_VCS(x) (x)
-#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + (x * 4))
+#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
#define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060)
#define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064)
@@ -7052,12 +7250,12 @@ enum {
#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
-#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + (x * 4))
+#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
#define GEN11_IIR_REG0_SELECTOR _MMIO(0x190070)
#define GEN11_IIR_REG1_SELECTOR _MMIO(0x190074)
-#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + (x * 4))
+#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
@@ -7079,8 +7277,8 @@ enum {
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
-#define ILK_DPARB_GATE (1<<22)
-#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DPARB_GATE (1 << 22)
+#define ILK_VSDPFD_FULL (1 << 21)
#define FUSE_STRAP _MMIO(0x42014)
#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
@@ -7130,31 +7328,31 @@ enum {
#define CHICKEN_TRANS_A 0x420c0
#define CHICKEN_TRANS_B 0x420c4
#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
-#define VSC_DATA_SEL_SOFTWARE_CONTROL (1<<25) /* GLK and CNL+ */
-#define DDI_TRAINING_OVERRIDE_ENABLE (1<<19)
-#define DDI_TRAINING_OVERRIDE_VALUE (1<<18)
-#define DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */
-#define DDIE_TRAINING_OVERRIDE_VALUE (1<<16) /* CHICKEN_TRANS_A only */
-#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
-#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
+#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
+#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
+#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
+#define DDIE_TRAINING_OVERRIDE_ENABLE (1 << 17) /* CHICKEN_TRANS_A only */
+#define DDIE_TRAINING_OVERRIDE_VALUE (1 << 16) /* CHICKEN_TRANS_A only */
+#define PSR2_ADD_VERTICAL_LINE_COUNT (1 << 15)
+#define PSR2_VSC_ENABLE_PROG_HEADER (1 << 12)
#define DISP_ARB_CTL _MMIO(0x45000)
-#define DISP_FBC_MEMORY_WAKE (1<<31)
-#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
-#define DISP_FBC_WM_DIS (1<<15)
+#define DISP_FBC_MEMORY_WAKE (1 << 31)
+#define DISP_TILE_SURFACE_SWIZZLING (1 << 13)
+#define DISP_FBC_WM_DIS (1 << 15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
-#define DISP_DATA_PARTITION_5_6 (1<<6)
-#define DISP_IPC_ENABLE (1<<3)
+#define DISP_DATA_PARTITION_5_6 (1 << 6)
+#define DISP_IPC_ENABLE (1 << 3)
#define DBUF_CTL _MMIO(0x45008)
#define DBUF_CTL_S1 _MMIO(0x45008)
#define DBUF_CTL_S2 _MMIO(0x44FE8)
-#define DBUF_POWER_REQUEST (1<<31)
-#define DBUF_POWER_STATE (1<<30)
+#define DBUF_POWER_REQUEST (1 << 31)
+#define DBUF_POWER_STATE (1 << 30)
#define GEN7_MSG_CTL _MMIO(0x45010)
-#define WAIT_FOR_PCH_RESET_ACK (1<<1)
-#define WAIT_FOR_PCH_FLR_ACK (1<<0)
+#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
+#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
-#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
@@ -7179,16 +7377,16 @@ enum {
#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
-#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
+#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14)
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
-#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
-#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10)
+#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
+#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
-#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1<<0)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
@@ -7197,22 +7395,27 @@ enum {
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
-# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
-# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
-#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
-# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13)
-# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
-# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
-# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
+ #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1 << 10) | (1 << 26))
+ #define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
+
+#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+ #define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13)
+ #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12)
+ #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
+ #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+
+#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
+ #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
#define HIZ_CHICKEN _MMIO(0x7018)
-# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
-# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
+# define CHV_HZ_8X8_MODE_IN_1X (1 << 15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1 << 3)
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
-#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
+#define DISABLE_PIXEL_MASK_CAMMING (1 << 14)
#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -7230,7 +7433,7 @@ enum {
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
-#define GEN7_L3AGDIS (1<<19)
+#define GEN7_L3AGDIS (1 << 19)
#define GEN7_L3CNTLREG2 _MMIO(0xB020)
#define GEN7_L3CNTLREG3 _MMIO(0xB024)
@@ -7240,7 +7443,7 @@ enum {
#define GEN11_I2M_WRITE_DISABLE (1 << 28)
#define GEN7_L3SQCREG4 _MMIO(0xb034)
-#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
#define GEN8_L3SQCREG4 _MMIO(0xb118)
#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
@@ -7251,12 +7454,12 @@ enum {
#define HDC_CHICKEN0 _MMIO(0x7300)
#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0)
#define ICL_HDC_MODE _MMIO(0xE5F4)
-#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
-#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
-#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
-#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5)
-#define HDC_FORCE_NON_COHERENT (1<<4)
-#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
+#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15)
+#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11)
+#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5)
+#define HDC_FORCE_NON_COHERENT (1 << 4)
+#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
@@ -7269,13 +7472,21 @@ enum {
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
-#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11)
#define HSW_SCRATCH1 _MMIO(0xb038)
-#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
+#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27)
#define BDW_SCRATCH1 _MMIO(0xb11c)
-#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
+#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
+
+/* GEN11 chicken */
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+ _PIPEB_CHICKEN)
/* PCH */
@@ -7320,7 +7531,7 @@ enum {
#define SDE_TRANSA_FIFO_UNDER (1 << 0)
#define SDE_TRANS_MASK (0x3f)
-/* south display engine interrupt: CPT/PPT */
+/* south display engine interrupt: CPT - CNP */
#define SDE_AUDIO_POWER_D_CPT (1 << 31)
#define SDE_AUDIO_POWER_C_CPT (1 << 30)
#define SDE_AUDIO_POWER_B_CPT (1 << 29)
@@ -7368,14 +7579,29 @@ enum {
SDE_FDI_RXB_CPT | \
SDE_FDI_RXA_CPT)
+/* south display engine interrupt: ICP */
+#define SDE_TC4_HOTPLUG_ICP (1 << 27)
+#define SDE_TC3_HOTPLUG_ICP (1 << 26)
+#define SDE_TC2_HOTPLUG_ICP (1 << 25)
+#define SDE_TC1_HOTPLUG_ICP (1 << 24)
+#define SDE_GMBUS_ICP (1 << 23)
+#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
+#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
+#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \
+ SDE_DDIA_HOTPLUG_ICP)
+#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \
+ SDE_TC3_HOTPLUG_ICP | \
+ SDE_TC2_HOTPLUG_ICP | \
+ SDE_TC1_HOTPLUG_ICP)
+
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
#define SDEIIR _MMIO(0xc4008)
#define SDEIER _MMIO(0xc400c)
#define SERR_INT _MMIO(0xc4040)
-#define SERR_INT_POISON (1<<31)
-#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
+#define SERR_INT_POISON (1 << 31)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
/* digital port hotplug */
#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
@@ -7428,6 +7654,134 @@ enum {
#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
+/* This register is a reuse of the PCH_PORT_HOTPLUG register. The
+ * functionality covered by PCH_PORT_HOTPLUG is split into
+ * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC below (see the usage
+ * sketch after the TC helpers).
+ */
+
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define ICP_DDIB_HPD_ENABLE (1 << 7)
+#define ICP_DDIB_HPD_STATUS_MASK (3 << 4)
+#define ICP_DDIB_HPD_NO_DETECT (0 << 4)
+#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4)
+#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
+#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
+#define ICP_DDIA_HPD_ENABLE (1 << 3)
+#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
+#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
+#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
+#define ICP_DDIA_HPD_LONG_DETECT (2 << 0)
+#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0)
+
+#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
+#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
+#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
+#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
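Each TC port owns a four-bit nibble in SHOTPLUG_CTL_TC, so the three helpers shift fixed patterns (8 = enable, 2 = long pulse, 1 = short pulse) into the port's nibble; GEN11_TC_HOTPLUG_CTL above uses the same encoding. A hedged sketch of decoding one port's state (variable names are illustrative, I915_READ stands in for the driver's MMIO read):

	u32 val = I915_READ(SHOTPLUG_CTL_TC);
	bool long_pulse = val & ICP_TC_HPD_LONG_DETECT(tc_port);
	bool short_pulse = val & ICP_TC_HPD_SHORT_DETECT(tc_port);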
+/* Icelake DSC Rate Control Range Parameter Registers */
+#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
+#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
+#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define RC_BPG_OFFSET_SHIFT 10
+#define RC_MAX_QP_SHIFT 5
+#define RC_MIN_QP_SHIFT 0
+
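Each DSC rate-control range entry appears to be 16 bits wide, combining min QP (bits 4:0), max QP (bits 9:5) and the bits-per-group offset (bits 15:10) per the shifts above, so a 32-bit register holds two entries and its _UDW upper dword two more. A sketch of packing one entry (field names follow the DSC spec, not necessarily the driver's structs):

	u16 entry = (rc_min_qp << RC_MIN_QP_SHIFT) |
		    (rc_max_qp << RC_MAX_QP_SHIFT) |
		    (rc_bpg_offset << RC_BPG_OFFSET_SHIFT);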
+#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
+#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
+#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
+#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
+#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
+#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
+#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
+
#define PCH_GPIOA _MMIO(0xc5010)
#define PCH_GPIOB _MMIO(0xc5014)
#define PCH_GPIOC _MMIO(0xc5018)
@@ -7444,46 +7798,46 @@ enum {
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
-#define FP_CB_TUNE (0x3<<22)
+#define FP_CB_TUNE (0x3 << 22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST _MMIO(0xc606c)
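The new parentheses around pll are not cosmetic: because == binds tighter than some operators a caller might pass in, the unparenthesized form can silently misparse. A worked case (illustrative only):

	/*
	 * Old: PCH_FP0(pll & 1) -> _MMIO(pll & 1 == 0 ? _PCH_FPA0 : _PCH_FPB0)
	 *      == binds tighter than &, so this tests pll & (1 == 0), i.e. 0,
	 *      and the macro yields _PCH_FPB0 for every argument.
	 * New: ((pll) == 0 ? ...) compares the whole expression, as intended.
	 */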
#define PCH_DREF_CONTROL _MMIO(0xC6200)
#define DREF_CONTROL_MASK 0x7fc3
-#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
-#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
-#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
-#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
-#define DREF_SSC_SOURCE_DISABLE (0<<11)
-#define DREF_SSC_SOURCE_ENABLE (2<<11)
-#define DREF_SSC_SOURCE_MASK (3<<11)
-#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
-#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
-#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
-#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
-#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
-#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
-#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
-#define DREF_SSC4_DOWNSPREAD (0<<6)
-#define DREF_SSC4_CENTERSPREAD (1<<6)
-#define DREF_SSC1_DISABLE (0<<1)
-#define DREF_SSC1_ENABLE (1<<1)
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
+#define DREF_SSC_SOURCE_DISABLE (0 << 11)
+#define DREF_SSC_SOURCE_ENABLE (2 << 11)
+#define DREF_SSC_SOURCE_MASK (3 << 11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
+#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
+#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
+#define DREF_SSC4_DOWNSPREAD (0 << 6)
+#define DREF_SSC4_CENTERSPREAD (1 << 6)
+#define DREF_SSC1_DISABLE (0 << 1)
+#define DREF_SSC1_ENABLE (1 << 1)
#define DREF_SSC4_DISABLE (0)
#define DREF_SSC4_ENABLE (1)
#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
#define FDL_TP1_TIMER_SHIFT 12
-#define FDL_TP1_TIMER_MASK (3<<12)
+#define FDL_TP1_TIMER_MASK (3 << 12)
#define FDL_TP2_TIMER_SHIFT 10
-#define FDL_TP2_TIMER_MASK (3<<10)
+#define FDL_TP2_TIMER_MASK (3 << 10)
#define RAWCLK_FREQ_MASK 0x3ff
#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
#define CNP_RAWCLK_DIV(div) ((div) << 16)
@@ -7520,7 +7874,7 @@ enum {
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
#define _PCH_TRANS_VSYNC_A 0xe0014
-#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
@@ -7600,15 +7954,28 @@ enum {
#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define _HSW_VIDEO_DIP_GCP_B 0x61210
+/* Icelake PPS_DATA and _ECC DIP registers.
+ * These are available for transcoders B, C and eDP.
+ * The _A variants exist only so that the _MMIO_TRANS2
+ * definition can be reused; it offsets from them to the
+ * right per-transcoder location (see the sketch below).
+ */
+
+#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
+#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
+#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
+#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
+
#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
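Assuming _MMIO_TRANS2 computes addresses relative to the transcoder-A base (which is what the comment above implies), the otherwise-unused _A definitions let the existing macro derive the B, C and eDP addresses. Roughly, and assuming the usual 0x1000 per-transcoder stride (a sketch of the expansion, not the macro's literal body):

	/*
	 * ICL_VIDEO_DIP_PPS_DATA(TRANSCODER_B, 0)
	 *   -> _MMIO_TRANS2(TRANSCODER_B, _ICL_VIDEO_DIP_PPS_DATA_A)
	 *   -> _ICL_VIDEO_DIP_PPS_DATA_A + (transcoder B offset - transcoder A offset)
	 *   == 0x60350 + 0x1000 == 0x61350 == _ICL_VIDEO_DIP_PPS_DATA_B
	 */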
#define _HSW_STEREO_3D_CTL_A 0x70020
-#define S3D_ENABLE (1<<31)
+#define S3D_ENABLE (1 << 31)
#define _HSW_STEREO_3D_CTL_B 0x71020
#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
@@ -7651,156 +8018,156 @@ enum {
#define _PCH_TRANSBCONF 0xf1008
#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
-#define TRANS_DISABLE (0<<31)
-#define TRANS_ENABLE (1<<31)
-#define TRANS_STATE_MASK (1<<30)
-#define TRANS_STATE_DISABLE (0<<30)
-#define TRANS_STATE_ENABLE (1<<30)
-#define TRANS_FSYNC_DELAY_HB1 (0<<27)
-#define TRANS_FSYNC_DELAY_HB2 (1<<27)
-#define TRANS_FSYNC_DELAY_HB3 (2<<27)
-#define TRANS_FSYNC_DELAY_HB4 (3<<27)
-#define TRANS_INTERLACE_MASK (7<<21)
-#define TRANS_PROGRESSIVE (0<<21)
-#define TRANS_INTERLACED (3<<21)
-#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
-#define TRANS_8BPC (0<<5)
-#define TRANS_10BPC (1<<5)
-#define TRANS_6BPC (2<<5)
-#define TRANS_12BPC (3<<5)
+#define TRANS_DISABLE (0 << 31)
+#define TRANS_ENABLE (1 << 31)
+#define TRANS_STATE_MASK (1 << 30)
+#define TRANS_STATE_DISABLE (0 << 30)
+#define TRANS_STATE_ENABLE (1 << 30)
+#define TRANS_FSYNC_DELAY_HB1 (0 << 27)
+#define TRANS_FSYNC_DELAY_HB2 (1 << 27)
+#define TRANS_FSYNC_DELAY_HB3 (2 << 27)
+#define TRANS_FSYNC_DELAY_HB4 (3 << 27)
+#define TRANS_INTERLACE_MASK (7 << 21)
+#define TRANS_PROGRESSIVE (0 << 21)
+#define TRANS_INTERLACED (3 << 21)
+#define TRANS_LEGACY_INTERLACED_ILK (2 << 21)
+#define TRANS_8BPC (0 << 5)
+#define TRANS_10BPC (1 << 5)
+#define TRANS_6BPC (2 << 5)
+#define TRANS_12BPC (3 << 5)
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
-#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
-#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
+#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1 << 10)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1 << 4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
-#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
-#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31)
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29)
+#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25)
#define SOUTH_CHICKEN1 _MMIO(0xc2000)
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
-#define SPT_PWM_GRANULARITY (1<<0)
+#define SPT_PWM_GRANULARITY (1 << 0)
#define SOUTH_CHICKEN2 _MMIO(0xc2004)
-#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
-#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
-#define LPT_PWM_GRANULARITY (1<<5)
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1 << 12)
+#define LPT_PWM_GRANULARITY (1 << 5)
+#define DPLS_EDP_PPS_FIX_DIS (1 << 0)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
-#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
-#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1 << 1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1 << 0)
#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
-#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
-#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
-#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
-#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define CNP_PWM_CGE_GATING_DISABLE (1<<13)
-#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
+#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
+#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
#define _FDI_TXB_CTL 0x61100
#define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
-#define FDI_TX_DISABLE (0<<31)
-#define FDI_TX_ENABLE (1<<31)
-#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
-#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
-#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
-#define FDI_LINK_TRAIN_NONE (3<<28)
-#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+#define FDI_TX_DISABLE (0 << 31)
+#define FDI_TX_ENABLE (1 << 31)
+#define FDI_LINK_TRAIN_PATTERN_1 (0 << 28)
+#define FDI_LINK_TRAIN_PATTERN_2 (1 << 28)
+#define FDI_LINK_TRAIN_PATTERN_IDLE (2 << 28)
+#define FDI_LINK_TRAIN_NONE (3 << 28)
+#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3 << 25)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3 << 22)
/* ILK always use 400mV 0dB for voltage swing and pre-emphasis level.
SNB has different settings. */
/* SNB A-stepping */
-#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
-#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
-#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
-#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
/* SNB B-stepping */
-#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
-#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
-#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
-#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
-#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0 << 22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a << 22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39 << 22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38 << 22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f << 22)
#define FDI_DP_PORT_WIDTH_SHIFT 19
#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
-#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
+#define FDI_TX_ENHANCE_FRAME_ENABLE (1 << 18)
/* Ironlake: hardwired to 1 */
-#define FDI_TX_PLL_ENABLE (1<<14)
+#define FDI_TX_PLL_ENABLE (1 << 14)
/* Ivybridge has different bits for lolz */
-#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8)
-#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8)
-#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8)
-#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_1_IVB (0 << 8)
+#define FDI_LINK_TRAIN_PATTERN_2_IVB (1 << 8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2 << 8)
+#define FDI_LINK_TRAIN_NONE_IVB (3 << 8)
/* both Tx and Rx */
-#define FDI_COMPOSITE_SYNC (1<<11)
-#define FDI_LINK_TRAIN_AUTO (1<<10)
-#define FDI_SCRAMBLING_ENABLE (0<<7)
-#define FDI_SCRAMBLING_DISABLE (1<<7)
+#define FDI_COMPOSITE_SYNC (1 << 11)
+#define FDI_LINK_TRAIN_AUTO (1 << 10)
+#define FDI_SCRAMBLING_ENABLE (0 << 7)
+#define FDI_SCRAMBLING_DISABLE (1 << 7)
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
#define _FDI_RXA_CTL 0xf000c
#define _FDI_RXB_CTL 0xf100c
#define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
-#define FDI_RX_ENABLE (1<<31)
+#define FDI_RX_ENABLE (1 << 31)
/* train, dp width same as FDI_TX */
-#define FDI_FS_ERRC_ENABLE (1<<27)
-#define FDI_FE_ERRC_ENABLE (1<<26)
-#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
-#define FDI_8BPC (0<<16)
-#define FDI_10BPC (1<<16)
-#define FDI_6BPC (2<<16)
-#define FDI_12BPC (3<<16)
-#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
-#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
-#define FDI_RX_PLL_ENABLE (1<<13)
-#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
-#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
-#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
-#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
-#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
-#define FDI_PCDCLK (1<<4)
+#define FDI_FS_ERRC_ENABLE (1 << 27)
+#define FDI_FE_ERRC_ENABLE (1 << 26)
+#define FDI_RX_POLARITY_REVERSED_LPT (1 << 16)
+#define FDI_8BPC (0 << 16)
+#define FDI_10BPC (1 << 16)
+#define FDI_6BPC (2 << 16)
+#define FDI_12BPC (3 << 16)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1 << 15)
+#define FDI_DMI_LINK_REVERSE_MASK (1 << 14)
+#define FDI_RX_PLL_ENABLE (1 << 13)
+#define FDI_FS_ERR_CORRECT_ENABLE (1 << 11)
+#define FDI_FE_ERR_CORRECT_ENABLE (1 << 10)
+#define FDI_FS_ERR_REPORT_ENABLE (1 << 9)
+#define FDI_FE_ERR_REPORT_ENABLE (1 << 8)
+#define FDI_RX_ENHANCE_FRAME_ENABLE (1 << 6)
+#define FDI_PCDCLK (1 << 4)
/* CPT */
-#define FDI_AUTO_TRAINING (1<<10)
-#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
-#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
-#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
-#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
-#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+#define FDI_AUTO_TRAINING (1 << 10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0 << 8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1 << 8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2 << 8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3 << 8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3 << 8)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
-#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
-#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
-#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
-#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
-#define FDI_RX_TP1_TO_TP2_48 (2<<20)
-#define FDI_RX_TP1_TO_TP2_64 (3<<20)
-#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_PWRDN_LANE1_MASK (3 << 26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x) << 26)
+#define FDI_RX_PWRDN_LANE0_MASK (3 << 24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x) << 24)
+#define FDI_RX_TP1_TO_TP2_48 (2 << 20)
+#define FDI_RX_TP1_TO_TP2_64 (3 << 20)
+#define FDI_RX_FDI_DELAY_90 (0x90 << 0)
#define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define _FDI_RXA_TUSIZE1 0xf0030
@@ -7811,17 +8178,17 @@ enum {
#define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
-#define FDI_RX_INTER_LANE_ALIGN (1<<10)
-#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
-#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
-#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
-#define FDI_RX_FS_CODE_ERR (1<<6)
-#define FDI_RX_FE_CODE_ERR (1<<5)
-#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
-#define FDI_RX_HDCP_LINK_FAIL (1<<3)
-#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
-#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
-#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
+#define FDI_RX_INTER_LANE_ALIGN (1 << 10)
+#define FDI_RX_SYMBOL_LOCK (1 << 9) /* train 2 */
+#define FDI_RX_BIT_LOCK (1 << 8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL (1 << 7)
+#define FDI_RX_FS_CODE_ERR (1 << 6)
+#define FDI_RX_FE_CODE_ERR (1 << 5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1 << 4)
+#define FDI_RX_HDCP_LINK_FAIL (1 << 3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW (1 << 2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW (1 << 1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1 << 0)
#define _FDI_RXA_IIR 0xf0014
#define _FDI_RXA_IMR 0xf0018
@@ -7867,71 +8234,58 @@ enum {
#define PCH_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
/* CPT */
-#define PORT_TRANS_A_SEL_CPT 0
-#define PORT_TRANS_B_SEL_CPT (1<<29)
-#define PORT_TRANS_C_SEL_CPT (2<<29)
-#define PORT_TRANS_SEL_MASK (3<<29)
-#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
-#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
-#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
-#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
-#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
-
#define _TRANS_DP_CTL_A 0xe0300
#define _TRANS_DP_CTL_B 0xe1300
#define _TRANS_DP_CTL_C 0xe2300
#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
-#define TRANS_DP_OUTPUT_ENABLE (1<<31)
-#define TRANS_DP_PORT_SEL_B (0<<29)
-#define TRANS_DP_PORT_SEL_C (1<<29)
-#define TRANS_DP_PORT_SEL_D (2<<29)
-#define TRANS_DP_PORT_SEL_NONE (3<<29)
-#define TRANS_DP_PORT_SEL_MASK (3<<29)
-#define TRANS_DP_PIPE_TO_PORT(val) ((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
-#define TRANS_DP_AUDIO_ONLY (1<<26)
-#define TRANS_DP_ENH_FRAMING (1<<18)
-#define TRANS_DP_8BPC (0<<9)
-#define TRANS_DP_10BPC (1<<9)
-#define TRANS_DP_6BPC (2<<9)
-#define TRANS_DP_12BPC (3<<9)
-#define TRANS_DP_BPC_MASK (3<<9)
-#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_OUTPUT_ENABLE (1 << 31)
+#define TRANS_DP_PORT_SEL_MASK (3 << 29)
+#define TRANS_DP_PORT_SEL_NONE (3 << 29)
+#define TRANS_DP_PORT_SEL(port) (((port) - PORT_B) << 29)
+#define TRANS_DP_AUDIO_ONLY (1 << 26)
+#define TRANS_DP_ENH_FRAMING (1 << 18)
+#define TRANS_DP_8BPC (0 << 9)
+#define TRANS_DP_10BPC (1 << 9)
+#define TRANS_DP_6BPC (2 << 9)
+#define TRANS_DP_12BPC (3 << 9)
+#define TRANS_DP_BPC_MASK (3 << 9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1 << 4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
-#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1 << 3)
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
-#define TRANS_DP_SYNC_MASK (3<<3)
+#define TRANS_DP_SYNC_MASK (3 << 3)
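The old TRANS_DP_PORT_SEL_B/C/D values were consecutive, so they collapse into the single parameterized macro above; assuming PORT_B, PORT_C and PORT_D are consecutive enum values (as they are elsewhere in i915), the encodings come out unchanged:

	/*
	 * TRANS_DP_PORT_SEL(PORT_B) -> (0 << 29)   (old TRANS_DP_PORT_SEL_B)
	 * TRANS_DP_PORT_SEL(PORT_C) -> (1 << 29)   (old TRANS_DP_PORT_SEL_C)
	 * TRANS_DP_PORT_SEL(PORT_D) -> (2 << 29)   (old TRANS_DP_PORT_SEL_D)
	 */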
/* SNB eDP training params */
/* SNB A-stepping */
-#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
-#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
-#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
/* SNB B-stepping */
-#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
-#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
-#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
-#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
/* IVB */
-#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
-#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
-#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
-#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
/* legacy values */
-#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
-#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
-#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
-#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
#define VLV_PMWGICZ _MMIO(0x1300a4)
@@ -7978,7 +8332,7 @@ enum {
#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
#define FORCEWAKE_MT_ACK _MMIO(0x130040)
#define ECOBUS _MMIO(0xa180)
-#define FORCEWAKE_MT_ENABLE (1<<5)
+#define FORCEWAKE_MT_ENABLE (1 << 5)
#define VLV_SPAREG2H _MMIO(0xA194)
#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0)
#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
@@ -7987,13 +8341,13 @@ enum {
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
-#define GT_FIFO_SBDROPERR (1<<6)
-#define GT_FIFO_BLOBDROPERR (1<<5)
-#define GT_FIFO_SB_READ_ABORTERR (1<<4)
-#define GT_FIFO_DROPERR (1<<3)
-#define GT_FIFO_OVFERR (1<<2)
-#define GT_FIFO_IAWRERR (1<<1)
-#define GT_FIFO_IARDERR (1<<0)
+#define GT_FIFO_SBDROPERR (1 << 6)
+#define GT_FIFO_BLOBDROPERR (1 << 5)
+#define GT_FIFO_SB_READ_ABORTERR (1 << 4)
+#define GT_FIFO_DROPERR (1 << 3)
+#define GT_FIFO_OVFERR (1 << 2)
+#define GT_FIFO_IAWRERR (1 << 1)
+#define GT_FIFO_IARDERR (1 << 0)
#define GTFIFOCTL _MMIO(0x120008)
#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
@@ -8027,37 +8381,37 @@ enum {
# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
#define GEN7_UCGCTL4 _MMIO(0x940c)
-#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
-#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
#define GEN6_RSTCTL _MMIO(0x9420)
#define GEN8_UCGCTL6 _MMIO(0x9430)
-#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
-#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
+#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1 << 24)
+#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
#define GEN6_GFXPAUSE _MMIO(0xA000)
#define GEN6_RPNSWREQ _MMIO(0xA008)
-#define GEN6_TURBO_DISABLE (1<<31)
-#define GEN6_FREQUENCY(x) ((x)<<25)
-#define HSW_FREQUENCY(x) ((x)<<24)
-#define GEN9_FREQUENCY(x) ((x)<<23)
-#define GEN6_OFFSET(x) ((x)<<19)
-#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_TURBO_DISABLE (1 << 31)
+#define GEN6_FREQUENCY(x) ((x) << 25)
+#define HSW_FREQUENCY(x) ((x) << 24)
+#define GEN9_FREQUENCY(x) ((x) << 23)
+#define GEN6_OFFSET(x) ((x) << 19)
+#define GEN6_AGGRESSIVE_TURBO (0 << 15)
#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
#define GEN6_RC_CONTROL _MMIO(0xA090)
-#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
-#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
-#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
-#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
-#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
-#define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24)
-#define GEN7_RC_CTL_TO_MODE (1<<28)
-#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
-#define GEN6_RC_CTL_HW_ENABLE (1<<31)
+#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1 << 17)
+#define GEN6_RC_CTL_RC6_ENABLE (1 << 18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1 << 20)
+#define GEN6_RC_CTL_RC7_ENABLE (1 << 22)
+#define VLV_RC_CTL_CTX_RST_PARALLEL (1 << 24)
+#define GEN7_RC_CTL_TO_MODE (1 << 28)
+#define GEN6_RC_CTL_EI_MODE(x) ((x) << 27)
+#define GEN6_RC_CTL_HW_ENABLE (1 << 31)
#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010)
#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014)
#define GEN6_RPSTAT1 _MMIO(0xA01C)
@@ -8068,19 +8422,19 @@ enum {
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
#define GEN6_RP_CONTROL _MMIO(0xA024)
-#define GEN6_RP_MEDIA_TURBO (1<<11)
-#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
-#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
-#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
-#define GEN6_RP_MEDIA_HW_MODE (1<<9)
-#define GEN6_RP_MEDIA_SW_MODE (0<<9)
-#define GEN6_RP_MEDIA_IS_GFX (1<<8)
-#define GEN6_RP_ENABLE (1<<7)
-#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
-#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
-#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
-#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
-#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
+#define GEN6_RP_MEDIA_TURBO (1 << 11)
+#define GEN6_RP_MEDIA_MODE_MASK (3 << 9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3 << 9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2 << 9)
+#define GEN6_RP_MEDIA_HW_MODE (1 << 9)
+#define GEN6_RP_MEDIA_SW_MODE (0 << 9)
+#define GEN6_RP_MEDIA_IS_GFX (1 << 8)
+#define GEN6_RP_ENABLE (1 << 7)
+#define GEN6_RP_UP_IDLE_MIN (0x1 << 3)
+#define GEN6_RP_UP_BUSY_AVG (0x2 << 3)
+#define GEN6_RP_UP_BUSY_CONT (0x4 << 3)
+#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0)
+#define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0)
#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
@@ -8116,15 +8470,15 @@ enum {
#define VLV_RCEDATA _MMIO(0xA0BC)
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
#define GEN6_PMINTRMSK _MMIO(0xA168)
-#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1<<31)
-#define ARAT_EXPIRED_INTRMSK (1<<9)
+#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1 << 31)
+#define ARAT_EXPIRED_INTRMSK (1 << 9)
#define GEN8_MISC_CTRL0 _MMIO(0xA180)
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
-#define GEN9_RENDER_PG_ENABLE (1<<0)
-#define GEN9_MEDIA_PG_ENABLE (1<<1)
+#define GEN9_RENDER_PG_ENABLE (1 << 0)
+#define GEN9_MEDIA_PG_ENABLE (1 << 1)
#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
@@ -8137,13 +8491,13 @@ enum {
#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */
#define GEN6_PMIIR _MMIO(0x44028)
#define GEN6_PMIER _MMIO(0x4402C)
-#define GEN6_PM_MBOX_EVENT (1<<25)
-#define GEN6_PM_THERMAL_EVENT (1<<24)
-#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
-#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
-#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
-#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
-#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
+#define GEN6_PM_MBOX_EVENT (1 << 25)
+#define GEN6_PM_THERMAL_EVENT (1 << 24)
+#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
+#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1)
#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
@@ -8152,16 +8506,16 @@ enum {
#define GEN7_GT_SCRATCH_REG_NUM 8
#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098)
-#define VLV_GFX_CLK_STATUS_BIT (1<<3)
-#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
+#define VLV_GFX_CLK_STATUS_BIT (1 << 3)
+#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2)
#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104)
#define VLV_COUNTER_CONTROL _MMIO(0x138104)
-#define VLV_COUNT_RANGE_HIGH (1<<15)
-#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
-#define VLV_RENDER_RC0_COUNT_EN (1<<4)
-#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
-#define VLV_RENDER_RC6_COUNT_EN (1<<0)
+#define VLV_COUNT_RANGE_HIGH (1 << 15)
+#define VLV_MEDIA_RC0_COUNT_EN (1 << 5)
+#define VLV_RENDER_RC0_COUNT_EN (1 << 4)
+#define VLV_MEDIA_RC6_COUNT_EN (1 << 1)
+#define VLV_RENDER_RC6_COUNT_EN (1 << 0)
#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
#define VLV_GT_RENDER_RC6 _MMIO(0x138108)
#define VLV_GT_MEDIA_RC6 _MMIO(0x13810C)
@@ -8172,7 +8526,7 @@ enum {
#define VLV_MEDIA_C0_COUNT _MMIO(0x13811C)
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
-#define GEN6_PCODE_READY (1<<31)
+#define GEN6_PCODE_READY (1 << 31)
#define GEN6_PCODE_ERROR_MASK 0xFF
#define GEN6_PCODE_SUCCESS 0x0
#define GEN6_PCODE_ILLEGAL_CMD 0x1
@@ -8216,7 +8570,7 @@ enum {
#define GEN6_PCODE_DATA1 _MMIO(0x13812C)
#define GEN6_GT_CORE_STATUS _MMIO(0x138060)
-#define GEN6_CORE_CPD_STATE_MASK (7<<4)
+#define GEN6_CORE_CPD_STATE_MASK (7 << 4)
#define GEN6_RCn_MASK 7
#define GEN6_RC0 0
#define GEN6_RC3 2
@@ -8228,26 +8582,26 @@ enum {
#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
-#define CHV_SS_PG_ENABLE (1<<1)
-#define CHV_EU08_PG_ENABLE (1<<9)
-#define CHV_EU19_PG_ENABLE (1<<17)
-#define CHV_EU210_PG_ENABLE (1<<25)
+#define CHV_SS_PG_ENABLE (1 << 1)
+#define CHV_EU08_PG_ENABLE (1 << 9)
+#define CHV_EU19_PG_ENABLE (1 << 17)
+#define CHV_EU210_PG_ENABLE (1 << 25)
#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
-#define CHV_EU311_PG_ENABLE (1<<1)
+#define CHV_EU311_PG_ENABLE (1 << 1)
-#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice)*0x4)
+#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
((slice) % 3) * 0x4)
#define GEN9_PGCTL_SLICE_ACK (1 << 0)
-#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
+#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2))
#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
-#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice)*0x8)
+#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
-#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice)*0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
@@ -8260,10 +8614,10 @@ enum {
#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
#define GEN7_MISCCPCTL _MMIO(0x9424)
-#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
-#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2)
-#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)
-#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
+#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
+#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
+#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6)
#define GEN8_GARBCNTL _MMIO(0xB004)
#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7)
@@ -8292,61 +8646,62 @@ enum {
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
-#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
-#define GEN7_PARITY_ERROR_VALID (1<<13)
-#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
-#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
+#define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14)
+#define GEN7_PARITY_ERROR_VALID (1 << 13)
+#define GEN7_L3CDERRST1_BANK_MASK (3 << 11)
+#define GEN7_L3CDERRST1_SUBBANK_MASK (7 << 8)
#define GEN7_PARITY_ERROR_ROW(reg) \
- ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+ (((reg) & GEN7_L3CDERRST1_ROW_MASK) >> 14)
#define GEN7_PARITY_ERROR_BANK(reg) \
- ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+ (((reg) & GEN7_L3CDERRST1_BANK_MASK) >> 11)
#define GEN7_PARITY_ERROR_SUBBANK(reg) \
- ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
-#define GEN7_L3CDERRST1_ENABLE (1<<7)
+ (((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define GEN7_L3CDERRST1_ENABLE (1 << 7)
#define GEN7_L3LOG(slice, i) _MMIO(0xB070 + (slice) * 0x200 + (i) * 4)
#define GEN7_L3LOG_SIZE 0x80
#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100)
-#define GEN7_MAX_PS_THREAD_DEP (8<<12)
-#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
-#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4)
-#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+#define GEN7_MAX_PS_THREAD_DEP (8 << 12)
+#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1 << 10)
+#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1 << 4)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3)
#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188)
-#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
-#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
+#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5)
+#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
-#define FLOW_CONTROL_ENABLE (1<<15)
-#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
-#define STALL_DOP_GATING_DISABLE (1<<5)
-#define THROTTLE_12_5 (7<<2)
-#define DISABLE_EARLY_EOT (1<<1)
+#define FLOW_CONTROL_ENABLE (1 << 15)
+#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8)
+#define STALL_DOP_GATING_DISABLE (1 << 5)
+#define THROTTLE_12_5 (7 << 2)
+#define DISABLE_EARLY_EOT (1 << 1)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
-#define DOP_CLOCK_GATING_DISABLE (1<<0)
-#define PUSH_CONSTANT_DEREF_DISABLE (1<<8)
+#define DOP_CLOCK_GATING_DISABLE (1 << 0)
+#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
+#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
#define HALF_SLICE_CHICKEN2 _MMIO(0xe180)
-#define GEN8_ST_PO_DISABLE (1<<13)
+#define GEN8_ST_PO_DISABLE (1 << 13)
#define HALF_SLICE_CHICKEN3 _MMIO(0xe184)
-#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
-#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
-#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
-#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4)
-#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+#define HSW_SAMPLE_C_PERFORMANCE (1 << 9)
+#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8)
+#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5)
+#define CNL_FAST_ANISO_L1_BANKING_FIX (1 << 4)
+#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
-#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1<<8)
-#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
-#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
+#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1 << 8)
+#define GEN9_ENABLE_YV12_BUGFIX (1 << 4)
+#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
/* Audio */
#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
@@ -8478,6 +8833,14 @@ enum {
#define _HSW_PWR_WELL_CTL3 0x45408
#define _HSW_PWR_WELL_CTL4 0x4540C
+#define _ICL_PWR_WELL_CTL_AUX1 0x45440
+#define _ICL_PWR_WELL_CTL_AUX2 0x45444
+#define _ICL_PWR_WELL_CTL_AUX4 0x4544C
+
+#define _ICL_PWR_WELL_CTL_DDI1 0x45450
+#define _ICL_PWR_WELL_CTL_DDI2 0x45454
+#define _ICL_PWR_WELL_CTL_DDI4 0x4545C
+
/*
* Each power well control register contains up to 16 (request, status) HW
* flag tuples. The register index and HW flag shift is determined by the
@@ -8487,21 +8850,27 @@ enum {
*/
#define _HSW_PW_REG_IDX(pw) ((pw) >> 4)
#define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2)
-/* TODO: Add all PWR_WELL_CTL registers below for new platforms */
#define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL1))
+ _HSW_PWR_WELL_CTL1, \
+ _ICL_PWR_WELL_CTL_AUX1, \
+ _ICL_PWR_WELL_CTL_DDI1))
#define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL2))
+ _HSW_PWR_WELL_CTL2, \
+ _ICL_PWR_WELL_CTL_AUX2, \
+ _ICL_PWR_WELL_CTL_DDI2))
+/* KVMR doesn't have a reg for AUX or DDI power well control */
#define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3)
#define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL4))
+ _HSW_PWR_WELL_CTL4, \
+ _ICL_PWR_WELL_CTL_AUX4, \
+ _ICL_PWR_WELL_CTL_DDI4))
#define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1))
#define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw))
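/*
 * Illustrative example (hypothetical id, derived from the macros above):
 * for a power well with HW flag index 0x12, _HSW_PW_REG_IDX() picks the
 * second register (0x12 >> 4 == 1) and _HSW_PW_SHIFT() yields 4, so the
 * request bit is (1 << 5) and the status bit is (1 << 4) in that register.
 */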
#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
-#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
-#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
-#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
+#define HSW_PWR_WELL_FORCE_ON (1 << 19)
#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
/* SKL Fuse Status */
@@ -8512,9 +8881,11 @@ enum skl_power_gate {
};
#define SKL_FUSE_STATUS _MMIO(0x42000)
-#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
+#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
/* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */
#define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1)
+/* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */
+#define ICL_PW_TO_PG(pw) ((pw) - ICL_DISP_PW_1 + SKL_PG1)
#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
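/*
 * Illustrative example (assuming SKL_PG0..SKL_PG2 number 0..2): power well
 * SKL_DISP_PW_2 maps to SKL_PG2, whose fuse distribution status is then
 * reported in bit 25 (27 - 2) of SKL_FUSE_STATUS.
 */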
#define _CNL_AUX_REG_IDX(pw) ((pw) - 9)
@@ -8527,8 +8898,8 @@ enum skl_power_gate {
_CNL_AUX_ANAOVRD1_C, \
_CNL_AUX_ANAOVRD1_D, \
_CNL_AUX_ANAOVRD1_F))
-#define CNL_AUX_ANAOVRD1_ENABLE (1<<16)
-#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1<<23)
+#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16)
+#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23)
/* HDCP Key Registers */
#define HDCP_KEY_CONF _MMIO(0x66c00)
@@ -8573,7 +8944,7 @@ enum skl_power_gate {
#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
-#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + h * 4))
+#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4))
#define HDCP_SHA_TEXT _MMIO(0x66d18)
/* HDCP Auth Registers */
@@ -8589,7 +8960,7 @@ enum skl_power_gate {
_PORTC_HDCP_AUTHENC, \
_PORTD_HDCP_AUTHENC, \
_PORTE_HDCP_AUTHENC, \
- _PORTF_HDCP_AUTHENC) + x)
+ _PORTF_HDCP_AUTHENC) + (x))
#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
#define HDCP_CONF_CAPTURE_AN BIT(0)
#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
@@ -8610,7 +8981,7 @@ enum skl_power_gate {
#define HDCP_STATUS_R0_READY BIT(18)
#define HDCP_STATUS_AN_READY BIT(17)
#define HDCP_STATUS_CIPHER BIT(16)
-#define HDCP_STATUS_FRAME_CNT(x) ((x >> 8) & 0xff)
+#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
@@ -8619,37 +8990,37 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
-#define TRANS_DDI_FUNC_ENABLE (1<<31)
+#define TRANS_DDI_FUNC_ENABLE (1 << 31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_PORT_MASK (7 << 28)
#define TRANS_DDI_PORT_SHIFT 28
-#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
-#define TRANS_DDI_PORT_NONE (0<<28)
-#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
-#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
-#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
-#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
-#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
-#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
-#define TRANS_DDI_BPC_MASK (7<<20)
-#define TRANS_DDI_BPC_8 (0<<20)
-#define TRANS_DDI_BPC_10 (1<<20)
-#define TRANS_DDI_BPC_6 (2<<20)
-#define TRANS_DDI_BPC_12 (3<<20)
-#define TRANS_DDI_PVSYNC (1<<17)
-#define TRANS_DDI_PHSYNC (1<<16)
-#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
-#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
-#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
-#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
-#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
-#define TRANS_DDI_HDCP_SIGNALLING (1<<9)
-#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
-#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
-#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
-#define TRANS_DDI_BFI_ENABLE (1<<4)
-#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4)
-#define TRANS_DDI_HDMI_SCRAMBLING (1<<0)
+#define TRANS_DDI_SELECT_PORT(x) ((x) << 28)
+#define TRANS_DDI_PORT_NONE (0 << 28)
+#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
+#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
+#define TRANS_DDI_MODE_SELECT_FDI (4 << 24)
+#define TRANS_DDI_BPC_MASK (7 << 20)
+#define TRANS_DDI_BPC_8 (0 << 20)
+#define TRANS_DDI_BPC_10 (1 << 20)
+#define TRANS_DDI_BPC_6 (2 << 20)
+#define TRANS_DDI_BPC_12 (3 << 20)
+#define TRANS_DDI_PVSYNC (1 << 17)
+#define TRANS_DDI_PHSYNC (1 << 16)
+#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
+#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
+#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_BFI_ENABLE (1 << 4)
+#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
+#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
| TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
| TRANS_DDI_HDMI_SCRAMBLING)
@@ -8658,28 +9029,29 @@ enum skl_power_gate {
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define DP_TP_CTL_ENABLE (1<<31)
-#define DP_TP_CTL_MODE_SST (0<<27)
-#define DP_TP_CTL_MODE_MST (1<<27)
-#define DP_TP_CTL_FORCE_ACT (1<<25)
-#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
-#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
-#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
-#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
-#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
-#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
-#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
-#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
+#define DP_TP_CTL_ENABLE (1 << 31)
+#define DP_TP_CTL_MODE_SST (0 << 27)
+#define DP_TP_CTL_MODE_MST (1 << 27)
+#define DP_TP_CTL_FORCE_ACT (1 << 25)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15)
+#define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 (0 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 (1 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT4 (5 << 8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2 << 8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3 << 8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1 << 7)
/* DisplayPort Transport Status */
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define DP_TP_STATUS_IDLE_DONE (1<<25)
-#define DP_TP_STATUS_ACT_SENT (1<<24)
-#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_IDLE_DONE (1 << 25)
+#define DP_TP_STATUS_ACT_SENT (1 << 24)
+#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1 << 12)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
@@ -8688,16 +9060,16 @@ enum skl_power_gate {
#define _DDI_BUF_CTL_A 0x64000
#define _DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
-#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_CTL_ENABLE (1 << 31)
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
-#define DDI_BUF_EMP_MASK (0xf<<24)
-#define DDI_BUF_PORT_REVERSAL (1<<16)
-#define DDI_BUF_IS_IDLE (1<<7)
-#define DDI_A_4_LANES (1<<4)
+#define DDI_BUF_EMP_MASK (0xf << 24)
+#define DDI_BUF_PORT_REVERSAL (1 << 16)
+#define DDI_BUF_IS_IDLE (1 << 7)
+#define DDI_A_4_LANES (1 << 4)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
#define DDI_PORT_WIDTH_SHIFT 1
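/* Illustrative: a 4-lane link encodes as DDI_PORT_WIDTH(4) == (3 << 1) */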
-#define DDI_INIT_DISPLAY_DETECTED (1<<0)
+#define DDI_INIT_DISPLAY_DETECTED (1 << 0)
/* DDI Buffer Translations */
#define _DDI_BUF_TRANS_A 0x64E00
@@ -8712,95 +9084,99 @@ enum skl_power_gate {
#define SBI_ADDR _MMIO(0xC6000)
#define SBI_DATA _MMIO(0xC6004)
#define SBI_CTL_STAT _MMIO(0xC6008)
-#define SBI_CTL_DEST_ICLK (0x0<<16)
-#define SBI_CTL_DEST_MPHY (0x1<<16)
-#define SBI_CTL_OP_IORD (0x2<<8)
-#define SBI_CTL_OP_IOWR (0x3<<8)
-#define SBI_CTL_OP_CRRD (0x6<<8)
-#define SBI_CTL_OP_CRWR (0x7<<8)
-#define SBI_RESPONSE_FAIL (0x1<<1)
-#define SBI_RESPONSE_SUCCESS (0x0<<1)
-#define SBI_BUSY (0x1<<0)
-#define SBI_READY (0x0<<0)
+#define SBI_CTL_DEST_ICLK (0x0 << 16)
+#define SBI_CTL_DEST_MPHY (0x1 << 16)
+#define SBI_CTL_OP_IORD (0x2 << 8)
+#define SBI_CTL_OP_IOWR (0x3 << 8)
+#define SBI_CTL_OP_CRRD (0x6 << 8)
+#define SBI_CTL_OP_CRWR (0x7 << 8)
+#define SBI_RESPONSE_FAIL (0x1 << 1)
+#define SBI_RESPONSE_SUCCESS (0x0 << 1)
+#define SBI_BUSY (0x1 << 0)
+#define SBI_READY (0x0 << 0)
/* SBI offsets */
#define SBI_SSCDIVINTPHASE 0x0200
#define SBI_SSCDIVINTPHASE6 0x0600
#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
-#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f<<1)
-#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1)
#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
-#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f<<8)
-#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
-#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
-#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f << 8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0)
#define SBI_SSCDITHPHASE 0x0204
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
-#define SBI_SSCCTL_PATHALT (1<<3)
-#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCCTL_PATHALT (1 << 3)
+#define SBI_SSCCTL_DISABLE (1 << 0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
-#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1<<4)
-#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
+#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4)
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4)
#define SBI_DBUFF0 0x2a00
#define SBI_GEN0 0x1f00
-#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0)
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE _MMIO(0xC6020)
-#define PIXCLK_GATE_UNGATE (1<<0)
-#define PIXCLK_GATE_GATE (0<<0)
+#define PIXCLK_GATE_UNGATE (1 << 0)
+#define PIXCLK_GATE_GATE (0 << 0)
/* SPLL */
#define SPLL_CTL _MMIO(0x46020)
-#define SPLL_PLL_ENABLE (1<<31)
-#define SPLL_PLL_SSC (1<<28)
-#define SPLL_PLL_NON_SSC (2<<28)
-#define SPLL_PLL_LCPLL (3<<28)
-#define SPLL_PLL_REF_MASK (3<<28)
-#define SPLL_PLL_FREQ_810MHz (0<<26)
-#define SPLL_PLL_FREQ_1350MHz (1<<26)
-#define SPLL_PLL_FREQ_2700MHz (2<<26)
-#define SPLL_PLL_FREQ_MASK (3<<26)
+#define SPLL_PLL_ENABLE (1 << 31)
+#define SPLL_PLL_SSC (1 << 28)
+#define SPLL_PLL_NON_SSC (2 << 28)
+#define SPLL_PLL_LCPLL (3 << 28)
+#define SPLL_PLL_REF_MASK (3 << 28)
+#define SPLL_PLL_FREQ_810MHz (0 << 26)
+#define SPLL_PLL_FREQ_1350MHz (1 << 26)
+#define SPLL_PLL_FREQ_2700MHz (2 << 26)
+#define SPLL_PLL_FREQ_MASK (3 << 26)
/* WRPLL */
#define _WRPLL_CTL1 0x46040
#define _WRPLL_CTL2 0x46060
#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
-#define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SSC (1<<28)
-#define WRPLL_PLL_NON_SSC (2<<28)
-#define WRPLL_PLL_LCPLL (3<<28)
-#define WRPLL_PLL_REF_MASK (3<<28)
+#define WRPLL_PLL_ENABLE (1 << 31)
+#define WRPLL_PLL_SSC (1 << 28)
+#define WRPLL_PLL_NON_SSC (2 << 28)
+#define WRPLL_PLL_LCPLL (3 << 28)
+#define WRPLL_PLL_REF_MASK (3 << 28)
/* WRPLL divider programming */
-#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
#define WRPLL_DIVIDER_REF_MASK (0xff)
-#define WRPLL_DIVIDER_POST(x) ((x)<<8)
-#define WRPLL_DIVIDER_POST_MASK (0x3f<<8)
+#define WRPLL_DIVIDER_POST(x) ((x) << 8)
+#define WRPLL_DIVIDER_POST_MASK (0x3f << 8)
#define WRPLL_DIVIDER_POST_SHIFT 8
-#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16)
#define WRPLL_DIVIDER_FB_SHIFT 16
-#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
+#define WRPLL_DIVIDER_FB_MASK (0xff << 16)
/* Port clock selection */
#define _PORT_CLK_SEL_A 0x46100
#define _PORT_CLK_SEL_B 0x46104
#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
-#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
-#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
-#define PORT_CLK_SEL_LCPLL_810 (2<<29)
-#define PORT_CLK_SEL_SPLL (3<<29)
-#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
-#define PORT_CLK_SEL_WRPLL1 (4<<29)
-#define PORT_CLK_SEL_WRPLL2 (5<<29)
-#define PORT_CLK_SEL_NONE (7<<29)
-#define PORT_CLK_SEL_MASK (7<<29)
+#define PORT_CLK_SEL_LCPLL_2700 (0 << 29)
+#define PORT_CLK_SEL_LCPLL_1350 (1 << 29)
+#define PORT_CLK_SEL_LCPLL_810 (2 << 29)
+#define PORT_CLK_SEL_SPLL (3 << 29)
+#define PORT_CLK_SEL_WRPLL(pll) (((pll) + 4) << 29)
+#define PORT_CLK_SEL_WRPLL1 (4 << 29)
+#define PORT_CLK_SEL_WRPLL2 (5 << 29)
+#define PORT_CLK_SEL_NONE (7 << 29)
+#define PORT_CLK_SEL_MASK (7 << 29)
/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
#define DDI_CLK_SEL_NONE (0x0 << 28)
#define DDI_CLK_SEL_MG (0x8 << 28)
+#define DDI_CLK_SEL_TBT_162 (0xC << 28)
+#define DDI_CLK_SEL_TBT_270 (0xD << 28)
+#define DDI_CLK_SEL_TBT_540 (0xE << 28)
+#define DDI_CLK_SEL_TBT_810 (0xF << 28)
#define DDI_CLK_SEL_MASK (0xF << 28)
/* Transcoder clock selection */
@@ -8808,8 +9184,8 @@ enum skl_power_gate {
#define _TRANS_CLK_SEL_B 0x46144
#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
/* For each transcoder, we need to select the corresponding port clock */
-#define TRANS_CLK_SEL_DISABLED (0x0<<29)
-#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
+#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
+#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
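/* Illustrative: a transcoder fed by port B programs (PORT_B + 1) << 29 */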
#define CDCLK_FREQ _MMIO(0x46200)
@@ -8819,28 +9195,29 @@ enum skl_power_gate {
#define _TRANS_EDP_MSA_MISC 0x6f410
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
-#define TRANS_MSA_SYNC_CLK (1<<0)
-#define TRANS_MSA_6_BPC (0<<5)
-#define TRANS_MSA_8_BPC (1<<5)
-#define TRANS_MSA_10_BPC (2<<5)
-#define TRANS_MSA_12_BPC (3<<5)
-#define TRANS_MSA_16_BPC (4<<5)
+#define TRANS_MSA_SYNC_CLK (1 << 0)
+#define TRANS_MSA_6_BPC (0 << 5)
+#define TRANS_MSA_8_BPC (1 << 5)
+#define TRANS_MSA_10_BPC (2 << 5)
+#define TRANS_MSA_12_BPC (3 << 5)
+#define TRANS_MSA_16_BPC (4 << 5)
+#define TRANS_MSA_CEA_RANGE (1 << 3)
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
-#define LCPLL_PLL_DISABLE (1<<31)
-#define LCPLL_PLL_LOCK (1<<30)
-#define LCPLL_CLK_FREQ_MASK (3<<26)
-#define LCPLL_CLK_FREQ_450 (0<<26)
-#define LCPLL_CLK_FREQ_54O_BDW (1<<26)
-#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
-#define LCPLL_CLK_FREQ_675_BDW (3<<26)
-#define LCPLL_CD_CLOCK_DISABLE (1<<25)
-#define LCPLL_ROOT_CD_CLOCK_DISABLE (1<<24)
-#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
-#define LCPLL_POWER_DOWN_ALLOW (1<<22)
-#define LCPLL_CD_SOURCE_FCLK (1<<21)
-#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+#define LCPLL_PLL_DISABLE (1 << 31)
+#define LCPLL_PLL_LOCK (1 << 30)
+#define LCPLL_CLK_FREQ_MASK (3 << 26)
+#define LCPLL_CLK_FREQ_450 (0 << 26)
+#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
+#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26)
+#define LCPLL_CLK_FREQ_675_BDW (3 << 26)
+#define LCPLL_CD_CLOCK_DISABLE (1 << 25)
+#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24)
+#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23)
+#define LCPLL_POWER_DOWN_ALLOW (1 << 22)
+#define LCPLL_CD_SOURCE_FCLK (1 << 21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19)
/*
* SKL Clocks
@@ -8868,16 +9245,16 @@ enum skl_power_gate {
/* LCPLL_CTL */
#define LCPLL1_CTL _MMIO(0x46010)
#define LCPLL2_CTL _MMIO(0x46014)
-#define LCPLL_PLL_ENABLE (1<<31)
+#define LCPLL_PLL_ENABLE (1 << 31)
/* DPLL control1 */
#define DPLL_CTRL1 _MMIO(0x6C058)
-#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
-#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
-#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
-#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id)*6+1)
-#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
-#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
+#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
+#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4))
+#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1))
+#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1)
+#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1))
+#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))
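/*
 * Illustrative example (derived from the macros above): each DPLL id owns
 * a 6-bit field, so for id 1 the override bit is (1 << 6), the link rate
 * occupies bits 9:7 and the HDMI mode bit is bit 11.
 */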
#define DPLL_CTRL1_LINK_RATE_2700 0
#define DPLL_CTRL1_LINK_RATE_1350 1
#define DPLL_CTRL1_LINK_RATE_810 2
@@ -8887,43 +9264,43 @@ enum skl_power_gate {
/* DPLL control2 */
#define DPLL_CTRL2 _MMIO(0x6C05C)
-#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
-#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
-#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
-#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1))
-#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
+#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15))
+#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1)
+#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3))
/* DPLL Status */
#define DPLL_STATUS _MMIO(0x6C060)
-#define DPLL_LOCK(id) (1<<((id)*8))
+#define DPLL_LOCK(id) (1 << ((id) * 8))
/* DPLL cfg */
#define _DPLL1_CFGCR1 0x6C040
#define _DPLL2_CFGCR1 0x6C048
#define _DPLL3_CFGCR1 0x6C050
-#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
-#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
-#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9)
+#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
+#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
+#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
#define _DPLL1_CFGCR2 0x6C044
#define _DPLL2_CFGCR2 0x6C04C
#define _DPLL3_CFGCR2 0x6C054
-#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
-#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8)
-#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7)
-#define DPLL_CFGCR2_KDIV_MASK (3<<5)
-#define DPLL_CFGCR2_KDIV(x) ((x)<<5)
-#define DPLL_CFGCR2_KDIV_5 (0<<5)
-#define DPLL_CFGCR2_KDIV_2 (1<<5)
-#define DPLL_CFGCR2_KDIV_3 (2<<5)
-#define DPLL_CFGCR2_KDIV_1 (3<<5)
-#define DPLL_CFGCR2_PDIV_MASK (7<<2)
-#define DPLL_CFGCR2_PDIV(x) ((x)<<2)
-#define DPLL_CFGCR2_PDIV_1 (0<<2)
-#define DPLL_CFGCR2_PDIV_2 (1<<2)
-#define DPLL_CFGCR2_PDIV_3 (2<<2)
-#define DPLL_CFGCR2_PDIV_7 (4<<2)
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
+#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
+#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
+#define DPLL_CFGCR2_KDIV_MASK (3 << 5)
+#define DPLL_CFGCR2_KDIV(x) ((x) << 5)
+#define DPLL_CFGCR2_KDIV_5 (0 << 5)
+#define DPLL_CFGCR2_KDIV_2 (1 << 5)
+#define DPLL_CFGCR2_KDIV_3 (2 << 5)
+#define DPLL_CFGCR2_KDIV_1 (3 << 5)
+#define DPLL_CFGCR2_PDIV_MASK (7 << 2)
+#define DPLL_CFGCR2_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR2_PDIV_1 (0 << 2)
+#define DPLL_CFGCR2_PDIV_2 (1 << 2)
+#define DPLL_CFGCR2_PDIV_3 (2 << 2)
+#define DPLL_CFGCR2_PDIV_7 (4 << 2)
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
@@ -8935,9 +9312,9 @@ enum skl_power_gate {
#define DPCLKA_CFGCR0 _MMIO(0x6C200)
#define DPCLKA_CFGCR0_ICL _MMIO(0x164280)
#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \
- (port)+10))
+ (port) + 10))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
- (port)*2)
+ (port) * 2)
#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
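/*
 * Illustrative example (derived from the macros above): ports A..E use a
 * 2-bit clock select field at bit (port * 2) and a clock-off bit at
 * (port + 10); PORT_F is special-cased, with its select field at bit 21
 * and its clock-off bit at bit 23.
 */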
@@ -8950,6 +9327,8 @@ enum skl_power_gate {
#define PLL_POWER_STATE (1 << 26)
#define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
+#define TBT_PLL_ENABLE _MMIO(0x46020)
+
#define _MG_PLL1_ENABLE 0x46030
#define _MG_PLL2_ENABLE 0x46034
#define _MG_PLL3_ENABLE 0x46038
@@ -8963,6 +9342,7 @@ enum skl_power_gate {
#define _MG_REFCLKIN_CTL_PORT3 0x16A92C
#define _MG_REFCLKIN_CTL_PORT4 0x16B92C
#define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8)
+#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8)
#define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \
_MG_REFCLKIN_CTL_PORT1, \
_MG_REFCLKIN_CTL_PORT2)
@@ -8972,7 +9352,9 @@ enum skl_power_gate {
#define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8
#define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8
#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16)
+#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16)
#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8)
+#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8)
#define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \
_MG_CLKTOP2_CORECLKCTL1_PORT1, \
_MG_CLKTOP2_CORECLKCTL1_PORT2)
@@ -8982,9 +9364,13 @@ enum skl_power_gate {
#define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4
#define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4
#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16)
+#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16)
#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14)
+#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14)
#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12)
#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8)
+#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8)
#define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \
_MG_CLKTOP2_HSCLKCTL_PORT1, \
_MG_CLKTOP2_HSCLKCTL_PORT2)
@@ -9058,12 +9444,18 @@ enum skl_power_gate {
#define _MG_PLL_BIAS_PORT3 0x16AA14
#define _MG_PLL_BIAS_PORT4 0x16BA14
#define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30)
+#define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30)
#define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24)
+#define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24)
#define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16)
+#define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16)
#define MG_PLL_BIAS_BIASCAL_EN (1 << 15)
#define MG_PLL_BIAS_CTRIM(x) ((x) << 8)
+#define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8)
#define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5)
+#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5)
#define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0)
+#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0)
#define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \
_MG_PLL_BIAS_PORT2)
@@ -9105,13 +9497,16 @@ enum skl_power_gate {
#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
+#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
+#define DPLL_CFGCR1_KDIV_SHIFT (6)
#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
#define DPLL_CFGCR1_KDIV_1 (1 << 6)
#define DPLL_CFGCR1_KDIV_2 (2 << 6)
#define DPLL_CFGCR1_KDIV_4 (4 << 6)
#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
+#define DPLL_CFGCR1_PDIV_SHIFT (2)
#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
#define DPLL_CFGCR1_PDIV_2 (1 << 2)
#define DPLL_CFGCR1_PDIV_3 (2 << 2)
@@ -9145,22 +9540,22 @@ enum skl_power_gate {
/* GEN9 DC */
#define DC_STATE_EN _MMIO(0x45504)
#define DC_STATE_DISABLE 0
-#define DC_STATE_EN_UPTO_DC5 (1<<0)
-#define DC_STATE_EN_DC9 (1<<3)
-#define DC_STATE_EN_UPTO_DC6 (2<<0)
+#define DC_STATE_EN_UPTO_DC5 (1 << 0)
+#define DC_STATE_EN_DC9 (1 << 3)
+#define DC_STATE_EN_UPTO_DC6 (2 << 0)
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
#define DC_STATE_DEBUG _MMIO(0x45520)
-#define DC_STATE_DEBUG_MASK_CORES (1<<0)
-#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
+#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
+#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW _MMIO(0x138144)
-#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
-#define D_COMP_COMP_FORCE (1<<8)
-#define D_COMP_COMP_DISABLE (1<<0)
+#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
+#define D_COMP_COMP_FORCE (1 << 8)
+#define D_COMP_COMP_DISABLE (1 << 0)
/* Pipe WM_LINETIME - watermark line time */
#define _PIPE_WM_LINETIME_A 0x45270
@@ -9168,27 +9563,27 @@ enum skl_power_gate {
#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
#define PIPE_WM_LINETIME_MASK (0x1ff)
#define PIPE_WM_LINETIME_TIME(x) ((x))
-#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
-#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff << 16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x) << 16)
/* SFUSE_STRAP */
#define SFUSE_STRAP _MMIO(0xc2014)
-#define SFUSE_STRAP_FUSE_LOCK (1<<13)
-#define SFUSE_STRAP_RAW_FREQUENCY (1<<8)
-#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
-#define SFUSE_STRAP_CRT_DISABLED (1<<6)
-#define SFUSE_STRAP_DDIF_DETECTED (1<<3)
-#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
-#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
-#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+#define SFUSE_STRAP_FUSE_LOCK (1 << 13)
+#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8)
+#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7)
+#define SFUSE_STRAP_CRT_DISABLED (1 << 6)
+#define SFUSE_STRAP_DDIF_DETECTED (1 << 3)
+#define SFUSE_STRAP_DDIB_DETECTED (1 << 2)
+#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
+#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
#define WM_MISC _MMIO(0x45260)
#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
#define WM_DBG _MMIO(0x45280)
-#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
-#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
-#define WM_DBG_DISALLOW_SPRITE (1<<2)
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0)
+#define WM_DBG_DISALLOW_MAXFIFO (1 << 1)
+#define WM_DBG_DISALLOW_SPRITE (1 << 2)
/* pipe CSC */
#define _PIPE_A_CSC_COEFF_RY_GY 0x49010
@@ -9314,6 +9709,22 @@ enum skl_power_gate {
#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
+#define _ICL_DSI_ESC_CLK_DIV0 0x6b090
+#define _ICL_DSI_ESC_CLK_DIV1 0x6b890
+#define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \
+ _ICL_DSI_ESC_CLK_DIV0, \
+ _ICL_DSI_ESC_CLK_DIV1)
+#define _ICL_DPHY_ESC_CLK_DIV0 0x162190
+#define _ICL_DPHY_ESC_CLK_DIV1 0x6C190
+#define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \
+ _ICL_DPHY_ESC_CLK_DIV0, \
+ _ICL_DPHY_ESC_CLK_DIV1)
+#define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16)
+#define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16
+#define ICL_ESC_CLK_DIV_MASK 0x1ff
+#define ICL_ESC_CLK_DIV_SHIFT 0
+#define DSI_MAX_ESC_CLK 20000 /* in KHz */
+
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
@@ -9351,7 +9762,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \
- ((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
+ (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
/* RX upper control divider to select actual RX clock output from 8x */
#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21
#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5
@@ -9364,7 +9775,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \
- ((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
+ (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
/* 8/3X divider to select the actual 8/3X clock output from 8x */
#define BXT_MIPI1_8X_BY3_SHIFT 19
#define BXT_MIPI2_8X_BY3_SHIFT 3
@@ -9377,7 +9788,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
BXT_MIPI2_8X_BY3_DIVIDER_MASK)
#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \
- ((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
+ (((val) & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
/* RX lower control divider to select actual RX clock output from 8x */
#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16
#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0
@@ -9390,7 +9801,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \
- ((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
+ (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
#define RX_DIVIDER_BIT_1_2 0x3
#define RX_DIVIDER_BIT_3_4 0xC
@@ -9448,6 +9859,14 @@ enum skl_power_gate {
#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
+/* ICL DSI MODE control */
+#define _ICL_DSI_IO_MODECTL_0 0x6B094
+#define _ICL_DSI_IO_MODECTL_1 0x6B894
+#define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \
+ _ICL_DSI_IO_MODECTL_0, \
+ _ICL_DSI_IO_MODECTL_1)
+#define COMBO_PHY_MODE_DSI (1 << 0)
+
#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
#define STAP_SELECT (1 << 0)
@@ -9927,4 +10346,310 @@ enum skl_power_gate {
_ICL_PHY_MISC_B)
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
+/* Icelake Display Stream Compression Registers */
+#define DSCA_PICTURE_PARAMETER_SET_0 0x6B200
+#define DSCC_PICTURE_PARAMETER_SET_0 0x6BA00
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
+#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define DSC_VBR_ENABLE (1 << 19)
+#define DSC_422_ENABLE (1 << 18)
+#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
+#define DSC_BLOCK_PREDICTION (1 << 16)
+#define DSC_LINE_BUF_DEPTH_SHIFT 12
+#define DSC_BPC_SHIFT 8
+#define DSC_VER_MIN_SHIFT 4
+#define DSC_VER_MAJ (0x1 << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_1 0x6B204
+#define DSCC_PICTURE_PARAMETER_SET_1 0x6BA04
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
+#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
+#define DSC_BPP(bpp) ((bpp) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_2 0x6B208
+#define DSCC_PICTURE_PARAMETER_SET_2 0x6BA08
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
+#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
+#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
+#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_3 0x6B20C
+#define DSCC_PICTURE_PARAMETER_SET_3 0x6BA0C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
+#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
+#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_4 0x6B210
+#define DSCC_PICTURE_PARAMETER_SET_4 0x6BA10
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
+#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
+#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
+#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_5 0x6B214
+#define DSCC_PICTURE_PARAMETER_SET_5 0x6BA14
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
+#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
+#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16)
+#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_6 0x6B218
+#define DSCC_PICTURE_PARAMETER_SET_6 0x6BA18
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
+#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
+#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
+#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
+#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
+#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_7 0x6B21C
+#define DSCC_PICTURE_PARAMETER_SET_7 0x6BA1C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
+#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
+#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_8 0x6B220
+#define DSCC_PICTURE_PARAMETER_SET_8 0x6BA20
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
+#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
+#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
+#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_9 0x6B224
+#define DSCC_PICTURE_PARAMETER_SET_9 0x6BA24
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
+#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
+#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
+#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_10 0x6B228
+#define DSCC_PICTURE_PARAMETER_SET_10 0x6BA28
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
+#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
+#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
+#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
+#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
+#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_11 0x6B22C
+#define DSCC_PICTURE_PARAMETER_SET_11 0x6BA2C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_12 0x6B260
+#define DSCC_PICTURE_PARAMETER_SET_12 0x6BA60
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_13 0x6B264
+#define DSCC_PICTURE_PARAMETER_SET_13 0x6BA64
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
+#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_14 0x6B268
+#define DSCC_PICTURE_PARAMETER_SET_14 0x6BA68
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
+#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_15 0x6B26C
+#define DSCC_PICTURE_PARAMETER_SET_15 0x6BA6C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC
+#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_16 0x6B270
+#define DSCC_PICTURE_PARAMETER_SET_16 0x6BA70
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
+#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
+
+/* Icelake Rate Control Buffer Threshold Registers */
+#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
+#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
+#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30)
+#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4)
+#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PC)
+#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
+
+#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238)
+#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4)
+#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38)
+#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4)
+#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PC)
+#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8928894dd9c7..5c2c93cbab12 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -206,7 +206,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* Carefully retire all requests without writing to the rings */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -320,6 +321,7 @@ static void advance_ring(struct i915_request *request)
* is just about to be. Either works, if we miss the last two
* noops - they are safe to be replayed on a reset.
*/
+ GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
tail = READ_ONCE(request->tail);
list_del(&ring->active_link);
} else {
@@ -383,8 +385,8 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
* the subsequent request.
*/
if (engine->last_retired_context)
- intel_context_unpin(engine->last_retired_context, engine);
- engine->last_retired_context = rq->ctx;
+ intel_context_unpin(engine->last_retired_context);
+ engine->last_retired_context = rq->hw_context;
}
static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -455,8 +457,8 @@ static void i915_request_retire(struct i915_request *request)
i915_request_remove_from_client(request);
/* Retirement decays the ban score as it is a sign of ctx progress */
- atomic_dec_if_positive(&request->ctx->ban_score);
- intel_context_unpin(request->ctx, request->engine);
+ atomic_dec_if_positive(&request->gem_context->ban_score);
+ intel_context_unpin(request->hw_context);
__retire_engine_upto(request->engine, request);
@@ -502,7 +504,7 @@ static void move_to_timeline(struct i915_request *request,
GEM_BUG_ON(request->timeline == &request->engine->timeline);
lockdep_assert_held(&request->engine->timeline.lock);
- spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
+ spin_lock(&request->timeline->lock);
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
}
@@ -657,7 +659,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_request *rq;
- struct intel_ring *ring;
+ struct intel_context *ce;
int ret;
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -681,22 +683,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ring = intel_context_pin(ctx, engine);
- if (IS_ERR(ring))
- return ERR_CAST(ring);
- GEM_BUG_ON(!ring);
+ ce = intel_context_pin(ctx, engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
ret = reserve_gt(i915);
if (ret)
goto err_unpin;
- ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+ ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
if (ret)
goto err_unreserve;
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ring->request_list) &&
+ rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
+ if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
i915_request_completed(rq))
i915_request_retire(rq);
@@ -735,7 +736,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
/* Ratelimit ourselves to prevent oom from malicious clients */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
- I915_WAIT_INTERRUPTIBLE);
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto err_unreserve;
@@ -760,9 +762,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
INIT_LIST_HEAD(&rq->active_list);
rq->i915 = i915;
rq->engine = engine;
- rq->ctx = ctx;
- rq->ring = ring;
- rq->timeline = ring->timeline;
+ rq->gem_context = ctx;
+ rq->hw_context = ce;
+ rq->ring = ce->ring;
+ rq->timeline = ce->ring->timeline;
GEM_BUG_ON(rq->timeline == &engine->timeline);
spin_lock_init(&rq->lock);
@@ -814,14 +817,16 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
goto err_unwind;
/* Keep a second pin for the dual retirement along engine and ring */
- __intel_context_pin(rq->ctx, engine);
+ __intel_context_pin(ce);
+
+ rq->infix = rq->ring->emit; /* end of header; start of user payload */
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
return rq;
err_unwind:
- rq->ring->emit = rq->head;
+ ce->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
GEM_BUG_ON(!list_empty(&rq->active_list));
@@ -832,7 +837,7 @@ err_unwind:
err_unreserve:
unreserve_gt(i915);
err_unpin:
- intel_context_unpin(ctx, engine);
+ intel_context_unpin(ce);
return ERR_PTR(ret);
}
@@ -1010,19 +1015,39 @@ i915_request_await_object(struct i915_request *to,
return ret;
}
+void i915_request_skip(struct i915_request *rq, int error)
+{
+ void *vaddr = rq->ring->vaddr;
+ u32 head;
+
+ GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+ dma_fence_set_error(&rq->fence, error);
+
+ /*
+ * As this request likely depends on state from the lost
+ * context, clear out all the user operations leaving the
+ * breadcrumb at the end (so we get the fence notifications).
+ */
+ head = rq->infix;
+ if (rq->postfix < head) {
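+ /* The payload wrapped past the end of the ring: clear to the end, then restart at 0 */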
+ memset(vaddr + head, 0, rq->ring->size - head);
+ head = 0;
+ }
+ memset(vaddr + head, 0, rq->postfix - head);
+}
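+/*
+ * Likely usage sketch (an assumption, not spelled out in this patch):
+ * after a GPU reset, callers that judge a context guilty can use
+ * i915_request_skip() to zero a request's payload (leaving MI_NOOPs)
+ * while keeping the breadcrumb, so waiters still see the fence error.
+ */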
+
/*
* NB: This function is not allowed to fail. Doing so would mean the
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void __i915_request_add(struct i915_request *request, bool flush_caches)
+void i915_request_add(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct intel_ring *ring = request->ring;
struct i915_timeline *timeline = request->timeline;
+ struct intel_ring *ring = request->ring;
struct i915_request *prev;
u32 *cs;
- int err;
GEM_TRACE("%s fence %llx:%d\n",
engine->name, request->fence.context, request->fence.seqno);
@@ -1043,20 +1068,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
* know that it is time to use that space up.
*/
request->reserved_space = 0;
-
- /*
- * Emit any outstanding flushes - execbuf can fail to emit the flush
- * after having emitted the batchbuffer command. Hence we need to fix
- * things up similar to emitting the lazy request. The difference here
- * is that the flush _must_ happen before the next request, no matter
- * what.
- */
- if (flush_caches) {
- err = engine->emit_flush(request, EMIT_FLUSH);
-
- /* Not allowed to fail! */
- WARN(err, "engine->emit_flush() failed: %d!\n", err);
- }
+ engine->emit_flush(request, EMIT_FLUSH);
/*
* Record the position of the start of the breadcrumb so that
@@ -1095,8 +1107,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
i915_gem_active_set(&timeline->last_request, request);
list_add_tail(&request->ring_link, &ring->request_list);
- if (list_is_first(&request->ring_link, &ring->request_list))
+ if (list_is_first(&request->ring_link, &ring->request_list)) {
+ GEM_TRACE("marking %s as active\n", ring->timeline->name);
list_add(&ring->active_link, &request->i915->gt.active_rings);
+ }
request->emitted_jiffies = jiffies;
/*
@@ -1113,7 +1127,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
- engine->schedule(request, &request->ctx->sched);
+ engine->schedule(request, &request->gem_context->sched);
rcu_read_unlock();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -1205,7 +1219,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- irq = atomic_read(&engine->irq_count);
+ irq = READ_ONCE(engine->breadcrumbs.irq_count);
timeout_us += local_clock_us(&cpu);
do {
if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
@@ -1217,7 +1231,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
* assume we won't see one in the near future but require
* the engine->seqno_barrier() to fixup coherency.
*/
- if (atomic_read(&engine->irq_count) != irq)
+ if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
break;
if (signal_pending_state(state, current))
@@ -1294,7 +1308,7 @@ long i915_request_wait(struct i915_request *rq,
if (flags & I915_WAIT_LOCKED)
add_wait_queue(errq, &reset);
- intel_wait_init(&wait, rq);
+ intel_wait_init(&wait);
restart:
do {
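The irq_count sampling above drives the opportunistic spin: if an interrupt arrives while busy-waiting, the breadcrumb path is already waking a sleeping waiter, so further spinning is wasted work. A simplified sketch of that pattern (names mirror the driver; the timeout handling is reduced to its essentials):

static bool spin_for_seqno(struct intel_engine_cs *engine, u32 seqno,
			   u64 timeout_ns)
{
	const unsigned int irq = READ_ONCE(engine->breadcrumbs.irq_count);
	const u64 end = local_clock() + timeout_ns;

	do {
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			return true;
		/* An irq fired: wakeups are being serviced, stop spinning. */
		if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
			break;
		cpu_relax();
	} while (local_clock() < end);

	return false;
}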
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index eddbd4245cb3..e1c9365dfefb 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -93,8 +93,9 @@ struct i915_request {
* i915_request_free() will then decrement the refcount on the
* context.
*/
- struct i915_gem_context *ctx;
+ struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
+ struct intel_context *hw_context;
struct intel_ring *ring;
struct i915_timeline *timeline;
struct intel_signal_node signaling;
@@ -133,6 +134,9 @@ struct i915_request {
/** Position in the ring of the start of the request */
u32 head;
+ /** Position in the ring of the start of the user packets */
+ u32 infix;
+
/**
* Position in the ring of the start of the postfix.
* This is required to calculate the maximum available ring space
@@ -249,13 +253,13 @@ int i915_request_await_object(struct i915_request *to,
int i915_request_await_dma_fence(struct i915_request *rq,
struct dma_fence *fence);
-void __i915_request_add(struct i915_request *rq, bool flush_caches);
-#define i915_request_add(rq) \
- __i915_request_add(rq, false)
+void i915_request_add(struct i915_request *rq);
void __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);
+void i915_request_skip(struct i915_request *request, int error);
+
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
@@ -266,6 +270,7 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -375,6 +380,7 @@ static inline void
init_request_active(struct i915_gem_active *active,
i915_gem_retire_fn retire)
{
+ RCU_INIT_POINTER(active->request, NULL);
INIT_LIST_HEAD(&active->link);
active->retire = retire ?: i915_gem_retire_noop;
}
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 9766e806dce6..a73472dd12fd 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -99,6 +99,6 @@ __printf(2, 3)
bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
#define igt_timeout(t, fmt, ...) \
- __igt_timeout((t), KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+ __igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif /* !__I915_SELFTEST_H__ */
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index dc2a4632faa7..a2c2c3ab5fb0 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -37,6 +37,8 @@ struct i915_timeline {
u32 seqno;
spinlock_t lock;
+#define TIMELINE_CLIENT 0 /* default subclass */
+#define TIMELINE_ENGINE 1
/**
* List of breadcrumbs associated with GPU requests currently
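TIMELINE_CLIENT and TIMELINE_ENGINE act as lockdep subclasses for timeline->lock, so that nesting a client timeline lock inside the engine timeline lock is not reported as recursive locking. A hypothetical nested acquisition would be annotated like this (illustration only; the locks and ordering are assumptions):

spin_lock(&client_tl->lock);	/* default subclass == TIMELINE_CLIENT */
spin_lock_nested(&engine_tl->lock, TIMELINE_ENGINE);
/* ... transfer requests between the timelines ... */
spin_unlock(&engine_tl->lock);
spin_unlock(&client_tl->lock);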
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8cc3a256f29d..b50c6b829715 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -591,21 +591,26 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, sync_from)
- __field(u32, sync_to)
+ __field(u32, from_class)
+ __field(u32, from_instance)
+ __field(u32, to_class)
+ __field(u32, to_instance)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = from->i915->drm.primary->index;
- __entry->sync_from = from->engine->id;
- __entry->sync_to = to->engine->id;
+ __entry->from_class = from->engine->uabi_class;
+ __entry->from_instance = from->engine->instance;
+ __entry->to_class = to->engine->uabi_class;
+ __entry->to_instance = to->engine->instance;
__entry->seqno = from->global_seqno;
),
- TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+ TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
__entry->dev,
- __entry->sync_from, __entry->sync_to,
+ __entry->from_class, __entry->from_instance,
+ __entry->to_class, __entry->to_instance,
__entry->seqno)
);
@@ -616,24 +621,27 @@ TRACE_EVENT(i915_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, flags)
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->flags)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->flags)
);
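With engines keyed by uabi class:instance instead of the flat ring id, and the fence context widened to u64, a queue event formatted by the TP_printk above renders along these lines (all values invented for illustration):

i915_request_queue: dev=0, engine=0:0, hw_id=5, ctx=37, seqno=2, flags=0x0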
DECLARE_EVENT_CLASS(i915_request,
@@ -643,24 +651,27 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global)
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->global)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -686,8 +697,9 @@ TRACE_EVENT(i915_request_in,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, port)
@@ -696,8 +708,9 @@ TRACE_EVENT(i915_request_in,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno;
@@ -705,10 +718,10 @@ TRACE_EVENT(i915_request_in,
__entry->port = port;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->prio, __entry->global_seqno,
- __entry->port)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->prio, __entry->global_seqno, __entry->port)
);
TRACE_EVENT(i915_request_out,
@@ -718,8 +731,9 @@ TRACE_EVENT(i915_request_out,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, completed)
@@ -727,17 +741,18 @@ TRACE_EVENT(i915_request_out,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno;
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u",
- __entry->dev, __entry->hw_id, __entry->ring,
- __entry->ctx, __entry->seqno,
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
__entry->global_seqno, __entry->completed)
);
@@ -771,21 +786,23 @@ TRACE_EVENT(intel_engine_notify,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, ring)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(bool, waiters)
),
TP_fast_assign(
__entry->dev = engine->i915->drm.primary->index;
- __entry->ring = engine->id;
+ __entry->class = engine->uabi_class;
+ __entry->instance = engine->instance;
__entry->seqno = intel_engine_get_seqno(engine);
__entry->waiters = waiters;
),
- TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
- __entry->dev, __entry->ring, __entry->seqno,
- __entry->waiters)
+ TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->seqno, __entry->waiters)
);
DEFINE_EVENT(i915_request, i915_request_retire,
@@ -800,8 +817,9 @@ TRACE_EVENT(i915_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global)
__field(unsigned int, flags)
@@ -815,18 +833,20 @@ TRACE_EVENT(i915_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global,
- !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+ __entry->flags)
);
DEFINE_EVENT(i915_request, i915_request_wait_end,
@@ -936,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context,
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
__entry->hw_id = ctx->hw_id;
- __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+ __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
@@ -953,36 +973,6 @@ DEFINE_EVENT(i915_context, i915_context_free,
TP_ARGS(ctx)
);
-/**
- * DOC: switch_mm tracepoint
- *
- * This tracepoint allows tracking of the mm switch, which is an important point
- * in the lifetime of the vm in the legacy submission path. This tracepoint is
- * called only if full ppgtt is enabled.
- */
-TRACE_EVENT(switch_mm,
- TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
-
- TP_ARGS(engine, to),
-
- TP_STRUCT__entry(
- __field(u32, ring)
- __field(struct i915_gem_context *, to)
- __field(struct i915_address_space *, vm)
- __field(u32, dev)
- ),
-
- TP_fast_assign(
- __entry->ring = engine->id;
- __entry->to = to;
- __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
- __entry->dev = engine->i915->drm.primary->index;
- ),
-
- TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
- __entry->dev, __entry->ring, __entry->to, __entry->vm)
-);
-
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 00165ad55fb3..395dd2511568 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -43,7 +43,7 @@
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
-#if GCC_VERSION >= 70000
+#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows(A, B) \
__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
#else
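Guarding on defined(GCC_VERSION) keeps the preprocessor test well-formed for compilers or tooling that never define the macro, instead of silently evaluating an undefined identifier as 0. For context, a hypothetical caller of the macro:

u32 end;

if (add_overflows(offset, length))
	return -EINVAL;		/* reject ranges that wrap u32 */
end = offset + length;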
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 5fe9f3f39467..869cf4a3b6de 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
node->start + node->size,
node->size / 1024);
- ggtt->base.reserved -= node->size;
+ ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node);
}
@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
- ret = i915_gem_gtt_reserve(&ggtt->base, node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
if (!ret)
- ggtt->base.reserved += size;
+ ggtt->vm.reserved += size;
return ret;
}
@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long ggtt_end = ggtt->base.total;
+ unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index bb8338450dc1..551acc390046 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -36,6 +36,12 @@ intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
}
+static inline bool
+intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
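A sketch of the intended use, assuming the usual intel_vgpu_active() check guards vGPU-only capabilities (the caller shown here is hypothetical):

/* Only use 64K/2M GTT entries if the host GVT-g exposes huge GTT. */
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
	return false;	/* fall back to 4K pages */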
int intel_vgt_balloon(struct drm_i915_private *dev_priv);
void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9324d476e0a7..11d834f94220 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -21,7 +21,7 @@
* IN THE SOFTWARE.
*
*/
-
+
#include "i915_vma.h"
#include "i915_drv.h"
@@ -30,18 +30,53 @@
#include <drm/drm_gem.h>
+#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
+
+#include <linux/stackdepot.h>
+
+static void vma_print_allocator(struct i915_vma *vma, const char *reason)
+{
+ unsigned long entries[12];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = ARRAY_SIZE(entries),
+ };
+ char buf[512];
+
+ if (!vma->node.stack) {
+ DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
+ vma->node.start, vma->node.size, reason);
+ return;
+ }
+
+ depot_fetch_stack(vma->node.stack, &trace);
+ snprint_stack_trace(buf, sizeof(buf), &trace, 0);
+ DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
+ vma->node.start, vma->node.size, reason, buf);
+}
+
+#else
+
+static void vma_print_allocator(struct i915_vma *vma, const char *reason)
+{
+}
+
+#endif
+
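vma_print_allocator() only decodes a saved trace; the capture side lives in drm_mm under CONFIG_DRM_DEBUG_MM. An assumed sketch of how such a depot handle is recorded at allocation time, using the stack-trace API of this kernel era (save_stack_trace() plus depot_save_stack()):

unsigned long entries[12];
struct stack_trace trace = {
	.entries = entries,
	.max_entries = ARRAY_SIZE(entries),
	.skip = 1,			/* drop the capture helper itself */
};

save_stack_trace(&trace);
node->stack = depot_save_stack(&trace, GFP_NOWAIT);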
+struct i915_vma_active {
+ struct i915_gem_active base;
+ struct i915_vma *vma;
+ struct rb_node node;
+ u64 timeline;
+};
+
static void
-i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
+__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
{
- const unsigned int idx = rq->engine->id;
- struct i915_vma *vma =
- container_of(active, struct i915_vma, last_read[idx]);
struct drm_i915_gem_object *obj = vma->obj;
- GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
-
- i915_vma_clear_active(vma, idx);
- if (i915_vma_is_active(vma))
+ GEM_BUG_ON(!i915_vma_is_active(vma));
+ if (--vma->active_count)
return;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -75,6 +110,21 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
}
}
+static void
+i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
+{
+ struct i915_vma_active *active =
+ container_of(base, typeof(*active), base);
+
+ __i915_vma_retire(active->vma, rq);
+}
+
+static void
+i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
+{
+ __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
+}
+
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
@@ -82,19 +132,20 @@ vma_create(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
struct rb_node *rb, **p;
- int i;
/* The aliasing_ppgtt should never be used directly! */
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- init_request_active(&vma->last_read[i], i915_vma_retire);
+ vma->active = RB_ROOT;
+
+ init_request_active(&vma->last_active, i915_vma_last_retire);
init_request_active(&vma->last_fence, NULL);
vma->vm = vm;
+ vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->resv = obj->resv;
vma->size = obj->base.size;
@@ -109,7 +160,7 @@ vma_create(struct drm_i915_gem_object *obj,
obj->base.size >> PAGE_SHIFT));
vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
- GEM_BUG_ON(vma->size >= obj->base.size);
+ GEM_BUG_ON(vma->size > obj->base.size);
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
@@ -280,7 +331,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags);
- ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+ ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
@@ -345,7 +396,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
@@ -365,6 +416,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
return;
obj = vma->obj;
+ GEM_BUG_ON(!obj);
i915_vma_unpin(vma);
i915_vma_close(vma);
@@ -459,6 +511,18 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
return true;
}
+static void assert_bind_count(const struct drm_i915_gem_object *obj)
+{
+ /*
+ * Combine the assertion that the object is bound and that we have
+ * pinned its pages. But we should never have bound the object
+ * more than we have pinned its pages. (For complete accuracy, we
+ * assume that no one else is pinning the pages, but as a rough assertion
+ * that we will not run into problems later, this will do!)
+ */
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
/**
* i915_vma_insert - finds a slot for the vma in its address space
* @vma: the vma
@@ -477,7 +541,7 @@ static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *dev_priv = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
+ unsigned int cache_level;
u64 start, end;
int ret;
@@ -512,20 +576,25 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
* attempt to find space.
*/
if (size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
- size, obj->base.size,
- flags & PIN_MAPPABLE ? "mappable" : "total",
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
+ size, flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return -ENOSPC;
}
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
+ if (vma->obj) {
+ ret = i915_gem_object_pin_pages(vma->obj);
+ if (ret)
+ return ret;
+
+ cache_level = vma->obj->cache_level;
+ } else {
+ cache_level = 0;
+ }
GEM_BUG_ON(vma->pages);
- ret = vma->vm->set_pages(vma);
+ ret = vma->ops->set_pages(vma);
if (ret)
goto err_unpin;
@@ -538,7 +607,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
- size, offset, obj->cache_level,
+ size, offset, cache_level,
flags);
if (ret)
goto err_clear;
@@ -577,7 +646,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
- size, alignment, obj->cache_level,
+ size, alignment, cache_level,
start, end, flags);
if (ret)
goto err_clear;
@@ -586,23 +655,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- spin_lock(&dev_priv->mm.obj_lock);
- list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
- obj->bind_count++;
- spin_unlock(&dev_priv->mm.obj_lock);
+ if (vma->obj) {
+ struct drm_i915_gem_object *obj = vma->obj;
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+ obj->bind_count++;
+ spin_unlock(&dev_priv->mm.obj_lock);
+
+ assert_bind_count(obj);
+ }
return 0;
err_clear:
- vma->vm->clear_pages(vma);
+ vma->ops->clear_pages(vma);
err_unpin:
- i915_gem_object_unpin_pages(obj);
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
return ret;
}
@@ -610,30 +684,35 @@ static void
i915_vma_remove(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
- vma->vm->clear_pages(vma);
+ vma->ops->clear_pages(vma);
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
- /* Since the unbound list is global, only move to that list if
+ /*
+ * Since the unbound list is global, only move to that list if
* no more VMAs exist.
*/
- spin_lock(&i915->mm.obj_lock);
- if (--obj->bind_count == 0)
- list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
- spin_unlock(&i915->mm.obj_lock);
-
- /* And finally now the object is completely decoupled from this vma,
- * we can drop its hold on the backing storage and allow it to be
- * reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ if (vma->obj) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ spin_lock(&i915->mm.obj_lock);
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+ spin_unlock(&i915->mm.obj_lock);
+
+ /*
+ * And finally now the object is completely decoupled from this
+ * vma, we can drop its hold on the backing storage and allow
+ * it to be reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+ assert_bind_count(obj);
+ }
}
int __i915_vma_do_pin(struct i915_vma *vma,
@@ -658,7 +737,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+ ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
if (ret)
goto err_remove;
@@ -715,23 +794,28 @@ void i915_vma_reopen(struct i915_vma *vma)
static void __i915_vma_destroy(struct i915_vma *vma)
{
- int i;
+ struct drm_i915_private *i915 = vma->vm->i915;
+ struct i915_vma_active *iter, *n;
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
list_del(&vma->obj_link);
list_del(&vma->vm_link);
- rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+ if (vma->obj)
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
- kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+ rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
+ GEM_BUG_ON(i915_gem_active_isset(&iter->base));
+ kfree(iter);
+ }
+
+ kmem_cache_free(i915->vmas, vma);
}
void i915_vma_destroy(struct i915_vma *vma)
@@ -795,23 +879,173 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
-int i915_vma_unbind(struct i915_vma *vma)
+static void export_fence(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
+{
+ struct reservation_object *resv = vma->resv;
+
+ /*
+ * Ignore errors from failing to allocate the new fence, we can't
+ * handle an error right now. Worst case should be missed
+ * synchronisation leading to rendering corruption.
+ */
+ reservation_object_lock(resv, NULL);
+ if (flags & EXEC_OBJECT_WRITE)
+ reservation_object_add_excl_fence(resv, &rq->fence);
+ else if (reservation_object_reserve_shared(resv) == 0)
+ reservation_object_add_shared_fence(resv, &rq->fence);
+ reservation_object_unlock(resv);
+}
+
+static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
+{
+ struct i915_vma_active *active;
+ struct rb_node **p, *parent;
+ struct i915_request *old;
+
+ /*
+ * We track the most recently used timeline to skip a rbtree search
+ * for the common case, under typical loads we never need the rbtree
+ * at all. We can reuse the last_active slot if it is empty, that is
+ * after the previous activity has been retired, or if the active
+ * matches the current timeline.
+ *
+ * Note that we allow the timeline to be active simultaneously in
+ * the rbtree and the last_active cache. We do this to avoid having
+ * to search and replace the rbtree element for a new timeline, with
+ * the cost being that we must be aware that the vma may be retired
+ * twice for the same timeline (as the older rbtree element will be
+ * retired before the new request is added to last_active).
+ */
+ old = i915_gem_active_raw(&vma->last_active,
+ &vma->vm->i915->drm.struct_mutex);
+ if (!old || old->fence.context == idx)
+ goto out;
+
+ /* Move the currently active fence into the rbtree */
+ idx = old->fence.context;
+
+ parent = NULL;
+ p = &vma->active.rb_node;
+ while (*p) {
+ parent = *p;
+
+ active = rb_entry(parent, struct i915_vma_active, node);
+ if (active->timeline == idx)
+ goto replace;
+
+ if (active->timeline < idx)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+
+ active = kmalloc(sizeof(*active), GFP_KERNEL);
+
+ /* kmalloc may retire the vma->last_active request (thanks shrinker)! */
+ if (unlikely(!i915_gem_active_raw(&vma->last_active,
+ &vma->vm->i915->drm.struct_mutex))) {
+ kfree(active);
+ goto out;
+ }
+
+ if (unlikely(!active))
+ return ERR_PTR(-ENOMEM);
+
+ init_request_active(&active->base, i915_vma_retire);
+ active->vma = vma;
+ active->timeline = idx;
+
+ rb_link_node(&active->node, parent, p);
+ rb_insert_color(&active->node, &vma->active);
+
+replace:
+ /*
+ * Overwrite the previous active slot in the rbtree with last_active,
+ * leaving last_active zeroed. If the previous slot is still active,
+ * we must be careful as we now only expect to receive one retire
+ * callback, not two, and so must undo the active counting for the
+ * overwritten slot.
+ */
+ if (i915_gem_active_isset(&active->base)) {
+ /* Retire ourselves from the old rq->active_list */
+ __list_del_entry(&active->base.link);
+ vma->active_count--;
+ GEM_BUG_ON(!vma->active_count);
+ }
+ GEM_BUG_ON(list_empty(&vma->last_active.link));
+ list_replace_init(&vma->last_active.link, &active->base.link);
+ active->base.request = fetch_and_zero(&vma->last_active.request);
+
+out:
+ return &vma->last_active;
+}
+
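A worked example of the accounting that last_active and the rbtree provide together, for a vma used by two hypothetical timelines A and B:

/*
 * move_to_active(vma, rqA)  ->  last_active = rqA,           active_count = 1
 * move_to_active(vma, rqB)  ->  rqA moved into rbtree slot A,
 *                               last_active = rqB,           active_count = 2
 * retire(rqA)               ->  active_count = 1 (still active via rqB)
 * retire(rqB)               ->  active_count = 0, vma drops to the inactive list
 */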
+int i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
- unsigned long active;
+ struct i915_gem_active *active;
+
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+ active = active_instance(vma, rq->fence.context);
+ if (IS_ERR(active))
+ return PTR_ERR(active);
+
+ /*
+ * Add a reference if we're newly entering the active list.
+ * The order in which we add operations to the retirement queue is
+ * vital here: mark_active adds to the start of the callback list,
+ * such that subsequent callbacks are called first. Therefore we
+ * add the active reference first and queue for it to be dropped
+ * *last*.
+ */
+ if (!i915_gem_active_isset(active) && !vma->active_count++) {
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
+ obj->active_count++;
+ }
+ i915_gem_active_set(active, rq);
+ GEM_BUG_ON(!i915_vma_is_active(vma));
+ GEM_BUG_ON(!obj->active_count);
+
+ obj->write_domain = 0;
+ if (flags & EXEC_OBJECT_WRITE) {
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
+
+ if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+ i915_gem_active_set(&obj->frontbuffer_write, rq);
+
+ obj->read_domains = 0;
+ }
+ obj->read_domains |= I915_GEM_GPU_DOMAINS;
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ i915_gem_active_set(&vma->last_fence, rq);
+
+ export_fence(vma, rq, flags);
+ return 0;
+}
+
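A hypothetical caller, mirroring the execbuf ordering: activity is recorded on the request after allocation and before the request is committed with i915_request_add():

rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq))
	return PTR_ERR(rq);

err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_request_add(rq);	/* once allocated, the request must be added */
if (err)
	return err;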
+int i915_vma_unbind(struct i915_vma *vma)
+{
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- /* First wait upon any activity as retiring the request may
+ /*
+ * First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
*/
might_sleep();
- active = i915_vma_get_active(vma);
- if (active) {
- int idx;
+ if (i915_vma_is_active(vma)) {
+ struct i915_vma_active *active, *n;
- /* When a closed VMA is retired, it is unbound - eek.
+ /*
+ * When a closed VMA is retired, it is unbound - eek.
* In order to prevent it from being recursively closed,
* take a pin on the vma so that the second unbind is
* aborted.
@@ -825,33 +1059,36 @@ int i915_vma_unbind(struct i915_vma *vma)
*/
__i915_vma_pin(vma);
- for_each_active(active, idx) {
- ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->i915->drm.struct_mutex);
- if (ret)
- break;
- }
+ ret = i915_gem_active_retire(&vma->last_active,
+ &vma->vm->i915->drm.struct_mutex);
+ if (ret)
+ goto unpin;
- if (!ret) {
- ret = i915_gem_active_retire(&vma->last_fence,
+ rbtree_postorder_for_each_entry_safe(active, n,
+ &vma->active, node) {
+ ret = i915_gem_active_retire(&active->base,
&vma->vm->i915->drm.struct_mutex);
+ if (ret)
+ goto unpin;
}
+ ret = i915_gem_active_retire(&vma->last_fence,
+ &vma->vm->i915->drm.struct_mutex);
+unpin:
__i915_vma_unpin(vma);
if (ret)
return ret;
}
GEM_BUG_ON(i915_vma_is_active(vma));
- if (i915_vma_is_pinned(vma))
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
return -EBUSY;
+ }
if (!drm_mm_node_allocated(&vma->node))
return 0;
- GEM_BUG_ON(obj->bind_count == 0);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
if (i915_vma_is_map_and_fenceable(vma)) {
/*
* Check that we have flushed all writes through the GGTT
@@ -878,7 +1115,7 @@ int i915_vma_unbind(struct i915_vma *vma)
if (likely(!vma->vm->closed)) {
trace_i915_vma_unbind(vma);
- vma->vm->unbind_vma(vma);
+ vma->ops->unbind_vma(vma);
}
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index fc4294cfaa91..f06d66377107 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -26,6 +26,7 @@
#define __I915_VMA_H__
#include <linux/io-mapping.h>
+#include <linux/rbtree.h>
#include <drm/drm_mm.h>
@@ -49,10 +50,12 @@ struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ const struct i915_vma_ops *ops;
struct drm_i915_fence_reg *fence;
struct reservation_object *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
+ void *private; /* owned by creator */
u64 size;
u64 display_alignment;
struct i915_page_sizes page_sizes;
@@ -92,8 +95,9 @@ struct i915_vma {
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
#define I915_VMA_GGTT_WRITE BIT(12)
- unsigned int active;
- struct i915_gem_active last_read[I915_NUM_ENGINES];
+ unsigned int active_count;
+ struct rb_root active;
+ struct i915_gem_active last_active;
struct i915_gem_active last_fence;
/**
@@ -136,6 +140,15 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+static inline bool i915_vma_is_active(struct i915_vma *vma)
+{
+ return vma->active_count;
+}
+
+int __must_check i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags);
+
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
return vma->flags & I915_VMA_GGTT;
@@ -185,34 +198,6 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
-{
- return vma->active;
-}
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
- return i915_vma_get_active(vma);
-}
-
-static inline void i915_vma_set_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active |= BIT(engine);
-}
-
-static inline void i915_vma_clear_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active &= ~BIT(engine);
-}
-
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
- unsigned int engine)
-{
- return vma->active & BIT(engine);
-}
-
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -339,6 +324,12 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
__i915_vma_unpin(vma);
}
+static inline bool i915_vma_is_bound(const struct i915_vma *vma,
+ unsigned int where)
+{
+ return vma->flags & where;
+}
+
/**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
* @vma: VMA to iomap
@@ -407,7 +398,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+ /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
if (vma->fence)
__i915_vma_unpin_fence(vma);
}
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
new file mode 100644
index 000000000000..13830e43a4d1
--- /dev/null
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Madhav Chauhan <madhav.chauhan@intel.com>
+ * Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include "intel_dsi.h"
+
+static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 afe_clk_khz; /* 8X Clock */
+ u32 esc_clk_div_m;
+
+ afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp,
+ intel_dsi->lane_count);
+
+ esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(ICL_DSI_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ POSTING_READ(ICL_DSI_ESC_CLK_DIV(port));
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port));
+ }
+}
+
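Illustrative numbers for the divider computed above (assuming DSI_MAX_ESC_CLK is 20000 kHz):

/*
 * pclk = 148500 kHz, bpp = 24, lane_count = 4:
 *   afe_clk_khz   = 148500 * 24 / 4             = 891000 kHz (8X clock)
 *   esc_clk_div_m = DIV_ROUND_UP(891000, 20000) = 45
 * so both ICL_DSI_ESC_CLK_DIV and ICL_DPHY_ESC_CLK_DIV get divider 45.
 */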
+static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp |= COMBO_PHY_MODE_DSI;
+ I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_display_power_get(dev_priv, port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO);
+ }
+}
+
+static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+ u32 lane_mask;
+
+ switch (intel_dsi->lane_count) {
+ case 1:
+ lane_mask = PWR_DOWN_LN_3_1_0;
+ break;
+ case 2:
+ lane_mask = PWR_DOWN_LN_3_1;
+ break;
+ case 3:
+ lane_mask = PWR_DOWN_LN_3;
+ break;
+ case 4:
+ default:
+ lane_mask = PWR_UP_ALL_LANES;
+ break;
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_CL_DW10(port));
+ tmp &= ~PWR_DOWN_LN_MASK;
+ I915_WRITE(ICL_PORT_CL_DW10(port), tmp | lane_mask);
+ }
+}
+
+static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
+{
+ /* step 4a: power up all lanes of the DDI used by DSI */
+ gen11_dsi_power_up_lanes(encoder);
+}
+
+static void __attribute__((unused))
+gen11_dsi_pre_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ /* step2: enable IO power */
+ gen11_dsi_enable_io_power(encoder);
+
+ /* step3: enable DSI PLL */
+ gen11_dsi_program_esc_clk_div(encoder);
+
+ /* step4: enable DSI port and DPHY */
+ gen11_dsi_enable_port_and_phy(encoder);
+}
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index d1abf4bb7c81..6ba478e57b9b 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -12,10 +12,6 @@
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
-static struct intel_dsm_priv {
- acpi_handle dhandle;
-} intel_dsm_priv;
-
static const guid_t intel_dsm_guid =
GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
@@ -72,12 +68,12 @@ static char *intel_dsm_mux_type(u8 type)
}
}
-static void intel_dsm_platform_mux_info(void)
+static void intel_dsm_platform_mux_info(acpi_handle dhandle)
{
int i;
union acpi_object *pkg, *connector_count;
- pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,
+ pkg = acpi_evaluate_dsm_typed(dhandle, &intel_dsm_guid,
INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
NULL, ACPI_TYPE_PACKAGE);
if (!pkg) {
@@ -107,41 +103,40 @@ static void intel_dsm_platform_mux_info(void)
ACPI_FREE(pkg);
}
-static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+static acpi_handle intel_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
- return false;
+ return NULL;
if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
- return false;
+ return NULL;
}
- intel_dsm_priv.dhandle = dhandle;
- intel_dsm_platform_mux_info();
+ intel_dsm_platform_mux_info(dhandle);
- return true;
+ return dhandle;
}
static bool intel_dsm_detect(void)
{
+ acpi_handle dhandle = NULL;
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
- bool has_dsm = false;
int vga_count = 0;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
- has_dsm |= intel_dsm_pci_probe(pdev);
+ dhandle = intel_dsm_pci_probe(pdev) ?: dhandle;
}
- if (vga_count == 2 && has_dsm) {
- acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+ if (vga_count == 2 && dhandle) {
+ acpi_get_name(dhandle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
return true;
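The `?:` (GNU ternary) accumulation keeps the last non-NULL handle found while walking the VGA devices; spelled out, the loop body is equivalent to:

acpi_handle h = intel_dsm_pci_probe(pdev);
if (h)
	dhandle = h;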
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 40285d1b91b7..b04952bacf77 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -59,7 +59,8 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
else if (property == dev_priv->broadcast_rgb_property)
*val = intel_conn_state->broadcast_rgb;
else {
- DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
+ DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -95,7 +96,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
return 0;
}
- DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
+ DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -124,6 +126,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
if (new_conn_state->force_audio != old_conn_state->force_audio ||
new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
+ new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
crtc_state->mode_changed = true;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 6d068786eb41..dcba645cabb8 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -120,12 +120,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
&crtc_state->base.adjusted_mode;
int ret;
- /*
- * Both crtc and plane->crtc could be NULL if we're updating a
- * property while the plane is disabled. We don't actually have
- * anything driver-specific we need to test in that case, so
- * just return success.
- */
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
@@ -209,12 +203,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
- /*
- * Both crtc and plane->crtc could be NULL if we're updating a
- * property while the plane is disabled. We don't actually have
- * anything driver-specific we need to test in that case, so
- * just return success.
- */
if (!crtc)
return 0;
@@ -277,7 +265,8 @@ intel_plane_atomic_get_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t *val)
{
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -299,6 +288,7 @@ intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val)
{
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 3ea566f99450..b725835b47ef 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -59,6 +59,7 @@
*/
/* DP N/M table */
+#define LC_810M 810000
#define LC_540M 540000
#define LC_270M 270000
#define LC_162M 162000
@@ -99,6 +100,15 @@ static const struct dp_aud_n_m dp_aud_n_m[] = {
{ 128000, LC_540M, 4096, 33750 },
{ 176400, LC_540M, 3136, 18750 },
{ 192000, LC_540M, 2048, 11250 },
+ { 32000, LC_810M, 1024, 50625 },
+ { 44100, LC_810M, 784, 28125 },
+ { 48000, LC_810M, 512, 16875 },
+ { 64000, LC_810M, 2048, 50625 },
+ { 88200, LC_810M, 1568, 28125 },
+ { 96000, LC_810M, 1024, 16875 },
+ { 128000, LC_810M, 4096, 50625 },
+ { 176400, LC_810M, 3136, 28125 },
+ { 192000, LC_810M, 2048, 16875 },
};
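The new HBR3 rows follow the same DP relation as the rest of the table, Maud / Naud = 512 * fs / f_LS_Clk; as a sanity check:

/*
 * 48 kHz @ 810 MHz link: 512 * 48000 / 810000000 = 512 / 16875,
 * matching the { 48000, LC_810M, 512, 16875 } entry above.
 */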
static const struct dp_aud_n_m *
@@ -198,13 +208,13 @@ static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
}
static bool intel_eld_uptodate(struct drm_connector *connector,
- i915_reg_t reg_eldv, uint32_t bits_eldv,
- i915_reg_t reg_elda, uint32_t bits_elda,
+ i915_reg_t reg_eldv, u32 bits_eldv,
+ i915_reg_t reg_elda, u32 bits_elda,
i915_reg_t reg_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- uint8_t *eld = connector->eld;
- uint32_t tmp;
+ const u8 *eld = connector->eld;
+ u32 tmp;
int i;
tmp = I915_READ(reg_eldv);
@@ -218,7 +228,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
I915_WRITE(reg_elda, tmp);
for (i = 0; i < drm_eld_size(eld) / 4; i++)
- if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ if (I915_READ(reg_edid) != *((const u32 *)eld + i))
return false;
return true;
@@ -229,7 +239,7 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- uint32_t eldv, tmp;
+ u32 eldv, tmp;
DRM_DEBUG_KMS("Disable audio codec\n");
@@ -251,12 +261,12 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_connector *connector = conn_state->connector;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t tmp;
+ const u8 *eld = connector->eld;
+ u32 eldv;
+ u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+ DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
tmp = I915_READ(G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
@@ -278,7 +288,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
len = min(drm_eld_size(eld) / 4, len);
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
- I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
tmp = I915_READ(G4X_AUD_CNTL_ST);
tmp |= eldv;
@@ -393,7 +403,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
- uint32_t tmp;
+ u32 tmp;
DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
@@ -426,8 +436,8 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
- const uint8_t *eld = connector->eld;
- uint32_t tmp;
+ const u8 *eld = connector->eld;
+ u32 tmp;
int len, i;
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
@@ -456,7 +466,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+ I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((const u32 *)eld + i));
/* ELD valid */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
@@ -477,7 +487,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
- uint32_t tmp, eldv;
+ u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
@@ -524,8 +534,8 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
- uint8_t *eld = connector->eld;
- uint32_t tmp, eldv;
+ const u8 *eld = connector->eld;
+ u32 tmp, eldv;
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
@@ -575,7 +585,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+ I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
/* ELD valid */
tmp = I915_READ(aud_cntrl_st2);
@@ -639,11 +649,12 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
dev_priv->av_enc_map[pipe] = encoder;
mutex_unlock(&dev_priv->av_mutex);
- if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ if (acomp && acomp->base.audio_ops &&
+ acomp->base.audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
(int) port, (int) pipe);
}
@@ -681,11 +692,12 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
- if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ if (acomp && acomp->base.audio_ops &&
+ acomp->base.audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
(int) port, (int) pipe);
}
@@ -880,7 +892,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
return ret;
}
-static const struct i915_audio_component_ops i915_audio_component_ops = {
+static const struct drm_audio_component_ops i915_audio_component_ops = {
.owner = THIS_MODULE,
.get_power = i915_audio_component_get_power,
.put_power = i915_audio_component_put_power,
@@ -897,12 +909,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
int i;
- if (WARN_ON(acomp->ops || acomp->dev))
+ if (WARN_ON(acomp->base.ops || acomp->base.dev))
return -EEXIST;
drm_modeset_lock_all(&dev_priv->drm);
- acomp->ops = &i915_audio_component_ops;
- acomp->dev = i915_kdev;
+ acomp->base.ops = &i915_audio_component_ops;
+ acomp->base.dev = i915_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
@@ -919,8 +931,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_modeset_lock_all(&dev_priv->drm);
- acomp->ops = NULL;
- acomp->dev = NULL;
+ acomp->base.ops = NULL;
+ acomp->base.dev = NULL;
dev_priv->audio_component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 54270bdde100..1faa494e2bc9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -267,8 +267,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data_ptrs)
return;
- dev_priv->vbt.lvds_vbt = 1;
-
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
panel_type);
@@ -518,8 +516,31 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (!driver)
return;
- if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
- dev_priv->vbt.edp.support = 1;
+ if (INTEL_GEN(dev_priv) >= 5) {
+ /*
+ * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
+ * to mean "eDP". The VBT spec doesn't agree with that
+ * interpretation, but real world VBTs seem to.
+ */
+ if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
+ dev_priv->vbt.int_lvds_support = 0;
+ } else {
+ /*
+ * FIXME it's not clear which BDB version has the LVDS config
+ * bits defined. Revision history in the VBT spec says:
+ * "0.92 | Add two definitions for VBT value of LVDS Active
+ * Config (00b and 11b values defined) | 06/13/2005"
+ * but does not specify the BDB version.
+ *
+ * So far version 134 (on i945gm) is the oldest VBT observed
+ * in the wild with the bits correctly populated. Version
+ * 108 (on i85x) does not have the bits correctly populated.
+ */
+ if (bdb->version >= 134 &&
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
+ dev_priv->vbt.int_lvds_support = 0;
+ }
DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
/*
@@ -542,11 +563,8 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
- if (!edp) {
- if (dev_priv->vbt.edp.support)
- DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+ if (!edp)
return;
- }
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
@@ -634,7 +652,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
}
if (bdb->version >= 173) {
- uint8_t vswing;
+ u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
if (i915_modparams.edp_vswing) {
@@ -688,8 +706,54 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
break;
}
- dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
- dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+ /*
+ * New psr options 0=500us, 1=100us, 2=2500us, 3=0us
+ * Old decimal value is wake up time in multiples of 100 us.
+ */
+ if (bdb->version >= 205 &&
+ (IS_GEN9_BC(dev_priv) || IS_GEMINILAKE(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 10)) {
+ switch (psr_table->tp1_wakeup_time) {
+ case 0:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
+ break;
+ case 1:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
+ break;
+ case 3:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp1_wakeup_time);
+ /* fallthrough */
+ case 2:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
+ break;
+ }
+
+ switch (psr_table->tp2_tp3_wakeup_time) {
+ case 0:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+ break;
+ case 1:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+ break;
+ case 3:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp2_tp3_wakeup_time);
+ /* fallthrough */
+ case 2:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+ break;
+ }
+ } else {
+ dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+ }
}
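The decode above is easy to misread because the default label deliberately falls through into case 2. A minimal standalone sketch of the same BDB >= 205 decode — hypothetical names, plain userspace C rather than driver code; the pre-205 path instead multiplies the raw value by 100 us:

#include <stdio.h>

/*
 * Sketch of the BDB >= 205 PSR wakeup-time decode: the VBT stores an
 * enum (0=500us, 1=100us, 2=2500us, 3=0us), and an out-of-range value
 * clamps to the longest wait, mirroring the fall-through above.
 */
static int psr_wakeup_time_us(int vbt_value)
{
	switch (vbt_value) {
	case 0:
		return 500;
	case 1:
		return 100;
	case 3:
		return 0;
	default:	/* out of range: clamp to the safe maximum */
	case 2:
		return 2500;
	}
}

int main(void)
{
	int v;

	for (v = 0; v <= 4; v++)
		printf("vbt %d -> %d us\n", v, psr_wakeup_time_us(v));
	return 0;
}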
static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
@@ -902,7 +966,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
* includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
* byte.
*/
- size_of_sequence = *((const uint32_t *)(data + index));
+ size_of_sequence = *((const u32 *)(data + index));
index += 4;
seq_end = index + size_of_sequence;
@@ -1197,18 +1261,37 @@ static const u8 cnp_ddc_pin_map[] = {
[DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
};
+static const u8 icp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+ [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+ [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+ [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
- if (HAS_PCH_CNP(dev_priv)) {
- if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
- return cnp_ddc_pin_map[vbt_pin];
- } else {
- DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
- return 0;
- }
+ const u8 *ddc_pin_map;
+ int n_entries;
+
+ if (HAS_PCH_ICP(dev_priv)) {
+ ddc_pin_map = icp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(icp_ddc_pin_map);
+ } else if (HAS_PCH_CNP(dev_priv)) {
+ ddc_pin_map = cnp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
+ } else {
+ /* Assuming direct map */
+ return vbt_pin;
}
- return vbt_pin;
+ if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
+ return ddc_pin_map[vbt_pin];
+
+ DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+ vbt_pin);
+ return 0;
}
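The rewrite above replaces per-PCH branches with a table plus one shared validity check. A hedged standalone sketch of the same lookup shape (the table contents here are made up; only the bounds-plus-zero check mirrors map_ddc_pin()):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative table: index = VBT pin, value = mapped pin, 0 = hole. */
static const unsigned char example_ddc_pin_map[] = { 0, 1, 2, 0, 9 };

static unsigned char map_pin(unsigned char vbt_pin)
{
	/* One check covers both "past the table" and "hole in the table". */
	if (vbt_pin < ARRAY_SIZE(example_ddc_pin_map) &&
	    example_ddc_pin_map[vbt_pin] != 0)
		return example_ddc_pin_map[vbt_pin];

	printf("ignoring invalid DDC pin %u\n", vbt_pin);
	return 0;
}

int main(void)
{
	printf("%u %u %u\n", map_pin(1), map_pin(3), map_pin(7));
	return 0;
}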
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
@@ -1504,7 +1587,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* LFP panel data */
dev_priv->vbt.lvds_dither = 1;
- dev_priv->vbt.lvds_vbt = 0;
/* SDVO panel data */
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
@@ -1513,6 +1595,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.int_tv_support = 1;
dev_priv->vbt.int_crt_support = 1;
+ /* driver features */
+ dev_priv->vbt.int_lvds_support = 1;
+
/* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1;
/*
@@ -1636,7 +1721,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (HAS_PCH_NOP(dev_priv)) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 0) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 18e643df523e..1db6ba7d926e 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -98,12 +98,14 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t)
struct intel_engine_cs *engine =
from_timer(engine, t, breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned int irq_count;
if (!b->irq_armed)
return;
- if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
- b->hangcheck_interrupts = atomic_read(&engine->irq_count);
+ irq_count = READ_ONCE(b->irq_count);
+ if (b->hangcheck_interrupts != irq_count) {
+ b->hangcheck_interrupts = irq_count;
mod_timer(&b->hangcheck, wait_timeout());
return;
}
@@ -272,13 +274,14 @@ static bool use_fake_irq(const struct intel_breadcrumbs *b)
if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
return false;
- /* Only start with the heavy weight fake irq timer if we have not
+ /*
+ * Only start with the heavy weight fake irq timer if we have not
* seen any interrupts since enabling it the first time. If the
* interrupts are still arriving, it means we made a mistake in our
* engine->seqno_barrier(), a timing error that should be transient
* and unlikely to reoccur.
*/
- return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
+ return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
}
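The hangcheck change above swaps an atomic counter for a plain one sampled with READ_ONCE(). A userspace sketch of the underlying heuristic — progress is inferred from the counter moving between samples; names are illustrative and the kernel's READ_ONCE() is shown as a plain load:

#include <stdbool.h>
#include <stdio.h>

struct breadcrumbs_sketch {
	unsigned int irq_count;			/* bumped by the IRQ path */
	unsigned int hangcheck_interrupts;	/* last sample taken */
};

/* Returns true when no interrupt arrived since the previous sample. */
static bool hangcheck_stalled(struct breadcrumbs_sketch *b)
{
	unsigned int irq_count = b->irq_count;	/* READ_ONCE() in the driver */

	if (b->hangcheck_interrupts != irq_count) {
		b->hangcheck_interrupts = irq_count;	/* still progressing */
		return false;
	}
	return true;
}

int main(void)
{
	struct breadcrumbs_sketch b = {
		.irq_count = 3,
		.hangcheck_interrupts = 2,
	};

	printf("stalled: %d\n", hangcheck_stalled(&b));	/* 0: counter moved */
	printf("stalled: %d\n", hangcheck_stalled(&b));	/* 1: no new irqs */
	return 0;
}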
static void enable_fake_irq(struct intel_breadcrumbs *b)
@@ -846,8 +849,9 @@ static void cancel_fake_irq(struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
- spin_lock_irq(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
/*
* Leave the fake_irq timer enabled (if it is running), but clear the
@@ -871,7 +875,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
*/
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
- spin_unlock_irq(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 704ddb4d3ca7..29075c763428 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -316,6 +316,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
break;
default:
DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+ /* fall through */
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
cdclk_state->cdclk = 133333;
break;
@@ -991,6 +992,16 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
u32 freq_select, cdclk_ctl;
int ret;
+ /*
+ * Based on WA#1183, the 308 and 617 MHz CDCLK rates are
+ * unsupported on SKL. In theory this should never happen since only
+ * the eDP1.4 2.16 and 4.32Gbps rates require it, but eDP1.4 is not
+ * supported on SKL either, see the above WA. WARN whenever trying to
+ * use the corresponding VCO freq as that always leads to using the
+ * minimum 308MHz CDCLK.
+ */
+ WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+
mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
@@ -1787,6 +1798,7 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
switch (ref) {
default:
MISSING_CASE(ref);
+ /* fall through */
case 24000:
ranges = ranges_24;
break;
@@ -1814,6 +1826,7 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
switch (cdclk) {
default:
MISSING_CASE(cdclk);
+ /* fall through */
case 307200:
case 556800:
case 652800:
@@ -1861,11 +1874,36 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
skl_cdclk_decimal(cdclk));
mutex_lock(&dev_priv->pcu_lock);
- /* TODO: add proper DVFS support. */
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2);
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
+
+ /*
+ * Can't read out the voltage level :(
+ * Let's just assume everything is as expected.
+ */
+ dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+}
+
+static u8 icl_calc_voltage_level(int cdclk)
+{
+ switch (cdclk) {
+ case 50000:
+ case 307200:
+ case 312000:
+ return 0;
+ case 556800:
+ case 552000:
+ return 1;
+ default:
+ MISSING_CASE(cdclk);
+ /* fall through */
+ case 652800:
+ case 648000:
+ return 2;
+ }
}
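icl_calc_voltage_level() above buckets CDCLK frequencies into PCode voltage levels, clamping unknown frequencies to the highest level via the default fall-through. A standalone sketch of the same mapping (plain C, not driver code):

#include <stdio.h>

static unsigned char voltage_level(int cdclk_khz)
{
	switch (cdclk_khz) {
	case 50000:
	case 307200:
	case 312000:
		return 0;
	case 552000:
	case 556800:
		return 1;
	default:	/* unknown frequency: err toward the highest level */
	case 648000:
	case 652800:
		return 2;
	}
}

int main(void)
{
	const int freqs[] = { 307200, 556800, 652800, 123456 };
	unsigned int i;

	for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
		printf("%d kHz -> level %u\n", freqs[i], voltage_level(freqs[i]));
	return 0;
}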
static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1879,6 +1917,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
default:
MISSING_CASE(val);
+ /* fall through */
case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
cdclk_state->ref = 24000;
break;
@@ -1899,7 +1938,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
*/
cdclk_state->vco = 0;
cdclk_state->cdclk = cdclk_state->bypass;
- return;
+ goto out;
}
cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
@@ -1908,6 +1947,14 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
cdclk_state->cdclk = cdclk_state->vco / 2;
+
+out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ icl_calc_voltage_level(cdclk_state->cdclk);
}
/**
@@ -1950,6 +1997,8 @@ sanitize:
sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
sanitized_state.cdclk);
+ sanitized_state.voltage_level =
+ icl_calc_voltage_level(sanitized_state.cdclk);
icl_set_cdclk(dev_priv, &sanitized_state);
}
@@ -1967,6 +2016,7 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
+ cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
icl_set_cdclk(dev_priv, &cdclk_state);
}
@@ -2470,6 +2520,9 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ max(icl_calc_voltage_level(cdclk),
+ cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
cdclk = icl_calc_cdclk(0, ref);
@@ -2477,6 +2530,8 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ icl_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual = intel_state->cdclk.logical;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 072b326d5ee0..0c6bf82bb059 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -63,33 +63,35 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(adpa_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
+ else
+ *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
+
+ return val & ADPA_DAC_ENABLE;
+}
+
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(crt->adpa_reg);
+ ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
- if (!(tmp & ADPA_DAC_ENABLE))
- goto out;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -168,11 +170,9 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
if (HAS_PCH_LPT(dev_priv))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev_priv))
- adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
- else if (crtc->pipe == 0)
- adpa |= ADPA_PIPE_A_SELECT;
+ adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
else
- adpa |= ADPA_PIPE_B_SELECT;
+ adpa |= ADPA_PIPE_SEL(crtc->pipe);
if (!HAS_PCH_SPLIT(dev_priv))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
@@ -232,6 +232,8 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
lpt_disable_pch_transcoder(dev_priv);
@@ -268,6 +270,8 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
dev_priv->display.fdi_link_train(crtc, crtc_state);
+
+ intel_ddi_enable_pipe_clock(crtc_state);
}
static void hsw_enable_crt(struct intel_encoder *encoder,
@@ -333,6 +337,10 @@ intel_crt_mode_valid(struct drm_connector *connector,
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
return MODE_CLOCK_HIGH;
+ /* HSW/BDW FDI limited to 4k */
+ if (mode->hdisplay > 4096)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -375,6 +383,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ /* HSW/BDW FDI limited to 4k */
+ if (adjusted_mode->crtc_hdisplay > 4096 ||
+ adjusted_mode->crtc_hblank_start > 4096)
+ return false;
+
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
@@ -513,7 +526,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
+ if (IS_G45(dev_priv))
tries = 2;
else
tries = 1;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f4a8598a2d39..8761513f3532 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -915,7 +915,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ if (port == PORT_A || port == PORT_B)
+ icl_get_combo_buf_trans(dev_priv, port,
+ INTEL_OUTPUT_HDMI, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ default_entry = n_entries - 1;
+ } else if (IS_CANNONLAKE(dev_priv)) {
cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
default_entry = n_entries - 1;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -1055,14 +1062,31 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
const struct intel_shared_dpll *pll)
{
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ int clock = crtc->config->port_clock;
const enum intel_dpll_id id = pll->info->id;
switch (id) {
default:
MISSING_CASE(id);
+ /* fall through */
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
return DDI_CLK_SEL_NONE;
+ case DPLL_ID_ICL_TBTPLL:
+ switch (clock) {
+ case 162000:
+ return DDI_CLK_SEL_TBT_162;
+ case 270000:
+ return DDI_CLK_SEL_TBT_270;
+ case 540000:
+ return DDI_CLK_SEL_TBT_540;
+ case 810000:
+ return DDI_CLK_SEL_TBT_810;
+ default:
+ MISSING_CASE(clock);
+ break;
+ }
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
@@ -1243,35 +1267,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret;
}
-/* Finds the only possible encoder associated with the given CRTC. */
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_encoder *ret = NULL;
- struct drm_atomic_state *state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int num_encoders = 0;
- int i;
-
- state = crtc_state->base.state;
-
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- ret = to_intel_encoder(connector_state->best_encoder);
- num_encoders++;
- }
-
- WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
- pipe_name(crtc->pipe));
-
- BUG_ON(ret == NULL);
- return ret;
-}
-
#define LC_FREQ 2700
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
@@ -1374,8 +1369,13 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
- cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+ if (INTEL_GEN(dev_priv) >= 11) {
+ cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+ } else {
+ cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+ }
p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
@@ -1451,6 +1451,30 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
+static void icl_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ int link_clock = 0;
+ uint32_t pll_id;
+
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ if (port == PORT_A || port == PORT_B) {
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ else
+ link_clock = icl_calc_dp_combo_pll_link(dev_priv,
+ pll_id);
+ } else {
+ /* FIXME - Add for MG PLL */
+ WARN(1, "MG PLL clock_get code not implemented yet\n");
+ }
+
+ pipe_config->port_clock = link_clock;
+ ddi_dotclock_get(pipe_config);
+}
+
static void cnl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -1644,6 +1668,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
bxt_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
+ else if (IS_ICELAKE(dev_priv))
+ icl_ddi_clock_get(encoder, pipe_config);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1659,6 +1685,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
WARN_ON(transcoder_is_dsi(cpu_transcoder));
temp = TRANS_MSA_SYNC_CLK;
+
+ if (crtc_state->limited_color_range)
+ temp |= TRANS_MSA_CEA_RANGE;
+
switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
@@ -1782,15 +1812,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
+
+ if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+ /* Quirk time at 100ms for reliable operation */
+ msleep(100);
+ }
}
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
@@ -1958,15 +1997,50 @@ out:
return ret;
}
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder)
+static inline enum intel_display_power_domain
+intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
+{
+ /* CNL HW requires corresponding AUX IOs to be powered up for PSR with
+ * DC states enabled at the same time, while for driver initiated AUX
+ * transfers we need the same AUX IOs to be powered but with DC states
+ * disabled. Accordingly use the AUX power domain here which leaves DC
+ * states enabled.
+ * However, for non-A AUX ports the corresponding non-EDP transcoders
+ * would have already enabled power well 2 and DC_OFF. This means we can
+ * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
+ * specific AUX_IO reference without powering up any extra wells.
+ * Note that PSR is enabled only on Port A even though this function
+ * returns the correct domain for other ports too.
+ */
+ return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+ intel_dp->aux_power_domain;
+}
+
+static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum pipe pipe;
+ struct intel_digital_port *dig_port;
+ u64 domains;
- if (intel_ddi_get_hw_state(encoder, &pipe))
- return BIT_ULL(dig_port->ddi_io_power_domain);
+ /*
+ * TODO: Add support for MST encoders. At the moment the following
+ * should never happen, since fake-MST encoders don't set their
+ * get_power_domains() hook.
+ */
+ if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+ return 0;
- return 0;
+ dig_port = enc_to_dig_port(&encoder->base);
+ domains = BIT_ULL(dig_port->ddi_io_power_domain);
+
+ /* AUX power is only needed for (e)DP mode, not for HDMI. */
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ struct intel_dp *intel_dp = &dig_port->dp;
+
+ domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
+ }
+
+ return domains;
}
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2115,6 +2189,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
DP_TRAIN_VOLTAGE_SWING_MASK;
}
+/*
+ * We assume that the full set of pre-emphasis values can be
+ * used on all DDI platforms. Should that change we need to
+ * rethink this code.
+ */
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ default:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
+ }
+}
+
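One way to read the table in intel_ddi_dp_pre_emphasis_max() is that swing level plus pre-emphasis level never exceeds 3, which matches the usual DisplayPort amplitude budget — an interpretation, not something the patch states. A sketch with plain ints in place of the DP_TRAIN_* fields:

#include <stdio.h>

static int max_pre_emphasis(int voltage_swing)	/* levels 0..3 */
{
	/* Mirrors the switch above: 0->3, 1->2, 2->1, 3->0. */
	return 3 - voltage_swing;
}

int main(void)
{
	int vs;

	for (vs = 0; vs <= 3; vs++)
		printf("swing %d -> max pre-emph %d\n", vs, max_pre_emphasis(vs));
	return 0;
}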
static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
@@ -2586,6 +2680,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(intel_dp));
+
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -2610,6 +2707,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
+
+ intel_ddi_enable_pipe_clock(crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2640,6 +2739,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
+ intel_ddi_enable_pipe_clock(crtc_state);
+
intel_dig_port->set_infoframes(&encoder->base,
crtc_state->has_infoframe,
crtc_state, conn_state);
@@ -2709,6 +2810,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
/*
* Power down sink before disabling the port, otherwise we end
* up getting interrupts from the sink on detecting link loss.
@@ -2724,6 +2827,9 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
+
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(intel_dp));
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -2734,11 +2840,13 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- intel_disable_ddi_buf(encoder);
-
dig_port->set_infoframes(&encoder->base, false,
old_crtc_state, old_conn_state);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
+ intel_disable_ddi_buf(encoder);
+
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
@@ -3025,6 +3133,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
{
if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
+ else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 1;
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3533,7 +3643,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
goto err;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hotplug.irq_port[port] = intel_dig_port;
}
/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 0fd13df424cf..0ef0c6448d53 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -858,6 +858,8 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p)
{
+ drm_printf(p, "Has logical contexts? %s\n",
+ yesno(caps->has_logical_contexts));
drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 933e31669557..633f9fbf72ea 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -186,6 +186,7 @@ struct intel_device_info {
struct intel_driver_caps {
unsigned int scheduler;
+ bool has_logical_contexts:1;
};
static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2cc6faa1daa8..ed3fa1c8a983 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1022,7 +1022,7 @@ bool intel_crtc_active(struct intel_crtc *crtc)
* We can ditch the adjusted_mode.crtc_clock check as soon
* as Haswell has gained clock readout/fastboot support.
*
- * We can ditch the crtc->primary->fb check as soon as we can
+ * We can ditch the crtc->primary->state->fb check as soon as we can
* properly reconstruct framebuffers.
*
* FIXME: The intel_crtc->active here should be switched to
@@ -1202,7 +1202,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
i915_reg_t pp_reg;
u32 val;
- enum pipe panel_pipe = PIPE_A;
+ enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
if (WARN_ON(HAS_DDI(dev_priv)))
@@ -1214,18 +1214,35 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
pp_reg = PP_CONTROL(0);
port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
- if (port_sel == PANEL_PORT_SELECT_LVDS &&
- I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
- panel_pipe = PIPE_B;
- /* XXX: else fix for eDP */
+ switch (port_sel) {
+ case PANEL_PORT_SELECT_LVDS:
+ intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPA:
+ intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPC:
+ intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPD:
+ intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+ break;
+ default:
+ MISSING_CASE(port_sel);
+ break;
+ }
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = PP_CONTROL(pipe);
panel_pipe = pipe;
} else {
+ u32 port_sel;
+
pp_reg = PP_CONTROL(0);
- if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
- panel_pipe = PIPE_B;
+ port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+ WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+ intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
val = I915_READ(pp_reg);
@@ -1267,7 +1284,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
static void assert_plane(struct intel_plane *plane, bool state)
{
- bool cur_state = plane->get_hw_state(plane);
+ enum pipe pipe;
+ bool cur_state;
+
+ cur_state = plane->get_hw_state(plane, &pipe);
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
@@ -1305,125 +1325,64 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 port_sel, u32 val)
-{
- if ((val & DP_PORT_EN) == 0)
- return false;
-
- if (HAS_PCH_CPT(dev_priv)) {
- u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
- if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
- return false;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
- return false;
- } else {
- if ((val & DP_PIPE_MASK) != (pipe << 30))
- return false;
- }
- return true;
-}
-
-static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t dp_reg)
{
- if ((val & SDVO_ENABLE) == 0)
- return false;
-
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
- return false;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
- return false;
- } else {
- if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
- return false;
- }
- return true;
-}
+ enum pipe port_pipe;
+ bool state;
-static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & LVDS_PORT_EN) == 0)
- return false;
+ state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
- return false;
- }
- return true;
-}
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH DP %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
-static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & ADPA_DAC_ENABLE) == 0)
- return false;
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
- return false;
- }
- return true;
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH DP %c still using transcoder B\n",
+ port_name(port));
}
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, i915_reg_t reg,
- u32 port_sel)
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t hdmi_reg)
{
- u32 val = I915_READ(reg);
- I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
- "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
- i915_mmio_reg_offset(reg), pipe_name(pipe));
+ enum pipe port_pipe;
+ bool state;
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
- && (val & DP_PIPEB_SELECT),
- "IBX PCH dp port still using transcoder B\n");
-}
+ state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, i915_reg_t reg)
-{
- u32 val = I915_READ(reg);
- I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
- "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
- i915_mmio_reg_offset(reg), pipe_name(pipe));
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
- && (val & SDVO_PIPE_B_SELECT),
- "IBX PCH hdmi port still using transcoder B\n");
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH HDMI %c still using transcoder B\n",
+ port_name(port));
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- u32 val;
+ enum pipe port_pipe;
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
- val = I915_READ(PCH_ADPA);
- I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
- "PCH VGA enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
+ I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
- val = I915_READ(PCH_LVDS);
- I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
- "PCH LVDS enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
+ I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
static void _vlv_enable_pll(struct intel_crtc *crtc,
@@ -2521,6 +2480,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 gtt_offset_rotated = 0;
unsigned int max_size = 0;
int i, num_planes = fb->format->num_planes;
@@ -2585,7 +2545,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
* fb layout agrees with the fence layout. We already check that the
* fb stride matches the fence stride elsewhere.
*/
- if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) &&
+ if (i == 0 && i915_gem_object_is_tiled(obj) &&
(x + width) * cpp > fb->pitches[i]) {
DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
i, fb->offsets[i]);
@@ -2670,9 +2630,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
max_size = max(max_size, offset + size);
}
- if (max_size * tile_size > intel_fb->obj->base.size) {
+ if (max_size * tile_size > obj->base.size) {
DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
- max_size * tile_size, intel_fb->obj->base.size);
+ max_size * tile_size, obj->base.size);
return -EINVAL;
}
@@ -2796,10 +2756,10 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
/* FIXME pre-g4x don't work like this */
if (visible) {
- crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
+ crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
crtc_state->active_planes |= BIT(plane->id);
} else {
- crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
+ crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
crtc_state->active_planes &= ~BIT(plane->id);
}
@@ -2922,9 +2882,8 @@ valid_fb:
if (i915_gem_object_is_tiled(obj))
dev_priv->preserve_bios_swizzle = true;
- drm_framebuffer_get(fb);
- primary->fb = primary->state->fb = fb;
- primary->crtc = primary->state->crtc = &intel_crtc->base;
+ plane_state->fb = fb;
+ plane_state->crtc = &intel_crtc->base;
intel_set_plane_visible(to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state),
@@ -3430,24 +3389,33 @@ static void i9xx_disable_plane(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane)
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- enum pipe pipe = plane->pipe;
bool ret;
+ u32 val;
/*
* Not 100% correct for planes that can move between pipes,
* but that's only the case for gen2-4 which don't have any
* display power wells.
*/
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE;
+ val = I915_READ(DSPCNTR(i9xx_plane));
+
+ ret = val & DISPLAY_PLANE_ENABLE;
+
+ if (INTEL_GEN(dev_priv) >= 5)
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
intel_display_power_put(dev_priv, power_domain);
@@ -3689,7 +3657,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
- if (intel_format_is_yuv(fb->format->format)) {
+ if (fb->format->is_yuv) {
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
@@ -4631,20 +4599,33 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
}
}
-/* Return which DP Port should be selected for Transcoder DP control */
-static enum port
-intel_trans_dp_port_sel(struct intel_crtc *crtc)
+/*
+ * Finds the encoder associated with the given CRTC. This can only be
+ * used when we know that the CRTC isn't feeding multiple encoders!
+ */
+static struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_connector_state *connector_state;
+ const struct drm_connector *connector;
+ struct intel_encoder *encoder = NULL;
+ int num_encoders = 0;
+ int i;
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_EDP)
- return encoder->port;
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ num_encoders++;
}
- return -1;
+ WARN(num_encoders != 1, "%d encoders for pipe %c\n",
+ num_encoders, pipe_name(crtc->pipe));
+
+ return encoder;
}
/*
@@ -4655,7 +4636,8 @@ intel_trans_dp_port_sel(struct intel_crtc *crtc)
* - DP transcoding bits
* - transcoder
*/
-static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
+static void ironlake_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
@@ -4714,6 +4696,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
&crtc_state->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
+ enum port port;
+
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
@@ -4726,19 +4710,9 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
- switch (intel_trans_dp_port_sel(crtc)) {
- case PORT_B:
- temp |= TRANS_DP_PORT_SEL_B;
- break;
- case PORT_C:
- temp |= TRANS_DP_PORT_SEL_C;
- break;
- case PORT_D:
- temp |= TRANS_DP_PORT_SEL_D;
- break;
- default:
- BUG();
- }
+ port = intel_get_crtc_new_encoder(state, crtc_state)->port;
+ WARN_ON(port < PORT_B || port > PORT_D);
+ temp |= TRANS_DP_PORT_SEL(port);
I915_WRITE(reg, temp);
}
@@ -4746,7 +4720,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
ironlake_enable_pch_transcoder(dev_priv, pipe);
}
-static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
+static void lpt_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -4776,6 +4751,39 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
+/*
+ * The hardware phase 0.0 refers to the center of the pixel.
+ * We want to start from the top/left edge which is phase
+ * -0.5. That matches how the hardware calculates the scaling
+ * factors (from top-left of the first pixel to bottom-right
+ * of the last pixel, as opposed to the pixel centers).
+ *
+ * For 4:2:0 subsampled chroma planes we obviously have to
+ * adjust that so that the chroma sample position lands in
+ * the right spot.
+ *
+ * Note that for packed YCbCr 4:2:2 formats there is no way to
+ * control chroma siting. The hardware simply replicates the
+ * chroma samples for both of the luma samples, and thus we don't
+ * actually get the expected MPEG2 chroma siting convention :(
+ * The same behaviour is observed on pre-SKL platforms as well.
+ */
+u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+{
+ int phase = -0x8000;
+ u16 trip = 0;
+
+ if (chroma_cosited)
+ phase += (sub - 1) * 0x8000 / sub;
+
+ if (phase < 0)
+ phase = 0x10000 + phase;
+ else
+ trip = PS_PHASE_TRIP;
+
+ return ((phase >> 2) & PS_PHASE_MASK) | trip;
+}
+
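The phase values are fixed point with 0x10000 representing one pixel. A worked sketch of skl_scaler_calc_phase()'s arithmetic only (the PS_PHASE_MASK/PS_PHASE_TRIP register packing is left out, since those layouts aren't shown here):

#include <stdio.h>

static int scaler_phase(int sub, int chroma_cosited)
{
	int phase = -0x8000;	/* -0.5: top/left edge, not pixel center */

	if (chroma_cosited)
		phase += (sub - 1) * 0x8000 / sub;

	return phase;
}

int main(void)
{
	/* RGB / luma plane: always -0.5 */
	printf("luma: %+.4f px\n", scaler_phase(1, 0) / 65536.0);
	/* 4:2:0 cosited chroma: -0.5 + (1/2) * 0.5 = -0.25 */
	printf("4:2:0 cosited chroma: %+.4f px\n", scaler_phase(2, 1) / 65536.0);
	return 0;
}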
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
@@ -4975,14 +4983,22 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
&crtc->config->scaler_state;
if (crtc->config->pch_pfit.enabled) {
+ u16 uv_rgb_hphase, uv_rgb_vphase;
int id;
if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
return;
+ uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
}
@@ -5501,10 +5517,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
*
* Spurious PCH underruns also occur during PCH enabling.
*/
- if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
@@ -5549,7 +5563,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
- ironlake_pch_enable(pipe_config);
+ ironlake_pch_enable(old_intel_state, pipe_config);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5559,9 +5573,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
if (HAS_PCH_CPT(dev_priv))
cpt_verify_modeset(dev, intel_crtc->pipe);
- /* Must wait for vblank to avoid spurious PCH FIFO underruns */
- if (intel_crtc->config->has_pch_encoder)
+ /*
+ * Must wait for vblank to avoid spurious PCH FIFO underruns.
+ * And a second vblank wait is needed at least on ILK with
+ * some interlaced HDMI modes. Let's do the double wait always
+ * in case there are more corner cases we don't know about.
+ */
+ if (intel_crtc->config->has_pch_encoder) {
intel_wait_for_vblank(dev_priv, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
+ }
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -5611,6 +5632,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
bool psl_clkgate_wa;
+ u32 pipe_chicken;
if (WARN_ON(intel_crtc->active))
return;
@@ -5623,6 +5645,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (INTEL_GEN(dev_priv) >= 11)
icl_map_plls_to_ports(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(crtc, pipe_config, old_state);
+
if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -5651,11 +5675,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
-
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_pipe_clock(pipe_config);
-
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
intel_crtc->config->pch_pfit.enabled;
@@ -5673,6 +5692,17 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(&pipe_config->base);
+ /*
+ * Display WA #1153: enable hardware to bypass the alpha math
+ * and rounding for per-pixel values 00 and 0xff
+ */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
+ if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
+ I915_WRITE_FW(PIPE_CHICKEN(pipe),
+ pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
+ }
+
intel_ddi_set_pipe_settings(pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_transcoder_func(pipe_config);
@@ -5688,7 +5718,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
- lpt_pch_enable(pipe_config);
+ lpt_pch_enable(old_intel_state, pipe_config);
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(pipe_config, true);
@@ -5741,10 +5771,8 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
* pipe is already disabled, but FDI RX/TX is still enabled.
* Happens at least with VGA+HDMI cloning. Suppress them.
*/
- if (intel_crtc->config->has_pch_encoder) {
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- }
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -5794,7 +5822,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -5805,20 +5833,17 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_disable_pipe(old_crtc_state);
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
+ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+ intel_ddi_disable_transcoder_func(old_crtc_state);
if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc, false);
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_pipe_clock(intel_crtc->config);
-
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (INTEL_GEN(dev_priv) >= 11)
@@ -5849,6 +5874,22 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (IS_ICELAKE(dev_priv))
+ return port >= PORT_C && port <= PORT_F;
+
+ return false;
+}
+
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (!intel_port_is_tc(dev_priv, port))
+ return PORT_TC_NONE;
+
+ return port - PORT_C;
+}
+
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
switch (port) {
@@ -7675,16 +7716,18 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe;
u32 val, base, offset;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- if (!plane->get_hw_state(plane))
+ if (!plane->get_hw_state(plane, &pipe))
return;
+ WARN_ON(pipe != crtc->pipe);
+
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -8705,16 +8748,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum plane_id plane_id = plane->id;
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe;
u32 val, base, offset, stride_mult, tiling, alpha;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- if (!plane->get_hw_state(plane))
+ if (!plane->get_hw_state(plane, &pipe))
return;
+ WARN_ON(pipe != crtc->pipe);
+
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -9142,9 +9187,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->base.state);
+
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
struct intel_encoder *encoder =
- intel_ddi_get_crtc_new_encoder(crtc_state);
+ intel_get_crtc_new_encoder(state, crtc_state);
if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
@@ -9172,6 +9220,44 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
+static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
+ enum port port,
+ struct intel_crtc_state *pipe_config)
+{
+ enum intel_dpll_id id;
+ u32 temp;
+
+ /* TODO: TBT pll not implemented. */
+ switch (port) {
+ case PORT_A:
+ case PORT_B:
+ temp = I915_READ(DPCLKA_CFGCR0_ICL) &
+ DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
+
+ if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+ return;
+ break;
+ case PORT_C:
+ id = DPLL_ID_ICL_MGPLL1;
+ break;
+ case PORT_D:
+ id = DPLL_ID_ICL_MGPLL2;
+ break;
+ case PORT_E:
+ id = DPLL_ID_ICL_MGPLL3;
+ break;
+ case PORT_F:
+ id = DPLL_ID_ICL_MGPLL4;
+ break;
+ default:
+ MISSING_CASE(port);
+ return;
+ }
+
+ pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
@@ -9273,6 +9359,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
WARN(1, "unknown pipe linked to edp transcoder\n");
+ /* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
case TRANS_DDI_EDP_INPUT_A_ON:
trans_edp_pipe = PIPE_A;
@@ -9328,7 +9415,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
* registers/MIPI[BXT]. We can break out here early, since we
* need the same DSI PLL to be enabled for both DSI ports.
*/
- if (!intel_dsi_pll_is_enabled(dev_priv))
+ if (!bxt_dsi_pll_is_enabled(dev_priv))
break;
/* XXX: this works for video mode only */
@@ -9359,7 +9446,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icelake_get_ddi_pll(dev_priv, port, pipe_config);
+ else if (IS_CANNONLAKE(dev_priv))
cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_BC(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -9692,7 +9781,8 @@ static void i845_disable_cursor(struct intel_plane *plane,
i845_update_cursor(plane, NULL, NULL);
}
-static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
@@ -9704,6 +9794,8 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane)
ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+ *pipe = PIPE_A;
+
intel_display_power_put(dev_priv, power_domain);
return ret;
@@ -9715,25 +9807,30 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- u32 cntl;
+ u32 cntl = 0;
- cntl = MCURSOR_GAMMA_ENABLE;
+ if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
- if (HAS_DDI(dev_priv))
- cntl |= CURSOR_PIPE_CSC_ENABLE;
+ if (INTEL_GEN(dev_priv) <= 10) {
+ cntl |= MCURSOR_GAMMA_ENABLE;
+
+ if (HAS_DDI(dev_priv))
+ cntl |= MCURSOR_PIPE_CSC_ENABLE;
+ }
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
switch (plane_state->base.crtc_w) {
case 64:
- cntl |= CURSOR_MODE_64_ARGB_AX;
+ cntl |= MCURSOR_MODE_64_ARGB_AX;
break;
case 128:
- cntl |= CURSOR_MODE_128_ARGB_AX;
+ cntl |= MCURSOR_MODE_128_ARGB_AX;
break;
case 256:
- cntl |= CURSOR_MODE_256_ARGB_AX;
+ cntl |= MCURSOR_MODE_256_ARGB_AX;
break;
default:
MISSING_CASE(plane_state->base.crtc_w);
@@ -9741,7 +9838,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
}
if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
- cntl |= CURSOR_ROTATE_180;
+ cntl |= MCURSOR_ROTATE_180;
return cntl;
}
@@ -9903,23 +10000,32 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
i9xx_update_cursor(plane, NULL, NULL);
}
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
+ u32 val;
/*
* Not 100% correct for planes that can move between pipes,
* but that's only the case for gen2-3 which don't have any
* display power wells.
*/
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+ val = I915_READ(CURCNTR(plane->pipe));
+
+ ret = val & MCURSOR_MODE;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+ MCURSOR_PIPE_SELECT_SHIFT;
intel_display_power_put(dev_priv, power_domain);
@@ -10631,7 +10737,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.state->crtc)
- drm_connector_unreference(&connector->base);
+ drm_connector_put(&connector->base);
if (connector->base.encoder) {
connector->base.state->best_encoder =
@@ -10639,7 +10745,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
connector->base.state->crtc =
connector->base.encoder->crtc;
- drm_connector_reference(&connector->base);
+ drm_connector_get(&connector->base);
} else {
connector->base.state->best_encoder = NULL;
connector->base.state->crtc = NULL;
@@ -10918,6 +11024,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
case INTEL_OUTPUT_DDI:
if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
+ /* else: fall through */
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
@@ -11791,7 +11898,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
struct drm_crtc_state *new_state)
{
struct intel_dpll_hw_state dpll_hw_state;
- unsigned crtc_mask;
+ unsigned int crtc_mask;
bool active;
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
@@ -11818,7 +11925,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
return;
}
- crtc_mask = 1 << drm_crtc_index(crtc);
+ crtc_mask = drm_crtc_mask(crtc);
if (new_state->active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
@@ -11853,7 +11960,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
if (old_state->shared_dpll &&
old_state->shared_dpll != new_state->shared_dpll) {
- unsigned crtc_mask = 1 << drm_crtc_index(crtc);
+ unsigned int crtc_mask = drm_crtc_mask(crtc);
struct intel_shared_dpll *pll = old_state->shared_dpll;
I915_STATE_WARN(pll->active_mask & crtc_mask,
@@ -12449,6 +12556,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
+static void intel_atomic_cleanup_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state =
+ container_of(work, struct drm_atomic_state, commit_work);
+ struct drm_i915_private *i915 = to_i915(state->dev);
+
+ drm_atomic_helper_cleanup_planes(&i915->drm, state);
+ drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_state_put(state);
+
+ intel_atomic_helper_free_state(i915);
+}
+
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -12609,13 +12729,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
}
- drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_helper_commit_cleanup_done(state);
-
- drm_atomic_state_put(state);
-
- intel_atomic_helper_free_state(dev_priv);
+ /*
+ * Defer the cleanup of the old state to a separate worker so as not
+ * to impede the current task (userspace for blocking modesets) that
+ * is executed inline. For out-of-line asynchronous modesets/flips,
+ * deferring to a new worker seems overkill, but we would place a
+ * schedule point (cond_resched()) here anyway to keep latencies
+ * down.
+ */
+ INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
+ schedule_work(&state->commit_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -12981,6 +13104,19 @@ intel_prepare_plane_fb(struct drm_plane *plane,
add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
}
+ /*
+ * We declare pageflips to be interactive, and so they merit a small
+ * bias towards upclocking to deliver the frame on time. By only
+ * changing the RPS thresholds to sample more regularly and aim for
+ * higher clocks, we can hopefully deliver low-power workloads (like
+ * kodi) that are not quite steady state, without resorting to forcing
+ * maximum clocks following a vblank miss (see do_rps_boost()).
+ */
+ if (!intel_state->rps_interactive) {
+ intel_rps_mark_interactive(dev_priv, true);
+ intel_state->rps_interactive = true;
+ }
+
return 0;
}
@@ -12997,8 +13133,15 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(old_state->state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ if (intel_state->rps_interactive) {
+ intel_rps_mark_interactive(dev_priv, false);
+ intel_state->rps_interactive = false;
+ }
+
/* Should only be called after a successful intel_prepare_plane_fb()! */
mutex_lock(&dev_priv->drm.struct_mutex);
intel_plane_unpin_fb(to_intel_plane_state(old_state));
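Because several atomic states can be in flight at once, the per-state
rps_interactive flag keeps each state's mark/unmark calls balanced, while
intel_rps_mark_interactive() itself is expected to refcount overlapping users.
A hypothetical sketch of that shape (illustrative names, not the real
intel_pm.c implementation):

	static atomic_t example_interactive_count; /* hypothetical */

	static void example_mark_interactive(bool interactive)
	{
		/* retune RPS thresholds only on 0 <-> 1 transitions */
		if (interactive) {
			if (atomic_inc_return(&example_interactive_count) == 1)
				example_set_interactive_thresholds(); /* hypothetical */
		} else {
			if (atomic_dec_return(&example_interactive_count) == 0)
				example_set_default_thresholds(); /* hypothetical */
		}
	}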
@@ -13181,8 +13324,17 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
@@ -13195,8 +13347,17 @@ static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool i965_mod_supported(uint32_t format, uint64_t modifier)
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
@@ -13211,8 +13372,26 @@ static bool i965_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (!plane->has_ccs)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -13244,38 +13423,36 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
-
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
- modifier != DRM_FORMAT_MOD_LINEAR)
- return false;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_mod_supported(format, modifier);
- else if (INTEL_GEN(dev_priv) >= 4)
- return i965_mod_supported(format, modifier);
- else
- return i8xx_mod_supported(format, modifier);
+ return modifier == DRM_FORMAT_MOD_LINEAR &&
+ format == DRM_FORMAT_ARGB8888;
}
-static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
-{
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
+static const struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
+};
- return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
-}
+static const struct drm_plane_funcs i965_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i965_plane_format_mod_supported,
+};
-static struct drm_plane_funcs intel_plane_funcs = {
+static const struct drm_plane_funcs i8xx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@@ -13283,7 +13460,7 @@ static struct drm_plane_funcs intel_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_primary_plane_format_mod_supported,
+ .format_mod_supported = i8xx_plane_format_mod_supported,
};
static int
@@ -13408,7 +13585,7 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_cursor_plane_format_mod_supported,
+ .format_mod_supported = intel_cursor_format_mod_supported,
};
static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
@@ -13466,6 +13643,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
+ const struct drm_plane_funcs *plane_funcs;
const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
unsigned int num_formats;
@@ -13521,6 +13699,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->check_plane = intel_check_primary_plane;
if (INTEL_GEN(dev_priv) >= 9) {
+ primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+ PLANE_PRIMARY);
+
if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
intel_primary_formats = skl_pri_planar_formats;
num_formats = ARRAY_SIZE(skl_pri_planar_formats);
@@ -13529,7 +13710,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(skl_primary_formats);
}
- if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
+ if (primary->has_ccs)
modifiers = skl_format_modifiers_ccs;
else
modifiers = skl_format_modifiers_noccs;
@@ -13537,6 +13718,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
primary->get_hw_state = skl_plane_get_hw_state;
+
+ plane_funcs = &skl_plane_funcs;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13545,6 +13728,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
+
+ plane_funcs = &i965_plane_funcs;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13553,25 +13738,27 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
+
+ plane_funcs = &i8xx_plane_funcs;
}
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
@@ -13951,7 +14138,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (intel_crt_present(dev_priv))
intel_crt_init(dev_priv);
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
+ intel_ddi_init(dev_priv, PORT_F);
+ } else if (IS_GEN9_LP(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
@@ -13961,7 +14155,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
- intel_dsi_init(dev_priv);
+ vlv_dsi_init(dev_priv);
} else if (HAS_DDI(dev_priv)) {
int found;
@@ -14067,7 +14261,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
}
- intel_dsi_init(dev_priv);
+ vlv_dsi_init(dev_priv);
} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
bool found = false;
@@ -14124,14 +14318,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
drm_framebuffer_cleanup(fb);
- i915_gem_object_lock(intel_fb->obj);
- WARN_ON(!intel_fb->obj->framebuffer_references--);
- i915_gem_object_unlock(intel_fb->obj);
+ i915_gem_object_lock(obj);
+ WARN_ON(!obj->framebuffer_references--);
+ i915_gem_object_unlock(obj);
- i915_gem_object_put(intel_fb->obj);
+ i915_gem_object_put(obj);
kfree(intel_fb);
}
@@ -14140,8 +14335,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int *handle)
{
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
if (obj->userptr.mm) {
DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
@@ -14349,11 +14543,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
break;
case DRM_FORMAT_NV12:
- if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED_CCS ||
- mode_cmd->modifier[0] == I915_FORMAT_MOD_Yf_TILED_CCS) {
- DRM_DEBUG_KMS("RC not to be enabled with NV12\n");
- goto err;
- }
if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("unsupported pixel format: %s\n",
@@ -14411,9 +14600,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
i, fb->pitches[i], stride_alignment);
goto err;
}
- }
- intel_fb->obj = obj;
+ fb->obj[i] = &obj->base;
+ }
ret = intel_fill_fb_info(dev_priv, fb);
if (ret)
@@ -14469,6 +14658,10 @@ static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int hdisplay_max, htotal_max;
+ int vdisplay_max, vtotal_max;
+
/*
* Can't reject DBLSCAN here because Xorg ddxen can add piles
* of DBLSCAN modes to the output's mode list when they detect
@@ -14498,6 +14691,36 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
+ vdisplay_max = 4096;
+ htotal_max = 8192;
+ vtotal_max = 8192;
+ } else if (INTEL_GEN(dev_priv) >= 3) {
+ hdisplay_max = 4096;
+ vdisplay_max = 4096;
+ htotal_max = 8192;
+ vtotal_max = 8192;
+ } else {
+ hdisplay_max = 2048;
+ vdisplay_max = 2048;
+ htotal_max = 4096;
+ vtotal_max = 4096;
+ }
+
+ if (mode->hdisplay > hdisplay_max ||
+ mode->hsync_start > htotal_max ||
+ mode->hsync_end > htotal_max ||
+ mode->htotal > htotal_max)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > vdisplay_max ||
+ mode->vsync_start > vtotal_max ||
+ mode->vsync_end > vtotal_max ||
+ mode->vtotal > vtotal_max)
+ return MODE_V_ILLEGAL;
+
return MODE_OK;
}
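Worked example of the limits above: on a gen3 part a 5120x2880 mode exceeds
hdisplay_max = 4096 and comes back as MODE_H_ILLEGAL; the drm probe helpers
then record that status and prune the mode, roughly:

	enum drm_mode_status status = intel_mode_valid(dev, mode);

	if (status != MODE_OK)
		mode->status = status; /* core drops it from the mode list */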
@@ -14646,6 +14869,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
DRM_INFO("Applying T12 delay quirk\n");
}
+/*
+ * GeminiLake NUC HDMI outputs require additional off time to allow the
+ * onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+ DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -14732,6 +14967,13 @@ static struct intel_quirk intel_quirks[] = {
/* Toshiba Satellite P50-C-18C */
{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+ /* GeminiLake NUC */
+ { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ /* ASRock ITX */
+ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};
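The table entries only set QUIRK_INCREASE_DDI_DISABLED_TIME in
dev_priv->quirks; the encoder code tests the flag when timing the DDI disable.
A hedged sketch of such a consumer (the real check lives in intel_ddi.c and
its timing differs):

	/* hypothetical helper showing how the quirk bit is consumed */
	static unsigned int example_ddi_disabled_time_ms(struct drm_i915_private *dev_priv)
	{
		unsigned int ms = 8; /* hypothetical baseline */

		if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME)
			ms *= 2; /* extra off time for the onboard retimer */

		return ms;
	}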
static void intel_init_quirks(struct drm_device *dev)
@@ -14936,6 +15178,7 @@ int intel_modeset_init(struct drm_device *dev)
}
}
+ /* maximum framebuffer dimensions */
if (IS_GEN2(dev_priv)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
@@ -14951,11 +15194,11 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
} else if (IS_GEN2(dev_priv)) {
- dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
- dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
+ dev->mode_config.cursor_width = 64;
+ dev->mode_config.cursor_height = 64;
} else {
- dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
- dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
+ dev->mode_config.cursor_width = 256;
+ dev->mode_config.cursor_height = 256;
}
dev->mode_config.fb_base = ggtt->gmadr.start;
@@ -15105,8 +15348,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
- WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe));
@@ -15120,12 +15363,12 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 val = I915_READ(DSPCNTR(i9xx_plane));
+ enum pipe pipe;
- return (val & DISPLAY_PLANE_ENABLE) == 0 ||
- (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+ if (!plane->get_hw_state(plane, &pipe))
+ return true;
+
+ return pipe == crtc->pipe;
}
static void
@@ -15284,6 +15527,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
+
+ /* notify opregion of the sanitized encoder state */
+ intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15324,7 +15570,10 @@ static void readout_plane_state(struct intel_crtc *crtc)
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- bool visible = plane->get_hw_state(plane);
+ enum pipe pipe;
+ bool visible;
+
+ visible = plane->get_hw_state(plane, &pipe);
intel_set_plane_visible(crtc_state, plane_state, visible);
}
@@ -15423,9 +15672,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* rely on the connector_mask being accurate.
*/
encoder->base.crtc->state->connector_mask |=
- 1 << drm_connector_index(&connector->base);
+ drm_connector_mask(&connector->base);
encoder->base.crtc->state->encoder_mask |=
- 1 << drm_encoder_index(&encoder->base);
+ drm_encoder_mask(&encoder->base);
}
} else {
@@ -15491,11 +15740,20 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
for_each_intel_encoder(&dev_priv->drm, encoder) {
u64 get_domains;
enum intel_display_power_domain domain;
+ struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
continue;
- get_domains = encoder->get_power_domains(encoder);
+ /*
+ * MST-primary and inactive encoders don't have a crtc state, and
+ * neither of them requires any power domain references.
+ */
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
+ get_domains = encoder->get_power_domains(encoder, crtc_state);
for_each_power_domain(domain, get_domains)
intel_display_power_get(dev_priv, domain);
}
@@ -15671,6 +15929,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ flush_workqueue(dev_priv->modeset_wq);
+
flush_work(&dev_priv->atomic_helper.free_work);
WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
@@ -15714,8 +15974,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder)
{
connector->encoder = encoder;
- drm_mode_connector_attach_encoder(&connector->base,
- &encoder->base);
+ drm_connector_attach_encoder(&connector->base, &encoder->base);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 2ef31617614a..138a1bc1818c 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -126,6 +126,41 @@ enum port {
#define port_name(p) ((p) + 'A')
+/*
+ * Port identifiers referenced from other drivers.
+ * Expected to remain stable over time.
+ */
+static inline const char *port_identifier(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return "Port A";
+ case PORT_B:
+ return "Port B";
+ case PORT_C:
+ return "Port C";
+ case PORT_D:
+ return "Port D";
+ case PORT_E:
+ return "Port E";
+ case PORT_F:
+ return "Port F";
+ default:
+ return "<invalid>";
+ }
+}
+
+enum tc_port {
+ PORT_TC_NONE = -1,
+
+ PORT_TC1 = 0,
+ PORT_TC2,
+ PORT_TC3,
+ PORT_TC4,
+
+ I915_MAX_TC_PORTS
+};
+
enum dpio_channel {
DPIO_CH0,
DPIO_CH1
@@ -144,7 +179,7 @@ enum aux_ch {
AUX_CH_B,
AUX_CH_C,
AUX_CH_D,
- _AUX_CH_E, /* does not exist */
+ AUX_CH_E, /* ICL+ */
AUX_CH_F,
};
@@ -185,8 +220,13 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
POWER_DOMAIN_AUX_IO_A,
+ POWER_DOMAIN_AUX_TBT1,
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
@@ -249,7 +289,7 @@ struct intel_link_m_n {
&(dev)->mode_config.plane_list, \
base.head) \
for_each_if((plane_mask) & \
- BIT(drm_plane_index(&intel_plane->base)))
+ drm_plane_mask(&intel_plane->base)))
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
@@ -266,13 +306,17 @@ struct intel_link_m_n {
list_for_each_entry(intel_crtc, \
&(dev)->mode_config.crtc_list, \
base.head) \
- for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base)))
+ for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base))
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
&(dev)->mode_config.encoder_list, \
base.head)
+#define for_each_intel_dp(dev, intel_encoder) \
+ for_each_intel_encoder(dev, intel_encoder) \
+ for_each_if(intel_encoder_is_dp(intel_encoder))
+
#define for_each_intel_connector_iter(intel_connector, iter) \
while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 16faea30114a..cd0f649b57a5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -56,7 +56,7 @@ struct dp_link_dpll {
struct dpll dpll;
};
-static const struct dp_link_dpll gen4_dpll[] = {
+static const struct dp_link_dpll g4x_dpll[] = {
{ 162000,
{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
{ 270000,
@@ -256,6 +256,17 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
return 810000;
}
+static int icl_max_source_rate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum port port = dig_port->base.port;
+
+ if (port == PORT_B)
+ return 540000;
+
+ return 810000;
+}
+
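The per-platform max_rate is then used to trim the advertised source rate
table. A minimal sketch of that clamp, assuming an ascending sorted table
(the file's own helper for this may be named differently):

	static int example_rate_limit_len(const int *rates, int len, int max_rate)
	{
		while (len > 0 && rates[len - 1] > max_rate)
			len--;

		return len;
	}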
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
@@ -285,10 +296,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
/* This should only be done once */
WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
- if (IS_CANNONLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- max_rate = cnl_max_source_rate(intel_dp);
+ if (INTEL_GEN(dev_priv) == 10)
+ max_rate = cnl_max_source_rate(intel_dp);
+ else
+ max_rate = icl_max_source_rate(intel_dp);
} else if (IS_GEN9_LP(dev_priv)) {
source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
@@ -516,7 +530,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
uint32_t DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power seqeuncer kick due to port %c being active\n",
+ "skipping pipe %c power sequencer kick due to port %c being active\n",
pipe_name(pipe), port_name(intel_dig_port->base.port)))
return;
@@ -532,9 +546,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
DP |= DP_LINK_TRAIN_PAT_1;
if (IS_CHERRYVIEW(dev_priv))
- DP |= DP_PIPE_SELECT_CHV(pipe);
- else if (pipe == PIPE_B)
- DP |= DP_PIPEB_SELECT;
+ DP |= DP_PIPE_SEL_CHV(pipe);
+ else
+ DP |= DP_PIPE_SEL(pipe);
pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
@@ -557,7 +571,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
/*
* Similar magic as in intel_dp_enable_port().
* We _must_ do this port enable + disable trick
- * to make this power seqeuencer lock onto the port.
+ * to make this power sequencer lock onto the port.
* Otherwise even VDD force bit won't work.
*/
I915_WRITE(intel_dp->output_reg, DP);
@@ -586,14 +600,8 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
* We don't have power sequencer currently.
* Pick one that's not used by other ports.
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp;
-
- if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- intel_dp = enc_to_intel_dp(&encoder->base);
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
if (encoder->type == INTEL_OUTPUT_EDP) {
WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
@@ -785,19 +793,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
* should use them always.
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp;
-
- if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP &&
- encoder->type != INTEL_OUTPUT_DDI)
- continue;
-
- intel_dp = enc_to_intel_dp(&encoder->base);
-
- /* Skip pure DVI/HDMI DDI encoders */
- if (!i915_mmio_reg_valid(intel_dp->output_reg))
- continue;
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
@@ -939,7 +936,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
}
static uint32_t
-intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
@@ -947,14 +944,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
bool done;
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- if (has_aux_irq)
- done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(10));
- else
- done = wait_for(C, 10) == 0;
+ done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+ msecs_to_jiffies_timeout(10));
if (!done)
- DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
- has_aux_irq);
+ DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C
return status;
@@ -1019,7 +1012,6 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
}
static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
- bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider)
{
@@ -1040,7 +1032,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
return DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_INTERRUPT |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
timeout |
DP_AUX_CH_CTL_RECEIVE_ERROR |
@@ -1050,13 +1042,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
}
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
- bool has_aux_irq,
int send_bytes,
uint32_t unused)
{
return DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_INTERRUPT |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR |
@@ -1079,7 +1070,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
int i, ret, recv_bytes;
uint32_t status;
int try, clock = 0;
- bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
bool vdd;
ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
@@ -1134,7 +1124,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
- has_aux_irq,
send_bytes,
aux_clock_divider);
@@ -1151,7 +1140,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl, send_ctl);
- status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+ status = intel_dp_aux_wait_done(intel_dp);
/* Clear done status and any errors */
I915_WRITE(ch_ctl,
@@ -1350,6 +1339,9 @@ static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
case DP_AUX_D:
aux_ch = AUX_CH_D;
break;
+ case DP_AUX_E:
+ aux_ch = AUX_CH_E;
+ break;
case DP_AUX_F:
aux_ch = AUX_CH_F;
break;
@@ -1377,6 +1369,8 @@ intel_aux_power_domain(struct intel_dp *intel_dp)
return POWER_DOMAIN_AUX_C;
case AUX_CH_D:
return POWER_DOMAIN_AUX_D;
+ case AUX_CH_E:
+ return POWER_DOMAIN_AUX_E;
case AUX_CH_F:
return POWER_DOMAIN_AUX_F;
default:
@@ -1463,6 +1457,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_B:
case AUX_CH_C:
case AUX_CH_D:
+ case AUX_CH_E:
case AUX_CH_F:
return DP_AUX_CH_CTL(aux_ch);
default:
@@ -1481,6 +1476,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_B:
case AUX_CH_C:
case AUX_CH_D:
+ case AUX_CH_E:
case AUX_CH_F:
return DP_AUX_CH_DATA(aux_ch, index);
default:
@@ -1544,6 +1540,13 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
return max_rate >= 540000;
}
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
+{
+ int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
+
+ return max_rate >= 810000;
+}
+
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -1553,8 +1556,8 @@ intel_dp_set_clock(struct intel_encoder *encoder,
int i, count = 0;
if (IS_G4X(dev_priv)) {
- divisor = gen4_dpll;
- count = ARRAY_SIZE(gen4_dpll);
+ divisor = g4x_dpll;
+ count = ARRAY_SIZE(g4x_dpll);
} else if (HAS_PCH_SPLIT(dev_priv)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
@@ -1970,7 +1973,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
- if (IS_GEN7(dev_priv) && port == PORT_A) {
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1980,7 +1983,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- intel_dp->DP |= crtc->pipe << 29;
+ intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp;
@@ -2006,9 +2009,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
if (IS_CHERRYVIEW(dev_priv))
- intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
- else if (crtc->pipe == PIPE_B)
- intel_dp->DP |= DP_PIPEB_SELECT;
+ intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
+ else
+ intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
}
}
@@ -2630,52 +2633,66 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
+static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+ enum port port, enum pipe *pipe)
+{
+ enum pipe p;
+
+ for_each_pipe(dev_priv, p) {
+ u32 val = I915_READ(TRANS_DP_CTL(p));
+
+ if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
+ *pipe = p;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
+
+ /* must initialize pipe to something for the asserts */
+ *pipe = PIPE_A;
+
+ return false;
+}
+
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe)
+{
+ bool ret;
+ u32 val;
+
+ val = I915_READ(dp_reg);
+
+ ret = val & DP_PORT_EN;
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
+ else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+ ret &= cpt_dp_port_selected(dev_priv, port, pipe);
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ else
+ *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+
+ return ret;
+}
+
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = encoder->port;
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
+ ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, pipe);
- tmp = I915_READ(intel_dp->output_reg);
-
- if (!(tmp & DP_PORT_EN))
- goto out;
-
- if (IS_GEN7(dev_priv) && port == PORT_A) {
- *pipe = PORT_TO_PIPE_CPT(tmp);
- } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- enum pipe p;
-
- for_each_pipe(dev_priv, p) {
- u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
- if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
- *pipe = p;
- ret = true;
-
- goto out;
- }
- }
-
- DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
- i915_mmio_reg_offset(intel_dp->output_reg));
- } else if (IS_CHERRYVIEW(dev_priv)) {
- *pipe = DP_PORT_TO_PIPE_CHV(tmp);
- } else {
- *pipe = PORT_TO_PIPE(tmp);
- }
-
- ret = true;
-
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -2796,10 +2813,6 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
- intel_psr_disable(intel_dp, old_crtc_state);
-
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
@@ -2854,10 +2867,11 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
+ uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
- if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
+ if (dp_train_pat & train_pat_mask)
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
- dp_train_pat & DP_TRAINING_PATTERN_MASK);
+ dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2868,7 +2882,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ switch (dp_train_pat & train_pat_mask) {
case DP_TRAINING_PATTERN_DISABLE:
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
@@ -2882,10 +2896,13 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
case DP_TRAINING_PATTERN_3:
temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
break;
+ case DP_TRAINING_PATTERN_4:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
+ break;
}
I915_WRITE(DP_TP_CTL(port), temp);
- } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
@@ -3008,10 +3025,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
intel_edp_backlight_on(pipe_config, conn_state);
- intel_psr_enable(intel_dp, pipe_config);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
@@ -3043,11 +3057,11 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
edp_panel_vdd_off_sync(intel_dp);
/*
- * VLV seems to get confused when multiple power seqeuencers
+ * VLV seems to get confused when multiple power sequencers
* have the same port selected (even if only one has power/vdd
* enabled). The failure manifests as vlv_wait_port_ready() failing
* CHV on the other hand doesn't seem to mind having the same port
- * selected in multiple power seqeuencers, but let's clear the
+ * selected in multiple power sequencers, but let's clear the
* port select always when logically disconnecting a power sequencer
* from a port.
*/
@@ -3066,16 +3080,9 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->pps_mutex);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp;
- enum port port;
-
- if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- intel_dp = enc_to_intel_dp(&encoder->base);
- port = dp_to_dig_port(intel_dp)->base.port;
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = encoder->port;
WARN(intel_dp->active_pipe == pipe,
"stealing pipe %c power sequencer from active (e)DP port %c\n",
@@ -3197,14 +3204,14 @@ uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->base.port;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum port port = encoder->port;
- if (INTEL_GEN(dev_priv) >= 9) {
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ if (HAS_DDI(dev_priv))
return intel_ddi_dp_voltage_max(encoder);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (IS_GEN7(dev_priv) && port == PORT_A)
+ else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
@@ -3216,33 +3223,11 @@ uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->base.port;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum port port = encoder->port;
- if (INTEL_GEN(dev_priv) >= 9) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
+ if (HAS_DDI(dev_priv)) {
+ return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
@@ -3255,7 +3240,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_GEN7(dev_priv) && port == PORT_A) {
+ } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3450,7 +3435,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
}
static uint32_t
-gen4_signal_levels(uint8_t train_set)
+g4x_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -3487,9 +3472,9 @@ gen4_signal_levels(uint8_t train_set)
return signal_levels;
}
-/* Gen6's DP voltage swing and pre-emphasis control */
+/* SNB CPU eDP voltage swing and pre-emphasis control */
static uint32_t
-gen6_edp_signal_levels(uint8_t train_set)
+snb_cpu_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3515,9 +3500,9 @@ gen6_edp_signal_levels(uint8_t train_set)
}
}
-/* Gen7's DP voltage swing and pre-emphasis control */
+/* IVB CPU eDP voltage swing and pre-emphasis control */
static uint32_t
-gen7_edp_signal_levels(uint8_t train_set)
+ivb_cpu_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3564,14 +3549,14 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
- } else if (IS_GEN7(dev_priv) && port == PORT_A) {
- signal_levels = gen7_edp_signal_levels(train_set);
+ } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+ signal_levels = ivb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev_priv) && port == PORT_A) {
- signal_levels = gen6_edp_signal_levels(train_set);
+ signal_levels = snb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
- signal_levels = gen4_signal_levels(train_set);
+ signal_levels = g4x_signal_levels(train_set);
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
}
@@ -3654,7 +3639,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
- if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
@@ -3683,8 +3668,9 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
/* always enable with pattern 1 (as per spec) */
- DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
- DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
+ DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
+ DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
+ DP_LINK_TRAIN_PAT_1;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
@@ -3739,8 +3725,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
- intel_psr_init_dpcd(intel_dp);
-
/*
* Read the eDP display control registers.
*
@@ -3756,6 +3740,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
+ /*
+ * This has to be called after intel_dp->edp_dpcd is filled; PSR checks
+ * for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
+ */
+ intel_psr_init_dpcd(intel_dp);
+
/* Read the eDP 1.4+ supported link rates. */
if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -3884,129 +3874,6 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
intel_dp->is_mst);
}
-static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state, bool disable_wa)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- u8 buf;
- int ret = 0;
- int count = 0;
- int attempts = 10;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
- DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
- ret = -EIO;
- goto out;
- }
-
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf & ~DP_TEST_SINK_START) < 0) {
- DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
- ret = -EIO;
- goto out;
- }
-
- do {
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_TEST_SINK_MISC, &buf) < 0) {
- ret = -EIO;
- goto out;
- }
- count = buf & DP_TEST_COUNT_MASK;
- } while (--attempts && count);
-
- if (attempts == 0) {
- DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
- ret = -ETIMEDOUT;
- }
-
- out:
- if (disable_wa)
- hsw_enable_ips(crtc_state);
- return ret;
-}
-
-static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- u8 buf;
- int ret;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
- return -EIO;
-
- if (!(buf & DP_TEST_CRC_SUPPORTED))
- return -ENOTTY;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
- return -EIO;
-
- if (buf & DP_TEST_SINK_START) {
- ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
- if (ret)
- return ret;
- }
-
- hsw_disable_ips(crtc_state);
-
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf | DP_TEST_SINK_START) < 0) {
- hsw_enable_ips(crtc_state);
- return -EIO;
- }
-
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
- return 0;
-}
-
-int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- u8 buf;
- int count, ret;
- int attempts = 6;
-
- ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
- if (ret)
- return ret;
-
- do {
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_TEST_SINK_MISC, &buf) < 0) {
- ret = -EIO;
- goto stop;
- }
- count = buf & DP_TEST_COUNT_MASK;
-
- } while (--attempts && count == 0);
-
- if (attempts == 0) {
- DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
- ret = -ETIMEDOUT;
- goto stop;
- }
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
- ret = -EIO;
- goto stop;
- }
-
-stop:
- intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
- return ret;
-}
-
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
@@ -4466,10 +4333,15 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
+ /* Handle CEC interrupts, if any */
+ drm_dp_cec_irq(&intel_dp->aux);
+
/* defer to the hotplug work for link retraining if needed */
if (intel_dp_needs_link_retrain(intel_dp))
return false;
+ intel_psr_short_pulse(intel_dp);
+
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */
@@ -4537,14 +4409,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum drm_connector_status status;
-
- status = intel_panel_detect(dev_priv);
- if (status == connector_status_unknown)
- status = connector_status_connected;
-
- return status;
+ return connector_status_connected;
}
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
@@ -4780,6 +4645,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
intel_connector->detect_edid = edid;
intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ drm_dp_cec_set_edid(&intel_dp->aux, edid);
}
static void
@@ -4787,6 +4653,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
+ drm_dp_cec_unset_edid(&intel_dp->aux);
kfree(intel_connector->detect_edid);
intel_connector->detect_edid = NULL;
@@ -4805,7 +4672,7 @@ intel_dp_long_pulse(struct intel_connector *connector)
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
- /* Can't disconnect eDP, but you can close the lid... */
+ /* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
@@ -4975,6 +4842,7 @@ static int
intel_dp_connector_register(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = connector->dev;
int ret;
ret = intel_connector_register(connector);
@@ -4987,13 +4855,20 @@ intel_dp_connector_register(struct drm_connector *connector)
intel_dp->aux.name, connector->kdev->kobj.name);
intel_dp->aux.dev = connector->kdev;
- return drm_dp_aux_register(&intel_dp->aux);
+ ret = drm_dp_aux_register(&intel_dp->aux);
+ if (!ret)
+ drm_dp_cec_register_connector(&intel_dp->aux,
+ connector->name, dev->dev);
+ return ret;
}
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
- drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ drm_dp_cec_unregister_connector(&intel_dp->aux);
+ drm_dp_aux_unregister(&intel_dp->aux);
intel_connector_unregister(connector);
}
@@ -5319,14 +5194,14 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum pipe pipe;
- if ((intel_dp->DP & DP_PORT_EN) == 0)
- return INVALID_PIPE;
+ if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, &pipe))
+ return pipe;
- if (IS_CHERRYVIEW(dev_priv))
- return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
- else
- return PORT_TO_PIPE(intel_dp->DP);
+ return INVALID_PIPE;
}
void intel_dp_encoder_reset(struct drm_encoder *encoder)
@@ -5675,7 +5550,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
/*
* On some VLV machines the BIOS can leave the VDD
- * enabled even on power seqeuencers which aren't
+ * enabled even on power sequencers which aren't
* hooked up to any port. This would mess up the
* power domain tracking the first time we pick
* one of these power sequencers for use since
@@ -5683,7 +5558,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
* already on and therefore wouldn't grab the power
* domain reference. Disable VDD first to avoid this.
* This also avoids spuriously turning the VDD on as
- * soon as the new power seqeuencer gets initialized.
+ * soon as the new power sequencer gets initialized.
*/
if (force_disable_vdd) {
u32 pp = ironlake_get_pp_control(intel_dp);
@@ -5721,10 +5596,20 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- if (port == PORT_A)
+ switch (port) {
+ case PORT_A:
port_sel = PANEL_PORT_SELECT_DPA;
- else
+ break;
+ case PORT_C:
+ port_sel = PANEL_PORT_SELECT_DPC;
+ break;
+ case PORT_D:
port_sel = PANEL_PORT_SELECT_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ break;
+ }
}
pp_on |= port_sel;
@@ -6179,7 +6064,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
edid);
} else {
kfree(edid);
@@ -6268,8 +6153,8 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
/* Set connector link status to BAD and send a Uevent to notify
* userspace to do a modeset.
*/
- drm_mode_connector_set_link_status_property(connector,
- DRM_MODE_LINK_STATUS_BAD);
+ drm_connector_set_link_status_property(connector,
+ DRM_MODE_LINK_STATUS_BAD);
mutex_unlock(&connector->dev->mode_config.mutex);
/* Send Hotplug uevent so userspace can reprobe */
drm_kms_helper_hotplug_event(connector->dev);
@@ -6382,7 +6267,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
+ if (IS_G45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -6462,7 +6347,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->port = port;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hotplug.irq_port[port] = intel_dig_port;
if (port != PORT_A)
intel_infoframe_init(intel_dig_port);
@@ -6481,37 +6365,44 @@ err_connector_alloc:
return false;
}
-void intel_dp_mst_suspend(struct drm_device *dev)
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
+ struct intel_encoder *encoder;
- /* disable MST */
- for (i = 0; i < I915_MAX_PORTS; i++) {
- struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp;
- if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+ if (encoder->type != INTEL_OUTPUT_DDI)
continue;
- if (intel_dig_port->dp.is_mst)
- drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (!intel_dp->can_mst)
+ continue;
+
+ if (intel_dp->is_mst)
+ drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
}
}
-void intel_dp_mst_resume(struct drm_device *dev)
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
+ struct intel_encoder *encoder;
- for (i = 0; i < I915_MAX_PORTS; i++) {
- struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp;
int ret;
- if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+ if (encoder->type != INTEL_OUTPUT_DDI)
+ continue;
+
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (!intel_dp->can_mst)
continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
if (ret)
- intel_dp_check_mst_status(&intel_dig_port->dp);
+ intel_dp_check_mst_status(intel_dp);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 2bb2ceb9d463..357136f17f85 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -26,7 +26,7 @@
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
- uint8_t reg_val = 0;
+ u8 reg_val = 0;
/* Early return when display use other mechanism to enable backlight. */
if (!(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP))
@@ -54,11 +54,11 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
* Read the current backlight value from DPCD register(s) based
* on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
*/
-static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t read_val[2] = { 0x0 };
- uint16_t level = 0;
+ u8 read_val[2] = { 0x0 };
+ u16 level = 0;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
@@ -82,7 +82,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t vals[2] = { 0x0 };
+ u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode;
+ u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 3fcaa98b9055..4da6e33c7fa1 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -219,32 +219,47 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
}
/*
- * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
+ * Pick training pattern for channel equalization. Training Pattern 4 for HBR3
+ * or for 1.4 devices that support it, Training Pattern 3 for HBR2
* or 1.2 devices that support it, Training Pattern 2 otherwise.
*/
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
{
- u32 training_pattern = DP_TRAINING_PATTERN_2;
- bool source_tps3, sink_tps3;
+ bool source_tps3, sink_tps3, source_tps4, sink_tps4;
/*
+ * Intel platforms that support HBR3 also support TPS4. It is mandatory
+ * for all downstream devices that support HBR3. There are no known eDP
+ * panels that support TPS4 as of Feb 2018, per the VESA eDP_v1.4b_E1
+ * specification.
+ */
+ source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
+ sink_tps4 = drm_dp_tps4_supported(intel_dp->dpcd);
+ if (source_tps4 && sink_tps4) {
+ return DP_TRAINING_PATTERN_4;
+ } else if (intel_dp->link_rate == 810000) {
+ if (!source_tps4)
+ DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n");
+ if (!sink_tps4)
+ DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n");
+ }
+ /*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2. However, not
* all sinks follow the spec.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
-
if (source_tps3 && sink_tps3) {
- training_pattern = DP_TRAINING_PATTERN_3;
- } else if (intel_dp->link_rate == 540000) {
+ return DP_TRAINING_PATTERN_3;
+ } else if (intel_dp->link_rate >= 540000) {
if (!source_tps3)
- DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
+ DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
if (!sink_tps3)
- DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+ DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
}
- return training_pattern;
+ return DP_TRAINING_PATTERN_2;
}
static bool
@@ -256,11 +271,13 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
bool channel_eq = false;
training_pattern = intel_dp_training_pattern(intel_dp);
+ /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
+ if (training_pattern != DP_TRAINING_PATTERN_4)
+ training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
- training_pattern |
- DP_LINK_SCRAMBLING_DISABLE)) {
+ training_pattern)) {
DRM_ERROR("failed to start channel equalization\n");
return false;
}
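The selection above is a strict preference order (TPS4, then TPS3, then TPS2, with scrambling left enabled only for TPS4). As a standalone sketch with stand-in capability flags; the constants match drm_dp_helper.h:

#include <stdbool.h>
#include <stdint.h>

#define DP_TRAINING_PATTERN_2      0x2
#define DP_TRAINING_PATTERN_3      0x3
#define DP_TRAINING_PATTERN_4      0x7
#define DP_LINK_SCRAMBLING_DISABLE 0x20

/* Sketch of the TPS4 -> TPS3 -> TPS2 preference order above. */
static uint32_t pick_eq_pattern(bool src_tps4, bool sink_tps4,
                                bool src_tps3, bool sink_tps3)
{
        uint32_t pattern;

        if (src_tps4 && sink_tps4)
                pattern = DP_TRAINING_PATTERN_4;   /* HBR3-class links */
        else if (src_tps3 && sink_tps3)
                pattern = DP_TRAINING_PATTERN_3;   /* HBR2-class links */
        else
                pattern = DP_TRAINING_PATTERN_2;   /* lowest common denominator */

        /* Scrambling stays enabled only for TPS4. */
        if (pattern != DP_TRAINING_PATTERN_4)
                pattern |= DP_LINK_SCRAMBLING_DISABLE;

        return pattern;
}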
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 5890500a3a8b..7e3e01607643 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -403,20 +403,10 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
-static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
- if (!intel_dp)
- return NULL;
- return &intel_dp->mst_encoders[0]->base.base;
-}
-
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
- .best_encoder = intel_mst_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
};
@@ -476,8 +466,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
- ret = drm_mode_connector_attach_encoder(&intel_connector->base,
- enc);
+ ret = drm_connector_attach_encoder(&intel_connector->base, enc);
if (ret)
goto err;
}
@@ -485,7 +474,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- ret = drm_mode_connector_set_path_property(connector, pathprop);
+ ret = drm_connector_set_path_property(connector, pathprop);
if (ret)
goto err;
@@ -524,7 +513,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
intel_connector->mst_port = NULL;
drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
- drm_connector_unreference(connector);
+ drm_connector_put(connector);
}
static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 383fbc15113d..b51ad2917dbe 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -163,8 +163,8 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
- unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
- unsigned old_mask;
+ unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ unsigned int old_mask;
if (WARN_ON(pll == NULL))
return;
@@ -207,7 +207,7 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
- unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
+ unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
/* PCH only available on ILK+ */
if (INTEL_GEN(dev_priv) < 5)
@@ -2525,6 +2525,77 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
return true;
}
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+ uint32_t pll_id)
+{
+ uint32_t cfgcr0, cfgcr1;
+ uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
+ const struct skl_wrpll_params *params;
+ int index, n_entries, link_clock;
+
+ /* Read back values from DPLL CFGCR registers */
+ cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+
+ dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
+ dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT;
+ pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
+ kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
+ qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
+ DPLL_CFGCR1_QDIV_MODE_SHIFT;
+ qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+
+ params = dev_priv->cdclk.hw.ref == 24000 ?
+ icl_dp_combo_pll_24MHz_values :
+ icl_dp_combo_pll_19_2MHz_values;
+ n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
+
+ for (index = 0; index < n_entries; index++) {
+ if (dco_integer == params[index].dco_integer &&
+ dco_fraction == params[index].dco_fraction &&
+ pdiv == params[index].pdiv &&
+ kdiv == params[index].kdiv &&
+ qdiv_mode == params[index].qdiv_mode &&
+ qdiv_ratio == params[index].qdiv_ratio)
+ break;
+ }
+
+ /* Map PLL Index to Link Clock */
+ switch (index) {
+ default:
+ MISSING_CASE(index);
+ /* fall through */
+ case 0:
+ link_clock = 540000;
+ break;
+ case 1:
+ link_clock = 270000;
+ break;
+ case 2:
+ link_clock = 162000;
+ break;
+ case 3:
+ link_clock = 324000;
+ break;
+ case 4:
+ link_clock = 216000;
+ break;
+ case 5:
+ link_clock = 432000;
+ break;
+ case 6:
+ link_clock = 648000;
+ break;
+ case 7:
+ link_clock = 810000;
+ break;
+ }
+
+ return link_clock;
+}
+
static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
{
return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
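For reference, the index-to-clock switch in icl_calc_dp_combo_pll_link() is equivalent to a plain lookup; a sketch with hypothetical names:

/*
 * Sketch: the switch above as a lookup table. Index order follows the
 * icl_dp_combo_pll_*_values tables; index 0 (540000) doubles as the
 * MISSING_CASE() fall-through.
 */
static const int combo_pll_link_clocks[] = {
        540000, 270000, 162000, 324000, 216000, 432000, 648000, 810000,
};

static int index_to_link_clock(int index)
{
        if (index < 0 || index > 7)
                index = 0;      /* mirrors the MISSING_CASE() fall-through */
        return combo_pll_link_clocks[index];
}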
@@ -2569,6 +2640,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
switch (div1) {
default:
MISSING_CASE(div1);
+ /* fall through */
case 2:
hsdiv = 0;
break;
@@ -2742,25 +2814,31 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
MG_PLL_SSC_FLLEN |
MG_PLL_SSC_STEPSIZE(ssc_stepsize);
- pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART;
-
- if (refclk_khz != 38400) {
- pll_state->mg_pll_tdc_coldst_bias |=
- MG_PLL_TDC_COLDST_IREFINT_EN |
- MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
- MG_PLL_TDC_COLDST_COLDSTART |
- MG_PLL_TDC_TDCOVCCORR_EN |
- MG_PLL_TDC_TDCSEL(3);
-
- pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
- MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
- MG_PLL_BIAS_BIAS_BONUS(10) |
- MG_PLL_BIAS_BIASCAL_EN |
- MG_PLL_BIAS_CTRIM(12) |
- MG_PLL_BIAS_VREF_RDAC(4) |
- MG_PLL_BIAS_IREFTRIM(iref_trim);
+ pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
+ MG_PLL_TDC_COLDST_IREFINT_EN |
+ MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+ MG_PLL_TDC_TDCOVCCORR_EN |
+ MG_PLL_TDC_TDCSEL(3);
+
+ pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
+ MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+ MG_PLL_BIAS_BIAS_BONUS(10) |
+ MG_PLL_BIAS_BIASCAL_EN |
+ MG_PLL_BIAS_CTRIM(12) |
+ MG_PLL_BIAS_VREF_RDAC(4) |
+ MG_PLL_BIAS_IREFTRIM(iref_trim);
+
+ if (refclk_khz == 38400) {
+ pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+ pll_state->mg_pll_bias_mask = 0;
+ } else {
+ pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ pll_state->mg_pll_bias_mask = -1U;
}
+ pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
+ pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+
return true;
}
@@ -2787,10 +2865,17 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
case PORT_D:
case PORT_E:
case PORT_F:
- min = icl_port_to_mg_pll_id(port);
- max = min;
- ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
- &pll_state);
+ if (0 /* TODO: TBT PLLs */) {
+ min = DPLL_ID_ICL_TBTPLL;
+ max = min;
+ ret = icl_calc_dpll_state(crtc_state, encoder, clock,
+ &pll_state);
+ } else {
+ min = icl_port_to_mg_pll_id(port);
+ max = min;
+ ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
+ &pll_state);
+ }
break;
default:
MISSING_CASE(port);
@@ -2820,9 +2905,12 @@ static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
switch (id) {
default:
MISSING_CASE(id);
+ /* fall through */
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
return CNL_DPLL_ENABLE(id);
+ case DPLL_ID_ICL_TBTPLL:
+ return TBT_PLL_ENABLE;
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
@@ -2850,6 +2938,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
switch (id) {
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
+ case DPLL_ID_ICL_TBTPLL:
hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
break;
@@ -2859,18 +2948,41 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
case DPLL_ID_ICL_MGPLL4:
port = icl_mg_pll_id_to_port(id);
hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
hw_state->mg_clktop2_coreclkctl1 =
I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
hw_state->mg_clktop2_hsclkctl =
I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port));
hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port));
hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port));
hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port));
+
hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port));
hw_state->mg_pll_tdc_coldst_bias =
I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+
+ if (dev_priv->cdclk.hw.ref == 38400) {
+ hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+ hw_state->mg_pll_bias_mask = 0;
+ } else {
+ hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ hw_state->mg_pll_bias_mask = -1U;
+ }
+
+ hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
+ hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
break;
default:
MISSING_CASE(id);
@@ -2898,19 +3010,48 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum port port = icl_mg_pll_id_to_port(pll->info->id);
+ u32 val;
+
+ /*
+ * Some of the following registers have reserved fields, so program
+ * these with RMW based on a mask. The mask can be fixed or generated
+ * during the calc/readout phase if the mask depends on some other HW
+ * state like refclk, see icl_calc_mg_pll_state().
+ */
+ val = I915_READ(MG_REFCLKIN_CTL(port));
+ val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+ val |= hw_state->mg_refclkin_ctl;
+ I915_WRITE(MG_REFCLKIN_CTL(port), val);
+
+ val = I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+ val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+ val |= hw_state->mg_clktop2_coreclkctl1;
+ I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), val);
+
+ val = I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+ val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+ val |= hw_state->mg_clktop2_hsclkctl;
+ I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), val);
- I915_WRITE(MG_REFCLKIN_CTL(port), hw_state->mg_refclkin_ctl);
- I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port),
- hw_state->mg_clktop2_coreclkctl1);
- I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), hw_state->mg_clktop2_hsclkctl);
I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0);
I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1);
I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf);
I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock);
I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc);
- I915_WRITE(MG_PLL_BIAS(port), hw_state->mg_pll_bias);
- I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port),
- hw_state->mg_pll_tdc_coldst_bias);
+
+ val = I915_READ(MG_PLL_BIAS(port));
+ val &= ~hw_state->mg_pll_bias_mask;
+ val |= hw_state->mg_pll_bias;
+ I915_WRITE(MG_PLL_BIAS(port), val);
+
+ val = I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+ val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
+ val |= hw_state->mg_pll_tdc_coldst_bias;
+ I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), val);
+
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port));
}
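The *_mask fields follow an owned-bits convention: a 0 bit in the mask leaves the hardware default untouched by the read-modify-write. A minimal sketch of that convention, with hypothetical names:

#include <stdint.h>

/* Owned-bits RMW: only bits set in 'mask' are taken from 'value'. */
struct rmw_field {
        uint32_t value;   /* bits software intends to program */
        uint32_t mask;    /* bits software owns; 0 = keep HW default */
};

static uint32_t rmw_apply(uint32_t hw, const struct rmw_field *f)
{
        return (hw & ~f->mask) | (f->value & f->mask);
}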
@@ -2936,6 +3077,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
switch (id) {
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
+ case DPLL_ID_ICL_TBTPLL:
icl_dpll_write(dev_priv, pll);
break;
case DPLL_ID_ICL_MGPLL1:
@@ -3034,6 +3176,7 @@ static const struct intel_shared_dpll_funcs icl_pll_funcs = {
static const struct dpll_info icl_plls[] = {
{ "DPLL 0", &icl_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &icl_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
{ "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
{ "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
{ "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 7a0cd564a9ee..7e522cf4f13f 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -114,23 +114,27 @@ enum intel_dpll_id {
*/
DPLL_ID_ICL_DPLL1 = 1,
/**
+ * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
+ */
+ DPLL_ID_ICL_TBTPLL = 2,
+ /**
* @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
*/
- DPLL_ID_ICL_MGPLL1 = 2,
+ DPLL_ID_ICL_MGPLL1 = 3,
/**
* @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
*/
- DPLL_ID_ICL_MGPLL2 = 3,
+ DPLL_ID_ICL_MGPLL2 = 4,
/**
* @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
*/
- DPLL_ID_ICL_MGPLL3 = 4,
+ DPLL_ID_ICL_MGPLL3 = 5,
/**
* @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
*/
- DPLL_ID_ICL_MGPLL4 = 5,
+ DPLL_ID_ICL_MGPLL4 = 6,
};
-#define I915_NUM_PLLS 6
+#define I915_NUM_PLLS 7
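Since icl_mg_pll_id_to_port() computes the port as id - DPLL_ID_ICL_MGPLL1 + PORT_C, the MG PLL to port mapping is unchanged by the renumbering; spelling it out:

/*
 * Sanity check of the renumbering (values from the enum above):
 *   DPLL_ID_ICL_MGPLL1 (3) - 3 + PORT_C -> PORT_C
 *   DPLL_ID_ICL_MGPLL2 (4) - 3 + PORT_C -> PORT_D
 *   DPLL_ID_ICL_MGPLL3 (5) - 3 + PORT_C -> PORT_E
 *   DPLL_ID_ICL_MGPLL4 (6) - 3 + PORT_C -> PORT_F
 */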
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@@ -176,6 +180,8 @@ struct intel_dpll_hw_state {
uint32_t mg_pll_ssc;
uint32_t mg_pll_bias;
uint32_t mg_pll_tdc_coldst_bias;
+ uint32_t mg_pll_bias_mask;
+ uint32_t mg_pll_tdc_coldst_bias_mask;
};
/**
@@ -336,5 +342,7 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+ uint32_t pll_id);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0361130500a6..8fc61e96754f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -39,6 +39,7 @@
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
+#include <media/cec-notifier.h>
/**
* __wait_for - magic wait macro
@@ -158,12 +159,6 @@
#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
-/* Maximum cursor sizes */
-#define GEN2_CURSOR_WIDTH 64
-#define GEN2_CURSOR_HEIGHT 64
-#define MAX_CURSOR_WIDTH 256
-#define MAX_CURSOR_HEIGHT 256
-
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
@@ -194,7 +189,6 @@ enum intel_output_type {
struct intel_framebuffer {
struct drm_framebuffer base;
- struct drm_i915_gem_object *obj;
struct intel_rotation_info rot_info;
/* for each plane in the normal GTT view */
@@ -261,7 +255,8 @@ struct intel_encoder {
struct intel_crtc_state *pipe_config);
/* Returns a mask of power domains that need to be referenced as part
* of the hardware state readout code. */
- u64 (*get_power_domains)(struct intel_encoder *encoder);
+ u64 (*get_power_domains)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
@@ -310,6 +305,8 @@ struct intel_panel {
} backlight;
};
+struct intel_digital_port;
+
/*
* This structure serves as a translation layer between the generic HDCP code
* and the bus-specific code. What that means is that HDCP over HDMI differs
@@ -488,6 +485,8 @@ struct intel_atomic_state {
*/
bool skip_intermediate_wm;
+ bool rps_interactive;
+
/* Gen9+ only */
struct skl_ddb_values wm_results;
@@ -953,6 +952,7 @@ struct intel_plane {
enum pipe pipe;
bool can_scale;
bool has_fbc;
+ bool has_ccs;
int max_downscale;
uint32_t frontbuffer_bit;
@@ -971,7 +971,7 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc);
- bool (*get_hw_state)(struct intel_plane *plane);
+ bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
@@ -1004,7 +1004,7 @@ struct cxsr_latency {
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
-#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
+#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi {
i915_reg_t hdmi_reg;
@@ -1017,6 +1017,7 @@ struct intel_hdmi {
bool has_audio;
bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
+ struct cec_notifier *cec_notifier;
};
struct intel_dp_mst_encoder;
@@ -1139,7 +1140,6 @@ struct intel_dp {
* register with to kick off an AUX transaction.
*/
uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
- bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
@@ -1252,22 +1252,29 @@ intel_attached_encoder(struct drm_connector *connector)
return to_intel_connector(connector)->encoder;
}
-static inline struct intel_digital_port *
-enc_to_dig_port(struct drm_encoder *encoder)
+static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
- switch (intel_encoder->type) {
+ switch (encoder->type) {
case INTEL_OUTPUT_DDI:
- WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ if (intel_encoder_is_dig_port(intel_encoder))
return container_of(encoder, struct intel_digital_port,
base.base);
- default:
+ else
return NULL;
- }
}
static inline struct intel_dp_mst_encoder *
@@ -1281,6 +1288,20 @@ static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
return &enc_to_dig_port(encoder)->dp;
}
+static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
+{
+ switch (encoder->type) {
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_EDP:
+ return true;
+ case INTEL_OUTPUT_DDI:
+ /* Skip pure HDMI/DVI DDI encoders */
+ return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+ default:
+ return false;
+ }
+}
+
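A hypothetical caller of the new predicate, mirroring the encoder walk used by the MST suspend/resume rework earlier in this patch:

/* Hypothetical usage sketch: act on every DP-capable encoder. */
static void for_each_dp_encoder(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                if (!intel_encoder_is_dp(encoder))
                        continue;
                /* ... operate on enc_to_intel_dp(&encoder->base) ... */
        }
}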
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
@@ -1337,9 +1358,6 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
- const unsigned int bank,
- const unsigned int bit);
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
@@ -1376,6 +1394,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe);
void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder);
@@ -1388,12 +1408,9 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -1407,6 +1424,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
+ u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_map_plls_to_ports(struct drm_crtc *crtc,
@@ -1488,6 +1507,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+ enum port port);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
@@ -1615,6 +1637,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
+u16 skl_scaler_calc_phase(int sub, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
uint32_t pixel_format);
@@ -1644,6 +1667,9 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe);
bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1661,8 +1687,6 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-int intel_dp_sink_crc(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
@@ -1676,8 +1700,8 @@ void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_mst_suspend(struct drm_device *dev);
-void intel_dp_mst_resume(struct drm_device *dev);
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
@@ -1707,6 +1731,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
@@ -1726,8 +1751,8 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-/* intel_dsi.c */
-void intel_dsi_init(struct drm_i915_private *dev_priv);
+/* vlv_dsi.c */
+void vlv_dsi_init(struct drm_i915_private *dev_priv);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1821,6 +1846,8 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
/* intel_lvds.c */
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe);
void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
@@ -1867,7 +1894,6 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
void intel_panel_destroy_backlight(struct drm_connector *connector);
-enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
@@ -1911,12 +1937,12 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_short_pulse(struct intel_dp *intel_dp);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -2058,12 +2084,13 @@ void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
/* intel_sdvo.c */
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe);
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
i915_reg_t reg, enum port port);
/* intel_sprite.c */
-bool intel_format_is_yuv(u32 format);
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
@@ -2076,10 +2103,9 @@ void skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane);
+bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
-bool intel_format_is_yuv(uint32_t format);
bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
@@ -2145,7 +2171,6 @@ void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
/* intel_pipe_crc.c */
-int intel_pipe_crc_create(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt);
@@ -2161,5 +2186,4 @@ static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
{
}
#endif
-extern const struct file_operations i915_display_crc_ctl_fops;
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 7afeb9580f41..ad7c1cb32983 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -129,21 +129,29 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base);
}
-/* intel_dsi.c */
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
+/* vlv_dsi.c */
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
-/* intel_dsi_pll.c */
-bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
-int intel_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config);
-void intel_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config);
-void intel_disable_dsi_pll(struct intel_encoder *encoder);
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config);
-void intel_dsi_reset_clocks(struct intel_encoder *encoder,
- enum port port);
+/* vlv_dsi_pll.c */
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void vlv_dsi_pll_disable(struct intel_encoder *encoder);
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config);
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void bxt_dsi_pll_disable(struct intel_encoder *encoder);
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config);
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 4d6ffa7b3e7b..ac83d6b89ae0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -181,7 +181,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
- wait_for_dsi_fifo_empty(intel_dsi, port);
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
data += len;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 61d908e0df0e..4e142ff49708 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -137,19 +137,15 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
- if (!(tmp & DVO_ENABLE))
- return false;
-
- *pipe = PORT_TO_PIPE(tmp);
+ *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
- return true;
+ return tmp & DVO_ENABLE;
}
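The reworked helper always decodes the pipe select field and returns only the enable bit, so callers learn which pipe even a disabled port points at. The same contract as a generic sketch (hypothetical helper, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the decode-then-report contract used above. */
static bool port_get_hw_state(uint32_t val, uint32_t enable_bit,
                              uint32_t pipe_mask, unsigned int pipe_shift,
                              int *pipe)
{
        *pipe = (val & pipe_mask) >> pipe_shift;
        return val & enable_bit;
}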
static void intel_dvo_get_config(struct intel_encoder *encoder,
@@ -282,8 +278,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
- if (pipe == 1)
- dvo_val |= DVO_PIPE_B_SELECT;
+ dvo_val |= DVO_PIPE_SEL(pipe);
dvo_val |= DVO_PIPE_STALL;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
@@ -443,7 +438,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
int gpio;
bool dvoinit;
enum pipe pipe;
- uint32_t dpll[I915_MAX_PIPES];
+ u32 dpll[I915_MAX_PIPES];
enum port port;
/*
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 1590375f31cb..2d1952849d69 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -25,7 +25,6 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
-#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
@@ -230,6 +229,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
break;
default:
MISSING_CASE(class);
+ /* fall through */
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
@@ -302,6 +302,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
+ if (engine->context_size)
+ DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -456,28 +458,16 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
i915_gem_batch_pool_init(&engine->batch_pool, engine);
}
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
- /* Older GVT emulation depends upon intercepting CSB mmio */
- if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
- return true;
-
- return false;
-}
-
static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- execlists->csb_use_mmio = csb_force_mmio(engine->i915);
-
execlists->port_mask = 1;
BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
execlists->queue_priority = INT_MIN;
- execlists->queue = RB_ROOT;
- execlists->first = NULL;
+ execlists->queue = RB_ROOT_CACHED;
}
/**
@@ -492,6 +482,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
i915_timeline_init(engine->i915, &engine->timeline, engine->name);
+ lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);
intel_engine_init_execlist(engine);
intel_engine_init_hangcheck(engine);
@@ -499,7 +490,8 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+ unsigned int size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -515,7 +507,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
return PTR_ERR(obj);
}
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
@@ -533,7 +525,7 @@ err_unref:
return ret;
}
-static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->scratch);
}
@@ -585,7 +577,7 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -645,6 +637,12 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
return 0;
}
+static void __intel_context_unpin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_context_unpin(to_intel_context(ctx, engine));
+}
+
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -658,7 +656,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
*/
int intel_engine_init_common(struct intel_engine_cs *engine)
{
- struct intel_ring *ring;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_context *ce;
int ret;
engine->set_default_submission(engine);
@@ -670,18 +669,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
- ring = intel_context_pin(engine->i915->kernel_context, engine);
- if (IS_ERR(ring))
- return PTR_ERR(ring);
+ ce = intel_context_pin(i915->kernel_context, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
/*
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time.
*/
- if (engine->i915->preempt_context) {
- ring = intel_context_pin(engine->i915->preempt_context, engine);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
+ if (i915->preempt_context) {
+ ce = intel_context_pin(i915->preempt_context, engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
goto err_unpin_kernel;
}
}
@@ -690,7 +689,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
goto err_unpin_preempt;
- if (HWS_NEEDS_PHYSICAL(engine->i915))
+ if (HWS_NEEDS_PHYSICAL(i915))
ret = init_phys_status_page(engine);
else
ret = init_status_page(engine);
@@ -702,10 +701,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
- if (engine->i915->preempt_context)
- intel_context_unpin(engine->i915->preempt_context, engine);
+ if (i915->preempt_context)
+ __intel_context_unpin(i915->preempt_context, engine);
+
err_unpin_kernel:
- intel_context_unpin(engine->i915->kernel_context, engine);
+ __intel_context_unpin(i915->kernel_context, engine);
return ret;
}
@@ -718,6 +718,8 @@ err_unpin_kernel:
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+
intel_engine_cleanup_scratch(engine);
if (HWS_NEEDS_PHYSICAL(engine->i915))
@@ -732,9 +734,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (engine->i915->preempt_context)
- intel_context_unpin(engine->i915->preempt_context, engine);
- intel_context_unpin(engine->i915->kernel_context, engine);
+ if (i915->preempt_context)
+ __intel_context_unpin(i915->preempt_context, engine);
+ __intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
}
@@ -769,6 +771,35 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
return bbaddr;
}
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ const u32 base = engine->mmio_base;
+ const i915_reg_t mode = RING_MI_MODE(base);
+ int err;
+
+ if (INTEL_GEN(dev_priv) < 3)
+ return -ENODEV;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+
+ err = 0;
+ if (__intel_wait_for_register_fw(dev_priv,
+ mode, MODE_IDLE, MODE_IDLE,
+ 1000, 0,
+ NULL)) {
+ GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+ err = -ETIMEDOUT;
+ }
+
+ /* A final mmio read to hopefully flush any pending GPU writes to memory */
+ POSTING_READ_FW(mode);
+
+ return err;
+}
+
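STOP_RING is written with _MASKED_BIT_ENABLE(), the i915 idiom for registers whose upper 16 bits act as a per-bit write-enable mask; in essence:

/*
 * Masked-bit registers: the upper 16 bits select which of the lower
 * 16 bits this write is allowed to change (matches _MASKED_BIT_* in
 * i915_reg.h).
 */
#define MASKED_BIT_ENABLE(a)    (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)   ((a) << 16)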
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
switch (type) {
@@ -780,12 +811,32 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
}
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
+{
+ const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+ u32 mcr_s_ss_select;
+ u32 slice = fls(sseu->slice_mask);
+ u32 subslice = fls(sseu->subslice_mask[slice]);
+
+ if (INTEL_GEN(dev_priv) == 10)
+ mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
+ GEN8_MCR_SUBSLICE(subslice);
+ else if (INTEL_GEN(dev_priv) >= 11)
+ mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
+ GEN11_MCR_SUBSLICE(subslice);
+ else
+ mcr_s_ss_select = 0;
+
+ return mcr_s_ss_select;
+}
+
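The defaults are derived with fls(), the kernel's find-last-set helper, which returns the 1-based index of the most significant set bit and 0 for an empty mask; for orientation:

/* fls() reference points (1-based MSB index, 0 when no bit is set):
 *   fls(0x0) == 0, fls(0x1) == 1, fls(0x6) == 3, fls(0x80000000) == 32
 */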
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
uint32_t mcr_slice_subslice_mask;
uint32_t mcr_slice_subslice_select;
+ uint32_t default_mcr_s_ss_select;
uint32_t mcr;
uint32_t ret;
enum forcewake_domains fw_domains;
@@ -802,6 +853,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
GEN8_MCR_SUBSLICE(subslice);
}
+ default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
+
fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -812,11 +865,10 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
- /*
- * The HW expects the slice and sublice selectors to be reset to 0
- * after reading out the registers.
- */
- WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
+
+ WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
+ default_mcr_s_ss_select);
+
mcr &= ~mcr_slice_subslice_mask;
mcr |= mcr_slice_subslice_select;
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
@@ -824,6 +876,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
ret = I915_READ_FW(reg);
mcr &= ~mcr_slice_subslice_mask;
+ mcr |= default_mcr_s_ss_select;
+
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
@@ -934,11 +988,24 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return true;
/* Waiting to drain ELSP? */
- if (READ_ONCE(engine->execlists.active))
- return false;
+ if (READ_ONCE(engine->execlists.active)) {
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->func(t->data);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
- /* ELSP is empty, but there are ready requests? */
- if (READ_ONCE(engine->execlists.first))
+ if (READ_ONCE(engine->execlists.active))
+ return false;
+ }
+
+ /* ELSP is empty, but there are ready requests? E.g. after reset */
+ if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
return false;
/* Ring stopped? */
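A note on the trylock dance above: tasklet_trylock() fails when the tasklet is already running on another CPU (someone else is draining the CSB), __tasklet_is_enabled() avoids running it while it is disabled for a GPU reset, and local_bh_disable() keeps the inline invocation consistent with softirq context. Condensed:

/* Opportunistically flush a tasklet inline, then re-check its work. */
local_bh_disable();                     /* mimic softirq context for t->func() */
if (tasklet_trylock(t)) {               /* skip if running on another CPU */
        if (__tasklet_is_enabled(t))    /* don't race a GPU reset */
                t->func(t->data);
        tasklet_unlock(t);
}
local_bh_enable();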
@@ -978,8 +1045,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
*/
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
- const struct i915_gem_context * const kernel_context =
- engine->i915->kernel_context;
+ const struct intel_context *kernel_context =
+ to_intel_context(engine->i915->kernel_context, engine);
struct i915_request *rq;
lockdep_assert_held(&engine->i915->drm.struct_mutex);
@@ -991,7 +1058,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
*/
rq = __i915_gem_active_peek(&engine->timeline.last_request);
if (rq)
- return rq->ctx == kernel_context;
+ return rq->hw_context == kernel_context;
else
return engine->last_retired_context == kernel_context;
}
@@ -1006,6 +1073,28 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
}
/**
+ * intel_engines_sanitize: called after the GPU has lost power
+ * @i915: the i915 device
+ *
+ * Anytime we reset the GPU, either with an explicit GPU reset or through a
+ * PCI power cycle, the GPU loses state and we must reset our state tracking
+ * to match. Note that calling intel_engines_sanitize() if the GPU has not
+ * been reset results in much confusion!
+ */
+void intel_engines_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ GEM_TRACE("\n");
+
+ for_each_engine(engine, i915, id) {
+ if (engine->reset.reset)
+ engine->reset.reset(engine, NULL);
+ }
+}
+
+/**
* intel_engines_park: called when the GT is transitioning from busy->idle
* @i915: the i915 device
*
@@ -1043,6 +1132,11 @@ void intel_engines_park(struct drm_i915_private *i915)
if (engine->park)
engine->park(engine);
+ if (engine->pinned_default_state) {
+ i915_gem_object_unpin_map(engine->default_state);
+ engine->pinned_default_state = NULL;
+ }
+
i915_gem_batch_pool_fini(&engine->batch_pool);
engine->execlists.no_priolist = false;
}
@@ -1060,6 +1154,16 @@ void intel_engines_unpark(struct drm_i915_private *i915)
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
+ void *map;
+
+ /* Pin the default state for fast resets from atomic context. */
+ map = NULL;
+ if (engine->default_state)
+ map = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (!IS_ERR_OR_NULL(map))
+ engine->pinned_default_state = map;
+
if (engine->unpark)
engine->unpark(engine);
@@ -1067,6 +1171,26 @@ void intel_engines_unpark(struct drm_i915_private *i915)
}
}
+/**
+ * intel_engine_lost_context: called when the GPU is reset into unknown state
+ * @engine: the engine
+ *
+ * We have either reset the GPU, or are otherwise about to lose state tracking of
+ * the current GPU logical state (e.g. suspend). On next use, it is therefore
+ * imperative that we make no presumptions about the current state and load
+ * from scratch.
+ */
+void intel_engine_lost_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
+}
+
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
@@ -1151,7 +1275,7 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
rowsize, sizeof(u32),
line, sizeof(line),
false) >= sizeof(line));
- drm_printf(m, "%08zx %s\n", pos, line);
+ drm_printf(m, "[%04zx] %s\n", pos, line);
prev = buf + pos;
skip = false;
@@ -1166,6 +1290,8 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
&engine->execlists;
u64 addr;
+ if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
+ drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
I915_READ(RING_START(engine->mmio_base)));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
@@ -1232,12 +1358,10 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
read = GEN8_CSB_READ_PTR(ptr);
write = GEN8_CSB_WRITE_PTR(ptr);
- drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n",
+ drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n",
read, execlists->csb_head,
write,
intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
- yesno(test_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted)),
yesno(test_bit(TASKLET_STATE_SCHED,
&engine->execlists.tasklet.state)),
enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
@@ -1287,6 +1411,39 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
}
}
+static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
+{
+ void *ring;
+ int size;
+
+ drm_printf(m,
+ "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+
+ size = rq->tail - rq->head;
+ if (rq->tail < rq->head)
+ size += rq->ring->size;
+
+ ring = kmalloc(size, GFP_ATOMIC);
+ if (ring) {
+ const void *vaddr = rq->ring->vaddr;
+ unsigned int head = rq->head;
+ unsigned int len = 0;
+
+ if (rq->tail < head) {
+ len = rq->ring->size - head;
+ memcpy(ring, vaddr + head, len);
+ head = 0;
+ }
+ memcpy(ring + len, vaddr + head, size - len);
+
+ hexdump(m, ring, size);
+ kfree(ring);
+ }
+}
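The pair of memcpy() calls handles a request that wraps past the end of the ring. The same copy-out as a standalone sketch, with hypothetical names:

#include <string.h>

/* Wrap-aware copy of [head, tail) out of a circular ring buffer. */
static void copy_ring_span(void *dst, const void *ring,
                           unsigned int ring_size,
                           unsigned int head, unsigned int tail)
{
        unsigned int len = 0;

        if (tail < head) {
                len = ring_size - head;  /* first chunk: head..end of ring */
                memcpy(dst, (const char *)ring + head, len);
                head = 0;                /* second chunk starts at 0 */
        }
        memcpy((char *)dst + len, (const char *)ring + head, tail - head);
}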
+
void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
@@ -1296,6 +1453,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq, *last;
+ unsigned long flags;
struct rb_node *rb;
int count;
@@ -1336,11 +1494,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq = i915_gem_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
- drm_printf(m,
- "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
- rq->head, rq->postfix, rq->tail,
- rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
- rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+
drm_printf(m, "\t\tring->start: 0x%08x\n",
i915_ggtt_offset(rq->ring->vma));
drm_printf(m, "\t\tring->head: 0x%08x\n",
@@ -1351,6 +1505,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
+
+ print_request_ring(m, rq);
}
rcu_read_unlock();
@@ -1362,7 +1518,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- spin_lock_irq(&engine->timeline.lock);
+ local_irq_save(flags);
+ spin_lock(&engine->timeline.lock);
last = NULL;
count = 0;
@@ -1384,7 +1541,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
last = NULL;
count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
- for (rb = execlists->first; rb; rb = rb_next(rb)) {
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
@@ -1404,22 +1561,21 @@ void intel_engine_dump(struct intel_engine_cs *engine,
print_request(m, last, "\t\tQ ");
}
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock(&engine->timeline.lock);
- spin_lock_irq(&b->rb_lock);
+ spin_lock(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
drm_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock_irq(&b->rb_lock);
+ spin_unlock(&b->rb_lock);
+ local_irq_restore(flags);
- drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
+ drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n",
engine->irq_posted,
yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)),
- yesno(test_bit(ENGINE_IRQ_EXECLIST,
&engine->irq_posted)));
drm_printf(m, "HWSP:\n");
@@ -1468,8 +1624,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (!intel_engine_supports_stats(engine))
return -ENODEV;
- tasklet_disable(&execlists->tasklet);
- write_seqlock_irqsave(&engine->stats.lock, flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ write_seqlock(&engine->stats.lock);
if (unlikely(engine->stats.enabled == ~0)) {
err = -EBUSY;
@@ -1493,8 +1649,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
}
unlock:
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
- tasklet_enable(&execlists->tasklet);
+ write_sequnlock(&engine->stats.lock);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
return err;
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index b431b6733cc1..01d1d2088f04 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -399,89 +399,6 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
return dev_priv->fbc.active;
}
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct drm_i915_private *dev_priv =
- container_of(__work, struct drm_i915_private, fbc.work.work);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_work *work = &fbc->work;
- struct intel_crtc *crtc = fbc->crtc;
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
-
- if (drm_crtc_vblank_get(&crtc->base)) {
- /* CRTC is now off, leave FBC deactivated */
- mutex_lock(&fbc->lock);
- work->scheduled = false;
- mutex_unlock(&fbc->lock);
- return;
- }
-
-retry:
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * WaFbcWaitForVBlankBeforeEnable:ilk,snb
- *
- * It is also worth mentioning that since work->scheduled_vblank can be
- * updated multiple times by the other threads, hitting the timeout is
- * not an error condition. We'll just end up hitting the "goto retry"
- * case below.
- */
- wait_event_timeout(vblank->queue,
- drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
- msecs_to_jiffies(50));
-
- mutex_lock(&fbc->lock);
-
- /* Were we cancelled? */
- if (!work->scheduled)
- goto out;
-
- /* Were we delayed again while this function was sleeping? */
- if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
- mutex_unlock(&fbc->lock);
- goto retry;
- }
-
- intel_fbc_hw_activate(dev_priv);
-
- work->scheduled = false;
-
-out:
- mutex_unlock(&fbc->lock);
- drm_crtc_vblank_put(&crtc->base);
-}
-
-static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_work *work = &fbc->work;
-
- WARN_ON(!mutex_is_locked(&fbc->lock));
- if (WARN_ON(!fbc->enabled))
- return;
-
- if (drm_crtc_vblank_get(&crtc->base)) {
- DRM_ERROR("vblank not available for FBC on pipe %c\n",
- pipe_name(crtc->pipe));
- return;
- }
-
- /* It is useless to call intel_fbc_cancel_work() or cancel_work() in
- * this function since we're not releasing fbc.lock, so it won't have an
- * opportunity to grab it to discover that it was cancelled. So we just
- * update the expected jiffy count. */
- work->scheduled = true;
- work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
- drm_crtc_vblank_put(&crtc->base);
-
- schedule_work(&work->work);
-}
-
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
const char *reason)
{
@@ -489,11 +406,6 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
WARN_ON(!mutex_is_locked(&fbc->lock));
- /* Calling cancel_work() here won't help due to the fact that the work
- * function grabs fbc->lock. Just set scheduled to false so the work
- * function can know it was cancelled. */
- fbc->work.scheduled = false;
-
if (fbc->active)
intel_fbc_hw_deactivate(dev_priv);
@@ -924,13 +836,6 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
32 * fbc->threshold) * 8;
}
-static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
- struct intel_fbc_reg_params *params2)
-{
- /* We can use this since intel_fbc_get_reg_params() does a memset. */
- return memcmp(params1, params2, sizeof(*params1)) == 0;
-}
-
void intel_fbc_pre_update(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -953,6 +858,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
goto unlock;
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
+ fbc->flip_pending = true;
deactivate:
intel_fbc_deactivate(dev_priv, reason);
@@ -988,13 +894,15 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_reg_params old_params;
WARN_ON(!mutex_is_locked(&fbc->lock));
if (!fbc->enabled || fbc->crtc != crtc)
return;
+ fbc->flip_pending = false;
+ WARN_ON(fbc->active);
+
if (!i915_modparams.enable_fbc) {
intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
__intel_fbc_disable(dev_priv);
@@ -1002,25 +910,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
return;
}
- if (!intel_fbc_can_activate(crtc)) {
- WARN_ON(fbc->active);
- return;
- }
-
- old_params = fbc->params;
intel_fbc_get_reg_params(crtc, &fbc->params);
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (fbc->active &&
- intel_fbc_reg_params_equal(&old_params, &fbc->params))
+ if (!intel_fbc_can_activate(crtc))
return;
- intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
- intel_fbc_schedule_activation(crtc);
+ if (!fbc->busy_bits) {
+ intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
+ intel_fbc_hw_activate(dev_priv);
+ } else
+ intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
void intel_fbc_post_update(struct intel_crtc *crtc)
@@ -1085,7 +984,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
(frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
if (fbc->active)
intel_fbc_recompress(dev_priv);
- else
+ else if (!fbc->flip_pending)
__intel_fbc_post_update(fbc->crtc);
}
@@ -1225,8 +1124,6 @@ void intel_fbc_disable(struct intel_crtc *crtc)
if (fbc->crtc == crtc)
__intel_fbc_disable(dev_priv);
mutex_unlock(&fbc->lock);
-
- cancel_work_sync(&fbc->work.work);
}
/**
@@ -1248,8 +1145,6 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
__intel_fbc_disable(dev_priv);
}
mutex_unlock(&fbc->lock);
-
- cancel_work_sync(&fbc->work.work);
}
static void intel_fbc_underrun_work_fn(struct work_struct *work)
@@ -1400,12 +1295,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
fbc->enabled = false;
fbc->active = false;
- fbc->work.scheduled = false;
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->has_fbc = false;
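Taken together, the intel_fbc.c hunks above drop the deferred activation worker and track state with a flip_pending flag instead: pre-update sets it and deactivates the hardware, post-update clears it and activates synchronously. A compressed sketch of that flow; fbc_pre_update()/fbc_post_update() and the struct here are illustrative stand-ins, not the driver's API:

        #include <linux/mutex.h>

        struct fbc_state {
                struct mutex lock;
                bool flip_pending;
                bool active;
                unsigned int busy_bits;
        };

        static void fbc_pre_update(struct fbc_state *fbc)
        {
                mutex_lock(&fbc->lock);
                fbc->flip_pending = true;       /* a flip is in flight */
                fbc->active = false;            /* hw off until post-update */
                mutex_unlock(&fbc->lock);
        }

        static void fbc_post_update(struct fbc_state *fbc)
        {
                mutex_lock(&fbc->lock);
                fbc->flip_pending = false;
                if (!fbc->busy_bits)
                        fbc->active = true;     /* activate hw synchronously */
                mutex_unlock(&fbc->lock);
        }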
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index e9e02b58b7be..fb2f9fce34cd 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -47,7 +47,7 @@
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
- struct drm_i915_gem_object *obj = ifbdev->fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
unsigned int origin =
ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
@@ -193,7 +193,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
- if (!intel_fb || WARN_ON(!intel_fb->obj)) {
+ if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
@@ -265,7 +265,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (intel_fb->obj->stolen && !prealloc)
+ if (intel_fb_obj(fb)->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -792,7 +792,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* been restored from swap. If the object is stolen however, it will be
* full of whatever garbage was left in there.
*/
- if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
+ if (state == FBINFO_STATE_RUNNING &&
+ intel_fb_obj(&ifbdev->fb->base)->stolen)
memset_io(info->screen_base, 0, info->screen_size);
drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 7fff0a0eceb4..c3379bde266f 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -153,8 +153,6 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
-
- intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 116f4ccf1bbd..560c7406ae40 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -27,6 +27,8 @@
#include "intel_guc_submission.h"
#include "i915_drv.h"
+static void guc_init_ggtt_pin_bias(struct intel_guc *guc);
+
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -73,7 +75,7 @@ void intel_guc_init_early(struct intel_guc *guc)
guc->notify = gen8_guc_raise_irq;
}
-int intel_guc_init_wq(struct intel_guc *guc)
+static int guc_init_wq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -124,7 +126,7 @@ int intel_guc_init_wq(struct intel_guc *guc)
return 0;
}
-void intel_guc_fini_wq(struct intel_guc *guc)
+static void guc_fini_wq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -135,6 +137,28 @@ void intel_guc_fini_wq(struct intel_guc *guc)
destroy_workqueue(guc->log.relay.flush_wq);
}
+int intel_guc_init_misc(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+ int ret;
+
+ guc_init_ggtt_pin_bias(guc);
+
+ ret = guc_init_wq(guc);
+ if (ret)
+ return ret;
+
+ intel_uc_fw_fetch(i915, &guc->fw);
+
+ return 0;
+}
+
+void intel_guc_fini_misc(struct intel_guc *guc)
+{
+ intel_uc_fw_fini(&guc->fw);
+ guc_fini_wq(guc);
+}
+
static int guc_shared_data_create(struct intel_guc *guc)
{
struct i915_vma *vma;
@@ -169,7 +193,7 @@ int intel_guc_init(struct intel_guc *guc)
ret = guc_shared_data_create(guc);
if (ret)
- return ret;
+ goto err_fetch;
GEM_BUG_ON(!guc->shared_data);
ret = intel_guc_log_create(&guc->log);
@@ -190,6 +214,8 @@ err_log:
intel_guc_log_destroy(&guc->log);
err_shared:
guc_shared_data_destroy(guc);
+err_fetch:
+ intel_uc_fw_fini(&guc->fw);
return ret;
}
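The new err_fetch label follows the usual kernel unwind idiom: each acquired resource gets a matching label, and a failure releases everything acquired so far in reverse order. A self-contained illustration with stubbed resources (init_a() and friends are placeholders, not driver functions):

        static int init_a(void) { return 0; }
        static int init_b(void) { return 0; }
        static int init_c(void) { return -1; }
        static void fini_a(void) { }
        static void fini_b(void) { }

        static int init_all(void)
        {
                int err;

                err = init_a();
                if (err)
                        return err;

                err = init_b();
                if (err)
                        goto err_a;     /* undo A */

                err = init_c();
                if (err)
                        goto err_b;     /* undo B, then A */

                return 0;

        err_b:
                fini_b();
        err_a:
                fini_a();
                return err;
        }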
@@ -201,14 +227,17 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);
+ intel_uc_fw_fini(&guc->fw);
}
-static u32 get_log_control_flags(void)
+static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
- u32 level = i915_modparams.guc_log_level;
- u32 flags = 0;
+ u32 level = intel_guc_log_get_level(&guc->log);
+ u32 flags;
+ u32 ads;
- GEM_BUG_ON(level < 0);
+ ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
+ flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;
if (!GUC_LOG_LEVEL_IS_ENABLED(level))
flags |= GUC_LOG_DEFAULT_DISABLED;
@@ -222,6 +251,78 @@ static u32 get_log_control_flags(void)
return flags;
}
+static u32 guc_ctl_feature_flags(struct intel_guc *guc)
+{
+ u32 flags = 0;
+
+ flags |= GUC_CTL_VCS2_ENABLED;
+
+ if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
+ flags |= GUC_CTL_KERNEL_SUBMISSIONS;
+ else
+ flags |= GUC_CTL_DISABLE_SCHEDULER;
+
+ return flags;
+}
+
+static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
+{
+ u32 flags = 0;
+
+ if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
+ u32 ctxnum, base;
+
+ base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
+ ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
+ base >>= PAGE_SHIFT;
+ flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
+ (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
+ }
+ return flags;
+}
+
+static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
+{
+ u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
+ u32 flags;
+
+ #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
+ #define UNIT SZ_1M
+ #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
+ #else
+ #define UNIT SZ_4K
+ #define FLAG 0
+ #endif
+
+ BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
+ BUILD_BUG_ON(!DPC_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
+ BUILD_BUG_ON(!ISR_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));
+
+ BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
+ (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
+ BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
+ (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
+ BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
+ (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));
+
+ flags = GUC_LOG_VALID |
+ GUC_LOG_NOTIFY_ON_HALF_FULL |
+ FLAG |
+ ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
+ ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
+ ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
+ (offset << GUC_LOG_BUF_ADDR_SHIFT);
+
+ #undef UNIT
+ #undef FLAG
+
+ return flags;
+}
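The BUILD_BUG_ON() checks above cost nothing at runtime; they fail the compile if a buffer size is zero, misaligned to the chosen unit, or too large for its register field. A standalone illustration of the idiom, with made-up macro names:

        #include <linux/build_bug.h>
        #include <linux/kernel.h>
        #include <linux/sizes.h>

        #define MY_BUF_SIZE     SZ_32K
        #define MY_SHIFT        6
        #define MY_MASK         (0x7 << MY_SHIFT)

        static inline unsigned int my_size_field(void)
        {
                /* evaluated by the compiler; emits no object code */
                BUILD_BUG_ON(!IS_ALIGNED(MY_BUF_SIZE, SZ_4K));
                BUILD_BUG_ON((MY_BUF_SIZE / SZ_4K - 1) > (MY_MASK >> MY_SHIFT));

                return (MY_BUF_SIZE / SZ_4K - 1) << MY_SHIFT;
        }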
+
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
@@ -245,32 +346,13 @@ void intel_guc_init_params(struct intel_guc *guc)
params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
- params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
- GUC_CTL_VCS2_ENABLED;
-
- params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
-
- params[GUC_CTL_DEBUG] = get_log_control_flags();
+ params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
+ params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
+ params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
+ params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
- /* If GuC submission is enabled, set up additional parameters here */
- if (USES_GUC_SUBMISSION(dev_priv)) {
- u32 ads = intel_guc_ggtt_offset(guc,
- guc->ads_vma) >> PAGE_SHIFT;
- u32 pgs = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
- u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
-
- params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
- params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
-
- pgs >>= PAGE_SHIFT;
- params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
- (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
-
- params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
-
- /* Unmask this bit to enable the GuC's internal scheduler */
- params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
- }
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
/*
* All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
@@ -346,10 +428,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
ret = -EIO;
if (ret) {
- DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;"
- " ret=%d status=0x%08X response=0x%08X\n",
- action[0], ret, status,
- I915_READ(SOFT_SCRATCH(15)));
+ DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
+ action[0], ret, status);
goto out;
}
@@ -386,11 +466,13 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
* could happen that GuC sets the bit for 2nd interrupt but Host
* clears out the bit on handling the 1st interrupt.
*/
+ disable_rpm_wakeref_asserts(dev_priv);
spin_lock(&guc->irq_lock);
val = I915_READ(SOFT_SCRATCH(15));
msg = val & guc->msg_enabled_mask;
I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
spin_unlock(&guc->irq_lock);
+ enable_rpm_wakeref_asserts(dev_priv);
intel_guc_to_host_process_recv_msg(guc, msg);
}
@@ -532,13 +614,13 @@ int intel_guc_resume(struct intel_guc *guc)
*/
/**
- * intel_guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
+ * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
* @guc: intel_guc structure.
*
* This function will calculate and initialize the ggtt_pin_bias value based on
* overall WOPCM size and GuC WOPCM size.
*/
-void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc)
+static void guc_init_ggtt_pin_bias(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_i915(guc);
@@ -572,7 +654,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
if (IS_ERR(vma))
goto err;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index f1265e122d30..4121928a495e 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -151,11 +151,10 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_init_params(struct intel_guc *guc);
-void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc);
-int intel_guc_init_wq(struct intel_guc *guc);
-void intel_guc_fini_wq(struct intel_guc *guc);
+int intel_guc_init_misc(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
+void intel_guc_fini_misc(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 0867ba76d445..1a0f2a39cef9 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -84,12 +84,12 @@
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
-#define GUC_LOG_CRASH_PAGES 1
#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_DPC_PAGES 7
+#define GUC_LOG_CRASH_MASK (0x1 << GUC_LOG_CRASH_SHIFT)
#define GUC_LOG_DPC_SHIFT 6
-#define GUC_LOG_ISR_PAGES 7
+#define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT)
#define GUC_LOG_ISR_SHIFT 9
+#define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT)
#define GUC_LOG_BUF_ADDR_SHIFT 12
#define GUC_CTL_PAGE_FAULT_CONTROL 5
@@ -532,20 +532,6 @@ enum guc_log_buffer_type {
};
/**
- * DOC: GuC Log buffer Layout
- *
- * Page0 +-------------------------------+
- * | ISR state header (32 bytes) |
- * | DPC state header |
- * | Crash dump state header |
- * Page1 +-------------------------------+
- * | ISR logs |
- * Page9 +-------------------------------+
- * | DPC logs |
- * Page17 +-------------------------------+
- * | Crash Dump logs |
- * +-------------------------------+
- *
* Below state structure is used for coordination of retrieval of GuC firmware
* logs. Separate state is maintained for each log buffer type.
* read_ptr points to the location where i915 read last in log buffer and
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 401e1704d61e..6da61a71d28f 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -215,11 +215,11 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
switch (type) {
case GUC_ISR_LOG_BUFFER:
- return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+ return ISR_BUFFER_SIZE;
case GUC_DPC_LOG_BUFFER:
- return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+ return DPC_BUFFER_SIZE;
case GUC_CRASH_DUMP_LOG_BUFFER:
- return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+ return CRASH_BUFFER_SIZE;
default:
MISSING_CASE(type);
}
@@ -397,7 +397,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
lockdep_assert_held(&log->relay.lock);
/* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = GUC_LOG_SIZE;
+ subbuf_size = log->vma->size;
/*
* Store up to 8 snapshots, which is large enough to buffer sufficient
@@ -452,13 +452,34 @@ int intel_guc_log_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct i915_vma *vma;
- unsigned long offset;
- u32 flags;
+ u32 guc_log_size;
int ret;
GEM_BUG_ON(log->vma);
- vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
+ /*
+ * GuC Log buffer Layout
+ *
+ * +===============================+ 00B
+ * | Crash dump state header |
+ * +-------------------------------+ 32B
+ * | DPC state header |
+ * +-------------------------------+ 64B
+ * | ISR state header |
+ * +-------------------------------+ 96B
+ * | |
+ * +===============================+ PAGE_SIZE (4KB)
+ * | Crash Dump logs |
+ * +===============================+ + CRASH_SIZE
+ * | DPC logs |
+ * +===============================+ + DPC_SIZE
+ * | ISR logs |
+ * +===============================+ + ISR_SIZE
+ */
+ guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
+ ISR_BUFFER_SIZE;
+
+ vma = intel_guc_allocate_vma(guc, guc_log_size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -466,20 +487,12 @@ int intel_guc_log_create(struct intel_guc_log *log)
log->vma = vma;
- /* each allocated unit is a page */
- flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
- (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
- (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
- (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
-
- offset = intel_guc_ggtt_offset(guc, vma) >> PAGE_SHIFT;
- log->flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ log->level = i915_modparams.guc_log_level;
return 0;
err:
- /* logging will be off */
- i915_modparams.guc_log_level = 0;
+ DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
return ret;
}
@@ -488,15 +501,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
i915_vma_unpin_and_release(&log->vma);
}
-int intel_guc_log_level_get(struct intel_guc_log *log)
-{
- GEM_BUG_ON(!log->vma);
- GEM_BUG_ON(i915_modparams.guc_log_level < 0);
-
- return i915_modparams.guc_log_level;
-}
-
-int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -504,33 +509,32 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
GEM_BUG_ON(!log->vma);
- GEM_BUG_ON(i915_modparams.guc_log_level < 0);
/*
* GuC is recognizing log levels starting from 0 to max, we're using 0
* as indication that logging should be disabled.
*/
- if (val < GUC_LOG_LEVEL_DISABLED || val > GUC_LOG_LEVEL_MAX)
+ if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
mutex_lock(&dev_priv->drm.struct_mutex);
- if (i915_modparams.guc_log_level == val) {
+ if (log->level == level) {
ret = 0;
goto out_unlock;
}
intel_runtime_pm_get(dev_priv);
- ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(val),
- GUC_LOG_LEVEL_IS_ENABLED(val),
- GUC_LOG_LEVEL_TO_VERBOSITY(val));
+ ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
+ GUC_LOG_LEVEL_IS_ENABLED(level),
+ GUC_LOG_LEVEL_TO_VERBOSITY(level));
intel_runtime_pm_put(dev_priv);
if (ret) {
DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
goto out_unlock;
}
- i915_modparams.guc_log_level = val;
+ log->level = level;
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index fa80535a6f9d..7bc763f10c03 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -30,15 +30,19 @@
#include <linux/workqueue.h>
#include "intel_guc_fwif.h"
+#include "i915_gem.h"
struct intel_guc;
-/*
- * The first page is to save log buffer state. Allocate one
- * extra page for others in case for overlap
- */
-#define GUC_LOG_SIZE ((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \
- 1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT)
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#define CRASH_BUFFER_SIZE SZ_2M
+#define DPC_BUFFER_SIZE SZ_8M
+#define ISR_BUFFER_SIZE SZ_8M
+#else
+#define CRASH_BUFFER_SIZE SZ_8K
+#define DPC_BUFFER_SIZE SZ_32K
+#define ISR_BUFFER_SIZE SZ_32K
+#endif
/*
* While we're using plain log level in i915, GuC controls are much more...
@@ -58,7 +62,7 @@ struct intel_guc;
#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
struct intel_guc_log {
- u32 flags;
+ u32 level;
struct i915_vma *vma;
struct {
void *buf_addr;
@@ -80,8 +84,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log);
int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
-int intel_guc_log_level_get(struct intel_guc_log *log);
-int intel_guc_log_level_set(struct intel_guc_log *log, u64 control_val);
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level);
bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
int intel_guc_log_relay_open(struct intel_guc_log *log);
void intel_guc_log_relay_flush(struct intel_guc_log *log);
@@ -89,4 +92,9 @@ void intel_guc_log_relay_close(struct intel_guc_log *log);
void intel_guc_log_handle_flush_event(struct intel_guc_log *log);
+static inline u32 intel_guc_log_get_level(struct intel_guc_log *log)
+{
+ return log->level;
+}
+
#endif
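For scale: with these macros, the allocation computed in intel_guc_log_create() (PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE + ISR_BUFFER_SIZE) comes to 4K + 8K + 32K + 32K = 76K in a default build, and 4K + 2M + 8M + 8M, just over 18M, with CONFIG_DRM_I915_DEBUG_GUC enabled.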
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 2feb65096966..4aa5e6463e7b 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -513,8 +513,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
struct intel_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
- engine));
+ u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
spin_lock(&client->wq_lock);
@@ -537,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
*/
static void flush_ggtt_writes(struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
+ struct drm_i915_private *dev_priv = vma->vm->i915;
if (i915_vma_is_map_and_fenceable(vma))
POSTING_READ_FW(GUC_STATUS);
@@ -552,8 +551,8 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
- engine));
+ u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
+ engine)->lrc_desc);
u32 data[7];
/*
@@ -623,6 +622,22 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
}
+static void complete_preempt_context(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+ if (inject_preempt_hang(execlists))
+ return;
+
+ execlists_cancel_port_requests(execlists);
+ execlists_unwind_incomplete_requests(execlists);
+
+ wait_for_guc_preempt_report(engine);
+ intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+}
+
/**
* guc_submit() - Submit commands through GuC
* @engine: engine associated with the commands
@@ -681,9 +696,6 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
lockdep_assert_held(&engine->timeline.lock);
- rb = execlists->first;
- GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-
if (port_isset(port)) {
if (intel_engine_has_preemption(engine)) {
struct guc_preempt_work *preempt_work =
@@ -705,12 +717,12 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
}
GEM_BUG_ON(port_isset(port));
- while (rb) {
+ while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
- if (last && rq->ctx != last->ctx) {
+ if (last && rq->hw_context != last->hw_context) {
if (port == last_port) {
__list_del_many(&p->requests,
&rq->sched.link);
@@ -730,15 +742,13 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
submit = true;
}
- rb = rb_next(rb);
- rb_erase(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
done:
execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
- execlists->first = rb;
if (submit)
port_assign(port, last);
if (last)
@@ -747,7 +757,8 @@ done:
/* We must always keep the beast fed if we have work piled up */
GEM_BUG_ON(port_isset(execlists->port) &&
!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
- GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+ GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
+ !port_isset(execlists->port));
return submit;
}
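__guc_dequeue() now pops directly from an rb_root_cached, which keeps a pointer to the leftmost node so rb_first_cached() is O(1) rather than a walk down the tree. A minimal sketch of the pop-lowest loop on a generic payload type:

        #include <linux/rbtree.h>

        struct prio_node {
                struct rb_node node;
                int priority;
        };

        static void drain_queue(struct rb_root_cached *root)
        {
                struct rb_node *rb;

                while ((rb = rb_first_cached(root))) {  /* cached leftmost */
                        struct prio_node *p =
                                rb_entry(rb, struct prio_node, node);

                        rb_erase_cached(&p->node, root); /* keeps cache valid */
                        /* process p here */
                }
        }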
@@ -793,20 +804,44 @@ static void guc_submission_tasklet(unsigned long data)
if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
- GUC_PREEMPT_FINISHED) {
- execlists_cancel_port_requests(&engine->execlists);
- execlists_unwind_incomplete_requests(execlists);
-
- wait_for_guc_preempt_report(engine);
-
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
- intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
- }
+ GUC_PREEMPT_FINISHED)
+ complete_preempt_context(engine);
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
guc_dequeue(engine);
}
+static struct i915_request *
+guc_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+
+ /*
+ * In GuC submission mode, preemption requests are queued from the
+ * tasklet via a worker. Even though the tasklet is now disabled, a
+ * worker may already be queued. Make sure that all workers scheduled
+ * before disabling the tasklet have completed before continuing with
+ * the reset.
+ */
+ if (engine->i915->guc.preempt_wq)
+ flush_workqueue(engine->i915->guc.preempt_wq);
+
+ return i915_gem_find_active_request(engine);
+}
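The ordering in guc_reset_prepare() is deliberate: stop the producer first, then drain what it already produced. In generic terms, with plain tasklet_disable() standing in for the driver's __tasklet_disable_sync_once():

        #include <linux/interrupt.h>
        #include <linux/workqueue.h>

        static void quiesce(struct tasklet_struct *t, struct workqueue_struct *wq)
        {
                tasklet_disable(t);     /* producer: no new work gets queued */
                flush_workqueue(wq);    /* drain workers queued before that */
        }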
+
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
@@ -876,8 +911,12 @@ static void guc_clients_doorbell_fini(struct intel_guc *guc)
__update_doorbell_desc(guc->preempt_client,
GUC_DOORBELL_INVALID);
}
- __destroy_doorbell(guc->execbuf_client);
- __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
+
+ if (guc->execbuf_client) {
+ __destroy_doorbell(guc->execbuf_client);
+ __update_doorbell_desc(guc->execbuf_client,
+ GUC_DOORBELL_INVALID);
+ }
}
/**
@@ -1090,7 +1129,8 @@ static void guc_clients_destroy(struct intel_guc *guc)
guc_client_free(client);
client = fetch_and_zero(&guc->execbuf_client);
- guc_client_free(client);
+ if (client)
+ guc_client_free(client);
}
/*
@@ -1119,7 +1159,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
WARN_ON(!guc_verify_doorbells(guc));
ret = guc_clients_create(guc);
if (ret)
- return ret;
+ goto err_pool;
for_each_engine(engine, dev_priv, id) {
guc->preempt_work[id].engine = engine;
@@ -1128,6 +1168,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
return 0;
+err_pool:
+ guc_stage_desc_pool_destroy(guc);
+ return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1142,7 +1185,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_clients_destroy(guc);
WARN_ON(!guc_verify_doorbells(guc));
- guc_stage_desc_pool_destroy(guc);
+ if (guc->stage_desc_pool)
+ guc_stage_desc_pool_destroy(guc);
}
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
@@ -1225,6 +1269,31 @@ static void guc_submission_unpark(struct intel_engine_cs *engine)
intel_engine_pin_breadcrumbs_irq(engine);
}
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+ /*
+ * We inherit a bunch of functions from execlists that we'd like
+ * to keep using:
+ *
+ * engine->submit_request = execlists_submit_request;
+ * engine->cancel_requests = execlists_cancel_requests;
+ * engine->schedule = execlists_schedule;
+ *
+ * But we need to override the actual submission backend in order
+ * to talk to the GuC.
+ */
+ intel_execlists_set_default_submission(engine);
+
+ engine->execlists.tasklet.func = guc_submission_tasklet;
+
+ engine->park = guc_submission_park;
+ engine->unpark = guc_submission_unpark;
+
+ engine->reset.prepare = guc_reset_prepare;
+
+ engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+}
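Routing the overrides through engine->set_default_submission, rather than open-coding them in intel_guc_submission_enable() as the hunk below removes, means the common reset path can restore the GuC backend simply by re-invoking the stored vfunc; that is also why the explicit intel_engines_reset_default_submission() call in intel_guc_submission_disable() can go.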
+
int intel_guc_submission_enable(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1263,14 +1332,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
guc_interrupts_capture(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct intel_engine_execlists * const execlists =
- &engine->execlists;
-
- execlists->tasklet.func = guc_submission_tasklet;
- engine->park = guc_submission_park;
- engine->unpark = guc_submission_unpark;
-
- engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+ engine->set_default_submission = guc_set_default_submission;
+ engine->set_default_submission(engine);
}
return 0;
@@ -1284,9 +1347,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_interrupts_release(dev_priv);
guc_clients_doorbell_fini(guc);
-
- /* Revert back to manual ELSP submission */
- intel_engines_reset_default_submission(dev_priv);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index a2fe7c8d4477..c22b3e18a0f5 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -47,6 +47,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_KABYLAKE(dev_priv))
return true;
+ if (IS_BROXTON(dev_priv))
+ return true;
return false;
}
@@ -90,6 +92,9 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
{
int ret;
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
if (!i915_modparams.enable_gvt) {
DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
return 0;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index d47e346bd49e..2fc7a0dd0df9 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -294,6 +294,7 @@ static void hangcheck_store_sample(struct intel_engine_cs *engine,
engine->hangcheck.seqno = hc->seqno;
engine->hangcheck.action = hc->action;
engine->hangcheck.stalled = hc->stalled;
+ engine->hangcheck.wedged = hc->wedged;
}
static enum intel_engine_hangcheck_action
@@ -368,6 +369,9 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
hc->stalled = time_after(jiffies,
engine->hangcheck.action_timestamp + timeout);
+ hc->wedged = time_after(jiffies,
+ engine->hangcheck.action_timestamp +
+ I915_ENGINE_WEDGED_TIMEOUT);
}
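Both stalled and wedged rely on the jiffies comparison idiom: time_after() compares modulo the counter width, so it stays correct across jiffies wrap-around where a plain `>` would not. A tiny sketch:

        #include <linux/jiffies.h>
        #include <linux/types.h>

        static bool timed_out(unsigned long start, unsigned long timeout)
        {
                return time_after(jiffies, start + timeout);    /* wrap-safe */
        }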
static void hangcheck_declare_hang(struct drm_i915_private *i915,
@@ -409,7 +413,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
gpu_error.hangcheck_work.work);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int hung = 0, stuck = 0;
+ unsigned int hung = 0, stuck = 0, wedged = 0;
if (!i915_modparams.enable_hangcheck)
return;
@@ -440,6 +444,17 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (hc.action != ENGINE_DEAD)
stuck |= intel_engine_flag(engine);
}
+
+ if (engine->hangcheck.wedged)
+ wedged |= intel_engine_flag(engine);
+ }
+
+ if (wedged) {
+ dev_err(dev_priv->drm.dev,
+ "GPU recovery timed out,"
+ " cancelling all in-flight rendering.\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(dev_priv);
}
if (hung)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d8cb53ef4351..a9076402dcb0 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -51,7 +51,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t enabled_bits;
+ u32 enabled_bits;
enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
@@ -59,6 +59,15 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
"HDMI port enabled, expecting disabled\n");
}
+static void
+assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
+ TRANS_DDI_FUNC_ENABLE,
+ "HDMI transcoder function enabled, expecting disabled\n");
+}
+
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
@@ -144,7 +153,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -199,7 +208,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -259,7 +268,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -317,7 +326,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -376,19 +385,16 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
- i915_reg_t data_reg;
int data_size = type == DP_SDP_VSC ?
VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
int i;
u32 val = I915_READ(ctl_reg);
- data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
-
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
@@ -442,7 +448,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+ u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
/* see comment above for the reason for this offset */
@@ -461,7 +467,8 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
@@ -491,6 +498,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
intel_hdmi->rgb_quant_range_selectable,
is_hdmi2_sink);
+ drm_hdmi_avi_infoframe_content_type(&frame.avi,
+ conn_state);
+
/* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
}
@@ -586,7 +596,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -727,7 +737,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -770,7 +780,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -823,7 +833,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -834,11 +844,11 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
u32 val = I915_READ(reg);
- assert_hdmi_port_disabled(intel_hdmi);
+ assert_hdmi_transcoder_func_disabled(dev_priv,
+ crtc_state->cpu_transcoder);
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@@ -856,7 +866,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -1161,33 +1171,16 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(intel_hdmi->hdmi_reg);
-
- if (!(tmp & SDVO_ENABLE))
- goto out;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else if (IS_CHERRYVIEW(dev_priv))
- *pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
+ ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -1421,8 +1414,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- temp &= ~SDVO_PIPE_B_SELECT;
- temp |= SDVO_ENABLE;
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
@@ -1577,14 +1570,23 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
/* check if we can do 8bpc */
status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
- /* if we can't do 8bpc we may still be able to do 12bpc */
- if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK && hdmi->has_hdmi_sink && !force_dvi)
- status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true, force_dvi);
+ if (hdmi->has_hdmi_sink && !force_dvi) {
+ /* if we can't do 8bpc we may still be able to do 12bpc */
+ if (status != MODE_OK && !HAS_GMCH_DISPLAY(dev_priv))
+ status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
+ true, force_dvi);
+
+ /* if we can't do 8,12bpc we may still be able to do 10bpc */
+ if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
+ status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
+ true, force_dvi);
+ }
return status;
}
-static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
+static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
+ int bpc)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->base.crtc->dev);
@@ -1596,6 +1598,9 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
if (HAS_GMCH_DISPLAY(dev_priv))
return false;
+ if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
+ return false;
+
if (crtc_state->pipe_bpp <= 8*3)
return false;
@@ -1603,7 +1608,7 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
return false;
/*
- * HDMI 12bpc affects the clocks, so it's only possible
+ * HDMI deep color affects the clocks, so it's only possible
* when not cloning with other encoder types.
*/
if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
@@ -1618,16 +1623,24 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
if (crtc_state->ycbcr420) {
const struct drm_hdmi_info *hdmi = &info->hdmi;
- if (!(hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+ if (bpc == 12 && !(hdmi->y420_dc_modes &
+ DRM_EDID_YCBCR420_DC_36))
+ return false;
+ else if (bpc == 10 && !(hdmi->y420_dc_modes &
+ DRM_EDID_YCBCR420_DC_30))
return false;
} else {
- if (!(info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36))
+ if (bpc == 12 && !(info->edid_hdmi_dc_modes &
+ DRM_EDID_HDMI_DC_36))
+ return false;
+ else if (bpc == 10 && !(info->edid_hdmi_dc_modes &
+ DRM_EDID_HDMI_DC_30))
return false;
}
}
/* Display WA #1139: glk */
- if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
+ if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
crtc_state->base.adjusted_mode.htotal > 5460)
return false;
@@ -1637,7 +1650,8 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *config,
- int *clock_12bpc, int *clock_8bpc)
+ int *clock_12bpc, int *clock_10bpc,
+ int *clock_8bpc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
@@ -1649,6 +1663,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
/* YCBCR420 TMDS rate requirement is half the pixel clock */
config->port_clock /= 2;
*clock_12bpc /= 2;
+ *clock_10bpc /= 2;
*clock_8bpc /= 2;
config->ycbcr420 = true;
@@ -1676,6 +1691,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+ int clock_10bpc = clock_8bpc * 5 / 4;
int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
@@ -1702,12 +1718,14 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
pipe_config->pixel_multiplier = 2;
clock_8bpc *= 2;
+ clock_10bpc *= 2;
clock_12bpc *= 2;
}
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
- &clock_12bpc, &clock_8bpc)) {
+ &clock_12bpc, &clock_10bpc,
+ &clock_8bpc)) {
DRM_ERROR("Can't support YCBCR420 output\n");
return false;
}
@@ -1725,18 +1743,25 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
}
/*
- * HDMI is either 12 or 8, so if the display lets 10bpc sneak
- * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
- * outputs. We also need to check that the higher clock still fits
- * within limits.
+ * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
+ * to check that the higher clock still fits within limits.
*/
- if (hdmi_12bpc_possible(pipe_config) &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK) {
+ if (hdmi_deep_color_possible(pipe_config, 12) &&
+ hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
+ true, force_dvi) == MODE_OK) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
/* Need to adjust the port link by 1.5x for 12bpc. */
pipe_config->port_clock = clock_12bpc;
+ } else if (hdmi_deep_color_possible(pipe_config, 10) &&
+ hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
+ true, force_dvi) == MODE_OK) {
+ DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
+ desired_bpp = 10 * 3;
+
+ /* Need to adjust the port link by 1.25x for 10bpc. */
+ pipe_config->port_clock = clock_10bpc;
} else {
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;
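The clock multipliers fall straight out of the bit depths: relative to 8 bpc, a 10 bpc stream carries 10/8 = 1.25x the data and a 12 bpc stream 12/8 = 1.5x, hence clock_10bpc = clock_8bpc * 5 / 4, clock_12bpc = clock_8bpc * 3 / 2, and the port link clock is scaled by the same factor.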
@@ -1874,6 +1899,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
connected = true;
}
+ cec_notifier_set_phys_addr_from_edid(intel_hdmi->cec_notifier, edid);
+
return connected;
}
@@ -1882,6 +1909,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -1897,6 +1925,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ if (status != connector_status_connected)
+ cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
+
return status;
}
@@ -2037,6 +2068,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
static void intel_hdmi_destroy(struct drm_connector *connector)
{
+ if (intel_attached_hdmi(connector)->cec_notifier)
+ cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
kfree(to_intel_connector(connector)->detect_edid);
drm_connector_cleanup(connector);
kfree(connector);
@@ -2071,6 +2104,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
+ drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
@@ -2257,7 +2291,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
- else if (IS_ICELAKE(dev_priv))
+ else if (HAS_PCH_ICP(dev_priv))
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
@@ -2352,10 +2386,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
+ if (IS_G45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
+
+ intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev,
+ port_identifier(port));
+ if (!intel_hdmi->cec_notifier)
+ DRM_DEBUG_KMS("CEC notifier get failed\n");
}
void intel_hdmi_init(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 43aa92beff2a..648a13c6043c 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -77,37 +77,6 @@
*/
/**
- * intel_hpd_port - return port hard associated with certain pin.
- * @dev_priv: private driver data pointer
- * @pin: the hpd pin to get associated port
- *
- * Return port that is associated with @pin and PORT_NONE if no port is
- * hard associated with that @pin.
- */
-enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
- enum hpd_pin pin)
-{
- switch (pin) {
- case HPD_PORT_A:
- return PORT_A;
- case HPD_PORT_B:
- return PORT_B;
- case HPD_PORT_C:
- return PORT_C;
- case HPD_PORT_D:
- return PORT_D;
- case HPD_PORT_E:
- if (IS_CNL_WITH_PORT_F(dev_priv))
- return PORT_F;
- return PORT_E;
- case HPD_PORT_F:
- return PORT_F;
- default:
- return PORT_NONE; /* no port for this pin */
- }
-}
-
-/**
* intel_hpd_pin_default - return default pin associated with certain port.
* @dev_priv: private driver data pointer
* @port: the hpd port to get associated pin
@@ -241,25 +210,25 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
- int i;
+ enum hpd_pin pin;
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
- for_each_hpd_pin(i) {
+ for_each_hpd_pin(pin) {
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+ if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
continue;
- dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+ dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_connector *intel_connector = to_intel_connector(connector);
- if (intel_connector->encoder->hpd_pin == i) {
+ if (intel_connector->encoder->hpd_pin == pin) {
if (connector->polled != intel_connector->polled)
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
connector->name);
@@ -301,13 +270,18 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
return true;
}
+static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
+{
+ return intel_encoder_is_dig_port(encoder) &&
+ enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+}
+
static void i915_digport_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, hotplug.dig_port_work);
u32 long_port_mask, short_port_mask;
- struct intel_digital_port *intel_dig_port;
- int i;
+ struct intel_encoder *encoder;
u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock);
@@ -317,27 +291,27 @@ static void i915_digport_work_func(struct work_struct *work)
dev_priv->hotplug.short_port_mask = 0;
spin_unlock_irq(&dev_priv->irq_lock);
- for (i = 0; i < I915_MAX_PORTS; i++) {
- bool valid = false;
- bool long_hpd = false;
- intel_dig_port = dev_priv->hotplug.irq_port[i];
- if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_digital_port *dig_port;
+ enum port port = encoder->port;
+ bool long_hpd, short_hpd;
+ enum irqreturn ret;
+
+ if (!intel_encoder_has_hpd_pulse(encoder))
continue;
- if (long_port_mask & (1 << i)) {
- valid = true;
- long_hpd = true;
- } else if (short_port_mask & (1 << i))
- valid = true;
+ long_hpd = long_port_mask & BIT(port);
+ short_hpd = short_port_mask & BIT(port);
- if (valid) {
- enum irqreturn ret;
+ if (!long_hpd && !short_hpd)
+ continue;
- ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
- if (ret == IRQ_NONE) {
- /* fall back to old school hpd */
- old_bits |= (1 << intel_dig_port->base.hpd_pin);
- }
+ dig_port = enc_to_dig_port(&encoder->base);
+
+ ret = dig_port->hpd_pulse(dig_port, long_hpd);
+ if (ret == IRQ_NONE) {
+ /* fall back to old school hpd */
+ old_bits |= BIT(encoder->hpd_pin);
}
}
@@ -418,26 +392,24 @@ static void i915_hotplug_work_func(struct work_struct *work)
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask)
{
- int i;
- enum port port;
+ struct intel_encoder *encoder;
bool storm_detected = false;
bool queue_dig = false, queue_hp = false;
- bool is_dig_port;
if (!pin_mask)
return;
spin_lock(&dev_priv->irq_lock);
- for_each_hpd_pin(i) {
- if (!(BIT(i) & pin_mask))
- continue;
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ enum hpd_pin pin = encoder->hpd_pin;
+ bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
- port = intel_hpd_pin_to_port(dev_priv, i);
- is_dig_port = port != PORT_NONE &&
- dev_priv->hotplug.irq_port[port];
+ if (!(BIT(pin) & pin_mask))
+ continue;
- if (is_dig_port) {
- bool long_hpd = long_mask & BIT(i);
+ if (has_hpd_pulse) {
+ bool long_hpd = long_mask & BIT(pin);
+ enum port port = encoder->port;
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
long_hpd ? "long" : "short");
@@ -455,7 +427,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
}
}
- if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+ if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
@@ -463,20 +435,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* interrupts on saner platforms.
*/
WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
- "Received HPD interrupt on pin %d although disabled\n", i);
+ "Received HPD interrupt on pin %d although disabled\n", pin);
continue;
}
- if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+ if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
continue;
- if (!is_dig_port) {
- dev_priv->hotplug.event_bits |= BIT(i);
+ if (!has_hpd_pulse) {
+ dev_priv->hotplug.event_bits |= BIT(pin);
queue_hp = true;
}
- if (intel_hpd_irq_storm_detect(dev_priv, i)) {
- dev_priv->hotplug.event_bits &= ~BIT(i);
+ if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
+ dev_priv->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
}
}
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 291285277403..ffcad5fad6a7 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -32,6 +32,14 @@ void intel_huc_init_early(struct intel_huc *huc)
intel_huc_fw_init_early(huc);
}
+int intel_huc_init_misc(struct intel_huc *huc)
+{
+ struct drm_i915_private *i915 = huc_to_i915(huc);
+
+ intel_uc_fw_fetch(i915, &huc->fw);
+ return 0;
+}
+
/**
* intel_huc_auth() - Authenticate HuC uCode
* @huc: intel_huc structure
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index aa854907abac..7e41d870b509 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -36,9 +36,15 @@ struct intel_huc {
};
void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_init_misc(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
int intel_huc_check_status(struct intel_huc *huc);
+static inline void intel_huc_fini_misc(struct intel_huc *huc)
+{
+ intel_uc_fw_fini(&huc->fw);
+}
+
static inline int intel_huc_sanitize(struct intel_huc *huc)
{
intel_uc_fw_sanitize(&huc->fw);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index e6875509bcd9..bef32b7c248e 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -77,12 +77,12 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
};
static const struct gmbus_pin gmbus_pins_icp[] = {
- [GMBUS_PIN_1_BXT] = { "dpa", GPIOA },
- [GMBUS_PIN_2_BXT] = { "dpb", GPIOB },
- [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOC },
- [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOD },
- [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOE },
- [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOF },
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+ [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+ [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+ [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
};
/* pin is expected to be valid */
@@ -361,15 +361,39 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
return ret;
}
+static inline
+unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+{
+ return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+ GMBUS_BYTE_COUNT_MAX;
+}
+
static int
gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
unsigned short addr, u8 *buf, unsigned int len,
- u32 gmbus1_index)
+ u32 gmbus0_reg, u32 gmbus1_index)
{
+ unsigned int size = len;
+ bool burst_read = len > gmbus_max_xfer_size(dev_priv);
+ bool extra_byte_added = false;
+
+ if (burst_read) {
+ /*
+ * Per the HW spec, a 512 byte read requires reading one extra
+ * byte; the extra byte read is then ignored.
+ */
+ if (len == 512) {
+ extra_byte_added = true;
+ len++;
+ }
+ size = len % 256 + 256;
+ I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
+ }
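	/*
	 * Editorial worked example, not part of the patch: len = 767 gives
	 * size = 767 % 256 + 256 = 511, while len = 512 first grows to 513
	 * via the extra byte, giving size = 513 % 256 + 256 = 257.
	 */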
+
I915_WRITE_FW(GMBUS1,
gmbus1_index |
GMBUS_CYCLE_WAIT |
- (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (size << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
@@ -382,17 +406,34 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
val = I915_READ_FW(GMBUS3);
do {
+ if (extra_byte_added && len == 1)
+ break;
+
*buf++ = val & 0xff;
val >>= 8;
} while (--len && ++loop < 4);
+
+ if (burst_read && len == size - 4)
+ /* Reset the override bit */
+ I915_WRITE_FW(GMBUS0, gmbus0_reg);
}
return 0;
}
+/*
+ * The HW spec says that a 512 byte burst read needs special treatment, but
+ * is silent on other multiples of 256 bytes, and no I2C slave supporting
+ * such lengthy burst reads was available for experiments.
+ *
+ * So, until HW support is clarified, cap the burst read length at 767 bytes
+ * to avoid lengths that are a multiple of 256 bytes other than 512.
+ */
+#define INTEL_GMBUS_BURST_READ_MAX_LEN 767U
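A standalone editorial sketch of the chunking this cap produces; the 767 mirrors the define above and the loop mirrors the min() in gmbus_xfer_read() below:

	#include <stdio.h>

	int main(void)
	{
		unsigned int rx_size = 1000, max = 767U;

		while (rx_size) {
			unsigned int len = rx_size < max ? rx_size : max;

			printf("chunk of %u bytes\n", len);	/* 767, then 233 */
			rx_size -= len;
		}
		return 0;
	}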
+
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
- u32 gmbus1_index)
+ u32 gmbus0_reg, u32 gmbus1_index)
{
u8 *buf = msg->buf;
unsigned int rx_size = msg->len;
@@ -400,10 +441,13 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+ if (HAS_GMBUS_BURST_READ(dev_priv))
+ len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
+ else
+ len = min(rx_size, gmbus_max_xfer_size(dev_priv));
- ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
- buf, len, gmbus1_index);
+ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len,
+ gmbus0_reg, gmbus1_index);
if (ret)
return ret;
@@ -462,7 +506,7 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+ len = min(tx_size, gmbus_max_xfer_size(dev_priv));
ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
gmbus1_index);
@@ -491,7 +535,8 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
}
static int
-gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
+ u32 gmbus0_reg)
{
u32 gmbus1_index = 0;
u32 gmbus5 = 0;
@@ -509,7 +554,8 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
I915_WRITE_FW(GMBUS5, gmbus5);
if (msgs[1].flags & I2C_M_RD)
- ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
+ gmbus1_index);
else
ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);
@@ -544,10 +590,12 @@ retry:
for (; i < num; i += inc) {
inc = 1;
if (gmbus_is_index_xfer(msgs, i, num)) {
- ret = gmbus_index_xfer(dev_priv, &msgs[i]);
+ ret = gmbus_index_xfer(dev_priv, &msgs[i],
+ gmbus0_source | bus->reg0);
inc = 2; /* an index transmission is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
- ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+ ret = gmbus_xfer_read(dev_priv, &msgs[i],
+ gmbus0_source | bus->reg0, 0);
} else {
ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
}
@@ -771,7 +819,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (HAS_PCH_NOP(dev_priv))
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 6269750e2b54..cdf19553ffac 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -62,6 +62,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
@@ -126,9 +127,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return platdev;
}
- pm_runtime_forbid(&platdev->dev);
- pm_runtime_set_active(&platdev->dev);
- pm_runtime_enable(&platdev->dev);
+ pm_runtime_no_callbacks(&platdev->dev);
return platdev;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7c4c8fb1dae4..174479232e94 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -137,6 +137,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_render_state.h"
+#include "i915_vgpu.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
@@ -164,7 +165,8 @@
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine,
+ struct intel_context *ce);
static void execlists_init_reg_state(u32 *reg_state,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
@@ -189,12 +191,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
!i915_request_completed(last));
}
-/**
- * intel_lr_context_descriptor_update() - calculate & cache the descriptor
- * descriptor for a pinned context
- * @ctx: Context to work on
- * @engine: Engine the descriptor will be used with
- *
+/*
* The context descriptor encodes various attributes of a context,
* including its GTT address and some flags. Because it's fairly
* expensive to calculate, we'll just do it once and cache the result,
@@ -204,7 +201,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
*
* bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-52: ctx ID, a globally unique tag
+ * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
* bits 53-54: mbz, reserved for use by hardware
* bits 55-63: group ID, currently unused and set to 0
*
@@ -222,9 +219,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
*/
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc;
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -237,6 +234,11 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
/* bits 12-31 */
GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
+ /*
+ * The following 32bits are copied into the OA reports (dword 2).
+ * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
+ * anything below.
+ */
if (INTEL_GEN(ctx->i915) >= 11) {
GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
@@ -271,7 +273,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
find_priolist:
/* most positive priority is scheduled first, equal priorities fifo */
rb = NULL;
- parent = &execlists->queue.rb_node;
+ parent = &execlists->queue.rb_root.rb_node;
while (*parent) {
rb = *parent;
p = to_priolist(rb);
@@ -309,10 +311,7 @@ find_priolist:
p->priority = prio;
INIT_LIST_HEAD(&p->requests);
rb_link_node(&p->node, rb, parent);
- rb_insert_color(&p->node, &execlists->queue);
-
- if (first)
- execlists->first = &p->node;
+ rb_insert_color_cached(&p->node, &execlists->queue, first);
return p;
}
@@ -418,9 +417,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct i915_request *rq)
{
- struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
+ struct intel_context *ce = rq->hw_context;
struct i915_hw_ppgtt *ppgtt =
- rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -430,7 +429,7 @@ static u64 execlists_update_context(struct i915_request *rq)
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
*/
- if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
+ if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state);
return ce->lrc_desc;
@@ -454,6 +453,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
unsigned int n;
/*
+ * We can skip acquiring intel_runtime_pm_get() here as it was taken
+ * on our behalf by the request (see i915_gem_mark_busy()) and it will
+ * not be relinquished until the device is idle (see
+ * i915_gem_idle_work_handler()). As a precaution, we make sure
+ * that all ELSP are drained, i.e. we have processed the CSB,
+ * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ */
+ GEM_BUG_ON(!engine->i915->gt.awake);
+
+ /*
* ELSQ note: the submit queue is not cleared after being submitted
* to the HW so we need to make sure we always clean it up. This is
* currently ensured by the fact that we always write the same number
@@ -495,14 +504,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
-static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
+static bool ctx_single_port_submission(const struct intel_context *ce)
{
return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- i915_gem_context_force_single_submission(ctx));
+ i915_gem_context_force_single_submission(ce->gem_context));
}
-static bool can_merge_ctx(const struct i915_gem_context *prev,
- const struct i915_gem_context *next)
+static bool can_merge_ctx(const struct intel_context *prev,
+ const struct intel_context *next)
{
if (prev != next)
return false;
@@ -552,11 +561,24 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
if (execlists->ctrl_reg)
writel(EL_CTRL_LOAD, execlists->ctrl_reg);
- execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
- execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
+ execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+}
+
+static void complete_preempt_context(struct intel_engine_execlists *execlists)
+{
+ GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+ if (inject_preempt_hang(execlists))
+ return;
+
+ execlists_cancel_port_requests(execlists);
+ __unwind_incomplete_requests(container_of(execlists,
+ struct intel_engine_cs,
+ execlists));
}
-static bool __execlists_dequeue(struct intel_engine_cs *engine)
+static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
@@ -566,9 +588,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
struct rb_node *rb;
bool submit = false;
- lockdep_assert_held(&engine->timeline.lock);
-
- /* Hardware submission is through 2 ports. Conceptually each port
+ /*
+ * Hardware submission is through 2 ports. Conceptually each port
* has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
* static for a context, and unique to each, so we only execute
* requests belonging to a single context from each ring. RING_HEAD
@@ -589,9 +610,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- rb = execlists->first;
- GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-
if (last) {
/*
* Don't resubmit or switch until all outstanding
@@ -602,8 +620,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_USER));
GEM_BUG_ON(!port_count(&port[0]));
- if (port_count(&port[0]) > 1)
- return false;
/*
* If we write to ELSP a second time before the HW has had
@@ -613,11 +629,11 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* the HW to indicate that it has had a chance to respond.
*/
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
- return false;
+ return;
if (need_preempt(engine, last, execlists->queue_priority)) {
inject_preempt_context(engine);
- return false;
+ return;
}
/*
@@ -642,7 +658,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* priorities of the ports haven't been switch.
*/
if (port_count(&port[1]))
- return false;
+ return;
/*
* WaIdleLiteRestore:bdw,skl
@@ -655,7 +671,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
last->tail = last->wa_tail;
}
- while (rb) {
+ while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -671,7 +687,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* second request, and so we never need to tell the
* hardware about the first.
*/
- if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
+ if (last &&
+ !can_merge_ctx(rq->hw_context, last->hw_context)) {
/*
* If we are on the second port and cannot
* combine this request with the last, then we
@@ -690,14 +707,14 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* the same context (even though a different
* request) to the second port.
*/
- if (ctx_single_port_submission(last->ctx) ||
- ctx_single_port_submission(rq->ctx)) {
+ if (ctx_single_port_submission(last->hw_context) ||
+ ctx_single_port_submission(rq->hw_context)) {
__list_del_many(&p->requests,
&rq->sched.link);
goto done;
}
- GEM_BUG_ON(last->ctx == rq->ctx);
+ GEM_BUG_ON(last->hw_context == rq->hw_context);
if (submit)
port_assign(port, last);
@@ -713,8 +730,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
submit = true;
}
- rb = rb_next(rb);
- rb_erase(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
@@ -740,35 +756,23 @@ done:
execlists->queue_priority =
port != execlists->port ? rq_prio(last) : INT_MIN;
- execlists->first = rb;
- if (submit)
+ if (submit) {
port_assign(port, last);
+ execlists_submit_ports(engine);
+ }
/* We must always keep the beast fed if we have work piled up */
- GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+ GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
+ !port_isset(execlists->port));
/* Re-evaluate the executing context setup after each preemptive kick */
if (last)
execlists_user_begin(execlists, execlists->port);
- return submit;
-}
-
-static void execlists_dequeue(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- unsigned long flags;
- bool submit;
-
- spin_lock_irqsave(&engine->timeline.lock, flags);
- submit = __execlists_dequeue(engine);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
- if (submit)
- execlists_submit_ports(engine);
-
- GEM_BUG_ON(port_isset(execlists->port) &&
- !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
+ /* If the engine is now idle, so should be the flag; and vice versa. */
+ GEM_BUG_ON(execlists_is_active(&engine->execlists,
+ EXECLISTS_ACTIVE_USER) ==
+ !port_isset(engine->execlists.port));
}
void
@@ -799,82 +803,27 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
port++;
}
- execlists_user_end(execlists);
+ execlists_clear_all_active(execlists);
}
-static void clear_gtiir(struct intel_engine_cs *engine)
+static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
- struct drm_i915_private *dev_priv = engine->i915;
- int i;
-
/*
- * Clear any pending interrupt state.
- *
- * We do it twice out of paranoia that some of the IIR are
- * double buffered, and so if we only reset it once there may
- * still be an interrupt pending.
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
*/
- if (INTEL_GEN(dev_priv) >= 11) {
- static const struct {
- u8 bank;
- u8 bit;
- } gen11_gtiir[] = {
- [RCS] = {0, GEN11_RCS0},
- [BCS] = {0, GEN11_BCS},
- [_VCS(0)] = {1, GEN11_VCS(0)},
- [_VCS(1)] = {1, GEN11_VCS(1)},
- [_VCS(2)] = {1, GEN11_VCS(2)},
- [_VCS(3)] = {1, GEN11_VCS(3)},
- [_VECS(0)] = {1, GEN11_VECS(0)},
- [_VECS(1)] = {1, GEN11_VECS(1)},
- };
- unsigned long irqflags;
-
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir));
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- for (i = 0; i < 2; i++) {
- gen11_reset_one_iir(dev_priv,
- gen11_gtiir[engine->id].bank,
- gen11_gtiir[engine->id].bit);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- } else {
- static const u8 gtiir[] = {
- [RCS] = 0,
- [BCS] = 0,
- [VCS] = 1,
- [VCS2] = 1,
- [VECS] = 3,
- };
-
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
-
- for (i = 0; i < 2; i++) {
- I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
- engine->irq_keep_mask);
- POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
- }
- GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
- engine->irq_keep_mask);
- }
+ execlists->csb_head = execlists->csb_write_reset;
+ WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
}
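	/*
	 * Editorial worked example (HWSP case), not part of the patch: with
	 * GEN8_CSB_ENTRIES == 6, csb_write_reset == 5, so after a reset
	 * head == tail == 5 and process_csb() returns immediately; the HW's
	 * first write lands in entry 0, and our first "++head" wraps 5 -> 0
	 * to match.
	 */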
-static void reset_irq(struct intel_engine_cs *engine)
+static void nop_submission_tasklet(unsigned long data)
{
- /* Mark all CS interrupts as complete */
- smp_store_mb(engine->execlists.active, 0);
- synchronize_hardirq(engine->i915->drm.irq);
-
- clear_gtiir(engine);
-
- /*
- * The port is checked prior to scheduling a tasklet, but
- * just in case we have suspended the tasklet to do the
- * wedging make sure that when it wakes, it decides there
- * is no work to do by clearing the irq_posted bit.
- */
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ /* The driver is wedged; don't process any more events. */
}
static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -901,13 +850,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
- local_irq_save(flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
execlists_cancel_port_requests(execlists);
- reset_irq(engine);
-
- spin_lock(&engine->timeline.lock);
+ execlists_user_end(execlists);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
@@ -917,8 +864,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
}
/* Flush the queued requests to the timeline list (for retiring). */
- rb = execlists->first;
- while (rb) {
+ while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
@@ -928,8 +874,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
__i915_request_submit(rq);
}
- rb = rb_next(rb);
- rb_erase(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
@@ -938,221 +883,198 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority = INT_MIN;
- execlists->queue = RB_ROOT;
- execlists->first = NULL;
+ execlists->queue = RB_ROOT_CACHED;
GEM_BUG_ON(port_isset(execlists->port));
- spin_unlock(&engine->timeline.lock);
+ GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+ execlists->tasklet.func = nop_submission_tasklet;
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
+static inline bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+ return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
+static void process_csb(struct intel_engine_cs *engine)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- struct drm_i915_private *dev_priv = engine->i915;
- bool fw = false;
+ const u32 * const buf = execlists->csb_status;
+ u8 head, tail;
/*
- * We can skip acquiring intel_runtime_pm_get() here as it was taken
- * on our behalf by the request (see i915_gem_mark_busy()) and it will
- * not be relinquished until the device is idle (see
- * i915_gem_idle_work_handler()). As a precaution, we make sure
- * that all ELSP are drained i.e. we have processed the CSB,
- * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ * Note that csb_write, csb_status may be either in HWSP or mmio.
+ * When reading from the csb_write mmio register, we have to be
+ * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
+ * the low 4bits. As it happens we know the next 4bits are always
+ * zero and so we can simply mask off the low u8 of the register
+ * and treat it identically to reading from the HWSP (without having
+ * to use explicit shifting and masking, and probably bifurcating
+ * the code to handle the legacy mmio read).
*/
- GEM_BUG_ON(!dev_priv->gt.awake);
+ head = execlists->csb_head;
+ tail = READ_ONCE(*execlists->csb_write);
+ GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
+ if (unlikely(head == tail))
+ return;
/*
- * Prefer doing test_and_clear_bit() as a two stage operation to avoid
- * imposing the cost of a locked atomic transaction when submitting a
- * new request (outside of the context-switch interrupt).
+ * Hopefully paired with a wmb() in HW!
+ *
+ * We must complete the read of the write pointer before any reads
+ * from the CSB, so that we do not see stale values. Without an rmb
+ * (lfence) the HW may speculatively perform the CSB[] reads *before*
+ * we perform the READ_ONCE(*csb_write).
*/
- while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
- /* The HWSP contains a (cacheable) mirror of the CSB */
- const u32 *buf =
- &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
- unsigned int head, tail;
+ rmb();
- if (unlikely(execlists->csb_use_mmio)) {
- buf = (u32 * __force)
- (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
- execlists->csb_head = -1; /* force mmio read of CSB ptrs */
- }
+ do {
+ struct i915_request *rq;
+ unsigned int status;
+ unsigned int count;
- /* Clear before reading to catch new interrupts */
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- smp_mb__after_atomic();
+ if (++head == GEN8_CSB_ENTRIES)
+ head = 0;
- if (unlikely(execlists->csb_head == -1)) { /* following a reset */
- if (!fw) {
- intel_uncore_forcewake_get(dev_priv,
- execlists->fw_domains);
- fw = true;
- }
+ /*
+ * We are flying near dragons again.
+ *
+ * We hold a reference to the request in execlist_port[]
+ * but no more than that. We are operating in softirq
+ * context and so cannot hold any mutex or sleep. That
+ * prevents us stopping the requests we are processing
+ * in port[] from being retired simultaneously (the
+ * breadcrumb will be complete before we see the
+ * context-switch). As we only hold the reference to the
+ * request, any pointer chasing underneath the request
+ * is subject to a potential use-after-free. Thus we
+ * store all of the bookkeeping within port[] as
+ * required, and avoid using unguarded pointers beneath
+ * request itself. The same applies to the atomic
+ * status notifier.
+ */
- head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
- tail = GEN8_CSB_WRITE_PTR(head);
- head = GEN8_CSB_READ_PTR(head);
- execlists->csb_head = head;
- } else {
- const int write_idx =
- intel_hws_csb_write_index(dev_priv) -
- I915_HWS_CSB_BUF0_INDEX;
+ GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
+ engine->name, head,
+ buf[2 * head + 0], buf[2 * head + 1],
+ execlists->active);
+
+ status = buf[2 * head];
+ if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
+ GEN8_CTX_STATUS_PREEMPTED))
+ execlists_set_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+ if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
+ execlists_clear_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+
+ if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
+ continue;
+
+ /* We should never get a COMPLETED | IDLE_ACTIVE! */
+ GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
- head = execlists->csb_head;
- tail = READ_ONCE(buf[write_idx]);
- rmb(); /* Hopefully paired with a wmb() in HW */
+ if (status & GEN8_CTX_STATUS_COMPLETE &&
+ buf[2*head + 1] == execlists->preempt_complete_status) {
+ GEM_TRACE("%s preempt-idle\n", engine->name);
+ complete_preempt_context(execlists);
+ continue;
}
- GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
- engine->name,
- head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
- tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
- while (head != tail) {
- struct i915_request *rq;
- unsigned int status;
- unsigned int count;
+ if (status & GEN8_CTX_STATUS_PREEMPTED &&
+ execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT))
+ continue;
- if (++head == GEN8_CSB_ENTRIES)
- head = 0;
+ GEM_BUG_ON(!execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_USER));
- /* We are flying near dragons again.
- *
- * We hold a reference to the request in execlist_port[]
- * but no more than that. We are operating in softirq
- * context and so cannot hold any mutex or sleep. That
- * prevents us stopping the requests we are processing
- * in port[] from being retired simultaneously (the
- * breadcrumb will be complete before we see the
- * context-switch). As we only hold the reference to the
- * request, any pointer chasing underneath the request
- * is subject to a potential use-after-free. Thus we
- * store all of the bookkeeping within port[] as
- * required, and avoid using unguarded pointers beneath
- * request itself. The same applies to the atomic
- * status notifier.
+ rq = port_unpack(port, &count);
+ GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
+ engine->name,
+ port->context_id, count,
+ rq ? rq->global_seqno : 0,
+ rq ? rq->fence.context : 0,
+ rq ? rq->fence.seqno : 0,
+ intel_engine_get_seqno(engine),
+ rq ? rq_prio(rq) : 0);
+
+ /* Check the context/desc id for this event matches */
+ GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+
+ GEM_BUG_ON(count == 0);
+ if (--count == 0) {
+ /*
+ * On the final event corresponding to the
+ * submission of this context, we expect either
+ * an element-switch event or a completion
+ * event (and on completion, the active-idle
+ * marker). No more preemptions, lite-restore
+ * or otherwise.
*/
+ GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+ GEM_BUG_ON(port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
+ GEM_BUG_ON(!port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
- status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
- GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
- engine->name, head,
- status, buf[2*head + 1],
- execlists->active);
-
- if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
- GEN8_CTX_STATUS_PREEMPTED))
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
- if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
-
- if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
- continue;
-
- /* We should never get a COMPLETED | IDLE_ACTIVE! */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
-
- if (status & GEN8_CTX_STATUS_COMPLETE &&
- buf[2*head + 1] == execlists->preempt_complete_status) {
- GEM_TRACE("%s preempt-idle\n", engine->name);
-
- execlists_cancel_port_requests(execlists);
- execlists_unwind_incomplete_requests(execlists);
-
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT));
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT);
- continue;
- }
-
- if (status & GEN8_CTX_STATUS_PREEMPTED &&
- execlists_is_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT))
- continue;
+ /*
+ * We rely on the hardware being strongly
+ * ordered, that the breadcrumb write is
+ * coherent (visible from the CPU) before the
+ * user interrupt and CSB is processed.
+ */
+ GEM_BUG_ON(!i915_request_completed(rq));
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_USER));
+ execlists_context_schedule_out(rq,
+ INTEL_CONTEXT_SCHEDULE_OUT);
+ i915_request_put(rq);
- rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
- engine->name,
- port->context_id, count,
- rq ? rq->global_seqno : 0,
- rq ? rq->fence.context : 0,
- rq ? rq->fence.seqno : 0,
- intel_engine_get_seqno(engine),
- rq ? rq_prio(rq) : 0);
+ GEM_TRACE("%s completed ctx=%d\n",
+ engine->name, port->context_id);
- /* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
-
- GEM_BUG_ON(count == 0);
- if (--count == 0) {
- /*
- * On the final event corresponding to the
- * submission of this context, we expect either
- * an element-switch event or a completion
- * event (and on completion, the active-idle
- * marker). No more preemptions, lite-restore
- * or otherwise.
- */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
- GEM_BUG_ON(port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
- GEM_BUG_ON(!port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
-
- /*
- * We rely on the hardware being strongly
- * ordered, that the breadcrumb write is
- * coherent (visible from the CPU) before the
- * user interrupt and CSB is processed.
- */
- GEM_BUG_ON(!i915_request_completed(rq));
-
- execlists_context_schedule_out(rq,
- INTEL_CONTEXT_SCHEDULE_OUT);
- i915_request_put(rq);
-
- GEM_TRACE("%s completed ctx=%d\n",
- engine->name, port->context_id);
-
- port = execlists_port_complete(execlists, port);
- if (port_isset(port))
- execlists_user_begin(execlists, port);
- else
- execlists_user_end(execlists);
- } else {
- port_set(port, port_pack(rq, count));
- }
+ port = execlists_port_complete(execlists, port);
+ if (port_isset(port))
+ execlists_user_begin(execlists, port);
+ else
+ execlists_user_end(execlists);
+ } else {
+ port_set(port, port_pack(rq, count));
}
+ } while (head != tail);
- if (head != execlists->csb_head) {
- execlists->csb_head = head;
- writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
- dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
- }
- }
+ execlists->csb_head = head;
+}
- if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
+static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
+{
+ lockdep_assert_held(&engine->timeline.lock);
+
+ process_csb(engine);
+ if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
execlists_dequeue(engine);
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ unsigned long flags;
- if (fw)
- intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+ GEM_TRACE("%s awake?=%d, active=%x\n",
+ engine->name,
+ engine->i915->gt.awake,
+ engine->execlists.active);
- /* If the engine is now idle, so should be the flag; and vice versa. */
- GEM_BUG_ON(execlists_is_active(&engine->execlists,
- EXECLISTS_ACTIVE_USER) ==
- !port_isset(engine->execlists.port));
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ __execlists_submission_tasklet(engine);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static void queue_request(struct intel_engine_cs *engine,
@@ -1163,16 +1085,30 @@ static void queue_request(struct intel_engine_cs *engine,
&lookup_priolist(engine, prio)->requests);
}
-static void __submit_queue(struct intel_engine_cs *engine, int prio)
+static void __update_queue(struct intel_engine_cs *engine, int prio)
{
engine->execlists.queue_priority = prio;
- tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
+static void __submit_queue_imm(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ if (reset_in_progress(execlists))
+ return; /* defer until we restart the engine following reset */
+
+ if (execlists->tasklet.func == execlists_submission_tasklet)
+ __execlists_submission_tasklet(engine);
+ else
+ tasklet_hi_schedule(&execlists->tasklet);
}
static void submit_queue(struct intel_engine_cs *engine, int prio)
{
- if (prio > engine->execlists.queue_priority)
- __submit_queue(engine, prio);
+ if (prio > engine->execlists.queue_priority) {
+ __update_queue(engine, prio);
+ __submit_queue_imm(engine);
+ }
}
static void execlists_submit_request(struct i915_request *request)
@@ -1184,11 +1120,12 @@ static void execlists_submit_request(struct i915_request *request)
spin_lock_irqsave(&engine->timeline.lock, flags);
queue_request(engine, &request->sched, rq_prio(request));
- submit_queue(engine, rq_prio(request));
- GEM_BUG_ON(!engine->execlists.first);
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
GEM_BUG_ON(list_empty(&request->sched.link));
+ submit_queue(engine, rq_prio(request));
+
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
@@ -1315,13 +1252,40 @@ static void execlists_schedule(struct i915_request *request,
}
if (prio > engine->execlists.queue_priority &&
- i915_sw_fence_done(&sched_to_request(node)->submit))
- __submit_queue(engine, prio);
+ i915_sw_fence_done(&sched_to_request(node)->submit)) {
+ /* defer submission until after all of our updates */
+ __update_queue(engine, prio);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
}
spin_unlock_irq(&engine->timeline.lock);
}
+static void execlists_context_destroy(struct intel_context *ce)
+{
+ GEM_BUG_ON(ce->pin_count);
+
+ if (!ce->state)
+ return;
+
+ intel_ring_free(ce->ring);
+
+ GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+ i915_gem_object_put(ce->state->obj);
+}
+
+static void execlists_context_unpin(struct intel_context *ce)
+{
+ intel_ring_unpin(ce->ring);
+
+ ce->state->obj->pin_global--;
+ i915_gem_object_unpin_map(ce->state->obj);
+ i915_vma_unpin(ce->state);
+
+ i915_gem_context_put(ce->gem_context);
+}
+
static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
{
unsigned int flags;
@@ -1345,21 +1309,15 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
}
-static struct intel_ring *
-execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_context *
+__execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
void *vaddr;
int ret;
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- goto out;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
- ret = execlists_context_deferred_alloc(ctx, engine);
+ ret = execlists_context_deferred_alloc(ctx, engine, ce);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
@@ -1378,17 +1336,17 @@ execlists_context_pin(struct intel_engine_cs *engine,
if (ret)
goto unpin_map;
- intel_lr_context_descriptor_update(ctx, engine);
+ intel_lr_context_descriptor_update(ctx, engine, ce);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
+ GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
-out:
- return ce->ring;
+ return ce;
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
@@ -1399,33 +1357,33 @@ err:
return ERR_PTR(ret);
}
-static void execlists_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops execlists_context_ops = {
+ .unpin = execlists_context_unpin,
+ .destroy = execlists_context_destroy,
+};
+
+static struct intel_context *
+execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(ce->pin_count == 0);
- if (--ce->pin_count)
- return;
-
- intel_ring_unpin(ce->ring);
+ if (likely(ce->pin_count++))
+ return ce;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
- ce->state->obj->pin_global--;
- i915_gem_object_unpin_map(ce->state->obj);
- i915_vma_unpin(ce->state);
+ ce->ops = &execlists_context_ops;
- i915_gem_context_put(ctx);
+ return __execlists_context_pin(engine, ctx, ce);
}
static int execlists_request_alloc(struct i915_request *request)
{
- struct intel_context *ce =
- to_intel_context(request->ctx, request->engine);
int ret;
- GEM_BUG_ON(!ce->pin_count);
+ GEM_BUG_ON(!request->hw_context->pin_count);
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
@@ -1538,29 +1496,56 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+struct lri {
+ i915_reg_t reg;
+ u32 value;
+};
+
+static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
{
- *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ GEM_BUG_ON(!count || count > 63);
- /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
- batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ *batch++ = MI_LOAD_REGISTER_IMM(count);
+ do {
+ *batch++ = i915_mmio_reg_offset(lri->reg);
+ *batch++ = lri->value;
+ } while (lri++, --count);
+ *batch++ = MI_NOOP;
- *batch++ = MI_LOAD_REGISTER_IMM(3);
+ return batch;
+}
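	/*
	 * Editorial sketch of the dword stream emit_lri() produces for
	 * count == 2, not part of the patch:
	 *
	 *	MI_LOAD_REGISTER_IMM(2)
	 *	i915_mmio_reg_offset(reg0), value0
	 *	i915_mmio_reg_offset(reg1), value1
	 *	MI_NOOP
	 */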
- /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
- *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
- *batch++ = _MASKED_BIT_DISABLE(
- GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+{
+ static const struct lri lri[] = {
+ /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
+ {
+ COMMON_SLICE_CHICKEN2,
+ __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
+ 0),
+ },
+
+ /* BSpec: 11391 */
+ {
+ FF_SLICE_CHICKEN,
+ __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
+ FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
+ },
+
+ /* BSpec: 11299 */
+ {
+ _3D_CHICKEN3,
+ __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
+ _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
+ }
+ };
- /* BSpec: 11391 */
- *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
- *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- /* BSpec: 11299 */
- *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
- *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
+ batch = gen8_emit_flush_coherentl3_wa(engine, batch);
- *batch++ = MI_NOOP;
+ batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
/* WaClearSlmSpaceAtContextSwitch:kbl */
/* Actual scratch location is at 128 bytes offset */
@@ -1652,7 +1637,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1767,17 +1752,29 @@ static void enable_execlists(struct intel_engine_cs *engine)
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ I915_WRITE(RING_MI_MODE(engine->mmio_base),
+ _MASKED_BIT_DISABLE(STOP_RING));
+
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
engine->status_page.ggtt_offset);
POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
- /* Following the reset, we need to reload the CSB read/write pointers */
- engine->execlists.csb_head = -1;
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ bool unexpected = false;
+
+ if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) {
+ DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+ unexpected = true;
+ }
+
+ return unexpected;
}
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
int ret;
ret = intel_mocs_init_engine(engine);
@@ -1785,13 +1782,14 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
return ret;
intel_engine_reset_breadcrumbs(engine);
- intel_engine_init_hangcheck(engine);
- enable_execlists(engine);
+ if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
- /* After a GPU reset, we may have requests to replay */
- if (execlists->first)
- tasklet_schedule(&execlists->tasklet);
+ intel_engine_dump(engine, &p, NULL);
+ }
+
+ enable_execlists(engine);
return 0;
}
@@ -1833,8 +1831,69 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
return 0;
}
-static void reset_common_ring(struct intel_engine_cs *engine,
- struct i915_request *request)
+static struct i915_request *
+execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *request, *active;
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ /*
+ * We want to flush the pending context switches; having disabled
+ * the tasklet above, we can assume exclusive access to the execlists.
+ * This allows us to catch up with an in-flight preemption event,
+ * and avoid blaming an innocent request if the stall was due to the
+ * preemption itself.
+ */
+ process_csb(engine);
+
+ /*
+ * The last active request can then be no later than the last request
+ * now in ELSP[0]. So search backwards from there, so that if the GPU
+ * has advanced beyond the last CSB update, it will be pardoned.
+ */
+ active = NULL;
+ request = port_request(execlists->port);
+ if (request) {
+ /*
+ * Prevent the breadcrumb from advancing before we decide
+ * which request is currently active.
+ */
+ intel_engine_stop_cs(engine);
+
+ list_for_each_entry_from_reverse(request,
+ &engine->timeline.requests,
+ link) {
+ if (__i915_request_completed(request,
+ request->global_seqno))
+ break;
+
+ active = request;
+ }
+ }
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ return active;
+}
+
+static void execlists_reset(struct intel_engine_cs *engine,
+ struct i915_request *request)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
@@ -1844,8 +1903,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
engine->name, request ? request->global_seqno : 0,
intel_engine_get_seqno(engine));
- /* See execlists_cancel_requests() for the irq/spinlock split. */
- local_irq_save(flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
/*
* Catch up with any missed context-switch interrupts.
@@ -1857,14 +1915,14 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* requests were completed.
*/
execlists_cancel_port_requests(execlists);
- reset_irq(engine);
/* Push back any incomplete requests for replay after the reset. */
- spin_lock(&engine->timeline.lock);
__unwind_incomplete_requests(engine);
- spin_unlock(&engine->timeline.lock);
- local_irq_restore(flags);
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(&engine->execlists);
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
/*
* If the request was innocent, we leave the request in the ELSP
@@ -1888,35 +1946,52 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
- if (engine->default_state) {
- void *defaults;
-
- defaults = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (!IS_ERR(defaults)) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- defaults + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- i915_gem_object_unpin_map(engine->default_state);
- }
+ regs = request->hw_context->lrc_reg_state;
+ if (engine->pinned_default_state) {
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
}
- execlists_init_reg_state(regs, request->ctx, engine, request->ring);
+ execlists_init_reg_state(regs,
+ request->gem_context, engine, request->ring);
/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
- regs[CTX_RING_HEAD + 1] = request->postfix;
- request->ring->head = request->postfix;
+ request->ring->head = intel_ring_wrap(request->ring, request->postfix);
+ regs[CTX_RING_HEAD + 1] = request->ring->head;
+
intel_ring_update_space(request->ring);
/* Reset WaIdleLiteRestore:bdw,skl as well */
unwind_wa_tail(request);
}
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ /* After a GPU reset, we may have requests to replay */
+ if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
+ tasklet_schedule(&execlists->tasklet);
+
+ /*
+ * Flush the tasklet while we still have the forcewake to be sure
+ * that it is not allowed to sleep before we restart and reload a
+ * context.
+ *
+ * As before (with execlists_reset_prepare) we rely on the caller
+ * serialising multiple attempts to reset so that we know that we
+ * are the only one manipulating tasklet state.
+ */
+ __tasklet_enable_sync_once(&execlists->tasklet);
+
+ GEM_TRACE("%s\n", engine->name);
+}
+
static int intel_logical_ring_emit_pdps(struct i915_request *rq)
{
- struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_engine_cs *engine = rq->engine;
const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
u32 *cs;
@@ -1955,15 +2030,15 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init so this is
* not needed in 48-bit.*/
- if (rq->ctx->ppgtt &&
- (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
- !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+ if (rq->gem_context->ppgtt &&
+ (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+ !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
!intel_vgpu_active(rq->i915)) {
ret = intel_logical_ring_emit_pdps(rq);
if (ret)
return ret;
- rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
+ rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
}
cs = intel_ring_begin(rq, 6);
@@ -2217,13 +2292,15 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
kfree(engine);
}
-static void execlists_set_default_submission(struct intel_engine_cs *engine)
+void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->cancel_requests = execlists_cancel_requests;
engine->schedule = execlists_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet;
+ engine->reset.prepare = execlists_reset_prepare;
+
engine->park = NULL;
engine->unpark = NULL;
@@ -2243,18 +2320,19 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
engine->init_hw = gen8_init_common_ring;
- engine->reset_hw = reset_common_ring;
- engine->context_pin = execlists_context_pin;
- engine->context_unpin = execlists_context_unpin;
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.reset = execlists_reset;
+ engine->reset.finish = execlists_reset_finish;
+ engine->context_pin = execlists_context_pin;
engine->request_alloc = execlists_request_alloc;
engine->emit_flush = gen8_emit_flush;
engine->emit_breadcrumb = gen8_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
- engine->set_default_submission = execlists_set_default_submission;
+ engine->set_default_submission = intel_execlists_set_default_submission;
if (INTEL_GEN(engine->i915) < 11) {
engine->irq_enable = gen8_logical_ring_enable_irq;
@@ -2294,28 +2372,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
static void
logical_ring_setup(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- enum forcewake_domains fw_domains;
-
intel_engine_setup_common(engine);
/* Intentionally left blank. */
engine->buffer = NULL;
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
- RING_ELSP(engine),
- FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_PTR(engine),
- FW_REG_READ | FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_BUF_BASE(engine),
- FW_REG_READ);
-
- engine->execlists.fw_domains = fw_domains;
-
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
@@ -2323,33 +2384,61 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_irqs(engine);
}
+static bool csb_force_mmio(struct drm_i915_private *i915)
+{
+ /* Older GVT emulation depends upon intercepting CSB mmio */
+ return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
+}
+
static int logical_ring_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
int ret;
ret = intel_engine_init_common(engine);
if (ret)
goto error;
- if (HAS_LOGICAL_RING_ELSQ(engine->i915)) {
- engine->execlists.submit_reg = engine->i915->regs +
+ if (HAS_LOGICAL_RING_ELSQ(i915)) {
+ execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
- engine->execlists.ctrl_reg = engine->i915->regs +
+ execlists->ctrl_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
} else {
- engine->execlists.submit_reg = engine->i915->regs +
+ execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_ELSP(engine));
}
- engine->execlists.preempt_complete_status = ~0u;
- if (engine->i915->preempt_context) {
+ execlists->preempt_complete_status = ~0u;
+ if (i915->preempt_context) {
struct intel_context *ce =
- to_intel_context(engine->i915->preempt_context, engine);
+ to_intel_context(i915->preempt_context, engine);
- engine->execlists.preempt_complete_status =
+ execlists->preempt_complete_status =
upper_32_bits(ce->lrc_desc);
}
+ execlists->csb_read =
+ i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
+ if (csb_force_mmio(i915)) {
+ execlists->csb_status = (u32 __force *)
+ (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+
+ execlists->csb_write = (u32 __force *)execlists->csb_read;
+ execlists->csb_write_reset =
+ _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
+ GEN8_CSB_ENTRIES - 1);
+ } else {
+ execlists->csb_status =
+ &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+
+ execlists->csb_write =
+ &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
+ execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
+ }
+ reset_csb_pointers(execlists);
+
return 0;
error:
@@ -2482,7 +2571,7 @@ static void execlists_init_reg_state(u32 *regs,
struct drm_i915_private *dev_priv = engine->i915;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
u32 base = engine->mmio_base;
- bool rcs = engine->id == RCS;
+ bool rcs = engine->class == RENDER_CLASS;
/* A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2550,7 +2639,7 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
- if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
@@ -2629,10 +2718,10 @@ err_unpin_ctx:
}
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
{
struct drm_i915_gem_object *ctx_obj;
- struct intel_context *ce = to_intel_context(ctx, engine);
struct i915_vma *vma;
uint32_t context_size;
struct intel_ring *ring;
@@ -2654,7 +2743,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4ec7d8dd13c8..4dfb78e3ec7e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -104,11 +104,6 @@ struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
-static inline uint64_t
-intel_lr_context_descriptor(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- return to_intel_context(ctx, engine)->lrc_desc;
-}
+void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 8ae8f42f430a..5dae16ccd9f1 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -116,7 +116,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
{
- uint8_t rev;
+ u8 rev;
if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
&rev) != 1) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 48f618dc9abb..f9f3b0885ba5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -44,8 +44,6 @@
/* Private structure for the integrated LVDS support */
struct intel_lvds_connector {
struct intel_connector base;
-
- struct notifier_block lid_notifier;
};
struct intel_lvds_pps {
@@ -85,34 +83,35 @@ static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *conn
return container_of(connector, struct intel_lvds_connector, base.base);
}
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(lvds_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
+ else
+ *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
+
+ return val & LVDS_PORT_EN;
+}
+
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(lvds_encoder->reg);
-
- if (!(tmp & LVDS_PORT_EN))
- goto out;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
+ ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -255,14 +254,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
+ temp &= ~LVDS_PIPE_SEL_MASK_CPT;
+ temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
+ temp &= ~LVDS_PIPE_SEL_MASK;
+ temp |= LVDS_PIPE_SEL(pipe);
}
/* set the corresponding LVDS_BORDER bit */
@@ -454,26 +450,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return true;
}
-/*
- * Detect the LVDS connection.
- *
- * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means
- * connected and closed means disconnected. We also send hotplug events as
- * needed, using lid status notification from the input layer.
- */
static enum drm_connector_status
intel_lvds_detect(struct drm_connector *connector, bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- enum drm_connector_status status;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
-
- status = intel_panel_detect(dev_priv);
- if (status != connector_status_unknown)
- return status;
-
return connector_status_connected;
}
@@ -498,117 +477,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 1;
}
-static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
-{
- DRM_INFO("Skipping forced modeset for %s\n", id->ident);
- return 1;
-}
-
-/* The GPU hangs up on these systems if modeset is performed on LID open */
-static const struct dmi_system_id intel_no_modeset_on_lid[] = {
- {
- .callback = intel_no_modeset_on_lid_dmi_callback,
- .ident = "Toshiba Tecra A11",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
- },
- },
-
- { } /* terminating entry */
-};
-
-/*
- * Lid events. Note the use of 'modeset':
- * - we set it to MODESET_ON_LID_OPEN on lid close,
- * and set it to MODESET_DONE on open
- * - we use it as a "only once" bit (ie we ignore
- * duplicate events where it was already properly set)
- * - the suspend/resume paths will set it to
- * MODESET_SUSPENDED and ignore the lid open event,
- * because they restore the mode ("lid open").
- */
-static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
- void *unused)
-{
- struct intel_lvds_connector *lvds_connector =
- container_of(nb, struct intel_lvds_connector, lid_notifier);
- struct drm_connector *connector = &lvds_connector->base.base;
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
- return NOTIFY_OK;
-
- mutex_lock(&dev_priv->modeset_restore_lock);
- if (dev_priv->modeset_restore == MODESET_SUSPENDED)
- goto exit;
- /*
- * check and update the status of LVDS connector after receiving
- * the LID nofication event.
- */
- connector->status = connector->funcs->detect(connector, false);
-
- /* Don't force modeset on machines where it causes a GPU lockup */
- if (dmi_check_system(intel_no_modeset_on_lid))
- goto exit;
- if (!acpi_lid_open()) {
- /* do modeset on next lid open event */
- dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
- goto exit;
- }
-
- if (dev_priv->modeset_restore == MODESET_DONE)
- goto exit;
-
- /*
- * Some old platform's BIOS love to wreak havoc while the lid is closed.
- * We try to detect this here and undo any damage. The split for PCH
- * platforms is rather conservative and a bit arbitrary expect that on
- * those platforms VGA disabling requires actual legacy VGA I/O access,
- * and as part of the cleanup in the hw state restore we also redisable
- * the vga plane.
- */
- if (!HAS_PCH_SPLIT(dev_priv))
- intel_display_resume(dev);
-
- dev_priv->modeset_restore = MODESET_DONE;
-
-exit:
- mutex_unlock(&dev_priv->modeset_restore_lock);
- return NOTIFY_OK;
-}
-
-static int
-intel_lvds_connector_register(struct drm_connector *connector)
-{
- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
- int ret;
-
- ret = intel_connector_register(connector);
- if (ret)
- return ret;
-
- lvds->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
- DRM_DEBUG_KMS("lid notifier registration failed\n");
- lvds->lid_notifier.notifier_call = NULL;
- }
-
- return 0;
-}
-
-static void
-intel_lvds_connector_unregister(struct drm_connector *connector)
-{
- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
-
- if (lvds->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&lvds->lid_notifier);
-
- intel_connector_unregister(connector);
-}
-
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
@@ -641,8 +509,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_lvds_connector_register,
- .early_unregister = intel_lvds_connector_unregister,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -948,7 +816,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* register is uninitialized.
*/
val = I915_READ(lvds_encoder->reg);
- if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+ if (HAS_PCH_CPT(dev_priv))
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
+ else
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
+ if (val == 0)
val = dev_priv->vbt.bios_lvds_val;
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
@@ -1003,8 +875,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
return;
/* Skip init on machines we know falsely report LVDS */
- if (dmi_check_system(intel_no_lvds))
+ if (dmi_check_system(intel_no_lvds)) {
+ WARN(!dev_priv->vbt.int_lvds_support,
+ "Useless DMI match. Internal LVDS support disabled by VBT\n");
+ return;
+ }
+
+ if (!dev_priv->vbt.int_lvds_support) {
+ DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
return;
+ }
if (HAS_PCH_SPLIT(dev_priv))
lvds_reg = PCH_LVDS;
@@ -1016,10 +896,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
- if (dev_priv->vbt.edp.support) {
- DRM_DEBUG_KMS("disable LVDS for eDP support\n");
- return;
- }
}
pin = GMBUS_PIN_PANEL;
@@ -1108,8 +984,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* 2) check for VBT data
* 3) check to see if LVDS is already on
* if none of the above, no panel
- * 4) make sure lid is open
- * if closed, act like it's not there for now
*/
/*
@@ -1125,7 +999,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_gmbus_get_adapter(dev_priv, pin));
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
edid);
} else {
kfree(edid);
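
The intel_lvds_port_enabled() helper added above reports the pipe even when the port is off, using a platform-dependent mask/shift pair. A standalone sketch of that decode; the bit positions and names here are stand-ins for LVDS_PIPE_SEL_MASK{,_CPT} and LVDS_PORT_EN, not the real register layout.

#include <stdio.h>
#include <stdint.h>

/* Illustrative layouts only; the real masks live in i915_reg.h. */
#define PIPE_SEL_SHIFT		30
#define PIPE_SEL_MASK		(0x1u << PIPE_SEL_SHIFT)
#define PIPE_SEL_SHIFT_CPT	29
#define PIPE_SEL_MASK_CPT	(0x3u << PIPE_SEL_SHIFT_CPT)
#define PORT_EN			(1u << 31)

static int port_enabled(uint32_t val, int has_pch_cpt, unsigned int *pipe)
{
	/* Report the pipe even when the port is disabled, as the
	 * driver's state asserts want. */
	if (has_pch_cpt)
		*pipe = (val & PIPE_SEL_MASK_CPT) >> PIPE_SEL_SHIFT_CPT;
	else
		*pipe = (val & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;

	return !!(val & PORT_EN);
}

int main(void)
{
	unsigned int pipe;
	int on = port_enabled(PORT_EN | (1u << 30), 0, &pipe);

	printf("enabled=%d pipe=%u\n", on, pipe);	/* enabled=1 pipe=1 */
	return 0;
}
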
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index b39846613e3c..ca44bf368e24 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -40,7 +40,7 @@ int intel_connector_update_modes(struct drm_connector *connector,
{
int ret;
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index c58e5f53bab0..e034b4166d32 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -608,16 +608,16 @@ void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
-static struct intel_opregion *system_opregion;
-
+/*
+ * The only video events relevant to opregion are 0x80. These indicate either a
+ * docking event, lid switch or display switch request. In Linux, these are
+ * handled by the dock, button and video drivers.
+ */
static int intel_opregion_video_event(struct notifier_block *nb,
unsigned long val, void *data)
{
- /* The only video events relevant to opregion are 0x80. These indicate
- either a docking event, lid switch or display switch request. In
- Linux, these are handled by the dock, button and video drivers.
- */
-
+ struct intel_opregion *opregion = container_of(nb, struct intel_opregion,
+ acpi_notifier);
struct acpi_bus_event *event = data;
struct opregion_acpi *acpi;
int ret = NOTIFY_OK;
@@ -625,10 +625,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
- if (!system_opregion)
- return NOTIFY_DONE;
-
- acpi = system_opregion->acpi;
+ acpi = opregion->acpi;
if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
ret = NOTIFY_BAD;
@@ -638,10 +635,6 @@ static int intel_opregion_video_event(struct notifier_block *nb,
return ret;
}
-static struct notifier_block intel_opregion_notifier = {
- .notifier_call = intel_opregion_video_event,
-};
-
/*
* Initialise the DIDL field in opregion. This passes a list of devices to
* the firmware. Values are defined by section B.4.2 of the ACPI specification
@@ -797,8 +790,8 @@ void intel_opregion_register(struct drm_i915_private *dev_priv)
opregion->acpi->csts = 0;
opregion->acpi->drdy = 1;
- system_opregion = opregion;
- register_acpi_notifier(&intel_opregion_notifier);
+ opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
+ register_acpi_notifier(&opregion->acpi_notifier);
}
if (opregion->asle) {
@@ -822,8 +815,8 @@ void intel_opregion_unregister(struct drm_i915_private *dev_priv)
if (opregion->acpi) {
opregion->acpi->drdy = 0;
- system_opregion = NULL;
- unregister_acpi_notifier(&intel_opregion_notifier);
+ unregister_acpi_notifier(&opregion->acpi_notifier);
+ opregion->acpi_notifier.notifier_call = NULL;
}
/* just clear all opregion memory pointers now */
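
The opregion change above removes the file-scope system_opregion singleton by embedding the notifier_block in struct intel_opregion and recovering the container with container_of() inside the callback. A self-contained sketch of the same pattern, with stand-in types replacing the kernel's:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb, unsigned long val,
			     void *data);
};

struct opregion {
	int cevt;			/* per-device state, no global needed */
	struct notifier_block acpi_notifier;
};

static int video_event(struct notifier_block *nb, unsigned long val,
		       void *data)
{
	/* Recover the enclosing opregion from the embedded member. */
	struct opregion *op = container_of(nb, struct opregion, acpi_notifier);

	printf("event 0x%lx, cevt=%d\n", val, op->cevt);
	return 0;
}

int main(void)
{
	struct opregion op = { .cevt = 1 };

	op.acpi_notifier.notifier_call = video_event;
	op.acpi_notifier.notifier_call(&op.acpi_notifier, 0x80, NULL);
	return 0;
}
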
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index e0e437ba9e51..e8498a8cda3d 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -49,6 +49,7 @@ struct intel_opregion {
u32 vbt_size;
u32 *lid_state;
struct work_struct asle_work;
+ struct notifier_block acpi_notifier;
};
#define OPREGION_SIZE (8 * 1024)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index b443278e569c..4a9f139e7b73 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -375,26 +375,6 @@ out:
pipe_config->gmch_pfit.lvds_border_bits = border;
}
-enum drm_connector_status
-intel_panel_detect(struct drm_i915_private *dev_priv)
-{
- /* Assume that the BIOS does not lie through the OpRegion... */
- if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) {
- return *dev_priv->opregion.lid_state & 0x1 ?
- connector_status_connected :
- connector_status_disconnected;
- }
-
- switch (i915_modparams.panel_ignore_lid) {
- case -2:
- return connector_status_connected;
- case -1:
- return connector_status_disconnected;
- default:
- return connector_status_unknown;
- }
-}
-
/**
* scale - scale values from one range to another
* @source_val: value in range [@source_min..@source_max]
@@ -406,11 +386,11 @@ intel_panel_detect(struct drm_i915_private *dev_priv)
* Return @source_val in range [@source_min..@source_max] scaled to range
* [@target_min..@target_max].
*/
-static uint32_t scale(uint32_t source_val,
- uint32_t source_min, uint32_t source_max,
- uint32_t target_min, uint32_t target_max)
+static u32 scale(u32 source_val,
+ u32 source_min, u32 source_max,
+ u32 target_min, u32 target_max)
{
- uint64_t target_val;
+ u64 target_val;
WARN_ON(source_min > source_max);
WARN_ON(target_min > target_max);
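
The scale() helper touched above maps a value from [source_min..source_max] onto [target_min..target_max] through a 64-bit intermediate so the multiply cannot overflow 32 bits. The arithmetic as a standalone sketch; note the driver rounds the division to closest, where this sketch truncates.

#include <stdio.h>
#include <stdint.h>

/* Rescale val from [smin..smax] to [tmin..tmax]; the 64-bit
 * intermediate keeps (val - smin) * (tmax - tmin) from overflowing. */
static uint32_t scale(uint32_t val, uint32_t smin, uint32_t smax,
		      uint32_t tmin, uint32_t tmax)
{
	uint64_t t;

	if (val < smin)
		val = smin;
	if (val > smax)
		val = smax;

	t = (uint64_t)(val - smin) * (tmax - tmin);
	t /= smax - smin;

	return tmin + (uint32_t)t;
}

int main(void)
{
	/* e.g. map an 8-bit brightness onto a 16-bit PWM range */
	printf("%u\n", scale(128, 0, 255, 0, 65535));	/* 32896 */
	return 0;
}
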
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 39a4e4edda07..849e1b69ba73 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -30,160 +30,6 @@
#include <linux/debugfs.h>
#include "intel_drv.h"
-struct pipe_crc_info {
- const char *name;
- struct drm_i915_private *dev_priv;
- enum pipe pipe;
-};
-
-static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
-{
- struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
- if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
- return -ENODEV;
-
- spin_lock_irq(&pipe_crc->lock);
-
- if (pipe_crc->opened) {
- spin_unlock_irq(&pipe_crc->lock);
- return -EBUSY; /* already open */
- }
-
- pipe_crc->opened = true;
- filep->private_data = inode->i_private;
-
- spin_unlock_irq(&pipe_crc->lock);
-
- return 0;
-}
-
-static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
-{
- struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
- spin_lock_irq(&pipe_crc->lock);
- pipe_crc->opened = false;
- spin_unlock_irq(&pipe_crc->lock);
-
- return 0;
-}
-
-/* (6 fields, 8 chars each, space separated (5) + '\n') */
-#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
-/* account for \'0' */
-#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
-
-static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
-{
- lockdep_assert_held(&pipe_crc->lock);
- return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
- INTEL_PIPE_CRC_ENTRIES_NR);
-}
-
-static ssize_t
-i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
- loff_t *pos)
-{
- struct pipe_crc_info *info = filep->private_data;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
- char buf[PIPE_CRC_BUFFER_LEN];
- int n_entries;
- ssize_t bytes_read;
-
- /*
- * Don't allow user space to provide buffers not big enough to hold
- * a line of data.
- */
- if (count < PIPE_CRC_LINE_LEN)
- return -EINVAL;
-
- if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
- return 0;
-
- /* nothing to read */
- spin_lock_irq(&pipe_crc->lock);
- while (pipe_crc_data_count(pipe_crc) == 0) {
- int ret;
-
- if (filep->f_flags & O_NONBLOCK) {
- spin_unlock_irq(&pipe_crc->lock);
- return -EAGAIN;
- }
-
- ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
- pipe_crc_data_count(pipe_crc), pipe_crc->lock);
- if (ret) {
- spin_unlock_irq(&pipe_crc->lock);
- return ret;
- }
- }
-
- /* We now have one or more entries to read */
- n_entries = count / PIPE_CRC_LINE_LEN;
-
- bytes_read = 0;
- while (n_entries > 0) {
- struct intel_pipe_crc_entry *entry =
- &pipe_crc->entries[pipe_crc->tail];
-
- if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
- INTEL_PIPE_CRC_ENTRIES_NR) < 1)
- break;
-
- BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
- pipe_crc->tail = (pipe_crc->tail + 1) &
- (INTEL_PIPE_CRC_ENTRIES_NR - 1);
-
- bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
- "%8u %8x %8x %8x %8x %8x\n",
- entry->frame, entry->crc[0],
- entry->crc[1], entry->crc[2],
- entry->crc[3], entry->crc[4]);
-
- spin_unlock_irq(&pipe_crc->lock);
-
- if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
- return -EFAULT;
-
- user_buf += PIPE_CRC_LINE_LEN;
- n_entries--;
-
- spin_lock_irq(&pipe_crc->lock);
- }
-
- spin_unlock_irq(&pipe_crc->lock);
-
- return bytes_read;
-}
-
-static const struct file_operations i915_pipe_crc_fops = {
- .owner = THIS_MODULE,
- .open = i915_pipe_crc_open,
- .read = i915_pipe_crc_read,
- .release = i915_pipe_crc_release,
-};
-
-static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
- {
- .name = "i915_pipe_A_crc",
- .pipe = PIPE_A,
- },
- {
- .name = "i915_pipe_B_crc",
- .pipe = PIPE_B,
- },
- {
- .name = "i915_pipe_C_crc",
- .pipe = PIPE_C,
- },
-};
-
static const char * const pipe_crc_sources[] = {
"none",
"plane1",
@@ -197,29 +43,6 @@ static const char * const pipe_crc_sources[] = {
"auto",
};
-static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
-{
- BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
- return pipe_crc_sources[source];
-}
-
-static int display_crc_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe)
- seq_printf(m, "%c %s\n", pipe_name(pipe),
- pipe_crc_source_name(dev_priv->pipe_crc[pipe].source));
-
- return 0;
-}
-
-static int display_crc_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, display_crc_ctl_show, inode->i_private);
-}
-
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
uint32_t *val)
{
@@ -616,177 +439,6 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
}
-static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source source)
-{
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- enum intel_display_power_domain power_domain;
- u32 val = 0; /* shut up gcc */
- int ret;
-
- if (pipe_crc->source == source)
- return 0;
-
- /* forbid changing the source without going back to 'none' */
- if (pipe_crc->source && source)
- return -EINVAL;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
- DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
- return -EIO;
- }
-
- ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val, true);
- if (ret != 0)
- goto out;
-
- /* none -> real source transition */
- if (source) {
- struct intel_pipe_crc_entry *entries;
-
- DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
- pipe_name(pipe), pipe_crc_source_name(source));
-
- entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
- sizeof(pipe_crc->entries[0]),
- GFP_KERNEL);
- if (!entries) {
- ret = -ENOMEM;
- goto out;
- }
-
- spin_lock_irq(&pipe_crc->lock);
- kfree(pipe_crc->entries);
- pipe_crc->entries = entries;
- pipe_crc->head = 0;
- pipe_crc->tail = 0;
- spin_unlock_irq(&pipe_crc->lock);
- }
-
- pipe_crc->source = source;
-
- I915_WRITE(PIPE_CRC_CTL(pipe), val);
- POSTING_READ(PIPE_CRC_CTL(pipe));
-
- /* real source -> none transition */
- if (!source) {
- struct intel_pipe_crc_entry *entries;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
-
- DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
- pipe_name(pipe));
-
- drm_modeset_lock(&crtc->base.mutex, NULL);
- if (crtc->base.state->active)
- intel_wait_for_vblank(dev_priv, pipe);
- drm_modeset_unlock(&crtc->base.mutex);
-
- spin_lock_irq(&pipe_crc->lock);
- entries = pipe_crc->entries;
- pipe_crc->entries = NULL;
- pipe_crc->head = 0;
- pipe_crc->tail = 0;
- spin_unlock_irq(&pipe_crc->lock);
-
- kfree(entries);
-
- if (IS_G4X(dev_priv))
- g4x_undo_pipe_scramble_reset(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, pipe);
- else if ((IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
- hsw_pipe_A_crc_wa(dev_priv, false);
- }
-
- ret = 0;
-
-out:
- intel_display_power_put(dev_priv, power_domain);
-
- return ret;
-}
-
-/*
- * Parse pipe CRC command strings:
- * command: wsp* object wsp+ name wsp+ source wsp*
- * object: 'pipe'
- * name: (A | B | C)
- * source: (none | plane1 | plane2 | pf)
- * wsp: (#0x20 | #0x9 | #0xA)+
- *
- * eg.:
- * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
- * "pipe A none" -> Stop CRC
- */
-static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
-{
- int n_words = 0;
-
- while (*buf) {
- char *end;
-
- /* skip leading white space */
- buf = skip_spaces(buf);
- if (!*buf)
- break; /* end of buffer */
-
- /* find end of word */
- for (end = buf; *end && !isspace(*end); end++)
- ;
-
- if (n_words == max_words) {
- DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
- max_words);
- return -EINVAL; /* ran out of words[] before bytes */
- }
-
- if (*end)
- *end++ = '\0';
- words[n_words++] = buf;
- buf = end;
- }
-
- return n_words;
-}
-
-enum intel_pipe_crc_object {
- PIPE_CRC_OBJECT_PIPE,
-};
-
-static const char * const pipe_crc_objects[] = {
- "pipe",
-};
-
-static int
-display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
-{
- int i;
-
- i = match_string(pipe_crc_objects, ARRAY_SIZE(pipe_crc_objects), buf);
- if (i < 0)
- return i;
-
- *o = i;
- return 0;
-}
-
-static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv,
- const char *buf, enum pipe *pipe)
-{
- const char name = buf[0];
-
- if (name < 'A' || name >= pipe_name(INTEL_INFO(dev_priv)->num_pipes))
- return -EINVAL;
-
- *pipe = name - 'A';
-
- return 0;
-}
-
static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
@@ -805,81 +457,6 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
return 0;
}
-static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
- char *buf, size_t len)
-{
-#define N_WORDS 3
- int n_words;
- char *words[N_WORDS];
- enum pipe pipe;
- enum intel_pipe_crc_object object;
- enum intel_pipe_crc_source source;
-
- n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
- if (n_words != N_WORDS) {
- DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
- N_WORDS);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_object(words[0], &object) < 0) {
- DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_pipe(dev_priv, words[1], &pipe) < 0) {
- DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_source(words[2], &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
- return -EINVAL;
- }
-
- return pipe_crc_set_source(dev_priv, pipe, source);
-}
-
-static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- char *tmpbuf;
- int ret;
-
- if (len == 0)
- return 0;
-
- if (len > PAGE_SIZE - 1) {
- DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
- PAGE_SIZE);
- return -E2BIG;
- }
-
- tmpbuf = memdup_user_nul(ubuf, len);
- if (IS_ERR(tmpbuf))
- return PTR_ERR(tmpbuf);
-
- ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
-
- kfree(tmpbuf);
- if (ret < 0)
- return ret;
-
- *offp += len;
- return len;
-}
-
-const struct file_operations i915_display_crc_ctl_fops = {
- .owner = THIS_MODULE,
- .open = display_crc_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = display_crc_ctl_write
-};
-
void intel_display_crc_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -887,30 +464,8 @@ void intel_display_crc_init(struct drm_i915_private *dev_priv)
for_each_pipe(dev_priv, pipe) {
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- pipe_crc->opened = false;
spin_lock_init(&pipe_crc->lock);
- init_waitqueue_head(&pipe_crc->wq);
- }
-}
-
-int intel_pipe_crc_create(struct drm_minor *minor)
-{
- struct drm_i915_private *dev_priv = to_i915(minor->dev);
- struct dentry *ent;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
- struct pipe_crc_info *info = &i915_pipe_crc_data[i];
-
- info->dev_priv = dev_priv;
- ent = debugfs_create_file(info->name, S_IRUGO,
- minor->debugfs_root, info,
- &i915_pipe_crc_fops);
- if (!ent)
- return -ENOMEM;
}
-
- return 0;
}
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
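
The reader deleted above drained a power-of-two ring of CRC entries with CIRC_CNT(); that plumbing is now handled by the DRM core's generic debugfs CRC interface, with the driver only implementing the set-source hook kept at the end of this file. The wrap logic the old code relied on, as a standalone model (assuming a power-of-two size so the mask works):

#include <stdio.h>

#define ENTRIES 128	/* INTEL_PIPE_CRC_ENTRIES_NR; must be a power of two */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
	unsigned int head = 0, tail = 0;
	int i;

	for (i = 0; i < 5; i++)			/* producer: 5 entries */
		head = (head + 1) & (ENTRIES - 1);

	printf("pending = %u\n", CIRC_CNT(head, tail, ENTRIES)); /* 5 */

	while (CIRC_CNT(head, tail, ENTRIES))	/* consumer drains */
		tail = (tail + 1) & (ENTRIES - 1);

	printf("pending = %u\n", CIRC_CNT(head, tail, ENTRIES)); /* 0 */
	return 0;
}
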
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 53aaaa3e6886..43ae9de12ba3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6264,42 +6264,15 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
return limits;
}
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int new_power;
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
- new_power = rps->power;
- switch (rps->power) {
- case LOW_POWER:
- if (val > rps->efficient_freq + 1 &&
- val > rps->cur_freq)
- new_power = BETWEEN;
- break;
-
- case BETWEEN:
- if (val <= rps->efficient_freq &&
- val < rps->cur_freq)
- new_power = LOW_POWER;
- else if (val >= rps->rp0_freq &&
- val > rps->cur_freq)
- new_power = HIGH_POWER;
- break;
+ lockdep_assert_held(&rps->power.mutex);
- case HIGH_POWER:
- if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
- val < rps->cur_freq)
- new_power = BETWEEN;
- break;
- }
- /* Max/min bins are special */
- if (val <= rps->min_freq_softlimit)
- new_power = LOW_POWER;
- if (val >= rps->max_freq_softlimit)
- new_power = HIGH_POWER;
- if (new_power == rps->power)
+ if (new_power == rps->power.mode)
return;
/* Note the units here are not exactly 1us, but 1280ns. */
@@ -6362,12 +6335,71 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_DOWN_IDLE_AVG);
skip_hw_write:
- rps->power = new_power;
- rps->up_threshold = threshold_up;
- rps->down_threshold = threshold_down;
+ rps->power.mode = new_power;
+ rps->power.up_threshold = threshold_up;
+ rps->power.down_threshold = threshold_down;
+}
+
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ int new_power;
+
+ new_power = rps->power.mode;
+ switch (rps->power.mode) {
+ case LOW_POWER:
+ if (val > rps->efficient_freq + 1 &&
+ val > rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= rps->efficient_freq &&
+ val < rps->cur_freq)
+ new_power = LOW_POWER;
+ else if (val >= rps->rp0_freq &&
+ val > rps->cur_freq)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+ val < rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val <= rps->min_freq_softlimit)
+ new_power = LOW_POWER;
+ if (val >= rps->max_freq_softlimit)
+ new_power = HIGH_POWER;
+
+ mutex_lock(&rps->power.mutex);
+ if (rps->power.interactive)
+ new_power = HIGH_POWER;
+ rps_set_power(dev_priv, new_power);
+ mutex_unlock(&rps->power.mutex);
rps->last_adj = 0;
}
+void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
+{
+ struct intel_rps *rps = &i915->gt_pm.rps;
+
+ if (INTEL_GEN(i915) < 6)
+ return;
+
+ mutex_lock(&rps->power.mutex);
+ if (interactive) {
+ if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
+ rps_set_power(i915, HIGH_POWER);
+ } else {
+ GEM_BUG_ON(!rps->power.interactive);
+ rps->power.interactive--;
+ }
+ mutex_unlock(&rps->power.mutex);
+}
+
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -6780,7 +6812,7 @@ static void reset_rps(struct drm_i915_private *dev_priv,
u8 freq = rps->cur_freq;
/* force a reset */
- rps->power = -1;
+ rps->power.mode = -1;
rps->cur_freq = -1;
if (set(dev_priv, freq))
@@ -7347,11 +7379,11 @@ out:
static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
- if (WARN_ON(!dev_priv->vlv_pctx))
- return;
+ struct drm_i915_gem_object *pctx;
- i915_gem_object_put(dev_priv->vlv_pctx);
- dev_priv->vlv_pctx = NULL;
+ pctx = fetch_and_zero(&dev_priv->vlv_pctx);
+ if (pctx)
+ i915_gem_object_put(pctx);
}
static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
@@ -9604,6 +9636,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->pcu_lock);
+ mutex_init(&dev_priv->gt_pm.rps.power.mutex);
atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
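
intel_rps_mark_interactive() above keeps a nesting count under power.mutex: the first holder forces HIGH_POWER, and while any holder remains, gen6_set_rps_thresholds() cannot drop below it. A userspace model of that refcounted override, with a pthread mutex standing in for the kernel one and simplified names:

#include <stdio.h>
#include <pthread.h>

enum power_mode { LOW_POWER, BETWEEN, HIGH_POWER };

struct rps {
	pthread_mutex_t mutex;
	unsigned int interactive;	/* nesting count of boost holders */
	enum power_mode mode;
};

static void set_power(struct rps *rps, enum power_mode new_mode)
{
	/* caller holds rps->mutex */
	if (new_mode != rps->mode)
		rps->mode = new_mode;
}

static void mark_interactive(struct rps *rps, int interactive)
{
	pthread_mutex_lock(&rps->mutex);
	if (interactive) {
		if (!rps->interactive++)
			set_power(rps, HIGH_POWER);
	} else {
		rps->interactive--;
	}
	pthread_mutex_unlock(&rps->mutex);
}

static void set_thresholds(struct rps *rps, enum power_mode wanted)
{
	pthread_mutex_lock(&rps->mutex);
	if (rps->interactive)		/* boost holders pin HIGH_POWER */
		wanted = HIGH_POWER;
	set_power(rps, wanted);
	pthread_mutex_unlock(&rps->mutex);
}

int main(void)
{
	struct rps rps = { PTHREAD_MUTEX_INITIALIZER, 0, LOW_POWER };

	mark_interactive(&rps, 1);	/* e.g. around a display update */
	set_thresholds(&rps, LOW_POWER);
	printf("mode=%d\n", rps.mode);	/* stays HIGH_POWER (2) */
	mark_interactive(&rps, 0);
	return 0;
}
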
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index db27f2faa1de..4bd5768731ee 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -56,51 +56,10 @@
#include "intel_drv.h"
#include "i915_drv.h"
-static inline enum intel_display_power_domain
-psr_aux_domain(struct intel_dp *intel_dp)
-{
- /* CNL HW requires corresponding AUX IOs to be powered up for PSR.
- * However, for non-A AUX ports the corresponding non-EDP transcoders
- * would have already enabled power well 2 and DC_OFF. This means we can
- * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
- * specific AUX_IO reference without powering up any extra wells.
- * Note that PSR is enabled only on Port A even though this function
- * returns the correct domain for other ports too.
- */
- return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
- intel_dp->aux_power_domain;
-}
-
-static void psr_aux_io_power_get(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
-
- if (INTEL_GEN(dev_priv) < 10)
- return;
-
- intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
-}
-
-static void psr_aux_io_power_put(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
-
- if (INTEL_GEN(dev_priv) < 10)
- return;
-
- intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
-}
-
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
{
u32 debug_mask, mask;
- /* No PSR interrupts on VLV/CHV */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return;
-
mask = EDP_PSR_ERROR(TRANSCODER_EDP);
debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
@@ -201,15 +160,6 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
-static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
-{
- uint8_t psr_caps = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
- return false;
- return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
uint8_t dprx = 0;
@@ -232,13 +182,13 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
- u8 val = 0;
+ u8 val = 8; /* assume the worst if we can't read the value */
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
- DRM_ERROR("Unable to get sink synchronization latency\n");
+ DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}
@@ -250,13 +200,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
- if (intel_dp->psr_dpcd[0]) {
- dev_priv->psr.sink_support = true;
- DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ if (!intel_dp->psr_dpcd[0])
+ return;
+ DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
+ intel_dp->psr_dpcd[0]);
+
+ if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+ DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+ return;
}
+ dev_priv->psr.sink_support = true;
+ dev_priv->psr.sink_sync_latency =
+ intel_dp_get_sink_sync_latency(intel_dp);
if (INTEL_GEN(dev_priv) >= 9 &&
(intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+ bool y_req = intel_dp->psr_dpcd[1] &
+ DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+ bool alpm = intel_dp_get_alpm_status(intel_dp);
+
/*
* All panels that supports PSR version 03h (PSR2 +
* Y-coordinate) can handle Y-coordinates in VSC but we are
@@ -268,49 +230,19 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* Y-coordinate requirement panels we would need to enable
* GTC first.
*/
- dev_priv->psr.sink_psr2_support =
- intel_dp_get_y_coord_required(intel_dp);
- DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
- ? "supported" : "not supported");
+ dev_priv->psr.sink_psr2_support = y_req && alpm;
+ DRM_DEBUG_KMS("PSR2 %ssupported\n",
+ dev_priv->psr.sink_psr2_support ? "" : "not ");
if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
- dev_priv->psr.alpm =
- intel_dp_get_alpm_status(intel_dp);
- dev_priv->psr.sink_sync_latency =
- intel_dp_get_sink_sync_latency(intel_dp);
}
}
}
-static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t val;
-
- val = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
-}
-
-static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- uint32_t val;
-
- /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
- val = I915_READ(VLV_VSCSDP(crtc->pipe));
- val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
- val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
- I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
-}
-
-static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
@@ -341,12 +273,6 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
-static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
-{
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-}
-
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -373,7 +299,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
/* Start with bits set for DDI_AUX_CTL register */
- aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+ aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
aux_clock_divider);
/* Select only valid bits for SRD_AUX_CTL */
@@ -381,7 +307,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}
-static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@@ -389,95 +315,64 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
u8 dpcd_val = DP_PSR_ENABLE;
/* Enable ALPM at sink for psr2 */
- if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE);
-
- if (dev_priv->psr.psr2_enabled)
+ if (dev_priv->psr.psr2_enabled) {
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE);
dpcd_val |= DP_PSR_ENABLE_PSR2;
+ }
+
if (dev_priv->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+ if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
+ dpcd_val |= DP_PSR_CRC_VERIFICATION;
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
-static void vlv_psr_enable_source(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
- /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
- I915_WRITE(VLV_PSRCTL(crtc->pipe),
- VLV_EDP_PSR_MODE_SW_TIMER |
- VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
- VLV_EDP_PSR_ENABLE);
-}
-
-static void vlv_psr_activate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- /*
- * Let's do the transition from PSR_state 1 (inactive) to
- * PSR_state 2 (transition to active - static frame transmission).
- * Then Hardware is responsible for the transition to
- * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
- */
- I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
- VLV_EDP_PSR_ACTIVE_ENTRY);
-}
-
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
- uint32_t max_sleep_time = 0x1f;
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- uint32_t val = EDP_PSR_ENABLE;
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ /* sink_sync_latency of 8 means source has to wait for more than 8
+ * frames, we'll go with 9 frames for now
+ */
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (dev_priv->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
- if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
- val |= EDP_PSR_TP1_TIME_2500us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
- val |= EDP_PSR_TP1_TIME_500us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+ if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+ val |= EDP_PSR_TP1_TIME_0us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP1_TIME_500us;
else
- val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_2500us;
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
- val |= EDP_PSR_TP2_TP3_TIME_2500us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
- val |= EDP_PSR_TP2_TP3_TIME_500us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP2_TP3_TIME_500us;
else
- val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
@@ -485,6 +380,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP1_TP2_SEL;
+ if (INTEL_GEN(dev_priv) >= 8)
+ val |= EDP_PSR_CRC_ENABLE;
+
val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
I915_WRITE(EDP_PSR_CTL, val);
}
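
The TP1/TP2_TP3 changes above switch the VBT wakeup times to microseconds and pick the smallest register encoding that still covers the requested wait (0 / 100 / 500 / 2500 us). A sketch of that bucketing; the enum values are stand-ins for the EDP_PSR_TP*_TIME_* field encodings.

#include <stdio.h>

enum tp_time { TP_0US, TP_100US, TP_500US, TP_2500US };

static enum tp_time tp_time_for_us(int us)
{
	if (us == 0)
		return TP_0US;
	else if (us <= 100)
		return TP_100US;
	else if (us <= 500)
		return TP_500US;
	return TP_2500US;	/* anything longer gets the largest bucket */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       tp_time_for_us(0),	/* -> TP_0US */
	       tp_time_for_us(40),	/* -> TP_100US */
	       tp_time_for_us(300),	/* -> TP_500US */
	       tp_time_for_us(600));	/* -> TP_2500US */
	return 0;
}
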
@@ -494,15 +392,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ u32 val;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
@@ -513,36 +411,19 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
- val |= EDP_PSR2_TP2_TIME_2500;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
- val |= EDP_PSR2_TP2_TIME_500;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
- val |= EDP_PSR2_TP2_TIME_100;
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+ val |= EDP_PSR2_TP2_TIME_50us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ val |= EDP_PSR2_TP2_TIME_100us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR2_TP2_TIME_500us;
else
- val |= EDP_PSR2_TP2_TIME_50;
+ val |= EDP_PSR2_TP2_TIME_2500us;
I915_WRITE(EDP_PSR2_CTL, val);
}
-static void hsw_psr_activate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- /* On HSW+ after we enable PSR on source it will activate it
- * as soon as it match configure idle_frame count. So
- * we just actually enable it here on activation time.
- */
-
- /* psr1 and psr2 are mutually exclusive.*/
- if (dev_priv->psr.psr2_enabled)
- hsw_activate_psr2(intel_dp);
- else
- hsw_activate_psr1(intel_dp);
-}
-
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -602,17 +483,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* ones. Since by Display design transcoder EDP is tied to port A
* we can safely escape based on the port A.
*/
- if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
+ if (dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return;
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->psr.link_standby) {
- DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
- return;
- }
-
if (IS_HASWELL(dev_priv) &&
I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
S3D_ENABLE) {
@@ -640,11 +515,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
- return;
- }
-
crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
@@ -656,27 +526,29 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (dev_priv->psr.psr2_enabled)
+ if (INTEL_GEN(dev_priv) >= 9)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- else
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
- dev_priv->psr.activate(intel_dp);
+ /* psr1 and psr2 are mutually exclusive. */
+ if (dev_priv->psr.psr2_enabled)
+ hsw_activate_psr2(intel_dp);
+ else
+ hsw_activate_psr1(intel_dp);
+
dev_priv->psr.active = true;
}
-static void hsw_psr_enable_source(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- psr_aux_io_power_get(intel_dp);
-
/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
* use hardcoded values PSR AUX transactions
*/
@@ -712,7 +584,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD |
EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
+ EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP);
}
}
@@ -746,64 +619,19 @@ void intel_psr_enable(struct intel_dp *intel_dp,
dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.setup_vsc(intel_dp, crtc_state);
- dev_priv->psr.enable_sink(intel_dp);
- dev_priv->psr.enable_source(intel_dp, crtc_state);
+ intel_psr_setup_vsc(intel_dp, crtc_state);
+ intel_psr_enable_sink(intel_dp);
+ intel_psr_enable_source(intel_dp, crtc_state);
dev_priv->psr.enabled = intel_dp;
- if (INTEL_GEN(dev_priv) >= 9) {
- intel_psr_activate(intel_dp);
- } else {
- /*
- * FIXME: Activation should happen immediately since this
- * function is just called after pipe is fully trained and
- * enabled.
- * However on some platforms we face issues when first
- * activation follows a modeset so quickly.
- * - On VLV/CHV we get bank screen on first activation
- * - On HSW/BDW we get a recoverable frozen screen until
- * next exit-activate sequence.
- */
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
- }
+ intel_psr_activate(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void vlv_psr_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- uint32_t val;
-
- if (dev_priv->psr.active) {
- /* Put VLV PSR back to PSR_state 0 (disabled). */
- if (intel_wait_for_register(dev_priv,
- VLV_PSRSTAT(crtc->pipe),
- VLV_EDP_PSR_IN_TRANS,
- 0,
- 1))
- WARN(1, "PSR transition took longer than expected\n");
-
- val = I915_READ(VLV_PSRCTL(crtc->pipe));
- val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
- val &= ~VLV_EDP_PSR_ENABLE;
- val &= ~VLV_EDP_PSR_MODE_MASK;
- I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
-
- dev_priv->psr.active = false;
- } else {
- WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
- }
-}
-
-static void hsw_psr_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
+static void
+intel_psr_disable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -842,8 +670,25 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
else
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
}
+}
+
+static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ lockdep_assert_held(&dev_priv->psr.lock);
+
+ if (!dev_priv->psr.enabled)
+ return;
+
+ intel_psr_disable_source(intel_dp);
- psr_aux_io_power_put(intel_dp);
+ /* Disable PSR on Sink */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
+ dev_priv->psr.enabled = NULL;
}
/**
@@ -867,23 +712,49 @@ void intel_psr_disable(struct intel_dp *intel_dp,
return;
mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
+ intel_psr_disable_locked(intel_dp);
+ mutex_unlock(&dev_priv->psr.lock);
+ cancel_work_sync(&dev_priv->psr.work);
+}
- dev_priv->psr.disable_source(intel_dp, old_crtc_state);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg;
+ u32 mask;
- /* Disable PSR on Sink */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+ if (!new_crtc_state->has_psr)
+ return 0;
- dev_priv->psr.enabled = NULL;
- mutex_unlock(&dev_priv->psr.lock);
+ /*
+ * The sole user right now is intel_pipe_update_start(),
+ * which won't race with psr_enable/disable, which is
+ * where psr2_enabled is written to. So, we don't need
+ * to acquire the psr.lock. More importantly, we want the
+ * latency inside intel_pipe_update_start() to be as low
+ * as possible, so no need to acquire psr.lock when it is
+ * not needed and will induce latencies in the atomic
+ * update path.
+ */
+ if (dev_priv->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
+ } else {
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
+ }
- cancel_delayed_work_sync(&dev_priv->psr.work);
+ /*
+ * Max time for PSR to idle = Inverse of the refresh rate +
+ * 6 ms of exit training time + 1.5 ms of aux channel
+ * handshake. 50 msec is defensive enough to cover everything.
+ */
+ return intel_wait_for_register(dev_priv, reg, mask,
+ EDP_PSR_STATUS_STATE_IDLE, 50);
}
-static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
+static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
struct intel_dp *intel_dp;
i915_reg_t reg;
@@ -894,21 +765,12 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
if (!intel_dp)
return false;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS;
- mask = EDP_PSR2_STATUS_STATE_MASK;
- } else {
- reg = EDP_PSR_STATUS;
- mask = EDP_PSR_STATUS_STATE_MASK;
- }
+ if (dev_priv->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- struct drm_crtc *crtc =
- dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- reg = VLV_PSRSTAT(pipe);
- mask = VLV_EDP_PSR_IN_TRANS;
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
}
mutex_unlock(&dev_priv->psr.lock);
@@ -925,17 +787,20 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
+ container_of(work, typeof(*dev_priv), psr.work);
mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled)
+ goto unlock;
+
/*
* We have to make sure PSR is ready for re-enable
* otherwise it keeps disabled until next full enable/disable cycle.
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (!psr_wait_for_idle(dev_priv))
+ if (!__psr_wait_for_idle_locked(dev_priv))
goto unlock;
/*
@@ -943,7 +808,7 @@ static void intel_psr_work(struct work_struct *work)
* recheck. Since psr_flush first clears this and then reschedules we
* won't ever miss a flush when bailing out here.
*/
- if (dev_priv->psr.busy_frontbuffer_bits)
+ if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
goto unlock;
intel_psr_activate(dev_priv->psr.enabled);
@@ -953,103 +818,24 @@ unlock:
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
u32 val;
if (!dev_priv->psr.active)
return;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
- WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
- } else {
- val = I915_READ(EDP_PSR_CTL);
- WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
- }
+ if (dev_priv->psr.psr2_enabled) {
+ val = I915_READ(EDP_PSR2_CTL);
+ WARN_ON(!(val & EDP_PSR2_ENABLE));
+ I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
} else {
- val = I915_READ(VLV_PSRCTL(pipe));
-
- /*
- * Here we do the transition drirectly from
- * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
- * PSR_state 5 (exit).
- * PSR State 4 (active with single frame update) can be skipped.
- * On PSR_state 5 (exit) Hardware is responsible to transition
- * back to PSR_state 1 (inactive).
- * Now we are at Same state after vlv_psr_enable_source.
- */
- val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
- I915_WRITE(VLV_PSRCTL(pipe), val);
-
- /*
- * Send AUX wake up - Spec says after transitioning to PSR
- * active we have to send AUX wake up by writing 01h in DPCD
- * 600h of sink device.
- * XXX: This might slow down the transition, but without this
- * HW doesn't complete the transition to PSR_state 1 and we
- * never get the screen updated.
- */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
- DP_SET_POWER_D0);
+ val = I915_READ(EDP_PSR_CTL);
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+ I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
}
-
dev_priv->psr.active = false;
}
/**
- * intel_psr_single_frame_update - Single Frame Update
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * Some platforms support a single frame update feature that is used to
- * send and update only one frame on Remote Frame Buffer.
- * So far it is only implemented for Valleyview and Cherryview because
- * hardware requires this to be done before a page flip.
- */
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits)
-{
- struct drm_crtc *crtc;
- enum pipe pipe;
- u32 val;
-
- if (!CAN_PSR(dev_priv))
- return;
-
- /*
- * Single frame update is already supported on BDW+ but it requires
- * many W/A and it isn't really needed.
- */
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- return;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
- val = I915_READ(VLV_PSRCTL(pipe));
-
- /*
- * We need to set this bit before writing registers for a flip.
- * This bit will be self-clear when it gets to the PSR active state.
- */
- I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
- }
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
* intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
@@ -1071,7 +857,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP)
return;
mutex_lock(&dev_priv->psr.lock);
@@ -1114,7 +900,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP)
return;
mutex_lock(&dev_priv->psr.lock);
@@ -1131,8 +917,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
/* By definition flush = invalidate + flush */
if (frontbuffer_bits) {
- if (dev_priv->psr.psr2_enabled ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (dev_priv->psr.psr2_enabled) {
intel_psr_exit(dev_priv);
} else {
/*
@@ -1149,9 +934,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
}
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
- if (!work_busy(&dev_priv->psr.work.work))
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(100));
+ schedule_work(&dev_priv->psr.work);
mutex_unlock(&dev_priv->psr.lock);
}
@@ -1184,38 +967,64 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- /* On VLV and CHV only standby mode is supported. */
- dev_priv->psr.link_standby = true;
else
/* For new platforms let's respect VBT back again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
- /* Override link_standby x link_off defaults */
- if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
- DRM_DEBUG_KMS("PSR: Forcing link standby\n");
- dev_priv->psr.link_standby = true;
- }
- if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
- DRM_DEBUG_KMS("PSR: Forcing main link off\n");
- dev_priv->psr.link_standby = false;
+ INIT_WORK(&dev_priv->psr.work, intel_psr_work);
+ mutex_init(&dev_priv->psr.lock);
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_psr *psr = &dev_priv->psr;
+ u8 val;
+ const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
+ DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
+ DP_PSR_LINK_CRC_ERROR;
+
+ if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled != intel_dp)
+ goto exit;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
+ DRM_ERROR("PSR_STATUS dpcd read failed\n");
+ goto exit;
}
- INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
- mutex_init(&dev_priv->psr.lock);
+ if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
+ DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ intel_psr_disable_locked(intel_dp);
+ }
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->psr.enable_source = vlv_psr_enable_source;
- dev_priv->psr.disable_source = vlv_psr_disable;
- dev_priv->psr.enable_sink = vlv_psr_enable_sink;
- dev_priv->psr.activate = vlv_psr_activate;
- dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
- } else {
- dev_priv->psr.has_hw_tracking = true;
- dev_priv->psr.enable_source = hsw_psr_enable_source;
- dev_priv->psr.disable_source = hsw_psr_disable;
- dev_priv->psr.enable_sink = hsw_psr_enable_sink;
- dev_priv->psr.activate = hsw_psr_activate;
- dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
+ DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
+ goto exit;
}
+
+ if (val & DP_PSR_RFB_STORAGE_ERROR)
+ DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
+ if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+ DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
+ if (val & DP_PSR_LINK_CRC_ERROR)
+ DRM_ERROR("PSR Link CRC error, disabling PSR\n");
+
+ if (val & ~errors)
+ DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
+ val & ~errors);
+ if (val & errors)
+ intel_psr_disable_locked(intel_dp);
+ /* clear status register */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
+
+ /* TODO: handle PSR2 errors */
+exit:
+ mutex_unlock(&psr->lock);
}
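
The error handling in intel_psr_short_pulse() above follows a common test-and-clear shape: read a sink status byte, report each understood bit, complain about the rest, then write the same value back so exactly the observed bits are cleared. A hedged sketch of that shape, with illustrative bit positions rather than the real DPCD values:

#include <stdint.h>
#include <stdio.h>

#define ERR_RFB_STORAGE (1u << 1) /* illustrative, not the DPCD bits */
#define ERR_VSC_SDP     (1u << 2)
#define ERR_LINK_CRC    (1u << 3)

static uint8_t handle_error_status(uint8_t val)
{
	const uint8_t known = ERR_RFB_STORAGE | ERR_VSC_SDP | ERR_LINK_CRC;

	if (val & ERR_RFB_STORAGE)
		printf("RFB storage error\n");
	if (val & ERR_VSC_SDP)
		printf("VSC SDP uncorrectable error\n");
	if (val & ERR_LINK_CRC)
		printf("link CRC error\n");
	if (val & ~known)
		printf("unhandled error bits %#x\n", val & ~known);

	/* the caller writes this back, clearing exactly the bits
	 * that were observed (write-1-to-clear semantics) */
	return val;
}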
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8f19349a6055..6a8f27d0a742 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -387,8 +387,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
mmio = RING_HWS_PGA(engine->mmio_base);
}
- if (INTEL_GEN(dev_priv) >= 6)
- I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+ if (INTEL_GEN(dev_priv) >= 6) {
+ u32 mask = ~0u;
+
+ /*
+ * Keep the render interrupt unmasked as this papers over
+ * lost interrupts following a reset.
+ */
+ if (engine->id == RCS)
+ mask &= ~BIT(0);
+
+ I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
+ }
I915_WRITE(mmio, engine->status_page.ggtt_offset);
POSTING_READ(mmio);
@@ -496,6 +506,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
engine->name, I915_READ_HEAD(engine));
+ /* Check that the ring offsets point within the ring! */
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
+
intel_ring_update_space(ring);
I915_WRITE_HEAD(engine, ring->head);
I915_WRITE_TAIL(engine, ring->tail);
@@ -520,8 +534,6 @@ static int init_ring_common(struct intel_engine_cs *engine)
goto out;
}
- intel_engine_init_hangcheck(engine);
-
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
@@ -531,16 +543,33 @@ out:
return ret;
}
-static void reset_ring_common(struct intel_engine_cs *engine,
- struct i915_request *request)
+static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
{
- /*
- * RC6 must be prevented until the reset is complete and the engine
- * reinitialised. If it occurs in the middle of this sequence, the
- * state written to/loaded from the power context is ill-defined (e.g.
- * the PP_BASE_DIR may be lost).
- */
- assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
+ intel_engine_stop_cs(engine);
+
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ return i915_gem_find_active_request(engine);
+}
+
+static void skip_request(struct i915_request *rq)
+{
+ void *vaddr = rq->ring->vaddr;
+ u32 head;
+
+ head = rq->infix;
+ if (rq->postfix < head) {
+ memset32(vaddr + head, MI_NOOP,
+ (rq->ring->size - head) / sizeof(u32));
+ head = 0;
+ }
+ memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
+}
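
skip_request() above overwrites the span [infix, postfix) with no-ops, splitting the fill in two when the span wraps past the end of the ring. A self-contained sketch of the same wrap handling, using a local dword-fill loop rather than the kernel's memset32():

#include <stddef.h>
#include <stdint.h>

static void fill32(uint32_t *s, uint32_t v, size_t n)
{
	while (n--)
		*s++ = v;
}

#define NOOP_SKETCH 0u /* stands in for MI_NOOP */

static void skip_span(uint32_t *ring, size_t ring_dwords,
		      size_t head, size_t postfix)
{
	/* if the span wraps, fill to the end of the ring first */
	if (postfix < head) {
		fill32(ring + head, NOOP_SKETCH, ring_dwords - head);
		head = 0;
	}
	fill32(ring + head, NOOP_SKETCH, postfix - head);
}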
+
+static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+ GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
/*
* Try to restore the logical GPU state to match the continuation
@@ -556,47 +585,18 @@ static void reset_ring_common(struct intel_engine_cs *engine,
* If the request was innocent, we try to replay the request with
* the restored context.
*/
- if (request) {
- struct drm_i915_private *dev_priv = request->i915;
- struct intel_context *ce = to_intel_context(request->ctx,
- engine);
- struct i915_hw_ppgtt *ppgtt;
-
- if (ce->state) {
- I915_WRITE(CCID,
- i915_ggtt_offset(ce->state) |
- BIT(8) /* must be set! */ |
- CCID_EXTENDED_STATE_SAVE |
- CCID_EXTENDED_STATE_RESTORE |
- CCID_EN);
- }
-
- ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
- if (ppgtt) {
- u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
-
- I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
-
- /* Wait for the PD reload to complete */
- if (intel_wait_for_register(dev_priv,
- RING_PP_DIR_BASE(engine),
- BIT(0), 0,
- 10))
- DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
-
- ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
- }
-
+ if (rq) {
/* If the rq hung, jump to its breadcrumb and skip the batch */
- if (request->fence.error == -EIO)
- request->ring->head = request->postfix;
- } else {
- engine->legacy_active_context = NULL;
- engine->legacy_active_ppgtt = NULL;
+ rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
+ if (rq->fence.error == -EIO)
+ skip_request(rq);
}
}
+static void reset_finish(struct intel_engine_cs *engine)
+{
+}
+
static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
@@ -1033,6 +1033,8 @@ int intel_ring_pin(struct intel_ring *ring,
flags |= PIN_OFFSET_BIAS | offset_bias;
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
@@ -1066,6 +1068,8 @@ err:
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
+
ring->tail = tail;
ring->head = tail;
ring->emit = tail;
@@ -1093,6 +1097,7 @@ void intel_ring_unpin(struct intel_ring *ring)
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
+ struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -1102,10 +1107,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
if (IS_ERR(obj))
return ERR_CAST(obj);
- /* mark ring buffers as read-only from GPU side by default */
- obj->gt_ro = 1;
+ /*
+ * Mark ring buffers as read-only from the GPU side (so no stray overwrites)
+ * if supported by the platform's GGTT.
+ */
+ if (vm->has_read_only)
+ i915_gem_object_set_readonly(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
goto err;
@@ -1169,10 +1178,46 @@ intel_ring_free(struct intel_ring *ring)
kfree(ring);
}
-static int context_pin(struct intel_context *ce)
+static void intel_ring_context_destroy(struct intel_context *ce)
{
- struct i915_vma *vma = ce->state;
- int ret;
+ GEM_BUG_ON(ce->pin_count);
+
+ if (!ce->state)
+ return;
+
+ GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+ i915_gem_object_put(ce->state->obj);
+}
+
+static int __context_pin_ppgtt(struct i915_gem_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+ int err = 0;
+
+ ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+ if (ppgtt)
+ err = gen6_ppgtt_pin(ppgtt);
+
+ return err;
+}
+
+static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+
+ ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+ if (ppgtt)
+ gen6_ppgtt_unpin(ppgtt);
+}
+
+static int __context_pin(struct intel_context *ce)
+{
+ struct i915_vma *vma;
+ int err;
+
+ vma = ce->state;
+ if (!vma)
+ return 0;
/*
* Clear this page out of any CPU caches for coherent swap-in/out.
@@ -1180,13 +1225,43 @@ static int context_pin(struct intel_context *ce)
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- if (ret)
- return ret;
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ if (err)
+ return err;
}
- return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
- PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
+ PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ return err;
+
+ /*
+ * And mark it as a globally pinned object to let the shrinker know
+ * it cannot reclaim the object until we release it.
+ */
+ vma->obj->pin_global++;
+
+ return 0;
+}
+
+static void __context_unpin(struct intel_context *ce)
+{
+ struct i915_vma *vma;
+
+ vma = ce->state;
+ if (!vma)
+ return;
+
+ vma->obj->pin_global--;
+ i915_vma_unpin(vma);
+}
+
+static void intel_ring_context_unpin(struct intel_context *ce)
+{
+ __context_unpin_ppgtt(ce->gem_context);
+ __context_unpin(ce);
+
+ i915_gem_context_put(ce->gem_context);
}
static struct i915_vma *
@@ -1243,7 +1318,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
}
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -1258,81 +1333,79 @@ err_obj:
return ERR_PTR(err);
}
-static struct intel_ring *
-intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_context *
+__ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
- int ret;
-
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- goto out;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+ int err;
if (!ce->state && engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
+ err = PTR_ERR(vma);
goto err;
}
ce->state = vma;
}
- if (ce->state) {
- ret = context_pin(ce);
- if (ret)
- goto err;
+ err = __context_pin(ce);
+ if (err)
+ goto err;
- ce->state->obj->pin_global++;
- }
+ err = __context_pin_ppgtt(ce->gem_context);
+ if (err)
+ goto err_unpin;
i915_gem_context_get(ctx);
-out:
/* One ringbuffer to rule them all */
- return engine->buffer;
+ GEM_BUG_ON(!engine->buffer);
+ ce->ring = engine->buffer;
+ return ce;
+
+err_unpin:
+ __context_unpin(ce);
err:
ce->pin_count = 0;
- return ERR_PTR(ret);
+ return ERR_PTR(err);
}
-static void intel_ring_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops ring_context_ops = {
+ .unpin = intel_ring_context_unpin,
+ .destroy = intel_ring_context_destroy,
+};
+
+static struct intel_context *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(ce->pin_count == 0);
- if (--ce->pin_count)
- return;
+ if (likely(ce->pin_count++))
+ return ce;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
- if (ce->state) {
- ce->state->obj->pin_global--;
- i915_vma_unpin(ce->state);
- }
+ ce->ops = &ring_context_ops;
- i915_gem_context_put(ctx);
+ return __ring_context_pin(engine, ctx, ce);
}
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
- struct intel_ring *ring;
struct i915_timeline *timeline;
+ struct intel_ring *ring;
+ unsigned int size;
int err;
intel_engine_setup_common(engine);
- err = intel_engine_init_common(engine);
- if (err)
- goto err;
-
timeline = i915_timeline_create(engine->i915, engine->name);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
@@ -1354,8 +1427,23 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
+ size = PAGE_SIZE;
+ if (HAS_BROKEN_CS_TLB(engine->i915))
+ size = I830_WA_SIZE;
+ err = intel_engine_create_scratch(engine, size);
+ if (err)
+ goto err_unpin;
+
+ err = intel_engine_init_common(engine);
+ if (err)
+ goto err_scratch;
+
return 0;
+err_scratch:
+ intel_engine_cleanup_scratch(engine);
+err_unpin:
+ intel_ring_unpin(ring);
err_ring:
intel_ring_free(ring);
err:
@@ -1392,6 +1480,48 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
intel_ring_reset(engine->buffer, 0);
}
+static int load_pd_dir(struct i915_request *rq,
+ const struct i915_hw_ppgtt *ppgtt)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+ *cs++ = PP_DIR_DCLV_2G;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = ppgtt->pd.base.ggtt_offset << 10;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int flush_pd_dir(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Stall until the page table load is complete */
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
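
Both helpers above follow the usual command-stream emission shape: reserve dwords, write (opcode, operand) tuples, advance. A hedged sketch of that shape with placeholder opcodes; the real MI encodings differ:

#include <stdint.h>

#define LRI_SKETCH 0x22000000u /* placeholder "load register immediate" */
#define SRM_SKETCH 0x24000000u /* placeholder "store register to memory" */

static uint32_t *emit_reg_write(uint32_t *cs, uint32_t reg, uint32_t value)
{
	*cs++ = LRI_SKETCH;
	*cs++ = reg;
	*cs++ = value;
	return cs;
}

static uint32_t *emit_reg_store(uint32_t *cs, uint32_t reg,
				uint32_t scratch_addr)
{
	/* storing the register back to memory stalls later commands
	 * until the preceding register write has actually landed */
	*cs++ = SRM_SKETCH;
	*cs++ = reg;
	*cs++ = scratch_addr;
	return cs;
}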
+
static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
@@ -1402,6 +1532,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
(HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
INTEL_INFO(i915)->num_rings - 1 :
0;
+ bool force_restore = false;
int len;
u32 *cs;
@@ -1415,6 +1546,12 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
len = 4;
if (IS_GEN7(i915))
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+ if (flags & MI_FORCE_RESTORE) {
+ GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
+ flags &= ~MI_FORCE_RESTORE;
+ force_restore = true;
+ len += 2;
+ }
cs = intel_ring_begin(rq, len);
if (IS_ERR(cs))
@@ -1439,9 +1576,29 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
}
}
+ if (force_restore) {
+ /*
+ * The HW doesn't handle being told to restore the current
+ * context very well. Quite often it likes to go off and
+ * sulk, especially when it is meant to be reloading PP_DIR.
+ * A very simple fix to force the reload is to simply switch
+ * away from the current context and back again.
+ *
+ * Note that the kernel_context will contain random state
+ * following the INHIBIT_RESTORE. We accept this since we
+ * never use the kernel_context state; it is merely a
+ * placeholder we use to flush other contexts.
+ */
+ *cs++ = MI_SET_CONTEXT;
+ *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
+ engine)->state) |
+ MI_MM_SPACE_GTT |
+ MI_RESTORE_INHIBIT;
+ }
+
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
+ *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1509,31 +1666,28 @@ static int remap_l3(struct i915_request *rq, int slice)
static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *to_ctx = rq->ctx;
- struct i915_hw_ppgtt *to_mm =
- to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
- struct i915_gem_context *from_ctx = engine->legacy_active_context;
- struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt;
+ struct i915_gem_context *ctx = rq->gem_context;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ unsigned int unwind_mm = 0;
u32 hw_flags = 0;
int ret, i;
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- if (to_mm != from_mm ||
- (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) {
- trace_switch_mm(engine, to_ctx);
- ret = to_mm->switch_mm(to_mm, rq);
+ if (ppgtt) {
+ ret = load_pd_dir(rq, ppgtt);
if (ret)
goto err;
- to_mm->pd_dirty_rings &= ~intel_engine_flag(engine);
- engine->legacy_active_ppgtt = to_mm;
- hw_flags = MI_FORCE_RESTORE;
+ if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
+ unwind_mm = intel_engine_flag(engine);
+ ppgtt->pd_dirty_rings &= ~unwind_mm;
+ hw_flags = MI_FORCE_RESTORE;
+ }
}
- if (to_intel_context(to_ctx, engine)->state &&
- (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
+ if (rq->hw_context->state) {
GEM_BUG_ON(engine->id != RCS);
/*
@@ -1543,35 +1697,38 @@ static int switch_context(struct i915_request *rq)
* as nothing actually executes using the kernel context; it
* is purely used for flushing user contexts.
*/
- if (i915_gem_context_is_kernel(to_ctx))
+ if (i915_gem_context_is_kernel(ctx))
hw_flags = MI_RESTORE_INHIBIT;
ret = mi_set_context(rq, hw_flags);
if (ret)
goto err_mm;
+ }
- engine->legacy_active_context = to_ctx;
+ if (ppgtt) {
+ ret = flush_pd_dir(rq);
+ if (ret)
+ goto err_mm;
}
- if (to_ctx->remap_slice) {
+ if (ctx->remap_slice) {
for (i = 0; i < MAX_L3_SLICES; i++) {
- if (!(to_ctx->remap_slice & BIT(i)))
+ if (!(ctx->remap_slice & BIT(i)))
continue;
ret = remap_l3(rq, i);
if (ret)
- goto err_ctx;
+ goto err_mm;
}
- to_ctx->remap_slice = 0;
+ ctx->remap_slice = 0;
}
return 0;
-err_ctx:
- engine->legacy_active_context = from_ctx;
err_mm:
- engine->legacy_active_ppgtt = from_mm;
+ if (unwind_mm)
+ ppgtt->pd_dirty_rings |= unwind_mm;
err:
return ret;
}
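
The unwind_mm bookkeeping above records exactly which dirty bits were cleared so the error path can set them back, instead of snapshotting full before/after state. A minimal sketch of that save-and-restore-on-failure pattern:

#include <stdint.h>

struct fake_ppgtt { uint32_t pd_dirty_rings; };

static int do_switch_sketch(struct fake_ppgtt *ppgtt, uint32_t engine_bit,
			    int (*emit)(void))
{
	uint32_t unwind = 0;
	int ret;

	if (ppgtt->pd_dirty_rings & engine_bit) {
		unwind = engine_bit; /* remember what we cleared */
		ppgtt->pd_dirty_rings &= ~unwind;
	}

	ret = emit();
	if (ret)
		ppgtt->pd_dirty_rings |= unwind; /* restore on failure */

	return ret;
}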
@@ -1580,7 +1737,7 @@ static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
+ GEM_BUG_ON(!request->hw_context->pin_count);
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
@@ -2006,11 +2163,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
intel_ring_init_semaphores(dev_priv, engine);
engine->init_hw = init_ring_common;
- engine->reset_hw = reset_ring_common;
+ engine->reset.prepare = reset_prepare;
+ engine->reset.reset = reset_ring;
+ engine->reset.finish = reset_finish;
engine->context_pin = intel_ring_context_pin;
- engine->context_unpin = intel_ring_context_unpin;
-
engine->request_alloc = ring_request_alloc;
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
@@ -2074,16 +2231,6 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (ret)
return ret;
- if (INTEL_GEN(dev_priv) >= 6) {
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
- if (ret)
- return ret;
- } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
- ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
- if (ret)
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 010750e8ee44..f5ffa6d31e82 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -122,7 +122,8 @@ struct intel_engine_hangcheck {
int deadlock;
struct intel_instdone instdone;
struct i915_request *active_request;
- bool stalled;
+ bool stalled:1;
+ bool wedged:1;
};
struct intel_ring {
@@ -192,6 +193,11 @@ struct i915_priolist {
int priority;
};
+struct st_preempt_hang {
+ struct completion completion;
+ bool inject_hang;
+};
+
/**
* struct intel_engine_execlists - execlist submission queue and port state
*
@@ -291,32 +297,49 @@ struct intel_engine_execlists {
/**
* @queue: queue of requests, in priority lists
*/
- struct rb_root queue;
+ struct rb_root_cached queue;
/**
- * @first: leftmost level in priority @queue
+ * @csb_read: control register for Context Switch buffer
+ *
+ * Note this register is always in mmio.
*/
- struct rb_node *first;
+ u32 __iomem *csb_read;
/**
- * @fw_domains: forcewake domains for irq tasklet
+ * @csb_write: control register for Context Switch buffer
+ *
+ * Note this register may be either mmio or HWSP shadow.
*/
- unsigned int fw_domains;
+ u32 *csb_write;
/**
- * @csb_head: context status buffer head
+ * @csb_status: status array for Context Switch buffer
+ *
+ * Note these registers may be either mmio or HWSP shadow.
*/
- unsigned int csb_head;
+ u32 *csb_status;
/**
- * @csb_use_mmio: access csb through mmio, instead of hwsp
+ * @preempt_complete_status: expected CSB upon completing preemption
*/
- bool csb_use_mmio;
+ u32 preempt_complete_status;
/**
- * @preempt_complete_status: expected CSB upon completing preemption
+ * @csb_write_reset: reset value for CSB write pointer
+ *
+ * As the CSB write pointer may be either in HWSP or as a field
+ * inside an mmio register, we want to reprogram it slightly
+ * differently to avoid later confusion.
*/
- u32 preempt_complete_status;
+ u32 csb_write_reset;
+
+ /**
+ * @csb_head: context status buffer head
+ */
+ u8 csb_head;
+
+ I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
#define INTEL_ENGINE_CS_MAX_NAME 8
@@ -342,11 +365,10 @@ struct intel_engine_cs {
struct i915_timeline timeline;
struct drm_i915_gem_object *default_state;
+ void *pinned_default_state;
- atomic_t irq_count;
unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
-#define ENGINE_IRQ_EXECLIST 1
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
@@ -378,6 +400,7 @@ struct intel_engine_cs {
unsigned int hangcheck_interrupts;
unsigned int irq_enabled;
+ unsigned int irq_count;
bool irq_armed : 1;
I915_SELFTEST_DECLARE(bool mock : 1);
@@ -423,18 +446,22 @@ struct intel_engine_cs {
void (*irq_disable)(struct intel_engine_cs *engine);
int (*init_hw)(struct intel_engine_cs *engine);
- void (*reset_hw)(struct intel_engine_cs *engine,
- struct i915_request *rq);
+
+ struct {
+ struct i915_request *(*prepare)(struct intel_engine_cs *engine);
+ void (*reset)(struct intel_engine_cs *engine,
+ struct i915_request *rq);
+ void (*finish)(struct intel_engine_cs *engine);
+ } reset;
void (*park)(struct intel_engine_cs *engine);
void (*unpark)(struct intel_engine_cs *engine);
void (*set_default_submission)(struct intel_engine_cs *engine);
- struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
- void (*context_unpin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
+ struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+
int (*request_alloc)(struct i915_request *rq);
int (*init_context)(struct i915_request *rq);
@@ -550,16 +577,7 @@ struct intel_engine_cs {
* to the kernel context and trash it as the save may not happen
* before the hardware is powered down.
*/
- struct i915_gem_context *last_retired_context;
-
- /* We track the current MI_SET_CONTEXT in order to eliminate
- * redudant context switches. This presumes that requests are not
- * reordered! Or when they are the tracking is updated along with
- * the emission of individual requests into the legacy command
- * stream (ring).
- */
- struct i915_gem_context *legacy_active_context;
- struct i915_hw_ppgtt *legacy_active_ppgtt;
+ struct intel_context *last_retired_context;
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
@@ -672,6 +690,12 @@ execlists_clear_active(struct intel_engine_execlists *execlists,
__clear_bit(bit, (unsigned long *)&execlists->active);
}
+static inline void
+execlists_clear_all_active(struct intel_engine_execlists *execlists)
+{
+ execlists->active = 0;
+}
+
static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
unsigned int bit)
@@ -809,6 +833,19 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
return pos & (ring->size - 1);
}
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+ unsigned int pos)
+{
+ if (pos & -ring->size) /* must be strictly within the ring */
+ return false;
+
+ if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+ return false;
+
+ return true;
+}
+
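
The (pos & -ring->size) test above relies on ring->size being a power of two: in two's complement, -size has every bit at or above log2(size) set, so the AND is zero exactly when pos lies strictly inside the ring. A small runnable check of that property:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t size = 4096; /* must be a power of two */

	assert((4095u & -size) == 0); /* inside the ring: accepted */
	assert((4096u & -size) != 0); /* one past the end: rejected */
	assert((8192u & -size) != 0); /* far outside: rejected */
	return 0;
}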
static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
@@ -820,12 +857,7 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
- /* We could combine these into a single tail operation, but keeping
- * them as seperate tests will help identify the cause should one
- * ever fire.
- */
- GEM_BUG_ON(!IS_ALIGNED(tail, 8));
- GEM_BUG_ON(tail >= ring->size);
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
/*
* "Ring Buffer Use"
@@ -865,14 +897,19 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+ unsigned int size);
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
+
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
+int intel_engine_stop_cs(struct intel_engine_cs *engine);
+
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
@@ -918,11 +955,10 @@ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
-static inline void intel_wait_init(struct intel_wait *wait,
- struct i915_request *rq)
+static inline void intel_wait_init(struct intel_wait *wait)
{
wait->tsk = current;
- wait->request = rq;
+ wait->request = NULL;
}
static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
@@ -1042,10 +1078,13 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
return cs;
}
+void intel_engines_sanitize(struct drm_i915_private *i915);
+
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+void intel_engine_lost_context(struct intel_engine_cs *engine);
void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);
@@ -1123,4 +1162,24 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine);
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+ if (!execlists->preempt_hang.inject_hang)
+ return false;
+
+ complete(&execlists->preempt_hang.completion);
+ return true;
+}
+
+#else
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+ return false;
+}
+
+#endif
+
#endif /* _INTEL_RINGBUFFER_H_ */
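
The inject_preempt_hang() pair above is the standard compile-time selftest hook: a real body when selftests are configured in, a constant-false static inline otherwise, so callers never need an #ifdef of their own. A generic sketch of the pattern; EXAMPLE_SELFTEST is an illustrative switch, not a real kconfig symbol:

#include <stdbool.h>

#ifdef EXAMPLE_SELFTEST
static bool fault_requested; /* poked by an imagined test harness */
static inline bool inject_fault(void) { return fault_requested; }
#else
/* compiles away entirely in production builds */
static inline bool inject_fault(void) { return false; }
#endif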
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53a6eaa9671a..6b5aa3b074ec 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -128,10 +128,20 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "AUX_C";
case POWER_DOMAIN_AUX_D:
return "AUX_D";
+ case POWER_DOMAIN_AUX_E:
+ return "AUX_E";
case POWER_DOMAIN_AUX_F:
return "AUX_F";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
+ case POWER_DOMAIN_AUX_TBT1:
+ return "AUX_TBT1";
+ case POWER_DOMAIN_AUX_TBT2:
+ return "AUX_TBT2";
+ case POWER_DOMAIN_AUX_TBT3:
+ return "AUX_TBT3";
+ case POWER_DOMAIN_AUX_TBT4:
+ return "AUX_TBT4";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -382,7 +392,8 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
u32 val;
if (wait_fuses) {
- pg = SKL_PW_TO_PG(id);
+ pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) :
+ SKL_PW_TO_PG(id);
/*
* For PW1 we have to wait both for the PW0/PG0 fuse state
* before enabling the power well and PW1/PG1's own fuse
@@ -428,6 +439,43 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
+#define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A)
+
+static void
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = power_well->id;
+ enum port port = ICL_AUX_PW_TO_PORT(id);
+ u32 val;
+
+ val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+ I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
+
+ val = I915_READ(ICL_PORT_CL_DW12(port));
+ I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = power_well->id;
+ enum port port = ICL_AUX_PW_TO_PORT(id);
+ u32 val;
+
+ val = I915_READ(ICL_PORT_CL_DW12(port));
+ I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
+
+ val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+ I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
+ val & ~HSW_PWR_WELL_CTL_REQ(id));
+
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
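
The enable/disable hooks above are deliberately symmetric: enable requests the power well before turning on the AUX lane, and disable tears the two down in the reverse order. A hedged sketch with stand-in registers and bits, not the real ICL encodings:

#include <stdint.h>

static uint32_t ctl_reg, phy_reg; /* stand-ins for the two MMIO registers */

#define REQ_BIT  (1u << 31) /* illustrative power-well request bit */
#define LANE_BIT (1u << 20) /* illustrative AUX lane enable bit */

static void aux_enable_sketch(void)
{
	ctl_reg |= REQ_BIT;  /* request the well first */
	phy_reg |= LANE_BIT; /* then enable the AUX lane */
	/* the real code now polls until the well reports enabled */
}

static void aux_disable_sketch(void)
{
	phy_reg &= ~LANE_BIT; /* tear down in the reverse order */
	ctl_reg &= ~REQ_BIT;
}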
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -1822,6 +1870,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
@@ -1894,6 +1943,105 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
+/*
+ * ICL PW_0/PG_0 domains (HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - central power except FBC
+ * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
+ * ICL PW_1/PG_1 domains (HW/DMC control):
+ * - DBUF function
+ * - PIPE_A and its planes, except VGA
+ * - transcoder EDP + PSR
+ * - transcoder DSI
+ * - DDI_A
+ * - FBC
+ */
+#define ICL_PW_4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /* VDSC/joining */
+#define ICL_PW_3_POWER_DOMAINS ( \
+ ICL_PW_4_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /*
+ * - transcoder WD
+ * - KVMR (HW control)
+ */
+#define ICL_PW_2_POWER_DOMAINS ( \
+ ICL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /*
+ * - eDP/DSI VDSC
+ * - KVMR (HW control)
+ */
+#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ ICL_PW_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define ICL_DDI_IO_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define ICL_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define ICL_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define ICL_DDI_IO_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define ICL_DDI_IO_E_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define ICL_DDI_IO_F_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+
+#define ICL_AUX_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_A))
+#define ICL_AUX_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B))
+#define ICL_AUX_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C))
+#define ICL_AUX_D_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D))
+#define ICL_AUX_E_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E))
+#define ICL_AUX_F_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F))
+#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1))
+#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2))
+#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3))
+#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -2451,6 +2599,157 @@ static struct i915_power_well cnl_power_wells[] = {
},
};
+static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_combo_phy_aux_power_well_enable,
+ .disable = icl_combo_phy_aux_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static struct i915_power_well icl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_1,
+ .hsw.has_fuses = true,
+ },
+ {
+ .name = "power well 2",
+ .domains = ICL_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_2,
+ .hsw.has_fuses = true,
+ },
+ {
+ .name = "DC off",
+ .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_PW_DC_OFF,
+ },
+ {
+ .name = "power well 3",
+ .domains = ICL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_A,
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_B,
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_C,
+ },
+ {
+ .name = "DDI D IO",
+ .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_D,
+ },
+ {
+ .name = "DDI E IO",
+ .domains = ICL_DDI_IO_E_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_E,
+ },
+ {
+ .name = "DDI F IO",
+ .domains = ICL_DDI_IO_F_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_F,
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = ICL_DISP_PW_AUX_A,
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = ICL_DISP_PW_AUX_B,
+ },
+ {
+ .name = "AUX C",
+ .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_C,
+ },
+ {
+ .name = "AUX D",
+ .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_D,
+ },
+ {
+ .name = "AUX E",
+ .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_E,
+ },
+ {
+ .name = "AUX F",
+ .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_F,
+ },
+ {
+ .name = "AUX TBT1",
+ .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT1,
+ },
+ {
+ .name = "AUX TBT2",
+ .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT2,
+ },
+ {
+ .name = "AUX TBT3",
+ .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT3,
+ },
+ {
+ .name = "AUX TBT4",
+ .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT4,
+ },
+ {
+ .name = "power well 4",
+ .domains = ICL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ },
+};
+
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -2468,7 +2767,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
max_dc = 2;
mask = 0;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -2556,7 +2855,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_HASWELL(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ set_power_wells(power_domains, icl_power_wells);
+ } else if (IS_HASWELL(dev_priv)) {
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv)) {
set_power_wells(power_domains, bdw_power_wells);
@@ -2911,6 +3212,7 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
+ /* fall through */
case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
break;
@@ -3023,6 +3325,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
static void icl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
enum port port;
u32 val;
@@ -3051,8 +3355,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
I915_WRITE(ICL_PORT_CL_DW5(port), val);
}
- /* 4. Enable power well 1 (PG1) and aux IO power. */
- /* FIXME: ICL power wells code not here yet. */
+ /*
+ * 4. Enable Power Well 1 (PG1).
+ * The AUX IO power wells will be enabled on demand.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
/* 5. Enable CDCLK. */
icl_init_cdclk(dev_priv);
@@ -3070,6 +3380,8 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
enum port port;
u32 val;
@@ -3083,8 +3395,15 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 3. Disable CD clock */
icl_uninit_cdclk(dev_priv);
- /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
- /* FIXME: ICL power wells code not here yet. */
+ /*
+ * 4. Disable Power Well 1 (PG1).
+ * The AUX IO power wells are toggled on demand, so they are already
+ * disabled at this point.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
/* 5. Disable Comp */
for (port = PORT_A; port <= PORT_B; port++) {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 26975df4e593..812fe7b06f87 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1340,6 +1340,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
switch (crtc_state->pixel_multiplier) {
default:
WARN(1, "unknown pixel multiplier specified\n");
+ /* fall through */
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1400,33 +1401,40 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- if (active_outputs & intel_sdvo_connector->output_flag)
- return true;
+ return active_outputs & intel_sdvo_connector->output_flag;
+}
+
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(sdvo_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
else
- return false;
+ *pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT;
+
+ return val & SDVO_ENABLE;
}
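
intel_sdvo_port_enabled() above extracts two things from a single register read: the pipe-select field, which is reported even when the port is off so the state asserts can use it, and the enable bit. A sketch of that mask/shift extraction with illustrative masks, not the real SDVO encodings:

#include <stdbool.h>
#include <stdint.h>

#define EN_SKETCH         (1u << 31)
#define PIPE_MASK_SKETCH  (3u << 29)
#define PIPE_SHIFT_SKETCH 29

static bool port_enabled_sketch(uint32_t val, int *pipe)
{
	/* report the pipe even when the port is off */
	*pipe = (val & PIPE_MASK_SKETCH) >> PIPE_SHIFT_SKETCH;
	return val & EN_SKETCH;
}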
static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
- u32 tmp;
+ bool ret;
- tmp = I915_READ(intel_sdvo->sdvo_reg);
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
- return false;
+ ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe);
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- return true;
+ return ret || active_outputs;
}
static void intel_sdvo_get_config(struct intel_encoder *encoder,
@@ -1553,8 +1561,8 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- temp &= ~SDVO_PIPE_B_SELECT;
- temp |= SDVO_ENABLE;
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
intel_sdvo_write_sdvox(intel_sdvo, temp);
temp &= ~SDVO_ENABLE;
@@ -1903,7 +1911,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
if (edid != NULL) {
if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
edid)) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
@@ -2306,14 +2314,19 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
switch (sdvo->controlled_output) {
case SDVO_OUTPUT_LVDS1:
mask |= SDVO_OUTPUT_LVDS1;
+ /* fall through */
case SDVO_OUTPUT_LVDS0:
mask |= SDVO_OUTPUT_LVDS0;
+ /* fall through */
case SDVO_OUTPUT_TMDS1:
mask |= SDVO_OUTPUT_TMDS1;
+ /* fall through */
case SDVO_OUTPUT_TMDS0:
mask |= SDVO_OUTPUT_TMDS0;
+ /* fall through */
case SDVO_OUTPUT_RGB1:
mask |= SDVO_OUTPUT_RGB1;
+ /* fall through */
case SDVO_OUTPUT_RGB0:
mask |= SDVO_OUTPUT_RGB0;
break;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ee23613f9fd4..f7026e887fa9 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -41,20 +41,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-bool intel_format_is_yuv(u32 format)
-{
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_NV12:
- return true;
- default:
- return false;
- }
-}
-
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
@@ -107,13 +93,21 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
- local_irq_disable();
-
if (min <= 0 || max <= 0)
- return;
+ goto irq_disable;
if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
- return;
+ goto irq_disable;
+
+ /*
+ * Wait for PSR to idle out after enabling the VBL interrupts.
+ * VBL interrupts will start the PSR exit and prevent a PSR
+ * re-entry as well.
+ */
+ if (intel_psr_wait_for_idle(new_crtc_state))
+ DRM_ERROR("PSR idle timed out, atomic update may fail\n");
+
+ local_irq_disable();
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
@@ -171,6 +165,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
trace_i915_pipe_update_vblank_evaded(crtc);
+ return;
+
+irq_disable:
+ local_irq_disable();
}
/**
@@ -284,13 +282,35 @@ skl_update_plane(struct intel_plane *plane,
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
int scaler_id = plane_state->scaler_id;
- const struct intel_scaler *scaler;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[scaler_id];
+ u16 y_hphase, uv_rgb_hphase;
+ u16 y_vphase, uv_rgb_vphase;
+
+ /* TODO: handle sub-pixel coordinates */
+ if (fb->format->format == DRM_FORMAT_NV12) {
+ y_hphase = skl_scaler_calc_phase(1, false);
+ y_vphase = skl_scaler_calc_phase(1, false);
+
+ /* MPEG2 chroma siting convention */
+ uv_rgb_hphase = skl_scaler_calc_phase(2, true);
+ uv_rgb_vphase = skl_scaler_calc_phase(2, false);
+ } else {
+ /* not used */
+ y_hphase = 0;
+ y_vphase = 0;
- scaler = &crtc_state->scaler_state.scalers[scaler_id];
+ uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+ }
I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
((crtc_w + 1) << 16)|(crtc_h + 1));
@@ -327,19 +347,21 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
bool
-skl_plane_get_hw_state(struct intel_plane *plane)
+skl_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+ ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -380,7 +402,7 @@ chv_update_csc(const struct intel_plane_state *plane_state)
const s16 *csc = csc_matrix[plane_state->base.color_encoding];
/* Seems RGB data bypasses the CSC always */
- if (!intel_format_is_yuv(fb->format->format))
+ if (!fb->format->is_yuv)
return;
I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
@@ -415,7 +437,7 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
enum plane_id plane_id = plane->id;
int contrast, brightness, sh_scale, sh_sin, sh_cos;
- if (intel_format_is_yuv(fb->format->format) &&
+ if (fb->format->is_yuv &&
plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
/*
* Expand limited range to full range:
@@ -588,19 +610,21 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-vlv_plane_get_hw_state(struct intel_plane *plane)
+vlv_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+ ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -754,18 +778,20 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-ivb_plane_get_hw_state(struct intel_plane *plane)
+ivb_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
+ ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -910,18 +936,20 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-g4x_plane_get_hw_state(struct intel_plane *plane)
+g4x_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+ ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -1010,7 +1038,7 @@ intel_check_sprite_plane(struct intel_plane *plane,
src->y1 = src_y << 16;
src->y2 = (src_y + src_h) << 16;
- if (intel_format_is_yuv(fb->format->format) &&
+ if (fb->format->is_yuv &&
fb->format->format != DRM_FORMAT_NV12 &&
(src_x % 2 || src_w % 2)) {
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
@@ -1071,6 +1099,37 @@ intel_check_sprite_plane(struct intel_plane *plane,
return 0;
}
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+{
+ return INTEL_GEN(dev_priv) >= 9;
+}
+
+static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
+ const struct drm_intel_sprite_colorkey *set)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+ *key = *set;
+
+ /*
+ * We want src key enabled on the
+ * sprite and not on the primary.
+ */
+ if (plane->id == PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_SOURCE)
+ key->flags = 0;
+
+ /*
+ * On SKL+ we want dst key enabled on
+ * the primary and not on the sprite.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ key->flags = 0;
+}
+
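
intel_plane_set_ckey() above applies one user setting to two planes but drops the flags that the receiving plane cannot honor. A compact sketch of that filtering rule, with made-up flag names:

#include <stdbool.h>
#include <stdint.h>

#define KEY_SOURCE      (1u << 0) /* illustrative flag values */
#define KEY_DESTINATION (1u << 1)

static uint32_t filter_key_flags(uint32_t flags, bool is_primary,
				 bool skl_plus)
{
	/* source keying belongs on the sprite, not the primary */
	if (is_primary && (flags & KEY_SOURCE))
		return 0;
	/* on SKL+ destination keying belongs on the primary only */
	if (skl_plus && !is_primary && (flags & KEY_DESTINATION))
		return 0;
	return flags;
}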
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1100,6 +1159,16 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
return -ENOENT;
+ /*
+ * On SKL+ only plane 2 can do destination keying against plane 1.
+ * Also multiple planes can't do destination keying on the same
+ * pipe simultaneously.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 &&
+ to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(plane->dev);
@@ -1112,11 +1181,28 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
while (1) {
plane_state = drm_atomic_get_plane_state(state, plane);
ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret) {
- to_intel_plane_state(plane_state)->ckey = *set;
- ret = drm_atomic_commit(state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+
+ /*
+ * On some platforms we have to configure
+ * the dst colorkey on the primary plane.
+ */
+ if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ struct intel_crtc *crtc =
+ intel_get_crtc_for_pipe(dev_priv,
+ to_intel_plane(plane)->pipe);
+
+ plane_state = drm_atomic_get_plane_state(state,
+ crtc->base.primary);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
}
+ if (!ret)
+ ret = drm_atomic_commit(state);
+
if (ret != -EDEADLK)
break;
@@ -1211,8 +1297,17 @@ static const uint64_t skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
-static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
+static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_YUYV:
@@ -1228,8 +1323,17 @@ static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool snb_mod_supported(uint32_t format, uint64_t modifier)
+static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -1246,8 +1350,17 @@ static bool snb_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
+static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_ABGR8888:
@@ -1269,8 +1382,26 @@ static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (!plane->has_ccs)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -1302,38 +1433,48 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
+static const struct drm_plane_funcs g4x_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = g4x_sprite_format_mod_supported,
+};
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
- modifier != DRM_FORMAT_MOD_LINEAR)
- return false;
+static const struct drm_plane_funcs snb_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = snb_sprite_format_mod_supported,
+};
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_mod_supported(format, modifier);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_mod_supported(format, modifier);
- else if (INTEL_GEN(dev_priv) >= 6)
- return snb_mod_supported(format, modifier);
- else
- return g4x_mod_supported(format, modifier);
-}
+static const struct drm_plane_funcs vlv_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = vlv_sprite_format_mod_supported,
+};
-static const struct drm_plane_funcs intel_sprite_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_sprite_plane_format_mod_supported,
+static const struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
};
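The refactor replaces one catch-all format_mod_supported() hook with per-platform drm_plane_funcs tables, so the gen check happens once at plane creation rather than on every query. A minimal sketch of that dispatch pattern; the plane_funcs struct and the *_mod_ok() helpers here are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-platform vtable, mirroring the drm_plane_funcs split. */
struct plane_funcs {
	const char *name;
	bool (*format_mod_supported)(uint32_t format, uint64_t modifier);
};

static bool old_mod_ok(uint32_t f, uint64_t m) { (void)f; return m <= 1; }
static bool skl_mod_ok(uint32_t f, uint64_t m) { (void)f; return m <= 4; }

static const struct plane_funcs g4x_funcs = { "g4x", old_mod_ok };
static const struct plane_funcs skl_funcs = { "skl", skl_mod_ok };

/* Pick the table once at plane creation, instead of branching per query. */
static const struct plane_funcs *pick_funcs(int gen)
{
	return gen >= 9 ? &skl_funcs : &g4x_funcs;
}

int main(void)
{
	const struct plane_funcs *funcs = pick_funcs(9);

	printf("%s supports mod 3: %d\n", funcs->name,
	       funcs->format_mod_supported(0, 3));
	return 0;
}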
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
@@ -1359,6 +1500,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
+ const struct drm_plane_funcs *plane_funcs;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
const uint64_t *modifiers;
@@ -1383,6 +1525,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->can_scale = true;
state->scaler_id = -1;
+ intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+ PLANE_SPRITE0 + plane);
+
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
intel_plane->get_hw_state = skl_plane_get_hw_state;
@@ -1396,10 +1541,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
}
- if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
+ if (intel_plane->has_ccs)
modifiers = skl_plane_format_modifiers_ccs;
else
modifiers = skl_plane_format_modifiers_noccs;
+
+ plane_funcs = &skl_plane_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
@@ -1411,6 +1558,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
modifiers = i9xx_plane_format_modifiers;
+
+ plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
@@ -1427,6 +1576,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
+
+ plane_funcs = &snb_sprite_funcs;
} else {
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
@@ -1439,9 +1590,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+
+ plane_funcs = &snb_sprite_funcs;
} else {
plane_formats = g4x_plane_formats;
num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+
+ plane_funcs = &g4x_sprite_funcs;
}
}
@@ -1468,14 +1623,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, &intel_sprite_plane_funcs,
+ possible_crtcs, plane_funcs,
plane_formats, num_plane_formats,
modifiers,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, &intel_sprite_plane_funcs,
+ possible_crtcs, plane_funcs,
plane_formats, num_plane_formats,
modifiers,
DRM_PLANE_TYPE_OVERLAY,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b55b5c157e38..b5b04cb892e9 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -798,16 +798,12 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp = I915_READ(TV_CTL);
- if (!(tmp & TV_ENC_ENABLE))
- return false;
+ *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
- *pipe = PORT_TO_PIPE(tmp);
-
- return true;
+ return tmp & TV_ENC_ENABLE;
}
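The TV encoder change generalizes a single PIPEB-select bit into a proper pipe-select field that is written with TV_ENC_PIPE_SEL(pipe) and read back by mask-and-shift. A small model of that read/modify/write pattern, assuming a hypothetical 2-bit field; the mask and shift values are made up.

#include <stdio.h>

/* Hypothetical 2-bit pipe-select field at bits 30:29 of a control reg. */
#define PIPE_SEL_SHIFT	29
#define PIPE_SEL_MASK	(0x3u << PIPE_SEL_SHIFT)
#define PIPE_SEL(pipe)	((unsigned int)(pipe) << PIPE_SEL_SHIFT)

static unsigned int set_pipe(unsigned int ctl, int pipe)
{
	ctl &= ~PIPE_SEL_MASK;		/* clear the old selection first */
	return ctl | PIPE_SEL(pipe);
}

static int get_pipe(unsigned int ctl)
{
	return (ctl & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;
}

int main(void)
{
	unsigned int ctl = set_pipe(0, 2);

	printf("pipe read back: %d\n", get_pipe(ctl));	/* 2, not just 0/1 */
	return 0;
}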
static void
@@ -1032,8 +1028,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
break;
}
- if (intel_crtc->pipe == 1)
- tv_ctl |= TV_ENC_PIPEB_SELECT;
+ tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
tv_ctl |= tv_mode->oversample;
if (tv_mode->progressive)
@@ -1157,12 +1152,9 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
/* Poll for TV detection */
- tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- if (intel_crtc->pipe == 1)
- tv_ctl |= TV_ENC_PIPEB_SELECT;
- else
- tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+ tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
@@ -1355,8 +1347,7 @@ intel_tv_get_modes(struct drm_connector *connector)
mode_ptr = drm_mode_create(connector->dev);
if (!mode_ptr)
continue;
- strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
- mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0';
+ strlcpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
mode_ptr->hdisplay = hactive_s;
mode_ptr->hsync_start = hactive_s + 1;
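The strncpy()-plus-manual-terminator pair collapses into one strlcpy() call because strlcpy() always NUL-terminates a nonzero-sized buffer. A userspace demonstration of the difference; glibc has no strlcpy(), so a local equivalent is sketched here.

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy equivalent for platforms without one (e.g. glibc). */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';	/* always terminated, unlike strncpy() */
	}
	return len;
}

int main(void)
{
	char a[4], b[4];

	strncpy(a, "longname", sizeof(a));	/* a is NOT terminated */
	my_strlcpy(b, "longname", sizeof(b));	/* b is "lon\0" */
	printf("%.*s / %s\n", (int)sizeof(a), a, b);
	return 0;
}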
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 1cffaf7b5dbe..7c95697e1a35 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -50,10 +50,10 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
return ret;
}
-static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
+static int __get_platform_enable_guc(struct drm_i915_private *i915)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct intel_uc_fw *guc_fw = &i915->guc.fw;
+ struct intel_uc_fw *huc_fw = &i915->huc.fw;
int enable_guc = 0;
/* Default is to enable GuC/HuC if we know their firmwares */
@@ -67,11 +67,11 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
return enable_guc;
}
-static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
+static int __get_default_guc_log_level(struct drm_i915_private *i915)
{
int guc_log_level;
- if (!HAS_GUC(dev_priv) || !intel_uc_is_using_guc())
+ if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
guc_log_level = GUC_LOG_LEVEL_DISABLED;
else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -86,7 +86,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
/**
* sanitize_options_early - sanitize uC-related modparam options
- * @dev_priv: device private
+ * @i915: device private
*
* In case of "enable_guc" option this function will attempt to modify
* it only if it was initially set to "auto(-1)". Default value for this
@@ -101,14 +101,14 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
* unless GuC is enabled on the given platform and the driver is compiled with
* a debug config, in which case this modparam will default to "enable(1..4)".
*/
-static void sanitize_options_early(struct drm_i915_private *dev_priv)
+static void sanitize_options_early(struct drm_i915_private *i915)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct intel_uc_fw *guc_fw = &i915->guc.fw;
+ struct intel_uc_fw *huc_fw = &i915->huc.fw;
/* A negative value means "use platform default" */
if (i915_modparams.enable_guc < 0)
- i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv);
+ i915_modparams.enable_guc = __get_platform_enable_guc(i915);
DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
i915_modparams.enable_guc,
@@ -119,28 +119,28 @@ static void sanitize_options_early(struct drm_i915_private *dev_priv)
if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
- !HAS_GUC(dev_priv) ? "no GuC hardware" :
- "no GuC firmware");
+ !HAS_GUC(i915) ? "no GuC hardware" :
+ "no GuC firmware");
}
/* Verify HuC firmware availability */
if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
- !HAS_HUC(dev_priv) ? "no HuC hardware" :
- "no HuC firmware");
+ !HAS_HUC(i915) ? "no HuC hardware" :
+ "no HuC firmware");
}
/* A negative value means "use platform/config default" */
if (i915_modparams.guc_log_level < 0)
i915_modparams.guc_log_level =
- __get_default_guc_log_level(dev_priv);
+ __get_default_guc_log_level(i915);
if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"guc_log_level", i915_modparams.guc_log_level,
- !HAS_GUC(dev_priv) ? "no GuC hardware" :
- "GuC not enabled");
+ !HAS_GUC(i915) ? "no GuC hardware" :
+ "GuC not enabled");
i915_modparams.guc_log_level = 0;
}
@@ -171,44 +171,30 @@ void intel_uc_init_early(struct drm_i915_private *i915)
intel_huc_init_early(huc);
sanitize_options_early(i915);
-
- if (USES_GUC(i915))
- intel_uc_fw_fetch(i915, &guc->fw);
-
- if (USES_HUC(i915))
- intel_uc_fw_fetch(i915, &huc->fw);
}
void intel_uc_cleanup_early(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- if (USES_HUC(i915))
- intel_uc_fw_fini(&huc->fw);
-
- if (USES_GUC(i915))
- intel_uc_fw_fini(&guc->fw);
guc_free_load_err_log(guc);
}
/**
* intel_uc_init_mmio - setup uC MMIO access
- *
- * @dev_priv: device private
+ * @i915: device private
*
* Setup minimal state necessary for MMIO accesses later in the
* initialization sequence.
*/
-void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
+void intel_uc_init_mmio(struct drm_i915_private *i915)
{
- intel_guc_init_send_regs(&dev_priv->guc);
+ intel_guc_init_send_regs(&i915->guc);
}
static void guc_capture_load_err_log(struct intel_guc *guc)
{
- if (!guc->log.vma || !i915_modparams.guc_log_level)
+ if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
return;
if (!guc->load_err_log)
@@ -225,11 +211,11 @@ static void guc_free_load_err_log(struct intel_guc *guc)
static int guc_enable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
- gen9_enable_guc_interrupts(dev_priv);
+ gen9_enable_guc_interrupts(i915);
- if (HAS_GUC_CT(dev_priv))
+ if (HAS_GUC_CT(i915))
return intel_guc_ct_enable(&guc->ct);
guc->send = intel_guc_send_mmio;
@@ -239,60 +225,73 @@ static int guc_enable_communication(struct intel_guc *guc)
static void guc_disable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
- if (HAS_GUC_CT(dev_priv))
+ if (HAS_GUC_CT(i915))
intel_guc_ct_disable(&guc->ct);
- gen9_disable_guc_interrupts(dev_priv);
+ gen9_disable_guc_interrupts(i915);
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
}
-int intel_uc_init_misc(struct drm_i915_private *dev_priv)
+int intel_uc_init_misc(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
int ret;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- intel_guc_init_ggtt_pin_bias(guc);
-
- ret = intel_guc_init_wq(guc);
+ ret = intel_guc_init_misc(guc);
if (ret)
return ret;
+ if (USES_HUC(i915)) {
+ ret = intel_huc_init_misc(huc);
+ if (ret)
+ goto err_guc;
+ }
+
return 0;
+
+err_guc:
+ intel_guc_fini_misc(guc);
+ return ret;
}
-void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
+void intel_uc_fini_misc(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- intel_guc_fini_wq(guc);
+ if (USES_HUC(i915))
+ intel_huc_fini_misc(huc);
+
+ intel_guc_fini_misc(guc);
}
-int intel_uc_init(struct drm_i915_private *dev_priv)
+int intel_uc_init(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
int ret;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GUC(i915))
return -ENODEV;
ret = intel_guc_init(guc);
if (ret)
return ret;
- if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (USES_GUC_SUBMISSION(i915)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -307,16 +306,16 @@ int intel_uc_init(struct drm_i915_private *dev_priv)
return 0;
}
-void intel_uc_fini(struct drm_i915_private *dev_priv)
+void intel_uc_fini(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- if (USES_GUC_SUBMISSION(dev_priv))
+ if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_fini(guc);
intel_guc_fini(guc);
@@ -340,22 +339,22 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
__intel_uc_reset_hw(i915);
}
-int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+int intel_uc_init_hw(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct intel_huc *huc = &dev_priv->huc;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
int ret, attempts;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- gen9_reset_guc_interrupts(dev_priv);
+ gen9_reset_guc_interrupts(i915);
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
- if (IS_GEN9(dev_priv))
+ if (IS_GEN9(i915))
attempts = 3;
else
attempts = 1;
@@ -365,11 +364,11 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
* Always reset the GuC just before (re)loading, so
* that the state and timing are fairly predictable
*/
- ret = __intel_uc_reset_hw(dev_priv);
+ ret = __intel_uc_reset_hw(i915);
if (ret)
goto err_out;
- if (USES_HUC(dev_priv)) {
+ if (USES_HUC(i915)) {
ret = intel_huc_fw_upload(huc);
if (ret)
goto err_out;
@@ -392,24 +391,24 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_log_capture;
- if (USES_HUC(dev_priv)) {
+ if (USES_HUC(i915)) {
ret = intel_huc_auth(huc);
if (ret)
goto err_communication;
}
- if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (USES_GUC_SUBMISSION(i915)) {
ret = intel_guc_submission_enable(guc);
if (ret)
goto err_communication;
}
- dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
+ dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
guc->fw.major_ver_found, guc->fw.minor_ver_found);
- dev_info(dev_priv->drm.dev, "GuC submission %s\n",
- enableddisabled(USES_GUC_SUBMISSION(dev_priv)));
- dev_info(dev_priv->drm.dev, "HuC %s\n",
- enableddisabled(USES_HUC(dev_priv)));
+ dev_info(i915->drm.dev, "GuC submission %s\n",
+ enableddisabled(USES_GUC_SUBMISSION(i915)));
+ dev_info(i915->drm.dev, "HuC %s\n",
+ enableddisabled(USES_HUC(i915)));
return 0;
@@ -428,20 +427,20 @@ err_out:
if (GEM_WARN_ON(ret == -EIO))
ret = -EINVAL;
- dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret);
+ dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
return ret;
}
-void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
+void intel_uc_fini_hw(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- if (USES_GUC_SUBMISSION(dev_priv))
+ if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_disable(guc);
guc_disable_communication(guc);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 448293eb638d..50b39aa4ffb8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -359,8 +359,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
}
/* Note: callers must have acquired the PUNIT->PMIC bus before calling this. */
-static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
- bool restore)
+static unsigned int
+intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
@@ -412,20 +412,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
-
- if (restore) { /* If reset with a user forcewake, try to restore */
- if (fw)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
-
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
- dev_priv->uncore.fifo_count =
- fifo_free_entries(dev_priv);
- }
-
- if (!restore)
- assert_forcewakes_inactive(dev_priv);
+ assert_forcewakes_inactive(dev_priv);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ return fw; /* track the lost user forcewake domains */
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
@@ -534,7 +525,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
}
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
- bool restore_forcewake)
+ unsigned int restore_forcewake)
{
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
@@ -549,7 +540,17 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
}
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
+ intel_uncore_forcewake_reset(dev_priv);
+ if (restore_forcewake) {
+ spin_lock_irq(&dev_priv->uncore.lock);
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ restore_forcewake);
+
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+ dev_priv->uncore.fifo_count =
+ fifo_free_entries(dev_priv);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ }
iosf_mbi_punit_release();
}
@@ -558,13 +559,18 @@ void intel_uncore_suspend(struct drm_i915_private *dev_priv)
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&dev_priv->uncore.pmic_bus_access_nb);
- intel_uncore_forcewake_reset(dev_priv, false);
+ dev_priv->uncore.fw_domains_saved =
+ intel_uncore_forcewake_reset(dev_priv);
iosf_mbi_punit_release();
}
void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
- __intel_uncore_early_sanitize(dev_priv, true);
+ unsigned int restore_forcewake;
+
+ restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
+ __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+
iosf_mbi_register_pmic_bus_access_notifier(
&dev_priv->uncore.pmic_bus_access_nb);
i915_check_and_clear_faults(dev_priv);
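With this change intel_uncore_forcewake_reset() reports the user-held forcewake domains it dropped, suspend stashes them in fw_domains_saved, and resume fetches-and-zeroes that set to re-acquire exactly what was held. A compact model of the flow; the dev_state struct and helpers are invented stand-ins.

#include <stdio.h>

/* Hypothetical device state modelling uncore.fw_domains_saved. */
struct dev_state {
	unsigned int fw_active;	/* currently held forcewake domains */
	unsigned int fw_saved;	/* domains saved across suspend */
};

static unsigned int forcewake_reset(struct dev_state *d)
{
	unsigned int lost = d->fw_active;

	d->fw_active = 0;	/* drop everything, report what was held */
	return lost;
}

static void suspend(struct dev_state *d)
{
	d->fw_saved = forcewake_reset(d);
}

static void resume(struct dev_state *d)
{
	/* fetch_and_zero() equivalent: consume the saved set exactly once. */
	unsigned int restore = d->fw_saved;

	d->fw_saved = 0;
	d->fw_active |= restore;
}

int main(void)
{
	struct dev_state d = { .fw_active = 0x5 };

	suspend(&d);
	resume(&d);
	printf("restored domains: %#x\n", d.fw_active);
	return 0;
}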
@@ -1545,7 +1551,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
intel_uncore_edram_detect(dev_priv);
intel_uncore_fw_domains_init(dev_priv);
- __intel_uncore_early_sanitize(dev_priv, false);
+ __intel_uncore_early_sanitize(dev_priv, 0);
dev_priv->uncore.unclaimed_mmio_check = 1;
dev_priv->uncore.pmic_bus_access_nb.notifier_call =
@@ -1632,7 +1638,7 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv)
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&dev_priv->uncore.pmic_bus_access_nb);
- intel_uncore_forcewake_reset(dev_priv, false);
+ intel_uncore_forcewake_reset(dev_priv);
iosf_mbi_punit_release();
}
@@ -1702,15 +1708,9 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
-
- I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
- if (__intel_wait_for_register_fw(dev_priv,
- mode, MODE_IDLE, MODE_IDLE,
- 500, 0,
- NULL))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
- engine->name);
+
+ if (intel_engine_stop_cs(engine))
+ DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
@@ -2099,21 +2099,25 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
{
struct intel_engine_cs *engine;
unsigned int tmp;
+ int ret;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- if (gen8_reset_engine_start(engine))
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ if (gen8_reset_engine_start(engine)) {
+ ret = -EIO;
goto not_ready;
+ }
+ }
if (INTEL_GEN(dev_priv) >= 11)
- return gen11_reset_engines(dev_priv, engine_mask);
+ ret = gen11_reset_engines(dev_priv, engine_mask);
else
- return gen6_reset_engines(dev_priv, engine_mask);
+ ret = gen6_reset_engines(dev_priv, engine_mask);
not_ready:
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
gen8_reset_engine_cancel(engine);
- return -EIO;
+ return ret;
}
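The gen8_reset_engines() fix funnels both failure paths through a single ret so the gen6/gen11 reset result is propagated instead of being clobbered by a blanket -EIO. The general shape, sketched with placeholder helpers (start_engine(), do_reset() and cancel_engine() are hypothetical):

#include <errno.h>
#include <stdio.h>

static int start_engine(int id) { (void)id; return 0; }
static int do_reset(void) { return -ETIMEDOUT; }	/* hypothetical hw timeout */
static void cancel_engine(int id) { (void)id; }

/* One ret, one unwind label: the reset result keeps its own errno. */
static int reset_engines(int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		if (start_engine(i)) {
			ret = -EIO;	/* only the start failure maps to -EIO */
			goto not_ready;
		}
	}

	ret = do_reset();

not_ready:
	for (i = 0; i < count; i++)
		cancel_engine(i);
	return ret;
}

int main(void)
{
	printf("reset: %d\n", reset_engines(4));	/* -ETIMEDOUT, not -EIO */
	return 0;
}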
typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
@@ -2176,6 +2180,8 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
* Thus assume it is best to stop engines on all gens
* where we have a gpu reset.
*
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
* WaMediaResetMainRingCleanup:ctg,elk (presumably)
*
* FIXME: Wa for more modern gens needs to be validated
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 47478d609630..e5e157d288de 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -67,21 +67,21 @@ struct intel_uncore_funcs {
void (*force_wake_put)(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
- uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
+ u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
void (*mmio_writeb)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint8_t val, bool trace);
+ i915_reg_t r, u8 val, bool trace);
void (*mmio_writew)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint16_t val, bool trace);
+ i915_reg_t r, u16 val, bool trace);
void (*mmio_writel)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint32_t val, bool trace);
+ i915_reg_t r, u32 val, bool trace);
};
struct intel_forcewake_range {
@@ -104,6 +104,7 @@ struct intel_uncore {
enum forcewake_domains fw_domains;
enum forcewake_domains fw_domains_active;
+ enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
u32 fw_set;
u32 fw_clear;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 458468237b5f..bba98cf83cbd 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -318,6 +318,12 @@ enum vbt_gmbus_ddi {
DDC_BUS_DDI_C,
DDC_BUS_DDI_D,
DDC_BUS_DDI_F,
+ ICL_DDC_BUS_DDI_A = 0x1,
+ ICL_DDC_BUS_DDI_B,
+ ICL_DDC_BUS_PORT_1 = 0x4,
+ ICL_DDC_BUS_PORT_2,
+ ICL_DDC_BUS_PORT_3,
+ ICL_DDC_BUS_PORT_4,
};
#define VBT_DP_MAX_LINK_RATE_HBR3 0
@@ -414,7 +420,9 @@ struct child_device_config {
u16 extended_type;
u8 dvo_function;
u8 dp_usb_type_c:1; /* 195 */
- u8 flags2_reserved:7; /* 195 */
+ u8 tbt:1; /* 209 */
+ u8 flags2_reserved:2; /* 195 */
+ u8 dp_port_trace_length:4; /* 209 */
u8 dp_gpio_index; /* 195 */
u16 dp_gpio_pin_num; /* 195 */
u8 dp_iboost_level:4; /* 196 */
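The child_device_config update carves tbt and dp_port_trace_length out of the old 7-bit reserved area while keeping the byte layout intact (1 + 1 + 2 + 4 = 8 bits). A compile-time check of that invariant under the same field widths; bitfield packing is implementation-defined in general, but these widths fit one byte on the usual ABIs.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the flags2 byte after the split: widths must still sum to 8. */
struct flags2 {
	uint8_t dp_usb_type_c:1;
	uint8_t tbt:1;
	uint8_t reserved:2;
	uint8_t dp_port_trace_length:4;
};

int main(void)
{
	/* If a width were wrong the struct would grow past one byte. */
	static_assert(sizeof(struct flags2) == 1, "flags2 must stay one byte");
	printf("flags2 size: %zu byte\n", sizeof(struct flags2));
	return 0;
}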
@@ -448,7 +456,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* defs->child_dev_size;
*/
- uint8_t devices[0];
+ u8 devices[0];
} __packed;
/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
@@ -635,7 +643,7 @@ struct bdb_sdvo_lvds_options {
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
-#define BDB_DRIVER_FEATURE_EDP 3
+#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3
struct bdb_driver_features {
u8 boot_dev_algorithm:1;
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 2df3538ceba5..4bcdeaf8d98f 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,29 +48,58 @@
* - Public functions to init or apply the given workaround type.
*/
-static int wa_add(struct drm_i915_private *dev_priv,
- i915_reg_t addr,
- const u32 mask, const u32 val)
+static void wa_add(struct drm_i915_private *i915,
+ i915_reg_t reg, const u32 mask, const u32 val)
{
- const unsigned int idx = dev_priv->workarounds.count;
+ struct i915_workarounds *wa = &i915->workarounds;
+ unsigned int start = 0, end = wa->count;
+ unsigned int addr = i915_mmio_reg_offset(reg);
+ struct i915_wa_reg *r;
+
+ while (start < end) {
+ unsigned int mid = start + (end - start) / 2;
+
+ if (wa->reg[mid].addr < addr) {
+ start = mid + 1;
+ } else if (wa->reg[mid].addr > addr) {
+ end = mid;
+ } else {
+ r = &wa->reg[mid];
+
+ if ((mask & ~r->mask) == 0) {
+ DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
+ addr, r->mask, r->value);
+
+ r->value &= ~mask;
+ }
+
+ r->value |= val;
+ r->mask |= mask;
+ return;
+ }
+ }
- if (WARN_ON(idx >= I915_MAX_WA_REGS))
- return -ENOSPC;
+ if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
+ DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
+ addr, mask, val);
+ return;
+ }
- dev_priv->workarounds.reg[idx].addr = addr;
- dev_priv->workarounds.reg[idx].value = val;
- dev_priv->workarounds.reg[idx].mask = mask;
+ r = &wa->reg[wa->count++];
+ r->addr = addr;
+ r->value = val;
+ r->mask = mask;
- dev_priv->workarounds.count++;
+ while (r-- > wa->reg) {
+ GEM_BUG_ON(r[0].addr == r[1].addr);
+ if (r[1].addr > r[0].addr)
+ break;
- return 0;
+ swap(r[1], r[0]);
+ }
}
-#define WA_REG(addr, mask, val) do { \
- const int r = wa_add(dev_priv, (addr), (mask), (val)); \
- if (r) \
- return r; \
- } while (0)
+#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
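wa_add() now keeps the workaround table sorted by register offset, so a duplicate register is found by binary search and merged (mask and value OR-ed in) instead of being appended twice. A self-contained sketch of the same insert-or-merge scheme with simplified entries and a fixed-size table:

#include <stdio.h>

#define MAX_WA 8

struct wa_reg {
	unsigned int addr, mask, value;
};

static struct wa_reg table[MAX_WA];
static unsigned int count;

/* Binary-search the sorted table; merge on hit, insertion-sort on miss. */
static void wa_add(unsigned int addr, unsigned int mask, unsigned int val)
{
	unsigned int start = 0, end = count;
	struct wa_reg *r;

	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (table[mid].addr < addr) {
			start = mid + 1;
		} else if (table[mid].addr > addr) {
			end = mid;
		} else {
			table[mid].value |= val;	/* merge duplicate */
			table[mid].mask |= mask;
			return;
		}
	}

	if (count >= MAX_WA)
		return;		/* table full: drop, as the driver warns */

	r = &table[count++];
	r->addr = addr;
	r->value = val;
	r->mask = mask;

	/* Bubble the new entry back to its sorted slot. */
	while (r-- > table && r[0].addr > r[1].addr) {
		struct wa_reg tmp = r[0];

		r[0] = r[1];
		r[1] = tmp;
	}
}

int main(void)
{
	wa_add(0x7004, 0x1, 0x1);
	wa_add(0x2000, 0x2, 0x2);
	wa_add(0x7004, 0x4, 0x4);	/* merges with the first entry */
	for (unsigned int i = 0; i < count; i++)
		printf("%04x mask=%x val=%x\n",
		       table[i].addr, table[i].mask, table[i].value);
	return 0;
}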
@@ -463,6 +492,22 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
*/
WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+ /* Wa_2006611047:icl (pre-prod)
+ * Formerly known as WaDisableImprovedTdlClkGating
+ */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1,
+ GEN11_STATE_CACHE_REDIRECT_TO_CS);
+
+ /* Wa_2006665173:icl (pre-prod) */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
+ GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+
return 0;
}
@@ -521,7 +566,7 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
*cs++ = MI_LOAD_REGISTER_IMM(w->count);
for (i = 0; i < w->count; i++) {
- *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+ *cs++ = w->reg[i].addr;
*cs++ = w->reg[i].value;
}
*cs++ = MI_NOOP;
@@ -647,6 +692,19 @@ static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ for_each_engine(engine, dev_priv, tmp) {
+ if (engine->id == RCS)
+ continue;
+
+ I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
+ }
+ }
}
static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
@@ -672,8 +730,74 @@ static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
+static void wa_init_mcr(struct drm_i915_private *dev_priv)
+{
+ const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+ u32 mcr;
+ u32 mcr_slice_subslice_mask;
+
+ /*
+ * WaProgramMgsrForL3BankSpecificMmioReads:cnl,icl
+ * L3Banks could be fused off in single slice scenario. If that is
+ * the case, we might need to program MCR select to a valid L3Bank
+ * by default, to make sure we correctly read certain registers
+ * later on (in the range 0xB100 - 0xB3FF).
+ * This might be incompatible with
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads.
+ * Fortunately, this should not happen in production hardware, so
+ * we only assert that this is the case (instead of implementing
+ * something more complex that requires checking the range of every
+ * MMIO read).
+ */
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ is_power_of_2(sseu->slice_mask)) {
+ /*
+ * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
+ * enabled subslice, no need to redirect MCR packet
+ */
+ u32 slice = fls(sseu->slice_mask);
+ u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3);
+ u8 ss_mask = sseu->subslice_mask[slice];
+
+ u8 enabled_mask = (ss_mask | ss_mask >>
+ GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
+ u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
+
+ /*
+ * Production silicon should have a matching L3Bank for
+ * every enabled subslice
+ */
+ WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
+ }
+
+ mcr = I915_READ(GEN8_MCR_SELECTOR);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
+ GEN11_MCR_SUBSLICE_MASK;
+ else
+ mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
+ GEN8_MCR_SUBSLICE_MASK;
+ /*
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
+ * Before any MMIO read into slice/subslice specific registers, MCR
+ * packet control register needs to be programmed to point to any
+ * enabled s/ss pair. Otherwise, incorrect values will be returned.
+ * This means each subsequent MMIO read will be forwarded to a
+ * specific s/ss combination, but this is OK since these registers
+ * are consistent across s/ss in almost all cases. On the rare
+ * occasions, such as INSTDONE, where this value depends on the
+ * s/ss combo, the read should be done with read_subslice_reg.
+ */
+ mcr &= ~mcr_slice_subslice_mask;
+ mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
+ I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+}
+
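The single-slice check in wa_init_mcr() relies on a bit trick: a power-of-two slice_mask means exactly one slice is enabled, and fls() recovers its (1-based) position. A tiny demonstration with portable stand-ins for the kernel's is_power_of_2() and fls():

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* fls() stand-in: position of the highest set bit, 1-based (0 if none). */
static int fls_compat(unsigned int n)
{
	int i = 0;

	while (n) {
		n >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned int slice_mask = 0x4;	/* exactly one slice fused in */

	if (is_power_of_2(slice_mask))
		printf("fls(slice_mask) = %d\n", fls_compat(slice_mask));
	return 0;
}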
static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
+ wa_init_mcr(dev_priv);
+
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
I915_WRITE(GAMT_CHKN_BIT_REG,
@@ -692,6 +816,8 @@ static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
+ wa_init_mcr(dev_priv);
+
+ /* This is not a Wa. Enable for better image quality */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
@@ -772,6 +898,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
PMFLUSHDONE_LNICRSDROP |
PMFLUSH_GAPL3UNBLOCK |
PMFLUSHDONE_LNEBLK);
+
+ /* Wa_1406463099:icl
+ * Formerly known as WaGamTlbPendError
+ */
+ I915_WRITE(GAMT_CHKN_BIT_REG,
+ I915_READ(GAMT_CHKN_BIT_REG) |
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 91c72911be3c..7efb326badcd 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
static int igt_check_page_sizes(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+ struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
int err = 0;
@@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
static int igt_mock_exhaust_device_supported_pages(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
goto out_put;
}
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -458,7 +458,7 @@ out_device:
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
int bit;
@@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
/* Force the page size for this object */
obj->mm.page_sizes.sg = page_size;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_unpin;
@@ -570,6 +570,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
@@ -591,12 +592,13 @@ static void close_object_list(struct list_head *objects,
list_for_each_entry_safe(obj, on, objects, st_link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (!IS_ERR(vma))
i915_vma_close(vma);
list_del(&obj->st_link);
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
}
@@ -604,8 +606,8 @@ static void close_object_list(struct list_head *objects,
static int igt_mock_ppgtt_huge_fill(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
- unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
unsigned long page_num;
bool single = false;
LIST_HEAD(objects);
@@ -641,7 +643,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
break;
@@ -725,7 +727,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
static int igt_mock_ppgtt_64K(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
struct drm_i915_gem_object *obj;
const struct object_info {
unsigned int size;
@@ -819,7 +821,7 @@ static int igt_mock_ppgtt_64K(void *arg)
*/
obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_object_unpin;
@@ -866,6 +868,7 @@ static int igt_mock_ppgtt_64K(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
}
@@ -887,8 +890,8 @@ out_object_put:
static struct i915_vma *
gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
{
- struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
- const int gen = INTEL_GEN(vma->vm->i915);
+ struct drm_i915_private *i915 = vma->vm->i915;
+ const int gen = INTEL_GEN(i915);
unsigned int count = vma->size >> PAGE_SHIFT;
struct drm_i915_gem_object *obj;
struct i915_vma *batch;
@@ -919,12 +922,12 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
*cmd++ = val;
} else if (gen >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? 1 << 22 : 0);
+ (gen < 6 ? MI_USE_GGTT : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = val;
} else {
- *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cmd++ = offset;
*cmd++ = val;
}
@@ -985,7 +988,10 @@ static int gpu_write(struct i915_vma *vma,
goto err_request;
}
- i915_vma_move_to_active(batch, rq, 0);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto err_request;
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
@@ -996,14 +1002,12 @@ static int gpu_write(struct i915_vma *vma,
if (err)
goto err_request;
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
- reservation_object_lock(vma->resv, NULL);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
- reservation_object_unlock(vma->resv);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ i915_request_skip(rq, err);
err_request:
- __i915_request_add(rq, err == 0);
+ i915_request_add(rq);
return err;
}
@@ -1047,7 +1051,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u32 dword, u32 val)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
@@ -1100,7 +1105,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
I915_RND_STATE(prng);
@@ -1262,6 +1268,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
}
@@ -1323,6 +1330,7 @@ static int igt_ppgtt_internal_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
@@ -1391,6 +1399,7 @@ static int igt_ppgtt_gemfs_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
@@ -1439,7 +1448,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1493,7 +1502,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1531,7 +1540,8 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1587,7 +1597,8 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
@@ -1690,20 +1701,20 @@ int i915_gem_huge_page_mock_selftests(void)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock");
+ ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- if (!i915_vm_is_48bit(&ppgtt->base)) {
+ if (!i915_vm_is_48bit(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
goto out_close;
}
/* If we ever hit this then it's time to mock the 64K scratch */
- if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+ if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
pr_err("PPGTT missing 64K scratch page\n");
err = -EINVAL;
goto out_close;
@@ -1712,7 +1723,7 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
- i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
@@ -1720,7 +1731,7 @@ out_unlock:
i915_modparams.enable_ppgtt = saved_ppgtt;
- drm_dev_unref(&dev_priv->drm);
+ drm_dev_put(&dev_priv->drm);
return err;
}
@@ -1744,6 +1755,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return 0;
}
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return 0;
+
file = mock_file(dev_priv);
if (IS_ERR(file))
return PTR_ERR(file);
@@ -1758,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
}
if (ctx->ppgtt)
- ctx->ppgtt->base.scrub_64K = true;
+ ctx->ppgtt->vm.scrub_64K = true;
err = i915_subtests(tests, ctx);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 340a98c0c804..3a095c37c120 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -42,11 +42,21 @@ static int cpu_set(struct drm_i915_gem_object *obj,
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
- if (needs_clflush & CLFLUSH_BEFORE)
+
+ if (needs_clflush & CLFLUSH_BEFORE) {
+ mb();
clflush(map+offset_in_page(offset) / sizeof(*map));
+ mb();
+ }
+
map[offset_in_page(offset) / sizeof(*map)] = v;
- if (needs_clflush & CLFLUSH_AFTER)
+
+ if (needs_clflush & CLFLUSH_AFTER) {
+ mb();
clflush(map+offset_in_page(offset) / sizeof(*map));
+ mb();
+ }
+
kunmap_atomic(map);
i915_gem_obj_finish_shmem_access(obj);
@@ -68,8 +78,13 @@ static int cpu_get(struct drm_i915_gem_object *obj,
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
- if (needs_clflush & CLFLUSH_BEFORE)
+
+ if (needs_clflush & CLFLUSH_BEFORE) {
+ mb();
clflush(map+offset_in_page(offset) / sizeof(*map));
+ mb();
+ }
+
*v = map[offset_in_page(offset) / sizeof(*map)];
kunmap_atomic(map);
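The selftest fix brackets each clflush() with full memory barriers, since the flush is not ordered against ordinary loads and stores on its own. On x86 userspace the same fencing can be sketched with SSE2 intrinsics; this models the idea only, not the kernel's mb()/clflush() implementation.

#include <immintrin.h>
#include <stdio.h>

static volatile int shared;

/* Flush one cache line with full fences on both sides, as in the fix. */
static void flush_with_fences(const volatile void *p)
{
	_mm_mfence();		/* order prior stores before the flush */
	_mm_clflush((const void *)p);
	_mm_mfence();		/* order the flush before later accesses */
}

int main(void)
{
	shared = 42;
	flush_with_fences(&shared);
	printf("value after flush: %d\n", shared);
	return 0;
}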
@@ -199,7 +214,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
- __i915_request_add(rq, false);
+ i915_request_add(rq);
i915_vma_unpin(vma);
return PTR_ERR(cs);
}
@@ -210,28 +225,24 @@ static int gpu_set(struct drm_i915_gem_object *obj,
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
} else if (INTEL_GEN(i915) >= 4) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + offset;
*cs++ = v;
} else {
- *cs++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cs++ = i915_ggtt_offset(vma) + offset;
*cs++ = v;
*cs++ = MI_NOOP;
}
intel_ring_advance(rq, cs);
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unpin(vma);
- reservation_object_lock(obj->resv, NULL);
- reservation_object_add_excl_fence(obj->resv, &rq->fence);
- reservation_object_unlock(obj->resv);
-
- __i915_request_add(rq, true);
+ i915_request_add(rq);
- return 0;
+ return err;
}
static bool always_valid(struct drm_i915_private *i915)
@@ -239,8 +250,16 @@ static bool always_valid(struct drm_i915_private *i915)
return true;
}
+static bool needs_fence_registers(struct drm_i915_private *i915)
+{
+ return !i915_terminally_wedged(&i915->gpu_error);
+}
+
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return false;
+
return intel_engine_can_store_dword(i915->engine[RCS]);
}
@@ -251,7 +270,7 @@ static const struct igt_coherency_mode {
bool (*valid)(struct drm_i915_private *i915);
} igt_coherency_mode[] = {
{ "cpu", cpu_set, cpu_get, always_valid },
- { "gtt", gtt_set, gtt_get, always_valid },
+ { "gtt", gtt_set, gtt_get, needs_fence_registers },
{ "wc", wc_set, wc_get, always_valid },
{ "gpu", gpu_set, NULL, needs_mi_store_dword },
{ },
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index ddb03f009232..1c92560d35da 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -23,9 +23,11 @@
*/
#include "../i915_selftest.h"
+#include "i915_random.h"
#include "igt_flush_test.h"
#include "mock_drm.h"
+#include "mock_gem_device.h"
#include "huge_gem_object.h"
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
@@ -62,12 +64,12 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
*cmd++ = value;
} else if (gen >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? 1 << 22 : 0);
+ (gen < 6 ? MI_USE_GGTT : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = value;
} else {
- *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cmd++ = offset;
*cmd++ = value;
}
@@ -114,7 +116,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
@@ -169,24 +171,28 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (err)
goto err_request;
- i915_vma_move_to_active(batch, rq, 0);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto skip_request;
+
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
- i915_vma_move_to_active(vma, rq, 0);
i915_vma_unpin(vma);
- reservation_object_lock(obj->resv, NULL);
- reservation_object_add_excl_fence(obj->resv, &rq->fence);
- reservation_object_unlock(obj->resv);
-
- __i915_request_add(rq, true);
+ i915_request_add(rq);
return 0;
+skip_request:
+ i915_request_skip(rq, err);
err_request:
- __i915_request_add(rq, false);
+ i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
err_vma:
@@ -247,9 +253,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
}
for (; m < DW_PER_PAGE; m++) {
- if (map[m] != 0xdeadbeef) {
+ if (map[m] != STACK_MAGIC) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], 0xdeadbeef);
+ n, m, map[m], STACK_MAGIC);
err = -EINVAL;
goto out_unmap;
}
@@ -289,7 +295,7 @@ create_test_object(struct i915_gem_context *ctx,
{
struct drm_i915_gem_object *obj;
struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+ ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
u64 size;
int err;
@@ -305,7 +311,7 @@ create_test_object(struct i915_gem_context *ctx,
if (err)
return ERR_PTR(err);
- err = cpu_fill(obj, 0xdeadbeef);
+ err = cpu_fill(obj, STACK_MAGIC);
if (err) {
pr_err("Failed to fill object with cpu, err=%d\n",
err);
@@ -335,11 +341,15 @@ static int igt_ctx_exec(void *arg)
bool first_shared_gtt = true;
int err = -ENODEV;
- /* Create a few different contexts (with different mm) and write
+ /*
+ * Create a few different contexts (with different mm) and write
* through each ctx/mm using the GPU making sure those writes end
* up in the expected pages of our obj.
*/
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
@@ -366,6 +376,9 @@ static int igt_ctx_exec(void *arg)
}
for_each_engine(engine, i915, id) {
+ if (!engine->context_size)
+ continue; /* No logical context support in HW */
+
if (!intel_engine_can_store_dword(engine))
continue;
@@ -420,6 +433,237 @@ out_unlock:
return err;
}
+static int igt_ctx_readonly(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj = NULL;
+ struct drm_file *file;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
+ struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
+ unsigned long ndwords, dw;
+ int err = -ENODEV;
+
+ /*
+ * Create a few read-only objects (with the occasional writable object)
+ * and try to write into these objects, checking that the GPU discards
+ * any write to a read-only object.
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
+ if (!ppgtt || !ppgtt->vm.has_read_only) {
+ err = 0;
+ goto out_unlock;
+ }
+
+ ndwords = 0;
+ dw = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct intel_engine_cs *engine;
+ unsigned int id;
+
+ for_each_engine(engine, i915, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (!obj) {
+ obj = create_test_object(ctx, file, &objects);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+
+ if (prandom_u32_state(&prng) & 1)
+ i915_gem_object_set_readonly(obj);
+ }
+
+ intel_runtime_pm_get(i915);
+ err = gpu_fill(obj, ctx, engine, dw);
+ intel_runtime_pm_put(i915);
+ if (err) {
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ ndwords, dw, max_dwords(obj),
+ engine->name, ctx->hw_id,
+ yesno(!!ctx->ppgtt), err);
+ goto out_unlock;
+ }
+
+ if (++dw == max_dwords(obj)) {
+ obj = NULL;
+ dw = 0;
+ }
+ ndwords++;
+ }
+ }
+ pr_info("Submitted %lu dwords (across %u engines)\n",
+ ndwords, INTEL_INFO(i915)->num_rings);
+
+ dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
+ unsigned int num_writes;
+
+ num_writes = rem;
+ if (i915_gem_object_is_readonly(obj))
+ num_writes = 0;
+
+ err = cpu_check(obj, num_writes);
+ if (err)
+ break;
+
+ dw += rem;
+ }
+
+out_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ return err;
+}
+
+static __maybe_unused const char *
+__engine_name(struct drm_i915_private *i915, unsigned int engines)
+{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ if (engines == ALL_ENGINES)
+ return "all";
+
+ for_each_engine_masked(engine, i915, engines, tmp)
+ return engine->name;
+
+ return "none";
+}
+
+static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx,
+ unsigned int engines)
+{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+ int err;
+
+ GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+ }
+
+ err = i915_gem_switch_to_kernel_context(i915);
+ if (err)
+ return err;
+
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (!engine_has_kernel_context_barrier(engine)) {
+ pr_err("kernel context not last on engine %s!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(i915->gt.active_requests);
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (engine->last_retired_context->gem_context != i915->kernel_context) {
+ pr_err("engine %s not idling in kernel context!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ err = i915_gem_switch_to_kernel_context(i915);
+ if (err)
+ return err;
+
+ if (i915->gt.active_requests) {
+ pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
+ i915->gt.active_requests);
+ return -EINVAL;
+ }
+
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (!intel_engine_has_kernel_context(engine)) {
+ pr_err("kernel context not last on engine %s!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int igt_switch_to_kernel_context(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ int err;
+
+ /*
+ * A core premise of switching to the kernel context is that
+ * if an engine is already idling in the kernel context, we
+ * do not emit another request and wake it up. The other premise
+ * is that we do indeed end up idling in the kernel context.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&i915->drm.struct_mutex);
+ return PTR_ERR(ctx);
+ }
+
+ /* First check idling each individual engine */
+ for_each_engine(engine, i915, id) {
+ err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
+ if (err)
+ goto out_unlock;
+ }
+
+ /* Now en masse */
+ err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
+ if (err)
+ goto out_unlock;
+
+out_unlock:
+ GEM_TRACE_DUMP_ON(err);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ kernel_context_close(ctx);
+ return err;
+}
+
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -432,7 +676,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
continue;
@@ -447,14 +691,37 @@ static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
i915_gem_fini_aliasing_ppgtt(i915);
}
+int i915_gem_context_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_switch_to_kernel_context),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_put(&i915->drm);
+ return err;
+}
+
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(igt_switch_to_kernel_context),
SUBTEST(igt_ctx_exec),
+ SUBTEST(igt_ctx_readonly),
};
bool fake_alias = false;
int err;
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return 0;
+
/* Install a fake aliasing gtt for exercise */
if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
mutex_lock(&dev_priv->drm.struct_mutex);
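For readers skimming the new selftest above: the core assertion of __igt_switch_to_kernel_context() can be distilled to the sketch below. The flow is illustrative only (error paths trimmed); all names are taken from the patch.

	/* Illustrative distillation: a second switch, issued while every
	 * engine is already idling in the kernel context, must emit no
	 * new requests. */
	err = i915_gem_switch_to_kernel_context(i915);	/* may emit requests */
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	err = i915_gem_switch_to_kernel_context(i915);	/* must be a no-op */
	if (err)
		return err;

	if (i915->gt.active_requests)	/* any request here breaks the premise */
		return -EINVAL;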
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index 89dc25a5a53b..a7055b12e53c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -389,7 +389,7 @@ int i915_gem_dmabuf_mock_selftests(void)
err = i915_subtests(tests, i915);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index ab9d7bee0aae..128ad1cf0647 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
u64 size;
for (size = 0;
- size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
size += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
return -EINVAL;
}
- if (list_empty(&i915->ggtt.base.inactive_list)) {
+ if (list_empty(&i915->ggtt.vm.inactive_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
}
@@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
+ list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
i915_vma_unpin(vma);
}
@@ -103,7 +103,7 @@ static int igt_evict_something(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_something(&ggtt->base,
+ err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -116,7 +116,7 @@ static int igt_evict_something(void *arg)
unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict something */
- err = i915_gem_evict_something(&ggtt->base,
+ err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
err);
@@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg)
unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict the node */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
err);
@@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg)
* i915_gtt_color_adjust throughout our driver, so using a mock color
* adjust will work just fine for our purposes.
*/
- ggtt->base.mm.color_adjust = mock_color_adjust;
+ ggtt->vm.mm.color_adjust = mock_color_adjust;
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg)
i915_vma_unpin(vma);
/* Remove just the second vma */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
goto cleanup;
@@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg)
*/
target.color = I915_CACHE_L3_LLC;
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
err = -EINVAL;
@@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg)
cleanup:
unpin_ggtt(i915);
cleanup_objects(i915);
- ggtt->base.mm.color_adjust = NULL;
+ ggtt->vm.mm.color_adjust = NULL;
return err;
}
@@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_vm(&ggtt->base);
+ err = i915_gem_evict_vm(&ggtt->vm);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg)
unpin_ggtt(i915);
- err = i915_gem_evict_vm(&ggtt->base);
+ err = i915_gem_evict_vm(&ggtt->vm);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg)
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
- err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
PIN_NOEVICT);
if (err)
goto out_locked;
@@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg)
goto out_locked;
}
- if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+ if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
PIN_NOEVICT)) {
kfree(r);
break;
@@ -490,7 +490,7 @@ int i915_gem_evict_mock_selftests(void)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -500,5 +500,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index f7dc926f4ef1..8e2e269db97e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -32,6 +32,20 @@
#include "mock_drm.h"
#include "mock_gem_device.h"
+static void cleanup_freed_objects(struct drm_i915_private *i915)
+{
+ /*
+ * As we may hold onto the struct_mutex for inordinate lengths of
+ * time, the NMI khungtaskd detector may fire for the free objects
+ * worker.
+ */
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+}
+
static void fake_free_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
@@ -134,31 +148,34 @@ static int igt_ppgtt_alloc(void *arg)
{
struct drm_i915_private *dev_priv = arg;
struct i915_hw_ppgtt *ppgtt;
- u64 size, last;
- int err;
+ u64 size, last, limit;
+ int err = 0;
/* Allocate a ppggt and try to fill the entire range */
if (!USES_PPGTT(dev_priv))
return 0;
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return -ENOMEM;
+ ppgtt = __hw_ppgtt_create(dev_priv);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
- mutex_lock(&dev_priv->drm.struct_mutex);
- err = __hw_ppgtt_init(ppgtt, dev_priv);
- if (err)
- goto err_ppgtt;
-
- if (!ppgtt->base.allocate_va_range)
+ if (!ppgtt->vm.allocate_va_range)
goto err_ppgtt_cleanup;
+ /*
+ * While we only allocate the page tables here and so we could
+ * address a much larger GTT than we could actually fit into
+ * RAM, a practical limit is the number of physical pages in the system.
+ * This should ensure that we do not run into the oomkiller during
+ * the test and take down the machine wilfully.
+ */
+ limit = totalram_pages << PAGE_SHIFT;
+ limit = min(ppgtt->vm.total, limit);
+
/* Check we can allocate the entire range */
- for (size = 4096;
- size <= ppgtt->base.total;
- size <<= 2) {
- err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
+ for (size = 4096; size <= limit; size <<= 2) {
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
if (err) {
if (err == -ENOMEM) {
pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
@@ -168,15 +185,15 @@ static int igt_ppgtt_alloc(void *arg)
goto err_ppgtt_cleanup;
}
- ppgtt->base.clear_range(&ppgtt->base, 0, size);
+ cond_resched();
+
+ ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
}
/* Check we can incrementally allocate the entire range */
- for (last = 0, size = 4096;
- size <= ppgtt->base.total;
- last = size, size <<= 2) {
- err = ppgtt->base.allocate_va_range(&ppgtt->base,
- last, size - last);
+ for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
+ last, size - last);
if (err) {
if (err == -ENOMEM) {
pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
@@ -185,13 +202,14 @@ static int igt_ppgtt_alloc(void *arg)
}
goto err_ppgtt_cleanup;
}
+
+ cond_resched();
}
err_ppgtt_cleanup:
- ppgtt->base.cleanup(&ppgtt->base);
-err_ppgtt:
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_ppgtt_put(ppgtt);
mutex_unlock(&dev_priv->drm.struct_mutex);
- kfree(ppgtt);
return err;
}
@@ -293,6 +311,8 @@ static int lowlevel_hole(struct drm_i915_private *i915,
i915_gem_object_put(obj);
kfree(order);
+
+ cleanup_freed_objects(i915);
}
return 0;
@@ -521,6 +541,7 @@ static int fill_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
+ cleanup_freed_objects(i915);
}
return 0;
@@ -607,6 +628,8 @@ err_put:
i915_gem_object_put(obj);
if (err)
return err;
+
+ cleanup_freed_objects(i915);
}
return 0;
@@ -791,6 +814,8 @@ err_obj:
kfree(order);
if (err)
return err;
+
+ cleanup_freed_objects(i915);
}
return 0;
@@ -859,6 +884,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
+ cleanup_freed_objects(i915);
return err;
}
@@ -951,6 +977,7 @@ static int shrink_boom(struct drm_i915_private *i915,
i915_gem_object_put(explode);
memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+ cleanup_freed_objects(i915);
}
return 0;
@@ -982,17 +1009,17 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
+ ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- GEM_BUG_ON(offset_in_page(ppgtt->base.total));
- GEM_BUG_ON(ppgtt->base.closed);
+ GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
+ GEM_BUG_ON(ppgtt->vm.closed);
- err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+ err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1061,18 +1088,18 @@ static int exercise_ggtt(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
restart:
- list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
- drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
+ list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
+ drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
if (hole_start < last)
continue;
- if (ggtt->base.mm.color_adjust)
- ggtt->base.mm.color_adjust(node, 0,
- &hole_start, &hole_end);
+ if (ggtt->vm.mm.color_adjust)
+ ggtt->vm.mm.color_adjust(node, 0,
+ &hole_start, &hole_end);
if (hole_start >= hole_end)
continue;
- err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+ err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
if (err)
break;
@@ -1134,7 +1161,7 @@ static int igt_ggtt_page(void *arg)
goto out_free;
memset(&tmp, 0, sizeof(tmp));
- err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
+ err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
count * PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
@@ -1147,9 +1174,9 @@ static int igt_ggtt_page(void *arg)
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, 0),
- offset, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, 0),
+ offset, I915_CACHE_NONE, 0);
}
order = i915_random_order(count, &prng);
@@ -1188,7 +1215,7 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
- ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
+ ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
intel_runtime_pm_put(i915);
drm_mm_remove_node(&tmp);
out_unpin:
@@ -1217,6 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
+ const u64 limit = totalram_pages << PAGE_SHIFT;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
@@ -1229,7 +1257,7 @@ static int exercise_mock(struct drm_i915_private *i915,
ppgtt = ctx->ppgtt;
GEM_BUG_ON(!ppgtt);
- err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+ err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
mock_context_close(ctx);
return err;
@@ -1270,7 +1298,7 @@ static int igt_gtt_reserve(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1288,20 +1316,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1319,7 +1347,7 @@ static int igt_gtt_reserve(void *arg)
/* Now we start forcing evictions */
for (total = I915_GTT_PAGE_SIZE;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1337,20 +1365,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1371,7 +1399,7 @@ static int igt_gtt_reserve(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1383,18 +1411,18 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- offset = random_offset(0, i915->ggtt.base.total,
+ offset = random_offset(0, i915->ggtt.vm.total,
2*I915_GTT_PAGE_SIZE,
I915_GTT_MIN_ALIGNMENT);
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
offset,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1429,8 +1457,8 @@ static int igt_gtt_insert(void *arg)
u64 start, end;
} invalid_insert[] = {
{
- i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
- 0, i915->ggtt.base.total,
+ i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
+ 0, i915->ggtt.vm.total,
},
{
2*I915_GTT_PAGE_SIZE, 0,
@@ -1460,7 +1488,7 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
- err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
@@ -1475,7 +1503,7 @@ static int igt_gtt_insert(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1493,15 +1521,15 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
@@ -1510,7 +1538,7 @@ static int igt_gtt_insert(void *arg)
}
if (err) {
pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1522,7 +1550,7 @@ static int igt_gtt_insert(void *arg)
list_for_each_entry(obj, &objects, st_link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1542,7 +1570,7 @@ static int igt_gtt_insert(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1557,13 +1585,13 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1579,7 +1607,7 @@ static int igt_gtt_insert(void *arg)
/* And then force evictions */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1597,19 +1625,19 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1646,7 +1674,7 @@ int i915_gem_gtt_mock_selftests(void)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -1669,7 +1697,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ggtt_page),
};
- GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
+ GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index fbdb2419d418..ba4f322d56b8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg)
obj = huge_gem_object(i915,
nreal * PAGE_SIZE,
- i915->ggtt.base.total + PAGE_SIZE);
+ i915->ggtt.vm.total + PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -169,9 +169,16 @@ static u64 tiled_offset(const struct tile *tile, u64 v)
v += y * tile->width;
v += div64_u64_rem(x, tile->width, &x) << tile->size;
v += x;
- } else {
+ } else if (tile->width == 128) {
const unsigned int ytile_span = 16;
- const unsigned int ytile_height = 32 * ytile_span;
+ const unsigned int ytile_height = 512;
+
+ v += y * ytile_span;
+ v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
+ v += x;
+ } else {
+ const unsigned int ytile_span = 32;
+ const unsigned int ytile_height = 256;
v += y * ytile_span;
v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
@@ -288,6 +295,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
kunmap(p);
if (err)
return err;
+
+ i915_vma_destroy(vma);
}
return 0;
@@ -311,7 +320,7 @@ static int igt_partial_tiling(void *arg)
obj = huge_gem_object(i915,
nreal << PAGE_SHIFT,
- (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -347,6 +356,14 @@ static int igt_partial_tiling(void *arg)
unsigned int pitch;
struct tile tile;
+ if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ /*
+ * The swizzling pattern is actually unknown as it
+ * varies based on physical address of each page.
+ * See i915_gem_detect_bit_6_swizzle().
+ */
+ break;
+
tile.tiling = tiling;
switch (tiling) {
case I915_TILING_X:
@@ -357,7 +374,8 @@ static int igt_partial_tiling(void *arg)
break;
}
- if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
+ GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
+ if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
continue;
@@ -440,7 +458,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int err;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -454,12 +472,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
return PTR_ERR(rq);
}
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
i915_request_add(rq);
- i915_gem_object_set_active_reference(obj);
+ __i915_gem_object_release_unless_active(obj);
i915_vma_unpin(vma);
- return 0;
+
+ return err;
}
static bool assert_mmap_offset(struct drm_i915_private *i915,
@@ -479,6 +499,19 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
return err == expected;
}
+static void disable_retire_worker(struct drm_i915_private *i915)
+{
+ mutex_lock(&i915->drm.struct_mutex);
+ if (!i915->gt.active_requests++) {
+ intel_runtime_pm_get(i915);
+ i915_gem_unpark(i915);
+ intel_runtime_pm_put(i915);
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+ cancel_delayed_work_sync(&i915->gt.retire_work);
+ cancel_delayed_work_sync(&i915->gt.idle_work);
+}
+
static int igt_mmap_offset_exhaustion(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -488,6 +521,10 @@ static int igt_mmap_offset_exhaustion(void *arg)
u64 hole_start, hole_end;
int loop, err;
+ /* Disable background reaper */
+ disable_retire_worker(i915);
+ GEM_BUG_ON(!i915->gt.awake);
+
/* Trim the device mmap space to only a page */
memset(&resv, 0, sizeof(resv));
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
@@ -496,7 +533,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
err = drm_mm_reserve_node(mm, &resv);
if (err) {
pr_err("Failed to trim VMA manager, err=%d\n", err);
- return err;
+ goto out_park;
}
break;
}
@@ -538,6 +575,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
+ if (i915_terminally_wedged(&i915->gpu_error))
+ break;
+
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -554,6 +594,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto err_obj;
}
+ /* NB we rely on the _active_ reference to access obj now */
GEM_BUG_ON(!i915_gem_object_is_active(obj));
err = i915_gem_object_create_mmap_offset(obj);
if (err) {
@@ -565,6 +606,13 @@ static int igt_mmap_offset_exhaustion(void *arg)
out:
drm_mm_remove_node(&resv);
+out_park:
+ mutex_lock(&i915->drm.struct_mutex);
+ if (--i915->gt.active_requests)
+ queue_delayed_work(i915->wq, &i915->gt.retire_work, 0);
+ else
+ queue_delayed_work(i915->wq, &i915->gt.idle_work, 0);
+ mutex_unlock(&i915->drm.struct_mutex);
return err;
err_obj:
i915_gem_object_put(obj);
@@ -586,7 +634,7 @@ int i915_gem_object_mock_selftests(void)
err = i915_subtests(tests, i915);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
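As a worked example of the Y-tiling arithmetic introduced above (constants taken from the 128-byte-wide branch of tiled_offset(); the standalone helper itself is hypothetical):

	/* Sketch of the (x, y) -> tiled-offset mapping for the
	 * 128-byte-wide case: 16 bytes (ytile_span) per tile row, and
	 * 512 bytes (ytile_height) consumed per whole tile column crossed. */
	static u64 ytile_offset_example(u64 x, u64 y)
	{
		const u64 ytile_span = 16;
		const u64 ytile_height = 512;

		return y * ytile_span +
		       (x / ytile_span) * ytile_height +
		       x % ytile_span;
		/* e.g. x = 40, y = 3: 3*16 + 2*512 + 8 = 1080 */
	}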
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index d16d74178e9d..1b70208eeea7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -24,3 +24,4 @@ selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
+selftest(contexts, i915_gem_context_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 94bc2e1898a4..c4aac6141e04 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -262,7 +262,7 @@ int i915_request_mock_selftests(void)
return -ENOMEM;
err = i915_subtests(tests, i915);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -286,7 +286,9 @@ static int begin_live_test(struct live_test *t,
t->func = func;
t->name = name;
- err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!",
func, name, err);
@@ -342,9 +344,9 @@ static int live_nop_request(void *arg)
mutex_lock(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id) {
- IGT_TIMEOUT(end_time);
- struct i915_request *request;
+ struct i915_request *request = NULL;
unsigned long n, prime;
+ IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
err = begin_live_test(&t, i915, __func__, engine->name);
@@ -430,7 +432,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -466,7 +468,7 @@ empty_request(struct intel_engine_cs *engine,
goto out_request;
out_request:
- __i915_request_add(request, err == 0);
+ i915_request_add(request);
return err ? ERR_PTR(err) : request;
}
@@ -555,7 +557,8 @@ out_unlock:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
struct i915_vma *vma;
@@ -593,11 +596,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
} else if (gen >= 6) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
*cmd++ = lower_32_bits(vma->node.start);
- } else if (gen >= 4) {
- *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *cmd++ = lower_32_bits(vma->node.start);
} else {
- *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
+ *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
@@ -677,7 +677,9 @@ static int live_all_engines(void *arg)
i915_gem_object_set_active_reference(batch->obj);
}
- i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[id], 0);
+ GEM_BUG_ON(err);
+
i915_request_get(request[id]);
i915_request_add(request[id]);
}
@@ -787,7 +789,9 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(err);
request[id]->batch = batch;
- i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[id], 0);
+ GEM_BUG_ON(err);
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_get(batch);
@@ -861,5 +865,9 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_sequential_engines),
SUBTEST(live_empty_request),
};
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index addc5a599c4a..86c54ea37f48 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -210,6 +210,8 @@ int __i915_subtests(const char *caller,
return -EINTR;
pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+ GEM_TRACE("Running %s/%s\n", caller, st->name);
+
err = st->func(data);
if (err && err != -EINTR) {
pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index e90f97236e50..ffa74290e054 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != &ctx->ppgtt->base) {
+ if (vma->vm != &ctx->ppgtt->vm) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm =
- &ctx->ppgtt->base;
+ struct i915_address_space *vm = &ctx->ppgtt->vm;
struct i915_vma *vma;
int err;
@@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg)
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
- INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
VALID(4096, PIN_GLOBAL),
@@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg)
VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
- VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
- VALID(i915->ggtt.base.total, PIN_GLOBAL),
- NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
+ VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
+ VALID(i915->ggtt.vm.total, PIN_GLOBAL),
+ NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
- INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
@@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg)
* variable start, end and size.
*/
NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
- NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
+ NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+ NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
#endif
{ },
#undef NOSPACE
@@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg)
* focusing on error handling of boundary conditions.
*/
- GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
+ GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
goto out;
@@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
static int igt_vma_rotate(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.base;
+ struct i915_address_space *vm = &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const struct intel_rotation_plane_info planes[] = {
{ .width = 1, .height = 1, .stride = 1 },
@@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma,
static int igt_vma_partial(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.base;
+ struct i915_address_space *vm = &i915->ggtt.vm;
const unsigned int npages = 1021; /* prime! */
struct drm_i915_gem_object *obj;
const struct phase {
@@ -734,7 +733,7 @@ int i915_vma_mock_selftests(void)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 0d06f559243f..af66e3d4e23a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -9,52 +9,8 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
-struct wedge_me {
- struct delayed_work work;
- struct drm_i915_private *i915;
- const void *symbol;
-};
-
-static void wedge_me(struct work_struct *work)
-{
- struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
- pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
-
- GEM_TRACE("%pS timed out.\n", w->symbol);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const void *symbol)
-{
- w->i915 = i915;
- w->symbol = symbol;
-
- INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
- schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
- cancel_delayed_work_sync(&w->work);
- destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
-}
-
-#define wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
- (W)->i915; \
- __fini_wedge((W)))
-
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
{
- struct wedge_me w;
-
cond_resched();
if (flags & I915_WAIT_LOCKED &&
@@ -63,8 +19,15 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
i915_gem_set_wedged(i915);
}
- wedge_on_timeout(&w, i915, HZ)
- i915_gem_wait_for_idle(i915, flags);
+ if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
+
+ GEM_TRACE("%pS timed out.\n", __builtin_return_address(0));
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(i915);
+ }
return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
}
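The rewrite above replaces the on-stack wedge_me watchdog with a plain bounded wait; the resulting pattern, extracted for clarity (timeout value as in the patch):

	/* Bounded-idle pattern now used by igt_flush_test() (sketch): wait
	 * up to HZ / 5 (~200ms); on -ETIME, wedge the GPU so the run fails
	 * fast instead of hanging the whole selftest. */
	if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME)
		i915_gem_set_wedged(i915);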
diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
new file mode 100644
index 000000000000..08e5ff11bbd9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_WEDGE_ME_H
+#define IGT_WEDGE_ME_H
+
+#include <linux/workqueue.h>
+
+#include "../i915_gem.h"
+
+struct drm_i915_private;
+
+struct igt_wedge_me {
+ struct delayed_work work;
+ struct drm_i915_private *i915;
+ const char *name;
+};
+
+static void __igt_wedge_me(struct work_struct *work)
+{
+ struct igt_wedge_me *w = container_of(work, typeof(*w), work.work);
+
+ pr_err("%s timed out, cancelling test.\n", w->name);
+
+ GEM_TRACE("%s timed out.\n", w->name);
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(w->i915);
+}
+
+static void __igt_init_wedge(struct igt_wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const char *name)
+{
+ w->i915 = i915;
+ w->name = name;
+
+ INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me);
+ schedule_delayed_work(&w->work, timeout);
+}
+
+static void __igt_fini_wedge(struct igt_wedge_me *w)
+{
+ cancel_delayed_work_sync(&w->work);
+ destroy_delayed_work_on_stack(&w->work);
+ w->i915 = NULL;
+}
+
+#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \
+ for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \
+ (W)->i915; \
+ __igt_fini_wedge((W)))
+
+#endif /* IGT_WEDGE_ME_H */
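A minimal usage sketch for the new helper, mirroring how the hangcheck and workaround selftests below consume it (the subtest body is hypothetical):

	/* Hypothetical subtest guarding a potentially-unbounded wait with
	 * the new igt_wedge_on_timeout() loop: if the body does not finish
	 * within the timeout, __igt_wedge_me() fires and wedges the GPU,
	 * unblocking the wait with an error. */
	static int example_guarded_wait(struct drm_i915_private *i915)
	{
		struct igt_wedge_me w;
		int err = 0;

		igt_wedge_on_timeout(&w, i915, HZ)	/* 1s safety net */
			err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		return err;
	}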
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index d6926e7820e5..f03b407fdbe2 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -464,7 +464,7 @@ int intel_breadcrumbs_mock_selftests(void)
return -ENOMEM;
err = i915_subtests(tests, i915->engine[RCS]);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index fb74e2cf8a0a..407c98fb9170 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -196,19 +196,23 @@ static int igt_guc_clients(void *args)
}
unreserve_doorbell(guc->execbuf_client);
- err = guc_clients_doorbell_init(guc);
+
+ __create_doorbell(guc->execbuf_client);
+ err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
if (err != -EIO) {
pr_err("unexpected (err = %d)", err);
- goto out;
+ goto out_db;
}
if (!available_dbs(guc, guc->execbuf_client->priority)) {
pr_err("doorbell not available when it should\n");
err = -EIO;
- goto out;
+ goto out_db;
}
+out_db:
/* clean after test */
+ __destroy_doorbell(guc->execbuf_client);
err = reserve_doorbell(guc->execbuf_client);
if (err) {
pr_err("failed to reserve back the doorbell back\n");
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 438e0b045a2c..65d66cdedd26 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_wedge_me.h"
#include "mock_context.h"
#include "mock_drm.h"
@@ -105,7 +106,10 @@ static int emit_recurse_batch(struct hang *h,
struct i915_request *rq)
{
struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ rq->gem_context->ppgtt ?
+ &rq->gem_context->ppgtt->vm :
+ &i915->ggtt.vm;
struct i915_vma *hws, *vma;
unsigned int flags;
u32 *batch;
@@ -127,13 +131,19 @@ static int emit_recurse_batch(struct hang *h,
if (err)
goto unpin_vma;
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(vma->obj)) {
i915_gem_object_get(vma->obj);
i915_gem_object_set_active_reference(vma->obj);
}
- i915_vma_move_to_active(hws, rq, 0);
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(hws->obj)) {
i915_gem_object_get(hws->obj);
i915_gem_object_set_active_reference(hws->obj);
@@ -168,7 +178,7 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(i915) >= 4) {
- *batch++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
@@ -181,7 +191,7 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
} else {
- *batch++ = MI_STORE_DWORD_IMM;
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_ARB_CHECK;
@@ -190,7 +200,7 @@ static int emit_recurse_batch(struct hang *h,
batch += 1024 / sizeof(*batch);
*batch++ = MI_ARB_CHECK;
- *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
+ *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
@@ -202,6 +212,7 @@ static int emit_recurse_batch(struct hang *h,
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
@@ -242,7 +253,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
err = emit_recurse_batch(h, rq);
if (err) {
- __i915_request_add(rq, false);
+ i915_request_add(rq);
return ERR_PTR(err);
}
@@ -315,7 +326,7 @@ static int igt_hang_sanitycheck(void *arg)
*h.batch = MI_BATCH_BUFFER_END;
i915_gem_chipset_flush(i915);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
timeout = i915_request_wait(rq,
I915_WAIT_LOCKED,
@@ -461,7 +472,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
mutex_unlock(&i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
@@ -560,6 +571,30 @@ struct active_engine {
#define TEST_SELF BIT(2)
#define TEST_PRIORITY BIT(3)
+static int active_request_put(struct i915_request *rq)
+{
+ int err = 0;
+
+ if (!rq)
+ return 0;
+
+ if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n",
+ rq->engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_global_seqno(rq));
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(rq->i915);
+ err = -EIO;
+ }
+
+ i915_request_put(rq);
+
+ return err;
+}
+
static int active_engine(void *data)
{
I915_RND_STATE(prng);
@@ -608,24 +643,20 @@ static int active_engine(void *data)
i915_request_add(new);
mutex_unlock(&engine->i915->drm.struct_mutex);
- if (old) {
- if (i915_request_wait(old, 0, HZ) < 0) {
- GEM_TRACE("%s timed out.\n", engine->name);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(engine->i915);
- i915_request_put(old);
- err = -EIO;
- break;
- }
- i915_request_put(old);
- }
+ err = active_request_put(old);
+ if (err)
+ break;
cond_resched();
}
- for (count = 0; count < ARRAY_SIZE(rq); count++)
- i915_request_put(rq[count]);
+ for (count = 0; count < ARRAY_SIZE(rq); count++) {
+ int err__ = active_request_put(rq[count]);
+
+ /* Keep the first error */
+ if (!err)
+ err = err__;
+ }
err_file:
mock_file_free(engine->i915, file);
@@ -719,7 +750,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
mutex_unlock(&i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
@@ -891,7 +922,7 @@ static u32 fake_hangcheck(struct i915_request *rq, u32 mask)
return reset_count;
}
-static int igt_wait_reset(void *arg)
+static int igt_reset_wait(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_request *rq;
@@ -919,7 +950,7 @@ static int igt_wait_reset(void *arg)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -965,6 +996,170 @@ unlock:
return err;
}
+struct evict_vma {
+ struct completion completion;
+ struct i915_vma *vma;
+};
+
+static int evict_vma(void *data)
+{
+ struct evict_vma *arg = data;
+ struct i915_address_space *vm = arg->vma->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_mm_node evict = arg->vma->node;
+ int err;
+
+ complete(&arg->completion);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_evict_for_node(vm, &evict, 0);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
+static int __igt_reset_evict_vma(struct drm_i915_private *i915,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_gem_object *obj;
+ struct task_struct *tsk = NULL;
+ struct i915_request *rq;
+ struct evict_vma arg;
+ struct hang h;
+ int err;
+
+ if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ return 0;
+
+ /* Check that we can recover an unbind stuck on a hanging request */
+
+ global_reset_lock(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ if (err)
+ goto unlock;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto fini;
+ }
+
+ arg.vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(arg.vma)) {
+ err = PTR_ERR(arg.vma);
+ goto out_obj;
+ }
+
+ rq = hang_create_request(&h, i915->engine[RCS]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_obj;
+ }
+
+ err = i915_vma_pin(arg.vma, 0, 0,
+ i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto out_obj;
+
+ err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unpin(arg.vma);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ goto out_rq;
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_until_running(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+ i915_gem_set_wedged(i915);
+ goto out_reset;
+ }
+
+ init_completion(&arg.completion);
+
+ tsk = kthread_run(evict_vma, &arg, "igt/evict_vma");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ tsk = NULL;
+ goto out_reset;
+ }
+
+ wait_for_completion(&arg.completion);
+
+ if (wait_for(waitqueue_active(&rq->execute), 10)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("igt/evict_vma kthread did not wait\n");
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+ i915_gem_set_wedged(i915);
+ goto out_reset;
+ }
+
+out_reset:
+ fake_hangcheck(rq, intel_engine_flag(rq->engine));
+
+ if (tsk) {
+ struct igt_wedge_me w;
+
+ /* The reset, even indirectly, should take less than 10ms. */
+ igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+ err = kthread_stop(tsk);
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+out_rq:
+ i915_request_put(rq);
+out_obj:
+ i915_gem_object_put(obj);
+fini:
+ hang_fini(&h);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ global_reset_unlock(i915);
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO;
+
+ return err;
+}
+
+static int igt_reset_evict_ggtt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+
+ return __igt_reset_evict_vma(i915, &i915->ggtt.vm);
+}
+
+static int igt_reset_evict_ppgtt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ int err;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = kernel_context(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ err = 0;
+ if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */
+ err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm);
+
+ kernel_context_close(ctx);
+ return err;
+}
+
static int wait_for_others(struct drm_i915_private *i915,
struct intel_engine_cs *exclude)
{
@@ -1014,7 +1209,7 @@ static int igt_reset_queue(void *arg)
}
i915_request_get(prev);
- __i915_request_add(prev, true);
+ i915_request_add(prev);
count = 0;
do {
@@ -1028,7 +1223,7 @@ static int igt_reset_queue(void *arg)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
/*
* XXX We don't handle resetting the kernel context
@@ -1161,7 +1356,7 @@ static int igt_handle_error(void *arg)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -1210,8 +1405,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
SUBTEST(igt_reset_engines),
- SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
+ SUBTEST(igt_reset_wait),
+ SUBTEST(igt_reset_evict_ggtt),
+ SUBTEST(igt_reset_evict_ppgtt),
SUBTEST(igt_handle_error),
};
bool saved_hangcheck;
@@ -1220,6 +1417,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (!intel_has_gpu_reset(i915))
return 0;
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO; /* we're long past hope of a successful reset */
+
intel_runtime_pm_get(i915);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1b8a07125150..582566faef09 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
struct i915_request *rq,
u32 arbitration_command)
{
- struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+ struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
@@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin,
if (err)
goto unpin_vma;
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(vma->obj)) {
i915_gem_object_get(vma->obj);
i915_gem_object_set_active_reference(vma->obj);
}
- i915_vma_move_to_active(hws, rq, 0);
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(hws->obj)) {
i915_gem_object_get(hws->obj);
i915_gem_object_set_active_reference(hws->obj);
@@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin,
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
@@ -155,7 +162,7 @@ spinner_create_request(struct spinner *spin,
err = emit_recurse_batch(spin, rq, arbitration_command);
if (err) {
- __i915_request_add(rq, false);
+ i915_request_add(rq);
return ERR_PTR(err);
}
@@ -444,16 +451,134 @@ err_wedged:
goto err_ctx_lo;
}
+static int live_preempt_hang(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ if (spinner_init(&spin_hi, i915))
+ goto err_unlock;
+
+ if (spinner_init(&spin_lo, i915))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+ ctx_lo = kernel_context(i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!wait_for_spinner(&spin_lo, rq)) {
+ GEM_TRACE("lo spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ init_completion(&engine->execlists.preempt_hang.completion);
+ engine->execlists.preempt_hang.inject_hang = true;
+
+ i915_request_add(rq);
+
+ if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
+ HZ / 10)) {
+ pr_err("Preemption did not occur within timeout!");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ i915_reset_engine(engine, NULL);
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+ engine->execlists.preempt_hang.inject_hang = false;
+
+ if (!wait_for_spinner(&spin_hi, rq)) {
+ GEM_TRACE("hi spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ spinner_end(&spin_hi);
+ spinner_end(&spin_lo);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ spinner_fini(&spin_lo);
+err_spin_hi:
+ spinner_fini(&spin_hi);
+err_unlock:
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
+ SUBTEST(live_preempt_hang),
};
if (!HAS_EXECLISTS(i915))
return 0;
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 47bc5b2ddb56..81d9d31042a9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -160,7 +160,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
i915_reg_t reg = { offset };
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv, false);
+ intel_uncore_forcewake_reset(dev_priv);
iosf_mbi_punit_release();
check_for_unclaimed_mmio(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 17444a3abbb9..0d39b3bf0c0d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,7 @@
#include "../i915_selftest.h"
+#include "igt_wedge_me.h"
#include "mock_context.h"
static struct drm_i915_gem_object *
@@ -33,7 +34,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
memset(cs, 0xc5, PAGE_SIZE);
i915_gem_object_unpin_map(result);
- vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -49,6 +50,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_pin;
}
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_req;
+
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
if (INTEL_GEN(ctx->i915) >= 8)
srm++;
@@ -67,15 +72,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
}
intel_ring_advance(rq, cs);
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- reservation_object_lock(vma->resv, NULL);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
- reservation_object_unlock(vma->resv);
-
i915_gem_object_get(result);
i915_gem_object_set_active_reference(result);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
i915_vma_unpin(vma);
return result;
@@ -112,6 +112,7 @@ static int check_whitelist(const struct whitelist *w,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
+ struct igt_wedge_me wedge;
u32 *vaddr;
int err;
int i;
@@ -120,7 +121,11 @@ static int check_whitelist(const struct whitelist *w,
if (IS_ERR(results))
return PTR_ERR(results);
- err = i915_gem_object_set_to_cpu_domain(results, false);
+ err = 0;
+ igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
+ err = i915_gem_object_set_to_cpu_domain(results, false);
+ if (i915_terminally_wedged(&ctx->i915->gpu_error))
+ err = -EIO;
if (err)
goto out_put;
@@ -283,6 +288,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
};
int err;
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
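Note: igt_wedge_on_timeout() comes from the new igt_wedge_me.h header, which is
not part of this excerpt. A plausible minimal implementation is a for-scope
guard around a delayed work that wedges the device if the guarded statement
overruns, so the set-domain wait above can never hang the selftest:

	struct igt_wedge_me {
		struct delayed_work work;
		struct drm_i915_private *i915;
		const char *name;
	};

	/* Run the next statement; if it has not finished within TIMEOUT,
	 * the delayed work fires, wedges the GPU and unblocks the wait.
	 */
	#define igt_wedge_on_timeout(W, DEV, TIMEOUT)			\
		for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__);	\
		     (W)->i915;						\
		     __igt_fini_wedge(W))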
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 501becc47c0c..8904f1ce64e3 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915,
const char *name)
{
struct i915_gem_context *ctx;
+ unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -43,6 +44,12 @@ mock_context(struct drm_i915_private *i915,
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+ struct intel_context *ce = &ctx->__engine[n];
+
+ ce->gem_context = ctx;
+ }
+
ret = ida_simple_get(&i915->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
index 302f7d103635..ca682caf1062 100644
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
@@ -94,18 +94,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vm_unmap_ram(vaddr, mock->npages);
}
-static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- return kmap_atomic(mock->pages[page_num]);
-}
-
-static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
- kunmap_atomic(addr);
-}
-
static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
@@ -130,9 +118,7 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
.unmap_dma_buf = mock_unmap_dma_buf,
.release = mock_dmabuf_release,
.map = mock_dmabuf_kmap,
- .map_atomic = mock_dmabuf_kmap_atomic,
.unmap = mock_dmabuf_kunmap,
- .unmap_atomic = mock_dmabuf_kunmap_atomic,
.mmap = mock_dmabuf_mmap,
.vmap = mock_dmabuf_vmap,
.vunmap = mock_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 26bf29d97007..22a73da45ad5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -72,25 +72,34 @@ static void hw_delay_complete(struct timer_list *t)
spin_unlock(&engine->hw_lock);
}
-static struct intel_ring *
-mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static void mock_context_unpin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
- if (!ce->pin_count++)
- i915_gem_context_get(ctx);
+ i915_gem_context_put(ce->gem_context);
+}
- return engine->buffer;
+static void mock_context_destroy(struct intel_context *ce)
+{
+ GEM_BUG_ON(ce->pin_count);
}
-static void mock_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops mock_context_ops = {
+ .unpin = mock_context_unpin,
+ .destroy = mock_context_destroy,
+};
+
+static struct intel_context *
+mock_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
- if (!--ce->pin_count)
- i915_gem_context_put(ctx);
+ if (!ce->pin_count++) {
+ i915_gem_context_get(ctx);
+ ce->ring = engine->buffer;
+ ce->ops = &mock_context_ops;
+ }
+
+ return ce;
}
static int mock_request_alloc(struct i915_request *request)
@@ -185,13 +194,14 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.status_page.page_addr = (void *)(engine + 1);
engine->base.context_pin = mock_context_pin;
- engine->base.context_unpin = mock_context_unpin;
engine->base.request_alloc = mock_request_alloc;
engine->base.emit_flush = mock_emit_flush;
engine->base.emit_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
+ lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
+
intel_engine_init_breadcrumbs(&engine->base);
engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
@@ -204,8 +214,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
if (!engine->base.buffer)
goto err_breadcrumbs;
+ if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+ goto err_ring;
+
return &engine->base;
+err_ring:
+ mock_ring_free(engine->base.buffer);
err_breadcrumbs:
intel_engine_fini_breadcrumbs(&engine->base);
i915_timeline_fini(&engine->base.timeline);
@@ -238,11 +253,15 @@ void mock_engine_free(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
+ struct intel_context *ce;
GEM_BUG_ON(timer_pending(&mock->hw_delay));
- if (engine->last_retired_context)
- intel_context_unpin(engine->last_retired_context, engine);
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
+
+ __intel_context_unpin(engine->i915->kernel_context, engine);
mock_ring_free(engine->buffer);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 94baedfa0f74..43ed8b28aeaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -136,8 +136,6 @@ static struct dev_pm_domain pm_domain = {
struct drm_i915_private *mock_gem_device(void)
{
struct drm_i915_private *i915;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
struct pci_dev *pdev;
int err;
@@ -159,7 +157,8 @@ struct drm_i915_private *mock_gem_device(void)
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- WARN_ON(pm_runtime_get_sync(&pdev->dev));
+ if (pm_runtime_enabled(&pdev->dev))
+ WARN_ON(pm_runtime_get_sync(&pdev->dev));
i915 = (struct drm_i915_private *)(pdev + 1);
pci_set_drvdata(pdev, i915);
@@ -233,13 +232,13 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915);
mkwrite_device_info(i915)->ring_mask = BIT(0);
- i915->engine[RCS] = mock_engine(i915, "mock", RCS);
- if (!i915->engine[RCS])
- goto err_unlock;
-
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
- goto err_engine;
+ goto err_unlock;
+
+ i915->engine[RCS] = mock_engine(i915, "mock", RCS);
+ if (!i915->engine[RCS])
+ goto err_context;
mutex_unlock(&i915->drm.struct_mutex);
@@ -247,9 +246,8 @@ struct drm_i915_private *mock_gem_device(void)
return i915;
-err_engine:
- for_each_engine(engine, i915, id)
- mock_engine_free(engine);
+err_context:
+ i915_gem_contexts_fini(i915);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
kmem_cache_destroy(i915->priorities);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 36c112088940..a140ea5c3a7c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -66,25 +66,21 @@ mock_ppgtt(struct drm_i915_private *i915,
return NULL;
kref_init(&ppgtt->ref);
- ppgtt->base.i915 = i915;
- ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
- ppgtt->base.file = ERR_PTR(-ENODEV);
-
- INIT_LIST_HEAD(&ppgtt->base.active_list);
- INIT_LIST_HEAD(&ppgtt->base.inactive_list);
- INIT_LIST_HEAD(&ppgtt->base.unbound_list);
-
- INIT_LIST_HEAD(&ppgtt->base.global_link);
- drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
-
- ppgtt->base.clear_range = nop_clear_range;
- ppgtt->base.insert_page = mock_insert_page;
- ppgtt->base.insert_entries = mock_insert_entries;
- ppgtt->base.bind_vma = mock_bind_ppgtt;
- ppgtt->base.unbind_vma = mock_unbind_ppgtt;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
- ppgtt->base.cleanup = mock_cleanup;
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
+ ppgtt->vm.file = ERR_PTR(-ENODEV);
+
+ i915_address_space_init(&ppgtt->vm, i915);
+
+ ppgtt->vm.clear_range = nop_clear_range;
+ ppgtt->vm.insert_page = mock_insert_page;
+ ppgtt->vm.insert_entries = mock_insert_entries;
+ ppgtt->vm.cleanup = mock_cleanup;
+
+ ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt;
+ ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
return ppgtt;
}
@@ -105,29 +101,28 @@ void mock_init_ggtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- INIT_LIST_HEAD(&i915->vm_list);
-
- ggtt->base.i915 = i915;
+ ggtt->vm.i915 = i915;
ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
- ggtt->base.total = 4096 * PAGE_SIZE;
-
- ggtt->base.clear_range = nop_clear_range;
- ggtt->base.insert_page = mock_insert_page;
- ggtt->base.insert_entries = mock_insert_entries;
- ggtt->base.bind_vma = mock_bind_ggtt;
- ggtt->base.unbind_vma = mock_unbind_ggtt;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = mock_cleanup;
-
- i915_address_space_init(&ggtt->base, i915, "global");
+ ggtt->vm.total = 4096 * PAGE_SIZE;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ ggtt->vm.insert_page = mock_insert_page;
+ ggtt->vm.insert_entries = mock_insert_entries;
+ ggtt->vm.cleanup = mock_cleanup;
+
+ ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt;
+ ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ i915_address_space_init(&ggtt->vm, i915);
}
void mock_fini_ggtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- i915_address_space_fini(&ggtt->base);
+ i915_address_space_fini(&ggtt->vm);
}
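Note: this conversion follows the split of the per-VMA callbacks out of
struct i915_address_space (renamed base -> vm here) into a dedicated vma_ops
table. The struct definitions are not in this excerpt; from the assignments
above, the new table is roughly:

	struct i915_vma_ops {
		/* Map an object into an address space with the given cache level. */
		int (*bind_vma)(struct i915_vma *vma,
				enum i915_cache_level cache_level,
				u32 flags);
		void (*unbind_vma)(struct i915_vma *vma);

		/* Allocate/release the backing-store tracking for the vma. */
		int (*set_pages)(struct i915_vma *vma);
		void (*clear_pages)(struct i915_vma *vma);
	};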
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index f349b3920199..435a2c35ee8c 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -69,7 +69,7 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
}
}
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
@@ -342,11 +342,15 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
else
pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
- }
- ret = intel_compute_dsi_pll(encoder, pipe_config);
- if (ret)
- return false;
+ ret = bxt_dsi_pll_compute(encoder, pipe_config);
+ if (ret)
+ return false;
+ } else {
+ ret = vlv_dsi_pll_compute(encoder, pipe_config);
+ if (ret)
+ return false;
+ }
pipe_config->clock_set = true;
@@ -546,12 +550,12 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_dsi_device_ready(encoder);
- else if (IS_BROXTON(dev_priv))
- bxt_dsi_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_dsi_device_ready(encoder);
+ else if (IS_GEN9_LP(dev_priv))
+ bxt_dsi_device_ready(encoder);
+ else
+ vlv_dsi_device_ready(encoder);
}
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
@@ -810,8 +814,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
- intel_disable_dsi_pll(encoder);
- intel_enable_dsi_pll(encoder, pipe_config);
+ if (IS_GEN9_LP(dev_priv)) {
+ bxt_dsi_pll_disable(encoder);
+ bxt_dsi_pll_enable(encoder, pipe_config);
+ } else {
+ vlv_dsi_pll_disable(encoder);
+ vlv_dsi_pll_enable(encoder, pipe_config);
+ }
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
@@ -929,11 +938,10 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
- IS_BROXTON(dev_priv))
- vlv_dsi_clear_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_dsi_clear_device_ready(encoder);
+ else
+ vlv_dsi_clear_device_ready(encoder);
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
@@ -949,7 +957,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
- wait_for_dsi_fifo_empty(intel_dsi, port);
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
intel_dsi_port_disable(encoder);
usleep_range(2000, 5000);
@@ -979,11 +987,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
val & ~MIPIO_RST_CTRL);
}
- intel_disable_dsi_pll(encoder);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
+ bxt_dsi_pll_disable(encoder);
+ } else {
u32 val;
+ vlv_dsi_pll_disable(encoder);
+
val = I915_READ(DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
@@ -1024,7 +1034,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+ if (IS_GEN9_LP(dev_priv) && !bxt_dsi_pll_is_enabled(dev_priv))
goto out_put_power;
/* XXX: this only works for one DSI output */
@@ -1247,16 +1257,19 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEN9_LP(dev_priv)) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
+ pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+ pipe_config);
+ } else {
+ pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+ pipe_config);
+ }
- pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
- pipe_config);
- if (!pclk)
- return;
-
- pipe_config->base.adjusted_mode.crtc_clock = pclk;
- pipe_config->port_clock = pclk;
+ if (pclk) {
+ pipe_config->base.adjusted_mode.crtc_clock = pclk;
+ pipe_config->port_clock = pclk;
+ }
}
static enum drm_mode_status
@@ -1585,20 +1598,24 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
enum port port;
u32 val;
- if (!IS_GEMINILAKE(dev_priv)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- /* Panel commands can be sent when clock is in LP11 */
- I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+ if (IS_GEMINILAKE(dev_priv))
+ return;
- intel_dsi_reset_clocks(encoder, port);
- I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* Panel commands can be sent when clock is in LP11 */
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
- val = I915_READ(MIPI_DSI_FUNC_PRG(port));
- val &= ~VID_MODE_FORMAT_MASK;
- I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+ if (IS_GEN9_LP(dev_priv))
+ bxt_dsi_reset_clocks(encoder, port);
+ else
+ vlv_dsi_reset_clocks(encoder, port);
+ I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
- I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
- }
+ val = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ val &= ~VID_MODE_FORMAT_MASK;
+ I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
}
}
@@ -1671,16 +1688,16 @@ static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
- enum i9xx_plane_id plane;
+ enum i9xx_plane_id i9xx_plane;
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (connector->encoder->crtc_mask == BIT(PIPE_B))
- plane = PLANE_B;
+ i9xx_plane = PLANE_B;
else
- plane = PLANE_A;
+ i9xx_plane = PLANE_A;
- val = I915_READ(DSPCNTR(plane));
+ val = I915_READ(DSPCNTR(i9xx_plane));
if (val & DISPPLANE_ROTATE_180)
orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
}
@@ -1713,7 +1730,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
}
}
-void intel_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_dsi *intel_dsi;
@@ -1730,14 +1747,10 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv))
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
- } else {
- DRM_ERROR("Unsupported Mipi device to reg base");
- return;
- }
+ else
+ dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index 2ff2ee7f3b78..a132a8037ecc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -111,8 +111,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
*/
-static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -142,8 +142,8 @@ static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
return 0;
}
-static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config)
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -175,7 +175,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
DRM_DEBUG_KMS("DSI PLL locked\n");
}
-static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+void vlv_dsi_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
@@ -192,7 +192,7 @@ static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
-static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
{
bool enabled;
u32 val;
@@ -229,7 +229,7 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
return enabled;
}
-static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
+void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
@@ -261,8 +261,8 @@ static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
bpp, pipe_bpp);
}
-static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config)
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -327,8 +327,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
return pclk;
}
-static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config)
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
u32 pclk;
u32 dsi_clk;
@@ -357,16 +357,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
return pclk;
}
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config)
-{
- if (IS_GEN9_LP(to_i915(encoder->base.dev)))
- return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
- else
- return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
-}
-
-static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -480,8 +471,8 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
-static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -528,8 +519,8 @@ static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
return 0;
}
-static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config)
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -568,52 +559,7 @@ static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
DRM_DEBUG_KMS("DSI PLL locked\n");
}
-bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
-{
- if (IS_GEN9_LP(dev_priv))
- return bxt_dsi_pll_is_enabled(dev_priv);
-
- MISSING_CASE(INTEL_DEVID(dev_priv));
-
- return false;
-}
-
-int intel_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_compute_dsi_pll(encoder, config);
- else if (IS_GEN9_LP(dev_priv))
- return gen9lp_compute_dsi_pll(encoder, config);
-
- return -ENODEV;
-}
-
-void intel_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_enable_dsi_pll(encoder, config);
- else if (IS_GEN9_LP(dev_priv))
- gen9lp_enable_dsi_pll(encoder, config);
-}
-
-void intel_disable_dsi_pll(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_disable_dsi_pll(encoder);
- else if (IS_GEN9_LP(dev_priv))
- bxt_disable_dsi_pll(encoder);
-}
-
-static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
- enum port port)
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 tmp;
struct drm_device *dev = encoder->base.dev;
@@ -638,13 +584,3 @@ static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
}
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
-
-void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_GEN9_LP(dev_priv))
- gen9lp_dsi_reset_clocks(encoder, port);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_dsi_reset_clocks(encoder, port);
-}
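Note: with the intel_* dispatch wrappers deleted, the vlv_/bxt_ entry points
above are called directly from vlv_dsi.c and need extern declarations in a
shared header (assumed to be intel_drv.h, which this excerpt does not show).
The prototypes follow from the definitions above:

	int vlv_dsi_pll_compute(struct intel_encoder *encoder,
				struct intel_crtc_state *config);
	void vlv_dsi_pll_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *config);
	void vlv_dsi_pll_disable(struct intel_encoder *encoder);
	u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
			     struct intel_crtc_state *config);
	void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);

	int bxt_dsi_pll_compute(struct intel_encoder *encoder,
				struct intel_crtc_state *config);
	void bxt_dsi_pll_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *config);
	void bxt_dsi_pll_disable(struct intel_encoder *encoder);
	bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
	u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
			     struct intel_crtc_state *config);
	void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);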
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1d053bbefc02..5ea0c82f9957 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -35,12 +35,6 @@
#define MAX_CRTC 4
-struct imx_drm_device {
- struct drm_device *drm;
- unsigned int pipes;
- struct drm_atomic_state *state;
-};
-
#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
@@ -219,22 +213,12 @@ static int compare_of(struct device *dev, void *data)
static int imx_drm_bind(struct device *dev)
{
struct drm_device *drm;
- struct imx_drm_device *imxdrm;
int ret;
drm = drm_dev_alloc(&imx_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
- imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
- if (!imxdrm) {
- ret = -ENOMEM;
- goto err_unref;
- }
-
- imxdrm->drm = drm;
- drm->dev_private = imxdrm;
-
/*
* enable drm irq mode.
* - with irq_enabled = true, we can use the vblank feature.
@@ -306,8 +290,7 @@ err_unbind:
component_unbind_all(drm->dev, drm);
err_kms:
drm_mode_config_cleanup(drm);
-err_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -327,7 +310,7 @@ static void imx_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops imx_drm_ops = {
@@ -355,37 +338,15 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
static int imx_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct imx_drm_device *imxdrm;
-
- /* The drm_dev is NULL before .load hook is called */
- if (drm_dev == NULL)
- return 0;
-
- drm_kms_helper_poll_disable(drm_dev);
- imxdrm = drm_dev->dev_private;
- imxdrm->state = drm_atomic_helper_suspend(drm_dev);
- if (IS_ERR(imxdrm->state)) {
- drm_kms_helper_poll_enable(drm_dev);
- return PTR_ERR(imxdrm->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm_dev);
}
static int imx_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct imx_drm_device *imx_drm;
- if (drm_dev == NULL)
- return 0;
-
- imx_drm = drm_dev->dev_private;
- drm_atomic_helper_resume(drm_dev, imx_drm->state);
- drm_kms_helper_poll_enable(drm_dev);
-
- return 0;
+ return drm_mode_config_helper_resume(drm_dev);
}
#endif
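Note: drm_mode_config_helper_suspend()/_resume() keep the suspended atomic
state in struct drm_device itself, which is what lets the private
imx_drm_device wrapper above go away. A simplified view of the suspend helper
(the real version in drm_modeset_helper.c also tolerates a NULL device):

	int drm_mode_config_helper_suspend(struct drm_device *dev)
	{
		struct drm_atomic_state *state;

		drm_kms_helper_poll_disable(dev);
		state = drm_atomic_helper_suspend(dev);
		if (IS_ERR(state)) {
			drm_kms_helper_poll_enable(dev);
			return PTR_ERR(state);
		}

		/* Stashed in the device, so drivers need no private bookkeeping. */
		dev->mode_config.suspend_state = state;

		return 0;
	}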
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 15c2bec47a04..ab9c6f706eb3 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -10,7 +10,6 @@ struct drm_display_mode;
struct drm_encoder;
struct drm_framebuffer;
struct drm_plane;
-struct imx_drm_crtc;
struct platform_device;
struct imx_crtc_state {
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 56dd7a9a8e25..3bd0f8a18e74 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -143,7 +143,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
if (imx_ldb_ch->edid) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
imx_ldb_ch->edid);
num_modes = drm_add_edid_modes(connector, imx_ldb_ch->edid);
}
@@ -471,8 +471,7 @@ static int imx_ldb_register(struct drm_device *drm,
drm_connector_init(drm, &imx_ldb_ch->connector,
&imx_ldb_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
- encoder);
+ drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
}
if (imx_ldb_ch->panel) {
@@ -612,6 +611,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(imx_ldb->regmap);
}
+ /* disable LDB by resetting the control register to POR default */
+ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
imx_ldb->dev = dev;
if (of_id)
@@ -652,14 +654,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
if (ret || i < 0 || i > 1)
return -EINVAL;
+ if (!of_device_is_available(child))
+ continue;
+
if (dual && i > 0) {
dev_warn(dev, "dual-channel mode, ignoring second output\n");
continue;
}
- if (!of_device_is_available(child))
- continue;
-
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index bc27c2699464..cffd3310240e 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -235,7 +235,7 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, tve->ddc);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
@@ -493,7 +493,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
- drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder);
+ drm_connector_attach_encoder(&tve->connector, &tve->encoder);
return 0;
}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index e83af0f2be86..7d4b710b837a 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -35,7 +35,6 @@
struct ipu_crtc {
struct device *dev;
struct drm_crtc base;
- struct imx_drm_crtc *imx_crtc;
/* plane[0] is the full plane, plane[1] is the partial plane */
struct ipu_plane *plane[2];
@@ -213,7 +212,7 @@ static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
+ u32 primary_plane_mask = drm_plane_mask(crtc->primary);
if (state->active && (primary_plane_mask & state->plane_mask) == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index aedecda9728a..aefd04e18f93 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -63,7 +63,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
}
if (imxpd->edid) {
- drm_mode_connector_update_edid_property(connector, imxpd->edid);
+ drm_connector_update_edid_property(connector, imxpd->edid);
num_modes = drm_add_edid_modes(connector, imxpd->edid);
}
@@ -197,7 +197,7 @@ static int imx_pd_register(struct drm_device *drm,
return ret;
}
} else {
- drm_mode_connector_attach_encoder(&imxpd->connector, encoder);
+ drm_connector_attach_encoder(&imxpd->connector, encoder);
}
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index 7a3eb8c17ef9..5ce84d0dbf81 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "mtk_cec.h"
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 658b8dd45b83..2d6aa150a9ff 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -539,6 +539,9 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
int ret;
int i;
+ if (!path)
+ return 0;
+
for (i = 0; i < path_len; i++) {
enum mtk_ddp_comp_id comp_id = path[i];
struct device_node *node;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 8130f3dab661..87e4191c250e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -28,8 +28,12 @@
#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
+#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
+#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
-#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8
+#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
+#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
+#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
@@ -41,45 +45,89 @@
#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
#define INT_MUTEX BIT(1)
-#define MT8173_MUTEX_MOD_DISP_OVL0 BIT(11)
-#define MT8173_MUTEX_MOD_DISP_OVL1 BIT(12)
-#define MT8173_MUTEX_MOD_DISP_RDMA0 BIT(13)
-#define MT8173_MUTEX_MOD_DISP_RDMA1 BIT(14)
-#define MT8173_MUTEX_MOD_DISP_RDMA2 BIT(15)
-#define MT8173_MUTEX_MOD_DISP_WDMA0 BIT(16)
-#define MT8173_MUTEX_MOD_DISP_WDMA1 BIT(17)
-#define MT8173_MUTEX_MOD_DISP_COLOR0 BIT(18)
-#define MT8173_MUTEX_MOD_DISP_COLOR1 BIT(19)
-#define MT8173_MUTEX_MOD_DISP_AAL BIT(20)
-#define MT8173_MUTEX_MOD_DISP_GAMMA BIT(21)
-#define MT8173_MUTEX_MOD_DISP_UFOE BIT(22)
-#define MT8173_MUTEX_MOD_DISP_PWM0 BIT(23)
-#define MT8173_MUTEX_MOD_DISP_PWM1 BIT(24)
-#define MT8173_MUTEX_MOD_DISP_OD BIT(25)
-
-#define MT2701_MUTEX_MOD_DISP_OVL BIT(3)
-#define MT2701_MUTEX_MOD_DISP_WDMA BIT(6)
-#define MT2701_MUTEX_MOD_DISP_COLOR BIT(7)
-#define MT2701_MUTEX_MOD_DISP_BLS BIT(9)
-#define MT2701_MUTEX_MOD_DISP_RDMA0 BIT(10)
-#define MT2701_MUTEX_MOD_DISP_RDMA1 BIT(12)
+#define MT8173_MUTEX_MOD_DISP_OVL0 11
+#define MT8173_MUTEX_MOD_DISP_OVL1 12
+#define MT8173_MUTEX_MOD_DISP_RDMA0 13
+#define MT8173_MUTEX_MOD_DISP_RDMA1 14
+#define MT8173_MUTEX_MOD_DISP_RDMA2 15
+#define MT8173_MUTEX_MOD_DISP_WDMA0 16
+#define MT8173_MUTEX_MOD_DISP_WDMA1 17
+#define MT8173_MUTEX_MOD_DISP_COLOR0 18
+#define MT8173_MUTEX_MOD_DISP_COLOR1 19
+#define MT8173_MUTEX_MOD_DISP_AAL 20
+#define MT8173_MUTEX_MOD_DISP_GAMMA 21
+#define MT8173_MUTEX_MOD_DISP_UFOE 22
+#define MT8173_MUTEX_MOD_DISP_PWM0 23
+#define MT8173_MUTEX_MOD_DISP_PWM1 24
+#define MT8173_MUTEX_MOD_DISP_OD 25
+
+#define MT2712_MUTEX_MOD_DISP_PWM2 10
+#define MT2712_MUTEX_MOD_DISP_OVL0 11
+#define MT2712_MUTEX_MOD_DISP_OVL1 12
+#define MT2712_MUTEX_MOD_DISP_RDMA0 13
+#define MT2712_MUTEX_MOD_DISP_RDMA1 14
+#define MT2712_MUTEX_MOD_DISP_RDMA2 15
+#define MT2712_MUTEX_MOD_DISP_WDMA0 16
+#define MT2712_MUTEX_MOD_DISP_WDMA1 17
+#define MT2712_MUTEX_MOD_DISP_COLOR0 18
+#define MT2712_MUTEX_MOD_DISP_COLOR1 19
+#define MT2712_MUTEX_MOD_DISP_AAL0 20
+#define MT2712_MUTEX_MOD_DISP_UFOE 22
+#define MT2712_MUTEX_MOD_DISP_PWM0 23
+#define MT2712_MUTEX_MOD_DISP_PWM1 24
+#define MT2712_MUTEX_MOD_DISP_OD0 25
+#define MT2712_MUTEX_MOD2_DISP_AAL1 33
+#define MT2712_MUTEX_MOD2_DISP_OD1 34
+
+#define MT2701_MUTEX_MOD_DISP_OVL 3
+#define MT2701_MUTEX_MOD_DISP_WDMA 6
+#define MT2701_MUTEX_MOD_DISP_COLOR 7
+#define MT2701_MUTEX_MOD_DISP_BLS 9
+#define MT2701_MUTEX_MOD_DISP_RDMA0 10
+#define MT2701_MUTEX_MOD_DISP_RDMA1 12
#define MUTEX_SOF_SINGLE_MODE 0
#define MUTEX_SOF_DSI0 1
#define MUTEX_SOF_DSI1 2
#define MUTEX_SOF_DPI0 3
+#define MUTEX_SOF_DPI1 4
+#define MUTEX_SOF_DSI2 5
+#define MUTEX_SOF_DSI3 6
#define OVL0_MOUT_EN_COLOR0 0x1
#define OD_MOUT_EN_RDMA0 0x1
+#define OD1_MOUT_EN_RDMA1 BIT(16)
#define UFOE_MOUT_EN_DSI0 0x1
#define COLOR0_SEL_IN_OVL0 0x1
#define OVL1_MOUT_EN_COLOR1 0x1
#define GAMMA_MOUT_EN_RDMA1 0x1
-#define RDMA1_MOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DSI2 0x4
+#define RDMA0_SOUT_DSI3 0x5
+#define RDMA1_SOUT_DPI0 0x2
+#define RDMA1_SOUT_DPI1 0x3
+#define RDMA1_SOUT_DSI1 0x1
+#define RDMA1_SOUT_DSI2 0x4
+#define RDMA1_SOUT_DSI3 0x5
+#define RDMA2_SOUT_DPI0 0x2
+#define RDMA2_SOUT_DPI1 0x3
+#define RDMA2_SOUT_DSI1 0x1
+#define RDMA2_SOUT_DSI2 0x4
+#define RDMA2_SOUT_DSI3 0x5
#define DPI0_SEL_IN_RDMA1 0x1
+#define DPI0_SEL_IN_RDMA2 0x3
+#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
+#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI1_SEL_IN_RDMA1 0x1
+#define DSI1_SEL_IN_RDMA2 0x4
+#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
+#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
#define COLOR1_SEL_IN_OVL1 0x1
#define OVL_MOUT_EN_RDMA 0x1
@@ -108,12 +156,32 @@ static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
};
+static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT2712_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_AAL1] = MT2712_MUTEX_MOD2_DISP_AAL1,
+ [DDP_COMPONENT_COLOR0] = MT2712_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_COLOR1] = MT2712_MUTEX_MOD_DISP_COLOR1,
+ [DDP_COMPONENT_OD0] = MT2712_MUTEX_MOD_DISP_OD0,
+ [DDP_COMPONENT_OD1] = MT2712_MUTEX_MOD2_DISP_OD1,
+ [DDP_COMPONENT_OVL0] = MT2712_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL1] = MT2712_MUTEX_MOD_DISP_OVL1,
+ [DDP_COMPONENT_PWM0] = MT2712_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_PWM1] = MT2712_MUTEX_MOD_DISP_PWM1,
+ [DDP_COMPONENT_PWM2] = MT2712_MUTEX_MOD_DISP_PWM2,
+ [DDP_COMPONENT_RDMA0] = MT2712_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT2712_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_RDMA2] = MT2712_MUTEX_MOD_DISP_RDMA2,
+ [DDP_COMPONENT_UFOE] = MT2712_MUTEX_MOD_DISP_UFOE,
+ [DDP_COMPONENT_WDMA0] = MT2712_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1,
+};
+
static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL] = MT8173_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL,
[DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
[DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
[DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA,
- [DDP_COMPONENT_OD] = MT8173_MUTEX_MOD_DISP_OD,
+ [DDP_COMPONENT_OD0] = MT8173_MUTEX_MOD_DISP_OD,
[DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0,
[DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1,
[DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0,
@@ -138,7 +206,7 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
*addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
value = OVL_MOUT_EN_RDMA;
- } else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) {
+ } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
*addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
value = OD_MOUT_EN_RDMA0;
} else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
@@ -150,9 +218,48 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
*addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
value = GAMMA_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
+ *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+ value = OD1_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI3;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI3;
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN;
- value = RDMA1_MOUT_DPI0;
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI3;
} else {
value = 0;
}
@@ -172,6 +279,33 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI0_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI3_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI3_SEL_IN_RDMA2;
} else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
*addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
value = COLOR1_SEL_IN_OVL1;
@@ -278,6 +412,7 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
@@ -288,13 +423,30 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
case DDP_COMPONENT_DSI1:
reg = MUTEX_SOF_DSI0;
break;
+ case DDP_COMPONENT_DSI2:
+ reg = MUTEX_SOF_DSI2;
+ break;
+ case DDP_COMPONENT_DSI3:
+ reg = MUTEX_SOF_DSI3;
+ break;
case DDP_COMPONENT_DPI0:
reg = MUTEX_SOF_DPI0;
break;
+ case DDP_COMPONENT_DPI1:
+ reg = MUTEX_SOF_DPI1;
+ break;
default:
- reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg |= ddp->mutex_mod[id];
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+ if (ddp->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg |= 1 << ddp->mutex_mod[id];
+ writel_relaxed(reg, ddp->regs + offset);
+ } else {
+ offset = DISP_REG_MUTEX_MOD2(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg |= 1 << (ddp->mutex_mod[id] - 32);
+ writel_relaxed(reg, ddp->regs + offset);
+ }
return;
}
@@ -307,20 +459,32 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
switch (id) {
case DDP_COMPONENT_DSI0:
case DDP_COMPONENT_DSI1:
+ case DDP_COMPONENT_DSI2:
+ case DDP_COMPONENT_DSI3:
case DDP_COMPONENT_DPI0:
+ case DDP_COMPONENT_DPI1:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
break;
default:
- reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg &= ~(ddp->mutex_mod[id]);
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+ if (ddp->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg &= ~(1 << ddp->mutex_mod[id]);
+ writel_relaxed(reg, ddp->regs + offset);
+ } else {
+ offset = DISP_REG_MUTEX_MOD2(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg &= ~(1 << (ddp->mutex_mod[id] - 32));
+ writel_relaxed(reg, ddp->regs + offset);
+ }
break;
}
}
@@ -407,6 +571,7 @@ static int mtk_ddp_remove(struct platform_device *pdev)
static const struct of_device_id ddp_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
+ { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod},
{ .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
{},
};
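Note: storing bit positions instead of BIT() masks is what makes the MOD2
register reachable: MT2712's AAL1/OD1 bits sit at indices 33 and 34, beyond a
32-bit mask. A standalone userspace illustration of the indexing scheme above
(register offsets copied from the patch, everything else hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	#define DISP_REG_MUTEX_MOD(n)	(0x2c + 0x20 * (n))
	#define DISP_REG_MUTEX_MOD2(n)	(0x34 + 0x20 * (n))

	static void mutex_mod_set(uint32_t *regs, int mutex_id, unsigned int mod)
	{
		unsigned int offset;

		if (mod < 32) {
			offset = DISP_REG_MUTEX_MOD(mutex_id);
			regs[offset / 4] |= 1u << mod;
		} else {
			offset = DISP_REG_MUTEX_MOD2(mutex_id);
			regs[offset / 4] |= 1u << (mod - 32);
		}
	}

	int main(void)
	{
		uint32_t regs[64] = { 0 };

		mutex_mod_set(regs, 0, 25);	/* MT8173_MUTEX_MOD_DISP_OD */
		mutex_mod_set(regs, 0, 34);	/* MT2712_MUTEX_MOD2_DISP_OD1 */

		printf("MOD=%08x MOD2=%08x\n",
		       (unsigned)regs[DISP_REG_MUTEX_MOD(0) / 4],
		       (unsigned)regs[DISP_REG_MUTEX_MOD2(0) / 4]);
		return 0;
	}

This prints MOD=02000000 MOD2=00000004: bit 25 lands in the first register,
index 34 becomes bit 2 (34 - 32) of the second.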
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 4672317e3ad1..ff974d82a4a6 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -218,18 +218,25 @@ struct mtk_ddp_comp_match {
};
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
+ [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
[DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL },
+ [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, NULL },
+ [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, NULL },
[DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
- [DDP_COMPONENT_OD] = { MTK_DISP_OD, 0, &ddp_od },
+ [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
+ [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
[DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
+ [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
+ [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
[DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, NULL },
[DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, NULL },
[DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, NULL },
@@ -271,7 +278,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
if (comp_id == DDP_COMPONENT_BLS ||
comp_id == DDP_COMPONENT_DPI0 ||
+ comp_id == DDP_COMPONENT_DPI1 ||
comp_id == DDP_COMPONENT_DSI0 ||
+ comp_id == DDP_COMPONENT_DSI1 ||
+ comp_id == DDP_COMPONENT_DSI2 ||
+ comp_id == DDP_COMPONENT_DSI3 ||
comp_id == DDP_COMPONENT_PWM0) {
comp->regs = NULL;
comp->clk = NULL;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 0828cf8bf85c..7413ffeb3c9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -41,19 +41,25 @@ enum mtk_ddp_comp_type {
};
enum mtk_ddp_comp_id {
- DDP_COMPONENT_AAL,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_AAL1,
DDP_COMPONENT_BLS,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_DPI0,
+ DDP_COMPONENT_DPI1,
DDP_COMPONENT_DSI0,
DDP_COMPONENT_DSI1,
+ DDP_COMPONENT_DSI2,
+ DDP_COMPONENT_DSI3,
DDP_COMPONENT_GAMMA,
- DDP_COMPONENT_OD,
+ DDP_COMPONENT_OD0,
+ DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
DDP_COMPONENT_OVL1,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
+ DDP_COMPONENT_PWM2,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_RDMA2,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index a2ca90fc403c..39721119713b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -146,11 +146,37 @@ static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_OD0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_DPI0,
+ DDP_COMPONENT_PWM0,
+};
+
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_ext[] = {
+ DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_AAL1,
+ DDP_COMPONENT_OD1,
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_DPI1,
+ DDP_COMPONENT_PWM1,
+};
+
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_third[] = {
+ DDP_COMPONENT_RDMA2,
+ DDP_COMPONENT_DSI3,
+ DDP_COMPONENT_PWM2,
+};
+
static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
- DDP_COMPONENT_AAL,
- DDP_COMPONENT_OD,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_OD0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_UFOE,
DDP_COMPONENT_DSI0,
@@ -173,6 +199,15 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.shadow_register = true,
};
+static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
+ .main_path = mt2712_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
+ .ext_path = mt2712_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt2712_mtk_ddp_ext),
+ .third_path = mt2712_mtk_ddp_third,
+ .third_len = ARRAY_SIZE(mt2712_mtk_ddp_third),
+};
+
static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.main_path = mt8173_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
@@ -232,6 +267,11 @@ static int mtk_drm_kms_init(struct drm_device *drm)
if (ret < 0)
goto err_component_unbind;
+ ret = mtk_drm_crtc_create(drm, private->data->third_path,
+ private->data->third_len);
+ if (ret < 0)
+ goto err_component_unbind;
+
/* Use OVL device for all DMA memory allocations */
np = private->comp_node[private->data->main_path[0]] ?:
private->comp_node[private->data->ext_path[0]];
@@ -360,24 +400,44 @@ static const struct component_master_ops mtk_drm_ops = {
};
static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
- { .compatible = "mediatek,mt2701-disp-ovl", .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt2701-disp-rdma", .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA },
- { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR },
- { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR },
- { .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL},
- { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, },
- { .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE },
- { .compatible = "mediatek,mt2701-dsi", .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8173-dsi", .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI },
- { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
- { .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
- { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS },
- { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM },
- { .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD },
+ { .compatible = "mediatek,mt2701-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8173-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt2701-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-wdma",
+ .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt2701-disp-color",
+ .data = (void *)MTK_DISP_COLOR },
+ { .compatible = "mediatek,mt8173-disp-color",
+ .data = (void *)MTK_DISP_COLOR },
+ { .compatible = "mediatek,mt8173-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8173-disp-gamma",
+ .data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8173-disp-ufoe",
+ .data = (void *)MTK_DISP_UFOE },
+ { .compatible = "mediatek,mt2701-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dpi",
+ .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt2701-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt2712-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8173-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt2701-disp-pwm",
+ .data = (void *)MTK_DISP_BLS },
+ { .compatible = "mediatek,mt8173-disp-pwm",
+ .data = (void *)MTK_DISP_PWM },
+ { .compatible = "mediatek,mt8173-disp-od",
+ .data = (void *)MTK_DISP_OD },
{ }
};
@@ -552,6 +612,8 @@ static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
static const struct of_device_id mtk_drm_of_ids[] = {
{ .compatible = "mediatek,mt2701-mmsys",
.data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt2712-mmsys",
+ .data = &mt2712_mmsys_driver_data},
{ .compatible = "mediatek,mt8173-mmsys",
.data = &mt8173_mmsys_driver_data},
{ }
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index c3378c452c0a..ecc00ca3221d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -17,7 +17,7 @@
#include <linux/io.h>
#include "mtk_drm_ddp_comp.h"
-#define MAX_CRTC 2
+#define MAX_CRTC 3
#define MAX_CONNECTOR 2
struct device;
@@ -33,6 +33,9 @@ struct mtk_mmsys_driver_data {
unsigned int main_len;
const enum mtk_ddp_comp_id *ext_path;
unsigned int ext_len;
+ const enum mtk_ddp_comp_id *third_path;
+ unsigned int third_len;
+
bool shadow_register;
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index 0d8d506695f9..be5f6f1daf55 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -15,6 +15,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>
@@ -22,78 +23,37 @@
#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
-/*
- * mtk specific framebuffer structure.
- *
- * @fb: drm framebuffer object.
- * @gem_obj: array of gem objects.
- */
-struct mtk_drm_fb {
- struct drm_framebuffer base;
- /* For now we only support a single plane */
- struct drm_gem_object *gem_obj;
-};
-
-#define to_mtk_fb(x) container_of(x, struct mtk_drm_fb, base)
-
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- return mtk_fb->gem_obj;
-}
-
-static int mtk_drm_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- return drm_gem_handle_create(file_priv, mtk_fb->gem_obj, handle);
-}
-
-static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- drm_framebuffer_cleanup(fb);
-
- drm_gem_object_put_unlocked(mtk_fb->gem_obj);
-
- kfree(mtk_fb);
-}
-
static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
- .create_handle = mtk_drm_fb_create_handle,
- .destroy = mtk_drm_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
-static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev,
+static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode,
struct drm_gem_object *obj)
{
- struct mtk_drm_fb *mtk_fb;
+ struct drm_framebuffer *fb;
int ret;
if (drm_format_num_planes(mode->pixel_format) != 1)
return ERR_PTR(-EINVAL);
- mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL);
- if (!mtk_fb)
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(dev, &mtk_fb->base, mode);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode);
- mtk_fb->gem_obj = obj;
+ fb->obj[0] = obj;
- ret = drm_framebuffer_init(dev, &mtk_fb->base, &mtk_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
- kfree(mtk_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return mtk_fb;
+ return fb;
}
/*
@@ -110,7 +70,7 @@ int mtk_fb_wait(struct drm_framebuffer *fb)
if (!fb)
return 0;
- gem = mtk_fb_get_gem_obj(fb);
+ gem = fb->obj[0];
if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
return 0;
@@ -128,7 +88,7 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
{
- struct mtk_drm_fb *mtk_fb;
+ struct drm_framebuffer *fb;
struct drm_gem_object *gem;
unsigned int width = cmd->width;
unsigned int height = cmd->height;
@@ -151,13 +111,13 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
goto unreference;
}
- mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
- if (IS_ERR(mtk_fb)) {
- ret = PTR_ERR(mtk_fb);
+ fb = mtk_drm_framebuffer_init(dev, cmd, gem);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto unreference;
}
- return &mtk_fb->base;
+ return fb;
unreference:
drm_gem_object_put_unlocked(gem);
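
The hunks above drop the driver-private mtk_drm_fb wrapper, which cached a single GEM object, in favour of the per-plane obj[] array that struct drm_framebuffer itself provides; that is what allows the generic drm_gem_fb_create_handle()/drm_gem_fb_destroy() helpers to replace the hand-rolled ones. A minimal userspace sketch of the resulting layout (both structs here are stubs standing in for the kernel types, not the real definitions):

#include <stdio.h>

/* Stand-in stubs for the DRM types involved; not the kernel definitions. */
struct drm_gem_object {
	int handle;
};

struct drm_framebuffer {
	/* one GEM object per plane, as in the real struct drm_framebuffer */
	struct drm_gem_object *obj[4];
};

/* With the object stored in fb->obj[0], no driver wrapper is needed and
 * generic helpers can locate the backing object on their own. */
static struct drm_gem_object *fb_gem(struct drm_framebuffer *fb, int plane)
{
	return fb->obj[plane];
}

int main(void)
{
	struct drm_gem_object gem = { .handle = 42 };
	struct drm_framebuffer fb = { .obj = { &gem } };

	printf("plane 0 GEM handle: %d\n", fb_gem(&fb, 0)->handle);
	return 0;
}
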
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.h b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
index 9b2ae345a4e9..7f976b196a15 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
@@ -14,7 +14,6 @@
#ifndef MTK_DRM_FB_H
#define MTK_DRM_FB_H
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb);
int mtk_fb_wait(struct drm_framebuffer *fb);
struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 2f4b0ffee598..f7e6aa1b5b7d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -95,11 +95,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
- if (!mtk_fb_get_gem_obj(fb)) {
- DRM_DEBUG_KMS("buffer is null\n");
- return -EFAULT;
- }
-
if (!state->crtc)
return 0;
@@ -127,7 +122,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return;
- gem = mtk_fb_get_gem_obj(fb);
+ gem = fb->obj[0];
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
pitch = fb->pitches[0];
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index aa0943ec32b0..66df1b177959 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -782,7 +782,7 @@ static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);
dsi->conn.dpms = DRM_MODE_DPMS_OFF;
- drm_mode_connector_attach_encoder(&dsi->conn, &dsi->encoder);
+ drm_connector_attach_encoder(&dsi->conn, &dsi->encoder);
if (dsi->panel) {
ret = drm_panel_attach(dsi->panel, &dsi->conn);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 59a11026dceb..2d45d1dd9554 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1220,7 +1220,7 @@ static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(conn, edid);
+ drm_connector_update_edid_property(conn, edid);
ret = drm_add_edid_modes(conn, edid);
kfree(edid);
@@ -1306,7 +1306,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
hdmi->conn.interlace_allowed = true;
hdmi->conn.doublescan_allowed = false;
- ret = drm_mode_connector_attach_encoder(&hdmi->conn,
+ ret = drm_connector_attach_encoder(&hdmi->conn,
bridge->encoder);
if (ret) {
dev_err(hdmi->dev,
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index c9ad45686e7a..df7247cd93f9 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -329,6 +329,12 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
vclk_freq = mode->clock;
+ if (!vic) {
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq,
+ vclk_freq, vclk_freq, false);
+ return;
+ }
+
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
@@ -542,10 +548,12 @@ static enum drm_mode_status
dw_hdmi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
+ struct meson_drm *priv = connector->dev->dev_private;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
int vic = drm_match_cea_mode(mode);
+ enum drm_mode_status status;
DRM_DEBUG_DRIVER("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
mode->base.id, mode->name, mode->vrefresh, mode->clock,
@@ -556,8 +564,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
/* Check against non-VIC supported modes */
if (!vic) {
- if (!meson_venc_hdmi_supported_mode(mode))
- return MODE_BAD;
+ status = meson_venc_hdmi_supported_mode(mode);
+ if (status != MODE_OK)
+ return status;
+
+ return meson_vclk_dmt_supported_freq(priv, mode->clock);
/* Check against supported VIC modes */
} else if (!meson_venc_hdmi_supported_vic(vic))
return MODE_BAD;
@@ -583,16 +594,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
vclk_freq, venc_freq, hdmi_freq);
- /* Finally filter by configurable vclk frequencies */
+ /* Finally filter by configurable vclk frequencies for VIC modes */
switch (vclk_freq) {
- case 25175:
- case 40000:
case 54000:
- case 65000:
case 74250:
- case 108000:
case 148500:
- case 162000:
case 297000:
case 594000:
return MODE_OK;
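
With this change a non-CEA (DMT) mode is accepted in two stages: meson_venc_hdmi_supported_mode() applies coarse timing-range checks, and only then is the pixel clock handed to meson_vclk_dmt_supported_freq() to see whether PLL parameters exist for it. A standalone sketch of the first stage, using the limits from the meson_venc.c hunk further below (the struct and enum are simplified stand-ins; the real check also rejects interlace and doublescan flags):

#include <stdio.h>

/* Simplified stand-in for enum drm_mode_status */
enum mode_status { MODE_OK, MODE_BAD_HVALUE, MODE_BAD_VVALUE };

struct dmt_mode {
	unsigned int hdisplay, vdisplay, clock;	/* clock in kHz */
};

/* Mirrors the range checks meson_venc_hdmi_supported_mode() now performs */
static enum mode_status venc_supported(const struct dmt_mode *m)
{
	if (m->hdisplay < 640 || m->hdisplay > 1920)
		return MODE_BAD_HVALUE;
	if (m->vdisplay < 480 || m->vdisplay > 1200)
		return MODE_BAD_VVALUE;
	return MODE_OK;
}

int main(void)
{
	const struct dmt_mode modes[] = {
		{ 1024,  768,  65000 },	/* classic DMT mode: passes stage 1 */
		{ 2560, 1440, 241500 },	/* too wide for the VENC: rejected */
	};

	for (unsigned int i = 0; i < 2; i++)
		printf("%ux%u -> %s\n", modes[i].hdisplay, modes[i].vdisplay,
		       venc_supported(&modes[i]) == MODE_OK ?
		       "stage 2: search PLL params" : "rejected");
	return 0;
}

Stage two, the PLL-parameter search behind meson_vclk_dmt_supported_freq(), is sketched after the meson_vclk.c diff below.
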
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f0511220317f..ae5473257f72 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -320,32 +320,23 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
CTS_VDAC_EN, CTS_VDAC_EN);
}
-
+enum {
/* PLL O1 O2 O3 VP DV EN TX */
/* 4320 /4 /4 /1 /5 /1 => /2 /2 */
-#define MESON_VCLK_HDMI_ENCI_54000 1
+ MESON_VCLK_HDMI_ENCI_54000 = 1,
/* 4320 /4 /4 /1 /5 /1 => /1 /2 */
-#define MESON_VCLK_HDMI_DDR_54000 2
+ MESON_VCLK_HDMI_DDR_54000,
/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
-#define MESON_VCLK_HDMI_DDR_148500 3
-/* 4028 /4 /4 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_25175 4
-/* 3200 /4 /2 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_40000 5
-/* 5200 /4 /2 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_65000 6
+ MESON_VCLK_HDMI_DDR_148500,
/* 2970 /2 /2 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_74250 7
-/* 4320 /4 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_108000 8
+ MESON_VCLK_HDMI_74250,
/* 2970 /1 /2 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_148500 9
-/* 3240 /2 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_162000 10
+ MESON_VCLK_HDMI_148500,
/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_297000 11
+ MESON_VCLK_HDMI_297000,
/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_594000 12
+ MESON_VCLK_HDMI_594000
+};
struct meson_vclk_params {
unsigned int pll_base_freq;
@@ -411,46 +402,6 @@ struct meson_vclk_params {
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
- [MESON_VCLK_HDMI_25175] = {
- .pll_base_freq = 4028000,
- .pll_od1 = 4,
- .pll_od2 = 4,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_40000] = {
- .pll_base_freq = 3200000,
- .pll_od1 = 4,
- .pll_od2 = 2,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_65000] = {
- .pll_base_freq = 5200000,
- .pll_od1 = 4,
- .pll_od2 = 2,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_108000] = {
- .pll_base_freq = 4320000,
- .pll_od1 = 4,
- .pll_od2 = 1,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_162000] = {
- .pll_base_freq = 3240000,
- .pll_od1 = 2,
- .pll_od2 = 1,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
};
static inline unsigned int pll_od_to_reg(unsigned int od)
@@ -470,358 +421,217 @@ static inline unsigned int pll_od_to_reg(unsigned int od)
return 0;
}
-void meson_hdmi_pll_set(struct meson_drm *priv,
- unsigned int base,
- unsigned int od1,
- unsigned int od2,
- unsigned int od3)
+void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
+ unsigned int frac, unsigned int od1,
+ unsigned int od2, unsigned int od3)
{
unsigned int val;
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
- switch (base) {
- case 2970000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* Enable and unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- 0x7 << 28, 0x4 << 28);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4e00);
- break;
-
- case 3200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000242);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4aab);
- break;
-
- case 3240000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000243);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4800);
- break;
-
- case 3865000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000250);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4855);
- break;
-
- case 4028000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000253);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4eab);
- break;
-
- case 4320000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800025a);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
- break;
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000200 | m);
+ if (frac)
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0x00004000 | frac);
+ else
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
- case 5940000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800027b);
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4c00);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
- break;
+ /* Enable and unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 0x7 << 28, 0x4 << 28);
- case 5200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800026c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
- break;
- };
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
- switch (base) {
- case 2970000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 3200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000285);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb155);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 3240000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000287);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 3865000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a1);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb02b);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 4028000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a7);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb355);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 4320000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002b4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 5940000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002f7);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb200);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 5200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002d8);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb2ab);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- };
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000200 | m);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000 | frac);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
/* Reset PLL */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- HDMI_PLL_RESET, HDMI_PLL_RESET);
+ HDMI_PLL_RESET, HDMI_PLL_RESET);
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- HDMI_PLL_RESET, 0);
+ HDMI_PLL_RESET, 0);
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
- };
+ }
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 3 << 16, pll_od_to_reg(od1) << 16);
+ 3 << 16, pll_od_to_reg(od1) << 16);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
- 3 << 21, pll_od_to_reg(od1) << 21);
+ 3 << 21, pll_od_to_reg(od1) << 21);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 3 << 22, pll_od_to_reg(od2) << 22);
+ 3 << 22, pll_od_to_reg(od2) << 22);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
- 3 << 23, pll_od_to_reg(od2) << 23);
+ 3 << 23, pll_od_to_reg(od2) << 23);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 3 << 18, pll_od_to_reg(od3) << 18);
+ 3 << 18, pll_od_to_reg(od3) << 18);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
- 3 << 19, pll_od_to_reg(od3) << 19);
+ 3 << 19, pll_od_to_reg(od3) << 19);
+
}
-void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci)
+#define XTAL_FREQ 24000
+
+static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
+ unsigned int pll_freq)
{
- unsigned int freq;
- unsigned int hdmi_tx_div;
- unsigned int venc_div;
+ /* The GXBB PLL has a /2 pre-multiplier */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ pll_freq /= 2;
- if (target == MESON_VCLK_TARGET_CVBS) {
- meson_venci_cvbs_clock_config(priv);
- return;
+ return pll_freq / XTAL_FREQ;
+}
+
+#define HDMI_FRAC_MAX_GXBB 4096
+#define HDMI_FRAC_MAX_GXL 1024
+
+static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
+ unsigned int m,
+ unsigned int pll_freq)
+{
+ unsigned int parent_freq = XTAL_FREQ;
+ unsigned int frac_max = HDMI_FRAC_MAX_GXL;
+ unsigned int frac_m;
+ unsigned int frac;
+
+ /* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ frac_max = HDMI_FRAC_MAX_GXBB;
+ parent_freq *= 2;
}
- hdmi_tx_div = vclk_freq / dac_freq;
+ /* We can have a perfect match! */
+ if (pll_freq / m == parent_freq &&
+ pll_freq % m == 0)
+ return 0;
- if (hdmi_tx_div == 0) {
- pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
- dac_freq);
- return;
+ frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq);
+ frac_m = m * frac_max;
+ if (frac_m > frac)
+ return frac_max;
+ frac -= frac_m;
+
+ return min((u16)frac, (u16)(frac_max - 1));
+}
+
+static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
+ unsigned int m,
+ unsigned int frac)
+{
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ /* Empiric supported min/max dividers */
+ if (m < 53 || m > 123)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_GXBB)
+ return false;
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ /* Empiric supported min/max dividers */
+ if (m < 106 || m > 247)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_GXL)
+ return false;
}
- venc_div = vclk_freq / venc_freq;
+ return true;
+}
- if (venc_div == 0) {
- pr_err("Fatal Error, invalid HDMI venc freq %d\n",
- venc_freq);
- return;
+static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
+ unsigned int freq,
+ unsigned int *m,
+ unsigned int *frac,
+ unsigned int *od)
+{
+ /* Cycle from /16 to /2 */
+ for (*od = 16 ; *od > 1 ; *od >>= 1) {
+ *m = meson_hdmi_pll_get_m(priv, freq * *od);
+ if (!*m)
+ continue;
+ *frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od);
+
+ DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n",
+ freq, *m, *frac, *od);
+
+ if (meson_hdmi_pll_validate_params(priv, *m, *frac))
+ return true;
}
- switch (vclk_freq) {
- case 54000:
- if (hdmi_use_enci)
- freq = MESON_VCLK_HDMI_ENCI_54000;
- else
- freq = MESON_VCLK_HDMI_DDR_54000;
- break;
- case 25175:
- freq = MESON_VCLK_HDMI_25175;
- break;
- case 40000:
- freq = MESON_VCLK_HDMI_40000;
- break;
- case 65000:
- freq = MESON_VCLK_HDMI_65000;
- break;
- case 74250:
- freq = MESON_VCLK_HDMI_74250;
- break;
- case 108000:
- freq = MESON_VCLK_HDMI_108000;
- break;
- case 148500:
- if (dac_freq != 148500)
- freq = MESON_VCLK_HDMI_DDR_148500;
- else
- freq = MESON_VCLK_HDMI_148500;
- break;
- case 162000:
- freq = MESON_VCLK_HDMI_162000;
- break;
- case 297000:
- freq = MESON_VCLK_HDMI_297000;
- break;
- case 594000:
- freq = MESON_VCLK_HDMI_594000;
- break;
- default:
- pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
- vclk_freq);
+ return false;
+}
+
+/* pll_freq is the frequency after the OD dividers */
+enum drm_mode_status
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
+{
+ unsigned int od, m, frac;
+
+ /* In DMT mode, path after PLL is always /10 */
+ freq *= 10;
+
+ if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od))
+ return MODE_OK;
+
+ return MODE_CLOCK_RANGE;
+}
+EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq);
+
+/* pll_freq is the frequency after the OD dividers */
+static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
+ unsigned int pll_freq)
+{
+ unsigned int od, m, frac, od1, od2, od3;
+
+ if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) {
+ od3 = 1;
+ if (od < 4) {
+ od1 = 2;
+ od2 = 1;
+ } else {
+ od2 = od / 4;
+ od1 = od / od2;
+ }
+
+ DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n",
+ pll_freq, m, frac, od1, od2, od3);
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
+
return;
}
+ DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n",
+ pll_freq);
+}
+
+static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
+ unsigned int od1, unsigned int od2, unsigned int od3,
+ unsigned int vid_pll_div, unsigned int vclk_div,
+ unsigned int hdmi_tx_div, unsigned int venc_div,
+ bool hdmi_use_enci)
+{
/* Set HDMI-TX sys clock */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
CTS_HDMI_SYS_SEL_MASK, 0);
@@ -831,19 +641,49 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
CTS_HDMI_SYS_EN, CTS_HDMI_SYS_EN);
/* Set HDMI PLL rate */
- meson_hdmi_pll_set(priv, params[freq].pll_base_freq,
- params[freq].pll_od1,
- params[freq].pll_od2,
- params[freq].pll_od3);
+ if (!od1 && !od2 && !od3) {
+ meson_hdmi_pll_generic_set(priv, pll_base_freq);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ switch (pll_base_freq) {
+ case 2970000:
+ meson_hdmi_pll_set_params(priv, 0x3d, 0xe00,
+ od1, od2, od3);
+ break;
+ case 4320000:
+ meson_hdmi_pll_set_params(priv, 0x5a, 0,
+ od1, od2, od3);
+ break;
+ case 5940000:
+ meson_hdmi_pll_set_params(priv, 0x7b, 0xc00,
+ od1, od2, od3);
+ break;
+ }
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ switch (pll_base_freq) {
+ case 2970000:
+ meson_hdmi_pll_set_params(priv, 0x7b, 0x300,
+ od1, od2, od3);
+ break;
+ case 4320000:
+ meson_hdmi_pll_set_params(priv, 0xb4, 0,
+ od1, od2, od3);
+ break;
+ case 5940000:
+ meson_hdmi_pll_set_params(priv, 0xf7, 0x200,
+ od1, od2, od3);
+ break;
+ }
+ }
/* Setup vid_pll divider */
- meson_vid_pll_set(priv, params[freq].vid_pll_div);
+ meson_vid_pll_set(priv, vid_pll_div);
/* Set VCLK div */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_SEL_MASK, 0);
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
- VCLK_DIV_MASK, params[freq].vclk_div - 1);
+ VCLK_DIV_MASK, vclk_div - 1);
/* Set HDMI-TX source */
switch (hdmi_tx_div) {
@@ -981,4 +821,80 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL, VCLK_EN, VCLK_EN);
}
+
+void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ unsigned int vclk_freq, unsigned int venc_freq,
+ unsigned int dac_freq, bool hdmi_use_enci)
+{
+ unsigned int freq;
+ unsigned int hdmi_tx_div;
+ unsigned int venc_div;
+
+ if (target == MESON_VCLK_TARGET_CVBS) {
+ meson_venci_cvbs_clock_config(priv);
+ return;
+ } else if (target == MESON_VCLK_TARGET_DMT) {
+ /* The DMT clock path is fixed after the PLL:
+ * - automatic PLL freq + OD management
+ * - vid_pll_div = VID_PLL_DIV_5
+ * - vclk_div = 2
+ * - hdmi_tx_div = 1
+ * - venc_div = 1
+ * - encp encoder
+ */
+ meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
+ VID_PLL_DIV_5, 2, 1, 1, false);
+ return;
+ }
+
+ hdmi_tx_div = vclk_freq / dac_freq;
+
+ if (hdmi_tx_div == 0) {
+ pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
+ dac_freq);
+ return;
+ }
+
+ venc_div = vclk_freq / venc_freq;
+
+ if (venc_div == 0) {
+ pr_err("Fatal Error, invalid HDMI venc freq %d\n",
+ venc_freq);
+ return;
+ }
+
+ switch (vclk_freq) {
+ case 54000:
+ if (hdmi_use_enci)
+ freq = MESON_VCLK_HDMI_ENCI_54000;
+ else
+ freq = MESON_VCLK_HDMI_DDR_54000;
+ break;
+ case 74250:
+ freq = MESON_VCLK_HDMI_74250;
+ break;
+ case 148500:
+ if (dac_freq != 148500)
+ freq = MESON_VCLK_HDMI_DDR_148500;
+ else
+ freq = MESON_VCLK_HDMI_148500;
+ break;
+ case 297000:
+ freq = MESON_VCLK_HDMI_297000;
+ break;
+ case 594000:
+ freq = MESON_VCLK_HDMI_594000;
+ break;
+ default:
+ pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
+ vclk_freq);
+ return;
+ }
+
+ meson_vclk_set(priv, params[freq].pll_base_freq,
+ params[freq].pll_od1, params[freq].pll_od2,
+ params[freq].pll_od3, params[freq].vid_pll_div,
+ params[freq].vclk_div, hdmi_tx_div, venc_div,
+ hdmi_use_enci);
+}
EXPORT_SYMBOL_GPL(meson_vclk_setup);
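
The table-driven PLL setup above is replaced by a computed search: for a target post-OD frequency, walk the OD divider from /16 down to /2, derive the integer multiplier m from the 24 MHz crystal, derive the fractional part, and keep the first combination that the empiric m/frac limits accept. Below is a standalone re-implementation of that arithmetic for the GXL/GXM case (the helper names are local stand-ins, not the kernel symbols), run for the 65 MHz pixel clock of 1024x768@60 on the DMT path, where the chain after the PLL always divides by 10:

#include <stdio.h>
#include <stdint.h>

#define XTAL_FREQ		24000	/* kHz */
#define HDMI_FRAC_MAX_GXL	1024

static unsigned int pll_get_m(unsigned int pll_freq)
{
	return pll_freq / XTAL_FREQ;	/* GXL/GXM: no /2 pre-multiplier */
}

static unsigned int pll_get_frac(unsigned int m, unsigned int pll_freq)
{
	uint64_t frac, frac_m;

	/* Perfect integer multiple: no fractional part needed */
	if (pll_freq / m == XTAL_FREQ && pll_freq % m == 0)
		return 0;

	frac = (uint64_t)pll_freq * HDMI_FRAC_MAX_GXL / XTAL_FREQ;
	frac_m = (uint64_t)m * HDMI_FRAC_MAX_GXL;
	if (frac_m > frac)
		return HDMI_FRAC_MAX_GXL;	/* out of range: rejected below */
	frac -= frac_m;
	return frac < HDMI_FRAC_MAX_GXL - 1 ? (unsigned int)frac
					    : HDMI_FRAC_MAX_GXL - 1;
}

static int pll_validate(unsigned int m, unsigned int frac)
{
	/* Empiric GXL/GXM divider limits from the patch */
	return m >= 106 && m <= 247 && frac < HDMI_FRAC_MAX_GXL;
}

int main(void)
{
	unsigned int pixel_clk = 65000;		/* 1024x768@60, kHz */
	unsigned int pll_freq = pixel_clk * 10;	/* DMT path: /10 after PLL */
	unsigned int od, od1, od2, od3, m, frac;

	for (od = 16; od > 1; od >>= 1) {
		m = pll_get_m(pll_freq * od);
		if (!m)
			continue;
		frac = pll_get_frac(m, pll_freq * od);
		if (!pll_validate(m, frac))
			continue;
		/* Split the total OD the way meson_hdmi_pll_generic_set() does */
		od3 = 1;
		if (od < 4) {
			od1 = 2;
			od2 = 1;
		} else {
			od2 = od / 4;
			od1 = od / od2;
		}
		printf("VCO %u kHz: m=0x%x frac=0x%x od=%u/%u/%u\n",
		       pll_freq * od, m, frac, od1, od2, od3);
		return 0;
	}
	printf("no PLL parameters for %u kHz\n", pll_freq);
	return 1;
}

This settles on a 5200000 kHz VCO with m=0xd8 and an OD split of /4 /2 /1, matching the base frequency and dividers of the fixed MESON_VCLK_HDMI_65000 table entry this patch deletes.
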
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index 0401b5213471..869fa3a3073e 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -24,11 +24,15 @@
enum {
MESON_VCLK_TARGET_CVBS = 0,
MESON_VCLK_TARGET_HDMI = 1,
+ MESON_VCLK_TARGET_DMT = 2,
};
/* 27MHz is the CVBS Pixel Clock */
#define MESON_VCLK_CVBS 27000
+enum drm_mode_status
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int vclk_freq, unsigned int venc_freq,
unsigned int dac_freq, bool hdmi_use_enci);
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 6e2701389801..514245e69b38 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -697,314 +697,6 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
},
};
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_640x480_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x31f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x90,
- .havon_end = 0x30f,
- .vavon_bline = 0x23,
- .vavon_eline = 0x202,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x60,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 2,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x20c,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_800x600_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x41f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0xD8,
- .havon_end = 0x3f7,
- .vavon_bline = 0x1b,
- .vavon_eline = 0x272,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x80,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 4,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x273,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1024x768_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 1343,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 296,
- .havon_end = 1319,
- .vavon_bline = 35,
- .vavon_eline = 802,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 136,
- .vso_begin = 30,
- .vso_end = 50,
- .vso_bline = 0,
- .vso_eline = 6,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 805,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1152x864_75 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x63f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x180,
- .havon_end = 0x5ff,
- .vavon_bline = 0x23,
- .vavon_eline = 0x382,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x80,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 3,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x383,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1280x1024_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x697,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x168,
- .havon_end = 0x667,
- .vavon_bline = 0x29,
- .vavon_eline = 0x428,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x70,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 3,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x429,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1600x1200_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x86f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x1f0,
- .havon_end = 0x82f,
- .vavon_bline = 0x31,
- .vavon_eline = 0x4e0,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0xc0,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 3,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x4e1,
- },
-};
-
-struct meson_hdmi_venc_dmt_mode {
- struct drm_display_mode drm_mode;
- union meson_hdmi_venc_mode *mode;
-} meson_hdmi_venc_dmt_modes[] = {
- /* 640x480@60Hz */
- {
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- &meson_hdmi_encp_mode_640x480_60,
- },
- /* 800x600@60Hz */
- {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_800x600_60,
- },
- /* 1024x768@60Hz */
- {
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024,
- 1048, 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- &meson_hdmi_encp_mode_1024x768_60,
- },
- /* 1152x864@75Hz */
- {
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152,
- 1216, 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_1152x864_75,
- },
- /* 1280x1024@60Hz */
- {
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280,
- 1328, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_1280x1024_60,
- },
- /* 1600x1200@60Hz */
- {
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600,
- 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_1600x1200_60,
- },
- /* 1920x1080@60Hz */
- {
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920,
- 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- &meson_hdmi_encp_mode_1080p60
- },
- { }, /* sentinel */
-};
-
struct meson_hdmi_venc_vic_mode {
unsigned int vic;
union meson_hdmi_venc_mode *mode;
@@ -1044,17 +736,20 @@ static unsigned long modulo(unsigned long a, unsigned long b)
return a;
}
-bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
+enum drm_mode_status
+meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
{
- struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
+ if (mode->flags & ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))
+ return MODE_BAD;
- while (vmode->mode) {
- if (drm_mode_equal(&vmode->drm_mode, mode))
- return true;
- vmode++;
- }
+ if (mode->hdisplay < 640 || mode->hdisplay > 1920)
+ return MODE_BAD_HVALUE;
- return false;
+ if (mode->vdisplay < 480 || mode->vdisplay > 1200)
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_mode);
@@ -1072,18 +767,29 @@ bool meson_venc_hdmi_supported_vic(int vic)
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic);
-static union meson_hdmi_venc_mode
-*meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode)
+void meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode,
+ union meson_hdmi_venc_mode *dmt_mode)
{
- struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
-
- while (vmode->mode) {
- if (drm_mode_equal(&vmode->drm_mode, mode))
- return vmode->mode;
- vmode++;
- }
-
- return NULL;
+ memset(dmt_mode, 0, sizeof(*dmt_mode));
+
+ dmt_mode->encp.dvi_settings = 0x21;
+ dmt_mode->encp.video_mode = 0x4040;
+ dmt_mode->encp.video_mode_adv = 0x18;
+ dmt_mode->encp.max_pxcnt = mode->htotal - 1;
+ dmt_mode->encp.havon_begin = mode->htotal - mode->hsync_start;
+ dmt_mode->encp.havon_end = dmt_mode->encp.havon_begin +
+ mode->hdisplay - 1;
+ dmt_mode->encp.vavon_bline = mode->vtotal - mode->vsync_start;
+ dmt_mode->encp.vavon_eline = dmt_mode->encp.vavon_bline +
+ mode->vdisplay - 1;
+ dmt_mode->encp.hso_begin = 0;
+ dmt_mode->encp.hso_end = mode->hsync_end - mode->hsync_start;
+ dmt_mode->encp.vso_begin = 30;
+ dmt_mode->encp.vso_end = 50;
+ dmt_mode->encp.vso_bline = 0;
+ dmt_mode->encp.vso_eline = mode->vsync_end - mode->vsync_start;
+ dmt_mode->encp.vso_eline_present = true;
+ dmt_mode->encp.max_lncnt = mode->vtotal - 1;
}
static union meson_hdmi_venc_mode *meson_venc_hdmi_get_vic_vmode(int vic)
@@ -1120,6 +826,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
struct drm_display_mode *mode)
{
union meson_hdmi_venc_mode *vmode = NULL;
+ union meson_hdmi_venc_mode vmode_dmt;
bool use_enci = false;
bool venc_repeat = false;
bool hdmi_repeat = false;
@@ -1147,14 +854,17 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int sof_lines;
unsigned int vsync_lines;
- if (meson_venc_hdmi_supported_vic(vic))
+ if (meson_venc_hdmi_supported_vic(vic)) {
vmode = meson_venc_hdmi_get_vic_vmode(vic);
- else
- vmode = meson_venc_hdmi_get_dmt_vmode(mode);
- if (!vmode) {
- dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
- DRM_MODE_FMT "\n", __func__, DRM_MODE_ARG(mode));
- return;
+ if (!vmode) {
+ dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
+ DRM_MODE_FMT "\n", __func__,
+ DRM_MODE_ARG(mode));
+ return;
+ }
+ } else {
+ meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
+ vmode = &vmode_dmt;
}
/* Use VENCI for 480i and 576i and double HDMI pixels */
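
Instead of looking a DMT mode up in the fixed table deleted above, the ENCP timings are now derived directly from the drm_display_mode fields. A standalone check of that arithmetic against the 1024x768@60 entry the patch removes (the plain struct is a local stand-in for the DRM mode fields involved):

#include <stdio.h>

/* 1024x768@60 DMT timings, taken from the table this patch deletes */
struct mode {
	unsigned int hdisplay, hsync_start, hsync_end, htotal;
	unsigned int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	const struct mode m = { 1024, 1048, 1184, 1344, 768, 771, 777, 806 };

	unsigned int havon_begin = m.htotal - m.hsync_start;
	unsigned int havon_end   = havon_begin + m.hdisplay - 1;
	unsigned int vavon_bline = m.vtotal - m.vsync_start;
	unsigned int vavon_eline = vavon_bline + m.vdisplay - 1;
	unsigned int hso_end     = m.hsync_end - m.hsync_start;
	unsigned int vso_eline   = m.vsync_end - m.vsync_start;

	/* Expected: 296 1319 35 802 136 6 1343 805, exactly the values
	 * hardcoded in the removed meson_hdmi_encp_mode_1024x768_60 */
	printf("havon %u..%u vavon %u..%u hso_end %u vso_eline %u "
	       "max_pxcnt %u max_lncnt %u\n",
	       havon_begin, havon_end, vavon_bline, vavon_eline,
	       hso_end, vso_eline, m.htotal - 1, m.vtotal - 1);
	return 0;
}

The computed values reproduce the hand-tuned table entry exactly, which is what makes the generic meson_venc_hdmi_get_dmt_vmode() a safe replacement for the per-mode structures.
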
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 7c18a36a0dd0..97eaebbfa0c4 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -58,7 +58,8 @@ struct meson_cvbs_enci_mode {
};
/* HDMI Clock parameters */
-bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
+enum drm_mode_status
+meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
bool meson_venc_hdmi_supported_vic(int vic);
bool meson_venc_hdmi_venc_repeat(int vic);
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 79d95ca8a0c0..f7945bae3b4a 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -282,7 +282,7 @@ int meson_venc_cvbs_create(struct meson_drm *priv)
encoder->possible_crtcs = BIT(0);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 8918539a19aa..acf7bfe68454 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1553,7 +1553,7 @@ static int mga_vga_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
@@ -1747,7 +1747,7 @@ int mgag200_modeset_init(struct mga_device *mdev)
return -1;
}
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
ret = mgag200_fbdev_init(mdev);
if (ret) {
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 38cbde971b48..843a9d40c05e 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -12,6 +12,7 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
select PM_OPP
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index cd40c050b2d7..261fa79d456d 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/gpu/drm/msm
+ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
@@ -10,6 +11,9 @@ msm-y := \
adreno/a5xx_gpu.o \
adreno/a5xx_power.o \
adreno/a5xx_preempt.o \
+ adreno/a6xx_gpu.o \
+ adreno/a6xx_gmu.o \
+ adreno/a6xx_hfi.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -45,6 +49,33 @@ msm-y := \
disp/mdp5/mdp5_mixer.o \
disp/mdp5/mdp5_plane.o \
disp/mdp5/mdp5_smp.o \
+ disp/dpu1/dpu_core_irq.o \
+ disp/dpu1/dpu_core_perf.o \
+ disp/dpu1/dpu_crtc.o \
+ disp/dpu1/dpu_encoder.o \
+ disp/dpu1/dpu_encoder_phys_cmd.o \
+ disp/dpu1/dpu_encoder_phys_vid.o \
+ disp/dpu1/dpu_formats.o \
+ disp/dpu1/dpu_hw_blk.o \
+ disp/dpu1/dpu_hw_catalog.o \
+ disp/dpu1/dpu_hw_cdm.o \
+ disp/dpu1/dpu_hw_ctl.o \
+ disp/dpu1/dpu_hw_interrupts.o \
+ disp/dpu1/dpu_hw_intf.o \
+ disp/dpu1/dpu_hw_lm.o \
+ disp/dpu1/dpu_hw_pingpong.o \
+ disp/dpu1/dpu_hw_sspp.o \
+ disp/dpu1/dpu_hw_top.o \
+ disp/dpu1/dpu_hw_util.o \
+ disp/dpu1/dpu_hw_vbif.o \
+ disp/dpu1/dpu_io_util.o \
+ disp/dpu1/dpu_irq.o \
+ disp/dpu1/dpu_kms.o \
+ disp/dpu1/dpu_mdss.o \
+ disp/dpu1/dpu_plane.o \
+ disp/dpu1/dpu_power_handle.o \
+ disp/dpu1/dpu_rm.o \
+ disp/dpu1/dpu_vbif.o \
msm_atomic.o \
msm_debugfs.o \
msm_drv.o \
@@ -62,7 +93,8 @@ msm-y := \
msm_ringbuffer.o \
msm_submitqueue.o
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+ disp/dpu1/dpu_dbg.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 644374c7b3e0..4bff0a740c7d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -84,13 +86,12 @@ enum a2xx_sq_surfaceformat {
FMT_5_5_5_1 = 13,
FMT_8_8_8_8_A = 14,
FMT_4_4_4_4 = 15,
- FMT_10_11_11 = 16,
- FMT_11_11_10 = 17,
+ FMT_8_8_8 = 16,
FMT_DXT1 = 18,
FMT_DXT2_3 = 19,
FMT_DXT4_5 = 20,
+ FMT_10_10_10_2 = 21,
FMT_24_8 = 22,
- FMT_24_8_FLOAT = 23,
FMT_16 = 24,
FMT_16_16 = 25,
FMT_16_16_16_16 = 26,
@@ -106,29 +107,23 @@ enum a2xx_sq_surfaceformat {
FMT_32_FLOAT = 36,
FMT_32_32_FLOAT = 37,
FMT_32_32_32_32_FLOAT = 38,
- FMT_32_AS_8 = 39,
- FMT_32_AS_8_8 = 40,
- FMT_16_MPEG = 41,
- FMT_16_16_MPEG = 42,
- FMT_8_INTERLACED = 43,
- FMT_32_AS_8_INTERLACED = 44,
- FMT_32_AS_8_8_INTERLACED = 45,
- FMT_16_INTERLACED = 46,
- FMT_16_MPEG_INTERLACED = 47,
- FMT_16_16_MPEG_INTERLACED = 48,
+ FMT_ATI_TC_RGB = 39,
+ FMT_ATI_TC_RGBA = 40,
+ FMT_ATI_TC_555_565_RGB = 41,
+ FMT_ATI_TC_555_565_RGBA = 42,
+ FMT_ATI_TC_RGBA_INTERP = 43,
+ FMT_ATI_TC_555_565_RGBA_INTERP = 44,
+ FMT_ETC1_RGBA_INTERP = 46,
+ FMT_ETC1_RGB = 47,
+ FMT_ETC1_RGBA = 48,
FMT_DXN = 49,
- FMT_8_8_8_8_AS_16_16_16_16 = 50,
- FMT_DXT1_AS_16_16_16_16 = 51,
- FMT_DXT2_3_AS_16_16_16_16 = 52,
- FMT_DXT4_5_AS_16_16_16_16 = 53,
+ FMT_2_3_3 = 51,
FMT_2_10_10_10_AS_16_16_16_16 = 54,
- FMT_10_11_11_AS_16_16_16_16 = 55,
- FMT_11_11_10_AS_16_16_16_16 = 56,
+ FMT_10_10_10_2_AS_16_16_16_16 = 55,
FMT_32_32_32_FLOAT = 57,
FMT_DXT3A = 58,
FMT_DXT5A = 59,
FMT_CTX1 = 60,
- FMT_DXT3A_AS_1_1_1_1 = 61,
};
enum a2xx_sq_ps_vtx_mode {
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 663a73216926..645a19aef399 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 3ebbeb3a9b68..669c2d4b070d 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -411,15 +411,6 @@ static const unsigned int a3xx_registers[] = {
~0 /* sentinel */
};
-#ifdef CONFIG_DEBUG_FS
-static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
-{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A3XX_RBBM_STATUS));
- adreno_show(gpu, m);
-}
-#endif
-
/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a3xx_dump(struct msm_gpu *gpu)
{
@@ -427,6 +418,21 @@ static void a3xx_dump(struct msm_gpu *gpu)
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_dump(gpu);
}
+
+static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+
+ return state;
+}
+
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -450,9 +456,11 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
-#ifdef CONFIG_DEBUG_FS
- .show = a3xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
#endif
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 1a14f4a40b9c..19565e87aa7b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -263,12 +265,6 @@ enum a4xx_depth_format {
DEPTH4_32 = 3,
};
-enum a4xx_tess_spacing {
- EQUAL_SPACING = 0,
- ODD_SPACING = 2,
- EVEN_SPACING = 3,
-};
-
enum a4xx_ccu_perfcounter_select {
CCU_BUSY_CYCLES = 0,
CCU_RB_DEPTH_RETURN_STALL = 2,
@@ -3544,12 +3540,13 @@ static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_VS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3571,12 +3568,13 @@ static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_FS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3598,12 +3596,13 @@ static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_HS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3625,12 +3624,13 @@ static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_DS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3652,12 +3652,13 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_GS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3672,23 +3673,103 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
}
-#define REG_A4XX_HLSQ_CS_CONTROL 0x000023ca
+#define REG_A4XX_HLSQ_CS_CONTROL_REG 0x000023ca
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_CS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_1_SIZE_X(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT) & A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf
#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT) & A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1
#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT) & A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3
#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK 0x000000ff
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK 0xff000000
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK;
+}
#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5
@@ -4087,5 +4168,71 @@ static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
#define REG_A4XX_TEX_CONST_7 0x00000007
+#define REG_A4XX_SSBO_0_0 0x00000000
+#define A4XX_SSBO_0_0_BASE__MASK 0xffffffe0
+#define A4XX_SSBO_0_0_BASE__SHIFT 5
+static inline uint32_t A4XX_SSBO_0_0_BASE(uint32_t val)
+{
+ return ((val >> 5) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK;
+}
+
+#define REG_A4XX_SSBO_0_1 0x00000001
+#define A4XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A4XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_1_PITCH__SHIFT) & A4XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_2 0x00000002
+#define A4XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A4XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_3 0x00000003
+#define A4XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A4XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_3_CPP__SHIFT) & A4XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A4XX_SSBO_1_0 0x00000000
+#define A4XX_SSBO_1_0_CPP__MASK 0x0000001f
+#define A4XX_SSBO_1_0_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_0_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_CPP__SHIFT) & A4XX_SSBO_1_0_CPP__MASK;
+}
+#define A4XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A4XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A4XX_SSBO_1_0_FMT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SSBO_1_0_FMT__SHIFT) & A4XX_SSBO_1_0_FMT__MASK;
+}
+#define A4XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A4XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_WIDTH__SHIFT) & A4XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A4XX_SSBO_1_1 0x00000001
+#define A4XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A4XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_HEIGHT__SHIFT) & A4XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A4XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A4XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_DEPTH__SHIFT) & A4XX_SSBO_1_1_DEPTH__MASK;
+}
+
#endif /* A4XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 16d3d596638e..7c4e6dc1ed59 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -455,15 +455,19 @@ static const unsigned int a4xx_registers[] = {
~0 /* sentinel */
};
-#ifdef CONFIG_DEBUG_FS
-static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
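+/*
+ * Snapshot the GPU state for debugfs or devcoredump; on a4xx this is
+ * just the generic adreno state plus RBBM_STATUS.
+ */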
+static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A4XX_RBBM_STATUS));
- adreno_show(gpu, m);
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+ state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
+
+ return state;
}
-#endif
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
@@ -538,9 +542,11 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
-#ifdef CONFIG_DEBUG_FS
- .show = a4xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
#endif
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
.get_timestamp = a4xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index e0e6711f4f78..182d37ff3794 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -119,6 +121,11 @@ enum a5xx_vtx_fmt {
VFMT5_8_8_8_8_SNORM = 50,
VFMT5_8_8_8_8_UINT = 51,
VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_10_10_10_2_UNORM = 54,
+ VFMT5_10_10_10_2_SNORM = 57,
+ VFMT5_10_10_10_2_UINT = 58,
+ VFMT5_10_10_10_2_SINT = 59,
+ VFMT5_11_11_10_FLOAT = 66,
VFMT5_16_16_UNORM = 67,
VFMT5_16_16_SNORM = 68,
VFMT5_16_16_FLOAT = 69,
@@ -204,14 +211,45 @@ enum a5xx_tex_fmt {
TFMT5_32_32_FLOAT = 103,
TFMT5_32_32_UINT = 104,
TFMT5_32_32_SINT = 105,
+ TFMT5_32_32_32_UINT = 114,
+ TFMT5_32_32_32_SINT = 115,
+ TFMT5_32_32_32_FLOAT = 116,
TFMT5_32_32_32_32_FLOAT = 130,
TFMT5_32_32_32_32_UINT = 131,
TFMT5_32_32_32_32_SINT = 132,
TFMT5_X8Z24_UNORM = 160,
+ TFMT5_ETC2_RG11_UNORM = 171,
+ TFMT5_ETC2_RG11_SNORM = 172,
+ TFMT5_ETC2_R11_UNORM = 173,
+ TFMT5_ETC2_R11_SNORM = 174,
+ TFMT5_ETC1 = 175,
+ TFMT5_ETC2_RGB8 = 176,
+ TFMT5_ETC2_RGBA8 = 177,
+ TFMT5_ETC2_RGB8A1 = 178,
+ TFMT5_DXT1 = 179,
+ TFMT5_DXT3 = 180,
+ TFMT5_DXT5 = 181,
TFMT5_RGTC1_UNORM = 183,
TFMT5_RGTC1_SNORM = 184,
TFMT5_RGTC2_UNORM = 187,
TFMT5_RGTC2_SNORM = 188,
+ TFMT5_BPTC_UFLOAT = 190,
+ TFMT5_BPTC_FLOAT = 191,
+ TFMT5_BPTC = 192,
+ TFMT5_ASTC_4x4 = 193,
+ TFMT5_ASTC_5x4 = 194,
+ TFMT5_ASTC_5x5 = 195,
+ TFMT5_ASTC_6x5 = 196,
+ TFMT5_ASTC_6x6 = 197,
+ TFMT5_ASTC_8x5 = 198,
+ TFMT5_ASTC_8x6 = 199,
+ TFMT5_ASTC_8x8 = 200,
+ TFMT5_ASTC_10x5 = 201,
+ TFMT5_ASTC_10x6 = 202,
+ TFMT5_ASTC_10x8 = 203,
+ TFMT5_ASTC_10x10 = 204,
+ TFMT5_ASTC_12x10 = 205,
+ TFMT5_ASTC_12x12 = 206,
};
enum a5xx_tex_fetchsize {
@@ -239,7 +277,7 @@ enum a5xx_blit_buf {
BLIT_MRT6 = 6,
BLIT_MRT7 = 7,
BLIT_ZS = 8,
- BLIT_Z32 = 9,
+ BLIT_S = 9,
};
enum a5xx_cp_perfcounter_select {
@@ -899,6 +937,12 @@ enum a5xx_tex_type {
#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+#define REG_A5XX_CP_ME_NRT_ADDR_LO 0x0000080d
+
+#define REG_A5XX_CP_ME_NRT_ADDR_HI 0x0000080e
+
+#define REG_A5XX_CP_ME_NRT_DATA 0x00000810
+
#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
@@ -2072,9 +2116,17 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_PC_MODE_CNTL 0x00000d02
-#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+#define REG_A5XX_PC_INDEX_BUF_LO 0x00000d04
+
+#define REG_A5XX_PC_INDEX_BUF_HI 0x00000d05
+
+#define REG_A5XX_PC_START_INDEX 0x00000d06
-#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+#define REG_A5XX_PC_MAX_INDEX 0x00000d07
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_LO 0x00000d08
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_HI 0x00000d09
#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
@@ -2327,6 +2379,14 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_VBIF_PERF_CNT_EN3 0x000030c3
+#define REG_A5XX_VBIF_PERF_CNT_CLR0 0x000030c8
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR1 0x000030c9
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR2 0x000030ca
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR3 0x000030cb
+
#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
@@ -2590,6 +2650,7 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+#define A5XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
#define REG_A5XX_UNKNOWN_E001 0x0000e001
@@ -2700,7 +2761,7 @@ static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
}
-#define REG_A5XX_UNKNOWN_E093 0x0000e093
+#define REG_A5XX_GRAS_SU_LAYERED 0x0000e093
#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
@@ -2936,7 +2997,9 @@ static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000004
#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
@@ -3002,6 +3065,13 @@ static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0
static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A5XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A5XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
@@ -3060,6 +3130,12 @@ static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode
{
return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
}
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00001800
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 11
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
@@ -3223,6 +3299,7 @@ static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
}
#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3369,7 +3446,25 @@ static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
}
-#define REG_A5XX_UNKNOWN_E1C7 0x0000e1c7
+#define REG_A5XX_RB_STENCILREFMASK_BF 0x0000e1c7
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
@@ -3428,6 +3523,7 @@ static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
}
#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+#define A5XX_RB_RESOLVE_CNTL_3_TILED 0x00000001
#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
@@ -3459,6 +3555,7 @@ static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE 0x00000004
#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
@@ -3627,22 +3724,69 @@ static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
{
return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
}
+#define A5XX_PC_PRIMITIVE_CNTL_PRIMITIVE_RESTART 0x00000100
+#define A5XX_PC_PRIMITIVE_CNTL_COUNT_PRIMITIVES 0x00000200
#define A5XX_PC_PRIMITIVE_CNTL_PROVOKING_VTX_LAST 0x00000400
#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_ENABLE 0x00000040
#define REG_A5XX_UNKNOWN_E389 0x0000e389
#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
-#define REG_A5XX_UNKNOWN_E38D 0x0000e38d
+#define REG_A5XX_PC_GS_LAYERED 0x0000e38d
#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
+static inline uint32_t A5XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A5XX_PC_GS_PARAM_MAX_VERTICES__MASK;
+}
+#define A5XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
+#define A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
+static inline uint32_t A5XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A5XX_PC_GS_PARAM_INVOCATIONS__MASK;
+}
+#define A5XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A5XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A5XX_PC_GS_PARAM_PRIMTYPE__MASK;
+}
#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
+static inline uint32_t A5XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A5XX_PC_HS_PARAM_VERTICES_OUT__MASK;
+}
+#define A5XX_PC_HS_PARAM_SPACING__MASK 0x00600000
+#define A5XX_PC_HS_PARAM_SPACING__SHIFT 21
+static inline uint32_t A5XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_SPACING__SHIFT) & A5XX_PC_HS_PARAM_SPACING__MASK;
+}
+#define A5XX_PC_HS_PARAM_CW 0x00800000
+#define A5XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
@@ -3667,10 +3811,40 @@ static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
{
return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
}
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
#define REG_A5XX_VFD_CONTROL_4 0x0000e404
@@ -3700,12 +3874,18 @@ static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
}
#define A5XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
-#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
{
return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
#define A5XX_VFD_DECODE_INSTR_UNK30 0x40000000
#define A5XX_VFD_DECODE_INSTR_FLOAT 0x80000000
@@ -3960,6 +4140,7 @@ static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
#define A5XX_SP_BLEND_CNTL_ENABLED 0x00000001
#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A5XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
@@ -4001,16 +4182,12 @@ static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
}
+#define A5XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A5XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
#define A5XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
-#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
-
-#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
-
-#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
-
#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0
#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008
#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3
@@ -4039,7 +4216,39 @@ static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
return ((val) << A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
}
-#define REG_A5XX_UNKNOWN_E600 0x0000e600
+#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
+
+#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
+
+#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
+
+#define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
#define REG_A5XX_UNKNOWN_E602 0x0000e602
@@ -4047,13 +4256,67 @@ static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604
+#define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
#define REG_A5XX_UNKNOWN_E62B 0x0000e62b
#define REG_A5XX_SP_DS_OBJ_START_LO 0x0000e62c
#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d
-#define REG_A5XX_UNKNOWN_E640 0x0000e640
+#define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
#define REG_A5XX_UNKNOWN_E65B 0x0000e65b
@@ -4173,6 +4436,18 @@ static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
{
return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
@@ -4375,34 +4650,52 @@ static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
}
#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
-#define A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_SIZE_X(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
-#define A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
-#define A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
@@ -4468,6 +4761,8 @@ static inline uint32_t A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
#define REG_A5XX_HLSQ_CS_INSTRLEN 0x0000e7dd
+#define REG_A5XX_RB_2D_BLIT_CNTL 0x00002100
+
#define REG_A5XX_RB_2D_SRC_SOLID_DW0 0x00002101
#define REG_A5XX_RB_2D_SRC_SOLID_DW1 0x00002102
@@ -4483,12 +4778,19 @@ static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK;
+}
#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_RB_2D_SRC_INFO_FLAGS 0x00001000
#define REG_A5XX_RB_2D_SRC_LO 0x00002108
@@ -4515,12 +4817,19 @@ static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_RB_2D_DST_INFO_FLAGS 0x00001000
#define REG_A5XX_RB_2D_DST_LO 0x00002111
@@ -4548,6 +4857,8 @@ static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
+#define REG_A5XX_GRAS_2D_BLIT_CNTL 0x00002180
+
#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
@@ -4555,12 +4866,19 @@ static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt va
{
return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK;
+}
#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_GRAS_2D_SRC_INFO_FLAGS 0x00001000
#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182
#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -4569,12 +4887,19 @@ static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt va
{
return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK;
+}
#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_GRAS_2D_DST_INFO_FLAGS 0x00001000
#define REG_A5XX_UNKNOWN_2100 0x00002100
@@ -4698,6 +5023,12 @@ static inline uint32_t A5XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A5XX_TEX_CONST_0_MIPLVLS__SHIFT) & A5XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A5XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A5XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A5XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SAMPLES__SHIFT) & A5XX_TEX_CONST_0_SAMPLES__MASK;
+}
#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
#define A5XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
@@ -4788,5 +5119,81 @@ static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
#define REG_A5XX_TEX_CONST_11 0x0000000b
+#define REG_A5XX_SSBO_0_0 0x00000000
+#define A5XX_SSBO_0_0_BASE_LO__MASK 0xffffffe0
+#define A5XX_SSBO_0_0_BASE_LO__SHIFT 5
+static inline uint32_t A5XX_SSBO_0_0_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_SSBO_0_0_BASE_LO__SHIFT) & A5XX_SSBO_0_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_0_1 0x00000001
+#define A5XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A5XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_1_PITCH__SHIFT) & A5XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_2 0x00000002
+#define A5XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A5XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A5XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_3 0x00000003
+#define A5XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A5XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_3_CPP__SHIFT) & A5XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A5XX_SSBO_1_0 0x00000000
+#define A5XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A5XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A5XX_SSBO_1_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_SSBO_1_0_FMT__SHIFT) & A5XX_SSBO_1_0_FMT__MASK;
+}
+#define A5XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A5XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_0_WIDTH__SHIFT) & A5XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A5XX_SSBO_1_1 0x00000001
+#define A5XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A5XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A5XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_HEIGHT__SHIFT) & A5XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A5XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A5XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_DEPTH__SHIFT) & A5XX_SSBO_1_1_DEPTH__MASK;
+}
+
+#define REG_A5XX_SSBO_2_0 0x00000000
+#define A5XX_SSBO_2_0_BASE_LO__MASK 0xffffffff
+#define A5XX_SSBO_2_0_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_0_BASE_LO__SHIFT) & A5XX_SSBO_2_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_2_1 0x00000001
+#define A5XX_SSBO_2_1_BASE_HI__MASK 0xffffffff
+#define A5XX_SSBO_2_1_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK;
+}
+
#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index d39400e5bc42..ab1d9308c311 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
@@ -19,6 +20,8 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"
@@ -91,12 +94,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
mem_region, mem_phys, mem_size, NULL);
} else {
- char newname[strlen("qcom/") + strlen(fwname) + 1];
+ char *newname;
- sprintf(newname, "qcom/%s", fwname);
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
}
if (ret)
goto out;
@@ -1123,8 +1127,9 @@ static const u32 a5xx_registers[] = {
0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
- 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
- 0xB9A0, 0xB9BF, ~0
+ 0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
+ 0xA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
+ 0xAC60, 0xAC60, ~0,
};
static void a5xx_dump(struct msm_gpu *gpu)
@@ -1195,19 +1200,231 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
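+/*
+ * Scratch buffer used to drive the CP crashdumper: 'ptr' is the kernel
+ * mapping of the GEM object 'bo', and 'iova' is its GPU address.
+ */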
+struct a5xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
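+/* a5xx-specific GPU state: the generic adreno state plus the HLSQ registers */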
+struct a5xx_gpu_state {
+ struct msm_gpu_state base;
+ u32 *hlsqregs;
+};
+
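+/*
+ * Poll a GPU register until 'cond' is true or 'timeout' (usecs) expires.
+ * Register offsets are in dwords, so shift by 2 to get the byte offset
+ * that readl_poll_timeout() expects.
+ */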
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
+static int a5xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ &dumper->bo, &dumper->iova);
- /*
- * Temporarily disable hardware clock gating before going into
- * adreno_show to avoid issues while reading the registers
- */
+ if (IS_ERR(dumper->ptr))
+ return PTR_ERR(dumper->ptr);
+
+ return 0;
+}
+
+static void a5xx_crashdumper_free(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ msm_gem_put_iova(dumper->bo, gpu->aspace);
+ msm_gem_put_vaddr(dumper->bo);
+
+ drm_gem_object_unreference(dumper->bo);
+}
+
+static int a5xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ u32 val;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+ REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
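+ /* Wait for the dumper to signal completion (bit 2 of the CNTL reg) */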
+ return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
+ val & 0x04, 100, 10000);
+}
+
+/*
+ * This is the list of registers that need to be read through the HLSQ
+ * aperture with the help of the crashdumper. They are not nominally
+ * accessible from the CPU on a secure platform.
+ * the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+ { 0x35, 0xe00, 0x32 }, /* HLSQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xe780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xef80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0ec0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
+ { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
+ { 0x3a, 0x0f00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xe700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xef00, 0x80 }, /* TP 3D context 1 */
+};
+
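+/*
+ * The crashdump script is a list of u64 pairs: the first word holds the
+ * value to write (or the destination IOVA for a read-back), the second
+ * packs the register offset into the upper bits along with the dword
+ * count; bit 21 appears to mark the entry as a register write. Two zero
+ * words terminate the script.
+ */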
+static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
+ struct a5xx_gpu_state *a5xx_state)
+{
+ struct a5xx_crashdumper dumper = { 0 };
+ u32 offset, count = 0;
+ u64 *ptr;
+ int i;
+
+ if (a5xx_crashdumper_init(gpu, &dumper))
+ return;
+
+ /* The script will be written at offset 0 */
+ ptr = dumper.ptr;
+
+ /* Start writing the data at offset 256k */
+ offset = dumper.iova + (256 * SZ_1K);
+
+ /* Count how many additional registers to get from the HLSQ aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ count += a5xx_hlsq_aperture_regs[i].count;
+
+ a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ /* Build the crashdump script */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 type = a5xx_hlsq_aperture_regs[i].type;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ /* Write the register to select the desired bank */
+ *ptr++ = ((u64) type << 8);
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
+ (1 << 21) | 1;
+
+ *ptr++ = offset;
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
+ | c;
+
+ offset += c * sizeof(u32);
+ }
+
+ /* Write two zeros to close off the script */
+ *ptr++ = 0;
+ *ptr++ = 0;
+
+ if (a5xx_crashdumper_run(gpu, &dumper)) {
+ kfree(a5xx_state->hlsqregs);
+ a5xx_crashdumper_free(gpu, &dumper);
+ return;
+ }
+
+ /* Copy the data from the crashdumper to the state */
+ memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
+ count * sizeof(u32));
+
+ a5xx_crashdumper_free(gpu, &dumper);
+}
+
+static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
+ GFP_KERNEL);
+
+ if (!a5xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ /* Temporarily disable hardware clock gating before reading the hw */
a5xx_set_hwcg(gpu, false);
- adreno_show(gpu, m);
+
+ /* First get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &(a5xx_state->base));
+
+ a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+ /* Get the HLSQ regs with the help of the crashdumper */
+ a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
+
a5xx_set_hwcg(gpu, true);
+
+ return &a5xx_state->base;
+}
+
+static void a5xx_gpu_state_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ kfree(a5xx_state->hlsqregs);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a5xx_state);
+}
+
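+/*
+ * Drop a reference on the captured state; the state (including the HLSQ
+ * register dump) is freed once the last reference is put.
+ */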
+int a5xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a5xx_gpu_state_destroy);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ int i, j;
+ u32 pos = 0;
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ adreno_show(gpu, state, p);
+
+ /* Dump the additional a5xx HLSQ registers */
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ drm_printf(p, "registers-hlsq:\n");
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ for (j = 0; j < c; j++, pos++, o++) {
+ /*
+ * To keep the crashdump simple we pull the entire range
+ * for each register type, but not all of the registers
+ * in the range are valid. Fortunately the invalid
+ * registers stick out like a sore thumb with a value of
+ * 0xdeadbeef.
+ */
+ if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ o << 2, a5xx_state->hlsqregs[pos]);
+ }
+ }
}
#endif
@@ -1239,11 +1456,15 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
.debugfs_init = a5xx_debugfs_init,
#endif
.gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = a5xx_gpu_state_put,
},
.get_timestamp = a5xx_get_timestamp,
};
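
Taken together, the new hooks give the msm core a symmetric way to capture,
print and release GPU state. A minimal sketch of the expected calling
pattern (the core-side caller is not part of this patch; only the hook
signatures above are, so 'gpu' and the drm_printer 'p' are assumed here):

	struct msm_gpu_state *state;

	state = gpu->funcs->gpu_state_get(gpu);		/* refcounted snapshot */
	if (!IS_ERR_OR_NULL(state)) {
		gpu->funcs->show(gpu, state, p);	/* dump captured state */
		gpu->funcs->gpu_state_put(state);	/* drop the reference */
	}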
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
new file mode 100644
index 000000000000..87eab51f7000
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -0,0 +1,4562 @@
+#ifndef A6XX_XML
+#define A6XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a6xx_color_fmt {
+ RB6_A8_UNORM = 2,
+ RB6_R8_UNORM = 3,
+ RB6_R8_SNORM = 4,
+ RB6_R8_UINT = 5,
+ RB6_R8_SINT = 6,
+ RB6_R4G4B4A4_UNORM = 8,
+ RB6_R5G5B5A1_UNORM = 10,
+ RB6_R5G6B5_UNORM = 14,
+ RB6_R8G8_UNORM = 15,
+ RB6_R8G8_SNORM = 16,
+ RB6_R8G8_UINT = 17,
+ RB6_R8G8_SINT = 18,
+ RB6_R16_UNORM = 21,
+ RB6_R16_SNORM = 22,
+ RB6_R16_FLOAT = 23,
+ RB6_R16_UINT = 24,
+ RB6_R16_SINT = 25,
+ RB6_R8G8B8A8_UNORM = 48,
+ RB6_R8G8B8_UNORM = 49,
+ RB6_R8G8B8A8_SNORM = 50,
+ RB6_R8G8B8A8_UINT = 51,
+ RB6_R8G8B8A8_SINT = 52,
+ RB6_R10G10B10A2_UNORM = 55,
+ RB6_R10G10B10A2_UINT = 58,
+ RB6_R11G11B10_FLOAT = 66,
+ RB6_R16G16_UNORM = 67,
+ RB6_R16G16_SNORM = 68,
+ RB6_R16G16_FLOAT = 69,
+ RB6_R16G16_UINT = 70,
+ RB6_R16G16_SINT = 71,
+ RB6_R32_FLOAT = 74,
+ RB6_R32_UINT = 75,
+ RB6_R32_SINT = 76,
+ RB6_R16G16B16A16_UNORM = 96,
+ RB6_R16G16B16A16_SNORM = 97,
+ RB6_R16G16B16A16_FLOAT = 98,
+ RB6_R16G16B16A16_UINT = 99,
+ RB6_R16G16B16A16_SINT = 100,
+ RB6_R32G32_FLOAT = 103,
+ RB6_R32G32_UINT = 104,
+ RB6_R32G32_SINT = 105,
+ RB6_R32G32B32A32_FLOAT = 130,
+ RB6_R32G32B32A32_UINT = 131,
+ RB6_R32G32B32A32_SINT = 132,
+ RB6_X8Z24_UNORM = 160,
+};
+
+enum a6xx_tile_mode {
+ TILE6_LINEAR = 0,
+ TILE6_2 = 2,
+ TILE6_3 = 3,
+};
+
+enum a6xx_vtx_fmt {
+ VFMT6_8_UNORM = 3,
+ VFMT6_8_SNORM = 4,
+ VFMT6_8_UINT = 5,
+ VFMT6_8_SINT = 6,
+ VFMT6_8_8_UNORM = 15,
+ VFMT6_8_8_SNORM = 16,
+ VFMT6_8_8_UINT = 17,
+ VFMT6_8_8_SINT = 18,
+ VFMT6_16_UNORM = 21,
+ VFMT6_16_SNORM = 22,
+ VFMT6_16_FLOAT = 23,
+ VFMT6_16_UINT = 24,
+ VFMT6_16_SINT = 25,
+ VFMT6_8_8_8_UNORM = 33,
+ VFMT6_8_8_8_SNORM = 34,
+ VFMT6_8_8_8_UINT = 35,
+ VFMT6_8_8_8_SINT = 36,
+ VFMT6_8_8_8_8_UNORM = 48,
+ VFMT6_8_8_8_8_SNORM = 50,
+ VFMT6_8_8_8_8_UINT = 51,
+ VFMT6_8_8_8_8_SINT = 52,
+ VFMT6_10_10_10_2_UNORM = 54,
+ VFMT6_10_10_10_2_SNORM = 57,
+ VFMT6_10_10_10_2_UINT = 58,
+ VFMT6_10_10_10_2_SINT = 59,
+ VFMT6_11_11_10_FLOAT = 66,
+ VFMT6_16_16_UNORM = 67,
+ VFMT6_16_16_SNORM = 68,
+ VFMT6_16_16_FLOAT = 69,
+ VFMT6_16_16_UINT = 70,
+ VFMT6_16_16_SINT = 71,
+ VFMT6_32_UNORM = 72,
+ VFMT6_32_SNORM = 73,
+ VFMT6_32_FLOAT = 74,
+ VFMT6_32_UINT = 75,
+ VFMT6_32_SINT = 76,
+ VFMT6_32_FIXED = 77,
+ VFMT6_16_16_16_UNORM = 88,
+ VFMT6_16_16_16_SNORM = 89,
+ VFMT6_16_16_16_FLOAT = 90,
+ VFMT6_16_16_16_UINT = 91,
+ VFMT6_16_16_16_SINT = 92,
+ VFMT6_16_16_16_16_UNORM = 96,
+ VFMT6_16_16_16_16_SNORM = 97,
+ VFMT6_16_16_16_16_FLOAT = 98,
+ VFMT6_16_16_16_16_UINT = 99,
+ VFMT6_16_16_16_16_SINT = 100,
+ VFMT6_32_32_UNORM = 101,
+ VFMT6_32_32_SNORM = 102,
+ VFMT6_32_32_FLOAT = 103,
+ VFMT6_32_32_UINT = 104,
+ VFMT6_32_32_SINT = 105,
+ VFMT6_32_32_FIXED = 106,
+ VFMT6_32_32_32_UNORM = 112,
+ VFMT6_32_32_32_SNORM = 113,
+ VFMT6_32_32_32_UINT = 114,
+ VFMT6_32_32_32_SINT = 115,
+ VFMT6_32_32_32_FLOAT = 116,
+ VFMT6_32_32_32_FIXED = 117,
+ VFMT6_32_32_32_32_UNORM = 128,
+ VFMT6_32_32_32_32_SNORM = 129,
+ VFMT6_32_32_32_32_FLOAT = 130,
+ VFMT6_32_32_32_32_UINT = 131,
+ VFMT6_32_32_32_32_SINT = 132,
+ VFMT6_32_32_32_32_FIXED = 133,
+};
+
+enum a6xx_tex_fmt {
+ TFMT6_A8_UNORM = 2,
+ TFMT6_8_UNORM = 3,
+ TFMT6_8_SNORM = 4,
+ TFMT6_8_UINT = 5,
+ TFMT6_8_SINT = 6,
+ TFMT6_4_4_4_4_UNORM = 8,
+ TFMT6_5_5_5_1_UNORM = 10,
+ TFMT6_5_6_5_UNORM = 14,
+ TFMT6_8_8_UNORM = 15,
+ TFMT6_8_8_SNORM = 16,
+ TFMT6_8_8_UINT = 17,
+ TFMT6_8_8_SINT = 18,
+ TFMT6_L8_A8_UNORM = 19,
+ TFMT6_16_UNORM = 21,
+ TFMT6_16_SNORM = 22,
+ TFMT6_16_FLOAT = 23,
+ TFMT6_16_UINT = 24,
+ TFMT6_16_SINT = 25,
+ TFMT6_8_8_8_8_UNORM = 48,
+ TFMT6_8_8_8_UNORM = 49,
+ TFMT6_8_8_8_8_SNORM = 50,
+ TFMT6_8_8_8_8_UINT = 51,
+ TFMT6_8_8_8_8_SINT = 52,
+ TFMT6_9_9_9_E5_FLOAT = 53,
+ TFMT6_10_10_10_2_UNORM = 54,
+ TFMT6_10_10_10_2_UINT = 58,
+ TFMT6_11_11_10_FLOAT = 66,
+ TFMT6_16_16_UNORM = 67,
+ TFMT6_16_16_SNORM = 68,
+ TFMT6_16_16_FLOAT = 69,
+ TFMT6_16_16_UINT = 70,
+ TFMT6_16_16_SINT = 71,
+ TFMT6_32_FLOAT = 74,
+ TFMT6_32_UINT = 75,
+ TFMT6_32_SINT = 76,
+ TFMT6_16_16_16_16_UNORM = 96,
+ TFMT6_16_16_16_16_SNORM = 97,
+ TFMT6_16_16_16_16_FLOAT = 98,
+ TFMT6_16_16_16_16_UINT = 99,
+ TFMT6_16_16_16_16_SINT = 100,
+ TFMT6_32_32_FLOAT = 103,
+ TFMT6_32_32_UINT = 104,
+ TFMT6_32_32_SINT = 105,
+ TFMT6_32_32_32_UINT = 114,
+ TFMT6_32_32_32_SINT = 115,
+ TFMT6_32_32_32_FLOAT = 116,
+ TFMT6_32_32_32_32_FLOAT = 130,
+ TFMT6_32_32_32_32_UINT = 131,
+ TFMT6_32_32_32_32_SINT = 132,
+ TFMT6_X8Z24_UNORM = 160,
+ TFMT6_ETC2_RG11_UNORM = 171,
+ TFMT6_ETC2_RG11_SNORM = 172,
+ TFMT6_ETC2_R11_UNORM = 173,
+ TFMT6_ETC2_R11_SNORM = 174,
+ TFMT6_ETC1 = 175,
+ TFMT6_ETC2_RGB8 = 176,
+ TFMT6_ETC2_RGBA8 = 177,
+ TFMT6_ETC2_RGB8A1 = 178,
+ TFMT6_DXT1 = 179,
+ TFMT6_DXT3 = 180,
+ TFMT6_DXT5 = 181,
+ TFMT6_RGTC1_UNORM = 183,
+ TFMT6_RGTC1_SNORM = 184,
+ TFMT6_RGTC2_UNORM = 187,
+ TFMT6_RGTC2_SNORM = 188,
+ TFMT6_BPTC_UFLOAT = 190,
+ TFMT6_BPTC_FLOAT = 191,
+ TFMT6_BPTC = 192,
+ TFMT6_ASTC_4x4 = 193,
+ TFMT6_ASTC_5x4 = 194,
+ TFMT6_ASTC_5x5 = 195,
+ TFMT6_ASTC_6x5 = 196,
+ TFMT6_ASTC_6x6 = 197,
+ TFMT6_ASTC_8x5 = 198,
+ TFMT6_ASTC_8x6 = 199,
+ TFMT6_ASTC_8x8 = 200,
+ TFMT6_ASTC_10x5 = 201,
+ TFMT6_ASTC_10x6 = 202,
+ TFMT6_ASTC_10x8 = 203,
+ TFMT6_ASTC_10x10 = 204,
+ TFMT6_ASTC_12x10 = 205,
+ TFMT6_ASTC_12x12 = 206,
+};
+
+enum a6xx_tex_fetchsize {
+ TFETCH6_1_BYTE = 0,
+ TFETCH6_2_BYTE = 1,
+ TFETCH6_4_BYTE = 2,
+ TFETCH6_8_BYTE = 3,
+ TFETCH6_16_BYTE = 4,
+};
+
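+/* Usage sketch, not part of the generated header: the fetch-size encoding
+ * above is the log2 of the byte count (1_BYTE = 0 ... 16_BYTE = 4), so a
+ * hypothetical helper can recover the size in bytes with a shift.
+ */
+static inline uint32_t a6xx_example_fetchsize_bytes(enum a6xx_tex_fetchsize fs)
+{
+	return 1u << fs;	/* e.g. TFETCH6_4_BYTE (2) -> 4 bytes */
+}
+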
+enum a6xx_depth_format {
+ DEPTH6_NONE = 0,
+ DEPTH6_16 = 1,
+ DEPTH6_24_8 = 2,
+ DEPTH6_32 = 4,
+};
+
+enum a6xx_cp_perfcounter_select {
+ PERF_CP_ALWAYS_COUNT = 0,
+};
+
+enum a6xx_tex_filter {
+ A6XX_TEX_NEAREST = 0,
+ A6XX_TEX_LINEAR = 1,
+ A6XX_TEX_ANISO = 2,
+};
+
+enum a6xx_tex_clamp {
+ A6XX_TEX_REPEAT = 0,
+ A6XX_TEX_CLAMP_TO_EDGE = 1,
+ A6XX_TEX_MIRROR_REPEAT = 2,
+ A6XX_TEX_CLAMP_TO_BORDER = 3,
+ A6XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a6xx_tex_aniso {
+ A6XX_TEX_ANISO_1 = 0,
+ A6XX_TEX_ANISO_2 = 1,
+ A6XX_TEX_ANISO_4 = 2,
+ A6XX_TEX_ANISO_8 = 3,
+ A6XX_TEX_ANISO_16 = 4,
+};
+
+enum a6xx_tex_swiz {
+ A6XX_TEX_X = 0,
+ A6XX_TEX_Y = 1,
+ A6XX_TEX_Z = 2,
+ A6XX_TEX_W = 3,
+ A6XX_TEX_ZERO = 4,
+ A6XX_TEX_ONE = 5,
+};
+
+enum a6xx_tex_type {
+ A6XX_TEX_1D = 0,
+ A6XX_TEX_2D = 1,
+ A6XX_TEX_CUBE = 2,
+ A6XX_TEX_3D = 3,
+};
+
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR 0x00000002
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW 0x00000040
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A6XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A6XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A6XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A6XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A6XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT 0x00800000
+#define A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+#define A6XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A6XX_CP_INT_CP_UCODE_ERROR 0x00000002
+#define A6XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A6XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define A6XX_CP_INT_CP_VSD_PARITY_ERROR 0x00000040
+#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR 0x00000080
+#define REG_A6XX_CP_RB_BASE 0x00000800
+
+#define REG_A6XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A6XX_CP_RB_CNTL 0x00000802
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_LO 0x00000804
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A6XX_CP_RB_RPTR 0x00000806
+
+#define REG_A6XX_CP_RB_WPTR 0x00000807
+
+#define REG_A6XX_CP_SQE_CNTL 0x00000808
+
+#define REG_A6XX_CP_HW_FAULT 0x00000821
+
+#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823
+
+#define REG_A6XX_CP_PROTECT_STATUS 0x00000824
+
+#define REG_A6XX_CP_SQE_INSTR_BASE_LO 0x00000830
+
+#define REG_A6XX_CP_SQE_INSTR_BASE_HI 0x00000831
+
+#define REG_A6XX_CP_MISC_CNTL 0x00000840
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
+
+#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3
+
+#define REG_A6XX_CP_CHICKEN_DBG 0x00000841
+
+#define REG_A6XX_CP_ADDR_MODE_CNTL 0x00000842
+
+#define REG_A6XX_CP_DBG_ECO_CNTL 0x00000843
+
+#define REG_A6XX_CP_PROTECT_CNTL 0x0000084f
+
+static inline uint32_t REG_A6XX_CP_SCRATCH(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0003ffff
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A6XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A6XX_CP_PROTECT_REG_MASK_LEN__MASK 0x7ffc0000
+#define A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT 18
+static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A6XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A6XX_CP_PROTECT_REG_READ 0x80000000
+
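+/* Usage sketch, not part of the generated header: a CP_PROTECT entry is
+ * composed with the pack helpers above and written to the per-index
+ * register returned by REG_A6XX_CP_PROTECT(). "regs" is assumed to be the
+ * dword-indexed MMIO mapping of the GPU; the base and length values below
+ * are placeholders, not a real protection table.
+ */
+static inline void a6xx_example_protect_entry(volatile uint32_t *regs)
+{
+	uint32_t entry = A6XX_CP_PROTECT_REG_BASE_ADDR(0x8c0) |	/* placeholder base */
+			 A6XX_CP_PROTECT_REG_MASK_LEN(0x40) |	/* placeholder length */
+			 A6XX_CP_PROTECT_REG_READ;
+
+	regs[REG_A6XX_CP_PROTECT(0)] = entry;
+}
+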
+#define REG_A6XX_CP_CONTEXT_SWITCH_CNTL 0x000008a0
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x000008a1
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x000008a2
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO 0x000008a3
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI 0x000008a4
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO 0x000008a5
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI 0x000008a6
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO 0x000008a7
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x000008a8
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_0 0x000008d0
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_1 0x000008d1
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_2 0x000008d2
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_3 0x000008d3
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_4 0x000008d4
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_5 0x000008d5
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_6 0x000008d6
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_7 0x000008d7
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_8 0x000008d8
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_9 0x000008d9
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_10 0x000008da
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_11 0x000008db
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_12 0x000008dc
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_13 0x000008dd
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_LO 0x00000900
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_HI 0x00000901
+
+#define REG_A6XX_CP_CRASH_DUMP_CNTL 0x00000902
+
+#define REG_A6XX_CP_CRASH_DUMP_STATUS 0x00000903
+
+#define REG_A6XX_CP_SQE_STAT_ADDR 0x00000908
+
+#define REG_A6XX_CP_SQE_STAT_DATA 0x00000909
+
+#define REG_A6XX_CP_DRAW_STATE_ADDR 0x0000090a
+
+#define REG_A6XX_CP_DRAW_STATE_DATA 0x0000090b
+
+#define REG_A6XX_CP_ROQ_DBG_ADDR 0x0000090c
+
+#define REG_A6XX_CP_ROQ_DBG_DATA 0x0000090d
+
+#define REG_A6XX_CP_MEM_POOL_DBG_ADDR 0x0000090e
+
+#define REG_A6XX_CP_MEM_POOL_DBG_DATA 0x0000090f
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_ADDR 0x00000910
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_DATA 0x00000911
+
+#define REG_A6XX_CP_IB1_BASE 0x00000928
+
+#define REG_A6XX_CP_IB1_BASE_HI 0x00000929
+
+#define REG_A6XX_CP_IB1_REM_SIZE 0x0000092a
+
+#define REG_A6XX_CP_IB2_BASE 0x0000092b
+
+#define REG_A6XX_CP_IB2_BASE_HI 0x0000092c
+
+#define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_LO 0x00000980
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_HI 0x00000981
+
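+/* Usage sketch, not part of the generated header: 64-bit counters are
+ * exposed as LO/HI register pairs. Re-reading HI guards against a carry
+ * between the two 32-bit reads. "regs" is assumed to be the dword-indexed
+ * MMIO mapping of the GPU.
+ */
+static inline uint64_t a6xx_example_read_alwayson(volatile uint32_t *regs)
+{
+	uint32_t hi, lo;
+
+	do {
+		hi = regs[REG_A6XX_CP_ALWAYS_ON_COUNTER_HI];
+		lo = regs[REG_A6XX_CP_ALWAYS_ON_COUNTER_LO];
+	} while (regs[REG_A6XX_CP_ALWAYS_ON_COUNTER_HI] != hi);
+
+	return ((uint64_t)hi << 32) | lo;
+}
+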
+#define REG_A6XX_CP_AHB_CNTL 0x0000098d
+
+#define REG_A6XX_CP_APERTURE_CNTL_HOST 0x00000a00
+
+#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03
+
+#define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01
+
+#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201
+
+#define REG_A6XX_RBBM_STATUS 0x00000210
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x00800000
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x00400000
+#define A6XX_RBBM_STATUS_HLSQ_BUSY 0x00200000
+#define A6XX_RBBM_STATUS_VSC_BUSY 0x00100000
+#define A6XX_RBBM_STATUS_TPL1_BUSY 0x00080000
+#define A6XX_RBBM_STATUS_SP_BUSY 0x00040000
+#define A6XX_RBBM_STATUS_UCHE_BUSY 0x00020000
+#define A6XX_RBBM_STATUS_VPC_BUSY 0x00010000
+#define A6XX_RBBM_STATUS_VFD_BUSY 0x00008000
+#define A6XX_RBBM_STATUS_TESS_BUSY 0x00004000
+#define A6XX_RBBM_STATUS_PC_VSD_BUSY 0x00002000
+#define A6XX_RBBM_STATUS_PC_DCALL_BUSY 0x00001000
+#define A6XX_RBBM_STATUS_COM_DCOM_BUSY 0x00000800
+#define A6XX_RBBM_STATUS_LRZ_BUSY 0x00000400
+#define A6XX_RBBM_STATUS_A2D_BUSY 0x00000200
+#define A6XX_RBBM_STATUS_CCU_BUSY 0x00000100
+#define A6XX_RBBM_STATUS_RB_BUSY 0x00000080
+#define A6XX_RBBM_STATUS_RAS_BUSY 0x00000040
+#define A6XX_RBBM_STATUS_TSE_BUSY 0x00000020
+#define A6XX_RBBM_STATUS_VBIF_BUSY 0x00000010
+#define A6XX_RBBM_STATUS_GFX_DBGC_BUSY 0x00000008
+#define A6XX_RBBM_STATUS_CP_BUSY 0x00000004
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER 0x00000002
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001
+
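+/* Usage sketch, not part of the generated header: an idle test reads
+ * RBBM_STATUS and checks that no busy bit other than the AHB-ignoring
+ * aggregate is set; treat the exact mask choice here as an assumption.
+ */
+static inline int a6xx_example_gpu_idle(volatile uint32_t *regs)
+{
+	return !(regs[REG_A6XX_RBBM_STATUS] &
+		 ~A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB);
+}
+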
+#define REG_A6XX_RBBM_STATUS3 0x00000213
+
+#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215
+
+#define REG_A6XX_RBBM_PERFCTR_CP_0_LO 0x00000400
+
+#define REG_A6XX_RBBM_PERFCTR_CP_0_HI 0x00000401
+
+#define REG_A6XX_RBBM_PERFCTR_CP_1_LO 0x00000402
+
+#define REG_A6XX_RBBM_PERFCTR_CP_1_HI 0x00000403
+
+#define REG_A6XX_RBBM_PERFCTR_CP_2_LO 0x00000404
+
+#define REG_A6XX_RBBM_PERFCTR_CP_2_HI 0x00000405
+
+#define REG_A6XX_RBBM_PERFCTR_CP_3_LO 0x00000406
+
+#define REG_A6XX_RBBM_PERFCTR_CP_3_HI 0x00000407
+
+#define REG_A6XX_RBBM_PERFCTR_CP_4_LO 0x00000408
+
+#define REG_A6XX_RBBM_PERFCTR_CP_4_HI 0x00000409
+
+#define REG_A6XX_RBBM_PERFCTR_CP_5_LO 0x0000040a
+
+#define REG_A6XX_RBBM_PERFCTR_CP_5_HI 0x0000040b
+
+#define REG_A6XX_RBBM_PERFCTR_CP_6_LO 0x0000040c
+
+#define REG_A6XX_RBBM_PERFCTR_CP_6_HI 0x0000040d
+
+#define REG_A6XX_RBBM_PERFCTR_CP_7_LO 0x0000040e
+
+#define REG_A6XX_RBBM_PERFCTR_CP_7_HI 0x0000040f
+
+#define REG_A6XX_RBBM_PERFCTR_CP_8_LO 0x00000410
+
+#define REG_A6XX_RBBM_PERFCTR_CP_8_HI 0x00000411
+
+#define REG_A6XX_RBBM_PERFCTR_CP_9_LO 0x00000412
+
+#define REG_A6XX_RBBM_PERFCTR_CP_9_HI 0x00000413
+
+#define REG_A6XX_RBBM_PERFCTR_CP_10_LO 0x00000414
+
+#define REG_A6XX_RBBM_PERFCTR_CP_10_HI 0x00000415
+
+#define REG_A6XX_RBBM_PERFCTR_CP_11_LO 0x00000416
+
+#define REG_A6XX_RBBM_PERFCTR_CP_11_HI 0x00000417
+
+#define REG_A6XX_RBBM_PERFCTR_CP_12_LO 0x00000418
+
+#define REG_A6XX_RBBM_PERFCTR_CP_12_HI 0x00000419
+
+#define REG_A6XX_RBBM_PERFCTR_CP_13_LO 0x0000041a
+
+#define REG_A6XX_RBBM_PERFCTR_CP_13_HI 0x0000041b
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_0_LO 0x0000041c
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_0_HI 0x0000041d
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_1_LO 0x0000041e
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_1_HI 0x0000041f
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_2_LO 0x00000420
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_2_HI 0x00000421
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_3_LO 0x00000422
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_3_HI 0x00000423
+
+#define REG_A6XX_RBBM_PERFCTR_PC_0_LO 0x00000424
+
+#define REG_A6XX_RBBM_PERFCTR_PC_0_HI 0x00000425
+
+#define REG_A6XX_RBBM_PERFCTR_PC_1_LO 0x00000426
+
+#define REG_A6XX_RBBM_PERFCTR_PC_1_HI 0x00000427
+
+#define REG_A6XX_RBBM_PERFCTR_PC_2_LO 0x00000428
+
+#define REG_A6XX_RBBM_PERFCTR_PC_2_HI 0x00000429
+
+#define REG_A6XX_RBBM_PERFCTR_PC_3_LO 0x0000042a
+
+#define REG_A6XX_RBBM_PERFCTR_PC_3_HI 0x0000042b
+
+#define REG_A6XX_RBBM_PERFCTR_PC_4_LO 0x0000042c
+
+#define REG_A6XX_RBBM_PERFCTR_PC_4_HI 0x0000042d
+
+#define REG_A6XX_RBBM_PERFCTR_PC_5_LO 0x0000042e
+
+#define REG_A6XX_RBBM_PERFCTR_PC_5_HI 0x0000042f
+
+#define REG_A6XX_RBBM_PERFCTR_PC_6_LO 0x00000430
+
+#define REG_A6XX_RBBM_PERFCTR_PC_6_HI 0x00000431
+
+#define REG_A6XX_RBBM_PERFCTR_PC_7_LO 0x00000432
+
+#define REG_A6XX_RBBM_PERFCTR_PC_7_HI 0x00000433
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_0_LO 0x00000434
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_0_HI 0x00000435
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_1_LO 0x00000436
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_1_HI 0x00000437
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_2_LO 0x00000438
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_2_HI 0x00000439
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_3_LO 0x0000043a
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_3_HI 0x0000043b
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_4_LO 0x0000043c
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_4_HI 0x0000043d
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_5_LO 0x0000043e
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_5_HI 0x0000043f
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_6_LO 0x00000440
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_6_HI 0x00000441
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_7_LO 0x00000442
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_7_HI 0x00000443
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_0_LO 0x00000444
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_0_HI 0x00000445
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_1_LO 0x00000446
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_1_HI 0x00000447
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_2_LO 0x00000448
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_2_HI 0x00000449
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_3_LO 0x0000044a
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_3_HI 0x0000044b
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_4_LO 0x0000044c
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_4_HI 0x0000044d
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_5_LO 0x0000044e
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_5_HI 0x0000044f
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_0_LO 0x00000450
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_0_HI 0x00000451
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_1_LO 0x00000452
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_1_HI 0x00000453
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_2_LO 0x00000454
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_2_HI 0x00000455
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_3_LO 0x00000456
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_3_HI 0x00000457
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_4_LO 0x00000458
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_4_HI 0x00000459
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_5_LO 0x0000045a
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_5_HI 0x0000045b
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_0_LO 0x0000045c
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_0_HI 0x0000045d
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_1_LO 0x0000045e
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_1_HI 0x0000045f
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_2_LO 0x00000460
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_2_HI 0x00000461
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_3_LO 0x00000462
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_3_HI 0x00000463
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_4_LO 0x00000464
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_4_HI 0x00000465
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_0_LO 0x00000466
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_0_HI 0x00000467
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_1_LO 0x00000468
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_1_HI 0x00000469
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_2_LO 0x0000046a
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_2_HI 0x0000046b
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_3_LO 0x0000046c
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_3_HI 0x0000046d
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_0_LO 0x0000046e
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_0_HI 0x0000046f
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_1_LO 0x00000470
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_1_HI 0x00000471
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_2_LO 0x00000472
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_2_HI 0x00000473
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_3_LO 0x00000474
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_3_HI 0x00000475
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_0_LO 0x00000476
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_0_HI 0x00000477
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_1_LO 0x00000478
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_1_HI 0x00000479
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_2_LO 0x0000047a
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_2_HI 0x0000047b
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_3_LO 0x0000047c
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_3_HI 0x0000047d
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_4_LO 0x0000047e
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_4_HI 0x0000047f
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_5_LO 0x00000480
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_5_HI 0x00000481
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_6_LO 0x00000482
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_6_HI 0x00000483
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_7_LO 0x00000484
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_7_HI 0x00000485
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_8_LO 0x00000486
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_8_HI 0x00000487
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_9_LO 0x00000488
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_9_HI 0x00000489
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_10_LO 0x0000048a
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_10_HI 0x0000048b
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_11_LO 0x0000048c
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_11_HI 0x0000048d
+
+#define REG_A6XX_RBBM_PERFCTR_TP_0_LO 0x0000048e
+
+#define REG_A6XX_RBBM_PERFCTR_TP_0_HI 0x0000048f
+
+#define REG_A6XX_RBBM_PERFCTR_TP_1_LO 0x00000490
+
+#define REG_A6XX_RBBM_PERFCTR_TP_1_HI 0x00000491
+
+#define REG_A6XX_RBBM_PERFCTR_TP_2_LO 0x00000492
+
+#define REG_A6XX_RBBM_PERFCTR_TP_2_HI 0x00000493
+
+#define REG_A6XX_RBBM_PERFCTR_TP_3_LO 0x00000494
+
+#define REG_A6XX_RBBM_PERFCTR_TP_3_HI 0x00000495
+
+#define REG_A6XX_RBBM_PERFCTR_TP_4_LO 0x00000496
+
+#define REG_A6XX_RBBM_PERFCTR_TP_4_HI 0x00000497
+
+#define REG_A6XX_RBBM_PERFCTR_TP_5_LO 0x00000498
+
+#define REG_A6XX_RBBM_PERFCTR_TP_5_HI 0x00000499
+
+#define REG_A6XX_RBBM_PERFCTR_TP_6_LO 0x0000049a
+
+#define REG_A6XX_RBBM_PERFCTR_TP_6_HI 0x0000049b
+
+#define REG_A6XX_RBBM_PERFCTR_TP_7_LO 0x0000049c
+
+#define REG_A6XX_RBBM_PERFCTR_TP_7_HI 0x0000049d
+
+#define REG_A6XX_RBBM_PERFCTR_TP_8_LO 0x0000049e
+
+#define REG_A6XX_RBBM_PERFCTR_TP_8_HI 0x0000049f
+
+#define REG_A6XX_RBBM_PERFCTR_TP_9_LO 0x000004a0
+
+#define REG_A6XX_RBBM_PERFCTR_TP_9_HI 0x000004a1
+
+#define REG_A6XX_RBBM_PERFCTR_TP_10_LO 0x000004a2
+
+#define REG_A6XX_RBBM_PERFCTR_TP_10_HI 0x000004a3
+
+#define REG_A6XX_RBBM_PERFCTR_TP_11_LO 0x000004a4
+
+#define REG_A6XX_RBBM_PERFCTR_TP_11_HI 0x000004a5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_0_LO 0x000004a6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_0_HI 0x000004a7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_1_LO 0x000004a8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_1_HI 0x000004a9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_2_LO 0x000004aa
+
+#define REG_A6XX_RBBM_PERFCTR_SP_2_HI 0x000004ab
+
+#define REG_A6XX_RBBM_PERFCTR_SP_3_LO 0x000004ac
+
+#define REG_A6XX_RBBM_PERFCTR_SP_3_HI 0x000004ad
+
+#define REG_A6XX_RBBM_PERFCTR_SP_4_LO 0x000004ae
+
+#define REG_A6XX_RBBM_PERFCTR_SP_4_HI 0x000004af
+
+#define REG_A6XX_RBBM_PERFCTR_SP_5_LO 0x000004b0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_5_HI 0x000004b1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_6_LO 0x000004b2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_6_HI 0x000004b3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_7_LO 0x000004b4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_7_HI 0x000004b5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_8_LO 0x000004b6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_8_HI 0x000004b7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_9_LO 0x000004b8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_9_HI 0x000004b9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_10_LO 0x000004ba
+
+#define REG_A6XX_RBBM_PERFCTR_SP_10_HI 0x000004bb
+
+#define REG_A6XX_RBBM_PERFCTR_SP_11_LO 0x000004bc
+
+#define REG_A6XX_RBBM_PERFCTR_SP_11_HI 0x000004bd
+
+#define REG_A6XX_RBBM_PERFCTR_SP_12_LO 0x000004be
+
+#define REG_A6XX_RBBM_PERFCTR_SP_12_HI 0x000004bf
+
+#define REG_A6XX_RBBM_PERFCTR_SP_13_LO 0x000004c0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_13_HI 0x000004c1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_14_LO 0x000004c2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_14_HI 0x000004c3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_15_LO 0x000004c4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_15_HI 0x000004c5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_16_LO 0x000004c6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_16_HI 0x000004c7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_17_LO 0x000004c8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_17_HI 0x000004c9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_18_LO 0x000004ca
+
+#define REG_A6XX_RBBM_PERFCTR_SP_18_HI 0x000004cb
+
+#define REG_A6XX_RBBM_PERFCTR_SP_19_LO 0x000004cc
+
+#define REG_A6XX_RBBM_PERFCTR_SP_19_HI 0x000004cd
+
+#define REG_A6XX_RBBM_PERFCTR_SP_20_LO 0x000004ce
+
+#define REG_A6XX_RBBM_PERFCTR_SP_20_HI 0x000004cf
+
+#define REG_A6XX_RBBM_PERFCTR_SP_21_LO 0x000004d0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_21_HI 0x000004d1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_22_LO 0x000004d2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_22_HI 0x000004d3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_23_LO 0x000004d4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_23_HI 0x000004d5
+
+#define REG_A6XX_RBBM_PERFCTR_RB_0_LO 0x000004d6
+
+#define REG_A6XX_RBBM_PERFCTR_RB_0_HI 0x000004d7
+
+#define REG_A6XX_RBBM_PERFCTR_RB_1_LO 0x000004d8
+
+#define REG_A6XX_RBBM_PERFCTR_RB_1_HI 0x000004d9
+
+#define REG_A6XX_RBBM_PERFCTR_RB_2_LO 0x000004da
+
+#define REG_A6XX_RBBM_PERFCTR_RB_2_HI 0x000004db
+
+#define REG_A6XX_RBBM_PERFCTR_RB_3_LO 0x000004dc
+
+#define REG_A6XX_RBBM_PERFCTR_RB_3_HI 0x000004dd
+
+#define REG_A6XX_RBBM_PERFCTR_RB_4_LO 0x000004de
+
+#define REG_A6XX_RBBM_PERFCTR_RB_4_HI 0x000004df
+
+#define REG_A6XX_RBBM_PERFCTR_RB_5_LO 0x000004e0
+
+#define REG_A6XX_RBBM_PERFCTR_RB_5_HI 0x000004e1
+
+#define REG_A6XX_RBBM_PERFCTR_RB_6_LO 0x000004e2
+
+#define REG_A6XX_RBBM_PERFCTR_RB_6_HI 0x000004e3
+
+#define REG_A6XX_RBBM_PERFCTR_RB_7_LO 0x000004e4
+
+#define REG_A6XX_RBBM_PERFCTR_RB_7_HI 0x000004e5
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_0_LO 0x000004e6
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_0_HI 0x000004e7
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_1_LO 0x000004e8
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_1_HI 0x000004e9
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_0_LO 0x000004ea
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_0_HI 0x000004eb
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_1_LO 0x000004ec
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_1_HI 0x000004ed
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_2_LO 0x000004ee
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_2_HI 0x000004ef
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_3_LO 0x000004f0
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_3_HI 0x000004f1
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_0_LO 0x000004f2
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_0_HI 0x000004f3
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_1_LO 0x000004f4
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_1_HI 0x000004f5
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_2_LO 0x000004f6
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_2_HI 0x000004f7
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_3_LO 0x000004f8
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_3_HI 0x000004f9
+
+#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 0x00000501
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD1 0x00000502
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD2 0x00000503
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD3 0x00000504
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000505
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000507
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000508
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000509
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000050a
+
+#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b
+
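+/* Usage sketch, not part of the generated header: a performance counter is
+ * programmed by writing an event id to its *_SEL register, then sampling
+ * the matching LO/HI pair defined earlier in this file. The event id is a
+ * placeholder; real countables live in per-block tables outside this header.
+ */
+static inline uint64_t a6xx_example_sample_rbbm_ctr0(volatile uint32_t *regs)
+{
+	regs[REG_A6XX_RBBM_PERFCTR_RBBM_SEL_0] = 1;	/* placeholder event */
+
+	return ((uint64_t)regs[REG_A6XX_RBBM_PERFCTR_RBBM_0_HI] << 32) |
+	       regs[REG_A6XX_RBBM_PERFCTR_RBBM_0_LO];
+}
+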
+#define REG_A6XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A6XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
+
+#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
+
+#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A6XX_RBBM_INT_0_MASK 0x00000038
+
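+/* Usage sketch, not part of the generated header: the A6XX_RBBM_INT_0_MASK_*
+ * flags defined earlier are ORed together and written here to arm the IRQ
+ * line. The particular set below is illustrative, not the driver's mask.
+ */
+static inline void a6xx_example_enable_irqs(volatile uint32_t *regs)
+{
+	regs[REG_A6XX_RBBM_INT_0_MASK] =
+		A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR |
+		A6XX_RBBM_INT_0_MASK_CP_HW_ERROR |
+		A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT |
+		A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS;
+}
+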
+#define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042
+
+#define REG_A6XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A6XX_RBBM_RAC_THRESHOLD_CNT 0x00000044
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A6XX_RBBM_CLOCK_CNTL 0x000000ae
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP0 0x000000b0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP1 0x000000b1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP2 0x000000b2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP3 0x000000b3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP0 0x000000b4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP1 0x000000b5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP2 0x000000b6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP3 0x000000b7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP0 0x000000b8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP1 0x000000b9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP2 0x000000ba
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP3 0x000000bb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP0 0x000000bc
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP1 0x000000bd
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP2 0x000000be
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP3 0x000000bf
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP0 0x000000c0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP1 0x000000c1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP2 0x000000c2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP3 0x000000c3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP0 0x000000c4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP1 0x000000c5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP2 0x000000c6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP3 0x000000c7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP0 0x000000c8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP1 0x000000c9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP2 0x000000ca
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP3 0x000000cb
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP0 0x000000cc
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP1 0x000000cd
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP2 0x000000ce
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP3 0x000000cf
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP0 0x000000d0
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP1 0x000000d1
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP2 0x000000d2
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP3 0x000000d3
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP0 0x000000d4
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP1 0x000000d5
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP2 0x000000d6
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP3 0x000000d7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP0 0x000000d8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP1 0x000000d9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP2 0x000000da
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP3 0x000000db
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP0 0x000000dc
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP1 0x000000dd
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP2 0x000000de
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP3 0x000000df
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP0 0x000000e0
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP1 0x000000e1
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP2 0x000000e2
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP3 0x000000e3
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP0 0x000000e4
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP1 0x000000e5
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP2 0x000000e6
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP3 0x000000e7
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP0 0x000000e8
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP1 0x000000e9
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP2 0x000000ea
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP3 0x000000eb
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP0 0x000000ec
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP1 0x000000ed
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP2 0x000000ee
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP3 0x000000ef
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB0 0x000000f0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB1 0x000000f1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB2 0x000000f2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB3 0x000000f3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB0 0x000000f4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB1 0x000000f5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB2 0x000000f6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB3 0x000000f7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU0 0x000000f8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU1 0x000000f9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU2 0x000000fa
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU3 0x000000fb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000100
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000101
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000102
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000103
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RAC 0x00000104
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RAC 0x00000105
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_RAC 0x00000106
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RAC 0x00000107
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000108
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000109
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000010a
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_UCHE 0x0000010b
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_UCHE 0x0000010c
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_UCHE 0x0000010d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_UCHE 0x0000010e
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_UCHE 0x0000010f
+
+#define REG_A6XX_RBBM_CLOCK_HYST_UCHE 0x00000110
+
+#define REG_A6XX_RBBM_CLOCK_MODE_VFD 0x00000111
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_VFD 0x00000112
+
+#define REG_A6XX_RBBM_CLOCK_HYST_VFD 0x00000113
+
+#define REG_A6XX_RBBM_CLOCK_MODE_GPC 0x00000114
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GPC 0x00000115
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GPC 0x00000116
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2 0x00000117
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX 0x00000118
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX 0x00000119
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GMU_GX 0x0000011a
+
+#define REG_A6XX_RBBM_CLOCK_MODE_HLSQ 0x0000011b
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0000011c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_C 0x00000602
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_D 0x00000603
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLT 0x00000604
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLM 0x00000605
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0 0x00000608
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1 0x00000609
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2 0x0000060a
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3 0x0000060b
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0 0x0000060c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1 0x0000060d
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2 0x0000060e
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3 0x0000060f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000610
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000611
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000062f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630
+
+#define REG_A6XX_VSC_PERFCTR_VSC_SEL_0 0x00000cd8
+
+#define REG_A6XX_VSC_PERFCTR_VSC_SEL_1 0x00000cd9
+
+#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_0 0x00008610
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_1 0x00008611
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_2 0x00008612
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_3 0x00008613
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_0 0x00008614
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_1 0x00008615
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_2 0x00008616
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_3 0x00008617
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_0 0x00008618
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_1 0x00008619
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_2 0x0000861a
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_3 0x0000861b
+
+#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05
+
+#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_0 0x00008e10
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_1 0x00008e11
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_2 0x00008e12
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_3 0x00008e13
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_4 0x00008e14
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_5 0x00008e15
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_6 0x00008e16
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_7 0x00008e17
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_0 0x00008e18
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_1 0x00008e19
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_2 0x00008e1a
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_3 0x00008e1b
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_4 0x00008e1c
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_0 0x00008e2c
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_1 0x00008e2d
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_2 0x00008e2e
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_3 0x00008e2f
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d
+
+#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50
+
+#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00
+
+#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_0 0x00009e34
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_1 0x00009e35
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_2 0x00009e36
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_3 0x00009e37
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_4 0x00009e38
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_5 0x00009e39
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_6 0x00009e3a
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_7 0x00009e3b
+
+#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x0000be10
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x0000be11
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x0000be12
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x0000be13
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x0000be14
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x0000be15
+
+#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800
+
+#define REG_A6XX_HLSQ_DBG_READ_SEL 0x0000d000
+
+#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_0 0x0000a610
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_1 0x0000a611
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_2 0x0000a612
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_3 0x0000a613
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_4 0x0000a614
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_5 0x0000a615
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_6 0x0000a616
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_7 0x0000a617
+
+#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_0 0x00009604
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_1 0x00009605
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_2 0x00009606
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_3 0x00009607
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_4 0x00009608
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_5 0x00009609
+
+#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00
+
+#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_LO 0x00000e05
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_HI 0x00000e06
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_LO 0x00000e07
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_HI 0x00000e08
+
+#define REG_A6XX_UCHE_TRAP_BASE_LO 0x00000e09
+
+#define REG_A6XX_UCHE_TRAP_BASE_HI 0x00000e0a
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e0b
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e0c
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e0d
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e0e
+
+#define REG_A6XX_UCHE_CACHE_WAYS 0x00000e17
+
+#define REG_A6XX_UCHE_FILTER_CNTL 0x00000e18
+
+#define REG_A6XX_UCHE_CLIENT_PF 0x00000e19
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__MASK 0x000000ff
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT 0
+static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
+{
+ return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK;
+}
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e1c
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e1d
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e1e
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e1f
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e20
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e21
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e22
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e23
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_8 0x00000e24
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_9 0x00000e25
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_10 0x00000e26
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_11 0x00000e27
+
+#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
+
+#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_0 0x0000ae10
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_1 0x0000ae11
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_2 0x0000ae12
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_3 0x0000ae13
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_4 0x0000ae14
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_5 0x0000ae15
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_6 0x0000ae16
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_7 0x0000ae17
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_8 0x0000ae18
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_9 0x0000ae19
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_10 0x0000ae1a
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_11 0x0000ae1b
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_12 0x0000ae1c
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_13 0x0000ae1d
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_14 0x0000ae1e
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_15 0x0000ae1f
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_16 0x0000ae20
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_17 0x0000ae21
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_18 0x0000ae22
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_19 0x0000ae23
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_20 0x0000ae24
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_21 0x0000ae25
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_22 0x0000ae26
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_23 0x0000ae27
+
+#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
+
+#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_0 0x0000b610
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_1 0x0000b611
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_2 0x0000b612
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_3 0x0000b613
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_4 0x0000b614
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_5 0x0000b615
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_6 0x0000b616
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_7 0x0000b617
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_8 0x0000b618
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_9 0x0000b619
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_10 0x0000b61a
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_11 0x0000b61b
+
+#define REG_A6XX_VBIF_VERSION 0x00003000
+
+#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00018400
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00018401
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00018402
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00018403
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00018404
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00018405
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00018408
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00018409
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0001840a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0001840b
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0001840c
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0001840d
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0001840e
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0001840f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00018410
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00018411
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
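+/* The BYTELn fields above and below are 4-bit byte-lane select indexes for
+ * the CX debug bus.  A full lane mapping is composed by OR'ing the helpers
+ * together; snapshot code typically programs an identity mapping, e.g.
+ * (illustrative values only):
+ *
+ *   BYTEL_0 = 0x76543210;  BYTEL_1 = 0xfedcba98;
+ */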
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0001842f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00018430
+
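+/* PDC (Power Domain Controller) registers, written by the driver while
+ * booting the GMU.  The four TCS command banks (TCS0-TCS3) share an
+ * identical CONTROL / CMD_ENABLE_BANK / CMD_WAIT_FOR_CMPL_BANK /
+ * CMD0_{MSGID,ADDR,DATA} layout.
+ */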
+#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00021140
+
+#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00021148
+
+#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00021540
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00021541
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00021542
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00021543
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00021544
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00021545
+
+#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00021572
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00021573
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00021574
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00021575
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00021576
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00021577
+
+#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000215a4
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000215a5
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000215a6
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000215a7
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000215a8
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000215a9
+
+#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000215d6
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000215d7
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000215d8
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000215d9
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000215da
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000215db
+
+#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x000a0000
+
+#define REG_A6XX_X1_WINDOW_OFFSET 0x000088d4
+#define A6XX_X1_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X1_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X1_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X1_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X1_WINDOW_OFFSET_X__SHIFT) & A6XX_X1_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X1_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X1_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X1_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X1_WINDOW_OFFSET_Y__SHIFT) & A6XX_X1_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_X2_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_X2_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X2_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X2_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X2_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X2_WINDOW_OFFSET_X__SHIFT) & A6XX_X2_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X2_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X2_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X2_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X2_WINDOW_OFFSET_Y__SHIFT) & A6XX_X2_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_X3_WINDOW_OFFSET 0x0000b307
+#define A6XX_X3_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X3_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X3_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X3_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X3_WINDOW_OFFSET_X__SHIFT) & A6XX_X3_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X3_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X3_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X3_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X3_WINDOW_OFFSET_Y__SHIFT) & A6XX_X3_WINDOW_OFFSET_Y__MASK;
+}
+
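+/* The BIN_SIZE helpers below take pixel dimensions: width is encoded in
+ * 32-pixel units (val >> 5) and height in 16-pixel units (val >> 4), so
+ * callers pass values aligned accordingly, e.g. (illustrative):
+ *
+ *   A6XX_X1_BIN_SIZE_WIDTH(64) | A6XX_X1_BIN_SIZE_HEIGHT(32)
+ */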
+#define REG_A6XX_X1_BIN_SIZE 0x000080a1
+#define A6XX_X1_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X1_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X1_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X1_BIN_SIZE_WIDTH__SHIFT) & A6XX_X1_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X1_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X1_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X1_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X1_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X1_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_X2_BIN_SIZE 0x00008800
+#define A6XX_X2_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X2_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X2_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X2_BIN_SIZE_WIDTH__SHIFT) & A6XX_X2_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X2_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X2_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X2_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X2_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X2_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_X3_BIN_SIZE 0x000088d3
+#define A6XX_X3_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X3_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X3_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X3_BIN_SIZE_WIDTH__SHIFT) & A6XX_X3_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X3_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X3_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X3_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X3_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X3_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
+#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_VSC_SIZE_ADDRESS_LO 0x00000c03
+
+#define REG_A6XX_VSC_SIZE_ADDRESS_HI 0x00000c04
+
+#define REG_A6XX_VSC_BIN_COUNT 0x00000c06
+#define A6XX_VSC_BIN_COUNT_NX__MASK 0x000007fe
+#define A6XX_VSC_BIN_COUNT_NX__SHIFT 1
+static inline uint32_t A6XX_VSC_BIN_COUNT_NX(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NX__SHIFT) & A6XX_VSC_BIN_COUNT_NX__MASK;
+}
+#define A6XX_VSC_BIN_COUNT_NY__MASK 0x001ff800
+#define A6XX_VSC_BIN_COUNT_NY__SHIFT 11
+static inline uint32_t A6XX_VSC_BIN_COUNT_NY(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NY__SHIFT) & A6XX_VSC_BIN_COUNT_NY__MASK;
+}
+
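+/* Per-pipe visibility stream configuration: each binning pipe gets an X/Y
+ * origin and a W/H extent (presumably in bin units) packed into a single
+ * register, e.g. (illustrative):
+ *
+ *   A6XX_VSC_PIPE_CONFIG_REG_X(x) | A6XX_VSC_PIPE_CONFIG_REG_Y(y) |
+ *   A6XX_VSC_PIPE_CONFIG_REG_W(w) | A6XX_VSC_PIPE_CONFIG_REG_H(h)
+ */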
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+#define A6XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_W__MASK 0x03f00000
+#define A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_H__MASK 0xfc000000
+#define A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT 26
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
+#define REG_A6XX_VSC_XXX_ADDRESS_LO 0x00000c30
+
+#define REG_A6XX_VSC_XXX_ADDRESS_HI 0x00000c31
+
+#define REG_A6XX_VSC_XXX_PITCH 0x00000c32
+
+#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO 0x00000c34
+
+#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_HI 0x00000c35
+
+#define REG_A6XX_VSC_PIPE_DATA_PITCH 0x00000c36
+
+static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
+
+#define REG_A6XX_GRAS_UNKNOWN_8001 0x00008001
+
+#define REG_A6XX_GRAS_UNKNOWN_8004 0x00008004
+
+#define REG_A6XX_GRAS_CNTL 0x00008005
+#define A6XX_GRAS_CNTL_VARYING 0x00000001
+#define A6XX_GRAS_CNTL_XCOORD 0x00000040
+#define A6XX_GRAS_CNTL_YCOORD 0x00000080
+#define A6XX_GRAS_CNTL_ZCOORD 0x00000100
+#define A6XX_GRAS_CNTL_WCOORD 0x00000200
+
+#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
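+/* The viewport offset/scale registers take raw single-precision floats;
+ * fui() reinterprets the float's bit pattern as a uint32_t before the
+ * (no-op) shift and mask.
+ */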
+#define REG_A6XX_GRAS_CL_VPORT_XOFFSET_0 0x00008010
+#define A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_XSCALE_0 0x00008011
+#define A6XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_YOFFSET_0 0x00008012
+#define A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_YSCALE_0 0x00008013
+#define A6XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_ZOFFSET_0 0x00008014
+#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_ZSCALE_0 0x00008015
+#define A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_CNTL 0x00008090
+#define A6XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
+#define A6XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
+#define A6XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
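+/* LINEHALFWIDTH above is fixed point with two fractional bits (hence the
+ * val * 4.0), i.e. quarter-pixel granularity. */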
+#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A6XX_GRAS_SU_CNTL_MSAA_ENABLE 0x00002000
+
+#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POINT_SIZE 0x00008092
+#define A6XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
+}
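+/* POINT_MINMAX and POINT_SIZE above are fixed point with four fractional
+ * bits (hence the val * 16.0). */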
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00008096
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x00008097
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO 0x00008098
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_8099 0x00008099
+
+#define REG_A6XX_GRAS_UNKNOWN_809B 0x0000809b
+
+#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_GRAS_UNKNOWN_80A4 0x000080a4
+
+#define REG_A6XX_GRAS_UNKNOWN_80A5 0x000080a5
+
+#define REG_A6XX_GRAS_UNKNOWN_80A6 0x000080a6
+
+#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af
+
+#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x000080b0
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x000080b1
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x000080d0
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x000080d1
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL 0x000080f0
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR 0x000080f1
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
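+/* LRZ (low-resolution Z) state: CNTL enables the pass and GREATER flips
+ * the depth-compare direction; the BUFFER_PITCH helpers below encode their
+ * arguments with a 32-unit granularity (val >> 5).
+ */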
+#define REG_A6XX_GRAS_LRZ_CNTL 0x00008100
+#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
+#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
+#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+
+#define REG_A6XX_GRAS_2D_BLIT_INFO 0x00008102
+#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO 0x00008103
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_HI 0x00008104
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x003ff800
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x00008106
+
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107
+
+#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+
+#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
+#define A6XX_GRAS_2D_SRC_TL_X_X__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_TL_X_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_TL_X_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_TL_X_X__SHIFT) & A6XX_GRAS_2D_SRC_TL_X_X__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402
+#define A6XX_GRAS_2D_SRC_BR_X_X__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_BR_X_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_BR_X_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_BR_X_X__SHIFT) & A6XX_GRAS_2D_SRC_BR_X_X__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403
+#define A6XX_GRAS_2D_SRC_TL_Y_Y__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_TL_Y_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_TL_Y_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404
+#define A6XX_GRAS_2D_SRC_BR_Y_Y__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_BR_Y_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_BR_Y_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_DST_TL 0x00008405
+#define A6XX_GRAS_2D_DST_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00007fff
+#define A6XX_GRAS_2D_DST_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x7fff0000
+#define A6XX_GRAS_2D_DST_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_Y__SHIFT) & A6XX_GRAS_2D_DST_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_DST_BR 0x00008406
+#define A6XX_GRAS_2D_DST_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00007fff
+#define A6XX_GRAS_2D_DST_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x7fff0000
+#define A6XX_GRAS_2D_DST_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_RESOLVE_CNTL_1 0x0000840a
+#define A6XX_GRAS_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_X__MASK;
+}
+#define A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_RESOLVE_CNTL_2 0x0000840b
+#define A6XX_GRAS_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_X__MASK;
+}
+#define A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_8600 0x00008600
+
+#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_RB_UNKNOWN_8804 0x00008804
+
+#define REG_A6XX_RB_UNKNOWN_8805 0x00008805
+
+#define REG_A6XX_RB_UNKNOWN_8806 0x00008806
+
+#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
+#define A6XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A6XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A6XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A6XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+#define A6XX_RB_RENDER_CONTROL0_UNK10 0x00000400
+
+#define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
+#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL0 0x0000880b
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z 0x00000002
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL1 0x0000880c
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_RB_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
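+/* One 4-bit component-write-enable mask per render target, eight MRTs
+ * packed into a single register, e.g. (illustrative) 0xf to write all four
+ * channels of MRT0:
+ *
+ *   A6XX_RB_RENDER_COMPONENTS_RT0(0xf)
+ */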
+#define REG_A6XX_RB_RENDER_COMPONENTS 0x0000880d
+#define A6XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
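+/* Two-bit dither mode (see enum adreno_rb_dither_mode) per MRT, eight
+ * MRTs per register. */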
+#define REG_A6XX_RB_DITHER_CNTL 0x0000880e
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK 0x00000003
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT 0
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK 0x0000000c
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT 2
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK 0x00000030
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT 4
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK 0x000000c0
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT 6
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK 0x00000300
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT 8
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK 0x00000c00
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT 10
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00003000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT 12
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK 0x0000c000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT 14
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK;
+}
+
+#define REG_A6XX_RB_SRGB_CNTL 0x0000880f
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
+
+#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
+
+#define REG_A6XX_RB_UNKNOWN_881A 0x0000881a
+
+#define REG_A6XX_RB_UNKNOWN_881B 0x0000881b
+
+#define REG_A6XX_RB_UNKNOWN_881C 0x0000881c
+
+#define REG_A6XX_RB_UNKNOWN_881D 0x0000881d
+
+#define REG_A6XX_RB_UNKNOWN_881E 0x0000881e
+
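+/* Per-MRT state: an array of eight register groups at a stride of 8
+ * dwords.  REG_A6XX_RB_MRT_CONTROL(i) and friends compute the register
+ * offset for MRT i; the field helpers below build the values written
+ * there.
+ */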
+static inline uint32_t REG_A6XX_RB_MRT(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_CONTROL(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+#define A6XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A6XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A6XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A6XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A6XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x00008821 + 0x8*i0; }
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
+#define A6XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A6XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; }
+#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x00008825 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x00008826 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; }
+
+#define REG_A6XX_RB_BLEND_RED_F32 0x00008860
+#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_RED_F32__SHIFT) & A6XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_GREEN_F32 0x00008861
+#define A6XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_GREEN_F32__SHIFT) & A6XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_BLUE_F32 0x00008862
+#define A6XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_BLUE_F32__SHIFT) & A6XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_ALPHA_F32 0x00008863
+#define A6XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_ALPHA_F32__SHIFT) & A6XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A6XX_RB_ALPHA_CONTROL 0x00008864
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_CNTL 0x00008865
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
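+/* Illustrative composition of a register value from the helpers above
+ * (not taken from the driver):
+ *
+ *   uint32_t rb_blend_cntl =
+ *       A6XX_RB_BLEND_CNTL_ENABLE_BLEND(0x1) |
+ *       A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND |
+ *       A6XX_RB_BLEND_CNTL_SAMPLE_MASK(0xffff);
+ */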
+
+#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
+#define A6XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
+#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873
+#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_LO 0x00008875
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_HI 0x00008876
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877
+
+#define REG_A6XX_RB_UNKNOWN_8878 0x00008878
+
+#define REG_A6XX_RB_UNKNOWN_8879 0x00008879
+
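+/* Stencil state: the _BF fields are the back-facing counterparts of the
+ * front-facing FUNC/FAIL/ZPASS/ZFAIL operations. */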
+#define REG_A6XX_RB_STENCIL_CONTROL 0x00008880
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A6XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_INFO 0x00008881
+#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+
+#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882
+#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_LO 0x00008884
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_HI 0x00008885
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886
+
+#define REG_A6XX_RB_STENCILREF 0x00008887
+#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff
+#define A6XX_RB_STENCILREF_REF__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK;
+}
+
+#define REG_A6XX_RB_STENCILMASK 0x00008888
+#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff
+#define A6XX_RB_STENCILMASK_MASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK;
+}
+
+#define REG_A6XX_RB_STENCILWRMASK 0x00008889
+#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff
+#define A6XX_RB_STENCILWRMASK_WRMASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK;
+}
+
+#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
+#define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
+#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
+
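+/* RB_BLIT_* state drives GMEM resolves and clears: the scissor pair bounds
+ * the operation, DST_* describes the destination surface, and the
+ * CLEAR_COLOR dwords apparently supply the clear value when
+ * A6XX_RB_BLIT_INFO_FAST_CLEAR is set.
+ */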
+#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
+#define A6XX_RB_BLIT_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_SCISSOR_BR 0x000088d2
+#define A6XX_RB_BLIT_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
+
+#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_LO 0x000088d8
+
+#define REG_A6XX_RB_BLIT_DST_HI 0x000088d9
+
+#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da
+#define A6XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_FLAG_DST_LO 0x000088dc
+
+#define REG_A6XX_RB_BLIT_FLAG_DST_HI 0x000088dd
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1 0x000088e0
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW2 0x000088e1
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW3 0x000088e2
+
+#define REG_A6XX_RB_BLIT_INFO 0x000088e3
+#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
+#define A6XX_RB_BLIT_INFO_FAST_CLEAR 0x00000002
+#define A6XX_RB_BLIT_INFO_INTEGER 0x00000004
+#define A6XX_RB_BLIT_INFO_UNK3 0x00000008
+#define A6XX_RB_BLIT_INFO_MASK__MASK 0x000000f0
+#define A6XX_RB_BLIT_INFO_MASK__SHIFT 4
+static inline uint32_t A6XX_RB_BLIT_INFO_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_MASK__SHIFT) & A6XX_RB_BLIT_INFO_MASK__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x00008900
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x00008901
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x00008904 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; }
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x003ff800
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_LO 0x00008927
+
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_HI 0x00008928
+
+#define REG_A6XX_RB_2D_BLIT_CNTL 0x00008c00
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_2D_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_FLAGS 0x00001000
+
+#define REG_A6XX_RB_2D_DST_LO 0x00008c18
+
+#define REG_A6XX_RB_2D_DST_HI 0x00008c19
+
+#define REG_A6XX_RB_2D_DST_SIZE 0x00008c1a
+#define A6XX_RB_2D_DST_SIZE_PITCH__MASK 0x0000ffff
+#define A6XX_RB_2D_DST_SIZE_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A6XX_RB_2D_DST_SIZE_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_LO 0x00008c20
+
+#define REG_A6XX_RB_2D_DST_FLAGS_HI 0x00008c21
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C1 0x00008c2d
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C2 0x00008c2e
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C3 0x00008c2f
+
+#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01
+
+#define REG_A6XX_RB_CCU_CNTL 0x00008e07
+
+#define REG_A6XX_VPC_UNKNOWN_9101 0x00009101
+
+#define REG_A6XX_VPC_GS_SIV_CNTL 0x00009104
+
+#define REG_A6XX_VPC_UNKNOWN_9108 0x00009108
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+#define REG_A6XX_VPC_UNKNOWN_9210 0x00009210
+
+#define REG_A6XX_VPC_UNKNOWN_9211 0x00009211
+
+static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+#define REG_A6XX_VPC_SO_CNTL 0x00009216
+#define A6XX_VPC_SO_CNTL_ENABLE 0x00010000
+
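+/* Each VPC_SO_PROG write encodes up to two stream-out moves (the A and B
+ * slots): a target buffer index plus a destination offset, where the
+ * helper's val >> 2 converts a byte offset into the dword offset held in
+ * the register.
+ */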
+#define REG_A6XX_VPC_SO_PROG 0x00009217
+#define A6XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
+#define A6XX_VPC_SO_PROG_A_BUF__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_PROG_A_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_A_BUF__SHIFT) & A6XX_VPC_SO_PROG_A_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
+#define A6XX_VPC_SO_PROG_A_OFF__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_PROG_A_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_EN 0x00000800
+#define A6XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
+#define A6XX_VPC_SO_PROG_B_BUF__SHIFT 12
+static inline uint32_t A6XX_VPC_SO_PROG_B_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_B_BUF__SHIFT) & A6XX_VPC_SO_PROG_B_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
+#define A6XX_VPC_SO_PROG_B_OFF__SHIFT 14
+static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_EN 0x00800000
+
+static inline uint32_t REG_A6XX_VPC_SO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000921b + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000921d + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000921f + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x00009220 + 0x7*i0; }
+
+#define REG_A6XX_VPC_UNKNOWN_9236 0x00009236
+
+#define REG_A6XX_VPC_UNKNOWN_9300 0x00009300
+
+#define REG_A6XX_VPC_PACK 0x00009301
+#define A6XX_VPC_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_PACK_NUMNONPOSVAR__MASK 0x0000ff00
+#define A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT 8
+static inline uint32_t A6XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A6XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+#define A6XX_VPC_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_PSIZELOC__SHIFT) & A6XX_VPC_PACK_PSIZELOC__MASK;
+}
+
+#define REG_A6XX_VPC_CNTL_0 0x00009304
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK 0x000000ff
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK;
+}
+#define A6XX_VPC_CNTL_0_VARYING 0x00010000
+
+#define REG_A6XX_VPC_SO_BUF_CNTL 0x00009305
+#define A6XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
+#define A6XX_VPC_SO_BUF_CNTL_BUF1 0x00000008
+#define A6XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
+#define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
+#define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+
+#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600
+
+#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
+
+#define REG_A6XX_PC_UNKNOWN_9801 0x00009801
+
+#define REG_A6XX_PC_RESTART_INDEX 0x00009803
+
+#define REG_A6XX_PC_MODE_CNTL 0x00009804
+
+#define REG_A6XX_PC_UNKNOWN_9805 0x00009805
+
+#define REG_A6XX_PC_UNKNOWN_9981 0x00009981
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
+#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
+#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_1 0x00009b01
+#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK 0x0000007f
+#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A6XX_PC_UNKNOWN_9B06 0x00009b06
+
+#define REG_A6XX_PC_UNKNOWN_9B07 0x00009b07
+
+#define REG_A6XX_PC_TESSFACTOR_ADDR_LO 0x00009e08
+
+#define REG_A6XX_PC_TESSFACTOR_ADDR_HI 0x00009e09
+
+#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72
+
+#define REG_A6XX_VFD_CONTROL_0 0x0000a000
+#define A6XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
+#define A6XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A6XX_VFD_CONTROL_0_VTXCNT__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_1 0x0000a001
+#define A6XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A6XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_2 0x0000a002
+#define A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_3 0x0000a003
+#define A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_4 0x0000a004
+
+#define REG_A6XX_VFD_CONTROL_5 0x0000a005
+
+#define REG_A6XX_VFD_CONTROL_6 0x0000a006
+
+#define REG_A6XX_VFD_MODE_CNTL 0x0000a007
+#define A6XX_VFD_MODE_CNTL_BINNING_PASS 0x00000001
+
+#define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008
+
+#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
+
+#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
+
+static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000a011 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000a013 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+#define A6XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A6XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_vtx_fmt val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A6XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A6XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A6XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_UNK30 0x40000000
+#define A6XX_VFD_DECODE_INSTR_FLOAT 0x80000000
+
+static inline uint32_t REG_A6XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000a091 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A6XX_SP_UNKNOWN_A0F8 0x0000a0f8
+
+#define REG_A6XX_SP_PRIMITIVE_CNTL 0x0000a802
+#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f
+#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0
+static inline uint32_t A6XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+#define A6XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c
+
+#define REG_A6XX_SP_VS_OBJ_START_HI 0x0000a81d
+
+#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822
+
+#define REG_A6XX_SP_VS_CONFIG 0x0000a823
+#define A6XX_SP_VS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_VS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_VS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_VS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824
+
+#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_HS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_HS_UNKNOWN_A831 0x0000a831
+
+#define REG_A6XX_SP_HS_OBJ_START_LO 0x0000a834
+
+#define REG_A6XX_SP_HS_OBJ_START_HI 0x0000a835
+
+#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a
+
+#define REG_A6XX_SP_HS_CONFIG 0x0000a83b
+#define A6XX_SP_HS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_HS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_HS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_HS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c
+
+#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_DS_OBJ_START_LO 0x0000a85c
+
+#define REG_A6XX_SP_DS_OBJ_START_HI 0x0000a85d
+
+#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862
+
+#define REG_A6XX_SP_DS_CONFIG 0x0000a863
+#define A6XX_SP_DS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_DS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_DS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_DS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864
+
+#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_GS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_GS_UNKNOWN_A871 0x0000a871
+
+#define REG_A6XX_SP_GS_OBJ_START_LO 0x0000a88d
+
+#define REG_A6XX_SP_GS_OBJ_START_HI 0x0000a88e
+
+#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893
+
+#define REG_A6XX_SP_GS_CONFIG 0x0000a894
+#define A6XX_SP_GS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_GS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_GS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_GS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895
+
+#define REG_A6XX_SP_VS_TEX_SAMP_LO 0x0000a8a0
+
+#define REG_A6XX_SP_VS_TEX_SAMP_HI 0x0000a8a1
+
+#define REG_A6XX_SP_HS_TEX_SAMP_LO 0x0000a8a2
+
+#define REG_A6XX_SP_HS_TEX_SAMP_HI 0x0000a8a3
+
+#define REG_A6XX_SP_DS_TEX_SAMP_LO 0x0000a8a4
+
+#define REG_A6XX_SP_DS_TEX_SAMP_HI 0x0000a8a5
+
+#define REG_A6XX_SP_GS_TEX_SAMP_LO 0x0000a8a6
+
+#define REG_A6XX_SP_GS_TEX_SAMP_HI 0x0000a8a7
+
+#define REG_A6XX_SP_VS_TEX_CONST_LO 0x0000a8a8
+
+#define REG_A6XX_SP_VS_TEX_CONST_HI 0x0000a8a9
+
+#define REG_A6XX_SP_HS_TEX_CONST_LO 0x0000a8aa
+
+#define REG_A6XX_SP_HS_TEX_CONST_HI 0x0000a8ab
+
+#define REG_A6XX_SP_DS_TEX_CONST_LO 0x0000a8ac
+
+#define REG_A6XX_SP_DS_TEX_CONST_HI 0x0000a8ad
+
+#define REG_A6XX_SP_GS_TEX_CONST_LO 0x0000a8ae
+
+#define REG_A6XX_SP_GS_TEX_CONST_HI 0x0000a8af
+
+#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983
+
+#define REG_A6XX_SP_FS_OBJ_START_HI 0x0000a984
+
+#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
+#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
+#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+
+#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_SP_FS_RENDER_COMPONENTS 0x0000a98b
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK;
+}
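+
+/*
+ * Illustrative sketch (not part of the generated header): each render
+ * target gets a four-bit RGBA component write mask; this example enables
+ * full writes on the first two MRTs.
+ */
+static inline uint32_t a6xx_example_render_components(void)
+{
+ return A6XX_SP_FS_RENDER_COMPONENTS_RT0(0xf) |
+        A6XX_SP_FS_RENDER_COMPONENTS_RT1(0xf);
+}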
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL0 0x0000a98c
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK 0x0000ff00
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT 8
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL1 0x0000a98d
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
+#define A6XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
+
+#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
+
+#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8
+
+#define REG_A6XX_SP_FS_TEX_SAMP_LO 0x0000a9e0
+
+#define REG_A6XX_SP_FS_TEX_SAMP_HI 0x0000a9e1
+
+#define REG_A6XX_SP_CS_TEX_SAMP_LO 0x0000a9e2
+
+#define REG_A6XX_SP_CS_TEX_SAMP_HI 0x0000a9e3
+
+#define REG_A6XX_SP_FS_TEX_CONST_LO 0x0000a9e4
+
+#define REG_A6XX_SP_FS_TEX_CONST_HI 0x0000a9e5
+
+#define REG_A6XX_SP_CS_TEX_CONST_LO 0x0000a9e6
+
+#define REG_A6XX_SP_CS_TEX_CONST_HI 0x0000a9e7
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_CS_OBJ_START_LO 0x0000a9b4
+
+#define REG_A6XX_SP_CS_OBJ_START_HI 0x0000a9b5
+
+#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc
+
+#define REG_A6XX_SP_UNKNOWN_AB00 0x0000ab00
+
+#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
+#define A6XX_SP_FS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_FS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_FS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_FS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
+
+#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
+
+#define REG_A6XX_SP_UNKNOWN_AE04 0x0000ae04
+
+#define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f
+
+#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
+
+#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_SP_TP_DEST_MSAA_CNTL 0x0000b301
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000b302
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000b303
+
+#define REG_A6XX_SP_TP_UNKNOWN_B304 0x0000b304
+
+#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+
+#define REG_A6XX_SP_PS_2D_SRC_LO 0x0000b4c2
+
+#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO 0x0000b4ca
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
+
+#define REG_A6XX_SP_UNKNOWN_B600 0x0000b600
+
+#define REG_A6XX_SP_UNKNOWN_B605 0x0000b605
+
+#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
+}
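+
+/*
+ * Illustrative sketch (not part of the generated header): CONSTLEN is
+ * passed in dwords and stored in units of four dwords (note the val >> 2
+ * above); 128 dwords is a hypothetical example value.
+ */
+static inline uint32_t a6xx_example_vs_constlen(void)
+{
+ return A6XX_HLSQ_VS_CNTL_CONSTLEN(128); /* stored as 128 >> 2 = 32 */
+}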
+
+#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
+
+#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984
+#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_1 0x0000b991
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_2 0x0000b992
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_3 0x0000b993
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_4 0x0000b994
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_5 0x0000b995
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_6 0x0000b996
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL_0 0x0000b997
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_UNK0__MASK 0x0000ff00
+#define A6XX_HLSQ_CS_CNTL_0_UNK0__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A6XX_HLSQ_CS_CNTL_0_UNK0__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_UNK1__MASK 0x00ff0000
+#define A6XX_HLSQ_CS_CNTL_0_UNK1__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A6XX_HLSQ_CS_CNTL_0_UNK1__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000b99a
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b
+
+#define REG_A6XX_HLSQ_UPDATE_CNTL 0x0000bb08
+
+#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_UNKNOWN_BB11 0x0000bb11
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE00 0x0000be00
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE01 0x0000be01
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE04 0x0000be04
+
+#define REG_A6XX_TEX_SAMP_0 0x00000000
+#define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A6XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A6XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MAG(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MAG__SHIFT) & A6XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A6XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A6XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MIN(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MIN__SHIFT) & A6XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A6XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_S(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_S__SHIFT) & A6XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A6XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_T(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_T__SHIFT) & A6XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A6XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_R(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_R__SHIFT) & A6XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A6XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A6XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A6XX_TEX_SAMP_0_ANISO(enum a6xx_tex_aniso val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_ANISO__SHIFT) & A6XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A6XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A6XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
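+
+/*
+ * Illustrative sketch (not part of the generated header): the LOD bias is
+ * converted from float to signed fixed point with eight fractional bits
+ * (val * 256.0 above); a bias of -0.5 is a hypothetical example value.
+ */
+static inline uint32_t a6xx_example_lod_bias(void)
+{
+ return A6XX_TEX_SAMP_0_LOD_BIAS(-0.5f); /* fixed-point value: (int32_t)(-0.5 * 256.0) == -128 */
+}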
+
+#define REG_A6XX_TEX_SAMP_1 0x00000001
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A6XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A6XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A6XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A6XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A6XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A6XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A6XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_2 0x00000002
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xfffffff0
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 4
+static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
+{
+ return ((val) << A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_3 0x00000003
+
+#define REG_A6XX_TEX_CONST_0 0x00000000
+#define A6XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A6XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_0_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_TEX_CONST_0_TILE_MODE__SHIFT) & A6XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A6XX_TEX_CONST_0_SRGB 0x00000004
+#define A6XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A6XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_X(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_X__SHIFT) & A6XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A6XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Y(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A6XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Z(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A6XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_W(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_W__SHIFT) & A6XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A6XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A6XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A6XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
+{
+ return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK;
+}
+#define A6XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A6XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A6XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWAP__SHIFT) & A6XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_1 0x00000001
+#define A6XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A6XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_WIDTH__SHIFT) & A6XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A6XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A6XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_HEIGHT__SHIFT) & A6XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_2 0x00000002
+#define A6XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A6XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_2_FETCHSIZE(enum a6xx_tex_fetchsize val)
+{
+ return ((val) << A6XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A6XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A6XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A6XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_2_PITCH__SHIFT) & A6XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A6XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val)
+{
+ return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_3 0x00000003
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A6XX_TEX_CONST_4 0x00000004
+#define A6XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_4_BASE_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK;
+}
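+
+/*
+ * Illustrative sketch (not part of the generated header): the low word of
+ * the texture base address drops its bottom five bits (val >> 5 above), so
+ * the backing storage must be 32-byte aligned. iova_lo is a hypothetical
+ * parameter name.
+ */
+static inline uint32_t a6xx_example_tex_base_lo(uint32_t iova_lo)
+{
+ return A6XX_TEX_CONST_4_BASE_LO(iova_lo & ~0x1fu);
+}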
+
+#define REG_A6XX_TEX_CONST_5 0x00000005
+#define A6XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_BASE_HI__SHIFT) & A6XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A6XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A6XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_DEPTH__SHIFT) & A6XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_6 0x00000006
+
+#define REG_A6XX_TEX_CONST_7 0x00000007
+#define A6XX_TEX_CONST_7_FLAG_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_7_FLAG_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_8 0x00000008
+#define A6XX_TEX_CONST_8_BASE_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_8_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_8_BASE_HI__SHIFT) & A6XX_TEX_CONST_8_BASE_HI__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_9 0x00000009
+
+#define REG_A6XX_TEX_CONST_10 0x0000000a
+
+#define REG_A6XX_TEX_CONST_11 0x0000000b
+
+#define REG_A6XX_TEX_CONST_12 0x0000000c
+
+#define REG_A6XX_TEX_CONST_13 0x0000000d
+
+#define REG_A6XX_TEX_CONST_14 0x0000000e
+
+#define REG_A6XX_TEX_CONST_15 0x0000000f
+
+#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
new file mode 100644
index 000000000000..bbb8126ec5c5
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -0,0 +1,1207 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+static irqreturn_t a6xx_gmu_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
+ dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
+
+ /* Temporary until we can recover safely */
+ BUG();
+ }
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
+ dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+ dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t a6xx_hfi_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
+
+ if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
+ tasklet_schedule(&gmu->hfi_tasklet);
+
+ if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
+ dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
+
+ /* Temporary until we can recover safely */
+ BUG();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Check to see if the GX rail is still powered */
+static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+}
+
+static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+{
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
+ ((3 & 0xf) << 28) | index);
+
+ /*
+ * Send an invalid index as a vote for the bus bandwidth and let the
+ * firmware decide on the right vote
+ */
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
+
+ /* Set and clear the OOB for DCVS to trigger the GMU */
+ a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
+
+ return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+}
+
+static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int local = gmu->idle_level;
+
+ /* SPTP and IFPC both report as IFPC */
+ if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
+ local = GMU_IDLE_STATE_IFPC;
+
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val == local) {
+ if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
+ !a6xx_gmu_gx_is_on(gmu))
+ return true;
+ }
+
+ return false;
+}
+
+/* Wait for the GMU to get to its most idle state */
+int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ return spin_until(a6xx_gmu_check_idle_level(gmu));
+}
+
+static int a6xx_gmu_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
+ val == 0xbabeface, 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+
+ return ret;
+}
+
+static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
+ A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
+ val & 1, 100, 10000);
+ if (ret)
+ dev_err(gmu->dev, "Unable to start the HFI queues\n");
+
+ return ret;
+}
+
+/* Trigger an OOB (out of band) request to the GMU */
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ int ret;
+ u32 val;
+ int request, ack;
+ const char *name;
+
+ switch (state) {
+ case GMU_OOB_GPU_SET:
+ request = GMU_OOB_GPU_SET_REQUEST;
+ ack = GMU_OOB_GPU_SET_ACK;
+ name = "GPU_SET";
+ break;
+ case GMU_OOB_BOOT_SLUMBER:
+ request = GMU_OOB_BOOT_SLUMBER_REQUEST;
+ ack = GMU_OOB_BOOT_SLUMBER_ACK;
+ name = "BOOT_SLUMBER";
+ break;
+ case GMU_OOB_DCVS_SET:
+ request = GMU_OOB_DCVS_REQUEST;
+ ack = GMU_OOB_DCVS_ACK;
+ name = "GPU_DCVS";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Trigger the requested OOB operation */
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
+
+ /* Wait for the acknowledge interrupt */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & (1 << ack), 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev,
+ "Timeout waiting for GMU OOB set %s: 0x%x\n",
+ name,
+ gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
+
+ /* Clear the acknowledge interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
+
+ return ret;
+}
+
+/* Clear a pending OOB state in the GMU */
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ switch (state) {
+ case GMU_OOB_GPU_SET:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_GPU_SET_CLEAR);
+ break;
+ case GMU_OOB_BOOT_SLUMBER:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
+ break;
+ case GMU_OOB_DCVS_SET:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_DCVS_CLEAR);
+ break;
+ }
+}
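+
+/*
+ * Usage sketch (illustrative, not a call site in this patch): an OOB vote
+ * is held across the CPU-side operation and cleared afterwards, e.g.:
+ *
+ *	if (!a6xx_gmu_set_oob(gmu, GMU_OOB_GPU_SET)) {
+ *		... access GPU registers from the CPU ...
+ *		a6xx_gmu_clear_oob(gmu, GMU_OOB_GPU_SET);
+ *	}
+ */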
+
+/* Enable CPU control of SPTP power collapse */
+static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x38) == 0x28, 1, 100);
+
+ if (ret) {
+ dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+ }
+
+ return ret;
+}
+
+/* Disable CPU control of SPTP power collapse */
+static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ /* Make sure retention is on */
+ gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x04), 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+}
+
+/* Let the GMU know we are starting a boot sequence */
+static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
+{
+ u32 vote;
+
+ /* Let the GMU know we are getting ready for boot */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
+
+ /* Choose the "default" power level as the highest available */
+ vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
+ gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
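+
+ /*
+ * Illustrative example: a vote word of 0x0000020b, as packed by
+ * a6xx_gmu_rpmh_arc_votes_init(), selects GX index 0x0b and MX index
+ * 0x02 in the two writes above.
+ */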
+
+ /* Let the GMU know the boot sequence has started */
+ return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+}
+
+/* Let the GMU know that we are about to go into slumber */
+static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
+{
+ int ret;
+
+ /* Disable the power counter so the GMU isn't busy */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+
+ /* Disable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
+ a6xx_sptprac_disable(gmu);
+
+ /* Tell the GMU to get ready to slumber */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
+
+ ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ if (!ret) {
+ /* Check to see if the GMU really did slumber */
+ if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
+ != 0x0f) {
+ dev_err(gmu->dev, "The GMU did not go into slumber\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ /* Put fence into allow mode */
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ return ret;
+}
+
+static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
+ /* Make sure the write posts before we start polling for the ack */
+ wmb();
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
+ val & (1 << 1), 100, 10000);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+ return ret;
+ }
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
+ !val, 100, 10000);
+
+ if (!ret) {
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ /* Re-enable the power counter */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+ return 0;
+ }
+
+ dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+ return ret;
+}
+
+static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+ val, val & (1 << 16), 100, 10000);
+ if (ret)
+ dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+}
+
+static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
+{
+ /* Disable SDE clock gating */
+ gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+
+ /* Setup RSC PDC handshake for sleep and wakeup */
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+
+ /* Load RSC sequencer uCode for sleep and wakeup */
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+
+ /* Load PDC sequencer uCode for power up and power down sequence */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
+
+ /* Set TCS commands used by PDC sequence for low power modes */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
+
+ /* Setup GPU PDC */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
+
+ /* ensure no writes happen before the uCode is fully written */
+ wmb();
+}
+
+/*
+ * The lowest 16 bits of this value are the number of XO clock cycles for main
+ * hysteresis, which is set at 0x1680 cycles (300 us). The upper 16 bits are
+ * for the shorter hysteresis that happens after main - this is 0xa (0.5 us)
+ */
+
+#define GMU_PWR_COL_HYST 0x000a1680
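+
+/*
+ * Worked out (assuming the usual 19.2 MHz XO clock): 0x1680 is 5760 cycles,
+ * i.e. 5760 / 19200000 s = 300 us, and 0xa is 10 cycles, roughly 0.5 us.
+ */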
+
+/* Set up the idle state for the GMU */
+static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
+{
+ /* Disable GMU WB/RB buffer */
+ gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
+
+ switch (gmu->idle_level) {
+ case GMU_IDLE_STATE_IFPC:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
+ /* Fall through */
+ case GMU_IDLE_STATE_SPTP:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
+ }
+
+ /* Enable RPMh GPU client */
+ gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
+ A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
+}
+
+static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
+{
+ static bool rpmh_init;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ int i, ret;
+ u32 chipid;
+ u32 *image;
+
+ if (state == GMU_WARM_BOOT) {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ } else {
+ if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
+ "GMU firmware is not loaded\n"))
+ return -ENOENT;
+
+ /* Sanity check the size of the firmware that was loaded */
+ if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
+ dev_err(gmu->dev,
+ "GMU firmware is bigger than the available region\n");
+ return -EINVAL;
+ }
+
+ /* Turn on register retention */
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+
+ /* We only need to load the RPMh microcode once */
+ if (!rpmh_init) {
+ a6xx_gmu_rpmh_init(gmu);
+ rpmh_init = true;
+ } else if (state != GMU_RESET) {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ }
+
+ image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
+
+ for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
+ gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
+ image[i]);
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
+ gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
+
+ /* Write the iova of the HFI table */
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
+
+ gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
+ (1 << 31) | (0xa << 18) | (0xa0));
+
+ chipid = adreno_gpu->rev.core << 24;
+ chipid |= adreno_gpu->rev.major << 16;
+ chipid |= adreno_gpu->rev.minor << 12;
+ chipid |= adreno_gpu->rev.patchid << 8;
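+
+ /*
+ * For example (illustrative): an A630 (core 6, major 3, minor 0,
+ * patch 0) packs to a chipid of 0x06030000.
+ */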
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+
+ /* Set up the lowest idle level on the GMU */
+ a6xx_gmu_power_config(gmu);
+
+ ret = a6xx_gmu_start(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_gmu_gfx_rail_on(gmu);
+ if (ret)
+ return ret;
+
+ /* Enable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
+ ret = a6xx_sptprac_enable(gmu);
+ if (ret)
+ return ret;
+ }
+
+ ret = a6xx_gmu_hfi_start(gmu);
+ if (ret)
+ return ret;
+
+ /* FIXME: Do we need this wmb() here? */
+ wmb();
+
+ return 0;
+}
+
+#define A6XX_HFI_IRQ_MASK \
+ (A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
+ A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
+
+#define A6XX_GMU_IRQ_MASK \
+ (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
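+
+/*
+ * Note: a6xx_gmu_irq_enable() writes the complement of these masks to the
+ * hardware mask registers, i.e. a set bit in the register masks (disables)
+ * that interrupt, so only the interrupts named here stay unmasked.
+ */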
+
+static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
+{
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
+
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
+ ~A6XX_GMU_IRQ_MASK);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
+ ~A6XX_HFI_IRQ_MASK);
+
+ enable_irq(gmu->gmu_irq);
+ enable_irq(gmu->hfi_irq);
+}
+
+static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
+{
+ disable_irq(gmu->gmu_irq);
+ disable_irq(gmu->hfi_irq);
+
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
+}
+
+int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int ret;
+ u32 val;
+
+ /* Flush all the queues */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Force off SPTP in case the GMU is managing it */
+ a6xx_sptprac_disable(gmu);
+
+ /* Make sure there are no outstanding RPMh votes */
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
+ (val & 1), 100, 1000);
+
+ /* Force off the GX GDSC */
+ regulator_force_disable(gmu->gx);
+
+ /* Disable the resources */
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+ pm_runtime_put_sync(gmu->dev);
+
+ /* Re-enable the resources */
+ pm_runtime_get_sync(gmu->dev);
+
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret)
+ goto out;
+
+ a6xx_gmu_irq_enable(gmu);
+
+ ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
+ if (!ret)
+ ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
+
+ /* Set the GPU back to the highest power frequency */
+ a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+
+out:
+ if (ret)
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int status, ret;
+
+ if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
+ return 0;
+
+ /* Turn on the resources */
+ pm_runtime_get_sync(gmu->dev);
+
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret)
+ goto out;
+
+ a6xx_gmu_irq_enable(gmu);
+
+ /* Check to see if we are doing a cold or warm boot */
+ status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
+ GMU_WARM_BOOT : GMU_COLD_BOOT;
+
+ ret = a6xx_gmu_fw_start(gmu, status);
+ if (ret)
+ goto out;
+
+ ret = a6xx_hfi_start(gmu, status);
+
+ /* Set the GPU to the highest power frequency */
+ a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+
+out:
+ /* Make sure to turn off the boot OOB request on error */
+ if (ret)
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
+{
+ u32 reg;
+
+ if (!gmu->mmio)
+ return true;
+
+ reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
+
+ if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
+ return false;
+
+ return true;
+}
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 val;
+
+ /*
+ * The GMU may still be in slumber if the GPU never started, so check
+ * first and skip putting it back into slumber in that case
+ */
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val != 0xf) {
+ int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);
+
+ /* Temporary until we can recover safely */
+ BUG_ON(ret);
+
+ /* tell the GMU we want to slumber */
+ a6xx_gmu_notify_slumber(gmu);
+
+ ret = gmu_poll_timeout(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+ !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+ 100, 10000);
+
+ /*
+ * Let the user know we failed to slumber but don't worry too
+ * much because we are powering down anyway
+ */
+
+ if (ret)
+ dev_err(gmu->dev,
+ "Unable to slumber GMU: status = 0%x/0%x\n",
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
+ }
+
+ /* Turn off HFI */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts and mask the hardware */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Tell RPMh to power off the GPU */
+ a6xx_rpmh_stop(gmu);
+
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+
+ pm_runtime_put_sync(gmu->dev);
+
+ return 0;
+}
+
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
+{
+ int count, i;
+ u64 iova;
+
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ count = bo->size >> PAGE_SHIFT;
+ iova = bo->iova;
+
+ for (i = 0; i < count; i++, iova += PAGE_SIZE) {
+ iommu_unmap(gmu->domain, iova, PAGE_SIZE);
+ __free_pages(bo->pages[i], 0);
+ }
+
+ kfree(bo->pages);
+ kfree(bo);
+}
+
+static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
+ size_t size)
+{
+ struct a6xx_gmu_bo *bo;
+ int ret, count, i;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ bo->size = PAGE_ALIGN(size);
+
+ count = bo->size >> PAGE_SHIFT;
+
+ bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
+ if (!bo->pages) {
+ kfree(bo);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < count; i++) {
+ bo->pages[i] = alloc_page(GFP_KERNEL);
+ if (!bo->pages[i])
+ goto err;
+ }
+
+ bo->iova = gmu->uncached_iova_base;
+
+ for (i = 0; i < count; i++) {
+ ret = iommu_map(gmu->domain,
+ bo->iova + (PAGE_SIZE * i),
+ page_to_phys(bo->pages[i]), PAGE_SIZE,
+ IOMMU_READ | IOMMU_WRITE);
+
+ if (ret) {
+ dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+
+ for (i = i - 1; i >= 0; i--)
+ iommu_unmap(gmu->domain,
+ bo->iova + (PAGE_SIZE * i),
+ PAGE_SIZE);
+
+ goto err;
+ }
+ }
+
+ bo->virt = vmap(bo->pages, count, VM_IOREMAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!bo->virt)
+ goto err;
+
+ /* Align future IOVA addresses on 1MB boundaries */
+ gmu->uncached_iova_base += ALIGN(size, SZ_1M);
+
+ return bo;
+
+err:
+ for (i = 0; i < count; i++) {
+ if (bo->pages[i])
+ __free_pages(bo->pages[i], 0);
+ }
+
+ kfree(bo->pages);
+ kfree(bo);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+ int ret;
+
+ /*
+ * The GMU address space is hardcoded to treat the range
+ * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
+ * between the GMU and the CPU will live in this space
+ */
+ gmu->uncached_iova_base = 0x60000000;
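+
+ /*
+ * For example, the 16K HFI buffer allocated at probe time lands at
+ * 0x60000000 and the following allocation starts at 0x60100000, since
+ * a6xx_gmu_memory_alloc() advances the base in 1MB-aligned steps.
+ */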
+
+ gmu->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!gmu->domain)
+ return -ENODEV;
+
+ ret = iommu_attach_device(gmu->domain, gmu->dev);
+
+ if (ret) {
+ iommu_domain_free(gmu->domain);
+ gmu->domain = NULL;
+ }
+
+ return ret;
+}
+
+/* Get the list of RPMh voltage levels from cmd-db */
+static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
+{
+ u32 len = cmd_db_read_aux_data_len(id);
+
+ if (!len)
+ return 0;
+
+ if (WARN_ON(len > size))
+ return -EINVAL;
+
+ cmd_db_read_aux_data(id, vals, len);
+
+ /*
+ * The data comes back as an array of unsigned shorts so adjust the
+ * count accordingly
+ */
+ return len >> 1;
+}
+
+/* Return the 'arc-level' for the given frequency */
+static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
+{
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ u32 val = 0;
+
+ if (!freq)
+ return 0;
+
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ if (IS_ERR(opp))
+ return 0;
+
+ np = dev_pm_opp_get_of_node(opp);
+
+ if (np) {
+ of_property_read_u32(np, "qcom,level", &val);
+ of_node_put(np);
+ }
+
+ dev_pm_opp_put(opp);
+
+ return val;
+}
+
+static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
+ unsigned long *freqs, int freqs_count,
+ u16 *pri, int pri_count,
+ u16 *sec, int sec_count)
+{
+ int i, j;
+
+ /* Construct a vote for each frequency */
+ for (i = 0; i < freqs_count; i++) {
+ u8 pindex = 0, sindex = 0;
+ u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+
+ /* Get the primary index that matches the arc level */
+ for (j = 0; j < pri_count; j++) {
+ if (pri[j] >= level) {
+ pindex = j;
+ break;
+ }
+ }
+
+ if (j == pri_count) {
+ dev_err(dev,
+ "Level %u not found in in the RPMh list\n",
+ level);
+ dev_err(dev, "Available levels:\n");
+ for (j = 0; j < pri_count; j++)
+ dev_err(dev, " %u\n", pri[j]);
+
+ return -EINVAL;
+ }
+
+ /*
+ * Look for a level in the secondary list that matches. If
+ * nothing fits, use the maximum non-zero vote
+ */
+
+ for (j = 0; j < sec_count; j++) {
+ if (sec[j] >= level) {
+ sindex = j;
+ break;
+ } else if (sec[j]) {
+ sindex = j;
+ }
+ }
+
+ /* Construct the vote */
+ votes[i] = ((pri[pindex] & 0xffff) << 16) |
+ (sindex << 8) | pindex;
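+
+ /*
+ * Illustrative example: if level 0x80 first matched pri[2] and
+ * sec[1], the vote packed above is (pri[2] << 16) | (1 << 8) | 2.
+ */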
+ }
+
+ return 0;
+}
+
+/*
+ * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
+ * to construct the list of votes on the CPU and send it over. Query the RPMh
+ * voltage levels and build the votes
+ */
+
+static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ u16 gx[16], cx[16], mx[16];
+ u32 gxcount, cxcount, mxcount;
+ int ret;
+
+ /* Get the list of available voltage levels for each component */
+ gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
+ cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
+ mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));
+
+ /* Build the GX votes */
+ ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
+ gmu->gpu_freqs, gmu->nr_gpu_freqs,
+ gx, gxcount, mx, mxcount);
+
+ /* Build the CX votes */
+ ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
+ gmu->gmu_freqs, gmu->nr_gmu_freqs,
+ cx, cxcount, mx, mxcount);
+
+ return ret;
+}
+
+static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
+ u32 size)
+{
+ int count = dev_pm_opp_get_opp_count(dev);
+ struct dev_pm_opp *opp;
+ int i, index = 0;
+ unsigned long freq = 1;
+
+ /*
+ * The OPP table doesn't contain the "off" frequency level so we need to
+ * add 1 to the table size to account for it
+ */
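+ /*
+ * For example (illustrative): an OPP table with 200/400/600 MHz entries
+ * yields freqs = { 0, 200000000, 400000000, 600000000 } and a return
+ * value of 4.
+ */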
+
+ if (WARN(count + 1 > size,
+ "The GMU frequency table is being truncated\n"))
+ count = size - 1;
+
+ /* Set the "off" frequency */
+ freqs[index++] = 0;
+
+ for (i = 0; i < count; i++) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
+ freqs[index++] = freq++;
+ }
+
+ return index;
+}
+
+static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ int ret = 0;
+
+ /*
+ * The GMU handles its own frequency switching so build a list of
+ * available frequencies to send during initialization
+ */
+ ret = dev_pm_opp_of_add_table(gmu->dev);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+ return ret;
+ }
+
+ gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
+ gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
+
+ /*
+ * The GMU also handles GPU frequency switching so build a list
+ * from the GPU OPP table
+ */
+ gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
+ gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
+
+ /* Build the list of RPMh votes that we'll send to the GMU */
+ return a6xx_gmu_rpmh_votes_init(gmu);
+}
+
+static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
+{
+ int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);
+
+ if (ret < 1)
+ return ret;
+
+ gmu->nr_clocks = ret;
+
+ gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
+ gmu->nr_clocks, "gmu");
+
+ return 0;
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name)
+{
+ void __iomem *ret;
+ struct resource *res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, name);
+
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!ret) {
+ dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ret;
+}
+
+static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
+ const char *name, irq_handler_t handler)
+{
+ int irq, ret;
+
+ irq = platform_get_irq_byname(pdev, name);
+
+ ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
+ name, gmu);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+ return ret;
+ }
+
+ disable_irq(irq);
+
+ return irq;
+}
+
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ if (IS_ERR_OR_NULL(gmu->mmio))
+ return;
+
+ pm_runtime_disable(gmu->dev);
+ a6xx_gmu_stop(a6xx_gpu);
+
+ a6xx_gmu_irq_disable(gmu);
+ a6xx_gmu_memory_free(gmu, gmu->hfi);
+
+ iommu_detach_device(gmu->domain, gmu->dev);
+
+ iommu_domain_free(gmu->domain);
+}
+
+int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = of_find_device_by_node(node);
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ gmu->dev = &pdev->dev;
+
+ of_dma_configure(gmu->dev, node, false);
+
+ /* For now, don't do anything fancy until we get our feet under us */
+ gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+
+ pm_runtime_enable(gmu->dev);
+ gmu->gx = devm_regulator_get(gmu->dev, "vdd");
+
+ /* Get the list of clocks */
+ ret = a6xx_gmu_clocks_probe(gmu);
+ if (ret)
+ return ret;
+
+ /* Set up the IOMMU context bank */
+ ret = a6xx_gmu_memory_probe(gmu);
+ if (ret)
+ return ret;
+
+ /* Allocate memory for the HFI queues */
+ gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
+ if (IS_ERR(gmu->hfi))
+ goto err;
+
+ /* Allocate memory for the GMU debug region */
+ gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
+ if (IS_ERR(gmu->debug))
+ goto err;
+
+ /* Map the GMU registers */
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+
+ /* Map the GPU power domain controller registers */
+ gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+
+ if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
+ goto err;
+
+ /* Get the HFI and GMU interrupts */
+ gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
+ gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
+
+ if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
+ goto err;
+
+ /* Set up a tasklet to handle GMU HFI responses */
+ tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
+
+ /* Get the power levels for the GMU and GPU */
+ a6xx_gmu_pwrlevels_probe(gmu);
+
+ /* Set up the HFI queues */
+ a6xx_hfi_init(gmu);
+
+ return 0;
+err:
+ a6xx_gmu_memory_free(gmu, gmu->hfi);
+
+ if (gmu->domain) {
+ iommu_detach_device(gmu->domain, gmu->dev);
+
+ iommu_domain_free(gmu->domain);
+ }
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
new file mode 100644
index 000000000000..d9a386c18799
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_GMU_H_
+#define _A6XX_GMU_H_
+
+#include <linux/interrupt.h>
+#include "msm_drv.h"
+#include "a6xx_hfi.h"
+
+struct a6xx_gmu_bo {
+ void *virt;
+ size_t size;
+ u64 iova;
+ struct page **pages;
+};
+
+/*
+ * These define the different GMU wake up options, i.e. how both the
+ * CPU and the GMU bring up the hardware
+ */
+
+/* The GMU has already been booted and the retention registers are active */
+#define GMU_WARM_BOOT 0
+
+/* The GMU is coming up for the first time or back from a power collapse */
+#define GMU_COLD_BOOT 1
+
+/* The GMU is being soft reset after a fault */
+#define GMU_RESET 2
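+
+/*
+ * a6xx_gmu_resume() distinguishes warm from cold boots by reading
+ * REG_A6XX_GMU_GENERAL_7, which a6xx_gmu_fw_start() sets to 1 when register
+ * retention is turned on.
+ */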
+
+/*
+ * These define the level of control that the GMU has - the higher the number
+ * the more things that the GMU hardware controls on its own.
+ */
+
+/* The GMU does not do any idle state management */
+#define GMU_IDLE_STATE_ACTIVE 0
+
+/* The GMU manages SPTP power collapse */
+#define GMU_IDLE_STATE_SPTP 2
+
+/* The GMU does automatic IFPC (intra-frame power collapse) */
+#define GMU_IDLE_STATE_IFPC 3
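+
+/*
+ * Code paths compare against these levels; for example, when idle_level is
+ * below GMU_IDLE_STATE_SPTP the CPU drives SPTP power collapse itself via
+ * a6xx_sptprac_enable() and a6xx_sptprac_disable().
+ */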
+
+struct a6xx_gmu {
+ struct device *dev;
+
+ void __iomem *mmio;
+ void __iomem *pdc_mmio;
+
+ int hfi_irq;
+ int gmu_irq;
+
+ struct regulator *gx;
+
+ struct iommu_domain *domain;
+ u64 uncached_iova_base;
+
+ int idle_level;
+
+ struct a6xx_gmu_bo *hfi;
+ struct a6xx_gmu_bo *debug;
+
+ int nr_clocks;
+ struct clk_bulk_data *clocks;
+ struct clk *core_clk;
+
+ int nr_gpu_freqs;
+ unsigned long gpu_freqs[16];
+ u32 gx_arc_votes[16];
+
+ int nr_gmu_freqs;
+ unsigned long gmu_freqs[4];
+ u32 cx_arc_votes[4];
+
+ struct a6xx_hfi_queue queues[2];
+
+ struct tasklet_struct hfi_tasklet;
+};
+
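+/*
+ * GMU register offsets below are dword indices, so they are shifted left by
+ * two to form byte offsets into the mapped MMIO region.
+ */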
+static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
+{
+ return msm_readl(gmu->mmio + (offset << 2));
+}
+
+static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ return msm_writel(value, gmu->mmio + (offset << 2));
+}
+
+static inline void pdc_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ return msm_writel(value, gmu->pdc_mmio + (offset << 2));
+}
+
+static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
+{
+ u32 val = gmu_read(gmu, reg);
+
+ val &= ~mask;
+
+ gmu_write(gmu, reg, val | or);
+}
+
+#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
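+
+/*
+ * Example usage (from a6xx_gmu_start()): poll every 100 us, for up to 10 ms,
+ * for the GMU firmware to post its magic init value:
+ *
+ *	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
+ *		val == 0xbabeface, 100, 10000);
+ */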
+
+/*
+ * These are the available OOB (out of band) requests to the GMU where "out of
+ * band" means that the CPU talks to the GMU directly and not through HFI.
+ * Normally this works by writing an ITCM/DTCM register and then triggering an
+ * interrupt (the "request" bit) and waiting for an acknowledgment (the "ack"
+ * bit). The state is cleared by writing the "clear" bit to the GMU interrupt.
+ *
+ * These are used to force the GMU/GPU to stay on during a critical sequence or
+ * for hardware workarounds.
+ */
+
+enum a6xx_gmu_oob_state {
+ GMU_OOB_BOOT_SLUMBER = 0,
+ GMU_OOB_GPU_SET,
+ GMU_OOB_DCVS_SET,
+};
+
+/*
+ * These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
+ */
+
+/*
+ * Let the GMU know that a boot or slumber operation has started. The value in
+ * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
+ * doing
+ */
+#define GMU_OOB_BOOT_SLUMBER_REQUEST 22
+#define GMU_OOB_BOOT_SLUMBER_ACK 30
+#define GMU_OOB_BOOT_SLUMBER_CLEAR 30
+
+/*
+ * Set a new power level for the GPU when the CPU is doing frequency scaling
+ */
+#define GMU_OOB_DCVS_REQUEST 23
+#define GMU_OOB_DCVS_ACK 31
+#define GMU_OOB_DCVS_CLEAR 31
+
+/*
+ * Let the GMU know to not turn off any GPU registers while the CPU is in a
+ * critical section
+ */
+#define GMU_OOB_GPU_SET_REQUEST 16
+#define GMU_OOB_GPU_SET_ACK 24
+#define GMU_OOB_GPU_SET_CLEAR 24
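+
+/*
+ * For example, a GPU_SET request writes (1 << 16) to
+ * REG_A6XX_GMU_HOST2GMU_INTR_SET, polls for (1 << 24) in
+ * REG_A6XX_GMU_GMU2HOST_INTR_INFO and acknowledges by writing (1 << 24) to
+ * REG_A6XX_GMU_GMU2HOST_INTR_CLR - see a6xx_gmu_set_oob().
+ */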
+
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu);
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
+void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+
+void a6xx_hfi_task(unsigned long data);
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
new file mode 100644
index 000000000000..ef68098d2adc
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -0,0 +1,382 @@
+#ifndef A6XX_GMU_XML
+#define A6XX_GMU_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB 0x00800000
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK 0x00400000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK 0x40000000
+#define A6XX_GMU_OOB_DCVS_SET_MASK 0x00800000
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK 0x80000000
+#define A6XX_GMU_OOB_GPU_SET_MASK 0x00040000
+#define A6XX_GMU_OOB_GPU_CHECK_MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK 0x04000000
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK 0x00020000
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK 0x02000000
+#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001
+#define A6XX_HFI_IRQ_DSGQ_MASK 0x00000002
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK 0x00000004
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK 0x00800000
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16
+static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT) & A6XX_HFI_IRQ_GMU_ERR_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_OOB_MASK__MASK 0xff000000
+#define A6XX_HFI_IRQ_OOB_MASK__SHIFT 24
+static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_OOB_MASK__SHIFT) & A6XX_HFI_IRQ_OOB_MASK__MASK;
+}
+#define A6XX_HFI_H2F_IRQ_MASK_BIT 0x00000001
+#define REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x00000080
+
+#define REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x00000081
+
+#define REG_A6XX_GMU_CM3_ITCM_START 0x00000c00
+
+#define REG_A6XX_GMU_CM3_DTCM_START 0x00001c00
+
+#define REG_A6XX_GMU_NMI_CONTROL_STATUS 0x000023f0
+
+#define REG_A6XX_GMU_BOOT_SLUMBER_OPTION 0x000023f8
+
+#define REG_A6XX_GMU_GX_VOTE_IDX 0x000023f9
+
+#define REG_A6XX_GMU_MX_VOTE_IDX 0x000023fa
+
+#define REG_A6XX_GMU_DCVS_ACK_OPTION 0x000023fc
+
+#define REG_A6XX_GMU_DCVS_PERF_SETTING 0x000023fd
+
+#define REG_A6XX_GMU_DCVS_BW_SETTING 0x000023fe
+
+#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff
+
+#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f
+
+#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000
+
+#define REG_A6XX_GMU_CM3_BOOT_CONFIG 0x00005001
+
+#define REG_A6XX_GMU_CM3_FW_BUSY 0x0000501a
+
+#define REG_A6XX_GMU_CM3_FW_INIT_RESULT 0x0000501c
+
+#define REG_A6XX_GMU_CM3_CFG 0x0000502d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x00005040
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x00005041
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1 0x00005042
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x00005044
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x00005045
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L 0x00005046
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H 0x00005047
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L 0x00005048
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H 0x00005049
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L 0x0000504a
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H 0x0000504b
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L 0x0000504c
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H 0x0000504d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L 0x0000504e
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H 0x0000504f
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x000050c0
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE 0x00000001
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE 0x00000002
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE 0x00000004
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK 0x00003c00
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT 10
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK;
+}
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK 0xffffc000
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT 14
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK;
+}
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x000050c1
+
+#define REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x000050c2
+
+#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000004
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000008
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF 0x00000080
+
+#define REG_A6XX_GMU_GPU_NAP_CTRL 0x000050e4
+#define A6XX_GMU_GPU_NAP_CTRL_HW_NAP_ENABLE 0x00000001
+#define A6XX_GMU_GPU_NAP_CTRL_SID__MASK 0x000001f0
+#define A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT 4
+static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT) & A6XX_GMU_GPU_NAP_CTRL_SID__MASK;
+}
+
+#define REG_A6XX_GMU_RPMH_CTRL 0x000050e8
+#define A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE 0x00000001
+#define A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE 0x00000010
+#define A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE 0x00000100
+#define A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE 0x00000200
+#define A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE 0x00000400
+#define A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE 0x00000800
+#define A6XX_GMU_RPMH_CTRL_DDR_MIN_VOTE_ENABLE 0x00001000
+#define A6XX_GMU_RPMH_CTRL_MX_MIN_VOTE_ENABLE 0x00002000
+#define A6XX_GMU_RPMH_CTRL_CX_MIN_VOTE_ENABLE 0x00004000
+#define A6XX_GMU_RPMH_CTRL_GFX_MIN_VOTE_ENABLE 0x00008000
+
+#define REG_A6XX_GMU_RPMH_HYST_CTRL 0x000050e9
+
+#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec
+
+#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_STATUS 0x00005158
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_L 0x00005088
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_H 0x00005089
+
+#define REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE 0x000050c3
+
+#define REG_A6XX_GMU_HFI_CTRL_STATUS 0x00005180
+
+#define REG_A6XX_GMU_HFI_VERSION_INFO 0x00005181
+
+#define REG_A6XX_GMU_HFI_SFR_ADDR 0x00005182
+
+#define REG_A6XX_GMU_HFI_MMAP_ADDR 0x00005183
+
+#define REG_A6XX_GMU_HFI_QTBL_INFO 0x00005184
+
+#define REG_A6XX_GMU_HFI_QTBL_ADDR 0x00005185
+
+#define REG_A6XX_GMU_HFI_CTRL_INIT 0x00005186
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_SET 0x00005190
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_CLR 0x00005191
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_INFO 0x00005192
+#define A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ 0x00000001
+#define A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT 0x00800000
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_MASK 0x00005193
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_SET 0x00005194
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_CLR 0x00005195
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_RAW_INFO 0x00005196
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_0 0x00005197
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_1 0x00005198
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_2 0x00005199
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_3 0x0000519a
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_0 0x0000519b
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_1 0x0000519c
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_2 0x0000519d
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_3 0x0000519e
+
+#define REG_A6XX_GMU_GENERAL_1 0x000051c6
+
+#define REG_A6XX_GMU_GENERAL_7 0x000051cc
+
+#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920
+
+#define REG_A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x00008578
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x00008558
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_0 0x00008580
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_2 0x00027ada
+
+#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x00008957
+
+#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000881d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000881f
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x00008821
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
+
+#define REG_A6XX_GPU_CS_AMP_PERIOD_CTRL 0x0000896d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x0000514d
+
+#define REG_A6XX_GMU_AO_INTERRUPT_EN 0x00009303
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x00009304
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS 0x00009305
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE 0x00000001
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_RSCC_COMP 0x00000002
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_VDROOP 0x00000004
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR 0x00000008
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_DBD_WAKEUP 0x00000010
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR 0x00000020
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK 0x00009306
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL 0x00009309
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x0000930a
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x0000930b
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x0000930c
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB 0x00800000
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x0000930d
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x0000930e
+
+#define REG_A6XX_GMU_AO_AHB_FENCE_CTRL 0x00009310
+
+#define REG_A6XX_GMU_AHB_FENCE_STATUS 0x00009313
+
+#define REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x00009315
+
+#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
+
+#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00008c04
+
+#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307
+
+#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_0 0x00009311
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_1 0x00009312
+
+#define REG_A6XX_GPU_CC_GX_GDSCR 0x00009c03
+
+#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
+
+#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00008c08
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00008c09
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x00008c0a
+
+#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x00008c0b
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x00008c0d
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x00008c0e
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00008c82
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00008c83
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00008c89
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x00008c8c
+
+#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00008d00
+
+#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00008d01
+
+#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00008d80
+
+#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00008f46
+
+#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000090ae
+
+#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00009216
+
+#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000937e
+
+
+#endif /* A6XX_GMU_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
new file mode 100644
index 000000000000..c629f742a1d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Check that the GMU is idle */
+ if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
+ return false;
+
+ /* Check that the CX master is idle */
+ if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
+ ~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
+ return false;
+
+ return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
+}
+
+bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a6xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+}
+
+static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ /* Invalidate CCU depth and color */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (priv->lastctx == ctx)
+ break;
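+ /* fall through */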
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ break;
+ }
+ }
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to the memory and then triggers the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
+
+ a6xx_flush(gpu, ring);
+}
+
+static const struct {
+ u32 offset;
+ u32 value;
+} a6xx_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
+};
+
+static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned int i;
+ u32 val;
+
+ val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
+
+ /* Don't re-program the registers if they are already correct */
+ if ((!state && !val) || (state && (val == 0x8aa8aa02)))
+ return;
+
+ /* Disable SP clock before programming HWCG registers */
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
+ gpu_write(gpu, a6xx_hwcg[i].offset,
+ state ? a6xx_hwcg[i].value : 0);
+
+ /* Enable SP clock */
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
+
+ gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
+}
+
+static int a6xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002f);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+
+ /* Pad rest of the cmds with 0's */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ a6xx_flush(gpu, ring);
+ return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a6xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (!a6xx_gpu->sqe_bo) {
+ a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
+
+ if (IS_ERR(a6xx_gpu->sqe_bo)) {
+ int ret = PTR_ERR(a6xx_gpu->sqe_bo);
+
+ a6xx_gpu->sqe_bo = NULL;
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "Could not allocate SQE ucode: %d\n", ret);
+
+ return ret;
+ }
+ }
+
+ gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
+ REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);
+
+ return 0;
+}
+
+#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_IB2 | \
+ A6XX_RBBM_INT_0_MASK_CP_IB1 | \
+ A6XX_RBBM_INT_0_MASK_CP_RB | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ /* Make sure the GMU keeps the GPU on while we set it up */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+
+ /*
+ * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* enable hardware clockgating */
+ a6xx_set_hwcg(gpu, true);
+
+ /* VBIF start */
+ gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+ gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
+ gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
+ gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
+
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+
+ gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
+ gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+
+ /* Setting the mem pool size */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
+
+ /* Setting the primFifo thresholds default values */
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
+
+ /* Set the AHB default slave response to "ERROR" */
+ gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
+
+ /* FIXME: not sure if this should live here or in a6xx_gmu.c */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
+ 0xff000000);
+ gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
+ 0xff, 0x20);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
+ 0x01);
+
+ gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0x1fffff);
+
+ gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
+
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
+ A6XX_PROTECT_RDONLY(0x600, 0x51));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
+ A6XX_PROTECT_RDONLY(0xfc00, 0x3));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
+ A6XX_PROTECT_RDONLY(0x0, 0x4f9));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
+ A6XX_PROTECT_RDONLY(0x501, 0xa));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
+ A6XX_PROTECT_RDONLY(0x511, 0x44));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
+ A6XX_PROTECT_RW(0xbe20, 0x11f3));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
+ A6XX_PROTECT_RDONLY(0x8d0, 0x23));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(25),
+ A6XX_PROTECT_RDONLY(0x980, 0x4));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(26), A6XX_PROTECT_RW(0xa630, 0x0));
+
+ /* Enable interrupts */
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ goto out;
+
+ ret = a6xx_ucode_init(gpu);
+ if (ret)
+ goto out;
+
+ /* Always come up on rb 0 */
+ a6xx_gpu->cur_ring = gpu->rb[0];
+
+ /* Enable the SQE to start the CP engine */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
+
+ ret = a6xx_cp_init(gpu);
+ if (ret)
+ goto out;
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
+out:
+ /*
+ * Tell the GMU that we are done touching the GPU and it can start power
+ * management
+ */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ /* Take the GMU out of its special boot mode */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+static void a6xx_dump(struct msm_gpu *gpu)
+{
+ dev_info(&gpu->pdev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+#define VBIF_RESET_ACK_TIMEOUT 100
+#define VBIF_RESET_ACK_MASK 0x00f0
+
+static void a6xx_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++)
+ dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
+
+ if (hang_debug)
+ a6xx_dump(gpu);
+
+ /*
+ * Turn off keep alive that might have been enabled by the hang
+ * interrupt
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->pm_resume(gpu);
+
+ msm_gpu_hw_init(gpu);
+}
+
+static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
+{
+ struct msm_gpu *gpu = arg;
+
+ pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
+ iova, flags,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
+
+ return -EFAULT;
+}
+
+static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
+
+ if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
+ val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A6XX_CP_INT_CP_UCODE_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP ucode error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
+
+ if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 20) ? "READ" : "WRITE",
+ (val & 0x3ffff), val);
+ }
+
+ if (status & A6XX_CP_INT_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
+
+ if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
+}
+
+static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ /*
+ * Force the GPU to stay on until after we finish
+ * collecting information
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, ring ? ring->seqno : 0,
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
+ gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
+ gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
+ gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
+
+ gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
+ a6xx_fault_detect_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a6xx_cp_hw_err_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
+
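+ /* CACHE_FLUSH_TS fires once the CP has written out a completion timestamp - retire finished submits */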
+ if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_A6XX_CP_RB_RPTR_ADDR_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ REG_A6XX_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
+};
+
+static const u32 a6xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+ 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+ 0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
+ 0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
+ 0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
+ 0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
+ 0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
+ 0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
+ 0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
+ 0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
+ 0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
+ 0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
+ 0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
+ 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
+ 0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
+ 0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
+ 0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
+ 0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
+ 0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
+ 0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
+ 0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
+ 0xa610, 0xa617, 0xa630, 0xa630,
+ ~0
+};
+
+static int a6xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ ret = a6xx_gmu_resume(a6xx_gpu);
+
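+ /* The GMU resume power-cycles the GPU, so a full hw_init is needed before the next use (assumed) */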
+ gpu->needs_hw_init = true;
+
+ return ret;
+}
+
+static int a6xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /*
+ * Make sure the GMU is idle before continuing (because some transitions
+ * may use VBIF)
+ */
+ a6xx_gmu_wait_for_idle(a6xx_gpu);
+
+ /* Clear the VBIF pipe before shutting down */
+ /* FIXME: This accesses the GPU - do we need to make sure it is on? */
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return a6xx_gmu_stop(a6xx_gpu);
+}
+
+static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Force the GPU power on so we can read this register */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ adreno_show(gpu, state, p);
+}
+#endif
+
+static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ return a6xx_gpu->cur_ring;
+}
+
+static void a6xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (a6xx_gpu->sqe_bo) {
+ if (a6xx_gpu->sqe_iova)
+ msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a6xx_gpu->sqe_bo);
+ }
+
+ a6xx_gmu_remove(a6xx_gpu);
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a6xx_gpu);
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a6xx_hw_init,
+ .pm_suspend = a6xx_pm_suspend,
+ .pm_resume = a6xx_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a6xx_submit,
+ .flush = a6xx_flush,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = a6xx_show,
+#endif
+ },
+ .get_timestamp = a6xx_get_timestamp,
+};
+
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct device_node *node;
+ struct a6xx_gpu *a6xx_gpu;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
+ if (!a6xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a6xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ adreno_gpu->registers = a6xx_registers;
+ adreno_gpu->reg_offsets = a6xx_register_offsets;
+
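+ /* A single ringbuffer for now - a6xx always comes up on rb 0 (see a6xx_hw_init) */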
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /* Check if there is a GMU phandle and set it up */
+ node = of_parse_phandle(pdev->dev.of_node, "gmu", 0);
+
+ /* FIXME: How do we gracefully handle this? */
+ BUG_ON(!node);
+
+ ret = a6xx_gmu_probe(a6xx_gpu, node);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (gpu->aspace)
+ msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
+ a6xx_fault_handler);
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
new file mode 100644
index 000000000000..dd69e5b0e692
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef __A6XX_GPU_H__
+#define __A6XX_GPU_H__
+
+
+#include "adreno_gpu.h"
+#include "a6xx.xml.h"
+
+#include "a6xx_gmu.h"
+
+extern bool hang_debug;
+
+struct a6xx_gpu {
+ struct adreno_gpu base;
+
+ struct drm_gem_object *sqe_bo;
+ uint64_t sqe_iova;
+
+ struct msm_ringbuffer *cur_ring;
+
+ struct a6xx_gmu gmu;
+};
+
+#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ */
+#define A6XX_PROTECT_RW(_reg, _len) \
+ ((1 << 31) | \
+ (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
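+/* (Field layout, inferred from the masks: length in bits [31:18], base register in [17:0], bit 31 = also block reads) */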
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define A6XX_PROTECT_RDONLY(_reg, _len) \
+ ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+
+
+int a6xx_gmu_resume(struct a6xx_gpu *gpu);
+int a6xx_gmu_stop(struct a6xx_gpu *gpu);
+
+int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu);
+
+int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu);
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
+
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+
+int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
+#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
new file mode 100644
index 000000000000..f19ef4cb6ea4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/list.h>
+
+#include "a6xx_gmu.h"
+#include "a6xx_gmu.xml.h"
+
+#define HFI_MSG_ID(val) [val] = #val
+
+static const char * const a6xx_hfi_msg_id[] = {
+ HFI_MSG_ID(HFI_H2F_MSG_INIT),
+ HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
+ HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_TEST),
+};
+
+static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
+ u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, hdr, index = header->read_index;
+
+ if (header->read_index == header->write_index) {
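+ /* The queue is empty; rx_request presumably asks the GMU to interrupt us when new data arrives */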
+ header->rx_request = 1;
+ return 0;
+ }
+
+ hdr = queue->data[index];
+
+ /*
+ * If we are to assume that the GMU firmware is in fact a rational actor
+ * and is programmed to not send us a larger response than we expect
+ * then we can also assume that an unexpectedly large header size is due
+ * to memory corruption and/or hardware failure. In this
+ * case the only reasonable course of action is to BUG() to help harden
+ * the failure.
+ */
+
+ BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);
+
+ for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
+ data[i] = queue->data[index];
+ index = (index + 1) % header->size;
+ }
+
+ header->read_index = index;
+ return HFI_HEADER_SIZE(hdr);
+}
+
+static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, space, index = header->write_index;
+
+ spin_lock(&queue->lock);
+
+ space = CIRC_SPACE(header->write_index, header->read_index,
+ header->size);
+ if (space < dwords) {
+ header->dropped++;
+ spin_unlock(&queue->lock);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < dwords; i++) {
+ queue->data[index] = data[i];
+ index = (index + 1) % header->size;
+ }
+
+ header->write_index = index;
+ spin_unlock(&queue->lock);
+
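+ /* Ring the doorbell - raise a host-to-GMU interrupt so the firmware notices the new message */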
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
+ return 0;
+}
+
+struct a6xx_hfi_response {
+ u32 id;
+ u32 seqnum;
+ struct list_head node;
+ struct completion complete;
+
+ u32 error;
+ u32 payload[16];
+};
+
+/*
+ * Incoming HFI ack messages can come in out of order so we need to store all
+ * the pending messages on a list until they are handled.
+ */
+static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(hfi_ack_lock);
+static LIST_HEAD(hfi_ack_list);
+
+static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_response *msg)
+{
+ struct a6xx_hfi_response *resp;
+ u32 id, seqnum;
+
+ /* msg->ret_header contains the header of the message being acked */
+ id = HFI_HEADER_ID(msg->ret_header);
+ seqnum = HFI_HEADER_SEQNUM(msg->ret_header);
+
+ spin_lock(&hfi_ack_lock);
+ list_for_each_entry(resp, &hfi_ack_list, node) {
+ if (resp->id == id && resp->seqnum == seqnum) {
+ resp->error = msg->error;
+ memcpy(resp->payload, msg->payload,
+ sizeof(resp->payload));
+
+ complete(&resp->complete);
+ spin_unlock(&hfi_ack_lock);
+ return;
+ }
+ }
+ spin_unlock(&hfi_ack_lock);
+
+ dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
+}
+
+static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_response *msg)
+{
+ struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;
+
+ dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
+}
+
+void a6xx_hfi_task(unsigned long data)
+{
+ struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ struct a6xx_hfi_msg_response resp;
+
+ for (;;) {
+ u32 id;
+ int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+ sizeof(resp) >> 2);
+
+ /* Returns the number of dwords copied or negative on error */
+ if (ret <= 0) {
+ if (ret < 0)
+ dev_err(gmu->dev,
+ "Unable to read the HFI message queue\n");
+ break;
+ }
+
+ id = HFI_HEADER_ID(resp.header);
+
+ if (id == HFI_F2H_MSG_ACK)
+ a6xx_hfi_handle_ack(gmu, &resp);
+ else if (id == HFI_F2H_MSG_ERROR)
+ a6xx_hfi_handle_error(gmu, &resp);
+ }
+}
+
+static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
+ void *data, u32 size, u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
+ struct a6xx_hfi_response resp = { 0 };
+ int ret, dwords = size >> 2;
+ u32 seqnum;
+
+ seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
+
+ /* First dword of the message is the message header - fill it in */
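+ /* Layout per the HFI_HEADER_* macros: seqnum in bits [31:20], size in dwords in [15:8], id in [7:0]; the type sits at bit 16 */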
+ *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
+ (dwords << 8) | id;
+
+ init_completion(&resp.complete);
+ resp.id = id;
+ resp.seqnum = seqnum;
+
+ spin_lock_bh(&hfi_ack_lock);
+ list_add_tail(&resp.node, &hfi_ack_list);
+ spin_unlock_bh(&hfi_ack_lock);
+
+ ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to send message %s id %d\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ goto out;
+ }
+
+ /* Wait up to 5 seconds for the response */
+ ret = wait_for_completion_timeout(&resp.complete,
+ msecs_to_jiffies(5000));
+ if (!ret) {
+ dev_err(gmu->dev,
+ "Message %s id %d timed out waiting for response\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
+ }
+
+out:
+ spin_lock_bh(&hfi_ack_lock);
+ list_del(&resp.node);
+ spin_unlock_bh(&hfi_ack_lock);
+
+ if (ret)
+ return ret;
+
+ if (resp.error) {
+ dev_err(gmu->dev, "Message %s id %d returned error %d\n",
+ a6xx_hfi_msg_id[id], seqnum, resp.error);
+ return -EINVAL;
+ }
+
+ if (payload && payload_size) {
+ int copy = min_t(u32, payload_size, sizeof(resp.payload));
+
+ memcpy(payload, resp.payload, copy);
+ }
+
+ return 0;
+}
+
+static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
+{
+ struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
+
+ msg.dbg_buffer_addr = (u32) gmu->debug->iova;
+ msg.dbg_buffer_size = (u32) gmu->debug->size;
+ msg.boot_state = boot_state;
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
+{
+ struct a6xx_hfi_msg_fw_version msg = { 0 };
+
+ /* Currently supporting version 1.1 */
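+ /* (Assumed encoding: major version in the nibble at bit 28, minor at bit 16) */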
+ msg.supported_version = (1 << 28) | (1 << 16);
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
+ version, sizeof(*version));
+}
+
+static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
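+ /* Send each level's ARC vote plus its frequency in kHz (the gmu tables presumably store Hz) */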
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_bw_table msg = { 0 };
+
+ /*
+ * The sdm845 GMU doesn't do bus frequency scaling on its own but it
+ * does need at least one entry in the list because it might be accessed
+ * when the GMU is shutting down. Send a single "off" entry.
+ */
+
+ msg.bw_level_num = 1;
+
+ msg.ddr_cmds_num = 3;
+ msg.ddr_wait_bitmask = 0x07;
+
+ msg.ddr_cmds_addrs[0] = 0x50000;
+ msg.ddr_cmds_addrs[1] = 0x5005c;
+ msg.ddr_cmds_addrs[2] = 0x5000c;
+
+ msg.ddr_cmds_data[0][0] = 0x40000000;
+ msg.ddr_cmds_data[0][1] = 0x40000000;
+ msg.ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes. They are used, but the values for the
+ * sdm845 GMU are known and fixed, so we can hard code them.
+ */
+
+ msg.cnoc_cmds_num = 3;
+ msg.cnoc_wait_bitmask = 0x05;
+
+ msg.cnoc_cmds_addrs[0] = 0x50034;
+ msg.cnoc_cmds_addrs[1] = 0x5007c;
+ msg.cnoc_cmds_addrs[2] = 0x5004c;
+
+ msg.cnoc_cmds_data[0][0] = 0x40000000;
+ msg.cnoc_cmds_data[0][1] = 0x00000000;
+ msg.cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg.cnoc_cmds_data[1][0] = 0x60000001;
+ msg.cnoc_cmds_data[1][1] = 0x20000001;
+ msg.cnoc_cmds_data[1][2] = 0x60000001;
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_test msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_get_fw_version(gmu, NULL);
+ if (ret)
+ return ret;
+
+ /*
+ * We have to exchange version numbers per the boot sequence, but at this
+ * point the kernel driver doesn't need to know the exact version of
+ * the GMU firmware
+ */
+
+ ret = a6xx_hfi_send_perf_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ /*
+ * Let the GMU know that there won't be any more HFI messages until next
+ * boot
+ */
+ a6xx_hfi_send_test(gmu);
+
+ return 0;
+}
+
+void a6xx_hfi_stop(struct a6xx_gmu *gmu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+
+ if (!queue->header)
+ continue;
+
+ if (queue->header->read_index != queue->header->write_index)
+ dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+
+ queue->header->read_index = 0;
+ queue->header->write_index = 0;
+ }
+}
+
+static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
+ struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
+ u32 id)
+{
+ spin_lock_init(&queue->lock);
+ queue->header = header;
+ queue->data = virt;
+ atomic_set(&queue->seqnum, 0);
+
+ /* Set up the shared memory header */
+ header->iova = iova;
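+ /* type = 10 in bits [15:8] (presumably marking an HFI queue), queue id in the low byte */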
+ header->type = 10 << 8 | id;
+ header->status = 1;
+ header->size = SZ_4K >> 2;
+ header->msg_size = 0;
+ header->dropped = 0;
+ header->rx_watermark = 1;
+ header->tx_watermark = 1;
+ header->rx_request = 1;
+ header->tx_request = 0;
+ header->read_index = 0;
+ header->write_index = 0;
+}
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gmu_bo *hfi = gmu->hfi;
+ struct a6xx_hfi_queue_table_header *table = hfi->virt;
+ struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
+ u64 offset;
+ int table_size;
+
+ /*
+ * The table size is the size of the table header plus all of the queue
+ * headers
+ */
+ table_size = sizeof(*table);
+ table_size += (ARRAY_SIZE(gmu->queues) *
+ sizeof(struct a6xx_hfi_queue_header));
+
+ table->version = 0;
+ table->size = table_size;
+ /* First queue header is located immediately after the table header */
+ table->qhdr0_offset = sizeof(*table) >> 2;
+ table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
+ table->num_queues = ARRAY_SIZE(gmu->queues);
+ table->active_queues = ARRAY_SIZE(gmu->queues);
+
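+ /* The queues themselves live in 4K pages following the first page, which holds the table and queue headers */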
+ /* Command queue */
+ offset = SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
+ hfi->iova + offset, 0);
+
+ /* GMU response queue */
+ offset += SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
+ hfi->iova + offset, 4);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
new file mode 100644
index 000000000000..60d1319fa44f
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_HFI_H_
+#define _A6XX_HFI_H_
+
+struct a6xx_hfi_queue_table_header {
+ u32 version;
+ u32 size; /* Size of the queue table in dwords */
+ u32 qhdr0_offset; /* Offset of the first queue header */
+ u32 qhdr_size; /* Size of the queue headers */
+ u32 num_queues; /* Number of total queues */
+ u32 active_queues; /* Number of active queues */
+};
+
+struct a6xx_hfi_queue_header {
+ u32 status;
+ u32 iova;
+ u32 type;
+ u32 size;
+ u32 msg_size;
+ u32 dropped;
+ u32 rx_watermark;
+ u32 tx_watermark;
+ u32 rx_request;
+ u32 tx_request;
+ u32 read_index;
+ u32 write_index;
+};
+
+struct a6xx_hfi_queue {
+ struct a6xx_hfi_queue_header *header;
+ spinlock_t lock;
+ u32 *data;
+ atomic_t seqnum;
+};
+
+/* This is the outgoing queue to the GMU */
+#define HFI_COMMAND_QUEUE 0
+
+/* This is the incoming response queue from the GMU */
+#define HFI_RESPONSE_QUEUE 1
+
+#define HFI_HEADER_ID(msg) ((msg) & 0xff)
+#define HFI_HEADER_SIZE(msg) (((msg) >> 8) & 0xff)
+#define HFI_HEADER_SEQNUM(msg) (((msg) >> 20) & 0xfff)
+
+/* FIXME: Do we need this or can we use ARRAY_SIZE? */
+#define HFI_RESPONSE_PAYLOAD_SIZE 16
+
+/* HFI message types */
+
+#define HFI_MSG_CMD 0
+#define HFI_MSG_ACK 2
+
+#define HFI_F2H_MSG_ACK 126
+
+struct a6xx_hfi_msg_response {
+ u32 header;
+ u32 ret_header;
+ u32 error;
+ u32 payload[HFI_RESPONSE_PAYLOAD_SIZE];
+};
+
+#define HFI_F2H_MSG_ERROR 100
+
+struct a6xx_hfi_msg_error {
+ u32 header;
+ u32 code;
+ u32 payload[2];
+};
+
+#define HFI_H2F_MSG_INIT 0
+
+struct a6xx_hfi_msg_gmu_init_cmd {
+ u32 header;
+ u32 seg_id;
+ u32 dbg_buffer_addr;
+ u32 dbg_buffer_size;
+ u32 boot_state;
+};
+
+#define HFI_H2F_MSG_FW_VERSION 1
+
+struct a6xx_hfi_msg_fw_version {
+ u32 header;
+ u32 supported_version;
+};
+
+#define HFI_H2F_MSG_PERF_TABLE 4
+
+struct perf_level {
+ u32 vote;
+ u32 freq;
+};
+
+struct a6xx_hfi_msg_perf_table {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
+#define HFI_H2F_MSG_BW_TABLE 3
+
+struct a6xx_hfi_msg_bw_table {
+ u32 header;
+ u32 bw_level_num;
+ u32 cnoc_cmds_num;
+ u32 ddr_cmds_num;
+ u32 cnoc_wait_bitmask;
+ u32 ddr_wait_bitmask;
+ u32 cnoc_cmds_addrs[6];
+ u32 cnoc_cmds_data[2][6];
+ u32 ddr_cmds_addrs[8];
+ u32 ddr_cmds_data[16][8];
+};
+
+#define HFI_H2F_MSG_TEST 5
+
+struct a6xx_hfi_msg_test {
+ u32 header;
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index b634cf71352b..5dace1350810 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -44,6 +46,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+enum chip {
+ A2XX = 0,
+ A3XX = 0,
+ A4XX = 0,
+ A5XX = 0,
+ A6XX = 0,
+};
+
enum adreno_pa_su_sc_draw {
PC_DRAW_POINTS = 0,
PC_DRAW_LINES = 1,
@@ -181,6 +191,12 @@ enum a3xx_rb_blend_opcode {
BLEND_MAX_DST_SRC = 4,
};
+enum a4xx_tess_spacing {
+ EQUAL_SPACING = 0,
+ ODD_SPACING = 2,
+ EVEN_SPACING = 3,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 0ae5ace65462..7d3e9a129ac7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -35,6 +35,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 0, 6, 0),
@@ -45,6 +46,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
@@ -55,6 +57,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 3, 0, ANY_ID),
@@ -65,6 +68,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a330_pfp.fw",
},
.gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
@@ -75,6 +79,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 3, 0, ANY_ID),
@@ -85,6 +90,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(5, 3, 0, 2),
@@ -96,10 +102,25 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
},
.gmem = SZ_1M,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 3, 0, ANY_ID),
+ .revn = 630,
+ .name = "A630",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .init = a6xx_gpu_init,
},
};
@@ -116,6 +137,8 @@ MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
+MODULE_FIRMWARE("qcom/a630_sqe.fw");
+MODULE_FIRMWARE("qcom/a630_gmu.bin");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -144,6 +167,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct msm_gpu *gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
int ret;
if (pdev)
@@ -154,11 +178,31 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return NULL;
}
- pm_runtime_get_sync(&pdev->dev);
+ adreno_gpu = to_adreno_gpu(gpu);
+
+ /*
+ * The number one reason for HW init to fail is if the firmware isn't
+ * loaded yet. Try that first and don't bother continuing on
+ * otherwise
+ */
+
+ ret = adreno_load_fw(adreno_gpu);
+ if (ret)
+ return NULL;
+
+ /* Make sure pm runtime is active and reset any previous errors */
+ pm_runtime_set_active(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+ return NULL;
+ }
+
mutex_lock(&dev->struct_mutex);
ret = msm_gpu_hw_init(gpu);
mutex_unlock(&dev->struct_mutex);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
@@ -316,6 +360,7 @@ static int adreno_suspend(struct device *dev)
#endif
static const struct dev_pm_ops adreno_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 17d0506d058c..da1363a0c54d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -17,7 +17,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/ascii85.h>
+#include <linux/kernel.h>
#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -70,10 +73,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
struct drm_device *drm = adreno_gpu->base.dev;
const struct firmware *fw = NULL;
- char newname[strlen("qcom/") + strlen(fwname) + 1];
+ char *newname;
int ret;
- sprintf(newname, "qcom/%s", fwname);
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+ if (!newname)
+ return ERR_PTR(-ENOMEM);
/*
* Try first to load from qcom/$fwfile using a direct load (to avoid
@@ -87,11 +92,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
@@ -106,11 +112,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s from legacy location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
fwname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
@@ -126,19 +133,23 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
dev_err(drm->dev, "failed to load %s\n", fwname);
- return ERR_PTR(-ENOENT);
+ fw = ERR_PTR(-ENOENT);
+out:
+ kfree(newname);
+ return fw;
}
-static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
+int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
int i;
@@ -368,40 +379,185 @@ bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return false;
}
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i, count = 0;
+
+ kref_init(&state->ref);
+
+ ktime_get_real_ts64(&state->time);
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ int size = 0, j;
+
+ state->ring[i].fence = gpu->rb[i]->memptrs->fence;
+ state->ring[i].iova = gpu->rb[i]->iova;
+ state->ring[i].seqno = gpu->rb[i]->seqno;
+ state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
+ state->ring[i].wptr = get_wptr(gpu->rb[i]);
+
+ /* Copy at least 'wptr' dwords of the data */
+ size = state->ring[i].wptr;
+
+ /* After wptr find the last non zero dword to save space */
+ for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
+ if (gpu->rb[i]->start[j])
+ size = j + 1;
+
+ if (size) {
+ state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+ if (state->ring[i].data) {
+ memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+ state->ring[i].data_size = size << 2;
+ }
+ }
+ }
+
+ /* Count the number of registers */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
+ count += adreno_gpu->registers[i + 1] -
+ adreno_gpu->registers[i] + 1;
+
+ state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
+ if (state->registers) {
+ int pos = 0;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 start = adreno_gpu->registers[i];
+ u32 end = adreno_gpu->registers[i + 1];
+ u32 addr;
+
+ for (addr = start; addr <= end; addr++) {
+ state->registers[pos++] = addr;
+ state->registers[pos++] = gpu_read(gpu, addr);
+ }
+ }
+
+ state->nr_registers = count;
+ }
+
+ return 0;
+}
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state)
+{
int i;
- seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
+ for (i = 0; i < ARRAY_SIZE(state->ring); i++)
+ kfree(state->ring[i].data);
+
+ for (i = 0; state->bos && i < state->nr_bos; i++)
+ kvfree(state->bos[i].data);
+
+ kfree(state->bos);
+ kfree(state->comm);
+ kfree(state->cmd);
+ kfree(state->registers);
+}
+
+static void adreno_gpu_state_kref_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+
+ adreno_gpu_state_destroy(state);
+ kfree(state);
+}
+
+int adreno_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+
+static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+{
+ char out[ASCII85_BUFSZ];
+ long l, datalen, i;
+
+ if (!ptr || !len)
+ return;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will any data
+ * completely fill the entire allocated size of the buffer
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++) {
+ if (ptr[i])
+ datalen = (i << 2) + 1;
+ }
+
+ /* Skip printing the object if it is empty */
+ if (datalen == 0)
+ return;
+
+ l = ascii85_encode_len(datalen);
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(ptr[i], out));
+
+ drm_puts(p, "\n");
+}
+
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- for (i = 0; i < gpu->nr_rings; i++) {
- struct msm_ringbuffer *ring = gpu->rb[i];
+ drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
- seq_printf(m, "rb %d: fence: %d/%d\n", i,
- ring->memptrs->fence, ring->seqno);
+ drm_puts(p, "ringbuffer:\n");
- seq_printf(m, " rptr: %d\n",
- get_rptr(adreno_gpu, ring));
- seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ for (i = 0; i < gpu->nr_rings; i++) {
+ drm_printf(p, " - id: %d\n", i);
+ drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
+ drm_printf(p, " last-fence: %d\n", state->ring[i].seqno);
+ drm_printf(p, " retired-fence: %d\n", state->ring[i].fence);
+ drm_printf(p, " rptr: %d\n", state->ring[i].rptr);
+ drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
+ drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
+
+ adreno_show_object(p, state->ring[i].data,
+ state->ring[i].data_size);
}
- /* dump these out in a form that can be parsed by demsm: */
- seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
- for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
- uint32_t start = adreno_gpu->registers[i];
- uint32_t end = adreno_gpu->registers[i+1];
- uint32_t addr;
+ if (state->bos) {
+ drm_puts(p, "bos:\n");
- for (addr = start; addr <= end; addr++) {
- uint32_t val = gpu_read(gpu, addr);
- seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
+ for (i = 0; i < state->nr_bos; i++) {
+ drm_printf(p, " - iova: 0x%016llx\n",
+ state->bos[i].iova);
+ drm_printf(p, " size: %zd\n", state->bos[i].size);
+
+ adreno_show_object(p, state->bos[i].data,
+ state->bos[i].size);
}
}
+
+ drm_puts(p, "registers:\n");
+
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
}
#endif
@@ -565,7 +721,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_get_pwrlevels(&pdev->dev, gpu);
- pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index d6b0e7b813f4..de6e6ee42fba 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -50,7 +50,9 @@ enum adreno_regs {
enum {
ADRENO_FW_PM4 = 0,
+ ADRENO_FW_SQE = 0, /* a6xx */
ADRENO_FW_PFP = 1,
+ ADRENO_FW_GMU = 1, /* a6xx */
ADRENO_FW_GPMU = 2,
ADRENO_FW_MAX,
};
@@ -84,6 +86,7 @@ struct adreno_info {
enum adreno_quirks quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
const char *zapfw;
+ u32 inactive_period;
};
const struct adreno_info *adreno_info(struct adreno_rev rev);
@@ -214,8 +217,9 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
@@ -226,7 +230,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+int adreno_load_fw(struct adreno_gpu *adreno_gpu);
+void adreno_gpu_state_destroy(struct msm_gpu_state *state);
+
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
+int adreno_gpu_state_put(struct msm_gpu_state *state);
/* ringbuffer helpers (the parts that are adreno specific) */
@@ -328,6 +337,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
enum adreno_regs lo, enum adreno_regs hi, u64 data)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index fb605a3534cf..03a91e10b310 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -71,7 +73,8 @@ enum vgt_event_type {
FLUSH_SO_1 = 18,
FLUSH_SO_2 = 19,
FLUSH_SO_3 = 20,
- UNK_19 = 25,
+ PC_CCU_INVALIDATE_DEPTH = 24,
+ PC_CCU_INVALIDATE_COLOR = 25,
UNK_1C = 28,
UNK_1D = 29,
BLIT = 30,
@@ -199,9 +202,12 @@ enum adreno_pm4_type3_packets {
CP_WAIT_MEM_WRITES = 18,
CP_COND_REG_EXEC = 71,
CP_MEM_TO_REG = 66,
+ CP_EXEC_CS_INDIRECT = 65,
CP_EXEC_CS = 51,
CP_PERFCOUNTER_ACTION = 80,
CP_SMMU_TABLE_UPDATE = 83,
+ CP_SET_MARKER = 101,
+ CP_SET_PSEUDO_REG = 86,
CP_CONTEXT_REG_BUNCH = 92,
CP_YIELD_ENABLE = 28,
CP_SKIP_IB2_ENABLE_GLOBAL = 29,
@@ -215,7 +221,10 @@ enum adreno_pm4_type3_packets {
CP_COMPUTE_CHECKPOINT = 110,
CP_MEM_TO_MEM = 115,
CP_BLIT = 44,
- CP_UNK_39 = 57,
+ CP_REG_TEST = 57,
+ CP_SET_MODE = 99,
+ CP_LOAD_STATE6_GEOM = 50,
+ CP_LOAD_STATE6_FRAG = 52,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -224,6 +233,11 @@ enum adreno_pm4_type3_packets {
IN_INCR_UPDT_STATE = 85,
IN_INCR_UPDT_CONST = 86,
IN_INCR_UPDT_INSTR = 87,
+ PKT4 = 4,
+ CP_UNK_A6XX_14 = 20,
+ CP_UNK_A6XX_36 = 54,
+ CP_UNK_A6XX_55 = 85,
+ UNK_A6XX_6D = 109,
};
enum adreno_state_block {
@@ -278,6 +292,33 @@ enum a4xx_state_src {
SS4_INDIRECT = 2,
};
+enum a6xx_state_block {
+ SB6_VS_TEX = 0,
+ SB6_HS_TEX = 1,
+ SB6_DS_TEX = 2,
+ SB6_GS_TEX = 3,
+ SB6_FS_TEX = 4,
+ SB6_CS_TEX = 5,
+ SB6_VS_SHADER = 8,
+ SB6_HS_SHADER = 9,
+ SB6_DS_SHADER = 10,
+ SB6_GS_SHADER = 11,
+ SB6_FS_SHADER = 12,
+ SB6_CS_SHADER = 13,
+ SB6_SSBO = 14,
+ SB6_CS_SSBO = 15,
+};
+
+enum a6xx_state_type {
+ ST6_SHADER = 0,
+ ST6_CONSTANTS = 1,
+};
+
+enum a6xx_state_src {
+ SS6_DIRECT = 0,
+ SS6_INDIRECT = 2,
+};
+
enum a4xx_index_size {
INDEX4_SIZE_8_BIT = 0,
INDEX4_SIZE_16_BIT = 1,
@@ -300,6 +341,7 @@ enum render_mode_cmd {
GMEM = 3,
BLIT2D = 5,
BLIT2DSCALE = 7,
+ END2D = 8,
};
enum cp_blit_cmd {
@@ -308,6 +350,22 @@ enum cp_blit_cmd {
BLIT_OP_SCALE = 3,
};
+enum a6xx_render_mode {
+ RM6_BYPASS = 1,
+ RM6_BINNING = 2,
+ RM6_GMEM = 4,
+ RM6_BLIT2D = 5,
+ RM6_RESOLVE = 6,
+};
+
+enum pseudo_reg {
+ SMMU_INFO = 0,
+ NON_SECURE_SAVE_ADDR = 1,
+ SECURE_SAVE_ADDR = 2,
+ NON_PRIV_SAVE_ADDR = 3,
+ COUNTER = 4,
+};
+
#define REG_CP_LOAD_STATE_0 0x00000000
#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
@@ -349,7 +407,7 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
}
#define REG_CP_LOAD_STATE4_0 0x00000000
-#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x0000ffff
+#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x00003fff
#define CP_LOAD_STATE4_0_DST_OFF__SHIFT 0
static inline uint32_t CP_LOAD_STATE4_0_DST_OFF(uint32_t val)
{
@@ -396,6 +454,54 @@ static inline uint32_t CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(uint32_t val)
return ((val) << CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK;
}
+#define REG_CP_LOAD_STATE6_0 0x00000000
+#define CP_LOAD_STATE6_0_DST_OFF__MASK 0x00003fff
+#define CP_LOAD_STATE6_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x00004000
+#define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT 14
+static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_TYPE__SHIFT) & CP_LOAD_STATE6_0_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_SRC__MASK 0x00030000
+#define CP_LOAD_STATE6_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE6_0_STATE_SRC(enum a6xx_state_src val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_SRC__SHIFT) & CP_LOAD_STATE6_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_BLOCK__MASK 0x003c0000
+#define CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT 18
+static inline uint32_t CP_LOAD_STATE6_0_STATE_BLOCK(enum a6xx_state_block val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE6_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE6_0_NUM_UNIT__MASK 0xffc00000
+#define CP_LOAD_STATE6_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE6_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE6_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_1 0x00000001
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE6_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_2 0x00000002
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK;
+}
+
#define REG_CP_DRAW_INDX_0 0x00000000
#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
@@ -580,6 +686,153 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
}
+#define REG_A4XX_CP_DRAW_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK 0x01f00000
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT 20
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_TESS_MODE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK;
+}
+
+
+#define REG_A5XX_CP_DRAW_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK 0x01f00000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT 20
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK;
+}
+
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK;
+}
+
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_4 0x00000004
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_5 0x00000005
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK;
+}
+
static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
@@ -593,6 +846,12 @@ static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK 0x00f00000
+#define CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT 20
+static inline uint32_t CP_SET_DRAW_STATE__0_ENABLE_MASK(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT) & CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK;
+}
#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
@@ -708,6 +967,22 @@ static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
return ((val) << CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK;
}
+#define REG_CP_SET_BIN_DATA5_5 0x00000005
+#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_6 0x00000006
+#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK;
+}
+
#define REG_CP_REG_TO_MEM_0 0x00000000
#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff
#define CP_REG_TO_MEM_0_REG__SHIFT 0
@@ -732,6 +1007,46 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
}
+#define REG_CP_REG_TO_MEM_2 0x00000002
+#define CP_REG_TO_MEM_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_0 0x00000000
+#define CP_MEM_TO_REG_0_REG__MASK 0x0000ffff
+#define CP_MEM_TO_REG_0_REG__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_REG__SHIFT) & CP_MEM_TO_REG_0_REG__MASK;
+}
+#define CP_MEM_TO_REG_0_CNT__MASK 0x3ff80000
+#define CP_MEM_TO_REG_0_CNT__SHIFT 19
+static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK;
+}
+#define CP_MEM_TO_REG_0_64B 0x40000000
+#define CP_MEM_TO_REG_0_ACCUMULATE 0x80000000
+
+#define REG_CP_MEM_TO_REG_1 0x00000001
+#define CP_MEM_TO_REG_1_SRC__MASK 0xffffffff
+#define CP_MEM_TO_REG_1_SRC__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_1_SRC(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_1_SRC__SHIFT) & CP_MEM_TO_REG_1_SRC__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_2 0x00000002
+#define CP_MEM_TO_REG_2_SRC_HI__MASK 0xffffffff
+#define CP_MEM_TO_REG_2_SRC_HI__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_2_SRC_HI__SHIFT) & CP_MEM_TO_REG_2_SRC_HI__MASK;
+}
+
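For illustration, a sketch of combining the CP_MEM_TO_REG fields to copy two dwords from a 64-bit GPU address into consecutive registers (not part of the patch; OUT_PKT7()/OUT_RING() are the ring emission helpers used elsewhere in the msm driver, and their use here is an assumption):

	OUT_PKT7(ring, CP_MEM_TO_REG, 3);
	OUT_RING(ring, CP_MEM_TO_REG_0_REG(reg) |	/* destination reg */
		       CP_MEM_TO_REG_0_CNT(2));		/* dwords to copy */
	OUT_RING(ring, CP_MEM_TO_REG_1_SRC(lower_32_bits(src_iova)));
	OUT_RING(ring, CP_MEM_TO_REG_2_SRC_HI(upper_32_bits(src_iova)));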
#define REG_CP_MEM_TO_MEM_0 0x00000000
#define CP_MEM_TO_MEM_0_NEG_A 0x00000001
#define CP_MEM_TO_MEM_0_NEG_B 0x00000002
@@ -953,15 +1268,15 @@ static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val)
#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002
#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003
-
-#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
-#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK 0xffffffff
-#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT 0
-static inline uint32_t CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN(uint32_t val)
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN(uint32_t val)
{
- return ((val) << CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK;
+ return ((val) << CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK;
}
+#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
+
#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005
#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff
#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT 0
@@ -978,6 +1293,8 @@ static inline uint32_t CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI(uint32_t val)
return ((val) << CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK;
}
+#define REG_CP_COMPUTE_CHECKPOINT_7 0x00000007
+
#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
@@ -1032,13 +1349,13 @@ static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val)
}
#define REG_CP_BLIT_1 0x00000001
-#define CP_BLIT_1_SRC_X1__MASK 0x0000ffff
+#define CP_BLIT_1_SRC_X1__MASK 0x00003fff
#define CP_BLIT_1_SRC_X1__SHIFT 0
static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val)
{
return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK;
}
-#define CP_BLIT_1_SRC_Y1__MASK 0xffff0000
+#define CP_BLIT_1_SRC_Y1__MASK 0x3fff0000
#define CP_BLIT_1_SRC_Y1__SHIFT 16
static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
{
@@ -1046,13 +1363,13 @@ static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
}
#define REG_CP_BLIT_2 0x00000002
-#define CP_BLIT_2_SRC_X2__MASK 0x0000ffff
+#define CP_BLIT_2_SRC_X2__MASK 0x00003fff
#define CP_BLIT_2_SRC_X2__SHIFT 0
static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val)
{
return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK;
}
-#define CP_BLIT_2_SRC_Y2__MASK 0xffff0000
+#define CP_BLIT_2_SRC_Y2__MASK 0x3fff0000
#define CP_BLIT_2_SRC_Y2__SHIFT 16
static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
{
@@ -1060,13 +1377,13 @@ static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
}
#define REG_CP_BLIT_3 0x00000003
-#define CP_BLIT_3_DST_X1__MASK 0x0000ffff
+#define CP_BLIT_3_DST_X1__MASK 0x00003fff
#define CP_BLIT_3_DST_X1__SHIFT 0
static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val)
{
return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK;
}
-#define CP_BLIT_3_DST_Y1__MASK 0xffff0000
+#define CP_BLIT_3_DST_Y1__MASK 0x3fff0000
#define CP_BLIT_3_DST_Y1__SHIFT 16
static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
{
@@ -1074,13 +1391,13 @@ static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
}
#define REG_CP_BLIT_4 0x00000004
-#define CP_BLIT_4_DST_X2__MASK 0x0000ffff
+#define CP_BLIT_4_DST_X2__MASK 0x00003fff
#define CP_BLIT_4_DST_X2__SHIFT 0
static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val)
{
return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK;
}
-#define CP_BLIT_4_DST_Y2__MASK 0xffff0000
+#define CP_BLIT_4_DST_Y2__MASK 0x3fff0000
#define CP_BLIT_4_DST_Y2__SHIFT 16
static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
{
@@ -1113,5 +1430,129 @@ static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val)
return ((val) << CP_EXEC_CS_3_NGROUPS_Z__SHIFT) & CP_EXEC_CS_3_NGROUPS_Z__MASK;
}
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_0 0x00000000
+
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK 0xffffffff
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_1_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK;
+}
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK;
+}
+
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_3 0x00000003
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK 0x00000ffc
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT 2
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK 0x003ff000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT 12
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK 0xffc00000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK;
+}
+
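For illustration, a hedged sketch of a CP_EXEC_CS_INDIRECT dispatch on a5xx using these helpers (not part of the patch; OUT_PKT7()/OUT_RING() usage and the "local size minus one" encoding are assumptions modeled on similar freedreno code):

	/* the CP reads the group counts from the buffer at 'iova' */
	OUT_PKT7(ring, CP_EXEC_CS_INDIRECT, 4);
	OUT_RING(ring, 0x00000000);	/* REG_A4XX_CP_EXEC_CS_INDIRECT_0 */
	OUT_RING(ring, A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO(lower_32_bits(iova)));
	OUT_RING(ring, A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI(upper_32_bits(iova)));
	OUT_RING(ring, A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
		       A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
		       A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));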
+#define REG_A2XX_CP_SET_MARKER_0 0x00000000
+#define A2XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f
+#define A2XX_CP_SET_MARKER_0_MARKER__SHIFT 0
+static inline uint32_t A2XX_CP_SET_MARKER_0_MARKER(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_MARKER_0_MARKER__SHIFT) & A2XX_CP_SET_MARKER_0_MARKER__MASK;
+}
+#define A2XX_CP_SET_MARKER_0_MODE__MASK 0x0000000f
+#define A2XX_CP_SET_MARKER_0_MODE__SHIFT 0
+static inline uint32_t A2XX_CP_SET_MARKER_0_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A2XX_CP_SET_MARKER_0_MODE__SHIFT) & A2XX_CP_SET_MARKER_0_MODE__MASK;
+}
+#define A2XX_CP_SET_MARKER_0_IFPC 0x00000100
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x00000007
+#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
+}
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff
+#define A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A2XX_CP_SET_PSEUDO_REG__1_LO__MASK;
+}
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff
+#define A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A2XX_CP_SET_PSEUDO_REG__2_HI__MASK;
+}
+
+#define REG_A2XX_CP_REG_TEST_0 0x00000000
+#define A2XX_CP_REG_TEST_0_REG__MASK 0x00000fff
+#define A2XX_CP_REG_TEST_0_REG__SHIFT 0
+static inline uint32_t A2XX_CP_REG_TEST_0_REG(uint32_t val)
+{
+ return ((val) << A2XX_CP_REG_TEST_0_REG__SHIFT) & A2XX_CP_REG_TEST_0_REG__MASK;
+}
+#define A2XX_CP_REG_TEST_0_BIT__MASK 0x01f00000
+#define A2XX_CP_REG_TEST_0_BIT__SHIFT 20
+static inline uint32_t A2XX_CP_REG_TEST_0_BIT(uint32_t val)
+{
+ return ((val) << A2XX_CP_REG_TEST_0_BIT__SHIFT) & A2XX_CP_REG_TEST_0_BIT__MASK;
+}
+#define A2XX_CP_REG_TEST_0_UNK25 0x02000000
+
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
new file mode 100644
index 000000000000..879c13fe74e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_trace.h"
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg: private data of callback handler
+ * @irq_idx: interrupt index
+ */
+static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
+{
+ struct dpu_kms *dpu_kms = arg;
+ struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+
+ pr_debug("irq_idx=%d\n", irq_idx);
+
+ if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+ DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
+ atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+ }
+
+ atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+ /*
+ * Perform registered function callback
+ */
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+ if (cb->func)
+ cb->func(cb->arg, irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ /*
+ * Clear pending interrupt status in HW.
+ * NOTE: dpu_core_irq_callback_handler is protected by top-level
+ * spinlock, so it is safe to clear any interrupt status here.
+ */
+ dpu_kms->hw_intr->ops.clear_intr_status_nolock(
+ dpu_kms->hw_intr,
+ irq_idx);
+}
+
+int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
+ enum dpu_intr_type intr_type, u32 instance_idx)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.irq_idx_lookup)
+ return -EINVAL;
+
+ return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+ instance_idx);
+}
+
+/**
+ * _dpu_core_irq_enable - enable core interrupt given by the index
+ * @dpu_kms: Pointer to dpu kms context
+ * @irq_idx: interrupt index
+ */
+static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ unsigned long irq_flags;
+ int ret = 0, enable_count;
+
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->irq_obj.enable_counts ||
+ !dpu_kms->irq_obj.irq_counts) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+ DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+ trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
+
+ if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+ ret = dpu_kms->hw_intr->ops.enable_irq(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+
+ DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ /* empty callback list but interrupt is enabled */
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
+ DPU_ERROR("irq_idx=%d enabled with no callback\n",
+ irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ }
+
+ return ret;
+}
+
+int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+ int i, ret = 0, counts;
+
+ if (!dpu_kms || !irq_idxs || !irq_count) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts)
+ DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+/**
+ * _dpu_core_irq_disable - disable core interrupt given by the index
+ * @dpu_kms: Pointer to dpu kms context
+ * @irq_idx: interrupt index
+ */
+static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ int ret = 0, enable_count;
+
+ if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+ DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+ trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
+
+ if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+ ret = dpu_kms->hw_intr->ops.disable_irq(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idx);
+ DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+ }
+
+ return ret;
+}
+
+int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+ int i, ret = 0, counts;
+
+ if (!dpu_kms || !irq_idxs || !irq_count) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts == 2)
+ DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.get_interrupt_status)
+ return 0;
+
+ if (irq_idx < 0) {
+ DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+ __builtin_return_address(0), irq_idx);
+ return 0;
+ }
+
+ return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
+ irq_idx, clear);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ list_add_tail(&register_irq_cb->list,
+ &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
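For illustration, a hypothetical consumer of the lookup/register/enable API (not part of the patch; DPU_IRQ_TYPE_INTF_VSYNC is an assumed dpu_intr_type value, and my_vsync_cb/my_hook_vsync are made-up names):

static void my_vsync_cb(void *arg, int irq_idx)
{
	/* invoked from dpu_core_irq_callback_handler with cb_lock held */
}

static int my_hook_vsync(struct dpu_kms *dpu_kms, u32 intf_idx,
			 struct dpu_irq_callback *cb)
{
	int irq_idx = dpu_core_irq_idx_lookup(dpu_kms,
					      DPU_IRQ_TYPE_INTF_VSYNC,
					      intf_idx);
	if (irq_idx < 0)
		return irq_idx;

	INIT_LIST_HEAD(&cb->list);
	cb->func = my_vsync_cb;
	cb->arg = dpu_kms;
	dpu_core_irq_register_callback(dpu_kms, irq_idx, cb);
	return dpu_core_irq_enable(dpu_kms, &irq_idx, 1);
}

Teardown mirrors this: dpu_core_irq_disable() followed by dpu_core_irq_unregister_callback() with the same irq_idx and callback structure.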
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ /* empty callback list but interrupt is still enabled */
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+ atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
+ DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.clear_all_irqs)
+ return;
+
+ dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.disable_all_irqs)
+ return;
+
+ dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+ struct dpu_irq *irq_obj = s->private;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+ int i, irq_count, enable_count, cb_count;
+
+ if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ for (i = 0; i < irq_obj->total_irqs; i++) {
+ spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+ cb_count = 0;
+ irq_count = atomic_read(&irq_obj->irq_counts[i]);
+ enable_count = atomic_read(&irq_obj->enable_counts[i]);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+ cb_count++;
+ spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+ if (irq_count || enable_count || cb_count)
+ seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+ i, irq_count, enable_count, cb_count);
+ }
+
+ return 0;
+}
+
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+
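For reference, the DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq) invocation above expands to approximately:

static int dpu_debugfs_core_irq_open(struct inode *inode, struct file *file)
{
	return single_open(file, dpu_debugfs_core_irq_show, inode->i_private);
}
static const struct file_operations dpu_debugfs_core_irq_fops = {
	.owner = THIS_MODULE,
	.open = dpu_debugfs_core_irq_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
};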
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
+ parent, &dpu_kms->irq_obj,
+ &dpu_debugfs_core_irq_fops);
+
+ return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove(dpu_kms->irq_obj.debugfs_file);
+ dpu_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid drm device\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ dpu_clear_all_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ spin_lock_init(&dpu_kms->irq_obj.cb_lock);
+
+	/* Create irq callback lists for all possible irq_idx */
+ dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+ dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(struct list_head), GFP_KERNEL);
+ dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
+ INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
+ atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
+ atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
+ }
+}
+
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
+{
+ return 0;
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid drm device\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
+ if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
+ !list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+ DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ dpu_clear_all_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ kfree(dpu_kms->irq_obj.irq_cb_tbl);
+ kfree(dpu_kms->irq_obj.enable_counts);
+ kfree(dpu_kms->irq_obj.irq_counts);
+ dpu_kms->irq_obj.irq_cb_tbl = NULL;
+ dpu_kms->irq_obj.enable_counts = NULL;
+ dpu_kms->irq_obj.irq_counts = NULL;
+ dpu_kms->irq_obj.total_irqs = 0;
+}
+
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+{
+ /*
+	 * Read the interrupt status from all sources; the statuses are
+	 * stored within hw_intr and the HW status is cleared after the
+	 * read. An individual interrupt status bit is only stored if
+	 * that interrupt is enabled.
+ */
+ dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
+
+ /*
+	 * Dispatch to the HW driver to look up which interrupt(s) fired.
+	 * When a matching interrupt is located, the HW driver calls
+	 * dpu_core_irq_callback_handler with the irq_idx from the lookup
+	 * table. dpu_core_irq_callback_handler runs the registered callbacks
+	 * and clears the interrupt status once they have finished.
+ */
+ dpu_kms->hw_intr->ops.dispatch_irqs(
+ dpu_kms->hw_intr,
+ dpu_core_irq_callback_handler,
+ dpu_kms);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 000000000000..5e98bba46af5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: none
+ */
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: 0 if success; error code otherwise
+ */
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: none
+ */
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_idx_lookup - IRQ helper function for looking up irq_idx in
+ *			the HW interrupt mapping table.
+ * @dpu_kms: DPU handle
+ * @intr_type: DPU HW interrupt type for lookup
+ * @instance_idx: DPU HW block instance defined in dpu_hw_mdss.h
+ * @return: irq_idx, or -EINVAL when the lookup fails
+ */
+int dpu_core_irq_idx_lookup(
+ struct dpu_kms *dpu_kms,
+ enum dpu_intr_type intr_type,
+ uint32_t instance_idx);
+
+/**
+ * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @dpu_kms: DPU handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 on success enabling the IRQs, otherwise failure
+ *
+ * This function increments a reference count on each enable and decrements
+ * it on each disable. The interrupt is enabled in HW if the count is 0
+ * before the increment.
+ */
+int dpu_core_irq_enable(
+ struct dpu_kms *dpu_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * dpu_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @dpu_kms: DPU handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 on success disabling the IRQs, otherwise failure
+ *
+ * This function increments a reference count on each enable and decrements
+ * it on each disable. The interrupt is disabled in HW if the count is 0
+ * after the decrement.
+ */
+int dpu_core_irq_disable(
+ struct dpu_kms *dpu_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @clear: True to clear the irq after read
+ * @return: non-zero if an interrupt was detected; zero otherwise
+ */
+u32 dpu_core_irq_read(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ bool clear);
+
+/**
+ * dpu_core_irq_register_callback - register a callback function for an
+ *		IRQ interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing the callback function
+ *		and argument. The structure must remain valid until
+ *		it is unregistered for the given irq_idx.
+ * @return: 0 on success registering the callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_core_irq_unregister_callback - unregister a callback function for an
+ *		IRQ interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing the callback function
+ *		and argument. This must match the structure used at
+ *		registration.
+ * @return: 0 on success unregistering the callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_unregister_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ * @return: 0 on success
+ */
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent);
+
+/**
+ * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
+ * @dpu_kms: pointer to kms
+ */
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
+
+#endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 000000000000..41c5191f9056
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,637 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+#define DPU_PERF_MODE_STRING_SIZE 128
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ */
+enum dpu_perf_mode {
+ DPU_PERF_MODE_NORMAL,
+ DPU_PERF_MODE_MINIMUM,
+ DPU_PERF_MODE_FIXED,
+ DPU_PERF_MODE_MAX
+};
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid device\n");
+ return NULL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_dpu_kms(priv->kms);
+}
+
+static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+ return dpu_crtc_is_enabled(crtc);
+}
+
+static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ bool intf_connected = false;
+
+ if (!crtc)
+ goto end;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+ _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+ DPU_DEBUG("video interface connected crtc:%d\n",
+ tmp_crtc->base.id);
+ intf_connected = true;
+ goto end;
+ }
+ }
+
+end:
+ return intf_connected;
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct dpu_core_perf_params *perf)
+{
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
+
+ if (!kms || !kms->catalog || !crtc || !state || !perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_cstate = to_dpu_crtc_state(state);
+ memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+ if (!dpu_cstate->bw_control) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+ 1000ULL;
+ perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+ }
+ perf->core_clk_rate = kms->perf.max_core_clk_rate;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = 0;
+ perf->max_per_pipe_ib[i] = 0;
+ }
+ perf->core_clk_rate = 0;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+ perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+ }
+ perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+ }
+
+ DPU_DEBUG(
+ "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 bw, threshold;
+ u64 bw_sum_of_intfs = 0;
+ enum dpu_crtc_client_type curr_client_type;
+ bool is_video_mode;
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+ struct dpu_kms *kms;
+ int i;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ /* we only need bandwidth check on real-time clients (interfaces) */
+ if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+ return 0;
+
+ dpu_cstate = to_dpu_crtc_state(state);
+
+ /* obtain new values */
+ _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+ for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+ i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
+ curr_client_type = dpu_crtc_get_client_type(crtc);
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ (dpu_crtc_get_client_type(tmp_crtc) ==
+ curr_client_type) &&
+ (tmp_crtc != crtc)) {
+ struct dpu_crtc_state *tmp_cstate =
+ to_dpu_crtc_state(tmp_crtc->state);
+
+ DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+ tmp_crtc->base.id,
+ tmp_cstate->new_perf.bw_ctl[i],
+ tmp_cstate->bw_control);
+ /*
+ * For bw check only use the bw if the
+ * atomic property has been already set
+ */
+ if (tmp_cstate->bw_control)
+ bw_sum_of_intfs +=
+ tmp_cstate->new_perf.bw_ctl[i];
+ }
+ }
+
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+ DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+
+ is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+ threshold = (is_video_mode ||
+ _dpu_core_video_mode_intf_connected(crtc)) ?
+ kms->catalog->perf.max_bw_low :
+ kms->catalog->perf.max_bw_high;
+
+ DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+
+ if (!dpu_cstate->bw_control) {
+ DPU_DEBUG("bypass bandwidth check\n");
+ } else if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
+ }
+ }
+
+ return 0;
+}
+
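As a worked example of the check above: if two powered-on command-mode CRTCs with bw_control set vote 1,500,000,000 and 900,000,000 bytes/sec on the same bus, bw_sum_of_intfs is 2,400,000,000, so bw = DIV_ROUND_UP_ULL(2,400,000,000, 1000) = 2,400,000 kb, and the commit is rejected with -E2BIG whenever that exceeds the catalog's max_bw_low/max_bw_high threshold (also in kb).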
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+ struct drm_crtc *crtc, u32 bus_id)
+{
+ struct dpu_core_perf_params perf = { { 0 } };
+ enum dpu_crtc_client_type curr_client_type
+ = dpu_crtc_get_client_type(crtc);
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int ret = 0;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ curr_client_type ==
+ dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf.max_per_pipe_ib[bus_id] =
+ max(perf.max_per_pipe_ib[bus_id],
+ dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
+
+ DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+ tmp_crtc->base.id, bus_id,
+ dpu_cstate->new_perf.bw_ctl[bus_id]);
+ }
+ }
+ return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * This function checks a state variable for the crtc; if all pending
+ * commit requests are done, no more bandwidth is needed, so the
+ * bandwidth request is released.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ struct dpu_kms *kms;
+ int i;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ /* only do this for command mode rt client */
+ if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+ return;
+
+ /*
+	 * If a video interface is present, command-mode panel bandwidth
+	 * cannot be released.
+ */
+ if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ dpu_crtc_get_intf_mode(tmp_crtc) ==
+ INTF_MODE_VIDEO)
+ return;
+ }
+
+ /* Release the bandwidth */
+ if (kms->perf.enable_bw_release) {
+ trace_dpu_cmd_release_bw(crtc->base.id);
+ DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ dpu_crtc->cur_perf.bw_ctl[i] = 0;
+ _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+ }
+ }
+}
+
+static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
+{
+ struct dss_clk *core_clk = kms->perf.core_clk;
+
+ if (core_clk->max_rate && (rate > core_clk->max_rate))
+ rate = core_clk->max_rate;
+
+ core_clk->rate = rate;
+ return msm_dss_clk_set_rate(core_clk, 1);
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+ u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+ struct drm_crtc *crtc;
+ struct dpu_crtc_state *dpu_cstate;
+
+ drm_for_each_crtc(crtc, kms->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+ clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+ clk_rate);
+ clk_rate = clk_round_rate(kms->perf.core_clk->clk,
+ clk_rate);
+ }
+ }
+
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+ clk_rate = kms->perf.fix_core_clk_rate;
+
+ DPU_DEBUG("clk:%llu\n", clk_rate);
+
+ return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req)
+{
+ struct dpu_core_perf_params *new, *old;
+ int update_bus = 0, update_clk = 0;
+ u64 clk_rate = 0;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
+ struct msm_drm_private *priv;
+ struct dpu_kms *kms;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+ priv = kms->dev->dev_private;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+ crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+ old = &dpu_crtc->cur_perf;
+ new = &dpu_cstate->new_perf;
+
+ if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ /*
+ * cases for bus bandwidth update.
+ * 1. new bandwidth vote - "ab or ib vote" is higher
+ * than current vote for update request.
+ * 2. new bandwidth vote - "ab or ib vote" is lower
+ * than current vote at end of commit or stop.
+ */
+ if ((params_changed && ((new->bw_ctl[i] >
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] >
+ old->max_per_pipe_ib[i]))) ||
+ (!params_changed && ((new->bw_ctl[i] <
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] <
+ old->max_per_pipe_ib[i])))) {
+ DPU_DEBUG(
+ "crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ crtc->base.id, params_changed,
+ new->bw_ctl[i], old->bw_ctl[i]);
+ old->bw_ctl[i] = new->bw_ctl[i];
+ old->max_per_pipe_ib[i] =
+ new->max_per_pipe_ib[i];
+ update_bus |= BIT(i);
+ }
+ }
+
+ if ((params_changed &&
+ (new->core_clk_rate > old->core_clk_rate)) ||
+ (!params_changed &&
+ (new->core_clk_rate < old->core_clk_rate))) {
+ old->core_clk_rate = new->core_clk_rate;
+ update_clk = 1;
+ }
+ } else {
+ DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+ memset(old, 0, sizeof(*old));
+ memset(new, 0, sizeof(*new));
+ update_bus = ~0;
+ update_clk = 1;
+ }
+ trace_dpu_perf_crtc_update(crtc->base.id,
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ new->core_clk_rate, stop_req,
+ update_bus, update_clk);
+
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ if (update_bus & BIT(i)) {
+ ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+ if (ret) {
+ DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
+ crtc->base.id, i);
+ return ret;
+ }
+ }
+ }
+
+ /*
+	 * Update the clock after the bandwidth vote to ensure bandwidth
+	 * is available before the clock rate is increased.
+ */
+ if (update_clk) {
+ clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+ trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+
+ ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
+ if (ret) {
+ DPU_ERROR("failed to set %s clock rate %llu\n",
+ kms->perf.core_clk->clk_name, clk_rate);
+ return ret;
+ }
+
+ kms->perf.core_clk_rate = clk_rate;
+ DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ struct dpu_perf_cfg *cfg = &perf->catalog->perf;
+ u32 perf_mode = 0;
+ char buf[10];
+
+ if (!perf)
+ return -ENODEV;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtouint(buf, 0, &perf_mode))
+ return -EFAULT;
+
+ if (perf_mode >= DPU_PERF_MODE_MAX)
+ return -EFAULT;
+
+ if (perf_mode == DPU_PERF_MODE_FIXED) {
+ DRM_INFO("fix performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+ /* run the driver with max clk and BW vote */
+ perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+ perf->perf_tune.min_bus_vote =
+ (u64) cfg->max_bw_high * 1000;
+ DRM_INFO("minimum performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+ /* reset the perf tune params to 0 */
+ perf->perf_tune.min_core_clk = 0;
+ perf->perf_tune.min_bus_vote = 0;
+ DRM_INFO("normal performance mode\n");
+ }
+ perf->perf_tune.mode = perf_mode;
+
+ return count;
+}
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ int len = 0;
+ char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+
+ if (!perf)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf),
+ "mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+ perf->perf_tune.mode,
+ perf->perf_tune.min_core_clk,
+ perf->perf_tune.min_bus_vote);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+ .open = simple_open,
+ .read = _dpu_core_perf_mode_read,
+ .write = _dpu_core_perf_mode_write,
+};
+
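In practice, writing the string "2" to the perf_mode debugfs file selects DPU_PERF_MODE_FIXED, "1" selects DPU_PERF_MODE_MINIMUM (pinning min_core_clk and min_bus_vote to the maximums), and "0" restores DPU_PERF_MODE_NORMAL; reading the file back returns a single line such as "mode 2 min_mdp_clk 0 min_bus_vote 0".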
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+ debugfs_remove_recursive(perf->debugfs_root);
+ perf->debugfs_root = NULL;
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent)
+{
+ struct dpu_mdss_cfg *catalog = perf->catalog;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ priv = perf->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+ if (!perf->debugfs_root) {
+ DPU_ERROR("failed to create core perf debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+ &perf->max_core_clk_rate);
+ debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+ &perf->core_clk_rate);
+ debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+ (u32 *)&perf->enable_bw_release);
+ debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_low);
+ debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_high);
+ debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_core_ib);
+ debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_llcc_ib);
+ debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_dram_ib);
+ debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+ (u32 *)perf, &dpu_core_perf_mode_fops);
+ debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+ &perf->fix_core_clk_rate);
+ debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+ &perf->fix_core_ib_vote);
+ debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+ &perf->fix_core_ab_vote);
+
+ return 0;
+}
+#else
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent)
+{
+ return 0;
+}
+#endif
+
+void dpu_core_perf_destroy(struct dpu_core_perf *perf)
+{
+ if (!perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_core_perf_debugfs_destroy(perf);
+ perf->max_core_clk_rate = 0;
+ perf->core_clk = NULL;
+ perf->phandle = NULL;
+ perf->catalog = NULL;
+ perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_power_handle *phandle,
+ struct dss_clk *core_clk)
+{
+ perf->dev = dev;
+ perf->catalog = catalog;
+ perf->phandle = phandle;
+ perf->core_clk = core_clk;
+
+ perf->max_core_clk_rate = core_clk->max_rate;
+ if (!perf->max_core_clk_rate) {
+ DPU_DEBUG("optional max core clk rate, use default\n");
+ perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 000000000000..fbcbe0c7527a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+#include "dpu_power_handle.h"
+
+#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+ u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 core_clk_rate;
+};
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+ u32 mode;
+ u64 min_core_clk;
+ u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+ struct drm_device *dev;
+ struct dentry *debugfs_root;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_power_handle *phandle;
+ struct dss_clk *core_clk;
+ u64 core_clk_rate;
+ u64 max_core_clk_rate;
+ struct dpu_core_perf_tune perf_tune;
+ u32 enable_bw_release;
+ u64 fix_core_clk_rate;
+ u64 fix_core_ib_vote;
+ u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @core_clk: pointer to core clock
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_power_handle *phandle,
+ struct dss_clk *core_clk);
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @perf: Pointer to core performance context
+ * @debugfs_parent: Pointer to parent debugfs
+ */
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
new file mode 100644
index 000000000000..80cbf75bc2ff
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -0,0 +1,2138 @@
+/*
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_rect.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_crtc.h"
+#include "dpu_plane.h"
+#include "dpu_encoder.h"
+#include "dpu_vbif.h"
+#include "dpu_power_handle.h"
+#include "dpu_core_perf.h"
+#include "dpu_trace.h"
+
+#define DPU_DRM_BLEND_OP_NOT_DEFINED 0
+#define DPU_DRM_BLEND_OP_OPAQUE 1
+#define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
+#define DPU_DRM_BLEND_OP_COVERAGE 3
+#define DPU_DRM_BLEND_OP_MAX 4
+
+/* layer mixer index on dpu_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+#define MISR_BUFF_SIZE 256
+
+static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return NULL;
+ }
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_dpu_kms(priv->kms);
+}
+
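+/**
+ * _dpu_crtc_power_enable - take or drop a runtime PM reference for the crtc
+ * @dpu_crtc: Pointer to dpu crtc structure
+ * @enable: true to get, false to put the runtime PM reference
+ * return: zero if success, or error code otherwise
+ */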
+static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
+{
+ struct drm_crtc *crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!dpu_crtc) {
+ DPU_ERROR("invalid dpu crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &dpu_crtc->base;
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid drm device\n");
+ return -EINVAL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ if (enable)
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ else
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+/**
+ * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
+ * @rp: Pointer to resource pool
+ * return: Pointer to drm crtc if success; null otherwise
+ */
+static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
+{
+ if (!rp)
+ return NULL;
+
+ return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
+}
+
+/**
+ * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
+ * @rp: Pointer to resource pool
+ * @force: True to reclaim all resources; otherwise, reclaim only unused ones
+ * return: None
+ */
+static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
+{
+ struct dpu_crtc_res *res, *next;
+ struct drm_crtc *crtc;
+
+ crtc = _dpu_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
+ force ? "destroy" : "free_unused");
+
+ list_for_each_entry_safe(res, next, &rp->res_list, list) {
+ if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
+ continue;
+ DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ list_del(&res->list);
+ if (res->ops.put)
+ res->ops.put(res->val);
+ kfree(res);
+ }
+}
+
+/**
+ * _dpu_crtc_rp_free_unused - free unused resource in pool
+ * @rp: Pointer to resource pool
+ * return: none
+ */
+static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
+{
+ mutex_lock(rp->rp_lock);
+ _dpu_crtc_rp_reclaim(rp, false);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_destroy - destroy resource pool
+ * @rp: Pointer to resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
+{
+ mutex_lock(rp->rp_lock);
+ list_del_init(&rp->rp_list);
+ _dpu_crtc_rp_reclaim(rp, true);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_hw_blk_get - get callback for hardware block
+ * @val: Resource handle
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle
+ */
+static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
+{
+ DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
+ return dpu_hw_blk_get(val, type, tag);
+}
+
+/**
+ * _dpu_crtc_hw_blk_put - put callback for hardware block
+ * @val: Resource handle
+ * return: None
+ */
+static void _dpu_crtc_hw_blk_put(void *val)
+{
+ DPU_DEBUG("res://%pK\n", val);
+ dpu_hw_blk_put(val);
+}
+
+/**
+ * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
+ * @rp: Pointer to original resource pool
+ * @dup_rp: Pointer to duplicated resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
+ struct dpu_crtc_respool *dup_rp)
+{
+ struct dpu_crtc_res *res, *dup_res;
+ struct drm_crtc *crtc;
+
+ if (!rp || !dup_rp || !rp->rp_head) {
+ DPU_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ crtc = _dpu_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
+
+ mutex_lock(rp->rp_lock);
+ dup_rp->sequence_id = rp->sequence_id + 1;
+ INIT_LIST_HEAD(&dup_rp->res_list);
+ dup_rp->ops = rp->ops;
+ list_for_each_entry(res, &rp->res_list, list) {
+ dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
+ if (!dup_res) {
+ mutex_unlock(rp->rp_lock);
+ return;
+ }
+ INIT_LIST_HEAD(&dup_res->list);
+ atomic_set(&dup_res->refcount, 0);
+ dup_res->type = res->type;
+ dup_res->tag = res->tag;
+ dup_res->val = res->val;
+ dup_res->ops = res->ops;
+ dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
+ DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, dup_rp->sequence_id,
+ dup_res->type, dup_res->tag, dup_res->val,
+ atomic_read(&dup_res->refcount));
+ list_add_tail(&dup_res->list, &dup_rp->res_list);
+ if (dup_res->ops.get)
+ dup_res->ops.get(dup_res->val, 0, -1);
+ }
+
+ dup_rp->rp_lock = rp->rp_lock;
+ dup_rp->rp_head = rp->rp_head;
+ INIT_LIST_HEAD(&dup_rp->rp_list);
+ list_add_tail(&dup_rp->rp_list, rp->rp_head);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_reset - reset resource pool after allocation
+ * @rp: Pointer to original resource pool
+ * @rp_lock: Pointer to serialization resource pool lock
+ * @rp_head: Pointer to crtc resource pool head
+ * return: None
+ */
+static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
+ struct mutex *rp_lock, struct list_head *rp_head)
+{
+ if (!rp || !rp_lock || !rp_head) {
+ DPU_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ mutex_lock(rp_lock);
+ rp->rp_lock = rp_lock;
+ rp->rp_head = rp_head;
+ INIT_LIST_HEAD(&rp->rp_list);
+ rp->sequence_id = 0;
+ INIT_LIST_HEAD(&rp->res_list);
+ rp->ops.get = _dpu_crtc_hw_blk_get;
+ rp->ops.put = _dpu_crtc_hw_blk_put;
+ list_add_tail(&rp->rp_list, rp->rp_head);
+ mutex_unlock(rp_lock);
+}
+
+static void dpu_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ DPU_DEBUG("\n");
+
+ if (!crtc)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_crtc->phandle = NULL;
+
+ drm_crtc_cleanup(crtc);
+ mutex_destroy(&dpu_crtc->crtc_lock);
+ kfree(dpu_crtc);
+}
+
+static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
+ struct dpu_plane_state *pstate)
+{
+ struct dpu_hw_mixer *lm = mixer->hw_lm;
+
+ /* default to opaque blending */
+ lm->ops.setup_blend_config(lm, pstate->stage, 0xFF, 0,
+ DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST);
+}
+
+static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *crtc_state;
+ int lm_idx, lm_horiz_position;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ lm_horiz_position = 0;
+ for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
+ struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
+ struct dpu_hw_mixer_cfg cfg;
+
+ if (!lm_roi || !drm_rect_visible(lm_roi))
+ continue;
+
+ cfg.out_width = drm_rect_width(lm_roi);
+ cfg.out_height = drm_rect_height(lm_roi);
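+ /* the first visible LM drives the left half of the crtc; any
+ * further LMs are programmed as right mixers
+ */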
+ cfg.right_mixer = lm_horiz_position++;
+ cfg.flags = 0;
+ hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
+ }
+}
+
+static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+ struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
+{
+ struct drm_plane *plane;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_format *format;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+ struct dpu_hw_stage_cfg *stage_cfg;
+
+ u32 flush_mask;
+ uint32_t stage_idx, lm_idx;
+ int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
+ bool bg_alpha_enable = false;
+
+ if (!dpu_crtc || !mixer) {
+ DPU_ERROR("invalid dpu_crtc or mixer\n");
+ return;
+ }
+
+ ctl = mixer->hw_ctl;
+ lm = mixer->hw_lm;
+ stage_cfg = &dpu_crtc->stage_cfg;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ state = plane->state;
+ if (!state)
+ continue;
+
+ pstate = to_dpu_plane_state(state);
+ fb = state->fb;
+
+ dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
+
+ DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+ crtc->base.id,
+ pstate->stage,
+ plane->base.id,
+ dpu_plane_pipe(plane) - SSPP_VIG0,
+ state->fb ? state->fb->base.id : -1);
+
+ format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
+ if (!format) {
+ DPU_ERROR("invalid format\n");
+ return;
+ }
+
+ if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
+ bg_alpha_enable = true;
+
+ stage_idx = zpos_cnt[pstate->stage]++;
+ stage_cfg->stage[pstate->stage][stage_idx] =
+ dpu_plane_pipe(plane);
+ stage_cfg->multirect_index[pstate->stage][stage_idx] =
+ pstate->multirect_index;
+
+ trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
+ state, pstate, stage_idx,
+ dpu_plane_pipe(plane) - SSPP_VIG0,
+ format->base.pixel_format,
+ fb ? fb->modifier : 0);
+
+ /* blend config update */
+ for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);
+
+ mixer[lm_idx].flush_mask |= flush_mask;
+
+ if (bg_alpha_enable && !format->alpha_enable)
+ mixer[lm_idx].mixer_op_mode = 0;
+ else
+ mixer[lm_idx].mixer_op_mode |=
+ 1 << pstate->stage;
+ }
+ }
+
+ _dpu_crtc_program_lm_output_roi(crtc);
+}
+
+/**
+ * _dpu_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_crtc_state;
+ struct dpu_crtc_mixer *mixer;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+
+ int i;
+
+ if (!crtc)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_crtc_state = to_dpu_crtc_state(crtc->state);
+ mixer = dpu_crtc->mixers;
+
+ DPU_DEBUG("%s\n", dpu_crtc->name);
+
+ if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+ DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers);
+ return;
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+ DPU_ERROR("invalid lm or ctl assigned to mixer\n");
+ return;
+ }
+ mixer[i].mixer_op_mode = 0;
+ mixer[i].flush_mask = 0;
+ if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+ mixer[i].hw_ctl->ops.clear_all_blendstages(
+ mixer[i].hw_ctl);
+ }
+
+ /* initialize stage cfg */
+ memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ ctl = mixer[i].hw_ctl;
+ lm = mixer[i].hw_lm;
+
+ lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+ mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
+ mixer[i].hw_lm->idx);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+ DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+ mixer[i].hw_lm->idx - LM_0,
+ mixer[i].mixer_op_mode,
+ ctl->idx - CTL_0,
+ mixer[i].flush_mask);
+
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &dpu_crtc->stage_cfg);
+ }
+}
+
+/**
+ * _dpu_crtc_complete_flip - signal pending page_flip events
+ * Any pending vblank events are added to the vblank_event_list
+ * so that they are signaled at the next vblank interrupt.
+ * PAGE_FLIP events, however, are not handled through the vblank_event_list;
+ * this function signals any pending PAGE_FLIP events that were requested
+ * through DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (dpu_crtc->event) {
+ DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+ dpu_crtc->event);
+ trace_dpu_crtc_complete_flip(DRMID(crtc));
+ drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
+ dpu_crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ if (!crtc || !crtc->dev) {
+ DPU_ERROR("invalid crtc\n");
+ return INTF_MODE_NONE;
+ }
+
+ drm_for_each_encoder(encoder, crtc->dev)
+ if (encoder->crtc == crtc)
+ return dpu_encoder_get_intf_mode(encoder);
+
+ return INTF_MODE_NONE;
+}
+
+static void dpu_crtc_vblank_cb(void *data)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ /* keep statistics on vblank callback - with auto reset via debugfs */
+ if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
+ dpu_crtc->vblank_cb_time = ktime_get();
+ else
+ dpu_crtc->vblank_cb_count++;
+ _dpu_crtc_complete_flip(crtc);
+ drm_crtc_handle_vblank(crtc);
+ trace_dpu_crtc_vblank_cb(DRMID(crtc));
+}
+
+static void dpu_crtc_frame_event_work(struct kthread_work *work)
+{
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ struct drm_crtc *crtc;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *dpu_kms;
+ unsigned long flags;
+ bool frame_done = false;
+
+ if (!work) {
+ DPU_ERROR("invalid work handle\n");
+ return;
+ }
+
+ fevent = container_of(work, struct dpu_crtc_frame_event, work);
+ if (!fevent->crtc || !fevent->crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ crtc = fevent->crtc;
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid kms handle\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+ DPU_ATRACE_BEGIN("crtc_frame_event");
+
+ DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+ ktime_to_ns(fevent->ts));
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (atomic_read(&dpu_crtc->frame_pending) < 1) {
+ /* this should not happen */
+ DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
+ crtc->base.id,
+ fevent->event,
+ ktime_to_ns(fevent->ts),
+ atomic_read(&dpu_crtc->frame_pending));
+ } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
+ /* release bandwidth and other resources */
+ trace_dpu_crtc_frame_event_done(DRMID(crtc),
+ fevent->event);
+ dpu_core_perf_crtc_release_bw(crtc);
+ } else {
+ trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
+ fevent->event);
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
+ dpu_core_perf_crtc_update(crtc, 0, false);
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR))
+ frame_done = true;
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
+ DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
+ crtc->base.id, ktime_to_ns(fevent->ts));
+
+ if (frame_done)
+ complete_all(&dpu_crtc->frame_done_comp);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+ DPU_ATRACE_END("crtc_frame_event");
+}
+
+/*
+ * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
+ * registers this callback with the encoder for all frame events such as
+ * frame_error, frame_done and idle_timeout. The encoder may invoke it from
+ * different contexts - IRQ, user thread, commit_thread, etc. - so each event
+ * must be carefully reviewed and processed in the proper task context to
+ * avoid scheduling delays and to properly manage IRQ bottom-half processing.
+ */
+static void dpu_crtc_frame_event_cb(void *data, u32 event)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ unsigned long flags;
+ u32 crtc_id;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ /* Nothing to do on idle event */
+ if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+ crtc_id = drm_crtc_index(crtc);
+
+ trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
+ struct dpu_crtc_frame_event, list);
+ if (fevent)
+ list_del_init(&fevent->list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+ if (!fevent) {
+ DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+ return;
+ }
+
+ fevent->event = event;
+ fevent->crtc = crtc;
+ fevent->ts = ktime_get();
+ kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+}
+
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ if (!crtc || !crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ trace_dpu_crtc_complete_commit(DRMID(crtc));
+}
+
+static void _dpu_crtc_setup_mixer_for_encoder(
+ struct drm_crtc *crtc,
+ struct drm_encoder *enc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_rm *rm = &dpu_kms->rm;
+ struct dpu_crtc_mixer *mixer;
+ struct dpu_hw_ctl *last_valid_ctl = NULL;
+ int i;
+ struct dpu_rm_hw_iter lm_iter, ctl_iter;
+
+ dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
+ dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
+
+ /* Set up all the mixers and ctls reserved by this encoder */
+ for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
+ mixer = &dpu_crtc->mixers[i];
+
+ if (!dpu_rm_get_hw(rm, &lm_iter))
+ break;
+ mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
+
+ /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
+ if (!dpu_rm_get_hw(rm, &ctl_iter)) {
+ DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
+ mixer->hw_lm->idx - LM_0);
+ mixer->hw_ctl = last_valid_ctl;
+ } else {
+ mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
+ last_valid_ctl = mixer->hw_ctl;
+ }
+
+ /* Shouldn't happen, mixers are always >= ctls */
+ if (!mixer->hw_ctl) {
+ DPU_ERROR("no valid ctls found for lm %d\n",
+ mixer->hw_lm->idx - LM_0);
+ return;
+ }
+
+ mixer->encoder = enc;
+
+ dpu_crtc->num_mixers++;
+ DPU_DEBUG("setup mixer %d: lm %d\n",
+ i, mixer->hw_lm->idx - LM_0);
+ DPU_DEBUG("setup mixer %d: ctl %d\n",
+ i, mixer->hw_ctl->idx - CTL_0);
+ }
+}
+
+static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
+
+ dpu_crtc->num_mixers = 0;
+ dpu_crtc->mixers_swapped = false;
+ memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ /* Check for mixers on all encoders attached to this crtc */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
+ }
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+ struct drm_display_mode *adj_mode;
+ u32 crtc_split_width;
+ int i;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ adj_mode = &state->adjusted_mode;
+ crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);
+
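+ /* carve the CRTC into equal-width vertical strips, one per mixer */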
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ struct drm_rect *r = &cstate->lm_bounds[i];
+ r->x1 = crtc_split_width * i;
+ r->y1 = 0;
+ r->x2 = r->x1 + crtc_split_width;
+ r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+
+ trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+ }
+
+ drm_mode_debug_printmodeline(adj_mode);
+}
+
+static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ unsigned long flags;
+ struct dpu_crtc_smmu_state_data *smmu_state;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dev = crtc->dev;
+ smmu_state = &dpu_crtc->smmu_state;
+
+ if (!dpu_crtc->num_mixers) {
+ _dpu_crtc_setup_mixers(crtc);
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+ }
+
+ if (dpu_crtc->event) {
+ WARN_ON(dpu_crtc->event);
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ /* encoder will trigger pending mask now */
+ dpu_encoder_trigger_kickoff_pending(encoder);
+ }
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ _dpu_crtc_blend_setup(crtc);
+
+ /*
+ * PP_DONE irq is only used by command mode for now.
+ * It is better to request pending before FLUSH and START trigger
+ * to make sure no pp_done irq missed.
+ * This is safe because no pp_done will happen before SW trigger
+ * in command mode.
+ */
+}
+
+static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_device *dev;
+ struct drm_plane *plane;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+ unsigned long flags;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
+ if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
+ return;
+ }
+
+ event_thread = &priv->event_thread[crtc->index];
+
+ if (dpu_crtc->event) {
+ DPU_DEBUG("already received dpu_crtc->event\n");
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ /*
+ * For planes without commit update, drm framework will not add
+ * those planes to current state since hardware update is not
+ * required. However, if those planes were power collapsed since
+ * last commit cycle, driver has to restore the hardware state
+ * of those planes explicitly here prior to plane flush.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc)
+ dpu_plane_restore(plane);
+
+ /* update performance setting before crtc kickoff */
+ dpu_core_perf_crtc_update(crtc, 1, false);
+
+ /*
+ * Final plane updates: Give each plane a chance to complete all
+ * required writes/flushing before crtc's "flush
+ * everything" call below.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (dpu_crtc->smmu_state.transition_error)
+ dpu_plane_set_error(plane, true);
+ dpu_plane_flush(plane);
+ }
+
+ /* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * dpu_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ _dpu_crtc_rp_destroy(&cstate->rp);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(cstate);
+}
+
+static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ int ret, rc = 0;
+
+ if (!crtc) {
+ DPU_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ if (!atomic_read(&dpu_crtc->frame_pending)) {
+ DPU_DEBUG("no frames pending\n");
+ return 0;
+ }
+
+ DPU_ATRACE_BEGIN("frame done completion wait");
+ ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
+ msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+ if (!ret) {
+ DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
+ rc = -ETIMEDOUT;
+ }
+ DPU_ATRACE_END("frame done completion wait");
+
+ return rc;
+}
+
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_crtc_state *cstate;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+ dev = crtc->dev;
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ priv = dpu_kms->dev->dev_private;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to start a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ DPU_ATRACE_BEGIN("crtc_commit");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct dpu_encoder_kickoff_params params = { 0 };
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ /*
+ * Encoder will flush/start now, unless it has a tx pending.
+ * If so, it may delay and flush at an irq event (e.g. ppdone)
+ */
+ dpu_encoder_prepare_for_kickoff(encoder, &params);
+ }
+
+ /* wait for frame_event_done completion */
+ DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+ ret = _dpu_crtc_wait_for_frame_done(crtc);
+ DPU_ATRACE_END("wait_for_frame_done_event");
+ if (ret) {
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+ goto end;
+ }
+
+ if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+ } else {
+ DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+ }
+
+ dpu_crtc->play_count++;
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ dpu_encoder_kickoff(encoder);
+ }
+
+end:
+ reinit_completion(&dpu_crtc->frame_done_comp);
+ DPU_ATRACE_END("crtc_commit");
+}
+
+/**
+ * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
+ * @dpu_crtc: Pointer to dpu crtc structure
+ * @enable: Whether to enable/disable vblanks
+ *
+ * Return: error code
+ */
+static int _dpu_crtc_vblank_enable_no_lock(
+ struct dpu_crtc *dpu_crtc, bool enable)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *enc;
+
+ if (!dpu_crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &dpu_crtc->base;
+ dev = crtc->dev;
+
+ if (enable) {
+ int ret;
+
+ /* drop lock since power crtc cb may try to re-acquire lock */
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ ret = _dpu_crtc_power_enable(dpu_crtc, true);
+ mutex_lock(&dpu_crtc->crtc_lock);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+ DRMID(enc), enable,
+ dpu_crtc);
+
+ dpu_encoder_register_vblank_callback(enc,
+ dpu_crtc_vblank_cb, (void *)crtc);
+ }
+ } else {
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+ DRMID(enc), enable,
+ dpu_crtc);
+
+ dpu_encoder_register_vblank_callback(enc, NULL, NULL);
+ }
+
+ /* drop lock since power crtc cb may try to re-acquire lock */
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+ mutex_lock(&dpu_crtc->crtc_lock);
+ }
+
+ return 0;
+}
+
+/**
+ * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int ret = 0;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+
+ if (!priv->kms) {
+ DPU_ERROR("invalid crtc kms\n");
+ return;
+ }
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ /*
+ * If the vblank is enabled, release a power reference on suspend
+ * and take it back during resume (if it is still enabled).
+ */
+ trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
+ if (dpu_crtc->suspend == enable)
+ DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+ crtc->base.id, enable);
+ else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+
+ dpu_crtc->suspend = enable;
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+/**
+ * dpu_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ * Return: Pointer to new drm_crtc_state structure
+ */
+static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate, *old_cstate;
+
+ if (!crtc || !crtc->state) {
+ DPU_ERROR("invalid argument(s)\n");
+ return NULL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ old_cstate = to_dpu_crtc_state(crtc->state);
+ cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return NULL;
+ }
+
+ /* duplicate base helper */
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
+ _dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
+
+ return &cstate->base;
+}
+
+/**
+ * dpu_crtc_reset - reset hook for CRTCs
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void dpu_crtc_reset(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ /* revert suspend actions, if necessary */
+ if (dpu_kms_is_suspend_state(crtc->dev))
+ _dpu_crtc_set_suspend(crtc, false);
+
+ /* remove previous state, if present */
+ if (crtc->state) {
+ dpu_crtc_destroy_state(crtc, crtc->state);
+ crtc->state = NULL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return;
+ }
+
+ _dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
+ &dpu_crtc->rp_head);
+
+ cstate->base.crtc = crtc;
+ crtc->state = &cstate->base;
+}
+
+static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct dpu_crtc_mixer *m;
+ u32 i, misr_status;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
+
+ switch (event_type) {
+ case DPU_POWER_EVENT_POST_ENABLE:
+ /* restore encoder; crtc will be programmed during commit */
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ dpu_encoder_virt_restore(encoder);
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
+ !dpu_crtc->misr_enable)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, true,
+ dpu_crtc->misr_frame_count);
+ }
+ break;
+ case DPU_POWER_EVENT_PRE_DISABLE:
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
+ !dpu_crtc->misr_enable)
+ continue;
+
+ misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+ dpu_crtc->misr_data[i] = misr_status ? misr_status :
+ dpu_crtc->misr_data[i];
+ }
+ break;
+ case DPU_POWER_EVENT_POST_DISABLE:
+ /*
+ * Nothing to do. All the planes on the CRTC will be
+ * programmed for every frame
+ */
+ break;
+ default:
+ DPU_DEBUG("event:%d not handled\n", event_type);
+ break;
+ }
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void dpu_crtc_disable(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+ struct drm_display_mode *mode;
+ struct drm_encoder *encoder;
+ struct msm_drm_private *priv;
+ int ret;
+ unsigned long flags;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ mode = &cstate->base.adjusted_mode;
+ priv = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+ if (dpu_kms_is_suspend_state(crtc->dev))
+ _dpu_crtc_set_suspend(crtc, true);
+
+ /* Disable/save vblank irq handling */
+ drm_crtc_vblank_off(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ /* wait for frame_event_done completion */
+ if (_dpu_crtc_wait_for_frame_done(crtc))
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+
+ trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
+ if (dpu_crtc->enabled && !dpu_crtc->suspend &&
+ dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->enabled = false;
+
+ if (atomic_read(&dpu_crtc->frame_pending)) {
+ trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
+ atomic_read(&dpu_crtc->frame_pending));
+ dpu_core_perf_crtc_release_bw(crtc);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+ }
+
+ dpu_core_perf_crtc_update(crtc, 0, true);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
+ }
+
+ if (dpu_crtc->power_event)
+ dpu_power_handle_unregister_event(dpu_crtc->phandle,
+ dpu_crtc->power_event);
+
+ memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+ dpu_crtc->num_mixers = 0;
+ dpu_crtc->mixers_swapped = false;
+
+ /* disable clk & bw control until clk & bw properties are set */
+ cstate->bw_control = false;
+ cstate->bw_split_vote = false;
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static void dpu_crtc_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ priv = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ dpu_encoder_register_frame_event_callback(encoder,
+ dpu_crtc_frame_event_cb, (void *)crtc);
+ }
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
+ if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
+ dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->enabled = true;
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ /* Enable/restore vblank irq handling */
+ drm_crtc_vblank_on(crtc);
+
+ dpu_crtc->power_event = dpu_power_handle_register_event(
+ dpu_crtc->phandle,
+ DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
+ DPU_POWER_EVENT_PRE_DISABLE,
+ dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
+}
+
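+/*
+ * Scratch bookkeeping used by dpu_crtc_atomic_check() to pair each DPU plane
+ * state with its drm state, blend stage and SSPP pipe id.
+ */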
+struct plane_state {
+ struct dpu_plane_state *dpu_pstate;
+ const struct drm_plane_state *drm_pstate;
+ int stage;
+ u32 pipe_id;
+};
+
+static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct plane_state *pstates;
+ struct dpu_crtc_state *cstate;
+
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+
+ int cnt = 0, rc = 0, mixer_width, i, z_pos;
+
+ struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
+ int multirect_count = 0;
+ const struct drm_plane_state *pipe_staged[SSPP_MAX];
+ int left_zpos_cnt = 0, right_zpos_cnt = 0;
+ struct drm_rect crtc_rect = { 0 };
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
+ if (!pstates)
+ return -ENOMEM;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ if (!state->enable || !state->active) {
+ DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+ crtc->base.id, state->enable, state->active);
+ goto end;
+ }
+
+ mode = &state->adjusted_mode;
+ DPU_DEBUG("%s: check", dpu_crtc->name);
+
+ /* force a full mode set if active state changed */
+ if (state->active_changed)
+ state->mode_changed = true;
+
+ memset(pipe_staged, 0, sizeof(pipe_staged));
+
+ mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+ _dpu_crtc_setup_lm_bounds(crtc, state);
+
+ crtc_rect.x2 = mode->hdisplay;
+ crtc_rect.y2 = mode->vdisplay;
+
+ /* get plane state for all drm planes associated with crtc state */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ struct drm_rect dst, clip = crtc_rect;
+
+ if (IS_ERR_OR_NULL(pstate)) {
+ rc = PTR_ERR(pstate);
+ DPU_ERROR("%s: failed to get plane%d state, %d\n",
+ dpu_crtc->name, plane->base.id, rc);
+ goto end;
+ }
+ if (cnt >= DPU_STAGE_MAX * 4)
+ continue;
+
+ pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
+ pstates[cnt].drm_pstate = pstate;
+ pstates[cnt].stage = pstate->normalized_zpos;
+ pstates[cnt].pipe_id = dpu_plane_pipe(plane);
+
+ if (pipe_staged[pstates[cnt].pipe_id]) {
+ multirect_plane[multirect_count].r0 =
+ pipe_staged[pstates[cnt].pipe_id];
+ multirect_plane[multirect_count].r1 = pstate;
+ multirect_count++;
+
+ pipe_staged[pstates[cnt].pipe_id] = NULL;
+ } else {
+ pipe_staged[pstates[cnt].pipe_id] = pstate;
+ }
+
+ cnt++;
+
+ dst = drm_plane_state_dest(pstate);
+ if (!drm_rect_intersect(&clip, &dst) ||
+ !drm_rect_equals(&clip, &dst)) {
+ DPU_ERROR("invalid vertical/horizontal destination\n");
+ DPU_ERROR("display: " DRM_RECT_FMT " plane: "
+ DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
+ DRM_RECT_ARG(&dst));
+ rc = -E2BIG;
+ goto end;
+ }
+ }
+
+ for (i = 1; i < SSPP_MAX; i++) {
+ if (pipe_staged[i]) {
+ dpu_plane_clear_multirect(pipe_staged[i]);
+
+ if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
+ DPU_ERROR(
+ "r1 only virt plane:%d not supported\n",
+ pipe_staged[i]->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+ }
+
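+ /* assign blend stages; at most two planes may occupy a given stage
+ * on each of the left and right halves of the crtc
+ */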
+ z_pos = -1;
+ for (i = 0; i < cnt; i++) {
+ /* reset counts at every new blend stage */
+ if (pstates[i].stage != z_pos) {
+ left_zpos_cnt = 0;
+ right_zpos_cnt = 0;
+ z_pos = pstates[i].stage;
+ }
+
+ /* verify z_pos setting before using it */
+ if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
+ DPU_ERROR("> %d plane stages assigned\n",
+ DPU_STAGE_MAX - DPU_STAGE_0);
+ rc = -EINVAL;
+ goto end;
+ } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+ if (left_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on left\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ left_zpos_cnt++;
+
+ } else {
+ if (right_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on right\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ right_zpos_cnt++;
+ }
+
+ pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
+ DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
+ }
+
+ for (i = 0; i < multirect_count; i++) {
+ if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
+ DPU_ERROR(
+ "multirect validation failed for planes (%d - %d)\n",
+ multirect_plane[i].r0->plane->base.id,
+ multirect_plane[i].r1->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+ rc = dpu_core_perf_crtc_check(crtc, state);
+ if (rc) {
+ DPU_ERROR("crtc%d failed performance check %d\n",
+ crtc->base.id, rc);
+ goto end;
+ }
+
+ /* validate source split:
+ * use pstates sorted by stage to check planes on same stage
+ * we assume that all pipes are in source split so its valid to compare
+ * without taking into account left/right mixer placement
+ */
+ for (i = 1; i < cnt; i++) {
+ struct plane_state *prv_pstate, *cur_pstate;
+ struct drm_rect left_rect, right_rect;
+ int32_t left_pid, right_pid;
+ int32_t stage;
+
+ prv_pstate = &pstates[i - 1];
+ cur_pstate = &pstates[i];
+ if (prv_pstate->stage != cur_pstate->stage)
+ continue;
+
+ stage = cur_pstate->stage;
+
+ left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
+ left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
+
+ right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
+ right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
+
+ if (right_rect.x1 < left_rect.x1) {
+ swap(left_pid, right_pid);
+ swap(left_rect, right_rect);
+ }
+
+ /*
+ * - planes are enumerated in pipe-priority order such that
+ * planes with lower drm_id must be left-most in a shared
+ * blend-stage when using source split.
+ * - planes in source split must be contiguous in width
+ * - planes in source split must have same dest yoff and height
+ */
+ if (right_pid < left_pid) {
+ DPU_ERROR(
+ "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+ stage, left_pid, right_pid);
+ rc = -EINVAL;
+ goto end;
+ } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
+ DPU_ERROR("non-contiguous coordinates for src split. "
+ "stage: %d left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ } else if (left_rect.y1 != right_rect.y1 ||
+ drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
+ DPU_ERROR("source split at stage: %d. invalid "
+ "yoff/height: left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+end:
+ _dpu_crtc_rp_free_unused(&cstate->rp);
+ kfree(pstates);
+ return rc;
+}
+
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+ struct dpu_crtc *dpu_crtc;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
+ if (dpu_crtc->enabled && !dpu_crtc->suspend) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->vblank_requested = en;
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_crtc_mixer *m;
+
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+
+ int i, out_width;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ dpu_crtc = s->private;
+ crtc = &dpu_crtc->base;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ mode = &crtc->state->adjusted_mode;
+ out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+ seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+ mode->hdisplay, mode->vdisplay);
+
+ seq_puts(s, "\n");
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm)
+ seq_printf(s, "\tmixer[%d] has no lm\n", i);
+ else if (!m->hw_ctl)
+ seq_printf(s, "\tmixer[%d] has no ctl\n", i);
+ else
+ seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+ m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+ out_width, mode->vdisplay);
+ }
+
+ seq_puts(s, "\n");
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ state = plane->state;
+
+ if (!pstate || !state)
+ continue;
+
+ seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+ pstate->stage);
+
+ if (plane->state->fb) {
+ fb = plane->state->fb;
+
+ seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
+ fb->base.id, (char *) &fb->format->format,
+ fb->width, fb->height);
+ for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
+ seq_printf(s, "cpp[%d]:%u ",
+ i, fb->format->cpp[i]);
+ seq_puts(s, "\n\t");
+
+ seq_printf(s, "modifier:%8llu ", fb->modifier);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+ seq_printf(s, "pitches[%d]:%8u ", i,
+ fb->pitches[i]);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+ seq_printf(s, "offsets[%d]:%8u ", i,
+ fb->offsets[i]);
+ seq_puts(s, "\n");
+ }
+
+ seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+ state->src_x, state->src_y, state->src_w, state->src_h);
+
+ seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+ state->crtc_x, state->crtc_y, state->crtc_w,
+ state->crtc_h);
+ seq_printf(s, "\tmultirect: mode: %d index: %d\n",
+ pstate->multirect_mode, pstate->multirect_index);
+
+ seq_puts(s, "\n");
+ }
+ if (dpu_crtc->vblank_cb_count) {
+ ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
+ s64 diff_ms = ktime_to_ms(diff);
+ s64 fps = diff_ms ? div_s64(
+ dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+ seq_printf(s,
+ "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
+ fps, dpu_crtc->vblank_cb_count,
+ ktime_to_ms(diff), dpu_crtc->play_count);
+
+ /* reset time & count for next measurement */
+ dpu_crtc->vblank_cb_count = 0;
+ dpu_crtc->vblank_cb_time = ktime_set(0, 0);
+ }
+
+ seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ return 0;
+}
+
+static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, _dpu_debugfs_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_crtc_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_mixer *m;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ u32 frame_count, enable;
+ size_t buff_copy;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_crtc = file->private_data;
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy)) {
+ DPU_ERROR("buffer copy failed\n");
+ return -EINVAL;
+ }
+
+ buf[buff_copy] = 0; /* end of string */
+
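+ /* expected input: "<enable> <frame_count>", e.g. "1 5" */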
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _dpu_crtc_power_enable(dpu_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ dpu_crtc->misr_enable = enable;
+ dpu_crtc->misr_frame_count = frame_count;
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ dpu_crtc->misr_data[i] = 0;
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
+ }
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+
+ return count;
+}
+
+static ssize_t _dpu_crtc_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_mixer *m;
+ int i = 0, rc;
+ u32 misr_status;
+ ssize_t len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_crtc = file->private_data;
+ rc = _dpu_crtc_power_enable(dpu_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ if (!dpu_crtc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+ continue;
+
+ misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+ dpu_crtc->misr_data[i] = misr_status ? misr_status :
+ dpu_crtc->misr_data[i];
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
+ m->hw_lm->idx - LM_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ dpu_crtc->misr_data[i]);
+ }
+
+buff_check:
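+ /* bail out if the caller's buffer cannot hold the whole report */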
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+ return len;
+}
+
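+/*
+ * Stamp out read-only seq_file fops boilerplate: given a prefix, this expands
+ * to an open() wrapper around single_open() plus a matching
+ * struct file_operations named <prefix>_fops.
+ */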
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_res *res;
+ struct dpu_crtc_respool *rp;
+ int i;
+
+ seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
+ seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
+ seq_printf(s, "core_clk_rate: %llu\n",
+ dpu_crtc->cur_perf.core_clk_rate);
+ for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+ i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ seq_printf(s, "bw_ctl[%s]: %llu\n",
+ dpu_power_handle_get_dbus_name(i),
+ dpu_crtc->cur_perf.bw_ctl[i]);
+ seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
+ dpu_power_handle_get_dbus_name(i),
+ dpu_crtc->cur_perf.max_per_pipe_ib[i]);
+ }
+
+ mutex_lock(&dpu_crtc->rp_lock);
+ list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
+ seq_printf(s, "rp.%d: ", rp->sequence_id);
+ list_for_each_entry(res, &rp->res_list, list)
+ seq_printf(s, "0x%x/0x%llx/%pK/%d ",
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&dpu_crtc->rp_lock);
+
+ return 0;
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
+
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *dpu_kms;
+
+ static const struct file_operations debugfs_status_fops = {
+ .open = _dpu_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _dpu_crtc_misr_read,
+ .write = _dpu_crtc_misr_setup,
+ };
+
+ if (!crtc)
+ return -EINVAL;
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+ if (!dpu_kms)
+ return -EINVAL;
+
+ dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
+ crtc->dev->primary->debugfs_root);
+ if (!dpu_crtc->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0400,
+ dpu_crtc->debugfs_root,
+ dpu_crtc, &debugfs_status_fops);
+ debugfs_create_file("state", 0600,
+ dpu_crtc->debugfs_root,
+ &dpu_crtc->base,
+ &dpu_crtc_debugfs_state_fops);
+ debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
+ dpu_crtc, &debugfs_misr_fops);
+
+ return 0;
+}
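Since the DRM device's primary debugfs root is the parent, these nodes typically appear under /sys/kernel/debug/dri/<minor>/crtc<id>/ (the exact path depends on the debugfs mount point and the CRTC object id). A minimal userspace sketch for dumping the status node, with the path assumed for illustration:

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		/* path is an assumption; adjust the minor and crtc id */
		FILE *f = fopen("/sys/kernel/debug/dri/0/crtc0/status", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}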
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return;
+ dpu_crtc = to_dpu_crtc(crtc);
+ debugfs_remove_recursive(dpu_crtc->debugfs_root);
+}
+#else
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int dpu_crtc_late_register(struct drm_crtc *crtc)
+{
+ return _dpu_crtc_init_debugfs(crtc);
+}
+
+static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
+{
+ _dpu_crtc_destroy_debugfs(crtc);
+}
+
+static const struct drm_crtc_funcs dpu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = dpu_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = dpu_crtc_reset,
+ .atomic_duplicate_state = dpu_crtc_duplicate_state,
+ .atomic_destroy_state = dpu_crtc_destroy_state,
+ .late_register = dpu_crtc_late_register,
+ .early_unregister = dpu_crtc_early_unregister,
+};
+
+static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+ .disable = dpu_crtc_disable,
+ .atomic_enable = dpu_crtc_enable,
+ .atomic_check = dpu_crtc_atomic_check,
+ .atomic_begin = dpu_crtc_atomic_begin,
+ .atomic_flush = dpu_crtc_atomic_flush,
+};
+
+/* initialize crtc */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+{
+ struct drm_crtc *crtc = NULL;
+ struct dpu_crtc *dpu_crtc = NULL;
+ struct msm_drm_private *priv = NULL;
+ struct dpu_kms *kms = NULL;
+ int i;
+
+ priv = dev->dev_private;
+ kms = to_dpu_kms(priv->kms);
+
+ dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
+ if (!dpu_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &dpu_crtc->base;
+ crtc->dev = dev;
+
+ mutex_init(&dpu_crtc->crtc_lock);
+ spin_lock_init(&dpu_crtc->spin_lock);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+
+ mutex_init(&dpu_crtc->rp_lock);
+ INIT_LIST_HEAD(&dpu_crtc->rp_head);
+
+ init_completion(&dpu_crtc->frame_done_comp);
+
+ INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
+
+ for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
+ INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
+ list_add(&dpu_crtc->frame_events[i].list,
+ &dpu_crtc->frame_event_list);
+ kthread_init_work(&dpu_crtc->frame_events[i].work,
+ dpu_crtc_frame_event_work);
+ }
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
+ NULL);
+
+ drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+ plane->crtc = crtc;
+
+ /* save user-friendly CRTC name for later */
+ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+ /* initialize event handling */
+ spin_lock_init(&dpu_crtc->event_lock);
+
+ dpu_crtc->phandle = &kms->phandle;
+
+ DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
+ return crtc;
+}
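A minimal caller sketch: dpu_crtc_init() hands back either a live CRTC or an ERR_PTR, so the KMS bring-up path is expected to check with IS_ERR() before wiring it up (the priv->crtcs bookkeeping here is assumed for illustration):

	struct drm_crtc *crtc;

	crtc = dpu_crtc_init(dev, plane);
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);
	priv->crtcs[priv->num_crtcs++] = crtc;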
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644
index 000000000000..e87109e608e9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+#include "dpu_hw_blk.h"
+
+#define DPU_CRTC_NAME_SIZE 12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE 4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT: RealTime client like video/cmd mode display
+ * voting through apps rsc
+ * @NRT_CLIENT: Non-RealTime client like WB display
+ * voting through apps rsc
+ */
+enum dpu_crtc_client_type {
+ RT_CLIENT,
+ NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state: smmu state
+ * @ATTACHED: all the context banks are attached.
+ * @DETACHED: all the context banks are detached.
+ * @ATTACH_ALL_REQ: transient state of attaching context banks.
+ * @DETACH_ALL_REQ: transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+ ATTACHED = 0,
+ DETACHED,
+ ATTACH_ALL_REQ,
+ DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum dpu_crtc_smmu_state_transition_type {
+ NONE,
+ PRE_COMMIT,
+ POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether there is error while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+ uint32_t state;
+ uint32_t transition_type;
+ uint32_t transition_error;
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm: LM HW Driver context
+ * @hw_ctl: CTL Path HW driver context
+ * @encoder: Encoder attached to this lm & ctl
+ * @mixer_op_mode: mixer blending operation mode
+ * @flush_mask: mixer flush mask for ctl, mixer and pipe
+ */
+struct dpu_crtc_mixer {
+ struct dpu_hw_mixer *hw_lm;
+ struct dpu_hw_ctl *hw_ctl;
+ struct drm_encoder *encoder;
+ u32 mixer_op_mode;
+ u32 flush_mask;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work: base work structure
+ * @crtc: Pointer to crtc handling this event
+ * @list: event list
+ * @ts: timestamp at queue entry
+ * @event: event identifier
+ */
+struct dpu_crtc_frame_event {
+ struct kthread_work work;
+ struct drm_crtc *crtc;
+ struct list_head list;
+ ktime_t ts;
+ u32 event;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT 16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base : Base drm crtc structure
+ * @name : ASCII description of this crtc
+ * @num_ctls : Number of ctl paths in use
+ * @num_mixers : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for left/right update
+ * especially in the case of DSC Merge.
+ * @mixers : List of active mixers
+ * @event : Pointer to last received drm vblank event. If there is a
+ * pending vblank event, this will be non-null.
+ * @vsync_count : Running count of received vsync events
+ * @drm_requested_vblank : Whether vblanks have been enabled in the encoder
+ * @property_info : Opaque structure for generic property support
+ * @property_defaults : Array of default values for generic property support
+ * @stage_cfg : H/w mixer stage configuration
+ * @debugfs_root : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count : frame count between crtc enable and disable
+ * @vblank_cb_time : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend : whether or not a suspend operation is in progress
+ * @enabled : whether the DPU CRTC is currently enabled. Updated in the
+ * commit thread, not at state-swap time (which is earlier), so
+ * it is safe to base decisions on during vblank on/off work
+ * @feature_list : list of color processing features supported on a crtc
+ * @active_list : list of color processing features that are active
+ * @dirty_list : list of color processing features that are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @crtc_lock : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp : for frame_event_done synchronization
+ * @event_thread : Pointer to event handler thread
+ * @event_worker : Event worker queue
+ * @event_lock : Spinlock around event handling code
+ * @misr_enable : boolean entry indicating misr enable/disable status.
+ * @misr_frame_count : misr frame count provided by client
+ * @misr_data : stores misr data before turning off the clocks.
+ * @phandle: Pointer to power handler
+ * @power_event : registered power event handle
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @rp_lock : serialization lock for resource pool
+ * @rp_head : list of active resource pool
+ * @scl3_lut_cfg : qseed3 lut config
+ */
+struct dpu_crtc {
+ struct drm_crtc base;
+ char name[DPU_CRTC_NAME_SIZE];
+
+ /* HW Resources reserved for the crtc */
+ u32 num_ctls;
+ u32 num_mixers;
+ bool mixers_swapped;
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+ struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
+
+ struct drm_pending_vblank_event *event;
+ u32 vsync_count;
+
+ struct dpu_hw_stage_cfg stage_cfg;
+ struct dentry *debugfs_root;
+
+ u32 vblank_cb_count;
+ u64 play_count;
+ ktime_t vblank_cb_time;
+ bool vblank_requested;
+ bool suspend;
+ bool enabled;
+
+ struct list_head feature_list;
+ struct list_head active_list;
+ struct list_head dirty_list;
+ struct list_head ad_dirty;
+ struct list_head ad_active;
+
+ struct mutex crtc_lock;
+
+ atomic_t frame_pending;
+ struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+ struct list_head frame_event_list;
+ spinlock_t spin_lock;
+ struct completion frame_done_comp;
+
+ /* for handling internal event thread */
+ spinlock_t event_lock;
+ bool misr_enable;
+ u32 misr_frame_count;
+ u32 misr_data[CRTC_DUAL_MIXERS];
+
+ struct dpu_power_handle *phandle;
+ struct dpu_power_event *power_event;
+
+ struct dpu_core_perf_params cur_perf;
+
+ struct mutex rp_lock;
+ struct list_head rp_head;
+
+ struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
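The cast macro relies on the drm_crtc being embedded as the base member: container_of() subtracts offsetof(struct dpu_crtc, base) from the drm_crtc pointer to recover the enclosing object, so for any wrapped CRTC:

	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	/* &dpu_crtc->base == crtc holds by construction */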
+
+/**
+ * struct dpu_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct dpu_crtc_res_ops {
+ void *(*get)(void *val, u32 type, u64 tag);
+ void (*put)(void *val);
+};
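A sketch of how a provider might fill these callbacks (the example_* helpers are hypothetical, named only for illustration): get() resolves and references a handle for a type/tag pair and put() drops that reference when the pool releases it.

	static void *example_res_get(void *val, u32 type, u64 tag)
	{
		/* e.g. look up the hw block for (type, tag) and take a ref */
		return val;
	}

	static void example_res_put(void *val)
	{
		/* e.g. drop the reference taken in example_res_get() */
	}

	static const struct dpu_crtc_res_ops example_res_ops = {
		.get = example_res_get,
		.put = example_res_put,
	};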
+
+#define DPU_CRTC_RES_FLAG_FREE BIT(0)
+
+/**
+ * struct dpu_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct dpu_crtc_res {
+ struct list_head list;
+ u32 type;
+ u64 tag;
+ atomic_t refcount;
+ struct dpu_crtc_res_ops ops;
+ void *val;
+ u32 flags;
+};
+
+/**
+ * struct dpu_crtc_respool - crtc resource pool
+ * @rp_lock: pointer to serialization lock
+ * @rp_head: pointer to head of active resource pools of this crtc
+ * @rp_list: list of crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resource managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct dpu_crtc_respool {
+ struct mutex *rp_lock;
+ struct list_head *rp_head;
+ struct list_head rp_list;
+ u32 sequence_id;
+ struct list_head res_list;
+ struct dpu_crtc_res_ops ops;
+};
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @is_ppsplit : Whether current topology requires PPSplit special handling
+ * @bw_control : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
+ * Origin top left of CRTC.
+ * @property_state: Local storage for msm_prop properties
+ * @property_values: Current crtc property values
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ */
+struct dpu_crtc_state {
+ struct drm_crtc_state base;
+
+ bool bw_control;
+ bool bw_split_vote;
+
+ bool is_ppsplit;
+ struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+ uint64_t input_fence_timeout_ns;
+
+ struct dpu_core_perf_params new_perf;
+ struct dpu_crtc_respool rp;
+};
+
+#define to_dpu_crtc_state(x) \
+ container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_get_mixer_width - get the mixer width
+ * Mixer width will be same as panel width(/2 for split)
+ */
+static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
+ struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+ u32 mixer_width;
+
+ if (!dpu_crtc || !cstate || !mode)
+ return 0;
+
+ mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+ mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
+
+ return mixer_width;
+}
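A worked example with assumed numbers: for a 1920-wide mode driven through two mixers (num_mixers == CRTC_DUAL_MIXERS), each layer mixer covers 1920 / 2 = 960 pixels; with a single mixer the full 1920 is used.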
+
+/**
+ * dpu_crtc_get_mixer_height - get the mixer height
+ * Mixer height will be the same as the panel height
+ */
+static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
+ struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+ if (!dpu_crtc || !cstate || !mode)
+ return 0;
+
+ return mode->vdisplay;
+}
+
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return -EINVAL;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ return atomic_read(&dpu_crtc->frame_pending);
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * @Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc client type: rt, nrt, etc.
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+ struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *cstate =
+ crtc ? to_dpu_crtc_state(crtc->state) : NULL;
+
+ if (!cstate)
+ return NRT_CLIENT;
+
+ return RT_CLIENT;
+}
+
+/**
+ * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
+{
+ return crtc ? crtc->enabled : false;
+}
+
+#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
new file mode 100644
index 000000000000..ae2aee7ed9e1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
@@ -0,0 +1,2393 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm_runtime.h>
+
+#include "dpu_dbg.h"
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+
+#define DEFAULT_DBGBUS_DPU DPU_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT DPU_DBG_DUMP_IN_MEM
+#define REG_BASE_NAME_LEN 80
+
+#define DBGBUS_FLAGS_DSPP BIT(0)
+#define DBGBUS_DSPP_STATUS 0x34C
+
+#define DBGBUS_NAME_DPU "dpu"
+#define DBGBUS_NAME_VBIF_RT "vbif_rt"
+
+/* offsets from dpu top address for the debug buses */
+#define DBGBUS_SSPP0 0x188
+#define DBGBUS_AXI_INTF 0x194
+#define DBGBUS_SSPP1 0x298
+#define DBGBUS_DSPP 0x348
+#define DBGBUS_PERIPH 0x418
+
+#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
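The mask shifts the block id up past bit 3, places the test point in bits [3:1], and sets the enable in bit 0. As a worked example, TEST_MASK(63, 7) = (63 << 4) | (7 << 1) | BIT(0) = 0x3f0 | 0xe | 0x1 = 0x3ff.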
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON 0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
+#define MMSS_VBIF_TEST_BUS_OUT 0x230
+
+/* Vbif error info */
+#define MMSS_VBIF_PND_ERR 0x190
+#define MMSS_VBIF_SRC_ERR 0x194
+#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
+#define MMSS_VBIF_ERR_INFO 0x1a0
+#define MMSS_VBIF_ERR_INFO_1 0x1a4
+#define MMSS_VBIF_CLIENT_NUM 14
+
+/**
+ * struct dpu_dbg_reg_base - register region base.
+ * May have sub-ranges, which are then used for dumping,
+ * or no sub-ranges, in which case dumping covers base -> max_offset
+ * @reg_base_head: head of this node
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len: buffer length used for manual register dumping
+ * @cb: callback for external dump function, null if not defined
+ * @cb_ptr: private pointer to callback function
+ */
+struct dpu_dbg_reg_base {
+ struct list_head reg_base_head;
+ char name[REG_BASE_NAME_LEN];
+ void __iomem *base;
+ size_t off;
+ size_t cnt;
+ size_t max_offset;
+ char *buf;
+ size_t buf_len;
+ void (*cb)(void *ptr);
+ void *cb_ptr;
+};
+
+struct dpu_debug_bus_entry {
+ u32 wr_addr;
+ u32 block_id;
+ u32 test_id;
+ void (*analyzer)(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val);
+};
+
+struct vbif_debug_bus_entry {
+ u32 disable_bus_addr;
+ u32 block_bus_addr;
+ u32 bit_offset;
+ u32 block_cnt;
+ u32 test_pnt_start;
+ u32 test_pnt_cnt;
+};
+
+struct dpu_dbg_debug_bus_common {
+ char *name;
+ u32 enable_mask;
+ bool include_in_deferred_work;
+ u32 flags;
+ u32 entries_size;
+ u32 *dumped_content;
+};
+
+struct dpu_dbg_dpu_debug_bus {
+ struct dpu_dbg_debug_bus_common cmn;
+ struct dpu_debug_bus_entry *entries;
+ u32 top_blk_off;
+};
+
+struct dpu_dbg_vbif_debug_bus {
+ struct dpu_dbg_debug_bus_common cmn;
+ struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct dpu_dbg_base - global dpu debug base structure
+ * @reg_base_list: list of register dumping regions
+ * @dev: device pointer
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @dbgbus_dpu: debug bus structure for the dpu
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ */
+static struct dpu_dbg_base {
+ struct list_head reg_base_list;
+ struct device *dev;
+
+ struct work_struct dump_work;
+
+ struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
+ struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
+} dpu_dbg_base;
+
+static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & 0xFFF000))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
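In the entry tables that follow, only some test points name a fourth initializer; those attach one of the analyzers above, which is invoked with the sampled bus value so it can flag suspicious states (the lm and ppb analyzers, for instance, only print when particular status bits are set). Entries without an analyzer go through the generic dump path.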
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
+
+ /* Unpack 0 sspp 0 */
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+ { DBGBUS_SSPP0, 85, 2 },
+
+ /* Unpack 0 sspp 1 */
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+ { DBGBUS_SSPP1, 85, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* cursor 1 */
+ { DBGBUS_SSPP1, 80, 0 },
+ { DBGBUS_SSPP1, 80, 1 },
+ { DBGBUS_SSPP1, 80, 2 },
+ { DBGBUS_SSPP1, 80, 3 },
+ { DBGBUS_SSPP1, 80, 4 },
+ { DBGBUS_SSPP1, 80, 5 },
+ { DBGBUS_SSPP1, 80, 6 },
+ { DBGBUS_SSPP1, 80, 7 },
+
+ { DBGBUS_SSPP1, 81, 0 },
+ { DBGBUS_SSPP1, 81, 1 },
+ { DBGBUS_SSPP1, 81, 2 },
+ { DBGBUS_SSPP1, 81, 3 },
+ { DBGBUS_SSPP1, 81, 4 },
+ { DBGBUS_SSPP1, 81, 5 },
+ { DBGBUS_SSPP1, 81, 6 },
+ { DBGBUS_SSPP1, 81, 7 },
+
+ { DBGBUS_SSPP1, 82, 0 },
+ { DBGBUS_SSPP1, 82, 1 },
+ { DBGBUS_SSPP1, 82, 2 },
+ { DBGBUS_SSPP1, 82, 3 },
+ { DBGBUS_SSPP1, 82, 4 },
+ { DBGBUS_SSPP1, 82, 5 },
+ { DBGBUS_SSPP1, 82, 6 },
+ { DBGBUS_SSPP1, 82, 7 },
+
+ { DBGBUS_SSPP1, 83, 0 },
+ { DBGBUS_SSPP1, 83, 1 },
+ { DBGBUS_SSPP1, 83, 2 },
+ { DBGBUS_SSPP1, 83, 3 },
+ { DBGBUS_SSPP1, 83, 4 },
+ { DBGBUS_SSPP1, 83, 5 },
+ { DBGBUS_SSPP1, 83, 6 },
+ { DBGBUS_SSPP1, 83, 7 },
+
+ { DBGBUS_SSPP1, 84, 0 },
+ { DBGBUS_SSPP1, 84, 1 },
+ { DBGBUS_SSPP1, 84, 2 },
+ { DBGBUS_SSPP1, 84, 3 },
+ { DBGBUS_SSPP1, 84, 4 },
+ { DBGBUS_SSPP1, 84, 5 },
+ { DBGBUS_SSPP1, 84, 6 },
+ { DBGBUS_SSPP1, 84, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 0},
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 64, 0},
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 65, 0},
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 66, 0},
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 67, 0},
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 68, 0},
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 69, 0},
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 0},
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 71, 0},
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 72, 0},
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 73, 0},
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 74, 0},
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 75, 0},
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 76, 0},
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 9, 0},
+ { DBGBUS_SSPP0, 9, 1},
+ { DBGBUS_SSPP0, 9, 3},
+ { DBGBUS_SSPP0, 29, 0},
+ { DBGBUS_SSPP0, 29, 1},
+ { DBGBUS_SSPP0, 29, 3},
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 9, 0},
+ { DBGBUS_SSPP1, 9, 1},
+ { DBGBUS_SSPP1, 9, 3},
+ { DBGBUS_SSPP1, 29, 0},
+ { DBGBUS_SSPP1, 29, 1},
+ { DBGBUS_SSPP1, 29, 3},
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ { DBGBUS_PERIPH, 60, 0},
+};
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
+
+ /* Unpack 0 sspp 0 */
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+
+ /* Unpack 0 sspp 1 */
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 84, 1},
+ { DBGBUS_DSPP, 84, 2},
+ { DBGBUS_DSPP, 84, 3},
+ { DBGBUS_DSPP, 84, 4},
+ { DBGBUS_DSPP, 84, 5},
+ { DBGBUS_DSPP, 84, 6},
+ { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
+
+
+ { DBGBUS_DSPP, 85, 1},
+ { DBGBUS_DSPP, 85, 2},
+ { DBGBUS_DSPP, 85, 3},
+ { DBGBUS_DSPP, 85, 4},
+ { DBGBUS_DSPP, 85, 5},
+ { DBGBUS_DSPP, 85, 6},
+ { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
+
+
+ { DBGBUS_DSPP, 86, 1},
+ { DBGBUS_DSPP, 86, 2},
+ { DBGBUS_DSPP, 86, 3},
+ { DBGBUS_DSPP, 86, 4},
+ { DBGBUS_DSPP, 86, 5},
+ { DBGBUS_DSPP, 86, 6},
+ { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
+
+
+ { DBGBUS_DSPP, 87, 1},
+ { DBGBUS_DSPP, 87, 2},
+ { DBGBUS_DSPP, 87, 3},
+ { DBGBUS_DSPP, 87, 4},
+ { DBGBUS_DSPP, 87, 5},
+ { DBGBUS_DSPP, 87, 6},
+ { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 88, 1},
+ { DBGBUS_DSPP, 88, 2},
+ { DBGBUS_DSPP, 88, 3},
+ { DBGBUS_DSPP, 88, 4},
+ { DBGBUS_DSPP, 88, 5},
+ { DBGBUS_DSPP, 88, 6},
+ { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 89, 1},
+ { DBGBUS_DSPP, 89, 2},
+ { DBGBUS_DSPP, 89, 3},
+ { DBGBUS_DSPP, 89, 4},
+ { DBGBUS_DSPP, 89, 5},
+ { DBGBUS_DSPP, 89, 6},
+ { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 90, 1},
+ { DBGBUS_DSPP, 90, 2},
+ { DBGBUS_DSPP, 90, 3},
+ { DBGBUS_DSPP, 90, 4},
+ { DBGBUS_DSPP, 90, 5},
+ { DBGBUS_DSPP, 90, 6},
+ { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 91, 1},
+ { DBGBUS_DSPP, 91, 2},
+ { DBGBUS_DSPP, 91, 3},
+ { DBGBUS_DSPP, 91, 4},
+ { DBGBUS_DSPP, 91, 5},
+ { DBGBUS_DSPP, 91, 6},
+ { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 92, 1},
+ { DBGBUS_DSPP, 92, 2},
+ { DBGBUS_DSPP, 92, 3},
+ { DBGBUS_DSPP, 92, 4},
+ { DBGBUS_DSPP, 92, 5},
+ { DBGBUS_DSPP, 92, 6},
+ { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 93, 1},
+ { DBGBUS_DSPP, 93, 2},
+ { DBGBUS_DSPP, 93, 3},
+ { DBGBUS_DSPP, 93, 4},
+ { DBGBUS_DSPP, 93, 5},
+ { DBGBUS_DSPP, 93, 6},
+ { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 94, 1},
+ { DBGBUS_DSPP, 94, 2},
+ { DBGBUS_DSPP, 94, 3},
+ { DBGBUS_DSPP, 94, 4},
+ { DBGBUS_DSPP, 94, 5},
+ { DBGBUS_DSPP, 94, 6},
+ { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 95, 1},
+ { DBGBUS_DSPP, 95, 2},
+ { DBGBUS_DSPP, 95, 3},
+ { DBGBUS_DSPP, 95, 4},
+ { DBGBUS_DSPP, 95, 5},
+ { DBGBUS_DSPP, 95, 6},
+ { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM5 */
+ { DBGBUS_DSPP, 110, 1},
+ { DBGBUS_DSPP, 110, 2},
+ { DBGBUS_DSPP, 110, 3},
+ { DBGBUS_DSPP, 110, 4},
+ { DBGBUS_DSPP, 110, 5},
+ { DBGBUS_DSPP, 110, 6},
+ { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 111, 1},
+ { DBGBUS_DSPP, 111, 2},
+ { DBGBUS_DSPP, 111, 3},
+ { DBGBUS_DSPP, 111, 4},
+ { DBGBUS_DSPP, 111, 5},
+ { DBGBUS_DSPP, 111, 6},
+ { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 112, 1},
+ { DBGBUS_DSPP, 112, 2},
+ { DBGBUS_DSPP, 112, 3},
+ { DBGBUS_DSPP, 112, 4},
+ { DBGBUS_DSPP, 112, 5},
+ { DBGBUS_DSPP, 112, 6},
+ { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 113, 1},
+ { DBGBUS_DSPP, 113, 2},
+ { DBGBUS_DSPP, 113, 3},
+ { DBGBUS_DSPP, 113, 4},
+ { DBGBUS_DSPP, 113, 5},
+ { DBGBUS_DSPP, 113, 6},
+ { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 114, 1},
+ { DBGBUS_DSPP, 114, 2},
+ { DBGBUS_DSPP, 114, 3},
+ { DBGBUS_DSPP, 114, 4},
+ { DBGBUS_DSPP, 114, 5},
+ { DBGBUS_DSPP, 114, 6},
+ { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 115, 1},
+ { DBGBUS_DSPP, 115, 2},
+ { DBGBUS_DSPP, 115, 3},
+ { DBGBUS_DSPP, 115, 4},
+ { DBGBUS_DSPP, 115, 5},
+ { DBGBUS_DSPP, 115, 6},
+ { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 116, 1},
+ { DBGBUS_DSPP, 116, 2},
+ { DBGBUS_DSPP, 116, 3},
+ { DBGBUS_DSPP, 116, 4},
+ { DBGBUS_DSPP, 116, 5},
+ { DBGBUS_DSPP, 116, 6},
+ { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 117, 1},
+ { DBGBUS_DSPP, 117, 2},
+ { DBGBUS_DSPP, 117, 3},
+ { DBGBUS_DSPP, 117, 4},
+ { DBGBUS_DSPP, 117, 5},
+ { DBGBUS_DSPP, 117, 6},
+ { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 118, 1},
+ { DBGBUS_DSPP, 118, 2},
+ { DBGBUS_DSPP, 118, 3},
+ { DBGBUS_DSPP, 118, 4},
+ { DBGBUS_DSPP, 118, 5},
+ { DBGBUS_DSPP, 118, 6},
+ { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 119, 1},
+ { DBGBUS_DSPP, 119, 2},
+ { DBGBUS_DSPP, 119, 3},
+ { DBGBUS_DSPP, 119, 4},
+ { DBGBUS_DSPP, 119, 5},
+ { DBGBUS_DSPP, 119, 6},
+ { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 120, 1},
+ { DBGBUS_DSPP, 120, 2},
+ { DBGBUS_DSPP, 120, 3},
+ { DBGBUS_DSPP, 120, 4},
+ { DBGBUS_DSPP, 120, 5},
+ { DBGBUS_DSPP, 120, 6},
+ { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ /* intf0-3 */
+ { DBGBUS_PERIPH, 0, 0},
+ { DBGBUS_PERIPH, 1, 0},
+ { DBGBUS_PERIPH, 2, 0},
+ { DBGBUS_PERIPH, 3, 0},
+
+ /* te counter wrapper */
+ { DBGBUS_PERIPH, 60, 0},
+
+ /* dsc0 */
+ { DBGBUS_PERIPH, 47, 0},
+ { DBGBUS_PERIPH, 47, 1},
+ { DBGBUS_PERIPH, 47, 2},
+ { DBGBUS_PERIPH, 47, 3},
+ { DBGBUS_PERIPH, 47, 4},
+ { DBGBUS_PERIPH, 47, 5},
+ { DBGBUS_PERIPH, 47, 6},
+ { DBGBUS_PERIPH, 47, 7},
+
+ /* dsc1 */
+ { DBGBUS_PERIPH, 48, 0},
+ { DBGBUS_PERIPH, 48, 1},
+ { DBGBUS_PERIPH, 48, 2},
+ { DBGBUS_PERIPH, 48, 3},
+ { DBGBUS_PERIPH, 48, 4},
+ { DBGBUS_PERIPH, 48, 5},
+ { DBGBUS_PERIPH, 48, 6},
+ { DBGBUS_PERIPH, 48, 7},
+
+ /* dsc2 */
+ { DBGBUS_PERIPH, 51, 0},
+ { DBGBUS_PERIPH, 51, 1},
+ { DBGBUS_PERIPH, 51, 2},
+ { DBGBUS_PERIPH, 51, 3},
+ { DBGBUS_PERIPH, 51, 4},
+ { DBGBUS_PERIPH, 51, 5},
+ { DBGBUS_PERIPH, 51, 6},
+ { DBGBUS_PERIPH, 51, 7},
+
+ /* dsc3 */
+ { DBGBUS_PERIPH, 52, 0},
+ { DBGBUS_PERIPH, 52, 1},
+ { DBGBUS_PERIPH, 52, 2},
+ { DBGBUS_PERIPH, 52, 3},
+ { DBGBUS_PERIPH, 52, 4},
+ { DBGBUS_PERIPH, 52, 5},
+ { DBGBUS_PERIPH, 52, 6},
+ { DBGBUS_PERIPH, 52, 7},
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* cdwn */
+ { DBGBUS_PERIPH, 80, 0},
+ { DBGBUS_PERIPH, 80, 1},
+ { DBGBUS_PERIPH, 80, 2},
+
+ { DBGBUS_PERIPH, 81, 0},
+ { DBGBUS_PERIPH, 81, 1},
+ { DBGBUS_PERIPH, 81, 2},
+
+ { DBGBUS_PERIPH, 82, 0},
+ { DBGBUS_PERIPH, 82, 1},
+ { DBGBUS_PERIPH, 82, 2},
+ { DBGBUS_PERIPH, 82, 3},
+ { DBGBUS_PERIPH, 82, 4},
+ { DBGBUS_PERIPH, 82, 5},
+ { DBGBUS_PERIPH, 82, 6},
+ { DBGBUS_PERIPH, 82, 7},
+
+ /* hdmi */
+ { DBGBUS_PERIPH, 68, 0},
+ { DBGBUS_PERIPH, 68, 1},
+ { DBGBUS_PERIPH, 68, 2},
+ { DBGBUS_PERIPH, 68, 3},
+ { DBGBUS_PERIPH, 68, 4},
+ { DBGBUS_PERIPH, 68, 5},
+
+ /* edp */
+ { DBGBUS_PERIPH, 69, 0},
+ { DBGBUS_PERIPH, 69, 1},
+ { DBGBUS_PERIPH, 69, 2},
+ { DBGBUS_PERIPH, 69, 3},
+ { DBGBUS_PERIPH, 69, 4},
+ { DBGBUS_PERIPH, 69, 5},
+
+ /* dsi0 */
+ { DBGBUS_PERIPH, 70, 0},
+ { DBGBUS_PERIPH, 70, 1},
+ { DBGBUS_PERIPH, 70, 2},
+ { DBGBUS_PERIPH, 70, 3},
+ { DBGBUS_PERIPH, 70, 4},
+ { DBGBUS_PERIPH, 70, 5},
+
+ /* dsi1 */
+ { DBGBUS_PERIPH, 71, 0},
+ { DBGBUS_PERIPH, 71, 1},
+ { DBGBUS_PERIPH, 71, 2},
+ { DBGBUS_PERIPH, 71, 3},
+ { DBGBUS_PERIPH, 71, 4},
+ { DBGBUS_PERIPH, 71, 5},
+};
+
+static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
+ {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
+ {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
+ {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
+ {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
+ {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
+ {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
+ {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
+};
+
+/**
+ * _dpu_dbg_enable_power - turn power on or off for hw register access
+ * @enable: whether to turn power on or off
+ */
+static inline void _dpu_dbg_enable_power(int enable)
+{
+ if (enable)
+ pm_runtime_get_sync(dpu_dbg_base.dev);
+ else
+ pm_runtime_put_sync(dpu_dbg_base.dev);
+}
+
+static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+{
+ bool in_log, in_mem;
+ u32 **dump_mem = NULL;
+ u32 *dump_addr = NULL;
+ u32 status = 0;
+ struct dpu_debug_bus_entry *head;
+ phys_addr_t phys = 0;
+ int list_size;
+ int i;
+ u32 offset;
+ void __iomem *mem_base = NULL;
+ struct dpu_dbg_reg_base *reg_base;
+
+ if (!bus || !bus->cmn.entries_size)
+ return;
+
+ list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+ reg_base_head)
+ if (strlen(reg_base->name) &&
+ !strcmp(reg_base->name, bus->cmn.name))
+ mem_base = reg_base->base + bus->top_blk_off;
+
+ if (!mem_base) {
+ pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+ return;
+ }
+
+ dump_mem = &bus->cmn.dumped_content;
+
+ /* keep four 4-byte words in memory for each bus entry */
+ list_size = (bus->cmn.entries_size * 4 * 4);
+
+ in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+ in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+ if (!in_log && !in_mem)
+ return;
+
+ dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(dpu_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ _dpu_dbg_enable_power(true);
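+ /*
+ * For each entry: select the test point by writing its block and
+ * test ids to the entry's control register, read the status word
+ * back, then disable the test point before moving on.
+ */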
+ for (i = 0; i < bus->cmn.entries_size; i++) {
+ head = bus->entries + i;
+ writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+ mem_base + head->wr_addr);
+ wmb(); /* make sure test bits were written */
+
+ if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
+ offset = DBGBUS_DSPP_STATUS;
+ /* keep DSPP test point enabled */
+ if (head->wr_addr != DBGBUS_DSPP)
+ writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
+ } else {
+ offset = head->wr_addr + 0x4;
+ }
+
+ status = readl_relaxed(mem_base + offset);
+
+ if (in_log)
+ dev_info(dpu_dbg_base.dev,
+ "waddr=0x%x blk=%d tst=%d val=0x%x\n",
+ head->wr_addr, head->block_id,
+ head->test_id, status);
+
+ if (dump_addr && in_mem) {
+ dump_addr[i*4] = head->wr_addr;
+ dump_addr[i*4 + 1] = head->block_id;
+ dump_addr[i*4 + 2] = head->test_id;
+ dump_addr[i*4 + 3] = status;
+ }
+
+ if (head->analyzer)
+ head->analyzer(mem_base, head, status);
+
+ /* Disable debug bus once we are done */
+ writel_relaxed(0, mem_base + head->wr_addr);
+ if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
+ head->wr_addr != DBGBUS_DSPP)
+ writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
+ }
+ _dpu_dbg_enable_power(false);
+
+ dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
+}
+
+static void _dpu_dbg_dump_vbif_debug_bus_entry(
+ struct vbif_debug_bus_entry *head, void __iomem *mem_base,
+ u32 *dump_addr, bool in_log)
+{
+ int i, j;
+ u32 val;
+
+ if (!dump_addr && !in_log)
+ return;
+
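+ /*
+ * walk every block behind this bus entry and, for each of its test
+ * points, select the point and sample MMSS_VBIF_TEST_BUS_OUT
+ */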
+ for (i = 0; i < head->block_cnt; i++) {
+ writel_relaxed(1 << (i + head->bit_offset),
+ mem_base + head->block_bus_addr);
+ /* make sure that the current bus block is enabled */
+ wmb();
+ for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
+ writel_relaxed(j, mem_base + head->block_bus_addr + 4);
+ /* make sure that test point is enabled */
+ wmb();
+ val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
+ if (dump_addr) {
+ *dump_addr++ = head->block_bus_addr;
+ *dump_addr++ = i;
+ *dump_addr++ = j;
+ *dump_addr++ = val;
+ }
+ if (in_log)
+ dev_info(dpu_dbg_base.dev,
+ "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+ head->block_bus_addr, i, j, val);
+ }
+ }
+}
+
+static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+{
+ bool in_log, in_mem;
+ u32 **dump_mem = NULL;
+ u32 *dump_addr = NULL;
+ u32 value, d0, d1;
+ unsigned long reg, reg1, reg2;
+ struct vbif_debug_bus_entry *head;
+ phys_addr_t phys = 0;
+ int i, list_size = 0;
+ void __iomem *mem_base = NULL;
+ struct vbif_debug_bus_entry *dbg_bus;
+ u32 bus_size;
+ struct dpu_dbg_reg_base *reg_base;
+
+ if (!bus || !bus->cmn.entries_size)
+ return;
+
+ list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+ reg_base_head)
+ if (strlen(reg_base->name) &&
+ !strcmp(reg_base->name, bus->cmn.name))
+ mem_base = reg_base->base;
+
+ if (!mem_base) {
+ pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+ return;
+ }
+
+ dbg_bus = bus->entries;
+ bus_size = bus->cmn.entries_size;
+ list_size = bus->cmn.entries_size;
+ dump_mem = &bus->cmn.dumped_content;
+
+ if (!dump_mem || !dbg_bus || !bus_size || !list_size)
+ return;
+
+ /* account for every test point behind each entry */
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+ list_size += (head->block_cnt * head->test_pnt_cnt);
+ }
+
+ /* 4 bytes * 4 entries for each test point */
+ list_size *= 16;
+
+ in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+ in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+ if (!in_log && !in_mem)
+ return;
+
+ dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(dpu_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ _dpu_dbg_enable_power(true);
+
+ value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
+ writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
+
+ /* make sure that vbif core is on */
+ wmb();
+
+ /*
+ * Extract VBIF error info based on XIN halt and error status.
+ * If the XIN client is not in HALT state, or an error is detected,
+ * then retrieve the VBIF error info for it.
+ */
+ reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+ reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+ reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+ dev_err(dpu_dbg_base.dev,
+ "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+ reg, reg1, reg2);
+ reg >>= 16;
+ reg &= ~(reg1 | reg2);
+ for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+ if (!test_bit(0, &reg)) {
+ writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+ /* make sure reg write goes through */
+ wmb();
+
+ d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+ d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+ dev_err(dpu_dbg_base.dev,
+ "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+ i, d0, d1);
+ }
+ reg >>= 1;
+ }
+
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+
+ writel_relaxed(0, mem_base + head->disable_bus_addr);
+ writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+ /* make sure that other bus is off */
+ wmb();
+
+ _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
+ in_log);
+ if (dump_addr)
+ dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+ }
+
+ _dpu_dbg_enable_power(false);
+
+ dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
+}
+
+/**
+ * _dpu_dump_array - dump the requested debug buses
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt)
+{
+ if (dump_dbgbus_dpu)
+ _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
+
+ if (dump_dbgbus_vbif_rt)
+ _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
+}
+
+/**
+ * _dpu_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _dpu_dump_work(struct work_struct *work)
+{
+ _dpu_dump_array("dpudump_workitem",
+ dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
+ dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
+}
+
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt)
+{
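+ /* drop the request if a deferred dump is already pending */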
+ if (queue_work && work_pending(&dpu_dbg_base.dump_work))
+ return;
+
+ if (!queue_work) {
+ _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
+ return;
+ }
+
+ /* schedule work to dump later */
+ dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
+ dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+ dump_dbgbus_vbif_rt;
+ schedule_work(&dpu_dbg_base.dump_work);
+}
+
+/**
+ * dpu_dbg_debugfs_open - debugfs open handler for debug dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+/**
+ * dpu_dbg_dump_write - debugfs write handler for debug dump
+ * @file: file handle
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t dpu_dbg_dump_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ _dpu_dump_array("dump_debugfs", true, true);
+ return count;
+}
+
+static const struct file_operations dpu_dbg_dump_fops = {
+ .open = dpu_dbg_debugfs_open,
+ .write = dpu_dbg_dump_write,
+};
+
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+ static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+ char debug_name[80] = "";
+
+ if (!debugfs_root)
+ return -EINVAL;
+
+ debugfs_create_file("dump", 0600, debugfs_root, NULL,
+ &dpu_dbg_dump_fops);
+
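+ /*
+ * expose each bus's enable_mask so userspace can select between
+ * DPU_DBG_DUMP_IN_LOG and DPU_DBG_DUMP_IN_MEM before triggering
+ * a dump
+ */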
+ if (dbg->dbgbus_dpu.entries) {
+ dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_dpu.cmn.name);
+ dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
+ debugfs_create_u32(debug_name, 0600, debugfs_root,
+ &dbg->dbgbus_dpu.cmn.enable_mask);
+ }
+
+ if (dbg->dbgbus_vbif_rt.entries) {
+ dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_vbif_rt.cmn.name);
+ dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+ debugfs_create_u32(debug_name, 0600, debugfs_root,
+ &dbg->dbgbus_vbif_rt.cmn.enable_mask);
+ }
+
+ return 0;
+}
+
+static void _dpu_dbg_debugfs_destroy(void)
+{
+}
+
+void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+ static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+
+ memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
+ memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+ if (IS_MSM8998_TARGET(hwversion)) {
+ dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
+ dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
+ dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
+ dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
+ dbg->dbgbus_dpu.cmn.entries_size =
+ ARRAY_SIZE(dbg_bus_dpu_sdm845);
+ dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ /* vbif is unchanged vs 8998 */
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ } else {
+ pr_err("unsupported chipset id %X\n", hwversion);
+ }
+}
+
+int dpu_dbg_init(struct device *dev)
+{
+ if (!dev) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
+ dpu_dbg_base.dev = dev;
+
+ INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
+
+ return 0;
+}
+
+/**
+ * dpu_dbg_destroy - destroy dpu debug facilities
+ */
+void dpu_dbg_destroy(void)
+{
+ _dpu_dbg_debugfs_destroy();
+}
+
+void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+ dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
new file mode 100644
index 000000000000..1e6fa945f98b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DPU_DBG_H_
+#define DPU_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+enum dpu_dbg_dump_flag {
+ DPU_DBG_DUMP_IN_LOG = BIT(0),
+ DPU_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion: Chipset revision
+ */
+void dpu_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * dpu_dbg_init - initialize global dpu debug facilities: regdump
+ * @dev: device handle
+ * Returns: 0 or -ERROR
+ */
+int dpu_dbg_init(struct device *dev);
+
+/**
+ * dpu_dbg_debugfs_register - register entries at the given debugfs dir
+ * @debugfs_root: debugfs root in which to create dpu debug entries
+ * Returns: 0 or -ERROR
+ */
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
+
+/**
+ * dpu_dbg_destroy - destroy the global dpu debug facilities
+ * Returns: none
+ */
+void dpu_dbg_destroy(void);
+
+/**
+ * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
+ * @queue_work: whether to queue the dumping work to the work_struct
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: dump the vbif rt bus
+ * Returns: none
+ */
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt);
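+
+/*
+ * Example (a hypothetical caller, for illustration only): dump both buses
+ * synchronously from an error path, or defer to the dump work item:
+ *
+ *	dpu_dbg_dump(false, __func__, true, true);
+ *	dpu_dbg_dump(true, __func__, true, true);
+ */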
+
+/**
+ * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
+ * address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void dpu_dbg_set_dpu_top_offset(u32 blk_off);
+
+#else
+
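+/* stub variants for builds without CONFIG_DEBUG_FS */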
+static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int dpu_dbg_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+ return 0;
+}
+
+static inline void dpu_dbg_destroy(void)
+{
+}
+
+static inline void dpu_dbg_dump(bool queue_work, const char *name,
+ bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
+{
+}
+
+static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
new file mode 100644
index 000000000000..1b4de3486ef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -0,0 +1,2500 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_formats.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_crtc.h"
+#include "dpu_trace.h"
+#include "dpu_core_irq.h"
+
+#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+ ##__VA_ARGS__)
+
+#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+ ##__VA_ARGS__)
+
+/*
+ * Two, to anticipate panels that can do cmd/vid dynamic switching.
+ * The plan is to create all possible physical encoder types, and switch
+ * between them at runtime
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+ (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+#define MISR_BUFF_SIZE 256
+
+#define IDLE_SHORT_TIMEOUT 1
+
+#define MAX_VDISPLAY_SPLIT 1080
+
+/**
+ * enum dpu_enc_rc_events - events for resource control state machine
+ * @DPU_ENC_RC_EVENT_KICKOFF:
+ * This event happens at NORMAL priority.
+ * Event that signals the start of the transfer. When this event is
+ * received, enable MDP/DSI core clocks. Regardless of the previous
+ * state, the resource should be in ON state at the end of this event.
+ * @DPU_ENC_RC_EVENT_FRAME_DONE:
+ * This event happens at INTERRUPT level.
+ * Event signals the end of the data transfer after the PP FRAME_DONE
+ * event. At the end of this event, a delayed work is scheduled to go to
+ * IDLE_PC state after IDLE_TIMEOUT time.
+ * @DPU_ENC_RC_EVENT_PRE_STOP:
+ * This event happens at NORMAL priority.
+ * This event, when received during the ON state, leaves the RC state
+ * in the PRE_OFF state. It should be followed by the STOP event as
+ * part of encoder disable.
+ * If received during IDLE or OFF states, it will do nothing.
+ * @DPU_ENC_RC_EVENT_STOP:
+ * This event happens at NORMAL priority.
+ * When this event is received, disable all the MDP/DSI core clocks, and
+ * disable IRQs. It should be called from the PRE_OFF or IDLE states.
+ * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
+ * PRE_OFF is expected when PRE_STOP was executed during the ON state.
+ * Resource state should be in OFF at the end of the event.
+ * @DPU_ENC_RC_EVENT_ENTER_IDLE:
+ * This event happens at NORMAL priority from a work item.
+ * Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ * This would disable MDP/DSI core clocks and change the resource state
+ * to IDLE.
+ */
+enum dpu_enc_rc_events {
+ DPU_ENC_RC_EVENT_KICKOFF = 1,
+ DPU_ENC_RC_EVENT_FRAME_DONE,
+ DPU_ENC_RC_EVENT_PRE_STOP,
+ DPU_ENC_RC_EVENT_STOP,
+ DPU_ENC_RC_EVENT_ENTER_IDLE
+};
+
+/**
+ * enum dpu_enc_rc_states - states that the resource control maintains
+ * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
+ * @DPU_ENC_RC_STATE_ON: Resource is in ON state
+ * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum dpu_enc_rc_states {
+ DPU_ENC_RC_STATE_OFF,
+ DPU_ENC_RC_STATE_PRE_OFF,
+ DPU_ENC_RC_STATE_ON,
+ DPU_ENC_RC_STATE_IDLE
+};
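+
+/*
+ * Typical flow through dpu_encoder_resource_control() (a summary of the
+ * event documentation above, not an exhaustive transition table):
+ *
+ *   OFF/IDLE --KICKOFF--> ON --FRAME_DONE, idle timeout--> IDLE
+ *   ON --PRE_STOP--> PRE_OFF --STOP--> OFF (STOP is also valid from IDLE)
+ */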
+
+/**
+ * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
+ * encoders. Virtual encoder manages one "logical" display. Physical
+ * encoders manage one intf block, tied to a specific panel/sub-panel.
+ * Virtual encoder defers as much as possible to the physical encoders.
+ * Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base: drm_encoder base class for registration with DRM
+ * @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @bus_scaling_client: Client handle to the bus scaling interface
+ * @num_phys_encs: Actual number of physical encoders contained.
+ * @phys_encs: Container of physical encoders managed.
+ * @cur_master: Pointer to the current master in this mode; an
+ * optimization. Only valid after enable, cleared at disable.
+ * @hw_pp: Handles to the pingpong blocks used for the display. The
+ * number of pingpong blocks can differ from num_phys_encs.
+ * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
+ * for partial update right-only cases, such as pingpong
+ * split where virtual pingpong does not generate IRQs
+ * @crtc_vblank_cb: Callback into the upper layer / CRTC for
+ * notification of the VBLANK
+ * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @crtc_kickoff_cb: Callback into CRTC that will flush & start
+ * all CTL paths
+ * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
+ * @debugfs_root: Debug file system root file node
+ * @enc_lock: Lock around physical encoder create/destroy and
+ * access.
+ * @frame_busy_mask: Bitmask tracking which phys_encs are still
+ * busy processing the current command.
+ * Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb: callback handler for frame event
+ * @crtc_frame_event_cb_data: callback handler private data
+ * @frame_done_timeout: frame done timeout in Hz
+ * @frame_done_timer: watchdog timer for frame done event
+ * @vsync_event_timer: vsync timer
+ * @disp_info: local copy of msm_display_info struct
+ * @misr_enable: misr enable/disable status
+ * @misr_frame_count: misr frame count before start capturing the data
+ * @idle_pc_supported: indicate if idle power collapse is supported
+ * @rc_lock: resource control mutex lock to protect
+ * virt encoder over various state changes
+ * @rc_state: resource controller state
+ * @delayed_off_work: delayed worker to schedule disabling of
+ * clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work: worker to handle vsync event for autorefresh
+ * @topology: topology of the display
+ * @mode_set_complete: flag to indicate modeset completion
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+struct dpu_encoder_virt {
+ struct drm_encoder base;
+ spinlock_t enc_spinlock;
+ uint32_t bus_scaling_client;
+
+ uint32_t display_num_of_h_tiles;
+
+ unsigned int num_phys_encs;
+ struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+ struct dpu_encoder_phys *cur_master;
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+
+ bool intfs_swapped;
+
+ void (*crtc_vblank_cb)(void *);
+ void *crtc_vblank_cb_data;
+
+ struct dentry *debugfs_root;
+ struct mutex enc_lock;
+ DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+ void (*crtc_frame_event_cb)(void *, u32 event);
+ void *crtc_frame_event_cb_data;
+
+ atomic_t frame_done_timeout;
+ struct timer_list frame_done_timer;
+ struct timer_list vsync_event_timer;
+
+ struct msm_display_info disp_info;
+ bool misr_enable;
+ u32 misr_frame_count;
+
+ bool idle_pc_supported;
+ struct mutex rc_lock;
+ enum dpu_enc_rc_states rc_state;
+ struct kthread_delayed_work delayed_off_work;
+ struct kthread_work vsync_event_work;
+ struct msm_display_topology topology;
+ bool mode_set_complete;
+
+ u32 idle_timeout;
+};
+
+#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
+
+static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
+ bool enable)
+{
+ struct drm_encoder *drm_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu enc\n");
+ return -EINVAL;
+ }
+
+ drm_enc = &dpu_enc->base;
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ if (!priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ if (enable)
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ else
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
+ DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+ phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_ERROR);
+}
+
+static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+ int32_t hw_id, struct dpu_encoder_wait_info *info);
+
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx,
+ struct dpu_encoder_wait_info *wait_info)
+{
+ struct dpu_encoder_irq *irq;
+ u32 irq_status;
+ int ret;
+
+ if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ /* note: do master / slave checking outside */
+
+ /* return EWOULDBLOCK since we know the wait isn't necessary */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ return -EWOULDBLOCK;
+ }
+
+ if (irq->irq_idx < 0) {
+ DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->name);
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+
+ ret = dpu_encoder_helper_wait_event_timeout(
+ DRMID(phys_enc->parent),
+ irq->hw_idx,
+ wait_info);
+
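+ /*
+ * On timeout, re-read the raw interrupt status: if the irq did fire
+ * but the callback never ran, invoke the handler by hand and treat
+ * the wait as successful.
+ */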
+ if (ret <= 0) {
+ irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
+ irq->irq_idx, true);
+ if (irq_status) {
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ local_irq_save(flags);
+ irq->cb.func(phys_enc, irq->irq_idx);
+ local_irq_restore(flags);
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+ } else {
+ ret = 0;
+ trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+ intr_idx, irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+
+ return ret;
+}
+
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ struct dpu_encoder_irq *irq;
+ int ret = 0;
+
+ if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ if (irq->irq_idx >= 0) {
+ DPU_DEBUG_PHYS(phys_enc,
+ "skipping already registered irq %s type %d\n",
+ irq->name, irq->intr_type);
+ return 0;
+ }
+
+ irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
+ irq->intr_type, irq->hw_idx);
+ if (irq->irq_idx < 0) {
+ DPU_ERROR_PHYS(phys_enc,
+ "failed to lookup IRQ index for %s type:%d\n",
+ irq->name, irq->intr_type);
+ return -EINVAL;
+ }
+
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
+ &irq->cb);
+ if (ret) {
+ DPU_ERROR_PHYS(phys_enc,
+ "failed to register IRQ callback for %s\n",
+ irq->name);
+ irq->irq_idx = -EINVAL;
+ return ret;
+ }
+
+ ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ irq->irq_idx, &irq->cb);
+ irq->irq_idx = -EINVAL;
+ return ret;
+ }
+
+ trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
+
+ return ret;
+}
+
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ struct dpu_encoder_irq *irq;
+ int ret;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ /* silently skip irqs that weren't registered */
+ if (irq->irq_idx < 0) {
+ DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ return 0;
+ }
+
+ ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
+
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
+ &irq->cb);
+ if (ret) {
+ DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
+
+ trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
+
+ irq->irq_idx = -EINVAL;
+
+ return 0;
+}
+
+void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!hw_res || !drm_enc || !conn_state) {
+ DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+ drm_enc != 0, hw_res != 0, conn_state != 0);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /* Query resources used by phys encs, expected to be without overlap */
+ memset(hw_res, 0, sizeof(*hw_res));
+ hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.get_hw_resources)
+ phys->ops.get_hw_resources(phys, hw_res, conn_state);
+ }
+}
+
+static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ mutex_lock(&dpu_enc->enc_lock);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.destroy) {
+ phys->ops.destroy(phys);
+ --dpu_enc->num_phys_encs;
+ dpu_enc->phys_encs[i] = NULL;
+ }
+ }
+
+ if (dpu_enc->num_phys_encs)
+ DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
+ dpu_enc->num_phys_encs);
+ dpu_enc->num_phys_encs = 0;
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ drm_encoder_cleanup(drm_enc);
+ mutex_destroy(&dpu_enc->enc_lock);
+
+ kfree(dpu_enc);
+}
+
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct split_pipe_cfg cfg = { 0 };
+ struct dpu_hw_mdp *hw_mdptop;
+ struct msm_display_info *disp_info;
+
+ if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ hw_mdptop = phys_enc->hw_mdptop;
+ disp_info = &dpu_enc->disp_info;
+
+ if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
+ return;
+
+ /*
+ * disable split modes since the encoder will be operating as the only
+ * encoder, either for the entire use case in the case of, for example,
+ * single DSI, or for this frame in the case of left/right only partial
+ * update.
+ */
+ if (phys_enc->split_role == ENC_ROLE_SOLO) {
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ return;
+ }
+
+ cfg.en = true;
+ cfg.mode = phys_enc->intf_mode;
+ cfg.intf = interface;
+
+ if (cfg.en && phys_enc->ops.needs_single_flush &&
+ phys_enc->ops.needs_single_flush(phys_enc))
+ cfg.split_flush_en = true;
+
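+ /* only the master encoder programs the split pipe for the pair */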
+ if (phys_enc->split_role == ENC_ROLE_MASTER) {
+ DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
+
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ }
+}
+
+static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
+ struct drm_display_mode *adj_mode)
+{
+ struct drm_display_mode *cur_mode;
+
+ if (!connector || !adj_mode)
+ return;
+
+ list_for_each_entry(cur_mode, &connector->modes, head) {
+ if (cur_mode->vdisplay == adj_mode->vdisplay &&
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ cur_mode->vrefresh == adj_mode->vrefresh) {
+ adj_mode->private = cur_mode->private;
+ adj_mode->private_flags |= cur_mode->private_flags;
+ }
+ }
+}
+
+static struct msm_display_topology dpu_encoder_get_topology(
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct drm_display_mode *mode)
+{
+ struct msm_display_topology topology;
+ int i, intf_count = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+ /* Use split topology for vdisplay > MAX_VDISPLAY_SPLIT (1080) */
+ topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+ topology.num_enc = 0;
+ topology.num_intf = intf_count;
+
+ return topology;
+}
+
+static int dpu_encoder_virt_atomic_check(
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ const struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+ struct msm_display_topology topology;
+ int i = 0;
+ int ret = 0;
+
+ if (!drm_enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+ drm_enc != 0, crtc_state != 0, conn_state != 0);
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ mode = &crtc_state->mode;
+ adj_mode = &crtc_state->adjusted_mode;
+ trace_dpu_enc_atomic_check(DRMID(drm_enc));
+
+ /*
+ * display drivers may populate private fields of the drm display mode
+ * structure while registering possible modes of a connector with DRM.
+ * These private fields are not populated back while DRM invokes
+ * the mode_set callbacks. This module retrieves and populates the
+ * private fields of the given mode.
+ */
+ _dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
+
+ /* perform atomic check on the first physical encoder (master) */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.atomic_check)
+ ret = phys->ops.atomic_check(phys, crtc_state,
+ conn_state);
+ else if (phys && phys->ops.mode_fixup)
+ if (!phys->ops.mode_fixup(phys, mode, adj_mode))
+ ret = -EINVAL;
+
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "mode unsupported, phys idx %d\n", i);
+ break;
+ }
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. Indicating AtomicTest phase */
+ if (!ret) {
+ /*
+ * Avoid reserving resources when mode set is pending. Topology
+ * info may not be available to complete reservation.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state)
+ && dpu_enc->mode_set_complete) {
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
+ conn_state, topology, true);
+ dpu_enc->mode_set_complete = false;
+ }
+ }
+
+ if (!ret)
+ drm_mode_set_crtcinfo(adj_mode, 0);
+
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
+ adj_mode->private_flags);
+
+ return ret;
+}
+
+static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
+ struct msm_display_info *disp_info)
+{
+ struct dpu_vsync_source_cfg vsync_cfg = { 0 };
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct drm_encoder *drm_enc;
+ int i;
+
+ if (!dpu_enc || !disp_info) {
+ DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
+ dpu_enc != NULL, disp_info != NULL);
+ return;
+ } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
+ DPU_ERROR("invalid num phys enc %d/%d\n",
+ dpu_enc->num_phys_encs,
+ (int) ARRAY_SIZE(dpu_enc->hw_pp));
+ return;
+ }
+
+ drm_enc = &dpu_enc->base;
+ /* these pointers are checked in virt_enable_helper */
+ priv = drm_enc->dev->dev_private;
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ }
+
+ hw_mdptop = dpu_kms->hw_mdp;
+ if (!hw_mdptop) {
+ DPU_ERROR("invalid mdptop\n");
+ return;
+ }
+
+ if (hw_mdptop->ops.setup_vsync_source &&
+ disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
+
+ vsync_cfg.pp_count = dpu_enc->num_phys_encs;
+ if (disp_info->is_te_using_watchdog_timer)
+ vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
+ else
+ vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
+
+ hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+ }
+}
+
+static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.irq_control)
+ phys->ops.irq_control(phys, enable);
+ }
+
+}
+
+static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+ bool enable)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("encoder master not set\n");
+ return;
+ }
+
+ if (enable) {
+ /* enable DPU core clks */
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* enable all the irq */
+ _dpu_encoder_irq_control(drm_enc, true);
+
+ } else {
+ /* disable all the irq */
+ _dpu_encoder_irq_control(drm_enc, false);
+
+ /* disable DPU core clks */
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ }
+
+}
+
+static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
+ u32 sw_event)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *disp_thread;
+ bool is_vid_mode = false;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ is_vid_mode = dpu_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return -EINVAL;
+ }
+ disp_thread = &priv->disp_thread[drm_enc->crtc->index];
+
+ /*
+ * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
+ * STOP events and return early for other events (i.e. wb display).
+ */
+ if (!dpu_enc->idle_pc_supported &&
+ (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
+ sw_event != DPU_ENC_RC_EVENT_STOP &&
+ sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
+ return 0;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
+ dpu_enc->rc_state, "begin");
+
+ switch (sw_event) {
+ case DPU_ENC_RC_EVENT_KICKOFF:
+ /* cancel delayed off work, if any */
+ if (kthread_cancel_delayed_work_sync(
+ &dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in ON state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
+ dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
+ _dpu_encoder_irq_control(drm_enc, true);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, true);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "kickoff");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_FRAME_DONE:
+ /*
+ * the mutex lock is not used here because this event happens at
+ * interrupt context, and locking is not required since the other
+ * events, like KICKOFF and STOP, do a wait-for-idle before
+ * executing resource_control
+ */
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ return -EINVAL;
+ }
+
+ /*
+ * schedule off work item only when there are no
+ * frames pending
+ */
+ if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
+ DRM_DEBUG_KMS("id:%d skip schedule work\n",
+ DRMID(drm_enc));
+ return 0;
+ }
+
+ kthread_queue_delayed_work(
+ &disp_thread->worker,
+ &dpu_enc->delayed_off_work,
+ msecs_to_jiffies(dpu_enc->idle_timeout));
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "frame done");
+ break;
+
+ case DPU_ENC_RC_EVENT_PRE_STOP:
+ /* cancel delayed off work, if any */
+ if (kthread_cancel_delayed_work_sync(
+ &dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (is_vid_mode &&
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ _dpu_encoder_irq_control(drm_enc, true);
+ }
+ /* skip if already in OFF or IDLE state; resources are already off */
+ else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "pre stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_STOP:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in OFF state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
+ DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ /*
+ * expect to arrive here only from the PRE_OFF or IDLE states;
+ * in the IDLE state the resources are already disabled
+ */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_ENTER_IDLE:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * if we are in ON but a frame was just kicked off,
+ * ignore the IDLE event, it's probably a stale timer event
+ */
+ if (dpu_enc->frame_busy_mask[0]) {
+ DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ if (is_vid_mode)
+ _dpu_encoder_irq_control(drm_enc, false);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "idle");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ default:
+ DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
+ sw_event);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "error");
+ break;
+ }
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "end");
+ return 0;
+}
+
+static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct list_head *connector_list;
+ struct drm_connector *conn = NULL, *conn_iter;
+ struct dpu_rm_hw_iter pp_iter;
+ struct msm_display_topology topology;
+ enum dpu_rm_topology_name topology_name;
+ int i = 0, ret;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ connector_list = &dpu_kms->dev->mode_config.connector_list;
+
+ trace_dpu_enc_mode_set(DRMID(drm_enc));
+
+ list_for_each_entry(conn_iter, connector_list, head)
+ if (conn_iter->encoder == drm_enc)
+ conn = conn_iter;
+
+ if (!conn) {
+ DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
+ return;
+ } else if (!conn->state) {
+ DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
+ return;
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
+ conn->state, topology, false);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "failed to reserve hw resources, %d\n", ret);
+ return;
+ }
+
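+ /* collect the pingpong blocks the RM assigned to this encoder */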
+ dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ dpu_enc->hw_pp[i] = NULL;
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+ break;
+ dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+ }
+
+ topology_name = dpu_rm_get_topology_name(topology);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys) {
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "invalid pingpong block for the encoder\n");
+ return;
+ }
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->connector = conn->state->connector;
+ phys->topology_name = topology_name;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
+ }
+ }
+
+ dpu_enc->mode_set_complete = true;
+}
+
+static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ if (!dpu_enc || !dpu_enc->cur_master) {
+ DPU_ERROR("invalid dpu encoder/master\n");
+ return;
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+ dpu_enc->cur_master->hw_mdptop);
+
+ if (dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+ dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+ dpu_enc->cur_master->hw_mdptop,
+ dpu_kms->catalog);
+
+ _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+}
+
+void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
+ phys->ops.restore(phys);
+ }
+
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
+ dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+ struct drm_display_mode *cur_mode = NULL;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+
+ trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+ cur_mode->vdisplay);
+
+ dpu_enc->cur_master = NULL;
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
+ DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
+ dpu_enc->cur_master = phys;
+ break;
+ }
+ }
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
+ return;
+ }
+
+ ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
+ ret);
+ return;
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ if (phys != dpu_enc->cur_master) {
+ if (phys->ops.enable)
+ phys->ops.enable(phys);
+ }
+
+ if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
+ phys->ops.setup_misr(phys, true,
+ dpu_enc->misr_frame_count);
+ }
+
+ if (dpu_enc->cur_master->ops.enable)
+ dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode *mode;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ } else if (!drm_enc->dev) {
+ DPU_ERROR("invalid dev\n");
+ return;
+ } else if (!drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid dev_private\n");
+ return;
+ }
+
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_disable(DRMID(drm_enc));
+
+ /* wait for idle */
+ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.disable)
+ phys->ops.disable(phys);
+ }
+
+ /* after phys waits for frame-done, should be no more frames pending */
+ if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+ del_timer_sync(&dpu_enc->frame_done_timer);
+ }
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i])
+ dpu_enc->phys_encs[i]->connector = NULL;
+ }
+
+ dpu_enc->cur_master = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
+
+ dpu_rm_release(&dpu_kms->rm, drm_enc);
+}
+
+static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
+ enum dpu_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
+ }
+ }
+
+ return INTF_MAX;
+}
+
+static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ unsigned long lock_flags;
+
+ if (!drm_enc || !phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_vblank_callback");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ if (dpu_enc->crtc_vblank_cb)
+ dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ atomic_inc(&phy_enc->vsync_cnt);
+ DPU_ATRACE_END("encoder_vblank_callback");
+}
+
+static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ if (!phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_underrun_callback");
+ atomic_inc(&phy_enc->underrun_cnt);
+ trace_dpu_enc_underrun_cb(DRMID(drm_enc),
+ atomic_read(&phy_enc->underrun_cnt));
+ DPU_ATRACE_END("encoder_underrun_callback");
+}
+
+void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+ void (*vbl_cb)(void *), void *vbl_data)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned long lock_flags;
+ bool enable;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ enable = vbl_cb ? true : false;
+ trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_vblank_cb = vbl_cb;
+ dpu_enc->crtc_vblank_cb_data = vbl_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.control_vblank_irq)
+ phys->ops.control_vblank_irq(phys, enable);
+ }
+}
+
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+ void (*frame_event_cb)(void *, u32 event),
+ void *frame_event_cb_data)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned long lock_flags;
+ bool enable;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ enable = frame_event_cb ? true : false;
+ trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_frame_event_cb = frame_event_cb;
+ dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+static void dpu_encoder_frame_done_callback(
+ struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *ready_phys, u32 event)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned int i;
+
+ if (event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ /*
+ * suppress frame_done without waiter,
+ * likely autorefresh
+ */
+ trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
+ event, ready_phys->intf_idx);
+ return;
+ }
+
+ /* One of the physical encoders has become idle */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] == ready_phys) {
+ clear_bit(i, dpu_enc->frame_busy_mask);
+ trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
+ dpu_enc->frame_busy_mask[0]);
+ }
+ }
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ atomic_set(&dpu_enc->frame_done_timeout, 0);
+ del_timer(&dpu_enc->frame_done_timer);
+
+ dpu_encoder_resource_control(drm_enc,
+ DPU_ENC_RC_EVENT_FRAME_DONE);
+
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data,
+ event);
+ }
+ } else {
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data, event);
+ }
+}
+
+static void dpu_encoder_off_work(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, delayed_off_work.work);
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu encoder\n");
+ return;
+ }
+
+ dpu_encoder_resource_control(&dpu_enc->base,
+ DPU_ENC_RC_EVENT_ENTER_IDLE);
+
+ dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
+ DPU_ENCODER_FRAME_EVENT_IDLE);
+}
+
+/**
+ * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+ struct dpu_hw_ctl *ctl;
+ int pending_kickoff_cnt;
+ u32 ret = UINT_MAX;
+
+ if (!drm_enc || !phys) {
+ DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+ drm_enc != 0, phys != 0);
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ ctl = phys->hw_ctl;
+ if (!ctl || !ctl->ops.trigger_flush) {
+ DPU_ERROR("missing trigger cb\n");
+ return;
+ }
+
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+
+ if (extra_flush_bits && ctl->ops.update_pending_flush)
+ ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+ ctl->ops.trigger_flush(ctl);
+
+ if (ctl->ops.get_pending_flush)
+ ret = ctl->ops.get_pending_flush(ctl);
+
+ trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
+ pending_kickoff_cnt, ctl->idx, ret);
+}
+
+/**
+ * _dpu_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+{
+ if (!phys) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
+ phys->ops.trigger_start(phys);
+}
+
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ ctl = phys_enc->hw_ctl;
+ if (ctl && ctl->ops.trigger_start) {
+ ctl->ops.trigger_start(ctl);
+ trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
+ }
+}
+
+static int dpu_encoder_helper_wait_event_timeout(
+ int32_t drm_id,
+ int32_t hw_id,
+ struct dpu_encoder_wait_info *info)
+{
+ int rc = 0;
+ s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
+ s64 jiffies = msecs_to_jiffies(info->timeout_ms);
+ s64 time;
+
+ do {
+ rc = wait_event_timeout(*(info->wq),
+ atomic_read(info->atomic_cnt) == 0, jiffies);
+ time = ktime_to_ms(ktime_get());
+
+ trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
+ expected_time,
+ atomic_read(info->atomic_cnt));
+ /*
+ * If we timed out (rc == 0) but the counter is still non-zero
+ * and the wall-clock deadline has not passed, the wakeup was
+ * spurious; wait again.
+ */
+ } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+ (time < expected_time));
+
+ return rc;
+}
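+
+/*
+ * Illustrative only (not part of the driver): a typical caller fills a
+ * dpu_encoder_wait_info from its phys encoder state before invoking the
+ * helper above. The field and member names below exist in this patch;
+ * the surrounding context is hypothetical:
+ *
+ * struct dpu_encoder_wait_info info = {
+ * .wq = &phys_enc->pending_kickoff_wq,
+ * .atomic_cnt = &phys_enc->pending_kickoff_cnt,
+ * .timeout_ms = KICKOFF_TIMEOUT_MS,
+ * };
+ * rc = dpu_encoder_helper_wait_event_timeout(DRMID(phys_enc->parent),
+ * phys_enc->hw_ctl->idx, &info);
+ */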
+
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ ctl = phys_enc->hw_ctl;
+
+ if (!ctl || !ctl->ops.reset)
+ return;
+
+ DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
+ ctl->idx);
+
+ rc = ctl->ops.reset(ctl);
+ if (rc) {
+ DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
+ * @dpu_enc: Pointer to virtual encoder structure
+ *
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ */
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ uint32_t i, pending_flush;
+ unsigned long lock_flags;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ pending_flush = 0x0;
+
+ /* update pending counts and trigger kickoff ctl flush atomically */
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+
+ /* don't perform flush/start operations for slave encoders */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+ continue;
+
+ ctl = phys->hw_ctl;
+ if (!ctl)
+ continue;
+
+ if (phys->split_role != ENC_ROLE_SLAVE)
+ set_bit(i, dpu_enc->frame_busy_mask);
+ if (!phys->ops.needs_single_flush ||
+ !phys->ops.needs_single_flush(phys))
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+ else if (ctl->ops.get_pending_flush)
+ pending_flush |= ctl->ops.get_pending_flush(ctl);
+ }
+
+ /* for split flush, combine pending flush masks and send to master */
+ if (pending_flush && dpu_enc->cur_master) {
+ _dpu_encoder_trigger_flush(
+ &dpu_enc->base,
+ dpu_enc->cur_master,
+ pending_flush);
+ }
+
+ _dpu_encoder_trigger_start(dpu_enc->cur_master);
+
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
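+
+/*
+ * Illustrative flow, assuming a dual-DSI split configuration: when both
+ * phys encoders report needs_single_flush(), their individual flush masks
+ * are OR-ed into pending_flush above and a single trigger_flush is issued
+ * through the master, so both pipes are flushed together rather than one
+ * at a time.
+ */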
+
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ unsigned int i;
+ struct dpu_hw_ctl *ctl;
+ struct msm_display_info *disp_info;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ disp_info = &dpu_enc->disp_info;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->hw_ctl) {
+ ctl = phys->hw_ctl;
+ if (ctl->ops.clear_pending_flush)
+ ctl->ops.clear_pending_flush(ctl);
+
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+ (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+ && ctl->ops.trigger_pending)
+ ctl->ops.trigger_pending(ctl);
+ }
+ }
+}
+
+static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
+ struct drm_display_mode *mode)
+{
+ u64 pclk_rate;
+ u32 pclk_period;
+ u32 line_time;
+
+ /*
+ * For linetime calculation, only operate on master encoder.
+ */
+ if (!dpu_enc->cur_master)
+ return 0;
+
+ if (!dpu_enc->cur_master->ops.get_line_count) {
+ DPU_ERROR("get_line_count function not defined\n");
+ return 0;
+ }
+
+ pclk_rate = mode->clock; /* pixel clock in kHz */
+ if (pclk_rate == 0) {
+ DPU_ERROR("pclk is 0, cannot calculate line time\n");
+ return 0;
+ }
+
+ pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
+ if (pclk_period == 0) {
+ DPU_ERROR("pclk period is 0\n");
+ return 0;
+ }
+
+ /*
+ * Line time calculation based on Pixel clock and HTOTAL.
+ * Final unit is in ns.
+ */
+ line_time = (pclk_period * mode->htotal) / 1000;
+ if (line_time == 0) {
+ DPU_ERROR("line time calculation is 0\n");
+ return 0;
+ }
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
+ pclk_rate, pclk_period, line_time);
+
+ return line_time;
+}
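+
+/*
+ * Worked example (illustrative numbers, not from the driver): for a
+ * 1080p60 CEA mode, mode->clock = 148500 kHz and mode->htotal = 2200.
+ * pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 (ps per pixel), so
+ * line_time = 6735 * 2200 / 1000 = 14817 ns, i.e. ~14.8 us per line,
+ * which closely matches 1 / (60 Hz * 1125 total lines).
+ */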
+
+static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
+ ktime_t *wakeup_time)
+{
+ struct drm_display_mode *mode;
+ struct dpu_encoder_virt *dpu_enc;
+ u32 cur_line;
+ u32 line_time;
+ u32 vtotal, time_to_vsync;
+ ktime_t cur_time;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (!drm_enc->crtc || !drm_enc->crtc->state) {
+ DPU_ERROR("crtc/crtc state object is NULL\n");
+ return -EINVAL;
+ }
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
+ if (!line_time)
+ return -EINVAL;
+
+ cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
+
+ vtotal = mode->vtotal;
+ if (cur_line >= vtotal)
+ time_to_vsync = line_time * vtotal;
+ else
+ time_to_vsync = line_time * (vtotal - cur_line);
+
+ if (time_to_vsync == 0) {
+ DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
+ vtotal);
+ return -EINVAL;
+ }
+
+ cur_time = ktime_get();
+ *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+ cur_line, vtotal, time_to_vsync,
+ ktime_to_ms(cur_time),
+ ktime_to_ms(*wakeup_time));
+ return 0;
+}
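+
+/*
+ * Continuing the illustrative 1080p60 numbers: with line_time = 14817 ns,
+ * vtotal = 1125 and cur_line = 500, time_to_vsync is
+ * 14817 * (1125 - 500) ~= 9.26 ms, so the wakeup timer lands just after
+ * the next vsync instead of a full frame later.
+ */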
+
+static void dpu_encoder_vsync_event_handler(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ vsync_event_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return;
+ }
+ event_thread = &priv->event_thread[drm_enc->crtc->index];
+ if (!event_thread) {
+ DPU_ERROR("event_thread not found for crtc:%d\n",
+ drm_enc->crtc->index);
+ return;
+ }
+
+ del_timer(&dpu_enc->vsync_event_timer);
+}
+
+static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, vsync_event_work);
+ ktime_t wakeup_time;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu encoder\n");
+ return;
+ }
+
+ if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
+ return;
+
+ trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+}
+
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ bool needs_hw_reset = false;
+ unsigned int i;
+
+ if (!drm_enc || !params) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
+
+ /* prepare for next kickoff, may include waiting on previous kickoff */
+ DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys) {
+ if (phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys, params);
+ if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ needs_hw_reset = true;
+ }
+ }
+ DPU_ATRACE_END("enc_prepare_for_kickoff");
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+
+ /* if any phys needs reset, reset all phys, in-order */
+ if (needs_hw_reset) {
+ trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.hw_reset)
+ phys->ops.hw_reset(phys);
+ }
+ }
+}
+
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ ktime_t wakeup_time;
+ unsigned int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DPU_ATRACE_BEGIN("encoder_kickoff");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_kickoff(DRMID(drm_enc));
+
+ atomic_set(&dpu_enc->frame_done_timeout,
+ DPU_FRAME_DONE_TIMEOUT * 1000 /
+ drm_enc->crtc->state->adjusted_mode.vrefresh);
+ mod_timer(&dpu_enc->frame_done_timer, jiffies +
+ ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+
+ /* All phys encs are ready to go, trigger the kickoff */
+ _dpu_encoder_kickoff_phys(dpu_enc);
+
+ /* allow phys encs to handle any post-kickoff business */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.handle_post_kickoff)
+ phys->ops.handle_post_kickoff(phys);
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+ !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+ trace_dpu_enc_early_kickoff(DRMID(drm_enc),
+ ktime_to_ms(wakeup_time));
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+ }
+
+ DPU_ATRACE_END("encoder_kickoff");
+}
+
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.prepare_commit)
+ phys->ops.prepare_commit(phys);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_encoder_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ dpu_enc = s->private;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
+ phys->intf_idx - INTF_0,
+ atomic_read(&phys->vsync_cnt),
+ atomic_read(&phys->underrun_cnt));
+
+ switch (phys->intf_mode) {
+ case INTF_MODE_VIDEO:
+ seq_puts(s, "mode: video\n");
+ break;
+ case INTF_MODE_CMD:
+ seq_puts(s, "mode: command\n");
+ break;
+ default:
+ seq_puts(s, "mode: ???\n");
+ break;
+ }
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return 0;
+}
+
+static int _dpu_encoder_debugfs_status_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_encoder_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_encoder_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ size_t buff_copy;
+ u32 frame_count, enable;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_enc = file->private_data;
+
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy))
+ return -EINVAL;
+
+ buf[buff_copy] = 0; /* end of string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _dpu_encoder_power_enable(dpu_enc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->misr_enable = enable;
+ dpu_enc->misr_frame_count = frame_count;
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || !phys->ops.setup_misr)
+ continue;
+
+ phys->ops.setup_misr(phys, enable, frame_count);
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+ _dpu_encoder_power_enable(dpu_enc, false);
+
+ return count;
+}
+
+static ssize_t _dpu_encoder_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i = 0, len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+ int rc;
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_enc = file->private_data;
+
+ rc = _dpu_encoder_power_enable(dpu_enc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ if (!dpu_enc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ } else if (dpu_enc->disp_info.capabilities &
+ ~MSM_DISPLAY_CAP_VID_MODE) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "unsupported\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || !phys->ops.collect_misr)
+ continue;
+
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "Intf idx:%d\n", phys->intf_idx - INTF_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ phys->ops.collect_misr(phys));
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&dpu_enc->enc_lock);
+ _dpu_encoder_power_enable(dpu_enc, false);
+ return len;
+}
+
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int i;
+
+ static const struct file_operations debugfs_status_fops = {
+ .open = _dpu_encoder_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _dpu_encoder_misr_read,
+ .write = _dpu_encoder_misr_setup,
+ };
+
+ char name[DPU_NAME_SIZE];
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid encoder or kms\n");
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+ /* create overall sub-directory for the encoder */
+ dpu_enc->debugfs_root = debugfs_create_dir(name,
+ drm_enc->dev->primary->debugfs_root);
+ if (!dpu_enc->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
+
+ debugfs_create_file("misr_data", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ if (dpu_enc->phys_encs[i] &&
+ dpu_enc->phys_encs[i]->ops.late_register)
+ dpu_enc->phys_encs[i]->ops.late_register(
+ dpu_enc->phys_encs[i],
+ dpu_enc->debugfs_root);
+
+ return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ if (!drm_enc)
+ return;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
+}
+#else
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+}
+#endif
+
+static int dpu_encoder_late_register(struct drm_encoder *encoder)
+{
+ return _dpu_encoder_init_debugfs(encoder);
+}
+
+static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
+{
+ _dpu_encoder_destroy_debugfs(encoder);
+}
+
+static int dpu_encoder_virt_add_phys_encs(
+ u32 display_caps,
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_enc_phys_init_params *params)
+{
+ struct dpu_encoder_phys *enc = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /*
+ * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
+ * in this function, check up-front.
+ */
+ if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+ ARRAY_SIZE(dpu_enc->phys_encs)) {
+ DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
+ dpu_enc->num_phys_encs);
+ return -EINVAL;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+ enc = dpu_encoder_phys_vid_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+ PTR_ERR(enc));
+ return !enc ? -EINVAL : PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+ enc = dpu_encoder_phys_cmd_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
+ PTR_ERR(enc));
+ return !enc ? -EINVAL : PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ return 0;
+}
+
+static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
+ .handle_vblank_virt = dpu_encoder_vblank_callback,
+ .handle_underrun_virt = dpu_encoder_underrun_callback,
+ .handle_frame_done = dpu_encoder_frame_done_callback,
+};
+
+static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct msm_display_info *disp_info,
+ int *drm_enc_mode)
+{
+ int ret = 0;
+ int i = 0;
+ enum dpu_intf_type intf_type;
+ struct dpu_enc_phys_init_params phys_params;
+
+ if (!dpu_enc || !dpu_kms) {
+ DPU_ERROR("invalid arg(s), enc %d kms %d\n",
+ dpu_enc != 0, dpu_kms != 0);
+ return -EINVAL;
+ }
+
+ memset(&phys_params, 0, sizeof(phys_params));
+ phys_params.dpu_kms = dpu_kms;
+ phys_params.parent = &dpu_enc->base;
+ phys_params.parent_ops = &dpu_encoder_parent_ops;
+ phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
+
+ DPU_DEBUG("\n");
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
+ *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+ intf_type = INTF_DSI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_HDMI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_DP;
+ } else {
+ DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
+ return -EINVAL;
+ }
+
+ WARN_ON(disp_info->num_of_h_tiles < 1);
+
+ dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
+
+ DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+ if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
+ (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
+ dpu_enc->idle_pc_supported =
+ dpu_kms->catalog->caps->has_idle_pc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+ /*
+ * Left-most tile is at index 0, content is controller id
+ * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+ * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+ */
+ u32 controller_id = disp_info->h_tile_instance[i];
+
+ if (disp_info->num_of_h_tiles > 1) {
+ if (i == 0)
+ phys_params.split_role = ENC_ROLE_MASTER;
+ else
+ phys_params.split_role = ENC_ROLE_SLAVE;
+ } else {
+ phys_params.split_role = ENC_ROLE_SOLO;
+ }
+
+ DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+ i, controller_id, phys_params.split_role);
+
+ phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
+ intf_type,
+ controller_id);
+ if (phys_params.intf_idx == INTF_MAX) {
+ DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
+ dpu_enc,
+ &phys_params);
+ if (ret)
+ DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
+ }
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys) {
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
+ }
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return ret;
+}
+
+static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ frame_done_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ u32 event;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+ priv = drm_enc->dev->dev_private;
+
+ if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
+ DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
+ DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
+ return;
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
+ return;
+ }
+
+ DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+
+ event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+ dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
+}
+
+static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
+ .mode_set = dpu_encoder_virt_mode_set,
+ .disable = dpu_encoder_virt_disable,
+ .enable = dpu_kms_encoder_enable,
+ .atomic_check = dpu_encoder_virt_atomic_check,
+
+ /* This is called by dpu_kms_encoder_enable */
+ .commit = dpu_encoder_virt_enable,
+};
+
+static const struct drm_encoder_funcs dpu_encoder_funcs = {
+ .destroy = dpu_encoder_destroy,
+ .late_register = dpu_encoder_late_register,
+ .early_unregister = dpu_encoder_early_unregister,
+};
+
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_encoder *drm_enc = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+ int ret = 0;
+
+ dpu_enc = to_dpu_encoder_virt(enc);
+
+ mutex_init(&dpu_enc->enc_lock);
+ ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
+ &drm_enc_mode);
+ if (ret)
+ goto fail;
+
+ dpu_enc->cur_master = NULL;
+ spin_lock_init(&dpu_enc->enc_spinlock);
+
+ atomic_set(&dpu_enc->frame_done_timeout, 0);
+ timer_setup(&dpu_enc->frame_done_timer,
+ dpu_encoder_frame_done_timeout, 0);
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
+ timer_setup(&dpu_enc->vsync_event_timer,
+ dpu_encoder_vsync_event_handler,
+ 0);
+
+ mutex_init(&dpu_enc->rc_lock);
+ kthread_init_delayed_work(&dpu_enc->delayed_off_work,
+ dpu_encoder_off_work);
+ dpu_enc->idle_timeout = IDLE_TIMEOUT;
+
+ kthread_init_work(&dpu_enc->vsync_event_work,
+ dpu_encoder_vsync_event_work_handler);
+
+ memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
+
+ DPU_DEBUG_ENC(dpu_enc, "created\n");
+
+ return ret;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (drm_enc)
+ dpu_encoder_destroy(drm_enc);
+
+ return ret;
+}
+
+struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+ int drm_enc_mode)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int rc = 0;
+
+ dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
+ if (!dpu_enc)
+ return ERR_PTR(-ENOMEM);
+
+ rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
+ drm_enc_mode, NULL);
+ if (rc) {
+ devm_kfree(dev->dev, dpu_enc);
+ return ERR_PTR(rc);
+ }
+
+ drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
+ return &dpu_enc->base;
+}
+
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+ enum msm_event_wait event)
+{
+ int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ if (!phys)
+ continue;
+
+ switch (event) {
+ case MSM_ENC_COMMIT_DONE:
+ fn_wait = phys->ops.wait_for_commit_done;
+ break;
+ case MSM_ENC_TX_COMPLETE:
+ fn_wait = phys->ops.wait_for_tx_complete;
+ break;
+ case MSM_ENC_VBLANK:
+ fn_wait = phys->ops.wait_for_vblank;
+ break;
+ default:
+ DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
+ event);
+ return -EINVAL;
+ }
+
+ if (fn_wait) {
+ DPU_ATRACE_BEGIN("wait_for_completion_event");
+ ret = fn_wait(phys);
+ DPU_ATRACE_END("wait_for_completion_event");
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!encoder) {
+ DPU_ERROR("invalid encoder\n");
+ return INTF_MODE_NONE;
+ }
+ dpu_enc = to_dpu_encoder_virt(encoder);
+
+ if (dpu_enc->cur_master)
+ return dpu_enc->cur_master->intf_mode;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys)
+ return phys->intf_mode;
+ }
+
+ return INTF_MODE_NONE;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644
index 000000000000..60f809fc7c13
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE BIT(3)
+
+#define IDLE_TIMEOUT (66 - 16/2)
+
+/**
+ * struct dpu_encoder_hw_resources - hardware resources used by the encoder
+ * @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ * interface
+ */
+struct dpu_encoder_hw_resources {
+ enum dpu_intf_mode intfs[INTF_MAX];
+ bool needs_cdm;
+ u32 display_num_of_h_tiles;
+};
+
+/**
+ * struct dpu_encoder_kickoff_params - info encoder requires at kickoff
+ * @affected_displays: bitmask, bit set means the ROI of the commit lies within
+ * the bounds of the physical display at the bit index
+ */
+struct dpu_encoder_kickoff_params {
+ unsigned long affected_displays;
+};
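+
+/*
+ * For example (illustrative), in a two-display split configuration an
+ * affected_displays value of 0x1 means the commit ROI only overlaps the
+ * physical display at bit index 0, while 0x3 means both displays are
+ * touched by the commit.
+ */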
+
+/**
+ * dpu_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder: encoder pointer
+ * @hw_res: resource table to populate with encoder required resources
+ * @conn_state: report hw reqs based on this proposed connector state
+ */
+void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+
+/**
+ * dpu_encoder_register_vblank_callback - provide callback to encoder that
+ * will be called on the next vblank.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister and disable IRQs
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
+ void (*cb)(void *), void *data);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ * will be called after the request is complete, or other events.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+ void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @encoder: encoder pointer
+ * @params: kickoff time parameters
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+ struct dpu_encoder_kickoff_params *params);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ * kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @encoder: encoder pointer
+ * @event: event to wait for
+ * MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending
+ * frames to hardware at a vblank or ctl_start
+ * Encoders will map this differently depending on the
+ * panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to
+ * the panel. Encoders will map this differently
+ * depending on the panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+ enum msm_event_wait event);
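+
+/*
+ * Hypothetical usage sketch (for illustration only):
+ *
+ * ret = dpu_encoder_wait_for_event(drm_enc, MSM_ENC_COMMIT_DONE);
+ * if (ret && ret != -EWOULDBLOCK)
+ * DPU_ERROR("wait for commit done failed, %d\n", ret);
+ */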
+
+/**
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_restore - restore the encoder configs
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @disp_info: Pointer to display information structure
+ * Returns: Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(
+ struct drm_device *dev,
+ int drm_enc_mode);
+
+/**
+ * dpu_encoder_setup - setup dpu_encoder for the display probed
+ * @dev: Pointer to drm device structure
+ * @enc: Pointer to the drm_encoder
+ * @disp_info: Pointer to the display info
+ */
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info);
+
+/**
+ * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
+ * atomic commit, before any registers are written
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ * and command mode encoders.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+ u32 idle_timeout);
+
+#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644
index 000000000000..c7df8aad6613
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_encoder.h"
+
+#define DPU_ENCODER_NAME_MAX 16
+
+/* wait for at most 2 vsync for lowest refresh rate (24hz) */
+#define KICKOFF_TIMEOUT_MS 84
+#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
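+
+/*
+ * Sanity check on the value above: one vsync period at 24 Hz is
+ * 1000 / 24 ~= 41.7 ms, so two periods are ~83.3 ms, rounded up to 84 ms.
+ */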
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ * split-panel configuration, where one panel is master, and others slaves.
+ * Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER: This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+ ENC_ROLE_SOLO,
+ ENC_ROLE_MASTER,
+ ENC_ROLE_SLAVE,
+};
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING: Encoder transitioning to disable state
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED: Encoder is disabled
+ * @DPU_ENC_ENABLING: Encoder transitioning to enabled
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED: Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET: Encoder is enabled, but requires a hw_reset
+ * to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+ DPU_ENC_DISABLING,
+ DPU_ENC_DISABLED,
+ DPU_ENC_ENABLING,
+ DPU_ENC_ENABLED,
+ DPU_ENC_ERR_NEEDS_HW_RESET
+};
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
+ * provides for the physical encoders to use to callback.
+ * @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_frame_done: Notify virtual encoder that this phys encoder
+ * has completed the last requested frame.
+ */
+struct dpu_encoder_virt_ops {
+ void (*handle_vblank_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_underrun_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_frame_done)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ * the containing virtual encoder.
+ * @late_register: DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit: MSM Atomic Call, start of atomic commit sequence
+ * @is_master: Whether this phys_enc is the current master
+ * encoder. Can be switched at enable time. Based
+ * on split_role and current mode (CMD/VID).
+ * @mode_fixup: DRM Call. Fixup a DRM mode.
+ * @mode_set: DRM Call. Set a DRM mode.
+ * This likely caches the mode, for use at enable.
+ * @enable: DRM Call. Enable a DRM mode.
+ * @disable: DRM Call. Disable mode.
+ * @atomic_check: DRM Call. Atomic check new DRM state.
+ * @destroy: DRM Call. Destroy and release resources.
+ * @get_hw_resources: Populate the structure with the hardware
+ * resources that this phys_enc is using.
+ * Expect no overlap between phys_encs.
+ * @control_vblank_irq: Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done: Wait for hardware to have flushed the
+ * current pending frames to hardware
+ * @wait_for_tx_complete: Wait for hardware to transfer the pixels
+ * to the panel
+ * @wait_for_vblank: Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff: Do any work necessary prior to a kickoff
+ * For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff: Do any necessary post-kickoff work
+ * @trigger_start: Process start event on physical encoder
+ * @needs_single_flush: Whether encoder slaves need to be flushed
+ * @setup_misr: Sets up MISR, enable and disables based on sysfs
+ * @collect_misr: Collects MISR data on frame update
+ * @hw_reset: Issue HW recovery such as CTL reset and clear
+ * DPU_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control: Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc: phys encoder can update the vsync_enable status
+ * on idle power collapse prepare
+ * @restore: Restore all the encoder configs.
+ * @get_line_count: Obtain current vertical line count
+ */
+
+struct dpu_encoder_phys_ops {
+ int (*late_register)(struct dpu_encoder_phys *encoder,
+ struct dentry *debugfs_root);
+ void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+ bool (*is_master)(struct dpu_encoder_phys *encoder);
+ bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*mode_set)(struct dpu_encoder_phys *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*enable)(struct dpu_encoder_phys *encoder);
+ void (*disable)(struct dpu_encoder_phys *encoder);
+ int (*atomic_check)(struct dpu_encoder_phys *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+ void (*destroy)(struct dpu_encoder_phys *encoder);
+ void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+ int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+ int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params);
+ void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+ bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+
+ void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
+ bool enable, u32 frame_count);
+ u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
+ void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
+ void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+ void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+ void (*restore)(struct dpu_encoder_phys *phys);
+ int (*get_line_count)(struct dpu_encoder_phys *phys);
+};
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
+ * @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
+ */
+enum dpu_intr_idx {
+ INTR_IDX_VSYNC,
+ INTR_IDX_PINGPONG,
+ INTR_IDX_UNDERRUN,
+ INTR_IDX_CTL_START,
+ INTR_IDX_RDPTR,
+ INTR_IDX_MAX,
+};
+
+/**
+ * struct dpu_encoder_irq - tracking structure for interrupts
+ * @name: string name of interrupt
+ * @intr_type: Encoder interrupt type
+ * @intr_idx: Encoder interrupt enumeration
+ * @hw_idx: HW Block ID
+ * @irq_idx: IRQ interface lookup index from DPU IRQ framework
+ * will be -EINVAL if IRQ is not registered
+ * @cb: interrupt callback
+ */
+struct dpu_encoder_irq {
+ const char *name;
+ enum dpu_intr_type intr_type;
+ enum dpu_intr_idx intr_idx;
+ int hw_idx;
+ int irq_idx;
+ struct dpu_irq_callback cb;
+};
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ * tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ * phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent: Pointer to the containing virtual encoder
+ * @connector: If a mode is set, cached pointer to the active connector
+ * @ops: Operations exposed to the virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop: Hardware interface to the top registers
+ * @hw_ctl: Hardware interface to the ctl registers
+ * @hw_cdm: Hardware interface to the cdm registers
+ * @cdm_cfg: Chroma-down hardware configuration
+ * @hw_pp: Hardware interface to the ping pong registers
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @cached_mode: DRM mode cached at mode_set time, acted on in enable
+ * @enabled: Whether the encoder has enabled and running a mode
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_mode: Interface mode
+ * @intf_idx: Interface index on dpu hardware
+ * @topology_name: topology selected for the display
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state: Enable state tracking
+ * @vblank_refcount: Reference count of vblank request
+ * @vsync_cnt: Vsync count for the physical encoder
+ * @underrun_cnt: Underrun count for the physical encoder
+ * @pending_kickoff_cnt: Atomic counter tracking the number of kickoffs
+ * vs. the number of done/vblank irqs. Should hover
+ * between 0-2. Incremented when a new kickoff is
+ * scheduled; decremented in the irq handler.
+ * @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
+ * pending.
+ * @pending_kickoff_wq: Wait queue for blocking until kickoff completes
+ * @irq: IRQ tracking structures
+ */
+struct dpu_encoder_phys {
+ struct drm_encoder *parent;
+ struct drm_connector *connector;
+ struct dpu_encoder_phys_ops ops;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_cdm *hw_cdm;
+ struct dpu_hw_cdm_cfg cdm_cfg;
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode cached_mode;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf_mode intf_mode;
+ enum dpu_intf intf_idx;
+ enum dpu_rm_topology_name topology_name;
+ spinlock_t *enc_spinlock;
+ enum dpu_enc_enable_state enable_state;
+ atomic_t vblank_refcount;
+ atomic_t vsync_cnt;
+ atomic_t underrun_cnt;
+ atomic_t pending_ctlstart_cnt;
+ atomic_t pending_kickoff_cnt;
+ wait_queue_head_t pending_kickoff_wq;
+ struct dpu_encoder_irq irq[INTR_IDX_MAX];
+};
+
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+ atomic_inc_return(&phys->pending_ctlstart_cnt);
+ return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
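+
+/*
+ * Both counters bumped by the helper above are balanced elsewhere: per
+ * the field docs, pending_kickoff_cnt is decremented in the irq handler,
+ * and waiters on pending_kickoff_wq block until it returns to zero.
+ */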
+
+/**
+ * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @hw_intf: Hardware interface to the intf registers
+ * @timing_params: Current timing parameter
+ */
+struct dpu_encoder_phys_vid {
+ struct dpu_encoder_phys base;
+ struct dpu_hw_intf *hw_intf;
+ struct intf_timing_params timing_params;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt
+ * after ctl_start instead of before next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+ struct dpu_encoder_phys base;
+ int stream_sel;
+ bool serialize_wait4pp;
+ int pp_timeout_report_cnt;
+ atomic_t pending_vblank_cnt;
+ wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @parent: Pointer to the containing virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_idx: Interface index this phys_enc will control
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+ struct dpu_kms *dpu_kms;
+ struct drm_encoder *parent;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf intf_idx;
+ spinlock_t *enc_spinlock;
+};
+
+/**
+ * struct dpu_encoder_wait_info - container for passing arguments to irq wait functions
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+ wait_queue_head_t *wq;
+ atomic_t *atomic_cnt;
+ s64 timeout_ms;
+};
+
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_hw_reset - issue ctl hw reset
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl hw reset. If state is currently
+ * DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+ return BLEND_3D_NONE;
+
+ if (phys_enc->split_role == ENC_ROLE_SOLO &&
+ phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+ return BLEND_3D_H_ROW_INT;
+
+ return BLEND_3D_NONE;
+}
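+
+/*
+ * In other words: the only case that needs 3D merge here is a solo
+ * encoder driving a DUALPIPE_3DMERGE topology, where (presumably) two
+ * pipes feed a single interface row-interleaved; every other combination
+ * resolves to BLEND_3D_NONE.
+ */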
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ * timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ * note: will call dpu_encoder_helper_report_irq_timeout on timeout
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx,
+ struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_register_irq - register and enable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_unregister_irq - unregister and disable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+#endif /* __dpu_encoder_phys_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644
index 000000000000..3084675ed425
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+ container_of(x, struct dpu_encoder_phys_cmd, base)
+
+#define PP_TIMEOUT_MAX_TRIALS 10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels
+ * to override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
+ struct dpu_encoder_phys_cmd *cmd_enc)
+{
+ return KICKOFF_TIMEOUT_MS;
+}
+
+static inline bool dpu_encoder_phys_cmd_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static bool dpu_encoder_phys_cmd_mode_fixup(
+ struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ if (phys_enc)
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+ return true;
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc)
+ return;
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl || !ctl->ops.setup_intf_cfg)
+ return;
+
+ intf_cfg.intf = phys_enc->intf_idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+ intf_cfg.stream_sel = cmd_enc->stream_sel;
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ unsigned long lock_flags;
+ int new_cnt;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("pp_done_irq");
+ /* notify all synchronous clients first, then asynchronous clients */
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
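+ /*
+ * atomic_add_unless(..., -1, 0) decrements the pending kickoff count
+ * unless it is already zero; its return value says whether the
+ * decrement happened, not what the new count is.
+ */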
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ new_cnt, event);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("rd_ptr_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+ wake_up_all(&cmd_enc->pending_vblank_wq);
+ DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc || !phys_enc->hw_ctl)
+ return;
+
+ DPU_ATRACE_BEGIN("ctl_start_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+ /* Signal any waiting ctl start interrupt */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (!phys_enc)
+ return;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_irq *irq;
+
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->hw_idx = phys_enc->hw_ctl->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->hw_idx = phys_enc->intf_idx;
+ irq->irq_idx = -EINVAL;
+}
+
+static void dpu_encoder_phys_cmd_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_rm *rm;
+ struct dpu_rm_hw_iter iter;
+ int i, instance;
+
+ if (!phys_enc || !phys_enc->dpu_kms || !mode || !adj_mode) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+
+ /* dereference phys_enc only after the check above */
+ rm = &phys_enc->dpu_kms->rm;
+ phys_enc->cached_mode = *adj_mode;
+ DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+ drm_mode_debug_printmodeline(adj_mode);
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (dpu_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+ }
+
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+
+ _dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ bool do_log = false;
+
+ if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+ return -EINVAL;
+
+ cmd_enc->pp_timeout_report_cnt++;
+ if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+ frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+ do_log = true;
+ } else if (cmd_enc->pp_timeout_report_cnt == 1) {
+ do_log = true;
+ }
+
+ trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ frame_event);
+
+ /* to avoid flooding, only log first time, and "dead" time */
+ if (do_log) {
+ DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->hw_ctl->idx - CTL_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc, frame_event);
+
+ return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+ &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+ else if (!ret)
+ cmd_enc->pp_timeout_report_cnt = 0;
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ int refcount;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable ? "true" : "false", refcount);
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_RDPTR);
+
+end:
+ if (ret) {
+ DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0, ret,
+ enable ? "true" : "false", refcount);
+ }
+
+ return ret;
+}
+
+static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_encoder_helper_register_irq(phys_enc,
+ INTR_IDX_CTL_START);
+ } else {
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_CTL_START);
+
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+ }
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_tear_check tc_cfg = { 0 };
+ struct drm_display_mode *mode;
+ bool tc_enable = true;
+ u32 vsync_hz;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ mode = &phys_enc->cached_mode;
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+ !phys_enc->hw_pp->ops.enable_tearcheck) {
+ DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return;
+ }
+
+ dpu_kms = phys_enc->dpu_kms;
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ /*
+ * TE default: dsi byte clock calculated based on 70 fps;
+ * around 14 ms to complete a kickoff cycle if te disabled;
+ * vclk_line base on 60 fps; write is faster than read;
+ * init == start == rdptr;
+ *
+ * vsync_count is ratio of MDP VSYNC clock frequency to LCD panel
+ * frequency divided by the no. of rows (lines) in the LCD panel.
+ */
+ vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
+ if (!vsync_hz) {
+ DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
+ vsync_hz);
+ return;
+ }
+
+ tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
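+ /*
+ * Worked example (illustrative numbers only): a 19.2 MHz vsync clock
+ * and a panel with vtotal = 2000 lines at vrefresh = 60 Hz give
+ * vsync_count = 19200000 / (2000 * 60) = 160 counts per line.
+ */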
+
+ /* enable external TE after kickoff to avoid premature autorefresh */
+ tc_cfg.hw_vsync_mode = 0;
+
+ /*
+ * By setting sync_cfg_height to near max register value, we essentially
+ * disable dpu hw generated TE signal, since hw TE will arrive first.
+ * Only caveat is if due to error, we hit wrap-around.
+ */
+ tc_cfg.sync_cfg_height = 0xFFF0;
+ tc_cfg.vsync_init_val = mode->vdisplay;
+ tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+ tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+ tc_cfg.start_pos = mode->vdisplay;
+ tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+ mode->vtotal, mode->vrefresh);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+ tc_cfg.rd_ptr_irq);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+ tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+ tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+ phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
+static void _dpu_encoder_phys_cmd_pingpong_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+ || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+ _dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+ dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ /*
+ * we do separate flush for each CTL and let
+ * CTL_START synchronize them
+ */
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ u32 flush_mask = 0;
+
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+ _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+ return;
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid phys encoder\n");
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+ DPU_ERROR("already enabled\n");
+ return;
+ }
+
+ dpu_encoder_phys_cmd_enable_helper(phys_enc);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+ struct dpu_encoder_phys *phys_enc, bool enable)
+{
+ if (!phys_enc || !phys_enc->hw_pp ||
+ !phys_enc->hw_pp->ops.connect_external_te)
+ return;
+
+ trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+ phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+ struct dpu_encoder_phys *phys_enc)
+{
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pingpong *hw_pp;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return -EINVAL;
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp->ops.get_line_count)
+ return -EINVAL;
+
+ return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->enable_state);
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+ return;
+ }
+
+ if (phys_enc->hw_pp->ops.enable_tearcheck)
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_get_hw_resources(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+ DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "\n");
+ hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ /*
+ * Mark kickoff request as outstanding. If there is more than one
+ * outstanding, we have to wait for the previous one to complete.
+ */
+ ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (ret) {
+ /* force pending_kickoff_cnt 0 to discard failed kickoff */
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+ DRMID(phys_enc->parent), ret,
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid argument(s)\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+ &wait_info);
+ if (ret == -ETIMEDOUT) {
+ DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (rc) {
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+ DRMID(phys_enc->parent), rc,
+ phys_enc->intf_idx - INTF_0);
+ }
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+ /* required for both controllers */
+ if (!rc && cmd_enc->serialize_wait4pp)
+ dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+ struct dpu_encoder_wait_info wait_info;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return rc;
+
+ wait_info.wq = &cmd_enc->pending_vblank_wq;
+ wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+ wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+ atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+ rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+ &wait_info);
+
+ return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ /*
+ * re-enable external TE, either for the first time after enabling
+ * or if disabled for Autorefresh
+ */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+ struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_cmd_is_master;
+ ops->mode_set = dpu_encoder_phys_cmd_mode_set;
+ ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
+ ops->enable = dpu_encoder_phys_cmd_enable;
+ ops->disable = dpu_encoder_phys_cmd_disable;
+ ops->destroy = dpu_encoder_phys_cmd_destroy;
+ ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
+ ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+ ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+ ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+ ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+ ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+ ops->hw_reset = dpu_encoder_helper_hw_reset;
+ ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+ ops->restore = dpu_encoder_phys_cmd_enable_helper;
+ ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+ ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+ ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+ struct dpu_hw_mdp *hw_mdp;
+ struct dpu_encoder_irq *irq;
+ int i, ret = 0;
+
+ DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+ cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ if (!cmd_enc) {
+ ret = -ENOMEM;
+ DPU_ERROR("failed to allocate\n");
+ goto fail;
+ }
+ phys_enc = &cmd_enc->base;
+
+ hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+ if (IS_ERR_OR_NULL(hw_mdp)) {
+ ret = PTR_ERR(hw_mdp);
+ DPU_ERROR("failed to get mdptop\n");
+ goto fail_mdp_init;
+ }
+ phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_CMD;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ cmd_enc->stream_sel = 0;
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+ for (i = 0; i < INTR_IDX_MAX; i++) {
+ irq = &phys_enc->irq[i];
+ INIT_LIST_HEAD(&irq->cb.list);
+ irq->irq_idx = -EINVAL;
+ irq->hw_idx = -EINVAL;
+ irq->cb.arg = phys_enc;
+ }
+
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->name = "ctl_start";
+ irq->intr_type = DPU_IRQ_TYPE_CTL_START;
+ irq->intr_idx = INTR_IDX_CTL_START;
+ irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->name = "pp_done";
+ irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
+ irq->intr_idx = INTR_IDX_PINGPONG;
+ irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->name = "pp_rd_ptr";
+ irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
+ irq->intr_idx = INTR_IDX_RDPTR;
+ irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->name = "underrun";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+ irq->intr_idx = INTR_IDX_UNDERRUN;
+ irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+ atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+ DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+ return phys_enc;
+
+fail_mdp_init:
+ kfree(cmd_enc);
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644
index 000000000000..14fc7c2a6bb7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+ container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static void drm_mode_to_intf_timing_params(
+ const struct dpu_encoder_phys_vid *vid_enc,
+ const struct drm_display_mode *mode,
+ struct intf_timing_params *timing)
+{
+ memset(timing, 0, sizeof(*timing));
+
+ if ((mode->htotal < mode->hsync_end)
+ || (mode->hsync_start < mode->hdisplay)
+ || (mode->vtotal < mode->vsync_end)
+ || (mode->vsync_start < mode->vdisplay)
+ || (mode->hsync_end < mode->hsync_start)
+ || (mode->vsync_end < mode->vsync_start)) {
+ DPU_ERROR(
+ "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal, mode->hdisplay);
+ DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->vdisplay);
+ return;
+ }
+
+ /*
+ * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+ * Active Region Front Porch Sync Back Porch
+ * <-----------------><------------><-----><----------->
+ * <- [hv]display --->
+ * <--------- [hv]sync_start ------>
+ * <----------------- [hv]sync_end ------->
+ * <---------------------------- [hv]total ------------->
+ */
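+ /*
+ * Worked example (standard 1080p60 CEA timing, for illustration):
+ * hdisplay = 1920, hsync_start = 2008, hsync_end = 2052 and
+ * htotal = 2200 yield h_front_porch = 2008 - 1920 = 88,
+ * hsync_pulse_width = 2052 - 2008 = 44 and
+ * h_back_porch = 2200 - 2052 = 148.
+ */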
+ timing->width = mode->hdisplay; /* active width */
+ timing->height = mode->vdisplay; /* active height */
+ timing->xres = timing->width;
+ timing->yres = timing->height;
+ timing->h_back_porch = mode->htotal - mode->hsync_end;
+ timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+ timing->v_back_porch = mode->vtotal - mode->vsync_end;
+ timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+ timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+ timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+ timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ timing->border_clr = 0;
+ timing->underflow_clr = 0xff;
+ timing->hsync_skew = mode->hskew;
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+ timing->hsync_polarity = 0;
+ timing->vsync_polarity = 0;
+ }
+
+ /*
+ * For edp only:
+ * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+ * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+ */
+ /*
+ * if (vid_enc->hw->cap->type == INTF_EDP) {
+ * display_v_start += mode->htotal - mode->hsync_start;
+ * display_v_end -= mode->hsync_start - mode->hdisplay;
+ * }
+ */
+}
+
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->xres;
+ u32 inactive =
+ timing->h_back_porch + timing->h_front_porch +
+ timing->hsync_pulse_width;
+ return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->yres;
+ u32 inactive =
+ timing->v_back_porch + timing->v_front_porch +
+ timing->vsync_pulse_width;
+ return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ * Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have a very large VFP; however, we only need a total
+ * number of lines based on the chip's worst-case latencies.
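+ *
+ * For example (illustrative numbers): with prog_fetch_lines_worst_case
+ * of 25 and v_back_porch + vsync_pulse_width totalling 10 lines, the
+ * remaining 15 lines of prefetch must come out of the vertical front
+ * porch.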
+ */
+static u32 programmable_fetch_get_num_lines(
+ struct dpu_encoder_phys_vid *vid_enc,
+ const struct intf_timing_params *timing)
+{
+ u32 worst_case_needed_lines =
+ vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ u32 start_of_frame_lines =
+ timing->v_back_porch + timing->vsync_pulse_width;
+ u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+ u32 actual_vfp_lines = 0;
+
+ /* Fetch must be outside active lines, otherwise undefined. */
+ if (start_of_frame_lines >= worst_case_needed_lines) {
+ DPU_DEBUG_VIDENC(vid_enc,
+ "prog fetch is not needed, large vbp+vsw\n");
+ actual_vfp_lines = 0;
+ } else if (timing->v_front_porch < needed_vfp_lines) {
+ /* Warn fetch needed, but not enough porch in panel config */
+ pr_warn_once("low vbp+vfp may lead to perf issues in some cases\n");
+ DPU_DEBUG_VIDENC(vid_enc,
+ "less vfp than fetch req, using entire vfp\n");
+ actual_vfp_lines = timing->v_front_porch;
+ } else {
+ DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+ actual_vfp_lines = needed_vfp_lines;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+ timing->v_front_porch, timing->v_back_porch,
+ timing->vsync_pulse_width);
+ DPU_DEBUG_VIDENC(vid_enc,
+ "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+ worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+ return actual_vfp_lines;
+}
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ * the start of fetch into the vertical front porch for cases where the
+ * vsync pulse width and vertical back porch time are insufficient.
+ *
+ * Gets the # of lines to pre-fetch, then calculates the VSYNC counter
+ * value; the HW layer requires the VSYNC counter of the first pixel of
+ * the target VFP line.
+ *
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+ const struct intf_timing_params *timing)
+{
+ struct dpu_encoder_phys_vid *vid_enc =
+ to_dpu_encoder_phys_vid(phys_enc);
+ struct intf_prog_fetch f = { 0 };
+ u32 vfp_fetch_lines = 0;
+ u32 horiz_total = 0;
+ u32 vert_total = 0;
+ u32 vfp_fetch_start_vsync_counter = 0;
+ unsigned long lock_flags;
+
+ if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+ return;
+
+ vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+ if (vfp_fetch_lines) {
+ vert_total = get_vertical_total(timing);
+ horiz_total = get_horizontal_total(timing);
+ vfp_fetch_start_vsync_counter =
+ (vert_total - vfp_fetch_lines) * horiz_total + 1;
+ f.enable = 1;
+ f.fetch_start = vfp_fetch_start_vsync_counter;
+ }
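+
+ /*
+ * Continuing the example above (illustrative numbers): with
+ * vert_total = 1125, horiz_total = 2200 and vfp_fetch_lines = 15,
+ * fetch_start = (1125 - 15) * 2200 + 1 = 2442001, the vsync counter
+ * value at the first pixel of the target VFP line.
+ */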
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+ vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
+
+static bool dpu_encoder_phys_vid_mode_fixup(
+ struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ if (phys_enc)
+ DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+
+ /*
+ * Modifying the adjusted mode here has consequences later, when it
+ * comes back to us as the cached mode in mode_set().
+ */
+ return true;
+}
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct drm_display_mode mode;
+ struct intf_timing_params timing_params = { 0 };
+ const struct dpu_format *fmt = NULL;
+ u32 fmt_fourcc = DRM_FORMAT_RGB888;
+ unsigned long lock_flags;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc || !phys_enc->hw_ctl ||
+ !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ mode = phys_enc->cached_mode;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+ DPU_ERROR("timing engine setup is not supported\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+ drm_mode_debug_printmodeline(&mode);
+
+ if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ mode.hdisplay >>= 1;
+ mode.htotal >>= 1;
+ mode.hsync_start >>= 1;
+ mode.hsync_end >>= 1;
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "split_role %d, halve horizontal %d %d %d %d\n",
+ phys_enc->split_role,
+ mode.hdisplay, mode.htotal,
+ mode.hsync_start, mode.hsync_end);
+ }
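+
+ /*
+ * e.g. (illustrative): a 3840-pixel-wide mode driven by two
+ * interfaces is programmed as two 1920-pixel-wide halves by the
+ * halving above.
+ */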
+
+ drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+ fmt = dpu_get_dpu_format(fmt_fourcc);
+ DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+ intf_cfg.intf = vid_enc->hw_intf->idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+ &timing_params, fmt);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ programmable_fetch_config(phys_enc, &timing_params);
+
+ vid_enc->timing_params = timing_params;
+}
+
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_hw_ctl *hw_ctl;
+ unsigned long lock_flags;
+ u32 flush_register = 0;
+ int new_cnt = -1, old_cnt = -1;
+
+ if (!phys_enc)
+ return;
+
+ hw_ctl = phys_enc->hw_ctl;
+ if (!hw_ctl)
+ return;
+
+ DPU_ATRACE_BEGIN("vblank_irq");
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+ /*
+ * only decrement the pending flush count if we've actually flushed
+ * hardware. due to sw irq latency, vblank may have already happened
+ * so we need to double-check with hw that it accepted the flush bits
+ */
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ if (hw_ctl && hw_ctl->ops.get_flush_register)
+ flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+ if (flush_register == 0)
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+ -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("vblank_irq");
+}
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (!phys_enc)
+ return;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return false;
+
+ if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
+ return true;
+
+ return false;
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+}
+
+static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_irq *irq;
+
+ /*
+ * Initialize irq->hw_idx only when irq is not registered.
+ * Prevent invalidating irq->irq_idx as modeset may be
+ * called many times during dfps.
+ */
+
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ if (irq->irq_idx < 0)
+ irq->hw_idx = phys_enc->intf_idx;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ if (irq->irq_idx < 0)
+ irq->hw_idx = phys_enc->intf_idx;
+}
+
+static void dpu_encoder_phys_vid_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_rm *rm;
+ struct dpu_rm_hw_iter iter;
+ int i, instance;
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !phys_enc->dpu_kms) {
+ DPU_ERROR("invalid encoder/kms\n");
+ return;
+ }
+
+ rm = &phys_enc->dpu_kms->rm;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ if (adj_mode) {
+ phys_enc->cached_mode = *adj_mode;
+ drm_mode_debug_printmodeline(adj_mode);
+ DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ }
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (dpu_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+ }
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+
+ _dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ struct dpu_encoder_phys_vid *vid_enc;
+ int refcount;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_VSYNC);
+
+end:
+ if (ret) {
+ DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+ DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret, enable,
+ refcount);
+ }
+ return ret;
+}
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct dpu_hw_intf *intf;
+ struct dpu_hw_ctl *ctl;
+ u32 flush_mask = 0;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ intf = vid_enc->hw_intf;
+ ctl = phys_enc->hw_ctl;
+ if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+ dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+ /*
+ * For single flush cases (dual-ctl or pp-split), skip setting the
+ * flush bit for the slave intf, since both intfs use the same ctl
+ * and HW will only flush the master.
+ */
+ if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+ !dpu_encoder_phys_vid_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+ DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
+
+ /* ctl_flush & timing engine enable will be triggered by framework */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED)
+ phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+ kfree(vid_enc);
+}
+
+static void dpu_encoder_phys_vid_get_hw_resources(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !hw_res) {
+ DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+ phys_enc != 0, hw_res != 0, conn_state != 0);
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf) {
+ DPU_ERROR("invalid arg(s), hw_intf\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+ hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+}
+
+static int _dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc, bool notify)
+{
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc) {
+ pr_err("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
+ if (notify && phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+ return 0;
+ }
+
+ /* Wait for kickoff to complete */
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+ &wait_info);
+
+ if (ret == -ETIMEDOUT) {
+ dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+ } else if (!ret && notify && phys_enc->parent_ops->handle_frame_done) {
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+ }
+
+ return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+
+ if (!phys_enc || !params) {
+ DPU_ERROR("invalid encoder/parameters\n");
+ return;
+ }
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl || !ctl->ops.wait_reset_status)
+ return;
+
+ /*
+ * hw supports hardware initiated ctl reset, so before we kickoff a new
+ * frame, need to check and wait for hw initiated ctl reset completion
+ */
+ rc = ctl->ops.wait_reset_status(ctl);
+ if (rc) {
+ DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+ ctl->idx, rc);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_encoder_phys_vid *vid_enc;
+ unsigned long lock_flags;
+ int ret;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("already disabled\n");
+ return;
+ }
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+ if (dpu_encoder_phys_vid_is_master(phys_enc))
+ dpu_encoder_phys_inc_pending(phys_enc);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /*
+ * Wait for a vsync so we know that ENABLE=0 has latched before the
+ * (connector) source of the vsync gets disabled. Otherwise we end up
+ * in a funny state if we re-enable before the disable latches, with
+ * the result that some of the settings changed for the new modeset
+ * (like the new scanout buffer) don't latch properly.
+ */
+ if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+ ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+ if (ret) {
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+ DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret);
+ }
+ }
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ unsigned long lock_flags;
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+ /*
+ * Video mode must flush CTL before enabling the timing engine.
+ * Video encoders need to turn on their interfaces now.
+ */
+ if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+ trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0);
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+ }
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ int ret;
+
+ if (!phys_enc)
+ return;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0,
+ enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (ret)
+ return;
+
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ } else {
+ dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ }
+}
+
+static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
+ bool enable, u32 frame_count)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+ vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+ enable, frame_count);
+}
+
+static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return 0;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+ vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ return -EINVAL;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+ return -EINVAL;
+
+ return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_vid_is_master;
+ ops->mode_set = dpu_encoder_phys_vid_mode_set;
+ ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
+ ops->enable = dpu_encoder_phys_vid_enable;
+ ops->disable = dpu_encoder_phys_vid_disable;
+ ops->destroy = dpu_encoder_phys_vid_destroy;
+ ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
+ ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->irq_control = dpu_encoder_phys_vid_irq_control;
+ ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+ ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
+ ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
+ ops->hw_reset = dpu_encoder_helper_hw_reset;
+ ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_vid *vid_enc = NULL;
+ struct dpu_rm_hw_iter iter;
+ struct dpu_hw_mdp *hw_mdp;
+ struct dpu_encoder_irq *irq;
+ int i, ret = 0;
+
+ if (!p) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+ if (!vid_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phys_enc = &vid_enc->base;
+
+ hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+ if (IS_ERR_OR_NULL(hw_mdp)) {
+ ret = PTR_ERR(hw_mdp);
+ DPU_ERROR("failed to get mdptop\n");
+ goto fail;
+ }
+ phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ /*
+ * The hw_intf resource is permanently assigned to this encoder;
+ * other resources are allocated at atomic commit time by use case.
+ */
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
+ while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
+ struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+ if (hw_intf->idx == p->intf_idx) {
+ vid_enc->hw_intf = hw_intf;
+ break;
+ }
+ }
+
+ if (!vid_enc->hw_intf) {
+ ret = -EINVAL;
+ DPU_ERROR("failed to get hw_intf\n");
+ goto fail;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_VIDEO;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ for (i = 0; i < INTR_IDX_MAX; i++) {
+ irq = &phys_enc->irq[i];
+ INIT_LIST_HEAD(&irq->cb.list);
+ irq->irq_idx = -EINVAL;
+ irq->hw_idx = -EINVAL;
+ irq->cb.arg = phys_enc;
+ }
+
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ irq->name = "vsync_irq";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
+ irq->intr_idx = INTR_IDX_VSYNC;
+ irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->name = "underrun";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+ irq->intr_idx = INTR_IDX_UNDERRUN;
+ irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+ return phys_enc;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (vid_enc)
+ dpu_encoder_phys_vid_destroy(phys_enc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
new file mode 100644
index 000000000000..bfcd165e96df
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -0,0 +1,1173 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+
+#include "msm_media_info.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+
+#define DPU_UBWC_META_MACRO_W_H 16
+#define DPU_UBWC_META_BLOCK_SIZE 256
+#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
+
+#define DPU_TILE_HEIGHT_DEFAULT 1
+#define DPU_TILE_HEIGHT_TILED 4
+#define DPU_TILE_HEIGHT_UBWC 4
+#define DPU_TILE_HEIGHT_NV12 8
+
+#define DPU_MAX_IMG_WIDTH 0x3FFF
+#define DPU_MAX_IMG_HEIGHT 0x3FFF
+
+/*
+ * DPU supported format packing, bpp, and other format information.
+ * DPU currently only supports interleaved RGB formats.
+ * UBWC support for a pixel format is indicated by the flag;
+ * such formats carry an additional metadata plane.
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
+bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
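+
+/*
+ * Read (illustratively) against the table below: the ARGB8888 entry,
+ * for instance, describes four 8-bit components fetched interleaved in
+ * the given element order, with bpp expressed in bytes per pixel.
+ */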
+
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
+alpha, bp, flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
+flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PLANAR, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 1, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+/**
+ * struct dpu_media_color_map - maps drm format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct dpu_media_color_map {
+ uint32_t format;
+ uint32_t color;
+};
+
+static const struct dpu_format dpu_format_map[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ PSEUDO_YUV_FMT(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV16,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV61,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PLANAR_YUV_FMT(YUV420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+};
+
+/*
+ * A5x tile format table:
+ * This table holds the A5x tile formats supported.
+ */
+static const struct dpu_format dpu_format_map_tile[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+
+ PSEUDO_YUV_FMT_TILED(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * user-space passes that data.
+ */
+static const struct dpu_format dpu_format_map_ubwc[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
+ DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_p010[] = {
+ PSEUDO_YUV_FMT_LOOSE(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
+ DPU_FETCH_LINEAR, 2),
+};
+
+static const struct dpu_format dpu_format_map_p010_ubwc[] = {
+ PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_COMPRESSED),
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_tp10_ubwc[] = {
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_COMPRESSED),
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * _dpu_get_v_h_subsample_rate - get subsample rates for all formats we support
+ * Note: not using the drm_format_*_subsampling helpers since we key off the
+ * DPU chroma sample type rather than the DRM fourcc
+ */
+static void _dpu_get_v_h_subsample_rate(
+ enum dpu_chroma_samp_type chroma_sample,
+ uint32_t *v_sample,
+ uint32_t *h_sample)
+{
+ if (!v_sample || !h_sample)
+ return;
+
+ switch (chroma_sample) {
+ case DPU_CHROMA_H2V1:
+ *v_sample = 1;
+ *h_sample = 2;
+ break;
+ case DPU_CHROMA_H1V2:
+ *v_sample = 2;
+ *h_sample = 1;
+ break;
+ case DPU_CHROMA_420:
+ *v_sample = 2;
+ *h_sample = 2;
+ break;
+ default:
+ *v_sample = 1;
+ *h_sample = 1;
+ break;
+ }
+}
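+
+/*
+ * Usage sketch (example_chroma_dims is a hypothetical helper): for NV12
+ * (DPU_CHROMA_420) the rates come back as v=2, h=2, so a 1920x1080 luma
+ * plane maps to a 960x540 chroma sample grid.
+ */
+static void example_chroma_dims(uint32_t width, uint32_t height)
+{
+ uint32_t v_sample, h_sample;
+
+ _dpu_get_v_h_subsample_rate(DPU_CHROMA_420, &v_sample, &h_sample);
+ DPU_DEBUG("chroma grid: %ux%u\n", width / h_sample, height / v_sample);
+}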
+
+static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
+{
+ static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
+ {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+ };
+ int color_fmt = -1;
+ int i;
+
+ if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+ if (DPU_FORMAT_IS_DX(fmt)) {
+ if (fmt->unpack_tight)
+ color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+ else
+ color_fmt = COLOR_FMT_P010_UBWC;
+ } else {
+ color_fmt = COLOR_FMT_NV12_UBWC;
+ }
+ return color_fmt;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
+ if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
+ color_fmt = dpu_media_ubwc_map[i].color;
+ break;
+ }
+ return color_fmt;
+}
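+
+/*
+ * Summary of the NV12 branch above, for reference: DX + tight unpack maps
+ * to COLOR_FMT_NV12_BPP10_UBWC (TP10), DX + loose unpack maps to
+ * COLOR_FMT_P010_UBWC, and plain 8-bit NV12 maps to COLOR_FMT_NV12_UBWC.
+ */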
+
+static int _dpu_format_get_plane_sizes_ubwc(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout)
+{
+ int i;
+ int color;
+ bool meta = DPU_FORMAT_IS_UBWC(fmt);
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ color = _dpu_format_get_media_color_ubwc(fmt);
+ if (color < 0) {
+ DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+ (char *)&fmt->base.pixel_format);
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ uint32_t y_sclines, uv_sclines;
+ uint32_t y_meta_scanlines = 0;
+ uint32_t uv_meta_scanlines = 0;
+
+ layout->num_planes = 2;
+ layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+ y_sclines = VENUS_Y_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+ uv_sclines = VENUS_UV_SCANLINES(color, height);
+ layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+ uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+ layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+ uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ } else {
+ uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+ layout->num_planes = 1;
+
+ layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+ }
+
+done:
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+static int _dpu_format_get_plane_sizes_linear(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ int i;
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ /* Due to memset above, only need to set planes of interest */
+ if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
+ layout->num_planes = 1;
+ layout->plane_size[0] = width * height * layout->format->bpp;
+ layout->plane_pitch[0] = width * layout->format->bpp;
+ } else {
+ uint32_t v_subsample, h_subsample;
+ uint32_t chroma_samp;
+ uint32_t bpp = 1;
+
+ chroma_samp = fmt->chroma_sample;
+ _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+ &h_subsample);
+
+ if (width % h_subsample || height % v_subsample) {
+ DRM_ERROR("mismatch in subsample vs dimensions\n");
+ return -EINVAL;
+ }
+
+ if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+ (DPU_FORMAT_IS_DX(fmt)))
+ bpp = 2;
+ layout->plane_pitch[0] = width * bpp;
+ layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+ layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[1] = layout->plane_pitch[1] *
+ (height / v_subsample);
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ layout->num_planes = 2;
+ layout->plane_size[1] *= 2;
+ layout->plane_pitch[1] *= 2;
+ } else {
+ /* planar */
+ layout->num_planes = 3;
+ layout->plane_size[2] = layout->plane_size[1];
+ layout->plane_pitch[2] = layout->plane_pitch[1];
+ }
+ }
+
+ /*
+ * linear format: allow user allocated pitches if they are greater than
+ * the requirement.
+ * ubwc format: pitch values are computed uniformly across
+ * all the components based on ubwc specifications.
+ */
+ for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
+ if (pitches && layout->plane_pitch[i] < pitches[i])
+ layout->plane_pitch[i] = pitches[i];
+ }
+
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+static int dpu_format_get_plane_sizes(
+ const struct dpu_format *fmt,
+ const uint32_t w,
+ const uint32_t h,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ if (!layout || !fmt) {
+ DRM_ERROR("invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
+ return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+ return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+}
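+
+/*
+ * Worked example (illustrative): for linear NV12 at 1920x1080 the helper
+ * above yields plane_pitch = {1920, 1920}, plane_size = {1920*1080,
+ * 1920*540} and total_size = 1920*1080*3/2 = 3110400 bytes. A hypothetical
+ * caller:
+ */
+static int example_nv12_linear_layout(void)
+{
+ const struct dpu_format *fmt = dpu_get_dpu_format(DRM_FORMAT_NV12);
+ struct dpu_hw_fmt_layout layout;
+
+ if (!fmt)
+ return -EINVAL;
+
+ return dpu_format_get_plane_sizes(fmt, 1920, 1080, &layout, NULL);
+}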
+
+static int _dpu_format_populate_addrs_ubwc(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t base_addr = 0;
+ bool meta;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (aspace)
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ if (!base_addr) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+
+ meta = DPU_FORMAT_IS_UBWC(layout->format);
+
+ /* Per-format logic for verifying active planes */
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | Y meta | ** | Y bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Y bitstream | ** | CbCr bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr metadata | ** | Y meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr bitstream | ** | CbCr meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /************************************************/
+
+ /* configure Y bitstream plane */
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+ /* configure CbCr bitstream plane */
+ layout->plane_addr[1] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2] + layout->plane_size[3];
+
+ if (!meta)
+ goto done;
+
+ /* configure Y metadata plane */
+ layout->plane_addr[2] = base_addr;
+
+ /* configure CbCr metadata plane */
+ layout->plane_addr[3] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2];
+
+ } else {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | RGB meta | ** | RGB bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | RGB bitstream | ** | NONE | */
+ /* | data | ** | | */
+ /* ------------------- ** -------------------- */
+ /* ** | RGB meta | */
+ /* ** | plane | */
+ /* ** -------------------- */
+ /************************************************/
+
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+ layout->plane_addr[1] = 0;
+
+ if (!meta)
+ goto done;
+
+ layout->plane_addr[2] = base_addr;
+ layout->plane_addr[3] = 0;
+ }
+done:
+ return 0;
+}
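+
+/*
+ * Sketch of the resulting offsets for the meta (UBWC) YUV case above,
+ * where the buffer holds [Y meta][Y data][CbCr meta][CbCr data]; the local
+ * names are illustrative only:
+ */
+static void example_ubwc_yuv_offsets(uint32_t base,
+ const struct dpu_hw_fmt_layout *l)
+{
+ uint32_t y_meta = base;
+ uint32_t y_data = base + l->plane_size[2];
+ uint32_t cbcr_meta = base + l->plane_size[0] + l->plane_size[2];
+ uint32_t cbcr_data = cbcr_meta + l->plane_size[3];
+
+ DPU_DEBUG("Y %x/%x CbCr %x/%x\n", y_data, y_meta, cbcr_data, cbcr_meta);
+}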
+
+static int _dpu_format_populate_addrs_linear(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ unsigned int i;
+
+ /* Can now check the pitches given vs pitches expected */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (layout->plane_pitch[i] > fb->pitches[i]) {
+ DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ return -EINVAL;
+ }
+ }
+
+ /* Populate addresses for simple formats here */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (aspace)
+ layout->plane_addr[i] =
+ msm_framebuffer_iova(fb, aspace, i);
+ if (!layout->plane_addr[i]) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ int i, ret;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if ((fb->width > DPU_MAX_IMG_WIDTH) ||
+ (fb->height > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ layout->format = to_dpu_format(msm_framebuffer_format(fb));
+
+ /* Populate the plane sizes etc via get_format */
+ ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
+ layout, fb->pitches);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DPU_MAX_PLANES; ++i)
+ plane_addr[i] = layout->plane_addr[i];
+
+ /* Populate the addresses given the fb */
+ if (DPU_FORMAT_IS_UBWC(layout->format) ||
+ DPU_FORMAT_IS_TILE(layout->format))
+ ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ else
+ ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
+
+ /* check if anything changed */
+ if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+ ret = -EAGAIN;
+
+ return ret;
+}
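+
+/*
+ * Usage sketch for a hypothetical plane-update path: -EAGAIN signals that
+ * the layout was populated but the addresses did not change, so the caller
+ * may skip reprogramming the source surface.
+ */
+static int example_update_layout(struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb, struct dpu_hw_fmt_layout *layout)
+{
+ int ret = dpu_format_populate_layout(aspace, fb, layout);
+
+ if (ret == -EAGAIN)
+ return 0; /* same addresses as before, nothing to flush */
+
+ return ret;
+}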
+
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos)
+{
+ int ret, i, num_base_fmt_planes;
+ const struct dpu_format *fmt;
+ struct dpu_hw_fmt_layout layout;
+ uint32_t bos_total_size = 0;
+
+ if (!msm_fmt || !cmd || !bos) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ fmt = to_dpu_format(msm_fmt);
+ num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+
+ ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+ &layout, cmd->pitches);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_base_fmt_planes; i++) {
+ if (!bos[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ if ((i == 0) || (bos[i] != bos[0]))
+ bos_total_size += bos[i]->size;
+ }
+
+ if (bos_total_size < layout.total_size) {
+ DRM_ERROR("buffers total size too small %u expected %u\n",
+ bos_total_size, layout.total_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
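+
+/*
+ * Example of the size check above: for a two-plane NV12 fb whose planes
+ * share a single GEM BO, bos[1] == bos[0], so the BO's size is counted
+ * once and must still cover layout.total_size on its own.
+ */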
+
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier)
+{
+ uint32_t i = 0;
+ const struct dpu_format *fmt = NULL;
+ const struct dpu_format *map = NULL;
+ ssize_t map_size = 0;
+
+ /*
+ * Currently only support exactly zero or one modifier.
+ * All planes use the same modifier.
+ */
+ DPU_DEBUG("plane format modifier 0x%llX\n", modifier);
+
+ switch (modifier) {
+ case 0:
+ map = dpu_format_map;
+ map_size = ARRAY_SIZE(dpu_format_map);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ map = dpu_format_map_ubwc;
+ map_size = ARRAY_SIZE(dpu_format_map_ubwc);
+ DPU_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ (char *)&format);
+ break;
+ default:
+ DPU_ERROR("unsupported format modifier %llX\n", modifier);
+ return NULL;
+ }
+
+ for (i = 0; i < map_size; i++) {
+ if (format == map[i].base.pixel_format) {
+ fmt = &map[i];
+ break;
+ }
+ }
+
+ if (fmt == NULL)
+ DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+ (char *)&format, modifier);
+ else
+ DPU_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+ (char *)&format, modifier,
+ DPU_FORMAT_IS_UBWC(fmt),
+ DPU_FORMAT_IS_YUV(fmt));
+
+ return fmt;
+}
+
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers)
+{
+ const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
+ modifiers);
+ if (fmt)
+ return &fmt->base;
+ return NULL;
+}
+
+uint32_t dpu_populate_formats(
+ const struct dpu_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max)
+{
+ uint32_t i, fourcc_format;
+
+ if (!format_list || !pixel_formats)
+ return 0;
+
+ for (i = 0, fourcc_format = 0;
+ format_list->fourcc_format && i < pixel_formats_max;
+ ++format_list) {
+ /* TODO: verify that each listed format is in dpu_format_map */
+
+ /* optionally return modified formats */
+ if (pixel_modifiers) {
+ /* assume same modifier for all fb planes */
+ pixel_formats[i] = format_list->fourcc_format;
+ pixel_modifiers[i++] = format_list->modifier;
+ } else {
+ /* assume base formats grouped together */
+ if (fourcc_format != format_list->fourcc_format) {
+ fourcc_format = format_list->fourcc_format;
+ pixel_formats[i++] = fourcc_format;
+ }
+ }
+ }
+
+ return i;
+}
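+
+/*
+ * Usage sketch (hypothetical plane init path): populate fourcc/modifier
+ * arrays from a catalog-provided format_list.
+ */
+static uint32_t example_plane_formats(
+ const struct dpu_format_extended *format_list)
+{
+ static uint32_t formats[64];
+ static uint64_t modifiers[64];
+
+ return dpu_populate_formats(format_list, formats, modifiers,
+ ARRAY_SIZE(formats));
+}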
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
new file mode 100644
index 000000000000..a54451d8d011
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_FORMATS_H
+#define _DPU_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
+ * @format: DRM FourCC Code
+ * @modifier: format modifier from client; all planes use the same modifier
+ */
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier);
+
+#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
+
+/**
+ * dpu_get_msm_format - get a dpu_format by its msm_format base;
+ * callback function registered with the msm_kms layer
+ * @kms: kms driver
+ * @format: DRM FourCC Code
+ * @modifiers: data layout modifier
+ */
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
+
+/**
+ * dpu_populate_formats - populate the given array with fourcc codes supported
+ * @format_list: pointer to list of possible formats
+ * @pixel_formats: array to populate with fourcc codes
+ * @pixel_modifiers: array to populate with drm modifiers, can be NULL
+ * @pixel_formats_max: length of pixel formats array
+ * Return: number of elements populated
+ */
+uint32_t dpu_populate_formats(
+ const struct dpu_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max);
+
+/**
+ * dpu_format_check_modified_format - validate format and buffers for
+ * dpu non-standard, i.e. modified format
+ * @kms: kms driver
+ * @msm_fmt: pointer to the msm_format base of a dpu_format
+ * @cmd: fb_cmd2 structure user request
+ * @bos: gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+/**
+ * dpu_format_populate_layout - populate the given format layout based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN on success when the addresses
+ * are unchanged from before, or 0 if new addresses were populated
+ */
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *fmtl);
+
+#endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
new file mode 100644
index 000000000000..58d29e43faef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_blk.h"
+
+/* Serialization lock for dpu_hw_blk_list */
+static DEFINE_MUTEX(dpu_hw_blk_lock);
+
+/* List of all hw block objects */
+static LIST_HEAD(dpu_hw_blk_list);
+
+/**
+ * dpu_hw_blk_init - initialize hw block object
+ * @type: hw block type - enum dpu_hw_blk_type
+ * @id: instance id of the hw block
+ * @ops: Pointer to block operations
+ * return: 0 if success; error code otherwise
+ */
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+ struct dpu_hw_blk_ops *ops)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&hw_blk->list);
+ hw_blk->type = type;
+ hw_blk->id = id;
+ atomic_set(&hw_blk->refcount, 0);
+
+ if (ops)
+ hw_blk->ops = *ops;
+
+ mutex_lock(&dpu_hw_blk_lock);
+ list_add(&hw_blk->list, &dpu_hw_blk_list);
+ mutex_unlock(&dpu_hw_blk_lock);
+
+ return 0;
+}
+
+/**
+ * dpu_hw_blk_destroy - destroy hw block object.
+ * @hw_blk: pointer to hw block object
+ * return: none
+ */
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ if (atomic_read(&hw_blk->refcount))
+ pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
+ hw_blk->id);
+
+ mutex_lock(&dpu_hw_blk_lock);
+ list_del(&hw_blk->list);
+ mutex_unlock(&dpu_hw_blk_lock);
+}
+
+/**
+ * dpu_hw_blk_get - get hw_blk from free pool
+ * @hw_blk: if specified, increment reference count only
+ * @type: if hw_blk is not specified, allocate the next available of this type
+ * @id: if specified (>= 0), allocate the given instance of the above type
+ * return: pointer to hw block object
+ */
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
+{
+ struct dpu_hw_blk *curr;
+ int rc, refcount;
+
+ if (!hw_blk) {
+ mutex_lock(&dpu_hw_blk_lock);
+ list_for_each_entry(curr, &dpu_hw_blk_list, list) {
+ if ((curr->type != type) ||
+ (id >= 0 && curr->id != id) ||
+ (id < 0 &&
+ atomic_read(&curr->refcount)))
+ continue;
+
+ hw_blk = curr;
+ break;
+ }
+ mutex_unlock(&dpu_hw_blk_lock);
+ }
+
+ if (!hw_blk) {
+ pr_debug("no hw_blk:%d\n", type);
+ return NULL;
+ }
+
+ refcount = atomic_inc_return(&hw_blk->refcount);
+
+ if (refcount == 1 && hw_blk->ops.start) {
+ rc = hw_blk->ops.start(hw_blk);
+ if (rc) {
+ pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
+ goto error_start;
+ }
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+ hw_blk->id, refcount);
+ return hw_blk;
+
+error_start:
+ dpu_hw_blk_put(hw_blk);
+ return ERR_PTR(rc);
+}
+
+/**
+ * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
+ * @hw_blk: hw block to be freed
+ *
+ * The block's ops.stop callback, if any, runs when the refcount drops to zero.
+ */
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
+ atomic_read(&hw_blk->refcount));
+
+ if (!atomic_read(&hw_blk->refcount)) {
+ pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
+ return;
+ }
+
+ if (atomic_dec_return(&hw_blk->refcount))
+ return;
+
+ if (hw_blk->ops.stop)
+ hw_blk->ops.stop(hw_blk);
+}
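+
+/*
+ * Lifecycle sketch (the type/id values are hypothetical, and IS_ERR_OR_NULL
+ * assumes <linux/err.h>): initialize once, then balance every
+ * dpu_hw_blk_get() with a dpu_hw_blk_put(); ops.start fires on the 0 -> 1
+ * refcount transition and ops.stop on 1 -> 0.
+ */
+static int example_blk_lifecycle(struct dpu_hw_blk *blk)
+{
+ struct dpu_hw_blk *b;
+ int rc = dpu_hw_blk_init(blk, 0 /* type */, 0 /* id */, NULL);
+
+ if (rc)
+ return rc;
+
+ b = dpu_hw_blk_get(blk, 0, 0);
+ if (IS_ERR_OR_NULL(b))
+ return b ? PTR_ERR(b) : -ENODEV;
+
+ dpu_hw_blk_put(b);
+ dpu_hw_blk_destroy(blk);
+ return 0;
+}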
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
new file mode 100644
index 000000000000..0f4ca8af1ec5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_BLK_H
+#define _DPU_HW_BLK_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+struct dpu_hw_blk;
+
+/**
+ * struct dpu_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
+ */
+struct dpu_hw_blk_ops {
+ int (*start)(struct dpu_hw_blk *);
+ void (*stop)(struct dpu_hw_blk *);
+};
+
+/**
+ * struct dpu_hw_blk - definition of hardware block object
+ * @list: list of hardware blocks
+ * @type: hardware block type
+ * @id: instance id
+ * @refcount: reference/usage count
+ */
+struct dpu_hw_blk {
+ struct list_head list;
+ u32 type;
+ int id;
+ atomic_t refcount;
+ struct dpu_hw_blk_ops ops;
+};
+
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+ struct dpu_hw_blk_ops *ops);
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
+
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
+#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 000000000000..44ee06398b1d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_kms.h"
+
+#define VIG_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define MIXER_SDM845_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH 2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
+
+#define MAX_HORZ_DECIMATION 4
+#define MAX_VERT_DECIMATION 4
+
+#define MAX_UPSCALE_RATIO 20
+#define MAX_DOWNSCALE_RATIO 4
+#define SSPP_UNITY_SCALE 1
+
+#define STRCAT(X, Y) (X Y)
+
+/*************************************************************
+ * DPU sub blocks config
+ *************************************************************/
+/* DPU top level caps */
+static const struct dpu_caps sdm845_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+};
+
+static struct dpu_mdp_cfg sdm845_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x45C,
+ .features = 0,
+ .highest_bank_bit = 0x2,
+ .has_dest_scaler = true,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ },
+};
+
+/*************************************************************
+ * CTL sub blocks config
+ *************************************************************/
+static struct dpu_ctl_cfg sdm845_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xE4,
+ .features = 0
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0xE4,
+ .features = 0
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0xE4,
+ .features = 0
+ },
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+static const struct dpu_sspp_blks_common sdm845_sspp_common = {
+ .maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .maxhdeciexp = MAX_HORZ_DECIMATION,
+ .maxvdeciexp = MAX_VERT_DECIMATION,
+};
+
+#define _VIG_SBLK(num, sdma_pri) \
+ { \
+ .common = &sdm845_sspp_common, \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+ .id = DPU_SSPP_SCALER_QSEED3, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = STRCAT("sspp_csc", num), \
+ .id = DPU_SSPP_CSC_10BIT, \
+ .base = 0x1a00, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .virt_format_list = plane_formats, \
+ }
+
+#define _DMA_SBLK(num, sdma_pri) \
+ { \
+ .common = &sdm845_sspp_common, \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .format_list = plane_formats, \
+ .virt_format_list = plane_formats, \
+ }
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
+
+#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = VIG_SDM845_MASK, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = SSPP_TYPE_VIG, \
+ .clk_ctrl = _clkctrl \
+ }
+
+#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = DMA_SDM845_MASK, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = SSPP_TYPE_DMA, \
+ .clk_ctrl = _clkctrl \
+ }
+
+static struct dpu_sspp_cfg sdm845_sspp[] = {
+ SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
+ sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
+ SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
+ sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
+ SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
+ sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
+ SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
+ sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
+ SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
+ sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
+ SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
+ sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
+ SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
+ sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
+ SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
+ sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+};
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 11, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+ 0xb0, 0xc8, 0xe0, 0xf8, 0x110
+ },
+};
+
+#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x320, \
+ .features = MIXER_SDM845_MASK, \
+ .sblk = &sdm845_lm_sblk, \
+ .ds = _ds, \
+ .pingpong = _pp, \
+ .lm_pair_mask = (1 << _lmpair) \
+ }
+
+static struct dpu_lm_cfg sdm845_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
+};
+
+/*************************************************************
+ * DS sub blocks config
+ *************************************************************/
+static const struct dpu_ds_top_cfg sdm845_ds_top = {
+ .name = "ds_top_0", .id = DS_TOP,
+ .base = 0x60000, .len = 0xc,
+ .maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
+ .maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxupscale = MAX_UPSCALE_RATIO,
+};
+
+#define DS_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x800, \
+ .features = DPU_SSPP_SCALER_QSEED3, \
+ .top = &sdm845_ds_top \
+ }
+
+static struct dpu_ds_cfg sdm845_ds[] = {
+ DS_BLK("ds_0", DS_0, 0x800),
+ DS_BLK("ds_1", DS_1, 0x1000),
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+ .te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .version = 0x1},
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+#define PP_BLK_TE(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_SPLIT_MASK, \
+ .sblk = &sdm845_pp_sblk_te \
+ }
+#define PP_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_MASK, \
+ .sblk = &sdm845_pp_sblk \
+ }
+
+static struct dpu_pingpong_cfg sdm845_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+};
+
+/*************************************************************
+ * INTF sub blocks config
+ *************************************************************/
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x280, \
+ .type = _type, \
+ .controller_id = _ctrl_id, \
+ .prog_fetch_lines_worst_case = 24 \
+ }
+
+static struct dpu_intf_cfg sdm845_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+};
+
+/*************************************************************
+ * CDM sub blocks config
+ *************************************************************/
+static struct dpu_cdm_cfg sdm845_cdm[] = {
+ {
+ .name = "cdm_0", .id = CDM_0,
+ .base = 0x79200, .len = 0x224,
+ .features = 0,
+ .intf_connect = BIT(INTF_3),
+ },
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static struct dpu_vbif_cfg sdm845_vbif[] = {
+ {
+ .name = "vbif_0", .id = VBIF_0,
+ .base = 0, .len = 0x1040,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+ .priority_lvl = sdm845_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 14,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
+static struct dpu_reg_dma_cfg sdm845_regdma = {
+ .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+ {.fl = 4, .lut = 0x357},
+ {.fl = 5, .lut = 0x3357},
+ {.fl = 6, .lut = 0x23357},
+ {.fl = 7, .lut = 0x223357},
+ {.fl = 8, .lut = 0x2223357},
+ {.fl = 9, .lut = 0x22223357},
+ {.fl = 10, .lut = 0x222223357},
+ {.fl = 11, .lut = 0x2222223357},
+ {.fl = 12, .lut = 0x22222223357},
+ {.fl = 13, .lut = 0x222222223357},
+ {.fl = 14, .lut = 0x1222222223357},
+ {.fl = 0, .lut = 0x11222222223357}
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x344556677},
+ {.fl = 11, .lut = 0x3344556677},
+ {.fl = 12, .lut = 0x23344556677},
+ {.fl = 13, .lut = 0x223344556677},
+ {.fl = 14, .lut = 0x1223344556677},
+ {.fl = 0, .lut = 0x112233344556677},
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static struct dpu_perf_cfg sdm845_perf_data = {
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .core_ib_ff = "6.0",
+ .core_clk_ff = "1.0",
+ .comp_ratio_rt =
+ "NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
+ .comp_ratio_nrt =
+ "NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sdm845_qos_linear),
+ .entries = sdm845_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+ .entries = sdm845_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+ .entries = sdm845_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+};
+
+/*************************************************************
+ * Hardware catalog init
+ *************************************************************/
+
+/*
+ * sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ *dpu_cfg = (struct dpu_mdss_cfg){
+ .caps = &sdm845_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sdm845_mdp),
+ .mdp = sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .ds_count = ARRAY_SIZE(sdm845_ds),
+ .ds = sdm845_ds,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .cdm_count = ARRAY_SIZE(sdm845_cdm),
+ .cdm = sdm845_cdm,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = sdm845_regdma,
+ .perf = sdm845_perf_data,
+ };
+}
+
+static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+ { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
+ { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+};
+
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
+{
+ kfree(dpu_cfg);
+}
+
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+ int i;
+ struct dpu_mdss_cfg *dpu_cfg;
+
+ dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
+ if (!dpu_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+ if (cfg_handler[i].hw_rev == hw_rev) {
+ cfg_handler[i].cfg_init(dpu_cfg);
+ dpu_cfg->hwversion = hw_rev;
+ return dpu_cfg;
+ }
+ }
+
+ DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+ dpu_hw_catalog_deinit(dpu_cfg);
+ return ERR_PTR(-ENODEV);
+}
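+
+/*
+ * Usage sketch: hw_rev would normally come from the MDSS hardware revision
+ * register; callers must check IS_ERR() and pair a successful init with
+ * dpu_hw_catalog_deinit() (example_catalog_setup is a hypothetical caller).
+ */
+static struct dpu_mdss_cfg *example_catalog_setup(u32 hw_rev)
+{
+ struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(hw_rev);
+
+ if (IS_ERR(cfg))
+ return cfg;
+
+ /* ... use cfg->caps, cfg->sspp, cfg->intf, etc ... */
+ return cfg;
+}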
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644
index 000000000000..f0cb0d4fc80e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -0,0 +1,804 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <drm/drmP.h>
+
+/**
+ * Max hardware block count: for example, max 12 SSPP pipes or
+ * 5 ctl paths. In all cases the current design supports at most
+ * 12 hardware blocks of any one type.
+ */
+#define MAX_BLOCKS 12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28) |\
+ ((MINOR & 0xFFF) << 16) |\
+ (STEP & 0xFFFF))
+
+#define DPU_HW_MAJOR(rev) ((rev) >> 28)
+#define DPU_HW_MINOR(rev) (((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev) ((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev) ((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2) \
+ (DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
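+
+/*
+ * Worked example: DPU_HW_VER(4, 0, 1) packs to 0x40000001, so
+ * DPU_HW_MAJOR() returns 4, DPU_HW_MINOR() 0 and DPU_HW_STEP() 1,
+ * and it is major/minor-equal to DPU_HW_VER_400.
+ */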
+
+#define DPU_HW_VER_170 DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171 DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172 DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300 DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301 DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+
+#define DPU_HW_BLK_NAME_LEN 16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS 2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+ DPU_HW_UBWC_VER_10 = 0x100,
+ DPU_HW_UBWC_VER_20 = 0x200,
+ DPU_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev) ((rev) >= DPU_HW_UBWC_VER_20)
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0, This chipset supports Universal Bandwidth
+ * compression initial revision
+ * @DPU_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
+ * @DPU_MDP_MAX Maximum value
+ */
+enum {
+ DPU_MDP_PANIC_PER_PIPE = 0x1,
+ DPU_MDP_10BIT_SUPPORT,
+ DPU_MDP_BWC,
+ DPU_MDP_UBWC_1_0,
+ DPU_MDP_UBWC_1_5,
+ DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC, Support of Color space conversion
+ * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
+ * @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP Supports client driven prefetch
+ * @DPU_SSPP_MAX maximum value
+ */
+enum {
+ DPU_SSPP_SRC = 0x1,
+ DPU_SSPP_SCALER_QSEED2,
+ DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_RGB,
+ DPU_SSPP_CSC,
+ DPU_SSPP_CSC_10BIT,
+ DPU_SSPP_CURSOR,
+ DPU_SSPP_QOS,
+ DPU_SSPP_QOS_8LVL,
+ DPU_SSPP_EXCL_RECT,
+ DPU_SSPP_SMART_DMA_V1,
+ DPU_SSPP_SMART_DMA_V2,
+ DPU_SSPP_TS_PREFILL,
+ DPU_SSPP_TS_PREFILL_REC1,
+ DPU_SSPP_CDP,
+ DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC Gamma correction block
+ * @DPU_DIM_LAYER Layer mixer supports dim layer
+ * @DPU_MIXER_MAX maximum value
+ */
+enum {
+ DPU_MIXER_LAYER = 0x1,
+ DPU_MIXER_SOURCESPLIT,
+ DPU_MIXER_GC,
+ DPU_DIM_LAYER,
+ DPU_MIXER_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE Tear check block
+ * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER Dither block
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+ DPU_PINGPONG_TE = 0x1,
+ DPU_PINGPONG_TE2,
+ DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SLAVE,
+ DPU_PINGPONG_DITHER,
+ DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY CTL supports video mode split display
+ * @DPU_CTL_MAX
+ */
+enum {
+ DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX maximum value
+ */
+enum {
+ DPU_VBIF_QOS_OTLIM = 0x1,
+ DPU_VBIF_QOS_REMAP,
+ DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @len: length of hardware block
+ * @features bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len; \
+ unsigned long features
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this sub-block
+ * @base: offset of this sub-block relative to the block
+ * offset
+ * @len register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info: HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+struct dpu_csc_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * struct dpu_format_extended - define dpu specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ * framebuffer planes
+ */
+struct dpu_format_extended {
+ uint32_t fourcc_format;
+ uint64_t modifier;
+};
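+
+/*
+ * Example entry (taken from the format tables later in this patch):
+ * {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED} describes NV12
+ * in the Qualcomm compressed (UBWC) layout, while a modifier of 0
+ * selects the plain linear layout.
+ */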
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+ DPU_QOS_LUT_USAGE_LINEAR,
+ DPU_QOS_LUT_USAGE_MACROTILE,
+ DPU_QOS_LUT_USAGE_NRT,
+ DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct dpu_qos_lut_entry {
+ u32 fl;
+ u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entry in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+ u32 nentry;
+ struct dpu_qos_lut_entry *entries;
+};
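+
+/*
+ * Illustrative sketch (hypothetical values): a table whose first
+ * entry applies up to a fill level of 4 and whose zero-fl entry is
+ * the default fallback:
+ *
+ *	static struct dpu_qos_lut_entry example_qos[] = {
+ *		{.fl = 4, .lut = 0x357},
+ *		{.fl = 0, .lut = 0x3ff},
+ *	};
+ *	static struct dpu_qos_lut_tbl example_tbl = {
+ *		.nentry = ARRAY_SIZE(example_qos),
+ *		.entries = example_qos,
+ *	};
+ */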
+
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ * supported z order
+ * @qseed_type qseed2 or qseed3 support.
+ * @smart_dma_rev Supported version of SmartDMA feature.
+ * @ubwc_version UBWC feature version (0x0 for not supported)
+ * @has_src_split source split feature status
+ * @has_dim_layer dim layer feature status
+ * @has_idle_pc indicate if idle power collapse feature is supported
+ */
+struct dpu_caps {
+ u32 max_mixer_width;
+ u32 max_mixer_blendstages;
+ u32 qseed_type;
+ u32 smart_dma_rev;
+ u32 ubwc_version;
+ bool has_src_split;
+ bool has_dim_layer;
+ bool has_idle_pc;
+};
+
+/**
+ * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
+ * @maxlinewidth: max pixel width supported by this pipe
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @maxhdeciexp: max horizontal decimation supported by this pipe
+ * (max is 2^value)
+ * @maxvdeciexp: max vertical decimation supported by this pipe
+ * (max is 2^value)
+ */
+struct dpu_sspp_blks_common {
+ u32 maxlinewidth;
+ u32 pixel_ram_size;
+ u32 maxhdeciexp;
+ u32 maxvdeciexp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @common: Pointer to common configurations shared by sub blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported (without DECIMATION)
+ * @maxupscale: max upscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @src_blk: source fetch sub-block information
+ * @scaler_blk: scaler sub-block information
+ * @csc_blk: color space conversion sub-block information
+ * @hsic_blk: HSIC control sub-block information
+ * @memcolor_blk: memory color sub-block information
+ * @pcc_blk: polynomial color correction sub-block information
+ * @igc_blk: inverse gamma correction sub-block information
+ * @format_list: Pointer to list of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ */
+struct dpu_sspp_sub_blks {
+ const struct dpu_sspp_blks_common *common;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ u32 maxdwnscale;
+ u32 maxupscale;
+ u32 smart_dma_priority;
+ u32 max_per_pipe_bw;
+ struct dpu_src_blk src_blk;
+ struct dpu_scaler_blk scaler_blk;
+ struct dpu_pp_blk csc_blk;
+ struct dpu_pp_blk hsic_blk;
+ struct dpu_pp_blk memcolor_blk;
+ struct dpu_pp_blk pcc_blk;
+ struct dpu_pp_blk igc_blk;
+
+ const struct dpu_format_extended *format_list;
+ const struct dpu_format_extended *virt_format_list;
+};
+
+/**
+ * struct dpu_lm_sub_blks: information of mixer block
+ * @maxwidth: Max pixel width supported by this mixer
+ * @maxblendstages: Max number of blend-stages supported
+ * @blendstage_base: Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+ u32 maxwidth;
+ u32 maxblendstages;
+ u32 blendstage_base[MAX_BLOCKS];
+ struct dpu_pp_blk gc;
+};
+
+struct dpu_pingpong_sub_blks {
+ struct dpu_pp_blk te;
+ struct dpu_pp_blk te2;
+ struct dpu_pp_blk dither;
+};
+
+/**
+ * dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+ DPU_CLK_CTRL_NONE,
+ DPU_CLK_CTRL_VIG0,
+ DPU_CLK_CTRL_VIG1,
+ DPU_CLK_CTRL_VIG2,
+ DPU_CLK_CTRL_VIG3,
+ DPU_CLK_CTRL_VIG4,
+ DPU_CLK_CTRL_RGB0,
+ DPU_CLK_CTRL_RGB1,
+ DPU_CLK_CTRL_RGB2,
+ DPU_CLK_CTRL_RGB3,
+ DPU_CLK_CTRL_DMA0,
+ DPU_CLK_CTRL_DMA1,
+ DPU_CLK_CTRL_CURSOR0,
+ DPU_CLK_CTRL_CURSOR1,
+ DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+ DPU_CLK_CTRL_MAX,
+};
+
+/* struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off: register offset
+ * @bit_off: bit offset
+ */
+struct dpu_clk_ctrl_reg {
+ u32 reg_off;
+ u32 bit_off;
+};
+
+/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ * @highest_bank_bit: UBWC parameter
+ * @ubwc_static: ubwc static configuration
+ * @ubwc_swizzle: ubwc default swizzle setting
+ * @has_dest_scaler: indicates support of destination scaler
+ * @clk_ctrls clock control register definition
+ */
+struct dpu_mdp_cfg {
+ DPU_HW_BLK_INFO;
+ u32 highest_bank_bit;
+ u32 ubwc_static;
+ u32 ubwc_swizzle;
+ bool has_dest_scaler;
+ struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/* struct dpu_ctl_cfg : CTL PATH instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ */
+struct dpu_ctl_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: SSPP sub-blocks information
+ * @xin_id: bus client identifier
+ * @clk_ctrl clock control identifier
+ * @type sspp type identifier
+ */
+struct dpu_sspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_sspp_sub_blks *sblk;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
+ u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: LM Sub-blocks information
+ * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @ds: ID of connected DS, DS_MAX if unsupported
+ * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_lm_sub_blks *sblk;
+ u32 pingpong;
+ u32 ds;
+ unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_ds_top_cfg - information of dest scaler top
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying features
+ * @version hw version of dest scaler
+ * @maxinputwidth maximum input line width
+ * @maxoutputwidth maximum output line width
+ * @maxupscale maximum upscale ratio
+ */
+struct dpu_ds_top_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 maxinputwidth;
+ u32 maxoutputwidth;
+ u32 maxupscale;
+};
+
+/**
+ * struct dpu_ds_cfg - information of dest scaler blocks
+ * @id enum identifying this block
+ * @base register offset wrt DS top offset
+ * @features bit mask identifying features
+ * @version hw version of the qseed block
+ * @top DS top information
+ */
+struct dpu_ds_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ const struct dpu_ds_top_cfg *top;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk sub-blocks information
+ */
+struct dpu_pingpong_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @intf_connect Bitmask of INTF IDs this CDM can connect to
+ */
+struct dpu_cdm_cfg {
+ DPU_HW_BLK_INFO;
+ unsigned long intf_connect;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @type: Interface type (DSI, DP, HDMI)
+ * @controller_id: Controller Instance ID in case of multiple of intf type
+ * @prog_fetch_lines_worst_case Worst case number of lines needed to prefetch
+ */
+struct dpu_intf_cfg {
+ DPU_HW_BLK_INFO;
+ u32 type; /* interface type */
+ u32 controller_id;
+ u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps pixels per second
+ * @ot_limit OT limit to use up to specified pixel per second
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+ u64 pps;
+ u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count length of cfg
+ * @cfg pointer to array of configuration settings with
+ * ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+ u32 count;
+ struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl num of priority level
+ * @priority_lvl pointer to array of priority level in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+ u32 npriority_lvl;
+ u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit default OT read limit
+ * @default_ot_wr_limit default OT write limit
+ * @xin_halt_timeout maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl dynamic OT write configuration table
+ * @qos_rt_tbl real-time QoS priority table
+ * @qos_nrt_tbl non-real-time QoS priority table
+ * @memtype_count number of defined memtypes
+ * @memtype array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+ DPU_HW_BLK_INFO;
+ u32 default_ot_rd_limit;
+ u32 default_ot_wr_limit;
+ u32 xin_halt_timeout;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+ struct dpu_vbif_qos_tbl qos_rt_tbl;
+ struct dpu_vbif_qos_tbl qos_nrt_tbl;
+ u32 memtype_count;
+ u32 memtype[MAX_XIN_COUNT];
+};
+
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @version version of lutdma hw block
+ * @trigger_sel_off offset to trigger select registers of lutdma
+ */
+struct dpu_reg_dma_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 trigger_sel_off;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+ DPU_PERF_CDP_USAGE_RT,
+ DPU_PERF_CDP_USAGE_NRT,
+ DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+ bool rd_enable;
+ bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low low threshold of maximum bandwidth (kbps)
+ * @max_bw_high high threshold of maximum bandwidth (kbps)
+ * @min_core_ib minimum mnoc ib vote in kbps
+ * @min_llcc_ib minimum llcc ib vote in kbps
+ * @min_dram_ib minimum dram ib vote in kbps
+ * @core_ib_ff core instantaneous bandwidth fudge factor
+ * @core_clk_ff core clock fudge factor
+ * @comp_ratio_rt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @comp_ratio_nrt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @undersized_prefill_lines undersized prefill in lines
+ * @xtra_prefill_lines extra prefill latency in lines
+ * @dest_scale_prefill_lines destination scaler latency in lines
+ * @macrotile_prefill_lines macrotile latency in lines
+ * @yuv_nv12_prefill_lines yuv_nv12 latency in lines
+ * @linear_prefill_lines linear latency in lines
+ * @downscaling_prefill_lines downscaling latency in lines
+ * @amortizable_threshold minimum y position for traffic shaping prefill
+ * @min_prefill_lines minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg cdp use case configurations
+ */
+struct dpu_perf_cfg {
+ u32 max_bw_low;
+ u32 max_bw_high;
+ u32 min_core_ib;
+ u32 min_llcc_ib;
+ u32 min_dram_ib;
+ const char *core_ib_ff;
+ const char *core_clk_ff;
+ const char *comp_ratio_rt;
+ const char *comp_ratio_nrt;
+ u32 undersized_prefill_lines;
+ u32 xtra_prefill_lines;
+ u32 dest_scale_prefill_lines;
+ u32 macrotile_prefill_lines;
+ u32 yuv_nv12_prefill_lines;
+ u32 linear_prefill_lines;
+ u32 downscaling_prefill_lines;
+ u32 amortizable_threshold;
+ u32 min_prefill_lines;
+ u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
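+
+/*
+ * Decoding example: the sdm845 catalog sets .comp_ratio_rt to
+ * "NV12/5/1/1.23 ...", i.e. fourcc NV12, vendor 5, modifier 1,
+ * compression ratio 1.23, following the
+ * <fourcc>/<ven>/<mod>/<comp ratio> grammar documented above.
+ */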
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of all the MDSS HW sub-blocks.
+ *
+ * @dma_formats Supported formats for dma pipe
+ * @cursor_formats Supported formats for cursor pipe
+ * @vig_formats Supported formats for vig pipe
+ */
+struct dpu_mdss_cfg {
+ u32 hwversion;
+
+ const struct dpu_caps *caps;
+
+ u32 mdp_count;
+ struct dpu_mdp_cfg *mdp;
+
+ u32 ctl_count;
+ struct dpu_ctl_cfg *ctl;
+
+ u32 sspp_count;
+ struct dpu_sspp_cfg *sspp;
+
+ u32 mixer_count;
+ struct dpu_lm_cfg *mixer;
+
+ u32 ds_count;
+ struct dpu_ds_cfg *ds;
+
+ u32 pingpong_count;
+ struct dpu_pingpong_cfg *pingpong;
+
+ u32 cdm_count;
+ struct dpu_cdm_cfg *cdm;
+
+ u32 intf_count;
+ struct dpu_intf_cfg *intf;
+
+ u32 vbif_count;
+ struct dpu_vbif_cfg *vbif;
+
+ u32 reg_dma_count;
+ struct dpu_reg_dma_cfg dma_cfg;
+
+ u32 ad_count;
+
+ /* Add additional block data structures here */
+
+ struct dpu_perf_cfg perf;
+ struct dpu_format_extended *dma_formats;
+ struct dpu_format_extended *cursor_formats;
+ struct dpu_format_extended *vig_formats;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+ u32 hw_rev;
+ void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
+};
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DS(s) ((s)->ds)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API; retrieves the
+ * hardcoded target specific catalog information into a config structure
+ * @hw_rev: caller needs to provide the hardware revision.
+ *
+ * Return: dpu config structure
+ */
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+/**
+ * dpu_hw_catalog_deinit - dpu hardware catalog cleanup
+ * @dpu_cfg: pointer returned from init function
+ */
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
+
+/**
+ * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
+ * @cfg: pointer to sspp cfg
+ */
+static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
+{
+ return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
+}
+#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
new file mode 100644
index 000000000000..3c9f028628ef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+
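+/*
+ * Each table below pairs a fourcc code with a format modifier; a
+ * modifier of 0 selects the plain linear layout and a trailing
+ * {0, 0} entry, where present, terminates the list.
+ */
+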
+static const struct dpu_format_extended plane_formats[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended plane_formats_yuv[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+
+ {DRM_FORMAT_NV12, 0},
+ {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_NV21, 0},
+ {DRM_FORMAT_NV16, 0},
+ {DRM_FORMAT_NV61, 0},
+ {DRM_FORMAT_VYUY, 0},
+ {DRM_FORMAT_UYVY, 0},
+ {DRM_FORMAT_YUYV, 0},
+ {DRM_FORMAT_YVYU, 0},
+ {DRM_FORMAT_YUV420, 0},
+ {DRM_FORMAT_YVU420, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended cursor_formats[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended wb2_formats[] = {
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+
+ {DRM_FORMAT_YUV420, 0},
+ {DRM_FORMAT_NV12, 0},
+ {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_NV16, 0},
+ {DRM_FORMAT_YUYV, 0},
+
+ {0, 0},
+};
+
+static const struct dpu_format_extended rgb_10bit_formats[] = {
+ {DRM_FORMAT_BGRA1010102, 0},
+ {DRM_FORMAT_BGRX1010102, 0},
+ {DRM_FORMAT_RGBA1010102, 0},
+ {DRM_FORMAT_RGBX1010102, 0},
+ {DRM_FORMAT_ABGR2101010, 0},
+ {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XBGR2101010, 0},
+ {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ARGB2101010, 0},
+ {DRM_FORMAT_XRGB2101010, 0},
+};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644
index 000000000000..554874ba0c3b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE 0x000
+#define CDM_CSC_10_BASE 0x004
+
+#define CDM_CDWN2_OP_MODE 0x100
+#define CDM_CDWN2_CLAMP_OUT 0x104
+#define CDM_CDWN2_PARAMS_3D_0 0x108
+#define CDM_CDWN2_PARAMS_3D_1 0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
+#define CDM_CDWN2_COEFF_COSITE_V 0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
+#define CDM_CDWN2_OUT_SIZE 0x130
+
+#define CDM_HDMI_PACK_OP_MODE 0x200
+#define CDM_CSC_10_MATRIX_COEFF_0 0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct dpu_csc_cfg rgb2yuv_cfg = {
+ {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ { 0x00, 0x00, 0x00 },
+ { 0x0040, 0x0200, 0x0200 },
+ { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+ { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
+static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->cdm_count; i++) {
+ if (cdm == m->cdm[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->cdm[i].base;
+ b->length = m->cdm[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_CDM;
+ return &m->cdm[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
+ struct dpu_csc_cfg *data)
+{
+ dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+ return 0;
+}
+
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
+ struct dpu_hw_cdm_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 opmode = 0;
+ u32 out_size = 0;
+
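+ /*
+ * CDM_CDWN2_OP_MODE layout as programmed below: bit 0 enables the
+ * module, bits 1/2 enable horizontal/vertical downsampling, bits
+ * [4:3] and [6:5] select the H/V method, and bit 7 selects 8-bit
+ * rather than 10-bit output.
+ */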
+ if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+ opmode &= ~BIT(7);
+ else
+ opmode |= BIT(7);
+
+ /* ENABLE DWNS_H bit */
+ opmode |= BIT(1);
+
+ switch (cfg->h_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_H field */
+ opmode &= ~(0x18);
+ /* CLEAR DWNS_H bit */
+ opmode &= ~BIT(1);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_H field (pixel drop is 0) */
+ opmode &= ~(0x18);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_H field (Average is 0x1) */
+ opmode &= ~(0x18);
+ opmode |= (0x1 << 0x3);
+ break;
+ case CDM_CDWN_COSITE:
+ /* Clear METHOD_H field (Average is 0x2) */
+ opmode &= ~(0x18);
+ opmode |= (0x2 << 0x3);
+ /* Co-site horizontal coefficients */
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+ cosite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+ cosite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+ cosite_h_coeff[2]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ /* Clear METHOD_H field (Average is 0x3) */
+ opmode &= ~(0x18);
+ opmode |= (0x3 << 0x3);
+
+ /* Off-site horizontal coefficients */
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+ offsite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+ offsite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+ offsite_h_coeff[2]);
+ break;
+ default:
+ pr_err("%s invalid horz down sampling type\n", __func__);
+ return -EINVAL;
+ }
+
+ /* ENABLE DWNS_V bit */
+ opmode |= BIT(2);
+
+ switch (cfg->v_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_V field */
+ opmode &= ~(0x60);
+ /* CLEAR DWNS_V bit */
+ opmode &= ~BIT(2);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_V field (pixel drop is 0) */
+ opmode &= ~(0x60);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_V field (Average is 0x1) */
+ opmode &= ~(0x60);
+ opmode |= (0x1 << 0x5);
+ break;
+ case CDM_CDWN_COSITE:
+ /* Clear METHOD_V field (Average is 0x2) */
+ opmode &= ~(0x60);
+ opmode |= (0x2 << 0x5);
+ /* Co-site vertical coefficients */
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_COSITE_V,
+ cosite_v_coeff[0]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ /* Clear METHOD_V field (Average is 0x3) */
+ opmode &= ~(0x60);
+ opmode |= (0x3 << 0x5);
+
+ /* Off-site vertical coefficients */
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_OFFSITE_V,
+ offsite_v_coeff[0]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+ opmode |= BIT(0); /* EN CDWN module */
+ else
+ opmode &= ~BIT(0);
+
+ out_size = (cfg->output_width & 0xFFFF) |
+ ((cfg->output_height & 0xFFFF) << 16);
+ DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+ DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+ DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+ ((0x3FF << 16) | 0x0));
+
+ return 0;
+}
+
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
+ struct dpu_hw_cdm_cfg *cdm)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt = cdm->output_fmt;
+ struct cdm_output_cfg cdm_cfg = { 0 };
+ u32 opmode = 0;
+ u32 csc = 0;
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return -EINVAL;
+
+ if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+ if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+ return -EINVAL; /* unsupported format */
+ opmode = BIT(0);
+ opmode |= (fmt->chroma_sample << 1);
+ cdm_cfg.intf_en = true;
+ }
+
+ csc |= BIT(2);
+ csc &= ~BIT(1);
+ csc |= BIT(0);
+
+ if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+ ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+ DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+ DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+ return 0;
+}
+
+static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
+{
+ struct cdm_output_cfg cdm_cfg = { 0 };
+
+ if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+ ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
+ ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
+ ops->enable = dpu_hw_cdm_enable;
+ ops->disable = dpu_hw_cdm_disable;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m,
+ struct dpu_hw_mdp *hw_mdp)
+{
+ struct dpu_hw_cdm *c;
+ struct dpu_cdm_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _cdm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_cdm_ops(&c->ops, c->caps->features);
+ c->hw_mdp = hw_mdp;
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ /*
+ * Perform any default initialization for the chroma down module:
+ * set up the default csc coefficients
+ */
+ dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
+{
+ if (cdm)
+ dpu_hw_blk_destroy(&cdm->base);
+ kfree(cdm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644
index 000000000000..5cceb1ecb8e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_cdm;
+
+struct dpu_hw_cdm_cfg {
+ u32 output_width;
+ u32 output_height;
+ u32 output_bit_depth;
+ u32 h_cdwn_type;
+ u32 v_cdwn_type;
+ const struct dpu_format *output_fmt;
+ u32 output_type;
+ int flags;
+};
+
+enum dpu_hw_cdwn_type {
+ CDM_CDWN_DISABLE,
+ CDM_CDWN_PIXEL_DROP,
+ CDM_CDWN_AVG,
+ CDM_CDWN_COSITE,
+ CDM_CDWN_OFFSITE,
+};
+
+enum dpu_hw_cdwn_output_type {
+ CDM_CDWN_OUTPUT_HDMI,
+ CDM_CDWN_OUTPUT_WB,
+};
+
+enum dpu_hw_cdwn_output_bit_depth {
+ CDM_CDWN_OUTPUT_8BIT,
+ CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions
+ * Assumption is these functions will be called after
+ * clocks are enabled
+ * @setup_csc_data: Programs the csc matrix
+ * @setup_cdwn: Sets up the chroma down sub module
+ * @enable: Enables the output to interface and programs the
+ * output packer
+ * @disable: Puts the cdm in bypass mode
+ */
+struct dpu_hw_cdm_ops {
+ /**
+ * Programs the CSC matrix for conversion from RGB space to YUV space.
+ * Calling this function is optional since the matrix is set
+ * automatically during initialization; use it to program a matrix
+ * other than the default one.
+ * @cdm: Pointer to the chroma down context structure
+ * @data Pointer to CSC configuration data
+ * return: 0 if success; error code otherwise
+ */
+ int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
+ struct dpu_csc_cfg *data);
+
+ /**
+ * Programs the Chroma downsample part.
+ * @cdm Pointer to chroma down context
+ */
+ int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
+ struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Enable the CDM module
+ * @cdm Pointer to chroma down context
+ */
+ int (*enable)(struct dpu_hw_cdm *cdm,
+ struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Disable the CDM module
+ * @cdm Pointer to chroma down context
+ */
+ void (*disable)(struct dpu_hw_cdm *cdm);
+};
+
+struct dpu_hw_cdm {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* chroma down */
+ const struct dpu_cdm_cfg *caps;
+ enum dpu_cdm idx;
+
+ /* mdp top hw driver */
+ struct dpu_hw_mdp *hw_mdp;
+
+ /* ops */
+ struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * to_dpu_hw_cdm - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * should be called once before accessing each cdm instance.
+ * @idx: cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m,
+ struct dpu_hw_mdp *hw_mdp);
+
+/**
+ * dpu_hw_cdm_destroy - destroys CDM driver context
+ * @cdm: pointer to CDM driver context
+ */
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
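+
+/*
+ * Usage sketch (illustrative; "mmio", "catalog" and "hw_mdp" are
+ * hypothetical caller-owned handles, error handling trimmed):
+ *
+ *	struct dpu_hw_cdm *cdm;
+ *
+ *	cdm = dpu_hw_cdm_init(CDM_0, mmio, catalog, hw_mdp);
+ *	if (IS_ERR(cdm))
+ *		return PTR_ERR(cdm);
+ *	...
+ *	dpu_hw_cdm_destroy(cdm);
+ */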
+
+#endif /*_DPU_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 000000000000..06be7cf7ce50
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,540 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT3(lm) \
+ (0xA0 + (((lm) - LM_0) * 0x004))
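+
+/*
+ * Worked example: CTL_LAYER(LM_2) resolves to 0x008, while LM_5 is
+ * special-cased to 0x024; the EXT/EXT2/EXT3 banks start at 0x40,
+ * 0x70 and 0xA0 and stride by 4 bytes per mixer.
+ */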
+#define CTL_TOP 0x014
+#define CTL_FLUSH 0x018
+#define CTL_START 0x01C
+#define CTL_PREPARE 0x0d0
+#define CTL_SW_RESET 0x030
+#define CTL_LAYER_EXTN_OFFSET 0x40
+
+#define CTL_MIXER_BORDER_OUT BIT(24)
+#define CTL_FLUSH_MASK_CTL BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US 2000
+
+static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->ctl_count; i++) {
+ if (ctl == m->ctl[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->ctl[i].base;
+ b->length = m->ctl[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_CTL;
+ return &m->ctl[i];
+ }
+ }
+ return ERR_PTR(-ENOMEM);
+}
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+ enum dpu_lm lm)
+{
+ int i;
+ int stages = -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ if (lm == mixer[i].id) {
+ stages = mixer[i].sblk->maxblendstages;
+ break;
+ }
+ }
+
+ return stages;
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
+{
+ ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ if (!ctx)
+ return 0x0;
+
+ return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, CTL_FLUSH);
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp sspp)
+{
+ uint32_t flushbits = 0;
+
+ switch (sspp) {
+ case SSPP_VIG0:
+ flushbits = BIT(0);
+ break;
+ case SSPP_VIG1:
+ flushbits = BIT(1);
+ break;
+ case SSPP_VIG2:
+ flushbits = BIT(2);
+ break;
+ case SSPP_VIG3:
+ flushbits = BIT(18);
+ break;
+ case SSPP_RGB0:
+ flushbits = BIT(3);
+ break;
+ case SSPP_RGB1:
+ flushbits = BIT(4);
+ break;
+ case SSPP_RGB2:
+ flushbits = BIT(5);
+ break;
+ case SSPP_RGB3:
+ flushbits = BIT(19);
+ break;
+ case SSPP_DMA0:
+ flushbits = BIT(11);
+ break;
+ case SSPP_DMA1:
+ flushbits = BIT(12);
+ break;
+ case SSPP_DMA2:
+ flushbits = BIT(24);
+ break;
+ case SSPP_DMA3:
+ flushbits = BIT(25);
+ break;
+ case SSPP_CURSOR0:
+ flushbits = BIT(22);
+ break;
+ case SSPP_CURSOR1:
+ flushbits = BIT(23);
+ break;
+ default:
+ break;
+ }
+
+ return flushbits;
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm)
+{
+ uint32_t flushbits = 0;
+
+ switch (lm) {
+ case LM_0:
+ flushbits = BIT(6);
+ break;
+ case LM_1:
+ flushbits = BIT(7);
+ break;
+ case LM_2:
+ flushbits = BIT(8);
+ break;
+ case LM_3:
+ flushbits = BIT(9);
+ break;
+ case LM_4:
+ flushbits = BIT(10);
+ break;
+ case LM_5:
+ flushbits = BIT(20);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ flushbits |= CTL_FLUSH_MASK_CTL;
+
+ return flushbits;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ *flushbits |= BIT(31);
+ break;
+ case INTF_1:
+ *flushbits |= BIT(30);
+ break;
+ case INTF_2:
+ *flushbits |= BIT(29);
+ break;
+ case INTF_3:
+ *flushbits |= BIT(28);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_cdm cdm)
+{
+ switch (cdm) {
+ case CDM_0:
+ *flushbits |= BIT(26);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ ktime_t timeout;
+ u32 status;
+
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ /*
+ * it takes around 30us for mdp to finish resetting its ctl path;
+ * poll every 20-50us so that the reset should be complete by the
+ * first poll
+ */
+ do {
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x1;
+ if (status)
+ usleep_range(20, 50);
+ } while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+ return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+ DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 status;
+
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x01;
+ if (!status)
+ return 0;
+
+ pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+ pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i;
+
+ for (i = 0; i < ctx->mixer_count; i++) {
+ DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+ }
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+ u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+ int i, j;
+ int stages;
+ int pipes_per_stage;
+
+ stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+ if (stages < 0)
+ return;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT,
+ &ctx->mixer_hw_caps->features))
+ pipes_per_stage = PIPES_PER_STAGE;
+ else
+ pipes_per_stage = 1;
+
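+ /*
+ * Register encoding used below: most pipes split their stage
+ * index between a 3-bit field in CTL_LAYER and an overflow bit
+ * in CTL_LAYER_EXT, while cursor pipes, DMA2/DMA3 and multirect
+ * RECT_1 rectangles use 4-bit fields in the EXT/EXT2/EXT3
+ * registers.
+ */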
+ mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+ if (!stage_cfg)
+ goto exit;
+
+ for (i = 0; i <= stages; i++) {
+ /* overflow to ext register if 'i + 1 > 7' */
+ mix = (i + 1) & 0x7;
+ ext = i >= 7;
+
+ for (j = 0 ; j < pipes_per_stage; j++) {
+ enum dpu_sspp_multirect_index rect_index =
+ stage_cfg->multirect_index[i][j];
+
+ switch (stage_cfg->stage[i][j]) {
+ case SSPP_VIG0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+ } else {
+ mixercfg |= mix << 0;
+ mixercfg_ext |= ext << 0;
+ }
+ break;
+ case SSPP_VIG1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+ } else {
+ mixercfg |= mix << 3;
+ mixercfg_ext |= ext << 2;
+ }
+ break;
+ case SSPP_VIG2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 6;
+ mixercfg_ext |= ext << 4;
+ }
+ break;
+ case SSPP_VIG3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 26;
+ mixercfg_ext |= ext << 6;
+ }
+ break;
+ case SSPP_RGB0:
+ mixercfg |= mix << 9;
+ mixercfg_ext |= ext << 8;
+ break;
+ case SSPP_RGB1:
+ mixercfg |= mix << 12;
+ mixercfg_ext |= ext << 10;
+ break;
+ case SSPP_RGB2:
+ mixercfg |= mix << 15;
+ mixercfg_ext |= ext << 12;
+ break;
+ case SSPP_RGB3:
+ mixercfg |= mix << 29;
+ mixercfg_ext |= ext << 14;
+ break;
+ case SSPP_DMA0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 18;
+ mixercfg_ext |= ext << 16;
+ }
+ break;
+ case SSPP_DMA1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 21;
+ mixercfg_ext |= ext << 18;
+ }
+ break;
+ case SSPP_DMA2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 0;
+ }
+ break;
+ case SSPP_DMA3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 4;
+ }
+ break;
+ case SSPP_CURSOR0:
+ mixercfg_ext |= ((i + 1) & 0xF) << 20;
+ break;
+ case SSPP_CURSOR1:
+ mixercfg_ext |= ((i + 1) & 0xF) << 26;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+exit:
+ DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+}
+
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_cfg = 0;
+
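+ /*
+ * CTL_TOP layout as programmed below: bits [7:4] select the
+ * interface, bit 17 and bits [16:15] pick command mode and its
+ * stream, and bit 19 plus bits [21:20] enable and select the 3D
+ * merge mode.
+ */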
+ intf_cfg |= (cfg->intf & 0xF) << 4;
+
+ if (cfg->mode_3d) {
+ intf_cfg |= BIT(19);
+ intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+ }
+
+ switch (cfg->intf_mode_sel) {
+ case DPU_CTL_MODE_SEL_VID:
+ intf_cfg &= ~BIT(17);
+ intf_cfg &= ~(0x3 << 15);
+ break;
+ case DPU_CTL_MODE_SEL_CMD:
+ intf_cfg |= BIT(17);
+ intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+ break;
+ default:
+ pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+ return;
+ }
+
+ DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+ unsigned long cap)
+{
+ ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+ ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+ ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+ ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+ ops->trigger_start = dpu_hw_ctl_trigger_start;
+ ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ ops->reset = dpu_hw_ctl_reset_control;
+ ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+ ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+ ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+ ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
+ ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+ ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_ctl *c;
+ struct dpu_ctl_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _ctl_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_ctl %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->caps = cfg;
+ _setup_ctl_ops(&c->ops, c->caps->features);
+ c->idx = idx;
+ c->mixer_count = m->mixer_count;
+ c->mixer_hw_caps = m->mixer;
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+ if (ctx)
+ dpu_hw_blk_destroy(&ctx->base);
+ kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 000000000000..c66a71f8b839
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_blk.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID: Video mode interface
+ * DPU_CTL_MODE_SEL_CMD: Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+ DPU_CTL_MODE_SEL_VID = 0,
+ DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+ enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+ enum dpu_sspp_multirect_index multirect_index
+ [DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface
+ * @intf : Interface id
+ * @mode_3d: 3d mux configuration
+ * @intf_mode_sel: Interface mode, cmd / vid
+ * @stream_sel: Stream selection for multi-stream interfaces
+ */
+struct dpu_hw_intf_cfg {
+ enum dpu_intf intf;
+ enum dpu_3d_blend_mode mode_3d;
+ enum dpu_ctl_mode_sel intf_mode_sel;
+ int stream_sel;
+};
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the ctl path Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+ /**
+ * kickoff hw operation for SW controlled interfaces
+ * (DSI cmd mode and WB interface are SW controlled)
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * flag that kickoff prepare is in progress, for SW
+ * controlled interfaces (DSI cmd mode and WB interface
+ * are SW controlled)
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Clear the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Query the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
+ * Write the value of the pending_flush_mask to hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Read the value of the flush register
+ * @ctx : ctl path ctx pointer
+ * @Return: value of the ctl flush register.
+ */
+ u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Setup ctl_path interface config
+ * @ctx
+ * @cfg : interface config structure pointer
+ */
+ void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
+ int (*reset)(struct dpu_hw_ctl *c);
+
+ /*
+ * wait_reset_status - checks ctl reset status
+ * @ctx : ctl path ctx pointer
+ *
+ * This function checks the ctl reset status bit.
+ * If the reset bit is set, it keeps polling the status till the hw
+ * reset is complete.
+ * Returns: 0 on success or -error if reset incomplete within interval
+ */
+ int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+ uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp blk);
+
+ uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm blk);
+
+ int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits,
+ enum dpu_intf blk);
+
+ int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits,
+ enum dpu_cdm blk);
+
+ /**
+ * Set all blend stages to disabled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Configure layer mixer to pipe configuration
+ * @ctx : ctl path ctx pointer
+ * @lm : layer mixer enumeration
+ * @cfg : blend stage configuration
+ */
+ void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* ctl path */
+ int idx;
+ const struct dpu_ctl_cfg *caps;
+ int mixer_count;
+ const struct dpu_lm_cfg *mixer_hw_caps;
+ u32 pending_flush_mask;
+
+ /* ops */
+ struct dpu_hw_ctl_ops ops;
+};
+
+/**
+ * to_dpu_hw_ctl - convert base hw block object to a dpu_hw_ctl container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl path registers.
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_ctl_destroy(): Destroys ctl driver context
+ * Should be called to free the context.
+ * @ctx: pointer to ctl driver context
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /* _DPU_HW_CTL_H */
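For orientation, a minimal sketch of driving the CTL interface declared above. This is a hypothetical caller, not part of the patch: mdp_base and catalog stand in for the caller's register mapping and catalog data, LM_0 is assumed from the dpu_lm enum (not shown in this hunk), and dpu_hw_ctl_init() is assumed to return an ERR_PTR on failure like dpu_hw_intr_init() below.

	static int demo_ctl_kickoff(void __iomem *mdp_base,
				    struct dpu_mdss_cfg *catalog)
	{
		struct dpu_hw_ctl *ctl = dpu_hw_ctl_init(CTL_0, mdp_base, catalog);
		struct dpu_hw_stage_cfg stage_cfg = { 0 };
		u32 flush = 0;

		if (IS_ERR(ctl))
			return PTR_ERR(ctl);

		/* blend VIG0 at the bottom stage of mixer LM_0 */
		stage_cfg.stage[0][0] = SSPP_VIG0;
		ctl->ops.setup_blendstage(ctl, LM_0, &stage_cfg);

		/* accumulate flush bits in the cached mask, write, then kick */
		flush |= ctl->ops.get_bitmask_mixer(ctl, LM_0);
		flush |= ctl->ops.get_bitmask_sspp(ctl, SSPP_VIG0);
		ctl->ops.update_pending_flush(ctl, flush);
		ctl->ops.trigger_flush(ctl);
		ctl->ops.trigger_start(ctl);

		return 0;
	}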
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644
index 000000000000..c0b7f0049365
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -0,0 +1,1183 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF 0x0
+#define MDP_INTF_0_OFF 0x6A000
+#define MDP_INTF_1_OFF 0x6A800
+#define MDP_INTF_2_OFF 0x6B000
+#define MDP_INTF_3_OFF 0x6B800
+#define MDP_INTF_4_OFF 0x6C000
+#define MDP_AD4_0_OFF 0x7C000
+#define MDP_AD4_1_OFF 0x7D000
+#define MDP_AD4_INTR_EN_OFF 0x41c
+#define MDP_AD4_INTR_CLEAR_OFF 0x424
+#define MDP_AD4_INTR_STATUS_OFF 0x420
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define DPU_INTR_WB_0_DONE BIT(0)
+#define DPU_INTR_WB_1_DONE BIT(1)
+#define DPU_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define DPU_INTR_WD_TIMER_0_DONE BIT(2)
+#define DPU_INTR_WD_TIMER_1_DONE BIT(3)
+#define DPU_INTR_WD_TIMER_2_DONE BIT(5)
+#define DPU_INTR_WD_TIMER_3_DONE BIT(6)
+#define DPU_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_DONE BIT(8)
+#define DPU_INTR_PING_PONG_1_DONE BIT(9)
+#define DPU_INTR_PING_PONG_2_DONE BIT(10)
+#define DPU_INTR_PING_PONG_3_DONE BIT(11)
+#define DPU_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define DPU_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define DPU_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define DPU_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define DPU_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define DPU_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define DPU_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define DPU_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define DPU_INTR_INTF_0_UNDERRUN BIT(24)
+#define DPU_INTR_INTF_1_UNDERRUN BIT(26)
+#define DPU_INTR_INTF_2_UNDERRUN BIT(28)
+#define DPU_INTR_INTF_3_UNDERRUN BIT(30)
+#define DPU_INTR_INTF_0_VSYNC BIT(25)
+#define DPU_INTR_INTF_1_VSYNC BIT(27)
+#define DPU_INTR_INTF_2_VSYNC BIT(29)
+#define DPU_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define DPU_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define DPU_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define DPU_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define DPU_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define DPU_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define DPU_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define DPU_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define DPU_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define DPU_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define DPU_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Ctl start interrupt status bit definitions
+ */
+#define DPU_INTR_CTL_0_START BIT(9)
+#define DPU_INTR_CTL_1_START BIT(10)
+#define DPU_INTR_CTL_2_START BIT(11)
+#define DPU_INTR_CTL_3_START BIT(12)
+#define DPU_INTR_CTL_4_START BIT(13)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define DPU_INTR_CWB_2_OVERFLOW BIT(14)
+#define DPU_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_DONE BIT(0)
+#define DPU_INTR_HIST_VIG_1_DONE BIT(4)
+#define DPU_INTR_HIST_VIG_2_DONE BIT(8)
+#define DPU_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define DPU_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define DPU_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define DPU_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_DONE BIT(12)
+#define DPU_INTR_HIST_DSPP_1_DONE BIT(16)
+#define DPU_INTR_HIST_DSPP_2_DONE BIT(20)
+#define DPU_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define DPU_INTR_VIDEO_INTO_STATIC BIT(0)
+#define DPU_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define DPU_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define DPU_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define DPU_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define DPU_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define DPU_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define DPU_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define DPU_INTR_PROG_LINE BIT(8)
+
+/**
+ * AD4 interrupt status bit definitions
+ */
+#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
+#define DPU_INTR_DARKENH_UPDATED BIT(3)
+#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
+#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
+
+/**
+ * struct dpu_intr_reg - offsets for one DPU interrupt register set
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct dpu_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/**
+ * struct dpu_irq_type - maps an irq to its HW interface/block
+ * @intr_type: type of interrupt listed in dpu_intr_type
+ * @instance_idx: instance index of the associated HW block in DPU
+ * @irq_mask: corresponding bit in the interrupt status reg
+ * @reg_idx: which reg set to use
+ */
+struct dpu_irq_type {
+ u32 intr_type;
+ u32 instance_idx;
+ u32 irq_mask;
+ u32 reg_idx;
+};
+
+/**
+ * List of DPU interrupt registers
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+ {
+ MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR_EN,
+ MDP_SSPP_TOP0_OFF+INTR_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR2_EN,
+ MDP_SSPP_TOP0_OFF+INTR2_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+ },
+ {
+ MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF+INTF_INTR_EN,
+ MDP_INTF_0_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF+INTF_INTR_EN,
+ MDP_INTF_1_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF+INTF_INTR_EN,
+ MDP_INTF_2_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF+INTF_INTR_EN,
+ MDP_INTF_3_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF+INTF_INTR_EN,
+ MDP_INTF_4_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+ },
+ {
+ MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+ }
+};
+
+/**
+ * IRQ mapping table - used to look up the irq_idx entry that has
+ * a matching interrupt type and instance index.
+ */
+static const struct dpu_irq_type dpu_irq_map[] = {
+ /* BEGIN MAP_RANGE: 0-31, INTR */
+ /* irq_idx: 0-3 */
+ { DPU_IRQ_TYPE_WB_ROT_COMP, WB_0, DPU_INTR_WB_0_DONE, 0},
+ { DPU_IRQ_TYPE_WB_ROT_COMP, WB_1, DPU_INTR_WB_1_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_0, DPU_INTR_WD_TIMER_0_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_1, DPU_INTR_WD_TIMER_1_DONE, 0},
+ /* irq_idx: 4-7 */
+ { DPU_IRQ_TYPE_WB_WFD_COMP, WB_2, DPU_INTR_WB_2_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_2, DPU_INTR_WD_TIMER_2_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_3, DPU_INTR_WD_TIMER_3_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_4, DPU_INTR_WD_TIMER_4_DONE, 0},
+ /* irq_idx: 8-11 */
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_DONE, 0},
+ /* irq_idx: 12-15 */
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_RD_PTR, 0},
+ /* irq_idx: 16-19 */
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_WR_PTR, 0},
+ /* irq_idx: 20-23 */
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+ /* irq_idx: 24-27 */
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, DPU_INTR_INTF_0_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_0, DPU_INTR_INTF_0_VSYNC, 0},
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, DPU_INTR_INTF_1_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_1, DPU_INTR_INTF_1_VSYNC, 0},
+ /* irq_idx: 28-31 */
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, DPU_INTR_INTF_2_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
+
+ /* BEGIN MAP_RANGE: 32-63, INTR2 */
+ /* irq_idx: 32-35 */
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 36-39 */
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_WR_PTR, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 40 */
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_RD_PTR, 1},
+ /* irq_idx: 41-45 */
+ { DPU_IRQ_TYPE_CTL_START, CTL_0,
+ DPU_INTR_CTL_0_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_1,
+ DPU_INTR_CTL_1_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_2,
+ DPU_INTR_CTL_2_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_3,
+ DPU_INTR_CTL_3_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_4,
+ DPU_INTR_CTL_4_START, 1},
+ /* irq_idx: 46-47 */
+ { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
+ { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
+ /* irq_idx: 48-51 */
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+ /* irq_idx: 52-55 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 56-59 */
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
+ /* irq_idx: 60-63 */
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+ /* BEGIN MAP_RANGE: 64-95 HIST */
+ /* irq_idx: 64-67 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+ DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 68-71 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+ DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 72-75 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+ DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+ DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 76-79 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+ DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 80-83 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+ DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 84-87 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+ DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+ DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 88-91 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 92-95 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+ /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+ /* irq_idx: 96-99 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+ DPU_INTR_VIDEO_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
+ /* irq_idx: 100-103 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
+ /* irq_idx: 104-107 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 108-111 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 112-115 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 116-119 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 120-123 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 124-127 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+ /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+ /* irq_idx: 128-131 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+ DPU_INTR_VIDEO_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
+ /* irq_idx: 132-135 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
+ /* irq_idx: 136-139 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 140-143 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 144-147 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 148-151 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 152-155 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 156-159 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+ /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+ /* irq_idx: 160-163 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+ DPU_INTR_VIDEO_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
+ /* irq_idx: 164-167 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
+ /* irq_idx: 168-171 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 172-175 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 176-179 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 180-183 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 184-187 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 188-191 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+ /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+ /* irq_idx: 192-195 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+ DPU_INTR_VIDEO_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
+ /* irq_idx: 196-199 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
+ /* irq_idx: 200-203 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 204-207 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 208-211 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 212-215 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 216-219 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 220-223 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+ /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+ /* irq_idx: 224-227 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+ DPU_INTR_VIDEO_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
+ /* irq_idx: 228-231 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
+ /* irq_idx: 232-235 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 236-239 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 240-243 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 244-247 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 248-251 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 252-255 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+ /* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
+ /* irq_idx: 256-259 */
+ { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 260-263 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 264-267 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 268-271 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 272-275 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 276-279 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 280-283 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 284-287 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+
+ /* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
+ /* irq_idx: 288-291 */
+ { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 292-295 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 296-299 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 300-303 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 304-307 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 308-311 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 312-315 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 316-319 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+};
+
+static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
+ u32 instance_idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
+ if (intr_type == dpu_irq_map[i].intr_type &&
+ instance_idx == dpu_irq_map[i].instance_idx)
+ return i;
+ }
+
+ pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
+ intr_type, instance_idx);
+ return -EINVAL;
+}
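As a worked example of this lookup against the table above, DPU_IRQ_TYPE_INTF_VSYNC on INTF_1 resolves to irq_idx 27 (register set 0, mask DPU_INTR_INTF_1_VSYNC), per the map comments:

	int irq_idx = dpu_hw_intr_irqidx_lookup(DPU_IRQ_TYPE_INTF_VSYNC, INTF_1);
	/* irq_idx == 27; a type/instance pair with no entry yields -EINVAL */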
+
+static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
+ uint32_t mask)
+{
+ if (!intr)
+ return;
+
+ DPU_REG_WRITE(&intr->hw, reg_off, mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
+ void (*cbfunc)(void *, int),
+ void *arg)
+{
+ int reg_idx;
+ int irq_idx;
+ int start_idx;
+ int end_idx;
+ u32 irq_status;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ /*
+ * The dispatcher will save the IRQ status before calling here.
+ * Now go through each saved IRQ status and find the
+ * matching irq lookup index.
+ */
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+ irq_status = intr->save_irq_status[reg_idx];
+
+ /*
+ * Each Interrupt register has a range of 32 indexes, and
+ * that is static for dpu_irq_map.
+ */
+ start_idx = reg_idx * 32;
+ end_idx = start_idx + 32;
+
+ if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
+ end_idx > ARRAY_SIZE(dpu_irq_map))
+ continue;
+
+ /*
+ * Search for matching intr status in the irq map;
+ * start_idx and end_idx define the search range in
+ * dpu_irq_map.
+ */
+ for (irq_idx = start_idx;
+ (irq_idx < end_idx) && irq_status;
+ irq_idx++)
+ if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
+ (dpu_irq_map[irq_idx].reg_idx == reg_idx)) {
+ /*
+ * On an irq mask match, call the given cbfunc;
+ * cbfunc takes care of clearing the interrupt
+ * status. If no cbfunc is provided, the
+ * interrupt is cleared here instead.
+ */
+ if (cbfunc)
+ cbfunc(arg, irq_idx);
+ else
+ intr->ops.clear_intr_status_nolock(
+ intr, irq_idx);
+
+ /*
+ * When the callback finishes, clear the matching
+ * mask from irq_status. Once irq_status is fully
+ * cleared, the search can stop.
+ */
+ irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
+ }
+ }
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
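Since the map is laid out as exactly 32 entries per interrupt register set, the set owning any irq_idx follows by integer division; e.g. irq_idx 41 (CTL_0 start) sits in set 41 / 32 = 1, the INTR2 registers, matching its reg_idx of 1 in the table. A hypothetical helper capturing that relation:

	/* sketch only: which register set an irq_idx belongs to */
	static inline int dpu_irq_reg_set(int irq_idx)
	{
		return irq_idx / 32;	/* 32 dpu_irq_map entries per set */
	}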
+
+static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct dpu_intr_reg *reg;
+ const struct dpu_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &dpu_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &dpu_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if (cache_irq_mask & irq->irq_mask) {
+ dbgstr = "DPU IRQ already set:";
+ } else {
+ dbgstr = "DPU IRQ enabled:";
+
+ cache_irq_mask |= irq->irq_mask;
+ /* Clear any pending interrupts */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ /* Enabling interrupts with the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ const struct dpu_intr_reg *reg;
+ const struct dpu_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &dpu_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &dpu_intr_set[reg_idx];
+
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if ((cache_irq_mask & irq->irq_mask) == 0) {
+ dbgstr = "DPU IRQ is already cleared:";
+ } else {
+ dbgstr = "DPU IRQ mask disable:";
+
+ cache_irq_mask &= ~irq->irq_mask;
+ /* Disable interrupts based on the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+ /* Clear any pending interrupts */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ dpu_hw_intr_disable_irq_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return 0;
+}
+
+static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return 0;
+}
+
+static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
+ uint32_t *mask)
+{
+ if (!intr || !mask)
+ return -EINVAL;
+
+ *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+ | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
+ return 0;
+}
+
+static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
+{
+ int i;
+ u32 enable_mask;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ /* Read interrupt status */
+ intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[i].status_off);
+
+ /* Read enable mask */
+ enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[i].en_off);
+
+ /* and clear the interrupt */
+ if (intr->save_irq_status[i])
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off,
+ intr->save_irq_status[i]);
+
+ /* Finally update IRQ status based on enable mask */
+ intr->save_irq_status[i] &= enable_mask;
+ }
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+ int irq_idx)
+{
+ int reg_idx;
+
+ if (!intr)
+ return;
+
+ reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ dpu_irq_map[irq_idx].irq_mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
+ int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
+ int irq_idx, bool clear)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ u32 intr_status;
+
+ if (!intr)
+ return 0;
+
+ if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return 0;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+ reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ intr_status = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[reg_idx].status_off) &
+ dpu_irq_map[irq_idx].irq_mask;
+ if (intr_status && clear)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ intr_status);
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return intr_status;
+}
+
+static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
+{
+ ops->set_mask = dpu_hw_intr_set_mask;
+ ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
+ ops->enable_irq = dpu_hw_intr_enable_irq;
+ ops->disable_irq = dpu_hw_intr_disable_irq;
+ ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
+ ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
+ ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
+ ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
+ ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
+ ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
+ ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
+ ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
+}
+
+static void __intr_offset(struct dpu_mdss_cfg *m,
+ void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
+{
+ hw->base_off = addr;
+ hw->blk_off = m->mdp[0].base;
+ hw->hwversion = m->hwversion;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intr *intr;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+ __intr_offset(m, addr, &intr->hw);
+ __setup_intr_ops(&intr->ops);
+
+ intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+
+ intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->cache_irq_mask == NULL) {
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ intr->save_irq_status = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->save_irq_status == NULL) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&intr->irq_lock);
+
+ return intr;
+}
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+ if (intr) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr->save_irq_status);
+ kfree(intr);
+ }
+}
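Putting the ops together, a hedged sketch of the init/enable/dispatch lifecycle. Here mdp_base, catalog, demo_irq_cb() and demo_intr_setup() are hypothetical; note that dispatch_irqs() holds irq_lock while invoking the callback, so the callback must use the nolock clear variant:

	static void demo_irq_cb(void *arg, int irq_idx)
	{
		struct dpu_hw_intr *intr = arg;

		/* dispatch_irqs() holds irq_lock; use the nolock variant */
		intr->ops.clear_intr_status_nolock(intr, irq_idx);
	}

	static int demo_intr_setup(void __iomem *mdp_base,
				   struct dpu_mdss_cfg *catalog)
	{
		struct dpu_hw_intr *intr = dpu_hw_intr_init(mdp_base, catalog);
		int irq_idx;

		if (IS_ERR(intr))
			return PTR_ERR(intr);

		intr->ops.clear_all_irqs(intr);
		irq_idx = intr->ops.irq_idx_lookup(DPU_IRQ_TYPE_INTF_VSYNC, INTF_1);
		if (irq_idx >= 0)
			intr->ops.enable_irq(intr, irq_idx);

		/* later, from the hard-irq handler: snapshot then dispatch */
		intr->ops.get_interrupt_statuses(intr);
		intr->ops.dispatch_irqs(intr, demo_irq_cb, intr);
		return 0;
	}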
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 000000000000..61e4cba36562
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP BIT(0)
+#define IRQ_SOURCE_DSI0 BIT(4)
+#define IRQ_SOURCE_DSI1 BIT(5)
+#define IRQ_SOURCE_HDMI BIT(8)
+#define IRQ_SOURCE_EDP BIT(12)
+#define IRQ_SOURCE_MHL BIT(16)
+
+/**
+ * dpu_intr_type - HW Interrupt Type
+ * @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
+ * @DPU_IRQ_TYPE_WB_WFD_COMP: WB WFD done
+ * @DPU_IRQ_TYPE_PING_PONG_COMP: PingPong done
+ * @DPU_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer
+ * @DPU_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer
+ * @DPU_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh
+ * @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
+ * @DPU_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection
+ * @DPU_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun
+ * @DPU_IRQ_TYPE_INTF_VSYNC: INTF VSYNC
+ * @DPU_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow
+ * @DPU_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done
+ * @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset
+ * @DPU_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done
+ * @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset
+ * @DPU_IRQ_TYPE_WD_TIMER: Watchdog timer
+ * @DPU_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
+ * @DPU_IRQ_TYPE_AD4_BL_DONE: AD4 backlight
+ * @DPU_IRQ_TYPE_CTL_START: Control start
+ * @DPU_IRQ_TYPE_RESERVED: Reserved for expansion
+ */
+enum dpu_intr_type {
+ DPU_IRQ_TYPE_WB_ROT_COMP,
+ DPU_IRQ_TYPE_WB_WFD_COMP,
+ DPU_IRQ_TYPE_PING_PONG_COMP,
+ DPU_IRQ_TYPE_PING_PONG_RD_PTR,
+ DPU_IRQ_TYPE_PING_PONG_WR_PTR,
+ DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
+ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+ DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
+ DPU_IRQ_TYPE_INTF_UNDER_RUN,
+ DPU_IRQ_TYPE_INTF_VSYNC,
+ DPU_IRQ_TYPE_CWB_OVERFLOW,
+ DPU_IRQ_TYPE_HIST_VIG_DONE,
+ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
+ DPU_IRQ_TYPE_HIST_DSPP_DONE,
+ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+ DPU_IRQ_TYPE_WD_TIMER,
+ DPU_IRQ_TYPE_SFI_VIDEO_IN,
+ DPU_IRQ_TYPE_SFI_VIDEO_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_0_IN,
+ DPU_IRQ_TYPE_SFI_CMD_0_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_1_IN,
+ DPU_IRQ_TYPE_SFI_CMD_1_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_2_IN,
+ DPU_IRQ_TYPE_SFI_CMD_2_OUT,
+ DPU_IRQ_TYPE_PROG_LINE,
+ DPU_IRQ_TYPE_AD4_BL_DONE,
+ DPU_IRQ_TYPE_CTL_START,
+ DPU_IRQ_TYPE_RESERVED,
+};
+
+struct dpu_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct dpu_hw_intr_ops {
+ /**
+ * set_mask - Programs the given interrupt register with the
+ * given interrupt mask. Register value will get overwritten.
+ * @intr: HW interrupt handle
+ * @reg_off: MDSS HW register offset
+ * @irqmask: IRQ mask value
+ */
+ void (*set_mask)(
+ struct dpu_hw_intr *intr,
+ uint32_t reg_off,
+ uint32_t irqmask);
+
+ /**
+ * irq_idx_lookup - Look up the IRQ index for a HW interrupt type
+ * Used for all irq related ops
+ * @intr_type: Interrupt type defined in dpu_intr_type
+ * @instance_idx: HW interrupt block instance
+ * @return: irq_idx or -EINVAL for lookup fail
+ */
+ int (*irq_idx_lookup)(
+ enum dpu_intr_type intr_type,
+ u32 instance_idx);
+
+ /**
+ * enable_irq - Enable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*enable_irq)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * disable_irq - Disable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_irq)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+ * any asserted IRQs). Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*clear_all_irqs)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * disable_all_irqs - Disables all the interrupts. Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_all_irqs)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * dispatch_irqs - IRQ dispatcher will call the given callback
+ * function when a matching interrupt status bit is
+ * found in the irq mapping table.
+ * @intr: HW interrupt handle
+ * @cbfunc: Callback function pointer
+ * @arg: Argument to pass back during callback
+ */
+ void (*dispatch_irqs)(
+ struct dpu_hw_intr *intr,
+ void (*cbfunc)(void *arg, int irq_idx),
+ void *arg);
+
+ /**
+ * get_interrupt_statuses - Gets and stores the values from all interrupt
+ * status registers that are currently fired.
+ * @intr: HW interrupt handle
+ */
+ void (*get_interrupt_statuses)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * clear_interrupt_status - Clears HW interrupt status based on given
+ * lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ */
+ void (*clear_interrupt_status)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_intr_status_nolock() - clears the HW interrupts without lock
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ */
+ void (*clear_intr_status_nolock)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * get_interrupt_status - Gets HW interrupt status, and clear if set,
+ * based on given lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ * @clear: True to clear irq after read
+ */
+ u32 (*get_interrupt_status)(
+ struct dpu_hw_intr *intr,
+ int irq_idx,
+ bool clear);
+
+ /**
+ * get_valid_interrupts - Gets a mask of all valid interrupt sources
+ * within DPU. These are actually status bits
+ * within interrupt registers that specify the
+ * source of the interrupt. For example,
+ * valid interrupt sources can be MDP, DSI,
+ * HDMI etc.
+ * @intr: HW interrupt handle
+ * @mask: Pointer used to return the interrupt source mask
+ * @return: 0 for success, otherwise failure
+ */
+ int (*get_valid_interrupts)(
+ struct dpu_hw_intr *intr,
+ uint32_t *mask);
+};
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @ops: function pointer mapping for IRQ handling
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @irq_lock: spinlock for accessing IRQ resources
+ */
+struct dpu_hw_intr {
+ struct dpu_hw_blk_reg_map hw;
+ struct dpu_hw_intr_ops ops;
+ u32 *cache_irq_mask;
+ u32 *save_irq_status;
+ u32 irq_idx_tbl_size;
+ spinlock_t irq_lock;
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleans up the interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 000000000000..d280df5613c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,349 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define INTF_TIMING_ENGINE_EN 0x000
+#define INTF_CONFIG 0x004
+#define INTF_HSYNC_CTL 0x008
+#define INTF_VSYNC_PERIOD_F0 0x00C
+#define INTF_VSYNC_PERIOD_F1 0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define INTF_DISPLAY_V_START_F0 0x01C
+#define INTF_DISPLAY_V_START_F1 0x020
+#define INTF_DISPLAY_V_END_F0 0x024
+#define INTF_DISPLAY_V_END_F1 0x028
+#define INTF_ACTIVE_V_START_F0 0x02C
+#define INTF_ACTIVE_V_START_F1 0x030
+#define INTF_ACTIVE_V_END_F0 0x034
+#define INTF_ACTIVE_V_END_F1 0x038
+#define INTF_DISPLAY_HCTL 0x03C
+#define INTF_ACTIVE_HCTL 0x040
+#define INTF_BORDER_COLOR 0x044
+#define INTF_UNDERFLOW_COLOR 0x048
+#define INTF_HSYNC_SKEW 0x04C
+#define INTF_POLARITY_CTL 0x050
+#define INTF_TEST_CTL 0x054
+#define INTF_TP_COLOR0 0x058
+#define INTF_TP_COLOR1 0x05C
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_DEFLICKER_CONFIG 0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define INTF_PANEL_FORMAT 0x090
+#define INTF_TPG_ENABLE 0x100
+#define INTF_TPG_MAIN_CONTROL 0x104
+#define INTF_TPG_VIDEO_CONFIG 0x108
+#define INTF_TPG_COMPONENT_LIMITS 0x10C
+#define INTF_TPG_RECTANGLE 0x110
+#define INTF_TPG_INITIAL_VALUE 0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING 0x11C
+#define INTF_PROG_FETCH_START 0x170
+#define INTF_PROG_ROT_START 0x174
+
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_MISR_CTRL 0x180
+#define INTF_MISR_SIGNATURE 0x184
+
+static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->intf_count; i++) {
+ if ((intf == m->intf[i].id) &&
+ (m->intf[i].type != INTF_NONE)) {
+ b->base_off = addr;
+ b->blk_off = m->intf[i].base;
+ b->length = m->intf[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_INTF;
+ return &m->intf[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 active_h_start, active_h_end;
+ u32 active_v_start, active_v_end;
+ u32 active_hctl, display_hctl, hsync_ctl;
+ u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+ u32 panel_format;
+ u32 intf_cfg;
+
+ /* read interface_cfg */
+ intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+ hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+ p->h_front_porch;
+ vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+ p->v_front_porch;
+
+ display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+ hsync_period) + p->hsync_skew;
+ display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+ p->hsync_skew - 1;
+
+ if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+ }
+
+ hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+ hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+ if (p->width != p->xres) {
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ } else {
+ active_h_start = 0;
+ active_h_end = 0;
+ }
+
+ if (p->height != p->yres) {
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+ } else {
+ active_v_start = 0;
+ active_v_end = 0;
+ }
+
+ if (active_h_end) {
+ active_hctl = (active_h_end << 16) | active_h_start;
+ intf_cfg |= BIT(29); /* ACTIVE_H_ENABLE */
+ } else {
+ active_hctl = 0;
+ }
+
+ if (active_v_end)
+ intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ den_polarity = 0;
+ if (ctx->cap->type == INTF_HDMI) {
+ hsync_polarity = p->yres >= 720 ? 0 : 1;
+ vsync_polarity = p->yres >= 720 ? 0 : 1;
+ } else {
+ hsync_polarity = 0;
+ vsync_polarity = 0;
+ }
+ polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
+ (vsync_polarity << 1) | /* VSYNC Polarity */
+ (hsync_polarity << 0); /* HSYNC Polarity */
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ panel_format = (fmt->bits[C0_G_Y] |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (0x21 << 8));
+ else
+ /* Interface treats all the pixel data in RGB888 format */
+ panel_format = (COLOR_8BIT |
+ (COLOR_8BIT << 2) |
+ (COLOR_8BIT << 4) |
+ (0x21 << 8));
+
+ DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+ DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+ DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+ p->vsync_pulse_width * hsync_period);
+ DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+ DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+ DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+ DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+ DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+ DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+ DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+ DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+ DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
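To make the timing arithmetic above concrete, a worked example assuming standard CEA 1080p60 blanking (1920x1080, hsync pulse 44, h back porch 148, h front porch 88, vsync pulse 5, v back porch 36, v front porch 4, hsync_skew 0):

	/*
	 * hsync_period    = 44 + 148 + 1920 + 88      = 2200 pixels
	 * vsync_period    = 5 + 36 + 1080 + 4         = 1125 lines
	 * display_v_start = (5 + 36) * 2200 + 0       = 90200
	 * display_v_end   = (1125 - 4) * 2200 + 0 - 1 = 2466199
	 * hsync_start_x   = 148 + 44                  = 192
	 * hsync_end_x     = 2200 - 88 - 1             = 2111
	 */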
+
+static void dpu_hw_intf_enable_timing_engine(
+ struct dpu_hw_intf *intf,
+ u8 enable)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ /* Note: Display interface select is handled in top block hw layer */
+ DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+ struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ int fetch_enable;
+
+ /*
+ * Fetch should always be outside the active lines. If the fetching
+ * is programmed within the active region, hardware behavior is unknown.
+ */
+
+ fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+ if (fetch->enable) {
+ fetch_enable |= BIT(31);
+ DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+ fetch->fetch_start);
+ } else {
+ fetch_enable &= ~BIT(31);
+ }
+
+ DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void dpu_hw_intf_get_status(
+ struct dpu_hw_intf *intf,
+ struct intf_status *s)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+ s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+ if (s->is_en) {
+ s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+ s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+ } else {
+ s->line_count = 0;
+ s->frame_count = 0;
+ }
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
+ bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
+}
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!intf)
+ return 0;
+
+ c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+ ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
+ ops->get_status = dpu_hw_intf_get_status;
+ ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+ ops->setup_misr = dpu_hw_intf_setup_misr;
+ ops->collect_misr = dpu_hw_intf_collect_misr;
+ ops->get_line_count = dpu_hw_intf_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intf *c;
+ struct dpu_intf_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _intf_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_intf %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ c->mdss = m;
+ _setup_intf_ops(&c->ops, c->cap->features);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+ if (intf)
+ dpu_hw_blk_destroy(&intf->base);
+ kfree(intf);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644
index 000000000000..a79d735da68d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+ u32 width; /* active width */
+ u32 height; /* active height */
+ u32 xres; /* Display panel width */
+ u32 yres; /* Display panel height */
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+ u32 hsync_polarity;
+ u32 vsync_polarity;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+ u8 enable;
+ /* vsync counter for the front porch pixel line */
+ u32 fetch_start;
+};
+
+struct intf_status {
+ u8 is_en; /* interface timing engine is enabled or not */
+ u32 frame_count; /* frame count since timing engine enabled */
+ u32 line_count; /* current line count including blanking */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_timing_gen: programs the timing engine
+ * @setup_prg_fetch: enables/disables the programmable fetch logic
+ * @enable_timing: enable/disable timing engine
+ * @get_status: returns whether the timing engine is enabled or not
+ * @setup_misr: enables/disables MISR in HW register
+ * @collect_misr: reads and stores MISR data from HW register
+ * @get_line_count: reads current vertical line counter
+ */
+struct dpu_hw_intf_ops {
+ void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt);
+
+ void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch);
+
+ void (*enable_timing)(struct dpu_hw_intf *intf,
+ u8 enable);
+
+ void (*get_status)(struct dpu_hw_intf *intf,
+ struct intf_status *status);
+
+ void (*setup_misr)(struct dpu_hw_intf *intf,
+ bool enable, u32 frame_count);
+
+ u32 (*collect_misr)(struct dpu_hw_intf *intf);
+
+ u32 (*get_line_count)(struct dpu_hw_intf *intf);
+};
+
+struct dpu_hw_intf {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* intf */
+ enum dpu_intf idx;
+ const struct dpu_intf_cfg *cap;
+ const struct dpu_mdss_cfg *mdss;
+
+ /* ops */
+ struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * to_dpu_hw_intf - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
+ * dpu_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx: interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf: Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
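+/*
+ * Illustrative usage sketch (not part of this patch); "mmio", "catalog",
+ * "timing" and "fmt" are assumed to be provided by the caller:
+ *
+ * struct dpu_hw_intf *intf;
+ *
+ * intf = dpu_hw_intf_init(INTF_1, mmio, catalog);
+ * if (IS_ERR(intf))
+ * return PTR_ERR(intf);
+ * intf->ops.setup_timing_gen(intf, &timing, fmt);
+ * intf->ops.enable_timing(intf, 1);
+ */
+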
+#endif /*_DPU_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644
index 000000000000..4ab72b0f07a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -0,0 +1,261 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define LM_OP_MODE 0x00
+#define LM_OUT_SIZE 0x04
+#define LM_BORDER_COLOR_0 0x08
+#define LM_BORDER_COLOR_1 0x10
+
+/* These register are offset to mixer base + stage base */
+#define LM_BLEND0_OP 0x00
+#define LM_BLEND0_CONST_ALPHA 0x04
+#define LM_FG_COLOR_FILL_COLOR_0 0x08
+#define LM_FG_COLOR_FILL_COLOR_1 0x0C
+#define LM_FG_COLOR_FILL_SIZE 0x10
+#define LM_FG_COLOR_FILL_XY 0x14
+
+#define LM_BLEND0_FG_ALPHA 0x04
+#define LM_BLEND0_BG_ALPHA 0x08
+
+#define LM_MISR_CTRL 0x310
+#define LM_MISR_SIGNATURE 0x314
+
+static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->mixer_count; i++) {
+ if (mixer == m->mixer[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mixer[i].base;
+ b->length = m->mixer[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_LM;
+ return &m->mixer[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @ctx: mixer ctx containing the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+ const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+ int rc;
+
+ if (stage == DPU_STAGE_BASE)
+ rc = -EINVAL;
+ else if (stage <= sblk->maxblendstages)
+ rc = sblk->blendstage_base[stage - DPU_STAGE_0];
+ else
+ rc = -EINVAL;
+
+ return rc;
+}
+
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *mixer)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 outsize;
+ u32 op_mode;
+
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ outsize = mixer->out_height << 16 | mixer->out_width;
+ DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+ /* SPLIT_LEFT_RIGHT */
+ if (mixer->right_mixer)
+ op_mode |= BIT(31);
+ else
+ op_mode &= ~BIT(31);
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+ (color->color_0 & 0xFFF) |
+ ((color->color_1 & 0xFFF) << 16));
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+ (color->color_2 & 0xFFF) |
+ ((color->color_3 & 0xFFF) << 16));
+ }
+}
+
+static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+ u32 const_alpha;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+ DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+ uint32_t mixer_op_mode)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int op_mode;
+
+ /* read the existing op_mode configuration */
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
+ void *cfg)
+{
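+ /* gamma correction is not implemented yet; this op is a no-op */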
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
+ bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
+static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+ struct dpu_hw_lm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_mixer_out = dpu_hw_lm_setup_out;
+ if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
+ else
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+ ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+ ops->setup_border_color = dpu_hw_lm_setup_border_color;
+ ops->setup_gc = dpu_hw_lm_gc;
+ ops->setup_misr = dpu_hw_lm_setup_misr;
+ ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mixer *c;
+ struct dpu_lm_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _lm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_mixer_ops(m, &c->ops, c->cap->features);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+ if (lm)
+ dpu_hw_blk_destroy(&lm->base);
+ kfree(lm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644
index 000000000000..e29e5dab31bf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+ u32 out_width;
+ u32 out_height;
+ bool right_mixer;
+ int flags;
+};
+
+struct dpu_hw_color3_cfg {
+ u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ *
+ * struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_lm_ops {
+ /*
+ * Sets up mixer output width and height
+ * and border color if enabled
+ */
+ void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *cfg);
+
+ /*
+ * Alpha blending configuration
+ * for the specified stage
+ */
+ void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+ uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+ /*
+ * Alpha color component selection from either fg or bg
+ */
+ void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+ /**
+ * setup_border_color : enable/disable border color
+ */
+ void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en);
+ /**
+ * setup_gc : enable/disable gamma correction feature
+ */
+ void (*setup_gc)(struct dpu_hw_mixer *mixer,
+ void *cfg);
+
+ /* setup_misr: enables/disables MISR in HW register */
+ void (*setup_misr)(struct dpu_hw_mixer *ctx,
+ bool enable, u32 frame_count);
+
+ /* collect_misr: reads and stores MISR data from HW register */
+ u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
+};
+
+struct dpu_hw_mixer {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* lm */
+ enum dpu_lm idx;
+ const struct dpu_lm_cfg *cap;
+ const struct dpu_mdp_cfg *mdp;
+ const struct dpu_ctl_cfg *ctl;
+
+ /* ops */
+ struct dpu_hw_lm_ops ops;
+
+ /* store mixer info specific to display */
+ struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init(): Initializes the mixer hw driver object.
+ * Should be called once before accessing every mixer.
+ * @idx: mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm: Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
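+/*
+ * Illustrative usage sketch (not part of this patch); "mmio" and
+ * "catalog" are assumed to come from the caller, and the blend flags
+ * shown are one common coverage-alpha pairing:
+ *
+ * struct dpu_hw_mixer_cfg cfg = {
+ * .out_width = 1920,
+ * .out_height = 1080,
+ * .right_mixer = false,
+ * };
+ * struct dpu_hw_mixer *lm;
+ *
+ * lm = dpu_hw_lm_init(LM_0, mmio, catalog);
+ * if (IS_ERR(lm))
+ * return PTR_ERR(lm);
+ * lm->ops.setup_mixer_out(lm, &cfg);
+ * lm->ops.setup_blend_config(lm, DPU_STAGE_0, 0xff, 0x00,
+ * DPU_BLEND_FG_ALPHA_FG_PIXEL |
+ * DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ * DPU_BLEND_BG_INV_ALPHA);
+ */
+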
+#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644
index 000000000000..35e6bf930924
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -0,0 +1,465 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME "dpu"
+
+#define DPU_NONE 0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE 9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE 6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE 3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES 4
+#endif
+
+#define PIPES_PER_STAGE 2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES 3
+#endif
+
+enum dpu_format_flags {
+ DPU_FORMAT_FLAG_YUV_BIT,
+ DPU_FORMAT_FLAG_DX_BIT,
+ DPU_FORMAT_FLAG_COMPRESSED_BIT,
+ DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X) \
+ (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X) \
+ (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA (1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA (1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN (1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA (1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA (1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN (1 << 13)
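+
+/*
+ * Illustrative blend_op compositions built from the flags above; the
+ * pairings mirror standard coverage vs premultiplied alpha blending
+ * (values assumed, not mandated by this patch):
+ *
+ * coverage alpha: DPU_BLEND_FG_ALPHA_FG_PIXEL |
+ * DPU_BLEND_BG_ALPHA_FG_PIXEL | DPU_BLEND_BG_INV_ALPHA
+ * premultiplied alpha: DPU_BLEND_FG_ALPHA_FG_CONST |
+ * DPU_BLEND_BG_ALPHA_FG_PIXEL | DPU_BLEND_BG_INV_ALPHA
+ */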
+
+#define DPU_VSYNC0_SOURCE_GPIO 0
+#define DPU_VSYNC1_SOURCE_GPIO 1
+#define DPU_VSYNC2_SOURCE_GPIO 2
+#define DPU_VSYNC_SOURCE_INTF_0 3
+#define DPU_VSYNC_SOURCE_INTF_1 4
+#define DPU_VSYNC_SOURCE_INTF_2 5
+#define DPU_VSYNC_SOURCE_INTF_3 6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4 11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3 12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2 13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1 14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0 15
+
+enum dpu_hw_blk_type {
+ DPU_HW_BLK_TOP = 0,
+ DPU_HW_BLK_SSPP,
+ DPU_HW_BLK_LM,
+ DPU_HW_BLK_CTL,
+ DPU_HW_BLK_CDM,
+ DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_INTF,
+ DPU_HW_BLK_WB,
+ DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+ MDP_TOP = 0x1,
+ MDP_MAX,
+};
+
+enum dpu_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum dpu_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum dpu_stage {
+ DPU_STAGE_BASE = 0,
+ DPU_STAGE_0,
+ DPU_STAGE_1,
+ DPU_STAGE_2,
+ DPU_STAGE_3,
+ DPU_STAGE_4,
+ DPU_STAGE_5,
+ DPU_STAGE_6,
+ DPU_STAGE_7,
+ DPU_STAGE_8,
+ DPU_STAGE_9,
+ DPU_STAGE_10,
+ DPU_STAGE_MAX
+};
+enum dpu_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum dpu_ds {
+ DS_TOP,
+ DS_0,
+ DS_1,
+ DS_MAX
+};
+
+enum dpu_ctl {
+ CTL_0 = 1,
+ CTL_1,
+ CTL_2,
+ CTL_3,
+ CTL_4,
+ CTL_MAX
+};
+
+enum dpu_cdm {
+ CDM_0 = 1,
+ CDM_1,
+ CDM_MAX
+};
+
+enum dpu_pingpong {
+ PINGPONG_0 = 1,
+ PINGPONG_1,
+ PINGPONG_2,
+ PINGPONG_3,
+ PINGPONG_4,
+ PINGPONG_S0,
+ PINGPONG_MAX
+};
+
+enum dpu_intf {
+ INTF_0 = 1,
+ INTF_1,
+ INTF_2,
+ INTF_3,
+ INTF_4,
+ INTF_5,
+ INTF_6,
+ INTF_MAX
+};
+
+enum dpu_intf_type {
+ INTF_NONE = 0x0,
+ INTF_DSI = 0x1,
+ INTF_HDMI = 0x3,
+ INTF_LCDC = 0x5,
+ INTF_EDP = 0x9,
+ INTF_DP = 0xa,
+ INTF_TYPE_MAX,
+
+ /* virtual interfaces */
+ INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+ INTF_MODE_NONE = 0,
+ INTF_MODE_CMD,
+ INTF_MODE_VIDEO,
+ INTF_MODE_WB_BLOCK,
+ INTF_MODE_WB_LINE,
+ INTF_MODE_MAX
+};
+
+enum dpu_wb {
+ WB_0 = 1,
+ WB_1,
+ WB_2,
+ WB_3,
+ WB_MAX
+};
+
+enum dpu_ad {
+ AD_0 = 0x1,
+ AD_1,
+ AD_MAX
+};
+
+enum dpu_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum dpu_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+ VBIF_0,
+ VBIF_1,
+ VBIF_MAX,
+ VBIF_RT = VBIF_0,
+ VBIF_NRT = VBIF_1
+};
+
+enum dpu_iommu_domain {
+ DPU_IOMMU_DOMAIN_UNSECURE,
+ DPU_IOMMU_DOMAIN_SECURE,
+ DPU_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * DPU HW component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines how the color components are packed
+ * @DPU_PLANE_INTERLEAVED : Color components in single plane
+ * @DPU_PLANE_PLANAR : Color component in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum dpu_plane_type {
+ DPU_PLANE_INTERLEAVED,
+ DPU_PLANE_PLANAR,
+ DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB : No chroma subsampling
+ * @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2 : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420 : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+ DPU_CHROMA_RGB,
+ DPU_CHROMA_H2V1,
+ DPU_CHROMA_H1V2,
+ DPU_CHROMA_420
+};
+
+/**
+ * enum dpu_fetch_type - defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR : fetch is line by line
+ * @DPU_FETCH_TILE : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC : fetch and decompress data
+ */
+enum dpu_fetch_type {
+ DPU_FETCH_LINEAR,
+ DPU_FETCH_TILE,
+ DPU_FETCH_UBWC
+};
+
+/**
+ * Value of enum chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+ COLOR_4BIT = 0,
+ COLOR_5BIT = 1, /* No 5-bit Alpha */
+ COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+ COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : vertical row interleaving
+ * @BLEND_3D_COL_INT : column interleaving
+ * @BLEND_3D_MAX :
+ */
+enum dpu_3d_blend_mode {
+ BLEND_3D_NONE = 0,
+ BLEND_3D_FRAME_INT,
+ BLEND_3D_H_ROW_INT,
+ BLEND_3D_V_ROW_INT,
+ BLEND_3D_COL_INT,
+ BLEND_3D_MAX
+};
+
+/**
+ * struct dpu_format - defines the format configuration which
+ * allows DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 component
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+ struct msm_format base;
+ enum dpu_plane_type fetch_planes;
+ u8 element[DPU_MAX_PLANES];
+ u8 bits[DPU_MAX_PLANES];
+ enum dpu_chroma_samp_type chroma_sample;
+ u8 unpack_align_msb;
+ u8 unpack_tight;
+ u8 unpack_count;
+ u8 bpp;
+ u8 alpha_enable;
+ u8 num_planes;
+ enum dpu_fetch_type fetch_mode;
+ DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+ u16 tile_width;
+ u16 tile_height;
+};
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+ const struct dpu_format *format;
+ uint32_t num_planes;
+ uint32_t width;
+ uint32_t height;
+ uint32_t total_size;
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ uint32_t plane_size[DPU_MAX_PLANES];
+ uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+ /* matrix coefficients in S15.16 format */
+ uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+ uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+ uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
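+
+/*
+ * Illustrative example (values assumed): in S15.16 fixed point 1.0 is
+ * 0x00010000, so a unity (pass-through) matrix would be
+ *
+ * .csc_mv = { 0x00010000, 0x0, 0x0,
+ * 0x0, 0x00010000, 0x0,
+ * 0x0, 0x0, 0x00010000 },
+ *
+ * with zero pre/post bias vectors and clamp ranges covering the full
+ * pixel depth.
+ */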
+
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+ u32 color_0;
+ u32 color_1;
+ u32 color_2;
+ u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE (1 << 0)
+#define DPU_DBG_MASK_CDM (1 << 1)
+#define DPU_DBG_MASK_INTF (1 << 2)
+#define DPU_DBG_MASK_LM (1 << 3)
+#define DPU_DBG_MASK_CTL (1 << 4)
+#define DPU_DBG_MASK_PINGPONG (1 << 5)
+#define DPU_DBG_MASK_SSPP (1 << 6)
+#define DPU_DBG_MASK_WB (1 << 7)
+#define DPU_DBG_MASK_TOP (1 << 8)
+#define DPU_DBG_MASK_VBIF (1 << 9)
+#define DPU_DBG_MASK_ROT (1 << 10)
+
+#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644
index 000000000000..cc3a623903f4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -0,0 +1,250 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN 0x000
+#define PP_SYNC_CONFIG_VSYNC 0x004
+#define PP_SYNC_CONFIG_HEIGHT 0x008
+#define PP_SYNC_WRCOUNT 0x00C
+#define PP_VSYNC_INIT_VAL 0x010
+#define PP_INT_COUNT_VAL 0x014
+#define PP_SYNC_THRESH 0x018
+#define PP_START_POS 0x01C
+#define PP_RD_PTR_IRQ 0x020
+#define PP_WR_PTR_IRQ 0x024
+#define PP_OUT_LINE_COUNT 0x028
+#define PP_LINE_COUNT 0x02C
+
+#define PP_FBC_MODE 0x034
+#define PP_FBC_BUDGET_CTL 0x038
+#define PP_FBC_LOSSY_MODE 0x03C
+
+static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->pingpong_count; i++) {
+ if (pp == m->pingpong[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->pingpong[i].base;
+ b->length = m->pingpong[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_PINGPONG;
+ return &m->pingpong[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int cfg;
+
+ if (!pp || !te)
+ return -EINVAL;
+ c = &pp->hw;
+
+ cfg = BIT(19); /* VSYNC_COUNTER_EN */
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ cfg |= te->vsync_count;
+
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+ DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+ DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+ DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+ DPU_REG_WRITE(c, PP_SYNC_THRESH,
+ ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start));
+ DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+ (te->start_pos + te->sync_threshold_start + 1));
+
+ return 0;
+}
+
+static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
+ u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+ int rc;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+ val, (val & 0xffff) >= 1, 10, timeout_us);
+
+ return rc;
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!pp)
+ return -EINVAL;
+ c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+ return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+ bool enable_external_te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 cfg;
+ int orig;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+ orig = (bool)(cfg & BIT(20));
+ if (enable_external_te)
+ cfg |= BIT(20);
+ else
+ cfg &= ~BIT(20);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+ return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+
+ if (!pp || !info)
+ return -EINVAL;
+ c = &pp->hw;
+
+ val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+ info->rd_ptr_init_val = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+ info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+ info->rd_ptr_line_count = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_LINE_COUNT);
+ info->wr_ptr_line_count = val & 0xffff;
+
+ return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 height, init;
+ u32 line = 0xFFFF;
+
+ if (!pp)
+ return 0;
+ c = &pp->hw;
+
+ init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+ height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+ if (height < init)
+ goto line_count_exit;
+
+ line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
+ if (line < init)
+ line += (0xFFFF - init);
+ else
+ line -= init;
+
+line_count_exit:
+ return line;
+}
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
+ const struct dpu_pingpong_cfg *hw_cap)
+{
+ ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
+ ops->enable_tearcheck = dpu_hw_pp_enable_te;
+ ops->connect_external_te = dpu_hw_pp_connect_external_te;
+ ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
+ ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+ ops->get_line_count = dpu_hw_pp_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_pingpong *c;
+ struct dpu_pingpong_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _pingpong_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_pingpong_ops(&c->ops, c->caps);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+ if (pp)
+ dpu_hw_blk_destroy(&pp->base);
+ kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644
index 000000000000..3caccd7d6a3e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_pingpong;
+
+struct dpu_hw_tear_check {
+ /*
+ * This is the ratio of the MDP VSYNC clock frequency (Hz) to the
+ * panel refresh rate, divided by the number of lines per frame
+ */
+ u32 vsync_count;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u8 hw_vsync_mode;
+};
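+
+/*
+ * Illustrative vsync_count calculation (values assumed): with a
+ * 19.2 MHz vsync counter clock, a 60 Hz panel and 2000 total lines
+ * per frame,
+ *
+ * vsync_count = 19200000 / (60 * 2000) = 160
+ *
+ * counter ticks per line.
+ */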
+
+struct dpu_hw_pp_vsync_info {
+ u32 rd_ptr_init_val; /* value of rd pointer at vsync edge */
+ u32 rd_ptr_frame_count; /* num frames sent since enabling interface */
+ u32 rd_ptr_line_count; /* current line on panel (rd ptr) */
+ u32 wr_ptr_line_count; /* current line within pp fifo (wr ptr) */
+};
+
+/**
+ *
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_tearcheck : program tear check values
+ * @enable_tearcheck : enables tear check
+ * @connect_external_te : set/clear listening to the external TE signal
+ * @get_vsync_info : retrieves timing info of the panel
+ * @poll_timeout_wr_ptr : poll until write pointer transmission starts
+ * @get_line_count: obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+ /**
+ * enables vsync generation, sets up the initial value of the
+ * read pointer and programs the tear check configuration
+ */
+ int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *cfg);
+
+ /**
+ * enables tear check block
+ */
+ int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+ bool enable);
+
+ /**
+ * read, modify, write to either set or clear listening to external TE
+ * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+ */
+ int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+ bool enable_external_te);
+
+ /**
+ * provides the programmed and current
+ * line_count
+ */
+ int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info);
+
+ /**
+ * poll until write pointer transmission starts
+ * @Return: 0 on success, -ETIMEDOUT on timeout
+ */
+ int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
+
+ /**
+ * Obtain current vertical line counter
+ */
+ u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+};
+
+struct dpu_hw_pingpong {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* pingpong */
+ enum dpu_pingpong idx;
+ const struct dpu_pingpong_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
+ * pingpong idx.
+ * @idx: Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context
+ * should be called to free the context
+ * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
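+/*
+ * Illustrative usage sketch (not part of this patch); "mmio", "catalog"
+ * and "te_cfg" are assumed to come from the caller:
+ *
+ * struct dpu_hw_pingpong *pp;
+ *
+ * pp = dpu_hw_pingpong_init(PINGPONG_0, mmio, catalog);
+ * if (IS_ERR(pp))
+ * return PTR_ERR(pp);
+ * pp->ops.setup_tearcheck(pp, &te_cfg);
+ * pp->ops.enable_tearcheck(pp, true);
+ */
+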
+#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644
index 000000000000..c25b52a6b219
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+/* DPU_SSPP_SRC */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+
+/* SSPP_MULTIRECT*/
+#define SSPP_SRC_SIZE_REC1 0x16C
+#define SSPP_SRC_XY_REC1 0x168
+#define SSPP_OUT_SIZE_REC1 0x160
+#define SSPP_OUT_XY_REC1 0x164
+#define SSPP_SRC_FORMAT_REC1 0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
+#define SSPP_SRC_OP_MODE_REC1 0x17C
+#define SSPP_MULTIRECT_OPMODE 0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
+#define SSPP_EXCL_REC_SIZE_REC1 0x184
+#define SSPP_EXCL_REC_XY_REC1 0x188
+
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR 0x3c
+#define SSPP_EXCL_REC_CTL 0x40
+#define SSPP_UBWC_STATIC_CTRL 0x44
+#define SSPP_FETCH_CONFIG 0x048
+#define SSPP_DANGER_LUT 0x60
+#define SSPP_SAFE_LUT 0x64
+#define SSPP_CREQ_LUT 0x68
+#define SSPP_QOS_CTRL 0x6C
+#define SSPP_DECIMATION_CONFIG 0xB4
+#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_CREQ_LUT_0 0x74
+#define SSPP_CREQ_LUT_1 0x78
+#define SSPP_SW_PIX_EXT_C0_LR 0x100
+#define SSPP_SW_PIX_EXT_C0_TB 0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
+#define SSPP_SW_PIX_EXT_C3_LR 0x120
+#define SSPP_SW_PIX_EXT_C3_TB 0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
+#define SSPP_TRAFFIC_SHAPER 0x130
+#define SSPP_CDP_CNTL 0x134
+#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
+#define SSPP_TRAFFIC_SHAPER_REC1 0x158
+#define SSPP_EXCL_REC_SIZE 0x1B4
+#define SSPP_EXCL_REC_XY 0x1B8
+#define SSPP_VIG_OP_MODE 0x0
+#define SSPP_VIG_CSC_10_OP_MODE 0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG 0x04
+#define COMP0_3_PHASE_STEP_X 0x10
+#define COMP0_3_PHASE_STEP_Y 0x14
+#define COMP1_2_PHASE_STEP_X 0x18
+#define COMP1_2_PHASE_STEP_Y 0x1c
+#define COMP0_3_INIT_PHASE_X 0x20
+#define COMP0_3_INIT_PHASE_Y 0x24
+#define COMP1_2_INIT_PHASE_X 0x28
+#define COMP1_2_INIT_PHASE_Y 0x2C
+#define VIG_0_QSEED2_SHARP 0x30
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN BIT(17)
+#define VIG_OP_MEM_PROT_CONT BIT(15)
+#define VIG_OP_MEM_PROT_VAL BIT(14)
+#define VIG_OP_MEM_PROT_SAT BIT(13)
+#define VIG_OP_MEM_PROT_HUE BIT(12)
+#define VIG_OP_HIST BIT(8)
+#define VIG_OP_SKY_COL BIT(7)
+#define VIG_OP_FOIL BIT(6)
+#define VIG_OP_SKIN_COL BIT(5)
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN BIT(0)
+#define CSC_10BIT_OFFSET 4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK 19200000
+
+static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+ int s_id,
+ u32 *idx)
+{
+ int rc = 0;
+ const struct dpu_sspp_sub_blks *sblk;
+
+ if (!ctx)
+ return -EINVAL;
+
+ sblk = ctx->cap->sblk;
+
+ switch (s_id) {
+ case DPU_SSPP_SRC:
+ *idx = sblk->src_blk.base;
+ break;
+ case DPU_SSPP_SCALER_QSEED2:
+ case DPU_SSPP_SCALER_QSEED3:
+ case DPU_SSPP_SCALER_RGB:
+ *idx = sblk->scaler_blk.base;
+ break;
+ case DPU_SSPP_CSC:
+ case DPU_SSPP_CSC_10BIT:
+ *idx = sblk->csc_blk.base;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode)
+{
+ u32 mode_mask;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (index == DPU_SSPP_RECT_SOLO) {
+ /*
+ * If the rect index is RECT_SOLO, we cannot expect a
+ * virtual plane sharing the same SSPP id, so disable
+ * multirect.
+ */
+ mode_mask = 0;
+ } else {
+ mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+ mode_mask |= index;
+ if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
+ mode_mask |= BIT(2);
+ else
+ mode_mask &= ~BIT(2);
+ }
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+ _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
+ !test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/*
+ * Setup source pixel format, flip and related op mode configuration
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 chroma_samp, unpack, src_format;
+ u32 opmode = 0;
+ u32 fast_clear = 0;
+ u32 op_mode_off, unpack_pat_off, format_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
+ op_mode_off = SSPP_SRC_OP_MODE;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+ format_off = SSPP_SRC_FORMAT;
+ } else {
+ op_mode_off = SSPP_SRC_OP_MODE_REC1;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+ format_off = SSPP_SRC_FORMAT_REC1;
+ }
+
+ c = &ctx->hw;
+ opmode = DPU_REG_READ(c, op_mode_off + idx);
+ opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+ MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+ if (flags & DPU_SSPP_FLIP_LR)
+ opmode |= MDSS_MDP_OP_FLIP_LR;
+ if (flags & DPU_SSPP_FLIP_UD)
+ opmode |= MDSS_MDP_OP_FLIP_UD;
+
+ chroma_samp = fmt->chroma_sample;
+ if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+ if (chroma_samp == DPU_CHROMA_H2V1)
+ chroma_samp = DPU_CHROMA_H1V2;
+ else if (chroma_samp == DPU_CHROMA_H1V2)
+ chroma_samp = DPU_CHROMA_H2V1;
+ }
+
+ src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+ if (flags & DPU_SSPP_ROT_90)
+ src_format |= BIT(11); /* ROT90 */
+
+ if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+ src_format |= BIT(8); /* SRCC3_EN */
+
+ if (flags & DPU_SSPP_SOLID_FILL)
+ src_format |= BIT(22);
+
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) | (fmt->element[0] << 0);
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
+
+ if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+ if (DPU_FORMAT_IS_UBWC(fmt))
+ opmode |= MDSS_MDP_OP_BWC_EN;
+ src_format |= (fmt->fetch_mode & 3) << 30; /* FRAME_FORMAT */
+ DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+ DPU_FETCH_CONFIG_RESET_VALUE |
+ ctx->mdp->highest_bank_bit << 18);
+ if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+ fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ fast_clear | (ctx->mdp->ubwc_swizzle) |
+ (ctx->mdp->highest_bank_bit << 4));
+ }
+ }
+
+ opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+ /* if this is YUV pixel format, enable CSC */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ src_format |= BIT(15);
+
+ if (DPU_FORMAT_IS_DX(fmt))
+ src_format |= BIT(14);
+
+ /* update scaler opmode, if appropriate */
+ if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+ else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+ _sspp_setup_csc10_opmode(ctx,
+ VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+
+ DPU_REG_WRITE(c, format_off + idx, src_format);
+ DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
+ DPU_REG_WRITE(c, op_mode_off + idx, opmode);
+
+ /* clear previous UBWC error */
+ DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u8 color;
+ u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+ const u32 bytemask = 0xff;
+ const u32 shortmask = 0xffff;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
+ return;
+
+ c = &ctx->hw;
+
+ /* program SW pixel extension override for all pipes */
+ for (color = 0; color < DPU_MAX_PLANES; color++) {
+ /* color 2 has the same set of registers as color 1 */
+ if (color == 2)
+ continue;
+
+ lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+ ((pe_ext->right_rpt[color] & bytemask) << 16)|
+ ((pe_ext->left_ftch[color] & bytemask) << 8)|
+ (pe_ext->left_rpt[color] & bytemask);
+
+ tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+ ((pe_ext->btm_rpt[color] & bytemask) << 16)|
+ ((pe_ext->top_ftch[color] & bytemask) << 8)|
+ (pe_ext->top_rpt[color] & bytemask);
+
+ tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+ pe_ext->num_ext_pxls_top[color] +
+ pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+ ((pe_ext->roi_w[color] +
+ pe_ext->num_ext_pxls_left[color] +
+ pe_ext->num_ext_pxls_right[color]) & shortmask);
+ }
+
+ /* color 0 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+ tot_req_pixels[0]);
+
+ /* color 1 and color 2 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+ tot_req_pixels[1]);
+
+ /* color 3 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+ tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *sspp,
+ struct dpu_hw_pixel_ext *pe,
+ void *scaler_cfg)
+{
+ u32 idx;
+ struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+ (void)pe;
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
+ || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+ return;
+
+ dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+ ctx->cap->sblk->scaler_blk.version,
+ sspp->layout.format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
+{
+ u32 idx;
+
+ if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+ return 0;
+
+ return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
+}
+
+/*
+ * dpu_hw_sspp_setup_rects() - programs source/destination rectangles
+ * and plane pitches for the given rect of the pipe
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_index)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+ u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+ src_size_off = SSPP_SRC_SIZE;
+ src_xy_off = SSPP_SRC_XY;
+ out_size_off = SSPP_OUT_SIZE;
+ out_xy_off = SSPP_OUT_XY;
+ } else {
+ src_size_off = SSPP_SRC_SIZE_REC1;
+ src_xy_off = SSPP_SRC_XY_REC1;
+ out_size_off = SSPP_OUT_SIZE_REC1;
+ out_xy_off = SSPP_OUT_XY_REC1;
+ }
+
+
+ /* src and dest rect programming */
+ src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+ src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+ drm_rect_width(&cfg->src_rect);
+ dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+ dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+ drm_rect_width(&cfg->dst_rect);
+
+ if (rect_index == DPU_SSPP_RECT_SOLO) {
+ ystride0 = (cfg->layout.plane_pitch[0]) |
+ (cfg->layout.plane_pitch[1] << 16);
+ ystride1 = (cfg->layout.plane_pitch[2]) |
+ (cfg->layout.plane_pitch[3] << 16);
+ } else {
+ ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+ ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
+ if (rect_index == DPU_SSPP_RECT_0) {
+ ystride0 = (ystride0 & 0xFFFF0000) |
+ (cfg->layout.plane_pitch[0] & 0x0000FFFF);
+ ystride1 = (ystride1 & 0xFFFF0000)|
+ (cfg->layout.plane_pitch[2] & 0x0000FFFF);
+ } else {
+ ystride0 = (ystride0 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[0] << 16) &
+ 0xFFFF0000);
+ ystride1 = (ystride1 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[2] << 16) &
+ 0xFFFF0000);
+ }
+ }
+
+ /* rectangle register programming */
+ DPU_REG_WRITE(c, src_size_off + idx, src_size);
+ DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
+ DPU_REG_WRITE(c, out_size_off + idx, dst_size);
+ DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
+
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+}
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ int i;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO) {
+ for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+ cfg->layout.plane_addr[i]);
+ } else if (rect_mode == DPU_SSPP_RECT_0) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ }
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
+ struct dpu_csc_cfg *data)
+{
+ u32 idx;
+ bool csc10 = false;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
+ return;
+
+ if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+ idx += CSC_10BIT_OFFSET;
+ csc10 = true;
+ }
+
+ dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color,
+ enum dpu_sspp_multirect_index rect_index)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+ else
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+ color);
+}
+
+static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+ cfg->creq_lut >> 32);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+ }
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+ u32 qos_ctrl = 0;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->vblank_en) {
+ qos_ctrl |= ((cfg->creq_vblank &
+ SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+ qos_ctrl |= ((cfg->danger_vblank &
+ SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+ qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+ }
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cdp_cfg *cfg)
+{
+ u32 idx;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->tile_amortize_enable)
+ cdp_cntl |= BIT(2);
+ if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
+static void _setup_layer_ops(struct dpu_hw_pipe *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_SSPP_SRC, &features)) {
+ c->ops.setup_format = dpu_hw_sspp_setup_format;
+ c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+ c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+ c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+ c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+ }
+
+ if (test_bit(DPU_SSPP_QOS, &features)) {
+ c->ops.setup_danger_safe_lut =
+ dpu_hw_sspp_setup_danger_safe_lut;
+ c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
+ c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+ }
+
+ if (test_bit(DPU_SSPP_CSC, &features) ||
+ test_bit(DPU_SSPP_CSC_10BIT, &features))
+ c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+ if (dpu_hw_sspp_multirect_enabled(c->cap))
+ c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+ if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+ c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+ c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+ }
+
+ if (test_bit(DPU_SSPP_CDP, &features))
+ c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if ((sspp < SSPP_MAX) && catalog && addr && b) {
+ for (i = 0; i < catalog->sspp_count; i++) {
+ if (sspp == catalog->sspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = catalog->sspp[i].base;
+ b->length = catalog->sspp[i].len;
+ b->hwversion = catalog->hwversion;
+ b->log_mask = DPU_DBG_MASK_SSPP;
+ return &catalog->sspp[i];
+ }
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ bool is_virtual_pipe)
+{
+ struct dpu_hw_pipe *hw_pipe;
+ struct dpu_sspp_cfg *cfg;
+ int rc;
+
+ if (!addr || !catalog)
+ return ERR_PTR(-EINVAL);
+
+ hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+ if (!hw_pipe)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(hw_pipe);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ hw_pipe->catalog = catalog;
+ hw_pipe->mdp = &catalog->mdp[0];
+ hw_pipe->idx = idx;
+ hw_pipe->cap = cfg;
+ _setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+ rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return hw_pipe;
+
+blk_init_error:
+ kzfree(hw_pipe);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
+{
+ if (ctx)
+ dpu_hw_blk_destroy(&ctx->base);
+ kfree(ctx);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644
index 000000000000..4d81e5f5ce1b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -0,0 +1,424 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_pipe;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR BIT(0)
+#define DPU_SSPP_FLIP_UD BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90 BIT(2)
+#define DPU_SSPP_ROT_90 BIT(3)
+#define DPU_SSPP_SOLID_FILL BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
+ (1UL << DPU_SSPP_SCALER_QSEED2) | \
+ (1UL << DPU_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+ DPU_SSPP_COMP_0,
+ DPU_SSPP_COMP_1_2,
+ DPU_SSPP_COMP_2,
+ DPU_SSPP_COMP_3,
+
+ DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Since such configs offer no benefit over
+ * SOLO mode, and to keep plane management simple,
+ * we don't support single-rect multirect configs.
+ */
+enum dpu_sspp_multirect_index {
+ DPU_SSPP_RECT_SOLO = 0,
+ DPU_SSPP_RECT_0,
+ DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+ DPU_SSPP_MULTIRECT_NONE = 0,
+ DPU_SSPP_MULTIRECT_PARALLEL,
+ DPU_SSPP_MULTIRECT_TIME_MX,
+};
+
+enum {
+ DPU_FRAME_LINEAR,
+ DPU_FRAME_TILE_A4X,
+ DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+ DPU_SCALE_FILTER_NEAREST = 0,
+ DPU_SCALE_FILTER_BIL,
+ DPU_SCALE_FILTER_PCMN,
+ DPU_SCALE_FILTER_CA,
+ DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+ DPU_SCALE_ALPHA_PIXEL_REP,
+ DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+ DPU_SCALE_2D_4X4,
+ DPU_SCALE_2D_CIR,
+ DPU_SCALE_1D_SEP,
+ DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+ u32 strength;
+ u32 edge_thr;
+ u32 smooth_thr;
+ u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+ /* scaling factors are enabled for this input layer */
+ uint8_t enable_pxl_ext;
+
+ int init_phase_x[DPU_MAX_PLANES];
+ int phase_step_x[DPU_MAX_PLANES];
+ int init_phase_y[DPU_MAX_PLANES];
+ int phase_step_y[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels extension in left, right, top and bottom direction
+ * for all color components. This pixel value for each color component
+ * should be the sum of the fetch and repeat pixels.
+ */
+ int num_ext_pxls_left[DPU_MAX_PLANES];
+ int num_ext_pxls_right[DPU_MAX_PLANES];
+ int num_ext_pxls_top[DPU_MAX_PLANES];
+ int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels that need to be overfetched from the source image
+ * in the left, right, top and bottom directions for scaling.
+ */
+ int left_ftch[DPU_MAX_PLANES];
+ int right_ftch[DPU_MAX_PLANES];
+ int top_ftch[DPU_MAX_PLANES];
+ int btm_ftch[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels that need to be repeated in the left, right, top
+ * and bottom directions for scaling.
+ */
+ int left_rpt[DPU_MAX_PLANES];
+ int right_rpt[DPU_MAX_PLANES];
+ int top_rpt[DPU_MAX_PLANES];
+ int btm_rpt[DPU_MAX_PLANES];
+
+ uint32_t roi_w[DPU_MAX_PLANES];
+ uint32_t roi_h[DPU_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+ enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+
+};
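+
+/*
+ * Relation between the fields above (per color component i), as the
+ * member comments describe; shown here only as an illustration:
+ *
+ *	num_ext_pxls_left[i] == left_ftch[i] + left_rpt[i];
+ *
+ * and likewise for the right, top and bottom directions.
+ */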
+
+/**
+ * struct dpu_hw_pipe_cfg : Pipe description
+ * @layout: format layout information for programming buffer to hardware
+ * @src_rect: src ROI; the caller accounts for operations such as
+ * decimation and flip when programming this field
+ * @dst_rect: destination ROI
+ * @index: index of the rectangle of SSPP
+ * @mode: parallel or time multiplex multirect mode
+ */
+struct dpu_hw_pipe_cfg {
+ struct dpu_hw_fmt_layout layout;
+ struct drm_rect src_rect;
+ struct drm_rect dst_rect;
+ enum dpu_sspp_multirect_index index;
+ enum dpu_sspp_multirect_mode mode;
+};
+
+/**
+ * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generating danger level based on fill level
+ * @safe_lut: LUT for generating safe level based on fill level
+ * @creq_lut: LUT for generating creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_pipe_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u64 creq_lut;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ bool vblank_en;
+ bool danger_safe_en;
+};
+
+/**
+ * CDP preload ahead address size
+ */
+enum {
+ DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ DPU_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead, one of
+ * DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ * DPU_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_pipe_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+ u64 size;
+ u64 time;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_sspp_ops {
+ /**
+ * setup_format - setup pixel format, cropping rectangle and flip
+ * @ctx: Pointer to pipe context
+ * @fmt: Pointer to format description
+ * @flags: Extra flags for format config
+ * @index: rectangle index in multirect
+ */
+ void (*setup_format)(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_rects - setup pipe ROI rectangles
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_rects)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_pe - setup pipe pixel extension
+ * @ctx: Pointer to pipe context
+ * @pe_ext: Pointer to pixel ext settings
+ */
+ void (*setup_pe)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext);
+
+ /**
+ * setup_sourceaddress - setup pipe source addresses
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_csc - setup color space conversion
+ * @ctx: Pointer to pipe context
+ * @data: Pointer to config structure
+ */
+ void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+
+ /**
+ * setup_solidfill - enable/disable colorfill
+ * @ctx: Pointer to pipe context
+ * @color: Fill color value
+ * @index: rectangle index in multirect
+ */
+ void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_multirect - setup multirect configuration
+ * @ctx: Pointer to pipe context
+ * @index: rectangle index in multirect
+ * @mode: parallel fetch / time multiplex multirect mode
+ */
+ void (*setup_multirect)(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode);
+
+ /**
+ * setup_sharpening - setup sharpening
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to config structure
+ */
+ void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_sharp_cfg *cfg);
+
+ /**
+ * setup_danger_safe_lut - setup danger safe LUTs
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ */
+ void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_creq_lut - setup CREQ LUT
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ */
+ void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_qos_ctrl - setup QoS control
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ */
+ void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_histogram - setup histograms
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to histogram configuration
+ */
+ void (*setup_histogram)(struct dpu_hw_pipe *ctx,
+ void *cfg);
+
+ /**
+ * setup_scaler - setup scaler
+ * @ctx: Pointer to pipe context
+ * @pipe_cfg: Pointer to pipe configuration
+ * @pe_cfg: Pointer to pixel extension configuration
+ * @scaler_cfg: Pointer to scaler configuration
+ */
+ void (*setup_scaler)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct dpu_hw_pixel_ext *pe_cfg,
+ void *scaler_cfg);
+
+ /**
+ * get_scaler_ver - get scaler h/w version
+ * @ctx: Pointer to pipe context
+ */
+ u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
+
+ /**
+ * setup_cdp - setup client driven prefetch
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to cdp configuration
+ */
+ void (*setup_cdp)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cdp_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_pipe - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_pipe {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_mdp_cfg *mdp;
+
+ /* Pipe */
+ enum dpu_sspp idx;
+ const struct dpu_sspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_sspp_ops ops;
+};
+
+/**
+ * to_dpu_hw_pipe - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pipe, base);
+}
+
+/**
+ * dpu_hw_sspp_init - initializes the sspp hw driver object
+ * Should be called once per pipe before accessing it.
+ * @idx: Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ * @is_virtual_pipe: is this a virtual pipe
+ */
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ bool is_virtual_pipe);
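+
+/*
+ * Typical usage sketch (error handling trimmed; "mmio" and "catalog"
+ * stand for the caller's mapped register base and parsed catalog and
+ * are assumptions here, not fixed names):
+ *
+ *	struct dpu_hw_pipe *pipe;
+ *
+ *	pipe = dpu_hw_sspp_init(SSPP_VIG0, mmio, catalog, false);
+ *	if (IS_ERR(pipe))
+ *		return PTR_ERR(pipe);
+ *	...
+ *	dpu_hw_sspp_destroy(pipe);
+ */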
+
+/**
+ * dpu_hw_sspp_destroy(): Destroys SSPP driver context;
+ * should be called during HW pipe cleanup.
+ * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+
+#endif /*_DPU_HW_SSPP_H */
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644
index 000000000000..db2798e862fc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -0,0 +1,398 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define SSPP_SPARE 0x28
+#define UBWC_STATIC 0x144
+
+#define FLD_SPLIT_DISPLAY_CMD BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS 0x360
+#define SAFE_STATUS 0x364
+
+#define TE_LINE_INTERVAL 0x3F4
+
+#define TRAFFIC_SHAPER_EN BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+
+#define MDP_WD_TIMER_0_CTL 0x380
+#define MDP_WD_TIMER_0_CTL2 0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+#define MDP_WD_TIMER_1_CTL 0x390
+#define MDP_WD_TIMER_1_CTL2 0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
+#define MDP_WD_TIMER_2_CTL 0x420
+#define MDP_WD_TIMER_2_CTL2 0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
+#define MDP_WD_TIMER_3_CTL 0x430
+#define MDP_WD_TIMER_3_CTL2 0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
+#define MDP_WD_TIMER_4_CTL 0x440
+#define MDP_WD_TIMER_4_CTL2 0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
+
+#define MDP_TICK_COUNT 16
+#define XO_CLK_RATE 19200
+#define MS_TICKS_IN_SEC 1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+ ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * (fps))))
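+
+/*
+ * Worked example (illustrative): for a 60 fps panel,
+ * (1000 * 19200) / (16 * 60) = 19200000 / 960 = 20000,
+ * i.e. the watchdog timer is loaded with 20000 ticks so it
+ * expires once per frame at the XO clock rate.
+ */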
+
+#define DCE_SEL 0x450
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 upper_pipe = 0;
+ u32 lower_pipe = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->en) {
+ if (cfg->mode == INTF_MODE_CMD) {
+ lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+ /* interface controlling sw trigger */
+ if (cfg->intf == INTF_2)
+ lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+ else
+ lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = lower_pipe;
+ } else {
+ if (cfg->intf == INTF_2) {
+ lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+ } else {
+ lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+ }
+ }
+ }
+
+ DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
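+
+/*
+ * Sketch of a dual-pipe command-mode setup (values illustrative;
+ * INTF_1 here is just an example interface id):
+ *
+ *	struct split_pipe_cfg cfg = {
+ *		.en = true,
+ *		.mode = INTF_MODE_CMD,
+ *		.intf = INTF_1,
+ *		.split_flush_en = true,
+ *	};
+ *	mdp->ops.setup_split_pipe(mdp, &cfg);
+ */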
+
+static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 out_ctl = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->intf_en)
+ out_ctl |= BIT(19);
+
+ DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off, bit_off;
+ u32 reg_val, new_val;
+ bool clk_forced_on;
+
+ if (!mdp)
+ return false;
+
+ c = &mdp->hw;
+
+ if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+ return false;
+
+ reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+ bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+ reg_val = DPU_REG_READ(c, reg_off);
+
+ if (enable)
+ new_val = reg_val | BIT(bit_off);
+ else
+ new_val = reg_val & ~BIT(bit_off);
+
+ DPU_REG_WRITE(c, reg_off, new_val);
+
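+ /*
+ * Report whether the force-on bit was clear before this write,
+ * i.e. whether this call is the one that forced the clock on.
+ */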
+ clk_forced_on = !(reg_val & BIT(bit_off));
+
+ return clk_forced_on;
+}
+
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, DANGER_STATUS);
+ status->mdp = (value >> 0) & 0x3;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+ static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+ if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+ return;
+
+ c = &mdp->hw;
+ reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
+ for (i = 0; i < cfg->pp_count; i++) {
+ int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+ if (pp_idx >= ARRAY_SIZE(pp_offset))
+ continue;
+
+ reg &= ~(0xf << pp_offset[pp_idx]);
+ reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+ }
+ DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+ if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+ cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+ switch (cfg->vsync_source) {
+ case DPU_VSYNC_SOURCE_WD_TIMER_4:
+ wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_4_CTL;
+ wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_3:
+ wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_3_CTL;
+ wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_2:
+ wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_2_CTL;
+ wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_1:
+ wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_1_CTL;
+ wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_0:
+ default:
+ wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_0_CTL;
+ wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+ break;
+ }
+
+ DPU_REG_WRITE(c, wd_load_value,
+ CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+ DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+ reg = DPU_REG_READ(c, wd_ctl2);
+ reg |= BIT(8); /* enable heartbeat timer */
+ reg |= BIT(0); /* enable WD timer */
+ DPU_REG_WRITE(c, wd_ctl2, reg);
+
+ /* make sure that timers are enabled/disabled for vsync state */
+ wmb();
+ }
+}
+
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, SAFE_STATUS);
+ status->mdp = (value >> 0) & 0x1;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+static void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_blk_reg_map c;
+
+ if (!mdp || !m)
+ return;
+
+ if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
+ return;
+
+ /* force blk offset to zero to access beginning of register region */
+ c = mdp->hw;
+ c.blk_off = 0x0;
+ DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!mdp)
+ return;
+
+ c = &mdp->hw;
+
+ DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+ ops->setup_cdm_output = dpu_hw_setup_cdm_output;
+ ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+ ops->get_danger_status = dpu_hw_get_danger_status;
+ ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+ ops->get_safe_status = dpu_hw_get_safe_status;
+ ops->reset_ubwc = dpu_hw_reset_ubwc;
+ ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
+static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < m->mdp_count; i++) {
+ if (mdp == m->mdp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mdp[i].base;
+ b->length = m->mdp[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_TOP;
+ return &m->mdp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mdp *mdp;
+ const struct dpu_mdp_cfg *cfg;
+ int rc;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &mdp->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(mdp);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ mdp->idx = idx;
+ mdp->caps = cfg;
+ _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+ rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+
+ return mdp;
+
+blk_init_error:
+ kzfree(mdp);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+ if (mdp)
+ dpu_hw_blk_destroy(&mdp->base);
+ kfree(mdp);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644
index 000000000000..899925aaa6d7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+ bool en;
+ bool rd_client;
+ u32 client_id;
+ u32 bpc_denom;
+ u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en : Enable/disable dual pipe configuration
+ * @mode : Panel interface mode
+ * @intf : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ * flushed
+ */
+struct split_pipe_cfg {
+ bool en;
+ enum dpu_intf_mode mode;
+ enum dpu_intf intf;
+ bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @intf_en : enable/disable interface output
+ */
+struct cdm_output_cfg {
+ bool intf_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+ u8 mdp;
+ u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure the vsync source and, if
+ * required, the watchdog timers.
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+ u32 pp_count;
+ u32 frame_rate;
+ u32 ppnumber[PINGPONG_MAX];
+ u32 vsync_source;
+};
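+
+/*
+ * Example (illustrative values): drive PINGPONG_0 from watchdog
+ * timer 0 at 60 fps:
+ *
+ *	struct dpu_vsync_source_cfg vsync = {
+ *		.pp_count = 1,
+ *		.frame_rate = 60,
+ *		.ppnumber = { PINGPONG_0 },
+ *		.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0,
+ *	};
+ *	mdp->ops.setup_vsync_source(mdp, &vsync);
+ */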
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+ /**
+ * setup_split_pipe() : Registers are not double buffered; this
+ * function should be called before timing control enable
+ * @mdp : mdp top context driver
+ * @p : upper and lower part of pipe configuration
+ */
+ void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *p);
+
+ /**
+ * setup_cdm_output() : Setup selection control of the cdm data path
+ * @mdp : mdp top context driver
+ * @cfg : cdm output configuration
+ */
+ void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg);
+
+ /**
+ * setup_traffic_shaper() : Setup traffic shaper control
+ * @mdp : mdp top context driver
+ * @cfg : traffic shaper configuration
+ */
+ void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+ struct traffic_shaper_cfg *cfg);
+
+ /**
+ * setup_clk_force_ctrl - set clock force control
+ * @mdp: mdp top context driver
+ * @clk_ctrl: clock to be controlled
+ * @enable: force on enable
+ * @return: true if the clock was forced on by this call
+ */
+ bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+ /**
+ * get_danger_status - get danger status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * setup_vsync_source - setup vsync source configuration details
+ * @mdp: mdp top context driver
+ * @cfg: vsync source selection configuration
+ */
+ void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg);
+
+ /**
+ * get_safe_status - get safe status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * reset_ubwc - reset top level UBWC configuration
+ * @mdp: mdp top context driver
+ * @m: pointer to mdss catalog data
+ */
+ void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
+
+ /**
+ * intf_audio_select - select the external interface for audio
+ * @mdp: mdp top context driver
+ */
+ void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* top */
+ enum dpu_mdp idx;
+ const struct dpu_mdp_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * to_dpu_hw_mdp - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mdp, base);
+}
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed idx
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
new file mode 100644
index 000000000000..4cabae480a7b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -0,0 +1,368 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+/* use a file-static variable for debugfs access */
+static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
+
+/* DPU_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION 0x00
+#define QSEED3_OP_MODE 0x04
+#define QSEED3_RGB2Y_COEFF 0x08
+#define QSEED3_PHASE_INIT 0x0C
+#define QSEED3_PHASE_STEP_Y_H 0x10
+#define QSEED3_PHASE_STEP_Y_V 0x14
+#define QSEED3_PHASE_STEP_UV_H 0x18
+#define QSEED3_PHASE_STEP_UV_V 0x1C
+#define QSEED3_PRELOAD 0x20
+#define QSEED3_DE_SHARPEN 0x24
+#define QSEED3_DE_SHARPEN_CTL 0x28
+#define QSEED3_DE_SHAPE_CTL 0x2C
+#define QSEED3_DE_THRESHOLD 0x30
+#define QSEED3_DE_ADJUST_DATA_0 0x34
+#define QSEED3_DE_ADJUST_DATA_1 0x38
+#define QSEED3_DE_ADJUST_DATA_2 0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
+#define QSEED3_SRC_SIZE_UV 0x44
+#define QSEED3_DST_SIZE 0x48
+#define QSEED3_COEF_LUT_CTRL 0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT 0
+#define QSEED3_COEF_LUT_DIR_BIT 1
+#define QSEED3_COEF_LUT_Y_CIR_BIT 2
+#define QSEED3_COEF_LUT_UV_CIR_BIT 3
+#define QSEED3_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3_BUFFER_CTRL 0x50
+#define QSEED3_CLK_CTRL0 0x54
+#define QSEED3_CLK_CTRL1 0x58
+#define QSEED3_CLK_STATUS 0x5C
+#define QSEED3_MISR_CTRL 0x70
+#define QSEED3_MISR_SIGNATURE_0 0x74
+#define QSEED3_MISR_SIGNATURE_1 0x78
+#define QSEED3_PHASE_INIT_Y_H 0x90
+#define QSEED3_PHASE_INIT_Y_V 0x94
+#define QSEED3_PHASE_INIT_UV_H 0x98
+#define QSEED3_PHASE_INIT_UV_V 0x9C
+#define QSEED3_COEF_LUT 0x100
+#define QSEED3_FILTERS 5
+#define QSEED3_LUT_REGIONS 4
+#define QSEED3_CIRCULAR_LUTS 9
+#define QSEED3_SEPARABLE_LUTS 10
+#define QSEED3_LUT_SIZE 60
+#define QSEED3_ENABLE 2
+#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name)
+{
+ /* don't need to mutex protect this */
+ if (c->log_mask & dpu_hw_util_log_mask)
+ DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+ name, c->blk_off + reg_off, val);
+ writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+}
+
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
+{
+ return readl_relaxed(c->base_off + c->blk_off + reg_off);
+}
+
+u32 *dpu_hw_util_get_log_mask_ptr(void)
+{
+ return &dpu_hw_util_log_mask;
+}
+
+static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int i, j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset, lut_len;
+ u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+ static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+ {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+ {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+ {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+ {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+ {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+ };
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+ (scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->dir_lut;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->cir_lut +
+ scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[2] = scaler3_cfg->cir_lut +
+ scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[3] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[4] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+ lut_addr = QSEED3_COEF_LUT + offset
+ + off_tbl[filter][i][1];
+ lut_len = off_tbl[filter][i][0] << 2;
+ for (j = 0; j < lut_len; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+
+}
+
+static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
+{
+ u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+ u32 adjust_a, adjust_b, adjust_c;
+
+ if (!de_cfg->enable)
+ return;
+
+ sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+ ((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+ sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+ ((de_cfg->prec_shift & 0x7) << 13) |
+ ((de_cfg->clip & 0x7) << 16);
+
+ shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+ ((de_cfg->thr_dieout & 0x3FF) << 16);
+
+ de_thr = (de_cfg->thr_low & 0x3FF) |
+ ((de_cfg->thr_high & 0x3FF) << 16);
+
+ adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+ ((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+ adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+ ((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+ adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+ ((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
+
+}
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format)
+{
+ u32 op_mode = 0;
+ u32 phase_init, preload, src_y_rgb, src_uv, dst;
+
+ if (!scaler3_cfg->enable)
+ goto end;
+
+ op_mode |= BIT(0);
+ op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+ if (format && DPU_FORMAT_IS_YUV(format)) {
+ op_mode |= BIT(12);
+ op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+ }
+
+ op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+ op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+ preload =
+ ((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+ ((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+ ((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+ ((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+ src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+ src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+ dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+ ((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+ if (scaler3_cfg->de.enable) {
+ _dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
+ op_mode |= BIT(8);
+ }
+
+ if (scaler3_cfg->lut_flag)
+ _dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
+ scaler_offset);
+
+ if (scaler_version == 0x1002) {
+ phase_init =
+ ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+ ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+ ((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+ ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
+ } else {
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
+ scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
+ scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
+ scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
+ scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+ }
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
+ scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
+ scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
+ scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
+ scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
+
+ DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
+
+end:
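+ /*
+ * Note: the OP_MODE write below also runs on the disable path, so
+ * the format-dependent bits (BIT(14) for non-DX formats, BIT(10)
+ * plus the alpha filter selection for alpha formats) stay current
+ * even when scaling is off.
+ */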
+ if (format && !DPU_FORMAT_IS_DX(format))
+ op_mode |= BIT(14);
+
+ if (format && format->alpha_enable) {
+ op_mode |= BIT(10);
+ if (scaler_version == 0x1002)
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+ else
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+ }
+
+ DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
+}
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset)
+{
+ return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
+}
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct dpu_csc_cfg *data, bool csc10)
+{
+ static const u32 matrix_shift = 7;
+ u32 clamp_shift = csc10 ? 16 : 8;
+ u32 val;
+
+ /* matrix coeff - convert S15.16 to S4.9 */
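+ /*
+ * e.g. a unity coefficient, 0x00010000 in S15.16, becomes 0x200
+ * (1.0 in S4.9) after the 7-bit right shift.
+ */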
+ val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off, val);
+ val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
+ val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
+ val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
+ val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+ DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+ /* Pre clamp */
+ val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
+ val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
+ val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+ /* Post clamp */
+ val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
+ val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
+ val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+ /* Pre-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+ /* Post-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
new file mode 100644
index 000000000000..1240f505ca53
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -0,0 +1,348 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_UTIL_H
+#define _DPU_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "dpu_hw_mdss.h"
+
+#define REG_MASK(n) ((BIT(n)) - 1)
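+/* e.g. REG_MASK(3) == 0x7, i.e. the low n bits set */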
+struct dpu_format_extended;
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off: mdp register mapped offset
+ * @blk_off: pipe offset relative to mdss offset
+ * @length: length of the register block
+ * @xin_id: xin id
+ * @hwversion: mdss hw version number
+ * @log_mask: log mask for this block
+ */
+struct dpu_hw_blk_reg_map {
+ void __iomem *base_off;
+ u32 blk_off;
+ u32 length;
+ u32 xin_id;
+ u32 hwversion;
+ u32 log_mask;
+};
+
+/**
+ * struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable: detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @ clip: clip shift
+ * @ limit: limit value
+ * @ thr_quiet: quiet threshold
+ * @ thr_dieout: dieout threshold
+ * @ thr_low: low threshold
+ * @ thr_high: high threshold
+ * @ prec_shift: precision shift
+ * @ adjust_a: A-coefficients for mapping curve
+ * @ adjust_b: B-coefficients for mapping curve
+ * @ adjust_c: C-coefficients for mapping curve
+ */
+struct dpu_hw_scaler3_de_cfg {
+ u32 enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+
+/**
+ * struct dpu_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable: scaler enable
+ * @dir_en: direction detection block enable
+ * @ init_phase_x: horizontal initial phase
+ * @ phase_step_x: horizontal phase step
+ * @ init_phase_y: vertical initial phase
+ * @ phase_step_y: vertical phase step
+ * @ preload_x: horizontal preload value
+ * @ preload_y: vertical preload value
+ * @ src_width: source width
+ * @ src_height: source height
+ * @ dst_width: destination width
+ * @ dst_height: destination height
+ * @ y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @ uv_filter_cfg: uv plane filter configuration
+ * @ alpha_filter_cfg: alpha filter configuration
+ * @ blend_cfg: blend coefficients configuration
+ * @ lut_flag: scaler LUT update flags
+ * 0x1 swap LUT bank
+ * 0x2 update 2D filter LUT
+ * 0x4 update y circular filter LUT
+ * 0x8 update uv circular filter LUT
+ * 0x10 update y separable filter LUT
+ * 0x20 update uv separable filter LUT
+ * @ dir_lut_idx: 2D filter LUT index
+ * @ y_rgb_cir_lut_idx: y circular filter LUT index
+ * @ uv_cir_lut_idx: uv circular filter LUT index
+ * @ y_rgb_sep_lut_idx: y separable filter LUT index
+ * @ uv_sep_lut_idx: uv separable filter LUT index
+ * @ dir_lut: pointer to 2D LUT
+ * @ cir_lut: pointer to circular filter LUT
+ * @ sep_lut: pointer to separable filter LUT
+ * @ de: detail enhancer configuration
+ */
+struct dpu_hw_scaler3_cfg {
+ u32 enable;
+ u32 dir_en;
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ u32 preload_x[DPU_MAX_PLANES];
+ u32 preload_y[DPU_MAX_PLANES];
+ u32 src_width[DPU_MAX_PLANES];
+ u32 src_height[DPU_MAX_PLANES];
+
+ u32 dst_width;
+ u32 dst_height;
+
+ u32 y_rgb_filter_cfg;
+ u32 uv_filter_cfg;
+ u32 alpha_filter_cfg;
+ u32 blend_cfg;
+
+ u32 lut_flag;
+ u32 dir_lut_idx;
+
+ u32 y_rgb_cir_lut_idx;
+ u32 uv_cir_lut_idx;
+ u32 y_rgb_sep_lut_idx;
+ u32 uv_sep_lut_idx;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_hw_scaler3_de_cfg de;
+};
+
+struct dpu_hw_scaler3_lut_cfg {
+ bool is_configured;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+};
+
+/**
+ * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch: Number of extra pixels to overfetch from left
+ * @right_ftch: Number of extra pixels to overfetch from right
+ * @top_ftch: Number of extra lines to overfetch from top
+ * @btm_ftch: Number of extra lines to overfetch from bottom
+ * @left_rpt: Number of extra pixels to repeat from left
+ * @right_rpt: Number of extra pixels to repeat from right
+ * @top_rpt: Number of extra lines to repeat from top
+ * @btm_rpt: Number of extra lines to repeat from bottom
+ */
+struct dpu_drm_pix_ext_v1 {
+ /*
+ * Number of pixels of extension in the left, right, top and bottom
+ * directions for all color components.
+ */
+ int32_t num_ext_pxls_lr[DPU_MAX_PLANES];
+ int32_t num_ext_pxls_tb[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels that need to be overfetched from the source
+ * image in the left, right, top and bottom directions for scaling.
+ */
+ int32_t left_ftch[DPU_MAX_PLANES];
+ int32_t right_ftch[DPU_MAX_PLANES];
+ int32_t top_ftch[DPU_MAX_PLANES];
+ int32_t btm_ftch[DPU_MAX_PLANES];
+ /*
+ * Number of pixels that need to be repeated in the left, right,
+ * top and bottom directions for scaling.
+ */
+ int32_t left_rpt[DPU_MAX_PLANES];
+ int32_t right_rpt[DPU_MAX_PLANES];
+ int32_t top_rpt[DPU_MAX_PLANES];
+ int32_t btm_rpt[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable: Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip: Clip coefficient
+ * @limit: Detail enhancer limit factor
+ * @thr_quiet: Quiet zone threshold
+ * @thr_dieout: Die-out zone threshold
+ * @thr_low: Linear zone left threshold
+ * @thr_high: Linear zone right threshold
+ * @prec_shift: Detail enhancer precision
+ * @adjust_a: Mapping curves A coefficients
+ * @adjust_b: Mapping curves B coefficients
+ * @adjust_c: Mapping curves C coefficients
+ */
+struct dpu_drm_de_v1 {
+ uint32_t enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
+ * @enable: Scaler enable
+ * @dir_en: Direction detection block enable
+ * @pe: Pixel extension settings
+ * @horz_decimate: Horizontal decimation factor
+ * @vert_decimate: Vertical decimation factor
+ * @init_phase_x: Initial scaler phase values for x
+ * @phase_step_x: Phase step values for x
+ * @init_phase_y: Initial scaler phase values for y
+ * @phase_step_y: Phase step values for y
+ * @preload_x: Horizontal preload value
+ * @preload_y: Vertical preload value
+ * @src_width: Source width
+ * @src_height: Source height
+ * @dst_width: Destination width
+ * @dst_height: Destination height
+ * @y_rgb_filter_cfg: Y/RGB plane filter configuration
+ * @uv_filter_cfg: UV plane filter configuration
+ * @alpha_filter_cfg: Alpha filter configuration
+ * @blend_cfg: Selection of blend coefficients
+ * @lut_flag: LUT configuration flags
+ * @dir_lut_idx: 2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx: UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx: UV separable LUT index
+ * @de: Detail enhancer settings
+ */
+struct dpu_drm_scaler_v2 {
+ /*
+ * General definitions
+ */
+ uint32_t enable;
+ uint32_t dir_en;
+
+ /*
+ * Pix ext settings
+ */
+ struct dpu_drm_pix_ext_v1 pe;
+
+ /*
+ * Decimation settings
+ */
+ uint32_t horz_decimate;
+ uint32_t vert_decimate;
+
+ /*
+ * Phase settings
+ */
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ uint32_t preload_x[DPU_MAX_PLANES];
+ uint32_t preload_y[DPU_MAX_PLANES];
+ uint32_t src_width[DPU_MAX_PLANES];
+ uint32_t src_height[DPU_MAX_PLANES];
+
+ uint32_t dst_width;
+ uint32_t dst_height;
+
+ uint32_t y_rgb_filter_cfg;
+ uint32_t uv_filter_cfg;
+ uint32_t alpha_filter_cfg;
+ uint32_t blend_cfg;
+
+ uint32_t lut_flag;
+ uint32_t dir_lut_idx;
+
+ /* for Y(RGB) and UV planes*/
+ uint32_t y_rgb_cir_lut_idx;
+ uint32_t uv_cir_lut_idx;
+ uint32_t y_rgb_sep_lut_idx;
+ uint32_t uv_sep_lut_idx;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_drm_de_v1 de;
+};
+
+
+u32 *dpu_hw_util_get_log_mask_ptr(void);
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name);
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
+
+#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
+#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
+
+#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+void *dpu_hw_util_get_dir(void);
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format);
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset);
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct dpu_csc_cfg *data, bool csc10);
+
+#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644
index 000000000000..d43905525f92
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_dbg.h"
+
+#define VBIF_VERSION 0x0000
+#define VBIF_CLK_FORCE_CTRL0 0x0008
+#define VBIF_CLK_FORCE_CTRL1 0x000C
+#define VBIF_QOS_REMAP_00 0x0020
+#define VBIF_QOS_REMAP_01 0x0024
+#define VBIF_QOS_REMAP_10 0x0028
+#define VBIF_QOS_REMAP_11 0x002C
+#define VBIF_WRITE_GATHER_EN 0x00AC
+#define VBIF_IN_RD_LIM_CONF0 0x00B0
+#define VBIF_IN_RD_LIM_CONF1 0x00B4
+#define VBIF_IN_RD_LIM_CONF2 0x00B8
+#define VBIF_IN_WR_LIM_CONF0 0x00C0
+#define VBIF_IN_WR_LIM_CONF1 0x00C4
+#define VBIF_IN_WR_LIM_CONF2 0x00C8
+#define VBIF_OUT_RD_LIM_CONF0 0x00D0
+#define VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
+#define VBIF_XIN_PND_ERR 0x0190
+#define VBIF_XIN_SRC_ERR 0x0194
+#define VBIF_XIN_CLR_ERR 0x019C
+#define VBIF_XIN_HALT_CTRL0 0x0200
+#define VBIF_XIN_HALT_CTRL1 0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 pnd, src;
+
+ if (!vbif)
+ return;
+ c = &vbif->hw;
+ pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+ src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+ if (pnd_errors)
+ *pnd_errors = pnd;
+ if (src_errors)
+ *src_errors = src;
+
+ DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume 4 bits per bit field, 8 fields per 32-bit register so
+ * 16 bit fields maximum across two registers
+ */
+ if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+ return;
+
+ c = &vbif->hw;
+
+ if (xin_id >= 8) {
+ xin_id -= 8;
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+ } else {
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+ }
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (value & 0x7) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
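+
+/*
+ * Layout note (derived from the code above, for illustration): memtype
+ * fields are 4 bits wide, eight per register, so e.g. xin_id 10 maps to
+ * VBIF_OUT_AXI_AMEMTYPE_CONF1 bits [11:8].
+ */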
+
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0xFF << bit_off);
+ reg_val |= (limit) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
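+
+/*
+ * Layout note (derived from the code above, for illustration): each
+ * *_LIM_CONF register packs four 8-bit limits, so e.g. xin_id 5 with
+ * rd == true lands in VBIF_IN_RD_LIM_CONF1 bits [15:8].
+ */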
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+ u32 limit;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ limit = (reg_val >> bit_off) & 0xFF;
+
+ return limit;
+}
+
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+ if (enable)
+ reg_val |= BIT(xin_id);
+ else
+ reg_val &= ~BIT(xin_id);
+
+ DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+ return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+ if (!vbif)
+ return;
+
+ c = &vbif->hw;
+
+ reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+ reg_shift = (xin_id & 0x7) * 4;
+
+ reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+ reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+ mask = 0x7 << reg_shift;
+
+ reg_val &= ~mask;
+ reg_val |= (remap_level << reg_shift) & mask;
+
+ reg_val_lvl &= ~mask;
+ reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
+
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val;
+
+ if (!vbif || xin_id >= MAX_XIN_COUNT)
+ return;
+
+ c = &vbif->hw;
+
+ reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+ reg_val |= BIT(xin_id);
+ DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+ unsigned long cap)
+{
+ ops->set_limit_conf = dpu_hw_set_limit_conf;
+ ops->get_limit_conf = dpu_hw_get_limit_conf;
+ ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+ ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+ if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+ ops->set_qos_remap = dpu_hw_set_qos_remap;
+ ops->set_mem_type = dpu_hw_set_mem_type;
+ ops->clear_errors = dpu_hw_clear_errors;
+ ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->vbif_count; i++) {
+ if (vbif == m->vbif[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->vbif[i].base;
+ b->length = m->vbif[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_VBIF;
+ return &m->vbif[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_vbif *c;
+ const struct dpu_vbif_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_vbif_ops(&c->ops, c->cap->features);
+
+ /* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+ return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+ kfree(vbif);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644
index 000000000000..471ff673c045
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_vbif_ops {
+ /**
+ * set_limit_conf - set transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @limit: outstanding transaction limit
+ */
+ void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit);
+
+ /**
+ * get_limit_conf - get transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @return: outstanding transaction limit
+ */
+ u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd);
+
+ /**
+ * set_halt_ctrl - set halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @enable: halt control enable
+ */
+ void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable);
+
+ /**
+ * get_halt_ctrl - get halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @return: halt control enable
+ */
+ bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id);
+
+ /**
+ * set_qos_remap - set QoS priority remap
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @level: priority level
+ * @remap_level: remapped level
+ */
+ void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level);
+
+ /**
+ * set_mem_type - set memory type
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @value: memory type value
+ */
+ void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value);
+
+ /**
+ * clear_errors - clear any vbif errors
+ * This function clears any detected pending/source errors
+ * on the VBIF interface, and optionally returns the detected
+ * error mask(s).
+ * @vbif: vbif context driver
+ * @pnd_errors: pointer to pending error reporting variable
+ * @src_errors: pointer to source error reporting variable
+ */
+ void (*clear_errors)(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors);
+
+ /**
+ * set_write_gather_en - set write_gather enable
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ */
+ void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+ /* base */
+ struct dpu_hw_blk_reg_map hw;
+
+ /* vbif */
+ enum dpu_vbif idx;
+ const struct dpu_vbif_cfg *cap;
+
+ /* ops */
+ struct dpu_hw_vbif_ops ops;
+};
+
+/**
+ * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644
index 000000000000..5b2bc9b65b15
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/*
+ * MDP TOP block register offsets and bit field defines
+ */
+#define DISP_INTF_SEL 0x004
+#define INTR_EN 0x010
+#define INTR_STATUS 0x014
+#define INTR_CLEAR 0x018
+#define INTR2_EN 0x008
+#define INTR2_STATUS 0x00c
+#define INTR2_CLEAR 0x02c
+#define HIST_INTR_EN 0x01c
+#define HIST_INTR_STATUS 0x020
+#define HIST_INTR_CLEAR 0x024
+#define INTF_INTR_EN 0x1C0
+#define INTF_INTR_STATUS 0x1C4
+#define INTF_INTR_CLEAR 0x1C8
+#define SPLIT_DISPLAY_EN 0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define HW_EVENTS_CTL 0x37C
+#define CLK_CTRL3 0x3A8
+#define CLK_STATUS3 0x3AC
+#define CLK_CTRL4 0x3B0
+#define CLK_STATUS4 0x3B4
+#define CLK_CTRL5 0x3B8
+#define CLK_STATUS5 0x3BC
+#define CLK_CTRL7 0x3D0
+#define CLK_STATUS7 0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
+#define INTF_SW_RESET_MASK 0x3FC
+#define HDMI_DP_CORE_SELECT 0x408
+#define MDP_OUT_CTL_0 0x410
+#define MDP_VSYNC_SEL 0x414
+#define DCE_SEL 0x450
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
new file mode 100644
index 000000000000..790d39f816dc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -0,0 +1,203 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "dpu_io_util.h"
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+ int i;
+
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+}
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+ rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
+ if (rc) {
+ DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name, rc);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ for (i--; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+
+ return rc;
+}
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_arry[i].clk) {
+ if (clk_arry[i].type != DSS_CLK_AHB) {
+ DEV_DBG("%pS->%s: '%s' rate %ld\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name,
+ clk_arry[i].rate);
+ rc = clk_set_rate(clk_arry[i].clk,
+ clk_arry[i].rate);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ break;
+ }
+ }
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+ int i, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ DEV_DBG("%pS->%s: enable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ if (clk_arry[i].clk) {
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ }
+
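+ /* on any failure, unwind the clocks enabled so far */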
+ if (rc) {
+ msm_dss_enable_clk(&clk_arry[i],
+ i, false);
+ break;
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: disable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+
+ if (clk_arry[i].clk)
+ clk_disable_unprepare(clk_arry[i].clk);
+ else
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ }
+ }
+
+ return rc;
+}
+
+int msm_dss_parse_clock(struct platform_device *pdev,
+ struct dss_module_power *mp)
+{
+ int i, rc = 0;
+ const char *clock_name;
+ int num_clk = 0;
+
+ if (!pdev || !mp)
+ return -EINVAL;
+
+ mp->num_clk = 0;
+ num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
+ if (num_clk <= 0) {
+ pr_debug("clocks are not defined\n");
+ return 0;
+ }
+
+ mp->clk_config = devm_kzalloc(&pdev->dev,
+ sizeof(struct dss_clk) * num_clk,
+ GFP_KERNEL);
+ if (!mp->clk_config)
+ return -ENOMEM;
+
+ for (i = 0; i < num_clk; i++) {
+ rc = of_property_read_string_index(pdev->dev.of_node,
+ "clock-names", i,
+ &clock_name);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+ i);
+ goto err;
+ }
+ strlcpy(mp->clk_config[i].clk_name, clock_name,
+ sizeof(mp->clk_config[i].clk_name));
+
+ mp->clk_config[i].type = DSS_CLK_AHB;
+ }
+
+ rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+ goto err;
+ }
+
+ rc = of_clk_set_defaults(pdev->dev.of_node, false);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+ goto err;
+ }
+
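+ /*
+ * Clocks that come up with a firmware-assigned rate are treated as
+ * rate-managed (PCLK) clocks; the rest remain AHB clocks whose rate
+ * is controlled through rpm.
+ */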
+ for (i = 0; i < num_clk; i++) {
+ u32 rate = clk_get_rate(mp->clk_config[i].clk);
+ if (!rate)
+ continue;
+ mp->clk_config[i].rate = rate;
+ mp->clk_config[i].type = DSS_CLK_PCLK;
+ }
+
+ mp->num_clk = num_clk;
+ return 0;
+
+err:
+ msm_dss_put_clk(mp->clk_config, num_clk);
+ return rc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
new file mode 100644
index 000000000000..bc07381d7429
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IO_UTIL_H__
+#define __DPU_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args)
+#define DEV_INFO(fmt, args...) pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
+
+struct dss_gpio {
+ unsigned int gpio;
+ unsigned int value;
+ char gpio_name[32];
+};
+
+enum dss_clk_type {
+ DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+ DSS_CLK_PCLK,
+};
+
+struct dss_clk {
+ struct clk *clk; /* clk handle */
+ char clk_name[32];
+ enum dss_clk_type type;
+ unsigned long rate;
+ unsigned long max_rate;
+};
+
+struct dss_module_power {
+ unsigned int num_gpio;
+ struct dss_gpio *gpio_config;
+ unsigned int num_clk;
+ struct dss_clk *clk_config;
+};
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int msm_dss_parse_clock(struct platform_device *pdev,
+ struct dss_module_power *mp);
+#endif /* __DPU_IO_UTIL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
new file mode 100644
index 000000000000..d5e6ce0140cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_irq.h"
+#include "dpu_core_irq.h"
+
+irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ return dpu_core_irq(dpu_kms);
+}
+
+void dpu_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms->dev || !dpu_kms->dev->dev) {
+ pr_err("invalid device handles\n");
+ return;
+ }
+
+ dpu_core_irq_preinstall(dpu_kms);
+}
+
+int dpu_irq_postinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int rc;
+
+ if (!kms) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ rc = dpu_core_irq_postinstall(dpu_kms);
+
+ return rc;
+}
+
+void dpu_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ if (!kms) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_core_irq_uninstall(dpu_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
new file mode 100644
index 000000000000..3e147f7176e2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IRQ_H__
+#define __DPU_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * struct dpu_irq_controller - MDSS level interrupt controller context
+ * @enabled_mask: enable status of MDSS level interrupt
+ * @domain: interrupt domain of this controller
+ */
+struct dpu_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
+/**
+ * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void dpu_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: 0 if success; error code otherwise
+ */
+int dpu_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_uninstall - uninstall MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void dpu_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq - MDSS level IRQ handler
+ * @kms: pointer to kms context
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_irq(struct msm_kms *kms);
+
+#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644
index 000000000000..7dd6bd2d6d37
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -0,0 +1,1345 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+
+#include "dpu_kms.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_vbif.h"
+#include "dpu_encoder.h"
+#include "dpu_plane.h"
+#include "dpu_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+static const char * const iommu_ports[] = {
+ "mdp_0",
+};
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+static unsigned long dpu_iomap_size(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ DRM_ERROR("failed to get memory resource: %s\n", name);
+ return 0;
+ }
+
+ return resource_size(res);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+ bool danger_status)
+{
+ struct dpu_kms *kms = (struct dpu_kms *)s->private;
+ struct msm_drm_private *priv;
+ struct dpu_danger_safe_status status;
+ int i;
+
+ if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ DPU_ERROR("invalid arg(s)\n");
+ return 0;
+ }
+
+ priv = kms->dev->dev_private;
+ memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+ pm_runtime_get_sync(&kms->pdev->dev);
+ if (danger_status) {
+ seq_puts(s, "\nDanger signal status:\n");
+ if (kms->hw_mdp->ops.get_danger_status)
+ kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+ &status);
+ } else {
+ seq_puts(s, "\nSafe signal status:\n");
+ if (kms->hw_mdp->ops.get_safe_status)
+ kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+ &status);
+ }
+ pm_runtime_put_sync(&kms->pdev->dev);
+
+ seq_printf(s, "MDP : 0x%x\n", status.mdp);
+
+ for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+ seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
+ status.sspp[i]);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
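+/*
+ * Wrap a seq_file show function in single_open()-based file operations;
+ * __prefix ## _show must already be defined.
+ */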
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, true);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, false);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+
+static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove_recursive(dpu_kms->debugfs_danger);
+ dpu_kms->debugfs_danger = NULL;
+}
+
+static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ dpu_kms->debugfs_danger = debugfs_create_dir("danger",
+ parent);
+ if (!dpu_kms->debugfs_danger) {
+ DPU_ERROR("failed to create danger debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+ dpu_kms, &dpu_debugfs_danger_stats_fops);
+ debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+ dpu_kms, &dpu_debugfs_safe_stats_fops);
+
+ return 0;
+}
+
+static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+ struct dpu_debugfs_regset32 *regset;
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ void __iomem *base;
+ uint32_t i, addr;
+
+ if (!s || !s->private)
+ return 0;
+
+ regset = s->private;
+
+ dpu_kms = regset->dpu_kms;
+ if (!dpu_kms || !dpu_kms->mmio)
+ return 0;
+
+ dev = dpu_kms->dev;
+ if (!dev)
+ return 0;
+
+ priv = dev->dev_private;
+ if (!priv)
+ return 0;
+
+ base = dpu_kms->mmio + regset->offset;
+
+ /* insert padding spaces, if needed */
+ if (regset->offset & 0xF) {
+ seq_printf(s, "[%x]", regset->offset & ~0xF);
+ for (i = 0; i < (regset->offset & 0xF); i += 4)
+ seq_puts(s, " ");
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* main register output */
+ for (i = 0; i < regset->blk_len; i += 4) {
+ addr = regset->offset + i;
+ if ((addr & 0xF) == 0x0)
+ seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+ seq_printf(s, " %08x", readl_relaxed(base + i));
+ }
+ seq_puts(s, "\n");
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+static int dpu_debugfs_open_regset32(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations dpu_fops_regset32 = {
+ .open = dpu_debugfs_open_regset32,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+ if (regset) {
+ regset->offset = offset;
+ regset->blk_len = length;
+ regset->dpu_kms = dpu_kms;
+ }
+}
+
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct dpu_debugfs_regset32 *regset)
+{
+ if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+ return NULL;
+
+ /* make sure offset is a multiple of 4 */
+ regset->offset = round_down(regset->offset, 4);
+
+ return debugfs_create_file(name, mode, parent,
+ regset, &dpu_fops_regset32);
+}
+
+static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
+{
+ void *p;
+ int rc;
+
+ p = dpu_hw_util_get_log_mask_ptr();
+
+ if (!dpu_kms || !p)
+ return -EINVAL;
+
+ dpu_kms->debugfs_root = debugfs_create_dir("debug",
+ dpu_kms->dev->primary->debugfs_root);
+ if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
+ DRM_ERROR("debugfs create_dir failed %ld\n",
+ PTR_ERR(dpu_kms->debugfs_root));
+ return PTR_ERR(dpu_kms->debugfs_root);
+ }
+
+ rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
+ if (rc) {
+ DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
+ return rc;
+ }
+
+ /* allow root to be NULL */
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+
+ (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
+ (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
+ (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
+
+ rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+ /* don't need to NULL check debugfs_root */
+ if (dpu_kms) {
+ dpu_debugfs_vbif_destroy(dpu_kms);
+ dpu_debugfs_danger_destroy(dpu_kms);
+ dpu_debugfs_core_irq_destroy(dpu_kms);
+ debugfs_remove_recursive(dpu_kms->debugfs_root);
+ }
+}
+#else
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_prepare_commit(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct dpu_kms *dpu_kms;
+ struct msm_drm_private *priv;
+ struct drm_device *dev;
+ struct drm_encoder *encoder;
+
+ if (!kms)
+ return;
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+
+ if (!dev || !dev->dev_private)
+ return;
+ priv = dev->dev_private;
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc != NULL)
+ dpu_encoder_prepare_commit(encoder);
+}
+
+/*
+ * Override the encoder enable since we need to setup the inline rotator and do
+ * some crtc magic before enabling any bridge that might be present.
+ */
+void dpu_kms_encoder_enable(struct drm_encoder *encoder)
+{
+ const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
+ struct drm_crtc *crtc = encoder->crtc;
+
+ /* Forward this enable call to the commit hook */
+ if (funcs && funcs->commit)
+ funcs->commit(encoder);
+
+ if (crtc && crtc->state->active) {
+ trace_dpu_kms_enc_enable(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+}
+
+static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ /* If modeset is required, kickoff is run in encoder_enable */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ continue;
+
+ if (crtc->state->active) {
+ trace_dpu_kms_commit(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+ }
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct dpu_kms *dpu_kms;
+ struct msm_drm_private *priv;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ if (!kms || !old_state)
+ return;
+ dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
+ return;
+ priv = dpu_kms->dev->dev_private;
+
+ DPU_ATRACE_BEGIN("kms_complete_commit");
+
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ dpu_crtc_complete_commit(crtc, old_crtc_state);
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ int ret;
+
+ if (!kms || !crtc || !crtc->state) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ dev = crtc->dev;
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+ return;
+ }
+
+ if (!crtc->state->active) {
+ DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+ return;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ /*
+ * Wait for post-flush if necessary to delay before
+ * plane_cleanup. For example, wait for vsync in case of video
+ * mode panels. This may be a no-op for command mode panels.
+ */
+ trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+ ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+ if (ret && ret != -EWOULDBLOCK) {
+ DPU_ERROR("wait for commit done returned %d\n", ret);
+ break;
+ }
+ }
+}
+
+static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ int i, rc;
+
+ /* TODO: Support two independent DSI connectors */
+ encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR_OR_NULL(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return;
+ }
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (!priv->dsi[i]) {
+ DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
+ return;
+ }
+
+ rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ i, rc);
+ continue;
+ }
+ }
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ * for underlying displays
+ * @dev: Pointer to drm device structure
+ * @priv: Pointer to private drm device data
+ * @dpu_kms: Pointer to dpu kms structure
+ */
+static void _dpu_kms_setup_displays(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+
+ /*
+ * Extend this function to initialize other
+ * types of displays
+ */
+}
+
+static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid dev\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid dev_private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+ priv->num_crtcs = 0;
+
+ for (i = 0; i < priv->num_planes; i++)
+ priv->planes[i]->funcs->destroy(priv->planes[i]);
+ priv->num_planes = 0;
+
+ for (i = 0; i < priv->num_connectors; i++)
+ priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+ priv->num_connectors = 0;
+
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+ priv->num_encoders = 0;
+}
+
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ struct drm_plane *primary_planes[MAX_PLANES], *plane;
+ struct drm_crtc *crtc;
+
+ struct msm_drm_private *priv;
+ struct dpu_mdss_cfg *catalog;
+
+ int primary_planes_idx = 0, i, ret;
+ int max_crtc_count;
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return -EINVAL;
+ }
+
+ dev = dpu_kms->dev;
+ priv = dev->dev_private;
+ catalog = dpu_kms->catalog;
+
+ /*
+ * Create encoder and query display drivers to create
+ * bridges and connectors
+ */
+ _dpu_kms_setup_displays(dev, priv, dpu_kms);
+
+ max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+ /* Create the planes */
+ for (i = 0; i < catalog->sspp_count; i++) {
+ bool primary = true;
+
+ if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
+ || primary_planes_idx >= max_crtc_count)
+ primary = false;
+
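+ /* allow the plane on any of the first max_crtc_count CRTCs */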
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
+ (1UL << max_crtc_count) - 1, 0);
+ if (IS_ERR(plane)) {
+ DPU_ERROR("dpu_plane_init failed\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ if (primary)
+ primary_planes[primary_planes_idx++] = plane;
+ }
+
+ max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+ /* Create one CRTC per encoder */
+ for (i = 0; i < max_crtc_count; i++) {
+ crtc = dpu_crtc_init(dev, primary_planes[i]);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /* All CRTCs are compatible with all encoders */
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ return 0;
+fail:
+ _dpu_kms_drm_obj_destroy(dpu_kms);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_device *dev;
+ int rc;
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return -EINVAL;
+ }
+
+ dev = dpu_kms->dev;
+
+ rc = _dpu_debugfs_init(dpu_kms);
+ if (rc)
+ DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
+
+ return rc;
+}
+#endif
+
+static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ return rate;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ int i;
+
+ dev = dpu_kms->dev;
+ if (!dev)
+ return;
+
+ if (dpu_kms->hw_intr)
+ dpu_hw_intr_destroy(dpu_kms->hw_intr);
+ dpu_kms->hw_intr = NULL;
+
+ if (dpu_kms->power_event)
+ dpu_power_handle_unregister_event(
+ &dpu_kms->phandle, dpu_kms->power_event);
+
+ /* safe to call these more than once during shutdown */
+ _dpu_debugfs_destroy(dpu_kms);
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ if (dpu_kms->catalog) {
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+ }
+ }
+
+ if (dpu_kms->rm_init)
+ dpu_rm_destroy(&dpu_kms->rm);
+ dpu_kms->rm_init = false;
+
+ if (dpu_kms->catalog)
+ dpu_hw_catalog_deinit(dpu_kms->catalog);
+ dpu_kms->catalog = NULL;
+
+ if (dpu_kms->core_client)
+ dpu_power_client_destroy(&dpu_kms->phandle,
+ dpu_kms->core_client);
+ dpu_kms->core_client = NULL;
+
+ if (dpu_kms->vbif[VBIF_NRT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+
+ if (dpu_kms->vbif[VBIF_RT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+
+ if (dpu_kms->mmio)
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+ dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+
+ dpu_dbg_destroy();
+ _dpu_kms_hw_destroy(dpu_kms);
+}
+
+static int dpu_kms_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct dpu_kms *dpu_kms;
+ int ret = 0, num_crtcs = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+ /* disable hot-plug polling */
+ drm_kms_helper_poll_disable(ddev);
+
+ /* acquire modeset lock(s) */
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ DPU_ATRACE_BEGIN("kms_pm_suspend");
+
+ ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+ if (ret)
+ goto unlock;
+
+ /* save current state for resume */
+ if (dpu_kms->suspend_state)
+ drm_atomic_state_put(dpu_kms->suspend_state);
+ dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+ if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
+ DRM_ERROR("failed to back up suspend state\n");
+ dpu_kms->suspend_state = NULL;
+ goto unlock;
+ }
+
+ /* create atomic state to disable all CRTCs */
+ state = drm_atomic_state_alloc(ddev);
+ if (IS_ERR_OR_NULL(state)) {
+ DRM_ERROR("failed to allocate crtc disable state\n");
+ goto unlock;
+ }
+
+ state->acquire_ctx = &ctx;
+
+ /* mark all active CRTCs inactive so the commit below disables them */
+ drm_for_each_crtc(crtc, ddev) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ drm_atomic_state_put(state);
+ goto unlock;
+ }
+
+ if (crtc_state->active) {
+ crtc_state->active = false;
+ num_crtcs++;
+ }
+ }
+
+ /* check for nothing to do */
+ if (num_crtcs == 0) {
+ DRM_DEBUG("all crtcs are already in the off state\n");
+ drm_atomic_state_put(state);
+ goto suspended;
+ }
+
+ /* commit the "disable all" state */
+ ret = drm_atomic_commit(state);
+ if (ret < 0) {
+ DRM_ERROR("failed to disable crtcs, %d\n", ret);
+ drm_atomic_state_put(state);
+ goto unlock;
+ }
+
+suspended:
+ dpu_kms->suspend_block = true;
+
+unlock:
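+ /* on modeset lock contention, back off and retry the whole sequence */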
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ DPU_ATRACE_END("kms_pm_suspend");
+ return 0;
+}
+
+static int dpu_kms_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct dpu_kms *dpu_kms;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+ DPU_ATRACE_BEGIN("kms_pm_resume");
+
+ drm_mode_config_reset(ddev);
+
+ drm_modeset_lock_all(ddev);
+
+ dpu_kms->suspend_block = false;
+
+ if (dpu_kms->suspend_state) {
+ dpu_kms->suspend_state->acquire_ctx =
+ ddev->mode_config.acquire_ctx;
+ ret = drm_atomic_commit(dpu_kms->suspend_state);
+ if (ret < 0) {
+ DRM_ERROR("failed to restore state, %d\n", ret);
+ drm_atomic_state_put(dpu_kms->suspend_state);
+ }
+ dpu_kms->suspend_state = NULL;
+ }
+ drm_modeset_unlock_all(ddev);
+
+ /* enable hot-plug polling */
+ drm_kms_helper_poll_enable(ddev);
+
+ DPU_ATRACE_END("kms_pm_resume");
+ return 0;
+}
+
+static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ bool cmd_mode)
+{
+ struct msm_display_info info;
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ int i, rc = 0;
+
+ memset(&info, 0, sizeof(info));
+
+ info.intf_type = encoder->encoder_type;
+ info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ /* TODO: No support for DSI swap */
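+ /* each populated DSI controller contributes one horizontal tile */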
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (priv->dsi[i]) {
+ info.h_tile_instance[info.num_of_h_tiles] = i;
+ info.num_of_h_tiles++;
+ }
+ }
+
+ rc = dpu_encoder_setup(encoder->dev, encoder, &info);
+ if (rc)
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = dpu_kms_hw_init,
+ .irq_preinstall = dpu_irq_preinstall,
+ .irq_postinstall = dpu_irq_postinstall,
+ .irq_uninstall = dpu_irq_uninstall,
+ .irq = dpu_irq,
+ .prepare_commit = dpu_kms_prepare_commit,
+ .commit = dpu_kms_commit,
+ .complete_commit = dpu_kms_complete_commit,
+ .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
+ .enable_vblank = dpu_kms_enable_vblank,
+ .disable_vblank = dpu_kms_disable_vblank,
+ .check_modified_format = dpu_format_check_modified_format,
+ .get_format = dpu_get_msm_format,
+ .round_pixclk = dpu_kms_round_pixclk,
+ .pm_suspend = dpu_kms_pm_suspend,
+ .pm_resume = dpu_kms_pm_resume,
+ .destroy = dpu_kms_destroy,
+ .set_encoder_mode = _dpu_kms_set_encoder_mode,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = dpu_kms_debugfs_init,
+#endif
+};
+
+/* the caller must turn on clocks before calling this function */
+static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
+{
+ dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+}
+
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_mmu *mmu;
+
+ /* tolerate repeated calls and the no-IOMMU case, where no aspace exists */
+ if (!dpu_kms->base.aspace)
+ return 0;
+
+ mmu = dpu_kms->base.aspace->mmu;
+
+ mmu->funcs->detach(mmu, (const char **)iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(dpu_kms->base.aspace);
+ dpu_kms->base.aspace = NULL;
+
+ return 0;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+ struct iommu_domain *domain;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
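+ /* an IOMMU is optional here; without one, skip address space setup */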
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return 0;
+
+ aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
+ domain, "dpu1");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ dpu_kms->base.aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ DPU_ERROR("failed to attach iommu %d\n", ret);
+ msm_gem_address_space_put(aspace);
+ dpu_kms->base.aspace = NULL;
+ return ret;
+ }
+
+ return 0;
+fail:
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ return ret;
+}
+
+static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
+ char *clock_name)
+{
+ struct dss_module_power *mp = &dpu_kms->mp;
+ int i;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name))
+ return &mp->clk_config[i];
+ }
+
+ return NULL;
+}
+
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+ struct dss_clk *clk;
+
+ clk = _dpu_kms_get_clk(dpu_kms, clock_name);
+ if (!clk)
+ return -EINVAL;
+
+ return clk_get_rate(clk->clk);
+}
+
+static void dpu_kms_handle_power_event(u32 event_type, void *usr)
+{
+ struct dpu_kms *dpu_kms = usr;
+
+ if (!dpu_kms)
+ return;
+
+ if (event_type == DPU_POWER_EVENT_POST_ENABLE)
+ dpu_vbif_init_memtypes(dpu_kms);
+}
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ int i, rc = -EINVAL;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ goto end;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+ if (!dev) {
+ DPU_ERROR("invalid device\n");
+ goto end;
+ }
+
+ rc = dpu_dbg_init(&dpu_kms->pdev->dev);
+ if (rc) {
+ DRM_ERROR("failed to init dpu dbg: %d\n", rc);
+ goto end;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DPU_ERROR("invalid private data\n");
+ goto dbg_destroy;
+ }
+
+ dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
+ if (IS_ERR(dpu_kms->mmio)) {
+ rc = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", rc);
+ dpu_kms->mmio = NULL;
+ goto error;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", rc);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ goto error;
+ }
+ dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ } else {
+ dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
+ "vbif_nrt");
+ }
+
+ dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
+ if (IS_ERR(dpu_kms->reg_dma)) {
+ dpu_kms->reg_dma = NULL;
+ DPU_DEBUG("REG_DMA is not defined");
+ } else {
+ dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
+ }
+
+ dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
+ "core");
+ if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
+ rc = PTR_ERR(dpu_kms->core_client);
+ if (!dpu_kms->core_client)
+ rc = -EINVAL;
+ DPU_ERROR("dpu power client create failed: %d\n", rc);
+ dpu_kms->core_client = NULL;
+ goto error;
+ }
+
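+ /* reading the HW revision below requires clocks to be enabled */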
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ _dpu_kms_core_hw_rev_init(dpu_kms);
+
+ pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
+
+ dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
+ if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
+ rc = PTR_ERR(dpu_kms->catalog);
+ if (!dpu_kms->catalog)
+ rc = -EINVAL;
+ DPU_ERROR("catalog init failed: %d\n", rc);
+ dpu_kms->catalog = NULL;
+ goto power_error;
+ }
+
+ dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
+
+ /*
+ * Now we need to read the HW catalog and initialize resources such as
+ * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+ */
+ rc = _dpu_kms_mmu_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+ dpu_kms->dev);
+ if (rc) {
+ DPU_ERROR("rm init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ dpu_kms->rm_init = true;
+
+ dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+ rc = PTR_ERR(dpu_kms->hw_mdp);
+ if (!dpu_kms->hw_mdp)
+ rc = -EINVAL;
+ DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+ dpu_kms->hw_mdp = NULL;
+ goto power_error;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+ rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+ if (!dpu_kms->hw_vbif[vbif_idx])
+ rc = -EINVAL;
+ DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+ dpu_kms->hw_vbif[vbif_idx] = NULL;
+ goto power_error;
+ }
+ }
+
+ rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+ &dpu_kms->phandle,
+ _dpu_kms_get_clk(dpu_kms, "core"));
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ goto perf_err;
+ }
+
+ dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+ rc = PTR_ERR(dpu_kms->hw_intr);
+ DPU_ERROR("hw_intr init failed: %d\n", rc);
+ dpu_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
+ /*
+ * _dpu_kms_drm_obj_init should create the DRM related objects
+ * i.e. CRTCs, planes, encoders, connectors and so forth
+ */
+ rc = _dpu_kms_drm_obj_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("modeset init failed: %d\n", rc);
+ goto drm_obj_init_err;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * max crtc width is equal to the max mixer width * 2 and max
+ * height is 4K
+ */
+ dev->mode_config.max_width =
+ dpu_kms->catalog->caps->max_mixer_width * 2;
+ dev->mode_config.max_height = 4096;
+
+ /*
+ * Support format modifiers for compression etc.
+ */
+ dev->mode_config.allow_fb_modifiers = true;
+
+ /*
+ * Handle (re)initializations during power enable
+ */
+ dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+ dpu_kms->power_event = dpu_power_handle_register_event(
+ &dpu_kms->phandle,
+ DPU_POWER_EVENT_POST_ENABLE,
+ dpu_kms_handle_power_event, dpu_kms, "kms");
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+
+drm_obj_init_err:
+ dpu_core_perf_destroy(&dpu_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+ _dpu_kms_hw_destroy(dpu_kms);
+dbg_destroy:
+ dpu_dbg_destroy();
+end:
+ return rc;
+}
+
+struct msm_kms *dpu_kms_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int irq;
+
+ if (!dev || !dev->dev_private) {
+ DPU_ERROR("drm device node invalid\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ priv = dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+ if (!irq) {
+ DPU_ERROR("failed to get irq\n");
+ return ERR_PTR(-EINVAL);
+ }
+ dpu_kms->base.irq = irq;
+
+ return &dpu_kms->base;
+}
+
+static int dpu_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *ddev = dev_get_drvdata(master);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct dpu_kms *dpu_kms;
+ struct dss_module_power *mp;
+ int ret = 0;
+
+ dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+ if (!dpu_kms)
+ return -ENOMEM;
+
+ mp = &dpu_kms->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ return ret;
+ }
+
+ dpu_power_resource_init(pdev, &dpu_kms->phandle);
+
+ platform_set_drvdata(pdev, dpu_kms);
+
+ msm_kms_init(&dpu_kms->base, &kms_funcs);
+ dpu_kms->dev = ddev;
+ dpu_kms->pdev = pdev;
+
+ pm_runtime_enable(&pdev->dev);
+ dpu_kms->rpm_enabled = true;
+
+ priv->kms = &dpu_kms->base;
+ return ret;
+}
+
+static void dpu_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+ mp->num_clk = 0;
+
+ if (dpu_kms->rpm_enabled)
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct component_ops dpu_ops = {
+ .bind = dpu_bind,
+ .unbind = dpu_unbind,
+};
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dpu_ops);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dpu_ops);
+ return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+ int rc = -EINVAL;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_device *ddev;
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ ddev = dpu_kms->dev;
+ if (!ddev) {
+ DPU_ERROR("invalid drm_device\n");
+ goto exit;
+ }
+
+ rc = dpu_power_resource_enable(&dpu_kms->phandle,
+ dpu_kms->core_client, false);
+ if (rc)
+ DPU_ERROR("resource disable failed: %d\n", rc);
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (rc)
+ DPU_ERROR("clock disable failed rc:%d\n", rc);
+
+exit:
+ return rc;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+ int rc = -EINVAL;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_device *ddev;
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ ddev = dpu_kms->dev;
+ if (!ddev) {
+ DPU_ERROR("invalid drm_device\n");
+ goto exit;
+ }
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (rc) {
+ DPU_ERROR("clock enable failed rc:%d\n", rc);
+ goto exit;
+ }
+
+ rc = dpu_power_resource_enable(&dpu_kms->phandle,
+ dpu_kms->core_client, true);
+ if (rc)
+ DPU_ERROR("resource enable failed: %d\n", rc);
+
+exit:
+ return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+ SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+ { .compatible = "qcom,sdm845-dpu", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+ .probe = dpu_dev_probe,
+ .remove = dpu_dev_remove,
+ .driver = {
+ .name = "msm_dpu",
+ .of_match_table = dpu_dt_match,
+ .pm = &dpu_pm_ops,
+ },
+};
+
+void __init msm_dpu_register(void)
+{
+ platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+ platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 000000000000..66d466628e2b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_dbg.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_power_handle.h"
+#include "dpu_irq.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_KMS)) \
+ DRM_DEBUG(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ DRM_ERROR(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ * This macro is similar to the standard ktime_compare() function, but
+ * attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+ ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
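+/*
+ * Comparing the signed difference against zero is the usual wraparound
+ * idiom: it stays correct even if one timestamp has wrapped, provided
+ * the two values are less than 2^63 ns apart.
+ */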
+
+#define DPU_NAME_SIZE 12
+
+/* timeout in frames waiting for frame done */
+#define DPU_FRAME_DONE_TIMEOUT 60
+
+/*
+ * struct dpu_irq_callback - IRQ callback handlers
+ * @list: list to callback
+ * @func: intr handler
+ * @arg: argument for the handler
+ */
+struct dpu_irq_callback {
+ struct list_head list;
+ void (*func)(void *arg, int irq_idx);
+ void *arg;
+};
+
+/**
+ * struct dpu_irq - IRQ structure containing callback registration info
+ * @total_irqs: total number of irq_idx obtained from HW interrupts mapping
+ * @irq_cb_tbl: array of IRQ callback lists, one per irq_idx
+ * @enable_counts: array of IRQ enable counts
+ * @irq_counts: array of IRQ occurrence counts
+ * @cb_lock: callback list lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct dpu_irq {
+ u32 total_irqs;
+ struct list_head *irq_cb_tbl;
+ atomic_t *enable_counts;
+ atomic_t *irq_counts;
+ spinlock_t cb_lock;
+ struct dentry *debugfs_file;
+};
+
+struct dpu_kms {
+ struct msm_kms base;
+ struct drm_device *dev;
+ int core_rev;
+ struct dpu_mdss_cfg *catalog;
+
+ struct dpu_power_handle phandle;
+ struct dpu_power_client *core_client;
+ struct dpu_power_event *power_event;
+
+ /* directory entry for debugfs */
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_danger;
+ struct dentry *debugfs_vbif;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+ unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
+
+ struct regulator *vdd;
+ struct regulator *mmagic;
+ struct regulator *venus;
+
+ struct dpu_hw_intr *hw_intr;
+ struct dpu_irq irq_obj;
+
+ struct dpu_core_perf perf;
+
+ /* saved atomic state during system suspend */
+ struct drm_atomic_state *suspend_state;
+ bool suspend_block;
+
+ struct dpu_rm rm;
+ bool rm_init;
+
+ struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+
+ bool has_danger_ctrl;
+
+ struct platform_device *pdev;
+ bool rpm_enabled;
+ struct dss_module_power mp;
+};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+/* get struct msm_kms * from drm_device * */
+#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
+ ((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
+
+/**
+ * dpu_kms_is_suspend_state - whether or not the system is pm suspended
+ * @dev: Pointer to drm device
+ * Return: Suspend status
+ */
+static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
+{
+ if (!ddev_to_msm_kms(dev))
+ return false;
+
+ return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
+}
+
+/**
+ * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
+ * suspend status
+ * @dev: Pointer to drm device
+ * Return: True if commits should be rejected due to pm suspend
+ */
+static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
+{
+ if (!dpu_kms_is_suspend_state(dev))
+ return false;
+
+ return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
+}
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at:
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
+ */
+
+/**
+ * struct dpu_debugfs_regset32 - companion structure for
+ * dpu_debugfs_create_regset32. Do not initialize the members of this
+ * structure explicitly; use dpu_debugfs_setup_regset32 instead.
+ */
+struct dpu_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct dpu_kms *dpu_kms;
+};
+
+/**
+ * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize dpu_debugfs_regset32 structures for use
+ * with dpu_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * dpu_debugfs_setup_regset32 to initialize it.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ * or debugfs_remove_recursive() (on a parent directory) to remove the
+ * file
+ */
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct dpu_debugfs_regset32 *regset);
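+
+/*
+ * Usage sketch (hypothetical offset/length values): the regset must
+ * outlive the created file, so it is typically embedded in a longer-lived
+ * structure:
+ *
+ *   static struct dpu_debugfs_regset32 regset;
+ *
+ *   dpu_debugfs_setup_regset32(&regset, 0x1000, 0x100, dpu_kms);
+ *   dpu_debugfs_create_regset32("ctl_regs", 0400,
+ *                               dpu_debugfs_get_root(dpu_kms), &regset);
+ */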
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
+
+/**
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE 4096
+
+/**
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+void dpu_kms_encoder_enable(struct drm_encoder *encoder);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms: pointer to dpu_kms structure
+ * @clock_name: name of the clock whose rate is queried
+ *
+ * Return: current clock rate
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
new file mode 100644
index 000000000000..9e533b86682c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -0,0 +1,245 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include "dpu_kms.h"
+
+#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+
+#define HW_INTR_STATUS 0x0010
+
+struct dpu_mdss {
+ struct msm_mdss base;
+ void __iomem *mmio;
+ unsigned long mmio_len;
+ u32 hwversion;
+ struct dss_module_power mp;
+ struct dpu_irq_controller irq_controller;
+};
+
+static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+{
+ struct dpu_mdss *dpu_mdss = arg;
+ u32 interrupts;
+
+ interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
+
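+ /*
+ * Dispatch pending sources highest bit first: fls() returns the
+ * 1-based position of the most significant set bit, so hwirq works
+ * down as each handled bit is cleared from the local copy.
+ */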
+ while (interrupts) {
+ irq_hw_number_t hwirq = fls(interrupts) - 1;
+ unsigned int mapping;
+ int rc;
+
+ mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
+ hwirq);
+ if (mapping == 0) {
+ DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
+ return IRQ_NONE;
+ }
+
+ rc = generic_handle_irq(mapping);
+ if (rc < 0) {
+ DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
+ hwirq, mapping, rc);
+ return IRQ_NONE;
+ }
+
+ interrupts &= ~(1 << hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void dpu_mdss_irq_mask(struct irq_data *irqd)
+{
+ struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* memory barrier */
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+ /* memory barrier */
+ smp_mb__after_atomic();
+}
+
+static void dpu_mdss_irq_unmask(struct irq_data *irqd)
+{
+ struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* memory barrier */
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+ /* memory barrier */
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip dpu_mdss_irq_chip = {
+ .name = "dpu_mdss",
+ .irq_mask = dpu_mdss_irq_mask,
+ .irq_unmask = dpu_mdss_irq_unmask,
+};
+
+static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct dpu_mdss *dpu_mdss = domain->host_data;
+ int ret;
+
+ irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
+ ret = irq_set_chip_data(irq, dpu_mdss);
+
+ return ret;
+}
+
+static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
+ .map = dpu_mdss_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
+{
+ struct device *dev;
+ struct irq_domain *domain;
+
+ dev = dpu_mdss->base.dev->dev;
+
+ domain = irq_domain_add_linear(dev->of_node, 32,
+ &dpu_mdss_irqdomain_ops, dpu_mdss);
+ if (!domain) {
+ DPU_ERROR("failed to add irq_domain\n");
+ return -EINVAL;
+ }
+
+ dpu_mdss->irq_controller.enabled_mask = 0;
+ dpu_mdss->irq_controller.domain = domain;
+
+ return 0;
+}
+
+static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+{
+ if (dpu_mdss->irq_controller.domain) {
+ irq_domain_remove(dpu_mdss->irq_controller.domain);
+ dpu_mdss->irq_controller.domain = NULL;
+ }
+ return 0;
+}
+
+static int dpu_mdss_enable(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (ret)
+ DPU_ERROR("clock enable failed, ret:%d\n", ret);
+
+ return ret;
+}
+
+static int dpu_mdss_disable(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (ret)
+ DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+ return ret;
+}
+
+static void dpu_mdss_destroy(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+
+ _dpu_mdss_irq_domain_fini(dpu_mdss);
+
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+
+ pm_runtime_disable(dev->dev);
+ priv->mdss = NULL;
+}
+
+static const struct msm_mdss_funcs mdss_funcs = {
+ .enable = dpu_mdss_enable,
+ .disable = dpu_mdss_disable,
+ .destroy = dpu_mdss_destroy,
+};
+
+int dpu_mdss_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct resource *res;
+ struct dpu_mdss *dpu_mdss;
+ struct dss_module_power *mp;
+ int ret = 0;
+
+ dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+ if (!dpu_mdss)
+ return -ENOMEM;
+
+ dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
+ if (IS_ERR(dpu_mdss->mmio))
+ return PTR_ERR(dpu_mdss->mmio);
+
+ DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
+ if (!res) {
+ DRM_ERROR("failed to get memory resource for mdss\n");
+ return -ENOMEM;
+ }
+ dpu_mdss->mmio_len = resource_size(res);
+
+ mp = &dpu_mdss->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ goto clk_parse_err;
+ }
+
+ dpu_mdss->base.dev = dev;
+ dpu_mdss->base.funcs = &mdss_funcs;
+
+ ret = _dpu_mdss_irq_domain_add(dpu_mdss);
+ if (ret)
+ goto irq_domain_error;
+
+ ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
+ if (ret) {
+ DPU_ERROR("failed to init irq: %d\n", ret);
+ goto irq_error;
+ }
+
+ pm_runtime_enable(dev->dev);
+
+ pm_runtime_get_sync(dev->dev);
+ dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
+ pm_runtime_put_sync(dev->dev);
+
+ priv->mdss = &dpu_mdss->base;
+
+ return ret;
+
+irq_error:
+ _dpu_mdss_irq_domain_fini(dpu_mdss);
+irq_domain_error:
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_parse_err:
+ devm_kfree(&pdev->dev, mp->clk_config);
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 000000000000..b640e39ebaca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1963 @@
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
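+/*
+ * e.g. (illustrative) DECIMATED_DIMENSION(1081, 1) == 541: each
+ * decimation step halves the dimension, rounding up.
+ */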
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT 8
+#define SHARP_NOISE_THR_DEFAULT 2
+
+#define DPU_NAME_SIZE 12
+
+#define DPU_PLANE_COLOR_FILL_FLAG BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+ R0,
+ R1,
+ R_MAX
+};
+
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+#define DEFAULT_REFRESH_RATE 60
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables amortization within the pipe;
+ * this configuration is mutually exclusive with VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum dpu_plane_qos {
+ DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+ DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+ DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
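+
+/*
+ * Example (illustrative): the qos flags combine as a bitmask, e.g. to
+ * enable both panic and vblank control on a pipe:
+ *
+ *   _dpu_plane_set_qos_ctrl(plane, true,
+ *                           DPU_PLANE_QOS_PANIC_CTRL |
+ *                           DPU_PLANE_QOS_VBLANK_CTRL);
+ */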
+
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @aspace: address space pointer
+ * @csc_ptr: Points to the dpu_csc_cfg to use for the current frame
+ * @mplane_list: List of multirect planes of the same pipe
+ * @catalog: Points to dpu catalog structure
+ */
+struct dpu_plane {
+ struct drm_plane base;
+
+ struct mutex lock;
+
+ enum dpu_sspp pipe;
+ uint32_t features; /* capabilities from catalog */
+ uint32_t nformats;
+ uint32_t formats[64];
+
+ struct dpu_hw_pipe *pipe_hw;
+ struct dpu_hw_pipe_cfg pipe_cfg;
+ struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+ uint32_t color_fill;
+ bool is_error;
+ bool is_rt_pipe;
+ bool is_virtual;
+ struct list_head mplane_list;
+ struct dpu_mdss_cfg *catalog;
+
+ struct dpu_csc_cfg *csc_ptr;
+
+ const struct dpu_sspp_sub_blks *pipe_sblk;
+ char pipe_name[DPU_NAME_SIZE];
+
+ /* debugfs related stuff */
+ struct dentry *debugfs_root;
+ struct dpu_debugfs_regset32 debugfs_src;
+ struct dpu_debugfs_regset32 debugfs_scaler;
+ struct dpu_debugfs_regset32 debugfs_csc;
+ bool debugfs_default_scale;
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv;
+
+ if (!plane || !plane->dev)
+ return NULL;
+ priv = plane->dev->dev_private;
+ if (!priv)
+ return NULL;
+ return to_dpu_kms(priv->kms);
+}
+
+static bool dpu_plane_enabled(struct drm_plane_state *state)
+{
+ return state && state->fb && state->crtc;
+}
+
+static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
+{
+ return state && state->crtc;
+}
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane: Pointer to drm plane
+ * @fmt: Pointer to source buffer format
+ * @src_width: width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
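+ *
+ * Example (hypothetical numbers): for an NV12 source the computation below
+ * is (pixel_ram_size / 2) / ((src_width + 32) * bpp); with a 50KB RAM
+ * (51200 bytes), src_width 1920 and bpp 1 this evaluates to 13.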
+ */
+static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+ const struct dpu_format *fmt, u32 src_width)
+{
+ struct dpu_plane *pdpu, *tmp;
+ struct dpu_plane_state *pstate;
+ u32 fixed_buff_size;
+ u32 total_fl;
+
+ if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+ DPU_ERROR("invalid arguments\n");
+ return 0;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+ fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+
+ list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+ if (!dpu_plane_enabled(tmp->base.state))
+ continue;
+ DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
+ pdpu->base.base.id, tmp->base.base.id,
+ src_width,
+ drm_rect_width(&tmp->pipe_cfg.src_rect));
+ src_width = max_t(u32, src_width,
+ drm_rect_width(&tmp->pipe_cfg.src_rect));
+ }
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ if (fmt->chroma_sample == DPU_CHROMA_420) {
+ /* NV12 */
+ total_fl = (fixed_buff_size / 2) /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ /* non NV12 */
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ } else {
+ if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ total_fl = (fixed_buff_size) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ }
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+ plane->base.id, pdpu->pipe - SSPP_VIG0,
+ (char *)&fmt->base.pixel_format,
+ src_width, total_fl);
+
+ return total_fl;
+}
+
+/**
+ * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl: Pointer to LUT table
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl)
+{
+ int i;
+
+ if (!tbl || !tbl->nentry || !tbl->entries)
+ return 0;
+
+ for (i = 0; i < tbl->nentry; i++)
+ if (total_fl <= tbl->entries[i].fl)
+ return tbl->entries[i].lut;
+
+ /* if last fl is zero, use as default */
+ if (!tbl->entries[i-1].fl)
+ return tbl->entries[i-1].lut;
+
+ return 0;
+}
+
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ const struct dpu_format *fmt = NULL;
+ u64 qos_lut;
+ u32 total_fl = 0, lut_usage;
+
+ if (!plane || !fb) {
+ DPU_ERROR("invalid arguments plane %d fb %d\n",
+ plane != 0, fb != 0);
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
+ return;
+ }
+
+ if (!pdpu->is_rt_pipe) {
+ lut_usage = DPU_QOS_LUT_USAGE_NRT;
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+ total_fl = _dpu_plane_calc_fill_level(plane, fmt,
+ drm_rect_width(&pdpu->pipe_cfg.src_rect));
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+ lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+ else
+ lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+ }
+
+ qos_lut = _dpu_plane_get_qos_lut(
+ &pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
+ pdpu->pipe_qos_cfg.creq_lut = qos_lut;
+
+ trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ pdpu->is_rt_pipe, total_fl, qos_lut);
+
+ pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ const struct dpu_format *fmt = NULL;
+ u32 danger_lut, safe_lut;
+
+ if (!plane || !fb) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
+ return;
+ }
+
+ if (!pdpu->is_rt_pipe) {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ } else {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ }
+ }
+
+ pdpu->pipe_qos_cfg.danger_lut = danger_lut;
+ pdpu->pipe_qos_cfg.safe_lut = safe_lut;
+
+ trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ (fmt) ? fmt->fetch_mode : 0,
+ pdpu->pipe_qos_cfg.danger_lut,
+ pdpu->pipe_qos_cfg.safe_lut);
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ fmt ? fmt->fetch_mode : -1,
+ pdpu->pipe_qos_cfg.danger_lut,
+ pdpu->pipe_qos_cfg.safe_lut);
+
+ pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
+ &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane: Pointer to drm plane
+ * @enable: true to enable QoS control
+ * @flags: QoS control mode (enum dpu_plane_qos)
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+ bool enable, u32 flags)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
+ return;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
+ pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
+ pdpu->pipe_qos_cfg.danger_vblank =
+ pdpu->pipe_sblk->danger_vblank;
+ pdpu->pipe_qos_cfg.vblank_en = enable;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
+ /* this feature overrules previous VBLANK_CTRL */
+ pdpu->pipe_qos_cfg.vblank_en = false;
+ pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+ }
+
+ if (flags & DPU_PLANE_QOS_PANIC_CTRL)
+ pdpu->pipe_qos_cfg.danger_safe_en = enable;
+
+ if (!pdpu->is_rt_pipe) {
+ pdpu->pipe_qos_cfg.vblank_en = false;
+ pdpu->pipe_qos_cfg.danger_safe_en = false;
+ }
+
+ DPU_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ pdpu->pipe_qos_cfg.danger_safe_en,
+ pdpu->pipe_qos_cfg.vblank_en,
+ pdpu->pipe_qos_cfg.creq_vblank,
+ pdpu->pipe_qos_cfg.danger_vblank,
+ pdpu->is_rt_pipe);
+
+ pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
+ &pdpu->pipe_qos_cfg);
+}
+
+int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->is_rt_pipe)
+ goto end;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+end:
+ return 0;
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane: Pointer to drm plane
+ * @crtc: Pointer to drm crtc
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+ struct drm_crtc *crtc)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_vbif_set_ot_params ot_params;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev || !crtc) {
+ DPU_ERROR("invalid arguments plane %d crtc %d\n",
+ plane != 0, crtc != 0);
+ return;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
+ ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+ ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+ ot_params.is_wfd = !pdpu->is_rt_pipe;
+ ot_params.frame_rate = crtc->mode.vrefresh;
+ ot_params.vbif_idx = VBIF_RT;
+ ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ ot_params.rd = true;
+
+ dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS remap for the given plane
+ * @plane: Pointer to drm plane
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_vbif_set_qos_params qos_params;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = VBIF_RT;
+ qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ qos_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
+ qos_params.is_rt = pdpu->is_rt_pipe;
+
+ DPU_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+ plane->base.id, qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt,
+ qos_params.clk_ctrl);
+
+ dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+/**
+ * _dpu_plane_get_aspace - gets the address space
+ */
+static int _dpu_plane_get_aspace(
+ struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ struct msm_gem_address_space **aspace)
+{
+ struct dpu_kms *kms;
+
+ if (!pdpu || !pstate || !aspace) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_plane_get_kms(&pdpu->base);
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ *aspace = kms->base.aspace;
+
+ return 0;
+}
+
+static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+ struct dpu_plane_state *pstate,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ struct msm_gem_address_space *aspace = NULL;
+ int ret;
+
+ if (!plane || !pstate || !pipe_cfg || !fb) {
+ DPU_ERROR(
+ "invalid arg(s), plane %d state %d cfg %d fb %d\n",
+ plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
+ return;
+ }
+
+ ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
+ return;
+ }
+
+ ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+ if (ret == -EAGAIN)
+ DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
+ else if (ret)
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ else if (pdpu->pipe_hw->ops.setup_sourceaddress) {
+ trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx,
+ &pipe_cfg->layout,
+ pstate->multirect_index);
+ pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg,
+ pstate->multirect_index);
+ }
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+ struct dpu_hw_scaler3_cfg *scale_cfg,
+ const struct dpu_format *fmt,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+ uint32_t i;
+
+ if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+ !chroma_subsmpl_v) {
+ DPU_ERROR(
+ "pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
+ !!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
+ chroma_subsmpl_v);
+ return;
+ }
+
+ memset(scale_cfg, 0, sizeof(*scale_cfg));
+ memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
+
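+ /*
+ * Phase steps are Q21 fixed point:
+ * step = (src / dst) << PHASE_STEP_SHIFT. e.g. (illustrative)
+ * src_w = 1920, dst_w = 960 gives 2 << 21, i.e. the scaler advances
+ * two source pixels per destination pixel.
+ */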
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
+
+
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ scale_cfg->src_width[i] = src_w;
+ scale_cfg->src_height[i] = src_h;
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ scale_cfg->src_width[i] /= chroma_subsmpl_h;
+ scale_cfg->src_height[i] /= chroma_subsmpl_v;
+ }
+ scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+ pstate->pixel_ext.num_ext_pxls_top[i] =
+ scale_cfg->src_height[i];
+ pstate->pixel_ext.num_ext_pxls_left[i] =
+ scale_cfg->src_width[i];
+ }
+ if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+ && (src_w == dst_w))
+ return;
+
+ scale_cfg->dst_width = dst_w;
+ scale_cfg->dst_height = dst_h;
+ scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+ scale_cfg->lut_flag = 0;
+ scale_cfg->blend_cfg = 1;
+ scale_cfg->enable = 1;
+}
+
+static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+{
+ static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+ };
+ static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+ };
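+ /*
+ * The S15.16 entries above encode the BT.601 limited-range
+ * coefficients, e.g. 0x00012A00 == 76288 / 65536 ~= 1.164,
+ * the luma scale factor.
+ */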
+
+ if (!pdpu) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
+ pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+ else
+ pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+
+ DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+ pdpu->csc_ptr->csc_mv[0],
+ pdpu->csc_ptr->csc_mv[1],
+ pdpu->csc_ptr->csc_mv[2]);
+}
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ const struct dpu_format *fmt, bool color_fill)
+{
+ struct dpu_hw_pixel_ext *pe;
+ uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+
+ if (!pdpu || !fmt || !pstate) {
+ DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+ pdpu != 0, fmt != 0, pstate != 0);
+ return;
+ }
+
+ pe = &pstate->pixel_ext;
+
+ /* don't chroma subsample if decimating */
+ chroma_subsmpl_h =
+ drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+ chroma_subsmpl_v =
+ drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+ /* update scaler. calculate default config for QSEED3 */
+ _dpu_plane_setup_scaler3(pdpu, pstate,
+ drm_rect_width(&pdpu->pipe_cfg.src_rect),
+ drm_rect_height(&pdpu->pipe_cfg.src_rect),
+ drm_rect_width(&pdpu->pipe_cfg.dst_rect),
+ drm_rect_height(&pdpu->pipe_cfg.dst_rect),
+ &pstate->scaler3_cfg, fmt,
+ chroma_subsmpl_h, chroma_subsmpl_v);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu: Pointer to DPU plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+ uint32_t color, uint32_t alpha)
+{
+ const struct dpu_format *fmt;
+ const struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+
+ if (!pdpu || !pdpu->base.state) {
+ DPU_ERROR("invalid plane\n");
+ return -EINVAL;
+ }
+
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
+ return -EINVAL;
+ }
+
+ plane = &pdpu->base;
+ pstate = to_dpu_plane_state(plane->state);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /*
+ * select fill format to match user property expectation,
+ * h/w only supports RGB variants
+ */
+ fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+ /* update sspp */
+ if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+ pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+ (color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+ pstate->multirect_index);
+
+ /* override scaler/decimation if solid fill */
+ pdpu->pipe_cfg.src_rect.x1 = 0;
+ pdpu->pipe_cfg.src_rect.y1 = 0;
+ pdpu->pipe_cfg.src_rect.x2 =
+ drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+ pdpu->pipe_cfg.src_rect.y2 =
+ drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+
+ if (pdpu->pipe_hw->ops.setup_format)
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+ fmt, DPU_SSPP_SOLID_FILL,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_rects)
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pdpu->pipe_cfg,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_pe)
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pstate->pixel_ext);
+
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
+ }
+
+ return 0;
+}
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!drm_state)
+ return;
+
+ pstate = to_dpu_plane_state(drm_state);
+
+ pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+ pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+ struct dpu_plane_state *pstate[R_MAX];
+ const struct drm_plane_state *drm_state[R_MAX];
+ struct drm_rect src[R_MAX], dst[R_MAX];
+ struct dpu_plane *dpu_plane[R_MAX];
+ const struct dpu_format *fmt[R_MAX];
+ int i, buffer_lines;
+ unsigned int max_tile_height = 1;
+ bool parallel_fetch_qualified = true;
+ bool has_tiled_rect = false;
+
+ for (i = 0; i < R_MAX; i++) {
+ const struct msm_format *msm_fmt;
+
+ drm_state[i] = i ? plane->r1 : plane->r0;
+ msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+ fmt[i] = to_dpu_format(msm_fmt);
+
+ if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+ has_tiled_rect = true;
+ if (fmt[i]->tile_height > max_tile_height)
+ max_tile_height = fmt[i]->tile_height;
+ }
+ }
+
+ for (i = 0; i < R_MAX; i++) {
+ int width_threshold;
+
+ pstate[i] = to_dpu_plane_state(drm_state[i]);
+ dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+ if (pstate[i] == NULL) {
+ DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+ drm_state[i]->plane->base.id);
+ return -EINVAL;
+ }
+
+ src[i].x1 = drm_state[i]->src_x >> 16;
+ src[i].y1 = drm_state[i]->src_y >> 16;
+ src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+ src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+ dst[i] = drm_plane_state_dest(drm_state[i]);
+
+ if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+ drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "scaling is not supported in multirect mode\n");
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(fmt[i])) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "Unsupported format for multirect mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * SSPP PD_MEM is split in half - one half for each RECT.
+ * Tiled formats need 5 lines of buffering while fetching
+ * whereas linear formats need only 2 lines.
+ * So we cannot support more than half of the supported SSPP
+ * width for tiled formats.
+ */
+ width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+ if (has_tiled_rect)
+ width_threshold /= 2;
+
+ if (parallel_fetch_qualified &&
+ drm_rect_width(&src[i]) > width_threshold)
+ parallel_fetch_qualified = false;
+ }
+
+ /* Validate RECT's and set the mode */
+
+ /* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+ if (parallel_fetch_qualified) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ goto done;
+ }
+
+ /* TIME_MX Mode */
+ buffer_lines = 2 * max_tile_height;
+
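+ /*
+ * Time multiplex mode shares the line buffer between the two rects,
+ * so the destination rects must be separated vertically by at least
+ * buffer_lines scanlines; e.g. (illustrative) with a tile height of
+ * 4, R1 must start at least 8 lines below the end of R0 (or vice
+ * versa).
+ */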
+ if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+ dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ } else {
+ DPU_ERROR(
+ "No multirect mode possible for the planes (%d - %d)\n",
+ drm_state[R0]->plane->base.id,
+ drm_state[R1]->plane->base.id);
+ return -EINVAL;
+ }
+
+done:
+ if (dpu_plane[R0]->is_virtual) {
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
+ } else {
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+ }
+
+ DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+ pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+ DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+ pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+ return 0;
+}
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush for the given plane
+ * @plane: Pointer to drm plane structure
+ * @ctl: Pointer to hardware control driver
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+ u32 *flush_sspp)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !flush_sspp) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ pstate = to_dpu_plane_state(plane->state);
+
+ *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+ struct dpu_hw_fmt_layout layout;
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ struct dma_fence *fence;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+ ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
+ return ret;
+ }
+
+ /* cache aspace */
+ pstate->aspace = aspace;
+
+ /*
+ * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+ * we can use msm_atomic_prepare_fb() instead of doing the
+ * implicit fence and fb prepare by hand here.
+ */
+ obj = msm_framebuffer_bo(new_state->fb, 0);
+ msm_obj = to_msm_bo(obj);
+ fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ if (fence)
+ drm_atomic_set_fence_for_plane(new_state, fence);
+
+ if (pstate->aspace) {
+ ret = msm_framebuffer_prepare(new_state->fb,
+ pstate->aspace);
+ if (ret) {
+ DPU_ERROR("failed to prepare framebuffer\n");
+ return ret;
+ }
+ }
+
+ /* validate framebuffer layout before commit */
+ ret = dpu_format_populate_layout(pstate->aspace,
+ new_state->fb, &layout);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *old_pstate;
+
+ if (!old_state || !old_state->fb)
+ return;
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
+}
+
+static bool dpu_plane_validate_src(struct drm_rect *src,
+ struct drm_rect *fb_rect,
+ uint32_t min_src_size)
+{
+ /* Ensure fb size is supported */
+ if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH ||
+ drm_rect_height(fb_rect) > MAX_IMG_HEIGHT)
+ return false;
+
+ /* Ensure src rect is above the minimum size */
+ if (drm_rect_width(src) < min_src_size ||
+ drm_rect_height(src) < min_src_size)
+ return false;
+
+ /* Ensure src is fully encapsulated in fb */
+ return drm_rect_intersect(fb_rect, src) &&
+ drm_rect_equals(fb_rect, src);
+}
+
+static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ int ret = 0;
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ const struct dpu_format *fmt;
+ struct drm_rect src, dst, fb_rect = { 0 };
+ uint32_t max_upscale = 1, max_downscale = 1;
+ uint32_t min_src_size, max_linewidth;
+ int hscale = 1, vscale = 1;
+
+ if (!plane || !state) {
+ DPU_ERROR("invalid arg(s), plane %d state %d\n",
+ plane != 0, state != 0);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(state);
+
+ if (!pdpu->pipe_sblk) {
+ DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ src.x1 = state->src_x >> 16;
+ src.y1 = state->src_y >> 16;
+ src.x2 = src.x1 + (state->src_w >> 16);
+ src.y2 = src.y1 + (state->src_h >> 16);
+
+ dst = drm_plane_state_dest(state);
+
+ fb_rect.x2 = state->fb->width;
+ fb_rect.y2 = state->fb->height;
+
+ max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+
+ if (pdpu->features & DPU_SSPP_SCALER) {
+ max_downscale = pdpu->pipe_sblk->maxdwnscale;
+ max_upscale = pdpu->pipe_sblk->maxupscale;
+ }
+ if (drm_rect_width(&src) < drm_rect_width(&dst))
+ hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
+ else
+ hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
+ if (drm_rect_height(&src) < drm_rect_height(&dst))
+ vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
+ else
+ vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
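+ /*
+ * drm_rect_calc_{h,v}scale() return a negative errno when the
+ * required scale factor falls outside [min, max], so hscale/vscale
+ * < 0 in the checks below means the request exceeds this pipe's
+ * scaler limits.
+ */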
+
+ DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
+ dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
+
+ if (!dpu_plane_enabled(state))
+ goto exit;
+
+ fmt = to_dpu_format(msm_framebuffer_format(state->fb));
+
+ min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+ if (DPU_FORMAT_IS_YUV(fmt) &&
+ (!(pdpu->features & DPU_SSPP_SCALER) ||
+ !(pdpu->features & (BIT(DPU_SSPP_CSC)
+ | BIT(DPU_SSPP_CSC_10BIT))))) {
+ DPU_ERROR_PLANE(pdpu,
+ "plane doesn't have scaler/csc for yuv\n");
+ ret = -EINVAL;
+
+ /* check src bounds */
+ } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
+ DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ ret = -E2BIG;
+
+ /* valid yuv image */
+ } else if (DPU_FORMAT_IS_YUV(fmt) &&
+ (src.x1 & 0x1 || src.y1 & 0x1 ||
+ drm_rect_width(&src) & 0x1 ||
+ drm_rect_height(&src) & 0x1)) {
+ DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ ret = -EINVAL;
+
+ /* min dst support */
+ } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
+ DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&dst));
+ ret = -EINVAL;
+
+ /* check decimated source width */
+ } else if (drm_rect_width(&src) > max_linewidth) {
+ DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&src), max_linewidth);
+ ret = -E2BIG;
+
+ /* check scaler capability */
+ } else if (hscale < 0 || vscale < 0) {
+ DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
+ DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
+ ret = -E2BIG;
+ }
+
+exit:
+ return ret;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (!state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
+
+ return dpu_plane_sspp_atomic_check(plane, state);
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+
+ /*
+ * These updates have to be done immediately before the plane flush
+ * timing, and may not be moved to the atomic_update/mode_set functions.
+ */
+ if (pdpu->is_error)
+ /* force white frame with 100% alpha pipe output on error */
+ _dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+ else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+ /* force 100% alpha */
+ _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+ else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
+ pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+
+ /* flag h/w flush complete */
+ if (plane->state)
+ pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error - enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error value to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = error;
+}
+
+static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ uint32_t nplanes, src_flags;
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_pstate;
+ const struct dpu_format *fmt;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_rect src, dst;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return -EINVAL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return -EINVAL;
+ } else if (!old_state) {
+ DPU_ERROR("invalid old state\n");
+ return -EINVAL;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ state = plane->state;
+
+ pstate = to_dpu_plane_state(state);
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ crtc = state->crtc;
+ fb = state->fb;
+ if (!crtc || !fb) {
+ DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
+ crtc != 0, fb != 0);
+ return -EINVAL;
+ }
+ fmt = to_dpu_format(msm_framebuffer_format(fb));
+ nplanes = fmt->num_planes;
+
+ memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+
+ _dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+
+ pstate->pending = true;
+
+ pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ src.x1 = state->src_x >> 16;
+ src.y1 = state->src_y >> 16;
+ src.x2 = src.x1 + (state->src_w >> 16);
+ src.y2 = src.y1 + (state->src_h >> 16);
+
+ dst = drm_plane_state_dest(state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT
+ ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src),
+ crtc->base.id, DRM_RECT_ARG(&dst),
+ (char *)&fmt->base.pixel_format,
+ DPU_FORMAT_IS_UBWC(fmt));
+
+ pdpu->pipe_cfg.src_rect = src;
+ pdpu->pipe_cfg.dst_rect = dst;
+
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+
+ /* override for color fill */
+ if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+ /* skip remaining processing on color fill */
+ return 0;
+ }
+
+ if (pdpu->pipe_hw->ops.setup_rects) {
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pdpu->pipe_cfg,
+ pstate->multirect_index);
+ }
+
+ if (pdpu->pipe_hw->ops.setup_pe &&
+ (pstate->multirect_index != DPU_SSPP_RECT_1))
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pstate->pixel_ext);
+
+ /*
+ * When programmed in multirect mode, the scaler block is
+ * bypassed. We still need to update alpha and bitwidth,
+ * but ONLY for RECT0.
+ */
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
+
+ if (pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(
+ pdpu->pipe_hw,
+ pstate->multirect_index,
+ pstate->multirect_mode);
+
+ if (pdpu->pipe_hw->ops.setup_format) {
+ src_flags = 0x0;
+
+ /* update format */
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_cdp) {
+ struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+ memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+
+ cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+ [DPU_PERF_CDP_USAGE_RT].rd_enable;
+ cdp_cfg->ubwc_meta_enable =
+ DPU_FORMAT_IS_UBWC(fmt);
+ cdp_cfg->tile_amortize_enable =
+ DPU_FORMAT_IS_UBWC(fmt) ||
+ DPU_FORMAT_IS_TILE(fmt);
+ cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+
+ pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+ }
+
+ /* update csc */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ _dpu_plane_setup_csc(pdpu);
+ else
+ pdpu->csc_ptr = NULL;
+ }
+
+ _dpu_plane_set_qos_lut(plane, fb);
+ _dpu_plane_set_danger_lut(plane, fb);
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ _dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
+ _dpu_plane_set_ot_limit(plane, crtc);
+ }
+
+ _dpu_plane_set_qos_remap(plane);
+
+ return 0;
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return;
+ } else if (!old_state) {
+ DPU_ERROR("invalid old state\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ state = plane->state;
+ pstate = to_dpu_plane_state(state);
+
+ trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+ pstate->multirect_mode);
+
+ pstate->pending = true;
+
+ if (is_dpu_plane_virtual(plane) &&
+ pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
+ DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = false;
+ state = plane->state;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (!dpu_plane_sspp_enabled(state)) {
+ _dpu_plane_atomic_disable(plane, old_state);
+ } else {
+ int ret;
+
+ ret = dpu_plane_sspp_atomic_update(plane, old_state);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ }
+}
+
+void dpu_plane_restore(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* last plane state is same as current state */
+ dpu_plane_atomic_update(plane, plane->state);
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (pdpu) {
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ mutex_destroy(&pdpu->lock);
+
+ drm_plane_helper_disable(plane, NULL);
+
+ /* this will destroy the states as well */
+ drm_plane_cleanup(plane);
+
+ if (pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+
+ kfree(pdpu);
+ }
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !state) {
+ DPU_ERROR("invalid arg(s), plane %d state %d\n",
+ plane != 0, state != 0);
+ return;
+ }
+
+ pstate = to_dpu_plane_state(state);
+
+ /* remove ref count for frame buffers */
+ if (state->fb)
+ drm_framebuffer_put(state->fb);
+
+ kfree(pstate);
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return NULL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return NULL;
+ }
+
+ old_state = to_dpu_plane_state(plane->state);
+ pdpu = to_dpu_plane(plane);
+ pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return NULL;
+ }
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ pstate->pending = false;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+ return &pstate->base;
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* remove previous state, if present */
+ if (plane->state) {
+ dpu_plane_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return;
+ }
+
+ pstate->base.plane = plane;
+
+ plane->state = &pstate->base;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t _dpu_plane_danger_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ struct dpu_mdss_cfg *cfg = kms->catalog;
+ int len = 0;
+ char buf[40] = {'\0'};
+
+ if (!cfg)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, kms->dev) {
+ if (plane->fb && plane->state) {
+ dpu_plane_danger_signal_ctrl(plane, enable);
+ DPU_DEBUG("plane:%d img:%dx%d ",
+ plane->base.id, plane->fb->width,
+ plane->fb->height);
+ DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h);
+ } else {
+ DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+ }
+ }
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ struct dpu_mdss_cfg *cfg = kms->catalog;
+ int disable_panic;
+ char buf[10];
+
+ if (!cfg)
+ return -EFAULT;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &disable_panic))
+ return -EFAULT;
+
+ if (disable_panic) {
+ /* Disable panic signal for all active pipes */
+ DPU_DEBUG("Disabling danger:\n");
+ _dpu_plane_set_danger_state(kms, false);
+ kms->has_danger_ctrl = false;
+ } else {
+ /* Enable panic signal for all active pipes */
+ DPU_DEBUG("Enabling danger:\n");
+ kms->has_danger_ctrl = true;
+ _dpu_plane_set_danger_state(kms, true);
+ }
+
+ return count;
+}
+
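+/*
+ * Usage sketch (exact path depends on the DRM minor and pipe name): the
+ * "disable_danger" file created from these fops takes a boolean-style
+ * integer, e.g.
+ *
+ *   echo 1 > /sys/kernel/debug/dri/0/<pipe>/disable_danger   # disable panic
+ *   echo 0 > /sys/kernel/debug/dri/0/<pipe>/disable_danger   # re-enable
+ */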
+static const struct file_operations dpu_plane_danger_enable = {
+ .open = simple_open,
+ .read = _dpu_plane_danger_read,
+ .write = _dpu_plane_danger_write,
+};
+
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_kms *kms;
+ struct msm_drm_private *priv;
+ const struct dpu_sspp_sub_blks *sblk = NULL;
+ const struct dpu_sspp_cfg *cfg = NULL;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+
+ if (pdpu && pdpu->pipe_hw)
+ cfg = pdpu->pipe_hw->cap;
+ if (cfg)
+ sblk = cfg->sblk;
+
+ if (!sblk)
+ return 0;
+
+ /* create overall sub-directory for the pipe */
+ pdpu->debugfs_root =
+ debugfs_create_dir(pdpu->pipe_name,
+ plane->dev->primary->debugfs_root);
+
+ if (!pdpu->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_x32("features", 0600,
+ pdpu->debugfs_root, &pdpu->features);
+
+ /* add register dump support */
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
+ sblk->src_blk.base + cfg->base,
+ sblk->src_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("src_blk", 0400,
+ pdpu->debugfs_root, &pdpu->debugfs_src);
+
+ if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("scaler_blk", 0400,
+ pdpu->debugfs_root,
+ &pdpu->debugfs_scaler);
+ debugfs_create_bool("default_scaling",
+ 0600,
+ pdpu->debugfs_root,
+ &pdpu->debugfs_default_scale);
+ }
+
+ if (cfg->features & BIT(DPU_SSPP_CSC) ||
+ cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("csc_blk", 0400,
+ pdpu->debugfs_root, &pdpu->debugfs_csc);
+ }
+
+ debugfs_create_u32("xin_id",
+ 0400,
+ pdpu->debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ 0400,
+ pdpu->debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+ debugfs_create_x32("creq_vblank",
+ 0600,
+ pdpu->debugfs_root,
+ (u32 *) &sblk->creq_vblank);
+ debugfs_create_x32("danger_vblank",
+ 0600,
+ pdpu->debugfs_root,
+ (u32 *) &sblk->danger_vblank);
+
+ debugfs_create_file("disable_danger",
+ 0600,
+ pdpu->debugfs_root,
+ kms, &dpu_plane_danger_enable);
+
+ return 0;
+}
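+
+/*
+ * Usage sketch (editor's illustration, not part of the driver): the
+ * "disable_danger" node created above can be exercised from userspace,
+ * for example:
+ *
+ * echo 1 > /sys/kernel/debug/dri/0/plane31/disable_danger
+ * cat /sys/kernel/debug/dri/0/plane31/disable_danger
+ *
+ * The debugfs mount point, DRM minor and plane id are assumptions; the
+ * directory name comes from pdpu->pipe_name ("plane%u").
+ */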
+
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+ pdpu = to_dpu_plane(plane);
+
+ debugfs_remove_recursive(pdpu->debugfs_root);
+}
+#else
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+ return 0;
+}
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+}
+#endif
+
+static int dpu_plane_late_register(struct drm_plane *plane)
+{
+ return _dpu_plane_init_debugfs(plane);
+}
+
+static void dpu_plane_early_unregister(struct drm_plane *plane)
+{
+ _dpu_plane_destroy_debugfs(plane);
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = dpu_plane_destroy,
+ .reset = dpu_plane_reset,
+ .atomic_duplicate_state = dpu_plane_duplicate_state,
+ .atomic_destroy_state = dpu_plane_destroy_state,
+ .late_register = dpu_plane_late_register,
+ .early_unregister = dpu_plane_early_unregister,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+ .prepare_fb = dpu_plane_prepare_fb,
+ .cleanup_fb = dpu_plane_cleanup_fb,
+ .atomic_check = dpu_plane_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
+}
+
+bool is_dpu_plane_virtual(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->is_virtual : false;
+}
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs, u32 master_plane_id)
+{
+ struct drm_plane *plane = NULL, *master_plane = NULL;
+ const struct dpu_format_extended *format_list;
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv;
+ struct dpu_kms *kms;
+ enum drm_plane_type type;
+ int zpos_max = DPU_ZPOS_MAX;
+ int ret = -EINVAL;
+
+ if (!dev) {
+ DPU_ERROR("[%u]device is NULL\n", pipe);
+ goto exit;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DPU_ERROR("[%u]private data is NULL\n", pipe);
+ goto exit;
+ }
+
+ if (!priv->kms) {
+ DPU_ERROR("[%u]invalid KMS reference\n", pipe);
+ goto exit;
+ }
+ kms = to_dpu_kms(priv->kms);
+
+ if (!kms->catalog) {
+ DPU_ERROR("[%u]invalid catalog reference\n", pipe);
+ goto exit;
+ }
+
+ /* create and zero local structure */
+ pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+ if (!pdpu) {
+ DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* cache local stuff for later */
+ plane = &pdpu->base;
+ pdpu->pipe = pipe;
+ pdpu->is_virtual = (master_plane_id != 0);
+ INIT_LIST_HEAD(&pdpu->mplane_list);
+ master_plane = drm_plane_find(dev, NULL, master_plane_id);
+ if (master_plane) {
+ struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
+
+ list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
+ }
+
+ /* initialize underlying h/w driver */
+ pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
+ master_plane_id != 0);
+ if (IS_ERR(pdpu->pipe_hw)) {
+ DPU_ERROR("[%u]SSPP init failed\n", pipe);
+ ret = PTR_ERR(pdpu->pipe_hw);
+ goto clean_plane;
+ } else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) {
+ DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+ goto clean_sspp;
+ }
+
+ /* cache features mask for later */
+ pdpu->features = pdpu->pipe_hw->cap->features;
+ pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
+ if (!pdpu->pipe_sblk) {
+ DPU_ERROR("[%u]invalid sblk\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (!master_plane_id)
+ format_list = pdpu->pipe_sblk->format_list;
+ else
+ format_list = pdpu->pipe_sblk->virt_format_list;
+
+ pdpu->nformats = dpu_populate_formats(format_list,
+ pdpu->formats,
+ 0,
+ ARRAY_SIZE(pdpu->formats));
+
+ if (!pdpu->nformats) {
+ DPU_ERROR("[%u]no valid formats for plane\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (pdpu->features & BIT(DPU_SSPP_CURSOR))
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_plane)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+ pdpu->formats, pdpu->nformats,
+ NULL, type, NULL);
+ if (ret)
+ goto clean_sspp;
+
+ pdpu->catalog = kms->catalog;
+
+ if (kms->catalog->mixer_count &&
+ kms->catalog->mixer[0].sblk->maxblendstages) {
+ zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
+ if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
+ zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
+ }
+
+ ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+ if (ret)
+ DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+ /* success! finalize initialization */
+ drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+ /* save user friendly pipe name for later */
+ snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
+
+ mutex_init(&pdpu->lock);
+
+ DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+ pipe, plane->base.id, master_plane_id);
+ return plane;
+
+clean_sspp:
+ if (pdpu && pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+clean_plane:
+ kfree(pdpu);
+exit:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644
index 000000000000..f6fe6ddc7a3a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Define dpu extension of drm plane state object
+ * @base: base drm plane state object
+ * @aspace: pointer to address space for input/output buffers
+ * @input_fence: dereferenced input fence pointer
+ * @stage: assigned by crtc blender
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending: whether the current update is still pending
+ * @scaler3_cfg: configuration data for scaler3
+ * @pixel_ext: configuration data for pixel extensions
+ * @cdp_cfg: CDP configuration
+ */
+struct dpu_plane_state {
+ struct drm_plane_state base;
+ struct msm_gem_address_space *aspace;
+ void *input_fence;
+ enum dpu_stage stage;
+ uint32_t multirect_index;
+ uint32_t multirect_mode;
+ bool pending;
+
+ /* scaler configuration */
+ struct dpu_hw_scaler3_cfg scaler3_cfg;
+ struct dpu_hw_pixel_ext pixel_ext;
+
+ struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane configured on rect 0
+ * @r1: drm plane configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+ const struct drm_plane_state *r0;
+ const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+ container_of(x, struct dpu_plane_state, base)
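+
+/*
+ * Typical use (illustrative sketch): recover the DPU-private state from
+ * the generic drm_plane_state embedded as @base, e.g. in an atomic hook:
+ *
+ * struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+ */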
+
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane: Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * is_dpu_plane_virtual - check for virtual plane
+ * @plane: Pointer to DRM plane object
+ * returns: true - if the plane is virtual
+ * false - if the plane is primary
+ */
+bool is_dpu_plane_virtual(struct drm_plane *plane);
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush mask
+ * @plane: Pointer to DRM plane object
+ * @ctl: Pointer to control hardware
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+ u32 *flush_sspp);
+
+/**
+ * dpu_plane_restore - restore hw state if previously power collapsed
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_restore(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_kickoff - final plane operations before commit kickoff
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_kickoff(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
+ * a regular plane initialization. A non-zero primary plane
+ * id will be passed for a virtual pipe initialization.
+ *
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs, u32 master_plane_id);
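+
+/*
+ * Hedged call sketch (illustration only; SSPP_VIG0 and the CRTC mask are
+ * assumptions): creating a primary, non-virtual plane during KMS setup:
+ *
+ * struct drm_plane *plane;
+ *
+ * plane = dpu_plane_init(dev, SSPP_VIG0, true, BIT(0), 0);
+ * if (IS_ERR(plane))
+ * return PTR_ERR(plane);
+ */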
+
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ * against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_wait_input_fence - wait for input fence object
+ * @plane: Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane: Pointer to DRM plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+ uint32_t color, uint32_t alpha);
+
+/**
+ * dpu_plane_set_revalidate - sets revalidate flag which forces a full
+ * validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
new file mode 100644
index 000000000000..a75eebca2f37
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include "dpu_power_handle.h"
+#include "dpu_trace.h"
+
+static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
+ [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
+ [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
+ [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
+};
+
+const char *dpu_power_handle_get_dbus_name(u32 bus_id)
+{
+ if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
+ return data_bus_name[bus_id];
+
+ return NULL;
+}
+
+static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
+ u32 event_type)
+{
+ struct dpu_power_event *event;
+
+ list_for_each_entry(event, &phandle->event_list, list) {
+ if (event->event_type & event_type)
+ event->cb_fnc(event_type, event->usr);
+ }
+}
+
+struct dpu_power_client *dpu_power_client_create(
+ struct dpu_power_handle *phandle, char *client_name)
+{
+ struct dpu_power_client *client;
+ static u32 id;
+
+ if (!client_name || !phandle) {
+ pr_err("client name is null or invalid power data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&phandle->phandle_lock);
+ strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+ client->usecase_ndx = VOTE_INDEX_DISABLE;
+ client->id = id;
+ client->active = true;
+ pr_debug("client %s created:%pK id :%d\n", client_name,
+ client, id);
+ id++;
+ list_add(&client->list, &phandle->power_client_clist);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return client;
+}
+
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+ struct dpu_power_client *client)
+{
+ if (!client || !phandle) {
+ pr_err("reg bus vote: invalid client handle\n");
+ } else if (!client->active) {
+ pr_err("dpu power deinit already done\n");
+ kfree(client);
+ } else {
+ pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+ client->name, client, client->id);
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(client);
+ }
+}
+
+void dpu_power_resource_init(struct platform_device *pdev,
+ struct dpu_power_handle *phandle)
+{
+ phandle->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&phandle->power_client_clist);
+ INIT_LIST_HEAD(&phandle->event_list);
+
+ mutex_init(&phandle->phandle_lock);
+}
+
+void dpu_power_resource_deinit(struct platform_device *pdev,
+ struct dpu_power_handle *phandle)
+{
+ struct dpu_power_client *curr_client, *next_client;
+ struct dpu_power_event *curr_event, *next_event;
+
+ if (!phandle || !pdev) {
+ pr_err("invalid input param\n");
+ return;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+ list_for_each_entry_safe(curr_client, next_client,
+ &phandle->power_client_clist, list) {
+ pr_err("client:%s-%d still registered with refcount:%d\n",
+ curr_client->name, curr_client->id,
+ curr_client->refcount);
+ curr_client->active = false;
+ list_del(&curr_client->list);
+ }
+
+ list_for_each_entry_safe(curr_event, next_event,
+ &phandle->event_list, list) {
+ pr_err("event:%d, client:%s still registered\n",
+ curr_event->event_type,
+ curr_event->client_name);
+ curr_event->active = false;
+ list_del(&curr_event->list);
+ }
+ mutex_unlock(&phandle->phandle_lock);
+}
+
+int dpu_power_resource_enable(struct dpu_power_handle *phandle,
+ struct dpu_power_client *pclient, bool enable)
+{
+ bool changed = false;
+ u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+ struct dpu_power_client *client;
+
+ if (!phandle || !pclient) {
+ pr_err("invalid input argument\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+ if (enable)
+ pclient->refcount++;
+ else if (pclient->refcount)
+ pclient->refcount--;
+
+ if (pclient->refcount)
+ pclient->usecase_ndx = VOTE_INDEX_LOW;
+ else
+ pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+ list_for_each_entry(client, &phandle->power_client_clist, list) {
+ if (client->usecase_ndx < VOTE_INDEX_MAX &&
+ client->usecase_ndx > max_usecase_ndx)
+ max_usecase_ndx = client->usecase_ndx;
+ }
+
+ if (phandle->current_usecase_ndx != max_usecase_ndx) {
+ changed = true;
+ prev_usecase_ndx = phandle->current_usecase_ndx;
+ phandle->current_usecase_ndx = max_usecase_ndx;
+ }
+
+ pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+ __builtin_return_address(0), changed, max_usecase_ndx,
+ pclient->name, pclient->id, enable, pclient->refcount);
+
+ if (!changed)
+ goto end;
+
+ if (enable) {
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_PRE_ENABLE);
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_POST_ENABLE);
+
+ } else {
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_PRE_DISABLE);
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_POST_DISABLE);
+ }
+
+end:
+ mutex_unlock(&phandle->phandle_lock);
+ return 0;
+}
+
+struct dpu_power_event *dpu_power_handle_register_event(
+ struct dpu_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name)
+{
+ struct dpu_power_event *event;
+
+ if (!phandle) {
+ pr_err("invalid power handle\n");
+ return ERR_PTR(-EINVAL);
+ } else if (!cb_fnc || !event_type) {
+ pr_err("no callback fnc or event type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ event->event_type = event_type;
+ event->cb_fnc = cb_fnc;
+ event->usr = usr;
+ strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+ event->active = true;
+
+ mutex_lock(&phandle->phandle_lock);
+ list_add(&event->list, &phandle->event_list);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return event;
+}
+
+void dpu_power_handle_unregister_event(
+ struct dpu_power_handle *phandle,
+ struct dpu_power_event *event)
+{
+ if (!phandle || !event) {
+ pr_err("invalid phandle or event\n");
+ } else if (!event->active) {
+ pr_err("power handle deinit already done\n");
+ kfree(event);
+ } else {
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&event->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(event);
+ }
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
new file mode 100644
index 000000000000..344f74464eca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DPU_POWER_HANDLE_H_
+#define _DPU_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
+#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
+#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
+#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
+
+#include "dpu_io_util.h"
+
+/* event will be triggered before power handler disable */
+#define DPU_POWER_EVENT_PRE_DISABLE 0x1
+
+/* event will be triggered after power handler disable */
+#define DPU_POWER_EVENT_POST_DISABLE 0x2
+
+/* event will be triggered before power handler enable */
+#define DPU_POWER_EVENT_PRE_ENABLE 0x4
+
+/* event will be triggered after power handler enable */
+#define DPU_POWER_EVENT_POST_ENABLE 0x8
+
+/**
+ * mdss_bus_vote_type: register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+ VOTE_INDEX_DISABLE,
+ VOTE_INDEX_LOW,
+ VOTE_INDEX_MAX,
+};
+
+/**
+ * enum dpu_power_handle_data_bus_client - type of axi bus clients
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum dpu_power_handle_data_bus_client {
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum DPU_POWER_HANDLE_DBUS_ID {
+ DPU_POWER_HANDLE_DBUS_ID_MNOC,
+ DPU_POWER_HANDLE_DBUS_ID_LLCC,
+ DPU_POWER_HANDLE_DBUS_ID_EBI,
+ DPU_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
+ * struct dpu_power_client: stores the power client for dpu driver
+ * @name: name of the client
+ * @usecase_ndx: current regs bus vote type
+ * @refcount: current refcount if multiple modules are using the
+ * same client for enable/disable. Power module will
+ * aggregate the refcount and vote accordingly for this
+ * client.
+ * @id: assigned during create; helps with debugging.
+ * @list: list to attach power handle master list
+ * @ab: arbitrated bandwidth for each bus client
+ * @ib: instantaneous bandwidth for each bus client
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_client {
+ char name[MAX_CLIENT_NAME_LEN];
+ short usecase_ndx;
+ short refcount;
+ u32 id;
+ struct list_head list;
+ u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ bool active;
+};
+
+/**
+ * struct dpu_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback event trigger
+ * @event_type: refer to DPU_POWER_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_event {
+ char client_name[MAX_CLIENT_NAME_LEN];
+ void (*cb_fnc)(u32 event_type, void *usr);
+ void *usr;
+ u32 event_type;
+ struct list_head list;
+ bool active;
+};
+
+/**
+ * struct dpu_power_handle: power handle main struct
+ * @power_client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @current_usecase_ndx: current usecase index
+ * @event_list: current power handle event list
+ */
+struct dpu_power_handle {
+ struct list_head power_client_clist;
+ struct mutex phandle_lock;
+ struct device *dev;
+ u32 current_usecase_ndx;
+ struct list_head event_list;
+};
+
+/**
+ * dpu_power_resource_init() - initializes the dpu power handle
+ * @pdev: platform device to search the power resources
+ * @pdata: power handle to store the power resources
+ */
+void dpu_power_resource_init(struct platform_device *pdev,
+ struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_resource_deinit() - release the dpu power handle
+ * @pdev: platform device for power resources
+ * @pdata: power handle containing the resources
+ *
+ * Return: none
+ */
+void dpu_power_resource_deinit(struct platform_device *pdev,
+ struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_client_create() - create the client on power handle
+ * @pdata: power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: power client pointer on success, or ERR_PTR on failure.
+ */
+struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
+ char *client_name);
+
+/**
+ * dpu_power_client_destroy() - destroy the client on power handle
+ * @phandle: power handle containing the resources
+ * @client: client to destroy and free
+ *
+ * Return: none
+ */
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+ struct dpu_power_client *client);
+
+/**
+ * dpu_power_resource_enable() - enable/disable the power resources
+ * @pdata: power handle containing the resources
+ * @client: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int dpu_power_resource_enable(struct dpu_power_handle *pdata,
+ struct dpu_power_client *pclient, bool enable);
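+
+/*
+ * Pairing sketch (illustration): votes are refcounted per client, so each
+ * enable is expected to be balanced by a matching disable:
+ *
+ * dpu_power_resource_enable(phandle, client, true);
+ * ... access the hardware ...
+ * dpu_power_resource_enable(phandle, client, false);
+ */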
+
+/**
+ * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle: power handle containing the resources
+ * @client: client information for bandwidth control
+ * @enable: true to enable bandwidth for the data bus
+ *
+ * Return: none
+ */
+void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
+ struct dpu_power_client *pclient, int enable);
+
+/**
+ * dpu_power_handle_register_event - register a callback function for an event.
+ * Clients can register for multiple events with a single registration.
+ * Any block with access to phandle can register for the event
+ * notification.
+ * @phandle: power handle containing the resources
+ * @event_type: event type to register; refer to DPU_POWER_EVENT_*
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback on event trigger
+ * @client_name: name of the registering client
+ *
+ * Return: event pointer if success, or error code otherwise
+ */
+struct dpu_power_event *dpu_power_handle_register_event(
+ struct dpu_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name);
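+
+/*
+ * Registration sketch (illustration; power_cb and priv are hypothetical):
+ * one registration can listen for multiple events by OR-ing event bits:
+ *
+ * static void power_cb(u32 event_type, void *usr)
+ * {
+ * pr_debug("power event 0x%x\n", event_type);
+ * }
+ *
+ * event = dpu_power_handle_register_event(phandle,
+ * DPU_POWER_EVENT_PRE_ENABLE | DPU_POWER_EVENT_POST_ENABLE,
+ * power_cb, priv, "my_client");
+ */
+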
+/**
+ * dpu_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle: power handle containing the resources
+ * @event: event pointer returned after power handle register
+ */
+void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
+ struct dpu_power_event *event);
+
+/**
+ * dpu_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id: data bus identifier
+ * Return: Pointer to name string if success; NULL otherwise
+ */
+const char *dpu_power_handle_get_dbus_name(u32 bus_id);
+
+#endif /* _DPU_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644
index 000000000000..13c0a36d4ef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_intf.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+ ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+ (t).num_comp_enc == (r).num_enc && \
+ (t).num_intf == (r).num_intf)
+
+struct dpu_rm_topology_def {
+ enum dpu_rm_topology_name top_name;
+ int num_lm;
+ int num_comp_enc;
+ int num_intf;
+ int num_ctl;
+ int needs_split_display;
+};
+
+static const struct dpu_rm_topology_def g_top_table[] = {
+ { DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
+ { DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
+ { DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true },
+ { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
+};
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @top_ctrl: topology control preference from kernel client
+ * @top: selected topology for the display
+ * @hw_res: Hardware resources required as reported by the encoders
+ */
+struct dpu_rm_requirements {
+ uint64_t top_ctrl;
+ const struct dpu_rm_topology_def *topology;
+ struct dpu_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct dpu_rm_rsvp - Use Case Reservation tagging structure
+ * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ * By using as a tag, rather than lists of pointers to HW blocks used
+ * we can avoid some list management since we don't know how many blocks
+ * of each type a given use case may require.
+ * @list: List head for list of all reservations
+ * @seq: Global RSVP sequence number for debugging, especially for
+ * differentiating differenct allocations for same encoder.
+ * @enc_id: Reservations are tracked by Encoder DRM object ID.
+ * CRTCs may be connected to multiple Encoders.
+ * An encoder or connector id identifies the display path.
+ * @topology DRM<->HW topology use case
+ */
+struct dpu_rm_rsvp {
+ struct list_head list;
+ uint32_t seq;
+ uint32_t enc_id;
+ enum dpu_rm_topology_name topology;
+};
+
+/**
+ * struct dpu_rm_hw_blk - hardware block tracking list member
+ * @list: List head for list of all hardware blocks tracking items
+ * @rsvp: Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt: Temporary pointer used during reservation to the incoming
+ * request. Will be swapped into rsvp if proposal is accepted
+ * @type: Type of hardware block this structure tracks
+ * @id: Hardware ID number, within its own space, i.e. LM_X
+ * @catalog: Pointer to the hardware catalog entry for this block
+ * @hw: Pointer to the hardware register access object for this block
+ */
+struct dpu_rm_hw_blk {
+ struct list_head list;
+ struct dpu_rm_rsvp *rsvp;
+ struct dpu_rm_rsvp *rsvp_nxt;
+ enum dpu_hw_blk_type type;
+ uint32_t id;
+ struct dpu_hw_blk *hw;
+};
+
+/**
+ * dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
+ */
+enum dpu_rm_dbg_rsvp_stage {
+ DPU_RM_STAGE_BEGIN,
+ DPU_RM_STAGE_AFTER_CLEAR,
+ DPU_RM_STAGE_AFTER_RSVPNEXT,
+ DPU_RM_STAGE_FINAL
+};
+
+static void _dpu_rm_print_rsvps(
+ struct dpu_rm *rm,
+ enum dpu_rm_dbg_rsvp_stage stage)
+{
+ struct dpu_rm_rsvp *rsvp;
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+
+ DPU_DEBUG("%d\n", stage);
+
+ list_for_each_entry(rsvp, &rm->rsvps, list) {
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+ rsvp->enc_id, rsvp->topology);
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (!blk->rsvp && !blk->rsvp_nxt)
+ continue;
+
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
+ (blk->rsvp) ? blk->rsvp->seq : 0,
+ (blk->rsvp) ? blk->rsvp->enc_id : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+ blk->type, blk->id);
+ }
+ }
+}
+
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
+{
+ return rm->hw_mdp;
+}
+
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology)
+{
+ int i;
+
+ for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+ return g_top_table[i].top_name;
+
+ return DPU_RM_TOPOLOGY_NONE;
+}
+
+void dpu_rm_init_hw_iter(
+ struct dpu_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum dpu_hw_blk_type type)
+{
+ memset(iter, 0, sizeof(*iter));
+ iter->enc_id = enc_id;
+ iter->type = type;
+}
+
+static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+ struct list_head *blk_list;
+
+ if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
+ DPU_ERROR("invalid rm\n");
+ return false;
+ }
+
+ i->hw = NULL;
+ blk_list = &rm->hw_blks[i->type];
+
+ if (i->blk && (&i->blk->list == blk_list)) {
+ DPU_DEBUG("attempt resume iteration past last\n");
+ return false;
+ }
+
+ i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+ list_for_each_entry_continue(i->blk, blk_list, list) {
+ struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
+
+ if (i->blk->type != i->type) {
+ DPU_ERROR("found incorrect block type %d on %d list\n",
+ i->blk->type, i->type);
+ return false;
+ }
+
+ if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+ i->hw = i->blk->hw;
+ DPU_DEBUG("found type %d id %d for enc %d\n",
+ i->type, i->blk->id, i->enc_id);
+ return true;
+ }
+ }
+
+ DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+ return false;
+}
+
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+ bool ret;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _dpu_rm_get_hw_locked(rm, i);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
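+
+/*
+ * Iteration sketch (illustration; enc_id is a hypothetical variable):
+ * walking every CTL block reserved for a given encoder:
+ *
+ * struct dpu_rm_hw_iter iter;
+ *
+ * dpu_rm_init_hw_iter(&iter, enc_id, DPU_HW_BLK_CTL);
+ * while (dpu_rm_get_hw(rm, &iter))
+ * use_ctl(to_dpu_hw_ctl(iter.hw));
+ */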
+
+static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
+{
+ switch (type) {
+ case DPU_HW_BLK_LM:
+ dpu_hw_lm_destroy(hw);
+ break;
+ case DPU_HW_BLK_CTL:
+ dpu_hw_ctl_destroy(hw);
+ break;
+ case DPU_HW_BLK_CDM:
+ dpu_hw_cdm_destroy(hw);
+ break;
+ case DPU_HW_BLK_PINGPONG:
+ dpu_hw_pingpong_destroy(hw);
+ break;
+ case DPU_HW_BLK_INTF:
+ dpu_hw_intf_destroy(hw);
+ break;
+ case DPU_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case DPU_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case DPU_HW_BLK_MAX:
+ default:
+ DPU_ERROR("unsupported block type %d\n", type);
+ break;
+ }
+}
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+ struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
+ enum dpu_hw_blk_type type;
+
+ if (!rm) {
+ DPU_ERROR("invalid rm\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+ list_del(&rsvp_cur->list);
+ kfree(rsvp_cur);
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+ list) {
+ list_del(&hw_cur->list);
+ _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+ kfree(hw_cur);
+ }
+ }
+
+ dpu_hw_mdp_destroy(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+
+ mutex_destroy(&rm->rm_lock);
+
+ return 0;
+}
+
+static int _dpu_rm_hw_blk_create(
+ struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ enum dpu_hw_blk_type type,
+ uint32_t id,
+ void *hw_catalog_info)
+{
+ struct dpu_rm_hw_blk *blk;
+ struct dpu_hw_mdp *hw_mdp;
+ void *hw;
+
+ hw_mdp = rm->hw_mdp;
+
+ switch (type) {
+ case DPU_HW_BLK_LM:
+ hw = dpu_hw_lm_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_CTL:
+ hw = dpu_hw_ctl_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_CDM:
+ hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
+ break;
+ case DPU_HW_BLK_PINGPONG:
+ hw = dpu_hw_pingpong_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_INTF:
+ hw = dpu_hw_intf_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case DPU_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case DPU_HW_BLK_MAX:
+ default:
+ DPU_ERROR("unsupported block type %d\n", type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(hw)) {
+ DPU_ERROR("failed hw object creation: type %d, err %ld\n",
+ type, PTR_ERR(hw));
+ return -EFAULT;
+ }
+
+ blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+ if (!blk) {
+ _dpu_rm_hw_destroy(type, hw);
+ return -ENOMEM;
+ }
+
+ blk->type = type;
+ blk->id = id;
+ blk->hw = hw;
+ list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+ return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ struct drm_device *dev)
+{
+ int rc, i;
+ enum dpu_hw_blk_type type;
+
+ if (!rm || !cat || !mmio || !dev) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ /* Clear, setup lists */
+ memset(rm, 0, sizeof(*rm));
+
+ mutex_init(&rm->rm_lock);
+
+ INIT_LIST_HEAD(&rm->rsvps);
+ for (type = 0; type < DPU_HW_BLK_MAX; type++)
+ INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+ rm->dev = dev;
+
+ /* Some of the sub-blocks require an mdptop to be created */
+ rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
+ if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+ rc = PTR_ERR(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+ DPU_ERROR("failed: mdp hw not available\n");
+ goto fail;
+ }
+
+ /* Interrogate HW catalog and create tracking items for hw blocks */
+ for (i = 0; i < cat->mixer_count; i++) {
+ struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+ if (lm->pingpong == PINGPONG_MAX) {
+ DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+ continue;
+ }
+
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
+ cat->mixer[i].id, &cat->mixer[i]);
+ if (rc) {
+ DPU_ERROR("failed: lm hw not available\n");
+ goto fail;
+ }
+
+ if (!rm->lm_max_width) {
+ rm->lm_max_width = lm->sblk->maxwidth;
+ } else if (rm->lm_max_width != lm->sblk->maxwidth) {
+ /*
+ * Don't expect to have hw where lm max widths differ.
+ * If found, take the min.
+ */
+ DPU_ERROR("unsupported: lm maxwidth differs\n");
+ if (rm->lm_max_width > lm->sblk->maxwidth)
+ rm->lm_max_width = lm->sblk->maxwidth;
+ }
+ }
+
+ for (i = 0; i < cat->pingpong_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
+ cat->pingpong[i].id, &cat->pingpong[i]);
+ if (rc) {
+ DPU_ERROR("failed: pp hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->intf_count; i++) {
+ if (cat->intf[i].type == INTF_NONE) {
+ DPU_DEBUG("skip intf %d with type none\n", i);
+ continue;
+ }
+
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
+ cat->intf[i].id, &cat->intf[i]);
+ if (rc) {
+ DPU_ERROR("failed: intf hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->ctl_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
+ cat->ctl[i].id, &cat->ctl[i]);
+ if (rc) {
+ DPU_ERROR("failed: ctl hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->cdm_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
+ cat->cdm[i].id, &cat->cdm[i]);
+ if (rc) {
+ DPU_ERROR("failed: cdm hw not available\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ dpu_rm_destroy(rm);
+
+ return rc;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ * proposed use case requirements, incl. hardwired dependent blocks like
+ * pingpong
+ * @rm: dpu resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer; the function checks if the lm, and all other
+ * hardwired blocks connected to the lm (pp), are available and appropriate
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ * NULL if pp was not available, or not matching requirements.
+ * @primary_lm: if non-null, this function checks if lm is compatible with
+ * primary_lm as well as satisfying all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs,
+ struct dpu_rm_hw_blk *lm,
+ struct dpu_rm_hw_blk **pp,
+ struct dpu_rm_hw_blk *primary_lm)
+{
+ const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
+ struct dpu_rm_hw_iter iter;
+
+ *pp = NULL;
+
+ DPU_DEBUG("check lm %d pp %d\n",
+ lm_cfg->id, lm_cfg->pingpong);
+
+ /* Check if this layer mixer is a peer of the proposed primary LM */
+ if (primary_lm) {
+ const struct dpu_lm_cfg *prim_lm_cfg =
+ to_dpu_hw_mixer(primary_lm->hw)->cap;
+
+ if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+ prim_lm_cfg->id);
+ return false;
+ }
+ }
+
+ /* Already reserved? */
+ if (RESERVED_BY_OTHER(lm, rsvp)) {
+ DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+ return false;
+ }
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id == lm_cfg->pingpong) {
+ *pp = iter.blk;
+ break;
+ }
+ }
+
+ if (!*pp) {
+ DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+ return false;
+ }
+
+ if (RESERVED_BY_OTHER(*pp, rsvp)) {
+ DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
+ (*pp)->id);
+ return false;
+ }
+
+ return true;
+}
+
+static int _dpu_rm_reserve_lms(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs)
+{
+ struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
+ struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
+ struct dpu_rm_hw_iter iter_i, iter_j;
+ int lm_count = 0;
+ int i, rc = 0;
+
+ if (!reqs->topology->num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+ return -EINVAL;
+ }
+
+ /* Find a primary mixer */
+ dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
+ while (lm_count != reqs->topology->num_lm &&
+ _dpu_rm_get_hw_locked(rm, &iter_i)) {
+ memset(&lm, 0, sizeof(lm));
+ memset(&pp, 0, sizeof(pp));
+
+ lm_count = 0;
+ lm[lm_count] = iter_i.blk;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(
+ rm, rsvp, reqs, lm[lm_count],
+ &pp[lm_count], NULL))
+ continue;
+
+ ++lm_count;
+
+ /* Valid primary mixer found, find matching peers */
+ dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+
+ while (lm_count != reqs->topology->num_lm &&
+ _dpu_rm_get_hw_locked(rm, &iter_j)) {
+ if (iter_i.blk == iter_j.blk)
+ continue;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(
+ rm, rsvp, reqs, iter_j.blk,
+ &pp[lm_count], iter_i.blk))
+ continue;
+
+ lm[lm_count] = iter_j.blk;
+ ++lm_count;
+ }
+ }
+
+ if (lm_count != reqs->topology->num_lm) {
+ DPU_DEBUG("unable to find appropriate mixers\n");
+ return -ENAVAIL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lm); i++) {
+ if (!lm[i])
+ break;
+
+ lm[i]->rsvp_nxt = rsvp;
+ pp[i]->rsvp_nxt = rsvp;
+
+ trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
+ pp[i]->id);
+ }
+
+ return rc;
+}
+
+static int _dpu_rm_reserve_ctls(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ const struct dpu_rm_topology_def *top)
+{
+ struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
+ struct dpu_rm_hw_iter iter;
+ int i = 0;
+
+ memset(&ctls, 0, sizeof(ctls));
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
+ unsigned long features = ctl->caps->features;
+ bool has_split_display;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+ DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+
+ if (top->needs_split_display != has_split_display)
+ continue;
+
+ ctls[i] = iter.blk;
+ DPU_DEBUG("ctl %d match\n", iter.blk->id);
+
+ if (++i == top->num_ctl)
+ break;
+ }
+
+ if (i != top->num_ctl)
+ return -ENAVAIL;
+
+ for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+ ctls[i]->rsvp_nxt = rsvp;
+ trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
+ rsvp->enc_id);
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_cdm(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ uint32_t id,
+ enum dpu_hw_blk_type type)
+{
+ struct dpu_rm_hw_iter iter;
+
+ DRM_DEBUG_KMS("type %d id %d\n", type, id);
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
+ const struct dpu_cdm_cfg *caps = cdm->caps;
+ bool match = false;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
+ match = test_bit(id, &caps->intf_connect);
+
+ DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
+ iter.blk->type, iter.blk->id, rsvp->enc_id,
+ caps->intf_connect, match);
+
+ if (!match)
+ continue;
+
+ trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
+ rsvp->enc_id);
+ iter.blk->rsvp_nxt = rsvp;
+ break;
+ }
+
+ if (!iter.hw) {
+ DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_intf(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ uint32_t id,
+ enum dpu_hw_blk_type type,
+ bool needs_cdm)
+{
+ struct dpu_rm_hw_iter iter;
+ int ret = 0;
+
+ /* Find the block entry in the rm, and note the reservation */
+ dpu_rm_init_hw_iter(&iter, 0, type);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id != id)
+ continue;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+ DPU_ERROR("type %d id %d already reserved\n", type, id);
+ return -ENAVAIL;
+ }
+
+ iter.blk->rsvp_nxt = rsvp;
+ trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
+ rsvp->enc_id);
+ break;
+ }
+
+ /* Shouldn't happen since intfs are fixed at probe */
+ if (!iter.hw) {
+ DPU_ERROR("couldn't find type %d id %d\n", type, id);
+ return -EINVAL;
+ }
+
+ if (needs_cdm)
+ ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
+
+ return ret;
+}
+
+static int _dpu_rm_reserve_intf_related_hw(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_encoder_hw_resources *hw_res)
+{
+ int i, ret = 0;
+ u32 id;
+
+ for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+ if (hw_res->intfs[i] == INTF_MODE_NONE)
+ continue;
+ id = i + INTF_0;
+ ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+ DPU_HW_BLK_INTF, hw_res->needs_cdm);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int _dpu_rm_make_next_rsvp(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs)
+{
+ int ret;
+ struct dpu_rm_topology_def topology;
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->topology->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+ if (ret) {
+ DPU_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check mixers without Split Display
+ * - Only then allow to grab from CTLs with split display capability
+ */
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
+ if (ret && !reqs->topology->needs_split_display) {
+ memcpy(&topology, reqs->topology, sizeof(topology));
+ topology.needs_split_display = true;
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
+ }
+ if (ret) {
+ DPU_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
+ ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int _dpu_rm_populate_requirements(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct dpu_rm_requirements *reqs,
+ struct msm_display_topology req_topology)
+{
+ int i;
+
+ memset(reqs, 0, sizeof(*reqs));
+
+ dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+ for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+ req_topology)) {
+ reqs->topology = &g_top_table[i];
+ break;
+ }
+ }
+
+ if (!reqs->topology) {
+ DPU_ERROR("invalid topology for the display\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Set the requirement based on caps if not set from user space.
+ * This ensures that LMs tied to the DS blocks are selected.
+ * Currently, DS blocks are tied to LM 0 and LM 1 (primary display).
+ */
+ if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
+ conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
+ reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
+
+ DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
+ reqs->hw_res.display_num_of_h_tiles);
+ DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+ reqs->topology->num_lm, reqs->topology->num_ctl,
+ reqs->topology->top_name,
+ reqs->topology->needs_split_display);
+
+ return 0;
+}
+
+static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc)
+{
+ struct dpu_rm_rsvp *i;
+
+ if (!rm || !enc) {
+ DPU_ERROR("invalid params\n");
+ return NULL;
+ }
+
+ if (list_empty(&rm->rsvps))
+ return NULL;
+
+ list_for_each_entry(i, &rm->rsvps, list)
+ if (i->enc_id == enc->base.id)
+ return i;
+
+ return NULL;
+}
+
+static struct drm_connector *_dpu_rm_get_connector(
+ struct drm_encoder *enc)
+{
+ struct drm_connector *conn = NULL;
+ struct list_head *connector_list =
+ &enc->dev->mode_config.connector_list;
+
+ list_for_each_entry(conn, connector_list, head)
+ if (conn->encoder == enc)
+ return conn;
+
+ return NULL;
+}
+
+/**
+ * _dpu_rm_release_rsvp - release resources and free a reservation
+ * @rm: KMS handle
+ * @rsvp: RSVP pointer to release resources for
+ * @conn: connector the reservation was associated with
+ */
+static void _dpu_rm_release_rsvp(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct drm_connector *conn)
+{
+ struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+
+ if (!rsvp)
+ return;
+
+ DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+ list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+ if (rsvp == rsvp_c) {
+ list_del(&rsvp_c->list);
+ break;
+ }
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp == rsvp) {
+ blk->rsvp = NULL;
+ DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type, blk->id);
+ }
+ if (blk->rsvp_nxt == rsvp) {
+ blk->rsvp_nxt = NULL;
+ DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type, blk->id);
+ }
+ }
+ }
+
+ kfree(rsvp);
+}
+
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+{
+ struct dpu_rm_rsvp *rsvp;
+ struct drm_connector *conn;
+
+ if (!rm || !enc) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ mutex_lock(&rm->rm_lock);
+
+ rsvp = _dpu_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+ DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ conn = _dpu_rm_get_connector(enc);
+ if (!conn) {
+ DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ _dpu_rm_release_rsvp(rm, rsvp, conn);
+end:
+ mutex_unlock(&rm->rm_lock);
+}
+
+static int _dpu_rm_commit_rsvp(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+ int ret = 0;
+
+ /* Swap next rsvp to be the active */
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp_nxt) {
+ blk->rsvp = blk->rsvp_nxt;
+ blk->rsvp_nxt = NULL;
+ }
+ }
+ }
+
+ if (!ret)
+ DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
+ rsvp->topology);
+
+ return ret;
+}
+
+int dpu_rm_reserve(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct msm_display_topology topology,
+ bool test_only)
+{
+ struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct dpu_rm_requirements reqs;
+ int ret;
+
+ if (!rm || !enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ /* Check if this is just a page-flip */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+ conn_state->connector->base.id, enc->base.id,
+ crtc_state->crtc->base.id, test_only);
+
+ mutex_lock(&rm->rm_lock);
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
+
+ ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
+ conn_state, &reqs, topology);
+ if (ret) {
+ DPU_ERROR("failed to populate hw requirements\n");
+ goto end;
+ }
+
+ /*
+ * We only support one active reservation per-hw-block. But to implement
+ * transactional semantics for test-only, and for allowing failure while
+ * modifying your existing reservation, over the course of this
+ * function we can have two reservations:
+ * Current: Existing reservation
+ * Next: Proposed reservation. The proposed reservation may fail, or may
+ * be discarded if in test-only mode.
+ * If reservation is successful, and we're not in test-only, then we
+ * replace the current with the next.
+ */
+ rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
+
+ /*
+ * User can request that we clear out any reservation during the
+ * atomic_check phase by using this CLEAR bit
+ */
+ if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+ DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+ rsvp_cur->seq, rsvp_cur->enc_id);
+ _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+ rsvp_cur = NULL;
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
+ }
+
+ /* Check the proposed reservation, store it in hw's "next" field */
+ ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+ rsvp_nxt, &reqs);
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
+
+ if (ret) {
+ DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else if (test_only && !RM_RQ_LOCK(&reqs)) {
+ /*
+ * Normally, if test_only, test the reservation and then undo
+ * However, if the user requests LOCK, then keep the reservation
+ * made during the atomic_check phase.
+ */
+ DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else {
+ if (test_only && RM_RQ_LOCK(&reqs))
+ DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+ _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+ ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+ }
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
+
+end:
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 000000000000..ffd1841a6067
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+/**
+ * enum dpu_rm_topology_name - HW resource use case in use by connector
+ * @DPU_RM_TOPOLOGY_NONE: No topology in use currently
+ * @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum dpu_rm_topology_name {
+ DPU_RM_TOPOLOGY_NONE = 0,
+ DPU_RM_TOPOLOGY_SINGLEPIPE,
+ DPU_RM_TOPOLOGY_DUALPIPE,
+ DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+ DPU_RM_TOPOLOGY_MAX,
+};
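+
+/*
+ * Example mapping (derived from g_top_table in dpu_rm.c): a request for
+ * 2 LMs, 0 compression encoders and 1 INTF resolves to
+ * DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, while 2 LMs with 2 INTFs resolves to
+ * DPU_RM_TOPOLOGY_DUALPIPE.
+ */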
+
+/**
+ * enum dpu_rm_topology_control - HW resource use case in use by connector
+ * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ * test, reserve the resources for this display.
+ * Normal behavior would not impact the reservation
+ * list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ * release any reservation held by this display.
+ * Normal behavior would not impact the
+ * reservation list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_DS: Require layer mixers with DS capabilities
+ */
+enum dpu_rm_topology_control {
+ DPU_RM_TOPCTL_RESERVE_LOCK,
+ DPU_RM_TOPCTL_RESERVE_CLEAR,
+ DPU_RM_TOPCTL_DS,
+};
+
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ * list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct dpu_rm {
+ struct drm_device *dev;
+ struct list_head rsvps;
+ struct list_head hw_blks[DPU_HW_BLK_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+ uint32_t lm_max_width;
+ uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
+};
+
+/**
+ * struct dpu_rm_hw_blk - resource manager internal structure
+ * forward declaration for single iterator definition without void pointer
+ */
+struct dpu_rm_hw_blk;
+
+/**
+ * struct dpu_rm_hw_iter - iterator for use with dpu_rm
+ * @hw: dpu_hw object requested, or NULL on failure
+ * @blk: dpu_rm internal block representation; opaque to clients, used as the iteration cursor.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct dpu_rm_hw_iter {
+ void *hw;
+ struct dpu_rm_hw_blk *blk;
+ uint32_t enc_id;
+ enum dpu_hw_blk_type type;
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ struct drm_device *dev);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ * the connections in use and the user requirements, specified through the
+ * related topology control properties, and reserve hardware blocks for
+ * that display chain.
+ * HW blocks can then be accessed through dpu_rm_get_* functions.
+ * HW reservations should be released via dpu_rm_release().
+ * @rm: DPU Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @topology: Pointer to topology info for the display
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct msm_display_topology topology,
+ bool test_only);
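+
+/*
+ * Example call site (illustrative sketch, assuming the encoder's
+ * atomic_check path, a dpu_kms with an embedded rm, and a populated
+ * msm_display_topology):
+ *
+ *	ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, conn_state,
+ *			     topology, true);
+ *	if (ret)
+ *		return ret;
+ */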
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ * HW blocks previously reserved for that use case.
+ * @rm: DPU Resource Manager handle
+ * @enc: DRM Encoder handle
+ */
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+
+/**
+ * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
+ * This is never reserved, and is usable by any display.
+ * @rm: DPU Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ * using dpu_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void dpu_rm_init_hw_iter(
+ struct dpu_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum dpu_hw_blk_type type);
+
+/**
+ * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ * Meant to do a single pass through the hardware list to iteratively
+ * retrieve hardware blocks of a given type for a given encoder.
+ * Initialize the iterator with dpu_rm_init_hw_iter(), setting the hw
+ * block type of interest and the encoder id of interest (0 for any).
+ * The first call returns the first reserved hw of that type for that
+ * encoder; subsequent calls return the next reserved hw of that type,
+ * in order. The iterator's hw pointer is NULL when no hw is found.
+ * @rm: DPU Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
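+
+/*
+ * Typical iteration (illustrative sketch; DPU_HW_BLK_LM and struct
+ * dpu_hw_mixer come from the hw catalog/mixer headers):
+ *
+ *	struct dpu_rm_hw_iter iter;
+ *
+ *	dpu_rm_init_hw_iter(&iter, drm_enc->base.id, DPU_HW_BLK_LM);
+ *	while (dpu_rm_get_hw(rm, &iter)) {
+ *		struct dpu_hw_mixer *lm = iter.hw;
+ *		...
+ *	}
+ */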
+
+/**
+ * dpu_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int dpu_rm_check_property_topctl(uint64_t val);
+
+/**
+ * dpu_rm_get_topology_name - returns the name of the given topology
+ * definition
+ * @topology: topology definition
+ * @Return: name of the topology
+ */
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology);
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 000000000000..ae0ca5076238
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,1007 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+ TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+ u32 lut, u32 lut_usage),
+ TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(bool, rt)
+ __field(u32, fl)
+ __field(u64, lut)
+ __field(u32, lut_usage)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->rt = rt;
+ __entry->fl = fl;
+ __entry->lut = lut;
+ __entry->lut_usage = lut_usage;
+ ),
+ TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+ __entry->pnum, __entry->fmt,
+ __entry->rt, __entry->fl,
+ __entry->lut, __entry->lut_usage)
+);
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+ TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+ u32 safe_lut),
+ TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(u32, mode)
+ __field(u32, danger_lut)
+ __field(u32, safe_lut)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->mode = mode;
+ __entry->danger_lut = danger_lut;
+ __entry->safe_lut = safe_lut;
+ ),
+ TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+ __entry->pnum, __entry->fmt,
+ __entry->mode, __entry->danger_lut,
+ __entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+ TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+ TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, xin_id)
+ __field(u32, rd_lim)
+ __field(u32, vbif_idx)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->xin_id = xin_id;
+ __entry->rd_lim = rd_lim;
+ __entry->vbif_idx = vbif_idx;
+ ),
+ TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+ __entry->pnum, __entry->xin_id, __entry->rd_lim,
+ __entry->vbif_idx)
+);
+
+TRACE_EVENT(dpu_perf_update_bus,
+ TP_PROTO(int client, unsigned long long ab_quota,
+ unsigned long long ib_quota),
+ TP_ARGS(client, ab_quota, ib_quota),
+ TP_STRUCT__entry(
+ __field(int, client)
+ __field(u64, ab_quota)
+ __field(u64, ib_quota)
+ ),
+ TP_fast_assign(
+ __entry->client = client;
+ __entry->ab_quota = ab_quota;
+ __entry->ib_quota = ib_quota;
+ ),
+ TP_printk("Request client:%d ab=%llu ib=%llu",
+ __entry->client,
+ __entry->ab_quota,
+ __entry->ib_quota)
+);
+
+TRACE_EVENT(dpu_cmd_release_bw,
+ TP_PROTO(u32 crtc_id),
+ TP_ARGS(crtc_id),
+ TP_STRUCT__entry(
+ __field(u32, crtc_id)
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ ),
+ TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+ TP_PROTO(int pid, const char *name, bool trace_begin),
+ TP_ARGS(pid, name, trace_begin),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(trace_name, name)
+ __field(bool, trace_begin)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(trace_name, name);
+ __entry->trace_begin = trace_begin;
+ ),
+ TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+ __entry->pid, __get_str(trace_name))
+);
+
+TRACE_EVENT(dpu_trace_counter,
+ TP_PROTO(int pid, char *name, int value),
+ TP_ARGS(pid, name, value),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(counter_name, name)
+ __field(int, value)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(counter_name, name);
+ __entry->value = value;
+ ),
+ TP_printk("%d|%s|%d", __entry->pid,
+ __get_str(counter_name), __entry->value)
+);
+
+TRACE_EVENT(dpu_perf_crtc_update,
+ TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+ u64 bw_ctl_ebi, u32 core_clk_rate,
+ bool stop_req, u32 update_bus, u32 update_clk),
+ TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
+ stop_req, update_bus, update_clk),
+ TP_STRUCT__entry(
+ __field(u32, crtc)
+ __field(u64, bw_ctl_mnoc)
+ __field(u64, bw_ctl_llcc)
+ __field(u64, bw_ctl_ebi)
+ __field(u32, core_clk_rate)
+ __field(bool, stop_req)
+ __field(u32, update_bus)
+ __field(u32, update_clk)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->bw_ctl_mnoc = bw_ctl_mnoc;
+ __entry->bw_ctl_llcc = bw_ctl_llcc;
+ __entry->bw_ctl_ebi = bw_ctl_ebi;
+ __entry->core_clk_rate = core_clk_rate;
+ __entry->stop_req = stop_req;
+ __entry->update_bus = update_bus;
+ __entry->update_clk = update_clk;
+ ),
+ TP_printk(
+ "crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+ __entry->crtc,
+ __entry->bw_ctl_mnoc,
+ __entry->bw_ctl_llcc,
+ __entry->bw_ctl_ebi,
+ __entry->core_clk_rate,
+ __entry->stop_req,
+ __entry->update_bus,
+ __entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_irq_template,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx)
+);
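+/*
+ * dpu_enc_irq_template is shared by the irq register/unregister events
+ * below: each DEFINE_EVENT() stamps out a named tracepoint that reuses
+ * the class's entry layout and print format.
+ */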
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ __field( enum dpu_pingpong, pp_idx )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ __entry->pp_idx = pp_idx;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ ),
+ TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+ TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+ TP_ARGS(drm_id, hdisplay, vdisplay),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, hdisplay )
+ __field( int, vdisplay )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hdisplay = hdisplay;
+ __entry->vdisplay = vdisplay;
+ ),
+ TP_printk("id=%u, mode=%dx%d",
+ __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+ TP_PROTO(uint32_t drm_id, int val),
+ TP_ARGS(drm_id, val),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, val )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->val = val;
+ ),
+ TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+ TP_PROTO(uint32_t drm_id, int count),
+ TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+ TP_PROTO(uint32_t drm_id, int ctl_idx),
+ TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+ TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
+ TP_ARGS(drm_id, flags, private_flags),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, flags )
+ __field( int, private_flags )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->flags = flags;
+ __entry->private_flags = private_flags;
+ ),
+ TP_printk("id=%u, flags=%u, private_flags=%d",
+ __entry->drm_id, __entry->flags, __entry->private_flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ ),
+ TP_printk("id=%u, enable=%s",
+ __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+ TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+ int rc_state, const char *stage),
+ TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, sw_event )
+ __field( bool, idle_pc_supported )
+ __field( int, rc_state )
+ __string( stage_str, stage )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->sw_event = sw_event;
+ __entry->idle_pc_supported = idle_pc_supported;
+ __entry->rc_state = rc_state;
+ __assign_str(stage_str, stage);
+ ),
+ TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d\n",
+ __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+ __entry->idle_pc_supported ? "true" : "false",
+ __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+ TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, event, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
+ __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+ TP_PROTO(uint32_t drm_id, unsigned int idx,
+ unsigned long frame_busy_mask),
+ TP_ARGS(drm_id, idx, frame_busy_mask),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, idx )
+ __field( unsigned long, frame_busy_mask )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->idx = idx;
+ __entry->frame_busy_mask = frame_busy_mask;
+ ),
+ TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+ __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+ int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+ TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+ pending_flush_ret),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( int, pending_kickoff_cnt )
+ __field( int, ctl_idx )
+ __field( u32, pending_flush_ret )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->pending_kickoff_cnt = pending_kickoff_cnt;
+ __entry->ctl_idx = ctl_idx;
+ __entry->pending_flush_ret = pending_flush_ret;
+ ),
+ TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+ "pending_flush_ret=%u", __entry->drm_id,
+ __entry->intf_idx, __entry->pending_kickoff_cnt,
+ __entry->ctl_idx, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( ktime_t, time )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->time = time;
+ ),
+ TP_printk("id=%u, time=%lld", __entry->drm_id,
+ ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+ TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+ s64 expected_time, int atomic_cnt),
+ TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int32_t, hw_id )
+ __field( int, rc )
+ __field( s64, time )
+ __field( s64, expected_time )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hw_id = hw_id;
+ __entry->rc = rc;
+ __entry->time = time;
+ __entry->expected_time = expected_time;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+ __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+ __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, pp, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+ __entry->pp, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+ u32 event),
+ TP_ARGS(drm_id, pp, new_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, new_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->new_count = new_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+ __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+ int kickoff_count, u32 event),
+ TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, timeout_count )
+ __field( int, kickoff_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->timeout_count = timeout_count;
+ __entry->kickoff_count = kickoff_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+ __entry->drm_id, __entry->pp, __entry->timeout_count,
+ __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, intf_idx, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+ __entry->intf_idx, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+ TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+ struct drm_plane_state *state, struct dpu_plane_state *pstate,
+ uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format,
+ uint64_t modifier),
+ TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp,
+ pixel_format, modifier),
+ TP_STRUCT__entry(
+ __field( uint32_t, crtc_id )
+ __field( uint32_t, plane_id )
+ __field( struct drm_plane_state*,state )
+ __field( struct dpu_plane_state*,pstate )
+ __field( uint32_t, stage_idx )
+ __field( enum dpu_sspp, sspp )
+ __field( uint32_t, pixel_format )
+ __field( uint64_t, modifier )
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ __entry->plane_id = plane_id;
+ __entry->state = state;
+ __entry->pstate = pstate;
+ __entry->stage_idx = stage_idx;
+ __entry->sspp = sspp;
+ __entry->pixel_format = pixel_format;
+ __entry->modifier = modifier;
+ ),
+ TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
+ "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+ "multirect_index:%d multirect_mode:%u pix_format:%u "
+ "modifier:%llu",
+ __entry->crtc_id, __entry->plane_id,
+ __entry->state->fb ? __entry->state->fb->base.id : -1,
+ __entry->state->src_w >> 16, __entry->state->src_h >> 16,
+ __entry->state->src_x >> 16, __entry->state->src_y >> 16,
+ __entry->state->crtc_w, __entry->state->crtc_h,
+ __entry->state->crtc_x, __entry->state->crtc_y,
+ __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
+ __entry->pstate->multirect_index,
+ __entry->pstate->multirect_mode, __entry->pixel_format,
+ __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+ TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+ TP_ARGS(drm_id, mixer, bounds),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, mixer )
+ __field( struct drm_rect *, bounds )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->mixer = mixer;
+ __entry->bounds = bounds;
+ ),
+ TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+ __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+ TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+ struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enc_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( uint32_t, enc_id )
+ __field( bool, enable )
+ __field( struct dpu_crtc *, crtc )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enc_id = enc_id;
+ __entry->enable = enable;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
+ "vblank_req:%s}",
+ __entry->drm_id, __entry->enc_id,
+ __entry->enable ? "true" : "false",
+ __entry->crtc->enabled ? "true" : "false",
+ __entry->crtc->suspend ? "true" : "false",
+ __entry->crtc->vblank_requested ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ __field( struct dpu_crtc *, crtc )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+ __entry->drm_id, __entry->enable ? "true" : "false",
+ __entry->crtc->enabled ? "true" : "false",
+ __entry->crtc->suspend ? "true" : "false",
+ __entry->crtc->vblank_requested ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+ TP_PROTO(uint32_t drm_id, int frame_pending),
+ TP_ARGS(drm_id, frame_pending),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, frame_pending )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->frame_pending = frame_pending;
+ ),
+ TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+ __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+ TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout,
+ enum dpu_sspp_multirect_index multirect_index),
+ TP_ARGS(index, layout, multirect_index),
+ TP_STRUCT__entry(
+ __field( enum dpu_sspp, index )
+ __field( struct dpu_hw_fmt_layout*, layout )
+ __field( enum dpu_sspp_multirect_index, multirect_index)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->layout = layout;
+ __entry->multirect_index = multirect_index;
+ ),
+ TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+ "multirect_index:%d", __entry->index, __entry->layout->width,
+ __entry->layout->height, __entry->layout->plane_addr[0],
+ __entry->layout->plane_size[0],
+ __entry->layout->plane_addr[1],
+ __entry->layout->plane_size[1],
+ __entry->layout->plane_addr[2],
+ __entry->layout->plane_size[2],
+ __entry->layout->plane_addr[3],
+ __entry->layout->plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+ TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+ TP_ARGS(drm_id, is_virtual, multirect_mode),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, is_virtual )
+ __field( uint32_t, multirect_mode )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->is_virtual = is_virtual;
+ __entry->multirect_mode = multirect_mode;
+ ),
+ TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+ __entry->is_virtual ? "true" : "false",
+ __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( enum dpu_hw_blk_type, type )
+ __field( uint32_t, enc_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->type = type;
+ __entry->enc_id = enc_id;
+ ),
+ TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
+ __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
+ uint32_t pp_id),
+ TP_ARGS(id, type, enc_id, pp_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( enum dpu_hw_blk_type, type )
+ __field( uint32_t, enc_id )
+ __field( uint32_t, pp_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->type = type;
+ __entry->enc_id = enc_id;
+ __entry->pp_id = pp_id;
+ ),
+ TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
+ __entry->type, __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+ TP_PROTO(enum dpu_vbif index, u32 xin_id),
+ TP_ARGS(index, xin_id),
+ TP_STRUCT__entry(
+ __field( enum dpu_vbif, index )
+ __field( u32, xin_id )
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->xin_id = xin_id;
+ ),
+ TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+ TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+ TP_ARGS(pp, cfg),
+ TP_STRUCT__entry(
+ __field( enum dpu_pingpong, pp )
+ __field( u32, cfg )
+ ),
+ TP_fast_assign(
+ __entry->pp = pp;
+ __entry->cfg = cfg;
+ ),
+ TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( int, enable_count )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->enable_count = enable_count;
+ ),
+ TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
+ __entry->enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( struct dpu_irq_callback *, callback)
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->callback = callback;
+ ),
+ TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+ __entry->callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+ TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+ TP_ARGS(dev, stop_req, clk_rate),
+ TP_STRUCT__entry(
+ __field( struct drm_device *, dev )
+ __field( bool, stop_req )
+ __field( u64, clk_rate )
+ ),
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->stop_req = stop_req;
+ __entry->clk_rate = clk_rate;
+ ),
+ TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+ __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+ trace_dpu_trace_counter(current->tgid, name, value)
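+
+/*
+ * Usage sketch (illustrative): bracket a section for systrace-style
+ * duration events and emit a named counter value:
+ *
+ *	DPU_ATRACE_BEGIN("kickoff");
+ *	...
+ *	DPU_ATRACE_END("kickoff");
+ *	DPU_ATRACE_INT("frame_pending", frame_pending);
+ */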
+
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644
index 000000000000..295528292296
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif: Pointer to hardware vbif driver
+ * @xin_id: Client interface identifier
+ * @return: 0 if success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ ktime_t timeout;
+ bool status;
+ int rc;
+
+ if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ return -EINVAL;
+ }
+
+ timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+ for (;;) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ if (status)
+ break;
+ if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ break;
+ }
+ usleep_range(501, 1000);
+ }
+
+ if (!status) {
+ rc = -ETIMEDOUT;
+ DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+ vbif->idx - VBIF_0, xin_id);
+ } else {
+ rc = 0;
+ DPU_DEBUG("VBIF %d client %d is halted\n",
+ vbif->idx - VBIF_0, xin_id);
+ }
+
+ return rc;
+}
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @ot_lim: Pointer to OT limit to be modified
+ * @params: Pointer to usecase parameters
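+ *
+ * Illustrative arithmetic: a 1920x1080 surface at 60 fps gives
+ * pps = 60 * 1920 * 1080 = 124416000; the first table entry whose
+ * cfg[i].pps is greater than or equal to that value supplies the limit.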
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+ u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+ u64 pps;
+ const struct dpu_vbif_dynamic_ot_tbl *tbl;
+ u32 i;
+
+ if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+ return;
+
+ /* Dynamic OT setting done only for WFD */
+ if (!params->is_wfd)
+ return;
+
+ pps = params->frame_rate;
+ pps *= params->width;
+ pps *= params->height;
+
+ tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+ &vbif->cap->dynamic_ot_wr_tbl;
+
+ for (i = 0; i < tbl->count; i++) {
+ if (pps <= tbl->cfg[i].pps) {
+ *ot_lim = tbl->cfg[i].ot_limit;
+ break;
+ }
+ }
+
+ DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ vbif->idx - VBIF_0, params->xin_id,
+ params->width, params->height, params->frame_rate,
+ pps, *ot_lim);
+}
+
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @params: Pointer to usecase parameters
+ * @return: OT limit
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+ struct dpu_vbif_set_ot_params *params)
+{
+ u32 ot_lim = 0;
+ u32 val;
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+ /* this function returns a u32; treat bad arguments as "no limit" */
+ return 0;
+ }
+
+ if (vbif->cap->default_ot_wr_limit && !params->rd)
+ ot_lim = vbif->cap->default_ot_wr_limit;
+ else if (vbif->cap->default_ot_rd_limit && params->rd)
+ ot_lim = vbif->cap->default_ot_rd_limit;
+
+ /*
+ * If default ot is not set from dt/catalog,
+ * then do not configure it.
+ */
+ if (ot_lim == 0)
+ goto exit;
+
+ /* Modify the limits if the target and the use case requires it */
+ _dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+ if (vbif->ops.get_limit_conf) {
+ val = vbif->ops.get_limit_conf(vbif,
+ params->xin_id, params->rd);
+ if (val == ot_lim)
+ ot_lim = 0;
+ }
+
+exit:
+ DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+ vbif->idx - VBIF_0, params->xin_id, ot_lim);
+ return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms: DPU handler
+ * @params: Pointer to usecase parameters
+ *
+ * Note: this function may block while waiting for the xin client to halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params)
+{
+ struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ u32 ot_lim;
+ int ret, i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i] &&
+ dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
+ vbif = dpu_kms->hw_vbif[i];
+ }
+
+ if (!vbif || !mdp) {
+ DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+ vbif != 0, mdp != 0);
+ return;
+ }
+
+ if (!mdp->ops.setup_clk_force_ctrl ||
+ !vbif->ops.set_limit_conf ||
+ !vbif->ops.set_halt_ctrl)
+ return;
+
+ /* set write_gather_en for all write clients */
+ if (vbif->ops.set_write_gather_en && !params->rd)
+ vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+ ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+ if (ot_lim == 0)
+ goto exit;
+
+ trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+ params->vbif_idx);
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+ ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+ if (ret)
+ trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+ return;
+}
+
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params)
+{
+ struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ const struct dpu_vbif_qos_tbl *qos_tbl;
+ int i;
+
+ if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i] &&
+ dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
+ vbif = dpu_kms->hw_vbif[i];
+ break;
+ }
+ }
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+ return;
+ }
+
+ if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+ DPU_DEBUG("qos remap not supported\n");
+ return;
+ }
+
+ qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+ &vbif->cap->qos_nrt_tbl;
+
+ if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+ DPU_DEBUG("qos tbl not defined\n");
+ return;
+ }
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+ DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+ params->vbif_idx, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ }
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ u32 i, pnd, src;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->ops.clear_errors) {
+ vbif->ops.clear_errors(vbif, &pnd, &src);
+ if (pnd || src) {
+ DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
+ vbif->idx - VBIF_0, pnd, src);
+ }
+ }
+ }
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ int i, j;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+ for (j = 0; j < vbif->cap->memtype_count; j++)
+ vbif->ops.set_mem_type(
+ vbif, j, vbif->cap->memtype[j]);
+ }
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove_recursive(dpu_kms->debugfs_vbif);
+ dpu_kms->debugfs_vbif = NULL;
+}
+
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+ char vbif_name[32];
+ struct dentry *debugfs_vbif;
+ int i, j;
+
+ dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
+ if (!dpu_kms->debugfs_vbif) {
+ DPU_ERROR("failed to create vbif debugfs\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+ debugfs_vbif = debugfs_create_dir(vbif_name,
+ dpu_kms->debugfs_vbif);
+
+ debugfs_create_u32("features", 0600, debugfs_vbif,
+ (u32 *)&vbif->features);
+
+ debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+ (u32 *)&vbif->xin_halt_timeout);
+
+ debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_rd_limit);
+
+ debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_wr_limit);
+
+ for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+ struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_rd_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+
+ for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+ struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_wr_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+ }
+
+ return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644
index 000000000000..f17af52dbbd5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
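+/**
+ * struct dpu_vbif_set_ot_params - OT (outstanding transaction) limit
+ *	configuration parameters
+ * @xin_id: client interface identifier
+ * @num: pipe identifier (debug only)
+ * @width: source surface width
+ * @height: source surface height
+ * @frame_rate: source surface frame rate
+ * @rd: true for a read (source) client, false for a write client
+ * @is_wfd: true if the client is a writeback (WFD) use case
+ * @vbif_idx: vbif identifier
+ * @clk_ctrl: clock control identifier of the xin
+ */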
+struct dpu_vbif_set_ot_params {
+ u32 xin_id;
+ u32 num;
+ u32 width;
+ u32 height;
+ u32 frame_rate;
+ bool rd;
+ bool is_wfd;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+};
+
+struct dpu_vbif_set_memtype_params {
+ u32 xin_id;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+ bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+ u32 vbif_idx;
+ u32 xin_id;
+ u32 clk_ctrl;
+ u32 num;
+ bool is_rt;
+};
+
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms: DPU handler
+ * @params: Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params);
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms: DPU handler
+ * @params: Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params);
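+
+/*
+ * Example usage (illustrative sketch; VBIF_0 and DPU_CLK_CTRL_VIG0 are
+ * assumed to come from the hw mdss headers, and the xin/clk identifiers
+ * depend on the pipe being configured):
+ *
+ *	struct dpu_vbif_set_qos_params qos = {
+ *		.vbif_idx = VBIF_0,
+ *		.clk_ctrl = DPU_CLK_CTRL_VIG0,
+ *		.xin_id = 0,
+ *		.num = 0,
+ *		.is_rt = true,
+ *	};
+ *
+ *	dpu_vbif_set_qos_remap(dpu_kms, &qos);
+ */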
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
+#else
+static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
+ struct dentry *debugfs_root)
+{
+ return 0;
+}
+static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
new file mode 100644
index 000000000000..4f12e5c534c8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -0,0 +1,1376 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+ ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+ (((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b))
+#endif
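+
+/*
+ * Illustrative expansions (a power-of-two alignment takes the mask path,
+ * anything else the divide/multiply path):
+ *
+ *	MSM_MEDIA_ALIGN(67, 32) == 96
+ *	MSM_MEDIA_ALIGN(67, 24) == 72
+ *	MSM_MEDIA_ROUNDUP(67, 32) == 3
+ *	MSM_MEDIA_MAX(96, 72) == 96
+ */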
+
+enum color_fmts {
+ /* Venus NV12:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV12,
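+
+ /* Worked example for COLOR_FMT_NV12 (illustrative, assuming a
+ * 1920x1080 frame and 16k of extradata):
+ * Y_Stride = align(1920, 128) = 1920
+ * UV_Stride = align(1920, 128) = 1920
+ * Y_Scanlines = align(1080, 32) = 1088
+ * UV_Scanlines = align(540, 16) = 544
+ * Total size = align(1920*1088 + 1920*544
+ * + max(16384, 1920*8), 4096)
+ * = 3149824 bytes
+ */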
+
+ /* Venus NV21:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * V U V U V U V U V U V U . . . . ^
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV21,
+ /* Venus NV12_MVTB:
+ * Two YUV 4:2:0 images/views one after the other
+ * in a top-bottom layout, same as NV12
+ * with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_1
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_2
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * View_1 begin at: 0 (zero)
+ * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align((2*(Y_Stride * Y_Scanlines)
+ * + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+ */
+ COLOR_FMT_NV12_MVTB,
+ /*
+ * The buffer can be of 2 types:
+ * (1) Venus NV12 UBWC Progressive
+ * (2) Venus NV12 UBWC Interlaced
+ *
+ * (1) Venus NV12 UBWC Progressive Buffer Format:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Y_Stride = align(Width, 128)
+ * UV_Stride = align(Width, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
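+ *
+ * Illustrative example (assumes a 1920x1080 frame):
+ * Y_Stride = align(1920, 128) = 1920
+ * Y_Scanlines = align(1080, 32) = 1088
+ * Y_UBWC_Plane_size = align(1920 * 1088, 4096) = 2088960
+ * UV_UBWC_Plane_size = align(1920 * 544, 4096) = 1044480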
+ *
+ *
+ * (2) Venus NV12 UBWC Interlaced Buffer Format:
+ * Compressed Macro-tile format for NV12 interlaced.
+ * Contains 8 planes in the following order -
+ * (A) Y_Meta_Top_Field_Plane
+ * (B) Y_UBWC_Top_Field_Plane
+ * (C) UV_Meta_Top_Field_Plane
+ * (D) UV_UBWC_Top_Field_Plane
+ * (E) Y_Meta_Bottom_Field_Plane
+ * (F) Y_UBWC_Bottom_Field_Plane
+ * (G) UV_Meta_Bottom_Field_Plane
+ * (H) UV_UBWC_Bottom_Field_Plane
+ * Y_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Top_Field_Plane.
+ * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+ * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit Y samples for top field of an interlaced frame.
+ *
+ * UV_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Top_Field_Plane.
+ * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+ * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit subsampled color difference samples for top field of an
+ * interlaced frame.
+ *
+ * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+ * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+ * format for bottom field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+ * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+ *
+ * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+ * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+ * macro-tile format for bottom field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+ * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit subsampled color difference samples for bottom
+ * field of an interlaced frame.
+ *
+ * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * <-----Y_TF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_TF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_TF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_TF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_TF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-----Y_BF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_BF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_BF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_BF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_BF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Half_height = (Height+1)>>1
+ * Y_TF_Stride = align(Width, 128)
+ * UV_TF_Stride = align(Width, 128)
+ * Y_TF_Scanlines = align(Half_height, 32)
+ * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+ * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+ * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_TF_Meta_Plane_size =
+ * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+ * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_TF_Meta_Plane_size =
+ * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+ * Y_BF_Stride = align(Width, 128)
+ * UV_BF_Stride = align(Width, 128)
+ * Y_BF_Scanlines = align(Half_height, 32)
+ * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+ * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+ * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_BF_Meta_Plane_size =
+ * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+ * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_BF_Meta_Plane_size =
+ * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+ * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+ * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+ * Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size +
+ * max(Extradata, Y_TF_Stride * 48), 4096)
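+ *
+ * Illustrative example (assumes a 1920x1080 frame):
+ * Half_height = (1080+1)>>1 = 540
+ * Y_TF_Scanlines = align(540, 32) = 544
+ * Y_UBWC_TF_Plane_size = align(1920 * 544, 4096) = 1044480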
+ */
+ COLOR_FMT_NV12_UBWC,
+ /* Venus NV12 10-bit UBWC:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(align(Width, 192) * 4/3, 256)
+ * UV_Stride = align(align(Width, 192) * 4/3, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
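+ *
+ * Illustrative example (assumes a 1920x1080 frame):
+ * Y_Stride = align(align(1920, 192) * 4/3, 256) = 2560
+ * Y_Scanlines = align(1080, 16) = 1088
+ * Y_UBWC_Plane_Size = align(2560 * 1088, 4096) = 2785280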
+ */
+ COLOR_FMT_NV12_BPP10_UBWC,
+ /* Venus RGBA8888 format:
+ * Contains 1 plane in the following order -
+ * (A) RGBA plane
+ *
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Plane_size + Extradata, 4096)
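+ *
+ * Illustrative example (assumes a 1920x1080 frame):
+ * RGB_Stride = align(1920 * 4, 128) = 7680
+ * RGB_Scanlines = align(1080, 32) = 1088
+ * RGB_Plane_size = align(7680 * 1088, 4096) = 8355840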
+ */
+ COLOR_FMT_RGBA8888,
+ /* Venus RGBA8888 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
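+ *
+ * Illustrative example (assumes a 1920x1080 frame and the tile size
+ * RGB_TileWidth = 16, RGB_TileHeight = 4 used by the helpers below):
+ * RGB_Meta_Stride = align(roundup(1920, 16), 64) = 128
+ * RGB_Meta_Scanlines = align(roundup(1080, 4), 16) = 272
+ * RGB_Meta_Plane_size = align(128 * 272, 4096) = 36864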
+ */
+ COLOR_FMT_RGBA8888_UBWC,
+ /* Venus RGBA1010102 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA1010102_UBWC,
+ /* Venus RGB565 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGB plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 2, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
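+ *
+ * Illustrative example (assumes a 1920x1080 frame):
+ * RGB_Stride = align(1920 * 2, 256) = 3840
+ * RGB_Scanlines = align(1080, 16) = 1088
+ * RGB_Plane_size = align(3840 * 1088, 4096) = 4177920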
+ */
+ COLOR_FMT_RGB565_UBWC,
+ /* P010 UBWC:
+ * Compressed Macro-tile format for P010.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 2, 256)
+ * UV_Stride = align(Width * 2, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
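+ *
+ * Illustrative example (assumes a 1920x1080 frame):
+ * Y_Stride = align(1920 * 2, 256) = 3840
+ * Y_Scanlines = align(1080, 16) = 1088
+ * Y_UBWC_Plane_Size = align(3840 * 1088, 4096) = 4177920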
+ */
+ COLOR_FMT_P010_UBWC,
+ /* Venus P010:
+ * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+ * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width * 2 aligned to 128
+ * UV_Stride : Width * 2 aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
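+ *
+ * Illustrative example (assumes a 1920x1080 frame):
+ * Y_Stride = align(1920 * 2, 128) = 3840
+ * Y_Scanlines = align(1080, 32) = 1088
+ * UV_Scanlines = align(540, 16) = 544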
+ */
+ COLOR_FMT_P010,
+};
+
+#define COLOR_FMT_RGBA1010102_UBWC COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010 COLOR_FMT_P010
+
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+ (void)height;
+ (void)width;
+
+ /*
+ * In the future this could be calculated from the width/height, but
+ * hardcode it for now since 16K satisfies all current use cases.
+ */
+ return 16 * 1024;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ case COLOR_FMT_P010:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width*2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ case COLOR_FMT_P010:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width*2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ alignment = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+ sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 16;
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 32;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+ int y_tile_width = 0, y_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_width = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_tile_width = 48;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+ y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+ return y_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+ int y_tile_height = 0, y_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ y_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+ y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+ return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+ int uv_tile_width = 0, uv_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_width = 16;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ uv_tile_width = 24;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+ uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+ return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+ int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ uv_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+ uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+ return uv_meta_scanlines;
+}
+
+static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment = 0, stride = 0, bpp = 4;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 128;
+ break;
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 256;
+ bpp = 2;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ alignment = 256;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+ return stride;
+}
+
+static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment = 0, scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 32;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+ return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+ int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_width = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+ rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+ return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+ int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+ rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+ return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(
+ int color_fmt, int width, int height)
+{
+ const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+ unsigned int uv_alignment = 0, size = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+ uv_stride, y_sclines, uv_sclines;
+ unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+ unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+ unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+ unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+ unsigned int rgb_stride = 0, rgb_scanlines = 0;
+ unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+ unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_P010:
+ uv_alignment = 4096;
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + uv_alignment;
+ size = y_plane + uv_plane +
+ MSM_MEDIA_MAX(extra_size, 8 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_MVTB:
+ uv_alignment = 4096;
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + uv_alignment;
+ size = y_plane + uv_plane;
+ size = 2 * size + extra_size;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines =
+ VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines =
+ VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane)*2 +
+ MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane +
+ MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_RGBA8888:
+ rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
+ size = rgb_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+ 4096);
+ rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+ height);
+ rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+ rgb_meta_scanlines, 4096);
+ size = rgb_ubwc_plane + rgb_meta_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return size;
+}
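+
+/*
+ * Illustrative example (not part of this header): for COLOR_FMT_NV12
+ * at 1920x1080, VENUS_BUFFER_SIZE() evaluates to
+ * align(1920*1088 + (1920*544 + 4096) + max(16384, 8*1920), 4096),
+ * i.e. 3153920 bytes.
+ */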
+
+static inline unsigned int VENUS_VIEW2_OFFSET(
+ int color_fmt, int width, int height)
+{
+ unsigned int offset = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+ uv_stride, y_sclines, uv_sclines;
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_MVTB:
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines;
+ offset = y_plane + uv_plane;
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return offset;
+}
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
index 576cea30d391..4b36b8954bae 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index b001699297c4..457c29dba4a1 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -201,7 +201,7 @@ static void blend_setup(struct drm_crtc *crtc)
int idx = idxs[pipe_id];
if (idx > 0) {
const struct mdp_format *format =
- to_mdp_format(msm_framebuffer_format(plane->fb));
+ to_mdp_format(msm_framebuffer_format(plane->state->fb));
alpha[idx-1] = format->alpha_enable;
}
}
@@ -665,7 +665,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
- plane->crtc = crtc;
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 4b646bf9c214..44d1cda56974 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -125,6 +125,8 @@ static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ drm_atomic_helper_wait_for_vblanks(mdp4_kms->dev, state);
+
/* see 119ecb7fd */
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_put(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 4a645926edb7..2bfb39082f54 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -341,7 +341,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (panel) {
+ if (!IS_ERR(panel)) {
drm_panel_disable(panel);
drm_panel_unprepare(panel);
}
@@ -410,7 +410,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (panel) {
+ if (!IS_ERR(panel)) {
drm_panel_prepare(panel);
drm_panel_enable(panel);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index e3b1c86b7aae..5368e621999c 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -34,9 +34,12 @@ static enum drm_connector_status mdp4_lvds_connector_detect(
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
- if (!mdp4_lvds_connector->panel)
+ if (!mdp4_lvds_connector->panel) {
mdp4_lvds_connector->panel =
of_drm_find_panel(mdp4_lvds_connector->panel_node);
+ if (IS_ERR(mdp4_lvds_connector->panel))
+ mdp4_lvds_connector->panel = NULL;
+ }
return mdp4_lvds_connector->panel ?
connector_status_connected :
@@ -129,7 +132,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 20e956e14c21..79ff653d8081 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -68,7 +68,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
@@ -167,8 +167,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
msm_framebuffer_iova(fb, kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
msm_framebuffer_iova(fb, kms->aspace, 3));
-
- plane->fb = fb;
}
static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
index d9c10e02ee41..784d98989e3a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 10271359789e..b1da9ce54379 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -65,7 +65,7 @@ struct mdp5_crtc {
struct drm_gem_object *scanout_bo;
uint64_t iova;
uint32_t width, height;
- uint32_t x, y;
+ int x, y;
} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -760,20 +760,31 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
* Cursor Region Of Interest (ROI) is a plane read from cursor
* buffer to render. The ROI region is determined by the visibility of
* the cursor point. In the default Cursor image the cursor point will
- * be at the top left of the cursor image, unless it is specified
- * otherwise using hotspot feature.
+ * be at the top left of the cursor image.
*
+ * Without rotation:
* If the cursor point reaches the right (xres - x < cursor.width) or
* bottom (yres - y < cursor.height) boundary of the screen, then ROI
* width and ROI height need to be evaluated to crop the cursor image
* accordingly.
* (xres-x) will be new cursor width when x > (xres - cursor.width)
* (yres-y) will be new cursor height when y > (yres - cursor.height)
+ *
+ * With rotation:
+ * We get negative x and/or y coordinates.
+ * (cursor.width - abs(x)) will be new cursor width when x < 0
+ * (cursor.height - abs(y)) will be new cursor height when y < 0
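+ * e.g. a 64x64 cursor at x = -16 yields roi_w = 64 - 16 = 48.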
*/
- *roi_w = min(mdp5_crtc->cursor.width, xres -
+ if (mdp5_crtc->cursor.x >= 0)
+ *roi_w = min(mdp5_crtc->cursor.width, xres -
mdp5_crtc->cursor.x);
- *roi_h = min(mdp5_crtc->cursor.height, yres -
+ else
+ *roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
+ if (mdp5_crtc->cursor.y >= 0)
+ *roi_h = min(mdp5_crtc->cursor.height, yres -
mdp5_crtc->cursor.y);
+ else
+ *roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
@@ -783,7 +794,7 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t blendcfg, stride;
- uint32_t x, y, width, height;
+ uint32_t x, y, src_x, src_y, width, height;
uint32_t roi_w, roi_h;
int lm;
@@ -800,6 +811,26 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
get_roi(crtc, &roi_w, &roi_h);
+ /* If the cursor buffer overlaps the upper or left screen
+ * border due to rotation, the pixel offset of the ROI inside
+ * the cursor buffer is the positive overlap distance.
+ */
+ if (mdp5_crtc->cursor.x < 0) {
+ src_x = abs(mdp5_crtc->cursor.x);
+ x = 0;
+ } else {
+ src_x = 0;
+ }
+ if (mdp5_crtc->cursor.y < 0) {
+ src_y = abs(mdp5_crtc->cursor.y);
+ y = 0;
+ } else {
+ src_y = 0;
+ }
+ DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
+ crtc->name, x, y, roi_w, roi_h, src_x, src_y);
+
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -812,6 +843,9 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
+ MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
+ MDP5_LM_CURSOR_XY_SRC_X(src_x));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
mdp5_crtc->cursor.iova);
@@ -932,8 +966,9 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
if (unlikely(!crtc->state->enable))
return 0;
- mdp5_crtc->cursor.x = x = max(x, 0);
- mdp5_crtc->cursor.y = y = max(y, 0);
+ /* accept negative x/y coordinates up to maximum cursor overlap */
+ mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
+ mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);
get_roi(crtc, &roi_w, &roi_h);
@@ -1207,7 +1242,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
"unref cursor", unref_cursor_worker);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
- plane->crtc = crtc;
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 9af94e35f678..fcd44d1d1068 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -319,7 +319,17 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
mdp5_cstate->ctl = ctl;
mdp5_cstate->pipeline.intf = intf;
- mdp5_cstate->defer_start = true;
+
+ /*
+ * This is a bit awkward, but we want to flush the CTL and hit the
+ * START bit at most once for an atomic update. In the non-full-
+ * modeset case, this is done from crtc->atomic_flush(), but that
+ * is too early in the case of full modeset, in which case we
+ * defer to encoder->enable(). But we need to *know* whether
+ * encoder->enable() will be called to do this:
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ mdp5_cstate->defer_start = true;
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 6e12e275deba..bddd625ab91b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -170,6 +170,8 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
struct device *dev = &mdp5_kms->pdev->dev;
struct mdp5_global_state *global_state;
+ drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);
+
global_state = mdp5_get_existing_global_state(mdp5_kms);
if (mdp5_kms->smp)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index f2a0db7a8a03..1cc4e57f0226 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -20,12 +20,10 @@
#include "msm_drv.h"
#include "mdp5_kms.h"
-/*
- * If needed, this can become more specific: something like struct mdp5_mdss,
- * which contains a 'struct msm_mdss base' member.
- */
-struct msm_mdss {
- struct drm_device *dev;
+#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
+
+struct mdp5_mdss {
+ struct msm_mdss base;
void __iomem *mmio, *vbif;
@@ -41,22 +39,22 @@ struct msm_mdss {
} irqcontroller;
};
-static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
- msm_writel(data, mdss->mmio + reg);
+ msm_writel(data, mdp5_mdss->mmio + reg);
}
-static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
- return msm_readl(mdss->mmio + reg);
+ return msm_readl(mdp5_mdss->mmio + reg);
}
static irqreturn_t mdss_irq(int irq, void *arg)
{
- struct msm_mdss *mdss = arg;
+ struct mdp5_mdss *mdp5_mdss = arg;
u32 intr;
- intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+ intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);
VERB("intr=%08x", intr);
@@ -64,7 +62,7 @@ static irqreturn_t mdss_irq(int irq, void *arg)
irq_hw_number_t hwirq = fls(intr) - 1;
generic_handle_irq(irq_find_mapping(
- mdss->irqcontroller.domain, hwirq));
+ mdp5_mdss->irqcontroller.domain, hwirq));
intr &= ~(1 << hwirq);
}
@@ -84,19 +82,19 @@ static irqreturn_t mdss_irq(int irq, void *arg)
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
- struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+ struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
- struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+ struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
- set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
@@ -109,13 +107,13 @@ static struct irq_chip mdss_hw_irq_chip = {
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- struct msm_mdss *mdss = d->host_data;
+ struct mdp5_mdss *mdp5_mdss = d->host_data;
if (!(VALID_IRQS & (1 << hwirq)))
return -EPERM;
irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, mdss);
+ irq_set_chip_data(irq, mdp5_mdss);
return 0;
}
@@ -126,90 +124,99 @@ static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
};
-static int mdss_irq_domain_init(struct msm_mdss *mdss)
+static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
- struct device *dev = mdss->dev->dev;
+ struct device *dev = mdp5_mdss->base.dev->dev;
struct irq_domain *d;
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
- mdss);
+ mdp5_mdss);
if (!d) {
dev_err(dev, "mdss irq domain add failed\n");
return -ENXIO;
}
- mdss->irqcontroller.enabled_mask = 0;
- mdss->irqcontroller.domain = d;
+ mdp5_mdss->irqcontroller.enabled_mask = 0;
+ mdp5_mdss->irqcontroller.domain = d;
return 0;
}
-int msm_mdss_enable(struct msm_mdss *mdss)
+static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- clk_prepare_enable(mdss->ahb_clk);
- if (mdss->axi_clk)
- clk_prepare_enable(mdss->axi_clk);
- if (mdss->vsync_clk)
- clk_prepare_enable(mdss->vsync_clk);
+ clk_prepare_enable(mdp5_mdss->ahb_clk);
+ if (mdp5_mdss->axi_clk)
+ clk_prepare_enable(mdp5_mdss->axi_clk);
+ if (mdp5_mdss->vsync_clk)
+ clk_prepare_enable(mdp5_mdss->vsync_clk);
return 0;
}
-int msm_mdss_disable(struct msm_mdss *mdss)
+static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- if (mdss->vsync_clk)
- clk_disable_unprepare(mdss->vsync_clk);
- if (mdss->axi_clk)
- clk_disable_unprepare(mdss->axi_clk);
- clk_disable_unprepare(mdss->ahb_clk);
+ if (mdp5_mdss->vsync_clk)
+ clk_disable_unprepare(mdp5_mdss->vsync_clk);
+ if (mdp5_mdss->axi_clk)
+ clk_disable_unprepare(mdp5_mdss->axi_clk);
+ clk_disable_unprepare(mdp5_mdss->ahb_clk);
return 0;
}
-static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
- struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+ struct platform_device *pdev =
+ to_platform_device(mdp5_mdss->base.dev->dev);
- mdss->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(mdss->ahb_clk))
- mdss->ahb_clk = NULL;
+ mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(mdp5_mdss->ahb_clk))
+ mdp5_mdss->ahb_clk = NULL;
- mdss->axi_clk = msm_clk_get(pdev, "bus");
- if (IS_ERR(mdss->axi_clk))
- mdss->axi_clk = NULL;
+ mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
+ if (IS_ERR(mdp5_mdss->axi_clk))
+ mdp5_mdss->axi_clk = NULL;
- mdss->vsync_clk = msm_clk_get(pdev, "vsync");
- if (IS_ERR(mdss->vsync_clk))
- mdss->vsync_clk = NULL;
+ mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+ if (IS_ERR(mdp5_mdss->vsync_clk))
+ mdp5_mdss->vsync_clk = NULL;
return 0;
}
-void msm_mdss_destroy(struct drm_device *dev)
+static void mdp5_mdss_destroy(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_mdss *mdss = priv->mdss;
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);
- if (!mdss)
+ if (!mdp5_mdss)
return;
- irq_domain_remove(mdss->irqcontroller.domain);
- mdss->irqcontroller.domain = NULL;
+ irq_domain_remove(mdp5_mdss->irqcontroller.domain);
+ mdp5_mdss->irqcontroller.domain = NULL;
- regulator_disable(mdss->vdd);
+ regulator_disable(mdp5_mdss->vdd);
pm_runtime_disable(dev->dev);
}
-int msm_mdss_init(struct drm_device *dev)
+static const struct msm_mdss_funcs mdss_funcs = {
+ .enable = mdp5_mdss_enable,
+ .disable = mdp5_mdss_disable,
+ .destroy = mdp5_mdss_destroy,
+};
+
+int mdp5_mdss_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
- struct msm_mdss *mdss;
+ struct mdp5_mdss *mdp5_mdss;
int ret;
DBG("");
@@ -217,40 +224,40 @@ int msm_mdss_init(struct drm_device *dev)
if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
return 0;
- mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
- if (!mdss) {
+ mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
+ if (!mdp5_mdss) {
ret = -ENOMEM;
goto fail;
}
- mdss->dev = dev;
+ mdp5_mdss->base.dev = dev;
- mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
- if (IS_ERR(mdss->mmio)) {
- ret = PTR_ERR(mdss->mmio);
+ mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+ if (IS_ERR(mdp5_mdss->mmio)) {
+ ret = PTR_ERR(mdp5_mdss->mmio);
goto fail;
}
- mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
- if (IS_ERR(mdss->vbif)) {
- ret = PTR_ERR(mdss->vbif);
+ mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdp5_mdss->vbif)) {
+ ret = PTR_ERR(mdp5_mdss->vbif);
goto fail;
}
- ret = msm_mdss_get_clocks(mdss);
+ ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
/* Regulator to enable GDSCs in downstream kernels */
- mdss->vdd = devm_regulator_get(dev->dev, "vdd");
- if (IS_ERR(mdss->vdd)) {
- ret = PTR_ERR(mdss->vdd);
+ mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+ if (IS_ERR(mdp5_mdss->vdd)) {
+ ret = PTR_ERR(mdp5_mdss->vdd);
goto fail;
}
- ret = regulator_enable(mdss->vdd);
+ ret = regulator_enable(mdp5_mdss->vdd);
if (ret) {
dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
ret);
@@ -258,25 +265,26 @@ int msm_mdss_init(struct drm_device *dev)
}
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
- mdss_irq, 0, "mdss_isr", mdss);
+ mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init irq: %d\n", ret);
goto fail_irq;
}
- ret = mdss_irq_domain_init(mdss);
+ ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
goto fail_irq;
}
- priv->mdss = mdss;
+ mdp5_mdss->base.funcs = &mdss_funcs;
+ priv->mdss = &mdp5_mdss->base;
pm_runtime_enable(dev->dev);
return 0;
fail_irq:
- regulator_disable(mdss->vdd);
+ regulator_disable(mdp5_mdss->vdd);
fail:
return ret;
}
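
The rename running through this file is the visible half of a subclassing pattern: a generic struct msm_mdss is embedded in the MDP5-specific wrapper and recovered with container_of(). A minimal sketch, with field lists abbreviated and the generic struct's exact layout assumed rather than quoted from the headers:

    struct msm_mdss;

    struct msm_mdss_funcs {
            int (*enable)(struct msm_mdss *mdss);
            int (*disable)(struct msm_mdss *mdss);
            void (*destroy)(struct drm_device *dev);
    };

    struct msm_mdss {
            struct drm_device *dev;
            const struct msm_mdss_funcs *funcs;
    };

    struct mdp5_mdss {
            struct msm_mdss base;           /* embedded generic part */
            void __iomem *mmio, *vbif;
            /* clocks, vdd regulator, irqcontroller, ... */
    };

    #define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)

Callers hold only a struct msm_mdss * and dispatch through ->funcs; each op downcasts exactly once on entry, as mdp5_mdss_enable() does above.
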
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index e09bc53a0e65..7d306c5acd09 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -46,7 +46,7 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
@@ -512,7 +512,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
if (plane_enabled(new_state)) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline =
- mdp5_crtc_get_pipeline(plane->crtc);
+ mdp5_crtc_get_pipeline(new_state->crtc);
int ret;
ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
@@ -1029,8 +1029,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
src_img_w, src_img_h,
src_x + src_w, src_y, src_w, src_h);
- plane->fb = fb;
-
return ret;
}
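
Two of the mdp5_plane changes deserve a note: the async-update path now takes the CRTC from the new plane state rather than the legacy plane->crtc pointer, and the manual plane->fb bookkeeping is dropped because the atomic core maintains the legacy pointers itself (rationale inferred from the hunks, not stated in the patch). In sketch form:

    /* async update: the state object, not the legacy pointer, is current */
    struct mdp5_pipeline *pipeline =
            mdp5_crtc_get_pipeline(new_state->crtc);

    /* mode_set: no more "plane->fb = fb;" - the core tracks legacy state */
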
diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
index 1494c407be44..d420c8044e23 100644
--- a/drivers/gpu/drm/msm/disp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index b744bcc7d8ad..ff8164cc6738 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -208,6 +208,9 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
+ if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+ goto fail;
+
msm_dsi->encoder = encoder;
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 70d9a9a47acd..08f3fc6771b7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -100,6 +100,7 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+bool msm_dsi_manager_validate_current_config(u8 id);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
@@ -149,6 +150,7 @@ static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
#endif
/* dsi host */
+struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
@@ -162,7 +164,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
- struct msm_dsi_phy_shared_timings *phy_shared_timings);
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_dual_dsi);
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode);
@@ -175,13 +178,29 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
- struct msm_dsi_phy_clk_request *clk_req);
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_dual_dsi);
void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host);
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size);
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
/* dsi phy */
struct msm_dsi_phy;
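
The struct msm_dsi_host forward declaration added above is what lets all of these per-version prototypes sit in dsi.h while the struct body stays private to dsi_host.c; an incomplete type is enough when functions only take and pass the pointer:

    struct msm_dsi_host;                                 /* incomplete type */
    int dsi_clk_init_v2(struct msm_dsi_host *msm_host);  /* compiles fine */
    /* sizeof(struct msm_dsi_host) or msm_host->pdev would not compile
     * here; only dsi_host.c, which defines the struct, can do that. */
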
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index f6a9471b70c8..21f489a737d7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,8 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-01-12 09:09:22)
-- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 0327bb54b01b..dcdfb1bb54f9 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -136,20 +136,58 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_v2,
+ .link_clk_disable = dsi_link_clk_disable_v2,
+ .clk_init_ver = dsi_clk_init_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_v2,
+ .tx_buf_get = dsi_tx_buf_get_v2,
+ .tx_buf_put = NULL,
+ .dma_base_get = dsi_dma_base_get_v2,
+ .calc_clk_rate = dsi_calc_clk_rate_v2,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = NULL,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = dsi_clk_init_6g_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
- {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
+ &apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
- &msm8974_apq8084_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3,
+ &msm8994_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1,
+ &msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
+ &msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
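
msm_dsi_cfg_get()'s body sits outside this hunk; for orientation, a plausible sketch of the lookup the callers rely on (assumed, not quoted from the source):

    const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(dsi_cfg_handlers); i++) {
                    if (dsi_cfg_handlers[i].major == major &&
                        dsi_cfg_handlers[i].minor == minor)
                            return &dsi_cfg_handlers[i];
            }

            return NULL;
    }
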
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 9cfdcf1c95d5..16c507911110 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -40,10 +40,22 @@ struct msm_dsi_config {
const int num_dsi;
};
+struct msm_dsi_host_cfg_ops {
+ int (*link_clk_enable)(struct msm_dsi_host *msm_host);
+ void (*link_clk_disable)(struct msm_dsi_host *msm_host);
+ int (*clk_init_ver)(struct msm_dsi_host *msm_host);
+ int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
+ void* (*tx_buf_get)(struct msm_dsi_host *msm_host);
+ void (*tx_buf_put)(struct msm_dsi_host *msm_host);
+ int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+};
+
struct msm_dsi_cfg_handler {
u32 major;
u32 minor;
const struct msm_dsi_config *cfg;
+ const struct msm_dsi_host_cfg_ops *ops;
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
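
Two of the hooks are deliberately optional: clk_init_ver is NULL for the plain 6G parts and tx_buf_put is NULL for v2, so call sites guard them while mandatory hooks are invoked directly. The pattern, as used in the dsi_host.c hunks below:

    const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;

    if (cfg_hnd->ops->clk_init_ver)                 /* optional hook */
            ret = cfg_hnd->ops->clk_init_ver(msm_host);

    ret = cfg_hnd->ops->link_clk_enable(msm_host);  /* mandatory hook */
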
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 2f1a2780658a..96fb5f635314 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -118,6 +118,7 @@ struct msm_dsi_host {
struct clk *byte_intf_clk;
u32 byte_clk_rate;
+ u32 pixel_clk_rate;
u32 esc_clk_rate;
/* DSI v2 specific clocks */
@@ -332,6 +333,54 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
return 0;
}
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->src_clk = msm_clk_get(pdev, "src");
+
+ if (IS_ERR(msm_host->src_clk)) {
+ ret = PTR_ERR(msm_host->src_clk);
+ pr_err("%s: can't find src clock. ret=%d\n",
+ __func__, ret);
+ msm_host->src_clk = NULL;
+ return ret;
+ }
+
+ msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
+ if (!msm_host->esc_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get esc clock parent. ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
+ if (!msm_host->dsi_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get src clock parent. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
+ if (IS_ERR(msm_host->byte_intf_clk)) {
+ ret = PTR_ERR(msm_host->byte_intf_clk);
+ pr_err("%s: can't find byte_intf clock. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -379,19 +428,6 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
- cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_2_1) {
- msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
- if (IS_ERR(msm_host->byte_intf_clk)) {
- ret = PTR_ERR(msm_host->byte_intf_clk);
- pr_err("%s: can't find byte_intf clock. ret=%d\n",
- __func__, ret);
- goto exit;
- }
- } else {
- msm_host->byte_intf_clk = NULL;
- }
-
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) {
ret = -ENODEV;
@@ -406,31 +442,8 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- msm_host->src_clk = msm_clk_get(pdev, "src");
- if (IS_ERR(msm_host->src_clk)) {
- ret = PTR_ERR(msm_host->src_clk);
- pr_err("%s: can't find src clock. ret=%d\n",
- __func__, ret);
- msm_host->src_clk = NULL;
- goto exit;
- }
-
- msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
- if (!msm_host->esc_clk_src) {
- ret = -ENODEV;
- pr_err("%s: can't get esc clock parent. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
- if (!msm_host->dsi_clk_src) {
- ret = -ENODEV;
- pr_err("%s: can't get src clock parent. ret=%d\n",
- __func__, ret);
- }
- }
+ if (cfg_hnd->ops->clk_init_ver)
+ ret = cfg_hnd->ops->clk_init_ver(msm_host);
exit:
return ret;
}
@@ -498,7 +511,7 @@ int msm_dsi_runtime_resume(struct device *dev)
return dsi_bus_clk_enable(msm_host);
}
-static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -511,7 +524,7 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto error;
}
- ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
@@ -566,7 +579,7 @@ error:
return ret;
}
-static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
int ret;
@@ -592,7 +605,7 @@ static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
goto error;
}
- ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
@@ -634,98 +647,121 @@ error:
return ret;
}
-static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->pixel_clk);
+ if (msm_host->byte_intf_clk)
+ clk_disable_unprepare(msm_host->byte_intf_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
- return dsi_link_clk_enable_6g(msm_host);
- else
- return dsi_link_clk_enable_v2(msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
+{
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->src_clk);
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
}
-static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ struct drm_display_mode *mode = msm_host->mode;
+ u32 pclk_rate;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->pixel_clk);
- if (msm_host->byte_intf_clk)
- clk_disable_unprepare(msm_host->byte_intf_clk);
- clk_disable_unprepare(msm_host->byte_clk);
- } else {
- clk_disable_unprepare(msm_host->pixel_clk);
- clk_disable_unprepare(msm_host->src_clk);
- clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->byte_clk);
- }
+ pclk_rate = mode->clock * 1000;
+
+ /*
+ * For dual DSI mode, the current DRM mode has the complete width of the
+ * panel. Since the complete panel is driven by two DSI controllers, the
+ * clock rates have to be split between them. Adjust the byte and pixel
+ * clock rates for each DSI host accordingly.
+ */
+ if (is_dual_dsi)
+ pclk_rate /= 2;
+
+ return pclk_rate;
}
-static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
- struct drm_display_mode *mode = msm_host->mode;
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
- u32 pclk_rate;
+ u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
+ u64 pclk_bpp = (u64)pclk_rate * bpp;
- if (!mode) {
- pr_err("%s: mode not set\n", __func__);
- return -EINVAL;
- }
-
- pclk_rate = mode->clock * 1000;
- if (lanes > 0) {
- msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
- } else {
+ if (lanes == 0) {
pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
- msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
+ lanes = 1;
}
- DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+ do_div(pclk_bpp, (8 * lanes));
- msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ msm_host->pixel_clk_rate = pclk_rate;
+ msm_host->byte_clk_rate = pclk_bpp;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- unsigned int esc_mhz, esc_div;
- unsigned long byte_mhz;
+ DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
+ msm_host->byte_clk_rate);
- msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
+}
- /*
- * esc clock is byte clock followed by a 4 bit divider,
- * we need to find an escape clock frequency within the
- * mipi DSI spec range within the maximum divider limit
- * We iterate here between an escape clock frequencey
- * between 20 Mhz to 5 Mhz and pick up the first one
- * that can be supported by our divider
- */
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+ if (!msm_host->mode) {
+ pr_err("%s: mode not set\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_calc_pclk(msm_host, is_dual_dsi);
+ msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ return 0;
+}
- byte_mhz = msm_host->byte_clk_rate / 1000000;
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+ u32 bpp = dsi_get_bpp(msm_host->format);
+ u64 pclk_bpp;
+ unsigned int esc_mhz, esc_div;
+ unsigned long byte_mhz;
- for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
- esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
+ dsi_calc_pclk(msm_host, is_dual_dsi);
- /*
- * TODO: Ideally, we shouldn't know what sort of divider
- * is available in mmss_cc, we're just assuming that
- * it'll always be a 4 bit divider. Need to come up with
- * a better way here.
- */
- if (esc_div >= 1 && esc_div <= 16)
- break;
- }
+ pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
+ do_div(pclk_bpp, 8);
+ msm_host->src_clk_rate = pclk_bpp;
- if (esc_mhz < 5)
- return -EINVAL;
+ /*
+ * The esc clock is the byte clock followed by a 4-bit divider, so we
+ * need an escape clock frequency that lies in the MIPI DSI spec range
+ * and is reachable within the divider limit. We iterate from 20 MHz
+ * down to 5 MHz and pick the first frequency our divider can support.
+ */
+
+ byte_mhz = msm_host->byte_clk_rate / 1000000;
- msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+ for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
+ esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
- DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
- msm_host->src_clk_rate);
+ /*
+ * TODO: Ideally, we shouldn't know what sort of divider
+ * is available in mmss_cc, we're just assuming that
+ * it'll always be a 4 bit divider. Need to come up with
+ * a better way here.
+ */
+ if (esc_div >= 1 && esc_div <= 16)
+ break;
}
+ if (esc_mhz < 5)
+ return -EINVAL;
+
+ msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+
+ DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
+ msm_host->src_clk_rate);
+
return 0;
}
@@ -885,7 +921,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
dsi_write(msm_host, REG_DSI_CTRL, data);
}
-static void dsi_timing_setup(struct msm_dsi_host *msm_host)
+static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
@@ -897,10 +933,26 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
u32 ha_end = ha_start + mode->hdisplay;
u32 va_start = v_total - mode->vsync_start;
u32 va_end = va_start + mode->vdisplay;
+ u32 hdisplay = mode->hdisplay;
u32 wc;
DBG("");
+ /*
+ * For dual DSI mode, the current DRM mode has the complete width of
+ * the panel. Since the complete panel is driven by two DSI
+ * controllers, the horizontal timings have to be split between them.
+ * Adjust the DSI host timing values accordingly.
+ */
+ if (is_dual_dsi) {
+ h_total /= 2;
+ hs_end /= 2;
+ ha_start /= 2;
+ ha_end /= 2;
+ hdisplay /= 2;
+ }
+
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
@@ -921,7 +973,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
/* image data and 1 byte write_memory_start cmd */
- wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
@@ -931,7 +983,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
MIPI_DSI_DCS_LONG_WRITE));
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
- DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
+ DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
}
}
@@ -1015,50 +1067,37 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
}
}
-/* dsi_cmd */
-static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- int ret;
uint64_t iova;
+ u8 *data;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
- if (IS_ERR(msm_host->tx_gem_obj)) {
- ret = PTR_ERR(msm_host->tx_gem_obj);
- pr_err("%s: failed to allocate gem, %d\n",
- __func__, ret);
- msm_host->tx_gem_obj = NULL;
- return ret;
- }
+ data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
+ priv->kms->aspace,
+ &msm_host->tx_gem_obj, &iova);
- ret = msm_gem_get_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, &iova);
- if (ret) {
- pr_err("%s: failed to get iova, %d\n", __func__, ret);
- return ret;
- }
+ if (IS_ERR(data)) {
+ msm_host->tx_gem_obj = NULL;
+ return PTR_ERR(data);
+ }
- if (iova & 0x07) {
- pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
- return -EINVAL;
- }
+ msm_host->tx_size = msm_host->tx_gem_obj->size;
- msm_host->tx_size = msm_host->tx_gem_obj->size;
- } else {
- msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
+ return 0;
+}
+
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+{
+ struct drm_device *dev = msm_host->dev;
+
+ msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
&msm_host->tx_buf_paddr, GFP_KERNEL);
- if (!msm_host->tx_buf) {
- ret = -ENOMEM;
- pr_err("%s: failed to allocate tx buf, %d\n",
- __func__, ret);
- return ret;
- }
+ if (!msm_host->tx_buf)
+ return -ENOMEM;
- msm_host->tx_size = size;
- }
+ msm_host->tx_size = size;
return 0;
}
@@ -1089,6 +1128,21 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
msm_host->tx_buf_paddr);
}
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
+{
+ return msm_gem_get_vaddr(msm_host->tx_gem_obj);
+}
+
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
+{
+ return msm_host->tx_buf;
+}
+
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
+{
+ msm_gem_put_vaddr(msm_host->tx_gem_obj);
+}
+
/*
* prepare cmd buffer to be txed
*/
@@ -1113,15 +1167,11 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
return -EINVAL;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- pr_err("%s: get vaddr failed, %d\n", __func__, ret);
- return ret;
- }
- } else {
- data = msm_host->tx_buf;
+ data = cfg_hnd->ops->tx_buf_get(msm_host);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+ return ret;
}
/* MSM specific command format in memory */
@@ -1142,8 +1192,8 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
if (packet.size < len)
memset(data + packet.size, 0xff, len - packet.size);
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
- msm_gem_put_vaddr(msm_host->tx_gem_obj);
+ if (cfg_hnd->ops->tx_buf_put)
+ cfg_hnd->ops->tx_buf_put(msm_host);
return len;
}
@@ -1190,24 +1240,38 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
return msg->rx_len;
}
-static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
+
+ if (!dma_base)
+ return -EINVAL;
+
+ return msm_gem_get_iova(msm_host->tx_gem_obj,
+ priv->kms->aspace, dma_base);
+}
+
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
+{
+ if (!dma_base)
+ return -EINVAL;
+
+ *dma_base = msm_host->tx_buf_paddr;
+ return 0;
+}
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
uint64_t dma_base;
bool triggered;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- ret = msm_gem_get_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, &dma_base);
- if (ret) {
- pr_err("%s: failed to get iova: %d\n", __func__, ret);
- return ret;
- }
- } else {
- dma_base = msm_host->tx_buf_paddr;
+ ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
+ if (ret) {
+ pr_err("%s: failed to get iova: %d\n", __func__, ret);
+ return ret;
}
reinit_completion(&msm_host->dma_comp);
@@ -1845,6 +1909,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct platform_device *pdev = msm_host->pdev;
int ret;
@@ -1865,7 +1930,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
}
msm_host->dev = dev;
- ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+ ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
return ret;
@@ -1898,7 +1963,7 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
* output
*/
if (check_defer && msm_host->device_node) {
- if (!of_drm_find_panel(msm_host->device_node))
+ if (IS_ERR(of_drm_find_panel(msm_host->device_node)))
if (!of_drm_find_bridge(msm_host->device_node))
return -EPROBE_DEFER;
}
@@ -1923,6 +1988,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
/* TODO: make sure dsi_cmd_mdp is idle.
* Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
@@ -1935,7 +2001,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdp clock need to be enabled to receive dsi interrupt
*/
pm_runtime_get_sync(&msm_host->pdev->dev);
- dsi_link_clk_enable(msm_host);
+ cfg_hnd->ops->link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -1956,6 +2022,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
@@ -1965,7 +2032,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
/* TODO: unvote for bus bandwidth */
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
}
@@ -2129,7 +2196,6 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;
@@ -2155,14 +2221,16 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
+ if (msm_host->dsi_clk_src) {
ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
if (ret) {
pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
__func__, ret);
goto exit;
}
+ }
+ if (msm_host->esc_clk_src) {
ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
if (ret) {
pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
@@ -2189,12 +2257,14 @@ void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
}
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
- struct msm_dsi_phy_clk_request *clk_req)
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_dual_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- ret = dsi_calc_clk_rate(msm_host);
+ ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
return;
@@ -2256,9 +2326,11 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
}
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
- struct msm_dsi_phy_shared_timings *phy_shared_timings)
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_dual_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret = 0;
mutex_lock(&msm_host->dev_mutex);
@@ -2277,7 +2349,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
}
pm_runtime_get_sync(&msm_host->pdev->dev);
- ret = dsi_link_clk_enable(msm_host);
+ ret = cfg_hnd->ops->link_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable link clocks. ret=%d\n",
__func__, ret);
@@ -2291,7 +2363,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto fail_disable_clk;
}
- dsi_timing_setup(msm_host);
+ dsi_timing_setup(msm_host, is_dual_dsi);
dsi_sw_reset(msm_host);
dsi_ctrl_config(msm_host, true, phy_shared_timings);
@@ -2304,7 +2376,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
return 0;
fail_disable_clk:
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
@@ -2316,6 +2388,7 @@ unlock_ret:
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
mutex_lock(&msm_host->dev_mutex);
if (!msm_host->power_on) {
@@ -2330,7 +2403,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
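
To make the new clock derivation concrete, a worked example with hypothetical numbers (a dual-DSI panel whose full mode clock is 148500 kHz, RGB888 so bpp = 24, 4 lanes per host):

    u32 pclk_rate = 148500 * 1000;      /* 148.5 MHz for the whole panel */
    pclk_rate /= 2;                     /* dual DSI: each host drives half */

    u64 pclk_bpp = (u64)pclk_rate * 24; /* pixel bits per second */
    do_div(pclk_bpp, 8 * 4);            /* bytes/s per lane */
    /* pixel_clk_rate = 74250000, byte_clk_rate = 55687500 */

    /* v2 esc scan: byte_mhz = 55; esc_mhz = 20 gives
     * esc_div = DIV_ROUND_UP(55, 20) = 3, which fits the 4-bit divider,
     * so esc_clk_rate = 55687500 / 3 = 18562500 Hz. */
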
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4cb1cb68878b..5224010d90e4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -134,8 +134,9 @@ static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
{
struct msm_dsi_phy_clk_request clk_req;
int ret;
+ bool is_dual_dsi = IS_DUAL_DSI();
- msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req);
+ msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
@@ -305,102 +306,25 @@ static void dsi_mgr_connector_destroy(struct drm_connector *connector)
kfree(dsi_connector);
}
-static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
-{
- struct drm_display_mode *mode, *m;
-
- /* Only support left-right mode */
- list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
- mode->clock >>= 1;
- mode->hdisplay >>= 1;
- mode->hsync_start >>= 1;
- mode->hsync_end >>= 1;
- mode->htotal >>= 1;
- drm_mode_set_name(mode);
- }
-}
-
-static int dsi_dual_connector_tile_init(
- struct drm_connector *connector, int id)
-{
- struct drm_display_mode *mode;
- /* Fake topology id */
- char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
-
- if (connector->tile_group) {
- DBG("Tile property has been initialized");
- return 0;
- }
-
- /* Use the first mode only for now */
- mode = list_first_entry(&connector->probed_modes,
- struct drm_display_mode,
- head);
- if (!mode)
- return -EINVAL;
-
- connector->tile_group = drm_mode_get_tile_group(
- connector->dev, topo_id);
- if (!connector->tile_group)
- connector->tile_group = drm_mode_create_tile_group(
- connector->dev, topo_id);
- if (!connector->tile_group) {
- pr_err("%s: failed to create tile group\n", __func__);
- return -ENOMEM;
- }
-
- connector->has_tile = true;
- connector->tile_is_single_monitor = true;
-
- /* mode has been fixed */
- connector->tile_h_size = mode->hdisplay;
- connector->tile_v_size = mode->vdisplay;
-
- /* Only support left-right mode */
- connector->num_h_tile = 2;
- connector->num_v_tile = 1;
-
- connector->tile_v_loc = 0;
- connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
-
- return 0;
-}
-
static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
- int ret, num;
+ int num;
if (!panel)
return 0;
- /* Since we have 2 connectors, but only 1 drm_panel in dual DSI mode,
- * panel should not attach to any connector.
- * Only temporarily attach panel to the current connector here,
- * to let panel set mode to this connector.
+ /*
+ * In dual DSI mode, we have one connector that can be
+ * attached to the drm_panel.
*/
drm_panel_attach(panel, connector);
num = drm_panel_get_modes(panel);
- drm_panel_detach(panel);
if (!num)
return 0;
- if (IS_DUAL_DSI()) {
- /* report half resolution to user */
- dsi_dual_connector_fix_modes(connector);
- ret = dsi_dual_connector_tile_init(connector, id);
- if (ret)
- return ret;
- ret = drm_mode_connector_set_tile_property(connector);
- if (ret) {
- pr_err("%s: set tile property failed, %d\n",
- __func__, ret);
- return ret;
- }
- }
-
return num;
}
@@ -454,11 +378,11 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (ret)
goto phy_en_fail;
- /* Do nothing with the host if it is DSI 1 in case of dual DSI */
- if (is_dual_dsi && (DSI_1 == id))
+ /* Do nothing with the host if it is the slave DSI link in dual DSI mode */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
- ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]);
+ ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi);
if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
goto host_on_fail;
@@ -466,7 +390,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host,
- &phy_shared_timings[DSI_1]);
+ &phy_shared_timings[DSI_1], is_dual_dsi);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
__func__, ret);
@@ -556,11 +480,11 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
return;
/*
- * Do nothing with the host if it is DSI 1 in case of dual DSI.
+ * Do nothing with the host if it is the slave DSI link in dual DSI mode.
* It is safe to call dsi_mgr_phy_disable() here because a single PHY
 * won't be disabled until both PHYs request disable.
*/
- if (is_dual_dsi && (DSI_1 == id))
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
if (panel) {
@@ -621,7 +545,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- if (is_dual_dsi && (DSI_1 == id))
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
msm_dsi_host_set_display_mode(host, adjusted_mode);
@@ -684,11 +608,28 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, msm_dsi->encoder);
+ drm_connector_attach_encoder(connector, msm_dsi->encoder);
return connector;
}
+bool msm_dsi_manager_validate_current_config(u8 id)
+{
+ bool is_dual_dsi = IS_DUAL_DSI();
+
+ /*
+ * For dual DSI, there is only one drm panel, so we register only one
+ * bridge/connector. Skip bridge/connector initialisation for the
+ * slave DSI link in a dual DSI configuration.
+ */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) {
+ DBG("Skip bridge registration for slave DSI->id: %d\n", id);
+ return false;
+ }
+ return true;
+}
+
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
@@ -751,12 +692,8 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
connector_list = &dev->mode_config.connector_list;
list_for_each_entry(connector, connector_list, head) {
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == encoder->base.id)
- return connector;
- }
+ if (drm_connector_has_possible_encoder(connector, encoder))
+ return connector;
}
return ERR_PTR(-ENODEV);
@@ -836,6 +773,7 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
struct msm_drm_private *priv;
struct msm_kms *kms;
struct drm_encoder *encoder;
+ bool cmd_mode;
/*
* drm_device pointer is assigned to msm_dsi only in the modeset_init
@@ -850,10 +788,11 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
priv = dev->dev_private;
kms = priv->kms;
encoder = msm_dsi_get_encoder(msm_dsi);
+ cmd_mode = !(device_flags &
+ MIPI_DSI_MODE_VIDEO);
if (encoder && kms->funcs->set_encoder_mode)
- if (!(device_flags & MIPI_DSI_MODE_VIDEO))
- kms->funcs->set_encoder_mode(kms, encoder, true);
+ kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
}
int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
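
The same master/slave test now appears in pre_enable, post_disable, mode_set and validate_current_config; a hypothetical helper showing the shared predicate (the macros are the driver's existing ones, the function name is illustrative only):

    static bool dsi_mgr_is_passive_slave(int id)
    {
            /* In dual DSI only the master link touches shared host state. */
            return IS_DUAL_DSI() && !IS_MASTER_DSI_LINK(id);
    }
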
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 57cf7fa7f1c4..874265314413 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index c4c37a7df637..4c03f0b7343e 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -798,6 +798,8 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
return ERR_PTR(-ENOMEM);
}
+ spin_lock_init(&pll_10nm->postdiv_lock);
+
pll = &pll_10nm->base;
pll->min_rate = 1000000000UL;
pll->max_rate = 3500000000UL;
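
The one-liner above is an initialize-before-publish fix: the 10nm PLL's post-divider clk ops take postdiv_lock, and those clocks become callable as soon as they are registered, so the lock must be ready first. Schematically (the registration call name is assumed, not quoted from this file):

    spin_lock_init(&pll_10nm->postdiv_lock);   /* must precede registration */
    /* ... devm_clk_hw_register(...postdiv...); from here on the clk ops
     * may run and legally take pll_10nm->postdiv_lock. */
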
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 9d4d1feaefd7..07c48ddb5301 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f150d4a47707..9cb6e6fe9810 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 6f3fc6b0f0a3..058ff92a0207 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -56,7 +56,7 @@ static int edp_connector_get_modes(struct drm_connector *connector)
if (ret)
return ret;
- drm_mode_connector_update_edid_property(connector, drm_edid);
+ drm_connector_update_edid_property(connector, drm_edid);
if (drm_edid)
ret = drm_add_edid_modes(connector, drm_edid);
@@ -134,7 +134,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_mode_connector_attach_encoder(connector, edp->encoder);
+ drm_connector_attach_encoder(connector, edp->encoder);
return connector;
}
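
The two edp hunks are mechanical fallout of a drm core rename that dropped the "mode_" infix without changing semantics; the same substitution shows up in the hdmi hunks below:

    drm_mode_connector_update_edid_property(connector, drm_edid); /* old */
    drm_connector_update_edid_property(connector, drm_edid);      /* new */

    drm_mode_connector_attach_encoder(connector, encoder);        /* old */
    drm_connector_attach_encoder(connector, encoder);             /* new */
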
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index ecebf8b623ab..3eff3ea3b271 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index c0848dfedd50..e9c9a0af508e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -392,7 +392,7 @@ static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
ret = drm_add_edid_modes(connector, edid);
@@ -477,7 +477,7 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
return ERR_PTR(ret);
}
- drm_mode_connector_attach_encoder(connector, hdmi->encoder);
+ drm_connector_attach_encoder(connector, hdmi->encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index da646deedf4b..7717d4269662 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0635c3da7f4..c1f1779c980f 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -71,12 +71,15 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
+ if (kms->funcs->commit) {
+ DRM_DEBUG_ATOMIC("triggering commit\n");
+ kms->funcs->commit(kms, state);
+ }
+
msm_atomic_wait_for_commit_done(dev, state);
kms->funcs->complete_commit(kms, state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
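
The new optional hook fires after drm_atomic_helper_commit_modeset_enables() and before the driver waits for the commit to land, giving a backend one place to flush its staged register writes. A minimal sketch of how a KMS backend might wire it up; the my_kms_* names are illustrative (the real implementations live in the mdp5/dpu backends):

        static void my_kms_commit(struct msm_kms *kms,
                                  struct drm_atomic_state *state)
        {
                /* flush staged register state to hardware for the
                 * CRTCs touched by this commit */
        }

        static const struct msm_kms_funcs my_kms_funcs = {
                /* ... other hooks ... */
                .commit = my_kms_commit,        /* optional, new here */
        };
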
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1ff3fda245d1..f0da0d3c8a80 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -16,26 +16,101 @@
*/
#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
-static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+struct msm_gpu_show_priv {
+ struct msm_gpu_state *state;
+ struct drm_device *dev;
+};
+
+static int msm_gpu_show(struct seq_file *m, void *arg)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ drm_printf(&p, "%s Status:\n", gpu->name);
+ gpu->funcs->show(gpu, show_priv->state, &p);
+
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+ return 0;
+}
+
+static int msm_gpu_release(struct inode *inode, struct file *file)
{
+ struct seq_file *m = file->private_data;
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ gpu->funcs->gpu_state_put(show_priv->state);
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+ kfree(show_priv);
+
+ return single_release(inode, file);
+}
+
+static int msm_gpu_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
+ struct msm_gpu_show_priv *show_priv;
+ int ret;
- if (gpu) {
- seq_printf(m, "%s Status:\n", gpu->name);
- pm_runtime_get_sync(&gpu->pdev->dev);
- gpu->funcs->show(gpu, m);
- pm_runtime_put_sync(&gpu->pdev->dev);
+ if (!gpu)
+ return -ENODEV;
+
+ show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
+ if (!show_priv)
+ return -ENOMEM;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ show_priv->state = gpu->funcs->gpu_state_get(gpu);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(show_priv->state)) {
+ ret = PTR_ERR(show_priv->state);
+ kfree(show_priv);
+ return ret;
}
- return 0;
+ show_priv->dev = dev;
+
+ return single_open(file, msm_gpu_show, show_priv);
}
+static const struct file_operations msm_gpu_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_gpu_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = msm_gpu_release,
+};
+
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -105,7 +180,6 @@ static int show_locked(struct seq_file *m, void *arg)
}
static struct drm_info_list msm_debugfs_list[] = {
- {"gpu", show_locked, 0, msm_gpu_show},
{"gem", show_locked, 0, msm_gem_show},
{ "mm", show_locked, 0, msm_mm_show },
{ "fb", show_locked, 0, msm_fb_show },
@@ -158,6 +232,9 @@ int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
+ debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
+ dev, &msm_gpu_fops);
+
if (priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
if (ret)
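
The debugfs conversion above trades the shared show_locked() wrapper for a private file_operations, so that GPU state can be captured once at open() and released at close(). Stripped of the msm specifics, this is the usual single_open() triad; a condensed sketch with hypothetical foo_state_get/foo_state_put/foo_show helpers:

        static int foo_open(struct inode *inode, struct file *file)
        {
                void *state = foo_state_get(inode->i_private);

                /* single_open() stores state in the seq_file's
                 * ->private field for foo_show() to use */
                return single_open(file, foo_show, state);
        }

        static int foo_release(struct inode *inode, struct file *file)
        {
                struct seq_file *m = file->private_data;

                foo_state_put(m->private);
                return single_release(inode, file);
        }
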
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 021a0b6f9a59..c1abad8a8612 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -15,6 +16,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>
#include "msm_drv.h"
@@ -78,6 +81,63 @@ module_param(modeset, bool, 0600);
* Util/helpers:
*/
+int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
+{
+ struct property *prop;
+ const char *name;
+ struct clk_bulk_data *local;
+ int i = 0, ret, count;
+
+ count = of_property_count_strings(dev->of_node, "clock-names");
+ if (count < 1)
+ return 0;
+
+ local = devm_kcalloc(dev, count, sizeof(struct clk_bulk_data),
+ GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!local[i].id) {
+ devm_kfree(dev, local);
+ return -ENOMEM;
+ }
+
+ i++;
+ }
+
+ ret = devm_clk_bulk_get(dev, count, local);
+
+ if (ret) {
+ for (i = 0; i < count; i++)
+ devm_kfree(dev, (void *) local[i].id);
+ devm_kfree(dev, local);
+
+ return ret;
+ }
+
+ *bulk = local;
+ return count;
+}
+
+struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
+ const char *name)
+{
+ int i;
+ char n[32];
+
+ snprintf(n, sizeof(n), "%s_clk", name);
+
+ for (i = 0; bulk && i < count; i++) {
+ if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
+ return bulk[i].clk;
+ }
+
+ return NULL;
+}
+
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
struct clk *clk;
@@ -149,7 +209,7 @@ struct vblank_event {
bool enable;
};
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
{
struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
struct msm_vblank_ctrl, work);
@@ -197,7 +257,8 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
- queue_work(priv->wq, &vbl_ctrl->work);
+ kthread_queue_work(&priv->disp_thread[crtc_id].worker,
+ &vbl_ctrl->work);
return 0;
}
@@ -208,19 +269,36 @@ static int msm_drm_uninit(struct device *dev)
struct drm_device *ddev = platform_get_drvdata(pdev);
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
+ struct msm_mdss *mdss = priv->mdss;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
+ int i;
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
- cancel_work_sync(&vbl_ctrl->work);
+ kthread_flush_work(&vbl_ctrl->work);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
kfree(vbl_ev);
}
+ /* clean up display commit/event worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->disp_thread[i].thread) {
+ kthread_flush_worker(&priv->disp_thread[i].worker);
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_flush_worker(&priv->event_thread[i].worker);
+ kthread_stop(priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
+ }
+
msm_gem_shrinker_cleanup(ddev);
drm_kms_helper_poll_fini(ddev);
@@ -243,9 +321,6 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
- flush_workqueue(priv->atomic_wq);
- destroy_workqueue(priv->atomic_wq);
-
if (kms && kms->funcs)
kms->funcs->destroy(kms);
@@ -258,7 +333,8 @@ static int msm_drm_uninit(struct device *dev)
component_unbind_all(dev, ddev);
- msm_mdss_destroy(ddev);
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(ddev);
ddev->dev_private = NULL;
drm_dev_unref(ddev);
@@ -268,6 +344,10 @@ static int msm_drm_uninit(struct device *dev)
return 0;
}
+#define KMS_MDP4 4
+#define KMS_MDP5 5
+#define KMS_DPU 3
+
static int get_mdp_ver(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -357,7 +437,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
struct drm_device *ddev;
struct msm_drm_private *priv;
struct msm_kms *kms;
- int ret;
+ struct msm_mdss *mdss;
+ int ret, i;
+ struct sched_param param;
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
@@ -369,53 +451,61 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- drm_dev_unref(ddev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_unref_drm_dev;
}
ddev->dev_private = priv;
priv->dev = ddev;
- ret = msm_mdss_init(ddev);
- if (ret) {
- kfree(priv);
- drm_dev_unref(ddev);
- return ret;
+ switch (get_mdp_ver(pdev)) {
+ case KMS_MDP5:
+ ret = mdp5_mdss_init(ddev);
+ break;
+ case KMS_DPU:
+ ret = dpu_mdss_init(ddev);
+ break;
+ default:
+ ret = 0;
+ break;
}
+ if (ret)
+ goto err_free_priv;
+
+ mdss = priv->mdss;
priv->wq = alloc_ordered_workqueue("msm", 0);
- priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+ kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(ddev);
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
- if (ret) {
- msm_mdss_destroy(ddev);
- kfree(priv);
- drm_dev_unref(ddev);
- return ret;
- }
+ if (ret)
+ goto err_destroy_mdss;
ret = msm_init_vram(ddev);
if (ret)
- goto fail;
+ goto err_msm_uninit;
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
- case 4:
+ case KMS_MDP4:
kms = mdp4_kms_init(ddev);
priv->kms = kms;
break;
- case 5:
+ case KMS_MDP5:
kms = mdp5_kms_init(ddev);
break;
+ case KMS_DPU:
+ kms = dpu_kms_init(ddev);
+ priv->kms = kms;
+ break;
default:
kms = ERR_PTR(-ENODEV);
break;
@@ -430,24 +520,100 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
*/
dev_err(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
- goto fail;
+ goto err_msm_uninit;
}
+ /* Enable normalization of plane zpos */
+ ddev->mode_config.normalize_zpos = true;
+
if (kms) {
ret = kms->funcs->hw_init(kms);
if (ret) {
dev_err(dev, "kms hw init failed: %d\n", ret);
- goto fail;
+ goto err_msm_uninit;
}
}
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
+ /*
+ * This priority was found during empirical testing to provide
+ * appropriate realtime scheduling for processing display updates
+ * while interacting with other realtime and normal-priority tasks.
+ */
+ param.sched_priority = 16;
+ for (i = 0; i < priv->num_crtcs; i++) {
+
+ /* initialize display thread */
+ priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->disp_thread[i].worker);
+ priv->disp_thread[i].dev = ddev;
+ priv->disp_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->disp_thread[i].worker,
+ "crtc_commit:%d", priv->disp_thread[i].crtc_id);
+ if (IS_ERR(priv->disp_thread[i].thread)) {
+ dev_err(dev, "failed to create crtc_commit kthread\n");
+ priv->disp_thread[i].thread = NULL;
+ } else {
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ pr_warn("display thread priority update failed: %d\n",
+ ret);
+ }
+
+ /* initialize event thread */
+ priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->event_thread[i].worker);
+ priv->event_thread[i].dev = ddev;
+ priv->event_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->event_thread[i].worker,
+ "crtc_event:%d", priv->event_thread[i].crtc_id);
+ /*
+ * The event thread should run at the same priority as disp_thread
+ * because it handles frame_done events. A lower-priority event
+ * thread combined with a higher-priority disp_thread can push the
+ * frame_pending counters beyond 2, which can lead to commit
+ * failure at the crtc commit level.
+ */
+ if (IS_ERR(priv->event_thread[i].thread)) {
+ dev_err(dev, "failed to create crtc_event kthread\n");
+ priv->event_thread[i].thread = NULL;
+ } else {
+ ret = sched_setscheduler(priv->event_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ pr_warn("display event thread priority update failed: %d\n",
+ ret);
+ }
+
+ if ((!priv->disp_thread[i].thread) ||
+ !priv->event_thread[i].thread) {
+ /* clean up previously created threads if any */
+ for ( ; i >= 0; i--) {
+ if (priv->disp_thread[i].thread) {
+ kthread_stop(
+ priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_stop(
+ priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
+ }
+ goto err_msm_uninit;
+ }
+ }
+
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
dev_err(dev, "failed to initialize vblank\n");
- goto fail;
+ goto err_msm_uninit;
}
if (kms) {
@@ -456,13 +622,13 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
pm_runtime_put_sync(dev);
if (ret < 0) {
dev_err(dev, "failed to install IRQ handler\n");
- goto fail;
+ goto err_msm_uninit;
}
}
ret = drm_dev_register(ddev, 0);
if (ret)
- goto fail;
+ goto err_msm_uninit;
drm_mode_config_reset(ddev);
@@ -473,15 +639,23 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ret = msm_debugfs_late_init(ddev);
if (ret)
- goto fail;
+ goto err_msm_uninit;
drm_kms_helper_poll_init(ddev);
return 0;
-fail:
+err_msm_uninit:
msm_drm_uninit(dev);
return ret;
+err_destroy_mdss:
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(ddev);
+err_free_priv:
+ kfree(priv);
+err_unref_drm_dev:
+ drm_dev_unref(ddev);
+ return ret;
}
/*
@@ -894,16 +1068,35 @@ static struct drm_driver msm_driver = {
static int msm_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ /* TODO: Use atomic helper suspend/resume */
+ if (kms && kms->funcs && kms->funcs->pm_suspend)
+ return kms->funcs->pm_suspend(dev);
drm_kms_helper_poll_disable(ddev);
+ priv->pm_state = drm_atomic_helper_suspend(ddev);
+ if (IS_ERR(priv->pm_state)) {
+ drm_kms_helper_poll_enable(ddev);
+ return PTR_ERR(priv->pm_state);
+ }
+
return 0;
}
static int msm_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ /* TODO: Use atomic helper suspend/resume */
+ if (kms && kms->funcs && kms->funcs->pm_resume)
+ return kms->funcs->pm_resume(dev);
+ drm_atomic_helper_resume(ddev, priv->pm_state);
drm_kms_helper_poll_enable(ddev);
return 0;
@@ -915,11 +1108,12 @@ static int msm_runtime_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
DBG("");
- if (priv->mdss)
- return msm_mdss_disable(priv->mdss);
+ if (mdss && mdss->funcs)
+ return mdss->funcs->disable(mdss);
return 0;
}
@@ -928,11 +1122,12 @@ static int msm_runtime_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
DBG("");
- if (priv->mdss)
- return msm_mdss_enable(priv->mdss);
+ if (mdss && mdss->funcs)
+ return mdss->funcs->enable(mdss);
return 0;
}
@@ -1031,12 +1226,13 @@ static int add_display_components(struct device *dev,
int ret;
/*
- * MDP5 based devices don't have a flat hierarchy. There is a top level
- * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
- * children devices, find the MDP5 node, and then add the interfaces
- * to our components list.
+ * MDP5/DPU based devices don't have a flat hierarchy. There is a top
+ * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
+ * Populate the children devices, find the MDP5/DPU node, and then add
+ * the interfaces to our components list.
*/
- if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+ if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
+ of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate children devices\n");
@@ -1146,8 +1342,9 @@ static int msm_pdev_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
- { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
+ { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
+ { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
+ { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -1169,6 +1366,7 @@ static int __init msm_drm_register(void)
DBG("init");
msm_mdp_register();
+ msm_dpu_register();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
@@ -1185,6 +1383,7 @@ static void __exit msm_drm_unregister(void)
msm_edp_unregister();
msm_dsi_unregister();
msm_mdp_unregister();
+ msm_dpu_unregister();
}
module_init(msm_drm_register);
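
The msm_drv.c changes replace the ordered workqueues with one dedicated kthread worker per CRTC, each promoted to SCHED_FIFO, and the vblank control work is requeued onto those workers. Reduced to its essence, the per-CRTC setup looks like the sketch below; crtc_id, the enclosing struct, and some_work are assumed, and kthread_run() returns an ERR_PTR on failure, so the task must be checked before use:

        struct sched_param param = { .sched_priority = 16 };
        struct kthread_worker worker;
        struct task_struct *task;

        kthread_init_worker(&worker);
        task = kthread_run(kthread_worker_fn, &worker,
                           "crtc_commit:%d", crtc_id);
        if (!IS_ERR(task))
                sched_setscheduler(task, SCHED_FIFO, &param);

        /* producers then feed the worker from irq/vblank paths,
         * some_work being a kthread_work set up via kthread_init_work() */
        kthread_queue_work(&worker, &some_work);
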
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b2da1fbf81e0..8e510d5c758a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -33,6 +34,7 @@
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
+#include <linux/kthread.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -54,6 +56,12 @@ struct msm_fence_context;
struct msm_gem_address_space;
struct msm_gem_vma;
+#define MAX_CRTCS 8
+#define MAX_PLANES 20
+#define MAX_ENCODERS 8
+#define MAX_BRIDGES 8
+#define MAX_CONNECTORS 8
+
struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;
@@ -68,12 +76,77 @@ enum msm_mdp_plane_property {
};
struct msm_vblank_ctrl {
- struct work_struct work;
+ struct kthread_work work;
struct list_head event_list;
spinlock_t lock;
};
#define MSM_GPU_MAX_RINGS 4
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID: EDID supported
+ */
+enum msm_display_caps {
+ MSM_DISPLAY_CAP_VID_MODE = BIT(0),
+ MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
+ MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
+ MSM_DISPLAY_CAP_EDID = BIT(3),
+};
+
+/**
+ * enum msm_event_wait - type of HW events to wait for
+ * @MSM_ENC_COMMIT_DONE: wait for the driver to flush the registers to HW
+ * @MSM_ENC_TX_COMPLETE: wait for the HW to transfer the frame to panel
+ * @MSM_ENC_VBLANK: wait for the HW VBLANK event (for driver-internal waiters)
+ */
+enum msm_event_wait {
+ MSM_ENC_COMMIT_DONE = 0,
+ MSM_ENC_TX_COMPLETE,
+ MSM_ENC_VBLANK,
+};
+
+/**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm: number of layer mixers used
+ * @num_enc: number of compression encoder blocks used
+ * @num_intf: number of interfaces the panel is mounted on
+ */
+struct msm_display_topology {
+ u32 num_lm;
+ u32 num_enc;
+ u32 num_intf;
+};
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @capabilities: Bitmask of display flags
+ * @num_of_h_tiles: Number of horizontal tiles in case of split interface
+ * @h_tile_instance: Controller instance used per tile. Number of elements is
+ * based on num_of_h_tiles
+ * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
+ * used instead of panel TE in cmd mode panels
+ */
+struct msm_display_info {
+ int intf_type;
+ uint32_t capabilities;
+ uint32_t num_of_h_tiles;
+ uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+ bool is_te_using_watchdog_timer;
+};
+
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+ struct drm_device *dev;
+ struct task_struct *thread;
+ unsigned int crtc_id;
+ struct kthread_worker worker;
+};
struct msm_drm_private {
@@ -84,7 +157,7 @@ struct msm_drm_private {
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
- /* top level MDSS wrapper device (for MDP5 only) */
+ /* top level MDSS wrapper device (for MDP5/DPU only) */
struct msm_mdss *mdss;
/* possibly this should be in the kms component, but it is
@@ -115,22 +188,24 @@ struct msm_drm_private {
struct list_head inactive_list;
struct workqueue_struct *wq;
- struct workqueue_struct *atomic_wq;
unsigned int num_planes;
- struct drm_plane *planes[16];
+ struct drm_plane *planes[MAX_PLANES];
unsigned int num_crtcs;
- struct drm_crtc *crtcs[8];
+ struct drm_crtc *crtcs[MAX_CRTCS];
+
+ struct msm_drm_thread disp_thread[MAX_CRTCS];
+ struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders;
- struct drm_encoder *encoders[8];
+ struct drm_encoder *encoders[MAX_ENCODERS];
unsigned int num_bridges;
- struct drm_bridge *bridges[8];
+ struct drm_bridge *bridges[MAX_BRIDGES];
unsigned int num_connectors;
- struct drm_connector *connectors[8];
+ struct drm_connector *connectors[MAX_CONNECTORS];
/* Properties */
struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
@@ -150,6 +225,7 @@ struct msm_drm_private {
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
+ struct drm_atomic_state *pm_state;
};
struct msm_format {
@@ -174,6 +250,9 @@ struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -184,7 +263,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_fault *vmf);
+vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
@@ -285,6 +364,8 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
+void __init msm_dpu_register(void);
+void __exit msm_dpu_unregister(void);
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
@@ -306,6 +387,10 @@ static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
+int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk);
+
+struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
+ const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
void msm_writel(u32 data, void __iomem *addr);
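
Among the new declarations, msm_clk_bulk_get() and msm_clk_bulk_get_clock() pair the clk_bulk API with the driver's legacy "_clk"-suffixed clock names from older device trees. A usage sketch, assuming a hypothetical probe context with a platform_device *pdev (error handling trimmed):

        struct clk_bulk_data *clocks;
        struct clk *core;
        int nclocks;

        nclocks = msm_clk_bulk_get(&pdev->dev, &clocks); /* parses "clock-names" */
        if (nclocks > 0) {
                /* matches "core" as well as the legacy "core_clk" */
                core = msm_clk_bulk_get_clock(clocks, nclocks, "core");
                clk_bulk_prepare_enable(nclocks, clocks);
        }
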
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 7a16242bf8bf..2a7348aeb38d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -25,49 +26,20 @@
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
- struct drm_gem_object *planes[MAX_PLANE];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return drm_gem_handle_create(file_priv,
- msm_fb->planes[0], handle);
-}
-
-static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = fb->format->num_planes;
-
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < n; i++) {
- struct drm_gem_object *bo = msm_fb->planes[i];
-
- drm_gem_object_put_unlocked(bo);
- }
-
- kfree(msm_fb);
-}
-
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
- .create_handle = msm_framebuffer_create_handle,
- .destroy = msm_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
@@ -77,7 +49,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
- msm_gem_describe(msm_fb->planes[i], m);
+ msm_gem_describe(fb->obj[i], m);
}
}
#endif
@@ -90,12 +62,11 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+ ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -107,26 +78,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], aspace);
+ msm_gem_put_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- if (!msm_fb->planes[plane])
+ if (!fb->obj[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+ return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return msm_fb->planes[plane];
+ return drm_gem_fb_get_obj(fb, plane);
}
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
@@ -202,7 +170,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
- if (n > ARRAY_SIZE(msm_fb->planes)) {
+ if (n > ARRAY_SIZE(fb->obj)) {
ret = -EINVAL;
goto fail;
}
@@ -221,7 +189,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
goto fail;
}
- msm_fb->planes[i] = bos[i];
+ msm_fb->base.obj[i] = bos[i];
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
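
msm_fb.c now keeps its GEM objects in the drm_framebuffer core (fb->obj[]) so the generic drm_gem_fb_* helpers can take over handle creation and destruction. The whole conversion reduces to this shape (a sketch of the pattern, not the driver code itself):

        #include <drm/drm_gem_framebuffer_helper.h>

        static const struct drm_framebuffer_funcs funcs = {
                .create_handle = drm_gem_fb_create_handle,
                .destroy       = drm_gem_fb_destroy,
        };

        /* at init time, instead of a driver-private plane array: */
        fb->obj[i] = bos[i];

        /* and lookups go through the helper: */
        struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, plane);
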
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f583bb4222f9..f59ca27a4a35 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -219,7 +219,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
-int msm_gem_fault(struct vm_fault *vmf)
+vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -227,15 +227,18 @@ int msm_gem_fault(struct vm_fault *vmf)
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
- int ret;
+ int err;
+ vm_fault_t ret;
/*
* vm_ops.open/drm_gem_mmap_obj and close get and put
* a reference on obj. So, we dont need to hold one here.
*/
- ret = mutex_lock_interruptible(&msm_obj->lock);
- if (ret)
+ err = mutex_lock_interruptible(&msm_obj->lock);
+ if (err) {
+ ret = VM_FAULT_NOPAGE;
goto out;
+ }
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
mutex_unlock(&msm_obj->lock);
@@ -245,7 +248,7 @@ int msm_gem_fault(struct vm_fault *vmf)
/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
+ ret = vmf_error(PTR_ERR(pages));
goto out_unlock;
}
@@ -257,27 +260,11 @@ int msm_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
+ ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
mutex_unlock(&msm_obj->lock);
out:
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return ret;
}
/** get mmap offset */
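
The msm_gem_fault() rework follows the tree-wide vm_fault_t conversion: errno-valued locals stay int, the handler's return type becomes vm_fault_t, and errnos are mapped with vmf_error() instead of a hand-rolled switch. The generic shape of such a handler, with foo_lock and pfn as placeholders:

        static vm_fault_t foo_fault(struct vm_fault *vmf)
        {
                vm_fault_t ret;
                int err;

                err = mutex_lock_interruptible(&foo_lock);
                if (err)
                        return VM_FAULT_NOPAGE; /* fault will be retried */

                /* vmf_insert_mixed() already returns a vm_fault_t */
                ret = vmf_insert_mixed(vmf->vma, vmf->address, pfn);

                mutex_unlock(&foo_lock);
                return ret;
        }
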
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 1c09acfb4028..5e808cfec345 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -20,10 +20,11 @@
#include "msm_mmu.h"
#include "msm_fence.h"
+#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
-
+#include <linux/devcoredump.h>
/*
* Power Management:
@@ -87,7 +88,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
static void msm_devfreq_init(struct msm_gpu *gpu)
{
/* We need target support to do devfreq */
- if (!gpu->funcs->gpu_busy)
+ if (!gpu->funcs->gpu_busy || !gpu->core_clk)
return;
msm_devfreq_profile.initial_freq = gpu->fast_rate;
@@ -141,8 +142,6 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- int i;
-
if (gpu->core_clk && gpu->fast_rate)
clk_set_rate(gpu->core_clk, gpu->fast_rate);
@@ -150,28 +149,12 @@ static int enable_clk(struct msm_gpu *gpu)
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 19200000);
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_prepare(gpu->grp_clks[i]);
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_enable(gpu->grp_clks[i]);
-
- return 0;
+ return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}
static int disable_clk(struct msm_gpu *gpu)
{
- int i;
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_disable(gpu->grp_clks[i]);
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_unprepare(gpu->grp_clks[i]);
+ clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
/*
* Set the clock to a deliberately low rate. On older targets the clock
@@ -273,6 +256,123 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
return ret;
}
+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct msm_gpu *gpu = data;
+ struct drm_print_iterator iter;
+ struct drm_printer p;
+ struct msm_gpu_state *state;
+
+ state = msm_gpu_crashstate_get(gpu);
+ if (!state)
+ return 0;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ drm_printf(&p, "---\n");
+ drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(&p, "time: %lld.%09ld\n",
+ state->time.tv_sec, state->time.tv_nsec);
+ if (state->comm)
+ drm_printf(&p, "comm: %s\n", state->comm);
+ if (state->cmd)
+ drm_printf(&p, "cmdline: %s\n", state->cmd);
+
+ gpu->funcs->show(gpu, state, &p);
+
+ msm_gpu_crashstate_put(gpu);
+
+ return count - iter.remain;
+}
+
+static void msm_gpu_devcoredump_free(void *data)
+{
+ struct msm_gpu *gpu = data;
+
+ msm_gpu_crashstate_put(gpu);
+}
+
+static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
+ struct msm_gem_object *obj, u64 iova, u32 flags)
+{
+ struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
+
+ /* Record the metadata for every object; contents only for readable ones */
+
+ state_bo->size = obj->base.size;
+ state_bo->iova = iova;
+
+ /* Only store the data for buffer objects marked for read */
+ if ((flags & MSM_SUBMIT_BO_READ)) {
+ void *ptr;
+
+ state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
+ if (!state_bo->data)
+ return;
+
+ ptr = msm_gem_get_vaddr_active(&obj->base);
+ if (IS_ERR(ptr)) {
+ kvfree(state_bo->data);
+ return;
+ }
+
+ memcpy(state_bo->data, ptr, obj->base.size);
+ msm_gem_put_vaddr(&obj->base);
+ }
+
+ state->nr_bos++;
+}
+
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+ struct msm_gpu_state *state;
+
+ /* Only save one crash state at a time */
+ if (gpu->crashstate)
+ return;
+
+ state = gpu->funcs->gpu_state_get(gpu);
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ /* Fill in the additional crash state information */
+ state->comm = kstrdup(comm, GFP_KERNEL);
+ state->cmd = kstrdup(cmd, GFP_KERNEL);
+
+ if (submit) {
+ int i;
+
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (i = 0; state->bos && i < submit->nr_bos; i++)
+ msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+ submit->bos[i].iova, submit->bos[i].flags);
+ }
+
+ /* Set the active crash state to be dumped on failure */
+ gpu->crashstate = state;
+
+ /* FIXME: Release the crashstate if this errors out? */
+ dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+ msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
+}
+#else
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+}
+#endif
+
/*
* Hangcheck detection for locked gpu:
*/
@@ -314,6 +414,7 @@ static void recover_worker(struct work_struct *work)
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+ char *comm = NULL, *cmd = NULL;
int i;
mutex_lock(&dev->struct_mutex);
@@ -327,7 +428,7 @@ static void recover_worker(struct work_struct *work)
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- char *cmd;
+ comm = kstrdup(task->comm, GFP_ATOMIC);
/*
* So slightly annoying, in other paths like
@@ -340,22 +441,28 @@ static void recover_worker(struct work_struct *work)
* about the submit going away.
*/
mutex_unlock(&dev->struct_mutex);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
mutex_lock(&dev->struct_mutex);
+ }
+ rcu_read_unlock();
+ if (comm && cmd) {
dev_err(dev->dev, "%s: offending task: %s (%s)\n",
- gpu->name, task->comm, cmd);
+ gpu->name, comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
- "offending task: %s (%s)", task->comm, cmd);
-
- kfree(cmd);
- } else {
+ "offending task: %s (%s)", comm, cmd);
+ } else
msm_rd_dump_submit(priv->hangrd, submit, NULL);
- }
- rcu_read_unlock();
}
+ /* Record the crash state */
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ kfree(cmd);
+ kfree(comm);
/*
* Update all the rings with the latest and greatest fence.. this
@@ -660,44 +767,22 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}
-static struct clk *get_clock(struct device *dev, const char *name)
-{
- struct clk *clk = devm_clk_get(dev, name);
-
- return IS_ERR(clk) ? NULL : clk;
-}
-
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
- struct device *dev = &pdev->dev;
- struct property *prop;
- const char *name;
- int i = 0;
+ int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);
- gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
- if (gpu->nr_clocks < 1) {
+ if (ret < 1) {
gpu->nr_clocks = 0;
- return 0;
- }
-
- gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
- GFP_KERNEL);
- if (!gpu->grp_clks) {
- gpu->nr_clocks = 0;
- return -ENOMEM;
+ return ret;
}
- of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
- gpu->grp_clks[i] = get_clock(dev, name);
+ gpu->nr_clocks = ret;
- /* Remember the key clocks that we need to control later */
- if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
- gpu->core_clk = gpu->grp_clks[i];
- else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
- gpu->rbbmtimer_clk = gpu->grp_clks[i];
+ gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
+ gpu->nr_clocks, "core");
- ++i;
- }
+ gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
+ gpu->nr_clocks, "rbbmtimer");
return 0;
}
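
Two independent cleanups land in msm_gpu.c: the hand-rolled clock loops collapse into the clk_bulk API (clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() over gpu->grp_clks), and GPU hangs now feed devcoredump. The devcoredump read callback drives a drm_coredump_printer over the caller-supplied window; in outline (condensed from the code above):

        struct drm_print_iterator iter = {
                .data   = buffer,       /* chunk the core wants filled */
                .start  = offset,       /* absolute offset of this read() */
                .remain = count,
        };
        struct drm_printer p = drm_coredump_printer(&iter);

        drm_printf(&p, "---\n");
        /* ... print the captured state ... */
        return count - iter.remain;     /* bytes actually produced */
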
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index b8241179175a..9122ee6e55e4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -27,6 +27,7 @@
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+struct msm_gpu_state;
struct msm_gpu_config {
const char *ioname;
@@ -64,11 +65,14 @@ struct msm_gpu_funcs {
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
/* show GPU status in debugfs: */
- void (*show)(struct msm_gpu *gpu, struct seq_file *m);
+ void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
/* for generation specific debugfs: */
int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
+ struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
+ int (*gpu_state_put)(struct msm_gpu_state *state);
};
struct msm_gpu {
@@ -108,7 +112,7 @@ struct msm_gpu {
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
- struct clk **grp_clks;
+ struct clk_bulk_data *grp_clks;
int nr_clocks;
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
@@ -129,6 +133,8 @@ struct msm_gpu {
u64 busy_cycles;
ktime_t time;
} devfreq;
+
+ struct msm_gpu_state *crashstate;
};
/* It turns out that all targets use the same ringbuffer size */
@@ -175,6 +181,38 @@ struct msm_gpu_submitqueue {
struct kref ref;
};
+struct msm_gpu_state_bo {
+ u64 iova;
+ size_t size;
+ void *data;
+};
+
+struct msm_gpu_state {
+ struct kref ref;
+ struct timespec64 time;
+
+ struct {
+ u64 iova;
+ u32 fence;
+ u32 seqno;
+ u32 rptr;
+ u32 wptr;
+ void *data;
+ int data_size;
+ } ring[MSM_GPU_MAX_RINGS];
+
+ int nr_registers;
+ u32 *registers;
+
+ u32 rbbm_status;
+
+ char *comm;
+ char *cmd;
+
+ int nr_bos;
+ struct msm_gpu_state_bo *bos;
+};
+
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
@@ -254,4 +292,32 @@ static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
kref_put(&queue->ref, msm_submitqueue_destroy);
}
+static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = NULL;
+
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ if (gpu->crashstate) {
+ kref_get(&gpu->crashstate->ref);
+ state = gpu->crashstate;
+ }
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+
+ return state;
+}
+
+static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
+{
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ if (gpu->crashstate) {
+ if (gpu->funcs->gpu_state_put(gpu->crashstate))
+ gpu->crashstate = NULL;
+ }
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+}
+
#endif /* __MSM_GPU_H__ */
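
The crash state is reference counted so the debugfs node, the devcoredump and the hang path can share one snapshot. A backend's gpu_state_get()/gpu_state_put() pair would typically look like this sketch; the my_* names are illustrative, and the real implementations live in the per-generation adreno code:

        static struct msm_gpu_state *my_gpu_state_get(struct msm_gpu *gpu)
        {
                struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

                if (!state)
                        return ERR_PTR(-ENOMEM);

                kref_init(&state->ref);
                ktime_get_real_ts64(&state->time);
                /* ... snapshot registers, rings, rbbm_status ... */
                return state;
        }

        static int my_gpu_state_put(struct msm_gpu_state *state)
        {
                if (IS_ERR_OR_NULL(state))
                        return 1;

                /* nonzero once the last reference is dropped */
                return kref_put(&state->ref, my_gpu_state_destroy);
        }
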
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index dfd92947de2c..fd88cebb6adb 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -42,6 +43,7 @@ struct msm_kms_funcs {
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
/* modeset, bracketing atomic_commit(): */
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+ void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
/* functions to wait for atomic commit completed on each CRTC */
void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
@@ -50,6 +52,11 @@ struct msm_kms_funcs {
const struct msm_format *(*get_format)(struct msm_kms *kms,
const uint32_t format,
const uint64_t modifiers);
+ /* do format checking on format modified through fb_cmd2 modifiers */
+ int (*check_modified_format)(const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
@@ -60,6 +67,9 @@ struct msm_kms_funcs {
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
+ /* pm suspend/resume hooks */
+ int (*pm_suspend)(struct device *dev);
+ int (*pm_resume)(struct device *dev);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
@@ -86,9 +96,20 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
-int msm_mdss_init(struct drm_device *dev);
-void msm_mdss_destroy(struct drm_device *dev);
-int msm_mdss_enable(struct msm_mdss *mdss);
-int msm_mdss_disable(struct msm_mdss *mdss);
+struct msm_kms *dpu_kms_init(struct drm_device *dev);
+
+struct msm_mdss_funcs {
+ int (*enable)(struct msm_mdss *mdss);
+ int (*disable)(struct msm_mdss *mdss);
+ void (*destroy)(struct drm_device *dev);
+};
+
+struct msm_mdss {
+ struct drm_device *dev;
+ const struct msm_mdss_funcs *funcs;
+};
+
+int mdp5_mdss_init(struct drm_device *dev);
+int dpu_mdss_init(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */
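
msm_kms.h replaces the fixed msm_mdss_* entry points with a small vtable, so MDP5 and DPU can each bring their own MDSS wrapper. A registration sketch, assuming a hypothetical my_mdss backend embedding struct msm_mdss:

        struct my_mdss {
                struct msm_mdss base;
                /* clocks, irq domain, ... */
        };

        static const struct msm_mdss_funcs my_mdss_funcs = {
                .enable  = my_mdss_enable,
                .disable = my_mdss_disable,
                .destroy = my_mdss_destroy,
        };

        int my_mdss_init(struct drm_device *dev)
        {
                struct msm_drm_private *priv = dev->dev_private;
                struct my_mdss *mdss;

                mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
                if (!mdss)
                        return -ENOMEM;

                mdss->base.dev = dev;
                mdss->base.funcs = &my_mdss_funcs;
                priv->mdss = &mdss->base;
                return 0;
        }
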
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6aa6ee16dcbd..2c569e264df3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1017,7 +1017,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
nv_crtc->cursor.show(nv_crtc, true);
out:
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 4feab0a5419d..e7af95d37ddb 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -556,6 +556,6 @@ nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 9805d2cdc1a1..73d41abbb510 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -716,6 +716,6 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
entry->location != DCB_LOC_ON_CHIP)
nv04_tmds_slave_init(encoder);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 501d2d290e9c..70dce544984e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev)
nouveau_display(dev)->init = nv04_display_init;
nouveau_display(dev)->fini = nv04_display_fini;
+ /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
+ dev->driver->driver_features &= ~DRIVER_ATOMIC;
+
nouveau_hw_save_vga_fonts(dev, 1);
nv04_crtc_create(dev, 0);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 01664357d3e1..de4490b4ed30 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -244,7 +244,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
/* Attach it to the specified connector. */
get_slave_funcs(encoder)->create_resources(encoder, connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 6d99f11fee4e..6a4ca139cf5d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -821,6 +821,6 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
encoder->possible_clones = 0;
nv17_tv_create_resources(encoder, connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index b83465ae7c1b..8412119bd940 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -136,12 +136,24 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
{
struct nouveau_cli *cli = (void *)device->object.client;
struct nv50_disp_core_channel_dma_v0 *args = data;
+ u8 type = NVIF_MEM_COHERENT;
int ret;
mutex_init(&dmac->lock);
- ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
- &dmac->push);
+ /* Pascal added support for 47-bit physical addresses, but some
+ * parts of EVO still only accept 40-bit PAs.
+ *
+ * To avoid issues on systems with large amounts of RAM, and on
+ * systems where an IOMMU maps pages at a high address, we need
+ * to allocate push buffers in VRAM instead.
+ *
+ * This appears to match NVIDIA's behaviour on Pascal.
+ */
+ if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
+ type |= NVIF_MEM_VRAM;
+
+ ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push);
if (ret)
return ret;
@@ -216,6 +228,19 @@ void
evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
+
+ /* Push buffer fetches are not coherent with BAR1, we need to ensure
+ * writes have been flushed right through to VRAM before writing PUT.
+ */
+ if (dmac->push.type & NVIF_MEM_VRAM) {
+ struct nvif_device *device = dmac->base.device;
+ nvif_wr32(&device->object, 0x070000, 0x00000001);
+ nvif_msec(device, 2000,
+ if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+ break;
+ );
+ }
+
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
}
@@ -424,7 +449,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
"dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_dac_help);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -850,7 +875,7 @@ nv50_mstc_get_modes(struct drm_connector *connector)
int ret = 0;
mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
- drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
+ drm_connector_update_edid_property(&mstc->connector, mstc->edid);
if (mstc->edid)
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
@@ -927,11 +952,11 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
nouveau_conn_attach_properties(&mstc->connector);
for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
- drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+ drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
- drm_mode_connector_set_path_property(&mstc->connector, path);
+ drm_connector_set_path_property(&mstc->connector, path);
return 0;
}
@@ -1007,7 +1032,7 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
mstc->port = NULL;
drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
- drm_connector_unreference(&mstc->connector);
+ drm_connector_put(&mstc->connector);
}
static void
@@ -1418,7 +1443,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
"sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_sor_help);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nv50_disp *disp = nv50_disp(encoder->dev);
@@ -1576,7 +1601,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
"pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_pior_help);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -1585,8 +1610,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
*****************************************************************************/
static void
-nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
+nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
+ struct nouveau_drm *drm = nouveau_drm(state->dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_core *core = disp->core;
struct nv50_mstm *mstm;
@@ -1618,6 +1644,22 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
}
static void
+nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_plane *plane;
+ int i;
+
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+ if (wndw->func->update)
+ wndw->func->update(wndw, interlock);
+ }
+ }
+}
+
+static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -1684,7 +1726,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
help->disable(encoder);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
if (outp->flush_disable) {
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_wndw(state, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
@@ -1693,15 +1736,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
/* Flush disable. */
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (atom->flush_disable) {
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- struct nv50_wndw *wndw = nv50_wndw(plane);
- if (interlock[wndw->interlock.type] & wndw->interlock.data) {
- if (wndw->func->update)
- wndw->func->update(wndw, interlock);
- }
- }
-
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_wndw(state, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
@@ -1762,18 +1798,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
/* Flush update. */
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- struct nv50_wndw *wndw = nv50_wndw(plane);
- if (interlock[wndw->interlock.type] & wndw->interlock.data) {
- if (wndw->func->update)
- wndw->func->update(wndw, interlock);
- }
- }
+ nv50_disp_atomic_commit_wndw(state, interlock);
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (interlock[NV50_DISP_INTERLOCK_BASE] ||
+ interlock[NV50_DISP_INTERLOCK_OVLY] ||
+ interlock[NV50_DISP_INTERLOCK_WNDW] ||
!atom->state.legacy_cursor_update)
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
else
disp->core->func->update(disp->core, interlock, false);
}
@@ -1871,7 +1903,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
nv50_disp_atomic_commit_tail(state);
drm_for_each_crtc(crtc, dev) {
- if (crtc->state->enable) {
+ if (crtc->state->active) {
if (!drm->have_disp_power_ref) {
drm->have_disp_power_ref = true;
return 0;
@@ -2119,10 +2151,6 @@ nv50_display_destroy(struct drm_device *dev)
kfree(disp);
}
-MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
-static int nouveau_atomic = 0;
-module_param_named(atomic, nouveau_atomic, int, 0400);
-
int
nv50_display_create(struct drm_device *dev)
{
@@ -2147,8 +2175,6 @@ nv50_display_create(struct drm_device *dev)
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
- if (nouveau_atomic)
- dev->driver->driver_features |= DRIVER_ATOMIC;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2231,6 +2257,9 @@ nv50_display_create(struct drm_device *dev)
connector->funcs->destroy(connector);
}
+ /* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
+ dev->vblank_disable_immediate = true;
+
out:
if (ret)
nv50_display_destroy(dev);
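
The disp.c hunks above collapse three copies of the window-flush loop into nv50_disp_atomic_commit_wndw() and pair it with the core flush wherever interlocks are cleared. For reference, a condensed sketch of the extracted helper, not part of the patch itself; nv50_wndw() and the interlock fields are nouveau internals taken from the hunks above, and the function name here is illustrative:

#include <drm/drm_atomic.h>

static void example_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
{
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	int i;

	/* Walk every plane touched by this atomic state... */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw *wndw = nv50_wndw(plane);

		/* ...and push an update only where an interlock bit is pending. */
		if (interlock[wndw->interlock.type] & wndw->interlock.data) {
			if (wndw->func->update)
				wndw->func->update(wndw, interlock);
		}
	}
}

Callers then invoke this immediately before nv50_disp_atomic_commit_core() and clear the interlock mask, as the flush-disable paths above do.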
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index c5a9bc1af5af..2187922e8dc2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -586,7 +586,6 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
wndw->id = index;
wndw->interlock.type = interlock_type;
wndw->interlock.data = interlock_data;
- wndw->ctxdma.parent = &wndw->wndw.base.user;
wndw->ctxdma.parent = &wndw->wndw.base.user;
INIT_LIST_HEAD(&wndw->ctxdma.list);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 20754d9e6883..8407651f6ac6 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -78,7 +78,7 @@ struct nvif_mclass {
#define nvif_mclass(o,m) ({ \
struct nvif_object *object = (o); \
struct nvif_sclass *sclass; \
- const typeof(m[0]) *mclass = (m); \
+ typeof(m[0]) *mclass = (m); \
int ret = -ENODEV; \
int cnt, i, j; \
\
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index e2211bb2cf79..e67a471331b5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -139,7 +139,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
if (chan->ntfy) {
nouveau_vma_del(&chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
- drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
+ drm_gem_object_put_unlocked(&chan->ntfy->gem);
}
if (chan->heap.block_size)
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index debbbf0fd4bd..408b955e5c39 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
INIT_LIST_HEAD(&drm->bl_connectors);
@@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev)
return 0;
}
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
connector->connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
@@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev)
break;
}
}
-
+ drm_connector_list_iter_end(&conn_iter);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7b557c354307..51932c72334e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -363,19 +363,11 @@ module_param_named(hdmimhz, nouveau_hdmimhz, int, 0400);
struct nouveau_encoder *
find_encoder(struct drm_connector *connector, int type)
{
- struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *enc;
- int i, id;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- id = connector->encoder_ids[i];
- if (!id)
- break;
+ int i;
- enc = drm_encoder_find(dev, NULL, id);
- if (!enc)
- continue;
+ drm_connector_for_each_possible_encoder(connector, enc, i) {
nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
@@ -420,7 +412,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
- struct nouveau_encoder *nv_encoder;
+ struct nouveau_encoder *nv_encoder = NULL;
struct drm_encoder *encoder;
int i, panel = -ENODEV;
@@ -436,14 +428,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
}
}
- for (i = 0; nv_encoder = NULL, i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- int id = connector->encoder_ids[i];
- if (id == 0)
- break;
-
- encoder = drm_encoder_find(dev, NULL, id);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
nv_encoder = nouveau_encoder(encoder);
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
@@ -565,7 +550,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
kfree(nv_connector->edid);
nv_connector->edid = NULL;
}
@@ -590,7 +575,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
else
nv_connector->edid = drm_get_edid(connector, i2c);
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
nv_connector->edid);
if (!nv_connector->edid) {
NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
@@ -672,7 +657,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
kfree(nv_connector->edid);
nv_connector->edid = NULL;
}
@@ -736,7 +721,7 @@ out:
status = connector_status_unknown;
#endif
- drm_mode_connector_update_edid_property(connector, nv_connector->edid);
+ drm_connector_update_edid_property(connector, nv_connector->edid);
nouveau_connector_set_encoder(connector, nv_encoder);
return status;
}
@@ -1208,14 +1193,19 @@ nouveau_connector_create(struct drm_device *dev, int index)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int type, ret = 0;
bool dummy;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
nv_connector = nouveau_connector(connector);
- if (nv_connector->index == index)
+ if (nv_connector->index == index) {
+ drm_connector_list_iter_end(&conn_iter);
return connector;
+ }
}
+ drm_connector_list_iter_end(&conn_iter);
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index a4d1a059bd3d..dc7454e7f19a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -33,6 +33,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
#include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
struct nvkm_i2c_port;
@@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
+static inline bool
+nouveau_connector_is_mst(struct drm_connector *connector)
+{
+ const struct nouveau_encoder *nv_encoder;
+ const struct drm_encoder *encoder;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return false;
+
+ nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY);
+ if (!nv_encoder)
+ return false;
+
+ encoder = &nv_encoder->base.base;
+ return encoder->encoder_type == DRM_MODE_ENCODER_DPMST;
+}
+
+#define nouveau_for_each_non_mst_connector_iter(connector, iter) \
+ drm_for_each_connector_iter(connector, iter) \
+ for_each_if(!nouveau_connector_is_mst(connector))
+
static inline struct nouveau_connector *
nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
{
struct drm_device *dev = nv_crtc->base.dev;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct nouveau_connector *nv_connector = NULL;
struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder && connector->encoder->crtc == crtc)
- return nouveau_connector(connector);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+ if (connector->encoder && connector->encoder->crtc == crtc) {
+ nv_connector = nouveau_connector(connector);
+ break;
+ }
}
+ drm_connector_list_iter_end(&conn_iter);
- return NULL;
+ return nv_connector;
}
struct drm_connector *
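
The connector hunks above and below switch bare list_for_each_entry() walks of mode_config.connector_list to the reference-counted iterator, so connectors hotplugged or destroyed mid-walk (MST, in particular) cannot be freed under us. A minimal sketch of the pattern, assuming only core DRM (drm_connector.h); find_connector() and the type filter are illustrative, and nouveau additionally filters MST connectors via for_each_if() as shown above:

#include <drm/drm_connector.h>

static struct drm_connector *find_connector(struct drm_device *dev, int type)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector, *found = NULL;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type == type) {
			found = connector;
			break;
		}
	}
	/* Every begin() needs a matching end(), including on early exit. */
	drm_connector_list_iter_end(&conn_iter);

	return found;
}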
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 963a4dba8213..9109b69cd052 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -160,7 +160,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
args.ustate = value;
}
+ ret = pm_runtime_get_sync(drm->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
+ pm_runtime_put_autosuspend(drm->dev);
if (ret < 0)
return ret;
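
The debugfs hunk above brackets the reclock method with a runtime-PM reference so the GPU is awake for the call; -EACCES from pm_runtime_get_sync() means runtime PM is disabled, i.e. the device is already powered, so the call proceeds anyway. A minimal sketch of the bracket against a generic struct device; call_with_device_awake() and the op callback are illustrative:

#include <linux/pm_runtime.h>

static int call_with_device_awake(struct device *dev,
				  int (*op)(struct device *))
{
	int ret;

	/* Resume the device (if suspended) and take a usage reference. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	ret = op(dev);			/* work that needs the device powered */

	/* Drop the reference and arm the autosuspend timer. */
	pm_runtime_put_autosuspend(dev);
	return ret;
}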
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 774b429142bc..139368b31916 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -205,7 +205,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
if (fb->nvbo)
- drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
+ drm_gem_object_put_unlocked(&fb->nvbo->gem);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
@@ -287,7 +287,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
if (ret == 0)
return &fb->base;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ERR_PTR(ret);
}
@@ -404,6 +404,7 @@ nouveau_display_init(struct drm_device *dev)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int ret;
ret = disp->init(dev);
@@ -411,10 +412,12 @@ nouveau_display_init(struct drm_device *dev)
return ret;
/* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_notify_get(&conn->hpd);
}
+ drm_connector_list_iter_end(&conn_iter);
/* enable flip completion events */
nvif_notify_get(&drm->flip);
@@ -427,6 +430,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
if (!suspend) {
if (drm_drv_uses_atomic_modeset(dev))
@@ -439,10 +443,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
nvif_notify_put(&drm->flip);
/* disable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_notify_put(&conn->hpd);
}
+ drm_connector_list_iter_end(&conn_iter);
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
@@ -939,7 +945,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
return ret;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
return ret;
}
@@ -954,7 +960,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 775443c9af94..c7ec86d6c3c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -81,6 +81,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
static int nouveau_runtime_pm = -1;
module_param_named(runpm, nouveau_runtime_pm, int, 0400);
@@ -509,6 +513,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
pci_set_master(pdev);
+ if (nouveau_atomic)
+ driver_pci.driver_features |= DRIVER_ATOMIC;
+
ret = drm_get_pci_dev(pdev, pent, &driver_pci);
if (ret) {
nvkm_device_del(&device);
@@ -874,22 +881,11 @@ nouveau_pmops_runtime_resume(struct device *dev)
static int
nouveau_pmops_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct drm_crtc *crtc;
-
if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
- }
- }
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
@@ -912,8 +908,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
- if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL)))
- return ret;
+ if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
ret = nouveau_cli_init(drm, name, cli);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 85c1f10bc2b6..844498c4267c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
nouveau_vma_del(&nouveau_fb->vma);
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_framebuffer_unreference(&nouveau_fb->base);
+ drm_framebuffer_put(&nouveau_fb->base);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 300daee74209..b56524d343c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -274,7 +274,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->gem);
return ret;
}
@@ -354,7 +354,7 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_unreference_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->gem);
}
}
@@ -400,14 +400,14 @@ retry:
nvbo = nouveau_gem_object(gem);
if (nvbo == res_bo) {
res_bo = NULL;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
continue;
}
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
NV_PRINTK(err, cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
ret = -EINVAL;
break;
}
@@ -616,7 +616,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct nouveau_bo *nvbo;
uint32_t data;
- if (unlikely(r->bo_index > req->nr_buffers)) {
+ if (unlikely(r->bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
@@ -626,7 +626,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
if (b->presumed.valid)
continue;
- if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+ if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
@@ -894,7 +894,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
ret = lret;
nouveau_bo_sync_for_cpu(nvbo);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ret;
}
@@ -913,7 +913,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
nvbo = nouveau_gem_object(gem);
nouveau_bo_sync_for_device(nvbo);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -930,7 +930,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
return -ENOENT;
ret = nouveau_gem_info(file_priv, gem, req);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ret;
}
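
The reloc hunks above fix an off-by-one: an index into an array of nr_buffers entries is valid only for 0..nr_buffers-1, so the old "> nr_buffers" test let an attacker-controlled index equal to the element count slip through. A stand-alone check of the corrected comparison:

#include <stdint.h>
#include <stdio.h>

static int index_ok(uint32_t idx, uint32_t nr_buffers)
{
	/* Valid indices are 0..nr_buffers-1; idx == nr_buffers is one past the end. */
	return idx < nr_buffers;
}

int main(void)
{
	printf("%d\n", index_ok(3, 4));	/* 1: last valid element */
	printf("%d\n", index_ok(4, 4));	/* 0: rejected by >=, allowed by the old > test */
	return 0;
}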
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 44178b4c3599..08a1ab6b150d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -69,8 +69,8 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
- if (kstrtol(buf, 10, &value) == -EINVAL)
- return count;
+ if (kstrtol(buf, 10, &value))
+ return -EINVAL;
therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST,
value / 1000);
@@ -102,8 +102,8 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
- if (kstrtol(buf, 10, &value) == -EINVAL)
- return count;
+ if (kstrtol(buf, 10, &value))
+ return -EINVAL;
therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST,
value / 1000);
@@ -156,7 +156,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
long value;
int ret;
- if (kstrtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value))
return -EINVAL;
ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY, value);
@@ -179,7 +179,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
long value;
int ret;
- if (kstrtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value))
return -EINVAL;
ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY, value);
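
The hwmon hunks above fix inverted error handling: kstrtol() returns 0 on success and a negative errno (-EINVAL or -ERANGE) on failure, so comparing against -EINVAL alone missed range errors, and returning count reported success to userspace for unparseable input. A minimal kernel-style sketch of the corrected contract; example_store() and its out parameter are illustrative:

#include <linux/kernel.h>

static ssize_t example_store(const char *buf, size_t count, long *out)
{
	long value;

	if (kstrtol(buf, 10, &value))	/* any nonzero return is a parse error */
		return -EINVAL;

	*out = value;
	return count;			/* consumed the whole write */
}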
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 1ada186fab77..039e23548e08 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -36,7 +36,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
ret = drm_dev_register(drm, 0);
if (ret < 0) {
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 8c093ca4222e..8edb9f2a4269 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
- * All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
- * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index d0322ce85172..1a47c40e171b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -87,11 +87,12 @@ nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->info) {
- if ((engine = nvkm_engine_ref(engine))) {
+ if (!IS_ERR((engine = nvkm_engine_ref(engine)))) {
int ret = engine->func->info(engine, mthd, data);
nvkm_engine_unref(&engine);
return ret;
}
+ return PTR_ERR(engine);
}
return -ENOSYS;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 78597da6313a..0e372a190d3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -23,6 +23,10 @@
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
@@ -105,6 +109,15 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
unsigned long pgsize_bitmap;
int ret;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+ }
+#endif
+
if (!tdev->func->iommu_bit)
return;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
index 29e6dd58ac48..525f95d06429 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
@@ -52,7 +52,7 @@ void
gf119_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->base.engine.subdev.device;
- const u64 mask = 0x00000001 << chan->chid.user;
+ const u32 mask = 0x00000001 << chan->chid.user;
if (!en) {
nvkm_mask(device, 0x610090, mask, 0x00000000);
nvkm_mask(device, 0x6100a0, mask, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 57719f675eec..bcf32d92ee5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -166,8 +166,8 @@ void
nv50_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->base.engine.subdev.device;
- const u64 mask = 0x00010001 << chan->chid.user;
- const u64 data = en ? 0x00010000 : 0x00000000;
+ const u32 mask = 0x00010001 << chan->chid.user;
+ const u32 data = en ? 0x00010000 << chan->chid.user : 0x00000000;
nvkm_mask(device, 0x610028, mask, data);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 19173ea19096..3b3327789ae7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -25,24 +25,31 @@
#include <nvif/class.h>
static void
-gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
+gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm)
{
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730));
- u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734));
+ u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730 + (sm * 0x80)));
+ u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734 + (sm * 0x80)));
const struct nvkm_enum *warp;
char glob[128];
nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
- nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
+ nvkm_error(subdev, "GPC%i/TPC%i/SM%d trap: "
"global %08x [%s] warp %04x [%s]\n",
- gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
+ gpc, tpc, sm, gerr, glob, werr, warp ? warp->name : "");
+
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730 + sm * 0x80), 0x00000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr);
+}
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730), 0x00000000);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734), gerr);
+static void
+gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
+{
+ gv100_gr_trap_sm(gr, gpc, tpc, 0);
+ gv100_gr_trap_sm(gr, gpc, tpc, 1);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
index 20b6fc8243e0..71524548de32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
@@ -58,8 +58,14 @@ nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
h->ecount = nvbios_rd08(b, h->offset + 0x5);
h->base_id = nvbios_rd08(b, h->offset + 0x0f);
- h->boost_id = nvbios_rd08(b, h->offset + 0x10);
- h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ if (h->hlen > 0x10)
+ h->boost_id = nvbios_rd08(b, h->offset + 0x10);
+ else
+ h->boost_id = 0xff;
+ if (h->hlen > 0x11)
+ h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ else
+ h->tdp_id = 0xff;
return 0;
default:
return -EINVAL;
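
The vpstate hunk above stops reading boost/tdp ids that shorter (older Fermi) headers don't contain, falling back to 0xff. A stand-alone sketch of that guard, with a hypothetical 0x10-byte header:

#include <stdint.h>
#include <stdio.h>

static uint8_t read_opt_id(const uint8_t *hdr, uint8_t hlen, uint8_t off)
{
	/* The field exists only if the header extends past this offset. */
	return (hlen > off) ? hdr[off] : 0xff;
}

int main(void)
{
	uint8_t hdr[0x10] = { 0 };	/* hypothetical 0x10-byte header */

	/* Offsets 0x10 and 0x11 lie beyond hlen, so both ids fall back to 0xff. */
	printf("boost=%02x tdp=%02x\n",
	       read_opt_id(hdr, sizeof(hdr), 0x10),
	       read_opt_id(hdr, sizeof(hdr), 0x11));
	return 0;
}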
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index 007bf4af33b9..16ad91c91a7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -133,8 +133,14 @@ nvkm_fault_oneinit(struct nvkm_subdev *subdev)
}
}
- return nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
- &fault->event);
+ ret = nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
+ &fault->event);
+ if (ret)
+ return ret;
+
+ if (fault->func->oneinit)
+ ret = fault->func->oneinit(fault);
+ return ret;
}
static void *
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 73c7728b5969..3cd610d7deb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -176,8 +176,17 @@ gv100_fault_init(struct nvkm_fault *fault)
nvkm_notify_get(&fault->nrpfb);
}
+static int
+gv100_fault_oneinit(struct nvkm_fault *fault)
+{
+ return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
+ gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
+ &fault->nrpfb);
+}
+
static const struct nvkm_fault_func
gv100_fault = {
+ .oneinit = gv100_fault_oneinit,
.init = gv100_fault_init,
.fini = gv100_fault_fini,
.intr = gv100_fault_intr,
@@ -192,15 +201,5 @@ int
gv100_fault_new(struct nvkm_device *device, int index,
struct nvkm_fault **pfault)
{
- struct nvkm_fault *fault;
- int ret;
-
- ret = nvkm_fault_new_(&gv100_fault, device, index, &fault);
- *pfault = fault;
- if (ret)
- return ret;
-
- return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
- gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
- &fault->nrpfb);
+ return nvkm_fault_new_(&gv100_fault, device, index, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index 44843ecf12b0..e4d2f5234fd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -20,6 +20,7 @@ int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
int index, struct nvkm_fault **);
struct nvkm_fault_func {
+ int (*oneinit)(struct nvkm_fault *);
void (*init)(struct nvkm_fault *);
void (*fini)(struct nvkm_fault *);
void (*intr)(struct nvkm_fault *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 73b5d46104bd..434d2fc5bb1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -140,6 +140,9 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init)
fb->func->init(fb);
+ if (fb->func->init_remapper)
+ fb->func->init_remapper(fb);
+
if (fb->func->init_page) {
ret = fb->func->init_page(fb);
if (WARN_ON(ret))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index dffe1f5e1071..8205ce436b3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -37,6 +37,14 @@ gp100_fb_init_unkn(struct nvkm_fb *base)
}
void
+gp100_fb_init_remapper(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ /* Disable address remapper. */
+ nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000);
+}
+
+void
gp100_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
@@ -56,6 +64,7 @@ gp100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
+ .init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.ram_new = gp100_ram_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index b84b9861ef26..b4d74e815674 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -31,6 +31,7 @@ gp102_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
+ .init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.ram_new = gp100_ram_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 2857f31466bf..1e4ad61c19e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -11,6 +11,7 @@ struct nvkm_fb_func {
u32 (*tags)(struct nvkm_fb *);
int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
+ void (*init_remapper)(struct nvkm_fb *);
int (*init_page)(struct nvkm_fb *);
void (*init_unkn)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);
@@ -69,5 +70,6 @@ int gf100_fb_init_page(struct nvkm_fb *);
int gm200_fb_init_page(struct nvkm_fb *);
+void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index a721354249ce..d02e183717dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -414,6 +414,20 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
{
struct ls_ucode_img *_img;
u32 pos = 0;
+ u32 max_desc_size = 0;
+ u8 *gdesc;
+
+ /* Figure out how large we need gdesc to be. */
+ list_for_each_entry(_img, imgs, node) {
+ const struct acr_r352_ls_func *ls_func =
+ acr->func->ls_func[_img->falcon_id];
+
+ max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
+ }
+
+ gdesc = kmalloc(max_desc_size, GFP_KERNEL);
+ if (!gdesc)
+ return -ENOMEM;
nvkm_kmap(wpr_blob);
@@ -421,7 +435,6 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
const struct acr_r352_ls_func *ls_func =
acr->func->ls_func[_img->falcon_id];
- u8 gdesc[ls_func->bl_desc_size];
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -447,6 +460,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
nvkm_done(wpr_blob);
+ kfree(gdesc);
+
return 0;
}
@@ -771,7 +786,11 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
struct fw_bl_desc *hsbl_desc;
void *bl, *blob_data, *hsbl_code, *hsbl_data;
u32 code_size;
- u8 bl_desc[bl_desc_size];
+ u8 *bl_desc;
+
+ bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
+ if (!bl_desc)
+ return -ENOMEM;
/* Find the bootloader descriptor for our blob and copy it */
if (blob == acr->load_blob) {
@@ -802,7 +821,6 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
code_size, hsbl_desc->start_tag, 0, false);
/* Generate the BL header */
- memset(bl_desc, 0, bl_desc_size);
acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
/*
@@ -811,6 +829,7 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
bl_desc_size, 0);
+ kfree(bl_desc);
return hsbl_desc->start_tag << 8;
}
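
Both ls_write_wpr() rewrites above (and the acr_r367.c one below) replace a per-iteration variable-length array with a single heap buffer sized to the largest descriptor, part of the kernel-wide VLA removal. A stand-alone C rendering of the pattern, with made-up sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct img { size_t bl_desc_size; };

int main(void)
{
	struct img imgs[] = { { 64 }, { 256 }, { 128 } };
	size_t i, max_size = 0;
	unsigned char *gdesc;

	/* First pass: figure out how large the shared buffer must be. */
	for (i = 0; i < sizeof(imgs) / sizeof(imgs[0]); i++)
		if (imgs[i].bl_desc_size > max_size)
			max_size = imgs[i].bl_desc_size;

	gdesc = malloc(max_size);
	if (!gdesc)
		return 1;

	/* Second pass: reuse the one allocation for every image. */
	for (i = 0; i < sizeof(imgs) / sizeof(imgs[0]); i++)
		memset(gdesc, 0, imgs[i].bl_desc_size); /* per-image descriptor work */

	free(gdesc);
	printf("buffer sized to %zu bytes\n", max_size);
	return 0;
}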
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
index 866877b88797..978ad0790367 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -265,6 +265,19 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
{
struct ls_ucode_img *_img;
u32 pos = 0;
+ u32 max_desc_size = 0;
+ u8 *gdesc;
+
+ list_for_each_entry(_img, imgs, node) {
+ const struct acr_r352_ls_func *ls_func =
+ acr->func->ls_func[_img->falcon_id];
+
+ max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
+ }
+
+ gdesc = kmalloc(max_desc_size, GFP_KERNEL);
+ if (!gdesc)
+ return -ENOMEM;
nvkm_kmap(wpr_blob);
@@ -272,7 +285,6 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
const struct acr_r352_ls_func *ls_func =
acr->func->ls_func[_img->falcon_id];
- u8 gdesc[ls_func->bl_desc_size];
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -298,6 +310,8 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
nvkm_done(wpr_blob);
+ kfree(gdesc);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index 30491d132d59..df8b919dcf09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -129,6 +129,7 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
return 0;
}
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
@@ -144,3 +145,4 @@ MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
index 632e9545e292..28ca29d0eeee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
@@ -74,6 +74,7 @@ gp10b_secboot_new(struct nvkm_device *device, int index,
return 0;
}
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
@@ -91,3 +92,4 @@ MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+#endif
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 92fe125ce22e..f34c06bb5bd7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -4,7 +4,7 @@
* Copyright (C) 2010 Nokia Corporation
*
* Original Driver Author: Imre Deak <imre.deak@nokia.com>
- * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@ti.com>
* Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index b5d8a00df811..a1f1dc18407a 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -2,7 +2,7 @@
* Toppoly TD028TTEC1 panel support
*
* Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Neo 1973 code (jbt6k74.c):
* Copyright (C) 2006-2007 by OpenMoko, Inc.
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index acef7ece5783..07d00a186f15 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
@@ -82,7 +82,7 @@ static void __exit omap_dss_exit(void)
module_init(omap_dss_init);
module_exit(omap_dss_exit);
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 7f3ac6b13b56..84f274c4a4cb 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 424143128cd4..9e7fcbd57e52 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 3d662e6805eb..9fcc50217133 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index d4a680629825..74467b308721 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 0b908e9de792..cb80ddaa19d2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 847c78ade024..38302631b64b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
@@ -180,6 +180,9 @@ struct dss_pll_hw {
/* DRA7 errata i886: use high N & M to avoid jitter */
bool errata_i886;
+
+ /* DRA7 errata i932: retry pll lock on failure */
+ bool errata_i932;
};
struct dss_pll {
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 078b0e8216c3..ff362b38bf0d 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -16,6 +16,7 @@
#define DSS_SUBSYS_NAME "PLL"
+#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -381,6 +382,22 @@ static int dss_wait_hsdiv_ack(struct dss_pll *pll, u32 hsdiv_ack_mask)
return -ETIMEDOUT;
}
+static bool pll_is_locked(u32 stat)
+{
+ /*
+ * Required value for each bitfield listed below
+ *
+ * PLL_STATUS[6] = 0 PLL_BYPASS
+ * PLL_STATUS[5] = 0 PLL_HIGHJITTER
+ *
+ * PLL_STATUS[3] = 0 PLL_LOSSREF
+ * PLL_STATUS[2] = 0 PLL_RECAL
+ * PLL_STATUS[1] = 1 PLL_LOCK
+ * PLL_STATUS[0] = 1 PLL_CTRL_RESET_DONE
+ */
+ return ((stat & 0x6f) == 0x3);
+}
+
int dss_pll_write_config_type_a(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo)
{
@@ -436,18 +453,54 @@ int dss_pll_write_config_type_a(struct dss_pll *pll,
l = FLD_MOD(l, 0, 25, 25); /* M7_CLOCK_EN */
writel_relaxed(l, base + PLL_CONFIGURATION2);
- writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+ if (hw->errata_i932) {
+ int cnt = 0;
+ u32 sleep_time;
+ const u32 max_lock_retries = 20;
- if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
- DSSERR("DSS DPLL GO bit not going down.\n");
- r = -EIO;
- goto err;
- }
+ /*
+ * Calculate wait time for PLL LOCK
+ * 1000 REFCLK cycles in us.
+ */
+ sleep_time = DIV_ROUND_UP(1000*1000*1000, cinfo->fint);
- if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
- DSSERR("cannot lock DSS DPLL\n");
- r = -EIO;
- goto err;
+ for (cnt = 0; cnt < max_lock_retries; cnt++) {
+ writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+
+ /*
+ * read the register back to ensure the write is
+ * flushed
+ */
+ readl_relaxed(base + PLL_GO);
+
+ usleep_range(sleep_time, sleep_time + 5);
+ l = readl_relaxed(base + PLL_STATUS);
+
+ if (pll_is_locked(l) &&
+ !(readl_relaxed(base + PLL_GO) & 0x1))
+ break;
+
+ }
+
+ if (cnt == max_lock_retries) {
+ DSSERR("cannot lock PLL\n");
+ r = -EIO;
+ goto err;
+ }
+ } else {
+ writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+
+ if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
+ DSSERR("DSS DPLL GO bit not going down.\n");
+ r = -EIO;
+ goto err;
+ }
+
+ if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
+ DSSERR("cannot lock DSS DPLL\n");
+ r = -EIO;
+ goto err;
+ }
}
l = readl_relaxed(base + PLL_CONFIGURATION2);
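
The i932 path above sleeps for 1000 REFCLK cycles between lock polls: 1000 cycles at fint Hz last 1000/fint seconds, i.e. ceil(1e9/fint) microseconds, which is exactly what DIV_ROUND_UP(1000*1000*1000, cinfo->fint) computes. A quick stand-alone check with an assumed fint:

#include <stdio.h>

int main(void)
{
	unsigned long fint = 2400000;	/* hypothetical 2.4 MHz fint */

	/* DIV_ROUND_UP(a, b) is (a + b - 1) / b */
	unsigned long sleep_us = (1000UL * 1000 * 1000 + fint - 1) / fint;

	printf("%lu us\n", sleep_us);	/* ~417 us for this fint */
	return 0;
}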
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 1e2c931f6acf..69c3b7a3d5c7 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 24d1ced210bd..ac01907dcc34 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* VENC settings from TI's DSS driver
*
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index 585ed94ccf17..cb46311f92c9 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -134,6 +134,7 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.has_refsel = true,
.errata_i886 = true,
+ .errata_i932 = true,
};
struct dss_pll *dss_video_pll_init(struct dss_device *dss,
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 5cde26ac937b..2ddb856666c4 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -126,14 +126,14 @@ static int omap_connector_get_modes(struct drm_connector *connector)
if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
drm_edid_is_valid(edid)) {
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
connector, edid);
n = drm_add_edid_modes(connector, edid);
omap_connector->hdmi_mode =
drm_detect_hdmi_monitor(edid);
} else {
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
connector, NULL);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index b42e286616b0..91cf043f2b6b 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -30,16 +30,11 @@ static int gem_show(struct seq_file *m, void *arg)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct omap_drm_private *priv = dev->dev_private;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(m, "All Objects:\n");
+ mutex_lock(&priv->list_lock);
omap_gem_describe_objects(&priv->obj_list, m);
-
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&priv->list_lock);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index ef3b0e3571ec..1b6601e9b107 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -274,7 +274,7 @@ static int omap_modeset_init(struct drm_device *dev)
if (IS_ERR(crtc))
return PTR_ERR(crtc);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << crtc_idx);
priv->crtcs[priv->num_crtcs++] = crtc;
@@ -493,7 +493,7 @@ static struct drm_driver omap_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = omap_gem_prime_export,
.gem_prime_import = omap_gem_prime_import,
- .gem_free_object = omap_gem_free_object,
+ .gem_free_object_unlocked = omap_gem_free_object,
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
@@ -540,7 +540,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
priv->omaprev = soc ? (unsigned int)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
- spin_lock_init(&priv->list_lock);
+ mutex_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
/* Allocate and initialize the DRM device. */
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 6eaee4df4559..f27c8e216adf 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -71,7 +71,7 @@ struct omap_drm_private {
struct workqueue_struct *wq;
/* lock for obj_list below */
- spinlock_t list_lock;
+ struct mutex list_lock;
/* list of GEM objects: */
struct list_head obj_list;
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 5fd22ca73913..9f1e3d8f8488 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -51,9 +52,6 @@ static const u32 formats[] = {
/* per-plane info for the fb: */
struct plane {
- struct drm_gem_object *bo;
- u32 pitch;
- u32 offset;
dma_addr_t dma_addr;
};
@@ -68,56 +66,28 @@ struct omap_framebuffer {
struct mutex lock;
};
-static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- return drm_gem_handle_create(file_priv,
- omap_fb->planes[0].bo, handle);
-}
-
-static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = fb->format->num_planes;
-
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
-
- drm_gem_object_unreference_unlocked(plane->bo);
- }
-
- kfree(omap_fb);
-}
-
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
- .create_handle = omap_framebuffer_create_handle,
- .destroy = omap_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
-static u32 get_linear_addr(struct plane *plane,
+static u32 get_linear_addr(struct drm_framebuffer *fb,
const struct drm_format_info *format, int n, int x, int y)
{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ struct plane *plane = &omap_fb->planes[n];
u32 offset;
- offset = plane->offset
+ offset = fb->offsets[n]
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
- + (y * plane->pitch / (n == 0 ? 1 : format->vsub));
+ + (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub));
return plane->dma_addr + offset;
}
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- struct plane *plane = &omap_fb->planes[0];
-
- return omap_gem_flags(plane->bo) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -176,7 +146,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@@ -201,12 +171,12 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x += w - 1;
/* Note: x and y are in TILER units, not pixels */
- omap_gem_rotated_dma_addr(plane->bo, orient, x, y,
+ omap_gem_rotated_dma_addr(fb->obj[0], orient, x, y,
&info->paddr);
info->rotation_type = OMAP_DSS_ROT_TILER;
info->rotation = state->rotation ?: DRM_MODE_ROTATE_0;
/* Note: stride in TILER units, not pixels */
- info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
+ info->screen_width = omap_gem_tiled_stride(fb->obj[0], orient);
} else {
switch (state->rotation & DRM_MODE_ROTATE_MASK) {
case 0:
@@ -221,10 +191,10 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
break;
}
- info->paddr = get_linear_addr(plane, format, 0, x, y);
+ info->paddr = get_linear_addr(fb, format, 0, x, y);
info->rotation_type = OMAP_DSS_ROT_NONE;
info->rotation = DRM_MODE_ROTATE_0;
- info->screen_width = plane->pitch;
+ info->screen_width = fb->pitches[0];
}
/* convert to pixels: */
@@ -234,11 +204,11 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
plane = &omap_fb->planes[1];
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
- omap_gem_rotated_dma_addr(plane->bo, orient, x/2, y/2,
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
- info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
+ info->p_uv_addr = get_linear_addr(fb, format, 1, x, y);
}
} else {
info->p_uv_addr = 0;
@@ -261,10 +231,10 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_pin(plane->bo, &plane->dma_addr);
+ ret = omap_gem_pin(fb->obj[i], &plane->dma_addr);
if (ret)
goto fail;
- omap_gem_dma_sync_buffer(plane->bo, DMA_TO_DEVICE);
+ omap_gem_dma_sync_buffer(fb->obj[i], DMA_TO_DEVICE);
}
omap_fb->pin_count++;
@@ -276,7 +246,7 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
fail:
for (i--; i >= 0; i--) {
struct plane *plane = &omap_fb->planes[i];
- omap_gem_unpin(plane->bo);
+ omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
@@ -302,54 +272,25 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- omap_gem_unpin(plane->bo);
+ omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
}
-/* iterate thru all the connectors, returning ones that are attached
- * to the same fb..
- */
-struct drm_connector *omap_framebuffer_get_next_connector(
- struct drm_framebuffer *fb, struct drm_connector *from)
-{
- struct drm_device *dev = fb->dev;
- struct list_head *connector_list = &dev->mode_config.connector_list;
- struct drm_connector *connector = from;
-
- if (!from)
- return list_first_entry_or_null(connector_list, typeof(*from),
- head);
-
- list_for_each_entry_from(connector, connector_list, head) {
- if (connector != from) {
- struct drm_encoder *encoder = connector->encoder;
- struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
- if (crtc && crtc->primary->fb == fb)
- return connector;
-
- }
- }
-
- return NULL;
-}
-
#ifdef CONFIG_DEBUG_FS
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->format->format);
for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
- i, plane->offset, plane->pitch);
- omap_gem_describe(plane->bo, m);
+ i, fb->offsets[i], fb->pitches[i]);
+ omap_gem_describe(fb->obj[i], m);
}
}
#endif
@@ -454,9 +395,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
goto fail;
}
- plane->bo = bos[i];
- plane->offset = mode_cmd->offsets[i];
- plane->pitch = pitch;
+ fb->obj[i] = bos[i];
plane->dma_addr = 0;
}
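
The reworked get_linear_addr() above resolves a pixel to a byte address as offsets[n] + x*cpp[n]/hsub + y*pitches[n]/vsub, where the subsampling divisors apply only to the chroma planes (n > 0). A stand-alone check of that arithmetic for a hypothetical 1920x1080 NV12 layout (plane 1 is CbCr, cpp = 2, subsampled 2x2):

#include <stdio.h>

int main(void)
{
	unsigned int pitch = 1920;		/* bytes per line, both planes */
	unsigned int offset1 = 1920 * 1080;	/* CbCr plane follows the Y plane */
	unsigned int x = 64, y = 32;		/* source crop origin, in pixels */
	unsigned int cpp = 2, hsub = 2, vsub = 2;

	unsigned int off = offset1 + (x * cpp / hsub) + (y * pitch / vsub);

	printf("%u\n", off);	/* 2073600 + 64 + 30720 = 2104384 */
	return 0;
}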
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index 94ad5f9e4404..c20cb4bc714d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -38,8 +38,6 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct drm_plane_state *state, struct omap_overlay_info *info);
-struct drm_connector *omap_framebuffer_get_next_connector(
- struct drm_framebuffer *fb, struct drm_connector *from);
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 0f66c74a54b0..d958cc813a94 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -170,13 +170,11 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
goto fail;
}
- mutex_lock(&dev->struct_mutex);
-
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
- goto fail_unlock;
+ goto fail;
}
DBG("fbi=%p, dev=%p", fbi, dev);
@@ -212,12 +210,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
- mutex_unlock(&dev->struct_mutex);
-
return 0;
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
fail:
if (ret) {
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 17a53d207978..4ba5d035c590 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -47,6 +47,9 @@ struct omap_gem_object {
/** roll applied when mapping to DMM */
u32 roll;
+ /** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
+ struct mutex lock;
+
/**
* dma_addr contains the buffer DMA address. It is valid for
*
@@ -137,14 +140,12 @@ struct omap_drm_usergart {
*/
/** get mmap offset */
-static u64 mmap_offset(struct drm_gem_object *obj)
+u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
int ret;
size_t size;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
/* Make it mmapable */
size = omap_gem_mmap_size(obj);
ret = drm_gem_create_mmap_offset_size(obj, size);
@@ -156,7 +157,7 @@ static u64 mmap_offset(struct drm_gem_object *obj)
return drm_vma_node_offset_addr(&obj->vma_node);
}
-static bool is_contiguous(struct omap_gem_object *omap_obj)
+static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
return true;
@@ -171,14 +172,14 @@ static bool is_contiguous(struct omap_gem_object *omap_obj)
* Eviction
*/
-static void evict_entry(struct drm_gem_object *obj,
+static void omap_gem_evict_entry(struct drm_gem_object *obj,
enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
int n = priv->usergart[fmt].height;
size_t size = PAGE_SIZE * n;
- loff_t off = mmap_offset(obj) +
+ loff_t off = omap_gem_mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
@@ -199,7 +200,7 @@ static void evict_entry(struct drm_gem_object *obj,
}
/* Evict a buffer from usergart, if it is mapped there */
-static void evict(struct drm_gem_object *obj)
+static void omap_gem_evict(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
@@ -213,7 +214,7 @@ static void evict(struct drm_gem_object *obj)
&priv->usergart[fmt].entry[i];
if (entry->obj == obj)
- evict_entry(obj, fmt, entry);
+ omap_gem_evict_entry(obj, fmt, entry);
}
}
}
@@ -222,7 +223,10 @@ static void evict(struct drm_gem_object *obj)
* Page Management
*/
-/** ensure backing pages are allocated */
+/*
+ * Ensure backing pages are allocated. Must be called with the omap_obj.lock
+ * held.
+ */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -232,7 +236,14 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
int i, ret;
dma_addr_t *addrs;
- WARN_ON(omap_obj->pages);
+ lockdep_assert_held(&omap_obj->lock);
+
+ /*
+ * If not using shmem (in which case backing pages don't need to be
+ * allocated) or if pages are already allocated we're done.
+ */
+ if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
+ return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
@@ -288,35 +299,15 @@ free_pages:
return ret;
}
-/* acquire pages when needed (for example, for DMA where physically
- * contiguous buffer is not required
- */
-static int get_pages(struct drm_gem_object *obj, struct page ***pages)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
-
- if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
- ret = omap_gem_attach_pages(obj);
- if (ret) {
- dev_err(obj->dev->dev, "could not attach pages\n");
- return ret;
- }
- }
-
- /* TODO: even phys-contig.. we should have a list of pages? */
- *pages = omap_obj->pages;
-
- return 0;
-}
-
-/** release backing pages */
+/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
unsigned int npages = obj->size >> PAGE_SHIFT;
unsigned int i;
+ lockdep_assert_held(&omap_obj->lock);
+
for (i = 0; i < npages; i++) {
if (omap_obj->dma_addrs[i])
dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
@@ -336,16 +327,6 @@ u32 omap_gem_flags(struct drm_gem_object *obj)
return to_omap_bo(obj)->flags;
}
-u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
-{
- u64 offset;
-
- mutex_lock(&obj->dev->struct_mutex);
- offset = mmap_offset(obj);
- mutex_unlock(&obj->dev->struct_mutex);
- return offset;
-}
-
/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
@@ -371,7 +352,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
*/
/* Normal handling for the case of faulting in non-tiled buffers */
-static int fault_1d(struct drm_gem_object *obj,
+static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -385,18 +366,19 @@ static int fault_1d(struct drm_gem_object *obj,
omap_gem_cpu_sync_page(obj, pgoff);
pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else {
- BUG_ON(!is_contiguous(omap_obj));
+ BUG_ON(!omap_gem_is_contiguous(omap_obj));
pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
}
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ return vmf_insert_mixed(vma, vmf->address,
+ __pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
-static int fault_2d(struct drm_gem_object *obj,
+static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -407,7 +389,8 @@ static int fault_2d(struct drm_gem_object *obj,
unsigned long pfn;
pgoff_t pgoff, base_pgoff;
unsigned long vaddr;
- int i, ret, slots;
+ int i, err, slots;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
/*
* Note the height of the slot is also equal to the number of pages
@@ -443,7 +426,7 @@ static int fault_2d(struct drm_gem_object *obj,
/* evict previous buffer using this usergart entry, if any: */
if (entry->obj)
- evict_entry(entry->obj, fmt, entry);
+ omap_gem_evict_entry(entry->obj, fmt, entry);
entry->obj = obj;
entry->obj_pgoff = base_pgoff;
@@ -473,9 +456,10 @@ static int fault_2d(struct drm_gem_object *obj,
memset(pages + slots, 0,
sizeof(struct page *) * (n - slots));
- ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
- if (ret) {
- dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+ err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+ if (err) {
+ ret = vmf_error(err);
+ dev_err(obj->dev->dev, "failed to pin: %d\n", err);
return ret;
}
@@ -485,7 +469,10 @@ static int fault_2d(struct drm_gem_object *obj,
pfn, pfn << PAGE_SHIFT);
for (i = n; i > 0; i--) {
- vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vmf_insert_mixed(vma,
+ vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+ if (ret & VM_FAULT_ERROR)
+ break;
pfn += priv->usergart[fmt].stride_pfn;
vaddr += PAGE_SIZE * m;
}
@@ -494,7 +481,7 @@ static int fault_2d(struct drm_gem_object *obj,
priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
% NUM_USERGART_ENTRIES;
- return 0;
+ return ret;
}
/**
@@ -509,24 +496,25 @@ static int fault_2d(struct drm_gem_object *obj,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-int omap_gem_fault(struct vm_fault *vmf)
+vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- struct drm_device *dev = obj->dev;
- struct page **pages;
- int ret;
+ int err;
+ vm_fault_t ret;
/* Make sure we don't parallel update on a fault, nor move or remove
* something from beneath our feet
*/
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
/* if a shmem backed object, make sure we have pages attached now */
- ret = get_pages(obj, &pages);
- if (ret)
+ err = omap_gem_attach_pages(obj);
+ if (err) {
+ ret = vmf_error(err);
goto fail;
+ }
/* where should we do corresponding put_pages().. we are mapping
* the original page, rather than thru a GART, so we can't rely
@@ -535,28 +523,14 @@ int omap_gem_fault(struct vm_fault *vmf)
*/
if (omap_obj->flags & OMAP_BO_TILED)
- ret = fault_2d(obj, vma, vmf);
+ ret = omap_gem_fault_2d(obj, vma, vmf);
else
- ret = fault_1d(obj, vma, vmf);
+ ret = omap_gem_fault_1d(obj, vma, vmf);
fail:
- mutex_unlock(&dev->struct_mutex);
- switch (ret) {
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ mutex_unlock(&omap_obj->lock);
+ return ret;
}
/** We override mainly to fix up some of the vm mapping flags.. */
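
The fault path now returns vm_fault_t end to end: internal errno values are converted once with vmf_error(), and vmf_insert_mixed() already returns a vm_fault_t, which is what makes the errno-to-VM_FAULT_* switch deleted above unnecessary. A sketch of the resulting calling convention (look_up_backing_page() is a hypothetical stand-in for the driver's setup work):

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		unsigned long pfn;
		int err;

		err = look_up_backing_page(vmf, &pfn);	/* hypothetical */
		if (err)
			return vmf_error(err);	/* errno -> VM_FAULT_* code */

		/* vmf_insert_mixed() already speaks vm_fault_t */
		return vmf_insert_mixed(vmf->vma, vmf->address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
	}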
@@ -689,21 +663,22 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
omap_obj->roll = roll;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
/* if we aren't mapped yet, we don't need to do anything */
if (omap_obj->block) {
- struct page **pages;
- ret = get_pages(obj, &pages);
+ ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
- ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+
+ ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
+ roll, true);
if (ret)
dev_err(obj->dev->dev, "could not repin: %d\n", ret);
}
fail:
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
return ret;
}
@@ -722,7 +697,7 @@ fail:
* the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
* unmapped from the CPU.
*/
-static inline bool is_cached_coherent(struct drm_gem_object *obj)
+static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -738,7 +713,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (is_cached_coherent(obj))
+ if (omap_gem_is_cached_coherent(obj))
return;
if (omap_obj->dma_addrs[pgoff]) {
@@ -758,7 +733,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
struct page **pages = omap_obj->pages;
bool dirty = false;
- if (is_cached_coherent(obj))
+ if (omap_gem_is_cached_coherent(obj))
return;
for (i = 0; i < npages; i++) {
@@ -804,18 +779,17 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
- if (!is_contiguous(omap_obj) && priv->has_dmm) {
+ if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
if (omap_obj->dma_addr_cnt == 0) {
- struct page **pages;
u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
BUG_ON(omap_obj->block);
- ret = get_pages(obj, &pages);
+ ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
@@ -835,7 +809,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
}
/* TODO: enable async refill.. */
- ret = tiler_pin(block, pages, npages,
+ ret = tiler_pin(block, omap_obj->pages, npages,
omap_obj->roll, true);
if (ret) {
tiler_release(block);
@@ -853,7 +827,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
omap_obj->dma_addr_cnt++;
*dma_addr = omap_obj->dma_addr;
- } else if (is_contiguous(omap_obj)) {
+ } else if (omap_gem_is_contiguous(omap_obj)) {
*dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
@@ -861,7 +835,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
}
fail:
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
return ret;
}
@@ -879,7 +853,8 @@ void omap_gem_unpin(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
+
if (omap_obj->dma_addr_cnt > 0) {
omap_obj->dma_addr_cnt--;
if (omap_obj->dma_addr_cnt == 0) {
@@ -898,7 +873,7 @@ void omap_gem_unpin(struct drm_gem_object *obj)
}
}
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
}
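
omap_gem_pin() and omap_gem_unpin() keep a per-object pin refcount (dma_addr_cnt) under the new lock, so callers never touch the lock themselves. A sketch of the usage from a caller's point of view (obj is an existing struct drm_gem_object *):

	dma_addr_t dma_addr;
	int ret;

	ret = omap_gem_pin(obj, &dma_addr);	/* locks omap_obj internally */
	if (ret)
		return ret;

	/* ... program the hardware with dma_addr ... */

	omap_gem_unpin(obj);			/* drops the pin refcount */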
/* Get rotated scanout address (only valid if already pinned), at the
@@ -911,13 +886,16 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
+
if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
(omap_obj->flags & OMAP_BO_TILED)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
- mutex_unlock(&obj->dev->struct_mutex);
+
+ mutex_unlock(&omap_obj->lock);
+
return ret;
}
@@ -944,17 +922,27 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap)
{
- int ret;
- if (!remap) {
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (!omap_obj->pages)
- return -ENOMEM;
- *pages = omap_obj->pages;
- return 0;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&omap_obj->lock);
+
+ if (remap) {
+ ret = omap_gem_attach_pages(obj);
+ if (ret)
+ goto unlock;
}
- mutex_lock(&obj->dev->struct_mutex);
- ret = get_pages(obj, pages);
- mutex_unlock(&obj->dev->struct_mutex);
+
+ if (!omap_obj->pages) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ *pages = omap_obj->pages;
+
+unlock:
+ mutex_unlock(&omap_obj->lock);
+
return ret;
}
@@ -969,23 +957,34 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
-/* Get kernel virtual address for CPU access.. this more or less only
- * exists for omap_fbdev. This should be called with struct_mutex
- * held.
+/*
+ * Get kernel virtual address for CPU access.. this more or less only
+ * exists for omap_fbdev.
*/
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ void *vaddr;
+ int ret;
+
+ mutex_lock(&omap_obj->lock);
+
if (!omap_obj->vaddr) {
- struct page **pages;
- int ret = get_pages(obj, &pages);
- if (ret)
- return ERR_PTR(ret);
- omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ ret = omap_gem_attach_pages(obj);
+ if (ret) {
+ vaddr = ERR_PTR(ret);
+ goto unlock;
+ }
+
+ omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
- return omap_obj->vaddr;
+
+ vaddr = omap_obj->vaddr;
+
+unlock:
+ mutex_unlock(&omap_obj->lock);
+ return vaddr;
}
#endif
@@ -1001,6 +1000,7 @@ int omap_gem_resume(struct drm_device *dev)
struct omap_gem_object *omap_obj;
int ret = 0;
+ mutex_lock(&priv->list_lock);
list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
if (omap_obj->block) {
struct drm_gem_object *obj = &omap_obj->base;
@@ -1012,12 +1012,14 @@ int omap_gem_resume(struct drm_device *dev)
omap_obj->roll, true);
if (ret) {
dev_err(dev->dev, "could not repin: %d\n", ret);
- return ret;
+ goto done;
}
}
}
- return 0;
+done:
+ mutex_unlock(&priv->list_lock);
+ return ret;
}
#endif
@@ -1033,6 +1035,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
off = drm_vma_node_start(&obj->vma_node);
+ mutex_lock(&omap_obj->lock);
+
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
@@ -1050,6 +1054,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, " %zu", obj->size);
}
+ mutex_unlock(&omap_obj->lock);
+
seq_printf(m, "\n");
}
@@ -1081,17 +1087,21 @@ void omap_gem_free_object(struct drm_gem_object *obj)
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- evict(obj);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ omap_gem_evict(obj);
- spin_lock(&priv->list_lock);
+ mutex_lock(&priv->list_lock);
list_del(&omap_obj->mm_list);
- spin_unlock(&priv->list_lock);
+ mutex_unlock(&priv->list_lock);
- /* this means the object is still pinned.. which really should
- * not happen. I think..
+ /*
+ * We own the sole reference to the object at this point, but to keep
+ * lockdep happy, we must still take the omap_obj_lock to call
+ * omap_gem_detach_pages(). This should hardly make any difference as
+ * there can't be any lock contention.
*/
+ mutex_lock(&omap_obj->lock);
+
+ /* The object should not be pinned. */
WARN_ON(omap_obj->dma_addr_cnt > 0);
if (omap_obj->pages) {
@@ -1110,8 +1120,12 @@ void omap_gem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, omap_obj->sgt);
}
+ mutex_unlock(&omap_obj->lock);
+
drm_gem_object_release(obj);
+ mutex_destroy(&omap_obj->lock);
+
kfree(omap_obj);
}
@@ -1167,6 +1181,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
obj = &omap_obj->base;
omap_obj->flags = flags;
+ mutex_init(&omap_obj->lock);
if (flags & OMAP_BO_TILED) {
/*
@@ -1206,9 +1221,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
goto err_release;
}
- spin_lock(&priv->list_lock);
+ mutex_lock(&priv->list_lock);
list_add(&omap_obj->mm_list, &priv->obj_list);
- spin_unlock(&priv->list_lock);
+ mutex_unlock(&priv->list_lock);
return obj;
@@ -1231,16 +1246,15 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
if (sgt->orig_nents != 1 && !priv->has_dmm)
return ERR_PTR(-EINVAL);
- mutex_lock(&dev->struct_mutex);
-
gsize.bytes = PAGE_ALIGN(size);
obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
- if (!obj) {
- obj = ERR_PTR(-ENOMEM);
- goto done;
- }
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
omap_obj = to_omap_bo(obj);
+
+ mutex_lock(&omap_obj->lock);
+
omap_obj->sgt = sgt;
if (sgt->orig_nents == 1) {
@@ -1276,7 +1290,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
}
done:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
return obj;
}
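
After this file's conversion there are two independent locks: priv->list_lock, now a mutex, guarding the global object list, and omap_obj->lock guarding each object's state. The switch from a spinlock is presumably what allows omap_gem_resume() to call tiler_pin(), which may sleep, while holding the list lock. The resume loop, paraphrased (the real loop also checks that the object is actually pinned first):

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		/* tiler_pin() may sleep; a spinlock would not allow this */
		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
				omap_obj->roll, true);
		if (ret)
			break;
	}
	mutex_unlock(&priv->list_lock);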
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index a78bde05193a..c1c45fbde155 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -21,6 +21,7 @@
#define __OMAPDRM_GEM_H__
#include <linux/types.h>
+#include <linux/mm_types.h>
enum dma_data_direction;
@@ -80,7 +81,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
-int omap_gem_fault(struct vm_fault *vmf);
+vm_fault_t omap_gem_fault(struct vm_fault *vmf);
int omap_gem_roll(struct drm_gem_object *obj, u32 roll);
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 8e41d649e248..ec04a69ade46 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -93,23 +93,6 @@ static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
return 0;
}
-
-static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
- unsigned long page_num)
-{
- struct drm_gem_object *obj = buffer->priv;
- struct page **pages;
- omap_gem_get_pages(obj, &pages, false);
- omap_gem_cpu_sync_page(obj, page_num);
- return kmap_atomic(pages[page_num]);
-}
-
-static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
- unsigned long page_num, void *addr)
-{
- kunmap_atomic(addr);
-}
-
static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
unsigned long page_num)
{
@@ -148,8 +131,6 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .map_atomic = omap_gem_dmabuf_kmap_atomic,
- .unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
.map = omap_gem_dmabuf_kmap,
.unmap = omap_gem_dmabuf_kunmap,
.mmap = omap_gem_dmabuf_mmap,
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 25682ff3449a..6020c30a33b3 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -46,6 +46,15 @@ config DRM_PANEL_ILITEK_IL9322
Say Y here if you want to enable support for Ilitek IL9322
QVGA (320x240) RGB, YUV and ITU-T BT.656 panels.
+config DRM_PANEL_ILITEK_ILI9881C
+ tristate "Ilitek ILI9881C-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for panels based on the
+ Ilitek ILI9881C controller.
+
config DRM_PANEL_INNOLUX_P079ZCA
tristate "Innolux P079ZCA panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index f26efc11d746..5ccaaa9d13af 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
+obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
new file mode 100644
index 000000000000..3ad4a46c4e94
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018, Bootlin
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct ili9881c {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct backlight_device *backlight;
+ struct regulator *power;
+ struct gpio_desc *reset;
+};
+
+enum ili9881c_op {
+ ILI9881C_SWITCH_PAGE,
+ ILI9881C_COMMAND,
+};
+
+struct ili9881c_instr {
+ enum ili9881c_op op;
+
+ union arg {
+ struct cmd {
+ u8 cmd;
+ u8 data;
+ } cmd;
+ u8 page;
+ } arg;
+};
+
+#define ILI9881C_SWITCH_PAGE_INSTR(_page) \
+ { \
+ .op = ILI9881C_SWITCH_PAGE, \
+ .arg = { \
+ .page = (_page), \
+ }, \
+ }
+
+#define ILI9881C_COMMAND_INSTR(_cmd, _data) \
+ { \
+ .op = ILI9881C_COMMAND, \
+ .arg = { \
+ .cmd = { \
+ .cmd = (_cmd), \
+ .data = (_data), \
+ }, \
+ }, \
+ }
+
+static const struct ili9881c_instr ili9881c_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x03),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x06),
+ ILI9881C_COMMAND_INSTR(0x07, 0x06),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x18),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x04),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x03),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x25),
+ ILI9881C_COMMAND_INSTR(0x10, 0x25),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xC0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x04),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x02),
+ ILI9881C_COMMAND_INSTR(0x61, 0x02),
+ ILI9881C_COMMAND_INSTR(0x62, 0x02),
+ ILI9881C_COMMAND_INSTR(0x63, 0x02),
+ ILI9881C_COMMAND_INSTR(0x64, 0x02),
+ ILI9881C_COMMAND_INSTR(0x65, 0x02),
+ ILI9881C_COMMAND_INSTR(0x66, 0x02),
+ ILI9881C_COMMAND_INSTR(0x67, 0x02),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x06),
+ ILI9881C_COMMAND_INSTR(0x70, 0x07),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x02),
+ ILI9881C_COMMAND_INSTR(0x76, 0x02),
+ ILI9881C_COMMAND_INSTR(0x77, 0x02),
+ ILI9881C_COMMAND_INSTR(0x78, 0x02),
+ ILI9881C_COMMAND_INSTR(0x79, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x80, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x83, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x84, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x85, 0x06),
+ ILI9881C_COMMAND_INSTR(0x86, 0x07),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x22),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3A, 0xA4),
+ ILI9881C_COMMAND_INSTR(0x8D, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x53, 0xDC),
+ ILI9881C_COMMAND_INSTR(0x55, 0xA7),
+ ILI9881C_COMMAND_INSTR(0x50, 0x78),
+ ILI9881C_COMMAND_INSTR(0x51, 0x78),
+ ILI9881C_COMMAND_INSTR(0x31, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x14),
+ ILI9881C_COMMAND_INSTR(0xA0, 0x2A),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x39),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x46),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x0e),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x12),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x25),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x19),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA8, 0xa6),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xAB, 0x85),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x1B),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x51),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x22),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x2d),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x4f),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x3F),
+ ILI9881C_COMMAND_INSTR(0xC0, 0x2A),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x3a),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x45),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x0e),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x11),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x24),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xC8, 0xaa),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xCB, 0x96),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x1B),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x51),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x22),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x3F),
+};
+
+static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
+{
+ return container_of(panel, struct ili9881c, panel);
+}
+
+/*
+ * The panel seems to accept some private DCS commands that map
+ * directly to registers.
+ *
+ * It is organised by page, with each page having its own set of
+ * registers, and the first page looks like it's holding the standard
+ * DCS commands.
+ *
+ * So before any attempt at sending a command or data, we have to make
+ * sure we're on the right page.
+ */
+static int ili9881c_switch_page(struct ili9881c *ctx, u8 page)
+{
+ u8 buf[4] = { 0xff, 0x98, 0x81, page };
+ int ret;
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ili9881c_send_cmd_data(struct ili9881c *ctx, u8 cmd, u8 data)
+{
+ u8 buf[2] = { cmd, data };
+ int ret;
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ili9881c_prepare(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+ unsigned int i;
+ int ret;
+
+ /* Power the panel */
+ ret = regulator_enable(ctx->power);
+ if (ret)
+ return ret;
+ msleep(5);
+
+ /* And reset it */
+ gpiod_set_value(ctx->reset, 1);
+ msleep(20);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(20);
+
+ for (i = 0; i < ARRAY_SIZE(ili9881c_init); i++) {
+ const struct ili9881c_instr *instr = &ili9881c_init[i];
+
+ if (instr->op == ILI9881C_SWITCH_PAGE)
+ ret = ili9881c_switch_page(ctx, instr->arg.page);
+ else if (instr->op == ILI9881C_COMMAND)
+ ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd,
+ instr->arg.cmd.data);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = ili9881c_switch_page(ctx, 0);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ili9881c_enable(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+ msleep(120);
+
+ mipi_dsi_dcs_set_display_on(ctx->dsi);
+ backlight_enable(ctx->backlight);
+
+ return 0;
+}
+
+static int ili9881c_disable(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+ backlight_disable(ctx->backlight);
+ return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int ili9881c_unprepare(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+ mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ regulator_disable(ctx->power);
+ gpiod_set_value(ctx->reset, 1);
+
+ return 0;
+}
+
+static const struct drm_display_mode bananapi_default_mode = {
+ .clock = 62000,
+ .vrefresh = 60,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 10,
+ .hsync_end = 720 + 10 + 20,
+ .htotal = 720 + 10 + 20 + 30,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 10,
+ .vsync_end = 1280 + 10 + 10,
+ .vtotal = 1280 + 10 + 10 + 20,
+};
+
+static int ili9881c_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &bananapi_default_mode);
+ if (!mode) {
+ dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+ bananapi_default_mode.hdisplay,
+ bananapi_default_mode.vdisplay,
+ bananapi_default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ panel->connector->display_info.width_mm = 62;
+ panel->connector->display_info.height_mm = 110;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ili9881c_funcs = {
+ .prepare = ili9881c_prepare,
+ .unprepare = ili9881c_unprepare,
+ .enable = ili9881c_enable,
+ .disable = ili9881c_disable,
+ .get_modes = ili9881c_get_modes,
+};
+
+static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct device_node *np;
+ struct ili9881c *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &dsi->dev;
+ ctx->panel.funcs = &ili9881c_funcs;
+
+ ctx->power = devm_regulator_get(&dsi->dev, "power");
+ if (IS_ERR(ctx->power)) {
+ dev_err(&dsi->dev, "Couldn't get our power regulator\n");
+ return PTR_ERR(ctx->power);
+ }
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ np = of_parse_phandle(dsi->dev.of_node, "backlight", 0);
+ if (np) {
+ ctx->backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!ctx->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct ili9881c *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ if (ctx->backlight)
+ put_device(&ctx->backlight->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ili9881c_of_match[] = {
+ { .compatible = "bananapi,lhr050h41" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9881c_of_match);
+
+static struct mipi_dsi_driver ili9881c_dsi_driver = {
+ .probe = ili9881c_dsi_probe,
+ .remove = ili9881c_dsi_remove,
+ .driver = {
+ .name = "ili9881c-dsi",
+ .of_match_table = ili9881c_of_match,
+ },
+};
+module_mipi_dsi_driver(ili9881c_dsi_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Ilitek ILI9881C Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 57df39b5c589..72edb334d997 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <drm/drmP.h>
@@ -20,12 +21,41 @@
#include <video/mipi_display.h>
+struct panel_init_cmd {
+ size_t len;
+ const char *data;
+};
+
+#define _INIT_CMD(...) { \
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
+
+struct panel_desc {
+ const struct drm_display_mode *mode;
+ unsigned int bpc;
+ struct {
+ unsigned int width;
+ unsigned int height;
+ } size;
+
+ unsigned long flags;
+ enum mipi_dsi_pixel_format format;
+ const struct panel_init_cmd *init_cmds;
+ unsigned int lanes;
+ const char * const *supply_names;
+ unsigned int num_supplies;
+ unsigned int sleep_mode_delay;
+ unsigned int power_down_delay;
+};
+
struct innolux_panel {
struct drm_panel base;
struct mipi_dsi_device *link;
+ const struct panel_desc *desc;
struct backlight_device *backlight;
- struct regulator *supply;
+ struct regulator_bulk_data *supplies;
+ unsigned int num_supplies;
struct gpio_desc *enable_gpio;
bool prepared;
@@ -72,12 +102,16 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
return err;
}
+ if (innolux->desc->sleep_mode_delay)
+ msleep(innolux->desc->sleep_mode_delay);
+
gpiod_set_value_cansleep(innolux->enable_gpio, 0);
- /* T8: 80ms - 1000ms */
- msleep(80);
+ if (innolux->desc->power_down_delay)
+ msleep(innolux->desc->power_down_delay);
- err = regulator_disable(innolux->supply);
+ err = regulator_bulk_disable(innolux->desc->num_supplies,
+ innolux->supplies);
if (err < 0)
return err;
@@ -89,24 +123,55 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
static int innolux_panel_prepare(struct drm_panel *panel)
{
struct innolux_panel *innolux = to_innolux_panel(panel);
- int err, regulator_err;
+ int err;
if (innolux->prepared)
return 0;
gpiod_set_value_cansleep(innolux->enable_gpio, 0);
- err = regulator_enable(innolux->supply);
+ err = regulator_bulk_enable(innolux->desc->num_supplies,
+ innolux->supplies);
if (err < 0)
return err;
- /* T2: 15ms - 1000ms */
- usleep_range(15000, 16000);
+ /* p079zca: t2 (20ms), p097pfg: t4 (15ms) */
+ usleep_range(20000, 21000);
gpiod_set_value_cansleep(innolux->enable_gpio, 1);
- /* T4: 15ms - 1000ms */
- usleep_range(15000, 16000);
+ /* p079zca: t4, p097pfg: t5 */
+ usleep_range(20000, 21000);
+
+ if (innolux->desc->init_cmds) {
+ const struct panel_init_cmd *cmds =
+ innolux->desc->init_cmds;
+ unsigned int i;
+
+ for (i = 0; cmds[i].len != 0; i++) {
+ const struct panel_init_cmd *cmd = &cmds[i];
+
+ err = mipi_dsi_generic_write(innolux->link, cmd->data,
+ cmd->len);
+ if (err < 0) {
+ dev_err(panel->dev,
+ "failed to write command %u\n", i);
+ goto poweroff;
+ }
+
+ /*
+ * Included by random guessing, because without this
+ * (or at least, some delay), the panel sometimes
+ * didn't appear to pick up the command sequence.
+ */
+ err = mipi_dsi_dcs_nop(innolux->link);
+ if (err < 0) {
+ dev_err(panel->dev,
+ "failed to send DCS nop: %d\n", err);
+ goto poweroff;
+ }
+ }
+ }
err = mipi_dsi_dcs_exit_sleep_mode(innolux->link);
if (err < 0) {
@@ -133,12 +198,9 @@ static int innolux_panel_prepare(struct drm_panel *panel)
return 0;
poweroff:
- regulator_err = regulator_disable(innolux->supply);
- if (regulator_err)
- DRM_DEV_ERROR(panel->dev, "failed to disable regulator: %d\n",
- regulator_err);
-
gpiod_set_value_cansleep(innolux->enable_gpio, 0);
+ regulator_bulk_disable(innolux->desc->num_supplies, innolux->supplies);
+
return err;
}
@@ -162,7 +224,11 @@ static int innolux_panel_enable(struct drm_panel *panel)
return 0;
}
-static const struct drm_display_mode default_mode = {
+static const char * const innolux_p079zca_supply_names[] = {
+ "power",
+};
+
+static const struct drm_display_mode innolux_p079zca_mode = {
.clock = 56900,
.hdisplay = 768,
.hsync_start = 768 + 40,
@@ -175,15 +241,181 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
+static const struct panel_desc innolux_p079zca_panel_desc = {
+ .mode = &innolux_p079zca_mode,
+ .bpc = 8,
+ .size = {
+ .width = 120,
+ .height = 160,
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .supply_names = innolux_p079zca_supply_names,
+ .num_supplies = ARRAY_SIZE(innolux_p079zca_supply_names),
+ .power_down_delay = 80, /* T8: 80ms - 1000ms */
+};
+
+static const char * const innolux_p097pfg_supply_names[] = {
+ "avdd",
+ "avee",
+};
+
+static const struct drm_display_mode innolux_p097pfg_mode = {
+ .clock = 229000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 100,
+ .hsync_end = 1536 + 100 + 24,
+ .htotal = 1536 + 100 + 24 + 100,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 100,
+ .vsync_end = 2048 + 100 + 2,
+ .vtotal = 2048 + 100 + 2 + 18,
+ .vrefresh = 60,
+};
+
+/*
+ * Display manufacturer failed to provide init sequencing according to
+ * https://chromium-review.googlesource.com/c/chromiumos/third_party/coreboot/+/892065/
+ * so the init sequence stems from a register dump of a working panel.
+ */
+static const struct panel_init_cmd innolux_p097pfg_init_cmds[] = {
+ /* page 0 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x00),
+ _INIT_CMD(0xB1, 0xE8, 0x11),
+ _INIT_CMD(0xB2, 0x25, 0x02),
+ _INIT_CMD(0xB5, 0x08, 0x00),
+ _INIT_CMD(0xBC, 0x0F, 0x00),
+ _INIT_CMD(0xB8, 0x03, 0x06, 0x00, 0x00),
+ _INIT_CMD(0xBD, 0x01, 0x90, 0x14, 0x14),
+ _INIT_CMD(0x6F, 0x01),
+ _INIT_CMD(0xC0, 0x03),
+ _INIT_CMD(0x6F, 0x02),
+ _INIT_CMD(0xC1, 0x0D),
+ _INIT_CMD(0xD9, 0x01, 0x09, 0x70),
+ _INIT_CMD(0xC5, 0x12, 0x21, 0x00),
+ _INIT_CMD(0xBB, 0x93, 0x93),
+
+ /* page 1 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x01),
+ _INIT_CMD(0xB3, 0x3C, 0x3C),
+ _INIT_CMD(0xB4, 0x0F, 0x0F),
+ _INIT_CMD(0xB9, 0x45, 0x45),
+ _INIT_CMD(0xBA, 0x14, 0x14),
+ _INIT_CMD(0xCA, 0x02),
+ _INIT_CMD(0xCE, 0x04),
+ _INIT_CMD(0xC3, 0x9B, 0x9B),
+ _INIT_CMD(0xD8, 0xC0, 0x03),
+ _INIT_CMD(0xBC, 0x82, 0x01),
+ _INIT_CMD(0xBD, 0x9E, 0x01),
+
+ /* page 2 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x02),
+ _INIT_CMD(0xB0, 0x82),
+ _INIT_CMD(0xD1, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x82, 0x00, 0xA5,
+ 0x00, 0xC1, 0x00, 0xEA, 0x01, 0x0D, 0x01, 0x40),
+ _INIT_CMD(0xD2, 0x01, 0x6A, 0x01, 0xA8, 0x01, 0xDC, 0x02, 0x29,
+ 0x02, 0x67, 0x02, 0x68, 0x02, 0xA8, 0x02, 0xF0),
+ _INIT_CMD(0xD3, 0x03, 0x19, 0x03, 0x49, 0x03, 0x67, 0x03, 0x8C,
+ 0x03, 0xA6, 0x03, 0xC7, 0x03, 0xDE, 0x03, 0xEC),
+ _INIT_CMD(0xD4, 0x03, 0xFF, 0x03, 0xFF),
+ _INIT_CMD(0xE0, 0x00, 0x00, 0x00, 0x86, 0x00, 0xC5, 0x00, 0xE5,
+ 0x00, 0xFF, 0x01, 0x26, 0x01, 0x45, 0x01, 0x75),
+ _INIT_CMD(0xE1, 0x01, 0x9C, 0x01, 0xD5, 0x02, 0x05, 0x02, 0x4D,
+ 0x02, 0x86, 0x02, 0x87, 0x02, 0xC3, 0x03, 0x03),
+ _INIT_CMD(0xE2, 0x03, 0x2A, 0x03, 0x56, 0x03, 0x72, 0x03, 0x94,
+ 0x03, 0xAC, 0x03, 0xCB, 0x03, 0xE0, 0x03, 0xED),
+ _INIT_CMD(0xE3, 0x03, 0xFF, 0x03, 0xFF),
+
+ /* page 3 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x03),
+ _INIT_CMD(0xB0, 0x00, 0x00, 0x00, 0x00),
+ _INIT_CMD(0xB1, 0x00, 0x00, 0x00, 0x00),
+ _INIT_CMD(0xB2, 0x00, 0x00, 0x06, 0x04, 0x01, 0x40, 0x85),
+ _INIT_CMD(0xB3, 0x10, 0x07, 0xFC, 0x04, 0x01, 0x40, 0x80),
+ _INIT_CMD(0xB6, 0xF0, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01,
+ 0x40, 0x80),
+ _INIT_CMD(0xBA, 0xC5, 0x07, 0x00, 0x04, 0x11, 0x25, 0x8C),
+ _INIT_CMD(0xBB, 0xC5, 0x07, 0x00, 0x03, 0x11, 0x25, 0x8C),
+ _INIT_CMD(0xC0, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80),
+ _INIT_CMD(0xC1, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80),
+ _INIT_CMD(0xC4, 0x00, 0x00),
+ _INIT_CMD(0xEF, 0x41),
+
+ /* page 4 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x04),
+ _INIT_CMD(0xEC, 0x4C),
+
+ /* page 5 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x05),
+ _INIT_CMD(0xB0, 0x13, 0x03, 0x03, 0x01),
+ _INIT_CMD(0xB1, 0x30, 0x00),
+ _INIT_CMD(0xB2, 0x02, 0x02, 0x00),
+ _INIT_CMD(0xB3, 0x82, 0x23, 0x82, 0x9D),
+ _INIT_CMD(0xB4, 0xC5, 0x75, 0x24, 0x57),
+ _INIT_CMD(0xB5, 0x00, 0xD4, 0x72, 0x11, 0x11, 0xAB, 0x0A),
+ _INIT_CMD(0xB6, 0x00, 0x00, 0xD5, 0x72, 0x24, 0x56),
+ _INIT_CMD(0xB7, 0x5C, 0xDC, 0x5C, 0x5C),
+ _INIT_CMD(0xB9, 0x0C, 0x00, 0x00, 0x01, 0x00),
+ _INIT_CMD(0xC0, 0x75, 0x11, 0x11, 0x54, 0x05),
+ _INIT_CMD(0xC6, 0x00, 0x00, 0x00, 0x00),
+ _INIT_CMD(0xD0, 0x00, 0x48, 0x08, 0x00, 0x00),
+ _INIT_CMD(0xD1, 0x00, 0x48, 0x09, 0x00, 0x00),
+
+ /* page 6 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x06),
+ _INIT_CMD(0xB0, 0x02, 0x32, 0x32, 0x08, 0x2F),
+ _INIT_CMD(0xB1, 0x2E, 0x15, 0x14, 0x13, 0x12),
+ _INIT_CMD(0xB2, 0x11, 0x10, 0x00, 0x3D, 0x3D),
+ _INIT_CMD(0xB3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xB4, 0x3D, 0x32),
+ _INIT_CMD(0xB5, 0x03, 0x32, 0x32, 0x09, 0x2F),
+ _INIT_CMD(0xB6, 0x2E, 0x1B, 0x1A, 0x19, 0x18),
+ _INIT_CMD(0xB7, 0x17, 0x16, 0x01, 0x3D, 0x3D),
+ _INIT_CMD(0xB8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xB9, 0x3D, 0x32),
+ _INIT_CMD(0xC0, 0x01, 0x32, 0x32, 0x09, 0x2F),
+ _INIT_CMD(0xC1, 0x2E, 0x1A, 0x1B, 0x16, 0x17),
+ _INIT_CMD(0xC2, 0x18, 0x19, 0x03, 0x3D, 0x3D),
+ _INIT_CMD(0xC3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xC4, 0x3D, 0x32),
+ _INIT_CMD(0xC5, 0x00, 0x32, 0x32, 0x08, 0x2F),
+ _INIT_CMD(0xC6, 0x2E, 0x14, 0x15, 0x10, 0x11),
+ _INIT_CMD(0xC7, 0x12, 0x13, 0x02, 0x3D, 0x3D),
+ _INIT_CMD(0xC8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xC9, 0x3D, 0x32),
+
+ {},
+};
+
+static const struct panel_desc innolux_p097pfg_panel_desc = {
+ .mode = &innolux_p097pfg_mode,
+ .bpc = 8,
+ .size = {
+ .width = 147,
+ .height = 196,
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_cmds = innolux_p097pfg_init_cmds,
+ .lanes = 4,
+ .supply_names = innolux_p097pfg_supply_names,
+ .num_supplies = ARRAY_SIZE(innolux_p097pfg_supply_names),
+ .sleep_mode_delay = 100, /* T15 */
+};
+
static int innolux_panel_get_modes(struct drm_panel *panel)
{
+ struct innolux_panel *innolux = to_innolux_panel(panel);
+ const struct drm_display_mode *m = innolux->desc->mode;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(panel->drm, m);
if (!mode) {
DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ m->hdisplay, m->vdisplay, m->vrefresh);
return -ENOMEM;
}
@@ -191,9 +423,11 @@ static int innolux_panel_get_modes(struct drm_panel *panel)
drm_mode_probed_add(panel->connector, mode);
- panel->connector->display_info.width_mm = 120;
- panel->connector->display_info.height_mm = 160;
- panel->connector->display_info.bpc = 8;
+ panel->connector->display_info.width_mm =
+ innolux->desc->size.width;
+ panel->connector->display_info.height_mm =
+ innolux->desc->size.height;
+ panel->connector->display_info.bpc = innolux->desc->bpc;
return 1;
}
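
The _INIT_CMD() macro above stores each command as a C99 compound literal, so the table can hold variable-length byte sequences and still end with an empty {} sentinel whose len is 0, which is exactly what the prepare loop tests. Expanded by hand, one entry looks roughly like this:

	/* _INIT_CMD(0xB1, 0xE8, 0x11) expands to approximately: */
	{
		.len  = 3,	/* sizeof((char[]){ 0xB1, 0xE8, 0x11 }) */
		.data = (char[]){ 0xB1, 0xE8, 0x11 },	/* cmd, then payload */
	}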
@@ -207,19 +441,42 @@ static const struct drm_panel_funcs innolux_panel_funcs = {
};
static const struct of_device_id innolux_of_match[] = {
- { .compatible = "innolux,p079zca", },
+ { .compatible = "innolux,p079zca",
+ .data = &innolux_p079zca_panel_desc
+ },
+ { .compatible = "innolux,p097pfg",
+ .data = &innolux_p097pfg_panel_desc
+ },
{ }
};
MODULE_DEVICE_TABLE(of, innolux_of_match);
-static int innolux_panel_add(struct innolux_panel *innolux)
+static int innolux_panel_add(struct mipi_dsi_device *dsi,
+ const struct panel_desc *desc)
{
- struct device *dev = &innolux->link->dev;
- int err;
+ struct innolux_panel *innolux;
+ struct device *dev = &dsi->dev;
+ int err, i;
+
+ innolux = devm_kzalloc(dev, sizeof(*innolux), GFP_KERNEL);
+ if (!innolux)
+ return -ENOMEM;
+
+ innolux->desc = desc;
+
+ innolux->supplies = devm_kcalloc(dev, desc->num_supplies,
+ sizeof(*innolux->supplies),
+ GFP_KERNEL);
+ if (!innolux->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < desc->num_supplies; i++)
+ innolux->supplies[i].supply = desc->supply_names[i];
- innolux->supply = devm_regulator_get(dev, "power");
- if (IS_ERR(innolux->supply))
- return PTR_ERR(innolux->supply);
+ err = devm_regulator_bulk_get(dev, desc->num_supplies,
+ innolux->supplies);
+ if (err < 0)
+ return err;
innolux->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_HIGH);
@@ -230,15 +487,21 @@ static int innolux_panel_add(struct innolux_panel *innolux)
}
innolux->backlight = devm_of_find_backlight(dev);
-
if (IS_ERR(innolux->backlight))
return PTR_ERR(innolux->backlight);
drm_panel_init(&innolux->base);
innolux->base.funcs = &innolux_panel_funcs;
- innolux->base.dev = &innolux->link->dev;
+ innolux->base.dev = dev;
+
+ err = drm_panel_add(&innolux->base);
+ if (err < 0)
+ return err;
+
+ mipi_dsi_set_drvdata(dsi, innolux);
+ innolux->link = dsi;
- return drm_panel_add(&innolux->base);
+ return 0;
}
static void innolux_panel_del(struct innolux_panel *innolux)
@@ -249,28 +512,19 @@ static void innolux_panel_del(struct innolux_panel *innolux)
static int innolux_panel_probe(struct mipi_dsi_device *dsi)
{
- struct innolux_panel *innolux;
+ const struct panel_desc *desc;
int err;
- dsi->lanes = 4;
- dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_LPM;
-
- innolux = devm_kzalloc(&dsi->dev, sizeof(*innolux), GFP_KERNEL);
- if (!innolux)
- return -ENOMEM;
-
- mipi_dsi_set_drvdata(dsi, innolux);
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->mode_flags = desc->flags;
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
- innolux->link = dsi;
-
- err = innolux_panel_add(innolux);
+ err = innolux_panel_add(dsi, desc);
if (err < 0)
return err;
- err = mipi_dsi_attach(dsi);
- return err;
+ return mipi_dsi_attach(dsi);
}
static int innolux_panel_remove(struct mipi_dsi_device *dsi)
@@ -292,7 +546,6 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
err);
- drm_panel_detach(&innolux->base);
innolux_panel_del(innolux);
return 0;
@@ -318,5 +571,6 @@ static struct mipi_dsi_driver innolux_panel_driver = {
module_mipi_dsi_driver(innolux_panel_driver);
MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
MODULE_DESCRIPTION("Innolux P079ZCA panel driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 0a94ab79a6c0..99caa7835e7b 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -500,7 +500,6 @@ static int jdi_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
ret);
- drm_panel_detach(&jdi->base);
jdi_panel_del(jdi);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 5185819c5b79..8a1687887ae9 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -282,7 +282,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
{
struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev);
- drm_panel_detach(&lvds->panel);
drm_panel_remove(&lvds->panel);
panel_lvds_disable(&lvds->panel);
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 90f1ae4af93c..87fa316e1d7b 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -14,8 +14,6 @@
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
-#define DRV_NAME "orisetech_otm8009a"
-
#define OTM8009A_BACKLIGHT_DEFAULT 240
#define OTM8009A_BACKLIGHT_MAX 255
@@ -98,6 +96,20 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
DRM_WARN("mipi dsi dcs write buffer failed\n");
}
+static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
+ size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* data will be sent in DSI HS mode (i.e. no LPM) */
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ otm8009a_dcs_write_buf(ctx, data, len);
+
+ /* restore back the dsi lpm mode */
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+}
+
#define dcs_write_seq(ctx, seq...) \
({ \
static const u8 d[] = { seq }; \
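
The new _hs variant temporarily clears MIPI_DSI_MODE_LPM so the write goes out in high-speed mode, then restores the flag; the backlight path below switches to it for brightness updates. Usage is identical to the plain helper, e.g. (brightness value illustrative):

	u8 data[2] = { MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0x80 };

	otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));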
@@ -248,11 +260,7 @@ static int otm8009a_disable(struct drm_panel *panel)
if (!ctx->enabled)
return 0; /* This is not an issue so we return 0 here */
- /* Power off the backlight. Note: end-user still controls brightness */
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
- ret = backlight_update_status(ctx->bl_dev);
- if (ret)
- return ret;
+ backlight_disable(ctx->bl_dev);
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret)
@@ -316,13 +324,6 @@ static int otm8009a_prepare(struct drm_panel *panel)
ctx->prepared = true;
- /*
- * Power on the backlight. Note: end-user still controls brightness
- * Note: ctx->prepared must be true before updating the backlight.
- */
- ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(ctx->bl_dev);
-
return 0;
}
@@ -330,6 +331,11 @@ static int otm8009a_enable(struct drm_panel *panel)
{
struct otm8009a *ctx = panel_to_otm8009a(panel);
+ if (ctx->enabled)
+ return 0;
+
+ backlight_enable(ctx->bl_dev);
+
ctx->enabled = true;
return 0;
@@ -387,7 +393,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
*/
data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
data[1] = bd->props.brightness;
- otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
/* set Brightness Control & Backlight on */
data[1] = 0x24;
@@ -399,7 +405,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
/* Update Brightness Control & Backlight */
data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
- otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
return 0;
}
@@ -444,11 +450,14 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->panel.dev = dev;
ctx->panel.funcs = &otm8009a_drm_funcs;
- ctx->bl_dev = backlight_device_register(DRV_NAME "_backlight", dev, ctx,
- &otm8009a_backlight_ops, NULL);
+ ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
+ dsi->host->dev, ctx,
+ &otm8009a_backlight_ops,
+ NULL);
if (IS_ERR(ctx->bl_dev)) {
- dev_err(dev, "failed to register backlight device\n");
- return PTR_ERR(ctx->bl_dev);
+ ret = PTR_ERR(ctx->bl_dev);
+ dev_err(dev, "failed to register backlight: %d\n", ret);
+ return ret;
}
ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
@@ -466,11 +475,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
return ret;
}
- DRM_INFO(DRV_NAME "_panel %ux%u@%u %ubpp dsi %udl - ready\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh,
- mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
-
return 0;
}
@@ -481,8 +485,6 @@ static int otm8009a_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
- backlight_device_unregister(ctx->bl_dev);
-
return 0;
}
@@ -496,7 +498,7 @@ static struct mipi_dsi_driver orisetech_otm8009a_driver = {
.probe = otm8009a_probe,
.remove = otm8009a_remove,
.driver = {
- .name = DRV_NAME "_panel",
+ .name = "panel-orisetech-otm8009a",
.of_match_table = orisetech_otm8009a_of_match,
},
};
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 74a806121f80..cb4dfb98be0f 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -299,7 +299,6 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
- drm_panel_detach(&wuxga_nt->base);
wuxga_nt_panel_del(wuxga_nt);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index a188a3959f1a..6ad827b93ae1 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -823,7 +823,7 @@ static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
int ret, i;
ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
- if (ret < ARRAY_SIZE(id) || id[0] == 0x00) {
+ if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) {
dev_err(ctx->dev, "read id failed\n");
ctx->error = -EIO;
return;
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 71c09ed436ae..75f925390551 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -292,7 +292,6 @@ static int seiko_panel_remove(struct platform_device *pdev)
{
struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
- drm_panel_detach(&panel->base);
drm_panel_remove(&panel->base);
seiko_panel_disable(&panel->base);
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index 6bf8730f1a21..02fc0f5423d4 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -418,7 +418,6 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
if (err < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
- drm_panel_detach(&sharp->base);
sharp_panel_del(sharp);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 494aa9b1628a..e5cae0050f52 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -327,7 +327,6 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
- drm_panel_detach(&sharp_nt->base);
sharp_nt_panel_del(sharp_nt);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index cbf1ab404ee7..5b5d0a24e713 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -252,7 +252,7 @@ static int panel_simple_get_modes(struct drm_panel *panel)
/* probe EDID if a DDC bus is available */
if (p->ddc) {
struct edid *edid = drm_get_edid(panel->connector, p->ddc);
- drm_mode_connector_update_edid_property(panel->connector, edid);
+ drm_connector_update_edid_property(panel->connector, edid);
if (edid) {
num += drm_add_edid_modes(panel->connector, edid);
kfree(edid);
@@ -364,7 +364,6 @@ static int panel_simple_remove(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
- drm_panel_detach(&panel->base);
drm_panel_remove(&panel->base);
panel_simple_disable(&panel->base);
@@ -581,6 +580,34 @@ static const struct panel_desc auo_b133htn01 = {
},
};
+static const struct display_timing auo_g070vvn01_timings = {
+ .pixelclock = { 33300000, 34209000, 45000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 20, 40, 200 },
+ .hback_porch = { 87, 40, 1 },
+ .hsync_len = { 1, 48, 87 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 5, 13, 200 },
+ .vback_porch = { 31, 31, 29 },
+ .vsync_len = { 1, 1, 3 },
+};
+
+static const struct panel_desc auo_g070vvn01 = {
+ .timings = &auo_g070vvn01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .delay = {
+ .prepare = 200,
+ .enable = 50,
+ .disable = 50,
+ .unprepare = 1000,
+ },
+};
+
static const struct drm_display_mode auo_g104sn02_mode = {
.clock = 40000,
.hdisplay = 800,
@@ -687,7 +714,7 @@ static const struct panel_desc auo_p320hvn03 = {
.enable = 450,
.unprepare = 500,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -745,6 +772,28 @@ static const struct panel_desc avic_tm070ddh03 = {
},
};
+static const struct drm_display_mode boe_hv070wsa_mode = {
+ .clock = 40800,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 90,
+ .hsync_end = 1024 + 90 + 90,
+ .htotal = 1024 + 90 + 90 + 90,
+ .vdisplay = 600,
+ .vsync_start = 600 + 3,
+ .vsync_end = 600 + 3 + 4,
+ .vtotal = 600 + 3 + 4 + 3,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc boe_hv070wsa = {
+ .modes = &boe_hv070wsa_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 154,
+ .height = 90,
+ },
+};
+
static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
{
.clock = 71900,
@@ -857,6 +906,61 @@ static const struct panel_desc chunghwa_claa101wb01 = {
},
};
+static const struct drm_display_mode dataimage_scf0700c48ggu18_mode = {
+ .clock = 33260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 128,
+ .htotal = 800 + 40 + 128 + 88,
+ .vdisplay = 480,
+ .vsync_start = 480 + 10,
+ .vsync_end = 480 + 10 + 2,
+ .vtotal = 480 + 10 + 2 + 33,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc dataimage_scf0700c48ggu18 = {
+ .modes = &dataimage_scf0700c48ggu18_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
+static const struct display_timing dlc_dlc0700yzg_1_timing = {
+ .pixelclock = { 45000000, 51200000, 57000000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 100, 106, 113 },
+ .hback_porch = { 100, 106, 113 },
+ .hsync_len = { 100, 108, 114 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 8, 11, 15 },
+ .vback_porch = { 8, 11, 15 },
+ .vsync_len = { 9, 13, 15 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc dlc_dlc0700yzg_1 = {
+ .timings = &dlc_dlc0700yzg_1_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .delay = {
+ .prepare = 30,
+ .enable = 200,
+ .disable = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+};
+
static const struct drm_display_mode edt_et057090dhu_mode = {
.clock = 25175,
.hdisplay = 640,
@@ -909,6 +1013,18 @@ static const struct panel_desc edt_etm0700g0dh6 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
};
+static const struct panel_desc edt_etm0700g0bdh6 = {
+ .modes = &edt_etm0700g0dh6_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
.clock = 32260,
.hdisplay = 800,
@@ -1086,6 +1202,36 @@ static const struct panel_desc innolux_at070tn92 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing innolux_g070y2_l01_timing = {
+ .pixelclock = { 28000000, 29500000, 32000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 61, 91, 141 },
+ .hback_porch = { 60, 90, 140 },
+ .hsync_len = { 12, 12, 12 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 4, 9, 30 },
+ .vback_porch = { 4, 8, 28 },
+ .vsync_len = { 2, 2, 2 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc innolux_g070y2_l01 = {
+ .timings = &innolux_g070y2_l01_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .delay = {
+ .prepare = 10,
+ .enable = 100,
+ .disable = 100,
+ .unprepare = 800,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct display_timing innolux_g101ice_l01_timing = {
.pixelclock = { 60400000, 71100000, 74700000 },
.hactive = { 1280, 1280, 1280 },
@@ -1217,6 +1363,30 @@ static const struct panel_desc innolux_n156bge_l21 = {
},
};
+static const struct drm_display_mode innolux_tv123wam_mode = {
+ .clock = 206016,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 80,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 10,
+ .vtotal = 1440 + 3 + 10 + 27,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc innolux_tv123wam = {
+ .modes = &innolux_tv123wam_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 259,
+ .height = 173,
+ },
+};
+
static const struct drm_display_mode innolux_zj070na_01p_mode = {
.clock = 51501,
.hdisplay = 1024,
@@ -1247,8 +1417,8 @@ static const struct display_timing koe_tx31d200vm0baa_timing = {
.hback_porch = { 16, 36, 56 },
.hsync_len = { 8, 8, 8 },
.vactive = { 480, 480, 480 },
- .vfront_porch = { 6, 21, 33.5 },
- .vback_porch = { 6, 21, 33.5 },
+ .vfront_porch = { 6, 21, 33 },
+ .vback_porch = { 6, 21, 33 },
.vsync_len = { 8, 8, 8 },
.flags = DISPLAY_FLAGS_DE_HIGH,
};
@@ -1511,6 +1681,33 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 41,
+ .htotal = 480 + 2 + 41 + 2,
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 10,
+ .vtotal = 272 + 2 + 10 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
+ .modes = &newhaven_nhd_43_480272ef_atxl_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
+ DRM_BUS_FLAG_SYNC_POSEDGE,
+};
+
static const struct display_timing nlt_nl192108ac18_02d_timing = {
.pixelclock = { 130000000, 148350000, 163000000 },
.hactive = { 1920, 1920, 1920 },
@@ -1696,6 +1893,36 @@ static const struct panel_desc qd43003c0_40 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing rocktech_rk070er9427_timing = {
+ .pixelclock = { 26400000, 33300000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 46, 46, 46 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 1, 1 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc rocktech_rk070er9427 = {
+ .timings = &rocktech_rk070er9427_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .delay = {
+ .prepare = 41,
+ .enable = 50,
+ .unprepare = 41,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
.clock = 271560,
.hdisplay = 2560,
@@ -1764,6 +1991,30 @@ static const struct panel_desc samsung_ltn140at29_301 = {
},
};
+static const struct drm_display_mode sharp_lq035q7db03_mode = {
+ .clock = 5500,
+ .hdisplay = 240,
+ .hsync_start = 240 + 16,
+ .hsync_end = 240 + 16 + 7,
+ .htotal = 240 + 16 + 7 + 5,
+ .vdisplay = 320,
+ .vsync_start = 320 + 9,
+ .vsync_end = 320 + 9 + 1,
+ .vtotal = 320 + 9 + 1 + 7,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc sharp_lq035q7db03 = {
+ .modes = &sharp_lq035q7db03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 54,
+ .height = 72,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct display_timing sharp_lq101k1ly04_timing = {
.pixelclock = { 60000000, 65000000, 80000000 },
.hactive = { 1280, 1280, 1280 },
@@ -2095,6 +2346,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
+ .compatible = "auo,g070vvn01",
+ .data = &auo_g070vvn01,
+ }, {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
}, {
@@ -2113,6 +2367,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "avic,tm070ddh03",
.data = &avic_tm070ddh03,
}, {
+ .compatible = "boe,hv070wsa-100",
+ .data = &boe_hv070wsa,
+ }, {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
@@ -2125,6 +2382,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "chunghwa,claa101wb01",
.data = &chunghwa_claa101wb01
}, {
+ .compatible = "dataimage,scf0700c48ggu18",
+ .data = &dataimage_scf0700c48ggu18,
+ }, {
+ .compatible = "dlc,dlc0700yzg-1",
+ .data = &dlc_dlc0700yzg_1,
+ }, {
.compatible = "edt,et057090dhu",
.data = &edt_et057090dhu,
}, {
@@ -2134,6 +2397,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etm0700g0dh6",
.data = &edt_etm0700g0dh6,
}, {
+ .compatible = "edt,etm0700g0bdh6",
+ .data = &edt_etm0700g0bdh6,
+ }, {
+ .compatible = "edt,etm0700g0edh6",
+ .data = &edt_etm0700g0bdh6,
+ }, {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
@@ -2155,10 +2424,13 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,at070tn92",
.data = &innolux_at070tn92,
}, {
- .compatible ="innolux,g101ice-l01",
+ .compatible = "innolux,g070y2-l01",
+ .data = &innolux_g070y2_l01,
+ }, {
+ .compatible = "innolux,g101ice-l01",
.data = &innolux_g101ice_l01
}, {
- .compatible ="innolux,g121i1-l01",
+ .compatible = "innolux,g121i1-l01",
.data = &innolux_g121i1_l01
}, {
.compatible = "innolux,g121x1-l03",
@@ -2170,6 +2442,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
+ .compatible = "innolux,tv123wam",
+ .data = &innolux_tv123wam,
+ }, {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
@@ -2206,6 +2481,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
+ .compatible = "newhaven,nhd-4.3-480272ef-atxl",
+ .data = &newhaven_nhd_43_480272ef_atxl,
+ }, {
.compatible = "nlt,nl192108ac18-02d",
.data = &nlt_nl192108ac18_02d,
}, {
@@ -2227,6 +2505,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "qiaodian,qd43003c0-40",
.data = &qd43003c0_40,
}, {
+ .compatible = "rocktech,rk070er9427",
+ .data = &rocktech_rk070er9427,
+ }, {
.compatible = "samsung,lsn122dl01-c01",
.data = &samsung_lsn122dl01_c01,
}, {
@@ -2236,6 +2517,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "samsung,ltn140at29-301",
.data = &samsung_ltn140at29_301,
}, {
+ .compatible = "sharp,lq035q7db03",
+ .data = &sharp_lq035q7db03,
+ }, {
.compatible = "sharp,lq101k1ly04",
.data = &sharp_lq101k1ly04,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 358c64ef1922..74284e5afc5d 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -419,7 +419,6 @@ static int st7789v_remove(struct spi_device *spi)
{
struct st7789v *ctx = spi_get_drvdata(spi);
- drm_panel_detach(&ctx->panel);
drm_panel_remove(&ctx->panel);
if (ctx->backlight)
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index 19a8189dc54f..0c70f0e91d21 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -4,6 +4,7 @@ pl111_drm-y += pl111_display.o \
pl111_drv.o
pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
+pl111_drm-$(CONFIG_ARCH_NOMADIK) += pl111_nomadik.o
pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
obj-$(CONFIG_DRM_PL111) += pl111_drm.o
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 19b0d006a54a..754f6b25f265 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -63,7 +63,7 @@ pl111_mode_valid(struct drm_crtc *crtc,
* We use the pixelclock to also account for interlaced modes, the
* resulting bandwidth is in bytes per second.
*/
- bw = mode->clock * 1000; /* In Hz */
+ bw = mode->clock * 1000ULL; /* In Hz */
bw = bw * mode->hdisplay * mode->vdisplay * cpp;
bw = div_u64(bw, mode->htotal * mode->vtotal);
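A worked example of the bandwidth formula above, using illustrative numbers that are not taken from the patch:

/*
 * Hypothetical 1024x768 mode: pixel clock 65 MHz, htotal = 1344,
 * vtotal = 806, 4 bytes per pixel:
 *
 *   bw = 65,000,000 * (1024 * 768 * 4) / (1344 * 806)
 *      ~= 188.8 MB/s
 *
 * The 1000ULL suffix forces the very first multiplication to be done
 * in 64 bits, so none of the intermediate products can wrap in 32-bit
 * arithmetic.
 */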
@@ -223,48 +223,84 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
/* Hard-code TFT panel */
cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
+ /* On the ST Micro variant, assume all 24 bits are connected */
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_CDWID_24;
- /* Note that the the hardware's format reader takes 'r' from
+ /*
+ * Note that the ARM hardware's format reader takes 'r' from
* the low bit, while DRM formats list channels from high bit
- * to low bit as you read left to right.
+ * to low bit as you read left to right. The ST Micro version of
+ * the PL110 (LCDC) however uses the standard DRM format.
*/
switch (fb->format->format) {
+ case DRM_FORMAT_BGR888:
+ /* Only supported on the ST Micro variant */
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_LCDBPP24_PACKED | CNTL_BGR;
+ break;
+ case DRM_FORMAT_RGB888:
+ /* Only supported on the ST Micro variant */
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_LCDBPP24_PACKED;
+ break;
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR8888:
- cntl |= CNTL_LCDBPP24;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP24 | CNTL_BGR;
+ else
+ cntl |= CNTL_LCDBPP24;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
- cntl |= CNTL_LCDBPP24 | CNTL_BGR;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP24;
+ else
+ cntl |= CNTL_LCDBPP24 | CNTL_BGR;
break;
case DRM_FORMAT_BGR565:
if (priv->variant->is_pl110)
cntl |= CNTL_LCDBPP16;
+ else if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP16 | CNTL_ST_1XBPP_565 | CNTL_BGR;
else
cntl |= CNTL_LCDBPP16_565;
break;
case DRM_FORMAT_RGB565:
if (priv->variant->is_pl110)
- cntl |= CNTL_LCDBPP16;
+ cntl |= CNTL_LCDBPP16 | CNTL_BGR;
+ else if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP16 | CNTL_ST_1XBPP_565;
else
- cntl |= CNTL_LCDBPP16_565;
- cntl |= CNTL_BGR;
+ cntl |= CNTL_LCDBPP16_565 | CNTL_BGR;
break;
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_XBGR1555:
cntl |= CNTL_LCDBPP16;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_5551 | CNTL_BGR;
break;
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
- cntl |= CNTL_LCDBPP16 | CNTL_BGR;
+ cntl |= CNTL_LCDBPP16;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_5551;
+ else
+ cntl |= CNTL_BGR;
break;
case DRM_FORMAT_ABGR4444:
case DRM_FORMAT_XBGR4444:
cntl |= CNTL_LCDBPP16_444;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_444 | CNTL_BGR;
break;
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_XRGB4444:
- cntl |= CNTL_LCDBPP16_444 | CNTL_BGR;
+ cntl |= CNTL_LCDBPP16_444;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_444;
+ else
+ cntl |= CNTL_BGR;
break;
default:
WARN_ONCE(true, "Unknown FB format 0x%08x\n",
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index ce4501d0ab48..1aa015ccacef 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -36,11 +36,14 @@ struct drm_minor;
* struct pl111_variant_data - encodes IP differences
* @name: the name of this variant
* @is_pl110: this is the early PL110 variant
+ * @is_lcdc: this is the ST Microelectronics Nomadik LCDC variant
* @external_bgr: this is the Versatile Pl110 variant with external
* BGR/RGB routing
* @broken_clockdivider: the clock divider is broken and we need to
* use the supplied clock directly
* @broken_vblank: the vblank IRQ is broken on this variant
+ * @st_bitmux_control: this variant is using the ST Micro bitmux
+ * extensions to the control register
* @formats: array of supported pixel formats on this variant
* @nformats: the length of the array of supported pixel formats
* @fb_bpp: desired bits per pixel on the default framebuffer
@@ -48,9 +51,11 @@ struct drm_minor;
struct pl111_variant_data {
const char *name;
bool is_pl110;
+ bool is_lcdc;
bool external_bgr;
bool broken_clockdivider;
bool broken_vblank;
+ bool st_bitmux_control;
const u32 *formats;
unsigned int nformats;
unsigned int fb_bpp;
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 454ff0804642..47fe30223444 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -75,6 +75,7 @@
#include "pl111_drm.h"
#include "pl111_versatile.h"
+#include "pl111_nomadik.h"
#define DRIVER_DESC "DRM module for PL111"
@@ -249,6 +250,8 @@ static struct drm_driver pl111_drm_driver = {
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = pl111_debugfs_init,
@@ -288,8 +291,8 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
priv->memory_bw = 0;
}
- /* The two variants swap this register */
- if (variant->is_pl110) {
+ /* The two main variants swap this register */
+ if (variant->is_pl110 || variant->is_lcdc) {
priv->ienb = CLCD_PL110_IENB;
priv->ctrl = CLCD_PL110_CNTL;
} else {
@@ -301,13 +304,15 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
if (IS_ERR(priv->regs)) {
dev_err(dev, "%s failed mmio\n", __func__);
ret = PTR_ERR(priv->regs);
- goto dev_unref;
+ goto dev_put;
}
/* This may override some variant settings */
ret = pl111_versatile_init(dev, priv);
if (ret)
- goto dev_unref;
+ goto dev_put;
+
+ pl111_nomadik_init(dev);
/* turn off interrupts before requesting the irq */
writel(0, priv->regs + priv->ienb);
@@ -321,16 +326,16 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
ret = pl111_modeset_init(drm);
if (ret != 0)
- goto dev_unref;
+ goto dev_put;
ret = drm_dev_register(drm, 0);
if (ret < 0)
- goto dev_unref;
+ goto dev_put;
return 0;
-dev_unref:
- drm_dev_unref(drm);
+dev_put:
+ drm_dev_put(drm);
of_reserved_mem_device_release(dev);
return ret;
@@ -347,7 +352,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
of_reserved_mem_device_release(dev);
return 0;
@@ -400,16 +405,50 @@ static const struct pl111_variant_data pl111_variant = {
.fb_bpp = 32,
};
+static const u32 pl110_nomadik_pixel_formats[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+};
+
+static const struct pl111_variant_data pl110_nomadik_variant = {
+ .name = "LCDC (PL110 Nomadik)",
+ .formats = pl110_nomadik_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_nomadik_pixel_formats),
+ .is_lcdc = true,
+ .st_bitmux_control = true,
+ .broken_vblank = true,
+ .fb_bpp = 16,
+};
+
static const struct amba_id pl111_id_table[] = {
{
.id = 0x00041110,
.mask = 0x000fffff,
- .data = (void*)&pl110_variant,
+ .data = (void *)&pl110_variant,
+ },
+ {
+ .id = 0x00180110,
+ .mask = 0x00fffffe,
+ .data = (void *)&pl110_nomadik_variant,
},
{
.id = 0x00041111,
.mask = 0x000fffff,
- .data = (void*)&pl111_variant,
+ .data = (void *)&pl111_variant,
},
{0, 0},
};
diff --git a/drivers/gpu/drm/pl111/pl111_nomadik.c b/drivers/gpu/drm/pl111/pl111_nomadik.c
new file mode 100644
index 000000000000..6f385e59be22
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_nomadik.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include "pl111_nomadik.h"
+
+#define PMU_CTRL_OFFSET 0x0000
+#define PMU_CTRL_LCDNDIF BIT(26)
+
+void pl111_nomadik_init(struct device *dev)
+{
+ struct regmap *pmu_regmap;
+
+ /*
+ * Just bail out if this is not found; we could be running
+ * multiplatform on something other than Nomadik.
+ */
+ pmu_regmap =
+ syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu");
+ if (IS_ERR(pmu_regmap))
+ return;
+
+ /*
+ * This bit in the PMU controller multiplexes the two graphics
+ * blocks found in the Nomadik STn8815. The other one is called
+ * MDIF (Master Display Interface) and gets muxed out here.
+ */
+ regmap_update_bits(pmu_regmap,
+ PMU_CTRL_OFFSET,
+ PMU_CTRL_LCDNDIF,
+ 0);
+ dev_info(dev, "set Nomadik PMU mux to CLCD mode\n");
+}
+EXPORT_SYMBOL_GPL(pl111_nomadik_init);
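For reference, the read-modify-write semantics relied on above (standard regmap behaviour):

/*
 * regmap_update_bits(map, reg, mask, val) computes
 * new = (old & ~mask) | (val & mask), so passing PMU_CTRL_LCDNDIF as
 * the mask with a value of 0 clears exactly that bit and leaves the
 * rest of the PMU_CTRL register untouched.
 */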
diff --git a/drivers/gpu/drm/pl111/pl111_nomadik.h b/drivers/gpu/drm/pl111/pl111_nomadik.h
new file mode 100644
index 000000000000..19d663d46353
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_nomadik.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/device.h>
+
+#ifndef PL111_NOMADIK_H
+#define PL111_NOMADIK_H
+
+#ifdef CONFIG_ARCH_NOMADIK
+
+void pl111_nomadik_init(struct device *dev);
+
+#else
+
+static inline void pl111_nomadik_init(struct device *dev)
+{
+}
+
+#endif
+
+#endif /* PL111_NOMADIK_H */
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 768207fbbae3..0570c6826bff 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1086,7 +1086,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
/* we get HPD via client monitors config */
connector->polled = DRM_CONNECTOR_POLL_HPD;
encoder->possible_crtcs = 1 << num_output;
- drm_mode_connector_attach_encoder(&qxl_output->base,
+ drm_connector_attach_encoder(&qxl_output->base,
&qxl_output->enc);
drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 7cb214577275..e37f0097f744 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -50,12 +50,6 @@ static const char *qxl_get_timeline_name(struct dma_fence *fence)
return "release";
}
-static bool qxl_nop_signaling(struct dma_fence *fence)
-{
- /* fences are always automatically signaled, so just pretend we did this.. */
- return true;
-}
-
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
@@ -119,7 +113,6 @@ signaled:
static const struct dma_fence_ops qxl_fence_ops = {
.get_driver_name = qxl_get_driver_name,
.get_timeline_name = qxl_get_timeline_name,
- .enable_signaling = qxl_nop_signaling,
.wait = qxl_fence_wait,
};
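A note on why the nop callback can simply be dropped (dma_fence core behaviour around this kernel version, not stated in the patch):

/*
 * The dma_fence core treats a missing .enable_signaling callback as
 * "signaling is always enabled", which is exactly what the deleted
 * nop implementation pretended, so behaviour is unchanged.
 */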
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index b9302c918271..d587779a80b4 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,19 +5676,29 @@ int ci_dpm_init(struct radeon_device *rdev)
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
+ enum pci_bus_speed speed_cap;
+ struct pci_dev *root = rdev->pdev->bus->self;
int ret;
- u32 mask;
pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
if (pi == NULL)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
pi->sys_pcie_mask = 0;
- else
- pi->sys_pcie_mask = mask;
+ } else {
+ if (speed_cap == PCIE_SPEED_8_0GT)
+ pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50 |
+ RADEON_PCIE_SPEED_80;
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50;
+ else
+ pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
+ }
pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
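A reading aid for the capability mask built above (my gloss):

/*
 * The mask is cumulative: a link that trains at 8.0 GT/s can also run
 * at 5.0 and 2.5 GT/s, so each higher pcie_get_speed_cap() result
 * implies all of the lower RADEON_PCIE_SPEED_* bits. The same mapping
 * is repeated in si_dpm.c further below.
 */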
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 7c73bc7e2f85..ebce4601a305 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9499,9 +9499,10 @@ int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
+ enum pci_bus_speed speed_cap;
int bridge_pos, gpu_pos;
- u32 speed_cntl, mask, current_data_rate;
- int ret, i;
+ u32 speed_cntl, current_data_rate;
+ int i;
u16 tmp16;
if (pci_is_root_bus(rdev->pdev->bus))
@@ -9516,23 +9517,24 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
if (!(rdev->flags & RADEON_IS_PCIE))
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN)
return;
- if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ if ((speed_cap != PCIE_SPEED_8_0GT) &&
+ (speed_cap != PCIE_SPEED_5_0GT))
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
LC_CURRENT_DATA_RATE_SHIFT;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
- } else if (mask & DRM_PCIE_SPEED_50) {
+ } else if (speed_cap == PCIE_SPEED_5_0GT) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -9548,7 +9550,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
if (!gpu_pos)
return;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
/* re-try equalization if gen3 is not already enabled */
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
@@ -9636,9 +9638,9 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
- if (mask & DRM_PCIE_SPEED_80)
+ if (speed_cap == PCIE_SPEED_8_0GT)
tmp16 |= 3; /* gen3 */
- else if (mask & DRM_PCIE_SPEED_50)
+ else if (speed_cap == PCIE_SPEED_5_0GT)
tmp16 |= 2; /* gen2 */
else
tmp16 |= 1; /* gen1 */
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 73d4c5348116..5e044c98fca2 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1327,9 +1327,9 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
case RADEON_PCIE_GEN3:
return RADEON_PCIE_GEN3;
default:
- if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
+ if ((sys_mask & RADEON_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
return RADEON_PCIE_GEN3;
- else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
+ else if ((sys_mask & RADEON_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
return RADEON_PCIE_GEN2;
else
return RADEON_PCIE_GEN1;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4a2eb409aacc..1a6f6edb3515 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1653,6 +1653,10 @@ struct radeon_pm {
struct radeon_dpm dpm;
};
+#define RADEON_PCIE_SPEED_25 1
+#define RADEON_PCIE_SPEED_50 2
+#define RADEON_PCIE_SPEED_80 4
+
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2aea2bdff99b..414642e5b7a3 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -244,23 +244,15 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *best_encoder = NULL;
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *best_encoder;
+ struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
int i;
best_encoder = connector_funcs->best_encoder(connector);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -270,7 +262,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
radeon_atombios_connected_scratch_regs(connector, encoder, connected);
else
radeon_combios_connected_scratch_regs(connector, encoder, connected);
-
}
}
@@ -279,17 +270,11 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
+
return NULL;
}
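A reading aid for the conversions throughout this file (my gloss of the helper, assuming its 4.19-era definition):

/*
 * drm_connector_for_each_possible_encoder() wraps essentially the
 * open-coded pattern deleted above: walk encoder_ids[], stop at the
 * first empty slot and look each id up via drm_encoder_find(), so
 * these hunks are behaviour-preserving cleanups.
 */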
@@ -383,20 +368,23 @@ static int radeon_ddc_get_modes(struct drm_connector *connector)
int ret;
if (radeon_connector->edid) {
- drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
+ drm_connector_update_edid_property(connector, radeon_connector->edid);
ret = drm_add_edid_modes(connector, radeon_connector->edid);
return ret;
}
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
return 0;
}
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ struct drm_encoder *encoder;
+ int i;
+
+ /* pick the first one */
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -436,19 +424,19 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct drm_connector *conflict;
struct radeon_connector *radeon_conflict;
- int i;
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
+ struct drm_encoder *enc;
+ int i;
+
if (conflict == connector)
continue;
radeon_conflict = to_radeon_connector(conflict);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (conflict->encoder_ids[i] == 0)
- break;
+ drm_connector_for_each_possible_encoder(conflict, enc, i) {
/* if the IDs match */
- if (conflict->encoder_ids[i] == encoder->base.id) {
+ if (enc == encoder) {
if (conflict->status != connector_status_connected)
continue;
@@ -1256,7 +1244,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
const struct drm_encoder_helper_funcs *encoder_funcs;
- int i, r;
+ int r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1374,15 +1362,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (radeon_connector->dac_load_detect) {
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ int i;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1458,18 +1440,11 @@ exit:
/* okay need to be smart in here about which encoder to pick */
static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1484,8 +1459,9 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -1628,14 +1604,7 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
struct radeon_encoder *radeon_encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1657,14 +1626,7 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
int i;
bool found = false;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index cd8a3ee16649..f920be236cc9 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -195,11 +195,11 @@ static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector)
radeon_connector->edid = edid;
DRM_DEBUG_KMS("edid retrieved %p\n", edid);
if (radeon_connector->edid) {
- drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ drm_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
return ret;
}
- drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+ drm_connector_update_edid_property(&radeon_connector->base, NULL);
return ret;
}
@@ -290,7 +290,7 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
return connector;
}
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c6ee80216cf4..c341fb2a5b56 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -211,7 +211,7 @@ radeon_link_encoder_connector(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & radeon_connector->devices) {
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
radeon_encoder_add_backlight(radeon_encoder, connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index abd24975c9b1..f8b35df44c60 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -118,19 +118,27 @@ static void radeon_mn_release(struct mmu_notifier *mn,
* We block for all BOs between start and end to be idle and
* unmap them by move them into system domain again.
*/
-static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
+static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
struct ttm_operation_ctx ctx = { false, false };
struct interval_tree_node *it;
+ int ret = 0;
/* notification is exclusive, but interval is inclusive */
end -= 1;
- mutex_lock(&rmn->lock);
+ /* TODO we should be able to split locking for interval tree and
+ * the teardown.
+ */
+ if (blockable)
+ mutex_lock(&rmn->lock);
+ else if (!mutex_trylock(&rmn->lock))
+ return -EAGAIN;
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
@@ -138,6 +146,11 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct radeon_bo *bo;
long r;
+ if (!blockable) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
node = container_of(it, struct radeon_mn_node, it);
it = interval_tree_iter_next(it, start, end);
@@ -166,7 +179,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
}
}
+out_unlock:
mutex_unlock(&rmn->lock);
+
+ return ret;
}
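/*
 * (Editorial note) callers such as the OOM reaper invoke
 * invalidate_range_start() from a context that must not sleep and pass
 * blockable = false; returning -EAGAIN there tells the core to back
 * off and retry instead of blocking on rmn->lock or on busy BOs.
 */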
static const struct mmu_notifier_ops radeon_mn_ops = {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index edbb4cd519fd..ba2fd295697f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -307,7 +307,7 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
if (bo == NULL)
return NULL;
- ttm_bo_reference(&bo->tbo);
+ ttm_bo_get(&bo->tbo);
return bo;
}
@@ -320,9 +320,8 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ ttm_bo_put(tbo);
+ *bo = NULL;
}
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 48f4b273e316..0c7f228db6e3 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2009 VMware, Inc.
*
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8689fcca051c..cbb67e9ffb3a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -947,11 +947,11 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
-static int radeon_ttm_fault(struct vm_fault *vmf)
+static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
struct radeon_device *rdev;
- int r;
+ vm_fault_t ret;
bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
if (bo == NULL) {
@@ -959,9 +959,9 @@ static int radeon_ttm_fault(struct vm_fault *vmf)
}
rdev = radeon_get_rdev(bo->bdev);
down_read(&rdev->pm.mclk_lock);
- r = ttm_vm_ops->fault(vmf);
+ ret = ttm_vm_ops->fault(vmf);
up_read(&rdev->pm.mclk_lock);
- return r;
+ return ret;
}
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 1907c950d76f..85c604d29235 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7082,9 +7082,10 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
+ enum pci_bus_speed speed_cap;
int bridge_pos, gpu_pos;
- u32 speed_cntl, mask, current_data_rate;
- int ret, i;
+ u32 speed_cntl, current_data_rate;
+ int i;
u16 tmp16;
if (pci_is_root_bus(rdev->pdev->bus))
@@ -7099,23 +7100,24 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
if (!(rdev->flags & RADEON_IS_PCIE))
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN)
return;
- if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ if ((speed_cap != PCIE_SPEED_8_0GT) &&
+ (speed_cap != PCIE_SPEED_5_0GT))
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
LC_CURRENT_DATA_RATE_SHIFT;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
- } else if (mask & DRM_PCIE_SPEED_50) {
+ } else if (speed_cap == PCIE_SPEED_5_0GT) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -7131,7 +7133,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
if (!gpu_pos)
return;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
/* re-try equalization if gen3 is not already enabled */
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
@@ -7219,9 +7221,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
- if (mask & DRM_PCIE_SPEED_80)
+ if (speed_cap == PCIE_SPEED_8_0GT)
tmp16 |= 3; /* gen3 */
- else if (mask & DRM_PCIE_SPEED_50)
+ else if (speed_cap == PCIE_SPEED_5_0GT)
tmp16 |= 2; /* gen2 */
else
tmp16 |= 1; /* gen1 */
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index fea88078cf8e..8fb60b3af015 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,8 +6899,9 @@ int si_dpm_init(struct radeon_device *rdev)
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
+ enum pci_bus_speed speed_cap;
+ struct pci_dev *root = rdev->pdev->bus->self;
int ret;
- u32 mask;
si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
if (si_pi == NULL)
@@ -6910,11 +6911,20 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
si_pi->sys_pcie_mask = 0;
- else
- si_pi->sys_pcie_mask = mask;
+ } else {
+ if (speed_cap == PCIE_SPEED_8_0GT)
+ si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50 |
+ RADEON_PCIE_SPEED_80;
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50;
+ else
+ si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
+ }
si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(rdev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 155ad840f3c5..4c39de3f4f0f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -353,7 +353,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge)
drm_connector_helper_add(connector, &rcar_lvds_conn_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
return ret;
@@ -434,8 +434,8 @@ static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
ret = -EPROBE_DEFER;
} else {
lvds->panel = of_drm_find_panel(remote);
- if (!lvds->panel)
- ret = -EPROBE_DEFER;
+ if (IS_ERR(lvds->panel))
+ ret = PTR_ERR(lvds->panel);
}
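/*
 * (Editorial note) of_drm_find_panel() was converted around this time
 * to return ERR_PTR() codes, including -EPROBE_DEFER, instead of NULL,
 * which is why callers now test with IS_ERR() and propagate the code;
 * the rockchip dw-mipi-dsi hunk below follows the same pattern.
 */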
done:
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index c6fbdcd87c16..8ad0d773dc33 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -275,7 +275,7 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
dp->sink_has_audio = drm_detect_monitor_audio(edid);
ret = drm_add_edid_modes(connector, edid);
if (ret)
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
edid);
}
mutex_unlock(&dp->lock);
@@ -1062,7 +1062,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
DRM_ERROR("failed to attach connector and encoder\n");
goto err_free_connector;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index eb3042c6d1b2..3105965fc260 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -792,7 +792,6 @@ err_config_video:
int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
{
- u32 val;
int ret;
ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
@@ -801,11 +800,7 @@ int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
return ret;
}
- val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
- writel(val, dp->regs + SPDIF_CTRL_ADDR);
+ writel(0, dp->regs + SPDIF_CTRL_ADDR);
/* clear the audio config and reset */
writel(0, dp->regs + AUDIO_SRC_CNTL);
@@ -929,12 +924,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
{
u32 val;
- val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
- writel(val, dp->regs + SPDIF_CTRL_ADDR);
-
writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
@@ -942,9 +931,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
writel(val, dp->regs + SPDIF_CTRL_ADDR);
clk_prepare_enable(dp->spdif_clk);
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index d53d5a09547f..662b6cb5d3f0 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -595,7 +595,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (dsi->panel)
+ if (!IS_ERR(dsi->panel))
return drm_panel_attach(dsi->panel, &dsi->connector);
return -EINVAL;
@@ -1129,7 +1129,7 @@ static int dw_mipi_dsi_register(struct drm_device *drm,
&dw_mipi_dsi_atomic_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 88d0774c97bd..1c02b3e61299 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -565,7 +565,7 @@ static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
if (edid) {
hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
@@ -634,7 +634,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
- drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+ drm_connector_attach_encoder(&hdmi->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index d4f4118b482d..ea18cb2a76c0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -18,52 +18,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_psr.h"
-#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
-
-struct rockchip_drm_fb {
- struct drm_framebuffer fb;
- struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
-};
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane)
-{
- struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
-
- if (plane >= ROCKCHIP_MAX_FB_BUFFER)
- return NULL;
-
- return rk_fb->obj[plane];
-}
-
-static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
-{
- struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
- int i;
-
- for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
- drm_gem_object_put_unlocked(rockchip_fb->obj[i]);
-
- drm_framebuffer_cleanup(fb);
- kfree(rockchip_fb);
-}
-
-static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
-
- return drm_gem_handle_create(file_priv,
- rockchip_fb->obj[0], handle);
-}
-
static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int flags, unsigned int color,
@@ -75,46 +36,45 @@ static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
}
static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
- .destroy = rockchip_drm_fb_destroy,
- .create_handle = rockchip_drm_fb_create_handle,
- .dirty = rockchip_drm_fb_dirty,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = rockchip_drm_fb_dirty,
};
-static struct rockchip_drm_fb *
+static struct drm_framebuffer *
rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
int ret;
int i;
- rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
- if (!rockchip_fb)
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(dev, &rockchip_fb->fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
for (i = 0; i < num_planes; i++)
- rockchip_fb->obj[i] = obj[i];
+ fb->obj[i] = obj[i];
- ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
- &rockchip_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, fb, &rockchip_drm_fb_funcs);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"Failed to initialize framebuffer: %d\n",
ret);
- kfree(rockchip_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return rockchip_fb;
+ return fb;
}
static struct drm_framebuffer *
rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
struct drm_gem_object *obj;
unsigned int hsub;
@@ -153,13 +113,13 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
objs[i] = obj;
}
- rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
- if (IS_ERR(rockchip_fb)) {
- ret = PTR_ERR(rockchip_fb);
+ fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto err_gem_object_unreference;
}
- return &rockchip_fb->fb;
+ return fb;
err_gem_object_unreference:
for (i--; i >= 0; i--)
@@ -242,13 +202,13 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
- rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
- if (IS_ERR(rockchip_fb))
- return ERR_CAST(rockchip_fb);
+ fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
+ if (IS_ERR(fb))
+ return ERR_CAST(fb);
- return &rockchip_fb->fb;
+ return fb;
}
void rockchip_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
index 2fe47f1ee98f..f1265cb1aee8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -22,7 +22,4 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
void rockchip_drm_mode_config_init(struct drm_device *dev);
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane);
#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 2121345a61af..1359e5c773e4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -243,18 +243,6 @@ static enum vop_data_format vop_convert_format(uint32_t format)
}
}
-static bool is_yuv_support(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV24:
- return true;
- default:
- return false;
- }
-}
-
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
uint32_t dst, bool is_horizontal,
int vsu_mode, int *vskiplines)
@@ -298,7 +286,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
uint16_t cbcr_ver_scl_mode = SCALE_NONE;
int hsub = drm_format_horz_chroma_subsampling(pixel_format);
int vsub = drm_format_vert_chroma_subsampling(pixel_format);
- bool is_yuv = is_yuv_support(pixel_format);
+ const struct drm_format_info *info;
+ bool is_yuv = false;
uint16_t cbcr_src_w = src_w / hsub;
uint16_t cbcr_src_h = src_h / vsub;
uint16_t vsu_mode;
@@ -306,6 +295,11 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
uint32_t val;
int vskiplines;
+ info = drm_format_info(pixel_format);
+
+ if (info->is_yuv)
+ is_yuv = true;
+
if (dst_w > 3840) {
DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
return;
@@ -486,6 +480,31 @@ static void vop_line_flag_irq_disable(struct vop *vop)
spin_unlock_irqrestore(&vop->irq_lock, flags);
}
+static int vop_core_clks_enable(struct vop *vop)
+{
+ int ret;
+
+ ret = clk_enable(vop->hclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(vop->aclk);
+ if (ret < 0)
+ goto err_disable_hclk;
+
+ return 0;
+
+err_disable_hclk:
+ clk_disable(vop->hclk);
+ return ret;
+}
+
+static void vop_core_clks_disable(struct vop *vop)
+{
+ clk_disable(vop->aclk);
+ clk_disable(vop->hclk);
+}
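+
+ /*
+ * (Editorial note) these helpers stick to clk_enable()/clk_disable()
+ * rather than the _prepare variants, which may sleep; assuming the
+ * clocks are prepared once at init time, that keeps the helpers safe
+ * to call from the interrupt handler further below.
+ */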
+
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
@@ -497,17 +516,13 @@ static int vop_enable(struct drm_crtc *crtc)
return ret;
}
- ret = clk_enable(vop->hclk);
+ ret = vop_core_clks_enable(vop);
if (WARN_ON(ret < 0))
goto err_put_pm_runtime;
ret = clk_enable(vop->dclk);
if (WARN_ON(ret < 0))
- goto err_disable_hclk;
-
- ret = clk_enable(vop->aclk);
- if (WARN_ON(ret < 0))
- goto err_disable_dclk;
+ goto err_disable_core;
/*
* Slave iommu shares power, irq and clock with vop. It was associated
@@ -519,7 +534,7 @@ static int vop_enable(struct drm_crtc *crtc)
if (ret) {
DRM_DEV_ERROR(vop->dev,
"failed to attach dma mapping, %d\n", ret);
- goto err_disable_aclk;
+ goto err_disable_dclk;
}
spin_lock(&vop->reg_lock);
@@ -552,18 +567,14 @@ static int vop_enable(struct drm_crtc *crtc)
spin_unlock(&vop->reg_lock);
- enable_irq(vop->irq);
-
drm_crtc_vblank_on(crtc);
return 0;
-err_disable_aclk:
- clk_disable(vop->aclk);
err_disable_dclk:
clk_disable(vop->dclk);
-err_disable_hclk:
- clk_disable(vop->hclk);
+err_disable_core:
+ vop_core_clks_disable(vop);
err_put_pm_runtime:
pm_runtime_put_sync(vop->dev);
return ret;
@@ -599,8 +610,6 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
vop_dsp_hold_valid_irq_disable(vop);
- disable_irq(vop->irq);
-
vop->is_enabled = false;
/*
@@ -609,8 +618,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
clk_disable(vop->dclk);
- clk_disable(vop->aclk);
- clk_disable(vop->hclk);
+ vop_core_clks_disable(vop);
pm_runtime_put(vop->dev);
mutex_unlock(&vop->vop_lock);
@@ -666,7 +674,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
* Src.x1 can be odd after clipping, but the yuv plane start point
* needs to be aligned to 2 pixels.
*/
- if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+ if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) {
DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
}
@@ -728,7 +736,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
return;
}
- obj = rockchip_fb_get_gem_obj(fb, 0);
+ obj = fb->obj[0];
rk_obj = to_rockchip_obj(obj);
actual_w = drm_rect_width(src) >> 16;
@@ -753,12 +761,12 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
VOP_WIN_SET(vop, win, format, format);
VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
- if (is_yuv_support(fb->format->format)) {
+ if (fb->format->is_yuv) {
int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
int bpp = fb->format->cpp[1];
- uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+ uv_obj = fb->obj[1];
rk_uv_obj = to_rockchip_obj(uv_obj);
offset = (src->x1 >> 16) * bpp / hsub;
@@ -1178,6 +1186,18 @@ static irqreturn_t vop_isr(int irq, void *data)
int ret = IRQ_NONE;
/*
+ * The irq is shared with the iommu. If the runtime-pm state of the
+ * vop-device is disabled, the irq has to be targeted at the iommu.
+ */
+ if (!pm_runtime_get_if_in_use(vop->dev))
+ return IRQ_NONE;
+
+ if (vop_core_clks_enable(vop)) {
+ DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
+ goto out;
+ }
+
+ /*
* interrupt register has interrupt status, enable and clear bits, we
* must hold irq_lock to avoid a race with enable/disable_vblank().
*/
@@ -1192,7 +1212,7 @@ static irqreturn_t vop_isr(int irq, void *data)
/* This is expected for vop iommu irqs, since the irq is shared */
if (!active_irqs)
- return IRQ_NONE;
+ goto out_disable;
if (active_irqs & DSP_HOLD_VALID_INTR) {
complete(&vop->dsp_hold_completion);
@@ -1218,6 +1238,10 @@ static irqreturn_t vop_isr(int irq, void *data)
DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
active_irqs);
+out_disable:
+ vop_core_clks_disable(vop);
+out:
+ pm_runtime_put(vop->dev);
return ret;
}
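
The guard added at the top of vop_isr() generalizes to any interrupt line shared with a device in another power domain: take a runtime-pm reference only if the device is already in use, keep the clocks on just long enough to touch the status registers, and report IRQ_NONE otherwise. A minimal sketch of the pattern, where my_dev, my_core_clks_enable()/my_core_clks_disable() and MY_IRQ_STATUS are hypothetical stand-ins for the vop specifics:

static irqreturn_t my_shared_isr(int irq, void *data)
{
	struct my_dev *mydev = data;
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	/* Runtime-suspended: the interrupt belongs to the other user. */
	if (!pm_runtime_get_if_in_use(mydev->dev))
		return IRQ_NONE;

	/* Registers are only reachable with the core clocks running. */
	if (my_core_clks_enable(mydev))
		goto out_put;

	status = readl(mydev->regs + MY_IRQ_STATUS);
	if (status)
		ret = IRQ_HANDLED;

	my_core_clks_disable(mydev);
out_put:
	pm_runtime_put(mydev->dev);
	return ret;
}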
@@ -1278,7 +1302,7 @@ static int vop_create_crtc(struct vop *vop)
for (i = 0; i < vop_data->win_size; i++) {
struct vop_win *vop_win = &vop->win[i];
const struct vop_win_data *win_data = vop_win->data;
- unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
+ unsigned long possible_crtcs = drm_crtc_mask(crtc);
if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
continue;
@@ -1596,9 +1620,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_disable_pm_runtime;
- /* IRQ is initially disabled; it gets enabled in power_on */
- disable_irq(vop->irq);
-
return 0;
err_disable_pm_runtime:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 084acdd0019a..fcb91041a666 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -331,16 +331,19 @@ static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
{
int lb_mode;
- if (width > 2560)
- lb_mode = LB_RGB_3840X2;
- else if (width > 1920)
- lb_mode = LB_RGB_2560X4;
- else if (!is_yuv)
- lb_mode = LB_RGB_1920X5;
- else if (width > 1280)
- lb_mode = LB_YUV_3840X5;
- else
- lb_mode = LB_YUV_2560X8;
+ if (is_yuv) {
+ if (width > 1280)
+ lb_mode = LB_YUV_3840X5;
+ else
+ lb_mode = LB_YUV_2560X8;
+ } else {
+ if (width > 2560)
+ lb_mode = LB_RGB_3840X2;
+ else if (width > 1920)
+ lb_mode = LB_RGB_2560X4;
+ else
+ lb_mode = LB_RGB_1920X5;
+ }
return lb_mode;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index e67f4ea28c0e..456bd9f13bae 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -363,8 +363,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
of_property_read_u32(endpoint, "reg", &endpoint_id);
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
&lvds->panel, &lvds->bridge);
- if (!ret)
+ if (!ret) {
+ of_node_put(endpoint);
break;
+ }
}
if (!child_count) {
DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
@@ -432,7 +434,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
drm_connector_helper_add(connector,
&rockchip_lvds_connector_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach encoder: %d\n", ret);
@@ -446,14 +448,12 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_free_connector;
}
} else {
- lvds->bridge->encoder = encoder;
ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
goto err_free_encoder;
}
- encoder->bridge = lvds->bridge;
}
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 2db89bed52e8..7559a820bd43 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -971,7 +971,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dma && dma->buflist) {
- if (cmdbuf->dma_idx > dma->buf_count) {
+ if (cmdbuf->dma_idx >= dma->buf_count) {
DRM_ERROR
("vertex buffer index %u out of range (0-%u)\n",
cmdbuf->dma_idx, dma->buf_count - 1);
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
index bd0377c0d2ee..7665883f81d4 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -20,7 +20,6 @@
# OTHER DEALINGS IN THE SOFTWARE.
#
#
-ccflags-y := -Iinclude/drm
gpu-sched-y := gpu_scheduler.o sched_fence.o
obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 44d480768dfe..4fc211e19d6e 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -21,6 +21,29 @@
*
*/
+/**
+ * DOC: Overview
+ *
+ * The GPU scheduler provides entities which allow userspace to push jobs
+ * into software queues which are then scheduled on a hardware run queue.
+ * The software queues have a priority among them. The scheduler selects entities
+ * from the run queue using a FIFO. The scheduler also provides dependency
+ * handling between jobs. The driver is expected to provide callback functions
+ * for backend operations, such as submitting a job to the hardware run queue
+ * or returning the dependencies of a job.
+ *
+ * The organisation of the scheduler is the following:
+ *
+ * 1. Each hw run queue has one scheduler
+ * 2. Each scheduler has multiple run queues with different priorities
+ * (e.g. HIGH_HW, HIGH_SW, KERNEL, NORMAL)
+ * 3. Each scheduler run queue has a queue of entities to schedule
+ * 4. Entities themselves maintain a queue of jobs that will be scheduled on
+ * the hardware.
+ *
+ * The jobs in an entity are always scheduled in the order that they were pushed.
+ */
+
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
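
As a rough sketch of the flow the overview describes — entity creation against the run-queue based API introduced in this series, with a hypothetical my_sched scheduler and all error handling elided:

struct drm_sched_entity my_entity;

static int my_context_init(struct drm_gpu_scheduler *my_sched)
{
	/* Entities now bind to one or more run queues, not to a scheduler. */
	struct drm_sched_rq *rq = &my_sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	return drm_sched_entity_init(&my_entity, &rq, 1, NULL);
}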
@@ -39,14 +62,30 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-/* Initialize a given run queue struct */
-static void drm_sched_rq_init(struct drm_sched_rq *rq)
+/**
+ * drm_sched_rq_init - initialize a given run queue struct
+ *
+ * @sched: scheduler instance to which this run queue belongs
+ * @rq: scheduler run queue
+ *
+ * Initializes a scheduler runqueue.
+ */
+static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_rq *rq)
{
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
rq->current_entity = NULL;
+ rq->sched = sched;
}
+/**
+ * drm_sched_rq_add_entity - add an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Adds a scheduler entity to the run queue.
+ */
static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
@@ -57,6 +96,14 @@ static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
spin_unlock(&rq->lock);
}
+/**
+ * drm_sched_rq_remove_entity - remove an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Removes a scheduler entity from the run queue.
+ */
static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
@@ -70,9 +117,9 @@ static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
}
/**
- * Select an entity which could provide a job to run
+ * drm_sched_rq_select_entity - Select an entity which could provide a job to run
*
- * @rq The run queue to check.
+ * @rq: scheduler run queue to check.
*
* Try to find a ready entity, returns NULL if none found.
*/
@@ -112,30 +159,33 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
}
/**
- * Init a context entity used by scheduler when submit to HW ring.
+ * drm_sched_entity_init - Init a context entity used by scheduler when
+ * submit to HW ring.
+ *
+ * @entity: scheduler entity to init
+ * @rq_list: the list of run queue on which jobs from this
+ * entity can be submitted
+ * @num_rq_list: number of run queue in rq_list
+ * @guilty: atomic_t set to 1 when a job on this queue
+ * is found to be guilty causing a timeout
*
- * @sched The pointer to the scheduler
- * @entity The pointer to a valid drm_sched_entity
- * @rq The run queue this entity belongs
- * @guilty atomic_t set to 1 when a job on this queue
- * is found to be guilty causing a timeout
+ * Note: the rq_list should have at least one element to schedule
+ * the entity.
*
- * return 0 if succeed. negative error code on failure
+ * Returns 0 on success or a negative error code on failure.
*/
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity,
- struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+ struct drm_sched_rq **rq_list,
+ unsigned int num_rq_list,
atomic_t *guilty)
{
- if (!(sched && entity && rq))
+ if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
- entity->rq = rq;
- entity->sched = sched;
+ entity->rq = rq_list[0];
entity->guilty = guilty;
- entity->fini_status = 0;
entity->last_scheduled = NULL;
spin_lock_init(&entity->rq_lock);
@@ -149,40 +199,27 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
EXPORT_SYMBOL(drm_sched_entity_init);
/**
- * Query if entity is initialized
+ * drm_sched_entity_is_idle - Check if entity is idle
*
- * @sched Pointer to scheduler instance
- * @entity The pointer to a valid scheduler entity
- *
- * return true if entity is initialized, false otherwise
-*/
-static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
-{
- return entity->sched == sched &&
- entity->rq != NULL;
-}
-
-/**
- * Check if entity is idle
+ * @entity: scheduler entity
*
- * @entity The pointer to a valid scheduler entity
- *
- * Return true if entity don't has any unscheduled jobs.
+ * Returns true if the entity does not have any unscheduled jobs.
*/
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb();
- if (spsc_queue_peek(&entity->job_queue) == NULL)
+
+ if (list_empty(&entity->list) ||
+ spsc_queue_peek(&entity->job_queue) == NULL)
return true;
return false;
}
/**
- * Check if entity is ready
+ * drm_sched_entity_is_ready - Check if entity is ready
*
- * @entity The pointer to a valid scheduler entity
+ * @entity: scheduler entity
*
* Return true if entity could provide a job.
*/
@@ -210,44 +247,67 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
/**
- * Destroy a context entity
+ * drm_sched_entity_flush - Flush a context entity
*
- * @sched Pointer to scheduler instance
- * @entity The pointer to a valid scheduler entity
+ * @entity: scheduler entity
+ * @timeout: time to wait, in jiffies, for the queue to become empty
*
- * Splitting drm_sched_entity_fini() into two functions, The first one is does the waiting,
+ * drm_sched_entity_fini() is split into two functions; the first one does the waiting,
* removes the entity from the runqueue and returns an error when the process was killed.
+ *
+ * Returns the remaining time in jiffies from the input timeout
*/
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
- if (!drm_sched_entity_is_initialized(sched, entity))
- return;
+ struct drm_gpu_scheduler *sched;
+ struct task_struct *last_user;
+ long ret = timeout;
+
+ sched = entity->rq->sched;
/**
* The client will not queue more IBs during this fini, consume existing
* queued IBs or discard them on SIGKILL
*/
- if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
- entity->fini_status = -ERESTARTSYS;
- else
- entity->fini_status = wait_event_killable(sched->job_scheduled,
- drm_sched_entity_is_idle(entity));
- drm_sched_entity_set_rq(entity, NULL);
+ if (current->flags & PF_EXITING) {
+ if (timeout)
+ ret = wait_event_timeout(
+ sched->job_scheduled,
+ drm_sched_entity_is_idle(entity),
+ timeout);
+ } else
+ wait_event_killable(sched->job_scheduled, drm_sched_entity_is_idle(entity));
+
+
+ /* For killed process disable any more IBs enqueue right now */
+ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+ if ((!last_user || last_user == current->group_leader) &&
+ (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+ drm_sched_rq_remove_entity(entity->rq, entity);
+
+ return ret;
}
-EXPORT_SYMBOL(drm_sched_entity_do_release);
+EXPORT_SYMBOL(drm_sched_entity_flush);
/**
- * Destroy a context entity
+ * drm_sched_entity_cleanup - Destroy a context entity
+ *
+ * @entity: scheduler entity
*
- * @sched Pointer to scheduler instance
- * @entity The pointer to a valid scheduler entity
+ * This should be called after drm_sched_entity_flush(). It goes over the
+ * entity and signals all jobs with an error code if the process was killed.
*
- * The second one then goes over the entity and signals all jobs with an error code.
*/
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
- if (entity->fini_status) {
+ struct drm_gpu_scheduler *sched;
+
+ sched = entity->rq->sched;
+ drm_sched_rq_remove_entity(entity->rq, entity);
+
+ /* Consumption of existing IBs wasn't completed. Forcefully
+ * remove them here.
+ */
+ if (spsc_queue_peek(&entity->job_queue)) {
struct drm_sched_job *job;
int r;
@@ -267,27 +327,43 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
struct drm_sched_fence *s_fence = job->s_fence;
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
- r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
- drm_sched_entity_kill_jobs_cb);
- if (r == -ENOENT)
+
+ /*
+ * When the pipe is hung by an older entity, the new entity might
+ * not even have had a chance to submit its first job to the HW,
+ * and so entity->last_scheduled will remain NULL.
+ */
+ if (!entity->last_scheduled) {
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
- else if (r)
- DRM_ERROR("fence add callback failed (%d)\n", r);
+ } else {
+ r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb);
+ if (r == -ENOENT)
+ drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+ else if (r)
+ DRM_ERROR("fence add callback failed (%d)\n", r);
+ }
}
}
dma_fence_put(entity->last_scheduled);
entity->last_scheduled = NULL;
}
-EXPORT_SYMBOL(drm_sched_entity_cleanup);
+EXPORT_SYMBOL(drm_sched_entity_fini);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+/**
+ * drm_sched_entity_destroy - Destroy a context entity
+ *
+ * @entity: scheduler entity
+ *
+ * Calls drm_sched_entity_flush() and drm_sched_entity_fini().
+ */
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
- drm_sched_entity_do_release(sched, entity);
- drm_sched_entity_cleanup(sched, entity);
+ drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ drm_sched_entity_fini(entity);
}
-EXPORT_SYMBOL(drm_sched_entity_fini);
+EXPORT_SYMBOL(drm_sched_entity_destroy);
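
For drivers that want a bounded wait instead of the MAX_WAIT_SCHED_ENTITY_Q_EMPTY default used by drm_sched_entity_destroy(), the split API can be called directly; a sketch, assuming a hypothetical per-driver one-second timeout:

static void my_context_fini(struct drm_sched_entity *entity)
{
	/* Wait up to one second for queued jobs to reach the hardware... */
	drm_sched_entity_flush(entity, msecs_to_jiffies(1000));

	/* ...then signal anything left over with -ESRCH and free it. */
	drm_sched_entity_fini(entity);
}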
static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
@@ -295,7 +371,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
- drm_sched_wakeup(entity->sched);
+ drm_sched_wakeup(entity->rq->sched);
}
static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -306,29 +382,43 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
dma_fence_put(f);
}
+/**
+ * drm_sched_entity_set_rq - Sets the run queue for an entity
+ *
+ * @entity: scheduler entity
+ * @rq: scheduler run queue
+ *
+ * Sets the run queue for an entity and removes the entity from the previous
+ * run queue in which it was present.
+ */
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
struct drm_sched_rq *rq)
{
if (entity->rq == rq)
return;
- spin_lock(&entity->rq_lock);
-
- if (entity->rq)
- drm_sched_rq_remove_entity(entity->rq, entity);
+ BUG_ON(!rq);
+ spin_lock(&entity->rq_lock);
+ drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
- if (rq)
- drm_sched_rq_add_entity(rq, entity);
-
+ drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_rq);
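
Since each run queue now carries a back-pointer to its scheduler, a priority change reduces to picking another run queue on the same scheduler; a hedged sketch (my_entity_set_priority is hypothetical):

static void my_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority prio)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;

	/* Drops the entity from its old run queue and adds it to the new one. */
	drm_sched_entity_set_rq(entity, &sched->sched_rq[prio]);
}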
+/**
+ * drm_sched_dependency_optimized - check whether a dependency can be optimized
+ *
+ * @fence: the dependency fence
+ * @entity: the entity which depends on the above fence
+ *
+ * Returns true if the dependency can be optimized and false otherwise.
+ */
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
@@ -345,7 +435,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct dma_fence * fence = entity->dependency;
struct drm_sched_fence *s_fence;
@@ -390,7 +480,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_job *sched_job = to_drm_sched_job(
spsc_queue_peek(&entity->job_queue));
@@ -413,9 +503,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
}
/**
- * Submit a job to the job queue
+ * drm_sched_entity_push_job - Submit a job to the entity's job queue
*
- * @sched_job The pointer to job required to submit
+ * @sched_job: job to submit
+ * @entity: scheduler entity
*
* Note: To guarantee that the order of insertion to queue matches
* the job's fence sequence number this function should be
@@ -431,12 +522,18 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
trace_drm_sched_job(sched_job, entity);
+ WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
/* first job wakes up scheduler */
if (first) {
/* Add the entity to the run queue */
spin_lock(&entity->rq_lock);
+ if (!entity->rq) {
+ DRM_ERROR("Trying to push to a killed entity\n");
+ spin_unlock(&entity->rq_lock);
+ return;
+ }
drm_sched_rq_add_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
drm_sched_wakeup(sched);
@@ -452,24 +549,28 @@ static void drm_sched_job_finish(struct work_struct *work)
finish_work);
struct drm_gpu_scheduler *sched = s_job->sched;
- /* remove job from ring_mirror_list */
- spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- struct drm_sched_job *next;
-
- spin_unlock(&sched->job_list_lock);
- cancel_delayed_work_sync(&s_job->work_tdr);
- spin_lock(&sched->job_list_lock);
+ /*
+ * Canceling the timeout without removing our job from the ring mirror
+ * list is safe, as we will only end up in this worker if our job's
+ * finished fence has been signaled. So even if another worker
+ * manages to find this job as the next job in the list, the fence
+ * signaled check below will prevent the timeout from being restarted.
+ */
+ cancel_delayed_work_sync(&s_job->work_tdr);
- /* queue TDR for next job */
- next = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ spin_lock(&sched->job_list_lock);
+ /* queue TDR for next job */
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
+ struct drm_sched_job *next = list_next_entry(s_job, node);
- if (next)
+ if (!dma_fence_is_signaled(&next->s_fence->finished))
schedule_delayed_work(&next->work_tdr, sched->timeout);
}
+ /* remove job from ring_mirror_list */
+ list_del(&s_job->node);
spin_unlock(&sched->job_list_lock);
+
dma_fence_put(&s_job->s_fence->finished);
sched->ops->free_job(s_job);
}
@@ -506,6 +607,13 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->timedout_job(job);
}
+/**
+ * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ *
+ * @sched: scheduler instance
+ * @bad: bad scheduler job
+ *
+ */
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
struct drm_sched_job *s_job;
@@ -550,6 +658,12 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
}
EXPORT_SYMBOL(drm_sched_hw_job_reset);
+/**
+ * drm_sched_job_recovery - recover jobs after a reset
+ *
+ * @sched: scheduler instance
+ *
+ */
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
@@ -599,16 +713,23 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
EXPORT_SYMBOL(drm_sched_job_recovery);
/**
- * Init a sched_job with basic field
+ * drm_sched_job_init - init a scheduler job
+ *
+ * @job: scheduler job to init
+ * @entity: scheduler entity to use
+ * @owner: job owner for debugging
*
- * Note: Refer to drm_sched_entity_push_job documentation
+ * Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
+ *
+ * Returns 0 for success, negative error code otherwise.
*/
int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner)
{
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
+
job->sched = sched;
job->entity = entity;
job->s_priority = entity->rq - sched->sched_rq;
@@ -626,7 +747,11 @@ int drm_sched_job_init(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_init);
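
With the scheduler now derived from the entity's run queue, submission no longer passes a scheduler explicitly; a minimal sketch of the init-then-push sequence (my_submit and the NULL owner are illustrative):

static int my_submit(struct drm_sched_job *job, struct drm_sched_entity *entity)
{
	int ret;

	/* job->sched is taken from entity->rq->sched internally. */
	ret = drm_sched_job_init(job, entity, NULL);
	if (ret)
		return ret;

	/* Hands the job to the entity's queue; wakes the scheduler if idle. */
	drm_sched_entity_push_job(job, entity);
	return 0;
}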
/**
- * Return ture if we can push more jobs to the hw.
+ * drm_sched_ready - is the scheduler ready
+ *
+ * @sched: scheduler instance
+ *
+ * Return true if we can push more jobs to the hw, otherwise false.
*/
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
@@ -635,7 +760,10 @@ static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
}
/**
- * Wake up the scheduler when it is ready
+ * drm_sched_wakeup - Wake up the scheduler when it is ready
+ *
+ * @sched: scheduler instance
+ *
*/
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
@@ -644,8 +772,12 @@ static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
}
/**
- * Select next entity to process
-*/
+ * drm_sched_select_entity - Select next entity to process
+ *
+ * @sched: scheduler instance
+ *
+ * Returns the entity to process or NULL if none are found.
+ */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
@@ -665,6 +797,14 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
return entity;
}
+/**
+ * drm_sched_process_job - process a job
+ *
+ * @f: fence
+ * @cb: fence callbacks
+ *
+ * Called after job has finished execution.
+ */
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct drm_sched_fence *s_fence =
@@ -680,6 +820,13 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
wake_up_interruptible(&sched->wake_up_worker);
}
+/**
+ * drm_sched_blocked - check if the scheduler is blocked
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if blocked, otherwise false.
+ */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
if (kthread_should_park()) {
@@ -690,6 +837,13 @@ static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
return false;
}
+/**
+ * drm_sched_main - main scheduler thread
+ *
+ * @param: scheduler instance
+ *
+ * Returns 0.
+ */
static int drm_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
@@ -744,15 +898,17 @@ static int drm_sched_main(void *param)
}
/**
- * Init a gpu scheduler instance
+ * drm_sched_init - Init a gpu scheduler instance
*
- * @sched The pointer to the scheduler
- * @ops The backend operations for this scheduler.
- * @hw_submissions Number of hw submissions to do.
- * @name Name used for debugging
+ * @sched: scheduler instance
+ * @ops: backend operations for this scheduler
+ * @hw_submission: number of hw submissions that can be in flight
+ * @hang_limit: number of times to allow a job to hang before dropping it
+ * @timeout: timeout value in jiffies for the scheduler
+ * @name: name used for debugging
*
* Return 0 on success, otherwise error code.
-*/
+ */
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
unsigned hw_submission,
@@ -767,7 +923,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->timeout = timeout;
sched->hang_limit = hang_limit;
for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
- drm_sched_rq_init(&sched->sched_rq[i]);
+ drm_sched_rq_init(sched, &sched->sched_rq[i]);
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
@@ -788,9 +944,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
EXPORT_SYMBOL(drm_sched_init);
/**
- * Destroy a gpu scheduler
+ * drm_sched_fini - Destroy a gpu scheduler
+ *
+ * @sched: scheduler instance
*
- * @sched The pointer to the scheduler
+ * Tears down and cleans up the scheduler.
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index df4461648e3f..d8d2dff9ea2f 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -81,11 +81,6 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
return (const char *)fence->sched->name;
}
-static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
-{
- return true;
-}
-
/**
* drm_sched_fence_free - free up the fence memory
*
@@ -134,18 +129,12 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
- .enable_signaling = drm_sched_fence_enable_signaling,
- .signaled = NULL,
- .wait = dma_fence_default_wait,
.release = drm_sched_fence_release_scheduled,
};
const struct dma_fence_ops drm_sched_fence_ops_finished = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
- .enable_signaling = drm_sched_fence_enable_signaling,
- .signaled = NULL,
- .wait = dma_fence_default_wait,
.release = drm_sched_fence_release_finished,
};
@@ -172,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
return NULL;
fence->owner = owner;
- fence->sched = entity->sched;
+ fence->sched = entity->rq->sched;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index 54acc117550c..6b943ea1c57d 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -19,7 +19,9 @@ selftest(align64, igt_align64)
selftest(evict, igt_evict)
selftest(evict_range, igt_evict_range)
selftest(bottomup, igt_bottomup)
+selftest(lowest, igt_lowest)
selftest(topdown, igt_topdown)
+selftest(highest, igt_highest)
selftest(color, igt_color)
selftest(color_evict, igt_color_evict)
selftest(color_evict_range, igt_color_evict_range)
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 933af1c25387..fbed2c90fd51 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -1825,6 +1825,77 @@ err:
return ret;
}
+static int __igt_once(unsigned int mode)
+{
+ struct drm_mm mm;
+ struct drm_mm_node rsvd_lo, rsvd_hi, node;
+ int err;
+
+ drm_mm_init(&mm, 0, 7);
+
+ memset(&rsvd_lo, 0, sizeof(rsvd_lo));
+ rsvd_lo.start = 1;
+ rsvd_lo.size = 1;
+ err = drm_mm_reserve_node(&mm, &rsvd_lo);
+ if (err) {
+ pr_err("Could not reserve low node\n");
+ goto err;
+ }
+
+ memset(&rsvd_hi, 0, sizeof(rsvd_hi));
+ rsvd_hi.start = 5;
+ rsvd_hi.size = 1;
+ err = drm_mm_reserve_node(&mm, &rsvd_hi);
+ if (err) {
+ pr_err("Could not reserve low node\n");
+ goto err_lo;
+ }
+
+ if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
+ pr_err("Expected a hole after lo and high nodes!\n");
+ err = -EINVAL;
+ goto err_hi;
+ }
+
+ memset(&node, 0, sizeof(node));
+ err = drm_mm_insert_node_generic(&mm, &node,
+ 2, 0, 0,
+ mode | DRM_MM_INSERT_ONCE);
+ if (!err) {
+ pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
+ node.start);
+ err = -EINVAL;
+ goto err_node;
+ }
+
+ err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
+ if (err) {
+ pr_err("Could not insert the node into the available hole!\n");
+ err = -EINVAL;
+ goto err_hi;
+ }
+
+err_node:
+ drm_mm_remove_node(&node);
+err_hi:
+ drm_mm_remove_node(&rsvd_hi);
+err_lo:
+ drm_mm_remove_node(&rsvd_lo);
+err:
+ drm_mm_takedown(&mm);
+ return err;
+}
+
+static int igt_lowest(void *ignored)
+{
+ return __igt_once(DRM_MM_INSERT_LOW);
+}
+
+static int igt_highest(void *ignored)
+{
+ return __igt_once(DRM_MM_INSERT_HIGH);
+}
+
static void separate_adjacent_colors(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 40df8887fc17..fc66167b0641 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -675,7 +675,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
if (ret < 0)
goto err_cleanup;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
goto err_backlight;
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index df0a282b9615..57b870e1e696 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -332,7 +332,7 @@ static void sti_cursor_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane);
+ drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 90c46b49c931..832fc43960ee 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -224,7 +224,7 @@ static int sti_bind(struct device *dev)
ret = sti_init(ddev);
if (ret)
- goto err_drm_dev_unref;
+ goto err_drm_dev_put;
ret = component_bind_all(ddev->dev, ddev);
if (ret)
@@ -248,8 +248,8 @@ err_register:
drm_mode_config_cleanup(ddev);
err_cleanup:
sti_cleanup(ddev);
-err_drm_dev_unref:
- drm_dev_unref(ddev);
+err_drm_dev_put:
+ drm_dev_put(ddev);
return ret;
}
@@ -259,7 +259,7 @@ static void sti_unbind(struct device *dev)
drm_dev_unregister(ddev);
sti_cleanup(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
}
static const struct component_master_ops sti_ops = {
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index a5979cd25cc7..b08376b7611b 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -387,7 +387,9 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
if (!dvo->panel) {
dvo->panel = of_drm_find_panel(dvo->panel_node);
- if (dvo->panel)
+ if (IS_ERR(dvo->panel))
+ dvo->panel = NULL;
+ else
drm_panel_attach(dvo->panel, connector);
}
@@ -484,7 +486,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_dvo_connector_helper_funcs);
- err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 9b2c47051b51..c32de6cbf061 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -211,7 +211,11 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
- struct drm_crtc *crtc = drm_plane->crtc;
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock(&drm_plane->mutex, NULL);
+ crtc = drm_plane->state->crtc;
+ drm_modeset_unlock(&drm_plane->mutex);
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -879,7 +883,7 @@ static void sti_gdp_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane);
+ drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 67bbdb49fffc..49438337f70d 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -709,7 +709,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_hda_connector_helper_funcs);
- err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 58f431102512..34cdc4644435 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -977,7 +977,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
count = drm_add_edid_modes(connector, edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
kfree(edid);
return count;
@@ -1290,7 +1290,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi->drm_connector = drm_connector;
- err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 106be8c4e58b..03ac3b4a4469 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1260,7 +1260,7 @@ static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane);
+ drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 8698e08313e1..f2021b23554d 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -148,16 +148,16 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
ret = drv_load(ddev);
if (ret)
- goto err_unref;
+ goto err_put;
ret = drm_dev_register(ddev, 0);
if (ret)
- goto err_unref;
+ goto err_put;
return 0;
-err_unref:
- drm_dev_unref(ddev);
+err_put:
+ drm_dev_put(ddev);
return ret;
}
@@ -170,7 +170,7 @@ static int stm_drm_platform_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
drv_unload(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index d997a6014d6c..808d9fb627e9 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -457,6 +457,14 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
int target_max = target + CLK_TOLERANCE_HZ;
int result;
+ result = clk_round_rate(ldev->pixel_clk, target);
+
+ DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
+
+ /* Filter modes according to the max frequency supported by the pads */
+ if (result > ldev->caps.pad_max_freq_hz)
+ return MODE_CLOCK_HIGH;
+
/*
* Accept all "preferred" modes:
* - this is important for panels because panel clock tolerances are
@@ -468,10 +476,6 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return MODE_OK;
- result = clk_round_rate(ldev->pixel_clk, target);
-
- DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
-
/*
* Filter modes according to the clock value, particularly useful for
* hdmi modes that require precise pixel clocks.
@@ -991,11 +995,15 @@ static int ltdc_get_caps(struct drm_device *ddev)
* does not work on 2nd layer.
*/
ldev->caps.non_alpha_only_l1 = true;
+ ldev->caps.pad_max_freq_hz = 90000000;
+ if (ldev->caps.hw_version == HWVER_10200)
+ ldev->caps.pad_max_freq_hz = 65000000;
break;
case HWVER_20101:
ldev->caps.reg_ofs = REG_OFS_4;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
ldev->caps.non_alpha_only_l1 = false;
+ ldev->caps.pad_max_freq_hz = 150000000;
break;
default:
return -ENODEV;
@@ -1074,8 +1082,11 @@ int ltdc_load(struct drm_device *ddev)
}
}
- if (!IS_ERR(rstc))
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ usleep_range(10, 20);
reset_control_deassert(rstc);
+ }
/* Disable interrupts */
reg_clear(ldev->regs, LTDC_IER,
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index 1e16d6afb0d2..d5afb8960867 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -18,6 +18,7 @@ struct ltdc_caps {
u32 bus_width; /* bus width (32 or 64 bits) */
const u32 *pix_fmt_hw; /* supported pixel formats */
bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
+ int pad_max_freq_hz; /* max frequency supported by pad */
};
#define LTDC_MAX_LAYER 4
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 156a865c3e6d..c2c042287c19 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -68,4 +68,11 @@ config DRM_SUN8I_MIXER
graphics mixture and feed graphics to TCON, If M is
selected the module will be called sun8i-mixer.
+config DRM_SUN8I_TCON_TOP
+ tristate
+ default DRM_SUN4I if DRM_SUN8I_MIXER!=n
+ help
+ TCON TOP is responsible for configuring the display pipeline for
+ HDMI, TVE and LCD.
+
endif
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 2589f4acd5ae..0eb38ac8e86e 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -32,8 +32,12 @@ obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
-obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o
+ifdef CONFIG_DRM_SUN4I_BACKEND
+obj-$(CONFIG_DRM_SUN4I) += sun4i-frontend.o
+endif
obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o
obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
+obj-$(CONFIG_DRM_SUN8I_TCON_TOP) += sun8i_tcon_top.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index de0a76dfa1a2..d7950b52a1fd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -86,12 +86,6 @@ static inline bool sun4i_backend_format_is_packed_yuv422(uint32_t format)
}
}
-static inline bool sun4i_backend_format_is_yuv(uint32_t format)
-{
- return sun4i_backend_format_is_planar_yuv(format) ||
- sun4i_backend_format_is_packed_yuv422(format);
-}
-
static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
int i;
@@ -304,7 +298,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
val);
- if (sun4i_backend_format_is_yuv(fb->format->format))
+ if (fb->format->is_yuv)
return sun4i_backend_update_yuv_format(backend, layer, plane);
ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
@@ -384,7 +378,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
*/
paddr -= PHYS_OFFSET;
- if (sun4i_backend_format_is_yuv(fb->format->format))
+ if (fb->format->is_yuv)
return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
/* Write the 32 lower bits of the address (in bits) */
@@ -502,7 +496,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
num_alpha_planes++;
- if (sun4i_backend_format_is_yuv(fb->format->format)) {
+ if (fb->format->is_yuv) {
DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
num_yuv_planes++;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 2d7c57406715..3eedf335a935 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -242,7 +242,7 @@ struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
/* Set possible_crtcs to this crtc for overlay planes */
for (i = 0; planes[i]; i++) {
- uint32_t possible_crtcs = BIT(drm_crtc_index(&scrtc->crtc));
+ uint32_t possible_crtcs = drm_crtc_mask(&scrtc->crtc);
struct drm_plane *plane = planes[i];
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 50d19605c38f..dd19d674055c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -26,6 +26,7 @@
#include "sun4i_frontend.h"
#include "sun4i_framebuffer.h"
#include "sun4i_tcon.h"
+#include "sun8i_tcon_top.h"
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
@@ -143,7 +144,7 @@ cleanup_mode_config:
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
free_drm:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -156,7 +157,7 @@ static void sun4i_drv_unbind(struct device *dev)
sun4i_framebuffer_free(drm);
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops sun4i_drv_master_ops = {
@@ -197,6 +198,28 @@ static bool sun4i_drv_node_is_tcon(struct device_node *node)
return !!of_match_node(sun4i_tcon_of_table, node);
}
+static bool sun4i_drv_node_is_tcon_with_ch0(struct device_node *node)
+{
+ const struct of_device_id *match;
+
+ match = of_match_node(sun4i_tcon_of_table, node);
+ if (match) {
+ struct sun4i_tcon_quirks *quirks;
+
+ quirks = (struct sun4i_tcon_quirks *)match->data;
+
+ return quirks->has_channel_0;
+ }
+
+ return false;
+}
+
+static bool sun4i_drv_node_is_tcon_top(struct device_node *node)
+{
+ return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
+ !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
static int compare_of(struct device *dev, void *data)
{
DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
@@ -231,12 +254,69 @@ struct endpoint_list {
DECLARE_KFIFO(fifo, struct device_node *, 16);
};
+static void sun4i_drv_traverse_endpoints(struct endpoint_list *list,
+ struct device_node *node,
+ int port_id)
+{
+ struct device_node *ep, *remote, *port;
+
+ port = of_graph_get_port_by_id(node, port_id);
+ if (!port) {
+ DRM_DEBUG_DRIVER("No output to bind on port %d\n", port_id);
+ return;
+ }
+
+ for_each_available_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ DRM_DEBUG_DRIVER("Error retrieving the output node\n");
+ continue;
+ }
+
+ if (sun4i_drv_node_is_tcon(node)) {
+ /*
+ * TCON TOP is always probed before TCON. However, TCON
+ * points back to TCON TOP when it is the source for HDMI.
+ * We have to skip it here to prevent infinite looping
+ * between TCON TOP and TCON.
+ */
+ if (sun4i_drv_node_is_tcon_top(remote)) {
+ DRM_DEBUG_DRIVER("TCON output endpoint is TCON TOP... skipping\n");
+ of_node_put(remote);
+ continue;
+ }
+
+ /*
+ * If the node is our TCON with channel 0, the first
+ * port is used for panel or bridges, and will not be
+ * part of the component framework.
+ */
+ if (sun4i_drv_node_is_tcon_with_ch0(node)) {
+ struct of_endpoint endpoint;
+
+ if (of_graph_parse_endpoint(ep, &endpoint)) {
+ DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
+ of_node_put(remote);
+ continue;
+ }
+
+ if (!endpoint.id) {
+ DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
+ of_node_put(remote);
+ continue;
+ }
+ }
+ }
+
+ kfifo_put(&list->fifo, remote);
+ }
+}
+
static int sun4i_drv_add_endpoints(struct device *dev,
struct endpoint_list *list,
struct component_match **match,
struct device_node *node)
{
- struct device_node *port, *ep, *remote;
int count = 0;
/*
@@ -272,41 +352,13 @@ static int sun4i_drv_add_endpoints(struct device *dev,
count++;
}
- /* Inputs are listed first, then outputs */
- port = of_graph_get_port_by_id(node, 1);
- if (!port) {
- DRM_DEBUG_DRIVER("No output to bind\n");
- return count;
- }
+ /* each node has at least one output */
+ sun4i_drv_traverse_endpoints(list, node, 1);
- for_each_available_child_of_node(port, ep) {
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- DRM_DEBUG_DRIVER("Error retrieving the output node\n");
- of_node_put(remote);
- continue;
- }
-
- /*
- * If the node is our TCON, the first port is used for
- * panel or bridges, and will not be part of the
- * component framework.
- */
- if (sun4i_drv_node_is_tcon(node)) {
- struct of_endpoint endpoint;
-
- if (of_graph_parse_endpoint(ep, &endpoint)) {
- DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
- continue;
- }
-
- if (!endpoint.id) {
- DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
- continue;
- }
- }
-
- kfifo_put(&list->fifo, remote);
+ /* TCON TOP has second and third output */
+ if (sun4i_drv_node_is_tcon_top(node)) {
+ sun4i_drv_traverse_endpoints(list, node, 3);
+ sun4i_drv_traverse_endpoints(list, node, 5);
}
return count;
@@ -366,6 +418,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ .compatible = "allwinner,sun8i-a83t-display-engine" },
{ .compatible = "allwinner,sun8i-h3-display-engine" },
+ { .compatible = "allwinner,sun8i-r40-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
{ }
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index fa4bcd092eaf..061d2e0d9011 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -220,7 +220,7 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
DRM_DEBUG_DRIVER("Monitor is %s monitor\n",
hdmi->hdmi_monitor ? "an HDMI" : "a DVI");
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
cec_s_phys_addr_from_edid(hdmi->cec_adap, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -623,7 +623,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
ret = cec_register_adapter(hdmi->cec_adap, dev);
if (ret < 0)
goto err_cleanup_connector;
- drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
+ drm_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index be3f14d7746d..af7dcb6da351 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -136,7 +136,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
/* The LVDS encoder can only work with the TCON channel 0 */
- lvds->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
+ lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
if (tcon->panel) {
drm_connector_helper_add(&lvds->connector,
@@ -149,7 +149,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&lvds->connector,
+ drm_connector_attach_encoder(&lvds->connector,
&lvds->encoder);
ret = drm_panel_attach(tcon->panel, &lvds->connector);
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index f2fa1f210509..bf068da6b12e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -202,7 +202,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
/* The RGB encoder can only work with the TCON channel 0 */
- rgb->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
+ rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
if (tcon->panel) {
drm_connector_helper_add(&rgb->connector,
@@ -215,7 +215,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&rgb->connector,
+ drm_connector_attach_encoder(&rgb->connector,
&rgb->encoder);
ret = drm_panel_attach(tcon->panel, &rgb->connector);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 8232b39e16ca..3fb084f802e2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -766,12 +766,14 @@ static int sun4i_tcon_init_regmap(struct device *dev,
*/
static struct sunxi_engine *
sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
- struct device_node *node)
+ struct device_node *node,
+ u32 port_id)
{
struct device_node *port, *ep, *remote;
struct sunxi_engine *engine = ERR_PTR(-EINVAL);
+ u32 reg = 0;
- port = of_graph_get_port_by_id(node, 0);
+ port = of_graph_get_port_by_id(node, port_id);
if (!port)
return ERR_PTR(-EINVAL);
@@ -801,8 +803,21 @@ sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
if (remote == engine->node)
goto out_put_remote;
+ /*
+ * According to the device tree binding, input ports have even id
+ * numbers and output ports have odd ids. Since a component with
+ * more than one input and one output (TCON TOP) exists, the correct
+ * remote input id has to be calculated by subtracting 1 from the
+ * remote output id. If this for some reason can't be done, 0
+ * is used as the input port id.
+ */
+ of_node_put(port);
+ port = of_graph_get_remote_port(ep);
+ if (!of_property_read_u32(port, "reg", &reg) && reg > 0)
+ reg -= 1;
+
/* keep looking through upstream ports */
- engine = sun4i_tcon_find_engine_traverse(drv, remote);
+ engine = sun4i_tcon_find_engine_traverse(drv, remote, reg);
out_put_remote:
of_node_put(remote);
@@ -925,7 +940,7 @@ static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
/* Fallback to old method by traversing input endpoints */
of_node_put(port);
- return sun4i_tcon_find_engine_traverse(drv, node);
+ return sun4i_tcon_find_engine_traverse(drv, node, 0);
}
static int sun4i_tcon_bind(struct device *dev, struct device *master,
@@ -1067,23 +1082,25 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
goto err_free_dotclock;
}
- /*
- * If we have an LVDS panel connected to the TCON, we should
- * just probe the LVDS connector. Otherwise, just probe RGB as
- * we used to.
- */
- remote = of_graph_get_remote_node(dev->of_node, 1, 0);
- if (of_device_is_compatible(remote, "panel-lvds"))
- if (can_lvds)
- ret = sun4i_lvds_init(drm, tcon);
+ if (tcon->quirks->has_channel_0) {
+ /*
+ * If we have an LVDS panel connected to the TCON, we should
+ * just probe the LVDS connector. Otherwise, just probe RGB as
+ * we used to.
+ */
+ remote = of_graph_get_remote_node(dev->of_node, 1, 0);
+ if (of_device_is_compatible(remote, "panel-lvds"))
+ if (can_lvds)
+ ret = sun4i_lvds_init(drm, tcon);
+ else
+ ret = -EINVAL;
else
- ret = -EINVAL;
- else
- ret = sun4i_rgb_init(drm, tcon);
- of_node_put(remote);
+ ret = sun4i_rgb_init(drm, tcon);
+ of_node_put(remote);
- if (ret < 0)
- goto err_free_dotclock;
+ if (ret < 0)
+ goto err_free_dotclock;
+ }
if (tcon->quirks->needs_de_be_mux) {
/*
@@ -1137,13 +1154,19 @@ static const struct component_ops sun4i_tcon_ops = {
static int sun4i_tcon_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ const struct sun4i_tcon_quirks *quirks;
struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
- ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
- if (ret == -EPROBE_DEFER)
- return ret;
+ quirks = of_device_get_match_data(&pdev->dev);
+
+ /* panels and bridges are present only on TCONs with channel 0 */
+ if (quirks->has_channel_0) {
+ ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ }
return component_add(&pdev->dev, &sun4i_tcon_ops);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index b070d522ed8d..1a838d208211 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -623,7 +623,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
}
tv->connector.interlace_allowed = true;
- drm_mode_connector_attach_encoder(&tv->connector, &tv->encoder);
+ drm_connector_attach_encoder(&tv->connector, &tv->encoder);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index b5e071a49045..88eb268fdf73 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index bfbf761f0c1d..e3b34a345546 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -13,6 +13,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/slab.h>
#include <linux/phy/phy.h>
@@ -247,10 +248,8 @@ static u16 sun6i_dsi_crc_compute(u8 const *buffer, size_t len)
return crc_ccitt(0xffff, buffer, len);
}
-static u16 sun6i_dsi_crc_repeat_compute(u8 pd, size_t len)
+static u16 sun6i_dsi_crc_repeat(u8 pd, u8 *buffer, size_t len)
{
- u8 buffer[len];
-
memset(buffer, pd, len);
return sun6i_dsi_crc_compute(buffer, len);
@@ -274,11 +273,11 @@ static u32 sun6i_dsi_build_blk0_pkt(u8 vc, u16 wc)
wc & 0xff, wc >> 8);
}
-static u32 sun6i_dsi_build_blk1_pkt(u16 pd, size_t len)
+static u32 sun6i_dsi_build_blk1_pkt(u16 pd, u8 *buffer, size_t len)
{
u32 val = SUN6I_DSI_BLK_PD(pd);
- return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat_compute(pd, len));
+ return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat(pd, buffer, len));
}
static void sun6i_dsi_inst_abort(struct sun6i_dsi *dsi)
@@ -452,6 +451,54 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
struct mipi_dsi_device *device = dsi->device;
unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp, hfp, hsa, hblk, vblk;
+ size_t bytes;
+ u8 *buffer;
+
+ /* Do all timing calculations up front to allocate buffer space */
+
+ /*
+ * A sync period is composed of a blanking packet (4 bytes +
+ * payload + 2 bytes) and a sync event packet (4 bytes). Its
+ * minimal size is therefore 10 bytes
+ */
+#define HSA_PACKET_OVERHEAD 10
+ hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+
+ /*
+ * The backporch is set using a blanking packet (4 bytes +
+ * payload + 2 bytes). Its minimal size is therefore 6 bytes
+ */
+#define HBP_PACKET_OVERHEAD 6
+ hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
+
+ /*
+ * The frontporch is set using a blanking packet (4 bytes +
+ * payload + 2 bytes). Its minimal size is therefore 6 bytes
+ */
+#define HFP_PACKET_OVERHEAD 6
+ hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
+
+ /*
+ * hblk seems to be the line + porches length.
+ */
+ hblk = mode->htotal * Bpp - hsa;
+
+ /*
+ * It's not entirely clear what vblk is about. The driver in the
+ * Allwinner BSP uses a rather convoluted calculation there, and
+ * only for 4 lanes. However, using 0 (the !4 lanes case) even
+ * with a 4-lane screen seems to work...
+ */
+ vblk = 0;
+
+ /* How many bytes do we need to send all payloads? */
+ bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk);
+ buffer = kmalloc(bytes, GFP_KERNEL);
+ if (WARN_ON(!buffer))
+ return;
regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0);
@@ -485,63 +532,37 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
SUN6I_DSI_BASIC_SIZE1_VT(mode->vtotal));
- /*
- * A sync period is composed of a blanking packet (4 bytes +
- * payload + 2 bytes) and a sync event packet (4 bytes). Its
- * minimal size is therefore 10 bytes
- */
-#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
- (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+ /* sync */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hsa));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA1_REG,
- sun6i_dsi_build_blk1_pkt(0, hsa));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hsa));
- /*
- * The backporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
-#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
- (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
+ /* backporch */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hbp));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP1_REG,
- sun6i_dsi_build_blk1_pkt(0, hbp));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hbp));
- /*
- * The frontporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
-#define HFP_PACKET_OVERHEAD 6
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
- (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
+ /* frontporch */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hfp));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP1_REG,
- sun6i_dsi_build_blk1_pkt(0, hfp));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hfp));
- /*
- * hblk seems to be the line + porches length.
- */
- hblk = mode->htotal * Bpp - hsa;
+ /* hblk */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hblk));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK1_REG,
- sun6i_dsi_build_blk1_pkt(0, hblk));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hblk));
- /*
- * And I'm not entirely sure what vblk is about. The driver in
- * Allwinner BSP is using a rather convoluted calculation
- * there only for 4 lanes. However, using 0 (the !4 lanes
- * case) even with a 4 lanes screen seems to work...
- */
- vblk = 0;
+ /* vblk */
regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, vblk));
regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK1_REG,
- sun6i_dsi_build_blk1_pkt(0, vblk));
+ sun6i_dsi_build_blk1_pkt(0, buffer, vblk));
+
+ kfree(buffer);
}
static int sun6i_dsi_start(struct sun6i_dsi *dsi,
@@ -812,8 +833,8 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host,
dsi->device = device;
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (!dsi->panel)
- return -EINVAL;
+ if (IS_ERR(dsi->panel))
+ return PTR_ERR(dsi->panel);
dev_info(host->dev, "Attached device %s\n", device->name);
@@ -920,7 +941,7 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&dsi->connector, &dsi->encoder);
+ drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
drm_panel_attach(dsi->panel, &dsi->connector);
return 0;
@@ -1040,7 +1061,7 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
return 0;
}
-static int sun6i_dsi_runtime_resume(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
@@ -1069,7 +1090,7 @@ static int sun6i_dsi_runtime_resume(struct device *dev)
return 0;
}
-static int sun6i_dsi_runtime_suspend(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
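
The timing rework in this file exists to get rid of variable-length arrays on the stack: all blanking payload sizes are computed first, the largest one sizes a single kmalloc()'d scratch buffer, and that buffer is threaded through the CRC helpers. A condensed sketch of the pattern, assuming crc_ccitt() from <linux/crc-ccitt.h>; the helper names are illustrative:

#include <linux/crc-ccitt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

/* fill the scratch buffer with a repeated byte and CRC it */
static u16 crc_repeat(u8 pd, u8 *buf, size_t len)
{
	memset(buf, pd, len);
	return crc_ccitt(0xffff, buf, len);
}

static void setup_payloads(u16 hsa, u16 hbp, u16 hfp, u16 hblk, u16 vblk)
{
	/* one allocation, sized for the largest payload */
	size_t bytes = max3((size_t)hsa, (size_t)hbp,
			    max3((size_t)hfp, (size_t)hblk, (size_t)vblk));
	u8 *buffer = kmalloc(bytes, GFP_KERNEL);

	if (WARN_ON(!buffer))
		return;

	/* every packet reuses the same scratch area */
	crc_repeat(0, buffer, hsa);
	crc_repeat(0, buffer, hbp);
	crc_repeat(0, buffer, hfp);

	kfree(buffer);
}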
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 9f40a44b456b..31875b636434 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -12,6 +12,7 @@
#include <drm/drm_crtc_helper.h>
#include "sun8i_dw_hdmi.h"
+#include "sun8i_tcon_top.h"
static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -41,6 +42,44 @@ sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node)
+{
+ return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
+ !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
+static u32 sun8i_dw_hdmi_find_possible_crtcs(struct drm_device *drm,
+ struct device_node *node)
+{
+ struct device_node *port, *ep, *remote, *remote_port;
+ u32 crtcs = 0;
+
+ remote = of_graph_get_remote_node(node, 0, -1);
+ if (!remote)
+ return 0;
+
+ if (sun8i_dw_hdmi_node_is_tcon_top(remote)) {
+ port = of_graph_get_port_by_id(remote, 4);
+ if (!port)
+ goto crtcs_exit;
+
+ for_each_child_of_node(port, ep) {
+ remote_port = of_graph_get_remote_port(ep);
+ if (remote_port) {
+ crtcs |= drm_of_crtc_port_mask(drm, remote_port);
+ of_node_put(remote_port);
+ }
+ }
+ } else {
+ crtcs = drm_of_find_possible_crtcs(drm, node);
+ }
+
+crtcs_exit:
+ of_node_put(remote);
+
+ return crtcs;
+}
+
static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -63,7 +102,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = &pdev->dev;
encoder = &hdmi->encoder;
- encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+ encoder->possible_crtcs =
+ sun8i_dw_hdmi_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
* supposed to be connected to, it's because the CRTC has
@@ -181,7 +221,7 @@ static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
-struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
+static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
.probe = sun8i_dw_hdmi_probe,
.remove = sun8i_dw_hdmi_remove,
.driver = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 79154f0f674a..aadbe0a10b0c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -98,7 +98,8 @@
#define SUN8I_HDMI_PHY_PLL_CFG1_LDO2_EN BIT(29)
#define SUN8I_HDMI_PHY_PLL_CFG1_LDO1_EN BIT(28)
#define SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33 BIT(27)
-#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT 26
#define SUN8I_HDMI_PHY_PLL_CFG1_PLLEN BIT(25)
#define SUN8I_HDMI_PHY_PLL_CFG1_LDO_VSET(x) ((x) << 22)
#define SUN8I_HDMI_PHY_PLL_CFG1_UNKNOWN(x) ((x) << 20)
@@ -147,6 +148,7 @@ struct sun8i_hdmi_phy;
struct sun8i_hdmi_phy_variant {
bool has_phy_clk;
+ bool has_second_pll;
void (*phy_init)(struct sun8i_hdmi_phy *phy);
void (*phy_disable)(struct dw_hdmi *hdmi,
struct sun8i_hdmi_phy *phy);
@@ -160,6 +162,7 @@ struct sun8i_hdmi_phy {
struct clk *clk_mod;
struct clk *clk_phy;
struct clk *clk_pll0;
+ struct clk *clk_pll1;
unsigned int rcal;
struct regmap *regs;
struct reset_control *rst_phy;
@@ -188,6 +191,7 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void);
-int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev);
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
+ bool second_parent);
#endif /* _SUN8I_DW_HDMI_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 5a52fc489a9d..82502b351aec 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -183,7 +183,13 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi,
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_TXEN_MASK, 0);
- regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, pll_cfg1_init);
+ /*
+ * NOTE: We have to be careful not to overwrite PHY parent
+ * clock selection bit and clock divider.
+ */
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ (u32)~SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
+ pll_cfg1_init);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG,
(u32)~SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK,
pll_cfg2_init);
@@ -352,6 +358,10 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
SUN8I_HDMI_PHY_ANA_CFG3_SCLEN |
SUN8I_HDMI_PHY_ANA_CFG3_SDAEN);
+ /* reset PHY PLL clock parent */
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK, 0);
+
/* set HW control of CEC pins */
regmap_write(phy->regs, SUN8I_HDMI_PHY_CEC_REG, 0);
@@ -386,6 +396,14 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
.name = "phy"
};
+static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
+ .has_phy_clk = true,
+ .has_second_pll = true,
+ .phy_init = &sun8i_hdmi_phy_init_h3,
+ .phy_disable = &sun8i_hdmi_phy_disable_h3,
+ .phy_config = &sun8i_hdmi_phy_config_h3,
+};
+
static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
.phy_init = &sun8i_hdmi_phy_init_a83t,
.phy_disable = &sun8i_hdmi_phy_disable_a83t,
@@ -401,6 +419,10 @@ static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{
+ .compatible = "allwinner,sun50i-a64-hdmi-phy",
+ .data = &sun50i_a64_hdmi_phy,
+ },
+ {
.compatible = "allwinner,sun8i-a83t-hdmi-phy",
.data = &sun8i_a83t_hdmi_phy,
},
@@ -472,18 +494,30 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
goto err_put_clk_mod;
}
- ret = sun8i_phy_clk_create(phy, dev);
+ if (phy->variant->has_second_pll) {
+ phy->clk_pll1 = of_clk_get_by_name(node, "pll-1");
+ if (IS_ERR(phy->clk_pll1)) {
+ dev_err(dev, "Could not get pll-1 clock\n");
+ ret = PTR_ERR(phy->clk_pll1);
+ goto err_put_clk_pll0;
+ }
+ }
+
+ ret = sun8i_phy_clk_create(phy, dev,
+ phy->variant->has_second_pll);
if (ret) {
dev_err(dev, "Couldn't create the PHY clock\n");
- goto err_put_clk_pll0;
+ goto err_put_clk_pll1;
}
+
+ clk_prepare_enable(phy->clk_phy);
}
phy->rst_phy = of_reset_control_get_shared(node, "phy");
if (IS_ERR(phy->rst_phy)) {
dev_err(dev, "Could not get phy reset control\n");
ret = PTR_ERR(phy->rst_phy);
- goto err_put_clk_pll0;
+ goto err_disable_clk_phy;
}
ret = reset_control_deassert(phy->rst_phy);
@@ -514,9 +548,12 @@ err_deassert_rst_phy:
reset_control_assert(phy->rst_phy);
err_put_rst_phy:
reset_control_put(phy->rst_phy);
+err_disable_clk_phy:
+ clk_disable_unprepare(phy->clk_phy);
+err_put_clk_pll1:
+ clk_put(phy->clk_pll1);
err_put_clk_pll0:
- if (phy->variant->has_phy_clk)
- clk_put(phy->clk_pll0);
+ clk_put(phy->clk_pll0);
err_put_clk_mod:
clk_put(phy->clk_mod);
err_put_clk_bus:
@@ -531,13 +568,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
clk_disable_unprepare(phy->clk_mod);
clk_disable_unprepare(phy->clk_bus);
+ clk_disable_unprepare(phy->clk_phy);
reset_control_assert(phy->rst_phy);
reset_control_put(phy->rst_phy);
- if (phy->variant->has_phy_clk)
- clk_put(phy->clk_pll0);
+ clk_put(phy->clk_pll0);
+ clk_put(phy->clk_pll1);
clk_put(phy->clk_mod);
clk_put(phy->clk_bus);
}
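
The PLL_CFG1 hunk above swaps a full-register regmap_write() for regmap_update_bits() with the complement of the CKIN_SEL mask, so the mux bit programmed by the clock code survives PHY reconfiguration. Shown in isolation, using the register and mask names defined in the header above:

/* update everything in PLL_CFG1 except the clock-input selection bit */
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
		   (u32)~SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
		   pll_cfg1_init);

/* the mux bit itself is only touched by the clk set_parent callback */
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
		   SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
		   index << SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT);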
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
index faea449812f8..a4d31fe3abff 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
@@ -22,35 +22,45 @@ static int sun8i_phy_clk_determine_rate(struct clk_hw *hw,
{
unsigned long rate = req->rate;
unsigned long best_rate = 0;
+ struct clk_hw *best_parent = NULL;
struct clk_hw *parent;
int best_div = 1;
- int i;
+ int i, p;
- parent = clk_hw_get_parent(hw);
-
- for (i = 1; i <= 16; i++) {
- unsigned long ideal = rate * i;
- unsigned long rounded;
-
- rounded = clk_hw_round_rate(parent, ideal);
+ for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
+ parent = clk_hw_get_parent_by_index(hw, p);
+ if (!parent)
+ continue;
- if (rounded == ideal) {
- best_rate = rounded;
- best_div = i;
- break;
+ for (i = 1; i <= 16; i++) {
+ unsigned long ideal = rate * i;
+ unsigned long rounded;
+
+ rounded = clk_hw_round_rate(parent, ideal);
+
+ if (rounded == ideal) {
+ best_rate = rounded;
+ best_div = i;
+ best_parent = parent;
+ break;
+ }
+
+ if (!best_rate ||
+ abs(rate - rounded / i) <
+ abs(rate - best_rate / best_div)) {
+ best_rate = rounded;
+ best_div = i;
+ best_parent = parent;
+ }
}
- if (!best_rate ||
- abs(rate - rounded / i) <
- abs(rate - best_rate / best_div)) {
- best_rate = rounded;
- best_div = i;
- }
+ if (best_rate / best_div == rate)
+ break;
}
req->rate = best_rate / best_div;
req->best_parent_rate = best_rate;
- req->best_parent_hw = parent;
+ req->best_parent_hw = best_parent;
return 0;
}
@@ -95,22 +105,58 @@ static int sun8i_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static u8 sun8i_phy_clk_get_parent(struct clk_hw *hw)
+{
+ struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+ u32 reg;
+
+ regmap_read(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, &reg);
+ reg = (reg & SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK) >>
+ SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT;
+
+ return reg;
+}
+
+static int sun8i_phy_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+
+ if (index > 1)
+ return -EINVAL;
+
+ regmap_update_bits(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
+ index << SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT);
+
+ return 0;
+}
+
static const struct clk_ops sun8i_phy_clk_ops = {
.determine_rate = sun8i_phy_clk_determine_rate,
.recalc_rate = sun8i_phy_clk_recalc_rate,
.set_rate = sun8i_phy_clk_set_rate,
+
+ .get_parent = sun8i_phy_clk_get_parent,
+ .set_parent = sun8i_phy_clk_set_parent,
};
-int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
+ bool second_parent)
{
struct clk_init_data init;
struct sun8i_phy_clk *priv;
- const char *parents[1];
+ const char *parents[2];
parents[0] = __clk_get_name(phy->clk_pll0);
if (!parents[0])
return -ENODEV;
+ if (second_parent) {
+ parents[1] = __clk_get_name(phy->clk_pll1);
+ if (!parents[1])
+ return -ENODEV;
+ }
+
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -118,7 +164,7 @@ int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
init.name = "hdmi-phy-clk";
init.ops = &sun8i_phy_clk_ops;
init.parent_names = parents;
- init.num_parents = 1;
+ init.num_parents = second_parent ? 2 : 1;
init.flags = CLK_SET_RATE_PARENT;
priv->phy = phy;
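
With two possible parents, .determine_rate now walks every parent and every divider from 1 to 16, keeping the closest match and stopping early on an exact one. The interleaved diff above is easier to follow as a consolidated sketch; the clk framework calls are the ones the patch uses, while loop variables are illustrative:

for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
	struct clk_hw *parent = clk_hw_get_parent_by_index(hw, p);

	if (!parent)
		continue;

	for (div = 1; div <= 16; div++) {
		unsigned long ideal = rate * div;
		unsigned long rounded = clk_hw_round_rate(parent, ideal);

		/* exact multiple: take it and stop scanning dividers */
		if (rounded == ideal) {
			best_rate = rounded;
			best_div = div;
			best_parent = parent;
			break;
		}

		/* otherwise remember the closest candidate so far */
		if (!best_rate ||
		    abs(rate - rounded / div) <
		    abs(rate - best_rate / best_div)) {
			best_rate = rounded;
			best_div = div;
			best_parent = parent;
		}
	}

	/* an exact hit makes scanning the remaining parents pointless */
	if (best_rate / best_div == rate)
		break;
}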
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 126899d6f0d3..fc3713608f78 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -21,8 +21,9 @@
#include <linux/component.h>
#include <linux/dma-mapping.h>
-#include <linux/reset.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/reset.h>
#include "sun4i_drv.h"
#include "sun8i_mixer.h"
@@ -322,6 +323,42 @@ static struct regmap_config sun8i_mixer_regmap_config = {
.max_register = 0xbfffc, /* guessed */
};
+static int sun8i_mixer_of_get_id(struct device_node *node)
+{
+ struct device_node *port, *ep;
+ int ret = -EINVAL;
+
+ /* output is port 1 */
+ port = of_graph_get_port_by_id(node, 1);
+ if (!port)
+ return -EINVAL;
+
+ /* try to find downstream endpoint */
+ for_each_available_child_of_node(port, ep) {
+ struct device_node *remote;
+ u32 reg;
+
+ remote = of_graph_get_remote_endpoint(ep);
+ if (!remote)
+ continue;
+
+ ret = of_property_read_u32(remote, "reg", &reg);
+ if (!ret) {
+ of_node_put(remote);
+ of_node_put(ep);
+ of_node_put(port);
+
+ return reg;
+ }
+
+ of_node_put(remote);
+ }
+
+ of_node_put(port);
+
+ return ret;
+}
+
static int sun8i_mixer_bind(struct device *dev, struct device *master,
void *data)
{
@@ -353,8 +390,16 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
dev_set_drvdata(dev, mixer);
mixer->engine.ops = &sun8i_engine_ops;
mixer->engine.node = dev->of_node;
- /* The ID of the mixer currently doesn't matter */
- mixer->engine.id = -1;
+
+ /*
+ * While this function can fail, we shouldn't do anything
+ * if that happens. Some early DE2 DT entries don't provide
+ * a mixer id but work nevertheless, because matching between
+ * TCON and mixer is done by comparing node pointers (the old
+ * way) instead of comparing ids. If this function fails and
+ * an id is needed, it will fail during id matching anyway.
+ */
+ mixer->engine.id = sun8i_mixer_of_get_id(dev->of_node);
mixer->cfg = of_device_get_match_data(dev);
if (!mixer->cfg)
@@ -432,14 +477,14 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(0),
SUN8I_MIXER_BLEND_COLOR_BLACK);
- /* Fixed zpos for now */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE, 0x43210);
-
plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
for (i = 0; i < plane_cnt; i++)
regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_MODE(i),
SUN8I_MIXER_BLEND_MODE_DEF);
+ regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+
return 0;
err_disable_bus_clk:
@@ -500,6 +545,22 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.vi_num = 1,
};
+static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
+ .ccsc = 0,
+ .mod_rate = 297000000,
+ .scaler_mask = 0xf,
+ .ui_num = 3,
+ .vi_num = 1,
+};
+
+static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
+ .ccsc = 1,
+ .mod_rate = 297000000,
+ .scaler_mask = 0x3,
+ .ui_num = 1,
+ .vi_num = 1,
+};
+
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
@@ -522,6 +583,14 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.data = &sun8i_h3_mixer0_cfg,
},
{
+ .compatible = "allwinner,sun8i-r40-de2-mixer-0",
+ .data = &sun8i_r40_mixer0_cfg,
+ },
+ {
+ .compatible = "allwinner,sun8i-r40-de2-mixer-1",
+ .data = &sun8i_r40_mixer1_cfg,
+ },
+ {
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index f34e70c42adf..406c42e752d7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -44,6 +44,7 @@
#define SUN8I_MIXER_BLEND_CK_MIN(x) (0x10e0 + 0x04 * (x))
#define SUN8I_MIXER_BLEND_OUTCTL 0x10fc
+#define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK GENMASK(12, 8)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe)
#define SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(pipe) BIT(pipe)
/* colors are always in AARRGGBB format */
@@ -51,6 +52,9 @@
/* The following numbers are some still unknown magic numbers */
#define SUN8I_MIXER_BLEND_MODE_DEF 0x03010301
+#define SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(n) (0xf << ((n) << 2))
+#define SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(n) ((n) << 2)
+
#define SUN8I_MIXER_BLEND_OUTCTL_INTERLACED BIT(1)
#define SUN8I_MIXER_FBFMT_ARGB8888 0
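
The new ROUTE macros pack one 4-bit pipe-source field per z-position into SUN8I_MIXER_BLEND_ROUTE: slot n occupies bits [4n+3:4n], which is why both macros shift by (n << 2). A usage sketch matching how the layer code later in this series programs the register:

/* route mixer channel `ch` into the blender slot at z-position `zpos` */
regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE,
		   SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
		   ch << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos));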
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
new file mode 100644
index 000000000000..55fe398d8290
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#include <drm/drmP.h>
+
+#include <dt-bindings/clock/sun8i-tcon-top.h>
+
+#include <linux/bitfield.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include "sun8i_tcon_top.h"
+
+static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node)
+{
+ return !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
+int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon)
+{
+ struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 val;
+
+ if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
+ dev_err(dev, "Device is not TCON TOP!\n");
+ return -EINVAL;
+ }
+
+ if (tcon < 2 || tcon > 3) {
+ dev_err(dev, "TCON index must be 2 or 3!\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tcon_top->reg_lock, flags);
+
+ val = readl(tcon_top->regs + TCON_TOP_GATE_SRC_REG);
+ val &= ~TCON_TOP_HDMI_SRC_MSK;
+ val |= FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, tcon - 1);
+ writel(val, tcon_top->regs + TCON_TOP_GATE_SRC_REG);
+
+ spin_unlock_irqrestore(&tcon_top->reg_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(sun8i_tcon_top_set_hdmi_src);
+
+int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon)
+{
+ struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 reg;
+
+ if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
+ dev_err(dev, "Device is not TCON TOP!\n");
+ return -EINVAL;
+ }
+
+ if (mixer > 1) {
+ dev_err(dev, "Mixer index is too high!\n");
+ return -EINVAL;
+ }
+
+ if (tcon > 3) {
+ dev_err(dev, "TCON index is too high!\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tcon_top->reg_lock, flags);
+
+ reg = readl(tcon_top->regs + TCON_TOP_PORT_SEL_REG);
+ if (mixer == 0) {
+ reg &= ~TCON_TOP_PORT_DE0_MSK;
+ reg |= FIELD_PREP(TCON_TOP_PORT_DE0_MSK, tcon);
+ } else {
+ reg &= ~TCON_TOP_PORT_DE1_MSK;
+ reg |= FIELD_PREP(TCON_TOP_PORT_DE1_MSK, tcon);
+ }
+ writel(reg, tcon_top->regs + TCON_TOP_PORT_SEL_REG);
+
+ spin_unlock_irqrestore(&tcon_top->reg_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(sun8i_tcon_top_de_config);
+
+
+static struct clk_hw *sun8i_tcon_top_register_gate(struct device *dev,
+ const char *parent,
+ void __iomem *regs,
+ spinlock_t *lock,
+ u8 bit, int name_index)
+{
+ const char *clk_name, *parent_name;
+ int ret, index;
+
+ index = of_property_match_string(dev->of_node, "clock-names", parent);
+ if (index < 0)
+ return ERR_PTR(index);
+
+ parent_name = of_clk_get_parent_name(dev->of_node, index);
+
+ ret = of_property_read_string_index(dev->of_node,
+ "clock-output-names", name_index,
+ &clk_name);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw_register_gate(dev, clk_name, parent_name,
+ CLK_SET_RATE_PARENT,
+ regs + TCON_TOP_GATE_SRC_REG,
+ bit, 0, lock);
+};
+
+static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct clk_hw_onecell_data *clk_data;
+ struct sun8i_tcon_top *tcon_top;
+ struct resource *res;
+ void __iomem *regs;
+ int ret, i;
+
+ tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL);
+ if (!tcon_top)
+ return -ENOMEM;
+
+ clk_data = devm_kzalloc(dev, sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * CLK_NUM,
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+ tcon_top->clk_data = clk_data;
+
+ spin_lock_init(&tcon_top->reg_lock);
+
+ tcon_top->rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(tcon_top->rst)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(tcon_top->rst);
+ }
+
+ tcon_top->bus = devm_clk_get(dev, "bus");
+ if (IS_ERR(tcon_top->bus)) {
+ dev_err(dev, "Couldn't get the bus clock\n");
+ return PTR_ERR(tcon_top->bus);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ tcon_top->regs = regs;
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ ret = reset_control_deassert(tcon_top->rst);
+ if (ret) {
+ dev_err(dev, "Could not deassert ctrl reset control\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(tcon_top->bus);
+ if (ret) {
+ dev_err(dev, "Could not enable bus clock\n");
+ goto err_assert_reset;
+ }
+
+ /*
+ * TCON TOP has two muxes, which select the parent clock for each
+ * TCON TV channel clock. The parent can be either the TCON TV or the
+ * TVE clock. For now we leave this fixed to TCON TV, since the TVE
+ * driver for R40 is not yet implemented. Once it is, the graph needs
+ * to be traversed to determine whether TVE is active on each TCON TV;
+ * if it is, the mux should be switched to the TVE clock parent.
+ */
+ clk_data->hws[CLK_TCON_TOP_TV0] =
+ sun8i_tcon_top_register_gate(dev, "tcon-tv0", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_TV0_GATE, 0);
+
+ clk_data->hws[CLK_TCON_TOP_TV1] =
+ sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_TV1_GATE, 1);
+
+ clk_data->hws[CLK_TCON_TOP_DSI] =
+ sun8i_tcon_top_register_gate(dev, "dsi", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_DSI_GATE, 2);
+
+ for (i = 0; i < CLK_NUM; i++)
+ if (IS_ERR(clk_data->hws[i])) {
+ ret = PTR_ERR(clk_data->hws[i]);
+ goto err_unregister_gates;
+ }
+
+ clk_data->num = CLK_NUM;
+
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ clk_data);
+ if (ret)
+ goto err_unregister_gates;
+
+ dev_set_drvdata(dev, tcon_top);
+
+ return 0;
+
+err_unregister_gates:
+ for (i = 0; i < CLK_NUM; i++)
+ if (clk_data->hws[i])
+ clk_hw_unregister_gate(clk_data->hws[i]);
+ clk_disable_unprepare(tcon_top->bus);
+err_assert_reset:
+ reset_control_assert(tcon_top->rst);
+
+ return ret;
+}
+
+static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+ struct clk_hw_onecell_data *clk_data = tcon_top->clk_data;
+ int i;
+
+ of_clk_del_provider(dev->of_node);
+ for (i = 0; i < CLK_NUM; i++)
+ clk_hw_unregister_gate(clk_data->hws[i]);
+
+ clk_disable_unprepare(tcon_top->bus);
+ reset_control_assert(tcon_top->rst);
+}
+
+static const struct component_ops sun8i_tcon_top_ops = {
+ .bind = sun8i_tcon_top_bind,
+ .unbind = sun8i_tcon_top_unbind,
+};
+
+static int sun8i_tcon_top_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &sun8i_tcon_top_ops);
+}
+
+static int sun8i_tcon_top_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sun8i_tcon_top_ops);
+
+ return 0;
+}
+
+/* sun4i_drv uses this list to check if a device node is a TCON TOP */
+const struct of_device_id sun8i_tcon_top_of_table[] = {
+ { .compatible = "allwinner,sun8i-r40-tcon-top" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
+EXPORT_SYMBOL(sun8i_tcon_top_of_table);
+
+static struct platform_driver sun8i_tcon_top_platform_driver = {
+ .probe = sun8i_tcon_top_probe,
+ .remove = sun8i_tcon_top_remove,
+ .driver = {
+ .name = "sun8i-tcon-top",
+ .of_match_table = sun8i_tcon_top_of_table,
+ },
+};
+module_platform_driver(sun8i_tcon_top_platform_driver);
+
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner R40 TCON TOP driver");
+MODULE_LICENSE("GPL");
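
Both sun8i_tcon_top_set_hdmi_src() and sun8i_tcon_top_de_config() above rely on GENMASK()/FIELD_PREP() from <linux/bitfield.h>, which derive the shift from the mask instead of tracking it separately. A minimal sketch of the idiom, using the register and mask from the new header below:

#include <linux/bitfield.h>

u32 val = readl(regs + TCON_TOP_GATE_SRC_REG);

val &= ~TCON_TOP_HDMI_SRC_MSK;                      /* clear bits 29:28 */
val |= FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, tcon - 1); /* value shifted into place */
writel(val, regs + TCON_TOP_GATE_SRC_REG);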
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.h b/drivers/gpu/drm/sun4i/sun8i_tcon_top.h
new file mode 100644
index 000000000000..0390584a330e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _SUN8I_TCON_TOP_H_
+#define _SUN8I_TCON_TOP_H_
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define TCON_TOP_TCON_TV_SETUP_REG 0x00
+
+#define TCON_TOP_PORT_SEL_REG 0x1C
+#define TCON_TOP_PORT_DE0_MSK GENMASK(1, 0)
+#define TCON_TOP_PORT_DE1_MSK GENMASK(5, 4)
+
+#define TCON_TOP_GATE_SRC_REG 0x20
+#define TCON_TOP_HDMI_SRC_MSK GENMASK(29, 28)
+#define TCON_TOP_TCON_TV1_GATE 24
+#define TCON_TOP_TCON_TV0_GATE 20
+#define TCON_TOP_TCON_DSI_GATE 16
+
+#define CLK_NUM 3
+
+struct sun8i_tcon_top {
+ struct clk *bus;
+ struct clk_hw_onecell_data *clk_data;
+ void __iomem *regs;
+ struct reset_control *rst;
+
+ /*
+ * The spinlock is used to synchronize access to the same
+ * register, where multiple clock gates can be set.
+ */
+ spinlock_t reg_lock;
+};
+
+extern const struct of_device_id sun8i_tcon_top_of_table[];
+
+int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon);
+int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon);
+
+#endif /* _SUN8I_TCON_TOP_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 9a540330cb79..28c15c6ef1ef 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -27,7 +27,8 @@
#include "sun8i_ui_scaler.h"
static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
- int overlay, bool enable)
+ int overlay, bool enable, unsigned int zpos,
+ unsigned int old_zpos)
{
u32 val;
@@ -43,18 +44,36 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val);
- if (enable)
- val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel);
- else
- val = 0;
+ if (!enable || zpos != old_zpos) {
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
+ 0);
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL,
- SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val);
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
+ 0);
+ }
+
+ if (enable) {
+ val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+
+ val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
+ val);
+ }
}
static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
+ int overlay, struct drm_plane *plane,
+ unsigned int zpos)
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
@@ -137,10 +156,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(channel),
+ SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(channel),
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
outsize);
return 0;
@@ -236,30 +255,35 @@ static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
- sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false);
+ sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false, 0,
+ old_zpos);
}
static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+ unsigned int zpos = plane->state->normalized_zpos;
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
if (!plane->state->visible) {
sun8i_ui_layer_enable(mixer, layer->channel,
- layer->overlay, false);
+ layer->overlay, false, 0, old_zpos);
return;
}
sun8i_ui_layer_update_coord(mixer, layer->channel,
- layer->overlay, plane);
+ layer->overlay, plane, zpos);
sun8i_ui_layer_update_formats(mixer, layer->channel,
layer->overlay, plane);
sun8i_ui_layer_update_buffer(mixer, layer->channel,
layer->overlay, plane);
- sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, true);
+ sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay,
+ true, zpos, old_zpos);
}
static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
@@ -307,6 +331,7 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
int channel = mixer->cfg->vi_num + index;
struct sun8i_ui_layer *layer;
+ unsigned int plane_cnt;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
@@ -327,8 +352,10 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
return ERR_PTR(ret);
}
- /* fixed zpos for now */
- ret = drm_plane_create_zpos_immutable_property(&layer->plane, channel);
+ plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+
+ ret = drm_plane_create_zpos_property(&layer->plane, channel,
+ 0, plane_cnt - 1);
if (ret) {
dev_err(drm->dev, "Couldn't add zpos property\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 5877f8ef5895..f4fe97813f94 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -21,7 +21,8 @@
#include "sun8i_vi_scaler.h"
static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
- int overlay, bool enable)
+ int overlay, bool enable, unsigned int zpos,
+ unsigned int old_zpos)
{
u32 val;
@@ -37,18 +38,36 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN, val);
- if (enable)
- val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel);
- else
- val = 0;
+ if (!enable || zpos != old_zpos) {
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
+ 0);
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL,
- SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val);
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
+ 0);
+ }
+
+ if (enable) {
+ val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+
+ val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
+ val);
+ }
}
static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
+ int overlay, struct drm_plane *plane,
+ unsigned int zpos)
{
struct drm_plane_state *state = plane->state;
const struct drm_format_info *format = state->fb->format;
@@ -130,10 +149,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(channel),
+ SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(channel),
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
outsize);
return 0;
@@ -264,30 +283,35 @@ static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
- sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false);
+ sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false, 0,
+ old_zpos);
}
static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+ unsigned int zpos = plane->state->normalized_zpos;
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
if (!plane->state->visible) {
sun8i_vi_layer_enable(mixer, layer->channel,
- layer->overlay, false);
+ layer->overlay, false, 0, old_zpos);
return;
}
sun8i_vi_layer_update_coord(mixer, layer->channel,
- layer->overlay, plane);
+ layer->overlay, plane, zpos);
sun8i_vi_layer_update_formats(mixer, layer->channel,
layer->overlay, plane);
sun8i_vi_layer_update_buffer(mixer, layer->channel,
layer->overlay, plane);
- sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, true);
+ sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay,
+ true, zpos, old_zpos);
}
static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
@@ -351,6 +375,7 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
int index)
{
struct sun8i_vi_layer *layer;
+ unsigned int plane_cnt;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
@@ -368,8 +393,10 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
return ERR_PTR(ret);
}
- /* fixed zpos for now */
- ret = drm_plane_create_zpos_immutable_property(&layer->plane, index);
+ plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+
+ ret = drm_plane_create_zpos_property(&layer->plane, index,
+ 0, plane_cnt - 1);
if (ret) {
dev_err(drm->dev, "Couldn't add zpos property\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index c3afe7b2237e..965088afcfad 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2312,7 +2312,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
* POWER_CONTROL registers during CRTC enabling.
*/
if (dc->soc->coupled_pm && dc->pipe == 1) {
- u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE;
+ u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
struct device_link *link;
struct device *partner;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 776c1513e582..a2bd5876c633 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
* unaligned offset is malformed and cause commands stream
* corruption on the buffer address relocation.
*/
- if (offset & 3 || offset >= obj->gem.size) {
+ if (offset & 3 || offset > obj->gem.size) {
err = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 87c5d89bc9ba..ee6ca8fa1c65 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1052,7 +1052,7 @@ static int tegra_dsi_init(struct host1x_client *client)
drm_encoder_helper_add(&dsi->output.encoder,
&tegra_dsi_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(&dsi->output.connector,
+ drm_connector_attach_encoder(&dsi->output.connector,
&dsi->output.encoder);
drm_connector_register(&dsi->output.connector);
@@ -1411,6 +1411,9 @@ static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
struct tegra_output *output = &dsi->output;
output->panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(output->panel))
+ output->panel = NULL;
+
if (output->panel && output->connector.dev) {
drm_panel_attach(output->panel, &output->connector);
drm_helper_hpd_irq_event(output->connector.dev);
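
Several hunks in this series adapt callers to the new of_drm_find_panel() contract: it now returns an ERR_PTR() (for example -EPROBE_DEFER) instead of NULL on failure. The general shape of the conversion, as applied above and in tegra_output_probe() below:

panel = of_drm_find_panel(np);
if (IS_ERR(panel))
	return PTR_ERR(panel);

/* or, where a missing panel is acceptable, degrade to "no panel": */
panel = of_drm_find_panel(np);
if (IS_ERR(panel))
	panel = NULL;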
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 00a5c9f32254..4f80100ff5f3 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -582,18 +582,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
return 0;
}
-static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
- unsigned long page)
-{
- return NULL;
-}
-
-static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
- unsigned long page,
- void *addr)
-{
-}
-
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
return NULL;
@@ -634,8 +622,6 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.release = tegra_gem_prime_release,
.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
.end_cpu_access = tegra_gem_prime_end_cpu_access,
- .map_atomic = tegra_gem_prime_kmap_atomic,
- .unmap_atomic = tegra_gem_prime_kunmap_atomic,
.map = tegra_gem_prime_kmap,
.unmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 784739a9f497..0082468f703c 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1488,7 +1488,7 @@ static int tegra_hdmi_init(struct host1x_client *client)
drm_encoder_helper_add(&hdmi->output.encoder,
&tegra_hdmi_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(&hdmi->output.connector,
+ drm_connector_attach_encoder(&hdmi->output.connector,
&hdmi->output.encoder);
drm_connector_register(&hdmi->output.connector);
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index ffe34bd0bb9d..c662efc7e413 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -37,7 +37,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, output->ddc);
cec_notifier_set_phys_addr_from_edid(output->notifier, edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
err = drm_add_edid_modes(connector, edid);
@@ -110,8 +110,8 @@ int tegra_output_probe(struct tegra_output *output)
panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
if (panel) {
output->panel = of_drm_find_panel(panel);
- if (!output->panel)
- return -EPROBE_DEFER;
+ if (IS_ERR(output->panel))
+ return PTR_ERR(output->panel);
of_node_put(panel);
}
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 78ec5193741d..28a78d3120bc 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -289,7 +289,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
drm_encoder_helper_add(&output->encoder,
&tegra_rgb_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(&output->connector,
+ drm_connector_attach_encoder(&output->connector,
&output->encoder);
drm_connector_register(&output->connector);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7d2a955fc515..d7fe9f15def1 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2622,7 +2622,7 @@ static int tegra_sor_init(struct host1x_client *client)
encoder, NULL);
drm_encoder_helper_add(&sor->output.encoder, helpers);
- drm_mode_connector_attach_encoder(&sor->output.connector,
+ drm_connector_attach_encoder(&sor->output.connector,
&sor->output.encoder);
drm_connector_register(&sor->output.connector);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index b8a5e4ed22e6..0fb300d41a09 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -378,7 +378,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
if (!priv->external_connector &&
((priv->num_encoders == 0) || (priv->num_connectors == 0))) {
dev_err(dev, "no encoders/connectors found\n");
- ret = -ENXIO;
+ ret = -EPROBE_DEFER;
goto init_failed;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index d651bdd6597e..b4eaf9bc87f8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -103,12 +103,11 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
struct drm_encoder *encoder)
{
struct drm_connector *connector;
- int i;
- list_for_each_entry(connector, &ddev->mode_config.connector_list, head)
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
- if (connector->encoder_ids[i] == encoder->base.id)
- return connector;
+ list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ if (drm_connector_has_possible_encoder(connector, encoder))
+ return connector;
+ }
dev_err(ddev->dev, "No connector found for %s encoder (id %d)\n",
encoder->name, encoder->base.id);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index d616d64a6725..a1acab39d87f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -223,7 +223,7 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index c45cabb38db0..daebf1aa6b0a 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -173,7 +173,7 @@ static int tfp410_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
ret = drm_add_edid_modes(connector, edid);
@@ -240,7 +240,7 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index 4592a5e3f20b..16f4b5c91f1b 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -20,6 +20,17 @@ config TINYDRM_ILI9225
If M is selected the module will be called ili9225.
+config TINYDRM_ILI9341
+ tristate "DRM support for ILI9341 display panels"
+ depends on DRM_TINYDRM && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
+ help
+ DRM driver for the following Ilitek ILI9341 panels:
+ * YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4")
+
+ If M is selected the module will be called ili9341.
+
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM_TINYDRM && SPI
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 49a111929724..14d99080665a 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
# Displays
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
+obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 24a33bf862fa..19c7f70adfa5 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -204,7 +204,7 @@ static int tinydrm_register(struct tinydrm_device *tdev)
if (ret)
return ret;
- ret = drm_fb_cma_fbdev_init_with_funcs(drm, 0, 0, tdev->fb_funcs);
+ ret = drm_fbdev_generic_setup(drm, 0);
if (ret)
DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
@@ -214,7 +214,6 @@ static int tinydrm_register(struct tinydrm_device *tdev)
static void tinydrm_unregister(struct tinydrm_device *tdev)
{
drm_atomic_helper_shutdown(tdev->drm);
- drm_fb_cma_fbdev_fini(tdev->drm);
drm_dev_unregister(tdev->drm);
}
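
tinydrm now leans on the generic fbdev emulation: a single drm_fbdev_generic_setup() call after drm_dev_register() replaces the CMA-specific init/fini pair, and the helper tears itself down when the device is unregistered. A sketch of the resulting flow, assuming the int-returning signature used above:

ret = drm_dev_register(drm, 0);
if (ret)
	return ret;

/* 0 selects the default preferred bpp; a failure here is non-fatal */
ret = drm_fbdev_generic_setup(drm, 0);
if (ret)
	DRM_ERROR("Failed to initialize fbdev: %d\n", ret);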
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 841c69aba059..455fefe012f5 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -368,7 +368,6 @@ static struct drm_driver ili9225_driver = {
DRIVER_ATOMIC,
.fops = &ili9225_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
new file mode 100644
index 000000000000..6701037749a7
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Ilitek ILI9341 panels
+ *
+ * Copyright 2018 David Lechner <david@lechnology.com>
+ *
+ * Based on mi0283qt.c:
+ * Copyright 2016 Noralf Trønnes
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <video/mipi_display.h>
+
+#define ILI9341_FRMCTR1 0xb1
+#define ILI9341_DISCTRL 0xb6
+#define ILI9341_ETMOD 0xb7
+
+#define ILI9341_PWCTRL1 0xc0
+#define ILI9341_PWCTRL2 0xc1
+#define ILI9341_VMCTRL1 0xc5
+#define ILI9341_VMCTRL2 0xc7
+#define ILI9341_PWCTRLA 0xcb
+#define ILI9341_PWCTRLB 0xcf
+
+#define ILI9341_PGAMCTRL 0xe0
+#define ILI9341_NGAMCTRL 0xe1
+#define ILI9341_DTCTRLA 0xe8
+#define ILI9341_DTCTRLB 0xea
+#define ILI9341_PWRSEQ 0xed
+
+#define ILI9341_EN3GAM 0xf2
+#define ILI9341_PUMPCTRL 0xf7
+
+#define ILI9341_MADCTL_BGR BIT(3)
+#define ILI9341_MADCTL_MV BIT(5)
+#define ILI9341_MADCTL_MX BIT(6)
+#define ILI9341_MADCTL_MY BIT(7)
+
+static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ u8 addr_mode;
+ int ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_conditional_reset(mipi);
+ if (ret < 0)
+ return;
+ if (ret == 1)
+ goto out_enable;
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+
+ mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0xc1, 0x30);
+ mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
+ mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x00, 0x78);
+ mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
+ mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20);
+ mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00);
+
+ /* Power Control */
+ mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x23);
+ mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x10);
+ /* VCOM */
+ mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x3e, 0x28);
+ mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0x86);
+
+ /* Memory Access Control */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+
+ /* Frame Rate */
+ mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
+
+ /* Gamma */
+ mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x00);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
+ mipi_dbi_command(mipi, ILI9341_PGAMCTRL,
+ 0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1,
+ 0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00);
+ mipi_dbi_command(mipi, ILI9341_NGAMCTRL,
+ 0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1,
+ 0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f);
+
+ /* DDRAM */
+ mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07);
+
+ /* Display */
+ mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x08, 0x82, 0x27, 0x00);
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(100);
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ msleep(100);
+
+out_enable:
+ switch (mipi->rotation) {
+ default:
+ addr_mode = ILI9341_MADCTL_MX;
+ break;
+ case 90:
+ addr_mode = ILI9341_MADCTL_MV;
+ break;
+ case 180:
+ addr_mode = ILI9341_MADCTL_MY;
+ break;
+ case 270:
+ addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
+ ILI9341_MADCTL_MX;
+ break;
+ }
+ addr_mode |= ILI9341_MADCTL_BGR;
+ mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+}
+
+static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
+ .enable = yx240qv29_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode yx240qv29_mode = {
+ TINYDRM_MODE(240, 320, 37, 49),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
+
+static struct drm_driver ili9341_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .fops = &ili9341_fops,
+ TINYDRM_GEM_DRIVER_OPS,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "ili9341",
+ .desc = "Ilitek ILI9341",
+ .date = "20180514",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id ili9341_of_match[] = {
+ { .compatible = "adafruit,yx240qv29" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9341_of_match);
+
+static const struct spi_device_id ili9341_id[] = {
+ { "yx240qv29", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ili9341_id);
+
+static int ili9341_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(mipi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(mipi->reset);
+ }
+
+ dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ mipi->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(mipi->backlight))
+ return PTR_ERR(mipi->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, dc);
+ if (ret)
+ return ret;
+
+ ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs,
+ &ili9341_driver, &yx240qv29_mode, rotation);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void ili9341_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver ili9341_spi_driver = {
+ .driver = {
+ .name = "ili9341",
+ .of_match_table = ili9341_of_match,
+ },
+ .id_table = ili9341_id,
+ .probe = ili9341_probe,
+ .shutdown = ili9341_shutdown,
+};
+module_spi_driver(ili9341_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9341 DRM driver");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 015d03f2acba..d7bb4c5e6657 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -154,7 +154,6 @@ static struct drm_driver mi0283qt_driver = {
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 4d1fb31a781f..cb3441e51d5f 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -260,6 +260,8 @@ static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
/**
* mipi_dbi_enable_flush - MIPI DBI enable helper
* @mipi: MIPI DBI structure
+ * @crtc_state: CRTC state
+ * @plane_state: Plane state
*
* This function sets &mipi_dbi->enabled, flushes the whole framebuffer and
* enables the backlight. Drivers can use this in their
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 5c29e3803ecb..2fcbc3067d71 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -304,7 +304,6 @@ static struct drm_driver st7586_driver = {
DRIVER_ATOMIC,
.fops = &st7586_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 6c7b15c9da4f..3081bc57c116 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -120,7 +120,6 @@ static struct drm_driver st7735r_driver = {
DRIVER_ATOMIC,
.fops = &st7735r_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5d8688e522d1..7c484729f9b2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -287,12 +287,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (ret) {
if (bdev->driver->move_notify) {
- struct ttm_mem_reg tmp_mem = *mem;
- *mem = bo->mem;
- bo->mem = tmp_mem;
+ swap(*mem, bo->mem);
bdev->driver->move_notify(bo, false, mem);
- bo->mem = *mem;
- *mem = tmp_mem;
+ swap(*mem, bo->mem);
}
goto out_err;
@@ -590,12 +587,18 @@ static void ttm_bo_release(struct kref *kref)
kref_put(&bo->list_kref, ttm_bo_release_list);
}
+void ttm_bo_put(struct ttm_buffer_object *bo)
+{
+ kref_put(&bo->kref, ttm_bo_release);
+}
+EXPORT_SYMBOL(ttm_bo_put);
+
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo = *p_bo;
*p_bo = NULL;
- kref_put(&bo->kref, ttm_bo_release);
+ ttm_bo_put(bo);
}
EXPORT_SYMBOL(ttm_bo_unref);
@@ -1201,7 +1204,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
if (!resv)
ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
return ret;
}
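ttm_bo_put() above is the plain kref drop pairing with ttm_bo_get(); unlike
ttm_bo_unref() it takes the object directly and leaves clearing the caller's
pointer to the caller. A minimal sketch of the pairing, assuming ttm_bo_get()
is the matching kref_get wrapper from the same series:

	/* Sketch: hold an extra reference across a blocking section. */
	static void example_hold_across_wait(struct ttm_buffer_object *bo)
	{
		ttm_bo_get(bo);		/* kref_get(&bo->kref) */
		/* ... drop locks, wait for a fence, etc. ... */
		ttm_bo_put(bo);		/* kref_put(&bo->kref, ttm_bo_release) */
	}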
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f2c167702eef..046a6dda690a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -463,7 +463,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
struct ttm_transfer_obj *fbo;
fbo = container_of(bo, struct ttm_transfer_obj, base);
- ttm_bo_unref(&fbo->bo);
+ ttm_bo_put(fbo->bo);
kfree(fbo);
}
@@ -492,8 +492,9 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (!fbo)
return -ENOMEM;
+ ttm_bo_get(bo);
fbo->base = *bo;
- fbo->bo = ttm_bo_reference(bo);
+ fbo->bo = bo;
/**
* Fix up members that we shouldn't copy directly:
@@ -730,7 +731,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
- ttm_bo_unref(&ghost_obj);
+ ttm_bo_put(ghost_obj);
}
*old_mem = *new_mem;
@@ -786,7 +787,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
- ttm_bo_unref(&ghost_obj);
+ ttm_bo_put(ghost_obj);
} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
@@ -851,7 +852,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
bo->ttm = NULL;
ttm_bo_unreserve(ghost);
- ttm_bo_unref(&ghost);
+ ttm_bo_put(ghost);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index c7ece7613a6a..6fe91c1b692d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -44,10 +44,11 @@
#define TTM_BO_VM_NUM_PREFAULT 16
-static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
- int ret = 0;
+ vm_fault_t ret = 0;
+ int err = 0;
if (likely(!bo->moving))
goto out_unlock;
@@ -67,20 +68,20 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_unlock;
- ttm_bo_reference(bo);
+ ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
(void) dma_fence_wait(bo->moving, true);
ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = dma_fence_wait(bo->moving, true);
- if (unlikely(ret != 0)) {
- ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+ err = dma_fence_wait(bo->moving, true);
+ if (unlikely(err != 0)) {
+ ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
goto out_unlock;
}
@@ -105,7 +106,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ page_offset;
}
-static int ttm_bo_vm_fault(struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -116,8 +117,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
unsigned long pfn;
struct ttm_tt *ttm = NULL;
struct page *page;
- int ret;
+ int err;
int i;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address = vmf->address;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
@@ -129,17 +131,17 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
- ret = ttm_bo_reserve(bo, true, true, NULL);
- if (unlikely(ret != 0)) {
- if (ret != -EBUSY)
+ err = ttm_bo_reserve(bo, true, true, NULL);
+ if (unlikely(err != 0)) {
+ if (err != -EBUSY)
return VM_FAULT_NOPAGE;
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- ttm_bo_reference(bo);
+ ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
}
return VM_FAULT_RETRY;
@@ -163,8 +165,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
}
if (bdev->driver->fault_reserve_notify) {
- ret = bdev->driver->fault_reserve_notify(bo);
- switch (ret) {
+ err = bdev->driver->fault_reserve_notify(bo);
+ switch (err) {
case 0:
break;
case -EBUSY:
@@ -192,13 +194,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
goto out_unlock;
}
- ret = ttm_mem_io_lock(man, true);
- if (unlikely(ret != 0)) {
+ err = ttm_mem_io_lock(man, true);
+ if (unlikely(err != 0)) {
ret = VM_FAULT_NOPAGE;
goto out_unlock;
}
- ret = ttm_mem_io_reserve_vm(bo);
- if (unlikely(ret != 0)) {
+ err = ttm_mem_io_reserve_vm(bo);
+ if (unlikely(err != 0)) {
ret = VM_FAULT_SIGBUS;
goto out_io_unlock;
}
@@ -266,23 +268,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
}
if (vma->vm_flags & VM_MIXEDMAP)
- ret = vm_insert_mixed(&cvma, address,
+ ret = vmf_insert_mixed(&cvma, address,
__pfn_to_pfn_t(pfn, PFN_DEV));
else
- ret = vm_insert_pfn(&cvma, address, pfn);
+ ret = vmf_insert_pfn(&cvma, address, pfn);
/*
* Somebody beat us to this PTE or prefaulting to
* an already populated PTE, or prefaulting error.
*/
- if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+ if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
break;
- else if (unlikely(ret != 0)) {
- ret =
- (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ else if (unlikely(ret & VM_FAULT_ERROR))
goto out_io_unlock;
- }
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
@@ -303,14 +302,14 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
- (void)ttm_bo_reference(bo);
+ ttm_bo_get(bo);
}
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
vma->vm_private_data = NULL;
}
@@ -462,7 +461,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
out_unref:
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
@@ -472,8 +471,10 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
if (vma->vm_pgoff != 0)
return -EACCES;
+ ttm_bo_get(bo);
+
vma->vm_ops = &ttm_bo_vm_ops;
- vma->vm_private_data = ttm_bo_reference(bo);
+ vma->vm_private_data = bo;
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND;
return 0;
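The conversion above consistently keeps two variables so the -errno and
VM_FAULT_* value spaces never mix: err for kernel error codes, ret for fault
codes. A condensed sketch of the idiom (the wait helper is hypothetical):

	static vm_fault_t example_fault(struct vm_fault *vmf)
	{
		vm_fault_t ret = VM_FAULT_NOPAGE;	/* VM_FAULT_* space */
		int err;				/* -errno space */

		err = some_errno_returning_wait();	/* hypothetical */
		if (unlikely(err))
			ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
						      VM_FAULT_NOPAGE;
		return ret;
	}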
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 6e2d1300b457..f841accc2c00 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -47,13 +47,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 16
@@ -222,52 +216,6 @@ static struct kobj_type ttm_pool_kobj_type = {
static struct ttm_pool_manager *_manager;
-#ifndef CONFIG_X86
-static int set_pages_wb(struct page *page, int numpages)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < numpages; i++)
- unmap_page_from_agp(page++);
-#endif
- return 0;
-}
-
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif
-
/**
* Select the right pool or requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
@@ -302,13 +250,13 @@ static void ttm_pages_put(struct page *pages[], unsigned npages,
unsigned int i, pages_nr = (1 << order);
if (order == 0) {
- if (set_pages_array_wb(pages, npages))
+ if (ttm_set_pages_array_wb(pages, npages))
pr_err("Failed to set %d pages to wb!\n", npages);
}
for (i = 0; i < npages; ++i) {
if (order > 0) {
- if (set_pages_wb(pages[i], pages_nr))
+ if (ttm_set_pages_wb(pages[i], pages_nr))
pr_err("Failed to set %d pages to wb!\n", pages_nr);
}
__free_pages(pages[i], order);
@@ -498,12 +446,12 @@ static int ttm_set_pages_caching(struct page **pages,
/* Set page caching */
switch (cstate) {
case tt_uncached:
- r = set_pages_array_uc(pages, cpages);
+ r = ttm_set_pages_array_uc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to uc!\n", cpages);
break;
case tt_wc:
- r = set_pages_array_wc(pages, cpages);
+ r = ttm_set_pages_array_wc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to wc!\n", cpages);
break;
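The new <drm/ttm/ttm_set_memory.h> include replaces the per-file fallbacks
deleted above; judging from the call sites, the header centralizes wrappers of
roughly this shape (a sketch of the assumed contents, not a quote):

	/* Sketch: one of the assumed ttm_set_memory.h wrappers. */
	static inline int ttm_set_pages_array_wb(struct page **pages, int n)
	{
	#ifdef CONFIG_X86
		return set_pages_array_wb(pages, n);	/* asm/set_memory.h */
	#elif IS_ENABLED(CONFIG_AGP)
		int i;

		for (i = 0; i < n; i++)
			unmap_page_from_agp(pages[i]);
		return 0;
	#else
		return 0;
	#endif
	}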
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3f14c1cc0789..507be7ac1165 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -50,12 +50,7 @@
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
@@ -268,54 +263,19 @@ static struct kobj_type ttm_pool_kobj_type = {
.default_attrs = ttm_pool_attrs,
};
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif /* for !CONFIG_X86 */
-
static int ttm_set_pages_caching(struct dma_pool *pool,
struct page **pages, unsigned cpages)
{
int r = 0;
/* Set page caching */
if (pool->type & IS_UC) {
- r = set_pages_array_uc(pages, cpages);
+ r = ttm_set_pages_array_uc(pages, cpages);
if (r)
pr_err("%s: Failed to set %d pages to uc!\n",
pool->dev_name, cpages);
}
if (pool->type & IS_WC) {
- r = set_pages_array_wc(pages, cpages);
+ r = ttm_set_pages_array_wc(pages, cpages);
if (r)
pr_err("%s: Failed to set %d pages to wc!\n",
pool->dev_name, cpages);
@@ -389,17 +349,14 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
struct page *page = d_page->p;
- unsigned i, num_pages;
+ unsigned num_pages;
/* Don't set WB on WB page pool. */
if (!(pool->type & IS_CACHED)) {
num_pages = pool->size / PAGE_SIZE;
- for (i = 0; i < num_pages; ++i, ++page) {
- if (set_pages_array_wb(&page, 1)) {
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, 1);
- }
- }
+ if (ttm_set_pages_wb(page, num_pages))
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, num_pages);
}
list_del(&d_page->page_list);
@@ -420,7 +377,7 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
/* Don't set WB on WB page pool. */
if (npages && !(pool->type & IS_CACHED) &&
- set_pages_array_wb(pages, npages))
+ ttm_set_pages_array_wb(pages, npages))
pr_err("%s: Failed to set %d pages to wb!\n",
pool->dev_name, npages);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a1e543972ca7..e3a0691582ff 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -38,9 +38,7 @@
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
/**
* Allocates a ttm structure for the given BO.
@@ -115,10 +113,9 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
return 0;
}
-#ifdef CONFIG_X86
-static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
+static int ttm_tt_set_page_caching(struct page *p,
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
{
int ret = 0;
@@ -129,26 +126,18 @@ static inline int ttm_tt_set_page_caching(struct page *p,
/* p isn't in the default caching state, set it to
* writeback first to free its current memtype. */
- ret = set_pages_wb(p, 1);
+ ret = ttm_set_pages_wb(p, 1);
if (ret)
return ret;
}
if (c_new == tt_wc)
- ret = set_memory_wc((unsigned long) page_address(p), 1);
+ ret = ttm_set_pages_wc(p, 1);
else if (c_new == tt_uncached)
- ret = set_pages_uc(p, 1);
+ ret = ttm_set_pages_uc(p, 1);
return ret;
}
-#else /* CONFIG_X86 */
-static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
-{
- return 0;
-}
-#endif /* CONFIG_X86 */
/*
* Change caching policy for the linear kernel map
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 09dc585aa46f..68e88bed77ca 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -99,7 +99,7 @@ static int udl_get_modes(struct drm_connector *connector)
struct udl_drm_connector,
connector);
- drm_mode_connector_update_edid_property(connector, udl_connector->edid);
+ drm_connector_update_edid_property(connector, udl_connector->edid);
if (udl_connector->edid)
return drm_add_edid_modes(connector, udl_connector->edid);
return 0;
@@ -200,7 +200,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
drm_connector_register(connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
connector->polled = DRM_CONNECTOR_POLL_HPD |
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index 0a20695eb120..556f62662aa9 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -29,7 +29,6 @@ struct udl_drm_dmabuf_attachment {
};
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
- struct device *dev,
struct dma_buf_attachment *attach)
{
struct udl_drm_dmabuf_attachment *udl_attach;
@@ -158,27 +157,12 @@ static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
return NULL;
}
-static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
/* TODO */
}
-static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num,
- void *addr)
-{
- /* TODO */
-}
-
static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
struct vm_area_struct *vma)
{
@@ -193,9 +177,7 @@ static const struct dma_buf_ops udl_dmabuf_ops = {
.map_dma_buf = udl_map_dma_buf,
.unmap_dma_buf = udl_unmap_dma_buf,
.map = udl_dmabuf_kmap,
- .map_atomic = udl_dmabuf_kmap_atomic,
.unmap = udl_dmabuf_kunmap,
- .unmap_atomic = udl_dmabuf_kunmap_atomic,
.mmap = udl_dmabuf_mmap,
.release = drm_gem_dmabuf_release,
};
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 55c0cc309198..e9e9b1ff678e 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -16,6 +16,7 @@
#include <linux/usb.h>
#include <drm/drm_gem.h>
+#include <linux/mm_types.h>
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
@@ -112,7 +113,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd);
-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width,
int *ident_ptr, int *sent_ptr);
@@ -136,7 +137,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int udl_gem_fault(struct vm_fault *vmf);
+vm_fault_t udl_gem_fault(struct vm_fault *vmf);
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 2ebdc6d5a76e..dbb62f6eb48a 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -90,7 +90,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int bytes_identical = 0;
struct urb *urb;
int aligned_x;
- int bpp = fb->base.format->cpp[0];
+ int log_bpp;
+
+ BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
+ log_bpp = __ffs(fb->base.format->cpp[0]);
if (!fb->active_16)
return 0;
@@ -125,19 +128,22 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
for (i = y; i < y + height ; i++) {
const int line_offset = fb->base.pitches[0] * i;
- const int byte_offset = line_offset + (x * bpp);
- const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
- if (udl_render_hline(dev, bpp, &urb,
+ const int byte_offset = line_offset + (x << log_bpp);
+ const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
+ if (udl_render_hline(dev, log_bpp, &urb,
(char *) fb->obj->vmapping,
&cmd, byte_offset, dev_byte_offset,
- width * bpp,
+ width << log_bpp,
&bytes_identical, &bytes_sent))
goto error;
}
if (cmd > (char *) urb->transfer_buffer) {
/* Send partial buffer remaining before exiting */
- int len = cmd - (char *) urb->transfer_buffer;
+ int len;
+ if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+ *cmd++ = 0xAF;
+ len = cmd - (char *) urb->transfer_buffer;
ret = udl_submit_urb(dev, urb, len);
bytes_sent += len;
} else
@@ -146,7 +152,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
error:
atomic_add(bytes_sent, &udl->bytes_sent);
atomic_add(bytes_identical, &udl->bytes_identical);
- atomic_add(width*height*bpp, &udl->bytes_rendered);
+ atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
end_cycles = get_cycles();
atomic_add(((unsigned int) ((end_cycles - start_cycles)
>> 10)), /* Kcycles */
@@ -172,7 +178,7 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
pos = (unsigned long)info->fix.smem_start + offset;
- pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
+ pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
pos, size);
/* We don't want the framebuffer to be mapped encrypted */
@@ -218,7 +224,7 @@ static int udl_fb_open(struct fb_info *info, int user)
struct fb_deferred_io *fbdefio;
- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
@@ -230,7 +236,7 @@ static int udl_fb_open(struct fb_info *info, int user)
}
#endif
- pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
+ pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d\n",
info->node, user, info, ufbdev->fb_count);
return 0;
@@ -255,7 +261,7 @@ static int udl_fb_release(struct fb_info *info, int user)
}
#endif
- pr_warn("released /dev/fb%d user=%d count=%d\n",
+ pr_debug("released /dev/fb%d user=%d count=%d\n",
info->node, user, ufbdev->fb_count);
return 0;
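Why the shifts above are safe: the BUG_ON() restricts cpp to powers of two, so
log_bpp = __ffs(cpp) lets every multiply/divide by bpp become a shift:

	/* Worked example for the log_bpp arithmetic:
	 *   cpp = 2 (RGB565) -> log_bpp = 1: x * cpp == x << 1, x / cpp == x >> 1
	 *   cpp = 4 (XRGB32) -> log_bpp = 2: x * cpp == x << 2, x / cpp == x >> 2
	 */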
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 9a15cce22cce..d5a23295dd80 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -100,13 +100,12 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-int udl_gem_fault(struct vm_fault *vmf)
+vm_fault_t udl_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
struct page *page;
unsigned int page_offset;
- int ret = 0;
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
@@ -114,17 +113,7 @@ int udl_gem_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
page = obj->pages[page_offset];
- ret = vm_insert_page(vma, vmf->address, page);
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_page(vma, vmf->address, page);
}
int udl_gem_get_pages(struct udl_gem_object *obj)
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index d518de8f496b..f455f095a146 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -170,25 +170,19 @@ static void udl_free_urb_list(struct drm_device *dev)
struct list_head *node;
struct urb_node *unode;
struct urb *urb;
- int ret;
- unsigned long flags;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
while (count--) {
+ down(&udl->urbs.limit_sem);
- /* Getting interrupted means a leak, but ok at shutdown*/
- ret = down_interruptible(&udl->urbs.limit_sem);
- if (ret)
- break;
-
- spin_lock_irqsave(&udl->urbs.lock, flags);
+ spin_lock_irq(&udl->urbs.lock);
node = udl->urbs.list.next; /* have reserved one with sem */
list_del_init(node);
- spin_unlock_irqrestore(&udl->urbs.lock, flags);
+ spin_unlock_irq(&udl->urbs.lock);
unode = list_entry(node, struct urb_node, entry);
urb = unode->urb;
@@ -205,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev)
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
struct udl_device *udl = dev->dev_private;
- int i = 0;
struct urb *urb;
struct urb_node *unode;
char *buf;
+ size_t wanted_size = count * size;
spin_lock_init(&udl->urbs.lock);
+retry:
udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
- while (i < count) {
+ sema_init(&udl->urbs.limit_sem, 0);
+ udl->urbs.count = 0;
+ udl->urbs.available = 0;
+
+ while (udl->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
@@ -231,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
}
unode->urb = urb;
- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
+ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
+ if (size > PAGE_SIZE) {
+ size /= 2;
+ udl_free_urb_list(dev);
+ goto retry;
+ }
break;
}
@@ -246,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
list_add_tail(&unode->entry, &udl->urbs.list);
- i++;
+ up(&udl->urbs.limit_sem);
+ udl->urbs.count++;
+ udl->urbs.available++;
}
- sema_init(&udl->urbs.limit_sem, i);
- udl->urbs.count = i;
- udl->urbs.available = i;
-
- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
+ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
- return i;
+ return udl->urbs.count;
}
struct urb *udl_get_urb(struct drm_device *dev)
@@ -265,7 +267,6 @@ struct urb *udl_get_urb(struct drm_device *dev)
struct list_head *entry;
struct urb_node *unode;
struct urb *urb = NULL;
- unsigned long flags;
/* Wait for an in-flight buffer to complete and get re-queued */
ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
@@ -276,14 +277,14 @@ struct urb *udl_get_urb(struct drm_device *dev)
goto error;
}
- spin_lock_irqsave(&udl->urbs.lock, flags);
+ spin_lock_irq(&udl->urbs.lock);
BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
entry = udl->urbs.list.next;
list_del_init(entry);
udl->urbs.available--;
- spin_unlock_irqrestore(&udl->urbs.lock, flags);
+ spin_unlock_irq(&udl->urbs.lock);
unode = list_entry(entry, struct urb_node, entry);
urb = unode->urb;
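The allocation fallback added above keeps the total buffer budget while
shrinking individual urbs; condensed, the control flow is:

	/* Condensed shape of udl_alloc_urb_list()'s new retry path:
	 *
	 *   wanted_size = count * size;
	 * retry:
	 *   while (udl->urbs.count * size < wanted_size) {
	 *       buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &dma);
	 *       if (!buf && size > PAGE_SIZE) {
	 *           size /= 2;               // halve the per-urb buffer
	 *           udl_free_urb_list(dev);  // drop the partial list
	 *           goto retry;              // rebuild from scratch
	 *       }
	 *       ...
	 *   }
	 *
	 * Initializing limit_sem to 0 and up()ing it per allocated urb also
	 * lets udl_free_urb_list() use a plain down() per urb.
	 */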
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 5bcae7649795..7e37765cf5ac 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -243,7 +243,7 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
memcpy(buf, udl->mode_buf, udl->mode_buf_len);
retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
- DRM_INFO("write mode info %d\n", udl->mode_buf_len);
+ DRM_DEBUG("write mode info %d\n", udl->mode_buf_len);
return retval;
}
@@ -366,7 +366,6 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
{
struct udl_framebuffer *ufb = to_udl_fb(fb);
struct drm_device *dev = crtc->dev;
- unsigned long flags;
struct drm_framebuffer *old_fb = crtc->primary->fb;
if (old_fb) {
@@ -377,10 +376,10 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
if (event)
drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
crtc->primary->fb = fb;
return 0;
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 0c87b1ac6b68..ce87661e544f 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
-#include <linux/prefetch.h>
#include <asm/unaligned.h>
#include <drm/drmP.h>
@@ -51,9 +50,6 @@ static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
int start = width;
int end = width;
- prefetch((void *) front);
- prefetch((void *) back);
-
for (j = 0; j < width; j++) {
if (back[j] != front[j]) {
start = j;
@@ -83,12 +79,12 @@ static inline u16 pixel32_to_be16(const uint32_t pixel)
((pixel >> 8) & 0xf800));
}
-static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp)
+static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp)
{
- u16 pixel_val16 = 0;
- if (bpp == 2)
+ u16 pixel_val16;
+ if (log_bpp == 1)
pixel_val16 = *(const uint16_t *)pixel;
- else if (bpp == 4)
+ else
pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel);
return pixel_val16;
}
@@ -125,8 +121,9 @@ static void udl_compress_hline16(
const u8 *const pixel_end,
uint32_t *device_address_ptr,
uint8_t **command_buffer_ptr,
- const uint8_t *const cmd_buffer_end, int bpp)
+ const uint8_t *const cmd_buffer_end, int log_bpp)
{
+ const int bpp = 1 << log_bpp;
const u8 *pixel = *pixel_start_ptr;
uint32_t dev_addr = *device_address_ptr;
uint8_t *cmd = *command_buffer_ptr;
@@ -139,8 +136,6 @@ static void udl_compress_hline16(
const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
uint16_t pixel_val16;
- prefetchw((void *) cmd); /* pull in one cache line at least */
-
*cmd++ = 0xaf;
*cmd++ = 0x6b;
*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
@@ -153,12 +148,11 @@ static void udl_compress_hline16(
raw_pixels_count_byte = cmd++; /* we'll know this later */
raw_pixel_start = pixel;
- cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
- min((int)(pixel_end - pixel) / bpp,
- (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+ cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL,
+ (unsigned long)(pixel_end - pixel) >> log_bpp,
+ (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp);
- prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
- pixel_val16 = get_pixel_val16(pixel, bpp);
+ pixel_val16 = get_pixel_val16(pixel, log_bpp);
while (pixel < cmd_pixel_end) {
const u8 *const start = pixel;
@@ -170,7 +164,7 @@ static void udl_compress_hline16(
pixel += bpp;
while (pixel < cmd_pixel_end) {
- pixel_val16 = get_pixel_val16(pixel, bpp);
+ pixel_val16 = get_pixel_val16(pixel, log_bpp);
if (pixel_val16 != repeating_pixel_val16)
break;
pixel += bpp;
@@ -179,10 +173,10 @@ static void udl_compress_hline16(
if (unlikely(pixel > start + bpp)) {
/* go back and fill in raw pixel count */
*raw_pixels_count_byte = (((start -
- raw_pixel_start) / bpp) + 1) & 0xFF;
+ raw_pixel_start) >> log_bpp) + 1) & 0xFF;
/* immediately after raw data is repeat byte */
- *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
+ *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF;
/* Then start another raw pixel span */
raw_pixel_start = pixel;
@@ -192,11 +186,14 @@ static void udl_compress_hline16(
if (pixel > raw_pixel_start) {
/* finalize last RAW span */
- *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+ *raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF;
+ } else {
+ /* undo unused byte */
+ cmd--;
}
- *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
- dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
+ *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF;
+ dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2;
}
if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
@@ -219,19 +216,19 @@ static void udl_compress_hline16(
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
u32 byte_width,
int *ident_ptr, int *sent_ptr)
{
const u8 *line_start, *line_end, *next_pixel;
- u32 base16 = 0 + (device_byte_offset / bpp) * 2;
+ u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
struct urb *urb = *urb_ptr;
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
- BUG_ON(!(bpp == 2 || bpp == 4));
+ BUG_ON(!(log_bpp == 1 || log_bpp == 2));
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
@@ -241,7 +238,7 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
udl_compress_hline16(&next_pixel,
line_end, &base16,
- (u8 **) &cmd, (u8 *) cmd_end, bpp);
+ (u8 **) &cmd, (u8 *) cmd_end, log_bpp);
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
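A worked reading of the min3() bound introduced above, taking log_bpp = 1
(16bpp) as the example:

	/* cmd_pixel_end caps the span at the smallest of:
	 *   MAX_CMD_PIXELS + 1            - per-command hardware pixel limit
	 *   (pixel_end - pixel) >> 1      - pixels left in the input line
	 *   (cmd_buffer_end - 1 - cmd)/2  - output pixels that still fit,
	 *                                   reserving one byte so the repeat
	 *                                   count can always be written
	 * The winner, shifted back up by log_bpp, is the span's byte length;
	 * the "undo unused byte" hunk then reclaims the raw-count byte when
	 * no raw pixels followed the final RLE run.
	 */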
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 7b1e2a549a71..54d96518a131 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -227,37 +227,19 @@ v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
}
-int v3d_gem_fault(struct vm_fault *vmf)
+vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct v3d_bo *bo = to_v3d_bo(obj);
- unsigned long pfn;
+ pfn_t pfn;
pgoff_t pgoff;
- int ret;
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = page_to_pfn(bo->pages[pgoff]);
-
- ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+
+ return vmf_insert_mixed(vma, vmf->address, pfn);
}
int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index cdb582043b4f..2a85fa68ffea 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv;
+ struct drm_sched_rq *rq;
int i;
v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- drm_sched_entity_init(&v3d->queue[i].sched,
- &v3d_priv->sched_entity[i],
- &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
- NULL);
+ rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
}
file->driver_priv = v3d_priv;
@@ -146,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file->driver_priv;
enum v3d_queue q;
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_entity_fini(&v3d->queue[q].sched,
- &v3d_priv->sched_entity[q]);
+ drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
}
kfree(v3d_priv);
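The two-step rq setup above reflects the scheduler API change this series
depends on: drm_sched_entity_init() now takes an array of candidate runqueues
plus a count instead of a scheduler pointer. As far as the call sites show,
the signature is:

	/* Assumed new signature, matching the (&rq, 1, NULL) call above: */
	int drm_sched_entity_init(struct drm_sched_entity *entity,
				  struct drm_sched_rq **rq_list,
				  unsigned int num_rq_list,
				  atomic_t *guilty);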
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index a043ac3aae98..e6fed696ad86 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -2,6 +2,7 @@
/* Copyright (C) 2015-2018 Broadcom */
#include <linux/reservation.h>
+#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
@@ -25,7 +26,6 @@ struct v3d_queue_state {
u64 fence_context;
u64 emit_seqno;
- u64 finished_seqno;
};
struct v3d_dev {
@@ -85,6 +85,11 @@ struct v3d_dev {
*/
struct mutex reset_lock;
+ /* Lock taken when creating and pushing the GPU scheduler
+ * jobs, to keep the sched-fence seqnos in order.
+ */
+ struct mutex sched_lock;
+
struct {
u32 num_allocated;
u32 pages_allocated;
@@ -179,6 +184,8 @@ struct v3d_job {
/* GPU virtual addresses of the start/end of the CL job. */
u32 start, end;
+
+ u32 timedout_ctca, timedout_ctra;
};
struct v3d_exec_info {
@@ -248,7 +255,7 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int v3d_gem_fault(struct vm_fault *vmf);
+vm_fault_t v3d_gem_fault(struct vm_fault *vmf);
int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 087d49c8cb12..50bfcf9a8a1a 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -35,24 +35,7 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
return "v3d-render";
}
-static bool v3d_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
-static bool v3d_fence_signaled(struct dma_fence *fence)
-{
- struct v3d_fence *f = to_v3d_fence(fence);
- struct v3d_dev *v3d = to_v3d_dev(f->dev);
-
- return v3d->queue[f->queue].finished_seqno >= f->seqno;
-}
-
const struct dma_fence_ops v3d_fence_ops = {
.get_driver_name = v3d_fence_get_driver_name,
.get_timeline_name = v3d_fence_get_timeline_name,
- .enable_signaling = v3d_fence_enable_signaling,
- .signaled = v3d_fence_signaled,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
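Dropping the three callbacks above relies on the dma_fence core defaults that
made them optional (an assumption read off the ops removal itself):

	/* Assumed core fallbacks once the ops are absent:
	 *   .enable_signaling  -> treated as "return true"
	 *   .wait              -> dma_fence_default_wait
	 *   .release           -> dma_fence_free
	 * Only get_driver_name/get_timeline_name remain mandatory here.
	 */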
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b513f9189caf..5ce24098a5fd 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -550,9 +550,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail;
+ mutex_lock(&v3d->sched_lock);
if (exec->bin.start != exec->bin.end) {
ret = drm_sched_job_init(&exec->bin.base,
- &v3d->queue[V3D_BIN].sched,
&v3d_priv->sched_entity[V3D_BIN],
v3d_priv);
if (ret)
@@ -567,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
}
ret = drm_sched_job_init(&exec->render.base,
- &v3d->queue[V3D_RENDER].sched,
&v3d_priv->sched_entity[V3D_RENDER],
v3d_priv);
if (ret)
@@ -576,6 +575,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
kref_get(&exec->refcount); /* put by scheduler job completion */
drm_sched_entity_push_job(&exec->render.base,
&v3d_priv->sched_entity[V3D_RENDER]);
+ mutex_unlock(&v3d->sched_lock);
v3d_attach_object_fences(exec);
@@ -594,6 +594,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
return 0;
fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
fail:
v3d_exec_put(exec);
@@ -615,6 +616,7 @@ v3d_gem_init(struct drm_device *dev)
spin_lock_init(&v3d->job_lock);
mutex_init(&v3d->bo_lock);
mutex_init(&v3d->reset_lock);
+ mutex_init(&v3d->sched_lock);
/* Note: We don't allocate address 0. Various bits of HW
* treat 0 as special, such as the occlusion query counters
@@ -650,17 +652,14 @@ void
v3d_gem_destroy(struct drm_device *dev)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
- enum v3d_queue q;
v3d_sched_fini(v3d);
/* Waiting for exec to finish would need to be done before
* unregistering V3D.
*/
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
- WARN_ON(v3d->queue[q].emit_seqno !=
- v3d->queue[q].finished_seqno);
- }
+ WARN_ON(v3d->bin_job);
+ WARN_ON(v3d->render_job);
drm_mm_takedown(&v3d->mm);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 77e1fa046c10..e07514eb11b5 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -87,15 +87,12 @@ v3d_irq(int irq, void *arg)
}
if (intsts & V3D_INT_FLDONE) {
- v3d->queue[V3D_BIN].finished_seqno++;
dma_fence_signal(v3d->bin_job->bin.done_fence);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FRDONE) {
- v3d->queue[V3D_RENDER].finished_seqno++;
dma_fence_signal(v3d->render_job->render.done_fence);
-
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index fc13282dfc2f..854046565989 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -222,6 +222,7 @@
#define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n)
#define V3D_CLE_CT0RA 0x00118
#define V3D_CLE_CT1RA 0x0011c
+#define V3D_CLE_CTNRA(n) (V3D_CLE_CT0RA + 4 * n)
#define V3D_CLE_CT0LC 0x00120
#define V3D_CLE_CT1LC 0x00124
#define V3D_CLE_CT0PC 0x00128
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index b07bece9417d..a5501581d96b 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -14,8 +14,8 @@
* to the HW only when it has completed the last one, instead of
* filling up the CT[01]Q FIFOs with jobs. Similarly, we use
* v3d_job_dependency() to manage the dependency between bin and
- * render, instead of having the clients submit jobs with using the
- * HW's semaphores to interlock between them.
+ * render, instead of having the clients submit jobs using the HW's
+ * semaphores to interlock between them.
*/
#include <linux/kthread.h>
@@ -114,8 +114,8 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
v3d_invalidate_caches(v3d);
fence = v3d_fence_create(v3d, q);
- if (!fence)
- return fence;
+ if (IS_ERR(fence))
+ return NULL;
if (job->done_fence)
dma_fence_put(job->done_fence);
@@ -153,7 +153,25 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
struct v3d_job *job = to_v3d_job(sched_job);
struct v3d_exec_info *exec = job->exec;
struct v3d_dev *v3d = exec->v3d;
+ enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
enum v3d_queue q;
+ u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
+ u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+
+ /* If the current address or return address have changed, then
+ * the GPU has probably made progress and we should delay the
+ * reset. This could fail if the GPU got into an infinite loop
+ * in the CL, but that is pretty unlikely outside of an i-g-t
+ * testcase.
+ */
+ if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
+ job->timedout_ctca = ctca;
+ job->timedout_ctra = ctra;
+
+ schedule_delayed_work(&job->base.work_tdr,
+ job->base.sched->timeout);
+ return;
+ }
mutex_lock(&v3d->reset_lock);
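The progress check above turns the scheduler timeout into a two-strike rule;
spelled out as a timeline:

	/* 1st timeout: CTNCA/CTNRA differ from the cached values -> cache the
	 *              new values, re-arm work_tdr with the scheduler's
	 *              timeout, and skip the reset.
	 * 2nd timeout: the addresses did not move -> the CL is genuinely
	 *              stuck, fall through to the full reset below.
	 */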
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 4a3a868235f8..b303703bc7f3 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -19,6 +19,7 @@ vc4-y := \
vc4_plane.o \
vc4_render_cl.o \
vc4_trace_points.o \
+ vc4_txp.o \
vc4_v3d.o \
vc4_validate.o \
vc4_validate_shaders.o
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index add9cc97a3b6..8dcce7182bb7 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -721,7 +721,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
return dmabuf;
}
-int vc4_fault(struct vm_fault *vmf)
+vm_fault_t vc4_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index c8650bbcbcb3..0e6a121858d1 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -46,6 +46,8 @@ struct vc4_crtc_state {
struct drm_crtc_state base;
/* Dlist area for this CRTC configuration. */
struct drm_mm_node mm;
+ bool feed_txp;
+ bool txp_armed;
};
static inline struct vc4_crtc_state *
@@ -324,10 +326,8 @@ static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
return NULL;
}
-static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void vc4_crtc_config_pv(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
@@ -338,12 +338,6 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
- bool debug_dump_regs = false;
-
- if (debug_dump_regs) {
- DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
- vc4_crtc_dump_regs(vc4_crtc);
- }
/* Reset the PV fifo. */
CRTC_WRITE(PV_CONTROL, 0);
@@ -419,6 +413,49 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
PV_CONTROL_CLK_SELECT) |
PV_CONTROL_FIFO_CLR |
PV_CONTROL_EN);
+}
+
+static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ bool debug_dump_regs = false;
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
+ vc4_crtc_dump_regs(vc4_crtc);
+ }
+
+ if (vc4_crtc->channel == 2) {
+ u32 dispctrl;
+ u32 dsp3_mux;
+
+ /*
+ * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
+ * FIFO X'.
+ * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
+ *
+ * DSP3 is connected to FIFO2 unless the transposer is
+ * enabled. In this case, FIFO 2 is directly accessed by the
+ * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
+ * route.
+ */
+ if (vc4_state->feed_txp)
+ dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
+ else
+ dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
+
+ dispctrl = HVS_READ(SCALER_DISPCTRL) &
+ ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
+ }
+
+ if (!vc4_state->feed_txp)
+ vc4_crtc_config_pv(crtc);
HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
SCALER_DISPBKGND_AUTOHS |
@@ -499,6 +536,13 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
}
}
+void vc4_crtc_txp_armed(struct drm_crtc_state *state)
+{
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+
+ vc4_state->txp_armed = true;
+}
+
static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -514,8 +558,11 @@ static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&dev->event_lock, flags);
- vc4_crtc->event = crtc->state->event;
- crtc->state->event = NULL;
+
+ if (!vc4_state->feed_txp || vc4_state->txp_armed) {
+ vc4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ }
HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
vc4_state->mm.start);
@@ -533,8 +580,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- struct drm_crtc_state *state = crtc->state;
- struct drm_display_mode *mode = &state->adjusted_mode;
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
require_hvs_enabled(dev);
@@ -546,15 +593,21 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
/* Turn on the scaler, which will wait for vstart to start
* compositing.
+ * When feeding the transposer, we should operate in oneshot
+ * mode.
*/
HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel),
VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay, SCALER_DISPCTRLX_HEIGHT) |
- SCALER_DISPCTRLX_ENABLE);
+ SCALER_DISPCTRLX_ENABLE |
+ (vc4_state->feed_txp ? SCALER_DISPCTRLX_ONESHOT : 0));
- /* Turn on the pixel valve, which will emit the vstart signal. */
- CRTC_WRITE(PV_V_CONTROL,
- CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
+ /* When feeding the transposer block, the pixelvalve is unneeded and
+ * should not be enabled.
+ */
+ if (!vc4_state->feed_txp)
+ CRTC_WRITE(PV_V_CONTROL,
+ CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -579,8 +632,10 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane *plane;
unsigned long flags;
const struct drm_plane_state *plane_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
u32 dlist_count = 0;
- int ret;
+ int ret, i;
/* The pixelvalve can only feed one encoder (and encoders are
* 1:1 with connectors.)
@@ -600,6 +655,24 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
if (ret)
return ret;
+ for_each_new_connector_in_state(state->state, conn, conn_state, i) {
+ if (conn_state->crtc != crtc)
+ continue;
+
+ /* The writeback connector is implemented using the transposer
+ * block which is directly taking its data from the HVS FIFO.
+ */
+ if (conn->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) {
+ state->no_vblank = true;
+ vc4_state->feed_txp = true;
+ } else {
+ state->no_vblank = false;
+ vc4_state->feed_txp = false;
+ }
+
+ break;
+ }
+
return 0;
}
@@ -713,7 +786,8 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
spin_lock_irqsave(&dev->event_lock, flags);
if (vc4_crtc->event &&
- (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
+ (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) ||
+ vc4_state->feed_txp)) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
@@ -721,6 +795,13 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+void vc4_crtc_handle_vblank(struct vc4_crtc *crtc)
+{
+ crtc->t_vblank = ktime_get();
+ drm_crtc_handle_vblank(&crtc->base);
+ vc4_crtc_handle_page_flip(crtc);
+}
+
static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
{
struct vc4_crtc *vc4_crtc = data;
@@ -728,10 +809,8 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
irqreturn_t ret = IRQ_NONE;
if (stat & PV_INT_VFP_START) {
- vc4_crtc->t_vblank = ktime_get();
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
- drm_crtc_handle_vblank(&vc4_crtc->base);
- vc4_crtc_handle_page_flip(vc4_crtc);
+ vc4_crtc_handle_vblank(vc4_crtc);
ret = IRQ_HANDLED;
}
@@ -862,7 +941,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
* is released.
*/
drm_atomic_set_fb_for_plane(plane->state, fb);
- plane->fb = fb;
vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
vc4_async_page_flip_complete);
@@ -885,12 +963,15 @@ static int vc4_page_flip(struct drm_crtc *crtc,
static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
{
- struct vc4_crtc_state *vc4_state;
+ struct vc4_crtc_state *vc4_state, *old_vc4_state;
vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
if (!vc4_state)
return NULL;
+ old_vc4_state = to_vc4_crtc_state(crtc->state);
+ vc4_state->feed_txp = old_vc4_state->feed_txp;
+
__drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
return &vc4_state->base;
}
@@ -988,9 +1069,17 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, drm) {
- struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ struct vc4_encoder *vc4_encoder;
int i;
+ /* HVS FIFO2 can feed the TXP IP. */
+ if (crtc_data->hvs_channel == 2 &&
+ encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
+ encoder->possible_crtcs |= drm_crtc_mask(crtc);
+ continue;
+ }
+
+ vc4_encoder = to_vc4_encoder(encoder);
for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
if (vc4_encoder->type == encoder_types[i]) {
vc4_encoder->clock_select = i;
@@ -1057,7 +1146,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
&vc4_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
- primary_plane->crtc = crtc;
vc4_crtc->channel = vc4_crtc->data->hvs_channel;
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
@@ -1083,7 +1171,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(plane))
continue;
- plane->possible_crtcs = 1 << drm_crtc_index(crtc);
+ plane->possible_crtcs = drm_crtc_mask(crtc);
}
/* Set up the legacy cursor after overlay initialization,
@@ -1092,8 +1180,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
*/
cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
if (!IS_ERR(cursor_plane)) {
- cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc);
- cursor_plane->crtc = crtc;
+ cursor_plane->possible_crtcs = drm_crtc_mask(crtc);
crtc->cursor = cursor_plane;
}
@@ -1121,7 +1208,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
err_destroy_planes:
list_for_each_entry_safe(destroy_plane, temp,
&drm->mode_config.plane_list, head) {
- if (destroy_plane->possible_crtcs == 1 << drm_crtc_index(crtc))
+ if (destroy_plane->possible_crtcs == drm_crtc_mask(crtc))
destroy_plane->funcs->destroy(destroy_plane);
}
err:
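For clarity, the DSP3 mux update in vc4_crtc_mode_set_nofb() above is a plain
read-modify-write of one register field:

	/* dispctrl = HVS_READ(SCALER_DISPCTRL);
	 * dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;          // clear field
	 * dispctrl |= VC4_SET_FIELD(feed_txp ? 3 : 2,          // 3: DSP3 off
	 *                           SCALER_DISPCTRL_DSP3_MUX); // 2: use FIFO2
	 * HVS_WRITE(SCALER_DISPCTRL, dispctrl);
	 */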
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 5db06bdb5f27..7a0003de71ab 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -21,6 +21,7 @@ static const struct drm_info_list vc4_debugfs_list[] = {
{"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1},
{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
{"vec_regs", vc4_vec_debugfs_regs, 0},
+ {"txp_regs", vc4_txp_debugfs_regs, 0},
{"hvs_regs", vc4_hvs_debugfs_regs, 0},
{"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
{"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 466d0a27b415..04270a14fcaa 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -288,7 +288,7 @@ static int vc4_drm_bind(struct device *dev)
ret = vc4_bo_cache_init(drm);
if (ret)
- goto dev_unref;
+ goto dev_put;
drm_mode_config_init(drm);
@@ -313,8 +313,8 @@ unbind_all:
gem_destroy:
vc4_gem_destroy(drm);
vc4_bo_cache_destroy(drm);
-dev_unref:
- drm_dev_unref(drm);
+dev_put:
+ drm_dev_put(drm);
return ret;
}
@@ -331,7 +331,7 @@ static void vc4_drm_unbind(struct device *dev)
drm_atomic_private_obj_fini(&vc4->ctm_manager);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops vc4_drm_ops = {
@@ -344,6 +344,7 @@ static struct platform_driver *const component_drivers[] = {
&vc4_vec_driver,
&vc4_dpi_driver,
&vc4_dsi_driver,
+ &vc4_txp_driver,
&vc4_hvs_driver,
&vc4_crtc_driver,
&vc4_v3d_driver,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 554a4e810d5b..bd6ef1f31822 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/mm_types.h>
#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
@@ -72,6 +73,7 @@ struct vc4_dev {
struct vc4_dpi *dpi;
struct vc4_dsi *dsi1;
struct vc4_vec *vec;
+ struct vc4_txp *txp;
struct vc4_hang_state *hang_state;
@@ -674,7 +676,7 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vc4_fault(struct vm_fault *vmf);
+vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
@@ -697,6 +699,8 @@ bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
+void vc4_crtc_txp_armed(struct drm_crtc_state *state);
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
@@ -744,6 +748,10 @@ int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);
+/* vc4_txp.c */
+extern struct platform_driver vc4_txp_driver;
+int vc4_txp_debugfs_regs(struct seq_file *m, void *unused);
+
/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 8aa897835118..0c607eb33d7e 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -814,7 +814,9 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
struct vc4_dsi *dsi = vc4_encoder->dsi;
struct device *dev = &dsi->pdev->dev;
+ drm_bridge_disable(dsi->bridge);
vc4_dsi_ulps(dsi, true);
+ drm_bridge_post_disable(dsi->bridge);
clk_disable_unprepare(dsi->pll_phy_clock);
clk_disable_unprepare(dsi->escape_clock);
@@ -1089,21 +1091,6 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
/* Display reset sequence timeout */
DSI_PORT_WRITE(PR_TO_CNT, 100000);
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- DSI_PORT_WRITE(DISP0_CTRL,
- VC4_SET_FIELD(dsi->divider,
- DSI_DISP0_PIX_CLK_DIV) |
- VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
- VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
- DSI_DISP0_LP_STOP_CTRL) |
- DSI_DISP0_ST_END |
- DSI_DISP0_ENABLE);
- } else {
- DSI_PORT_WRITE(DISP0_CTRL,
- DSI_DISP0_COMMAND_MODE |
- DSI_DISP0_ENABLE);
- }
-
/* Set up DISP1 for transferring long command payloads through
* the pixfifo.
*/
@@ -1128,6 +1115,25 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
vc4_dsi_ulps(dsi, false);
+ drm_bridge_pre_enable(dsi->bridge);
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ DSI_PORT_WRITE(DISP0_CTRL,
+ VC4_SET_FIELD(dsi->divider,
+ DSI_DISP0_PIX_CLK_DIV) |
+ VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
+ VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
+ DSI_DISP0_LP_STOP_CTRL) |
+ DSI_DISP0_ST_END |
+ DSI_DISP0_ENABLE);
+ } else {
+ DSI_PORT_WRITE(DISP0_CTRL,
+ DSI_DISP0_COMMAND_MODE |
+ DSI_DISP0_ENABLE);
+ }
+
+ drm_bridge_enable(dsi->bridge);
+
if (debug_dump_regs) {
DRM_INFO("DSI regs after:\n");
vc4_dsi_dump_regs(dsi);
@@ -1606,8 +1612,18 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&panel, &dsi->bridge);
- if (ret)
+ if (ret) {
+ /* If the bridge or panel pointed to by dev->of_node is not
+ * enabled, just return 0 here so that we don't prevent the DRM
+ * dev from being registered. Of course that means the DSI
+ * encoder won't be exposed, but that's not a problem since
+ * nothing is connected to it.
+ */
+ if (ret == -ENODEV)
+ return 0;
+
return ret;
+ }
if (panel) {
dsi->bridge = devm_drm_panel_bridge_add(dev, panel,
@@ -1639,6 +1655,12 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
dev_err(dev, "bridge attach failed: %d\n", ret);
return ret;
}
+ /* Disable the atomic helper calls into the bridge. We
+ * manually call the bridge pre_enable / enable / etc. calls
+ * from our driver, since we need to sequence them within the
+ * encoder's enable/disable paths.
+ */
+ dsi->encoder->bridge = NULL;
pm_runtime_enable(dev);
@@ -1652,7 +1674,8 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- pm_runtime_disable(dev);
+ if (dsi->bridge)
+ pm_runtime_disable(dev);
vc4_dsi_encoder_destroy(dsi->encoder);
diff --git a/drivers/gpu/drm/vc4/vc4_fence.c b/drivers/gpu/drm/vc4/vc4_fence.c
index dbf5a5a5d5f5..580214e2158c 100644
--- a/drivers/gpu/drm/vc4/vc4_fence.c
+++ b/drivers/gpu/drm/vc4/vc4_fence.c
@@ -33,11 +33,6 @@ static const char *vc4_fence_get_timeline_name(struct dma_fence *fence)
return "vc4-v3d";
}
-static bool vc4_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static bool vc4_fence_signaled(struct dma_fence *fence)
{
struct vc4_fence *f = to_vc4_fence(fence);
@@ -49,8 +44,5 @@ static bool vc4_fence_signaled(struct dma_fence *fence)
const struct dma_fence_ops vc4_fence_ops = {
.get_driver_name = vc4_fence_get_driver_name,
.get_timeline_name = vc4_fence_get_timeline_name,
- .enable_signaling = vc4_fence_enable_signaling,
.signaled = vc4_fence_signaled,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
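
The callbacks deleted above rely on the dma_fence core having made .enable_signaling, .wait and .release optional: a NULL .wait falls back to dma_fence_default_wait(), a NULL .release to dma_fence_free(), and a missing .enable_signaling is treated as trivially succeeding. A minimal ops table therefore reduces to the following sketch (foo_* names are hypothetical):

    static const struct dma_fence_ops foo_fence_ops = {
            /* Only the name callbacks are mandatory; the core supplies
             * dma_fence_default_wait() and dma_fence_free() for NULL
             * .wait and .release.
             */
            .get_driver_name = foo_fence_get_driver_name,
            .get_timeline_name = foo_fence_get_timeline_name,
            .signaled = foo_fence_signaled, /* optional polling hook */
    };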
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b8d50533e2bb..fd5522fd179e 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -285,7 +285,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
drm_rgb_quant_range_selectable(edid);
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -329,7 +329,7 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return connector;
}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 8a411e5f8776..ca5aa7fba769 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -153,18 +153,11 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
- /* Make sure that drm_atomic_helper_wait_for_vblanks()
- * actually waits for vblank. If we're doing a full atomic
- * modeset (as opposed to a vc4_update_plane() short circuit),
- * then we need to wait for scanout to be done with our
- * display lists before we free it and potentially reallocate
- * and overwrite the dlist memory with a new modeset.
- */
- state->legacy_cursor_update = false;
+ drm_atomic_helper_fake_vblank(state);
drm_atomic_helper_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_wait_for_flip_done(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
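
For context, the substitution above follows the generic helper sequence: drm_atomic_helper_fake_vblank() delivers fake vblank events for CRTCs that cannot generate real ones (such as the writeback-only TXP added in this series), and waiting on flip_done rather than a vblank count guarantees the hardware has latched the new display lists before the old ones are freed, which is what the deleted legacy_cursor_update workaround only approximated. The resulting tail matches the stock helper ordering (sketch):

    drm_atomic_helper_commit_modeset_disables(dev, state);
    drm_atomic_helper_commit_planes(dev, state, 0);
    drm_atomic_helper_commit_modeset_enables(dev, state);
    drm_atomic_helper_fake_vblank(state);      /* events for vblank-less CRTCs */
    drm_atomic_helper_commit_hw_done(state);
    drm_atomic_helper_wait_for_flip_done(dev, state);
    drm_atomic_helper_cleanup_planes(dev, state);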
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 1d34619eb3fe..cfb50fedfa2b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -320,6 +320,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+ } else {
+ vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+ vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
@@ -467,12 +470,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
struct drm_framebuffer *fb = state->fb;
u32 ctl0_offset = vc4_state->dlist_count;
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
+ u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
int num_planes = drm_format_num_planes(format->drm);
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
u32 lbm_size, tiling;
unsigned long irqflags;
+ u32 hvs_format = format->hvs;
int ret, i;
ret = vc4_plane_setup_clipping_and_scaling(state);
@@ -512,7 +517,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
scl1 = vc4_get_scl_field(state, 0);
}
- switch (fb->modifier) {
+ switch (base_format_mod) {
case DRM_FORMAT_MOD_LINEAR:
tiling = SCALER_CTL0_TILING_LINEAR;
pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
@@ -535,6 +540,49 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
break;
}
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256: {
+ uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+
+ /* Column-based NV12 or RGBA. */
+ if (fb->format->num_planes > 1) {
+ if (hvs_format != HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE) {
+ DRM_DEBUG_KMS("SAND format only valid for NV12/21");
+ return -EINVAL;
+ }
+ hvs_format = HVS_PIXEL_FORMAT_H264;
+ } else {
+ if (base_format_mod == DRM_FORMAT_MOD_BROADCOM_SAND256) {
+ DRM_DEBUG_KMS("SAND256 format only valid for H.264");
+ return -EINVAL;
+ }
+ }
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ tiling = SCALER_CTL0_TILING_64B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tiling = SCALER_CTL0_TILING_128B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tiling = SCALER_CTL0_TILING_256B_OR_T;
+ break;
+ default:
+ break;
+ }
+
+ if (param > SCALER_TILE_HEIGHT_MASK) {
+ DRM_DEBUG_KMS("SAND height too large (%d)\n", param);
+ return -EINVAL;
+ }
+
+ pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
+ break;
+ }
+
default:
DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
(long long)fb->modifier);
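
The switch above compares against the parameter-stripped base because the Broadcom SAND modifiers encode the column height in the upper bits of the modifier value; fourcc_mod_broadcom_mod() masks the parameter off and fourcc_mod_broadcom_param() extracts it. A sketch of how the two halves fit together, assuming the COL_HEIGHT macros from drm_fourcc.h and an example height of 96 lines:

    #include <drm/drm_fourcc.h>

    /* NV12 laid out in 128-byte-wide SAND columns, 96 lines per column. */
    uint64_t mod = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

    uint64_t base = fourcc_mod_broadcom_mod(mod);   /* DRM_FORMAT_MOD_BROADCOM_SAND128 */
    int height    = fourcc_mod_broadcom_param(mod); /* 96, written to SCALER_TILE_HEIGHT */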
@@ -544,8 +592,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
+ VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
- (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
+ (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
@@ -607,8 +656,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Pitch word 1/2 */
for (i = 1; i < num_planes; i++) {
- vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
+ if (hvs_format != HVS_PIXEL_FORMAT_H264) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(fb->pitches[i],
+ SCALER_SRC_PITCH));
+ } else {
+ vc4_dlist_write(vc4_state, pitch0);
+ }
}
/* Colorspace conversion words */
@@ -810,18 +864,21 @@ static int vc4_prepare_fb(struct drm_plane *plane,
struct dma_fence *fence;
int ret;
- if ((plane->state->fb == state->fb) || !state->fb)
+ if (!state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ fence = reservation_object_get_excl_rcu(bo->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+
+ if (plane->state->fb == state->fb)
+ return 0;
+
ret = vc4_bo_inc_usecnt(bo);
if (ret)
return ret;
- fence = reservation_object_get_excl_rcu(bo->resv);
- drm_atomic_set_fence_for_plane(state, fence);
-
return 0;
}
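
The reordering in this hunk is subtle but deliberate: the exclusive fence must be attached to the plane state even when the framebuffer pointer is unchanged, otherwise a commit that merely moves the plane could scan out before a pending render to the same BO finishes; only the usecnt bump may be skipped in the same-fb case. Condensed, the intended ordering is (a restatement of the hunk above, not new driver code):

    /* Always publish the BO's exclusive fence for implicit sync. */
    fence = reservation_object_get_excl_rcu(bo->resv);
    drm_atomic_set_fence_for_plane(state, fence);

    /* Reference accounting only applies to a genuinely new fb. */
    if (plane->state->fb == state->fb)
            return 0;
    return vc4_bo_inc_usecnt(bo);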
@@ -848,7 +905,7 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
static void vc4_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
@@ -866,13 +923,32 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
case DRM_FORMAT_BGR565:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
- return true;
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ return true;
+ default:
+ return false;
+ }
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ return true;
+ default:
+ return false;
+ }
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
- case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
default:
return (modifier == DRM_FORMAT_MOD_LINEAR);
}
@@ -900,6 +976,9 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
unsigned i;
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
+ DRM_FORMAT_MOD_BROADCOM_SAND128,
+ DRM_FORMAT_MOD_BROADCOM_SAND64,
+ DRM_FORMAT_MOD_BROADCOM_SAND256,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index d1fb6fec46eb..d6864fa4bd14 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -1031,6 +1031,12 @@ enum hvs_pixel_format {
#define SCALER_SRC_PITCH_MASK VC4_MASK(15, 0)
#define SCALER_SRC_PITCH_SHIFT 0
+/* PITCH0/1/2 fields for tiled (SAND). */
+#define SCALER_TILE_SKIP_0_MASK VC4_MASK(18, 16)
+#define SCALER_TILE_SKIP_0_SHIFT 16
+#define SCALER_TILE_HEIGHT_MASK VC4_MASK(15, 0)
+#define SCALER_TILE_HEIGHT_SHIFT 0
+
/* PITCH0 fields for T-tiled. */
#define SCALER_PITCH0_TILE_WIDTH_L_MASK VC4_MASK(22, 16)
#define SCALER_PITCH0_TILE_WIDTH_L_SHIFT 16
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
new file mode 100644
index 000000000000..6e23c50168f9
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2018 Broadcom
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_writeback.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+/* Base address of the output. Raster formats must be 4-byte aligned,
+ * T and LT must be 16-byte aligned or maybe utile-aligned (docs are
+ * inconsistent, but probably utile).
+ */
+#define TXP_DST_PTR 0x00
+
+/* Pitch in bytes for raster images, 16-byte aligned. For tiled, it's
+ * the width in tiles.
+ */
+#define TXP_DST_PITCH 0x04
+/* For T-tiled images, DST_PITCH should be the number of tiles wide,
+ * shifted up.
+ */
+# define TXP_T_TILE_WIDTH_SHIFT 7
+/* For LT-tiled images, DST_PITCH should be the number of utiles wide,
+ * shifted up.
+ */
+# define TXP_LT_TILE_WIDTH_SHIFT 4
+
+/* Pre-rotation width/height of the image. Must match HVS config.
+ *
+ * If TFORMAT, the limit is 1920 for 32-bit formats and 3840 for 16-bit,
+ * and width/height must be tile or utile-aligned as appropriate. If
+ * transposing (rotating), width is limited to 1920.
+ *
+ * Height is limited to various numbers between 4088 and 4095. I'd
+ * just use 4088 to be safe.
+ */
+#define TXP_DIM 0x08
+# define TXP_HEIGHT_SHIFT 16
+# define TXP_HEIGHT_MASK GENMASK(31, 16)
+# define TXP_WIDTH_SHIFT 0
+# define TXP_WIDTH_MASK GENMASK(15, 0)
+
+#define TXP_DST_CTRL 0x0c
+/* These bits are set to 0x54 */
+#define TXP_PILOT_SHIFT 24
+#define TXP_PILOT_MASK GENMASK(31, 24)
+/* Bits 22-23 are set to 0x01 */
+#define TXP_VERSION_SHIFT 22
+#define TXP_VERSION_MASK GENMASK(23, 22)
+
+/* Powers down the internal memory. */
+# define TXP_POWERDOWN BIT(21)
+
+/* Enables storing the alpha component in 8888/4444, instead of
+ * filling with ~ALPHA_INVERT.
+ */
+# define TXP_ALPHA_ENABLE BIT(20)
+
+/* 4 bits, each enables stores for a channel in each set of 4 bytes.
+ * Set to 0xf for normal operation.
+ */
+# define TXP_BYTE_ENABLE_SHIFT 16
+# define TXP_BYTE_ENABLE_MASK GENMASK(19, 16)
+
+/* Debug: Generate VSTART again at EOF. */
+# define TXP_VSTART_AT_EOF BIT(15)
+
+/* Debug: Terminate the current frame immediately. Stops AXI
+ * writes.
+ */
+# define TXP_ABORT BIT(14)
+
+# define TXP_DITHER BIT(13)
+
+/* Inverts alpha if TXP_ALPHA_ENABLE, chooses fill value for
+ * !TXP_ALPHA_ENABLE.
+ */
+# define TXP_ALPHA_INVERT BIT(12)
+
+/* Note: I've listed the channels here in high bit (in byte 3/2/1) to
+ * low bit (in byte 0) order.
+ */
+# define TXP_FORMAT_SHIFT 8
+# define TXP_FORMAT_MASK GENMASK(11, 8)
+# define TXP_FORMAT_ABGR4444 0
+# define TXP_FORMAT_ARGB4444 1
+# define TXP_FORMAT_BGRA4444 2
+# define TXP_FORMAT_RGBA4444 3
+# define TXP_FORMAT_BGR565 6
+# define TXP_FORMAT_RGB565 7
+/* 888s are non-rotated, raster-only */
+# define TXP_FORMAT_BGR888 8
+# define TXP_FORMAT_RGB888 9
+# define TXP_FORMAT_ABGR8888 12
+# define TXP_FORMAT_ARGB8888 13
+# define TXP_FORMAT_BGRA8888 14
+# define TXP_FORMAT_RGBA8888 15
+
+/* If TFORMAT is set, generates LT instead of T format. */
+# define TXP_LINEAR_UTILE BIT(7)
+
+/* Rotate output by 90 degrees. */
+# define TXP_TRANSPOSE BIT(6)
+
+/* Generate a tiled format for V3D. */
+# define TXP_TFORMAT BIT(5)
+
+/* Generates some undefined test mode output. */
+# define TXP_TEST_MODE BIT(4)
+
+/* Request odd field from HVS. */
+# define TXP_FIELD BIT(3)
+
+/* Raise interrupt when idle. */
+# define TXP_EI BIT(2)
+
+/* Set when generating a frame, clears when idle. */
+# define TXP_BUSY BIT(1)
+
+/* Starts a frame. Self-clearing. */
+# define TXP_GO BIT(0)
+
+/* Number of lines received and committed to memory. */
+#define TXP_PROGRESS 0x10
+
+#define TXP_READ(offset) readl(txp->regs + (offset))
+#define TXP_WRITE(offset, val) writel(val, txp->regs + (offset))
+
+struct vc4_txp {
+ struct platform_device *pdev;
+
+ struct drm_writeback_connector connector;
+
+ void __iomem *regs;
+};
+
+static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_txp, connector.encoder);
+}
+
+static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn)
+{
+ return container_of(conn, struct vc4_txp, connector.base);
+}
+
+#define TXP_REG(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} txp_regs[] = {
+ TXP_REG(TXP_DST_PTR),
+ TXP_REG(TXP_DST_PITCH),
+ TXP_REG(TXP_DIM),
+ TXP_REG(TXP_DST_CTRL),
+ TXP_REG(TXP_PROGRESS),
+};
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_txp_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_txp *txp = vc4->txp;
+ int i;
+
+ if (!txp)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(txp_regs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ txp_regs[i].name, txp_regs[i].reg,
+ TXP_READ(txp_regs[i].reg));
+ }
+
+ return 0;
+}
+#endif
+
+static int vc4_txp_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static enum drm_mode_status
+vc4_txp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ int w = mode->hdisplay, h = mode->vdisplay;
+
+ if (w < mode_config->min_width || w > mode_config->max_width)
+ return MODE_BAD_HVALUE;
+
+ if (h < mode_config->min_height || h > mode_config->max_height)
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
+}
+
+static const u32 drm_fmts[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+};
+
+static const u32 txp_fmts[] = {
+ TXP_FORMAT_RGB888,
+ TXP_FORMAT_BGR888,
+ TXP_FORMAT_ARGB8888,
+ TXP_FORMAT_ABGR8888,
+ TXP_FORMAT_ARGB8888,
+ TXP_FORMAT_ABGR8888,
+ TXP_FORMAT_RGBA8888,
+ TXP_FORMAT_BGRA8888,
+ TXP_FORMAT_RGBA8888,
+ TXP_FORMAT_BGRA8888,
+};
+
+static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_gem_cma_object *gem;
+ struct drm_framebuffer *fb;
+ int i;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
+ conn_state->crtc);
+
+ fb = conn_state->writeback_job->fb;
+ if (fb->width != crtc_state->mode.hdisplay ||
+ fb->height != crtc_state->mode.vdisplay) {
+ DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+ fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
+ if (fb->format->format == drm_fmts[i])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(drm_fmts))
+ return -EINVAL;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ /* Pitch must be aligned to 16 bytes. */
+ if (fb->pitches[0] & GENMASK(3, 0))
+ return -EINVAL;
+
+ vc4_crtc_txp_armed(crtc_state);
+
+ return 0;
+}
+
+static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
+ struct drm_connector_state *conn_state)
+{
+ struct vc4_txp *txp = connector_to_vc4_txp(conn);
+ struct drm_gem_cma_object *gem;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+ u32 ctrl;
+ int i;
+
+ if (WARN_ON(!conn_state->writeback_job ||
+ !conn_state->writeback_job->fb))
+ return;
+
+ mode = &conn_state->crtc->state->adjusted_mode;
+ fb = conn_state->writeback_job->fb;
+
+ for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
+ if (fb->format->format == drm_fmts[i])
+ break;
+ }
+
+ if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
+ return;
+
+ ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
+ VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
+ VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
+
+ if (fb->format->has_alpha)
+ ctrl |= TXP_ALPHA_ENABLE;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
+ TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
+ TXP_WRITE(TXP_DIM,
+ VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
+ VC4_SET_FIELD(mode->vdisplay, TXP_HEIGHT));
+
+ TXP_WRITE(TXP_DST_CTRL, ctrl);
+
+ drm_writeback_queue_job(&txp->connector, conn_state->writeback_job);
+}
+
+static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
+ .get_modes = vc4_txp_connector_get_modes,
+ .mode_valid = vc4_txp_connector_mode_valid,
+ .atomic_check = vc4_txp_connector_atomic_check,
+ .atomic_commit = vc4_txp_connector_atomic_commit,
+};
+
+static enum drm_connector_status
+vc4_txp_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void vc4_txp_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs vc4_txp_connector_funcs = {
+ .detect = vc4_txp_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vc4_txp_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
+{
+ struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
+
+ if (TXP_READ(TXP_DST_CTRL) & TXP_BUSY) {
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ TXP_WRITE(TXP_DST_CTRL, TXP_ABORT);
+
+ while (TXP_READ(TXP_DST_CTRL) & TXP_BUSY &&
+ time_before(jiffies, timeout))
+ ;
+
+ WARN_ON(TXP_READ(TXP_DST_CTRL) & TXP_BUSY);
+ }
+
+ TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
+}
+
+static const struct drm_encoder_helper_funcs vc4_txp_encoder_helper_funcs = {
+ .disable = vc4_txp_encoder_disable,
+};
+
+static irqreturn_t vc4_txp_interrupt(int irq, void *data)
+{
+ struct vc4_txp *txp = data;
+
+ TXP_WRITE(TXP_DST_CTRL, TXP_READ(TXP_DST_CTRL) & ~TXP_EI);
+ vc4_crtc_handle_vblank(to_vc4_crtc(txp->connector.base.state->crtc));
+ drm_writeback_signal_completion(&txp->connector, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_txp *txp;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ txp = devm_kzalloc(dev, sizeof(*txp), GFP_KERNEL);
+ if (!txp)
+ return -ENOMEM;
+
+ txp->pdev = pdev;
+
+ txp->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(txp->regs))
+ return PTR_ERR(txp->regs);
+
+ drm_connector_helper_add(&txp->connector.base,
+ &vc4_txp_connector_helper_funcs);
+ ret = drm_writeback_connector_init(drm, &txp->connector,
+ &vc4_txp_connector_funcs,
+ &vc4_txp_encoder_helper_funcs,
+ drm_fmts, ARRAY_SIZE(drm_fmts));
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
+ dev_name(dev), txp);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, txp);
+ vc4->txp = txp;
+
+ return 0;
+}
+
+static void vc4_txp_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_txp *txp = dev_get_drvdata(dev);
+
+ vc4_txp_connector_destroy(&txp->connector.base);
+
+ vc4->txp = NULL;
+}
+
+static const struct component_ops vc4_txp_ops = {
+ .bind = vc4_txp_bind,
+ .unbind = vc4_txp_unbind,
+};
+
+static int vc4_txp_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_txp_ops);
+}
+
+static int vc4_txp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_txp_ops);
+ return 0;
+}
+
+static const struct of_device_id vc4_txp_dt_match[] = {
+ { .compatible = "brcm,bcm2835-txp" },
+ { /* sentinel */ },
+};
+
+struct platform_driver vc4_txp_driver = {
+ .probe = vc4_txp_probe,
+ .remove = vc4_txp_remove,
+ .driver = {
+ .name = "vc4_txp",
+ .of_match_table = vc4_txp_dt_match,
+ },
+};
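
Because the TXP is exposed as a standard DRM writeback connector, userspace drives capture purely through atomic properties; there is no driver-specific ioctl. A hedged libdrm-style sketch (the IDs for the core "CRTC_ID" and "WRITEBACK_FB_ID" properties are assumed to have been looked up beforehand; error handling omitted):

    drmModeAtomicReq *req = drmModeAtomicAlloc();

    drmModeAtomicAddProperty(req, conn_id, prop_crtc_id, crtc_id);
    drmModeAtomicAddProperty(req, conn_id, prop_wb_fb_id, fb_id);
    drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
    /* One frame is written into fb_id; completion can be tracked via the
     * optional "WRITEBACK_OUT_FENCE_PTR" property if a fence is needed.
     */
    drmModeAtomicFree(req);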
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 3a9a302247a2..8e7facb6514e 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -404,7 +404,7 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
VC4_VEC_TV_MODE_NTSC);
vec->tv_mode = &vc4_vec_tv_modes[VC4_VEC_TV_MODE_NTSC];
- drm_mode_connector_attach_encoder(connector, vec->encoder);
+ drm_connector_attach_encoder(connector, vec->encoder);
return connector;
}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 2524ff116f00..0e5620f76ee0 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -61,23 +61,22 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
kfree(vgem_obj);
}
-static int vgem_gem_fault(struct vm_fault *vmf)
+static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
unsigned long vaddr = vmf->address;
- int ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
loff_t num_pages;
pgoff_t page_offset;
page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
- if (page_offset > num_pages)
+ if (page_offset >= num_pages)
return VM_FAULT_SIGBUS;
- ret = -ENOENT;
mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
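
Two independent fixes ride along in the vgem hunk above: the handler's return type becomes the dedicated vm_fault_t, so VM_FAULT_* codes are no longer mixed with -errno values in a plain int, and the bounds check becomes >=, since a page_offset equal to num_pages already points one past the last page. The shape of a fault handler in the new style (sketch; foo_gem_object is a hypothetical stand-in):

    static vm_fault_t foo_gem_fault(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;
            struct foo_gem_object *obj = vma->vm_private_data;
            pgoff_t page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

            if (page_offset >= DIV_ROUND_UP(obj->base.size, PAGE_SIZE))
                    return VM_FAULT_SIGBUS; /* >= catches the one-past-end case */

            get_page(obj->pages[page_offset]);
            vmf->page = obj->pages[page_offset];
            return 0;                       /* 0 or VM_FAULT_*, never -errno */
    }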
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index a5edd86603d9..25503b933599 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -28,6 +28,7 @@
#include "virtgpu_drv.h"
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#define XRES_MIN 32
#define YRES_MIN 32
@@ -48,16 +49,6 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
-static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct virtio_gpu_framebuffer *virtio_gpu_fb
- = to_virtio_gpu_framebuffer(fb);
-
- drm_gem_object_put_unlocked(virtio_gpu_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(virtio_gpu_fb);
-}
-
static int
virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@@ -71,20 +62,9 @@ virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
}
-static int
-virtio_gpu_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct virtio_gpu_framebuffer *virtio_gpu_fb =
- to_virtio_gpu_framebuffer(fb);
-
- return drm_gem_handle_create(file_priv, virtio_gpu_fb->obj, handle);
-}
-
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
- .create_handle = virtio_gpu_framebuffer_create_handle,
- .destroy = virtio_gpu_user_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
.dirty = virtio_gpu_framebuffer_surface_dirty,
};
@@ -97,7 +77,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
int ret;
struct virtio_gpu_object *bo;
- vgfb->obj = obj;
+ vgfb->base.obj[0] = obj;
bo = gem_to_virtio_gpu_obj(obj);
@@ -105,7 +85,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
- vgfb->obj = NULL;
+ vgfb->base.obj[0] = NULL;
return ret;
}
@@ -302,8 +282,6 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_crtc_init_with_planes(dev, crtc, primary, cursor,
&virtio_gpu_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
- primary->crtc = crtc;
- cursor->crtc = crtc;
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -314,7 +292,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
encoder->possible_crtcs = 1 << index;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
drm_connector_register(connector);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d25c8ca224aa..65605e207bbe 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -124,7 +124,6 @@ struct virtio_gpu_output {
struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
- struct drm_gem_object *obj;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
uint32_t hw_res_handle;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 8af69ab58b89..a121b1c79522 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -46,7 +46,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
int bpp = fb->base.format->cpp[0];
int x2, y2;
unsigned long flags;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
if ((width <= 0) ||
(x + width > fb->base.width) ||
@@ -121,7 +121,7 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
unsigned int num_clips)
{
struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
struct drm_clip_rect norect;
struct drm_clip_rect *clips_ptr;
int left, right, top, bottom;
@@ -305,8 +305,8 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&vgfbdev->helper);
- if (vgfb->obj)
- vgfb->obj = NULL;
+ if (vgfb->base.obj[0])
+ vgfb->base.obj[0] = NULL;
drm_fb_helper_fini(&vgfbdev->helper);
drm_framebuffer_cleanup(&vgfb->base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 23353521f903..00c742a441bf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -36,11 +36,6 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
return "controlq";
}
-static bool virtio_enable_signaling(struct dma_fence *f)
-{
- return true;
-}
-
static bool virtio_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -67,9 +62,7 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
- .enable_signaling = virtio_enable_signaling,
.signaled = virtio_signaled,
- .wait = dma_fence_default_wait,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 71ba455af915..dc5b5b2b7aab 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -154,7 +154,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
if (bo->dumb) {
virtio_gpu_cmd_transfer_to_host_2d
@@ -208,7 +208,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
} else {
handle = 0;
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
new file mode 100644
index 000000000000..986297da51bf
--- /dev/null
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -0,0 +1,3 @@
+vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o
+
+obj-$(CONFIG_DRM_VKMS) += vkms.o
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
new file mode 100644
index 000000000000..875fca662ac0
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+{
+ struct vkms_output *output = container_of(timer, struct vkms_output,
+ vblank_hrtimer);
+ struct drm_crtc *crtc = &output->crtc;
+ int ret_overrun;
+ bool ret;
+
+ ret = drm_crtc_handle_vblank(crtc);
+ if (!ret)
+ DRM_ERROR("vkms failure on handling vblank");
+
+ ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+ output->period_ns);
+
+ return HRTIMER_RESTART;
+}
+
+static int vkms_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ out->vblank_hrtimer.function = &vkms_vblank_simulate;
+ out->period_ns = ktime_set(0, vblank->framedur_ns);
+ hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
+
+ return 0;
+}
+
+static void vkms_disable_vblank(struct drm_crtc *crtc)
+{
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+ hrtimer_cancel(&out->vblank_hrtimer);
+}
+
+bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ struct vkms_output *output = &vkmsdev->output;
+
+ *vblank_time = output->vblank_hrtimer.node.expires;
+
+ return true;
+}
+
+static const struct drm_crtc_funcs vkms_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = vkms_enable_vblank,
+ .disable_vblank = vkms_disable_vblank,
+};
+
+static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+
+static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ drm_crtc_vblank_off(crtc);
+}
+
+static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ unsigned long flags;
+
+ if (crtc->state->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+ .atomic_flush = vkms_crtc_atomic_flush,
+ .atomic_enable = vkms_crtc_atomic_enable,
+ .atomic_disable = vkms_crtc_atomic_disable,
+};
+
+int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary, struct drm_plane *cursor)
+{
+ int ret;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+ &vkms_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init CRTC\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
+
+ return ret;
+}
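
The hrtimer above is the entirety of the vkms vblank machinery: framedur_ns is filled in by drm_calc_timestamping_constants() from the current mode, so the timer period tracks the nominal refresh rate and drm_crtc_handle_vblank() delivers events exactly as a hardware interrupt would. For a 60 Hz mode the numbers work out as follows (illustrative sketch, not driver code):

    /* One vblank period at 60 Hz: */
    u64 framedur_ns = div_u64(NSEC_PER_SEC, 60);   /* ~16666667 ns */
    ktime_t period  = ktime_set(0, framedur_ns);   /* what period_ns stores */
    /* hrtimer_forward_now() then re-arms the timer by exactly this
     * period on every expiry, emulating a fixed-rate scanout.
     */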
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
new file mode 100644
index 000000000000..6e728b825259
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -0,0 +1,156 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "vkms_drv.h"
+
+#define DRIVER_NAME "vkms"
+#define DRIVER_DESC "Virtual Kernel Mode Setting"
+#define DRIVER_DATE "20180514"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static struct vkms_device *vkms_device;
+
+static const struct file_operations vkms_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = drm_gem_mmap,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .release = drm_release,
+};
+
+static const struct vm_operations_struct vkms_gem_vm_ops = {
+ .fault = vkms_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static void vkms_release(struct drm_device *dev)
+{
+ struct vkms_device *vkms = container_of(dev, struct vkms_device, drm);
+
+ platform_device_unregister(vkms->platform);
+ drm_atomic_helper_shutdown(&vkms->drm);
+ drm_mode_config_cleanup(&vkms->drm);
+ drm_dev_fini(&vkms->drm);
+}
+
+static struct drm_driver vkms_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
+ .release = vkms_release,
+ .fops = &vkms_driver_fops,
+ .dumb_create = vkms_dumb_create,
+ .dumb_map_offset = vkms_dumb_map,
+ .gem_vm_ops = &vkms_gem_vm_ops,
+ .gem_free_object_unlocked = vkms_gem_free_object,
+ .get_vblank_timestamp = vkms_get_vblank_timestamp,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static const struct drm_mode_config_funcs vkms_mode_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int vkms_modeset_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = &vkms_mode_funcs;
+ dev->mode_config.min_width = XRES_MIN;
+ dev->mode_config.min_height = YRES_MIN;
+ dev->mode_config.max_width = XRES_MAX;
+ dev->mode_config.max_height = YRES_MAX;
+
+ return vkms_output_init(vkmsdev);
+}
+
+static int __init vkms_init(void)
+{
+ int ret;
+
+ vkms_device = kzalloc(sizeof(*vkms_device), GFP_KERNEL);
+ if (!vkms_device)
+ return -ENOMEM;
+
+ ret = drm_dev_init(&vkms_device->drm, &vkms_driver, NULL);
+ if (ret)
+ goto out_free;
+
+ vkms_device->platform =
+ platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ if (IS_ERR(vkms_device->platform)) {
+ ret = PTR_ERR(vkms_device->platform);
+ goto out_fini;
+ }
+
+ vkms_device->drm.irq_enabled = true;
+
+ ret = drm_vblank_init(&vkms_device->drm, 1);
+ if (ret) {
+ DRM_ERROR("Failed to vblank\n");
+ goto out_fini;
+ }
+
+ ret = vkms_modeset_init(vkms_device);
+ if (ret)
+ goto out_unregister;
+
+ ret = drm_dev_register(&vkms_device->drm, 0);
+ if (ret)
+ goto out_unregister;
+
+ return 0;
+
+out_unregister:
+ platform_device_unregister(vkms_device->platform);
+
+out_fini:
+ drm_dev_fini(&vkms_device->drm);
+
+out_free:
+ kfree(vkms_device);
+ return ret;
+}
+
+static void __exit vkms_exit(void)
+{
+ if (!vkms_device) {
+ DRM_INFO("vkms_device is NULL.\n");
+ return;
+ }
+
+ drm_dev_unregister(&vkms_device->drm);
+ drm_dev_put(&vkms_device->drm);
+
+ kfree(vkms_device);
+}
+
+module_init(vkms_init);
+module_exit(vkms_exit);
+
+MODULE_AUTHOR("Haneen Mohammed <hamohammed.sa@gmail.com>");
+MODULE_AUTHOR("Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
new file mode 100644
index 000000000000..07be29f2dc44
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -0,0 +1,78 @@
+#ifndef _VKMS_DRV_H_
+#define _VKMS_DRV_H_
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_encoder.h>
+#include <linux/hrtimer.h>
+
+#define XRES_MIN 32
+#define YRES_MIN 32
+
+#define XRES_DEF 1024
+#define YRES_DEF 768
+
+#define XRES_MAX 8192
+#define YRES_MAX 8192
+
+static const u32 vkms_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+struct vkms_output {
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct hrtimer vblank_hrtimer;
+ ktime_t period_ns;
+ struct drm_pending_vblank_event *event;
+};
+
+struct vkms_device {
+ struct drm_device drm;
+ struct platform_device *platform;
+ struct vkms_output output;
+};
+
+struct vkms_gem_object {
+ struct drm_gem_object gem;
+ struct mutex pages_lock; /* Page lock used in page fault handler */
+ struct page **pages;
+};
+
+#define drm_crtc_to_vkms_output(target) \
+ container_of(target, struct vkms_output, crtc)
+
+#define drm_device_to_vkms_device(target) \
+ container_of(target, struct vkms_device, drm)
+
+/* CRTC */
+int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary, struct drm_plane *cursor);
+
+bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq);
+
+int vkms_output_init(struct vkms_device *vkmsdev);
+
+struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev);
+
+/* Gem stuff */
+struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ u32 *handle,
+ u64 size);
+
+int vkms_gem_fault(struct vm_fault *vmf);
+
+int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset);
+
+void vkms_gem_free_object(struct drm_gem_object *obj);
+
+#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
new file mode 100644
index 000000000000..c7e38368602b
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/shmem_fs.h>
+
+#include "vkms_drv.h"
+
+static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
+ u64 size)
+{
+ struct vkms_gem_object *obj;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ size = roundup(size, PAGE_SIZE);
+ ret = drm_gem_object_init(dev, &obj->gem, size);
+ if (ret) {
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ mutex_init(&obj->pages_lock);
+
+ return obj;
+}
+
+void vkms_gem_free_object(struct drm_gem_object *obj)
+{
+ struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
+ gem);
+
+ kvfree(gem->pages);
+ mutex_destroy(&gem->pages_lock);
+ drm_gem_object_release(obj);
+ kfree(gem);
+}
+
+int vkms_gem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vkms_gem_object *obj = vma->vm_private_data;
+ unsigned long vaddr = vmf->address;
+ pgoff_t page_offset;
+ loff_t num_pages;
+ int ret;
+
+ page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
+ num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
+
+ if (page_offset >= num_pages)
+ return VM_FAULT_SIGBUS;
+
+ ret = -ENOENT;
+ mutex_lock(&obj->pages_lock);
+ if (obj->pages) {
+ get_page(obj->pages[page_offset]);
+ vmf->page = obj->pages[page_offset];
+ ret = 0;
+ }
+ mutex_unlock(&obj->pages_lock);
+ if (ret) {
+ struct page *page;
+ struct address_space *mapping;
+
+ mapping = file_inode(obj->gem.filp)->i_mapping;
+ page = shmem_read_mapping_page(mapping, page_offset);
+
+ if (!IS_ERR(page)) {
+ vmf->page = page;
+ ret = 0;
+ } else {
+ switch (PTR_ERR(page)) {
+ case -ENOSPC:
+ case -ENOMEM:
+ ret = VM_FAULT_OOM;
+ break;
+ case -EBUSY:
+ ret = VM_FAULT_RETRY;
+ break;
+ case -EFAULT:
+ case -EINVAL:
+ ret = VM_FAULT_SIGBUS;
+ break;
+ default:
+ WARN_ON(PTR_ERR(page));
+ ret = VM_FAULT_SIGBUS;
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ u32 *handle,
+ u64 size)
+{
+ struct vkms_gem_object *obj;
+ int ret;
+
+ if (!file || !dev || !handle)
+ return ERR_PTR(-EINVAL);
+
+ obj = __vkms_gem_create(dev, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ ret = drm_gem_handle_create(file, &obj->gem, handle);
+ drm_gem_object_put_unlocked(&obj->gem);
+ if (ret) {
+ drm_gem_object_release(&obj->gem);
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ return &obj->gem;
+}
+
+int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gem_obj;
+ u64 pitch, size;
+
+ if (!args || !dev || !file)
+ return -EINVAL;
+
+ pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+ size = pitch * args->height;
+
+ if (!size)
+ return -EINVAL;
+
+ gem_obj = vkms_gem_create(dev, file, &args->handle, size);
+ if (IS_ERR(gem_obj))
+ return PTR_ERR(gem_obj);
+
+ args->size = gem_obj->size;
+ args->pitch = pitch;
+
+ DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
+
+ return 0;
+}
+
+int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (!obj->filp) {
+ ret = -EINVAL;
+ goto unref;
+ }
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto unref;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+unref:
+ drm_gem_object_put_unlocked(obj);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
new file mode 100644
index 000000000000..901012cb1af1
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+static void vkms_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs vkms_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vkms_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_encoder_funcs vkms_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int vkms_conn_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+ return count;
+}
+
+static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+};
+
+int vkms_output_init(struct vkms_device *vkmsdev)
+{
+ struct vkms_output *output = &vkmsdev->output;
+ struct drm_device *dev = &vkmsdev->drm;
+ struct drm_connector *connector = &output->connector;
+ struct drm_encoder *encoder = &output->encoder;
+ struct drm_crtc *crtc = &output->crtc;
+ struct drm_plane *primary;
+ int ret;
+
+ primary = vkms_plane_init(vkmsdev);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = vkms_crtc_init(dev, crtc, primary, NULL);
+ if (ret)
+ goto err_crtc;
+
+ ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret) {
+ DRM_ERROR("Failed to init connector\n");
+ goto err_connector;
+ }
+
+ drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+
+ ret = drm_connector_register(connector);
+ if (ret) {
+ DRM_ERROR("Failed to register connector\n");
+ goto err_connector_register;
+ }
+
+ ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ goto err_encoder;
+ }
+ encoder->possible_crtcs = 1;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ DRM_ERROR("Failed to attach connector to encoder\n");
+ goto err_attach;
+ }
+
+ drm_mode_config_reset(dev);
+
+ return 0;
+
+err_attach:
+ drm_encoder_cleanup(encoder);
+
+err_encoder:
+ drm_connector_unregister(connector);
+
+err_connector_register:
+ drm_connector_cleanup(connector);
+
+err_connector:
+ drm_crtc_cleanup(crtc);
+
+err_crtc:
+ drm_plane_cleanup(primary);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
new file mode 100644
index 000000000000..9f75b1e2c1c4
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+static const struct drm_plane_funcs vkms_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static void vkms_primary_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+}
+
+static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
+ .atomic_update = vkms_primary_plane_update,
+};
+
+struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+ struct drm_plane *plane;
+ const u32 *formats;
+ int ret, nformats;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ formats = vkms_formats;
+ nformats = ARRAY_SIZE(vkms_formats);
+
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &vkms_plane_funcs,
+ formats, nformats,
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ kfree(plane);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, &vkms_primary_helper_funcs);
+
+ return plane;
+}
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 8c308dac99c5..6b28a326f8bb 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
depends on DRM && PCI && X86 && MMU
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 794cc9d5c9b0..09b2aa08363e 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
- vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+ vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
index 9ce2466a5d00..69c4253fbfbb 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
index 2dfd57c5f463..9cbba0e8ce6a 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -46,10 +47,10 @@
* the SVGA3D protocol and remain reserved; they should not be used in the
* future.
*
- * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * IDs between 1040 and 2999 (inclusive) are available for use by the
* current SVGA3D protocol.
*
- * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * FIFO clients other than SVGA3D should stay below 1000, or at 3000
* and up.
*/
@@ -89,19 +90,19 @@ typedef enum {
SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069,
SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070,
SVGA_3D_CMD_GENERATE_MIPMAPS = 1071,
- SVGA_3D_CMD_VIDEO_CREATE_DECODER = 1072,
- SVGA_3D_CMD_VIDEO_DESTROY_DECODER = 1073,
- SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR = 1074,
- SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR = 1075,
- SVGA_3D_CMD_VIDEO_DECODE_START_FRAME = 1076,
- SVGA_3D_CMD_VIDEO_DECODE_RENDER = 1077,
- SVGA_3D_CMD_VIDEO_DECODE_END_FRAME = 1078,
- SVGA_3D_CMD_VIDEO_PROCESS_FRAME = 1079,
+ SVGA_3D_CMD_DEAD4 = 1072,
+ SVGA_3D_CMD_DEAD5 = 1073,
+ SVGA_3D_CMD_DEAD6 = 1074,
+ SVGA_3D_CMD_DEAD7 = 1075,
+ SVGA_3D_CMD_DEAD8 = 1076,
+ SVGA_3D_CMD_DEAD9 = 1077,
+ SVGA_3D_CMD_DEAD10 = 1078,
+ SVGA_3D_CMD_DEAD11 = 1079,
SVGA_3D_CMD_ACTIVATE_SURFACE = 1080,
SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081,
SVGA_3D_CMD_SCREEN_DMA = 1082,
- SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE = 1083,
- SVGA_3D_CMD_OPEN_CONTEXT_SURFACE = 1084,
+ SVGA_3D_CMD_DEAD1 = 1083,
+ SVGA_3D_CMD_DEAD2 = 1084,
SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
@@ -217,7 +218,7 @@ typedef enum {
SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177,
SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178,
SVGA_3D_CMD_DX_PRED_COPY = 1179,
- SVGA_3D_CMD_DX_STRETCHBLT = 1180,
+ SVGA_3D_CMD_DX_PRESENTBLT = 1180,
SVGA_3D_CMD_DX_GENMIPS = 1181,
SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182,
SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183,
@@ -254,7 +255,7 @@ typedef enum {
SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214,
SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215,
SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216,
- SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT = 1217,
+ SVGA_3D_CMD_DX_BIND_ALL_SHADER = 1217,
SVGA_3D_CMD_DX_HINT = 1218,
SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219,
SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
@@ -262,17 +263,47 @@ typedef enum {
SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,
/*
- * Reserve some IDs to be used for the DX11 shader types.
+ * Reserve some IDs to be used for the SM5 shader types.
*/
SVGA_3D_CMD_DX_RESERVED1 = 1223,
SVGA_3D_CMD_DX_RESERVED2 = 1224,
SVGA_3D_CMD_DX_RESERVED3 = 1225,
- SVGA_3D_CMD_DX_MAX = 1226,
- SVGA_3D_CMD_MAX = 1226,
+ SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER = 1226,
+ SVGA_3D_CMD_DX_MAX = 1227,
+
+ SVGA_3D_CMD_SCREEN_COPY = 1227,
+
+ /*
+ * Reserve some IDs to be used for video.
+ */
+ SVGA_3D_CMD_VIDEO_RESERVED1 = 1228,
+ SVGA_3D_CMD_VIDEO_RESERVED2 = 1229,
+ SVGA_3D_CMD_VIDEO_RESERVED3 = 1230,
+ SVGA_3D_CMD_VIDEO_RESERVED4 = 1231,
+ SVGA_3D_CMD_VIDEO_RESERVED5 = 1232,
+ SVGA_3D_CMD_VIDEO_RESERVED6 = 1233,
+ SVGA_3D_CMD_VIDEO_RESERVED7 = 1234,
+ SVGA_3D_CMD_VIDEO_RESERVED8 = 1235,
+
+ SVGA_3D_CMD_GROW_OTABLE = 1236,
+ SVGA_3D_CMD_DX_GROW_COTABLE = 1237,
+ SVGA_3D_CMD_INTRA_SURFACE_COPY = 1238,
+
+ SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 = 1239,
+
+ SVGA_3D_CMD_DX_RESOLVE_COPY = 1240,
+ SVGA_3D_CMD_DX_PRED_RESOLVE_COPY = 1241,
+ SVGA_3D_CMD_DX_PRED_CONVERT_REGION = 1242,
+ SVGA_3D_CMD_DX_PRED_CONVERT = 1243,
+ SVGA_3D_CMD_WHOLE_SURFACE_COPY = 1244,
+
+ SVGA_3D_CMD_MAX = 1245,
SVGA_3D_CMD_FUTURE_MAX = 3000
} SVGAFifo3dCmdId;
+#define SVGA_NUM_3D_CMD (SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)
+
/*
* FIFO command format definitions:
*/
@@ -301,7 +332,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
@@ -327,7 +358,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
@@ -459,6 +490,28 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
+/*
+ * Perform a surface copy within the same image.
+ * The src/dest boxes are allowed to overlap.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId surface;
+ SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdIntraSurfaceCopy; /* SVGA_3D_CMD_INTRA_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 srcSid;
+ uint32 destSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWholeSurfaceCopy; /* SVGA_3D_CMD_WHOLE_SURFACE_COPY */
+
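For orientation, a minimal sketch (not part of the patch) of how a guest might encode the new intra-surface copy, assuming the usual framing of a 32-bit command id plus body size and the usual sid/face/mipmap image-id layout; submit_fifo() is a hypothetical placeholder for the driver's command submission path:

	struct {
		uint32 id;                      /* SVGA_3D_CMD_INTRA_SURFACE_COPY */
		uint32 size;                    /* size of the body in bytes */
		SVGA3dCmdIntraSurfaceCopy body;
	} cmd;

	cmd.id = SVGA_3D_CMD_INTRA_SURFACE_COPY;
	cmd.size = sizeof(cmd.body);
	cmd.body.surface.sid = sid;     /* image within the surface */
	cmd.body.surface.face = 0;
	cmd.body.surface.mipmap = 0;
	cmd.body.box = box;             /* src/dest boxes may overlap */
	submit_fifo(&cmd, sizeof(cmd)); /* hypothetical submit helper */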
typedef
#include "vmware_pack_begin.h"
struct {
@@ -772,6 +825,17 @@ struct {
#include "vmware_pack_end.h"
SVGA3dVertexElement;
+/*
+ * Should the vertex element respect the stream value? The high bit of the
+ * stream should be set to indicate that the stream should be respected. If
+ * the high bit is not set, the stream will be ignored and replaced by the
+ * index of the currently considered vertex element.
+ *
+ * All guests should set this bit and correctly specify the stream going
+ * forward.
+ */
+#define SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM (1 << 7)
+
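A sketch of what the flag implies for guests; since the flag is (1 << 7), the assumption here is that the stream index occupies the low bits of the same field. Nothing below is from the patch itself:

	/* Sketch: honor the real stream index rather than the element's position. */
	elem.stream = stream_index | SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM;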
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1102,8 +1166,6 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
-
-
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1147,38 +1209,6 @@ struct SVGA3dCmdScreenDMA {
SVGA3dCmdScreenDMA; /* SVGA_3D_CMD_SCREEN_DMA */
/*
- * Set Unity Surface Cookie
- *
- * Associates the supplied cookie with the surface id for use with
- * Unity. This cookie is a hint from guest to host, there is no way
- * for the guest to readback the cookie and the host is free to drop
- * the cookie association at will. The default value for the cookie
- * on all surfaces is 0.
- */
-
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdSetUnitySurfaceCookie {
- uint32 sid;
- uint64 cookie;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdSetUnitySurfaceCookie; /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
-
-/*
- * Open a context-specific surface in a non-context-specific manner.
- */
-
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdOpenContextSurface {
- uint32 sid;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdOpenContextSurface; /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
-
-
-/*
* Logic ops
*/
@@ -1324,7 +1354,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceFormat format;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surface1Flags;
uint32 numMipLevels;
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
@@ -1332,7 +1362,11 @@ struct {
SVGAMobId mobid;
uint32 arraySize;
uint32 mobPitch;
- uint32 pad[5];
+ SVGA3dSurface2Flags surface2Flags;
+ uint8 multisamplePattern;
+ uint8 qualityLevel;
+ uint8 pad0[2];
+ uint32 pad1[3];
}
#include "vmware_pack_end.h"
SVGAOTableSurfaceEntry;
@@ -1360,7 +1394,8 @@ struct {
SVGAOTableShaderEntry;
#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
-#define SVGA_STFLAG_PRIMARY (1 << 0)
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+#define SVGA_STFLAG_RESERVED (1 << 1) /* Added with cap SVGA_CAP_HP_CMD_QUEUE */
typedef uint32 SVGAScreenTargetFlags;
typedef
@@ -1528,6 +1563,25 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
+/*
+ * Guests using SVGA_3D_CMD_GROW_OTABLE are promising that
+ * the new OTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * (Otherwise, guests should use one of the SetOTableBase commands.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAOTableType type;
+ PPN64 baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGrowOTable; /* SVGA_3D_CMD_GROW_OTABLE */
+
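A hedged sketch of the grow operation from the guest side, assuming the larger table has already been populated with the old contents; the enum values shown (SVGA_OTABLE_SURFACE, SVGA3D_MOBFMT_PTDEPTH_1) are merely plausible examples:

	SVGA3dCmdGrowOTable body;

	body.type = SVGA_OTABLE_SURFACE;        /* example table type */
	body.baseAddress = new_base_ppn;        /* PPN64 of the larger table */
	body.sizeInBytes = new_size;            /* full size of the new table */
	body.validSizeInBytes = old_size;       /* bytes carried over unchanged */
	body.ptDepth = SVGA3D_MOBFMT_PTDEPTH_1; /* page-table depth for new_size */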
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1615,7 +1669,7 @@ typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDefineGBSurface {
uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
uint32 numMipLevels;
uint32 multisampleCount;
@@ -1626,6 +1680,45 @@ struct SVGA3dCmdDefineGBSurface {
SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
/*
+ * Defines a guest-backed surface, adding the arraySize field.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+ uint32 sid;
+ SVGA3dSurface1Flags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ uint32 arraySize;
+ uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
+
+/*
+ * Defines a guest-backed surface, adding the larger flags.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v3 {
+ uint32 sid;
+ SVGA3dSurfaceAllFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dMSPattern multisamplePattern;
+ SVGA3dMSQualityLevel qualityLevel;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ uint32 arraySize;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v3; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 */
+
+/*
* Destroy a guest-backed surface.
*/
@@ -1672,7 +1765,7 @@ SVGA3dCmdBindGBSurfaceWithPitch; /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
typedef
#include "vmware_pack_begin.h"
-struct{
+struct SVGA3dCmdCondBindGBSurface {
uint32 sid;
SVGAMobId testMobid;
SVGAMobId mobid;
@@ -2066,6 +2159,26 @@ struct {
uint32 mobOffset;
}
#include "vmware_pack_end.h"
-SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE*/
+SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+ SVGA3dSurfaceImageId dest;
+
+ uint32 statusMobId;
+ uint32 statusMobOffset;
+
+ /* Reserved fields */
+ uint32 mustBeInvalidId;
+ uint32 mustBeZero;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenCopy; /* SVGA_3D_CMD_SCREEN_COPY */
+
+#define SVGA_SCREEN_COPY_STATUS_FAILURE 0x00
+#define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01
+#define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF
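The status mob gives the guest an asynchronous result channel. A sketch of the polling pattern these defines suggest, assuming the guest pre-writes INVALID and the host overwrites it with SUCCESS or FAILURE; map_mob() and the submit/fallback calls are hypothetical placeholders:

	volatile uint32 *status = map_mob(statusMobId, statusMobOffset);

	*status = SVGA_SCREEN_COPY_STATUS_INVALID; /* guest marks "no result yet" */
	submit_screen_copy(&cmd);                  /* hypothetical submit */
	while (*status == SVGA_SCREEN_COPY_STATUS_INVALID)
		cpu_relax();                       /* host writes the result */
	if (*status == SVGA_SCREEN_COPY_STATUS_FAILURE)
		redraw_screen_target(stid);        /* hypothetical fallback */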
#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
index c18b663f360f..f256560049bf 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -229,9 +230,9 @@ typedef enum {
SVGA3D_DEVCAP_DEAD2 = 94,
/*
- * Does the device support the DX commands?
+ * Does the device support DXContexts?
*/
- SVGA3D_DEVCAP_DX = 95,
+ SVGA3D_DEVCAP_DXCONTEXT = 95,
/*
* What is the maximum size of a texture array?
@@ -241,21 +242,47 @@ typedef enum {
SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
/*
- * What is the maximum number of vertex buffers that can
- * be used in the DXContext inputAssembly?
+ * What is the maximum number of vertex buffers or vertex input registers
+ * that can be expected to work correctly with a DXContext?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
+ * input registers without the SVGA3D_DEVCAP_SM41 cap, or up to
+ * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM41 cap,
+ * but only the registers up to this cap value are guaranteed to render
+ * correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it is recommended
+ * that they clamp to this value. Otherwise, the host will make a
+ * best effort on a case-by-case basis if guests exceed it.
*/
SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
/*
- * What is the maximum number of constant buffers
- * that can be expected to work correctly with a
- * DX context?
+ * What is the maximum number of constant buffers that can be expected to
+ * work correctly with a DX context?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it is recommended
+ * that they clamp to this value. Otherwise, the host will make a
+ * best effort on a case-by-case basis if guests exceed it.
*/
SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
/*
* Does the device support provoking vertex control?
- * If zero, the first vertex will always be the provoking vertex.
+ *
+ * If this cap is present, the provokingVertexLast field in the
+ * rasterizer state is enabled. (Guests can then set it to FALSE,
+ * meaning that the first vertex is the provoking vertex, or TRUE,
+ * meaning that the last vertex is the provoking vertex.)
+ *
+ * If this cap is FALSE, then guests should set provokingVertexLast
+ * to FALSE; otherwise rendering behavior is undefined.
*/
SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
@@ -281,7 +308,7 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
- SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122,
+ SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1 = 122,
SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
@@ -320,8 +347,8 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
- SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161,
- SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162,
+ SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24 = 161,
+ SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT = 162,
SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
@@ -339,8 +366,8 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
- SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180,
- SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181,
+ SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8 = 180,
+ SVGA3D_DEVCAP_DXFMT_X24_G8_UINT = 181,
SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
@@ -404,6 +431,17 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
+ /*
+ * Advertises shaderModel 4.1 support, independent blend-states,
+ * cube-map arrays, and a higher vertex input registers limit.
+ *
+ * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
+ */
+ SVGA3D_DEVCAP_SM41 = 244,
+
+ SVGA3D_DEVCAP_MULTISAMPLE_2X = 245,
+ SVGA3D_DEVCAP_MULTISAMPLE_4X = 246,
+
SVGA3D_DEVCAP_MAX /* This must be the last index. */
} SVGA3dDevCapIndex;
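Taken together with the limits above, the comments describe a clamp-to-cap pattern. A sketch, with read_devcap() standing in for whatever register/FIFO mechanism the guest uses to query caps:

	uint32 cap = read_devcap(SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS);

	if (cap == 0 || cap > SVGA3D_DX_MAX_VERTEXBUFFERS)
		cap = SVGA3D_DX_MAX_VERTEXBUFFERS; /* never trust more than the ceiling */
	driver_max_vertex_buffers = cap;           /* expose the clamped limit */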
@@ -419,9 +457,7 @@ typedef enum {
* MIPS: Does the format support mip levels?
* ARRAY: Does the format support texture arrays?
* VOLUME: Does the format support having volume?
- * MULTISAMPLE_2: Does the format support 2x multisample?
- * MULTISAMPLE_4: Does the format support 4x multisample?
- * MULTISAMPLE_8: Does the format support 8x multisample?
+ * MULTISAMPLE: Does the format support multisample?
*/
#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1)
@@ -432,20 +468,8 @@ typedef enum {
#define SVGA3D_DXFMT_ARRAY (1 << 6)
#define SVGA3D_DXFMT_VOLUME (1 << 7)
#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8)
-#define SVGADX_DXFMT_MULTISAMPLE_2 (1 << 9)
-#define SVGADX_DXFMT_MULTISAMPLE_4 (1 << 10)
-#define SVGADX_DXFMT_MULTISAMPLE_8 (1 << 11)
-#define SVGADX_DXFMT_MAX (1 << 12)
-
-/*
- * Convenience mask for any multisample capability.
- *
- * The multisample bits imply both load and render capability.
- */
-#define SVGA3D_DXFMT_MULTISAMPLE ( \
- SVGADX_DXFMT_MULTISAMPLE_2 | \
- SVGADX_DXFMT_MULTISAMPLE_4 | \
- SVGADX_DXFMT_MULTISAMPLE_8 )
+#define SVGA3D_DXFMT_MULTISAMPLE (1 << 9)
+#define SVGA3D_DXFMT_MAX (1 << 10)
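With the per-format 2x/4x/8x bits collapsed into one SVGA3D_DXFMT_MULTISAMPLE bit, the sample count moves to the device-wide SVGA3D_DEVCAP_MULTISAMPLE_2X/4X caps added above. A sketch of the combined check a guest might make (an inference from the split, not a stated contract):

	static inline bool format_supports_4x(uint32 fmt_caps, bool dev_4x)
	{
		/* Needs both the per-format bit and the device-wide 4x cap. */
		return (fmt_caps & SVGA3D_DXFMT_MULTISAMPLE) && dev_4x;
	}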
typedef union {
Bool b;
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
index 8c5ae608cfb4..7a49c94df221 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ * Copyright 2012-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -56,6 +57,16 @@ typedef uint32 SVGA3dInputClassification;
#define SVGA3D_RESOURCE_TYPE_MAX 7
typedef uint32 SVGA3dResourceType;
+#define SVGA3D_COLOR_WRITE_ENABLE_RED (1 << 0)
+#define SVGA3D_COLOR_WRITE_ENABLE_GREEN (1 << 1)
+#define SVGA3D_COLOR_WRITE_ENABLE_BLUE (1 << 2)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALPHA (1 << 3)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALL (SVGA3D_COLOR_WRITE_ENABLE_RED | \
+ SVGA3D_COLOR_WRITE_ENABLE_GREEN | \
+ SVGA3D_COLOR_WRITE_ENABLE_BLUE | \
+ SVGA3D_COLOR_WRITE_ENABLE_ALPHA)
+typedef uint8 SVGA3dColorWriteEnable;
+
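A one-line sketch of using the new mask type, e.g. to write color but not alpha on one render target (renderTargetWriteMask is the blend-state field retyped later in this patch):

	/* Sketch: color-only writes for one render target. */
	rt->renderTargetWriteMask = SVGA3D_COLOR_WRITE_ENABLE_ALL &
				    ~SVGA3D_COLOR_WRITE_ENABLE_ALPHA;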
#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0
#define SVGA3D_DEPTH_WRITE_MASK_ALL 1
typedef uint8 SVGA3dDepthWriteMask;
@@ -88,17 +99,28 @@ typedef uint8 SVGA3dCullMode;
#define SVGA3D_COMPARISON_MAX 9
typedef uint8 SVGA3dComparisonFunc;
+/*
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE disables MSAA for all primitives.
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE, which is supported in SM41,
+ * disables MSAA for lines only.
+ */
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE 0
+#define SVGA3D_MULTISAMPLE_RAST_ENABLE 1
+#define SVGA3D_MULTISAMPLE_RAST_DX_MAX 1
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE 2
+#define SVGA3D_MULTISAMPLE_RAST_MAX 2
+typedef uint8 SVGA3dMultisampleRastEnable;
+
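A sketch of the new line-only mode, assuming the rasterizer-state field retyped later in this patch; per the comment above, DISABLE_LINE requires SM4.1, so a guest might fall back to full MSAA without it:

	/* Sketch: multisample triangles, but aliased (non-MSAA) lines. */
	rast->multisampleEnable = sm41_available ?
		SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE :
		SVGA3D_MULTISAMPLE_RAST_ENABLE;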
#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_VERTEXINPUTREGISTERS 16
+#define SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS 32
#define SVGA3D_DX_MAX_SOTARGETS 4
#define SVGA3D_DX_MAX_SRVIEWS 128
#define SVGA3D_DX_MAX_CONSTBUFFERS 16
#define SVGA3D_DX_MAX_SAMPLERS 16
-/* Id limits */
-static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
-static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
+#define SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE (4096 * 4 * (uint32)sizeof(uint32))
-typedef uint32 SVGA3dSurfaceId;
typedef uint32 SVGA3dShaderResourceViewId;
typedef uint32 SVGA3dRenderTargetViewId;
typedef uint32 SVGA3dDepthStencilViewId;
@@ -194,20 +216,6 @@ SVGA3dCmdDXInvalidateContext; /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
typedef
#include "vmware_pack_begin.h"
-struct SVGA3dReplyFormatData {
- uint32 formatSupport;
- uint32 msaa2xQualityLevels:5;
- uint32 msaa4xQualityLevels:5;
- uint32 msaa8xQualityLevels:5;
- uint32 msaa16xQualityLevels:5;
- uint32 msaa32xQualityLevels:5;
- uint32 pad:7;
-}
-#include "vmware_pack_end.h"
-SVGA3dReplyFormatData;
-
-typedef
-#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetSingleConstantBuffer {
uint32 slot;
SVGA3dShaderType type;
@@ -624,6 +632,28 @@ SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvertRegion {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dBox destBox;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dBox srcBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvertRegion; /* SVGA_3D_CMD_DX_PRED_CONVERT_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvert {
+ SVGA3dSurfaceId dstSid;
+ SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvert; /* SVGA_3D_CMD_DX_PRED_CONVERT */
+
+typedef
+#include "vmware_pack_begin.h"
struct SVGA3dCmdDXBufferCopy {
SVGA3dSurfaceId dest;
SVGA3dSurfaceId src;
@@ -635,23 +665,57 @@ struct SVGA3dCmdDXBufferCopy {
SVGA3dCmdDXBufferCopy;
/* SVGA_3D_CMD_DX_BUFFER_COPY */
-typedef uint32 SVGA3dDXStretchBltMode;
-#define SVGADX_STRETCHBLT_LINEAR (1 << 0)
-#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
+/*
+ * Perform a surface copy between a multisampled and a non-multisampled
+ * surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXResolveCopy; /* SVGA_3D_CMD_DX_RESOLVE_COPY */
+
+/*
+ * Perform a predicated surface copy between a multisampled and a
+ * non-multisampled surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredResolveCopy; /* SVGA_3D_CMD_DX_PRED_RESOLVE_COPY */
+
+typedef uint32 SVGA3dDXPresentBltMode;
+#define SVGADX_PRESENTBLT_LINEAR (1 << 0)
+#define SVGADX_PRESENTBLT_FORCE_SRC_SRGB (1 << 1)
+#define SVGADX_PRESENTBLT_FORCE_SRC_XRBIAS (1 << 2)
+#define SVGADX_PRESENTBLT_MODE_MAX (1 << 3)
typedef
#include "vmware_pack_begin.h"
-struct SVGA3dCmdDXStretchBlt {
+struct SVGA3dCmdDXPresentBlt {
SVGA3dSurfaceId srcSid;
uint32 srcSubResource;
SVGA3dSurfaceId dstSid;
uint32 destSubResource;
SVGA3dBox boxSrc;
SVGA3dBox boxDest;
- SVGA3dDXStretchBltMode mode;
+ SVGA3dDXPresentBltMode mode;
}
#include "vmware_pack_end.h"
-SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
+SVGA3dCmdDXPresentBlt; /* SVGA_3D_CMD_DX_PRESENTBLT */
typedef
#include "vmware_pack_begin.h"
@@ -662,26 +726,6 @@ struct SVGA3dCmdDXGenMips {
SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
/*
- * Defines a resource/DX surface. Resources share the surfaceId namespace.
- *
- */
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdDefineGBSurface_v2 {
- uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
- SVGA3dSurfaceFormat format;
- uint32 numMipLevels;
- uint32 multisampleCount;
- SVGA3dTextureFilter autogenFilter;
- SVGA3dSize size;
- uint32 arraySize;
- uint32 pad;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
-
-/*
* Update a sub-resource in a guest-backed resource.
* (Inform the device that the guest-contents have been updated.)
*/
@@ -724,7 +768,8 @@ SVGA3dCmdDXInvalidateSubResource; /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
/*
* Raw byte wise transfer from a buffer surface into another surface
- * of the requested box.
+ * of the requested box. Supported if 3d is enabled and SVGA_CAP_DX
+ * is set. This command does not take a context.
*/
typedef
#include "vmware_pack_begin.h"
@@ -773,6 +818,93 @@ struct SVGA3dCmdDXSurfaceCopyAndReadback {
SVGA3dCmdDXSurfaceCopyAndReadback;
/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
+/*
+ * SVGA_DX_HINT_NONE: Does nothing.
+ *
+ * SVGA_DX_HINT_PREFETCH_OBJECT:
+ * SVGA_DX_HINT_PREEVICT_OBJECT:
+ * Consumes a SVGAObjectRef, and hints that the host should consider
+ * fetching/evicting the specified object.
+ *
+ * An id of SVGA3D_INVALID_ID can be used if the guest isn't sure
+ * what object was affected. (For instance, if the guest knows that
+ * it is about to evict a DXShader, but doesn't know precisely which one,
+ * the device can still use this to help limit it's search, or track
+ * how many page-outs have happened.)
+ *
+ * SVGA_DX_HINT_PREFETCH_COBJECT:
+ * SVGA_DX_HINT_PREEVICT_COBJECT:
+ * Same as the above, except they consume an SVGACObjectRef.
+ */
+typedef uint32 SVGADXHintId;
+#define SVGA_DX_HINT_NONE 0
+#define SVGA_DX_HINT_PREFETCH_OBJECT 1
+#define SVGA_DX_HINT_PREEVICT_OBJECT 2
+#define SVGA_DX_HINT_PREFETCH_COBJECT 3
+#define SVGA_DX_HINT_PREEVICT_COBJECT 4
+#define SVGA_DX_HINT_MAX 5
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAObjectRef {
+ SVGAOTableType type;
+ uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGAObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACObjectRef {
+ SVGACOTableType type;
+ uint32 cid;
+ uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGACObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXHint {
+ SVGADXHintId hintId;
+
+ /*
+ * Followed by variable sized data depending on the hintId.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXHint;
+/* SVGA_3D_CMD_DX_HINT */
+
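Since the hint body is variable-sized, a prefetch-object hint is simply the fixed header followed by its SVGAObjectRef payload. A sketch; the table type chosen is only an example:

	struct {
		SVGA3dCmdDXHint hint;
		SVGAObjectRef ref;      /* payload for PREFETCH/PREEVICT_OBJECT */
	} cmd;

	cmd.hint.hintId = SVGA_DX_HINT_PREFETCH_OBJECT;
	cmd.ref.type = SVGA_OTABLE_SURFACE;  /* example object table */
	cmd.ref.id = SVGA3D_INVALID_ID;      /* "not sure which object" case */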
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferUpdate {
+ SVGA3dSurfaceId sid;
+ uint32 x;
+ uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferUpdate;
+/* SVGA_3D_CMD_DX_BUFFER_UPDATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetConstantBufferOffset {
+ uint32 slot;
+ uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetConstantBufferOffset;
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetVSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetPSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetGSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET */
+
typedef
#include "vmware_pack_begin.h"
@@ -789,7 +921,7 @@ struct {
uint32 firstArraySlice;
uint32 mipLevels;
uint32 arraySize;
- } tex;
+ } tex; /* 1d, 2d, 3d, cube */
struct {
uint32 firstElement;
uint32 numElements;
@@ -844,6 +976,7 @@ struct SVGA3dRenderTargetViewDesc {
struct {
uint32 firstElement;
uint32 numElements;
+ uint32 padding0;
} buffer;
struct {
uint32 mipSlice;
@@ -964,9 +1097,6 @@ SVGA3dInputElementDesc;
typedef
#include "vmware_pack_begin.h"
struct {
- /*
- * XXX: How many of these can there be?
- */
uint32 elid;
uint32 numDescs;
SVGA3dInputElementDesc desc[32];
@@ -1007,7 +1137,7 @@ struct SVGA3dDXBlendStatePerRT {
uint8 srcBlendAlpha;
uint8 destBlendAlpha;
uint8 blendOpAlpha;
- uint8 renderTargetWriteMask;
+ SVGA3dColorWriteEnable renderTargetWriteMask;
uint8 logicOpEnable;
uint8 logicOp;
uint16 pad0;
@@ -1125,7 +1255,7 @@ struct {
float slopeScaledDepthBias;
uint8 depthClipEnable;
uint8 scissorEnable;
- uint8 multisampleEnable;
+ SVGA3dMultisampleRastEnable multisampleEnable;
uint8 antialiasedLineEnable;
float lineWidth;
uint8 lineStippleEnable;
@@ -1152,7 +1282,7 @@ struct SVGA3dCmdDXDefineRasterizerState {
float slopeScaledDepthBias;
uint8 depthClipEnable;
uint8 scissorEnable;
- uint8 multisampleEnable;
+ SVGA3dMultisampleRastEnable multisampleEnable;
uint8 antialiasedLineEnable;
float lineWidth;
uint8 lineStippleEnable;
@@ -1222,21 +1352,6 @@ struct SVGA3dCmdDXDestroySamplerState {
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
-/*
- */
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dSignatureEntry {
- uint8 systemValue;
- uint8 reg; /* register is a reserved word */
- uint16 mask;
- uint8 registerComponentType;
- uint8 minPrecision;
- uint16 pad0;
-}
-#include "vmware_pack_end.h"
-SVGA3dSignatureEntry;
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineShader {
@@ -1254,12 +1369,7 @@ struct SVGACOTableDXShaderEntry {
uint32 sizeInBytes;
uint32 offsetInBytes;
SVGAMobId mobid;
- uint32 numInputSignatureEntries;
- uint32 numOutputSignatureEntries;
-
- uint32 numPatchConstantSignatureEntries;
-
- uint32 pad;
+ uint32 pad[4];
}
#include "vmware_pack_end.h"
SVGACOTableDXShaderEntry;
@@ -1283,6 +1393,25 @@ struct SVGA3dCmdDXBindShader {
#include "vmware_pack_end.h"
SVGA3dCmdDXBindShader; /* SVGA_3D_CMD_DX_BIND_SHADER */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllShader {
+ uint32 cid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllShader; /* SVGA_3D_CMD_DX_BIND_ALL_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXCondBindAllShader {
+ uint32 cid;
+ SVGAMobId testMobid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXCondBindAllShader; /* SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER */
+
/*
* The maximum number of streamout decl's in each streamout entry.
*/
@@ -1356,7 +1485,6 @@ SVGA3dCmdDXMobFence64; /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
*
* This command allows the guest to bind a mob to a context-object table.
*/
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetCOTable {
@@ -1368,6 +1496,26 @@ struct SVGA3dCmdDXSetCOTable {
#include "vmware_pack_end.h"
SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
+/*
+ * Guests using SVGA_3D_CMD_DX_GROW_COTABLE are promising that
+ * the new COTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * If there is an old cotable mob bound, it must also still be valid.
+ *
+ * (Otherwise, guests should use the DXSetCOTableBase command.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGrowCOTable {
+ uint32 cid;
+ uint32 mobid;
+ SVGACOTableType type;
+ uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGrowCOTable; /* SVGA_3D_CMD_DX_GROW_COTABLE */
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXReadbackCOTable {
@@ -1471,7 +1619,7 @@ struct SVGADXContextMobFormat {
SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
- uint32 pad7[381];
+ uint32 pad7[380];
}
#include "vmware_pack_end.h"
SVGADXContextMobFormat;
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
index a1c36877ad55..b22a67f15660 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -62,7 +63,9 @@
* Maximum size in dwords of shader text the SVGA device will allow.
* Currently 8 MB.
*/
-#define SVGA3D_MAX_SHADER_MEMORY (8 * 1024 * 1024 / sizeof(uint32))
+#define SVGA3D_MAX_SHADER_MEMORY_BYTES (8 * 1024 * 1024)
+#define SVGA3D_MAX_SHADER_MEMORY (SVGA3D_MAX_SHADER_MEMORY_BYTES / \
+ sizeof(uint32))
#define SVGA3D_MAX_CLIP_PLANES 6
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
index b44ce648f592..bdfc404c91e3 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index babe7cb84fc2..f2bfd3d80598 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2008-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,189 +25,355 @@
*
**************************************************************************/
-#include <linux/kernel.h>
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
+/*
+ * svga3d_surfacedefs.h --
+ *
+ * Surface definitions and inlineable utilities for SVGA3d.
+ */
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
+#ifndef _SVGA3D_SURFACEDEFS_H_
+#define _SVGA3D_SURFACEDEFS_H_
-#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_MODULE
+#include "includeCheck.h"
-#endif /* __KERNEL__ */
+#include <linux/kernel.h>
+#include <drm/vmwgfx_drm.h>
#include "svga3d_reg.h"
+#define surf_size_struct struct drm_vmw_size
+
/*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at-most four active channels in a block:
- * 1. Red, bump W, luminance and depth are stored in the first channel.
- * 2. Green, bump V and stencil are stored in the second channel.
- * 3. Blue and bump U are stored in the third channel.
- * 4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- * 1. For compressed formats, only the data channel is used and its size
- * is equal to that of a singular block in the compression scheme.
- * 2. For buffer formats, only the data channel is used and its size is
- * exactly one byte in length.
- * 3. In each case the bit depth represent the size of a singular block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
+ * enum svga3d_block_desc - describes generic properties of formats.
*/
-
enum svga3d_block_desc {
- SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
- SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with red channel
- data */
- SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
- data */
- SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
- U and V */
- SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
- data */
- SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
- data */
- SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
- channel */
- SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with blue channel
- data */
- SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
- data */
- SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
- data */
- SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
- data */
- SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
- SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
- channel */
- SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
- data */
- SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
- data */
- SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
- data depending on the
- compression method used */
- SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
- floating point
- representation in
- all channels */
- SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
- data. */
- SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
- SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
- SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
- SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
- SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
- e.g., NV12. */
- SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
- Y, U, V, e.g., YV12. */
-
- SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
- SVGA3DBLOCKDESC_GREEN,
- SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
- SVGA3DBLOCKDESC_BLUE,
- SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
- SVGA3DBLOCKDESC_SRGB,
+ /* Nothing special can be said about this format. */
+ SVGA3DBLOCKDESC_NONE = 0,
+
+ /* Format contains Blue/U data */
+ SVGA3DBLOCKDESC_BLUE = 1 << 0,
+ SVGA3DBLOCKDESC_W = 1 << 0,
+ SVGA3DBLOCKDESC_BUMP_L = 1 << 0,
+
+ /* Format contains Green/V data */
+ SVGA3DBLOCKDESC_GREEN = 1 << 1,
+ SVGA3DBLOCKDESC_V = 1 << 1,
+
+ /* Format contains Red/W/Luminance data */
+ SVGA3DBLOCKDESC_RED = 1 << 2,
+ SVGA3DBLOCKDESC_U = 1 << 2,
+ SVGA3DBLOCKDESC_LUMINANCE = 1 << 2,
+
+ /* Format contains Alpha/Q data */
+ SVGA3DBLOCKDESC_ALPHA = 1 << 3,
+ SVGA3DBLOCKDESC_Q = 1 << 3,
+
+ /* Format is a buffer */
+ SVGA3DBLOCKDESC_BUFFER = 1 << 4,
+
+ /* Format is compressed */
+ SVGA3DBLOCKDESC_COMPRESSED = 1 << 5,
+
+ /* Format uses IEEE floating point */
+ SVGA3DBLOCKDESC_FP = 1 << 6,
+
+ /* Three separate blocks store data. */
+ SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 7,
+
+ /* 2 planes of Y, UV, e.g., NV12. */
+ SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 8,
+
+ /* 3 planes of separate Y, U, V, e.g., YV12. */
+ SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 9,
+
+ /* Block with a stencil channel */
+ SVGA3DBLOCKDESC_STENCIL = 1 << 11,
+
+ /* Typeless format */
+ SVGA3DBLOCKDESC_TYPELESS = 1 << 12,
+
+ /* Channels are signed integers */
+ SVGA3DBLOCKDESC_SINT = 1 << 13,
+
+ /* Channels are unsigned integers */
+ SVGA3DBLOCKDESC_UINT = 1 << 14,
+
+ /* Channels are normalized (when sampling) */
+ SVGA3DBLOCKDESC_NORM = 1 << 15,
+
+ /* Channels are in SRGB */
+ SVGA3DBLOCKDESC_SRGB = 1 << 16,
+
+ /* Shared exponent */
+ SVGA3DBLOCKDESC_EXP = 1 << 17,
+
+ /* Format contains color data. */
+ SVGA3DBLOCKDESC_COLOR = 1 << 18,
+ /* Format contains depth data. */
+ SVGA3DBLOCKDESC_DEPTH = 1 << 19,
+ /* Format contains bump data. */
+ SVGA3DBLOCKDESC_BUMP = 1 << 20,
+
+ /* Format contains YUV video data. */
+ SVGA3DBLOCKDESC_YUV_VIDEO = 1 << 21,
+
+ /* For mixed unsigned/signed formats. */
+ SVGA3DBLOCKDESC_MIXED = 1 << 22,
+
+ /* For distinguishing CxV8U8. */
+ SVGA3DBLOCKDESC_CX = 1 << 23,
+
+ /* Different compressed format groups. */
+ SVGA3DBLOCKDESC_BC1 = 1 << 24,
+ SVGA3DBLOCKDESC_BC2 = 1 << 25,
+ SVGA3DBLOCKDESC_BC3 = 1 << 26,
+ SVGA3DBLOCKDESC_BC4 = 1 << 27,
+ SVGA3DBLOCKDESC_BC5 = 1 << 28,
+
+ SVGA3DBLOCKDESC_A_UINT = SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_A_UNORM = SVGA3DBLOCKDESC_A_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_R_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_R_UNORM = SVGA3DBLOCKDESC_R_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_R_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_R_SNORM = SVGA3DBLOCKDESC_R_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_G_UINT = SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RG_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RG_UNORM = SVGA3DBLOCKDESC_RG_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RG_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RG_SNORM = SVGA3DBLOCKDESC_RG_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGB_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGB_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGB_UNORM = SVGA3DBLOCKDESC_RGB_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGB_UNORM_SRGB = SVGA3DBLOCKDESC_RGB_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGBA_UNORM = SVGA3DBLOCKDESC_RGBA_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGBA_UNORM_SRGB = SVGA3DBLOCKDESC_RGBA_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGBA_SNORM = SVGA3DBLOCKDESC_RGBA_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_FP |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
- SVGA3DBLOCKDESC_V,
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
- SVGA3DBLOCKDESC_LUMINANCE,
+ SVGA3DBLOCKDESC_BUMP_L |
+ SVGA3DBLOCKDESC_MIXED |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
- SVGA3DBLOCKDESC_W,
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
- SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_MIXED |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
- SVGA3DBLOCKDESC_V |
- SVGA3DBLOCKDESC_W |
- SVGA3DBLOCKDESC_Q,
- SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
- SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_Q |
+ SVGA3DBLOCKDESC_BUMP,
+ SVGA3DBLOCKDESC_L_UNORM = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_LA_UNORM = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
- SVGA3DBLOCKDESC_IEEE_FP,
+ SVGA3DBLOCKDESC_FP |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
- SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
- SVGA3DBLOCKDESC_BLUE,
- SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
- SVGA3DBLOCKDESC_STENCIL,
- SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
- SVGA3DBLOCKDESC_Y,
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
- SVGA3DBLOCKDESC_Y |
- SVGA3DBLOCKDESC_U_VIDEO |
- SVGA3DBLOCKDESC_V_VIDEO,
- SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_EXP,
- SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
- SVGA3DBLOCKDESC_2PLANAR_YUV,
- SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
- SVGA3DBLOCKDESC_3PLANAR_YUV,
+ SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGB_EXP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_EXP |
+ SVGA3DBLOCKDESC_COLOR,
+
+ SVGA3DBLOCKDESC_COMP_TYPELESS = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_TYPELESS,
+ SVGA3DBLOCKDESC_COMP_UNORM = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_COMP_SNORM = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC1_COMP_TYPELESS = SVGA3DBLOCKDESC_BC1 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC1_COMP_UNORM = SVGA3DBLOCKDESC_BC1 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC1_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC2_COMP_TYPELESS = SVGA3DBLOCKDESC_BC2 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC2_COMP_UNORM = SVGA3DBLOCKDESC_BC2 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC2_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC3_COMP_TYPELESS = SVGA3DBLOCKDESC_BC3 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC3_COMP_UNORM = SVGA3DBLOCKDESC_BC3 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC3_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC4_COMP_TYPELESS = SVGA3DBLOCKDESC_BC4 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC4_COMP_UNORM = SVGA3DBLOCKDESC_BC4 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC4_COMP_SNORM = SVGA3DBLOCKDESC_BC4 |
+ SVGA3DBLOCKDESC_COMP_SNORM,
+ SVGA3DBLOCKDESC_BC5_COMP_TYPELESS = SVGA3DBLOCKDESC_BC5 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC5_COMP_UNORM = SVGA3DBLOCKDESC_BC5 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC5_COMP_SNORM = SVGA3DBLOCKDESC_BC5 |
+ SVGA3DBLOCKDESC_COMP_SNORM,
+
+ SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_2PLANAR_YUV |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_3PLANAR_YUV |
+ SVGA3DBLOCKDESC_COLOR,
+
+ SVGA3DBLOCKDESC_DEPTH_UINT = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_UINT,
+ SVGA3DBLOCKDESC_DEPTH_UNORM = SVGA3DBLOCKDESC_DEPTH_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL,
+ SVGA3DBLOCKDESC_DS_UINT = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL |
+ SVGA3DBLOCKDESC_UINT,
+ SVGA3DBLOCKDESC_DS_UNORM = SVGA3DBLOCKDESC_DS_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_DEPTH_FP = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_FP,
+
+ SVGA3DBLOCKDESC_UV_UINT = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_UINT,
+ SVGA3DBLOCKDESC_UV_SNORM = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_UVCX_SNORM = SVGA3DBLOCKDESC_UV_SNORM |
+ SVGA3DBLOCKDESC_CX,
+ SVGA3DBLOCKDESC_UVWQ_SNORM = SVGA3DBLOCKDESC_UVWQ |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_NORM,
};
-/*
- * SVGA3dSurfaceDesc describes the actual pixel data.
- *
- * This structure provides the following information:
- * 1. Block description.
- * 2. Dimensions of a block in the surface.
- * 3. Size of block in bytes.
- * 4. Bit depth of the pixel data.
- * 5. Channel bit depths and masks (if applicable).
- */
struct svga3d_channel_def {
union {
u8 blue;
- u8 u;
+ u8 w_bump;
+ u8 l_bump;
u8 uv_video;
u8 u_video;
};
union {
u8 green;
- u8 v;
u8 stencil;
+ u8 v_bump;
u8 v_video;
};
union {
u8 red;
- u8 w;
+ u8 u_bump;
u8 luminance;
- u8 y;
+ u8 y_video;
u8 depth;
u8 data;
};
union {
u8 alpha;
- u8 q;
+ u8 q_bump;
u8 exp;
};
};
+/*
+ * struct svga3d_surface_desc - describes the actual pixel data.
+ *
+ * @format: Format
+ * @block_desc: Block description
+ * @block_size: Dimensions in pixels of a block
+ * @bytes_per_block: Size of block in bytes
+ * @pitch_bytes_per_block: Size of a block in bytes for pitch computation
+ * @bit_depth: Channel bit depths
+ * @bit_offset: Channel bit offsets (in bits from the start of the pixel)
+ */
struct svga3d_surface_desc {
SVGA3dSurfaceFormat format;
enum svga3d_block_desc block_desc;
+
surf_size_struct block_size;
u32 bytes_per_block;
u32 pitch_bytes_per_block;
- u32 total_bit_depth;
struct svga3d_channel_def bit_depth;
struct svga3d_channel_def bit_offset;
};
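The fields above are enough to derive a row pitch: round the width up to whole blocks, then multiply by pitch_bytes_per_block. A sketch under that assumption (this is not the driver's canonical helper; DIV_ROUND_UP comes from linux/kernel.h, which this header includes):

	static inline u32 svga3d_row_pitch(const struct svga3d_surface_desc *desc,
					   u32 width)
	{
		u32 blocks = DIV_ROUND_UP(width, desc->block_size.width);

		return blocks * desc->pitch_bytes_per_block;
	}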
@@ -215,729 +381,728 @@ struct svga3d_surface_desc {
static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 0, 0,
- 0, {{0}, {0}, {0}, {0}},
+ {{0}, {0}, {0}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {6}, {5}, {0}},
+ {{5}, {6}, {5}, {0}},
{{0}, {5}, {11}, {0}}},
- {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 2, 2,
- 15, {{5}, {5}, {5}, {0}},
+ {{5}, {5}, {5}, {0}},
{{0}, {5}, {10}, {0}}},
- {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {5}, {5}, {1}},
+ {{5}, {5}, {5}, {1}},
{{0}, {5}, {10}, {15}}},
- {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{4}, {4}, {4}, {4}},
+ {{4}, {4}, {4}, {4}},
{{0}, {4}, {8}, {12}}},
- {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {8}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {1}, {15}, {0}},
- {{0}, {15}, {0}, {0}}},
+ {{0}, {1}, {15}, {0}},
+ {{0}, {0}, {1}, {0}}},
- {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
+ {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_L_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
- {1 , 1, 1}, 1, 1,
- 8, {{0}, {0}, {4}, {4}},
+ {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA_UNORM,
+ {1, 1, 1}, 1, 1,
+ {{0}, {0}, {4}, {4}},
{{0}, {0}, {0}, {4}}},
- {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
+ {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_L_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
+ {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {8}, {8}},
+ {{0}, {0}, {8}, {8}},
{{0}, {0}, {0}, {8}}},
- {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT1, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT2, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT3, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT4, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT5, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {8}, {8}},
- {{0}, {0}, {0}, {8}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
{SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 2, 2,
- 16, {{5}, {5}, {6}, {0}},
- {{11}, {6}, {0}, {0}}},
+ {{6}, {5}, {5}, {0}},
+ {{10}, {5}, {0}, {0}}},
{SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
- {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
+ {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 3, 3,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
{SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
{SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
+ {{10}, {10}, {10}, {2}},
{{0}, {10}, {20}, {30}}},
- {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{8}, {8}, {0}, {0}},
- {{8}, {0}, {0}, {0}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
- {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
- {{24}, {16}, {8}, {0}}},
+ {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
- {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UVCX_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{8}, {8}, {0}, {0}},
- {{8}, {0}, {0}, {0}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
{SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
{SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
+ {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_A_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {0}, {8}},
+ {{0}, {0}, {0}, {8}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
{SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
{SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{16}, {16}, {0}, {0}},
- {{16}, {0}, {0}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
- {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
- {{0}, {0}, {16}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
- {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
{SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
- {1, 1, 1}, 2, 2,
- 16, {{8}, {0}, {8}, {0}},
+ {2, 1, 1}, 4, 4,
+ {{8}, {0}, {8}, {0}},
{{0}, {0}, {8}, {0}}},
{SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
- {1, 1, 1}, 2, 2,
- 16, {{8}, {0}, {8}, {0}},
+ {2, 1, 1}, 4, 4,
+ {{8}, {0}, {8}, {0}},
{{8}, {0}, {0}, {0}}},
{SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
{2, 2, 1}, 6, 2,
- 48, {{0}, {0}, {48}, {0}},
+ {{0}, {0}, {48}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
{SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
- {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB_UINT,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
- {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
+ {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_RGB_SINT,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
- {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG_UINT,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_RG_SINT,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 8, 8,
- 64, {{0}, {8}, {32}, {0}},
+ {{0}, {8}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
{SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 8, 8,
- 64, {{0}, {8}, {32}, {0}},
+ {{0}, {8}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
+ {SVGA3D_R32_FLOAT_X8X24, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 8, 8,
- 64, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
+ {SVGA3D_X32_G8X24_UINT, SVGA3DBLOCKDESC_G_UINT,
{1, 1, 1}, 8, 8,
- 64, {{0}, {8}, {0}, {0}},
+ {{0}, {8}, {0}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
{SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
{1, 1, 1}, 4, 4,
- 32, {{10}, {11}, {11}, {0}},
- {{0}, {10}, {21}, {0}}},
+ {{10}, {11}, {11}, {0}},
+ {{22}, {11}, {0}, {0}}},
- {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+ {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
+ {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_UINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_RG_SINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_R_UINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_R_SINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
+ {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
- {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
+ {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
- {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R24_UNORM_X8, SVGA3DBLOCKDESC_R_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {24}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
+ {SVGA3D_X24_G8_UINT, SVGA3DBLOCKDESC_G_UINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {0}, {0}},
+ {{0}, {8}, {0}, {0}},
{{0}, {24}, {0}, {0}}},
- {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG_UINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_RG_SINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_R_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_R_UINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_R_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_R_SINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_R_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_R_UINT,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_R_SNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_R_SINT,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_P8, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
+ {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGB_EXP,
{1, 1, 1}, 4, 4,
- 32, {{9}, {9}, {9}, {5}},
+ {{9}, {9}, {9}, {5}},
{{18}, {9}, {0}, {27}}},
- {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
- {{0}, {8}, {0}, {0}}},
+ {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_NONE,
+ {2, 1, 1}, 4, 4,
+ {{0}, {8}, {8}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_NONE,
+ {2, 1, 1}, 4, 4,
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_BC1_COMP_TYPELESS,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_BC2_COMP_TYPELESS,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_BC3_COMP_TYPELESS,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_BC4_COMP_TYPELESS,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_ATI1, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_BC4_COMP_SNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_BC5_COMP_TYPELESS,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_ATI2, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_BC5_COMP_SNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+ {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
+ {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_UNORM_SRGB,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {8}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
{SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
{2, 2, 1}, 6, 2,
- 48, {{0}, {0}, {48}, {0}},
+ {{0}, {0}, {48}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
{SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
{SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
- {{24}, {16}, {8}, {0}}},
+ {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
{SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
- {{0}, {0}, {16}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{16}, {16}, {0}, {0}},
- {{16}, {0}, {0}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
{SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{8}, {8}, {0}, {0}},
- {{8}, {0}, {0}, {0}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
{SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
+ {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_A_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {0}, {8}},
+ {{0}, {0}, {0}, {8}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {6}, {5}, {0}},
+ {{5}, {6}, {5}, {0}},
{{0}, {5}, {11}, {0}}},
- {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {5}, {5}, {1}},
+ {{5}, {5}, {5}, {1}},
{{0}, {5}, {10}, {15}}},
- {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
-
};
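Each entry above carries, per channel in what appears to be {blue, green, red, alpha} order, a bit depth and a bit offset within the block (the per-entry total-bits field is dropped by this patch). As a minimal sketch, not a quote of the header, extracting one channel of a packed pixel looks like this:

	/* Illustrative helper: mask out one channel given its descriptor's
	 * bit depth and bit offset (valid for depths below 32).
	 */
	static inline u32 extract_channel(u32 pixel, u32 depth, u32 offset)
	{
		return (pixel >> offset) & ((1u << depth) - 1);
	}

	/* For SVGA3D_B5G6R5_UNORM, extract_channel(pixel, 5, 11) yields the
	 * 5-bit red component, per the {{5}, {6}, {5}, {0}} depths and
	 * {{0}, {5}, {11}, {0}} offsets above.
	 */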
static inline u32 clamped_umul32(u32 a, u32 b)
@@ -946,6 +1111,10 @@ static inline u32 clamped_umul32(u32 a, u32 b)
return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
}
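This saturating multiply underpins the overflow-safe size math below; a sketch of typical use (variable names illustrative):

	u32 pitch = clamped_umul32(width, bytes_per_pixel);
	u32 bytes = clamped_umul32(pitch, height);
	/* If the product would exceed U32_MAX at either step, the result
	 * saturates at U32_MAX instead of silently wrapping.
	 */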
+/**
+ * svga3dsurface_get_desc - Look up the appropriate SVGA3dSurfaceDesc for the
+ * given format.
+ */
static inline const struct svga3d_surface_desc *
svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
{
@@ -955,23 +1124,10 @@ svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
}
-/*
- *----------------------------------------------------------------------
- *
- * svga3dsurface_get_mip_size --
- *
- * Given a base level size and the mip level, compute the size of
- * the mip level.
- *
- * Results:
- * See above.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
+/**
+ * svga3dsurface_get_mip_size - Given a base level size and the mip level,
+ * compute the size of the mip level.
*/
-
static inline surf_size_struct
svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
{
@@ -1018,28 +1174,17 @@ svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
return pitch;
}
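The body of svga3dsurface_get_mip_size() is elided by this hunk. The conventional computation, shown here as a sketch under the assumption that surf_size_struct has width/height/depth members, halves each extent per level and clamps at 1:

	surf_size_struct size;

	size.width  = max_t(u32, base_level.width  >> mip_level, 1);
	size.height = max_t(u32, base_level.height >> mip_level, 1);
	size.depth  = max_t(u32, base_level.depth  >> mip_level, 1);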
-/*
- *-----------------------------------------------------------------------------
- *
- * svga3dsurface_get_image_buffer_size --
- *
- * Return the number of bytes of buffer space required to store
- * one image of a surface, optionally using the specified pitch.
- *
- * If pitch is zero, it is assumed that rows are tightly packed.
+/**
+ * svga3dsurface_get_image_buffer_size - Calculates image buffer size.
*
- * This function is overflow-safe. If the result would have
- * overflowed, instead we return MAX_UINT32.
+ * Return the number of bytes of buffer space required to store one image of a
+ * surface, optionally using the specified pitch.
*
- * Results:
- * Byte count.
+ * If pitch is zero, it is assumed that rows are tightly packed.
*
- * Side effects:
- * None.
- *
- *-----------------------------------------------------------------------------
+ * This function is overflow-safe. If the result would have overflowed, instead
+ * we return MAX_UINT32.
*/
-
static inline u32
svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
const surf_size_struct *size,
@@ -1067,6 +1212,9 @@ svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
return total_size;
}
+/**
+ * svga3dsurface_get_serialized_size - Get the serialized size for the image.
+ */
static inline u32
svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
surf_size_struct base_level_size,
@@ -1087,6 +1235,26 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
return total_size * num_layers;
}
+/**
+ * svga3dsurface_get_serialized_size_extended - Returns the number of bytes
+ * required for a surface with the given parameters, including sample count.
+ */
+static inline u32
+svga3dsurface_get_serialized_size_extended(SVGA3dSurfaceFormat format,
+ surf_size_struct base_level_size,
+ u32 num_mip_levels,
+ u32 num_layers,
+ u32 num_samples)
+{
+ uint64_t total_size =
+ svga3dsurface_get_serialized_size(format,
+ base_level_size,
+ num_mip_levels,
+ num_layers);
+ total_size *= max_t(u32, 1, num_samples);
+
+ return min_t(uint64_t, total_size, (uint64_t)U32_MAX);
+}
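For reference, a caller sizing the backing store for a multisample surface might use the new helper like this (a sketch; the surrounding driver code is assumed):

	surf_size_struct base = { .width = 1024, .height = 768, .depth = 1 };
	u32 backing_size;

	backing_size = svga3dsurface_get_serialized_size_extended(
		SVGA3D_R8G8B8A8_UNORM, base,
		1 /* num_mip_levels */, 1 /* num_layers */,
		4 /* num_samples */);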
/**
* svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
@@ -1206,3 +1374,5 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
}
return svga3dsurface_is_dx_screen_target_format(format);
}
+
+#endif /* _SVGA3D_SURFACEDEFS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
index 27b33ba88430..308370665a8e 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ * Copyright 2012-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -44,9 +45,21 @@
#define SVGA3D_INVALID_ID ((uint32)-1)
+typedef uint8 SVGABool8; /* 8-bit Bool definition */
typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
typedef uint32 SVGA3dColor; /* a, r, g, b */
+typedef uint32 SVGA3dSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 numerator;
+ uint32 denominator;
+}
+#include "vmware_pack_end.h"
+SVGA3dFraction64;
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCopyRect {
@@ -145,7 +158,7 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_BUMPU8V8 = 20,
SVGA3D_BUMPL6V5U5 = 21,
SVGA3D_BUMPX8L8V8U8 = 22,
- SVGA3D_BUMPL8V8U8 = 23,
+ SVGA3D_FORMAT_DEAD1 = 23,
SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
@@ -204,8 +217,8 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_R32G32_SINT = 59,
SVGA3D_R32G8X24_TYPELESS = 60,
SVGA3D_D32_FLOAT_S8X24_UINT = 61,
- SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
- SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
+ SVGA3D_R32_FLOAT_X8X24 = 62,
+ SVGA3D_X32_G8X24_UINT = 63,
SVGA3D_R10G10B10A2_TYPELESS = 64,
SVGA3D_R10G10B10A2_UINT = 65,
SVGA3D_R11G11B10_FLOAT = 66,
@@ -223,8 +236,8 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_R32_SINT = 78,
SVGA3D_R24G8_TYPELESS = 79,
SVGA3D_D24_UNORM_S8_UINT = 80,
- SVGA3D_R24_UNORM_X8_TYPELESS = 81,
- SVGA3D_X24_TYPELESS_G8_UINT = 82,
+ SVGA3D_R24_UNORM_X8 = 81,
+ SVGA3D_X24_G8_UINT = 82,
SVGA3D_R8G8_TYPELESS = 83,
SVGA3D_R8G8_UNORM = 84,
SVGA3D_R8G8_UINT = 85,
@@ -296,92 +309,114 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
-typedef enum SVGA3dSurfaceFlags {
- SVGA3D_SURFACE_CUBEMAP = (1 << 0),
+/*
+ * SVGA3d Surface Flags --
+ */
+#define SVGA3D_SURFACE_CUBEMAP (1 << 0)
- /*
- * HINT flags are not enforced by the device but are useful for
- * performance.
- */
- SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
- SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
- SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
- SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
- SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
- SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
- SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
- SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
- SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
- SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
- SVGA3D_SURFACE_DECODE_RENDERTARGET = (1 << 11),
+/*
+ * HINT flags are not enforced by the device but are useful for
+ * performance.
+ */
+#define SVGA3D_SURFACE_HINT_STATIC (CONST64U(1) << 1)
+#define SVGA3D_SURFACE_HINT_DYNAMIC (CONST64U(1) << 2)
+#define SVGA3D_SURFACE_HINT_INDEXBUFFER (CONST64U(1) << 3)
+#define SVGA3D_SURFACE_HINT_VERTEXBUFFER (CONST64U(1) << 4)
+#define SVGA3D_SURFACE_HINT_TEXTURE (CONST64U(1) << 5)
+#define SVGA3D_SURFACE_HINT_RENDERTARGET (CONST64U(1) << 6)
+#define SVGA3D_SURFACE_HINT_DEPTHSTENCIL (CONST64U(1) << 7)
+#define SVGA3D_SURFACE_HINT_WRITEONLY (CONST64U(1) << 8)
+#define SVGA3D_SURFACE_MASKABLE_ANTIALIAS (CONST64U(1) << 9)
+#define SVGA3D_SURFACE_AUTOGENMIPMAPS (CONST64U(1) << 10)
+
+#define SVGA3D_SURFACE_DECODE_RENDERTARGET (CONST64U(1) << 11)
- /*
- * Is this surface using a base-level pitch for it's mob backing?
- *
- * This flag is not intended to be set by guest-drivers, but is instead
- * set by the device when the surface is bound to a mob with a specified
- * pitch.
- */
- SVGA3D_SURFACE_MOB_PITCH = (1 << 12),
+/*
+ * Is this surface using a base-level pitch for its mob backing?
+ *
+ * This flag is not intended to be set by guest-drivers, but is instead
+ * set by the device when the surface is bound to a mob with a specified
+ * pitch.
+ */
+#define SVGA3D_SURFACE_MOB_PITCH (CONST64U(1) << 12)
- SVGA3D_SURFACE_INACTIVE = (1 << 13),
- SVGA3D_SURFACE_HINT_RT_LOCKABLE = (1 << 14),
- SVGA3D_SURFACE_VOLUME = (1 << 15),
+#define SVGA3D_SURFACE_INACTIVE (CONST64U(1) << 13)
+#define SVGA3D_SURFACE_HINT_RT_LOCKABLE (CONST64U(1) << 14)
+#define SVGA3D_SURFACE_VOLUME (CONST64U(1) << 15)
- /*
- * Required to be set on a surface to bind it to a screen target.
- */
- SVGA3D_SURFACE_SCREENTARGET = (1 << 16),
+/*
+ * Required to be set on a surface to bind it to a screen target.
+ */
+#define SVGA3D_SURFACE_SCREENTARGET (CONST64U(1) << 16)
- /*
- * Align images in the guest-backing mob to 16-bytes.
- */
- SVGA3D_SURFACE_ALIGN16 = (1 << 17),
+/*
+ * Align images in the guest-backing mob to 16-bytes.
+ */
+#define SVGA3D_SURFACE_ALIGN16 (CONST64U(1) << 17)
- SVGA3D_SURFACE_1D = (1 << 18),
- SVGA3D_SURFACE_ARRAY = (1 << 19),
+#define SVGA3D_SURFACE_1D (CONST64U(1) << 18)
+#define SVGA3D_SURFACE_ARRAY (CONST64U(1) << 19)
- /*
- * Bind flags.
- * These are enforced for any surface defined with DefineGBSurface_v2.
- */
- SVGA3D_SURFACE_BIND_VERTEX_BUFFER = (1 << 20),
- SVGA3D_SURFACE_BIND_INDEX_BUFFER = (1 << 21),
- SVGA3D_SURFACE_BIND_CONSTANT_BUFFER = (1 << 22),
- SVGA3D_SURFACE_BIND_SHADER_RESOURCE = (1 << 23),
- SVGA3D_SURFACE_BIND_RENDER_TARGET = (1 << 24),
- SVGA3D_SURFACE_BIND_DEPTH_STENCIL = (1 << 25),
- SVGA3D_SURFACE_BIND_STREAM_OUTPUT = (1 << 26),
+/*
+ * Bind flags.
+ * These are enforced for any surface defined with DefineGBSurface_v2.
+ */
+#define SVGA3D_SURFACE_BIND_VERTEX_BUFFER (CONST64U(1) << 20)
+#define SVGA3D_SURFACE_BIND_INDEX_BUFFER (CONST64U(1) << 21)
+#define SVGA3D_SURFACE_BIND_CONSTANT_BUFFER (CONST64U(1) << 22)
+#define SVGA3D_SURFACE_BIND_SHADER_RESOURCE (CONST64U(1) << 23)
+#define SVGA3D_SURFACE_BIND_RENDER_TARGET (CONST64U(1) << 24)
+#define SVGA3D_SURFACE_BIND_DEPTH_STENCIL (CONST64U(1) << 25)
+#define SVGA3D_SURFACE_BIND_STREAM_OUTPUT (CONST64U(1) << 26)
- /*
- * A note on staging flags:
- *
- * The STAGING flags notes that the surface will not be used directly by the
- * drawing pipeline, i.e. that it will not be bound to any bind point.
- * Staging surfaces may be used by copy operations to move data in and out
- * of other surfaces.
- *
- * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
- * updates indirectly, i.e. the surface will not be updated directly, but
- * will receive copies from staging surfaces.
- */
- SVGA3D_SURFACE_STAGING_UPLOAD = (1 << 27),
- SVGA3D_SURFACE_STAGING_DOWNLOAD = (1 << 28),
- SVGA3D_SURFACE_HINT_INDIRECT_UPDATE = (1 << 29),
+/*
+ * The STAGING flags note that the surface will not be used directly by the
+ * drawing pipeline, i.e. that it will not be bound to any bind point.
+ * Staging surfaces may be used by copy operations to move data in and out
+ * of other surfaces. No bind flags may be set on surfaces with this flag.
+ *
+ * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+ * updates indirectly, i.e. the surface will not be updated directly, but
+ * will receive copies from staging surfaces.
+ */
+#define SVGA3D_SURFACE_STAGING_UPLOAD (CONST64U(1) << 27)
+#define SVGA3D_SURFACE_STAGING_DOWNLOAD (CONST64U(1) << 28)
+#define SVGA3D_SURFACE_HINT_INDIRECT_UPDATE (CONST64U(1) << 29)
- /*
- * Setting this flag allow this surface to be used with the
- * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
- * buffer surfaces, an no bind flags are allowed to be set on surfaces
- * with this flag.
- */
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER = (1 << 30),
+/*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag.
+ */
+#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30)
- /*
- * Marker for the last defined bit.
- */
- SVGA3D_SURFACE_FLAG_MAX = (1 << 31),
-} SVGA3dSurfaceFlags;
+/*
+ * Reserved for video operations.
+ */
+#define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31)
+
+/*
+ * Specifies that a surface is multisample, and therefore requires the full
+ * mob-backing to store all the samples.
+ */
+#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32)
+
+#define SVGA3D_SURFACE_FLAG_MAX (CONST64U(1) << 33)
+
+/*
+ * Surface flags types:
+ *
+ * SVGA3dSurface1Flags: Lower 32-bits of flags.
+ * SVGA3dSurface2Flags: Upper 32-bits of flags.
+ * SVGA3dSurfaceAllFlags: Full 64-bits of flags.
+ */
+typedef uint32 SVGA3dSurface1Flags;
+typedef uint32 SVGA3dSurface2Flags;
+typedef uint64 SVGA3dSurfaceAllFlags;
+
+#define SVGA3D_SURFACE_FLAGS1_MASK ((uint64_t)MAX_UINT32)
+#define SVGA3D_SURFACE_FLAGS2_MASK (MAX_UINT64 & ~SVGA3D_SURFACE_FLAGS1_MASK)
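A sketch of splitting the full 64-bit flags into the two 32-bit halves using the masks above (how the halves are carried to the device is outside this hunk):

	SVGA3dSurfaceAllFlags all = SVGA3D_SURFACE_BIND_RENDER_TARGET |
				    SVGA3D_SURFACE_MULTISAMPLE;
	SVGA3dSurface1Flags flags1 = (SVGA3dSurface1Flags)
		(all & SVGA3D_SURFACE_FLAGS1_MASK);
	SVGA3dSurface2Flags flags2 = (SVGA3dSurface2Flags)
		((all & SVGA3D_SURFACE_FLAGS2_MASK) >> 32);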
#define SVGA3D_SURFACE_HB_DISALLOWED_MASK \
( SVGA3D_SURFACE_MOB_PITCH | \
@@ -392,29 +427,41 @@ typedef enum SVGA3dSurfaceFlags {
SVGA3D_SURFACE_STAGING_UPLOAD | \
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE \
+ )
+
+#define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_MULTISAMPLE \
)
#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \
( SVGA3D_SURFACE_CUBEMAP | \
SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
- SVGA3D_SURFACE_DECODE_RENDERTARGET | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
- SVGA3D_SURFACE_ARRAY | \
SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE \
+ )
+
+#define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_MULTISAMPLE \
)
#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
( SVGA3D_SURFACE_CUBEMAP | \
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
- SVGA3D_SURFACE_DECODE_RENDERTARGET | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
@@ -426,12 +473,36 @@ typedef enum SVGA3dSurfaceFlags {
SVGA3D_SURFACE_STAGING_UPLOAD | \
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE \
+ )
+
+#define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
+ SVGA3D_SURFACE_ARRAY | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_MOB_PITCH \
+ )
+
+#define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_SCREENTARGET | \
+ SVGA3D_SURFACE_MOB_PITCH \
)
#define SVGA3D_SURFACE_DX_ONLY_MASK \
( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
#define SVGA3D_SURFACE_STAGING_MASK \
( SVGA3D_SURFACE_STAGING_UPLOAD | \
@@ -487,7 +558,7 @@ typedef enum {
/*
* Indicates that this format can be converted to any RGB format for which
- * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified.
*/
SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
@@ -498,22 +569,22 @@ typedef enum {
/*
* Indicated that this format can be read as an SRGB texture (meaning that the
- * sampler will linearize the looked up data)
+ * sampler will linearize the looked up data).
*/
SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
/*
- * Indicates that this format can be used in the bumpmap instructions
+ * Indicates that this format can be used in the bumpmap instructions.
*/
SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
/*
- * Indicates that this format can be sampled by the displacement map sampler
+ * Indicates that this format can be sampled by the displacement map sampler.
*/
SVGA3DFORMAT_OP_DMAP = 0x00020000,
/*
- * Indicates that this format cannot be used with texture filtering
+ * Indicates that this format cannot be used with texture filtering.
*/
SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
@@ -530,18 +601,18 @@ typedef enum {
SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
/*
- * Indicates that this format cannot be used with alpha blending
+ * Indicates that this format cannot be used with alpha blending.
*/
SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
/*
* Indicates that the device can auto-generated sublevels for resources
- * of this format
+ * of this format.
*/
SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
/*
- * Indicates that this format can be used by vertex texture sampler
+ * Indicates that this format can be used by the vertex texture sampler.
*/
SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
@@ -1501,7 +1572,6 @@ union SVGADXQueryResultUnion {
#include "vmware_pack_end.h"
SVGADXQueryResultUnion;
-
typedef enum {
SVGA3D_QUERYSTATE_PENDING = 0, /* Query is not finished yet */
SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully */
@@ -1533,9 +1603,9 @@ typedef
struct {
union {
struct {
- uint16 function; /* SVGA3dFogFunction */
- uint8 type; /* SVGA3dFogType */
- uint8 base; /* SVGA3dFogBase */
+ uint16 function; /* SVGA3dFogFunction */
+ uint8 type; /* SVGA3dFogType */
+ uint8 base; /* SVGA3dFogBase */
};
uint32 uintValue;
};
@@ -1547,19 +1617,27 @@ SVGA3dFogMode;
* Uniquely identify one image (a 1D/2D/3D array) from a surface. This
* is a surface ID as well as face/mipmap indices.
*/
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dSurfaceImageId {
- uint32 sid;
- uint32 face;
- uint32 mipmap;
+ uint32 sid;
+ uint32 face;
+ uint32 mipmap;
}
#include "vmware_pack_end.h"
SVGA3dSurfaceImageId;
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dSubSurfaceId {
+ uint32 sid;
+ uint32 subResourceId;
+}
+#include "vmware_pack_end.h"
+SVGA3dSubSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
struct {
uint32 width;
uint32 height;
@@ -1582,13 +1660,18 @@ typedef enum {
SVGA_OTABLE_DX9_MAX = 5,
SVGA_OTABLE_DXCONTEXT = 5,
- SVGA_OTABLE_MAX = 6
-} SVGAOTableType;
+ SVGA_OTABLE_DX_MAX = 6,
-/*
- * Deprecated.
- */
-#define SVGA_OTABLE_COUNT 4
+ SVGA_OTABLE_RESERVED1 = 6,
+ SVGA_OTABLE_RESERVED2 = 7,
+
+ /*
+ * Additions to this table need to be tied to HW-version features and
+ * checkpointed accordingly.
+ */
+ SVGA_OTABLE_DEVEL_MAX = 8,
+ SVGA_OTABLE_MAX = 8
+} SVGAOTableType;
typedef enum {
SVGA_COTABLE_MIN = 0,
@@ -1605,7 +1688,7 @@ typedef enum {
SVGA_COTABLE_DXSHADER = 10,
SVGA_COTABLE_DX10_MAX = 11,
SVGA_COTABLE_UAVIEW = 11,
- SVGA_COTABLE_MAX
+ SVGA_COTABLE_MAX = 12,
} SVGACOTableType;
/*
@@ -1626,8 +1709,37 @@ typedef enum SVGAMobFormat {
SVGA3D_MOBFMT_PREDX_MAX = 7,
SVGA3D_MOBFMT_EMPTY = 7,
SVGA3D_MOBFMT_MAX,
+
+ /*
+ * This isn't actually used by the guest, but is a mob-format used
+ * internally by the SVGA device (and is therefore not binary compatible).
+ */
+ SVGA3D_MOBFMT_HB,
} SVGAMobFormat;
#define SVGA3D_MOB_EMPTY_BASE 1
+/*
+ * Multisample pattern types.
+ */
+
+typedef enum SVGA3dMSPattern {
+ SVGA3D_MS_PATTERN_NONE = 0,
+ SVGA3D_MS_PATTERN_MIN = 0,
+ SVGA3D_MS_PATTERN_STANDARD = 1,
+ SVGA3D_MS_PATTERN_CENTER = 2,
+ SVGA3D_MS_PATTERN_MAX = 3,
+} SVGA3dMSPattern;
+
+/*
+ * Precision settings for each sample.
+ */
+
+typedef enum SVGA3dMSQualityLevel {
+ SVGA3D_MS_QUALITY_NONE = 0,
+ SVGA3D_MS_QUALITY_MIN = 0,
+ SVGA3D_MS_QUALITY_FULL = 1,
+ SVGA3D_MS_QUALITY_MAX = 2,
+} SVGA3dMSQualityLevel;
+
#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index 884b1d1fb85f..acb41e28e46f 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index faf6d9b2b891..e5385146e7fc 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 88e72bf9a534..056f54b35d73 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -63,16 +64,26 @@ typedef uint32 SVGAMobId;
#define SVGA_MAX_BITS_PER_PIXEL 32
#define SVGA_MAX_DEPTH 24
#define SVGA_MAX_DISPLAYS 10
+#define SVGA_MAX_SCREEN_SIZE 8192
+#define SVGA_SCREEN_ROOT_LIMIT (SVGA_MAX_SCREEN_SIZE * SVGA_MAX_DISPLAYS)
+
/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
* cursor bypass mode. This is still supported, but no new guest
* drivers should use it.
*/
-#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
-#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_HIDE 0x0
+#define SVGA_CURSOR_ON_SHOW 0x1
+
+/*
+ * Remove the cursor from the framebuffer
+ * because we need to see what's under it
+ */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2
+
+/* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3
/*
* The maximum framebuffer size that can traced for guests unless the
@@ -101,7 +112,10 @@ typedef uint32 SVGAMobId;
#define SVGA_VERSION_0 0
#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
-/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+/*
+ * "Invalid" value for all SVGA IDs.
+ * (Version ID, screen object ID, surface ID...)
+ */
#define SVGA_ID_INVALID 0xFFFFFFFF
/* Port offsets, relative to BAR0 */
@@ -154,7 +168,7 @@ enum {
SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
- SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
+ SVGA_REG_GUEST_ID = 23, /* (Deprecated) */
SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
@@ -186,7 +200,14 @@ enum {
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
- SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
+
+ /*
+ * Max primary memory.
+ * See SVGA_CAP_NO_BB_RESTRICTION.
+ */
+ SVGA_REG_MAX_PRIMARY_MEM = 50,
+ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,
+
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Sugested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
SVGA_REG_CMD_PREPEND_LOW = 53,
@@ -194,7 +215,10 @@ enum {
SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
SVGA_REG_MOB_MAX_SIZE = 57,
- SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
+ SVGA_REG_BLANK_SCREEN_TARGETS = 58,
+ SVGA_REG_CAP2 = 59,
+ SVGA_REG_DEVEL_CAP = 60,
+ SVGA_REG_TOP = 61, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -392,6 +416,7 @@ typedef enum {
SVGA_CB_CONTEXT_0 = 0x0,
SVGA_CB_CONTEXT_1 = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
SVGA_CB_CONTEXT_MAX = 0x2,
+ SVGA_CB_CONTEXT_HP_MAX = 0x2,
} SVGACBContext;
@@ -448,6 +473,18 @@ typedef enum {
* due to an error. No IRQ is raised.
*/
SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+
+ /*
+ * Written by the host when the host finished a
+ * SVGA_DC_CMD_ASYNC_STOP_QUEUE request for this command buffer
+ * queue. The offset of the first byte not processed is stored in
+ * the errorOffset field of the command buffer header. All guest
+ * visible side effects of commands up to that point are guaranteed
+ * to be finished before this is written. The
+ * SVGA_IRQFLAG_COMMAND_BUFFER IRQ is raised as long as the
+ * SVGA_CB_FLAG_NO_IRQ is not set.
+ */
+ SVGA_CB_STATUS_PARTIAL_COMPLETE = 7,
} SVGACBStatus;
typedef enum {
@@ -460,8 +497,8 @@ typedef enum {
typedef
#include "vmware_pack_begin.h"
struct {
- volatile SVGACBStatus status;
- volatile uint32 errorOffset;
+ volatile SVGACBStatus status; /* Modified by device. */
+ volatile uint32 errorOffset; /* Modified by device. */
uint64 id;
SVGACBFlags flags;
uint32 length;
@@ -472,7 +509,9 @@ struct {
uint32 mobOffset;
} mob;
} ptr;
- uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
+ uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise,
+ * modified by device.
+ */
uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
uint32 mustBeZero[6];
}
@@ -483,20 +522,26 @@ typedef enum {
SVGA_DC_CMD_NOP = 0,
SVGA_DC_CMD_START_STOP_CONTEXT = 1,
SVGA_DC_CMD_PREEMPT = 2,
- SVGA_DC_CMD_MAX = 3,
- SVGA_DC_CMD_FORCE_UINT = MAX_UINT32,
+ SVGA_DC_CMD_START_QUEUE = 3, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_DC_CMD_ASYNC_STOP_QUEUE = 4, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE = 5, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_DC_CMD_MAX = 6,
} SVGADeviceContextCmdId;
-typedef struct {
+/*
+ * Starts or stops both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1.
+ */
+
+typedef struct SVGADCCmdStartStop {
uint32 enable;
- SVGACBContext context;
+ SVGACBContext context; /* Must be zero */
} SVGADCCmdStartStop;
/*
* SVGADCCmdPreempt --
*
* This command allows the guest to request that all command buffers
- * on the specified context be preempted that can be. After execution
+ * on SVGA_CB_CONTEXT_0 be preempted that can be. After execution
* of this command all command buffers that were preempted will
* already have SVGA_CB_STATUS_PREEMPTED written into the status
* field. The device might still be processing a command buffer,
@@ -506,12 +551,69 @@ typedef struct {
* command buffer header set to zero.
*/
-typedef struct {
- SVGACBContext context;
+typedef struct SVGADCCmdPreempt {
+ SVGACBContext context; /* Must be zero */
uint32 ignoreIDZero;
} SVGADCCmdPreempt;
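After issuing the preempt, a guest can reap the affected buffers by status; a sketch (the list walking and requeue helper are hypothetical):

	/* Walk submitted command buffer headers and reclaim any the
	 * device marked as preempted.
	 */
	if (header->status == SVGA_CB_STATUS_PREEMPTED)
		requeue_or_discard(header); /* hypothetical helper */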
/*
+ * Starts the requested command buffer processing queue. Valid only
+ * if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ *
+ * For a command queue to be considered runnable it must be enabled
+ * and any corresponding higher priority queues must also be enabled.
+ * For example in order for command buffers to be processed on
+ * SVGA_CB_CONTEXT_0 both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1 must
+ * be enabled. But for commands to be runnable on SVGA_CB_CONTEXT_1
+ * only that queue must be enabled.
+ */
+
+typedef struct SVGADCCmdStartQueue {
+ SVGACBContext context;
+} SVGADCCmdStartQueue;
+
+/*
+ * Requests the SVGA device to stop processing the requested command
+ * buffer queue as soon as possible. The guest knows the stop has
+ * completed when one of the following happens.
+ *
+ * 1) A command buffer status of SVGA_CB_STATUS_PARTIAL_COMPLETE is returned
+ * 2) A command buffer error is encountered which would stop the queue
+ * regardless of the async stop request.
+ * 3) All command buffers that have been submitted complete successfully.
+ * 4) The stop completes synchronously if no command buffers are
+ * active on the queue when it is issued.
+ *
+ * If the command queue is not in a runnable state there is no
+ * guarantee this async stop will finish. For instance, if the high
+ * priority queue is not enabled and a stop is requested on the low
+ * priority queue, the high priority queue must be reenabled to
+ * guarantee that the async stop will finish.
+ *
+ * This command along with SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE can be used
+ * to implement mid command buffer preemption.
+ *
+ * Valid only if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ */
+
+typedef struct SVGADCCmdAsyncStopQueue {
+ SVGACBContext context;
+} SVGADCCmdAsyncStopQueue;
+
+/*
+ * Requests the SVGA device to throw away any full command buffers on
+ * the requested command queue that have not been started. For a
+ * driver to know which command buffers were thrown away, it
+ * should only issue this command when the queue is stopped, for
+ * whatever reason.
+ */
+
+typedef struct SVGADCCmdEmptyQueue {
+ SVGACBContext context;
+} SVGADCCmdEmptyQueue;
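Together with SVGA_DC_CMD_ASYNC_STOP_QUEUE this enables mid command buffer preemption, as noted above; a sketch of the sequence (issue_dc_cmd() is a hypothetical submission helper, not part of this patch):

	struct SVGADCCmdAsyncStopQueue stop  = { .context = SVGA_CB_CONTEXT_0 };
	struct SVGADCCmdEmptyQueue     empty = { .context = SVGA_CB_CONTEXT_0 };

	issue_dc_cmd(SVGA_DC_CMD_ASYNC_STOP_QUEUE, &stop, sizeof(stop));
	/* ... wait for SVGA_CB_STATUS_PARTIAL_COMPLETE or queue drain ... */
	issue_dc_cmd(SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE, &empty, sizeof(empty));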
+
+
+/*
* SVGAGMRImageFormat --
*
* This is a packed representation of the source 2D image format
@@ -536,7 +638,7 @@ typedef struct SVGAGMRImageFormat {
struct {
uint32 bitsPerPixel : 8;
uint32 colorDepth : 8;
- uint32 reserved : 16; /* Must be zero */
+ uint32 reserved : 16; /* Must be zero */
};
uint32 value;
@@ -672,8 +774,36 @@ SVGASignedPoint;
* SVGA_CAP_GBOBJECTS --
* Enable guest-backed objects and surfaces.
*
- * SVGA_CAP_CMD_BUFFERS_3 --
- * Enable support for command buffers in a mob.
+ * SVGA_CAP_DX --
+ * Enable support for DX commands, and command buffers in a mob.
+ *
+ * SVGA_CAP_HP_CMD_QUEUE --
+ * Enable support for the high priority command queue, and the
+ * ScreenCopy command.
+ *
+ * SVGA_CAP_NO_BB_RESTRICTION --
+ * Allow ScreenTargets to be defined without regard to the 32-bpp
+ * bounding-box memory restrictions; i.e.:
+ *
+ * The summed memory usage of all screens (assuming they were defined as
+ * 32-bpp) must always be less than the value of the
+ * SVGA_REG_MAX_PRIMARY_MEM register.
+ *
+ * If this cap is not present, the 32-bpp bounding box around all screens
+ * must additionally be under the value of the SVGA_REG_MAX_PRIMARY_MEM
+ * register.
+ *
+ * If the cap is present, the bounding box restriction is lifted (and only
+ * the screen-sum limit applies).
+ *
+ * (Note that this is a slight lie... there is still a sanity limit on any
+ * dimension of the topology to be less than SVGA_SCREEN_ROOT_LIMIT, even
+ * when SVGA_CAP_NO_BB_RESTRICTION is present, but that should be
+ * large enough to express any possible topology without holes between
+ * monitors.)
+ *
+ * SVGA_CAP_CAP2_REGISTER --
+ * If this cap is present, the SVGA_REG_CAP2 register is supported.
*/
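A sketch of the two limits SVGA_CAP_NO_BB_RESTRICTION distinguishes (the screen and topology bookkeeping here is assumed, not from this patch):

	u64 sum = 0, bb;
	u32 i;

	for (i = 0; i < num_screens; i++)	/* sum each screen at 32 bpp */
		sum += (u64)screen[i].w * screen[i].h * 4;
	bb = (u64)bound_w * bound_h * 4;	/* 32-bpp bounding box */

	if (sum > max_primary_mem)
		return -ENOSPC;
	if (!(caps & SVGA_CAP_NO_BB_RESTRICTION) && bb > max_primary_mem)
		return -ENOSPC;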
#define SVGA_CAP_NONE 0x00000000
@@ -699,8 +829,30 @@ SVGASignedPoint;
#define SVGA_CAP_GBOBJECTS 0x08000000
#define SVGA_CAP_DX 0x10000000
#define SVGA_CAP_HP_CMD_QUEUE 0x20000000
+#define SVGA_CAP_NO_BB_RESTRICTION 0x40000000
+#define SVGA_CAP_CAP2_REGISTER 0x80000000
-#define SVGA_CAP_CMD_RESERVED 0x80000000
+/*
+ * The SVGA_REG_CAP2 register is an additional set of SVGA capability bits.
+ *
+ * SVGA_CAP2_GROW_OTABLE --
+ * Allow the GrowOTable/DXGrowCOTable commands.
+ *
+ * SVGA_CAP2_INTRA_SURFACE_COPY --
+ * Allow the IntraSurfaceCopy command.
+ *
+ * SVGA_CAP2_DX2 --
+ * Allow the DefineGBSurface_v3 and WholeSurfaceCopy commands.
+ *
+ * SVGA_CAP2_RESERVED --
+ * Reserve the last bit for extending the SVGA capabilities to some
+ * future mechanisms.
+ */
+#define SVGA_CAP2_NONE 0x00000000
+#define SVGA_CAP2_GROW_OTABLE 0x00000001
+#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
+#define SVGA_CAP2_DX2 0x00000004
+#define SVGA_CAP2_RESERVED 0x80000000
/*
@@ -722,7 +874,8 @@ typedef enum {
SVGABackdoorCapDeviceCaps = 0,
SVGABackdoorCapFifoCaps = 1,
SVGABackdoorCap3dHWVersion = 2,
- SVGABackdoorCapMax = 3,
+ SVGABackdoorCapDeviceCaps2 = 3,
+ SVGABackdoorCapMax = 4,
} SVGABackdoorCapType;
@@ -1914,16 +2067,6 @@ SVGAFifoCmdRemapGMR2;
#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */
-/*
- * To simplify autoDetect display configuration, support a minimum of
- * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
- * numDisplays = 2
- * maxWidth = numDisplay * 1920 = 3840
- * maxHeight = rotated width of single monitor = 1920
- * vramSize = maxWidth * maxHeight * 4 = 29491200
- */
-#define SVGA_VRAM_SIZE_AUTODETECT (32 * 1024 * 1024)
-
#if defined(VMX86_SERVER)
#define SVGA_VRAM_SIZE (4 * 1024 * 1024)
#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
index 2e8ba4df8de9..350bbc6fab02 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
+ * Copyright 2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -40,7 +41,10 @@ typedef uint64 PPN64;
typedef bool Bool;
+#define MAX_UINT64 U64_MAX
#define MAX_UINT32 U32_MAX
#define MAX_UINT16 U16_MAX
+#define CONST64U(x) x##ULL
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
index 7e7b0ce34aa2..75308bd0d970 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -1,25 +1,2 @@
-/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
index e2e440ed3d44..e93d6f28b68c 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -1,25 +1,2 @@
-/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
__packed
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 55d32ae43aa4..0b9ee7fb45d6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index bf2e77ad5a20..6a2a9d69043b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index e8c94b19db7b..fc6673cde289 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2017 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2017 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
new file mode 100644
index 000000000000..2dda03345761
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -0,0 +1,1123 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+#include "drm/ttm/ttm_object.h"
+
+
+/**
+ * struct vmw_user_buffer_object - User-space-visible buffer object
+ *
+ * @prime: The prime object providing user visibility.
+ * @vbo: The struct vmw_buffer_object
+ */
+struct vmw_user_buffer_object {
+ struct ttm_prime_object prime;
+ struct vmw_buffer_object vbo;
+};
+
+
+/**
+ * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the
+ * TTM buffer object.
+ */
+static struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vmw_buffer_object, base);
+}
+
+
+/**
+ * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_user_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
+ * object.
+ */
+static struct vmw_user_buffer_object *
+vmw_user_buffer_object(struct ttm_buffer_object *bo)
+{
+ struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+ return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
+}
+
+
+/**
+ * vmw_bo_pin_in_placement - Validate a buffer to placement.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @placement: The placement to pin it.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ struct ttm_placement *placement,
+ bool interruptible)
+{
+ struct ttm_operation_ctx ctx = {interruptible, false };
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+ uint32_t new_flags;
+
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(placement, &bo->mem,
+ &new_flags) ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, placement, &ctx);
+
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_write_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
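+
+/*
+ * Example (illustrative sketch only, not part of the driver): a typical
+ * pin/unpin pairing around an operation that needs the buffer resident in
+ * a specific placement. Assumes valid dev_priv and buf; error handling
+ * abbreviated.
+ *
+ *   ret = vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement, true);
+ *   if (ret)
+ *           return ret;
+ *   (access the buffer at its now-stable placement)
+ *   ret = vmw_bo_unpin(dev_priv, buf, false);
+ */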
+
+
+/**
+ * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ struct ttm_operation_ctx ctx = { interruptible, false };
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+ uint32_t new_flags;
+
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ if (buf->pin_count > 0) {
+ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+ &new_flags) ? 0 : -EINVAL;
+ goto out_unreserve;
+ }
+
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ if (likely(ret == 0) || ret == -ERESTARTSYS)
+ goto out_unreserve;
+
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+
+out_unreserve:
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
+
+ ttm_bo_unreserve(bo);
+err:
+ ttm_write_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram - Move a buffer to vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+ interruptible);
+}
+
+
+/**
+ * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to pin.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ struct ttm_operation_ctx ctx = { interruptible, false };
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_placement placement;
+ struct ttm_place place;
+ int ret = 0;
+ uint32_t new_flags;
+
+ place = vmw_vram_placement.placement[0];
+ place.lpfn = bo->num_pages;
+ placement.num_placement = 1;
+ placement.placement = &place;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &place;
+
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err_unlock;
+
+ /*
+ * Is this buffer already in vram but not at the start of it?
+ * In that case, evict it first because TTM isn't good at handling
+ * that situation.
+ */
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start < bo->num_pages &&
+ bo->mem.start > 0 &&
+ buf->pin_count == 0) {
+ ctx.interruptible = false;
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+ }
+
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(&placement, &bo->mem,
+ &new_flags) ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, &placement, &ctx);
+
+ /* For some reason we didn't end up at the start of vram */
+ WARN_ON(ret == 0 && bo->offset != 0);
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
+
+ ttm_bo_unreserve(bo);
+err_unlock:
+ ttm_write_unlock(&dev_priv->reservation_sem);
+
+ return ret;
+}
+
+
+/**
+ * vmw_bo_unpin - Unpin the given buffer without moving it.
+ *
+ * This function takes the reservation_sem in write mode.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to unpin.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_unpin(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ vmw_bo_pin_reserved(buf, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+ SVGAGuestPtr *ptr)
+{
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+ ptr->offset = bo->offset;
+ } else {
+ ptr->gmrId = bo->mem.start;
+ ptr->offset = 0;
+ }
+}
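+
+/*
+ * Example (sketch, assuming vbo is pinned or reserved): reading back the
+ * guest pointer. For a VRAM placement, ptr receives the framebuffer GMR id
+ * and a byte offset; for other placements it receives the GMR id itself.
+ *
+ *   SVGAGuestPtr ptr;
+ *
+ *   vmw_bo_get_guest_ptr(&vbo->base, &ptr);
+ *   DRM_INFO("gmrId %u, offset %u.\n", ptr.gmrId, ptr.offset);
+ */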
+
+
+/**
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
+ *
+ * @vbo: The buffer object. Must be reserved.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+{
+ struct ttm_operation_ctx ctx = { false, true };
+ struct ttm_place pl;
+ struct ttm_placement placement;
+ struct ttm_buffer_object *bo = &vbo->base;
+ uint32_t old_mem_type = bo->mem.mem_type;
+ int ret;
+
+ lockdep_assert_held(&bo->resv->lock.base);
+
+ if (pin) {
+ if (vbo->pin_count++ > 0)
+ return;
+ } else {
+ WARN_ON(vbo->pin_count <= 0);
+ if (--vbo->pin_count > 0)
+ return;
+ }
+
+ pl.fpfn = 0;
+ pl.lpfn = 0;
+ pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+ | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+ if (pin)
+ pl.flags |= TTM_PL_FLAG_NO_EVICT;
+
+ memset(&placement, 0, sizeof(placement));
+ placement.num_placement = 1;
+ placement.placement = &pl;
+
+ ret = ttm_bo_validate(bo, &placement, &ctx);
+
+ BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
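+
+/*
+ * Example (sketch under the stated locking rules): the caller must reserve
+ * the buffer object before calling vmw_bo_pin_reserved(), and unreserve it
+ * afterwards.
+ *
+ *   ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
+ *   if (ret)
+ *           return ret;
+ *   vmw_bo_pin_reserved(vbo, true);
+ *   (the buffer is now pinned in place)
+ *   ttm_bo_unreserve(&vbo->base);
+ */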
+
+
+/**
+ * vmw_bo_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on either
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
+ */
+void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
+{
+ struct ttm_buffer_object *bo = &vbo->base;
+ bool not_used;
+ void *virtual;
+ int ret;
+
+ virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+ if (virtual)
+ return virtual;
+
+ ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+ if (ret)
+ DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+ return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
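+
+/*
+ * Example (illustrative sketch): since the map is cached, repeated calls
+ * are cheap and no vmw_bo_unmap() is needed after each access; the map is
+ * torn down on move, swapout or destruction. Assumes vbo stays pinned or
+ * reserved for the duration of the access.
+ *
+ *   u32 *virt = vmw_bo_map_and_cache(vbo);
+ *
+ *   if (!virt)
+ *           return -ENOMEM;
+ *   virt[0] = value;
+ */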
+
+
+/**
+ * vmw_bo_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_bo_map_and_cache().
+ */
+void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+{
+ if (vbo->map.bo == NULL)
+ return;
+
+ ttm_bo_kunmap(&vbo->map);
+}
+
+
+/**
+ * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary buffer object or a user buffer object.
+ */
+static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+ bool user)
+{
+ static size_t struct_size, user_struct_size;
+ size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+ if (unlikely(struct_size == 0)) {
+ size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+ struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_buffer_object));
+ user_struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+ }
+
+ if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ page_array_size +=
+ ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+ return ((user) ? user_struct_size : struct_size) +
+ page_array_size;
+}
+
+
+/**
+ * vmw_bo_bo_free - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+void vmw_bo_bo_free(struct ttm_buffer_object *bo)
+{
+ struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+ vmw_bo_unmap(vmw_bo);
+ kfree(vmw_bo);
+}
+
+
+/**
+ * vmw_user_bo_destroy - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+
+ vmw_bo_unmap(&vmw_user_bo->vbo);
+ ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
+
+/**
+ * vmw_bo_init - Initialize a vmw buffer object
+ *
+ * @dev_priv: Pointer to the device private struct
+ * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
+ * @size: Buffer object size in bytes.
+ * @placement: Initial placement.
+ * @interruptible: Whether waits should be performed interruptibly.
+ * @bo_free: The buffer object destructor.
+ * Return: Zero on success, negative error code on error.
+ *
+ * Note that on error, the code will free the buffer object.
+ */
+int vmw_bo_init(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible,
+ void (*bo_free)(struct ttm_buffer_object *bo))
+{
+ struct ttm_bo_device *bdev = &dev_priv->bdev;
+ size_t acc_size;
+ int ret;
+ bool user = (bo_free == &vmw_user_bo_destroy);
+
+ /* A destructor is mandatory, and only the two known ones are valid. */
+ WARN_ON_ONCE(!bo_free || (!user && bo_free != vmw_bo_bo_free));
+
+ acc_size = vmw_bo_acc_size(dev_priv, size, user);
+ memset(vmw_bo, 0, sizeof(*vmw_bo));
+
+ INIT_LIST_HEAD(&vmw_bo->res_list);
+
+ ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+ ttm_bo_type_device, placement,
+ 0, interruptible, acc_size,
+ NULL, NULL, bo_free);
+ return ret;
+}
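+
+/*
+ * Example (sketch, error handling abbreviated): allocating and initializing
+ * a kernel-internal buffer object, mirroring the pattern used by
+ * vmw_dummy_query_bo_create(). Note that vmw_bo_init() destroys the object
+ * through bo_free on failure, so the caller must not free it again.
+ *
+ *   vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+ *   if (!vbo)
+ *           return -ENOMEM;
+ *   ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, &vmw_sys_placement,
+ *                     false, &vmw_bo_bo_free);
+ *   if (ret)
+ *           return ret;
+ */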
+
+
+/**
+ * vmw_user_bo_release - TTM reference base object release callback for
+ * vmw user buffer objects
+ *
+ * @p_base: The TTM base object pointer about to be unreferenced.
+ *
+ * Clears the TTM base object pointer and drops the reference the
+ * base object has on the underlying struct vmw_buffer_object.
+ */
+static void vmw_user_bo_release(struct ttm_base_object **p_base)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+ struct ttm_base_object *base = *p_base;
+ struct ttm_buffer_object *bo;
+
+ *p_base = NULL;
+
+ if (unlikely(base == NULL))
+ return;
+
+ vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+ prime.base);
+ bo = &vmw_user_bo->vbo.base;
+ ttm_bo_unref(&bo);
+}
+
+
+/**
+ * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
+ * for vmw user buffer objects
+ *
+ * @base: Pointer to the TTM base object
+ * @ref_type: Reference type of the reference reaching zero.
+ *
+ * Called when user-space drops its last synccpu reference on the buffer
+ * object, either explicitly or as part of cleanup at file close.
+ */
+static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+ enum ttm_ref_type ref_type)
+{
+ struct vmw_user_buffer_object *user_bo;
+
+ user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
+
+ switch (ref_type) {
+ case TTM_REF_SYNCCPU_WRITE:
+ ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ break;
+ default:
+ WARN_ONCE(true, "Undefined buffer object reference release.\n");
+ }
+}
+
+
+/**
+ * vmw_user_bo_alloc - Allocate a user buffer object
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the buffer object.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
+ * should be assigned.
+ * @p_base: Pointer to where a pointer to the TTM base object should be
+ * placed, or NULL if no such pointer is required.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_vbo,
+ struct ttm_base_object **p_base)
+{
+ struct vmw_user_buffer_object *user_bo;
+ struct ttm_buffer_object *tmp;
+ int ret;
+
+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+ if (unlikely(!user_bo)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
+ &vmw_vram_sys_placement, true,
+ &vmw_user_bo_destroy);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = ttm_bo_reference(&user_bo->vbo.base);
+ ret = ttm_prime_object_init(tfile,
+ size,
+ &user_bo->prime,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_bo_release,
+ &vmw_user_bo_ref_obj_release);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unref(&tmp);
+ goto out_no_base_object;
+ }
+
+ *p_vbo = &user_bo->vbo;
+ if (p_base) {
+ *p_base = &user_bo->prime.base;
+ kref_get(&(*p_base)->refcount);
+ }
+ *handle = user_bo->prime.base.hash.key;
+
+out_no_base_object:
+ return ret;
+}
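+
+/*
+ * Example (illustrative sketch): creating a user-visible buffer object and
+ * dropping the local reference once the handle has been set up, as
+ * vmw_bo_alloc_ioctl() below does.
+ *
+ *   ret = vmw_user_bo_alloc(dev_priv, tfile, size, false, &handle,
+ *                           &vbo, NULL);
+ *   if (ret)
+ *           return ret;
+ *   (return handle to user-space)
+ *   vmw_bo_unreference(&vbo);
+ */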
+
+
+/**
+ * vmw_user_bo_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+
+ if (unlikely(bo->destroy != vmw_user_bo_destroy))
+ return -EPERM;
+
+ vmw_user_bo = vmw_user_buffer_object(bo);
+
+ /* Check that the caller has opened the object. */
+ if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+ return 0;
+
+ DRM_ERROR("Could not grant buffer access.\n");
+ return -EPERM;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ * Return: Zero on success, negative error code on error. In particular,
+ * -EBUSY will be returned if a dontblock operation is requested and the
+ * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
+ * interrupted by a signal.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ struct ttm_buffer_object *bo = &user_bo->vbo.base;
+ bool existed;
+ int ret;
+
+ if (flags & drm_vmw_synccpu_allow_cs) {
+ bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
+ long lret;
+
+ lret = reservation_object_wait_timeout_rcu
+ (bo->resv, true, true,
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ if (!lret)
+ return -EBUSY;
+ else if (lret < 0)
+ return lret;
+ return 0;
+ }
+
+ ret = ttm_bo_synccpu_write_grab
+ (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_SYNCCPU_WRITE, &existed, false);
+ if (ret != 0 || existed)
+ ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+
+ return ret;
+}
+
+/**
+ * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_bo_synccpu_release(uint32_t handle,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ if (!(flags & drm_vmw_synccpu_allow_cs))
+ return ttm_ref_object_base_unref(tfile, handle,
+ TTM_REF_SYNCCPU_WRITE);
+
+ return 0;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_synccpu_arg *arg =
+ (struct drm_vmw_synccpu_arg *) data;
+ struct vmw_buffer_object *vbo;
+ struct vmw_user_buffer_object *user_bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_base_object *buffer_base;
+ int ret;
+
+ if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+ || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+ drm_vmw_synccpu_dontblock |
+ drm_vmw_synccpu_allow_cs)) != 0) {
+ DRM_ERROR("Illegal synccpu flags.\n");
+ return -EINVAL;
+ }
+
+ switch (arg->op) {
+ case drm_vmw_synccpu_grab:
+ ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
+ &buffer_base);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_bo = container_of(vbo, struct vmw_user_buffer_object,
+ vbo);
+ ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+ vmw_bo_unreference(&vbo);
+ ttm_base_object_unref(&buffer_base);
+ if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+ ret != -EBUSY)) {
+ DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ case drm_vmw_synccpu_release:
+ ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+ arg->flags);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ default:
+ DRM_ERROR("Invalid synccpu operation.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+/**
+ * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
+ * allocation functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and allocates a
+ * struct vmw_user_buffer_object bo.
+ */
+int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_alloc_dmabuf_arg *arg =
+ (union drm_vmw_alloc_dmabuf_arg *)data;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+ struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+ struct vmw_buffer_object *vbo;
+ uint32_t handle;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ req->size, false, &handle, &vbo,
+ NULL);
+ if (unlikely(ret != 0))
+ goto out_no_bo;
+
+ rep->handle = handle;
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
+
+ vmw_bo_unreference(&vbo);
+
+out_no_bo:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+
+ return ret;
+}
+
+
+/**
+ * vmw_bo_unref_ioctl - Generic handle close ioctl.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and closes a
+ * handle to a TTM base object, optionally freeing the object.
+ */
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_unref_dmabuf_arg *arg =
+ (struct drm_vmw_unref_dmabuf_arg *)data;
+
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ arg->handle,
+ TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
+ *
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle
+ * @out: Pointer to where a pointer to the embedded
+ * struct vmw_buffer_object should be placed.
+ * @p_base: Pointer to where a pointer to the TTM base object should be
+ * placed, or NULL if no such pointer is required.
+ * Return: Zero on success, negative error code on error.
+ *
+ * Both the output base object pointer and the vmw buffer object pointer
+ * will be refcounted.
+ */
+int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+ uint32_t handle, struct vmw_buffer_object **out,
+ struct ttm_base_object **p_base)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -ESRCH;
+ }
+
+ if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+ ttm_base_object_unref(&base);
+ DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -EINVAL;
+ }
+
+ vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+ prime.base);
+ (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
+ if (p_base)
+ *p_base = base;
+ else
+ ttm_base_object_unref(&base);
+ *out = &vmw_user_bo->vbo;
+
+ return 0;
+}
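+
+/*
+ * Example (sketch): translating a user-space handle to a buffer object.
+ * Both returned pointers are refcounted, so they must be released when
+ * done.
+ *
+ *   ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
+ *   if (ret)
+ *           return ret;
+ *   (use vbo)
+ *   vmw_bo_unreference(&vbo);
+ */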
+
+
+/**
+ * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
+ *
+ * @tfile: The TTM object file to register the handle with.
+ * @vbo: The embedded vmw buffer object.
+ * @handle: Pointer to where the new handle should be placed.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_user_bo_reference(struct ttm_object_file *tfile,
+ struct vmw_buffer_object *vbo,
+ uint32_t *handle)
+{
+ struct vmw_user_buffer_object *user_bo;
+
+ if (vbo->base.destroy != vmw_user_bo_destroy)
+ return -EINVAL;
+
+ user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
+
+ *handle = user_bo->prime.base.hash.key;
+ return ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_USAGE, NULL, false);
+}
+
+
+/**
+ * vmw_bo_fence_single - Utility function to fence a single TTM buffer
+ * object without unreserving it.
+ *
+ * @bo: Pointer to the struct ttm_buffer_object to fence.
+ * @fence: Pointer to the fence. If NULL, this function will
+ * insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ if (fence == NULL) {
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ reservation_object_add_excl_fence(bo->resv, &fence->base);
+ dma_fence_put(&fence->base);
+ } else
+ reservation_object_add_excl_fence(bo->resv, &fence->base);
+}
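+
+/*
+ * Example (sketch under the stated assumptions): fencing a reserved buffer
+ * after command submission. Passing a NULL fence makes the function insert
+ * one into the command stream itself.
+ *
+ *   ret = ttm_bo_reserve(bo, true, false, NULL);
+ *   if (ret)
+ *           return ret;
+ *   (submit commands touching the buffer)
+ *   vmw_bo_fence_single(bo, NULL);
+ *   ttm_bo_unreserve(bo);
+ */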
+
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_buffer_object *vbo;
+ int ret;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ args->size, false, &args->handle,
+ &vbo, NULL);
+ if (unlikely(ret != 0))
+ goto out_no_bo;
+
+ vmw_bo_unreference(&vbo);
+out_no_bo:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_buffer_object *out_buf;
+ int ret;
+
+ ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
+ if (ret != 0)
+ return -EINVAL;
+
+ *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+ vmw_bo_unreference(&out_buf);
+ return 0;
+}
+
+
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
+int vmw_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ handle, TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_bo_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
+{
+ /* Is @bo embedded in a struct vmw_buffer_object? */
+ if (bo->destroy != vmw_bo_bo_free &&
+ bo->destroy != vmw_user_bo_destroy)
+ return;
+
+ /* Kill any cached kernel maps before swapout */
+ vmw_bo_unmap(vmw_buffer_object(bo));
+}
+
+
+/**
+ * vmw_bo_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * Detaches cached maps and device bindings that require that the
+ * buffer doesn't move.
+ */
+void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_buffer_object *vbo;
+
+ if (mem == NULL)
+ return;
+
+ /* Make sure @bo is embedded in a struct vmw_buffer_object. */
+ if (bo->destroy != vmw_bo_bo_free &&
+ bo->destroy != vmw_user_bo_destroy)
+ return;
+
+ vbo = container_of(bo, struct vmw_buffer_object, base);
+
+ /*
+ * Kill any cached kernel maps before move to or from VRAM.
+ * With other types of moves, the underlying pages stay the same,
+ * and the map can be kept.
+ */
+ if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
+ vmw_bo_unmap(vbo);
+
+ /*
+ * If we're moving a backup MOB out of MOB placement, then make sure we
+ * read back all resource content first, and unbind the MOB from
+ * the resource.
+ */
+ if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
+ vmw_resource_unbind_list(vbo);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 9f45d5004cae..e7e4655d3f36 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 36c7b6c839c0..3b75af9bf85f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 3767ac335aca..7c3cb8efd11a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -38,7 +38,7 @@ struct vmw_user_context {
struct vmw_cmdbuf_res_manager *man;
struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
spinlock_t cotable_lock;
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
};
static void vmw_user_context_free(struct vmw_resource *res);
@@ -424,7 +424,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -648,7 +648,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
* specified in the parameter. 0 otherwise.
*/
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
- struct vmw_dma_buffer *mob)
+ struct vmw_buffer_object *mob)
{
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
if (mob == NULL) {
if (uctx->dx_query_mob) {
uctx->dx_query_mob->dx_query_ctx = NULL;
- vmw_dmabuf_unreference(&uctx->dx_query_mob);
+ vmw_bo_unreference(&uctx->dx_query_mob);
uctx->dx_query_mob = NULL;
}
@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
mob->dx_query_ctx = ctx_res;
if (!uctx->dx_query_mob)
- uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+ uctx->dx_query_mob = vmw_bo_reference(mob);
return 0;
}
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
*
* @ctx_res: The context resource
*/
-struct vmw_dma_buffer *
+struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
struct vmw_user_context *uctx =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index cbf54ea7b4c0..1d45714e1d5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -324,7 +324,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
mutex_unlock(&dev_priv->binding_mutex);
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -367,7 +367,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
}
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_fence_single_bo(&res->backup->base, fence);
+ vmw_bo_fence_single(&res->backup->base, fence);
vmw_fence_obj_unreference(&fence);
return 0;
@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
struct ttm_operation_ctx ctx = { false, false };
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res);
- struct vmw_dma_buffer *buf, *old_buf = res->backup;
+ struct vmw_buffer_object *buf, *old_buf = res->backup;
struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
size_t old_size = res->backup_size;
size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
if (!buf)
return -ENOMEM;
- ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
- true, vmw_dmabuf_bo_free);
+ ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+ true, vmw_bo_bo_free);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
/* Let go of the old mob. */
list_del(&res->mob_head);
list_add_tail(&res->mob_head, &buf->res_list);
- vmw_dmabuf_unreference(&old_buf);
+ vmw_bo_unreference(&old_buf);
res->id = vcotbl->type;
return 0;
@@ -491,7 +491,7 @@ out_map_new:
ttm_bo_kunmap(&old_map);
out_wait:
ttm_bo_unreserve(bo);
- vmw_dmabuf_unreference(&buf);
+ vmw_bo_unreference(&buf);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
deleted file mode 100644
index d59d9dd16ebc..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include <drm/ttm/ttm_placement.h>
-
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-
-
-/**
- * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @placement: The placement to pin it.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- struct ttm_placement *placement,
- bool interruptible)
-{
- struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
- uint32_t new_flags;
-
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_execbuf_release_pinned_bo(dev_priv);
-
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- if (buf->pin_count > 0)
- ret = ttm_bo_mem_compat(placement, &bo->mem,
- &new_flags) == true ? 0 : -EINVAL;
- else
- ret = ttm_bo_validate(bo, placement, &ctx);
-
- if (!ret)
- vmw_bo_pin_reserved(buf, true);
-
- ttm_bo_unreserve(bo);
-
-err:
- ttm_write_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @pin: Pin buffer if true.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
- uint32_t new_flags;
-
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_execbuf_release_pinned_bo(dev_priv);
-
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- if (buf->pin_count > 0) {
- ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
- &new_flags) == true ? 0 : -EINVAL;
- goto out_unreserve;
- }
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
- if (likely(ret == 0) || ret == -ERESTARTSYS)
- goto out_unreserve;
-
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
-
-out_unreserve:
- if (!ret)
- vmw_bo_pin_reserved(buf, true);
-
- ttm_bo_unreserve(bo);
-err:
- ttm_write_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
- interruptible);
-}
-
-/**
- * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to pin.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement placement;
- struct ttm_place place;
- int ret = 0;
- uint32_t new_flags;
-
- place = vmw_vram_placement.placement[0];
- place.lpfn = bo->num_pages;
- placement.num_placement = 1;
- placement.placement = &place;
- placement.num_busy_placement = 1;
- placement.busy_placement = &place;
-
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err_unlock;
-
- /*
- * Is this buffer already in vram but not at the start of it?
- * In that case, evict it first because TTM isn't good at handling
- * that situation.
- */
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->num_pages &&
- bo->mem.start > 0 &&
- buf->pin_count == 0) {
- ctx.interruptible = false;
- (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
- }
-
- if (buf->pin_count > 0)
- ret = ttm_bo_mem_compat(&placement, &bo->mem,
- &new_flags) == true ? 0 : -EINVAL;
- else
- ret = ttm_bo_validate(bo, &placement, &ctx);
-
- /* For some reason we didn't end up at the start of vram */
- WARN_ON(ret == 0 && bo->offset != 0);
- if (!ret)
- vmw_bo_pin_reserved(buf, true);
-
- ttm_bo_unreserve(bo);
-err_unlock:
- ttm_write_unlock(&dev_priv->reservation_sem);
-
- return ret;
-}
-
-/**
- * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
- *
- * This function takes the reservation_sem in write mode.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to unpin.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- vmw_bo_pin_reserved(buf, false);
-
- ttm_bo_unreserve(bo);
-
-err:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
- * of a buffer.
- *
- * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
- * @ptr: SVGAGuestPtr returning the result.
- */
-void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
- SVGAGuestPtr *ptr)
-{
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
- ptr->offset = bo->offset;
- } else {
- ptr->gmrId = bo->mem.start;
- ptr->offset = 0;
- }
-}
-
-
-/**
- * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
- *
- * @vbo: The buffer object. Must be reserved.
- * @pin: Whether to pin or unpin.
- *
- */
-void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
-{
- struct ttm_operation_ctx ctx = { false, true };
- struct ttm_place pl;
- struct ttm_placement placement;
- struct ttm_buffer_object *bo = &vbo->base;
- uint32_t old_mem_type = bo->mem.mem_type;
- int ret;
-
- lockdep_assert_held(&bo->resv->lock.base);
-
- if (pin) {
- if (vbo->pin_count++ > 0)
- return;
- } else {
- WARN_ON(vbo->pin_count <= 0);
- if (--vbo->pin_count > 0)
- return;
- }
-
- pl.fpfn = 0;
- pl.lpfn = 0;
- pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
- | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
- if (pin)
- pl.flags |= TTM_PL_FLAG_NO_EVICT;
-
- memset(&placement, 0, sizeof(placement));
- placement.num_placement = 1;
- placement.placement = &pl;
-
- ret = ttm_bo_validate(bo, &placement, &ctx);
-
- BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
-}
-
-
-/*
- * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
- *
- * @vbo: The buffer object whose map we are tearing down.
- *
- * This function tears down a cached map set up using
- * vmw_dma_buffer_map_and_cache().
- */
-void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
-{
- if (vbo->map.bo == NULL)
- return;
-
- ttm_bo_kunmap(&vbo->map);
-}
-
-
-/*
- * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
- *
- * @vbo: The buffer object to map
- * Return: A kernel virtual address or NULL if mapping failed.
- *
- * This function maps a buffer object into the kernel address space, or
- * returns the virtual kernel address of an already existing map. The virtual
- * address remains valid as long as the buffer object is pinned or reserved.
- * The cached map is torn down on either
- * 1) Buffer object move
- * 2) Buffer object swapout
- * 3) Buffer object destruction
- *
- */
-void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
-{
- struct ttm_buffer_object *bo = &vbo->base;
- bool not_used;
- void *virtual;
- int ret;
-
- virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
- if (virtual)
- return virtual;
-
- ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
- if (ret)
- DRM_ERROR("Buffer object map failed: %d.\n", ret);
-
- return ttm_kmap_obj_virtual(&vbo->map, &not_used);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 09cc721160c4..bb6dbbe18835 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -137,6 +137,12 @@
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
+ union drm_vmw_gb_surface_create_ext_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
+ union drm_vmw_gb_surface_reference_ext_arg)
/**
* The core DRM version of this macro doesn't account for
@@ -153,9 +159,9 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+ VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
@@ -219,11 +225,17 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
vmw_gb_surface_reference_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU,
- vmw_user_dmabuf_synccpu_ioctl,
+ vmw_user_bo_synccpu_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
vmw_extended_context_define_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
+ vmw_gb_surface_define_ext_ioctl,
+ DRM_AUTH | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
+ vmw_gb_surface_reference_ext_ioctl,
+ DRM_AUTH | DRM_RENDER_ALLOW),
};
static const struct pci_device_id vmw_pci_id_list[] = {
@@ -258,6 +270,15 @@ MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
+static void vmw_print_capabilities2(uint32_t capabilities2)
+{
+ DRM_INFO("Capabilities2:\n");
+ if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
+ DRM_INFO(" Grow oTable.\n");
+ if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
+ DRM_INFO(" IntraSurface copy.\n");
+}
+
static void vmw_print_capabilities(uint32_t capabilities)
{
DRM_INFO("Capabilities:\n");
@@ -321,7 +342,7 @@ static void vmw_print_capabilities(uint32_t capabilities)
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
- struct vmw_dma_buffer *vbo;
+ struct vmw_buffer_object *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
@@ -335,9 +356,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (!vbo)
return -ENOMEM;
- ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
- &vmw_sys_ne_placement, false,
- &vmw_dmabuf_bo_free);
+ ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
+ &vmw_sys_ne_placement, false,
+ &vmw_bo_bo_free);
if (unlikely(ret != 0))
return ret;
@@ -358,7 +379,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
- vmw_dmabuf_unreference(&vbo);
+ vmw_bo_unreference(&vbo);
} else
dev_priv->dummy_query_bo = vbo;
@@ -460,7 +481,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
BUG_ON(dev_priv->pinned_bo != NULL);
- vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+ vmw_bo_unreference(&dev_priv->dummy_query_bo);
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
@@ -644,6 +665,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
+ mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
@@ -683,6 +705,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+
+ if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
+ dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
+
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
@@ -751,6 +779,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
vmw_print_capabilities(dev_priv->capabilities);
+ if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
+ vmw_print_capabilities2(dev_priv->capabilities2);
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0))
@@ -883,7 +913,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->has_mob) {
spin_lock(&dev_priv->cap_lock);
- vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
spin_unlock(&dev_priv->cap_lock);
}
@@ -898,9 +928,23 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
goto out_no_fifo;
+ if (dev_priv->has_dx) {
+ /*
+ * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
+ * support
+ */
+ if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+ SVGA3D_DEVCAP_SM41);
+ dev_priv->has_sm4_1 = vmw_read(dev_priv,
+ SVGA_REG_DEV_CAP);
+ }
+ }
+
DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
- DRM_INFO("Atomic: %s\n",
- (dev->driver->driver_features & DRIVER_ATOMIC) ? "yes" : "no");
+ DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
+ ? "yes." : "no.");
+ DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
VMWGFX_REPO, VMWGFX_GIT_VERSION);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5fcbe1620d50..1abe21758b0d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -43,10 +43,10 @@
#include <linux/sync_file.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20180322"
+#define VMWGFX_DRIVER_DATE "20180704"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 14
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 15
+#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -83,10 +83,10 @@
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
- bool gb_aware;
+ bool gb_aware; /* user-space is guest-backed aware */
};
-struct vmw_dma_buffer {
+struct vmw_buffer_object {
struct ttm_buffer_object base;
struct list_head res_list;
s32 pin_count;
@@ -120,7 +120,7 @@ struct vmw_resource {
unsigned long backup_size;
bool res_dirty; /* Protected by backup buffer reserved */
bool backup_dirty; /* Protected by backup buffer reserved */
- struct vmw_dma_buffer *backup;
+ struct vmw_buffer_object *backup;
unsigned long backup_offset;
unsigned long pin_count; /* Protected by resource reserved */
const struct vmw_res_func *func;
@@ -166,7 +166,7 @@ struct vmw_surface_offset;
struct vmw_surface {
struct vmw_resource res;
- uint32_t flags;
+ SVGA3dSurfaceAllFlags flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
struct drm_vmw_size base_size;
@@ -180,6 +180,8 @@ struct vmw_surface {
SVGA3dTextureFilter autogen_filter;
uint32_t multisample_count;
struct list_head view_list;
+ SVGA3dMSPattern multisample_pattern;
+ SVGA3dMSQualityLevel quality_level;
};
struct vmw_marker_queue {
@@ -304,7 +306,7 @@ struct vmw_sw_context{
uint32_t cmd_bounce_size;
struct list_head resource_list;
struct list_head ctx_resource_list; /* For contexts and cotables */
- struct vmw_dma_buffer *cur_query_bo;
+ struct vmw_buffer_object *cur_query_bo;
struct list_head res_relocations;
uint32_t *buf_start;
struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -315,7 +317,7 @@ struct vmw_sw_context{
bool staged_bindings_inuse;
struct list_head staged_cmd_res;
struct vmw_resource_val_node *dx_ctx_node;
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man;
};
@@ -386,6 +388,7 @@ struct vmw_private {
uint32_t initial_height;
u32 *mmio_virt;
uint32_t capabilities;
+ uint32_t capabilities2;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t max_mob_pages;
@@ -397,6 +400,7 @@ struct vmw_private {
spinlock_t cap_lock;
bool has_dx;
bool assume_16bpp;
+ bool has_sm4_1;
/*
* VGA registers.
@@ -412,6 +416,15 @@ struct vmw_private {
uint32_t num_displays;
/*
+ * Currently, requested_layout_mutex is used to protect the gui
+ * positioning state in the display unit. With that use case, this
+ * mutex is only taken during the layout ioctl and atomic check_modeset.
+ * Other display unit state could be protected by this mutex, but that
+ * needs careful consideration.
+ */
+ struct mutex requested_layout_mutex;
+
+ /*
* Framebuffer info.
*/
@@ -513,8 +526,8 @@ struct vmw_private {
* are protected by the cmdbuf mutex.
*/
- struct vmw_dma_buffer *dummy_query_bo;
- struct vmw_dma_buffer *pinned_bo;
+ struct vmw_buffer_object *dummy_query_bo;
+ struct vmw_buffer_object *pinned_bo;
uint32_t query_cid;
uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
@@ -623,43 +636,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf);
+ struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
-extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interuptable,
- void (*bo_free) (struct ttm_buffer_object *bo));
-extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile);
-extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf,
- struct ttm_base_object **p_base);
-extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
- struct vmw_dma_buffer *dma_buf,
- uint32_t *handle);
-extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
- uint32_t cur_validate_node);
-extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
-extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t id, struct vmw_dma_buffer **out,
- struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -670,43 +653,70 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
bool switch_backup,
- struct vmw_dma_buffer *new_backup,
+ struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset);
-extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
-extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
-extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
- struct vmw_fence_obj *fence);
+extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
-
+extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
/**
- * DMA buffer helper routines - vmwgfx_dmabuf.c
+ * Buffer object helper functions - vmwgfx_bo.c
*/
-extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- struct ttm_placement *placement,
+extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
+ struct vmw_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible);
+extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible);
+extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible);
+extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_buffer_object *bo,
bool interruptible);
-extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible);
-extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible);
-extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- bool interruptible);
-extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- bool interruptible);
+extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
+ struct vmw_buffer_object *bo,
+ bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
-extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
-extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
+extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
+extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_init(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible,
+ void (*bo_free)(struct ttm_buffer_object *bo));
+extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile);
+extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_dma_buf,
+ struct ttm_base_object **p_base);
+extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
+ struct vmw_buffer_object *dma_buf,
+ uint32_t *handle);
+extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+ uint32_t id, struct vmw_buffer_object **out,
+ struct ttm_base_object **base);
+extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence);
+extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
+extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
+extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
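
To make the renamed API concrete, here is a minimal allocation sketch under the new naming. It mirrors the vmw_fb_create_bo() hunk further down; apart from the declared entry points (vmw_bo_init, vmw_bo_bo_free, vmw_sys_placement), the function name and size are illustrative:

	static int example_alloc_bo(struct vmw_private *dev_priv,
				    struct vmw_buffer_object **out)
	{
		struct vmw_buffer_object *vbo;
		int ret;

		vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
		if (!vbo)
			return -ENOMEM;

		/* On failure, vmw_bo_init() frees the buffer via bo_free. */
		ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
				  &vmw_sys_placement, false,
				  &vmw_bo_bo_free);
		if (ret != 0)
			return ret;

		*out = vbo;
		return 0;
	}

The matching release is vmw_bo_unreference(&vbo), whose inline definition appears later in this patch.
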
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -758,7 +768,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
/**
- * TTM buffer object driver - vmwgfx_buffer.c
+ * TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
extern const size_t vmw_tt_size;
@@ -1041,8 +1051,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
- struct vmw_dma_buffer *mob);
-extern struct vmw_dma_buffer *
+ struct vmw_buffer_object *mob);
+extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
@@ -1070,14 +1080,22 @@ extern int vmw_surface_validate(struct vmw_private *dev_priv,
struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
- uint32_t svga3d_flags,
+ SVGA3dSurfaceAllFlags svga3d_flags,
SVGA3dSurfaceFormat format,
bool for_scanout,
uint32_t num_mip_levels,
uint32_t multisample_count,
uint32_t array_size,
struct drm_vmw_size size,
+ SVGA3dMSPattern multisample_pattern,
+ SVGA3dMSQualityLevel quality_level,
struct vmw_surface **srf_out);
+extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv);
/*
* Shader management - vmwgfx_shader.c
@@ -1224,6 +1242,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
u32 w, u32 h,
struct vmw_diff_cpy *diff);
+/* Host messaging - vmwgfx_msg.c: */
+int vmw_host_get_guestinfo(const char *guest_info_param,
+ char *buffer, size_t *length);
+int vmw_host_log(const char *log);
+
/**
* Inline helper functions
*/
@@ -1243,9 +1266,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
return srf;
}
-static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
- struct vmw_dma_buffer *tmp_buf = *buf;
+ struct vmw_buffer_object *tmp_buf = *buf;
*buf = NULL;
if (tmp_buf != NULL) {
@@ -1255,7 +1278,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
}
}
-static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+static inline struct vmw_buffer_object *
+vmw_bo_reference(struct vmw_buffer_object *buf)
{
if (ttm_bo_reference(&buf->base))
return buf;
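
A short usage sketch of the renamed reference pair; buf is assumed to be a valid buffer object already held by the caller:

	struct vmw_buffer_object *tmp;

	tmp = vmw_bo_reference(buf);	/* take an extra reference */
	/* ... use tmp ... */
	vmw_bo_unreference(&tmp);	/* drop it; tmp is set to NULL */
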
@@ -1302,10 +1326,4 @@ static inline void vmw_mmio_write(u32 value, u32 *addr)
{
WRITE_ONCE(*addr, value);
}
-
-/**
- * Add vmw_msg module function
- */
-extern int vmw_host_log(const char *log);
-
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c9d5cc237124..1f134570b759 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
struct list_head head;
struct drm_hash_item hash;
struct vmw_resource *res;
- struct vmw_dma_buffer *new_backup;
+ struct vmw_buffer_object *new_backup;
struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
u32 first_usage : 1;
@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
- struct vmw_dma_buffer **vmw_bo_p);
+ struct vmw_buffer_object **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_dma_buffer *vbo,
+ struct vmw_buffer_object *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
/**
@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
}
vmw_resource_unreserve(res, switch_backup, val->new_backup,
val->new_backup_offset);
- vmw_dmabuf_unreference(&val->new_backup);
+ vmw_bo_unreference(&val->new_backup);
}
}
@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
}
if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob)
@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_dma_buffer *vbo,
+ struct vmw_buffer_object *vbo,
bool validate_as_mob,
uint32_t *p_val_node)
{
@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
return ret;
if (res->backup) {
- struct vmw_dma_buffer *vbo = res->backup;
+ struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list
(sw_context, vbo,
@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
}
if (sw_context->dx_query_mob) {
- struct vmw_dma_buffer *expected_dx_query_mob;
+ struct vmw_buffer_object *expected_dx_query_mob;
expected_dx_query_mob =
vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
- struct vmw_dma_buffer *backup = res->backup;
+ struct vmw_buffer_object *backup = res->backup;
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
/* Check if the resource switched backup buffer */
if (backup && res->backup && (backup != res->backup)) {
- struct vmw_dma_buffer *vbo = res->backup;
+ struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list
(sw_context, vbo,
@@ -821,7 +821,7 @@ out_no_reloc:
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindAllQuery body;
@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *new_query_bo,
+ struct vmw_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) {
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ vmw_bo_unreference(&dev_priv->pinned_bo);
}
if (!sw_context->needs_post_query_barrier) {
@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true;
dev_priv->pinned_bo =
- vmw_dmabuf_reference(sw_context->cur_query_bo);
+ vmw_bo_reference(sw_context->cur_query_bo);
}
}
}
@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
- struct vmw_dma_buffer **vmw_bo_p)
+ struct vmw_buffer_object **vmw_bo_p)
{
- struct vmw_dma_buffer *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo = NULL;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
- NULL);
+ ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
ret = -EINVAL;
@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
return 0;
out_no_reloc:
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
}
@@ -1343,15 +1342,14 @@ out_no_reloc:
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
- struct vmw_dma_buffer **vmw_bo_p)
+ struct vmw_buffer_object **vmw_bo_p)
{
- struct vmw_dma_buffer *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo = NULL;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
- NULL);
+ ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
ret = -EINVAL;
@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
return 0;
out_no_reloc:
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
}
@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
SVGA3dCmdDXBindQuery q;
} *cmd;
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
int ret;
@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
sw_context->dx_query_mob = vmw_bo;
sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdEndGBQuery q;
@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdEndQuery q;
@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery q;
@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return 0;
}
@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery q;
@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return 0;
}
@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
header);
out_no_surface:
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
int ret;
struct {
@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_dma_buffer *dma_buf;
+ struct vmw_buffer_object *dma_buf;
int ret;
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
if (val_node->first_usage)
val_node->no_buffer_needed = true;
- vmw_dmabuf_unreference(&val_node->new_backup);
+ vmw_bo_unreference(&val_node->new_backup);
val_node->new_backup = dma_buf;
val_node->new_backup_offset = backup_offset;
@@ -3118,6 +3116,32 @@ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
&cmd->body.destSid, NULL);
}
+/**
+ * vmw_cmd_intra_surface_copy - Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdIntraSurfaceCopy body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
+ return -EINVAL;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.surface.sid, NULL);
+}
+
+
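
For reference, a well-formed command as the guest would encode it, matching the header/body layout the validator above expects; the sid value is a placeholder and the copy geometry is omitted:

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdIntraSurfaceCopy body;
	} cmd = {
		.header = {
			.id   = SVGA_3D_CMD_INTRA_SURFACE_COPY,
			.size = sizeof(SVGA3dCmdIntraSurfaceCopy),
		},
	};

	cmd.body.surface.sid = surface_handle;	/* placeholder surface id */
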
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3232,9 +3256,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
false, false, false),
@@ -3473,6 +3497,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
&vmw_cmd_dx_transfer_from_buffer,
true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
+ true, false, true),
};
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
@@ -3701,8 +3727,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
bool interruptible,
bool validate_as_mob)
{
- struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
- base);
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
struct ttm_operation_ctx ctx = { interruptible, true };
int ret;
@@ -4423,7 +4449,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
return;
@@ -4432,7 +4458,7 @@ out_no_emit:
out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ vmw_bo_unreference(&dev_priv->pinned_bo);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 54e300365a5c..b913a56f3426 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -42,7 +42,7 @@ struct vmw_fb_par {
void *vmalloc;
struct mutex bo_mutex;
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
unsigned bo_size;
struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode;
@@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr;
- struct vmw_dma_buffer *vbo = par->vmw_bo;
+ struct vmw_buffer_object *vbo = par->vmw_bo;
void *virtual;
if (!READ_ONCE(par->dirty.active))
@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
- virtual = vmw_dma_buffer_map_and_cache(vbo);
+ virtual = vmw_bo_map_and_cache(vbo);
if (!virtual)
goto out_unreserve;
@@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
*/
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
- size_t size, struct vmw_dma_buffer **out)
+ size_t size, struct vmw_buffer_object **out)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
int ret;
(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
@@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
goto err_unlock;
}
- ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+ ret = vmw_bo_init(vmw_priv, vmw_bo, size,
&vmw_sys_placement,
false,
- &vmw_dmabuf_bo_free);
+ &vmw_bo_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */
@@ -439,38 +439,13 @@ static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
struct drm_crtc *crtc = set->crtc;
- struct drm_framebuffer *fb;
- struct drm_crtc *tmp;
- struct drm_device *dev = set->crtc->dev;
struct drm_modeset_acquire_ctx ctx;
int ret;
drm_modeset_acquire_init(&ctx, 0);
restart:
- /*
- * NOTE: ->set_config can also disable other crtcs (if we steal all
- * connectors from it), hence we need to refcount the fbs across all
- * crtcs. Atomic modeset will have saner semantics ...
- */
- drm_for_each_crtc(tmp, dev)
- tmp->primary->old_fb = tmp->primary->fb;
-
- fb = set->fb;
-
ret = crtc->funcs->set_config(set, &ctx);
- if (ret == 0) {
- crtc->primary->crtc = crtc;
- crtc->primary->fb = fb;
- }
-
- drm_for_each_crtc(tmp, dev) {
- if (tmp->primary->fb)
- drm_framebuffer_get(tmp->primary->fb);
- if (tmp->primary->old_fb)
- drm_framebuffer_put(tmp->primary->old_fb);
- tmp->primary->old_fb = NULL;
- }
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
@@ -516,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
}
if (par->vmw_bo && detach_bo && unref_bo)
- vmw_dmabuf_unreference(&par->vmw_bo);
+ vmw_bo_unreference(&par->vmw_bo);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 9ed544f8958f..3d546d409334 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -175,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
struct vmw_private *dev_priv = fman->dev_priv;
struct vmwgfx_wait_cb cb;
long ret = timeout;
- unsigned long irq_flags;
if (likely(vmw_fence_obj_signaled(fence)))
return timeout;
@@ -183,7 +182,7 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
vmw_seqno_waiter_add(dev_priv);
- spin_lock_irqsave(f->lock, irq_flags);
+ spin_lock(f->lock);
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -194,30 +193,45 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
cb.task = current;
list_add(&cb.base.node, &f->cb_list);
- while (ret > 0) {
+ for (;;) {
__vmw_fences_update(fman);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
- break;
+ /*
+ * We can use the barrier free __set_current_state() since
+ * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
+ * fence spinlock.
+ */
if (intr)
__set_current_state(TASK_INTERRUPTIBLE);
else
__set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock_irqrestore(f->lock, irq_flags);
- ret = schedule_timeout(ret);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
+ if (ret == 0 && timeout > 0)
+ ret = 1;
+ break;
+ }
- spin_lock_irqsave(f->lock, irq_flags);
- if (ret > 0 && intr && signal_pending(current))
+ if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
- }
+ break;
+ }
+ if (ret == 0)
+ break;
+
+ spin_unlock(f->lock);
+
+ ret = schedule_timeout(ret);
+
+ spin_lock(f->lock);
+ }
+ __set_current_state(TASK_RUNNING);
if (!list_empty(&cb.base.node))
list_del(&cb.base.node);
- __set_current_state(TASK_RUNNING);
out:
- spin_unlock_irqrestore(f->lock, irq_flags);
+ spin_unlock(f->lock);
vmw_seqno_waiter_remove(dev_priv);
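
The reworked wait is the standard prepare-to-sleep idiom: the task state is published under the fence lock before the signaled bit is re-tested, so a wakeup arriving between the test and schedule_timeout() cannot be lost. Condensed to its shape (callback registration, fence bookkeeping and the zero-timeout edge case elided):

	for (;;) {
		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;			/* signaled */

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;	/* interrupted */
			break;
		}

		if (ret == 0)
			break;			/* timed out */

		spin_unlock(f->lock);
		ret = schedule_timeout(ret);	/* sleep without the lock */
		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
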
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 20224dba9d8e..c9382933c2b9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2012 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index a1c68e6a689e..d0fd147ef75f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 66ffa1d4759c..007a0cc7f232 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index f2f9d88131f2..ddb1e9365a3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c5e8eae0dbe2..172a6ba6539c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -56,6 +56,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_HW_CAPS:
param->value = dev_priv->capabilities;
break;
+ case DRM_VMW_PARAM_HW_CAPS2:
+ param->value = dev_priv->capabilities2;
+ break;
case DRM_VMW_PARAM_FIFO_CAPS:
param->value = dev_priv->fifo.capabilities;
break;
@@ -113,6 +116,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_DX:
param->value = dev_priv->has_dx;
break;
+ case DRM_VMW_PARAM_SM4_1:
+ param->value = dev_priv->has_sm4_1;
+ break;
default:
return -EINVAL;
}
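
Assuming standard libdrm, user space can probe the new parameters through the existing DRM_VMW_GET_PARAM path; a hedged sketch (the same pattern works for DRM_VMW_PARAM_HW_CAPS2):

	#include <string.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	static int vmw_has_sm4_1(int fd)
	{
		struct drm_vmw_getparam_arg arg;

		memset(&arg, 0, sizeof(arg));
		arg.param = DRM_VMW_PARAM_SM4_1;

		if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
					&arg, sizeof(arg)))
			return 0;	/* older kernels reject the param */

		return arg.value != 0;
	}
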
@@ -122,15 +128,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
{
- /* If the header is updated, update the format test as well! */
- BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX);
-
- if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 &&
- cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM)
- fmt_value &= ~(SVGADX_DXFMT_MULTISAMPLE_2 |
- SVGADX_DXFMT_MULTISAMPLE_4 |
- SVGADX_DXFMT_MULTISAMPLE_8);
- else if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
+ /*
+ * A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES
+ * to check the sample count supported by the virtual device. Since there
+ * never was support for a multisample count on the backing MOB, return 0.
+ */
+ if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
return 0;
return fmt_value;
@@ -377,8 +380,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
}
vfb = vmw_framebuffer_to_vfb(fb);
- if (!vfb->dmabuf) {
- DRM_ERROR("Framebuffer not dmabuf backed.\n");
+ if (!vfb->bo) {
+ DRM_ERROR("Framebuffer not buffer backed.\n");
ret = -EINVAL;
goto out_no_ttm_lock;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index b9239ba067c4..c3ad4478266b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 01f2dc9e6f52..23beff5d8e3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -85,10 +85,10 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}
-static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
- u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
+static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *bo,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
{
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
@@ -100,13 +100,13 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
+ ret = ttm_bo_reserve(&bo->base, true, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
}
- ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0))
goto err_unreserve;
@@ -116,7 +116,7 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
ttm_bo_kunmap(&map);
err_unreserve:
- ttm_bo_unreserve(&dmabuf->base);
+ ttm_bo_unreserve(&bo->base);
return ret;
}
@@ -352,13 +352,13 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
if (vps->surf)
vmw_surface_unreference(&vps->surf);
- if (vps->dmabuf)
- vmw_dmabuf_unreference(&vps->dmabuf);
+ if (vps->bo)
+ vmw_bo_unreference(&vps->bo);
if (fb) {
- if (vmw_framebuffer_to_vfb(fb)->dmabuf) {
- vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
- vmw_dmabuf_reference(vps->dmabuf);
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vmw_bo_reference(vps->bo);
} else {
vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
vmw_surface_reference(vps->surf);
@@ -390,7 +390,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
}
du->cursor_surface = vps->surf;
- du->cursor_dmabuf = vps->dmabuf;
+ du->cursor_bo = vps->bo;
if (vps->surf) {
du->cursor_age = du->cursor_surface->snooper.age;
@@ -399,11 +399,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
vps->surf->snooper.image,
64, 64, hotspot_x,
hotspot_y);
- } else if (vps->dmabuf) {
- ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
- plane->state->crtc_w,
- plane->state->crtc_h,
- hotspot_x, hotspot_y);
+ } else if (vps->bo) {
+ ret = vmw_cursor_update_bo(dev_priv, vps->bo,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ hotspot_x, hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return;
@@ -519,7 +519,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
ret = -EINVAL;
}
- if (!vmw_framebuffer_to_vfb(fb)->dmabuf)
+ if (!vmw_framebuffer_to_vfb(fb)->bo)
surface = vmw_framebuffer_to_vfbs(fb)->surface;
if (surface && !surface->snooper.image) {
@@ -535,9 +535,9 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
- int connector_mask = 1 << drm_connector_index(&du->connector);
+ int connector_mask = drm_connector_mask(&du->connector);
bool has_primary = new_state->plane_mask &
- BIT(drm_plane_index(crtc->primary));
+ drm_plane_mask(crtc->primary);
/* We always want to have an active plane with an active CRTC */
if (has_primary != new_state->enable)
@@ -687,8 +687,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
if (vps->surf)
(void) vmw_surface_reference(vps->surf);
- if (vps->dmabuf)
- (void) vmw_dmabuf_reference(vps->dmabuf);
+ if (vps->bo)
+ (void) vmw_bo_reference(vps->bo);
state = &vps->base;
@@ -745,8 +745,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
if (vps->surf)
vmw_surface_unreference(&vps->surf);
- if (vps->dmabuf)
- vmw_dmabuf_unreference(&vps->dmabuf);
+ if (vps->bo)
+ vmw_bo_unreference(&vps->bo);
drm_atomic_helper_plane_destroy_state(plane, state);
}
@@ -902,12 +902,12 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
/**
* vmw_kms_readback - Perform a readback from the screen system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
@@ -951,7 +951,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
*mode_cmd,
- bool is_dmabuf_proxy)
+ bool is_bo_proxy)
{
struct drm_device *dev = dev_priv->dev;
@@ -1019,7 +1019,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface);
vfbs->base.user_handle = mode_cmd->handles[0];
- vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
+ vfbs->is_bo_proxy = is_bo_proxy;
*out = &vfbs->base;
@@ -1038,30 +1038,30 @@ out_err1:
}
/*
- * Dmabuf framebuffer code
+ * Buffer-object framebuffer code
*/
-static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
- struct vmw_framebuffer_dmabuf *vfbd =
+ struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
drm_framebuffer_cleanup(framebuffer);
- vmw_dmabuf_unreference(&vfbd->buffer);
+ vmw_bo_unreference(&vfbd->buffer);
if (vfbd->base.user_obj)
ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
-static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
+static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_framebuffer_dmabuf *vfbd =
+ struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect;
int ret, increment = 1;
@@ -1092,13 +1092,13 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
true, true, NULL);
break;
case vmw_du_screen_object:
- ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
- clips, NULL, num_clips,
- increment, true, NULL, NULL);
+ ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
+ clips, NULL, num_clips,
+ increment, true, NULL, NULL);
break;
case vmw_du_legacy:
- ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
- clips, num_clips, increment);
+ ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
+ clips, num_clips, increment);
break;
default:
ret = -EINVAL;
@@ -1114,23 +1114,23 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
return ret;
}
-static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
- .destroy = vmw_framebuffer_dmabuf_destroy,
- .dirty = vmw_framebuffer_dmabuf_dirty,
+static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+ .destroy = vmw_framebuffer_bo_destroy,
+ .dirty = vmw_framebuffer_bo_dirty,
};
/**
- * Pin the dmabuffer in a location suitable for access by the
+ * Pin the buffer object in a location suitable for access by the
* display system.
*/
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
struct ttm_placement *placement;
int ret;
- buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
if (!buf)
@@ -1139,12 +1139,12 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
switch (dev_priv->active_display_unit) {
case vmw_du_legacy:
vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+ ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
vmw_overlay_resume_all(dev_priv);
break;
case vmw_du_screen_object:
case vmw_du_screen_target:
- if (vfb->dmabuf) {
+ if (vfb->bo) {
if (dev_priv->capabilities & SVGA_CAP_3D) {
/*
* Use surface DMA to get content to
@@ -1160,8 +1160,7 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
placement = &vmw_mob_placement;
}
- return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement,
- false);
+ return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
default:
return -EINVAL;
}
@@ -1172,36 +1171,36 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
- buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
if (WARN_ON(!buf))
return 0;
- return vmw_dmabuf_unpin(dev_priv, buf, false);
+ return vmw_bo_unpin(dev_priv, buf, false);
}
/**
- * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ * vmw_create_bo_proxy - create a proxy surface for the buffer object
*
* @dev: DRM device
* @mode_cmd: parameters for the new surface
- * @dmabuf_mob: MOB backing the DMA buf
+ * @bo_mob: MOB backing the buffer object
* @srf_out: newly created surface
*
- * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * When the content FB is a buffer object, we create a surface as a proxy to the
* same buffer. This way we can do a surface copy rather than a surface DMA.
* This is a more efficient approach
*
* RETURNS:
* 0 on success, error code otherwise
*/
-static int vmw_create_dmabuf_proxy(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct vmw_dma_buffer *dmabuf_mob,
- struct vmw_surface **srf_out)
+static int vmw_create_bo_proxy(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct vmw_buffer_object *bo_mob,
+ struct vmw_surface **srf_out)
{
uint32_t format;
struct drm_vmw_size content_base_size = {0};
@@ -1239,15 +1238,17 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
content_base_size.depth = 1;
ret = vmw_surface_gb_priv_define(dev,
- 0, /* kernel visible only */
- 0, /* flags */
- format,
- true, /* can be a scanout buffer */
- 1, /* num of mip levels */
- 0,
- 0,
- content_base_size,
- srf_out);
+ 0, /* kernel visible only */
+ 0, /* flags */
+ format,
+ true, /* can be a scanout buffer */
+ 1, /* num of mip levels */
+ 0,
+ 0,
+ content_base_size,
+ SVGA3D_MS_PATTERN_NONE,
+ SVGA3D_MS_QUALITY_NONE,
+ srf_out);
if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret;
@@ -1258,8 +1259,8 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
/* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true);
- vmw_dmabuf_unreference(&res->backup);
- res->backup = vmw_dmabuf_reference(dmabuf_mob);
+ vmw_bo_unreference(&res->backup);
+ res->backup = vmw_bo_reference(bo_mob);
res->backup_offset = 0;
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1269,21 +1270,21 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
-static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
- struct vmw_framebuffer **out,
- const struct drm_mode_fb_cmd2
- *mode_cmd)
+static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *bo,
+ struct vmw_framebuffer **out,
+ const struct drm_mode_fb_cmd2
+ *mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
- struct vmw_framebuffer_dmabuf *vfbd;
+ struct vmw_framebuffer_bo *vfbd;
unsigned int requested_size;
struct drm_format_name_buf format_name;
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
- if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+ if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
@@ -1312,20 +1313,20 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
}
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
- vfbd->base.dmabuf = true;
- vfbd->buffer = vmw_dmabuf_reference(dmabuf);
+ vfbd->base.bo = true;
+ vfbd->buffer = vmw_bo_reference(bo);
vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
- &vmw_framebuffer_dmabuf_funcs);
+ &vmw_framebuffer_bo_funcs);
if (ret)
goto out_err2;
return 0;
out_err2:
- vmw_dmabuf_unreference(&dmabuf);
+ vmw_bo_unreference(&bo);
kfree(vfbd);
out_err1:
return ret;
@@ -1354,57 +1355,57 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* vmw_kms_new_framebuffer - Create a new framebuffer.
*
* @dev_priv: Pointer to device private struct.
- * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
+ * @bo: Pointer to buffer object to wrap the kms framebuffer around.
+ * Either @bo or @surface must be NULL.
* @surface: Pointer to a surface to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
- * @only_2d: No presents will occur to this dma buffer based framebuffer. This
- * Helps the code to do some important optimizations.
+ * Either @bo or @surface must be NULL.
+ * @only_2d: No presents will occur to this buffer object based framebuffer.
+ * This helps the code to do some important optimizations.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
+ struct vmw_buffer_object *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
- bool is_dmabuf_proxy = false;
+ bool is_bo_proxy = false;
int ret;
/*
* We cannot use the SurfaceDMA command in a non-accelerated VM,
- * therefore, wrap the DMA buf in a surface so we can use the
+ * therefore, wrap the buffer object in a surface so we can use the
* SurfaceCopy command.
*/
if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
- dmabuf && only_2d &&
+ bo && only_2d &&
mode_cmd->width > 64 && /* Don't create a proxy for cursor */
dev_priv->active_display_unit == vmw_du_screen_target) {
- ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
- dmabuf, &surface);
+ ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+ bo, &surface);
if (ret)
return ERR_PTR(ret);
- is_dmabuf_proxy = true;
+ is_bo_proxy = true;
}
/* Create the new framebuffer depending one what we have */
if (surface) {
ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
mode_cmd,
- is_dmabuf_proxy);
+ is_bo_proxy);
/*
- * vmw_create_dmabuf_proxy() adds a reference that is no longer
+ * vmw_create_bo_proxy() adds a reference that is no longer
* needed
*/
- if (is_dmabuf_proxy)
+ if (is_bo_proxy)
vmw_surface_unreference(&surface);
- } else if (dmabuf) {
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
- mode_cmd);
+ } else if (bo) {
+ ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
+ mode_cmd);
} else {
BUG();
}
@@ -1430,23 +1431,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
- struct vmw_dma_buffer *bo = NULL;
+ struct vmw_buffer_object *bo = NULL;
struct ttm_base_object *user_obj;
int ret;
- /**
- * This code should be conditioned on Screen Objects not being used.
- * If screen objects are used, we can allocate a GMR to hold the
- * requested framebuffer.
- */
-
- if (!vmw_kms_validate_mode_vram(dev_priv,
- mode_cmd->pitches[0],
- mode_cmd->height)) {
- DRM_ERROR("Requested mode exceed bounding box limit.\n");
- return ERR_PTR(-ENOMEM);
- }
-
/*
* Take a reference on the user object of the resource
* backing the kms fb. This ensures that user-space handle
@@ -1466,7 +1454,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* End conditioned code.
*/
- /* returns either a dmabuf or surface */
+ /* returns either a bo or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile,
mode_cmd->handles[0],
&surface, &bo);
@@ -1494,7 +1482,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo)
- vmw_dmabuf_unreference(&bo);
+ vmw_bo_unreference(&bo);
if (surface)
vmw_surface_unreference(&surface);
@@ -1508,7 +1496,168 @@ err_out:
return &vfb->base;
}
+/**
+ * vmw_kms_check_display_memory - Validates display memory required for a
+ * topology
+ * @dev: DRM device
+ * @num_rects: number of drm_rect in rects
+ * @rects: array of drm_rect representing the topology to validate indexed by
+ * crtc index.
+ *
+ * Returns:
+ * 0 on success otherwise negative error code
+ */
+static int vmw_kms_check_display_memory(struct drm_device *dev,
+ uint32_t num_rects,
+ struct drm_rect *rects)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_rect bounding_box = {0};
+ u64 total_pixels = 0, pixel_mem, bb_mem;
+ int i;
+
+ for (i = 0; i < num_rects; i++) {
+ /*
+ * Currently this check is limiting the topology within max
+ * texture/screentarget size. This should change in future when
+ * user-space support multiple fb with topology.
+ */
+ if (rects[i].x1 < 0 || rects[i].y1 < 0 ||
+ rects[i].x2 > mode_config->max_width ||
+ rects[i].y2 > mode_config->max_height) {
+ DRM_ERROR("Invalid GUI layout.\n");
+ return -EINVAL;
+ }
+ /* Bounding box upper left is at (0,0). */
+ if (rects[i].x2 > bounding_box.x2)
+ bounding_box.x2 = rects[i].x2;
+
+ if (rects[i].y2 > bounding_box.y2)
+ bounding_box.y2 = rects[i].y2;
+
+ total_pixels += (u64) drm_rect_width(&rects[i]) *
+ (u64) drm_rect_height(&rects[i]);
+ }
+
+ /* Virtual svga device primary limits are always in 32-bpp. */
+ pixel_mem = total_pixels * 4;
+
+ /*
+ * For HV10 and below, prim_bb_mem is the VRAM size. When
+ * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM size
+ * is the limit on the primary bounding box.
+ */
+ if (pixel_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Combined output size too large.\n");
+ return -EINVAL;
+ }
+
+ /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
+ if (dev_priv->active_display_unit != vmw_du_screen_target ||
+ !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
+ bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
+
+ if (bb_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Topology is beyond supported limits.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
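
As a worked example of the check above: a side-by-side pair of 1920x1080 outputs gives total_pixels = 2 * 1920 * 1080 = 4,147,200, hence pixel_mem = 16,588,800 bytes (about 15.8 MiB). The bounding box spans 3840x1080, so bb_mem = 3840 * 1080 * 4 is the same 16,588,800 bytes; both values must fit within prim_bb_mem.
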
+/**
+ * vmw_kms_check_topology - Validates topology in drm_atomic_state
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Returns:
+ * 0 on success otherwise negative error code
+ */
+static int vmw_kms_check_topology(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_rect *rects;
+ struct drm_crtc *crtc;
+ uint32_t i;
+ int ret = 0;
+
+ rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
+ GFP_KERNEL);
+ if (!rects)
+ return -ENOMEM;
+
+ mutex_lock(&dev_priv->requested_layout_mutex);
+
+ drm_for_each_crtc(crtc, dev) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_crtc_state *crtc_state = crtc->state;
+
+ i = drm_crtc_index(crtc);
+
+ if (crtc_state && crtc_state->enable) {
+ rects[i].x1 = du->gui_x;
+ rects[i].y1 = du->gui_y;
+ rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
+ rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
+ }
+ }
+
+ /* Determine change to topology due to new atomic state */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct vmw_connector_state *vmw_conn_state;
+
+ if (!new_crtc_state->enable && old_crtc_state->enable) {
+ rects[i].x1 = 0;
+ rects[i].y1 = 0;
+ rects[i].x2 = 0;
+ rects[i].y2 = 0;
+ continue;
+ }
+
+ if (!du->pref_active) {
+ ret = -EINVAL;
+ goto clean;
+ }
+
+ /*
+ * For vmwgfx each crtc has only one connector attached and it
+ * does not change, so there is no real need to check
+ * crtc->connector_mask and iterate over it.
+ */
+ connector = &du->connector;
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ goto clean;
+ }
+
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+ vmw_conn_state->gui_x = du->gui_x;
+ vmw_conn_state->gui_y = du->gui_y;
+
+ rects[i].x1 = du->gui_x;
+ rects[i].y1 = du->gui_y;
+ rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay;
+ rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay;
+ }
+
+ ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
+ rects);
+
+clean:
+ mutex_unlock(&dev_priv->requested_layout_mutex);
+ kfree(rects);
+ return ret;
+}
/**
* vmw_kms_atomic_check_modeset - validate state object for modeset changes
@@ -1520,36 +1669,39 @@ err_out:
* us to assign a value to mode->crtc_clock so that
* drm_calc_timestamping_constants() won't throw an error message
*
- * RETURNS
+ * Returns:
* Zero for success or -errno
*/
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- struct vmw_private *dev_priv = vmw_priv(dev);
- int i;
-
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- unsigned long requested_bb_mem = 0;
+ struct drm_crtc_state *crtc_state;
+ bool need_modeset = false;
+ int i, ret;
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- if (crtc->primary->fb) {
- int cpp = crtc->primary->fb->pitches[0] /
- crtc->primary->fb->width;
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
- requested_bb_mem += crtc->mode.hdisplay * cpp *
- crtc->mode.vdisplay;
- }
+ if (!state->allow_modeset)
+ return ret;
- if (requested_bb_mem > dev_priv->prim_bb_mem)
- return -EINVAL;
- }
+ /*
+ * The legacy path does not set allow_modeset properly (see
+ * @drm_atomic_helper_update_plane), which would result in unnecessary
+ * calls to vmw_kms_check_topology. Hence this extra check.
+ */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ need_modeset = true;
}
- return drm_atomic_helper_check(dev, state);
+ if (need_modeset)
+ return vmw_kms_check_topology(dev, state);
+
+ return ret;
}
static const struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1841,40 +1993,49 @@ void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}
-
-/*
- * Small shared kms functions.
+/**
+ * vmw_du_update_layout - Update the display units with the topology from
+ * the resolution plugin and generate a DRM uevent
+ * @dev_priv: device private
+ * @num_rects: number of drm_rect in rects
+ * @rects: topology to update
*/
-
-static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
- struct drm_vmw_rect *rects)
+static int vmw_du_update_layout(struct vmw_private *dev_priv,
+ unsigned int num_rects, struct drm_rect *rects)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_display_unit *du;
struct drm_connector *con;
+ struct drm_connector_list_iter conn_iter;
- mutex_lock(&dev->mode_config.mutex);
-
-#if 0
- {
- unsigned int i;
-
- DRM_INFO("%s: new layout ", __func__);
- for (i = 0; i < num; i++)
- DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
- rects[i].w, rects[i].h);
- DRM_INFO("\n");
+ /*
+ * Currently only gui_x/y is protected with requested_layout_mutex.
+ */
+ mutex_lock(&dev_priv->requested_layout_mutex);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(con, &conn_iter) {
+ du = vmw_connector_to_du(con);
+ if (num_rects > du->unit) {
+ du->pref_width = drm_rect_width(&rects[du->unit]);
+ du->pref_height = drm_rect_height(&rects[du->unit]);
+ du->pref_active = true;
+ du->gui_x = rects[du->unit].x1;
+ du->gui_y = rects[du->unit].y1;
+ } else {
+ du->pref_width = 800;
+ du->pref_height = 600;
+ du->pref_active = false;
+ du->gui_x = 0;
+ du->gui_y = 0;
+ }
}
-#endif
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev_priv->requested_layout_mutex);
+ mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(con, &dev->mode_config.connector_list, head) {
du = vmw_connector_to_du(con);
- if (num > du->unit) {
- du->pref_width = rects[du->unit].w;
- du->pref_height = rects[du->unit].h;
- du->pref_active = true;
- du->gui_x = rects[du->unit].x;
- du->gui_y = rects[du->unit].y;
+ if (num_rects > du->unit) {
drm_object_property_set_value
(&con->base, dev->mode_config.suggested_x_property,
du->gui_x);
@@ -1882,9 +2043,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
(&con->base, dev->mode_config.suggested_y_property,
du->gui_y);
} else {
- du->pref_width = 800;
- du->pref_height = 600;
- du->pref_active = false;
drm_object_property_set_value
(&con->base, dev->mode_config.suggested_x_property,
0);
@@ -1894,8 +2052,8 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
}
con->status = vmw_du_connector_detect(con, true);
}
-
mutex_unlock(&dev->mode_config.mutex);
+
drm_sysfs_hotplug_event(dev);
return 0;
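As a usage sketch (hypothetical values; dev_priv assumed in scope), a side-by-side dual-head topology handed to vmw_du_update_layout() would look like:

	/* Two 1024x768 units, the second placed to the right of the first. */
	struct drm_rect topology[] = {
		{ .x1 = 0,    .y1 = 0, .x2 = 1024, .y2 = 768 },
		{ .x1 = 1024, .y1 = 0, .x2 = 2048, .y2 = 768 },
	};

	vmw_du_update_layout(dev_priv, ARRAY_SIZE(topology), topology);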
@@ -2110,7 +2268,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
drm_mode_probed_add(connector, mode);
}
- drm_mode_connector_list_update(connector);
+ drm_connector_list_update(connector);
/* Move the preferred mode first, to help apps pick the right mode. */
drm_mode_sort(&connector->modes);
@@ -2195,7 +2353,25 @@ vmw_du_connector_atomic_get_property(struct drm_connector *connector,
return 0;
}
-
+/**
+ * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Update the preferred topology of the display units as per the ioctl request.
+ * The topology is expressed as an array of drm_vmw_rect, e.g.
+ *
+ * [0 0 640 480] [640 0 800 600] [0 480 640 480]
+ *
+ * NOTE:
+ * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
+ * Besides the device limits on the topology, x + w and y + h (lower right)
+ * cannot be greater than INT_MAX, so a topology beyond these limits is
+ * rejected with an error.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -2204,15 +2380,12 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_update_layout_arg *)data;
void __user *user_rects;
struct drm_vmw_rect *rects;
+ struct drm_rect *drm_rects;
unsigned rects_size;
- int ret;
- int i;
- u64 total_pixels = 0;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_vmw_rect bounding_box = {0};
+ int ret, i;
if (!arg->num_outputs) {
- struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+ struct drm_rect def_rect = {0, 0, 800, 600};
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
}
@@ -2231,52 +2404,29 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
goto out_free;
}
- for (i = 0; i < arg->num_outputs; ++i) {
- if (rects[i].x < 0 ||
- rects[i].y < 0 ||
- rects[i].x + rects[i].w > mode_config->max_width ||
- rects[i].y + rects[i].h > mode_config->max_height) {
- DRM_ERROR("Invalid GUI layout.\n");
- ret = -EINVAL;
- goto out_free;
- }
-
- /*
- * bounding_box.w and bunding_box.h are used as
- * lower-right coordinates
- */
- if (rects[i].x + rects[i].w > bounding_box.w)
- bounding_box.w = rects[i].x + rects[i].w;
-
- if (rects[i].y + rects[i].h > bounding_box.h)
- bounding_box.h = rects[i].y + rects[i].h;
+ drm_rects = (struct drm_rect *)rects;
- total_pixels += (u64) rects[i].w * (u64) rects[i].h;
- }
-
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- /*
- * For Screen Targets, the limits for a toplogy are:
- * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
- * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
- */
- u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
- u64 pixel_mem = total_pixels * 4;
+ for (i = 0; i < arg->num_outputs; i++) {
+ struct drm_vmw_rect curr_rect;
- if (bb_mem > dev_priv->prim_bb_mem) {
- DRM_ERROR("Topology is beyond supported limits.\n");
- ret = -EINVAL;
+ /* Verify the user-supplied rects for overflow, as the kernel uses drm_rect */
+ if ((rects[i].x + rects[i].w > INT_MAX) ||
+ (rects[i].y + rects[i].h > INT_MAX)) {
+ ret = -ERANGE;
goto out_free;
}
- if (pixel_mem > dev_priv->prim_bb_mem) {
- DRM_ERROR("Combined output size too large\n");
- ret = -EINVAL;
- goto out_free;
- }
+ curr_rect = rects[i];
+ drm_rects[i].x1 = curr_rect.x;
+ drm_rects[i].y1 = curr_rect.y;
+ drm_rects[i].x2 = curr_rect.x + curr_rect.w;
+ drm_rects[i].y2 = curr_rect.y + curr_rect.h;
}
- vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
+ ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
+
+ if (ret == 0)
+ vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
out_free:
kfree(rects);
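From userspace, the ioctl is driven with drm_vmw_rect entries in {x, y, w, h} form; a hedged sketch using the vmwgfx UAPI (vmwgfx_drm.h) and libdrm, where fd is an open vmwgfx DRM file descriptor:

	struct drm_vmw_rect layout[2] = {
		{ .x = 0,   .y = 0, .w = 640, .h = 480 },
		{ .x = 640, .y = 0, .w = 800, .h = 600 },
	};
	struct drm_vmw_update_layout_arg arg = {
		.num_outputs = 2,
		.rects = (uintptr_t)layout,
	};

	/* DRM_VMW_UPDATE_LAYOUT is the command index from vmwgfx_drm.h. */
	if (drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg)) != 0)
		; /* topology rejected (-EINVAL/-ERANGE) or copy failed */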
@@ -2322,9 +2472,10 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
} else {
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
+ struct drm_plane *plane = crtc->primary;
+
+ if (plane->state->fb == &framebuffer->base)
+ units[num_units++] = vmw_crtc_to_du(crtc);
}
}
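The switch from crtc->primary->fb to plane->state->fb follows the atomic model, where the framebuffer hangs off the plane state rather than the plane itself; schematically:

	struct drm_plane *plane = crtc->primary;

	struct drm_framebuffer *legacy_fb = plane->fb;        /* legacy bookkeeping */
	struct drm_framebuffer *atomic_fb = plane->state->fb; /* atomic source of truth */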
@@ -2422,7 +2573,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
* interrupted by a signal.
*/
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
bool interruptible,
bool validate_as_mob,
bool for_cpu_blit)
@@ -2454,7 +2605,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
* Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_buffer_prepare.
*/
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
{
if (buf)
ttm_bo_unreserve(&buf->base);
@@ -2477,7 +2628,7 @@ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
*/
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user *
user_fence_rep)
@@ -2489,7 +2640,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
file_priv ? &handle : NULL);
if (buf)
- vmw_fence_single_bo(&buf->base, fence);
+ vmw_bo_fence_single(&buf->base, fence);
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
@@ -2517,7 +2668,7 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
struct vmw_resource *res = ctx->res;
vmw_kms_helper_buffer_revert(ctx->buf);
- vmw_dmabuf_unreference(&ctx->buf);
+ vmw_bo_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -2562,7 +2713,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
if (ret)
goto out_unreserve;
- ctx->buf = vmw_dmabuf_reference(res->backup);
+ ctx->buf = vmw_bo_reference(res->backup);
}
ret = vmw_resource_validate(res);
if (ret)
@@ -2595,7 +2746,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
- vmw_dmabuf_unreference(&ctx->buf);
+ vmw_bo_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -2806,6 +2957,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
struct drm_crtc *crtc)
{
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_plane *plane = crtc->primary;
struct vmw_framebuffer *vfb;
mutex_lock(&dev_priv->global_kms_state_mutex);
@@ -2813,7 +2965,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
if (!du->is_implicit)
goto out_unlock;
- vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
+ vfb = vmw_framebuffer_to_vfb(plane->state->fb);
WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
dev_priv->implicit_fb != vfb);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6b7c012719f1..31311298ec0b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -90,7 +90,7 @@ struct vmw_kms_dirty {
#define vmw_framebuffer_to_vfbs(x) \
container_of(x, struct vmw_framebuffer_surface, base.base)
#define vmw_framebuffer_to_vfbd(x) \
- container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+ container_of(x, struct vmw_framebuffer_bo, base.base)
/**
* Base class for framebuffers
@@ -102,7 +102,7 @@ struct vmw_framebuffer {
struct drm_framebuffer base;
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
- bool dmabuf;
+ bool bo;
struct ttm_base_object *user_obj;
uint32_t user_handle;
};
@@ -117,15 +117,15 @@ struct vmw_clip_rect {
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
- struct vmw_dma_buffer *buffer;
+ struct vmw_buffer_object *buffer;
struct list_head head;
- bool is_dmabuf_proxy; /* true if this is proxy surface for DMA buf */
+ bool is_bo_proxy; /* true if this is a proxy surface for a buffer object */
};
-struct vmw_framebuffer_dmabuf {
+struct vmw_framebuffer_bo {
struct vmw_framebuffer base;
- struct vmw_dma_buffer *buffer;
+ struct vmw_buffer_object *buffer;
};
@@ -161,18 +161,18 @@ struct vmw_crtc_state {
*
* @base DRM plane object
* @surf Display surface for STDU
- * @dmabuf display dmabuf for SOU
+ * @bo display bo for SOU
* @content_fb_type Used by STDU.
- * @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit
+ * @bo_size Size of the bo, used by Screen Object Display Unit
* @pinned pin count for STDU display surface
*/
struct vmw_plane_state {
struct drm_plane_state base;
struct vmw_surface *surf;
- struct vmw_dma_buffer *dmabuf;
+ struct vmw_buffer_object *bo;
int content_fb_type;
- unsigned long dmabuf_size;
+ unsigned long bo_size;
int pinned;
@@ -192,6 +192,24 @@ struct vmw_connector_state {
struct drm_connector_state base;
bool is_implicit;
+
+ /**
+ * @gui_x:
+ *
+ * vmwgfx connector property representing the x position of this display
+ * unit (connector is synonymous with display unit) in the overall topology.
+ * This is what the device expects as xRoot when creating the screen.
+ */
+ int gui_x;
+
+ /**
+ * @gui_y:
+ *
+ * vmwgfx connector property representing the y position of this display
+ * unit (connector is synonymous with display unit) in the overall topology.
+ * This is what the device expects as yRoot when creating the screen.
+ */
+ int gui_y;
};
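A sketch of how these fields are consumed, mirroring the check_topology hunk earlier in this patch (names as used elsewhere in the series):

	/* During atomic check: stash the unit's screen origin in the conn state. */
	struct drm_connector_state *cs =
		drm_atomic_get_connector_state(state, &du->connector);
	struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(cs);

	vcs->gui_x = du->gui_x;	/* becomes the device's xRoot */
	vcs->gui_y = du->gui_y;	/* becomes the device's yRoot */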
/**
@@ -209,7 +227,7 @@ struct vmw_display_unit {
struct drm_plane cursor;
struct vmw_surface *cursor_surface;
- struct vmw_dma_buffer *cursor_dmabuf;
+ struct vmw_buffer_object *cursor_bo;
size_t cursor_age;
int cursor_x;
@@ -243,7 +261,7 @@ struct vmw_display_unit {
struct vmw_validation_ctx {
struct vmw_resource *res;
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
};
#define vmw_crtc_to_du(x) \
@@ -291,14 +309,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_kms_dirty *dirty);
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
bool interruptible,
bool validate_as_mob,
bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user *
user_fence_rep);
@@ -316,7 +334,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
uint32_t num_clips);
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
+ struct vmw_buffer_object *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -384,11 +402,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector,
*/
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment);
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips, int increment);
int vmw_kms_update_proxy(struct vmw_resource *res,
const struct drm_clip_rect *clips,
unsigned num_clips,
@@ -408,14 +426,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc);
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- struct drm_clip_rect *clips,
- struct drm_vmw_rect *vclips,
- unsigned num_clips, int increment,
- bool interruptible,
- struct vmw_fence_obj **out_fence,
- struct drm_crtc *crtc);
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ unsigned int num_clips, int increment,
+ bool interruptible,
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc);
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 4a5907e3f560..723578117191 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -438,7 +438,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_connector;
}
- (void) drm_mode_connector_attach_encoder(connector, encoder);
+ (void) drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
@@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
}
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment)
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips, int increment)
{
size_t fifo_size;
int i;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
index efd1ffd68185..e53bc639a754 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index d07c585e3c1d..7ed179d30ec5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -225,7 +225,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0);
- vmw_fence_single_bo(bo, NULL);
+ vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
}
@@ -362,7 +362,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0);
- vmw_fence_single_bo(bo, NULL);
+ vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
ttm_bo_unref(&batch->otable_bo);
@@ -620,7 +620,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
if (bo) {
- vmw_fence_single_bo(bo, NULL);
+ vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
}
vmw_fifo_resource_dec(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 21d746bdc922..8b9270f31409 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
- * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -31,6 +31,7 @@
#include <linux/frame.h>
#include <asm/hypervisor.h>
#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
#include "vmwgfx_msg.h"
@@ -234,7 +235,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
(HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
- DRM_ERROR("Failed to get reply size\n");
+ DRM_ERROR("Failed to get reply size for host message.\n");
return -EINVAL;
}
@@ -245,7 +246,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
reply_len = ebx;
reply = kzalloc(reply_len + 1, GFP_KERNEL);
if (!reply) {
- DRM_ERROR("Cannot allocate memory for reply\n");
+ DRM_ERROR("Cannot allocate memory for host message reply.\n");
return -ENOMEM;
}
@@ -338,7 +339,8 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
if (!msg) {
- DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
+ DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
+ guest_info_param);
return -ENOMEM;
}
@@ -374,7 +376,7 @@ out_msg:
out_open:
*length = 0;
kfree(msg);
- DRM_ERROR("Failed to get %s", guest_info_param);
+ DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
return -EINVAL;
}
@@ -403,7 +405,7 @@ int vmw_host_log(const char *log)
msg = kasprintf(GFP_KERNEL, "log %s", log);
if (!msg) {
- DRM_ERROR("Cannot allocate memory for log message\n");
+ DRM_ERROR("Cannot allocate memory for host log message.\n");
return -ENOMEM;
}
@@ -422,7 +424,7 @@ out_msg:
vmw_close_channel(&channel);
out_open:
kfree(msg);
- DRM_ERROR("Failed to send log\n");
+ DRM_ERROR("Failed to send host log message.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 8545488aa0cf..4907e50fb20a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -1,16 +1,29 @@
-/*
- * Copyright (C) 2016, VMware, Inc.
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
+ **************************************************************************
*
* Based on code from vmware.c and vmmouse.c.
* Author:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 222c9c2123a1..9f1b9d289bec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -38,7 +38,7 @@
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream {
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
bool claimed;
bool paused;
struct drm_vmw_control_stream_arg saved;
@@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
@@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
* used with GMRs instead of being locked to vram.
*/
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
bool pin, bool inter)
{
if (!pin)
- return vmw_dmabuf_unpin(dev_priv, buf, inter);
+ return vmw_bo_unpin(dev_priv, buf, inter);
if (dev_priv->active_display_unit == vmw_du_legacy)
- return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
+ return vmw_bo_pin_in_vram(dev_priv, buf, inter);
- return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
+ return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
}
/**
@@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
}
if (!pause) {
- vmw_dmabuf_unreference(&stream->buf);
+ vmw_bo_unreference(&stream->buf);
stream->paused = false;
} else {
stream->paused = true;
@@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted.
*/
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
@@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
}
if (stream->buf != buf)
- stream->buf = vmw_dmabuf_reference(buf);
+ stream->buf = vmw_bo_reference(buf);
stream->saved = *arg;
/* stream is no longer stopped/paused */
stream->paused = false;
@@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct drm_vmw_control_stream_arg *arg =
(struct drm_vmw_control_stream_arg *)data;
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
struct vmw_resource *res;
int ret;
@@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
+ ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
if (ret)
goto out_unlock;
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
- vmw_dmabuf_unreference(&buf);
+ vmw_bo_unreference(&buf);
out_unlock:
mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 0d42a46521fc..0861c821a7fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2013 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -40,7 +40,6 @@
*/
static int vmw_prime_map_attach(struct dma_buf *dma_buf,
- struct device *target_dev,
struct dma_buf_attachment *attach)
{
return -ENOSYS;
@@ -72,17 +71,6 @@ static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
}
-static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
unsigned long page_num)
{
@@ -109,9 +97,7 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = {
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
.map = vmw_prime_dmabuf_kmap,
- .map_atomic = vmw_prime_dmabuf_kmap_atomic,
.unmap = vmw_prime_dmabuf_kunmap,
- .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
.mmap = vmw_prime_dmabuf_mmap,
.vmap = vmw_prime_dmabuf_vmap,
.vunmap = vmw_prime_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index dce798053a96..e99f6cdbb091 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6b3a942b18df..92003ea5a219 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,7 +27,6 @@
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
-#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
@@ -35,29 +34,6 @@
#define VMW_RES_EVICT_ERR_COUNT 10
-struct vmw_user_dma_buffer {
- struct ttm_prime_object prime;
- struct vmw_dma_buffer dma;
-};
-
-struct vmw_bo_user_rep {
- uint32_t handle;
- uint64_t map_handle;
-};
-
-static inline struct vmw_dma_buffer *
-vmw_dma_buffer(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct vmw_dma_buffer, base);
-}
-
-static inline struct vmw_user_dma_buffer *
-vmw_user_dma_buffer(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
-}
-
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
kref_get(&res->kref);
@@ -116,7 +92,7 @@ static void vmw_resource_release(struct kref *kref)
res->backup_dirty = false;
list_del_init(&res->mob_head);
ttm_bo_unreserve(bo);
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
}
if (likely(res->hw_destroy != NULL)) {
@@ -287,7 +263,7 @@ out_bad_resource:
}
/**
- * Helper function that looks either a surface or dmabuf.
+ * Helper function that looks up either a surface or a bo.
*
 * The pointers pointed at by out_surf and out_buf need to be NULL.
*/
@@ -295,7 +271,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf)
+ struct vmw_buffer_object **out_buf)
{
struct vmw_resource *res;
int ret;
@@ -311,513 +287,11 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}
*out_surf = NULL;
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
+ ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
return ret;
}
/**
- * Buffer management.
- */
-
-/**
- * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @size: The requested buffer size.
- * @user: Whether this is an ordinary dma buffer or a user dma buffer.
- */
-static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
- bool user)
-{
- static size_t struct_size, user_struct_size;
- size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
-
- if (unlikely(struct_size == 0)) {
- size_t backend_size = ttm_round_pot(vmw_tt_size);
-
- struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_dma_buffer));
- user_struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
- }
-
- if (dev_priv->map_mode == vmw_dma_alloc_coherent)
- page_array_size +=
- ttm_round_pot(num_pages * sizeof(dma_addr_t));
-
- return ((user) ? user_struct_size : struct_size) +
- page_array_size;
-}
-
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- vmw_dma_buffer_unmap(vmw_bo);
- kfree(vmw_bo);
-}
-
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
- vmw_dma_buffer_unmap(&vmw_user_bo->dma);
- ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
-int vmw_dmabuf_init(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interruptible,
- void (*bo_free) (struct ttm_buffer_object *bo))
-{
- struct ttm_bo_device *bdev = &dev_priv->bdev;
- size_t acc_size;
- int ret;
- bool user = (bo_free == &vmw_user_dmabuf_destroy);
-
- BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
-
- acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
- memset(vmw_bo, 0, sizeof(*vmw_bo));
-
- INIT_LIST_HEAD(&vmw_bo->res_list);
-
- ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
- 0, interruptible, acc_size,
- NULL, NULL, bo_free);
- return ret;
-}
-
-static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
-{
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_base_object *base = *p_base;
- struct ttm_buffer_object *bo;
-
- *p_base = NULL;
-
- if (unlikely(base == NULL))
- return;
-
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
- prime.base);
- bo = &vmw_user_bo->dma.base;
- ttm_bo_unref(&bo);
-}
-
-static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
- enum ttm_ref_type ref_type)
-{
- struct vmw_user_dma_buffer *user_bo;
- user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
-
- switch (ref_type) {
- case TTM_REF_SYNCCPU_WRITE:
- ttm_bo_synccpu_write_release(&user_bo->dma.base);
- break;
- default:
- BUG();
- }
-}
-
-/**
- * vmw_user_dmabuf_alloc - Allocate a user dma buffer
- *
- * @dev_priv: Pointer to a struct device private.
- * @tfile: Pointer to a struct ttm_object_file on which to register the user
- * object.
- * @size: Size of the dma buffer.
- * @shareable: Boolean whether the buffer is shareable with other open files.
- * @handle: Pointer to where the handle value should be assigned.
- * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
- * should be assigned.
- */
-int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf,
- struct ttm_base_object **p_base)
-{
- struct vmw_user_dma_buffer *user_bo;
- struct ttm_buffer_object *tmp;
- int ret;
-
- user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
- if (unlikely(!user_bo)) {
- DRM_ERROR("Failed to allocate a buffer.\n");
- return -ENOMEM;
- }
-
- ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
- (dev_priv->has_mob) ?
- &vmw_sys_placement :
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
- if (unlikely(ret != 0))
- return ret;
-
- tmp = ttm_bo_reference(&user_bo->dma.base);
- ret = ttm_prime_object_init(tfile,
- size,
- &user_bo->prime,
- shareable,
- ttm_buffer_type,
- &vmw_user_dmabuf_release,
- &vmw_user_dmabuf_ref_obj_release);
- if (unlikely(ret != 0)) {
- ttm_bo_unref(&tmp);
- goto out_no_base_object;
- }
-
- *p_dma_buf = &user_bo->dma;
- if (p_base) {
- *p_base = &user_bo->prime.base;
- kref_get(&(*p_base)->refcount);
- }
- *handle = user_bo->prime.base.hash.key;
-
-out_no_base_object:
- return ret;
-}
-
-/**
- * vmw_user_dmabuf_verify_access - verify access permissions on this
- * buffer object.
- *
- * @bo: Pointer to the buffer object being accessed
- * @tfile: Identifying the caller.
- */
-int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile)
-{
- struct vmw_user_dma_buffer *vmw_user_bo;
-
- if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
- return -EPERM;
-
- vmw_user_bo = vmw_user_dma_buffer(bo);
-
- /* Check that the caller has opened the object. */
- if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
- return 0;
-
- DRM_ERROR("Could not grant buffer access.\n");
- return -EPERM;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
- * access, idling previous GPU operations on the buffer and optionally
- * blocking it for further command submissions.
- *
- * @user_bo: Pointer to the buffer object being grabbed for CPU access
- * @tfile: Identifying the caller.
- * @flags: Flags indicating how the grab should be performed.
- *
- * A blocking grab will be automatically released when @tfile is closed.
- */
-static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
- struct ttm_object_file *tfile,
- uint32_t flags)
-{
- struct ttm_buffer_object *bo = &user_bo->dma.base;
- bool existed;
- int ret;
-
- if (flags & drm_vmw_synccpu_allow_cs) {
- bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
- long lret;
-
- lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
- if (!lret)
- return -EBUSY;
- else if (lret < 0)
- return lret;
- return 0;
- }
-
- ret = ttm_bo_synccpu_write_grab
- (bo, !!(flags & drm_vmw_synccpu_dontblock));
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed, false);
- if (ret != 0 || existed)
- ttm_bo_synccpu_write_release(&user_bo->dma.base);
-
- return ret;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
- * and unblock command submission on the buffer if blocked.
- *
- * @handle: Handle identifying the buffer object.
- * @tfile: Identifying the caller.
- * @flags: Flags indicating the type of release.
- */
-static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
- struct ttm_object_file *tfile,
- uint32_t flags)
-{
- if (!(flags & drm_vmw_synccpu_allow_cs))
- return ttm_ref_object_base_unref(tfile, handle,
- TTM_REF_SYNCCPU_WRITE);
-
- return 0;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
- * functionality.
- *
- * @dev: Identifies the drm device.
- * @data: Pointer to the ioctl argument.
- * @file_priv: Identifies the caller.
- *
- * This function checks the ioctl arguments for validity and calls the
- * relevant synccpu functions.
- */
-int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_synccpu_arg *arg =
- (struct drm_vmw_synccpu_arg *) data;
- struct vmw_dma_buffer *dma_buf;
- struct vmw_user_dma_buffer *user_bo;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_base_object *buffer_base;
- int ret;
-
- if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
- || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
- drm_vmw_synccpu_dontblock |
- drm_vmw_synccpu_allow_cs)) != 0) {
- DRM_ERROR("Illegal synccpu flags.\n");
- return -EINVAL;
- }
-
- switch (arg->op) {
- case drm_vmw_synccpu_grab:
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
- &buffer_base);
- if (unlikely(ret != 0))
- return ret;
-
- user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
- dma);
- ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
- vmw_dmabuf_unreference(&dma_buf);
- ttm_base_object_unref(&buffer_base);
- if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
- ret != -EBUSY)) {
- DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
- (unsigned int) arg->handle);
- return ret;
- }
- break;
- case drm_vmw_synccpu_release:
- ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
- arg->flags);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
- (unsigned int) arg->handle);
- return ret;
- }
- break;
- default:
- DRM_ERROR("Invalid synccpu operation.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- union drm_vmw_alloc_dmabuf_arg *arg =
- (union drm_vmw_alloc_dmabuf_arg *)data;
- struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
- struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_dma_buffer *dma_buf;
- uint32_t handle;
- int ret;
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- req->size, false, &handle, &dma_buf,
- NULL);
- if (unlikely(ret != 0))
- goto out_no_dmabuf;
-
- rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
- rep->cur_gmr_id = handle;
- rep->cur_gmr_offset = 0;
-
- vmw_dmabuf_unreference(&dma_buf);
-
-out_no_dmabuf:
- ttm_read_unlock(&dev_priv->reservation_sem);
-
- return ret;
-}
-
-int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_unref_dmabuf_arg *arg =
- (struct drm_vmw_unref_dmabuf_arg *)data;
-
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->handle,
- TTM_REF_USAGE);
-}
-
-int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_dma_buffer **out,
- struct ttm_base_object **p_base)
-{
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_base_object *base;
-
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL)) {
- pr_err("Invalid buffer object handle 0x%08lx\n",
- (unsigned long)handle);
- return -ESRCH;
- }
-
- if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
- ttm_base_object_unref(&base);
- pr_err("Invalid buffer object handle 0x%08lx\n",
- (unsigned long)handle);
- return -EINVAL;
- }
-
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
- prime.base);
- (void)ttm_bo_reference(&vmw_user_bo->dma.base);
- if (p_base)
- *p_base = base;
- else
- ttm_base_object_unref(&base);
- *out = &vmw_user_bo->dma;
-
- return 0;
-}
-
-int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
- struct vmw_dma_buffer *dma_buf,
- uint32_t *handle)
-{
- struct vmw_user_dma_buffer *user_bo;
-
- if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
- return -EINVAL;
-
- user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
-
- *handle = user_bo->prime.base.hash.key;
- return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL, false);
-}
-
-/**
- * vmw_dumb_create - Create a dumb kms buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @args: Pointer to a struct drm_mode_create_dumb structure
- *
- * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
- * that the arguments have a different format.
- */
-int vmw_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_dma_buffer *dma_buf;
- int ret;
-
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- args->size, false, &args->handle,
- &dma_buf, NULL);
- if (unlikely(ret != 0))
- goto out_no_dmabuf;
-
- vmw_dmabuf_unreference(&dma_buf);
-out_no_dmabuf:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * @offset: The address space offset returned.
- *
- * This is a driver callback for the core drm dumb_map_offset functionality.
- */
-int vmw_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_dma_buffer *out_buf;
- int ret;
-
- ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
- if (ret != 0)
- return -EINVAL;
-
- *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
- vmw_dmabuf_unreference(&out_buf);
- return 0;
-}
-
-/**
- * vmw_dumb_destroy - Destroy a dumb boffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- *
- * This is a driver callback for the core drm dumb_destroy functionality.
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
-}
-
-/**
* vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
*
* @res: The resource for which to allocate a backup buffer.
@@ -829,7 +303,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
{
unsigned long size =
(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
- struct vmw_dma_buffer *backup;
+ struct vmw_buffer_object *backup;
int ret;
if (likely(res->backup)) {
@@ -841,16 +315,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
if (unlikely(!backup))
return -ENOMEM;
- ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+ ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
res->func->backup_placement,
interruptible,
- &vmw_dmabuf_bo_free);
+ &vmw_bo_bo_free);
if (unlikely(ret != 0))
- goto out_no_dmabuf;
+ goto out_no_bo;
res->backup = backup;
-out_no_dmabuf:
+out_no_bo:
return ret;
}
@@ -919,7 +393,7 @@ out_bind_failed:
*/
void vmw_resource_unreserve(struct vmw_resource *res,
bool switch_backup,
- struct vmw_dma_buffer *new_backup,
+ struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset)
{
struct vmw_private *dev_priv = res->dev_priv;
@@ -931,11 +405,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (res->backup) {
lockdep_assert_held(&res->backup->base.resv->lock.base);
list_del_init(&res->mob_head);
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
}
if (new_backup) {
- res->backup = vmw_dmabuf_reference(new_backup);
+ res->backup = vmw_bo_reference(new_backup);
lockdep_assert_held(&new_backup->base.resv->lock.base);
list_add_tail(&res->mob_head, &new_backup->res_list);
} else {
@@ -959,6 +433,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* for a resource and in that case, allocate
* one, reserve and validate it.
*
+ * @ticket: The ww acquire context to use, or NULL if trylocking.
* @res: The resource for which to allocate a backup buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
@@ -966,7 +441,8 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* reserved and validated backup buffer.
*/
static int
-vmw_resource_check_buffer(struct vmw_resource *res,
+vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
+ struct vmw_resource *res,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
@@ -985,7 +461,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
val_buf->bo = ttm_bo_reference(&res->backup->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
@@ -1003,11 +479,11 @@ vmw_resource_check_buffer(struct vmw_resource *res,
return 0;
out_no_validate:
- ttm_eu_backoff_reservation(NULL, &val_list);
+ ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
ttm_bo_unref(&val_buf->bo);
if (backup_dirty)
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
return ret;
}
@@ -1050,10 +526,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
* vmw_resource_backoff_reservation - Unreserve and unreference a
* backup buffer
 *
+ * @ticket: The ww acquire ctx used for reservation.
* @val_buf: Backup buffer information.
*/
static void
-vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
+ struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
@@ -1062,7 +540,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
- ttm_eu_backoff_reservation(NULL, &val_list);
+ ttm_eu_backoff_reservation(ticket, &val_list);
ttm_bo_unref(&val_buf->bo);
}
@@ -1070,10 +548,12 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
* vmw_resource_do_evict - Evict a resource, and transfer its data
* to a backup buffer.
*
+ * @ticket: The ww acquire ticket to use, or NULL if trylocking.
* @res: The resource to evict.
* @interruptible: Whether to wait interruptible.
*/
-static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
+ struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
@@ -1083,7 +563,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
val_buf.bo = NULL;
val_buf.shared = false;
- ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
+ ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -1098,7 +578,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
res->backup_dirty = true;
res->res_dirty = false;
out_no_unbind:
- vmw_resource_backoff_reservation(&val_buf);
+ vmw_resource_backoff_reservation(ticket, &val_buf);
return ret;
}
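The @ticket argument threads straight through to ttm_eu_reserve_buffers(): per the comments at the call sites, a NULL ticket trylocks the backup buffers while a real ww_acquire_ctx allows blocking. Sketched usage at the two call sites:

	struct ww_acquire_ctx ticket;
	int ret;

	/* From vmw_resource_validate(): trylock, buffers may already be held. */
	ret = vmw_resource_do_evict(NULL, evict_res, true);

	/* From vmw_resource_evict_type(): no buffers held, safe to block. */
	ret = vmw_resource_do_evict(&ticket, evict_res, false);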
@@ -1152,7 +632,8 @@ int vmw_resource_validate(struct vmw_resource *res)
write_unlock(&dev_priv->resource_lock);
- ret = vmw_resource_do_evict(evict_res, true);
+ /* Trylock backup buffers with a NULL ticket. */
+ ret = vmw_resource_do_evict(NULL, evict_res, true);
if (unlikely(ret != 0)) {
write_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
@@ -1171,7 +652,7 @@ int vmw_resource_validate(struct vmw_resource *res)
goto out_no_validate;
else if (!res->func->needs_backup && res->backup) {
list_del_init(&res->mob_head);
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
}
return 0;
@@ -1180,109 +661,39 @@ out_no_validate:
return ret;
}
-/**
- * vmw_fence_single_bo - Utility function to fence a single TTM buffer
- * object without unreserving it.
- *
- * @bo: Pointer to the struct ttm_buffer_object to fence.
- * @fence: Pointer to the fence. If NULL, this function will
- * insert a fence into the command stream..
- *
- * Contrary to the ttm_eu version of this function, it takes only
- * a single buffer object instead of a list, and it also doesn't
- * unreserve the buffer object, which needs to be done separately.
- */
-void vmw_fence_single_bo(struct ttm_buffer_object *bo,
- struct vmw_fence_obj *fence)
-{
- struct ttm_bo_device *bdev = bo->bdev;
-
- struct vmw_private *dev_priv =
- container_of(bdev, struct vmw_private, bdev);
-
- if (fence == NULL) {
- vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- reservation_object_add_excl_fence(bo->resv, &fence->base);
- dma_fence_put(&fence->base);
- } else
- reservation_object_add_excl_fence(bo->resv, &fence->base);
-}
/**
- * vmw_resource_move_notify - TTM move_notify_callback
+ * vmw_resource_unbind_list - Unbind all resources attached to a backup buffer
*
- * @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
- * region the move is taking place.
+ * @vbo: Pointer to the current backing MOB.
*
* Evicts the Guest Backed hardware resource if the backup
* buffer is being moved out of MOB memory.
- * Note that this function should not race with the resource
- * validation code as long as it accesses only members of struct
- * resource that remain static while bo::res is !NULL and
- * while we have @bo reserved. struct resource::backup is *not* a
- * static member. The resource validation code will take care
- * to set @bo::res to NULL, while having @bo reserved when the
- * buffer is no longer bound to the resource, so @bo:res can be
- * used to determine whether there is a need to unbind and whether
- * it is safe to unbind.
+ * Note that this function will not race with the resource
+ * validation code, since resource validation and eviction
+ * both require the backup buffer to be reserved.
*/
-void vmw_resource_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
- struct vmw_dma_buffer *dma_buf;
-
- if (mem == NULL)
- return;
-
- if (bo->destroy != vmw_dmabuf_bo_free &&
- bo->destroy != vmw_user_dmabuf_destroy)
- return;
-
- dma_buf = container_of(bo, struct vmw_dma_buffer, base);
-
- /*
- * Kill any cached kernel maps before move. An optimization could
- * be to do this iff source or destination memory type is VRAM.
- */
- vmw_dma_buffer_unmap(dma_buf);
- if (mem->mem_type != VMW_PL_MOB) {
- struct vmw_resource *res, *n;
- struct ttm_validate_buffer val_buf;
+ struct vmw_resource *res, *next;
+ struct ttm_validate_buffer val_buf = {
+ .bo = &vbo->base,
+ .shared = false
+ };
- val_buf.bo = bo;
- val_buf.shared = false;
+ lockdep_assert_held(&vbo->base.resv->lock.base);
+ list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
+ if (!res->func->unbind)
+ continue;
- list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
-
- if (unlikely(res->func->unbind == NULL))
- continue;
-
- (void) res->func->unbind(res, true, &val_buf);
- res->backup_dirty = true;
- res->res_dirty = false;
- list_del_init(&res->mob_head);
- }
-
- (void) ttm_bo_wait(bo, false, false);
+ (void) res->func->unbind(res, true, &val_buf);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+ list_del_init(&res->mob_head);
}
-}
-
-
-/**
- * vmw_resource_swap_notify - swapout notify callback.
- *
- * @bo: The buffer object to be swapped out.
- */
-void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
-{
- if (bo->destroy != vmw_dmabuf_bo_free &&
- bo->destroy != vmw_user_dmabuf_destroy)
- return;
- /* Kill any cached kernel maps before swapout */
- vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+ (void) ttm_bo_wait(&vbo->base, false, false);
}
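A sketch of the caller side (illustrative only; in this series the actual call lives in the buffer-object move_notify path): when a backup buffer migrates out of MOB memory, its resources must be unbound first:

	static void example_move_notify(struct ttm_buffer_object *bo,
					struct ttm_mem_reg *mem)
	{
		struct vmw_buffer_object *vbo =
			container_of(bo, struct vmw_buffer_object, base);

		/* Leaving MOB memory invalidates GB bindings; unbind while reserved. */
		if (mem && mem->mem_type != VMW_PL_MOB)
			vmw_resource_unbind_list(vbo);
	}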
@@ -1294,7 +705,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
*/
-int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
struct vmw_resource *dx_query_ctx;
struct vmw_private *dev_priv;
@@ -1344,7 +755,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
struct ttm_bo_device *bdev = bo->bdev;
struct vmw_private *dev_priv;
@@ -1353,7 +764,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
mutex_lock(&dev_priv->binding_mutex);
- dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+ dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
mutex_unlock(&dev_priv->binding_mutex);
return;
@@ -1368,7 +779,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
/* Create a fence and attach the BO to it */
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (fence != NULL)
vmw_fence_obj_unreference(&fence);
@@ -1405,6 +816,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
struct vmw_resource *evict_res;
unsigned err_count = 0;
int ret;
+ struct ww_acquire_ctx ticket;
do {
write_lock(&dev_priv->resource_lock);
@@ -1418,7 +830,8 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock);
- ret = vmw_resource_do_evict(evict_res, false);
+ /* Take a blocking reservation on the backup buffers, using a ticket. */
+ ret = vmw_resource_do_evict(&ticket, evict_res, false);
if (unlikely(ret != 0)) {
write_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
@@ -1481,7 +894,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
goto out_no_reserve;
if (res->pin_count == 0) {
- struct vmw_dma_buffer *vbo = NULL;
+ struct vmw_buffer_object *vbo = NULL;
if (res->backup) {
vbo = res->backup;
@@ -1539,7 +952,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
WARN_ON(res->pin_count == 0);
if (--res->pin_count == 0 && res->backup) {
- struct vmw_dma_buffer *vbo = res->backup;
+ struct vmw_buffer_object *vbo = res->backup;
(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
vmw_bo_pin_reserved(vbo, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index ac05968a832b..a8c1c5ebd71d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 3d667e903beb..ad0de7f0cd60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit {
SVGAFifoCmdBlitScreenToGMRFB body;
};
-struct vmw_kms_sou_dmabuf_blit {
+struct vmw_kms_sou_bo_blit {
uint32 header;
SVGAFifoCmdBlitGMRFBToScreen body;
};
@@ -83,7 +83,7 @@ struct vmw_screen_object_unit {
struct vmw_display_unit base;
unsigned long buffer_size; /**< Size of allocated buffer */
- struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+ struct vmw_buffer_object *buffer; /**< Backing store buffer */
bool defined;
};
@@ -109,7 +109,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
*/
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou,
- uint32_t x, uint32_t y,
+ int x, int y,
struct drm_display_mode *mode)
{
size_t fifo_size;
@@ -139,13 +139,8 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
cmd->obj.size.width = mode->hdisplay;
cmd->obj.size.height = mode->vdisplay;
- if (sou->base.is_implicit) {
- cmd->obj.root.x = x;
- cmd->obj.root.y = y;
- } else {
- cmd->obj.root.x = sou->base.gui_x;
- cmd->obj.root.y = sou->base.gui_y;
- }
+ cmd->obj.root.x = x;
+ cmd->obj.root.y = y;
sou->base.set_gui_x = cmd->obj.root.x;
sou->base.set_gui_y = cmd->obj.root.y;
@@ -222,12 +217,11 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct vmw_plane_state *vps;
int ret;
-
- sou = vmw_crtc_to_sou(crtc);
+ sou = vmw_crtc_to_sou(crtc);
dev_priv = vmw_priv(crtc->dev);
- ps = crtc->primary->state;
- fb = ps->fb;
- vps = vmw_plane_state_to_vps(ps);
+ ps = crtc->primary->state;
+ fb = ps->fb;
+ vps = vmw_plane_state_to_vps(ps);
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
@@ -240,11 +234,25 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
if (vfb) {
- sou->buffer = vps->dmabuf;
- sou->buffer_size = vps->dmabuf_size;
+ struct drm_connector_state *conn_state;
+ struct vmw_connector_state *vmw_conn_state;
+ int x, y;
+
+ sou->buffer = vps->bo;
+ sou->buffer_size = vps->bo_size;
+
+ if (sou->base.is_implicit) {
+ x = crtc->x;
+ y = crtc->y;
+ } else {
+ conn_state = sou->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
+ }
- ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
- &crtc->mode);
+ ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
if (ret)
DRM_ERROR("Failed to define Screen Object %dx%d\n",
crtc->x, crtc->y);
@@ -408,10 +416,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc ?
plane->state->crtc : old_state->crtc;
- if (vps->dmabuf)
- vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
- vmw_dmabuf_unreference(&vps->dmabuf);
- vps->dmabuf_size = 0;
+ if (vps->bo)
+ vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
+ vmw_bo_unreference(&vps->bo);
+ vps->bo_size = 0;
vmw_du_plane_cleanup_fb(plane, old_state);
}
@@ -440,8 +448,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
if (!new_fb) {
- vmw_dmabuf_unreference(&vps->dmabuf);
- vps->dmabuf_size = 0;
+ vmw_bo_unreference(&vps->bo);
+ vps->bo_size = 0;
return 0;
}
@@ -449,22 +457,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev);
- if (vps->dmabuf) {
- if (vps->dmabuf_size == size) {
+ if (vps->bo) {
+ if (vps->bo_size == size) {
/*
* Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called.
*/
- return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
+ return vmw_bo_pin_in_vram(dev_priv, vps->bo,
true);
}
- vmw_dmabuf_unreference(&vps->dmabuf);
- vps->dmabuf_size = 0;
+ vmw_bo_unreference(&vps->bo);
+ vps->bo_size = 0;
}
- vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
- if (!vps->dmabuf)
+ vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
+ if (!vps->bo)
return -ENOMEM;
vmw_svga_enable(dev_priv);
@@ -473,22 +481,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
+ ret = vmw_bo_init(dev_priv, vps->bo, size,
&vmw_vram_ne_placement,
- false, &vmw_dmabuf_bo_free);
+ false, &vmw_bo_bo_free);
vmw_overlay_resume_all(dev_priv);
if (ret) {
- vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
+ vps->bo = NULL; /* vmw_bo_init frees on error */
return ret;
}
- vps->dmabuf_size = size;
+ vps->bo_size = size;
/*
* TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped.
*/
- return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
+ return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
}
@@ -512,10 +520,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
vclips.w = crtc->mode.hdisplay;
vclips.h = crtc->mode.vdisplay;
- if (vfb->dmabuf)
- ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
- &vclips, 1, 1, true,
- &fence, crtc);
+ if (vfb->bo)
+ ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
+ &vclips, 1, 1, true,
+ &fence, crtc);
else
ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
&vclips, NULL, 0, 0,
@@ -527,8 +535,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
*/
if (ret != 0)
DRM_ERROR("Failed to update screen.\n");
-
- crtc->primary->fb = plane->state->fb;
} else {
/*
* When disabling a plane, CRTC and FB should always be NULL
@@ -697,7 +703,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_connector;
}
- (void) drm_mode_connector_attach_encoder(connector, encoder);
+ (void) drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
@@ -777,11 +783,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
return 0;
}
-static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer)
{
- struct vmw_dma_buffer *buf =
- container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+ struct vmw_buffer_object *buf =
+ container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
int depth = framebuffer->base.format->depth;
struct {
@@ -972,13 +978,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
}
/**
- * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
+ * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips.
*
* @dirty: The closure structure.
*
* Commits a previously built command buffer of readback clips.
*/
-static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{
if (!dirty->num_hits) {
vmw_fifo_commit(dirty->dev_priv, 0);
@@ -986,20 +992,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
}
vmw_fifo_commit(dirty->dev_priv,
- sizeof(struct vmw_kms_sou_dmabuf_blit) *
+ sizeof(struct vmw_kms_sou_bo_blit) *
dirty->num_hits);
}
/**
- * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
+ * vmw_sou_bo_clip - Callback to encode a readback cliprect.
*
* @dirty: The closure structure
*
* Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
*/
-static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
{
- struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+ struct vmw_kms_sou_bo_blit *blit = dirty->cmd;
blit += dirty->num_hits;
blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
@@ -1014,10 +1020,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
}
/**
- * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
*
* @dev_priv: Pointer to the device private structure.
- * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @framebuffer: Pointer to the buffer-object backed framebuffer.
* @clips: Array of clip rects.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL.
@@ -1027,12 +1033,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
- * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only.
+ * @crtc: If crtc is passed, perform the BO dirty operation on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
@@ -1041,8 +1047,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc)
{
- struct vmw_dma_buffer *buf =
- container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+ struct vmw_buffer_object *buf =
+ container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
struct vmw_kms_dirty dirty;
int ret;
@@ -1052,14 +1058,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
if (ret)
return ret;
- ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+ ret = do_bo_define_gmrfb(dev_priv, framebuffer);
if (unlikely(ret != 0))
goto out_revert;
dirty.crtc = crtc;
- dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
- dirty.clip = vmw_sou_dmabuf_clip;
- dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+ dirty.fifo_commit = vmw_sou_bo_fifo_commit;
+ dirty.clip = vmw_sou_bo_clip;
+ dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
num_clips;
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
0, 0, num_clips, increment, &dirty);
@@ -1118,12 +1124,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
/**
* vmw_kms_sou_readback - Perform a readback from the screen object system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
@@ -1141,8 +1147,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
uint32_t num_clips,
struct drm_crtc *crtc)
{
- struct vmw_dma_buffer *buf =
- container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_buffer_object *buf =
+ container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_kms_dirty dirty;
int ret;
@@ -1151,7 +1157,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
if (ret)
return ret;
- ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+ ret = do_bo_define_gmrfb(dev_priv, vfb);
if (unlikely(ret != 0))
goto out_revert;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 73b8e9a16368..fe4842ca3b6e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -159,7 +159,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
SVGA3dShaderType type,
uint8_t num_input_sig,
uint8_t num_output_sig,
- struct vmw_dma_buffer *byte_code,
+ struct vmw_buffer_object *byte_code,
void (*res_free) (struct vmw_resource *res))
{
struct vmw_shader *shader = vmw_res_to_shader(res);
@@ -178,7 +178,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
res->backup_size = size;
if (byte_code) {
- res->backup = vmw_dmabuf_reference(byte_code);
+ res->backup = vmw_bo_reference(byte_code);
res->backup_offset = offset;
}
shader->size = size;
@@ -306,7 +306,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -537,7 +537,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -723,7 +723,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
}
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
+ struct vmw_buffer_object *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
@@ -801,7 +801,7 @@ out:
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
+ struct vmw_buffer_object *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type)
@@ -862,12 +862,12 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_dma_buffer *buffer = NULL;
+ struct vmw_buffer_object *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
if (buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
+ ret = vmw_user_bo_lookup(tfile, buffer_handle,
&buffer, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader "
@@ -906,7 +906,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
- vmw_dmabuf_unreference(&buffer);
+ vmw_bo_unreference(&buffer);
return ret;
}
@@ -983,7 +983,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
struct list_head *list)
{
struct ttm_operation_ctx ctx = { false, true };
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
int ret;
@@ -997,8 +997,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(!buf))
return -ENOMEM;
- ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
- true, vmw_dmabuf_bo_free);
+ ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
+ true, vmw_bo_bo_free);
if (unlikely(ret != 0))
goto out;
@@ -1031,7 +1031,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
res, list);
vmw_resource_unreference(&res);
no_reserve:
- vmw_dmabuf_unreference(&buf);
+ vmw_bo_unreference(&buf);
out:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index a0cb310665cc..6ebc5affde14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index d3573c37c436..e9b6b7baa009 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index 268738387b5e..b80c7252f2fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 67331f01ef32..93f6b96ca7bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/******************************************************************************
*
- * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * COPYRIGHT (C) 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -44,7 +44,7 @@
enum stdu_content_type {
SAME_AS_DISPLAY = 0,
SEPARATE_SURFACE,
- SEPARATE_DMA
+ SEPARATE_BO
};
/**
@@ -58,7 +58,7 @@ enum stdu_content_type {
* @bottom: Bottom side of bounding box.
* @fb_left: Left side of the framebuffer/content bounding box
* @fb_top: Top of the framebuffer/content bounding box
- * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @buf: buffer object when DMA-ing between buffer and screen targets.
* @sid: Surface ID when copying between surface and screen targets.
*/
struct vmw_stdu_dirty {
@@ -68,7 +68,7 @@ struct vmw_stdu_dirty {
s32 fb_left, fb_top;
u32 pitch;
union {
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
u32 sid;
};
};
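
The buf/sid union above is discriminated by the stdu_content_type enum carried alongside it. A self-contained sketch of that tagged-union pattern, with simplified types:

#include <stdint.h>
#include <stdio.h>

enum content_type { SEPARATE_SURFACE, SEPARATE_BO };

struct dirty {
	enum content_type type;	/* discriminator */
	union {
		void *buf;	/* valid when type == SEPARATE_BO */
		uint32_t sid;	/* valid when type == SEPARATE_SURFACE */
	};
};

int main(void)
{
	struct dirty d = { .type = SEPARATE_SURFACE, .sid = 7 };

	/* Always check the discriminator before touching the union. */
	if (d.type == SEPARATE_SURFACE)
		printf("sid=%u\n", d.sid);
	else
		printf("buf=%p\n", d.buf);
	return 0;
}
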
@@ -178,13 +178,9 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
cmd->body.height = mode->vdisplay;
cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
cmd->body.dpi = 0;
- if (stdu->base.is_implicit) {
- cmd->body.xRoot = crtc_x;
- cmd->body.yRoot = crtc_y;
- } else {
- cmd->body.xRoot = stdu->base.gui_x;
- cmd->body.yRoot = stdu->base.gui_y;
- }
+ cmd->body.xRoot = crtc_x;
+ cmd->body.yRoot = crtc_y;
+
stdu->base.set_gui_x = cmd->body.xRoot;
stdu->base.set_gui_y = cmd->body.yRoot;
@@ -374,11 +370,14 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
- int ret;
-
+ struct drm_connector_state *conn_state;
+ struct vmw_connector_state *vmw_conn_state;
+ int x, y, ret;
- stdu = vmw_crtc_to_stdu(crtc);
+ stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
+ conn_state = stdu->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
if (stdu->defined) {
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
@@ -397,8 +396,16 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!crtc->state->enable)
return;
+ if (stdu->base.is_implicit) {
+ x = crtc->x;
+ y = crtc->y;
+ } else {
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
+ }
+
vmw_svga_enable(dev_priv);
- ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, crtc->x, crtc->y);
+ ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
if (ret)
DRM_ERROR("Failed to define Screen Target of size %dx%d\n",
@@ -414,6 +421,7 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct drm_plane_state *plane_state = crtc->primary->state;
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
struct vmw_framebuffer *vfb;
@@ -422,7 +430,7 @@ static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- fb = crtc->primary->fb;
+ fb = plane_state->fb;
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
@@ -507,14 +515,14 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
/**
- * vmw_stdu_dmabuf_clip - Callback to encode a suface DMA command cliprect
+ * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
*
* @dirty: The closure structure.
*
* Encodes a surface DMA command cliprect and updates the bounding box
* for the DMA.
*/
-static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -542,14 +550,14 @@ static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
}
/**
- * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
*
* @dirty: The closure structure.
*
* Fills in the missing fields in a DMA command, and optionally encodes
* a screen target update command, depending on transfer direction.
*/
-static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -593,13 +601,13 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
/**
- * vmw_stdu_dmabuf_cpu_clip - Callback to encode a CPU blit
+ * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
*
* @dirty: The closure structure.
*
* This function calculates the bounding box for all the incoming clips.
*/
-static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -623,14 +631,14 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
/**
- * vmw_stdu_dmabuf_cpu_commit - Callback to do a CPU blit from DMAbuf
+ * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from a buffer object
*
* @dirty: The closure structure.
*
* For the special case when we cannot create a proxy surface in a
* 2D VM, we have to do a CPU blit ourselves.
*/
-static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -651,7 +659,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
if (width == 0 || height == 0)
return;
- /* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */
+ /* Assume we are blitting from Guest (bo) to Host (display_srf) */
dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
dst_bo = &stdu->display_srf->res.backup->base;
dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
@@ -711,13 +719,13 @@ out_cleanup:
}
/**
- * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
* framebuffer and the screen target system.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm-file identifying the caller. May be
* set to NULL, but then @user_fence_rep must also be set to NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
* @clips: Array of clip rects. Either @clips or @vclips must be NULL.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL.
@@ -746,8 +754,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
bool interruptible,
struct drm_crtc *crtc)
{
- struct vmw_dma_buffer *buf =
- container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_buffer_object *buf =
+ container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_stdu_dirty ddirty;
int ret;
bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
@@ -769,8 +777,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
ddirty.fb_left = ddirty.fb_top = S32_MAX;
ddirty.pitch = vfb->base.pitches[0];
ddirty.buf = buf;
- ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
- ddirty.base.clip = vmw_stdu_dmabuf_clip;
+ ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
+ ddirty.base.clip = vmw_stdu_bo_clip;
ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
num_clips * sizeof(SVGA3dCopyBox) +
sizeof(SVGA3dCmdSurfaceDMASuffix);
@@ -779,8 +787,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
if (cpu_blit) {
- ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit;
- ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip;
+ ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
+ ddirty.base.clip = vmw_stdu_bo_cpu_clip;
ddirty.base.fifo_reserve_size = 0;
}
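
vmw_kms_stdu_dma selects either the FIFO-based or the CPU-blit callbacks at runtime based on SVGA_CAP_3D, as the hunk above shows. A small sketch of that capability-driven dispatch, using invented stand-in names:

#include <stdbool.h>
#include <stdio.h>

struct dirty_ops {
	void (*clip)(void);
	void (*commit)(void);
};

static void fifo_clip(void)   { puts("fifo clip"); }
static void fifo_commit(void) { puts("fifo commit"); }
static void cpu_clip(void)    { puts("cpu clip"); }
static void cpu_commit(void)  { puts("cpu commit"); }

int main(void)
{
	bool has_3d = false;	/* pretend we are a 2D-only VM */

	/* Default to the accelerated path, then override when the
	 * device lacks 3D — the same shape as the hunk above. */
	struct dirty_ops ops = { fifo_clip, fifo_commit };
	if (!has_3d)
		ops = (struct dirty_ops){ cpu_clip, cpu_commit };

	ops.clip();
	ops.commit();
	return 0;
}
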
@@ -926,7 +934,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
if (ret)
return ret;
- if (vfbs->is_dmabuf_proxy) {
+ if (vfbs->is_bo_proxy) {
ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
if (ret)
goto out_finish;
@@ -1074,7 +1082,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
* @new_state: info on the new plane state, including the FB
*
* This function allocates a new display surface if the content is
- * backed by a DMA. The display surface is pinned here, and it'll
+ * backed by a buffer object. The display surface is pinned here, and it'll
* be unpinned in .cleanup_fb()
*
* Returns 0 on success
@@ -1104,13 +1112,13 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
}
vfb = vmw_framebuffer_to_vfb(new_fb);
- new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+ new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
new_vfbs->surface->base_size.height == vdisplay)
new_content_type = SAME_AS_DISPLAY;
- else if (vfb->dmabuf)
- new_content_type = SEPARATE_DMA;
+ else if (vfb->bo)
+ new_content_type = SEPARATE_BO;
else
new_content_type = SEPARATE_SURFACE;
@@ -1123,10 +1131,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
display_base_size.depth = 1;
/*
- * If content buffer is a DMA buf, then we have to construct
- * surface info
+ * If content buffer is a buffer object, then we have to
+ * construct surface info
*/
- if (new_content_type == SEPARATE_DMA) {
+ if (new_content_type == SEPARATE_BO) {
switch (new_fb->format->cpp[0]*8) {
case 32:
@@ -1149,6 +1157,9 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
content_srf.flags = 0;
content_srf.mip_levels[0] = 1;
content_srf.multisample_count = 0;
+ content_srf.multisample_pattern =
+ SVGA3D_MS_PATTERN_NONE;
+ content_srf.quality_level = SVGA3D_MS_QUALITY_NONE;
} else {
content_srf = *new_vfbs->surface;
}
@@ -1177,6 +1188,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
content_srf.multisample_count,
0,
display_base_size,
+ content_srf.multisample_pattern,
+ content_srf.quality_level,
&vps->surf);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
@@ -1211,12 +1224,12 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
vps->content_fb_type = new_content_type;
/*
- * This should only happen if the DMA buf is too large to create a
+ * This should only happen if the buffer object is too large to create a
* proxy surface for.
- * If we are a 2D VM with a DMA buffer then we have to use CPU blit
+ * If we are a 2D VM with a buffer object then we have to use CPU blit
* so cache these mappings
*/
- if (vps->content_fb_type == SEPARATE_DMA &&
+ if (vps->content_fb_type == SEPARATE_BO &&
!(dev_priv->capabilities & SVGA_CAP_3D))
vps->cpp = new_fb->pitches[0] / new_fb->width;
@@ -1275,7 +1288,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
if (ret)
DRM_ERROR("Failed to bind surface to STDU.\n");
- if (vfb->dmabuf)
+ if (vfb->bo)
ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
&vclips, 1, 1, true, false,
crtc);
@@ -1285,8 +1298,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
1, 1, NULL, crtc);
if (ret)
DRM_ERROR("Failed to update STDU.\n");
-
- crtc->primary->fb = plane->state->fb;
} else {
crtc = old_state->crtc;
stdu = vmw_crtc_to_stdu(crtc);
@@ -1487,7 +1498,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_connector;
}
- (void) drm_mode_connector_attach_encoder(connector, encoder);
+ (void) drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b236c48bf265..e125233e074b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -33,6 +33,10 @@
#include "vmwgfx_binding.h"
#include "device_include/svga3d_surfacedefs.h"
+#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
+#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
+#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+ (svga3d_flags & ((uint64_t)U32_MAX))
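
These macros combine two 32-bit flag words into one 64-bit value and split it back; the cast before the shift matters, since shifting a 32-bit operand by 32 would be undefined. A standalone check of the same arithmetic (local re-definitions, with UINT32_MAX in place of the kernel's U32_MAX):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FLAGS_64(upper32, lower32) \
	(((uint64_t)(upper32) << 32) | (lower32))
#define FLAGS_UPPER_32(f) ((f) >> 32)
#define FLAGS_LOWER_32(f) ((f) & ((uint64_t)UINT32_MAX))

int main(void)
{
	uint64_t f = FLAGS_64(0xdeadu, 0xbeefu);

	/* Round-trips exactly: upper and lower halves come back intact. */
	assert(FLAGS_UPPER_32(f) == 0xdead);
	assert(FLAGS_LOWER_32(f) == 0xbeef);
	printf("0x%llx\n", (unsigned long long)f);
	return 0;
}
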
/**
* struct vmw_user_surface - User-space visible surface resource
@@ -81,7 +85,16 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
-
+static int
+vmw_gb_surface_define_internal(struct drm_device *dev,
+ struct drm_vmw_gb_surface_create_ext_req *req,
+ struct drm_vmw_gb_surface_create_rep *rep,
+ struct drm_file *file_priv);
+static int
+vmw_gb_surface_reference_internal(struct drm_device *dev,
+ struct drm_vmw_surface_arg *req,
+ struct drm_vmw_gb_surface_ref_ext_rep *rep,
+ struct drm_file *file_priv);
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
@@ -224,7 +237,12 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
+ /*
+ * Downcast surfaceFlags; it was upcast when received from user-space,
+ * since the driver internally stores it as 64-bit.
+ * The legacy surface define supports only 32-bit flags.
+ */
+ cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
cmd->body.format = srf->format;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
cmd->body.face[i].numMipLevels = srf->mip_levels[i];
@@ -468,7 +486,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -760,7 +778,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf = &user_srf->srf;
res = &srf->res;
- srf->flags = req->flags;
+ /* Driver internally stores as 64-bit flags */
+ srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
srf->format = req->format;
srf->scanout = req->scanout;
@@ -785,6 +804,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->multisample_count = 0;
+ srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ srf->quality_level = SVGA3D_MS_QUALITY_NONE;
cur_bo_offset = 0;
cur_offset = srf->offsets;
@@ -842,12 +863,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (dev_priv->has_mob && req->shareable) {
uint32_t backup_handle;
- ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
- res->backup_size,
- true,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
+ ret = vmw_user_bo_alloc(dev_priv, tfile,
+ res->backup_size,
+ true,
+ &backup_handle,
+ &res->backup,
+ &user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
@@ -990,7 +1011,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- rep->flags = srf->flags;
+ /* Downcast of flags when sending back to user space */
+ rep->flags = (uint32_t)srf->flags;
rep->format = srf->format;
memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
@@ -1031,6 +1053,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface_v2 body;
} *cmd2;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBSurface_v3 body;
+ } *cmd3;
if (likely(res->id != -1))
return 0;
@@ -1047,7 +1073,11 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- if (srf->array_size > 0) {
+ if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+ cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
+ cmd_len = sizeof(cmd3->body);
+ submit_len = sizeof(*cmd3);
+ } else if (srf->array_size > 0) {
/* has_dx checked at creation time. */
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
cmd_len = sizeof(cmd2->body);
@@ -1060,6 +1090,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd = vmw_fifo_reserve(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
+ cmd3 = (typeof(cmd3))cmd;
if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
@@ -1067,12 +1098,27 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- if (srf->array_size > 0) {
+ if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+ cmd3->header.id = cmd_id;
+ cmd3->header.size = cmd_len;
+ cmd3->body.sid = srf->res.id;
+ cmd3->body.surfaceFlags = srf->flags;
+ cmd3->body.format = srf->format;
+ cmd3->body.numMipLevels = srf->mip_levels[0];
+ cmd3->body.multisampleCount = srf->multisample_count;
+ cmd3->body.multisamplePattern = srf->multisample_pattern;
+ cmd3->body.qualityLevel = srf->quality_level;
+ cmd3->body.autogenFilter = srf->autogen_filter;
+ cmd3->body.size.width = srf->base_size.width;
+ cmd3->body.size.height = srf->base_size.height;
+ cmd3->body.size.depth = srf->base_size.depth;
+ cmd3->body.arraySize = srf->array_size;
+ } else if (srf->array_size > 0) {
cmd2->header.id = cmd_id;
cmd2->header.size = cmd_len;
cmd2->body.sid = srf->res.id;
cmd2->body.surfaceFlags = srf->flags;
- cmd2->body.format = cpu_to_le32(srf->format);
+ cmd2->body.format = srf->format;
cmd2->body.numMipLevels = srf->mip_levels[0];
cmd2->body.multisampleCount = srf->multisample_count;
cmd2->body.autogenFilter = srf->autogen_filter;
@@ -1085,7 +1131,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
+ cmd->body.format = srf->format;
cmd->body.numMipLevels = srf->mip_levels[0];
cmd->body.multisampleCount = srf->multisample_count;
cmd->body.autogenFilter = srf->autogen_filter;
@@ -1210,7 +1256,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -1256,194 +1302,55 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
/**
* vmw_gb_surface_define_ioctl - Ioctl function implementing
- * the user surface define functionality.
+ * the user surface define functionality.
*
- * @dev: Pointer to a struct drm_device.
- * @data: Pointer to data copied from / to user-space.
- * @file_priv: Pointer to a drm file private structure.
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
*/
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf;
- struct vmw_surface *srf;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
union drm_vmw_gb_surface_create_arg *arg =
(union drm_vmw_gb_surface_create_arg *)data;
- struct drm_vmw_gb_surface_create_req *req = &arg->req;
struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret;
- uint32_t size;
- uint32_t backup_handle = 0;
-
- if (req->multisample_count != 0)
- return -EINVAL;
-
- if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
- return -EINVAL;
+ struct drm_vmw_gb_surface_create_ext_req req_ext;
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
-
- size = vmw_user_surface_size + 128;
-
- /* Define a surface based on the parameters. */
- ret = vmw_surface_gb_priv_define(dev,
- size,
- req->svga3d_flags,
- req->format,
- req->drm_surface_flags & drm_vmw_surface_flag_scanout,
- req->mip_levels,
- req->multisample_count,
- req->array_size,
- req->base_size,
- &srf);
- if (unlikely(ret != 0))
- return ret;
-
- user_srf = container_of(srf, struct vmw_user_surface, srf);
- if (drm_is_primary_client(file_priv))
- user_srf->master = drm_master_get(file_priv->master);
+ req_ext.base = arg->req;
+ req_ext.version = drm_vmw_gb_surface_v1;
+ req_ext.svga3d_flags_upper_32_bits = 0;
+ req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
+ req_ext.must_be_zero = 0;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- res = &user_srf->srf.res;
-
-
- if (req->buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
- &res->backup,
- &user_srf->backup_base);
- if (ret == 0) {
- if (res->backup->base.num_pages * PAGE_SIZE <
- res->backup_size) {
- DRM_ERROR("Surface backup buffer is too small.\n");
- vmw_dmabuf_unreference(&res->backup);
- ret = -EINVAL;
- goto out_unlock;
- } else {
- backup_handle = req->buffer_handle;
- }
- }
- } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
- ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
- res->backup_size,
- req->drm_surface_flags &
- drm_vmw_surface_flag_shareable,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&res);
- goto out_unlock;
- }
-
- tmp = vmw_resource_reference(res);
- ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
- req->drm_surface_flags &
- drm_vmw_surface_flag_shareable,
- VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- vmw_resource_unreference(&res);
- goto out_unlock;
- }
-
- rep->handle = user_srf->prime.base.hash.key;
- rep->backup_size = res->backup_size;
- if (res->backup) {
- rep->buffer_map_handle =
- drm_vma_node_offset_addr(&res->backup->base.vma_node);
- rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
- rep->buffer_handle = backup_handle;
- } else {
- rep->buffer_map_handle = 0;
- rep->buffer_size = 0;
- rep->buffer_handle = SVGA3D_INVALID_ID;
- }
-
- vmw_resource_unreference(&res);
-
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
+ return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}
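
The legacy ioctl now just widens its request into the extended v1 form with neutral defaults and forwards it to the shared internal helper, so one path serves both UAPIs. A hedged sketch of this versioned-request widening pattern, with invented structure names:

#include <stdint.h>
#include <stdio.h>

struct req_v0 { uint32_t flags; };

struct req_v1 {
	struct req_v0 base;		/* old request embedded as-is */
	uint32_t version;		/* 1 */
	uint32_t flags_upper_32_bits;	/* new fields get neutral defaults */
};

static int handle_v1(const struct req_v1 *req)
{
	printf("v%u flags=0x%x upper=0x%x\n",
	       req->version, req->base.flags, req->flags_upper_32_bits);
	return 0;
}

/* The legacy entry point builds a v1 request and forwards it. */
static int handle_v0(const struct req_v0 *req)
{
	struct req_v1 ext = { .base = *req, .version = 1,
			      .flags_upper_32_bits = 0 };
	return handle_v1(&ext);
}

int main(void)
{
	struct req_v0 old = { .flags = 0x2 };
	return handle_v0(&old);
}
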
/**
* vmw_gb_surface_reference_ioctl - Ioctl function implementing
- * the user surface reference functionality.
+ * the user surface reference functionality.
*
- * @dev: Pointer to a struct drm_device.
- * @data: Pointer to data copied from / to user-space.
- * @file_priv: Pointer to a drm file private structure.
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
*/
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_gb_surface_reference_arg *arg =
(union drm_vmw_gb_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
- struct ttm_base_object *base;
- uint32_t backup_handle;
- int ret = -EINVAL;
+ struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
+ int ret;
+
+ ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
- ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
- req->handle_type, &base);
if (unlikely(ret != 0))
return ret;
- user_srf = container_of(base, struct vmw_user_surface, prime.base);
- srf = &user_srf->srf;
- if (!srf->res.backup) {
- DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
- goto out_bad_resource;
- }
-
- mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
- ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
- &backup_handle);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a GB surface "
- "backup buffer.\n");
- (void) ttm_ref_object_base_unref(tfile, base->hash.key,
- TTM_REF_USAGE);
- goto out_bad_resource;
- }
-
- rep->creq.svga3d_flags = srf->flags;
- rep->creq.format = srf->format;
- rep->creq.mip_levels = srf->mip_levels[0];
- rep->creq.drm_surface_flags = 0;
- rep->creq.multisample_count = srf->multisample_count;
- rep->creq.autogen_filter = srf->autogen_filter;
- rep->creq.array_size = srf->array_size;
- rep->creq.buffer_handle = backup_handle;
- rep->creq.base_size = srf->base_size;
- rep->crep.handle = user_srf->prime.base.hash.key;
- rep->crep.backup_size = srf->res.backup_size;
- rep->crep.buffer_handle = backup_handle;
- rep->crep.buffer_map_handle =
- drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
- rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
-
-out_bad_resource:
- ttm_base_object_unref(&base);
+ rep->creq = rep_ext.creq.base;
+ rep->crep = rep_ext.crep;
return ret;
}
@@ -1461,6 +1368,8 @@ out_bad_resource:
 * @multisample_count: Number of samples per pixel for the surface.
* @array_size: Surface array size.
 * @size: width, height, depth of the surface requested
+ * @multisample_pattern: Multisampling pattern when msaa is supported
+ * @quality_level: Precision settings
* @user_srf_out: allocated user_srf. Set to NULL on failure.
*
* GB surfaces allocated by this function will not have a user mode handle, and
@@ -1470,13 +1379,15 @@ out_bad_resource:
*/
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
- uint32_t svga3d_flags,
+ SVGA3dSurfaceAllFlags svga3d_flags,
SVGA3dSurfaceFormat format,
bool for_scanout,
uint32_t num_mip_levels,
uint32_t multisample_count,
uint32_t array_size,
struct drm_vmw_size size,
+ SVGA3dMSPattern multisample_pattern,
+ SVGA3dMSQualityLevel quality_level,
struct vmw_surface **srf_out)
{
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -1487,7 +1398,8 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
};
struct vmw_surface *srf;
int ret;
- u32 num_layers;
+ u32 num_layers = 1;
+ u32 sample_count = 1;
*srf_out = NULL;
@@ -1562,19 +1474,23 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->array_size = array_size;
srf->multisample_count = multisample_count;
+ srf->multisample_pattern = multisample_pattern;
+ srf->quality_level = quality_level;
if (array_size)
num_layers = array_size;
else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
num_layers = SVGA3D_MAX_SURFACE_FACES;
- else
- num_layers = 1;
+
+ if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
+ sample_count = srf->multisample_count;
srf->res.backup_size =
- svga3dsurface_get_serialized_size(srf->format,
- srf->base_size,
- srf->mip_levels[0],
- num_layers);
+ svga3dsurface_get_serialized_size_extended(srf->format,
+ srf->base_size,
+ srf->mip_levels[0],
+ num_layers,
+ sample_count);
if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
srf->res.backup_size += sizeof(SVGA3dDXSOState);
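
For multisampled surfaces the backup size must scale with the per-pixel sample count, which is why the extended size helper is used above. A back-of-the-envelope sketch of that sizing for a linear 4-byte-per-pixel format — the real svga3dsurface_get_serialized_size_extended() also handles block-compressed formats and alignment that this deliberately ignores:

#include <stdint.h>
#include <stdio.h>

/* Rough size model: sum the mip chain, then scale by layers and
 * samples. Assumes a linear 4-byte-per-pixel format; illustrative only. */
static uint64_t surface_size(uint32_t w, uint32_t h, uint32_t mips,
			     uint32_t layers, uint32_t samples)
{
	uint64_t total = 0;

	for (uint32_t i = 0; i < mips; i++) {
		total += (uint64_t)w * h * 4;
		w = w > 1 ? w / 2 : 1;
		h = h > 1 ? h / 2 : 1;
	}
	return total * layers * samples;
}

int main(void)
{
	/* 256x256, full mip chain of 9 levels, 1 layer, 4x MSAA */
	printf("%llu bytes\n",
	       (unsigned long long)surface_size(256, 256, 9, 1, 4));
	return 0;
}
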
@@ -1599,3 +1515,266 @@ out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
+
+/**
+ * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_gb_surface_create_ext_arg *arg =
+ (union drm_vmw_gb_surface_create_ext_arg *)data;
+ struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
+ struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+
+ return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_gb_surface_reference_ext_arg *arg =
+ (union drm_vmw_gb_surface_reference_ext_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
+
+ return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_define_internal - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @req: Request argument from user-space.
+ * @rep: Response argument to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+static int
+vmw_gb_surface_define_internal(struct drm_device *dev,
+ struct drm_vmw_gb_surface_create_ext_req *req,
+ struct drm_vmw_gb_surface_create_rep *rep,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+ uint32_t size;
+ uint32_t backup_handle = 0;
+ SVGA3dSurfaceAllFlags svga3d_flags_64 =
+ SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
+ req->base.svga3d_flags);
+
+ if (!dev_priv->has_sm4_1) {
+ /*
+ * If SM4_1 is not supported then we cannot send 64-bit flags to
+ * the device.
+ */
+ if (req->svga3d_flags_upper_32_bits != 0)
+ return -EINVAL;
+
+ if (req->base.multisample_count != 0)
+ return -EINVAL;
+
+ if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
+ return -EINVAL;
+
+ if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
+ return -EINVAL;
+ }
+
+ if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
+ req->base.multisample_count == 0)
+ return -EINVAL;
+
+ if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ size = vmw_user_surface_size + 128;
+
+ /* Define a surface based on the parameters. */
+ ret = vmw_surface_gb_priv_define(dev,
+ size,
+ svga3d_flags_64,
+ req->base.format,
+ req->base.drm_surface_flags &
+ drm_vmw_surface_flag_scanout,
+ req->base.mip_levels,
+ req->base.multisample_count,
+ req->base.array_size,
+ req->base.base_size,
+ req->multisample_pattern,
+ req->quality_level,
+ &srf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_srf = container_of(srf, struct vmw_user_surface, srf);
+ if (drm_is_primary_client(file_priv))
+ user_srf->master = drm_master_get(file_priv->master);
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ res = &user_srf->srf.res;
+
+ if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
+ &res->backup,
+ &user_srf->backup_base);
+ if (ret == 0) {
+ if (res->backup->base.num_pages * PAGE_SIZE <
+ res->backup_size) {
+ DRM_ERROR("Surface backup buffer too small.\n");
+ vmw_bo_unreference(&res->backup);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else {
+ backup_handle = req->base.buffer_handle;
+ }
+ }
+ } else if (req->base.drm_surface_flags &
+ drm_vmw_surface_flag_create_buffer)
+ ret = vmw_user_bo_alloc(dev_priv, tfile,
+ res->backup_size,
+ req->base.drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ &backup_handle,
+ &res->backup,
+ &user_srf->backup_base);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ req->base.drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ rep->handle = user_srf->prime.base.hash.key;
+ rep->backup_size = res->backup_size;
+ if (res->backup) {
+ rep->buffer_map_handle =
+ drm_vma_node_offset_addr(&res->backup->base.vma_node);
+ rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+ rep->buffer_handle = backup_handle;
+ } else {
+ rep->buffer_map_handle = 0;
+ rep->buffer_size = 0;
+ rep->buffer_handle = SVGA3D_INVALID_ID;
+ }
+
+ vmw_resource_unreference(&res);
+
+out_unlock:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_internal - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @req: Pointer to user-space request surface arg.
+ * @rep: Pointer to response to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+static int
+vmw_gb_surface_reference_internal(struct drm_device *dev,
+ struct drm_vmw_surface_arg *req,
+ struct drm_vmw_gb_surface_ref_ext_rep *rep,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct ttm_base_object *base;
+ uint32_t backup_handle;
+ int ret = -EINVAL;
+
+ ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
+ req->handle_type, &base);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_srf = container_of(base, struct vmw_user_surface, prime.base);
+ srf = &user_srf->srf;
+ if (!srf->res.backup) {
+ DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+ goto out_bad_resource;
+ }
+
+ mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+ ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a GB surface "
+ "backup buffer.\n");
+ (void) ttm_ref_object_base_unref(tfile, base->hash.key,
+ TTM_REF_USAGE);
+ goto out_bad_resource;
+ }
+
+ rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
+ rep->creq.base.format = srf->format;
+ rep->creq.base.mip_levels = srf->mip_levels[0];
+ rep->creq.base.drm_surface_flags = 0;
+ rep->creq.base.multisample_count = srf->multisample_count;
+ rep->creq.base.autogen_filter = srf->autogen_filter;
+ rep->creq.base.array_size = srf->array_size;
+ rep->creq.base.buffer_handle = backup_handle;
+ rep->creq.base.base_size = srf->base_size;
+ rep->crep.handle = user_srf->prime.base.hash.key;
+ rep->crep.backup_size = srf->res.backup_size;
+ rep->crep.buffer_handle = backup_handle;
+ rep->crep.buffer_map_handle =
+ drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+ rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+ rep->creq.version = drm_vmw_gb_surface_v1;
+ rep->creq.svga3d_flags_upper_32_bits =
+ SVGA3D_FLAGS_UPPER_32(srf->flags);
+ rep->creq.multisample_pattern = srf->multisample_pattern;
+ rep->creq.quality_level = srf->quality_level;
+ rep->creq.must_be_zero = 0;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 21111fd091f9..31786b200afc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -798,7 +798,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
struct ttm_object_file *tfile =
vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
- return vmw_user_dmabuf_verify_access(bo, tfile);
+ return vmw_user_bo_verify_access(bo, tfile);
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -852,7 +852,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_mem_reg *mem)
{
- vmw_resource_move_notify(bo, mem);
+ vmw_bo_move_notify(bo, mem);
vmw_query_move_notify(bo, mem);
}
@@ -864,7 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
- vmw_resource_swap_notify(bo);
+ vmw_bo_swap_notify(bo);
(void) ttm_bo_wait(bo, false, false);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index e771091d2cd3..7b1e5a5cbd2c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
index b4162fd78600..ebc1d83c34b4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2012-2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index b3786c1a4e80..6b6d5ab82ec3 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -623,7 +623,7 @@ static int displback_initwait(struct xen_drm_front_info *front_info)
if (ret < 0)
return ret;
- DRM_INFO("Have %d conector(s)\n", cfg->num_connectors);
+ DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
/* Create event channels for all connectors and publish */
ret = xen_drm_front_evtchnl_create_all(front_info);
if (ret < 0)
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index 2c2479b571ae..5693b4a4b02b 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -126,12 +126,12 @@ struct xen_drm_front_drm_info {
static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
{
- return (u64)fb;
+ return (uintptr_t)fb;
}
static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
{
- return (u64)gem_obj;
+ return (uintptr_t)gem_obj;
}
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
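Both cookie helpers above build a u64 from a kernel pointer; casting through uintptr_t first avoids the pointer-to-integer-of-different-size warning that a direct (u64) cast triggers on 32-bit builds. A minimal user-space sketch of the same idiom (the struct is an opaque stand-in, not from the patch):

#include <stdint.h>

struct drm_framebuffer;	/* opaque, for illustration only */

static inline uint64_t fb_to_cookie(struct drm_framebuffer *fb)
{
	/*
	 * (uint64_t)fb warns where pointers are 32 bits wide;
	 * uintptr_t matches the pointer size exactly, and the
	 * subsequent widening to 64 bits is a plain integer cast.
	 */
	return (uintptr_t)fb;
}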
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
index 8099cb343ae3..d333b67cc1a0 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
@@ -122,7 +122,7 @@ static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
}
#define xen_page_to_vaddr(page) \
- ((phys_addr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
+ ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
static int backend_unmap(struct xen_drm_front_shbuf *buf)
{
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index 13ea90f7a185..78655269d843 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -272,7 +272,7 @@ static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -326,7 +326,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
drm_connector_helper_add(&hdmi->connector,
&zx_hdmi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+ drm_connector_attach_encoder(&hdmi->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index d1931f5ea0b2..ae8c53b4b261 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -446,7 +446,7 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
static void zx_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
index 0de1a71ca4e0..b73afb212fb2 100644
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ b/drivers/gpu/drm/zte/zx_tvenc.c
@@ -297,7 +297,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
DRM_MODE_CONNECTOR_Composite);
drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index 3e7e33cd3dfa..23d1ff4355a0 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -109,7 +109,7 @@ static int zx_vga_connector_get_modes(struct drm_connector *connector)
*/
zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_HAS_DEVICE);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -175,7 +175,7 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
drm_connector_helper_add(connector, &zx_vga_connector_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
DRM_DEV_ERROR(dev, "failed to attach encoder: %d\n", ret);
goto clean_connector;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f1d5f76e9c33..d88073e7d22d 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev)
return err;
}
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ goto skip_iommu;
+
host->group = iommu_group_get(&pdev->dev);
if (host->group) {
struct iommu_domain_geometry *geometry;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index e2f4a4d93d20..527a1cddb14f 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job)
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
- if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+ if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
+ unpin->size && host->domain) {
iommu_unmap(host->domain, job->addr_phys[i],
unpin->size);
free_iova(&host->iova,
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 48685cddbad1..474b00e19697 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -122,6 +122,8 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
return IPUV3_COLORSPACE_YUV;
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_RGB24:
@@ -190,6 +192,8 @@ int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
return (24 * pixel_stride) >> 3;
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_XRGB32:
return (32 * pixel_stride) >> 3;
default:
break;
@@ -1401,6 +1405,8 @@ static int ipu_probe(struct platform_device *pdev)
return -ENODEV;
ipu->id = of_alias_get_id(np, "ipu");
+ if (ipu->id < 0)
+ ipu->id = 0;
if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
IS_ENABLED(CONFIG_DRM)) {
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 9f2d9ec42add..a9d2501500a1 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -188,6 +188,12 @@ static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
case V4L2_PIX_FMT_RGB32:
/* R G B A <=> [32:0] A:B:G:R */
return DRM_FORMAT_XBGR8888;
+ case V4L2_PIX_FMT_XBGR32:
+ /* B G R X <=> [32:0] X:R:G:B */
+ return DRM_FORMAT_XRGB8888;
+ case V4L2_PIX_FMT_XRGB32:
+ /* X R G B <=> [32:0] B:G:R:X */
+ return DRM_FORMAT_BGRX8888;
case V4L2_PIX_FMT_UYVY:
return DRM_FORMAT_UYVY;
case V4L2_PIX_FMT_YUYV:
@@ -269,9 +275,20 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
{
+ u32 ilo, sly;
+
+ if (stride < 0) {
+ stride = -stride;
+ ilo = 0x100000 - (stride / 8);
+ } else {
+ ilo = stride / 8;
+ }
+
+ sly = (stride * 2) - 1;
+
ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_ILO, stride / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_SLY, (stride * 2) - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_ILO, ilo);
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLY, sly);
};
EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
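The ILO parameter is a 20-bit two's-complement field counted in 8-byte units, which is why a negative stride (bottom field stored first) is encoded as 0x100000 minus the magnitude. A compilable spot-check of that encoding, with an illustrative 1920-byte stride:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int stride = -1920;	/* bottom-field-first example */
	uint32_t ilo, sly;

	if (stride < 0) {
		stride = -stride;
		ilo = 0x100000 - (stride / 8);	/* 20-bit two's complement */
	} else {
		ilo = stride / 8;
	}
	sly = (stride * 2) - 1;

	assert(ilo == 0xfff10);	/* the IPU reads this as -240 * 8 bytes */
	assert(sly == 3839);	/* one line of each field per stride */
	return 0;
}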
@@ -530,17 +547,17 @@ static const struct ipu_rgb def_bgra_16 = {
#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * (y) / 4) + (x) / 2)
+ (pix->width * ((y) / 2) / 2) + (x) / 2)
#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * pix->height / 4) + \
- (pix->width * (y) / 4) + (x) / 2)
+ (pix->width * ((y) / 2) / 2) + (x) / 2)
#define U2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * (y) / 2) + (x) / 2)
#define V2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * pix->height / 2) + \
(pix->width * (y) / 2) + (x) / 2)
#define UV_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * (y) / 2) + (x))
+ (pix->width * ((y) / 2)) + (x))
#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * y) + (x))
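The corrected macros halve the vertical index before scaling by the chroma stride, which only matters when y is odd: the old expression could land mid-line. A compilable check of the U-plane math with illustrative values:

#include <assert.h>

int main(void)
{
	int width = 8, y = 3;

	int old_off = width * y / 4;		/* 6: lands mid-line */
	int new_off = width * (y / 2) / 2;	/* 4: start of chroma line 1 */

	/* 4:2:0 chroma: width/2 samples per line, luma line y maps to y/2 */
	assert(new_off == (y / 2) * (width / 2));
	assert(old_off != new_off);	/* the two differ only for odd y */
	return 0;
}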
@@ -776,6 +793,8 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_XBGR32:
offset = image->rect.left * 4 +
image->rect.top * pix->bytesperline;
break;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index caa05b0702e1..954eefe144e2 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -224,14 +224,18 @@ static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
* Find the CSI data format and data width for the given V4L2 media
* bus pixel format code.
*/
-static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
+static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
+ enum v4l2_mbus_type mbus_type)
{
switch (mbus_code) {
case MEDIA_BUS_FMT_BGR565_2X8_BE:
case MEDIA_BUS_FMT_BGR565_2X8_LE:
case MEDIA_BUS_FMT_RGB565_2X8_BE:
case MEDIA_BUS_FMT_RGB565_2X8_LE:
- cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+ if (mbus_type == V4L2_MBUS_CSI2)
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+ else
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RGB565;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
@@ -247,6 +251,12 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
cfg->mipi_dt = MIPI_DT_RGB555;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
+ cfg->mipi_dt = MIPI_DT_RGB888;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
case MEDIA_BUS_FMT_UYVY8_2X8:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
cfg->mipi_dt = MIPI_DT_YUV422;
@@ -318,13 +328,17 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
/*
* Fill a CSI bus config struct from mbus_config and mbus_framefmt.
*/
-static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
+static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
struct v4l2_mbus_config *mbus_cfg,
struct v4l2_mbus_framefmt *mbus_fmt)
{
+ int ret;
+
memset(csicfg, 0, sizeof(*csicfg));
- mbus_code_to_bus_cfg(csicfg, mbus_fmt->code);
+ ret = mbus_code_to_bus_cfg(csicfg, mbus_fmt->code, mbus_cfg->type);
+ if (ret < 0)
+ return ret;
switch (mbus_cfg->type) {
case V4L2_MBUS_PARALLEL:
@@ -339,7 +353,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
break;
case V4L2_MBUS_BT656:
csicfg->ext_vsync = 0;
- if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
+ if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
+ mbus_fmt->field == V4L2_FIELD_ALTERNATE)
csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
else
csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
@@ -355,6 +370,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
/* will never get here, keep compiler quiet */
break;
}
+
+ return 0;
}
int ipu_csi_init_interface(struct ipu_csi *csi,
@@ -364,8 +381,11 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
struct ipu_csi_bus_config cfg;
unsigned long flags;
u32 width, height, data = 0;
+ int ret;
- fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+ ret = fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+ if (ret < 0)
+ return ret;
/* set default sensor frame width and height */
width = mbus_fmt->width;
@@ -586,11 +606,14 @@ int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
struct ipu_csi_bus_config cfg;
unsigned long flags;
u32 temp;
+ int ret;
if (vc > 3)
return -EINVAL;
- mbus_code_to_bus_cfg(&cfg, mbus_fmt->code);
+ ret = mbus_code_to_bus_cfg(&cfg, mbus_fmt->code, V4L2_MBUS_CSI2);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&csi->lock, flags);
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 524a717ab28e..f4081962784c 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -227,6 +227,12 @@ static const struct ipu_image_pixfmt image_convert_formats[] = {
.fourcc = V4L2_PIX_FMT_BGR32,
.bpp = 32,
}, {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .bpp = 32,
+ }, {
.fourcc = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.uv_width_dec = 2,
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 0f70e8847540..2f8db9d62551 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -128,7 +128,8 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
list_for_each_entry(pre, &ipu_pre_list, list) {
if (pre_node == pre->dev->of_node) {
mutex_unlock(&ipu_pre_list_mutex);
- device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
+ device_link_add(dev, pre->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
of_node_put(pre_node);
return pre;
}
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 83f9dd934a5d..38a3a9764e49 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -100,7 +100,8 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
list_for_each_entry(prg, &ipu_prg_list, list) {
if (prg_node == prg->dev->of_node) {
mutex_unlock(&ipu_prg_list_mutex);
- device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
+ device_link_add(dev, prg->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
prg->id = ipu_id;
of_node_put(prg_node);
return prg;
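DL_FLAG_AUTOREMOVE was split into consumer- and supplier-side variants; both call sites here keep the original semantics, where the link disappears when the consumer (the IPU) unbinds. A hedged kernel-style sketch of the pattern (device names are placeholders):

#include <linux/device.h>

/* make 'supplier' a dependency of 'consumer' during the consumer's probe */
static int link_to_supplier(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/*
	 * The link is deleted automatically when the consumer unbinds,
	 * so no explicit device_link_del() is needed in remove().
	 */
	link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -ENODEV;
	return 0;
}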
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index fc4adf3d34e8..a96bf46bc483 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -103,9 +103,11 @@
* runtime pm. If true, writing ON and OFF to the vga_switcheroo debugfs
* interface is a no-op so as not to interfere with runtime pm
* @list: client list
+ * @vga_dev: pci device of the GPU to which this audio client is bound
*
* Registered client. A client can be either a GPU or an audio device on a GPU.
- * For audio clients, the @fb_info and @active members are bogus.
+ * For audio clients, the @fb_info and @active members are bogus. For GPU
+ * clients, @vga_dev is bogus.
*/
struct vga_switcheroo_client {
struct pci_dev *pdev;
@@ -116,6 +118,7 @@ struct vga_switcheroo_client {
bool active;
bool driver_power_control;
struct list_head list;
+ struct pci_dev *vga_dev;
};
/*
@@ -161,9 +164,8 @@ struct vgasr_priv {
};
#define ID_BIT_AUDIO 0x100
-#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
-#define client_is_vga(c) ((c)->id == VGA_SWITCHEROO_UNKNOWN_ID || \
- !client_is_audio(c))
+#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
+#define client_is_vga(c) (!client_is_audio(c))
#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
@@ -192,14 +194,29 @@ static void vga_switcheroo_enable(void)
vgasr_priv.handler->init();
list_for_each_entry(client, &vgasr_priv.clients, list) {
- if (client->id != VGA_SWITCHEROO_UNKNOWN_ID)
+ if (!client_is_vga(client) ||
+ client_id(client) != VGA_SWITCHEROO_UNKNOWN_ID)
continue;
+
ret = vgasr_priv.handler->get_client_id(client->pdev);
if (ret < 0)
return;
client->id = ret;
}
+
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (!client_is_audio(client) ||
+ client_id(client) != VGA_SWITCHEROO_UNKNOWN_ID)
+ continue;
+
+ ret = vgasr_priv.handler->get_client_id(client->vga_dev);
+ if (ret < 0)
+ return;
+
+ client->id = ret | ID_BIT_AUDIO;
+ }
+
vga_switcheroo_debugfs_init(&vgasr_priv);
vgasr_priv.active = true;
}
@@ -272,7 +289,9 @@ EXPORT_SYMBOL(vga_switcheroo_handler_flags);
static int register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id, bool active,
+ enum vga_switcheroo_client_id id,
+ struct pci_dev *vga_dev,
+ bool active,
bool driver_power_control)
{
struct vga_switcheroo_client *client;
@@ -287,6 +306,7 @@ static int register_client(struct pci_dev *pdev,
client->id = id;
client->active = active;
client->driver_power_control = driver_power_control;
+ client->vga_dev = vga_dev;
mutex_lock(&vgasr_mutex);
list_add_tail(&client->list, &vgasr_priv.clients);
@@ -319,7 +339,7 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
bool driver_power_control)
{
- return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID,
+ return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID, NULL,
pdev == vga_default_device(),
driver_power_control);
}
@@ -329,19 +349,40 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
* vga_switcheroo_register_audio_client - register audio client
* @pdev: client pci device
* @ops: client callbacks
- * @id: client identifier
+ * @vga_dev: pci device which is bound to current audio client
*
* Register audio client (audio device on a GPU). The client is assumed
* to use runtime PM. Beforehand, vga_switcheroo_client_probe_defer()
* shall be called to ensure that all prerequisites are met.
*
- * Return: 0 on success, -ENOMEM on memory allocation error.
+ * Return: 0 on success, -ENOMEM on memory allocation error, -EINVAL if the
+ * client id cannot be determined.
*/
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id)
+ struct pci_dev *vga_dev)
{
- return register_client(pdev, ops, id | ID_BIT_AUDIO, false, true);
+ enum vga_switcheroo_client_id id = VGA_SWITCHEROO_UNKNOWN_ID;
+
+ /*
+ * If vga_switcheroo is already enabled, both GPU clients and the
+ * handler are registered, so get the audio client id directly from
+ * the bound GPU client. Otherwise leave it as
+ * VGA_SWITCHEROO_UNKNOWN_ID; it is set to the correct id later,
+ * when vga_switcheroo_enable() is called.
+ */
+ mutex_lock(&vgasr_mutex);
+ if (vgasr_priv.active) {
+ id = vgasr_priv.handler->get_client_id(vga_dev);
+ if (id < 0) {
+ mutex_unlock(&vgasr_mutex);
+ return -EINVAL;
+ }
+ }
+ mutex_unlock(&vgasr_mutex);
+
+ return register_client(pdev, ops, id | ID_BIT_AUDIO, vga_dev,
+ false, true);
}
EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
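With client_is_vga() reduced to the complement of client_is_audio(), the id encoding carries all the pairing information: an audio client stores its bound GPU's id plus ID_BIT_AUDIO. A stand-alone check of that encoding (values mirror the macros above):

#include <assert.h>

#define ID_BIT_AUDIO	0x100
#define client_id(id)	((id) & ~ID_BIT_AUDIO)
#define is_audio(id)	((id) & ID_BIT_AUDIO)

int main(void)
{
	int gpu_id = 1;				/* e.g. the discrete GPU */
	int audio_id = gpu_id | ID_BIT_AUDIO;	/* as set in vga_switcheroo_enable() */

	assert(is_audio(audio_id));
	assert(!is_audio(gpu_id));
	assert(client_id(audio_id) == gpu_id);	/* audio follows its GPU */
	return 0;
}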
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a49a10437c40..61e1953ff921 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -207,6 +207,16 @@ config HID_CORSAIR
- Vengeance K90
- Scimitar PRO RGB
+config HID_COUGAR
+ tristate "Cougar devices"
+ depends on HID
+ help
+ Support for Cougar devices that are not fully compliant with the
+ HID standard.
+
+ Supported devices:
+ - Cougar 500k Gaming Keyboard
+
config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
depends on HID && SND
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 511e1cbff768..bd7ac53b75c5 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CMEDIA) += hid-cmedia.o
obj-$(CONFIG_HID_CORSAIR) += hid-corsair.o
+obj-$(CONFIG_HID_COUGAR) += hid-cougar.o
obj-$(CONFIG_HID_CP2112) += hid-cp2112.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
obj-$(CONFIG_HID_DRAGONRISE) += hid-dr.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f858cc72011d..3da354af7a0a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -128,9 +128,19 @@ static int open_collection(struct hid_parser *parser, unsigned type)
usage = parser->local.usage[0];
- if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
- hid_err(parser->device, "collection stack overflow\n");
- return -EINVAL;
+ if (parser->collection_stack_ptr == parser->collection_stack_size) {
+ unsigned int *collection_stack;
+ unsigned int new_size = parser->collection_stack_size +
+ HID_COLLECTION_STACK_SIZE;
+
+ collection_stack = krealloc(parser->collection_stack,
+ new_size * sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!collection_stack)
+ return -ENOMEM;
+
+ parser->collection_stack = collection_stack;
+ parser->collection_stack_size = new_size;
}
if (parser->device->maxcollection == parser->device->collection_size) {
@@ -840,6 +850,7 @@ static int hid_scan_report(struct hid_device *hid)
break;
}
+ kfree(parser->collection_stack);
vfree(parser);
return 0;
}
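The parser now grows the collection stack in HID_COLLECTION_STACK_SIZE chunks instead of failing at a fixed depth; note that krealloc() into a temporary keeps the old buffer valid on allocation failure. A user-space sketch of the same grow-on-demand pattern (realloc stands in for krealloc):

#include <stdlib.h>

#define CHUNK 4

static int push(unsigned int **stack, unsigned int *len,
		unsigned int *size, unsigned int value)
{
	if (*len == *size) {
		unsigned int *grown =
			realloc(*stack, (*size + CHUNK) * sizeof(**stack));

		if (!grown)
			return -1;	/* *stack is still valid here */
		*stack = grown;
		*size += CHUNK;
	}
	(*stack)[(*len)++] = value;
	return 0;
}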
@@ -1939,6 +1950,29 @@ static int hid_bus_match(struct device *dev, struct device_driver *drv)
return hid_match_device(hdev, hdrv) != NULL;
}
+/**
+ * hid_compare_device_paths - check if both devices share the same path
+ * @hdev_a: hid device
+ * @hdev_b: hid device
+ * @separator: char to use as separator
+ *
+ * Check if two devices share the same path up to the last occurrence of
+ * the separator char. Both paths must exist (i.e., zero-length paths
+ * don't match).
+ */
+bool hid_compare_device_paths(struct hid_device *hdev_a,
+ struct hid_device *hdev_b, char separator)
+{
+ int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
+ int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
+
+ if (n1 != n2 || n1 <= 0 || n2 <= 0)
+ return false;
+
+ return !strncmp(hdev_a->phys, hdev_b->phys, n1);
+}
+EXPORT_SYMBOL_GPL(hid_compare_device_paths);
+
static int hid_device_probe(struct device *dev)
{
struct hid_driver *hdrv = to_hid_driver(dev->driver);
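This helper is what lets hid-cougar (added below) pair its interfaces: sibling interfaces of one physical device share the phys path up to the last separator. A stand-alone illustration with typical phys strings (the values are examples, not from the patch):

#include <assert.h>
#include <string.h>

static int same_group(const char *a, const char *b, char sep)
{
	const char *pa = strrchr(a, sep), *pb = strrchr(b, sep);

	if (!pa || !pb || pa == a || (pa - a) != (pb - b))
		return 0;
	return !strncmp(a, b, pa - a);
}

int main(void)
{
	assert(same_group("usb-0000:00:14.0-6/input0",
			  "usb-0000:00:14.0-6/input1", '/'));
	assert(!same_group("usb-0000:00:14.0-6/input0",
			   "usb-0000:00:1d.0-2/input0", '/'));
	return 0;
}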
@@ -1952,6 +1986,8 @@ static int hid_device_probe(struct device *dev)
}
hdev->io_started = false;
+ clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
+
if (!hdev->driver) {
id = hid_match_device(hdev, hdrv);
if (id == NULL) {
@@ -2215,7 +2251,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
struct hid_device *hdev = to_hid_device(dev);
if (hdev->driver == hdrv &&
- !hdrv->match(hdev, hid_ignore_special_drivers))
+ !hdrv->match(hdev, hid_ignore_special_drivers) &&
+ !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
return device_reprobe(dev);
return 0;
diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
new file mode 100644
index 000000000000..ad2e87de7dc5
--- /dev/null
+++ b/drivers/hid/hid-cougar.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID driver for Cougar 500k Gaming Keyboard
+ *
+ * Copyright (c) 2018 Daniel M. Lambea <dmlambea@gmail.com>
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Daniel M. Lambea <dmlambea@gmail.com>");
+MODULE_DESCRIPTION("Cougar 500k Gaming Keyboard");
+MODULE_LICENSE("GPL");
+MODULE_INFO(key_mappings, "G1-G6 are mapped to F13-F18");
+
+static int cougar_g6_is_space = 1;
+module_param_named(g6_is_space, cougar_g6_is_space, int, 0600);
+MODULE_PARM_DESC(g6_is_space,
+ "If set, G6 programmable key sends SPACE instead of F18 (0=off, 1=on) (default=1)");
+
+
+#define COUGAR_VENDOR_USAGE 0xff00ff00
+
+#define COUGAR_FIELD_CODE 1
+#define COUGAR_FIELD_ACTION 2
+
+#define COUGAR_KEY_G1 0x83
+#define COUGAR_KEY_G2 0x84
+#define COUGAR_KEY_G3 0x85
+#define COUGAR_KEY_G4 0x86
+#define COUGAR_KEY_G5 0x87
+#define COUGAR_KEY_G6 0x78
+#define COUGAR_KEY_FN 0x0d
+#define COUGAR_KEY_MR 0x6f
+#define COUGAR_KEY_M1 0x80
+#define COUGAR_KEY_M2 0x81
+#define COUGAR_KEY_M3 0x82
+#define COUGAR_KEY_LEDS 0x67
+#define COUGAR_KEY_LOCK 0x6e
+
+
+/* Default key mappings. The special key COUGAR_KEY_G6 is defined first
+ * because the spacebar is used more frequently than any other special
+ * key. Depending on the value of the 'g6_is_space' parameter, the
+ * mapping is updated in the probe function.
+ */
+static unsigned char cougar_mapping[][2] = {
+ { COUGAR_KEY_G6, KEY_SPACE },
+ { COUGAR_KEY_G1, KEY_F13 },
+ { COUGAR_KEY_G2, KEY_F14 },
+ { COUGAR_KEY_G3, KEY_F15 },
+ { COUGAR_KEY_G4, KEY_F16 },
+ { COUGAR_KEY_G5, KEY_F17 },
+ { COUGAR_KEY_LOCK, KEY_SCREENLOCK },
+/* The following keys are handled by the hardware itself, so no special
+ * treatment is required:
+ { COUGAR_KEY_FN, KEY_RESERVED },
+ { COUGAR_KEY_MR, KEY_RESERVED },
+ { COUGAR_KEY_M1, KEY_RESERVED },
+ { COUGAR_KEY_M2, KEY_RESERVED },
+ { COUGAR_KEY_M3, KEY_RESERVED },
+ { COUGAR_KEY_LEDS, KEY_RESERVED },
+*/
+ { 0, 0 },
+};
+
+struct cougar_shared {
+ struct list_head list;
+ struct kref kref;
+ bool enabled;
+ struct hid_device *dev;
+ struct input_dev *input;
+};
+
+struct cougar {
+ bool special_intf;
+ struct cougar_shared *shared;
+};
+
+static LIST_HEAD(cougar_udev_list);
+static DEFINE_MUTEX(cougar_udev_list_lock);
+
+static void cougar_fix_g6_mapping(struct hid_device *hdev)
+{
+ int i;
+
+ for (i = 0; cougar_mapping[i][0]; i++) {
+ if (cougar_mapping[i][0] == COUGAR_KEY_G6) {
+ cougar_mapping[i][1] =
+ cougar_g6_is_space ? KEY_SPACE : KEY_F18;
+ hid_info(hdev, "G6 mapped to %s\n",
+ cougar_g6_is_space ? "space" : "F18");
+ return;
+ }
+ }
+ hid_warn(hdev, "no mapping defined for G6/spacebar");
+}
+
+/*
+ * Constant-friendly rdesc fixup for mouse interface
+ */
+static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+ (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
+ hid_info(hdev,
+ "usage count exceeds max: fixing up report descriptor\n");
+ rdesc[115] = ((HID_MAX_USAGES-1) & 0xff);
+ rdesc[116] = ((HID_MAX_USAGES-1) >> 8);
+ }
+ return rdesc;
+}
+
+static struct cougar_shared *cougar_get_shared_data(struct hid_device *hdev)
+{
+ struct cougar_shared *shared;
+
+ /* Try to find an already-probed interface from the same device */
+ list_for_each_entry(shared, &cougar_udev_list, list) {
+ if (hid_compare_device_paths(hdev, shared->dev, '/')) {
+ kref_get(&shared->kref);
+ return shared;
+ }
+ }
+ return NULL;
+}
+
+static void cougar_release_shared_data(struct kref *kref)
+{
+ struct cougar_shared *shared = container_of(kref,
+ struct cougar_shared, kref);
+
+ mutex_lock(&cougar_udev_list_lock);
+ list_del(&shared->list);
+ mutex_unlock(&cougar_udev_list_lock);
+
+ kfree(shared);
+}
+
+static void cougar_remove_shared_data(void *resource)
+{
+ struct cougar *cougar = resource;
+
+ if (cougar->shared) {
+ kref_put(&cougar->shared->kref, cougar_release_shared_data);
+ cougar->shared = NULL;
+ }
+}
+
+/*
+ * Bind the device group's shared data to this cougar struct.
+ * If no shared data exists for this group, create and initialize it.
+ */
+static int cougar_bind_shared_data(struct hid_device *hdev, struct cougar *cougar)
+{
+ struct cougar_shared *shared;
+ int error = 0;
+
+ mutex_lock(&cougar_udev_list_lock);
+
+ shared = cougar_get_shared_data(hdev);
+ if (!shared) {
+ shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+ if (!shared) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ kref_init(&shared->kref);
+ shared->dev = hdev;
+ list_add_tail(&shared->list, &cougar_udev_list);
+ }
+
+ cougar->shared = shared;
+
+ error = devm_add_action(&hdev->dev, cougar_remove_shared_data, cougar);
+ if (error) {
+ mutex_unlock(&cougar_udev_list_lock);
+ cougar_remove_shared_data(cougar);
+ return error;
+ }
+
+out:
+ mutex_unlock(&cougar_udev_list_lock);
+ return error;
+}
+
+static int cougar_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct cougar *cougar;
+ struct hid_input *next, *hidinput = NULL;
+ unsigned int connect_mask;
+ int error;
+
+ cougar = devm_kzalloc(&hdev->dev, sizeof(*cougar), GFP_KERNEL);
+ if (!cougar)
+ return -ENOMEM;
+ hid_set_drvdata(hdev, cougar);
+
+ error = hid_parse(hdev);
+ if (error) {
+ hid_err(hdev, "parse failed\n");
+ goto fail;
+ }
+
+ if (hdev->collection->usage == COUGAR_VENDOR_USAGE) {
+ cougar->special_intf = true;
+ connect_mask = HID_CONNECT_HIDRAW;
+ } else
+ connect_mask = HID_CONNECT_DEFAULT;
+
+ error = hid_hw_start(hdev, connect_mask);
+ if (error) {
+ hid_err(hdev, "hw start failed\n");
+ goto fail;
+ }
+
+ error = cougar_bind_shared_data(hdev, cougar);
+ if (error)
+ goto fail_stop_and_cleanup;
+
+ /* The custom vendor interface will use the hid_input registered
+ * for the keyboard interface, in order to send translated key codes
+ * to it.
+ */
+ if (hdev->collection->usage == HID_GD_KEYBOARD) {
+ cougar_fix_g6_mapping(hdev);
+ list_for_each_entry_safe(hidinput, next, &hdev->inputs, list) {
+ if (hidinput->registered && hidinput->input != NULL) {
+ cougar->shared->input = hidinput->input;
+ cougar->shared->enabled = true;
+ break;
+ }
+ }
+ } else if (hdev->collection->usage == COUGAR_VENDOR_USAGE) {
+ error = hid_hw_open(hdev);
+ if (error)
+ goto fail_stop_and_cleanup;
+ }
+ return 0;
+
+fail_stop_and_cleanup:
+ hid_hw_stop(hdev);
+fail:
+ hid_set_drvdata(hdev, NULL);
+ return error;
+}
+
+/*
+ * Convert events from vendor intf to input key events
+ */
+static int cougar_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct cougar *cougar;
+ unsigned char code, action;
+ int i;
+
+ cougar = hid_get_drvdata(hdev);
+ if (!cougar->special_intf || !cougar->shared ||
+ !cougar->shared->input || !cougar->shared->enabled)
+ return 0;
+
+ code = data[COUGAR_FIELD_CODE];
+ action = data[COUGAR_FIELD_ACTION];
+ for (i = 0; cougar_mapping[i][0]; i++) {
+ if (code == cougar_mapping[i][0]) {
+ input_event(cougar->shared->input, EV_KEY,
+ cougar_mapping[i][1], action);
+ input_sync(cougar->shared->input);
+ return 0;
+ }
+ }
+ hid_warn(hdev, "unmapped special key code %x: ignoring\n", code);
+ return 0;
+}
+
+static void cougar_remove(struct hid_device *hdev)
+{
+ struct cougar *cougar = hid_get_drvdata(hdev);
+
+ if (cougar) {
+ /* Stop the vendor intf from processing more events */
+ if (cougar->shared)
+ cougar->shared->enabled = false;
+ if (cougar->special_intf)
+ hid_hw_close(hdev);
+ }
+ hid_hw_stop(hdev);
+}
+
+static struct hid_device_id cougar_id_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
+ USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD) },
+ {}
+};
+MODULE_DEVICE_TABLE(hid, cougar_id_table);
+
+static struct hid_driver cougar_driver = {
+ .name = "cougar",
+ .id_table = cougar_id_table,
+ .report_fixup = cougar_report_fixup,
+ .probe = cougar_probe,
+ .remove = cougar_remove,
+ .raw_event = cougar_raw_event,
+};
+
+module_hid_driver(cougar_driver);
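The two interfaces of the keyboard probe independently, so their shared state is reference-counted: the first probe creates the cougar_shared object with kref_init(), later probes take it with kref_get(), and the devm teardown of each interface drops one reference, so the object survives until the last interface unbinds regardless of unbind order. A condensed kernel-style sketch of that lifetime rule:

#include <linux/kref.h>
#include <linux/slab.h>

struct shared {
	struct kref kref;
	/* ... state visible to all sibling interfaces ... */
};

static struct shared *shared_create(void)
{
	struct shared *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (s)
		kref_init(&s->kref);	/* refcount starts at 1 */
	return s;
}

static void shared_release(struct kref *kref)
{
	kfree(container_of(kref, struct shared, kref));
}

/* each additional interface: kref_get(&s->kref); every unbind path: */
static void shared_put(struct shared *s)
{
	kref_put(&s->kref, shared_release);	/* frees on last put */
}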
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 8469b6964ff6..b48100236df8 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1154,6 +1154,8 @@ copy_rest:
goto out;
if (list->tail > list->head) {
len = list->tail - list->head;
+ if (len > count)
+ len = count;
if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
ret = -EFAULT;
@@ -1163,6 +1165,8 @@ copy_rest:
list->head += len;
} else {
len = HID_DEBUG_BUFSIZE - list->head;
+ if (len > count)
+ len = count;
if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
ret = -EFAULT;
@@ -1170,7 +1174,9 @@ copy_rest:
}
list->head = 0;
ret += len;
- goto copy_rest;
+ count -= len;
+ if (count > 0)
+ goto copy_rest;
}
}
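Both branches now clamp the chunk length to the user-supplied count before copy_to_user(), and the wrap-around branch only loops again while count has room left; without the clamp, a read() with a small buffer could be overrun. A user-space sketch of the bounded ring-buffer read (memcpy stands in for copy_to_user):

#include <stddef.h>
#include <string.h>

#define BUFSIZE 512

/* copy at most 'count' bytes from a ring buffer; returns bytes copied */
static size_t ring_read(const char *buf, size_t *head, size_t tail,
			char *out, size_t count)
{
	size_t len, ret = 0;

	while (*head != tail && count) {
		if (tail > *head)
			len = tail - *head;
		else
			len = BUFSIZE - *head;	/* up to the wrap point */
		if (len > count)
			len = count;		/* the added clamp */
		memcpy(out + ret, buf + *head, len);
		*head = (*head + len) % BUFSIZE;
		ret += len;
		count -= len;
	}
	return ret;
}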
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 803a725785cf..07e26c3567eb 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -19,38 +19,49 @@
#include "hid-ids.h"
+#define ELAN_MT_I2C 0x5d
#define ELAN_SINGLE_FINGER 0x81
#define ELAN_MT_FIRST_FINGER 0x82
#define ELAN_MT_SECOND_FINGER 0x83
#define ELAN_INPUT_REPORT_SIZE 8
+#define ELAN_I2C_REPORT_SIZE 32
+#define ELAN_FINGER_DATA_LEN 5
+#define ELAN_MAX_FINGERS 5
+#define ELAN_MAX_PRESSURE 255
+#define ELAN_TP_USB_INTF 1
+
+#define ELAN_FEATURE_REPORT 0x0d
+#define ELAN_FEATURE_SIZE 5
+#define ELAN_PARAM_MAX_X 6
+#define ELAN_PARAM_MAX_Y 7
+#define ELAN_PARAM_RES 8
#define ELAN_MUTE_LED_REPORT 0xBC
#define ELAN_LED_REPORT_SIZE 8
-struct elan_touchpad_settings {
- u8 max_fingers;
- u16 max_x;
- u16 max_y;
- u8 max_area_x;
- u8 max_area_y;
- u8 max_w;
- int usb_bInterfaceNumber;
-};
+#define ELAN_HAS_LED BIT(0)
struct elan_drvdata {
struct input_dev *input;
u8 prev_report[ELAN_INPUT_REPORT_SIZE];
struct led_classdev mute_led;
u8 mute_led_state;
- struct elan_touchpad_settings *settings;
+ u16 max_x;
+ u16 max_y;
+ u16 res_x;
+ u16 res_y;
};
static int is_not_elan_touchpad(struct hid_device *hdev)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct elan_drvdata *drvdata = hid_get_drvdata(hdev);
+ if (hdev->bus == BUS_USB) {
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ return (intf->altsetting->desc.bInterfaceNumber !=
+ ELAN_TP_USB_INTF);
+ }
- return (intf->altsetting->desc.bInterfaceNumber != drvdata->settings->usb_bInterfaceNumber);
+ return 0;
}
static int elan_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -62,12 +73,86 @@ static int elan_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (field->report->id == ELAN_SINGLE_FINGER ||
field->report->id == ELAN_MT_FIRST_FINGER ||
- field->report->id == ELAN_MT_SECOND_FINGER)
+ field->report->id == ELAN_MT_SECOND_FINGER ||
+ field->report->id == ELAN_MT_I2C)
return -1;
return 0;
}
+static int elan_get_device_param(struct hid_device *hdev,
+ unsigned char *dmabuf, unsigned char param)
+{
+ int ret;
+
+ dmabuf[0] = ELAN_FEATURE_REPORT;
+ dmabuf[1] = 0x05;
+ dmabuf[2] = 0x03;
+ dmabuf[3] = param;
+ dmabuf[4] = 0x01;
+
+ ret = hid_hw_raw_request(hdev, ELAN_FEATURE_REPORT, dmabuf,
+ ELAN_FEATURE_SIZE, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
+ if (ret != ELAN_FEATURE_SIZE) {
+ hid_err(hdev, "Set report error for parm %d: %d\n", param, ret);
+ return ret;
+ }
+
+ ret = hid_hw_raw_request(hdev, ELAN_FEATURE_REPORT, dmabuf,
+ ELAN_FEATURE_SIZE, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != ELAN_FEATURE_SIZE) {
+ hid_err(hdev, "Get report error for parm %d: %d\n", param, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static unsigned int elan_convert_res(char val)
+{
+ /*
+ * (value from firmware) * 10 + 790 = dpi
+ * dpi * 10 / 254 = dots/mm
+ */
+ return (val * 10 + 790) * 10 / 254;
+}
+
+static int elan_get_device_params(struct hid_device *hdev)
+{
+ struct elan_drvdata *drvdata = hid_get_drvdata(hdev);
+ unsigned char *dmabuf;
+ int ret;
+
+ dmabuf = kmalloc(ELAN_FEATURE_SIZE, GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = elan_get_device_param(hdev, dmabuf, ELAN_PARAM_MAX_X);
+ if (ret)
+ goto err;
+
+ drvdata->max_x = (dmabuf[4] << 8) | dmabuf[3];
+
+ ret = elan_get_device_param(hdev, dmabuf, ELAN_PARAM_MAX_Y);
+ if (ret)
+ goto err;
+
+ drvdata->max_y = (dmabuf[4] << 8) | dmabuf[3];
+
+ ret = elan_get_device_param(hdev, dmabuf, ELAN_PARAM_RES);
+ if (ret)
+ goto err;
+
+ drvdata->res_x = elan_convert_res(dmabuf[3]);
+ drvdata->res_y = elan_convert_res(dmabuf[4]);
+
+err:
+ kfree(dmabuf);
+ return ret;
+}
+
static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
int ret;
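elan_convert_res() goes from the firmware's resolution byte to dots/mm via dpi: a firmware value of 25, for instance, gives (25 * 10 + 790) = 1040 dpi, and 1040 * 10 / 254 = 40 dots/mm in integer math. A compilable restatement (the firmware value is illustrative):

#include <assert.h>

static unsigned int convert_res(char val)
{
	return (val * 10 + 790) * 10 / 254;	/* dpi -> dots per mm */
}

int main(void)
{
	assert(convert_res(25) == 40);	/* 1040 dpi, truncated from 40.9 */
	return 0;
}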
@@ -77,6 +162,10 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (is_not_elan_touchpad(hdev))
return 0;
+ ret = elan_get_device_params(hdev);
+ if (ret)
+ return ret;
+
input = devm_input_allocate_device(&hdev->dev);
if (!input)
return -ENOMEM;
@@ -90,25 +179,25 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
input->id.version = hdev->version;
input->dev.parent = &hdev->dev;
- input_set_abs_params(input, ABS_MT_POSITION_X, 0,
- drvdata->settings->max_x, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
- drvdata->settings->max_y, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0,
- drvdata->settings->max_fingers, 0, 0);
- input_set_abs_params(input, ABS_TOOL_WIDTH, 0,
- drvdata->settings->max_w, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, drvdata->max_x,
+ 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, drvdata->max_y,
+ 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, ELAN_MAX_PRESSURE,
+ 0, 0);
__set_bit(BTN_LEFT, input->keybit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
- ret = input_mt_init_slots(input, drvdata->settings->max_fingers,
- INPUT_MT_POINTER);
+ ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER);
if (ret) {
hid_err(hdev, "Failed to init elan MT slots: %d\n", ret);
return ret;
}
+ input_abs_set_res(input, ABS_X, drvdata->res_x);
+ input_abs_set_res(input, ABS_Y, drvdata->res_y);
+
ret = input_register_device(input);
if (ret) {
hid_err(hdev, "Failed to register elan input device: %d\n",
@@ -126,7 +215,7 @@ static void elan_report_mt_slot(struct elan_drvdata *drvdata, u8 *data,
unsigned int slot_num)
{
struct input_dev *input = drvdata->input;
- int x, y, w;
+ int x, y, p;
bool active = !!data;
@@ -134,17 +223,17 @@ static void elan_report_mt_slot(struct elan_drvdata *drvdata, u8 *data,
input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
if (active) {
x = ((data[0] & 0xF0) << 4) | data[1];
- y = drvdata->settings->max_y -
+ y = drvdata->max_y -
(((data[0] & 0x07) << 8) | data[2]);
- w = data[4];
+ p = data[4];
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
- input_report_abs(input, ABS_TOOL_WIDTH, w);
+ input_report_abs(input, ABS_MT_PRESSURE, p);
}
}
-static void elan_report_input(struct elan_drvdata *drvdata, u8 *data)
+static void elan_usb_report_input(struct elan_drvdata *drvdata, u8 *data)
{
int i;
struct input_dev *input = drvdata->input;
@@ -162,7 +251,7 @@ static void elan_report_input(struct elan_drvdata *drvdata, u8 *data)
* byte 5: x8 x7 x6 x5 x4 x3 x2 x1
* byte 6: y8 y7 y6 y5 y4 y3 y2 y1
* byte 7: sy4 sy3 sy2 sy1 sx4 sx3 sx2 sx1
- * byte 8: w8 w7 w6 w5 w4 w3 w2 w1
+ * byte 8: p8 p7 p6 p5 p4 p3 p2 p1
*
* packet structure for ELAN_MT_SECOND_FINGER:
*
@@ -171,19 +260,21 @@ static void elan_report_input(struct elan_drvdata *drvdata, u8 *data)
* byte 3: x8 x7 x6 x5 x4 x3 x2 x1
* byte 4: y8 y7 y6 y5 y4 y3 y2 y1
* byte 5: sy4 sy3 sy2 sy1 sx4 sx3 sx2 sx1
- * byte 6: w8 w7 w6 w5 w4 w3 w2 w1
+ * byte 6: p8 p7 p6 p5 p4 p3 p2 p1
* byte 7: 0 0 0 0 0 0 0 0
* byte 8: 0 0 0 0 0 0 0 0
*
* f5-f1: finger touch bits
* L: clickpad button
- * sy / sx: not sure yet, but this looks like rectangular
- * area for finger
- * w: looks like finger width
+ * sy / sx: finger width / height expressed in traces; the total number
+ * of traces can be queried by doing a HID_REQ_SET_REPORT of
+ * { 0x0d, 0x05, 0x03, 0x05, 0x01 } followed by a GET. In the
+ * returned buf, buf[3] = no-x-traces and buf[4] = no-y-traces.
+ * p: pressure
*/
if (data[0] == ELAN_SINGLE_FINGER) {
- for (i = 0; i < drvdata->settings->max_fingers; i++) {
+ for (i = 0; i < ELAN_MAX_FINGERS; i++) {
if (data[2] & BIT(i + 3))
elan_report_mt_slot(drvdata, data + 3, i);
else
@@ -210,7 +301,7 @@ static void elan_report_input(struct elan_drvdata *drvdata, u8 *data)
if (prev_report[0] != ELAN_MT_FIRST_FINGER)
return;
- for (i = 0; i < drvdata->settings->max_fingers; i++) {
+ for (i = 0; i < ELAN_MAX_FINGERS; i++) {
if (prev_report[2] & BIT(i + 3)) {
if (!first) {
first = 1;
@@ -229,6 +320,46 @@ static void elan_report_input(struct elan_drvdata *drvdata, u8 *data)
input_sync(input);
}
+static void elan_i2c_report_input(struct elan_drvdata *drvdata, u8 *data)
+{
+ struct input_dev *input = drvdata->input;
+ u8 *finger_data;
+ int i;
+
+ /*
+ * Elan MT touchpads in i2c mode send finger data in the same format
+ * as in USB mode, but then with all fingers in a single packet.
+ *
+ * packet structure for ELAN_MT_I2C:
+ *
+ * byte 1: 0 1 0 1 1 1 0 1 // 0x5d
+ * byte 2: f5 f4 f3 f2 f1 0 0 L
+ * byte 3: x12 x11 x10 x9 0? y11 y10 y9
+ * byte 4: x8 x7 x6 x5 x4 x3 x2 x1
+ * byte 5: y8 y7 y6 y5 y4 y3 y2 y1
+ * byte 6: sy4 sy3 sy2 sy1 sx4 sx3 sx2 sx1
+ * byte 7: p8 p7 p6 p5 p4 p3 p2 p1
+ * byte 8-12: Same as byte 3-7 for second finger down
+ * byte 13-17: Same as byte 3-7 for third finger down
+ * byte 18-22: Same as byte 3-7 for fourth finger down
+ * byte 23-27: Same as byte 3-7 for fifth finger down
+ */
+
+ finger_data = data + 2;
+ for (i = 0; i < ELAN_MAX_FINGERS; i++) {
+ if (data[1] & BIT(i + 3)) {
+ elan_report_mt_slot(drvdata, finger_data, i);
+ finger_data += ELAN_FINGER_DATA_LEN;
+ } else {
+ elan_report_mt_slot(drvdata, NULL, i);
+ }
+ }
+
+ input_report_key(input, BTN_LEFT, data[1] & 0x01);
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
static int elan_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
@@ -241,11 +372,16 @@ static int elan_raw_event(struct hid_device *hdev,
data[0] == ELAN_MT_FIRST_FINGER ||
data[0] == ELAN_MT_SECOND_FINGER) {
if (size == ELAN_INPUT_REPORT_SIZE) {
- elan_report_input(drvdata, data);
+ elan_usb_report_input(drvdata, data);
return 1;
}
}
+ if (data[0] == ELAN_MT_I2C && size == ELAN_I2C_REPORT_SIZE) {
+ elan_i2c_report_input(drvdata, data);
+ return 1;
+ }
+
return 0;
}
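In the 32-byte I2C report the finger records are packed: only fingers whose touch bit is set in byte 2 consume a 5-byte slot, so the k-th reported finger starts at offset 2 + k * ELAN_FINGER_DATA_LEN. A small check of that layout (offsets are 0-based; the comment above counts bytes from 1):

#include <assert.h>

#define FINGER_DATA_LEN 5

static int finger_offset(int k)
{
	/* k counts fingers that are actually down, not slot numbers */
	return 2 + k * FINGER_DATA_LEN;
}

int main(void)
{
	assert(finger_offset(0) == 2);	/* bytes 3-7 of the report */
	assert(finger_offset(4) == 22);	/* bytes 23-27, within 32 bytes */
	return 0;
}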
@@ -343,7 +479,6 @@ static int elan_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!drvdata)
return -ENOMEM;
- drvdata->settings = (struct elan_touchpad_settings *)id->driver_data;
hid_set_drvdata(hdev, drvdata);
ret = hid_parse(hdev);
@@ -371,9 +506,11 @@ static int elan_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
goto err;
- ret = elan_init_mute_led(hdev);
- if (ret)
- goto err;
+ if (id->driver_data & ELAN_HAS_LED) {
+ ret = elan_init_mute_led(hdev);
+ if (ret)
+ goto err;
+ }
return 0;
err:
@@ -386,22 +523,14 @@ static void elan_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
-static const struct elan_touchpad_settings hp_x2_10_touchpad_data = {
- .max_fingers = 5,
- .max_x = 2930,
- .max_y = 1250,
- .max_area_x = 15,
- .max_area_y = 15,
- .max_w = 255,
- .usb_bInterfaceNumber = 1,
-};
-
static const struct hid_device_id elan_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_HP_X2),
+ .driver_data = ELAN_HAS_LED },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_HP_X2_10_COVER),
- (kernel_ulong_t)&hp_x2_10_touchpad_data},
+ .driver_data = ELAN_HAS_LED },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_TOSHIBA_CLICK_L9W) },
{ }
};
-
MODULE_DEVICE_TABLE(hid, elan_devices);
static struct hid_driver elan_driver = {
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 3aa2bb9f0f81..b372854cf38d 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -598,6 +598,9 @@ static struct hv_driver mousevsc_drv = {
.id_table = id_table,
.probe = mousevsc_probe,
.remove = mousevsc_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int __init mousevsc_init(void)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c7981ddd8776..79bdf0c7e351 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -369,6 +369,8 @@
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
#define USB_VENDOR_ID_ELAN 0x04f3
+#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
+#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
#define USB_VENDOR_ID_ELECOM 0x056e
@@ -1001,6 +1003,9 @@
#define USB_VENDOR_ID_SINO_LITE 0x1345
#define USB_DEVICE_ID_SINO_LITE_CONTROLLER 0x3008
+#define USB_VENDOR_ID_SOLID_YEAR 0x060b
+#define USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD 0x500a
+
#define USB_VENDOR_ID_SOUNDGRAPH 0x15c2
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index ab93dd5927c3..4e94ea3e280a 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1550,6 +1550,9 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
case HID_GD_WIRELESS_RADIO_CTLS:
suffix = "Wireless Radio Control";
break;
+ case HID_GD_SYSTEM_MULTIAXIS:
+ suffix = "System Multi Axis";
+ break;
default:
break;
}
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 96e7d3231d2f..72d983626afd 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -22,12 +22,13 @@
#include "hid-ids.h"
-#define MS_HIDINPUT 0x01
-#define MS_ERGONOMY 0x02
-#define MS_PRESENTER 0x04
-#define MS_RDESC 0x08
-#define MS_NOGET 0x10
-#define MS_DUPLICATE_USAGES 0x20
+#define MS_HIDINPUT BIT(0)
+#define MS_ERGONOMY BIT(1)
+#define MS_PRESENTER BIT(2)
+#define MS_RDESC BIT(3)
+#define MS_NOGET BIT(4)
+#define MS_DUPLICATE_USAGES BIT(5)
+#define MS_SURFACE_DIAL BIT(6)
static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
@@ -130,6 +131,30 @@ static int ms_presenter_8k_quirk(struct hid_input *hi, struct hid_usage *usage,
return 1;
}
+static int ms_surface_dial_quirk(struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+ case 0xff070000:
+ /* fall-through */
+ case HID_UP_DIGITIZER:
+ /* ignore those axis */
+ return -1;
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ /* fall-through */
+ case HID_GD_Y:
+ /* fall-through */
+ case HID_GD_RFKILL_BTN:
+ /* ignore those axes */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
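The quirk works on the structure of a HID usage: a 32-bit value whose upper 16 bits are the usage page, so masking with HID_USAGE_PAGE filters a whole page while a full compare picks out a single usage such as HID_GD_X. A stand-alone illustration of the split (constants copied from the HID core headers):

#include <assert.h>
#include <stdint.h>

#define HID_USAGE_PAGE	0xffff0000u
#define HID_UP_GENDESK	0x00010000u
#define HID_GD_X	0x00010030u

int main(void)
{
	uint32_t usage = HID_GD_X;

	assert((usage & HID_USAGE_PAGE) == HID_UP_GENDESK);
	assert((usage & 0xffff) == 0x30);	/* usage id within the page */
	return 0;
}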
@@ -146,6 +171,13 @@ static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi,
ms_presenter_8k_quirk(hi, usage, bit, max))
return 1;
+ if (quirks & MS_SURFACE_DIAL) {
+ int ret = ms_surface_dial_quirk(hi, field, usage, bit, max);
+
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -229,6 +261,9 @@ static int ms_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (quirks & MS_NOGET)
hdev->quirks |= HID_QUIRK_NOGET;
+ if (quirks & MS_SURFACE_DIAL)
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
@@ -281,6 +316,8 @@ static const struct hid_device_id ms_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, 0x091B),
+ .driver_data = MS_SURFACE_DIAL },
{ }
};
MODULE_DEVICE_TABLE(hid, ms_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 45968f7970f8..40fbb7c52723 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -28,14 +28,11 @@
*/
/*
- * This driver is regularly tested thanks to the tool hid-test[1].
- * This tool relies on hid-replay[2] and a database of hid devices[3].
+ * This driver is regularly tested thanks to the test suite in hid-tools[1].
* Please run these regression tests before patching this module so that
* your patch won't break existing known devices.
*
- * [1] https://github.com/bentiss/hid-test
- * [2] https://github.com/bentiss/hid-replay
- * [3] https://github.com/bentiss/hid-devices
+ * [1] https://gitlab.freedesktop.org/libevdev/hid-tools
*/
#include <linux/device.h>
@@ -90,13 +87,54 @@ enum latency_mode {
#define MT_IO_FLAGS_ACTIVE_SLOTS 1
#define MT_IO_FLAGS_PENDING_SLOTS 2
-struct mt_slot {
- __s32 x, y, cx, cy, p, w, h, a;
- __s32 contactid; /* the device ContactID assigned to this slot */
- bool touch_state; /* is the touch valid? */
- bool inrange_state; /* is the finger in proximity of the sensor? */
- bool confidence_state; /* is the touch made by a finger? */
- bool has_azimuth; /* the contact reports azimuth */
+static const bool mtrue = true; /* default for true */
+static const bool mfalse; /* default for false */
+static const __s32 mzero; /* default for 0 */
+
+#define DEFAULT_TRUE ((void *)&mtrue)
+#define DEFAULT_FALSE ((void *)&mfalse)
+#define DEFAULT_ZERO ((void *)&mzero)
+
+struct mt_usages {
+ struct list_head list;
+ __s32 *x, *y, *cx, *cy, *p, *w, *h, *a;
+ __s32 *contactid; /* the device ContactID assigned to this slot */
+ bool *tip_state; /* is the touch valid? */
+ bool *inrange_state; /* is the finger in proximity of the sensor? */
+ bool *confidence_state; /* is the touch made by a finger? */
+};
+
+struct mt_application {
+ struct list_head list;
+ unsigned int application;
+ struct list_head mt_usages; /* mt usages list */
+
+ __s32 quirks;
+
+ __s32 *scantime; /* scantime reported */
+ __s32 scantime_logical_max; /* max value for raw scantime */
+
+ __s32 *raw_cc; /* contact count in the report */
+ int left_button_state; /* left button state */
+ unsigned int mt_flags; /* flags to pass to input-mt */
+
+ unsigned long *pending_palm_slots; /* slots where we reported palm
+ * and need to release */
+
+ __u8 num_received; /* how many contacts we received */
+ __u8 num_expected; /* expected last contact index */
+ __u8 buttons_count; /* number of physical buttons per touchpad */
+ __u8 touches_by_report; /* how many touches are present in one report:
+ * 1 means we should use a serial protocol
+ * > 1 means hybrid (multitouch) protocol
+ */
+
+ __s32 dev_time; /* the scan time provided by the device */
+ unsigned long jiffies; /* the frame's jiffies */
+ int timestamp; /* the timestamp to be sent */
+ int prev_scantime; /* scantime reported previously */
+
+ bool have_contact_count;
};
struct mt_class {
@@ -111,46 +149,30 @@ struct mt_class {
bool export_all_inputs; /* do not ignore mouse, keyboards, etc... */
};
-struct mt_fields {
- unsigned usages[HID_MAX_FIELDS];
- unsigned int length;
+struct mt_report_data {
+ struct list_head list;
+ struct hid_report *report;
+ struct mt_application *application;
+ bool is_mt_collection;
};
struct mt_device {
- struct mt_slot curdata; /* placeholder of incoming data */
struct mt_class mtclass; /* our mt device class */
struct timer_list release_timer; /* to release sticky fingers */
struct hid_device *hdev; /* hid_device we're attached to */
- struct mt_fields *fields; /* temporary placeholder for storing the
- multitouch fields */
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
- int cc_index; /* contact count field index in the report */
- int cc_value_index; /* contact count value index in the field */
- int scantime_index; /* scantime field index in the report */
- int scantime_val_index; /* scantime value index in the field */
- int prev_scantime; /* scantime reported in the previous packet */
- int left_button_state; /* left button state */
- unsigned last_slot_field; /* the last field of a slot */
- unsigned mt_report_id; /* the report ID of the multitouch device */
__u8 inputmode_value; /* InputMode HID feature value */
- __u8 num_received; /* how many contacts we received */
- __u8 num_expected; /* expected last contact index */
__u8 maxcontacts;
- __u8 touches_by_report; /* how many touches are present in one report:
- * 1 means we should use a serial protocol
- * > 1 means hybrid (multitouch) protocol */
- __u8 buttons_count; /* number of physical buttons per touchpad */
bool is_buttonpad; /* is this device a button pad? */
bool serial_maybe; /* need to check for serial protocol */
- bool curvalid; /* is the current contact valid? */
- unsigned mt_flags; /* flags to pass to input-mt */
- __s32 dev_time; /* the scan time provided by the device */
- unsigned long jiffies; /* the frame's jiffies */
- int timestamp; /* the timestamp to be sent */
+
+ struct list_head applications;
+ struct list_head reports;
};
-static void mt_post_parse_default_settings(struct mt_device *td);
-static void mt_post_parse(struct mt_device *td);
+static void mt_post_parse_default_settings(struct mt_device *td,
+ struct mt_application *app);
+static void mt_post_parse(struct mt_device *td, struct mt_application *app);
/* classes of device behavior */
#define MT_CLS_DEFAULT 0x0001
@@ -203,15 +225,16 @@ static void mt_post_parse(struct mt_device *td);
* to a valid contact that was just read.
*/
-static int cypress_compute_slot(struct mt_device *td)
+static int cypress_compute_slot(struct mt_application *application,
+ struct mt_usages *slot)
{
- if (td->curdata.contactid != 0 || td->num_received == 0)
- return td->curdata.contactid;
+ if (*slot->contactid != 0 || application->num_received == 0)
+ return *slot->contactid;
else
return -1;
}
-static struct mt_class mt_classes[] = {
+static const struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_CONTACT_CNT_ACCURATE },
@@ -353,6 +376,7 @@ static ssize_t mt_set_quirks(struct device *dev,
{
struct hid_device *hdev = to_hid_device(dev);
struct mt_device *td = hid_get_drvdata(hdev);
+ struct mt_application *application;
unsigned long val;
@@ -361,8 +385,11 @@ static ssize_t mt_set_quirks(struct device *dev,
td->mtclass.quirks = val;
- if (td->cc_index < 0)
- td->mtclass.quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
+ list_for_each_entry(application, &td->applications, list) {
+ application->quirks = val;
+ if (!application->have_contact_count)
+ application->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
+ }
return count;
}
@@ -457,41 +484,199 @@ static void set_abs(struct input_dev *input, unsigned int code,
input_abs_set_res(input, code, hidinput_calc_abs_res(field, code));
}
-static void mt_store_field(struct hid_usage *usage, struct mt_device *td,
- struct hid_input *hi)
+static struct mt_usages *mt_allocate_usage(struct hid_device *hdev,
+ struct mt_application *application)
+{
+ struct mt_usages *usage;
+
+ usage = devm_kzalloc(&hdev->dev, sizeof(*usage), GFP_KERNEL);
+ if (!usage)
+ return NULL;
+
+ /* set some defaults so we do not need to check for null pointers */
+ usage->x = DEFAULT_ZERO;
+ usage->y = DEFAULT_ZERO;
+ usage->cx = DEFAULT_ZERO;
+ usage->cy = DEFAULT_ZERO;
+ usage->p = DEFAULT_ZERO;
+ usage->w = DEFAULT_ZERO;
+ usage->h = DEFAULT_ZERO;
+ usage->a = DEFAULT_ZERO;
+ usage->contactid = DEFAULT_ZERO;
+ usage->tip_state = DEFAULT_FALSE;
+ usage->inrange_state = DEFAULT_FALSE;
+ usage->confidence_state = DEFAULT_TRUE;
+
+ list_add_tail(&usage->list, &application->mt_usages);
+
+ return usage;
+}
+
+static struct mt_application *mt_allocate_application(struct mt_device *td,
+ unsigned int application)
+{
+ struct mt_application *mt_application;
+
+ mt_application = devm_kzalloc(&td->hdev->dev, sizeof(*mt_application),
+ GFP_KERNEL);
+ if (!mt_application)
+ return NULL;
+
+ mt_application->application = application;
+ INIT_LIST_HEAD(&mt_application->mt_usages);
+
+ if (application == HID_DG_TOUCHSCREEN)
+ mt_application->mt_flags |= INPUT_MT_DIRECT;
+
+ /*
+ * Model touchscreens providing buttons as touchpads.
+ */
+ if (application == HID_DG_TOUCHPAD) {
+ mt_application->mt_flags |= INPUT_MT_POINTER;
+ td->inputmode_value = MT_INPUTMODE_TOUCHPAD;
+ }
+
+ mt_application->scantime = DEFAULT_ZERO;
+ mt_application->raw_cc = DEFAULT_ZERO;
+ mt_application->quirks = td->mtclass.quirks;
+
+ list_add_tail(&mt_application->list, &td->applications);
+
+ return mt_application;
+}
+
+static struct mt_application *mt_find_application(struct mt_device *td,
+ unsigned int application)
+{
+ struct mt_application *tmp, *mt_application = NULL;
+
+ list_for_each_entry(tmp, &td->applications, list) {
+ if (application == tmp->application) {
+ mt_application = tmp;
+ break;
+ }
+ }
+
+ if (!mt_application)
+ mt_application = mt_allocate_application(td, application);
+
+ return mt_application;
+}
+
+static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
+ struct hid_report *report)
+{
+ struct mt_report_data *rdata;
+ struct hid_field *field;
+ int r, n;
+
+ rdata = devm_kzalloc(&td->hdev->dev, sizeof(*rdata), GFP_KERNEL);
+ if (!rdata)
+ return NULL;
+
+ rdata->report = report;
+ rdata->application = mt_find_application(td, report->application);
+
+ if (!rdata->application) {
+ devm_kfree(&td->hdev->dev, rdata);
+ return NULL;
+ }
+
+ for (r = 0; r < report->maxfield; r++) {
+ field = report->field[r];
+
+ if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+ continue;
+
+ for (n = 0; n < field->report_count; n++) {
+ if (field->usage[n].hid == HID_DG_CONTACTID)
+ rdata->is_mt_collection = true;
+ }
+ }
+
+ list_add_tail(&rdata->list, &td->reports);
+
+ return rdata;
+}
+
+static struct mt_report_data *mt_find_report_data(struct mt_device *td,
+ struct hid_report *report)
{
- struct mt_fields *f = td->fields;
+ struct mt_report_data *tmp, *rdata = NULL;
- if (f->length >= HID_MAX_FIELDS)
+ list_for_each_entry(tmp, &td->reports, list) {
+ if (report == tmp->report) {
+ rdata = tmp;
+ break;
+ }
+ }
+
+ if (!rdata)
+ rdata = mt_allocate_report_data(td, report);
+
+ return rdata;
+}
+
+static void mt_store_field(struct hid_device *hdev,
+ struct mt_application *application,
+ __s32 *value,
+ size_t offset)
+{
+ struct mt_usages *usage;
+ __s32 **target;
+
+ if (list_empty(&application->mt_usages))
+ usage = mt_allocate_usage(hdev, application);
+ else
+ usage = list_last_entry(&application->mt_usages,
+ struct mt_usages,
+ list);
+
+ if (!usage)
return;
- f->usages[f->length++] = usage->hid;
+ target = (__s32 **)((char *)usage + offset);
+
+ /* the value has already been filled, create a new slot */
+ if (*target != DEFAULT_TRUE &&
+ *target != DEFAULT_FALSE &&
+ *target != DEFAULT_ZERO) {
+ usage = mt_allocate_usage(hdev, application);
+ if (!usage)
+ return;
+
+ target = (__s32 **)((char *)usage + offset);
+ }
+
+ *target = value;
}
+#define MT_STORE_FIELD(__name) \
+ mt_store_field(hdev, app, \
+ &field->value[usage->usage_index], \
+ offsetof(struct mt_usages, __name))
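
A minimal standalone sketch of the offsetof() trick behind mt_store_field()/MT_STORE_FIELD (illustrative names, not from this patch): each struct mt_usages member is a pointer into the HID field's value array, and the byte offset picks which member to patch.

#include <stddef.h>
#include <stdint.h>

/* demo_usages/demo_store stand in for struct mt_usages/mt_store_field() */
struct demo_usages {
	int32_t *x;
	int32_t *y;
};

static void demo_store(struct demo_usages *u, int32_t *value, size_t offset)
{
	/* address of the member selected by `offset`, then aim it at `value` */
	int32_t **target = (int32_t **)((char *)u + offset);

	*target = value;
}

#define DEMO_STORE_FIELD(u, value, __name) \
	demo_store(u, value, offsetof(struct demo_usages, __name))

After DEMO_STORE_FIELD(&u, &field_value, x), reading *u.x always yields the report's latest value, which is why mt_process_slot() below can simply dereference slot->x.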
+
static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
+ unsigned long **bit, int *max, struct mt_application *app)
{
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
int code;
struct hid_usage *prev_usage = NULL;
- if (field->application == HID_DG_TOUCHSCREEN)
- td->mt_flags |= INPUT_MT_DIRECT;
-
/*
* Model touchscreens providing buttons as touchpads.
*/
- if (field->application == HID_DG_TOUCHPAD ||
+ if (field->application == HID_DG_TOUCHSCREEN &&
(usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
- td->mt_flags |= INPUT_MT_POINTER;
+ app->mt_flags |= INPUT_MT_POINTER;
td->inputmode_value = MT_INPUTMODE_TOUCHPAD;
}
/* count the buttons on touchpads */
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
- td->buttons_count++;
+ app->buttons_count++;
if (usage->usage_index)
prev_usage = &field->usage[usage->usage_index - 1];
@@ -502,33 +687,40 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
switch (usage->hid) {
case HID_GD_X:
if (prev_usage && (prev_usage->hid == usage->hid)) {
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TOOL_X);
- set_abs(hi->input, ABS_MT_TOOL_X, field,
- cls->sn_move);
+ code = ABS_MT_TOOL_X;
+ MT_STORE_FIELD(cx);
} else {
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_X);
- set_abs(hi->input, ABS_MT_POSITION_X, field,
- cls->sn_move);
+ code = ABS_MT_POSITION_X;
+ MT_STORE_FIELD(x);
+ }
+
+ set_abs(hi->input, code, field, cls->sn_move);
+
+ /*
+ * A system multi-axis that exports X and Y has a high
+ * chance of being used directly on a surface
+ */
+ if (field->application == HID_GD_SYSTEM_MULTIAXIS) {
+ __set_bit(INPUT_PROP_DIRECT,
+ hi->input->propbit);
+ input_set_abs_params(hi->input,
+ ABS_MT_TOOL_TYPE,
+ MT_TOOL_DIAL,
+ MT_TOOL_DIAL, 0, 0);
}
- mt_store_field(usage, td, hi);
return 1;
case HID_GD_Y:
if (prev_usage && (prev_usage->hid == usage->hid)) {
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TOOL_Y);
- set_abs(hi->input, ABS_MT_TOOL_Y, field,
- cls->sn_move);
+ code = ABS_MT_TOOL_Y;
+ MT_STORE_FIELD(cy);
} else {
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_Y);
- set_abs(hi->input, ABS_MT_POSITION_Y, field,
- cls->sn_move);
+ code = ABS_MT_POSITION_Y;
+ MT_STORE_FIELD(y);
}
- mt_store_field(usage, td, hi);
+ set_abs(hi->input, code, field, cls->sn_move);
+
return 1;
}
return 0;
@@ -536,43 +728,45 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_DIGITIZER:
switch (usage->hid) {
case HID_DG_INRANGE:
- if (cls->quirks & MT_QUIRK_HOVERING) {
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_DISTANCE);
+ if (app->quirks & MT_QUIRK_HOVERING) {
input_set_abs_params(hi->input,
ABS_MT_DISTANCE, 0, 1, 0, 0);
}
- mt_store_field(usage, td, hi);
+ MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
if ((cls->name == MT_CLS_WIN_8 ||
cls->name == MT_CLS_WIN_8_DUAL) &&
- field->application == HID_DG_TOUCHPAD)
- cls->quirks |= MT_QUIRK_CONFIDENCE;
- mt_store_field(usage, td, hi);
+ (field->application == HID_DG_TOUCHPAD ||
+ field->application == HID_DG_TOUCHSCREEN))
+ app->quirks |= MT_QUIRK_CONFIDENCE;
+
+ if (app->quirks & MT_QUIRK_CONFIDENCE)
+ input_set_abs_params(hi->input,
+ ABS_MT_TOOL_TYPE,
+ MT_TOOL_FINGER,
+ MT_TOOL_PALM, 0, 0);
+
+ MT_STORE_FIELD(confidence_state);
return 1;
case HID_DG_TIPSWITCH:
- hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
- input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
- mt_store_field(usage, td, hi);
+ if (field->application != HID_GD_SYSTEM_MULTIAXIS)
+ input_set_capability(hi->input,
+ EV_KEY, BTN_TOUCH);
+ MT_STORE_FIELD(tip_state);
return 1;
case HID_DG_CONTACTID:
- mt_store_field(usage, td, hi);
- td->touches_by_report++;
- td->mt_report_id = field->report->id;
+ MT_STORE_FIELD(contactid);
+ app->touches_by_report++;
return 1;
case HID_DG_WIDTH:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TOUCH_MAJOR);
- if (!(cls->quirks & MT_QUIRK_NO_AREA))
+ if (!(app->quirks & MT_QUIRK_NO_AREA))
set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field,
cls->sn_width);
- mt_store_field(usage, td, hi);
+ MT_STORE_FIELD(w);
return 1;
case HID_DG_HEIGHT:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TOUCH_MINOR);
- if (!(cls->quirks & MT_QUIRK_NO_AREA)) {
+ if (!(app->quirks & MT_QUIRK_NO_AREA)) {
set_abs(hi->input, ABS_MT_TOUCH_MINOR, field,
cls->sn_height);
@@ -585,41 +779,23 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
input_set_abs_params(hi->input,
ABS_MT_ORIENTATION, 0, 1, 0, 0);
}
- mt_store_field(usage, td, hi);
+ MT_STORE_FIELD(h);
return 1;
case HID_DG_TIPPRESSURE:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_PRESSURE);
set_abs(hi->input, ABS_MT_PRESSURE, field,
cls->sn_pressure);
- mt_store_field(usage, td, hi);
+ MT_STORE_FIELD(p);
return 1;
case HID_DG_SCANTIME:
- hid_map_usage(hi, usage, bit, max,
- EV_MSC, MSC_TIMESTAMP);
input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP);
- /* Ignore if indexes are out of bounds. */
- if (field->index >= field->report->maxfield ||
- usage->usage_index >= field->report_count)
- return 1;
- td->scantime_index = field->index;
- td->scantime_val_index = usage->usage_index;
- /*
- * We don't set td->last_slot_field as scan time is
- * global to the report.
- */
+ app->scantime = &field->value[usage->usage_index];
+ app->scantime_logical_max = field->logical_maximum;
return 1;
case HID_DG_CONTACTCOUNT:
- /* Ignore if indexes are out of bounds. */
- if (field->index >= field->report->maxfield ||
- usage->usage_index >= field->report_count)
- return 1;
- td->cc_index = field->index;
- td->cc_value_index = usage->usage_index;
+ app->have_contact_count = true;
+ app->raw_cc = &field->value[usage->usage_index];
return 1;
case HID_DG_AZIMUTH:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_ORIENTATION);
/*
* Azimuth has the range of [0, MAX) representing a full
* revolution. Set ABS_MT_ORIENTATION to a quarter of
@@ -630,11 +806,10 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
field->logical_maximum / 4,
cls->sn_move ?
field->logical_maximum / cls->sn_move : 0, 0);
- mt_store_field(usage, td, hi);
+ MT_STORE_FIELD(a);
return 1;
case HID_DG_CONTACTMAX:
- /* we don't set td->last_slot_field as contactcount and
- * contact max are global to the report */
+ /* contact max is global to the report */
return -1;
case HID_DG_TOUCH:
/* Legacy devices use TIPSWITCH and not TOUCH.
@@ -650,10 +825,14 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
* MS PTP spec says that external buttons left and right have
* usages 2 and 3.
*/
- if ((cls->quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
+ if ((app->quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
field->application == HID_DG_TOUCHPAD &&
(usage->hid & HID_USAGE) > 1)
code--;
+
+ if (field->application == HID_GD_SYSTEM_MULTIAXIS)
+ code = BTN_0 + ((usage->hid - 1) & HID_USAGE);
+
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
input_set_capability(hi->input, EV_KEY, code);
return 1;
@@ -666,110 +845,68 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
-static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
+static int mt_compute_slot(struct mt_device *td, struct mt_application *app,
+ struct mt_usages *slot,
+ struct input_dev *input)
{
- __s32 quirks = td->mtclass.quirks;
+ __s32 quirks = app->quirks;
if (quirks & MT_QUIRK_SLOT_IS_CONTACTID)
- return td->curdata.contactid;
+ return *slot->contactid;
if (quirks & MT_QUIRK_CYPRESS)
- return cypress_compute_slot(td);
+ return cypress_compute_slot(app, slot);
if (quirks & MT_QUIRK_SLOT_IS_CONTACTNUMBER)
- return td->num_received;
+ return app->num_received;
if (quirks & MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE)
- return td->curdata.contactid - 1;
+ return *slot->contactid - 1;
- return input_mt_get_slot_by_key(input, td->curdata.contactid);
+ return input_mt_get_slot_by_key(input, *slot->contactid);
}
-/*
- * this function is called when a whole contact has been processed,
- * so that it can assign it to a slot and store the data there
- */
-static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
+static void mt_release_pending_palms(struct mt_device *td,
+ struct mt_application *app,
+ struct input_dev *input)
{
- if ((td->mtclass.quirks & MT_QUIRK_CONTACT_CNT_ACCURATE) &&
- td->num_received >= td->num_expected)
- return;
-
- if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
- int active;
- int slotnum = mt_compute_slot(td, input);
- struct mt_slot *s = &td->curdata;
- struct input_mt *mt = input->mt;
+ int slotnum;
+ bool need_sync = false;
- if (slotnum < 0 || slotnum >= td->maxcontacts)
- return;
-
- if ((td->mtclass.quirks & MT_QUIRK_IGNORE_DUPLICATES) && mt) {
- struct input_mt_slot *slot = &mt->slots[slotnum];
- if (input_mt_is_active(slot) &&
- input_mt_is_used(mt, slot))
- return;
- }
-
- if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
- s->confidence_state = true;
- active = (s->touch_state || s->inrange_state) &&
- s->confidence_state;
+ for_each_set_bit(slotnum, app->pending_palm_slots, td->maxcontacts) {
+ clear_bit(slotnum, app->pending_palm_slots);
input_mt_slot(input, slotnum);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
- if (active) {
- /* this finger is in proximity of the sensor */
- int wide = (s->w > s->h);
- int major = max(s->w, s->h);
- int minor = min(s->w, s->h);
- int orientation = wide;
+ input_mt_report_slot_state(input, MT_TOOL_PALM, false);
- if (s->has_azimuth)
- orientation = s->a;
-
- /*
- * divided by two to match visual scale of touch
- * for devices with this quirk
- */
- if (td->mtclass.quirks & MT_QUIRK_TOUCH_SIZE_SCALING) {
- major = major >> 1;
- minor = minor >> 1;
- }
-
- input_event(input, EV_ABS, ABS_MT_POSITION_X, s->x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, s->y);
- input_event(input, EV_ABS, ABS_MT_TOOL_X, s->cx);
- input_event(input, EV_ABS, ABS_MT_TOOL_Y, s->cy);
- input_event(input, EV_ABS, ABS_MT_DISTANCE,
- !s->touch_state);
- input_event(input, EV_ABS, ABS_MT_ORIENTATION,
- orientation);
- input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
- input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
- input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
-
- set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
- }
+ need_sync = true;
}
- td->num_received++;
+ if (need_sync) {
+ input_mt_sync_frame(input);
+ input_sync(input);
+ }
}
/*
* this function is called when a whole packet has been received and processed,
* so that it can decide what to send to the input layer.
*/
-static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
+static void mt_sync_frame(struct mt_device *td, struct mt_application *app,
+ struct input_dev *input)
{
- if (td->mtclass.quirks & MT_QUIRK_WIN8_PTP_BUTTONS)
- input_event(input, EV_KEY, BTN_LEFT, td->left_button_state);
+ if (app->quirks & MT_QUIRK_WIN8_PTP_BUTTONS)
+ input_event(input, EV_KEY, BTN_LEFT, app->left_button_state);
input_mt_sync_frame(input);
- input_event(input, EV_MSC, MSC_TIMESTAMP, td->timestamp);
+ input_event(input, EV_MSC, MSC_TIMESTAMP, app->timestamp);
input_sync(input);
- td->num_received = 0;
- td->left_button_state = 0;
+
+ mt_release_pending_palms(td, app, input);
+
+ app->num_received = 0;
+ app->left_button_state = 0;
+
if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
else
@@ -777,17 +914,15 @@ static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
}
-static int mt_compute_timestamp(struct mt_device *td, struct hid_field *field,
- __s32 value)
+static int mt_compute_timestamp(struct mt_application *app, __s32 value)
{
- long delta = value - td->dev_time;
- unsigned long jdelta = jiffies_to_usecs(jiffies - td->jiffies);
+ long delta = value - app->prev_scantime;
+ unsigned long jdelta = jiffies_to_usecs(jiffies - app->jiffies);
- td->jiffies = jiffies;
- td->dev_time = value;
+ app->jiffies = jiffies;
if (delta < 0)
- delta += field->logical_maximum;
+ delta += app->scantime_logical_max;
/* HID_DG_SCANTIME is expressed in 100us, we want it in us. */
delta *= 100;
@@ -796,7 +931,7 @@ static int mt_compute_timestamp(struct mt_device *td, struct hid_field *field,
/* No data received for a while, resync the timestamp. */
return 0;
else
- return td->timestamp + delta;
+ return app->timestamp + delta;
}
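
A minimal standalone sketch of the wraparound handling above (illustrative names, not from this patch); HID_DG_SCANTIME ticks in 100us units and wraps at the field's logical maximum:

#include <stdint.h>

static long scantime_delta_us(int32_t value, int32_t prev, int32_t logical_max)
{
	long delta = value - prev;

	if (delta < 0)
		delta += logical_max;	/* device counter wrapped around */

	return delta * 100;		/* 100us ticks -> microseconds */
}

/* example: prev = 65530, value = 30, logical_max = 65535 -> 3500us */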
static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
@@ -809,63 +944,90 @@ static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
return 1;
}
-static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value,
- bool first_packet)
+static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ struct mt_application *app,
+ struct mt_usages *slot)
{
- struct mt_device *td = hid_get_drvdata(hid);
- __s32 quirks = td->mtclass.quirks;
- struct input_dev *input = field->hidinput->input;
+ struct input_mt *mt = input->mt;
+ __s32 quirks = app->quirks;
+ bool valid = true;
+ bool confidence_state = true;
+ bool inrange_state = false;
+ int active;
+ int slotnum;
+ int tool = MT_TOOL_FINGER;
+
+ if (!slot)
+ return -EINVAL;
- if (hid->claimed & HID_CLAIMED_INPUT) {
- switch (usage->hid) {
- case HID_DG_INRANGE:
- if (quirks & MT_QUIRK_VALID_IS_INRANGE)
- td->curvalid = value;
- if (quirks & MT_QUIRK_HOVERING)
- td->curdata.inrange_state = value;
- break;
- case HID_DG_TIPSWITCH:
- if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
- td->curvalid = value;
- td->curdata.touch_state = value;
- break;
- case HID_DG_CONFIDENCE:
- if (quirks & MT_QUIRK_CONFIDENCE)
- td->curdata.confidence_state = value;
- if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
- td->curvalid = value;
- break;
- case HID_DG_CONTACTID:
- td->curdata.contactid = value;
- break;
- case HID_DG_TIPPRESSURE:
- td->curdata.p = value;
- break;
- case HID_GD_X:
- if (usage->code == ABS_MT_TOOL_X)
- td->curdata.cx = value;
- else
- td->curdata.x = value;
- break;
- case HID_GD_Y:
- if (usage->code == ABS_MT_TOOL_Y)
- td->curdata.cy = value;
- else
- td->curdata.y = value;
- break;
- case HID_DG_WIDTH:
- td->curdata.w = value;
- break;
- case HID_DG_HEIGHT:
- td->curdata.h = value;
- break;
- case HID_DG_SCANTIME:
- td->timestamp = mt_compute_timestamp(td, field, value);
- break;
- case HID_DG_CONTACTCOUNT:
- break;
- case HID_DG_AZIMUTH:
+ if ((quirks & MT_QUIRK_CONTACT_CNT_ACCURATE) &&
+ app->num_received >= app->num_expected)
+ return -EAGAIN;
+
+ if (!(quirks & MT_QUIRK_ALWAYS_VALID)) {
+ if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+ valid = *slot->inrange_state;
+ if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
+ valid = *slot->tip_state;
+ if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
+ valid = *slot->confidence_state;
+
+ if (!valid)
+ return 0;
+ }
+
+ slotnum = mt_compute_slot(td, app, slot, input);
+ if (slotnum < 0 || slotnum >= td->maxcontacts)
+ return 0;
+
+ if ((quirks & MT_QUIRK_IGNORE_DUPLICATES) && mt) {
+ struct input_mt_slot *i_slot = &mt->slots[slotnum];
+
+ if (input_mt_is_active(i_slot) &&
+ input_mt_is_used(mt, i_slot))
+ return -EAGAIN;
+ }
+
+ if (quirks & MT_QUIRK_CONFIDENCE)
+ confidence_state = *slot->confidence_state;
+
+ if (quirks & MT_QUIRK_HOVERING)
+ inrange_state = *slot->inrange_state;
+
+ active = *slot->tip_state || inrange_state;
+
+ if (app->application == HID_GD_SYSTEM_MULTIAXIS)
+ tool = MT_TOOL_DIAL;
+ else if (unlikely(!confidence_state)) {
+ tool = MT_TOOL_PALM;
+ if (!active &&
+ input_mt_is_active(&mt->slots[slotnum])) {
+ /*
+ * Non-confidence was reported for a
+ * previously valid contact that is no
+ * longer valid either. We can't simply
+ * report lift-off, as userspace will not
+ * be aware of the non-confidence, so we
+ * need to split it into 2 events: an
+ * active MT_TOOL_PALM and a separate
+ * lift-off.
+ */
+ active = true;
+ set_bit(slotnum, app->pending_palm_slots);
+ }
+ }
+
+ input_mt_slot(input, slotnum);
+ input_mt_report_slot_state(input, tool, active);
+ if (active) {
+ /* this finger is in proximity of the sensor */
+ int wide = (*slot->w > *slot->h);
+ int major = max(*slot->w, *slot->h);
+ int minor = min(*slot->w, *slot->h);
+ int orientation = wide;
+ int max_azimuth;
+ int azimuth;
+
+ if (slot->a != DEFAULT_ZERO) {
/*
* Azimuth is counter-clockwise and ranges from [0, MAX)
* (a full revolution). Convert it to clockwise ranging
@@ -876,77 +1038,107 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
* out of range to [-MAX/2, MAX/2] to report an upside
* down ellipsis.
*/
- if (value > field->logical_maximum / 2)
- value -= field->logical_maximum;
- td->curdata.a = -value;
- td->curdata.has_azimuth = true;
- break;
- case HID_DG_TOUCH:
- /* do nothing */
- break;
+ azimuth = *slot->a;
+ max_azimuth = input_abs_get_max(input,
+ ABS_MT_ORIENTATION);
+ if (azimuth > max_azimuth * 2)
+ azimuth -= max_azimuth * 4;
+ orientation = -azimuth;
+ }
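/*
 * Illustrative note, not from this patch: with
 * max = input_abs_get_max(input, ABS_MT_ORIENTATION) = logical_maximum / 4,
 * the fold above is orientation = -(a > 2 * max ? a - 4 * max : a), taking a
 * counter-clockwise azimuth in [0, 4 * max) to a clockwise value in
 * [-2 * max, 2 * max].
 */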
- default:
+ if (quirks & MT_QUIRK_TOUCH_SIZE_SCALING) {
/*
- * For Win8 PTP touchpads we should only look at
- * non finger/touch events in the first_packet of
- * a (possible) multi-packet frame.
+ * divided by two to match visual scale of touch
+ * for devices with this quirk
*/
- if ((quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
- !first_packet)
- return;
+ major = major >> 1;
+ minor = minor >> 1;
+ }
- /*
- * For Win8 PTP touchpads we map both the clickpad click
- * and any "external" left buttons to BTN_LEFT if a
- * device claims to have both we need to report 1 for
- * BTN_LEFT if either is pressed, so we or all values
- * together and report the result in mt_sync_frame().
- */
- if ((quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
- usage->type == EV_KEY && usage->code == BTN_LEFT) {
- td->left_button_state |= value;
- return;
- }
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, *slot->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, *slot->y);
+ input_event(input, EV_ABS, ABS_MT_TOOL_X, *slot->cx);
+ input_event(input, EV_ABS, ABS_MT_TOOL_Y, *slot->cy);
+ input_event(input, EV_ABS, ABS_MT_DISTANCE, !*slot->tip_state);
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION, orientation);
+ input_event(input, EV_ABS, ABS_MT_PRESSURE, *slot->p);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
+
+ set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
+ }
- if (usage->type)
- input_event(input, usage->type, usage->code,
- value);
+ return 0;
+}
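
A minimal sketch of the two-frame palm lift-off described in the comment above, written against the generic input-mt API; the driver wires this through app->pending_palm_slots and mt_release_pending_palms(), and `input`/`slotnum` are assumed here:

/* frame 1: surface the rejection so userspace sees MT_TOOL_PALM */
input_mt_slot(input, slotnum);
input_mt_report_slot_state(input, MT_TOOL_PALM, true);
input_mt_sync_frame(input);
input_sync(input);

/* frame 2: report the actual lift-off */
input_mt_slot(input, slotnum);
input_mt_report_slot_state(input, MT_TOOL_PALM, false);
input_mt_sync_frame(input);
input_sync(input);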
+
+static void mt_process_mt_event(struct hid_device *hid,
+ struct mt_application *app,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ __s32 value,
+ bool first_packet)
+{
+ __s32 quirks = app->quirks;
+ struct input_dev *input = field->hidinput->input;
+
+ if (!usage->type || !(hid->claimed & HID_CLAIMED_INPUT))
+ return;
+
+ if (quirks & MT_QUIRK_WIN8_PTP_BUTTONS) {
+
+ /*
+ * For Win8 PTP touchpads we should only look at
+ * non-finger/touch events in the first_packet of a
+ * (possible) multi-packet frame.
+ */
+ if (!first_packet)
return;
- }
- if (usage->usage_index + 1 == field->report_count) {
- /* we only take into account the last report. */
- if (usage->hid == td->last_slot_field)
- mt_complete_slot(td, field->hidinput->input);
+ /*
+ * For Win8 PTP touchpads we map both the clickpad click
+ * and any "external" left buttons to BTN_LEFT if a
+ * device claims to have both we need to report 1 for
+ * BTN_LEFT if either is pressed, so we or all values
+ * together and report the result in mt_sync_frame().
+ */
+ if (usage->type == EV_KEY && usage->code == BTN_LEFT) {
+ app->left_button_state |= value;
+ return;
}
-
}
+
+ input_event(input, usage->type, usage->code, value);
}
-static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
+static void mt_touch_report(struct hid_device *hid,
+ struct mt_report_data *rdata)
{
struct mt_device *td = hid_get_drvdata(hid);
+ struct hid_report *report = rdata->report;
+ struct mt_application *app = rdata->application;
struct hid_field *field;
+ struct input_dev *input;
+ struct mt_usages *slot;
bool first_packet;
unsigned count;
- int r, n, scantime = 0;
+ int r, n;
+ int scantime = 0;
+ int contact_count = -1;
/* sticky fingers release in progress, abort */
if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
+ scantime = *app->scantime;
+ app->timestamp = mt_compute_timestamp(app, scantime);
+ if (app->raw_cc != DEFAULT_ZERO)
+ contact_count = *app->raw_cc;
+
/*
* Includes multi-packet support where subsequent
* packets are sent with zero contactcount.
*/
- if (td->scantime_index >= 0) {
- field = report->field[td->scantime_index];
- scantime = field->value[td->scantime_val_index];
- }
- if (td->cc_index >= 0) {
- struct hid_field *field = report->field[td->cc_index];
- int value = field->value[td->cc_value_index];
-
+ if (contact_count >= 0) {
/*
* For Win8 PTPs the first packet (td->num_received == 0) may
* have a contactcount of 0 if there is only a button event.
@@ -954,16 +1146,25 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
* of a possible multi-packet frame by checking that the
* timestamp has changed.
*/
- if ((td->mtclass.quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
- td->num_received == 0 && td->prev_scantime != scantime)
- td->num_expected = value;
+ if ((app->quirks & MT_QUIRK_WIN8_PTP_BUTTONS) &&
+ app->num_received == 0 &&
+ app->prev_scantime != scantime)
+ app->num_expected = contact_count;
/* A non-zero contact count always indicates a first packet */
- else if (value)
- td->num_expected = value;
+ else if (contact_count)
+ app->num_expected = contact_count;
+ }
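/*
 * Illustrative walk-through, not from this patch: a Win8 PTP frame with
 * 5 contacts split over three packets carries contact counts 5, 0, 0.
 * Only the first packet (changed scantime, app->num_received == 0) sets
 * app->num_expected = 5; the zero-count continuations keep filling slots
 * until num_received catches up and mt_sync_frame() runs.
 */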
+ app->prev_scantime = scantime;
+
+ first_packet = app->num_received == 0;
+
+ input = report->field[0]->hidinput->input;
+
+ list_for_each_entry(slot, &app->mt_usages, list) {
+ if (!mt_process_slot(td, input, app, slot))
+ app->num_received++;
}
- td->prev_scantime = scantime;
- first_packet = td->num_received == 0;
for (r = 0; r < report->maxfield; r++) {
field = report->field[r];
count = field->report_count;
@@ -972,12 +1173,13 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
continue;
for (n = 0; n < count; n++)
- mt_process_mt_event(hid, field, &field->usage[n],
- field->value[n], first_packet);
+ mt_process_mt_event(hid, app, field,
+ &field->usage[n], field->value[n],
+ first_packet);
}
- if (td->num_received >= td->num_expected)
- mt_sync_frame(td, report->field[0]->hidinput->input);
+ if (app->num_received >= app->num_expected)
+ mt_sync_frame(td, app, input);
/*
* The Windows 8 spec says 2 things:
@@ -997,7 +1199,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
* only affect laggish machines and the ones that have a firmware
* defect.
*/
- if (td->mtclass.quirks & MT_QUIRK_STICKY_FINGERS) {
+ if (app->quirks & MT_QUIRK_STICKY_FINGERS) {
if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
mod_timer(&td->release_timer,
jiffies + msecs_to_jiffies(100));
@@ -1009,7 +1211,8 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
}
static int mt_touch_input_configured(struct hid_device *hdev,
- struct hid_input *hi)
+ struct hid_input *hi,
+ struct mt_application *app)
{
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
@@ -1019,28 +1222,36 @@ static int mt_touch_input_configured(struct hid_device *hdev,
if (!td->maxcontacts)
td->maxcontacts = MT_DEFAULT_MAXCONTACT;
- mt_post_parse(td);
+ mt_post_parse(td, app);
if (td->serial_maybe)
- mt_post_parse_default_settings(td);
+ mt_post_parse_default_settings(td, app);
if (cls->is_indirect)
- td->mt_flags |= INPUT_MT_POINTER;
+ app->mt_flags |= INPUT_MT_POINTER;
- if (cls->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
- td->mt_flags |= INPUT_MT_DROP_UNUSED;
+ if (app->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
+ app->mt_flags |= INPUT_MT_DROP_UNUSED;
/* check for clickpads */
- if ((td->mt_flags & INPUT_MT_POINTER) && (td->buttons_count == 1))
+ if ((app->mt_flags & INPUT_MT_POINTER) &&
+ (app->buttons_count == 1))
td->is_buttonpad = true;
if (td->is_buttonpad)
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
- ret = input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
+ app->pending_palm_slots = devm_kcalloc(&hi->input->dev,
+ BITS_TO_LONGS(td->maxcontacts),
+ sizeof(long),
+ GFP_KERNEL);
+ if (!app->pending_palm_slots)
+ return -ENOMEM;
+
+ ret = input_mt_init_slots(input, td->maxcontacts, app->mt_flags);
if (ret)
return ret;
- td->mt_flags = 0;
+ app->mt_flags = 0;
return 0;
}
@@ -1051,6 +1262,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
unsigned long **bit, int *max)
{
struct mt_device *td = hid_get_drvdata(hdev);
+ struct mt_application *application;
+ struct mt_report_data *rdata;
+
+ rdata = mt_find_report_data(td, field->report);
+ if (!rdata) {
+ hid_err(hdev, "failed to allocate data for report\n");
+ return 0;
+ }
+
+ application = rdata->application;
/*
* If mtclass.export_all_inputs is not set, only map fields from
@@ -1066,8 +1287,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
field->application != HID_GD_SYSTEM_CONTROL &&
field->application != HID_CP_CONSUMER_CONTROL &&
field->application != HID_GD_WIRELESS_RADIO_CTLS &&
+ field->application != HID_GD_SYSTEM_MULTIAXIS &&
!(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
- td->mtclass.quirks & MT_QUIRK_ASUS_CUSTOM_UP))
+ application->quirks & MT_QUIRK_ASUS_CUSTOM_UP))
return -1;
/*
@@ -1076,7 +1298,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
* map usages to input keys.
*/
if (field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
- td->mtclass.quirks & MT_QUIRK_ASUS_CUSTOM_UP &&
+ application->quirks & MT_QUIRK_ASUS_CUSTOM_UP &&
(usage->hid & HID_USAGE_PAGE) == HID_UP_CUSTOM) {
set_bit(EV_REP, hi->input->evbit);
if (field->flags & HID_MAIN_ITEM_VARIABLE)
@@ -1093,23 +1315,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
- /*
- * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
- * for the stylus.
- * The check for mt_report_id ensures we don't process
- * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical
- * collection, but within the report ID.
- */
- if (field->physical == HID_DG_STYLUS)
- return 0;
- else if ((field->physical == 0) &&
- (field->report->id != td->mt_report_id) &&
- (td->mt_report_id != -1))
- return 0;
-
- if (field->application == HID_DG_TOUCHSCREEN ||
- field->application == HID_DG_TOUCHPAD)
- return mt_touch_input_mapping(hdev, hi, field, usage, bit, max);
+ if (rdata->is_mt_collection)
+ return mt_touch_input_mapping(hdev, hi, field, usage, bit, max,
+ application);
/* let hid-core decide for the others */
return 0;
@@ -1119,15 +1327,11 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- /*
- * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
- * for the stylus.
- */
- if (field->physical == HID_DG_STYLUS)
- return 0;
+ struct mt_device *td = hid_get_drvdata(hdev);
+ struct mt_report_data *rdata;
- if (field->application == HID_DG_TOUCHSCREEN ||
- field->application == HID_DG_TOUCHPAD) {
+ rdata = mt_find_report_data(td, field->report);
+ if (rdata && rdata->is_mt_collection) {
/* We own these mappings, tell hid-input to ignore them */
return -1;
}
@@ -1140,8 +1344,10 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct mt_device *td = hid_get_drvdata(hid);
+ struct mt_report_data *rdata;
- if (field->report->id == td->mt_report_id)
+ rdata = mt_find_report_data(td, field->report);
+ if (rdata && rdata->is_mt_collection)
return mt_touch_event(hid, field, usage, value);
return 0;
@@ -1151,12 +1357,14 @@ static void mt_report(struct hid_device *hid, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hid);
struct hid_field *field = report->field[0];
+ struct mt_report_data *rdata;
if (!(hid->claimed & HID_CLAIMED_INPUT))
return;
- if (report->id == td->mt_report_id)
- return mt_touch_report(hid, report);
+ rdata = mt_find_report_data(td, report);
+ if (rdata && rdata->is_mt_collection)
+ return mt_touch_report(hid, rdata);
if (field && field->hidinput && field->hidinput->input)
input_sync(field->hidinput->input);
@@ -1197,9 +1405,9 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
return true;
case HID_DG_CONTACTMAX:
- if (td->mtclass.maxcontacts) {
+ if (cls->maxcontacts) {
max = min_t(int, field->logical_maximum,
- td->mtclass.maxcontacts);
+ cls->maxcontacts);
if (field->value[index] != max) {
field->value[index] = max;
return true;
@@ -1259,12 +1467,13 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
}
}
-static void mt_post_parse_default_settings(struct mt_device *td)
+static void mt_post_parse_default_settings(struct mt_device *td,
+ struct mt_application *app)
{
- __s32 quirks = td->mtclass.quirks;
+ __s32 quirks = app->quirks;
/* unknown serial device needs special quirks */
- if (td->touches_by_report == 1) {
+ if (list_is_singular(&app->mt_usages)) {
quirks |= MT_QUIRK_ALWAYS_VALID;
quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
@@ -1272,21 +1481,13 @@ static void mt_post_parse_default_settings(struct mt_device *td)
quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
- td->mtclass.quirks = quirks;
+ app->quirks = quirks;
}
-static void mt_post_parse(struct mt_device *td)
+static void mt_post_parse(struct mt_device *td, struct mt_application *app)
{
- struct mt_fields *f = td->fields;
- struct mt_class *cls = &td->mtclass;
-
- if (td->touches_by_report > 0) {
- int field_count_per_touch = f->length / td->touches_by_report;
- td->last_slot_field = f->usages[field_count_per_touch - 1];
- }
-
- if (td->cc_index < 0)
- cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
+ if (!app->have_contact_count)
+ app->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
@@ -1295,13 +1496,24 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
char *name;
const char *suffix = NULL;
unsigned int application = 0;
+ struct mt_report_data *rdata;
+ struct mt_application *mt_application = NULL;
struct hid_report *report;
int ret;
list_for_each_entry(report, &hi->reports, hidinput_list) {
application = report->application;
- if (report->id == td->mt_report_id) {
- ret = mt_touch_input_configured(hdev, hi);
+ rdata = mt_find_report_data(td, report);
+ if (!rdata) {
+ hid_err(hdev, "failed to allocate data for report\n");
+ return -ENOMEM;
+ }
+
+ mt_application = rdata->application;
+
+ if (rdata->is_mt_collection) {
+ ret = mt_touch_input_configured(hdev, hi,
+ mt_application);
if (ret)
return ret;
}
@@ -1327,6 +1539,7 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
case HID_GD_SYSTEM_CONTROL:
case HID_CP_CONSUMER_CONTROL:
case HID_GD_WIRELESS_RADIO_CTLS:
+ case HID_GD_SYSTEM_MULTIAXIS:
/* already handled by hid core */
break;
case HID_DG_TOUCHSCREEN:
@@ -1390,6 +1603,7 @@ static void mt_fix_const_fields(struct hid_device *hdev, unsigned int usage)
static void mt_release_contacts(struct hid_device *hid)
{
struct hid_input *hidinput;
+ struct mt_application *application;
struct mt_device *td = hid_get_drvdata(hid);
list_for_each_entry(hidinput, &hid->inputs, list) {
@@ -1409,7 +1623,9 @@ static void mt_release_contacts(struct hid_device *hid)
}
}
- td->num_received = 0;
+ list_for_each_entry(application, &td->applications, list) {
+ application->num_received = 0;
+ }
}
static void mt_expired_timeout(struct timer_list *t)
@@ -1432,7 +1648,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret, i;
struct mt_device *td;
- struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
+ const struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
for (i = 0; mt_classes[i].name ; i++) {
if (id->driver_data == mt_classes[i].name) {
@@ -1449,17 +1665,10 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->hdev = hdev;
td->mtclass = *mtclass;
td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
- td->cc_index = -1;
- td->scantime_index = -1;
- td->mt_report_id = -1;
hid_set_drvdata(hdev, td);
- td->fields = devm_kzalloc(&hdev->dev, sizeof(struct mt_fields),
- GFP_KERNEL);
- if (!td->fields) {
- dev_err(&hdev->dev, "cannot allocate multitouch fields data\n");
- return -ENOMEM;
- }
+ INIT_LIST_HEAD(&td->applications);
+ INIT_LIST_HEAD(&td->reports);
if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
td->serial_maybe = true;
@@ -1496,10 +1705,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
- /* release .fields memory as it is not used anymore */
- devm_kfree(&hdev->dev, td->fields);
- td->fields = NULL;
-
return 0;
}
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 43b1c7234316..9bc6f4867cb3 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -955,6 +955,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = sysfs_create_group(&hdev->dev.kobj,
&ntrig_attribute_group);
+ if (ret)
+ hid_err(hdev, "cannot create sysfs group\n");
return 0;
err_free:
diff --git a/drivers/hid/hid-redragon.c b/drivers/hid/hid-redragon.c
index daf59578bf93..73c9d4c4fa34 100644
--- a/drivers/hid/hid-redragon.c
+++ b/drivers/hid/hid-redragon.c
@@ -44,29 +44,6 @@ static __u8 *redragon_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
-static int redragon_probe(struct hid_device *dev,
- const struct hid_device_id *id)
-{
- int ret;
-
- ret = hid_parse(dev);
- if (ret) {
- hid_err(dev, "parse failed\n");
- return ret;
- }
-
- /* do not register unused input device */
- if (dev->maxapplication == 1)
- return 0;
-
- ret = hid_hw_start(dev, HID_CONNECT_DEFAULT);
- if (ret) {
- hid_err(dev, "hw start failed\n");
- return ret;
- }
-
- return 0;
-}
static const struct hid_device_id redragon_devices[] = {
{HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_REDRAGON_ASURA)},
{}
@@ -77,8 +54,7 @@ MODULE_DEVICE_TABLE(hid, redragon_devices);
static struct hid_driver redragon_driver = {
.name = "redragon",
.id_table = redragon_devices,
- .report_fixup = redragon_report_fixup,
- .probe = redragon_probe
+ .report_fixup = redragon_report_fixup
};
module_hid_driver(redragon_driver);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index e475c5073c99..9671a4bad643 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1353,7 +1353,7 @@ static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
char *name;
int ret;
- sc->touchpad = input_allocate_device();
+ sc->touchpad = devm_input_allocate_device(&sc->hdev->dev);
if (!sc->touchpad)
return -ENOMEM;
@@ -1370,11 +1370,9 @@ static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
* DS4 compatible non-Sony devices with different names.
*/
name_sz = strlen(sc->hdev->name) + sizeof(DS4_TOUCHPAD_SUFFIX);
- name = kzalloc(name_sz, GFP_KERNEL);
- if (!name) {
- ret = -ENOMEM;
- goto err;
- }
+ name = devm_kzalloc(&sc->hdev->dev, name_sz, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
snprintf(name, name_sz, "%s" DS4_TOUCHPAD_SUFFIX, sc->hdev->name);
sc->touchpad->name = name;
@@ -1403,34 +1401,13 @@ static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
ret = input_mt_init_slots(sc->touchpad, touch_count, INPUT_MT_POINTER);
if (ret < 0)
- goto err;
+ return ret;
ret = input_register_device(sc->touchpad);
if (ret < 0)
- goto err;
+ return ret;
return 0;
-
-err:
- kfree(sc->touchpad->name);
- sc->touchpad->name = NULL;
-
- input_free_device(sc->touchpad);
- sc->touchpad = NULL;
-
- return ret;
-}
-
-static void sony_unregister_touchpad(struct sony_sc *sc)
-{
- if (!sc->touchpad)
- return;
-
- kfree(sc->touchpad->name);
- sc->touchpad->name = NULL;
-
- input_unregister_device(sc->touchpad);
- sc->touchpad = NULL;
}
static int sony_register_sensors(struct sony_sc *sc)
@@ -1440,7 +1417,7 @@ static int sony_register_sensors(struct sony_sc *sc)
int ret;
int range;
- sc->sensor_dev = input_allocate_device();
+ sc->sensor_dev = devm_input_allocate_device(&sc->hdev->dev);
if (!sc->sensor_dev)
return -ENOMEM;
@@ -1457,11 +1434,9 @@ static int sony_register_sensors(struct sony_sc *sc)
* DS4 compatible non-Sony devices with different names.
*/
name_sz = strlen(sc->hdev->name) + sizeof(SENSOR_SUFFIX);
- name = kzalloc(name_sz, GFP_KERNEL);
- if (!name) {
- ret = -ENOMEM;
- goto err;
- }
+ name = devm_kzalloc(&sc->hdev->dev, name_sz, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
snprintf(name, name_sz, "%s" SENSOR_SUFFIX, sc->hdev->name);
sc->sensor_dev->name = name;
@@ -1503,33 +1478,11 @@ static int sony_register_sensors(struct sony_sc *sc)
ret = input_register_device(sc->sensor_dev);
if (ret < 0)
- goto err;
+ return ret;
return 0;
-
-err:
- kfree(sc->sensor_dev->name);
- sc->sensor_dev->name = NULL;
-
- input_free_device(sc->sensor_dev);
- sc->sensor_dev = NULL;
-
- return ret;
}
-static void sony_unregister_sensors(struct sony_sc *sc)
-{
- if (!sc->sensor_dev)
- return;
-
- kfree(sc->sensor_dev->name);
- sc->sensor_dev->name = NULL;
-
- input_unregister_device(sc->sensor_dev);
- sc->sensor_dev = NULL;
-}
-
-
/*
* Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller
* to "operational". Without this, the ps3 controller will not report any
@@ -1987,25 +1940,6 @@ static int sony_led_blink_set(struct led_classdev *led, unsigned long *delay_on,
return 0;
}
-static void sony_leds_remove(struct sony_sc *sc)
-{
- struct led_classdev *led;
- int n;
-
- BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
-
- for (n = 0; n < sc->led_count; n++) {
- led = sc->leds[n];
- sc->leds[n] = NULL;
- if (!led)
- continue;
- led_classdev_unregister(led);
- kfree(led);
- }
-
- sc->led_count = 0;
-}
-
static int sony_leds_init(struct sony_sc *sc)
{
struct hid_device *hdev = sc->hdev;
@@ -2078,11 +2012,10 @@ static int sony_leds_init(struct sony_sc *sc)
if (use_ds4_names)
name_sz = strlen(dev_name(&hdev->dev)) + strlen(ds4_name_str[n]) + 2;
- led = kzalloc(sizeof(struct led_classdev) + name_sz, GFP_KERNEL);
+ led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev) + name_sz, GFP_KERNEL);
if (!led) {
hid_err(hdev, "Couldn't allocate memory for LED %d\n", n);
- ret = -ENOMEM;
- goto error_leds;
+ return -ENOMEM;
}
name = (void *)(&led[1]);
@@ -2103,21 +2036,14 @@ static int sony_leds_init(struct sony_sc *sc)
sc->leds[n] = led;
- ret = led_classdev_register(&hdev->dev, led);
+ ret = devm_led_classdev_register(&hdev->dev, led);
if (ret) {
hid_err(hdev, "Failed to register LED %d\n", n);
- sc->leds[n] = NULL;
- kfree(led);
- goto error_leds;
+ return ret;
}
}
- return ret;
-
-error_leds:
- sony_leds_remove(sc);
-
- return ret;
+ return 0;
}
static void sixaxis_send_output_report(struct sony_sc *sc)
@@ -2276,16 +2202,20 @@ static int sony_allocate_output_report(struct sony_sc *sc)
if ((sc->quirks & SIXAXIS_CONTROLLER) ||
(sc->quirks & NAVIGATION_CONTROLLER))
sc->output_report_dmabuf =
- kmalloc(sizeof(union sixaxis_output_report_01),
+ devm_kmalloc(&sc->hdev->dev,
+ sizeof(union sixaxis_output_report_01),
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
- sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x11_SIZE,
+ sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev,
+ DS4_OUTPUT_REPORT_0x11_SIZE,
GFP_KERNEL);
else if (sc->quirks & (DUALSHOCK4_CONTROLLER_USB | DUALSHOCK4_DONGLE))
- sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x05_SIZE,
+ sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev,
+ DS4_OUTPUT_REPORT_0x05_SIZE,
GFP_KERNEL);
else if (sc->quirks & MOTION_CONTROLLER)
- sc->output_report_dmabuf = kmalloc(MOTION_REPORT_0x02_SIZE,
+ sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev,
+ MOTION_REPORT_0x02_SIZE,
GFP_KERNEL);
else
return 0;
@@ -2392,36 +2322,21 @@ static int sony_battery_probe(struct sony_sc *sc, int append_dev_id)
sc->battery_desc.get_property = sony_battery_get_property;
sc->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY;
sc->battery_desc.use_for_apm = 0;
- sc->battery_desc.name = kasprintf(GFP_KERNEL, battery_str_fmt,
- sc->mac_address, sc->device_id);
+ sc->battery_desc.name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ battery_str_fmt, sc->mac_address, sc->device_id);
if (!sc->battery_desc.name)
return -ENOMEM;
- sc->battery = power_supply_register(&hdev->dev, &sc->battery_desc,
+ sc->battery = devm_power_supply_register(&hdev->dev, &sc->battery_desc,
&psy_cfg);
if (IS_ERR(sc->battery)) {
ret = PTR_ERR(sc->battery);
hid_err(hdev, "Unable to register battery device\n");
- goto err_free;
+ return ret;
}
power_supply_powers(sc->battery, &hdev->dev);
return 0;
-
-err_free:
- kfree(sc->battery_desc.name);
- sc->battery_desc.name = NULL;
- return ret;
-}
-
-static void sony_battery_remove(struct sony_sc *sc)
-{
- if (!sc->battery_desc.name)
- return;
-
- power_supply_unregister(sc->battery);
- kfree(sc->battery_desc.name);
- sc->battery_desc.name = NULL;
}
/*
@@ -2879,16 +2794,7 @@ err_stop:
device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
if (sc->hw_version)
device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
- if (sc->quirks & SONY_LED_SUPPORT)
- sony_leds_remove(sc);
- if (sc->quirks & SONY_BATTERY_SUPPORT)
- sony_battery_remove(sc);
- if (sc->touchpad)
- sony_unregister_touchpad(sc);
- if (sc->sensor_dev)
- sony_unregister_sensors(sc);
sony_cancel_work_sync(sc);
- kfree(sc->output_report_dmabuf);
sony_remove_dev_list(sc);
sony_release_device_id(sc);
hid_hw_stop(hdev);
@@ -2965,18 +2871,6 @@ static void sony_remove(struct hid_device *hdev)
hid_hw_close(hdev);
- if (sc->quirks & SONY_LED_SUPPORT)
- sony_leds_remove(sc);
-
- if (sc->quirks & SONY_BATTERY_SUPPORT)
- sony_battery_remove(sc);
-
- if (sc->touchpad)
- sony_unregister_touchpad(sc);
-
- if (sc->sensor_dev)
- sony_unregister_sensors(sc);
-
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
@@ -2988,8 +2882,6 @@ static void sony_remove(struct hid_device *hdev)
sony_cancel_work_sync(sc);
- kfree(sc->output_report_dmabuf);
-
sony_remove_dev_list(sc);
sony_release_device_id(sc);
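
The hid-sony hunks above convert manual allocation and registration (kzalloc()/kfree(), *_register()/*_unregister()) to device-managed variants, which is why the error paths and the remove-time teardown helpers disappear. A minimal sketch of the pattern, with illustrative names:

static int demo_register_touchpad(struct hid_device *hdev)
{
	/* freed automatically when the driver unbinds from hdev */
	char *name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
				    "%s Touchpad", hdev->name);
	if (!name)
		return -ENOMEM;		/* nothing to unwind by hand */

	/* devm_input_allocate_device() is likewise auto-freed and, once
	 * registered, auto-unregistered on unbind */
	return 0;
}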
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 579884ebd94d..7780da4fe897 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -455,6 +455,12 @@ static __u8 wiimote_cmd_read_ext(struct wiimote_data *wdata, __u8 *rmem)
return WIIMOTE_EXT_BALANCE_BOARD;
if (rmem[4] == 0x01 && rmem[5] == 0x20)
return WIIMOTE_EXT_PRO_CONTROLLER;
+ if (rmem[0] == 0x01 && rmem[1] == 0x00 &&
+ rmem[4] == 0x01 && rmem[5] == 0x03)
+ return WIIMOTE_EXT_DRUMS;
+ if (rmem[0] == 0x00 && rmem[1] == 0x00 &&
+ rmem[4] == 0x01 && rmem[5] == 0x03)
+ return WIIMOTE_EXT_GUITAR;
return WIIMOTE_EXT_UNKNOWN;
}
@@ -488,6 +494,8 @@ static bool wiimote_cmd_map_mp(struct wiimote_data *wdata, __u8 exttype)
/* map MP with correct pass-through mode */
switch (exttype) {
case WIIMOTE_EXT_CLASSIC_CONTROLLER:
+ case WIIMOTE_EXT_DRUMS:
+ case WIIMOTE_EXT_GUITAR:
wmem = 0x07;
break;
case WIIMOTE_EXT_NUNCHUK:
@@ -1075,6 +1083,8 @@ static const char *wiimote_exttype_names[WIIMOTE_EXT_NUM] = {
[WIIMOTE_EXT_CLASSIC_CONTROLLER] = "Nintendo Wii Classic Controller",
[WIIMOTE_EXT_BALANCE_BOARD] = "Nintendo Wii Balance Board",
[WIIMOTE_EXT_PRO_CONTROLLER] = "Nintendo Wii U Pro Controller",
+ [WIIMOTE_EXT_DRUMS] = "Nintendo Wii Drums",
+ [WIIMOTE_EXT_GUITAR] = "Nintendo Wii Guitar",
};
/*
@@ -1660,6 +1670,10 @@ static ssize_t wiimote_ext_show(struct device *dev,
return sprintf(buf, "balanceboard\n");
case WIIMOTE_EXT_PRO_CONTROLLER:
return sprintf(buf, "procontroller\n");
+ case WIIMOTE_EXT_DRUMS:
+ return sprintf(buf, "drums\n");
+ case WIIMOTE_EXT_GUITAR:
+ return sprintf(buf, "guitar\n");
case WIIMOTE_EXT_UNKNOWN:
/* fallthrough */
default:
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index c830ed39348f..aa72eb9a8e2f 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -1950,6 +1950,444 @@ static const struct wiimod_ops wiimod_pro = {
};
/*
+ * Drums
+ * Guitar-Hero, Rock-Band and other games came bundled with drums which can
+ * be plugged in as an extension to a Wiimote. Drum reports are still not entirely
+ * figured out, but the most important information is known.
+ * We create a separate device for drums and report all information via this
+ * input device.
+ */
+
+static inline void wiimod_drums_report_pressure(struct wiimote_data *wdata,
+ __u8 none, __u8 which,
+ __u8 pressure, __u8 onoff,
+ __u8 *store, __u16 code,
+ __u8 which_code)
+{
+ static const __u8 default_pressure = 3;
+
+ if (!none && which == which_code) {
+ *store = pressure;
+ input_report_abs(wdata->extension.input, code, *store);
+ } else if (onoff != !!*store) {
+ *store = onoff ? default_pressure : 0;
+ input_report_abs(wdata->extension.input, code, *store);
+ }
+}
+
+static void wiimod_drums_in_ext(struct wiimote_data *wdata, const __u8 *ext)
+{
+ __u8 pressure, which, none, hhp, sx, sy;
+ __u8 o, r, y, g, b, bass, bm, bp;
+
+ /* Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:0> |
+ * 2 | 0 | 0 | SY <5:0> |
+ * -----+-----+-----+-----------------------------+-----+
+ * 3 | HPP | NON | WHICH <5:1> | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 4 | SOFT <7:5> | 0 | 1 | 1 | 0 | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | ? | 1 | 1 | B- | 1 | B+ | 1 | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | O | R | Y | G | B | BSS | 1 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * All buttons are 0 if pressed
+ *
+ * With Motion+ enabled, the following bits become invalid:
+ * Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:1> |XXXXX|
+ * 2 | 0 | 0 | SY <5:1> |XXXXX|
+ * -----+-----+-----+-----------------------------+-----+
+ * 3 | HPP | NON | WHICH <5:1> | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 4 | SOFT <7:5> | 0 | 1 | 1 | 0 | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | ? | 1 | 1 | B- | 1 | B+ | 1 |XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | O | R | Y | G | B | BSS |XXXXX|XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+
+ pressure = 7 - (ext[3] >> 5);
+ which = (ext[2] >> 1) & 0x1f;
+ none = !!(ext[2] & 0x40);
+ hhp = !(ext[2] & 0x80);
+ sx = ext[0] & 0x3f;
+ sy = ext[1] & 0x3f;
+ o = !(ext[5] & 0x80);
+ r = !(ext[5] & 0x40);
+ y = !(ext[5] & 0x20);
+ g = !(ext[5] & 0x10);
+ b = !(ext[5] & 0x08);
+ bass = !(ext[5] & 0x04);
+ bm = !(ext[4] & 0x10);
+ bp = !(ext[4] & 0x04);
+
+ if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
+ sx &= 0x3e;
+ sy &= 0x3e;
+ }
+
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ o, &wdata->state.pressure_drums[0],
+ ABS_HAT2Y, 0x0e);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ r, &wdata->state.pressure_drums[1],
+ ABS_HAT0X, 0x19);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ y, &wdata->state.pressure_drums[2],
+ ABS_HAT2X, 0x11);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ g, &wdata->state.pressure_drums[3],
+ ABS_HAT1X, 0x12);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ b, &wdata->state.pressure_drums[4],
+ ABS_HAT0Y, 0x0f);
+
+ /* Bass shares pressure with hi-hat (set via hhp) */
+ wiimod_drums_report_pressure(wdata, none, hhp ? 0xff : which, pressure,
+ bass, &wdata->state.pressure_drums[5],
+ ABS_HAT3X, 0x1b);
+ /* Hi-hat has no on/off values, just pressure. Force to off/0. */
+ wiimod_drums_report_pressure(wdata, none, hhp ? which : 0xff, pressure,
+ 0, &wdata->state.pressure_drums[6],
+ ABS_HAT3Y, 0x0e);
+
+ input_report_abs(wdata->extension.input, ABS_X, sx - 0x20);
+ input_report_abs(wdata->extension.input, ABS_Y, sy - 0x20);
+
+ input_report_key(wdata->extension.input, BTN_START, bp);
+ input_report_key(wdata->extension.input, BTN_SELECT, bm);
+
+ input_sync(wdata->extension.input);
+}
+
+static int wiimod_drums_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags |= WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static void wiimod_drums_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+}
+
+static int wiimod_drums_probe(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ int ret;
+
+ wdata->extension.input = input_allocate_device();
+ if (!wdata->extension.input)
+ return -ENOMEM;
+
+ input_set_drvdata(wdata->extension.input, wdata);
+ wdata->extension.input->open = wiimod_drums_open;
+ wdata->extension.input->close = wiimod_drums_close;
+ wdata->extension.input->dev.parent = &wdata->hdev->dev;
+ wdata->extension.input->id.bustype = wdata->hdev->bus;
+ wdata->extension.input->id.vendor = wdata->hdev->vendor;
+ wdata->extension.input->id.product = wdata->hdev->product;
+ wdata->extension.input->id.version = wdata->hdev->version;
+ wdata->extension.input->name = WIIMOTE_NAME " Drums";
+
+ set_bit(EV_KEY, wdata->extension.input->evbit);
+ set_bit(BTN_START, wdata->extension.input->keybit);
+ set_bit(BTN_SELECT, wdata->extension.input->keybit);
+
+ set_bit(EV_ABS, wdata->extension.input->evbit);
+ set_bit(ABS_X, wdata->extension.input->absbit);
+ set_bit(ABS_Y, wdata->extension.input->absbit);
+ set_bit(ABS_HAT0X, wdata->extension.input->absbit);
+ set_bit(ABS_HAT0Y, wdata->extension.input->absbit);
+ set_bit(ABS_HAT1X, wdata->extension.input->absbit);
+ set_bit(ABS_HAT2X, wdata->extension.input->absbit);
+ set_bit(ABS_HAT2Y, wdata->extension.input->absbit);
+ set_bit(ABS_HAT3X, wdata->extension.input->absbit);
+ set_bit(ABS_HAT3Y, wdata->extension.input->absbit);
+ input_set_abs_params(wdata->extension.input,
+ ABS_X, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_Y, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT0X, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT0Y, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT1X, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT2X, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT2Y, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT3X, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT3Y, 0, 7, 0, 0);
+
+ ret = input_register_device(wdata->extension.input);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ input_free_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+ return ret;
+}
+
+static void wiimod_drums_remove(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ if (!wdata->extension.input)
+ return;
+
+ input_unregister_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+}
+
+static const struct wiimod_ops wiimod_drums = {
+ .flags = 0,
+ .arg = 0,
+ .probe = wiimod_drums_probe,
+ .remove = wiimod_drums_remove,
+ .in_ext = wiimod_drums_in_ext,
+};
+
+/*
+ * Guitar
+ * Guitar-Hero, Rock-Band and other games came bundled with guitars which can
+ * be plugged in as an extension to a Wiimote.
+ * We create a separate device for guitars and report all information via this
+ * input device.
+ */
+
+enum wiimod_guitar_keys {
+ WIIMOD_GUITAR_KEY_G,
+ WIIMOD_GUITAR_KEY_R,
+ WIIMOD_GUITAR_KEY_Y,
+ WIIMOD_GUITAR_KEY_B,
+ WIIMOD_GUITAR_KEY_O,
+ WIIMOD_GUITAR_KEY_UP,
+ WIIMOD_GUITAR_KEY_DOWN,
+ WIIMOD_GUITAR_KEY_PLUS,
+ WIIMOD_GUITAR_KEY_MINUS,
+ WIIMOD_GUITAR_KEY_NUM,
+};
+
+static const __u16 wiimod_guitar_map[] = {
+ BTN_1, /* WIIMOD_GUITAR_KEY_G */
+ BTN_2, /* WIIMOD_GUITAR_KEY_R */
+ BTN_3, /* WIIMOD_GUITAR_KEY_Y */
+ BTN_4, /* WIIMOD_GUITAR_KEY_B */
+ BTN_5, /* WIIMOD_GUITAR_KEY_O */
+ BTN_DPAD_UP, /* WIIMOD_GUITAR_KEY_UP */
+ BTN_DPAD_DOWN, /* WIIMOD_GUITAR_KEY_DOWN */
+ BTN_START, /* WIIMOD_GUITAR_KEY_PLUS */
+ BTN_SELECT, /* WIIMOD_GUITAR_KEY_MINUS */
+};
+
+static void wiimod_guitar_in_ext(struct wiimote_data *wdata, const __u8 *ext)
+{
+ __u8 sx, sy, tb, wb, bd, bm, bp, bo, br, bb, bg, by, bu;
+
+ /* Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:0> |
+ * 2 | 0 | 0 | SY <5:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 3 | 0 | 0 | 0 | TB <4:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 4 | 0 | 0 | 0 | WB <4:0> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | 1 | BD | 1 | B- | 1 | B+ | 1 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BO | BR | BB | BG | BY | 1 | 1 | BU |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * All buttons are 0 if pressed
+ *
+ * With Motion+ enabled, it will look like this:
+ * Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:1> | BU |
+ * 2 | 0 | 0 | SY <5:1> | 1 |
+ * -----+-----+-----+-----+-----------------------+-----+
+ * 3 | 0 | 0 | 0 | TB <4:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 4 | 0 | 0 | 0 | WB <4:0> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | 1 | BD | 1 | B- | 1 | B+ | 1 |XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BO | BR | BB | BG | BY | 1 |XXXXX|XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+
+ sx = ext[0] & 0x3f;
+ sy = ext[1] & 0x3f;
+ tb = ext[2] & 0x1f;
+ wb = ext[3] & 0x1f;
+ bd = !(ext[4] & 0x40);
+ bm = !(ext[4] & 0x10);
+ bp = !(ext[4] & 0x04);
+ bo = !(ext[5] & 0x80);
+ br = !(ext[5] & 0x40);
+ bb = !(ext[5] & 0x20);
+ bg = !(ext[5] & 0x10);
+ by = !(ext[5] & 0x08);
+ bu = !(ext[5] & 0x01);
+
+ if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
+ bu = !(ext[0] & 0x01);
+ sx &= 0x3e;
+ sy &= 0x3e;
+ }
+
+ input_report_abs(wdata->extension.input, ABS_X, sx - 0x20);
+ input_report_abs(wdata->extension.input, ABS_Y, sy - 0x20);
+ input_report_abs(wdata->extension.input, ABS_HAT0X, tb);
+ input_report_abs(wdata->extension.input, ABS_HAT1X, wb - 0x10);
+
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_G],
+ bg);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_R],
+ br);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_Y],
+ by);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_B],
+ bb);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_O],
+ bo);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_UP],
+ bu);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_DOWN],
+ bd);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_PLUS],
+ bp);
+ input_report_key(wdata->extension.input,
+ wiimod_guitar_map[WIIMOD_GUITAR_KEY_MINUS],
+ bm);
+
+ input_sync(wdata->extension.input);
+}
+
+static int wiimod_guitar_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags |= WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static void wiimod_guitar_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+}
+
+static int wiimod_guitar_probe(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ int ret, i;
+
+ wdata->extension.input = input_allocate_device();
+ if (!wdata->extension.input)
+ return -ENOMEM;
+
+ input_set_drvdata(wdata->extension.input, wdata);
+ wdata->extension.input->open = wiimod_guitar_open;
+ wdata->extension.input->close = wiimod_guitar_close;
+ wdata->extension.input->dev.parent = &wdata->hdev->dev;
+ wdata->extension.input->id.bustype = wdata->hdev->bus;
+ wdata->extension.input->id.vendor = wdata->hdev->vendor;
+ wdata->extension.input->id.product = wdata->hdev->product;
+ wdata->extension.input->id.version = wdata->hdev->version;
+ wdata->extension.input->name = WIIMOTE_NAME " Guitar";
+
+ set_bit(EV_KEY, wdata->extension.input->evbit);
+ for (i = 0; i < WIIMOD_GUITAR_KEY_NUM; ++i)
+ set_bit(wiimod_guitar_map[i],
+ wdata->extension.input->keybit);
+
+ set_bit(EV_ABS, wdata->extension.input->evbit);
+ set_bit(ABS_X, wdata->extension.input->absbit);
+ set_bit(ABS_Y, wdata->extension.input->absbit);
+ set_bit(ABS_HAT0X, wdata->extension.input->absbit);
+ set_bit(ABS_HAT1X, wdata->extension.input->absbit);
+ input_set_abs_params(wdata->extension.input,
+ ABS_X, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_Y, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT0X, 0, 0x1f, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HAT1X, 0, 0x0f, 1, 1);
+
+ ret = input_register_device(wdata->extension.input);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ input_free_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+ return ret;
+}
+
+static void wiimod_guitar_remove(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ if (!wdata->extension.input)
+ return;
+
+ input_unregister_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+}
+
+static const struct wiimod_ops wiimod_guitar = {
+ .flags = 0,
+ .arg = 0,
+ .probe = wiimod_guitar_probe,
+ .remove = wiimod_guitar_remove,
+ .in_ext = wiimod_guitar_in_ext,
+};
+
+/*
* Builtin Motion Plus
* This module simply sets the WIIPROTO_FLAG_BUILTIN_MP protocol flag which
* disables polling for Motion-Plus. This should be set only for devices which
@@ -2201,4 +2639,6 @@ const struct wiimod_ops *wiimod_ext_table[WIIMOTE_EXT_NUM] = {
[WIIMOTE_EXT_CLASSIC_CONTROLLER] = &wiimod_classic,
[WIIMOTE_EXT_BALANCE_BOARD] = &wiimod_bboard,
[WIIMOTE_EXT_PRO_CONTROLLER] = &wiimod_pro,
+ [WIIMOTE_EXT_DRUMS] = &wiimod_drums,
+ [WIIMOTE_EXT_GUITAR] = &wiimod_guitar,
};
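
For illustration, the stick bytes decoded above are plain 6-bit values that the driver re-centers to match the -32..31 ABS ranges registered in probe. A minimal, userspace-runnable sketch of that arithmetic (the sample report bytes are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical 6-byte guitar extension report. */
        const uint8_t ext[6] = { 0x2a, 0x15, 0x03, 0x08, 0xff, 0xff };
        int sx = ext[0] & 0x3f;   /* 6-bit stick X, 0..63 */
        int sy = ext[1] & 0x3f;   /* 6-bit stick Y, 0..63 */

        /* Re-center so ABS_X/ABS_Y run -32..31, matching
         * input_set_abs_params() in wiimod_guitar_probe(). */
        printf("ABS_X=%d ABS_Y=%d\n", sx - 0x20, sy - 0x20);
        return 0;
}
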
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
index 510ca77fe14e..3bf3d3cc1c38 100644
--- a/drivers/hid/hid-wiimote.h
+++ b/drivers/hid/hid-wiimote.h
@@ -89,6 +89,8 @@ enum wiimote_exttype {
WIIMOTE_EXT_CLASSIC_CONTROLLER,
WIIMOTE_EXT_BALANCE_BOARD,
WIIMOTE_EXT_PRO_CONTROLLER,
+ WIIMOTE_EXT_DRUMS,
+ WIIMOTE_EXT_GUITAR,
WIIMOTE_EXT_NUM,
};
@@ -137,6 +139,7 @@ struct wiimote_state {
/* calibration/cache data */
__u16 calib_bboard[4][3];
__s16 calib_pro_sticks[4];
+ __u8 pressure_drums[7];
__u8 cache_rumble;
};
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index c1652bb7bd15..2ce194a84868 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
return;
}
- if ((ret_size > size) || (ret_size <= 2)) {
+ if ((ret_size > size) || (ret_size < 2)) {
dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
__func__, size, ret_size);
return;
@@ -1002,18 +1002,18 @@ static int i2c_hid_probe(struct i2c_client *client,
return client->irq;
}
- ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
+ ihid = devm_kzalloc(&client->dev, sizeof(*ihid), GFP_KERNEL);
if (!ihid)
return -ENOMEM;
if (client->dev.of_node) {
ret = i2c_hid_of_probe(client, &ihid->pdata);
if (ret)
- goto err;
+ return ret;
} else if (!platform_data) {
ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
if (ret)
- goto err;
+ return ret;
} else {
ihid->pdata = *platform_data;
}
@@ -1021,21 +1021,20 @@ static int i2c_hid_probe(struct i2c_client *client,
/* Parse platform agnostic common properties from ACPI / device tree */
i2c_hid_fwnode_probe(client, &ihid->pdata);
- ihid->pdata.supply = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR(ihid->pdata.supply)) {
- ret = PTR_ERR(ihid->pdata.supply);
- if (ret != -EPROBE_DEFER)
- dev_err(&client->dev, "Failed to get regulator: %d\n",
- ret);
- goto err;
- }
+ ihid->pdata.supplies[0].supply = "vdd";
+ ihid->pdata.supplies[1].supply = "vddl";
+
+ ret = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
+ if (ret < 0)
+ return ret;
- ret = regulator_enable(ihid->pdata.supply);
- if (ret < 0) {
- dev_err(&client->dev, "Failed to enable regulator: %d\n",
- ret);
- goto err;
- }
if (ihid->pdata.post_power_delay_ms)
msleep(ihid->pdata.post_power_delay_ms);
@@ -1122,11 +1121,9 @@ err_pm:
pm_runtime_disable(&client->dev);
err_regulator:
- regulator_disable(ihid->pdata.supply);
-
-err:
+ regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
i2c_hid_free_buffers(ihid);
- kfree(ihid);
return ret;
}
@@ -1148,9 +1145,8 @@ static int i2c_hid_remove(struct i2c_client *client)
if (ihid->bufsize)
i2c_hid_free_buffers(ihid);
- regulator_disable(ihid->pdata.supply);
-
- kfree(ihid);
+ regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
return 0;
}
@@ -1201,9 +1197,8 @@ static int i2c_hid_suspend(struct device *dev)
hid_warn(hid, "Failed to enable irq wake: %d\n",
wake_status);
} else {
- ret = regulator_disable(ihid->pdata.supply);
- if (ret < 0)
- hid_warn(hid, "Failed to disable supply: %d\n", ret);
+ regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
}
return 0;
@@ -1218,9 +1213,11 @@ static int i2c_hid_resume(struct device *dev)
int wake_status;
if (!device_may_wakeup(&client->dev)) {
- ret = regulator_enable(ihid->pdata.supply);
- if (ret < 0)
- hid_warn(hid, "Failed to enable supply: %d\n", ret);
+ ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
+ ihid->pdata.supplies);
+ if (ret)
+ hid_warn(hid, "Failed to enable supplies: %d\n", ret);
+
if (ihid->pdata.post_power_delay_ms)
msleep(ihid->pdata.post_power_delay_ms);
} else if (ihid->irq_wake_enabled) {
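
The i2c-hid conversion above combines two recurring patterns: device-managed allocation, which deletes the error-path kfree()s, and the bulk regulator API, which scales cleanly to the second "vddl" supply. A minimal probe sketch of the same shape, assuming a hypothetical driver (the foo names are illustrative):

#include <linux/i2c.h>
#include <linux/regulator/consumer.h>

struct foo_dev {
        struct regulator_bulk_data supplies[2];
};

static int foo_probe(struct i2c_client *client,
                     const struct i2c_device_id *id)
{
        struct foo_dev *foo;
        int ret;

        /* Freed automatically on probe failure or device removal. */
        foo = devm_kzalloc(&client->dev, sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        foo->supplies[0].supply = "vdd";
        foo->supplies[1].supply = "vddl";
        ret = devm_regulator_bulk_get(&client->dev,
                                      ARRAY_SIZE(foo->supplies),
                                      foo->supplies);
        if (ret)
                return ret;     /* no manual cleanup needed */

        return regulator_bulk_enable(ARRAY_SIZE(foo->supplies),
                                     foo->supplies);
}
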
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 9a60ec13cb10..bfbca7ec54ce 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -907,8 +907,9 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
struct ishtp_device *dev;
int i;
- dev = kzalloc(sizeof(struct ishtp_device) + sizeof(struct ish_hw),
- GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev,
+ sizeof(struct ishtp_device) + sizeof(struct ish_hw),
+ GFP_KERNEL);
if (!dev)
return NULL;
@@ -925,7 +926,9 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
for (i = 0; i < IPC_TX_FIFO_SIZE; ++i) {
struct wr_msg_ctl_info *tx_buf;
- tx_buf = kzalloc(sizeof(struct wr_msg_ctl_info), GFP_KERNEL);
+ tx_buf = devm_kzalloc(&pdev->dev,
+ sizeof(struct wr_msg_ctl_info),
+ GFP_KERNEL);
if (!tx_buf) {
/*
* IPC buffers may be limited or not available
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index a2c53ea3b5ed..050f9872f5c0 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -95,6 +95,13 @@ static int ish_init(struct ishtp_device *dev)
return 0;
}
+static const struct pci_device_id ish_invalid_pci_ids[] = {
+ /* Mehlow platform special pci ids */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA309)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA30A)},
+ {}
+};
+
/**
* ish_probe() - PCI driver probe callback
* @pdev: pci device
@@ -110,6 +117,10 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ish_hw *hw;
int ret;
+ /* Check for invalid platforms for ISH support */
+ if (pci_dev_present(ish_invalid_pci_ids))
+ return -ENODEV;
+
/* enable pci dev */
ret = pci_enable_device(pdev);
if (ret) {
@@ -172,7 +183,6 @@ free_irq:
free_irq(pdev->irq, dev);
free_device:
pci_iounmap(pdev, hw->mem_addr);
- kfree(dev);
release_regions:
pci_release_regions(pdev);
disable_device:
@@ -202,7 +212,6 @@ static void ish_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_clear_master(pdev);
pci_disable_device(pdev);
- kfree(ishtp_dev);
}
static struct device __maybe_unused *ish_resume_device;
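
pci_dev_present() scans every bus in the system rather than just the device being probed, which is what lets the driver refuse to bind on platforms whose ISH is known not to work. A hedged sketch of the deny-list idiom (foo names hypothetical):

#include <linux/pci.h>

static const struct pci_device_id foo_denylist[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA309) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA30A) },
        { }
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        /* Bail out if any listed device exists anywhere in the system. */
        if (pci_dev_present(foo_denylist))
                return -ENODEV;

        /* ... normal probe continues ... */
        return 0;
}
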
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index ae4a69f7f2f4..8b5dd580ceec 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -298,7 +298,6 @@ int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
struct ishtp_msg_hdr *ishtp_hdr = &hdr;
const size_t len = sizeof(struct hbm_flow_control);
int rv;
- unsigned int num_frags;
unsigned long flags;
spin_lock_irqsave(&cl->fc_spinlock, flags);
@@ -314,7 +313,6 @@ int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
return 0;
}
- num_frags = cl->recv_msg_num_frags;
cl->recv_msg_num_frags = 0;
rv = ishtp_write_message(dev, ishtp_hdr, data);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index af0e0d061b15..11103efebbaa 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -480,6 +480,7 @@ static void hid_ctrl(struct urb *urb)
{
struct hid_device *hid = urb->context;
struct usbhid_device *usbhid = hid->driver_data;
+ unsigned long flags;
int unplug = 0, status = urb->status;
switch (status) {
@@ -501,7 +502,7 @@ static void hid_ctrl(struct urb *urb)
hid_warn(urb->dev, "ctrl urb status %d received\n", status);
}
- spin_lock(&usbhid->lock);
+ spin_lock_irqsave(&usbhid->lock, flags);
if (unplug) {
usbhid->ctrltail = usbhid->ctrlhead;
@@ -511,13 +512,13 @@ static void hid_ctrl(struct urb *urb)
if (usbhid->ctrlhead != usbhid->ctrltail &&
hid_submit_ctrl(hid) == 0) {
/* Successfully submitted next urb in queue */
- spin_unlock(&usbhid->lock);
+ spin_unlock_irqrestore(&usbhid->lock, flags);
return;
}
}
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- spin_unlock(&usbhid->lock);
+ spin_unlock_irqrestore(&usbhid->lock, flags);
usb_autopm_put_interface_async(usbhid->intf);
wake_up(&usbhid->wait);
}
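
The hid_ctrl() change is the classic irqsave conversion: once a completion handler can run in task or softirq context as well as hard IRQ, a plain spin_lock()/spin_unlock() pair on a lock also taken from interrupt context is unsafe, and the interrupt state must travel with the lock. The general shape, as a short sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);

static void foo_complete(void *ctx)
{
        unsigned long flags;

        /* Correct whether we run in hard IRQ, softirq, or task context. */
        spin_lock_irqsave(&foo_lock, flags);
        /* ... update shared queue state ... */
        spin_unlock_irqrestore(&foo_lock, flags);
}
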
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index e3ce233f8bdc..23872d08308c 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -36,6 +36,7 @@
#include <linux/hiddev.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
+#include <linux/nospec.h>
#include "usbhid.h"
#ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
if (uref->field_index >= report->maxfield)
goto inval;
+ uref->field_index = array_index_nospec(uref->field_index,
+ report->maxfield);
field = report->field[uref->field_index];
if (uref->usage_index >= field->maxusage)
goto inval;
+ uref->usage_index = array_index_nospec(uref->usage_index,
+ field->maxusage);
uref->usage_code = field->usage[uref->usage_index].hid;
@@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
if (uref->field_index >= report->maxfield)
goto inval;
+ uref->field_index = array_index_nospec(uref->field_index,
+ report->maxfield);
field = report->field[uref->field_index];
@@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (finfo.field_index >= report->maxfield)
break;
+ finfo.field_index = array_index_nospec(finfo.field_index,
+ report->maxfield);
field = report->field[finfo.field_index];
memset(&finfo, 0, sizeof(finfo));
@@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cinfo.index >= hid->maxcollection)
break;
+ cinfo.index = array_index_nospec(cinfo.index,
+ hid->maxcollection);
cinfo.type = hid->collection[cinfo.index].type;
cinfo.usage = hid->collection[cinfo.index].usage;
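
Each hiddev bounds check above is now followed by array_index_nospec(), which clamps the index under speculative execution so the check cannot be bypassed by a Spectre-v1 gadget. The idiom in isolation, as an illustrative helper:

#include <linux/nospec.h>

static int foo_get(const int *table, unsigned int nr, unsigned int idx)
{
        if (idx >= nr)
                return -EINVAL;

        /* No out-of-bounds load is possible, even speculatively. */
        idx = array_index_nospec(idx, nr);
        return table[idx];
}
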
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index d6797535fff9..0bdd85d486fe 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -210,6 +210,57 @@ static int wacom_calc_hid_res(int logical_extents, int physical_extents,
return hidinput_calc_abs_res(&field, ABS_X);
}
+static void wacom_hid_usage_quirk(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_features *features = &wacom->wacom_wac.features;
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ /*
+ * The Dell Canvas 27 needs to be switched to its vendor-defined
+ * report to provide the best resolution.
+ */
+ if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+ hdev->product == 0x4200 &&
+ field->application == HID_UP_MSVENDOR) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 2;
+ }
+
+ /*
+ * ISDv4 devices which predate HID's adoption of the
+ * HID_DG_BARRELSWITCH2 usage use 0x000D0000 in its
+ * position instead. We can accurately detect if a
+ * usage with that value should be HID_DG_BARRELSWITCH2
+ * based on the surrounding usages, which have remained
+ * constant across generations.
+ */
+ if (features->type == HID_GENERIC &&
+ usage->hid == 0x000D0000 &&
+ field->application == HID_DG_PEN &&
+ field->physical == HID_DG_STYLUS) {
+ int i = usage->usage_index;
+
+ if (i-4 >= 0 && i+1 < field->maxusage &&
+ field->usage[i-4].hid == HID_DG_TIPSWITCH &&
+ field->usage[i-3].hid == HID_DG_BARRELSWITCH &&
+ field->usage[i-2].hid == HID_DG_ERASER &&
+ field->usage[i-1].hid == HID_DG_INVERT &&
+ field->usage[i+1].hid == HID_DG_INRANGE) {
+ usage->hid = HID_DG_BARRELSWITCH2;
+ }
+ }
+
+ /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
+ if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+ hdev->product == 0x0358 &&
+ WACOM_PEN_FIELD(field) &&
+ equivalent_usage == HID_GD_Y) {
+ field->logical_maximum = 43200;
+ }
+}
+
static void wacom_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
@@ -221,6 +272,8 @@ static void wacom_feature_mapping(struct hid_device *hdev,
int ret;
u32 n;
+ wacom_hid_usage_quirk(hdev, field, usage);
+
switch (equivalent_usage) {
case HID_DG_CONTACTMAX:
/* leave touch_max as is if predefined */
@@ -300,13 +353,6 @@ static void wacom_feature_mapping(struct hid_device *hdev,
kfree(data);
break;
}
-
- if (hdev->vendor == USB_VENDOR_ID_WACOM &&
- hdev->product == 0x4200 /* Dell Canvas 27 */ &&
- field->application == HID_UP_MSVENDOR) {
- wacom->wacom_wac.mode_report = field->report->id;
- wacom->wacom_wac.mode_value = 2;
- }
}
/*
@@ -348,6 +394,7 @@ static void wacom_usage_mapping(struct hid_device *hdev,
struct wacom_features *features = &wacom->wacom_wac.features;
bool finger = WACOM_FINGER_FIELD(field);
bool pen = WACOM_PEN_FIELD(field);
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
/*
* Requiring Stylus Usage will ignore boot mouse
@@ -361,49 +408,9 @@ static void wacom_usage_mapping(struct hid_device *hdev,
else
return;
- /*
- * Bamboo models do not support HID_DG_CONTACTMAX.
- * And, Bamboo Pen only descriptor contains touch.
- */
- if (features->type > BAMBOO_PT) {
- /* ISDv4 touch devices at least supports one touch point */
- if (finger && !features->touch_max)
- features->touch_max = 1;
- }
-
- /*
- * ISDv4 devices which predate HID's adoption of the
- * HID_DG_BARELSWITCH2 usage use 0x000D0000 in its
- * position instead. We can accurately detect if a
- * usage with that value should be HID_DG_BARRELSWITCH2
- * based on the surrounding usages, which have remained
- * constant across generations.
- */
- if (features->type == HID_GENERIC &&
- usage->hid == 0x000D0000 &&
- field->application == HID_DG_PEN &&
- field->physical == HID_DG_STYLUS) {
- int i = usage->usage_index;
+ wacom_hid_usage_quirk(hdev, field, usage);
- if (i-4 >= 0 && i+1 < field->maxusage &&
- field->usage[i-4].hid == HID_DG_TIPSWITCH &&
- field->usage[i-3].hid == HID_DG_BARRELSWITCH &&
- field->usage[i-2].hid == HID_DG_ERASER &&
- field->usage[i-1].hid == HID_DG_INVERT &&
- field->usage[i+1].hid == HID_DG_INRANGE) {
- usage->hid = HID_DG_BARRELSWITCH2;
- }
- }
-
- /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
- if (hdev->vendor == USB_VENDOR_ID_WACOM &&
- hdev->product == 0x0358 &&
- WACOM_PEN_FIELD(field) &&
- wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
- field->logical_maximum = 43200;
- }
-
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
features->x_max = field->logical_maximum;
if (finger) {
@@ -703,18 +710,6 @@ struct wacom_hdev_data {
static LIST_HEAD(wacom_udev_list);
static DEFINE_MUTEX(wacom_udev_list_lock);
-static bool compare_device_paths(struct hid_device *hdev_a,
- struct hid_device *hdev_b, char separator)
-{
- int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
- int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
-
- if (n1 != n2 || n1 <= 0 || n2 <= 0)
- return false;
-
- return !strncmp(hdev_a->phys, hdev_b->phys, n1);
-}
-
static bool wacom_are_sibling(struct hid_device *hdev,
struct hid_device *sibling)
{
@@ -737,10 +732,10 @@ static bool wacom_are_sibling(struct hid_device *hdev,
* the same physical parent device path.
*/
if (hdev->vendor == sibling->vendor && hdev->product == sibling->product) {
- if (!compare_device_paths(hdev, sibling, '/'))
+ if (!hid_compare_device_paths(hdev, sibling, '/'))
return false;
} else {
- if (!compare_device_paths(hdev, sibling, '.'))
+ if (!hid_compare_device_paths(hdev, sibling, '.'))
return false;
}
@@ -787,7 +782,7 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
/* Try to find an already-probed interface from the same device */
list_for_each_entry(data, &wacom_udev_list, list) {
- if (compare_device_paths(hdev, data->dev, '/')) {
+ if (hid_compare_device_paths(hdev, data->dev, '/')) {
kref_get(&data->kref);
return data;
}
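
hid_compare_device_paths() replaces the local helper removed above; assuming the shared version keeps the same semantics, two HID devices are siblings when their phys paths agree up to the final separator (only the per-interface suffix differs). A userspace-runnable equivalent of that comparison:

#include <stdbool.h>
#include <string.h>

static bool paths_share_parent(const char *a, const char *b, char sep)
{
        const char *pa = strrchr(a, sep);
        const char *pb = strrchr(b, sep);

        /* Require a non-empty prefix of equal length in both paths. */
        if (!pa || !pb || pa == a || pb == b || (pa - a) != (pb - b))
                return false;

        return !strncmp(a, b, pa - a);
}
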
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0bb44d0088ed..e0a06be5ef5c 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
features->device_type |= WACOM_DEVICETYPE_PAD;
- features->x_max = 4096;
- features->y_max = 4096;
+ if (features->type == INTUOSHT2) {
+ features->x_max = features->x_max / 10;
+ features->y_max = features->y_max / 10;
+ } else {
+ features->x_max = 4096;
+ features->y_max = 4096;
+ }
}
else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
features->device_type |= WACOM_DEVICETYPE_PAD;
@@ -4351,19 +4357,19 @@ static const struct wacom_features wacom_features_0x5E =
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x90 =
{ "Wacom ISDv4 90", 26202, 16325, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */
static const struct wacom_features wacom_features_0x93 =
{ "Wacom ISDv4 93", 26202, 16325, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 };
static const struct wacom_features wacom_features_0x97 =
{ "Wacom ISDv4 97", 26202, 16325, 511, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */
static const struct wacom_features wacom_features_0x9A =
{ "Wacom ISDv4 9A", 26202, 16325, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 };
static const struct wacom_features wacom_features_0x9F =
{ "Wacom ISDv4 9F", 26202, 16325, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 };
static const struct wacom_features wacom_features_0xE2 =
{ "Wacom ISDv4 E2", 26202, 16325, 255, 0,
TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
@@ -4378,13 +4384,13 @@ static const struct wacom_features wacom_features_0xE6 =
TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xEC =
{ "Wacom ISDv4 EC", 25710, 14500, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */
static const struct wacom_features wacom_features_0xED =
{ "Wacom ISDv4 ED", 26202, 16325, 255, 0,
- TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 };
static const struct wacom_features wacom_features_0xEF =
{ "Wacom ISDv4 EF", 26202, 16325, 255, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */
static const struct wacom_features wacom_features_0x100 =
{ "Wacom ISDv4 100", 26202, 16325, 255, 0,
MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -4402,10 +4408,10 @@ static const struct wacom_features wacom_features_0x10F =
MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x116 =
{ "Wacom ISDv4 116", 26202, 16325, 255, 0,
- TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 1 };
static const struct wacom_features wacom_features_0x12C =
{ "Wacom ISDv4 12C", 27848, 15752, 2047, 0,
- TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; /* Pen-only */
static const struct wacom_features wacom_features_0x4001 =
{ "Wacom ISDv4 4001", 26202, 16325, 255, 0,
MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index 7765de2f1ef1..2ada82d2ec8c 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -20,6 +20,7 @@
* 02110-1301 USA
*/
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index ba0a092ae085..741857d80da1 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -29,12 +29,26 @@
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
+#include <asm/page.h>
#include "hyperv_vmbus.h"
#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
+static unsigned long virt_to_hvpfn(void *addr)
+{
+ unsigned long paddr;
+
+ if (is_vmalloc_addr(addr))
+ paddr = page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
+ else
+ paddr = __pa(addr);
+
+ return paddr >> PAGE_SHIFT;
+}
+
/*
* vmbus_setevent- Trigger an event notification on the specified
* channel.
@@ -298,8 +312,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
- kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
+ gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
+ kbuffer + PAGE_SIZE * i);
*msginfo = msgheader;
pfnsum = pfncount;
@@ -350,9 +364,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
* so the hypervisor guarantees that this is ok.
*/
for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = slow_virt_to_phys(
- kbuffer + PAGE_SIZE * (pfnsum + i)) >>
- PAGE_SHIFT;
+ gpadl_body->pfn[i] = virt_to_hvpfn(
+ kbuffer + PAGE_SIZE * (pfnsum + i));
/* add to msg header */
list_add_tail(&msgbody->msglistentry,
@@ -380,8 +393,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
- kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
+ gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
+ kbuffer + PAGE_SIZE * i);
*msginfo = msgheader;
}
@@ -558,11 +571,8 @@ static void reset_channel_cb(void *arg)
channel->onchannel_callback = NULL;
}
-static int vmbus_close_internal(struct vmbus_channel *channel)
+void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
- struct vmbus_channel_close_channel *msg;
- int ret;
-
/*
* vmbus_on_event(), running in the per-channel tasklet, can race
* with vmbus_close_internal() in the case of SMP guest, e.g., when
@@ -572,6 +582,29 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
*/
tasklet_disable(&channel->callback_event);
+ channel->sc_creation_callback = NULL;
+
+ /* Stop the callback asap */
+ if (channel->target_cpu != get_cpu()) {
+ put_cpu();
+ smp_call_function_single(channel->target_cpu, reset_channel_cb,
+ channel, true);
+ } else {
+ reset_channel_cb(channel);
+ put_cpu();
+ }
+
+ /* Re-enable tasklet for use on re-open */
+ tasklet_enable(&channel->callback_event);
+}
+
+static int vmbus_close_internal(struct vmbus_channel *channel)
+{
+ struct vmbus_channel_close_channel *msg;
+ int ret;
+
+ vmbus_reset_channel_cb(channel);
+
/*
* In case a device driver's probe() fails (e.g.,
* util_probe() -> vmbus_open() returns -ENOMEM) and the device is
@@ -585,16 +618,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
}
channel->state = CHANNEL_OPEN_STATE;
- channel->sc_creation_callback = NULL;
- /* Stop callback and cancel the timer asap */
- if (channel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(channel->target_cpu, reset_channel_cb,
- channel, true);
- } else {
- reset_channel_cb(channel);
- put_cpu();
- }
/* Send a closing message */
@@ -639,8 +662,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
out:
- /* re-enable tasklet for use on re-open */
- tasklet_enable(&channel->callback_event);
return ret;
}
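
virt_to_hvpfn() exists because __pa() is only defined for the kernel linear mapping; a vmalloc'ed ring buffer is virtually contiguous but its frames are not, so every PAGE_SIZE step needs its own lookup. A sketch of the per-page loop the GPADL code performs, using the virt_to_hvpfn() helper added above and assuming a page-aligned size:

#include <linux/mm.h>

static void fill_pfn_array(void *kbuffer, u32 size, u64 *pfn_array)
{
        u32 i;

        /* One PFN lookup per page; adjacent pages may not be
         * physically adjacent when kbuffer came from vmalloc(). */
        for (i = 0; i < size >> PAGE_SHIFT; i++)
                pfn_array[i] = virt_to_hvpfn(kbuffer + PAGE_SIZE * i);
}
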
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index ecc2bd275a73..0f0e091c117c 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -527,10 +527,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
struct hv_device *dev
= newchannel->primary_channel->device_obj;
- if (vmbus_add_channel_kobj(dev, newchannel)) {
- atomic_dec(&vmbus_connection.offer_in_progress);
+ if (vmbus_add_channel_kobj(dev, newchannel))
goto err_free_chan;
- }
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
@@ -895,6 +893,12 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
}
/*
+ * Before setting channel->rescind in vmbus_rescind_cleanup(), we
+ * should make sure the channel callback is not running any more.
+ */
+ vmbus_reset_channel_cb(channel);
+
+ /*
* Now wait for offer handling to complete.
*/
vmbus_rescind_cleanup(channel);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 658dc765753b..748a1c4172a6 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -64,7 +64,7 @@ int hv_init(void)
return -ENOMEM;
direct_mode_enabled = ms_hyperv.misc_features &
- HV_X64_STIMER_DIRECT_MODE_AVAILABLE;
+ HV_STIMER_DIRECT_MODE_AVAILABLE;
return 0;
}
@@ -127,14 +127,14 @@ static int hv_ce_set_next_event(unsigned long delta,
current_tick = hyperv_cs->read(NULL);
current_tick += delta;
- hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
+ hv_init_timer(0, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
- hv_init_timer(HV_X64_MSR_STIMER0_COUNT, 0);
- hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, 0);
+ hv_init_timer(0, 0);
+ hv_init_timer_config(0, 0);
if (direct_mode_enabled)
hv_disable_stimer0_percpu_irq(stimer0_irq);
@@ -164,7 +164,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
timer_cfg.direct_mode = 0;
timer_cfg.sintx = VMBUS_MESSAGE_SINT;
}
- hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
+ hv_init_timer_config(0, timer_cfg.as_uint64);
return 0;
}
@@ -242,6 +242,10 @@ int hv_synic_alloc(void)
return 0;
err:
+ /*
+ * Any memory allocations that succeeded will be freed when
+ * the caller cleans up by calling hv_synic_free()
+ */
return -ENOMEM;
}
@@ -254,12 +258,10 @@ void hv_synic_free(void)
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
- if (hv_cpu->synic_event_page)
- free_page((unsigned long)hv_cpu->synic_event_page);
- if (hv_cpu->synic_message_page)
- free_page((unsigned long)hv_cpu->synic_message_page);
- if (hv_cpu->post_msg_page)
- free_page((unsigned long)hv_cpu->post_msg_page);
+ kfree(hv_cpu->clk_evt);
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ free_page((unsigned long)hv_cpu->post_msg_page);
}
kfree(hv_context.hv_numa_map);
@@ -298,18 +300,16 @@ int hv_synic_init(unsigned int cpu)
hv_set_siefp(siefp.as_uint64);
/* Setup the shared SINT. */
- hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
shared_sint.masked = false;
- if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
+ if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
shared_sint.auto_eoi = false;
else
shared_sint.auto_eoi = true;
- hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
hv_get_synic_state(sctrl.as_uint64);
@@ -322,7 +322,7 @@ int hv_synic_init(unsigned int cpu)
/*
* Register the per-cpu clockevent source.
*/
- if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
+ if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE)
clockevents_config_and_register(hv_cpu->clk_evt,
HV_TIMER_FREQUENCY,
HV_MIN_DELTA_TICKS,
@@ -337,7 +337,7 @@ void hv_synic_clockevents_cleanup(void)
{
int cpu;
- if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
+ if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
return;
if (direct_mode_enabled)
@@ -396,7 +396,7 @@ int hv_synic_cleanup(unsigned int cpu)
return -EBUSY;
/* Turn off clockevent device */
- if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
+ if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
struct hv_per_cpu_context *hv_cpu
= this_cpu_ptr(hv_context.cpu_context);
@@ -405,15 +405,13 @@ int hv_synic_cleanup(unsigned int cpu)
put_cpu_ptr(hv_cpu);
}
- hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
- hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
hv_get_simp(simp.as_uint64);
simp.simp_enabled = 0;
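
The hv_synic_free() cleanup can drop its NULL checks because kfree(NULL) and free_page(0) are defined no-ops, so a partially failed allocation pass frees safely without guards. A minimal sketch of the pattern (foo names hypothetical):

#include <linux/gfp.h>
#include <linux/slab.h>

struct foo_ctx {
        void *msg_page;         /* from get_zeroed_page(), may be NULL */
        void *evt;              /* from kzalloc(), may be NULL */
};

static void foo_free(struct foo_ctx *c)
{
        kfree(c->evt);                          /* kfree(NULL) is a no-op */
        free_page((unsigned long)c->msg_page);  /* free_page(0) is a no-op */
}
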
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b3e9f13f8bc3..b1b788082793 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1765,6 +1765,9 @@ static struct hv_driver balloon_drv = {
.id_table = id_table,
.probe = balloon_probe,
.remove = balloon_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int __init init_balloon_drv(void)
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 14dce25c104f..423205077bf6 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -487,6 +487,9 @@ static struct hv_driver util_drv = {
.id_table = id_table,
.probe = util_probe,
.remove = util_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int hv_ptp_enable(struct ptp_clock_info *info,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index be3c8b10b84a..3e90eb91db45 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -431,7 +431,24 @@ static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
}
/*
- * Update host ring buffer after iterating over packets.
+ * Update host ring buffer after iterating over packets. If the host has
+ * stopped queuing new entries because it found the ring buffer full, and
+ * sufficient space is being freed up, signal the host. But be careful to
+ * only signal the host when necessary, both for performance reasons and
+ * because Hyper-V protects itself by throttling guests that signal
+ * inappropriately.
+ *
+ * Determining when to signal is tricky. There are three key data inputs
+ * that must be handled in this order to avoid race conditions:
+ *
+ * 1. Update the read_index
+ * 2. Read the pending_send_sz
+ * 3. Read the current write_index
+ *
+ * The interrupt_mask is not used to determine when to signal. The
+ * interrupt_mask is used only on the guest->host ring buffer when
+ * sending requests to the host. The host does not use it on the host->
+ * guest ring buffer to indicate whether it should be signaled.
*/
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
@@ -447,22 +464,30 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
start_read_index = rbi->ring_buffer->read_index;
rbi->ring_buffer->read_index = rbi->priv_read_index;
+ /*
+ * Older versions of Hyper-V (before WS2012 and Win8) do not
+ * implement pending_send_sz and simply poll if the host->guest
+ * ring buffer is full. No signaling is needed or expected.
+ */
if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
return;
/*
* Issue a full memory barrier before making the signaling decision.
- * Here is the reason for having this barrier:
- * If the reading of the pend_sz (in this function)
- * were to be reordered and read before we commit the new read
- * index (in the calling function) we could
- * have a problem. If the host were to set the pending_sz after we
- * have sampled pending_sz and go to sleep before we commit the
+ * If reading pending_send_sz were to be reordered and happen
+ * before we commit the new read_index, a race could occur. If the
+ * host were to set the pending_send_sz after we have sampled
+ * pending_send_sz, and the ring buffer blocks before we commit the
* read index, we could miss sending the interrupt. Issue a full
* memory barrier to address this.
*/
virt_mb();
+ /*
+ * If the pending_send_sz is zero, then the ring buffer is not
+ * blocked and there is no need to signal. This is by far the
+ * most common case, so exit quickly for best performance.
+ */
pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
if (!pending_sz)
return;
@@ -476,14 +501,32 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
/*
- * If there was space before we began iteration,
- * then host was not blocked.
+ * We want to signal the host only if we're transitioning
+ * from a "not enough free space" state to an "enough free
+ * space" state. For example, it's possible that this function
+ * could run and free up enough space to signal the host, and then
+ * run again and free up additional space before the host has a
+ * chance to clear the pending_send_sz. The 2nd invocation would
+ * be a null transition from "enough free space" to "enough free
+ * space", which doesn't warrant a signal.
+ *
+ * Exactly filling the ring buffer is treated as "not enough
+ * space". The ring buffer always must have at least one byte
+ * empty so the empty and full conditions are distinguishable.
+ * hv_get_bytes_to_write() doesn't fully tell the truth in
+ * this regard.
+ *
+ * So first check if we were in the "enough free space" state
+ * before we began the iteration. If so, the host was not
+ * blocked, and there's no need to signal.
*/
-
if (curr_write_sz - bytes_read > pending_sz)
return;
- /* If pending write will not fit, don't give false hope. */
+ /*
+ * Similarly, if the new state is "not enough space", then
+ * there's no need to signal.
+ */
if (curr_write_sz <= pending_sz)
return;
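
The signaling decision reduces to arithmetic on three values: curr_write_sz (free bytes after the read_index update), bytes_read (what this pass consumed, so curr_write_sz - bytes_read is the free space beforehand), and pending_sz. A runnable sketch of the transition test, including the common pending_sz == 0 early exit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool must_signal(uint32_t curr_write_sz, uint32_t bytes_read,
                        uint32_t pending_sz)
{
        if (!pending_sz)
                return false;   /* host is not blocked */
        if (curr_write_sz - bytes_read > pending_sz)
                return false;   /* was already unblocked before this pass */
        if (curr_write_sz <= pending_sz)
                return false;   /* still not enough space */
        return true;            /* "not enough" -> "enough" transition */
}

int main(void)
{
        /* Freeing 2048 bytes moves free space from 1024 to 3072 (> 2048),
         * so this crossing warrants exactly one signal: prints 1. */
        printf("%d\n", must_signal(3072, 2048, 2048));
        return 0;
}
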
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b10fe26c4891..b1b548a21f91 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -56,6 +56,8 @@ static struct completion probe_event;
static int hyperv_cpuhp_online;
+static void *hv_panic_page;
+
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
void *args)
{
@@ -208,6 +210,20 @@ static ssize_t modalias_show(struct device *dev,
}
static DEVICE_ATTR_RO(modalias);
+#ifdef CONFIG_NUMA
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
+}
+static DEVICE_ATTR_RO(numa_node);
+#endif
+
static ssize_t server_monitor_pending_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
@@ -490,6 +506,9 @@ static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_class_id.attr,
&dev_attr_device_id.attr,
&dev_attr_modalias.attr,
+#ifdef CONFIG_NUMA
+ &dev_attr_numa_node.attr,
+#endif
&dev_attr_server_monitor_pending.attr,
&dev_attr_client_monitor_pending.attr,
&dev_attr_server_monitor_latency.attr,
@@ -1018,6 +1037,72 @@ static void vmbus_isr(void)
add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
+/*
+ * Boolean to control whether to report panic messages over Hyper-V.
+ *
+ * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
+ */
+static int sysctl_record_panic_msg = 1;
+
+/*
+ * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
+ * buffer and call into Hyper-V to transfer the data.
+ */
+static void hv_kmsg_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ size_t bytes_written;
+ phys_addr_t panic_pa;
+
+ /* We are only interested in panics. */
+ if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
+ return;
+
+ panic_pa = virt_to_phys(hv_panic_page);
+
+ /*
+ * Write dump contents to the page. No need to synchronize; panic should
+ * be single-threaded.
+ */
+ kmsg_dump_get_buffer(dumper, true, hv_panic_page, PAGE_SIZE,
+ &bytes_written);
+ if (bytes_written)
+ hyperv_report_panic_msg(panic_pa, bytes_written);
+}
+
+static struct kmsg_dumper hv_kmsg_dumper = {
+ .dump = hv_kmsg_dump,
+};
+
+static struct ctl_table_header *hv_ctl_table_hdr;
+static int zero;
+static int one = 1;
+
+/*
+ * sysctl option to allow the user to control whether kmsg data should be
+ * reported to Hyper-V on panic.
+ */
+static struct ctl_table hv_ctl_table[] = {
+ {
+ .procname = "hyperv_record_panic_msg",
+ .data = &sysctl_record_panic_msg,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one
+ },
+ {}
+};
+
+static struct ctl_table hv_root_table[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = hv_ctl_table
+ },
+ {}
+};
/*
* vmbus_bus_init -Main vmbus driver initialization routine.
@@ -1065,6 +1150,32 @@ static int vmbus_bus_init(void)
* Only register if the crash MSRs are available
*/
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ u64 hyperv_crash_ctl;
+ /*
+ * Sysctl registration is not fatal, since by default
+ * reporting is enabled.
+ */
+ hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
+ if (!hv_ctl_table_hdr)
+ pr_err("Hyper-V: sysctl table register error\n");
+
+ /*
+ * Register for panic kmsg callback only if the right
+ * capability is supported by the hypervisor.
+ */
+ hv_get_crash_ctl(hyperv_crash_ctl);
+ if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
+ hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (hv_panic_page) {
+ ret = kmsg_dump_register(&hv_kmsg_dumper);
+ if (ret)
+ pr_err("Hyper-V: kmsg dump register "
+ "error 0x%x\n", ret);
+ } else
+ pr_err("Hyper-V: panic message page memory "
+ "allocation failed\n");
+ }
+
register_die_notifier(&hyperv_die_block);
atomic_notifier_chain_register(&panic_notifier_list,
&hyperv_panic_block);
@@ -1081,7 +1192,9 @@ err_alloc:
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
-
+ free_page((unsigned long)hv_panic_page);
+ unregister_sysctl_table(hv_ctl_table_hdr);
+ hv_ctl_table_hdr = NULL;
return ret;
}
@@ -1785,10 +1898,15 @@ static void __exit vmbus_exit(void)
vmbus_free_channels();
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ kmsg_dump_unregister(&hv_kmsg_dumper);
unregister_die_notifier(&hyperv_die_block);
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_block);
}
+
+ free_page((unsigned long)hv_panic_page);
+ unregister_sysctl_table(hv_ctl_table_hdr);
+ hv_ctl_table_hdr = NULL;
bus_unregister(&hv_bus);
cpuhp_remove_state(hyperv_cpuhp_online);
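
The panic-message path above rests on the kmsg_dump interface: register a dumper, and on panic pull the tail of the kernel log into a private buffer. A minimal sketch of that contract (foo names hypothetical; the real driver hands the bytes to hyperv_report_panic_msg()):

#include <linux/kmsg_dump.h>

static char foo_panic_buf[1024];

static void foo_kmsg_dump(struct kmsg_dumper *dumper,
                          enum kmsg_dump_reason reason)
{
        size_t len;

        /* Only panics are interesting, mirroring hv_kmsg_dump() above. */
        if (reason != KMSG_DUMP_PANIC)
                return;

        if (!kmsg_dump_get_buffer(dumper, true, foo_panic_buf,
                                  sizeof(foo_panic_buf), &len))
                return;

        /* Hand 'len' bytes of panic text to the platform transport. */
}

static struct kmsg_dumper foo_dumper = {
        .dump = foo_kmsg_dump,
};

kmsg_dump_register(&foo_dumper) wires the dumper up at init; kmsg_dump_unregister(&foo_dumper) belongs on the teardown path, as vmbus_exit() now does.
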
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 195154178aff..81da17a42dc9 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -937,6 +937,18 @@ config SENSORS_MCP3021
This driver can also be built as a module. If so, the module
will be called mcp3021.
+config SENSORS_MLXREG_FAN
+ tristate "Mellanox FAN driver"
+ depends on MELLANOX_PLATFORM
+ imply THERMAL
+ select REGMAP
+ help
+ This option enables support for the FAN control on the Mellanox
+ Ethernet and InfiniBand switches. The driver can be activated by the
+ platform device add call. Say Y to enable it. To compile this
+ driver as a module, choose 'M' here: the module will be called
+ mlxreg-fan.
+
config SENSORS_TC654
tristate "Microchip TC654/TC655 and compatibles"
depends on I2C
@@ -1256,6 +1268,16 @@ config SENSORS_NCT7904
This driver can also be built as a module. If so, the module
will be called nct7904.
+config SENSORS_NPCM7XX
+ tristate "Nuvoton NPCM750 and compatible PWM and Fan controllers"
+ imply THERMAL
+ help
+ This driver provides support for Nuvoton NPCM750/730/715/705 PWM
+ and Fan controllers.
+
+ This driver can also be built as a module. If so, the module
+ will be called npcm750-pwm-fan.
+
config SENSORS_NSA320
tristate "ZyXEL NSA320 and compatible fan speed and temperature sensors"
depends on GPIOLIB && OF
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index a9297703fd6e..93f7f41ea4ad 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -129,11 +129,13 @@ obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
obj-$(CONFIG_SENSORS_TC654) += tc654.o
+obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
obj-$(CONFIG_SENSORS_NCT7802) += nct7802.o
obj-$(CONFIG_SENSORS_NCT7904) += nct7904.o
+obj-$(CONFIG_SENSORS_NPCM7XX) += npcm750-pwm-fan.o
obj-$(CONFIG_SENSORS_NSA320) += nsa320-hwmon.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 9ef84998c7f3..90837f7c7d0f 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -194,8 +194,7 @@ struct adt7475_data {
struct mutex lock;
unsigned long measure_updated;
- unsigned long limits_updated;
- char valid;
+ bool valid;
u8 config4;
u8 config5;
@@ -326,6 +325,9 @@ static ssize_t show_voltage(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
unsigned short val;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
switch (sattr->nr) {
case ALARM:
return sprintf(buf, "%d\n",
@@ -381,6 +383,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
switch (sattr->nr) {
case HYSTERSIS:
mutex_lock(&data->lock);
@@ -625,6 +630,9 @@ static ssize_t show_point2(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out, val;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
mutex_lock(&data->lock);
out = (data->range[sattr->index] >> 4) & 0x0F;
val = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
@@ -683,6 +691,9 @@ static ssize_t show_tach(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
if (sattr->nr == ALARM)
out = (data->alarms >> (sattr->index + 10)) & 1;
else
@@ -720,6 +731,9 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwm[sattr->nr][sattr->index]);
}
@@ -729,6 +743,9 @@ static ssize_t show_pwmchan(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwmchan[sattr->index]);
}
@@ -738,6 +755,9 @@ static ssize_t show_pwmctrl(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwmctl[sattr->index]);
}
@@ -945,6 +965,9 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
- int i = clamp_val(data->range[sattr->index] & 0xf, 0,
- ARRAY_SIZE(pwmfreq_table) - 1);
+ int i;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ i = clamp_val(data->range[sattr->index] & 0xf, 0,
+ ARRAY_SIZE(pwmfreq_table) - 1);
+
return sprintf(buf, "%d\n", pwmfreq_table[i]);
}
@@ -1035,6 +1058,10 @@ static ssize_t cpu0_vid_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct adt7475_data *data = adt7475_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
@@ -1385,6 +1412,121 @@ static void adt7475_remove_files(struct i2c_client *client,
sysfs_remove_group(&client->dev.kobj, &vid_attr_group);
}
+static int adt7475_update_limits(struct i2c_client *client)
+{
+ struct adt7475_data *data = i2c_get_clientdata(client);
+ int i;
+ int ret;
+
+ ret = adt7475_read(REG_CONFIG4);
+ if (ret < 0)
+ return ret;
+ data->config4 = ret;
+
+ ret = adt7475_read(REG_CONFIG5);
+ if (ret < 0)
+ return ret;
+ data->config5 = ret;
+
+ for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+ if (!(data->has_voltage & (1 << i)))
+ continue;
+ /* Adjust values so they match the input precision */
+ ret = adt7475_read(VOLTAGE_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[MIN][i] = ret << 2;
+
+ ret = adt7475_read(VOLTAGE_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[MAX][i] = ret << 2;
+ }
+
+ if (data->has_voltage & (1 << 5)) {
+ ret = adt7475_read(REG_VTT_MIN);
+ if (ret < 0)
+ return ret;
+ data->voltage[MIN][5] = ret << 2;
+
+ ret = adt7475_read(REG_VTT_MAX);
+ if (ret < 0)
+ return ret;
+ data->voltage[MAX][5] = ret << 2;
+ }
+
+ for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+ /* Adjust values so they match the input precision */
+ ret = adt7475_read(TEMP_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[MIN][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[MAX][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_TMIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[AUTOMIN][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_THERM_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[THERM][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_OFFSET_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[OFFSET][i] = ret;
+ }
+ adt7475_read_hystersis(client);
+
+ for (i = 0; i < ADT7475_TACH_COUNT; i++) {
+ if (i == 3 && !data->has_fan4)
+ continue;
+ ret = adt7475_read_word(client, TACH_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->tach[MIN][i] = ret;
+ }
+
+ for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+ if (i == 1 && !data->has_pwm2)
+ continue;
+ ret = adt7475_read(PWM_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[MAX][i] = ret;
+
+ ret = adt7475_read(PWM_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[MIN][i] = ret;
+ /* Set the channel and control information */
+ adt7475_read_pwm(client, i);
+ }
+
+ ret = adt7475_read(TEMP_TRANGE_REG(0));
+ if (ret < 0)
+ return ret;
+ data->range[0] = ret;
+
+ ret = adt7475_read(TEMP_TRANGE_REG(1));
+ if (ret < 0)
+ return ret;
+ data->range[1] = ret;
+
+ ret = adt7475_read(TEMP_TRANGE_REG(2));
+ if (ret < 0)
+ return ret;
+ data->range[2] = ret;
+
+ return 0;
+}
+
static int adt7475_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1562,6 +1704,11 @@ static int adt7475_probe(struct i2c_client *client,
(data->bypass_attn & (1 << 3)) ? " in3" : "",
(data->bypass_attn & (1 << 4)) ? " in4" : "");
+ /* Limits and settings should never change; read them only once */
+ ret = adt7475_update_limits(client);
+ if (ret)
+ goto eremove;
+
return 0;
eremove:
@@ -1658,121 +1805,122 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
}
}
-static struct adt7475_data *adt7475_update_device(struct device *dev)
+static int adt7475_update_measure(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adt7475_data *data = i2c_get_clientdata(client);
u16 ext;
int i;
+ int ret;
- mutex_lock(&data->lock);
+ ret = adt7475_read(REG_STATUS2);
+ if (ret < 0)
+ return ret;
+ data->alarms = ret << 8;
- /* Measurement values update every 2 seconds */
- if (time_after(jiffies, data->measure_updated + HZ * 2) ||
- !data->valid) {
- data->alarms = adt7475_read(REG_STATUS2) << 8;
- data->alarms |= adt7475_read(REG_STATUS1);
-
- ext = (adt7475_read(REG_EXTEND2) << 8) |
- adt7475_read(REG_EXTEND1);
- for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
- if (!(data->has_voltage & (1 << i)))
- continue;
- data->voltage[INPUT][i] =
- (adt7475_read(VOLTAGE_REG(i)) << 2) |
- ((ext >> (i * 2)) & 3);
- }
+ ret = adt7475_read(REG_STATUS1);
+ if (ret < 0)
+ return ret;
+ data->alarms |= ret;
- for (i = 0; i < ADT7475_TEMP_COUNT; i++)
- data->temp[INPUT][i] =
- (adt7475_read(TEMP_REG(i)) << 2) |
- ((ext >> ((i + 5) * 2)) & 3);
+ ret = adt7475_read(REG_EXTEND2);
+ if (ret < 0)
+ return ret;
- if (data->has_voltage & (1 << 5)) {
- data->alarms |= adt7475_read(REG_STATUS4) << 24;
- ext = adt7475_read(REG_EXTEND3);
- data->voltage[INPUT][5] = adt7475_read(REG_VTT) << 2 |
- ((ext >> 4) & 3);
- }
+ ext = (ret << 8);
- for (i = 0; i < ADT7475_TACH_COUNT; i++) {
- if (i == 3 && !data->has_fan4)
- continue;
- data->tach[INPUT][i] =
- adt7475_read_word(client, TACH_REG(i));
- }
+ ret = adt7475_read(REG_EXTEND1);
+ if (ret < 0)
+ return ret;
- /* Updated by hw when in auto mode */
- for (i = 0; i < ADT7475_PWM_COUNT; i++) {
- if (i == 1 && !data->has_pwm2)
- continue;
- data->pwm[INPUT][i] = adt7475_read(PWM_REG(i));
- }
+ ext |= ret;
+
+ for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+ if (!(data->has_voltage & (1 << i)))
+ continue;
+ ret = adt7475_read(VOLTAGE_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[INPUT][i] =
+ (ret << 2) |
+ ((ext >> (i * 2)) & 3);
+ }
- if (data->has_vid)
- data->vid = adt7475_read(REG_VID) & 0x3f;
+ for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+ ret = adt7475_read(TEMP_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[INPUT][i] =
+ (ret << 2) |
+ ((ext >> ((i + 5) * 2)) & 3);
+ }
- data->measure_updated = jiffies;
+ if (data->has_voltage & (1 << 5)) {
+ ret = adt7475_read(REG_STATUS4);
+ if (ret < 0)
+ return ret;
+ data->alarms |= ret << 24;
+
+ ret = adt7475_read(REG_EXTEND3);
+ if (ret < 0)
+ return ret;
+ ext = ret;
+
+ ret = adt7475_read(REG_VTT);
+ if (ret < 0)
+ return ret;
+ data->voltage[INPUT][5] = ret << 2 |
+ ((ext >> 4) & 3);
}
- /* Limits and settings, should never change update every 60 seconds */
- if (time_after(jiffies, data->limits_updated + HZ * 60) ||
- !data->valid) {
- data->config4 = adt7475_read(REG_CONFIG4);
- data->config5 = adt7475_read(REG_CONFIG5);
-
- for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
- if (!(data->has_voltage & (1 << i)))
- continue;
- /* Adjust values so they match the input precision */
- data->voltage[MIN][i] =
- adt7475_read(VOLTAGE_MIN_REG(i)) << 2;
- data->voltage[MAX][i] =
- adt7475_read(VOLTAGE_MAX_REG(i)) << 2;
- }
+ for (i = 0; i < ADT7475_TACH_COUNT; i++) {
+ if (i == 3 && !data->has_fan4)
+ continue;
+ ret = adt7475_read_word(client, TACH_REG(i));
+ if (ret < 0)
+ return ret;
+ data->tach[INPUT][i] = ret;
+ }
- if (data->has_voltage & (1 << 5)) {
- data->voltage[MIN][5] = adt7475_read(REG_VTT_MIN) << 2;
- data->voltage[MAX][5] = adt7475_read(REG_VTT_MAX) << 2;
- }
+ /* Updated by hw when in auto mode */
+ for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+ if (i == 1 && !data->has_pwm2)
+ continue;
+ ret = adt7475_read(PWM_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[INPUT][i] = ret;
+ }
- for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
- /* Adjust values so they match the input precision */
- data->temp[MIN][i] =
- adt7475_read(TEMP_MIN_REG(i)) << 2;
- data->temp[MAX][i] =
- adt7475_read(TEMP_MAX_REG(i)) << 2;
- data->temp[AUTOMIN][i] =
- adt7475_read(TEMP_TMIN_REG(i)) << 2;
- data->temp[THERM][i] =
- adt7475_read(TEMP_THERM_REG(i)) << 2;
- data->temp[OFFSET][i] =
- adt7475_read(TEMP_OFFSET_REG(i));
- }
- adt7475_read_hystersis(client);
+ if (data->has_vid) {
+ ret = adt7475_read(REG_VID);
+ if (ret < 0)
+ return ret;
+ data->vid = ret & 0x3f;
+ }
- for (i = 0; i < ADT7475_TACH_COUNT; i++) {
- if (i == 3 && !data->has_fan4)
- continue;
- data->tach[MIN][i] =
- adt7475_read_word(client, TACH_MIN_REG(i));
- }
+ return 0;
+}
- for (i = 0; i < ADT7475_PWM_COUNT; i++) {
- if (i == 1 && !data->has_pwm2)
- continue;
- data->pwm[MAX][i] = adt7475_read(PWM_MAX_REG(i));
- data->pwm[MIN][i] = adt7475_read(PWM_MIN_REG(i));
- /* Set the channel and control information */
- adt7475_read_pwm(client, i);
- }
+static struct adt7475_data *adt7475_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adt7475_data *data = i2c_get_clientdata(client);
+ int ret;
- data->range[0] = adt7475_read(TEMP_TRANGE_REG(0));
- data->range[1] = adt7475_read(TEMP_TRANGE_REG(1));
- data->range[2] = adt7475_read(TEMP_TRANGE_REG(2));
+ mutex_lock(&data->lock);
- data->limits_updated = jiffies;
- data->valid = 1;
+ /* Measurement values update every 2 seconds */
+ if (time_after(jiffies, data->measure_updated + HZ * 2) ||
+ !data->valid) {
+ ret = adt7475_update_measure(dev);
+ if (ret) {
+ data->valid = false;
+ mutex_unlock(&data->lock);
+ return ERR_PTR(ret);
+ }
+ data->measure_updated = jiffies;
+ data->valid = true;
}
mutex_unlock(&data->lock);
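
With this rework, adt7475_update_device() reports I2C failures through ERR_PTR() instead of silently returning stale data, which is why every show routine gains an IS_ERR() test before touching the result. The caller contract, sketched with hypothetical foo names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct foo_data {
        int value;
};

/* May return a valid pointer or ERR_PTR(-errno). */
struct foo_data *foo_update_device(struct device *dev);

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct foo_data *data = foo_update_device(dev);

        if (IS_ERR(data))
                return PTR_ERR(data);   /* propagate -EIO etc. to the reader */

        return sprintf(buf, "%d\n", data->value);
}
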
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 1ea7ca510f84..aaebeb726d6a 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -443,8 +443,10 @@ static int emc1403_probe(struct i2c_client *client,
switch (id->driver_data) {
case emc1404:
data->groups[2] = &emc1404_group;
+ /* fall through */
case emc1403:
data->groups[1] = &emc1403_group;
+ /* fall through */
case emc1402:
data->groups[0] = &emc1402_group;
}
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index e88c01961948..33d51281272b 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -394,12 +394,16 @@ static const char * const hwmon_power_attr_templates[] = {
[hwmon_power_cap_hyst] = "power%d_cap_hyst",
[hwmon_power_cap_max] = "power%d_cap_max",
[hwmon_power_cap_min] = "power%d_cap_min",
+ [hwmon_power_min] = "power%d_min",
[hwmon_power_max] = "power%d_max",
+ [hwmon_power_lcrit] = "power%d_lcrit",
[hwmon_power_crit] = "power%d_crit",
[hwmon_power_label] = "power%d_label",
[hwmon_power_alarm] = "power%d_alarm",
[hwmon_power_cap_alarm] = "power%d_cap_alarm",
+ [hwmon_power_min_alarm] = "power%d_min_alarm",
[hwmon_power_max_alarm] = "power%d_max_alarm",
+ [hwmon_power_lcrit_alarm] = "power%d_lcrit_alarm",
[hwmon_power_crit_alarm] = "power%d_crit_alarm",
};
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index f829dadfd5a0..83472808c816 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -90,11 +90,20 @@ struct sensor_data {
char label[MAX_LABEL_LEN];
char name[MAX_ATTR_LEN];
struct device_attribute dev_attr;
+ struct sensor_group_data *sgrp_data;
+};
+
+struct sensor_group_data {
+ struct mutex mutex;
+ u32 gid;
+ bool enable;
};
struct platform_data {
const struct attribute_group *attr_groups[MAX_SENSOR_TYPE + 1];
+ struct sensor_group_data *sgrp_data;
u32 sensors_count; /* Total count of sensors from each group */
+ u32 nr_sensor_groups; /* Total number of sensor groups */
};
static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
@@ -105,6 +114,9 @@ static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
ssize_t ret;
u64 x;
+ if (sdata->sgrp_data && !sdata->sgrp_data->enable)
+ return -ENODATA;
+
ret = opal_get_sensor_data_u64(sdata->id, &x);
if (ret)
@@ -120,6 +132,46 @@ static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%llu\n", x);
}
+static ssize_t show_enable(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_data *sdata = container_of(devattr, struct sensor_data,
+ dev_attr);
+
+ return sprintf(buf, "%u\n", sdata->sgrp_data->enable);
+}
+
+static ssize_t store_enable(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_data *sdata = container_of(devattr, struct sensor_data,
+ dev_attr);
+ struct sensor_group_data *sgrp_data = sdata->sgrp_data;
+ int ret;
+ bool data;
+
+ ret = kstrtobool(buf, &data);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&sgrp_data->mutex);
+ if (ret)
+ return ret;
+
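+	/* Only change the group state when the request differs from the cache. */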
+ if (data != sgrp_data->enable) {
+ ret = sensor_group_enable(sgrp_data->gid, data);
+ if (!ret)
+ sgrp_data->enable = data;
+ }
+
+ if (!ret)
+ ret = count;
+
+ mutex_unlock(&sgrp_data->mutex);
+ return ret;
+}
+
static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
char *buf)
{
@@ -292,12 +344,115 @@ static u32 get_sensor_hwmon_index(struct sensor_data *sdata,
return ++sensor_groups[sdata->type].hwmon_index;
}
+static int init_sensor_group_data(struct platform_device *pdev,
+ struct platform_data *pdata)
+{
+ struct sensor_group_data *sgrp_data;
+ struct device_node *groups, *sgrp;
+ int count = 0, ret = 0;
+ enum sensors type;
+
+ groups = of_find_compatible_node(NULL, NULL, "ibm,opal-sensor-group");
+ if (!groups)
+ return ret;
+
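+	/* First pass: count the valid sensor groups so the array can be sized. */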
+ for_each_child_of_node(groups, sgrp) {
+ type = get_sensor_type(sgrp);
+ if (type != MAX_SENSOR_TYPE)
+ pdata->nr_sensor_groups++;
+ }
+
+ if (!pdata->nr_sensor_groups)
+ goto out;
+
+ sgrp_data = devm_kcalloc(&pdev->dev, pdata->nr_sensor_groups,
+ sizeof(*sgrp_data), GFP_KERNEL);
+ if (!sgrp_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for_each_child_of_node(groups, sgrp) {
+ u32 gid;
+
+ type = get_sensor_type(sgrp);
+ if (type == MAX_SENSOR_TYPE)
+ continue;
+
+ if (of_property_read_u32(sgrp, "sensor-group-id", &gid))
+ continue;
+
+ if (of_count_phandle_with_args(sgrp, "sensors", NULL) <= 0)
+ continue;
+
+ sensor_groups[type].attr_count++;
+ sgrp_data[count].gid = gid;
+ mutex_init(&sgrp_data[count].mutex);
+ sgrp_data[count++].enable = false;
+ }
+
+ pdata->sgrp_data = sgrp_data;
+out:
+ of_node_put(groups);
+ return ret;
+}
+
+static struct sensor_group_data *get_sensor_group(struct platform_data *pdata,
+ struct device_node *node,
+ enum sensors gtype)
+{
+ struct sensor_group_data *sgrp_data = pdata->sgrp_data;
+ struct device_node *groups, *sgrp;
+
+ groups = of_find_compatible_node(NULL, NULL, "ibm,opal-sensor-group");
+ if (!groups)
+ return NULL;
+
+ for_each_child_of_node(groups, sgrp) {
+ struct of_phandle_iterator it;
+ u32 gid;
+ int rc, i;
+ enum sensors type;
+
+ type = get_sensor_type(sgrp);
+ if (type != gtype)
+ continue;
+
+ if (of_property_read_u32(sgrp, "sensor-group-id", &gid))
+ continue;
+
+ of_for_each_phandle(&it, rc, sgrp, "sensors", NULL, 0)
+ if (it.phandle == node->phandle) {
+ of_node_put(it.node);
+ break;
+ }
+
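+		/* rc == 0 means the loop above broke out on a phandle match. */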
+ if (rc)
+ continue;
+
+ for (i = 0; i < pdata->nr_sensor_groups; i++)
+ if (gid == sgrp_data[i].gid) {
+ of_node_put(sgrp);
+ of_node_put(groups);
+ return &sgrp_data[i];
+ }
+ }
+
+ of_node_put(groups);
+ return NULL;
+}
+
static int populate_attr_groups(struct platform_device *pdev)
{
struct platform_data *pdata = platform_get_drvdata(pdev);
const struct attribute_group **pgroups = pdata->attr_groups;
struct device_node *opal, *np;
enum sensors type;
+ int ret;
+
+ ret = init_sensor_group_data(pdev, pdata);
+ if (ret)
+ return ret;
opal = of_find_node_by_path("/ibm,opal/sensors");
for_each_child_of_node(opal, np) {
@@ -344,7 +499,10 @@ static int populate_attr_groups(struct platform_device *pdev)
static void create_hwmon_attr(struct sensor_data *sdata, const char *attr_name,
ssize_t (*show)(struct device *dev,
struct device_attribute *attr,
- char *buf))
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count))
{
snprintf(sdata->name, MAX_ATTR_LEN, "%s%d_%s",
sensor_groups[sdata->type].name, sdata->hwmon_index,
@@ -352,23 +510,33 @@ static void create_hwmon_attr(struct sensor_data *sdata, const char *attr_name,
sysfs_attr_init(&sdata->dev_attr.attr);
sdata->dev_attr.attr.name = sdata->name;
- sdata->dev_attr.attr.mode = S_IRUGO;
sdata->dev_attr.show = show;
+ if (store) {
+ sdata->dev_attr.store = store;
+ sdata->dev_attr.attr.mode = 0664;
+ } else {
+ sdata->dev_attr.attr.mode = 0444;
+ }
}
static void populate_sensor(struct sensor_data *sdata, int od, int hd, int sid,
const char *attr_name, enum sensors type,
const struct attribute_group *pgroup,
+ struct sensor_group_data *sgrp_data,
ssize_t (*show)(struct device *dev,
struct device_attribute *attr,
- char *buf))
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count))
{
sdata->id = sid;
sdata->type = type;
sdata->opal_index = od;
sdata->hwmon_index = hd;
- create_hwmon_attr(sdata, attr_name, show);
+ create_hwmon_attr(sdata, attr_name, show, store);
pgroup->attrs[sensor_groups[type].attr_count++] = &sdata->dev_attr.attr;
+ sdata->sgrp_data = sgrp_data;
}
static char *get_max_attr(enum sensors type)
@@ -403,24 +571,23 @@ static int create_device_attrs(struct platform_device *pdev)
const struct attribute_group **pgroups = pdata->attr_groups;
struct device_node *opal, *np;
struct sensor_data *sdata;
- u32 sensor_id;
- enum sensors type;
u32 count = 0;
- int err = 0;
+ u32 group_attr_id[MAX_SENSOR_TYPE] = {0};
- opal = of_find_node_by_path("/ibm,opal/sensors");
sdata = devm_kcalloc(&pdev->dev,
pdata->sensors_count, sizeof(*sdata),
GFP_KERNEL);
- if (!sdata) {
- err = -ENOMEM;
- goto exit_put_node;
- }
+ if (!sdata)
+ return -ENOMEM;
+ opal = of_find_node_by_path("/ibm,opal/sensors");
for_each_child_of_node(opal, np) {
+ struct sensor_group_data *sgrp_data;
const char *attr_name;
- u32 opal_index;
+ u32 opal_index, hw_id;
+ u32 sensor_id;
const char *label;
+ enum sensors type;
if (np->name == NULL)
continue;
@@ -456,14 +623,12 @@ static int create_device_attrs(struct platform_device *pdev)
opal_index = INVALID_INDEX;
}
- sdata[count].opal_index = opal_index;
- sdata[count].hwmon_index =
- get_sensor_hwmon_index(&sdata[count], sdata, count);
-
- create_hwmon_attr(&sdata[count], attr_name, show_sensor);
-
- pgroups[type]->attrs[sensor_groups[type].attr_count++] =
- &sdata[count++].dev_attr.attr;
+ hw_id = get_sensor_hwmon_index(&sdata[count], sdata, count);
+ sgrp_data = get_sensor_group(pdata, np, type);
+ populate_sensor(&sdata[count], opal_index, hw_id, sensor_id,
+ attr_name, type, pgroups[type], sgrp_data,
+ show_sensor, NULL);
+ count++;
if (!of_property_read_string(np, "label", &label)) {
/*
@@ -474,35 +639,43 @@ static int create_device_attrs(struct platform_device *pdev)
*/
make_sensor_label(np, &sdata[count], label);
- populate_sensor(&sdata[count], opal_index,
- sdata[count - 1].hwmon_index,
+ populate_sensor(&sdata[count], opal_index, hw_id,
sensor_id, "label", type, pgroups[type],
- show_label);
+ NULL, show_label, NULL);
count++;
}
if (!of_property_read_u32(np, "sensor-data-max", &sensor_id)) {
attr_name = get_max_attr(type);
- populate_sensor(&sdata[count], opal_index,
- sdata[count - 1].hwmon_index,
+ populate_sensor(&sdata[count], opal_index, hw_id,
sensor_id, attr_name, type,
- pgroups[type], show_sensor);
+ pgroups[type], sgrp_data, show_sensor,
+ NULL);
count++;
}
if (!of_property_read_u32(np, "sensor-data-min", &sensor_id)) {
attr_name = get_min_attr(type);
- populate_sensor(&sdata[count], opal_index,
- sdata[count - 1].hwmon_index,
+ populate_sensor(&sdata[count], opal_index, hw_id,
sensor_id, attr_name, type,
- pgroups[type], show_sensor);
+ pgroups[type], sgrp_data, show_sensor,
+ NULL);
+ count++;
+ }
+
+ if (sgrp_data && !sgrp_data->enable) {
+ sgrp_data->enable = true;
+ hw_id = ++group_attr_id[type];
+ populate_sensor(&sdata[count], opal_index, hw_id,
+ sgrp_data->gid, "enable", type,
+ pgroups[type], sgrp_data, show_enable,
+ store_enable);
count++;
}
}
-exit_put_node:
of_node_put(opal);
- return err;
+ return 0;
}
static int ibmpowernv_probe(struct platform_device *pdev)
@@ -517,6 +690,7 @@ static int ibmpowernv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdata);
pdata->sensors_count = 0;
+ pdata->nr_sensor_groups = 0;
err = populate_attr_groups(pdev);
if (err)
return err;
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 69031a0f7ed2..2f3f875c06ac 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -22,7 +22,6 @@
* struct iio_hwmon_state - device instance state
* @channels: filled with array of channels from iio
* @num_channels: number of channels in channels (saves counting twice)
- * @hwmon_dev: associated hwmon device
* @attr_group: the group of attributes
* @groups: null terminated array of attribute groups
* @attrs: null terminated array of attribute pointers.
@@ -30,7 +29,6 @@
struct iio_hwmon_state {
struct iio_channel *channels;
int num_channels;
- struct device *hwmon_dev;
struct attribute_group attr_group;
const struct attribute_group *groups[2];
struct attribute **attrs;
@@ -68,12 +66,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
enum iio_chan_type type;
struct iio_channel *channels;
const char *name = "iio_hwmon";
+ struct device *hwmon_dev;
char *sname;
if (dev->of_node && dev->of_node->name)
name = dev->of_node->name;
- channels = iio_channel_get_all(dev);
+ channels = devm_iio_channel_get_all(dev);
if (IS_ERR(channels)) {
if (PTR_ERR(channels) == -ENODEV)
return -EPROBE_DEFER;
@@ -81,10 +80,8 @@ static int iio_hwmon_probe(struct platform_device *pdev)
}
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (st == NULL)
+ return -ENOMEM;
st->channels = channels;
@@ -95,22 +92,18 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->attrs = devm_kcalloc(dev,
st->num_channels + 1, sizeof(*st->attrs),
GFP_KERNEL);
- if (st->attrs == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (st->attrs == NULL)
+ return -ENOMEM;
for (i = 0; i < st->num_channels; i++) {
a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
- if (a == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (a == NULL)
+ return -ENOMEM;
sysfs_attr_init(&a->dev_attr.attr);
ret = iio_get_channel_type(&st->channels[i], &type);
if (ret < 0)
- goto error_release_channels;
+ return ret;
switch (type) {
case IIO_VOLTAGE:
@@ -134,13 +127,11 @@ static int iio_hwmon_probe(struct platform_device *pdev)
humidity_i++);
break;
default:
- ret = -EINVAL;
- goto error_release_channels;
- }
- if (a->dev_attr.attr.name == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
+ return -EINVAL;
}
+ if (a->dev_attr.attr.name == NULL)
+ return -ENOMEM;
+
a->dev_attr.show = iio_hwmon_read_val;
a->dev_attr.attr.mode = S_IRUGO;
a->index = i;
@@ -151,34 +142,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->groups[0] = &st->attr_group;
sname = devm_kstrdup(dev, name, GFP_KERNEL);
- if (!sname) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (!sname)
+ return -ENOMEM;
strreplace(sname, '-', '_');
- st->hwmon_dev = hwmon_device_register_with_groups(dev, sname, st,
- st->groups);
- if (IS_ERR(st->hwmon_dev)) {
- ret = PTR_ERR(st->hwmon_dev);
- goto error_release_channels;
- }
- platform_set_drvdata(pdev, st);
- return 0;
-
-error_release_channels:
- iio_channel_release_all(channels);
- return ret;
-}
-
-static int iio_hwmon_remove(struct platform_device *pdev)
-{
- struct iio_hwmon_state *st = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(st->hwmon_dev);
- iio_channel_release_all(st->channels);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, sname, st,
+ st->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct of_device_id iio_hwmon_of_match[] = {
@@ -193,7 +163,6 @@ static struct platform_driver __refdata iio_hwmon_driver = {
.of_match_table = iio_hwmon_of_match,
},
.probe = iio_hwmon_probe,
- .remove = iio_hwmon_remove,
};
module_platform_driver(iio_hwmon_driver);
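For orientation (an editorial sketch, not part of the patch): after a devm conversion like the one above, probe needs no unwinding labels and the driver needs no .remove callback, because devm-managed resources are released automatically in reverse order. The names below are hypothetical:

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *hwmon_dev;

	/* Everything is devm-managed, so error paths simply return. */
	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "example",
							   NULL, NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}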
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 17c6460ae351..bb15d7816a29 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -99,12 +99,8 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen 7 1700X", 20000 },
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
{ 0x17, "AMD Ryzen 7 2700X", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
+ { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index 638567fb7cd8..3d9e210beedf 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index 67860ad2e3d9..78fe8759d2a9 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -23,6 +23,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/hwmon.h>
#include <linux/slab.h>
#include <linux/init.h>
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
new file mode 100644
index 000000000000..de46577c7d5a
--- /dev/null
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+// Copyright (c) 2018 Vadim Pasternak <vadimp@mellanox.com>
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+#define MLXREG_FAN_MAX_TACHO 12
+#define MLXREG_FAN_MAX_STATE 10
+#define MLXREG_FAN_MIN_DUTY 51 /* 20% */
+#define MLXREG_FAN_MAX_DUTY 255 /* 100% */
+/*
+ * Minimum and maximum allowed fan speed in percent: from 20% to 100%. Values
+ * MLXREG_FAN_MAX_STATE + x, where x is between 2 and 10, are used for
+ * setting the dynamic minimum fan speed. For example, if the value is set to
+ * 14 (40%), the cooling levels vector will be set to
+ * 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10, giving PWM speeds in percent of
+ * 40, 40, 40, 40, 40, 50, 60, 70, 80, 90, 100.
+ */
+#define MLXREG_FAN_SPEED_MIN (MLXREG_FAN_MAX_STATE + 2)
+#define MLXREG_FAN_SPEED_MAX (MLXREG_FAN_MAX_STATE * 2)
+#define MLXREG_FAN_SPEED_MIN_LEVEL 2 /* 20 percent */
+#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44
+#define MLXREG_FAN_TACHO_DIVIDER_DEF 1132
+/*
+ * The fan datasheet defines the formula for RPM calculation as
+ * RPM = 15/t-high. The logic in a programmable device measures the time
+ * t-high by sampling the tachometer every t-sample (default 11.32 us) and
+ * incrementing a counter (N) as long as the pulse has not changed:
+ * RPM = 15 / (t-sample * (K + Regval)), where:
+ * Regval: is the value read from the programmable device register;
+ * - 0xff - represents a tachometer fault;
+ * - 0xfe - represents the tachometer minimum value, which is 4444 RPM;
+ * - 0x00 - represents the tachometer maximum value, which is 300000 RPM;
+ * K: is 44 and represents the minimum allowed samples per pulse;
+ * N: is equal to K + Regval.
+ * To calculate RPM from the register value the following formula is used:
+ * RPM = 15 / ((Regval + K) * 11.32 * 10^(-6)), which in the
+ * default case reduces to:
+ * RPM = 15000000 * 100 / ((Regval + 44) * 1132);
+ * - for Regval 0x00, RPM will be 15000000 * 100 / (44 * 1132) = 30115;
+ * - for Regval 0xfe, RPM will be 15000000 * 100 / ((254 + 44) * 1132) = 4446.
+ * In the general case the formula is:
+ * RPM = 15000000 * 100 / ((Regval + samples) * divider).
+ */
+#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \
+ ((rval) + (s)) * (d)))
+#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask)))
+#define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \
+ MLXREG_FAN_MAX_STATE, \
+ MLXREG_FAN_MAX_DUTY))
+#define MLXREG_FAN_PWM_STATE2DUTY(stat) (DIV_ROUND_CLOSEST((stat) * \
+ MLXREG_FAN_MAX_DUTY, \
+ MLXREG_FAN_MAX_STATE))
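+/*
+ * For example, duty 153 maps to state DIV_ROUND_CLOSEST(153 * 10, 255) = 6,
+ * and state 6 maps back to duty DIV_ROUND_CLOSEST(6 * 255, 10) = 153.
+ */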
+
+/*
+ * struct mlxreg_fan_tacho - tachometer data (internal use):
+ *
+ * @connected: indicates if tachometer is connected;
+ * @reg: register offset;
+ * @mask: fault mask;
+ */
+struct mlxreg_fan_tacho {
+ bool connected;
+ u32 reg;
+ u32 mask;
+};
+
+/*
+ * struct mlxreg_fan_pwm - PWM data (internal use):
+ *
+ * @connected: indicates if PWM is connected;
+ * @reg: register offset;
+ */
+struct mlxreg_fan_pwm {
+ bool connected;
+ u32 reg;
+};
+
+/*
+ * struct mlxreg_fan - private data (internal use):
+ *
+ * @dev: basic device;
+ * @regmap: register map of parent device;
+ * @tacho: tachometer data;
+ * @pwm: PWM data;
+ * @samples: minimum allowed samples per pulse;
+ * @divider: divider value for tachometer RPM calculation;
+ * @cooling: cooling device levels;
+ * @cdev: cooling device;
+ */
+struct mlxreg_fan {
+ struct device *dev;
+ void *regmap;
+ struct mlxreg_core_platform_data *pdata;
+ struct mlxreg_fan_tacho tacho[MLXREG_FAN_MAX_TACHO];
+ struct mlxreg_fan_pwm pwm;
+ int samples;
+ int divider;
+ u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
+ struct thermal_cooling_device *cdev;
+};
+
+static int
+mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct mlxreg_fan *fan = dev_get_drvdata(dev);
+ struct mlxreg_fan_tacho *tacho;
+ u32 regval;
+ int err;
+
+ switch (type) {
+ case hwmon_fan:
+ tacho = &fan->tacho[channel];
+ switch (attr) {
+ case hwmon_fan_input:
+ err = regmap_read(fan->regmap, tacho->reg, &regval);
+ if (err)
+ return err;
+
+ *val = MLXREG_FAN_GET_RPM(regval, fan->divider,
+ fan->samples);
+ break;
+
+ case hwmon_fan_fault:
+ err = regmap_read(fan->regmap, tacho->reg, &regval);
+ if (err)
+ return err;
+
+ *val = MLXREG_FAN_GET_FAULT(regval, tacho->mask);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err)
+ return err;
+
+ *val = regval;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct mlxreg_fan *fan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < MLXREG_FAN_MIN_DUTY ||
+ val > MLXREG_FAN_MAX_DUTY)
+ return -EINVAL;
+ return regmap_write(fan->regmap, fan->pwm.reg, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t
+mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ if (!(((struct mlxreg_fan *)data)->tacho[channel].connected))
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+
+ case hwmon_pwm:
+ if (!(((struct mlxreg_fan *)data)->pwm.connected))
+ return 0;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const u32 mlxreg_fan_hwmon_fan_config[] = {
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ 0
+};
+
+static const struct hwmon_channel_info mlxreg_fan_hwmon_fan = {
+ .type = hwmon_fan,
+ .config = mlxreg_fan_hwmon_fan_config,
+};
+
+static const u32 mlxreg_fan_hwmon_pwm_config[] = {
+ HWMON_PWM_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info mlxreg_fan_hwmon_pwm = {
+ .type = hwmon_pwm,
+ .config = mlxreg_fan_hwmon_pwm_config,
+};
+
+static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
+ &mlxreg_fan_hwmon_fan,
+ &mlxreg_fan_hwmon_pwm,
+ NULL
+};
+
+static const struct hwmon_ops mlxreg_fan_hwmon_hwmon_ops = {
+ .is_visible = mlxreg_fan_is_visible,
+ .read = mlxreg_fan_read,
+ .write = mlxreg_fan_write,
+};
+
+static const struct hwmon_chip_info mlxreg_fan_hwmon_chip_info = {
+ .ops = &mlxreg_fan_hwmon_hwmon_ops,
+ .info = mlxreg_fan_hwmon_info,
+};
+
+static int mlxreg_fan_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MLXREG_FAN_MAX_STATE;
+ return 0;
+}
+
+static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+
+{
+ struct mlxreg_fan *fan = cdev->devdata;
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query PWM duty\n");
+ return err;
+ }
+
+ *state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+
+ return 0;
+}
+
+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+
+{
+ struct mlxreg_fan *fan = cdev->devdata;
+ unsigned long cur_state;
+ u32 regval;
+ int i;
+ int err;
+
+	/*
+	 * Check whether this request changes the allowed dynamic fan minimum.
+	 * If it does, update the cooling levels accordingly, and update the
+	 * state if the current state is below the newly requested minimum
+	 * state. For example, if the current state is 5 and the minimum state
+	 * is to be changed from 4 to 6, fan->cooling_levels[0..5] are all
+	 * changed from 4 to 6, and state 5 (fan->cooling_levels[4]) is
+	 * overwritten.
+	 */
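+	/*
+	 * Illustration: a request of state 14 yields a dynamic minimum of
+	 * 4 (14 - MLXREG_FAN_MAX_STATE); cooling_levels[] becomes
+	 * 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10, and a current state below the
+	 * new minimum is raised to it.
+	 */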
+ if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+ state -= MLXREG_FAN_MAX_STATE;
+ for (i = 0; i < state; i++)
+ fan->cooling_levels[i] = state;
+ for (i = state; i <= MLXREG_FAN_MAX_STATE; i++)
+ fan->cooling_levels[i] = i;
+
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query PWM duty\n");
+ return err;
+ }
+
+ cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+ if (state < cur_state)
+ return 0;
+
+ state = cur_state;
+ }
+
+ if (state > MLXREG_FAN_MAX_STATE)
+ return -EINVAL;
+
+ /* Normalize the state to the valid speed range. */
+ state = fan->cooling_levels[state];
+ err = regmap_write(fan->regmap, fan->pwm.reg,
+ MLXREG_FAN_PWM_STATE2DUTY(state));
+ if (err) {
+ dev_err(fan->dev, "Failed to write PWM duty\n");
+ return err;
+ }
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
+ .get_max_state = mlxreg_fan_get_max_state,
+ .get_cur_state = mlxreg_fan_get_cur_state,
+ .set_cur_state = mlxreg_fan_set_cur_state,
+};
+
+static int mlxreg_fan_config(struct mlxreg_fan *fan,
+ struct mlxreg_core_platform_data *pdata)
+{
+ struct mlxreg_core_data *data = pdata->data;
+ bool configured = false;
+ int tacho_num = 0, i;
+
+ fan->samples = MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF;
+ fan->divider = MLXREG_FAN_TACHO_DIVIDER_DEF;
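+	/* Classify the platform data entries by their label substrings. */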
+ for (i = 0; i < pdata->counter; i++, data++) {
+ if (strnstr(data->label, "tacho", sizeof(data->label))) {
+ if (tacho_num == MLXREG_FAN_MAX_TACHO) {
+ dev_err(fan->dev, "too many tacho entries: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->tacho[tacho_num].reg = data->reg;
+ fan->tacho[tacho_num].mask = data->mask;
+ fan->tacho[tacho_num++].connected = true;
+ } else if (strnstr(data->label, "pwm", sizeof(data->label))) {
+ if (fan->pwm.connected) {
+ dev_err(fan->dev, "duplicate pwm entry: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->pwm.reg = data->reg;
+ fan->pwm.connected = true;
+ } else if (strnstr(data->label, "conf", sizeof(data->label))) {
+ if (configured) {
+ dev_err(fan->dev, "duplicate conf entry: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ /* Validate that conf parameters are not zeros. */
+ if (!data->mask || !data->bit) {
+ dev_err(fan->dev, "invalid conf entry params: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->samples = data->mask;
+ fan->divider = data->bit;
+ configured = true;
+ } else {
+ dev_err(fan->dev, "invalid label: %s\n", data->label);
+ return -EINVAL;
+ }
+ }
+
+ /* Init cooling levels per PWM state. */
+ for (i = 0; i < MLXREG_FAN_SPEED_MIN_LEVEL; i++)
+ fan->cooling_levels[i] = MLXREG_FAN_SPEED_MIN_LEVEL;
+ for (i = MLXREG_FAN_SPEED_MIN_LEVEL; i <= MLXREG_FAN_MAX_STATE; i++)
+ fan->cooling_levels[i] = i;
+
+ return 0;
+}
+
+static int mlxreg_fan_probe(struct platform_device *pdev)
+{
+ struct mlxreg_core_platform_data *pdata;
+ struct mlxreg_fan *fan;
+ struct device *hwm;
+ int err;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Failed to get platform data.\n");
+ return -EINVAL;
+ }
+
+ fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+ if (!fan)
+ return -ENOMEM;
+
+ fan->dev = &pdev->dev;
+ fan->regmap = pdata->regmap;
+ platform_set_drvdata(pdev, fan);
+
+ err = mlxreg_fan_config(fan, pdata);
+ if (err)
+ return err;
+
+ hwm = devm_hwmon_device_register_with_info(&pdev->dev, "mlxreg_fan",
+ fan,
+ &mlxreg_fan_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwm)) {
+ dev_err(&pdev->dev, "Failed to register hwmon device\n");
+ return PTR_ERR(hwm);
+ }
+
+ if (IS_REACHABLE(CONFIG_THERMAL)) {
+ fan->cdev = thermal_cooling_device_register("mlxreg_fan", fan,
+ &mlxreg_fan_cooling_ops);
+ if (IS_ERR(fan->cdev)) {
+ dev_err(&pdev->dev, "Failed to register cooling device\n");
+ return PTR_ERR(fan->cdev);
+ }
+ }
+
+ return 0;
+}
+
+static int mlxreg_fan_remove(struct platform_device *pdev)
+{
+ struct mlxreg_fan *fan = platform_get_drvdata(pdev);
+
+ if (IS_REACHABLE(CONFIG_THERMAL))
+ thermal_cooling_device_unregister(fan->cdev);
+
+ return 0;
+}
+
+static struct platform_driver mlxreg_fan_driver = {
+ .driver = {
+ .name = "mlxreg-fan",
+ },
+ .probe = mlxreg_fan_probe,
+ .remove = mlxreg_fan_remove,
+};
+
+module_platform_driver(mlxreg_fan_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox FAN driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mlxreg-fan");
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index f9d1349c3286..c6bd61e4695a 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1050,8 +1050,8 @@ struct nct6775_data {
u64 beeps;
u8 pwm_num; /* number of pwm */
- u8 pwm_mode[NUM_FAN]; /* 1->DC variable voltage,
- * 0->PWM variable duty cycle
+ u8 pwm_mode[NUM_FAN]; /* 0->DC variable voltage,
+ * 1->PWM variable duty cycle
*/
enum pwm_enable pwm_enable[NUM_FAN];
/* 0->off
@@ -2541,7 +2541,7 @@ static void pwm_update_registers(struct nct6775_data *data, int nr)
case thermal_cruise:
nct6775_write_value(data, data->REG_TARGET[nr],
data->target_temp[nr]);
- /* intentional */
+ /* fall through */
default:
reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
reg = (reg & ~data->tolerance_mask) |
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 95a68ab175c7..7815ddf149f6 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -77,7 +77,7 @@ struct nct7904_data {
};
/* Access functions */
-static int nct7904_bank_lock(struct nct7904_data *data, unsigned bank)
+static int nct7904_bank_lock(struct nct7904_data *data, unsigned int bank)
{
int ret;
@@ -99,7 +99,7 @@ static inline void nct7904_bank_release(struct nct7904_data *data)
/* Read 1-byte register. Returns unsigned reg or -ERRNO on error. */
static int nct7904_read_reg(struct nct7904_data *data,
- unsigned bank, unsigned reg)
+ unsigned int bank, unsigned int reg)
{
struct i2c_client *client = data->client;
int ret;
@@ -117,7 +117,7 @@ static int nct7904_read_reg(struct nct7904_data *data,
* -ERRNO on error.
*/
static int nct7904_read_reg16(struct nct7904_data *data,
- unsigned bank, unsigned reg)
+ unsigned int bank, unsigned int reg)
{
struct i2c_client *client = data->client;
int ret, hi;
@@ -139,7 +139,7 @@ static int nct7904_read_reg16(struct nct7904_data *data,
/* Write 1-byte register. Returns 0 or -ERRNO on error. */
static int nct7904_write_reg(struct nct7904_data *data,
- unsigned bank, unsigned reg, u8 val)
+ unsigned int bank, unsigned int reg, u8 val)
{
struct i2c_client *client = data->client;
int ret;
@@ -159,7 +159,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
unsigned int cnt, rpm;
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_fan_input:
ret = nct7904_read_reg16(data, BANK_0,
FANIN1_HV_REG + channel * 2);
@@ -200,7 +200,7 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
index = nct7904_chan_to_index[channel];
- switch(attr) {
+ switch (attr) {
case hwmon_in_input:
ret = nct7904_read_reg16(data, BANK_0,
VSEN1_HV_REG + index * 2);
@@ -236,7 +236,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret, temp;
- switch(attr) {
+ switch (attr) {
case hwmon_temp_input:
if (channel == 0)
ret = nct7904_read_reg16(data, BANK_0, LTD_HV_REG);
@@ -276,7 +276,7 @@ static int nct7904_read_pwm(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
ret = nct7904_read_reg(data, BANK_3, FANCTL1_OUT_REG + channel);
if (ret < 0)
@@ -301,7 +301,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
if (val < 0 || val > 255)
return -EINVAL;
@@ -322,7 +322,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
static umode_t nct7904_pwm_is_visible(const void *_data, u32 attr, int channel)
{
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
case hwmon_pwm_enable:
return S_IRUGO | S_IWUSR;
@@ -431,15 +431,15 @@ static const struct hwmon_channel_info nct7904_in = {
};
static const u32 nct7904_fan_config[] = {
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- 0
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ 0
};
static const struct hwmon_channel_info nct7904_fan = {
@@ -448,11 +448,11 @@ static const struct hwmon_channel_info nct7904_fan = {
};
static const u32 nct7904_pwm_config[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ 0
};
static const struct hwmon_channel_info nct7904_pwm = {
@@ -461,16 +461,16 @@ static const struct hwmon_channel_info nct7904_pwm = {
};
static const u32 nct7904_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- 0
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ 0
};
static const struct hwmon_channel_info nct7904_temp = {
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
new file mode 100644
index 000000000000..8474d601aa63
--- /dev/null
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -0,0 +1,1057 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2014-2018 Nuvoton Technology corporation.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+
+/* NPCM7XX PWM registers */
+#define NPCM7XX_PWM_REG_BASE(base, n) ((base) + ((n) * 0x1000L))
+
+#define NPCM7XX_PWM_REG_PR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x00)
+#define NPCM7XX_PWM_REG_CSR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x04)
+#define NPCM7XX_PWM_REG_CR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x08)
+#define NPCM7XX_PWM_REG_CNRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x0C + (12 * (ch)))
+#define NPCM7XX_PWM_REG_CMRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x10 + (12 * (ch)))
+#define NPCM7XX_PWM_REG_PDRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x14 + (12 * (ch)))
+#define NPCM7XX_PWM_REG_PIER(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x3C)
+#define NPCM7XX_PWM_REG_PIIR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x40)
+
+#define NPCM7XX_PWM_CTRL_CH0_MODE_BIT BIT(3)
+#define NPCM7XX_PWM_CTRL_CH1_MODE_BIT BIT(11)
+#define NPCM7XX_PWM_CTRL_CH2_MODE_BIT BIT(15)
+#define NPCM7XX_PWM_CTRL_CH3_MODE_BIT BIT(19)
+
+#define NPCM7XX_PWM_CTRL_CH0_INV_BIT BIT(2)
+#define NPCM7XX_PWM_CTRL_CH1_INV_BIT BIT(10)
+#define NPCM7XX_PWM_CTRL_CH2_INV_BIT BIT(14)
+#define NPCM7XX_PWM_CTRL_CH3_INV_BIT BIT(18)
+
+#define NPCM7XX_PWM_CTRL_CH0_EN_BIT BIT(0)
+#define NPCM7XX_PWM_CTRL_CH1_EN_BIT BIT(8)
+#define NPCM7XX_PWM_CTRL_CH2_EN_BIT BIT(12)
+#define NPCM7XX_PWM_CTRL_CH3_EN_BIT BIT(16)
+
+/* Define the maximum PWM channel number */
+#define NPCM7XX_PWM_MAX_CHN_NUM 8
+#define NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE 4
+#define NPCM7XX_PWM_MAX_MODULES 2
+
+/* Define the Counter Register, value = 100 for match 100% */
+#define NPCM7XX_PWM_COUNTER_DEFAULT_NUM 255
+#define NPCM7XX_PWM_CMR_DEFAULT_NUM 127
+#define NPCM7XX_PWM_CMR_MAX 255
+
+/* default all PWM channels PRESCALE2 = 1 */
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 0x4
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 0x40
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 0x400
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3 0x4000
+
+#define PWM_OUTPUT_FREQ_25KHZ 25000
+#define PWN_CNT_DEFAULT 256
+#define MIN_PRESCALE1 2
+#define NPCM7XX_PWM_PRESCALE_SHIFT_CH01 8
+
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT (NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3)
+
+#define NPCM7XX_PWM_CTRL_MODE_DEFAULT (NPCM7XX_PWM_CTRL_CH0_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH1_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH2_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH3_MODE_BIT)
+
+/* NPCM7XX FAN Tacho registers */
+#define NPCM7XX_FAN_REG_BASE(base, n) ((base) + ((n) * 0x1000L))
+
+#define NPCM7XX_FAN_REG_TCNT1(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x00)
+#define NPCM7XX_FAN_REG_TCRA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x02)
+#define NPCM7XX_FAN_REG_TCRB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x04)
+#define NPCM7XX_FAN_REG_TCNT2(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x06)
+#define NPCM7XX_FAN_REG_TPRSC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x08)
+#define NPCM7XX_FAN_REG_TCKC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0A)
+#define NPCM7XX_FAN_REG_TMCTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0C)
+#define NPCM7XX_FAN_REG_TICTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0E)
+#define NPCM7XX_FAN_REG_TICLR(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x10)
+#define NPCM7XX_FAN_REG_TIEN(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x12)
+#define NPCM7XX_FAN_REG_TCPA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x14)
+#define NPCM7XX_FAN_REG_TCPB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x16)
+#define NPCM7XX_FAN_REG_TCPCFG(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x18)
+#define NPCM7XX_FAN_REG_TINASEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1A)
+#define NPCM7XX_FAN_REG_TINBSEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1C)
+
+#define NPCM7XX_FAN_TCKC_CLKX_NONE 0
+#define NPCM7XX_FAN_TCKC_CLK1_APB BIT(0)
+#define NPCM7XX_FAN_TCKC_CLK2_APB BIT(3)
+
+#define NPCM7XX_FAN_TMCTRL_TBEN BIT(6)
+#define NPCM7XX_FAN_TMCTRL_TAEN BIT(5)
+#define NPCM7XX_FAN_TMCTRL_TBEDG BIT(4)
+#define NPCM7XX_FAN_TMCTRL_TAEDG BIT(3)
+#define NPCM7XX_FAN_TMCTRL_MODE_5 BIT(2)
+
+#define NPCM7XX_FAN_TICLR_CLEAR_ALL GENMASK(5, 0)
+#define NPCM7XX_FAN_TICLR_TFCLR BIT(5)
+#define NPCM7XX_FAN_TICLR_TECLR BIT(4)
+#define NPCM7XX_FAN_TICLR_TDCLR BIT(3)
+#define NPCM7XX_FAN_TICLR_TCCLR BIT(2)
+#define NPCM7XX_FAN_TICLR_TBCLR BIT(1)
+#define NPCM7XX_FAN_TICLR_TACLR BIT(0)
+
+#define NPCM7XX_FAN_TIEN_ENABLE_ALL GENMASK(5, 0)
+#define NPCM7XX_FAN_TIEN_TFIEN BIT(5)
+#define NPCM7XX_FAN_TIEN_TEIEN BIT(4)
+#define NPCM7XX_FAN_TIEN_TDIEN BIT(3)
+#define NPCM7XX_FAN_TIEN_TCIEN BIT(2)
+#define NPCM7XX_FAN_TIEN_TBIEN BIT(1)
+#define NPCM7XX_FAN_TIEN_TAIEN BIT(0)
+
+#define NPCM7XX_FAN_TICTRL_TFPND BIT(5)
+#define NPCM7XX_FAN_TICTRL_TEPND BIT(4)
+#define NPCM7XX_FAN_TICTRL_TDPND BIT(3)
+#define NPCM7XX_FAN_TICTRL_TCPND BIT(2)
+#define NPCM7XX_FAN_TICTRL_TBPND BIT(1)
+#define NPCM7XX_FAN_TICTRL_TAPND BIT(0)
+
+#define NPCM7XX_FAN_TCPCFG_HIBEN BIT(7)
+#define NPCM7XX_FAN_TCPCFG_EQBEN BIT(6)
+#define NPCM7XX_FAN_TCPCFG_LOBEN BIT(5)
+#define NPCM7XX_FAN_TCPCFG_CPBSEL BIT(4)
+#define NPCM7XX_FAN_TCPCFG_HIAEN BIT(3)
+#define NPCM7XX_FAN_TCPCFG_EQAEN BIT(2)
+#define NPCM7XX_FAN_TCPCFG_LOAEN BIT(1)
+#define NPCM7XX_FAN_TCPCFG_CPASEL BIT(0)
+
+/* FAN General Definition */
+/* Define the maximum FAN channel number */
+#define NPCM7XX_FAN_MAX_MODULE 8
+#define NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE 2
+#define NPCM7XX_FAN_MAX_CHN_NUM 16
+
+/*
+ * Fan tach timeout (based on a 214843.75 Hz clock, 1 cnt = 4.654 us):
+ * timeout 94 ms ~= 0x5000 counts.
+ * (The minimum supportable fan speed is ~640 RPM at 1 pulse/rev,
+ * 320 RPM at 2 pulses/rev, ... -- 10.6 Hz.)
+ */
+#define NPCM7XX_FAN_TIMEOUT 0x5000
+#define NPCM7XX_FAN_TCNT 0xFFFF
+#define NPCM7XX_FAN_TCPA (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT)
+#define NPCM7XX_FAN_TCPB (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT)
+
+#define NPCM7XX_FAN_POLL_TIMER_200MS 200
+#define NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION 2
+#define NPCM7XX_FAN_TINASEL_FANIN_DEFAULT 0
+#define NPCM7XX_FAN_CLK_PRESCALE 255
+
+#define NPCM7XX_FAN_CMPA 0
+#define NPCM7XX_FAN_CMPB 1
+
+/* Obtain the fan number */
+#define NPCM7XX_FAN_INPUT(fan, cmp) (((fan) << 1) + (cmp))
+
+/* fan sample status */
+#define FAN_DISABLE 0xFF
+#define FAN_INIT 0x00
+#define FAN_PREPARE_TO_GET_FIRST_CAPTURE 0x01
+#define FAN_ENOUGH_SAMPLE 0x02
+
+struct npcm7xx_fan_dev {
+ u8 fan_st_flg;
+ u8 fan_pls_per_rev;
+ u16 fan_cnt;
+ u32 fan_cnt_tmp;
+};
+
+struct npcm7xx_cooling_device {
+ char name[THERMAL_NAME_LENGTH];
+ struct npcm7xx_pwm_fan_data *data;
+ struct thermal_cooling_device *tcdev;
+ int pwm_port;
+ u8 *cooling_levels;
+ u8 max_state;
+ u8 cur_state;
+};
+
+struct npcm7xx_pwm_fan_data {
+ void __iomem *pwm_base;
+ void __iomem *fan_base;
+ unsigned long pwm_clk_freq;
+ unsigned long fan_clk_freq;
+ struct clk *pwm_clk;
+ struct clk *fan_clk;
+ struct mutex pwm_lock[NPCM7XX_PWM_MAX_MODULES];
+ spinlock_t fan_lock[NPCM7XX_FAN_MAX_MODULE];
+ int fan_irq[NPCM7XX_FAN_MAX_MODULE];
+ bool pwm_present[NPCM7XX_PWM_MAX_CHN_NUM];
+ bool fan_present[NPCM7XX_FAN_MAX_CHN_NUM];
+ u32 input_clk_freq;
+ struct timer_list fan_timer;
+ struct npcm7xx_fan_dev fan_dev[NPCM7XX_FAN_MAX_CHN_NUM];
+ struct npcm7xx_cooling_device *cdev[NPCM7XX_PWM_MAX_CHN_NUM];
+ u8 fan_select;
+};
+
+static int npcm7xx_pwm_config_set(struct npcm7xx_pwm_fan_data *data,
+ int channel, u16 val)
+{
+ u32 pwm_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 tmp_buf, ctrl_en_bit, env_bit;
+
+ /*
+ * Config PWM Comparator register for setting duty cycle
+ */
+ mutex_lock(&data->pwm_lock[module]);
+
+ /* write new CMR value */
+ iowrite32(val, NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pwm_ch));
+ tmp_buf = ioread32(NPCM7XX_PWM_REG_CR(data->pwm_base, module));
+
+ switch (pwm_ch) {
+ case 0:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH0_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH0_INV_BIT;
+ break;
+ case 1:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH1_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH1_INV_BIT;
+ break;
+ case 2:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH2_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH2_INV_BIT;
+ break;
+ case 3:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH3_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH3_INV_BIT;
+ break;
+ default:
+ mutex_unlock(&data->pwm_lock[module]);
+ return -ENODEV;
+ }
+
+ if (val == 0) {
+ /* Disable PWM */
+ tmp_buf &= ~ctrl_en_bit;
+ tmp_buf |= env_bit;
+ } else {
+ /* Enable PWM */
+ tmp_buf |= ctrl_en_bit;
+ tmp_buf &= ~env_bit;
+ }
+
+ iowrite32(tmp_buf, NPCM7XX_PWM_REG_CR(data->pwm_base, module));
+ mutex_unlock(&data->pwm_lock[module]);
+
+ return 0;
+}
+
+static inline void npcm7xx_fan_start_capture(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp)
+{
+ u8 fan_id;
+ u8 reg_mode;
+ u8 reg_int;
+ unsigned long flags;
+
+ fan_id = NPCM7XX_FAN_INPUT(fan, cmp);
+
+	/* check whether this fan tach is enabled */
+ if (data->fan_dev[fan_id].fan_st_flg != FAN_DISABLE) {
+ /* reset status */
+ spin_lock_irqsave(&data->fan_lock[fan], flags);
+
+ data->fan_dev[fan_id].fan_st_flg = FAN_INIT;
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+		/*
+		 * The interrupt enable bits do not need to be cleared before
+		 * being set; they are cleared only on reset. The clock unit
+		 * control register behaves in the same manner as the
+		 * interrupt enable register.
+		 */
+ if (cmp == NPCM7XX_FAN_CMPA) {
+ /* enable interrupt */
+ iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TAIEN |
+ NPCM7XX_FAN_TIEN_TEIEN),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ reg_mode = NPCM7XX_FAN_TCKC_CLK1_APB
+ | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+
+			/* start capture */
+ iowrite8(reg_mode, NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+ } else {
+ /* enable interrupt */
+ iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TBIEN |
+ NPCM7XX_FAN_TIEN_TFIEN),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ reg_mode =
+ NPCM7XX_FAN_TCKC_CLK2_APB
+ | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+
+			/* start capture */
+ iowrite8(reg_mode,
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+ }
+
+ spin_unlock_irqrestore(&data->fan_lock[fan], flags);
+ }
+}
+
+/*
+ * Background timer to poll the fan tach values; one full pass over all
+ * fans takes four rounds of 200 ms.
+ */
+static void npcm7xx_fan_polling(struct timer_list *t)
+{
+ struct npcm7xx_pwm_fan_data *data;
+ int i;
+
+ data = from_timer(data, t, fan_timer);
+
+	/*
+	 * Poll two modules per round:
+	 * FAN01 & FAN89 / FAN23 & FAN1011 / FAN45 & FAN1213 / FAN67 & FAN1415
+	 */
+ for (i = data->fan_select; i < NPCM7XX_FAN_MAX_MODULE;
+ i = i + 4) {
+ /* clear the flag and reset the counter (TCNT) */
+ iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, i));
+
+ if (data->fan_present[i * 2]) {
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT1(data->fan_base, i));
+ npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPA);
+ }
+ if (data->fan_present[(i * 2) + 1]) {
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT2(data->fan_base, i));
+ npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPB);
+ }
+ }
+
+ data->fan_select++;
+ data->fan_select &= 0x3;
+
+ /* reset the timer interval */
+ data->fan_timer.expires = jiffies +
+ msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS);
+ add_timer(&data->fan_timer);
+}
+
+static inline void npcm7xx_fan_compute(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp, u8 fan_id, u8 flag_int,
+ u8 flag_mode, u8 flag_clear)
+{
+ u8 reg_int;
+ u8 reg_mode;
+ u16 fan_cap;
+
+ if (cmp == NPCM7XX_FAN_CMPA)
+ fan_cap = ioread16(NPCM7XX_FAN_REG_TCRA(data->fan_base, fan));
+ else
+ fan_cap = ioread16(NPCM7XX_FAN_REG_TCRB(data->fan_base, fan));
+
+	/* clear capture flag, H/W will auto reset the NPCM7XX_FAN_TCNTx */
+ iowrite8(flag_clear, NPCM7XX_FAN_REG_TICLR(data->fan_base, fan));
+
+ if (data->fan_dev[fan_id].fan_st_flg == FAN_INIT) {
+ /* First capture, drop it */
+ data->fan_dev[fan_id].fan_st_flg =
+ FAN_PREPARE_TO_GET_FIRST_CAPTURE;
+
+ /* reset counter */
+ data->fan_dev[fan_id].fan_cnt_tmp = 0;
+ } else if (data->fan_dev[fan_id].fan_st_flg < FAN_ENOUGH_SAMPLE) {
+		/*
+		 * Collect enough samples
+		 * (e.g. a 2-pulse fan needs 2 samples).
+		 */
+ data->fan_dev[fan_id].fan_cnt_tmp +=
+ (NPCM7XX_FAN_TCNT - fan_cap);
+
+ data->fan_dev[fan_id].fan_st_flg++;
+ } else {
+		/* got enough samples, or the fan is disabled */
+ if (data->fan_dev[fan_id].fan_st_flg == FAN_ENOUGH_SAMPLE) {
+ data->fan_dev[fan_id].fan_cnt_tmp +=
+ (NPCM7XX_FAN_TCNT - fan_cap);
+
+			/* compute the final average count per pulse */
+ data->fan_dev[fan_id].fan_cnt =
+ data->fan_dev[fan_id].fan_cnt_tmp /
+ FAN_ENOUGH_SAMPLE;
+
+ data->fan_dev[fan_id].fan_st_flg = FAN_INIT;
+ }
+
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* disable interrupt */
+ iowrite8((reg_int & ~flag_int),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+ reg_mode = ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+ /* stop capturing */
+ iowrite8((reg_mode & ~flag_mode),
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+ }
+}
+
+static inline void npcm7xx_check_cmp(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp, u8 flag)
+{
+ u8 reg_int;
+ u8 reg_mode;
+ u8 flag_timeout;
+ u8 flag_cap;
+ u8 flag_clear;
+ u8 flag_int;
+ u8 flag_mode;
+ u8 fan_id;
+
+ fan_id = NPCM7XX_FAN_INPUT(fan, cmp);
+
+ if (cmp == NPCM7XX_FAN_CMPA) {
+ flag_cap = NPCM7XX_FAN_TICTRL_TAPND;
+ flag_timeout = NPCM7XX_FAN_TICTRL_TEPND;
+ flag_int = NPCM7XX_FAN_TIEN_TAIEN | NPCM7XX_FAN_TIEN_TEIEN;
+ flag_mode = NPCM7XX_FAN_TCKC_CLK1_APB;
+ flag_clear = NPCM7XX_FAN_TICLR_TACLR | NPCM7XX_FAN_TICLR_TECLR;
+ } else {
+ flag_cap = NPCM7XX_FAN_TICTRL_TBPND;
+ flag_timeout = NPCM7XX_FAN_TICTRL_TFPND;
+ flag_int = NPCM7XX_FAN_TIEN_TBIEN | NPCM7XX_FAN_TIEN_TFIEN;
+ flag_mode = NPCM7XX_FAN_TCKC_CLK2_APB;
+ flag_clear = NPCM7XX_FAN_TICLR_TBCLR | NPCM7XX_FAN_TICLR_TFCLR;
+ }
+
+ if (flag & flag_timeout) {
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* disable interrupt */
+ iowrite8((reg_int & ~flag_int),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* clear interrupt flag */
+ iowrite8(flag_clear,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, fan));
+
+ reg_mode = ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+ /* stop capturing */
+ iowrite8((reg_mode & ~flag_mode),
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+		/*
+		 * If a timeout occurs (NPCM7XX_FAN_TIMEOUT), the fan is not
+		 * connected or its speed is lower than 10.6 Hz (320 RPM at
+		 * 2 pulses/rev). In this situation the RPM output should be
+		 * zero.
+		 */
+ data->fan_dev[fan_id].fan_cnt = 0;
+ } else {
+		/* an input capture occurred */
+ if (flag & flag_cap)
+ npcm7xx_fan_compute(data, fan, cmp, fan_id, flag_int,
+ flag_mode, flag_clear);
+ }
+}
+
+static irqreturn_t npcm7xx_fan_isr(int irq, void *dev_id)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_id;
+ unsigned long flags;
+ int module;
+ u8 flag;
+
+ module = irq - data->fan_irq[0];
+ spin_lock_irqsave(&data->fan_lock[module], flags);
+
+ flag = ioread8(NPCM7XX_FAN_REG_TICTRL(data->fan_base, module));
+ if (flag > 0) {
+ npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPA, flag);
+ npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPB, flag);
+ spin_unlock_irqrestore(&data->fan_lock[module], flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&data->fan_lock[module], flags);
+
+ return IRQ_NONE;
+}
+
+static int npcm7xx_read_pwm(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+ u32 pmw_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = ioread32
+ (NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pmw_ch));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int npcm7xx_write_pwm(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > NPCM7XX_PWM_CMR_MAX)
+ return -EINVAL;
+ err = npcm7xx_pwm_config_set(data, channel, (u16)val);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static umode_t npcm7xx_pwm_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct npcm7xx_pwm_fan_data *data = _data;
+
+ if (!data->pwm_present[channel])
+ return 0;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int npcm7xx_read_fan(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = 0;
+ if (data->fan_dev[channel].fan_cnt <= 0)
+ return data->fan_dev[channel].fan_cnt;
+
+ /* Convert the raw reading to RPM */
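+		/*
+		 * Illustration: with the 214843.75 Hz tach clock noted in the
+		 * header comment, a 2-pulse fan with fan_cnt = 2000 reads
+		 * 214843 * 60 / (2000 * 2) ~= 3222 RPM.
+		 */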
+ if (data->fan_dev[channel].fan_cnt > 0 &&
+ data->fan_dev[channel].fan_pls_per_rev > 0)
+ *val = ((data->input_clk_freq * 60) /
+ (data->fan_dev[channel].fan_cnt *
+ data->fan_dev[channel].fan_pls_per_rev));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t npcm7xx_fan_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct npcm7xx_pwm_fan_data *data = _data;
+
+ if (!data->fan_present[channel])
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int npcm7xx_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_read_pwm(dev, attr, channel, val);
+ case hwmon_fan:
+ return npcm7xx_read_fan(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int npcm7xx_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_write_pwm(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t npcm7xx_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_pwm_is_visible(data, attr, channel);
+ case hwmon_fan:
+ return npcm7xx_fan_is_visible(data, attr, channel);
+ default:
+ return 0;
+ }
+}
+
+static const u32 npcm7xx_pwm_config[] = {
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info npcm7xx_pwm = {
+ .type = hwmon_pwm,
+ .config = npcm7xx_pwm_config,
+};
+
+static const u32 npcm7xx_fan_config[] = {
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info npcm7xx_fan = {
+ .type = hwmon_fan,
+ .config = npcm7xx_fan_config,
+};
+
+static const struct hwmon_channel_info *npcm7xx_info[] = {
+ &npcm7xx_pwm,
+ &npcm7xx_fan,
+ NULL
+};
+
+static const struct hwmon_ops npcm7xx_hwmon_ops = {
+ .is_visible = npcm7xx_is_visible,
+ .read = npcm7xx_read,
+ .write = npcm7xx_write,
+};
+
+static const struct hwmon_chip_info npcm7xx_chip_info = {
+ .ops = &npcm7xx_hwmon_ops,
+ .info = npcm7xx_info,
+};
+
+static u32 npcm7xx_pwm_init(struct npcm7xx_pwm_fan_data *data)
+{
+ int m, ch;
+ u32 prescale_val, output_freq;
+
+ data->pwm_clk_freq = clk_get_rate(data->pwm_clk);
+
+	/* Adjust the NPCM7xx PWM output frequency to ~25 kHz */
+ output_freq = data->pwm_clk_freq / PWN_CNT_DEFAULT;
+ prescale_val = DIV_ROUND_CLOSEST(output_freq, PWM_OUTPUT_FREQ_25KHZ);
+
+ /* If prescale_val = 0, then the prescale output clock is stopped */
+ if (prescale_val < MIN_PRESCALE1)
+ prescale_val = MIN_PRESCALE1;
+	/*
+	 * prescale_val needs to be decremented by one because the hardware
+	 * adds one to the value programmed in the PWM Prescale register.
+	 */
+ prescale_val--;
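+	/*
+	 * Illustrative figures, assuming a 25 MHz PWM clock: output_freq =
+	 * 97656, prescale_val = DIV_ROUND_CLOSEST(97656, 25000) = 4,
+	 * programmed as 3, for a final PWM base clock of 97656 / 4 ~= 24.4 kHz.
+	 */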
+
+	/* Set the PWM Prescale register value for both modules */
+ prescale_val |= (prescale_val << NPCM7XX_PWM_PRESCALE_SHIFT_CH01);
+
+ for (m = 0; m < NPCM7XX_PWM_MAX_MODULES ; m++) {
+ iowrite32(prescale_val, NPCM7XX_PWM_REG_PR(data->pwm_base, m));
+ iowrite32(NPCM7XX_PWM_PRESCALE2_DEFAULT,
+ NPCM7XX_PWM_REG_CSR(data->pwm_base, m));
+ iowrite32(NPCM7XX_PWM_CTRL_MODE_DEFAULT,
+ NPCM7XX_PWM_REG_CR(data->pwm_base, m));
+
+ for (ch = 0; ch < NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE; ch++) {
+ iowrite32(NPCM7XX_PWM_COUNTER_DEFAULT_NUM,
+ NPCM7XX_PWM_REG_CNRx(data->pwm_base, m, ch));
+ }
+ }
+
+ return output_freq / ((prescale_val & 0xf) + 1);
+}
+
+static void npcm7xx_fan_init(struct npcm7xx_pwm_fan_data *data)
+{
+ int md;
+ int ch;
+ int i;
+ u32 apb_clk_freq;
+
+ for (md = 0; md < NPCM7XX_FAN_MAX_MODULE; md++) {
+ /* stop FAN0~7 clock */
+ iowrite8(NPCM7XX_FAN_TCKC_CLKX_NONE,
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, md));
+
+ /* disable all interrupt */
+ iowrite8(0x00, NPCM7XX_FAN_REG_TIEN(data->fan_base, md));
+
+ /* clear all interrupt */
+ iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, md));
+
+ /* set FAN0~7 clock prescaler */
+ iowrite8(NPCM7XX_FAN_CLK_PRESCALE,
+ NPCM7XX_FAN_REG_TPRSC(data->fan_base, md));
+
+ /* set FAN0~7 mode (high-to-low transition) */
+ iowrite8((NPCM7XX_FAN_TMCTRL_MODE_5 | NPCM7XX_FAN_TMCTRL_TBEN |
+ NPCM7XX_FAN_TMCTRL_TAEN),
+ NPCM7XX_FAN_REG_TMCTRL(data->fan_base, md));
+
+ /* set FAN0~7 Initial Count/Cap */
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT1(data->fan_base, md));
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT2(data->fan_base, md));
+
+ /* set FAN0~7 compare (equal to count) */
+ iowrite8((NPCM7XX_FAN_TCPCFG_EQAEN | NPCM7XX_FAN_TCPCFG_EQBEN),
+ NPCM7XX_FAN_REG_TCPCFG(data->fan_base, md));
+
+ /* set FAN0~7 compare value */
+ iowrite16(NPCM7XX_FAN_TCPA,
+ NPCM7XX_FAN_REG_TCPA(data->fan_base, md));
+ iowrite16(NPCM7XX_FAN_TCPB,
+ NPCM7XX_FAN_REG_TCPB(data->fan_base, md));
+
+ /* set FAN0~7 fan input FANIN 0~15 */
+ iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT,
+ NPCM7XX_FAN_REG_TINASEL(data->fan_base, md));
+ iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT,
+ NPCM7XX_FAN_REG_TINBSEL(data->fan_base, md));
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE; i++) {
+ ch = md * NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE + i;
+ data->fan_dev[ch].fan_st_flg = FAN_DISABLE;
+ data->fan_dev[ch].fan_pls_per_rev =
+ NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION;
+ data->fan_dev[ch].fan_cnt = 0;
+ }
+ }
+
+ apb_clk_freq = clk_get_rate(data->fan_clk);
+
+	/* Fan tach input clock = APB clock / prescaler; default prescaler is 255. */
+ data->input_clk_freq = apb_clk_freq / (NPCM7XX_FAN_CLK_PRESCALE + 1);
+}
+
+static int
+npcm7xx_pwm_cz_get_max_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->max_state;
+
+ return 0;
+}
+
+static int
+npcm7xx_pwm_cz_get_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->cur_state;
+
+ return 0;
+}
+
+static int
+npcm7xx_pwm_cz_set_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+ int ret;
+
+ if (state > cdev->max_state)
+ return -EINVAL;
+
+ cdev->cur_state = state;
+ ret = npcm7xx_pwm_config_set(cdev->data, cdev->pwm_port,
+ cdev->cooling_levels[cdev->cur_state]);
+
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops npcm7xx_pwm_cool_ops = {
+ .get_max_state = npcm7xx_pwm_cz_get_max_state,
+ .get_cur_state = npcm7xx_pwm_cz_get_cur_state,
+ .set_cur_state = npcm7xx_pwm_cz_set_cur_state,
+};
+
+static int npcm7xx_create_pwm_cooling(struct device *dev,
+ struct device_node *child,
+ struct npcm7xx_pwm_fan_data *data,
+ u32 pwm_port, u8 num_levels)
+{
+ int ret;
+ struct npcm7xx_cooling_device *cdev;
+
+ cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->cooling_levels = devm_kzalloc(dev, num_levels, GFP_KERNEL);
+ if (!cdev->cooling_levels)
+ return -ENOMEM;
+
+ cdev->max_state = num_levels - 1;
+ ret = of_property_read_u8_array(child, "cooling-levels",
+ cdev->cooling_levels,
+ num_levels);
+ if (ret) {
+ dev_err(dev, "Property 'cooling-levels' cannot be read.\n");
+ return ret;
+ }
+ snprintf(cdev->name, THERMAL_NAME_LENGTH, "%s%d", child->name,
+ pwm_port);
+
+ cdev->tcdev = thermal_of_cooling_device_register(child,
+ cdev->name,
+ cdev,
+ &npcm7xx_pwm_cool_ops);
+ if (IS_ERR(cdev->tcdev))
+ return PTR_ERR(cdev->tcdev);
+
+ cdev->data = data;
+ cdev->pwm_port = pwm_port;
+
+ data->cdev[pwm_port] = cdev;
+
+ return 0;
+}
+
+static int npcm7xx_en_pwm_fan(struct device *dev,
+ struct device_node *child,
+ struct npcm7xx_pwm_fan_data *data)
+{
+ u8 *fan_ch;
+ u32 pwm_port;
+ int ret, fan_cnt;
+ u8 index, ch;
+
+ ret = of_property_read_u32(child, "reg", &pwm_port);
+ if (ret)
+ return ret;
+
+ data->pwm_present[pwm_port] = true;
+ ret = npcm7xx_pwm_config_set(data, pwm_port,
+ NPCM7XX_PWM_CMR_DEFAULT_NUM);
+ if (ret)
+ return ret;
+
+ ret = of_property_count_u8_elems(child, "cooling-levels");
+ if (ret > 0) {
+ ret = npcm7xx_create_pwm_cooling(dev, child, data, pwm_port,
+ ret);
+ if (ret)
+ return ret;
+ }
+
+ fan_cnt = of_property_count_u8_elems(child, "fan-tach-ch");
+ if (fan_cnt < 1)
+ return -EINVAL;
+
+ fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL);
+ if (!fan_ch)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(child, "fan-tach-ch", fan_ch, fan_cnt);
+ if (ret)
+ return ret;
+
+ for (ch = 0; ch < fan_cnt; ch++) {
+ index = fan_ch[ch];
+ data->fan_present[index] = true;
+ data->fan_dev[index].fan_st_flg = FAN_INIT;
+ }
+
+ return 0;
+}
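+
+/*
+ * Illustrative DT child node consumed by npcm7xx_en_pwm_fan() above
+ * (a sketch only; the channel numbers and duty-cycle values are
+ * made-up examples):
+ *
+ *	fan@0 {
+ *		reg = <0x00>;
+ *		fan-tach-ch = /bits/ 8 <0x00 0x01>;
+ *		cooling-levels = /bits/ 8 <127 255>;
+ *	};
+ */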
+
+static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np, *child;
+ struct npcm7xx_pwm_fan_data *data;
+ struct resource *res;
+ struct device *hwmon;
+ char name[20];
+ int ret, cnt;
+ u32 output_freq;
+ u32 i;
+
+ np = dev->of_node;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
+ if (!res) {
+ dev_err(dev, "pwm resource not found\n");
+ return -ENODEV;
+ }
+
+ data->pwm_base = devm_ioremap_resource(dev, res);
+ dev_dbg(dev, "pwm base resource is %pR\n", res);
+ if (IS_ERR(data->pwm_base))
+ return PTR_ERR(data->pwm_base);
+
+ data->pwm_clk = devm_clk_get(dev, "pwm");
+ if (IS_ERR(data->pwm_clk)) {
+ dev_err(dev, "couldn't get pwm clock\n");
+ return PTR_ERR(data->pwm_clk);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fan");
+ if (!res) {
+ dev_err(dev, "fan resource not found\n");
+ return -ENODEV;
+ }
+
+ data->fan_base = devm_ioremap_resource(dev, res);
+ dev_dbg(dev, "fan base resource is %pR\n", res);
+ if (IS_ERR(data->fan_base))
+ return PTR_ERR(data->fan_base);
+
+ data->fan_clk = devm_clk_get(dev, "fan");
+ if (IS_ERR(data->fan_clk)) {
+ dev_err(dev, "couldn't get fan clock\n");
+ return PTR_ERR(data->fan_clk);
+ }
+
+ output_freq = npcm7xx_pwm_init(data);
+ npcm7xx_fan_init(data);
+
+ for (cnt = 0; cnt < NPCM7XX_PWM_MAX_MODULES ; cnt++)
+ mutex_init(&data->pwm_lock[cnt]);
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_MODULE; i++) {
+ spin_lock_init(&data->fan_lock[i]);
+
+ data->fan_irq[i] = platform_get_irq(pdev, i);
+ if (data->fan_irq[i] < 0) {
+ dev_err(dev, "get IRQ fan%d failed\n", i);
+ return data->fan_irq[i];
+ }
+
+ sprintf(name, "NPCM7XX-FAN-MD%d", i);
+ ret = devm_request_irq(dev, data->fan_irq[i], npcm7xx_fan_isr,
+ 0, name, (void *)data);
+ if (ret) {
+ dev_err(dev, "register IRQ fan%d failed\n", i);
+ return ret;
+ }
+ }
+
+ for_each_child_of_node(np, child) {
+ ret = npcm7xx_en_pwm_fan(dev, child, data);
+ if (ret) {
+ dev_err(dev, "enable pwm and fan failed\n");
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "npcm7xx_pwm_fan",
+ data, &npcm7xx_chip_info,
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_err(dev, "unable to register hwmon device\n");
+ return PTR_ERR(hwmon);
+ }
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM; i++) {
+ if (data->fan_present[i]) {
+ /* fan timer initialization */
+ data->fan_timer.expires = jiffies +
+ msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS);
+ timer_setup(&data->fan_timer,
+ npcm7xx_fan_polling, 0);
+ add_timer(&data->fan_timer);
+ break;
+ }
+ }
+
+ pr_info("NPCM7XX PWM-FAN Driver probed, output Freq %dHz[PWM], input Freq %dHz[FAN]\n",
+ output_freq, data->input_clk_freq);
+
+ return 0;
+}
+
+static const struct of_device_id of_pwm_fan_match_table[] = {
+ { .compatible = "nuvoton,npcm750-pwm-fan", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match_table);
+
+static struct platform_driver npcm7xx_pwm_fan_driver = {
+ .probe = npcm7xx_pwm_fan_probe,
+ .driver = {
+ .name = "npcm7xx_pwm_fan",
+ .of_match_table = of_pwm_fan_match_table,
+ },
+};
+
+module_platform_driver(npcm7xx_pwm_fan_driver);
+
+MODULE_DESCRIPTION("Nuvoton NPCM7XX PWM and Fan Tacho driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index e71aec69e76e..a82018aaf473 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -130,7 +130,7 @@ config SENSORS_MAX34440
default n
help
If you say yes here you get hardware monitoring support for Maxim
- MAX34440, MAX34441, MAX34446, MAX34460, and MAX34461.
+ MAX34440, MAX34441, MAX34446, MAX34451, MAX34460, and MAX34461.
This driver can also be built as a module. If so, the module will
be called max34440.
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 74a1f6f68fb3..47576c460010 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -27,7 +27,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { max34440, max34441, max34446, max34460, max34461 };
+enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
#define MAX34440_MFR_VOUT_PEAK 0xd4
#define MAX34440_MFR_IOUT_PEAK 0xd5
@@ -44,6 +44,9 @@ enum chips { max34440, max34441, max34446, max34460, max34461 };
#define MAX34440_STATUS_OT_FAULT BIT(5)
#define MAX34440_STATUS_OT_WARN BIT(6)
+#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
+#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
+
struct max34440_data {
int id;
struct pmbus_driver_info info;
@@ -67,7 +70,7 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
MAX34440_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_AVG:
- if (data->id != max34446)
+ if (data->id != max34446 && data->id != max34451)
return -ENXIO;
ret = pmbus_read_word_data(client, page,
MAX34446_MFR_IOUT_AVG);
@@ -143,7 +146,7 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, page,
MAX34440_MFR_IOUT_PEAK, 0);
- if (!ret && data->id == max34446)
+ if (!ret && (data->id == max34446 || data->id == max34451))
ret = pmbus_write_word_data(client, page,
MAX34446_MFR_IOUT_AVG, 0);
@@ -202,6 +205,58 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
return ret;
}
+static int max34451_set_supported_funcs(struct i2c_client *client,
+ struct max34440_data *data)
+{
+ /*
+ * Each of channels 0-15 can be configured to monitor one of the
+ * following functions, based on MFR_CHANNEL_CONFIG[5:0]:
+ * 0x10: Sequencing + voltage monitoring (only valid for PAGES 0-11)
+ * 0x20: Voltage monitoring (no sequencing)
+ * 0x21: Voltage read only
+ * 0x22: Current monitoring
+ * 0x23: Current read only
+ * 0x30: General-purpose input active low
+ * 0x34: General-purpose input active high
+ * 0x00: Disabled
+ */
+
+ int page, rv;
+
+ for (page = 0; page < 16; page++) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (rv < 0)
+ return rv;
+
+ rv = i2c_smbus_read_word_data(client,
+ MAX34451_MFR_CHANNEL_CONFIG);
+ if (rv < 0)
+ return rv;
+
+ switch (rv & MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK) {
+ case 0x10:
+ case 0x20:
+ data->info.func[page] = PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_STATUS_VOUT;
+ break;
+ case 0x21:
+ data->info.func[page] = PMBUS_HAVE_VOUT;
+ break;
+ case 0x22:
+ data->info.func[page] = PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT;
+ break;
+ case 0x23:
+ data->info.func[page] = PMBUS_HAVE_IOUT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
static struct pmbus_driver_info max34440_info[] = {
[max34440] = {
.pages = 14,
@@ -325,6 +380,30 @@ static struct pmbus_driver_info max34440_info[] = {
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
+ [max34451] = {
+ .pages = 21,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 2,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ /* func 0-15 is set dynamically before probing */
+ .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
[max34460] = {
.pages = 18,
.format[PSC_VOLTAGE_OUT] = direct,
@@ -398,6 +477,7 @@ static int max34440_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max34440_data *data;
+ int rv;
data = devm_kzalloc(&client->dev, sizeof(struct max34440_data),
GFP_KERNEL);
@@ -406,6 +486,12 @@ static int max34440_probe(struct i2c_client *client,
data->id = id->driver_data;
data->info = max34440_info[id->driver_data];
+ if (data->id == max34451) {
+ rv = max34451_set_supported_funcs(client, data);
+ if (rv)
+ return rv;
+ }
+
return pmbus_do_probe(client, id, &data->info);
}
@@ -413,6 +499,7 @@ static const struct i2c_device_id max34440_id[] = {
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
+ {"max34451", max34451},
{"max34460", max34460},
{"max34461", max34461},
{}
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index d16e6a3d38e8..2bad40d42210 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -367,6 +367,35 @@ out:
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
+/**
+ * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
+ * @np: device node from which to request the specific hwlock
+ * @name: hwlock name
+ *
+ * This function provides a means for DT users of the hwspinlock module to
+ * get the global lock id of a specific hwspinlock using the specified name of
+ * the hwspinlock device, so that it can be requested using the normal
+ * hwspin_lock_request_specific() API.
+ *
+ * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
+ * device is not yet registered, -EINVAL on invalid arguments, or an
+ * appropriate error as returned from the OF parsing of the DT client node.
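+ *
+ * Illustrative usage (a sketch only; the "mylock" entry in "hwlock-names"
+ * is a made-up example):
+ *
+ *	id = of_hwspin_lock_get_id_byname(pdev->dev.of_node, "mylock");
+ *	if (id >= 0)
+ *		hwlock = hwspin_lock_request_specific(id);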
+ */
+int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
+{
+ int index;
+
+ if (!name)
+ return -EINVAL;
+
+ index = of_property_match_string(np, "hwlock-names", name);
+ if (index < 0)
+ return index;
+
+ return of_hwspin_lock_get_id(np, index);
+}
+EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);
+
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
struct hwspinlock *tmp;
@@ -500,6 +529,88 @@ int hwspin_lock_unregister(struct hwspinlock_device *bank)
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
+static void devm_hwspin_lock_unreg(struct device *dev, void *res)
+{
+ hwspin_lock_unregister(*(struct hwspinlock_device **)res);
+}
+
+static int devm_hwspin_lock_device_match(struct device *dev, void *res,
+ void *data)
+{
+ struct hwspinlock_device **bank = res;
+
+ if (WARN_ON(!bank || !*bank))
+ return 0;
+
+ return *bank == data;
+}
+
+/**
+ * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
+ * a managed device
+ * @dev: the backing device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int devm_hwspin_lock_unregister(struct device *dev,
+ struct hwspinlock_device *bank)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_hwspin_lock_unreg,
+ devm_hwspin_lock_device_match, bank);
+ WARN_ON(ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);
+
+/**
+ * devm_hwspin_lock_register() - register a new hw spinlock device for
+ * a managed device
+ * @dev: the backing device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ * @ops: hwspinlock handlers for this device
+ * @base_id: id of the first hardware spinlock in this bank
+ * @num_locks: number of hwspinlocks provided by this device
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock device instance.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
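+ *
+ * Minimal usage sketch from a probe path (the ops structure, base id and
+ * lock count below are hypothetical):
+ *
+ *	ret = devm_hwspin_lock_register(&pdev->dev, bank, &my_hwlock_ops, 0, 8);
+ *	if (ret)
+ *		return ret;
+ *
+ * The bank is then unregistered automatically when the device detaches.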
+ */
+int devm_hwspin_lock_register(struct device *dev,
+ struct hwspinlock_device *bank,
+ const struct hwspinlock_ops *ops,
+ int base_id, int num_locks)
+{
+ struct hwspinlock_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
+ if (!ret) {
+ *ptr = bank;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
+
/**
* __hwspin_lock_request() - tag an hwspinlock as used and power it up
*
@@ -656,7 +767,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
*
* This function mark @hwlock as free again.
* Should only be called with an @hwlock that was retrieved from
- * an earlier call to omap_hwspin_lock_request{_specific}.
+ * an earlier call to hwspin_lock_request{_specific}.
*
* Should be called from a process context (might sleep)
*
@@ -706,6 +817,116 @@ out:
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
+static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
+{
+ struct hwspinlock **hwlock = res;
+
+ if (WARN_ON(!hwlock || !*hwlock))
+ return 0;
+
+ return *hwlock == data;
+}
+
+static void devm_hwspin_lock_release(struct device *dev, void *res)
+{
+ hwspin_lock_free(*(struct hwspinlock **)res);
+}
+
+/**
+ * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
+ * @dev: the device to free the specific hwspinlock
+ * @hwlock: the specific hwspinlock to free
+ *
+ * This function marks @hwlock as free again.
+ * Should only be called with an @hwlock that was retrieved from
+ * an earlier call to hwspin_lock_request{_specific}.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_hwspin_lock_release,
+ devm_hwspin_lock_match, hwlock);
+ WARN_ON(ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);
+
+/**
+ * devm_hwspin_lock_request() - request an hwspinlock for a managed device
+ * @dev: the device to request an hwspinlock
+ *
+ * This function should be called by users of the hwspinlock device,
+ * in order to dynamically assign them an unused hwspinlock.
+ * Usually the user of this lock will then have to communicate the lock's id
+ * to the remote core before it can be used for synchronization (to get the
+ * id of a given hwlock, use hwspin_lock_get_id()).
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
+{
+ struct hwspinlock **ptr, *hwlock;
+
+ ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ hwlock = hwspin_lock_request();
+ if (hwlock) {
+ *ptr = hwlock;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);
+
+/**
+ * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
+ * a managed device
+ * @dev: the device to request the specific hwspinlock
+ * @id: index of the specific hwspinlock that is requested
+ *
+ * This function should be called by users of the hwspinlock module,
+ * in order to assign them a specific hwspinlock.
+ * Usually early board code will be calling this function in order to
+ * reserve specific hwspinlock ids for predefined purposes.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
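+ *
+ * Minimal usage sketch (lock id 5 is an arbitrary example):
+ *
+ *	hwlock = devm_hwspin_lock_request_specific(&pdev->dev, 5);
+ *	if (!hwlock)
+ *		return -EBUSY;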
+ */
+struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+ unsigned int id)
+{
+ struct hwspinlock **ptr, *hwlock;
+
+ ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ hwlock = hwspin_lock_request_specific(id);
+ if (hwlock) {
+ *ptr = hwlock;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return hwlock;
+}
+EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ef9cb3c164e1..ad34380cac49 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -31,6 +31,17 @@ config CORESIGHT_LINK_AND_SINK_TMC
complies with the generic implementation of the component without
special enhancement or added features.
+config CORESIGHT_CATU
+ bool "Coresight Address Translation Unit (CATU) driver"
+ depends on CORESIGHT_LINK_AND_SINK_TMC
+ help
+ Enable support for the Coresight Address Translation Unit (CATU).
+ CATU supports a scatter-gather table of 4K pages, with forward/backward
+ lookup. CATU helps the TMC ETR use a large, physically non-contiguous
+ trace buffer by translating the addresses used by the ETR into physical
+ addresses via the provided table. CATU can also be used in pass-through
+ mode, where addresses are not translated.
+
config CORESIGHT_SINK_TPIU
bool "Coresight generic TPIU driver"
depends on CORESIGHT_LINKS_AND_SINKS
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 61db9dd0d571..41870ded51a3 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
obj-$(CONFIG_CORESIGHT_DYNAMIC_REPLICATOR) += coresight-dynamic-replicator.o
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
+obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
new file mode 100644
index 000000000000..ff94e58845b7
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Arm Limited. All rights reserved.
+ *
+ * Coresight Address Translation Unit support
+ *
+ * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "coresight-catu.h"
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+
+#define csdev_to_catu_drvdata(csdev) \
+ dev_get_drvdata(csdev->dev.parent)
+
+/* Verbose output for CATU table contents */
+#ifdef CATU_DEBUG
+#define catu_dbg(x, ...) dev_dbg(x, __VA_ARGS__)
+#else
+#define catu_dbg(x, ...) do {} while (0)
+#endif
+
+struct catu_etr_buf {
+ struct tmc_sg_table *catu_table;
+ dma_addr_t sladdr;
+};
+
+/*
+ * CATU uses a page size of 4KB for page tables as well as data pages.
+ * Each 64bit entry in the table has the following format.
+ *
+ * 63 12 1 0
+ * ------------------------------------
+ * | Address [63-12] | SBZ | V|
+ * ------------------------------------
+ *
+ * Where bit[0] V indicates if the address is valid or not.
+ * Each 4K table page holds up to 256 data page pointers, occupying up
+ * to 2K. Two Link pointers at the end of the 4K page (entries 510 and
+ * 511) point to the previous and next table pages respectively.
+ * E.g., a table of two pages could look like:
+ *
+ * Table Page 0 Table Page 1
+ * SLADDR ===> x------------------x x--> x-----------------x
+ * INADDR ->| Page 0 | V | | | Page 256 | V | <- INADDR+1M
+ * |------------------| | |-----------------|
+ * INADDR+4K ->| Page 1 | V | | | |
+ * |------------------| | |-----------------|
+ * | Page 2 | V | | | |
+ * |------------------| | |-----------------|
+ * | ... | V | | | ... |
+ * |------------------| | |-----------------|
+ * INADDR+1020K| Page 255 | V | | | Page 511 | V |
+ * SLADDR+2K==>|------------------| | |-----------------|
+ * | UNUSED | | | | |
+ * |------------------| | | |
+ * | UNUSED | | | | |
+ * |------------------| | | |
+ * | ... | | | | |
+ * |------------------| | |-----------------|
+ * | IGNORED | 0 | | | Table Page 0| 1 |
+ * |------------------| | |-----------------|
+ * | Table Page 1| 1 |--x | IGNORED | 0 |
+ * x------------------x x-----------------x
+ * SLADDR+4K==>
+ *
+ * The base input address (used by the ETR, programmed in INADDR_{LO,HI})
+ * must be aligned to 1MB (the size addressable by a single page table).
+ * The CATU maps INADDR{LO:HI} to the first page in the table pointed
+ * to by SLADDR{LO:HI} and so on.
+ *
+ */
+typedef u64 cate_t;
+
+#define CATU_PAGE_SHIFT 12
+#define CATU_PAGE_SIZE (1UL << CATU_PAGE_SHIFT)
+#define CATU_PAGES_PER_SYSPAGE (PAGE_SIZE / CATU_PAGE_SIZE)
+
+/* Page pointers are only allocated in the first 2K half */
+#define CATU_PTRS_PER_PAGE ((CATU_PAGE_SIZE >> 1) / sizeof(cate_t))
+#define CATU_PTRS_PER_SYSPAGE (CATU_PAGES_PER_SYSPAGE * CATU_PTRS_PER_PAGE)
+#define CATU_LINK_PREV ((CATU_PAGE_SIZE / sizeof(cate_t)) - 2)
+#define CATU_LINK_NEXT ((CATU_PAGE_SIZE / sizeof(cate_t)) - 1)
+
+#define CATU_ADDR_SHIFT 12
+#define CATU_ADDR_MASK ~(((cate_t)1 << CATU_ADDR_SHIFT) - 1)
+#define CATU_ENTRY_VALID ((cate_t)0x1)
+#define CATU_VALID_ENTRY(addr) \
+ (((cate_t)(addr) & CATU_ADDR_MASK) | CATU_ENTRY_VALID)
+#define CATU_ENTRY_ADDR(entry) ((cate_t)(entry) & ~((cate_t)CATU_ENTRY_VALID))
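+
+/*
+ * Worked example (address purely illustrative): a data page at physical
+ * address 0x80001000 encodes as CATU_VALID_ENTRY(0x80001000) == 0x80001001,
+ * and CATU_ENTRY_ADDR(0x80001001) recovers 0x80001000.
+ */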
+
+/* CATU expects the INADDR to be aligned to 1M. */
+#define CATU_DEFAULT_INADDR (1ULL << 20)
+
+/*
+ * catu_get_table : Retrieve the table pointers for the given @offset
+ * within the buffer. The buffer is wrapped around to a valid offset.
+ *
+ * Returns : The CPU virtual address for the beginning of the table
+ * containing the data page pointer for @offset. If @daddrp is not NULL,
+ * @daddrp points the DMA address of the beginning of the table.
+ */
+static inline cate_t *catu_get_table(struct tmc_sg_table *catu_table,
+ unsigned long offset,
+ dma_addr_t *daddrp)
+{
+ unsigned long buf_size = tmc_sg_table_buf_size(catu_table);
+ unsigned int table_nr, pg_idx, pg_offset;
+ struct tmc_pages *table_pages = &catu_table->table_pages;
+ void *ptr;
+
+ /* Make sure offset is within the range */
+ offset %= buf_size;
+
+ /*
+ * Each table can address 1MB and a single kernel page can
+ * contain "CATU_PAGES_PER_SYSPAGE" CATU tables.
+ */
+ table_nr = offset >> 20;
+ /* Find the table page in which table_nr lies */
+ pg_idx = table_nr / CATU_PAGES_PER_SYSPAGE;
+ pg_offset = (table_nr % CATU_PAGES_PER_SYSPAGE) * CATU_PAGE_SIZE;
+ if (daddrp)
+ *daddrp = table_pages->daddrs[pg_idx] + pg_offset;
+ ptr = page_address(table_pages->pages[pg_idx]);
+ return (cate_t *)((unsigned long)ptr + pg_offset);
+}
+
+#ifdef CATU_DEBUG
+static void catu_dump_table(struct tmc_sg_table *catu_table)
+{
+ int i;
+ cate_t *table;
+ unsigned long table_end, buf_size, offset = 0;
+
+ buf_size = tmc_sg_table_buf_size(catu_table);
+ dev_dbg(catu_table->dev,
+ "Dump table %p, tdaddr: %llx\n",
+ catu_table, catu_table->table_daddr);
+
+ while (offset < buf_size) {
+ table_end = offset + SZ_1M < buf_size ?
+ offset + SZ_1M : buf_size;
+ table = catu_get_table(catu_table, offset, NULL);
+ for (i = 0; offset < table_end; i++, offset += CATU_PAGE_SIZE)
+ dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]);
+ dev_dbg(catu_table->dev, "Prev : %llx, Next: %llx\n",
+ table[CATU_LINK_PREV], table[CATU_LINK_NEXT]);
+ dev_dbg(catu_table->dev, "== End of sub-table ===");
+ }
+ dev_dbg(catu_table->dev, "== End of Table ===");
+}
+
+#else
+static inline void catu_dump_table(struct tmc_sg_table *catu_table)
+{
+}
+#endif
+
+static inline cate_t catu_make_entry(dma_addr_t addr)
+{
+ return addr ? CATU_VALID_ENTRY(addr) : 0;
+}
+
+/*
+ * catu_populate_table : Populate the given CATU table.
+ * The table is always populated as a circular table.
+ * i.e, the "prev" link of the "first" table points to the "last"
+ * table and the "next" link of the "last" table points to the
+ * "first" table. The buffer should be made linear by calling
+ * catu_set_table().
+ */
+static void
+catu_populate_table(struct tmc_sg_table *catu_table)
+{
+ int i;
+ int sys_pidx; /* Index to current system data page */
+ int catu_pidx; /* Index of CATU page within the system data page */
+ unsigned long offset, buf_size, table_end;
+ dma_addr_t data_daddr;
+ dma_addr_t prev_taddr, next_taddr, cur_taddr;
+ cate_t *table_ptr, *next_table;
+
+ buf_size = tmc_sg_table_buf_size(catu_table);
+ sys_pidx = catu_pidx = 0;
+ offset = 0;
+
+ table_ptr = catu_get_table(catu_table, 0, &cur_taddr);
+ prev_taddr = 0; /* Prev link for the first table */
+
+ while (offset < buf_size) {
+ /*
+ * The @offset is always 1M aligned here and we have an
+ * empty table @table_ptr to fill. Each table can address
+ * up to 1MB of data buffer. The last table may have fewer
+ * entries if the buffer size is not aligned.
+ */
+ table_end = (offset + SZ_1M) < buf_size ?
+ (offset + SZ_1M) : buf_size;
+ for (i = 0; offset < table_end;
+ i++, offset += CATU_PAGE_SIZE) {
+
+ data_daddr = catu_table->data_pages.daddrs[sys_pidx] +
+ catu_pidx * CATU_PAGE_SIZE;
+ catu_dbg(catu_table->dev,
+ "[table %5ld:%03d] 0x%llx\n",
+ (offset >> 20), i, data_daddr);
+ table_ptr[i] = catu_make_entry(data_daddr);
+ /* Move the pointers for data pages */
+ catu_pidx = (catu_pidx + 1) % CATU_PAGES_PER_SYSPAGE;
+ if (catu_pidx == 0)
+ sys_pidx++;
+ }
+
+ /*
+ * If we have finished all the valid entries, fill the rest of
+ * the table (i.e, last table page) with invalid entries,
+ * to fail the lookups.
+ */
+ if (offset == buf_size) {
+ memset(&table_ptr[i], 0,
+ sizeof(cate_t) * (CATU_PTRS_PER_PAGE - i));
+ next_taddr = 0;
+ } else {
+ next_table = catu_get_table(catu_table,
+ offset, &next_taddr);
+ }
+
+ table_ptr[CATU_LINK_PREV] = catu_make_entry(prev_taddr);
+ table_ptr[CATU_LINK_NEXT] = catu_make_entry(next_taddr);
+
+ catu_dbg(catu_table->dev,
+ "[table%5ld]: Cur: 0x%llx Prev: 0x%llx, Next: 0x%llx\n",
+ (offset >> 20) - 1, cur_taddr, prev_taddr, next_taddr);
+
+ /* Update the prev/next addresses */
+ if (next_taddr) {
+ prev_taddr = cur_taddr;
+ cur_taddr = next_taddr;
+ table_ptr = next_table;
+ }
+ }
+
+ /* Sync the table for device */
+ tmc_sg_table_sync_table(catu_table);
+}
+
+static struct tmc_sg_table *
+catu_init_sg_table(struct device *catu_dev, int node,
+ ssize_t size, void **pages)
+{
+ int nr_tpages;
+ struct tmc_sg_table *catu_table;
+
+ /*
+ * Each table can address up to 1MB and we can have
+ * CATU_PAGES_PER_SYSPAGE tables in a system page.
+ */
+ nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
+ catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
+ size >> PAGE_SHIFT, pages);
+ if (IS_ERR(catu_table))
+ return catu_table;
+
+ catu_populate_table(catu_table);
+ dev_dbg(catu_dev,
+ "Setup table %p, size %ldKB, %d table pages\n",
+ catu_table, (unsigned long)size >> 10, nr_tpages);
+ catu_dump_table(catu_table);
+ return catu_table;
+}
+
+static void catu_free_etr_buf(struct etr_buf *etr_buf)
+{
+ struct catu_etr_buf *catu_buf;
+
+ if (!etr_buf || etr_buf->mode != ETR_MODE_CATU || !etr_buf->private)
+ return;
+
+ catu_buf = etr_buf->private;
+ tmc_free_sg_table(catu_buf->catu_table);
+ kfree(catu_buf);
+}
+
+static ssize_t catu_get_data_etr_buf(struct etr_buf *etr_buf, u64 offset,
+ size_t len, char **bufpp)
+{
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+
+ return tmc_sg_table_get_data(catu_buf->catu_table, offset, len, bufpp);
+}
+
+static void catu_sync_etr_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+ struct tmc_sg_table *catu_table = catu_buf->catu_table;
+ u64 r_offset, w_offset;
+
+ /*
+ * ETR started off at etr_buf->hwaddr. Convert the RRP/RWP to
+ * offsets within the trace buffer.
+ */
+ r_offset = rrp - etr_buf->hwaddr;
+ w_offset = rwp - etr_buf->hwaddr;
+
+ if (!etr_buf->full) {
+ etr_buf->len = w_offset - r_offset;
+ if (w_offset < r_offset)
+ etr_buf->len += etr_buf->size;
+ } else {
+ etr_buf->len = etr_buf->size;
+ }
+
+ etr_buf->offset = r_offset;
+ tmc_sg_table_sync_data_range(catu_table, r_offset, etr_buf->len);
+}
+
+static int catu_alloc_etr_buf(struct tmc_drvdata *tmc_drvdata,
+ struct etr_buf *etr_buf, int node, void **pages)
+{
+ struct coresight_device *csdev;
+ struct device *catu_dev;
+ struct tmc_sg_table *catu_table;
+ struct catu_etr_buf *catu_buf;
+
+ csdev = tmc_etr_get_catu_device(tmc_drvdata);
+ if (!csdev)
+ return -ENODEV;
+ catu_dev = csdev->dev.parent;
+ catu_buf = kzalloc(sizeof(*catu_buf), GFP_KERNEL);
+ if (!catu_buf)
+ return -ENOMEM;
+
+ catu_table = catu_init_sg_table(catu_dev, node, etr_buf->size, pages);
+ if (IS_ERR(catu_table)) {
+ kfree(catu_buf);
+ return PTR_ERR(catu_table);
+ }
+
+ etr_buf->mode = ETR_MODE_CATU;
+ etr_buf->private = catu_buf;
+ etr_buf->hwaddr = CATU_DEFAULT_INADDR;
+
+ catu_buf->catu_table = catu_table;
+ /* Get the table base address */
+ catu_buf->sladdr = catu_table->table_daddr;
+
+ return 0;
+}
+
+const struct etr_buf_operations etr_catu_buf_ops = {
+ .alloc = catu_alloc_etr_buf,
+ .free = catu_free_etr_buf,
+ .sync = catu_sync_etr_buf,
+ .get_data = catu_get_data_etr_buf,
+};
+
+coresight_simple_reg32(struct catu_drvdata, devid, CORESIGHT_DEVID);
+coresight_simple_reg32(struct catu_drvdata, control, CATU_CONTROL);
+coresight_simple_reg32(struct catu_drvdata, status, CATU_STATUS);
+coresight_simple_reg32(struct catu_drvdata, mode, CATU_MODE);
+coresight_simple_reg32(struct catu_drvdata, axictrl, CATU_AXICTRL);
+coresight_simple_reg32(struct catu_drvdata, irqen, CATU_IRQEN);
+coresight_simple_reg64(struct catu_drvdata, sladdr,
+ CATU_SLADDRLO, CATU_SLADDRHI);
+coresight_simple_reg64(struct catu_drvdata, inaddr,
+ CATU_INADDRLO, CATU_INADDRHI);
+
+static struct attribute *catu_mgmt_attrs[] = {
+ &dev_attr_devid.attr,
+ &dev_attr_control.attr,
+ &dev_attr_status.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_axictrl.attr,
+ &dev_attr_irqen.attr,
+ &dev_attr_sladdr.attr,
+ &dev_attr_inaddr.attr,
+ NULL,
+};
+
+static const struct attribute_group catu_mgmt_group = {
+ .attrs = catu_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group *catu_groups[] = {
+ &catu_mgmt_group,
+ NULL,
+};
+
+static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
+{
+ return coresight_timeout(drvdata->base,
+ CATU_STATUS, CATU_STATUS_READY, 1);
+}
+
+static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
+{
+ u32 control, mode;
+ struct etr_buf *etr_buf = data;
+
+ if (catu_wait_for_ready(drvdata))
+ dev_warn(drvdata->dev, "Timeout while waiting for READY\n");
+
+ control = catu_read_control(drvdata);
+ if (control & BIT(CATU_CONTROL_ENABLE)) {
+ dev_warn(drvdata->dev, "CATU is already enabled\n");
+ return -EBUSY;
+ }
+
+ control |= BIT(CATU_CONTROL_ENABLE);
+
+ if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+
+ mode = CATU_MODE_TRANSLATE;
+ catu_write_axictrl(drvdata, CATU_OS_AXICTRL);
+ catu_write_sladdr(drvdata, catu_buf->sladdr);
+ catu_write_inaddr(drvdata, CATU_DEFAULT_INADDR);
+ } else {
+ mode = CATU_MODE_PASS_THROUGH;
+ catu_write_sladdr(drvdata, 0);
+ catu_write_inaddr(drvdata, 0);
+ }
+
+ catu_write_irqen(drvdata, 0);
+ catu_write_mode(drvdata, mode);
+ catu_write_control(drvdata, control);
+ dev_dbg(drvdata->dev, "Enabled in %s mode\n",
+ (mode == CATU_MODE_PASS_THROUGH) ?
+ "Pass through" :
+ "Translate");
+ return 0;
+}
+
+static int catu_enable(struct coresight_device *csdev, void *data)
+{
+ int rc;
+ struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
+
+ CS_UNLOCK(catu_drvdata->base);
+ rc = catu_enable_hw(catu_drvdata, data);
+ CS_LOCK(catu_drvdata->base);
+ return rc;
+}
+
+static int catu_disable_hw(struct catu_drvdata *drvdata)
+{
+ int rc = 0;
+
+ catu_write_control(drvdata, 0);
+ if (catu_wait_for_ready(drvdata)) {
+ dev_info(drvdata->dev, "Timeout while waiting for READY\n");
+ rc = -EAGAIN;
+ }
+
+ dev_dbg(drvdata->dev, "Disabled\n");
+ return rc;
+}
+
+static int catu_disable(struct coresight_device *csdev, void *__unused)
+{
+ int rc;
+ struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
+
+ CS_UNLOCK(catu_drvdata->base);
+ rc = catu_disable_hw(catu_drvdata);
+ CS_LOCK(catu_drvdata->base);
+ return rc;
+}
+
+const struct coresight_ops_helper catu_helper_ops = {
+ .enable = catu_enable,
+ .disable = catu_disable,
+};
+
+const struct coresight_ops catu_ops = {
+ .helper_ops = &catu_helper_ops,
+};
+
+static int catu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret = 0;
+ u32 dma_mask;
+ struct catu_drvdata *drvdata;
+ struct coresight_desc catu_desc;
+ struct coresight_platform_data *pdata = NULL;
+ struct device *dev = &adev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out;
+ }
+ dev->platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ drvdata->dev = dev;
+ dev_set_drvdata(dev, drvdata);
+ base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out;
+ }
+
+ /* Setup dma mask for the device */
+ dma_mask = readl_relaxed(base + CORESIGHT_DEVID) & 0x3f;
+ switch (dma_mask) {
+ case 32:
+ case 40:
+ case 44:
+ case 48:
+ case 52:
+ case 56:
+ case 64:
+ break;
+ default:
+ /* Default to the 40bits as supported by TMC-ETR */
+ dma_mask = 40;
+ }
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask));
+ if (ret)
+ goto out;
+
+ drvdata->base = base;
+ catu_desc.pdata = pdata;
+ catu_desc.dev = dev;
+ catu_desc.groups = catu_groups;
+ catu_desc.type = CORESIGHT_DEV_TYPE_HELPER;
+ catu_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU;
+ catu_desc.ops = &catu_ops;
+ drvdata->csdev = coresight_register(&catu_desc);
+ if (IS_ERR(drvdata->csdev))
+ ret = PTR_ERR(drvdata->csdev);
+out:
+ pm_runtime_put(&adev->dev);
+ return ret;
+}
+
+static struct amba_id catu_ids[] = {
+ {
+ .id = 0x000bb9ee,
+ .mask = 0x000fffff,
+ },
+ {},
+};
+
+static struct amba_driver catu_driver = {
+ .drv = {
+ .name = "coresight-catu",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
+ },
+ .probe = catu_probe,
+ .id_table = catu_ids,
+};
+
+builtin_amba_driver(catu_driver);
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
new file mode 100644
index 000000000000..1b281f0dcccc
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Arm Limited. All rights reserved.
+ *
+ * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
+ */
+
+#ifndef _CORESIGHT_CATU_H
+#define _CORESIGHT_CATU_H
+
+#include "coresight-priv.h"
+
+/* Register offset from base */
+#define CATU_CONTROL 0x000
+#define CATU_MODE 0x004
+#define CATU_AXICTRL 0x008
+#define CATU_IRQEN 0x00c
+#define CATU_SLADDRLO 0x020
+#define CATU_SLADDRHI 0x024
+#define CATU_INADDRLO 0x028
+#define CATU_INADDRHI 0x02c
+#define CATU_STATUS 0x100
+#define CATU_DEVARCH 0xfbc
+
+#define CATU_CONTROL_ENABLE 0
+
+#define CATU_MODE_PASS_THROUGH 0U
+#define CATU_MODE_TRANSLATE 1U
+
+#define CATU_AXICTRL_ARCACHE_SHIFT 4
+#define CATU_AXICTRL_ARCACHE_MASK 0xf
+#define CATU_AXICTRL_ARPROT_MASK 0x3
+#define CATU_AXICTRL_ARCACHE(arcache) \
+ (((arcache) & CATU_AXICTRL_ARCACHE_MASK) << CATU_AXICTRL_ARCACHE_SHIFT)
+
+#define CATU_AXICTRL_VAL(arcache, arprot) \
+ (CATU_AXICTRL_ARCACHE(arcache) | ((arprot) & CATU_AXICTRL_ARPROT_MASK))
+
+#define AXI3_AxCACHE_WB_READ_ALLOC 0x7
+/*
+ * AXI - ARPROT bits:
+ * See AMBA AXI & ACE Protocol specification (ARM IHI 0022E)
+ * sectionA4.7 Access Permissions.
+ *
+ * Bit 0: 0 - Unprivileged access, 1 - Privileged access
+ * Bit 1: 0 - Secure access, 1 - Non-secure access.
+ * Bit 2: 0 - Data access, 1 - instruction access.
+ *
+ * CATU AXICTRL:ARPROT[2] is RES0 as we always access data.
+ */
+#define CATU_OS_ARPROT 0x2
+
+#define CATU_OS_AXICTRL \
+ CATU_AXICTRL_VAL(AXI3_AxCACHE_WB_READ_ALLOC, CATU_OS_ARPROT)
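+
+/*
+ * With the values above, CATU_OS_AXICTRL evaluates to 0x72: write-back
+ * read-allocate cacheable, non-secure, unprivileged data accesses.
+ */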
+
+#define CATU_STATUS_READY 8
+#define CATU_STATUS_ADRERR 0
+#define CATU_STATUS_AXIERR 4
+
+#define CATU_IRQEN_ON 0x1
+#define CATU_IRQEN_OFF 0x0
+
+struct catu_drvdata {
+ struct device *dev;
+ void __iomem *base;
+ struct coresight_device *csdev;
+ int irq;
+};
+
+#define CATU_REG32(name, offset) \
+static inline u32 \
+catu_read_##name(struct catu_drvdata *drvdata) \
+{ \
+ return coresight_read_reg_pair(drvdata->base, offset, -1); \
+} \
+static inline void \
+catu_write_##name(struct catu_drvdata *drvdata, u32 val) \
+{ \
+ coresight_write_reg_pair(drvdata->base, val, offset, -1); \
+}
+
+#define CATU_REG_PAIR(name, lo_off, hi_off) \
+static inline u64 \
+catu_read_##name(struct catu_drvdata *drvdata) \
+{ \
+ return coresight_read_reg_pair(drvdata->base, lo_off, hi_off); \
+} \
+static inline void \
+catu_write_##name(struct catu_drvdata *drvdata, u64 val) \
+{ \
+ coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off); \
+}
+
+CATU_REG32(control, CATU_CONTROL);
+CATU_REG32(mode, CATU_MODE);
+CATU_REG32(irqen, CATU_IRQEN);
+CATU_REG32(axictrl, CATU_AXICTRL);
+CATU_REG_PAIR(sladdr, CATU_SLADDRLO, CATU_SLADDRHI)
+CATU_REG_PAIR(inaddr, CATU_INADDRLO, CATU_INADDRHI)
+
+static inline bool coresight_is_catu_device(struct coresight_device *csdev)
+{
+ if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
+ return false;
+ if (csdev->type != CORESIGHT_DEV_TYPE_HELPER)
+ return false;
+ if (csdev->subtype.helper_subtype != CORESIGHT_DEV_SUBTYPE_HELPER_CATU)
+ return false;
+ return true;
+}
+
+#ifdef CONFIG_CORESIGHT_CATU
+extern const struct etr_buf_operations etr_catu_buf_ops;
+#else
+/* Dummy declaration for the CATU ops */
+static const struct etr_buf_operations etr_catu_buf_ops;
+#endif
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 320d29df17e1..306119eaf16a 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -195,7 +195,6 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
bool lost = false;
int i;
u8 *buf_ptr;
- const u32 *barrier;
u32 read_data, depth;
u32 read_ptr, write_ptr;
u32 frame_off, frame_endoff;
@@ -226,19 +225,16 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
depth = drvdata->buffer_depth;
buf_ptr = drvdata->buf;
- barrier = barrier_pkt;
for (i = 0; i < depth; i++) {
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- if (lost && *barrier) {
- read_data = *barrier;
- barrier++;
- }
-
*(u32 *)buf_ptr = read_data;
buf_ptr += 4;
}
+ if (lost)
+ coresight_insert_barrier_packet(drvdata->buf);
+
if (frame_off) {
buf_ptr -= (frame_endoff * 4);
for (i = 0; i < frame_endoff; i++) {
@@ -447,7 +443,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
buf_ptr = buf->data_pages[cur] + offset;
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- if (lost && *barrier) {
+ if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
read_data = *barrier;
barrier++;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index e8b4549e30e2..79e1ad860d8a 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -168,8 +168,6 @@
* @seq_curr_state: current value of the sequencer register.
* @ctxid_idx: index for the context ID registers.
* @ctxid_pid: value for the context ID to trigger on.
- * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
- * the same value of ctxid_pid.
* @ctxid_mask: mask applicable to all the context IDs.
* @sync_freq: Synchronisation frequency.
* @timestamp_event: Defines an event that requests the insertion
@@ -202,7 +200,6 @@ struct etm_config {
u32 seq_curr_state;
u8 ctxid_idx;
u32 ctxid_pid[ETM_MAX_CTXID_CMP];
- u32 ctxid_vpid[ETM_MAX_CTXID_CMP];
u32 ctxid_mask;
u32 sync_freq;
u32 timestamp_event;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 9435c1481f61..75487b3fad86 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
@@ -1025,8 +1026,15 @@ static ssize_t ctxid_pid_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
- val = config->ctxid_vpid[config->ctxid_idx];
+ val = config->ctxid_pid[config->ctxid_idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
@@ -1037,19 +1045,28 @@ static ssize_t ctxid_pid_store(struct device *dev,
const char *buf, size_t size)
{
int ret;
- unsigned long vpid, pid;
+ unsigned long pid;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
- ret = kstrtoul(buf, 16, &vpid);
+ /*
+ * When contextID tracing is enabled the tracers will insert the
+ * value found in the contextID register in the trace stream. But if
+ * a process is in a namespace the PID of that process as seen from the
+ * namespace won't be what the kernel sees, something that makes the
+ * feature confusing and can potentially leak kernel only information.
+ * As such refuse to use the feature if @current is not in the initial
+ * PID namespace.
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 16, &pid);
if (ret)
return ret;
- pid = coresight_vpid_to_pid(vpid);
-
spin_lock(&drvdata->spinlock);
config->ctxid_pid[config->ctxid_idx] = pid;
- config->ctxid_vpid[config->ctxid_idx] = vpid;
spin_unlock(&drvdata->spinlock);
return size;
@@ -1063,6 +1080,13 @@ static ssize_t ctxid_mask_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
val = config->ctxid_mask;
return sprintf(buf, "%#lx\n", val);
}
@@ -1076,6 +1100,13 @@ static ssize_t ctxid_mask_store(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 15ed64d51a5b..7c74263c333d 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -230,10 +230,8 @@ void etm_set_default(struct etm_config *config)
config->seq_curr_state = 0x0;
config->ctxid_idx = 0x0;
- for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
+ for (i = 0; i < ETM_MAX_CTXID_CMP; i++)
config->ctxid_pid[i] = 0x0;
- config->ctxid_vpid[i] = 0x0;
- }
config->ctxid_mask = 0x0;
/* Setting default to 1024 as per TRM recommendation */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 4eb8da785ce0..a0365e23678e 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
@@ -250,10 +251,8 @@ static ssize_t reset_store(struct device *dev,
}
config->ctxid_idx = 0x0;
- for (i = 0; i < drvdata->numcidc; i++) {
+ for (i = 0; i < drvdata->numcidc; i++)
config->ctxid_pid[i] = 0x0;
- config->ctxid_vpid[i] = 0x0;
- }
config->ctxid_mask0 = 0x0;
config->ctxid_mask1 = 0x0;
@@ -1637,9 +1636,16 @@ static ssize_t ctxid_pid_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
- val = (unsigned long)config->ctxid_vpid[idx];
+ val = (unsigned long)config->ctxid_pid[idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1649,26 +1655,35 @@ static ssize_t ctxid_pid_store(struct device *dev,
const char *buf, size_t size)
{
u8 idx;
- unsigned long vpid, pid;
+ unsigned long pid;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
/*
+ * When contextID tracing is enabled the tracers will insert the
+ * value found in the contextID register in the trace stream. But if
+ * a process is in a namespace the PID of that process as seen from the
+ * namespace won't be what the kernel sees, something that makes the
+ * feature confusing and can potentially leak kernel only information.
+ * As such refuse to use the feature if @current is not in the initial
+ * PID namespace.
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ /*
* only implemented when ctxid tracing is enabled, i.e. at least one
* ctxid comparator is implemented and ctxid is greater than 0 bits
* in length
*/
if (!drvdata->ctxid_size || !drvdata->numcidc)
return -EINVAL;
- if (kstrtoul(buf, 16, &vpid))
+ if (kstrtoul(buf, 16, &pid))
return -EINVAL;
- pid = coresight_vpid_to_pid(vpid);
-
spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
config->ctxid_pid[idx] = (u64)pid;
- config->ctxid_vpid[idx] = (u64)vpid;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1682,6 +1697,13 @@ static ssize_t ctxid_masks_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
val1 = config->ctxid_mask0;
val2 = config->ctxid_mask1;
@@ -1699,6 +1721,13 @@ static ssize_t ctxid_masks_store(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ /*
* only implemented when ctxid tracing is enabled, i.e. at least one
* ctxid comparator is implemented and ctxid is greater than 0 bits
* in length
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 9bc04c50d45b..1d94ebec027b 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1027,7 +1027,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
}
pm_runtime_put(&adev->dev);
- dev_info(dev, "%s initialized\n", (char *)id->data);
+ dev_info(dev, "CPU%d: ETM v%d.%d initialized\n",
+ drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
if (boot_enable) {
coresight_enable(drvdata->csdev);
@@ -1045,23 +1046,19 @@ err_arch_supported:
return ret;
}
+#define ETM4x_AMBA_ID(pid) \
+ { \
+ .id = pid, \
+ .mask = 0x000fffff, \
+ }
+
static const struct amba_id etm4_ids[] = {
- { /* ETM 4.0 - Cortex-A53 */
- .id = 0x000bb95d,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { /* ETM 4.0 - Cortex-A57 */
- .id = 0x000bb95e,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { /* ETM 4.0 - A72, Maia, HiSilicon */
- .id = 0x000bb95a,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { 0, 0},
+ ETM4x_AMBA_ID(0x000bb95d), /* Cortex-A53 */
+ ETM4x_AMBA_ID(0x000bb95e), /* Cortex-A57 */
+ ETM4x_AMBA_ID(0x000bb95a), /* Cortex-A72 */
+ ETM4x_AMBA_ID(0x000bb959), /* Cortex-A73 */
+ ETM4x_AMBA_ID(0x000bb9da), /* Cortex-A35 */
+ {},
};
static struct amba_driver etm4x_driver = {
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index b7c4a6f6c6b9..52786e9d8926 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -230,8 +230,6 @@
* @addr_type: Current status of the comparator register.
* @ctxid_idx: Context ID index selector.
* @ctxid_pid: Value of the context ID comparator.
- * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
- * the same value of ctxid_pid.
* @ctxid_mask0:Context ID comparator mask for comparator 0-3.
* @ctxid_mask1:Context ID comparator mask for comparator 4-7.
* @vmid_idx: VM ID index selector.
@@ -274,7 +272,6 @@ struct etmv4_config {
u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP];
u8 ctxid_idx;
u64 ctxid_pid[ETMv4_MAX_CTXID_CMP];
- u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP];
u32 ctxid_mask0;
u32 ctxid_mask1;
u8 vmid_idx;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 0e5a74dae6a6..1a6cf3589866 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -57,7 +57,8 @@ static DEVICE_ATTR_RO(name)
#define coresight_simple_reg64(type, name, lo_off, hi_off) \
__coresight_simple_func(type, NULL, name, lo_off, hi_off)
-extern const u32 barrier_pkt[5];
+extern const u32 barrier_pkt[4];
+#define CORESIGHT_BARRIER_PKT_SIZE (sizeof(barrier_pkt))
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
@@ -91,6 +92,13 @@ struct cs_buffers {
void **data_pages;
};
+static inline void coresight_insert_barrier_packet(void *buf)
+{
+ if (buf)
+ memcpy(buf, barrier_pkt, CORESIGHT_BARRIER_PKT_SIZE);
+}
+
static inline void CS_LOCK(void __iomem *addr)
{
do {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 61d849b11c26..0549249f4b39 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -32,39 +32,28 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
- bool lost = false;
char *bufp;
- const u32 *barrier;
- u32 read_data, status;
+ u32 read_data, lost;
int i;
- /*
- * Get a hold of the status register and see if a wrap around
- * has occurred.
- */
- status = readl_relaxed(drvdata->base + TMC_STS);
- if (status & TMC_STS_FULL)
- lost = true;
-
+ /* Check if the buffer wrapped around. */
+ lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
bufp = drvdata->buf;
drvdata->len = 0;
- barrier = barrier_pkt;
while (1) {
for (i = 0; i < drvdata->memwidth; i++) {
read_data = readl_relaxed(drvdata->base + TMC_RRD);
if (read_data == 0xFFFFFFFF)
- return;
-
- if (lost && *barrier) {
- read_data = *barrier;
- barrier++;
- }
-
+ goto done;
memcpy(bufp, &read_data, 4);
bufp += 4;
drvdata->len += 4;
}
}
+done:
+ if (lost)
+ coresight_insert_barrier_packet(drvdata->buf);
+ return;
}
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
@@ -109,6 +98,24 @@ static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+/*
+ * Return the available trace data in the buffer from @pos, with a
+ * maximum limit of @len, updating @bufpp with the location of the data.
+ */
+ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
+{
+ ssize_t actual = len;
+
+ /* Adjust the len to available size @pos */
+ if (pos + actual > drvdata->len)
+ actual = drvdata->len - pos;
+ if (actual > 0)
+ *bufpp = drvdata->buf + pos;
+ return actual;
+}
+
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 02f747afa2ba..2eda5de304c2 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -6,22 +6,912 @@
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "coresight-catu.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
+struct etr_flat_buf {
+ struct device *dev;
+ dma_addr_t daddr;
+ void *vaddr;
+ size_t size;
+};
+
+/*
+ * The TMC ETR SG has a page size of 4K. The SG table contains pointers
+ * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
+ * 4K (i.e., 16KB or 64KB). This implies that a single OS page could
+ * contain more than one SG buffer and table.
+ *
+ * A table entry has the following format:
+ *
+ * ---Bit31------------Bit4-------Bit1-----Bit0--
+ * | Address[39:12] | SBZ | Entry Type |
+ * ----------------------------------------------
+ *
+ * Address: Bits [39:12] of a physical page address. Bits [11:0] are
+ * always zero.
+ *
+ * Entry type:
+ * b00 - Reserved.
+ * b01 - Last entry in the tables, points to 4K page buffer.
+ * b10 - Normal entry, points to 4K page buffer.
+ * b11 - Link. The address points to the base of next table.
+ */
+
+typedef u32 sgte_t;
+
+#define ETR_SG_PAGE_SHIFT 12
+#define ETR_SG_PAGE_SIZE (1UL << ETR_SG_PAGE_SHIFT)
+#define ETR_SG_PAGES_PER_SYSPAGE (PAGE_SIZE / ETR_SG_PAGE_SIZE)
+#define ETR_SG_PTRS_PER_PAGE (ETR_SG_PAGE_SIZE / sizeof(sgte_t))
+#define ETR_SG_PTRS_PER_SYSPAGE (PAGE_SIZE / sizeof(sgte_t))
+
+#define ETR_SG_ET_MASK 0x3
+#define ETR_SG_ET_LAST 0x1
+#define ETR_SG_ET_NORMAL 0x2
+#define ETR_SG_ET_LINK 0x3
+
+#define ETR_SG_ADDR_SHIFT 4
+
+#define ETR_SG_ENTRY(addr, type) \
+ (sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
+ (type & ETR_SG_ET_MASK))
+
+#define ETR_SG_ADDR(entry) \
+ (((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
+#define ETR_SG_ET(entry) ((entry) & ETR_SG_ET_MASK)
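+
+/*
+ * Worked example (address purely illustrative): a NORMAL entry for a 4K
+ * page at physical address 0x80001000 is
+ * ETR_SG_ENTRY(0x80001000, ETR_SG_ET_NORMAL) == 0x800012, and
+ * ETR_SG_ADDR(0x800012) recovers 0x80001000.
+ */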
+
+/*
+ * struct etr_sg_table : ETR SG Table
+ * @sg_table: Generic SG Table holding the data/table pages.
+ * @hwaddr: hwaddress used by the TMC, which is the base
+ * address of the table.
+ */
+struct etr_sg_table {
+ struct tmc_sg_table *sg_table;
+ dma_addr_t hwaddr;
+};
+
+/*
+ * tmc_etr_sg_table_entries: Total number of table entries required to map
+ * @nr_pages system pages.
+ *
+ * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
+ * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
+ * with the last entry pointing to another page of table entries.
+ * If we spill over to a new page for mapping 1 entry, we could as
+ * well replace the link entry of the previous page with the last entry.
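+ *
+ * E.g., with 4K system pages (ETR_SG_PTRS_PER_PAGE == 1024):
+ * nr_pages == 1024 needs 1024 data pointers and would spill a single
+ * entry into a second table page, so that entry becomes the LAST entry
+ * of the first page instead: 1024 entries total. nr_pages == 2048 needs
+ * 2048 data pointers plus 2 link entries: 2050 entries total.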
+ */
+static inline unsigned long __attribute_const__
+tmc_etr_sg_table_entries(int nr_pages)
+{
+ unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
+ unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
+ /*
+ * If we spill over to a new page for 1 entry, we could as well
+ * make it the LAST entry in the previous page, skipping the Link
+ * address.
+ */
+ if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
+ nr_sglinks--;
+ return nr_sgpages + nr_sglinks;
+}
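+
+/*
+ * Illustrative example, assuming a 4K PAGE_SIZE (one SG page per
+ * system page, 1024 pointers per table page): mapping 1024 data pages
+ * needs 1024 entries. 1023 of them fit in the first table page;
+ * rather than linking to a new table page for the single remaining
+ * entry, we make it the LAST entry of the first page, so nr_sglinks
+ * is decremented back to 0.
+ */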
+
+/*
+ * tmc_pages_get_offset: Go through all the pages in the tmc_pages
+ * and map the device address @addr to an offset within the virtual
+ * contiguous buffer.
+ */
+static long
+tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
+{
+ int i;
+ dma_addr_t page_start;
+
+ for (i = 0; i < tmc_pages->nr_pages; i++) {
+ page_start = tmc_pages->daddrs[i];
+ if (addr >= page_start && addr < (page_start + PAGE_SIZE))
+ return i * PAGE_SIZE + (addr - page_start);
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * tmc_pages_free : Unmap and free the pages used by tmc_pages.
+ * For pages that were not allocated in tmc_pages_alloc() (i.e, those
+ * supplied by the caller), __free_page() simply drops the refcount
+ * we took on them.
+ */
+static void tmc_pages_free(struct tmc_pages *tmc_pages,
+ struct device *dev, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < tmc_pages->nr_pages; i++) {
+ if (tmc_pages->daddrs && tmc_pages->daddrs[i])
+ dma_unmap_page(dev, tmc_pages->daddrs[i],
+ PAGE_SIZE, dir);
+ if (tmc_pages->pages && tmc_pages->pages[i])
+ __free_page(tmc_pages->pages[i]);
+ }
+
+ kfree(tmc_pages->pages);
+ kfree(tmc_pages->daddrs);
+ tmc_pages->pages = NULL;
+ tmc_pages->daddrs = NULL;
+ tmc_pages->nr_pages = 0;
+}
+
+/*
+ * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
+ * If @pages is not NULL, the list of page virtual addresses is
+ * used as the data pages. The pages are then dma_map'ed for @dev
+ * with dma_direction @dir.
+ *
+ * Returns 0 upon success, else the error number.
+ */
+static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
+ struct device *dev, int node,
+ enum dma_data_direction dir, void **pages)
+{
+ int i, nr_pages;
+ dma_addr_t paddr;
+ struct page *page;
+
+ nr_pages = tmc_pages->nr_pages;
+ tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
+ GFP_KERNEL);
+ if (!tmc_pages->daddrs)
+ return -ENOMEM;
+ tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
+ GFP_KERNEL);
+ if (!tmc_pages->pages) {
+ kfree(tmc_pages->daddrs);
+ tmc_pages->daddrs = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ if (pages && pages[i]) {
+ page = virt_to_page(pages[i]);
+ /* Hold a refcount on the page */
+ get_page(page);
+ } else {
+ page = alloc_pages_node(node,
+ GFP_KERNEL | __GFP_ZERO, 0);
+ /* Bail out rather than dma_map a NULL page */
+ if (!page)
+ goto err;
+ }
+ paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(dev, paddr))
+ goto err;
+ tmc_pages->daddrs[i] = paddr;
+ tmc_pages->pages[i] = page;
+ }
+ return 0;
+err:
+ tmc_pages_free(tmc_pages, dev, dir);
+ return -ENOMEM;
+}
+
+static inline long
+tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
+{
+ return tmc_pages_get_offset(&sg_table->data_pages, addr);
+}
+
+static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
+{
+ if (sg_table->table_vaddr)
+ vunmap(sg_table->table_vaddr);
+ tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
+}
+
+static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
+{
+ if (sg_table->data_vaddr)
+ vunmap(sg_table->data_vaddr);
+ tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
+}
+
+void tmc_free_sg_table(struct tmc_sg_table *sg_table)
+{
+ tmc_free_table_pages(sg_table);
+ tmc_free_data_pages(sg_table);
+}
+
+/*
+ * Alloc pages for the table. Since this will be used by the device,
+ * allocate the pages closer to the device (i.e, dev_to_node(dev)
+ * rather than the CPU node).
+ */
+static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
+{
+ int rc;
+ struct tmc_pages *table_pages = &sg_table->table_pages;
+
+ rc = tmc_pages_alloc(table_pages, sg_table->dev,
+ dev_to_node(sg_table->dev),
+ DMA_TO_DEVICE, NULL);
+ if (rc)
+ return rc;
+ sg_table->table_vaddr = vmap(table_pages->pages,
+ table_pages->nr_pages,
+ VM_MAP,
+ PAGE_KERNEL);
+ if (!sg_table->table_vaddr)
+ rc = -ENOMEM;
+ else
+ sg_table->table_daddr = table_pages->daddrs[0];
+ return rc;
+}
+
+static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
+{
+ int rc;
+
+ /* Allocate data pages on the node requested by the caller */
+ rc = tmc_pages_alloc(&sg_table->data_pages,
+ sg_table->dev, sg_table->node,
+ DMA_FROM_DEVICE, pages);
+ if (!rc) {
+ sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
+ sg_table->data_pages.nr_pages,
+ VM_MAP,
+ PAGE_KERNEL);
+ if (!sg_table->data_vaddr)
+ rc = -ENOMEM;
+ }
+ return rc;
+}
+
+/*
+ * tmc_alloc_sg_table: Allocate and setup dma pages for the TMC SG table
+ * and data buffers. TMC writes to the data buffers and reads from the SG
+ * Table pages.
+ *
+ * @dev - Device to which page should be DMA mapped.
+ * @node - Numa node for mem allocations
+ * @nr_tpages - Number of pages for the table entries.
+ * @nr_dpages - Number of pages for Data buffer.
+ * @pages - Optional list of virtual address of pages.
+ */
+struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
+ int node,
+ int nr_tpages,
+ int nr_dpages,
+ void **pages)
+{
+ long rc;
+ struct tmc_sg_table *sg_table;
+
+ sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
+ if (!sg_table)
+ return ERR_PTR(-ENOMEM);
+ sg_table->data_pages.nr_pages = nr_dpages;
+ sg_table->table_pages.nr_pages = nr_tpages;
+ sg_table->node = node;
+ sg_table->dev = dev;
+
+ rc = tmc_alloc_data_pages(sg_table, pages);
+ if (!rc)
+ rc = tmc_alloc_table_pages(sg_table);
+ if (rc) {
+ tmc_free_sg_table(sg_table);
+ kfree(sg_table);
+ return ERR_PTR(rc);
+ }
+
+ return sg_table;
+}
+
+/*
+ * tmc_sg_table_sync_data_range: Sync the data buffer written
+ * by the device from @offset up to @size bytes.
+ */
+void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
+ u64 offset, u64 size)
+{
+ int i, index, start;
+ int npages = DIV_ROUND_UP(size, PAGE_SIZE);
+ struct device *dev = table->dev;
+ struct tmc_pages *data = &table->data_pages;
+
+ start = offset >> PAGE_SHIFT;
+ for (i = start; i < (start + npages); i++) {
+ index = i % data->nr_pages;
+ dma_sync_single_for_cpu(dev, data->daddrs[index],
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+}
+
+/* tmc_sg_table_sync_table: Sync the page table */
+void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
+{
+ int i;
+ struct device *dev = sg_table->dev;
+ struct tmc_pages *table_pages = &sg_table->table_pages;
+
+ for (i = 0; i < table_pages->nr_pages; i++)
+ dma_sync_single_for_device(dev, table_pages->daddrs[i],
+ PAGE_SIZE, DMA_TO_DEVICE);
+}
+
+/*
+ * tmc_sg_table_get_data: Get the buffer pointer for data at @offset
+ * in the SG buffer. The @bufpp is updated to point to the buffer.
+ * Returns :
+ * the length of linear data available at @offset.
+ * or
+ * <= 0 if no data is available.
+ */
+ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
+ u64 offset, size_t len, char **bufpp)
+{
+ size_t size;
+ int pg_idx = offset >> PAGE_SHIFT;
+ int pg_offset = offset & (PAGE_SIZE - 1);
+ struct tmc_pages *data_pages = &sg_table->data_pages;
+
+ size = tmc_sg_table_buf_size(sg_table);
+ if (offset >= size)
+ return -EINVAL;
+
+ /* Make sure we don't go beyond the end */
+ len = (len < (size - offset)) ? len : size - offset;
+ /* Respect the page boundaries */
+ len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
+ if (len > 0)
+ *bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
+ return len;
+}
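+
+/*
+ * Illustrative example, assuming a 4K PAGE_SIZE: a request at
+ * offset = 4090 with len = 100 has pg_idx = 0 and pg_offset = 4090,
+ * so the returned length is clamped to 6 bytes; the caller is
+ * expected to issue a follow-up call at offset = 4096 for the rest.
+ */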
+
+#ifdef ETR_SG_DEBUG
+/* Map a dma address to virtual address */
+static unsigned long
+tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
+ dma_addr_t addr, bool table)
+{
+ long offset;
+ unsigned long base;
+ struct tmc_pages *tmc_pages;
+
+ if (table) {
+ tmc_pages = &sg_table->table_pages;
+ base = (unsigned long)sg_table->table_vaddr;
+ } else {
+ tmc_pages = &sg_table->data_pages;
+ base = (unsigned long)sg_table->data_vaddr;
+ }
+
+ offset = tmc_pages_get_offset(tmc_pages, addr);
+ if (offset < 0)
+ return 0;
+ return base + offset;
+}
+
+/* Dump the given sg_table */
+static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
+{
+ sgte_t *ptr;
+ int i = 0;
+ dma_addr_t addr;
+ struct tmc_sg_table *sg_table = etr_table->sg_table;
+
+ ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
+ etr_table->hwaddr, true);
+ while (ptr) {
+ addr = ETR_SG_ADDR(*ptr);
+ switch (ETR_SG_ET(*ptr)) {
+ case ETR_SG_ET_NORMAL:
+ dev_dbg(sg_table->dev,
+ "%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
+ ptr++;
+ break;
+ case ETR_SG_ET_LINK:
+ dev_dbg(sg_table->dev,
+ "%05d: *** %p\t:{L} 0x%llx ***\n",
+ i, ptr, addr);
+ ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
+ addr, true);
+ break;
+ case ETR_SG_ET_LAST:
+ dev_dbg(sg_table->dev,
+ "%05d: ### %p\t:[L] 0x%llx ###\n",
+ i, ptr, addr);
+ return;
+ default:
+ dev_dbg(sg_table->dev,
+ "%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
+ i, ptr, addr);
+ return;
+ }
+ i++;
+ }
+ dev_dbg(sg_table->dev, "******* End of Table *****\n");
+}
+#else
+static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
+#endif
+
+/*
+ * Populate the SG Table page table entries from table/data
+ * pages allocated. Each system data page holds ETR_SG_PAGES_PER_SYSPAGE
+ * SG pages, and so does each system table page. We therefore track the
+ * index of the table within each system page and move the pointers
+ * accordingly.
+ */
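+/*
+ * For illustration, assuming 4K system pages (one SG page per system
+ * page, 1024 pointers per table page), a 2046-page data buffer is laid
+ * out as two table pages:
+ *  table page 0: 1023 NORMAL entries + 1 LINK to table page 1
+ *  table page 1: 1022 NORMAL entries + 1 LAST entry
+ */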
+#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
+static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
+{
+ dma_addr_t paddr;
+ int i, type, nr_entries;
+ int tpidx = 0; /* index to the current system table_page */
+ int sgtidx = 0; /* index to the sg_table within the current syspage */
+ int sgtentry = 0; /* the entry within the sg_table */
+ int dpidx = 0; /* index to the current system data_page */
+ int spidx = 0; /* index to the SG page within the current data page */
+ sgte_t *ptr; /* pointer to the table entry to fill */
+ struct tmc_sg_table *sg_table = etr_table->sg_table;
+ dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
+ dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;
+
+ nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
+ /*
+ * Use the contiguous virtual address of the table to update entries.
+ */
+ ptr = sg_table->table_vaddr;
+ /*
+ * Fill all the entries, except the last entry to avoid special
+ * checks within the loop.
+ */
+ for (i = 0; i < nr_entries - 1; i++) {
+ if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
+ /*
+ * Last entry in a sg_table page is a link address to
+ * the next table page. If this sg_table is the last
+ * one in the system page, it links to the first
+ * sg_table in the next system page. Otherwise, it
+ * links to the next sg_table page within the system
+ * page.
+ */
+ if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
+ paddr = table_daddrs[tpidx + 1];
+ } else {
+ paddr = table_daddrs[tpidx] +
+ (ETR_SG_PAGE_SIZE * (sgtidx + 1));
+ }
+ type = ETR_SG_ET_LINK;
+ } else {
+ /*
+ * Update the indices to the data_pages to point to the
+ * next sg_page in the data buffer.
+ */
+ type = ETR_SG_ET_NORMAL;
+ paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
+ if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
+ dpidx++;
+ }
+ *ptr++ = ETR_SG_ENTRY(paddr, type);
+ /*
+ * Move to the next table pointer, moving the table page index
+ * if necessary
+ */
+ if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
+ if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
+ tpidx++;
+ }
+ }
+
+ /* Set up the last entry, which is always a data pointer */
+ paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
+ *ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
+}
+
+/*
+ * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, data buffer of @size and
+ * populate the table.
+ *
+ * @dev - Device pointer for the TMC
+ * @node - NUMA node where the memory should be allocated
+ * @size - Total size of the data buffer
+ * @pages - Optional list of page virtual address
+ */
+static struct etr_sg_table *
+tmc_init_etr_sg_table(struct device *dev, int node,
+ unsigned long size, void **pages)
+{
+ int nr_entries, nr_tpages;
+ int nr_dpages = size >> PAGE_SHIFT;
+ struct tmc_sg_table *sg_table;
+ struct etr_sg_table *etr_table;
+
+ etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
+ if (!etr_table)
+ return ERR_PTR(-ENOMEM);
+ nr_entries = tmc_etr_sg_table_entries(nr_dpages);
+ nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);
+
+ sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
+ if (IS_ERR(sg_table)) {
+ kfree(etr_table);
+ return ERR_CAST(sg_table);
+ }
+
+ etr_table->sg_table = sg_table;
+ /* TMC should use table base address for DBA */
+ etr_table->hwaddr = sg_table->table_daddr;
+ tmc_etr_sg_table_populate(etr_table);
+ /* Sync the table pages for the HW */
+ tmc_sg_table_sync_table(sg_table);
+ tmc_etr_sg_table_dump(etr_table);
+
+ return etr_table;
+}
+
+/*
+ * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
+ */
+static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_flat_buf *flat_buf;
+
+ /* We cannot reuse existing pages for flat buf */
+ if (pages)
+ return -EINVAL;
+
+ flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
+ if (!flat_buf)
+ return -ENOMEM;
+
+ flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
+ &flat_buf->daddr, GFP_KERNEL);
+ if (!flat_buf->vaddr) {
+ kfree(flat_buf);
+ return -ENOMEM;
+ }
+
+ flat_buf->size = etr_buf->size;
+ flat_buf->dev = drvdata->dev;
+ etr_buf->hwaddr = flat_buf->daddr;
+ etr_buf->mode = ETR_MODE_FLAT;
+ etr_buf->private = flat_buf;
+ return 0;
+}
+
+static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
+{
+ struct etr_flat_buf *flat_buf = etr_buf->private;
+
+ if (flat_buf && flat_buf->daddr)
+ dma_free_coherent(flat_buf->dev, flat_buf->size,
+ flat_buf->vaddr, flat_buf->daddr);
+ kfree(flat_buf);
+}
+
+static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ /*
+ * Adjust the buffer to point to the beginning of the trace data
+ * and update the available trace data.
+ */
+ etr_buf->offset = rrp - etr_buf->hwaddr;
+ if (etr_buf->full)
+ etr_buf->len = etr_buf->size;
+ else
+ etr_buf->len = rwp - rrp;
+}
+
+static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
+ u64 offset, size_t len, char **bufpp)
+{
+ struct etr_flat_buf *flat_buf = etr_buf->private;
+
+ *bufpp = (char *)flat_buf->vaddr + offset;
+ /*
+ * tmc_etr_buf_get_data already adjusts the length to handle
+ * buffer wrapping around.
+ */
+ return len;
+}
+
+static const struct etr_buf_operations etr_flat_buf_ops = {
+ .alloc = tmc_etr_alloc_flat_buf,
+ .free = tmc_etr_free_flat_buf,
+ .sync = tmc_etr_sync_flat_buf,
+ .get_data = tmc_etr_get_data_flat_buf,
+};
+
+/*
+ * tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters
+ * appropriately.
+ */
+static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_sg_table *etr_table;
+
+ etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
+ etr_buf->size, pages);
+ if (IS_ERR(etr_table))
+ return -ENOMEM;
+ etr_buf->hwaddr = etr_table->hwaddr;
+ etr_buf->mode = ETR_MODE_ETR_SG;
+ etr_buf->private = etr_table;
+ return 0;
+}
+
+static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
+{
+ struct etr_sg_table *etr_table = etr_buf->private;
+
+ if (etr_table) {
+ tmc_free_sg_table(etr_table->sg_table);
+ kfree(etr_table);
+ }
+}
+
+static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
+ size_t len, char **bufpp)
+{
+ struct etr_sg_table *etr_table = etr_buf->private;
+
+ return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
+}
+
+static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ long r_offset, w_offset;
+ struct etr_sg_table *etr_table = etr_buf->private;
+ struct tmc_sg_table *table = etr_table->sg_table;
+
+ /* Convert hw address to offset in the buffer */
+ r_offset = tmc_sg_get_data_page_offset(table, rrp);
+ if (r_offset < 0) {
+ dev_warn(table->dev,
+ "Unable to map RRP %llx to offset\n", rrp);
+ etr_buf->len = 0;
+ return;
+ }
+
+ w_offset = tmc_sg_get_data_page_offset(table, rwp);
+ if (w_offset < 0) {
+ dev_warn(table->dev,
+ "Unable to map RWP %llx to offset\n", rwp);
+ etr_buf->len = 0;
+ return;
+ }
+
+ etr_buf->offset = r_offset;
+ if (etr_buf->full)
+ etr_buf->len = etr_buf->size;
+ else
+ etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
+ w_offset - r_offset;
+ tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
+}
+
+static const struct etr_buf_operations etr_sg_buf_ops = {
+ .alloc = tmc_etr_alloc_sg_buf,
+ .free = tmc_etr_free_sg_buf,
+ .sync = tmc_etr_sync_sg_buf,
+ .get_data = tmc_etr_get_data_sg_buf,
+};
+
+/*
+ * TMC ETR could be connected to a CATU device, which can provide address
+ * translation service. This is represented by the Output port of the TMC
+ * (ETR) connected to the input port of the CATU.
+ *
+ * Returns : coresight_device ptr for the CATU device if a CATU is found.
+ * : NULL otherwise.
+ */
+struct coresight_device *
+tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
+{
+ int i;
+ struct coresight_device *tmp, *etr = drvdata->csdev;
+
+ if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
+ return NULL;
+
+ for (i = 0; i < etr->nr_outport; i++) {
+ tmp = etr->conns[i].child_dev;
+ if (tmp && coresight_is_catu_device(tmp))
+ return tmp;
+ }
+
+ return NULL;
+}
+
+static inline void tmc_etr_enable_catu(struct tmc_drvdata *drvdata)
+{
+ struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
+
+ if (catu && helper_ops(catu)->enable)
+ helper_ops(catu)->enable(catu, drvdata->etr_buf);
+}
+
+static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
+{
+ struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
+
+ if (catu && helper_ops(catu)->disable)
+ helper_ops(catu)->disable(catu, drvdata->etr_buf);
+}
+
+static const struct etr_buf_operations *etr_buf_ops[] = {
+ [ETR_MODE_FLAT] = &etr_flat_buf_ops,
+ [ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
+ [ETR_MODE_CATU] = &etr_catu_buf_ops,
+};
+
+static inline int tmc_etr_mode_alloc_buf(int mode,
+ struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ int rc = -EINVAL;
+
+ switch (mode) {
+ case ETR_MODE_FLAT:
+ case ETR_MODE_ETR_SG:
+ case ETR_MODE_CATU:
+ if (etr_buf_ops[mode]->alloc)
+ rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
+ node, pages);
+ if (!rc)
+ etr_buf->ops = etr_buf_ops[mode];
+ return rc;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * tmc_alloc_etr_buf: Allocate a buffer for use by the ETR.
+ * @drvdata : ETR device details.
+ * @size : size of the requested buffer.
+ * @flags : Required properties for the buffer.
+ * @node : Node for memory allocations.
+ * @pages : An optional list of pages.
+ */
+static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
+ ssize_t size, int flags,
+ int node, void **pages)
+{
+ int rc = -ENOMEM;
+ bool has_etr_sg, has_iommu;
+ bool has_sg, has_catu;
+ struct etr_buf *etr_buf;
+
+ has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
+ has_iommu = iommu_get_domain_for_dev(drvdata->dev);
+ has_catu = !!tmc_etr_get_catu_device(drvdata);
+
+ has_sg = has_catu || has_etr_sg;
+
+ etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
+ if (!etr_buf)
+ return ERR_PTR(-ENOMEM);
+
+ etr_buf->size = size;
+
+ /*
+ * If we have to use an existing list of pages, we cannot reliably
+ * use contiguous DMA memory (even if we have an IOMMU). Otherwise,
+ * we use contiguous DMA memory if at least one of the following
+ * conditions is true:
+ * a) The ETR cannot use Scatter-Gather.
+ * b) We have a backing IOMMU.
+ * c) The requested memory size is smaller than 1M.
+ *
+ * If the preferred mode fails, fall back to the remaining
+ * mechanisms in order.
+ */
+ if (!pages &&
+ (!has_sg || has_iommu || size < SZ_1M))
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
+ etr_buf, node, pages);
+ if (rc && has_etr_sg)
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
+ etr_buf, node, pages);
+ if (rc && has_catu)
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
+ etr_buf, node, pages);
+ if (rc) {
+ kfree(etr_buf);
+ return ERR_PTR(rc);
+ }
+
+ dev_dbg(drvdata->dev, "allocated buffer of size %luKB in mode %d\n",
+ (unsigned long)size >> 10, etr_buf->mode);
+ return etr_buf;
+}
+
+static void tmc_free_etr_buf(struct etr_buf *etr_buf)
+{
+ WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
+ etr_buf->ops->free(etr_buf);
+ kfree(etr_buf);
+}
+
+/*
+ * tmc_etr_buf_get_data: Get a pointer to the trace data at @offset,
+ * with a maximum of @len bytes.
+ * Returns: The size of the linear data available at @offset, with *bufpp
+ * updated to point to the buffer.
+ */
+static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
+ u64 offset, size_t len, char **bufpp)
+{
+ /* Adjust the length to limit this transaction to end of buffer */
+ len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;
+
+ return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
+}
+
+static inline s64
+tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
+{
+ ssize_t len;
+ char *bufp;
+
+ len = tmc_etr_buf_get_data(etr_buf, offset,
+ CORESIGHT_BARRIER_PKT_SIZE, &bufp);
+ if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
+ return -EINVAL;
+ coresight_insert_barrier_packet(bufp);
+ return offset + CORESIGHT_BARRIER_PKT_SIZE;
+}
+
+/*
+ * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
+ * Makes sure the trace data is synced to the memory for consumption.
+ * @etr_buf->offset will hold the offset to the beginning of the trace data
+ * within the buffer, with @etr_buf->len bytes to consume.
+ */
+static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
+{
+ struct etr_buf *etr_buf = drvdata->etr_buf;
+ u64 rrp, rwp;
+ u32 status;
+
+ rrp = tmc_read_rrp(drvdata);
+ rwp = tmc_read_rwp(drvdata);
+ status = readl_relaxed(drvdata->base + TMC_STS);
+ etr_buf->full = status & TMC_STS_FULL;
+
+ WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
+
+ etr_buf->ops->sync(etr_buf, rrp, rwp);
+
+ /* Insert barrier packets at the beginning, if there was an overflow */
+ if (etr_buf->full)
+ tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
+}
+
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl, sts;
+ struct etr_buf *etr_buf = drvdata->etr_buf;
- /* Zero out the memory to help with debug */
- memset(drvdata->vaddr, 0, drvdata->size);
+ /*
+ * If this ETR is connected to a CATU, enable it before we turn
+ * this on
+ */
+ tmc_etr_enable_catu(drvdata);
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
tmc_wait_for_tmcready(drvdata);
- writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
+ writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
@@ -34,16 +924,22 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
axictl |= TMC_AXICTL_ARCACHE_OS;
}
+ if (etr_buf->mode == ETR_MODE_ETR_SG) {
+ if (WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
+ return;
+ axictl |= TMC_AXICTL_SCT_GAT_MODE;
+ }
+
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- tmc_write_dba(drvdata, drvdata->paddr);
+ tmc_write_dba(drvdata, etr_buf->hwaddr);
/*
* If the TMC pointers must be programmed before the session,
* we have to set it properly (i.e, RRP/RWP to base address and
* STS to "not full").
*/
if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
- tmc_write_rrp(drvdata, drvdata->paddr);
- tmc_write_rwp(drvdata, drvdata->paddr);
+ tmc_write_rrp(drvdata, etr_buf->hwaddr);
+ tmc_write_rwp(drvdata, etr_buf->hwaddr);
sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
writel_relaxed(sts, drvdata->base + TMC_STS);
}
@@ -58,37 +954,49 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
+/*
+ * Return the available trace data in the buffer (starts at etr_buf->offset,
+ * limited by etr_buf->len) from @pos, with a maximum limit of @len,
+ * also updating @bufpp on where to find it. Since the trace data
+ * can start anywhere in the buffer, depending on the RRP, we adjust the
+ * returned @len to handle the buffer wrapping around.
+ */
+ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
{
- const u32 *barrier;
- u32 val;
- u32 *temp;
- u64 rwp;
+ s64 offset;
+ ssize_t actual = len;
+ struct etr_buf *etr_buf = drvdata->etr_buf;
- rwp = tmc_read_rwp(drvdata);
- val = readl_relaxed(drvdata->base + TMC_STS);
+ if (pos + actual > etr_buf->len)
+ actual = etr_buf->len - pos;
+ if (actual <= 0)
+ return actual;
- /*
- * Adjust the buffer to point to the beginning of the trace data
- * and update the available trace data.
- */
- if (val & TMC_STS_FULL) {
- drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
- drvdata->len = drvdata->size;
+ /* Compute the offset from which we read the data */
+ offset = etr_buf->offset + pos;
+ if (offset >= etr_buf->size)
+ offset -= etr_buf->size;
+ return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
+}
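+
+/*
+ * Illustrative example: with etr_buf->size = 1MB, etr_buf->offset =
+ * 0xF0000 and pos = 0x20000, the read position 0x110000 is past the
+ * end of the buffer and wraps to offset 0x10000 before the data is
+ * fetched via tmc_etr_buf_get_data().
+ */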
- barrier = barrier_pkt;
- temp = (u32 *)drvdata->buf;
+static struct etr_buf *
+tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
+{
+ return tmc_alloc_etr_buf(drvdata, drvdata->size,
+ 0, cpu_to_node(0), NULL);
+}
- while (*barrier) {
- *temp = *barrier;
- temp++;
- barrier++;
- }
+static void
+tmc_etr_free_sysfs_buf(struct etr_buf *buf)
+{
+ if (buf)
+ tmc_free_etr_buf(buf);
+}
- } else {
- drvdata->buf = drvdata->vaddr;
- drvdata->len = rwp - drvdata->paddr;
- }
+static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
+{
+ tmc_sync_etr_buf(drvdata);
}
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
@@ -101,44 +1009,45 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
* read before the TMC is disabled.
*/
if (drvdata->mode == CS_MODE_SYSFS)
- tmc_etr_dump_hw(drvdata);
+ tmc_etr_sync_sysfs_buf(drvdata);
+
tmc_disable_hw(drvdata);
CS_LOCK(drvdata->base);
+
+ /* Disable CATU device if this ETR is connected to one */
+ tmc_etr_disable_catu(drvdata);
}
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
- bool used = false;
unsigned long flags;
- void __iomem *vaddr = NULL;
- dma_addr_t paddr = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etr_buf *new_buf = NULL, *free_buf = NULL;
/*
- * If we don't have a buffer release the lock and allocate memory.
- * Otherwise keep the lock and move along.
+ * If we are enabling the ETR from disabled state, we need to make
+ * sure we have a buffer with the right size. The etr_buf is not reset
+ * immediately after we stop the tracing in SYSFS mode as we wait for
+ * the user to collect the data. We may be able to reuse the existing
+ * buffer, provided the size matches. Any allocation has to be done
+ * with the lock released.
*/
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (!drvdata->vaddr) {
+ if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- /*
- * Contiguous memory can't be allocated while a spinlock is
- * held. As such allocate memory here and free it if a buffer
- * has already been allocated (from a previous session).
- */
- vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
- &paddr, GFP_KERNEL);
- if (!vaddr)
- return -ENOMEM;
+ /* Allocate memory with the locks released */
+ free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
+ if (IS_ERR(new_buf))
+ return PTR_ERR(new_buf);
/* Let's try again */
spin_lock_irqsave(&drvdata->spinlock, flags);
}
- if (drvdata->reading) {
+ if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
ret = -EBUSY;
goto out;
}
@@ -146,21 +1055,19 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
- * touched.
+ * touched, even if the buffer size has changed.
*/
if (drvdata->mode == CS_MODE_SYSFS)
goto out;
/*
- * If drvdata::vaddr == NULL, use the memory allocated above.
- * Otherwise a buffer still exists from a previous session, so
- * simply use that.
+ * If we don't have a buffer or it doesn't match the requested size,
+ * use the buffer allocated above. Otherwise reuse the existing buffer.
*/
- if (drvdata->vaddr == NULL) {
- used = true;
- drvdata->vaddr = vaddr;
- drvdata->paddr = paddr;
- drvdata->buf = drvdata->vaddr;
+ if (!drvdata->etr_buf ||
+ (new_buf && drvdata->etr_buf->size != new_buf->size)) {
+ free_buf = drvdata->etr_buf;
+ drvdata->etr_buf = new_buf;
}
drvdata->mode = CS_MODE_SYSFS;
@@ -169,8 +1076,8 @@ out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
- if (!used && vaddr)
- dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+ if (free_buf)
+ tmc_etr_free_sysfs_buf(free_buf);
if (!ret)
dev_info(drvdata->dev, "TMC-ETR enabled\n");
@@ -180,32 +1087,8 @@ out:
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
- int ret = 0;
- unsigned long flags;
- struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->reading) {
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * In Perf mode there can be only one writer per sink. There
- * is also no need to continue if the ETR is already operated
- * from sysFS.
- */
- if (drvdata->mode != CS_MODE_DISABLED) {
- ret = -EINVAL;
- goto out;
- }
-
- drvdata->mode = CS_MODE_PERF;
- tmc_etr_enable_hw(drvdata);
-out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- return ret;
+ /* We don't support perf mode yet! */
+ return -EINVAL;
}
static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
@@ -273,8 +1156,8 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
goto out;
}
- /* If drvdata::buf is NULL the trace data has been read already */
- if (drvdata->buf == NULL) {
+ /* If drvdata::etr_buf is NULL the trace data has been read already */
+ if (drvdata->etr_buf == NULL) {
ret = -EINVAL;
goto out;
}
@@ -293,8 +1176,7 @@ out:
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
unsigned long flags;
- dma_addr_t paddr;
- void __iomem *vaddr = NULL;
+ struct etr_buf *etr_buf = NULL;
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
@@ -306,9 +1188,8 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (drvdata->mode == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
- * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
- * so we don't have to explicitly clear it. Also, since the
- * tracer is still enabled drvdata::buf can't be NULL.
+ * buffer. Since the tracer is still enabled drvdata::buf can't
+ * be NULL.
*/
tmc_etr_enable_hw(drvdata);
} else {
@@ -316,17 +1197,16 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
* The ETR is not tracing and the buffer was just read.
* As such prepare to free the trace buffer.
*/
- vaddr = drvdata->vaddr;
- paddr = drvdata->paddr;
- drvdata->buf = drvdata->vaddr = NULL;
+ etr_buf = drvdata->etr_buf;
+ drvdata->etr_buf = NULL;
}
drvdata->reading = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free allocated memory out side of the spinlock */
- if (vaddr)
- dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+ if (etr_buf)
+ tmc_free_etr_buf(etr_buf);
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 456f122df74f..1b817ec1192c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
@@ -123,35 +124,40 @@ static int tmc_open(struct inode *inode, struct file *file)
return 0;
}
+static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
+{
+ switch (drvdata->config_type) {
+ case TMC_CONFIG_TYPE_ETB:
+ case TMC_CONFIG_TYPE_ETF:
+ return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
+ case TMC_CONFIG_TYPE_ETR:
+ return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
+ }
+
+ return -EINVAL;
+}
+
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
loff_t *ppos)
{
+ char *bufp;
+ ssize_t actual;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
- char *bufp = drvdata->buf + *ppos;
+ actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
+ if (actual <= 0)
+ return 0;
- if (*ppos + len > drvdata->len)
- len = drvdata->len - *ppos;
-
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (bufp == (char *)(drvdata->vaddr + drvdata->size))
- bufp = drvdata->vaddr;
- else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
- bufp -= drvdata->size;
- if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
- len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
- }
-
- if (copy_to_user(data, bufp, len)) {
+ if (copy_to_user(data, bufp, actual)) {
dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
return -EFAULT;
}
- *ppos += len;
+ *ppos += actual;
+ dev_dbg(drvdata->dev, "%zu bytes copied\n", actual);
- dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
- __func__, len, (int)(drvdata->len - *ppos));
- return len;
+ return actual;
}
static int tmc_release(struct inode *inode, struct file *file)
@@ -271,8 +277,41 @@ static ssize_t trigger_cntr_store(struct device *dev,
}
static DEVICE_ATTR_RW(trigger_cntr);
+static ssize_t buffer_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%#x\n", drvdata->size);
+}
+
+static ssize_t buffer_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ /* Only permitted for TMC-ETRs */
+ if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
+ return -EPERM;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+ /* The buffer size should be page aligned */
+ if (val & (PAGE_SIZE - 1))
+ return -EINVAL;
+ drvdata->size = val;
+ return size;
+}
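+
+/*
+ * Hypothetical usage from userspace (the exact device name depends on
+ * the platform):
+ *  echo 0x400000 > /sys/bus/coresight/devices/<etr-name>/buffer_size
+ * requests a 4MB buffer; the new size takes effect the next time the
+ * ETR is enabled.
+ */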
+
+static DEVICE_ATTR_RW(buffer_size);
+
static struct attribute *coresight_tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
+ &dev_attr_buffer_size.attr,
NULL,
};
@@ -291,6 +330,12 @@ const struct attribute_group *coresight_tmc_groups[] = {
NULL,
};
+static inline bool tmc_etr_can_use_sg(struct tmc_drvdata *drvdata)
+{
+ return fwnode_property_present(drvdata->dev->fwnode,
+ "arm,scatter-gather");
+}
+
/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
u32 devid, void *dev_caps)
@@ -300,7 +345,7 @@ static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
/* Set the unadvertised capabilities */
tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
- if (!(devid & TMC_DEVID_NOSCAT))
+ if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(drvdata))
tmc_etr_set_cap(drvdata, TMC_ETR_SG);
/* Check if the AXI address width is available */
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index dfaff077a7fc..7027bd60c4cc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -7,6 +7,7 @@
#ifndef _CORESIGHT_TMC_H
#define _CORESIGHT_TMC_H
+#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#define TMC_RSZ 0x004
@@ -122,6 +123,36 @@ enum tmc_mem_intf_width {
#define CORESIGHT_SOC_600_ETR_CAPS \
(TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
+enum etr_mode {
+ ETR_MODE_FLAT, /* Uses contiguous flat buffer */
+ ETR_MODE_ETR_SG, /* Uses in-built TMC ETR SG mechanism */
+ ETR_MODE_CATU, /* Use SG mechanism in CATU */
+};
+
+struct etr_buf_operations;
+
+/**
+ * struct etr_buf - Details of the buffer used by ETR
+ * @mode : Mode of the ETR buffer, contiguous, Scatter Gather etc.
+ * @full : Trace data overflow
+ * @size : Size of the buffer.
+ * @hwaddr : Address to be programmed in the TMC:DBA{LO,HI}
+ * @offset : Offset of the trace data in the buffer for consumption.
+ * @len : Available trace data in the buffer (may wrap around to the beginning).
+ * @ops : ETR buffer operations for the mode.
+ * @private : Backend specific information for the buf
+ */
+struct etr_buf {
+ enum etr_mode mode;
+ bool full;
+ ssize_t size;
+ dma_addr_t hwaddr;
+ unsigned long offset;
+ s64 len;
+ const struct etr_buf_operations *ops;
+ void *private;
+};
+
/**
* struct tmc_drvdata - specifics associated to an TMC component
* @base: memory mapped base address for this component.
@@ -129,11 +160,10 @@ enum tmc_mem_intf_width {
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
- * @buf: area of memory where trace data get sent.
- * @paddr: DMA start location in RAM.
- * @vaddr: virtual representation of @paddr.
- * @size: trace buffer size.
- * @len: size of the available trace.
+ * @buf: Snapshot of the trace data for ETF/ETB.
+ * @etr_buf: details of buffer used in TMC-ETR
+ * @len: size of the available trace for ETF/ETB.
+ * @size: trace buffer size for this TMC (common for all modes).
* @mode: how this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
@@ -148,11 +178,12 @@ struct tmc_drvdata {
struct miscdevice miscdev;
spinlock_t spinlock;
bool reading;
- char *buf;
- dma_addr_t paddr;
- void __iomem *vaddr;
- u32 size;
+ union {
+ char *buf; /* TMC ETB */
+ struct etr_buf *etr_buf; /* TMC ETR */
+ };
u32 len;
+ u32 size;
u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
@@ -160,6 +191,47 @@ struct tmc_drvdata {
u32 etr_caps;
};
+struct etr_buf_operations {
+ int (*alloc)(struct tmc_drvdata *drvdata, struct etr_buf *etr_buf,
+ int node, void **pages);
+ void (*sync)(struct etr_buf *etr_buf, u64 rrp, u64 rwp);
+ ssize_t (*get_data)(struct etr_buf *etr_buf, u64 offset, size_t len,
+ char **bufpp);
+ void (*free)(struct etr_buf *etr_buf);
+};
+
+/**
+ * struct tmc_pages - Collection of pages used for SG.
+ * @nr_pages: Number of pages in the list.
+ * @daddrs: Array of DMA'able page addresses.
+ * @pages: Array of pages for the buffer.
+ */
+struct tmc_pages {
+ int nr_pages;
+ dma_addr_t *daddrs;
+ struct page **pages;
+};
+
+/*
+ * struct tmc_sg_table - Generic SG table for TMC
+ * @dev: Device for DMA allocations
+ * @table_vaddr: Contiguous Virtual address for PageTable
+ * @data_vaddr: Contiguous Virtual address for Data Buffer
+ * @table_daddr: DMA address of the PageTable base
+ * @node: Node for Page allocations
+ * @table_pages: List of pages & dma address for Table
+ * @data_pages: List of pages & dma address for Data
+ */
+struct tmc_sg_table {
+ struct device *dev;
+ void *table_vaddr;
+ void *data_vaddr;
+ dma_addr_t table_daddr;
+ int node;
+ struct tmc_pages table_pages;
+ struct tmc_pages data_pages;
+};
+
/* Generic functions */
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
@@ -172,10 +244,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etb_cs_ops;
extern const struct coresight_ops tmc_etf_cs_ops;
+ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp);
/* ETR functions */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
+ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp);
#define TMC_REG_PAIR(name, lo_off, hi_off) \
@@ -211,4 +287,23 @@ static inline bool tmc_etr_has_cap(struct tmc_drvdata *drvdata, u32 cap)
return !!(drvdata->etr_caps & cap);
}
+struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
+ int node,
+ int nr_tpages,
+ int nr_dpages,
+ void **pages);
+void tmc_free_sg_table(struct tmc_sg_table *sg_table);
+void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table);
+void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
+ u64 offset, u64 size);
+ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
+ u64 offset, size_t len, char **bufpp);
+static inline unsigned long
+tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
+{
+ return sg_table->data_pages.nr_pages << PAGE_SHIFT;
+}
+
+struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
+
#endif
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 01b7457fe8fc..459ef930d98c 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -40,8 +40,9 @@
/** register definition **/
/* FFSR - 0x300 */
-#define FFSR_FT_STOPPED BIT(1)
+#define FFSR_FT_STOPPED_BIT 1
/* FFCR - 0x304 */
+#define FFCR_FON_MAN_BIT 6
#define FFCR_FON_MAN BIT(6)
#define FFCR_STOP_FI BIT(12)
@@ -86,9 +87,9 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
/* Generate manual flush */
writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
/* Wait for flush to complete */
- coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
+ coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
/* Wait for formatter to stop */
- coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
+ coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
CS_LOCK(drvdata->base);
}
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 29e834aab539..3e07fd335f8c 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -51,8 +51,7 @@ static struct list_head *stm_path;
* beginning of the data collected in a buffer. That way the decoder knows that
* it needs to look for another sync sequence.
*/
-const u32 barrier_pkt[5] = {0x7fffffff, 0x7fffffff,
- 0x7fffffff, 0x7fffffff, 0x0};
+const u32 barrier_pkt[4] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
static int coresight_id_match(struct device *dev, void *data)
{
@@ -108,7 +107,7 @@ static int coresight_find_link_inport(struct coresight_device *csdev,
dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
dev_name(&parent->dev), dev_name(&csdev->dev));
- return 0;
+ return -ENODEV;
}
static int coresight_find_link_outport(struct coresight_device *csdev,
@@ -126,7 +125,7 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
dev_name(&csdev->dev), dev_name(&child->dev));
- return 0;
+ return -ENODEV;
}
static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
@@ -179,6 +178,9 @@ static int coresight_enable_link(struct coresight_device *csdev,
else
refport = 0;
+ if (refport < 0)
+ return refport;
+
if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
if (link_ops(csdev)->enable) {
ret = link_ops(csdev)->enable(csdev, inport, outport);
@@ -423,6 +425,42 @@ struct coresight_device *coresight_get_enabled_sink(bool deactivate)
return dev ? to_coresight_device(dev) : NULL;
}
+/*
+ * coresight_grab_device - Power up this device and any of the helper
+ * devices connected to it for trace operation. Since the helper devices
+ * don't appear on the trace path, they should be handled along with the
+ * master device.
+ */
+static void coresight_grab_device(struct coresight_device *csdev)
+{
+ int i;
+
+ for (i = 0; i < csdev->nr_outport; i++) {
+ struct coresight_device *child = csdev->conns[i].child_dev;
+
+ if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ pm_runtime_get_sync(child->dev.parent);
+ }
+ pm_runtime_get_sync(csdev->dev.parent);
+}
+
+/*
+ * coresight_drop_device - Release this device and any of the helper
+ * devices connected to it.
+ */
+static void coresight_drop_device(struct coresight_device *csdev)
+{
+ int i;
+
+ pm_runtime_put(csdev->dev.parent);
+ for (i = 0; i < csdev->nr_outport; i++) {
+ struct coresight_device *child = csdev->conns[i].child_dev;
+
+ if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ pm_runtime_put(child->dev.parent);
+ }
+}
+
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
@@ -471,9 +509,9 @@ out:
if (!node)
return -ENOMEM;
+ coresight_grab_device(csdev);
node->csdev = csdev;
list_add(&node->link, path);
- pm_runtime_get_sync(csdev->dev.parent);
return 0;
}
@@ -517,7 +555,7 @@ void coresight_release_path(struct list_head *path)
list_for_each_entry_safe(nd, next, path, link) {
csdev = nd->csdev;
- pm_runtime_put_sync(csdev->dev.parent);
+ coresight_drop_device(csdev);
list_del(&nd->link);
kfree(nd);
}
@@ -768,6 +806,9 @@ static struct device_type coresight_dev_type[] = {
.name = "source",
.groups = coresight_source_groups,
},
+ {
+ .name = "helper",
+ },
};
static void coresight_device_release(struct device *dev)
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 4f8df2ec87b1..451d4ae50e66 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -116,24 +116,21 @@ config I2C_I801
DH89xxCC (PCH)
Panther Point (PCH)
Lynx Point (PCH)
- Lynx Point-LP (PCH)
Avoton (SOC)
Wellsburg (PCH)
Coleto Creek (PCH)
Wildcat Point (PCH)
- Wildcat Point-LP (PCH)
BayTrail (SOC)
Braswell (SOC)
- Sunrise Point-H (PCH)
- Sunrise Point-LP (PCH)
- Kaby Lake-H (PCH)
+ Sunrise Point (PCH)
+ Kaby Lake (PCH)
DNV (SOC)
Broxton (SOC)
Lewisburg (PCH)
Gemini Lake (SOC)
- Cannon Lake-H (PCH)
- Cannon Lake-LP (PCH)
+ Cannon Lake (PCH)
Cedar Fork (PCH)
+ Ice Lake (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -762,6 +759,13 @@ config I2C_OMAP
Like OMAP1510/1610/1710/5912 and OMAP242x.
For details see http://www.ti.com/omap.
+config I2C_OWL
+ tristate "Actions Semiconductor Owl I2C Controller"
+ depends on ARCH_ACTIONS || COMPILE_TEST
+ help
+ Say Y here if you want to use the I2C bus controller on
+ the Actions Semiconductor Owl SoCs.
+
config I2C_PASEMI
tristate "PA Semi SMBus interface"
depends on PPC_PASEMI && PCI
@@ -828,6 +832,19 @@ config I2C_PXA_SLAVE
is necessary for systems where the PXA may be a target on the
I2C bus.
+config I2C_QCOM_GENI
+ tristate "Qualcomm Technologies Inc.'s GENI based I2C controller"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on QCOM_GENI_SE
+ help
+ This driver supports the GENI serial engine based I2C controller in
+ master mode on Qualcomm Technologies Inc. SoCs. If you say
+ yes to this option, support will be included for the built-in I2C
+ interface on Qualcomm Technologies Inc. SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-qcom-geni.
+
config I2C_QUP
tristate "Qualcomm QUP based I2C controller"
depends on ARCH_QCOM
@@ -1330,4 +1347,15 @@ config I2C_ZX2967
This driver can also be built as a module. If so, the module will be
called i2c-zx2967.
+config I2C_FSI
+ tristate "FSI I2C driver"
+ depends on FSI
+ help
+ Driver for FSI bus attached I2C masters. These are I2C masters that
+ are connected to the system over an FSI bus, instead of the more
+ common PCI or MMIO interface.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-fsi.
+
endmenu
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 5a869144a0c5..18b26af82b1c 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
+obj-$(CONFIG_I2C_OWL) += i2c-owl.o
obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
@@ -83,6 +84,7 @@ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
+obj-$(CONFIG_I2C_QCOM_GENI) += i2c-qcom-geni.o
obj-$(CONFIG_I2C_QUP) += i2c-qup.o
obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o
@@ -137,5 +139,6 @@ obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
obj-$(CONFIG_I2C_XGENE_SLIMPRO) += i2c-xgene-slimpro.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
+obj-$(CONFIG_I2C_FSI) += i2c-fsi.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 95a80a8f81b5..134567f3019f 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -384,6 +384,7 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
if (status)
return status;
len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
+ /* fall through */
case I2C_SMBUS_I2C_BLOCK_DATA:
for (i = 0; i < len; i++) {
status = amd_ec_read(smbus, AMD_SMB_DATA + i,
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 60e4d0e939a3..a4f956c6d567 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -111,22 +111,22 @@
#define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0)
enum aspeed_i2c_master_state {
+ ASPEED_I2C_MASTER_INACTIVE,
ASPEED_I2C_MASTER_START,
ASPEED_I2C_MASTER_TX_FIRST,
ASPEED_I2C_MASTER_TX,
ASPEED_I2C_MASTER_RX_FIRST,
ASPEED_I2C_MASTER_RX,
ASPEED_I2C_MASTER_STOP,
- ASPEED_I2C_MASTER_INACTIVE,
};
enum aspeed_i2c_slave_state {
+ ASPEED_I2C_SLAVE_STOP,
ASPEED_I2C_SLAVE_START,
ASPEED_I2C_SLAVE_READ_REQUESTED,
ASPEED_I2C_SLAVE_READ_PROCESSED,
ASPEED_I2C_SLAVE_WRITE_REQUESTED,
ASPEED_I2C_SLAVE_WRITE_RECEIVED,
- ASPEED_I2C_SLAVE_STOP,
};
struct aspeed_i2c_bus {
@@ -234,7 +234,6 @@ static bool aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus)
bool irq_handled = true;
u8 value;
- spin_lock(&bus->lock);
if (!slave) {
irq_handled = false;
goto out;
@@ -325,7 +324,6 @@ static bool aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus)
writel(status_ack, bus->base + ASPEED_I2C_INTR_STS_REG);
out:
- spin_unlock(&bus->lock);
return irq_handled;
}
#endif /* CONFIG_I2C_SLAVE */
@@ -389,7 +387,6 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
u8 recv_byte;
int ret;
- spin_lock(&bus->lock);
irq_status = readl(bus->base + ASPEED_I2C_INTR_STS_REG);
/* Ack all interrupt bits. */
writel(irq_status, bus->base + ASPEED_I2C_INTR_STS_REG);
@@ -407,7 +404,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
*/
ret = aspeed_i2c_is_irq_error(irq_status);
if (ret < 0) {
- dev_dbg(bus->dev, "received error interrupt: 0x%08x",
+ dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
irq_status);
bus->cmd_err = ret;
bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
@@ -416,7 +413,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
/* We are in an invalid state; reset bus to a known state. */
if (!bus->msgs) {
- dev_err(bus->dev, "bus in unknown state");
+ dev_err(bus->dev, "bus in unknown state\n");
bus->cmd_err = -EIO;
if (bus->master_state != ASPEED_I2C_MASTER_STOP)
aspeed_i2c_do_stop(bus);
@@ -431,7 +428,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
*/
if (bus->master_state == ASPEED_I2C_MASTER_START) {
if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
- pr_devel("no slave present at %02x", msg->addr);
+ pr_devel("no slave present at %02x\n", msg->addr);
status_ack |= ASPEED_I2CD_INTR_TX_NAK;
bus->cmd_err = -ENXIO;
aspeed_i2c_do_stop(bus);
@@ -451,11 +448,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
switch (bus->master_state) {
case ASPEED_I2C_MASTER_TX:
if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
- dev_dbg(bus->dev, "slave NACKed TX");
+ dev_dbg(bus->dev, "slave NACKed TX\n");
status_ack |= ASPEED_I2CD_INTR_TX_NAK;
goto error_and_stop;
} else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
- dev_err(bus->dev, "slave failed to ACK TX");
+ dev_err(bus->dev, "slave failed to ACK TX\n");
goto error_and_stop;
}
status_ack |= ASPEED_I2CD_INTR_TX_ACK;
@@ -478,7 +475,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
/* fallthrough intended */
case ASPEED_I2C_MASTER_RX:
if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
- dev_err(bus->dev, "master failed to RX");
+ dev_err(bus->dev, "master failed to RX\n");
goto error_and_stop;
}
status_ack |= ASPEED_I2CD_INTR_RX_DONE;
@@ -509,7 +506,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
goto out_no_complete;
case ASPEED_I2C_MASTER_STOP:
if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) {
- dev_err(bus->dev, "master failed to STOP");
+ dev_err(bus->dev, "master failed to STOP\n");
bus->cmd_err = -EIO;
/* Do not STOP as we have already tried. */
} else {
@@ -520,7 +517,7 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
goto out_complete;
case ASPEED_I2C_MASTER_INACTIVE:
dev_err(bus->dev,
- "master received interrupt 0x%08x, but is inactive",
+ "master received interrupt 0x%08x, but is inactive\n",
irq_status);
bus->cmd_err = -EIO;
/* Do not STOP as we should be inactive. */
@@ -547,22 +544,29 @@ out_no_complete:
dev_err(bus->dev,
"irq handled != irq. expected 0x%08x, but was 0x%08x\n",
irq_status, status_ack);
- spin_unlock(&bus->lock);
return !!irq_status;
}
static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
{
struct aspeed_i2c_bus *bus = dev_id;
+ bool ret;
+
+ spin_lock(&bus->lock);
#if IS_ENABLED(CONFIG_I2C_SLAVE)
if (aspeed_i2c_slave_irq(bus)) {
dev_dbg(bus->dev, "irq handled by slave.\n");
- return IRQ_HANDLED;
+ ret = true;
+ goto out;
}
#endif /* CONFIG_I2C_SLAVE */
- return aspeed_i2c_master_irq(bus) ? IRQ_HANDLED : IRQ_NONE;
+ ret = aspeed_i2c_master_irq(bus);
+
+out:
+ spin_unlock(&bus->lock);
+ return ret ? IRQ_HANDLED : IRQ_NONE;
}
static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
@@ -851,7 +855,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
if (IS_ERR(bus->rst)) {
dev_err(&pdev->dev,
- "missing or invalid reset controller device tree entry");
+ "missing or invalid reset controller device tree entry\n");
return PTR_ERR(bus->rst);
}
reset_control_deassert(bus->rst);
@@ -868,7 +872,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
if (!match)
bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
else
- bus->get_clk_reg_val = match->data;
+ bus->get_clk_reg_val = (u32 (*)(u32))match->data;
/* Initialize the I2C adapter */
spin_lock_init(&bus->lock);
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 78792b4d6437..826d32049996 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -689,9 +689,9 @@ static int brcmstb_i2c_suspend(struct device *dev)
{
struct brcmstb_i2c_dev *i2c_dev = dev_get_drvdata(dev);
- i2c_lock_adapter(&i2c_dev->adapter);
+ i2c_lock_bus(&i2c_dev->adapter, I2C_LOCK_ROOT_ADAPTER);
i2c_dev->is_suspended = true;
- i2c_unlock_adapter(&i2c_dev->adapter);
+ i2c_unlock_bus(&i2c_dev->adapter, I2C_LOCK_ROOT_ADAPTER);
return 0;
}
@@ -700,10 +700,10 @@ static int brcmstb_i2c_resume(struct device *dev)
{
struct brcmstb_i2c_dev *i2c_dev = dev_get_drvdata(dev);
- i2c_lock_adapter(&i2c_dev->adapter);
+ i2c_lock_bus(&i2c_dev->adapter, I2C_LOCK_ROOT_ADAPTER);
brcmstb_i2c_set_bsc_reg_defaults(i2c_dev);
i2c_dev->is_suspended = false;
- i2c_unlock_adapter(&i2c_dev->adapter);
+ i2c_unlock_bus(&i2c_dev->adapter, I2C_LOCK_ROOT_ADAPTER);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 44cffad43701..c4d176f5ed79 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = {
.name = "cht_wc_ext_chrg_irq_chip",
};
-static const char * const bq24190_suppliers[] = { "fusb302-typec-source" };
+static const char * const bq24190_suppliers[] = {
+ "tcpm-source-psy-i2c-fusb302" };
static const struct property_entry bq24190_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 75d6ab177055..11caafa0e050 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
/*
* It's not always possible to have 1 to 2 ratio when d=7, so fall back
* to minimal possible clkh in this case.
+ *
+ * Note:
+	 * CLKH must not be 0; in that case the I2C clock is not generated
+	 * at all.
*/
- if (clk >= clkl + d) {
+ if (clk > clkl + d) {
clkh = clk - clkl - d;
clkl -= d;
} else {
- clkh = 0;
+ clkh = 1;
clkl = clk - (d << 1);
}
@@ -714,14 +718,14 @@ static int i2c_davinci_cpufreq_transition(struct notifier_block *nb,
dev = container_of(nb, struct davinci_i2c_dev, freq_transition);
- i2c_lock_adapter(&dev->adapter);
+ i2c_lock_bus(&dev->adapter, I2C_LOCK_ROOT_ADAPTER);
if (val == CPUFREQ_PRECHANGE) {
davinci_i2c_reset_ctrl(dev, 0);
} else if (val == CPUFREQ_POSTCHANGE) {
i2c_davinci_calc_clk_dividers(dev);
davinci_i2c_reset_ctrl(dev, 1);
}
- i2c_unlock_adapter(&dev->adapter);
+ i2c_unlock_bus(&dev->adapter, I2C_LOCK_ROOT_ADAPTER);
return 0;
}
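The davinci divider fix above matters at the boundary clk == clkl + d: the old `>=` test took the first branch and computed clkh = clk - clkl - d = 0, and a zero CLKH means no I2C clock at all. A standalone sketch of the two branches with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned clk = 17, clkl = 10, d = 7;	/* illustrative values */
	unsigned clkh;

	if (clk > clkl + d) {			/* new strict test */
		clkh = clk - clkl - d;
		clkl -= d;
	} else {
		clkh = 1;			/* was 0: no SCL generated */
		clkl = clk - (d << 1);
	}
	printf("clkh=%u clkl=%u\n", clkh, clkl);	/* prints clkh=1 clkl=3 */
	return 0;
}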
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index dbda8c9c8a1c..a2a275cfc1f6 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
 * Intel BayTrail PMIC I2C bus semaphore implementation
* Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 48914dfc8ce8..69ec4a791f23 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synopsys DesignWare I2C adapter driver.
*
@@ -6,20 +7,6 @@
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
#include <linux/clk.h>
#include <linux/delay.h>
@@ -31,6 +18,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/swab.h>
#include "i2c-designware-core.h"
@@ -94,6 +82,40 @@ void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
}
}
+/**
+ * i2c_dw_set_reg_access() - Set register access flags
+ * @dev: device private data
+ *
+ * Autodetects needed register access mode and sets access flags accordingly.
+ * This must be called before doing any other register access.
+ */
+int i2c_dw_set_reg_access(struct dw_i2c_dev *dev)
+{
+ u32 reg;
+ int ret;
+
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return ret;
+
+ reg = dw_readl(dev, DW_IC_COMP_TYPE);
+ i2c_dw_release_lock(dev);
+
+ if (reg == swab32(DW_IC_COMP_TYPE_VALUE)) {
+		/* Configure register endianness access */
+ dev->flags |= ACCESS_SWAP;
+ } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
+		/* Configure 16-bit register access mode */
+ dev->flags |= ACCESS_16BIT;
+ } else if (reg != DW_IC_COMP_TYPE_VALUE) {
+ dev_err(dev->dev,
+ "Unknown Synopsys component type: 0x%08x\n", reg);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
{
/*
@@ -149,6 +171,47 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset;
}
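For reference, i2c_dw_scl_lcnt() above converts a low-period target into clock counts with round-to-nearest arithmetic (the +500000 before dividing by 1000000). A standalone check with illustrative values, ic_clk = 100000 kHz (100 MHz), standard-mode tLOW = 4.7 us and tf = 300 ns:

#include <stdio.h>

static unsigned lcnt(unsigned ic_clk, unsigned tLOW, unsigned tf, int offset)
{
	/* same arithmetic as i2c_dw_scl_lcnt(): round to nearest, then -1 */
	return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset;
}

int main(void)
{
	/* 100 MHz clock (in kHz), tLOW = 4700 ns, tf = 300 ns */
	printf("%u\n", lcnt(100000, 4700, 300, 0));	/* prints 499 */
	return 0;
}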
+int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
+{
+ u32 reg;
+ int ret;
+
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return ret;
+
+ /* Configure SDA Hold Time if required */
+ reg = dw_readl(dev, DW_IC_COMP_VERSION);
+ if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
+ if (!dev->sda_hold_time) {
+ /* Keep previous hold time setting if no one set it */
+ dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
+ }
+
+ /*
+		 * Workaround to avoid TX arbitration loss when an I2C
+		 * slave pulls SDA down "too quickly" after the falling edge
+		 * of SCL, by enabling a non-zero SDA RX hold. The
+		 * specification says it extends the incoming SDA low-to-high
+		 * transition while SCL is high, but it appears to help with
+		 * the above issue as well.
+ */
+ if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
+ dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+
+ dev_dbg(dev->dev, "SDA Hold Time TX:RX = %d:%d\n",
+ dev->sda_hold_time & ~(u32)DW_IC_SDA_HOLD_RX_MASK,
+ dev->sda_hold_time >> DW_IC_SDA_HOLD_RX_SHIFT);
+ } else if (dev->sda_hold_time) {
+ dev_warn(dev->dev,
+ "Hardware too old to adjust SDA hold time.\n");
+ dev->sda_hold_time = 0;
+ }
+
+ i2c_dw_release_lock(dev);
+
+ return 0;
+}
+
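The debug print in i2c_dw_set_sda_hold() splits one register value into TX and RX fields: the RX hold lives above DW_IC_SDA_HOLD_RX_SHIFT and the TX hold below it, which is why the code ORs in `1 << DW_IC_SDA_HOLD_RX_SHIFT` when the RX field is zero. A standalone sketch of that decomposition; the shift value 16 and the 8-bit field width are assumptions for illustration, not taken from the header:

#include <stdio.h>

#define SDA_HOLD_RX_SHIFT	16			/* assumed for illustration */
#define SDA_HOLD_RX_MASK	(0xffu << SDA_HOLD_RX_SHIFT)

int main(void)
{
	unsigned hold = 0x0000001e;			/* TX hold only */

	if (!(hold & SDA_HOLD_RX_MASK))
		hold |= 1u << SDA_HOLD_RX_SHIFT;	/* enable minimal RX hold */

	printf("TX:RX = %u:%u\n",
	       hold & ~SDA_HOLD_RX_MASK, hold >> SDA_HOLD_RX_SHIFT);
	/* prints TX:RX = 30:1 */
	return 0;
}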
void __i2c_dw_disable(struct dw_i2c_dev *dev)
{
int timeout = 100;
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index d690e648bc01..e367b1af4ab2 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Synopsys DesignWare I2C adapter driver.
*
@@ -6,20 +7,6 @@
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
#include <linux/i2c.h>
@@ -212,7 +199,8 @@
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
* @rx_outstanding: current master-rx elements in tx fifo
- * @clk_freq: bus clock frequency
+ * @timings: bus clock frequency, SDA hold and other timings
+ * @sda_hold_time: SDA hold value
* @ss_hcnt: standard speed HCNT value
* @ss_lcnt: standard speed LCNT value
* @fs_hcnt: fast speed HCNT value
@@ -264,10 +252,8 @@ struct dw_i2c_dev {
unsigned int tx_fifo_depth;
unsigned int rx_fifo_depth;
int rx_outstanding;
- u32 clk_freq;
+ struct i2c_timings timings;
u32 sda_hold_time;
- u32 sda_falling_time;
- u32 scl_falling_time;
u16 ss_hcnt;
u16 ss_lcnt;
u16 fs_hcnt;
@@ -295,8 +281,10 @@ struct dw_i2c_dev {
u32 dw_readl(struct dw_i2c_dev *dev, int offset);
void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
+int i2c_dw_set_reg_access(struct dw_i2c_dev *dev);
u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset);
u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset);
+int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev);
unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev);
int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare);
int i2c_dw_acquire_lock(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 27436a937492..e18442b9973a 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synopsys DesignWare I2C adapter driver (master only).
*
@@ -6,20 +7,6 @@
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
#include <linux/delay.h>
#include <linux/err.h>
@@ -45,90 +32,79 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
dw_writel(dev, dev->master_cfg, DW_IC_CON);
}
-/**
- * i2c_dw_init() - Initialize the designware I2C master hardware
- * @dev: device private data
- *
- * This functions configures and enables the I2C master.
- * This function is called during I2C init function, and in case of timeout at
- * run time.
- */
-static int i2c_dw_init_master(struct dw_i2c_dev *dev)
+static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
- u32 hcnt, lcnt;
- u32 reg, comp_param1;
+ u32 ic_clk = i2c_dw_clk_rate(dev);
+ const char *mode_str, *fp_str = "";
+ u32 comp_param1;
u32 sda_falling_time, scl_falling_time;
+ struct i2c_timings *t = &dev->timings;
int ret;
ret = i2c_dw_acquire_lock(dev);
if (ret)
return ret;
-
- reg = dw_readl(dev, DW_IC_COMP_TYPE);
- if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
- /* Configure register endianess access */
- dev->flags |= ACCESS_SWAP;
- } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
- /* Configure register access mode 16bit */
- dev->flags |= ACCESS_16BIT;
- } else if (reg != DW_IC_COMP_TYPE_VALUE) {
- dev_err(dev->dev,
- "Unknown Synopsys component type: 0x%08x\n", reg);
- i2c_dw_release_lock(dev);
- return -ENODEV;
- }
-
comp_param1 = dw_readl(dev, DW_IC_COMP_PARAM_1);
+ i2c_dw_release_lock(dev);
- /* Disable the adapter */
- __i2c_dw_disable(dev);
-
- /* Set standard and fast speed deviders for high/low periods */
-
- sda_falling_time = dev->sda_falling_time ?: 300; /* ns */
- scl_falling_time = dev->scl_falling_time ?: 300; /* ns */
+ /* Set standard and fast speed dividers for high/low periods */
+ sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
+ scl_falling_time = t->scl_fall_ns ?: 300; /* ns */
- /* Set SCL timing parameters for standard-mode */
- if (dev->ss_hcnt && dev->ss_lcnt) {
- hcnt = dev->ss_hcnt;
- lcnt = dev->ss_lcnt;
- } else {
- hcnt = i2c_dw_scl_hcnt(i2c_dw_clk_rate(dev),
+ /* Calculate SCL timing parameters for standard mode if not set */
+ if (!dev->ss_hcnt || !dev->ss_lcnt) {
+ dev->ss_hcnt =
+ i2c_dw_scl_hcnt(ic_clk,
4000, /* tHD;STA = tHIGH = 4.0 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
- lcnt = i2c_dw_scl_lcnt(i2c_dw_clk_rate(dev),
+ dev->ss_lcnt =
+ i2c_dw_scl_lcnt(ic_clk,
4700, /* tLOW = 4.7 us */
scl_falling_time,
0); /* No offset */
}
- dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
- dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
- dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
-
- /* Set SCL timing parameters for fast-mode or fast-mode plus */
- if ((dev->clk_freq == 1000000) && dev->fp_hcnt && dev->fp_lcnt) {
- hcnt = dev->fp_hcnt;
- lcnt = dev->fp_lcnt;
- } else if (dev->fs_hcnt && dev->fs_lcnt) {
- hcnt = dev->fs_hcnt;
- lcnt = dev->fs_lcnt;
- } else {
- hcnt = i2c_dw_scl_hcnt(i2c_dw_clk_rate(dev),
+ dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
+ dev->ss_hcnt, dev->ss_lcnt);
+
+ /*
+	 * Set SCL timing parameters for fast mode or fast mode plus. The
+	 * only difference is the timing parameter values since the registers
+	 * are the same.
+ */
+ if (t->bus_freq_hz == 1000000) {
+ /*
+		 * Check whether fast mode plus parameters are available and
+		 * use fast mode if not.
+ */
+ if (dev->fp_hcnt && dev->fp_lcnt) {
+ dev->fs_hcnt = dev->fp_hcnt;
+ dev->fs_lcnt = dev->fp_lcnt;
+ fp_str = " Plus";
+ }
+ }
+ /*
+	 * Calculate SCL timing parameters for fast mode if not set. They are
+	 * also needed in high speed mode.
+ */
+ if (!dev->fs_hcnt || !dev->fs_lcnt) {
+ dev->fs_hcnt =
+ i2c_dw_scl_hcnt(ic_clk,
600, /* tHD;STA = tHIGH = 0.6 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
- lcnt = i2c_dw_scl_lcnt(i2c_dw_clk_rate(dev),
+ dev->fs_lcnt =
+ i2c_dw_scl_lcnt(ic_clk,
1300, /* tLOW = 1.3 us */
scl_falling_time,
0); /* No offset */
}
- dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
- dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
- dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+ dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
+ fp_str, dev->fs_hcnt, dev->fs_lcnt);
+	/* Check whether high speed is possible and fall back to fast mode if not */
if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
DW_IC_CON_SPEED_HIGH) {
if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
@@ -136,37 +112,70 @@ static int i2c_dw_init_master(struct dw_i2c_dev *dev)
dev_err(dev->dev, "High Speed not supported!\n");
dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
dev->master_cfg |= DW_IC_CON_SPEED_FAST;
+ dev->hs_hcnt = 0;
+ dev->hs_lcnt = 0;
} else if (dev->hs_hcnt && dev->hs_lcnt) {
- hcnt = dev->hs_hcnt;
- lcnt = dev->hs_lcnt;
- dw_writel(dev, hcnt, DW_IC_HS_SCL_HCNT);
- dw_writel(dev, lcnt, DW_IC_HS_SCL_LCNT);
- dev_dbg(dev->dev, "HighSpeed-mode HCNT:LCNT = %d:%d\n",
- hcnt, lcnt);
+ dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
+ dev->hs_hcnt, dev->hs_lcnt);
}
}
- /* Configure SDA Hold Time if required */
- reg = dw_readl(dev, DW_IC_COMP_VERSION);
- if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
- if (!dev->sda_hold_time) {
- /* Keep previous hold time setting if no one set it */
- dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
- }
- /*
- * Workaround for avoiding TX arbitration lost in case I2C
- * slave pulls SDA down "too quickly" after falling egde of
- * SCL by enabling non-zero SDA RX hold. Specification says it
- * extends incoming SDA low to high transition while SCL is
- * high but it apprears to help also above issue.
- */
- if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
- dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
- dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- } else if (dev->sda_hold_time) {
- dev_warn(dev->dev,
- "Hardware too old to adjust SDA hold time.\n");
+ ret = i2c_dw_set_sda_hold(dev);
+ if (ret)
+ goto out;
+
+ switch (dev->master_cfg & DW_IC_CON_SPEED_MASK) {
+ case DW_IC_CON_SPEED_STD:
+ mode_str = "Standard Mode";
+ break;
+ case DW_IC_CON_SPEED_HIGH:
+ mode_str = "High Speed Mode";
+ break;
+ default:
+ mode_str = "Fast Mode";
}
+ dev_dbg(dev->dev, "Bus speed: %s%s\n", mode_str, fp_str);
+
+out:
+ return ret;
+}
+
+/**
+ * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware
+ * @dev: device private data
+ *
+ * This function configures and enables the I2C master.
+ * It is called during I2C initialization, and again at run time in the
+ * case of a timeout.
+ */
+static int i2c_dw_init_master(struct dw_i2c_dev *dev)
+{
+ int ret;
+
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return ret;
+
+ /* Disable the adapter */
+ __i2c_dw_disable(dev);
+
+ /* Write standard speed timing parameters */
+ dw_writel(dev, dev->ss_hcnt, DW_IC_SS_SCL_HCNT);
+ dw_writel(dev, dev->ss_lcnt, DW_IC_SS_SCL_LCNT);
+
+ /* Write fast mode/fast mode plus timing parameters */
+ dw_writel(dev, dev->fs_hcnt, DW_IC_FS_SCL_HCNT);
+ dw_writel(dev, dev->fs_lcnt, DW_IC_FS_SCL_LCNT);
+
+ /* Write high speed timing parameters if supported */
+ if (dev->hs_hcnt && dev->hs_lcnt) {
+ dw_writel(dev, dev->hs_hcnt, DW_IC_HS_SCL_HCNT);
+ dw_writel(dev, dev->hs_lcnt, DW_IC_HS_SCL_LCNT);
+ }
+
+ /* Write SDA hold time if supported */
+ if (dev->sda_hold_time)
+ dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
i2c_dw_configure_fifo_master(dev);
i2c_dw_release_lock(dev);
@@ -253,13 +262,6 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
break;
}
- if (msgs[dev->msg_write_idx].len == 0) {
- dev_err(dev->dev,
- "%s: invalid message length\n", __func__);
- dev->msg_err = -EINVAL;
- break;
- }
-
if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
/* new i2c_msg */
buf = msgs[dev->msg_write_idx].buf;
@@ -502,6 +504,10 @@ static const struct i2c_algorithm i2c_dw_algo = {
.functionality = i2c_dw_func,
};
+static const struct i2c_adapter_quirks i2c_dw_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
u32 stat;
@@ -681,6 +687,14 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
dev->disable = i2c_dw_disable;
dev->disable_int = i2c_dw_disable_int;
+ ret = i2c_dw_set_reg_access(dev);
+ if (ret)
+ return ret;
+
+ ret = i2c_dw_set_timings_master(dev);
+ if (ret)
+ return ret;
+
ret = dev->init(dev);
if (ret)
return ret;
@@ -689,6 +703,7 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
"Synopsys DesignWare I2C adapter");
adap->retries = 3;
adap->algo = &i2c_dw_algo;
+ adap->quirks = &i2c_dw_quirks;
adap->dev.parent = dev->dev;
i2c_set_adapdata(adap, dev);
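With the open-coded `len == 0` check removed from i2c_dw_xfer_msg(), rejection of zero-length messages moves into the I2C core: an adapter that sets I2C_AQ_NO_ZERO_LEN in its quirks never sees such a message in ->master_xfer(). A hedged sketch of the same opt-in for some other driver (my_quirks and my_probe are illustrative):

/* Sketch: let the I2C core reject zero-length messages up front. */
static const struct i2c_adapter_quirks my_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

static int my_probe(struct platform_device *pdev)
{
	/* ... allocate and fill the adapter as usual ... */
	adap->quirks = &my_quirks;
	return i2c_add_adapter(adap);
}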
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 86e1bd0b82e9..d50f80487214 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synopsys DesignWare I2C adapter driver (master only).
*
@@ -7,22 +8,7 @@
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
* Copyright (C) 2011, 2015, 2016 Intel Corporation.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
-
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -105,6 +91,7 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
case 0x0817:
c->bus_cfg &= ~DW_IC_CON_SPEED_MASK;
c->bus_cfg |= DW_IC_CON_SPEED_STD;
+ /* fall through */
case 0x0818:
case 0x0819:
c->bus_num = pdev->device - 0x817 + 3;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 5660daf6c92e..1a8d2da5b000 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synopsys DesignWare I2C adapter driver.
*
@@ -6,20 +7,6 @@
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
#include <linux/acpi.h>
#include <linux/clk-provider.h>
@@ -96,6 +83,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
static int dw_i2c_acpi_configure(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct i2c_timings *t = &dev->timings;
u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0;
acpi_handle handle = ACPI_HANDLE(&pdev->dev);
const struct acpi_device_id *id;
@@ -115,7 +103,7 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht);
dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
- switch (dev->clk_freq) {
+ switch (t->bus_freq_hz) {
case 100000:
dev->sda_hold_time = ss_ht;
break;
@@ -175,6 +163,8 @@ static inline int dw_i2c_acpi_configure(struct platform_device *pdev)
static void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
+ struct i2c_timings *t = &dev->timings;
+
dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
@@ -182,7 +172,7 @@ static void i2c_dw_configure_master(struct dw_i2c_dev *dev)
dev->mode = DW_IC_MASTER;
- switch (dev->clk_freq) {
+ switch (t->bus_freq_hz) {
case 100000:
dev->master_cfg |= DW_IC_CON_SPEED_STD;
break;
@@ -240,7 +230,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct i2c_adapter *adap;
struct dw_i2c_dev *dev;
- u32 acpi_speed, ht = 0;
+ struct i2c_timings *t;
+ u32 acpi_speed;
struct resource *mem;
int i, irq, ret;
static const int supported_speeds[] = {
@@ -272,18 +263,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
reset_control_deassert(dev->rst);
}
- if (pdata) {
- dev->clk_freq = pdata->i2c_scl_freq;
- } else {
- device_property_read_u32(&pdev->dev, "i2c-sda-hold-time-ns",
- &ht);
- device_property_read_u32(&pdev->dev, "i2c-sda-falling-time-ns",
- &dev->sda_falling_time);
- device_property_read_u32(&pdev->dev, "i2c-scl-falling-time-ns",
- &dev->scl_falling_time);
- device_property_read_u32(&pdev->dev, "clock-frequency",
- &dev->clk_freq);
- }
+ t = &dev->timings;
+ if (pdata)
+ t->bus_freq_hz = pdata->i2c_scl_freq;
+ else
+ i2c_parse_fw_timings(&pdev->dev, t, false);
acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
/*
@@ -300,12 +284,12 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
* Find bus speed from the "clock-frequency" device property, ACPI
* or by using fast mode if neither is set.
*/
- if (acpi_speed && dev->clk_freq)
- dev->clk_freq = min(dev->clk_freq, acpi_speed);
- else if (acpi_speed || dev->clk_freq)
- dev->clk_freq = max(dev->clk_freq, acpi_speed);
+ if (acpi_speed && t->bus_freq_hz)
+ t->bus_freq_hz = min(t->bus_freq_hz, acpi_speed);
+ else if (acpi_speed || t->bus_freq_hz)
+ t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed);
else
- dev->clk_freq = 400000;
+ t->bus_freq_hz = 400000;
if (has_acpi_companion(&pdev->dev))
dw_i2c_acpi_configure(pdev);
@@ -314,11 +298,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
* Only standard mode at 100kHz, fast mode at 400kHz,
* fast mode plus at 1MHz and high speed mode at 3.4MHz are supported.
*/
- if (dev->clk_freq != 100000 && dev->clk_freq != 400000
- && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
+ if (t->bus_freq_hz != 100000 && t->bus_freq_hz != 400000 &&
+ t->bus_freq_hz != 1000000 && t->bus_freq_hz != 3400000) {
dev_err(&pdev->dev,
"%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
- dev->clk_freq);
+ t->bus_freq_hz);
ret = -EINVAL;
goto exit_reset;
}
@@ -334,12 +318,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (!i2c_dw_prepare_clk(dev, true)) {
+ u64 clk_khz;
+
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
+ clk_khz = dev->get_clk_rate_khz(dev);
- if (!dev->sda_hold_time && ht)
- dev->sda_hold_time = div_u64(
- (u64)dev->get_clk_rate_khz(dev) * ht + 500000,
- 1000000);
+ if (!dev->sda_hold_time && t->sda_hold_ns)
+ dev->sda_hold_time =
+ div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000);
}
dw_i2c_set_fifo_size(dev, pdev->id);
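The platdrv hunk above converts the firmware-supplied SDA hold time from nanoseconds into clock cycles with the same round-to-nearest idiom: cycles = (clk_khz * hold_ns + 500000) / 1000000. A standalone check with illustrative numbers (100000 kHz clock, 300 ns hold):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_khz = 100000, hold_ns = 300;	/* illustrative */
	uint64_t cycles = (clk_khz * hold_ns + 500000) / 1000000;

	printf("%llu\n", (unsigned long long)cycles);	/* prints 30 */
	return 0;
}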
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 8ce2cd368477..e7f9305b2dd9 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Synopsys DesignWare I2C adapter driver (slave only).
*
* Based on the Synopsys DesignWare I2C adapter driver (master).
*
* Copyright (C) 2016 Synopsys Inc.
- *
- * ----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * ----------------------------------------------------------------------------
- *
*/
#include <linux/delay.h>
#include <linux/err.h>
@@ -51,53 +38,18 @@ static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev)
*/
static int i2c_dw_init_slave(struct dw_i2c_dev *dev)
{
- u32 reg, comp_param1;
int ret;
ret = i2c_dw_acquire_lock(dev);
if (ret)
return ret;
- reg = dw_readl(dev, DW_IC_COMP_TYPE);
- if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
- /* Configure register endianness access. */
- dev->flags |= ACCESS_SWAP;
- } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
- /* Configure register access mode 16bit. */
- dev->flags |= ACCESS_16BIT;
- } else if (reg != DW_IC_COMP_TYPE_VALUE) {
- dev_err(dev->dev,
- "Unknown Synopsys component type: 0x%08x\n", reg);
- i2c_dw_release_lock(dev);
- return -ENODEV;
- }
-
- comp_param1 = dw_readl(dev, DW_IC_COMP_PARAM_1);
-
/* Disable the adapter. */
__i2c_dw_disable(dev);
- /* Configure SDA Hold Time if required. */
- reg = dw_readl(dev, DW_IC_COMP_VERSION);
- if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
- if (!dev->sda_hold_time) {
- /* Keep previous hold time setting if no one set it. */
- dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
- }
- /*
- * Workaround for avoiding TX arbitration lost in case I2C
- * slave pulls SDA down "too quickly" after falling egde of
- * SCL by enabling non-zero SDA RX hold. Specification says it
- * extends incoming SDA low to high transition while SCL is
- * high but it apprears to help also above issue.
- */
- if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
- dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+ /* Write SDA hold time if supported */
+ if (dev->sda_hold_time)
dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- } else {
- dev_warn(dev->dev,
- "Hardware too old to adjust SDA hold time.\n");
- }
i2c_dw_configure_fifo_slave(dev);
i2c_dw_release_lock(dev);
@@ -299,6 +251,14 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
dev->disable = i2c_dw_disable;
dev->disable_int = i2c_dw_disable_int;
+ ret = i2c_dw_set_reg_access(dev);
+ if (ret)
+ return ret;
+
+ ret = i2c_dw_set_sda_hold(dev);
+ if (ret)
+ return ret;
+
ret = dev->init(dev);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index de82ad8ff534..c1ce2299a76e 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -176,7 +176,10 @@
#define EXYNOS5_I2C_TIMEOUT (msecs_to_jiffies(100))
-#define HSI2C_EXYNOS7 BIT(0)
+enum i2c_type_exynos {
+ I2C_TYPE_EXYNOS5,
+ I2C_TYPE_EXYNOS7,
+};
struct exynos5_i2c {
struct i2c_adapter adap;
@@ -212,27 +215,30 @@ struct exynos5_i2c {
/**
* struct exynos_hsi2c_variant - platform specific HSI2C driver data
* @fifo_depth: the fifo depth supported by the HSI2C module
+ * @hw: the hardware variant of Exynos I2C controller
*
* Specifies platform specific configuration of HSI2C module.
* Note: A structure for driver specific platform data is used for future
* expansion of its usage.
*/
struct exynos_hsi2c_variant {
- unsigned int fifo_depth;
- unsigned int hw;
+ unsigned int fifo_depth;
+ enum i2c_type_exynos hw;
};
static const struct exynos_hsi2c_variant exynos5250_hsi2c_data = {
.fifo_depth = 64,
+ .hw = I2C_TYPE_EXYNOS5,
};
static const struct exynos_hsi2c_variant exynos5260_hsi2c_data = {
.fifo_depth = 16,
+ .hw = I2C_TYPE_EXYNOS5,
};
static const struct exynos_hsi2c_variant exynos7_hsi2c_data = {
.fifo_depth = 16,
- .hw = HSI2C_EXYNOS7,
+ .hw = I2C_TYPE_EXYNOS7,
};
static const struct of_device_id exynos5_i2c_match[] = {
@@ -300,7 +306,7 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
*/
t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7;
temp = clkin / op_clk - 8 - t_ftl_cycle;
- if (i2c->variant->hw != HSI2C_EXYNOS7)
+ if (i2c->variant->hw != I2C_TYPE_EXYNOS7)
temp -= t_ftl_cycle;
div = temp / 512;
clk_cycle = temp / (div + 1) - 2;
@@ -424,7 +430,7 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
writel(int_status, i2c->regs + HSI2C_INT_STATUS);
/* handle interrupt related to the transfer status */
- if (i2c->variant->hw == HSI2C_EXYNOS7) {
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS7) {
if (int_status & HSI2C_INT_TRANS_DONE) {
i2c->trans_done = 1;
i2c->state = 0;
@@ -571,7 +577,7 @@ static void exynos5_i2c_bus_check(struct exynos5_i2c *i2c)
{
unsigned long timeout;
- if (i2c->variant->hw != HSI2C_EXYNOS7)
+ if (i2c->variant->hw != I2C_TYPE_EXYNOS7)
return;
/*
@@ -612,7 +618,7 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
unsigned long flags;
unsigned short trig_lvl;
- if (i2c->variant->hw == HSI2C_EXYNOS7)
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS7)
int_en |= HSI2C_INT_I2C_TRANS;
else
int_en |= HSI2C_INT_I2C;
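The exynos5 change swaps a bit-flag variant marker for a proper enum, so .hw = I2C_TYPE_EXYNOS5 is stated explicitly instead of relying on an implicit zero, and the comparisons become type-checked equality tests rather than bit tests. A standalone sketch of the same variant-data pattern with illustrative names:

#include <stdio.h>

enum my_hw_type { HW_V1, HW_V2 };	/* illustrative variant ids */

struct my_variant {
	unsigned int fifo_depth;
	enum my_hw_type hw;		/* was: unsigned int flag bits */
};

static const struct my_variant v1 = { .fifo_depth = 64, .hw = HW_V1 };
static const struct my_variant v2 = { .fifo_depth = 16, .hw = HW_V2 };

int main(void)
{
	const struct my_variant *tbl[] = { &v1, &v2 };

	for (int i = 0; i < 2; i++)
		if (tbl[i]->hw == HW_V2)	/* enum compare, not bit test */
			printf("variant %d is v2, fifo %u\n",
			       i, tbl[i]->fifo_depth);
	return 0;
}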
diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
new file mode 100644
index 000000000000..1e2be2219a60
--- /dev/null
+++ b/drivers/i2c/busses/i2c-fsi.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * FSI-attached I2C master algorithm
+ *
+ * Copyright 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fsi.h>
+#include <linux/i2c.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#define FSI_ENGID_I2C 0x7
+
+#define I2C_DEFAULT_CLK_DIV 6
+
+/* i2c registers */
+#define I2C_FSI_FIFO 0x00
+#define I2C_FSI_CMD 0x04
+#define I2C_FSI_MODE 0x08
+#define I2C_FSI_WATER_MARK 0x0C
+#define I2C_FSI_INT_MASK 0x10
+#define I2C_FSI_INT_COND 0x14
+#define I2C_FSI_OR_INT_MASK 0x14
+#define I2C_FSI_INTS 0x18
+#define I2C_FSI_AND_INT_MASK 0x18
+#define I2C_FSI_STAT 0x1C
+#define I2C_FSI_RESET_I2C 0x1C
+#define I2C_FSI_ESTAT 0x20
+#define I2C_FSI_RESET_ERR 0x20
+#define I2C_FSI_RESID_LEN 0x24
+#define I2C_FSI_SET_SCL 0x24
+#define I2C_FSI_PORT_BUSY 0x28
+#define I2C_FSI_RESET_SCL 0x2C
+#define I2C_FSI_SET_SDA 0x30
+#define I2C_FSI_RESET_SDA 0x34
+
+/* cmd register */
+#define I2C_CMD_WITH_START BIT(31)
+#define I2C_CMD_WITH_ADDR BIT(30)
+#define I2C_CMD_RD_CONT BIT(29)
+#define I2C_CMD_WITH_STOP BIT(28)
+#define I2C_CMD_FORCELAUNCH BIT(27)
+#define I2C_CMD_ADDR GENMASK(23, 17)
+#define I2C_CMD_READ BIT(16)
+#define I2C_CMD_LEN GENMASK(15, 0)
+
+/* mode register */
+#define I2C_MODE_CLKDIV GENMASK(31, 16)
+#define I2C_MODE_PORT GENMASK(15, 10)
+#define I2C_MODE_ENHANCED BIT(3)
+#define I2C_MODE_DIAG BIT(2)
+#define I2C_MODE_PACE_ALLOW BIT(1)
+#define I2C_MODE_WRAP BIT(0)
+
+/* watermark register */
+#define I2C_WATERMARK_HI GENMASK(15, 12)
+#define I2C_WATERMARK_LO GENMASK(7, 4)
+
+#define I2C_FIFO_HI_LVL 4
+#define I2C_FIFO_LO_LVL 4
+
+/* interrupt register */
+#define I2C_INT_INV_CMD BIT(15)
+#define I2C_INT_PARITY BIT(14)
+#define I2C_INT_BE_OVERRUN BIT(13)
+#define I2C_INT_BE_ACCESS BIT(12)
+#define I2C_INT_LOST_ARB BIT(11)
+#define I2C_INT_NACK BIT(10)
+#define I2C_INT_DAT_REQ BIT(9)
+#define I2C_INT_CMD_COMP BIT(8)
+#define I2C_INT_STOP_ERR BIT(7)
+#define I2C_INT_BUSY BIT(6)
+#define I2C_INT_IDLE BIT(5)
+
+/* status register */
+#define I2C_STAT_INV_CMD BIT(31)
+#define I2C_STAT_PARITY BIT(30)
+#define I2C_STAT_BE_OVERRUN BIT(29)
+#define I2C_STAT_BE_ACCESS BIT(28)
+#define I2C_STAT_LOST_ARB BIT(27)
+#define I2C_STAT_NACK BIT(26)
+#define I2C_STAT_DAT_REQ BIT(25)
+#define I2C_STAT_CMD_COMP BIT(24)
+#define I2C_STAT_STOP_ERR BIT(23)
+#define I2C_STAT_MAX_PORT GENMASK(19, 16)
+#define I2C_STAT_ANY_INT BIT(15)
+#define I2C_STAT_SCL_IN BIT(11)
+#define I2C_STAT_SDA_IN BIT(10)
+#define I2C_STAT_PORT_BUSY BIT(9)
+#define I2C_STAT_SELF_BUSY BIT(8)
+#define I2C_STAT_FIFO_COUNT GENMASK(7, 0)
+
+#define I2C_STAT_ERR (I2C_STAT_INV_CMD | \
+ I2C_STAT_PARITY | \
+ I2C_STAT_BE_OVERRUN | \
+ I2C_STAT_BE_ACCESS | \
+ I2C_STAT_LOST_ARB | \
+ I2C_STAT_NACK | \
+ I2C_STAT_STOP_ERR)
+#define I2C_STAT_ANY_RESP (I2C_STAT_ERR | \
+ I2C_STAT_DAT_REQ | \
+ I2C_STAT_CMD_COMP)
+
+/* extended status register */
+#define I2C_ESTAT_FIFO_SZ GENMASK(31, 24)
+#define I2C_ESTAT_SCL_IN_SY BIT(15)
+#define I2C_ESTAT_SDA_IN_SY BIT(14)
+#define I2C_ESTAT_S_SCL BIT(13)
+#define I2C_ESTAT_S_SDA BIT(12)
+#define I2C_ESTAT_M_SCL BIT(11)
+#define I2C_ESTAT_M_SDA BIT(10)
+#define I2C_ESTAT_HI_WATER BIT(9)
+#define I2C_ESTAT_LO_WATER BIT(8)
+#define I2C_ESTAT_PORT_BUSY BIT(7)
+#define I2C_ESTAT_SELF_BUSY BIT(6)
+#define I2C_ESTAT_VERSION GENMASK(4, 0)
+
+/* port busy register */
+#define I2C_PORT_BUSY_RESET BIT(31)
+
+/* wait for command complete or data request */
+#define I2C_CMD_SLEEP_MAX_US 500
+#define I2C_CMD_SLEEP_MIN_US 50
+
+/* wait after reset; choose time from legacy driver */
+#define I2C_RESET_SLEEP_MAX_US 2000
+#define I2C_RESET_SLEEP_MIN_US 1000
+
+/* choose timeout length from legacy driver; it's well tested */
+#define I2C_ABORT_TIMEOUT msecs_to_jiffies(100)
+
+struct fsi_i2c_master {
+ struct fsi_device *fsi;
+ u8 fifo_size;
+ struct list_head ports;
+ struct mutex lock;
+};
+
+struct fsi_i2c_port {
+ struct list_head list;
+ struct i2c_adapter adapter;
+ struct fsi_i2c_master *master;
+ u16 port;
+ u16 xfrd;
+};
+
+static int fsi_i2c_read_reg(struct fsi_device *fsi, unsigned int reg,
+ u32 *data)
+{
+ int rc;
+ __be32 data_be;
+
+ rc = fsi_device_read(fsi, reg, &data_be, sizeof(data_be));
+ if (rc)
+ return rc;
+
+ *data = be32_to_cpu(data_be);
+
+ return 0;
+}
+
+static int fsi_i2c_write_reg(struct fsi_device *fsi, unsigned int reg,
+ u32 *data)
+{
+ __be32 data_be = cpu_to_be32p(data);
+
+ return fsi_device_write(fsi, reg, &data_be, sizeof(data_be));
+}
+
+static int fsi_i2c_dev_init(struct fsi_i2c_master *i2c)
+{
+ int rc;
+ u32 mode = I2C_MODE_ENHANCED, extended_status, watermark;
+ u32 interrupt = 0;
+
+ /* since we use polling, disable interrupts */
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_INT_MASK, &interrupt);
+ if (rc)
+ return rc;
+
+ mode |= FIELD_PREP(I2C_MODE_CLKDIV, I2C_DEFAULT_CLK_DIV);
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return rc;
+
+ rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_ESTAT, &extended_status);
+ if (rc)
+ return rc;
+
+ i2c->fifo_size = FIELD_GET(I2C_ESTAT_FIFO_SZ, extended_status);
+ watermark = FIELD_PREP(I2C_WATERMARK_HI,
+ i2c->fifo_size - I2C_FIFO_HI_LVL);
+ watermark |= FIELD_PREP(I2C_WATERMARK_LO, I2C_FIFO_LO_LVL);
+
+ return fsi_i2c_write_reg(i2c->fsi, I2C_FSI_WATER_MARK, &watermark);
+}
+
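The FSI driver builds register values with GENMASK()/FIELD_PREP() and pulls fields back out with FIELD_GET(); the macros shift the value into (or out of) the masked position automatically. A minimal userspace re-implementation to show the arithmetic; the real macros live in <linux/bitfield.h> and these stand-ins are only for illustration:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for GENMASK()/FIELD_PREP()/FIELD_GET() */
#define GENMASK32(h, l)		(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP32(m, v)	(((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET32(m, r)	(((r) & (m)) >> __builtin_ctz(m))

#define MODE_CLKDIV	GENMASK32(31, 16)

int main(void)
{
	uint32_t mode = FIELD_PREP32(MODE_CLKDIV, 6);	/* clkdiv = 6 */

	printf("mode=0x%08x clkdiv=%u\n", (unsigned)mode,
	       (unsigned)FIELD_GET32(MODE_CLKDIV, mode));
	/* prints mode=0x00060000 clkdiv=6 */
	return 0;
}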
+static int fsi_i2c_set_port(struct fsi_i2c_port *port)
+{
+ int rc;
+ struct fsi_device *fsi = port->master->fsi;
+ u32 mode, dummy = 0;
+
+ rc = fsi_i2c_read_reg(fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return rc;
+
+ if (FIELD_GET(I2C_MODE_PORT, mode) == port->port)
+ return 0;
+
+ mode = (mode & ~I2C_MODE_PORT) | FIELD_PREP(I2C_MODE_PORT, port->port);
+ rc = fsi_i2c_write_reg(fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return rc;
+
+ /* reset engine when port is changed */
+ return fsi_i2c_write_reg(fsi, I2C_FSI_RESET_ERR, &dummy);
+}
+
+static int fsi_i2c_start(struct fsi_i2c_port *port, struct i2c_msg *msg,
+ bool stop)
+{
+ struct fsi_i2c_master *i2c = port->master;
+ u32 cmd = I2C_CMD_WITH_START | I2C_CMD_WITH_ADDR;
+
+ port->xfrd = 0;
+
+ if (msg->flags & I2C_M_RD)
+ cmd |= I2C_CMD_READ;
+
+ if (stop || msg->flags & I2C_M_STOP)
+ cmd |= I2C_CMD_WITH_STOP;
+
+ cmd |= FIELD_PREP(I2C_CMD_ADDR, msg->addr);
+ cmd |= FIELD_PREP(I2C_CMD_LEN, msg->len);
+
+ return fsi_i2c_write_reg(i2c->fsi, I2C_FSI_CMD, &cmd);
+}
+
+static int fsi_i2c_get_op_bytes(int op_bytes)
+{
+ /* fsi is limited to max 4 byte aligned ops */
+ if (op_bytes > 4)
+ return 4;
+ else if (op_bytes == 3)
+ return 2;
+ return op_bytes;
+}
+
+static int fsi_i2c_write_fifo(struct fsi_i2c_port *port, struct i2c_msg *msg,
+ u8 fifo_count)
+{
+ int write;
+ int rc;
+ struct fsi_i2c_master *i2c = port->master;
+ int bytes_to_write = i2c->fifo_size - fifo_count;
+ int bytes_remaining = msg->len - port->xfrd;
+
+ bytes_to_write = min(bytes_to_write, bytes_remaining);
+
+ while (bytes_to_write) {
+ write = fsi_i2c_get_op_bytes(bytes_to_write);
+
+ rc = fsi_device_write(i2c->fsi, I2C_FSI_FIFO,
+ &msg->buf[port->xfrd], write);
+ if (rc)
+ return rc;
+
+ port->xfrd += write;
+ bytes_to_write -= write;
+ }
+
+ return 0;
+}
+
+static int fsi_i2c_read_fifo(struct fsi_i2c_port *port, struct i2c_msg *msg,
+ u8 fifo_count)
+{
+ int read;
+ int rc;
+ struct fsi_i2c_master *i2c = port->master;
+ int bytes_to_read;
+ int xfr_remaining = msg->len - port->xfrd;
+ u32 dummy;
+
+ bytes_to_read = min_t(int, fifo_count, xfr_remaining);
+
+ while (bytes_to_read) {
+ read = fsi_i2c_get_op_bytes(bytes_to_read);
+
+ if (xfr_remaining) {
+ rc = fsi_device_read(i2c->fsi, I2C_FSI_FIFO,
+ &msg->buf[port->xfrd], read);
+ if (rc)
+ return rc;
+
+ port->xfrd += read;
+ xfr_remaining -= read;
+ } else {
+ /* no more buffer but data in fifo, need to clear it */
+ rc = fsi_device_read(i2c->fsi, I2C_FSI_FIFO, &dummy,
+ read);
+ if (rc)
+ return rc;
+ }
+
+ bytes_to_read -= read;
+ }
+
+ return 0;
+}
+
+static int fsi_i2c_get_scl(struct i2c_adapter *adap)
+{
+ u32 stat = 0;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ fsi_i2c_read_reg(i2c->fsi, I2C_FSI_STAT, &stat);
+
+ return !!(stat & I2C_STAT_SCL_IN);
+}
+
+static void fsi_i2c_set_scl(struct i2c_adapter *adap, int val)
+{
+ u32 dummy = 0;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ if (val)
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_SET_SCL, &dummy);
+ else
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_SCL, &dummy);
+}
+
+static int fsi_i2c_get_sda(struct i2c_adapter *adap)
+{
+ u32 stat = 0;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ fsi_i2c_read_reg(i2c->fsi, I2C_FSI_STAT, &stat);
+
+ return !!(stat & I2C_STAT_SDA_IN);
+}
+
+static void fsi_i2c_set_sda(struct i2c_adapter *adap, int val)
+{
+ u32 dummy = 0;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ if (val)
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_SET_SDA, &dummy);
+ else
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_SDA, &dummy);
+}
+
+static void fsi_i2c_prepare_recovery(struct i2c_adapter *adap)
+{
+ int rc;
+ u32 mode;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return;
+
+ mode |= I2C_MODE_DIAG;
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+}
+
+static void fsi_i2c_unprepare_recovery(struct i2c_adapter *adap)
+{
+ int rc;
+ u32 mode;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *i2c = port->master;
+
+ rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return;
+
+ mode &= ~I2C_MODE_DIAG;
+ fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+}
+
+static int fsi_i2c_reset_bus(struct fsi_i2c_master *i2c,
+ struct fsi_i2c_port *port)
+{
+ int rc;
+ u32 stat, dummy = 0;
+
+ /* force bus reset, ignore errors */
+ i2c_recover_bus(&port->adapter);
+
+ /* reset errors */
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_ERR, &dummy);
+ if (rc)
+ return rc;
+
+ /* wait for command complete */
+ usleep_range(I2C_RESET_SLEEP_MIN_US, I2C_RESET_SLEEP_MAX_US);
+
+ rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_STAT, &stat);
+ if (rc)
+ return rc;
+
+ if (stat & I2C_STAT_CMD_COMP)
+ return 0;
+
+ /* failed to get command complete; reset engine again */
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_I2C, &dummy);
+ if (rc)
+ return rc;
+
+ /* re-init engine again */
+ return fsi_i2c_dev_init(i2c);
+}
+
+static int fsi_i2c_reset_engine(struct fsi_i2c_master *i2c, u16 port)
+{
+ int rc;
+ u32 mode, dummy = 0;
+
+ /* reset engine */
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_RESET_I2C, &dummy);
+ if (rc)
+ return rc;
+
+ /* re-init engine */
+ rc = fsi_i2c_dev_init(i2c);
+ if (rc)
+ return rc;
+
+ rc = fsi_i2c_read_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return rc;
+
+ /* set port; default after reset is 0 */
+ if (port) {
+ mode &= ~I2C_MODE_PORT;
+ mode |= FIELD_PREP(I2C_MODE_PORT, port);
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_MODE, &mode);
+ if (rc)
+ return rc;
+ }
+
+ /* reset busy register; hw workaround */
+ dummy = I2C_PORT_BUSY_RESET;
+ rc = fsi_i2c_write_reg(i2c->fsi, I2C_FSI_PORT_BUSY, &dummy);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int fsi_i2c_abort(struct fsi_i2c_port *port, u32 status)
+{
+ int rc;
+ unsigned long start;
+ u32 cmd = I2C_CMD_WITH_STOP;
+ u32 stat;
+ struct fsi_i2c_master *i2c = port->master;
+ struct fsi_device *fsi = i2c->fsi;
+
+ rc = fsi_i2c_reset_engine(i2c, port->port);
+ if (rc)
+ return rc;
+
+ rc = fsi_i2c_read_reg(fsi, I2C_FSI_STAT, &stat);
+ if (rc)
+ return rc;
+
+	/* if sda is low, perform full bus reset */
+ if (!(stat & I2C_STAT_SDA_IN)) {
+ rc = fsi_i2c_reset_bus(i2c, port);
+ if (rc)
+ return rc;
+ }
+
+ /* skip final stop command for these errors */
+ if (status & (I2C_STAT_PARITY | I2C_STAT_LOST_ARB | I2C_STAT_STOP_ERR))
+ return 0;
+
+ /* write stop command */
+ rc = fsi_i2c_write_reg(fsi, I2C_FSI_CMD, &cmd);
+ if (rc)
+ return rc;
+
+ /* wait until we see command complete in the master */
+ start = jiffies;
+
+ do {
+ rc = fsi_i2c_read_reg(fsi, I2C_FSI_STAT, &status);
+ if (rc)
+ return rc;
+
+ if (status & I2C_STAT_CMD_COMP)
+ return 0;
+
+ usleep_range(I2C_CMD_SLEEP_MIN_US, I2C_CMD_SLEEP_MAX_US);
+ } while (time_after(start + I2C_ABORT_TIMEOUT, jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int fsi_i2c_handle_status(struct fsi_i2c_port *port,
+ struct i2c_msg *msg, u32 status)
+{
+ int rc;
+ u8 fifo_count;
+
+ if (status & I2C_STAT_ERR) {
+ rc = fsi_i2c_abort(port, status);
+ if (rc)
+ return rc;
+
+ if (status & I2C_STAT_INV_CMD)
+ return -EINVAL;
+
+ if (status & (I2C_STAT_PARITY | I2C_STAT_BE_OVERRUN |
+ I2C_STAT_BE_ACCESS))
+ return -EPROTO;
+
+ if (status & I2C_STAT_NACK)
+ return -ENXIO;
+
+ if (status & I2C_STAT_LOST_ARB)
+ return -EAGAIN;
+
+ if (status & I2C_STAT_STOP_ERR)
+ return -EBADMSG;
+
+ return -EIO;
+ }
+
+ if (status & I2C_STAT_DAT_REQ) {
+ fifo_count = FIELD_GET(I2C_STAT_FIFO_COUNT, status);
+
+ if (msg->flags & I2C_M_RD)
+ return fsi_i2c_read_fifo(port, msg, fifo_count);
+
+ return fsi_i2c_write_fifo(port, msg, fifo_count);
+ }
+
+ if (status & I2C_STAT_CMD_COMP) {
+ if (port->xfrd < msg->len)
+ return -ENODATA;
+
+ return msg->len;
+ }
+
+ return 0;
+}
+
+static int fsi_i2c_wait(struct fsi_i2c_port *port, struct i2c_msg *msg,
+ unsigned long timeout)
+{
+ u32 status = 0;
+ int rc;
+ unsigned long start = jiffies;
+
+ do {
+ rc = fsi_i2c_read_reg(port->master->fsi, I2C_FSI_STAT,
+ &status);
+ if (rc)
+ return rc;
+
+ if (status & I2C_STAT_ANY_RESP) {
+ rc = fsi_i2c_handle_status(port, msg, status);
+ if (rc < 0)
+ return rc;
+
+ /* cmd complete and all data xfrd */
+ if (rc == msg->len)
+ return 0;
+
+			/* need to xfr more data, but may not need to wait */
+ continue;
+ }
+
+ usleep_range(I2C_CMD_SLEEP_MIN_US, I2C_CMD_SLEEP_MAX_US);
+ } while (time_after(start + timeout, jiffies));
+
+ return -ETIMEDOUT;
+}
+
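fsi_i2c_wait() above uses the standard jiffies polling idiom: loop while time_after(start + timeout, jiffies) holds, sleep a bounded interval between polls with usleep_range(), and return -ETIMEDOUT once the deadline passes. A hedged standalone sketch of the same control flow; the clock source and the poll_done() stand-in are illustrative, not the driver's:

#include <errno.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

/* illustrative stand-in for reading the status register */
static bool poll_done(void) { return false; }

static int wait_done(int timeout_s)
{
	time_t start = time(NULL);

	do {
		if (poll_done())
			return 0;
		usleep(500);		/* bounded sleep between polls */
	} while (time(NULL) - start < timeout_s);

	return -ETIMEDOUT;		/* deadline passed */
}

int main(void)
{
	return wait_done(1) ? 1 : 0;
}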
+static int fsi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ int i, rc;
+ unsigned long start_time;
+ struct fsi_i2c_port *port = adap->algo_data;
+ struct fsi_i2c_master *master = port->master;
+ struct i2c_msg *msg;
+
+ mutex_lock(&master->lock);
+
+ rc = fsi_i2c_set_port(port);
+ if (rc)
+ goto unlock;
+
+ for (i = 0; i < num; i++) {
+ msg = msgs + i;
+ start_time = jiffies;
+
+ rc = fsi_i2c_start(port, msg, i == num - 1);
+ if (rc)
+ goto unlock;
+
+ rc = fsi_i2c_wait(port, msg,
+ adap->timeout - (jiffies - start_time));
+ if (rc)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&master->lock);
+ return rc ? : num;
+}
+
+static u32 fsi_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_PROTOCOL_MANGLING |
+ I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
+}
+
+static struct i2c_bus_recovery_info fsi_i2c_bus_recovery_info = {
+ .recover_bus = i2c_generic_scl_recovery,
+ .get_scl = fsi_i2c_get_scl,
+ .set_scl = fsi_i2c_set_scl,
+ .get_sda = fsi_i2c_get_sda,
+ .set_sda = fsi_i2c_set_sda,
+ .prepare_recovery = fsi_i2c_prepare_recovery,
+ .unprepare_recovery = fsi_i2c_unprepare_recovery,
+};
+
+static const struct i2c_algorithm fsi_i2c_algorithm = {
+ .master_xfer = fsi_i2c_xfer,
+ .functionality = fsi_i2c_functionality,
+};
+
+static int fsi_i2c_probe(struct device *dev)
+{
+ struct fsi_i2c_master *i2c;
+ struct fsi_i2c_port *port;
+ struct device_node *np;
+ int rc;
+ u32 port_no;
+
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ mutex_init(&i2c->lock);
+ i2c->fsi = to_fsi_dev(dev);
+ INIT_LIST_HEAD(&i2c->ports);
+
+ rc = fsi_i2c_dev_init(i2c);
+ if (rc)
+ return rc;
+
+ /* Add adapter for each i2c port of the master. */
+ for_each_available_child_of_node(dev->of_node, np) {
+ rc = of_property_read_u32(np, "reg", &port_no);
+ if (rc || port_no > USHRT_MAX)
+ continue;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ break;
+
+ port->master = i2c;
+ port->port = port_no;
+
+ port->adapter.owner = THIS_MODULE;
+ port->adapter.dev.of_node = np;
+ port->adapter.dev.parent = dev;
+ port->adapter.algo = &fsi_i2c_algorithm;
+ port->adapter.bus_recovery_info = &fsi_i2c_bus_recovery_info;
+ port->adapter.algo_data = port;
+
+ snprintf(port->adapter.name, sizeof(port->adapter.name),
+ "i2c_bus-%u", port_no);
+
+ rc = i2c_add_adapter(&port->adapter);
+ if (rc < 0) {
+ dev_err(dev, "Failed to register adapter: %d\n", rc);
+ kfree(port);
+ continue;
+ }
+
+ list_add(&port->list, &i2c->ports);
+ }
+
+ dev_set_drvdata(dev, i2c);
+
+ return 0;
+}
+
+static int fsi_i2c_remove(struct device *dev)
+{
+ struct fsi_i2c_master *i2c = dev_get_drvdata(dev);
+ struct fsi_i2c_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &i2c->ports, list) {
+ list_del(&port->list);
+ i2c_del_adapter(&port->adapter);
+ kfree(port);
+ }
+
+ return 0;
+}
+
+static const struct fsi_device_id fsi_i2c_ids[] = {
+ { FSI_ENGID_I2C, FSI_VERSION_ANY },
+ { }
+};
+
+static struct fsi_driver fsi_i2c_driver = {
+ .id_table = fsi_i2c_ids,
+ .drv = {
+ .name = "i2c-fsi",
+ .bus = &fsi_bus_type,
+ .probe = fsi_i2c_probe,
+ .remove = fsi_i2c_remove,
+ },
+};
+
+module_fsi_driver(fsi_i2c_driver);
+
+MODULE_AUTHOR("Eddie James <eajames@us.ibm.com>");
+MODULE_DESCRIPTION("FSI attached I2C master");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 66f85bbf3591..c008d209f0b8 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -78,49 +78,43 @@ static struct dentry *i2c_gpio_debug_dir;
#define getscl(bd) ((bd)->getscl((bd)->data))
#define WIRE_ATTRIBUTE(wire) \
-static int fops_##wire##_get(void *data, u64 *val) \
-{ \
- struct i2c_gpio_private_data *priv = data; \
- \
- i2c_lock_adapter(&priv->adap); \
- *val = get##wire(&priv->bit_data); \
- i2c_unlock_adapter(&priv->adap); \
- return 0; \
-} \
-static int fops_##wire##_set(void *data, u64 val) \
-{ \
- struct i2c_gpio_private_data *priv = data; \
- \
- i2c_lock_adapter(&priv->adap); \
- set##wire(&priv->bit_data, val); \
- i2c_unlock_adapter(&priv->adap); \
- return 0; \
-} \
+static int fops_##wire##_get(void *data, u64 *val) \
+{ \
+ struct i2c_gpio_private_data *priv = data; \
+ \
+ i2c_lock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \
+ *val = get##wire(&priv->bit_data); \
+ i2c_unlock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \
+ return 0; \
+} \
+static int fops_##wire##_set(void *data, u64 val) \
+{ \
+ struct i2c_gpio_private_data *priv = data; \
+ \
+ i2c_lock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \
+ set##wire(&priv->bit_data, val); \
+ i2c_unlock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER); \
+ return 0; \
+} \
DEFINE_DEBUGFS_ATTRIBUTE(fops_##wire, fops_##wire##_get, fops_##wire##_set, "%llu\n")
WIRE_ATTRIBUTE(scl);
WIRE_ATTRIBUTE(sda);
-static int fops_incomplete_transfer_set(void *data, u64 addr)
+static void i2c_gpio_incomplete_transfer(struct i2c_gpio_private_data *priv,
+ u32 pattern, u8 pattern_size)
{
- struct i2c_gpio_private_data *priv = data;
struct i2c_algo_bit_data *bit_data = &priv->bit_data;
- int i, pattern;
+ int i;
- if (addr > 0x7f)
- return -EINVAL;
-
- /* ADDR (7 bit) + RD (1 bit) + SDA hi (1 bit) */
- pattern = (addr << 2) | 3;
-
- i2c_lock_adapter(&priv->adap);
+ i2c_lock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER);
/* START condition */
setsda(bit_data, 0);
udelay(bit_data->udelay);
- /* Send ADDR+RD, request ACK, don't send STOP */
- for (i = 8; i >= 0; i--) {
+ /* Send pattern, request ACK, don't send STOP */
+ for (i = pattern_size - 1; i >= 0; i--) {
setscl(bit_data, 0);
udelay(bit_data->udelay / 2);
setsda(bit_data, (pattern >> i) & 1);
@@ -129,11 +123,44 @@ static int fops_incomplete_transfer_set(void *data, u64 addr)
udelay(bit_data->udelay);
}
- i2c_unlock_adapter(&priv->adap);
+ i2c_unlock_bus(&priv->adap, I2C_LOCK_ROOT_ADAPTER);
+}
+
+static int fops_incomplete_addr_phase_set(void *data, u64 addr)
+{
+ struct i2c_gpio_private_data *priv = data;
+ u32 pattern;
+
+ if (addr > 0x7f)
+ return -EINVAL;
+
+ /* ADDR (7 bit) + RD (1 bit) + Client ACK, keep SDA hi (1 bit) */
+ pattern = (addr << 2) | 3;
+
+ i2c_gpio_incomplete_transfer(priv, pattern, 9);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_incomplete_addr_phase, NULL, fops_incomplete_addr_phase_set, "%llu\n");
+
+static int fops_incomplete_write_byte_set(void *data, u64 addr)
+{
+ struct i2c_gpio_private_data *priv = data;
+ u32 pattern;
+
+ if (addr > 0x7f)
+ return -EINVAL;
+
+ /* ADDR (7 bit) + WR (1 bit) + Client ACK (1 bit) */
+ pattern = (addr << 2) | 1;
+ /* 0x00 (8 bit) + Client ACK, keep SDA hi (1 bit) */
+ pattern = (pattern << 9) | 1;
+
+ i2c_gpio_incomplete_transfer(priv, pattern, 18);
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_incomplete_transfer, NULL, fops_incomplete_transfer_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_incomplete_write_byte, NULL, fops_incomplete_write_byte_set, "%llu\n");
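The fault-injector patterns above are just the bus bits packed into an integer, MSB first. For the address phase that is 7 address bits, 1 R/W bit, then 1 bit that keeps SDA high where the client would ACK, hence pattern = (addr << 2) | 3 clocked out over 9 cycles. Worked with an illustrative address 0x50: (0x50 << 2) | 3 = 0x143 = 0b101000011, i.e. address 1010000, RD = 1, SDA held high for the ACK slot:

#include <stdio.h>

int main(void)
{
	unsigned addr = 0x50;			/* illustrative 7-bit address */
	unsigned pattern = (addr << 2) | 3;	/* ADDR + RD + SDA-high ACK */
	int i;

	for (i = 8; i >= 0; i--)		/* 9 bits, MSB first */
		putchar('0' + ((pattern >> i) & 1));
	putchar('\n');				/* prints 101000011 */
	return 0;
}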
static void i2c_gpio_fault_injector_init(struct platform_device *pdev)
{
@@ -156,8 +183,10 @@ static void i2c_gpio_fault_injector_init(struct platform_device *pdev)
debugfs_create_file_unsafe("scl", 0600, priv->debug_dir, priv, &fops_scl);
debugfs_create_file_unsafe("sda", 0600, priv->debug_dir, priv, &fops_sda);
- debugfs_create_file_unsafe("incomplete_transfer", 0200, priv->debug_dir,
- priv, &fops_incomplete_transfer);
+ debugfs_create_file_unsafe("incomplete_address_phase", 0200, priv->debug_dir,
+ priv, &fops_incomplete_addr_phase);
+ debugfs_create_file_unsafe("incomplete_write_byte", 0200, priv->debug_dir,
+ priv, &fops_incomplete_write_byte);
}
static void i2c_gpio_fault_injector_exit(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index aa726607645e..941c223f6491 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -70,6 +70,7 @@
* Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes
* Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
* Cedar Fork (PCH) 0x18df 32 hard yes yes yes
+ * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -220,6 +221,7 @@
#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0
#define PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS 0x31d4
+#define PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS 0x34a3
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
@@ -1034,6 +1036,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
{ 0, }
};
@@ -1518,6 +1521,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
+ case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 0207e194f84b..c406700789e1 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
goto err_desc;
}
+ reinit_completion(&dma->cmd_complete);
txdesc->callback = i2c_imx_dma_callback;
txdesc->callback_param = i2c_imx;
if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -420,10 +421,14 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
return -EAGAIN;
}
- if (for_busy && (temp & I2SR_IBB))
+ if (for_busy && (temp & I2SR_IBB)) {
+ i2c_imx->stopped = 0;
break;
- if (!for_busy && !(temp & I2SR_IBB))
+ }
+ if (!for_busy && !(temp & I2SR_IBB)) {
+ i2c_imx->stopped = 1;
break;
+ }
if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
dev_dbg(&i2c_imx->adapter.dev,
"<%s> I2C bus is busy\n", __func__);
@@ -537,7 +542,6 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
result = i2c_imx_bus_busy(i2c_imx, 1);
if (result)
return result;
- i2c_imx->stopped = 0;
temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK;
temp &= ~I2CR_DMAEN;
@@ -566,10 +570,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
udelay(i2c_imx->disable_delay);
}
- if (!i2c_imx->stopped) {
+ if (!i2c_imx->stopped)
i2c_imx_bus_busy(i2c_imx, 0);
- i2c_imx->stopped = 1;
- }
/* Disable I2C controller */
temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
@@ -622,7 +624,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
* The first byte must be transmitted by the CPU.
*/
imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
- reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@@ -668,9 +669,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
struct imx_i2c_dma *dma = i2c_imx->dma;
struct device *dev = &i2c_imx->adapter.dev;
- temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
- temp |= I2CR_DMAEN;
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
dma->chan_using = dma->chan_rx;
dma->dma_transfer_dir = DMA_DEV_TO_MEM;
@@ -681,7 +679,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
if (result)
return result;
- reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@@ -728,7 +725,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
i2c_imx_bus_busy(i2c_imx, 0);
- i2c_imx->stopped = 1;
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -784,6 +780,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
int i, result;
unsigned int temp;
int block_data = msgs->flags & I2C_M_RECV_LEN;
+ int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data;
dev_dbg(&i2c_imx->adapter.dev,
"<%s> write slave address: addr=0x%x\n",
@@ -810,12 +807,14 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
*/
if ((msgs->len - 1) || block_data)
temp &= ~I2CR_TXAK;
+ if (use_dma)
+ temp |= I2CR_DMAEN;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
- if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data)
+ if (use_dma)
return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg);
/* read data */
@@ -851,7 +850,6 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
i2c_imx_bus_busy(i2c_imx, 0);
- i2c_imx->stopped = 1;
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -1010,7 +1008,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
"gpio");
rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
- rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
+ rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
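
The i2c-imx hunks above move reinit_completion() from just before the wait into i2c_imx_dma_xfer(), ahead of dmaengine_submit(). This closes a window where the DMA callback could fire and complete the old completion object before it was reinitialized, losing the wakeup. A minimal sketch of the race-free ordering, with hypothetical names standing in for the driver specifics:

	/* Sketch only: hypothetical driver context, kernel-style C. */
	#include <linux/completion.h>
	#include <linux/dmaengine.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	struct xfer_ctx {
		struct completion cmd_complete;
		struct dma_chan *chan;
	};

	static int start_dma_xfer(struct xfer_ctx *ctx,
				  struct dma_async_tx_descriptor *txdesc)
	{
		/*
		 * Reinitialize the completion BEFORE the descriptor is
		 * submitted. Once dmaengine_submit()/dma_async_issue_pending()
		 * run, the callback may complete() at any time; a later
		 * reinit would discard that wakeup and the waiter would
		 * time out spuriously.
		 */
		reinit_completion(&ctx->cmd_complete);

		if (dma_submit_error(dmaengine_submit(txdesc)))
			return -EINVAL;
		dma_async_issue_pending(ctx->chan);

		if (!wait_for_completion_timeout(&ctx->cmd_complete,
						 msecs_to_jiffies(1000)))
			return -ETIMEDOUT;
		return 0;
	}
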
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 642c58946d8d..7d79317a1046 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -567,9 +567,6 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
msg->addr, msg->len, msg->flags, stop);
- if (msg->len == 0)
- return -EINVAL;
-
/*
* The MX28 I2C IP block can only do PIO READ for transfers of up to
* 4 bytes in length. The write transfer is not limited as it can use
@@ -683,6 +680,10 @@ static const struct i2c_algorithm mxs_i2c_algo = {
.functionality = mxs_i2c_func,
};
+static const struct i2c_adapter_quirks mxs_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, uint32_t speed)
{
/* The I2C block clock runs at 24MHz */
@@ -854,6 +855,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &mxs_i2c_algo;
+ adap->quirks = &mxs_i2c_quirks;
adap->dev.parent = dev;
adap->nr = pdev->id;
adap->dev.of_node = pdev->dev.of_node;
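
Several hunks in this series replace open-coded msg->len == 0 checks with adapter quirk flags; the i2c core then rejects unsupported messages before the driver's xfer callback ever runs. A hedged sketch of what a driver declares (the quirk flags and fields are real; the adapter name is illustrative):

	#include <linux/i2c.h>

	static const struct i2c_adapter_quirks example_quirks = {
		/* core rejects zero-length messages for this adapter */
		.flags = I2C_AQ_NO_ZERO_LEN,
		/* optional per-message size caps, also enforced by the core */
		.max_read_len = 240,
		.max_write_len = 240,
	};

	static void example_setup(struct i2c_adapter *adap)
	{
		adap->quirks = &example_quirks; /* checked on the i2c_transfer() path */
	}
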
diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c
new file mode 100644
index 000000000000..96b4572e6d9c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-owl.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Actions Semiconductor Owl SoC's I2C driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+/* I2C registers */
+#define OWL_I2C_REG_CTL 0x0000
+#define OWL_I2C_REG_CLKDIV 0x0004
+#define OWL_I2C_REG_STAT 0x0008
+#define OWL_I2C_REG_ADDR 0x000C
+#define OWL_I2C_REG_TXDAT 0x0010
+#define OWL_I2C_REG_RXDAT 0x0014
+#define OWL_I2C_REG_CMD 0x0018
+#define OWL_I2C_REG_FIFOCTL 0x001C
+#define OWL_I2C_REG_FIFOSTAT 0x0020
+#define OWL_I2C_REG_DATCNT 0x0024
+#define OWL_I2C_REG_RCNT 0x0028
+
+/* I2Cx_CTL Bit Mask */
+#define OWL_I2C_CTL_RB BIT(1)
+#define OWL_I2C_CTL_GBCC(x) (((x) & 0x3) << 2)
+#define OWL_I2C_CTL_GBCC_NONE OWL_I2C_CTL_GBCC(0)
+#define OWL_I2C_CTL_GBCC_START OWL_I2C_CTL_GBCC(1)
+#define OWL_I2C_CTL_GBCC_STOP OWL_I2C_CTL_GBCC(2)
+#define OWL_I2C_CTL_GBCC_RSTART OWL_I2C_CTL_GBCC(3)
+#define OWL_I2C_CTL_IRQE BIT(5)
+#define OWL_I2C_CTL_EN BIT(7)
+#define OWL_I2C_CTL_AE BIT(8)
+#define OWL_I2C_CTL_SHSM BIT(10)
+
+#define OWL_I2C_DIV_FACTOR(x) ((x) & 0xff)
+
+/* I2Cx_STAT Bit Mask */
+#define OWL_I2C_STAT_RACK BIT(0)
+#define OWL_I2C_STAT_BEB BIT(1)
+#define OWL_I2C_STAT_IRQP BIT(2)
+#define OWL_I2C_STAT_LAB BIT(3)
+#define OWL_I2C_STAT_STPD BIT(4)
+#define OWL_I2C_STAT_STAD BIT(5)
+#define OWL_I2C_STAT_BBB BIT(6)
+#define OWL_I2C_STAT_TCB BIT(7)
+#define OWL_I2C_STAT_LBST BIT(8)
+#define OWL_I2C_STAT_SAMB BIT(9)
+#define OWL_I2C_STAT_SRGC BIT(10)
+
+/* I2Cx_CMD Bit Mask */
+#define OWL_I2C_CMD_SBE BIT(0)
+#define OWL_I2C_CMD_RBE BIT(4)
+#define OWL_I2C_CMD_DE BIT(8)
+#define OWL_I2C_CMD_NS BIT(9)
+#define OWL_I2C_CMD_SE BIT(10)
+#define OWL_I2C_CMD_MSS BIT(11)
+#define OWL_I2C_CMD_WRS BIT(12)
+#define OWL_I2C_CMD_SECL BIT(15)
+
+#define OWL_I2C_CMD_AS(x) (((x) & 0x7) << 1)
+#define OWL_I2C_CMD_SAS(x) (((x) & 0x7) << 5)
+
+/* I2Cx_FIFOCTL Bit Mask */
+#define OWL_I2C_FIFOCTL_NIB BIT(0)
+#define OWL_I2C_FIFOCTL_RFR BIT(1)
+#define OWL_I2C_FIFOCTL_TFR BIT(2)
+
+/* I2Cx_FIFOSTAT Bit Mask */
+#define OWL_I2C_FIFOSTAT_RNB BIT(1)
+#define OWL_I2C_FIFOSTAT_RFE BIT(2)
+#define OWL_I2C_FIFOSTAT_TFF BIT(5)
+#define OWL_I2C_FIFOSTAT_TFD GENMASK(23, 16)
+#define OWL_I2C_FIFOSTAT_RFD GENMASK(15, 8)
+
+/* I2C bus timeout */
+#define OWL_I2C_TIMEOUT msecs_to_jiffies(4 * 1000)
+
+#define OWL_I2C_MAX_RETRIES 50
+
+#define OWL_I2C_DEF_SPEED_HZ 100000
+#define OWL_I2C_MAX_SPEED_HZ 400000
+
+struct owl_i2c_dev {
+ struct i2c_adapter adap;
+ struct i2c_msg *msg;
+ struct completion msg_complete;
+ struct clk *clk;
+ spinlock_t lock;
+ void __iomem *base;
+ unsigned long clk_rate;
+ u32 bus_freq;
+ u32 msg_ptr;
+ int err;
+};
+
+static void owl_i2c_update_reg(void __iomem *reg, unsigned int val, bool state)
+{
+ unsigned int regval;
+
+ regval = readl(reg);
+
+ if (state)
+ regval |= val;
+ else
+ regval &= ~val;
+
+ writel(regval, reg);
+}
+
+static void owl_i2c_reset(struct owl_i2c_dev *i2c_dev)
+{
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
+ OWL_I2C_CTL_EN, false);
+ mdelay(1);
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
+ OWL_I2C_CTL_EN, true);
+
+ /* Clear status registers */
+ writel(0, i2c_dev->base + OWL_I2C_REG_STAT);
+}
+
+static int owl_i2c_reset_fifo(struct owl_i2c_dev *i2c_dev)
+{
+ unsigned int val, timeout = 0;
+
+ /* Reset FIFO */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL,
+ OWL_I2C_FIFOCTL_RFR | OWL_I2C_FIFOCTL_TFR,
+ true);
+
+ /* Wait up to 50ms for the FIFO reset to complete */
+ do {
+ val = readl(i2c_dev->base + OWL_I2C_REG_FIFOCTL);
+ if (!(val & (OWL_I2C_FIFOCTL_RFR | OWL_I2C_FIFOCTL_TFR)))
+ break;
+ usleep_range(500, 1000);
+ } while (timeout++ < OWL_I2C_MAX_RETRIES);
+
+ if (timeout > OWL_I2C_MAX_RETRIES) {
+ dev_err(&i2c_dev->adap.dev, "FIFO reset timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void owl_i2c_set_freq(struct owl_i2c_dev *i2c_dev)
+{
+ unsigned int val;
+
+ val = DIV_ROUND_UP(i2c_dev->clk_rate, i2c_dev->bus_freq * 16);
+
+ /* Set clock divider factor */
+ writel(OWL_I2C_DIV_FACTOR(val), i2c_dev->base + OWL_I2C_REG_CLKDIV);
+}
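
The divider above targets bus_freq with a fixed 16-cycle SCL granularity: div = ceil(clk_rate / (bus_freq * 16)). For instance, with a 24 MHz input clock (an assumed value; the real rate comes from clk_get_rate()) and a 400 kHz bus, div = ceil(24e6 / 6.4e6) = 4, for an actual SCL of 24 MHz / (4 * 16) = 375 kHz. The same arithmetic, standalone:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* assumed input clock; see owl_i2c_probe() for the real one */
		const unsigned long clk_rate = 24000000;
		const unsigned long bus_freq = 400000;
		unsigned long div = DIV_ROUND_UP(clk_rate, bus_freq * 16);

		/* div = 4 here; actual SCL = clk_rate / (div * 16) = 375 kHz */
		printf("div=%lu actual=%lu Hz\n", div, clk_rate / (div * 16));
		return 0;
	}
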
+
+static irqreturn_t owl_i2c_interrupt(int irq, void *_dev)
+{
+ struct owl_i2c_dev *i2c_dev = _dev;
+ struct i2c_msg *msg = i2c_dev->msg;
+ unsigned long flags;
+ unsigned int stat, fifostat;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+
+ i2c_dev->err = 0;
+
+ /* Handle NACK from slave */
+ fifostat = readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT);
+ if (fifostat & OWL_I2C_FIFOSTAT_RNB) {
+ i2c_dev->err = -ENXIO;
+ goto stop;
+ }
+
+ /* Handle bus error */
+ stat = readl(i2c_dev->base + OWL_I2C_REG_STAT);
+ if (stat & OWL_I2C_STAT_BEB) {
+ i2c_dev->err = -EIO;
+ goto stop;
+ }
+
+ /* Handle FIFO read */
+ if (msg->flags & I2C_M_RD) {
+ while ((readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT) &
+ OWL_I2C_FIFOSTAT_RFE) && i2c_dev->msg_ptr < msg->len) {
+ msg->buf[i2c_dev->msg_ptr++] = readl(i2c_dev->base +
+ OWL_I2C_REG_RXDAT);
+ }
+ } else {
+ /* Handle the remaining bytes which were not sent */
+ while (!(readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT) &
+ OWL_I2C_FIFOSTAT_TFF) && i2c_dev->msg_ptr < msg->len) {
+ writel(msg->buf[i2c_dev->msg_ptr++],
+ i2c_dev->base + OWL_I2C_REG_TXDAT);
+ }
+ }
+
+stop:
+ /* Clear pending interrupts */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_STAT,
+ OWL_I2C_STAT_IRQP, true);
+
+ complete_all(&i2c_dev->msg_complete);
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static u32 owl_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static int owl_i2c_check_bus_busy(struct i2c_adapter *adap)
+{
+ struct owl_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ unsigned long timeout;
+
+ /* Check for Bus busy */
+ timeout = jiffies + OWL_I2C_TIMEOUT;
+ while (readl(i2c_dev->base + OWL_I2C_REG_STAT) & OWL_I2C_STAT_BBB) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(&adap->dev, "Bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int owl_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct owl_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ struct i2c_msg *msg;
+ unsigned long time_left, flags;
+ unsigned int i2c_cmd, val;
+ unsigned int addr;
+ int ret, idx;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+
+ /* Reset I2C controller */
+ owl_i2c_reset(i2c_dev);
+
+ /* Set bus frequency */
+ owl_i2c_set_freq(i2c_dev);
+
+ /*
+ * The spinlock must be released before calling the FIFO reset and
+ * bus busy check below, since those functions may sleep
+ */
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+
+ /* Reset FIFO */
+ ret = owl_i2c_reset_fifo(i2c_dev);
+ if (ret)
+ goto unlocked_err_exit;
+
+ /* Check for bus busy */
+ ret = owl_i2c_check_bus_busy(adap);
+ if (ret)
+ goto unlocked_err_exit;
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+
+ /* Check for Arbitration lost */
+ val = readl(i2c_dev->base + OWL_I2C_REG_STAT);
+ if (val & OWL_I2C_STAT_LAB) {
+ val &= ~OWL_I2C_STAT_LAB;
+ writel(val, i2c_dev->base + OWL_I2C_REG_STAT);
+ ret = -EAGAIN;
+ goto err_exit;
+ }
+
+ reinit_completion(&i2c_dev->msg_complete);
+
+ /* Enable I2C controller interrupt */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
+ OWL_I2C_CTL_IRQE, true);
+
+ /*
+ * Select: FIFO enable, Master mode, Stop enable, Data count enable,
+ * Send start bit
+ */
+ i2c_cmd = OWL_I2C_CMD_SECL | OWL_I2C_CMD_MSS | OWL_I2C_CMD_SE |
+ OWL_I2C_CMD_NS | OWL_I2C_CMD_DE | OWL_I2C_CMD_SBE;
+
+ /* Handle repeated start condition */
+ if (num > 1) {
+ /* Set internal address length and enable repeated start */
+ i2c_cmd |= OWL_I2C_CMD_AS(msgs[0].len + 1) |
+ OWL_I2C_CMD_SAS(1) | OWL_I2C_CMD_RBE;
+
+ /* Write slave address */
+ addr = i2c_8bit_addr_from_msg(&msgs[0]);
+ writel(addr, i2c_dev->base + OWL_I2C_REG_TXDAT);
+
+ /* Write internal register address */
+ for (idx = 0; idx < msgs[0].len; idx++)
+ writel(msgs[0].buf[idx],
+ i2c_dev->base + OWL_I2C_REG_TXDAT);
+
+ msg = &msgs[1];
+ } else {
+ /* Set address length */
+ i2c_cmd |= OWL_I2C_CMD_AS(1);
+ msg = &msgs[0];
+ }
+
+ i2c_dev->msg = msg;
+ i2c_dev->msg_ptr = 0;
+
+ /* Set data count for the message */
+ writel(msg->len, i2c_dev->base + OWL_I2C_REG_DATCNT);
+
+ addr = i2c_8bit_addr_from_msg(msg);
+ writel(addr, i2c_dev->base + OWL_I2C_REG_TXDAT);
+
+ if (!(msg->flags & I2C_M_RD)) {
+ /* Write data to FIFO */
+ for (idx = 0; idx < msg->len; idx++) {
+ /* Check for FIFO full */
+ if (readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT) &
+ OWL_I2C_FIFOSTAT_TFF)
+ break;
+
+ writel(msg->buf[idx],
+ i2c_dev->base + OWL_I2C_REG_TXDAT);
+ }
+
+ i2c_dev->msg_ptr = idx;
+ }
+
+ /* Ignore the NACK if needed */
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL,
+ OWL_I2C_FIFOCTL_NIB, true);
+ else
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOCTL,
+ OWL_I2C_FIFOCTL_NIB, false);
+
+ /* Start the transfer */
+ writel(i2c_cmd, i2c_dev->base + OWL_I2C_REG_CMD);
+
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+
+ time_left = wait_for_completion_timeout(&i2c_dev->msg_complete,
+ adap->timeout);
+
+ spin_lock_irqsave(&i2c_dev->lock, flags);
+ if (time_left == 0) {
+ dev_err(&adap->dev, "Transaction timed out\n");
+ /* Send stop condition and release the bus */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
+ OWL_I2C_CTL_GBCC_STOP | OWL_I2C_CTL_RB,
+ true);
+ ret = -ETIMEDOUT;
+ goto err_exit;
+ }
+
+ ret = i2c_dev->err < 0 ? i2c_dev->err : num;
+
+err_exit:
+ spin_unlock_irqrestore(&i2c_dev->lock, flags);
+
+unlocked_err_exit:
+ /* Disable I2C controller */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_CTL,
+ OWL_I2C_CTL_EN, false);
+
+ return ret;
+}
+
+static const struct i2c_algorithm owl_i2c_algorithm = {
+ .master_xfer = owl_i2c_master_xfer,
+ .functionality = owl_i2c_func,
+};
+
+static const struct i2c_adapter_quirks owl_i2c_quirks = {
+ .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST,
+ .max_read_len = 240,
+ .max_write_len = 240,
+ .max_comb_1st_msg_len = 6,
+ .max_comb_2nd_msg_len = 240,
+};
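
These quirks restrict the owl adapter to write-then-read combined transfers whose first message is at most 6 bytes; owl_i2c_master_xfer() above then treats msgs[0] as the internal register address whenever num > 1. A sketch of a transfer that fits those constraints (helper name hypothetical):

	#include <linux/i2c.h>

	/* Register read shaped to match the owl quirks: write-first
	 * combined transfer, first message <= 6 bytes. */
	static int example_reg_read(struct i2c_client *client, u8 reg, u8 *val)
	{
		struct i2c_msg msgs[2] = {
			{
				.addr = client->addr,
				.flags = 0,		/* write: register address */
				.len = 1,
				.buf = &reg,
			}, {
				.addr = client->addr,
				.flags = I2C_M_RD,	/* read: register contents */
				.len = 1,
				.buf = val,
			},
		};
		int ret = i2c_transfer(client->adapter, msgs, 2);

		return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
	}
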
+
+static int owl_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct owl_i2c_dev *i2c_dev;
+ struct resource *res;
+ int ret, irq;
+
+ i2c_dev = devm_kzalloc(dev, sizeof(*i2c_dev), GFP_KERNEL);
+ if (!i2c_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c_dev->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(i2c_dev->base))
+ return PTR_ERR(i2c_dev->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to get IRQ number\n");
+ return irq;
+ }
+
+ if (of_property_read_u32(dev->of_node, "clock-frequency",
+ &i2c_dev->bus_freq))
+ i2c_dev->bus_freq = OWL_I2C_DEF_SPEED_HZ;
+
+ /* We support only frequencies of 100k and 400k for now */
+ if (i2c_dev->bus_freq != OWL_I2C_DEF_SPEED_HZ &&
+ i2c_dev->bus_freq != OWL_I2C_MAX_SPEED_HZ) {
+ dev_err(dev, "invalid clock-frequency %d\n", i2c_dev->bus_freq);
+ return -EINVAL;
+ }
+
+ i2c_dev->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(i2c_dev->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(i2c_dev->clk);
+ }
+
+ ret = clk_prepare_enable(i2c_dev->clk);
+ if (ret)
+ return ret;
+
+ i2c_dev->clk_rate = clk_get_rate(i2c_dev->clk);
+ if (!i2c_dev->clk_rate) {
+ dev_err(dev, "input clock rate should not be zero\n");
+ ret = -EINVAL;
+ goto disable_clk;
+ }
+
+ init_completion(&i2c_dev->msg_complete);
+ spin_lock_init(&i2c_dev->lock);
+ i2c_dev->adap.owner = THIS_MODULE;
+ i2c_dev->adap.algo = &owl_i2c_algorithm;
+ i2c_dev->adap.timeout = OWL_I2C_TIMEOUT;
+ i2c_dev->adap.quirks = &owl_i2c_quirks;
+ i2c_dev->adap.dev.parent = dev;
+ i2c_dev->adap.dev.of_node = dev->of_node;
+ snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name),
+ "%s", "OWL I2C adapter");
+ i2c_set_adapdata(&i2c_dev->adap, i2c_dev);
+
+ platform_set_drvdata(pdev, i2c_dev);
+
+ ret = devm_request_irq(dev, irq, owl_i2c_interrupt, 0, pdev->name,
+ i2c_dev);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", irq);
+ goto disable_clk;
+ }
+
+ return i2c_add_adapter(&i2c_dev->adap);
+
+disable_clk:
+ clk_disable_unprepare(i2c_dev->clk);
+
+ return ret;
+}
+
+static const struct of_device_id owl_i2c_of_match[] = {
+ { .compatible = "actions,s900-i2c" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, owl_i2c_of_match);
+
+static struct platform_driver owl_i2c_driver = {
+ .probe = owl_i2c_probe,
+ .driver = {
+ .name = "owl-i2c",
+ .of_match_table = of_match_ptr(owl_i2c_of_match),
+ },
+};
+module_platform_driver(owl_i2c_driver);
+
+MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Actions Semiconductor Owl SoC's I2C driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 55fd5c6f3cca..50803e5d995b 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -365,7 +365,6 @@ static int pasemi_smb_probe(struct pci_dev *dev,
smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
smbus->adapter.algo = &smbus_algorithm;
smbus->adapter.algo_data = smbus;
- smbus->adapter.nr = PCI_FUNC(dev->devfn);
/* set up the sysfs linkage to our parent device */
smbus->adapter.dev.parent = &dev->dev;
@@ -373,7 +372,7 @@ static int pasemi_smb_probe(struct pci_dev *dev,
reg_write(smbus, REG_CTL, (CTL_MTR | CTL_MRR |
(CLK_100K_DIV & CTL_CLK_M)));
- error = i2c_add_numbered_adapter(&smbus->adapter);
+ error = i2c_add_adapter(&smbus->adapter);
if (error)
goto out_release_region;
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index dae8ac618a52..0829cb696d9d 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -444,16 +444,6 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
{
enum pmcmsptwi_xfer_result retval;
- if ((cmd->type == MSP_TWI_CMD_WRITE && cmd->write_len == 0) ||
- (cmd->type == MSP_TWI_CMD_READ && cmd->read_len == 0) ||
- (cmd->type == MSP_TWI_CMD_WRITE_READ &&
- (cmd->read_len == 0 || cmd->write_len == 0))) {
- dev_err(&pmcmsptwi_adapter.dev,
- "%s: Cannot transfer less than 1 byte\n",
- __func__);
- return -EINVAL;
- }
-
mutex_lock(&data->lock);
dev_dbg(&pmcmsptwi_adapter.dev,
"Setting address to 0x%04x\n", cmd->addr);
@@ -532,11 +522,6 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
cmd.write_data = msg->buf;
}
- if (msg->len == 0) {
- dev_err(&adap->dev, "Zero-byte messages unsupported\n");
- return -EINVAL;
- }
-
cmd.addr = msg->addr;
if (msg->flags & I2C_M_TEN) {
@@ -578,7 +563,7 @@ static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
}
static const struct i2c_adapter_quirks pmcmsptwi_i2c_quirks = {
- .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ | I2C_AQ_NO_ZERO_LEN,
.max_write_len = MSP_MAX_BYTES_PER_RW,
.max_read_len = MSP_MAX_BYTES_PER_RW,
.max_comb_1st_msg_len = MSP_MAX_BYTES_PER_RW,
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
new file mode 100644
index 000000000000..36732eb688a4
--- /dev/null
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/spinlock.h>
+
+#define SE_I2C_TX_TRANS_LEN 0x26c
+#define SE_I2C_RX_TRANS_LEN 0x270
+#define SE_I2C_SCL_COUNTERS 0x278
+
+#define SE_I2C_ERR (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |\
+ M_GP_IRQ_1_EN | M_GP_IRQ_3_EN | M_GP_IRQ_4_EN)
+#define SE_I2C_ABORT BIT(1)
+
+/* M_CMD OP codes for I2C */
+#define I2C_WRITE 0x1
+#define I2C_READ 0x2
+#define I2C_WRITE_READ 0x3
+#define I2C_ADDR_ONLY 0x4
+#define I2C_BUS_CLEAR 0x6
+#define I2C_STOP_ON_BUS 0x7
+/* M_CMD params for I2C */
+#define PRE_CMD_DELAY BIT(0)
+#define TIMESTAMP_BEFORE BIT(1)
+#define STOP_STRETCH BIT(2)
+#define TIMESTAMP_AFTER BIT(3)
+#define POST_COMMAND_DELAY BIT(4)
+#define IGNORE_ADD_NACK BIT(6)
+#define READ_FINISHED_WITH_ACK BIT(7)
+#define BYPASS_ADDR_PHASE BIT(8)
+#define SLV_ADDR_MSK GENMASK(15, 9)
+#define SLV_ADDR_SHFT 9
+/* I2C SCL COUNTER fields */
+#define HIGH_COUNTER_MSK GENMASK(29, 20)
+#define HIGH_COUNTER_SHFT 20
+#define LOW_COUNTER_MSK GENMASK(19, 10)
+#define LOW_COUNTER_SHFT 10
+#define CYCLE_COUNTER_MSK GENMASK(9, 0)
+
+enum geni_i2c_err_code {
+ GP_IRQ0,
+ NACK,
+ GP_IRQ2,
+ BUS_PROTO,
+ ARB_LOST,
+ GP_IRQ5,
+ GENI_OVERRUN,
+ GENI_ILLEGAL_CMD,
+ GENI_ABORT_DONE,
+ GENI_TIMEOUT,
+};
+
+#define DM_I2C_CB_ERR ((BIT(NACK) | BIT(BUS_PROTO) | BIT(ARB_LOST)) \
+ << 5)
+
+#define I2C_AUTO_SUSPEND_DELAY 250
+#define KHZ(freq) (1000 * (freq))
+#define PACKING_BYTES_PW 4
+
+#define ABORT_TIMEOUT HZ
+#define XFER_TIMEOUT HZ
+#define RST_TIMEOUT HZ
+
+struct geni_i2c_dev {
+ struct geni_se se;
+ u32 tx_wm;
+ int irq;
+ int err;
+ struct i2c_adapter adap;
+ struct completion done;
+ struct i2c_msg *cur;
+ int cur_wr;
+ int cur_rd;
+ spinlock_t lock;
+ u32 clk_freq_out;
+ const struct geni_i2c_clk_fld *clk_fld;
+ int suspended;
+};
+
+struct geni_i2c_err_log {
+ int err;
+ const char *msg;
+};
+
+static const struct geni_i2c_err_log gi2c_log[] = {
+ [GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"},
+ [NACK] = {-ENXIO, "NACK: slv unresponsive, check its power/reset-ln"},
+ [GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"},
+ [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"},
+ [ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"},
+ [GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"},
+ [GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
+ [GENI_ILLEGAL_CMD] = {-EIO, "Illegal cmd, check GENI cmd-state machine"},
+ [GENI_ABORT_DONE] = {-ETIMEDOUT, "Abort after timeout successful"},
+ [GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"},
+};
+
+struct geni_i2c_clk_fld {
+ u32 clk_freq_out;
+ u8 clk_div;
+ u8 t_high_cnt;
+ u8 t_low_cnt;
+ u8 t_cycle_cnt;
+};
+
+/*
+ * Hardware uses the formulas below to calculate the time periods of the
+ * SCL clock cycle. Firmware uses some additional cycles, excluded from
+ * these formulas, and it has been confirmed that the resulting time
+ * periods are within specification limits.
+ *
+ * time of high period of SCL: t_high = (t_high_cnt * clk_div) / source_clock
+ * time of low period of SCL: t_low = (t_low_cnt * clk_div) / source_clock
+ * time of full period of SCL: t_cycle = (t_cycle_cnt * clk_div) / source_clock
+ * clk_freq_out = 1 / t_cycle
+ * source_clock = 19.2 MHz
+ */
+static const struct geni_i2c_clk_fld geni_i2c_clk_map[] = {
+ {KHZ(100), 7, 10, 11, 26},
+ {KHZ(400), 2, 5, 12, 24},
+ {KHZ(1000), 1, 3, 9, 18},
+};
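
The table entries can be sanity-checked against the formulas above: the 400 kHz row works out exactly, while the 100 kHz and 1 MHz rows land near the target and rely on the firmware's extra cycles to stay within spec. A small standalone check of that arithmetic (illustration only):

	#include <stdio.h>

	int main(void)
	{
		const double source_clock = 19200000.0; /* 19.2 MHz */
		/* {clk_freq_out, clk_div, t_high_cnt, t_low_cnt, t_cycle_cnt} */
		const int map[3][5] = {
			{  100000, 7, 10, 11, 26 },
			{  400000, 2,  5, 12, 24 },
			{ 1000000, 1,  3,  9, 18 },
		};

		for (int i = 0; i < 3; i++) {
			double t_cycle = map[i][4] * map[i][1] / source_clock;
			/* clk_freq_out = 1 / t_cycle:
			 * ~105.5 kHz, 400 kHz exactly, ~1.067 MHz */
			printf("target %7d Hz -> computed %.0f Hz\n",
			       map[i][0], 1.0 / t_cycle);
		}
		return 0;
	}
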
+
+static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
+{
+ int i;
+ const struct geni_i2c_clk_fld *itr = geni_i2c_clk_map;
+
+ for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) {
+ if (itr->clk_freq_out == gi2c->clk_freq_out) {
+ gi2c->clk_fld = itr;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c)
+{
+ const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;
+ u32 val;
+
+ writel_relaxed(0, gi2c->se.base + SE_GENI_CLK_SEL);
+
+ val = (itr->clk_div << CLK_DIV_SHFT) | SER_CLK_EN;
+ writel_relaxed(val, gi2c->se.base + GENI_SER_M_CLK_CFG);
+
+ val = itr->t_high_cnt << HIGH_COUNTER_SHFT;
+ val |= itr->t_low_cnt << LOW_COUNTER_SHFT;
+ val |= itr->t_cycle_cnt;
+ writel_relaxed(val, gi2c->se.base + SE_I2C_SCL_COUNTERS);
+}
+
+static void geni_i2c_err_misc(struct geni_i2c_dev *gi2c)
+{
+ u32 m_cmd = readl_relaxed(gi2c->se.base + SE_GENI_M_CMD0);
+ u32 m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
+ u32 geni_s = readl_relaxed(gi2c->se.base + SE_GENI_STATUS);
+ u32 geni_ios = readl_relaxed(gi2c->se.base + SE_GENI_IOS);
+ u32 dma = readl_relaxed(gi2c->se.base + SE_GENI_DMA_MODE_EN);
+ u32 rx_st, tx_st;
+
+ if (dma) {
+ rx_st = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
+ tx_st = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
+ } else {
+ rx_st = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFO_STATUS);
+ tx_st = readl_relaxed(gi2c->se.base + SE_GENI_TX_FIFO_STATUS);
+ }
+ dev_dbg(gi2c->se.dev, "DMA:%d tx_stat:0x%x, rx_stat:0x%x, irq-stat:0x%x\n",
+ dma, tx_st, rx_st, m_stat);
+ dev_dbg(gi2c->se.dev, "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
+ m_cmd, geni_s, geni_ios);
+}
+
+static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
+{
+ if (!gi2c->err)
+ gi2c->err = gi2c_log[err].err;
+ if (gi2c->cur)
+ dev_dbg(gi2c->se.dev, "len:%d, slv-addr:0x%x, RD/WR:%d\n",
+ gi2c->cur->len, gi2c->cur->addr, gi2c->cur->flags);
+
+ if (err != NACK && err != GENI_ABORT_DONE) {
+ dev_err(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
+ geni_i2c_err_misc(gi2c);
+ }
+}
+
+static irqreturn_t geni_i2c_irq(int irq, void *dev)
+{
+ struct geni_i2c_dev *gi2c = dev;
+ int j;
+ u32 m_stat;
+ u32 rx_st;
+ u32 dm_tx_st;
+ u32 dm_rx_st;
+ u32 dma;
+ struct i2c_msg *cur;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gi2c->lock, flags);
+ m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
+ rx_st = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFO_STATUS);
+ dm_tx_st = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
+ dm_rx_st = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
+ dma = readl_relaxed(gi2c->se.base + SE_GENI_DMA_MODE_EN);
+ cur = gi2c->cur;
+
+ if (!cur ||
+ m_stat & (M_CMD_FAILURE_EN | M_CMD_ABORT_EN) ||
+ dm_rx_st & (DM_I2C_CB_ERR)) {
+ if (m_stat & M_GP_IRQ_1_EN)
+ geni_i2c_err(gi2c, NACK);
+ if (m_stat & M_GP_IRQ_3_EN)
+ geni_i2c_err(gi2c, BUS_PROTO);
+ if (m_stat & M_GP_IRQ_4_EN)
+ geni_i2c_err(gi2c, ARB_LOST);
+ if (m_stat & M_CMD_OVERRUN_EN)
+ geni_i2c_err(gi2c, GENI_OVERRUN);
+ if (m_stat & M_ILLEGAL_CMD_EN)
+ geni_i2c_err(gi2c, GENI_ILLEGAL_CMD);
+ if (m_stat & M_CMD_ABORT_EN)
+ geni_i2c_err(gi2c, GENI_ABORT_DONE);
+ if (m_stat & M_GP_IRQ_0_EN)
+ geni_i2c_err(gi2c, GP_IRQ0);
+
+ /* Disable the TX Watermark interrupt to stop TX */
+ if (!dma)
+ writel_relaxed(0, gi2c->se.base +
+ SE_GENI_TX_WATERMARK_REG);
+ goto irqret;
+ }
+
+ if (dma) {
+ dev_dbg(gi2c->se.dev, "i2c dma tx:0x%x, dma rx:0x%x\n",
+ dm_tx_st, dm_rx_st);
+ goto irqret;
+ }
+
+ if (cur->flags & I2C_M_RD &&
+ m_stat & (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN)) {
+ u32 rxcnt = rx_st & RX_FIFO_WC_MSK;
+
+ for (j = 0; j < rxcnt; j++) {
+ u32 val;
+ int p = 0;
+
+ val = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFOn);
+ while (gi2c->cur_rd < cur->len && p < sizeof(val)) {
+ cur->buf[gi2c->cur_rd++] = val & 0xff;
+ val >>= 8;
+ p++;
+ }
+ if (gi2c->cur_rd == cur->len)
+ break;
+ }
+ } else if (!(cur->flags & I2C_M_RD) &&
+ m_stat & M_TX_FIFO_WATERMARK_EN) {
+ for (j = 0; j < gi2c->tx_wm; j++) {
+ u32 temp;
+ u32 val = 0;
+ int p = 0;
+
+ while (gi2c->cur_wr < cur->len && p < sizeof(val)) {
+ temp = cur->buf[gi2c->cur_wr++];
+ val |= temp << (p * 8);
+ p++;
+ }
+ writel_relaxed(val, gi2c->se.base + SE_GENI_TX_FIFOn);
+ /* TX Complete, Disable the TX Watermark interrupt */
+ if (gi2c->cur_wr == cur->len) {
+ writel_relaxed(0, gi2c->se.base +
+ SE_GENI_TX_WATERMARK_REG);
+ break;
+ }
+ }
+ }
+irqret:
+ if (m_stat)
+ writel_relaxed(m_stat, gi2c->se.base + SE_GENI_M_IRQ_CLEAR);
+
+ if (dma) {
+ if (dm_tx_st)
+ writel_relaxed(dm_tx_st, gi2c->se.base +
+ SE_DMA_TX_IRQ_CLR);
+ if (dm_rx_st)
+ writel_relaxed(dm_rx_st, gi2c->se.base +
+ SE_DMA_RX_IRQ_CLR);
+ }
+ /* If this is an error with the done bit not set, handle it via the timeout. */
+ if (m_stat & M_CMD_DONE_EN || m_stat & M_CMD_ABORT_EN)
+ complete(&gi2c->done);
+ else if (dm_tx_st & TX_DMA_DONE || dm_tx_st & TX_RESET_DONE)
+ complete(&gi2c->done);
+ else if (dm_rx_st & RX_DMA_DONE || dm_rx_st & RX_RESET_DONE)
+ complete(&gi2c->done);
+
+ spin_unlock_irqrestore(&gi2c->lock, flags);
+ return IRQ_HANDLED;
+}
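
The FIFO loops in the handler above pack and unpack four message bytes per 32-bit FIFO word (PACKING_BYTES_PW), least-significant byte first. A standalone sketch of the same packing, mirroring the TX loop (the RX loop is the inverse):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Pack up to 4 message bytes into one FIFO word, LSB first. */
	static uint32_t pack_fifo_word(const uint8_t *buf, size_t len)
	{
		uint32_t val = 0;

		for (size_t p = 0; p < len && p < 4; p++)
			val |= (uint32_t)buf[p] << (p * 8);
		return val;
	}

	int main(void)
	{
		const uint8_t msg[4] = { 0x11, 0x22, 0x33, 0x44 };

		/* prints 0x44332211: the first byte lands in bits 7:0 */
		printf("0x%08x\n", (unsigned)pack_fifo_word(msg, sizeof(msg)));
		return 0;
	}
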
+
+static void geni_i2c_abort_xfer(struct geni_i2c_dev *gi2c)
+{
+ u32 val;
+ unsigned long time_left = ABORT_TIMEOUT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gi2c->lock, flags);
+ geni_i2c_err(gi2c, GENI_TIMEOUT);
+ gi2c->cur = NULL;
+ geni_se_abort_m_cmd(&gi2c->se);
+ spin_unlock_irqrestore(&gi2c->lock, flags);
+ do {
+ time_left = wait_for_completion_timeout(&gi2c->done, time_left);
+ val = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
+ } while (!(val & M_CMD_ABORT_EN) && time_left);
+
+ if (!(val & M_CMD_ABORT_EN))
+ dev_err(gi2c->se.dev, "Timeout abort_m_cmd\n");
+}
+
+static void geni_i2c_rx_fsm_rst(struct geni_i2c_dev *gi2c)
+{
+ u32 val;
+ unsigned long time_left = RST_TIMEOUT;
+
+ writel_relaxed(1, gi2c->se.base + SE_DMA_RX_FSM_RST);
+ do {
+ time_left = wait_for_completion_timeout(&gi2c->done, time_left);
+ val = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
+ } while (!(val & RX_RESET_DONE) && time_left);
+
+ if (!(val & RX_RESET_DONE))
+ dev_err(gi2c->se.dev, "Timeout resetting RX_FSM\n");
+}
+
+static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
+{
+ u32 val;
+ unsigned long time_left = RST_TIMEOUT;
+
+ writel_relaxed(1, gi2c->se.base + SE_DMA_TX_FSM_RST);
+ do {
+ time_left = wait_for_completion_timeout(&gi2c->done, time_left);
+ val = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
+ } while (!(val & TX_RESET_DONE) && time_left);
+
+ if (!(val & TX_RESET_DONE))
+ dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
+}
+
+static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ u32 m_param)
+{
+ dma_addr_t rx_dma;
+ enum geni_se_xfer_mode mode;
+ unsigned long time_left = XFER_TIMEOUT;
+
+ gi2c->cur = msg;
+ mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+ geni_se_select_mode(&gi2c->se, mode);
+ writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
+ geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
+ if (mode == GENI_SE_DMA) {
+ int ret;
+
+ ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len,
+ &rx_dma);
+ if (ret) {
+ mode = GENI_SE_FIFO;
+ geni_se_select_mode(&gi2c->se, mode);
+ }
+ }
+
+ time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!time_left)
+ geni_i2c_abort_xfer(gi2c);
+
+ gi2c->cur_rd = 0;
+ if (mode == GENI_SE_DMA) {
+ if (gi2c->err)
+ geni_i2c_rx_fsm_rst(gi2c);
+ geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
+ }
+ return gi2c->err;
+}
+
+static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ u32 m_param)
+{
+ dma_addr_t tx_dma;
+ enum geni_se_xfer_mode mode;
+ unsigned long time_left;
+
+ gi2c->cur = msg;
+ mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+ geni_se_select_mode(&gi2c->se, mode);
+ writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
+ geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
+ if (mode == GENI_SE_DMA) {
+ int ret;
+
+ ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len,
+ &tx_dma);
+ if (ret) {
+ mode = GENI_SE_FIFO;
+ geni_se_select_mode(&gi2c->se, mode);
+ }
+ }
+
+ if (mode == GENI_SE_FIFO) /* Get FIFO IRQ */
+ writel_relaxed(1, gi2c->se.base + SE_GENI_TX_WATERMARK_REG);
+
+ time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!time_left)
+ geni_i2c_abort_xfer(gi2c);
+
+ gi2c->cur_wr = 0;
+ if (mode == GENI_SE_DMA) {
+ if (gi2c->err)
+ geni_i2c_tx_fsm_rst(gi2c);
+ geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
+ }
+ return gi2c->err;
+}
+
+static int geni_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
+ int i, ret;
+
+ gi2c->err = 0;
+ reinit_completion(&gi2c->done);
+ ret = pm_runtime_get_sync(gi2c->se.dev);
+ if (ret < 0) {
+ dev_err(gi2c->se.dev, "error turning on SE resources: %d\n", ret);
+ pm_runtime_put_noidle(gi2c->se.dev);
+ /* Set device in suspended since resume failed */
+ pm_runtime_set_suspended(gi2c->se.dev);
+ return ret;
+ }
+
+ qcom_geni_i2c_conf(gi2c);
+ for (i = 0; i < num; i++) {
+ u32 m_param = i < (num - 1) ? STOP_STRETCH : 0;
+
+ m_param |= ((msgs[i].addr << SLV_ADDR_SHFT) & SLV_ADDR_MSK);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = geni_i2c_rx_one_msg(gi2c, &msgs[i], m_param);
+ else
+ ret = geni_i2c_tx_one_msg(gi2c, &msgs[i], m_param);
+
+ if (ret)
+ break;
+ }
+ if (ret == 0)
+ ret = num;
+
+ pm_runtime_mark_last_busy(gi2c->se.dev);
+ pm_runtime_put_autosuspend(gi2c->se.dev);
+ gi2c->cur = NULL;
+ gi2c->err = 0;
+ return ret;
+}
+
+static u32 geni_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm geni_i2c_algo = {
+ .master_xfer = geni_i2c_xfer,
+ .functionality = geni_i2c_func,
+};
+
+static int geni_i2c_probe(struct platform_device *pdev)
+{
+ struct geni_i2c_dev *gi2c;
+ struct resource *res;
+ u32 proto, tx_depth;
+ int ret;
+
+ gi2c = devm_kzalloc(&pdev->dev, sizeof(*gi2c), GFP_KERNEL);
+ if (!gi2c)
+ return -ENOMEM;
+
+ gi2c->se.dev = &pdev->dev;
+ gi2c->se.wrapper = dev_get_drvdata(pdev->dev.parent);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gi2c->se.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gi2c->se.base))
+ return PTR_ERR(gi2c->se.base);
+
+ gi2c->se.clk = devm_clk_get(&pdev->dev, "se");
+ if (IS_ERR(gi2c->se.clk)) {
+ ret = PTR_ERR(gi2c->se.clk);
+ dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u32(&pdev->dev, "clock-frequency",
+ &gi2c->clk_freq_out);
+ if (ret) {
+ dev_info(&pdev->dev,
+ "Bus frequency not specified, default to 100kHz.\n");
+ gi2c->clk_freq_out = KHZ(100);
+ }
+
+ gi2c->irq = platform_get_irq(pdev, 0);
+ if (gi2c->irq < 0) {
+ dev_err(&pdev->dev, "IRQ error for i2c-geni\n");
+ return gi2c->irq;
+ }
+
+ ret = geni_i2c_clk_map_idx(gi2c);
+ if (ret) {
+ dev_err(&pdev->dev, "Invalid clk frequency %d Hz: %d\n",
+ gi2c->clk_freq_out, ret);
+ return ret;
+ }
+
+ gi2c->adap.algo = &geni_i2c_algo;
+ init_completion(&gi2c->done);
+ spin_lock_init(&gi2c->lock);
+ platform_set_drvdata(pdev, gi2c);
+ ret = devm_request_irq(&pdev->dev, gi2c->irq, geni_i2c_irq,
+ IRQF_TRIGGER_HIGH, "i2c_geni", gi2c);
+ if (ret) {
+ dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
+ gi2c->irq, ret);
+ return ret;
+ }
+ /* Disable the interrupt so that the system can enter low-power mode */
+ disable_irq(gi2c->irq);
+ i2c_set_adapdata(&gi2c->adap, gi2c);
+ gi2c->adap.dev.parent = &pdev->dev;
+ gi2c->adap.dev.of_node = pdev->dev.of_node;
+ strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
+
+ ret = geni_se_resources_on(&gi2c->se);
+ if (ret) {
+ dev_err(&pdev->dev, "Error turning on resources %d\n", ret);
+ return ret;
+ }
+ proto = geni_se_read_proto(&gi2c->se);
+ tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se);
+ if (proto != GENI_SE_I2C) {
+ dev_err(&pdev->dev, "Invalid proto %d\n", proto);
+ geni_se_resources_off(&gi2c->se);
+ return -ENXIO;
+ }
+ gi2c->tx_wm = tx_depth - 1;
+ geni_se_init(&gi2c->se, gi2c->tx_wm, tx_depth);
+ geni_se_config_packing(&gi2c->se, BITS_PER_BYTE, PACKING_BYTES_PW,
+ true, true, true);
+ ret = geni_se_resources_off(&gi2c->se);
+ if (ret) {
+ dev_err(&pdev->dev, "Error turning off resources %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
+
+ ret = i2c_add_adapter(&gi2c->adap);
+ if (ret) {
+ dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
+ return ret;
+ }
+
+ gi2c->suspended = 1;
+ pm_runtime_set_suspended(gi2c->se.dev);
+ pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(gi2c->se.dev);
+ pm_runtime_enable(gi2c->se.dev);
+
+ return 0;
+}
+
+static int geni_i2c_remove(struct platform_device *pdev)
+{
+ struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(gi2c->se.dev);
+ i2c_del_adapter(&gi2c->adap);
+ return 0;
+}
+
+static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
+{
+ int ret;
+ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+ disable_irq(gi2c->irq);
+ ret = geni_se_resources_off(&gi2c->se);
+ if (ret) {
+ enable_irq(gi2c->irq);
+ return ret;
+
+ } else {
+ gi2c->suspended = 1;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+ ret = geni_se_resources_on(&gi2c->se);
+ if (ret)
+ return ret;
+
+ enable_irq(gi2c->irq);
+ gi2c->suspended = 0;
+ return 0;
+}
+
+static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
+{
+ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+ if (!gi2c->suspended) {
+ geni_i2c_runtime_suspend(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops geni_i2c_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
+ SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id geni_i2c_dt_match[] = {
+ { .compatible = "qcom,geni-i2c" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
+
+static struct platform_driver geni_i2c_driver = {
+ .probe = geni_i2c_probe,
+ .remove = geni_i2c_remove,
+ .driver = {
+ .name = "geni_i2c",
+ .pm = &geni_i2c_pm_ops,
+ .of_match_table = geni_i2c_dt_match,
+ },
+};
+
+module_platform_driver(geni_i2c_driver);
+
+MODULE_DESCRIPTION("I2C Controller Driver for GENI based QUP cores");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 5e310efd9446..43ad933df0f0 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -19,6 +19,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
@@ -32,6 +33,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
/* register offsets */
@@ -111,8 +113,10 @@
#define ID_ARBLOST (1 << 3)
#define ID_NACK (1 << 4)
/* persistent flags */
-#define ID_P_PM_BLOCKED (1 << 31)
-#define ID_P_MASK ID_P_PM_BLOCKED
+#define ID_P_REP_AFTER_RD BIT(29)
+#define ID_P_NO_RXDMA BIT(30) /* HW forbids RXDMA sometimes */
+#define ID_P_PM_BLOCKED BIT(31)
+#define ID_P_MASK GENMASK(31, 29)
enum rcar_i2c_type {
I2C_RCAR_GEN1,
@@ -141,6 +145,8 @@ struct rcar_i2c_priv {
struct dma_chan *dma_rx;
struct scatterlist sg;
enum dma_data_direction dma_direction;
+
+ struct reset_control *rstc;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -179,8 +185,6 @@ static void rcar_i2c_set_scl(struct i2c_adapter *adap, int val)
rcar_i2c_write(priv, ICMCR, priv->recovery_icmcr);
};
-/* No get_sda, because the HW only reports its bus free logic, not SDA itself */
-
static void rcar_i2c_set_sda(struct i2c_adapter *adap, int val)
{
struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
@@ -193,10 +197,19 @@ static void rcar_i2c_set_sda(struct i2c_adapter *adap, int val)
rcar_i2c_write(priv, ICMCR, priv->recovery_icmcr);
};
+static int rcar_i2c_get_bus_free(struct i2c_adapter *adap)
+{
+ struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
+
+ return !(rcar_i2c_read(priv, ICMCR) & FSDA);
+
+};
+
static struct i2c_bus_recovery_info rcar_i2c_bri = {
.get_scl = rcar_i2c_get_scl,
.set_scl = rcar_i2c_set_scl,
.set_sda = rcar_i2c_set_sda,
+ .get_bus_free = rcar_i2c_get_bus_free,
.recover_bus = i2c_generic_scl_recovery,
};
static void rcar_i2c_init(struct rcar_i2c_priv *priv)
@@ -211,7 +224,7 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
{
- int i, ret;
+ int i;
for (i = 0; i < LOOP_TIMEOUT; i++) {
/* make sure that bus is not busy */
@@ -222,13 +235,7 @@ static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
/* Waiting did not help, try to recover */
priv->recovery_icmcr = MDBS | OBPC | FSDA | FSCL;
- ret = i2c_recover_bus(&priv->adap);
-
- /* No failure when recovering, so check bus busy bit again */
- if (ret == 0)
- ret = (rcar_i2c_read(priv, ICMCR) & FSDA) ? -EBUSY : 0;
-
- return ret;
+ return i2c_recover_bus(&priv->adap);
}
static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timings *t)
@@ -339,7 +346,10 @@ static void rcar_i2c_prepare_msg(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICMSR, 0);
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
} else {
- rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
+ if (priv->flags & ID_P_REP_AFTER_RD)
+ priv->flags &= ~ID_P_REP_AFTER_RD;
+ else
+ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
rcar_i2c_write(priv, ICMSR, 0);
}
rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND);
@@ -370,6 +380,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
sg_dma_len(&priv->sg), priv->dma_direction);
+ /* Gen3 can only do one RXDMA per transfer and we just completed it */
+ if (priv->devtype == I2C_RCAR_GEN3 &&
+ priv->dma_direction == DMA_FROM_DEVICE)
+ priv->flags |= ID_P_NO_RXDMA;
+
priv->dma_direction = DMA_NONE;
}
@@ -407,8 +422,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
unsigned char *buf;
int len;
- /* Do not use DMA if it's not available or for messages < 8 bytes */
- if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
+ /* Do various checks to see if DMA is feasible at all */
+ if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
+ (read && priv->flags & ID_P_NO_RXDMA))
return;
if (read) {
@@ -538,15 +554,15 @@ static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
priv->pos++;
}
- /*
- * If next received data is the _LAST_, go to STOP phase. Might be
- * overwritten by REP START when setting up a new msg. Not elegant
- * but the only stable sequence for REP START I have found so far.
- * If you want to change this code, make sure sending one transfer with
- * four messages (WR-RD-WR-RD) works!
- */
- if (priv->pos + 1 >= msg->len)
- rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
+ /* If next received data is the _LAST_, go to new phase. */
+ if (priv->pos + 1 == msg->len) {
+ if (priv->flags & ID_LAST_MSG) {
+ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
+ } else {
+ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
+ priv->flags |= ID_P_REP_AFTER_RD;
+ }
+ }
if (priv->pos == msg->len && !(priv->flags & ID_LAST_MSG))
rcar_i2c_next_msg(priv);
@@ -614,9 +630,11 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
struct rcar_i2c_priv *priv = ptr;
u32 msr, val;
- /* Clear START or STOP as soon as we can */
- val = rcar_i2c_read(priv, ICMCR);
- rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
+ /* Clear START or STOP immediately, except for REPSTART after read */
+ if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) {
+ val = rcar_i2c_read(priv, ICMCR);
+ rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
+ }
msr = rcar_i2c_read(priv, ICMSR);
@@ -739,6 +757,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
}
}
+/* I2C is a special case, we need to poll the status of a reset */
+static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+{
+ int i, ret;
+
+ ret = reset_control_reset(priv->rstc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < LOOP_TIMEOUT; i++) {
+ ret = reset_control_status(priv->rstc);
+ if (ret == 0)
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs,
int num)
@@ -750,20 +787,24 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
pm_runtime_get_sync(dev);
+ /* Gen3 needs a reset before allowing RXDMA once */
+ if (priv->devtype == I2C_RCAR_GEN3) {
+ priv->flags |= ID_P_NO_RXDMA;
+ if (!IS_ERR(priv->rstc)) {
+ ret = rcar_i2c_do_reset(priv);
+ if (ret == 0)
+ priv->flags &= ~ID_P_NO_RXDMA;
+ }
+ }
+
rcar_i2c_init(priv);
ret = rcar_i2c_bus_barrier(priv);
if (ret < 0)
goto out;
- for (i = 0; i < num; i++) {
- /* This HW can't send STOP after address phase */
- if (msgs[i].len == 0) {
- ret = -EOPNOTSUPP;
- goto out;
- }
+ for (i = 0; i < num; i++)
rcar_i2c_request_dma(priv, msgs + i);
- }
/* init first message */
priv->msg = msgs;
@@ -850,6 +891,10 @@ static const struct i2c_algorithm rcar_i2c_algo = {
.unreg_slave = rcar_unreg_slave,
};
+static const struct i2c_adapter_quirks rcar_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static const struct of_device_id rcar_i2c_dt_ids[] = {
{ .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 },
@@ -903,6 +948,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
adap->dev.parent = dev;
adap->dev.of_node = dev->of_node;
adap->bus_recovery_info = &rcar_i2c_bri;
+ adap->quirks = &rcar_i2c_quirks;
i2c_set_adapdata(adap, priv);
strlcpy(adap->name, pdev->name, sizeof(adap->name));
@@ -920,6 +966,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
+ if (priv->devtype == I2C_RCAR_GEN3) {
+ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (!IS_ERR(priv->rstc)) {
+ ret = reset_control_status(priv->rstc);
+ if (ret < 0)
+ priv->rstc = ERR_PTR(-ENOTSUPP);
+ }
+ }
+
/* Stay always active when multi-master to keep arbitration working */
if (of_property_read_bool(dev->of_node, "multi-master"))
priv->flags |= ID_P_PM_BLOCKED;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 9fe2b6951895..2f2e28d60ef5 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -919,9 +919,9 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
if ((val == CPUFREQ_POSTCHANGE && delta_f < 0) ||
(val == CPUFREQ_PRECHANGE && delta_f > 0)) {
- i2c_lock_adapter(&i2c->adap);
+ i2c_lock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER);
ret = s3c24xx_i2c_clockrate(i2c, &got);
- i2c_unlock_adapter(&i2c->adap);
+ i2c_unlock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER);
if (ret < 0)
dev_err(i2c->dev, "cannot find frequency (%d)\n", ret);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 5fda4188a9e5..9c7f6f8ceb22 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -613,11 +613,6 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
bool do_init)
{
- if (usr_msg->len == 0 && (usr_msg->flags & I2C_M_RD)) {
- dev_err(pd->dev, "Unsupported zero length i2c read\n");
- return -EOPNOTSUPP;
- }
-
if (do_init) {
/* Initialize channel registers */
iic_wr(pd, ICCR, ICCR_SCP);
@@ -758,6 +753,10 @@ static const struct i2c_algorithm sh_mobile_i2c_algorithm = {
.master_xfer = sh_mobile_i2c_xfer,
};
+static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN_READ,
+};
+
/*
* r8a7740 chip has lasting errata on I2C I/O pad reset.
* this is work-around for it.
@@ -925,6 +924,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
adap->owner = THIS_MODULE;
adap->algo = &sh_mobile_i2c_algorithm;
+ adap->quirks = &sh_mobile_i2c_quirks;
adap->dev.parent = &dev->dev;
adap->retries = 5;
adap->nr = dev->id;
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 4053259bccb8..a94e724f51dc 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -590,9 +590,9 @@ static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev)
{
struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
- i2c_lock_adapter(&i2c_dev->adap);
+ i2c_lock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
i2c_dev->is_suspended = true;
- i2c_unlock_adapter(&i2c_dev->adap);
+ i2c_unlock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
return pm_runtime_force_suspend(pdev);
}
@@ -601,9 +601,9 @@ static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev)
{
struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
- i2c_lock_adapter(&i2c_dev->adap);
+ i2c_lock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
i2c_dev->is_suspended = false;
- i2c_unlock_adapter(&i2c_dev->adap);
+ i2c_unlock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
return pm_runtime_force_resume(pdev);
}
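
Both the s3c2410 and sprd hunks migrate from the i2c_lock_adapter()/i2c_unlock_adapter() helpers to the explicit-depth API; I2C_LOCK_ROOT_ADAPTER locks the whole bus, including any muxes stacked above the adapter. The pattern, sketched with hypothetical state:

	#include <linux/i2c.h>

	/* Serialize a driver-private state change against transfers on
	 * the root adapter (e.g. around suspend); field name illustrative. */
	static void example_mark_suspended(struct i2c_adapter *adap, bool *suspended)
	{
		i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
		*suspended = true;
		i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
	}
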
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index e866c481bfc3..5503fa171df0 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -127,7 +127,7 @@ enum stu300_error {
/*
* The number of address send attempts made before giving up.
- * If the first one failes it seems like 5 to 8 attempts are required.
+ * If the first one fails it seems like 5 to 8 attempts are required.
*/
#define NUM_ADDR_RESEND_ATTEMPTS 12
@@ -673,12 +673,6 @@ static int stu300_xfer_msg(struct i2c_adapter *adap,
msg->addr, msg->len, msg->flags, stop);
}
- /* Zero-length messages are not supported by this hardware */
- if (msg->len == 0) {
- ret = -EINVAL;
- goto exit_disable;
- }
-
/*
* For some reason, sending the address sometimes fails when running
* on the 13 MHz clock. No interrupt arrives. This is a work around,
@@ -863,6 +857,10 @@ static const struct i2c_algorithm stu300_algo = {
.functionality = stu300_func,
};
+static const struct i2c_adapter_quirks stu300_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static int stu300_probe(struct platform_device *pdev)
{
struct stu300_dev *dev;
@@ -920,6 +918,8 @@ static int stu300_probe(struct platform_device *pdev)
adap->algo = &stu300_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
+ adap->quirks = &stu300_quirks;
+
i2c_set_adapdata(adap, dev);
/* i2c device drivers may be active on return from add_adapter() */
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 5fccd1f1bca8..60c8561fbe65 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -115,6 +115,18 @@
#define I2C_CONFIG_LOAD_TIMEOUT 1000000
+#define I2C_MST_FIFO_CONTROL 0x0b4
+#define I2C_MST_FIFO_CONTROL_RX_FLUSH BIT(0)
+#define I2C_MST_FIFO_CONTROL_TX_FLUSH BIT(1)
+#define I2C_MST_FIFO_CONTROL_RX_TRIG(x) (((x) - 1) << 4)
+#define I2C_MST_FIFO_CONTROL_TX_TRIG(x) (((x) - 1) << 16)
+
+#define I2C_MST_FIFO_STATUS 0x0b8
+#define I2C_MST_FIFO_STATUS_RX_MASK 0xff
+#define I2C_MST_FIFO_STATUS_RX_SHIFT 0
+#define I2C_MST_FIFO_STATUS_TX_MASK 0xff0000
+#define I2C_MST_FIFO_STATUS_TX_SHIFT 16
+
/*
* msg_end_type: The bus control which need to be send at end of transfer.
* @MSG_END_STOP: Send stop pulse at end of transfer.
@@ -154,6 +166,7 @@ struct tegra_i2c_hw_feature {
u16 clk_divisor_fast_plus_mode;
bool has_multi_master_mode;
bool has_slcg_override_reg;
+ bool has_mst_fifo;
};
/**
@@ -266,13 +279,24 @@ static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
{
unsigned long timeout = jiffies + HZ;
- u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL);
+ unsigned int offset;
+ u32 mask, val;
- val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH;
- i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+ if (i2c_dev->hw->has_mst_fifo) {
+ mask = I2C_MST_FIFO_CONTROL_TX_FLUSH |
+ I2C_MST_FIFO_CONTROL_RX_FLUSH;
+ offset = I2C_MST_FIFO_CONTROL;
+ } else {
+ mask = I2C_FIFO_CONTROL_TX_FLUSH |
+ I2C_FIFO_CONTROL_RX_FLUSH;
+ offset = I2C_FIFO_CONTROL;
+ }
- while (i2c_readl(i2c_dev, I2C_FIFO_CONTROL) &
- (I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH)) {
+ val = i2c_readl(i2c_dev, offset);
+ val |= mask;
+ i2c_writel(i2c_dev, val, offset);
+
+ while (i2c_readl(i2c_dev, offset) & mask) {
if (time_after(jiffies, timeout)) {
dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n");
return -ETIMEDOUT;
@@ -290,9 +314,15 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
size_t buf_remaining = i2c_dev->msg_buf_remaining;
int words_to_transfer;
- val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
- rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >>
- I2C_FIFO_STATUS_RX_SHIFT;
+ if (i2c_dev->hw->has_mst_fifo) {
+ val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS);
+ rx_fifo_avail = (val & I2C_MST_FIFO_STATUS_RX_MASK) >>
+ I2C_MST_FIFO_STATUS_RX_SHIFT;
+ } else {
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >>
+ I2C_FIFO_STATUS_RX_SHIFT;
+ }
/* Rounds down to not include partial word at the end of buf */
words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
@@ -321,6 +351,7 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
BUG_ON(rx_fifo_avail > 0 && buf_remaining > 0);
i2c_dev->msg_buf_remaining = buf_remaining;
i2c_dev->msg_buf = buf;
+
return 0;
}
@@ -332,9 +363,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
size_t buf_remaining = i2c_dev->msg_buf_remaining;
int words_to_transfer;
- val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
- tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >>
- I2C_FIFO_STATUS_TX_SHIFT;
+ if (i2c_dev->hw->has_mst_fifo) {
+ val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS);
+ tx_fifo_avail = (val & I2C_MST_FIFO_STATUS_TX_MASK) >>
+ I2C_MST_FIFO_STATUS_TX_SHIFT;
+ } else {
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >>
+ I2C_FIFO_STATUS_TX_SHIFT;
+ }
/* Rounds down to not include partial word at the end of buf */
words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
@@ -516,9 +553,15 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2);
}
- val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
- 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
- i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+ if (i2c_dev->hw->has_mst_fifo) {
+ val = I2C_MST_FIFO_CONTROL_TX_TRIG(8) |
+ I2C_MST_FIFO_CONTROL_RX_TRIG(1);
+ i2c_writel(i2c_dev, val, I2C_MST_FIFO_CONTROL);
+ } else {
+ val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
+ 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+ }
err = tegra_i2c_flush_fifos(i2c_dev);
if (err)
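
Note how the new MST macros encode the trigger level as N-1, so requesting TX_TRIG(8)/RX_TRIG(1) writes the same raw field values (7 and 0) that the legacy path above writes directly. A quick check of that equivalence, assuming only the shifts defined in this patch:

	#include <assert.h>

	#define I2C_MST_FIFO_CONTROL_RX_TRIG(x) (((x) - 1) << 4)
	#define I2C_MST_FIFO_CONTROL_TX_TRIG(x) (((x) - 1) << 16)

	int main(void)
	{
		/* trigger level 8 -> raw field 7; level 1 -> raw field 0 */
		assert(I2C_MST_FIFO_CONTROL_TX_TRIG(8) == 7 << 16);
		assert(I2C_MST_FIFO_CONTROL_RX_TRIG(1) == 0);
		return 0;
	}
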
@@ -545,6 +588,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
{
u32 cnfg;
+ /*
+ * NACK interrupt is generated before the I2C controller generates
+ * the STOP condition on the bus. So wait for 2 clock periods
+ * before disabling the controller so that the STOP condition has
+ * been delivered properly.
+ */
+ udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+
cnfg = i2c_readl(i2c_dev, I2C_CNFG);
if (cnfg & I2C_CNFG_PACKET_MODE_EN)
i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
@@ -706,15 +757,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
return 0;
- /*
- * NACK interrupt is generated before the I2C controller generates
- * the STOP condition on the bus. So wait for 2 clock periods
- * before resetting the controller so that the STOP condition has
- * been delivered properly.
- */
- if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
- udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
-
tegra_i2c_init(i2c_dev);
if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
if (msg->flags & I2C_M_IGNORE_NAK)
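
The delay moved into tegra_i2c_disable_packet_mode() computes two SCL periods in microseconds: at 100 kHz that is DIV_ROUND_UP(2 * 1000000, 100000) = 20 us, at 400 kHz it is 5 us. The same arithmetic in plain C:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		const unsigned long rates[] = { 100000, 400000, 1000000 };

		for (int i = 0; i < 3; i++)	/* prints 20, 5, 2 us */
			printf("%lu Hz -> %lu us\n", rates[i],
			       DIV_ROUND_UP(2UL * 1000000, rates[i]));
		return 0;
	}
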
@@ -803,6 +845,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_config_load_reg = false,
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
+ .has_mst_fifo = false,
};
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -815,6 +858,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_config_load_reg = false,
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
+ .has_mst_fifo = false,
};
static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -827,6 +871,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.has_config_load_reg = false,
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
+ .has_mst_fifo = false,
};
static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
@@ -839,6 +884,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
.has_config_load_reg = true,
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
+ .has_mst_fifo = false,
};
static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
@@ -851,10 +897,25 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
.has_config_load_reg = true,
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
+ .has_mst_fifo = false,
+};
+
+static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
+ .has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = true,
+ .has_single_clk_source = true,
+ .clk_divisor_hs_mode = 1,
+ .clk_divisor_std_fast_mode = 0x19,
+ .clk_divisor_fast_plus_mode = 0x10,
+ .has_config_load_reg = true,
+ .has_multi_master_mode = true,
+ .has_slcg_override_reg = true,
+ .has_mst_fifo = true,
};
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] = {
+ { .compatible = "nvidia,tegra194-i2c", .data = &tegra194_i2c_hw, },
{ .compatible = "nvidia,tegra210-i2c", .data = &tegra210_i2c_hw, },
{ .compatible = "nvidia,tegra124-i2c", .data = &tegra124_i2c_hw, },
{ .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 1f41a4f89c08..8a873975cf12 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
if (priv->len_recv) {
/* read length byte */
rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+ /*
+ * We expect at least two interrupts for I2C_M_RECV_LEN
+ * transactions. The length is updated during the first
+ * interrupt, and the buffer contents are only copied
+ * during subsequent interrupts. If the interrupts get
+ * merged, we would complete the transaction without copying
+ * the bytes out of the RX FIFO. To avoid this, drain the
+ * FIFO whenever data is available.
+ * The length byte (rlen) has already been drained, so
+ * decrement the total length by one.
+ */
+
+ len--;
if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
rlen = 0; /* abort transfer */
priv->msg_buf_remaining = 0;
priv->msg_len = 0;
- } else {
- *buf++ = rlen;
- if (priv->client_pec)
- ++rlen; /* account for error check byte */
- /* update remaining bytes and message length */
- priv->msg_buf_remaining = rlen;
- priv->msg_len = rlen + 1;
+ xlp9xx_i2c_update_rlen(priv);
+ return;
}
+
+ *buf++ = rlen;
+ if (priv->client_pec)
+ ++rlen; /* account for error check byte */
+ /* update remaining bytes and message length */
+ priv->msg_buf_remaining = rlen;
+ priv->msg_len = rlen + 1;
xlp9xx_i2c_update_rlen(priv);
priv->len_recv = false;
- } else {
- len = min(priv->msg_buf_remaining, len);
- for (i = 0; i < len; i++, buf++)
- *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
-
- priv->msg_buf_remaining -= len;
}
+ len = min(priv->msg_buf_remaining, len);
+ for (i = 0; i < len; i++, buf++)
+ *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+ priv->msg_buf_remaining -= len;
priv->msg_buf = buf;
if (priv->msg_buf_remaining)
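For context, a minimal sketch of the message this code path serves (illustrative only; addr and buf are placeholders, while the flags and the initial len of 1 follow the standard I2C_M_RECV_LEN convention):

    struct i2c_msg msg = {
            .addr  = addr,                          /* target device */
            .flags = I2C_M_RD | I2C_M_RECV_LEN,
            .len   = 1,             /* grows by the length byte read back */
            .buf   = buf,           /* buf[0] receives the byte count */
    };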
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 484bfa15d58e..34cd4b308540 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -173,9 +173,6 @@ static int xlr_i2c_tx(struct xlr_i2c_private *priv, u16 len,
u8 offset;
u32 xfer;
- if (!len)
- return -EOPNOTSUPP;
-
offset = buf[0];
xlr_i2c_wreg(priv->iobase, XLR_I2C_ADDR, offset);
xlr_i2c_wreg(priv->iobase, XLR_I2C_DEVADDR, addr);
@@ -241,9 +238,6 @@ static int xlr_i2c_rx(struct xlr_i2c_private *priv, u16 len, u8 *buf, u16 addr)
unsigned long timeout, stoptime, checktime;
int nbytes, timedout;
- if (!len)
- return -EOPNOTSUPP;
-
xlr_i2c_wreg(priv->iobase, XLR_I2C_CFG,
XLR_I2C_CFG_NOADDR | priv->cfg->cfg_extra);
xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 1);
@@ -340,6 +334,10 @@ static const struct i2c_algorithm xlr_i2c_algo = {
.functionality = xlr_func,
};
+static const struct i2c_adapter_quirks xlr_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static const struct xlr_i2c_config xlr_i2c_config_default = {
.status_busy = XLR_I2C_BUS_BUSY,
.cfg_extra = 0,
@@ -427,6 +425,7 @@ static int xlr_i2c_probe(struct platform_device *pdev)
priv->adap.owner = THIS_MODULE;
priv->adap.algo_data = priv;
priv->adap.algo = &xlr_i2c_algo;
+ priv->adap.quirks = &xlr_i2c_quirks;
priv->adap.nr = pdev->id;
priv->adap.class = I2C_CLASS_HWMON;
snprintf(priv->adap.name, sizeof(priv->adap.name), "xlr-i2c");
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 7c3b4740b94b..32affd3fa8bd 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -453,8 +453,12 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
else
dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
data_len, client->addr, cmd, ret);
- } else {
+ /* 2 transfers must have completed successfully */
+ } else if (ret == 2) {
memcpy(data, buffer, data_len);
+ ret = 0;
+ } else {
+ ret = -EIO;
}
kfree(buffer);
@@ -482,11 +486,16 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
msgs[0].buf = buffer;
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret < 0)
- dev_err(&client->adapter->dev, "i2c write failed\n");
kfree(buffer);
- return ret;
+
+ if (ret < 0) {
+ dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret);
+ return ret;
+ }
+
+ /* 1 transfer must have completed successfully */
+ return (ret == 1) ? 0 : -EIO;
}
static acpi_status
@@ -590,8 +599,6 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
if (action == ACPI_READ) {
status = acpi_gsb_i2c_read_bytes(client, command,
gsb->data, info->access_length);
- if (status > 0)
- status = 0;
} else {
status = acpi_gsb_i2c_write_bytes(client, command,
gsb->data, info->access_length);
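Both hunks rely on the same i2c_transfer() contract: a non-negative return value is the number of messages that completed, so a combined write-then-read must return exactly 2 and a lone write exactly 1. A minimal sketch of that pattern (example_xfer_check is our own name):

    static int example_xfer_check(struct i2c_adapter *adap,
                                  struct i2c_msg *msgs, int num)
    {
            int ret = i2c_transfer(adap, msgs, num);

            if (ret < 0)
                    return ret;             /* transport-level error */

            /* fewer messages than requested means a partial transfer */
            return ret == num ? 0 : -EIO;
    }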
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 31d16ada6e7d..5a937109a289 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -158,6 +158,22 @@ static void set_sda_gpio_value(struct i2c_adapter *adap, int val)
gpiod_set_value_cansleep(adap->bus_recovery_info->sda_gpiod, val);
}
+static int i2c_generic_bus_free(struct i2c_adapter *adap)
+{
+ struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ int ret = -EOPNOTSUPP;
+
+ if (bri->get_bus_free)
+ ret = bri->get_bus_free(adap);
+ else if (bri->get_sda)
+ ret = bri->get_sda(adap);
+
+ if (ret < 0)
+ return ret;
+
+ return ret ? 0 : -EBUSY;
+}
+
/*
* We are generating clock pulses. ndelay() determines the duration of clk pulses.
* We will generate clock with rate 100 KHz and so duration of both clock levels
@@ -169,21 +185,28 @@ static void set_sda_gpio_value(struct i2c_adapter *adap, int val)
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
- int i = 0, val = 1, ret = 0;
+ int i = 0, scl = 1, ret;
if (bri->prepare_recovery)
bri->prepare_recovery(adap);
- bri->set_scl(adap, val);
+ /*
+ * If we can set SDA, we will always create a STOP to ensure additional
+ * pulses will do no harm. This is achieved by letting SDA follow SCL
+ * half a cycle later. Check the 'incomplete_write_byte' fault injector
+ * for details.
+ */
+ bri->set_scl(adap, scl);
+ ndelay(RECOVERY_NDELAY / 2);
if (bri->set_sda)
- bri->set_sda(adap, 1);
- ndelay(RECOVERY_NDELAY);
+ bri->set_sda(adap, scl);
+ ndelay(RECOVERY_NDELAY / 2);
/*
* By this time SCL is high, as we need to give 9 falling-rising edges
*/
while (i++ < RECOVERY_CLK_CNT * 2) {
- if (val) {
+ if (scl) {
/* SCL shouldn't be low here */
if (!bri->get_scl(adap)) {
dev_err(&adap->dev,
@@ -191,32 +214,27 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
ret = -EBUSY;
break;
}
- /* Break if SDA is high */
- if (bri->get_sda && bri->get_sda(adap))
- break;
}
- val = !val;
- bri->set_scl(adap, val);
- ndelay(RECOVERY_NDELAY);
- }
-
- /* check if recovery actually succeeded */
- if (bri->get_sda && !bri->get_sda(adap))
- ret = -EBUSY;
-
- /* If all went well, send STOP for a sane bus state. */
- if (ret == 0 && bri->set_sda) {
- bri->set_scl(adap, 0);
- ndelay(RECOVERY_NDELAY / 2);
- bri->set_sda(adap, 0);
- ndelay(RECOVERY_NDELAY / 2);
- bri->set_scl(adap, 1);
+ scl = !scl;
+ bri->set_scl(adap, scl);
+ /* Creating STOP again, see above */
ndelay(RECOVERY_NDELAY / 2);
- bri->set_sda(adap, 1);
+ if (bri->set_sda)
+ bri->set_sda(adap, scl);
ndelay(RECOVERY_NDELAY / 2);
+
+ if (scl) {
+ ret = i2c_generic_bus_free(adap);
+ if (ret == 0)
+ break;
+ }
}
+ /* If we can't check bus status, assume recovery worked */
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+
if (bri->unprepare_recovery)
bri->unprepare_recovery(adap);
@@ -265,6 +283,10 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
err_str = "no {get|set}_scl() found";
goto err;
}
+ if (!bri->set_sda && !bri->get_sda) {
+ err_str = "either get_sda() or set_sda() needed";
+ goto err;
+ }
}
return;
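With the check above, generic SCL recovery now requires an SDA callback alongside {get|set}_scl(). A sketch of a driver wiring this up (the example_* callbacks are hypothetical):

    static struct i2c_bus_recovery_info example_recovery = {
            .recover_bus = i2c_generic_scl_recovery,
            .get_scl     = example_get_scl,
            .set_scl     = example_set_scl,
            .get_sda     = example_get_sda, /* lets recovery verify bus-free */
            .set_sda     = example_set_sda, /* lets recovery generate STOPs */
    };

    /* in probe: */
    adap->bus_recovery_info = &example_recovery;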
@@ -615,7 +637,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
- rt_mutex_lock(&adapter->bus_lock);
+ rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
}
/**
@@ -1554,6 +1576,8 @@ void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_de
ret = device_property_read_u32(dev, "i2c-sda-falling-time-ns", &t->sda_fall_ns);
if (ret && use_defaults)
t->sda_fall_ns = t->scl_fall_ns;
+
+ device_property_read_u32(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns);
}
EXPORT_SYMBOL_GPL(i2c_parse_fw_timings);
@@ -1817,9 +1841,15 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
if (msgs[i].flags & I2C_M_RD) {
if (do_len_check && i2c_quirk_exceeded(len, q->max_read_len))
return i2c_quirk_error(adap, &msgs[i], "msg too long");
+
+ if (q->flags & I2C_AQ_NO_ZERO_LEN_READ && len == 0)
+ return i2c_quirk_error(adap, &msgs[i], "no zero length");
} else {
if (do_len_check && i2c_quirk_exceeded(len, q->max_write_len))
return i2c_quirk_error(adap, &msgs[i], "msg too long");
+
+ if (q->flags & I2C_AQ_NO_ZERO_LEN_WRITE && len == 0)
+ return i2c_quirk_error(adap, &msgs[i], "no zero length");
}
}
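These directional checks pair with the adapter-side flag used by the xlr driver earlier in this series; as far as we can tell, the combined I2C_AQ_NO_ZERO_LEN is simply the union of the two:

    /* in <linux/i2c.h> (assumption): both directions at once */
    #define I2C_AQ_NO_ZERO_LEN \
            (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)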
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index 4a78c65e9971..47a9f70a24a9 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -47,9 +47,9 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
client->slave_cb = slave_cb;
- i2c_lock_adapter(client->adapter);
+ i2c_lock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER);
ret = client->adapter->algo->reg_slave(client);
- i2c_unlock_adapter(client->adapter);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER);
if (ret) {
client->slave_cb = NULL;
@@ -69,9 +69,9 @@ int i2c_slave_unregister(struct i2c_client *client)
return -EOPNOTSUPP;
}
- i2c_lock_adapter(client->adapter);
+ i2c_lock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER);
ret = client->adapter->algo->unreg_slave(client);
- i2c_unlock_adapter(client->adapter);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER);
if (ret == 0)
client->slave_cb = NULL;
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 51970bae3c4a..9cd66cabb84f 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -463,7 +463,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
msg[num-1].len++;
}
- status = i2c_transfer(adapter, msg, num);
+ status = __i2c_transfer(adapter, msg, num);
if (status < 0)
goto cleanup;
if (status != num) {
@@ -524,9 +524,24 @@ cleanup:
* This executes an SMBus protocol operation, and returns a negative
* errno code else zero on success.
*/
-s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
- char read_write, u8 command, int protocol,
- union i2c_smbus_data *data)
+s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int protocol, union i2c_smbus_data *data)
+{
+ s32 res;
+
+ i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
+ res = __i2c_smbus_xfer(adapter, addr, flags, read_write,
+ command, protocol, data);
+ i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
+
+ return res;
+}
+EXPORT_SYMBOL(i2c_smbus_xfer);
+
+s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int protocol, union i2c_smbus_data *data)
{
unsigned long orig_jiffies;
int try;
@@ -543,8 +558,6 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB;
if (adapter->algo->smbus_xfer) {
- i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
-
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
for (res = 0, try = 0; try <= adapter->retries; try++) {
@@ -557,7 +570,6 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
orig_jiffies + adapter->timeout))
break;
}
- i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
if (res != -EOPNOTSUPP || !adapter->algo->master_xfer)
goto trace;
@@ -579,7 +591,7 @@ trace:
return res;
}
-EXPORT_SYMBOL(i2c_smbus_xfer);
+EXPORT_SYMBOL(__i2c_smbus_xfer);
/**
* i2c_smbus_read_i2c_block_data_or_emulated - read block or emulate
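The split follows the usual locked/unlocked kernel convention: i2c_smbus_xfer() takes the segment lock and delegates, while callers that already hold the lock (the mux path below, or the emulation path switched to __i2c_transfer() above) call the double-underscore variant directly. A minimal sketch of a locked-context caller (our own illustration, mirroring pca9541_reg_read() further down):

    static s32 example_read_byte_locked(struct i2c_adapter *adap, u16 addr)
    {
            union i2c_smbus_data data;
            s32 ret;

            /* caller guarantees the segment lock is already held */
            ret = __i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ,
                                   0, I2C_SMBUS_BYTE_DATA, &data);

            return ret ?: data.byte;
    }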
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 300ab4b672e4..f330690b4125 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -87,8 +87,8 @@ static int __i2c_mux_smbus_xfer(struct i2c_adapter *adap,
ret = muxc->select(muxc, priv->chan_id);
if (ret >= 0)
- ret = parent->algo->smbus_xfer(parent, addr, flags,
- read_write, command, size, data);
+ ret = __i2c_smbus_xfer(parent, addr, flags,
+ read_write, command, size, data);
if (muxc->deselect)
muxc->deselect(muxc, priv->chan_id);
@@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
struct i2c_mux_priv *priv = adapter->algo_data;
struct i2c_adapter *parent = priv->muxc->parent;
- rt_mutex_lock(&parent->mux_lock);
+ rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
if (!(flags & I2C_LOCK_ROOT_ADAPTER))
return;
i2c_lock_bus(parent, flags);
@@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
struct i2c_mux_priv *priv = adapter->algo_data;
struct i2c_adapter *parent = priv->muxc->parent;
- rt_mutex_lock(&parent->mux_lock);
+ rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
i2c_lock_bus(parent, flags);
}
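rt_mutex_lock_nested() hands lockdep a distinct subclass per mux level, so stacked muxes no longer look like recursive locking. i2c_adapter_depth() provides that level; conceptually it is a walk up the parent chain (a sketch of the idea, not necessarily the exact implementation):

    static int example_adapter_depth(struct i2c_adapter *adapter)
    {
            int depth = 0;

            /* count how many mux levels sit above this adapter */
            while ((adapter = i2c_parent_is_i2c_adapter(adapter)))
                    depth++;

            return depth;
    }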
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
index 12ad8d65faf6..f2bf3e57ed67 100644
--- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -94,31 +94,11 @@ static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
struct i2c_client *client, u8 val)
{
struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
- int ret = -ENODEV;
-
- if (adap->algo->master_xfer) {
- struct i2c_msg msg;
- u8 msgbuf[] = {pdata->sel_reg_addr, val};
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = msgbuf;
- ret = __i2c_transfer(adap, &msg, 1);
-
- if (ret >= 0 && ret != 1)
- ret = -EREMOTEIO;
- } else if (adap->algo->smbus_xfer) {
- union i2c_smbus_data data;
-
- data.byte = val;
- ret = adap->algo->smbus_xfer(adap, client->addr,
- client->flags, I2C_SMBUS_WRITE,
- pdata->sel_reg_addr,
- I2C_SMBUS_BYTE_DATA, &data);
- }
+ union i2c_smbus_data data = { .byte = val };
- return ret;
+ return __i2c_smbus_xfer(adap, client->addr, client->flags,
+ I2C_SMBUS_WRITE, pdata->sel_reg_addr,
+ I2C_SMBUS_BYTE_DATA, &data);
}
static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 6a39adaf433f..9e75d6b9140b 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -99,31 +99,11 @@ MODULE_DEVICE_TABLE(of, pca9541_of_match);
static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
{
struct i2c_adapter *adap = client->adapter;
- int ret;
-
- if (adap->algo->master_xfer) {
- struct i2c_msg msg;
- char buf[2];
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = 2;
- buf[0] = command;
- buf[1] = val;
- msg.buf = buf;
- ret = __i2c_transfer(adap, &msg, 1);
- } else {
- union i2c_smbus_data data;
-
- data.byte = val;
- ret = adap->algo->smbus_xfer(adap, client->addr,
- client->flags,
- I2C_SMBUS_WRITE,
- command,
- I2C_SMBUS_BYTE_DATA, &data);
- }
+ union i2c_smbus_data data = { .byte = val };
- return ret;
+ return __i2c_smbus_xfer(adap, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BYTE_DATA, &data);
}
/*
@@ -133,41 +113,14 @@ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
static int pca9541_reg_read(struct i2c_client *client, u8 command)
{
struct i2c_adapter *adap = client->adapter;
+ union i2c_smbus_data data;
int ret;
- u8 val;
-
- if (adap->algo->master_xfer) {
- struct i2c_msg msg[2] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = &command
- },
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = &val
- }
- };
- ret = __i2c_transfer(adap, msg, 2);
- if (ret == 2)
- ret = val;
- else if (ret >= 0)
- ret = -EIO;
- } else {
- union i2c_smbus_data data;
-
- ret = adap->algo->smbus_xfer(adap, client->addr,
- client->flags,
- I2C_SMBUS_READ,
- command,
- I2C_SMBUS_BYTE_DATA, &data);
- if (!ret)
- ret = data.byte;
- }
- return ret;
+
+ ret = __i2c_smbus_xfer(adap, client->addr, client->flags,
+ I2C_SMBUS_READ, command,
+ I2C_SMBUS_BYTE_DATA, &data);
+
+ return ret ?: data.byte;
}
/*
@@ -345,11 +298,11 @@ static int pca9541_probe(struct i2c_client *client,
/*
* I2C accesses are unprotected here.
- * We have to lock the adapter before releasing the bus.
+ * We have to lock the I2C segment before releasing the bus.
*/
- i2c_lock_adapter(adap);
+ i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
pca9541_release_bus(client);
- i2c_unlock_adapter(adap);
+ i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
/* Create mux adapter */
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index fbc748027087..24bd9275fde5 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -220,30 +220,11 @@ MODULE_DEVICE_TABLE(of, pca954x_of_match);
static int pca954x_reg_write(struct i2c_adapter *adap,
struct i2c_client *client, u8 val)
{
- int ret = -ENODEV;
-
- if (adap->algo->master_xfer) {
- struct i2c_msg msg;
- char buf[1];
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = 1;
- buf[0] = val;
- msg.buf = buf;
- ret = __i2c_transfer(adap, &msg, 1);
-
- if (ret >= 0 && ret != 1)
- ret = -EREMOTEIO;
- } else {
- union i2c_smbus_data data;
- ret = adap->algo->smbus_xfer(adap, client->addr,
- client->flags,
- I2C_SMBUS_WRITE,
- val, I2C_SMBUS_BYTE, &data);
- }
+ union i2c_smbus_data dummy;
- return ret;
+ return __i2c_smbus_xfer(adap, client->addr, client->flags,
+ I2C_SMBUS_WRITE, val,
+ I2C_SMBUS_BYTE, &dummy);
}
static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
@@ -368,7 +349,8 @@ static int pca954x_probe(struct i2c_client *client,
{
struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *of_node = client->dev.of_node;
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
bool idle_disconnect_dt;
struct gpio_desc *gpio;
int num, force, class;
@@ -379,8 +361,7 @@ static int pca954x_probe(struct i2c_client *client,
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
return -ENODEV;
- muxc = i2c_mux_alloc(adap, &client->dev,
- PCA954X_MAX_NCHANS, sizeof(*data), 0,
+ muxc = i2c_mux_alloc(adap, dev, PCA954X_MAX_NCHANS, sizeof(*data), 0,
pca954x_select_chan, pca954x_deselect_mux);
if (!muxc)
return -ENOMEM;
@@ -390,7 +371,7 @@ static int pca954x_probe(struct i2c_client *client,
data->client = client;
/* Reset the mux if a reset GPIO is specified. */
- gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
if (gpio) {
@@ -400,7 +381,7 @@ static int pca954x_probe(struct i2c_client *client,
udelay(1);
}
- data->chip = of_device_get_match_data(&client->dev);
+ data->chip = of_device_get_match_data(dev);
if (!data->chip)
data->chip = &chips[id->driver_data];
@@ -414,8 +395,7 @@ static int pca954x_probe(struct i2c_client *client,
if (!ret &&
(id.manufacturer_id != data->chip->id.manufacturer_id ||
id.part_id != data->chip->id.part_id)) {
- dev_warn(&client->dev,
- "unexpected device id %03x-%03x-%x\n",
+ dev_warn(dev, "unexpected device id %03x-%03x-%x\n",
id.manufacturer_id, id.part_id,
id.die_revision);
return -ENODEV;
@@ -427,14 +407,14 @@ static int pca954x_probe(struct i2c_client *client,
* initializes the mux to disconnected state.
*/
if (i2c_smbus_write_byte(client, 0) < 0) {
- dev_warn(&client->dev, "probe failed\n");
+ dev_warn(dev, "probe failed\n");
return -ENODEV;
}
data->last_chan = 0; /* force the first selection */
- idle_disconnect_dt = of_node &&
- of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
+ idle_disconnect_dt = np &&
+ of_property_read_bool(np, "i2c-mux-idle-disconnect");
ret = pca954x_irq_setup(muxc);
if (ret)
@@ -465,7 +445,7 @@ static int pca954x_probe(struct i2c_client *client,
}
if (data->irq) {
- ret = devm_request_threaded_irq(&client->dev, data->client->irq,
+ ret = devm_request_threaded_irq(dev, data->client->irq,
NULL, pca954x_irq_handler,
IRQF_ONESHOT | IRQF_SHARED,
"pca954x", data);
@@ -473,8 +453,7 @@ static int pca954x_probe(struct i2c_client *client,
goto fail_cleanup;
}
- dev_info(&client->dev,
- "registered %d multiplexed busses for I2C %s %s\n",
+ dev_info(dev, "registered %d multiplexed busses for I2C %s %s\n",
num, data->chip->muxtype == pca954x_ismux
? "mux" : "switch", client->name);
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index e52c58c29d9a..4d565b0c5a6e 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -574,13 +574,14 @@ static u8 hpt3xx_udma_filter(ide_drive_t *drive)
if (!HPT370_ALLOW_ATA100_5 ||
check_in_drive_list(drive, bad_ata100_5))
return ATA_UDMA4;
+ /* else: fall through */
case HPT372 :
case HPT372A:
case HPT372N:
case HPT374 :
if (ata_id_is_sata(drive->id))
mask &= ~0x0e;
- /* Fall thru */
+ /* fall through */
default:
return mask;
}
@@ -600,7 +601,7 @@ static u8 hpt3xx_mdma_filter(ide_drive_t *drive)
case HPT374 :
if (ata_id_is_sata(drive->id))
return 0x00;
- /* Fall thru */
+ /* else: fall through */
default:
return 0x07;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 5f178384876f..44a7a255ef74 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -419,10 +419,11 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
int write, void *buffer, unsigned *bufflen,
- struct request_sense *sense, int timeout,
+ struct scsi_sense_hdr *sshdr, int timeout,
req_flags_t rq_flags)
{
struct cdrom_info *info = drive->driver_data;
+ struct scsi_sense_hdr local_sshdr;
int retries = 10;
bool failed;
@@ -430,6 +431,9 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
"rq_flags: 0x%x",
cmd[0], write, timeout, rq_flags);
+ if (!sshdr)
+ sshdr = &local_sshdr;
+
/* start of retry loop */
do {
struct request *rq;
@@ -456,8 +460,8 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
if (buffer)
*bufflen = scsi_req(rq)->resid_len;
- if (sense)
- memcpy(sense, scsi_req(rq)->sense, sizeof(*sense));
+ scsi_normalize_sense(scsi_req(rq)->sense,
+ scsi_req(rq)->sense_len, sshdr);
/*
* FIXME: we should probably abort/retry or something in case of
@@ -469,12 +473,10 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
* The request failed. Retry if it was due to a unit
* attention status (usually means media was changed).
*/
- struct request_sense *reqbuf = scsi_req(rq)->sense;
-
- if (reqbuf->sense_key == UNIT_ATTENTION)
+ if (sshdr->sense_key == UNIT_ATTENTION)
cdrom_saw_media_change(drive);
- else if (reqbuf->sense_key == NOT_READY &&
- reqbuf->asc == 4 && reqbuf->ascq != 4) {
+ else if (sshdr->sense_key == NOT_READY &&
+ sshdr->asc == 4 && sshdr->ascq != 4) {
/*
* The drive is in the process of loading
* a disk. Retry, but wait a little to give
@@ -864,7 +866,7 @@ static void msf_from_bcd(struct atapi_msf *msf)
msf->frame = bcd2bin(msf->frame);
}
-int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
+int cdrom_check_status(ide_drive_t *drive, struct scsi_sense_hdr *sshdr)
{
struct cdrom_info *info = drive->driver_data;
struct cdrom_device_info *cdi;
@@ -886,12 +888,11 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
*/
cmd[7] = cdi->sanyo_slot % 3;
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, RQF_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sshdr, 0, RQF_QUIET);
}
static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
- unsigned long *sectors_per_frame,
- struct request_sense *sense)
+ unsigned long *sectors_per_frame)
{
struct {
__be32 lba;
@@ -908,7 +909,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_READ_CDVD_CAPACITY;
- stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0,
+ stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, NULL, 0,
RQF_QUIET);
if (stat)
return stat;
@@ -944,8 +945,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
}
static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
- int format, char *buf, int buflen,
- struct request_sense *sense)
+ int format, char *buf, int buflen)
{
unsigned char cmd[BLK_MAX_CDB];
@@ -962,11 +962,11 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
if (msf_flag)
cmd[1] = 2;
- return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, RQF_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, NULL, 0, RQF_QUIET);
}
/* Try to read the entire TOC for the disk into our internal buffer. */
-int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
+int ide_cd_read_toc(ide_drive_t *drive)
{
int stat, ntracks, i;
struct cdrom_info *info = drive->driver_data;
@@ -996,14 +996,13 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
* Check to see if the existing data is still valid. If it is,
* just return.
*/
- (void) cdrom_check_status(drive, sense);
+ (void) cdrom_check_status(drive, NULL);
if (drive->atapi_flags & IDE_AFLAG_TOC_VALID)
return 0;
/* try to get the total cdrom capacity and sector size */
- stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame,
- sense);
+ stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame);
if (stat)
toc->capacity = 0x1fffff;
@@ -1016,7 +1015,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
/* first read just the header, so we know how long the TOC is */
stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
- sizeof(struct atapi_toc_header), sense);
+ sizeof(struct atapi_toc_header));
if (stat)
return stat;
@@ -1036,7 +1035,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
(ntracks + 1) *
- sizeof(struct atapi_toc_entry), sense);
+ sizeof(struct atapi_toc_entry));
if (stat && toc->hdr.first_track > 1) {
/*
@@ -1056,8 +1055,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
(ntracks + 1) *
- sizeof(struct atapi_toc_entry),
- sense);
+ sizeof(struct atapi_toc_entry));
if (stat)
return stat;
@@ -1094,7 +1092,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
if (toc->hdr.first_track != CDROM_LEADOUT) {
/* read the multisession information */
stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
- sizeof(ms_tmp), sense);
+ sizeof(ms_tmp));
if (stat)
return stat;
@@ -1108,7 +1106,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
/* re-read multisession information using MSF format */
stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
- sizeof(ms_tmp), sense);
+ sizeof(ms_tmp));
if (stat)
return stat;
@@ -1412,7 +1410,7 @@ static sector_t ide_cdrom_capacity(ide_drive_t *drive)
{
unsigned long capacity, sectors_per_frame;
- if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
+ if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame))
return 0;
return capacity * sectors_per_frame;
@@ -1710,9 +1708,8 @@ static unsigned int idecd_check_events(struct gendisk *disk,
static int idecd_revalidate_disk(struct gendisk *disk)
{
struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
- struct request_sense sense;
- ide_cd_read_toc(info->drive, &sense);
+ ide_cd_read_toc(info->drive);
return 0;
}
@@ -1736,7 +1733,6 @@ static int ide_cd_probe(ide_drive_t *drive)
{
struct cdrom_info *info;
struct gendisk *g;
- struct request_sense sense;
ide_debug_log(IDE_DBG_PROBE, "driver_req: %s, media: 0x%x",
drive->driver_req, drive->media);
@@ -1785,7 +1781,7 @@ static int ide_cd_probe(ide_drive_t *drive)
goto failed;
}
- ide_cd_read_toc(drive, &sense);
+ ide_cd_read_toc(drive);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
device_add_disk(&drive->gendev, g);
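The conversion throughout ide-cd replaces raw struct request_sense copies with scsi_normalize_sense(), which decodes both fixed and descriptor format sense buffers into a scsi_sense_hdr. A minimal usage sketch (sense_buf and sense_len are placeholders):

    struct scsi_sense_hdr sshdr;

    /* returns true only if the buffer held valid sense data */
    if (scsi_normalize_sense(sense_buf, sense_len, &sshdr))
            pr_debug("sense %#x asc %#x ascq %#x\n",
                     sshdr.sense_key, sshdr.asc, sshdr.ascq);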
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 04f0f310a856..a69dc7f61c4d 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -98,11 +98,11 @@ void ide_cd_log_error(const char *, struct request *, struct request_sense *);
/* ide-cd.c functions used by ide-cd_ioctl.c */
int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
- unsigned *, struct request_sense *, int, req_flags_t);
-int ide_cd_read_toc(ide_drive_t *, struct request_sense *);
+ unsigned *, struct scsi_sense_hdr *, int, req_flags_t);
+int ide_cd_read_toc(ide_drive_t *);
int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
void ide_cdrom_update_speed(ide_drive_t *, u8 *);
-int cdrom_check_status(ide_drive_t *, struct request_sense *);
+int cdrom_check_status(ide_drive_t *, struct scsi_sense_hdr *);
/* ide-cd_ioctl.c */
int ide_cdrom_open_real(struct cdrom_device_info *, int);
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index b1322400887b..4a6e1a413ead 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -43,14 +43,14 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
{
ide_drive_t *drive = cdi->handle;
struct media_event_desc med;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
int stat;
if (slot_nr != CDSL_CURRENT)
return -EINVAL;
- stat = cdrom_check_status(drive, &sense);
- if (!stat || sense.sense_key == UNIT_ATTENTION)
+ stat = cdrom_check_status(drive, &sshdr);
+ if (!stat || sshdr.sense_key == UNIT_ATTENTION)
return CDS_DISC_OK;
if (!cdrom_get_media_event(cdi, &med)) {
@@ -62,8 +62,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
return CDS_NO_DISC;
}
- if (sense.sense_key == NOT_READY && sense.asc == 0x04
- && sense.ascq == 0x04)
+ if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04
+ && sshdr.ascq == 0x04)
return CDS_DISC_OK;
/*
@@ -71,8 +71,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
* just return TRAY_OPEN since ATAPI doesn't provide
* any other way to detect this...
*/
- if (sense.sense_key == NOT_READY) {
- if (sense.asc == 0x3a && sense.ascq == 1)
+ if (sshdr.sense_key == NOT_READY) {
+ if (sshdr.asc == 0x3a && sshdr.ascq == 1)
return CDS_NO_DISC;
else
return CDS_TRAY_OPEN;
@@ -105,8 +105,7 @@ unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
/* Eject the disk if EJECTFLAG is 0.
If EJECTFLAG is 1, try to reload the disk. */
static
-int cdrom_eject(ide_drive_t *drive, int ejectflag,
- struct request_sense *sense)
+int cdrom_eject(ide_drive_t *drive, int ejectflag)
{
struct cdrom_info *cd = drive->driver_data;
struct cdrom_device_info *cdi = &cd->devinfo;
@@ -129,20 +128,16 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
cmd[0] = GPCMD_START_STOP_UNIT;
cmd[4] = loej | (ejectflag != 0);
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
}
/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
static
-int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
- struct request_sense *sense)
+int ide_cd_lockdoor(ide_drive_t *drive, int lockflag)
{
- struct request_sense my_sense;
+ struct scsi_sense_hdr sshdr;
int stat;
- if (sense == NULL)
- sense = &my_sense;
-
/* If the drive cannot lock the door, just pretend. */
if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) {
stat = 0;
@@ -155,14 +150,14 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
cmd[4] = lockflag ? 1 : 0;
stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
- sense, 0, 0);
+ &sshdr, 0, 0);
}
/* If we got an illegal field error, the drive
probably cannot lock the door. */
if (stat != 0 &&
- sense->sense_key == ILLEGAL_REQUEST &&
- (sense->asc == 0x24 || sense->asc == 0x20)) {
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ (sshdr.asc == 0x24 || sshdr.asc == 0x20)) {
printk(KERN_ERR "%s: door locking not supported\n",
drive->name);
drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
@@ -170,7 +165,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
}
/* no medium, that's alright. */
- if (stat != 0 && sense->sense_key == NOT_READY && sense->asc == 0x3a)
+ if (stat != 0 && sshdr.sense_key == NOT_READY && sshdr.asc == 0x3a)
stat = 0;
if (stat == 0) {
@@ -186,23 +181,22 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
int ide_cdrom_tray_move(struct cdrom_device_info *cdi, int position)
{
ide_drive_t *drive = cdi->handle;
- struct request_sense sense;
if (position) {
- int stat = ide_cd_lockdoor(drive, 0, &sense);
+ int stat = ide_cd_lockdoor(drive, 0);
if (stat)
return stat;
}
- return cdrom_eject(drive, !position, &sense);
+ return cdrom_eject(drive, !position);
}
int ide_cdrom_lock_door(struct cdrom_device_info *cdi, int lock)
{
ide_drive_t *drive = cdi->handle;
- return ide_cd_lockdoor(drive, lock, NULL);
+ return ide_cd_lockdoor(drive, lock);
}
/*
@@ -213,7 +207,6 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
{
ide_drive_t *drive = cdi->handle;
struct cdrom_info *cd = drive->driver_data;
- struct request_sense sense;
u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE];
int stat;
unsigned char cmd[BLK_MAX_CDB];
@@ -236,7 +229,7 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
cmd[5] = speed & 0xff;
}
- stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
+ stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
if (!ide_cdrom_get_capabilities(drive, buf)) {
ide_cdrom_update_speed(drive, buf);
@@ -252,11 +245,10 @@ int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
struct atapi_toc *toc;
ide_drive_t *drive = cdi->handle;
struct cdrom_info *info = drive->driver_data;
- struct request_sense sense;
int ret;
if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
- ret = ide_cd_read_toc(drive, &sense);
+ ret = ide_cd_read_toc(drive);
if (ret)
return ret;
}
@@ -300,7 +292,6 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
{
ide_drive_t *drive = cdi->handle;
struct cdrom_info *cd = drive->driver_data;
- struct request_sense sense;
struct request *rq;
int ret;
@@ -315,7 +306,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
* lock it again.
*/
if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
- (void)ide_cd_lockdoor(drive, 1, &sense);
+ (void)ide_cd_lockdoor(drive, 1);
return ret;
}
@@ -355,7 +346,6 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
struct atapi_toc_entry *first_toc, *last_toc;
unsigned long lba_start, lba_end;
int stat;
- struct request_sense sense;
unsigned char cmd[BLK_MAX_CDB];
stat = ide_cd_get_toc_entry(drive, ti->cdti_trk0, &first_toc);
@@ -380,7 +370,7 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
}
static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
@@ -391,7 +381,7 @@ static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
int stat;
/* Make sure our saved TOC is valid. */
- stat = ide_cd_read_toc(drive, NULL);
+ stat = ide_cd_read_toc(drive);
if (stat)
return stat;
@@ -461,8 +451,8 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
layer. The packet must be complete, as we do not
touch it at all. */
- if (cgc->sense)
- memset(cgc->sense, 0, sizeof(struct request_sense));
+ if (cgc->sshdr)
+ memset(cgc->sshdr, 0, sizeof(*cgc->sshdr));
if (cgc->quiet)
flags |= RQF_QUIET;
@@ -470,7 +460,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
cgc->data_direction == CGC_DATA_WRITE,
cgc->buffer, &len,
- cgc->sense, cgc->timeout, flags);
+ cgc->sshdr, cgc->timeout, flags);
if (!cgc->stat)
cgc->buflen -= len;
return cgc->stat;
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 5bd2aafc3753..a8df300f949c 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -427,6 +427,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
* (maintains previous driver behaviour)
*/
break;
+ /* else: fall through */
case CAPACITY_CURRENT:
/* Normal Zip/LS-120 disks */
if (memcmp(cap_desc, &floppy->cap_desc, 8))
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index a444bad7a2aa..0d93e0cfbeaf 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -460,7 +460,6 @@ void do_ide_request(struct request_queue *q)
struct ide_host *host = hwif->host;
struct request *rq = NULL;
ide_startstop_t startstop;
- unsigned long queue_run_ms = 3; /* old plug delay */
spin_unlock_irq(q->queue_lock);
@@ -480,9 +479,6 @@ repeat:
prev_port = hwif->host->cur_port;
if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
time_after(drive->sleep, jiffies)) {
- unsigned long left = jiffies - drive->sleep;
-
- queue_run_ms = jiffies_to_msecs(left + 1);
ide_unlock_port(hwif);
goto plug_device;
}
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 416a2f353071..3b75a7b7a284 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -142,6 +142,7 @@ static void ide_classify_atapi_dev(ide_drive_t *drive)
}
/* Early cdrom models used zero */
type = ide_cdrom;
+ /* fall through */
case ide_cdrom:
drive->dev_flags |= IDE_DFLAG_REMOVABLE;
#ifdef CONFIG_PPC
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index aee7b46d2330..34c1165226a4 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1746,7 +1746,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
{
unsigned long t;
int speed;
- int buffer_size;
u16 *ctl = (u16 *)&tape->caps[12];
ide_debug_log(IDE_DBG_FUNC, "minor: %d", minor);
@@ -1781,7 +1780,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
*ctl /= 2;
tape->buffer_size = *ctl * tape->blk_size;
}
- buffer_size = tape->buffer_size;
/* select the "best" DSC read/write polling freq */
speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 89b29028d315..c21d5c50ae3a 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -128,7 +128,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
return pre_task_out_intr(drive, cmd);
}
handler = task_pio_intr;
- /* fall-through */
+ /* fall through */
case ATA_PROT_NODATA:
if (handler == NULL)
handler = task_no_data_intr;
@@ -140,6 +140,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
hwif->expiry = dma_ops->dma_timer_expiry;
ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
dma_ops->dma_start(drive);
+ /* fall through */
default:
return ide_started;
}
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index c3062b53056f..024bc7ba49ee 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -494,6 +494,7 @@ static int init_chipset_sis5513(struct pci_dev *dev)
pci_read_config_byte(dev, 0x09, &reg);
if ((reg & 0x0f) != 0x00)
pci_write_config_byte(dev, 0x09, reg&0xf0);
+ /* fall through */
case ATA_16:
/* force per drive recovery and active timings
needed on ATA_33 and below chips */
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 62ae7e5abcfa..829dc96c9dd6 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -40,7 +40,7 @@ config ADXL345_I2C
select REGMAP_I2C
help
Say Y here if you want to build support for the Analog Devices
- ADXL345 3-axis digital accelerometer.
+ ADXL345 or ADXL375 3-axis digital accelerometer.
To compile this driver as a module, choose M here: the module
will be called adxl345_i2c and you will also get adxl345_core
@@ -54,7 +54,7 @@ config ADXL345_SPI
select REGMAP_SPI
help
Say Y here if you want to build support for the Analog Devices
- ADXL345 3-axis digital accelerometer.
+ ADXL345 or ADXL375 3-axis digital accelerometer.
To compile this driver as a module, choose M here: the module
will be called adxl345_spi and you will also get adxl345_core
diff --git a/drivers/iio/accel/adxl345.h b/drivers/iio/accel/adxl345.h
index c1ddf3927c47..ccd63de7a55a 100644
--- a/drivers/iio/accel/adxl345.h
+++ b/drivers/iio/accel/adxl345.h
@@ -11,8 +11,13 @@
#ifndef _ADXL345_H_
#define _ADXL345_H_
+enum adxl345_device_type {
+ ADXL345,
+ ADXL375,
+};
+
int adxl345_core_probe(struct device *dev, struct regmap *regmap,
- const char *name);
+ enum adxl345_device_type type, const char *name);
int adxl345_core_remove(struct device *dev);
#endif /* _ADXL345_H_ */
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index 7251d0e63d74..780f87f72338 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -6,21 +6,35 @@
* This file is subject to the terms and conditions of version 2 of
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
+ *
+ * Datasheet: http://www.analog.com/media/en/technical-documentation/data-sheets/ADXL345.pdf
*/
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
#include "adxl345.h"
#define ADXL345_REG_DEVID 0x00
+#define ADXL345_REG_OFSX 0x1e
+#define ADXL345_REG_OFSY 0x1f
+#define ADXL345_REG_OFSZ 0x20
+#define ADXL345_REG_OFS_AXIS(index) (ADXL345_REG_OFSX + (index))
+#define ADXL345_REG_BW_RATE 0x2C
#define ADXL345_REG_POWER_CTL 0x2D
#define ADXL345_REG_DATA_FORMAT 0x31
#define ADXL345_REG_DATAX0 0x32
#define ADXL345_REG_DATAY0 0x34
#define ADXL345_REG_DATAZ0 0x36
+#define ADXL345_REG_DATA_AXIS(index) \
+ (ADXL345_REG_DATAX0 + (index) * sizeof(__le16))
+
+#define ADXL345_BW_RATE GENMASK(3, 0)
+#define ADXL345_BASE_RATE_NANO_HZ 97656250LL
+#define NHZ_PER_HZ 1000000000LL
#define ADXL345_POWER_CTL_MEASURE BIT(3)
#define ADXL345_POWER_CTL_STANDBY 0x00
@@ -42,24 +56,33 @@
*/
static const int adxl345_uscale = 38300;
+/*
+ * The datasheet lists a resolution of ~49 mg per LSB. That's
+ * ~480 mm/s**2 per LSB.
+ */
+static const int adxl375_uscale = 480000;
+
struct adxl345_data {
struct regmap *regmap;
u8 data_range;
+ enum adxl345_device_type type;
};
-#define ADXL345_CHANNEL(reg, axis) { \
+#define ADXL345_CHANNEL(index, axis) { \
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .address = reg, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
}
static const struct iio_chan_spec adxl345_channels[] = {
- ADXL345_CHANNEL(ADXL345_REG_DATAX0, X),
- ADXL345_CHANNEL(ADXL345_REG_DATAY0, Y),
- ADXL345_CHANNEL(ADXL345_REG_DATAZ0, Z),
+ ADXL345_CHANNEL(0, X),
+ ADXL345_CHANNEL(1, Y),
+ ADXL345_CHANNEL(2, Z),
};
static int adxl345_read_raw(struct iio_dev *indio_dev,
@@ -67,7 +90,9 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct adxl345_data *data = iio_priv(indio_dev);
- __le16 regval;
+ __le16 accel;
+ long long samp_freq_nhz;
+ unsigned int regval;
int ret;
switch (mask) {
@@ -77,29 +102,117 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
* ADXL345_REG_DATA(X0/Y0/Z0) contain the least significant byte
* and ADXL345_REG_DATA(X0/Y0/Z0) + 1 the most significant byte
*/
- ret = regmap_bulk_read(data->regmap, chan->address, &regval,
- sizeof(regval));
+ ret = regmap_bulk_read(data->regmap,
+ ADXL345_REG_DATA_AXIS(chan->address),
+ &accel, sizeof(accel));
if (ret < 0)
return ret;
- *val = sign_extend32(le16_to_cpu(regval), 12);
+ *val = sign_extend32(le16_to_cpu(accel), 12);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
- *val2 = adxl345_uscale;
+ switch (data->type) {
+ case ADXL345:
+ *val2 = adxl345_uscale;
+ break;
+ case ADXL375:
+ *val2 = adxl375_uscale;
+ break;
+ }
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = regmap_read(data->regmap,
+ ADXL345_REG_OFS_AXIS(chan->address), &regval);
+ if (ret < 0)
+ return ret;
+ /*
+ * 8-bit resolution at +/- 2g, that is 4x accel data scale
+ * factor
+ */
+ *val = sign_extend32(regval, 7) * 4;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = regmap_read(data->regmap, ADXL345_REG_BW_RATE, &regval);
+ if (ret < 0)
+ return ret;
+
+ samp_freq_nhz = ADXL345_BASE_RATE_NANO_HZ <<
+ (regval & ADXL345_BW_RATE);
+ *val = div_s64_rem(samp_freq_nhz, NHZ_PER_HZ, val2);
+
+ return IIO_VAL_INT_PLUS_NANO;
}
return -EINVAL;
}
+static int adxl345_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adxl345_data *data = iio_priv(indio_dev);
+ s64 n;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ /*
+ * 8-bit resolution at +/- 2g, that is 4x accel data scale
+ * factor
+ */
+ return regmap_write(data->regmap,
+ ADXL345_REG_OFS_AXIS(chan->address),
+ val / 4);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ n = div_s64(val * NHZ_PER_HZ + val2, ADXL345_BASE_RATE_NANO_HZ);
+
+ return regmap_update_bits(data->regmap, ADXL345_REG_BW_RATE,
+ ADXL345_BW_RATE,
+ clamp_val(ilog2(n), 0,
+ ADXL345_BW_RATE));
+ }
+
+ return -EINVAL;
+}
+
+static int adxl345_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+"0.09765625 0.1953125 0.390625 0.78125 1.5625 3.125 6.25 12.5 25 50 100 200 400 800 1600 3200"
+);
+
+static struct attribute *adxl345_attrs[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group adxl345_attrs_group = {
+ .attrs = adxl345_attrs,
+};
+
static const struct iio_info adxl345_info = {
+ .attrs = &adxl345_attrs_group,
.read_raw = adxl345_read_raw,
+ .write_raw = adxl345_write_raw,
+ .write_raw_get_fmt = adxl345_write_raw_get_fmt,
};
int adxl345_core_probe(struct device *dev, struct regmap *regmap,
- const char *name)
+ enum adxl345_device_type type, const char *name)
{
struct adxl345_data *data;
struct iio_dev *indio_dev;
@@ -125,6 +238,7 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
data = iio_priv(indio_dev);
dev_set_drvdata(dev, indio_dev);
data->regmap = regmap;
+ data->type = type;
/* Enable full-resolution mode */
data->data_range = ADXL345_DATA_FORMAT_FULL_RES;
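The sampling-frequency plumbing above deserves a worked example (our own arithmetic): the BW_RATE code selects a power-of-two multiple of the 97656250 nHz base rate, and writes recover the code via ilog2().

    /*
     * Requesting 100 Hz (val = 100, val2 = 0):
     *   n    = (100 * NHZ_PER_HZ) / ADXL345_BASE_RATE_NANO_HZ
     *        = 100000000000 / 97656250 = 1024
     *   code = ilog2(1024) = 10
     *
     * Reading back: 97656250 << 10 = 100000000000 nHz = 100 Hz,
     * so the round trip is exact for the supported rates.
     */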
diff --git a/drivers/iio/accel/adxl345_i2c.c b/drivers/iio/accel/adxl345_i2c.c
index 05e1ec49700c..785c89de91e7 100644
--- a/drivers/iio/accel/adxl345_i2c.c
+++ b/drivers/iio/accel/adxl345_i2c.c
@@ -34,7 +34,8 @@ static int adxl345_i2c_probe(struct i2c_client *client,
return PTR_ERR(regmap);
}
- return adxl345_core_probe(&client->dev, regmap, id ? id->name : NULL);
+ return adxl345_core_probe(&client->dev, regmap, id->driver_data,
+ id ? id->name : NULL);
}
static int adxl345_i2c_remove(struct i2c_client *client)
@@ -43,7 +44,8 @@ static int adxl345_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id adxl345_i2c_id[] = {
- { "adxl345", 0 },
+ { "adxl345", ADXL345 },
+ { "adxl375", ADXL375 },
{ }
};
@@ -51,6 +53,7 @@ MODULE_DEVICE_TABLE(i2c, adxl345_i2c_id);
static const struct of_device_id adxl345_of_match[] = {
{ .compatible = "adi,adxl345" },
+ { .compatible = "adi,adxl375" },
{ },
};
diff --git a/drivers/iio/accel/adxl345_spi.c b/drivers/iio/accel/adxl345_spi.c
index 6d658196f81c..67b7c66a8492 100644
--- a/drivers/iio/accel/adxl345_spi.c
+++ b/drivers/iio/accel/adxl345_spi.c
@@ -42,7 +42,7 @@ static int adxl345_spi_probe(struct spi_device *spi)
return PTR_ERR(regmap);
}
- return adxl345_core_probe(&spi->dev, regmap, id->name);
+ return adxl345_core_probe(&spi->dev, regmap, id->driver_data, id->name);
}
static int adxl345_spi_remove(struct spi_device *spi)
@@ -51,7 +51,8 @@ static int adxl345_spi_remove(struct spi_device *spi)
}
static const struct spi_device_id adxl345_spi_id[] = {
- { "adxl345", 0 },
+ { "adxl345", ADXL345 },
+ { "adxl375", ADXL375 },
{ }
};
@@ -59,6 +60,7 @@ MODULE_DEVICE_TABLE(spi, adxl345_spi_id);
static const struct of_device_id adxl345_of_match[] = {
{ .compatible = "adi,adxl345" },
+ { .compatible = "adi,adxl375" },
{ },
};
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index c149c9c360fc..421a0a8a1379 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1547,6 +1547,7 @@ static int mma8452_probe(struct i2c_client *client,
case FXLS8471_DEVICE_ID:
if (ret == data->chip_info->chip_id)
break;
+ /* else: fall through */
default:
return -ENODEV;
}
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 4dceb75e3586..4964561595f5 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -797,6 +797,7 @@ static int sca3000_write_raw(struct iio_dev *indio_dev,
mutex_lock(&st->lock);
ret = sca3000_write_3db_freq(st, val);
mutex_unlock(&st->lock);
+ return ret;
default:
return -EINVAL;
}
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 056dddb27236..2ca5d1f6ade0 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -14,8 +14,8 @@
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
+#include <linux/property.h>
-#include <linux/iio/common/st_sensors.h>
#include <linux/iio/common/st_sensors_i2c.h>
#include "st_accel.h"
@@ -107,8 +107,8 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id st_accel_acpi_match[] = {
- {"SMO8840", LNG2DM},
- {"SMO8A90", LNG2DM},
+ {"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
+ {"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
{ },
};
MODULE_DEVICE_TABLE(acpi, st_accel_acpi_match);
@@ -117,33 +117,33 @@ MODULE_DEVICE_TABLE(acpi, st_accel_acpi_match);
#endif
static const struct i2c_device_id st_accel_id_table[] = {
- { LSM303DLH_ACCEL_DEV_NAME, LSM303DLH },
- { LSM303DLHC_ACCEL_DEV_NAME, LSM303DLHC },
- { LIS3DH_ACCEL_DEV_NAME, LIS3DH },
- { LSM330D_ACCEL_DEV_NAME, LSM330D },
- { LSM330DL_ACCEL_DEV_NAME, LSM330DL },
- { LSM330DLC_ACCEL_DEV_NAME, LSM330DLC },
- { LIS331DLH_ACCEL_DEV_NAME, LIS331DLH },
- { LSM303DL_ACCEL_DEV_NAME, LSM303DL },
- { LSM303DLM_ACCEL_DEV_NAME, LSM303DLM },
- { LSM330_ACCEL_DEV_NAME, LSM330 },
- { LSM303AGR_ACCEL_DEV_NAME, LSM303AGR },
- { LIS2DH12_ACCEL_DEV_NAME, LIS2DH12 },
- { LIS3L02DQ_ACCEL_DEV_NAME, LIS3L02DQ },
- { LNG2DM_ACCEL_DEV_NAME, LNG2DM },
- { H3LIS331DL_ACCEL_DEV_NAME, H3LIS331DL },
- { LIS331DL_ACCEL_DEV_NAME, LIS331DL },
- { LIS3LV02DL_ACCEL_DEV_NAME, LIS3LV02DL },
- { LIS2DW12_ACCEL_DEV_NAME, LIS2DW12 },
+ { LSM303DLH_ACCEL_DEV_NAME },
+ { LSM303DLHC_ACCEL_DEV_NAME },
+ { LIS3DH_ACCEL_DEV_NAME },
+ { LSM330D_ACCEL_DEV_NAME },
+ { LSM330DL_ACCEL_DEV_NAME },
+ { LSM330DLC_ACCEL_DEV_NAME },
+ { LIS331DLH_ACCEL_DEV_NAME },
+ { LSM303DL_ACCEL_DEV_NAME },
+ { LSM303DLM_ACCEL_DEV_NAME },
+ { LSM330_ACCEL_DEV_NAME },
+ { LSM303AGR_ACCEL_DEV_NAME },
+ { LIS2DH12_ACCEL_DEV_NAME },
+ { LIS3L02DQ_ACCEL_DEV_NAME },
+ { LNG2DM_ACCEL_DEV_NAME },
+ { H3LIS331DL_ACCEL_DEV_NAME },
+ { LIS331DL_ACCEL_DEV_NAME },
+ { LIS3LV02DL_ACCEL_DEV_NAME },
+ { LIS2DW12_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
-static int st_accel_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int st_accel_i2c_probe(struct i2c_client *client)
{
struct iio_dev *indio_dev;
struct st_sensor_data *adata;
+ const char *match;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adata));
@@ -152,19 +152,9 @@ static int st_accel_i2c_probe(struct i2c_client *client,
adata = iio_priv(indio_dev);
- if (client->dev.of_node) {
- st_sensors_of_name_probe(&client->dev, st_accel_of_match,
- client->name, sizeof(client->name));
- } else if (ACPI_HANDLE(&client->dev)) {
- ret = st_sensors_match_acpi_device(&client->dev);
- if ((ret < 0) || (ret >= ST_ACCEL_MAX))
- return -ENODEV;
-
- strlcpy(client->name, st_accel_id_table[ret].name,
- sizeof(client->name));
- } else if (!id)
- return -ENODEV;
-
+ match = device_get_match_data(&client->dev);
+ if (match)
+ strlcpy(client->name, match, sizeof(client->name));
st_sensors_i2c_configure(indio_dev, client, adata);
@@ -188,7 +178,7 @@ static struct i2c_driver st_accel_driver = {
.of_match_table = of_match_ptr(st_accel_of_match),
.acpi_match_table = ACPI_PTR(st_accel_acpi_match),
},
- .probe = st_accel_i2c_probe,
+ .probe_new = st_accel_i2c_probe,
.remove = st_accel_i2c_remove,
.id_table = st_accel_id_table,
};
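With probe_new() there is no i2c_device_id argument; the device name is recovered via device_get_match_data(), which returns the .data pointer of whichever OF/ACPI table matched. For that to work, each match entry must carry the name, roughly like this (the entry shown is hypothetical):

    static const struct of_device_id example_of_match[] = {
            /* hypothetical entry; the real table lists each supported part */
            { .compatible = "vendor,example-accel", .data = "example_accel" },
            { }
    };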
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 9da79070357c..4a754921fb6f 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -157,7 +157,6 @@ config AT91_SAMA5D2_ADC
tristate "Atmel AT91 SAMA5D2 ADC"
depends on ARCH_AT91 || COMPILE_TEST
depends on HAS_IOMEM
- depends on HAS_DMA
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -621,6 +620,16 @@ config ROCKCHIP_SARADC
To compile this driver as a module, choose M here: the
module will be called rockchip_saradc.
+config SC27XX_ADC
+ tristate "Spreadtrum SC27xx series PMICs ADC"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ Say yes here to build support for the integrated ADC inside the
+ Spreadtrum SC27xx series PMICs.
+
+ This driver can also be built as a module. If so, the module
+ will be called sc27xx_adc.
+
config SPEAR_ADC
tristate "ST SPEAr ADC"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -647,7 +656,6 @@ config SD_ADC_MODULATOR
config STM32_ADC_CORE
tristate "STMicroelectronics STM32 adc core"
depends on ARCH_STM32 || COMPILE_TEST
- depends on HAS_DMA
depends on OF
depends on REGULATOR
select IIO_BUFFER
@@ -717,6 +725,7 @@ config SUN4I_GPADC
depends on IIO
depends on MFD_SUN4I_GPADC || MACH_SUN8I
depends on THERMAL || !THERMAL_OF
+ select REGMAP_IRQ
help
Say yes here to build support for Allwinner (A10, A13 and A31) SoCs
GPADC. This ADC provides 4 channels which can be used as an ADC or as
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 28a9423997f3..03db7b578f9c 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
+obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index cf1b048b0665..fc9510716ac7 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -209,6 +209,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
int ret;
+ unsigned long timeout;
ret = ad_sigma_delta_set_channel(sigma_delta, channel);
if (ret)
@@ -224,8 +225,8 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
sigma_delta->irq_dis = false;
enable_irq(sigma_delta->spi->irq);
- ret = wait_for_completion_timeout(&sigma_delta->completion, 2*HZ);
- if (ret == 0) {
+ timeout = wait_for_completion_timeout(&sigma_delta->completion, 2 * HZ);
+ if (timeout == 0) {
sigma_delta->irq_dis = true;
disable_irq_nosync(sigma_delta->spi->irq);
ret = -EIO;
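The type change matters because wait_for_completion_timeout() returns an unsigned long: the jiffies remaining, or 0 on timeout. Storing that in the signed ret risked conflating it with error codes. The idiomatic pattern, as a sketch (done is a placeholder completion):

    unsigned long timeout;

    timeout = wait_for_completion_timeout(&done, 2 * HZ);
    if (!timeout)
            return -ETIMEDOUT;      /* zero means the wait expired */
    /* otherwise 'timeout' holds the jiffies left before expiry */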
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 8729d6524b4d..d5ea84cf6460 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -102,14 +102,26 @@
#define AT91_SAMA5D2_LCDR 0x20
/* Interrupt Enable Register */
#define AT91_SAMA5D2_IER 0x24
+/* Interrupt Enable Register - TS X measurement ready */
+#define AT91_SAMA5D2_IER_XRDY BIT(20)
+/* Interrupt Enable Register - TS Y measurement ready */
+#define AT91_SAMA5D2_IER_YRDY BIT(21)
+/* Interrupt Enable Register - TS pressure measurement ready */
+#define AT91_SAMA5D2_IER_PRDY BIT(22)
/* Interrupt Enable Register - general overrun error */
#define AT91_SAMA5D2_IER_GOVRE BIT(25)
+/* Interrupt Enable Register - Pen detect */
+#define AT91_SAMA5D2_IER_PEN BIT(29)
+/* Interrupt Enable Register - No pen detect */
+#define AT91_SAMA5D2_IER_NOPEN BIT(30)
/* Interrupt Disable Register */
#define AT91_SAMA5D2_IDR 0x28
/* Interrupt Mask Register */
#define AT91_SAMA5D2_IMR 0x2c
/* Interrupt Status Register */
#define AT91_SAMA5D2_ISR 0x30
+/* Interrupt Status Register - Pen touching sense status */
+#define AT91_SAMA5D2_ISR_PENS BIT(31)
/* Last Channel Trigger Mode Register */
#define AT91_SAMA5D2_LCTMR 0x34
/* Last Channel Compare Window Register */
@@ -118,6 +130,15 @@
#define AT91_SAMA5D2_OVER 0x3c
/* Extended Mode Register */
#define AT91_SAMA5D2_EMR 0x40
+/* Extended Mode Register - Oversampling rate */
+#define AT91_SAMA5D2_EMR_OSR(V) ((V) << 16)
+#define AT91_SAMA5D2_EMR_OSR_MASK GENMASK(17, 16)
+#define AT91_SAMA5D2_EMR_OSR_1SAMPLES 0
+#define AT91_SAMA5D2_EMR_OSR_4SAMPLES 1
+#define AT91_SAMA5D2_EMR_OSR_16SAMPLES 2
+
+/* Extended Mode Register - Averaging on single trigger event */
+#define AT91_SAMA5D2_EMR_ASTE(V) ((V) << 20)
/* Compare Window Register */
#define AT91_SAMA5D2_CWR 0x44
/* Channel Gain Register */
@@ -131,8 +152,38 @@
#define AT91_SAMA5D2_CDR0 0x50
/* Analog Control Register */
#define AT91_SAMA5D2_ACR 0x94
+/* Analog Control Register - Pen detect sensitivity mask */
+#define AT91_SAMA5D2_ACR_PENDETSENS_MASK GENMASK(1, 0)
+
/* Touchscreen Mode Register */
#define AT91_SAMA5D2_TSMR 0xb0
+/* Touchscreen Mode Register - No touch mode */
+#define AT91_SAMA5D2_TSMR_TSMODE_NONE 0
+/* Touchscreen Mode Register - 4 wire screen, no pressure measurement */
+#define AT91_SAMA5D2_TSMR_TSMODE_4WIRE_NO_PRESS 1
+/* Touchscreen Mode Register - 4 wire screen, pressure measurement */
+#define AT91_SAMA5D2_TSMR_TSMODE_4WIRE_PRESS 2
+/* Touchscreen Mode Register - 5 wire screen */
+#define AT91_SAMA5D2_TSMR_TSMODE_5WIRE 3
+/* Touchscreen Mode Register - Average samples mask */
+#define AT91_SAMA5D2_TSMR_TSAV_MASK GENMASK(5, 4)
+/* Touchscreen Mode Register - Average samples */
+#define AT91_SAMA5D2_TSMR_TSAV(x) ((x) << 4)
+/* Touchscreen Mode Register - Touch/trigger frequency ratio mask */
+#define AT91_SAMA5D2_TSMR_TSFREQ_MASK GENMASK(11, 8)
+/* Touchscreen Mode Register - Touch/trigger frequency ratio */
+#define AT91_SAMA5D2_TSMR_TSFREQ(x) ((x) << 8)
+/* Touchscreen Mode Register - Pen Debounce Time mask */
+#define AT91_SAMA5D2_TSMR_PENDBC_MASK GENMASK(31, 28)
+/* Touchscreen Mode Register - Pen Debounce Time */
+#define AT91_SAMA5D2_TSMR_PENDBC(x) ((x) << 28)
+/* Touchscreen Mode Register - No DMA for touch measurements */
+#define AT91_SAMA5D2_TSMR_NOTSDMA BIT(22)
+/* Touchscreen Mode Register - Disable pen detection */
+#define AT91_SAMA5D2_TSMR_PENDET_DIS (0 << 24)
+/* Touchscreen Mode Register - Enable pen detection */
+#define AT91_SAMA5D2_TSMR_PENDET_ENA BIT(24)
+
/* Touchscreen X Position Register */
#define AT91_SAMA5D2_XPOSR 0xb4
/* Touchscreen Y Position Register */
@@ -151,6 +202,12 @@
#define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL 2
/* Trigger Mode external trigger any edge */
#define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY 3
+/* Trigger Mode internal periodic */
+#define AT91_SAMA5D2_TRGR_TRGMOD_PERIODIC 5
+/* Trigger Mode - trigger period mask */
+#define AT91_SAMA5D2_TRGR_TRGPER_MASK GENMASK(31, 16)
+/* Trigger Mode - trigger period */
+#define AT91_SAMA5D2_TRGR_TRGPER(x) ((x) << 16)
/* Correction Select Register */
#define AT91_SAMA5D2_COSR 0xd0
@@ -169,6 +226,22 @@
#define AT91_SAMA5D2_SINGLE_CHAN_CNT 12
#define AT91_SAMA5D2_DIFF_CHAN_CNT 6
+#define AT91_SAMA5D2_TIMESTAMP_CHAN_IDX (AT91_SAMA5D2_SINGLE_CHAN_CNT + \
+ AT91_SAMA5D2_DIFF_CHAN_CNT + 1)
+
+#define AT91_SAMA5D2_TOUCH_X_CHAN_IDX (AT91_SAMA5D2_SINGLE_CHAN_CNT + \
+ AT91_SAMA5D2_DIFF_CHAN_CNT * 2)
+#define AT91_SAMA5D2_TOUCH_Y_CHAN_IDX (AT91_SAMA5D2_TOUCH_X_CHAN_IDX + 1)
+#define AT91_SAMA5D2_TOUCH_P_CHAN_IDX (AT91_SAMA5D2_TOUCH_Y_CHAN_IDX + 1)
+#define AT91_SAMA5D2_MAX_CHAN_IDX AT91_SAMA5D2_TOUCH_P_CHAN_IDX
+
+#define AT91_SAMA5D2_TOUCH_SAMPLE_PERIOD_US 2000 /* 2ms */
+#define AT91_SAMA5D2_TOUCH_PEN_DETECT_DEBOUNCE_US 200
+
+#define AT91_SAMA5D2_XYZ_MASK GENMASK(11, 0)
+
+#define AT91_SAMA5D2_MAX_POS_BITS 12
+
/*
* Maximum number of bytes to hold conversion from all channels
* without the timestamp.
@@ -184,6 +257,11 @@
#define AT91_HWFIFO_MAX_SIZE_STR "128"
#define AT91_HWFIFO_MAX_SIZE 128
+/* Possible values for oversampling ratio */
+#define AT91_OSR_1SAMPLES 1
+#define AT91_OSR_4SAMPLES 4
+#define AT91_OSR_16SAMPLES 16
+
#define AT91_SAMA5D2_CHAN_SINGLE(num, addr) \
{ \
.type = IIO_VOLTAGE, \
@@ -192,12 +270,13 @@
.scan_index = num, \
.scan_type = { \
.sign = 'u', \
- .realbits = 12, \
+ .realbits = 14, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = "CH"#num, \
.indexed = 1, \
}
@@ -212,16 +291,50 @@
.scan_index = num + AT91_SAMA5D2_SINGLE_CHAN_CNT, \
.scan_type = { \
.sign = 's', \
- .realbits = 12, \
+ .realbits = 14, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = "CH"#num"-CH"#num2, \
.indexed = 1, \
}
+#define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod) \
+ { \
+ .type = IIO_POSITIONRELATIVE, \
+ .modified = 1, \
+ .channel = num, \
+ .channel2 = mod, \
+ .scan_index = num, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .datasheet_name = name, \
+ }
+#define AT91_SAMA5D2_CHAN_PRESSURE(num, name) \
+ { \
+ .type = IIO_PRESSURE, \
+ .channel = num, \
+ .scan_index = num, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .datasheet_name = name, \
+ }
+
#define at91_adc_readl(st, reg) readl_relaxed(st->base + reg)
#define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg)
@@ -260,6 +373,22 @@ struct at91_adc_dma {
s64 dma_ts;
};
+/**
+ * at91_adc_touch - at91-sama5d2 touchscreen information struct
+ * @sample_period_val: the value for periodic trigger interval
+ * @touching: is the pen touching the screen or not
+ * @x_pos: temporary placeholder for pressure computation
+ * @channels_bitmask: bitmask with the touchscreen channels enabled
+ * @workq: workqueue for buffer data pushing
+ */
+struct at91_adc_touch {
+ u16 sample_period_val;
+ bool touching;
+ u16 x_pos;
+ unsigned long channels_bitmask;
+ struct work_struct workq;
+};
+
struct at91_adc_state {
void __iomem *base;
int irq;
@@ -267,14 +396,17 @@ struct at91_adc_state {
struct regulator *reg;
struct regulator *vref;
int vref_uv;
+ unsigned int current_sample_rate;
struct iio_trigger *trig;
const struct at91_adc_trigger *selected_trig;
const struct iio_chan_spec *chan;
bool conversion_done;
u32 conversion_value;
+ unsigned int oversampling_ratio;
struct at91_adc_soc_info soc_info;
wait_queue_head_t wq_data_available;
struct at91_adc_dma dma_st;
+ struct at91_adc_touch touch_st;
u16 buffer[AT91_BUFFER_MAX_HWORDS];
/*
* lock to prevent concurrent 'single conversion' requests through
@@ -329,8 +461,10 @@ static const struct iio_chan_spec at91_adc_channels[] = {
AT91_SAMA5D2_CHAN_DIFF(6, 7, 0x68),
AT91_SAMA5D2_CHAN_DIFF(8, 9, 0x70),
AT91_SAMA5D2_CHAN_DIFF(10, 11, 0x78),
- IIO_CHAN_SOFT_TIMESTAMP(AT91_SAMA5D2_SINGLE_CHAN_CNT
- + AT91_SAMA5D2_DIFF_CHAN_CNT + 1),
+ IIO_CHAN_SOFT_TIMESTAMP(AT91_SAMA5D2_TIMESTAMP_CHAN_IDX),
+ AT91_SAMA5D2_CHAN_TOUCH(AT91_SAMA5D2_TOUCH_X_CHAN_IDX, "x", IIO_MOD_X),
+ AT91_SAMA5D2_CHAN_TOUCH(AT91_SAMA5D2_TOUCH_Y_CHAN_IDX, "y", IIO_MOD_Y),
+ AT91_SAMA5D2_CHAN_PRESSURE(AT91_SAMA5D2_TOUCH_P_CHAN_IDX, "pressure"),
};
static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
@@ -354,6 +488,231 @@ at91_adc_chan_get(struct iio_dev *indio_dev, int chan)
return indio_dev->channels + index;
}
+static inline int at91_adc_of_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ return at91_adc_chan_xlate(indio_dev, iiospec->args[0]);
+}
+
+static void at91_adc_config_emr(struct at91_adc_state *st)
+{
+ /* configure the extended mode register */
+ unsigned int emr = at91_adc_readl(st, AT91_SAMA5D2_EMR);
+
+ /* select oversampling per single trigger event */
+ emr |= AT91_SAMA5D2_EMR_ASTE(1);
+
+ /* clear any stale oversampling ratio bits before setting the new one */
+ emr &= ~AT91_SAMA5D2_EMR_OSR_MASK;
+
+ /* select oversampling ratio from configuration */
+ switch (st->oversampling_ratio) {
+ case AT91_OSR_1SAMPLES:
+ emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_1SAMPLES) &
+ AT91_SAMA5D2_EMR_OSR_MASK;
+ break;
+ case AT91_OSR_4SAMPLES:
+ emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_4SAMPLES) &
+ AT91_SAMA5D2_EMR_OSR_MASK;
+ break;
+ case AT91_OSR_16SAMPLES:
+ emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_16SAMPLES) &
+ AT91_SAMA5D2_EMR_OSR_MASK;
+ break;
+ }
+
+ at91_adc_writel(st, AT91_SAMA5D2_EMR, emr);
+}
+
+static int at91_adc_adjust_val_osr(struct at91_adc_state *st, int *val)
+{
+ if (st->oversampling_ratio == AT91_OSR_1SAMPLES) {
+ /*
+ * in this case we only have 12 bits of real data, but the
+ * channel is registered as 14 bits, so shift left by two bits
+ */
+ *val <<= 2;
+ } else if (st->oversampling_ratio == AT91_OSR_4SAMPLES) {
+ /*
+ * in this case we have 13 bits of real data, but the channel
+ * is registered as 14 bits, so shift left by one bit
+ */
+ *val <<= 1;
+ }
+
+ return IIO_VAL_INT;
+}
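Why the shifts above: the voltage channels are registered with 14 significant bits, while the hardware delivers 12 valid bits at oversampling ratio 1 and 13 bits at ratio 4, so results are left-aligned to keep full scale comparable across ratios. An illustrative helper (not part of the driver):

/* Illustration only: left-align a raw sample to the 14-bit channel
 * format, mirroring at91_adc_adjust_val_osr(). real_bits is 12 at
 * OSR 1, 13 at OSR 4 and 14 at OSR 16.
 */
static inline int align_to_14bit(int raw, int real_bits)
{
	return raw << (14 - real_bits);	/* e.g. 0xfff << 2 == 0x3ffc */
}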
+
+static void at91_adc_adjust_val_osr_array(struct at91_adc_state *st, void *buf,
+ int len)
+{
+ int i = 0, val;
+ u16 *buf_u16 = (u16 *) buf;
+
+ /*
+ * We are converting each two bytes (each sample).
+ * First convert the byte based array to u16, and convert each sample
+ * separately.
+ * Each value is two bytes in an array of chars, so to not shift
+ * more than we need, save the value separately.
+ * len is in bytes, so divide by two to get number of samples.
+ */
+ while (i < len / 2) {
+ val = buf_u16[i];
+ at91_adc_adjust_val_osr(st, &val);
+ buf_u16[i] = val;
+ i++;
+ }
+}
+
+static int at91_adc_configure_touch(struct at91_adc_state *st, bool state)
+{
+ u32 clk_khz = st->current_sample_rate / 1000;
+ int i = 0;
+ u16 pendbc;
+ u32 tsmr, acr;
+
+ if (!state) {
+ /* disabling touch IRQs and setting mode to no touch enabled */
+ at91_adc_writel(st, AT91_SAMA5D2_IDR,
+ AT91_SAMA5D2_IER_PEN | AT91_SAMA5D2_IER_NOPEN);
+ at91_adc_writel(st, AT91_SAMA5D2_TSMR, 0);
+ return 0;
+ }
+ /*
+ * The debounce time is in microseconds and the clock in kilohertz,
+ * so multiply first and only then divide by 1000 to avoid losing
+ * precision. Round up so that pendbc is at least 1.
+ */
+ pendbc = round_up(AT91_SAMA5D2_TOUCH_PEN_DETECT_DEBOUNCE_US *
+ clk_khz / 1000, 1);
+
+ /* get the required exponent */
+ while (pendbc >> i++)
+ ;
+
+ pendbc = i;
+
+ tsmr = AT91_SAMA5D2_TSMR_TSMODE_4WIRE_PRESS;
+
+ tsmr |= AT91_SAMA5D2_TSMR_TSAV(2) & AT91_SAMA5D2_TSMR_TSAV_MASK;
+ tsmr |= AT91_SAMA5D2_TSMR_PENDBC(pendbc) &
+ AT91_SAMA5D2_TSMR_PENDBC_MASK;
+ tsmr |= AT91_SAMA5D2_TSMR_NOTSDMA;
+ tsmr |= AT91_SAMA5D2_TSMR_PENDET_ENA;
+ tsmr |= AT91_SAMA5D2_TSMR_TSFREQ(2) & AT91_SAMA5D2_TSMR_TSFREQ_MASK;
+
+ at91_adc_writel(st, AT91_SAMA5D2_TSMR, tsmr);
+
+ acr = at91_adc_readl(st, AT91_SAMA5D2_ACR);
+ acr &= ~AT91_SAMA5D2_ACR_PENDETSENS_MASK;
+ acr |= 0x02 & AT91_SAMA5D2_ACR_PENDETSENS_MASK;
+ at91_adc_writel(st, AT91_SAMA5D2_ACR, acr);
+
+ /* Sample Period Time = (TRGPER + 1) / ADCClock */
+ st->touch_st.sample_period_val =
+ round_up((AT91_SAMA5D2_TOUCH_SAMPLE_PERIOD_US *
+ clk_khz / 1000) - 1, 1);
+ /* enable pen detect IRQ */
+ at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_PEN);
+
+ return 0;
+}
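The terse `while (pendbc >> i++);` loop above computes the exponent written into the PENDBC field, which appears to encode the debounce period as a power of two of ADC clock cycles. For pendbc >= 1 the loop leaves i equal to fls(pendbc) + 1; an equivalent sketch using the kernel's fls():

#include <linux/bitops.h>

/* Sketch: same result as the shift loop in at91_adc_configure_touch()
 * for pendbc >= 1, e.g. pendbc == 1 -> 2, pendbc == 3 -> 3.
 */
static inline u16 pendbc_exponent(u16 pendbc)
{
	return fls(pendbc) + 1;
}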
+
+static u16 at91_adc_touch_pos(struct at91_adc_state *st, int reg)
+{
+ u32 val;
+ u32 scale, result, pos;
+
+ /*
+ * to obtain the actual position we must divide by scale
+ * and multiply with max, where
+ * max = 2^AT91_SAMA5D2_MAX_POS_BITS - 1
+ */
+ /* first half of register is the x or y, second half is the scale */
+ val = at91_adc_readl(st, reg);
+ if (!val)
+ dev_dbg(&iio_priv_to_dev(st)->dev, "pos is 0\n");
+
+ pos = val & AT91_SAMA5D2_XYZ_MASK;
+ result = (pos << AT91_SAMA5D2_MAX_POS_BITS) - pos;
+ scale = (val >> 16) & AT91_SAMA5D2_XYZ_MASK;
+ if (scale == 0) {
+ dev_err(&iio_priv_to_dev(st)->dev, "scale is 0\n");
+ return 0;
+ }
+ result /= scale;
+
+ return result;
+}
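The position register packs the raw coordinate in its low half and the full-scale value in its high half; the reported position is pos * (2^12 - 1) / scale, with the multiplication by 4095 done as `(pos << 12) - pos`. A worked sketch:

/* Sketch of the scaling in at91_adc_touch_pos(). With pos == 1024 and
 * scale == 4095: (1024 << 12) - 1024 == 1024 * 4095 == 4193280, and
 * 4193280 / 4095 == 1024, so a full-scale reading maps 1:1.
 */
static inline u16 scale_position(u32 pos, u32 scale)
{
	return ((pos << 12) - pos) / scale;
}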
+
+static u16 at91_adc_touch_x_pos(struct at91_adc_state *st)
+{
+ st->touch_st.x_pos = at91_adc_touch_pos(st, AT91_SAMA5D2_XPOSR);
+ return st->touch_st.x_pos;
+}
+
+static u16 at91_adc_touch_y_pos(struct at91_adc_state *st)
+{
+ return at91_adc_touch_pos(st, AT91_SAMA5D2_YPOSR);
+}
+
+static u16 at91_adc_touch_pressure(struct at91_adc_state *st)
+{
+ u32 val;
+ u32 z1, z2;
+ u32 pres;
+ u32 rxp = 1;
+ u32 factor = 1000;
+
+ /* calculate the pressure */
+ val = at91_adc_readl(st, AT91_SAMA5D2_PRESSR);
+ z1 = val & AT91_SAMA5D2_XYZ_MASK;
+ z2 = (val >> 16) & AT91_SAMA5D2_XYZ_MASK;
+
+ if (z1 != 0)
+ pres = rxp * (st->touch_st.x_pos * factor / 1024) *
+ (z2 * factor / z1 - factor) /
+ factor;
+ else
+ pres = 0xFFFF; /* no pen contact */
+
+ /*
+ * The pressure from device grows down, minimum is 0xFFFF, maximum 0x0.
+ * We compute it this way, but let's return it in the expected way,
+ * growing from 0 to 0xFFFF.
+ */
+ return 0xFFFF - pres;
+}
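The arithmetic above is an integer form of the usual 4-wire resistive-touch pressure estimate, roughly Rtouch = rxp * (x / 1024) * (z2 / z1 - 1), with `factor` preserving three decimal digits across the divisions and rxp fixed at 1. A standalone sketch of the same computation:

/* Sketch mirroring at91_adc_touch_pressure(): returns the raw
 * (inverted) pressure before the driver's final 0xFFFF - pres flip.
 * All inputs are raw ADC counts.
 */
static inline u32 touch_pressure_raw(u32 x_pos, u32 z1, u32 z2)
{
	const u32 rxp = 1, factor = 1000;

	if (!z1)
		return 0xFFFF;	/* no pen contact */

	return rxp * (x_pos * factor / 1024) *
	       (z2 * factor / z1 - factor) / factor;
}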
+
+static int at91_adc_read_position(struct at91_adc_state *st, int chan, u16 *val)
+{
+ *val = 0;
+ if (!st->touch_st.touching)
+ return -ENODATA;
+ if (chan == AT91_SAMA5D2_TOUCH_X_CHAN_IDX)
+ *val = at91_adc_touch_x_pos(st);
+ else if (chan == AT91_SAMA5D2_TOUCH_Y_CHAN_IDX)
+ *val = at91_adc_touch_y_pos(st);
+ else
+ return -ENODATA;
+
+ return IIO_VAL_INT;
+}
+
+static int at91_adc_read_pressure(struct at91_adc_state *st, int chan, u16 *val)
+{
+ *val = 0;
+ if (!st->touch_st.touching)
+ return -ENODATA;
+ if (chan == AT91_SAMA5D2_TOUCH_P_CHAN_IDX)
+ *val = at91_adc_touch_pressure(st);
+ else
+ return -ENODATA;
+
+ return IIO_VAL_INT;
+}
+
static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
{
struct iio_dev *indio = iio_trigger_get_drvdata(trig);
@@ -375,6 +734,11 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
if (!chan)
continue;
+ /* these channel types cannot be handled by this trigger */
+ if (chan->type == IIO_POSITIONRELATIVE ||
+ chan->type == IIO_PRESSURE)
+ continue;
+
if (state) {
at91_adc_writel(st, AT91_SAMA5D2_CHER,
BIT(chan->channel));
@@ -520,7 +884,20 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
{
int ret;
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ /* check if we are enabling triggered buffer or the touchscreen */
+ if (bitmap_subset(indio_dev->active_scan_mask,
+ &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
+ /* touchscreen enabling */
+ return at91_adc_configure_touch(st, true);
+ }
+ /* if we are not in triggered mode, we cannot enable the buffer. */
+ if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+ return -EINVAL;
+
+ /* we continue with the triggered buffer */
ret = at91_adc_dma_start(indio_dev);
if (ret) {
dev_err(&indio_dev->dev, "buffer postenable failed\n");
@@ -536,6 +913,18 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
int ret;
u8 bit;
+ /* check if we are disabling triggered buffer or the touchscreen */
+ if (bitmap_subset(indio_dev->active_scan_mask,
+ &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
+ /* touchscreen disable */
+ return at91_adc_configure_touch(st, false);
+ }
+ /* if we are not in triggered mode, we cannot disable the buffer */
+ if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+ return -EINVAL;
+
+ /* continue with the triggered buffer */
ret = iio_triggered_buffer_predisable(indio_dev);
if (ret < 0)
dev_err(&indio_dev->dev, "buffer predisable failed\n");
@@ -558,6 +947,10 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
if (!chan)
continue;
+ /* these channel types are virtual, no need to do anything */
+ if (chan->type == IIO_POSITIONRELATIVE ||
+ chan->type == IIO_PRESSURE)
+ continue;
if (st->dma_st.dma_chan)
at91_adc_readl(st, chan->address);
}
@@ -613,6 +1006,7 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
{
struct at91_adc_state *st = iio_priv(indio_dev);
int i = 0;
+ int val;
u8 bit;
for_each_set_bit(bit, indio_dev->active_scan_mask,
@@ -622,7 +1016,24 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
if (!chan)
continue;
- st->buffer[i] = at91_adc_readl(st, chan->address);
+ /*
+ * Our external trigger only supports the voltage channels.
+ * If a different channel type was somehow requested, just put
+ * zeroes in the buffer. This should never happen, because we
+ * check the scan mode and scan mask when the buffer is enabled
+ * and refuse a mixed mask (voltage and anything else), so emit
+ * a warning.
+ */
+ if (chan->type == IIO_VOLTAGE) {
+ val = at91_adc_readl(st, chan->address);
+ at91_adc_adjust_val_osr(st, &val);
+ st->buffer[i] = val;
+ } else {
+ st->buffer[i] = 0;
+ WARN(true, "This trigger cannot handle this type of channel");
+ }
i++;
}
iio_push_to_buffers_with_timestamp(indio_dev, st->buffer,
@@ -654,6 +1065,14 @@ static void at91_adc_trigger_handler_dma(struct iio_dev *indio_dev)
interval = div_s64((ns - st->dma_st.dma_ts), sample_count);
while (transferred_len >= sample_size) {
+ /*
+ * for all the values in the current sample,
+ * adjust the values inside the buffer for oversampling
+ */
+ at91_adc_adjust_val_osr_array(st,
+ &st->dma_st.rx_buf[st->dma_st.buf_idx],
+ sample_size);
+
iio_push_to_buffers_with_timestamp(indio_dev,
(st->dma_st.rx_buf + st->dma_st.buf_idx),
(st->dma_st.dma_ts + interval * sample_index));
@@ -688,9 +1107,20 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
static int at91_adc_buffer_init(struct iio_dev *indio)
{
- return devm_iio_triggered_buffer_setup(&indio->dev, indio,
+ struct at91_adc_state *st = iio_priv(indio);
+
+ if (st->selected_trig->hw_trig) {
+ return devm_iio_triggered_buffer_setup(&indio->dev, indio,
&iio_pollfunc_store_time,
&at91_adc_trigger_handler, &at91_buffer_setup_ops);
+ }
+ /*
+ * prepare the buffer ops anyway, in case another buffer gets
+ * attached later (such as a callback buffer for the touchscreen)
+ */
+ indio->setup_ops = &at91_buffer_setup_ops;
+
+ return 0;
}
static unsigned at91_adc_startup_time(unsigned startup_time_min,
@@ -736,19 +1166,83 @@ static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
freq, startup, prescal);
+ st->current_sample_rate = freq;
}
-static unsigned at91_adc_get_sample_freq(struct at91_adc_state *st)
+static inline unsigned at91_adc_get_sample_freq(struct at91_adc_state *st)
{
- unsigned f_adc, f_per = clk_get_rate(st->per_clk);
- unsigned mr, prescal;
+ return st->current_sample_rate;
+}
- mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
- prescal = (mr >> AT91_SAMA5D2_MR_PRESCAL_OFFSET)
- & AT91_SAMA5D2_MR_PRESCAL_MAX;
- f_adc = f_per / (2 * (prescal + 1));
+static void at91_adc_touch_data_handler(struct iio_dev *indio_dev)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ u8 bit;
+ u16 val;
+ int i = 0;
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1) {
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+
+ if (chan->type == IIO_POSITIONRELATIVE)
+ at91_adc_read_position(st, chan->channel, &val);
+ else if (chan->type == IIO_PRESSURE)
+ at91_adc_read_pressure(st, chan->channel, &val);
+ else
+ continue;
+ st->buffer[i] = val;
+ i++;
+ }
+ /*
+ * Schedule work to push the data to the buffers.
+ * This is intended for the callback buffer that another driver
+ * registered. We are still inside our own IRQ handler here;
+ * pushing directly would run the other driver's callback from
+ * our IRQ context, which is better avoided, so defer the push
+ * until after our IRQ has completed.
+ */
+ schedule_work(&st->touch_st.workq);
+}
+
+static void at91_adc_pen_detect_interrupt(struct at91_adc_state *st)
+{
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, AT91_SAMA5D2_IER_PEN);
+ at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_NOPEN |
+ AT91_SAMA5D2_IER_XRDY | AT91_SAMA5D2_IER_YRDY |
+ AT91_SAMA5D2_IER_PRDY);
+ at91_adc_writel(st, AT91_SAMA5D2_TRGR,
+ AT91_SAMA5D2_TRGR_TRGMOD_PERIODIC |
+ AT91_SAMA5D2_TRGR_TRGPER(st->touch_st.sample_period_val));
+ st->touch_st.touching = true;
+}
+
+static void at91_adc_no_pen_detect_interrupt(struct at91_adc_state *st)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+
+ at91_adc_writel(st, AT91_SAMA5D2_TRGR,
+ AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER);
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, AT91_SAMA5D2_IER_NOPEN |
+ AT91_SAMA5D2_IER_XRDY | AT91_SAMA5D2_IER_YRDY |
+ AT91_SAMA5D2_IER_PRDY);
+ st->touch_st.touching = false;
+
+ at91_adc_touch_data_handler(indio_dev);
+
+ at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_PEN);
+}
+
+static void at91_adc_workq_handler(struct work_struct *workq)
+{
+ struct at91_adc_touch *touch_st = container_of(workq,
+ struct at91_adc_touch, workq);
+ struct at91_adc_state *st = container_of(touch_st,
+ struct at91_adc_state, touch_st);
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
- return f_adc;
+ iio_push_to_buffers(indio_dev, st->buffer);
}
static irqreturn_t at91_adc_interrupt(int irq, void *private)
@@ -757,17 +1251,39 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
struct at91_adc_state *st = iio_priv(indio);
u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
u32 imr = at91_adc_readl(st, AT91_SAMA5D2_IMR);
+ u32 rdy_mask = AT91_SAMA5D2_IER_XRDY | AT91_SAMA5D2_IER_YRDY |
+ AT91_SAMA5D2_IER_PRDY;
if (!(status & imr))
return IRQ_NONE;
-
- if (iio_buffer_enabled(indio) && !st->dma_st.dma_chan) {
+ if (status & AT91_SAMA5D2_IER_PEN) {
+ /* pen detected IRQ */
+ at91_adc_pen_detect_interrupt(st);
+ } else if ((status & AT91_SAMA5D2_IER_NOPEN)) {
+ /* nopen detected IRQ */
+ at91_adc_no_pen_detect_interrupt(st);
+ } else if ((status & AT91_SAMA5D2_ISR_PENS) &&
+ ((status & rdy_mask) == rdy_mask)) {
+ /* periodic trigger IRQ - during pen sense */
+ at91_adc_touch_data_handler(indio);
+ } else if (status & AT91_SAMA5D2_ISR_PENS) {
+ /*
+ * the pen is touching, but the measurements are not ready
+ * yet; read the registers and discard the values.
+ */
+ status = at91_adc_readl(st, AT91_SAMA5D2_XPOSR);
+ status = at91_adc_readl(st, AT91_SAMA5D2_YPOSR);
+ status = at91_adc_readl(st, AT91_SAMA5D2_PRESSR);
+ } else if (iio_buffer_enabled(indio) && !st->dma_st.dma_chan) {
+ /* triggered buffer without DMA */
disable_irq_nosync(irq);
iio_trigger_poll(indio->trig);
} else if (iio_buffer_enabled(indio) && st->dma_st.dma_chan) {
+ /* triggered buffer with DMA - should not happen */
disable_irq_nosync(irq);
WARN(true, "Unexpected irq occurred\n");
} else if (!iio_buffer_enabled(indio)) {
+ /* software requested conversion */
st->conversion_value = at91_adc_readl(st, st->chan->address);
st->conversion_done = true;
wake_up_interruptible(&st->wq_data_available);
@@ -775,58 +1291,100 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
return IRQ_HANDLED;
}
-static int at91_adc_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
+static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
{
struct at91_adc_state *st = iio_priv(indio_dev);
u32 cor = 0;
+ u16 tmp_val;
int ret;
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- /* we cannot use software trigger if hw trigger enabled */
+ /*
+ * Keep in mind that we cannot use the software trigger or the
+ * touchscreen while an external trigger is enabled
+ */
+ if (chan->type == IIO_POSITIONRELATIVE) {
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
mutex_lock(&st->lock);
- st->chan = chan;
+ ret = at91_adc_read_position(st, chan->channel,
+ &tmp_val);
+ *val = tmp_val;
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
- if (chan->differential)
- cor = (BIT(chan->channel) | BIT(chan->channel2)) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET;
-
- at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
- at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
- at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
- at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
-
- ret = wait_event_interruptible_timeout(st->wq_data_available,
- st->conversion_done,
- msecs_to_jiffies(1000));
- if (ret == 0)
- ret = -ETIMEDOUT;
-
- if (ret > 0) {
- *val = st->conversion_value;
- if (chan->scan_type.sign == 's')
- *val = sign_extend32(*val, 11);
- ret = IIO_VAL_INT;
- st->conversion_done = false;
- }
+ return at91_adc_adjust_val_osr(st, val);
+ }
+ if (chan->type == IIO_PRESSURE) {
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
- at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
- at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+ ret = at91_adc_read_pressure(st, chan->channel,
+ &tmp_val);
+ *val = tmp_val;
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
- /* Needed to ACK the DRDY interruption */
- at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+ return at91_adc_adjust_val_osr(st, val);
+ }
- mutex_unlock(&st->lock);
+ /* in this case we have a voltage channel */
- iio_device_release_direct_mode(indio_dev);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
return ret;
+ mutex_lock(&st->lock);
+
+ st->chan = chan;
+
+ if (chan->differential)
+ cor = (BIT(chan->channel) | BIT(chan->channel2)) <<
+ AT91_SAMA5D2_COR_DIFF_OFFSET;
+
+ at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
+ at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
+
+ ret = wait_event_interruptible_timeout(st->wq_data_available,
+ st->conversion_done,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ if (ret > 0) {
+ *val = st->conversion_value;
+ ret = at91_adc_adjust_val_osr(st, val);
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(*val, 11);
+ st->conversion_done = false;
+ }
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+
+ /* Needed to ACK the DRDY interruption */
+ at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+
+ mutex_unlock(&st->lock);
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+}
+
+static int at91_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return at91_adc_read_info_raw(indio_dev, chan, val);
case IIO_CHAN_INFO_SCALE:
*val = st->vref_uv / 1000;
if (chan->differential)
@@ -838,6 +1396,10 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
*val = at91_adc_get_sample_freq(st);
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = st->oversampling_ratio;
+ return IIO_VAL_INT;
+
default:
return -EINVAL;
}
@@ -849,16 +1411,28 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
{
struct at91_adc_state *st = iio_priv(indio_dev);
- if (mask != IIO_CHAN_INFO_SAMP_FREQ)
- return -EINVAL;
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ if ((val != AT91_OSR_1SAMPLES) && (val != AT91_OSR_4SAMPLES) &&
+ (val != AT91_OSR_16SAMPLES))
+ return -EINVAL;
+ /* if no change, optimize out */
+ if (val == st->oversampling_ratio)
+ return 0;
+ st->oversampling_ratio = val;
+ /* update ratio */
+ at91_adc_config_emr(st);
+ return 0;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < st->soc_info.min_sample_rate ||
+ val > st->soc_info.max_sample_rate)
+ return -EINVAL;
- if (val < st->soc_info.min_sample_rate ||
- val > st->soc_info.max_sample_rate)
+ at91_adc_setup_samp_freq(st, val);
+ return 0;
+ default:
return -EINVAL;
-
- at91_adc_setup_samp_freq(st, val);
-
- return 0;
+ }
}
static void at91_adc_dma_init(struct platform_device *pdev)
@@ -974,11 +1548,23 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
return 0;
}
-static const struct iio_info at91_adc_info = {
- .read_raw = &at91_adc_read_raw,
- .write_raw = &at91_adc_write_raw,
- .hwfifo_set_watermark = &at91_adc_set_watermark,
-};
+static int at91_adc_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ if (bitmap_subset(scan_mask, &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1))
+ return 0;
+ /*
+ * reject the new bitmap if it mixes touchscreen and regular
+ * channels: the two cannot be captured together
+ */
+ if (bitmap_intersects(&st->touch_st.channels_bitmask, scan_mask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1))
+ return -EINVAL;
+ return 0;
+}
static void at91_adc_hw_init(struct at91_adc_state *st)
{
@@ -992,6 +1578,9 @@ static void at91_adc_hw_init(struct at91_adc_state *st)
AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);
at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
+
+ /* configure extended mode register */
+ at91_adc_config_emr(st);
}
static ssize_t at91_adc_get_fifo_state(struct device *dev,
@@ -1022,6 +1611,20 @@ static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
static IIO_CONST_ATTR(hwfifo_watermark_min, "2");
static IIO_CONST_ATTR(hwfifo_watermark_max, AT91_HWFIFO_MAX_SIZE_STR);
+static IIO_CONST_ATTR(oversampling_ratio_available,
+ __stringify(AT91_OSR_1SAMPLES) " "
+ __stringify(AT91_OSR_4SAMPLES) " "
+ __stringify(AT91_OSR_16SAMPLES));
+
+static struct attribute *at91_adc_attributes[] = {
+ &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group at91_adc_attribute_group = {
+ .attrs = at91_adc_attributes,
+};
+
static const struct attribute *at91_adc_fifo_attributes[] = {
&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
@@ -1030,6 +1633,15 @@ static const struct attribute *at91_adc_fifo_attributes[] = {
NULL,
};
+static const struct iio_info at91_adc_info = {
+ .attrs = &at91_adc_attribute_group,
+ .read_raw = &at91_adc_read_raw,
+ .write_raw = &at91_adc_write_raw,
+ .update_scan_mode = &at91_adc_update_scan_mode,
+ .of_xlate = &at91_adc_of_xlate,
+ .hwfifo_set_watermark = &at91_adc_set_watermark,
+};
+
static int at91_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
@@ -1044,13 +1656,22 @@ static int at91_adc_probe(struct platform_device *pdev)
indio_dev->dev.parent = &pdev->dev;
indio_dev->name = dev_name(&pdev->dev);
- indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
indio_dev->info = &at91_adc_info;
indio_dev->channels = at91_adc_channels;
indio_dev->num_channels = ARRAY_SIZE(at91_adc_channels);
st = iio_priv(indio_dev);
+ bitmap_set(&st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_TOUCH_X_CHAN_IDX, 1);
+ bitmap_set(&st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_TOUCH_Y_CHAN_IDX, 1);
+ bitmap_set(&st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_TOUCH_P_CHAN_IDX, 1);
+
+ st->oversampling_ratio = AT91_OSR_1SAMPLES;
+
ret = of_property_read_u32(pdev->dev.of_node,
"atmel,min-sample-rate-hz",
&st->soc_info.min_sample_rate);
@@ -1100,6 +1721,7 @@ static int at91_adc_probe(struct platform_device *pdev)
init_waitqueue_head(&st->wq_data_available);
mutex_init(&st->lock);
+ INIT_WORK(&st->touch_st.workq, at91_adc_workq_handler);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -1159,13 +1781,13 @@ static int at91_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
- if (st->selected_trig->hw_trig) {
- ret = at91_adc_buffer_init(indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
- goto per_clk_disable_unprepare;
- }
+ ret = at91_adc_buffer_init(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
+ goto per_clk_disable_unprepare;
+ }
+ if (st->selected_trig->hw_trig) {
ret = at91_adc_trigger_init(indio_dev);
if (ret < 0) {
dev_err(&pdev->dev, "couldn't setup the triggers.\n");
@@ -1272,9 +1894,20 @@ static __maybe_unused int at91_adc_resume(struct device *dev)
at91_adc_hw_init(st);
/* reconfiguring trigger hardware state */
- if (iio_buffer_enabled(indio_dev))
- at91_adc_configure_trigger(st->trig, true);
+ if (!iio_buffer_enabled(indio_dev))
+ return 0;
+
+ /* check if we are enabling triggered buffer or the touchscreen */
+ if (bitmap_subset(indio_dev->active_scan_mask,
+ &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
+ /* touchscreen enabling */
+ return at91_adc_configure_touch(st, true);
+ } else {
+ return at91_adc_configure_trigger(st->trig, true);
+ }
+ /* not needed but more explicit */
return 0;
vref_disable_resume:
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 9430b54121e0..36b59d8957fb 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -97,6 +97,14 @@ struct hx711_data {
* 2x32-bit channel + 64-bit timestamp
*/
u32 buffer[4];
+ /*
+ * delay after a rising edge on SCK until the data is ready on DOUT;
+ * this depends on the hx711 itself, whose datasheet specifies a
+ * maximum of 100 ns, but also on any parasitic capacitance in the
+ * wiring
+ */
+ u32 data_ready_delay_ns;
+ u32 clock_frequency;
};
static int hx711_cycle(struct hx711_data *hx711_data)
@@ -110,6 +118,14 @@ static int hx711_cycle(struct hx711_data *hx711_data)
*/
preempt_disable();
gpiod_set_value(hx711_data->gpiod_pd_sck, 1);
+
+ /*
+ * wait until DOUT is ready;
+ * it turned out that parasitic capacitance extends the time
+ * until DOUT has settled to its final value
+ */
+ ndelay(hx711_data->data_ready_delay_ns);
+
val = gpiod_get_value(hx711_data->gpiod_dout);
/*
* here we are not waiting for 0.2 us as suggested by the datasheet,
@@ -120,6 +136,12 @@ static int hx711_cycle(struct hx711_data *hx711_data)
gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
preempt_enable();
+ /*
+ * make it a square wave for addressing cases with capacitance on
+ * PD_SCK
+ */
+ ndelay(hx711_data->data_ready_delay_ns);
+
return val;
}
@@ -458,6 +480,7 @@ static const struct iio_chan_spec hx711_chan_spec[] = {
static int hx711_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct hx711_data *hx711_data;
struct iio_dev *indio_dev;
int ret;
@@ -530,6 +553,22 @@ static int hx711_probe(struct platform_device *pdev)
hx711_data->gain_set = 128;
hx711_data->gain_chan_a = 128;
+ hx711_data->clock_frequency = 400000;
+ ret = of_property_read_u32(np, "clock-frequency",
+ &hx711_data->clock_frequency);
+
+ /*
+ * datasheet says the high level of PD_SCK has a maximum duration
+ * of 50 microseconds
+ */
+ if (hx711_data->clock_frequency < 20000) {
+ dev_warn(dev, "clock-frequency too low - assuming 400 kHz\n");
+ hx711_data->clock_frequency = 400000;
+ }
+
+ hx711_data->data_ready_delay_ns =
+ 1000000000 / hx711_data->clock_frequency;
+
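Worked example of the delay just computed: at the default clock-frequency of 400 kHz, data_ready_delay_ns = 1000000000 / 400000 = 2500 ns, i.e. one full clock period of settling time after each PD_SCK edge. As a sketch:

/* Illustration of the hx711 settle-time computation. */
static inline u32 sck_settle_ns(u32 clock_frequency_hz)
{
	return 1000000000 / clock_frequency_hz;	/* 400 kHz -> 2500 ns */
}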
platform_set_drvdata(pdev, indio_dev);
indio_dev->name = "hx711";
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 0635a79864bf..d1239624187d 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
+#include <linux/sched/task.h>
#include <linux/util_macros.h>
#include <linux/platform_data/ina2xx.h>
@@ -826,6 +827,7 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
{
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned int sampling_us = SAMPLING_PERIOD(chip);
+ struct task_struct *task;
dev_dbg(&indio_dev->dev, "Enabling buffer w/ scan_mask %02x, freq = %d, avg =%u\n",
(unsigned int)(*indio_dev->active_scan_mask),
@@ -835,11 +837,17 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
chip->allow_async_readout);
- chip->task = kthread_run(ina2xx_capture_thread, (void *)indio_dev,
- "%s:%d-%uus", indio_dev->name, indio_dev->id,
- sampling_us);
+ task = kthread_create(ina2xx_capture_thread, (void *)indio_dev,
+ "%s:%d-%uus", indio_dev->name, indio_dev->id,
+ sampling_us);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ get_task_struct(task);
+ wake_up_process(task);
+ chip->task = task;
- return PTR_ERR_OR_ZERO(chip->task);
+ return 0;
}
static int ina2xx_buffer_disable(struct iio_dev *indio_dev)
@@ -848,6 +856,7 @@ static int ina2xx_buffer_disable(struct iio_dev *indio_dev)
if (chip->task) {
kthread_stop(chip->task);
+ put_task_struct(chip->task);
chip->task = NULL;
}
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 7fb4f525714a..a8d35aebee80 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1577,7 +1577,6 @@ static int max1363_probe(struct i2c_client *client,
struct max1363_state *st;
struct iio_dev *indio_dev;
struct regulator *vref;
- const struct of_device_id *match;
indio_dev = devm_iio_device_alloc(&client->dev,
sizeof(struct max1363_state));
@@ -1604,11 +1603,8 @@ static int max1363_probe(struct i2c_client *client,
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
- match = of_match_device(of_match_ptr(max1363_of_match),
- &client->dev);
- if (match)
- st->chip_info = of_device_get_match_data(&client->dev);
- else
+ st->chip_info = of_device_get_match_data(&client->dev);
+ if (!st->chip_info)
st->chip_info = &max1363_chip_info_tbl[id->driver_data];
st->client = client;
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 2948909f3ee3..da2d16dfa63e 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -922,6 +922,11 @@ static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
.name = "meson-meson8b-saradc",
};
+static const struct meson_sar_adc_data meson_sar_adc_meson8m2_data = {
+ .param = &meson_sar_adc_meson8_param,
+ .name = "meson-meson8m2-saradc",
+};
+
static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
.param = &meson_sar_adc_gxbb_param,
.name = "meson-gxbb-saradc",
@@ -952,6 +957,10 @@ static const struct of_device_id meson_sar_adc_of_match[] = {
.data = &meson_sar_adc_meson8b_data,
},
{
+ .compatible = "amlogic,meson8m2-saradc",
+ .data = &meson_sar_adc_meson8m2_data,
+ },
+ {
.compatible = "amlogic,meson-gxbb-saradc",
.data = &meson_sar_adc_gxbb_data,
}, {
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
new file mode 100644
index 000000000000..2b60efea0c39
--- /dev/null
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Spreadtrum Communications Inc.
+
+#include <linux/hwspinlock.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* PMIC global registers definition */
+#define SC27XX_MODULE_EN 0xc08
+#define SC27XX_MODULE_ADC_EN BIT(5)
+#define SC27XX_ARM_CLK_EN 0xc10
+#define SC27XX_CLK_ADC_EN BIT(5)
+#define SC27XX_CLK_ADC_CLK_EN BIT(6)
+
+/* ADC controller registers definition */
+#define SC27XX_ADC_CTL 0x0
+#define SC27XX_ADC_CH_CFG 0x4
+#define SC27XX_ADC_DATA 0x4c
+#define SC27XX_ADC_INT_EN 0x50
+#define SC27XX_ADC_INT_CLR 0x54
+#define SC27XX_ADC_INT_STS 0x58
+#define SC27XX_ADC_INT_RAW 0x5c
+
+/* Bits and mask definition for SC27XX_ADC_CTL register */
+#define SC27XX_ADC_EN BIT(0)
+#define SC27XX_ADC_CHN_RUN BIT(1)
+#define SC27XX_ADC_12BIT_MODE BIT(2)
+#define SC27XX_ADC_RUN_NUM_MASK GENMASK(7, 4)
+#define SC27XX_ADC_RUN_NUM_SHIFT 4
+
+/* Bits and mask definition for SC27XX_ADC_CH_CFG register */
+#define SC27XX_ADC_CHN_ID_MASK GENMASK(4, 0)
+#define SC27XX_ADC_SCALE_MASK GENMASK(10, 8)
+#define SC27XX_ADC_SCALE_SHIFT 8
+
+/* Bits definitions for SC27XX_ADC_INT_EN registers */
+#define SC27XX_ADC_IRQ_EN BIT(0)
+
+/* Bits definitions for SC27XX_ADC_INT_CLR registers */
+#define SC27XX_ADC_IRQ_CLR BIT(0)
+
+/* Mask definition for SC27XX_ADC_DATA register */
+#define SC27XX_ADC_DATA_MASK GENMASK(11, 0)
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SC27XX_ADC_HWLOCK_TIMEOUT 5000
+
+/* Maximum ADC channel number */
+#define SC27XX_ADC_CHANNEL_MAX 32
+
+/* ADC voltage ratio definition */
+#define SC27XX_VOLT_RATIO(n, d) \
+ (((n) << SC27XX_RATIO_NUMERATOR_OFFSET) | (d))
+#define SC27XX_RATIO_NUMERATOR_OFFSET 16
+#define SC27XX_RATIO_DENOMINATOR_MASK GENMASK(15, 0)
+
+struct sc27xx_adc_data {
+ struct device *dev;
+ struct regmap *regmap;
+ /*
+ * One hardware spinlock to synchronize the multiple subsystems
+ * that access the single ADC controller.
+ */
+ struct hwspinlock *hwlock;
+ struct completion completion;
+ int channel_scale[SC27XX_ADC_CHANNEL_MAX];
+ u32 base;
+ int value;
+ int irq;
+};
+
+struct sc27xx_adc_linear_graph {
+ int volt0;
+ int adc0;
+ int volt1;
+ int adc1;
+};
+
+/*
+ * According to the datasheet, an ADC value can be converted to a voltage
+ * through two points on a linear graph. If the voltage is below 1.2 V,
+ * the small-scale graph should be used; above 1.2 V, the big-scale
+ * graph.
+ */
+static const struct sc27xx_adc_linear_graph big_scale_graph = {
+ 4200, 3310,
+ 3600, 2832,
+};
+
+static const struct sc27xx_adc_linear_graph small_scale_graph = {
+ 1000, 3413,
+ 100, 341,
+};
+
+static int sc27xx_adc_get_ratio(int channel, int scale)
+{
+ switch (channel) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ return scale ? SC27XX_VOLT_RATIO(400, 1025) :
+ SC27XX_VOLT_RATIO(1, 1);
+ case 5:
+ return SC27XX_VOLT_RATIO(7, 29);
+ case 6:
+ return SC27XX_VOLT_RATIO(375, 9000);
+ case 7:
+ case 8:
+ return scale ? SC27XX_VOLT_RATIO(100, 125) :
+ SC27XX_VOLT_RATIO(1, 1);
+ case 19:
+ return SC27XX_VOLT_RATIO(1, 3);
+ default:
+ return SC27XX_VOLT_RATIO(1, 1);
+ }
+}
+
+static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
+ int scale, int *val)
+{
+ int ret;
+ u32 tmp;
+
+ reinit_completion(&data->completion);
+
+ ret = hwspin_lock_timeout_raw(data->hwlock, SC27XX_ADC_HWLOCK_TIMEOUT);
+ if (ret) {
+ dev_err(data->dev, "timeout to get the hwspinlock\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_EN, SC27XX_ADC_EN);
+ if (ret)
+ goto unlock_adc;
+
+ /* Configure the channel id and scale */
+ tmp = (scale << SC27XX_ADC_SCALE_SHIFT) & SC27XX_ADC_SCALE_MASK;
+ tmp |= channel & SC27XX_ADC_CHN_ID_MASK;
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CH_CFG,
+ SC27XX_ADC_CHN_ID_MASK | SC27XX_ADC_SCALE_MASK,
+ tmp);
+ if (ret)
+ goto disable_adc;
+
+ /* Select 12bit conversion mode, and only sample 1 time */
+ tmp = SC27XX_ADC_12BIT_MODE;
+ tmp |= (0 << SC27XX_ADC_RUN_NUM_SHIFT) & SC27XX_ADC_RUN_NUM_MASK;
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_RUN_NUM_MASK | SC27XX_ADC_12BIT_MODE,
+ tmp);
+ if (ret)
+ goto disable_adc;
+
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_CHN_RUN, SC27XX_ADC_CHN_RUN);
+ if (ret)
+ goto disable_adc;
+
+ wait_for_completion(&data->completion);
+
+disable_adc:
+ regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
+ SC27XX_ADC_EN, 0);
+unlock_adc:
+ hwspin_unlock_raw(data->hwlock);
+
+ if (!ret)
+ *val = data->value;
+
+ return ret;
+}
+
+static irqreturn_t sc27xx_adc_isr(int irq, void *dev_id)
+{
+ struct sc27xx_adc_data *data = dev_id;
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_CLR,
+ SC27XX_ADC_IRQ_CLR, SC27XX_ADC_IRQ_CLR);
+ if (ret)
+ return IRQ_RETVAL(ret);
+
+ ret = regmap_read(data->regmap, data->base + SC27XX_ADC_DATA,
+ &data->value);
+ if (ret)
+ return IRQ_RETVAL(ret);
+
+ data->value &= SC27XX_ADC_DATA_MASK;
+ complete(&data->completion);
+
+ return IRQ_HANDLED;
+}
+
+static void sc27xx_adc_volt_ratio(struct sc27xx_adc_data *data,
+ int channel, int scale,
+ u32 *div_numerator, u32 *div_denominator)
+{
+ u32 ratio = sc27xx_adc_get_ratio(channel, scale);
+
+ *div_numerator = ratio >> SC27XX_RATIO_NUMERATOR_OFFSET;
+ *div_denominator = ratio & SC27XX_RATIO_DENOMINATOR_MASK;
+}
+
+static int sc27xx_adc_to_volt(const struct sc27xx_adc_linear_graph *graph,
+ int raw_adc)
+{
+ int tmp;
+
+ tmp = (graph->volt0 - graph->volt1) * (raw_adc - graph->adc1);
+ tmp /= (graph->adc0 - graph->adc1);
+ tmp += graph->volt1;
+
+ return tmp < 0 ? 0 : tmp;
+}
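sc27xx_adc_to_volt() is plain two-point linear interpolation with a clamp at zero. Worked example with big_scale_graph (volt0 = 4200, adc0 = 3310, volt1 = 3600, adc1 = 2832): a raw reading of 3071, halfway between the calibration points, yields 3600 + 600 * 239 / 478 = 3900 mV. As an inlined sketch (clamp omitted):

/* Example-only interpolation with the big-scale points baked in. */
static inline int example_big_scale_volt(int raw)
{
	return 3600 + (4200 - 3600) * (raw - 2832) / (3310 - 2832);
}
/* example_big_scale_volt(3071) == 3900, (3310) == 4200, (2832) == 3600 */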
+
+static int sc27xx_adc_convert_volt(struct sc27xx_adc_data *data, int channel,
+ int scale, int raw_adc)
+{
+ u32 numerator, denominator;
+ u32 volt;
+
+ /*
+ * Convert the ADC value to a voltage according to the linear graph.
+ * Channels 1 and 5 have been calibrated, so their graph result can be
+ * returned directly. The other channels still need to be scaled to the
+ * real voltage using the channel's voltage ratio.
+ */
+ switch (channel) {
+ case 5:
+ return sc27xx_adc_to_volt(&big_scale_graph, raw_adc);
+
+ case 1:
+ return sc27xx_adc_to_volt(&small_scale_graph, raw_adc);
+
+ default:
+ volt = sc27xx_adc_to_volt(&small_scale_graph, raw_adc);
+ break;
+ }
+
+ sc27xx_adc_volt_ratio(data, channel, scale, &numerator, &denominator);
+
+ return (volt * denominator + numerator / 2) / numerator;
+}
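The closing expression divides by the ratio while rounding to nearest: real = volt * denominator / numerator, with numerator / 2 added before the division. Example with the scaled channel 1-4 ratio of 400/1025: a graph voltage of 1000 mV becomes (1000 * 1025 + 200) / 400 = 2563 mV, where plain truncation would give 2562. A sketch:

/* Rounded ratio scaling as used in sc27xx_adc_convert_volt(). */
static inline u32 apply_ratio(u32 volt, u32 numerator, u32 denominator)
{
	return (volt * denominator + numerator / 2) / numerator;
}
/* apply_ratio(1000, 400, 1025) == 2563 */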
+
+static int sc27xx_adc_read_processed(struct sc27xx_adc_data *data,
+ int channel, int scale, int *val)
+{
+ int ret, raw_adc;
+
+ ret = sc27xx_adc_read(data, channel, scale, &raw_adc);
+ if (ret)
+ return ret;
+
+ *val = sc27xx_adc_convert_volt(data, channel, scale, raw_adc);
+ return 0;
+}
+
+static int sc27xx_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct sc27xx_adc_data *data = iio_priv(indio_dev);
+ int scale = data->channel_scale[chan->channel];
+ int ret, tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&indio_dev->mlock);
+ ret = sc27xx_adc_read_processed(data, chan->channel, scale,
+ &tmp);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret)
+ return ret;
+
+ *val = tmp;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = scale;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sc27xx_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct sc27xx_adc_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ data->channel_scale[chan->channel] = val;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info sc27xx_info = {
+ .read_raw = &sc27xx_adc_read_raw,
+ .write_raw = &sc27xx_adc_write_raw,
+};
+
+#define SC27XX_ADC_CHANNEL(index) { \
+ .type = IIO_VOLTAGE, \
+ .channel = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = "CH##index", \
+ .indexed = 1, \
+}
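With the stringification fixed above, the macro expands as intended; an illustrative expansion:

/* SC27XX_ADC_CHANNEL(5) now yields
 *	.channel = 5,
 *	.datasheet_name = "CH" "5",	adjacent literals concatenate to "CH5"
 * whereas the original "CH##index" sat inside a string literal, where
 * token pasting never happens, and stayed verbatim.
 */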
+
+static const struct iio_chan_spec sc27xx_channels[] = {
+ SC27XX_ADC_CHANNEL(0),
+ SC27XX_ADC_CHANNEL(1),
+ SC27XX_ADC_CHANNEL(2),
+ SC27XX_ADC_CHANNEL(3),
+ SC27XX_ADC_CHANNEL(4),
+ SC27XX_ADC_CHANNEL(5),
+ SC27XX_ADC_CHANNEL(6),
+ SC27XX_ADC_CHANNEL(7),
+ SC27XX_ADC_CHANNEL(8),
+ SC27XX_ADC_CHANNEL(9),
+ SC27XX_ADC_CHANNEL(10),
+ SC27XX_ADC_CHANNEL(11),
+ SC27XX_ADC_CHANNEL(12),
+ SC27XX_ADC_CHANNEL(13),
+ SC27XX_ADC_CHANNEL(14),
+ SC27XX_ADC_CHANNEL(15),
+ SC27XX_ADC_CHANNEL(16),
+ SC27XX_ADC_CHANNEL(17),
+ SC27XX_ADC_CHANNEL(18),
+ SC27XX_ADC_CHANNEL(19),
+ SC27XX_ADC_CHANNEL(20),
+ SC27XX_ADC_CHANNEL(21),
+ SC27XX_ADC_CHANNEL(22),
+ SC27XX_ADC_CHANNEL(23),
+ SC27XX_ADC_CHANNEL(24),
+ SC27XX_ADC_CHANNEL(25),
+ SC27XX_ADC_CHANNEL(26),
+ SC27XX_ADC_CHANNEL(27),
+ SC27XX_ADC_CHANNEL(28),
+ SC27XX_ADC_CHANNEL(29),
+ SC27XX_ADC_CHANNEL(30),
+ SC27XX_ADC_CHANNEL(31),
+};
+
+static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ SC27XX_MODULE_ADC_EN, SC27XX_MODULE_ADC_EN);
+ if (ret)
+ return ret;
+
+ /* Enable ADC work clock and controller clock */
+ ret = regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN);
+ if (ret)
+ goto disable_adc;
+
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_EN,
+ SC27XX_ADC_IRQ_EN, SC27XX_ADC_IRQ_EN);
+ if (ret)
+ goto disable_clk;
+
+ return 0;
+
+disable_clk:
+ regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
+disable_adc:
+ regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ SC27XX_MODULE_ADC_EN, 0);
+
+ return ret;
+}
+
+static void sc27xx_adc_disable(void *_data)
+{
+ struct sc27xx_adc_data *data = _data;
+
+ regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_EN,
+ SC27XX_ADC_IRQ_EN, 0);
+
+ /* Disable ADC work clock and controller clock */
+ regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+ SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
+
+ regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+ SC27XX_MODULE_ADC_EN, 0);
+}
+
+static void sc27xx_adc_free_hwlock(void *_data)
+{
+ struct hwspinlock *hwlock = _data;
+
+ hwspin_lock_free(hwlock);
+}
+
+static int sc27xx_adc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sc27xx_adc_data *sc27xx_data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*sc27xx_data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ sc27xx_data = iio_priv(indio_dev);
+
+ sc27xx_data->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!sc27xx_data->regmap) {
+ dev_err(&pdev->dev, "failed to get ADC regmap\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "reg", &sc27xx_data->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get ADC base address\n");
+ return ret;
+ }
+
+ sc27xx_data->irq = platform_get_irq(pdev, 0);
+ if (sc27xx_data->irq < 0) {
+ dev_err(&pdev->dev, "failed to get ADC irq number\n");
+ return sc27xx_data->irq;
+ }
+
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get hwspinlock id\n");
+ return ret;
+ }
+
+ sc27xx_data->hwlock = hwspin_lock_request_specific(ret);
+ if (!sc27xx_data->hwlock) {
+ dev_err(&pdev->dev, "failed to request hwspinlock\n");
+ return -ENXIO;
+ }
+
+ ret = devm_add_action(&pdev->dev, sc27xx_adc_free_hwlock,
+ sc27xx_data->hwlock);
+ if (ret) {
+ sc27xx_adc_free_hwlock(sc27xx_data->hwlock);
+ dev_err(&pdev->dev, "failed to add hwspinlock action\n");
+ return ret;
+ }
+
+ init_completion(&sc27xx_data->completion);
+ sc27xx_data->dev = &pdev->dev;
+
+ ret = sc27xx_adc_enable(sc27xx_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable ADC module\n");
+ return ret;
+ }
+
+ ret = devm_add_action(&pdev->dev, sc27xx_adc_disable, sc27xx_data);
+ if (ret) {
+ sc27xx_adc_disable(sc27xx_data);
+ dev_err(&pdev->dev, "failed to add ADC disable action\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, sc27xx_data->irq, NULL,
+ sc27xx_adc_isr, IRQF_ONESHOT,
+ pdev->name, sc27xx_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request ADC irq\n");
+ return ret;
+ }
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &sc27xx_info;
+ indio_dev->channels = sc27xx_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sc27xx_channels);
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret)
+ dev_err(&pdev->dev, "could not register iio (ADC)");
+
+ return ret;
+}
+
+static const struct of_device_id sc27xx_adc_of_match[] = {
+ { .compatible = "sprd,sc2731-adc", },
+ { }
+};
+
+static struct platform_driver sc27xx_adc_driver = {
+ .probe = sc27xx_adc_probe,
+ .driver = {
+ .name = "sc27xx-adc",
+ .of_match_table = sc27xx_adc_of_match,
+ },
+};
+
+module_platform_driver(sc27xx_adc_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum SC27XX ADC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 0225c1b333ab..a5bd5944bc66 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments ADS7950 SPI ADC driver
*
@@ -10,15 +11,6 @@
* And also on hwmon/ads79xx.c
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Nishanth Menon
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/acpi.h>
@@ -76,6 +68,9 @@ struct ti_ads7950_state {
__be16 rx_buf[TI_ADS7950_MAX_CHAN + TI_ADS7950_TIMESTAMP_SIZE]
____cacheline_aligned;
__be16 tx_buf[TI_ADS7950_MAX_CHAN];
+ __be16 single_tx;
+ __be16 single_rx;
+
};
struct ti_ads7950_chip_info {
@@ -295,18 +290,26 @@ out:
return IRQ_HANDLED;
}
-static int ti_ads7950_scan_direct(struct ti_ads7950_state *st, unsigned int ch)
+static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
{
+ struct ti_ads7950_state *st = iio_priv(indio_dev);
int ret, cmd;
+ mutex_lock(&indio_dev->mlock);
+
cmd = TI_ADS7950_CR_WRITE | TI_ADS7950_CR_CHAN(ch) | st->settings;
- st->tx_buf[0] = cpu_to_be16(cmd);
+ st->single_tx = cpu_to_be16(cmd);
ret = spi_sync(st->spi, &st->scan_single_msg);
if (ret)
- return ret;
+ goto out;
+
+ ret = be16_to_cpu(st->single_rx);
- return be16_to_cpu(st->rx_buf[0]);
+out:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
}
static int ti_ads7950_get_range(struct ti_ads7950_state *st)
@@ -338,13 +341,7 @@ static int ti_ads7950_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- return ret;
-
- ret = ti_ads7950_scan_direct(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ ret = ti_ads7950_scan_direct(indio_dev, chan->address);
if (ret < 0)
return ret;
@@ -410,13 +407,13 @@ static int ti_ads7950_probe(struct spi_device *spi)
* was read at the end of the first transfer.
*/
- st->scan_single_xfer[0].tx_buf = &st->tx_buf[0];
+ st->scan_single_xfer[0].tx_buf = &st->single_tx;
st->scan_single_xfer[0].len = 2;
st->scan_single_xfer[0].cs_change = 1;
- st->scan_single_xfer[1].tx_buf = &st->tx_buf[0];
+ st->scan_single_xfer[1].tx_buf = &st->single_tx;
st->scan_single_xfer[1].len = 2;
st->scan_single_xfer[1].cs_change = 1;
- st->scan_single_xfer[2].rx_buf = &st->rx_buf[0];
+ st->scan_single_xfer[2].rx_buf = &st->single_rx;
st->scan_single_xfer[2].len = 2;
spi_message_init_with_transfers(&st->scan_single_msg,
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index d4f21d1be6c8..3f6be5ac049a 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -322,6 +322,7 @@ static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
#define XADC_ZYNQ_TCK_RATE_MAX 50000000
#define XADC_ZYNQ_IGAP_DEFAULT 20
+#define XADC_ZYNQ_PCAP_RATE_MAX 200000000
static int xadc_zynq_setup(struct platform_device *pdev,
struct iio_dev *indio_dev, int irq)
@@ -332,6 +333,7 @@ static int xadc_zynq_setup(struct platform_device *pdev,
unsigned int div;
unsigned int igap;
unsigned int tck_rate;
+ int ret;
/* TODO: Figure out how to make igap and tck_rate configurable */
igap = XADC_ZYNQ_IGAP_DEFAULT;
@@ -340,9 +342,16 @@ static int xadc_zynq_setup(struct platform_device *pdev,
xadc->zynq_intmask = ~0;
pcap_rate = clk_get_rate(xadc->clk);
+ if (!pcap_rate)
+ return -EINVAL;
+
+ if (pcap_rate > XADC_ZYNQ_PCAP_RATE_MAX) {
+ ret = clk_set_rate(xadc->clk,
+ (unsigned long)XADC_ZYNQ_PCAP_RATE_MAX);
+ if (ret)
+ return ret;
+ }
- if (tck_rate > XADC_ZYNQ_TCK_RATE_MAX)
- tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
if (tck_rate > pcap_rate / 2) {
div = 2;
} else {
@@ -368,6 +377,12 @@ static int xadc_zynq_setup(struct platform_device *pdev,
XADC_ZYNQ_CFG_REDGE | XADC_ZYNQ_CFG_WEDGE |
tck_div | XADC_ZYNQ_CFG_IGAP(igap));
+ if (pcap_rate > XADC_ZYNQ_PCAP_RATE_MAX) {
+ ret = clk_set_rate(xadc->clk, pcap_rate);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -889,6 +904,9 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
unsigned long clk_rate = xadc_get_dclk_rate(xadc);
unsigned int div;
+ if (!clk_rate)
+ return -EINVAL;
+
if (info != IIO_CHAN_INFO_SAMP_FREQ)
return -EINVAL;
@@ -1045,7 +1063,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
unsigned int num_channels;
const char *external_mux;
u32 ext_mux_chan;
- int reg;
+ u32 reg;
int ret;
*conf = 0;
@@ -1157,6 +1175,7 @@ static int xadc_probe(struct platform_device *pdev)
xadc = iio_priv(indio_dev);
xadc->ops = id->data;
+ xadc->irq = irq;
init_completion(&xadc->completion);
mutex_init(&xadc->mutex);
spin_lock_init(&xadc->lock);
@@ -1207,14 +1226,14 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
goto err_free_samplerate_trigger;
- ret = xadc->ops->setup(pdev, indio_dev, irq);
+ ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
+ dev_name(&pdev->dev), indio_dev);
if (ret)
goto err_clk_disable_unprepare;
- ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
- dev_name(&pdev->dev), indio_dev);
+ ret = xadc->ops->setup(pdev, indio_dev, xadc->irq);
if (ret)
- goto err_clk_disable_unprepare;
+ goto err_free_irq;
for (i = 0; i < 16; i++)
xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
@@ -1239,8 +1258,10 @@ static int xadc_probe(struct platform_device *pdev)
goto err_free_irq;
/* Disable all alarms */
- xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
- XADC_CONF1_ALARM_MASK);
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
+ XADC_CONF1_ALARM_MASK);
+ if (ret)
+ goto err_free_irq;
/* Set thresholds to min/max */
for (i = 0; i < 16; i++) {
@@ -1268,7 +1289,7 @@ static int xadc_probe(struct platform_device *pdev)
return 0;
err_free_irq:
- free_irq(irq, indio_dev);
+ free_irq(xadc->irq, indio_dev);
err_clk_disable_unprepare:
clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
@@ -1290,7 +1311,6 @@ static int xadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct xadc *xadc = iio_priv(indio_dev);
- int irq = platform_get_irq(pdev, 0);
iio_device_unregister(indio_dev);
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
@@ -1298,7 +1318,7 @@ static int xadc_remove(struct platform_device *pdev)
iio_trigger_free(xadc->convst_trigger);
iio_triggered_buffer_cleanup(indio_dev);
}
- free_irq(irq, indio_dev);
+ free_irq(xadc->irq, indio_dev);
clk_disable_unprepare(xadc->clk);
cancel_delayed_work(&xadc->zynq_unmask_work);
kfree(xadc->data);
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 62edbdae1244..8c0009585c16 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -68,6 +68,7 @@ struct xadc {
spinlock_t lock;
struct completion completion;
+ int irq;
};
struct xadc_ops {
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index 5cb5be7612b4..b8e005be4f87 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -21,6 +21,29 @@ config ATLAS_PH_SENSOR
To compile this driver as module, choose M here: the
module will be called atlas-ph-sensor.
+config BME680
+ tristate "Bosch Sensortec BME680 sensor driver"
+ depends on (I2C || SPI)
+ select REGMAP
+ select BME680_I2C if I2C
+ select BME680_SPI if SPI
+ help
+ Say yes here to build support for Bosch Sensortec BME680 sensor with
+ temperature, pressure, humidity and gas sensing capability.
+
+ This driver can also be built as a module. If so, the I2C module
+ will be called bme680_i2c and the SPI module bme680_spi.
+
+config BME680_I2C
+ tristate
+ depends on I2C && BME680
+ select REGMAP_I2C
+
+config BME680_SPI
+ tristate
+ depends on SPI && BME680
+ select REGMAP_SPI
+
config CCS811
tristate "AMS CCS811 VOC sensor"
depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index a629b29d1e0b..2f4c4ba4d781 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -4,6 +4,9 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-ph-sensor.o
+obj-$(CONFIG_BME680) += bme680_core.o
+obj-$(CONFIG_BME680_I2C) += bme680_i2c.o
+obj-$(CONFIG_BME680_SPI) += bme680_spi.o
obj-$(CONFIG_CCS811) += ccs811.o
obj-$(CONFIG_IAQCORE) += ams-iaq-core.o
obj-$(CONFIG_VZ89X) += vz89x.o
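For reference, a hedged example of a config fragment enabling the new driver and both transports as modules (the BME680_I2C/BME680_SPI symbols are selected automatically by CONFIG_BME680 when the respective bus support is enabled):

CONFIG_BME680=m
CONFIG_BME680_I2C=m
CONFIG_BME680_SPI=m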
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
new file mode 100644
index 000000000000..e049323f209a
--- /dev/null
+++ b/drivers/iio/chemical/bme680.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BME680_H_
+#define BME680_H_
+
+#define BME680_REG_CHIP_I2C_ID 0xD0
+#define BME680_REG_CHIP_SPI_ID 0x50
+#define BME680_CHIP_ID_VAL 0x61
+#define BME680_REG_SOFT_RESET_I2C 0xE0
+#define BME680_REG_SOFT_RESET_SPI 0x60
+#define BME680_CMD_SOFTRESET 0xB6
+#define BME680_REG_STATUS 0x73
+#define BME680_SPI_MEM_PAGE_BIT BIT(4)
+#define BME680_SPI_MEM_PAGE_1_VAL 1
+
+#define BME680_REG_TEMP_MSB 0x22
+#define BME680_REG_PRESS_MSB 0x1F
+#define BME680_REG_HUMIDITY_MSB 0x25
+#define BME680_REG_GAS_MSB 0x2A
+#define BME680_REG_GAS_R_LSB 0x2B
+#define BME680_GAS_STAB_BIT BIT(4)
+
+#define BME680_REG_CTRL_HUMIDITY 0x72
+#define BME680_OSRS_HUMIDITY_MASK GENMASK(2, 0)
+
+#define BME680_REG_CTRL_MEAS 0x74
+#define BME680_OSRS_TEMP_MASK GENMASK(7, 5)
+#define BME680_OSRS_PRESS_MASK GENMASK(4, 2)
+#define BME680_MODE_MASK GENMASK(1, 0)
+
+#define BME680_MODE_FORCED 1
+#define BME680_MODE_SLEEP 0
+
+#define BME680_REG_CONFIG 0x75
+#define BME680_FILTER_MASK GENMASK(4, 2)
+#define BME680_FILTER_COEFF_VAL BIT(1)
+
+/* TEMP/PRESS/HUMID reading skipped */
+#define BME680_MEAS_SKIPPED 0x8000
+
+#define BME680_MAX_OVERFLOW_VAL 0x40000000
+#define BME680_HUM_REG_SHIFT_VAL 4
+#define BME680_BIT_H1_DATA_MSK 0x0F
+
+#define BME680_REG_RES_HEAT_RANGE 0x02
+#define BME680_RHRANGE_MSK 0x30
+#define BME680_REG_RES_HEAT_VAL 0x00
+#define BME680_REG_RANGE_SW_ERR 0x04
+#define BME680_RSERROR_MSK 0xF0
+#define BME680_REG_RES_HEAT_0 0x5A
+#define BME680_REG_GAS_WAIT_0 0x64
+#define BME680_GAS_RANGE_MASK 0x0F
+#define BME680_ADC_GAS_RES_SHIFT 6
+#define BME680_AMB_TEMP 25
+
+#define BME680_REG_CTRL_GAS_1 0x71
+#define BME680_RUN_GAS_MASK BIT(4)
+#define BME680_NB_CONV_MASK GENMASK(3, 0)
+#define BME680_RUN_GAS_EN_BIT BIT(4)
+#define BME680_NB_CONV_0_VAL 0
+
+#define BME680_REG_MEAS_STAT_0 0x1D
+#define BME680_GAS_MEAS_BIT BIT(6)
+
+/* Calibration Parameters */
+#define BME680_T2_LSB_REG 0x8A
+#define BME680_T3_REG 0x8C
+#define BME680_P1_LSB_REG 0x8E
+#define BME680_P2_LSB_REG 0x90
+#define BME680_P3_REG 0x92
+#define BME680_P4_LSB_REG 0x94
+#define BME680_P5_LSB_REG 0x96
+#define BME680_P7_REG 0x98
+#define BME680_P6_REG 0x99
+#define BME680_P8_LSB_REG 0x9C
+#define BME680_P9_LSB_REG 0x9E
+#define BME680_P10_REG 0xA0
+#define BME680_H2_LSB_REG 0xE2
+#define BME680_H2_MSB_REG 0xE1
+#define BME680_H1_MSB_REG 0xE3
+#define BME680_H1_LSB_REG 0xE2
+#define BME680_H3_REG 0xE4
+#define BME680_H4_REG 0xE5
+#define BME680_H5_REG 0xE6
+#define BME680_H6_REG 0xE7
+#define BME680_H7_REG 0xE8
+#define BME680_T1_LSB_REG 0xE9
+#define BME680_GH2_LSB_REG 0xEB
+#define BME680_GH1_REG 0xED
+#define BME680_GH3_REG 0xEE
+
+extern const struct regmap_config bme680_regmap_config;
+
+int bme680_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name);
+
+#endif /* BME680_H_ */
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
new file mode 100644
index 000000000000..7d9bb62baa3f
--- /dev/null
+++ b/drivers/iio/chemical/bme680_core.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Bosch BME680 - Temperature, Pressure, Humidity & Gas Sensor
+ *
+ * Copyright (C) 2017 - 2018 Bosch Sensortec GmbH
+ * Copyright (C) 2018 Himanshu Jha <himanshujha199640@gmail.com>
+ *
+ * Datasheet:
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME680-DS001-00.pdf
+ */
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/log2.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include "bme680.h"
+
+struct bme680_calib {
+ u16 par_t1;
+ s16 par_t2;
+ s8 par_t3;
+ u16 par_p1;
+ s16 par_p2;
+ s8 par_p3;
+ s16 par_p4;
+ s16 par_p5;
+ s8 par_p6;
+ s8 par_p7;
+ s16 par_p8;
+ s16 par_p9;
+ u8 par_p10;
+ u16 par_h1;
+ u16 par_h2;
+ s8 par_h3;
+ s8 par_h4;
+ s8 par_h5;
+ s8 par_h6;
+ s8 par_h7;
+ s8 par_gh1;
+ s16 par_gh2;
+ s8 par_gh3;
+ u8 res_heat_range;
+ s8 res_heat_val;
+ s8 range_sw_err;
+};
+
+struct bme680_data {
+ struct regmap *regmap;
+ struct bme680_calib bme680;
+ u8 oversampling_temp;
+ u8 oversampling_press;
+ u8 oversampling_humid;
+ u16 heater_dur;
+ u16 heater_temp;
+ /*
+ * Carryover value from temperature conversion, used in pressure
+ * and humidity compensation calculations.
+ */
+ s32 t_fine;
+};
+
+const struct regmap_config bme680_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+EXPORT_SYMBOL(bme680_regmap_config);
+
+static const struct iio_chan_spec bme680_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+ {
+ .type = IIO_RESISTANCE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static const int bme680_oversampling_avail[] = { 1, 2, 4, 8, 16 };
+
+static int bme680_read_calib(struct bme680_data *data,
+ struct bme680_calib *calib)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int tmp, tmp_msb, tmp_lsb;
+ int ret;
+ __le16 buf;
+
+ /* Temperature related coefficients */
+ ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_T1_LSB_REG\n");
+ return ret;
+ }
+ calib->par_t1 = le16_to_cpu(buf);
+
+ ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_T2_LSB_REG\n");
+ return ret;
+ }
+ calib->par_t2 = le16_to_cpu(buf);
+
+ ret = regmap_read(data->regmap, BME680_T3_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_T3_REG\n");
+ return ret;
+ }
+ calib->par_t3 = tmp;
+
+ /* Pressure related coefficients */
+ ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P1_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p1 = le16_to_cpu(buf);
+
+ ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P2_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p2 = le16_to_cpu(buf);
+
+ ret = regmap_read(data->regmap, BME680_P3_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P3_REG\n");
+ return ret;
+ }
+ calib->par_p3 = tmp;
+
+ ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P4_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p4 = le16_to_cpu(buf);
+
+ ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P5_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p5 = le16_to_cpu(buf);
+
+ ret = regmap_read(data->regmap, BME680_P6_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P6_REG\n");
+ return ret;
+ }
+ calib->par_p6 = tmp;
+
+ ret = regmap_read(data->regmap, BME680_P7_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P7_REG\n");
+ return ret;
+ }
+ calib->par_p7 = tmp;
+
+ ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P8_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p8 = le16_to_cpu(buf);
+
+ ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P9_LSB_REG\n");
+ return ret;
+ }
+ calib->par_p9 = le16_to_cpu(buf);
+
+ ret = regmap_read(data->regmap, BME680_P10_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_P10_REG\n");
+ return ret;
+ }
+ calib->par_p10 = tmp;
+
+ /* Humidity related coefficients */
+ ret = regmap_read(data->regmap, BME680_H1_MSB_REG, &tmp_msb);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H1_MSB_REG\n");
+ return ret;
+ }
+
+ ret = regmap_read(data->regmap, BME680_H1_LSB_REG, &tmp_lsb);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H1_LSB_REG\n");
+ return ret;
+ }
+
+ calib->par_h1 = (tmp_msb << BME680_HUM_REG_SHIFT_VAL) |
+ (tmp_lsb & BME680_BIT_H1_DATA_MSK);
+
+ ret = regmap_read(data->regmap, BME680_H2_MSB_REG, &tmp_msb);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H2_MSB_REG\n");
+ return ret;
+ }
+
+ ret = regmap_read(data->regmap, BME680_H2_LSB_REG, &tmp_lsb);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H2_LSB_REG\n");
+ return ret;
+ }
+
+ calib->par_h2 = (tmp_msb << BME680_HUM_REG_SHIFT_VAL) |
+ (tmp_lsb >> BME680_HUM_REG_SHIFT_VAL);
+
+ ret = regmap_read(data->regmap, BME680_H3_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H3_REG\n");
+ return ret;
+ }
+ calib->par_h3 = tmp;
+
+ ret = regmap_read(data->regmap, BME680_H4_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H4_REG\n");
+ return ret;
+ }
+ calib->par_h4 = tmp;
+
+ ret = regmap_read(data->regmap, BME680_H5_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H5_REG\n");
+ return ret;
+ }
+ calib->par_h5 = tmp;
+
+ ret = regmap_read(data->regmap, BME680_H6_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H6_REG\n");
+ return ret;
+ }
+ calib->par_h6 = tmp;
+
+ ret = regmap_read(data->regmap, BME680_H7_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_H7_REG\n");
+ return ret;
+ }
+ calib->par_h7 = tmp;
+
+ /* Gas heater related coefficients */
+ ret = regmap_read(data->regmap, BME680_GH1_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_GH1_REG\n");
+ return ret;
+ }
+ calib->par_gh1 = tmp;
+
+ ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG,
+ (u8 *) &buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_GH2_LSB_REG\n");
+ return ret;
+ }
+ calib->par_gh2 = le16_to_cpu(buf);
+
+ ret = regmap_read(data->regmap, BME680_GH3_REG, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read BME680_GH3_REG\n");
+ return ret;
+ }
+ calib->par_gh3 = tmp;
+
+ /* Other coefficients */
+ ret = regmap_read(data->regmap, BME680_REG_RES_HEAT_RANGE, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read resistance heat range\n");
+ return ret;
+ }
+ calib->res_heat_range = (tmp & BME680_RHRANGE_MSK) / 16;
+
+ ret = regmap_read(data->regmap, BME680_REG_RES_HEAT_VAL, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read resistance heat value\n");
+ return ret;
+ }
+ calib->res_heat_val = tmp;
+
+ ret = regmap_read(data->regmap, BME680_REG_RANGE_SW_ERR, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read range software error\n");
+ return ret;
+ }
+ calib->range_sw_err = (tmp & BME680_RSERROR_MSK) / 16;
+
+ return 0;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L876
+ *
+ * Returns temperature measurement in DegC, resolution is 0.01 DegC. Therefore,
+ * output value of "3233" represents 32.33 DegC.
+ */
+static s16 bme680_compensate_temp(struct bme680_data *data,
+ s32 adc_temp)
+{
+ struct bme680_calib *calib = &data->bme680;
+ s64 var1, var2, var3;
+ s16 calc_temp;
+
+ var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
+ var2 = (var1 * calib->par_t2) >> 11;
+ var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
+ var3 = (var3 * (calib->par_t3 << 4)) >> 14;
+ data->t_fine = var2 + var3;
+ calc_temp = (data->t_fine * 5 + 128) >> 8;
+
+ return calc_temp;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L896
+ *
+ * Returns pressure measurement in Pa. Output value of "97356" represents
+ * 97356 Pa = 973.56 hPa.
+ */
+static u32 bme680_compensate_press(struct bme680_data *data,
+ u32 adc_press)
+{
+ struct bme680_calib *calib = &data->bme680;
+ s32 var1, var2, var3, press_comp;
+
+ var1 = (data->t_fine >> 1) - 64000;
+ var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
+ var2 = var2 + (var1 * calib->par_p5 << 1);
+ var2 = (var2 >> 2) + (calib->par_p4 << 16);
+ var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) *
+ (calib->par_p3 << 5)) >> 3) +
+ ((calib->par_p2 * var1) >> 1);
+ var1 = var1 >> 18;
+ var1 = ((32768 + var1) * calib->par_p1) >> 15;
+ press_comp = 1048576 - adc_press;
+ press_comp = ((press_comp - (var2 >> 12)) * 3125);
+
+ if (press_comp >= BME680_MAX_OVERFLOW_VAL)
+ press_comp = ((press_comp / (u32)var1) << 1);
+ else
+ press_comp = ((press_comp << 1) / (u32)var1);
+
+ var1 = (calib->par_p9 * (((press_comp >> 3) *
+ (press_comp >> 3)) >> 13)) >> 12;
+ var2 = ((press_comp >> 2) * calib->par_p8) >> 13;
+ var3 = ((press_comp >> 8) * (press_comp >> 8) *
+ (press_comp >> 8) * calib->par_p10) >> 17;
+
+ press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4;
+
+ return press_comp;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L937
+ *
+ * Returns humidity measurement in percent, resolution is 0.001 percent. Output
+ * value of "43215" represents 43.215 %rH.
+ */
+static u32 bme680_compensate_humid(struct bme680_data *data,
+ u16 adc_humid)
+{
+ struct bme680_calib *calib = &data->bme680;
+ s32 var1, var2, var3, var4, var5, var6, temp_scaled, calc_hum;
+
+ temp_scaled = (data->t_fine * 5 + 128) >> 8;
+ var1 = (adc_humid - ((s32) ((s32) calib->par_h1 * 16))) -
+ (((temp_scaled * (s32) calib->par_h3) / 100) >> 1);
+ var2 = ((s32) calib->par_h2 *
+ (((temp_scaled * calib->par_h4) / 100) +
+ (((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
+ >> 6) / 100) + (1 << 14))) >> 10;
+ var3 = var1 * var2;
+ var4 = calib->par_h6 << 7;
+ var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4;
+ var5 = ((var3 >> 14) * (var3 >> 14)) >> 10;
+ var6 = (var4 * var5) >> 1;
+ calc_hum = (((var3 + var6) >> 10) * 1000) >> 12;
+
+ if (calc_hum > 100000) /* Cap at 100%rH */
+ calc_hum = 100000;
+ else if (calc_hum < 0)
+ calc_hum = 0;
+
+ return calc_hum;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L973
+ *
+ * Returns gas measurement in Ohm. Output value of "82986" represents 82986 ohms.
+ */
+static u32 bme680_compensate_gas(struct bme680_data *data, u16 gas_res_adc,
+ u8 gas_range)
+{
+ struct bme680_calib *calib = &data->bme680;
+ s64 var1;
+ u64 var2;
+ s64 var3;
+ u32 calc_gas_res;
+
+ /* Look-up table for the possible gas range values */
+ const u32 lookupTable[16] = {2147483647u, 2147483647u,
+ 2147483647u, 2147483647u, 2147483647u,
+ 2126008810u, 2147483647u, 2130303777u,
+ 2147483647u, 2147483647u, 2143188679u,
+ 2136746228u, 2147483647u, 2126008810u,
+ 2147483647u, 2147483647u};
+
+ var1 = ((1340 + (5 * (s64) calib->range_sw_err)) *
+ ((s64) lookupTable[gas_range])) >> 16;
+ var2 = ((gas_res_adc << 15) - 16777216) + var1;
+ var3 = ((125000 << (15 - gas_range)) * var1) >> 9;
+ var3 += (var2 >> 1);
+ calc_gas_res = div64_s64(var3, (s64) var2);
+
+ return calc_gas_res;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L1002
+ */
+static u8 bme680_calc_heater_res(struct bme680_data *data, u16 temp)
+{
+ struct bme680_calib *calib = &data->bme680;
+ s32 var1, var2, var3, var4, var5, heatr_res_x100;
+ u8 heatr_res;
+
+ if (temp > 400) /* Cap temperature */
+ temp = 400;
+
+ var1 = (((s32) BME680_AMB_TEMP * calib->par_gh3) / 1000) * 256;
+ var2 = (calib->par_gh1 + 784) * (((((calib->par_gh2 + 154009) *
+ temp * 5) / 100)
+ + 3276800) / 10);
+ var3 = var1 + (var2 / 2);
+ var4 = (var3 / (calib->res_heat_range + 4));
+ var5 = 131 * calib->res_heat_val + 65536;
+ heatr_res_x100 = ((var4 / var5) - 250) * 34;
+ heatr_res = (heatr_res_x100 + 50) / 100;
+
+ return heatr_res;
+}
+
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L1188
+ */
+static u8 bme680_calc_heater_dur(u16 dur)
+{
+ u8 durval, factor = 0;
+
+ if (dur >= 0xfc0) {
+ durval = 0xff; /* Max duration */
+ } else {
+ while (dur > 0x3F) {
+ dur = dur / 4;
+ factor += 1;
+ }
+ durval = dur + (factor * 64);
+ }
+
+ return durval;
+}
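The resulting encoding stores a multiplier exponent in bits [7:6] and a base duration in bits [5:0]. A hypothetical decode helper (not part of the driver) makes the round trip visible: the driver's default 150 ms request encodes as 37 + 1 * 64 = 101 and decodes back to 37 * 4 = 148 ms.

static unsigned int bme680_decode_heater_dur(u8 durval)
{
	unsigned int factor = durval >> 6;	/* multiplier exponent */
	unsigned int base = durval & 0x3F;	/* base duration in ms */

	return base << (2 * factor);		/* base * 4^factor */
}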
+
+static int bme680_set_mode(struct bme680_data *data, bool mode)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ if (mode) {
+ ret = regmap_write_bits(data->regmap, BME680_REG_CTRL_MEAS,
+ BME680_MODE_MASK, BME680_MODE_FORCED);
+ if (ret < 0)
+ dev_err(dev, "failed to set forced mode\n");
+
+ } else {
+ ret = regmap_write_bits(data->regmap, BME680_REG_CTRL_MEAS,
+ BME680_MODE_MASK, BME680_MODE_SLEEP);
+ if (ret < 0)
+ dev_err(dev, "failed to set sleep mode\n");
+
+ }
+
+ return ret;
+}
+
+static int bme680_chip_config(struct bme680_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ u8 osrs = FIELD_PREP(BME680_OSRS_HUMIDITY_MASK,
+ data->oversampling_humid + 1);
+ /*
+ * Highly recommended to set oversampling of humidity before
+ * temperature/pressure oversampling.
+ */
+ ret = regmap_update_bits(data->regmap, BME680_REG_CTRL_HUMIDITY,
+ BME680_OSRS_HUMIDITY_MASK, osrs);
+ if (ret < 0) {
+ dev_err(dev, "failed to write ctrl_hum register\n");
+ return ret;
+ }
+
+ /* IIR filter settings */
+ ret = regmap_update_bits(data->regmap, BME680_REG_CONFIG,
+ BME680_FILTER_MASK,
+ BME680_FILTER_COEFF_VAL);
+ if (ret < 0) {
+ dev_err(dev, "failed to write config register\n");
+ return ret;
+ }
+
+ osrs = FIELD_PREP(BME680_OSRS_TEMP_MASK, data->oversampling_temp + 1) |
+ FIELD_PREP(BME680_OSRS_PRESS_MASK, data->oversampling_press + 1);
+
+ ret = regmap_write_bits(data->regmap, BME680_REG_CTRL_MEAS,
+ BME680_OSRS_TEMP_MASK |
+ BME680_OSRS_PRESS_MASK,
+ osrs);
+ if (ret < 0)
+ dev_err(dev, "failed to write ctrl_meas register\n");
+
+ return ret;
+}
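The "+ 1" in the FIELD_PREP() arguments reflects the register encoding: data->oversampling_* holds log2 of the user-visible ratio, while the osrs_* fields use 0 for a skipped measurement and 1..5 for 1x..16x (the 0-means-skipped detail is a datasheet assumption, not visible in the patch). A sketch of the mapping, with ilog2() from linux/log2.h:

static u8 bme680_osrs_field(unsigned int ratio)
{
	/* e.g. an 8x ratio -> ilog2(8) + 1 = 4 */
	return ilog2(ratio) + 1;
}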
+
+static int bme680_gas_config(struct bme680_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ u8 heatr_res, heatr_dur;
+
+ heatr_res = bme680_calc_heater_res(data, data->heater_temp);
+
+ /* set target heater temperature */
+ ret = regmap_write(data->regmap, BME680_REG_RES_HEAT_0, heatr_res);
+ if (ret < 0) {
+ dev_err(dev, "failed to write res_heat_0 register\n");
+ return ret;
+ }
+
+ heatr_dur = bme680_calc_heater_dur(data->heater_dur);
+
+ /* set target heating duration */
+ ret = regmap_write(data->regmap, BME680_REG_GAS_WAIT_0, heatr_dur);
+ if (ret < 0) {
+ dev_err(dev, "failted to write gas_wait_0 register\n");
+ return ret;
+ }
+
+ /* Selecting the runGas and NB conversion settings for the sensor */
+ ret = regmap_update_bits(data->regmap, BME680_REG_CTRL_GAS_1,
+ BME680_RUN_GAS_MASK | BME680_NB_CONV_MASK,
+ BME680_RUN_GAS_EN_BIT | BME680_NB_CONV_0_VAL);
+ if (ret < 0)
+ dev_err(dev, "failed to write ctrl_gas_1 register\n");
+
+ return ret;
+}
+
+static int bme680_read_temp(struct bme680_data *data,
+ int *val, int *val2)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ __be32 tmp = 0;
+ s32 adc_temp;
+ s16 comp_temp;
+
+ /* set forced mode to trigger measurement */
+ ret = bme680_set_mode(data, true);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
+ (u8 *) &tmp, 3);
+ if (ret < 0) {
+ dev_err(dev, "failed to read temperature\n");
+ return ret;
+ }
+
+ adc_temp = be32_to_cpu(tmp) >> 12;
+ if (adc_temp == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading temperature skipped\n");
+ return -EINVAL;
+ }
+ comp_temp = bme680_compensate_temp(data, adc_temp);
+ /*
+ * val might be NULL if we're called by the read_press/read_humid
+ * routine, which calls this to get the t_fine value used in
+ * compensate_press/compensate_humid to get compensated
+ * pressure/humidity readings.
+ */
+ if (val && val2) {
+ *val = comp_temp;
+ *val2 = 100;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return ret;
+}
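IIO_VAL_FRACTIONAL makes the IIO core report val / val2 to user space, which is how the per-channel scaling in the compensation comments works out. A small sketch using the sample values quoted in those comments:

#include <stdio.h>

int main(void)
{
	/* Sample outputs from the compensation function comments */
	int temp = 3233, press = 97356, humid = 43215;

	printf("%d.%02d degC\n", temp / 100, temp % 100);	/* 32.33 */
	printf("%d.%02d hPa\n", press / 100, press % 100);	/* 973.56 */
	printf("%d.%03d %%rH\n", humid / 1000, humid % 1000);	/* 43.215 */
	return 0;
}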
+
+static int bme680_read_press(struct bme680_data *data,
+ int *val, int *val2)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ __be32 tmp = 0;
+ s32 adc_press;
+
+ /* Read and compensate temperature to get a reading of t_fine */
+ ret = bme680_read_temp(data, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB,
+ (u8 *) &tmp, 3);
+ if (ret < 0) {
+ dev_err(dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ adc_press = be32_to_cpu(tmp) >> 12;
+ if (adc_press == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading pressure skipped\n");
+ return -EINVAL;
+ }
+
+ *val = bme680_compensate_press(data, adc_press);
+ *val2 = 100;
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bme680_read_humid(struct bme680_data *data,
+ int *val, int *val2)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ __be16 tmp = 0;
+ s32 adc_humidity;
+ u32 comp_humidity;
+
+ /* Read and compensate temperature to get a reading of t_fine */
+ ret = bme680_read_temp(data, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_HUMIDITY_MSB,
+ (u8 *) &tmp, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read humidity\n");
+ return ret;
+ }
+
+ adc_humidity = be16_to_cpu(tmp);
+ if (adc_humidity == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading humidity skipped\n");
+ return -EINVAL;
+ }
+ comp_humidity = bme680_compensate_humid(data, adc_humidity);
+
+ *val = comp_humidity;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bme680_read_gas(struct bme680_data *data,
+ int *val)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ __be16 tmp = 0;
+ unsigned int check;
+ u16 adc_gas_res;
+ u8 gas_range;
+
+ /* Set heater settings */
+ ret = bme680_gas_config(data);
+ if (ret < 0) {
+ dev_err(dev, "failed to set gas config\n");
+ return ret;
+ }
+
+ /* set forced mode to trigger measurement */
+ ret = bme680_set_mode(data, true);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
+ if (ret < 0) {
+ dev_err(dev, "failed to read meas_stat_0 register\n");
+ return ret;
+ }
+
+ if (check & BME680_GAS_MEAS_BIT) {
+ dev_err(dev, "gas measurement incomplete\n");
+ return -EBUSY;
+ }
+
+ ret = regmap_read(data->regmap, BME680_REG_GAS_R_LSB, &check);
+ if (ret < 0) {
+ dev_err(dev, "failed to read gas_r_lsb register\n");
+ return ret;
+ }
+
+ /*
+ * This occurs if either the gas heating duration was insufficient
+ * to reach the target heater temperature or the target
+ * heater temperature was too high for the heater sink to
+ * reach.
+ */
+ if ((check & BME680_GAS_STAB_BIT) == 0) {
+ dev_err(dev, "heater failed to reach the target temperature\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB,
+ (u8 *) &tmp, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read gas resistance\n");
+ return ret;
+ }
+
+ gas_range = check & BME680_GAS_RANGE_MASK;
+ adc_gas_res = be16_to_cpu(tmp) >> BME680_ADC_GAS_RES_SHIFT;
+
+ *val = bme680_compensate_gas(data, adc_gas_res, gas_range);
+ return IIO_VAL_INT;
+}
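The gas_r_lsb register does double duty: its low nibble carries the range index while its top bits are the ADC LSBs, which is why the big-endian word read from 0x2A/0x2B is shifted down by BME680_ADC_GAS_RES_SHIFT. Hypothetical helpers mirroring the masks used above:

static inline u16 bme680_gas_adc(u16 gas_regs)
{
	return gas_regs >> BME680_ADC_GAS_RES_SHIFT;	/* top 10 bits */
}

static inline u8 bme680_gas_range(u8 gas_r_lsb)
{
	return gas_r_lsb & BME680_GAS_RANGE_MASK;	/* low nibble */
}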
+
+static int bme680_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_TEMP:
+ return bme680_read_temp(data, val, val2);
+ case IIO_PRESSURE:
+ return bme680_read_press(data, val, val2);
+ case IIO_HUMIDITYRELATIVE:
+ return bme680_read_humid(data, val, val2);
+ case IIO_RESISTANCE:
+ return bme680_read_gas(data, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = 1 << data->oversampling_temp;
+ return IIO_VAL_INT;
+ case IIO_PRESSURE:
+ *val = 1 << data->oversampling_press;
+ return IIO_VAL_INT;
+ case IIO_HUMIDITYRELATIVE:
+ *val = 1 << data->oversampling_humid;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bme680_write_oversampling_ratio_temp(struct bme680_data *data,
+ int val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bme680_oversampling_avail); i++) {
+ if (bme680_oversampling_avail[i] == val) {
+ data->oversampling_temp = ilog2(val);
+
+ return bme680_chip_config(data);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bme680_write_oversampling_ratio_press(struct bme680_data *data,
+ int val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bme680_oversampling_avail); i++) {
+ if (bme680_oversampling_avail[i] == val) {
+ data->oversampling_press = ilog2(val);
+
+ return bme680_chip_config(data);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bme680_write_oversampling_ratio_humid(struct bme680_data *data,
+ int val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bme680_oversampling_avail); i++) {
+ if (bme680_oversampling_avail[i] == val) {
+ data->oversampling_humid = ilog2(val);
+
+ return bme680_chip_config(data);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bme680_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bme680_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_TEMP:
+ return bme680_write_oversampling_ratio_temp(data, val);
+ case IIO_PRESSURE:
+ return bme680_write_oversampling_ratio_press(data, val);
+ case IIO_HUMIDITYRELATIVE:
+ return bme680_write_oversampling_ratio_humid(data, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static const char bme680_oversampling_ratio_show[] = "1 2 4 8 16";
+
+static IIO_CONST_ATTR(oversampling_ratio_available,
+ bme680_oversampling_ratio_show);
+
+static struct attribute *bme680_attributes[] = {
+ &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bme680_attribute_group = {
+ .attrs = bme680_attributes,
+};
+
+static const struct iio_info bme680_info = {
+ .read_raw = &bme680_read_raw,
+ .write_raw = &bme680_write_raw,
+ .attrs = &bme680_attribute_group,
+};
+
+static const char *bme680_match_acpi_device(struct device *dev)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return NULL;
+
+ return dev_name(dev);
+}
+
+int bme680_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name)
+{
+ struct iio_dev *indio_dev;
+ struct bme680_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ if (!name && ACPI_HANDLE(dev))
+ name = bme680_match_acpi_device(dev);
+
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
+ indio_dev->channels = bme680_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bme680_channels);
+ indio_dev->info = &bme680_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* default values for the sensor */
+ data->oversampling_humid = ilog2(2); /* 2X oversampling rate */
+ data->oversampling_press = ilog2(4); /* 4X oversampling rate */
+ data->oversampling_temp = ilog2(8); /* 8X oversampling rate */
+ data->heater_temp = 320; /* degree Celsius */
+ data->heater_dur = 150; /* milliseconds */
+
+ ret = bme680_chip_config(data);
+ if (ret < 0) {
+ dev_err(dev, "failed to set chip_config data\n");
+ return ret;
+ }
+
+ ret = bme680_gas_config(data);
+ if (ret < 0) {
+ dev_err(dev, "failed to set gas config data\n");
+ return ret;
+ }
+
+ ret = bme680_read_calib(data, &data->bme680);
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to read calibration coefficients at probe\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_GPL(bme680_core_probe);
+
+MODULE_AUTHOR("Himanshu Jha <himanshujha199640@gmail.com>");
+MODULE_DESCRIPTION("Bosch BME680 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
new file mode 100644
index 000000000000..06d4be539d2e
--- /dev/null
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BME680 - I2C Driver
+ *
+ * Copyright (C) 2018 Himanshu Jha <himanshujha199640@gmail.com>
+ *
+ * 7-Bit I2C slave address is:
+ * - 0x76 if SDO is pulled to GND
+ * - 0x77 if SDO is pulled to VDDIO
+ *
+ * Note: SDO pin cannot be left floating otherwise I2C address
+ * will be undefined.
+ */
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bme680.h"
+
+static int bme680_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name = NULL;
+ unsigned int val;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
+ BME680_CMD_SOFTRESET);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to reset chip\n");
+ return ret;
+ }
+
+ ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading I2C chip ID\n");
+ return ret;
+ }
+
+ if (val != BME680_CHIP_ID_VAL) {
+ dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
+ val, BME680_CHIP_ID_VAL);
+ return -ENODEV;
+ }
+
+ if (id)
+ name = id->name;
+
+ return bme680_core_probe(&client->dev, regmap, name);
+}
+
+static const struct i2c_device_id bme680_i2c_id[] = {
+ {"bme680", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, bme680_i2c_id);
+
+static const struct acpi_device_id bme680_acpi_match[] = {
+ {"BME0680", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, bme680_acpi_match);
+
+static struct i2c_driver bme680_i2c_driver = {
+ .driver = {
+ .name = "bme680_i2c",
+ .acpi_match_table = ACPI_PTR(bme680_acpi_match),
+ },
+ .probe = bme680_i2c_probe,
+ .id_table = bme680_i2c_id,
+};
+module_i2c_driver(bme680_i2c_driver);
+
+MODULE_AUTHOR("Himanshu Jha <himanshujha199640@gmail.com>");
+MODULE_DESCRIPTION("BME680 I2C driver");
+MODULE_LICENSE("GPL v2");
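On systems without device tree or ACPI tables, the driver can be bound with a board-file style declaration; a hedged sketch (bus number and address are assumptions):

#include <linux/i2c.h>

static struct i2c_board_info bme680_board_info __initdata = {
	I2C_BOARD_INFO("bme680", 0x76),	/* SDO pulled to GND */
};

/* e.g. from arch setup code: i2c_register_board_info(1, &bme680_board_info, 1); */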
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
new file mode 100644
index 000000000000..c9fb05e8d0b9
--- /dev/null
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BME680 - SPI Driver
+ *
+ * Copyright (C) 2018 Himanshu Jha <himanshujha199640@gmail.com>
+ */
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "bme680.h"
+
+static int bme680_regmap_spi_write(void *context, const void *data,
+ size_t count)
+{
+ struct spi_device *spi = context;
+ u8 buf[2];
+
+ memcpy(buf, data, 2);
+ /*
+ * The SPI register address (= full register address without bit 7)
+ * and the write command (bit7 = RW = '0')
+ */
+ buf[0] &= ~0x80;
+
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int bme680_regmap_spi_read(void *context, const void *reg,
+ size_t reg_size, void *val, size_t val_size)
+{
+ struct spi_device *spi = context;
+
+ return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static struct regmap_bus bme680_regmap_bus = {
+ .write = bme680_regmap_spi_write,
+ .read = bme680_regmap_spi_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static int bme680_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+ unsigned int val;
+ int ret;
+
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return ret;
+ }
+
+ regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
+ &spi->dev, &bme680_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
+ BME680_CMD_SOFTRESET);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to reset chip\n");
+ return ret;
+ }
+
+ /* After power-on reset, Page 0 (0x80-0xFF) of spi_mem_page is active */
+ ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Error reading SPI chip ID\n");
+ return ret;
+ }
+
+ if (val != BME680_CHIP_ID_VAL) {
+ dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
+ val, BME680_CHIP_ID_VAL);
+ return -ENODEV;
+ }
+ /*
+ * select Page 1 of spi_mem_page to enable access to
+ * to registers from address 0x00 to 0x7F.
+ */
+ ret = regmap_write_bits(regmap, BME680_REG_STATUS,
+ BME680_SPI_MEM_PAGE_BIT,
+ BME680_SPI_MEM_PAGE_1_VAL);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
+ return ret;
+ }
+
+ return bme680_core_probe(&spi->dev, regmap, id->name);
+}
+
+static const struct spi_device_id bme680_spi_id[] = {
+ {"bme680", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(spi, bme680_spi_id);
+
+static const struct acpi_device_id bme680_acpi_match[] = {
+ {"BME0680", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, bme680_acpi_match);
+
+static struct spi_driver bme680_spi_driver = {
+ .driver = {
+ .name = "bme680_spi",
+ .acpi_match_table = ACPI_PTR(bme680_acpi_match),
+ },
+ .probe = bme680_spi_probe,
+ .id_table = bme680_spi_id,
+};
+module_spi_driver(bme680_spi_driver);
+
+MODULE_AUTHOR("Himanshu Jha <himanshujha199640@gmail.com>");
+MODULE_DESCRIPTION("Bosch BME680 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 57db19182e95..26fbd1bd9413 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -380,8 +380,7 @@ void st_sensors_of_name_probe(struct device *dev,
return;
/* The name from the OF match takes precedence if present */
- strncpy(name, of_id->data, len);
- name[len - 1] = '\0';
+ strlcpy(name, of_id->data, len);
}
EXPORT_SYMBOL(st_sensors_of_name_probe);
#else
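For maximum-length names the difference matters: strncpy() does not terminate a string that fills the buffer, which the removed line patched up by hand, while strlcpy() always terminates within len. A hedged illustration with a hypothetical buffer:

char name[8];

strlcpy(name, "lsm9ds1_magn", sizeof(name));	/* name = "lsm9ds1", NUL-terminated */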
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
index b56985078d8c..92be8d0f7735 100644
--- a/drivers/iio/counter/104-quad-8.c
+++ b/drivers/iio/counter/104-quad-8.c
@@ -59,6 +59,39 @@ struct quad8_iio {
unsigned int base;
};
+#define QUAD8_REG_CHAN_OP 0x11
+#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
+/* Borrow Toggle flip-flop */
+#define QUAD8_FLAG_BT BIT(0)
+/* Carry Toggle flip-flop */
+#define QUAD8_FLAG_CT BIT(1)
+/* Error flag */
+#define QUAD8_FLAG_E BIT(4)
+/* Up/Down flag */
+#define QUAD8_FLAG_UD BIT(5)
+/* Reset and Load Signal Decoders */
+#define QUAD8_CTR_RLD 0x00
+/* Counter Mode Register */
+#define QUAD8_CTR_CMR 0x20
+/* Input / Output Control Register */
+#define QUAD8_CTR_IOR 0x40
+/* Index Control Register */
+#define QUAD8_CTR_IDR 0x60
+/* Reset Byte Pointer (three byte data pointer) */
+#define QUAD8_RLD_RESET_BP 0x01
+/* Reset Counter */
+#define QUAD8_RLD_RESET_CNTR 0x02
+/* Reset Borrow Toggle, Carry Toggle, Compare Toggle, and Sign flags */
+#define QUAD8_RLD_RESET_FLAGS 0x04
+/* Reset Error flag */
+#define QUAD8_RLD_RESET_E 0x06
+/* Preset Register to Counter */
+#define QUAD8_RLD_PRESET_CNTR 0x08
+/* Transfer Counter to Output Latch */
+#define QUAD8_RLD_CNTR_OUT 0x10
+#define QUAD8_CHAN_OP_ENABLE_COUNTERS 0x00
+#define QUAD8_CHAN_OP_RESET_COUNTERS 0x01
+
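As a sanity check, the named constants compose back into the magic numbers they replace; a hypothetical compile-time check for the 0x11 written after each counter read:

static_assert((QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT) == 0x11,
	      "RLD command composition changed");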
static int quad8_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long mask)
{
@@ -72,19 +105,21 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (chan->type == IIO_INDEX) {
- *val = !!(inb(priv->base + 0x16) & BIT(chan->channel));
+ *val = !!(inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
+ & BIT(chan->channel));
return IIO_VAL_INT;
}
flags = inb(base_offset + 1);
- borrow = flags & BIT(0);
- carry = !!(flags & BIT(1));
+ borrow = flags & QUAD8_FLAG_BT;
+ carry = !!(flags & QUAD8_FLAG_CT);
/* Borrow XOR Carry effectively doubles count range */
*val = (borrow ^ carry) << 24;
/* Reset Byte Pointer; transfer Counter to Output Latch */
- outb(0x11, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
+ base_offset + 1);
for (i = 0; i < 3; i++)
*val |= (unsigned int)inb(base_offset) << (8 * i);
@@ -120,17 +155,17 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
/* Reset Byte Pointer */
- outb(0x01, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Counter can only be set via Preset Register */
for (i = 0; i < 3; i++)
outb(val >> (8 * i), base_offset);
/* Transfer Preset Register to Counter */
- outb(0x08, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
/* Reset Byte Pointer */
- outb(0x01, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Set Preset Register back to original value */
val = priv->preset[chan->channel];
@@ -138,9 +173,9 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
outb(val >> (8 * i), base_offset);
/* Reset Borrow, Carry, Compare, and Sign flags */
- outb(0x02, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
/* Reset Error flag */
- outb(0x06, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
return 0;
case IIO_CHAN_INFO_ENABLE:
@@ -153,7 +188,7 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
ior_cfg = val | priv->preset_enable[chan->channel] << 1;
/* Load I/O control configuration */
- outb(0x40 | ior_cfg, base_offset + 1);
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
return 0;
case IIO_CHAN_INFO_SCALE:
@@ -217,7 +252,7 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
priv->preset[chan->channel] = preset;
/* Reset Byte Pointer */
- outb(0x01, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Set Preset Register */
for (i = 0; i < 3; i++)
@@ -258,7 +293,7 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
(unsigned int)preset_enable << 1;
/* Load I/O control configuration to Input / Output Control Register */
- outb(0x40 | ior_cfg, base_offset);
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
return len;
}
@@ -274,7 +309,7 @@ static int quad8_get_noise_error(struct iio_dev *indio_dev,
struct quad8_iio *const priv = iio_priv(indio_dev);
const int base_offset = priv->base + 2 * chan->channel + 1;
- return !!(inb(base_offset) & BIT(4));
+ return !!(inb(base_offset) & QUAD8_FLAG_E);
}
static const struct iio_enum quad8_noise_error_enum = {
@@ -294,7 +329,7 @@ static int quad8_get_count_direction(struct iio_dev *indio_dev,
struct quad8_iio *const priv = iio_priv(indio_dev);
const int base_offset = priv->base + 2 * chan->channel + 1;
- return !!(inb(base_offset) & BIT(5));
+ return !!(inb(base_offset) & QUAD8_FLAG_UD);
}
static const struct iio_enum quad8_count_direction_enum = {
@@ -324,7 +359,7 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev,
mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
/* Load mode configuration to Counter Mode Register */
- outb(0x20 | mode_cfg, base_offset);
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
return 0;
}
@@ -364,7 +399,7 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
priv->synchronous_mode[chan->channel] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */
- outb(0x60 | idr_cfg, base_offset);
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
return 0;
}
@@ -410,7 +445,7 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
priv->quadrature_mode[chan->channel] = quadrature_mode;
/* Load mode configuration to Counter Mode Register */
- outb(0x20 | mode_cfg, base_offset);
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
return 0;
}
@@ -446,7 +481,7 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev,
priv->index_polarity[chan->channel] = index_polarity;
/* Load Index Control configuration to Index Control Register */
- outb(0x60 | idr_cfg, base_offset);
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
return 0;
}
@@ -556,28 +591,28 @@ static int quad8_probe(struct device *dev, unsigned int id)
priv->base = base[id];
/* Reset all counters and disable interrupt function */
- outb(0x01, base[id] + 0x11);
+ outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
/* Set initial configuration for all counters */
for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
base_offset = base[id] + 2 * i;
/* Reset Byte Pointer */
- outb(0x01, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Reset Preset Register */
for (j = 0; j < 3; j++)
outb(0x00, base_offset);
/* Reset Borrow, Carry, Compare, and Sign flags */
- outb(0x04, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
/* Reset Error flag */
- outb(0x06, base_offset + 1);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
/* Binary encoding; Normal count; non-quadrature mode */
- outb(0x20, base_offset + 1);
+ outb(QUAD8_CTR_CMR, base_offset + 1);
/* Disable A and B inputs; preset on index; FLG1 as Carry */
- outb(0x40, base_offset + 1);
+ outb(QUAD8_CTR_IOR, base_offset + 1);
/* Disable index function; negative index polarity */
- outb(0x60, base_offset + 1);
+ outb(QUAD8_CTR_IDR, base_offset + 1);
}
/* Enable all counters */
- outb(0x00, base[id] + 0x11);
+ outb(QUAD8_CHAN_OP_ENABLE_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 06e90debb9f5..80beb64e9e0c 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -167,6 +167,16 @@ config AD5755
To compile this driver as a module, choose M here: the
module will be called ad5755.
+config AD5758
+ tristate "Analog Devices AD5758 DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5758 single channel
+ Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5758.
+
config AD5761
tristate "Analog Devices AD5761/61R/21/21R DAC driver"
depends on SPI_MASTER
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 57aa230d34ab..a1b37cf99441 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
obj-$(CONFIG_AD5592R) += ad5592r.o
obj-$(CONFIG_AD5593R) += ad5593r.o
obj-$(CONFIG_AD5755) += ad5755.o
+obj-$(CONFIG_AD5758) += ad5758.o
obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index e136f0fd38f0..2ddbfc3fdbae 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -221,6 +221,7 @@ static struct iio_chan_spec name[] = { \
AD5868_CHANNEL(7, 7, bits, _shift), \
}
+DECLARE_AD5693_CHANNELS(ad5311r_channels, 10, 6);
DECLARE_AD5676_CHANNELS(ad5672_channels, 12, 4);
DECLARE_AD5676_CHANNELS(ad5676_channels, 16, 0);
DECLARE_AD5686_CHANNELS(ad5684_channels, 12, 4);
@@ -231,6 +232,12 @@ DECLARE_AD5693_CHANNELS(ad5692r_channels, 14, 2);
DECLARE_AD5693_CHANNELS(ad5691r_channels, 12, 4);
static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
+ [ID_AD5311R] = {
+ .channels = ad5311r_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 1,
+ .regmap_type = AD5693_REGMAP,
+ },
[ID_AD5671R] = {
.channels = ad5672_channels,
.int_vref_mv = 2500,
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index d05cda9f1edd..57b3c61bfb91 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -45,6 +45,7 @@
* ad5686_supported_device_ids:
*/
enum ad5686_supported_device_ids {
+ ID_AD5311R,
ID_AD5671R,
ID_AD5672R,
ID_AD5675R,
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index d18735d7d938..7350d9806a11 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -71,6 +71,7 @@ static int ad5686_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id ad5686_i2c_id[] = {
+ {"ad5311r", ID_AD5311R},
{"ad5671r", ID_AD5671R},
{"ad5675r", ID_AD5675R},
{"ad5691r", ID_AD5691R},
diff --git a/drivers/iio/dac/ad5758.c b/drivers/iio/dac/ad5758.c
new file mode 100644
index 000000000000..bd36333257af
--- /dev/null
+++ b/drivers/iio/dac/ad5758.c
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AD5758 Digital to analog converters driver
+ *
+ * Copyright 2018 Analog Devices Inc.
+ *
+ * TODO: Currently CRC is not supported in this driver
+ */
+#include <linux/bsearch.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* AD5758 registers definition */
+#define AD5758_NOP 0x00
+#define AD5758_DAC_INPUT 0x01
+#define AD5758_DAC_OUTPUT 0x02
+#define AD5758_CLEAR_CODE 0x03
+#define AD5758_USER_GAIN 0x04
+#define AD5758_USER_OFFSET 0x05
+#define AD5758_DAC_CONFIG 0x06
+#define AD5758_SW_LDAC 0x07
+#define AD5758_KEY 0x08
+#define AD5758_GP_CONFIG1 0x09
+#define AD5758_GP_CONFIG2 0x0A
+#define AD5758_DCDC_CONFIG1 0x0B
+#define AD5758_DCDC_CONFIG2 0x0C
+#define AD5758_WDT_CONFIG 0x0F
+#define AD5758_DIGITAL_DIAG_CONFIG 0x10
+#define AD5758_ADC_CONFIG 0x11
+#define AD5758_FAULT_PIN_CONFIG 0x12
+#define AD5758_TWO_STAGE_READBACK_SELECT 0x13
+#define AD5758_DIGITAL_DIAG_RESULTS 0x14
+#define AD5758_ANALOG_DIAG_RESULTS 0x15
+#define AD5758_STATUS 0x16
+#define AD5758_CHIP_ID 0x17
+#define AD5758_FREQ_MONITOR 0x18
+#define AD5758_DEVICE_ID_0 0x19
+#define AD5758_DEVICE_ID_1 0x1A
+#define AD5758_DEVICE_ID_2 0x1B
+#define AD5758_DEVICE_ID_3 0x1C
+
+/* AD5758_DAC_CONFIG */
+#define AD5758_DAC_CONFIG_RANGE_MSK GENMASK(3, 0)
+#define AD5758_DAC_CONFIG_RANGE_MODE(x) (((x) & 0xF) << 0)
+#define AD5758_DAC_CONFIG_INT_EN_MSK BIT(5)
+#define AD5758_DAC_CONFIG_INT_EN_MODE(x) (((x) & 0x1) << 5)
+#define AD5758_DAC_CONFIG_OUT_EN_MSK BIT(6)
+#define AD5758_DAC_CONFIG_OUT_EN_MODE(x) (((x) & 0x1) << 6)
+#define AD5758_DAC_CONFIG_SR_EN_MSK BIT(8)
+#define AD5758_DAC_CONFIG_SR_EN_MODE(x) (((x) & 0x1) << 8)
+#define AD5758_DAC_CONFIG_SR_CLOCK_MSK GENMASK(12, 9)
+#define AD5758_DAC_CONFIG_SR_CLOCK_MODE(x) (((x) & 0xF) << 9)
+#define AD5758_DAC_CONFIG_SR_STEP_MSK GENMASK(15, 13)
+#define AD5758_DAC_CONFIG_SR_STEP_MODE(x) (((x) & 0x7) << 13)
+
+/* AD5758_KEY */
+#define AD5758_KEY_CODE_RESET_1 0x15FA
+#define AD5758_KEY_CODE_RESET_2 0xAF51
+#define AD5758_KEY_CODE_SINGLE_ADC_CONV 0x1ADC
+#define AD5758_KEY_CODE_RESET_WDT 0x0D06
+#define AD5758_KEY_CODE_CALIB_MEM_REFRESH 0xFCBA
+
+/* AD5758_DCDC_CONFIG1 */
+#define AD5758_DCDC_CONFIG1_DCDC_VPROG_MSK GENMASK(4, 0)
+#define AD5758_DCDC_CONFIG1_DCDC_VPROG_MODE(x) (((x) & 0x1F) << 0)
+#define AD5758_DCDC_CONFIG1_DCDC_MODE_MSK GENMASK(6, 5)
+#define AD5758_DCDC_CONFIG1_DCDC_MODE_MODE(x) (((x) & 0x3) << 5)
+#define AD5758_DCDC_CONFIG1_PROT_SW_EN_MSK BIT(7)
+#define AD5758_DCDC_CONFIG1_PROT_SW_EN_MODE(x) (((x) & 0x1) << 7)
+
+/* AD5758_DCDC_CONFIG2 */
+#define AD5758_DCDC_CONFIG2_ILIMIT_MSK GENMASK(3, 1)
+#define AD5758_DCDC_CONFIG2_ILIMIT_MODE(x) (((x) & 0x7) << 1)
+#define AD5758_DCDC_CONFIG2_INTR_SAT_3WI_MSK BIT(11)
+#define AD5758_DCDC_CONFIG2_BUSY_3WI_MSK BIT(12)
+
+/* AD5758_DIGITAL_DIAG_RESULTS */
+#define AD5758_CAL_MEM_UNREFRESHED_MSK BIT(15)
+
+#define AD5758_WR_FLAG_MSK(x) (0x80 | ((x) & 0x1F))
+
+#define AD5758_FULL_SCALE_MICRO 65535000000ULL
+
+/**
+ * struct ad5758_state - driver instance specific data
+ * @spi: spi_device
+ * @lock: mutex lock
+ * @out_range: struct which stores the output range
+ * @dc_dc_mode: variable which stores the mode of operation
+ * @dc_dc_ilim: variable which stores the dc-to-dc converter current limit
+ * @slew_time: variable which stores the target slew time
+ * @pwr_down: variable which contains whether a channel is powered down or not
+ * @d32: spi transfer buffers
+ */
+
+struct ad5758_range {
+ int reg;
+ int min;
+ int max;
+};
+
+struct ad5758_state {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct ad5758_range out_range;
+ unsigned int dc_dc_mode;
+ unsigned int dc_dc_ilim;
+ unsigned int slew_time;
+ bool pwr_down;
+ __be32 d32[3];
+};
+
+/**
+ * Output ranges corresponding to bits [3:0] from DAC_CONFIG register
+ * 0000: 0 V to 5 V voltage range
+ * 0001: 0 V to 10 V voltage range
+ * 0010: ±5 V voltage range
+ * 0011: ±10 V voltage range
+ * 1000: 0 mA to 20 mA current range
+ * 1001: 0 mA to 24 mA current range
+ * 1010: 4 mA to 20 mA current range
+ * 1011: ±20 mA current range
+ * 1100: ±24 mA current range
+ * 1101: -1 mA to +22 mA current range
+ */
+enum ad5758_output_range {
+ AD5758_RANGE_0V_5V,
+ AD5758_RANGE_0V_10V,
+ AD5758_RANGE_PLUSMINUS_5V,
+ AD5758_RANGE_PLUSMINUS_10V,
+ AD5758_RANGE_0mA_20mA = 8,
+ AD5758_RANGE_0mA_24mA,
+ AD5758_RANGE_4mA_24mA,
+ AD5758_RANGE_PLUSMINUS_20mA,
+ AD5758_RANGE_PLUSMINUS_24mA,
+ AD5758_RANGE_MINUS_1mA_PLUS_22mA,
+};
+
+enum ad5758_dc_dc_mode {
+ AD5758_DCDC_MODE_POWER_OFF,
+ AD5758_DCDC_MODE_DPC_CURRENT,
+ AD5758_DCDC_MODE_DPC_VOLTAGE,
+ AD5758_DCDC_MODE_PPC_CURRENT,
+};
+
+static const struct ad5758_range ad5758_voltage_range[] = {
+ { AD5758_RANGE_0V_5V, 0, 5000000 },
+ { AD5758_RANGE_0V_10V, 0, 10000000 },
+ { AD5758_RANGE_PLUSMINUS_5V, -5000000, 5000000 },
+ { AD5758_RANGE_PLUSMINUS_10V, -10000000, 10000000 }
+};
+
+static const struct ad5758_range ad5758_current_range[] = {
+ { AD5758_RANGE_0mA_20mA, 0, 20000 },
+ { AD5758_RANGE_0mA_24mA, 0, 24000 },
+ { AD5758_RANGE_4mA_24mA, 4000, 24000 },
+ { AD5758_RANGE_PLUSMINUS_20mA, -20000, 20000 },
+ { AD5758_RANGE_PLUSMINUS_24mA, -24000, 24000 },
+ { AD5758_RANGE_MINUS_1mA_PLUS_22mA, -1000, 22000 },
+};
+
+static const int ad5758_sr_clk[16] = {
+ 240000, 200000, 150000, 128000, 64000, 32000, 16000, 8000, 4000, 2000,
+ 1000, 512, 256, 128, 64, 16
+};
+
+static const int ad5758_sr_step[8] = {
+ 4, 12, 64, 120, 256, 500, 1820, 2048
+};
+
+static const int ad5758_dc_dc_ilim[6] = {
+ 150000, 200000, 250000, 300000, 350000, 400000
+};
+
+static int ad5758_spi_reg_read(struct ad5758_state *st, unsigned int addr)
+{
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->d32[0],
+ .len = 4,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->d32[1],
+ .rx_buf = &st->d32[2],
+ .len = 4,
+ },
+ };
+ int ret;
+
+ st->d32[0] = cpu_to_be32(
+ (AD5758_WR_FLAG_MSK(AD5758_TWO_STAGE_READBACK_SELECT) << 24) |
+ (addr << 8));
+ st->d32[1] = cpu_to_be32(AD5758_WR_FLAG_MSK(AD5758_NOP) << 24);
+
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+ if (ret < 0)
+ return ret;
+
+ return (be32_to_cpu(st->d32[2]) >> 8) & 0xFFFF;
+}
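Register reads are two-stage: the first 32-bit frame latches the target address into TWO_STAGE_READBACK_SELECT, and the second frame is a NOP whose clock edges shift the 16-bit result out on MISO. For example, reading AD5758_CHIP_ID (0x17) puts these frames on the wire (values derived from the code above):

st->d32[0] = cpu_to_be32(0x93001700);	/* (0x80 | 0x13) << 24 | 0x17 << 8 */
st->d32[1] = cpu_to_be32(0x80000000);	/* (0x80 | 0x00) << 24, the NOP */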
+
+static int ad5758_spi_reg_write(struct ad5758_state *st,
+ unsigned int addr,
+ unsigned int val)
+{
+ st->d32[0] = cpu_to_be32((AD5758_WR_FLAG_MSK(addr) << 24) |
+ ((val & 0xFFFF) << 8));
+
+ return spi_write(st->spi, &st->d32[0], sizeof(st->d32[0]));
+}
+
+static int ad5758_spi_write_mask(struct ad5758_state *st,
+ unsigned int addr,
+ unsigned long int mask,
+ unsigned int val)
+{
+ int regval;
+
+ regval = ad5758_spi_reg_read(st, addr);
+ if (regval < 0)
+ return regval;
+
+ regval &= ~mask;
+ regval |= val;
+
+ return ad5758_spi_reg_write(st, addr, regval);
+}
+
+static int cmpfunc(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int ad5758_find_closest_match(const int *array,
+ unsigned int size, int val)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (val <= array[i])
+ return i;
+ }
+
+ return size - 1;
+}
+
+static int ad5758_wait_for_task_complete(struct ad5758_state *st,
+ unsigned int reg,
+ unsigned int mask)
+{
+ unsigned int timeout;
+ int ret;
+
+ timeout = 10;
+ do {
+ ret = ad5758_spi_reg_read(st, reg);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & mask))
+ return 0;
+
+ usleep_range(100, 1000);
+ } while (--timeout);
+
+ dev_err(&st->spi->dev,
+ "Error reading bit 0x%x in 0x%x register\n", mask, reg);
+
+ return -EIO;
+}
+
+static int ad5758_calib_mem_refresh(struct ad5758_state *st)
+{
+ int ret;
+
+ ret = ad5758_spi_reg_write(st, AD5758_KEY,
+ AD5758_KEY_CODE_CALIB_MEM_REFRESH);
+ if (ret < 0) {
+ dev_err(&st->spi->dev,
+ "Failed to initiate a calibration memory refresh\n");
+ return ret;
+ }
+
+ /* Wait to allow time for the internal calibrations to complete */
+ return ad5758_wait_for_task_complete(st, AD5758_DIGITAL_DIAG_RESULTS,
+ AD5758_CAL_MEM_UNREFRESHED_MSK);
+}
+
+static int ad5758_soft_reset(struct ad5758_state *st)
+{
+ int ret;
+
+ ret = ad5758_spi_reg_write(st, AD5758_KEY, AD5758_KEY_CODE_RESET_1);
+ if (ret < 0)
+ return ret;
+
+ ret = ad5758_spi_reg_write(st, AD5758_KEY, AD5758_KEY_CODE_RESET_2);
+
+ /* Perform a software reset and wait at least 100us */
+ usleep_range(100, 1000);
+
+ return ret;
+}
+
+static int ad5758_set_dc_dc_conv_mode(struct ad5758_state *st,
+ enum ad5758_dc_dc_mode mode)
+{
+ int ret;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG1,
+ AD5758_DCDC_CONFIG1_DCDC_MODE_MSK,
+ AD5758_DCDC_CONFIG1_DCDC_MODE_MODE(mode));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Poll the BUSY_3WI bit in the DCDC_CONFIG2 register until it is 0.
+ * This allows the 3-wire interface communication to complete.
+ */
+ ret = ad5758_wait_for_task_complete(st, AD5758_DCDC_CONFIG2,
+ AD5758_DCDC_CONFIG2_BUSY_3WI_MSK);
+ if (ret < 0)
+ return ret;
+
+ st->dc_dc_mode = mode;
+
+ return ret;
+}
+
+static int ad5758_set_dc_dc_ilim(struct ad5758_state *st, unsigned int ilim)
+{
+ int ret;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG2,
+ AD5758_DCDC_CONFIG2_ILIMIT_MSK,
+ AD5758_DCDC_CONFIG2_ILIMIT_MODE(ilim));
+ if (ret < 0)
+ return ret;
+ /*
+ * Poll the BUSY_3WI bit in the DCDC_CONFIG2 register until it is 0.
+ * This allows the 3-wire interface communication to complete.
+ */
+ return ad5758_wait_for_task_complete(st, AD5758_DCDC_CONFIG2,
+ AD5758_DCDC_CONFIG2_BUSY_3WI_MSK);
+}
+
+static int ad5758_slew_rate_set(struct ad5758_state *st,
+ unsigned int sr_clk_idx,
+ unsigned int sr_step_idx)
+{
+ unsigned int mode;
+ unsigned long mask;
+ int ret;
+
+ mask = AD5758_DAC_CONFIG_SR_EN_MSK |
+ AD5758_DAC_CONFIG_SR_CLOCK_MSK |
+ AD5758_DAC_CONFIG_SR_STEP_MSK;
+ mode = AD5758_DAC_CONFIG_SR_EN_MODE(1) |
+ AD5758_DAC_CONFIG_SR_STEP_MODE(sr_step_idx) |
+ AD5758_DAC_CONFIG_SR_CLOCK_MODE(sr_clk_idx);
+
+ ret = ad5758_spi_write_mask(st, AD5758_DAC_CONFIG, mask, mode);
+ if (ret < 0)
+ return ret;
+
+ /* Wait to allow time for the internal calibrations to complete */
+ return ad5758_wait_for_task_complete(st, AD5758_DIGITAL_DIAG_RESULTS,
+ AD5758_CAL_MEM_UNREFRESHED_MSK);
+}
+
+static int ad5758_slew_rate_config(struct ad5758_state *st)
+{
+ unsigned int sr_clk_idx, sr_step_idx;
+ int i, res;
+ s64 diff_new, diff_old;
+ u64 sr_step, calc_slew_time;
+
+ sr_clk_idx = 0;
+ sr_step_idx = 0;
+ diff_old = S64_MAX;
+ /*
+ * The slew time can be determined by using the formula:
+ * Slew Time = (Full Scale Out / (Step Size x Update Clk Freq))
+ * where the slew time is expressed in microseconds.
+ * Given the desired slew time, the following algorithm determines the
+ * best match for the step size and the update clock frequency.
+ */
+ for (i = 0; i < ARRAY_SIZE(ad5758_sr_clk); i++) {
+ /*
+ * Go through each valid update clock freq and determine a raw
+ * value for the step size by using the formula:
+ * Step Size = Full Scale Out / (Update Clk Freq * Slew Time)
+ */
+ sr_step = AD5758_FULL_SCALE_MICRO;
+ do_div(sr_step, ad5758_sr_clk[i]);
+ do_div(sr_step, st->slew_time);
+ /*
+ * After a raw value for step size was determined, find the
+ * closest valid match
+ */
+ res = ad5758_find_closest_match(ad5758_sr_step,
+ ARRAY_SIZE(ad5758_sr_step),
+ sr_step);
+ /* Calculate the slew time */
+ calc_slew_time = AD5758_FULL_SCALE_MICRO;
+ do_div(calc_slew_time, ad5758_sr_step[res]);
+ do_div(calc_slew_time, ad5758_sr_clk[i]);
+ /*
+ * Determine with how many microseconds the calculated slew time
+ * is different from the desired slew time and store the diff
+ * for the next iteration
+ */
+ diff_new = abs(st->slew_time - calc_slew_time);
+ if (diff_new < diff_old) {
+ diff_old = diff_new;
+ sr_clk_idx = i;
+ sr_step_idx = res;
+ }
+ }
+
+ return ad5758_slew_rate_set(st, sr_clk_idx, sr_step_idx);
+}
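+
+/*
+ * Worked example (assuming AD5758_FULL_SCALE_MICRO is 65535000000ULL,
+ * as defined earlier in this patch): for a requested slew_time of
+ * 1000 us the loop above settles on sr_clk = 32000 Hz with
+ * sr_step = 2048, since 65535000000 / 2048 / 32000 = 999 us, only
+ * 1 us off target, while e.g. 240 kHz with step 500 would give 546 us.
+ */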
+
+static int ad5758_set_out_range(struct ad5758_state *st, int range)
+{
+ int ret;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DAC_CONFIG,
+ AD5758_DAC_CONFIG_RANGE_MSK,
+ AD5758_DAC_CONFIG_RANGE_MODE(range));
+ if (ret < 0)
+ return ret;
+
+ /* Wait to allow time for the internal calibrations to complete */
+ return ad5758_wait_for_task_complete(st, AD5758_DIGITAL_DIAG_RESULTS,
+ AD5758_CAL_MEM_UNREFRESHED_MSK);
+}
+
+static int ad5758_fault_prot_switch_en(struct ad5758_state *st, bool enable)
+{
+ int ret;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG1,
+ AD5758_DCDC_CONFIG1_PROT_SW_EN_MSK,
+ AD5758_DCDC_CONFIG1_PROT_SW_EN_MODE(enable));
+ if (ret < 0)
+ return ret;
+ /*
+ * Poll the BUSY_3WI bit in the DCDC_CONFIG2 register until it is 0.
+ * This allows the 3-wire interface communication to complete.
+ */
+ return ad5758_wait_for_task_complete(st, AD5758_DCDC_CONFIG2,
+ AD5758_DCDC_CONFIG2_BUSY_3WI_MSK);
+}
+
+static int ad5758_internal_buffers_en(struct ad5758_state *st, bool enable)
+{
+ int ret;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DAC_CONFIG,
+ AD5758_DAC_CONFIG_INT_EN_MSK,
+ AD5758_DAC_CONFIG_INT_EN_MODE(enable));
+ if (ret < 0)
+ return ret;
+
+ /* Wait to allow time for the internal calibrations to complete */
+ return ad5758_wait_for_task_complete(st, AD5758_DIGITAL_DIAG_RESULTS,
+ AD5758_CAL_MEM_UNREFRESHED_MSK);
+}
+
+static int ad5758_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad5758_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ if (readval) {
+ ret = ad5758_spi_reg_read(st, reg);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ *readval = ret;
+ ret = 0;
+ } else {
+ ret = ad5758_spi_reg_write(st, reg, writeval);
+ }
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad5758_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ad5758_state *st = iio_priv(indio_dev);
+ int max, min, ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ ret = ad5758_spi_reg_read(st, AD5758_DAC_INPUT);
+ mutex_unlock(&st->lock);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ min = st->out_range.min;
+ max = st->out_range.max;
+ *val = (max - min) / 1000;
+ *val2 = 16;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ min = st->out_range.min;
+ max = st->out_range.max;
+ *val = ((min / 1000) * (1 << 16)) / ((max - min) / 1000);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
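+
+/*
+ * Worked example for the scale/offset math above, using the +/-10 V
+ * range (min = -10000000 uV, max = 10000000 uV): scale is
+ * 20000 / 2^16, i.e. ~0.3052 mV per LSB, and offset is
+ * (-10000 * 2^16) / 20000 = -32768, so raw code 0x8000 maps to 0 V
+ * and 0x0000 to -10 V.
+ */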
+
+static int ad5758_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad5758_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ ret = ad5758_spi_reg_write(st, AD5758_DAC_INPUT, val);
+ mutex_unlock(&st->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ad5758_read_powerdown(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ad5758_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->pwr_down);
+}
+
+static ssize_t ad5758_write_powerdown(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ struct ad5758_state *st = iio_priv(indio_dev);
+ bool pwr_down;
+ unsigned int dcdc_config1_mode, dc_dc_mode, dac_config_mode, val;
+ unsigned long dcdc_config1_msk, dac_config_msk;
+ int ret;
+
+ ret = kstrtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ mutex_lock(&st->lock);
+ if (pwr_down) {
+ dc_dc_mode = AD5758_DCDC_MODE_POWER_OFF;
+ val = 0;
+ } else {
+ dc_dc_mode = st->dc_dc_mode;
+ val = 1;
+ }
+
+ dcdc_config1_mode = AD5758_DCDC_CONFIG1_DCDC_MODE_MODE(dc_dc_mode) |
+ AD5758_DCDC_CONFIG1_PROT_SW_EN_MODE(val);
+ dcdc_config1_msk = AD5758_DCDC_CONFIG1_DCDC_MODE_MSK |
+ AD5758_DCDC_CONFIG1_PROT_SW_EN_MSK;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DCDC_CONFIG1,
+ dcdc_config1_msk,
+ dcdc_config1_mode);
+ if (ret < 0)
+ goto err_unlock;
+
+ dac_config_mode = AD5758_DAC_CONFIG_OUT_EN_MODE(val) |
+ AD5758_DAC_CONFIG_INT_EN_MODE(val);
+ dac_config_msk = AD5758_DAC_CONFIG_OUT_EN_MSK |
+ AD5758_DAC_CONFIG_INT_EN_MSK;
+
+ ret = ad5758_spi_write_mask(st, AD5758_DAC_CONFIG,
+ dac_config_msk,
+ dac_config_mode);
+ if (ret < 0)
+ goto err_unlock;
+
+ st->pwr_down = pwr_down;
+
+err_unlock:
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static const struct iio_info ad5758_info = {
+ .read_raw = ad5758_read_raw,
+ .write_raw = ad5758_write_raw,
+ .debugfs_reg_access = &ad5758_reg_access,
+};
+
+static const struct iio_chan_spec_ext_info ad5758_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5758_read_powerdown,
+ .write = ad5758_write_powerdown,
+ .shared = IIO_SHARED_BY_TYPE,
+ },
+ { }
+};
+
+#define AD5758_DAC_CHAN(_chan_type) { \
+ .type = (_chan_type), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .indexed = 1, \
+ .output = 1, \
+ .ext_info = ad5758_ext_info, \
+}
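+
+/*
+ * With this channel spec the IIO core is expected to expose sysfs
+ * attributes along the lines of out_voltage0_raw, out_voltage_scale,
+ * out_voltage_offset and out_voltage_powerdown for the voltage variant
+ * (names follow the usual IIO conventions and are listed here for
+ * illustration only).
+ */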
+
+static const struct iio_chan_spec ad5758_voltage_ch[] = {
+ AD5758_DAC_CHAN(IIO_VOLTAGE)
+};
+
+static const struct iio_chan_spec ad5758_current_ch[] = {
+ AD5758_DAC_CHAN(IIO_CURRENT)
+};
+
+static bool ad5758_is_valid_mode(enum ad5758_dc_dc_mode mode)
+{
+ switch (mode) {
+ case AD5758_DCDC_MODE_DPC_CURRENT:
+ case AD5758_DCDC_MODE_DPC_VOLTAGE:
+ case AD5758_DCDC_MODE_PPC_CURRENT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int ad5758_crc_disable(struct ad5758_state *st)
+{
+ unsigned int mask;
+
+ mask = (AD5758_WR_FLAG_MSK(AD5758_DIGITAL_DIAG_CONFIG) << 24) | 0x5C3A;
+ st->d32[0] = cpu_to_be32(mask);
+
+ return spi_write(st->spi, &st->d32[0], 4);
+}
+
+static int ad5758_find_out_range(struct ad5758_state *st,
+ const struct ad5758_range *range,
+ unsigned int size,
+ int min, int max)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if ((min == range[i].min) && (max == range[i].max)) {
+ st->out_range.reg = range[i].reg;
+ st->out_range.min = range[i].min;
+ st->out_range.max = range[i].max;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ad5758_parse_dt(struct ad5758_state *st)
+{
+ unsigned int tmp, tmparray[2], size;
+ const struct ad5758_range *range;
+ int *index, ret;
+
+ st->dc_dc_ilim = 0;
+ ret = device_property_read_u32(&st->spi->dev,
+ "adi,dc-dc-ilim-microamp", &tmp);
+ if (ret) {
+ dev_dbg(&st->spi->dev,
+ "Missing \"dc-dc-ilim-microamp\" property\n");
+ } else {
+ index = bsearch(&tmp, ad5758_dc_dc_ilim,
+ ARRAY_SIZE(ad5758_dc_dc_ilim),
+ sizeof(int), cmpfunc);
+ if (!index)
+ dev_dbg(&st->spi->dev, "dc-dc-ilim out of range\n");
+ else
+ st->dc_dc_ilim = index - ad5758_dc_dc_ilim;
+ }
+
+ ret = device_property_read_u32(&st->spi->dev, "adi,dc-dc-mode",
+ &st->dc_dc_mode);
+ if (ret) {
+ dev_err(&st->spi->dev, "Missing \"dc-dc-mode\" property\n");
+ return ret;
+ }
+
+ if (!ad5758_is_valid_mode(st->dc_dc_mode))
+ return -EINVAL;
+
+ if (st->dc_dc_mode == AD5758_DCDC_MODE_DPC_VOLTAGE) {
+ ret = device_property_read_u32_array(&st->spi->dev,
+ "adi,range-microvolt",
+ tmparray, 2);
+ if (ret) {
+ dev_err(&st->spi->dev,
+ "Missing \"range-microvolt\" property\n");
+ return ret;
+ }
+ range = ad5758_voltage_range;
+ size = ARRAY_SIZE(ad5758_voltage_range);
+ } else {
+ ret = device_property_read_u32_array(&st->spi->dev,
+ "adi,range-microamp",
+ tmparray, 2);
+ if (ret) {
+ dev_err(&st->spi->dev,
+ "Missing \"range-microamp\" property\n");
+ return ret;
+ }
+ range = ad5758_current_range;
+ size = ARRAY_SIZE(ad5758_current_range);
+ }
+
+ ret = ad5758_find_out_range(st, range, size, tmparray[0], tmparray[1]);
+ if (ret) {
+ dev_err(&st->spi->dev, "range invalid\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(&st->spi->dev, "adi,slew-time-us", &tmp);
+ if (ret) {
+ dev_dbg(&st->spi->dev, "Missing \"slew-time-us\" property\n");
+ st->slew_time = 0;
+ } else {
+ st->slew_time = tmp;
+ }
+
+ return 0;
+}
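+
+/*
+ * Illustrative devicetree fragment for the properties parsed above (a
+ * sketch only; property values are example assumptions and the
+ * authoritative description belongs to the dt-bindings document):
+ *
+ *	dac@0 {
+ *		compatible = "adi,ad5758";
+ *		reg = <0>;
+ *		spi-max-frequency = <1000000>;
+ *		adi,dc-dc-mode = <2>;
+ *		adi,range-microvolt = <0 10000000>;
+ *		adi,dc-dc-ilim-microamp = <200000>;
+ *		adi,slew-time-us = <125000>;
+ *	};
+ */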
+
+static int ad5758_init(struct ad5758_state *st)
+{
+ int regval, ret;
+
+ /* Disable CRC checks */
+ ret = ad5758_crc_disable(st);
+ if (ret < 0)
+ return ret;
+
+ /* Perform a software reset */
+ ret = ad5758_soft_reset(st);
+ if (ret < 0)
+ return ret;
+
+ /* Disable CRC checks */
+ ret = ad5758_crc_disable(st);
+ if (ret < 0)
+ return ret;
+
+ /* Perform a calibration memory refresh */
+ ret = ad5758_calib_mem_refresh(st);
+ if (ret < 0)
+ return ret;
+
+ regval = ad5758_spi_reg_read(st, AD5758_DIGITAL_DIAG_RESULTS);
+ if (regval < 0)
+ return regval;
+
+ /* Clear all the error flags */
+ ret = ad5758_spi_reg_write(st, AD5758_DIGITAL_DIAG_RESULTS, regval);
+ if (ret < 0)
+ return ret;
+
+ /* Set the dc-to-dc current limit */
+ ret = ad5758_set_dc_dc_ilim(st, st->dc_dc_ilim);
+ if (ret < 0)
+ return ret;
+
+ /* Configure the dc-to-dc controller mode */
+ ret = ad5758_set_dc_dc_conv_mode(st, st->dc_dc_mode);
+ if (ret < 0)
+ return ret;
+
+ /* Configure the output range */
+ ret = ad5758_set_out_range(st, st->out_range.reg);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Slew Rate Control, set the slew rate clock and step */
+ if (st->slew_time) {
+ ret = ad5758_slew_rate_config(st);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable the VIOUT fault protection switch (FPS is closed) */
+ ret = ad5758_fault_prot_switch_en(st, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Power up the DAC and internal (INT) amplifiers */
+ ret = ad5758_internal_buffers_en(st, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Enable VIOUT */
+ return ad5758_spi_write_mask(st, AD5758_DAC_CONFIG,
+ AD5758_DAC_CONFIG_OUT_EN_MSK,
+ AD5758_DAC_CONFIG_OUT_EN_MODE(1));
+}
+
+static int ad5758_probe(struct spi_device *spi)
+{
+ struct ad5758_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+
+ mutex_init(&st->lock);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5758_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = 1;
+
+ ret = ad5758_parse_dt(st);
+ if (ret < 0)
+ return ret;
+
+ if (st->dc_dc_mode == AD5758_DCDC_MODE_DPC_VOLTAGE)
+ indio_dev->channels = ad5758_voltage_ch;
+ else
+ indio_dev->channels = ad5758_current_ch;
+
+ ret = ad5758_init(st);
+ if (ret < 0) {
+ dev_err(&spi->dev, "AD5758 init failed\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(&st->spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad5758_id[] = {
+ { "ad5758", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5758_id);
+
+static struct spi_driver ad5758_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = ad5758_probe,
+ .id_table = ad5758_id,
+};
+
+module_spi_driver(ad5758_driver);
+
+MODULE_AUTHOR("Stefan Popa <stefan.popa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5758 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index cca278eaa138..28e9b7656b20 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -87,12 +87,7 @@ static int ltc2632_read_raw(struct iio_dev *indio_dev,
int *val2,
long m)
{
- struct ltc2632_chip_info *chip_info;
-
const struct ltc2632_state *st = iio_priv(indio_dev);
- const struct spi_device_id *spi_dev_id = spi_get_device_id(st->spi_dev);
-
- chip_info = (struct ltc2632_chip_info *)spi_dev_id->driver_data;
switch (m) {
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index dd21eebed6a8..e39d1e901353 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -97,9 +97,6 @@ static int dac5571_cmd_quad(struct dac5571_data *data, int channel, u16 val)
static int dac5571_pwrdwn_single(struct dac5571_data *data, int channel, u8 pwrdwn)
{
- unsigned int shift;
-
- shift = 12 - data->spec->resolution;
data->buf[1] = 0;
data->buf[0] = pwrdwn << DAC5571_SINGLE_PWRDWN_BITS;
@@ -111,9 +108,6 @@ static int dac5571_pwrdwn_single(struct dac5571_data *data, int channel, u8 pwrd
static int dac5571_pwrdwn_quad(struct dac5571_data *data, int channel, u8 pwrdwn)
{
- unsigned int shift;
-
- shift = 16 - data->spec->resolution;
data->buf[2] = 0;
data->buf[1] = pwrdwn << DAC5571_QUAD_PWRDWN_BITS;
data->buf[0] = (channel << DAC5571_CHANNEL_SELECT) |
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index ddb6a334ae68..f4a508107f0d 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -12,6 +12,7 @@
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -268,6 +269,9 @@ struct ad9523_state {
struct regulator *reg;
struct ad9523_platform_data *pdata;
struct iio_chan_spec ad9523_channels[AD9523_NUM_CHAN];
+ struct gpio_desc *pwrdown_gpio;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *sync_gpio;
unsigned long vcxo_freq;
unsigned long vco_freq;
@@ -275,6 +279,15 @@ struct ad9523_state {
unsigned char vco_out_map[AD9523_NUM_CHAN_ALT_CLK_SRC];
/*
+ * Lock for accessing device registers. Some operations require
+ * multiple consecutive R/W operations, during which the device
+ * shouldn't be interrupted. The buffers are also shared across
+ * all operations, so they need to be protected even on standalone
+ * reads and writes.
+ */
+ struct mutex lock;
+
+ /*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
@@ -500,6 +513,7 @@ static ssize_t ad9523_store(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct ad9523_state *st = iio_priv(indio_dev);
bool state;
int ret;
@@ -508,9 +522,9 @@ static ssize_t ad9523_store(struct device *dev,
return ret;
if (!state)
- return 0;
+ return len;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD9523_SYNC:
ret = ad9523_sync(indio_dev);
@@ -521,7 +535,7 @@ static ssize_t ad9523_store(struct device *dev,
default:
ret = -ENODEV;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -532,15 +546,16 @@ static ssize_t ad9523_show(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct ad9523_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad9523_read(indio_dev, AD9523_READBACK_0);
if (ret >= 0) {
ret = sprintf(buf, "%d\n", !!(ret & (1 <<
(u32)this_attr->address)));
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -623,9 +638,9 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
unsigned int code;
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad9523_read(indio_dev, AD9523_CHANNEL_CLOCK_DIST(chan->channel));
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
if (ret < 0)
return ret;
@@ -642,7 +657,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
AD9523_CLK_DIST_DIV_REV(ret);
*val = code / 1000000;
- *val2 = (code % 1000000) * 10;
+ *val2 = code % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
@@ -659,7 +674,7 @@ static int ad9523_write_raw(struct iio_dev *indio_dev,
unsigned int reg;
int ret, tmp, code;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad9523_read(indio_dev, AD9523_CHANNEL_CLOCK_DIST(chan->channel));
if (ret < 0)
goto out;
@@ -705,7 +720,7 @@ static int ad9523_write_raw(struct iio_dev *indio_dev,
ad9523_io_update(indio_dev);
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -713,9 +728,10 @@ static int ad9523_reg_access(struct iio_dev *indio_dev,
unsigned int reg, unsigned int writeval,
unsigned int *readval)
{
+ struct ad9523_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (readval == NULL) {
ret = ad9523_write(indio_dev, reg | AD9523_R1B, writeval);
ad9523_io_update(indio_dev);
@@ -728,7 +744,7 @@ static int ad9523_reg_access(struct iio_dev *indio_dev,
}
out_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -967,6 +983,8 @@ static int ad9523_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
+
st->reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
@@ -974,6 +992,32 @@ static int ad9523_probe(struct spi_device *spi)
return ret;
}
+ st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->pwrdown_gpio)) {
+ ret = PTR_ERR(st->pwrdown_gpio);
+ goto error_disable_reg;
+ }
+
+ st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->reset_gpio)) {
+ ret = PTR_ERR(st->reset_gpio);
+ goto error_disable_reg;
+ }
+
+ if (st->reset_gpio) {
+ udelay(1);
+ gpiod_direction_output(st->reset_gpio, 1);
+ }
+
+ st->sync_gpio = devm_gpiod_get_optional(&spi->dev, "sync",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->sync_gpio)) {
+ ret = PTR_ERR(st->sync_gpio);
+ goto error_disable_reg;
+ }
+
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
st->pdata = pdata;
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index ad6f91d06185..c771ae6803a9 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -81,9 +81,11 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
adis->tx[9] = (value >> 24) & 0xff;
adis->tx[6] = ADIS_WRITE_REG(reg + 2);
adis->tx[7] = (value >> 16) & 0xff;
+ /* fall through */
case 2:
adis->tx[4] = ADIS_WRITE_REG(reg + 1);
adis->tx[5] = (value >> 8) & 0xff;
+ /* fall through */
case 1:
adis->tx[2] = ADIS_WRITE_REG(reg);
adis->tx[3] = value & 0xff;
@@ -167,6 +169,7 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
adis->tx[2] = ADIS_READ_REG(reg + 2);
adis->tx[3] = 0;
spi_message_add_tail(&xfers[1], &msg);
+ /* fall through */
case 2:
adis->tx[4] = ADIS_READ_REG(reg);
adis->tx[5] = 0;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 42618fe4f83e..d80ef468508a 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -20,8 +20,6 @@
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
#include <linux/iio/iio.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -84,7 +82,7 @@ static const struct inv_mpu6050_reg_map reg_set_6050 = {
static const struct inv_mpu6050_chip_config chip_config_6050 = {
.fsr = INV_MPU6050_FSR_2000DPS,
.lpf = INV_MPU6050_FILTER_20HZ,
- .fifo_rate = INV_MPU6050_INIT_FIFO_RATE,
+ .divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE),
.gyro_fifo_enable = false,
.accl_fifo_enable = false,
.accl_fs = INV_MPU6050_FS_02G,
@@ -106,6 +104,12 @@ static const struct inv_mpu6050_hw hw_info[] = {
.config = &chip_config_6050,
},
{
+ .whoami = INV_MPU6515_WHOAMI_VALUE,
+ .name = "MPU6515",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6050,
+ },
+ {
.whoami = INV_MPU6000_WHOAMI_VALUE,
.name = "MPU6000",
.reg = &reg_set_6050,
@@ -280,7 +284,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
if (result)
goto error_power_off;
- d = INV_MPU6050_ONE_K_HZ / INV_MPU6050_INIT_FIFO_RATE - 1;
+ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE);
result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
goto error_power_off;
@@ -297,6 +301,13 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
memcpy(&st->chip_config, hw_info[st->chip_type].config,
sizeof(struct inv_mpu6050_chip_config));
+ /*
+ * Internal chip period is 1ms (1kHz).
+ * Start with the theoretical value before measuring it from
+ * interrupt timestamps.
+ */
+ st->chip_period = NSEC_PER_MSEC;
+
return inv_mpu6050_set_power_itg(st, false);
error_power_off:
@@ -630,7 +641,7 @@ static ssize_t
inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- s32 fifo_rate;
+ int fifo_rate;
u8 d;
int result;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -646,8 +657,13 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
if (result)
return result;
+ /* compute the chip sample rate divider */
+ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(fifo_rate);
+ /* compute back the fifo rate to handle truncation cases */
+ fifo_rate = INV_MPU6050_DIVIDER_TO_FIFO_RATE(d);
+
mutex_lock(&st->lock);
- if (fifo_rate == st->chip_config.fifo_rate) {
+ if (d == st->chip_config.divider) {
result = 0;
goto fifo_rate_fail_unlock;
}
@@ -655,11 +671,10 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
if (result)
goto fifo_rate_fail_unlock;
- d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
goto fifo_rate_fail_power_off;
- st->chip_config.fifo_rate = fifo_rate;
+ st->chip_config.divider = d;
result = inv_mpu6050_set_lpf(st, fifo_rate);
if (result)
@@ -687,7 +702,7 @@ inv_fifo_rate_show(struct device *dev, struct device_attribute *attr,
unsigned fifo_rate;
mutex_lock(&st->lock);
- fifo_rate = st->chip_config.fifo_rate;
+ fifo_rate = INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
mutex_unlock(&st->lock);
return scnprintf(buf, PAGE_SIZE, "%u\n", fifo_rate);
@@ -1005,7 +1020,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
indio_dev->modes = INDIO_BUFFER_TRIGGERED;
result = devm_iio_triggered_buffer_setup(dev, indio_dev,
- inv_mpu6050_irq_handler,
+ iio_pollfunc_store_time,
inv_mpu6050_read_fifo,
NULL);
if (result) {
@@ -1018,8 +1033,6 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return result;
}
- INIT_KFIFO(st->timestamps);
- spin_lock_init(&st->time_stamp_lock);
result = devm_iio_device_register(dev, indio_dev);
if (result) {
dev_err(dev, "IIO register fail %d\n", result);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 495409d56207..dd758e3d403d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -174,6 +174,7 @@ static int inv_mpu_remove(struct i2c_client *client)
static const struct i2c_device_id inv_mpu_id[] = {
{"mpu6050", INV_MPU6050},
{"mpu6500", INV_MPU6500},
+ {"mpu6515", INV_MPU6515},
{"mpu9150", INV_MPU9150},
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
@@ -193,6 +194,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_MPU6500
},
{
+ .compatible = "invensense,mpu6515",
+ .data = (void *)INV_MPU6515
+ },
+ {
.compatible = "invensense,mpu9150",
.data = (void *)INV_MPU9150
},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index c54da777945d..e69a59659dbc 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -12,8 +12,6 @@
*/
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -73,6 +71,7 @@ struct inv_mpu6050_reg_map {
enum inv_devices {
INV_MPU6050,
INV_MPU6500,
+ INV_MPU6515,
INV_MPU6000,
INV_MPU9150,
INV_MPU9250,
@@ -88,7 +87,7 @@ enum inv_devices {
* @accl_fs: accel full scale range.
* @accl_fifo_enable: enable accel data output
* @gyro_fifo_enable: enable gyro data output
- * @fifo_rate: FIFO update rate.
+ * @divider: chip sample rate divider (sample rate divider - 1)
*/
struct inv_mpu6050_chip_config {
unsigned int fsr:2;
@@ -96,7 +95,7 @@ struct inv_mpu6050_chip_config {
unsigned int accl_fs:2;
unsigned int accl_fifo_enable:1;
unsigned int gyro_fifo_enable:1;
- u16 fifo_rate;
+ u8 divider;
u8 user_ctrl;
};
@@ -116,40 +115,40 @@ struct inv_mpu6050_hw {
/*
* struct inv_mpu6050_state - Driver state variables.
- * @TIMESTAMP_FIFO_SIZE: fifo size for timestamp.
* @lock: Chip access lock.
* @trig: IIO trigger.
* @chip_config: Cached attribute information.
* @reg: Map of important registers.
* @hw: Other hardware-specific information.
* @chip_type: chip type.
- * @time_stamp_lock: spin lock to time stamp.
* @plat_data: platform data (deprecated in favor of @orientation).
* @orientation: sensor chip orientation relative to main hardware.
- * @timestamps: kfifo queue to store time stamp.
* @map regmap pointer.
* @irq interrupt number.
* @irq_mask the int_pin_cfg mask to configure interrupt type.
+ * @chip_period: chip internal period estimation (~1kHz).
+ * @it_timestamp: timestamp from previous interrupt.
+ * @data_timestamp: timestamp for next data sample.
*/
struct inv_mpu6050_state {
-#define TIMESTAMP_FIFO_SIZE 16
struct mutex lock;
struct iio_trigger *trig;
struct inv_mpu6050_chip_config chip_config;
const struct inv_mpu6050_reg_map *reg;
const struct inv_mpu6050_hw *hw;
enum inv_devices chip_type;
- spinlock_t time_stamp_lock;
struct i2c_mux_core *muxc;
struct i2c_client *mux_client;
unsigned int powerup_count;
struct inv_mpu6050_platform_data plat_data;
struct iio_mount_matrix orientation;
- DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
struct regmap *map;
int irq;
u8 irq_mask;
unsigned skip_samples;
+ s64 chip_period;
+ s64 it_timestamp;
+ s64 data_timestamp;
};
/*register and associated bit definition*/
@@ -174,6 +173,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_RAW_GYRO 0x43
#define INV_MPU6050_REG_INT_STATUS 0x3A
+#define INV_MPU6050_BIT_FIFO_OVERFLOW_INT 0x10
#define INV_MPU6050_BIT_RAW_DATA_RDY_INT 0x01
#define INV_MPU6050_REG_USER_CTRL 0x6A
@@ -198,7 +198,6 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
#define INV_MPU6050_FIFO_COUNT_BYTE 2
-#define INV_MPU6050_FIFO_THRESHOLD 500
/* mpu6500 registers */
#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
@@ -231,13 +230,24 @@ struct inv_mpu6050_state {
#define INV_MPU6050_LATCH_INT_EN 0x20
#define INV_MPU6050_BIT_BYPASS_EN 0x2
+/* Allowed timestamp period jitter in percent */
+#define INV_MPU6050_TS_PERIOD_JITTER 4
/* init parameters */
#define INV_MPU6050_INIT_FIFO_RATE 50
-#define INV_MPU6050_TIME_STAMP_TOR 5
#define INV_MPU6050_MAX_FIFO_RATE 1000
#define INV_MPU6050_MIN_FIFO_RATE 4
-#define INV_MPU6050_ONE_K_HZ 1000
+
+/* chip internal frequency: 1KHz */
+#define INV_MPU6050_INTERNAL_FREQ_HZ 1000
+/* return the frequency divider (chip sample rate divider + 1) */
+#define INV_MPU6050_FREQ_DIVIDER(st) \
+ ((st)->chip_config.divider + 1)
+/* chip sample rate divider to fifo rate */
+#define INV_MPU6050_FIFO_RATE_TO_DIVIDER(fifo_rate) \
+ ((INV_MPU6050_INTERNAL_FREQ_HZ / (fifo_rate)) - 1)
+#define INV_MPU6050_DIVIDER_TO_FIFO_RATE(divider) \
+ (INV_MPU6050_INTERNAL_FREQ_HZ / ((divider) + 1))
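+
+/*
+ * e.g. a requested fifo rate of 50 Hz maps to divider
+ * (1000 / 50) - 1 = 19 and converts back to 1000 / (19 + 1) = 50 Hz,
+ * while a non-divisor request such as 33 Hz truncates to divider 29
+ * and is reported back as 1000 / 30 = 33 Hz.
+ */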
#define INV_MPU6050_REG_WHOAMI 117
@@ -247,6 +257,7 @@ struct inv_mpu6050_state {
#define INV_MPU9150_WHOAMI_VALUE 0x68
#define INV_MPU9250_WHOAMI_VALUE 0x71
#define INV_MPU9255_WHOAMI_VALUE 0x73
+#define INV_MPU6515_WHOAMI_VALUE 0x74
#define INV_ICM20608_WHOAMI_VALUE 0xAF
/* scan element definition */
@@ -300,7 +311,6 @@ enum inv_mpu6050_clock_sel_e {
NUM_CLK
};
-irqreturn_t inv_mpu6050_irq_handler(int irq, void *p);
irqreturn_t inv_mpu6050_read_fifo(int irq, void *p);
int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type);
int inv_reset_fifo(struct iio_dev *indio_dev);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 1795418438e4..548e042f7b5b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -19,18 +19,83 @@
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/kfifo.h>
#include <linux/poll.h>
+#include <linux/math64.h>
+#include <asm/unaligned.h>
#include "inv_mpu_iio.h"
-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+/**
+ * inv_mpu6050_update_period() - Update chip internal period estimation
+ *
+ * @st: driver state
+ * @timestamp: the interrupt timestamp
+ * @nb: number of data set in the fifo
+ *
+ * This function uses interrupt timestamps to estimate the chip period
+ * and to choose the timestamp of the next data sample.
+ */
+static void inv_mpu6050_update_period(struct inv_mpu6050_state *st,
+ s64 timestamp, size_t nb)
{
- unsigned long flags;
+ /* Period boundaries for accepting timestamp */
+ const s64 period_min =
+ (NSEC_PER_MSEC * (100 - INV_MPU6050_TS_PERIOD_JITTER)) / 100;
+ const s64 period_max =
+ (NSEC_PER_MSEC * (100 + INV_MPU6050_TS_PERIOD_JITTER)) / 100;
+ const s32 divider = INV_MPU6050_FREQ_DIVIDER(st);
+ s64 delta, interval;
+ bool use_it_timestamp = false;
+
+ if (st->it_timestamp == 0) {
+ /* not initialized, forced to use it_timestamp */
+ use_it_timestamp = true;
+ } else if (nb == 1) {
+ /*
+ * Validate the interrupt timestamp by checking whether the
+ * interrupt has been delayed.
+ * nb > 1 means the interrupt was delayed for more than one
+ * sample, so its timestamp is unreliable.
+ * Compute the chip period between 2 interrupts to validate it.
+ */
+ delta = div_s64(timestamp - st->it_timestamp, divider);
+ if (delta > period_min && delta < period_max) {
+ /* update chip period and use it timestamp */
+ st->chip_period = (st->chip_period + delta) / 2;
+ use_it_timestamp = true;
+ }
+ }
+
+ if (use_it_timestamp) {
+ /*
+ * Manage case of multiple samples in the fifo (nb > 1):
+ * compute timestamp corresponding to the first sample using
+ * estimated chip period.
+ */
+ interval = (nb - 1) * st->chip_period * divider;
+ st->data_timestamp = timestamp - interval;
+ }
- /* take the spin lock sem to avoid interrupt kick in */
- spin_lock_irqsave(&st->time_stamp_lock, flags);
- kfifo_reset(&st->timestamps);
- spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+ /* save it timestamp */
+ st->it_timestamp = timestamp;
+}
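+
+/*
+ * Worked example for the validation above: at the 50 Hz init rate
+ * (frequency divider 20), two interrupts 20.2 ms apart give
+ * delta = 20200000 / 20 = 1010000 ns, inside the +/-4% window around
+ * 1 ms, so the estimate is averaged and the interrupt timestamp is
+ * used; a delayed interrupt (say 40 ms, delta 2 ms) is rejected and
+ * the previous estimate keeps extrapolating the data timestamps.
+ */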
+
+/**
+ * inv_mpu6050_get_timestamp() - Return the current data timestamp
+ *
+ * @st: driver state
+ * @return: current data timestamp
+ *
+ * This function returns the current data timestamp and prepares for next one.
+ */
+static s64 inv_mpu6050_get_timestamp(struct inv_mpu6050_state *st)
+{
+ s64 ts;
+
+ /* return current data timestamp and increment */
+ ts = st->data_timestamp;
+ st->data_timestamp += st->chip_period * INV_MPU6050_FREQ_DIVIDER(st);
+
+ return ts;
}
int inv_reset_fifo(struct iio_dev *indio_dev)
@@ -39,6 +104,9 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
u8 d;
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ /* reset it timestamp validation */
+ st->it_timestamp = 0;
+
/* disable interrupt */
result = regmap_write(st->map, st->reg->int_enable, 0);
if (result) {
@@ -62,9 +130,6 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
if (result)
goto reset_fifo_fail;
- /* clear timestamps fifo */
- inv_clear_kfifo(st);
-
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
st->chip_config.gyro_fifo_enable) {
@@ -99,23 +164,6 @@ reset_fifo_fail:
}
/**
- * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
- */
-irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- struct inv_mpu6050_state *st = iio_priv(indio_dev);
- s64 timestamp;
-
- timestamp = iio_get_time_ns(indio_dev);
- kfifo_in_spinlocked(&st->timestamps, &timestamp, 1,
- &st->time_stamp_lock);
-
- return IRQ_WAKE_THREAD;
-}
-
-/**
* inv_mpu6050_read_fifo() - Transfer data from hardware FIFO to KFIFO.
*/
irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
@@ -129,6 +177,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
u16 fifo_count;
s64 timestamp;
int int_status;
+ size_t i, nb;
mutex_lock(&st->lock);
@@ -139,6 +188,9 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
"failed to ack interrupt\n");
goto flush_fifo;
}
+ /* handle fifo overflow by resetting the fifo */
+ if (int_status & INV_MPU6050_BIT_FIFO_OVERFLOW_INT)
+ goto flush_fifo;
if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
dev_warn(regmap_get_device(st->map),
"spurious interrupt with status 0x%x\n", int_status);
@@ -163,38 +215,23 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
INV_MPU6050_FIFO_COUNT_BYTE);
if (result)
goto end_session;
- fifo_count = be16_to_cpup((__be16 *)(&data[0]));
- if (fifo_count < bytes_per_datum)
- goto end_session;
- /* fifo count can't be an odd number. If it is odd, reset the FIFO. */
- if (fifo_count & 1)
- goto flush_fifo;
- if (fifo_count > INV_MPU6050_FIFO_THRESHOLD)
- goto flush_fifo;
- /* Timestamp mismatch. */
- if (kfifo_len(&st->timestamps) >
- fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
- goto flush_fifo;
- do {
+ fifo_count = get_unaligned_be16(&data[0]);
+ /* compute and process all complete datum */
+ nb = fifo_count / bytes_per_datum;
+ inv_mpu6050_update_period(st, pf->timestamp, nb);
+ for (i = 0; i < nb; ++i) {
result = regmap_bulk_read(st->map, st->reg->fifo_r_w,
data, bytes_per_datum);
if (result)
goto flush_fifo;
-
- result = kfifo_out(&st->timestamps, &timestamp, 1);
- /* when there is no timestamp, put timestamp as 0 */
- if (result == 0)
- timestamp = 0;
-
/* skip first samples if needed */
- if (st->skip_samples)
+ if (st->skip_samples) {
st->skip_samples--;
- else
- iio_push_to_buffers_with_timestamp(indio_dev, data,
- timestamp);
-
- fifo_count -= bytes_per_datum;
- } while (fifo_count >= bytes_per_datum);
+ continue;
+ }
+ timestamp = inv_mpu6050_get_timestamp(st);
+ iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp);
+ }
end_session:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 4994f920a836..7589f2ad1dae 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -298,8 +298,11 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
err = regmap_bulk_read(hw->regmap,
hw->settings->fifo_ops.fifo_diff.addr,
&fifo_status, sizeof(fifo_status));
- if (err < 0)
+ if (err < 0) {
+ dev_err(hw->dev, "failed to read fifo status (err=%d)\n",
+ err);
return err;
+ }
if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK))
return 0;
@@ -313,8 +316,12 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
err = st_lsm6dsx_read_block(hw, hw->buff, pattern_len);
- if (err < 0)
+ if (err < 0) {
+ dev_err(hw->dev,
+ "failed to read pattern from fifo (err=%d)\n",
+ err);
return err;
+ }
/*
* Data are written to the FIFO with a specific pattern
@@ -385,8 +392,11 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
if (unlikely(reset_ts)) {
err = st_lsm6dsx_reset_hw_ts(hw);
- if (err < 0)
+ if (err < 0) {
+ dev_err(hw->dev, "failed to reset hw ts (err=%d)\n",
+ err);
return err;
+ }
}
return read_len;
}
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 19bdf3d2962a..a062cfddc5af 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -85,6 +85,8 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_COUNT] = "count",
[IIO_INDEX] = "index",
[IIO_GRAVITY] = "gravity",
+ [IIO_POSITIONRELATIVE] = "positionrelative",
+ [IIO_PHASE] = "phase",
};
static const char * const iio_modifier_names[] = {
@@ -108,6 +110,7 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_LIGHT_GREEN] = "green",
[IIO_MOD_LIGHT_BLUE] = "blue",
[IIO_MOD_LIGHT_UV] = "uv",
+ [IIO_MOD_LIGHT_DUV] = "duv",
[IIO_MOD_QUATERNION] = "quaternion",
[IIO_MOD_TEMP_AMBIENT] = "ambient",
[IIO_MOD_TEMP_OBJECT] = "object",
@@ -207,35 +210,27 @@ static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
*/
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
- struct timespec tp;
+ struct timespec64 tp;
switch (iio_device_get_clock(indio_dev)) {
case CLOCK_REALTIME:
- ktime_get_real_ts(&tp);
- break;
+ return ktime_get_real_ns();
case CLOCK_MONOTONIC:
- ktime_get_ts(&tp);
- break;
+ return ktime_get_ns();
case CLOCK_MONOTONIC_RAW:
- getrawmonotonic(&tp);
- break;
+ return ktime_get_raw_ns();
case CLOCK_REALTIME_COARSE:
- tp = current_kernel_time();
- break;
+ return ktime_to_ns(ktime_get_coarse_real());
case CLOCK_MONOTONIC_COARSE:
- tp = get_monotonic_coarse();
- break;
+ ktime_get_coarse_ts64(&tp);
+ return timespec64_to_ns(&tp);
case CLOCK_BOOTTIME:
- get_monotonic_boottime(&tp);
- break;
+ return ktime_get_boot_ns();
case CLOCK_TAI:
- timekeeping_clocktai(&tp);
- break;
+ return ktime_get_tai_ns();
default:
BUG();
}
-
- return timespec_to_ns(&tp);
}
EXPORT_SYMBOL(iio_get_time_ns);
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index c7ef8d1862d6..d66ea754ffff 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -1,3 +1,4 @@
+
#
# Light sensors
#
@@ -319,6 +320,17 @@ config PA12203001
This driver can also be built as a module. If so, the module
will be called pa12203001.
+config SI1133
+ tristate "SI1133 UV Index Sensor and Ambient Light Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build a driver for the Silicon Labs SI1133
+ UV Index Sensor and Ambient Light Sensor chip.
+
+ To compile this driver as a module, choose M here: the module will be
+ called si1133.
+
config SI1145
tristate "SI1132 and SI1141/2/3/5/6/7 combined ALS, UV index and proximity sensor"
depends on I2C
@@ -438,11 +450,12 @@ config US5182D
will be called us5182d.
config VCNL4000
- tristate "VCNL4000/4010/4020 combined ALS and proximity sensor"
+ tristate "VCNL4000/4010/4020/4200 combined ALS and proximity sensor"
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VCNL4000,
- VCNL4010, VCNL4020 combined ambient light and proximity sensor.
+ VCNL4010, VCNL4020, VCNL4200 combined ambient light and proximity
+ sensor.
To compile this driver as a module, choose M here: the
module will be called vcnl4000.
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 80943af5d627..86337b114bc4 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_OPT3001) += opt3001.o
obj-$(CONFIG_PA12203001) += pa12203001.o
obj-$(CONFIG_RPR0521) += rpr0521.o
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
+obj-$(CONFIG_SI1133) += si1133.o
obj-$(CONFIG_SI1145) += si1145.o
obj-$(CONFIG_STK3310) += stk3310.o
obj-$(CONFIG_ST_UVIS25) += st_uvis25_core.o
diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
new file mode 100644
index 000000000000..015a21f0c2ef
--- /dev/null
+++ b/drivers/iio/light/si1133.c
@@ -0,0 +1,1071 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * si1133.c - Support for Silabs SI1133 combined ambient
+ * light and UV index sensors
+ *
+ * Copyright 2018 Maxime Roussin-Belanger <maxime.roussinbelanger@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/util_macros.h>
+
+#define SI1133_REG_PART_ID 0x00
+#define SI1133_REG_REV_ID 0x01
+#define SI1133_REG_MFR_ID 0x02
+#define SI1133_REG_INFO0 0x03
+#define SI1133_REG_INFO1 0x04
+
+#define SI1133_PART_ID 0x33
+
+#define SI1133_REG_HOSTIN0 0x0A
+#define SI1133_REG_COMMAND 0x0B
+#define SI1133_REG_IRQ_ENABLE 0x0F
+#define SI1133_REG_RESPONSE1 0x10
+#define SI1133_REG_RESPONSE0 0x11
+#define SI1133_REG_IRQ_STATUS 0x12
+#define SI1133_REG_MEAS_RATE 0x1A
+
+#define SI1133_IRQ_CHANNEL_ENABLE 0xF
+
+#define SI1133_CMD_RESET_CTR 0x00
+#define SI1133_CMD_RESET_SW 0x01
+#define SI1133_CMD_FORCE 0x11
+#define SI1133_CMD_START_AUTONOMOUS 0x13
+#define SI1133_CMD_PARAM_SET 0x80
+#define SI1133_CMD_PARAM_QUERY 0x40
+#define SI1133_CMD_PARAM_MASK 0x3F
+
+#define SI1133_CMD_ERR_MASK BIT(4)
+#define SI1133_CMD_SEQ_MASK 0xF
+#define SI1133_MAX_CMD_CTR 0xF
+
+#define SI1133_PARAM_REG_CHAN_LIST 0x01
+#define SI1133_PARAM_REG_ADCCONFIG(x) (((x) * 4) + 2)
+#define SI1133_PARAM_REG_ADCSENS(x) (((x) * 4) + 3)
+#define SI1133_PARAM_REG_ADCPOST(x) (((x) * 4) + 4)
+
+#define SI1133_ADCMUX_MASK 0x1F
+
+#define SI1133_ADCCONFIG_DECIM_RATE(x) ((x) << 5)
+
+#define SI1133_ADCSENS_SCALE_MASK 0x70
+#define SI1133_ADCSENS_SCALE_SHIFT 4
+#define SI1133_ADCSENS_HSIG_MASK BIT(7)
+#define SI1133_ADCSENS_HSIG_SHIFT 7
+#define SI1133_ADCSENS_HW_GAIN_MASK 0xF
+#define SI1133_ADCSENS_NB_MEAS(x) (fls(x) << SI1133_ADCSENS_SCALE_SHIFT)
+
+#define SI1133_ADCPOST_24BIT_EN BIT(6)
+#define SI1133_ADCPOST_POSTSHIFT_BITQTY(x) (((x) & GENMASK(2, 0)) << 3)
+
+#define SI1133_PARAM_ADCMUX_SMALL_IR 0x0
+#define SI1133_PARAM_ADCMUX_MED_IR 0x1
+#define SI1133_PARAM_ADCMUX_LARGE_IR 0x2
+#define SI1133_PARAM_ADCMUX_WHITE 0xB
+#define SI1133_PARAM_ADCMUX_LARGE_WHITE 0xD
+#define SI1133_PARAM_ADCMUX_UV 0x18
+#define SI1133_PARAM_ADCMUX_UV_DEEP 0x19
+
+#define SI1133_ERR_INVALID_CMD 0x0
+#define SI1133_ERR_INVALID_LOCATION_CMD 0x1
+#define SI1133_ERR_SATURATION_ADC_OR_OVERFLOW_ACCUMULATION 0x2
+#define SI1133_ERR_OUTPUT_BUFFER_OVERFLOW 0x3
+
+#define SI1133_COMPLETION_TIMEOUT_MS 500
+
+#define SI1133_CMD_MINSLEEP_US_LOW 5000
+#define SI1133_CMD_MINSLEEP_US_HIGH 7500
+#define SI1133_CMD_TIMEOUT_MS 25
+#define SI1133_CMD_LUX_TIMEOUT_MS 5000
+#define SI1133_CMD_TIMEOUT_US (SI1133_CMD_TIMEOUT_MS * 1000)
+
+#define SI1133_REG_HOSTOUT(x) ((x) + 0x13)
+
+#define SI1133_MEASUREMENT_FREQUENCY 1250
+
+#define SI1133_X_ORDER_MASK 0x0070
+#define SI1133_Y_ORDER_MASK 0x0007
+#define si1133_get_x_order(m) (((m) & SI1133_X_ORDER_MASK) >> 4)
+#define si1133_get_y_order(m) ((m) & SI1133_Y_ORDER_MASK)
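+
+/*
+ * e.g. an info word of 0x0021 decodes to x_order 2 and y_order 1 via
+ * the two macros above, while the sign of the (signed) info word
+ * selects the sign of the term in si1133_calc_polynomial() below.
+ */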
+
+#define SI1133_LUX_ADC_MASK 0xE
+#define SI1133_ADC_THRESHOLD 16000
+#define SI1133_INPUT_FRACTION_HIGH 7
+#define SI1133_INPUT_FRACTION_LOW 15
+#define SI1133_LUX_OUTPUT_FRACTION 12
+#define SI1133_LUX_BUFFER_SIZE 9
+
+static const int si1133_scale_available[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128};
+
+static IIO_CONST_ATTR(scale_available, "1 2 4 8 16 32 64 128");
+
+static IIO_CONST_ATTR_INT_TIME_AVAIL("0.0244 0.0488 0.0975 0.195 0.390 0.780 "
+ "1.560 3.120 6.24 12.48 25.0 50.0");
+
+/* A.K.A. HW_GAIN in datasheet */
+enum si1133_int_time {
+ _24_4_us = 0,
+ _48_8_us = 1,
+ _97_5_us = 2,
+ _195_0_us = 3,
+ _390_0_us = 4,
+ _780_0_us = 5,
+ _1_560_0_us = 6,
+ _3_120_0_us = 7,
+ _6_240_0_us = 8,
+ _12_480_0_us = 9,
+ _25_ms = 10,
+ _50_ms = 11,
+};
+
+/* Integration time in milliseconds, nanoseconds */
+static const int si1133_int_time_table[][2] = {
+ [_24_4_us] = {0, 24400},
+ [_48_8_us] = {0, 48800},
+ [_97_5_us] = {0, 97500},
+ [_195_0_us] = {0, 195000},
+ [_390_0_us] = {0, 390000},
+ [_780_0_us] = {0, 780000},
+ [_1_560_0_us] = {1, 560000},
+ [_3_120_0_us] = {3, 120000},
+ [_6_240_0_us] = {6, 240000},
+ [_12_480_0_us] = {12, 480000},
+ [_25_ms] = {25, 0},
+ [_50_ms] = {50, 0},
+};
+
+static const struct regmap_range si1133_reg_ranges[] = {
+ regmap_reg_range(0x00, 0x02),
+ regmap_reg_range(0x0A, 0x0B),
+ regmap_reg_range(0x0F, 0x0F),
+ regmap_reg_range(0x10, 0x12),
+ regmap_reg_range(0x13, 0x2C),
+};
+
+static const struct regmap_range si1133_reg_ro_ranges[] = {
+ regmap_reg_range(0x00, 0x02),
+ regmap_reg_range(0x10, 0x2C),
+};
+
+static const struct regmap_range si1133_precious_ranges[] = {
+ regmap_reg_range(0x12, 0x12),
+};
+
+static const struct regmap_access_table si1133_write_ranges_table = {
+ .yes_ranges = si1133_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si1133_reg_ranges),
+ .no_ranges = si1133_reg_ro_ranges,
+ .n_no_ranges = ARRAY_SIZE(si1133_reg_ro_ranges),
+};
+
+static const struct regmap_access_table si1133_read_ranges_table = {
+ .yes_ranges = si1133_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si1133_reg_ranges),
+};
+
+static const struct regmap_access_table si1133_precious_table = {
+ .yes_ranges = si1133_precious_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si1133_precious_ranges),
+};
+
+static const struct regmap_config si1133_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0x2C,
+
+ .wr_table = &si1133_write_ranges_table,
+ .rd_table = &si1133_read_ranges_table,
+
+ .precious_table = &si1133_precious_table,
+};
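+
+/*
+ * IRQ_STATUS (0x12) is marked precious above because, as is common for
+ * such status registers, reading it clears the pending interrupt
+ * flags; this keeps debugfs or cache accesses from eating events.
+ */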
+
+struct si1133_data {
+ struct regmap *regmap;
+ struct i2c_client *client;
+
+ /* Lock ensuring that only one command is processed at a time */
+ struct mutex mutex;
+
+ int rsp_seq;
+ u8 scan_mask;
+ u8 adc_sens[6];
+ u8 adc_config[6];
+
+ struct completion completion;
+};
+
+struct si1133_coeff {
+ s16 info;
+ u16 mag;
+};
+
+struct si1133_lux_coeff {
+ struct si1133_coeff coeff_high[4];
+ struct si1133_coeff coeff_low[9];
+};
+
+static const struct si1133_lux_coeff lux_coeff = {
+ {
+ { 0, 209},
+ { 1665, 93},
+ { 2064, 65},
+ {-2671, 234}
+ },
+ {
+ { 0, 0},
+ { 1921, 29053},
+ {-1022, 36363},
+ { 2320, 20789},
+ { -367, 57909},
+ {-1774, 38240},
+ { -608, 46775},
+ {-1503, 51831},
+ {-1886, 58928}
+ }
+};
+
+static int si1133_calculate_polynomial_inner(u32 input, u8 fraction, u16 mag,
+ s8 shift)
+{
+ return ((input << fraction) / mag) << shift;
+}
+
+static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order,
+ u8 input_fraction, s8 sign,
+ const struct si1133_coeff *coeffs)
+{
+ s8 shift;
+ int x1 = 1;
+ int x2 = 1;
+ int y1 = 1;
+ int y2 = 1;
+
+ shift = ((u16)coeffs->info & 0xFF00) >> 8;
+ shift ^= 0xFF;
+ shift += 1;
+ shift = -shift;
+
+ if (x_order > 0) {
+ x1 = si1133_calculate_polynomial_inner(x, input_fraction,
+ coeffs->mag, shift);
+ if (x_order > 1)
+ x2 = x1;
+ }
+
+ if (y_order > 0) {
+ y1 = si1133_calculate_polynomial_inner(y, input_fraction,
+ coeffs->mag, shift);
+ if (y_order > 1)
+ y2 = y1;
+ }
+
+ return sign * x1 * x2 * y1 * y2;
+}
+
+/*
+ * The algorithm is from:
+ * https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00716
+ */
+static int si1133_calc_polynomial(u32 x, u32 y, u8 input_fraction, u8 num_coeff,
+ const struct si1133_coeff *coeffs)
+{
+ u8 x_order, y_order;
+ u8 counter;
+ s8 sign;
+ int output = 0;
+
+ for (counter = 0; counter < num_coeff; counter++) {
+ if (coeffs->info < 0)
+ sign = -1;
+ else
+ sign = 1;
+
+ x_order = si1133_get_x_order(coeffs->info);
+ y_order = si1133_get_y_order(coeffs->info);
+
+ if ((x_order == 0) && (y_order == 0))
+ output +=
+ sign * coeffs->mag << SI1133_LUX_OUTPUT_FRACTION;
+ else
+ output += si1133_calculate_output(x, y, x_order,
+ y_order,
+ input_fraction, sign,
+ coeffs);
+ coeffs++;
+ }
+
+ return abs(output);
+}
+
+static int si1133_cmd_reset_sw(struct si1133_data *data)
+{
+ struct device *dev = &data->client->dev;
+ unsigned int resp;
+ unsigned long timeout;
+ int err;
+
+ err = regmap_write(data->regmap, SI1133_REG_COMMAND,
+ SI1133_CMD_RESET_SW);
+ if (err)
+ return err;
+
+ timeout = jiffies + msecs_to_jiffies(SI1133_CMD_TIMEOUT_MS);
+ while (true) {
+ err = regmap_read(data->regmap, SI1133_REG_RESPONSE0, &resp);
+ if (err == -ENXIO) {
+ usleep_range(SI1133_CMD_MINSLEEP_US_LOW,
+ SI1133_CMD_MINSLEEP_US_HIGH);
+ continue;
+ }
+
+ if ((resp & SI1133_MAX_CMD_CTR) == SI1133_MAX_CMD_CTR)
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev, "Timeout on reset ctr resp: %d\n", resp);
+ return -ETIMEDOUT;
+ }
+ }
+
+ if (!err)
+ data->rsp_seq = SI1133_MAX_CMD_CTR;
+
+ return err;
+}
+
+static int si1133_parse_response_err(struct device *dev, u32 resp, u8 cmd)
+{
+ resp &= 0xF;
+
+ switch (resp) {
+ case SI1133_ERR_OUTPUT_BUFFER_OVERFLOW:
+ dev_warn(dev, "Output buffer overflow: %#02hhx\n", cmd);
+ return -EOVERFLOW;
+ case SI1133_ERR_SATURATION_ADC_OR_OVERFLOW_ACCUMULATION:
+ dev_warn(dev, "Saturation of the ADC or overflow of accumulation: %#02hhx\n",
+ cmd);
+ return -EOVERFLOW;
+ case SI1133_ERR_INVALID_LOCATION_CMD:
+ dev_warn(dev,
+ "Parameter access to an invalid location: %#02hhx\n",
+ cmd);
+ return -EINVAL;
+ case SI1133_ERR_INVALID_CMD:
+ dev_warn(dev, "Invalid command %#02hhx\n", cmd);
+ return -EINVAL;
+ default:
+ dev_warn(dev, "Unknown error %#02hhx\n", cmd);
+ return -EINVAL;
+ }
+}
+
+static int si1133_cmd_reset_counter(struct si1133_data *data)
+{
+ int err = regmap_write(data->regmap, SI1133_REG_COMMAND,
+ SI1133_CMD_RESET_CTR);
+ if (err)
+ return err;
+
+ data->rsp_seq = 0;
+
+ return 0;
+}
+
+static int si1133_command(struct si1133_data *data, u8 cmd)
+{
+ struct device *dev = &data->client->dev;
+ u32 resp;
+ int err;
+ int expected_seq;
+
+ mutex_lock(&data->mutex);
+
+ expected_seq = (data->rsp_seq + 1) & SI1133_MAX_CMD_CTR;
+
+ if (cmd == SI1133_CMD_FORCE)
+ reinit_completion(&data->completion);
+
+ err = regmap_write(data->regmap, SI1133_REG_COMMAND, cmd);
+ if (err) {
+ dev_warn(dev, "Failed to write command %#02hhx, ret=%d\n", cmd,
+ err);
+ goto out;
+ }
+
+ if (cmd == SI1133_CMD_FORCE) {
+ /* wait for irq */
+ if (!wait_for_completion_timeout(&data->completion,
+ msecs_to_jiffies(SI1133_COMPLETION_TIMEOUT_MS))) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ err = regmap_read(data->regmap, SI1133_REG_RESPONSE0, &resp);
+ if (err)
+ goto out;
+ } else {
+ err = regmap_read_poll_timeout(data->regmap,
+ SI1133_REG_RESPONSE0, resp,
+ (resp & SI1133_CMD_SEQ_MASK) ==
+ expected_seq ||
+ (resp & SI1133_CMD_ERR_MASK),
+ SI1133_CMD_MINSLEEP_US_LOW,
+ SI1133_CMD_TIMEOUT_MS * 1000);
+ if (err) {
+ dev_warn(dev,
+ "Failed to read command %#02hhx, ret=%d\n",
+ cmd, err);
+ goto out;
+ }
+ }
+
+ if (resp & SI1133_CMD_ERR_MASK) {
+ err = si1133_parse_response_err(dev, resp, cmd);
+ si1133_cmd_reset_counter(data);
+ } else {
+ data->rsp_seq = expected_seq;
+ }
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return err;
+}
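+
+/*
+ * Illustrative walk-through of the sequence counter handling above:
+ * the low nibble of RESPONSE0 increments (mod 16) each time the device
+ * accepts a command, so with rsp_seq == 3 a successful command is
+ * polled until the nibble reads 4; an error sets bit 4 instead, the
+ * counter is reset via SI1133_CMD_RESET_CTR and rsp_seq restarts at 0.
+ */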
+
+static int si1133_param_set(struct si1133_data *data, u8 param, u32 value)
+{
+ int err = regmap_write(data->regmap, SI1133_REG_HOSTIN0, value);
+
+ if (err)
+ return err;
+
+ return si1133_command(data, SI1133_CMD_PARAM_SET |
+ (param & SI1133_CMD_PARAM_MASK));
+}
+
+static int si1133_param_query(struct si1133_data *data, u8 param, u32 *result)
+{
+ int err = si1133_command(data, SI1133_CMD_PARAM_QUERY |
+ (param & SI1133_CMD_PARAM_MASK));
+ if (err)
+ return err;
+
+ return regmap_read(data->regmap, SI1133_REG_RESPONSE1, result);
+}
+
+#define SI1133_CHANNEL(_ch, _type) \
+ .type = _type, \
+ .channel = _ch, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
+
+static const struct iio_chan_spec si1133_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .channel = 0,
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_WHITE, IIO_INTENSITY)
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_LARGE_WHITE, IIO_INTENSITY)
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .extend_name = "large",
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_SMALL_IR, IIO_INTENSITY)
+ .extend_name = "small",
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_MED_IR, IIO_INTENSITY)
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_LARGE_IR, IIO_INTENSITY)
+ .extend_name = "large",
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_UV, IIO_UVINDEX)
+ },
+ {
+ SI1133_CHANNEL(SI1133_PARAM_ADCMUX_UV_DEEP, IIO_UVINDEX)
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_DUV,
+ }
+};
+
+static int si1133_get_int_time_index(int milliseconds, int nanoseconds)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(si1133_int_time_table); i++) {
+ if (milliseconds == si1133_int_time_table[i][0] &&
+ nanoseconds == si1133_int_time_table[i][1])
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int si1133_set_integration_time(struct si1133_data *data, u8 adc,
+ int milliseconds, int nanoseconds)
+{
+ int index;
+
+ index = si1133_get_int_time_index(milliseconds, nanoseconds);
+ if (index < 0)
+ return index;
+
+ data->adc_sens[adc] &= 0xF0;
+ data->adc_sens[adc] |= index;
+
+ return si1133_param_set(data, SI1133_PARAM_REG_ADCSENS(0),
+ data->adc_sens[adc]);
+}
+
+static int si1133_set_chlist(struct si1133_data *data, u8 scan_mask)
+{
+ /* channel list already set, no need to reprogram */
+ if (data->scan_mask == scan_mask)
+ return 0;
+
+ data->scan_mask = scan_mask;
+
+ return si1133_param_set(data, SI1133_PARAM_REG_CHAN_LIST, scan_mask);
+}
+
+static int si1133_chan_set_adcconfig(struct si1133_data *data, u8 adc,
+ u8 adc_config)
+{
+ int err;
+
+ err = si1133_param_set(data, SI1133_PARAM_REG_ADCCONFIG(adc),
+ adc_config);
+ if (err)
+ return err;
+
+ data->adc_config[adc] = adc_config;
+
+ return 0;
+}
+
+static int si1133_update_adcconfig(struct si1133_data *data, uint8_t adc,
+ u8 mask, u8 shift, u8 value)
+{
+ u32 adc_config;
+ int err;
+
+ err = si1133_param_query(data, SI1133_PARAM_REG_ADCCONFIG(adc),
+ &adc_config);
+ if (err)
+ return err;
+
+ adc_config &= ~mask;
+ adc_config |= (value << shift);
+
+ return si1133_chan_set_adcconfig(data, adc, adc_config);
+}
+
+static int si1133_set_adcmux(struct si1133_data *data, u8 adc, u8 mux)
+{
+ if ((mux & data->adc_config[adc]) == mux)
+ return 0; /* mux already set to correct value */
+
+ return si1133_update_adcconfig(data, adc, SI1133_ADCMUX_MASK, 0, mux);
+}
+
+static int si1133_force_measurement(struct si1133_data *data)
+{
+ return si1133_command(data, SI1133_CMD_FORCE);
+}
+
+static int si1133_bulk_read(struct si1133_data *data, u8 start_reg, u8 length,
+ u8 *buffer)
+{
+ int err;
+
+ err = si1133_force_measurement(data);
+ if (err)
+ return err;
+
+ return regmap_bulk_read(data->regmap, start_reg, buffer, length);
+}
+
+static int si1133_measure(struct si1133_data *data,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ int err;
+
+ __be16 resp;
+
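+ /*
+ * Route the requested source to ADC channel 0 and read back a
+ * single 16-bit big-endian sample from HOSTOUT.
+ */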
+ err = si1133_set_adcmux(data, 0, chan->channel);
+ if (err)
+ return err;
+
+ /* Deactivate lux measurements if they were active */
+ err = si1133_set_chlist(data, BIT(0));
+ if (err)
+ return err;
+
+ err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(resp),
+ (u8 *)&resp);
+ if (err)
+ return err;
+
+ *val = be16_to_cpu(resp);
+
+ return err;
+}
+
+static irqreturn_t si1133_threaded_irq_handler(int irq, void *private)
+{
+ struct iio_dev *iio_dev = private;
+ struct si1133_data *data = iio_priv(iio_dev);
+ u32 irq_status;
+ int err;
+
+ err = regmap_read(data->regmap, SI1133_REG_IRQ_STATUS, &irq_status);
+ if (err) {
+ dev_err_ratelimited(&iio_dev->dev, "Error reading IRQ\n");
+ goto out;
+ }
+
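+ /* On a shared IRQ line, an unexpected status means it was not ours. */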
+ if (irq_status != data->scan_mask)
+ return IRQ_NONE;
+
+out:
+ complete(&data->completion);
+
+ return IRQ_HANDLED;
+}
+
+static int si1133_scale_to_swgain(int scale_integer, int scale_fractional)
+{
+ scale_integer = find_closest(scale_integer, si1133_scale_available,
+ ARRAY_SIZE(si1133_scale_available));
+ if (scale_integer < 0 ||
+ scale_integer > ARRAY_SIZE(si1133_scale_available) ||
+ scale_fractional != 0)
+ return -EINVAL;
+
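+ /*
+ * Available scales are successive powers of two, so the index
+ * returned here doubles as the exponent stored in the ADCSENS
+ * scale field (scale = 2^index).
+ */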
+ return scale_integer;
+}
+
+static int si1133_chan_set_adcsens(struct si1133_data *data, u8 adc,
+ u8 adc_sens)
+{
+ int err;
+
+ err = si1133_param_set(data, SI1133_PARAM_REG_ADCSENS(adc), adc_sens);
+ if (err)
+ return err;
+
+ data->adc_sens[adc] = adc_sens;
+
+ return 0;
+}
+
+static int si1133_update_adcsens(struct si1133_data *data, u8 mask,
+ u8 shift, u8 value)
+{
+ int err;
+ u32 adc_sens;
+
+ err = si1133_param_query(data, SI1133_PARAM_REG_ADCSENS(0),
+ &adc_sens);
+ if (err)
+ return err;
+
+ adc_sens &= ~mask;
+ adc_sens |= (value << shift);
+
+ return si1133_chan_set_adcsens(data, 0, adc_sens);
+}
+
+static int si1133_get_lux(struct si1133_data *data, int *val)
+{
+ int err;
+ int lux;
+ u32 high_vis;
+ u32 low_vis;
+ u32 ir;
+ u8 buffer[SI1133_LUX_BUFFER_SIZE];
+
+ /* Activate lux channels */
+ err = si1133_set_chlist(data, SI1133_LUX_ADC_MASK);
+ if (err)
+ return err;
+
+ err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0),
+ SI1133_LUX_BUFFER_SIZE, buffer);
+ if (err)
+ return err;
+
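+ /* Each channel returns one 24-bit big-endian sample (three bytes). */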
+ high_vis = (buffer[0] << 16) | (buffer[1] << 8) | buffer[2];
+ low_vis = (buffer[3] << 16) | (buffer[4] << 8) | buffer[5];
+ ir = (buffer[6] << 16) | (buffer[7] << 8) | buffer[8];
+
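+ /*
+ * Pick the coefficient set by signal level: readings above the
+ * ADC threshold use the high-lux polynomial, otherwise the
+ * low-lux one.
+ */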
+ if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD)
+ lux = si1133_calc_polynomial(high_vis, ir,
+ SI1133_INPUT_FRACTION_HIGH,
+ ARRAY_SIZE(lux_coeff.coeff_high),
+ &lux_coeff.coeff_high[0]);
+ else
+ lux = si1133_calc_polynomial(low_vis, ir,
+ SI1133_INPUT_FRACTION_LOW,
+ ARRAY_SIZE(lux_coeff.coeff_low),
+ &lux_coeff.coeff_low[0]);
+
+ *val = lux >> SI1133_LUX_OUTPUT_FRACTION;
+
+ return err;
+}
+
+static int si1133_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct si1133_data *data = iio_priv(iio_dev);
+ u8 adc_sens = data->adc_sens[0];
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ err = si1133_get_lux(data, val);
+ if (err)
+ return err;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ err = si1133_measure(data, chan, val);
+ if (err)
+ return err;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ adc_sens &= SI1133_ADCSENS_HW_GAIN_MASK;
+
+ *val = si1133_int_time_table[adc_sens][0];
+ *val2 = si1133_int_time_table[adc_sens][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ adc_sens &= SI1133_ADCSENS_SCALE_MASK;
+ adc_sens >>= SI1133_ADCSENS_SCALE_SHIFT;
+
+ *val = BIT(adc_sens);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ adc_sens >>= SI1133_ADCSENS_HSIG_SHIFT;
+
+ *val = adc_sens;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int si1133_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct si1133_data *data = iio_priv(iio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ val = si1133_scale_to_swgain(val, val2);
+ if (val < 0)
+ return val;
+
+ return si1133_update_adcsens(data,
+ SI1133_ADCSENS_SCALE_MASK,
+ SI1133_ADCSENS_SCALE_SHIFT,
+ val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ return si1133_set_integration_time(data, 0, val, val2);
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_UVINDEX:
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ return si1133_update_adcsens(data,
+ SI1133_ADCSENS_HSIG_MASK,
+ SI1133_ADCSENS_HSIG_SHIFT,
+ val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct attribute *si1133_attributes[] = {
+ &iio_const_attr_integration_time_available.dev_attr.attr,
+ &iio_const_attr_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group si1133_attribute_group = {
+ .attrs = si1133_attributes,
+};
+
+static const struct iio_info si1133_info = {
+ .read_raw = si1133_read_raw,
+ .write_raw = si1133_write_raw,
+ .attrs = &si1133_attribute_group,
+};
+
+/*
+ * si1133_init_lux_channels - Configure three ADC channels (1, 2 and 3)
+ * for the lux measurement. The channel configuration was taken from:
+ * https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00578
+ *
+ * Channel 0 is reserved for the other raw measurements.
+ */
+static int si1133_init_lux_channels(struct si1133_data *data)
+{
+ int err;
+
+ err = si1133_chan_set_adcconfig(data, 1,
+ SI1133_ADCCONFIG_DECIM_RATE(1) |
+ SI1133_PARAM_ADCMUX_LARGE_WHITE);
+ if (err)
+ return err;
+
+ err = si1133_param_set(data, SI1133_PARAM_REG_ADCPOST(1),
+ SI1133_ADCPOST_24BIT_EN |
+ SI1133_ADCPOST_POSTSHIFT_BITQTY(0));
+ if (err)
+ return err;
+ err = si1133_chan_set_adcsens(data, 1, SI1133_ADCSENS_HSIG_MASK |
+ SI1133_ADCSENS_NB_MEAS(64) | _48_8_us);
+ if (err)
+ return err;
+
+ err = si1133_chan_set_adcconfig(data, 2,
+ SI1133_ADCCONFIG_DECIM_RATE(1) |
+ SI1133_PARAM_ADCMUX_LARGE_WHITE);
+ if (err)
+ return err;
+
+ err = si1133_param_set(data, SI1133_PARAM_REG_ADCPOST(2),
+ SI1133_ADCPOST_24BIT_EN |
+ SI1133_ADCPOST_POSTSHIFT_BITQTY(2));
+ if (err)
+ return err;
+
+ err = si1133_chan_set_adcsens(data, 2, SI1133_ADCSENS_HSIG_MASK |
+ SI1133_ADCSENS_NB_MEAS(1) | _3_120_0_us);
+ if (err)
+ return err;
+
+ err = si1133_chan_set_adcconfig(data, 3,
+ SI1133_ADCCONFIG_DECIM_RATE(1) |
+ SI1133_PARAM_ADCMUX_MED_IR);
+ if (err)
+ return err;
+
+ err = si1133_param_set(data, SI1133_PARAM_REG_ADCPOST(3),
+ SI1133_ADCPOST_24BIT_EN |
+ SI1133_ADCPOST_POSTSHIFT_BITQTY(2));
+ if (err)
+ return err;
+
+ return si1133_chan_set_adcsens(data, 3, SI1133_ADCSENS_HSIG_MASK |
+ SI1133_ADCSENS_NB_MEAS(64) | _48_8_us);
+}
+
+static int si1133_initialize(struct si1133_data *data)
+{
+ int err;
+
+ err = si1133_cmd_reset_sw(data);
+ if (err)
+ return err;
+
+ /* Turn off autonomous mode */
+ err = si1133_param_set(data, SI1133_REG_MEAS_RATE, 0);
+ if (err)
+ return err;
+
+ err = si1133_init_lux_channels(data);
+ if (err)
+ return err;
+
+ return regmap_write(data->regmap, SI1133_REG_IRQ_ENABLE,
+ SI1133_IRQ_CHANNEL_ENABLE);
+}
+
+static int si1133_validate_ids(struct iio_dev *iio_dev)
+{
+ struct si1133_data *data = iio_priv(iio_dev);
+
+ unsigned int part_id, rev_id, mfr_id;
+ int err;
+
+ err = regmap_read(data->regmap, SI1133_REG_PART_ID, &part_id);
+ if (err)
+ return err;
+
+ err = regmap_read(data->regmap, SI1133_REG_REV_ID, &rev_id);
+ if (err)
+ return err;
+
+ err = regmap_read(data->regmap, SI1133_REG_MFR_ID, &mfr_id);
+ if (err)
+ return err;
+
+ dev_info(&iio_dev->dev,
+ "Device ID part %#02hhx rev %#02hhx mfr %#02hhx\n",
+ part_id, rev_id, mfr_id);
+ if (part_id != SI1133_PART_ID) {
+ dev_err(&iio_dev->dev,
+ "Part ID mismatch got %#02hhx, expected %#02x\n",
+ part_id, SI1133_PART_ID);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int si1133_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct si1133_data *data;
+ struct iio_dev *iio_dev;
+ int err;
+
+ iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(iio_dev);
+
+ init_completion(&data->completion);
+
+ data->regmap = devm_regmap_init_i2c(client, &si1133_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ err = PTR_ERR(data->regmap);
+ dev_err(&client->dev, "Failed to initialise regmap: %d\n", err);
+ return err;
+ }
+
+ i2c_set_clientdata(client, iio_dev);
+ data->client = client;
+
+ iio_dev->dev.parent = &client->dev;
+ iio_dev->name = id->name;
+ iio_dev->channels = si1133_channels;
+ iio_dev->num_channels = ARRAY_SIZE(si1133_channels);
+ iio_dev->info = &si1133_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ mutex_init(&data->mutex);
+
+ err = si1133_validate_ids(iio_dev);
+ if (err)
+ return err;
+
+ err = si1133_initialize(data);
+ if (err) {
+ dev_err(&client->dev,
+ "Error when initializing chip: %d\n", err);
+ return err;
+ }
+
+ if (!client->irq) {
+ dev_err(&client->dev,
+ "Required interrupt not provided, cannot proceed\n");
+ return -EINVAL;
+ }
+
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL,
+ si1133_threaded_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ client->name, iio_dev);
+ if (err) {
+ dev_warn(&client->dev, "Request irq %d failed: %i\n",
+ client->irq, err);
+ return err;
+ }
+
+ return devm_iio_device_register(&client->dev, iio_dev);
+}
+
+static const struct i2c_device_id si1133_ids[] = {
+ { "si1133", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si1133_ids);
+
+static struct i2c_driver si1133_driver = {
+ .driver = {
+ .name = "si1133",
+ },
+ .probe = si1133_probe,
+ .id_table = si1133_ids,
+};
+
+module_i2c_driver(si1133_driver);
+
+MODULE_AUTHOR("Maxime Roussin-Belanger <maxime.roussinbelanger@gmail.com>");
+MODULE_DESCRIPTION("Silabs SI1133, UV index sensor and ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index c599a90506ad..04fd0d4b6f19 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -1,5 +1,5 @@
/*
- * vcnl4000.c - Support for Vishay VCNL4000/4010/4020 combined ambient
+ * vcnl4000.c - Support for Vishay VCNL4000/4010/4020/4200 combined ambient
* light and proximity sensor
*
* Copyright 2012 Peter Meerwald <pmeerw@pmeerw.net>
@@ -8,13 +8,15 @@
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
- * IIO driver for VCNL4000 (7-bit I2C slave address 0x13)
+ * IIO driver for:
+ * VCNL4000/10/20 (7-bit I2C slave address 0x13)
+ * VCNL4200 (7-bit I2C slave address 0x51)
*
* TODO:
* allow to adjust IR current
* proximity threshold and event handling
* periodic ALS/proximity measurement (VCNL4010/20)
- * interrupts (VCNL4010/20)
+ * interrupts (VCNL4010/20, VCNL4200)
*/
#include <linux/module.h>
@@ -26,8 +28,9 @@
#include <linux/iio/sysfs.h>
#define VCNL4000_DRV_NAME "vcnl4000"
-#define VCNL4000_ID 0x01
-#define VCNL4010_ID 0x02 /* for VCNL4020, VCNL4010 */
+#define VCNL4000_PROD_ID 0x01
+#define VCNL4010_PROD_ID 0x02 /* for VCNL4020, VCNL4010 */
+#define VCNL4200_PROD_ID 0x58
#define VCNL4000_COMMAND 0x80 /* Command register */
#define VCNL4000_PROD_REV 0x81 /* Product ID and Revision ID */
@@ -40,23 +43,124 @@
#define VCNL4000_PS_MEAS_FREQ 0x89 /* Proximity test signal frequency */
#define VCNL4000_PS_MOD_ADJ 0x8a /* Proximity modulator timing adjustment */
+#define VCNL4200_AL_CONF 0x00 /* Ambient light configuration */
+#define VCNL4200_PS_CONF1 0x03 /* Proximity configuration */
+#define VCNL4200_PS_DATA 0x08 /* Proximity data */
+#define VCNL4200_AL_DATA 0x09 /* Ambient light data */
+#define VCNL4200_DEV_ID 0x0e /* Device ID, slave address and version */
+
/* Bit masks for COMMAND register */
#define VCNL4000_AL_RDY BIT(6) /* ALS data ready? */
#define VCNL4000_PS_RDY BIT(5) /* proximity data ready? */
#define VCNL4000_AL_OD BIT(4) /* start on-demand ALS measurement */
#define VCNL4000_PS_OD BIT(3) /* start on-demand proximity measurement */
+enum vcnl4000_device_ids {
+ VCNL4000,
+ VCNL4010,
+ VCNL4200,
+};
+
+struct vcnl4200_channel {
+ u8 reg;
+ ktime_t last_measurement;
+ ktime_t sampling_rate;
+ struct mutex lock;
+};
+
struct vcnl4000_data {
struct i2c_client *client;
- struct mutex lock;
+ enum vcnl4000_device_ids id;
+ int rev;
+ int al_scale;
+ const struct vcnl4000_chip_spec *chip_spec;
+ struct mutex vcnl4000_lock;
+ struct vcnl4200_channel vcnl4200_al;
+ struct vcnl4200_channel vcnl4200_ps;
+};
+
+struct vcnl4000_chip_spec {
+ const char *prod;
+ int (*init)(struct vcnl4000_data *data);
+ int (*measure_light)(struct vcnl4000_data *data, int *val);
+ int (*measure_proximity)(struct vcnl4000_data *data, int *val);
};
static const struct i2c_device_id vcnl4000_id[] = {
- { "vcnl4000", 0 },
+ { "vcnl4000", VCNL4000 },
+ { "vcnl4010", VCNL4010 },
+ { "vcnl4020", VCNL4010 },
+ { "vcnl4200", VCNL4200 },
{ }
};
MODULE_DEVICE_TABLE(i2c, vcnl4000_id);
+static int vcnl4000_init(struct vcnl4000_data *data)
+{
+ int ret, prod_id;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4000_PROD_REV);
+ if (ret < 0)
+ return ret;
+
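+ /*
+ * PROD_REV holds the product ID in the high nibble and the
+ * revision in the low nibble.
+ */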
+ prod_id = ret >> 4;
+ switch (prod_id) {
+ case VCNL4000_PROD_ID:
+ if (data->id != VCNL4000)
+ dev_warn(&data->client->dev,
+ "wrong device id, use vcnl4000");
+ break;
+ case VCNL4010_PROD_ID:
+ if (data->id != VCNL4010)
+ dev_warn(&data->client->dev,
+ "wrong device id, use vcnl4010/4020");
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ data->rev = ret & 0xf;
+ data->al_scale = 250000;
+ mutex_init(&data->vcnl4000_lock);
+
+ return 0;
+}
+
+static int vcnl4200_init(struct vcnl4000_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_DEV_ID);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & 0xff) != VCNL4200_PROD_ID)
+ return -ENODEV;
+
+ data->rev = (ret >> 8) & 0xf;
+
+ /* Set defaults and enable both channels */
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4200_AL_CONF, 0x00);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4200_PS_CONF1, 0x00);
+ if (ret < 0)
+ return ret;
+
+ data->al_scale = 24000;
+ data->vcnl4200_al.reg = VCNL4200_AL_DATA;
+ data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
+ /* Integration time is 50ms, but the experiments show 54ms in total. */
+ data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
+ data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+ data->vcnl4200_al.last_measurement = ktime_set(0, 0);
+ data->vcnl4200_ps.last_measurement = ktime_set(0, 0);
+ mutex_init(&data->vcnl4200_al.lock);
+ mutex_init(&data->vcnl4200_ps.lock);
+
+ return 0;
+}
+
static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
u8 rdy_mask, u8 data_reg, int *val)
{
@@ -64,7 +168,7 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
__be16 buf;
int ret;
- mutex_lock(&data->lock);
+ mutex_lock(&data->vcnl4000_lock);
ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
req_mask);
@@ -93,16 +197,88 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
if (ret < 0)
goto fail;
- mutex_unlock(&data->lock);
+ mutex_unlock(&data->vcnl4000_lock);
*val = be16_to_cpu(buf);
return 0;
fail:
- mutex_unlock(&data->lock);
+ mutex_unlock(&data->vcnl4000_lock);
return ret;
}
+static int vcnl4200_measure(struct vcnl4000_data *data,
+ struct vcnl4200_channel *chan, int *val)
+{
+ int ret;
+ s64 delta;
+ ktime_t next_measurement;
+
+ mutex_lock(&chan->lock);
+
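+ /*
+ * Rate limit reads: wait out the remainder of the current
+ * measurement period before sampling the data register again.
+ */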
+ next_measurement = ktime_add(chan->last_measurement,
+ chan->sampling_rate);
+ delta = ktime_us_delta(next_measurement, ktime_get());
+ if (delta > 0)
+ usleep_range(delta, delta + 500);
+ chan->last_measurement = ktime_get();
+
+ mutex_unlock(&chan->lock);
+
+ ret = i2c_smbus_read_word_data(data->client, chan->reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int vcnl4000_measure_light(struct vcnl4000_data *data, int *val)
+{
+ return vcnl4000_measure(data,
+ VCNL4000_AL_OD, VCNL4000_AL_RDY,
+ VCNL4000_AL_RESULT_HI, val);
+}
+
+static int vcnl4200_measure_light(struct vcnl4000_data *data, int *val)
+{
+ return vcnl4200_measure(data, &data->vcnl4200_al, val);
+}
+
+static int vcnl4000_measure_proximity(struct vcnl4000_data *data, int *val)
+{
+ return vcnl4000_measure(data,
+ VCNL4000_PS_OD, VCNL4000_PS_RDY,
+ VCNL4000_PS_RESULT_HI, val);
+}
+
+static int vcnl4200_measure_proximity(struct vcnl4000_data *data, int *val)
+{
+ return vcnl4200_measure(data, &data->vcnl4200_ps, val);
+}
+
+static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
+ [VCNL4000] = {
+ .prod = "VCNL4000",
+ .init = vcnl4000_init,
+ .measure_light = vcnl4000_measure_light,
+ .measure_proximity = vcnl4000_measure_proximity,
+ },
+ [VCNL4010] = {
+ .prod = "VCNL4010/4020",
+ .init = vcnl4000_init,
+ .measure_light = vcnl4000_measure_light,
+ .measure_proximity = vcnl4000_measure_proximity,
+ },
+ [VCNL4200] = {
+ .prod = "VCNL4200",
+ .init = vcnl4200_init,
+ .measure_light = vcnl4200_measure_light,
+ .measure_proximity = vcnl4200_measure_proximity,
+ },
+};
+
static const struct iio_chan_spec vcnl4000_channels[] = {
{
.type = IIO_LIGHT,
@@ -125,16 +301,12 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_LIGHT:
- ret = vcnl4000_measure(data,
- VCNL4000_AL_OD, VCNL4000_AL_RDY,
- VCNL4000_AL_RESULT_HI, val);
+ ret = data->chip_spec->measure_light(data, val);
if (ret < 0)
return ret;
return IIO_VAL_INT;
case IIO_PROXIMITY:
- ret = vcnl4000_measure(data,
- VCNL4000_PS_OD, VCNL4000_PS_RDY,
- VCNL4000_PS_RESULT_HI, val);
+ ret = data->chip_spec->measure_proximity(data, val);
if (ret < 0)
return ret;
return IIO_VAL_INT;
@@ -146,7 +318,7 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
*val = 0;
- *val2 = 250000;
+ *val2 = data->al_scale;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
@@ -162,7 +334,7 @@ static int vcnl4000_probe(struct i2c_client *client,
{
struct vcnl4000_data *data;
struct iio_dev *indio_dev;
- int ret, prod_id;
+ int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -171,19 +343,15 @@ static int vcnl4000_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- mutex_init(&data->lock);
+ data->id = id->driver_data;
+ data->chip_spec = &vcnl4000_chip_spec_cfg[data->id];
- ret = i2c_smbus_read_byte_data(data->client, VCNL4000_PROD_REV);
+ ret = data->chip_spec->init(data);
if (ret < 0)
return ret;
- prod_id = ret >> 4;
- if (prod_id != VCNL4010_ID && prod_id != VCNL4000_ID)
- return -ENODEV;
-
dev_dbg(&client->dev, "%s Ambient light/proximity sensor, Rev: %02x\n",
- (prod_id == VCNL4010_ID) ? "VCNL4010/4020" : "VCNL4000",
- ret & 0xf);
+ data->chip_spec->prod, data->rev);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &vcnl4000_info;
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index fbb59059e942..2026a1012012 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -94,9 +94,8 @@ static int st_press_i2c_probe(struct i2c_client *client,
if ((ret < 0) || (ret >= ST_PRESS_MAX))
return -ENODEV;
- strncpy(client->name, st_press_id_table[ret].name,
+ strlcpy(client->name, st_press_id_table[ret].name,
sizeof(client->name));
- client->name[sizeof(client->name) - 1] = '\0';
} else if (!id)
return -ENODEV;
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index f726f9427602..388ef70c11d2 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -20,6 +20,19 @@ endmenu
menu "Proximity and distance sensors"
+config ISL29501
+ tristate "Intersil ISL29501 Time Of Flight sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ Say Y here if you want to build a driver for the Intersil ISL29501
+ Time of Flight sensor.
+
+ To compile this driver as a module, choose M here: the module will be
+ called isl29501.
+
config LIDAR_LITE_V2
tristate "PulsedLight LIDAR sensor"
select IIO_BUFFER
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index 4f4ed45e87ef..cac3d7d3325e 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AS3935) += as3935.o
+obj-$(CONFIG_ISL29501) += isl29501.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
obj-$(CONFIG_RFD77402) += rfd77402.o
obj-$(CONFIG_SRF04) += srf04.o
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
new file mode 100644
index 000000000000..e5e94540f404
--- /dev/null
+++ b/drivers/iio/proximity/isl29501.c
@@ -0,0 +1,1027 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * isl29501.c: ISL29501 Time of Flight sensor driver.
+ *
+ * Copyright (C) 2018
+ * Author: Mathieu Othacehe <m.othacehe@gmail.com>
+ *
+ * 7-bit I2C slave address: 0x57
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/of_device.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/* Control, setting and status registers */
+#define ISL29501_DEVICE_ID 0x00
+#define ISL29501_ID 0x0A
+
+/* Sampling control registers */
+#define ISL29501_INTEGRATION_PERIOD 0x10
+#define ISL29501_SAMPLE_PERIOD 0x11
+
+/* Closed loop calibration registers */
+#define ISL29501_CROSSTALK_I_MSB 0x24
+#define ISL29501_CROSSTALK_I_LSB 0x25
+#define ISL29501_CROSSTALK_I_EXPONENT 0x26
+#define ISL29501_CROSSTALK_Q_MSB 0x27
+#define ISL29501_CROSSTALK_Q_LSB 0x28
+#define ISL29501_CROSSTALK_Q_EXPONENT 0x29
+#define ISL29501_CROSSTALK_GAIN_MSB 0x2A
+#define ISL29501_CROSSTALK_GAIN_LSB 0x2B
+#define ISL29501_MAGNITUDE_REF_EXP 0x2C
+#define ISL29501_MAGNITUDE_REF_MSB 0x2D
+#define ISL29501_MAGNITUDE_REF_LSB 0x2E
+#define ISL29501_PHASE_OFFSET_MSB 0x2F
+#define ISL29501_PHASE_OFFSET_LSB 0x30
+
+/* Analog control registers */
+#define ISL29501_DRIVER_RANGE 0x90
+#define ISL29501_EMITTER_DAC 0x91
+
+#define ISL29501_COMMAND_REGISTER 0xB0
+
+/* Commands */
+#define ISL29501_EMUL_SAMPLE_START_PIN 0x49
+#define ISL29501_RESET_ALL_REGISTERS 0xD7
+#define ISL29501_RESET_INT_SM 0xD1
+
+/* Ambient light and temperature corrections */
+#define ISL29501_TEMP_REFERENCE 0x31
+#define ISL29501_PHASE_EXPONENT 0x33
+#define ISL29501_TEMP_COEFF_A 0x34
+#define ISL29501_TEMP_COEFF_B 0x39
+#define ISL29501_AMBIANT_COEFF_A 0x36
+#define ISL29501_AMBIANT_COEFF_B 0x3B
+
+/* Data output registers */
+#define ISL29501_DISTANCE_MSB_DATA 0xD1
+#define ISL29501_DISTANCE_LSB_DATA 0xD2
+#define ISL29501_PRECISION_MSB 0xD3
+#define ISL29501_PRECISION_LSB 0xD4
+#define ISL29501_MAGNITUDE_EXPONENT 0xD5
+#define ISL29501_MAGNITUDE_MSB 0xD6
+#define ISL29501_MAGNITUDE_LSB 0xD7
+#define ISL29501_PHASE_MSB 0xD8
+#define ISL29501_PHASE_LSB 0xD9
+#define ISL29501_I_RAW_EXPONENT 0xDA
+#define ISL29501_I_RAW_MSB 0xDB
+#define ISL29501_I_RAW_LSB 0xDC
+#define ISL29501_Q_RAW_EXPONENT 0xDD
+#define ISL29501_Q_RAW_MSB 0xDE
+#define ISL29501_Q_RAW_LSB 0xDF
+#define ISL29501_DIE_TEMPERATURE 0xE2
+#define ISL29501_AMBIENT_LIGHT 0xE3
+#define ISL29501_GAIN_MSB 0xE6
+#define ISL29501_GAIN_LSB 0xE7
+
+#define ISL29501_MAX_EXP_VAL 15
+
+#define ISL29501_INT_TIME_AVAILABLE \
+ "0.00007 0.00014 0.00028 0.00057 0.00114 " \
+ "0.00228 0.00455 0.00910 0.01820 0.03640 " \
+ "0.07281 0.14561"
+
+#define ISL29501_CURRENT_SCALE_AVAILABLE \
+ "0.0039 0.0078 0.0118 0.0157 0.0196 " \
+ "0.0235 0.0275 0.0314 0.0352 0.0392 " \
+ "0.0431 0.0471 0.0510 0.0549 0.0588"
+
+enum isl29501_correction_coeff {
+ COEFF_TEMP_A,
+ COEFF_TEMP_B,
+ COEFF_LIGHT_A,
+ COEFF_LIGHT_B,
+ COEFF_MAX,
+};
+
+struct isl29501_private {
+ struct i2c_client *client;
+ struct mutex lock;
+ /* Exact representation of correction coefficients. */
+ unsigned int shadow_coeffs[COEFF_MAX];
+};
+
+enum isl29501_register_name {
+ REG_DISTANCE,
+ REG_PHASE,
+ REG_TEMPERATURE,
+ REG_AMBIENT_LIGHT,
+ REG_GAIN,
+ REG_GAIN_BIAS,
+ REG_PHASE_EXP,
+ REG_CALIB_PHASE_TEMP_A,
+ REG_CALIB_PHASE_TEMP_B,
+ REG_CALIB_PHASE_LIGHT_A,
+ REG_CALIB_PHASE_LIGHT_B,
+ REG_DISTANCE_BIAS,
+ REG_TEMPERATURE_BIAS,
+ REG_INT_TIME,
+ REG_SAMPLE_TIME,
+ REG_DRIVER_RANGE,
+ REG_EMITTER_DAC,
+};
+
+struct isl29501_register_desc {
+ u8 msb;
+ u8 lsb;
+};
+
+static const struct isl29501_register_desc isl29501_registers[] = {
+ [REG_DISTANCE] = {
+ .msb = ISL29501_DISTANCE_MSB_DATA,
+ .lsb = ISL29501_DISTANCE_LSB_DATA,
+ },
+ [REG_PHASE] = {
+ .msb = ISL29501_PHASE_MSB,
+ .lsb = ISL29501_PHASE_LSB,
+ },
+ [REG_TEMPERATURE] = {
+ .lsb = ISL29501_DIE_TEMPERATURE,
+ },
+ [REG_AMBIENT_LIGHT] = {
+ .lsb = ISL29501_AMBIENT_LIGHT,
+ },
+ [REG_GAIN] = {
+ .msb = ISL29501_GAIN_MSB,
+ .lsb = ISL29501_GAIN_LSB,
+ },
+ [REG_GAIN_BIAS] = {
+ .msb = ISL29501_CROSSTALK_GAIN_MSB,
+ .lsb = ISL29501_CROSSTALK_GAIN_LSB,
+ },
+ [REG_PHASE_EXP] = {
+ .lsb = ISL29501_PHASE_EXPONENT,
+ },
+ [REG_CALIB_PHASE_TEMP_A] = {
+ .lsb = ISL29501_TEMP_COEFF_A,
+ },
+ [REG_CALIB_PHASE_TEMP_B] = {
+ .lsb = ISL29501_TEMP_COEFF_B,
+ },
+ [REG_CALIB_PHASE_LIGHT_A] = {
+ .lsb = ISL29501_AMBIANT_COEFF_A,
+ },
+ [REG_CALIB_PHASE_LIGHT_B] = {
+ .lsb = ISL29501_AMBIANT_COEFF_B,
+ },
+ [REG_DISTANCE_BIAS] = {
+ .msb = ISL29501_PHASE_OFFSET_MSB,
+ .lsb = ISL29501_PHASE_OFFSET_LSB,
+ },
+ [REG_TEMPERATURE_BIAS] = {
+ .lsb = ISL29501_TEMP_REFERENCE,
+ },
+ [REG_INT_TIME] = {
+ .lsb = ISL29501_INTEGRATION_PERIOD,
+ },
+ [REG_SAMPLE_TIME] = {
+ .lsb = ISL29501_SAMPLE_PERIOD,
+ },
+ [REG_DRIVER_RANGE] = {
+ .lsb = ISL29501_DRIVER_RANGE,
+ },
+ [REG_EMITTER_DAC] = {
+ .lsb = ISL29501_EMITTER_DAC,
+ },
+};
+
+static int isl29501_register_read(struct isl29501_private *isl29501,
+ enum isl29501_register_name name,
+ u32 *val)
+{
+ const struct isl29501_register_desc *reg = &isl29501_registers[name];
+ u8 msb = 0, lsb = 0;
+ s32 ret;
+
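+ /*
+ * Wide values are split across separate MSB and LSB byte
+ * registers; single-byte values only define .lsb in the
+ * descriptor table.
+ */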
+ mutex_lock(&isl29501->lock);
+ if (reg->msb) {
+ ret = i2c_smbus_read_byte_data(isl29501->client, reg->msb);
+ if (ret < 0)
+ goto err;
+ msb = ret;
+ }
+
+ if (reg->lsb) {
+ ret = i2c_smbus_read_byte_data(isl29501->client, reg->lsb);
+ if (ret < 0)
+ goto err;
+ lsb = ret;
+ }
+ mutex_unlock(&isl29501->lock);
+
+ *val = (msb << 8) + lsb;
+
+ return 0;
+err:
+ mutex_unlock(&isl29501->lock);
+
+ return ret;
+}
+
+static int isl29501_register_write(struct isl29501_private *isl29501,
+ enum isl29501_register_name name,
+ u32 value)
+{
+ const struct isl29501_register_desc *reg = &isl29501_registers[name];
+ u8 msb, lsb;
+ int ret;
+
+ if (!reg->msb && value > U8_MAX)
+ return -ERANGE;
+
+ if (value > U16_MAX)
+ return -ERANGE;
+
+ if (!reg->msb) {
+ lsb = value & 0xFF;
+ } else {
+ msb = (value >> 8) & 0xFF;
+ lsb = value & 0xFF;
+ }
+
+ mutex_lock(&isl29501->lock);
+ if (reg->msb) {
+ ret = i2c_smbus_write_byte_data(isl29501->client,
+ reg->msb, msb);
+ if (ret < 0)
+ goto err;
+ }
+
+ ret = i2c_smbus_write_byte_data(isl29501->client, reg->lsb, lsb);
+
+err:
+ mutex_unlock(&isl29501->lock);
+ return ret;
+}
+
+static ssize_t isl29501_read_ext(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct isl29501_private *isl29501 = iio_priv(indio_dev);
+ enum isl29501_register_name reg = private;
+ int ret;
+ u32 value, gain, coeff, exp;
+
+ switch (reg) {
+ case REG_GAIN:
+ case REG_GAIN_BIAS:
+ ret = isl29501_register_read(isl29501, reg, &gain);
+ if (ret < 0)
+ return ret;
+
+ value = gain;
+ break;
+ case REG_CALIB_PHASE_TEMP_A:
+ case REG_CALIB_PHASE_TEMP_B:
+ case REG_CALIB_PHASE_LIGHT_A:
+ case REG_CALIB_PHASE_LIGHT_B:
+ ret = isl29501_register_read(isl29501, REG_PHASE_EXP, &exp);
+ if (ret < 0)
+ return ret;
+
+ ret = isl29501_register_read(isl29501, reg, &coeff);
+ if (ret < 0)
+ return ret;
+
+ value = coeff << exp;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static int isl29501_set_shadow_coeff(struct isl29501_private *isl29501,
+ enum isl29501_register_name reg,
+ unsigned int val)
+{
+ enum isl29501_correction_coeff coeff;
+
+ switch (reg) {
+ case REG_CALIB_PHASE_TEMP_A:
+ coeff = COEFF_TEMP_A;
+ break;
+ case REG_CALIB_PHASE_TEMP_B:
+ coeff = COEFF_TEMP_B;
+ break;
+ case REG_CALIB_PHASE_LIGHT_A:
+ coeff = COEFF_LIGHT_A;
+ break;
+ case REG_CALIB_PHASE_LIGHT_B:
+ coeff = COEFF_LIGHT_B;
+ break;
+ default:
+ return -EINVAL;
+ }
+ isl29501->shadow_coeffs[coeff] = val;
+
+ return 0;
+}
+
+static int isl29501_write_coeff(struct isl29501_private *isl29501,
+ enum isl29501_correction_coeff coeff,
+ int val)
+{
+ enum isl29501_register_name reg;
+
+ switch (coeff) {
+ case COEFF_TEMP_A:
+ reg = REG_CALIB_PHASE_TEMP_A;
+ break;
+ case COEFF_TEMP_B:
+ reg = REG_CALIB_PHASE_TEMP_B;
+ break;
+ case COEFF_LIGHT_A:
+ reg = REG_CALIB_PHASE_LIGHT_A;
+ break;
+ case COEFF_LIGHT_B:
+ reg = REG_CALIB_PHASE_LIGHT_B;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return isl29501_register_write(isl29501, reg, val);
+}
+
+static unsigned int isl29501_find_corr_exp(unsigned int val,
+ unsigned int max_exp,
+ unsigned int max_mantissa)
+{
+ unsigned int exp = 1;
+
+ /*
+ * Correction coefficients are represented in
+ * mantissa * 2^exponent form, where mantissa and exponent
+ * are stored in two separate registers of the sensor.
+ *
+ * Compute and return the lowest exponent such that
+ * mantissa = value / 2^exponent
+ * satisfies mantissa <= max_mantissa, capped at max_exp. For
+ * example, val = 300 with max_mantissa = 127 tries exponents
+ * 1 (mantissa 150) and 2 (mantissa 75) and returns 2.
+ */
+ if (val <= max_mantissa)
+ return 0;
+
+ while ((val >> exp) > max_mantissa) {
+ exp++;
+
+ if (exp > max_exp)
+ return max_exp;
+ }
+
+ return exp;
+}
+
+static ssize_t isl29501_write_ext(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct isl29501_private *isl29501 = iio_priv(indio_dev);
+ enum isl29501_register_name reg = private;
+ unsigned int val;
+ int max_exp = 0;
+ int ret;
+ int i;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (reg) {
+ case REG_GAIN_BIAS:
+ if (val > U16_MAX)
+ return -ERANGE;
+
+ ret = isl29501_register_write(isl29501, reg, val);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case REG_CALIB_PHASE_TEMP_A:
+ case REG_CALIB_PHASE_TEMP_B:
+ case REG_CALIB_PHASE_LIGHT_A:
+ case REG_CALIB_PHASE_LIGHT_B:
+
+ if (val > (U8_MAX << ISL29501_MAX_EXP_VAL))
+ return -ERANGE;
+
+ /* Store the correction coefficient under its exact form. */
+ ret = isl29501_set_shadow_coeff(isl29501, reg, val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Find the highest exponent needed to represent
+ * correction coefficients.
+ */
+ for (i = 0; i < COEFF_MAX; i++) {
+ int corr;
+ int corr_exp;
+
+ corr = isl29501->shadow_coeffs[i];
+ corr_exp = isl29501_find_corr_exp(corr,
+ ISL29501_MAX_EXP_VAL,
+ U8_MAX / 2);
+ dev_dbg(&isl29501->client->dev,
+ "found exp of corr(%d) = %d\n", corr, corr_exp);
+
+ max_exp = max(max_exp, corr_exp);
+ }
+
+ /*
+ * Represent every correction coefficient under
+ * mantissa * 2^max_exponent form and force the
+ * writing of those coefficients on the sensor.
+ */
+ for (i = 0; i < COEFF_MAX; i++) {
+ int corr;
+ int mantissa;
+
+ corr = isl29501->shadow_coeffs[i];
+ if (!corr)
+ continue;
+
+ mantissa = corr >> max_exp;
+
+ ret = isl29501_write_coeff(isl29501, i, mantissa);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = isl29501_register_write(isl29501, REG_PHASE_EXP, max_exp);
+ if (ret < 0)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#define _ISL29501_EXT_INFO(_name, _ident) { \
+ .name = _name, \
+ .read = isl29501_read_ext, \
+ .write = isl29501_write_ext, \
+ .private = _ident, \
+ .shared = IIO_SEPARATE, \
+}
+
+static const struct iio_chan_spec_ext_info isl29501_ext_info[] = {
+ _ISL29501_EXT_INFO("agc_gain", REG_GAIN),
+ _ISL29501_EXT_INFO("agc_gain_bias", REG_GAIN_BIAS),
+ _ISL29501_EXT_INFO("calib_phase_temp_a", REG_CALIB_PHASE_TEMP_A),
+ _ISL29501_EXT_INFO("calib_phase_temp_b", REG_CALIB_PHASE_TEMP_B),
+ _ISL29501_EXT_INFO("calib_phase_light_a", REG_CALIB_PHASE_LIGHT_A),
+ _ISL29501_EXT_INFO("calib_phase_light_b", REG_CALIB_PHASE_LIGHT_B),
+ { },
+};
+
+#define ISL29501_DISTANCE_SCAN_INDEX 0
+#define ISL29501_TIMESTAMP_SCAN_INDEX 1
+
+static const struct iio_chan_spec isl29501_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .scan_index = ISL29501_DISTANCE_SCAN_INDEX,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .ext_info = isl29501_ext_info,
+ },
+ {
+ .type = IIO_PHASE,
+ .scan_index = -1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_CURRENT,
+ .scan_index = -1,
+ .output = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_TEMP,
+ .scan_index = -1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .scan_index = -1,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_CLEAR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(ISL29501_TIMESTAMP_SCAN_INDEX),
+};
+
+static int isl29501_reset_registers(struct isl29501_private *isl29501)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(isl29501->client,
+ ISL29501_COMMAND_REGISTER,
+ ISL29501_RESET_ALL_REGISTERS);
+ if (ret < 0) {
+ dev_err(&isl29501->client->dev,
+ "cannot reset registers %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(isl29501->client,
+ ISL29501_COMMAND_REGISTER,
+ ISL29501_RESET_INT_SM);
+ if (ret < 0)
+ dev_err(&isl29501->client->dev,
+ "cannot reset state machine %d\n", ret);
+
+ return ret;
+}
+
+static int isl29501_begin_acquisition(struct isl29501_private *isl29501)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(isl29501->client,
+ ISL29501_COMMAND_REGISTER,
+ ISL29501_EMUL_SAMPLE_START_PIN);
+ if (ret < 0)
+ dev_err(&isl29501->client->dev,
+ "cannot begin acquisition %d\n", ret);
+
+ return ret;
+}
+
+static IIO_CONST_ATTR_INT_TIME_AVAIL(ISL29501_INT_TIME_AVAILABLE);
+static IIO_CONST_ATTR(out_current_scale_available,
+ ISL29501_CURRENT_SCALE_AVAILABLE);
+
+static struct attribute *isl29501_attributes[] = {
+ &iio_const_attr_integration_time_available.dev_attr.attr,
+ &iio_const_attr_out_current_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group isl29501_attribute_group = {
+ .attrs = isl29501_attributes,
+};
+
+static const int isl29501_current_scale_table[][2] = {
+ {0, 3900}, {0, 7800}, {0, 11800}, {0, 15700},
+ {0, 19600}, {0, 23500}, {0, 27500}, {0, 31400},
+ {0, 35200}, {0, 39200}, {0, 43100}, {0, 47100},
+ {0, 51000}, {0, 54900}, {0, 58800},
+};
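+
+/*
+ * A DRIVER_RANGE register value of 0 reports a scale of 0; values
+ * 1..15 index the table above (see isl29501_get_scale() and
+ * isl29501_set_scale()).
+ */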
+
+static const int isl29501_int_time[][2] = {
+ {0, 70}, /* 0.07 ms */
+ {0, 140}, /* 0.14 ms */
+ {0, 280}, /* 0.28 ms */
+ {0, 570}, /* 0.57 ms */
+ {0, 1140}, /* 1.14 ms */
+ {0, 2280}, /* 2.28 ms */
+ {0, 4550}, /* 4.55 ms */
+ {0, 9100}, /* 9.1 ms */
+ {0, 18200}, /* 18.2 ms */
+ {0, 36400}, /* 36.4 ms */
+ {0, 72810}, /* 72.81 ms */
+ {0, 145610} /* 145.61 ms */
+};
+
+static int isl29501_get_raw(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int *raw)
+{
+ int ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = isl29501_register_read(isl29501, REG_DISTANCE, raw);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_INTENSITY:
+ ret = isl29501_register_read(isl29501,
+ REG_AMBIENT_LIGHT,
+ raw);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_PHASE:
+ ret = isl29501_register_read(isl29501, REG_PHASE, raw);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CURRENT:
+ ret = isl29501_register_read(isl29501, REG_EMITTER_DAC, raw);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ ret = isl29501_register_read(isl29501, REG_TEMPERATURE, raw);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_get_scale(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2)
+{
+ int ret;
+ u32 current_scale;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ /* distance = raw_distance * 33.31 / 65536 (m) */
+ *val = 3331;
+ *val2 = 6553600;
+
+ return IIO_VAL_FRACTIONAL;
+ case IIO_PHASE:
+ /* phase = raw_phase * 2pi / 65536 (rad) */
+ *val = 0;
+ *val2 = 95874;
+
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_INTENSITY:
+ /* light = raw_light * 35 / 10000 (mA) */
+ *val = 35;
+ *val2 = 10000;
+
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CURRENT:
+ ret = isl29501_register_read(isl29501,
+ REG_DRIVER_RANGE,
+ &current_scale);
+ if (ret < 0)
+ return ret;
+
+ if (current_scale > ARRAY_SIZE(isl29501_current_scale_table))
+ return -EINVAL;
+
+ if (!current_scale) {
+ *val = 0;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ }
+
+ *val = isl29501_current_scale_table[current_scale - 1][0];
+ *val2 = isl29501_current_scale_table[current_scale - 1][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ /* temperature = raw_temperature * 125 / 100000 (milli °C) */
+ *val = 125;
+ *val2 = 100000;
+
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_get_calibbias(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int *bias)
+{
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return isl29501_register_read(isl29501,
+ REG_DISTANCE_BIAS,
+ bias);
+ case IIO_TEMP:
+ return isl29501_register_read(isl29501,
+ REG_TEMPERATURE_BIAS,
+ bias);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_get_inttime(struct isl29501_private *isl29501,
+ int *val, int *val2)
+{
+ int ret;
+ u32 inttime;
+
+ ret = isl29501_register_read(isl29501, REG_INT_TIME, &inttime);
+ if (ret < 0)
+ return ret;
+
+ if (inttime >= ARRAY_SIZE(isl29501_int_time))
+ return -EINVAL;
+
+ *val = isl29501_int_time[inttime][0];
+ *val2 = isl29501_int_time[inttime][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int isl29501_get_freq(struct isl29501_private *isl29501,
+ int *val, int *val2)
+{
+ int ret;
+ u32 sample_time;
+ unsigned long long freq;
+ u32 temp;
+
+ ret = isl29501_register_read(isl29501, REG_SAMPLE_TIME, &sample_time);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * freq (Hz) = 1 / (0.000450 * (sample_time + 1)); computed in uHz
+ * so the fractional part survives do_div(), e.g. sample_time = 21
+ * gives 101.010101 Hz.
+ */
+ freq = 1000000ULL * 1000000ULL;
+
+ do_div(freq, 450 * (sample_time + 1));
+
+ temp = do_div(freq, 1000000);
+ *val = freq;
+ *val2 = temp;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int isl29501_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct isl29501_private *isl29501 = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return isl29501_get_raw(isl29501, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ return isl29501_get_scale(isl29501, chan, val, val2);
+ case IIO_CHAN_INFO_INT_TIME:
+ return isl29501_get_inttime(isl29501, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return isl29501_get_freq(isl29501, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return isl29501_get_calibbias(isl29501, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_set_raw(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int raw)
+{
+ switch (chan->type) {
+ case IIO_CURRENT:
+ return isl29501_register_write(isl29501, REG_EMITTER_DAC, raw);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_set_inttime(struct isl29501_private *isl29501,
+ int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(isl29501_int_time); i++) {
+ if (isl29501_int_time[i][0] == val &&
+ isl29501_int_time[i][1] == val2) {
+ return isl29501_register_write(isl29501,
+ REG_INT_TIME,
+ i);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int isl29501_set_scale(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int val, int val2)
+{
+ int i;
+
+ if (chan->type != IIO_CURRENT)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(isl29501_current_scale_table); i++) {
+ if (isl29501_current_scale_table[i][0] == val &&
+ isl29501_current_scale_table[i][1] == val2) {
+ return isl29501_register_write(isl29501,
+ REG_DRIVER_RANGE,
+ i + 1);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int isl29501_set_calibbias(struct isl29501_private *isl29501,
+ const struct iio_chan_spec *chan,
+ int bias)
+{
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return isl29501_register_write(isl29501,
+ REG_DISTANCE_BIAS,
+ bias);
+ case IIO_TEMP:
+ return isl29501_register_write(isl29501,
+ REG_TEMPERATURE_BIAS,
+ bias);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int isl29501_set_freq(struct isl29501_private *isl29501,
+ int val, int val2)
+{
+ int freq;
+ unsigned long long sample_time;
+
+ /* sample_time + 1 = 1 / (0.000450 * sample_freq), freq in uHz */
+ freq = val * 1000000 + val2 % 1000000;
+ sample_time = 2222ULL * 1000000ULL;
+ do_div(sample_time, freq);
+
+ sample_time -= 1;
+
+ if (sample_time > 255)
+ return -ERANGE;
+
+ return isl29501_register_write(isl29501, REG_SAMPLE_TIME, sample_time);
+}
+
+static int isl29501_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct isl29501_private *isl29501 = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return isl29501_set_raw(isl29501, chan, val);
+ case IIO_CHAN_INFO_INT_TIME:
+ return isl29501_set_inttime(isl29501, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return isl29501_set_freq(isl29501, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return isl29501_set_scale(isl29501, chan, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return isl29501_set_calibbias(isl29501, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info isl29501_info = {
+ .read_raw = &isl29501_read_raw,
+ .write_raw = &isl29501_write_raw,
+ .attrs = &isl29501_attribute_group,
+};
+
+static int isl29501_init_chip(struct isl29501_private *isl29501)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(isl29501->client, ISL29501_DEVICE_ID);
+ if (ret < 0) {
+ dev_err(&isl29501->client->dev, "Error reading device id\n");
+ return ret;
+ }
+
+ if (ret != ISL29501_ID) {
+ dev_err(&isl29501->client->dev,
+ "Wrong chip id, got %x expected %x\n",
+ ret, ISL29501_ID);
+ return -ENODEV;
+ }
+
+ ret = isl29501_reset_registers(isl29501);
+ if (ret < 0)
+ return ret;
+
+ return isl29501_begin_acquisition(isl29501);
+}
+
+static irqreturn_t isl29501_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct isl29501_private *isl29501 = iio_priv(indio_dev);
+ const unsigned long *active_mask = indio_dev->active_scan_mask;
+ u32 buffer[4] = {}; /* 16-bit sample + padding + 64-bit timestamp */
+
+ if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask))
+ isl29501_register_read(isl29501, REG_DISTANCE, buffer);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int isl29501_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct isl29501_private *isl29501;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*isl29501));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ isl29501 = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ isl29501->client = client;
+
+ mutex_init(&isl29501->lock);
+
+ ret = isl29501_init_chip(isl29501);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = isl29501_channels;
+ indio_dev->num_channels = ARRAY_SIZE(isl29501_channels);
+ indio_dev->name = client->name;
+ indio_dev->info = &isl29501_info;
+
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ iio_pollfunc_store_time,
+ isl29501_trigger_handler,
+ NULL);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to setup iio triggered buffer\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id isl29501_id[] = {
+ {"isl29501", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, isl29501_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id isl29501_i2c_matches[] = {
+ { .compatible = "renesas,isl29501" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, isl29501_i2c_matches);
+#endif
+
+static struct i2c_driver isl29501_driver = {
+ .driver = {
+ .name = "isl29501",
+ .of_match_table = of_match_ptr(isl29501_i2c_matches),
+ },
+ .id_table = isl29501_id,
+ .probe = isl29501_probe,
+};
+module_i2c_driver(isl29501_driver);
+
+MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>");
+MODULE_DESCRIPTION("ISL29501 Time of Flight sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index d619e8634a00..13a4cec64ea8 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -433,11 +433,11 @@ static int mlx90614_wakeup(struct mlx90614_data *data)
dev_dbg(&data->client->dev, "Requesting wake-up");
- i2c_lock_adapter(data->client->adapter);
+ i2c_lock_bus(data->client->adapter, I2C_LOCK_ROOT_ADAPTER);
gpiod_direction_output(data->wakeup_gpio, 0);
msleep(MLX90614_TIMING_WAKEUP);
gpiod_direction_input(data->wakeup_gpio);
- i2c_unlock_adapter(data->client->adapter);
+ i2c_unlock_bus(data->client->adapter, I2C_LOCK_ROOT_ADAPTER);
data->ready_timestamp = jiffies +
msecs_to_jiffies(MLX90614_TIMING_STARTUP);
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index b03af54367c0..d160d2d1f3a3 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -37,7 +37,7 @@ config INFINIBAND_USER_ACCESS
config INFINIBAND_USER_ACCESS_UCM
bool "Userspace CM (UCM, DEPRECATED)"
- depends on BROKEN
+ depends on BROKEN || COMPILE_TEST
depends on INFINIBAND_USER_ACCESS
help
The UCM module has known security flaws, which no one is
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 61667705d746..867cee5e27b2 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -35,6 +35,7 @@ ib_ucm-y := ucm.o
ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
rdma_core.o uverbs_std_types.o uverbs_ioctl.o \
- uverbs_ioctl_merge.o uverbs_std_types_cq.o \
+ uverbs_std_types_cq.o \
uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
- uverbs_std_types_mr.o uverbs_std_types_counters.o
+ uverbs_std_types_mr.o uverbs_std_types_counters.o \
+ uverbs_uapi.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 4f32c4062fb6..46b855a42884 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -188,7 +188,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
return -ENODATA;
}
-int rdma_addr_size(struct sockaddr *addr)
+int rdma_addr_size(const struct sockaddr *addr)
{
switch (addr->sa_family) {
case AF_INET:
@@ -315,19 +315,17 @@ static int dst_fetch_ha(const struct dst_entry *dst,
int ret = 0;
n = dst_neigh_lookup(dst, daddr);
+ if (!n)
+ return -ENODATA;
- rcu_read_lock();
- if (!n || !(n->nud_state & NUD_VALID)) {
- if (n)
- neigh_event_send(n, NULL);
+ if (!(n->nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
ret = -ENODATA;
} else {
rdma_copy_addr(dev_addr, dst->dev, n->ha);
}
- rcu_read_unlock();
- if (n)
- neigh_release(n);
+ neigh_release(n);
return ret;
}
@@ -587,7 +585,7 @@ static void process_one_req(struct work_struct *_work)
spin_unlock_bh(&lock);
}
-int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
+int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
struct rdma_dev_addr *addr, int timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 81d66f56e38f..0bee1f4b914e 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -66,20 +66,28 @@ enum gid_attr_find_mask {
GID_ATTR_FIND_MASK_GID_TYPE = 1UL << 3,
};
-enum gid_table_entry_props {
- GID_TABLE_ENTRY_INVALID = 1UL << 0,
- GID_TABLE_ENTRY_DEFAULT = 1UL << 1,
+enum gid_table_entry_state {
+ GID_TABLE_ENTRY_INVALID = 1,
+ GID_TABLE_ENTRY_VALID = 2,
+ /*
+ * Indicates that the entry is pending removal; there may
+ * still be active users of this GID entry.
+ * When the last user releases its reference, the entry is
+ * detached from the table.
+ */
+ GID_TABLE_ENTRY_PENDING_DEL = 3,
};
struct ib_gid_table_entry {
- unsigned long props;
- union ib_gid gid;
- struct ib_gid_attr attr;
- void *context;
+ struct kref kref;
+ struct work_struct del_work;
+ struct ib_gid_attr attr;
+ void *context;
+ enum gid_table_entry_state state;
};
struct ib_gid_table {
- int sz;
+ int sz;
/* In RoCE, adding a GID to the table requires:
* (a) Find if this GID is already exists.
* (b) Find a free space.
@@ -91,13 +99,16 @@ struct ib_gid_table {
*
**/
/* Any writer to data_vec must hold this lock and the write side of
- * rwlock. readers must hold only rwlock. All writers must be in a
+ * rwlock. Readers must hold only rwlock. All writers must be in a
* sleepable context.
*/
- struct mutex lock;
- /* rwlock protects data_vec[ix]->props. */
- rwlock_t rwlock;
- struct ib_gid_table_entry *data_vec;
+ struct mutex lock;
+ /* rwlock protects data_vec[ix]->state and entry pointer.
+ */
+ rwlock_t rwlock;
+ struct ib_gid_table_entry **data_vec;
+ /* bit field, each bit indicates the index of default GID */
+ u32 default_gid_indices;
};
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
@@ -135,6 +146,19 @@ bool rdma_is_zero_gid(const union ib_gid *gid)
}
EXPORT_SYMBOL(rdma_is_zero_gid);
+/**
+ * is_gid_index_default - Check if a given index belongs to the
+ * reserved default GIDs.
+ * @table: GID table pointer
+ * @index: Index to check in GID table
+ * Returns true if the index is one of the reserved default GID indices,
+ * otherwise returns false.
+ */
+static bool is_gid_index_default(const struct ib_gid_table *table,
+ unsigned int index)
+{
+ return index < 32 && (BIT(index) & table->default_gid_indices);
+}
+
int ib_cache_gid_parse_type_str(const char *buf)
{
unsigned int i;
@@ -164,26 +188,136 @@ static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
return device->cache.ports[port - rdma_start_port(device)].gid;
}
-static void del_roce_gid(struct ib_device *device, u8 port_num,
- struct ib_gid_table *table, int ix)
+static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
+{
+ return !entry;
+}
+
+static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
+{
+ return entry && entry->state == GID_TABLE_ENTRY_VALID;
+}
+
+static void schedule_free_gid(struct kref *kref)
{
+ struct ib_gid_table_entry *entry =
+ container_of(kref, struct ib_gid_table_entry, kref);
+
+ queue_work(ib_wq, &entry->del_work);
+}
+
+static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
+{
+ struct ib_device *device = entry->attr.device;
+ u8 port_num = entry->attr.port_num;
+ struct ib_gid_table *table = rdma_gid_table(device, port_num);
+
pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- device->name, port_num, ix,
- table->data_vec[ix].gid.raw);
+ device->name, port_num, entry->attr.index,
+ entry->attr.gid.raw);
+
+ if (rdma_cap_roce_gid_table(device, port_num) &&
+ entry->state != GID_TABLE_ENTRY_INVALID)
+ device->del_gid(&entry->attr, &entry->context);
+
+ write_lock_irq(&table->rwlock);
- if (rdma_cap_roce_gid_table(device, port_num))
- device->del_gid(&table->data_vec[ix].attr,
- &table->data_vec[ix].context);
- dev_put(table->data_vec[ix].attr.ndev);
+ /*
+ * Only clear the slot if it still refers to this entry; if a new
+ * entry was stored at this index while we waited to free the old
+ * one, leave it in place.
+ */
+ if (entry == table->data_vec[entry->attr.index])
+ table->data_vec[entry->attr.index] = NULL;
+ /* Now this index is ready to be allocated */
+ write_unlock_irq(&table->rwlock);
+
+ if (entry->attr.ndev)
+ dev_put(entry->attr.ndev);
+ kfree(entry);
}
-static int add_roce_gid(struct ib_gid_table *table,
- const union ib_gid *gid,
- const struct ib_gid_attr *attr)
+static void free_gid_entry(struct kref *kref)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(kref, struct ib_gid_table_entry, kref);
+
+ free_gid_entry_locked(entry);
+}
+
+/**
+ * free_gid_work - Release reference to the GID entry
+ * @work: Work structure to refer to GID entry which needs to be
+ * deleted.
+ *
+ * free_gid_work() frees the entry from the HCA's hardware table
+ * if provider supports it. It releases reference to netdevice.
+ */
+static void free_gid_work(struct work_struct *work)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(work, struct ib_gid_table_entry, del_work);
+ struct ib_device *device = entry->attr.device;
+ u8 port_num = entry->attr.port_num;
+ struct ib_gid_table *table = rdma_gid_table(device, port_num);
+
+ mutex_lock(&table->lock);
+ free_gid_entry_locked(entry);
+ mutex_unlock(&table->lock);
+}
+
+static struct ib_gid_table_entry *
+alloc_gid_entry(const struct ib_gid_attr *attr)
{
struct ib_gid_table_entry *entry;
- int ix = attr->index;
- int ret = 0;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+ kref_init(&entry->kref);
+ memcpy(&entry->attr, attr, sizeof(*attr));
+ if (entry->attr.ndev)
+ dev_hold(entry->attr.ndev);
+ INIT_WORK(&entry->del_work, free_gid_work);
+ entry->state = GID_TABLE_ENTRY_INVALID;
+ return entry;
+}
+
+static void store_gid_entry(struct ib_gid_table *table,
+ struct ib_gid_table_entry *entry)
+{
+ entry->state = GID_TABLE_ENTRY_VALID;
+
+ pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
+ entry->attr.device->name, entry->attr.port_num,
+ entry->attr.index, entry->attr.gid.raw);
+
+ lockdep_assert_held(&table->lock);
+ write_lock_irq(&table->rwlock);
+ table->data_vec[entry->attr.index] = entry;
+ write_unlock_irq(&table->rwlock);
+}
+
+static void get_gid_entry(struct ib_gid_table_entry *entry)
+{
+ kref_get(&entry->kref);
+}
+
+static void put_gid_entry(struct ib_gid_table_entry *entry)
+{
+ kref_put(&entry->kref, schedule_free_gid);
+}
+
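+/*
+ * Variant of put_gid_entry() for callers already holding table->lock:
+ * the entry can be freed synchronously instead of deferring to the
+ * workqueue.
+ */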
+static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
+{
+ kref_put(&entry->kref, free_gid_entry);
+}
+
+static int add_roce_gid(struct ib_gid_table_entry *entry)
+{
+ const struct ib_gid_attr *attr = &entry->attr;
+ int ret;
if (!attr->ndev) {
pr_err("%s NULL netdev device=%s port=%d index=%d\n",
@@ -191,38 +325,22 @@ static int add_roce_gid(struct ib_gid_table *table,
attr->index);
return -EINVAL;
}
-
- entry = &table->data_vec[ix];
- if ((entry->props & GID_TABLE_ENTRY_INVALID) == 0) {
- WARN(1, "GID table corruption device=%s port=%d index=%d\n",
- attr->device->name, attr->port_num,
- attr->index);
- return -EINVAL;
- }
-
if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
- ret = attr->device->add_gid(gid, attr, &entry->context);
+ ret = attr->device->add_gid(attr, &entry->context);
if (ret) {
pr_err("%s GID add failed device=%s port=%d index=%d\n",
__func__, attr->device->name, attr->port_num,
attr->index);
- goto add_err;
+ return ret;
}
}
- dev_hold(attr->ndev);
-
-add_err:
- if (!ret)
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- attr->device->name, attr->port_num, ix, gid->raw);
- return ret;
+ return 0;
}
/**
* add_modify_gid - Add or modify GID table entry
*
* @table: GID table in which GID to be added or modified
- * @gid: GID content
* @attr: Attributes of the GID
*
 * Returns 0 on success or appropriate error code. It accepts a zero
@@ -230,34 +348,42 @@ add_err:
 * GID; however, such zero GIDs are not added to the cache.
*/
static int add_modify_gid(struct ib_gid_table *table,
- const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
- int ret;
+ struct ib_gid_table_entry *entry;
+ int ret = 0;
+
+ /*
+ * Invalidate any old entry in the table to make it safe to write to
+ * this index.
+ */
+ if (is_gid_entry_valid(table->data_vec[attr->index]))
+ put_gid_entry(table->data_vec[attr->index]);
+
+ /*
+ * Some HCAs report multiple GID entries with only one valid GID, and
+ * leave the other unused entries as the zero GID. Convert zero GIDs to
+ * empty table entries instead of storing them.
+ */
+ if (rdma_is_zero_gid(&attr->gid))
+ return 0;
+
+ entry = alloc_gid_entry(attr);
+ if (!entry)
+ return -ENOMEM;
if (rdma_protocol_roce(attr->device, attr->port_num)) {
- ret = add_roce_gid(table, gid, attr);
+ ret = add_roce_gid(entry);
if (ret)
- return ret;
- } else {
- /*
- * Some HCA's report multiple GID entries with only one
- * valid GID, but remaining as zero GID.
- * So ignore such behavior for IB link layer and don't
- * fail the call, but don't add such entry to GID cache.
- */
- if (rdma_is_zero_gid(gid))
- return 0;
+ goto done;
}
- lockdep_assert_held(&table->lock);
- memcpy(&table->data_vec[attr->index].gid, gid, sizeof(*gid));
- memcpy(&table->data_vec[attr->index].attr, attr, sizeof(*attr));
-
- write_lock_irq(&table->rwlock);
- table->data_vec[attr->index].props &= ~GID_TABLE_ENTRY_INVALID;
- write_unlock_irq(&table->rwlock);
+ store_gid_entry(table, entry);
return 0;
+
+done:
+ put_gid_entry(entry);
+ return ret;
}
/**
@@ -272,16 +398,25 @@ static int add_modify_gid(struct ib_gid_table *table,
static void del_gid(struct ib_device *ib_dev, u8 port,
struct ib_gid_table *table, int ix)
{
+ struct ib_gid_table_entry *entry;
+
lockdep_assert_held(&table->lock);
+
+ pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
+ ib_dev->name, port, ix,
+ table->data_vec[ix]->attr.gid.raw);
+
write_lock_irq(&table->rwlock);
- table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
+ entry = table->data_vec[ix];
+ entry->state = GID_TABLE_ENTRY_PENDING_DEL;
+ /*
+ * For non-RoCE protocols, the GID entry slot is immediately ready for reuse.
+ */
+ if (!rdma_protocol_roce(ib_dev, port))
+ table->data_vec[ix] = NULL;
write_unlock_irq(&table->rwlock);
- if (rdma_protocol_roce(ib_dev, port))
- del_roce_gid(ib_dev, port, table, ix);
- memset(&table->data_vec[ix].gid, 0, sizeof(table->data_vec[ix].gid));
- memset(&table->data_vec[ix].attr, 0, sizeof(table->data_vec[ix].attr));
- table->data_vec[ix].context = NULL;
+ put_gid_entry_locked(entry);
}
/* rwlock should be read locked, or lock should be held */
@@ -294,8 +429,8 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
int empty = pempty ? -1 : 0;
while (i < table->sz && (found < 0 || empty < 0)) {
- struct ib_gid_table_entry *data = &table->data_vec[i];
- struct ib_gid_attr *attr = &data->attr;
+ struct ib_gid_table_entry *data = table->data_vec[i];
+ struct ib_gid_attr *attr;
int curr_index = i;
i++;
@@ -306,9 +441,9 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
* so lookup free slot only if requested.
*/
if (pempty && empty < 0) {
- if (data->props & GID_TABLE_ENTRY_INVALID &&
- (default_gid ==
- !!(data->props & GID_TABLE_ENTRY_DEFAULT))) {
+ if (is_gid_entry_free(data) &&
+ default_gid ==
+ is_gid_index_default(table, curr_index)) {
/*
* Found an invalid (free) entry; allocate it.
* If default GID is requested, then our
@@ -323,22 +458,23 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
/*
 * Additionally find_gid() is used to find a valid entry during
- * lookup operation, where validity needs to be checked. So
- * find the empty entry first to continue to search for a free
- * slot and ignore its INVALID flag.
+ * lookup operation; so skip entries that are marked pending
+ * removal or invalid.
*/
- if (data->props & GID_TABLE_ENTRY_INVALID)
+ if (!is_gid_entry_valid(data))
continue;
if (found >= 0)
continue;
+ attr = &data->attr;
if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
attr->gid_type != val->gid_type)
continue;
if (mask & GID_ATTR_FIND_MASK_GID &&
- memcmp(gid, &data->gid, sizeof(*gid)))
+ memcmp(gid, &data->attr.gid, sizeof(*gid)))
continue;
if (mask & GID_ATTR_FIND_MASK_NETDEV &&
@@ -346,8 +482,7 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
continue;
if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
- !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
- default_gid)
+ is_gid_index_default(table, curr_index) != default_gid)
continue;
found = curr_index;
@@ -396,7 +531,8 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
attr->device = ib_dev;
attr->index = empty;
attr->port_num = port;
- ret = add_modify_gid(table, gid, attr);
+ attr->gid = *gid;
+ ret = add_modify_gid(table, attr);
if (!ret)
dispatch_gid_change_event(ib_dev, port);
@@ -492,7 +628,8 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
mutex_lock(&table->lock);
for (ix = 0; ix < table->sz; ix++) {
- if (table->data_vec[ix].attr.ndev == ndev) {
+ if (is_gid_entry_valid(table->data_vec[ix]) &&
+ table->data_vec[ix]->attr.ndev == ndev) {
del_gid(ib_dev, port, table, ix);
deleted = true;
}
@@ -506,103 +643,37 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
return 0;
}
-static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
- union ib_gid *gid, struct ib_gid_attr *attr)
-{
- struct ib_gid_table *table;
-
- table = rdma_gid_table(ib_dev, port);
-
- if (index < 0 || index >= table->sz)
- return -EINVAL;
-
- if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
- return -EINVAL;
-
- memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
- if (attr) {
- memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
- if (attr->ndev)
- dev_hold(attr->ndev);
- }
-
- return 0;
-}
-
-static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
- const union ib_gid *gid,
- const struct ib_gid_attr *val,
- unsigned long mask,
- u8 *port, u16 *index)
-{
- struct ib_gid_table *table;
- u8 p;
- int local_index;
- unsigned long flags;
-
- for (p = 0; p < ib_dev->phys_port_cnt; p++) {
- table = ib_dev->cache.ports[p].gid;
- read_lock_irqsave(&table->rwlock, flags);
- local_index = find_gid(table, gid, val, false, mask, NULL);
- if (local_index >= 0) {
- if (index)
- *index = local_index;
- if (port)
- *port = p + rdma_start_port(ib_dev);
- read_unlock_irqrestore(&table->rwlock, flags);
- return 0;
- }
- read_unlock_irqrestore(&table->rwlock, flags);
- }
-
- return -ENOENT;
-}
-
-static int ib_cache_gid_find(struct ib_device *ib_dev,
- const union ib_gid *gid,
- enum ib_gid_type gid_type,
- struct net_device *ndev, u8 *port,
- u16 *index)
-{
- unsigned long mask = GID_ATTR_FIND_MASK_GID |
- GID_ATTR_FIND_MASK_GID_TYPE;
- struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
-
- if (ndev)
- mask |= GID_ATTR_FIND_MASK_NETDEV;
-
- return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
- mask, port, index);
-}
-
/**
- * ib_find_cached_gid_by_port - Returns the GID table index where a specified
- * GID value occurs. It searches for the specified GID value in the local
- * software cache.
+ * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
+ * a valid GID entry for the given search parameters. It searches for the
+ * specified GID value in the local software cache.
* @device: The device to query.
* @gid: The GID value to search for.
* @gid_type: The GID type to search for.
* @port_num: The port number of the device where the GID value should be
* searched.
- * @ndev: In RoCE, the net device of the device. Null means ignore.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
+ * @ndev: In RoCE, the net device of the device. NULL means ignore.
+ *
+ * Returns the SGID attributes with a held reference if the GID is found,
+ * or an ERR_PTR on error.
+ * The caller must invoke rdma_put_gid_attr() to release the reference.
*/
-int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
- const union ib_gid *gid,
- enum ib_gid_type gid_type,
- u8 port, struct net_device *ndev,
- u16 *index)
+const struct ib_gid_attr *
+rdma_find_gid_by_port(struct ib_device *ib_dev,
+ const union ib_gid *gid,
+ enum ib_gid_type gid_type,
+ u8 port, struct net_device *ndev)
{
int local_index;
struct ib_gid_table *table;
unsigned long mask = GID_ATTR_FIND_MASK_GID |
GID_ATTR_FIND_MASK_GID_TYPE;
struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
+ const struct ib_gid_attr *attr;
unsigned long flags;
if (!rdma_is_port_valid(ib_dev, port))
- return -ENOENT;
+ return ERR_PTR(-ENOENT);
table = rdma_gid_table(ib_dev, port);
@@ -612,89 +683,73 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
read_lock_irqsave(&table->rwlock, flags);
local_index = find_gid(table, gid, &val, false, mask, NULL);
if (local_index >= 0) {
- if (index)
- *index = local_index;
+ get_gid_entry(table->data_vec[local_index]);
+ attr = &table->data_vec[local_index]->attr;
read_unlock_irqrestore(&table->rwlock, flags);
- return 0;
+ return attr;
}
read_unlock_irqrestore(&table->rwlock, flags);
- return -ENOENT;
+ return ERR_PTR(-ENOENT);
}
-EXPORT_SYMBOL(ib_find_cached_gid_by_port);
+EXPORT_SYMBOL(rdma_find_gid_by_port);
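For illustration, a hedged usage sketch of the new return-by-attribute contract; the GID value, port number and netdev below are hypothetical:

	const struct ib_gid_attr *attr;

	attr = rdma_find_gid_by_port(ib_dev, &gid, IB_GID_TYPE_ROCE_UDP_ENCAP,
				     1, ndev);
	if (!IS_ERR(attr)) {
		pr_info("GID found at index %d on port %d\n",
			attr->index, attr->port_num);
		rdma_put_gid_attr(attr);	/* drop the reference taken above */
	}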
/**
- * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
- * GID value occurs
+ * rdma_find_gid_by_filter - Returns the GID table attribute where a
+ * specified GID value occurs
* @device: The device to query.
* @gid: The GID value to search for.
- * @port_num: The port number of the device where the GID value could be
+ * @port: The port number of the device where the GID value could be
* searched.
* @filter: The filter function is executed on any matching GID in the table.
* If the filter function returns true, the corresponding index is returned,
* otherwise, we continue searching the GID table. It's guaranteed that
* while filter is executed, ndev field is valid and the structure won't
* change. filter is executed in an atomic context. filter must not be NULL.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
*
- * ib_cache_gid_find_by_filter() searches for the specified GID value
+ * rdma_find_gid_by_filter() searches for the specified GID value
* of which the filter function returns true in the port's GID table.
- * This function is only supported on RoCE ports.
*
*/
-static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
- const union ib_gid *gid,
- u8 port,
- bool (*filter)(const union ib_gid *,
- const struct ib_gid_attr *,
- void *),
- void *context,
- u16 *index)
+const struct ib_gid_attr *rdma_find_gid_by_filter(
+ struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
+ bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
+ void *),
+ void *context)
{
+ const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
struct ib_gid_table *table;
- unsigned int i;
unsigned long flags;
- bool found = false;
-
+ unsigned int i;
- if (!rdma_is_port_valid(ib_dev, port) ||
- !rdma_protocol_roce(ib_dev, port))
- return -EPROTONOSUPPORT;
+ if (!rdma_is_port_valid(ib_dev, port))
+ return ERR_PTR(-EINVAL);
table = rdma_gid_table(ib_dev, port);
read_lock_irqsave(&table->rwlock, flags);
for (i = 0; i < table->sz; i++) {
- struct ib_gid_attr attr;
+ struct ib_gid_table_entry *entry = table->data_vec[i];
- if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
+ if (!is_gid_entry_valid(entry))
continue;
- if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
+ if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
continue;
- memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));
-
- if (filter(gid, &attr, context)) {
- found = true;
- if (index)
- *index = i;
+ if (filter(gid, &entry->attr, context)) {
+ get_gid_entry(entry);
+ res = &entry->attr;
break;
}
}
read_unlock_irqrestore(&table->rwlock, flags);
-
- if (!found)
- return -ENOENT;
- return 0;
+ return res;
}
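A sketch of a caller-supplied filter, under the assumption that the context pointer carries the net_device to match; the names here are illustrative, not part of this patch:

static bool match_gid_ndev(const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void *context)
{
	/* Runs under the table rwlock, so attr->ndev is stable here */
	return attr->ndev == (struct net_device *)context;
}

	attr = rdma_find_gid_by_filter(ib_dev, &gid, port,
				       match_gid_ndev, ndev);
	if (!IS_ERR(attr))
		rdma_put_gid_attr(attr);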
static struct ib_gid_table *alloc_gid_table(int sz)
{
- struct ib_gid_table *table =
- kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
- int i;
+ struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return NULL;
@@ -707,12 +762,6 @@ static struct ib_gid_table *alloc_gid_table(int sz)
table->sz = sz;
rwlock_init(&table->rwlock);
-
- /* Mark all entries as invalid so that allocator can allocate
- * one of the invalid (free) entry.
- */
- for (i = 0; i < sz; i++)
- table->data_vec[i].props |= GID_TABLE_ENTRY_INVALID;
return table;
err_free_table:
@@ -720,12 +769,30 @@ err_free_table:
return NULL;
}
-static void release_gid_table(struct ib_gid_table *table)
+static void release_gid_table(struct ib_device *device, u8 port,
+ struct ib_gid_table *table)
{
- if (table) {
- kfree(table->data_vec);
- kfree(table);
+ bool leak = false;
+ int i;
+
+ if (!table)
+ return;
+
+ for (i = 0; i < table->sz; i++) {
+ if (is_gid_entry_free(table->data_vec[i]))
+ continue;
+ if (kref_read(&table->data_vec[i]->kref) > 1) {
+ pr_err("GID entry ref leak for %s (index %d) ref=%d\n",
+ device->name, i,
+ kref_read(&table->data_vec[i]->kref));
+ leak = true;
+ }
}
+ if (leak)
+ return;
+
+ kfree(table->data_vec);
+ kfree(table);
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
@@ -739,7 +806,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
mutex_lock(&table->lock);
for (i = 0; i < table->sz; ++i) {
- if (!rdma_is_zero_gid(&table->data_vec[i].gid)) {
+ if (is_gid_entry_valid(table->data_vec[i])) {
del_gid(ib_dev, port, table, i);
deleted = true;
}
@@ -757,12 +824,9 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
{
union ib_gid gid = { };
struct ib_gid_attr gid_attr;
- struct ib_gid_table *table;
unsigned int gid_type;
unsigned long mask;
- table = rdma_gid_table(ib_dev, port);
-
mask = GID_ATTR_FIND_MASK_GID_TYPE |
GID_ATTR_FIND_MASK_DEFAULT |
GID_ATTR_FIND_MASK_NETDEV;
@@ -792,19 +856,12 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
unsigned int i;
unsigned long roce_gid_type_mask;
unsigned int num_default_gids;
- unsigned int current_gid = 0;
roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
num_default_gids = hweight_long(roce_gid_type_mask);
- for (i = 0; i < num_default_gids && i < table->sz; i++) {
- struct ib_gid_table_entry *entry = &table->data_vec[i];
-
- entry->props |= GID_TABLE_ENTRY_DEFAULT;
- current_gid = find_next_bit(&roce_gid_type_mask,
- BITS_PER_LONG,
- current_gid);
- entry->attr.gid_type = current_gid++;
- }
+ /* Reserve starting indices for default GIDs */
+ for (i = 0; i < num_default_gids && i < table->sz; i++)
+ table->default_gid_indices |= BIT(i);
}
@@ -815,7 +872,7 @@ static void gid_table_release_one(struct ib_device *ib_dev)
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
table = ib_dev->cache.ports[port].gid;
- release_gid_table(table);
+ release_gid_table(ib_dev, port, table);
ib_dev->cache.ports[port].gid = NULL;
}
}
@@ -869,69 +926,94 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
return err;
}
-int ib_get_cached_gid(struct ib_device *device,
- u8 port_num,
- int index,
- union ib_gid *gid,
- struct ib_gid_attr *gid_attr)
+/**
+ * rdma_query_gid - Read the GID content from the GID software cache
+ * @device: Device to query the GID
+ * @port_num: Port number of the device
+ * @index: Index of the GID table entry to read
+ * @gid: Pointer to GID where to store the entry's GID
+ *
+ * rdma_query_gid() only reads the GID entry content for the requested device,
+ * port and index. It works for IB, RoCE and iWARP link layers. It doesn't
+ * hold any reference to the GID table entry in the HCA or software cache.
+ *
+ * Returns 0 on success or appropriate error code.
+ */
+int rdma_query_gid(struct ib_device *device, u8 port_num,
+ int index, union ib_gid *gid)
{
- int res;
- unsigned long flags;
struct ib_gid_table *table;
+ unsigned long flags;
+ int res = -EINVAL;
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
- res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
- read_unlock_irqrestore(&table->rwlock, flags);
+ if (index < 0 || index >= table->sz ||
+ !is_gid_entry_valid(table->data_vec[index]))
+ goto done;
+
+ memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
+ res = 0;
+
+done:
+ read_unlock_irqrestore(&table->rwlock, flags);
return res;
}
-EXPORT_SYMBOL(ib_get_cached_gid);
+EXPORT_SYMBOL(rdma_query_gid);
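Since rdma_query_gid() copies the GID out without taking any reference, callers need no matching put; a minimal sketch, with index 0 chosen arbitrarily:

	union ib_gid sgid;
	int ret;

	ret = rdma_query_gid(device, port_num, 0, &sgid);
	if (!ret)
		pr_debug("port %u gid[0] %pI6\n", port_num, sgid.raw);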
/**
- * ib_find_cached_gid - Returns the port number and GID table index where
- * a specified GID value occurs.
+ * rdma_find_gid - Returns SGID attributes if the matching GID is found.
* @device: The device to query.
* @gid: The GID value to search for.
* @gid_type: The GID type to search for.
* @ndev: In RoCE, the net device of the device. NULL means ignore.
- * @port_num: The port number of the device where the GID value was found.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
*
- * ib_find_cached_gid() searches for the specified GID value in
- * the local software cache.
+ * rdma_find_gid() searches for the specified GID value in the software cache.
+ *
+ * Returns the GID attributes if a valid GID is found, or an ERR_PTR on
+ * error. The caller must invoke rdma_put_gid_attr() to release the reference.
*/
-int ib_find_cached_gid(struct ib_device *device,
- const union ib_gid *gid,
- enum ib_gid_type gid_type,
- struct net_device *ndev,
- u8 *port_num,
- u16 *index)
-{
- return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
-}
-EXPORT_SYMBOL(ib_find_cached_gid);
-
-int ib_find_gid_by_filter(struct ib_device *device,
- const union ib_gid *gid,
- u8 port_num,
- bool (*filter)(const union ib_gid *gid,
- const struct ib_gid_attr *,
- void *),
- void *context, u16 *index)
+const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
+ const union ib_gid *gid,
+ enum ib_gid_type gid_type,
+ struct net_device *ndev)
{
- /* Only RoCE GID table supports filter function */
- if (!rdma_protocol_roce(device, port_num) && filter)
- return -EPROTONOSUPPORT;
+ unsigned long mask = GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_GID_TYPE;
+ struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
+ u8 p;
+
+ if (ndev)
+ mask |= GID_ATTR_FIND_MASK_NETDEV;
+
+ for (p = 0; p < device->phys_port_cnt; p++) {
+ struct ib_gid_table *table;
+ unsigned long flags;
+ int index;
+
+ table = device->cache.ports[p].gid;
+ read_lock_irqsave(&table->rwlock, flags);
+ index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
+ if (index >= 0) {
+ const struct ib_gid_attr *attr;
+
+ get_gid_entry(table->data_vec[index]);
+ attr = &table->data_vec[index]->attr;
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return attr;
+ }
+ read_unlock_irqrestore(&table->rwlock, flags);
+ }
- return ib_cache_gid_find_by_filter(device, gid,
- port_num, filter,
- context, index);
+ return ERR_PTR(-ENOENT);
}
+EXPORT_SYMBOL(rdma_find_gid);
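A hedged usage sketch of the cross-port search; unlike rdma_query_gid(), the returned attribute pins the table entry until it is released:

	const struct ib_gid_attr *attr;

	attr = rdma_find_gid(device, &gid, IB_GID_TYPE_IB, NULL);
	if (IS_ERR(attr))
		return PTR_ERR(attr);
	/* attr->port_num and attr->index identify where the GID was found */
	rdma_put_gid_attr(attr);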
int ib_get_cached_pkey(struct ib_device *device,
u8 port_num,
@@ -1089,12 +1171,92 @@ int ib_get_cached_port_state(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_port_state);
+/**
+ * rdma_get_gid_attr - Returns GID attributes for a port of a device
+ * at a requested gid_index, if a valid GID entry exists.
+ * @device: The device to query.
+ * @port_num: The port number on the device where the GID value
+ * is to be queried.
+ * @index: Index of the GID table entry whose attributes are to
+ * be queried.
+ *
+ * rdma_get_gid_attr() acquires a reference to the GID attribute from the
+ * cached GID table. The caller must invoke rdma_put_gid_attr() to release
+ * the reference, regardless of link layer.
+ *
+ * Returns pointer to valid gid attribute or ERR_PTR for the appropriate error
+ * code.
+ */
+const struct ib_gid_attr *
+rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
+{
+ const struct ib_gid_attr *attr = ERR_PTR(-EINVAL);
+ struct ib_gid_table *table;
+ unsigned long flags;
+
+ if (!rdma_is_port_valid(device, port_num))
+ return ERR_PTR(-EINVAL);
+
+ table = rdma_gid_table(device, port_num);
+ if (index < 0 || index >= table->sz)
+ return ERR_PTR(-EINVAL);
+
+ read_lock_irqsave(&table->rwlock, flags);
+ if (!is_gid_entry_valid(table->data_vec[index]))
+ goto done;
+
+ get_gid_entry(table->data_vec[index]);
+ attr = &table->data_vec[index]->attr;
+done:
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return attr;
+}
+EXPORT_SYMBOL(rdma_get_gid_attr);
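A brief sketch of the acquire/release pairing this API establishes; use_gid() is a hypothetical consumer and error handling is trimmed:

	const struct ib_gid_attr *attr;

	attr = rdma_get_gid_attr(device, port_num, index);
	if (IS_ERR(attr))
		return PTR_ERR(attr);
	/* attr, including attr->ndev, stays valid until the put below */
	use_gid(attr);
	rdma_put_gid_attr(attr);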
+
+/**
+ * rdma_put_gid_attr - Release reference to the GID attribute
+ * @attr: Pointer to the GID attribute whose reference
+ * needs to be released.
+ *
+ * rdma_put_gid_attr() must be used to release a reference that was
+ * acquired using rdma_get_gid_attr() or any API which returns a
+ * pointer to an ib_gid_attr, regardless of whether the link layer
+ * is IB or RoCE.
+ *
+ */
+void rdma_put_gid_attr(const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(attr, struct ib_gid_table_entry, attr);
+
+ put_gid_entry(entry);
+}
+EXPORT_SYMBOL(rdma_put_gid_attr);
+
+/**
+ * rdma_hold_gid_attr - Get reference to existing GID attribute
+ *
+ * @attr: Pointer to the GID attribute whose reference
+ * needs to be taken.
+ *
+ * Increase the reference count of a GID attribute to keep it from being
+ * freed. Callers must already hold a reference to the attribute.
+ *
+ */
+void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(attr, struct ib_gid_table_entry, attr);
+
+ get_gid_entry(entry);
+}
+EXPORT_SYMBOL(rdma_hold_gid_attr);
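For example, handing an already-held attribute to a second long-lived owner looks roughly like this; the owner structure is hypothetical:

	rdma_hold_gid_attr(attr);		/* second owner's reference */
	owner->sgid_attr = attr;
	/* ... later, the second owner drops its reference ... */
	rdma_put_gid_attr(owner->sgid_attr);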
+
static int config_non_roce_gid_cache(struct ib_device *device,
u8 port, int gid_tbl_len)
{
struct ib_gid_attr gid_attr = {};
struct ib_gid_table *table;
- union ib_gid gid;
int ret = 0;
int i;
@@ -1106,14 +1268,14 @@ static int config_non_roce_gid_cache(struct ib_device *device,
for (i = 0; i < gid_tbl_len; ++i) {
if (!device->query_gid)
continue;
- ret = device->query_gid(device, port, i, &gid);
+ ret = device->query_gid(device, port, i, &gid_attr.gid);
if (ret) {
pr_warn("query_gid failed (%d) for %s (index %d)\n",
ret, device->name, i);
goto err;
}
gid_attr.index = i;
- add_modify_gid(table, &gid, &gid_attr);
+ add_modify_gid(table, &gid_attr);
}
err:
mutex_unlock(&table->lock);
@@ -1128,13 +1290,10 @@ static void ib_cache_update(struct ib_device *device,
struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
int i;
int ret;
- struct ib_gid_table *table;
if (!rdma_is_port_valid(device, port))
return;
- table = rdma_gid_table(device, port);
-
tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
if (!tprops)
return;
@@ -1296,4 +1455,9 @@ void ib_cache_cleanup_one(struct ib_device *device)
ib_unregister_event_handler(&device->cache.event_handler);
flush_workqueue(ib_wq);
gid_table_cleanup_one(device);
+
+ /*
+ * Flush the wq a second time for any pending GID delete work.
+ */
+ flush_workqueue(ib_wq);
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 27a7b0a2e27a..6e39c27dca8e 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -474,7 +474,7 @@ static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
if (ret)
return ret;
- memcpy(&av->ah_attr, &new_ah_attr, sizeof(new_ah_attr));
+ rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
return 0;
}
@@ -508,31 +508,50 @@ static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
return ret;
}
-static struct cm_port *get_cm_port_from_path(struct sa_path_rec *path)
+static struct cm_port *
+get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
struct cm_device *cm_dev;
struct cm_port *port = NULL;
unsigned long flags;
- u8 p;
- struct net_device *ndev = ib_get_ndev_from_path(path);
-
- read_lock_irqsave(&cm.device_lock, flags);
- list_for_each_entry(cm_dev, &cm.device_list, list) {
- if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
- sa_conv_pathrec_to_gid_type(path),
- ndev, &p, NULL)) {
- port = cm_dev->port[p - 1];
- break;
+
+ if (attr) {
+ read_lock_irqsave(&cm.device_lock, flags);
+ list_for_each_entry(cm_dev, &cm.device_list, list) {
+ if (cm_dev->ib_device == attr->device) {
+ port = cm_dev->port[attr->port_num - 1];
+ break;
+ }
+ }
+ read_unlock_irqrestore(&cm.device_lock, flags);
+ } else {
+ /* The SGID attribute can be NULL in the following
+ * cases:
+ * (a) alternative path
+ * (b) IB link layer without GRH
+ * (c) LAP send messages
+ */
+ read_lock_irqsave(&cm.device_lock, flags);
+ list_for_each_entry(cm_dev, &cm.device_list, list) {
+ attr = rdma_find_gid(cm_dev->ib_device,
+ &path->sgid,
+ sa_conv_pathrec_to_gid_type(path),
+ NULL);
+ if (!IS_ERR(attr)) {
+ port = cm_dev->port[attr->port_num - 1];
+ break;
+ }
}
+ read_unlock_irqrestore(&cm.device_lock, flags);
+ if (port)
+ rdma_put_gid_attr(attr);
}
- read_unlock_irqrestore(&cm.device_lock, flags);
-
- if (ndev)
- dev_put(ndev);
return port;
}
-static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
+static int cm_init_av_by_path(struct sa_path_rec *path,
+ const struct ib_gid_attr *sgid_attr,
+ struct cm_av *av,
struct cm_id_private *cm_id_priv)
{
struct rdma_ah_attr new_ah_attr;
@@ -540,7 +559,7 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
struct cm_port *port;
int ret;
- port = get_cm_port_from_path(path);
+ port = get_cm_port_from_path(path, sgid_attr);
if (!port)
return -EINVAL;
cm_dev = port->cm_dev;
@@ -554,22 +573,26 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
/*
* av->ah_attr might be initialized based on wc or during
- * request processing time. So initialize a new ah_attr on stack.
+ * request processing time, which might hold a reference to sgid_attr.
+ * So initialize a new ah_attr on the stack.
 * If initialization fails, old ah_attr is used for sending any
 * responses. If initialization is successful, then new ah_attr
- * is used by overwriting the old one.
+ * is used by overwriting the old one, so that the right ah_attr
+ * can be used to return an error response.
*/
ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
- &new_ah_attr);
+ &new_ah_attr, sgid_attr);
if (ret)
return ret;
av->timeout = path->packet_life_time + 1;
ret = add_cm_id_to_port_list(cm_id_priv, av, port);
- if (ret)
+ if (ret) {
+ rdma_destroy_ah_attr(&new_ah_attr);
return ret;
- memcpy(&av->ah_attr, &new_ah_attr, sizeof(new_ah_attr));
+ }
+ rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
return 0;
}
@@ -1091,6 +1114,9 @@ retest:
wait_for_completion(&cm_id_priv->comp);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
+
+ rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
+ rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
kfree(cm_id_priv->private_data);
kfree(cm_id_priv);
}
@@ -1230,14 +1256,12 @@ new_id:
}
EXPORT_SYMBOL(ib_cm_insert_listen);
-static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
- enum cm_msg_sequence msg_seq)
+static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
u64 hi_tid, low_tid;
hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
- low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
- (msg_seq << 30));
+ low_tid = (u64)cm_id_priv->id.local_id;
return cpu_to_be64(hi_tid | low_tid);
}
@@ -1265,7 +1289,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
pri_path->opa.slid);
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
+ cm_form_tid(cm_id_priv));
req_msg->local_comm_id = cm_id_priv->id.local_id;
req_msg->service_id = param->service_id;
@@ -1413,12 +1437,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
+ ret = cm_init_av_by_path(param->primary_path,
+ param->ppath_sgid_attr, &cm_id_priv->av,
cm_id_priv);
if (ret)
goto error1;
if (param->alternate_path) {
- ret = cm_init_av_by_path(param->alternate_path,
+ ret = cm_init_av_by_path(param->alternate_path, NULL,
&cm_id_priv->alt_av, cm_id_priv);
if (ret)
goto error1;
@@ -1646,7 +1671,7 @@ static void cm_opa_to_ib_sgid(struct cm_work *work,
(ib_is_opa_gid(&path->sgid))) {
union ib_gid sgid;
- if (ib_get_cached_gid(dev, port_num, 0, &sgid, NULL)) {
+ if (rdma_query_gid(dev, port_num, 0, &sgid)) {
dev_warn(&dev->dev,
"Error updating sgid in CM request\n");
return;
@@ -1691,6 +1716,7 @@ static void cm_format_req_event(struct cm_work *work,
param->retry_count = cm_req_get_retry_count(req_msg);
param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
param->srq = cm_req_get_srq(req_msg);
+ param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
work->cm_event.private_data = &req_msg->private_data;
}
@@ -1914,9 +1940,8 @@ static int cm_req_handler(struct cm_work *work)
struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
struct cm_req_msg *req_msg;
- union ib_gid gid;
- struct ib_gid_attr gid_attr;
const struct ib_global_route *grh;
+ const struct ib_gid_attr *gid_attr;
int ret;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1961,24 +1986,13 @@ static int cm_req_handler(struct cm_work *work)
if (cm_req_has_alt_path(req_msg))
memset(&work->path[1], 0, sizeof(work->path[1]));
grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
- ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
- work->port->port_num,
- grh->sgid_index,
- &gid, &gid_attr);
- if (ret) {
- ib_send_cm_rej(cm_id, IB_CM_REJ_UNSUPPORTED, NULL, 0, NULL, 0);
- goto rejected;
- }
+ gid_attr = grh->sgid_attr;
- if (gid_attr.ndev) {
+ if (gid_attr && gid_attr->ndev) {
work->path[0].rec_type =
- sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
- sa_path_set_ifindex(&work->path[0],
- gid_attr.ndev->ifindex);
- sa_path_set_ndev(&work->path[0],
- dev_net(gid_attr.ndev));
- dev_put(gid_attr.ndev);
+ sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
} else {
+ /* If there is no GID attribute or ndev is NULL, it is not RoCE. */
cm_path_set_rec_type(work->port->cm_dev->ib_device,
work->port->port_num,
&work->path[0],
@@ -1992,15 +2006,14 @@ static int cm_req_handler(struct cm_work *work)
sa_path_set_dmac(&work->path[0],
cm_id_priv->av.ah_attr.roce.dmac);
work->path[0].hop_limit = grh->hop_limit;
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+ ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
cm_id_priv);
if (ret) {
int err;
- err = ib_get_cached_gid(work->port->cm_dev->ib_device,
- work->port->port_num, 0,
- &work->path[0].sgid,
- NULL);
+ err = rdma_query_gid(work->port->cm_dev->ib_device,
+ work->port->port_num, 0,
+ &work->path[0].sgid);
if (err)
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
NULL, 0, NULL, 0);
@@ -2012,8 +2025,8 @@ static int cm_req_handler(struct cm_work *work)
goto rejected;
}
if (cm_req_has_alt_path(req_msg)) {
- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
- cm_id_priv);
+ ret = cm_init_av_by_path(&work->path[1], NULL,
+ &cm_id_priv->alt_av, cm_id_priv);
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
@@ -2451,7 +2464,7 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
u8 private_data_len)
{
cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
+ cm_form_tid(cm_id_priv));
dreq_msg->local_comm_id = cm_id_priv->id.local_id;
dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
@@ -3082,7 +3095,7 @@ static void cm_format_lap(struct cm_lap_msg *lap_msg,
alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
alternate_path->opa.slid);
cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
+ cm_form_tid(cm_id_priv));
lap_msg->local_comm_id = cm_id_priv->id.local_id;
lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
@@ -3136,7 +3149,7 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
+ ret = cm_init_av_by_path(alternate_path, NULL, &cm_id_priv->alt_av,
cm_id_priv);
if (ret)
goto out;
@@ -3279,7 +3292,7 @@ static int cm_lap_handler(struct cm_work *work)
if (ret)
goto unlock;
- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
+ cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av,
cm_id_priv);
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
@@ -3458,7 +3471,7 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
struct ib_cm_sidr_req_param *param)
{
cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
- cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
+ cm_form_tid(cm_id_priv));
sidr_req_msg->request_id = cm_id_priv->id.local_id;
sidr_req_msg->pkey = param->path->pkey;
sidr_req_msg->service_id = param->service_id;
@@ -3481,7 +3494,9 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
+ ret = cm_init_av_by_path(param->path, param->sgid_attr,
+ &cm_id_priv->av,
+ cm_id_priv);
if (ret)
goto out;
@@ -3518,6 +3533,7 @@ out:
EXPORT_SYMBOL(ib_send_cm_sidr_req);
static void cm_format_sidr_req_event(struct cm_work *work,
+ const struct cm_id_private *rx_cm_id,
struct ib_cm_id *listen_id)
{
struct cm_sidr_req_msg *sidr_req_msg;
@@ -3531,6 +3547,7 @@ static void cm_format_sidr_req_event(struct cm_work *work,
param->service_id = sidr_req_msg->service_id;
param->bth_pkey = cm_get_bth_pkey(work);
param->port = work->port->port_num;
+ param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
work->cm_event.private_data = &sidr_req_msg->private_data;
}
@@ -3588,7 +3605,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
cm_id_priv->id.service_id = sidr_req_msg->service_id;
cm_id_priv->id.service_mask = ~cpu_to_be64(0);
- cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
+ cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
cm_process_work(cm_id_priv, work);
cm_deref_id(cur_cm_id_priv);
return 0;
@@ -3665,7 +3682,8 @@ error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
-static void cm_format_sidr_rep_event(struct cm_work *work)
+static void cm_format_sidr_rep_event(struct cm_work *work,
+ const struct cm_id_private *cm_id_priv)
{
struct cm_sidr_rep_msg *sidr_rep_msg;
struct ib_cm_sidr_rep_event_param *param;
@@ -3678,6 +3696,7 @@ static void cm_format_sidr_rep_event(struct cm_work *work)
param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
param->info = &sidr_rep_msg->info;
param->info_len = sidr_rep_msg->info_length;
+ param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
work->cm_event.private_data = &sidr_rep_msg->private_data;
}
@@ -3701,7 +3720,7 @@ static int cm_sidr_rep_handler(struct cm_work *work)
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
spin_unlock_irq(&cm_id_priv->lock);
- cm_format_sidr_rep_event(work);
+ cm_format_sidr_rep_event(work, cm_id_priv);
cm_process_work(cm_id_priv, work);
return 0;
out:
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 8b76f0ef965e..476d4309576d 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -44,13 +44,6 @@
#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
-enum cm_msg_sequence {
- CM_MSG_SEQUENCE_REQ,
- CM_MSG_SEQUENCE_LAP,
- CM_MSG_SEQUENCE_DREQ,
- CM_MSG_SEQUENCE_SIDR
-};
-
struct cm_req_msg {
struct ib_mad_hdr hdr;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index bff10ab141b0..f72677291b69 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -366,7 +366,6 @@ struct cma_multicast {
void *context;
struct sockaddr_storage addr;
struct kref mcref;
- bool igmp_joined;
u8 join_state;
};
@@ -412,11 +411,11 @@ struct cma_req_info {
struct sockaddr_storage listen_addr_storage;
struct sockaddr_storage src_addr_storage;
struct ib_device *device;
- int port;
union ib_gid local_gid;
__be64 service_id;
+ int port;
+ bool has_gid;
u16 pkey;
- bool has_gid:1;
};
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
@@ -491,12 +490,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
{
cma_ref_dev(cma_dev);
id_priv->cma_dev = cma_dev;
- id_priv->gid_type = 0;
id_priv->id.device = cma_dev->device;
id_priv->id.route.addr.dev_addr.transport =
rdma_node_get_transport(cma_dev->device->node_type);
list_add_tail(&id_priv->list, &cma_dev->id_list);
- id_priv->res.type = RDMA_RESTRACK_CM_ID;
rdma_restrack_add(&id_priv->res);
}
@@ -603,46 +600,53 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
return ret;
}
-static inline int cma_validate_port(struct ib_device *device, u8 port,
- enum ib_gid_type gid_type,
- union ib_gid *gid,
- struct rdma_id_private *id_priv)
+static const struct ib_gid_attr *
+cma_validate_port(struct ib_device *device, u8 port,
+ enum ib_gid_type gid_type,
+ union ib_gid *gid,
+ struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
int bound_if_index = dev_addr->bound_dev_if;
+ const struct ib_gid_attr *sgid_attr;
int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
- int ret = -ENODEV;
if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
- return ret;
+ return ERR_PTR(-ENODEV);
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
- return ret;
+ return ERR_PTR(-ENODEV);
if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
ndev = dev_get_by_index(dev_addr->net, bound_if_index);
if (!ndev)
- return ret;
+ return ERR_PTR(-ENODEV);
} else {
gid_type = IB_GID_TYPE_IB;
}
- ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
- ndev, NULL);
-
+ sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
if (ndev)
dev_put(ndev);
+ return sgid_attr;
+}
- return ret;
+static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
+ const struct ib_gid_attr *sgid_attr)
+{
+ WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
+ id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv,
- struct rdma_id_private *listen_id_priv)
+ const struct rdma_id_private *listen_id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr;
struct cma_device *cma_dev;
union ib_gid gid, iboe_gid, *gidp;
+ enum ib_gid_type gid_type;
int ret = -ENODEV;
u8 port;
@@ -662,14 +666,13 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
port = listen_id_priv->id.port_num;
gidp = rdma_protocol_roce(cma_dev->device, port) ?
&iboe_gid : &gid;
-
- ret = cma_validate_port(cma_dev->device, port,
- rdma_protocol_ib(cma_dev->device, port) ?
- IB_GID_TYPE_IB :
- listen_id_priv->gid_type, gidp,
- id_priv);
- if (!ret) {
+ gid_type = listen_id_priv->gid_type;
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, gidp, id_priv);
+ if (!IS_ERR(sgid_attr)) {
id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ ret = 0;
goto out;
}
}
@@ -683,14 +686,13 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
gidp = rdma_protocol_roce(cma_dev->device, port) ?
&iboe_gid : &gid;
-
- ret = cma_validate_port(cma_dev->device, port,
- rdma_protocol_ib(cma_dev->device, port) ?
- IB_GID_TYPE_IB :
- cma_dev->default_gid_type[port - 1],
- gidp, id_priv);
- if (!ret) {
+ gid_type = cma_dev->default_gid_type[port - 1];
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, gidp, id_priv);
+ if (!IS_ERR(sgid_attr)) {
id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ ret = 0;
goto out;
}
}
@@ -732,8 +734,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
continue;
- for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
- &gid, NULL);
+ for (i = 0; !rdma_query_gid(cur_dev->device,
+ p, i, &gid);
i++) {
if (!memcmp(&gid, dgid, sizeof(gid))) {
cma_dev = cur_dev;
@@ -785,12 +787,14 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
id_priv->res.kern_name = caller;
else
rdma_restrack_set_task(&id_priv->res, current);
+ id_priv->res.type = RDMA_RESTRACK_CM_ID;
id_priv->state = RDMA_CM_IDLE;
id_priv->id.context = context;
id_priv->id.event_handler = event_handler;
id_priv->id.ps = ps;
id_priv->id.qp_type = qp_type;
id_priv->tos_set = false;
+ id_priv->gid_type = IB_GID_TYPE_IB;
spin_lock_init(&id_priv->lock);
mutex_init(&id_priv->qp_mutex);
init_completion(&id_priv->comp);
@@ -1036,35 +1040,38 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
}
EXPORT_SYMBOL(rdma_init_qp_attr);
-static inline int cma_zero_addr(struct sockaddr *addr)
+static inline bool cma_zero_addr(const struct sockaddr *addr)
{
switch (addr->sa_family) {
case AF_INET:
return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
case AF_INET6:
- return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
+ return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
case AF_IB:
- return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
+ return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
default:
- return 0;
+ return false;
}
}
-static inline int cma_loopback_addr(struct sockaddr *addr)
+static inline bool cma_loopback_addr(const struct sockaddr *addr)
{
switch (addr->sa_family) {
case AF_INET:
- return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
+ return ipv4_is_loopback(
+ ((struct sockaddr_in *)addr)->sin_addr.s_addr);
case AF_INET6:
- return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
+ return ipv6_addr_loopback(
+ &((struct sockaddr_in6 *)addr)->sin6_addr);
case AF_IB:
- return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
+ return ib_addr_loopback(
+ &((struct sockaddr_ib *)addr)->sib_addr);
default:
- return 0;
+ return false;
}
}
-static inline int cma_any_addr(struct sockaddr *addr)
+static inline bool cma_any_addr(const struct sockaddr *addr)
{
return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
@@ -1087,7 +1094,7 @@ static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
}
}
-static __be16 cma_port(struct sockaddr *addr)
+static __be16 cma_port(const struct sockaddr *addr)
{
struct sockaddr_ib *sib;
@@ -1105,15 +1112,15 @@ static __be16 cma_port(struct sockaddr *addr)
}
}
-static inline int cma_any_port(struct sockaddr *addr)
+static inline int cma_any_port(const struct sockaddr *addr)
{
return !cma_port(addr);
}
static void cma_save_ib_info(struct sockaddr *src_addr,
struct sockaddr *dst_addr,
- struct rdma_cm_id *listen_id,
- struct sa_path_rec *path)
+ const struct rdma_cm_id *listen_id,
+ const struct sa_path_rec *path)
{
struct sockaddr_ib *listen_ib, *ib;
@@ -1198,7 +1205,7 @@ static u16 cma_port_from_service_id(__be64 service_id)
static int cma_save_ip_info(struct sockaddr *src_addr,
struct sockaddr *dst_addr,
- struct ib_cm_event *ib_event,
+ const struct ib_cm_event *ib_event,
__be64 service_id)
{
struct cma_hdr *hdr;
@@ -1228,8 +1235,8 @@ static int cma_save_ip_info(struct sockaddr *src_addr,
static int cma_save_net_info(struct sockaddr *src_addr,
struct sockaddr *dst_addr,
- struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event,
+ const struct rdma_cm_id *listen_id,
+ const struct ib_cm_event *ib_event,
sa_family_t sa_family, __be64 service_id)
{
if (sa_family == AF_IB) {
@@ -1361,7 +1368,23 @@ static bool validate_net_dev(struct net_device *net_dev,
}
}
-static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
+static struct net_device *
+roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
+{
+ const struct ib_gid_attr *sgid_attr = NULL;
+
+ if (ib_event->event == IB_CM_REQ_RECEIVED)
+ sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
+ else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
+ sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;
+
+ if (!sgid_attr)
+ return NULL;
+ dev_hold(sgid_attr->ndev);
+ return sgid_attr->ndev;
+}
+
+static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
struct cma_req_info *req)
{
struct sockaddr *listen_addr =
@@ -1376,8 +1399,12 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
if (err)
return ERR_PTR(err);
- net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
- gid, listen_addr);
+ if (rdma_protocol_roce(req->device, req->port))
+ net_dev = roce_get_net_dev_by_cm_event(ib_event);
+ else
+ net_dev = ib_get_net_dev_by_params(req->device, req->port,
+ req->pkey,
+ gid, listen_addr);
if (!net_dev)
return ERR_PTR(-ENODEV);
@@ -1440,14 +1467,20 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
const struct rdma_addr *addr = &id->route.addr;
if (!net_dev)
- /* This request is an AF_IB request or a RoCE request */
+ /* This request is an AF_IB request */
return (!id->port_num || id->port_num == port_num) &&
- (addr->src_addr.ss_family == AF_IB ||
- rdma_protocol_roce(id->device, port_num));
+ (addr->src_addr.ss_family == AF_IB);
- return !addr->dev_addr.bound_dev_if ||
- (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
- addr->dev_addr.bound_dev_if == net_dev->ifindex);
+ /*
+ * Net namespaces must match, and if the listener is listening
+ * on a specific netdevice, then that netdevice must match as well.
+ */
+ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
+ (!!addr->dev_addr.bound_dev_if ==
+ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
+ return true;
+ else
+ return false;
}
static struct rdma_id_private *cma_find_listener(
@@ -1480,9 +1513,10 @@ static struct rdma_id_private *cma_find_listener(
return ERR_PTR(-EINVAL);
}
-static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
- struct ib_cm_event *ib_event,
- struct net_device **net_dev)
+static struct rdma_id_private *
+cma_ib_id_from_event(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *ib_event,
+ struct net_device **net_dev)
{
struct cma_req_info req;
struct rdma_bind_list *bind_list;
@@ -1498,10 +1532,6 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
/* Assuming the protocol is AF_IB */
*net_dev = NULL;
- } else if (rdma_protocol_roce(req.device, req.port)) {
- /* TODO find the net dev matching the request parameters
- * through the RoCE GID table */
- *net_dev = NULL;
} else {
return ERR_CAST(*net_dev);
}
@@ -1629,6 +1659,21 @@ static void cma_release_port(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
+static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
+ struct cma_multicast *mc)
+{
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ struct net_device *ndev = NULL;
+
+ if (dev_addr->bound_dev_if)
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ if (ndev) {
+ cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
+ dev_put(ndev);
+ }
+ kref_put(&mc->mcref, release_mc);
+}
+
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
struct cma_multicast *mc;
@@ -1642,22 +1687,7 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
} else {
- if (mc->igmp_joined) {
- struct rdma_dev_addr *dev_addr =
- &id_priv->id.route.addr.dev_addr;
- struct net_device *ndev = NULL;
-
- if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net,
- dev_addr->bound_dev_if);
- if (ndev) {
- cma_igmp_send(ndev,
- &mc->multicast.ib->rec.mgid,
- false);
- dev_put(ndev);
- }
- }
- kref_put(&mc->mcref, release_mc);
+ cma_leave_roce_mc_group(id_priv, mc);
}
}
}
@@ -1699,6 +1729,10 @@ void rdma_destroy_id(struct rdma_cm_id *id)
cma_deref_id(id_priv->id.context);
kfree(id_priv->id.route.path_rec);
+
+ if (id_priv->id.route.addr.dev_addr.sgid_attr)
+ rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
+
put_net(id_priv->id.route.addr.dev_addr.net);
kfree(id_priv);
}
@@ -1730,7 +1764,7 @@ reject:
}
static void cma_set_rep_event_data(struct rdma_cm_event *event,
- struct ib_cm_rep_event_param *rep_data,
+ const struct ib_cm_rep_event_param *rep_data,
void *private_data)
{
event->param.conn.private_data = private_data;
@@ -1743,10 +1777,11 @@ static void cma_set_rep_event_data(struct rdma_cm_event *event,
event->param.conn.qp_num = rep_data->remote_qpn;
}
-static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
+static int cma_ib_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv = cm_id->context;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
int ret = 0;
mutex_lock(&id_priv->handler_mutex);
@@ -1756,7 +1791,6 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
id_priv->state != RDMA_CM_DISCONNECT))
goto out;
- memset(&event, 0, sizeof event);
switch (ib_event->event) {
case IB_CM_REQ_ERROR:
case IB_CM_REP_ERROR:
@@ -1825,9 +1859,10 @@ out:
return ret;
}
-static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event,
- struct net_device *net_dev)
+static struct rdma_id_private *
+cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
+ const struct ib_cm_event *ib_event,
+ struct net_device *net_dev)
{
struct rdma_id_private *listen_id_priv;
struct rdma_id_private *id_priv;
@@ -1888,11 +1923,12 @@ err:
return NULL;
}
-static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event,
- struct net_device *net_dev)
+static struct rdma_id_private *
+cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
+ const struct ib_cm_event *ib_event,
+ struct net_device *net_dev)
{
- struct rdma_id_private *listen_id_priv;
+ const struct rdma_id_private *listen_id_priv;
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
@@ -1932,7 +1968,7 @@ err:
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
- struct ib_cm_req_event_param *req_data,
+ const struct ib_cm_req_event_param *req_data,
void *private_data, int offset)
{
event->param.conn.private_data = private_data + offset;
@@ -1946,7 +1982,8 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
event->param.conn.qp_num = req_data->remote_qpn;
}
-static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
+static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
+ const struct ib_cm_event *ib_event)
{
return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
(ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
@@ -1955,19 +1992,20 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e
(!id->qp_type));
}
-static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
+static int cma_ib_req_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *ib_event)
{
struct rdma_id_private *listen_id, *conn_id = NULL;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
struct net_device *net_dev;
u8 offset;
int ret;
- listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
+ listen_id = cma_ib_id_from_event(cm_id, ib_event, &net_dev);
if (IS_ERR(listen_id))
return PTR_ERR(listen_id);
- if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
+ if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
ret = -EINVAL;
goto net_dev_put;
}
@@ -1978,16 +2016,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
goto err1;
}
- memset(&event, 0, sizeof event);
offset = cma_user_data_offset(listen_id);
event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
- conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
+ conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
event.param.ud.private_data = ib_event->private_data + offset;
event.param.ud.private_data_len =
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
} else {
- conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
+ conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
ib_event->private_data, offset);
}
@@ -2087,7 +2124,7 @@ EXPORT_SYMBOL(rdma_read_gids);
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
int ret = 0;
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
@@ -2096,7 +2133,6 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
if (id_priv->state != RDMA_CM_CONNECT)
goto out;
- memset(&event, 0, sizeof event);
switch (iw_event->event) {
case IW_CM_EVENT_CLOSE:
event.event = RDMA_CM_EVENT_DISCONNECTED;
@@ -2156,11 +2192,17 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
{
struct rdma_cm_id *new_cm_id;
struct rdma_id_private *listen_id, *conn_id;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
int ret = -ECONNABORTED;
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
+ event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+ event.param.conn.private_data = iw_event->private_data;
+ event.param.conn.private_data_len = iw_event->private_data_len;
+ event.param.conn.initiator_depth = iw_event->ird;
+ event.param.conn.responder_resources = iw_event->ord;
+
listen_id = cm_id->context;
mutex_lock(&listen_id->handler_mutex);
@@ -2202,13 +2244,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
- memset(&event, 0, sizeof event);
- event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
- event.param.conn.private_data = iw_event->private_data;
- event.param.conn.private_data_len = iw_event->private_data_len;
- event.param.conn.initiator_depth = iw_event->ird;
- event.param.conn.responder_resources = iw_event->ord;
-
/*
* Protect against the user destroying conn_id from another thread
* until we're done accessing it.
@@ -2241,7 +2276,8 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
addr = cma_src_addr(id_priv);
svc_id = rdma_get_service_id(&id_priv->id, addr);
- id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
+ id = ib_cm_insert_listen(id_priv->id.device,
+ cma_ib_req_handler, svc_id);
if (IS_ERR(id))
return PTR_ERR(id);
id_priv->cm_id.ib = id;
@@ -2561,8 +2597,6 @@ cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
route->path_rec->roce.route_resolved = true;
- sa_path_set_ndev(route->path_rec, addr->dev_addr.net);
- sa_path_set_ifindex(route->path_rec, ndev->ifindex);
sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
return ndev;
}
@@ -2791,7 +2825,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
p = 1;
port_found:
- ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
+ ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
if (ret)
goto out;
@@ -2817,9 +2851,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *dev_addr, void *context)
{
struct rdma_id_private *id_priv = context;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
- memset(&event, 0, sizeof event);
mutex_lock(&id_priv->handler_mutex);
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
RDMA_CM_ADDR_RESOLVED))
@@ -2910,7 +2943,7 @@ err:
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- struct sockaddr *dst_addr)
+ const struct sockaddr *dst_addr)
{
if (!src_addr || !src_addr->sa_family) {
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
@@ -2931,31 +2964,25 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- struct sockaddr *dst_addr, int timeout_ms)
+ const struct sockaddr *dst_addr, int timeout_ms)
{
struct rdma_id_private *id_priv;
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
if (id_priv->state == RDMA_CM_IDLE) {
ret = cma_bind_addr(id, src_addr, dst_addr);
- if (ret) {
- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+ if (ret)
return ret;
- }
}
- if (cma_family(id_priv) != dst_addr->sa_family) {
- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+ if (cma_family(id_priv) != dst_addr->sa_family)
return -EINVAL;
- }
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
return -EINVAL;
- }
+ memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
atomic_inc(&id_priv->refcount);
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
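The rdma_resolve_addr() rework above is a validate-then-commit refactor: the destination address is copied into the id only after the family and state checks pass, which is what lets the three unwind memset() calls disappear from the error paths. A minimal userspace sketch of the shape, with hypothetical types and names:

#include <errno.h>
#include <string.h>

struct conn { int family; int state; char dst[16]; };
enum { CONN_BOUND = 1, CONN_QUERY = 2 };

int conn_set_dst(struct conn *c, const char *dst, size_t len, int family)
{
    /* All checks first: a failure leaves *c completely untouched. */
    if (len > sizeof(c->dst))
        return -EINVAL;
    if (c->family != family)
        return -EINVAL;
    if (c->state != CONN_BOUND)
        return -EINVAL;

    /* Commit point: nothing below here can fail. */
    c->state = CONN_QUERY;
    memcpy(c->dst, dst, len);
    return 0;
}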
@@ -3451,18 +3478,18 @@ static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *ib_event)
+ const struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv = cm_id->context;
- struct rdma_cm_event event;
- struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
+ struct rdma_cm_event event = {};
+ const struct ib_cm_sidr_rep_event_param *rep =
+ &ib_event->param.sidr_rep_rcvd;
int ret = 0;
mutex_lock(&id_priv->handler_mutex);
if (id_priv->state != RDMA_CM_CONNECT)
goto out;
- memset(&event, 0, sizeof event);
switch (ib_event->event) {
case IB_CM_SIDR_REQ_ERROR:
event.event = RDMA_CM_EVENT_UNREACHABLE;
@@ -3488,7 +3515,8 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
ib_init_ah_attr_from_path(id_priv->id.device,
id_priv->id.port_num,
id_priv->id.route.path_rec,
- &event.param.ud.ah_attr);
+ &event.param.ud.ah_attr,
+ rep->sgid_attr);
event.param.ud.qp_num = rep->qpn;
event.param.ud.qkey = rep->qkey;
event.event = RDMA_CM_EVENT_ESTABLISHED;
@@ -3501,6 +3529,8 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
}
ret = id_priv->id.event_handler(&id_priv->id, &event);
+
+ rdma_destroy_ah_attr(&event.param.ud.ah_attr);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
@@ -3557,6 +3587,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
id_priv->cm_id.ib = id;
req.path = id_priv->id.route.path_rec;
+ req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -3618,6 +3649,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
if (route->num_paths == 2)
req.alternate_path = &route->path_rec[1];
+ req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
+ /* Alternate path SGID attribute currently unsupported */
req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
req.qp_num = id_priv->qp_num;
req.qp_type = id_priv->id.qp_type;
@@ -3928,7 +3961,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
struct rdma_id_private *id_priv;
struct cma_multicast *mc = multicast->context;
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
int ret = 0;
id_priv = mc->id_priv;
@@ -3952,7 +3985,6 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
}
mutex_unlock(&id_priv->qp_mutex);
- memset(&event, 0, sizeof event);
event.status = status;
event.param.ud.private_data = mc->context;
if (!status) {
@@ -3981,6 +4013,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
ret = id_priv->id.event_handler(&id_priv->id, &event);
+
+ rdma_destroy_ah_attr(&event.param.ud.ah_attr);
if (ret) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
@@ -4010,7 +4044,7 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else if (addr->sa_family == AF_IB) {
memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
- } else if ((addr->sa_family == AF_INET6)) {
+ } else if (addr->sa_family == AF_INET6) {
ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
if (id_priv->id.ps == RDMA_PS_UDP)
mc_map[7] = 0x01; /* Use RDMA CM signature */
@@ -4168,8 +4202,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (!send_only) {
err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
true);
- if (!err)
- mc->igmp_joined = true;
}
}
} else {
@@ -4221,26 +4253,29 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
memcpy(&mc->addr, addr, rdma_addr_size(addr));
mc->context = context;
mc->id_priv = id_priv;
- mc->igmp_joined = false;
mc->join_state = join_state;
- spin_lock(&id_priv->lock);
- list_add(&mc->list, &id_priv->mc_list);
- spin_unlock(&id_priv->lock);
if (rdma_protocol_roce(id->device, id->port_num)) {
kref_init(&mc->mcref);
ret = cma_iboe_join_multicast(id_priv, mc);
- } else if (rdma_cap_ib_mcast(id->device, id->port_num))
+ if (ret)
+ goto out_err;
+ } else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
ret = cma_join_ib_multicast(id_priv, mc);
- else
+ if (ret)
+ goto out_err;
+ } else {
ret = -ENOSYS;
-
- if (ret) {
- spin_lock_irq(&id_priv->lock);
- list_del(&mc->list);
- spin_unlock_irq(&id_priv->lock);
- kfree(mc);
+ goto out_err;
}
+
+ spin_lock(&id_priv->lock);
+ list_add(&mc->list, &id_priv->mc_list);
+ spin_unlock(&id_priv->lock);
+
+ return 0;
+out_err:
+ kfree(mc);
return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
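The restructured rdma_join_multicast() above follows the publish-after-init rule: the mc entry is linked into id_priv->mc_list only once the join has fully succeeded, so list walkers can never observe a half-initialized entry and every failure reduces to a single kfree(). A userspace sketch of the pattern, with hypothetical types:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct mc { struct mc *next; int handle; };
static struct mc *mc_list;
static pthread_mutex_t mc_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_join(struct mc *mc) { mc->handle = 42; return 0; }

int join(void)
{
    struct mc *mc = calloc(1, sizeof(*mc));
    int ret;

    if (!mc)
        return -ENOMEM;
    ret = do_join(mc);             /* may fail; mc is still private */
    if (ret) {
        free(mc);                  /* nobody else can see mc yet */
        return ret;
    }
    pthread_mutex_lock(&mc_lock);  /* publish only the fully set-up entry */
    mc->next = mc_list;
    mc_list = mc;
    pthread_mutex_unlock(&mc_lock);
    return 0;
}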
@@ -4268,23 +4303,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
} else if (rdma_protocol_roce(id->device, id->port_num)) {
- if (mc->igmp_joined) {
- struct rdma_dev_addr *dev_addr =
- &id->route.addr.dev_addr;
- struct net_device *ndev = NULL;
-
- if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(dev_addr->net,
- dev_addr->bound_dev_if);
- if (ndev) {
- cma_igmp_send(ndev,
- &mc->multicast.ib->rec.mgid,
- false);
- dev_put(ndev);
- }
- mc->igmp_joined = false;
- }
- kref_put(&mc->mcref, release_mc);
+ cma_leave_roce_mc_group(id_priv, mc);
}
return;
}
@@ -4410,7 +4429,7 @@ free_cma_dev:
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
- struct rdma_cm_event event;
+ struct rdma_cm_event event = {};
enum rdma_cm_state state;
int ret = 0;
@@ -4426,7 +4445,6 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
goto out;
- memset(&event, 0, sizeof event);
event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index fae417a391fb..77c7005c396c 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -91,8 +91,8 @@ void ib_device_unregister_sysfs(struct ib_device *device);
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
struct net_device *idev, void *cookie);
-typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
- struct net_device *idev, void *cookie);
+typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port,
+ struct net_device *idev, void *cookie);
void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_filter filter,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 6fa4c59dc7a7..db3b6271f09d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -105,8 +105,6 @@ static int ib_device_check_mandatory(struct ib_device *device)
IB_MANDATORY_FUNC(query_pkey),
IB_MANDATORY_FUNC(alloc_pd),
IB_MANDATORY_FUNC(dealloc_pd),
- IB_MANDATORY_FUNC(create_ah),
- IB_MANDATORY_FUNC(destroy_ah),
IB_MANDATORY_FUNC(create_qp),
IB_MANDATORY_FUNC(modify_qp),
IB_MANDATORY_FUNC(destroy_qp),
@@ -862,25 +860,6 @@ int ib_query_port(struct ib_device *device,
EXPORT_SYMBOL(ib_query_port);
/**
- * ib_query_gid - Get GID table entry
- * @device:Device to query
- * @port_num:Port number to query
- * @index:GID table index to query
- * @gid:Returned GID
- * @attr: Returned GID attributes related to this GID index (only in RoCE).
- * NULL means ignore.
- *
- * ib_query_gid() fetches the specified GID table entry from the cache.
- */
-int ib_query_gid(struct ib_device *device,
- u8 port_num, int index, union ib_gid *gid,
- struct ib_gid_attr *attr)
-{
- return ib_get_cached_gid(device, port_num, index, gid, attr);
-}
-EXPORT_SYMBOL(ib_query_gid);
-
-/**
* ib_enum_roce_netdev - enumerate all RoCE ports
* @ib_dev : IB device we want to query
* @filter: Should we call the callback?
@@ -1057,7 +1036,7 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
continue;
for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
- ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
+ ret = rdma_query_gid(device, port, i, &tmp_gid);
if (ret)
return ret;
if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index f742ae7a768b..ef459f2f2eeb 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -38,6 +38,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
@@ -58,8 +59,13 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
+/*
+ * The mlx4 driver uses the top byte to distinguish which virtual function
+ * generated the MAD, so we must avoid using it.
+ */
+#define AGENT_ID_LIMIT (1 << 24)
+static DEFINE_IDR(ib_mad_clients);
static struct list_head ib_mad_port_list;
-static atomic_t ib_mad_client_id = ATOMIC_INIT(0);
/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
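idr_alloc_cyclic() above replaces the plain incrementing ib_mad_client_id: agent IDs rotate through [0, AGENT_ID_LIMIT), and because the limit is 1 << 24, hi_tid never touches the top byte that mlx4 reserves for virtual-function routing. A userspace model of cyclic allocation under a cap, using a bitmap in place of the IDR (sizes hypothetical):

#include <stdbool.h>

#define ID_LIMIT 64                 /* stands in for AGENT_ID_LIMIT */
static bool used[ID_LIMIT];
static int next_id;

/* Allocate the next free ID at or after next_id, wrapping below ID_LIMIT. */
int alloc_cyclic(void)
{
    for (int i = 0; i < ID_LIMIT; i++) {
        int id = (next_id + i) % ID_LIMIT;

        if (!used[id]) {
            used[id] = true;
            next_id = id + 1;   /* cyclic: resume after the last grant */
            return id;
        }
    }
    return -1;                  /* ID space exhausted */
}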
@@ -190,6 +196,8 @@ EXPORT_SYMBOL(ib_response_mad);
/*
* ib_register_mad_agent - Register to send/receive MADs
+ *
+ * Context: Process context.
*/
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
u8 port_num,
@@ -210,7 +218,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
struct ib_mad_mgmt_vendor_class *vendor_class;
struct ib_mad_mgmt_method_table *method;
int ret2, qpn;
- unsigned long flags;
u8 mgmt_class, vclass;
/* Validate parameters */
@@ -376,13 +383,24 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
goto error4;
}
- spin_lock_irqsave(&port_priv->reg_lock, flags);
- mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);
+ idr_preload(GFP_KERNEL);
+ idr_lock(&ib_mad_clients);
+ ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
+ AGENT_ID_LIMIT, GFP_ATOMIC);
+ idr_unlock(&ib_mad_clients);
+ idr_preload_end();
+
+ if (ret2 < 0) {
+ ret = ERR_PTR(ret2);
+ goto error5;
+ }
+ mad_agent_priv->agent.hi_tid = ret2;
/*
* Make sure MAD registration (if supplied)
* is non overlapping with any existing ones
*/
+ spin_lock_irq(&port_priv->reg_lock);
if (mad_reg_req) {
mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
if (!is_vendor_class(mgmt_class)) {
@@ -393,7 +411,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
if (method) {
if (method_in_use(&method,
mad_reg_req))
- goto error5;
+ goto error6;
}
}
ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -409,24 +427,25 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
if (is_vendor_method_in_use(
vendor_class,
mad_reg_req))
- goto error5;
+ goto error6;
}
}
ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
}
if (ret2) {
ret = ERR_PTR(ret2);
- goto error5;
+ goto error6;
}
}
-
- /* Add mad agent into port's agent list */
- list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+ spin_unlock_irq(&port_priv->reg_lock);
return &mad_agent_priv->agent;
+error6:
+ spin_unlock_irq(&port_priv->reg_lock);
+ idr_lock(&ib_mad_clients);
+ idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
+ idr_unlock(&ib_mad_clients);
error5:
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
kfree(reg_req);
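The new error6 label keeps the unwind strictly in reverse order of acquisition: drop reg_lock, remove the freshly allocated IDR entry, then fall through to the pre-existing labels. The idiom in isolation, with hypothetical helpers:

#include <errno.h>

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -EIO; }
static void release_a(void) {}
static void release_b(void) {}

/* Each failure releases only what was acquired, in reverse order. */
static int setup(void)
{
    int ret;

    ret = acquire_a();
    if (ret)
        return ret;
    ret = acquire_b();
    if (ret)
        goto err_a;
    ret = acquire_c();
    if (ret)
        goto err_b;
    return 0;

err_b:
    release_b();
err_a:
    release_a();
    return ret;
}

int main(void) { return setup() ? 1 : 0; }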
@@ -575,7 +594,6 @@ static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
struct ib_mad_port_private *port_priv;
- unsigned long flags;
/* Note that we could still be handling received MADs */
@@ -587,10 +605,12 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
port_priv = mad_agent_priv->qp_info->port_priv;
cancel_delayed_work(&mad_agent_priv->timed_work);
- spin_lock_irqsave(&port_priv->reg_lock, flags);
+ spin_lock_irq(&port_priv->reg_lock);
remove_mad_reg_req(mad_agent_priv);
- list_del(&mad_agent_priv->agent_list);
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+ spin_unlock_irq(&port_priv->reg_lock);
+ idr_lock(&ib_mad_clients);
+ idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
+ idr_unlock(&ib_mad_clients);
flush_workqueue(port_priv->wq);
ib_cancel_rmpp_recvs(mad_agent_priv);
@@ -601,7 +621,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
kfree(mad_agent_priv->reg_req);
- kfree(mad_agent_priv);
+ kfree_rcu(mad_agent_priv, rcu);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
@@ -625,6 +645,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
/*
* ib_unregister_mad_agent - Unregisters a client from using MAD services
+ *
+ * Context: Process context.
*/
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
@@ -1159,7 +1181,6 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_mad_qp_info *qp_info;
struct list_head *list;
- struct ib_send_wr *bad_send_wr;
struct ib_mad_agent *mad_agent;
struct ib_sge *sge;
unsigned long flags;
@@ -1197,7 +1218,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
- &bad_send_wr);
+ NULL);
list = &qp_info->send_queue.list;
} else {
ret = 0;
@@ -1720,22 +1741,19 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
struct ib_mad_agent_private *mad_agent = NULL;
unsigned long flags;
- spin_lock_irqsave(&port_priv->reg_lock, flags);
if (ib_response_mad(mad_hdr)) {
u32 hi_tid;
- struct ib_mad_agent_private *entry;
/*
* Routing is based on high 32 bits of transaction ID
* of MAD.
*/
hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
- list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
- if (entry->agent.hi_tid == hi_tid) {
- mad_agent = entry;
- break;
- }
- }
+ rcu_read_lock();
+ mad_agent = idr_find(&ib_mad_clients, hi_tid);
+ if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
+ mad_agent = NULL;
+ rcu_read_unlock();
} else {
struct ib_mad_mgmt_class_table *class;
struct ib_mad_mgmt_method_table *method;
@@ -1744,6 +1762,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
const struct ib_vendor_mad *vendor_mad;
int index;
+ spin_lock_irqsave(&port_priv->reg_lock, flags);
/*
* Routing is based on version, class, and method
* For "newer" vendor MADs, also based on OUI
@@ -1783,20 +1802,19 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
~IB_MGMT_METHOD_RESP];
}
}
+ if (mad_agent)
+ atomic_inc(&mad_agent->refcount);
+out:
+ spin_unlock_irqrestore(&port_priv->reg_lock, flags);
}
- if (mad_agent) {
- if (mad_agent->agent.recv_handler)
- atomic_inc(&mad_agent->refcount);
- else {
- dev_notice(&port_priv->device->dev,
- "No receive handler for client %p on port %d\n",
- &mad_agent->agent, port_priv->port_num);
- mad_agent = NULL;
- }
+ if (mad_agent && !mad_agent->agent.recv_handler) {
+ dev_notice(&port_priv->device->dev,
+ "No receive handler for client %p on port %d\n",
+ &mad_agent->agent, port_priv->port_num);
+ deref_mad_agent(mad_agent);
+ mad_agent = NULL;
}
-out:
- spin_unlock_irqrestore(&port_priv->reg_lock, flags);
return mad_agent;
}
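find_mad_agent() now resolves response MADs locklessly: idr_find() under rcu_read_lock(), then atomic_inc_not_zero(), which refuses an agent whose refcount has already reached zero in unregister_mad_agent(); the switch to kfree_rcu() is what keeps the memory valid across the read-side critical section. A userspace model of the inc-not-zero acquire in C11 atomics, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

struct agent { atomic_int refcount; };

/*
 * Take a reference only if the object is still live (refcount > 0).
 * Mirrors atomic_inc_not_zero(): a CAS loop that never resurrects an
 * object already headed for free.
 */
bool get_agent(struct agent *a)
{
    int old = atomic_load(&a->refcount);

    while (old != 0)
        if (atomic_compare_exchange_weak(&a->refcount, &old, old + 1))
            return true;
    return false;
}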
@@ -1896,8 +1914,8 @@ static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_
const struct ib_global_route *grh =
rdma_ah_read_grh(&attr);
- if (ib_get_cached_gid(device, port_num,
- grh->sgid_index, &sgid, NULL))
+ if (rdma_query_gid(device, port_num,
+ grh->sgid_index, &sgid))
return 0;
return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
16);
@@ -2457,7 +2475,6 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
struct ib_mad_qp_info *qp_info;
struct ib_mad_queue *send_queue;
- struct ib_send_wr *bad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags;
int ret;
@@ -2507,7 +2524,7 @@ retry:
if (queued_send_wr) {
ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
- &bad_send_wr);
+ NULL);
if (ret) {
dev_err(&port_priv->device->dev,
"ib_post_send failed: %d\n", ret);
@@ -2552,11 +2569,9 @@ static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
if (wc->status == IB_WC_WR_FLUSH_ERR) {
if (mad_send_wr->retry) {
/* Repost send */
- struct ib_send_wr *bad_send_wr;
-
mad_send_wr->retry = 0;
ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
- &bad_send_wr);
+ NULL);
if (!ret)
return false;
}
@@ -2872,7 +2887,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
int post, ret;
struct ib_mad_private *mad_priv;
struct ib_sge sg_list;
- struct ib_recv_wr recv_wr, *bad_recv_wr;
+ struct ib_recv_wr recv_wr;
struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
/* Initialize common scatter list fields */
@@ -2916,7 +2931,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
post = (++recv_queue->count < recv_queue->max_active);
list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
spin_unlock_irqrestore(&recv_queue->lock, flags);
- ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
+ ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
if (ret) {
spin_lock_irqsave(&recv_queue->lock, flags);
list_del(&mad_priv->header.mad_list.list);
@@ -3159,7 +3174,6 @@ static int ib_mad_port_open(struct ib_device *device,
port_priv->device = device;
port_priv->port_num = port_num;
spin_lock_init(&port_priv->reg_lock);
- INIT_LIST_HEAD(&port_priv->agent_list);
init_mad_qp(port_priv, &port_priv->qp_info[0]);
init_mad_qp(port_priv, &port_priv->qp_info[1]);
@@ -3338,6 +3352,9 @@ int ib_mad_init(void)
INIT_LIST_HEAD(&ib_mad_port_list);
+ /* Client ID 0 is used for snoop-only clients */
+ idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);
+
if (ib_register_client(&mad_client)) {
pr_err("Couldn't register ib_mad client\n");
return -EINVAL;
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 28669f6419e1..d84ae1671898 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -89,7 +89,6 @@ struct ib_rmpp_segment {
};
struct ib_mad_agent_private {
- struct list_head agent_list;
struct ib_mad_agent agent;
struct ib_mad_reg_req *reg_req;
struct ib_mad_qp_info *qp_info;
@@ -105,7 +104,10 @@ struct ib_mad_agent_private {
struct list_head rmpp_list;
atomic_t refcount;
- struct completion comp;
+ union {
+ struct completion comp;
+ struct rcu_head rcu;
+ };
};
struct ib_mad_snoop_private {
@@ -203,7 +205,6 @@ struct ib_mad_port_private {
spinlock_t reg_lock;
struct ib_mad_mgmt_version_table version[MAX_MGMT_VERSION];
- struct list_head agent_list;
struct workqueue_struct *wq;
struct ib_mad_qp_info qp_info[IB_MAD_QPS_CORE];
};
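The union added to ib_mad_agent_private above works because the two members are used in disjoint phases: comp is only waited on while unregistering drains references, and rcu is only needed afterwards to drive kfree_rcu(), so overlapping their storage is safe. In miniature, with hypothetical types:

struct waiter { int done; };
struct deferred { void (*cb)(void *p); };

struct obj {
    int refcount;
    union {                    /* lifetimes never overlap:        */
        struct waiter comp;    /*  1) wait for refcount to drain  */
        struct deferred rcu;   /*  2) then schedule the free      */
    };
};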
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 6c48f4193dda..d50ff70bb24b 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -716,14 +716,28 @@ int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);
+/**
+ * ib_init_ah_from_mcmember - Initialize AH attribute from multicast
+ * member record and gid of the device.
+ * @device: RDMA device
+ * @port_num: Port of the rdma device to consider
+ * @ndev: Optional netdevice, applicable only for RoCE
+ * @gid_type: GID type to consider
+ * @ah_attr: AH attribute to fill in on successful completion
+ *
+ * ib_init_ah_from_mcmember() initializes AH attribute based on multicast
+ * member record and other device properties. On success the caller is
+ * responsible for calling rdma_destroy_ah_attr() on the ah_attr. Returns 0
+ * on success or an appropriate error code.
+ *
+ */
int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
struct ib_sa_mcmember_rec *rec,
struct net_device *ndev,
enum ib_gid_type gid_type,
struct rdma_ah_attr *ah_attr)
{
- int ret;
- u16 gid_index;
+ const struct ib_gid_attr *sgid_attr;
/* GID table is not based on the netdevice for IB link layer,
* so ignore ndev during search.
@@ -733,26 +747,22 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
else if (!rdma_protocol_roce(device, port_num))
return -EINVAL;
- ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
- gid_type, port_num,
- ndev,
- &gid_index);
- if (ret)
- return ret;
+ sgid_attr = rdma_find_gid_by_port(device, &rec->port_gid,
+ gid_type, port_num, ndev);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
- memset(ah_attr, 0, sizeof *ah_attr);
+ memset(ah_attr, 0, sizeof(*ah_attr));
ah_attr->type = rdma_ah_find_type(device, port_num);
rdma_ah_set_dlid(ah_attr, be16_to_cpu(rec->mlid));
rdma_ah_set_sl(ah_attr, rec->sl);
rdma_ah_set_port_num(ah_attr, port_num);
rdma_ah_set_static_rate(ah_attr, rec->rate);
-
- rdma_ah_set_grh(ah_attr, &rec->mgid,
- be32_to_cpu(rec->flow_label),
- (u8)gid_index,
- rec->hop_limit,
- rec->traffic_class);
+ rdma_move_grh_sgid_attr(ah_attr, &rec->mgid,
+ be32_to_cpu(rec->flow_label),
+ rec->hop_limit, rec->traffic_class,
+ sgid_attr);
return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
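rdma_move_grh_sgid_attr() is, as the name suggests, a move rather than a copy: the reference obtained from rdma_find_gid_by_port() is handed over to the ah_attr, which is exactly why the new kernel-doc obliges the caller to call rdma_destroy_ah_attr(). The ownership shape in a userspace sketch, with hypothetical names:

#include <stdlib.h>

struct gid_attr { int refs; };
struct ah_attr { struct gid_attr *sgid_attr; };

struct gid_attr *find_gid(void)
{
    struct gid_attr *g = malloc(sizeof(*g));

    if (g)
        g->refs = 1;            /* caller owns this reference */
    return g;
}

/* The attr takes over the caller's reference; the caller must not drop it. */
void move_sgid_attr(struct ah_attr *ah, struct gid_attr *sgid)
{
    ah->sgid_attr = sgid;
}

/* The destroy releases the reference the attr absorbed. */
void destroy_ah_attr(struct ah_attr *ah)
{
    if (ah->sgid_attr && --ah->sgid_attr->refs == 0)
        free(ah->sgid_attr);
    ah->sgid_attr = NULL;
}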
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 340c7bea45ab..0385ab438320 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -237,15 +237,15 @@ static int fill_port_info(struct sk_buff *msg,
if (ret)
return ret;
- BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
- if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
- (u64)attr.port_cap_flags, RDMA_NLDEV_ATTR_PAD))
- return -EMSGSIZE;
- if (rdma_protocol_ib(device, port) &&
- nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
- attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
- return -EMSGSIZE;
if (rdma_protocol_ib(device, port)) {
+ BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
+ (u64)attr.port_cap_flags,
+ RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
+ attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
return -EMSGSIZE;
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index a6e904973ba8..6eb64c6f0802 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -32,6 +32,7 @@
#include <linux/file.h>
#include <linux/anon_inodes.h>
+#include <linux/sched/mm.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
@@ -41,51 +42,6 @@
#include "core_priv.h"
#include "rdma_core.h"
-int uverbs_ns_idx(u16 *id, unsigned int ns_count)
-{
- int ret = (*id & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT;
-
- if (ret >= ns_count)
- return -EINVAL;
-
- *id &= ~UVERBS_ID_NS_MASK;
- return ret;
-}
-
-const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
- uint16_t object)
-{
- const struct uverbs_root_spec *object_hash = ibdev->specs_root;
- const struct uverbs_object_spec_hash *objects;
- int ret = uverbs_ns_idx(&object, object_hash->num_buckets);
-
- if (ret < 0)
- return NULL;
-
- objects = object_hash->object_buckets[ret];
-
- if (object >= objects->num_objects)
- return NULL;
-
- return objects->objects[object];
-}
-
-const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
- uint16_t method)
-{
- const struct uverbs_method_spec_hash *methods;
- int ret = uverbs_ns_idx(&method, object->num_buckets);
-
- if (ret < 0)
- return NULL;
-
- methods = object->method_buckets[ret];
- if (method >= methods->num_methods)
- return NULL;
-
- return methods->methods[method];
-}
-
void uverbs_uobject_get(struct ib_uobject *uobject)
{
kref_get(&uobject->ref);
@@ -96,7 +52,7 @@ static void uverbs_uobject_free(struct kref *ref)
struct ib_uobject *uobj =
container_of(ref, struct ib_uobject, ref);
- if (uobj->type->type_class->needs_kfree_rcu)
+ if (uobj->uapi_object->type_class->needs_kfree_rcu)
kfree_rcu(uobj, rcu);
else
kfree(uobj);
@@ -107,7 +63,8 @@ void uverbs_uobject_put(struct ib_uobject *uobject)
kref_put(&uobject->ref, uverbs_uobject_free);
}
-static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
+static int uverbs_try_lock_object(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
/*
* When a shared access is required, we use a positive counter. Each
@@ -120,27 +77,211 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
* concurrently, setting the counter to zero is enough for releasing
* this lock.
*/
- if (!exclusive)
- return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
+ return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
-EBUSY : 0;
+ case UVERBS_LOOKUP_WRITE:
+ /* lock is exclusive */
+ return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
+ case UVERBS_LOOKUP_DESTROY:
+ return 0;
+ }
+ return 0;
+}
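uverbs_try_lock_object() above packs a try-only reader/writer lock into one atomic counter: readers add 1 unless the value is -1, a writer swings 0 to -1, and UVERBS_LOOKUP_DESTROY takes no lock because the destroy path serializes by other means. A C11 userspace model of the two locked modes:

#include <stdatomic.h>
#include <stdbool.h>

/* usecnt: 0 = free, >0 = that many readers, -1 = write-locked. */
bool try_read_lock(atomic_int *usecnt)
{
    int old = atomic_load(usecnt);

    while (old != -1)   /* models atomic_fetch_add_unless(usecnt, 1, -1) */
        if (atomic_compare_exchange_weak(usecnt, &old, old + 1))
            return true;
    return false;
}

bool try_write_lock(atomic_int *usecnt)
{
    int expected = 0;   /* exclusive: only a free -> locked transition */

    return atomic_compare_exchange_strong(usecnt, &expected, -1);
}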
+
+static void assert_uverbs_usecnt(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
+{
+#ifdef CONFIG_LOCKDEP
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
+ WARN_ON(atomic_read(&uobj->usecnt) <= 0);
+ break;
+ case UVERBS_LOOKUP_WRITE:
+ WARN_ON(atomic_read(&uobj->usecnt) != -1);
+ break;
+ case UVERBS_LOOKUP_DESTROY:
+ break;
+ }
+#endif
+}
+
+/*
+ * This must be called with the hw_destroy_rwsem locked for read or write,
+ * also the uobject itself must be locked for write.
+ *
+ * Upon return the HW object is guaranteed to be destroyed.
+ *
+ * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
+ * however the type's alloc_commit function cannot have been called and the
+ * uobject cannot be on the uobjects list.
+ *
+ * For RDMA_REMOVE_DESTROY the caller should be holding a kref (e.g. via
+ * rdma_lookup_get_uobject) and the object is left in a state where the caller
+ * needs to call rdma_lookup_put_uobject.
+ *
+ * For all other destroy modes this function internally unlocks the uobject
+ * and consumes the kref on the uobj.
+ */
+static int uverbs_destroy_uobject(struct ib_uobject *uobj,
+ enum rdma_remove_reason reason)
+{
+ struct ib_uverbs_file *ufile = uobj->ufile;
+ unsigned long flags;
+ int ret;
+
+ lockdep_assert_held(&ufile->hw_destroy_rwsem);
+ assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
+
+ if (uobj->object) {
+ ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason);
+ if (ret) {
+ if (ib_is_destroy_retryable(ret, reason, uobj))
+ return ret;
+
+ /* Nothing to be done, dangle the memory and move on */
+ WARN(true,
+ "ib_uverbs: failed to remove uobject id %d, driver err=%d",
+ uobj->id, ret);
+ }
+
+ uobj->object = NULL;
+ }
- /* lock is either WRITE or DESTROY - should be exclusive */
- return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
+ if (reason == RDMA_REMOVE_ABORT) {
+ WARN_ON(!list_empty(&uobj->list));
+ WARN_ON(!uobj->context);
+ uobj->uapi_object->type_class->alloc_abort(uobj);
+ }
+
+ uobj->context = NULL;
+
+ /*
+ * For DESTROY the usecnt is held write locked; the caller is expected
+ * to unlock it and put the object when done with it. Only DESTROY
+ * can remove the IDR handle.
+ */
+ if (reason != RDMA_REMOVE_DESTROY)
+ atomic_set(&uobj->usecnt, 0);
+ else
+ uobj->uapi_object->type_class->remove_handle(uobj);
+
+ if (!list_empty(&uobj->list)) {
+ spin_lock_irqsave(&ufile->uobjects_lock, flags);
+ list_del_init(&uobj->list);
+ spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
+
+ /*
+ * Pairs with the get in rdma_alloc_commit_uobject(), could
+ * destroy uobj.
+ */
+ uverbs_uobject_put(uobj);
+ }
+
+ /*
+ * When aborting the stack kref remains owned by the core code, and is
+ * not transferred into the type. Pairs with the get in alloc_uobj
+ */
+ if (reason == RDMA_REMOVE_ABORT)
+ uverbs_uobject_put(uobj);
+
+ return 0;
}
-static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
- const struct uverbs_obj_type *type)
+/*
+ * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
+ * sequence. It should only be used from command callbacks. On success the
+ * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
+ * version requires the caller to have already obtained an
+ * LOOKUP_DESTROY uobject kref.
+ */
+int uobj_destroy(struct ib_uobject *uobj)
{
- struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);
+ struct ib_uverbs_file *ufile = uobj->ufile;
+ int ret;
+
+ down_read(&ufile->hw_destroy_rwsem);
+
+ ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
+ if (ret)
+ goto out_unlock;
+
+ ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY);
+ if (ret) {
+ atomic_set(&uobj->usecnt, 0);
+ goto out_unlock;
+ }
+out_unlock:
+ up_read(&ufile->hw_destroy_rwsem);
+ return ret;
+}
+
+/*
+ * uobj_get_destroy destroys the HW object and returns a handle to the uobj
+ * with a NULL object pointer. The caller must pair this with
+ * uverbs_put_destroy.
+ */
+struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
+ u32 id, struct ib_uverbs_file *ufile)
+{
+ struct ib_uobject *uobj;
+ int ret;
+
+ uobj = rdma_lookup_get_uobject(obj, ufile, id, UVERBS_LOOKUP_DESTROY);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ ret = uobj_destroy(uobj);
+ if (ret) {
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
+ return ERR_PTR(ret);
+ }
+
+ return uobj;
+}
+
+/*
+ * Does both uobj_get_destroy() and uobj_put_destroy(). Returns success_res
+ * on success (negative errno on failure). For use by callers that do not need
+ * the uobj.
+ */
+int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
+ struct ib_uverbs_file *ufile, int success_res)
+{
+ struct ib_uobject *uobj;
+
+ uobj = __uobj_get_destroy(obj, id, ufile);
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
+
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+ return success_res;
+}
+
+/* alloc_uobj must be undone by uverbs_destroy_uobject() */
+static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
+ const struct uverbs_api_object *obj)
+{
+ struct ib_uobject *uobj;
+ struct ib_ucontext *ucontext;
+
+ ucontext = ib_uverbs_get_ucontext(ufile);
+ if (IS_ERR(ucontext))
+ return ERR_CAST(ucontext);
+
+ uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
if (!uobj)
return ERR_PTR(-ENOMEM);
/*
* user_handle should be filled by the handler,
* The object is added to the list in the commit stage.
*/
- uobj->context = context;
- uobj->type = type;
+ uobj->ufile = ufile;
+ uobj->context = ucontext;
+ INIT_LIST_HEAD(&uobj->list);
+ uobj->uapi_object = obj;
/*
* Allocated objects start out as write locked to deny any other
* syscalls from accessing them until they are committed. See
@@ -157,45 +298,39 @@ static int idr_add_uobj(struct ib_uobject *uobj)
int ret;
idr_preload(GFP_KERNEL);
- spin_lock(&uobj->context->ufile->idr_lock);
+ spin_lock(&uobj->ufile->idr_lock);
/*
* We start with allocating an idr pointing to NULL. This represents an
* object which isn't initialized yet. We'll replace it later on with
* the real object once we commit.
*/
- ret = idr_alloc(&uobj->context->ufile->idr, NULL, 0,
+ ret = idr_alloc(&uobj->ufile->idr, NULL, 0,
min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
if (ret >= 0)
uobj->id = ret;
- spin_unlock(&uobj->context->ufile->idr_lock);
+ spin_unlock(&uobj->ufile->idr_lock);
idr_preload_end();
return ret < 0 ? ret : 0;
}
-/*
- * It only removes it from the uobjects list, uverbs_uobject_put() is still
- * required.
- */
-static void uverbs_idr_remove_uobj(struct ib_uobject *uobj)
-{
- spin_lock(&uobj->context->ufile->idr_lock);
- idr_remove(&uobj->context->ufile->idr, uobj->id);
- spin_unlock(&uobj->context->ufile->idr_lock);
-}
-
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
-static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext,
- int id, bool exclusive)
+static struct ib_uobject *
+lookup_get_idr_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode)
{
struct ib_uobject *uobj;
+ unsigned long idrno = id;
+
+ if (id < 0 || id > ULONG_MAX)
+ return ERR_PTR(-EINVAL);
rcu_read_lock();
/* object won't be released as we're protected in rcu */
- uobj = idr_find(&ucontext->ufile->idr, id);
+ uobj = idr_find(&ufile->idr, idrno);
if (!uobj) {
uobj = ERR_PTR(-ENOENT);
goto free;
@@ -215,19 +350,28 @@ free:
return uobj;
}
-static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext,
- int id, bool exclusive)
+static struct ib_uobject *
+lookup_get_fd_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode)
{
+ const struct uverbs_obj_fd_type *fd_type;
struct file *f;
struct ib_uobject *uobject;
- const struct uverbs_obj_fd_type *fd_type =
- container_of(type, struct uverbs_obj_fd_type, type);
+ int fdno = id;
- if (exclusive)
+ if (fdno != id)
+ return ERR_PTR(-EINVAL);
+
+ if (mode != UVERBS_LOOKUP_READ)
return ERR_PTR(-EOPNOTSUPP);
- f = fget(id);
+ if (!obj->type_attrs)
+ return ERR_PTR(-EIO);
+ fd_type =
+ container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+
+ f = fget(fdno);
if (!f)
return ERR_PTR(-EBADF);
@@ -246,43 +390,55 @@ static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *ty
return uobject;
}
-struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext,
- int id, bool exclusive)
+struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode)
{
struct ib_uobject *uobj;
int ret;
- uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);
+ if (!obj)
+ return ERR_PTR(-EINVAL);
+
+ uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
if (IS_ERR(uobj))
return uobj;
- if (uobj->type != type) {
+ if (uobj->uapi_object != obj) {
ret = -EINVAL;
goto free;
}
- ret = uverbs_try_lock_object(uobj, exclusive);
- if (ret) {
- WARN(ucontext->cleanup_reason,
- "ib_uverbs: Trying to lookup_get while cleanup context\n");
+ /*
+ * If we have been disassociated block every command except for
+ * DESTROY based commands.
+ */
+ if (mode != UVERBS_LOOKUP_DESTROY &&
+ !srcu_dereference(ufile->device->ib_dev,
+ &ufile->device->disassociate_srcu)) {
+ ret = -EIO;
goto free;
}
+ ret = uverbs_try_lock_object(uobj, mode);
+ if (ret)
+ goto free;
+
return uobj;
free:
- uobj->type->type_class->lookup_put(uobj, exclusive);
+ obj->type_class->lookup_put(uobj, mode);
uverbs_uobject_put(uobj);
return ERR_PTR(ret);
}
-static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext)
+static struct ib_uobject *
+alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile)
{
int ret;
struct ib_uobject *uobj;
- uobj = alloc_uobj(ucontext, type);
+ uobj = alloc_uobj(ufile, obj);
if (IS_ERR(uobj))
return uobj;
@@ -290,7 +446,7 @@ static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *
if (ret)
goto uobj_put;
- ret = ib_rdmacg_try_charge(&uobj->cg_obj, ucontext->device,
+ ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
RDMACG_RESOURCE_HCA_OBJECT);
if (ret)
goto idr_remove;
@@ -298,304 +454,305 @@ static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *
return uobj;
idr_remove:
- uverbs_idr_remove_uobj(uobj);
+ spin_lock(&ufile->idr_lock);
+ idr_remove(&ufile->idr, uobj->id);
+ spin_unlock(&ufile->idr_lock);
uobj_put:
uverbs_uobject_put(uobj);
return ERR_PTR(ret);
}
-static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext)
+static struct ib_uobject *
+alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile)
{
- const struct uverbs_obj_fd_type *fd_type =
- container_of(type, struct uverbs_obj_fd_type, type);
int new_fd;
struct ib_uobject *uobj;
- struct ib_uobject_file *uobj_file;
- struct file *filp;
new_fd = get_unused_fd_flags(O_CLOEXEC);
if (new_fd < 0)
return ERR_PTR(new_fd);
- uobj = alloc_uobj(ucontext, type);
+ uobj = alloc_uobj(ufile, obj);
if (IS_ERR(uobj)) {
put_unused_fd(new_fd);
return uobj;
}
- uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
- filp = anon_inode_getfile(fd_type->name,
- fd_type->fops,
- uobj_file,
- fd_type->flags);
- if (IS_ERR(filp)) {
- put_unused_fd(new_fd);
- uverbs_uobject_put(uobj);
- return (void *)filp;
- }
-
- uobj_file->uobj.id = new_fd;
- uobj_file->uobj.object = filp;
- uobj_file->ufile = ucontext->ufile;
- INIT_LIST_HEAD(&uobj->list);
- kref_get(&uobj_file->ufile->ref);
+ uobj->id = new_fd;
+ uobj->ufile = ufile;
return uobj;
}
-struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext)
+struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile)
{
- return type->type_class->alloc_begin(type, ucontext);
-}
+ struct ib_uobject *ret;
-static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
-{
- const struct uverbs_obj_idr_type *idr_type =
- container_of(uobj->type, struct uverbs_obj_idr_type,
- type);
- int ret = idr_type->destroy_object(uobj, why);
+ if (!obj)
+ return ERR_PTR(-EINVAL);
/*
- * We can only fail gracefully if the user requested to destroy the
- * object. In the rest of the cases, just remove whatever you can.
+ * The hw_destroy_rwsem is held across the entire object creation and
+ * released during rdma_alloc_commit_uobject or
+ * rdma_alloc_abort_uobject
*/
- if (why == RDMA_REMOVE_DESTROY && ret)
- return ret;
-
- ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
- RDMACG_RESOURCE_HCA_OBJECT);
- uverbs_idr_remove_uobj(uobj);
+ if (!down_read_trylock(&ufile->hw_destroy_rwsem))
+ return ERR_PTR(-EIO);
+ ret = obj->type_class->alloc_begin(obj, ufile);
+ if (IS_ERR(ret)) {
+ up_read(&ufile->hw_destroy_rwsem);
+ return ret;
+ }
return ret;
}
-static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
+static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
- struct ib_uobject_file *uobj_file =
- container_of(uobj, struct ib_uobject_file, uobj);
- struct file *filp = uobj->object;
- int id = uobj_file->uobj.id;
+ ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
+ RDMACG_RESOURCE_HCA_OBJECT);
- /* Unsuccessful NEW */
- fput(filp);
- put_unused_fd(id);
+ spin_lock(&uobj->ufile->idr_lock);
+ idr_remove(&uobj->ufile->idr, uobj->id);
+ spin_unlock(&uobj->ufile->idr_lock);
}
-static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
{
- const struct uverbs_obj_fd_type *fd_type =
- container_of(uobj->type, struct uverbs_obj_fd_type, type);
- struct ib_uobject_file *uobj_file =
- container_of(uobj, struct ib_uobject_file, uobj);
- int ret = fd_type->context_closed(uobj_file, why);
+ const struct uverbs_obj_idr_type *idr_type =
+ container_of(uobj->uapi_object->type_attrs,
+ struct uverbs_obj_idr_type, type);
+ int ret = idr_type->destroy_object(uobj, why);
- if (why == RDMA_REMOVE_DESTROY && ret)
+ /*
+ * We can only fail gracefully if the user requested to destroy the
+ * object, or when the destroy is retryable on error.
+ * In all other cases, just remove whatever you can.
+ */
+ if (ib_is_destroy_retryable(ret, why, uobj))
return ret;
- if (why == RDMA_REMOVE_DURING_CLEANUP) {
- alloc_abort_fd_uobject(uobj);
- return ret;
- }
+ if (why == RDMA_REMOVE_ABORT)
+ return 0;
- uobj_file->uobj.context = NULL;
- return ret;
+ ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
+ RDMACG_RESOURCE_HCA_OBJECT);
+
+ return 0;
}
-static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
+static void remove_handle_idr_uobject(struct ib_uobject *uobj)
{
-#ifdef CONFIG_LOCKDEP
- if (exclusive)
- WARN_ON(atomic_read(&uobj->usecnt) != -1);
- else
- WARN_ON(atomic_read(&uobj->usecnt) <= 0);
-#endif
+ spin_lock(&uobj->ufile->idr_lock);
+ idr_remove(&uobj->ufile->idr, uobj->id);
+ spin_unlock(&uobj->ufile->idr_lock);
+ /* Matches the kref in alloc_commit_idr_uobject */
+ uverbs_uobject_put(uobj);
}
-static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
- int ret;
- struct ib_ucontext *ucontext = uobj->context;
-
- ret = uobj->type->type_class->remove_commit(uobj, why);
- if (ret && why == RDMA_REMOVE_DESTROY) {
- /* We couldn't remove the object, so just unlock the uobject */
- atomic_set(&uobj->usecnt, 0);
- uobj->type->type_class->lookup_put(uobj, true);
- } else {
- mutex_lock(&ucontext->uobjects_lock);
- list_del(&uobj->list);
- mutex_unlock(&ucontext->uobjects_lock);
- /* put the ref we took when we created the object */
- uverbs_uobject_put(uobj);
- }
-
- return ret;
+ put_unused_fd(uobj->id);
}
-/* This is called only for user requested DESTROY reasons */
-int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
+static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
{
- int ret;
- struct ib_ucontext *ucontext = uobj->context;
-
- /* put the ref count we took at lookup_get */
- uverbs_uobject_put(uobj);
- /* Cleanup is running. Calling this should have been impossible */
- if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
- WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
- return 0;
- }
- assert_uverbs_usecnt(uobj, true);
- ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
+ const struct uverbs_obj_fd_type *fd_type = container_of(
+ uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
+ int ret = fd_type->context_closed(uobj, why);
- up_read(&ucontext->cleanup_rwsem);
- return ret;
-}
+ if (ib_is_destroy_retryable(ret, why, uobj))
+ return ret;
-static int null_obj_type_class_remove_commit(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
-{
return 0;
}
-static const struct uverbs_obj_type null_obj_type = {
- .type_class = &((const struct uverbs_obj_type_class){
- .remove_commit = null_obj_type_class_remove_commit,
- /* be cautious */
- .needs_kfree_rcu = true}),
-};
-
-int rdma_explicit_destroy(struct ib_uobject *uobject)
+static void remove_handle_fd_uobject(struct ib_uobject *uobj)
{
- int ret;
- struct ib_ucontext *ucontext = uobject->context;
-
- /* Cleanup is running. Calling this should have been impossible */
- if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
- WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
- return 0;
- }
- assert_uverbs_usecnt(uobject, true);
- ret = uobject->type->type_class->remove_commit(uobject,
- RDMA_REMOVE_DESTROY);
- if (ret)
- goto out;
-
- uobject->type = &null_obj_type;
-
-out:
- up_read(&ucontext->cleanup_rwsem);
- return ret;
}
-static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
+static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
- spin_lock(&uobj->context->ufile->idr_lock);
+ struct ib_uverbs_file *ufile = uobj->ufile;
+
+ spin_lock(&ufile->idr_lock);
/*
* We already allocated this IDR with a NULL object, so
* this shouldn't fail.
+ *
+ * NOTE: Once we set the IDR we lose ownership of our kref on uobj.
+ * It will be put by remove_handle_idr_uobject().
*/
- WARN_ON(idr_replace(&uobj->context->ufile->idr,
- uobj, uobj->id));
- spin_unlock(&uobj->context->ufile->idr_lock);
+ WARN_ON(idr_replace(&ufile->idr, uobj, uobj->id));
+ spin_unlock(&ufile->idr_lock);
+
+ return 0;
}
-static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
+static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
- struct ib_uobject_file *uobj_file =
- container_of(uobj, struct ib_uobject_file, uobj);
+ const struct uverbs_obj_fd_type *fd_type = container_of(
+ uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
+ int fd = uobj->id;
+ struct file *filp;
+
+ /*
+ * The kref for uobj is moved into filp->private_data and put in
+ * uverbs_close_fd(). Once alloc_commit() succeeds uverbs_close_fd()
+ * must be guaranteed to be called from the provided fops release
+ * callback.
+ */
+ filp = anon_inode_getfile(fd_type->name,
+ fd_type->fops,
+ uobj,
+ fd_type->flags);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+
+ uobj->object = filp;
+
+ /* Matching put will be done in uverbs_close_fd() */
+ kref_get(&uobj->ufile->ref);
- fd_install(uobj_file->uobj.id, uobj->object);
/* This shouldn't be used anymore. Use the file object instead */
- uobj_file->uobj.id = 0;
- /* Get another reference as we export this to the fops */
- uverbs_uobject_get(&uobj_file->uobj);
+ uobj->id = 0;
+
+ /*
+ * NOTE: Once we install the file we lose ownership of our kref on
+ * uobj. It will be put by uverbs_close_fd()
+ */
+ fd_install(fd, filp);
+
+ return 0;
}
-int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
+/*
+ * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the
+ * caller can no longer assume uobj is valid. If this function fails it
+ * destroys the uobject, including the attached HW object.
+ */
+int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj)
{
- /* Cleanup is running. Calling this should have been impossible */
- if (!down_read_trylock(&uobj->context->cleanup_rwsem)) {
- int ret;
+ struct ib_uverbs_file *ufile = uobj->ufile;
+ int ret;
- WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
- ret = uobj->type->type_class->remove_commit(uobj,
- RDMA_REMOVE_DURING_CLEANUP);
- if (ret)
- pr_warn("ib_uverbs: cleanup of idr object %d failed\n",
- uobj->id);
+ /* alloc_commit consumes the uobj kref */
+ ret = uobj->uapi_object->type_class->alloc_commit(uobj);
+ if (ret) {
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
+ up_read(&ufile->hw_destroy_rwsem);
return ret;
}
+ /* kref is held so long as the uobj is on the uobj list. */
+ uverbs_uobject_get(uobj);
+ spin_lock_irq(&ufile->uobjects_lock);
+ list_add(&uobj->list, &ufile->uobjects);
+ spin_unlock_irq(&ufile->uobjects_lock);
+
/* matches atomic_set(-1) in alloc_uobj */
- assert_uverbs_usecnt(uobj, true);
atomic_set(&uobj->usecnt, 0);
- mutex_lock(&uobj->context->uobjects_lock);
- list_add(&uobj->list, &uobj->context->uobjects);
- mutex_unlock(&uobj->context->uobjects_lock);
-
- uobj->type->type_class->alloc_commit(uobj);
- up_read(&uobj->context->cleanup_rwsem);
+ /* Matches the down_read in rdma_alloc_begin_uobject */
+ up_read(&ufile->hw_destroy_rwsem);
return 0;
}
-static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
-{
- uverbs_idr_remove_uobj(uobj);
- ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
- RDMACG_RESOURCE_HCA_OBJECT);
- uverbs_uobject_put(uobj);
-}
-
+/*
+ * This consumes the kref for uobj. It is up to the caller to unwind the HW
+ * object and anything else connected to uobj before calling this.
+ */
void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
{
- uobj->type->type_class->alloc_abort(uobj);
+ struct ib_uverbs_file *ufile = uobj->ufile;
+
+ uobj->object = NULL;
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
+
+ /* Matches the down_read in rdma_alloc_begin_uobject */
+ up_read(&ufile->hw_destroy_rwsem);
}
-static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
+static void lookup_put_idr_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
}
-static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
+static void lookup_put_fd_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
struct file *filp = uobj->object;
- WARN_ON(exclusive);
+ WARN_ON(mode != UVERBS_LOOKUP_READ);
/* This indirectly calls uverbs_close_fd and free the object */
fput(filp);
}
-void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
+void rdma_lookup_put_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode)
{
- assert_uverbs_usecnt(uobj, exclusive);
- uobj->type->type_class->lookup_put(uobj, exclusive);
+ assert_uverbs_usecnt(uobj, mode);
+ uobj->uapi_object->type_class->lookup_put(uobj, mode);
/*
* In order to unlock an object, either decrease its usecnt for
* read access or zero it in case of exclusive access. See
* uverbs_try_lock_object for locking schema information.
*/
- if (!exclusive)
+ switch (mode) {
+ case UVERBS_LOOKUP_READ:
atomic_dec(&uobj->usecnt);
- else
+ break;
+ case UVERBS_LOOKUP_WRITE:
atomic_set(&uobj->usecnt, 0);
+ break;
+ case UVERBS_LOOKUP_DESTROY:
+ break;
+ }
+ /* Pairs with the kref obtained by type->lookup_get */
uverbs_uobject_put(uobj);
}
+void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
+{
+ spin_lock_init(&ufile->idr_lock);
+ idr_init(&ufile->idr);
+}
+
+void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
+{
+ struct ib_uobject *entry;
+ int id;
+
+ /*
+ * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
+ * there are no HW objects left; however, the IDR is still populated
+ * with anything that has not been cleaned up by userspace. Since the
+ * kref on ufile is 0, nothing is allowed to call lookup_get.
+ *
+ * This is an optimized equivalent to remove_handle_idr_uobject
+ */
+ idr_for_each_entry(&ufile->idr, entry, id) {
+ WARN_ON(entry->object);
+ uverbs_uobject_put(entry);
+ }
+
+ idr_destroy(&ufile->idr);
+}
+
const struct uverbs_obj_type_class uverbs_idr_class = {
.alloc_begin = alloc_begin_idr_uobject,
.lookup_get = lookup_get_idr_uobject,
.alloc_commit = alloc_commit_idr_uobject,
.alloc_abort = alloc_abort_idr_uobject,
.lookup_put = lookup_put_idr_uobject,
- .remove_commit = remove_commit_idr_uobject,
+ .destroy_hw = destroy_hw_idr_uobject,
+ .remove_handle = remove_handle_idr_uobject,
/*
* When we destroy an object, we first just lock it for WRITE and
* actually DESTROY it in the finalize stage. So, the problematic
@@ -611,103 +768,180 @@ const struct uverbs_obj_type_class uverbs_idr_class = {
*/
.needs_kfree_rcu = true,
};
+EXPORT_SYMBOL(uverbs_idr_class);
-static void _uverbs_close_fd(struct ib_uobject_file *uobj_file)
+void uverbs_close_fd(struct file *f)
{
- struct ib_ucontext *ucontext;
- struct ib_uverbs_file *ufile = uobj_file->ufile;
- int ret;
+ struct ib_uobject *uobj = f->private_data;
+ struct ib_uverbs_file *ufile = uobj->ufile;
- mutex_lock(&uobj_file->ufile->cleanup_mutex);
+ if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
+ /*
+ * lookup_get_fd_uobject holds the kref on the struct file any
+ * time a FD uobj is locked, which prevents this release
+ * method from being invoked. Meaning we can always get the
+ * write lock here, or we have a kernel bug.
+ */
+ WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE);
+ up_read(&ufile->hw_destroy_rwsem);
+ }
- /* uobject was either already cleaned up or is cleaned up right now anyway */
- if (!uobj_file->uobj.context ||
- !down_read_trylock(&uobj_file->uobj.context->cleanup_rwsem))
- goto unlock;
+ /* Matches the get in alloc_begin_fd_uobject */
+ kref_put(&ufile->ref, ib_uverbs_release_file);
- ucontext = uobj_file->uobj.context;
- ret = _rdma_remove_commit_uobject(&uobj_file->uobj, RDMA_REMOVE_CLOSE);
- up_read(&ucontext->cleanup_rwsem);
- if (ret)
- pr_warn("uverbs: unable to clean up uobject file in uverbs_close_fd.\n");
-unlock:
- mutex_unlock(&ufile->cleanup_mutex);
+ /* Pairs with filp->private_data in alloc_begin_fd_uobject */
+ uverbs_uobject_put(uobj);
}
-void uverbs_close_fd(struct file *f)
-{
- struct ib_uobject_file *uobj_file = f->private_data;
- struct kref *uverbs_file_ref = &uobj_file->ufile->ref;
+static void ufile_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct ib_device *ib_dev = ibcontext->device;
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+
+ owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
+ if (!owning_process)
+ return;
+
+ owning_mm = get_task_mm(owning_process);
+ if (!owning_mm) {
+ pr_info("no mm, disassociate ucontext is pending task termination\n");
+ while (1) {
+ put_task_struct(owning_process);
+ usleep_range(1000, 2000);
+ owning_process = get_pid_task(ibcontext->tgid,
+ PIDTYPE_PID);
+ if (!owning_process ||
+ owning_process->state == TASK_DEAD) {
+ pr_info("disassociate ucontext done, task was terminated\n");
+ /* in case task was dead need to release the
+ * task struct.
+ */
+ if (owning_process)
+ put_task_struct(owning_process);
+ return;
+ }
+ }
+ }
- _uverbs_close_fd(uobj_file);
- uverbs_uobject_put(&uobj_file->uobj);
- kref_put(uverbs_file_ref, ib_uverbs_release_file);
+ down_write(&owning_mm->mmap_sem);
+ ib_dev->disassociate_ucontext(ibcontext);
+ up_write(&owning_mm->mmap_sem);
+ mmput(owning_mm);
+ put_task_struct(owning_process);
}
-void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
+/*
+ * Drop the ucontext off the ufile and completely disconnect it from the
+ * ib_device
+ */
+static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason)
{
- enum rdma_remove_reason reason = device_removed ?
- RDMA_REMOVE_DRIVER_REMOVE : RDMA_REMOVE_CLOSE;
- unsigned int cur_order = 0;
+ struct ib_ucontext *ucontext = ufile->ucontext;
+ int ret;
+
+ if (reason == RDMA_REMOVE_DRIVER_REMOVE)
+ ufile_disassociate_ucontext(ucontext);
+
+ put_pid(ucontext->tgid);
+ ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
+ RDMACG_RESOURCE_HCA_HANDLE);
- ucontext->cleanup_reason = reason;
/*
- * Waits for all remove_commit and alloc_commit to finish. Logically, We
- * want to hold this forever as the context is going to be destroyed,
- * but we'll release it since it causes a "held lock freed" BUG message.
+ * FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
+ * the error return.
*/
- down_write(&ucontext->cleanup_rwsem);
+ ret = ucontext->device->dealloc_ucontext(ucontext);
+ WARN_ON(ret);
- while (!list_empty(&ucontext->uobjects)) {
- struct ib_uobject *obj, *next_obj;
- unsigned int next_order = UINT_MAX;
+ ufile->ucontext = NULL;
+}
+
+static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason)
+{
+ struct ib_uobject *obj, *next_obj;
+ int ret = -EINVAL;
+ /*
+ * This shouldn't run while executing other commands on this
+ * context. Thus, the only thing we should take care of is
+ * releasing a FD while traversing this list. The FD could be
+ * closed and released from the _release fop of this FD.
+ * In order to mitigate this, we add a lock.
+ * We take and release the lock per traversal in order to give
+ * other threads (which might still use the FDs) a chance to run.
+ */
+ list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
/*
- * This shouldn't run while executing other commands on this
- * context. Thus, the only thing we should take care of is
- * releasing a FD while traversing this list. The FD could be
- * closed and released from the _release fop of this FD.
- * In order to mitigate this, we add a lock.
- * We take and release the lock per order traversal in order
- * to let other threads (which might still use the FDs) chance
- * to run.
+ * if we hit this WARN_ON, that means we are
+ * racing with a lookup_get.
*/
- mutex_lock(&ucontext->uobjects_lock);
- list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
- list) {
- if (obj->type->destroy_order == cur_order) {
- int ret;
-
- /*
- * if we hit this WARN_ON, that means we are
- * racing with a lookup_get.
- */
- WARN_ON(uverbs_try_lock_object(obj, true));
- ret = obj->type->type_class->remove_commit(obj,
- reason);
- list_del(&obj->list);
- if (ret)
- pr_warn("ib_uverbs: failed to remove uobject id %d order %u\n",
- obj->id, cur_order);
- /* put the ref we took when we created the object */
- uverbs_uobject_put(obj);
- } else {
- next_order = min(next_order,
- obj->type->destroy_order);
- }
- }
- mutex_unlock(&ucontext->uobjects_lock);
- cur_order = next_order;
+ WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
+ if (!uverbs_destroy_uobject(obj, reason))
+ ret = 0;
}
- up_write(&ucontext->cleanup_rwsem);
+ return ret;
}
-void uverbs_initialize_ucontext(struct ib_ucontext *ucontext)
+/*
+ * Destroy the ucontext and every uobject associated with it. If called with
+ * reason != RDMA_REMOVE_CLOSE this will not return until the destruction has
+ * been completed and ufile->ucontext is NULL.
+ *
+ * This is internally locked and can be called in parallel from multiple
+ * contexts.
+ */
+void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason)
{
- ucontext->cleanup_reason = 0;
- mutex_init(&ucontext->uobjects_lock);
- INIT_LIST_HEAD(&ucontext->uobjects);
- init_rwsem(&ucontext->cleanup_rwsem);
+ if (reason == RDMA_REMOVE_CLOSE) {
+ /*
+ * During destruction we might trigger something that
+ * synchronously calls release on any file descriptor. For
+ * this reason all paths that come from file_operations
+ * release must use try_lock. They can progress knowing that
+ * there is an ongoing uverbs_destroy_ufile_hw that will clean
+ * up the driver resources.
+ */
+ if (!mutex_trylock(&ufile->ucontext_lock))
+ return;
+
+ } else {
+ mutex_lock(&ufile->ucontext_lock);
+ }
+
+ down_write(&ufile->hw_destroy_rwsem);
+
+ /*
+ * If a ucontext was never created then we can't have any uobjects to
+ * clean up, so there is nothing to do.
+ */
+ if (!ufile->ucontext)
+ goto done;
+
+ ufile->ucontext->closing = true;
+ ufile->ucontext->cleanup_retryable = true;
+ while (!list_empty(&ufile->uobjects))
+ if (__uverbs_cleanup_ufile(ufile, reason)) {
+ /*
+ * No entry was cleaned up successfully during this
+ * iteration
+ */
+ break;
+ }
+
+ ufile->ucontext->cleanup_retryable = false;
+ if (!list_empty(&ufile->uobjects))
+ __uverbs_cleanup_ufile(ufile, reason);
+
+ ufile_destroy_ucontext(ufile, reason);
+
+done:
+ up_write(&ufile->hw_destroy_rwsem);
+ mutex_unlock(&ufile->ucontext_lock);
}
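
The two passes above deserve a note: while cleanup_retryable is set, a sweep that destroys at least one uobject is repeated, because destroying one object can drop the reference that kept another object's destruction retryable; only when a full sweep makes no progress does the final non-retryable pass force out whatever remains. A minimal standalone sketch of that convergence pattern, using illustrative toy types rather than the kernel structures:

/* Sketch only: toy types standing in for ufile->uobjects. In the
 * kernel, freeing one object may lower another object's blockers.
 */
#include <stdbool.h>
#include <stdlib.h>

struct toy_obj {
	struct toy_obj *next;
	int blockers;		/* references still held on us */
};

static void destroy_all(struct toy_obj **head)
{
	bool progress = true;

	/* Phase 1: keep sweeping while at least one object dies. */
	while (*head && progress) {
		struct toy_obj **pp = head;

		progress = false;
		while (*pp) {
			struct toy_obj *obj = *pp;

			if (obj->blockers == 0) {
				*pp = obj->next;
				free(obj);
				progress = true;
			} else {
				pp = &obj->next;
			}
		}
	}
	/* Phase 2 (non-retryable) would force-free what remains. */
}
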
const struct uverbs_obj_type_class uverbs_fd_class = {
@@ -716,23 +950,33 @@ const struct uverbs_obj_type_class uverbs_fd_class = {
.alloc_commit = alloc_commit_fd_uobject,
.alloc_abort = alloc_abort_fd_uobject,
.lookup_put = lookup_put_fd_uobject,
- .remove_commit = remove_commit_fd_uobject,
+ .destroy_hw = destroy_hw_fd_uobject,
+ .remove_handle = remove_handle_fd_uobject,
.needs_kfree_rcu = false,
};
+EXPORT_SYMBOL(uverbs_fd_class);
-struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
- struct ib_ucontext *ucontext,
- enum uverbs_obj_access access,
- int id)
+struct ib_uobject *
+uverbs_get_uobject_from_file(u16 object_id,
+ struct ib_uverbs_file *ufile,
+ enum uverbs_obj_access access, s64 id)
{
+ const struct uverbs_api_object *obj =
+ uapi_get_object(ufile->device->uapi, object_id);
+
switch (access) {
case UVERBS_ACCESS_READ:
- return rdma_lookup_get_uobject(type_attrs, ucontext, id, false);
+ return rdma_lookup_get_uobject(obj, ufile, id,
+ UVERBS_LOOKUP_READ);
case UVERBS_ACCESS_DESTROY:
+ /* Actual destruction is done inside uverbs_handle_method */
+ return rdma_lookup_get_uobject(obj, ufile, id,
+ UVERBS_LOOKUP_DESTROY);
case UVERBS_ACCESS_WRITE:
- return rdma_lookup_get_uobject(type_attrs, ucontext, id, true);
+ return rdma_lookup_get_uobject(obj, ufile, id,
+ UVERBS_LOOKUP_WRITE);
case UVERBS_ACCESS_NEW:
- return rdma_alloc_begin_uobject(type_attrs, ucontext);
+ return rdma_alloc_begin_uobject(obj, ufile);
default:
WARN_ON(true);
return ERR_PTR(-EOPNOTSUPP);
@@ -753,16 +997,14 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
switch (access) {
case UVERBS_ACCESS_READ:
- rdma_lookup_put_uobject(uobj, false);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
break;
case UVERBS_ACCESS_WRITE:
- rdma_lookup_put_uobject(uobj, true);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
break;
case UVERBS_ACCESS_DESTROY:
- if (commit)
- ret = rdma_remove_commit_uobject(uobj);
- else
- rdma_lookup_put_uobject(uobj, true);
+ if (uobj)
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
break;
case UVERBS_ACCESS_NEW:
if (commit)
@@ -777,43 +1019,3 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
return ret;
}
-
-int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
- struct uverbs_attr_spec_hash * const *spec_hash,
- size_t num,
- bool commit)
-{
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < num; i++) {
- struct uverbs_attr_bundle_hash *curr_bundle =
- &attrs_bundle->hash[i];
- const struct uverbs_attr_spec_hash *curr_spec_bucket =
- spec_hash[i];
- unsigned int j;
-
- for (j = 0; j < curr_bundle->num_attrs; j++) {
- struct uverbs_attr *attr;
- const struct uverbs_attr_spec *spec;
-
- if (!uverbs_attr_is_valid_in_hash(curr_bundle, j))
- continue;
-
- attr = &curr_bundle->attrs[j];
- spec = &curr_spec_bucket->attrs[j];
-
- if (spec->type == UVERBS_ATTR_TYPE_IDR ||
- spec->type == UVERBS_ATTR_TYPE_FD) {
- int current_ret;
-
- current_ret = uverbs_finalize_object(attr->obj_attr.uobject,
- spec->obj.access,
- commit);
- if (!ret)
- ret = current_ret;
- }
- }
- }
- return ret;
-}
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 1efcf93238dd..f962f2a593ba 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -43,20 +43,12 @@
#include <rdma/ib_verbs.h>
#include <linux/mutex.h>
-int uverbs_ns_idx(u16 *id, unsigned int ns_count);
-const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
- uint16_t object);
-const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
- uint16_t method);
-/*
- * These functions initialize the context and cleanups its uobjects.
- * The context has a list of objects which is protected by a mutex
- * on the context. initialize_ucontext should be called when we create
- * a context.
- * cleanup_ucontext removes all uobjects from the context and puts them.
- */
-void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed);
-void uverbs_initialize_ucontext(struct ib_ucontext *ucontext);
+struct ib_uverbs_device;
+
+void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
+ enum rdma_remove_reason reason);
+
+int uobj_destroy(struct ib_uobject *uobj);
/*
* uverbs_uobject_get is called in order to increase the reference count on
@@ -82,7 +74,7 @@ void uverbs_uobject_put(struct ib_uobject *uobject);
void uverbs_close_fd(struct file *f);
/*
- * Get an ib_uobject that corresponds to the given id from ucontext, assuming
+ * Get an ib_uobject that corresponds to the given id from ufile, assuming
* the object is from the given type. Lock it to the required access when
* applicable.
* This function could create (access == NEW), destroy (access == DESTROY)
@@ -90,13 +82,11 @@ void uverbs_close_fd(struct file *f);
* The action will be finalized only when uverbs_finalize_object or
* uverbs_finalize_objects are called.
*/
-struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
- struct ib_ucontext *ucontext,
- enum uverbs_obj_access access,
- int id);
-int uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access,
- bool commit);
+struct ib_uobject *
+uverbs_get_uobject_from_file(u16 object_id,
+ struct ib_uverbs_file *ufile,
+ enum uverbs_obj_access access, s64 id);
+
/*
* Note that certain finalize stages could return a status:
* (a) alloc_commit could return a failure if the object is committed at the
@@ -112,9 +102,63 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
* function. For example, this could happen when we couldn't destroy an
* object.
*/
-int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
- struct uverbs_attr_spec_hash * const *spec_hash,
- size_t num,
- bool commit);
+int uverbs_finalize_object(struct ib_uobject *uobj,
+ enum uverbs_obj_access access,
+ bool commit);
+
+void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
+void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
+
+/*
+ * This is the runtime description of the uverbs API, used by the syscall
+ * machinery to validate and dispatch calls.
+ */
+
+/*
+ * Depending on the ID, the slot pointer in the radix tree points at one of these
+ * structs.
+ */
+struct uverbs_api_object {
+ const struct uverbs_obj_type *type_attrs;
+ const struct uverbs_obj_type_class *type_class;
+};
+
+struct uverbs_api_ioctl_method {
+ int (__rcu *handler)(struct ib_uverbs_file *ufile,
+ struct uverbs_attr_bundle *ctx);
+ DECLARE_BITMAP(attr_mandatory, UVERBS_API_ATTR_BKEY_LEN);
+ u16 bundle_size;
+ u8 use_stack:1;
+ u8 driver_method:1;
+ u8 key_bitmap_len;
+ u8 destroy_bkey;
+};
+
+struct uverbs_api_attr {
+ struct uverbs_attr_spec spec;
+};
+
+struct uverbs_api_object;
+struct uverbs_api {
+ /* radix tree contains struct uverbs_api_* pointers */
+ struct radix_tree_root radix;
+ enum rdma_driver_id driver_id;
+};
+
+static inline const struct uverbs_api_object *
+uapi_get_object(struct uverbs_api *uapi, u16 object_id)
+{
+ return radix_tree_lookup(&uapi->radix, uapi_key_obj(object_id));
+}
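
Since uapi_get_object() returns NULL for an id that was never registered, callers can reject unknown object ids before doing any dispatch work. A small usage sketch; the caller context is assumed:

/* Usage sketch (assumed caller): validate a user-supplied object id. */
static int example_validate_object_id(struct uverbs_api *uapi, u16 object_id)
{
	const struct uverbs_api_object *obj;

	obj = uapi_get_object(uapi, object_id);
	if (!obj)
		return -EOPNOTSUPP;	/* id not in the radix tree */

	/* obj->type_class distinguishes idr-backed from fd-backed ids */
	return 0;
}
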
+
+char *uapi_key_format(char *S, unsigned int key);
+struct uverbs_api *uverbs_alloc_api(
+ const struct uverbs_object_tree_def *const *driver_specs,
+ enum rdma_driver_id driver_id);
+void uverbs_disassociate_api_pre(struct ib_uverbs_device *uverbs_dev);
+void uverbs_disassociate_api(struct uverbs_api *uapi);
+void uverbs_destroy_api(struct uverbs_api *uapi);
+void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
+ unsigned int num_attrs);
#endif /* RDMA_CORE_H */
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index a4fbdc5d28fa..ee366199b169 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -143,14 +143,15 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de
#define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \
BONDING_SLAVE_STATE_NA)
-static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+static bool
+is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
{
struct net_device *real_dev;
- int res;
+ bool res;
if (!rdma_ndev)
- return 0;
+ return false;
rcu_read_lock();
real_dev = rdma_vlan_dev_real_dev(cookie);
@@ -166,14 +167,15 @@ static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
return res;
}
-static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+static bool
+is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
{
struct net_device *master_dev;
- int res;
+ bool res;
if (!rdma_ndev)
- return 0;
+ return false;
rcu_read_lock();
master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
@@ -184,22 +186,59 @@ static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
return res;
}
-static int pass_all_filter(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+/**
+ * is_ndev_for_default_gid_filter - Check if a given netdevice
+ * can be considered for default GIDs or not.
+ * @ib_dev: IB device to check
+ * @port: Port to consider for adding default GID
+ * @rdma_ndev: rdma netdevice pointer
+ * @cookie: Netdevice to consider to form a default GID
+ *
+ * is_ndev_for_default_gid_filter() returns true if a given netdevice can be
+ * considered for deriving the default RoCE GID; returns false otherwise.
+ */
+static bool
+is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *cookie_ndev = cookie;
+ bool res;
+
+ if (!rdma_ndev)
+ return false;
+
+ rcu_read_lock();
+
+ /*
+ * When rdma netdevice is used in bonding, bonding master netdevice
+ * should be considered for default GIDs. Therefore, ignore slave rdma
+ * netdevices when bonding is considered.
+ * Additionally, when the event (cookie) netdevice is the bond master
+ * device, make sure that it is the upper netdevice of the rdma netdevice.
+ */
+ res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) ||
+ (netif_is_bond_master(cookie_ndev) &&
+ rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)));
+
+ rcu_read_unlock();
+ return res;
+}
+
+static bool pass_all_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
{
- return 1;
+ return true;
}
-static int upper_device_filter(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
+static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
{
- int res;
+ bool res;
if (!rdma_ndev)
- return 0;
+ return false;
if (rdma_ndev == cookie)
- return 1;
+ return true;
rcu_read_lock();
res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
@@ -208,6 +247,34 @@ static int upper_device_filter(struct ib_device *ib_dev, u8 port,
return res;
}
+/**
+ * is_upper_ndev_bond_master_filter - Check if a given netdevice
+ * is the bond master device of the netdevice of the RDMA device's port.
+ * @ib_dev: IB device to check
+ * @port: Port to consider for adding default GID
+ * @rdma_ndev: Pointer to rdma netdevice
+ * @cookie: Netdevice to consider to form a default GID
+ *
+ * is_upper_ndev_bond_master_filter() returns true if the cookie netdev
+ * is a bond master device and rdma_ndev is its lower netdevice. rdma_ndev
+ * might not have been established as a slave device yet.
+ */
+static bool
+is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev,
+ void *cookie)
+{
+ struct net_device *cookie_ndev = cookie;
+ bool match = false;
+
+ rcu_read_lock();
+ if (netif_is_bond_master(cookie_ndev) &&
+ rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
+ match = true;
+ rcu_read_unlock();
+ return match;
+}
+
static void update_gid_ip(enum gid_op_type gid_op,
struct ib_device *ib_dev,
u8 port, struct net_device *ndev,
@@ -223,34 +290,10 @@ static void update_gid_ip(enum gid_op_type gid_op,
update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}
-static void enum_netdev_default_gids(struct ib_device *ib_dev,
- u8 port, struct net_device *event_ndev,
- struct net_device *rdma_ndev)
-{
- unsigned long gid_type_mask;
-
- rcu_read_lock();
- if (!rdma_ndev ||
- ((rdma_ndev != event_ndev &&
- !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
- is_eth_active_slave_of_bonding_rcu(rdma_ndev,
- netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
- BONDING_SLAVE_STATE_INACTIVE)) {
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
-
- gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
-
- ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
- IB_CACHE_GID_DEFAULT_MODE_SET);
-}
-
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
u8 port,
- struct net_device *event_ndev,
- struct net_device *rdma_ndev)
+ struct net_device *rdma_ndev,
+ struct net_device *event_ndev)
{
struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
unsigned long gid_type_mask;
@@ -381,7 +424,6 @@ static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
struct net_device *rdma_ndev, void *cookie)
{
- enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
_add_netdev_ips(ib_dev, port, cookie);
}
@@ -391,6 +433,38 @@ static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}
+/**
+ * del_default_gids - Delete default GIDs of the event/cookie netdevice
+ * @ib_dev: RDMA device pointer
+ * @port: Port of the RDMA device whose GID table to consider
+ * @rdma_ndev: Unused rdma netdevice
+ * @cookie: Pointer to event netdevice
+ *
+ * del_default_gids() deletes the default GIDs of the event/cookie netdevice.
+ */
+static void del_default_gids(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *cookie_ndev = cookie;
+ unsigned long gid_type_mask;
+
+ gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+
+ ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask,
+ IB_CACHE_GID_DEFAULT_MODE_DELETE);
+}
+
+static void add_default_gids(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *event_ndev = cookie;
+ unsigned long gid_type_mask;
+
+ gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+ ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask,
+ IB_CACHE_GID_DEFAULT_MODE_SET);
+}
+
static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
u8 port,
struct net_device *rdma_ndev,
@@ -405,9 +479,20 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
rtnl_lock();
down_read(&net_rwsem);
for_each_net(net)
- for_each_netdev(net, ndev)
- if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
- add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
+ for_each_netdev(net, ndev) {
+ /*
+ * Filter and add default GIDs of the primary netdevice
+ * when not in bonding mode, or add default GIDs
+ * of bond master device, when in bonding mode.
+ */
+ if (is_ndev_for_default_gid_filter(ib_dev, port,
+ rdma_ndev, ndev))
+ add_default_gids(ib_dev, port, rdma_ndev, ndev);
+
+ if (is_eth_port_of_netdev_filter(ib_dev, port,
+ rdma_ndev, ndev))
+ _add_netdev_ips(ib_dev, port, ndev);
+ }
up_read(&net_rwsem);
rtnl_unlock();
}
@@ -513,18 +598,12 @@ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
rcu_read_unlock();
if (master_ndev) {
- bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
- rdma_ndev);
+ bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev,
+ master_ndev);
dev_put(master_ndev);
}
}
-static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
-{
- bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
-}
-
/* The following functions operate on all IB devices. netdevice_event and
* addr_event execute ib_enum_all_roce_netdevs through a work.
* ib_enum_all_roce_netdevs iterates through all IB devices.
@@ -575,40 +654,94 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
}
static const struct netdev_event_work_cmd add_cmd = {
- .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
+ .cb = add_netdev_ips,
+ .filter = is_eth_port_of_netdev_filter
+};
+
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
- .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};
+ .cb = add_netdev_upper_ips,
+ .filter = is_eth_port_of_netdev_filter
+};
-static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
- struct netdev_event_work_cmd *cmds)
+static void
+ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info,
+ struct netdev_event_work_cmd *cmds)
{
- static const struct netdev_event_work_cmd upper_ips_del_cmd = {
- .cb = del_netdev_upper_ips, .filter = upper_device_filter};
- static const struct netdev_event_work_cmd bonding_default_del_cmd = {
- .cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};
-
- if (changeupper_info->linking == false) {
- cmds[0] = upper_ips_del_cmd;
- cmds[0].ndev = changeupper_info->upper_dev;
- cmds[1] = add_cmd;
- } else {
- cmds[0] = bonding_default_del_cmd;
- cmds[0].ndev = changeupper_info->upper_dev;
- cmds[1] = add_cmd_upper_ips;
- cmds[1].ndev = changeupper_info->upper_dev;
- cmds[1].filter_ndev = changeupper_info->upper_dev;
- }
+ static const struct netdev_event_work_cmd
+ upper_ips_del_cmd = {
+ .cb = del_netdev_upper_ips,
+ .filter = upper_device_filter
+ };
+
+ cmds[0] = upper_ips_del_cmd;
+ cmds[0].ndev = changeupper_info->upper_dev;
+ cmds[1] = add_cmd;
}
+static const struct netdev_event_work_cmd bonding_default_add_cmd = {
+ .cb = add_default_gids,
+ .filter = is_upper_ndev_bond_master_filter
+};
+
+static void
+ndev_event_link(struct net_device *event_ndev,
+ struct netdev_notifier_changeupper_info *changeupper_info,
+ struct netdev_event_work_cmd *cmds)
+{
+ static const struct netdev_event_work_cmd
+ bonding_default_del_cmd = {
+ .cb = del_default_gids,
+ .filter = is_upper_ndev_bond_master_filter
+ };
+ /*
+ * When a lower netdev is linked to its upper bonding
+ * netdev, delete lower slave netdev's default GIDs.
+ */
+ cmds[0] = bonding_default_del_cmd;
+ cmds[0].ndev = event_ndev;
+ cmds[0].filter_ndev = changeupper_info->upper_dev;
+
+ /* Now add bonding upper device default GIDs */
+ cmds[1] = bonding_default_add_cmd;
+ cmds[1].ndev = changeupper_info->upper_dev;
+ cmds[1].filter_ndev = changeupper_info->upper_dev;
+
+ /* Now add bonding upper device IP based GIDs */
+ cmds[2] = add_cmd_upper_ips;
+ cmds[2].ndev = changeupper_info->upper_dev;
+ cmds[2].filter_ndev = changeupper_info->upper_dev;
+}
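
For orientation, the cmds[] slots built here pair a filter with a callback and are consumed by the netdev event work machinery outside this hunk. A hypothetical sketch of that consumption loop follows; the helper name and the exact cookie plumbing are illustrative, not the kernel's actual handler:

/* Hypothetical consumption loop for a cmds[] table. */
static void example_run_cmds(struct ib_device *ib_dev, u8 port,
			     struct net_device *rdma_ndev,
			     const struct netdev_event_work_cmd *cmds,
			     unsigned int ncmds)
{
	unsigned int i;

	for (i = 0; i < ncmds; i++) {
		if (!cmds[i].cb)
			break;	/* unused trailing slots */
		if (cmds[i].filter(ib_dev, port, rdma_ndev,
				   cmds[i].filter_ndev ?: cmds[i].ndev))
			cmds[i].cb(ib_dev, port, rdma_ndev, cmds[i].ndev);
	}
}
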
+
+static void netdevice_event_changeupper(struct net_device *event_ndev,
+ struct netdev_notifier_changeupper_info *changeupper_info,
+ struct netdev_event_work_cmd *cmds)
+{
+ if (changeupper_info->linking)
+ ndev_event_link(event_ndev, changeupper_info, cmds);
+ else
+ ndev_event_unlink(changeupper_info, cmds);
+}
+
+static const struct netdev_event_work_cmd add_default_gid_cmd = {
+ .cb = add_default_gids,
+ .filter = is_ndev_for_default_gid_filter,
+};
+
static int netdevice_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
static const struct netdev_event_work_cmd del_cmd = {
.cb = del_netdev_ips, .filter = pass_all_filter};
- static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
- .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
- static const struct netdev_event_work_cmd default_del_cmd = {
- .cb = del_netdev_default_ips, .filter = pass_all_filter};
+ static const struct netdev_event_work_cmd
+ bonding_default_del_cmd_join = {
+ .cb = del_netdev_default_ips_join,
+ .filter = is_eth_port_inactive_slave_filter
+ };
+ static const struct netdev_event_work_cmd
+ netdev_del_cmd = {
+ .cb = del_netdev_ips,
+ .filter = is_eth_port_of_netdev_filter
+ };
static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
.cb = del_netdev_upper_ips, .filter = upper_device_filter};
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
@@ -621,7 +754,8 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
case NETDEV_REGISTER:
case NETDEV_UP:
cmds[0] = bonding_default_del_cmd_join;
- cmds[1] = add_cmd;
+ cmds[1] = add_default_gid_cmd;
+ cmds[2] = add_cmd;
break;
case NETDEV_UNREGISTER:
@@ -632,19 +766,22 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
break;
case NETDEV_CHANGEADDR:
- cmds[0] = default_del_cmd;
- cmds[1] = add_cmd;
+ cmds[0] = netdev_del_cmd;
+ cmds[1] = add_default_gid_cmd;
+ cmds[2] = add_cmd;
break;
case NETDEV_CHANGEUPPER:
- netdevice_event_changeupper(
+ netdevice_event_changeupper(ndev,
container_of(ptr, struct netdev_notifier_changeupper_info, info),
cmds);
break;
case NETDEV_BONDING_FAILOVER:
cmds[0] = bonding_event_ips_del_cmd;
- cmds[1] = bonding_default_del_cmd_join;
+ /* Add default GIDs of the bond device */
+ cmds[1] = bonding_default_add_cmd;
+ /* Add IP based GIDs of the bond device */
cmds[2] = add_cmd_upper_ips;
break;
@@ -660,7 +797,8 @@ static void update_gid_event_work_handler(struct work_struct *_work)
struct update_gid_event_work *work =
container_of(_work, struct update_gid_event_work, work);
- ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
+ ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter,
+ work->gid_attr.ndev,
callback_for_addr_gid_device_scan, work);
dev_put(work->gid_attr.ndev);
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index c8963e91f92a..683e6d11a564 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -87,7 +87,7 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
}
ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
- if (ret < nents) {
+ if (ret < 0 || ret < nents) {
ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
return -EINVAL;
}
@@ -325,7 +325,7 @@ out_unmap_sg:
EXPORT_SYMBOL(rdma_rw_ctx_init);
/**
- * rdma_rw_ctx_signature init - initialize a RW context with signature offload
+ * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
* @ctx: context to initialize
* @qp: queue pair to operate on
* @port_num: port num to which the connection is bound
@@ -564,10 +564,10 @@ EXPORT_SYMBOL(rdma_rw_ctx_wrs);
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
- struct ib_send_wr *first_wr, *bad_wr;
+ struct ib_send_wr *first_wr;
first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
- return ib_post_send(qp, first_wr, &bad_wr);
+ return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a61ec7e33613..7b794a14d6e8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1227,20 +1227,10 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-static int
-roce_resolve_route_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec)
+static int roce_resolve_route_from_path(struct sa_path_rec *rec,
+ const struct ib_gid_attr *attr)
{
- struct net_device *resolved_dev;
- struct net_device *ndev;
- struct net_device *idev;
- struct rdma_dev_addr dev_addr = {
- .bound_dev_if = ((sa_path_get_ifindex(rec) >= 0) ?
- sa_path_get_ifindex(rec) : 0),
- .net = sa_path_get_ndev(rec) ?
- sa_path_get_ndev(rec) :
- &init_net
- };
+ struct rdma_dev_addr dev_addr = {};
union {
struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
@@ -1250,9 +1240,14 @@ roce_resolve_route_from_path(struct ib_device *device, u8 port_num,
if (rec->roce.route_resolved)
return 0;
+ if (!attr || !attr->ndev)
+ return -EINVAL;
- if (!device->get_netdev)
- return -EOPNOTSUPP;
+ dev_addr.bound_dev_if = attr->ndev->ifindex;
+ /* TODO: Use the net from the ib_gid_attr once it is added to it;
+ * until then, limit ourselves to init_net.
+ */
+ dev_addr.net = &init_net;
rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
@@ -1268,60 +1263,52 @@ roce_resolve_route_from_path(struct ib_device *device, u8 port_num,
rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
return -EINVAL;
- idev = device->get_netdev(device, port_num);
- if (!idev)
- return -ENODEV;
-
- resolved_dev = dev_get_by_index(dev_addr.net,
- dev_addr.bound_dev_if);
- if (!resolved_dev) {
- ret = -ENODEV;
- goto done;
- }
- ndev = ib_get_ndev_from_path(rec);
- rcu_read_lock();
- if ((ndev && ndev != resolved_dev) ||
- (resolved_dev != idev &&
- !rdma_is_upper_dev_rcu(idev, resolved_dev)))
- ret = -EHOSTUNREACH;
- rcu_read_unlock();
- dev_put(resolved_dev);
- if (ndev)
- dev_put(ndev);
-done:
- dev_put(idev);
- if (!ret)
- rec->roce.route_resolved = true;
- return ret;
+ rec->roce.route_resolved = true;
+ return 0;
}
static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr)
+ struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *gid_attr)
{
enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);
- struct net_device *ndev;
- u16 gid_index;
- int ret;
- ndev = ib_get_ndev_from_path(rec);
- ret = ib_find_cached_gid_by_port(device, &rec->sgid, type,
- port_num, ndev, &gid_index);
- if (ndev)
- dev_put(ndev);
- if (ret)
- return ret;
+ if (!gid_attr) {
+ gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
+ port_num, NULL);
+ if (IS_ERR(gid_attr))
+ return PTR_ERR(gid_attr);
+ } else
+ rdma_hold_gid_attr(gid_attr);
- rdma_ah_set_grh(ah_attr, &rec->dgid,
- be32_to_cpu(rec->flow_label),
- gid_index, rec->hop_limit,
- rec->traffic_class);
+ rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
+ be32_to_cpu(rec->flow_label),
+ rec->hop_limit, rec->traffic_class,
+ gid_attr);
return 0;
}
+/**
+ * ib_init_ah_attr_from_path - Initialize address handle attributes based on
+ * an SA path record.
+ * @device: Device associated with the ah attributes initialization.
+ * @port_num: Port on the specified device.
+ * @rec: path record entry to use for ah attributes initialization.
+ * @ah_attr: address handle attributes to initialize from the path record.
+ * @gid_attr: SGID attribute to consider during initialization.
+ *
+ * When ib_init_ah_attr_from_path() returns success:
+ * (a) for the IB link layer, ah_attr optionally holds a reference to the
+ * SGID attribute when a GRH is present;
+ * (b) for the RoCE link layer, ah_attr always holds a reference to the
+ * SGID attribute.
+ * The user must invoke rdma_destroy_ah_attr() to release the reference to
+ * SGID attributes initialized by ib_init_ah_attr_from_path().
+ */
int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr)
+ struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *gid_attr)
{
int ret = 0;
@@ -1332,7 +1319,7 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
rdma_ah_set_static_rate(ah_attr, rec->rate);
if (sa_path_is_roce(rec)) {
- ret = roce_resolve_route_from_path(device, port_num, rec);
+ ret = roce_resolve_route_from_path(rec, gid_attr);
if (ret)
return ret;
@@ -1349,7 +1336,8 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
}
if (rec->hop_limit > 0 || sa_path_is_roce(rec))
- ret = init_ah_attr_grh_fields(device, port_num, rec, ah_attr);
+ ret = init_ah_attr_grh_fields(device, port_num,
+ rec, ah_attr, gid_attr);
return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);
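
Per the kernel-doc above, success hands the caller a reference on the SGID attribute, so every call must be balanced with rdma_destroy_ah_attr(). A short usage sketch; the caller context is assumed:

/* Usage sketch (assumed caller): initialize, consume, then release. */
static int example_make_ah(struct ib_device *device, u8 port_num,
			   struct sa_path_rec *rec, struct ib_pd *pd)
{
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	int ret;

	ret = ib_init_ah_attr_from_path(device, port_num, rec, &ah_attr, NULL);
	if (ret)
		return ret;

	ah = rdma_create_ah(pd, &ah_attr);	/* consume the attributes */
	rdma_destroy_ah_attr(&ah_attr);		/* drop the SGID reference */
	return PTR_ERR_OR_ZERO(ah);
}
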
@@ -1557,8 +1545,6 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
ARRAY_SIZE(path_rec_table),
mad->data, &rec);
rec.rec_type = SA_PATH_REC_TYPE_IB;
- sa_path_set_ndev(&rec, NULL);
- sa_path_set_ifindex(&rec, 0);
sa_path_set_dmac_zero(&rec);
if (query->conv_pr) {
@@ -2290,6 +2276,7 @@ static void update_sm_ah(struct work_struct *work)
struct ib_sa_sm_ah *new_ah;
struct ib_port_attr port_attr;
struct rdma_ah_attr ah_attr;
+ bool grh_required;
if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
pr_warn("Couldn't query port\n");
@@ -2314,16 +2301,27 @@ static void update_sm_ah(struct work_struct *work)
rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
rdma_ah_set_port_num(&ah_attr, port->port_num);
- if (port_attr.grh_required) {
- if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA) {
- rdma_ah_set_make_grd(&ah_attr, true);
- } else {
- rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
- rdma_ah_set_subnet_prefix(&ah_attr,
- cpu_to_be64(port_attr.subnet_prefix));
- rdma_ah_set_interface_id(&ah_attr,
- cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
- }
+
+ grh_required = rdma_is_grh_required(port->agent->device,
+ port->port_num);
+
+ /*
+ * The OPA sm_lid of 0xFFFF needs special handling so that it can be
+ * differentiated from a permissive LID of 0xFFFF. We set the
+ * grh_required flag here so the SA can program the DGID in the
+ * address handle appropriately.
+ */
+ if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
+ (grh_required ||
+ port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
+ rdma_ah_set_make_grd(&ah_attr, true);
+
+ if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
+ rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
+ rdma_ah_set_subnet_prefix(&ah_attr,
+ cpu_to_be64(port_attr.subnet_prefix));
+ rdma_ah_set_interface_id(&ah_attr,
+ cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
}
new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 31c7efaf8e7a..7fd14ead7b37 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -42,6 +42,7 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_pma.h>
+#include <rdma/ib_cache.h>
struct ib_port;
@@ -346,7 +347,7 @@ static struct attribute *port_default_attrs[] = {
NULL
};
-static size_t print_ndev(struct ib_gid_attr *gid_attr, char *buf)
+static size_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf)
{
if (!gid_attr->ndev)
return -EINVAL;
@@ -354,33 +355,26 @@ static size_t print_ndev(struct ib_gid_attr *gid_attr, char *buf)
return sprintf(buf, "%s\n", gid_attr->ndev->name);
}
-static size_t print_gid_type(struct ib_gid_attr *gid_attr, char *buf)
+static size_t print_gid_type(const struct ib_gid_attr *gid_attr, char *buf)
{
return sprintf(buf, "%s\n", ib_cache_gid_type_str(gid_attr->gid_type));
}
-static ssize_t _show_port_gid_attr(struct ib_port *p,
- struct port_attribute *attr,
- char *buf,
- size_t (*print)(struct ib_gid_attr *gid_attr,
- char *buf))
+static ssize_t _show_port_gid_attr(
+ struct ib_port *p, struct port_attribute *attr, char *buf,
+ size_t (*print)(const struct ib_gid_attr *gid_attr, char *buf))
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
- union ib_gid gid;
- struct ib_gid_attr gid_attr = {};
+ const struct ib_gid_attr *gid_attr;
ssize_t ret;
- ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
- &gid_attr);
- if (ret)
- goto err;
+ gid_attr = rdma_get_gid_attr(p->ibdev, p->port_num, tab_attr->index);
+ if (IS_ERR(gid_attr))
+ return PTR_ERR(gid_attr);
- ret = print(&gid_attr, buf);
-
-err:
- if (gid_attr.ndev)
- dev_put(gid_attr.ndev);
+ ret = print(gid_attr, buf);
+ rdma_put_gid_attr(gid_attr);
return ret;
}
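
The get/put discipline introduced here applies to every reader of GID table entries: the entry is reference-counted instead of copied out. A minimal sketch of the pattern, reusing calls that appear in this hunk:

/* Sketch of the rdma_get_gid_attr()/rdma_put_gid_attr() pairing. */
static ssize_t example_read_gid_type(struct ib_device *ibdev, u8 port_num,
				     int index, char *buf)
{
	const struct ib_gid_attr *gid_attr;
	ssize_t ret;

	gid_attr = rdma_get_gid_attr(ibdev, port_num, index);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	ret = sprintf(buf, "%s\n", ib_cache_gid_type_str(gid_attr->gid_type));
	rdma_put_gid_attr(gid_attr);	/* balance the reference */
	return ret;
}
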
@@ -389,26 +383,28 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
{
struct port_table_attribute *tab_attr =
container_of(attr, struct port_table_attribute, attr);
- union ib_gid *pgid;
- union ib_gid gid;
+ const struct ib_gid_attr *gid_attr;
ssize_t ret;
- ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, NULL);
+ gid_attr = rdma_get_gid_attr(p->ibdev, p->port_num, tab_attr->index);
+ if (IS_ERR(gid_attr)) {
+ const union ib_gid zgid = {};
+
+ /* If reading a GID fails, it is likely because the GID entry is
+ * empty (invalid) or a reserved GID in the table. User space
+ * expects to be able to read GID table entries as long as the
+ * given index is within the GID table size. Administrative/
+ * debugging tools stop querying the rest of the GID entries if
+ * they hit an error while querying a GID at a given index. To
+ * avoid user space raising such an error on a failed GID read,
+ * return the zero GID as before. This maintains backward
+ * compatibility.
+ */
+ return sprintf(buf, "%pI6\n", zgid.raw);
+ }
- /* If reading GID fails, it is likely due to GID entry being empty
- * (invalid) or reserved GID in the table.
- * User space expects to read GID table entries as long as it given
- * index is within GID table size.
- * Administrative/debugging tool fails to query rest of the GID entries
- * if it hits error while querying a GID of the given index.
- * To avoid user space throwing such error on fail to read gid, return
- * zero GID as before. This maintains backward compatibility.
- */
- if (ret)
- pgid = &zgid;
- else
- pgid = &gid;
- return sprintf(buf, "%pI6\n", pgid->raw);
+ ret = sprintf(buf, "%pI6\n", gid_attr->gid.raw);
+ rdma_put_gid_attr(gid_attr);
+ return ret;
}
static ssize_t show_port_gid_attr_ndev(struct ib_port *p,
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 9eef96dacbd7..faa9e6116b2f 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -207,7 +207,7 @@ error:
}
static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
- struct ib_cm_req_event_param *kreq)
+ const struct ib_cm_req_event_param *kreq)
{
ureq->remote_ca_guid = kreq->remote_ca_guid;
ureq->remote_qkey = kreq->remote_qkey;
@@ -231,7 +231,7 @@ static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
}
static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
- struct ib_cm_rep_event_param *krep)
+ const struct ib_cm_rep_event_param *krep)
{
urep->remote_ca_guid = krep->remote_ca_guid;
urep->remote_qkey = krep->remote_qkey;
@@ -247,14 +247,14 @@ static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
}
static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
- struct ib_cm_sidr_rep_event_param *krep)
+ const struct ib_cm_sidr_rep_event_param *krep)
{
urep->status = krep->status;
urep->qkey = krep->qkey;
urep->qpn = krep->qpn;
};
-static int ib_ucm_event_process(struct ib_cm_event *evt,
+static int ib_ucm_event_process(const struct ib_cm_event *evt,
struct ib_ucm_event *uvt)
{
void *info = NULL;
@@ -351,7 +351,7 @@ err1:
}
static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event)
+ const struct ib_cm_event *event)
{
struct ib_ucm_event *uevent;
struct ib_ucm_context *ctx;
@@ -1000,14 +1000,11 @@ static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
const char __user *inbuf,
int in_len, int out_len)
{
- struct ib_cm_sidr_req_param param;
+ struct ib_cm_sidr_req_param param = {};
struct ib_ucm_context *ctx;
struct ib_ucm_sidr_req cmd;
int result;
- param.private_data = NULL;
- param.path = NULL;
-
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 54ab6335c48d..a41792dbae1f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -84,7 +84,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
struct ib_umem *umem;
struct page **page_list;
struct vm_area_struct **vma_list;
- unsigned long locked;
unsigned long lock_limit;
unsigned long cur_base;
unsigned long npages;
@@ -92,7 +91,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
int i;
unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
- int need_release = 0;
unsigned int gup_flags = FOLL_WRITE;
if (dmasync)
@@ -121,10 +119,8 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (access & IB_ACCESS_ON_DEMAND) {
ret = ib_umem_odp_get(context, umem, access);
- if (ret) {
- kfree(umem);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto umem_kfree;
return umem;
}
@@ -135,8 +131,8 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
- kfree(umem);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto umem_kfree;
}
/*
@@ -149,41 +145,43 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
npages = ib_umem_num_pages(umem);
- down_write(&current->mm->mmap_sem);
-
- locked = npages + current->mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ down_write(&current->mm->mmap_sem);
+ current->mm->pinned_vm += npages;
+ if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ up_write(&current->mm->mmap_sem);
ret = -ENOMEM;
- goto out;
+ goto vma;
}
+ up_write(&current->mm->mmap_sem);
cur_base = addr & PAGE_MASK;
if (npages == 0 || npages > UINT_MAX) {
ret = -EINVAL;
- goto out;
+ goto vma;
}
ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
if (ret)
- goto out;
+ goto vma;
if (!umem->writable)
gup_flags |= FOLL_FORCE;
- need_release = 1;
sg_list_start = umem->sg_head.sgl;
+ down_read(&current->mm->mmap_sem);
while (npages) {
ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags, page_list, vma_list);
-
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ up_read(&current->mm->mmap_sem);
+ goto umem_release;
+ }
umem->npages += ret;
cur_base += ret * PAGE_SIZE;
@@ -199,6 +197,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
/* preparing for next loop */
sg_list_start = sg;
}
+ up_read(&current->mm->mmap_sem);
umem->nmap = ib_dma_map_sg_attrs(context->device,
umem->sg_head.sgl,
@@ -206,27 +205,28 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
DMA_BIDIRECTIONAL,
dma_attrs);
- if (umem->nmap <= 0) {
+ if (!umem->nmap) {
ret = -ENOMEM;
- goto out;
+ goto umem_release;
}
ret = 0;
+ goto out;
-out:
- if (ret < 0) {
- if (need_release)
- __ib_umem_release(context->device, umem, 0);
- kfree(umem);
- } else
- current->mm->pinned_vm = locked;
-
+umem_release:
+ __ib_umem_release(context->device, umem, 0);
+vma:
+ down_write(&current->mm->mmap_sem);
+ current->mm->pinned_vm -= ib_umem_num_pages(umem);
up_write(&current->mm->mmap_sem);
+out:
if (vma_list)
free_page((unsigned long) vma_list);
free_page((unsigned long) page_list);
-
- return ret < 0 ? ERR_PTR(ret) : umem;
+umem_kfree:
+ if (ret)
+ kfree(umem);
+ return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
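
The reordered accounting above charges pinned_vm before checking the limit, then backs the charge out on every failure path. Reduced to a helper, the pattern looks like this (a sketch of the technique, not the kernel function itself):

/* Sketch of the charge-first pinned-page accounting used above. */
static int charge_pinned_pages(struct mm_struct *mm, unsigned long npages)
{
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	down_write(&mm->mmap_sem);
	mm->pinned_vm += npages;
	if (mm->pinned_vm > lock_limit && !capable(CAP_IPC_LOCK)) {
		mm->pinned_vm -= npages;	/* undo the charge */
		up_write(&mm->mmap_sem);
		return -ENOMEM;
	}
	up_write(&mm->mmap_sem);
	return 0;
}
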
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 182436b92ba9..6ec748eccff7 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -186,6 +186,7 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn,
rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
ULLONG_MAX,
ib_umem_notifier_release_trampoline,
+ true,
NULL);
up_read(&context->umem_rwsem);
}
@@ -207,22 +208,31 @@ static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
return 0;
}
-static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
+static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+ int ret;
if (!context->invalidate_range)
- return;
+ return 0;
+
+ if (blockable)
+ down_read(&context->umem_rwsem);
+ else if (!down_read_trylock(&context->umem_rwsem))
+ return -EAGAIN;
ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ ret = rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
end,
- invalidate_range_start_trampoline, NULL);
+ invalidate_range_start_trampoline,
+ blockable, NULL);
up_read(&context->umem_rwsem);
+
+ return ret;
}
static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
@@ -242,10 +252,15 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
if (!context->invalidate_range)
return;
+ /*
+ * TODO: we currently bail out if there is any sleepable work to be done
+ * in ib_umem_notifier_invalidate_range_start so we shouldn't really block
+ * here. But this is ugly and fragile.
+ */
down_read(&context->umem_rwsem);
rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
end,
- invalidate_range_end_trampoline, NULL);
+ invalidate_range_end_trampoline, true, NULL);
up_read(&context->umem_rwsem);
ib_ucontext_notifier_end_account(context);
}
@@ -798,6 +813,7 @@ EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
u64 start, u64 last,
umem_call_back cb,
+ bool blockable,
void *cookie)
{
int ret_val = 0;
@@ -809,6 +825,9 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
for (node = rbt_ib_umem_iter_first(root, start, last - 1);
node; node = next) {
+ /* TODO move the blockable decision up to the callback */
+ if (!blockable)
+ return -EAGAIN;
next = rbt_ib_umem_iter_next(node, start, last - 1);
umem = container_of(node, struct ib_umem_odp, interval_tree);
ret_val = cb(umem->umem, start, last, cookie) || ret_val;
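
For what it's worth, the TODO above would push the blockable decision into each callback; one hypothetical shape of that (work_would_sleep() is an invented placeholder, not a kernel API):

/* Hypothetical sketch of the TODO: pass blockable down so each
 * callback can bail out itself rather than the iterator doing it.
 */
static int example_invalidate_cb(struct ib_umem *item, u64 start, u64 last,
				 bool blockable, void *cookie)
{
	if (!blockable && work_would_sleep(item))	/* assumed helper */
		return -EAGAIN;
	/* ... non-sleeping invalidation work ... */
	return 0;
}
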
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index bb98c9e4a7fd..c34a6852d691 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -268,6 +268,7 @@ static void recv_handler(struct ib_mad_agent *agent,
packet->mad.hdr.traffic_class = grh->traffic_class;
memcpy(packet->mad.hdr.gid, &grh->dgid, 16);
packet->mad.hdr.flow_label = cpu_to_be32(grh->flow_label);
+ rdma_destroy_ah_attr(&ah_attr);
}
if (queue_packet(file, agent, packet))
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index c0d40fc3a53a..5df8e548cc14 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -111,7 +111,7 @@ struct ib_uverbs_device {
struct mutex lists_mutex; /* protect lists */
struct list_head uverbs_file_list;
struct list_head uverbs_events_file_list;
- struct uverbs_root_spec *specs_root;
+ struct uverbs_api *uapi;
};
struct ib_uverbs_event_queue {
@@ -130,21 +130,37 @@ struct ib_uverbs_async_event_file {
};
struct ib_uverbs_completion_event_file {
- struct ib_uobject_file uobj_file;
+ struct ib_uobject uobj;
struct ib_uverbs_event_queue ev_queue;
};
struct ib_uverbs_file {
struct kref ref;
- struct mutex mutex;
- struct mutex cleanup_mutex; /* protect cleanup */
struct ib_uverbs_device *device;
+ struct mutex ucontext_lock;
+ /*
+ * ucontext must be accessed via ib_uverbs_get_ucontext() or with
+ * ucontext_lock held.
+ */
struct ib_ucontext *ucontext;
struct ib_event_handler event_handler;
struct ib_uverbs_async_event_file *async_file;
struct list_head list;
int is_closed;
+ /*
+ * To access the uobjects list, hw_destroy_rwsem must be held for
+ * write, OR hw_destroy_rwsem held for read AND uobjects_lock held.
+ * hw_destroy_rwsem should be held across any destruction of the HW
+ * object of an associated uobject.
+ */
+ struct rw_semaphore hw_destroy_rwsem;
+ spinlock_t uobjects_lock;
+ struct list_head uobjects;
+
+ u64 uverbs_cmd_mask;
+ u64 uverbs_ex_cmd_mask;
+
struct idr idr;
/* spinlock protects write access to idr */
spinlock_t idr_lock;
@@ -196,7 +212,6 @@ struct ib_uwq_object {
struct ib_ucq_object {
struct ib_uobject uobject;
- struct ib_uverbs_file *uverbs_file;
struct list_head comp_list;
struct list_head async_list;
u32 comp_events_reported;
@@ -230,7 +245,7 @@ void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event);
-int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd,
+int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
enum rdma_remove_reason why);
int uverbs_dealloc_mw(struct ib_mw *mw);
@@ -238,12 +253,7 @@ void ib_uverbs_detach_umcast(struct ib_qp *qp,
struct ib_uqp_object *uobj);
void create_udata(struct uverbs_attr_bundle *ctx, struct ib_udata *udata);
-extern const struct uverbs_attr_def uverbs_uhw_compat_in;
-extern const struct uverbs_attr_def uverbs_uhw_compat_out;
long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-int uverbs_destroy_def_handler(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs);
struct ib_uverbs_flow_spec {
union {
@@ -292,7 +302,6 @@ extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS);
#define IB_UVERBS_DECLARE_CMD(name) \
ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
- struct ib_device *ib_dev, \
const char __user *buf, int in_len, \
int out_len)
@@ -334,7 +343,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
#define IB_UVERBS_DECLARE_EX_CMD(name) \
int ib_uverbs_ex_##name(struct ib_uverbs_file *file, \
- struct ib_device *ib_dev, \
struct ib_udata *ucore, \
struct ib_udata *uhw)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3e90b6a1d9d2..a21d5214afc3 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -48,11 +48,10 @@
#include "core_priv.h"
static struct ib_uverbs_completion_event_file *
-ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
+_ib_uverbs_lookup_comp_file(s32 fd, struct ib_uverbs_file *ufile)
{
- struct ib_uobject *uobj = uobj_get_read(UVERBS_OBJECT_COMP_CHANNEL,
- fd, context);
- struct ib_uobject_file *uobj_file;
+ struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
+ fd, ufile);
if (IS_ERR(uobj))
return (void *)uobj;
@@ -60,13 +59,13 @@ ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
uverbs_uobject_get(uobj);
uobj_put_read(uobj);
- uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
- return container_of(uobj_file, struct ib_uverbs_completion_event_file,
- uobj_file);
+ return container_of(uobj, struct ib_uverbs_completion_event_file,
+ uobj);
}
+#define ib_uverbs_lookup_comp_file(_fd, _ufile) \
+ _ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)
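
The multiplication here is a compile-time guard: typecheck() evaluates to 1 after asserting the argument's type, so the wrapper costs nothing at runtime but breaks the build for callers that pass anything other than an s32. A usage sketch; the field name is illustrative:

/* typecheck(s32, _fd) expands to 1, so multiplying by it leaves the
 * fd unchanged while rejecting wrongly-typed callers at build time.
 * cmd.comp_channel is an illustrative field name.
 */
s32 fd = cmd.comp_channel;
struct ib_uverbs_completion_event_file *ev_file =
	ib_uverbs_lookup_comp_file(fd, file);		/* type-checked */
/* ib_uverbs_lookup_comp_file((u64)fd, file); would warn/fail to build */
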
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -76,6 +75,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
struct ib_ucontext *ucontext;
struct file *filp;
struct ib_rdmacg_object cg_obj;
+ struct ib_device *ib_dev;
int ret;
if (out_len < sizeof resp)
@@ -84,7 +84,13 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- mutex_lock(&file->mutex);
+ mutex_lock(&file->ucontext_lock);
+ ib_dev = srcu_dereference(file->device->ib_dev,
+ &file->device->disassociate_srcu);
+ if (!ib_dev) {
+ ret = -EIO;
+ goto err;
+ }
if (file->ucontext) {
ret = -EINVAL;
@@ -110,12 +116,12 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
ucontext->cg_obj = cg_obj;
/* ufile is required when some objects are released */
ucontext->ufile = file;
- uverbs_initialize_ucontext(ucontext);
rcu_read_lock();
ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
rcu_read_unlock();
ucontext->closing = 0;
+ ucontext->cleanup_retryable = false;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
ucontext->umem_tree = RB_ROOT_CACHED;
@@ -146,11 +152,15 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
goto err_file;
}
- file->ucontext = ucontext;
-
fd_install(resp.async_fd, filp);
- mutex_unlock(&file->mutex);
+ /*
+ * Make sure that ib_uverbs_get_ucontext() sees the pointer update
+ * only after all writes to set up the ucontext have completed.
+ */
+ smp_store_release(&file->ucontext, ucontext);
+
+ mutex_unlock(&file->ucontext_lock);
return in_len;
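
The release store only pays off if readers pair it with an acquire load; a sketch of the matching read side, assuming ib_uverbs_get_ucontext() is that reader, as the comment above implies:

/* Sketch of the assumed acquire side in ib_uverbs_get_ucontext():
 * a reader either sees NULL or a fully initialized ucontext, never
 * a partially constructed one.
 */
static struct ib_ucontext *example_get_ucontext(struct ib_uverbs_file *ufile)
{
	struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

	if (!ucontext)
		return ERR_PTR(-EINVAL);
	return ucontext;
}
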
@@ -169,15 +179,16 @@ err_alloc:
ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
err:
- mutex_unlock(&file->mutex);
+ mutex_unlock(&file->ucontext_lock);
return ret;
}
-static void copy_query_dev_fields(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
+static void copy_query_dev_fields(struct ib_ucontext *ucontext,
struct ib_uverbs_query_device_resp *resp,
struct ib_device_attr *attr)
{
+ struct ib_device *ib_dev = ucontext->device;
+
resp->fw_ver = attr->fw_ver;
resp->node_guid = ib_dev->node_guid;
resp->sys_image_guid = attr->sys_image_guid;
@@ -189,7 +200,7 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
resp->max_qp = attr->max_qp;
resp->max_qp_wr = attr->max_qp_wr;
resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
- resp->max_sge = attr->max_sge;
+ resp->max_sge = min(attr->max_send_sge, attr->max_recv_sge);
resp->max_sge_rd = attr->max_sge_rd;
resp->max_cq = attr->max_cq;
resp->max_cqe = attr->max_cqe;
@@ -221,12 +232,16 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
struct ib_uverbs_query_device cmd;
struct ib_uverbs_query_device_resp resp;
+ struct ib_ucontext *ucontext;
+
+ ucontext = ib_uverbs_get_ucontext(file);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
if (out_len < sizeof resp)
return -ENOSPC;
@@ -235,7 +250,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
return -EFAULT;
memset(&resp, 0, sizeof resp);
- copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
+ copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
@@ -243,8 +258,28 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
return in_len;
}
+/*
+ * ib_uverbs_query_port_resp.port_cap_flags started out as just a copy of the
+ * PortInfo CapabilityMask, but was extended with unique bits.
+ */
+static u32 make_port_cap_flags(const struct ib_port_attr *attr)
+{
+ u32 res;
+
+ /* All IBA CapabilityMask bits are passed through here, except bit 26,
+ * which is overridden with IP_BASED_GIDS. This is due to a historical
+ * mistake in the implementation of IP_BASED_GIDS. Otherwise all other
+ * bits match the IBA definition across all kernel versions.
+ */
+ res = attr->port_cap_flags & ~(u32)IB_UVERBS_PCF_IP_BASED_GIDS;
+
+ if (attr->ip_gids)
+ res |= IB_UVERBS_PCF_IP_BASED_GIDS;
+
+ return res;
+}
+
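
A quick worked example of the bit-26 override, assuming IB_UVERBS_PCF_IP_BASED_GIDS is bit 26 (0x04000000) as the comment states; the values are illustrative:

/* Illustrative values: the raw IBA bit 26 is always stripped and
 * only re-added from attr->ip_gids.
 */
static u32 example_port_cap_flags(void)
{
	struct ib_port_attr attr = {
		.port_cap_flags = 0x04010000,	/* IBA bits 26 and 16 set */
		.ip_gids = false,
	};

	return make_port_cap_flags(&attr);	/* == 0x00010000 */
}
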
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -252,6 +287,13 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
struct ib_uverbs_query_port_resp resp;
struct ib_port_attr attr;
int ret;
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
+
+ ucontext = ib_uverbs_get_ucontext(file);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -269,12 +311,15 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
resp.max_mtu = attr.max_mtu;
resp.active_mtu = attr.active_mtu;
resp.gid_tbl_len = attr.gid_tbl_len;
- resp.port_cap_flags = attr.port_cap_flags;
+ resp.port_cap_flags = make_port_cap_flags(&attr);
resp.max_msg_sz = attr.max_msg_sz;
resp.bad_pkey_cntr = attr.bad_pkey_cntr;
resp.qkey_viol_cntr = attr.qkey_viol_cntr;
resp.pkey_tbl_len = attr.pkey_tbl_len;
+ if (rdma_is_grh_required(ib_dev, cmd.port_num))
+ resp.flags |= IB_UVERBS_QPF_GRH_REQUIRED;
+
if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
@@ -300,7 +345,6 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -310,6 +354,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
struct ib_pd *pd;
int ret;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -322,11 +367,11 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- uobj = uobj_alloc(UVERBS_OBJECT_PD, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_PD, file, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
+ pd = ib_dev->alloc_pd(ib_dev, uobj->context, &udata);
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err;
@@ -348,9 +393,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
goto err_copy;
}
- uobj_alloc_commit(uobj);
-
- return in_len;
+ return uobj_alloc_commit(uobj, in_len);
err_copy:
ib_dealloc_pd(pd);
@@ -361,25 +404,16 @@ err:
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
struct ib_uverbs_dealloc_pd cmd;
- struct ib_uobject *uobj;
- int ret;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_PD, cmd.pd_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
-
- return ret ?: in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, file,
+ in_len);
}
struct xrcd_table_entry {
@@ -468,7 +502,6 @@ static void xrcd_table_delete(struct ib_uverbs_device *dev,
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -481,6 +514,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
struct inode *inode = NULL;
int ret = 0;
int new_xrcd = 0;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -517,15 +551,15 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
}
}
- obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD,
- file->ucontext);
+ obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, file,
+ &ib_dev);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_tree_mutex_unlock;
}
if (!xrcd) {
- xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
+ xrcd = ib_dev->alloc_xrcd(ib_dev, obj->uobject.context, &udata);
if (IS_ERR(xrcd)) {
ret = PTR_ERR(xrcd);
goto err;
@@ -564,9 +598,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
mutex_unlock(&file->device->xrcd_tree_mutex);
- uobj_alloc_commit(&obj->uobject);
-
- return in_len;
+ return uobj_alloc_commit(&obj->uobject, in_len);
err_copy:
if (inode) {
@@ -591,32 +623,25 @@ err_tree_mutex_unlock:
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_close_xrcd cmd;
- struct ib_uobject *uobj;
- int ret = 0;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_XRCD, cmd.xrcd_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
- return ret ?: in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, file,
+ in_len);
}
-int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
+int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject,
struct ib_xrcd *xrcd,
enum rdma_remove_reason why)
{
struct inode *inode;
int ret;
+ struct ib_uverbs_device *dev = uobject->context->ufile->device;
inode = xrcd->inode;
if (inode && !atomic_dec_and_test(&xrcd->usecnt))
@@ -624,16 +649,18 @@ int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
ret = ib_dealloc_xrcd(xrcd);
- if (why == RDMA_REMOVE_DESTROY && ret)
+ if (ib_is_destroy_retryable(ret, why, uobject)) {
atomic_inc(&xrcd->usecnt);
- else if (inode)
+ return ret;
+ }
+
+ if (inode)
xrcd_table_delete(dev, inode);
return ret;
}
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -644,6 +671,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
struct ib_pd *pd;
struct ib_mr *mr;
int ret;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -663,11 +691,11 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
if (ret)
return ret;
- uobj = uobj_alloc(UVERBS_OBJECT_MR, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_MR, file, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file);
if (!pd) {
ret = -EINVAL;
goto err_free;
@@ -711,9 +739,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
uobj_put_obj_read(pd);
- uobj_alloc_commit(uobj);
-
- return in_len;
+ return uobj_alloc_commit(uobj, in_len);
err_copy:
ib_dereg_mr(mr);
@@ -727,7 +753,6 @@ err_free:
}
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -759,8 +784,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
(cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
return -EINVAL;
- uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
- file->ucontext);
+ uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, file);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
@@ -778,7 +802,8 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
}
if (cmd.flags & IB_MR_REREG_PD) {
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
+ file);
if (!pd) {
ret = -EINVAL;
goto put_uobjs;
@@ -819,29 +844,19 @@ put_uobjs:
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_dereg_mr cmd;
- struct ib_uobject *uobj;
- int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
-
- return ret ?: in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, file,
+ in_len);
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -852,6 +867,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
struct ib_mw *mw;
struct ib_udata udata;
int ret;
+ struct ib_device *ib_dev;
if (out_len < sizeof(resp))
return -ENOSPC;
@@ -859,11 +875,11 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof(cmd)))
return -EFAULT;
- uobj = uobj_alloc(UVERBS_OBJECT_MW, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_MW, file, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file);
if (!pd) {
ret = -EINVAL;
goto err_free;
@@ -897,9 +913,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
}
uobj_put_obj_read(pd);
- uobj_alloc_commit(uobj);
-
- return in_len;
+ return uobj_alloc_commit(uobj, in_len);
err_copy:
uverbs_dealloc_mw(mw);
@@ -911,28 +925,19 @@ err_free:
}
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_dealloc_mw cmd;
- struct ib_uobject *uobj;
- int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof(cmd)))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_MW, cmd.mw_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
- return ret ?: in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, file,
+ in_len);
}
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -940,6 +945,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
struct ib_uverbs_create_comp_channel_resp resp;
struct ib_uobject *uobj;
struct ib_uverbs_completion_event_file *ev_file;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -947,14 +953,14 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, file, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
resp.fd = uobj->id;
ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
- uobj_file.uobj);
+ uobj);
ib_uverbs_init_event_queue(&ev_file->ev_queue);
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
@@ -962,12 +968,10 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
return -EFAULT;
}
- uobj_alloc_commit(uobj);
- return in_len;
+ return uobj_alloc_commit(uobj, in_len);
}
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw,
struct ib_uverbs_ex_create_cq *cmd,
@@ -985,21 +989,23 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
int ret;
struct ib_uverbs_ex_create_cq_resp resp;
struct ib_cq_init_attr attr = {};
-
- if (!ib_dev->create_cq)
- return ERR_PTR(-EOPNOTSUPP);
+ struct ib_device *ib_dev;
if (cmd->comp_vector >= file->device->num_comp_vectors)
return ERR_PTR(-EINVAL);
- obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ,
- file->ucontext);
+ obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, file,
+ &ib_dev);
if (IS_ERR(obj))
return obj;
+ if (!ib_dev->create_cq) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
if (cmd->comp_channel >= 0) {
- ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
- file->ucontext);
+ ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, file);
if (IS_ERR(ev_file)) {
ret = PTR_ERR(ev_file);
goto err;
@@ -1007,7 +1013,6 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
}
obj->uobject.user_handle = cmd->user_handle;
- obj->uverbs_file = file;
obj->comp_events_reported = 0;
obj->async_events_reported = 0;
INIT_LIST_HEAD(&obj->comp_list);
@@ -1019,7 +1024,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
attr.flags = cmd->flags;
- cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
+ cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context, uhw);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
goto err_file;
@@ -1047,7 +1052,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
if (ret)
goto err_cb;
- uobj_alloc_commit(&obj->uobject);
+ ret = uobj_alloc_commit(&obj->uobject, 0);
+ if (ret)
+ return ERR_PTR(ret);
return obj;
err_cb:
@@ -1075,7 +1082,6 @@ static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1106,7 +1112,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
cmd_ex.comp_vector = cmd.comp_vector;
cmd_ex.comp_channel = cmd.comp_channel;
- obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
+ obj = create_cq(file, &ucore, &uhw, &cmd_ex,
offsetof(typeof(cmd_ex), comp_channel) +
sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
NULL);
@@ -1129,7 +1135,6 @@ static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
}
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -1155,7 +1160,7 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
sizeof(resp.response_length)))
return -ENOSPC;
- obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
+ obj = create_cq(file, ucore, uhw, &cmd,
min(ucore->inlen, sizeof(cmd)),
ib_uverbs_ex_create_cq_cb, NULL);
@@ -1163,7 +1168,6 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1181,7 +1185,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (!cq)
return -EINVAL;
@@ -1231,7 +1235,6 @@ static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
}
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1246,7 +1249,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (!cq)
return -EINVAL;
@@ -1262,7 +1265,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
if (!ret)
break;
- ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
+ ret = copy_wc_to_user(cq->device, data_ptr, &wc);
if (ret)
goto out_put;
@@ -1283,7 +1286,6 @@ out_put:
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1293,7 +1295,7 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (!cq)
return -EINVAL;
@@ -1306,45 +1308,28 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_destroy_cq cmd;
struct ib_uverbs_destroy_cq_resp resp;
struct ib_uobject *uobj;
- struct ib_cq *cq;
struct ib_ucq_object *obj;
- int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_CQ, cmd.cq_handle,
- file->ucontext);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- /*
- * Make sure we don't free the memory in remove_commit as we still
- * needs the uobject memory to create the response.
- */
- uverbs_uobject_get(uobj);
- cq = uobj->object;
- obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
-
+ obj = container_of(uobj, struct ib_ucq_object, uobject);
memset(&resp, 0, sizeof(resp));
-
- ret = uobj_remove_commit(uobj);
- if (ret) {
- uverbs_uobject_put(uobj);
- return ret;
- }
-
resp.comp_events_reported = obj->comp_events_reported;
resp.async_events_reported = obj->async_events_reported;
- uverbs_uobject_put(uobj);
+ uobj_put_destroy(uobj);
+
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
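The same destroy-with-response shape recurs for QP, WQ and SRQ below. The
internals of uobj_get_destroy()/uobj_put_destroy() are an assumption here;
what is visible from this hunk is only that the uobject memory stays readable
between the two calls. A hedged sketch of the template:

/* Sketch of the destroy-with-response template used in this series. */
static ssize_t destroy_with_resp_sketch(struct ib_uverbs_file *file,
					u32 handle, u64 response)
{
	struct ib_uverbs_destroy_cq_resp resp = {};
	struct ib_ucq_object *obj;
	struct ib_uobject *uobj;

	uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, handle, file);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_ucq_object, uobject);
	resp.comp_events_reported = obj->comp_events_reported; /* still valid */
	uobj_put_destroy(uobj);		/* last use of uobj/obj */

	if (copy_to_user(u64_to_user_ptr(response), &resp, sizeof(resp)))
		return -EFAULT;
	return 0;
}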
@@ -1375,12 +1360,13 @@ static int create_qp(struct ib_uverbs_file *file,
int ret;
struct ib_rwq_ind_table *ind_tbl = NULL;
bool has_sq = true;
+ struct ib_device *ib_dev;
if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
return -EPERM;
- obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
- file->ucontext);
+ obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, file,
+ &ib_dev);
if (IS_ERR(obj))
return PTR_ERR(obj);
obj->uxrcd = NULL;
@@ -1390,9 +1376,9 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
sizeof(cmd->rwq_ind_tbl_handle) &&
(cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
- ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL,
- cmd->rwq_ind_tbl_handle,
- file->ucontext);
+ ind_tbl = uobj_get_obj_read(rwq_ind_table,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ cmd->rwq_ind_tbl_handle, file);
if (!ind_tbl) {
ret = -EINVAL;
goto err_put;
@@ -1418,7 +1404,7 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd->qp_type == IB_QPT_XRC_TGT) {
xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
- file->ucontext);
+ file);
if (IS_ERR(xrcd_uobj)) {
ret = -EINVAL;
@@ -1437,8 +1423,8 @@ static int create_qp(struct ib_uverbs_file *file,
cmd->max_recv_sge = 0;
} else {
if (cmd->is_srq) {
- srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd->srq_handle,
- file->ucontext);
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
+ cmd->srq_handle, file);
if (!srq || srq->srq_type == IB_SRQT_XRC) {
ret = -EINVAL;
goto err_put;
@@ -1447,8 +1433,9 @@ static int create_qp(struct ib_uverbs_file *file,
if (!ind_tbl) {
if (cmd->recv_cq_handle != cmd->send_cq_handle) {
- rcq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->recv_cq_handle,
- file->ucontext);
+ rcq = uobj_get_obj_read(
+ cq, UVERBS_OBJECT_CQ,
+ cmd->recv_cq_handle, file);
if (!rcq) {
ret = -EINVAL;
goto err_put;
@@ -1458,11 +1445,12 @@ static int create_qp(struct ib_uverbs_file *file,
}
if (has_sq)
- scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->send_cq_handle,
- file->ucontext);
+ scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
+ cmd->send_cq_handle, file);
if (!ind_tbl)
rcq = rcq ?: scq;
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
+ file);
if (!pd || (!scq && has_sq)) {
ret = -EINVAL;
goto err_put;
@@ -1602,9 +1590,7 @@ static int create_qp(struct ib_uverbs_file *file,
if (ind_tbl)
uobj_put_obj_read(ind_tbl);
- uobj_alloc_commit(&obj->uevent.uobject);
-
- return 0;
+ return uobj_alloc_commit(&obj->uevent.uobject, 0);
err_cb:
ib_destroy_qp(qp);
@@ -1637,7 +1623,6 @@ static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1698,7 +1683,6 @@ static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
}
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -1735,7 +1719,6 @@ int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len, int out_len)
{
struct ib_uverbs_open_qp cmd;
@@ -1747,6 +1730,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
struct ib_qp *qp;
struct ib_qp_open_attr attr;
int ret;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -1759,13 +1743,12 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
- file->ucontext);
+ obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, file,
+ &ib_dev);
if (IS_ERR(obj))
return PTR_ERR(obj);
- xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle,
- file->ucontext);
+ xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, file);
if (IS_ERR(xrcd_uobj)) {
ret = -EINVAL;
goto err_put;
@@ -1809,10 +1792,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
qp->uobject = &obj->uevent.uobject;
uobj_put_read(xrcd_uobj);
-
- uobj_alloc_commit(&obj->uevent.uobject);
-
- return in_len;
+ return uobj_alloc_commit(&obj->uevent.uobject, in_len);
err_destroy:
ib_destroy_qp(qp);
@@ -1846,7 +1826,6 @@ static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1867,7 +1846,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
goto out;
}
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp) {
ret = -EINVAL;
goto out;
@@ -1968,11 +1947,11 @@ static int modify_qp(struct ib_uverbs_file *file,
struct ib_qp *qp;
int ret;
- attr = kmalloc(sizeof *attr, GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, file);
if (!qp) {
ret = -EINVAL;
goto out;
@@ -1984,15 +1963,64 @@ static int modify_qp(struct ib_uverbs_file *file,
goto release_qp;
}
- if ((cmd->base.attr_mask & IB_QP_AV) &&
- !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
- ret = -EINVAL;
- goto release_qp;
+ if ((cmd->base.attr_mask & IB_QP_AV)) {
+ if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+
+ if (cmd->base.attr_mask & IB_QP_STATE &&
+ cmd->base.qp_state == IB_QPS_RTR) {
+ /* We are in the INIT->RTR transition (if we are not, this
+ * transition will be rejected in subsequent checks).
+ * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
+ * but the IB_QP_STATE flag is required.
+ *
+ * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
+ * when IB_QP_AV is set, has required inclusion of a valid
+ * port number in the primary AV. (AVs are created and handled
+ * differently for infiniband and ethernet (RoCE) ports).
+ *
+ * Check the port number included in the primary AV against
+ * the port number in the qp struct, which was set (and saved)
+ * in the RST->INIT transition.
+ */
+ if (cmd->base.dest.port_num != qp->real_qp->port) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+ } else {
+ /* We are in SQD->SQD. (If we are not, this transition will
+ * be rejected later in the verbs layer checks).
+ * Check for both IB_QP_PORT and IB_QP_AV, these can be set
+ * together in the SQD->SQD transition.
+ *
+ * If only IB_QP_AV was set, add in IB_QP_PORT as well (the
+ * verbs layer driver does not track primary port changes
+ * resulting from path migration. Thus, in SQD, if the primary
+ * AV is modified, the primary port should also be modified).
+ *
+ * Note that in this transition, the IB_QP_STATE flag
+ * is not allowed.
+ */
+ if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+ == (IB_QP_AV | IB_QP_PORT)) &&
+ cmd->base.port_num != cmd->base.dest.port_num) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+ if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+ == IB_QP_AV) {
+ cmd->base.attr_mask |= IB_QP_PORT;
+ cmd->base.port_num = cmd->base.dest.port_num;
+ }
+ }
}
if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
(!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
- !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
+ !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
+ cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
ret = -EINVAL;
goto release_qp;
}
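A hedged illustration of the SQD->SQD fixup above (the field names follow the
diff; the port values are hypothetical):

/*
 * Userspace modifies only the primary AV while in SQD:
 *
 *	cmd->base.attr_mask	= IB_QP_AV;
 *	cmd->base.dest.port_num	= 2;
 *
 * After the fixup the kernel also moves the primary port:
 *
 *	cmd->base.attr_mask	= IB_QP_AV | IB_QP_PORT;
 *	cmd->base.port_num	= 2;
 *
 * Had userspace set both flags itself with port_num != dest.port_num,
 * the command would instead fail with -EINVAL.
 */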
@@ -2049,7 +2077,6 @@ out:
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2076,7 +2103,6 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
}
int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -2112,7 +2138,6 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2120,33 +2145,19 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
struct ib_uverbs_destroy_qp_resp resp;
struct ib_uobject *uobj;
struct ib_uqp_object *obj;
- int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- memset(&resp, 0, sizeof resp);
-
- uobj = uobj_get_write(UVERBS_OBJECT_QP, cmd.qp_handle,
- file->ucontext);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
- /*
- * Make sure we don't free the memory in remove_commit as we still
- * needs the uobject memory to create the response.
- */
- uverbs_uobject_get(uobj);
-
- ret = uobj_remove_commit(uobj);
- if (ret) {
- uverbs_uobject_put(uobj);
- return ret;
- }
-
+ memset(&resp, 0, sizeof(resp));
resp.events_reported = obj->uevent.events_reported;
- uverbs_uobject_put(uobj);
+
+ uobj_put_destroy(uobj);
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
@@ -2165,14 +2176,14 @@ static void *alloc_wr(size_t wr_size, __u32 num_sge)
}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_post_send cmd;
struct ib_uverbs_post_send_resp resp;
struct ib_uverbs_send_wr *user_wr;
- struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
+ struct ib_send_wr *wr = NULL, *last, *next;
+ const struct ib_send_wr *bad_wr;
struct ib_qp *qp;
int i, sg_ind;
int is_ud;
@@ -2193,7 +2204,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
if (!user_wr)
return -ENOMEM;
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp)
goto out;
@@ -2229,8 +2240,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
goto out_put;
}
- ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH, user_wr->wr.ud.ah,
- file->ucontext);
+ ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
+ user_wr->wr.ud.ah, file);
if (!ud->ah) {
kfree(ud);
ret = -EINVAL;
@@ -2445,13 +2456,13 @@ err:
}
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_post_recv cmd;
struct ib_uverbs_post_recv_resp resp;
- struct ib_recv_wr *wr, *next, *bad_wr;
+ struct ib_recv_wr *wr, *next;
+ const struct ib_recv_wr *bad_wr;
struct ib_qp *qp;
ssize_t ret = -EINVAL;
@@ -2464,7 +2475,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
if (IS_ERR(wr))
return PTR_ERR(wr);
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp)
goto out;
@@ -2494,13 +2505,13 @@ out:
}
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
struct ib_uverbs_post_srq_recv cmd;
struct ib_uverbs_post_srq_recv_resp resp;
- struct ib_recv_wr *wr, *next, *bad_wr;
+ struct ib_recv_wr *wr, *next;
+ const struct ib_recv_wr *bad_wr;
struct ib_srq *srq;
ssize_t ret = -EINVAL;
@@ -2513,12 +2524,13 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
if (IS_ERR(wr))
return PTR_ERR(wr);
- srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file);
if (!srq)
goto out;
resp.bad_wr = 0;
- ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
+ ret = srq->device->post_srq_recv ?
+ srq->device->post_srq_recv(srq, wr, &bad_wr) : -EOPNOTSUPP;
uobj_put_obj_read(srq);
@@ -2543,7 +2555,6 @@ out:
}
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2552,9 +2563,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_ah *ah;
- struct rdma_ah_attr attr;
+ struct rdma_ah_attr attr = {};
int ret;
struct ib_udata udata;
+ struct ib_device *ib_dev;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -2562,19 +2574,21 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
- return -EINVAL;
-
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- uobj = uobj_alloc(UVERBS_OBJECT_AH, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_AH, file, &ib_dev);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
+ if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file);
if (!pd) {
ret = -EINVAL;
goto err;
@@ -2616,9 +2630,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
}
uobj_put_obj_read(pd);
- uobj_alloc_commit(uobj);
-
- return in_len;
+ return uobj_alloc_commit(uobj, in_len);
err_copy:
rdma_destroy_ah(ah);
@@ -2632,27 +2644,18 @@ err:
}
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len, int out_len)
{
struct ib_uverbs_destroy_ah cmd;
- struct ib_uobject *uobj;
- int ret;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_AH, cmd.ah_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
- return ret ?: in_len;
+ return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, file,
+ in_len);
}
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2665,7 +2668,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp)
return -EINVAL;
@@ -2702,7 +2705,6 @@ out_put:
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2716,7 +2718,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp)
return -EINVAL;
@@ -2761,29 +2763,27 @@ static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
resources = kzalloc(sizeof(*resources), GFP_KERNEL);
if (!resources)
- goto err_res;
+ return NULL;
+
+ if (!num_specs)
+ goto out;
resources->counters =
kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
-
- if (!resources->counters)
- goto err_cnt;
-
resources->collection =
kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
- if (!resources->collection)
- goto err_collection;
+ if (!resources->counters || !resources->collection)
+ goto err;
+out:
resources->max = num_specs;
-
return resources;
-err_collection:
+err:
kfree(resources->counters);
-err_cnt:
kfree(resources);
-err_res:
+
return NULL;
}
@@ -2791,6 +2791,9 @@ void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
unsigned int i;
+ if (!uflow_res)
+ return;
+
for (i = 0; i < uflow_res->collection_num; i++)
atomic_dec(&uflow_res->collection[i]->usecnt);
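Together, the two hunks above change the alloc/free contract:
flow_resources_alloc(0) now returns a valid, empty resources object instead of
failing, and ib_uverbs_flow_resources_free() tolerates NULL. A hedged usage
sketch of the new contract:

	struct ib_uflow_resources *res;

	res = flow_resources_alloc(num_specs);	/* num_specs == 0 is valid */
	if (!res)
		return -ENOMEM;		/* only real allocation failure */
	/* ... on any later failure path: */
	ib_uverbs_flow_resources_free(res);	/* NULL-safe after this patch */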
@@ -2826,7 +2829,7 @@ static void flow_resources_add(struct ib_uflow_resources *uflow_res,
uflow_res->num++;
}
-static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
+static int kern_spec_to_ib_spec_action(struct ib_uverbs_file *ufile,
struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec,
struct ib_uflow_resources *uflow_res)
@@ -2855,7 +2858,7 @@ static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
ib_spec->action.act = uobj_get_obj_read(flow_action,
UVERBS_OBJECT_FLOW_ACTION,
kern_spec->action.handle,
- ucontext);
+ ufile);
if (!ib_spec->action.act)
return -EINVAL;
ib_spec->action.size =
@@ -2873,7 +2876,7 @@ static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
uobj_get_obj_read(counters,
UVERBS_OBJECT_COUNTERS,
kern_spec->flow_count.handle,
- ucontext);
+ ufile);
if (!ib_spec->flow_count.counters)
return -EINVAL;
ib_spec->flow_count.size =
@@ -3042,9 +3045,6 @@ static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
void *kern_spec_mask;
void *kern_spec_val;
- if (kern_spec->reserved)
- return -EINVAL;
-
kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
kern_spec_val = (void *)kern_spec +
@@ -3057,7 +3057,7 @@ static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
kern_filter_sz, ib_spec);
}
-static int kern_spec_to_ib_spec(struct ib_ucontext *ucontext,
+static int kern_spec_to_ib_spec(struct ib_uverbs_file *ufile,
struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec,
struct ib_uflow_resources *uflow_res)
@@ -3066,14 +3066,13 @@ static int kern_spec_to_ib_spec(struct ib_ucontext *ucontext,
return -EINVAL;
if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
- return kern_spec_to_ib_spec_action(ucontext, kern_spec, ib_spec,
+ return kern_spec_to_ib_spec_action(ufile, kern_spec, ib_spec,
uflow_res);
else
return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3087,6 +3086,7 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
struct ib_wq_init_attr wq_init_attr = {};
size_t required_cmd_sz;
size_t required_resp_len;
+ struct ib_device *ib_dev;
required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
@@ -3109,18 +3109,18 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
if (cmd.comp_mask)
return -EOPNOTSUPP;
- obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ,
- file->ucontext);
+ obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, file,
+ &ib_dev);
if (IS_ERR(obj))
return PTR_ERR(obj);
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file);
if (!pd) {
err = -EINVAL;
goto err_uobj;
}
- cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (!cq) {
err = -EINVAL;
goto err_put_pd;
@@ -3174,8 +3174,7 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
uobj_put_obj_read(pd);
uobj_put_obj_read(cq);
- uobj_alloc_commit(&obj->uevent.uobject);
- return 0;
+ return uobj_alloc_commit(&obj->uevent.uobject, 0);
err_copy:
ib_destroy_wq(wq);
@@ -3190,7 +3189,6 @@ err_uobj:
}
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3224,29 +3222,19 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
return -EOPNOTSUPP;
resp.response_length = required_resp_len;
- uobj = uobj_get_write(UVERBS_OBJECT_WQ, cmd.wq_handle,
- file->ucontext);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, file);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
- /*
- * Make sure we don't free the memory in remove_commit as we still
- * needs the uobject memory to create the response.
- */
- uverbs_uobject_get(uobj);
-
- ret = uobj_remove_commit(uobj);
resp.events_reported = obj->uevent.events_reported;
- uverbs_uobject_put(uobj);
- if (ret)
- return ret;
+
+ uobj_put_destroy(uobj);
return ib_copy_to_udata(ucore, &resp, resp.response_length);
}
int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3275,7 +3263,7 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
return -EINVAL;
- wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, file->ucontext);
+ wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, file);
if (!wq)
return -EINVAL;
@@ -3296,7 +3284,6 @@ out:
}
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3314,6 +3301,7 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
u32 expected_in_size;
size_t required_cmd_sz_header;
size_t required_resp_len;
+ struct ib_device *ib_dev;
required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
@@ -3369,8 +3357,8 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
num_read_wqs++) {
- wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, wqs_handles[num_read_wqs],
- file->ucontext);
+ wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
+ wqs_handles[num_read_wqs], file);
if (!wq) {
err = -EINVAL;
goto put_wqs;
@@ -3379,7 +3367,7 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
wqs[num_read_wqs] = wq;
}
- uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, file, &ib_dev);
if (IS_ERR(uobj)) {
err = PTR_ERR(uobj);
goto put_wqs;
@@ -3423,8 +3411,7 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
for (j = 0; j < num_read_wqs; j++)
uobj_put_obj_read(wqs[j]);
- uobj_alloc_commit(uobj);
- return 0;
+ return uobj_alloc_commit(uobj, 0);
err_copy:
ib_destroy_rwq_ind_table(rwq_ind_tbl);
@@ -3440,12 +3427,10 @@ err_free:
}
int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
- struct ib_uobject *uobj;
int ret;
size_t required_cmd_sz;
@@ -3466,16 +3451,11 @@ int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
if (cmd.comp_mask)
return -EOPNOTSUPP;
- uobj = uobj_get_write(UVERBS_OBJECT_RWQ_IND_TBL, cmd.ind_tbl_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- return uobj_remove_commit(uobj);
+ return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
+ cmd.ind_tbl_handle, file, 0);
}
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3488,10 +3468,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
struct ib_flow_attr *flow_attr;
struct ib_qp *qp;
struct ib_uflow_resources *uflow_res;
+ struct ib_uverbs_flow_spec_hdr *kern_spec;
int err = 0;
- void *kern_spec;
void *ib_spec;
int i;
+ struct ib_device *ib_dev;
if (ucore->inlen < sizeof(cmd))
return -EINVAL;
@@ -3538,8 +3519,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
if (!kern_flow_attr)
return -ENOMEM;
- memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
- err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
+ *kern_flow_attr = cmd.flow_attr;
+ err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore,
cmd.flow_attr.size);
if (err)
goto err_free_attr;
@@ -3547,18 +3528,28 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
kern_flow_attr = &cmd.flow_attr;
}
- uobj = uobj_alloc(UVERBS_OBJECT_FLOW, file->ucontext);
+ uobj = uobj_alloc(UVERBS_OBJECT_FLOW, file, &ib_dev);
if (IS_ERR(uobj)) {
err = PTR_ERR(uobj);
goto err_free_attr;
}
- qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
+ qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file);
if (!qp) {
err = -EINVAL;
goto err_uobj;
}
+ if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ if (!qp->device->create_flow) {
+ err = -EOPNOTSUPP;
+ goto err_put;
+ }
+
flow_attr = kzalloc(struct_size(flow_attr, flows,
cmd.flow_attr.num_of_specs), GFP_KERNEL);
if (!flow_attr) {
@@ -3578,21 +3569,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
flow_attr->flags = kern_flow_attr->flags;
flow_attr->size = sizeof(*flow_attr);
- kern_spec = kern_flow_attr + 1;
+ kern_spec = kern_flow_attr->flow_specs;
ib_spec = flow_attr + 1;
for (i = 0; i < flow_attr->num_of_specs &&
- cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
- cmd.flow_attr.size >=
- ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
- err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
- uflow_res);
+ cmd.flow_attr.size >= sizeof(*kern_spec) &&
+ cmd.flow_attr.size >= kern_spec->size;
+ i++) {
+ err = kern_spec_to_ib_spec(
+ file, (struct ib_uverbs_flow_spec *)kern_spec,
+ ib_spec, uflow_res);
if (err)
goto err_free;
flow_attr->size +=
((union ib_flow_spec *) ib_spec)->size;
- cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
- kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
+ cmd.flow_attr.size -= kern_spec->size;
+ kern_spec = ((void *)kern_spec) + kern_spec->size;
ib_spec += ((union ib_flow_spec *) ib_spec)->size;
}
if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
@@ -3611,6 +3603,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
}
atomic_inc(&qp->usecnt);
flow_id->qp = qp;
+ flow_id->device = qp->device;
flow_id->uobject = uobj;
uobj->object = flow_id;
uflow = container_of(uobj, typeof(*uflow), uobject);
@@ -3625,13 +3618,13 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
goto err_copy;
uobj_put_obj_read(qp);
- uobj_alloc_commit(uobj);
kfree(flow_attr);
if (cmd.flow_attr.num_of_specs)
kfree(kern_flow_attr);
- return 0;
+ return uobj_alloc_commit(uobj, 0);
err_copy:
- ib_destroy_flow(flow_id);
+ if (!qp->device->destroy_flow(flow_id))
+ atomic_dec(&qp->usecnt);
err_free:
ib_uverbs_flow_resources_free(uflow_res);
err_free_flow_attr:
@@ -3647,12 +3640,10 @@ err_free_attr:
}
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
struct ib_uverbs_destroy_flow cmd;
- struct ib_uobject *uobj;
int ret;
if (ucore->inlen < sizeof(cmd))
@@ -3665,17 +3656,11 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
if (cmd.comp_mask)
return -EINVAL;
- uobj = uobj_get_write(UVERBS_OBJECT_FLOW, cmd.flow_handle,
- file->ucontext);
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- ret = uobj_remove_commit(uobj);
- return ret;
+ return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, file,
+ 0);
}
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_uverbs_create_xsrq *cmd,
struct ib_udata *udata)
{
@@ -3686,9 +3671,10 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
struct ib_uobject *uninitialized_var(xrcd_uobj);
struct ib_srq_init_attr attr;
int ret;
+ struct ib_device *ib_dev;
- obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ,
- file->ucontext);
+ obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, file,
+ &ib_dev);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -3697,7 +3683,7 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
if (cmd->srq_type == IB_SRQT_XRC) {
xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
- file->ucontext);
+ file);
if (IS_ERR(xrcd_uobj)) {
ret = -EINVAL;
goto err;
@@ -3714,15 +3700,15 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
}
if (ib_srq_has_cq(cmd->srq_type)) {
- attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->cq_handle,
- file->ucontext);
+ attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
+ cmd->cq_handle, file);
if (!attr.ext.cq) {
ret = -EINVAL;
goto err_put_xrcd;
}
}
- pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
+ pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file);
if (!pd) {
ret = -EINVAL;
goto err_put_cq;
@@ -3787,9 +3773,7 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
uobj_put_obj_read(attr.ext.cq);
uobj_put_obj_read(pd);
- uobj_alloc_commit(&obj->uevent.uobject);
-
- return 0;
+ return uobj_alloc_commit(&obj->uevent.uobject, 0);
err_copy:
ib_destroy_srq(srq);
@@ -3813,7 +3797,6 @@ err:
}
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -3843,7 +3826,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
+ ret = __uverbs_create_xsrq(file, &xcmd, &udata);
if (ret)
return ret;
@@ -3851,7 +3834,6 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len, int out_len)
{
struct ib_uverbs_create_xsrq cmd;
@@ -3870,7 +3852,7 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
- ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
+ ret = __uverbs_create_xsrq(file, &cmd, &udata);
if (ret)
return ret;
@@ -3878,7 +3860,6 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -3894,7 +3875,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
- srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file);
if (!srq)
return -EINVAL;
@@ -3909,7 +3890,6 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -3925,7 +3905,7 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
+ srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file);
if (!srq)
return -EINVAL;
@@ -3949,7 +3929,6 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -3957,32 +3936,20 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
struct ib_uverbs_destroy_srq_resp resp;
struct ib_uobject *uobj;
struct ib_uevent_object *obj;
- int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = uobj_get_write(UVERBS_OBJECT_SRQ, cmd.srq_handle,
- file->ucontext);
+ uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, file);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
obj = container_of(uobj, struct ib_uevent_object, uobject);
- /*
- * Make sure we don't free the memory in remove_commit as we still
- * needs the uobject memory to create the response.
- */
- uverbs_uobject_get(uobj);
-
memset(&resp, 0, sizeof(resp));
-
- ret = uobj_remove_commit(uobj);
- if (ret) {
- uverbs_uobject_put(uobj);
- return ret;
- }
resp.events_reported = obj->events_reported;
- uverbs_uobject_put(uobj);
+
+ uobj_put_destroy(uobj);
+
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
return -EFAULT;
@@ -3990,15 +3957,21 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
}
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
struct ib_uverbs_ex_query_device_resp resp = { {0} };
struct ib_uverbs_ex_query_device cmd;
struct ib_device_attr attr = {0};
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
int err;
+ ucontext = ib_uverbs_get_ucontext(file);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
+
if (!ib_dev->query_device)
return -EOPNOTSUPP;
@@ -4024,7 +3997,7 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
if (err)
return err;
- copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
+ copy_query_dev_fields(ucontext, &resp.base, &attr);
if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
goto end;
@@ -4111,7 +4084,6 @@ end:
}
int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -4141,7 +4113,7 @@ int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
if (cmd.attr_mask > IB_CQ_MODERATE)
return -EOPNOTSUPP;
- cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
+ cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file);
if (!cq)
return -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 8d32c4ae368c..1a6b229e3db3 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -35,6 +35,103 @@
#include "rdma_core.h"
#include "uverbs.h"
+struct bundle_alloc_head {
+ struct bundle_alloc_head *next;
+ u8 data[];
+};
+
+struct bundle_priv {
+ /* Must be first */
+ struct bundle_alloc_head alloc_head;
+ struct bundle_alloc_head *allocated_mem;
+ size_t internal_avail;
+ size_t internal_used;
+
+ struct radix_tree_root *radix;
+ const struct uverbs_api_ioctl_method *method_elm;
+ void __rcu **radix_slots;
+ unsigned long radix_slots_len;
+ u32 method_key;
+
+ struct ib_uverbs_attr __user *user_attrs;
+ struct ib_uverbs_attr *uattrs;
+
+ DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
+
+ /*
+ * Must be last. bundle ends in a flex array which overlaps
+ * internal_buffer.
+ */
+ struct uverbs_attr_bundle bundle;
+ u64 internal_buffer[32];
+};
+
+/*
+ * Each method has an absolute minimum amount of memory it needs to allocate,
+ * precompute that amount and determine if the on-stack memory can be used
+ * or if allocation is needed.
+ */
+void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
+ unsigned int num_attrs)
+{
+ struct bundle_priv *pbundle;
+ size_t bundle_size =
+ offsetof(struct bundle_priv, internal_buffer) +
+ sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len +
+ sizeof(*pbundle->uattrs) * num_attrs;
+
+ method_elm->use_stack = bundle_size <= sizeof(*pbundle);
+ method_elm->bundle_size =
+ ALIGN(bundle_size + 256, sizeof(*pbundle->internal_buffer));
+
+ /* Do not want order-2 allocations for this. */
+ WARN_ON_ONCE(method_elm->bundle_size > PAGE_SIZE);
+}
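A hedged worked example of the sizing decision (all byte counts below are
hypothetical; only the formula comes from the code above):

/*
 * Hypothetical numbers for illustration: key_bitmap_len = 8,
 * num_attrs = 4, sizeof(struct uverbs_attr) = 48,
 * sizeof(struct ib_uverbs_attr) = 16, header offset = 200:
 *
 *	bundle_size = 200 + 8 * 48 + 4 * 16 = 648
 *
 * sizeof(struct bundle_priv) includes the 32 * 8 = 256-byte
 * internal_buffer, so here bundle_size does not fit, use_stack is
 * false, and the stored bundle_size (padded by 256 bytes of slack,
 * presumably for later uverbs_alloc() calls) is allocated per call by
 * the dispatcher. Smaller methods run entirely from the on-stack
 * bundle_priv.
 */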
+
+/**
+ * uverbs_alloc() - Quickly allocate memory for use with a bundle
+ * @bundle: The bundle
+ * @size: Number of bytes to allocate
+ * @flags: Allocator flags
+ *
+ * The bundle allocator is intended for allocations that are connected with
+ * processing the system call related to the bundle. The allocated memory is
+ * always freed once the system call completes, and cannot be freed any other
+ * way.
+ *
+ * This tries to use a small pool of pre-allocated memory for performance.
+ */
+__malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
+ gfp_t flags)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+ size_t new_used;
+ void *res;
+
+ if (check_add_overflow(size, pbundle->internal_used, &new_used))
+ return ERR_PTR(-EOVERFLOW);
+
+ if (new_used > pbundle->internal_avail) {
+ struct bundle_alloc_head *buf;
+
+ buf = kvmalloc(struct_size(buf, data, size), flags);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+ buf->next = pbundle->allocated_mem;
+ pbundle->allocated_mem = buf;
+ return buf->data;
+ }
+
+ res = (void *)pbundle->internal_buffer + pbundle->internal_used;
+ pbundle->internal_used =
+ ALIGN(new_used, sizeof(*pbundle->internal_buffer));
+ if (flags & __GFP_ZERO)
+ memset(res, 0, size);
+ return res;
+}
+EXPORT_SYMBOL(_uverbs_alloc);
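A hedged usage sketch (the handler and state struct are hypothetical; the
signature matches the handler pointer type used by ib_uverbs_run_method()
below, and the automatic free follows from the kernel-doc above):

struct example_state {		/* hypothetical per-call scratch */
	u32 nelems;
};

static int example_handler(struct ib_uverbs_file *ufile,
			   struct uverbs_attr_bundle *attrs)
{
	struct example_state *state;

	state = uverbs_alloc(attrs, sizeof(*state));
	if (IS_ERR(state))
		return PTR_ERR(state);

	/* use state freely; it is freed when the system call completes */
	state->nelems = 0;
	return 0;
}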
+
static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
u16 len)
{
@@ -46,45 +143,24 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
0, uattr->len - len);
}
-static int uverbs_process_attr(struct ib_device *ibdev,
- struct ib_ucontext *ucontext,
- const struct ib_uverbs_attr *uattr,
- u16 attr_id,
- const struct uverbs_attr_spec_hash *attr_spec_bucket,
- struct uverbs_attr_bundle_hash *attr_bundle_h,
- struct ib_uverbs_attr __user *uattr_ptr)
+static int uverbs_process_attr(struct bundle_priv *pbundle,
+ const struct uverbs_api_attr *attr_uapi,
+ struct ib_uverbs_attr *uattr, u32 attr_bkey)
{
- const struct uverbs_attr_spec *spec;
- const struct uverbs_attr_spec *val_spec;
- struct uverbs_attr *e;
- const struct uverbs_object_spec *object;
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey];
+ const struct uverbs_attr_spec *val_spec = spec;
struct uverbs_obj_attr *o_attr;
- struct uverbs_attr *elements = attr_bundle_h->attrs;
-
- if (attr_id >= attr_spec_bucket->num_attrs) {
- if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
- return -EINVAL;
- else
- return 0;
- }
-
- if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
- return -EINVAL;
-
- spec = &attr_spec_bucket->attrs[attr_id];
- val_spec = spec;
- e = &elements[attr_id];
- e->uattr = uattr_ptr;
switch (spec->type) {
case UVERBS_ATTR_TYPE_ENUM_IN:
- if (uattr->attr_data.enum_data.elem_id >= spec->enum_def.num_elems)
+ if (uattr->attr_data.enum_data.elem_id >= spec->u.enum_def.num_elems)
return -EOPNOTSUPP;
if (uattr->attr_data.enum_data.reserved)
return -EINVAL;
- val_spec = &spec->enum_def.ids[uattr->attr_data.enum_data.elem_id];
+ val_spec = &spec->u2.enum_def.ids[uattr->attr_data.enum_data.elem_id];
/* Currently we only support PTR_IN based enums */
if (val_spec->type != UVERBS_ATTR_TYPE_PTR_IN)
@@ -98,64 +174,75 @@ static int uverbs_process_attr(struct ib_device *ibdev,
* longer struct will fail here if used with an old kernel and
* non-zero content, making ABI compat/discovery simpler.
*/
- if (uattr->len > val_spec->ptr.len &&
- val_spec->flags & UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO &&
- !uverbs_is_attr_cleared(uattr, val_spec->ptr.len))
+ if (uattr->len > val_spec->u.ptr.len &&
+ val_spec->zero_trailing &&
+ !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len))
return -EOPNOTSUPP;
/* fall through */
case UVERBS_ATTR_TYPE_PTR_OUT:
- if (uattr->len < val_spec->ptr.min_len ||
- (!(val_spec->flags & UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO) &&
- uattr->len > val_spec->ptr.len))
+ if (uattr->len < val_spec->u.ptr.min_len ||
+ (!val_spec->zero_trailing &&
+ uattr->len > val_spec->u.ptr.len))
return -EINVAL;
if (spec->type != UVERBS_ATTR_TYPE_ENUM_IN &&
uattr->attr_data.reserved)
return -EINVAL;
- e->ptr_attr.data = uattr->data;
+ e->ptr_attr.uattr_idx = uattr - pbundle->uattrs;
e->ptr_attr.len = uattr->len;
- e->ptr_attr.flags = uattr->flags;
+
+ if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
+ void *p;
+
+ p = uverbs_alloc(&pbundle->bundle, uattr->len);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ e->ptr_attr.ptr = p;
+
+ if (copy_from_user(p, u64_to_user_ptr(uattr->data),
+ uattr->len))
+ return -EFAULT;
+ } else {
+ e->ptr_attr.data = uattr->data;
+ }
break;
case UVERBS_ATTR_TYPE_IDR:
- if (uattr->data >> 32)
- return -EINVAL;
- /* fall through */
case UVERBS_ATTR_TYPE_FD:
if (uattr->attr_data.reserved)
return -EINVAL;
- if (uattr->len != 0 || !ucontext || uattr->data > INT_MAX)
+ if (uattr->len != 0)
return -EINVAL;
o_attr = &e->obj_attr;
- object = uverbs_get_object(ibdev, spec->obj.obj_type);
- if (!object)
- return -EINVAL;
- o_attr->type = object->type_attrs;
-
- o_attr->id = (int)uattr->data;
- o_attr->uobject = uverbs_get_uobject_from_context(
- o_attr->type,
- ucontext,
- spec->obj.access,
- o_attr->id);
+ o_attr->attr_elm = attr_uapi;
+ /*
+ * The type of uattr->data is u64 for UVERBS_ATTR_TYPE_IDR and
+ * s64 for UVERBS_ATTR_TYPE_FD. We can cast the u64 to s64
+ * here without caring about truncation as we know that the
+ * IDR implementation today rejects negative IDs
+ */
+ o_attr->uobject = uverbs_get_uobject_from_file(
+ spec->u.obj.obj_type,
+ pbundle->bundle.ufile,
+ spec->u.obj.access,
+ uattr->data_s64);
if (IS_ERR(o_attr->uobject))
return PTR_ERR(o_attr->uobject);
+ __set_bit(attr_bkey, pbundle->uobj_finalize);
- if (spec->obj.access == UVERBS_ACCESS_NEW) {
- u64 id = o_attr->uobject->id;
+ if (spec->u.obj.access == UVERBS_ACCESS_NEW) {
+ unsigned int uattr_idx = uattr - pbundle->uattrs;
+ s64 id = o_attr->uobject->id;
/* Copy the allocated id to the user-space */
- if (put_user(id, &e->uattr->data)) {
- uverbs_finalize_object(o_attr->uobject,
- UVERBS_ACCESS_NEW,
- false);
+ if (put_user(id, &pbundle->user_attrs[uattr_idx].data))
return -EFAULT;
- }
}
break;
@@ -163,220 +250,225 @@ static int uverbs_process_attr(struct ib_device *ibdev,
return -EOPNOTSUPP;
}
- set_bit(attr_id, attr_bundle_h->valid_bitmap);
return 0;
}
-static int uverbs_uattrs_process(struct ib_device *ibdev,
- struct ib_ucontext *ucontext,
- const struct ib_uverbs_attr *uattrs,
- size_t num_uattrs,
- const struct uverbs_method_spec *method,
- struct uverbs_attr_bundle *attr_bundle,
- struct ib_uverbs_attr __user *uattr_ptr)
+/*
+ * We search the radix tree with the method prefix and now we want to
+ * fast-search the suffix bits to get a particular attribute pointer. It is
+ * not totally clear to me if this breaks the radix tree encapsulation or
+ * not, but it uses the iter data to determine if the method iter points at
+ * the same chunk that will store the attribute; if so, it just derefs it
+ * directly. By construction, in most kernel configs the method and attrs
+ * will all fit in a single radix chunk, so in most cases this needs no
+ * search. Other cases fall back to a full search.
+ */
+static void __rcu **uapi_get_attr_for_method(struct bundle_priv *pbundle,
+ u32 attr_key)
{
- size_t i;
- int ret = 0;
- int num_given_buckets = 0;
-
- for (i = 0; i < num_uattrs; i++) {
- const struct ib_uverbs_attr *uattr = &uattrs[i];
- u16 attr_id = uattr->attr_id;
- struct uverbs_attr_spec_hash *attr_spec_bucket;
-
- ret = uverbs_ns_idx(&attr_id, method->num_buckets);
- if (ret < 0) {
- if (uattr->flags & UVERBS_ATTR_F_MANDATORY) {
- uverbs_finalize_objects(attr_bundle,
- method->attr_buckets,
- num_given_buckets,
- false);
- return ret;
- }
- continue;
- }
+ void __rcu **slot;
- /*
- * ret is the found ns, so increase num_given_buckets if
- * necessary.
- */
- if (ret >= num_given_buckets)
- num_given_buckets = ret + 1;
-
- attr_spec_bucket = method->attr_buckets[ret];
- ret = uverbs_process_attr(ibdev, ucontext, uattr, attr_id,
- attr_spec_bucket, &attr_bundle->hash[ret],
- uattr_ptr++);
- if (ret) {
- uverbs_finalize_objects(attr_bundle,
- method->attr_buckets,
- num_given_buckets,
- false);
- return ret;
- }
+ if (likely(attr_key < pbundle->radix_slots_len)) {
+ void *entry;
+
+ slot = pbundle->radix_slots + attr_key;
+ entry = rcu_dereference_raw(*slot);
+ if (likely(!radix_tree_is_internal_node(entry) && entry))
+ return slot;
}
- return num_given_buckets;
+ return radix_tree_lookup_slot(pbundle->radix,
+ pbundle->method_key | attr_key);
}
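A hedged illustration of the key layout assumed above (the bit split between
prefix and suffix is an assumption; the composed lookup key is taken from the
fallback path):

/*
 * A method and its attributes share a radix-tree prefix, so
 *
 *	full key = pbundle->method_key | uapi_key_attr(attr_id)
 *
 * pbundle->radix_slots caches the slot array of the chunk found while
 * looking up the method; when attr_key < radix_slots_len the attribute
 * slot lives in that same chunk and can be dereferenced without a
 * second tree walk.
 */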
-static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *method_spec,
- struct uverbs_attr_bundle *attr_bundle)
+static int uverbs_set_attr(struct bundle_priv *pbundle,
+ struct ib_uverbs_attr *uattr)
{
- unsigned int i;
-
- for (i = 0; i < attr_bundle->num_buckets; i++) {
- struct uverbs_attr_spec_hash *attr_spec_bucket =
- method_spec->attr_buckets[i];
+ u32 attr_key = uapi_key_attr(uattr->attr_id);
+ u32 attr_bkey = uapi_bkey_attr(attr_key);
+ const struct uverbs_api_attr *attr;
+ void __rcu **slot;
+ int ret;
- if (!bitmap_subset(attr_spec_bucket->mandatory_attrs_bitmask,
- attr_bundle->hash[i].valid_bitmap,
- attr_spec_bucket->num_attrs))
- return -EINVAL;
+ slot = uapi_get_attr_for_method(pbundle, attr_key);
+ if (!slot) {
+ /*
+ * The kernel does not support the attribute, but user-space says
+ * it is mandatory.
+ */
+ if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
+ return -EPROTONOSUPPORT;
+ return 0;
}
+ attr = srcu_dereference(
+ *slot, &pbundle->bundle.ufile->device->disassociate_srcu);
- for (; i < method_spec->num_buckets; i++) {
- struct uverbs_attr_spec_hash *attr_spec_bucket =
- method_spec->attr_buckets[i];
+ /* Reject duplicate attributes from user-space */
+ if (test_bit(attr_bkey, pbundle->bundle.attr_present))
+ return -EINVAL;
- if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask,
- attr_spec_bucket->num_attrs))
- return -EINVAL;
- }
+ ret = uverbs_process_attr(pbundle, attr, uattr, attr_bkey);
+ if (ret)
+ return ret;
+
+ __set_bit(attr_bkey, pbundle->bundle.attr_present);
return 0;
}
-static int uverbs_handle_method(struct ib_uverbs_attr __user *uattr_ptr,
- const struct ib_uverbs_attr *uattrs,
- size_t num_uattrs,
- struct ib_device *ibdev,
- struct ib_uverbs_file *ufile,
- const struct uverbs_method_spec *method_spec,
- struct uverbs_attr_bundle *attr_bundle)
+static int ib_uverbs_run_method(struct bundle_priv *pbundle,
+ unsigned int num_attrs)
{
+ int (*handler)(struct ib_uverbs_file *ufile,
+ struct uverbs_attr_bundle *ctx);
+ size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs);
+ unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey;
+ unsigned int i;
int ret;
- int finalize_ret;
- int num_given_buckets;
- num_given_buckets = uverbs_uattrs_process(ibdev, ufile->ucontext, uattrs,
- num_uattrs, method_spec,
- attr_bundle, uattr_ptr);
- if (num_given_buckets <= 0)
+ /* See uverbs_disassociate_api() */
+ handler = srcu_dereference(
+ pbundle->method_elm->handler,
+ &pbundle->bundle.ufile->device->disassociate_srcu);
+ if (!handler)
+ return -EIO;
+
+ pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size);
+ if (IS_ERR(pbundle->uattrs))
+ return PTR_ERR(pbundle->uattrs);
+ if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size))
+ return -EFAULT;
+
+ for (i = 0; i != num_attrs; i++) {
+ ret = uverbs_set_attr(pbundle, &pbundle->uattrs[i]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ /* User space did not provide all the mandatory attributes */
+ if (unlikely(!bitmap_subset(pbundle->method_elm->attr_mandatory,
+ pbundle->bundle.attr_present,
+ pbundle->method_elm->key_bitmap_len)))
return -EINVAL;
- attr_bundle->num_buckets = num_given_buckets;
- ret = uverbs_validate_kernel_mandatory(method_spec, attr_bundle);
- if (ret)
- goto cleanup;
+ if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
+ struct uverbs_obj_attr *destroy_attr =
+ &pbundle->bundle.attrs[destroy_bkey].obj_attr;
- ret = method_spec->handler(ibdev, ufile, attr_bundle);
-cleanup:
- finalize_ret = uverbs_finalize_objects(attr_bundle,
- method_spec->attr_buckets,
- attr_bundle->num_buckets,
- !ret);
+ ret = uobj_destroy(destroy_attr->uobject);
+ if (ret)
+ return ret;
+ __clear_bit(destroy_bkey, pbundle->uobj_finalize);
- return ret ? ret : finalize_ret;
-}
+ ret = handler(pbundle->bundle.ufile, &pbundle->bundle);
+ uobj_put_destroy(destroy_attr->uobject);
+ } else {
+ ret = handler(pbundle->bundle.ufile, &pbundle->bundle);
+ }
-#define UVERBS_OPTIMIZE_USING_STACK_SZ 256
-static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct ib_uverbs_ioctl_hdr *hdr,
- void __user *buf)
-{
- const struct uverbs_object_spec *object_spec;
- const struct uverbs_method_spec *method_spec;
- long err = 0;
- unsigned int i;
- struct {
- struct ib_uverbs_attr *uattrs;
- struct uverbs_attr_bundle *uverbs_attr_bundle;
- } *ctx = NULL;
- struct uverbs_attr *curr_attr;
- unsigned long *curr_bitmap;
- size_t ctx_size;
- uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
-
- if (hdr->driver_id != ib_dev->driver_id)
+ /*
+ * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework
+ * cannot invoke the method because the request is not supported. No
+ * other cases should return this code.
+ */
+ if (WARN_ON_ONCE(ret == -EPROTONOSUPPORT))
return -EINVAL;
- object_spec = uverbs_get_object(ib_dev, hdr->object_id);
- if (!object_spec)
- return -EPROTONOSUPPORT;
+ return ret;
+}
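
The bitmap_subset() test a few lines up is the entire mandatory-attribute
check: each parsed attribute sets its bkey bit in attr_present, and the
method's attr_mandatory map must be covered by it. A standalone sketch of the
same idiom, with a made-up bitmap width:

	#include <linux/bitmap.h>

	#define DEMO_NATTRS 64	/* hypothetical number of attribute keys */

	/* true iff every bit set in @mandatory is also set in @present */
	static bool demo_all_mandatory_present(const unsigned long *mandatory,
					       const unsigned long *present)
	{
		return bitmap_subset(mandatory, present, DEMO_NATTRS);
	}
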
- method_spec = uverbs_get_method(object_spec, hdr->method_id);
- if (!method_spec)
- return -EPROTONOSUPPORT;
+static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
+{
+ unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
+ struct bundle_alloc_head *memblock;
+ unsigned int i;
+ int ret = 0;
- if ((method_spec->flags & UVERBS_ACTION_FLAG_CREATE_ROOT) ^ !file->ucontext)
- return -EINVAL;
+ i = -1;
+ while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
+ i + 1)) < key_bitmap_len) {
+ struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
+ int current_ret;
+
+ current_ret = uverbs_finalize_object(
+ attr->obj_attr.uobject,
+ attr->obj_attr.attr_elm->spec.u.obj.access, commit);
+ if (!ret)
+ ret = current_ret;
+ }
- ctx_size = sizeof(*ctx) +
- sizeof(struct uverbs_attr_bundle) +
- sizeof(struct uverbs_attr_bundle_hash) * method_spec->num_buckets +
- sizeof(*ctx->uattrs) * hdr->num_attrs +
- sizeof(*ctx->uverbs_attr_bundle->hash[0].attrs) *
- method_spec->num_child_attrs +
- sizeof(*ctx->uverbs_attr_bundle->hash[0].valid_bitmap) *
- (method_spec->num_child_attrs / BITS_PER_LONG +
- method_spec->num_buckets);
-
- if (ctx_size <= UVERBS_OPTIMIZE_USING_STACK_SZ)
- ctx = (void *)data;
- if (!ctx)
- ctx = kmalloc(ctx_size, GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->uverbs_attr_bundle = (void *)ctx + sizeof(*ctx);
- ctx->uattrs = (void *)(ctx->uverbs_attr_bundle + 1) +
- (sizeof(ctx->uverbs_attr_bundle->hash[0]) *
- method_spec->num_buckets);
- curr_attr = (void *)(ctx->uattrs + hdr->num_attrs);
- curr_bitmap = (void *)(curr_attr + method_spec->num_child_attrs);
+ for (memblock = pbundle->allocated_mem; memblock;) {
+ struct bundle_alloc_head *tmp = memblock;
- /*
- * We just fill the pointers and num_attrs here. The data itself will be
- * filled at a later stage (uverbs_process_attr)
- */
- for (i = 0; i < method_spec->num_buckets; i++) {
- unsigned int curr_num_attrs = method_spec->attr_buckets[i]->num_attrs;
-
- ctx->uverbs_attr_bundle->hash[i].attrs = curr_attr;
- curr_attr += curr_num_attrs;
- ctx->uverbs_attr_bundle->hash[i].num_attrs = curr_num_attrs;
- ctx->uverbs_attr_bundle->hash[i].valid_bitmap = curr_bitmap;
- bitmap_zero(curr_bitmap, curr_num_attrs);
- curr_bitmap += BITS_TO_LONGS(curr_num_attrs);
+ memblock = memblock->next;
+ kvfree(tmp);
}
- err = copy_from_user(ctx->uattrs, buf,
- sizeof(*ctx->uattrs) * hdr->num_attrs);
- if (err) {
- err = -EFAULT;
- goto out;
- }
+ return ret;
+}
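
The finalize walk above is the standard find_next_bit() loop shape;
for_each_set_bit() is the canonical macro spelling of the same idiom. A
minimal standalone version:

	#include <linux/bitops.h>
	#include <linux/printk.h>

	static void demo_walk_set_bits(const unsigned long *map,
				       unsigned int len)
	{
		unsigned int i = -1;

		/* find_next_bit() returns len when no further bit is set */
		while ((i = find_next_bit(map, len, i + 1)) < len)
			pr_debug("bit %u is set\n", i);
	}
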
- err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
- file, method_spec, ctx->uverbs_attr_bundle);
+static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
+ struct ib_uverbs_ioctl_hdr *hdr,
+ struct ib_uverbs_attr __user *user_attrs)
+{
+ const struct uverbs_api_ioctl_method *method_elm;
+ struct uverbs_api *uapi = ufile->device->uapi;
+ struct radix_tree_iter attrs_iter;
+ struct bundle_priv *pbundle;
+ struct bundle_priv onstack;
+ void __rcu **slot;
+ int destroy_ret;
+ int ret;
- /*
- * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
- * not invoke the method because the request is not supported. No
- * other cases should return this code.
- */
- if (unlikely(err == -EPROTONOSUPPORT)) {
- WARN_ON_ONCE(err == -EPROTONOSUPPORT);
- err = -EINVAL;
+ if (unlikely(hdr->driver_id != uapi->driver_id))
+ return -EINVAL;
+
+ slot = radix_tree_iter_lookup(
+ &uapi->radix, &attrs_iter,
+ uapi_key_obj(hdr->object_id) |
+ uapi_key_ioctl_method(hdr->method_id));
+ if (unlikely(!slot))
+ return -EPROTONOSUPPORT;
+ method_elm = srcu_dereference(*slot, &ufile->device->disassociate_srcu);
+
+ if (!method_elm->use_stack) {
+ pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL);
+ if (!pbundle)
+ return -ENOMEM;
+ pbundle->internal_avail =
+ method_elm->bundle_size -
+ offsetof(struct bundle_priv, internal_buffer);
+ pbundle->alloc_head.next = NULL;
+ pbundle->allocated_mem = &pbundle->alloc_head;
+ } else {
+ pbundle = &onstack;
+ pbundle->internal_avail = sizeof(pbundle->internal_buffer);
+ pbundle->allocated_mem = NULL;
}
-out:
- if (ctx != (void *)data)
- kfree(ctx);
- return err;
-}
-#define IB_UVERBS_MAX_CMD_SZ 4096
+ pbundle->method_elm = method_elm;
+ pbundle->method_key = attrs_iter.index;
+ pbundle->bundle.ufile = ufile;
+ pbundle->radix = &uapi->radix;
+ pbundle->radix_slots = slot;
+ pbundle->radix_slots_len = radix_tree_chunk_size(&attrs_iter);
+ pbundle->user_attrs = user_attrs;
+
+ /* Space for the pbundle->bundle.attrs flex array */
+ pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
+ sizeof(*pbundle->bundle.attrs),
+ sizeof(*pbundle->internal_buffer));
+ memset(pbundle->bundle.attr_present, 0,
+ sizeof(pbundle->bundle.attr_present));
+ memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
+
+ ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
+ destroy_ret = bundle_destroy(pbundle, ret == 0);
+ if (unlikely(destroy_ret && !ret))
+ return destroy_ret;
+
+ return ret;
+}
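
The use_stack split above is a classic fast-path shape: a method whose
precomputed parse-state budget fits on the stack never touches the allocator.
A generic sketch of the pattern; the names and budget are hypothetical:

	#include <linux/slab.h>
	#include <linux/errno.h>

	#define DEMO_STACK_BUDGET 256

	static int demo_run(size_t state_size)
	{
		unsigned char onstack[DEMO_STACK_BUDGET];
		void *state;
		int ret = 0;

		if (state_size <= sizeof(onstack)) {
			state = onstack;	/* fast path: no allocation */
		} else {
			state = kmalloc(state_size, GFP_KERNEL);
			if (!state)
				return -ENOMEM;
		}

		/* ... parse and run the method against @state ... */

		if (state != onstack)
			kfree(state);
		return ret;
	}
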
long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
@@ -384,39 +476,138 @@ long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct ib_uverbs_ioctl_hdr __user *user_hdr =
(struct ib_uverbs_ioctl_hdr __user *)arg;
struct ib_uverbs_ioctl_hdr hdr;
- struct ib_device *ib_dev;
int srcu_key;
- long err;
+ int err;
+
+ if (unlikely(cmd != RDMA_VERBS_IOCTL))
+ return -ENOIOCTLCMD;
+
+ err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+ if (err)
+ return -EFAULT;
+
+ if (hdr.length > PAGE_SIZE ||
+ hdr.length != struct_size(&hdr, attrs, hdr.num_attrs))
+ return -EINVAL;
+
+ if (hdr.reserved1 || hdr.reserved2)
+ return -EPROTONOSUPPORT;
srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
- ib_dev = srcu_dereference(file->device->ib_dev,
- &file->device->disassociate_srcu);
- if (!ib_dev) {
- err = -EIO;
- goto out;
+ err = ib_uverbs_cmd_verbs(file, &hdr, user_hdr->attrs);
+ srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
+ return err;
+}
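
The hdr.length check above relies on struct_size() from <linux/overflow.h>,
which computes sizeof(*hdr) + n * sizeof(element) and saturates instead of
wrapping on overflow, so a huge num_attrs can never alias a small,
valid-looking length. A minimal sketch of validating a user-supplied
flex-array header the same way (the struct is hypothetical):

	#include <linux/overflow.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	struct demo_hdr {
		__u32 length;		/* total bytes, header included */
		__u32 num_elems;
		__u64 elems[];		/* num_elems entries follow */
	};

	static int demo_check_hdr(const struct demo_hdr *hdr)
	{
		if (hdr->length != struct_size(hdr, elems, hdr->num_elems))
			return -EINVAL;
		return 0;
	}
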
+
+int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 allowed_bits)
+{
+ const struct uverbs_attr *attr;
+ u64 flags;
+
+ attr = uverbs_attr_get(attrs_bundle, idx);
+ /* Missing attribute means 0 flags */
+ if (IS_ERR(attr)) {
+ *to = 0;
+ return 0;
}
- if (cmd == RDMA_VERBS_IOCTL) {
- err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+ /*
+ * New userspace code should use 8 bytes to pass flags, but we
+ * transparently support old userspaces that were using 4 bytes as
+ * well.
+ */
+ if (attr->ptr_attr.len == 8)
+ flags = attr->ptr_attr.data;
+ else if (attr->ptr_attr.len == 4)
+ flags = *(u32 *)&attr->ptr_attr.data;
+ else
+ return -EINVAL;
- if (err || hdr.length > IB_UVERBS_MAX_CMD_SZ ||
- hdr.length != sizeof(hdr) + hdr.num_attrs * sizeof(struct ib_uverbs_attr)) {
- err = -EINVAL;
- goto out;
- }
+ if (flags & ~allowed_bits)
+ return -EINVAL;
- if (hdr.reserved1 || hdr.reserved2) {
- err = -EPROTONOSUPPORT;
- goto out;
- }
+ *to = flags;
+ return 0;
+}
+EXPORT_SYMBOL(uverbs_get_flags64);
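
A handler consuming a flags attribute would then look roughly like the sketch
below; the attribute id and flag bits are hypothetical, only the
uverbs_get_flags64() call itself comes from this patch:

	/* Assumes the declarations of <rdma/uverbs_ioctl.h>. */
	static int demo_handler(struct ib_uverbs_file *file,
				struct uverbs_attr_bundle *attrs)
	{
		u64 flags;
		int ret;

		/* An absent attribute yields flags == 0; undeclared bits or
		 * a length other than 4/8 bytes yield -EINVAL. */
		ret = uverbs_get_flags64(&flags, attrs, DEMO_ATTR_FLAGS,
					 DEMO_FLAG_A | DEMO_FLAG_B);
		if (ret)
			return ret;

		/* ... act on flags ... */
		return 0;
	}
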
- err = ib_uverbs_cmd_verbs(ib_dev, file, &hdr,
- (__user void *)arg + sizeof(hdr));
+int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 allowed_bits)
+{
+ u64 flags;
+ int ret;
+
+ ret = uverbs_get_flags64(&flags, attrs_bundle, idx, allowed_bits);
+ if (ret)
+ return ret;
+
+ if (flags > U32_MAX)
+ return -EINVAL;
+ *to = flags;
+
+ return 0;
+}
+EXPORT_SYMBOL(uverbs_get_flags32);
+
+/*
+ * This is for ease of conversion. The purpose is to convert all drivers to
+ * use uverbs_attr_bundle instead of ib_udata. Assume attr == 0 is input and
+ * attr == 1 is output.
+ */
+void create_udata(struct uverbs_attr_bundle *bundle, struct ib_udata *udata)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+ const struct uverbs_attr *uhw_in =
+ uverbs_attr_get(bundle, UVERBS_ATTR_UHW_IN);
+ const struct uverbs_attr *uhw_out =
+ uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT);
+
+ if (!IS_ERR(uhw_in)) {
+ udata->inlen = uhw_in->ptr_attr.len;
+ if (uverbs_attr_ptr_is_inline(uhw_in))
+ udata->inbuf =
+ &pbundle->user_attrs[uhw_in->ptr_attr.uattr_idx]
+ .data;
+ else
+ udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
} else {
- err = -ENOIOCTLCMD;
+ udata->inbuf = NULL;
+ udata->inlen = 0;
}
-out:
- srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
- return err;
+ if (!IS_ERR(uhw_out)) {
+ udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
+ udata->outlen = uhw_out->ptr_attr.len;
+ } else {
+ udata->outbuf = NULL;
+ udata->outlen = 0;
+ }
+}
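
A converted handler is then expected to look roughly like the sketch below:
the ioctl layer supplies the bundle, create_udata() rebuilds the legacy
ib_udata view from the UHW attribute pair, and the driver verb is invoked
unchanged. The handler and verb names are hypothetical:

	static int demo_uhw_handler(struct ib_uverbs_file *file,
				    struct uverbs_attr_bundle *attrs)
	{
		struct ib_udata udata;

		create_udata(attrs, &udata);
		/* udata.inbuf/inlen and outbuf/outlen now mirror the
		 * UVERBS_ATTR_UHW_IN / UVERBS_ATTR_UHW_OUT attributes */
		return demo_driver_verb(file, &udata);
	}
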
+
+int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
+ const void *from, size_t size)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+ const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+ u16 flags;
+ size_t min_size;
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ min_size = min_t(size_t, attr->ptr_attr.len, size);
+ if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
+ return -EFAULT;
+
+ flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
+ UVERBS_ATTR_F_VALID_OUTPUT;
+ if (put_user(flags,
+ &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
+ return -EFAULT;
+
+ return 0;
}
+EXPORT_SYMBOL(uverbs_copy_to);
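
Returning a scalar to an output attribute thus reduces to one call; the
attribute id below is made up:

	static int demo_report(struct uverbs_attr_bundle *attrs, u32 value)
	{
		/* Copies min(attribute length, sizeof(value)) bytes and
		 * marks the user attr with UVERBS_ATTR_F_VALID_OUTPUT. */
		return uverbs_copy_to(attrs, DEMO_ATTR_RESULT, &value,
				      sizeof(value));
	}
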
diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c
deleted file mode 100644
index 6ceb672c4d46..000000000000
--- a/drivers/infiniband/core/uverbs_ioctl_merge.c
+++ /dev/null
@@ -1,664 +0,0 @@
-/*
- * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/uverbs_ioctl.h>
-#include <rdma/rdma_user_ioctl.h>
-#include <linux/bitops.h>
-#include "uverbs.h"
-
-#define UVERBS_NUM_NS (UVERBS_ID_NS_MASK >> UVERBS_ID_NS_SHIFT)
-#define GET_NS_ID(idx) (((idx) & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT)
-#define GET_ID(idx) ((idx) & ~UVERBS_ID_NS_MASK)
-
-#define _for_each_element(elem, tmpi, tmpj, hashes, num_buckets_offset, \
- buckets_offset) \
- for (tmpj = 0, \
- elem = (*(const void ***)((hashes)[tmpi] + \
- (buckets_offset)))[0]; \
- tmpj < *(size_t *)((hashes)[tmpi] + (num_buckets_offset)); \
- tmpj++) \
- if ((elem = ((*(const void ***)(hashes[tmpi] + \
- (buckets_offset)))[tmpj])))
-
-/*
- * Iterate all elements of a few @hashes. The number of given hashes is
- * indicated by @num_hashes. The offset of the number of buckets in the hash is
- * represented by @num_buckets_offset, while the offset of the buckets array in
- * the hash structure is represented by @buckets_offset. tmpi and tmpj are two
- * short (or int) based indices that are given by the user. tmpi iterates over
- * the different hashes. @elem points the current element in the hashes[tmpi]
- * bucket we are looping on. To be honest, @hashes representation isn't exactly
- * a hash, but more a collection of elements. These elements' ids are treated
- * in a hash like manner, where the first upper bits are the bucket number.
- * These elements are later mapped into a perfect-hash.
- */
-#define for_each_element(elem, tmpi, tmpj, hashes, num_hashes, \
- num_buckets_offset, buckets_offset) \
- for (tmpi = 0; tmpi < (num_hashes); tmpi++) \
- _for_each_element(elem, tmpi, tmpj, hashes, num_buckets_offset,\
- buckets_offset)
-
-#define get_elements_iterators_entry_above(iters, num_elements, elements, \
- num_objects_fld, objects_fld, bucket,\
- min_id) \
- get_elements_above_id((const void **)iters, num_elements, \
- (const void **)(elements), \
- offsetof(typeof(**elements), \
- num_objects_fld), \
- offsetof(typeof(**elements), objects_fld),\
- offsetof(typeof(***(*elements)->objects_fld), id),\
- bucket, min_id)
-
-#define get_objects_above_id(iters, num_trees, trees, bucket, min_id) \
- get_elements_iterators_entry_above(iters, num_trees, trees, \
- num_objects, objects, bucket, min_id)
-
-#define get_methods_above_id(method_iters, num_iters, iters, bucket, min_id)\
- get_elements_iterators_entry_above(method_iters, num_iters, iters, \
- num_methods, methods, bucket, min_id)
-
-#define get_attrs_above_id(attrs_iters, num_iters, iters, bucket, min_id)\
- get_elements_iterators_entry_above(attrs_iters, num_iters, iters, \
- num_attrs, attrs, bucket, min_id)
-
-/*
- * get_elements_above_id get a few hashes represented by @elements and
- * @num_elements. The hashes fields are described by @num_offset, @data_offset
- * and @id_offset in the same way as required by for_each_element. The function
- * returns an array of @iters, represents an array of elements in the hashes
- * buckets, which their ids are the smallest ids in all hashes but are all
- * larger than the id given by min_id. Elements are only added to the iters
- * array if their id belongs to the bucket @bucket. The number of elements in
- * the returned array is returned by the function. @min_id is also updated to
- * reflect the new min_id of all elements in iters.
- */
-static size_t get_elements_above_id(const void **iters,
- unsigned int num_elements,
- const void **elements,
- size_t num_offset,
- size_t data_offset,
- size_t id_offset,
- u16 bucket,
- short *min_id)
-{
- size_t num_iters = 0;
- short min = SHRT_MAX;
- const void *elem;
- int i, j, last_stored = -1;
- unsigned int equal_min = 0;
-
- for_each_element(elem, i, j, elements, num_elements, num_offset,
- data_offset) {
- u16 id = *(u16 *)(elem + id_offset);
-
- if (GET_NS_ID(id) != bucket)
- continue;
-
- if (GET_ID(id) < *min_id ||
- (min != SHRT_MAX && GET_ID(id) > min))
- continue;
-
- /*
- * We first iterate all hashes represented by @elements. When
- * we do, we try to find an element @elem in the bucket @bucket
- * which its id is min. Since we can't ensure the user sorted
- * the elements in increasing order, we override this hash's
- * minimal id element we found, if a new element with a smaller
- * id was just found.
- */
- iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
- last_stored = i;
- if (min == GET_ID(id))
- equal_min++;
- else
- equal_min = 1;
- min = GET_ID(id);
- }
-
- /*
- * We only insert to our iters array an element, if its id is smaller
- * than all previous ids. Therefore, the final iters array is sorted so
- * that smaller ids are in the end of the array.
- * Therefore, we need to clean the beginning of the array to make sure
- * all ids of final elements are equal to min.
- */
- memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);
-
- *min_id = min;
- return equal_min;
-}
-
-#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
- objects_fld, bucket) \
- find_max_element_id(num_elements, (const void **)(elements), \
- offsetof(typeof(**elements), num_objects_fld), \
- offsetof(typeof(**elements), objects_fld), \
- offsetof(typeof(***(*elements)->objects_fld), id),\
- bucket)
-
-static short find_max_element_ns_id(unsigned int num_elements,
- const void **elements,
- size_t num_offset,
- size_t data_offset,
- size_t id_offset)
-{
- short max_ns = SHRT_MIN;
- const void *elem;
- int i, j;
-
- for_each_element(elem, i, j, elements, num_elements, num_offset,
- data_offset) {
- u16 id = *(u16 *)(elem + id_offset);
-
- if (GET_NS_ID(id) > max_ns)
- max_ns = GET_NS_ID(id);
- }
-
- return max_ns;
-}
-
-static short find_max_element_id(unsigned int num_elements,
- const void **elements,
- size_t num_offset,
- size_t data_offset,
- size_t id_offset,
- u16 bucket)
-{
- short max_id = SHRT_MIN;
- const void *elem;
- int i, j;
-
- for_each_element(elem, i, j, elements, num_elements, num_offset,
- data_offset) {
- u16 id = *(u16 *)(elem + id_offset);
-
- if (GET_NS_ID(id) == bucket &&
- GET_ID(id) > max_id)
- max_id = GET_ID(id);
- }
- return max_id;
-}
-
-#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
- objects_fld, bucket) \
- find_max_element_id(num_elements, (const void **)(elements), \
- offsetof(typeof(**elements), num_objects_fld), \
- offsetof(typeof(**elements), objects_fld), \
- offsetof(typeof(***(*elements)->objects_fld), id),\
- bucket)
-
-#define find_max_element_ns_entry_id(num_elements, elements, \
- num_objects_fld, objects_fld) \
- find_max_element_ns_id(num_elements, (const void **)(elements), \
- offsetof(typeof(**elements), num_objects_fld),\
- offsetof(typeof(**elements), objects_fld), \
- offsetof(typeof(***(*elements)->objects_fld), id))
-
-/*
- * find_max_xxxx_ns_id gets a few elements. Each element is described by an id
- * which its upper bits represents a namespace. It finds the max namespace. This
- * could be used in order to know how many buckets do we need to allocate. If no
- * elements exist, SHRT_MIN is returned. Namespace represents here different
- * buckets. The common example is "common bucket" and "driver bucket".
- *
- * find_max_xxxx_id gets a few elements and a bucket. Each element is described
- * by an id which its upper bits represent a namespace. It returns the max id
- * which is contained in the same namespace defined in @bucket. This could be
- * used in order to know how many elements do we need to allocate in the bucket.
- * If no elements exist, SHRT_MIN is returned.
- */
-
-#define find_max_object_id(num_trees, trees, bucket) \
- find_max_element_entry_id(num_trees, trees, num_objects,\
- objects, bucket)
-#define find_max_object_ns_id(num_trees, trees) \
- find_max_element_ns_entry_id(num_trees, trees, \
- num_objects, objects)
-
-#define find_max_method_id(num_iters, iters, bucket) \
- find_max_element_entry_id(num_iters, iters, num_methods,\
- methods, bucket)
-#define find_max_method_ns_id(num_iters, iters) \
- find_max_element_ns_entry_id(num_iters, iters, \
- num_methods, methods)
-
-#define find_max_attr_id(num_iters, iters, bucket) \
- find_max_element_entry_id(num_iters, iters, num_attrs, \
- attrs, bucket)
-#define find_max_attr_ns_id(num_iters, iters) \
- find_max_element_ns_entry_id(num_iters, iters, \
- num_attrs, attrs)
-
-static void free_method(struct uverbs_method_spec *method)
-{
- unsigned int i;
-
- if (!method)
- return;
-
- for (i = 0; i < method->num_buckets; i++)
- kfree(method->attr_buckets[i]);
-
- kfree(method);
-}
-
-#define IS_ATTR_OBJECT(attr) ((attr)->type == UVERBS_ATTR_TYPE_IDR || \
- (attr)->type == UVERBS_ATTR_TYPE_FD)
-
-/*
- * This function gets array of size @num_method_defs which contains pointers to
- * method definitions @method_defs. The function allocates an
- * uverbs_method_spec structure and initializes its number of buckets and the
- * elements in buckets to the correct attributes. While doing that, it
- * validates that there aren't conflicts between attributes of different
- * method_defs.
- */
-static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_method_def **method_defs,
- size_t num_method_defs)
-{
- int bucket_idx;
- int max_attr_buckets = 0;
- size_t num_attr_buckets = 0;
- int res = 0;
- struct uverbs_method_spec *method = NULL;
- const struct uverbs_attr_def **attr_defs;
- unsigned int num_of_singularities = 0;
-
- max_attr_buckets = find_max_attr_ns_id(num_method_defs, method_defs);
- if (max_attr_buckets >= 0)
- num_attr_buckets = max_attr_buckets + 1;
-
- method = kzalloc(struct_size(method, attr_buckets, num_attr_buckets),
- GFP_KERNEL);
- if (!method)
- return ERR_PTR(-ENOMEM);
-
- method->num_buckets = num_attr_buckets;
- attr_defs = kcalloc(num_method_defs, sizeof(*attr_defs), GFP_KERNEL);
- if (!attr_defs) {
- res = -ENOMEM;
- goto free_method;
- }
- for (bucket_idx = 0; bucket_idx < method->num_buckets; bucket_idx++) {
- short min_id = SHRT_MIN;
- int attr_max_bucket = 0;
- struct uverbs_attr_spec_hash *hash = NULL;
-
- attr_max_bucket = find_max_attr_id(num_method_defs, method_defs,
- bucket_idx);
- if (attr_max_bucket < 0)
- continue;
-
- hash = kzalloc(sizeof(*hash) +
- ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
- sizeof(long)) +
- BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),
- GFP_KERNEL);
- if (!hash) {
- res = -ENOMEM;
- goto free;
- }
- hash->num_attrs = attr_max_bucket + 1;
- method->num_child_attrs += hash->num_attrs;
- hash->mandatory_attrs_bitmask = (void *)(hash + 1) +
- ALIGN(sizeof(*hash->attrs) *
- (attr_max_bucket + 1),
- sizeof(long));
-
- method->attr_buckets[bucket_idx] = hash;
-
- do {
- size_t num_attr_defs;
- struct uverbs_attr_spec *attr;
- bool attr_obj_with_special_access;
-
- num_attr_defs =
- get_attrs_above_id(attr_defs,
- num_method_defs,
- method_defs,
- bucket_idx,
- &min_id);
- /* Last attr in bucket */
- if (!num_attr_defs)
- break;
-
- if (num_attr_defs > 1) {
- /*
- * We don't allow two attribute definitions for
- * the same attribute. This is usually a
- * programmer error. If required, it's better to
- * just add a new attribute to capture the new
- * semantics.
- */
- res = -EEXIST;
- goto free;
- }
-
- attr = &hash->attrs[min_id];
- memcpy(attr, &attr_defs[0]->attr, sizeof(*attr));
-
- attr_obj_with_special_access = IS_ATTR_OBJECT(attr) &&
- (attr->obj.access == UVERBS_ACCESS_NEW ||
- attr->obj.access == UVERBS_ACCESS_DESTROY);
- num_of_singularities += !!attr_obj_with_special_access;
- if (WARN(num_of_singularities > 1,
- "ib_uverbs: Method contains more than one object attr (%d) with new/destroy access\n",
- min_id) ||
- WARN(attr_obj_with_special_access &&
- !(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
- "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
- min_id) ||
- WARN(IS_ATTR_OBJECT(attr) &&
- attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
- "ib_uverbs: Tried to merge attr (%d) but it's an object with min_sz flag\n",
- min_id)) {
- res = -EINVAL;
- goto free;
- }
-
- if (attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY)
- set_bit(min_id, hash->mandatory_attrs_bitmask);
- min_id++;
-
- } while (1);
- }
- kfree(attr_defs);
- return method;
-
-free:
- kfree(attr_defs);
-free_method:
- free_method(method);
- return ERR_PTR(res);
-}
-
-static void free_object(struct uverbs_object_spec *object)
-{
- unsigned int i, j;
-
- if (!object)
- return;
-
- for (i = 0; i < object->num_buckets; i++) {
- struct uverbs_method_spec_hash *method_buckets =
- object->method_buckets[i];
-
- if (!method_buckets)
- continue;
-
- for (j = 0; j < method_buckets->num_methods; j++)
- free_method(method_buckets->methods[j]);
-
- kfree(method_buckets);
- }
-
- kfree(object);
-}
-
-/*
- * This function gets array of size @num_object_defs which contains pointers to
- * object definitions @object_defs. The function allocated an
- * uverbs_object_spec structure and initialize its number of buckets and the
- * elements in buckets to the correct methods. While doing that, it
- * sorts out the correct relationship between conflicts in the same method.
- */
-static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_object_def **object_defs,
- size_t num_object_defs)
-{
- u16 bucket_idx;
- int max_method_buckets = 0;
- u16 num_method_buckets = 0;
- int res = 0;
- struct uverbs_object_spec *object = NULL;
- const struct uverbs_method_def **method_defs;
-
- max_method_buckets = find_max_method_ns_id(num_object_defs, object_defs);
- if (max_method_buckets >= 0)
- num_method_buckets = max_method_buckets + 1;
-
- object = kzalloc(struct_size(object, method_buckets,
- num_method_buckets),
- GFP_KERNEL);
- if (!object)
- return ERR_PTR(-ENOMEM);
-
- object->num_buckets = num_method_buckets;
- method_defs = kcalloc(num_object_defs, sizeof(*method_defs), GFP_KERNEL);
- if (!method_defs) {
- res = -ENOMEM;
- goto free_object;
- }
-
- for (bucket_idx = 0; bucket_idx < object->num_buckets; bucket_idx++) {
- short min_id = SHRT_MIN;
- int methods_max_bucket = 0;
- struct uverbs_method_spec_hash *hash = NULL;
-
- methods_max_bucket = find_max_method_id(num_object_defs, object_defs,
- bucket_idx);
- if (methods_max_bucket < 0)
- continue;
-
- hash = kzalloc(struct_size(hash, methods,
- methods_max_bucket + 1),
- GFP_KERNEL);
- if (!hash) {
- res = -ENOMEM;
- goto free;
- }
-
- hash->num_methods = methods_max_bucket + 1;
- object->method_buckets[bucket_idx] = hash;
-
- do {
- size_t num_method_defs;
- struct uverbs_method_spec *method;
- int i;
-
- num_method_defs =
- get_methods_above_id(method_defs,
- num_object_defs,
- object_defs,
- bucket_idx,
- &min_id);
- /* Last method in bucket */
- if (!num_method_defs)
- break;
-
- method = build_method_with_attrs(method_defs,
- num_method_defs);
- if (IS_ERR(method)) {
- res = PTR_ERR(method);
- goto free;
- }
-
- /*
- * The last tree which is given as an argument to the
- * merge overrides previous method handler.
- * Therefore, we iterate backwards and search for the
- * first handler which != NULL. This also defines the
- * set of flags used for this handler.
- */
- for (i = num_method_defs - 1;
- i >= 0 && !method_defs[i]->handler; i--)
- ;
- hash->methods[min_id++] = method;
- /* NULL handler isn't allowed */
- if (WARN(i < 0,
- "ib_uverbs: tried to merge function id %d, but all handlers are NULL\n",
- min_id)) {
- res = -EINVAL;
- goto free;
- }
- method->handler = method_defs[i]->handler;
- method->flags = method_defs[i]->flags;
-
- } while (1);
- }
- kfree(method_defs);
- return object;
-
-free:
- kfree(method_defs);
-free_object:
- free_object(object);
- return ERR_PTR(res);
-}
-
-void uverbs_free_spec_tree(struct uverbs_root_spec *root)
-{
- unsigned int i, j;
-
- if (!root)
- return;
-
- for (i = 0; i < root->num_buckets; i++) {
- struct uverbs_object_spec_hash *object_hash =
- root->object_buckets[i];
-
- if (!object_hash)
- continue;
-
- for (j = 0; j < object_hash->num_objects; j++)
- free_object(object_hash->objects[j]);
-
- kfree(object_hash);
- }
-
- kfree(root);
-}
-EXPORT_SYMBOL(uverbs_free_spec_tree);
-
-struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
- const struct uverbs_object_tree_def **trees)
-{
- u16 bucket_idx;
- short max_object_buckets = 0;
- size_t num_objects_buckets = 0;
- struct uverbs_root_spec *root_spec = NULL;
- const struct uverbs_object_def **object_defs;
- int i;
- int res = 0;
-
- max_object_buckets = find_max_object_ns_id(num_trees, trees);
- /*
- * Devices which don't want to support ib_uverbs, should just allocate
- * an empty parsing tree. Every user-space command won't hit any valid
- * entry in the parsing tree and thus will fail.
- */
- if (max_object_buckets >= 0)
- num_objects_buckets = max_object_buckets + 1;
-
- root_spec = kzalloc(struct_size(root_spec, object_buckets,
- num_objects_buckets),
- GFP_KERNEL);
- if (!root_spec)
- return ERR_PTR(-ENOMEM);
- root_spec->num_buckets = num_objects_buckets;
-
- object_defs = kcalloc(num_trees, sizeof(*object_defs),
- GFP_KERNEL);
- if (!object_defs) {
- res = -ENOMEM;
- goto free_root;
- }
-
- for (bucket_idx = 0; bucket_idx < root_spec->num_buckets; bucket_idx++) {
- short min_id = SHRT_MIN;
- short objects_max_bucket;
- struct uverbs_object_spec_hash *hash = NULL;
-
- objects_max_bucket = find_max_object_id(num_trees, trees,
- bucket_idx);
- if (objects_max_bucket < 0)
- continue;
-
- hash = kzalloc(struct_size(hash, objects,
- objects_max_bucket + 1),
- GFP_KERNEL);
- if (!hash) {
- res = -ENOMEM;
- goto free;
- }
- hash->num_objects = objects_max_bucket + 1;
- root_spec->object_buckets[bucket_idx] = hash;
-
- do {
- size_t num_object_defs;
- struct uverbs_object_spec *object;
-
- num_object_defs = get_objects_above_id(object_defs,
- num_trees,
- trees,
- bucket_idx,
- &min_id);
- /* Last object in bucket */
- if (!num_object_defs)
- break;
-
- object = build_object_with_methods(object_defs,
- num_object_defs);
- if (IS_ERR(object)) {
- res = PTR_ERR(object);
- goto free;
- }
-
- /*
- * The last tree which is given as an argument to the
- * merge overrides previous object's type_attrs.
- * Therefore, we iterate backwards and search for the
- * first type_attrs which != NULL.
- */
- for (i = num_object_defs - 1;
- i >= 0 && !object_defs[i]->type_attrs; i--)
- ;
- /*
- * NULL is a valid type_attrs. It means an object we
- * can't instantiate (like DEVICE).
- */
- object->type_attrs = i < 0 ? NULL :
- object_defs[i]->type_attrs;
-
- hash->objects[min_id++] = object;
- } while (1);
- }
-
- kfree(object_defs);
- return root_spec;
-
-free:
- kfree(object_defs);
-free_root:
- uverbs_free_spec_tree(root_spec);
- return ERR_PTR(res);
-}
-EXPORT_SYMBOL(uverbs_alloc_spec_tree);
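
For context on what was just deleted: every spec id carried a namespace in
its upper bits, split by the GET_NS_ID()/GET_ID() macros above, and that
namespace selected the bucket throughout the old spec tree. A two-helper
sketch of the encoding, with a made-up mask width standing in for
UVERBS_ID_NS_MASK / UVERBS_ID_NS_SHIFT:

	#include <linux/types.h>

	#define DEMO_NS_MASK	0xf000u	/* hypothetical */
	#define DEMO_NS_SHIFT	12	/* hypothetical */

	static inline u16 demo_ns_of(u16 id)
	{
		return (id & DEMO_NS_MASK) >> DEMO_NS_SHIFT;
	}

	static inline u16 demo_local_id(u16 id)
	{
		return id & ~DEMO_NS_MASK;
	}
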
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 2094d136513d..823beca448e1 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -41,8 +41,6 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
@@ -77,7 +75,6 @@ static struct class *uverbs_class;
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len) = {
[IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
@@ -118,7 +115,6 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
};
static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw) = {
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
@@ -138,6 +134,30 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
+/*
+ * Must be called with the ufile->device->disassociate_srcu held, and the lock
+ * must be held until use of the ucontext is finished.
+ */
+struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile)
+{
+ /*
+ * We do not hold the hw_destroy_rwsem lock for this flow; instead
+ * srcu is used. It does not matter if someone races this with
+ * get_context: we get either NULL or a valid ucontext.
+ */
+ struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);
+
+ if (!srcu_dereference(ufile->device->ib_dev,
+ &ufile->device->disassociate_srcu))
+ return ERR_PTR(-EIO);
+
+ if (!ucontext)
+ return ERR_PTR(-EINVAL);
+
+ return ucontext;
+}
+EXPORT_SYMBOL(ib_uverbs_get_ucontext);
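
Callers must hold the device's disassociate SRCU around both this lookup and
every use of the returned ucontext, exactly as ib_uverbs_mmap() does further
down in this file. A minimal caller sketch, assuming the declarations local
to this file:

	static int demo_with_ucontext(struct ib_uverbs_file *ufile)
	{
		struct ib_ucontext *ucontext;
		int srcu_key, ret = 0;

		srcu_key = srcu_read_lock(&ufile->device->disassociate_srcu);
		ucontext = ib_uverbs_get_ucontext(ufile);
		if (IS_ERR(ucontext))
			ret = PTR_ERR(ucontext);
		/* ... ucontext is usable only while the lock is held ... */
		srcu_read_unlock(&ufile->device->disassociate_srcu, srcu_key);
		return ret;
	}
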
+
int uverbs_dealloc_mw(struct ib_mw *mw)
{
struct ib_pd *pd = mw->pd;
@@ -154,6 +174,7 @@ static void ib_uverbs_release_dev(struct kobject *kobj)
struct ib_uverbs_device *dev =
container_of(kobj, struct ib_uverbs_device, kobj);
+ uverbs_destroy_api(dev->uapi);
cleanup_srcu_struct(&dev->disassociate_srcu);
kfree(dev);
}
@@ -184,7 +205,7 @@ void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
}
spin_unlock_irq(&ev_file->ev_queue.lock);
- uverbs_uobject_put(&ev_file->uobj_file.uobj);
+ uverbs_uobject_put(&ev_file->uobj);
}
spin_lock_irq(&file->async_file->ev_queue.lock);
@@ -220,20 +241,6 @@ void ib_uverbs_detach_umcast(struct ib_qp *qp,
}
}
-static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
- struct ib_ucontext *context,
- bool device_removed)
-{
- context->closing = 1;
- uverbs_cleanup_ucontext(context, device_removed);
- put_pid(context->tgid);
-
- ib_rdmacg_uncharge(&context->cg_obj, context->device,
- RDMACG_RESOURCE_HCA_HANDLE);
-
- return context->device->dealloc_ucontext(context);
-}
-
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
complete(&dev->comp);
@@ -246,6 +253,8 @@ void ib_uverbs_release_file(struct kref *ref)
struct ib_device *ib_dev;
int srcu_key;
+ release_ufile_idr_uobject(file);
+
srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
ib_dev = srcu_dereference(file->device->ib_dev,
&file->device->disassociate_srcu);
@@ -338,7 +347,7 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
filp->private_data;
return ib_uverbs_event_read(&comp_ev_file->ev_queue,
- comp_ev_file->uobj_file.ufile, filp,
+ comp_ev_file->uobj.ufile, filp,
buf, count, pos,
sizeof(struct ib_uverbs_comp_event_desc));
}
@@ -420,7 +429,9 @@ static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
- struct ib_uverbs_completion_event_file *file = filp->private_data;
+ struct ib_uobject *uobj = filp->private_data;
+ struct ib_uverbs_completion_event_file *file = container_of(
+ uobj, struct ib_uverbs_completion_event_file, uobj);
struct ib_uverbs_event *entry, *tmp;
spin_lock_irq(&file->ev_queue.lock);
@@ -528,7 +539,7 @@ void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
struct ib_ucq_object, uobject);
- ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
+ ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
event->event, &uobj->async_list,
&uobj->async_events_reported);
}
@@ -637,13 +648,13 @@ err_put_refs:
return filp;
}
-static bool verify_command_mask(struct ib_device *ib_dev,
- u32 command, bool extended)
+static bool verify_command_mask(struct ib_uverbs_file *ufile, u32 command,
+ bool extended)
{
if (!extended)
- return ib_dev->uverbs_cmd_mask & BIT_ULL(command);
+ return ufile->uverbs_cmd_mask & BIT_ULL(command);
- return ib_dev->uverbs_ex_cmd_mask & BIT_ULL(command);
+ return ufile->uverbs_ex_cmd_mask & BIT_ULL(command);
}
static bool verify_command_idx(u32 command, bool extended)
@@ -713,7 +724,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
{
struct ib_uverbs_file *file = filp->private_data;
struct ib_uverbs_ex_cmd_hdr ex_hdr;
- struct ib_device *ib_dev;
struct ib_uverbs_cmd_hdr hdr;
bool extended;
int srcu_key;
@@ -748,24 +758,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
return ret;
srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
- ib_dev = srcu_dereference(file->device->ib_dev,
- &file->device->disassociate_srcu);
- if (!ib_dev) {
- ret = -EIO;
- goto out;
- }
-
- /*
- * Must be after the ib_dev check, as once the RCU clears ib_dev ==
- * NULL means ucontext == NULL
- */
- if (!file->ucontext &&
- (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
- ret = -EINVAL;
- goto out;
- }
- if (!verify_command_mask(ib_dev, command, extended)) {
+ if (!verify_command_mask(file, command, extended)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -773,7 +767,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
buf += sizeof(hdr);
if (!extended) {
- ret = uverbs_cmd_table[command](file, ib_dev, buf,
+ ret = uverbs_cmd_table[command](file, buf,
hdr.in_words * 4,
hdr.out_words * 4);
} else {
@@ -792,7 +786,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
ex_hdr.provider_in_words * 8,
ex_hdr.provider_out_words * 8);
- ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
+ ret = uverbs_ex_cmd_table[command](file, &ucore, &uhw);
ret = (ret) ? : count;
}
@@ -804,22 +798,18 @@ out:
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ib_uverbs_file *file = filp->private_data;
- struct ib_device *ib_dev;
+ struct ib_ucontext *ucontext;
int ret = 0;
int srcu_key;
srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
- ib_dev = srcu_dereference(file->device->ib_dev,
- &file->device->disassociate_srcu);
- if (!ib_dev) {
- ret = -EIO;
+ ucontext = ib_uverbs_get_ucontext(file);
+ if (IS_ERR(ucontext)) {
+ ret = PTR_ERR(ucontext);
goto out;
}
- if (!file->ucontext)
- ret = -ENODEV;
- else
- ret = ib_dev->mmap(file->ucontext, vma);
+ ret = ucontext->device->mmap(ucontext, vma);
out:
srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
return ret;
@@ -879,13 +869,12 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
}
file->device = dev;
- spin_lock_init(&file->idr_lock);
- idr_init(&file->idr);
- file->ucontext = NULL;
- file->async_file = NULL;
kref_init(&file->ref);
- mutex_init(&file->mutex);
- mutex_init(&file->cleanup_mutex);
+ mutex_init(&file->ucontext_lock);
+
+ spin_lock_init(&file->uobjects_lock);
+ INIT_LIST_HEAD(&file->uobjects);
+ init_rwsem(&file->hw_destroy_rwsem);
filp->private_data = file;
kobject_get(&dev->kobj);
@@ -893,6 +882,11 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
mutex_unlock(&dev->lists_mutex);
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
+ file->uverbs_cmd_mask = ib_dev->uverbs_cmd_mask;
+ file->uverbs_ex_cmd_mask = ib_dev->uverbs_ex_cmd_mask;
+
+ setup_ufile_idr_uobject(file);
+
return nonseekable_open(inode, filp);
err_module:
@@ -911,13 +905,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
- mutex_lock(&file->cleanup_mutex);
- if (file->ucontext) {
- ib_uverbs_cleanup_ucontext(file, file->ucontext, false);
- file->ucontext = NULL;
- }
- mutex_unlock(&file->cleanup_mutex);
- idr_destroy(&file->idr);
+ uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);
mutex_lock(&file->device->lists_mutex);
if (!file->is_closed) {
@@ -1006,6 +994,19 @@ static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_VERBS_ABI_VERSION));
+static int ib_uverbs_create_uapi(struct ib_device *device,
+ struct ib_uverbs_device *uverbs_dev)
+{
+ struct uverbs_api *uapi;
+
+ uapi = uverbs_alloc_api(device->driver_specs, device->driver_id);
+ if (IS_ERR(uapi))
+ return PTR_ERR(uapi);
+
+ uverbs_dev->uapi = uapi;
+ return 0;
+}
+
static void ib_uverbs_add_one(struct ib_device *device)
{
int devnum;
@@ -1048,6 +1049,9 @@ static void ib_uverbs_add_one(struct ib_device *device)
rcu_assign_pointer(uverbs_dev->ib_dev, device);
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
+ if (ib_uverbs_create_uapi(device, uverbs_dev))
+ goto err;
+
cdev_init(&uverbs_dev->cdev, NULL);
uverbs_dev->cdev.owner = THIS_MODULE;
uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
@@ -1067,18 +1071,6 @@ static void ib_uverbs_add_one(struct ib_device *device)
if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
goto err_class;
- if (!device->specs_root) {
- const struct uverbs_object_tree_def *default_root[] = {
- uverbs_default_get_objects()};
-
- uverbs_dev->specs_root = uverbs_alloc_spec_tree(1,
- default_root);
- if (IS_ERR(uverbs_dev->specs_root))
- goto err_class;
-
- device->specs_root = uverbs_dev->specs_root;
- }
-
ib_set_client_data(device, &uverbs_client, uverbs_dev);
return;
@@ -1098,44 +1090,6 @@ err:
return;
}
-static void ib_uverbs_disassociate_ucontext(struct ib_ucontext *ibcontext)
-{
- struct ib_device *ib_dev = ibcontext->device;
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
- if (!owning_process)
- return;
-
- owning_mm = get_task_mm(owning_process);
- if (!owning_mm) {
- pr_info("no mm, disassociate ucontext is pending task termination\n");
- while (1) {
- put_task_struct(owning_process);
- usleep_range(1000, 2000);
- owning_process = get_pid_task(ibcontext->tgid,
- PIDTYPE_PID);
- if (!owning_process ||
- owning_process->state == TASK_DEAD) {
- pr_info("disassociate ucontext done, task was terminated\n");
- /* in case task was dead need to release the
- * task struct.
- */
- if (owning_process)
- put_task_struct(owning_process);
- return;
- }
- }
- }
-
- down_write(&owning_mm->mmap_sem);
- ib_dev->disassociate_ucontext(ibcontext);
- up_write(&owning_mm->mmap_sem);
- mmput(owning_mm);
- put_task_struct(owning_process);
-}
-
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
struct ib_device *ib_dev)
{
@@ -1144,46 +1098,31 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
struct ib_event event;
/* Pending running commands to terminate */
- synchronize_srcu(&uverbs_dev->disassociate_srcu);
+ uverbs_disassociate_api_pre(uverbs_dev);
event.event = IB_EVENT_DEVICE_FATAL;
event.element.port_num = 0;
event.device = ib_dev;
mutex_lock(&uverbs_dev->lists_mutex);
while (!list_empty(&uverbs_dev->uverbs_file_list)) {
- struct ib_ucontext *ucontext;
file = list_first_entry(&uverbs_dev->uverbs_file_list,
struct ib_uverbs_file, list);
file->is_closed = 1;
list_del(&file->list);
kref_get(&file->ref);
- mutex_unlock(&uverbs_dev->lists_mutex);
-
-
- mutex_lock(&file->cleanup_mutex);
- ucontext = file->ucontext;
- file->ucontext = NULL;
- mutex_unlock(&file->cleanup_mutex);
- /* At this point ib_uverbs_close cannot be running
- * ib_uverbs_cleanup_ucontext
+ /* We must release the mutex before going ahead and calling
+ * uverbs_destroy_ufile_hw(), as it might end up indirectly calling
+ * uverbs_close(), for example due to freeing the resources (e.g.
+ * mmput()).
*/
- if (ucontext) {
- /* We must release the mutex before going ahead and
- * calling disassociate_ucontext. disassociate_ucontext
- * might end up indirectly calling uverbs_close,
- * for example due to freeing the resources
- * (e.g mmput).
- */
- ib_uverbs_event_handler(&file->event_handler, &event);
- ib_uverbs_disassociate_ucontext(ucontext);
- mutex_lock(&file->cleanup_mutex);
- ib_uverbs_cleanup_ucontext(file, ucontext, true);
- mutex_unlock(&file->cleanup_mutex);
- }
+ mutex_unlock(&uverbs_dev->lists_mutex);
- mutex_lock(&uverbs_dev->lists_mutex);
+ ib_uverbs_event_handler(&file->event_handler, &event);
+ uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
kref_put(&file->ref, ib_uverbs_release_file);
+
+ mutex_lock(&uverbs_dev->lists_mutex);
}
while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
@@ -1205,6 +1144,8 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
mutex_unlock(&uverbs_dev->lists_mutex);
+
+ uverbs_disassociate_api(uverbs_dev->uapi);
}
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
@@ -1232,7 +1173,6 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
* cdev was deleted, however active clients can still issue
* commands and close their open files.
*/
- rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
ib_uverbs_free_hw_resources(uverbs_dev, device);
wait_clients = 0;
}
@@ -1241,10 +1181,6 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
ib_uverbs_comp_dev(uverbs_dev);
if (wait_clients)
wait_for_completion(&uverbs_dev->comp);
- if (uverbs_dev->specs_root) {
- uverbs_free_spec_tree(uverbs_dev->specs_root);
- device->specs_root = NULL;
- }
kobject_put(&uverbs_dev->kobj);
}
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index bb372b4713a4..b8d715c68ca4 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -211,7 +211,5 @@ void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
/* TODO: No need to set this */
sa_path_set_dmac_zero(dst);
- sa_path_set_ndev(dst, NULL);
- sa_path_set_ifindex(dst, 0);
}
EXPORT_SYMBOL(ib_copy_path_rec_from_user);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index b570acbd94af..203cc96ac6f5 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -48,14 +48,18 @@ static int uverbs_free_ah(struct ib_uobject *uobject,
static int uverbs_free_flow(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
- int ret;
struct ib_flow *flow = (struct ib_flow *)uobject->object;
struct ib_uflow_object *uflow =
container_of(uobject, struct ib_uflow_object, uobject);
+ struct ib_qp *qp = flow->qp;
+ int ret;
- ret = ib_destroy_flow(flow);
- if (!ret)
+ ret = flow->device->destroy_flow(flow);
+ if (!ret) {
+ if (qp)
+ atomic_dec(&qp->usecnt);
ib_uverbs_flow_resources_free(uflow->resources);
+ }
return ret;
}
@@ -74,6 +78,13 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
container_of(uobject, struct ib_uqp_object, uevent.uobject);
int ret;
+ /*
+ * If this is a user-triggered destroy then do not allow destruction
+ * until the user cleans up all the mcast bindings. Unlike in other
+ * places, we forcibly clean up the mcast attachments for !DESTROY
+ * because the mcast attaches are not uobjects and will not be
+ * destroyed by anything else during cleanup processing.
+ */
if (why == RDMA_REMOVE_DESTROY) {
if (!list_empty(&uqp->mcast_list))
return -EBUSY;
@@ -82,7 +93,7 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
}
ret = ib_destroy_qp(qp);
- if (ret && why == RDMA_REMOVE_DESTROY)
+ if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
if (uqp->uxrcd)
@@ -100,8 +111,10 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
- if (!ret || why != RDMA_REMOVE_DESTROY)
- kfree(ind_tbl);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ kfree(ind_tbl);
return ret;
}
@@ -114,8 +127,10 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_wq(wq);
- if (!ret || why != RDMA_REMOVE_DESTROY)
- ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
return ret;
}
@@ -129,8 +144,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_srq(srq);
-
- if (ret && why == RDMA_REMOVE_DESTROY)
+ if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
if (srq_type == IB_SRQT_XRC) {
@@ -152,12 +166,12 @@ static int uverbs_free_xrcd(struct ib_uobject *uobject,
container_of(uobject, struct ib_uxrcd_object, uobject);
int ret;
+ ret = ib_destroy_usecnt(&uxrcd->refcnt, why, uobject);
+ if (ret)
+ return ret;
+
mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex);
- if (why == RDMA_REMOVE_DESTROY && atomic_read(&uxrcd->refcnt))
- ret = -EBUSY;
- else
- ret = ib_uverbs_dealloc_xrcd(uobject->context->ufile->device,
- xrcd, why);
+ ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why);
mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex);
return ret;
@@ -167,20 +181,22 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
struct ib_pd *pd = uobject->object;
+ int ret;
- if (why == RDMA_REMOVE_DESTROY && atomic_read(&pd->usecnt))
- return -EBUSY;
+ ret = ib_destroy_usecnt(&pd->usecnt, why, uobject);
+ if (ret)
+ return ret;
ib_dealloc_pd((struct ib_pd *)uobject->object);
return 0;
}
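
Several free callbacks in this file now funnel through
ib_is_destroy_retryable() and ib_destroy_usecnt(). Their real definitions
live in the rdma headers; the sketch below only paraphrases the intent under
that assumption: a failing destroy may be reported back (and retried later)
for a user-requested destroy, while forced teardown must not fail. The real
helper also permits retries when the cleanup mode itself is marked
retryable, which is omitted here:

	#include <linux/atomic.h>
	#include <linux/errno.h>

	static inline bool demo_destroy_retryable(int ret,
						  enum rdma_remove_reason why)
	{
		return ret && why == RDMA_REMOVE_DESTROY;
	}

	static inline int demo_destroy_usecnt(atomic_t *usecnt,
					      enum rdma_remove_reason why)
	{
		/* busy objects fail only user-triggered destroys */
		if (atomic_read(usecnt) && demo_destroy_retryable(-EBUSY, why))
			return -EBUSY;
		return 0;
	}
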
-static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_file,
+static int uverbs_hot_unplug_completion_event_file(struct ib_uobject *uobj,
enum rdma_remove_reason why)
{
struct ib_uverbs_completion_event_file *comp_event_file =
- container_of(uobj_file, struct ib_uverbs_completion_event_file,
- uobj_file);
+ container_of(uobj, struct ib_uverbs_completion_event_file,
+ uobj);
struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;
spin_lock_irq(&event_queue->lock);
@@ -194,119 +210,77 @@ static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_
return 0;
};
-int uverbs_destroy_def_handler(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
+int uverbs_destroy_def_handler(struct ib_uverbs_file *file,
struct uverbs_attr_bundle *attrs)
{
return 0;
}
+EXPORT_SYMBOL(uverbs_destroy_def_handler);
-/*
- * This spec is used in order to pass information to the hardware driver in a
- * legacy way. Every verb that could get driver specific data should get this
- * spec.
- */
-const struct uverbs_attr_def uverbs_uhw_compat_in =
- UVERBS_ATTR_PTR_IN_SZ(UVERBS_ATTR_UHW_IN, UVERBS_ATTR_SIZE(0, USHRT_MAX),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO));
-const struct uverbs_attr_def uverbs_uhw_compat_out =
- UVERBS_ATTR_PTR_OUT_SZ(UVERBS_ATTR_UHW_OUT, UVERBS_ATTR_SIZE(0, USHRT_MAX),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO));
-
-void create_udata(struct uverbs_attr_bundle *ctx, struct ib_udata *udata)
-{
- /*
- * This is for ease of conversion. The purpose is to convert all drivers
- * to use uverbs_attr_bundle instead of ib_udata.
- * Assume attr == 0 is input and attr == 1 is output.
- */
- const struct uverbs_attr *uhw_in =
- uverbs_attr_get(ctx, UVERBS_ATTR_UHW_IN);
- const struct uverbs_attr *uhw_out =
- uverbs_attr_get(ctx, UVERBS_ATTR_UHW_OUT);
-
- if (!IS_ERR(uhw_in)) {
- udata->inlen = uhw_in->ptr_attr.len;
- if (uverbs_attr_ptr_is_inline(uhw_in))
- udata->inbuf = &uhw_in->uattr->data;
- else
- udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
- } else {
- udata->inbuf = NULL;
- udata->inlen = 0;
- }
-
- if (!IS_ERR(uhw_out)) {
- udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
- udata->outlen = uhw_out->ptr_attr.len;
- } else {
- udata->outbuf = NULL;
- udata->outlen = 0;
- }
-}
-
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COMP_CHANNEL,
- &UVERBS_TYPE_ALLOC_FD(0,
- sizeof(struct ib_uverbs_completion_event_file),
- uverbs_hot_unplug_completion_event_file,
- &uverbs_event_fops,
- "[infinibandevent]", O_RDONLY));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
+ uverbs_hot_unplug_completion_event_file,
+ &uverbs_event_fops,
+ "[infinibandevent]",
+ O_RDONLY));
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_QP,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), 0,
- uverbs_free_qp));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_QP,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW,
- &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_mw));
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw));
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_SRQ,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), 0,
- uverbs_free_srq));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_SRQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
+ uverbs_free_srq));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_AH,
- &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_ah));
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_ah));
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_FLOW,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object),
- 0, uverbs_free_flow));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_FLOW,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object),
+ uverbs_free_flow));
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_WQ,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0,
- uverbs_free_wq));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_WQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL,
- &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_rwq_ind_tbl));
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_rwq_ind_tbl));
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_XRCD,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), 0,
- uverbs_free_xrcd));
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_XRCD,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object),
+ uverbs_free_xrcd));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_PD,
- /* 2 is used in order to free the PD after MRs */
- &UVERBS_TYPE_ALLOC_IDR(2, uverbs_free_pd));
-
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DEVICE, NULL);
-
-static DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
- &UVERBS_OBJECT(UVERBS_OBJECT_DEVICE),
- &UVERBS_OBJECT(UVERBS_OBJECT_PD),
- &UVERBS_OBJECT(UVERBS_OBJECT_MR),
- &UVERBS_OBJECT(UVERBS_OBJECT_COMP_CHANNEL),
- &UVERBS_OBJECT(UVERBS_OBJECT_CQ),
- &UVERBS_OBJECT(UVERBS_OBJECT_QP),
- &UVERBS_OBJECT(UVERBS_OBJECT_AH),
- &UVERBS_OBJECT(UVERBS_OBJECT_MW),
- &UVERBS_OBJECT(UVERBS_OBJECT_SRQ),
- &UVERBS_OBJECT(UVERBS_OBJECT_FLOW),
- &UVERBS_OBJECT(UVERBS_OBJECT_WQ),
- &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL),
- &UVERBS_OBJECT(UVERBS_OBJECT_XRCD),
- &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION),
- &UVERBS_OBJECT(UVERBS_OBJECT_DM),
- &UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS));
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_pd));
+
+DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE);
+
+DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
+ &UVERBS_OBJECT(UVERBS_OBJECT_DEVICE),
+ &UVERBS_OBJECT(UVERBS_OBJECT_PD),
+ &UVERBS_OBJECT(UVERBS_OBJECT_MR),
+ &UVERBS_OBJECT(UVERBS_OBJECT_COMP_CHANNEL),
+ &UVERBS_OBJECT(UVERBS_OBJECT_CQ),
+ &UVERBS_OBJECT(UVERBS_OBJECT_QP),
+ &UVERBS_OBJECT(UVERBS_OBJECT_AH),
+ &UVERBS_OBJECT(UVERBS_OBJECT_MW),
+ &UVERBS_OBJECT(UVERBS_OBJECT_SRQ),
+ &UVERBS_OBJECT(UVERBS_OBJECT_FLOW),
+ &UVERBS_OBJECT(UVERBS_OBJECT_WQ),
+ &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL),
+ &UVERBS_OBJECT(UVERBS_OBJECT_XRCD),
+ &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION),
+ &UVERBS_OBJECT(UVERBS_OBJECT_DM),
+ &UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS));
const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
{
return &uverbs_default_objects;
}
-EXPORT_SYMBOL_GPL(uverbs_default_get_objects);
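
This hunk sets the pattern repeated through the rest of the series: the DECLARE_UVERBS_* macros drop both the leading '&' and the hand-maintained destruction-order integer. A minimal sketch of the resulting declaration shape, using the hypothetical names UVERBS_OBJECT_EXAMPLE and uverbs_free_example:

	/* Hypothetical object in the simplified form: no ordering
	 * argument, no '&' in front of the type-alloc macro. */
	DECLARE_UVERBS_NAMED_OBJECT(
		UVERBS_OBJECT_EXAMPLE,
		UVERBS_TYPE_ALLOC_IDR(uverbs_free_example));
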
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index 03b182a684a6..a0ffdcf9a51c 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -38,20 +38,22 @@ static int uverbs_free_counters(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
struct ib_counters *counters = uobject->object;
+ int ret;
- if (why == RDMA_REMOVE_DESTROY &&
- atomic_read(&counters->usecnt))
- return -EBUSY;
+ ret = ib_destroy_usecnt(&counters->usecnt, why, uobject);
+ if (ret)
+ return ret;
return counters->device->destroy_counters(counters);
}
-static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
+ struct ib_device *ib_dev = uobj->context->device;
struct ib_counters *counters;
- struct ib_uobject *uobj;
int ret;
/*
@@ -62,7 +64,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(struct ib_device *ib_de
if (!ib_dev->create_counters)
return -EOPNOTSUPP;
- uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
counters = ib_dev->create_counters(ib_dev, attrs);
if (IS_ERR(counters)) {
ret = PTR_ERR(counters);
@@ -80,9 +81,8 @@ err_create_counters:
return ret;
}
-static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
struct ib_counters_read_attr read_attr = {};
const struct uverbs_attr *uattr;
@@ -90,68 +90,62 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(struct ib_device *ib_dev,
uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE);
int ret;
- if (!ib_dev->read_counters)
+ if (!counters->device->read_counters)
return -EOPNOTSUPP;
if (!atomic_read(&counters->usecnt))
return -EINVAL;
- ret = uverbs_copy_from(&read_attr.flags, attrs,
- UVERBS_ATTR_READ_COUNTERS_FLAGS);
+ ret = uverbs_get_flags32(&read_attr.flags, attrs,
+ UVERBS_ATTR_READ_COUNTERS_FLAGS,
+ IB_UVERBS_READ_COUNTERS_PREFER_CACHED);
if (ret)
return ret;
uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
- read_attr.counters_buff = kcalloc(read_attr.ncounters,
- sizeof(u64), GFP_KERNEL);
- if (!read_attr.counters_buff)
- return -ENOMEM;
-
- ret = ib_dev->read_counters(counters,
- &read_attr,
- attrs);
- if (ret)
- goto err_read;
+ read_attr.counters_buff = uverbs_zalloc(
+ attrs, array_size(read_attr.ncounters, sizeof(u64)));
+ if (IS_ERR(read_attr.counters_buff))
+ return PTR_ERR(read_attr.counters_buff);
- ret = uverbs_copy_to(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF,
- read_attr.counters_buff,
- read_attr.ncounters * sizeof(u64));
+ ret = counters->device->read_counters(counters, &read_attr, attrs);
+ if (ret)
+ return ret;
-err_read:
- kfree(read_attr.counters_buff);
- return ret;
+ return uverbs_copy_to(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF,
+ read_attr.counters_buff,
+ read_attr.ncounters * sizeof(u64));
}
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_CREATE,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
- UVERBS_OBJECT_COUNTERS,
- UVERBS_ACCESS_NEW,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_COUNTERS_DESTROY,
- uverbs_destroy_def_handler,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
- UVERBS_OBJECT_COUNTERS,
- UVERBS_ACCESS_DESTROY,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-#define MAX_COUNTERS_BUFF_SIZE USHRT_MAX
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_READ,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_READ_COUNTERS_HANDLE,
- UVERBS_OBJECT_COUNTERS,
- UVERBS_ACCESS_READ,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_READ_COUNTERS_BUFF,
- UVERBS_ATTR_SIZE(0, MAX_COUNTERS_BUFF_SIZE),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_READ_COUNTERS_FLAGS,
- UVERBS_ATTR_TYPE(__u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_COUNTERS_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_COUNTERS_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_COUNTERS_READ,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_READ_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_READ_COUNTERS_BUFF,
+ UVERBS_ATTR_MIN_SIZE(0),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_READ_COUNTERS_FLAGS,
+ enum ib_uverbs_read_counters_flags));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COUNTERS,
- &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_counters),
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_counters),
&UVERBS_METHOD(UVERBS_METHOD_COUNTERS_CREATE),
&UVERBS_METHOD(UVERBS_METHOD_COUNTERS_DESTROY),
&UVERBS_METHOD(UVERBS_METHOD_COUNTERS_READ));
-
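
The READ handler above swaps the kcalloc()/kfree() pair for uverbs_zalloc(), whose allocation lives in the attr bundle and is released automatically when the method invocation completes, which is what lets the err_read label disappear. A sketch of the pattern with hypothetical names (example_handler, the fixed size of 16 entries):

	static int example_handler(struct ib_uverbs_file *file,
				   struct uverbs_attr_bundle *attrs)
	{
		/* Bundle-scoped buffer: no kfree() on any return path. */
		u64 *buf = uverbs_zalloc(attrs, array_size(16, sizeof(u64)));

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		/* ... fill buf, then copy it to a PTR_OUT attribute ... */
		return 0;
	}
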
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index 3d293d01afea..5b5f2052cd52 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -44,21 +44,26 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
int ret;
ret = ib_destroy_cq(cq);
- if (!ret || why != RDMA_REMOVE_DESTROY)
- ib_uverbs_release_ucq(uobject->context->ufile, ev_queue ?
- container_of(ev_queue,
- struct ib_uverbs_completion_event_file,
- ev_queue) : NULL,
- ucq);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ ib_uverbs_release_ucq(
+ uobject->context->ufile,
+ ev_queue ? container_of(ev_queue,
+ struct ib_uverbs_completion_event_file,
+ ev_queue) :
+ NULL,
+ ucq);
return ret;
}
-static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
- struct ib_ucontext *ucontext = file->ucontext;
- struct ib_ucq_object *obj;
+ struct ib_ucq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE),
+ typeof(*obj), uobject);
+ struct ib_device *ib_dev = obj->uobject.context->device;
struct ib_udata uhw;
int ret;
u64 user_handle;
@@ -67,7 +72,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
struct ib_uverbs_completion_event_file *ev_file = NULL;
struct ib_uobject *ev_file_uobj;
- if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ))
+ if (!ib_dev->create_cq || !ib_dev->destroy_cq)
return -EOPNOTSUPP;
ret = uverbs_copy_from(&attr.comp_vector, attrs,
@@ -81,28 +86,26 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
if (ret)
return ret;
- /* Optional param, if it doesn't exist, we get -ENOENT and skip it */
- if (IS_UVERBS_COPY_ERR(uverbs_copy_from(&attr.flags, attrs,
- UVERBS_ATTR_CREATE_CQ_FLAGS)))
- return -EFAULT;
+ ret = uverbs_get_flags32(&attr.flags, attrs,
+ UVERBS_ATTR_CREATE_CQ_FLAGS,
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION |
+ IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN);
+ if (ret)
+ return ret;
ev_file_uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL);
if (!IS_ERR(ev_file_uobj)) {
ev_file = container_of(ev_file_uobj,
struct ib_uverbs_completion_event_file,
- uobj_file.uobj);
+ uobj);
uverbs_uobject_get(ev_file_uobj);
}
- if (attr.comp_vector >= ucontext->ufile->device->num_comp_vectors) {
+ if (attr.comp_vector >= file->device->num_comp_vectors) {
ret = -EINVAL;
goto err_event_file;
}
- obj = container_of(uverbs_attr_get_uobject(attrs,
- UVERBS_ATTR_CREATE_CQ_HANDLE),
- typeof(*obj), uobject);
- obj->uverbs_file = ucontext->ufile;
obj->comp_events_reported = 0;
obj->async_events_reported = 0;
INIT_LIST_HEAD(&obj->comp_list);
@@ -111,7 +114,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
/* Temporary, only until drivers get the new uverbs_attr_bundle */
create_udata(attrs, &uhw);
- cq = ib_dev->create_cq(ib_dev, &attr, ucontext, &uhw);
+ cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context, &uhw);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
goto err_event_file;
@@ -143,69 +146,64 @@ err_event_file:
return ret;
};
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_CQ_CREATE,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_CQ_HANDLE, UVERBS_OBJECT_CQ,
- UVERBS_ACCESS_NEW,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_CQE,
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_CQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_CQE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL,
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_COMP_VECTOR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_CQ_FLAGS,
+ enum ib_uverbs_ex_create_cq_flags),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE,
UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_USER_HANDLE,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL,
- UVERBS_OBJECT_COMP_CHANNEL,
- UVERBS_ACCESS_READ),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_COMP_VECTOR, UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_CQ_FLAGS, UVERBS_ATTR_TYPE(u32)),
- &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE, UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &uverbs_uhw_compat_in, &uverbs_uhw_compat_out);
-
-static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+ UA_MANDATORY),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj =
uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE);
- struct ib_uverbs_destroy_cq_resp resp;
- struct ib_ucq_object *obj;
- int ret;
-
- if (IS_ERR(uobj))
- return PTR_ERR(uobj);
-
- obj = container_of(uobj, struct ib_ucq_object, uobject);
-
- if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ))
- return -EOPNOTSUPP;
-
- ret = rdma_explicit_destroy(uobj);
- if (ret)
- return ret;
-
- resp.comp_events_reported = obj->comp_events_reported;
- resp.async_events_reported = obj->async_events_reported;
+ struct ib_ucq_object *obj =
+ container_of(uobj, struct ib_ucq_object, uobject);
+ struct ib_uverbs_destroy_cq_resp resp = {
+ .comp_events_reported = obj->comp_events_reported,
+ .async_events_reported = obj->async_events_reported
+ };
return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_CQ_RESP, &resp,
sizeof(resp));
}
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_CQ_DESTROY,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_CQ_HANDLE, UVERBS_OBJECT_CQ,
- UVERBS_ACCESS_DESTROY,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_CQ_RESP,
- UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_cq_resp),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_CQ,
- &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), 0,
- uverbs_free_cq),
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_CQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_CQ_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_cq_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_CQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), uverbs_free_cq),
+
#if IS_ENABLED(CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI)
- &UVERBS_METHOD(UVERBS_METHOD_CQ_CREATE),
- &UVERBS_METHOD(UVERBS_METHOD_CQ_DESTROY)
+ &UVERBS_METHOD(UVERBS_METHOD_CQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_CQ_DESTROY)
#endif
- );
-
+);
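
A semantic change hides in the CQ_CREATE flags hunk: uverbs_copy_from() accepted any 32-bit value, whereas uverbs_get_flags32() rejects bits outside the given mask, so unknown future flags fail instead of being silently ignored. A sketch of the call shape, reusing the attribute and flag names from above in a hypothetical helper:

	static int example_get_cq_flags(struct uverbs_attr_bundle *attrs,
					u32 *flags)
	{
		/* Returns -EINVAL if userspace sets a bit outside the mask. */
		return uverbs_get_flags32(flags, attrs,
					  UVERBS_ATTR_CREATE_CQ_FLAGS,
					  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION |
					  IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN);
	}
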
diff --git a/drivers/infiniband/core/uverbs_std_types_dm.c b/drivers/infiniband/core/uverbs_std_types_dm.c
index 8b681575b615..edc3ff7733d4 100644
--- a/drivers/infiniband/core/uverbs_std_types_dm.c
+++ b/drivers/infiniband/core/uverbs_std_types_dm.c
@@ -37,20 +37,24 @@ static int uverbs_free_dm(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
struct ib_dm *dm = uobject->object;
+ int ret;
- if (why == RDMA_REMOVE_DESTROY && atomic_read(&dm->usecnt))
- return -EBUSY;
+ ret = ib_destroy_usecnt(&dm->usecnt, why, uobject);
+ if (ret)
+ return ret;
return dm->device->dealloc_dm(dm);
}
-static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int
+UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
{
- struct ib_ucontext *ucontext = file->ucontext;
struct ib_dm_alloc_attr attr = {};
- struct ib_uobject *uobj;
+ struct ib_uobject *uobj =
+ uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DM_HANDLE)
+ ->obj_attr.uobject;
+ struct ib_device *ib_dev = uobj->context->device;
struct ib_dm *dm;
int ret;
@@ -67,9 +71,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_device *ib_dev,
if (ret)
return ret;
- uobj = uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DM_HANDLE)->obj_attr.uobject;
-
- dm = ib_dev->alloc_dm(ib_dev, ucontext, &attr, attrs);
+ dm = ib_dev->alloc_dm(ib_dev, uobj->context, &attr, attrs);
if (IS_ERR(dm))
return PTR_ERR(dm);
@@ -83,26 +85,27 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_device *ib_dev,
return 0;
}
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_DM_ALLOC,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_ALLOC_DM_HANDLE, UVERBS_OBJECT_DM,
- UVERBS_ACCESS_NEW,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_LENGTH,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_ALIGNMENT,
- UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_DM_FREE,
- uverbs_destroy_def_handler,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_FREE_DM_HANDLE,
- UVERBS_OBJECT_DM,
- UVERBS_ACCESS_DESTROY,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_DM_ALLOC,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_ALLOC_DM_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DM_ALIGNMENT,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_DM_FREE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_FREE_DM_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DM,
- /* 1 is used in order to free the DM after MRs */
- &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_dm),
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_dm),
&UVERBS_METHOD(UVERBS_METHOD_DM_ALLOC),
&UVERBS_METHOD(UVERBS_METHOD_DM_FREE));
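
The open-coded usecnt test becomes ib_destroy_usecnt() here and in the other free callbacks. A simplified sketch of what the helper checks (the real one also routes through the retryability test on the uobject):

	static inline int example_destroy_usecnt(atomic_t *usecnt,
						 enum rdma_remove_reason why)
	{
		/* Only an explicit userspace destroy may be refused with
		 * -EBUSY; forced cleanup such as device removal proceeds. */
		if (atomic_read(usecnt) && why == RDMA_REMOVE_DESTROY)
			return -EBUSY;
		return 0;
	}
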
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index a7be51cf2e42..d8cfafe23bd9 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -37,10 +37,11 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
struct ib_flow_action *action = uobject->object;
+ int ret;
- if (why == RDMA_REMOVE_DESTROY &&
- atomic_read(&action->usecnt))
- return -EBUSY;
+ ret = ib_destroy_usecnt(&action->usecnt, why, uobject);
+ if (ret)
+ return ret;
return action->device->destroy_flow_action(action);
}
@@ -303,12 +304,13 @@ static int parse_flow_action_esp(struct ib_device *ib_dev,
return 0;
}
-static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE);
+ struct ib_device *ib_dev = uobj->context->device;
int ret;
- struct ib_uobject *uobj;
struct ib_flow_action *action;
struct ib_flow_action_esp_attr esp_attr = {};
@@ -320,7 +322,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device
return ret;
/* No need to check as this attribute is marked as MANDATORY */
- uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE);
action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs);
if (IS_ERR(action))
return PTR_ERR(action);
@@ -334,102 +335,109 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device
return 0;
}
-static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE);
+ struct ib_flow_action *action = uobj->object;
int ret;
- struct ib_uobject *uobj;
- struct ib_flow_action *action;
struct ib_flow_action_esp_attr esp_attr = {};
- if (!ib_dev->modify_flow_action_esp)
+ if (!action->device->modify_flow_action_esp)
return -EOPNOTSUPP;
- ret = parse_flow_action_esp(ib_dev, file, attrs, &esp_attr, true);
+ ret = parse_flow_action_esp(action->device, file, attrs, &esp_attr,
+ true);
if (ret)
return ret;
- uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE);
- action = uobj->object;
-
if (action->type != IB_FLOW_ACTION_ESP)
return -EINVAL;
- return ib_dev->modify_flow_action_esp(action,
- &esp_attr.hdr,
- attrs);
+ return action->device->modify_flow_action_esp(action, &esp_attr.hdr,
+ attrs);
}
static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
[IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
- { .ptr = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_keymat_aes_gcm),
- .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
- } },
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_STRUCT(
+ struct ib_uverbs_flow_action_esp_keymat_aes_gcm,
+ aes_key),
},
};
static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = {
- { .ptr = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- /* No need to specify any data */
- .len = 0,
- } }
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
},
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
- { .ptr = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, size),
- .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
- } }
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp,
+ size),
},
};
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE, UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_NEW,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, hard_limit_pkts),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY |
- UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, UVERBS_ATTR_TYPE(__u32)),
- &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
- uverbs_flow_action_esp_keymat,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
- uverbs_flow_action_esp_replay),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_encap, type)));
-
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE, UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_WRITE,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, hard_limit_pkts),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, UVERBS_ATTR_TYPE(__u32)),
- &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
- uverbs_flow_action_esp_keymat),
- &UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
- uverbs_flow_action_esp_replay),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_encap, type)));
-
-static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_FLOW_ACTION_DESTROY,
- uverbs_destroy_def_handler,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_DESTROY,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_FLOW_ACTION,
- &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow_action),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY));
-
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
+ hard_limit_pkts),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
+ UVERBS_ATTR_TYPE(__u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+ uverbs_flow_action_esp_keymat,
+ UA_MANDATORY),
+ UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+ uverbs_flow_action_esp_replay,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(
+ UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_WRITE,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
+ hard_limit_pkts),
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
+ UVERBS_ATTR_TYPE(__u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+ uverbs_flow_action_esp_keymat,
+ UA_OPTIONAL),
+ UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+ uverbs_flow_action_esp_replay,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(
+ UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ UVERBS_METHOD_FLOW_ACTION_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action),
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY));
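
The keymat and replay tables above show the flattened uverbs_attr_spec layout: the anonymous '{ .ptr = { ... } }' wrapper is gone, and the min-size-or-zero semantics are implied by UVERBS_ATTR_STRUCT()/UVERBS_ATTR_NO_DATA(). A hypothetical single-entry table in the new form:

	static const struct uverbs_attr_spec example_specs[] = {
		[0] = {
			.type = UVERBS_ATTR_TYPE_PTR_IN,
			/* Zero-length input: presence of the attr is the data. */
			UVERBS_ATTR_NO_DATA(),
		},
	};
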
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 68f7cadf088f..cf02e774303e 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -39,14 +39,18 @@ static int uverbs_free_mr(struct ib_uobject *uobject,
return ib_dereg_mr((struct ib_mr *)uobject->object);
}
-static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(struct ib_device *ib_dev,
- struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs)
+static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
struct ib_dm_mr_attr attr = {};
- struct ib_uobject *uobj;
- struct ib_dm *dm;
- struct ib_pd *pd;
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE);
+ struct ib_dm *dm =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_DM_HANDLE);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_PD_HANDLE);
+ struct ib_device *ib_dev = pd->device;
+
struct ib_mr *mr;
int ret;
@@ -62,8 +66,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(struct ib_device *ib_dev,
if (ret)
return ret;
- ret = uverbs_copy_from(&attr.access_flags, attrs,
- UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS);
+ ret = uverbs_get_flags32(&attr.access_flags, attrs,
+ UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+ IB_ACCESS_SUPPORTED);
if (ret)
return ret;
@@ -74,12 +79,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(struct ib_device *ib_dev,
if (ret)
return ret;
- pd = uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_PD_HANDLE);
-
- dm = uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_DM_HANDLE);
-
- uobj = uverbs_attr_get(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE)->obj_attr.uobject;
-
if (attr.offset > dm->length || attr.length > dm->length ||
attr.length > dm->length - attr.offset)
return -EINVAL;
@@ -115,33 +114,36 @@ err_dereg:
return ret;
}
-static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_DM_MR_REG,
- &UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_HANDLE, UVERBS_OBJECT_MR,
- UVERBS_ACCESS_NEW,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_OFFSET,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_LENGTH,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_PD_HANDLE, UVERBS_OBJECT_PD,
- UVERBS_ACCESS_READ,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_DM_MR_REG,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+ enum ib_access_flags),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_DM_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_DM_HANDLE, UVERBS_OBJECT_DM,
- UVERBS_ACCESS_READ,
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
- UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
- UVERBS_ATTR_TYPE(u32),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MR,
- /* 1 is used in order to free the MR after all the MWs */
- &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_mr),
- &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG));
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_MR,
+ UVERBS_TYPE_ALLOC_IDR(uverbs_free_mr),
+ &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG));
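
Worth noting in the retained DM bounds test: both attr.offset and attr.length are compared against dm->length before the subtraction, so 'dm->length - attr.offset' cannot wrap. The same guard in isolation, as a sketch with hypothetical names:

	static bool example_range_ok(u64 offset, u64 length, u64 total)
	{
		/* Each operand is bounded first, so the subtraction cannot
		 * underflow when it is finally evaluated. */
		return offset <= total && length <= total &&
		       length <= total - offset;
	}
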
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
new file mode 100644
index 000000000000..73ea6f0db88f
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ */
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/rdma_user_ioctl.h>
+#include <linux/bitops.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static void *uapi_add_elm(struct uverbs_api *uapi, u32 key, size_t alloc_size)
+{
+ void *elm;
+ int rc;
+
+ if (key == UVERBS_API_KEY_ERR)
+ return ERR_PTR(-EOVERFLOW);
+
+ elm = kzalloc(alloc_size, GFP_KERNEL);
+ if (!elm)
+ return ERR_PTR(-ENOMEM);
+ rc = radix_tree_insert(&uapi->radix, key, elm);
+ if (rc) {
+ kfree(elm);
+ return ERR_PTR(rc);
+ }
+
+ return elm;
+}
+
+static int uapi_merge_method(struct uverbs_api *uapi,
+ struct uverbs_api_object *obj_elm, u32 obj_key,
+ const struct uverbs_method_def *method,
+ bool is_driver)
+{
+ u32 method_key = obj_key | uapi_key_ioctl_method(method->id);
+ struct uverbs_api_ioctl_method *method_elm;
+ unsigned int i;
+
+ if (!method->attrs)
+ return 0;
+
+ method_elm = uapi_add_elm(uapi, method_key, sizeof(*method_elm));
+ if (IS_ERR(method_elm)) {
+ if (method_elm != ERR_PTR(-EEXIST))
+ return PTR_ERR(method_elm);
+
+ /*
+ * This occurs when a driver uses ADD_UVERBS_ATTRIBUTES_SIMPLE
+ */
+ if (WARN_ON(method->handler))
+ return -EINVAL;
+ method_elm = radix_tree_lookup(&uapi->radix, method_key);
+ if (WARN_ON(!method_elm))
+ return -EINVAL;
+ } else {
+ WARN_ON(!method->handler);
+ rcu_assign_pointer(method_elm->handler, method->handler);
+ if (method->handler != uverbs_destroy_def_handler)
+ method_elm->driver_method = is_driver;
+ }
+
+ for (i = 0; i != method->num_attrs; i++) {
+ const struct uverbs_attr_def *attr = (*method->attrs)[i];
+ struct uverbs_api_attr *attr_slot;
+
+ if (!attr)
+ continue;
+
+ /*
+ * ENUM_IN contains the 'ids' pointer to the driver's .rodata,
+ * so if it is specified by a driver then it always makes this
+ * into a driver method.
+ */
+ if (attr->attr.type == UVERBS_ATTR_TYPE_ENUM_IN)
+ method_elm->driver_method |= is_driver;
+
+ attr_slot =
+ uapi_add_elm(uapi, method_key | uapi_key_attr(attr->id),
+ sizeof(*attr_slot));
+ /* Attributes are not allowed to be modified by drivers */
+ if (IS_ERR(attr_slot))
+ return PTR_ERR(attr_slot);
+
+ attr_slot->spec = attr->attr;
+ }
+
+ return 0;
+}
+
+static int uapi_merge_tree(struct uverbs_api *uapi,
+ const struct uverbs_object_tree_def *tree,
+ bool is_driver)
+{
+ unsigned int i, j;
+ int rc;
+
+ if (!tree->objects)
+ return 0;
+
+ for (i = 0; i != tree->num_objects; i++) {
+ const struct uverbs_object_def *obj = (*tree->objects)[i];
+ struct uverbs_api_object *obj_elm;
+ u32 obj_key;
+
+ if (!obj)
+ continue;
+
+ obj_key = uapi_key_obj(obj->id);
+ obj_elm = uapi_add_elm(uapi, obj_key, sizeof(*obj_elm));
+ if (IS_ERR(obj_elm)) {
+ if (obj_elm != ERR_PTR(-EEXIST))
+ return PTR_ERR(obj_elm);
+
+ /* This occurs when a driver uses ADD_UVERBS_METHODS */
+ if (WARN_ON(obj->type_attrs))
+ return -EINVAL;
+ obj_elm = radix_tree_lookup(&uapi->radix, obj_key);
+ if (WARN_ON(!obj_elm))
+ return -EINVAL;
+ } else {
+ obj_elm->type_attrs = obj->type_attrs;
+ if (obj->type_attrs) {
+ obj_elm->type_class =
+ obj->type_attrs->type_class;
+ /*
+ * Today drivers are only permitted to use
+ * idr_class types. They cannot use FD types
+ * because we currently have no way to revoke
+ * the fops pointer after device
+ * disassociation.
+ */
+ if (WARN_ON(is_driver &&
+ obj->type_attrs->type_class !=
+ &uverbs_idr_class))
+ return -EINVAL;
+ }
+ }
+
+ if (!obj->methods)
+ continue;
+
+ for (j = 0; j != obj->num_methods; j++) {
+ const struct uverbs_method_def *method =
+ (*obj->methods)[j];
+ if (!method)
+ continue;
+
+ rc = uapi_merge_method(uapi, obj_elm, obj_key, method,
+ is_driver);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int
+uapi_finalize_ioctl_method(struct uverbs_api *uapi,
+ struct uverbs_api_ioctl_method *method_elm,
+ u32 method_key)
+{
+ struct radix_tree_iter iter;
+ unsigned int num_attrs = 0;
+ unsigned int max_bkey = 0;
+ bool single_uobj = false;
+ void __rcu **slot;
+
+ method_elm->destroy_bkey = UVERBS_API_ATTR_BKEY_LEN;
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter,
+ uapi_key_attrs_start(method_key)) {
+ struct uverbs_api_attr *elm =
+ rcu_dereference_protected(*slot, true);
+ u32 attr_key = iter.index & UVERBS_API_ATTR_KEY_MASK;
+ u32 attr_bkey = uapi_bkey_attr(attr_key);
+ u8 type = elm->spec.type;
+
+ if (uapi_key_attr_to_method(iter.index) !=
+ uapi_key_attr_to_method(method_key))
+ break;
+
+ if (elm->spec.mandatory)
+ __set_bit(attr_bkey, method_elm->attr_mandatory);
+
+ if (type == UVERBS_ATTR_TYPE_IDR ||
+ type == UVERBS_ATTR_TYPE_FD) {
+ u8 access = elm->spec.u.obj.access;
+
+ /*
+ * Verbs specs may only have one NEW/DESTROY, we don't
+ * have the infrastructure to abort multiple NEW's or
+ * cope with multiple DESTROY failure.
+ */
+ if (access == UVERBS_ACCESS_NEW ||
+ access == UVERBS_ACCESS_DESTROY) {
+ if (WARN_ON(single_uobj))
+ return -EINVAL;
+
+ single_uobj = true;
+ if (WARN_ON(!elm->spec.mandatory))
+ return -EINVAL;
+ }
+
+ if (access == UVERBS_ACCESS_DESTROY)
+ method_elm->destroy_bkey = attr_bkey;
+ }
+
+ max_bkey = max(max_bkey, attr_bkey);
+ num_attrs++;
+ }
+
+ method_elm->key_bitmap_len = max_bkey + 1;
+ WARN_ON(method_elm->key_bitmap_len > UVERBS_API_ATTR_BKEY_LEN);
+
+ uapi_compute_bundle_size(method_elm, num_attrs);
+ return 0;
+}
+
+static int uapi_finalize(struct uverbs_api *uapi)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ int rc;
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ struct uverbs_api_ioctl_method *method_elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (uapi_key_is_ioctl_method(iter.index)) {
+ rc = uapi_finalize_ioctl_method(uapi, method_elm,
+ iter.index);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+void uverbs_destroy_api(struct uverbs_api *uapi)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ if (!uapi)
+ return;
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ kfree(rcu_dereference_protected(*slot, true));
+ radix_tree_iter_delete(&uapi->radix, &iter, slot);
+ }
+}
+
+struct uverbs_api *uverbs_alloc_api(
+ const struct uverbs_object_tree_def *const *driver_specs,
+ enum rdma_driver_id driver_id)
+{
+ struct uverbs_api *uapi;
+ int rc;
+
+ uapi = kzalloc(sizeof(*uapi), GFP_KERNEL);
+ if (!uapi)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_RADIX_TREE(&uapi->radix, GFP_KERNEL);
+ uapi->driver_id = driver_id;
+
+ rc = uapi_merge_tree(uapi, uverbs_default_get_objects(), false);
+ if (rc)
+ goto err;
+
+ for (; driver_specs && *driver_specs; driver_specs++) {
+ rc = uapi_merge_tree(uapi, *driver_specs, true);
+ if (rc)
+ goto err;
+ }
+
+ rc = uapi_finalize(uapi);
+ if (rc)
+ goto err;
+
+ return uapi;
+err:
+ if (rc != -ENOMEM)
+ pr_err("Setup of uverbs_api failed, kernel parsing tree description is not valid (%d)??\n",
+ rc);
+
+ uverbs_destroy_api(uapi);
+ return ERR_PTR(rc);
+}
+
+/*
+ * The pre version is done before destroying the HW objects; it only blocks
+ * off method access. All methods that require the ib_dev or the module data
+ * must test one of these assignments prior to continuing.
+ */
+void uverbs_disassociate_api_pre(struct ib_uverbs_device *uverbs_dev)
+{
+ struct uverbs_api *uapi = uverbs_dev->uapi;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ if (uapi_key_is_ioctl_method(iter.index)) {
+ struct uverbs_api_ioctl_method *method_elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (method_elm->driver_method)
+ rcu_assign_pointer(method_elm->handler, NULL);
+ }
+ }
+
+ synchronize_srcu(&uverbs_dev->disassociate_srcu);
+}
+
+/*
+ * Called when a driver disassociates from the ib_uverbs_device. The
+ * assumption is that the driver module will unload afterwards. Replace everything
+ * related to the driver with NULL as a safety measure.
+ */
+void uverbs_disassociate_api(struct uverbs_api *uapi)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
+ if (uapi_key_is_object(iter.index)) {
+ struct uverbs_api_object *object_elm =
+ rcu_dereference_protected(*slot, true);
+
+ /*
+ * Some type_attrs are in the driver module. We don't
+ * bother to keep track of which since there should be
+ * no use of this after disassociate.
+ */
+ object_elm->type_attrs = NULL;
+ } else if (uapi_key_is_attr(iter.index)) {
+ struct uverbs_api_attr *elm =
+ rcu_dereference_protected(*slot, true);
+
+ if (elm->spec.type == UVERBS_ATTR_TYPE_ENUM_IN)
+ elm->spec.u2.enum_def.ids = NULL;
+ }
+ }
+}
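
The new parser is consumed at registration time: the default tree is merged first, any driver trees after it, and the per-method bitmaps are finalized last. A hedged sketch of a call site (the helper name is illustrative; only uverbs_alloc_api()/uverbs_destroy_api() are defined above):

	static int example_setup_uapi(struct ib_uverbs_device *uverbs_dev,
				      const struct uverbs_object_tree_def *const *specs,
				      enum rdma_driver_id id)
	{
		struct uverbs_api *uapi = uverbs_alloc_api(specs, id);

		if (IS_ERR(uapi))
			return PTR_ERR(uapi);
		uverbs_dev->uapi = uapi; /* torn down via uverbs_destroy_api() */
		return 0;
	}
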
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 9d6beb948535..6ee03d6089eb 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -326,12 +326,162 @@ EXPORT_SYMBOL(ib_dealloc_pd);
/* Address handles */
+/**
+ * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
+ * @dest: Pointer to destination ah_attr. Contents of the destination
+ * pointer are assumed to be invalid and the attributes are overwritten.
+ * @src: Pointer to source ah_attr.
+ */
+void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
+ const struct rdma_ah_attr *src)
+{
+ *dest = *src;
+ if (dest->grh.sgid_attr)
+ rdma_hold_gid_attr(dest->grh.sgid_attr);
+}
+EXPORT_SYMBOL(rdma_copy_ah_attr);
+
+/**
+ * rdma_replace_ah_attr - Replace a valid ah_attr with a new one.
+ * @old: Pointer to existing ah_attr which needs to be replaced.
+ * old is assumed to be valid or zeroed
+ * @new: Pointer to the new ah_attr.
+ *
+ * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
+ * the old ah_attr is valid; after that it copies the new attribute and holds
+ * a reference on the newly copied attribute.
+ */
+void rdma_replace_ah_attr(struct rdma_ah_attr *old,
+ const struct rdma_ah_attr *new)
+{
+ rdma_destroy_ah_attr(old);
+ *old = *new;
+ if (old->grh.sgid_attr)
+ rdma_hold_gid_attr(old->grh.sgid_attr);
+}
+EXPORT_SYMBOL(rdma_replace_ah_attr);
+
+/**
+ * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
+ * @dest: Pointer to destination ah_attr to copy to.
+ * dest is assumed to be valid or zeroed
+ * @src: Pointer to the new ah_attr.
+ *
+ * rdma_move_ah_attr() first releases any reference in the destination ah_attr
+ * if it is valid. This also transfers ownership of internal references from
+ * src to dest, making src invalid in the process. No new reference to the src
+ * ah_attr is taken.
+ */
+void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
+{
+ rdma_destroy_ah_attr(dest);
+ *dest = *src;
+ src->grh.sgid_attr = NULL;
+}
+EXPORT_SYMBOL(rdma_move_ah_attr);
+
+/*
+ * Validate that the rdma_ah_attr is valid for the device before passing it
+ * off to the driver.
+ */
+static int rdma_check_ah_attr(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
+{
+ if (!rdma_is_port_valid(device, ah_attr->port_num))
+ return -EINVAL;
+
+ if ((rdma_is_grh_required(device, ah_attr->port_num) ||
+ ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
+ !(ah_attr->ah_flags & IB_AH_GRH))
+ return -EINVAL;
+
+ if (ah_attr->grh.sgid_attr) {
+ /*
+ * Make sure the passed sgid_attr is consistent with the
+ * parameters
+ */
+ if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
+ ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * If the ah requires a GRH then ensure that the sgid_attr pointer is filled in.
+ * On success the caller is responsible to call rdma_unfill_sgid_attr().
+ */
+static int rdma_fill_sgid_attr(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr **old_sgid_attr)
+{
+ const struct ib_gid_attr *sgid_attr;
+ struct ib_global_route *grh;
+ int ret;
+
+ *old_sgid_attr = ah_attr->grh.sgid_attr;
+
+ ret = rdma_check_ah_attr(device, ah_attr);
+ if (ret)
+ return ret;
+
+ if (!(ah_attr->ah_flags & IB_AH_GRH))
+ return 0;
+
+ grh = rdma_ah_retrieve_grh(ah_attr);
+ if (grh->sgid_attr)
+ return 0;
+
+ sgid_attr =
+ rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
+
+ /* Move ownership of the kref into the ah_attr */
+ grh->sgid_attr = sgid_attr;
+ return 0;
+}
+
+static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *old_sgid_attr)
+{
+ /*
+ * Fill didn't change anything, the caller retains ownership of
+ * whatever it passed
+ */
+ if (ah_attr->grh.sgid_attr == old_sgid_attr)
+ return;
+
+ /*
+ * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the caller
+ * doesn't see any change in the rdma_ah_attr. If we get here,
+ * old_sgid_attr is NULL.
+ */
+ rdma_destroy_ah_attr(ah_attr);
+}
+
+static const struct ib_gid_attr *
+rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *old_attr)
+{
+ if (old_attr)
+ rdma_put_gid_attr(old_attr);
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
+ return ah_attr->grh.sgid_attr;
+ }
+ return NULL;
+}
+
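
The helpers above establish one ownership rule for sgid_attr pointers: whoever stores one holds exactly one reference, obtained via rdma_get_gid_attr() or rdma_hold_gid_attr() and dropped via rdma_put_gid_attr(). A sketch of the lifecycle from a caller's side (example_use_gid is hypothetical):

	static int example_use_gid(struct ib_device *device, u8 port_num,
				   unsigned int gid_index)
	{
		const struct ib_gid_attr *attr;

		attr = rdma_get_gid_attr(device, port_num, gid_index); /* +1 ref */
		if (IS_ERR(attr))
			return PTR_ERR(attr);

		/* ... attr->gid, attr->gid_type and attr->ndev are stable ... */

		rdma_put_gid_attr(attr); /* -1 ref */
		return 0;
	}
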
static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
struct ib_udata *udata)
{
struct ib_ah *ah;
+ if (!pd->device->create_ah)
+ return ERR_PTR(-EOPNOTSUPP);
+
ah = pd->device->create_ah(pd, ah_attr, udata);
if (!IS_ERR(ah)) {
@@ -339,15 +489,38 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
ah->pd = pd;
ah->uobject = NULL;
ah->type = ah_attr->type;
+ ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
+
atomic_inc(&pd->usecnt);
}
return ah;
}
+/**
+ * rdma_create_ah - Creates an address handle for the
+ * given address vector.
+ * @pd: The protection domain associated with the address handle.
+ * @ah_attr: The attributes of the address vector.
+ *
+ * It returns a valid ah pointer on success and an error pointer on failure.
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
- return _rdma_create_ah(pd, ah_attr, NULL);
+ const struct ib_gid_attr *old_sgid_attr;
+ struct ib_ah *ah;
+ int ret;
+
+ ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ah = _rdma_create_ah(pd, ah_attr, NULL);
+
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return ah;
}
EXPORT_SYMBOL(rdma_create_ah);
@@ -368,15 +541,27 @@ struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
struct ib_udata *udata)
{
+ const struct ib_gid_attr *old_sgid_attr;
+ struct ib_ah *ah;
int err;
+ err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
+ if (err)
+ return ERR_PTR(err);
+
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
err = ib_resolve_eth_dmac(pd->device, ah_attr);
- if (err)
- return ERR_PTR(err);
+ if (err) {
+ ah = ERR_PTR(err);
+ goto out;
+ }
}
- return _rdma_create_ah(pd, ah_attr, udata);
+ ah = _rdma_create_ah(pd, ah_attr, udata);
+
+out:
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return ah;
}
EXPORT_SYMBOL(rdma_create_user_ah);
@@ -455,16 +640,16 @@ static bool find_gid_index(const union ib_gid *gid,
return true;
}
-static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
- u16 vlan_id, const union ib_gid *sgid,
- enum ib_gid_type gid_type,
- u16 *gid_index)
+static const struct ib_gid_attr *
+get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
+ u16 vlan_id, const union ib_gid *sgid,
+ enum ib_gid_type gid_type)
{
struct find_gid_index_context context = {.vlan_id = vlan_id,
.gid_type = gid_type};
- return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
- &context, gid_index);
+ return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
+ &context);
}
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
@@ -508,39 +693,24 @@ EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
struct rdma_ah_attr *ah_attr)
{
- struct ib_gid_attr sgid_attr;
- struct ib_global_route *grh;
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
+ const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
int hop_limit = 0xff;
- union ib_gid sgid;
- int ret;
-
- grh = rdma_ah_retrieve_grh(ah_attr);
-
- ret = ib_query_gid(device,
- rdma_ah_get_port_num(ah_attr),
- grh->sgid_index,
- &sgid, &sgid_attr);
- if (ret || !sgid_attr.ndev) {
- if (!ret)
- ret = -ENXIO;
- return ret;
- }
+ int ret = 0;
/* If destination is link local and source GID is RoCEv1,
* IP stack is not used.
*/
if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
- sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
+ sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
ah_attr->roce.dmac);
- goto done;
+ return ret;
}
- ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
+ ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
ah_attr->roce.dmac,
- sgid_attr.ndev, &hop_limit);
-done:
- dev_put(sgid_attr.ndev);
+ sgid_attr->ndev, &hop_limit);
grh->hop_limit = hop_limit;
return ret;
@@ -555,16 +725,18 @@ done:
 * as sgid, and sgid is used as dgid because sgid contains the destination
 * GID to respond to.
*
+ * On success the caller is responsible to call rdma_destroy_ah_attr on the
+ * attr.
*/
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
struct rdma_ah_attr *ah_attr)
{
u32 flow_class;
- u16 gid_index;
int ret;
enum rdma_network_type net_type = RDMA_NETWORK_IB;
enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+ const struct ib_gid_attr *sgid_attr;
int hoplimit = 0xff;
union ib_gid dgid;
union ib_gid sgid;
@@ -595,72 +767,141 @@ int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
if (!(wc->wc_flags & IB_WC_GRH))
return -EPROTOTYPE;
- ret = get_sgid_index_from_eth(device, port_num,
- vlan_id, &dgid,
- gid_type, &gid_index);
- if (ret)
- return ret;
+ sgid_attr = get_sgid_attr_from_eth(device, port_num,
+ vlan_id, &dgid,
+ gid_type);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
flow_class = be32_to_cpu(grh->version_tclass_flow);
- rdma_ah_set_grh(ah_attr, &sgid,
- flow_class & 0xFFFFF,
- (u8)gid_index, hoplimit,
- (flow_class >> 20) & 0xFF);
- return ib_resolve_unicast_gid_dmac(device, ah_attr);
+ rdma_move_grh_sgid_attr(ah_attr,
+ &sgid,
+ flow_class & 0xFFFFF,
+ hoplimit,
+ (flow_class >> 20) & 0xFF,
+ sgid_attr);
+
+ ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
+ if (ret)
+ rdma_destroy_ah_attr(ah_attr);
+
+ return ret;
} else {
rdma_ah_set_dlid(ah_attr, wc->slid);
rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- if (wc->wc_flags & IB_WC_GRH) {
- if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
- ret = ib_find_cached_gid_by_port(device, &dgid,
- IB_GID_TYPE_IB,
- port_num, NULL,
- &gid_index);
- if (ret)
- return ret;
- } else {
- gid_index = 0;
- }
+ if ((wc->wc_flags & IB_WC_GRH) == 0)
+ return 0;
+
+ if (dgid.global.interface_id !=
+ cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
+ sgid_attr = rdma_find_gid_by_port(
+ device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
+ } else
+ sgid_attr = rdma_get_gid_attr(device, port_num, 0);
- flow_class = be32_to_cpu(grh->version_tclass_flow);
- rdma_ah_set_grh(ah_attr, &sgid,
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_move_grh_sgid_attr(ah_attr,
+ &sgid,
flow_class & 0xFFFFF,
- (u8)gid_index, hoplimit,
- (flow_class >> 20) & 0xFF);
- }
+ hoplimit,
+ (flow_class >> 20) & 0xFF,
+ sgid_attr);
+
return 0;
}
}
EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
+/**
+ * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
+ * of the reference
+ *
+ * @attr: Pointer to AH attribute structure
+ * @dgid: Destination GID
+ * @flow_label: Flow label
+ * @hop_limit: Hop limit
+ * @traffic_class: traffic class
+ * @sgid_attr: Pointer to SGID attribute
+ *
+ * This takes ownership of the sgid_attr reference. The caller must ensure
+ * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
+ * calling this function.
+ */
+void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
+ u32 flow_label, u8 hop_limit, u8 traffic_class,
+ const struct ib_gid_attr *sgid_attr)
+{
+ rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
+ traffic_class);
+ attr->grh.sgid_attr = sgid_attr;
+}
+EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
+
+/**
+ * rdma_destroy_ah_attr - Release reference to SGID attribute of
+ * ah attribute.
+ * @ah_attr: Pointer to ah attribute
+ *
+ * Release reference to the SGID attribute of the ah attribute if it is
+ * non-NULL. It is safe to call this multiple times, and safe to call it on
+ * a zero initialized ah_attr.
+ */
+void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
+{
+ if (ah_attr->grh.sgid_attr) {
+ rdma_put_gid_attr(ah_attr->grh.sgid_attr);
+ ah_attr->grh.sgid_attr = NULL;
+ }
+}
+EXPORT_SYMBOL(rdma_destroy_ah_attr);
+
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
const struct ib_grh *grh, u8 port_num)
{
struct rdma_ah_attr ah_attr;
+ struct ib_ah *ah;
int ret;
ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
if (ret)
return ERR_PTR(ret);
- return rdma_create_ah(pd, &ah_attr);
+ ah = rdma_create_ah(pd, &ah_attr);
+
+ rdma_destroy_ah_attr(&ah_attr);
+ return ah;
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
+ const struct ib_gid_attr *old_sgid_attr;
+ int ret;
+
if (ah->type != ah_attr->type)
return -EINVAL;
- return ah->device->modify_ah ?
+ ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
+ if (ret)
+ return ret;
+
+ ret = ah->device->modify_ah ?
ah->device->modify_ah(ah, ah_attr) :
-EOPNOTSUPP;
+
+ ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return ret;
}
EXPORT_SYMBOL(rdma_modify_ah);
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
+ ah_attr->grh.sgid_attr = NULL;
+
return ah->device->query_ah ?
ah->device->query_ah(ah, ah_attr) :
-EOPNOTSUPP;
@@ -669,13 +910,17 @@ EXPORT_SYMBOL(rdma_query_ah);
int rdma_destroy_ah(struct ib_ah *ah)
{
+ const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
struct ib_pd *pd;
int ret;
pd = ah->pd;
ret = ah->device->destroy_ah(ah);
- if (!ret)
+ if (!ret) {
atomic_dec(&pd->usecnt);
+ if (sgid_attr)
+ rdma_put_gid_attr(sgid_attr);
+ }
return ret;
}
@@ -1290,16 +1535,19 @@ bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
+/**
+ * ib_resolve_eth_dmac - Resolve destination mac address
+ * @device: Device to consider
+ * @ah_attr: address handle attribute which describes the
+ * source and destination parameters
+ * ib_resolve_eth_dmac() resolves the destination MAC address and L3 hop
+ * limit. It returns 0 on success or an appropriate error code, and
+ * initializes the necessary ah_attr fields when the call is successful.
+ */
static int ib_resolve_eth_dmac(struct ib_device *device,
struct rdma_ah_attr *ah_attr)
{
- int ret = 0;
- struct ib_global_route *grh;
-
- if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
- return -EINVAL;
-
- grh = rdma_ah_retrieve_grh(ah_attr);
+ int ret = 0;
if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
@@ -1317,6 +1565,14 @@ static int ib_resolve_eth_dmac(struct ib_device *device,
return ret;
}
+static bool is_qp_type_connected(const struct ib_qp *qp)
+{
+ return (qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_RC ||
+ qp->qp_type == IB_QPT_XRC_INI ||
+ qp->qp_type == IB_QPT_XRC_TGT);
+}
+
/**
* IB core internal function to perform QP attributes modification.
*/
@@ -1324,8 +1580,53 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ const struct ib_gid_attr *old_sgid_attr_av;
+ const struct ib_gid_attr *old_sgid_attr_alt_av;
int ret;
+ if (attr_mask & IB_QP_AV) {
+ ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
+ &old_sgid_attr_av);
+ if (ret)
+ return ret;
+ }
+ if (attr_mask & IB_QP_ALT_PATH) {
+ /*
+ * FIXME: This does not track the migration state, so if the
+ * user loads a new alternate path after the HW has migrated
+ * from primary->alternate we will keep the wrong
+ * references. This is OK for IB because the reference
+ * counting does not serve any functional purpose.
+ */
+ ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
+ &old_sgid_attr_alt_av);
+ if (ret)
+ goto out_av;
+
+ /*
+ * Today the core code can only handle alternate paths and APM
+ * for IB. Ban them in roce mode.
+ */
+ if (!(rdma_protocol_ib(qp->device,
+ attr->alt_ah_attr.port_num) &&
+ rdma_protocol_ib(qp->device, port))) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ /*
+ * If the user provided the qp_attr then we have to resolve it. Kernel
+ * users have to provide already resolved rdma_ah_attrs.
+ */
+ if (udata && (attr_mask & IB_QP_AV) &&
+ attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
+ is_qp_type_connected(qp)) {
+ ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
+ if (ret)
+ goto out;
+ }
+
if (rdma_ib_or_roce(qp->device, port)) {
if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
@@ -1341,20 +1642,27 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
}
ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
- if (!ret && (attr_mask & IB_QP_PORT))
- qp->port = attr->port_num;
+ if (ret)
+ goto out;
+ if (attr_mask & IB_QP_PORT)
+ qp->port = attr->port_num;
+ if (attr_mask & IB_QP_AV)
+ qp->av_sgid_attr =
+ rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
+ if (attr_mask & IB_QP_ALT_PATH)
+ qp->alt_path_sgid_attr = rdma_update_sgid_attr(
+ &attr->alt_ah_attr, qp->alt_path_sgid_attr);
+
+out:
+ if (attr_mask & IB_QP_ALT_PATH)
+ rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
+out_av:
+ if (attr_mask & IB_QP_AV)
+ rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
return ret;
}
-static bool is_qp_type_connected(const struct ib_qp *qp)
-{
- return (qp->qp_type == IB_QPT_UC ||
- qp->qp_type == IB_QPT_RC ||
- qp->qp_type == IB_QPT_XRC_INI ||
- qp->qp_type == IB_QPT_XRC_TGT);
-}
-
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
* @ib_qp: The QP to modify.
@@ -1369,17 +1677,7 @@ static bool is_qp_type_connected(const struct ib_qp *qp)
int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
- struct ib_qp *qp = ib_qp->real_qp;
- int ret;
-
- if (attr_mask & IB_QP_AV &&
- attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
- is_qp_type_connected(qp)) {
- ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
- if (ret)
- return ret;
- }
- return _ib_modify_qp(qp, attr, attr_mask, udata);
+ return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
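With the RoCE dmac resolution folded into _ib_modify_qp() above, this wrapper reduces to a pass-through on the real QP. For reference, a hedged sketch of how a kernel caller drives this path (kernel users pass no udata and, per the comment in _ib_modify_qp(), must supply already-resolved rdma_ah_attr's):

#include <rdma/ib_verbs.h>

/* Sketch only: a kernel ULP moving a QP to the error state; no ah_attr
 * is involved here, so nothing needs resolving. */
static int move_qp_to_err(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	return ib_modify_qp(qp, &attr, IB_QP_STATE);
}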
@@ -1451,6 +1749,9 @@ int ib_query_qp(struct ib_qp *qp,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr)
{
+ qp_attr->ah_attr.grh.sgid_attr = NULL;
+ qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
+
return qp->device->query_qp ?
qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
-EOPNOTSUPP;
@@ -1509,6 +1810,8 @@ static int __ib_destroy_shared_qp(struct ib_qp *qp)
int ib_destroy_qp(struct ib_qp *qp)
{
+ const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
+ const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
struct ib_pd *pd;
struct ib_cq *scq, *rcq;
struct ib_srq *srq;
@@ -1539,6 +1842,10 @@ int ib_destroy_qp(struct ib_qp *qp)
rdma_restrack_del(&qp->res);
ret = qp->device->destroy_qp(qp);
if (!ret) {
+ if (alt_path_sgid_attr)
+ rdma_put_gid_attr(alt_path_sgid_attr);
+ if (av_sgid_attr)
+ rdma_put_gid_attr(av_sgid_attr);
if (pd)
atomic_dec(&pd->usecnt);
if (scq)
@@ -1977,35 +2284,6 @@ int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
-struct ib_flow *ib_create_flow(struct ib_qp *qp,
- struct ib_flow_attr *flow_attr,
- int domain)
-{
- struct ib_flow *flow_id;
- if (!qp->device->create_flow)
- return ERR_PTR(-EOPNOTSUPP);
-
- flow_id = qp->device->create_flow(qp, flow_attr, domain, NULL);
- if (!IS_ERR(flow_id)) {
- atomic_inc(&qp->usecnt);
- flow_id->qp = qp;
- }
- return flow_id;
-}
-EXPORT_SYMBOL(ib_create_flow);
-
-int ib_destroy_flow(struct ib_flow *flow_id)
-{
- int err;
- struct ib_qp *qp = flow_id->qp;
-
- err = qp->device->destroy_flow(flow_id);
- if (!err)
- atomic_dec(&qp->usecnt);
- return err;
-}
-EXPORT_SYMBOL(ib_destroy_flow);
-
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status)
{
@@ -2200,7 +2478,6 @@ static void __ib_drain_sq(struct ib_qp *qp)
struct ib_cq *cq = qp->send_cq;
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
struct ib_drain_cqe sdrain;
- struct ib_send_wr *bad_swr;
struct ib_rdma_wr swr = {
.wr = {
.next = NULL,
@@ -2219,7 +2496,7 @@ static void __ib_drain_sq(struct ib_qp *qp)
sdrain.cqe.done = ib_drain_qp_done;
init_completion(&sdrain.done);
- ret = ib_post_send(qp, &swr.wr, &bad_swr);
+ ret = ib_post_send(qp, &swr.wr, NULL);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
return;
@@ -2240,7 +2517,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
struct ib_cq *cq = qp->recv_cq;
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
struct ib_drain_cqe rdrain;
- struct ib_recv_wr rwr = {}, *bad_rwr;
+ struct ib_recv_wr rwr = {};
int ret;
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
@@ -2253,7 +2530,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
rdrain.cqe.done = ib_drain_qp_done;
init_completion(&rdrain.done);
- ret = ib_post_recv(qp, &rwr, &bad_rwr);
+ ret = ib_post_recv(qp, &rwr, NULL);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return;
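The drain helpers above exercise the relaxed posting convention introduced alongside the const-ification in this series: a caller that posts a single WR and has no use for the failing-WR pointer may pass NULL for bad_wr. The same call shape in isolation (a sketch, assuming the updated rdma headers):

#include <rdma/ib_verbs.h>

/* Sketch only: post one empty recv WR; NULL bad_wr means "don't tell me
 * which WR failed" (fine here, there is only one). */
static int post_one_drain_wr(struct ib_qp *qp)
{
	struct ib_recv_wr rwr = {};

	return ib_post_recv(qp, &rwr, NULL);
}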
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a76e206704d4..bbfb86eb2d24 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -166,7 +166,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
| IB_DEVICE_MEM_WINDOW
| IB_DEVICE_MEM_WINDOW_TYPE_2B
| IB_DEVICE_MEM_MGT_EXTENSIONS;
- ib_attr->max_sge = dev_attr->max_qp_sges;
+ ib_attr->max_send_sge = dev_attr->max_qp_sges;
+ ib_attr->max_recv_sge = dev_attr->max_qp_sges;
ib_attr->max_sge_rd = dev_attr->max_qp_sges;
ib_attr->max_cq = dev_attr->max_cq;
ib_attr->max_cqe = dev_attr->max_cq_wqes;
@@ -243,8 +244,8 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
port_attr->gid_tbl_len = dev_attr->max_sgid;
port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_DEVICE_MGMT_SUP |
- IB_PORT_VENDOR_CLASS_SUP |
- IB_PORT_IP_BASED_GIDS;
+ IB_PORT_VENDOR_CLASS_SUP;
+ port_attr->ip_gids = true;
port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
port_attr->bad_pkey_cntr = 0;
@@ -364,8 +365,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
return rc;
}
-int bnxt_re_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr, void **context)
+int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
int rc;
u32 tbl_idx = 0;
@@ -377,7 +377,7 @@ int bnxt_re_add_gid(const union ib_gid *gid,
if ((attr->ndev) && is_vlan_dev(attr->ndev))
vlan_id = vlan_dev_vlan_id(attr->ndev);
- rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
+ rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
rdev->qplib_res.netdev->dev_addr,
vlan_id, true, &tbl_idx);
if (rc == -EALREADY) {
@@ -673,8 +673,6 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
int rc;
u8 nw_type;
- struct ib_gid_attr sgid_attr;
-
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
return ERR_PTR(-EINVAL);
@@ -705,20 +703,11 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
grh->dgid.raw) &&
!rdma_link_local_addr((struct in6_addr *)
grh->dgid.raw)) {
- union ib_gid sgid;
+ const struct ib_gid_attr *sgid_attr;
- rc = ib_get_cached_gid(&rdev->ibdev, 1,
- grh->sgid_index, &sgid,
- &sgid_attr);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to query gid at index %d",
- grh->sgid_index);
- goto fail;
- }
- dev_put(sgid_attr.ndev);
+ sgid_attr = grh->sgid_attr;
/* Get network header type for this GID */
- nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
switch (nw_type) {
case RDMA_NETWORK_IPV4:
ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
@@ -1408,7 +1397,7 @@ struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
}
if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
- rc = -ENOTSUPP;
+ rc = -EOPNOTSUPP;
goto exit;
}
@@ -1530,8 +1519,8 @@ int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
return 0;
}
-int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
ib_srq);
@@ -1599,9 +1588,6 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
enum ib_qp_state curr_qp_state, new_qp_state;
int rc, entries;
- int status;
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
unsigned int flags;
u8 nw_type;
@@ -1668,6 +1654,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (qp_attr_mask & IB_QP_AV) {
const struct ib_global_route *grh =
rdma_ah_read_grh(&qp_attr->ah_attr);
+ const struct ib_gid_attr *sgid_attr;
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
@@ -1691,29 +1678,23 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
ether_addr_copy(qp->qplib_qp.ah.dmac,
qp_attr->ah_attr.roce.dmac);
- status = ib_get_cached_gid(&rdev->ibdev, 1,
- grh->sgid_index,
- &sgid, &sgid_attr);
- if (!status) {
- memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
- ETH_ALEN);
- dev_put(sgid_attr.ndev);
- nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
- &sgid);
- switch (nw_type) {
- case RDMA_NETWORK_IPV4:
- qp->qplib_qp.nw_type =
- CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
- break;
- case RDMA_NETWORK_IPV6:
- qp->qplib_qp.nw_type =
- CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
- break;
- default:
- qp->qplib_qp.nw_type =
- CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
- break;
- }
+ sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
+ memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr,
+ ETH_ALEN);
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV4:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
+ break;
+ default:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
+ break;
}
}
@@ -1895,19 +1876,17 @@ out:
/* Routine for sending QP1 packets for RoCE V1 and V2
*/
static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe,
int payload_size)
{
- struct ib_device *ibdev = &qp->rdev->ibdev;
struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
ib_ah);
struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
+ const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
struct bnxt_qplib_sge sge;
- union ib_gid sgid;
u8 nw_type;
u16 ether_type;
- struct ib_gid_attr sgid_attr;
union ib_gid dgid;
bool is_eth = false;
bool is_vlan = false;
@@ -1920,22 +1899,10 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
- rc = ib_get_cached_gid(ibdev, 1,
- qplib_ah->host_sgid_index, &sgid,
- &sgid_attr);
- if (rc) {
- dev_err(rdev_to_dev(qp->rdev),
- "Failed to query gid at index %d",
- qplib_ah->host_sgid_index);
- return rc;
- }
- if (sgid_attr.ndev) {
- if (is_vlan_dev(sgid_attr.ndev))
- vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
- dev_put(sgid_attr.ndev);
- }
+ if (is_vlan_dev(sgid_attr->ndev))
+ vlan_id = vlan_dev_vlan_id(sgid_attr->ndev);
/* Get network header type for this GID */
- nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
switch (nw_type) {
case RDMA_NETWORK_IPV4:
nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
@@ -1948,9 +1915,9 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
break;
}
memcpy(&dgid.raw, &qplib_ah->dgid, 16);
- is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
+ is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
if (is_udp) {
- if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
ip_version = 4;
ether_type = ETH_P_IP;
} else {
@@ -1983,9 +1950,10 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
}
if (is_grh || (ip_version == 6)) {
- memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
+ memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
+ sizeof(sgid_attr->gid));
memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
- sizeof(sgid));
+ sizeof(sgid_attr->gid));
qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
}
@@ -1995,7 +1963,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
- memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
+ memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
}
@@ -2080,7 +2048,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
* and the MAD datagram out to the provided SGE.
*/
static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
- struct ib_recv_wr *wr,
+ const struct ib_recv_wr *wr,
struct bnxt_qplib_swqe *wqe,
int payload_size)
{
@@ -2125,7 +2093,7 @@ static int is_ud_qp(struct bnxt_re_qp *qp)
}
static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
struct bnxt_re_ah *ah = NULL;
@@ -2163,7 +2131,7 @@ static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
return 0;
}
-static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
+static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
switch (wr->opcode) {
@@ -2195,7 +2163,7 @@ static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
return 0;
}
-static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
+static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
switch (wr->opcode) {
@@ -2222,7 +2190,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
return 0;
}
-static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
+static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
@@ -2241,7 +2209,7 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
return 0;
}
-static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
+static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
@@ -2283,7 +2251,7 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
}
static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
/* Copy the inline data to the data field */
@@ -2313,7 +2281,7 @@ static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
}
static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe)
{
int payload_sz = 0;
@@ -2345,7 +2313,7 @@ static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
struct bnxt_qplib_swqe wqe;
int rc = 0, payload_sz = 0;
@@ -2393,8 +2361,8 @@ bad:
return rc;
}
-int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_swqe wqe;
@@ -2441,7 +2409,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
default:
break;
}
- /* Fall thru to build the wqe */
+ /* fall through */
case IB_WR_SEND_WITH_INV:
rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
break;
@@ -2493,7 +2461,7 @@ bad:
static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
- struct ib_recv_wr *wr)
+ const struct ib_recv_wr *wr)
{
struct bnxt_qplib_swqe wqe;
int rc = 0;
@@ -2526,8 +2494,8 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
return rc;
}
-int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_swqe wqe;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 5c6414cad4af..aa33e7b82c84 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -158,8 +158,7 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
-int bnxt_re_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr, void **context);
+int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
@@ -182,8 +181,8 @@ int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq);
-int bnxt_re_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
+int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
@@ -192,10 +191,10 @@ int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *qp);
-int bnxt_re_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
- struct ib_send_wr **bad_send_wr);
-int bnxt_re_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
+int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr);
+int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 50d8f1fc98d5..e426b990c1dd 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -2354,7 +2354,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
srq = qp->srq;
if (!srq)
return -EINVAL;
- if (wr_id_idx > srq->hwq.max_elements) {
+ if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process RC ");
dev_err(&cq->hwq.pdev->dev,
@@ -2369,7 +2369,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
} else {
rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
+ if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process RC ");
dev_err(&cq->hwq.pdev->dev,
@@ -2437,7 +2437,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
if (!srq)
return -EINVAL;
- if (wr_id_idx > srq->hwq.max_elements) {
+ if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process UD ");
dev_err(&cq->hwq.pdev->dev,
@@ -2452,7 +2452,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
} else {
rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
+ if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process UD ");
dev_err(&cq->hwq.pdev->dev,
@@ -2546,7 +2546,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
"QPLIB: FP: SRQ used but not defined??");
return -EINVAL;
}
- if (wr_id_idx > srq->hwq.max_elements) {
+ if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process Raw/QP1 ");
dev_err(&cq->hwq.pdev->dev,
@@ -2561,7 +2561,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
} else {
rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
+ if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
dev_err(&cq->hwq.pdev->dev,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 2f3f32eaa1d5..4097f3fa25c5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -197,7 +197,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid)
{
- if (index > sgid_tbl->max) {
+ if (index >= sgid_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: Index %d exceeded SGID table max (%d)",
index, sgid_tbl->max);
@@ -402,7 +402,7 @@ int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
*pkey = 0xFFFF;
return 0;
}
- if (index > pkey_tbl->max) {
+ if (index >= pkey_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: Index %d exceeded PKEY table max (%d)",
index, pkey_tbl->max);
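The qplib_fp.c and qplib_sp.c hunks above all correct the same off-by-one: a table of max entries has valid indexes 0 through max - 1, so the overflow check must use >=, not >. A stand-alone illustration:

#include <stdio.h>

/* Valid indexes into a table of max_elements entries are
 * 0 .. max_elements - 1; idx == max_elements is already out of range. */
static int check_idx(unsigned int idx, unsigned int max_elements)
{
	return idx >= max_elements ? -1 : 0;
}

int main(void)
{
	printf("%d %d\n", check_idx(15, 16), check_idx(16, 16)); /* 0 -1 */
	return 0;
}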
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
index 0a8542c20804..a098c0140580 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cq.c
@@ -32,38 +32,16 @@
#include "iwch_provider.h"
#include "iwch.h"
-/*
- * Get one cq entry from cxio and map it to openib.
- *
- * Returns:
- * 0 EMPTY;
- * 1 cqe returned
- * -EAGAIN caller must try again
- * any other -errno fatal error
- */
-static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct ib_wc *wc)
+static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
+ struct iwch_qp *qhp, struct ib_wc *wc)
{
- struct iwch_qp *qhp = NULL;
- struct t3_cqe cqe, *rd_cqe;
- struct t3_wq *wq;
+ struct t3_wq *wq = qhp ? &qhp->wq : NULL;
+ struct t3_cqe cqe;
u32 credit = 0;
u8 cqe_flushed;
u64 cookie;
int ret = 1;
- rd_cqe = cxio_next_cqe(&chp->cq);
-
- if (!rd_cqe)
- return 0;
-
- qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
- if (!qhp)
- wq = NULL;
- else {
- spin_lock(&qhp->lock);
- wq = &(qhp->wq);
- }
ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
&credit);
if (t3a_device(chp->rhp) && credit) {
@@ -79,7 +57,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
ret = 1;
wc->wr_id = cookie;
- wc->qp = &qhp->ibqp;
+ wc->qp = qhp ? &qhp->ibqp : NULL;
wc->vendor_err = CQE_STATUS(cqe);
wc->wc_flags = 0;
@@ -182,8 +160,38 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
}
}
out:
- if (wq)
+ return ret;
+}
+
+/*
+ * Get one cq entry from cxio and map it to openib.
+ *
+ * Returns:
+ * 0 EMPTY;
+ * 1 cqe returned
+ * -EAGAIN caller must try again
+ * any other -errno fatal error
+ */
+static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
+ struct ib_wc *wc)
+{
+ struct iwch_qp *qhp;
+ struct t3_cqe *rd_cqe;
+ int ret;
+
+ rd_cqe = cxio_next_cqe(&chp->cq);
+
+ if (!rd_cqe)
+ return 0;
+
+ qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
+ if (qhp) {
+ spin_lock(&qhp->lock);
+ ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
spin_unlock(&qhp->lock);
+ } else {
+ ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
+ }
return ret;
}
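The refactor above pulls the QP lookup and locking out of the CQE-processing body, so the unlock no longer hides behind a goto and the helper works whether or not the QP still exists (flushed CQEs can outlive their QP, hence wc->qp may now be NULL). The shape in miniature, with hypothetical names:

#include <stdio.h>

struct obj { int locked; };

static void lock(struct obj *o)   { o->locked = 1; }  /* spin_lock stand-in */
static void unlock(struct obj *o) { o->locked = 0; }

static int process(struct obj *o) { return o ? 1 : 0; }

/* Lookup and locking stay in the caller; the completion processing
 * moves into a helper that accepts o == NULL. */
static int poll_one(struct obj *o)
{
	int ret;

	if (o) {
		lock(o);
		ret = process(o);
		unlock(o);
	} else {
		ret = process(NULL);
	}
	return ret;
}

int main(void)
{
	struct obj o = { 0 };

	printf("%d %d\n", poll_one(&o), poll_one(NULL)); /* 1 0 */
	return 0;
}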
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index be097c6723c0..1b9ff21aa1d5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -61,42 +61,6 @@
#include <rdma/cxgb3-abi.h>
#include "common.h"
-static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- struct ib_udata *udata)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static int iwch_ah_destroy(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int iwch_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size,
- struct ib_mad_hdr *out_mad,
- size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- return -ENOSYS;
-}
-
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
struct iwch_dev *rhp = to_iwch_dev(context->device);
@@ -1103,7 +1067,8 @@ static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
props->max_mr_size = dev->attr.max_mr_size;
props->max_qp = dev->attr.max_qps;
props->max_qp_wr = dev->attr.max_wrs;
- props->max_sge = dev->attr.max_sge_per_wr;
+ props->max_send_sge = dev->attr.max_sge_per_wr;
+ props->max_recv_sge = dev->attr.max_sge_per_wr;
props->max_sge_rd = 1;
props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
@@ -1398,8 +1363,6 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.mmap = iwch_mmap;
dev->ibdev.alloc_pd = iwch_allocate_pd;
dev->ibdev.dealloc_pd = iwch_deallocate_pd;
- dev->ibdev.create_ah = iwch_ah_create;
- dev->ibdev.destroy_ah = iwch_ah_destroy;
dev->ibdev.create_qp = iwch_create_qp;
dev->ibdev.modify_qp = iwch_ib_modify_qp;
dev->ibdev.destroy_qp = iwch_destroy_qp;
@@ -1414,9 +1377,6 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.dealloc_mw = iwch_dealloc_mw;
dev->ibdev.alloc_mr = iwch_alloc_mr;
dev->ibdev.map_mr_sg = iwch_map_mr_sg;
- dev->ibdev.attach_mcast = iwch_multicast_attach;
- dev->ibdev.detach_mcast = iwch_multicast_detach;
- dev->ibdev.process_mad = iwch_process_mad;
dev->ibdev.req_notify_cq = iwch_arm_cq;
dev->ibdev.post_send = iwch_post_send;
dev->ibdev.post_recv = iwch_post_receive;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 2e38ddefea8a..8adbe9658935 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -326,10 +326,10 @@ enum iwch_qp_query_flags {
};
u16 iwch_rqes_posted(struct iwch_qp *qhp);
-int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
int iwch_post_zb_read(struct iwch_ep *ep);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3871e1fd8395..c649faad63f9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -39,8 +39,8 @@
#define NO_SUPPORT -1
-static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 * flit_cnt)
+static int build_rdma_send(union t3_wr *wqe, const struct ib_send_wr *wr,
+ u8 *flit_cnt)
{
int i;
u32 plen;
@@ -84,8 +84,8 @@ static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
return 0;
}
-static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 *flit_cnt)
+static int build_rdma_write(union t3_wr *wqe, const struct ib_send_wr *wr,
+ u8 *flit_cnt)
{
int i;
u32 plen;
@@ -125,8 +125,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
return 0;
}
-static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 *flit_cnt)
+static int build_rdma_read(union t3_wr *wqe, const struct ib_send_wr *wr,
+ u8 *flit_cnt)
{
if (wr->num_sge > 1)
return -EINVAL;
@@ -146,8 +146,8 @@ static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
return 0;
}
-static int build_memreg(union t3_wr *wqe, struct ib_reg_wr *wr,
- u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
+static int build_memreg(union t3_wr *wqe, const struct ib_reg_wr *wr,
+ u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{
struct iwch_mr *mhp = to_iwch_mr(wr->mr);
int i;
@@ -189,8 +189,8 @@ static int build_memreg(union t3_wr *wqe, struct ib_reg_wr *wr,
return 0;
}
-static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
- u8 *flit_cnt)
+static int build_inv_stag(union t3_wr *wqe, const struct ib_send_wr *wr,
+ u8 *flit_cnt)
{
wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->local_inv.reserved = 0;
@@ -246,7 +246,7 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
}
static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- struct ib_recv_wr *wr)
+ const struct ib_recv_wr *wr)
{
int i, err = 0;
u32 pbl_addr[T3_MAX_SGE];
@@ -286,7 +286,7 @@ static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
}
static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- struct ib_recv_wr *wr)
+ const struct ib_recv_wr *wr)
{
int i;
u32 pbl_addr;
@@ -348,8 +348,8 @@ static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
return 0;
}
-int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int err = 0;
u8 uninitialized_var(t3_wr_flit_cnt);
@@ -463,8 +463,8 @@ out:
return err;
}
-int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int err = 0;
struct iwch_qp *qhp;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0912fa026327..0f83cbec33f3 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -587,24 +587,29 @@ static int send_flowc(struct c4iw_ep *ep)
{
struct fw_flowc_wr *flowc;
struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
- int i;
u16 vlan = ep->l2t->vlan;
int nparams;
+ int flowclen, flowclen16;
if (WARN_ON(!skb))
return -ENOMEM;
if (vlan == CPL_L2T_VLAN_NONE)
- nparams = 8;
- else
nparams = 9;
+ else
+ nparams = 10;
- flowc = __skb_put(skb, FLOWC_LEN);
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
+
+ flowc = __skb_put(skb, flowclen);
+ memset(flowc, 0, flowclen);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
FW_FLOWC_WR_NPARAMS_V(nparams));
- flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
- 16)) | FW_WR_FLOWID_V(ep->hwtid));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
+ FW_WR_FLOWID_V(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
@@ -623,21 +628,13 @@ static int send_flowc(struct c4iw_ep *ep)
flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = cpu_to_be32(ep->emss);
- if (nparams == 9) {
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
+ flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale);
+ if (nparams == 10) {
u16 pri;
-
pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
- flowc->mnemval[8].val = cpu_to_be32(pri);
- } else {
- /* Pad WR to 16 byte boundary */
- flowc->mnemval[8].mnemonic = 0;
- flowc->mnemval[8].val = 0;
- }
- for (i = 0; i < 9; i++) {
- flowc->mnemval[i].r4[0] = 0;
- flowc->mnemval[i].r4[1] = 0;
- flowc->mnemval[i].r4[2] = 0;
+ flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+ flowc->mnemval[9].val = cpu_to_be32(pri);
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
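send_flowc() now sizes the work request for exactly nparams mnemonics and rounds up to the 16-byte units the firmware expects, rather than always emitting the fixed FLOWC_LEN. A stand-alone sketch of the arithmetic (struct layouts here are simplified stand-ins for the fw_flowc_wr definitions in t4fw_api.h):

#include <stdio.h>
#include <stddef.h>

struct toy_mnemval {              /* stand-in for fw_flowc_mnemval */
	unsigned char mnemonic;
	unsigned char r4[3];
	unsigned int val;
};

struct toy_flowc_wr {             /* stand-in for fw_flowc_wr */
	unsigned int op_to_nparams;
	unsigned int flowid_len16;
	struct toy_mnemval mnemval[];
};

int main(void)
{
	int nparams = 10;         /* the vlan case after this patch */
	size_t flowclen = offsetof(struct toy_flowc_wr, mnemval[nparams]);
	size_t flowclen16 = (flowclen + 15) / 16;   /* DIV_ROUND_UP(len, 16) */

	/* prints "flowclen=88 len16=6 padded=96" on a typical build */
	printf("flowclen=%zu len16=%zu padded=%zu\n",
	       flowclen, flowclen16, flowclen16 * 16);
	return 0;
}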
@@ -1176,6 +1173,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_act_establish *req = cplhdr(skb);
+ unsigned short tcp_opt = ntohs(req->tcp_opt);
unsigned int tid = GET_TID(req);
unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct tid_info *t = dev->rdev.lldi.tids;
@@ -1196,8 +1194,9 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
- set_emss(ep, ntohs(req->tcp_opt));
+ set_emss(ep, tcp_opt);
/* dealloc the atid */
remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
@@ -1853,10 +1852,33 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static void complete_cached_srq_buffers(struct c4iw_ep *ep,
+ __be32 srqidx_status)
+{
+ enum chip_type adapter_type;
+ u32 srqidx;
+
+ adapter_type = ep->com.dev->rdev.lldi.adapter_type;
+ srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(srqidx_status));
+
+ /*
+ * If this TCB had an SRQ buffer cached, then we must complete
+ * it. For user mode, that means saving the srqidx in the
+ * user/kernel status page for this qp. For kernel mode, just
+ * synthesize the CQE now.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T5 && srqidx) {
+ if (ep->com.qp->ibqp.uobject)
+ t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
+ else
+ c4iw_flush_srqidx(ep->com.qp, srqidx);
+ }
+}
+
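complete_cached_srq_buffers() decodes one big-endian CPL word that carries both the abort status and the index of any SRQ buffer the TCB had cached; the real decoding is done by the ABORT_RSS_*_G() macros. A toy model of that kind of packing, with hypothetical shift/mask values:

#include <stdio.h>
#include <stdint.h>

/* Illustration only — the real field layout is hardware-defined and
 * decoded with ABORT_RSS_STATUS_G()/ABORT_RSS_SRQIDX_G(); the shift and
 * mask below are hypothetical. */
#define TOY_STATUS_MASK  0xffu
#define TOY_SRQIDX_SHIFT 8

static uint8_t  toy_status(uint32_t w) { return w & TOY_STATUS_MASK; }
static uint32_t toy_srqidx(uint32_t w) { return w >> TOY_SRQIDX_SHIFT; }

int main(void)
{
	uint32_t w = (123u << TOY_SRQIDX_SHIFT) | 7u;

	printf("status=%u srqidx=%u\n", toy_status(w), toy_srqidx(w));
	return 0;
}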
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
- struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+ struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
int release = 0;
unsigned int tid = GET_TID(rpl);
@@ -1865,6 +1887,9 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
pr_warn("Abort rpl to freed endpoint\n");
return 0;
}
+
+ complete_cached_srq_buffers(ep, rpl->srqidx_status);
+
pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
@@ -2603,16 +2628,17 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_pass_establish *req = cplhdr(skb);
unsigned int tid = GET_TID(req);
int ret;
+ u16 tcp_opt = ntohs(req->tcp_opt);
ep = get_ep_from_tid(dev, tid);
pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
- pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid,
- ntohs(req->tcp_opt));
+ pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt);
- set_emss(ep, ntohs(req->tcp_opt));
+ set_emss(ep, tcp_opt);
dst_confirm(ep->dst);
mutex_lock(&ep->com.mutex);
@@ -2719,28 +2745,35 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct cpl_abort_req_rss6 *req = cplhdr(skb);
struct c4iw_ep *ep;
struct sk_buff *rpl_skb;
struct c4iw_qp_attributes attrs;
int ret;
int release = 0;
unsigned int tid = GET_TID(req);
+ u8 status;
+
u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- if (cxgb_is_neg_adv(req->status)) {
+ status = ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status));
+
+ if (cxgb_is_neg_adv(status)) {
pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
- ep->hwtid, req->status, neg_adv_str(req->status));
+ ep->hwtid, status, neg_adv_str(status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
mutex_unlock(&dev->rdev.stats.lock);
goto deref_ep;
}
+
+ complete_cached_srq_buffers(ep, req->srqidx_status);
+
pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
ep->com.state);
set_bit(PEER_ABORT, &ep->com.history);
@@ -3444,9 +3477,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
}
insert_handle(dev, &dev->stid_idr, ep, ep->stid);
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
-
state_set(&ep->com, LISTEN);
if (ep->com.local_addr.ss_family == AF_INET)
err = create_server4(dev, ep);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 2be2e1ac1b5f..6d3042794094 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -77,6 +77,10 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
int user = (uctx != &rdev->uctx);
int ret;
struct sk_buff *skb;
+ struct c4iw_ucontext *ucontext = NULL;
+
+ if (user)
+ ucontext = container_of(uctx, struct c4iw_ucontext, uctx);
cq->cqid = c4iw_get_cqid(rdev, uctx);
if (!cq->cqid) {
@@ -100,6 +104,16 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
dma_unmap_addr_set(cq, mapping, cq->dma_addr);
memset(cq->queue, 0, cq->memsize);
+ if (user && ucontext->is_32b_cqe) {
+ cq->qp_errp = &((struct t4_status_page *)
+ ((u8 *)cq->queue + (cq->size - 1) *
+ (sizeof(*cq->queue) / 2)))->qp_err;
+ } else {
+ cq->qp_errp = &((struct t4_status_page *)
+ ((u8 *)cq->queue + (cq->size - 1) *
+ sizeof(*cq->queue)))->qp_err;
+ }
+
/* build fw_ri_res_wr */
wr_len = sizeof *res_wr + sizeof *res;
@@ -132,7 +146,9 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_IQPCIECH_V(2) |
FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
FW_RI_RES_WR_IQO_F |
- FW_RI_RES_WR_IQESIZE_V(1));
+ ((user && ucontext->is_32b_cqe) ?
+ FW_RI_RES_WR_IQESIZE_V(1) :
+ FW_RI_RES_WR_IQESIZE_V(2)));
res->u.cq.iqsize = cpu_to_be16(cq->size);
res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
@@ -166,7 +182,7 @@ err1:
return ret;
}
-static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
+static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)
{
struct t4_cqe cqe;
@@ -179,6 +195,8 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
CQE_SWCQE_V(1) |
CQE_QPID_V(wq->sq.qid));
cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ if (srqidx)
+ cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);
cq->sw_queue[cq->sw_pidx] = cqe;
t4_swcq_produce(cq);
}
@@ -191,7 +209,7 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
wq, cq, wq->rq.in_use, count);
while (in_use--) {
- insert_recv_cqe(wq, cq);
+ insert_recv_cqe(wq, cq, 0);
flushed++;
}
return flushed;
@@ -442,6 +460,72 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
pr_debug("cq %p count %d\n", cq, *count);
}
+static void post_pending_srq_wrs(struct t4_srq *srq)
+{
+ struct t4_srq_pending_wr *pwr;
+ u16 idx = 0;
+
+ while (srq->pending_in_use) {
+ pwr = &srq->pending_wrs[srq->pending_cidx];
+ srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;
+ srq->sw_rq[srq->pidx].valid = 1;
+
+ pr_debug("%s posting pending cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
+ __func__,
+ srq->cidx, srq->pidx, srq->wq_pidx,
+ srq->in_use, srq->size,
+ (unsigned long long)pwr->wr_id);
+
+ c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16);
+ t4_srq_consume_pending_wr(srq);
+ t4_srq_produce(srq, pwr->len16);
+ idx += DIV_ROUND_UP(pwr->len16 * 16, T4_EQ_ENTRY_SIZE);
+ }
+
+ if (idx) {
+ t4_ring_srq_db(srq, idx, pwr->len16, &pwr->wqe);
+ srq->queue[srq->size].status.host_wq_pidx =
+ srq->wq_pidx;
+ }
+}
+
+static u64 reap_srq_cqe(struct t4_cqe *hw_cqe, struct t4_srq *srq)
+{
+ int rel_idx = CQE_ABS_RQE_IDX(hw_cqe) - srq->rqt_abs_idx;
+ u64 wr_id;
+
+ srq->sw_rq[rel_idx].valid = 0;
+ wr_id = srq->sw_rq[rel_idx].wr_id;
+
+ if (rel_idx == srq->cidx) {
+ pr_debug("%s in order cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
+ __func__, rel_idx, srq->cidx, srq->pidx,
+ srq->wq_pidx, srq->in_use, srq->size,
+ (unsigned long long)srq->sw_rq[rel_idx].wr_id);
+ t4_srq_consume(srq);
+ while (srq->ooo_count && !srq->sw_rq[srq->cidx].valid) {
+ pr_debug("%s eat ooo cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
+ __func__, srq->cidx, srq->pidx,
+ srq->wq_pidx, srq->in_use,
+ srq->size, srq->ooo_count,
+ (unsigned long long)
+ srq->sw_rq[srq->cidx].wr_id);
+ t4_srq_consume_ooo(srq);
+ }
+ if (srq->ooo_count == 0 && srq->pending_in_use)
+ post_pending_srq_wrs(srq);
+ } else {
+ pr_debug("%s ooo cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
+ __func__, rel_idx, srq->cidx,
+ srq->pidx, srq->wq_pidx,
+ srq->in_use, srq->size,
+ srq->ooo_count,
+ (unsigned long long)srq->sw_rq[rel_idx].wr_id);
+ t4_srq_produce_ooo(srq);
+ }
+ return wr_id;
+}
+
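reap_srq_cqe() above handles SRQ completions that arrive out of order: an entry completed at the consumer index is consumed immediately, and the index then sweeps forward over any earlier out-of-order reaps; anything else just bumps ooo_count. A runnable toy model of that index discipline:

#include <stdio.h>

#define SIZE 8

static unsigned char valid[SIZE];
static int cidx, ooo_count;

/* Toy model only: advance the consumer index past entries that have
 * actually been reaped, counting completions that ran ahead. */
static void toy_reap(int rel_idx)
{
	valid[rel_idx] = 0;
	if (rel_idx == cidx) {
		cidx = (cidx + 1) % SIZE;           /* in-order: consume */
		while (ooo_count && !valid[cidx]) { /* absorb earlier ooo reaps */
			cidx = (cidx + 1) % SIZE;
			ooo_count--;
		}
	} else {
		ooo_count++;                        /* reaped ahead of cidx */
	}
}

int main(void)
{
	for (int i = 0; i < SIZE; i++)
		valid[i] = 1;                       /* pretend 8 posted buffers */
	toy_reap(1);                                /* ooo: cidx stays 0 */
	toy_reap(0);                                /* in-order: cidx jumps to 2 */
	printf("cidx=%d ooo=%d\n", cidx, ooo_count);/* cidx=2 ooo=0 */
	return 0;
}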
/*
* poll_cq
*
@@ -459,7 +543,8 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
* -EOVERFLOW CQ overflow detected.
*/
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit)
+ u8 *cqe_flushed, u64 *cookie, u32 *credit,
+ struct t4_srq *srq)
{
int ret = 0;
struct t4_cqe *hw_cqe, read_cqe;
@@ -524,7 +609,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
*/
if (CQE_TYPE(hw_cqe) == 1) {
if (CQE_STATUS(hw_cqe))
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
ret = -EAGAIN;
goto skip_cqe;
}
@@ -535,7 +620,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
*/
if (CQE_WRID_STAG(hw_cqe) == 1) {
if (CQE_STATUS(hw_cqe))
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
ret = -EAGAIN;
goto skip_cqe;
}
@@ -560,7 +645,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
}
/*
@@ -574,15 +659,9 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
* then we complete this with T4_ERR_MSN and mark the wq in
* error.
*/
-
- if (t4_rq_empty(wq)) {
- t4_set_wq_in_error(wq);
- ret = -EAGAIN;
- goto skip_cqe;
- }
if (unlikely(!CQE_STATUS(hw_cqe) &&
CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
- t4_set_wq_in_error(wq);
+ t4_set_wq_in_error(wq, 0);
hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
}
goto proc_cqe;
@@ -641,11 +720,16 @@ proc_cqe:
c4iw_log_wr_stats(wq, hw_cqe);
t4_sq_consume(wq);
} else {
- pr_debug("completing rq idx %u\n", wq->rq.cidx);
- *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
- if (c4iw_wr_log)
- c4iw_log_wr_stats(wq, hw_cqe);
- t4_rq_consume(wq);
+ if (!srq) {
+ pr_debug("completing rq idx %u\n", wq->rq.cidx);
+ *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
+ if (c4iw_wr_log)
+ c4iw_log_wr_stats(wq, hw_cqe);
+ t4_rq_consume(wq);
+ } else {
+ *cookie = reap_srq_cqe(hw_cqe, srq);
+ }
+ wq->rq.msn++;
goto skip_cqe;
}
@@ -668,46 +752,33 @@ skip_cqe:
return ret;
}
-/*
- * Get one cq entry from c4iw and map it to openib.
- *
- * Returns:
- * 0 cqe returned
- * -ENODATA EMPTY;
- * -EAGAIN caller must try again
- * any other -errno fatal error
- */
-static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
+static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
+ struct ib_wc *wc, struct c4iw_srq *srq)
{
- struct c4iw_qp *qhp = NULL;
- struct t4_cqe uninitialized_var(cqe), *rd_cqe;
- struct t4_wq *wq;
+ struct t4_cqe uninitialized_var(cqe);
+ struct t4_wq *wq = qhp ? &qhp->wq : NULL;
u32 credit = 0;
u8 cqe_flushed;
u64 cookie = 0;
int ret;
- ret = t4_next_cqe(&chp->cq, &rd_cqe);
-
- if (ret)
- return ret;
-
- qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
- if (!qhp)
- wq = NULL;
- else {
- spin_lock(&qhp->lock);
- wq = &(qhp->wq);
- }
- ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
+ ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
+ srq ? &srq->wq : NULL);
if (ret)
goto out;
wc->wr_id = cookie;
- wc->qp = &qhp->ibqp;
+ wc->qp = qhp ? &qhp->ibqp : NULL;
wc->vendor_err = CQE_STATUS(&cqe);
wc->wc_flags = 0;
+ /*
+ * Simulate a SRQ_LIMIT_REACHED HW notification if required.
+ */
+ if (srq && !(srq->flags & T4_SRQ_LIMIT_SUPPORT) && srq->armed &&
+ srq->wq.in_use < srq->srq_limit)
+ c4iw_dispatch_srq_limit_reached_event(srq);
+
pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
CQE_QPID(&cqe),
CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
@@ -720,15 +791,32 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->byte_len = CQE_LEN(&cqe);
else
wc->byte_len = 0;
- wc->opcode = IB_WC_RECV;
- if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
- CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
+
+ switch (CQE_OPCODE(&cqe)) {
+ case FW_RI_SEND:
+ wc->opcode = IB_WC_RECV;
+ break;
+ case FW_RI_SEND_WITH_INV:
+ case FW_RI_SEND_WITH_SE_INV:
+ wc->opcode = IB_WC_RECV;
wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
+ break;
+ case FW_RI_WRITE_IMMEDIATE:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->ex.imm_data = CQE_IMM_DATA(&cqe);
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ default:
+ pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
+ CQE_OPCODE(&cqe), CQE_QPID(&cqe));
+ ret = -EINVAL;
+ goto out;
}
} else {
switch (CQE_OPCODE(&cqe)) {
+ case FW_RI_WRITE_IMMEDIATE:
case FW_RI_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
@@ -819,8 +907,43 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
}
}
out:
- if (wq)
+ return ret;
+}
+
+/*
+ * Get one cq entry from c4iw and map it to openib.
+ *
+ * Returns:
+ * 0 cqe returned
+ * -ENODATA EMPTY;
+ * -EAGAIN caller must try again
+ * any other -errno fatal error
+ */
+static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
+{
+ struct c4iw_srq *srq = NULL;
+ struct c4iw_qp *qhp = NULL;
+ struct t4_cqe *rd_cqe;
+ int ret;
+
+ ret = t4_next_cqe(&chp->cq, &rd_cqe);
+
+ if (ret)
+ return ret;
+
+ qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
+ if (qhp) {
+ spin_lock(&qhp->lock);
+ srq = qhp->srq;
+ if (srq)
+ spin_lock(&srq->lock);
+ ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
spin_unlock(&qhp->lock);
+ if (srq)
+ spin_unlock(&srq->lock);
+ } else {
+ ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
+ }
return ret;
}
@@ -876,6 +999,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
int vector = attr->comp_vector;
struct c4iw_dev *rhp;
struct c4iw_cq *chp;
+ struct c4iw_create_cq ucmd;
struct c4iw_create_cq_resp uresp;
struct c4iw_ucontext *ucontext = NULL;
int ret, wr_len;
@@ -891,9 +1015,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (vector >= rhp->rdev.lldi.nciq)
return ERR_PTR(-EINVAL);
+ if (ib_context) {
+ ucontext = to_c4iw_ucontext(ib_context);
+ if (udata->inlen < sizeof(ucmd))
+ ucontext->is_32b_cqe = 1;
+ }
+
chp = kzalloc(sizeof(*chp), GFP_KERNEL);
if (!chp)
return ERR_PTR(-ENOMEM);
+
chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
if (!chp->wr_waitp) {
ret = -ENOMEM;
@@ -908,9 +1039,6 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
goto err_free_wr_wait;
}
- if (ib_context)
- ucontext = to_c4iw_ucontext(ib_context);
-
/* account for the status page. */
entries++;
@@ -934,13 +1062,15 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (hwentries < 64)
hwentries = 64;
- memsize = hwentries * sizeof *chp->cq.queue;
+ memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
+ (sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));
/*
* memsize must be a multiple of the page size if its a user cq.
*/
if (ucontext)
memsize = roundup(memsize, PAGE_SIZE);
+
chp->cq.size = hwentries;
chp->cq.memsize = memsize;
chp->cq.vector = vector;
@@ -971,6 +1101,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (!mm2)
goto err_free_mm;
+ memset(&uresp, 0, sizeof(uresp));
uresp.qid_mask = rhp->rdev.cqmask;
uresp.cqid = chp->cq.cqid;
uresp.size = chp->cq.size;
@@ -980,9 +1111,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
ucontext->key += PAGE_SIZE;
uresp.gts_key = ucontext->key;
ucontext->key += PAGE_SIZE;
+ /* communicate to userspace that the
+ * kernel driver supports 64B CQEs
+ */
+ uresp.flags |= C4IW_64B_CQE;
+
spin_unlock(&ucontext->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp,
- sizeof(uresp) - sizeof(uresp.reserved));
+ ucontext->is_32b_cqe ?
+ sizeof(uresp) - sizeof(uresp.flags) :
+ sizeof(uresp));
if (ret)
goto err_free_mm2;
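The 32B/64B CQE handling above is a standard versioned-ABI negotiation: old userspace sends a shorter command, the kernel detects that via udata->inlen and falls back to 32B CQEs, and the response is likewise truncated to what the consumer understands. A stand-alone toy of the input side (struct names hypothetical):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* A command struct grew a field; a short write from an old producer is
 * treated as "new fields absent". */
struct cmd_v1 { unsigned int a; };
struct cmd_v2 { unsigned int a; unsigned int flags; };

static unsigned int read_flags(const void *buf, size_t inlen)
{
	struct cmd_v2 cmd = { 0 };              /* unsent tail stays zero */
	size_t n = inlen < sizeof(cmd) ? inlen : sizeof(cmd);

	memcpy(&cmd, buf, n);
	return cmd.flags;                       /* 0 when producer was v1 */
}

int main(void)
{
	struct cmd_v1 old = { 1 };
	struct cmd_v2 new = { 1, 42 };

	printf("%u %u\n", read_flags(&old, sizeof(old)),
	       read_flags(&new, sizeof(new)));  /* prints: 0 42 */
	return 0;
}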
@@ -1019,11 +1157,6 @@ err_free_chp:
return ERR_PTR(ret);
}
-int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
-{
- return -ENOSYS;
-}
-
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct c4iw_cq *chp;
@@ -1039,3 +1172,19 @@ int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
spin_unlock_irqrestore(&chp->lock, flag);
return ret;
}
+
+void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx)
+{
+ struct c4iw_cq *rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+ unsigned long flag;
+
+ /* locking hierarchy: cq lock first, then qp lock. */
+ spin_lock_irqsave(&rchp->lock, flag);
+ spin_lock(&qhp->lock);
+
+ /* create a SRQ RECV CQE for srqidx */
+ insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx);
+
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&rchp->lock, flag);
+}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index a3c3418afd73..c13c0ba30f63 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -275,10 +275,11 @@ static int dump_qp(int id, void *p, void *data)
set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
cc = snprintf(qpd->buf + qpd->pos, space,
- "rc qp sq id %u rq id %u state %u "
+ "rc qp sq id %u %s id %u state %u "
"onchip %u ep tid %u state %u "
"%pI4:%u/%u->%pI4:%u/%u\n",
- qp->wq.sq.qid, qp->wq.rq.qid,
+ qp->wq.sq.qid, qp->srq ? "srq" : "rq",
+ qp->srq ? qp->srq->idx : qp->wq.rq.qid,
(int)qp->attr.state,
qp->wq.sq.flags & T4_SQ_ONCHIP,
ep->hwtid, (int)ep->com.state,
@@ -480,6 +481,9 @@ static int stats_show(struct seq_file *seq, void *v)
seq_printf(seq, " QID: %10llu %10llu %10llu %10llu\n",
dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
+ seq_printf(seq, " SRQS: %10llu %10llu %10llu %10llu\n",
+ dev->rdev.stats.srqt.total, dev->rdev.stats.srqt.cur,
+ dev->rdev.stats.srqt.max, dev->rdev.stats.srqt.fail);
seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n",
dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
@@ -530,6 +534,8 @@ static ssize_t stats_clear(struct file *file, const char __user *buf,
dev->rdev.stats.pbl.fail = 0;
dev->rdev.stats.rqt.max = 0;
dev->rdev.stats.rqt.fail = 0;
+ dev->rdev.stats.srqt.max = 0;
+ dev->rdev.stats.srqt.fail = 0;
dev->rdev.stats.ocqp.max = 0;
dev->rdev.stats.ocqp.fail = 0;
dev->rdev.stats.db_full = 0;
@@ -802,7 +808,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->qpmask = rdev->lldi.udb_density - 1;
rdev->cqmask = rdev->lldi.ucq_density - 1;
- pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
+ pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
rdev->lldi.vr->pbl.start,
@@ -811,7 +817,8 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->lldi.vr->qp.start,
rdev->lldi.vr->qp.size,
rdev->lldi.vr->cq.start,
- rdev->lldi.vr->cq.size);
+ rdev->lldi.vr->cq.size,
+ rdev->lldi.vr->srq.size);
pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
&rdev->lldi.pdev->resource[2],
rdev->lldi.db_reg, rdev->lldi.gts_reg,
@@ -824,10 +831,12 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->stats.stag.total = rdev->lldi.vr->stag.size;
rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
+ rdev->stats.srqt.total = rdev->lldi.vr->srq.size;
rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
rdev->stats.qid.total = rdev->lldi.vr->qp.size;
- err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ err = c4iw_init_resource(rdev, c4iw_num_stags(rdev),
+ T4_MAX_NUM_PD, rdev->lldi.vr->srq.size);
if (err) {
pr_err("error %d initializing resources\n", err);
return err;
@@ -857,6 +866,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
rdev->status_page->cq_size = rdev->lldi.vr->cq.size;
+ rdev->status_page->write_cmpl_supported = rdev->lldi.write_cmpl_support;
if (c4iw_wr_log) {
rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order,
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 3e9d8b277ab9..8741d23168f3 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -70,9 +70,10 @@ static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
- pr_debug("%016llx %016llx %016llx %016llx\n",
+ pr_debug("%016llx %016llx %016llx %016llx - %016llx %016llx %016llx %016llx\n",
be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
- be64_to_cpu(p[3]));
+ be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]),
+ be64_to_cpu(p[6]), be64_to_cpu(p[7]));
/*
* Ingress WRITE and READ_RESP errors provide
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 870649ff049c..f0fceadd0d12 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -97,6 +97,7 @@ struct c4iw_resource {
struct c4iw_id_table tpt_table;
struct c4iw_id_table qid_table;
struct c4iw_id_table pdid_table;
+ struct c4iw_id_table srq_table;
};
struct c4iw_qid_list {
@@ -130,6 +131,8 @@ struct c4iw_stats {
struct c4iw_stat stag;
struct c4iw_stat pbl;
struct c4iw_stat rqt;
+ struct c4iw_stat srqt;
+ struct c4iw_stat srq;
struct c4iw_stat ocqp;
u64 db_full;
u64 db_empty;
@@ -549,6 +552,7 @@ struct c4iw_qp {
struct kref kref;
wait_queue_head_t wait;
int sq_sig_all;
+ struct c4iw_srq *srq;
struct work_struct free_work;
struct c4iw_ucontext *ucontext;
struct c4iw_wr_wait *wr_waitp;
@@ -559,6 +563,26 @@ static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
return container_of(ibqp, struct c4iw_qp, ibqp);
}
+struct c4iw_srq {
+ struct ib_srq ibsrq;
+ struct list_head db_fc_entry;
+ struct c4iw_dev *rhp;
+ struct t4_srq wq;
+ struct sk_buff *destroy_skb;
+ u32 srq_limit;
+ u32 pdid;
+ int idx;
+ u32 flags;
+ spinlock_t lock; /* protects srq */
+ struct c4iw_wr_wait *wr_waitp;
+ bool armed;
+};
+
+static inline struct c4iw_srq *to_c4iw_srq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct c4iw_srq, ibsrq);
+}
+
struct c4iw_ucontext {
struct ib_ucontext ibucontext;
struct c4iw_dev_ucontext uctx;
@@ -566,6 +590,7 @@ struct c4iw_ucontext {
spinlock_t mmap_lock;
struct list_head mmaps;
struct kref kref;
+ bool is_32b_cqe;
};
static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -885,7 +910,10 @@ enum conn_pre_alloc_buffers {
CN_MAX_CON_BUF
};
-#define FLOWC_LEN 80
+enum {
+ FLOWC_LEN = offsetof(struct fw_flowc_wr, mnemval[FW_FLOWC_MNEM_MAX])
+};
+
union cpl_wr_size {
struct cpl_abort_req abrt_req;
struct cpl_abort_rpl abrt_rpl;
@@ -952,6 +980,7 @@ struct c4iw_ep {
unsigned int retry_count;
int snd_win;
int rcv_win;
+ u32 snd_wscale;
struct c4iw_ep_stats stats;
};
@@ -988,7 +1017,8 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
+ u32 nr_pdid, u32 nr_srqt);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
@@ -1007,10 +1037,10 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
@@ -1037,8 +1067,14 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_context,
struct ib_udata *udata);
-int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata);
+int c4iw_destroy_srq(struct ib_srq *ib_srq);
+struct ib_srq *c4iw_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *attrs,
+ struct ib_udata *udata);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *attrs,
@@ -1075,12 +1111,19 @@ extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
enum cxgb4_bar2_qtype qtype,
unsigned int *pbar2_qid, u64 *pbar2_pa);
+int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev);
+void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx);
extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
+void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq);
+void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16);
+void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx);
+int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
typedef int c4iw_restrack_func(struct sk_buff *msg,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 1445918e3239..7b76e6f81aeb 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
- if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+ if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
return -ENOMEM;
mhp->mpl[mhp->mpl_len++] = addr;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 1feade8bb4b3..4eda6872e617 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -58,41 +58,6 @@ static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
-static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- struct ib_udata *udata)
-
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static int c4iw_ah_destroy(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- return -ENOSYS;
-}
-
-static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size,
- struct ib_mad_hdr *out_mad,
- size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- return -ENOSYS;
-}
-
void _c4iw_free_ucontext(struct kref *kref)
{
struct c4iw_ucontext *ucontext;
@@ -342,8 +307,12 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
props->max_mr_size = T4_MAX_MR_SIZE;
props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
+ props->max_srq = dev->rdev.lldi.vr->srq.size;
props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
- props->max_sge = T4_MAX_RECV_SGE;
+ props->max_srq_wr = dev->rdev.hw_queue.t4_max_qp_depth;
+ props->max_send_sge = min(T4_MAX_SEND_SGE, T4_MAX_WRITE_SGE);
+ props->max_recv_sge = T4_MAX_RECV_SGE;
+ props->max_srq_sge = T4_MAX_RECV_SGE;
props->max_sge_rd = 1;
props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
@@ -592,7 +561,10 @@ void c4iw_register_device(struct work_struct *work)
(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV);
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
dev->ibdev.node_type = RDMA_NODE_RNIC;
BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
@@ -608,15 +580,15 @@ void c4iw_register_device(struct work_struct *work)
dev->ibdev.mmap = c4iw_mmap;
dev->ibdev.alloc_pd = c4iw_allocate_pd;
dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
- dev->ibdev.create_ah = c4iw_ah_create;
- dev->ibdev.destroy_ah = c4iw_ah_destroy;
dev->ibdev.create_qp = c4iw_create_qp;
dev->ibdev.modify_qp = c4iw_ib_modify_qp;
dev->ibdev.query_qp = c4iw_ib_query_qp;
dev->ibdev.destroy_qp = c4iw_destroy_qp;
+ dev->ibdev.create_srq = c4iw_create_srq;
+ dev->ibdev.modify_srq = c4iw_modify_srq;
+ dev->ibdev.destroy_srq = c4iw_destroy_srq;
dev->ibdev.create_cq = c4iw_create_cq;
dev->ibdev.destroy_cq = c4iw_destroy_cq;
- dev->ibdev.resize_cq = c4iw_resize_cq;
dev->ibdev.poll_cq = c4iw_poll_cq;
dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
@@ -625,12 +597,10 @@ void c4iw_register_device(struct work_struct *work)
dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
dev->ibdev.alloc_mr = c4iw_alloc_mr;
dev->ibdev.map_mr_sg = c4iw_map_mr_sg;
- dev->ibdev.attach_mcast = c4iw_multicast_attach;
- dev->ibdev.detach_mcast = c4iw_multicast_detach;
- dev->ibdev.process_mad = c4iw_process_mad;
dev->ibdev.req_notify_cq = c4iw_arm_cq;
dev->ibdev.post_send = c4iw_post_send;
dev->ibdev.post_recv = c4iw_post_receive;
+ dev->ibdev.post_srq_recv = c4iw_post_srq_recv;
dev->ibdev.alloc_hw_stats = c4iw_alloc_stats;
dev->ibdev.get_hw_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index aef53305f1c3..b3203afa3b1d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -147,21 +147,24 @@ static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
}
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx, int has_rq)
{
/*
* uP clears EQ contexts when the connection exits rdma mode,
* so no need to post a RESET WR for these EQs.
*/
- dma_free_coherent(&(rdev->lldi.pdev->dev),
- wq->rq.memsize, wq->rq.queue,
- dma_unmap_addr(&wq->rq, mapping));
dealloc_sq(rdev, &wq->sq);
- c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
- kfree(wq->rq.sw_rq);
kfree(wq->sq.sw_sq);
- c4iw_put_qpid(rdev, wq->rq.qid, uctx);
c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+
+ if (has_rq) {
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->rq.memsize, wq->rq.queue,
+ dma_unmap_addr(&wq->rq, mapping));
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+ kfree(wq->rq.sw_rq);
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+ }
return 0;
}
@@ -195,7 +198,8 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct t4_cq *rcq, struct t4_cq *scq,
struct c4iw_dev_ucontext *uctx,
- struct c4iw_wr_wait *wr_waitp)
+ struct c4iw_wr_wait *wr_waitp,
+ int need_rq)
{
int user = (uctx != &rdev->uctx);
struct fw_ri_res_wr *res_wr;
@@ -209,10 +213,12 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
if (!wq->sq.qid)
return -ENOMEM;
- wq->rq.qid = c4iw_get_qpid(rdev, uctx);
- if (!wq->rq.qid) {
- ret = -ENOMEM;
- goto free_sq_qid;
+ if (need_rq) {
+ wq->rq.qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->rq.qid) {
+ ret = -ENOMEM;
+ goto free_sq_qid;
+ }
}
if (!user) {
@@ -220,25 +226,31 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
GFP_KERNEL);
if (!wq->sq.sw_sq) {
ret = -ENOMEM;
- goto free_rq_qid;
+ goto free_rq_qid; /* FIXME */
}
- wq->rq.sw_rq = kcalloc(wq->rq.size, sizeof(*wq->rq.sw_rq),
- GFP_KERNEL);
- if (!wq->rq.sw_rq) {
- ret = -ENOMEM;
- goto free_sw_sq;
+ if (need_rq) {
+ wq->rq.sw_rq = kcalloc(wq->rq.size,
+ sizeof(*wq->rq.sw_rq),
+ GFP_KERNEL);
+ if (!wq->rq.sw_rq) {
+ ret = -ENOMEM;
+ goto free_sw_sq;
+ }
}
}
- /*
- * RQT must be a power of 2 and at least 16 deep.
- */
- wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
- wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
- if (!wq->rq.rqt_hwaddr) {
- ret = -ENOMEM;
- goto free_sw_rq;
+ if (need_rq) {
+ /*
+ * RQT must be a power of 2 and at least 16 deep.
+ */
+ wq->rq.rqt_size =
+ roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
+ wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
+ if (!wq->rq.rqt_hwaddr) {
+ ret = -ENOMEM;
+ goto free_sw_rq;
+ }
}
ret = alloc_sq(rdev, &wq->sq, user);
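The RQT sizing rule above (round the requested depth up to a power of two,
with a floor of 16 entries) can be modeled in a few lines;
roundup_pow_of_two() is reimplemented here purely for illustration:

    #include <stdio.h>

    static unsigned int roundup_pow_of_two_u(unsigned int x)
    {
            unsigned int p = 1;

            while (p < x)
                    p <<= 1;
            return p;
    }

    static unsigned int rqt_size(unsigned int rq_size)
    {
            unsigned int depth = rq_size > 16 ? rq_size : 16;

            return roundup_pow_of_two_u(depth);
    }

    int main(void)
    {
            /* 8 -> 16 (floor), 16 -> 16, 100 -> 128 */
            printf("%u %u %u\n", rqt_size(8), rqt_size(16), rqt_size(100));
            return 0;
    }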
@@ -247,34 +259,39 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
- wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
- wq->rq.memsize, &(wq->rq.dma_addr),
- GFP_KERNEL);
- if (!wq->rq.queue) {
- ret = -ENOMEM;
- goto free_sq;
+ if (need_rq) {
+ wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
+ wq->rq.memsize,
+ &wq->rq.dma_addr,
+ GFP_KERNEL);
+ if (!wq->rq.queue) {
+ ret = -ENOMEM;
+ goto free_sq;
+ }
+ pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+ wq->sq.queue,
+ (unsigned long long)virt_to_phys(wq->sq.queue),
+ wq->rq.queue,
+ (unsigned long long)virt_to_phys(wq->rq.queue));
+ memset(wq->rq.queue, 0, wq->rq.memsize);
+ dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
}
- pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
- wq->sq.queue,
- (unsigned long long)virt_to_phys(wq->sq.queue),
- wq->rq.queue,
- (unsigned long long)virt_to_phys(wq->rq.queue));
- memset(wq->rq.queue, 0, wq->rq.memsize);
- dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
wq->db = rdev->lldi.db_reg;
wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
&wq->sq.bar2_qid,
user ? &wq->sq.bar2_pa : NULL);
- wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS,
- &wq->rq.bar2_qid,
- user ? &wq->rq.bar2_pa : NULL);
+ if (need_rq)
+ wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
+ T4_BAR2_QTYPE_EGRESS,
+ &wq->rq.bar2_qid,
+ user ? &wq->rq.bar2_pa : NULL);
/*
* User mode must have bar2 access.
*/
- if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
+ if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma;
@@ -285,7 +302,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/* build fw_ri_res_wr */
wr_len = sizeof *res_wr + 2 * sizeof *res;
-
+ if (need_rq)
+ wr_len += sizeof(*res);
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
@@ -296,7 +314,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
- FW_RI_RES_WR_NRES_V(2) |
+ FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (uintptr_t)wr_waitp;
@@ -327,30 +345,36 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_EQSIZE_V(eqsize));
res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
- res++;
- res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
- res->u.sqrq.op = FW_RI_RES_OP_WRITE;
- /*
- * eqsize is the number of 64B entries plus the status page size.
- */
- eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
- rdev->hw_queue.t4_eq_status_entries;
- res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
- FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
- FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
- FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
- FW_RI_RES_WR_IQID_V(rcq->cqid));
- res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
- FW_RI_RES_WR_DCAEN_V(0) |
- FW_RI_RES_WR_DCACPU_V(0) |
- FW_RI_RES_WR_FBMIN_V(2) |
- FW_RI_RES_WR_FBMAX_V(3) |
- FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
- FW_RI_RES_WR_CIDXFTHRESH_V(0) |
- FW_RI_RES_WR_EQSIZE_V(eqsize));
- res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
- res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
+ if (need_rq) {
+ res++;
+ res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
+ res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
+ rdev->hw_queue.t4_eq_status_entries;
+ res->u.sqrq.fetchszm_to_iqid =
+ /* no host cidx updates */
+ cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
+ /* don't keep in chip cache */
+ FW_RI_RES_WR_CPRIO_V(0) |
+ /* set by uP at ri_init time */
+ FW_RI_RES_WR_PCIECHN_V(0) |
+ FW_RI_RES_WR_IQID_V(rcq->cqid));
+ res->u.sqrq.dcaen_to_eqsize =
+ cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+ FW_RI_RES_WR_FBMAX_V(3) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
+ res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
+ res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
+ }
c4iw_init_wr_wait(wr_waitp);
ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
@@ -363,26 +387,30 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
return 0;
free_dma:
- dma_free_coherent(&(rdev->lldi.pdev->dev),
- wq->rq.memsize, wq->rq.queue,
- dma_unmap_addr(&wq->rq, mapping));
+ if (need_rq)
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->rq.memsize, wq->rq.queue,
+ dma_unmap_addr(&wq->rq, mapping));
free_sq:
dealloc_sq(rdev, &wq->sq);
free_hwaddr:
- c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+ if (need_rq)
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
- kfree(wq->rq.sw_rq);
+ if (need_rq)
+ kfree(wq->rq.sw_rq);
free_sw_sq:
kfree(wq->sq.sw_sq);
free_rq_qid:
- c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+ if (need_rq)
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
c4iw_put_qpid(rdev, wq->sq.qid, uctx);
return ret;
}
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
- struct ib_send_wr *wr, int max, u32 *plenp)
+ const struct ib_send_wr *wr, int max, u32 *plenp)
{
u8 *dstp, *srcp;
u32 plen = 0;
@@ -427,7 +455,12 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
{
int i;
u32 plen = 0;
- __be64 *flitp = (__be64 *)isglp->sge;
+ __be64 *flitp;
+
+ if ((__be64 *)isglp == queue_end)
+ isglp = (struct fw_ri_isgl *)queue_start;
+
+ flitp = (__be64 *)isglp->sge;
for (i = 0; i < num_sge; i++) {
if ((plen + sg_list[i].length) < plen)
@@ -452,7 +485,7 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
}
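The new check at the top of build_isgl() lets a caller hand in an isgl
pointer sitting exactly one-past-the-end of the queue and have it wrap to
the start; per the comment in build_rdma_write_cmpl() below, the SGL
builder is the only place wrapping is handled. A small model of that
normalization, with illustrative names:

    #include <assert.h>
    #include <stdint.h>

    #define QSIZE 8
    static uint64_t queue[QSIZE];

    /* mirror of the check: one-past-the-end wraps to the start */
    static uint64_t *normalize(uint64_t *p)
    {
            return (p == &queue[QSIZE]) ? &queue[0] : p;
    }

    int main(void)
    {
            assert(normalize(&queue[3]) == &queue[3]);     /* interior */
            assert(normalize(&queue[QSIZE]) == &queue[0]); /* boundary */
            return 0;
    }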
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+ const struct ib_send_wr *wr, u8 *len16)
{
u32 plen;
int size;
@@ -519,7 +552,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
}
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+ const struct ib_send_wr *wr, u8 *len16)
{
u32 plen;
int size;
@@ -527,7 +560,15 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
if (wr->num_sge > T4_MAX_SEND_SGE)
return -EINVAL;
- wqe->write.r2 = 0;
+
+ /*
+ * iWARP protocol supports 64 bit immediate data but rdma api
+ * limits it to 32bit.
+ */
+ if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data;
+ else
+ wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0;
wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
if (wr->num_sge) {
@@ -561,7 +602,58 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
return 0;
}
-static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
+ struct ib_send_wr *wr)
+{
+ memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16);
+ memset(immdp->r1, 0, 6);
+ immdp->op = FW_RI_DATA_IMMD;
+ immdp->immdlen = 16;
+}
+
+static void build_rdma_write_cmpl(struct t4_sq *sq,
+ struct fw_ri_rdma_write_cmpl_wr *wcwr,
+ const struct ib_send_wr *wr, u8 *len16)
+{
+ u32 plen;
+ int size;
+
+ /*
+ * This code assumes the struct fields preceding the write isgl
+ * fit in one 64B WR slot. This is because the WQE is built
+ * directly in the dma queue, and wrapping is only handled
+ * by the code building the SGLs. That is, the "fixed part" of the WR
+ * structs must all fit in 64B. The WQE build code should probably be
+ * redesigned to avoid this restriction, but for now just add
+ * the BUILD_BUG_ON() to catch if this WQE struct gets too big.
+ */
+ BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64);
+
+ wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
+ wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
+ wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
+ wcwr->r2 = 0;
+ wcwr->r3 = 0;
+
+ /* SEND_INV SGL */
+ if (wr->next->send_flags & IB_SEND_INLINE)
+ build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
+ else
+ build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
+ &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL);
+
+ /* WRITE SGL */
+ build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
+ wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen);
+
+ size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) +
+ wr->num_sge * sizeof(struct fw_ri_sge);
+ wcwr->plen = cpu_to_be32(plen);
+ *len16 = DIV_ROUND_UP(size, 16);
+}
+
+static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
+ u8 *len16)
{
if (wr->num_sge > 1)
return -EINVAL;
@@ -590,8 +682,74 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
return 0;
}
+static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
+{
+ bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) ||
+ qhp->sq_sig_all;
+ bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
+ qhp->sq_sig_all;
+ struct t4_swsqe *swsqe;
+ union t4_wr *wqe;
+ u16 write_wrid;
+ u8 len16;
+ u16 idx;
+
+ /*
+ * The sw_sq entries still look like a WRITE and a SEND and consume
+ * 2 slots. The FW WR, however, will be a single uber-WR.
+ */
+ wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
+ qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
+ build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);
+
+ /* WRITE swsqe */
+ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+ swsqe->opcode = FW_RI_RDMA_WRITE;
+ swsqe->idx = qhp->wq.sq.pidx;
+ swsqe->complete = 0;
+ swsqe->signaled = write_signaled;
+ swsqe->flushed = 0;
+ swsqe->wr_id = wr->wr_id;
+ if (c4iw_wr_log) {
+ swsqe->sge_ts =
+ cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
+ swsqe->host_time = ktime_get();
+ }
+
+ write_wrid = qhp->wq.sq.pidx;
+
+ /* just bump the sw_sq */
+ qhp->wq.sq.in_use++;
+ if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
+ qhp->wq.sq.pidx = 0;
+
+ /* SEND_WITH_INV swsqe */
+ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+ swsqe->opcode = FW_RI_SEND_WITH_INV;
+ swsqe->idx = qhp->wq.sq.pidx;
+ swsqe->complete = 0;
+ swsqe->signaled = send_signaled;
+ swsqe->flushed = 0;
+ swsqe->wr_id = wr->next->wr_id;
+ if (c4iw_wr_log) {
+ swsqe->sge_ts =
+ cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
+ swsqe->host_time = ktime_get();
+ }
+
+ wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0;
+ wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;
+
+ init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR,
+ write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
+ t4_sq_produce(&qhp->wq, len16);
+ idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
+
+ t4_ring_sq_db(&qhp->wq, idx, wqe);
+}
+
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
- struct ib_recv_wr *wr, u8 *len16)
+ const struct ib_recv_wr *wr, u8 *len16)
{
int ret;
@@ -605,8 +763,22 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
return 0;
}
+static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
+ u8 *len16)
+{
+ int ret;
+
+ ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
+ &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
+ if (ret)
+ return ret;
+ *len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
+ wr->num_sge * sizeof(struct fw_ri_sge), 16);
+ return 0;
+}
+
static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
- struct ib_reg_wr *wr, struct c4iw_mr *mhp,
+ const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
u8 *len16)
{
__be64 *p = (__be64 *)fr->pbl;
@@ -638,8 +810,8 @@ static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
}
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
- bool dsgl_supported)
+ const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
+ u8 *len16, bool dsgl_supported)
{
struct fw_ri_immd *imdp;
__be64 *p;
@@ -701,7 +873,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
return 0;
}
-static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
+ u8 *len16)
{
wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->inv.r2 = 0;
@@ -721,7 +894,7 @@ static void free_qp_work(struct work_struct *work)
pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
if (ucontext)
c4iw_put_ucontext(ucontext);
@@ -804,6 +977,9 @@ static int ib_to_fw_opcode(int ib_opcode)
case IB_WR_RDMA_WRITE:
opcode = FW_RI_RDMA_WRITE;
break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ opcode = FW_RI_WRITE_IMMEDIATE;
+ break;
case IB_WR_RDMA_READ:
case IB_WR_RDMA_READ_WITH_INV:
opcode = FW_RI_READ_REQ;
@@ -820,7 +996,8 @@ static int ib_to_fw_opcode(int ib_opcode)
return opcode;
}
-static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+static int complete_sq_drain_wr(struct c4iw_qp *qhp,
+ const struct ib_send_wr *wr)
{
struct t4_cqe cqe = {};
struct c4iw_cq *schp;
@@ -858,8 +1035,9 @@ static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
return 0;
}
-static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int ret = 0;
@@ -874,7 +1052,8 @@ static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
return ret;
}
-static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+static void complete_rq_drain_wr(struct c4iw_qp *qhp,
+ const struct ib_recv_wr *wr)
{
struct t4_cqe cqe = {};
struct c4iw_cq *rchp;
@@ -906,7 +1085,8 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
}
}
-static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
+ const struct ib_recv_wr *wr)
{
while (wr) {
complete_rq_drain_wr(qhp, wr);
@@ -914,14 +1094,15 @@ static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
}
}
-int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int err = 0;
u8 len16 = 0;
enum fw_wr_opcodes fw_opcode = 0;
enum fw_ri_wr_flags fw_flags;
struct c4iw_qp *qhp;
+ struct c4iw_dev *rhp;
union t4_wr *wqe = NULL;
u32 num_wrs;
struct t4_swsqe *swsqe;
@@ -929,6 +1110,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
u16 idx = 0;
qhp = to_c4iw_qp(ibqp);
+ rhp = qhp->rhp;
spin_lock_irqsave(&qhp->lock, flag);
/*
@@ -946,6 +1128,30 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
return -ENOMEM;
}
+
+ /*
+ * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is
+ * the response for small NVMe-oF READ requests. If the chain is
+ * exactly a WRITE->SEND_WITH_INV and the sgl depths and lengths
+ * meet the requirements of the fw_ri_write_cmpl_wr work request,
+ * then build and post the write_cmpl WR. If any of the tests
+ * below are not true, then we continue on with the traditional WRITE
+ * and SEND WRs.
+ */
+ if (qhp->rhp->rdev.lldi.write_cmpl_support &&
+ CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
+ CHELSIO_T5 &&
+ wr && wr->next && !wr->next->next &&
+ wr->opcode == IB_WR_RDMA_WRITE &&
+ wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
+ wr->next->opcode == IB_WR_SEND_WITH_INV &&
+ wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
+ wr->next->num_sge == 1 && num_wrs >= 2) {
+ post_write_cmpl(qhp, wr);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return 0;
+ }
+
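For reference, the chain a ULP must post to take this fastpath is exactly
one RDMA WRITE followed by one SEND_WITH_INV whose single SGE is
T4_WRITE_CMPL_MAX_CQE (16) bytes long. A hedged sketch using the standard
verbs structures; qp and the buffer, address, and key variables are
placeholders:

    struct ib_sge write_sge = { .addr = data_dma, .length = data_len,
                                .lkey = lkey };
    struct ib_sge send_sge = { .addr = cqe_dma, .length = 16, .lkey = lkey };
    struct ib_send_wr send_wr = {
            .opcode  = IB_WR_SEND_WITH_INV,
            .sg_list = &send_sge,
            .num_sge = 1,
            .ex.invalidate_rkey = rkey,
    };
    struct ib_rdma_wr write_wr = {
            .wr = {
                    .opcode  = IB_WR_RDMA_WRITE,
                    .sg_list = &write_sge,
                    .num_sge = 1,           /* <= T4_WRITE_CMPL_MAX_SGL */
                    .next    = &send_wr,
            },
            .remote_addr = remote_addr,
            .rkey        = rkey,
    };
    const struct ib_send_wr *bad_wr;

    ret = ib_post_send(qp, &write_wr.wr, &bad_wr); /* one uber-WR on the SQ */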
while (wr) {
if (num_wrs == 0) {
err = -ENOMEM;
@@ -973,6 +1179,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->opcode = FW_RI_SEND_WITH_INV;
err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) {
+ err = -EINVAL;
+ break;
+ }
+ fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
+ /* FALLTHROUGH */
case IB_WR_RDMA_WRITE:
fw_opcode = FW_RI_RDMA_WRITE_WR;
swsqe->opcode = FW_RI_RDMA_WRITE;
@@ -983,8 +1196,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_opcode = FW_RI_RDMA_READ_WR;
swsqe->opcode = FW_RI_READ_REQ;
if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
- c4iw_invalidate_mr(qhp->rhp,
- wr->sg_list[0].lkey);
+ c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey);
fw_flags = FW_RI_RDMA_READ_INVALIDATE;
} else {
fw_flags = 0;
@@ -1000,7 +1212,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
swsqe->opcode = FW_RI_FAST_REGISTER;
- if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
+ if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
!mhp->attr.state && mhp->mpl_len <= 2) {
fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
@@ -1009,7 +1221,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_opcode = FW_RI_FR_NSMR_WR;
err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
mhp, &len16,
- qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
+ rhp->rdev.lldi.ulptx_memwrite_dsgl);
if (err)
break;
}
@@ -1022,7 +1234,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_opcode = FW_RI_INV_LSTAG_WR;
swsqe->opcode = FW_RI_LOCAL_INV;
err = build_inv_stag(wqe, wr, &len16);
- c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
+ c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey);
break;
default:
pr_warn("%s post of type=%d TBD!\n", __func__,
@@ -1041,7 +1253,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->wr_id = wr->wr_id;
if (c4iw_wr_log) {
swsqe->sge_ts = cxgb4_read_sge_timestamp(
- qhp->rhp->rdev.lldi.ports[0]);
+ rhp->rdev.lldi.ports[0]);
swsqe->host_time = ktime_get();
}
@@ -1055,7 +1267,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
t4_sq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
}
- if (!qhp->rhp->rdev.status_page->db_off) {
+ if (!rhp->rdev.status_page->db_off) {
t4_ring_sq_db(&qhp->wq, idx, wqe);
spin_unlock_irqrestore(&qhp->lock, flag);
} else {
@@ -1065,8 +1277,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return err;
}
-int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int err = 0;
struct c4iw_qp *qhp;
@@ -1145,6 +1357,89 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
return err;
}
+static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
+ u64 wr_id, u8 len16)
+{
+ struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];
+
+ pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
+ __func__, srq->cidx, srq->pidx, srq->wq_pidx,
+ srq->in_use, srq->ooo_count,
+ (unsigned long long)wr_id, srq->pending_cidx,
+ srq->pending_pidx, srq->pending_in_use);
+ pwr->wr_id = wr_id;
+ pwr->len16 = len16;
+ memcpy(&pwr->wqe, wqe, len16 * 16);
+ t4_srq_produce_pending_wr(srq);
+}
+
+int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ union t4_recv_wr *wqe, lwqe;
+ struct c4iw_srq *srq;
+ unsigned long flag;
+ u8 len16 = 0;
+ u16 idx = 0;
+ int err = 0;
+ u32 num_wrs;
+
+ srq = to_c4iw_srq(ibsrq);
+ spin_lock_irqsave(&srq->lock, flag);
+ num_wrs = t4_srq_avail(&srq->wq);
+ if (num_wrs == 0) {
+ spin_unlock_irqrestore(&srq->lock, flag);
+ return -ENOMEM;
+ }
+ while (wr) {
+ if (wr->num_sge > T4_MAX_RECV_SGE) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+ wqe = &lwqe;
+ if (num_wrs)
+ err = build_srq_recv(wqe, wr, &len16);
+ else
+ err = -ENOMEM;
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe->recv.opcode = FW_RI_RECV_WR;
+ wqe->recv.r1 = 0;
+ wqe->recv.wrid = srq->wq.pidx;
+ wqe->recv.r2[0] = 0;
+ wqe->recv.r2[1] = 0;
+ wqe->recv.r2[2] = 0;
+ wqe->recv.len16 = len16;
+
+ if (srq->wq.ooo_count ||
+ srq->wq.pending_in_use ||
+ srq->wq.sw_rq[srq->wq.pidx].valid) {
+ defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
+ } else {
+ srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
+ srq->wq.sw_rq[srq->wq.pidx].valid = 1;
+ c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
+ pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
+ __func__, srq->wq.cidx,
+ srq->wq.pidx, srq->wq.wq_pidx,
+ srq->wq.in_use,
+ (unsigned long long)wr->wr_id);
+ t4_srq_produce(&srq->wq, len16);
+ idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
+ }
+ wr = wr->next;
+ num_wrs--;
+ }
+ if (idx)
+ t4_ring_srq_db(&srq->wq, idx, len16, wqe);
+ spin_unlock_irqrestore(&srq->lock, flag);
+ return err;
+}
+
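A consumer replenishes the SRQ through the usual verbs entry point;
c4iw_post_srq_recv() then either copies each WR straight into the hardware
queue or defers it whenever out-of-order completions or earlier pending
WRs would break ordering. A hedged caller-side sketch with placeholder
buffer variables:

    struct ib_sge sge = { .addr = buf_dma, .length = buf_len, .lkey = lkey };
    struct ib_recv_wr wr = {
            .wr_id   = (uintptr_t)buf,
            .sg_list = &sge,
            .num_sge = 1,
    };
    const struct ib_recv_wr *bad_wr;

    ret = ib_post_srq_recv(srq, &wr, &bad_wr);
    if (ret)
            pr_err("SRQ refill failed, wr_id %llu\n",
                   (unsigned long long)bad_wr->wr_id);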
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
u8 *ecode)
{
@@ -1321,7 +1616,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
struct c4iw_cq *schp)
{
int count;
- int rq_flushed, sq_flushed;
+ int rq_flushed = 0, sq_flushed;
unsigned long flag;
pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
@@ -1340,11 +1635,13 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
return;
}
qhp->wq.flushed = 1;
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
c4iw_flush_hw_cq(rchp, qhp);
- c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
- rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ if (!qhp->srq) {
+ c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
+ rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ }
if (schp != rchp)
c4iw_flush_hw_cq(schp, qhp);
@@ -1388,7 +1685,7 @@ static void flush_qp(struct c4iw_qp *qhp)
schp = to_c4iw_cq(qhp->ibqp.send_cq);
if (qhp->ibqp.uobject) {
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
@@ -1517,16 +1814,21 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
- wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
+ if (qhp->srq) {
+ wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
+ qhp->srq->idx);
+ } else {
+ wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
+ wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
+ wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
+ rhp->rdev.lldi.vr->rq.start);
+ }
wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
- wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
- wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
- rhp->rdev.lldi.vr->rq.start);
if (qhp->attr.mpa_attr.initiator)
build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
@@ -1643,7 +1945,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
case C4IW_QP_STATE_RTS:
switch (attrs->next_state) {
case C4IW_QP_STATE_CLOSING:
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
set_state(qhp, C4IW_QP_STATE_CLOSING);
ep = qhp->ep;
if (!internal) {
@@ -1656,7 +1958,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
goto err;
break;
case C4IW_QP_STATE_TERMINATE:
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
set_state(qhp, C4IW_QP_STATE_TERMINATE);
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
@@ -1673,7 +1975,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
break;
case C4IW_QP_STATE_ERROR:
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq, 0);
set_state(qhp, C4IW_QP_STATE_ERROR);
if (!internal) {
abort = 1;
@@ -1819,7 +2121,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct c4iw_cq *schp;
struct c4iw_cq *rchp;
struct c4iw_create_qp_resp uresp;
- unsigned int sqsize, rqsize;
+ unsigned int sqsize, rqsize = 0;
struct c4iw_ucontext *ucontext;
int ret;
struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
@@ -1840,11 +2142,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
return ERR_PTR(-EINVAL);
- if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
- return ERR_PTR(-E2BIG);
- rqsize = attrs->cap.max_recv_wr + 1;
- if (rqsize < 8)
- rqsize = 8;
+ if (!attrs->srq) {
+ if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
+ return ERR_PTR(-E2BIG);
+ rqsize = attrs->cap.max_recv_wr + 1;
+ if (rqsize < 8)
+ rqsize = 8;
+ }
if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
return ERR_PTR(-E2BIG);
@@ -1869,19 +2173,23 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
qhp->wq.sq.flush_cidx = -1;
- qhp->wq.rq.size = rqsize;
- qhp->wq.rq.memsize =
- (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
- sizeof(*qhp->wq.rq.queue);
+ if (!attrs->srq) {
+ qhp->wq.rq.size = rqsize;
+ qhp->wq.rq.memsize =
+ (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+ sizeof(*qhp->wq.rq.queue);
+ }
if (ucontext) {
qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
- qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
+ if (!attrs->srq)
+ qhp->wq.rq.memsize =
+ roundup(qhp->wq.rq.memsize, PAGE_SIZE);
}
ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
- qhp->wr_waitp);
+ qhp->wr_waitp, !attrs->srq);
if (ret)
goto err_free_wr_wait;
@@ -1894,10 +2202,12 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
- qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
- qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
+ if (!attrs->srq) {
+ qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
+ qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
+ }
qhp->attr.state = C4IW_QP_STATE_IDLE;
qhp->attr.next_state = C4IW_QP_STATE_IDLE;
qhp->attr.enable_rdma_read = 1;
@@ -1922,21 +2232,27 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ret = -ENOMEM;
goto err_remove_handle;
}
- rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
- if (!rq_key_mm) {
- ret = -ENOMEM;
- goto err_free_sq_key;
+ if (!attrs->srq) {
+ rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
+ if (!rq_key_mm) {
+ ret = -ENOMEM;
+ goto err_free_sq_key;
+ }
}
sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
if (!sq_db_key_mm) {
ret = -ENOMEM;
goto err_free_rq_key;
}
- rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
- if (!rq_db_key_mm) {
- ret = -ENOMEM;
- goto err_free_sq_db_key;
+ if (!attrs->srq) {
+ rq_db_key_mm =
+ kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
+ if (!rq_db_key_mm) {
+ ret = -ENOMEM;
+ goto err_free_sq_db_key;
+ }
}
+ memset(&uresp, 0, sizeof(uresp));
if (t4_sq_onchip(&qhp->wq.sq)) {
ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
GFP_KERNEL);
@@ -1945,30 +2261,35 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
goto err_free_rq_db_key;
}
uresp.flags = C4IW_QPF_ONCHIP;
- } else
- uresp.flags = 0;
+ }
+ if (rhp->rdev.lldi.write_w_imm_support)
+ uresp.flags |= C4IW_QPF_WRITE_W_IMM;
uresp.qid_mask = rhp->rdev.qpmask;
uresp.sqid = qhp->wq.sq.qid;
uresp.sq_size = qhp->wq.sq.size;
uresp.sq_memsize = qhp->wq.sq.memsize;
- uresp.rqid = qhp->wq.rq.qid;
- uresp.rq_size = qhp->wq.rq.size;
- uresp.rq_memsize = qhp->wq.rq.memsize;
+ if (!attrs->srq) {
+ uresp.rqid = qhp->wq.rq.qid;
+ uresp.rq_size = qhp->wq.rq.size;
+ uresp.rq_memsize = qhp->wq.rq.memsize;
+ }
spin_lock(&ucontext->mmap_lock);
if (ma_sync_key_mm) {
uresp.ma_sync_key = ucontext->key;
ucontext->key += PAGE_SIZE;
- } else {
- uresp.ma_sync_key = 0;
}
uresp.sq_key = ucontext->key;
ucontext->key += PAGE_SIZE;
- uresp.rq_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
+ if (!attrs->srq) {
+ uresp.rq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ }
uresp.sq_db_gts_key = ucontext->key;
ucontext->key += PAGE_SIZE;
- uresp.rq_db_gts_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
+ if (!attrs->srq) {
+ uresp.rq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ }
spin_unlock(&ucontext->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
if (ret)
@@ -1977,18 +2298,23 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
sq_key_mm->addr = qhp->wq.sq.phys_addr;
sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
insert_mmap(ucontext, sq_key_mm);
- rq_key_mm->key = uresp.rq_key;
- rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
- rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
- insert_mmap(ucontext, rq_key_mm);
+ if (!attrs->srq) {
+ rq_key_mm->key = uresp.rq_key;
+ rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
+ rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+ insert_mmap(ucontext, rq_key_mm);
+ }
sq_db_key_mm->key = uresp.sq_db_gts_key;
sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
sq_db_key_mm->len = PAGE_SIZE;
insert_mmap(ucontext, sq_db_key_mm);
- rq_db_key_mm->key = uresp.rq_db_gts_key;
- rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa;
- rq_db_key_mm->len = PAGE_SIZE;
- insert_mmap(ucontext, rq_db_key_mm);
+ if (!attrs->srq) {
+ rq_db_key_mm->key = uresp.rq_db_gts_key;
+ rq_db_key_mm->addr =
+ (u64)(unsigned long)qhp->wq.rq.bar2_pa;
+ rq_db_key_mm->len = PAGE_SIZE;
+ insert_mmap(ucontext, rq_db_key_mm);
+ }
if (ma_sync_key_mm) {
ma_sync_key_mm->key = uresp.ma_sync_key;
ma_sync_key_mm->addr =
@@ -2001,7 +2327,19 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
c4iw_get_ucontext(ucontext);
qhp->ucontext = ucontext;
}
+ if (!attrs->srq) {
+ qhp->wq.qp_errp =
+ &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
+ } else {
+ qhp->wq.qp_errp =
+ &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
+ qhp->wq.srqidxp =
+ &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
+ }
+
qhp->ibqp.qp_num = qhp->wq.sq.qid;
+ if (attrs->srq)
+ qhp->srq = to_c4iw_srq(attrs->srq);
INIT_LIST_HEAD(&qhp->db_fc_entry);
pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
@@ -2011,18 +2349,20 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
err_free_ma_sync_key:
kfree(ma_sync_key_mm);
err_free_rq_db_key:
- kfree(rq_db_key_mm);
+ if (!attrs->srq)
+ kfree(rq_db_key_mm);
err_free_sq_db_key:
kfree(sq_db_key_mm);
err_free_rq_key:
- kfree(rq_key_mm);
+ if (!attrs->srq)
+ kfree(rq_key_mm);
err_free_sq_key:
kfree(sq_key_mm);
err_remove_handle:
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err_destroy_qp:
destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
err_free_wr_wait:
c4iw_put_wr_wait(qhp->wr_waitp);
err_free_qhp:
@@ -2088,6 +2428,45 @@ struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
+void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
+{
+ struct ib_event event = {};
+
+ event.device = &srq->rhp->ibdev;
+ event.element.srq = &srq->ibsrq;
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ ib_dispatch_event(&event);
+}
+
+int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
+ int ret = 0;
+
+ /*
+ * XXX a zero srq_attr_mask is used by user space as a SW-generated
+ * "srq limit reached" notification.
+ */
+ if (udata && !srq_attr_mask) {
+ c4iw_dispatch_srq_limit_reached_event(srq);
+ goto out;
+ }
+
+ /* no support for this yet */
+ if (srq_attr_mask & IB_SRQ_MAX_WR) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
+ srq->armed = true;
+ srq->srq_limit = attr->srq_limit;
+ }
+out:
+ return ret;
+}
+
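Given that convention, arming the SRQ limit from a kernel ULP is the usual
verbs call; the handler above latches the watermark and sets srq->armed so
the completion path can raise IB_EVENT_SRQ_LIMIT_REACHED later. A sketch
with an assumed watermark value:

    struct ib_srq_attr attr = { .srq_limit = 64 };  /* assumed watermark */
    int ret;

    ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
    if (ret)
            pr_err("failed to arm SRQ limit: %d\n", ret);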
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr)
{
@@ -2104,3 +2483,359 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
}
+
+static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
+{
+ struct c4iw_rdev *rdev = &srq->rhp->rdev;
+ struct sk_buff *skb = srq->destroy_skb;
+ struct t4_srq *wq = &srq->wq;
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+
+ wr_len = sizeof(*res_wr) + sizeof(*res);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
+ FW_RI_RES_WR_NRES_V(1) |
+ FW_WR_COMPL_F);
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (uintptr_t)wr_waitp;
+ res = res_wr->res;
+ res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
+ res->u.srq.op = FW_RI_RES_OP_RESET;
+ res->u.srq.srqid = cpu_to_be32(srq->idx);
+ res->u.srq.eqid = cpu_to_be32(wq->qid);
+
+ c4iw_init_wr_wait(wr_waitp);
+ c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
+
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->memsize, wq->queue,
+ pci_unmap_addr(wq, mapping));
+ c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
+ kfree(wq->sw_rq);
+ c4iw_put_qpid(rdev, wq->qid, uctx);
+}
+
+static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
+{
+ struct c4iw_rdev *rdev = &srq->rhp->rdev;
+ int user = (uctx != &rdev->uctx);
+ struct t4_srq *wq = &srq->wq;
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ struct sk_buff *skb;
+ int wr_len;
+ int eqsize;
+ int ret = -ENOMEM;
+
+ wq->qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->qid)
+ goto err;
+
+ if (!user) {
+ wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
+ GFP_KERNEL);
+ if (!wq->sw_rq)
+ goto err_put_qpid;
+ wq->pending_wrs = kcalloc(srq->wq.size,
+ sizeof(*srq->wq.pending_wrs),
+ GFP_KERNEL);
+ if (!wq->pending_wrs)
+ goto err_free_sw_rq;
+ }
+
+ wq->rqt_size = wq->size;
+ wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
+ if (!wq->rqt_hwaddr)
+ goto err_free_pending_wrs;
+ wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
+ T4_RQT_ENTRY_SHIFT;
+
+ wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
+ wq->memsize, &wq->dma_addr,
+ GFP_KERNEL);
+ if (!wq->queue)
+ goto err_free_rqtpool;
+
+ memset(wq->queue, 0, wq->memsize);
+ pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+
+ wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, T4_BAR2_QTYPE_EGRESS,
+ &wq->bar2_qid,
+ user ? &wq->bar2_pa : NULL);
+
+ /*
+ * User mode must have bar2 access.
+ */
+
+ if (user && !wq->bar2_va) {
+ pr_warn(MOD "%s: srqid %u not in BAR2 range.\n",
+ pci_name(rdev->lldi.pdev), wq->qid);
+ ret = -EINVAL;
+ goto err_free_queue;
+ }
+
+ /* build fw_ri_res_wr */
+ wr_len = sizeof(*res_wr) + sizeof(*res);
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ goto err_free_queue;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
+ FW_RI_RES_WR_NRES_V(1) |
+ FW_WR_COMPL_F);
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (uintptr_t)wr_waitp;
+ res = res_wr->res;
+ res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
+ res->u.srq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->size * T4_RQ_NUM_SLOTS +
+ rdev->hw_queue.t4_eq_status_entries;
+ res->u.srq.eqid = cpu_to_be32(wq->qid);
+ res->u.srq.fetchszm_to_iqid =
+ /* no host cidx updates */
+ cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
+ FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
+ FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
+ FW_RI_RES_WR_FETCHRO_V(0)); /* relaxed_ordering */
+ res->u.srq.dcaen_to_eqsize =
+ cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+ FW_RI_RES_WR_FBMAX_V(3) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
+ res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
+ res->u.srq.srqid = cpu_to_be32(srq->idx);
+ res->u.srq.pdid = cpu_to_be32(srq->pdid);
+ res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
+ res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
+ rdev->lldi.vr->rq.start);
+
+ c4iw_init_wr_wait(wr_waitp);
+
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
+ if (ret)
+ goto err_free_queue;
+
+ pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n"
+ " bar2_addr %p rqt addr 0x%x size %d\n",
+ __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
+ (u64)virt_to_phys(wq->queue), wq->bar2_va,
+ wq->rqt_hwaddr, wq->rqt_size);
+
+ return 0;
+err_free_queue:
+ dma_free_coherent(&rdev->lldi.pdev->dev,
+ wq->memsize, wq->queue,
+ pci_unmap_addr(wq, mapping));
+err_free_rqtpool:
+ c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
+err_free_pending_wrs:
+ if (!user)
+ kfree(wq->pending_wrs);
+err_free_sw_rq:
+ if (!user)
+ kfree(wq->sw_rq);
+err_put_qpid:
+ c4iw_put_qpid(rdev, wq->qid, uctx);
+err:
+ return ret;
+}
+
+void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
+{
+ u64 *src, *dst;
+
+ src = (u64 *)wqe;
+ dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
+ while (len16) {
+ *dst++ = *src++;
+ if (dst >= (u64 *)&srq->queue[srq->size])
+ dst = (u64 *)srq->queue;
+ *dst++ = *src++;
+ if (dst >= (u64 *)&srq->queue[srq->size])
+ dst = (u64 *)srq->queue;
+ len16--;
+ }
+}
+
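c4iw_copy_wr_to_srq() copies len16 16-byte units into the ring, checking
for wrap after every 8-byte flit so a WQE may straddle the end of the
queue. The same loop shape in a standalone model (entries shrunk to one
flit each for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define QWORDS 8                        /* ring capacity in flits */
    static uint64_t ring[QWORDS];

    static void copy_wrapped(uint64_t *dst, const uint64_t *src, int flits)
    {
            while (flits--) {
                    *dst++ = *src++;
                    if (dst == &ring[QWORDS])  /* wrap after each flit */
                            dst = &ring[0];
            }
    }

    int main(void)
    {
            uint64_t wr[4] = { 1, 2, 3, 4 };

            copy_wrapped(&ring[6], wr, 4); /* 2 flits at the end, 2 wrapped */
            assert(ring[6] == 1 && ring[7] == 2);
            assert(ring[0] == 3 && ring[1] == 4);
            return 0;
    }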
+struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_srq *srq;
+ struct c4iw_pd *php;
+ struct c4iw_create_srq_resp uresp;
+ struct c4iw_ucontext *ucontext;
+ struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
+ int rqsize;
+ int ret;
+ int wr_len;
+
+ pr_debug("%s ib_pd %p\n", __func__, pd);
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
+ if (!rhp->rdev.lldi.vr->srq.size)
+ return ERR_PTR(-EINVAL);
+ if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
+ return ERR_PTR(-E2BIG);
+ if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
+ return ERR_PTR(-E2BIG);
+
+ /*
+ * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
+ */
+ rqsize = attrs->attr.max_wr + 1;
+ rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
+
+ ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+
+ srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
+
+ srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!srq->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_srq;
+ }
+
+ srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
+ if (srq->idx < 0) {
+ ret = -ENOMEM;
+ goto err_free_wr_wait;
+ }
+
+ wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
+ srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
+ if (!srq->destroy_skb) {
+ ret = -ENOMEM;
+ goto err_free_srq_idx;
+ }
+
+ srq->rhp = rhp;
+ srq->pdid = php->pdid;
+
+ srq->wq.size = rqsize;
+ srq->wq.memsize =
+ (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+ sizeof(*srq->wq.queue);
+ if (ucontext)
+ srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);
+
+ ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
+ &rhp->rdev.uctx, srq->wr_waitp);
+ if (ret)
+ goto err_free_skb;
+ attrs->attr.max_wr = rqsize - 1;
+
+ if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
+ srq->flags = T4_SRQ_LIMIT_SUPPORT;
+
+ ret = insert_handle(rhp, &rhp->qpidr, srq, srq->wq.qid);
+ if (ret)
+ goto err_free_queue;
+
+ if (udata) {
+ srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
+ if (!srq_key_mm) {
+ ret = -ENOMEM;
+ goto err_remove_handle;
+ }
+ srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
+ if (!srq_db_key_mm) {
+ ret = -ENOMEM;
+ goto err_free_srq_key_mm;
+ }
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.flags = srq->flags;
+ uresp.qid_mask = rhp->rdev.qpmask;
+ uresp.srqid = srq->wq.qid;
+ uresp.srq_size = srq->wq.size;
+ uresp.srq_memsize = srq->wq.memsize;
+ uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
+ spin_lock(&ucontext->mmap_lock);
+ uresp.srq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.srq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ spin_unlock(&ucontext->mmap_lock);
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_free_srq_db_key_mm;
+ srq_key_mm->key = uresp.srq_key;
+ srq_key_mm->addr = virt_to_phys(srq->wq.queue);
+ srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
+ insert_mmap(ucontext, srq_key_mm);
+ srq_db_key_mm->key = uresp.srq_db_gts_key;
+ srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
+ srq_db_key_mm->len = PAGE_SIZE;
+ insert_mmap(ucontext, srq_db_key_mm);
+ }
+
+ pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
+ __func__, srq->wq.qid, srq->idx, srq->wq.size,
+ (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
+
+ spin_lock_init(&srq->lock);
+ return &srq->ibsrq;
+err_free_srq_db_key_mm:
+ kfree(srq_db_key_mm);
+err_free_srq_key_mm:
+ kfree(srq_key_mm);
+err_remove_handle:
+ remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
+err_free_queue:
+ free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ srq->wr_waitp);
+err_free_skb:
+ if (srq->destroy_skb)
+ kfree_skb(srq->destroy_skb);
+err_free_srq_idx:
+ c4iw_free_srq_idx(&rhp->rdev, srq->idx);
+err_free_wr_wait:
+ c4iw_put_wr_wait(srq->wr_waitp);
+err_free_srq:
+ kfree(srq);
+ return ERR_PTR(ret);
+}
+
+int c4iw_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_srq *srq;
+ struct c4iw_ucontext *ucontext;
+
+ srq = to_c4iw_srq(ibsrq);
+ rhp = srq->rhp;
+
+ pr_debug("%s id %d\n", __func__, srq->wq.qid);
+
+ remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
+ ucontext = ibsrq->uobject ?
+ to_c4iw_ucontext(ibsrq->uobject->context) : NULL;
+ free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ srq->wr_waitp);
+ c4iw_free_srq_idx(&rhp->rdev, srq->idx);
+ c4iw_put_wr_wait(srq->wr_waitp);
+ kfree(srq);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 0ef25ae05e6f..57ed26b3cc21 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -53,7 +53,8 @@ static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
}
/* nr_* must be power of 2 */
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
+ u32 nr_pdid, u32 nr_srqt)
{
int err = 0;
err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
@@ -67,7 +68,17 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
nr_pdid, 1, 0);
if (err)
goto pdid_err;
+ if (!nr_srqt)
+ err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
+ 1, 1, 0);
+ else
+ err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
+ nr_srqt, 0, 0);
+ if (err)
+ goto srq_err;
return 0;
+ srq_err:
+ c4iw_id_table_free(&rdev->resource.pdid_table);
pdid_err:
c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
@@ -371,13 +382,21 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
unsigned rqt_start, rqt_chunk, rqt_top;
+ int skip = 0;
rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
if (!rdev->rqt_pool)
return -ENOMEM;
- rqt_start = rdev->lldi.vr->rq.start;
- rqt_chunk = rdev->lldi.vr->rq.size;
+ /*
+ * If SRQs are supported, then never use the first RQE from
+ * the RQT region. This is because HW uses RQT index 0 as NULL.
+ */
+ if (rdev->lldi.vr->srq.size)
+ skip = T4_RQT_ENTRY_SIZE;
+
+ rqt_start = rdev->lldi.vr->rq.start + skip;
+ rqt_chunk = rdev->lldi.vr->rq.size - skip;
rqt_top = rqt_start + rqt_chunk;
while (rqt_start < rqt_top) {
@@ -405,6 +424,32 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
kref_put(&rdev->rqt_kref, destroy_rqtpool);
}
+int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev)
+{
+ int idx;
+
+ idx = c4iw_id_alloc(&rdev->resource.srq_table);
+ mutex_lock(&rdev->stats.lock);
+ if (idx == -1) {
+ rdev->stats.srqt.fail++;
+ mutex_unlock(&rdev->stats.lock);
+ return -ENOMEM;
+ }
+ rdev->stats.srqt.cur++;
+ if (rdev->stats.srqt.cur > rdev->stats.srqt.max)
+ rdev->stats.srqt.max = rdev->stats.srqt.cur;
+ mutex_unlock(&rdev->stats.lock);
+ return idx;
+}
+
+void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx)
+{
+ c4iw_id_free(&rdev->resource.srq_table, idx);
+ mutex_lock(&rdev->stats.lock);
+ rdev->stats.srqt.cur--;
+ mutex_unlock(&rdev->stats.lock);
+}
+
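The allocator pairs an ID-table allocation with cur/max/fail counters kept
under the stats mutex, the same accounting pattern the driver uses for its
other resources. A compact userspace model with illustrative names
(locking omitted):

    #include <stdio.h>

    struct stats { int cur, max, fail; };

    static int alloc_idx(int *free_ids, int n, struct stats *st)
    {
            for (int i = 0; i < n; i++) {
                    if (free_ids[i]) {
                            free_ids[i] = 0;
                            st->cur++;
                            if (st->cur > st->max)
                                    st->max = st->cur;
                            return i;
                    }
            }
            st->fail++;
            return -1;
    }

    int main(void)
    {
            int ids[2] = { 1, 1 };
            struct stats st = { 0 };

            alloc_idx(ids, 2, &st);
            alloc_idx(ids, 2, &st);
            alloc_idx(ids, 2, &st);  /* table exhausted: fail++ */
            printf("cur=%d max=%d fail=%d\n", st.cur, st.max, st.fail);
            return 0;
    }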
/*
* On-Chip QP Memory.
*/
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 8369c7c8de83..e42021fd6fd6 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -52,12 +52,16 @@ struct t4_status_page {
__be16 pidx;
u8 qp_err; /* flit 1 - sw owns */
u8 db_off;
- u8 pad;
+ u8 pad[2];
u16 host_wq_pidx;
u16 host_cidx;
u16 host_pidx;
+ u16 pad2;
+ u32 srqidx;
};
+#define T4_RQT_ENTRY_SHIFT 6
+#define T4_RQT_ENTRY_SIZE BIT(T4_RQT_ENTRY_SHIFT)
#define T4_EQ_ENTRY_SIZE 64
#define T4_SQ_NUM_SLOTS 5
@@ -87,6 +91,9 @@ static inline int t4_max_fr_depth(int use_dsgl)
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4
+#define T4_WRITE_CMPL_MAX_SGL 4
+#define T4_WRITE_CMPL_MAX_CQE 16
+
union t4_wr {
struct fw_ri_res_wr res;
struct fw_ri_wr ri;
@@ -97,6 +104,7 @@ union t4_wr {
struct fw_ri_fr_nsmr_wr fr;
struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
struct fw_ri_inv_lstag_wr inv;
+ struct fw_ri_rdma_write_cmpl_wr write_cmpl;
struct t4_status_page status;
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};
@@ -179,9 +187,32 @@ struct t4_cqe {
__be32 wrid_hi;
__be32 wrid_low;
} gen;
+ struct {
+ __be32 stag;
+ __be32 msn;
+ __be32 reserved;
+ __be32 abs_rqe_idx;
+ } srcqe;
+ struct {
+ __be32 mo;
+ __be32 msn;
+ /*
+ * Use union for immediate data to be consistent with
+ * stack's 32 bit data and iWARP spec's 64 bit data.
+ */
+ union {
+ struct {
+ __be32 imm_data32;
+ u32 reserved;
+ } ib_imm_data;
+ __be64 imm_data64;
+ } iw_imm_data;
+ } imm_data_rcqe;
+
u64 drain_cookie;
+ __be64 flits[3];
} u;
- __be64 reserved;
+ __be64 reserved[3];
__be64 bits_type_ts;
};
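The imm_data_rcqe union above keeps the verbs-visible 32-bit immediate in
the first word of the 64-bit field the iWARP wire format defines. A
host-endian userspace model (the real struct uses __be32/__be64):

    #include <stdint.h>
    #include <stdio.h>

    union iw_imm_data {
            struct {
                    uint32_t imm_data32;  /* what the verbs API carries */
                    uint32_t reserved;
            } ib_imm_data;
            uint64_t imm_data64;          /* full iWARP immediate */
    };

    int main(void)
    {
            union iw_imm_data imm = { .ib_imm_data.imm_data32 = 0xabcd1234 };

            /* low word aliases the 32-bit immediate on a LE host */
            printf("imm64 = 0x%016llx\n", (unsigned long long)imm.imm_data64);
            return 0;
    }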
@@ -237,6 +268,9 @@ struct t4_cqe {
/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
+#define CQE_ABS_RQE_IDX(x) (be32_to_cpu((x)->u.srcqe.abs_rqe_idx))
+#define CQE_IMM_DATA(x) ( \
+ (x)->u.imm_data_rcqe.iw_imm_data.ib_imm_data.imm_data32)
/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
@@ -320,6 +354,7 @@ struct t4_swrqe {
u64 wr_id;
ktime_t host_time;
u64 sge_ts;
+ int valid;
};
struct t4_rq {
@@ -349,8 +384,98 @@ struct t4_wq {
void __iomem *db;
struct c4iw_rdev *rdev;
int flushed;
+ u8 *qp_errp;
+ u32 *srqidxp;
+};
+
+struct t4_srq_pending_wr {
+ u64 wr_id;
+ union t4_recv_wr wqe;
+ u8 len16;
+};
+
+struct t4_srq {
+ union t4_recv_wr *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_swrqe *sw_rq;
+ void __iomem *bar2_va;
+ u64 bar2_pa;
+ size_t memsize;
+ u32 bar2_qid;
+ u32 qid;
+ u32 msn;
+ u32 rqt_hwaddr;
+ u32 rqt_abs_idx;
+ u16 rqt_size;
+ u16 size;
+ u16 cidx;
+ u16 pidx;
+ u16 wq_pidx;
+ u16 wq_pidx_inc;
+ u16 in_use;
+ struct t4_srq_pending_wr *pending_wrs;
+ u16 pending_cidx;
+ u16 pending_pidx;
+ u16 pending_in_use;
+ u16 ooo_count;
};
+static inline u32 t4_srq_avail(struct t4_srq *srq)
+{
+ return srq->size - 1 - srq->in_use;
+}
+
+static inline void t4_srq_produce(struct t4_srq *srq, u8 len16)
+{
+ srq->in_use++;
+ if (++srq->pidx == srq->size)
+ srq->pidx = 0;
+ srq->wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
+ if (srq->wq_pidx >= srq->size * T4_RQ_NUM_SLOTS)
+ srq->wq_pidx %= srq->size * T4_RQ_NUM_SLOTS;
+ srq->queue[srq->size].status.host_pidx = srq->pidx;
+}
+
+static inline void t4_srq_produce_pending_wr(struct t4_srq *srq)
+{
+ srq->pending_in_use++;
+ srq->in_use++;
+ if (++srq->pending_pidx == srq->size)
+ srq->pending_pidx = 0;
+}
+
+static inline void t4_srq_consume_pending_wr(struct t4_srq *srq)
+{
+ srq->pending_in_use--;
+ srq->in_use--;
+ if (++srq->pending_cidx == srq->size)
+ srq->pending_cidx = 0;
+}
+
+static inline void t4_srq_produce_ooo(struct t4_srq *srq)
+{
+ srq->in_use--;
+ srq->ooo_count++;
+}
+
+static inline void t4_srq_consume_ooo(struct t4_srq *srq)
+{
+ srq->cidx++;
+ if (srq->cidx == srq->size)
+ srq->cidx = 0;
+ srq->queue[srq->size].status.host_cidx = srq->cidx;
+ srq->ooo_count--;
+}
+
+static inline void t4_srq_consume(struct t4_srq *srq)
+{
+ srq->in_use--;
+ if (++srq->cidx == srq->size)
+ srq->cidx = 0;
+ srq->queue[srq->size].status.host_cidx = srq->cidx;
+}
+
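As an illustrative sketch (assuming the caller has already built a WR into
srq->queue[srq->pidx]), the producer side pairs an availability check with
t4_srq_produce(); one slot stays reserved so that pidx == cidx can only
mean empty:

	/* Hypothetical post path: reserve a slot, then publish the new pidx. */
	static int example_srq_post_one(struct t4_srq *srq, u8 len16)
	{
		if (!t4_srq_avail(srq))
			return -ENOMEM;	/* full: at most size - 1 WRs outstanding */
		t4_srq_produce(srq, len16);
		return 0;
	}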
static inline int t4_rqes_posted(struct t4_wq *wq)
{
return wq->rq.in_use;
@@ -384,7 +509,6 @@ static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
static inline void t4_rq_consume(struct t4_wq *wq)
{
wq->rq.in_use--;
- wq->rq.msn++;
if (++wq->rq.cidx == wq->rq.size)
wq->rq.cidx = 0;
}
@@ -464,6 +588,25 @@ static inline void pio_copy(u64 __iomem *dst, u64 *src)
}
}
+static inline void t4_ring_srq_db(struct t4_srq *srq, u16 inc, u8 len16,
+ union t4_recv_wr *wqe)
+{
+ /* Flush host queue memory writes. */
+ wmb();
+ if (inc == 1 && srq->bar2_qid == 0 && wqe) {
+ pr_debug("%s : WC srq->pidx = %d; len16=%d\n",
+ __func__, srq->pidx, len16);
+ pio_copy(srq->bar2_va + SGE_UDB_WCDOORBELL, (u64 *)wqe);
+ } else {
+ pr_debug("%s: DB srq->pidx = %d; len16=%d\n",
+ __func__, srq->pidx, len16);
+ writel(PIDX_T5_V(inc) | QID_V(srq->bar2_qid),
+ srq->bar2_va + SGE_UDB_KDOORBELL);
+ }
+ /* Flush user doorbell area writes. */
+ wmb();
+}
+
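After producing, the doorbell makes the new index visible to hardware; a
hedged sketch of the single-WR fast path (inc == 1 with bar2_qid == 0 and a
valid wqe selects the write-combining copy in t4_ring_srq_db() above):

	/* Hypothetical ring step following example_srq_post_one(). */
	static void example_srq_ring_one(struct t4_srq *srq,
					 union t4_recv_wr *wqe, u8 len16)
	{
		t4_ring_srq_db(srq, 1, len16, wqe);
	}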
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
{
@@ -515,12 +658,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
static inline int t4_wq_in_error(struct t4_wq *wq)
{
- return wq->rq.queue[wq->rq.size].status.qp_err;
+ return *wq->qp_errp;
}
-static inline void t4_set_wq_in_error(struct t4_wq *wq)
+static inline void t4_set_wq_in_error(struct t4_wq *wq, u32 srqidx)
{
- wq->rq.queue[wq->rq.size].status.qp_err = 1;
+ if (srqidx)
+ *wq->srqidxp = srqidx;
+ *wq->qp_errp = 1;
}
static inline void t4_disable_wq_db(struct t4_wq *wq)
@@ -565,6 +710,7 @@ struct t4_cq {
u16 cidx_inc;
u8 gen;
u8 error;
+ u8 *qp_errp;
unsigned long flags;
};
@@ -698,18 +844,18 @@ static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
static inline int t4_cq_in_error(struct t4_cq *cq)
{
- return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
+ return *cq->qp_errp;
}
static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
- ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+ *cq->qp_errp = 1;
}
#endif
struct t4_dev_status_page {
u8 db_off;
- u8 pad1;
+ u8 write_cmpl_supported;
u16 pad2;
u32 pad3;
u64 qp_start;
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 58c531db4f4a..cbdb300a4794 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -50,7 +50,8 @@ enum fw_ri_wr_opcode {
FW_RI_BYPASS = 0xd,
FW_RI_RECEIVE = 0xe,
- FW_RI_SGE_EC_CR_RETURN = 0xf
+ FW_RI_SGE_EC_CR_RETURN = 0xf,
+ FW_RI_WRITE_IMMEDIATE = FW_RI_RDMA_INIT
};
enum fw_ri_wr_flags {
@@ -59,7 +60,8 @@ enum fw_ri_wr_flags {
FW_RI_SOLICITED_EVENT_FLAG = 0x04,
FW_RI_READ_FENCE_FLAG = 0x08,
FW_RI_LOCAL_FENCE_FLAG = 0x10,
- FW_RI_RDMA_READ_INVALIDATE = 0x20
+ FW_RI_RDMA_READ_INVALIDATE = 0x20,
+ FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40
};
enum fw_ri_mpa_attrs {
@@ -263,6 +265,7 @@ enum fw_ri_res_type {
FW_RI_RES_TYPE_SQ,
FW_RI_RES_TYPE_RQ,
FW_RI_RES_TYPE_CQ,
+ FW_RI_RES_TYPE_SRQ,
};
enum fw_ri_res_op {
@@ -296,6 +299,20 @@ struct fw_ri_res {
__be32 r6_lo;
__be64 r7;
} cq;
+ struct fw_ri_res_srq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ __be32 srqid;
+ __be32 pdid;
+ __be32 hwsrqsize;
+ __be32 hwsrqaddr;
+ } srq;
} u;
};
@@ -531,7 +548,17 @@ struct fw_ri_rdma_write_wr {
__u16 wrid;
__u8 r1[3];
__u8 len16;
- __be64 r2;
+ /*
+ * Use a union for the immediate data, to stay consistent with the
+ * stack's 32-bit data and the iWARP spec's 64-bit data.
+ */
+ union {
+ struct {
+ __be32 imm_data32;
+ u32 reserved;
+ } ib_imm_data;
+ __be64 imm_data64;
+ } iw_imm_data;
__be32 plen;
__be32 stag_sink;
__be64 to_sink;
@@ -568,6 +595,37 @@ struct fw_ri_send_wr {
#define FW_RI_SEND_WR_SENDOP_G(x) \
(((x) >> FW_RI_SEND_WR_SENDOP_S) & FW_RI_SEND_WR_SENDOP_M)
+struct fw_ri_rdma_write_cmpl_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 r2;
+ __u8 flags_send;
+ __u16 wrid_send;
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 stag_sink;
+ __be64 to_sink;
+ union fw_ri_cmpl {
+ struct fw_ri_immd_cmpl {
+ __u8 op;
+ __u8 r1[6];
+ __u8 immdlen;
+ __u8 data[16];
+ } immd_src;
+ struct fw_ri_isgl isgl_src;
+ } u_cmpl;
+ __be64 r3;
+#ifndef C99_NOT_SUPPORTED
+ union fw_ri_write {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
struct fw_ri_rdma_read_wr {
__u8 opcode;
__u8 flags;
@@ -707,6 +765,10 @@ enum fw_ri_init_p2ptype {
FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
};
+enum fw_ri_init_rqeqid_srq {
+ FW_RI_INIT_RQEQID_SRQ = 1 << 31,
+};
+
struct fw_ri_wr {
__be32 op_compl;
__be32 flowid_len16;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 6deb101cdd43..2c19bf772451 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -8143,8 +8143,15 @@ static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
}
}
-/*
+/**
+ * is_rcv_avail_int() - User receive context available IRQ handler
+ * @dd: valid dd
+ * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
+ *
* RX block receive available interrupt. Source is < 160.
+ *
+ * This is the general interrupt handler for user (PSM) receive contexts,
+ * and can only be used for non-threaded IRQs.
*/
static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
{
@@ -8154,12 +8161,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
if (likely(source < dd->num_rcv_contexts)) {
rcd = hfi1_rcd_get_by_index(dd, source);
if (rcd) {
- /* Check for non-user contexts, including vnic */
- if (source < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
- rcd->do_interrupt(rcd, 0);
- else
- handle_user_interrupt(rcd);
-
+ handle_user_interrupt(rcd);
hfi1_rcd_put(rcd);
return; /* OK */
}
@@ -8173,8 +8175,14 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
err_detail, source);
}
-/*
+/**
+ * is_rcv_urgent_int() - User receive context urgent IRQ handler
+ * @dd: valid dd
+ * @source: logical IRQ source (offset from IS_RCVURGENT_START)
+ *
* RX block receive urgent interrupt. Source is < 160.
+ *
+ * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
*/
static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
{
@@ -8184,11 +8192,7 @@ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
if (likely(source < dd->num_rcv_contexts)) {
rcd = hfi1_rcd_get_by_index(dd, source);
if (rcd) {
- /* only pay attention to user urgent interrupts */
- if (source >= dd->first_dyn_alloc_ctxt &&
- !rcd->is_vnic)
- handle_user_interrupt(rcd);
-
+ handle_user_interrupt(rcd);
hfi1_rcd_put(rcd);
return; /* OK */
}
@@ -8260,9 +8264,14 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
dd_dev_err(dd, "invalid interrupt source %u\n", source);
}
-/*
- * General interrupt handler. This is able to correctly handle
- * all interrupts in case INTx is used.
+/**
+ * general_interrupt() - General interrupt handler
+ * @irq: MSIx IRQ vector
+ * @data: hfi1 devdata
+ *
+ * This handler correctly handles all non-threaded interrupts. Receive
+ * context DATA IRQs are threaded and are not supported by this handler.
*/
static irqreturn_t general_interrupt(int irq, void *data)
{
@@ -10130,7 +10139,7 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
(((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
- for (i = 0; i < dd->chip_send_contexts; i++) {
+ for (i = 0; i < chip_send_contexts(dd); i++) {
hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
i, (u32)sreg);
write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
@@ -11857,7 +11866,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
* sequence numbers could land exactly on the same spot.
* E.g. a rcd restart before the receive header wrapped.
*/
- memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
+ memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
/* starting timeout */
rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
@@ -11952,9 +11961,8 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
- rcd->rcvctrl = rcvctrl;
hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
- write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
+ write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
/* work around sticky RcvCtxtStatus.BlockedRHQFull */
if (did_enable &&
@@ -12042,7 +12050,7 @@ u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
} else if (entry->flags & CNTR_SDMA) {
hfi1_cdbg(CNTR,
"\t Per SDMA Engine\n");
- for (j = 0; j < dd->chip_sdma_engines;
+ for (j = 0; j < chip_sdma_engines(dd);
j++) {
val =
entry->rw_cntr(entry, dd, j,
@@ -12418,6 +12426,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
struct hfi1_pportdata *ppd;
const char *bit_type_32 = ",32";
const int bit_type_32_sz = strlen(bit_type_32);
+ u32 sdma_engines = chip_sdma_engines(dd);
/* set up the stats timer; the add_timer is done at the end */
timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
@@ -12450,7 +12459,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
}
} else if (dev_cntrs[i].flags & CNTR_SDMA) {
dev_cntrs[i].offset = dd->ndevcntrs;
- for (j = 0; j < dd->chip_sdma_engines; j++) {
+ for (j = 0; j < sdma_engines; j++) {
snprintf(name, C_MAX_NAME, "%s%d",
dev_cntrs[i].name, j);
sz += strlen(name);
@@ -12507,7 +12516,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
*p++ = '\n';
}
} else if (dev_cntrs[i].flags & CNTR_SDMA) {
- for (j = 0; j < dd->chip_sdma_engines; j++) {
+ for (j = 0; j < sdma_engines; j++) {
snprintf(name, C_MAX_NAME, "%s%d",
dev_cntrs[i].name, j);
memcpy(p, name, strlen(name));
@@ -13020,9 +13029,9 @@ static void clear_all_interrupts(struct hfi1_devdata *dd)
write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
- for (i = 0; i < dd->chip_send_contexts; i++)
+ for (i = 0; i < chip_send_contexts(dd); i++)
write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
- for (i = 0; i < dd->chip_sdma_engines; i++)
+ for (i = 0; i < chip_sdma_engines(dd); i++)
write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
@@ -13030,48 +13039,30 @@ static void clear_all_interrupts(struct hfi1_devdata *dd)
write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
}
-/* Move to pcie.c? */
-static void disable_intx(struct pci_dev *pdev)
-{
- pci_intx(pdev, 0);
-}
-
/**
* hfi1_clean_up_interrupts() - Free all IRQ resources
* @dd: valid device data data structure
*
- * Free the MSI or INTx IRQs and assoicated PCI resources,
- * if they have been allocated.
+ * Free the MSI-X IRQs and associated PCI resources, if they have been allocated.
*/
void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
{
int i;
+ struct hfi1_msix_entry *me = dd->msix_entries;
/* remove irqs - must happen before disabling/turning off */
- if (dd->num_msix_entries) {
- /* MSI-X */
- struct hfi1_msix_entry *me = dd->msix_entries;
-
- for (i = 0; i < dd->num_msix_entries; i++, me++) {
- if (!me->arg) /* => no irq, no affinity */
- continue;
- hfi1_put_irq_affinity(dd, me);
- pci_free_irq(dd->pcidev, i, me->arg);
- }
-
- /* clean structures */
- kfree(dd->msix_entries);
- dd->msix_entries = NULL;
- dd->num_msix_entries = 0;
- } else {
- /* INTx */
- if (dd->requested_intx_irq) {
- pci_free_irq(dd->pcidev, 0, dd);
- dd->requested_intx_irq = 0;
- }
- disable_intx(dd->pcidev);
+ for (i = 0; i < dd->num_msix_entries; i++, me++) {
+ if (!me->arg) /* => no irq, no affinity */
+ continue;
+ hfi1_put_irq_affinity(dd, me);
+ pci_free_irq(dd->pcidev, i, me->arg);
}
+ /* clean structures */
+ kfree(dd->msix_entries);
+ dd->msix_entries = NULL;
+ dd->num_msix_entries = 0;
+
pci_free_irq_vectors(dd->pcidev);
}
@@ -13121,20 +13112,6 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd,
msix_intr);
}
-static int request_intx_irq(struct hfi1_devdata *dd)
-{
- int ret;
-
- ret = pci_request_irq(dd->pcidev, 0, general_interrupt, NULL, dd,
- DRIVER_NAME "_%d", dd->unit);
- if (ret)
- dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
- ret);
- else
- dd->requested_intx_irq = 1;
- return ret;
-}
-
static int request_msix_irqs(struct hfi1_devdata *dd)
{
int first_general, last_general;
@@ -13253,11 +13230,6 @@ void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
{
int i;
- if (!dd->num_msix_entries) {
- synchronize_irq(pci_irq_vector(dd->pcidev, 0));
- return;
- }
-
for (i = 0; i < dd->vnic.num_ctxt; i++) {
struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
@@ -13346,7 +13318,6 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
{
u32 total;
int ret, request;
- int single_interrupt = 0; /* we expect to have all the interrupts */
/*
* Interrupt count:
@@ -13363,17 +13334,6 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
if (request < 0) {
ret = request;
goto fail;
- } else if (request == 0) {
- /* using INTx */
- /* dd->num_msix_entries already zero */
- single_interrupt = 1;
- dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
- } else if (request < total) {
- /* using MSI-X, with reduced interrupts */
- dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
- total, request);
- ret = -EINVAL;
- goto fail;
} else {
dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
GFP_KERNEL);
@@ -13394,10 +13354,7 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
/* reset general handler mask, chip MSI-X mappings */
reset_interrupts(dd);
- if (single_interrupt)
- ret = request_intx_irq(dd);
- else
- ret = request_msix_irqs(dd);
+ ret = request_msix_irqs(dd);
if (ret)
goto fail;
@@ -13429,6 +13386,8 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int qos_rmt_count;
int user_rmt_reduced;
u32 n_usr_ctxts;
+ u32 send_contexts = chip_send_contexts(dd);
+ u32 rcv_contexts = chip_rcv_contexts(dd);
/*
* Kernel receive contexts:
@@ -13450,16 +13409,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
* Every kernel receive context needs an ACK send context.
* one send context is allocated for each VL{0-7} and VL15
*/
- if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
+ if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
dd_dev_err(dd,
"Reducing # kernel rcv contexts to: %d, from %lu\n",
- (int)(dd->chip_send_contexts - num_vls - 1),
+ send_contexts - num_vls - 1,
num_kernel_contexts);
- num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
+ num_kernel_contexts = send_contexts - num_vls - 1;
}
/* Accommodate VNIC contexts if possible */
- if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
+ if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
dd_dev_err(dd, "No receive contexts available for VNIC\n");
num_vnic_contexts = 0;
}
@@ -13477,13 +13436,13 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
/*
* Adjust the counts given a global max.
*/
- if (total_contexts + n_usr_ctxts > dd->chip_rcv_contexts) {
+ if (total_contexts + n_usr_ctxts > rcv_contexts) {
dd_dev_err(dd,
"Reducing # user receive contexts to: %d, from %u\n",
- (int)(dd->chip_rcv_contexts - total_contexts),
+ rcv_contexts - total_contexts,
n_usr_ctxts);
/* recalculate */
- n_usr_ctxts = dd->chip_rcv_contexts - total_contexts;
+ n_usr_ctxts = rcv_contexts - total_contexts;
}
/* each user context requires an entry in the RMT */
@@ -13509,7 +13468,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
dd->freectxts = n_usr_ctxts;
dd_dev_info(dd,
"rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
- (int)dd->chip_rcv_contexts,
+ rcv_contexts,
(int)dd->num_rcv_contexts,
(int)dd->n_krcv_queues,
dd->num_vnic_contexts,
@@ -13527,7 +13486,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
* contexts.
*/
dd->rcv_entries.group_size = RCV_INCREMENT;
- ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
+ ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
dd->rcv_entries.nctxt_extra = ngroups -
(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
@@ -13552,7 +13511,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
dd_dev_info(
dd,
"send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
- dd->chip_send_contexts,
+ send_contexts,
dd->num_send_contexts,
dd->sc_sizes[SC_KERNEL].count,
dd->sc_sizes[SC_ACK].count,
@@ -13610,7 +13569,7 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
write_csr(dd, CCE_INT_MAP + (8 * i), 0);
/* SendCtxtCreditReturnAddr */
- for (i = 0; i < dd->chip_send_contexts; i++)
+ for (i = 0; i < chip_send_contexts(dd); i++)
write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
/* PIO Send buffers */
@@ -13623,7 +13582,7 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
/* RcvHdrAddr */
/* RcvHdrTailAddr */
/* RcvTidFlowTable */
- for (i = 0; i < dd->chip_rcv_contexts; i++) {
+ for (i = 0; i < chip_rcv_contexts(dd); i++) {
write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
@@ -13631,7 +13590,7 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
}
/* RcvArray */
- for (i = 0; i < dd->chip_rcv_array_count; i++)
+ for (i = 0; i < chip_rcv_array_count(dd); i++)
hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
/* RcvQPMapTable */
@@ -13789,7 +13748,7 @@ static void reset_txe_csrs(struct hfi1_devdata *dd)
write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
- for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
+ for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
@@ -13817,7 +13776,7 @@ static void reset_txe_csrs(struct hfi1_devdata *dd)
/*
* TXE Per-Context CSRs
*/
- for (i = 0; i < dd->chip_send_contexts; i++) {
+ for (i = 0; i < chip_send_contexts(dd); i++) {
write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
@@ -13835,7 +13794,7 @@ static void reset_txe_csrs(struct hfi1_devdata *dd)
/*
* TXE Per-SDMA CSRs
*/
- for (i = 0; i < dd->chip_sdma_engines; i++) {
+ for (i = 0; i < chip_sdma_engines(dd); i++) {
write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
/* SEND_DMA_STATUS read-only */
write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
@@ -13968,7 +13927,7 @@ static void reset_rxe_csrs(struct hfi1_devdata *dd)
/*
* RXE Kernel and User Per-Context CSRs
*/
- for (i = 0; i < dd->chip_rcv_contexts; i++) {
+ for (i = 0; i < chip_rcv_contexts(dd); i++) {
/* kernel */
write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
/* RCV_CTXT_STATUS read-only */
@@ -14084,13 +14043,13 @@ static int init_chip(struct hfi1_devdata *dd)
/* disable send contexts and SDMA engines */
write_csr(dd, SEND_CTRL, 0);
- for (i = 0; i < dd->chip_send_contexts; i++)
+ for (i = 0; i < chip_send_contexts(dd); i++)
write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
- for (i = 0; i < dd->chip_sdma_engines; i++)
+ for (i = 0; i < chip_sdma_engines(dd); i++)
write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
/* disable port (turn off RXE inbound traffic) and contexts */
write_csr(dd, RCV_CTRL, 0);
- for (i = 0; i < dd->chip_rcv_contexts; i++)
+ for (i = 0; i < chip_rcv_contexts(dd); i++)
write_csr(dd, RCV_CTXT_CTRL, 0);
/* mask all interrupt sources */
for (i = 0; i < CCE_NUM_INT_CSRS; i++)
@@ -14709,9 +14668,9 @@ static void init_txe(struct hfi1_devdata *dd)
write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
/* enable all per-context and per-SDMA engine errors */
- for (i = 0; i < dd->chip_send_contexts; i++)
+ for (i = 0; i < chip_send_contexts(dd); i++)
write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
- for (i = 0; i < dd->chip_sdma_engines; i++)
+ for (i = 0; i < chip_sdma_engines(dd); i++)
write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
/* set the local CU to AU mapping */
@@ -14979,11 +14938,13 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
"Functional simulator"
};
struct pci_dev *parent = pdev->bus->self;
+ u32 sdma_engines;
dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
sizeof(struct hfi1_pportdata));
if (IS_ERR(dd))
goto bail;
+ sdma_engines = chip_sdma_engines(dd);
ppd = dd->pport;
for (i = 0; i < dd->num_pports; i++, ppd++) {
int vl;
@@ -15081,11 +15042,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* give a reasonable active value, will be set on link up */
dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
- dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
- dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
- dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
- dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
- dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
/* fix up link widths for emulation _p */
ppd = dd->pport;
if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
@@ -15096,11 +15052,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
OPA_LINK_WIDTH_1X;
}
/* insure num_vls isn't larger than number of sdma engines */
- if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
+ if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
- num_vls, dd->chip_sdma_engines);
- num_vls = dd->chip_sdma_engines;
- ppd->vls_supported = dd->chip_sdma_engines;
+ num_vls, sdma_engines);
+ num_vls = sdma_engines;
+ ppd->vls_supported = sdma_engines;
ppd->vls_operational = ppd->vls_supported;
}
@@ -15216,13 +15172,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
*/
aspm_init(dd);
- dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
- /*
- * rcd[0] is guaranteed to be valid by this point. Also, all
- * context are using the same value, as per the module parameter.
- */
- dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
-
ret = init_pervl_scs(dd);
if (ret)
goto bail_cleanup;
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index fdf389e46e19..36b04d6300e5 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -656,6 +656,36 @@ static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt,
write_csr(dd, offset0 + (0x1000 * ctxt), value);
}
+static inline u32 chip_rcv_contexts(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, RCV_CONTEXTS);
+}
+
+static inline u32 chip_send_contexts(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_CONTEXTS);
+}
+
+static inline u32 chip_sdma_engines(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_DMA_ENGINES);
+}
+
+static inline u32 chip_pio_mem_size(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_PIO_MEM_SIZE);
+}
+
+static inline u32 chip_sdma_mem_size(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, SEND_DMA_MEM_SIZE);
+}
+
+static inline u32 chip_rcv_array_count(struct hfi1_devdata *dd)
+{
+ return read_csr(dd, RCV_ARRAY_CNT);
+}
+
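Note these helpers re-read the CSR on every call, which is why the hot
loops in this patch hoist the value into a local first; a minimal sketch of
that pattern (the loop body is a placeholder):

	static void example_iterate_sdma(struct hfi1_devdata *dd)
	{
		u32 sdma_engines = chip_sdma_engines(dd);	/* one CSR read */
		u32 i;

		for (i = 0; i < sdma_engines; i++)
			;	/* per-engine work goes here */
	}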
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
u32 dw_len);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 94dca95db04f..a41f85558312 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -208,25 +208,25 @@ static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
(offset * RCV_BUF_BLOCK_SIZE));
}
-static inline void *hfi1_get_header(struct hfi1_devdata *dd,
+static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
__le32 *rhf_addr)
{
u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
- return (void *)(rhf_addr - dd->rhf_offset + offset);
+ return (void *)(rhf_addr - rcd->rhf_offset + offset);
}
-static inline struct ib_header *hfi1_get_msgheader(struct hfi1_devdata *dd,
+static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
__le32 *rhf_addr)
{
- return (struct ib_header *)hfi1_get_header(dd, rhf_addr);
+ return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
}
static inline struct hfi1_16b_header
- *hfi1_get_16B_header(struct hfi1_devdata *dd,
+ *hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
__le32 *rhf_addr)
{
- return (struct hfi1_16b_header *)hfi1_get_header(dd, rhf_addr);
+ return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
}
/*
@@ -591,13 +591,12 @@ static void __prescan_rxq(struct hfi1_packet *packet)
init_ps_mdata(&mdata, packet);
while (1) {
- struct hfi1_devdata *dd = rcd->dd;
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
- dd->rhf_offset;
+ packet->rcd->rhf_offset;
struct rvt_qp *qp;
struct ib_header *hdr;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
int is_ecn = 0;
@@ -612,7 +611,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
if (etype != RHF_RCV_TYPE_IB)
goto next;
- packet->hdr = hfi1_get_msgheader(dd, rhf_addr);
+ packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
hdr = packet->hdr;
lnh = ib_get_lnh(hdr);
@@ -718,7 +717,7 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
ret = check_max_packet(packet, thread);
packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
- packet->rcd->dd->rhf_offset;
+ packet->rcd->rhf_offset;
packet->rhf = rhf_to_cpu(packet->rhf_addr);
return ret;
@@ -757,7 +756,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
* crashing down. There is no need to eat another
* comparison in this performance critical code.
*/
- packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
+ packet->rcd->rhf_rcv_function_map[packet->etype](packet);
packet->numpkt++;
/* Set up for the next packet */
@@ -768,7 +767,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
ret = check_max_packet(packet, thread);
packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
- packet->rcd->dd->rhf_offset;
+ packet->rcd->rhf_offset;
packet->rhf = rhf_to_cpu(packet->rhf_addr);
return ret;
@@ -949,12 +948,12 @@ static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
u8 sc = SC15_PACKET;
if (etype == RHF_RCV_TYPE_IB) {
- struct ib_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
+ struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
packet->rhf_addr);
sc = hfi1_9B_get_sc5(hdr, packet->rhf);
} else if (etype == RHF_RCV_TYPE_BYPASS) {
struct hfi1_16b_header *hdr = hfi1_get_16B_header(
- packet->rcd->dd,
+ packet->rcd,
packet->rhf_addr);
sc = hfi1_16B_get_sc(hdr);
}
@@ -1034,7 +1033,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
packet.rhqoff += packet.rsize;
packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
packet.rhqoff +
- dd->rhf_offset;
+ rcd->rhf_offset;
packet.rhf = rhf_to_cpu(packet.rhf_addr);
} else if (skip_pkt) {
@@ -1384,7 +1383,7 @@ bail:
static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
{
packet->hdr = (struct hfi1_ib_message_header *)
- hfi1_get_msgheader(packet->rcd->dd,
+ hfi1_get_msgheader(packet->rcd,
packet->rhf_addr);
packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
}
@@ -1485,7 +1484,7 @@ static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
u8 l4;
packet->hdr = (struct hfi1_16b_header *)
- hfi1_get_16B_header(packet->rcd->dd,
+ hfi1_get_16B_header(packet->rcd,
packet->rhf_addr);
l4 = hfi1_16B_get_l4(packet->hdr);
if (l4 == OPA_16B_L4_IB_LOCAL) {
@@ -1575,7 +1574,7 @@ void handle_eflags(struct hfi1_packet *packet)
* The following functions are called by the interrupt handler. They are type
* specific handlers for each packet type.
*/
-int process_receive_ib(struct hfi1_packet *packet)
+static int process_receive_ib(struct hfi1_packet *packet)
{
if (hfi1_setup_9B_packet(packet))
return RHF_RCV_CONTINUE;
@@ -1607,7 +1606,7 @@ static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
return false;
}
-int process_receive_bypass(struct hfi1_packet *packet)
+static int process_receive_bypass(struct hfi1_packet *packet)
{
struct hfi1_devdata *dd = packet->rcd->dd;
@@ -1649,7 +1648,7 @@ int process_receive_bypass(struct hfi1_packet *packet)
return RHF_RCV_CONTINUE;
}
-int process_receive_error(struct hfi1_packet *packet)
+static int process_receive_error(struct hfi1_packet *packet)
{
/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
if (unlikely(
@@ -1668,7 +1667,7 @@ int process_receive_error(struct hfi1_packet *packet)
return RHF_RCV_CONTINUE;
}
-int kdeth_process_expected(struct hfi1_packet *packet)
+static int kdeth_process_expected(struct hfi1_packet *packet)
{
hfi1_setup_9B_packet(packet);
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
@@ -1682,7 +1681,7 @@ int kdeth_process_expected(struct hfi1_packet *packet)
return RHF_RCV_CONTINUE;
}
-int kdeth_process_eager(struct hfi1_packet *packet)
+static int kdeth_process_eager(struct hfi1_packet *packet)
{
hfi1_setup_9B_packet(packet);
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
@@ -1695,7 +1694,7 @@ int kdeth_process_eager(struct hfi1_packet *packet)
return RHF_RCV_CONTINUE;
}
-int process_receive_invalid(struct hfi1_packet *packet)
+static int process_receive_invalid(struct hfi1_packet *packet)
{
dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
rhf_rcv_type(packet->rhf));
@@ -1719,9 +1718,8 @@ void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
init_ps_mdata(&mdata, &packet);
while (1) {
- struct hfi1_devdata *dd = rcd->dd;
__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
- dd->rhf_offset;
+ rcd->rhf_offset;
struct ib_header *hdr;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn;
@@ -1738,7 +1736,7 @@ void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
if (etype > RHF_RCV_TYPE_IB)
goto next;
- packet.hdr = hfi1_get_msgheader(dd, rhf_addr);
+ packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
hdr = packet.hdr;
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
@@ -1760,3 +1758,14 @@ next:
update_ps_mdata(&mdata, rcd);
}
}
+
+const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
+ [RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
+ [RHF_RCV_TYPE_EAGER] = kdeth_process_eager,
+ [RHF_RCV_TYPE_IB] = process_receive_ib,
+ [RHF_RCV_TYPE_ERROR] = process_receive_error,
+ [RHF_RCV_TYPE_BYPASS] = process_receive_bypass,
+ [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
+};
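With the table now const and shared, every receive context simply points
rcd->rhf_rcv_function_map at it (see the init.c hunk later in this series)
and dispatch remains one indexed call; a sketch, assuming 'packet' was
prepared by the interrupt path:

	/* Hypothetical dispatch, mirroring process_rcv_packet() above. */
	static int example_dispatch(struct hfi1_packet *packet)
	{
		return packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	}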
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 0fc4aa9455c3..1fc75647e47b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -411,7 +411,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
mapio = 1;
break;
case RCV_HDRQ:
- memlen = uctxt->rcvhdrq_size;
+ memlen = rcvhdrq_size(uctxt);
memvirt = uctxt->rcvhdrq;
break;
case RCV_EGRBUF: {
@@ -521,7 +521,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
break;
case SUBCTXT_RCV_HDRQ:
memaddr = (u64)uctxt->subctxt_rcvhdr_base;
- memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
+ memlen = rcvhdrq_size(uctxt) * uctxt->subctxt_cnt;
flags |= VM_IO | VM_DONTEXPAND;
vmf = 1;
break;
@@ -985,7 +985,11 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
* sub contexts.
* This has to be done here so the rest of the sub-contexts find the
* proper base context.
+ * NOTE: __set_bit() can be used here because the context creation is
+ * protected by the mutex (rather than the spin_lock), and will be the
+ * very first instance of this context.
*/
+ __set_bit(0, uctxt->in_use_ctxts);
if (uinfo->subctxt_cnt)
init_subctxts(uctxt, uinfo);
uctxt->userversion = uinfo->userversion;
@@ -1040,7 +1044,7 @@ static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
return -ENOMEM;
/* We can take the size of the RcvHdr Queue from the master */
- uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
+ uctxt->subctxt_rcvhdr_base = vmalloc_user(rcvhdrq_size(uctxt) *
num_subctxts);
if (!uctxt->subctxt_rcvhdr_base) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 4ab8b5bfbed1..d9470317983f 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -169,12 +169,6 @@ extern const struct pci_error_handlers hfi1_pci_err_handler;
struct hfi1_opcode_stats_perctx;
struct ctxt_eager_bufs {
- ssize_t size; /* total size of eager buffers */
- u32 count; /* size of buffers array */
- u32 numbufs; /* number of buffers allocated */
- u32 alloced; /* number of rcvarray entries used */
- u32 rcvtid_size; /* size of each eager rcv tid */
- u32 threshold; /* head update threshold */
struct eager_buffer {
void *addr;
dma_addr_t dma;
@@ -184,6 +178,12 @@ struct ctxt_eager_bufs {
void *addr;
dma_addr_t dma;
} *rcvtids;
+ u32 size; /* total size of eager buffers */
+ u32 rcvtid_size; /* size of each eager rcv tid */
+ u16 count; /* size of buffers array */
+ u16 numbufs; /* number of buffers allocated */
+ u16 alloced; /* number of rcvarray entries used */
+ u16 threshold; /* head update threshold */
};
struct exp_tid_set {
@@ -191,43 +191,84 @@ struct exp_tid_set {
u32 count;
};
+typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
struct hfi1_ctxtdata {
- /* shadow the ctxt's RcvCtrl register */
- u64 rcvctrl;
/* rcvhdrq base, needs mmap before useful */
void *rcvhdrq;
/* kernel virtual address where hdrqtail is updated */
volatile __le64 *rcvhdrtail_kvaddr;
- /* when waiting for rcv or pioavail */
- wait_queue_head_t wait;
- /* rcvhdrq size (for freeing) */
- size_t rcvhdrq_size;
+ /* so functions that need physical port can get it easily */
+ struct hfi1_pportdata *ppd;
+ /* so file ops can get at unit */
+ struct hfi1_devdata *dd;
+ /* this receive context's assigned PIO ACK send context */
+ struct send_context *sc;
+ /* per context recv functions */
+ const rhf_rcv_function_ptr *rhf_rcv_function_map;
+ /*
+ * The interrupt handler for a particular receive context can vary
+ * throughout it's lifetime. This is not a lock protected data member so
+ * it must be updated atomically and the prev and new value must always
+ * be valid. Worst case is we process an extra interrupt and up to 64
+ * packets with the wrong interrupt handler.
+ */
+ int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
+ /* verbs rx_stats per rcd */
+ struct hfi1_opcode_stats_perctx *opstats;
+ /* clear interrupt mask */
+ u64 imask;
+ /* ctxt rcvhdrq head offset */
+ u32 head;
/* number of rcvhdrq entries */
u16 rcvhdrq_cnt;
+ u8 ireg; /* clear interrupt register */
+ /* receive packet sequence counter */
+ u8 seq_cnt;
/* size of each of the rcvhdrq entries */
- u16 rcvhdrqentsize;
+ u8 rcvhdrqentsize;
+ /* offset of RHF within receive header entry */
+ u8 rhf_offset;
+ /* dynamic receive available interrupt timeout */
+ u8 rcvavail_timeout;
+ /* Indicates that this is a vnic context */
+ bool is_vnic;
+ /* vnic queue index this context is mapped to */
+ u8 vnic_q_idx;
+ /* Is ASPM interrupt supported for this context */
+ bool aspm_intr_supported;
+ /* ASPM state (enabled/disabled) for this context */
+ bool aspm_enabled;
+ /* Is ASPM processing enabled for this context (in intr context) */
+ bool aspm_intr_enable;
+ struct ctxt_eager_bufs egrbufs;
+ /* QPs waiting for context processing */
+ struct list_head qp_wait_list;
+ /* tid allocation lists */
+ struct exp_tid_set tid_group_list;
+ struct exp_tid_set tid_used_list;
+ struct exp_tid_set tid_full_list;
+
+ /* Timer for re-enabling ASPM if interrupt activity quiets down */
+ struct timer_list aspm_timer;
+ /* per-context configuration flags */
+ unsigned long flags;
+ /* array of tid_groups */
+ struct tid_group *groups;
/* mmap of hdrq, must fit in 44 bits */
dma_addr_t rcvhdrq_dma;
dma_addr_t rcvhdrqtailaddr_dma;
- struct ctxt_eager_bufs egrbufs;
- /* this receive context's assigned PIO ACK send context */
- struct send_context *sc;
-
- /* dynamic receive available interrupt timeout */
- u32 rcvavail_timeout;
+ /* Last interrupt timestamp */
+ ktime_t aspm_ts_last_intr;
+ /* Last timestamp at which we scheduled a timer for this context */
+ ktime_t aspm_ts_timer_sched;
+ /* Lock to serialize between intr, timer intr and user threads */
+ spinlock_t aspm_lock;
/* Reference count the base context usage */
struct kref kref;
-
- /* Device context index */
- u16 ctxt;
- /*
- * non-zero if ctxt can be shared, and defines the maximum number of
- * sub-contexts for this device context.
- */
- u16 subctxt_cnt;
- /* non-zero if ctxt is being shared. */
- u16 subctxt_id;
- u8 uuid[16];
+ /* numa node of this context */
+ int numa_id;
+ /* associated msix interrupt. */
+ s16 msix_intr;
/* job key */
u16 jkey;
/* number of RcvArray groups for this context. */
@@ -238,87 +279,59 @@ struct hfi1_ctxtdata {
u16 expected_count;
/* index of first expected TID entry. */
u16 expected_base;
- /* array of tid_groups */
- struct tid_group *groups;
-
- struct exp_tid_set tid_group_list;
- struct exp_tid_set tid_used_list;
- struct exp_tid_set tid_full_list;
+ /* Device context index */
+ u8 ctxt;
- /* lock protecting all Expected TID data of user contexts */
+ /* PSM Specific fields */
+ /* lock protecting all Expected TID data */
struct mutex exp_mutex;
- /* per-context configuration flags */
- unsigned long flags;
- /* per-context event flags for fileops/intr communication */
- unsigned long event_flags;
- /* total number of polled urgent packets */
- u32 urgent;
- /* saved total number of polled urgent packets for poll edge trigger */
- u32 urgent_poll;
+ /* when waiting for rcv or pioavail */
+ wait_queue_head_t wait;
+ /* uuid from PSM */
+ u8 uuid[16];
/* same size as task_struct .comm[], command that opened context */
char comm[TASK_COMM_LEN];
- /* so file ops can get at unit */
- struct hfi1_devdata *dd;
- /* so functions that need physical port can get it easily */
- struct hfi1_pportdata *ppd;
- /* associated msix interrupt */
- u32 msix_intr;
+ /* Bitmask of in use context(s) */
+ DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
+ /* per-context event flags for fileops/intr communication */
+ unsigned long event_flags;
/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
void *subctxt_uregbase;
/* An array of pages for the eager receive buffers * N */
void *subctxt_rcvegrbuf;
/* An array of pages for the eager header queue entries * N */
void *subctxt_rcvhdr_base;
- /* Bitmask of in use context(s) */
- DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
- /* The version of the library which opened this ctxt */
- u32 userversion;
+ /* total number of polled urgent packets */
+ u32 urgent;
+ /* saved total number of polled urgent packets for poll edge trigger */
+ u32 urgent_poll;
/* Type of packets or conditions we want to poll for */
u16 poll_type;
- /* receive packet sequence counter */
- u8 seq_cnt;
- /* ctxt rcvhdrq head offset */
- u32 head;
- /* QPs waiting for context processing */
- struct list_head qp_wait_list;
- /* interrupt handling */
- u64 imask; /* clear interrupt mask */
- int ireg; /* clear interrupt register */
- int numa_id; /* numa node of this context */
- /* verbs rx_stats per rcd */
- struct hfi1_opcode_stats_perctx *opstats;
-
- /* Is ASPM interrupt supported for this context */
- bool aspm_intr_supported;
- /* ASPM state (enabled/disabled) for this context */
- bool aspm_enabled;
- /* Timer for re-enabling ASPM if interrupt activity quietens down */
- struct timer_list aspm_timer;
- /* Lock to serialize between intr, timer intr and user threads */
- spinlock_t aspm_lock;
- /* Is ASPM processing enabled for this context (in intr context) */
- bool aspm_intr_enable;
- /* Last interrupt timestamp */
- ktime_t aspm_ts_last_intr;
- /* Last timestamp at which we scheduled a timer for this context */
- ktime_t aspm_ts_timer_sched;
-
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_id;
+ /* The version of the library which opened this ctxt */
+ u32 userversion;
/*
- * The interrupt handler for a particular receive context can vary
- * throughout it's lifetime. This is not a lock protected data member so
- * it must be updated atomically and the prev and new value must always
- * be valid. Worst case is we process an extra interrupt and up to 64
- * packets with the wrong interrupt handler.
+ * non-zero if ctxt can be shared, and defines the maximum number of
+ * sub-contexts for this device context.
*/
- int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
-
- /* Indicates that this is vnic context */
- bool is_vnic;
+ u8 subctxt_cnt;
- /* vnic queue index this context is mapped to */
- u8 vnic_q_idx;
};
+/**
+ * rcvhdrq_size - return total size in bytes for header queue
+ * @rcd: the receive context
+ *
+ * rcvhdrqentsize is in DWs, so we have to convert to bytes
+ */
+static inline u32 rcvhdrq_size(struct hfi1_ctxtdata *rcd)
+{
+ return PAGE_ALIGN(rcd->rcvhdrq_cnt *
+ rcd->rcvhdrqentsize * sizeof(u32));
+}
+
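As a worked example of the DW-to-byte conversion (the numbers are
illustrative, not mandated by the patch): 2048 entries of 32 DWs each need
2048 * 32 * 4 = 262144 bytes, which is already page aligned:

	/* Hypothetical sizing check; 256 KiB on 4K-page systems. */
	static inline unsigned long example_rcvhdrq_bytes(void)
	{
		return PAGE_ALIGN(2048 * 32 * sizeof(u32));
	}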
/*
* Represents a single packet at a high level. Put commonly computed things in
* here so we do not have to keep doing them over and over. The rule of thumb is
@@ -897,12 +910,11 @@ struct hfi1_pportdata {
u64 vl_xmit_flit_cnt[C_VL_COUNT + 1];
};
-typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
-
typedef void (*opcode_handler)(struct hfi1_packet *packet);
typedef void (*hfi1_make_req)(struct rvt_qp *qp,
struct hfi1_pkt_state *ps,
struct rvt_swqe *wqe);
+extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
/* return values for the RHF receive functions */
@@ -1046,8 +1058,6 @@ struct hfi1_devdata {
dma_addr_t sdma_pad_phys;
/* for deallocation */
size_t sdma_heads_size;
- /* number from the chip */
- u32 chip_sdma_engines;
/* num used */
u32 num_sdma;
/* array of engines sized by num_sdma */
@@ -1102,8 +1112,6 @@ struct hfi1_devdata {
/* base receive interrupt timeout, in CSR units */
u32 rcv_intr_timeout_csr;
- u32 freezelen; /* max length of freezemsg */
- u64 __iomem *egrtidbase;
spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
spinlock_t uctxt_lock; /* protect rcd changes */
@@ -1130,25 +1138,6 @@ struct hfi1_devdata {
/* Base GUID for device (network order) */
u64 base_guid;
- /* these are the "32 bit" regs */
-
- /* value we put in kr_rcvhdrsize */
- u32 rcvhdrsize;
- /* number of receive contexts the chip supports */
- u32 chip_rcv_contexts;
- /* number of receive array entries */
- u32 chip_rcv_array_count;
- /* number of PIO send contexts the chip supports */
- u32 chip_send_contexts;
- /* number of bytes in the PIO memory buffer */
- u32 chip_pio_mem_size;
- /* number of bytes in the SDMA memory buffer */
- u32 chip_sdma_mem_size;
-
- /* size of each rcvegrbuffer */
- u32 rcvegrbufsize;
- /* log2 of above */
- u16 rcvegrbufsize_shift;
/* both sides of the PCIe link are gen3 capable */
u8 link_gen3_capable;
u8 dc_shutdown;
@@ -1221,9 +1210,6 @@ struct hfi1_devdata {
u32 num_msix_entries;
u32 first_dyn_msix_idx;
- /* INTx information */
- u32 requested_intx_irq; /* did we request one? */
-
/* general interrupt: mask of handled interrupts */
u64 gi_mask[CCE_NUM_INT_CSRS];
@@ -1289,8 +1275,6 @@ struct hfi1_devdata {
u64 sw_cce_err_status_aggregate;
/* Software counter that aggregates all bypass packet rcv errors */
u64 sw_rcv_bypass_packet_errors;
- /* receive interrupt function */
- rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
/* Save the enabled LCB error bits */
u64 lcb_err_en;
@@ -1329,10 +1313,7 @@ struct hfi1_devdata {
/* seqlock for sc2vl */
seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
u64 sc2vl[4];
- /* receive interrupt functions */
- rhf_rcv_function_ptr *rhf_rcv_function_map;
u64 __percpu *rcv_limit;
- u16 rhf_offset; /* offset of RHF within receive header entry */
/* adding a new field here would make it part of this cacheline */
/* OUI comes from the HW. Used everywhere as 3 separate bytes. */
@@ -1471,7 +1452,7 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp,
/* calculate the current RHF address */
static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
{
- return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->dd->rhf_offset;
+ return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset;
}
int hfi1_reset_device(int);
@@ -2021,12 +2002,6 @@ static inline void flush_wc(void)
}
void handle_eflags(struct hfi1_packet *packet);
-int process_receive_ib(struct hfi1_packet *packet);
-int process_receive_bypass(struct hfi1_packet *packet);
-int process_receive_error(struct hfi1_packet *packet);
-int kdeth_process_expected(struct hfi1_packet *packet);
-int kdeth_process_eager(struct hfi1_packet *packet);
-int process_receive_invalid(struct hfi1_packet *packet);
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);
/* global module parameter variables */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index f110842b91f5..758d273c32cf 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -364,9 +364,9 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
hfi1_exp_tid_group_init(rcd);
rcd->ppd = ppd;
rcd->dd = dd;
- __set_bit(0, rcd->in_use_ctxts);
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
+ rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
mutex_init(&rcd->exp_mutex);
@@ -404,6 +404,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
rcd->rcvhdrq_cnt = rcvhdrcnt;
rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
+ rcd->rhf_offset =
+ rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
/*
* Simple Eager buffer allocation: we have already pre-allocated
* the number of RcvArray entry groups. Each ctxtdata structure
@@ -853,24 +855,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
struct hfi1_ctxtdata *rcd;
struct hfi1_pportdata *ppd;
- /* Set up recv low level handlers */
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
- kdeth_process_expected;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
- kdeth_process_eager;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
- process_receive_error;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
- process_receive_bypass;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
- process_receive_invalid;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
- process_receive_invalid;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
- process_receive_invalid;
- dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
-
/* Set up send low level handlers */
dd->process_pio_send = hfi1_verbs_send_pio;
dd->process_dma_send = hfi1_verbs_send_dma;
@@ -936,7 +920,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
}
/* Allocate enough memory for user event notification. */
- len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
+ len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
sizeof(*dd->events));
dd->events = vmalloc_user(len);
if (!dd->events)
@@ -948,9 +932,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
dd->status = vmalloc_user(PAGE_SIZE);
if (!dd->status)
dd_dev_err(dd, "Failed to allocate dev status page\n");
- else
- dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
- sizeof(dd->status->freezemsg));
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
if (dd->status)
@@ -1144,7 +1125,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
return;
if (rcd->rcvhdrq) {
- dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
+ dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
rcd->rcvhdrq, rcd->rcvhdrq_dma);
rcd->rcvhdrq = NULL;
if (rcd->rcvhdrtail_kvaddr) {
@@ -1855,12 +1836,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
if (!rcd->rcvhdrq) {
gfp_t gfp_flags;
- /*
- * rcvhdrqentsize is in DWs, so we have to convert to bytes
- * (* sizeof(u32)).
- */
- amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
- sizeof(u32));
+ amt = rcvhdrq_size(rcd);
if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
gfp_flags = GFP_KERNEL;
@@ -1885,8 +1861,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
}
-
- rcd->rcvhdrq_size = amt;
}
/*
* These values are per-context:
@@ -1902,7 +1876,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
- reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
+ reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK)
<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
@@ -1938,9 +1912,9 @@ bail:
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
struct hfi1_devdata *dd = rcd->dd;
- u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
+ u32 max_entries, egrtop, alloced_bytes = 0;
gfp_t gfp_flags;
- u16 order;
+ u16 order, idx = 0;
int ret = 0;
u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 70aceefe14d5..e1c7996c018e 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -67,9 +67,9 @@ struct mmu_rb_handler {
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
-static void mmu_notifier_range_start(struct mmu_notifier *,
+static int mmu_notifier_range_start(struct mmu_notifier *,
struct mm_struct *,
- unsigned long, unsigned long);
+ unsigned long, unsigned long, bool);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
@@ -284,10 +284,11 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
handler->ops->remove(handler->ops_arg, node);
}
-static void mmu_notifier_range_start(struct mmu_notifier *mn,
+static int mmu_notifier_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct mmu_rb_handler *handler =
container_of(mn, struct mmu_rb_handler, mn);
@@ -313,6 +314,8 @@ static void mmu_notifier_range_start(struct mmu_notifier *mn,
if (added)
queue_work(handler->wq, &handler->del_work);
+
+ return 0;
}
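The int return and 'blockable' flag let a notifier refuse a non-blocking
invalidation; this handler never needs to sleep here, so it unconditionally
returns 0. A hedged sketch of what honoring the flag would look like
elsewhere (example_would_block() is hypothetical):

	static bool example_would_block(void)
	{
		return false;	/* hypothetical: does teardown need to sleep? */
	}

	static int example_range_start(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long start, unsigned long end,
				       bool blockable)
	{
		if (!blockable && example_would_block())
			return -EAGAIN;	/* caller retries from a blockable context */
		return 0;
	}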
/*
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 4d4371bf2c7c..eec83757d55f 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -157,6 +157,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
unsigned long len;
resource_size_t addr;
int ret = 0;
+ u32 rcv_array_count;
addr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
@@ -186,9 +187,9 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
goto nomem;
}
- dd->chip_rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
- dd_dev_info(dd, "RcvArray count: %u\n", dd->chip_rcv_array_count);
- dd->base2_start = RCV_ARRAY + dd->chip_rcv_array_count * 8;
+ rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
+ dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
+ dd->base2_start = RCV_ARRAY + rcv_array_count * 8;
dd->kregbase2 = ioremap_nocache(
addr + dd->base2_start,
@@ -214,13 +215,13 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
* to write an entire cacheline worth of entries in one shot.
*/
dd->rcvarray_wc = ioremap_wc(addr + RCV_ARRAY,
- dd->chip_rcv_array_count * 8);
+ rcv_array_count * 8);
if (!dd->rcvarray_wc) {
dd_dev_err(dd, "WC mapping of receive array failed\n");
goto nomem;
}
dd_dev_info(dd, "WC RcvArray: %p for %x\n",
- dd->rcvarray_wc, dd->chip_rcv_array_count * 8);
+ dd->rcvarray_wc, rcv_array_count * 8);
dd->flags |= HFI1_PRESENT; /* chip.c CSR routines now work */
return 0;
@@ -346,15 +347,13 @@ int pcie_speeds(struct hfi1_devdata *dd)
/*
* Returns:
* - actual number of interrupts allocated or
- * - 0 if fell back to INTx.
* - error
*/
int request_msix(struct hfi1_devdata *dd, u32 msireq)
{
int nvec;
- nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
- PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
+ nvec = pci_alloc_irq_vectors(dd->pcidev, msireq, msireq, PCI_IRQ_MSIX);
if (nvec < 0) {
dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
return nvec;
@@ -362,10 +361,6 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq)
tune_pcie_caps(dd);
- /* check for legacy IRQ */
- if (nvec == 1 && !dd->pcidev->msix_enabled)
- return 0;
-
return nvec;
}
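With min == max, pci_alloc_irq_vectors() is all-or-nothing: it returns
-ENOSPC instead of a reduced vector count, which is why the reduced-interrupt
branch was removed from set_up_interrupts() earlier in this series. A sketch
of the resulting contract for a hypothetical caller:

	static int example_alloc_vectors(struct hfi1_devdata *dd, u32 msireq)
	{
		int nvec = request_msix(dd, msireq);

		if (nvec < 0)
			return nvec;	/* error; there is no INTx fallback */
		return 0;		/* nvec == msireq is guaranteed here */
	}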
@@ -905,9 +900,7 @@ static int trigger_sbr(struct hfi1_devdata *dd)
* delay after a reset is required. Per spec requirements,
* the link is either working or not after that point.
*/
- pci_reset_bridge_secondary_bus(dev->bus->self);
-
- return 0;
+ return pci_reset_bus(dev);
}
/*
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 9cac15d10c4f..c2c1cba5b23b 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -226,7 +226,7 @@ static const char *sc_type_name(int index)
int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
{
struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
- int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
+ int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1;
int total_contexts = 0;
int fixed_blocks;
int pool_blocks;
@@ -343,8 +343,8 @@ int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
sc_type_name(i), count);
return -EINVAL;
}
- if (total_contexts + count > dd->chip_send_contexts)
- count = dd->chip_send_contexts - total_contexts;
+ if (total_contexts + count > chip_send_contexts(dd))
+ count = chip_send_contexts(dd) - total_contexts;
total_contexts += count;
@@ -507,7 +507,7 @@ static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
if (sci->type == type && sci->allocated == 0) {
sci->allocated = 1;
/* use a 1:1 mapping, but make them non-equal */
- context = dd->chip_send_contexts - index - 1;
+ context = chip_send_contexts(dd) - index - 1;
dd->hw_to_sw[context] = index;
*sw_index = index;
*hw_context = context;
@@ -1618,11 +1618,11 @@ static void sc_piobufavail(struct send_context *sc)
/* Wake up the most starved one first */
if (n)
hfi1_qp_wakeup(qps[max_idx],
- RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
+ RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
for (i = 0; i < n; i++)
if (i != max_idx)
hfi1_qp_wakeup(qps[i],
- RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
+ RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
}
/* translate a send credit update to a bit code of reasons */
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 1697d96151bd..9b1e84a6b1cc 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -273,7 +273,7 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_PATH_MIG_STATE &&
attr->path_mig_state == IB_MIG_MIGRATED &&
qp->s_mig_state == IB_MIG_ARMED) {
- qp->s_flags |= RVT_S_AHG_CLEAR;
+ qp->s_flags |= HFI1_S_AHG_CLEAR;
priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
@@ -717,7 +717,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp)
qp->remote_ah_attr = qp->alt_ah_attr;
qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
qp->s_pkey_index = qp->s_alt_pkey_index;
- qp->s_flags |= RVT_S_AHG_CLEAR;
+ qp->s_flags |= HFI1_S_AHG_CLEAR;
priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
qp_set_16b(qp);
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index b2d4cba8d15b..078cff7560b6 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -1,7 +1,7 @@
#ifndef _QP_H
#define _QP_H
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -70,6 +70,26 @@ static inline int hfi1_send_ok(struct rvt_qp *qp)
}
/*
+ * Driver specific s_flags starting at bit 31 down to HFI1_S_MIN_BIT_MASK
+ *
+ * HFI1_S_AHG_VALID - ahg header valid on chip
+ * HFI1_S_AHG_CLEAR - have send engine clear ahg state
+ * HFI1_S_WAIT_PIO_DRAIN - qp waiting for PIOs to drain
+ * HFI1_S_MIN_BIT_MASK - the lowest bit that can be used by hfi1
+ */
+#define HFI1_S_AHG_VALID 0x80000000
+#define HFI1_S_AHG_CLEAR 0x40000000
+#define HFI1_S_WAIT_PIO_DRAIN 0x20000000
+#define HFI1_S_MIN_BIT_MASK 0x01000000
+
+/*
+ * overload wait defines
+ */
+
+#define HFI1_S_ANY_WAIT_IO (RVT_S_ANY_WAIT_IO | HFI1_S_WAIT_PIO_DRAIN)
+#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
+
+/*
* free_ahg - clear ahg from QP
*/
static inline void clear_ahg(struct rvt_qp *qp)
@@ -77,7 +97,7 @@ static inline void clear_ahg(struct rvt_qp *qp)
struct hfi1_qp_priv *priv = qp->priv;
priv->s_ahg->ahgcount = 0;
- qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR);
+ qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
if (priv->s_sde && qp->s_ahgidx >= 0)
sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
qp->s_ahgidx = -1;
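
The new HFI1_S_* block claims the top bits of qp->s_flags for driver-private state: HFI1_S_AHG_VALID is bit 31, HFI1_S_AHG_CLEAR bit 30, HFI1_S_WAIT_PIO_DRAIN bit 29, and HFI1_S_MIN_BIT_MASK (bit 24) marks the floor of the hfi1 range, with everything below left to rdmavt. A compile-time guard along these lines would catch future collisions; the macro name on the rdmavt side is hypothetical, not something this patch adds:

    /* hypothetical: ensure rdmavt's generic flags stay below bit 24 */
    BUILD_BUG_ON(RVT_S_MAX_BIT_MASK >= HFI1_S_MIN_BIT_MASK);

The HFI1_S_ANY_WAIT_IO / HFI1_S_ANY_WAIT composites preserve the old "any wait" semantics for callers such as ruc_loopback() in ruc.c below.
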
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 1a1a47ac53c6..9bd63abb2dfe 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -241,7 +241,7 @@ bail:
smp_wmb();
qp->s_flags &= ~(RVT_S_RESP_PENDING
| RVT_S_ACK_PENDING
- | RVT_S_AHG_VALID);
+ | HFI1_S_AHG_VALID);
return 0;
}
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
lockdep_assert_held(&qp->s_lock);
ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
+ if (!ps->s_txreq)
goto bail_no_tx;
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
@@ -1024,7 +1024,7 @@ done:
if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
(cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
qp->s_flags |= RVT_S_WAIT_PSN;
- qp->s_flags &= ~RVT_S_AHG_VALID;
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
}
/*
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index ef4c566e206f..5f56f3c1b4c4 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -194,7 +194,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
spin_lock_irqsave(&sqp->s_lock, flags);
/* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
+ if ((sqp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT)) ||
!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
goto unlock;
@@ -533,9 +533,9 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
struct hfi1_qp_priv *priv = qp->priv;
- if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
+ if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
clear_ahg(qp);
- if (!(qp->s_flags & RVT_S_AHG_VALID)) {
+ if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
/* first middle that needs copy */
if (qp->s_ahgidx < 0)
qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
@@ -544,7 +544,7 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
/* save to protect a change in another thread */
priv->s_ahg->ahgidx = qp->s_ahgidx;
- qp->s_flags |= RVT_S_AHG_VALID;
+ qp->s_flags |= HFI1_S_AHG_VALID;
}
} else {
/* subsequent middle after valid */
@@ -650,7 +650,7 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
if (middle)
build_ahg(qp, bth2);
else
- qp->s_flags &= ~RVT_S_AHG_VALID;
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
bth0 |= pkey;
bth0 |= extra_bytes << 20;
@@ -727,7 +727,7 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
if (middle)
build_ahg(qp, bth2);
else
- qp->s_flags &= ~RVT_S_AHG_VALID;
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
bth0 |= pkey;
bth0 |= extra_bytes << 20;
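
build_ahg() above is a small state machine over the new flags: a pending HFI1_S_AHG_CLEAR forces clear_ahg() first; if no AHG header is valid yet, the first middle packet allocates an AHG index from the SDMA engine, requests a full header copy via SDMA_TXREQ_F_AHG_COPY, and sets HFI1_S_AHG_VALID; later middles only patch the fields that change. In outline, under the same assumptions:

    if (qp->s_flags & HFI1_S_AHG_CLEAR)
            clear_ahg(qp);                   /* drop stale chip-side header */
    if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
            /* first middle: allocate an index, copy the full header */
            qp->s_flags |= HFI1_S_AHG_VALID;
    } else {
            /* subsequent middles: update only PSN-style deltas */
    }
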
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 7fb350b87b49..88e326d6cc49 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1351,7 +1351,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
struct hfi1_pportdata *ppd = dd->pport + port;
u32 per_sdma_credits;
uint idle_cnt = sdma_idle_cnt;
- size_t num_engines = dd->chip_sdma_engines;
+ size_t num_engines = chip_sdma_engines(dd);
int ret = -ENOMEM;
if (!HFI1_CAP_IS_KSET(SDMA)) {
@@ -1360,18 +1360,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
}
if (mod_num_sdma &&
/* can't exceed chip support */
- mod_num_sdma <= dd->chip_sdma_engines &&
+ mod_num_sdma <= chip_sdma_engines(dd) &&
/* count must be >= vls */
mod_num_sdma >= num_vls)
num_engines = mod_num_sdma;
dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
- dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
+ dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd));
dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
- dd->chip_sdma_mem_size);
+ chip_sdma_mem_size(dd));
per_sdma_credits =
- dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);
+ chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE);
/* set up freeze waitqueue */
init_waitqueue_head(&dd->sdma_unfreeze_wq);
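
The sdma.c hunk finishes the accessor conversion and keeps the same credit split per engine: per_sdma_credits = chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE). With illustrative numbers only (the real sizes are chip-specific):

    /* example arithmetic, not the hardware's actual values */
    u32 mem_size    = 2 * 1024 * 1024;     /* assume 2 MiB of SDMA memory */
    u32 num_engines = 16;
    u32 block_size  = 64;                  /* assume SDMA_BLOCK_SIZE of 64 */
    u32 credits     = mem_size / (num_engines * block_size);   /* 2048 */

Fewer engines (via mod_num_sdma) therefore means more credits per engine, which is why the module parameter is clamped between num_vls and chip_sdma_engines(dd).
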
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index b7b671017e59..e254dcec6f64 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
int middle = 0;
ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
+ if (!ps->s_txreq)
goto bail_no_tx;
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 1ab332f1866e..70d39fc450a1 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
u32 lid;
ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
+ if (!ps->s_txreq)
goto bail_no_tx;
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 08991874c0e2..13374c727b14 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1007,7 +1007,7 @@ static int pio_wait(struct rvt_qp *qp,
int was_empty;
dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
- dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
+ dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN);
qp->s_flags |= flag;
was_empty = list_empty(&sc->piowait);
iowait_queue(ps->pkts_sent, &priv->s_iowait,
@@ -1376,7 +1376,7 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
return pio_wait(qp,
ps->s_txreq->psc,
ps,
- RVT_S_WAIT_PIO_DRAIN);
+ HFI1_S_WAIT_PIO_DRAIN);
return sr(qp, ps, 0);
}
@@ -1410,7 +1410,8 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
rdi->dparms.props.max_qp = hfi1_max_qps;
rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
- rdi->dparms.props.max_sge = hfi1_max_sges;
+ rdi->dparms.props.max_send_sge = hfi1_max_sges;
+ rdi->dparms.props.max_recv_sge = hfi1_max_sges;
rdi->dparms.props.max_sge_rd = hfi1_max_sges;
rdi->dparms.props.max_cq = hfi1_max_cqs;
rdi->dparms.props.max_ah = hfi1_max_ahs;
@@ -1497,15 +1498,6 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
- /*
- * sm_lid of 0xFFFF needs special handling so that it can
- * be differentiated from a permissve LID of 0xFFFF.
- * We set the grh_required flag here so the SA can program
- * the DGID in the address handle appropriately
- */
- if (props->sm_lid == be16_to_cpu(IB_LID_PERMISSIVE))
- props->grh_required = true;
-
return 0;
}
@@ -1892,7 +1884,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
ibdev->process_mad = hfi1_process_mad;
ibdev->get_dev_fw_str = hfi1_get_dev_fw_str;
- strncpy(ibdev->node_desc, init_utsname()->nodename,
+ strlcpy(ibdev->node_desc, init_utsname()->nodename,
sizeof(ibdev->node_desc));
/*
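
Two independent fixes land in the verbs.c hunks: the single max_sge attribute is split into max_send_sge and max_recv_sge to match the updated ib_device_attr, and node_desc is now copied with strlcpy(), which always NUL-terminates, where strncpy() can leave an unterminated buffer when the nodename fills it. A minimal contrast, with names chosen for illustration:

    char desc[64];
    strncpy(desc, src, sizeof(desc));  /* no NUL if strlen(src) >= 64 */
    strlcpy(desc, src, sizeof(desc));  /* truncates and always terminates */

The grh_required block removed from query_port() goes along with the core taking over the permissive SM LID special case itself.
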
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 873e48ea923f..c4ab2d5b4502 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 - 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp)
__must_hold(&qp->s_lock)
{
- struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+ struct verbs_txreq *tx = NULL;
write_seqlock(&dev->txwait_lock);
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 729244c3086c..1c19bbc764b2 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
if (unlikely(!tx)) {
/* call slow path to get the lock */
tx = __get_txreq(dev, qp);
- if (IS_ERR(tx))
+ if (!tx)
return tx;
}
tx->qp = qp;
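
__get_txreq() and its get_txreq() fast path now report allocation failure with a plain NULL instead of ERR_PTR(-EBUSY), and the RC/UC/UD request builders above test !ps->s_txreq accordingly. Since the only failure mode is "no txreq available right now", the NULL convention is simpler and avoids running IS_ERR() on a pointer that never carries a real errno. Caller shape after the change, as in hfi1_make_rc_req():

    ps->s_txreq = get_txreq(ps->dev, qp);
    if (!ps->s_txreq)
            goto bail_no_tx;   /* QP is queued for retry; nothing to propagate */
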
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index 5d65582fe4d9..c643d80c5a53 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -120,8 +120,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
uctxt->seq_cnt = 1;
uctxt->is_vnic = true;
- if (dd->num_msix_entries)
- hfi1_set_vnic_msix_info(uctxt);
+ hfi1_set_vnic_msix_info(uctxt);
hfi1_stats.sps_ctxts++;
dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
@@ -136,8 +135,7 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
flush_wc();
- if (dd->num_msix_entries)
- hfi1_reset_vnic_msix_info(uctxt);
+ hfi1_reset_vnic_msix_info(uctxt);
/*
* Disable receive context and interrupt available, reset all
@@ -423,7 +421,7 @@ tx_finish:
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
@@ -818,14 +816,14 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
- dd->chip_sdma_engines, dd->num_vnic_contexts);
+ chip_sdma_engines(dd), dd->num_vnic_contexts);
if (!netdev)
return ERR_PTR(-ENOMEM);
rn = netdev_priv(netdev);
vinfo = opa_vnic_dev_priv(netdev);
vinfo->dd = dd;
- vinfo->num_tx_q = dd->chip_sdma_engines;
+ vinfo->num_tx_q = chip_sdma_engines(dd);
vinfo->num_rx_q = dd->num_vnic_contexts;
vinfo->netdev = netdev;
rn->free_rdma_netdev = hfi1_vnic_free_rn;
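
In vnic_main.c the MSI-X guard around hfi1_set_vnic_msix_info()/hfi1_reset_vnic_msix_info() is dropped, so the helpers are expected to cope on their own when no MSI-X entries exist, and hfi1_vnic_select_queue() is updated for the kernel-wide ndo_select_queue signature change that replaced the opaque accel_priv pointer with a struct net_device *sb_dev for subordinate devices. The new hook shape, sketched for a hypothetical driver:

    static u16 my_select_queue(struct net_device *netdev, struct sk_buff *skb,
                               struct net_device *sb_dev,
                               select_queue_fallback_t fallback)
    {
            /* pick a TX queue; sb_dev is set only for subordinate devices */
            return 0;
    }
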
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index d74928621559..0d96c5bb38cd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -44,13 +44,11 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
+ const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
- struct ib_gid_attr gid_attr;
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
- union ib_gid sgid;
- int ret;
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
@@ -59,18 +57,9 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
/* Get mac address */
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
- /* Get source gid */
- ret = ib_get_cached_gid(ibpd->device, rdma_ah_get_port_num(ah_attr),
- grh->sgid_index, &sgid, &gid_attr);
- if (ret) {
- dev_err(dev, "get sgid failed! ret = %d\n", ret);
- kfree(ah);
- return ERR_PTR(ret);
- }
-
- if (is_vlan_dev(gid_attr.ndev))
- vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
- dev_put(gid_attr.ndev);
+ gid_attr = ah_attr->grh.sgid_attr;
+ if (is_vlan_dev(gid_attr->ndev))
+ vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) &
@@ -108,7 +97,7 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate);
rdma_ah_set_grh(ah_attr, NULL,
(le32_to_cpu(ah->av.sl_tclass_flowlabel) &
- HNS_ROCE_FLOW_LABLE_MASK), ah->av.gid_index,
+ HNS_ROCE_FLOW_LABEL_MASK), ah->av.gid_index,
ah->av.hop_limit,
(le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
HNS_ROCE_TCLASS_SHIFT));
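
The hns_roce_create_ah() rewrite relies on the rdma core resolving and pinning the source GID entry before the driver sees the attribute: ah_attr->grh.sgid_attr is valid for the duration of the call, so the ib_get_cached_gid()/dev_put() pair and its error path disappear. The VLAN lookup reduces to:

    const struct ib_gid_attr *gid_attr = ah_attr->grh.sgid_attr;

    if (is_vlan_dev(gid_attr->ndev))
            vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);

The FLOW_LABLE to FLOW_LABEL rename in hns_roce_query_ah() is a pure spelling fix of the mask macro, completed in hns_roce_device.h below.
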
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 319cb74aebaf..93d4b4ec002d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -382,15 +382,6 @@
#define ROCEE_VF_EQ_DB_CFG0_REG 0x238
#define ROCEE_VF_EQ_DB_CFG1_REG 0x23C
-#define ROCEE_VF_SMAC_CFG0_REG 0x12000
-#define ROCEE_VF_SMAC_CFG1_REG 0x12004
-
-#define ROCEE_VF_SGID_CFG0_REG 0x10000
-#define ROCEE_VF_SGID_CFG1_REG 0x10004
-#define ROCEE_VF_SGID_CFG2_REG 0x10008
-#define ROCEE_VF_SGID_CFG3_REG 0x1000c
-#define ROCEE_VF_SGID_CFG4_REG 0x10010
-
#define ROCEE_VF_ABN_INT_CFG_REG 0x13000
#define ROCEE_VF_ABN_INT_ST_REG 0x13004
#define ROCEE_VF_ABN_INT_EN_REG 0x13008
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index ebee2782a573..e2f93c1ce86a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -41,6 +41,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
found:
db->dma = sg_dma_address(page->umem->sg_head.sgl) +
(virt & ~PAGE_MASK);
+ page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
+ db->virt_addr = sg_virt(page->umem->sg_head.sgl);
db->u.user_page = page;
refcount_inc(&page->refcount);
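
hns_roce_db_map_user() now also records a kernel virtual address for the user doorbell page. Setting sgl->offset to the page offset first matters because sg_virt() folds the offset into its result:

    /* sg_virt(sgl) == page_address(sg_page(sgl)) + sgl->offset */
    db->virt_addr = sg_virt(page->umem->sg_head.sgl);

Without the offset fixup the driver would point at the start of the page rather than at the doorbell itself.
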
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 31221d506d9a..9a24fd0ee3e7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -76,7 +76,7 @@
/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT 28
#define HNS_ROCE_TCLASS_SHIFT 20
-#define HNS_ROCE_FLOW_LABLE_MASK 0xfffff
+#define HNS_ROCE_FLOW_LABEL_MASK 0xfffff
#define HNS_ROCE_MAX_PORTS 6
#define HNS_ROCE_MAX_GID_NUM 16
@@ -110,6 +110,7 @@
enum {
HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
+ HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
};
enum {
@@ -190,7 +191,8 @@ enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
- HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3)
+ HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
+ HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
};
enum hns_roce_mtt_type {
@@ -385,6 +387,7 @@ struct hns_roce_db {
struct hns_roce_user_db_page *user_page;
} u;
dma_addr_t dma;
+ void *virt_addr;
int index;
int order;
};
@@ -524,7 +527,9 @@ struct hns_roce_qp {
struct hns_roce_buf hr_buf;
struct hns_roce_wq rq;
struct hns_roce_db rdb;
+ struct hns_roce_db sdb;
u8 rdb_en;
+ u8 sdb_en;
u32 doorbell_qpn;
__le32 sq_signal_bits;
u32 sq_next_wqe;
@@ -579,22 +584,22 @@ struct hns_roce_ceqe {
};
struct hns_roce_aeqe {
- u32 asyn;
+ __le32 asyn;
union {
struct {
- u32 qp;
+ __le32 qp;
u32 rsv0;
u32 rsv1;
} qp_event;
struct {
- u32 cq;
+ __le32 cq;
u32 rsv0;
u32 rsv1;
} cq_event;
struct {
- u32 ceqe;
+ __le32 ceqe;
u32 rsv0;
u32 rsv1;
} ce_event;
@@ -641,6 +646,8 @@ struct hns_roce_eq {
int shift;
dma_addr_t cur_eqe_ba;
dma_addr_t nxt_eqe_ba;
+ int event_type;
+ int sub_type;
};
struct hns_roce_eq_table {
@@ -720,10 +727,21 @@ struct hns_roce_caps {
u32 eqe_ba_pg_sz;
u32 eqe_buf_pg_sz;
u32 eqe_hop_num;
+ u32 sl_num;
+ u32 tsq_buf_pg_sz;
+ u32 tpq_buf_pg_sz;
u32 chunk_sz; /* chunk size in non multihop mode*/
u64 flags;
};
+struct hns_roce_work {
+ struct hns_roce_dev *hr_dev;
+ struct work_struct work;
+ u32 qpn;
+ int event_type;
+ int sub_type;
+};
+
struct hns_roce_hw {
int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
int (*cmq_init)(struct hns_roce_dev *hr_dev);
@@ -736,7 +754,7 @@ struct hns_roce_hw {
u16 token, int event);
int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
- union ib_gid *gid, const struct ib_gid_attr *attr);
+ const union ib_gid *gid, const struct ib_gid_attr *attr);
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
@@ -760,10 +778,10 @@ struct hns_roce_hw {
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
int (*destroy_qp)(struct ib_qp *ibqp);
- int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
- int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
+ int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+ int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
@@ -816,6 +834,7 @@ struct hns_roce_dev {
u32 tptr_size; /*only for hw v1*/
const struct hns_roce_hw *hw;
void *priv;
+ struct workqueue_struct *irq_workq;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -864,7 +883,7 @@ static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
}
-static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
+static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
__raw_writeq(*(u64 *) val, dest);
}
@@ -982,7 +1001,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
int cnt);
-__be32 send_ieth(struct ib_send_wr *wr);
+__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
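
The hns_roce_device.h changes are mostly type hygiene plus new state: hardware-visible words in hns_roce_aeqe and the write64 helper gain __le32 so sparse can check byte order, post_send/post_recv take const work-request pointers to match the updated verbs prototypes, and the new hns_roce_work / irq_workq additions set up deferred handling of async events on a workqueue. The endianness annotations follow the usual kernel pattern:

    __le32 wire = cpu_to_le32(val);   /* typed as little-endian for sparse */
    u32 host    = le32_to_cpu(wire);  /* back to CPU order before use */

Mixing the two types without a conversion now produces a sparse warning instead of a silent big-endian bug.
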
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 63b5b3edabcb..f6faefed96e8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -170,7 +170,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
case 3:
mhop->l2_idx = table_idx & (chunk_ba_num - 1);
mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
- mhop->l0_idx = table_idx / chunk_ba_num / chunk_ba_num;
+ mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
break;
case 2:
mhop->l1_idx = table_idx & (chunk_ba_num - 1);
@@ -342,7 +342,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
} else {
break;
}
- msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+ mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
}
bt_cmd_l = (u32)bt_ba;
@@ -494,6 +494,9 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
step_idx = 1;
} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
step_idx = 0;
+ } else {
+ ret = -EINVAL;
+ goto err_dma_alloc_l1;
}
/* set HEM base address to hardware */
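
Two fixes in hns_roce_hem.c: hns_roce_set_hem() polls with mdelay() rather than msleep() because the wait runs with a spinlock held, where sleeping is not allowed, and hns_roce_table_mhop_get() gains an explicit else branch so an unexpected hop_num fails with -EINVAL instead of programming the hardware with an uninitialized step_idx. The parenthesization change in hns_roce_calc_hem_mhop() is cosmetic; unsigned integer division is left-associative either way:

    /* (a / b) / c == a / b / c for integer division in C */
    mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
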
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 8013d69c5ac4..081aa91fc162 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -58,8 +58,9 @@ static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
rseg->len = 0;
}
-static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int hns_roce_v1_post_send(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
@@ -173,12 +174,14 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_FLOW_LABEL_M,
- UD_SEND_WQE_U32_36_FLOW_LABEL_S, 0);
+ UD_SEND_WQE_U32_36_FLOW_LABEL_S,
+ ah->av.sl_tclass_flowlabel &
+ HNS_ROCE_FLOW_LABEL_MASK);
roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_PRIORITY_M,
- UD_SEND_WQE_U32_36_PRIORITY_S,
- ah->av.sl_tclass_flowlabel >>
- HNS_ROCE_SL_SHIFT);
+ UD_SEND_WQE_U32_36_PRIORITY_M,
+ UD_SEND_WQE_U32_36_PRIORITY_S,
+ le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
+ HNS_ROCE_SL_SHIFT);
roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_SGID_INDEX_M,
UD_SEND_WQE_U32_36_SGID_INDEX_S,
@@ -191,7 +194,9 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ah->av.hop_limit);
roce_set_field(ud_sq_wqe->u32_40,
UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
- UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, 0);
+ UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
+ ah->av.sl_tclass_flowlabel >>
+ HNS_ROCE_TCLASS_SHIFT);
memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
@@ -333,7 +338,7 @@ out:
doorbell[0] = le32_to_cpu(sq_db.u32_4);
doorbell[1] = le32_to_cpu(sq_db.u32_8);
- hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
+ hns_roce_write64_k((__le32 *)doorbell, qp->sq.db_reg_l);
qp->sq_next_wqe = ind;
}
@@ -342,14 +347,15 @@ out:
return ret;
}
-static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int ret = 0;
int nreq = 0;
int ind = 0;
int i = 0;
- u32 reg_val = 0;
+ u32 reg_val;
unsigned long flags = 0;
struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
struct hns_roce_wqe_data_seg *scat = NULL;
@@ -402,14 +408,18 @@ out:
wmb();
if (ibqp->qp_type == IB_QPT_GSI) {
+ __le32 tmp;
+
/* SW update GSI rq header */
reg_val = roce_read(to_hr_dev(ibqp->device),
ROCEE_QP1C_CFG3_0_REG +
QP1C_CFGN_OFFSET * hr_qp->phy_port);
- roce_set_field(reg_val,
+ tmp = cpu_to_le32(reg_val);
+ roce_set_field(tmp,
ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
hr_qp->rq.head);
+ reg_val = le32_to_cpu(tmp);
roce_write(to_hr_dev(ibqp->device),
ROCEE_QP1C_CFG3_0_REG +
QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
@@ -430,7 +440,8 @@ out:
doorbell[0] = le32_to_cpu(rq_db.u32_4);
doorbell[1] = le32_to_cpu(rq_db.u32_8);
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ hns_roce_write64_k((__le32 *)doorbell,
+ hr_qp->rq.db_reg_l);
}
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -441,51 +452,63 @@ out:
static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
int sdb_mode, int odb_mode)
{
+ __le32 tmp;
u32 val;
val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
- roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
+ tmp = cpu_to_le32(val);
+ roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
+ roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
u32 odb_mode)
{
+ __le32 tmp;
u32 val;
/* Configure SDB/ODB extend mode */
val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- roce_set_bit(val, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
- roce_set_bit(val, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
+ tmp = cpu_to_le32(val);
+ roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
+ roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
u32 sdb_alful)
{
+ __le32 tmp;
u32 val;
/* Configure SDB */
val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
- roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
- roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
+ roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}
static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
u32 odb_alful)
{
+ __le32 tmp;
u32 val;
/* Configure ODB */
val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
- roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
- roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
+ roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}
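
The long run of hw_v1.c hunks above and below applies one mechanical transformation: roce_read() returns a CPU-order u32, but roce_set_field()/roce_set_bit() are defined over little-endian words, so every register read-modify-write now round-trips through a typed temporary. The pattern, with REG/MASK/SHIFT as placeholders:

    u32 val = roce_read(hr_dev, REG);    /* CPU order from the MMIO helper */
    __le32 tmp = cpu_to_le32(val);       /* give the bit helpers their type */
    roce_set_field(tmp, MASK, SHIFT, x);
    val = le32_to_cpu(tmp);
    roce_write(hr_dev, REG, val);

On a little-endian host this compiles to the same code as before; the payoff is that sparse can now flag any direct mixing of wire-order and host-order values.
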
@@ -496,6 +519,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
struct hns_roce_v1_priv *priv;
struct hns_roce_db_table *db;
dma_addr_t sdb_dma_addr;
+ __le32 tmp;
u32 val;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
@@ -511,7 +535,8 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
/* Configure extend SDB depth */
val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
- roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
db->ext_db->esdb_dep);
/*
@@ -519,8 +544,9 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
* using 4K page, and shift more 32 because of
* caculating the high 32 bit value evaluated to hardware.
*/
- roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
+ roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
@@ -535,6 +561,7 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
struct hns_roce_v1_priv *priv;
struct hns_roce_db_table *db;
dma_addr_t odb_dma_addr;
+ __le32 tmp;
u32 val;
priv = (struct hns_roce_v1_priv *)hr_dev->priv;
@@ -550,12 +577,14 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
/* Configure extend ODB depth */
val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
- roce_set_field(val, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
db->ext_db->eodb_dep);
- roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
+ roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
db->ext_db->eodb_dep);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);
dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
@@ -762,6 +791,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
if (!free_mr->mr_free_qp[i]) {
dev_err(dev, "Create loop qp failed!\n");
+ ret = -ENOMEM;
goto create_lp_qp_failed;
}
hr_qp = free_mr->mr_free_qp[i];
@@ -831,7 +861,7 @@ alloc_pd_failed:
if (hns_roce_ib_destroy_cq(cq))
dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
- return -EINVAL;
+ return ret;
}
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
@@ -969,7 +999,8 @@ static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
struct device *dev = &hr_dev->pdev->dev;
- struct ib_send_wr send_wr, *bad_wr;
+ struct ib_send_wr send_wr;
+ const struct ib_send_wr *bad_wr;
int ret;
memset(&send_wr, 0, sizeof(send_wr));
@@ -1161,9 +1192,10 @@ static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
int ret;
+ u32 val;
+ __le32 tmp;
int raq_shift = 0;
dma_addr_t addr;
- u32 val;
struct hns_roce_v1_priv *priv;
struct hns_roce_raq_table *raq;
struct device *dev = &hr_dev->pdev->dev;
@@ -1189,46 +1221,54 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
/* Configure raq_shift */
raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
- roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
/*
* 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
* using 4K page, and shift more 32 because of
* caculating the high 32 bit value evaluated to hardware.
*/
- roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
+ roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
raq->e_raq_buf->map >> 44);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);
/* Configure raq threshold */
val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
- roce_set_field(val, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
HNS_ROCE_V1_EXT_RAQ_WF);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);
/* Enable extend raq */
val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
- roce_set_field(val,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp,
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
POL_TIME_INTERVAL_VAL);
- roce_set_bit(val, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
- roce_set_field(val,
+ roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
+ roce_set_field(tmp,
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
2);
- roce_set_bit(val,
+ roce_set_bit(tmp,
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);
/* Enable raq drop */
val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- roce_set_bit(val, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
+ tmp = cpu_to_le32(val);
+ roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);
@@ -1255,20 +1295,25 @@ static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
+ __le32 tmp;
u32 val;
if (enable_flag) {
val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
/* Open all ports */
- roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
ALL_PORT_VAL_OPEN);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
} else {
val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
/* Close all ports */
- roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
}
@@ -1435,7 +1480,7 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
}
fwnode = &dsaf_node->fwnode;
} else if (is_acpi_device_node(dev->fwnode)) {
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
ret = acpi_node_get_property_reference(dev->fwnode,
"dsaf-handle", 0, &args);
@@ -1443,7 +1488,7 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
dev_err(dev, "could not find dsaf-handle\n");
return ret;
}
- fwnode = acpi_fwnode_handle(args.adev);
+ fwnode = args.fwnode;
} else {
dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO;
@@ -1498,13 +1543,11 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
int i = 0;
struct hns_roce_caps *caps = &hr_dev->caps;
- hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
- hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
- ROCEE_VENDOR_PART_ID_REG));
- hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
- ROCEE_SYS_IMAGE_GUID_L_REG)) |
- ((u64)le32_to_cpu(roce_read(hr_dev,
- ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
+ hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
+ hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
+ hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
+ ((u64)roce_read(hr_dev,
+ ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
hr_dev->hw_rev = HNS_ROCE_HW_VER1;
caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
@@ -1557,8 +1600,7 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
- caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
- ROCEE_ACK_DELAY_REG));
+ caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
caps->max_mtu = IB_MTU_2048;
return 0;
@@ -1568,21 +1610,25 @@ static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
int ret;
u32 val;
+ __le32 tmp;
struct device *dev = &hr_dev->pdev->dev;
/* DMAE user config */
val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
- roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
- roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
+ roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
1 << PAGES_SHIFT_16);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
- roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
- roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
+ roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
1 << PAGES_SHIFT_16);
@@ -1668,6 +1714,7 @@ static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
unsigned long end;
u32 val = 0;
+ __le32 tmp;
end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
while (hns_roce_v1_cmd_pending(hr_dev)) {
@@ -1679,15 +1726,17 @@ static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
cond_resched();
}
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
op);
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
+ roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
- roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
- roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
+ roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
+ roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
+ roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
+ val = le32_to_cpu(tmp);
writeq(in_param, hcr + 0);
writeq(out_param, hcr + 2);
writel(in_modifier, hcr + 4);
@@ -1717,7 +1766,7 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
return -ETIMEDOUT;
}
- status = le32_to_cpu((__force __be32)
+ status = le32_to_cpu((__force __le32)
__raw_readl(hcr + HCR_STATUS_OFFSET));
if ((status & STATUS_MASK) != 0x1) {
dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
@@ -1728,7 +1777,7 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
- int gid_index, union ib_gid *gid,
+ int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
u32 *p = NULL;
@@ -1760,6 +1809,7 @@ static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
{
u32 reg_smac_l;
u16 reg_smac_h;
+ __le32 tmp;
u16 *p_h;
u32 *p;
u32 val;
@@ -1784,10 +1834,12 @@ static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
val = roce_read(hr_dev,
ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
+ tmp = cpu_to_le32(val);
p_h = (u16 *)(&addr[4]);
reg_smac_h = *p_h;
- roce_set_field(val, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
+ roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
val);
@@ -1797,12 +1849,15 @@ static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu)
{
+ __le32 tmp;
u32 val;
val = roce_read(hr_dev,
ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
- roce_set_field(val, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
+ tmp = cpu_to_le32(val);
+ roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
+ val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
val);
}
@@ -1848,9 +1903,9 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
- mpt_entry->virt_addr_l = (u32)mr->iova;
- mpt_entry->virt_addr_h = (u32)(mr->iova >> 32);
- mpt_entry->length = (u32)mr->size;
+ mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
+ mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
+ mpt_entry->length = cpu_to_le32((u32)mr->size);
roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
MPT_BYTE_28_PD_S, mr->pd);
@@ -1885,64 +1940,59 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_field(mpt_entry->mpt_byte_36,
MPT_BYTE_36_PA0_H_M,
MPT_BYTE_36_PA0_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
+ (u32)(pages[i] >> PAGES_SHIFT_32));
break;
case 1:
roce_set_field(mpt_entry->mpt_byte_36,
MPT_BYTE_36_PA1_L_M,
- MPT_BYTE_36_PA1_L_S,
- cpu_to_le32((u32)(pages[i])));
+ MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_40,
MPT_BYTE_40_PA1_H_M,
MPT_BYTE_40_PA1_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
+ (u32)(pages[i] >> PAGES_SHIFT_24));
break;
case 2:
roce_set_field(mpt_entry->mpt_byte_40,
MPT_BYTE_40_PA2_L_M,
- MPT_BYTE_40_PA2_L_S,
- cpu_to_le32((u32)(pages[i])));
+ MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_44,
MPT_BYTE_44_PA2_H_M,
MPT_BYTE_44_PA2_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
+ (u32)(pages[i] >> PAGES_SHIFT_16));
break;
case 3:
roce_set_field(mpt_entry->mpt_byte_44,
MPT_BYTE_44_PA3_L_M,
- MPT_BYTE_44_PA3_L_S,
- cpu_to_le32((u32)(pages[i])));
+ MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_48,
MPT_BYTE_48_PA3_H_M,
MPT_BYTE_48_PA3_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_8)));
+ (u32)(pages[i] >> PAGES_SHIFT_8));
break;
case 4:
mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_56,
MPT_BYTE_56_PA4_H_M,
MPT_BYTE_56_PA4_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
+ (u32)(pages[i] >> PAGES_SHIFT_32));
break;
case 5:
roce_set_field(mpt_entry->mpt_byte_56,
MPT_BYTE_56_PA5_L_M,
- MPT_BYTE_56_PA5_L_S,
- cpu_to_le32((u32)(pages[i])));
+ MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_60,
MPT_BYTE_60_PA5_H_M,
MPT_BYTE_60_PA5_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
+ (u32)(pages[i] >> PAGES_SHIFT_24));
break;
case 6:
roce_set_field(mpt_entry->mpt_byte_60,
MPT_BYTE_60_PA6_L_M,
- MPT_BYTE_60_PA6_L_S,
- cpu_to_le32((u32)(pages[i])));
+ MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
roce_set_field(mpt_entry->mpt_byte_64,
MPT_BYTE_64_PA6_H_M,
MPT_BYTE_64_PA6_H_S,
- cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
+ (u32)(pages[i] >> PAGES_SHIFT_16));
break;
default:
break;
@@ -1951,7 +2001,7 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
free_page((unsigned long) pages);
- mpt_entry->pbl_addr_l = (u32)(mr->pbl_dma_addr);
+ mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));
roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
MPT_BYTE_12_PBL_ADDR_H_S,
@@ -1982,9 +2032,9 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
- u32 doorbell[2];
+ __le32 doorbell[2];
- doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
+ doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
doorbell[1] = 0;
roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
@@ -2081,10 +2131,8 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
- cq_context->cqc_byte_4 = cpu_to_le32(cq_context->cqc_byte_4);
- cq_context->cq_bt_l = (u32)dma_handle;
- cq_context->cq_bt_l = cpu_to_le32(cq_context->cq_bt_l);
+ cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
roce_set_field(cq_context->cqc_byte_12,
CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
@@ -2096,15 +2144,12 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
ilog2((unsigned int)nent));
roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
- cq_context->cqc_byte_12 = cpu_to_le32(cq_context->cqc_byte_12);
- cq_context->cur_cqe_ba0_l = (u32)(mtts[0]);
- cq_context->cur_cqe_ba0_l = cpu_to_le32(cq_context->cur_cqe_ba0_l);
+ cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
roce_set_field(cq_context->cqc_byte_20,
CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
- CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S,
- cpu_to_le32((mtts[0]) >> 32));
+ CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
/* Dedicated hardware, directly set 0 */
roce_set_field(cq_context->cqc_byte_20,
CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
@@ -2118,9 +2163,8 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
tptr_dma_addr >> 44);
- cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);
- cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);
+ cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
roce_set_field(cq_context->cqc_byte_32,
CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
@@ -2138,7 +2182,6 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->cqc_byte_32,
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
- cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
}
static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
@@ -2151,7 +2194,7 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
{
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag;
- u32 doorbell[2];
+ __le32 doorbell[2];
notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
@@ -2159,7 +2202,8 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
* flags = 0; Notification Flag = 1, next
* flags = 1; Notification Flag = 0, solocited
*/
- doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
+ doorbell[0] =
+ cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
@@ -2416,7 +2460,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_v1_priv *priv;
unsigned long end = 0, flags = 0;
- uint32_t bt_cmd_val[2] = {0};
+ __le32 bt_cmd_val[2] = {0};
void __iomem *bt_cmd;
u64 bt_ba = 0;
@@ -2468,7 +2512,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
}
- bt_cmd_val[0] = (uint32_t)bt_ba;
+ bt_cmd_val[0] = (__le32)bt_ba;
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
@@ -2569,10 +2613,11 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_sqp_context *context;
struct device *dev = &hr_dev->pdev->dev;
dma_addr_t dma_handle = 0;
+ u32 __iomem *addr;
int rq_pa_start;
+ __le32 tmp;
u32 reg_val;
u64 *mtts;
- u32 __iomem *addr;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
@@ -2598,7 +2643,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
- context->sq_rq_bt_l = (u32)(dma_handle);
+ context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
roce_set_field(context->qp1c_bytes_12,
QP1C_BYTES_12_SQ_RQ_BT_H_M,
QP1C_BYTES_12_SQ_RQ_BT_H_S,
@@ -2610,7 +2655,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
roce_set_bit(context->qp1c_bytes_16,
QP1C_BYTES_16_SIGNALING_TYPE_S,
- hr_qp->sq_signal_bits);
+ le32_to_cpu(hr_qp->sq_signal_bits));
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
1);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
@@ -2624,7 +2669,8 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
- context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
+ context->cur_rq_wqe_ba_l =
+ cpu_to_le32((u32)(mtts[rq_pa_start]));
roce_set_field(context->qp1c_bytes_28,
QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
@@ -2643,7 +2689,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_32_TX_CQ_NUM_S,
to_hr_cq(ibqp->send_cq)->cqn);
- context->cur_sq_wqe_ba_l = (u32)mtts[0];
+ context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);
roce_set_field(context->qp1c_bytes_40,
QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
@@ -2658,23 +2704,25 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
ROCEE_QP1C_CFG0_0_REG +
hr_qp->phy_port * sizeof(*context));
- writel(context->qp1c_bytes_4, addr);
- writel(context->sq_rq_bt_l, addr + 1);
- writel(context->qp1c_bytes_12, addr + 2);
- writel(context->qp1c_bytes_16, addr + 3);
- writel(context->qp1c_bytes_20, addr + 4);
- writel(context->cur_rq_wqe_ba_l, addr + 5);
- writel(context->qp1c_bytes_28, addr + 6);
- writel(context->qp1c_bytes_32, addr + 7);
- writel(context->cur_sq_wqe_ba_l, addr + 8);
- writel(context->qp1c_bytes_40, addr + 9);
+ writel(le32_to_cpu(context->qp1c_bytes_4), addr);
+ writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
+ writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
+ writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
+ writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
+ writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
+ writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
+ writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
+ writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
+ writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
}
/* Modify QP1C status */
reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
hr_qp->phy_port * sizeof(*context));
- roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
+ tmp = cpu_to_le32(reg_val);
+ roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
+ reg_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
hr_qp->phy_port * sizeof(*context), reg_val);
@@ -2712,7 +2760,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
dma_addr_t dma_handle_2 = 0;
dma_addr_t dma_handle = 0;
- uint32_t doorbell[2] = {0};
+ __le32 doorbell[2] = {0};
int rq_pa_start = 0;
u64 *mtts_2 = NULL;
int ret = -EINVAL;
@@ -2887,7 +2935,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
dmac = (u8 *)attr->ah_attr.roce.dmac;
- context->sq_rq_bt_l = (u32)(dma_handle);
+ context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
roce_set_field(context->qpc_bytes_24,
QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
@@ -2899,7 +2947,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
attr->min_rnr_timer);
- context->irrl_ba_l = (u32)(dma_handle_2);
+ context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
roce_set_field(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
@@ -2913,7 +2961,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
1);
roce_set_bit(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
- hr_qp->sq_signal_bits);
+ le32_to_cpu(hr_qp->sq_signal_bits));
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
hr_qp->port;
@@ -2991,7 +3039,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
- context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
+ context->cur_rq_wqe_ba_l =
+ cpu_to_le32((u32)(mtts[rq_pa_start]));
roce_set_field(context->qpc_bytes_76,
QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
@@ -3071,7 +3120,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
goto out;
}
- context->rx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
+ context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
roce_set_field(context->qpc_bytes_120,
QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
@@ -3219,7 +3268,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
- context->tx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
+ context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
roce_set_field(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
@@ -3386,16 +3435,16 @@ static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
addr = ROCEE_QP1C_CFG0_0_REG +
hr_qp->port * sizeof(struct hns_roce_sqp_context);
- context.qp1c_bytes_4 = roce_read(hr_dev, addr);
- context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
- context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
- context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
- context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
- context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
- context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
- context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
- context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
- context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);
+ context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
+ context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
+ context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
+ context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
+ context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
+ context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
+ context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
+ context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
+ context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
+ context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));
hr_qp->state = roce_get_field(context.qp1c_bytes_4,
QP1C_BYTES_4_QP_STATE_M,
@@ -3557,7 +3606,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
- qp_attr->rnr_retry = context->rnr_retry;
+ qp_attr->rnr_retry = (u8)context->rnr_retry;
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
@@ -3595,42 +3644,47 @@ static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
u32 *old_send, u32 *old_retry,
u32 *tsp_st, u32 *success_flags)
{
+ __le32 *old_send_tmp, *old_retry_tmp;
u32 sdb_retry_cnt;
u32 sdb_send_ptr;
u32 cur_cnt, old_cnt;
+ __le32 tmp, tmp1;
u32 send_ptr;
sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
- cur_cnt = roce_get_field(sdb_send_ptr,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ tmp = cpu_to_le32(sdb_send_ptr);
+ tmp1 = cpu_to_le32(sdb_retry_cnt);
+ cur_cnt = roce_get_field(tmp, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(sdb_retry_cnt,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ roce_get_field(tmp1, ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+
+ old_send_tmp = (__le32 *)old_send;
+ old_retry_tmp = (__le32 *)old_retry;
if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
- old_cnt = roce_get_field(*old_send,
+ old_cnt = roce_get_field(*old_send_tmp,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(*old_retry,
+ roce_get_field(*old_retry_tmp,
ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
*success_flags = 1;
} else {
- old_cnt = roce_get_field(*old_send,
+ old_cnt = roce_get_field(*old_send_tmp,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
*success_flags = 1;
} else {
- send_ptr = roce_get_field(*old_send,
+ send_ptr = roce_get_field(*old_send_tmp,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(sdb_retry_cnt,
+ roce_get_field(tmp1,
ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
- roce_set_field(*old_send,
+ roce_set_field(*old_send_tmp,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
send_ptr);
@@ -3646,11 +3700,14 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
{
struct device *dev = &hr_dev->pdev->dev;
u32 sdb_send_ptr, old_send;
+ __le32 sdb_issue_ptr_tmp;
+ __le32 sdb_send_ptr_tmp;
u32 success_flags = 0;
unsigned long end;
u32 old_retry;
u32 inv_cnt;
u32 tsp_st;
+ __le32 tmp;
if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
*wait_stage < HNS_ROCE_V1_DB_STAGE1) {
@@ -3679,10 +3736,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
ROCEE_SDB_SEND_PTR_REG);
}
- if (roce_get_field(sdb_issue_ptr,
+ sdb_send_ptr_tmp = cpu_to_le32(sdb_send_ptr);
+ sdb_issue_ptr_tmp = cpu_to_le32(sdb_issue_ptr);
+ if (roce_get_field(sdb_issue_ptr_tmp,
ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
- roce_get_field(sdb_send_ptr,
+ roce_get_field(sdb_send_ptr_tmp,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
@@ -3690,7 +3749,8 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
do {
tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
- if (roce_get_bit(tsp_st,
+ tmp = cpu_to_le32(tsp_st);
+ if (roce_get_bit(tmp,
ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
return 0;
@@ -3699,8 +3759,9 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
if (!time_before(jiffies, end)) {
dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
"issue 0x%x send 0x%x.\n",
- hr_qp->qpn, sdb_issue_ptr,
- sdb_send_ptr);
+ hr_qp->qpn,
+ le32_to_cpu(sdb_issue_ptr_tmp),
+ le32_to_cpu(sdb_send_ptr_tmp));
return 0;
}
@@ -4102,9 +4163,9 @@ static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
struct device *dev = &hr_dev->pdev->dev;
u32 cqn;
- cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ cqn = roce_get_field(aeqe->event.cq_event.cq,
HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
@@ -4340,6 +4401,7 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
u32 aeshift_val;
u32 ceshift_val;
u32 cemask_val;
+ __le32 tmp;
int i;
/*
@@ -4348,30 +4410,34 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
* interrupt, mask irq, clear irq, cancel mask operation
*/
aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
+ tmp = cpu_to_le32(aeshift_val);
/* AEQE overflow */
- if (roce_get_bit(aeshift_val,
+ if (roce_get_bit(tmp,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
dev_warn(dev, "AEQ overflow!\n");
/* Set mask */
caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ tmp = cpu_to_le32(caepaemask_val);
+ roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
HNS_ROCE_INT_MASK_ENABLE);
+ caepaemask_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
/* Clear int state(INT_WC : write 1 clear) */
caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
- roce_set_bit(caepaest_val,
- ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
+ tmp = cpu_to_le32(caepaest_val);
+ roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
+ caepaest_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
/* Clear mask */
caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ tmp = cpu_to_le32(caepaemask_val);
+ roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
HNS_ROCE_INT_MASK_DISABLE);
+ caepaemask_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
}
@@ -4379,8 +4445,9 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
i * CEQ_REG_OFFSET);
+ tmp = cpu_to_le32(ceshift_val);
- if (roce_get_bit(ceshift_val,
+ if (roce_get_bit(tmp,
ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
int_work++;
@@ -4389,9 +4456,11 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
cemask_val = roce_read(hr_dev,
ROCEE_CAEP_CE_IRQ_MASK_0_REG +
i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
+ tmp = cpu_to_le32(cemask_val);
+ roce_set_bit(tmp,
ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
HNS_ROCE_INT_MASK_ENABLE);
+ cemask_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
i * CEQ_REG_OFFSET, cemask_val);
@@ -4399,9 +4468,11 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
cealmovf_val = roce_read(hr_dev,
ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
i * CEQ_REG_OFFSET);
- roce_set_bit(cealmovf_val,
+ tmp = cpu_to_le32(cealmovf_val);
+ roce_set_bit(tmp,
ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
1);
+ cealmovf_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
i * CEQ_REG_OFFSET, cealmovf_val);
@@ -4409,9 +4480,11 @@ static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
cemask_val = roce_read(hr_dev,
ROCEE_CAEP_CE_IRQ_MASK_0_REG +
i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
+ tmp = cpu_to_le32(cemask_val);
+ roce_set_bit(tmp,
ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
HNS_ROCE_INT_MASK_DISABLE);
+ cemask_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
i * CEQ_REG_OFFSET, cemask_val);
}
@@ -4435,13 +4508,16 @@ static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
{
u32 aemask_val;
int masken = 0;
+ __le32 tmp;
int i;
/* AEQ INT */
aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ tmp = cpu_to_le32(aemask_val);
+ roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
masken);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
+ roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
+ aemask_val = le32_to_cpu(tmp);
roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
/* CEQ INT */
@@ -4473,20 +4549,24 @@ static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
int enable_flag)
{
void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
+ __le32 tmp;
u32 val;
val = readl(eqc);
+ tmp = cpu_to_le32(val);
if (enable_flag)
- roce_set_field(val,
+ roce_set_field(tmp,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
HNS_ROCE_EQ_STAT_VALID);
else
- roce_set_field(val,
+ roce_set_field(tmp,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
HNS_ROCE_EQ_STAT_INVALID);
+
+ val = le32_to_cpu(tmp);
writel(val, eqc);
}
@@ -4499,6 +4579,9 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
u32 eqconsindx_val = 0;
u32 eqcuridx_val = 0;
u32 eqshift_val = 0;
+ __le32 tmp2 = 0;
+ __le32 tmp1 = 0;
+ __le32 tmp = 0;
int num_bas;
int ret;
int i;
@@ -4530,14 +4613,13 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
}
eq->cons_index = 0;
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
HNS_ROCE_EQ_STAT_INVALID);
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
+ roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
eq->log_entries);
+ eqshift_val = le32_to_cpu(tmp);
writel(eqshift_val, eqc);
/* Configure eq extended address 12~44bit */
@@ -4549,18 +4631,18 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
 * using a 4K page, and shift a further 32 bits when
 * calculating the high 32-bit value presented to hardware.
*/
- roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
+ roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
eq->buf_list[0].map >> 44);
- roce_set_field(eqcuridx_val,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
+ roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
+ eqcuridx_val = le32_to_cpu(tmp1);
writel(eqcuridx_val, eqc + 8);
/* Configure eq consumer index */
- roce_set_field(eqconsindx_val,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
+ roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
+ eqconsindx_val = le32_to_cpu(tmp2);
writel(eqconsindx_val, eqc + 0xc);
return 0;
@@ -4835,16 +4917,14 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
continue;
pdev = of_find_device_by_node(net_node);
} else if (is_acpi_device_node(dev->fwnode)) {
- struct acpi_reference_args args;
- struct fwnode_handle *fwnode;
+ struct fwnode_reference_args args;
ret = acpi_node_get_property_reference(dev->fwnode,
"eth-handle",
i, &args);
if (ret)
continue;
- fwnode = acpi_fwnode_handle(args.adev);
- pdev = hns_roce_find_pdev(fwnode);
+ pdev = hns_roce_find_pdev(args.fwnode);
} else {
dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index e9a2717ea7cd..66440147d9eb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -260,7 +260,7 @@ struct hns_roce_cqe {
__le32 cqe_byte_4;
union {
__le32 r_key;
- __be32 immediate_data;
+ __le32 immediate_data;
};
__le32 byte_cnt;
__le32 cqe_byte_16;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index a6e11be0ea0f..0218c0f8c2a7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
+#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hnae3.h"
@@ -53,7 +54,7 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
-static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr,
+static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
unsigned int *sge_ind)
{
struct hns_roce_v2_wqe_data_seg *dseg;
@@ -100,10 +101,10 @@ static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr,
}
}
-static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
- struct ib_send_wr **bad_wr)
+ const struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_wqe_data_seg *dseg = wqe;
@@ -164,23 +165,30 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
return 0;
}
-static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state);
+
+static int hns_roce_v2_post_send(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
- struct hns_roce_v2_wqe_data_seg *dseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
+ struct ib_qp_attr attr;
unsigned int sge_ind = 0;
unsigned int owner_bit;
unsigned long flags;
unsigned int ind;
void *wqe = NULL;
bool loopback;
+ int attr_mask;
u32 tmp_len;
int ret = 0;
u8 *smac;
@@ -273,7 +281,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
- ud_sq_wqe->immtdata = wr->ex.imm_data;
+ ud_sq_wqe->immtdata =
+ cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
break;
default:
ud_sq_wqe->immtdata = 0;
@@ -330,14 +339,13 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
- 0);
- roce_set_field(ud_sq_wqe->byte_36,
- V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
- V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
- 0);
+ ah->av.sl_tclass_flowlabel >>
+ HNS_ROCE_TCLASS_SHIFT);
roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
- V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
+ ah->av.sl_tclass_flowlabel &
+ HNS_ROCE_FLOW_LABEL_MASK);
roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_SL_M,
V2_UD_SEND_WQE_BYTE_40_SL_S,
@@ -371,7 +379,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
- rc_sq_wqe->immtdata = wr->ex.imm_data;
+ rc_sq_wqe->immtdata =
+ cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
break;
case IB_WR_SEND_WITH_INV:
rc_sq_wqe->inv_key =
@@ -485,7 +494,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
- dseg = wqe;
ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
&sge_ind, bad_wr);
@@ -523,6 +531,19 @@ out:
qp->sq_next_wqe = ind;
qp->next_sge = sge_ind;
+
+ if (qp->state == IB_QPS_ERR) {
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+
+ ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
+ qp->state, IB_QPS_ERR);
+ if (ret) {
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+ *bad_wr = wr;
+ return ret;
+ }
+ }
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
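
The block added above is the new "flush CQE" path: if a work request is posted to a QP that is already in error, the driver re-issues a modify-QP to IB_QPS_ERR so the producer indices captured in the QPC are refreshed and hardware generates flush completions. A minimal sketch of the pattern (simplified: the lock/unlock handling in the actual hunk is elided):

/* Sketch: re-arm flushing after posting to an error-state QP. */
if (qp->state == IB_QPS_ERR) {
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, IB_QP_STATE,
				    qp->state, IB_QPS_ERR);
	if (ret)
		*bad_wr = wr;	/* surface the failing work request */
}
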
@@ -530,16 +551,19 @@ out:
return ret;
}
-static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_rinl_sge *sge_list;
struct device *dev = hr_dev->dev;
+ struct ib_qp_attr attr;
unsigned long flags;
void *wqe = NULL;
+ int attr_mask;
int ret = 0;
int nreq;
int ind;
@@ -608,6 +632,20 @@ out:
wmb();
*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
+
+ if (hr_qp->state == IB_QPS_ERR) {
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+
+ ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
+ attr_mask, hr_qp->state,
+ IB_QPS_ERR);
+ if (ret) {
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+ *bad_wr = wr;
+ return ret;
+ }
+ }
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -702,8 +740,8 @@ static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
int ret;
/* Setup the queue entries for command queue */
- priv->cmq.csq.desc_num = 1024;
- priv->cmq.crq.desc_num = 1024;
+ priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
+ priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;
/* Setup the lock for command queue */
spin_lock_init(&priv->cmq.csq.lock);
@@ -925,7 +963,8 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc[2];
- struct hns_roce_pf_res *res;
+ struct hns_roce_pf_res_a *req_a;
+ struct hns_roce_pf_res_b *req_b;
int ret;
int i;
@@ -943,21 +982,26 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
if (ret)
return ret;
- res = (struct hns_roce_pf_res *)desc[0].data;
+ req_a = (struct hns_roce_pf_res_a *)desc[0].data;
+ req_b = (struct hns_roce_pf_res_b *)desc[1].data;
- hr_dev->caps.qpc_bt_num = roce_get_field(res->qpc_bt_idx_num,
+ hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
PF_RES_DATA_1_PF_QPC_BT_NUM_M,
PF_RES_DATA_1_PF_QPC_BT_NUM_S);
- hr_dev->caps.srqc_bt_num = roce_get_field(res->srqc_bt_idx_num,
+ hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
- hr_dev->caps.cqc_bt_num = roce_get_field(res->cqc_bt_idx_num,
+ hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
PF_RES_DATA_3_PF_CQC_BT_NUM_M,
PF_RES_DATA_3_PF_CQC_BT_NUM_S);
- hr_dev->caps.mpt_bt_num = roce_get_field(res->mpt_bt_idx_num,
+ hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
PF_RES_DATA_4_PF_MPT_BT_NUM_M,
PF_RES_DATA_4_PF_MPT_BT_NUM_S);
+ hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
+ PF_RES_DATA_3_PF_SL_NUM_M,
+ PF_RES_DATA_3_PF_SL_NUM_S);
+
return 0;
}
@@ -1203,12 +1247,14 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->eqe_ba_pg_sz = 0;
caps->eqe_buf_pg_sz = 0;
caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
+ caps->tsq_buf_pg_sz = 0;
caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
HNS_ROCE_CAP_FLAG_RQ_INLINE |
- HNS_ROCE_CAP_FLAG_RECORD_DB;
+ HNS_ROCE_CAP_FLAG_RECORD_DB |
+ HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
@@ -1224,6 +1270,228 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
+static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
+ enum hns_roce_link_table_type type)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cfg_llm_a *req_a =
+ (struct hns_roce_cfg_llm_a *)desc[0].data;
+ struct hns_roce_cfg_llm_b *req_b =
+ (struct hns_roce_cfg_llm_b *)desc[1].data;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_link_table *link_tbl;
+ struct hns_roce_link_table_entry *entry;
+ enum hns_roce_opcode_type opcode;
+ u32 page_num;
+ int i;
+
+ switch (type) {
+ case TSQ_LINK_TABLE:
+ link_tbl = &priv->tsq;
+ opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
+ break;
+ case TPQ_LINK_TABLE:
+ link_tbl = &priv->tpq;
+ opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ page_num = link_tbl->npages;
+ entry = link_tbl->table.buf;
+ memset(req_a, 0, sizeof(*req_a));
+ memset(req_b, 0, sizeof(*req_b));
+
+ for (i = 0; i < 2; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
+
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ if (i == 0) {
+ req_a->base_addr_l = link_tbl->table.map & 0xffffffff;
+ req_a->base_addr_h = (link_tbl->table.map >> 32) &
+ 0xffffffff;
+ roce_set_field(req_a->depth_pgsz_init_en,
+ CFG_LLM_QUE_DEPTH_M,
+ CFG_LLM_QUE_DEPTH_S,
+ link_tbl->npages);
+ roce_set_field(req_a->depth_pgsz_init_en,
+ CFG_LLM_QUE_PGSZ_M,
+ CFG_LLM_QUE_PGSZ_S,
+ link_tbl->pg_sz);
+ req_a->head_ba_l = entry[0].blk_ba0;
+ req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
+ roce_set_field(req_a->head_ptr,
+ CFG_LLM_HEAD_PTR_M,
+ CFG_LLM_HEAD_PTR_S, 0);
+ } else {
+ req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
+ roce_set_field(req_b->tail_ba_h,
+ CFG_LLM_TAIL_BA_H_M,
+ CFG_LLM_TAIL_BA_H_S,
+ entry[page_num - 1].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_BA1_M);
+ roce_set_field(req_b->tail_ptr,
+ CFG_LLM_TAIL_PTR_M,
+ CFG_LLM_TAIL_PTR_S,
+ (entry[page_num - 2].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
+ HNS_ROCE_LINK_TABLE_NXT_PTR_S);
+ }
+ }
+ roce_set_field(req_a->depth_pgsz_init_en,
+ CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
+
+ return hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
+static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
+ enum hns_roce_link_table_type type)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_link_table *link_tbl;
+ struct hns_roce_link_table_entry *entry;
+ struct device *dev = hr_dev->dev;
+ u32 buf_chk_sz;
+ dma_addr_t t;
+ int func_num = 1;
+ int pg_num_a;
+ int pg_num_b;
+ int pg_num;
+ int size;
+ int i;
+
+ switch (type) {
+ case TSQ_LINK_TABLE:
+ link_tbl = &priv->tsq;
+ buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
+ pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
+ pg_num_b = hr_dev->caps.sl_num * 4 + 2;
+ break;
+ case TPQ_LINK_TABLE:
+ link_tbl = &priv->tpq;
+ buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
+ pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
+ pg_num_b = 2 * 4 * func_num + 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pg_num = max(pg_num_a, pg_num_b);
+ size = pg_num * sizeof(struct hns_roce_link_table_entry);
+
+ link_tbl->table.buf = dma_alloc_coherent(dev, size,
+ &link_tbl->table.map,
+ GFP_KERNEL);
+ if (!link_tbl->table.buf)
+ goto out;
+
+ link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
+ GFP_KERNEL);
+ if (!link_tbl->pg_list)
+ goto err_kcalloc_failed;
+
+ entry = link_tbl->table.buf;
+ for (i = 0; i < pg_num; ++i) {
+ link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
+ &t, GFP_KERNEL);
+ if (!link_tbl->pg_list[i].buf)
+ goto err_alloc_buf_failed;
+
+ link_tbl->pg_list[i].map = t;
+ memset(link_tbl->pg_list[i].buf, 0, buf_chk_sz);
+
+ entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
+ roce_set_field(entry[i].blk_ba1_nxt_ptr,
+ HNS_ROCE_LINK_TABLE_BA1_M,
+ HNS_ROCE_LINK_TABLE_BA1_S,
+ t >> 44);
+
+ if (i < (pg_num - 1))
+ roce_set_field(entry[i].blk_ba1_nxt_ptr,
+ HNS_ROCE_LINK_TABLE_NXT_PTR_M,
+ HNS_ROCE_LINK_TABLE_NXT_PTR_S,
+ i + 1);
+ }
+ link_tbl->npages = pg_num;
+ link_tbl->pg_sz = buf_chk_sz;
+
+ return hns_roce_config_link_table(hr_dev, type);
+
+err_alloc_buf_failed:
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, buf_chk_sz,
+ link_tbl->pg_list[i].buf,
+ link_tbl->pg_list[i].map);
+ kfree(link_tbl->pg_list);
+
+err_kcalloc_failed:
+ dma_free_coherent(dev, size, link_tbl->table.buf,
+ link_tbl->table.map);
+
+out:
+ return -ENOMEM;
+}
+
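
Each link-table entry packs a page-aligned block base address across two words: blk_ba0 carries BA[43:12] and the low 20 bits of blk_ba1_nxt_ptr carry BA[63:44], while the top 12 bits chain to the next entry. A hypothetical decode helper (not part of the patch) makes the layout explicit; it is the inverse of the fill loop above:

/* Hypothetical helper, for illustration only. */
static inline u64 link_tbl_entry_ba(struct hns_roce_link_table_entry *e)
{
	u64 ba1 = e->blk_ba1_nxt_ptr & HNS_ROCE_LINK_TABLE_BA1_M;

	return ((u64)e->blk_ba0 << 12) | (ba1 << 44);
}
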
+static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_link_table *link_tbl)
+{
+ struct device *dev = hr_dev->dev;
+ int size;
+ int i;
+
+ size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
+
+ for (i = 0; i < link_tbl->npages; ++i)
+ if (link_tbl->pg_list[i].buf)
+ dma_free_coherent(dev, link_tbl->pg_sz,
+ link_tbl->pg_list[i].buf,
+ link_tbl->pg_list[i].map);
+ kfree(link_tbl->pg_list);
+
+ dma_free_coherent(dev, size, link_tbl->table.buf,
+ link_tbl->table.map);
+}
+
+static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ int ret;
+
+ /* TSQ includes SQ doorbell and ack doorbell */
+ ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
+ if (ret) {
+ dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
+ if (ret) {
+ dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
+ goto err_tpq_init_failed;
+ }
+
+ return 0;
+
+err_tpq_init_failed:
+ hns_roce_free_link_table(hr_dev, &priv->tsq);
+
+ return ret;
+}
+
+static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+
+ hns_roce_free_link_table(hr_dev, &priv->tpq);
+ hns_roce_free_link_table(hr_dev, &priv->tsq);
+}
+
static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
{
u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
@@ -1307,13 +1575,45 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
return 0;
}
+static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
+ int gid_index, const union ib_gid *gid,
+ enum hns_roce_sgid_type sgid_type)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cfg_sgid_tb *sgid_tb =
+ (struct hns_roce_cfg_sgid_tb *)desc.data;
+ u32 *p;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
+
+ roce_set_field(sgid_tb->table_idx_rsv,
+ CFG_SGID_TB_TABLE_IDX_M,
+ CFG_SGID_TB_TABLE_IDX_S, gid_index);
+ roce_set_field(sgid_tb->vf_sgid_type_rsv,
+ CFG_SGID_TB_VF_SGID_TYPE_M,
+ CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
+
+ p = (u32 *)&gid->raw[0];
+ sgid_tb->vf_sgid_l = cpu_to_le32(*p);
+
+ p = (u32 *)&gid->raw[4];
+ sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
+
+ p = (u32 *)&gid->raw[8];
+ sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
+
+ p = (u32 *)&gid->raw[0xc];
+ sgid_tb->vf_sgid_h = cpu_to_le32(*p);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
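
hns_roce_config_sgid_table() above replaces four raw register writes with a single firmware command, but the 16-byte GID is still carried as four little-endian words. A hedged sketch of the byte-to-word mapping, annotated for clarity:

/* Sketch: raw[] holds the GID in network byte order; each 4-byte
 * chunk is loaded as a host u32 and stored little-endian. */
const u8 *raw = gid->raw;

sgid_tb->vf_sgid_l  = cpu_to_le32(*(const u32 *)&raw[0]);  /* bytes 0-3   */
sgid_tb->vf_sgid_ml = cpu_to_le32(*(const u32 *)&raw[4]);  /* bytes 4-7   */
sgid_tb->vf_sgid_mh = cpu_to_le32(*(const u32 *)&raw[8]);  /* bytes 8-11  */
sgid_tb->vf_sgid_h  = cpu_to_le32(*(const u32 *)&raw[12]); /* bytes 12-15 */
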
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
- int gid_index, union ib_gid *gid,
+ int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
- u32 *p;
- u32 val;
+ int ret;
if (!gid || !attr)
return -EINVAL;
@@ -1328,49 +1628,37 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
}
- p = (u32 *)&gid->raw[0];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG0_REG +
- 0x20 * gid_index);
-
- p = (u32 *)&gid->raw[4];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG1_REG +
- 0x20 * gid_index);
-
- p = (u32 *)&gid->raw[8];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG2_REG +
- 0x20 * gid_index);
-
- p = (u32 *)&gid->raw[0xc];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG3_REG +
- 0x20 * gid_index);
-
- val = roce_read(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index);
- roce_set_field(val, ROCEE_VF_SGID_CFG4_SGID_TYPE_M,
- ROCEE_VF_SGID_CFG4_SGID_TYPE_S, sgid_type);
-
- roce_write(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index, val);
+ ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
+ if (ret)
+ dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
- return 0;
+ return ret;
}
static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
u8 *addr)
{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cfg_smac_tb *smac_tb =
+ (struct hns_roce_cfg_smac_tb *)desc.data;
u16 reg_smac_h;
u32 reg_smac_l;
- u32 val;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
reg_smac_l = *(u32 *)(&addr[0]);
- roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_VF_SMAC_CFG0_REG +
- 0x08 * phy_port);
- val = roce_read(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port);
+ reg_smac_h = *(u16 *)(&addr[4]);
- reg_smac_h = *(u16 *)(&addr[4]);
- roce_set_field(val, ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M,
- ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S, reg_smac_h);
- roce_write(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port, val);
+ memset(smac_tb, 0, sizeof(*smac_tb));
+ roce_set_field(smac_tb->tb_idx_rsv,
+ CFG_SMAC_TB_IDX_M,
+ CFG_SMAC_TB_IDX_S, phy_port);
+ roce_set_field(smac_tb->vf_smac_h_rsv,
+ CFG_SMAC_TB_VF_SMAC_H_M,
+ CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
+ smac_tb->vf_smac_l = reg_smac_l;
- return 0;
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
@@ -1758,6 +2046,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_v2_cqe *cqe;
struct hns_roce_qp *hr_qp;
struct hns_roce_wq *wq;
+ struct ib_qp_attr attr;
+ int attr_mask;
int is_send;
u16 wqe_ctr;
u32 opcode;
@@ -1844,8 +2134,17 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
break;
}
- /* CQE status error, directly return */
- if (wc->status != IB_WC_SUCCESS)
+ /* flush cqe if wc status is error, excluding flush error */
+ if ((wc->status != IB_WC_SUCCESS) &&
+ (wc->status != IB_WC_WR_FLUSH_ERR)) {
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+ return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
+ &attr, attr_mask,
+ (*cur_qp)->state, IB_QPS_ERR);
+ }
+
+ if (wc->status == IB_WC_WR_FLUSH_ERR)
return 0;
if (is_send) {
@@ -1931,7 +2230,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->immtdata;
+ wc->ex.imm_data =
+ cpu_to_be32(le32_to_cpu(cqe->immtdata));
break;
case HNS_ROCE_V2_OPCODE_SEND:
wc->opcode = IB_WC_RECV;
@@ -1940,7 +2240,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->immtdata;
+ wc->ex.imm_data =
+ cpu_to_be32(le32_to_cpu(cqe->immtdata));
break;
case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
wc->opcode = IB_WC_RECV;
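
The immtdata fixes in this file all follow one convention: descriptor and CQE fields are little-endian (__le32) while verbs immediate data is big-endian (__be32), so every crossing converts through CPU order. Hypothetical helpers (not in the patch) that name the two directions:

/* Hypothetical helpers, illustration only. */
static inline __le32 imm_to_hw(__be32 imm)	/* post-send path */
{
	return cpu_to_le32(be32_to_cpu(imm));
}

static inline __be32 imm_from_hw(__le32 imm)	/* poll-cq path */
{
	return cpu_to_be32(le32_to_cpu(imm));
}
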
@@ -2273,10 +2574,10 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
/* No VLAN need to set 0xFFF */
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
- V2_QPC_BYTE_24_VLAN_IDX_S, 0xfff);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
- V2_QPC_BYTE_24_VLAN_IDX_S, 0);
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
+ V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
+ V2_QPC_BYTE_24_VLAN_ID_S, 0);
/*
* Set some fields in context to zero, Because the default values
@@ -2886,21 +3187,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
- V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
- V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
-
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
- V2_QPC_BYTE_28_FL_S, grh->flow_label);
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
- V2_QPC_BYTE_28_FL_S, 0);
-
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, grh->traffic_class);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, 0);
-
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
@@ -2911,9 +3197,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
- memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
- memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
-
roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
@@ -2952,12 +3235,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_168_LP_SGEN_INI_M,
V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S, 0);
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
-
return 0;
}
@@ -3135,13 +3412,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_28_AT_S, 0);
}
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S,
- rdma_ah_get_sl(&attr->ah_attr));
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S, 0);
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
-
roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
@@ -3224,9 +3494,114 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
;
} else {
dev_err(dev, "Illegal state for QP!\n");
+ ret = -EINVAL;
goto out;
}
+ /* When QP state is err, SQ and RQ WQE should be flushed */
+ if (new_state == IB_QPS_ERR) {
+ roce_set_field(context->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
+ hr_qp->sq.head);
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
+ hr_qp->rq.head);
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+ }
+
+ if (attr_mask & IB_QP_AV) {
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(&attr->ah_attr);
+ const struct ib_gid_attr *gid_attr = NULL;
+ u8 src_mac[ETH_ALEN];
+ int is_roce_protocol;
+ u16 vlan = 0xffff;
+ u8 ib_port;
+ u8 hr_port;
+
+ ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
+ hr_qp->port + 1;
+ hr_port = ib_port - 1;
+ is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
+ rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
+
+ if (is_roce_protocol) {
+ gid_attr = attr->ah_attr.grh.sgid_attr;
+ vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
+ memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
+ }
+
+ roce_set_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_VLAN_ID_M,
+ V2_QPC_BYTE_24_VLAN_ID_S, vlan);
+ roce_set_field(qpc_mask->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_VLAN_ID_M,
+ V2_QPC_BYTE_24_VLAN_ID_S, 0);
+
+ if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
+ dev_err(hr_dev->dev,
+ "sgid_index(%u) too large. max is %d\n",
+ grh->sgid_index,
+ hr_dev->caps.gid_table_len[hr_port]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
+ dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ roce_set_field(context->byte_52_udpspn_dmac,
+ V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
+ (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
+ 0 : 0x12b7);
+
+ roce_set_field(qpc_mask->byte_52_udpspn_dmac,
+ V2_QPC_BYTE_52_UDPSPN_M,
+ V2_QPC_BYTE_52_UDPSPN_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M,
+ V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
+
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M,
+ V2_QPC_BYTE_20_SGID_IDX_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
+ roce_set_field(qpc_mask->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, grh->traffic_class);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, 0);
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S, grh->flow_label);
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S, 0);
+ memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S,
+ rdma_ah_get_sl(&attr->ah_attr));
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S, 0);
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ }
+
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
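
For RoCEv2 GIDs the UDPSPN field in the hunk above is seeded with 0x12b7, i.e. decimal 4791, the IANA-registered RoCEv2 UDP port; for RoCEv1 it stays 0. A named constant would read better (hypothetical, not in the patch):

#define HNS_ROCE_V2_UDP_SPN	0x12b7	/* 4791, IANA RoCEv2 UDP port */
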
@@ -3497,6 +3872,11 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
if (is_user) {
+ if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
+ hns_roce_db_unmap_user(
+ to_hr_ucontext(hr_qp->ibqp.uobject->context),
+ &hr_qp->sdb);
+
if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
hns_roce_db_unmap_user(
to_hr_ucontext(hr_qp->ibqp.uobject->context),
@@ -3579,6 +3959,74 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
return ret;
}
+static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
+{
+ struct hns_roce_qp *hr_qp;
+ struct ib_qp_attr attr;
+ int attr_mask;
+ int ret;
+
+ hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (!hr_qp) {
+ dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
+ return;
+ }
+
+ if (hr_qp->ibqp.uobject) {
+ if (hr_qp->sdb_en == 1) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
+ return;
+ }
+ }
+
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+ ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ hr_qp->state, IB_QPS_ERR);
+ if (ret)
+ dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
+ qpn);
+}
+
+static void hns_roce_irq_work_handle(struct work_struct *work)
+{
+ struct hns_roce_work *irq_work =
+ container_of(work, struct hns_roce_work, work);
+ u32 qpn = irq_work->qpn;
+
+ switch (irq_work->event_type) {
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ break;
+ default:
+ break;
+ }
+
+ kfree(irq_work);
+}
+
+static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq, u32 qpn)
+{
+ struct hns_roce_work *irq_work;
+
+ irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
+ if (!irq_work)
+ return;
+
+ INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
+ irq_work->hr_dev = hr_dev;
+ irq_work->qpn = qpn;
+ irq_work->event_type = eq->event_type;
+ irq_work->sub_type = eq->sub_type;
+ queue_work(hr_dev->irq_workq, &(irq_work->work));
+}
+
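
hns_roce_v2_init_irq_work() runs from the AEQ interrupt handler, hence the GFP_ATOMIC allocation; the eventual modify-QP is deferred to the workqueue because it issues a mailbox command that can sleep. A sketch of the handoff, assuming the struct hns_roce_work fields added by this patch:

/* Sketch: hard-IRQ context -> process context handoff. */
struct hns_roce_work *w = kzalloc(sizeof(*w), GFP_ATOMIC); /* no sleeping here */
if (!w)
	return;			/* best effort; the event is simply dropped */

INIT_WORK(&w->work, hns_roce_irq_work_handle);
w->hr_dev = hr_dev;
w->qpn = qpn;
queue_work(hr_dev->irq_workq, &w->work);	/* handler kfree()s w when done */
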
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
u32 doorbell[2];
@@ -3681,14 +4129,9 @@ static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe,
- int event_type)
+ int event_type, u32 qpn)
{
struct device *dev = hr_dev->dev;
- u32 qpn;
-
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_COMM_EST:
@@ -3715,14 +4158,9 @@ static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe,
- int event_type)
+ int event_type, u32 cqn)
{
struct device *dev = hr_dev->dev;
- u32 cqn;
-
- cqn = roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
- HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
@@ -3787,6 +4225,9 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
struct hns_roce_aeqe *aeqe;
int aeqe_found = 0;
int event_type;
+ int sub_type;
+ u32 qpn;
+ u32 cqn;
while ((aeqe = next_aeqe_sw_v2(eq))) {
@@ -3798,6 +4239,15 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
event_type = roce_get_field(aeqe->asyn,
HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
+ sub_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_M,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_S);
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+ cqn = roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
@@ -3811,7 +4261,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
+ hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type,
+ qpn);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
@@ -3820,7 +4271,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
+ hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type,
+ cqn);
break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
dev_warn(dev, "DB overflow.\n");
@@ -3843,6 +4295,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
break;
};
+ eq->event_type = event_type;
+ eq->sub_type = sub_type;
++eq->cons_index;
aeqe_found = 1;
@@ -3850,6 +4304,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
dev_warn(dev, "cons_index overflow, set back to 0.\n");
eq->cons_index = 0;
}
+ hns_roce_v2_init_irq_work(hr_dev, eq, qpn);
}
set_eq_cons_index_v2(eq);
@@ -4052,15 +4507,12 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
u32 bt_chk_sz;
u32 mhop_num;
int eqe_alloc;
- int ba_num;
int i = 0;
int j = 0;
mhop_num = hr_dev->caps.eqe_hop_num;
buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
- ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
- buf_chk_sz;
/* hop_num = 0 */
if (mhop_num == HNS_ROCE_HOP_NUM_0) {
@@ -4669,6 +5121,13 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
}
}
+ hr_dev->irq_workq =
+ create_singlethread_workqueue("hns_roce_irq_workqueue");
+ if (!hr_dev->irq_workq) {
+ dev_err(dev, "Create irq workqueue failed!\n");
+ goto err_request_irq_fail;
+ }
+
return 0;
err_request_irq_fail:
@@ -4719,12 +5178,17 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
kfree(hr_dev->irq_names[i]);
kfree(eq_table->eq);
+
+ flush_workqueue(hr_dev->irq_workq);
+ destroy_workqueue(hr_dev->irq_workq);
}
static const struct hns_roce_hw hns_roce_hw_v2 = {
.cmq_init = hns_roce_v2_cmq_init,
.cmq_exit = hns_roce_v2_cmq_exit,
.hw_profile = hns_roce_v2_profile,
+ .hw_init = hns_roce_v2_init,
+ .hw_exit = hns_roce_v2_exit,
.post_mbox = hns_roce_v2_post_mbox,
.chk_mbox = hns_roce_v2_chk_mbox,
.set_gid = hns_roce_v2_set_gid,
@@ -4749,6 +5213,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
/* required last entry */
{0, }
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index d47675f365c7..14aa308befef 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -112,6 +112,9 @@
(step_idx == 1 && hop_num == 1) || \
(step_idx == 2 && hop_num == 2))
+#define CMD_CSQ_DESC_NUM 1024
+#define CMD_CRQ_DESC_NUM 1024
+
enum {
NO_ARMED = 0x0,
REG_NXT_CEQE = 0x2,
@@ -203,6 +206,10 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
HNS_ROCE_OPC_QUERY_PF_RES = 0x8400,
HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
+ HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
+ HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
+ HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
+ HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
};
@@ -447,8 +454,8 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_24_TC_S 8
#define V2_QPC_BYTE_24_TC_M GENMASK(15, 8)
-#define V2_QPC_BYTE_24_VLAN_IDX_S 16
-#define V2_QPC_BYTE_24_VLAN_IDX_M GENMASK(27, 16)
+#define V2_QPC_BYTE_24_VLAN_ID_S 16
+#define V2_QPC_BYTE_24_VLAN_ID_M GENMASK(27, 16)
#define V2_QPC_BYTE_24_MTU_S 28
#define V2_QPC_BYTE_24_MTU_M GENMASK(31, 28)
@@ -768,7 +775,7 @@ struct hns_roce_v2_cqe {
__le32 byte_4;
union {
__le32 rkey;
- __be32 immtdata;
+ __le32 immtdata;
};
__le32 byte_12;
__le32 byte_16;
@@ -926,7 +933,7 @@ struct hns_roce_v2_cq_db {
struct hns_roce_v2_ud_send_wqe {
__le32 byte_4;
__le32 msg_len;
- __be32 immtdata;
+ __le32 immtdata;
__le32 byte_16;
__le32 byte_20;
__le32 byte_24;
@@ -1012,7 +1019,7 @@ struct hns_roce_v2_rc_send_wqe {
__le32 msg_len;
union {
__le32 inv_key;
- __be32 immtdata;
+ __le32 immtdata;
};
__le32 byte_16;
__le32 byte_20;
@@ -1061,6 +1068,40 @@ struct hns_roce_query_version {
__le32 rsv[5];
};
+struct hns_roce_cfg_llm_a {
+ __le32 base_addr_l;
+ __le32 base_addr_h;
+ __le32 depth_pgsz_init_en;
+ __le32 head_ba_l;
+ __le32 head_ba_h_nxtptr;
+ __le32 head_ptr;
+};
+
+#define CFG_LLM_QUE_DEPTH_S 0
+#define CFG_LLM_QUE_DEPTH_M GENMASK(12, 0)
+
+#define CFG_LLM_QUE_PGSZ_S 16
+#define CFG_LLM_QUE_PGSZ_M GENMASK(19, 16)
+
+#define CFG_LLM_INIT_EN_S 20
+#define CFG_LLM_INIT_EN_M GENMASK(20, 20)
+
+#define CFG_LLM_HEAD_PTR_S 0
+#define CFG_LLM_HEAD_PTR_M GENMASK(11, 0)
+
+struct hns_roce_cfg_llm_b {
+ __le32 tail_ba_l;
+ __le32 tail_ba_h;
+ __le32 tail_ptr;
+ __le32 rsv[3];
+};
+
+#define CFG_LLM_TAIL_BA_H_S 0
+#define CFG_LLM_TAIL_BA_H_M GENMASK(19, 0)
+
+#define CFG_LLM_TAIL_PTR_S 0
+#define CFG_LLM_TAIL_PTR_M GENMASK(11, 0)
+
struct hns_roce_cfg_global_param {
__le32 time_cfg_udp_port;
__le32 rsv[5];
@@ -1072,7 +1113,7 @@ struct hns_roce_cfg_global_param {
#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S 16
#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M GENMASK(31, 16)
-struct hns_roce_pf_res {
+struct hns_roce_pf_res_a {
__le32 rsv;
__le32 qpc_bt_idx_num;
__le32 srqc_bt_idx_num;
@@ -1111,6 +1152,32 @@ struct hns_roce_pf_res {
#define PF_RES_DATA_5_PF_EQC_BT_NUM_S 16
#define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
+struct hns_roce_pf_res_b {
+ __le32 rsv0;
+ __le32 smac_idx_num;
+ __le32 sgid_idx_num;
+ __le32 qid_idx_sl_num;
+ __le32 rsv[2];
+};
+
+#define PF_RES_DATA_1_PF_SMAC_IDX_S 0
+#define PF_RES_DATA_1_PF_SMAC_IDX_M GENMASK(7, 0)
+
+#define PF_RES_DATA_1_PF_SMAC_NUM_S 8
+#define PF_RES_DATA_1_PF_SMAC_NUM_M GENMASK(16, 8)
+
+#define PF_RES_DATA_2_PF_SGID_IDX_S 0
+#define PF_RES_DATA_2_PF_SGID_IDX_M GENMASK(7, 0)
+
+#define PF_RES_DATA_2_PF_SGID_NUM_S 8
+#define PF_RES_DATA_2_PF_SGID_NUM_M GENMASK(16, 8)
+
+#define PF_RES_DATA_3_PF_QID_IDX_S 0
+#define PF_RES_DATA_3_PF_QID_IDX_M GENMASK(9, 0)
+
+#define PF_RES_DATA_3_PF_SL_NUM_S 16
+#define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16)
+
struct hns_roce_vf_res_a {
__le32 vf_id;
__le32 vf_qpc_bt_idx_num;
@@ -1179,13 +1246,6 @@ struct hns_roce_vf_res_b {
#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
-/* Reg field definition */
-#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S 0
-#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M GENMASK(15, 0)
-
-#define ROCEE_VF_SGID_CFG4_SGID_TYPE_S 0
-#define ROCEE_VF_SGID_CFG4_SGID_TYPE_M GENMASK(1, 0)
-
struct hns_roce_cfg_bt_attr {
__le32 vf_qpc_cfg;
__le32 vf_srqc_cfg;
@@ -1230,6 +1290,32 @@ struct hns_roce_cfg_bt_attr {
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
+struct hns_roce_cfg_sgid_tb {
+ __le32 table_idx_rsv;
+ __le32 vf_sgid_l;
+ __le32 vf_sgid_ml;
+ __le32 vf_sgid_mh;
+ __le32 vf_sgid_h;
+ __le32 vf_sgid_type_rsv;
+};
+#define CFG_SGID_TB_TABLE_IDX_S 0
+#define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0)
+
+#define CFG_SGID_TB_VF_SGID_TYPE_S 0
+#define CFG_SGID_TB_VF_SGID_TYPE_M GENMASK(1, 0)
+
+struct hns_roce_cfg_smac_tb {
+ __le32 tb_idx_rsv;
+ __le32 vf_smac_l;
+ __le32 vf_smac_h_rsv;
+ __le32 rsv[3];
+};
+#define CFG_SMAC_TB_IDX_S 0
+#define CFG_SMAC_TB_IDX_M GENMASK(7, 0)
+
+#define CFG_SMAC_TB_VF_SMAC_H_S 0
+#define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0)
+
struct hns_roce_cmq_desc {
__le16 opcode;
__le16 flag;
@@ -1276,8 +1362,32 @@ struct hns_roce_v2_cmq {
u16 last_status;
};
+enum hns_roce_link_table_type {
+ TSQ_LINK_TABLE,
+ TPQ_LINK_TABLE,
+};
+
+struct hns_roce_link_table {
+ struct hns_roce_buf_list table;
+ struct hns_roce_buf_list *pg_list;
+ u32 npages;
+ u32 pg_sz;
+};
+
+struct hns_roce_link_table_entry {
+ u32 blk_ba0;
+ u32 blk_ba1_nxt_ptr;
+};
+#define HNS_ROCE_LINK_TABLE_BA1_S 0
+#define HNS_ROCE_LINK_TABLE_BA1_M GENMASK(19, 0)
+
+#define HNS_ROCE_LINK_TABLE_NXT_PTR_S 20
+#define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20)
+
struct hns_roce_v2_priv {
struct hns_roce_v2_cmq cmq;
+ struct hns_roce_link_table tsq;
+ struct hns_roce_link_table tpq;
};
struct hns_roce_eq_context {
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 21b901cfa2d6..c5cae9a38c04 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -74,8 +74,7 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
-static int hns_roce_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr, void **context)
+static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
u8 port = attr->port_num - 1;
@@ -87,8 +86,7 @@ static int hns_roce_add_gid(const union ib_gid *gid,
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- ret = hr_dev->hw->set_gid(hr_dev, port, attr->index,
- (union ib_gid *)gid, attr);
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
@@ -208,7 +206,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_qp_wr = hr_dev->caps.max_wqes;
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_RC_RNR_NAK_GEN;
- props->max_sge = max(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
+ props->max_send_sge = hr_dev->caps.max_sq_sg;
+ props->max_recv_sge = hr_dev->caps.max_rq_sg;
props->max_sge_rd = 1;
props->max_cq = hr_dev->caps.num_cqs;
props->max_cqe = hr_dev->caps.max_cqes;
@@ -535,6 +534,9 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
+ ib_dev->uverbs_ex_cmd_mask |=
+ (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
+
/* HCA||device||port */
ib_dev->modify_device = hns_roce_modify_device;
ib_dev->query_device = hns_roce_query_device;
@@ -887,8 +889,7 @@ error_failed_cmd_init:
error_failed_cmq_init:
if (hr_dev->hw->reset) {
- ret = hr_dev->hw->reset(hr_dev, false);
- if (ret)
+ if (hr_dev->hw->reset(hr_dev, false))
dev_err(dev, "Dereset RoCE engine failed!\n");
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index b9f2c871ff9a..e11c149da04d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -37,7 +37,7 @@
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
{
- return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
+ return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
}
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index baaf906f7c2e..efb7e961ca65 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -115,7 +115,10 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
+ return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
+ base) ?
+ -ENOMEM :
+ 0;
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
@@ -489,6 +492,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
return 0;
}
+static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
+{
+ if (attr->qp_type == IB_QPT_XRC_TGT)
+ return 0;
+
+ return 1;
+}
+
static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
if (attr->qp_type == IB_QPT_XRC_INI ||
@@ -613,6 +624,23 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_mtt;
}
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+ (udata->inlen >= sizeof(ucmd)) &&
+ (udata->outlen >= sizeof(resp)) &&
+ hns_roce_qp_has_sq(init_attr)) {
+ ret = hns_roce_db_map_user(
+ to_hr_ucontext(ib_pd->uobject->context),
+ ucmd.sdb_addr, &hr_qp->sdb);
+ if (ret) {
+ dev_err(dev, "sq record doorbell map failed!\n");
+ goto err_mtt;
+ }
+
+ /* indicate kernel supports sq record db */
+ resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
+ hr_qp->sdb_en = 1;
+ }
+
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp)) &&
hns_roce_qp_has_rq(init_attr)) {
@@ -621,7 +649,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ucmd.db_addr, &hr_qp->rdb);
if (ret) {
dev_err(dev, "rq record doorbell map failed!\n");
- goto err_mtt;
+ goto err_sq_dbmap;
}
}
} else {
@@ -734,7 +762,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
- /* indicate kernel supports record db */
+ /* indicate kernel supports rq record db */
resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (ret)
@@ -770,6 +798,16 @@ err_wrid:
kfree(hr_qp->rq.wrid);
}
+err_sq_dbmap:
+ if (ib_pd->uobject)
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+ (udata->inlen >= sizeof(ucmd)) &&
+ (udata->outlen >= sizeof(resp)) &&
+ hns_roce_qp_has_sq(init_attr))
+ hns_roce_db_unmap_user(
+ to_hr_ucontext(ib_pd->uobject->context),
+ &hr_qp->sdb);
+
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
@@ -903,6 +941,17 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr_mask & IB_QP_STATE ?
attr->qp_state : cur_state;
+ if (ibqp->uobject &&
+ (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
+ if (hr_qp->sdb_en == 1) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ dev_warn(dev, "flush cqe is not supported in userspace!\n");
+ goto out;
+ }
+ }
+
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
IB_LINK_LAYER_ETHERNET)) {
dev_err(dev, "ib_modify_qp_is_ok failed\n");
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
index 2962979c06e9..d867ef1ac72a 100644
--- a/drivers/infiniband/hw/i40iw/Kconfig
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_I40IW
tristate "Intel(R) Ethernet X722 iWARP Driver"
depends on INET && I40E
+ depends on IPV6 || !IPV6
depends on PCI
select GENERIC_ALLOCATOR
---help---
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 7b2655128b9f..423818a7d333 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -57,6 +57,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip_fib.h>
+#include <net/secure_seq.h>
#include <net/tcp.h>
#include <asm/checksum.h>
@@ -2164,7 +2165,6 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
struct i40iw_cm_listener *listener)
{
struct i40iw_cm_node *cm_node;
- struct timespec ts;
int oldarpindex;
int arpindex;
struct net_device *netdev = iwdev->netdev;
@@ -2214,10 +2214,26 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
cm_node->tcp_cntxt.rcv_wnd =
I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
- ts = current_kernel_time();
- cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
- cm_node->tcp_cntxt.mss = (cm_node->ipv4) ? (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4) :
- (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6);
+ if (cm_node->ipv4) {
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]),
+ htonl(cm_node->rem_addr[0]),
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4;
+ } else if (IS_ENABLED(CONFIG_IPV6)) {
+ __be32 loc[4] = {
+ htonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]),
+ htonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3])
+ };
+ __be32 rem[4] = {
+ htonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]),
+ htonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3])
+ };
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, rem,
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6;
+ }
cm_node->iwdev = iwdev;
cm_node->dev = &iwdev->sc_dev;
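
The hunk above replaces an initial sequence number taken from current_kernel_time().tv_nsec, which an off-path attacker can guess, with the kernel's hashed ISN generators from <net/secure_seq.h>. As used here (signatures as they appear in this series), the 4-tuple is mixed with a boot-time secret:

#include <net/secure_seq.h>

/* 4-tuple -> unpredictable initial sequence number (IPv4 case). */
u32 isn = secure_tcp_seq(htonl(cm_node->loc_addr[0]),	/* saddr */
			 htonl(cm_node->rem_addr[0]),	/* daddr */
			 htons(cm_node->loc_port),	/* sport */
			 htons(cm_node->rem_port));	/* dport */
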
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 2836c5420d60..55a1fbf0e670 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -435,45 +435,24 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
}
/**
- * i40iw_manage_apbvt - add or delete tcp port
+ * i40iw_cqp_manage_abvpt_cmd - send cqp command manage abpvt
* @iwdev: iwarp device
* @accel_local_port: port for apbvt
* @add_port: add or delete port
*/
-int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool add_port)
+static enum i40iw_status_code
+i40iw_cqp_manage_abvpt_cmd(struct i40iw_device *iwdev,
+ u16 accel_local_port,
+ bool add_port)
{
struct i40iw_apbvt_info *info;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
- unsigned long flags;
- struct i40iw_cm_core *cm_core = &iwdev->cm_core;
- enum i40iw_status_code status = 0;
- bool in_use;
-
- /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
- * protect against race where add APBVT CQP can race ahead of the delete
- * APBVT for same port.
- */
- spin_lock_irqsave(&cm_core->apbvt_lock, flags);
-
- if (!add_port) {
- in_use = i40iw_port_in_use(cm_core, accel_local_port);
- if (in_use)
- goto exit;
- clear_bit(accel_local_port, cm_core->ports_in_use);
- } else {
- in_use = test_and_set_bit(accel_local_port,
- cm_core->ports_in_use);
- spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
- if (in_use)
- return 0;
- }
+ enum i40iw_status_code status;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
- if (!cqp_request) {
- status = -ENOMEM;
- goto exit;
- }
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_apbvt_entry.info;
@@ -489,14 +468,54 @@ int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool ad
status = i40iw_handle_cqp_op(iwdev, cqp_request);
if (status)
i40iw_pr_err("CQP-OP Manage APBVT entry fail");
-exit:
- if (!add_port)
- spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
return status;
}
/**
+ * i40iw_manage_apbvt - add or delete tcp port
+ * @iwdev: iwarp device
+ * @accel_local_port: port for apbvt
+ * @add_port: add or delete port
+ */
+enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
+ u16 accel_local_port,
+ bool add_port)
+{
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ enum i40iw_status_code status;
+ unsigned long flags;
+ bool in_use;
+
+ /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
+ * protect against race where add APBVT CQP can race ahead of the delete
+ * APBVT for same port.
+ */
+ if (add_port) {
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ in_use = __test_and_set_bit(accel_local_port,
+ cm_core->ports_in_use);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ if (in_use)
+ return 0;
+ return i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
+ true);
+ } else {
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ in_use = i40iw_port_in_use(cm_core, accel_local_port);
+ if (in_use) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return 0;
+ }
+ __clear_bit(accel_local_port, cm_core->ports_in_use);
+ status = i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
+ false);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return status;
+ }
+}
+
+/**
* i40iw_manage_arp_cache - manage hw arp cache
* @iwdev: iwarp device
* @mac_addr: mac address ptr
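
For illustration: the rework above separates port accounting from the CQP
command. The add path takes the port bit under apbvt_lock and drops the
lock before issuing the (waiting) add command; the delete path keeps the
lock held across the non-waiting delete command so a concurrent add for
the same port cannot overtake it. A hedged pthreads sketch of that
reserve/release shape (hw_add()/hw_del() stand in for the CQP commands,
and the delete path is simplified relative to the driver):

/* port_reserve.c - add/delete ordering in the style of i40iw_manage_apbvt. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NPORTS 65536
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long ports[NPORTS / BITS_PER_WORD];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool test_and_set(unsigned p)
{
	unsigned long *w = &ports[p / BITS_PER_WORD];
	unsigned long m = 1UL << (p % BITS_PER_WORD);
	bool was = *w & m;

	*w |= m;
	return was;
}

static void clear_port(unsigned p)
{
	ports[p / BITS_PER_WORD] &= ~(1UL << (p % BITS_PER_WORD));
}

static void hw_add(unsigned p) { printf("add %u\n", p); }
static void hw_del(unsigned p) { printf("del %u\n", p); }

int manage_port(unsigned port, bool add)
{
	pthread_mutex_lock(&lock);
	if (add) {
		bool in_use = test_and_set(port);

		pthread_mutex_unlock(&lock);	/* add command may sleep */
		if (!in_use)
			hw_add(port);
		return 0;
	}
	clear_port(port);
	hw_del(port);				/* delete runs under the lock */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	manage_port(4242, true);
	manage_port(4242, false);
	return 0;
}
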
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 68679ad4c6da..e2e6c74a7452 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -71,7 +71,8 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
props->max_qp = iwdev->max_qp - iwdev->used_qps;
props->max_qp_wr = I40IW_MAX_QP_WRS;
- props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ props->max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ props->max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
props->max_cq = iwdev->max_cq - iwdev->used_cqs;
props->max_cqe = iwdev->max_cqe;
props->max_mr = iwdev->max_mr - iwdev->used_mrs;
@@ -1409,6 +1410,7 @@ static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
struct vm_area_struct *vma;
struct hstate *h;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
if (vma && is_vm_hugetlb_page(vma)) {
h = hstate_vma(vma);
@@ -1417,6 +1419,7 @@ static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
iwmr->page_msk = huge_page_mask(h);
}
}
+ up_read(&current->mm->mmap_sem);
}
/**
@@ -2198,8 +2201,8 @@ static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, in
* @bad_wr: return of bad wr if err
*/
static int i40iw_post_send(struct ib_qp *ibqp,
- struct ib_send_wr *ib_wr,
- struct ib_send_wr **bad_wr)
+ const struct ib_send_wr *ib_wr,
+ const struct ib_send_wr **bad_wr)
{
struct i40iw_qp *iwqp;
struct i40iw_qp_uk *ukqp;
@@ -2374,9 +2377,8 @@ out:
* @ib_wr: work request for receive
* @bad_wr: bad wr caused an error
*/
-static int i40iw_post_recv(struct ib_qp *ibqp,
- struct ib_recv_wr *ib_wr,
- struct ib_recv_wr **bad_wr)
+static int i40iw_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
{
struct i40iw_qp *iwqp;
struct i40iw_qp_uk *ukqp;
@@ -2701,21 +2703,6 @@ static int i40iw_query_gid(struct ib_device *ibdev,
}
/**
- * i40iw_modify_port Modify port properties
- * @ibdev: device pointer from stack
- * @port: port number
- * @port_modify_mask: mask for port modifications
- * @props: port properties
- */
-static int i40iw_modify_port(struct ib_device *ibdev,
- u8 port,
- int port_modify_mask,
- struct ib_port_modify *props)
-{
- return -ENOSYS;
-}
-
-/**
* i40iw_query_pkey - Query partition key
* @ibdev: device pointer from stack
* @port: port number
@@ -2732,28 +2719,6 @@ static int i40iw_query_pkey(struct ib_device *ibdev,
}
/**
- * i40iw_create_ah - create address handle
- * @ibpd: ptr of pd
- * @ah_attr: address handle attributes
- */
-static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
- struct rdma_ah_attr *attr,
- struct ib_udata *udata)
-
-{
- return ERR_PTR(-ENOSYS);
-}
-
-/**
- * i40iw_destroy_ah - Destroy address handle
- * @ah: pointer to address handle
- */
-static int i40iw_destroy_ah(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-/**
* i40iw_get_vector_affinity - report IRQ affinity mask
* @ibdev: IB device
* @comp_vector: completion vector index
@@ -2820,7 +2785,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
iwibdev->ibdev.dev.parent = &pcidev->dev;
iwibdev->ibdev.query_port = i40iw_query_port;
- iwibdev->ibdev.modify_port = i40iw_modify_port;
iwibdev->ibdev.query_pkey = i40iw_query_pkey;
iwibdev->ibdev.query_gid = i40iw_query_gid;
iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
@@ -2840,8 +2804,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
iwibdev->ibdev.query_device = i40iw_query_device;
- iwibdev->ibdev.create_ah = i40iw_create_ah;
- iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
iwibdev->ibdev.drain_sq = i40iw_drain_sq;
iwibdev->ibdev.drain_rq = i40iw_drain_rq;
iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
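
For illustration: the i40iw_set_hugetlb_values() hunk above fixes an
unlocked find_vma() - the VMA tree can change underneath a walker, so any
lookup must run under at least the read side of mmap_sem. A minimal
pthread_rwlock sketch of the same discipline; the interval list and all
names are illustrative, not the kernel's data structures:

/* vma_lookup.c - read-lock discipline around a range lookup, mirroring
 * down_read(&mm->mmap_sem) around find_vma().
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct range { unsigned long start, end; struct range *next; };

static struct range r2 = { 0x2000, 0x3000, NULL };
static struct range r1 = { 0x1000, 0x2000, &r2 };
static struct range *ranges = &r1;
static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

/* caller must hold tree_lock for read */
static struct range *find_range(unsigned long addr)
{
	struct range *r;

	for (r = ranges; r; r = r->next)
		if (addr >= r->start && addr < r->end)
			return r;
	return NULL;
}

int main(void)
{
	struct range *r;

	pthread_rwlock_rdlock(&tree_lock);	/* like down_read(mmap_sem) */
	r = find_range(0x2800);
	if (r)
		printf("hit [%lx,%lx)\n", r->start, r->end);
	pthread_rwlock_unlock(&tree_lock);	/* like up_read(mmap_sem) */
	return 0;
}
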
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 9345d5b546d1..e9e3a6f390db 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -82,12 +82,11 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
struct mlx4_ib_ah *ah)
{
struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
+ const struct ib_gid_attr *gid_attr;
struct mlx4_dev *dev = ibdev->dev;
int is_mcast = 0;
struct in6_addr in6;
u16 vlan_tag = 0xffff;
- union ib_gid sgid;
- struct ib_gid_attr gid_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
int ret;
@@ -96,25 +95,30 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
is_mcast = 1;
memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN);
- ret = ib_get_cached_gid(pd->device, rdma_ah_get_port_num(ah_attr),
- grh->sgid_index, &sgid, &gid_attr);
- if (ret)
- return ERR_PTR(ret);
eth_zero_addr(ah->av.eth.s_mac);
- if (is_vlan_dev(gid_attr.ndev))
- vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
- memcpy(ah->av.eth.s_mac, gid_attr.ndev->dev_addr, ETH_ALEN);
- dev_put(gid_attr.ndev);
+
+ /*
+ * If sgid_attr is NULL we are being called by mlx4_ib_create_ah_slave
+ * and we are directly creating an AV for a slave's gid_index.
+ */
+ gid_attr = ah_attr->grh.sgid_attr;
+ if (gid_attr) {
+ if (is_vlan_dev(gid_attr->ndev))
+ vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
+ memcpy(ah->av.eth.s_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
+ ret = mlx4_ib_gid_index_to_real_index(ibdev, gid_attr);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ ah->av.eth.gid_index = ret;
+ } else {
+ /* mlx4_ib_create_ah_slave fills in the s_mac and the vlan */
+ ah->av.eth.gid_index = ah_attr->grh.sgid_index;
+ }
+
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
(rdma_ah_get_port_num(ah_attr) << 24));
- ret = mlx4_ib_gid_index_to_real_index(ibdev,
- rdma_ah_get_port_num(ah_attr),
- grh->sgid_index);
- if (ret < 0)
- return ERR_PTR(ret);
- ah->av.eth.gid_index = ret;
ah->av.eth.vlan = cpu_to_be16(vlan_tag);
ah->av.eth.hop_limit = grh->hop_limit;
if (rdma_ah_get_static_rate(ah_attr)) {
@@ -173,6 +177,40 @@ struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
return create_ib_ah(pd, ah_attr, ah); /* never fails */
}
+/* AHs created via this call must be freed by mlx4_ib_destroy_ah. */
+struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ int slave_sgid_index, u8 *s_mac,
+ u16 vlan_tag)
+{
+ struct rdma_ah_attr slave_attr = *ah_attr;
+ struct mlx4_ib_ah *mah;
+ struct ib_ah *ah;
+
+ slave_attr.grh.sgid_attr = NULL;
+ slave_attr.grh.sgid_index = slave_sgid_index;
+ ah = mlx4_ib_create_ah(pd, &slave_attr, NULL);
+ if (IS_ERR(ah))
+ return ah;
+
+ ah->device = pd->device;
+ ah->pd = pd;
+ ah->type = ah_attr->type;
+ mah = to_mah(ah);
+
+ /* get rid of force-loopback bit */
+ mah->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
+
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
+ memcpy(mah->av.eth.s_mac, s_mac, 6);
+
+ if (vlan_tag < 0x1000)
+ vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
+ mah->av.eth.vlan = cpu_to_be16(vlan_tag);
+
+ return ah;
+}
+
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
{
struct mlx4_ib_ah *ah = to_mah(ibah);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 90a3e2642c2e..e5466d786bb1 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -506,7 +506,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
{
struct ib_sge list;
struct ib_ud_wr wr;
- struct ib_send_wr *bad_wr;
+ const struct ib_send_wr *bad_wr;
struct mlx4_ib_demux_pv_ctx *tun_ctx;
struct mlx4_ib_demux_pv_qp *tun_qp;
struct mlx4_rcv_tunnel_mad *tun_mad;
@@ -1310,7 +1310,8 @@ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
int index)
{
struct ib_sge sg_list;
- struct ib_recv_wr recv_wr, *bad_recv_wr;
+ struct ib_recv_wr recv_wr;
+ const struct ib_recv_wr *bad_recv_wr;
int size;
size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
@@ -1361,19 +1362,16 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
{
struct ib_sge list;
struct ib_ud_wr wr;
- struct ib_send_wr *bad_wr;
+ const struct ib_send_wr *bad_wr;
struct mlx4_ib_demux_pv_ctx *sqp_ctx;
struct mlx4_ib_demux_pv_qp *sqp;
struct mlx4_mad_snd_buf *sqp_mad;
struct ib_ah *ah;
struct ib_qp *send_qp = NULL;
- struct ib_global_route *grh;
unsigned wire_tx_ix = 0;
int ret = 0;
u16 wire_pkey_ix;
int src_qpnum;
- u8 sgid_index;
-
sqp_ctx = dev->sriov.sqps[port-1];
@@ -1394,16 +1392,11 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
send_qp = sqp->qp;
/* create ah */
- grh = rdma_ah_retrieve_grh(attr);
- sgid_index = grh->sgid_index;
- grh->sgid_index = 0;
- ah = rdma_create_ah(sqp_ctx->pd, attr);
+ ah = mlx4_ib_create_ah_slave(sqp_ctx->pd, attr,
+ rdma_ah_retrieve_grh(attr)->sgid_index,
+ s_mac, vlan_id);
if (IS_ERR(ah))
return -ENOMEM;
- grh->sgid_index = sgid_index;
- to_mah(ah)->av.ib.gid_index = sgid_index;
- /* get rid of force-loopback bit */
- to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
spin_lock(&sqp->tx_lock);
if (sqp->tx_ix_head - sqp->tx_ix_tail >=
(MLX4_NUM_TUNNEL_BUFS - 1))
@@ -1445,12 +1438,6 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
wr.wr.num_sge = 1;
wr.wr.opcode = IB_WR_SEND;
wr.wr.send_flags = IB_SEND_SIGNALED;
- if (s_mac)
- memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
- if (vlan_id < 0x1000)
- vlan_id |= (rdma_ah_get_sl(attr) & 7) << 13;
- to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);
-
ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
if (!ret)
@@ -1461,7 +1448,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
spin_unlock(&sqp->tx_lock);
sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
- rdma_destroy_ah(ah);
+ mlx4_ib_destroy_ah(ah);
return ret;
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4ec519afc45b..ca0f1ee26091 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -246,9 +246,7 @@ static int mlx4_ib_update_gids(struct gid_entry *gids,
return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}
-static int mlx4_ib_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- void **context)
+static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
struct mlx4_ib_iboe *iboe = &ibdev->iboe;
@@ -271,8 +269,9 @@ static int mlx4_ib_add_gid(const union ib_gid *gid,
port_gid_table = &iboe->gids[attr->port_num - 1];
spin_lock_bh(&iboe->lock);
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
- if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
- (port_gid_table->gids[i].gid_type == attr->gid_type)) {
+ if (!memcmp(&port_gid_table->gids[i].gid,
+ &attr->gid, sizeof(attr->gid)) &&
+ port_gid_table->gids[i].gid_type == attr->gid_type) {
found = i;
break;
}
@@ -289,7 +288,8 @@ static int mlx4_ib_add_gid(const union ib_gid *gid,
ret = -ENOMEM;
} else {
*context = port_gid_table->gids[free].ctx;
- memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
+ memcpy(&port_gid_table->gids[free].gid,
+ &attr->gid, sizeof(attr->gid));
port_gid_table->gids[free].gid_type = attr->gid_type;
port_gid_table->gids[free].ctx->real_index = free;
port_gid_table->gids[free].ctx->refcount = 1;
@@ -380,17 +380,15 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
}
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
- u8 port_num, int index)
+ const struct ib_gid_attr *attr)
{
struct mlx4_ib_iboe *iboe = &ibdev->iboe;
struct gid_cache_context *ctx = NULL;
- union ib_gid gid;
struct mlx4_port_gid_table *port_gid_table;
int real_index = -EINVAL;
int i;
- int ret;
unsigned long flags;
- struct ib_gid_attr attr;
+ u8 port_num = attr->port_num;
if (port_num > MLX4_MAX_PORTS)
return -EINVAL;
@@ -399,21 +397,15 @@ int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
port_num = 1;
if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
- return index;
-
- ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
- if (ret)
- return ret;
-
- if (attr.ndev)
- dev_put(attr.ndev);
+ return attr->index;
spin_lock_irqsave(&iboe->lock, flags);
port_gid_table = &iboe->gids[port_num - 1];
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
- if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
- attr.gid_type == port_gid_table->gids[i].gid_type) {
+ if (!memcmp(&port_gid_table->gids[i].gid,
+ &attr->gid, sizeof(attr->gid)) &&
+ attr->gid_type == port_gid_table->gids[i].gid_type) {
ctx = port_gid_table->gids[i].ctx;
break;
}
@@ -525,8 +517,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->page_size_cap = dev->dev->caps.page_size_cap;
props->max_qp = dev->dev->quotas.qp;
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
- props->max_sge = min(dev->dev->caps.max_sq_sg,
- dev->dev->caps.max_rq_sg);
+ props->max_send_sge = dev->dev->caps.max_sq_sg;
+ props->max_recv_sge = dev->dev->caps.max_rq_sg;
props->max_sge_rd = MLX4_MAX_SGE_RD;
props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes;
@@ -770,7 +762,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
IB_WIDTH_4X : IB_WIDTH_1X;
props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
IB_SPEED_FDR : IB_SPEED_QDR;
- props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags = IB_PORT_CM_SUP;
+ props->ip_gids = true;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
props->pkey_tbl_len = 1;
@@ -2709,6 +2702,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
+ ibdev->ib_dev.drain_sq = mlx4_ib_drain_sq;
+ ibdev->ib_dev.drain_rq = mlx4_ib_drain_rq;
ibdev->ib_dev.post_send = mlx4_ib_post_send;
ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 7b1429917aba..e10dccc7958f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -322,7 +322,6 @@ struct mlx4_ib_qp {
u32 doorbell_qpn;
__be32 sq_signal_bits;
unsigned sq_next_wqe;
- int sq_max_wqes_per_wr;
int sq_spare_wqes;
struct mlx4_ib_wq sq;
@@ -760,6 +759,10 @@ void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
struct ib_udata *udata);
+struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ int slave_sgid_index, u8 *s_mac,
+ u16 vlan_tag);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx4_ib_destroy_ah(struct ib_ah *ah);
@@ -771,21 +774,23 @@ int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
-int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp);
+void mlx4_ib_drain_sq(struct ib_qp *qp);
+void mlx4_ib_drain_rq(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
@@ -900,7 +905,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata);
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
- u8 port_num, int index);
+ const struct ib_gid_attr *attr);
void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
int port);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 3b8045fd23ed..6dd3cd2c2f80 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -204,91 +204,26 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
/*
* Stamp a SQ WQE so that it is invalid if prefetched by marking the
- * first four bytes of every 64 byte chunk with
- * 0x7FFFFFF | (invalid_ownership_value << 31).
- *
- * When the max work request size is less than or equal to the WQE
- * basic block size, as an optimization, we can stamp all WQEs with
- * 0xffffffff, and skip the very first chunk of each WQE.
+ * first four bytes of every 64 byte chunk with 0xffffffff, except for
+ * the very first chunk of the WQE.
*/
-static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
+static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
{
__be32 *wqe;
int i;
int s;
- int ind;
void *buf;
- __be32 stamp;
struct mlx4_wqe_ctrl_seg *ctrl;
- if (qp->sq_max_wqes_per_wr > 1) {
- s = roundup(size, 1U << qp->sq.wqe_shift);
- for (i = 0; i < s; i += 64) {
- ind = (i >> qp->sq.wqe_shift) + n;
- stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
- cpu_to_be32(0xffffffff);
- buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
- wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
- *wqe = stamp;
- }
- } else {
- ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
- s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
- for (i = 64; i < s; i += 64) {
- wqe = buf + i;
- *wqe = cpu_to_be32(0xffffffff);
- }
+ buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+ ctrl = (struct mlx4_wqe_ctrl_seg *)buf;
+ s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
+ for (i = 64; i < s; i += 64) {
+ wqe = buf + i;
+ *wqe = cpu_to_be32(0xffffffff);
}
}
-static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
-{
- struct mlx4_wqe_ctrl_seg *ctrl;
- struct mlx4_wqe_inline_seg *inl;
- void *wqe;
- int s;
-
- ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
- s = sizeof(struct mlx4_wqe_ctrl_seg);
-
- if (qp->ibqp.qp_type == IB_QPT_UD) {
- struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
- struct mlx4_av *av = (struct mlx4_av *)dgram->av;
- memset(dgram, 0, sizeof *dgram);
- av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
- s += sizeof(struct mlx4_wqe_datagram_seg);
- }
-
- /* Pad the remainder of the WQE with an inline data segment. */
- if (size > s) {
- inl = wqe + s;
- inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
- }
- ctrl->srcrb_flags = 0;
- ctrl->qpn_vlan.fence_size = size / 16;
- /*
- * Make sure descriptor is fully written before setting ownership bit
- * (because HW can start executing as soon as we do).
- */
- wmb();
-
- ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
- (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
-
- stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
-}
-
-/* Post NOP WQE to prevent wrap-around in the middle of WR */
-static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
-{
- unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
- if (unlikely(s < qp->sq_max_wqes_per_wr)) {
- post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
- ind += s;
- }
- return ind;
-}
-
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
struct ib_event event;
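
For illustration: the simplified stamp_send_wqe() above learns the WQE's
length from its own control segment (fence_size is in 16-byte units, hence
the << 4) and writes 0xffffffff into the first word of every 64-byte chunk
after the first, so a stale prefetched WQE is recognizably invalid. A
hedged userspace sketch of that stamping loop (names illustrative):

/* stamp.c - sketch of the simplified WQE stamping loop. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void stamp_wqe(uint8_t *wqe, unsigned fence_size)
{
	unsigned size = fence_size << 4;	/* fence_size is in 16B units */
	unsigned i;

	/* skip the first 64B chunk: it holds the live control segment */
	for (i = 64; i < size; i += 64)
		memcpy(wqe + i, "\xff\xff\xff\xff", 4);
}

int main(void)
{
	uint8_t wqe[256];

	memset(wqe, 0, sizeof(wqe));
	stamp_wqe(wqe, sizeof(wqe) / 16);	/* fence_size 16 -> 256 bytes */
	printf("wqe[64..67] = %02x%02x%02x%02x\n",
	       wqe[64], wqe[65], wqe[66], wqe[67]);
	return 0;
}
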
@@ -433,8 +368,7 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
}
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
- enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp,
- bool shrink_wqe)
+ enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
int s;
@@ -461,70 +395,20 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
if (s > dev->dev->caps.max_sq_desc_sz)
return -EINVAL;
+ qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
+
/*
- * Hermon supports shrinking WQEs, such that a single work
- * request can include multiple units of 1 << wqe_shift. This
- * way, work requests can differ in size, and do not have to
- * be a power of 2 in size, saving memory and speeding up send
- * WR posting. Unfortunately, if we do this then the
- * wqe_index field in CQEs can't be used to look up the WR ID
- * anymore, so we do this only if selective signaling is off.
- *
- * Further, on 32-bit platforms, we can't use vmap() to make
- * the QP buffer virtually contiguous. Thus we have to use
- * constant-sized WRs to make sure a WR is always fully within
- * a single page-sized chunk.
- *
- * Finally, we use NOP work requests to pad the end of the
- * work queue, to avoid wrap-around in the middle of WR. We
- * set NEC bit to avoid getting completions with error for
- * these NOP WRs, but since NEC is only supported starting
- * with firmware 2.2.232, we use constant-sized WRs for older
- * firmware.
- *
- * And, since MLX QPs only support SEND, we use constant-sized
- * WRs in this case.
- *
- * We look for the smallest value of wqe_shift such that the
- * resulting number of wqes does not exceed device
- * capabilities.
- *
- * We set WQE size to at least 64 bytes, this way stamping
- * invalidates each WQE.
+ * We need to leave 2 KB + 1 WR of headroom in the SQ to
+ * allow HW to prefetch.
*/
- if (shrink_wqe && dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
- qp->sq_signal_bits && BITS_PER_LONG == 64 &&
- type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
- !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
- MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
- qp->sq.wqe_shift = ilog2(64);
- else
- qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
-
- for (;;) {
- qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
-
- /*
- * We need to leave 2 KB + 1 WR of headroom in the SQ to
- * allow HW to prefetch.
- */
- qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
- qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
- qp->sq_max_wqes_per_wr +
- qp->sq_spare_wqes);
-
- if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
- break;
-
- if (qp->sq_max_wqes_per_wr <= 1)
- return -EINVAL;
-
- ++qp->sq.wqe_shift;
- }
-
- qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
- (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
- send_wqe_overhead(type, qp->flags)) /
+ qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
+ qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
+ qp->sq_spare_wqes);
+
+ qp->sq.max_gs =
+ (min(dev->dev->caps.max_sq_desc_sz,
+ (1 << qp->sq.wqe_shift)) -
+ send_wqe_overhead(type, qp->flags)) /
sizeof (struct mlx4_wqe_data_seg);
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
@@ -538,7 +422,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
}
cap->max_send_wr = qp->sq.max_post =
- (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
+ qp->sq.wqe_cnt - qp->sq_spare_wqes;
cap->max_send_sge = min(qp->sq.max_gs,
min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg));
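
For illustration: with the WQE-shrinking machinery gone, the SQ sizing is a
one-shot computation. For example, with 64-byte WQEs (wqe_shift = 6) and
max_send_wr = 100: spare = (2048 >> 6) + 1 = 33, wqe_cnt =
roundup_pow_of_two(133) = 256, and max_post = 256 - 33 = 223. A small
sketch of the arithmetic, with roundup_pow_of_two() reimplemented for
userspace:

/* sq_size.c - the post-patch SQ sizing arithmetic in outline. */
#include <stdio.h>

static unsigned roundup_pow_of_two(unsigned v)
{
	unsigned p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned wqe_shift = 6;			/* 64-byte WQEs */
	unsigned max_send_wr = 100;
	unsigned spare = (2048 >> wqe_shift) + 1;	/* 2 KB + 1 WR */
	unsigned wqe_cnt = roundup_pow_of_two(max_send_wr + spare);

	printf("spare=%u wqe_cnt=%u max_post=%u\n",
	       spare, wqe_cnt, wqe_cnt - spare);	/* 33 256 223 */
	return 0;
}
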
@@ -977,7 +861,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
{
int qpn;
int err;
- struct ib_qp_cap backup_cap;
struct mlx4_ib_sqp *sqp = NULL;
struct mlx4_ib_qp *qp;
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
@@ -1178,9 +1061,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
}
- memcpy(&backup_cap, &init_attr->cap, sizeof(backup_cap));
- err = set_kernel_sq_size(dev, &init_attr->cap,
- qp_type, qp, true);
+ err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
if (err)
goto err;
@@ -1192,20 +1073,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
*qp->db.db = 0;
}
- if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
+ if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2,
&qp->buf)) {
- memcpy(&init_attr->cap, &backup_cap,
- sizeof(backup_cap));
- err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
- qp, false);
- if (err)
- goto err_db;
-
- if (mlx4_buf_alloc(dev->dev, qp->buf_size,
- PAGE_SIZE * 2, &qp->buf)) {
- err = -ENOMEM;
- goto err_db;
- }
+ err = -ENOMEM;
+ goto err_db;
}
err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
@@ -1859,8 +1730,7 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
const struct ib_global_route *grh = rdma_ah_read_grh(ah);
int real_sgid_index =
- mlx4_ib_gid_index_to_real_index(dev, port,
- grh->sgid_index);
+ mlx4_ib_gid_index_to_real_index(dev, grh->sgid_attr);
if (real_sgid_index < 0)
return real_sgid_index;
@@ -2176,6 +2046,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
{
struct ib_uobject *ibuobject;
struct ib_srq *ibsrq;
+ const struct ib_gid_attr *gid_attr = NULL;
struct ib_rwq_ind_table *rwq_ind_tbl;
enum ib_qp_type qp_type;
struct mlx4_ib_dev *dev;
@@ -2356,29 +2227,17 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
if (attr_mask & IB_QP_AV) {
u8 port_num = mlx4_is_bonded(dev->dev) ? 1 :
attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- union ib_gid gid;
- struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
u16 vlan = 0xffff;
u8 smac[ETH_ALEN];
- int status = 0;
int is_eth =
rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
if (is_eth) {
- int index =
- rdma_ah_read_grh(&attr->ah_attr)->sgid_index;
-
- status = ib_get_cached_gid(&dev->ib_dev, port_num,
- index, &gid, &gid_attr);
- if (!status) {
- vlan = rdma_vlan_dev_vlan_id(gid_attr.ndev);
- memcpy(smac, gid_attr.ndev->dev_addr, ETH_ALEN);
- dev_put(gid_attr.ndev);
- }
+ gid_attr = attr->ah_attr.grh.sgid_attr;
+ vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
+ memcpy(smac, gid_attr->ndev->dev_addr, ETH_ALEN);
}
- if (status)
- goto out;
if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
port_num, vlan, smac))
@@ -2389,7 +2248,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
if (is_eth &&
(cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) {
- u8 qpc_roce_mode = gid_type_to_qpc(gid_attr.gid_type);
+ u8 qpc_roce_mode = gid_type_to_qpc(gid_attr->gid_type);
if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) {
err = -EINVAL;
@@ -2594,11 +2453,9 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
for (i = 0; i < qp->sq.wqe_cnt; ++i) {
ctrl = get_send_wqe(qp, i);
ctrl->owner_opcode = cpu_to_be32(1 << 31);
- if (qp->sq_max_wqes_per_wr == 1)
- ctrl->qpn_vlan.fence_size =
- 1 << (qp->sq.wqe_shift - 4);
-
- stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
+ ctrl->qpn_vlan.fence_size =
+ 1 << (qp->sq.wqe_shift - 4);
+ stamp_send_wqe(qp, i);
}
}
@@ -2937,7 +2794,7 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
}
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
- struct ib_ud_wr *wr,
+ const struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
@@ -3085,7 +2942,7 @@ static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
}
#define MLX4_ROCEV2_QP1_SPORT 0xC000
-static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
+static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
struct ib_device *ib_dev = sqp->qp.ibqp.device;
@@ -3181,10 +3038,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
guid_cache[ah->av.ib.gid_index];
} else {
- ib_get_cached_gid(ib_dev,
- be32_to_cpu(ah->av.ib.port_pd) >> 24,
- ah->av.ib.gid_index,
- &sqp->ud_header.grh.source_gid, NULL);
+ sqp->ud_header.grh.source_gid =
+ ah->ibah.sgid_attr->gid;
}
}
memcpy(sqp->ud_header.grh.destination_gid.raw,
@@ -3369,7 +3224,7 @@ static __be32 convert_access(int acc)
}
static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
- struct ib_reg_wr *wr)
+ const struct ib_reg_wr *wr)
{
struct mlx4_ib_mr *mr = to_mmr(wr->mr);
@@ -3399,7 +3254,7 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
}
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
- struct ib_atomic_wr *wr)
+ const struct ib_atomic_wr *wr)
{
if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->swap);
@@ -3415,7 +3270,7 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
}
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
- struct ib_atomic_wr *wr)
+ const struct ib_atomic_wr *wr)
{
aseg->swap_add = cpu_to_be64(wr->swap);
aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
@@ -3424,7 +3279,7 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
- struct ib_ud_wr *wr)
+ const struct ib_ud_wr *wr)
{
memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
dseg->dqpn = cpu_to_be32(wr->remote_qpn);
@@ -3435,7 +3290,7 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
struct mlx4_wqe_datagram_seg *dseg,
- struct ib_ud_wr *wr,
+ const struct ib_ud_wr *wr,
enum mlx4_ib_qp_type qpt)
{
union mlx4_ext_av *av = &to_mah(wr->ah)->av;
@@ -3457,7 +3312,8 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
-static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
+static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe,
+ unsigned *mlx_seg_len)
{
struct mlx4_wqe_inline_seg *inl = wqe;
struct mlx4_ib_tunnel_header hdr;
@@ -3540,9 +3396,9 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
dseg->addr = cpu_to_be64(sg->addr);
}
-static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
- struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
- __be32 *lso_hdr_sz, __be32 *blh)
+static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe,
+ const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
+ unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)
{
unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
@@ -3560,7 +3416,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
return 0;
}
-static __be32 send_ieth(struct ib_send_wr *wr)
+static __be32 send_ieth(const struct ib_send_wr *wr)
{
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
@@ -3582,8 +3438,8 @@ static void add_zero_len_inline(void *wqe)
inl->byte_count = cpu_to_be32(1 << 31);
}
-int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
{
struct mlx4_ib_qp *qp = to_mqp(ibqp);
void *wqe;
@@ -3593,7 +3449,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int nreq;
int err = 0;
unsigned ind;
- int uninitialized_var(stamp);
int uninitialized_var(size);
unsigned uninitialized_var(seglen);
__be32 dummy;
@@ -3623,7 +3478,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
spin_lock_irqsave(&qp->sq.lock, flags);
- if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -3865,22 +3721,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
- stamp = ind + qp->sq_spare_wqes;
- ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
-
/*
* We can improve latency by not stamping the last
* send queue WQE until after ringing the doorbell, so
* only stamp here if there are still more WQEs to post.
- *
- * Same optimization applies to padding with NOP wqe
- * in case of WQE shrinking (used to prevent wrap-around
- * in the middle of WR).
*/
- if (wr->next) {
- stamp_send_wqe(qp, stamp, size * 16);
- ind = pad_wraparound(qp, ind);
- }
+ if (wr->next)
+ stamp_send_wqe(qp, ind + qp->sq_spare_wqes);
+ ind++;
}
out:
@@ -3902,9 +3750,8 @@ out:
*/
mmiowb();
- stamp_send_wqe(qp, stamp, size * 16);
+ stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
- ind = pad_wraparound(qp, ind);
qp->sq_next_wqe = ind;
}
@@ -3913,8 +3760,14 @@ out:
return err;
}
-int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return _mlx4_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
+static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
{
struct mlx4_ib_qp *qp = to_mqp(ibqp);
struct mlx4_wqe_data_seg *scat;
@@ -3929,7 +3782,8 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
max_gs = qp->rq.max_gs;
spin_lock_irqsave(&qp->rq.lock, flags);
- if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -4000,6 +3854,12 @@ out:
return err;
}
+int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
switch (mlx4_state) {
@@ -4047,9 +3907,9 @@ static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
u8 port_num = path->sched_queue & 0x40 ? 2 : 1;
memset(ah_attr, 0, sizeof(*ah_attr));
- ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
if (port_num == 0 || port_num > dev->caps.num_ports)
return;
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |
@@ -4465,3 +4325,132 @@ int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
kfree(ib_rwq_ind_tbl);
return 0;
}
+
+struct mlx4_ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
+ struct mlx4_ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/* This function returns only once the drained WR has completed */
+static void handle_drain_completion(struct ib_cq *cq,
+ struct mlx4_ib_drain_cqe *sdrain,
+ struct mlx4_ib_dev *dev)
+{
+ struct mlx4_dev *mdev = dev->dev;
+
+ if (cq->poll_ctx == IB_POLL_DIRECT) {
+ while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
+ ib_process_cq_direct(cq, -1);
+ return;
+ }
+
+ if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ struct mlx4_ib_cq *mcq = to_mcq(cq);
+ bool triggered = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ /* Make sure that the CQ handler won't run if it hasn't run yet */
+ if (!mcq->mcq.reset_notify_added)
+ mcq->mcq.reset_notify_added = 1;
+ else
+ triggered = true;
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ if (triggered) {
+ /* Wait for any scheduled/running task to be ended */
+ switch (cq->poll_ctx) {
+ case IB_POLL_SOFTIRQ:
+ irq_poll_disable(&cq->iop);
+ irq_poll_enable(&cq->iop);
+ break;
+ case IB_POLL_WORKQUEUE:
+ cancel_work_sync(&cq->work);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ /* Run the CQ handler - this makes sure that the drain WR will
+ * be processed if it wasn't processed yet.
+ */
+ mcq->mcq.comp(&mcq->mcq);
+ }
+
+ wait_for_completion(&sdrain->done);
+}
+
+void mlx4_ib_drain_sq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->send_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx4_ib_drain_cqe sdrain;
+ const struct ib_send_wr *bad_swr;
+ struct ib_rdma_wr swr = {
+ .wr = {
+ .next = NULL,
+ { .wr_cqe = &sdrain.cqe, },
+ .opcode = IB_WR_RDMA_WRITE,
+ },
+ };
+ int ret;
+ struct mlx4_ib_dev *dev = to_mdev(qp->device);
+ struct mlx4_dev *mdev = dev->dev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ sdrain.cqe.done = mlx4_ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &sdrain, dev);
+}
+
+void mlx4_ib_drain_rq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->recv_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx4_ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {};
+ const struct ib_recv_wr *bad_rwr;
+ int ret;
+ struct mlx4_ib_dev *dev = to_mdev(qp->device);
+ struct mlx4_dev *mdev = dev->dev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = mlx4_ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
+ ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &rdrain, dev);
+}
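
For illustration: the drain helpers above implement the standard RDMA drain
recipe - move the QP to the error state, post one marker WR whose wr_cqe
completion signals a struct completion, then block until it fires (polling
the CQ directly when it is IB_POLL_DIRECT). A pthreads sketch of that
completion handshake; the types are illustrative stand-ins, not kernel
APIs:

/* drain.c - the post-marker-and-wait shape of mlx4_ib_drain_sq/rq. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static struct completion drain_done = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* CQ handler thread: "processes" the marker WR, then signals */
static void *cq_handler(void *arg)
{
	(void)arg;
	complete(&drain_done);		/* like mlx4_ib_drain_qp_done() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* steps 1-2: QP -> IB_QPS_ERR and posting the marker WR go here */
	pthread_create(&t, NULL, cq_handler, NULL);
	wait_for_completion(&drain_done);	/* step 3: block until done */
	pthread_join(t, NULL);
	printf("drained\n");
	return 0;
}
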
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index ebee56cbc0e2..3731b31c3653 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -307,8 +307,8 @@ void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
spin_unlock(&srq->lock);
}
-int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx4_ib_srq *srq = to_msrq(ibsrq);
struct mlx4_wqe_srq_next_seg *next;
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index fb4d77be019b..0440966bc6ec 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,5 +1,5 @@
config MLX5_INFINIBAND
- tristate "Mellanox Connect-IB HCA support"
+ tristate "Mellanox 5th generation network adapters (ConnectX series) support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n
---help---
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index d42b922bede8..b8e4b15e2674 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o cong.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index e6bde32a83f3..ffd03bf1a71e 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -37,7 +37,6 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
struct rdma_ah_attr *ah_attr)
{
enum ib_gid_type gid_type;
- int err;
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
@@ -53,18 +52,12 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
- err = mlx5_get_roce_gid_type(dev, ah_attr->port_num,
- ah_attr->grh.sgid_index,
- &gid_type);
- if (err)
- return ERR_PTR(err);
+ gid_type = ah_attr->grh.sgid_attr->gid_type;
memcpy(ah->av.rmac, ah_attr->roce.dmac,
sizeof(ah_attr->roce.dmac));
ah->av.udp_sport =
- mlx5_get_roce_udp_sport(dev,
- rdma_ah_get_port_num(ah_attr),
- rdma_ah_read_grh(ah_attr)->sgid_index);
+ mlx5_get_roce_udp_sport(dev, ah_attr->grh.sgid_attr);
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
#define MLX5_ECN_ENABLED BIT(1)
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 188512bf46e6..c84fef9a8a08 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -32,6 +32,21 @@
#include "cmd.h"
+int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
+ int err;
+
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *mkey = MLX5_GET(query_special_contexts_out, out,
+ dump_fill_mkey);
+ return err;
+}
+
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
@@ -170,3 +185,15 @@ int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
return err;
}
+
+int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
+{
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+ return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPCNT,
+ 0, 0);
+}
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index e7206c8a8011..88cbb1c41703 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -37,9 +37,11 @@
#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
+int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size);
+int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out);
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
void *in, int in_size);
int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 985fa2637390..7e4e358a4fd8 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -359,9 +359,6 @@ static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
int ret;
char lbuf[11];
- if (*pos)
- return 0;
-
ret = mlx5_ib_get_cc_params(param->dev, param->port_num, offset, &var);
if (ret)
return ret;
@@ -370,11 +367,7 @@ static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
if (ret < 0)
return ret;
- if (copy_to_user(buf, lbuf, ret))
- return -EFAULT;
-
- *pos += ret;
- return ret;
+ return simple_read_from_buffer(buf, count, pos, lbuf, ret);
}
static const struct file_operations dbg_cc_fops = {
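
For illustration: the cong.c hunk replaces hand-rolled *pos bookkeeping and
copy_to_user() with simple_read_from_buffer(), which clamps the copy to
what remains past *ppos and advances the offset itself. A userspace sketch
of those semantics, with memcpy standing in for copy_to_user():

/* read_from_buffer.c - the semantics of simple_read_from_buffer(). */
#include <stdio.h>
#include <string.h>

static long read_from_buffer(char *to, unsigned long count, long long *ppos,
			     const char *from, unsigned long available)
{
	long long pos = *ppos;

	if (pos < 0)
		return -1;		/* the kernel returns -EINVAL */
	if ((unsigned long long)pos >= available || !count)
		return 0;		/* EOF */
	if (count > available - pos)
		count = available - pos;
	memcpy(to, from + pos, count);	/* copy_to_user() in the kernel */
	*ppos = pos + count;
	return count;
}

int main(void)
{
	char buf[8];
	long long pos = 0;
	long n;

	/* repeated short reads walk the buffer exactly once */
	while ((n = read_from_buffer(buf, sizeof(buf) - 1, &pos,
				     "congestion\n", 11)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	return 0;
}
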
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index ad39d64b8108..088205d7f1a1 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1184,7 +1184,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
int err;
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
- return -ENOSYS;
+ return -EOPNOTSUPP;
if (cq_period > MLX5_MAX_CQ_PERIOD)
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
new file mode 100644
index 000000000000..ac116d63e466
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
+struct devx_obj {
+ struct mlx5_core_dev *mdev;
+ u32 obj_id;
+ u32 dinlen; /* destroy inbox length */
+ u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
+};
+
+struct devx_umem {
+ struct mlx5_core_dev *mdev;
+ struct ib_umem *umem;
+ u32 page_offset;
+ int page_shift;
+ int ncont;
+ u32 dinlen;
+ u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
+};
+
+struct devx_umem_reg_cmd {
+ void *in;
+ u32 inlen;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+};
+
+static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
+{
+ return to_mucontext(ib_uverbs_get_ucontext(file));
+}
+
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+ u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u64 general_obj_types;
+ void *hdr;
+ int err;
+
+ hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
+
+ general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
+ !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
+ return -EINVAL;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
+
+ err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return 0;
+}
+
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
+
+ mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
+{
+ struct devx_obj *devx_obj = obj;
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_DESTROY_TIR:
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
+ obj_id);
+ return true;
+
+ case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
+ table_id);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+ u32 obj_id;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ break;
+ case MLX5_CMD_OP_QUERY_MKEY:
+ obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
+ break;
+ case MLX5_CMD_OP_QUERY_CQ:
+ obj_id = MLX5_GET(query_cq_in, in, cqn);
+ break;
+ case MLX5_CMD_OP_MODIFY_CQ:
+ obj_id = MLX5_GET(modify_cq_in, in, cqn);
+ break;
+ case MLX5_CMD_OP_QUERY_SQ:
+ obj_id = MLX5_GET(query_sq_in, in, sqn);
+ break;
+ case MLX5_CMD_OP_MODIFY_SQ:
+ obj_id = MLX5_GET(modify_sq_in, in, sqn);
+ break;
+ case MLX5_CMD_OP_QUERY_RQ:
+ obj_id = MLX5_GET(query_rq_in, in, rqn);
+ break;
+ case MLX5_CMD_OP_MODIFY_RQ:
+ obj_id = MLX5_GET(modify_rq_in, in, rqn);
+ break;
+ case MLX5_CMD_OP_QUERY_RMP:
+ obj_id = MLX5_GET(query_rmp_in, in, rmpn);
+ break;
+ case MLX5_CMD_OP_MODIFY_RMP:
+ obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
+ break;
+ case MLX5_CMD_OP_QUERY_RQT:
+ obj_id = MLX5_GET(query_rqt_in, in, rqtn);
+ break;
+ case MLX5_CMD_OP_MODIFY_RQT:
+ obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
+ break;
+ case MLX5_CMD_OP_QUERY_TIR:
+ obj_id = MLX5_GET(query_tir_in, in, tirn);
+ break;
+ case MLX5_CMD_OP_MODIFY_TIR:
+ obj_id = MLX5_GET(modify_tir_in, in, tirn);
+ break;
+ case MLX5_CMD_OP_QUERY_TIS:
+ obj_id = MLX5_GET(query_tis_in, in, tisn);
+ break;
+ case MLX5_CMD_OP_MODIFY_TIS:
+ obj_id = MLX5_GET(modify_tis_in, in, tisn);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ obj_id = MLX5_GET(query_flow_table_in, in, table_id);
+ break;
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ obj_id = MLX5_GET(query_flow_group_in, in, group_id);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ obj_id = MLX5_GET(query_fte_in, in, flow_index);
+ break;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ obj_id = MLX5_GET(set_fte_in, in, flow_index);
+ break;
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
+ break;
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ break;
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ obj_id = MLX5_GET(query_scheduling_element_in, in,
+ scheduling_element_id);
+ break;
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ obj_id = MLX5_GET(modify_scheduling_element_in, in,
+ scheduling_element_id);
+ break;
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ break;
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
+ break;
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
+ break;
+ case MLX5_CMD_OP_QUERY_QP:
+ obj_id = MLX5_GET(query_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ obj_id = MLX5_GET(qp_2err_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_2RST_QP:
+ obj_id = MLX5_GET(qp_2rst_in, in, qpn);
+ break;
+ case MLX5_CMD_OP_QUERY_DCT:
+ obj_id = MLX5_GET(query_dct_in, in, dctn);
+ break;
+ case MLX5_CMD_OP_QUERY_XRQ:
+ obj_id = MLX5_GET(query_xrq_in, in, xrqn);
+ break;
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
+ break;
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
+ break;
+ case MLX5_CMD_OP_QUERY_SRQ:
+ obj_id = MLX5_GET(query_srq_in, in, srqn);
+ break;
+ case MLX5_CMD_OP_ARM_RQ:
+ obj_id = MLX5_GET(arm_rq_in, in, srq_number);
+ break;
+ case MLX5_CMD_OP_DRAIN_DCT:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ obj_id = MLX5_GET(drain_dct_in, in, dctn);
+ break;
+ case MLX5_CMD_OP_ARM_XRQ:
+ obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
+ break;
+ default:
+ return false;
+ }
+
+ if (obj_id == obj->obj_id)
+ return true;
+
+ return false;
+}
+
+static bool devx_is_obj_create_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ case MLX5_CMD_OP_CREATE_MKEY:
+ case MLX5_CMD_OP_CREATE_CQ:
+ case MLX5_CMD_OP_ALLOC_PD:
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_CREATE_RMP:
+ case MLX5_CMD_OP_CREATE_SQ:
+ case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RQT:
+ case MLX5_CMD_OP_CREATE_TIR:
+ case MLX5_CMD_OP_CREATE_TIS:
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_CREATE_QP:
+ case MLX5_CMD_OP_CREATE_SRQ:
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ case MLX5_CMD_OP_CREATE_DCT:
+ case MLX5_CMD_OP_CREATE_XRQ:
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ return true;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ {
+ u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
+ if (op_mod == 0)
+ return true;
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_obj_modify_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_MODIFY_CQ:
+ case MLX5_CMD_OP_MODIFY_RMP:
+ case MLX5_CMD_OP_MODIFY_SQ:
+ case MLX5_CMD_OP_MODIFY_RQ:
+ case MLX5_CMD_OP_MODIFY_RQT:
+ case MLX5_CMD_OP_MODIFY_TIR:
+ case MLX5_CMD_OP_MODIFY_TIS:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_RST2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_ARM_RQ:
+ case MLX5_CMD_OP_DRAIN_DCT:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ case MLX5_CMD_OP_ARM_XRQ:
+ return true;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ {
+ u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
+
+ return op_mod == 1;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_obj_query_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_QUERY_XRQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_general_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OP_QUERY_VNIC_ENV:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ return true;
+ default:
+ return false;
+ }
+}
+
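+/*
+ * Translate a user-supplied completion vector into the device EQ number,
+ * which a DEVX application needs when it creates a CQ directly through the
+ * device (e.g. to fill the c_eqn field of the CQ context).
+ */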
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ int user_vector;
+ int dev_eqn;
+ unsigned int irqn;
+ int err;
+
+ if (uverbs_copy_from(&user_vector, attrs,
+ MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
+ return -EFAULT;
+
+ c = devx_ufile2uctx(file);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+ if (err < 0)
+ return err;
+
+ if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+ &dev_eqn, sizeof(dev_eqn)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Security note:
+ * The hardware protection mechanism works like this: each device object
+ * that is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called
+ * uar_page in the device specification manual) upon its creation. Then
+ * upon doorbell, hardware fetches the object context for which the
+ * doorbell was rung and validates that the UAR through which the DB was
+ * rung matches the UAR ID of the object.
+ * If they do not match, the doorbell is silently ignored by the hardware.
+ * Of course, the user cannot ring a doorbell on a UAR that was not mapped
+ * to it.
+ * Now in devx, as the devx kernel code does not manipulate the QP/SQ/CQ
+ * command mailboxes (except tagging them with UID), we expose to the user
+ * its UAR ID, so it can embed it in these objects in the expected
+ * specification format. So the only thing the user can do is hurt itself
+ * by creating a QP/SQ/CQ with a UAR ID other than its own, in which case
+ * other users may ring a doorbell on its objects.
+ * The consequence is that another user can schedule a QP/SQ of the buggy
+ * user for execution (just insert it into the hardware schedule queue or
+ * arm its CQ for event generation); no further harm is expected.
+ */
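+/*
+ * For example, a DEVX application is expected to query its UAR ID via
+ * MLX5_IB_METHOD_DEVX_QUERY_UAR below and embed the returned value in the
+ * uar_page field of the QP/SQ/CQ creation mailbox it later submits through
+ * MLX5_IB_METHOD_DEVX_OBJ_CREATE.
+ */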
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ u32 user_idx;
+ s32 dev_idx;
+
+ c = devx_ufile2uctx(file);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (uverbs_copy_from(&user_idx, attrs,
+ MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
+ return -EFAULT;
+
+ dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
+ if (dev_idx < 0)
+ return dev_idx;
+
+ if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+ &dev_idx, sizeof(dev_idx)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ void *cmd_in = uverbs_attr_get_alloced_ptr(
+ attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
+ void *cmd_out;
+ int err;
+
+ c = devx_ufile2uctx(file);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ /* Only whitelisted general HCA commands are allowed for this method. */
+ if (!devx_is_general_cmd(cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
+ cmd_out_len);
+}
+
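+/*
+ * Derive the matching destroy command from a create command and its
+ * response: @din receives the destroy inbox, @dinlen its length and
+ * @obj_id the identifier the device assigned to the new object, so the
+ * object can later be destroyed without re-parsing the create command.
+ */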
+static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
+ u32 *dinlen,
+ u32 *obj_id)
+{
+ u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
+ u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
+
+ switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
+ break;
+
+ case MLX5_CMD_OP_CREATE_MKEY:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ break;
+ case MLX5_CMD_OP_CREATE_CQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ break;
+ case MLX5_CMD_OP_ALLOC_PD:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ break;
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ break;
+ case MLX5_CMD_OP_CREATE_RMP:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ break;
+ case MLX5_CMD_OP_CREATE_SQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ break;
+ case MLX5_CMD_OP_CREATE_RQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ break;
+ case MLX5_CMD_OP_CREATE_RQT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ break;
+ case MLX5_CMD_OP_CREATE_TIR:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ break;
+ case MLX5_CMD_OP_CREATE_TIS:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ break;
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ break;
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
+ *obj_id = MLX5_GET(create_flow_table_out, out, table_id);
+ MLX5_SET(destroy_flow_table_in, din, other_vport,
+ MLX5_GET(create_flow_table_in, in, other_vport));
+ MLX5_SET(destroy_flow_table_in, din, vport_number,
+ MLX5_GET(create_flow_table_in, in, vport_number));
+ MLX5_SET(destroy_flow_table_in, din, table_type,
+ MLX5_GET(create_flow_table_in, in, table_type));
+ MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ break;
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
+ *obj_id = MLX5_GET(create_flow_group_out, out, group_id);
+ MLX5_SET(destroy_flow_group_in, din, other_vport,
+ MLX5_GET(create_flow_group_in, in, other_vport));
+ MLX5_SET(destroy_flow_group_in, din, vport_number,
+ MLX5_GET(create_flow_group_in, in, vport_number));
+ MLX5_SET(destroy_flow_group_in, din, table_type,
+ MLX5_GET(create_flow_group_in, in, table_type));
+ MLX5_SET(destroy_flow_group_in, din, table_id,
+ MLX5_GET(create_flow_group_in, in, table_id));
+ MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ break;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
+ *obj_id = MLX5_GET(set_fte_in, in, flow_index);
+ MLX5_SET(delete_fte_in, din, other_vport,
+ MLX5_GET(set_fte_in, in, other_vport));
+ MLX5_SET(delete_fte_in, din, vport_number,
+ MLX5_GET(set_fte_in, in, vport_number));
+ MLX5_SET(delete_fte_in, din, table_type,
+ MLX5_GET(set_fte_in, in, table_type));
+ MLX5_SET(delete_fte_in, din, table_id,
+ MLX5_GET(set_fte_in, in, table_id));
+ MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ break;
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ break;
+ case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
+ break;
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ break;
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
+ *obj_id = MLX5_GET(create_scheduling_element_out, out,
+ scheduling_element_id);
+ MLX5_SET(destroy_scheduling_element_in, din,
+ scheduling_hierarchy,
+ MLX5_GET(create_scheduling_element_in, in,
+ scheduling_hierarchy));
+ MLX5_SET(destroy_scheduling_element_in, din,
+ scheduling_element_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
+ break;
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
+ *obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ break;
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
+ *obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
+ MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
+ break;
+ case MLX5_CMD_OP_CREATE_QP:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ break;
+ case MLX5_CMD_OP_CREATE_SRQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ break;
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ break;
+ case MLX5_CMD_OP_CREATE_DCT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ break;
+ case MLX5_CMD_OP_CREATE_XRQ:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ break;
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ *dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
+ MLX5_SET(detach_from_mcg_in, din, qpn,
+ MLX5_GET(attach_to_mcg_in, in, qpn));
+ memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
+ MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
+ MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ break;
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ break;
+ default:
+ /* The entry must match one of the opcodes accepted by devx_is_obj_create_cmd() */
+ WARN_ON(true);
+ break;
+ }
+}
+
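+/*
+ * Issue the destroy command that was pre-built at creation time. If the
+ * failure is retryable, keep the uobject (and its devx_obj) alive so the
+ * core can retry the destroy later.
+ */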
+static int devx_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct devx_obj *obj = uobject->object;
+ int ret;
+
+ ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ kfree(obj);
+ return ret;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
+ void *cmd_out;
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ struct devx_obj *obj;
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ if (!devx_is_obj_create_cmd(cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ goto obj_free;
+
+ uobj->object = obj;
+ obj->mdev = dev->mdev;
+ devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
+ WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
+ if (err)
+ goto obj_free;
+
+ return 0;
+
+obj_free:
+ kfree(obj);
+ return err;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct devx_obj *obj = uobj->object;
+ void *cmd_out;
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ if (!devx_is_obj_modify_cmd(cmd_in))
+ return -EINVAL;
+
+ if (!devx_is_valid_obj_id(obj, cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(obj->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ cmd_out, cmd_out_len);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct devx_obj *obj = uobj->object;
+ void *cmd_out;
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ if (!devx_is_obj_query_cmd(cmd_in))
+ return -EINVAL;
+
+ if (!devx_is_valid_obj_id(obj, cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(obj->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ cmd_out, cmd_out_len);
+}
+
+static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
+ struct uverbs_attr_bundle *attrs,
+ struct devx_umem *obj)
+{
+ u64 addr;
+ size_t size;
+ u32 access;
+ int npages;
+ int err;
+ u32 page_mask;
+
+ if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
+ uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
+ return -EFAULT;
+
+ err = uverbs_get_flags32(&access, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ IB_ACCESS_SUPPORTED);
+ if (err)
+ return err;
+
+ err = ib_check_mr_access(access);
+ if (err)
+ return err;
+
+ obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
+ if (IS_ERR(obj->umem))
+ return PTR_ERR(obj->umem);
+
+ mlx5_ib_cont_pages(obj->umem, obj->umem->address,
+ MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
+ &obj->page_shift, &obj->ncont, NULL);
+
+ if (!npages) {
+ ib_umem_release(obj->umem);
+ return -EINVAL;
+ }
+
+ page_mask = (1 << obj->page_shift) - 1;
+ obj->page_offset = obj->umem->address & page_mask;
+
+ return 0;
+}
+
+static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
+ struct devx_umem *obj,
+ struct devx_umem_reg_cmd *cmd)
+{
+ cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
+ (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
+ cmd->in = uverbs_zalloc(attrs, cmd->inlen);
+ return PTR_ERR_OR_ZERO(cmd->in);
+}
+
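+/*
+ * Fill the UMEM creation mailbox: the general object header, the page
+ * geometry of the pinned memory and the MTT array translated from the
+ * umem pages.
+ */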
+static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
+ struct devx_umem *obj,
+ struct devx_umem_reg_cmd *cmd)
+{
+ void *umem;
+ __be64 *mtt;
+
+ umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
+ mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM);
+ MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
+ MLX5_SET(umem, umem, log_page_size, obj->page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(umem, umem, page_offset, obj->page_offset);
+ mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
+ (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
+ MLX5_IB_MTT_READ);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct devx_umem_reg_cmd cmd;
+ struct devx_umem *obj;
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
+ u32 obj_id;
+ struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ int err;
+
+ if (!c->devx_uid)
+ return -EPERM;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
+ if (err)
+ goto err_obj_free;
+
+ err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
+ if (err)
+ goto err_umem_release;
+
+ devx_umem_reg_cmd_build(dev, obj, &cmd);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd.in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
+ sizeof(cmd.out));
+ if (err)
+ goto err_umem_release;
+
+ obj->mdev = dev->mdev;
+ uobj->object = obj;
+ devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
+ if (err)
+ goto err_umem_destroy;
+
+ return 0;
+
+err_umem_destroy:
+ mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
+err_umem_release:
+ ib_umem_release(obj->umem);
+err_obj_free:
+ kfree(obj);
+ return err;
+}
+
+static int devx_umem_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ struct devx_umem *obj = uobject->object;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ int err;
+
+ err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (ib_is_destroy_retryable(err, why, uobject))
+ return err;
+
+ ib_umem_release(obj->umem);
+ kfree(obj);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_UMEM_REG,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ enum ib_access_flags),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DEVX_UMEM_DEREG,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_QUERY_EQN,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_QUERY_UAR,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OTHER,
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_WRITE,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
+
+DECLARE_UVERBS_OBJECT_TREE(devx_objects,
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX),
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ),
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM));
+
+const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void)
+{
+ return &devx_objects;
+}
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
new file mode 100644
index 000000000000..1a29f47f836e
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
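+/*
+ * Per-value specs for the flow-type enum attribute: a NORMAL flow carries
+ * a u16 priority as its data, while the sniffer and default flow types
+ * carry no data at all.
+ */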
+static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
+ [MLX5_IB_FLOW_TYPE_NORMAL] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ .u.ptr = {
+ .len = sizeof(u16), /* data is priority */
+ .min_len = sizeof(u16),
+ }
+ },
+ [MLX5_IB_FLOW_TYPE_SNIFFER] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+};
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_flow_handler *flow_handler;
+ struct mlx5_ib_flow_matcher *fs_matcher;
+ void *devx_obj;
+ int dest_id, dest_type;
+ void *cmd_in;
+ int inlen;
+ bool dest_devx, dest_qp;
+ struct ib_qp *qp = NULL;
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
+ struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ dest_devx =
+ uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ dest_qp = uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+
+ if ((dest_devx && dest_qp) || (!dest_devx && !dest_qp))
+ return -EINVAL;
+
+ if (dest_devx) {
+ devx_obj = uverbs_attr_get_obj(
+ attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ if (IS_ERR(devx_obj))
+ return PTR_ERR(devx_obj);
+
+ /* Verify that the given DEVX object is a flow
+ * steering destination.
+ */
+ if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
+ return -EINVAL;
+ } else {
+ struct mlx5_ib_qp *mqp;
+
+ qp = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ if (qp->qp_type != IB_QPT_RAW_PACKET)
+ return -EINVAL;
+
+ mqp = to_mqp(qp);
+ if (mqp->flags & MLX5_IB_QP_RSS)
+ dest_id = mqp->rss_qp.tirn;
+ else
+ dest_id = mqp->raw_packet_qp.rq.tirn;
+ dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ }
+
+ if (dev->rep)
+ return -EOPNOTSUPP;
+
+ cmd_in = uverbs_attr_get_alloced_ptr(
+ attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
+ inlen = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, cmd_in, inlen,
+ dest_id, dest_type);
+ if (IS_ERR(flow_handler))
+ return PTR_ERR(flow_handler);
+
+ ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev);
+
+ return 0;
+}
+
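+/*
+ * A matcher can only be destroyed once no flow rule references it; its
+ * usecnt is taken by every rule created through mlx5_ib_raw_fs_rule_add()
+ * and dropped when the rule is destroyed.
+ */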
+static int flow_matcher_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ struct mlx5_ib_flow_matcher *obj = uobject->object;
+ int ret;
+
+ ret = ib_destroy_usecnt(&obj->usecnt, why, uobject);
+ if (ret)
+ return ret;
+
+ kfree(obj);
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
+ struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
+ struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ struct mlx5_ib_flow_matcher *obj;
+ int err;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->mask_len = uverbs_attr_get_len(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
+ err = uverbs_copy_from(&obj->matcher_mask,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
+ if (err)
+ goto end;
+
+ obj->flow_type = uverbs_attr_get_enum_id(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
+
+ if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
+ err = uverbs_copy_from(&obj->priority,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
+ if (err)
+ goto end;
+ }
+
+ err = uverbs_copy_from(&obj->match_criteria_enable,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
+ if (err)
+ goto end;
+
+ uobj->object = obj;
+ obj->mdev = dev->mdev;
+ atomic_set(&obj->usecnt, 0);
+ return 0;
+
+end:
+ kfree(obj);
+ return err;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_CREATE_FLOW,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
+ UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_READ),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DESTROY_FLOW,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_fs,
+ UVERBS_OBJECT_FLOW,
+ &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
+ UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
+ UA_MANDATORY),
+ UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
+ mlx5_ib_flow_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
+ UVERBS_ATTR_TYPE(u8),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
+
+DECLARE_UVERBS_OBJECT_TREE(flow_objects,
+ &UVERBS_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER));
+
+int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
+{
+ int i = 0;
+
+ root[i++] = &flow_objects;
+ root[i++] = &mlx5_ib_fs;
+
+ return i;
+}
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 79e6309460dc..4950df3f71b6 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -477,8 +477,8 @@ static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
return gsi->tx_qps[qp_index];
}
-int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
struct ib_qp *tx_qp;
@@ -522,8 +522,8 @@ err:
return ret;
}
-int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e3e330f59c2c..c414f3809e5c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -419,8 +419,8 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width);
- props->port_cap_flags |= IB_PORT_CM_SUP;
- props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+ props->ip_gids = true;
props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
roce_address_table_size);
@@ -510,12 +510,11 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
vlan_id, port_num);
}
-static int mlx5_ib_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
+static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
__always_unused void **context)
{
return set_roce_addr(to_mdev(attr->device), attr->port_num,
- attr->index, gid, attr);
+ attr->index, &attr->gid, attr);
}
static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
@@ -525,41 +524,15 @@ static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
attr->index, NULL, NULL);
}
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
- int index)
+__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr)
{
- struct ib_gid_attr attr;
- union ib_gid gid;
-
- if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
- return 0;
-
- dev_put(attr.ndev);
-
- if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
+ if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
return 0;
return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}
-int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
- int index, enum ib_gid_type *gid_type)
-{
- struct ib_gid_attr attr;
- union ib_gid gid;
- int ret;
-
- ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
- if (ret)
- return ret;
-
- dev_put(attr.ndev);
-
- *gid_type = attr.gid_type;
-
- return 0;
-}
-
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
@@ -915,7 +888,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
sizeof(struct mlx5_wqe_raddr_seg)) /
sizeof(struct mlx5_wqe_data_seg);
- props->max_sge = min(max_rq_sg, max_sq_sg);
+ props->max_send_sge = max_sq_sg;
+ props->max_recv_sge = max_rq_sg;
props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
@@ -1246,7 +1220,6 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
props->qkey_viol_cntr = rep->qkey_violation_counter;
props->subnet_timeout = rep->subnet_timeout;
props->init_type_reply = rep->init_type_reply;
- props->grh_required = rep->grh_required;
err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
if (err)
@@ -1585,31 +1558,26 @@ error:
return err;
}
-static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+static void deallocate_uars(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context)
{
struct mlx5_bfreg_info *bfregi;
- int err;
int i;
bfregi = &context->bfregi;
- for (i = 0; i < bfregi->num_sys_pages; i++) {
+ for (i = 0; i < bfregi->num_sys_pages; i++)
if (i < bfregi->num_static_sys_pages ||
- bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) {
- err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
- if (err) {
- mlx5_ib_warn(dev, "failed to free uar %d, err=%d\n", i, err);
- return err;
- }
- }
- }
-
- return 0;
+ bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
+ mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
}
static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
{
int err;
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
+ return 0;
+
err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
if (err)
return err;
@@ -1631,6 +1599,9 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
{
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
+ return;
+
mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
@@ -1660,6 +1631,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
int err;
size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
max_cqe_version);
+ u32 dump_fill_mkey;
bool lib_uar_4k;
if (!dev->ib_active)
@@ -1676,8 +1648,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
return ERR_PTR(err);
- if (req.flags)
- return ERR_PTR(-EINVAL);
+ if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
+ return ERR_PTR(-EOPNOTSUPP);
if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
return ERR_PTR(-EOPNOTSUPP);
@@ -1755,10 +1727,26 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif
- if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
- err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
+ err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
+ if (err)
+ goto out_uars;
+
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
+ /* Block DEVX on InfiniBand, as SELinux does not yet cover it */
+ if (mlx5_ib_port_link_layer(ibdev, 1) != IB_LINK_LAYER_ETHERNET) {
+ err = -EPERM;
+ goto out_td;
+ }
+
+ err = mlx5_ib_devx_create(dev, context);
if (err)
- goto out_uars;
+ goto out_td;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
+ err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
+ if (err)
+ goto out_mdev;
}
INIT_LIST_HEAD(&context->vma_private_list);
@@ -1819,9 +1807,18 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.num_dyn_bfregs);
}
+ if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
+ if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
+ resp.dump_fill_mkey = dump_fill_mkey;
+ resp.comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
+ }
+ resp.response_length += sizeof(resp.dump_fill_mkey);
+ }
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
- goto out_td;
+ goto out_mdev;
bfregi->ver = ver;
bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
@@ -1831,9 +1828,11 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
return &context->ibucontext;
+out_mdev:
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
+ mlx5_ib_devx_destroy(dev, context);
out_td:
- if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn);
out_uars:
deallocate_uars(dev, context);
@@ -1856,9 +1855,11 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_bfreg_info *bfregi;
+ if (context->devx_uid)
+ mlx5_ib_devx_destroy(dev, context);
+
bfregi = &context->bfregi;
- if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn);
deallocate_uars(dev, context);
kfree(bfregi->sys_pages);
@@ -2040,7 +2041,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct mlx5_bfreg_info *bfregi = &context->bfregi;
int err;
unsigned long idx;
- phys_addr_t pfn, pa;
+ phys_addr_t pfn;
pgprot_t prot;
u32 bfreg_dyn_idx = 0;
u32 uar_index;
@@ -2131,8 +2132,6 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
goto err;
}
- pa = pfn << PAGE_SHIFT;
-
err = mlx5_ib_set_vma_data(vma, context);
if (err)
goto err;
@@ -2699,7 +2698,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
IPPROTO_GRE);
MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
- 0xffff);
+ ntohs(ib_spec->gre.mask.protocol));
MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
ntohs(ib_spec->gre.val.protocol));
@@ -2979,11 +2978,11 @@ static void counters_clear_description(struct ib_counters *counters)
static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
- struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
struct mlx5_ib_flow_handler *handler = container_of(flow_id,
struct mlx5_ib_flow_handler,
ibflow);
struct mlx5_ib_flow_handler *iter, *tmp;
+ struct mlx5_ib_dev *dev = handler->dev;
mutex_lock(&dev->flow_db->lock);
@@ -3001,6 +3000,8 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
counters_clear_description(handler->ibcounters);
mutex_unlock(&dev->flow_db->lock);
+ if (handler->flow_matcher)
+ atomic_dec(&handler->flow_matcher->usecnt);
kfree(handler);
return 0;
@@ -3021,6 +3022,26 @@ enum flow_table_type {
#define MLX5_FS_MAX_TYPES 6
#define MLX5_FS_MAX_ENTRIES BIT(16)
+
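+/* Back @prio with a freshly created auto-grouped flow table. */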
+static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
+ struct mlx5_ib_flow_prio *prio,
+ int priority,
+ int num_entries, int num_groups)
+{
+ struct mlx5_flow_table *ft;
+
+ ft = mlx5_create_auto_grouped_flow_table(ns, priority,
+ num_entries,
+ num_groups,
+ 0, 0);
+ if (IS_ERR(ft))
+ return ERR_CAST(ft);
+
+ prio->flow_table = ft;
+ prio->refcount = 0;
+ return prio;
+}
+
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
struct ib_flow_attr *flow_attr,
enum flow_table_type ft_type)
@@ -3033,7 +3054,6 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
int num_entries;
int num_groups;
int priority;
- int err = 0;
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
log_max_ft_size));
@@ -3083,21 +3103,10 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
return ERR_PTR(-ENOMEM);
ft = prio->flow_table;
- if (!ft) {
- ft = mlx5_create_auto_grouped_flow_table(ns, priority,
- num_entries,
- num_groups,
- 0, 0);
-
- if (!IS_ERR(ft)) {
- prio->refcount = 0;
- prio->flow_table = ft;
- } else {
- err = PTR_ERR(ft);
- }
- }
+ if (!ft)
+ return _get_prio(ns, prio, priority, num_entries, num_groups);
- return err ? ERR_PTR(err) : prio;
+ return prio;
}
static void set_underlay_qp(struct mlx5_ib_dev *dev,
@@ -3356,6 +3365,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
ft_prio->refcount++;
handler->prio = ft_prio;
+ handler->dev = dev;
ft_prio->flow_table = ft;
free:
@@ -3648,6 +3658,189 @@ free_ucmd:
return ERR_PTR(err);
}
+static struct mlx5_ib_flow_prio *_get_flow_table(struct mlx5_ib_dev *dev,
+ int priority, bool mcast)
+{
+ int max_table_size;
+ struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_ib_flow_prio *prio;
+
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ log_max_ft_size));
+ if (max_table_size < MLX5_FS_MAX_ENTRIES)
+ return ERR_PTR(-ENOMEM);
+
+ if (mcast)
+ priority = MLX5_IB_FLOW_MCAST_PRIO;
+ else
+ priority = ib_prio_to_core_prio(priority, false);
+
+ ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+ if (!ns)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ prio = &dev->flow_db->prios[priority];
+
+ if (prio->flow_table)
+ return prio;
+
+ return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
+ MLX5_FS_MAX_TYPES);
+}
+
+static struct mlx5_ib_flow_handler *
+_create_raw_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct mlx5_flow_destination *dst,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ void *cmd_in, int inlen)
+{
+ struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft = ft_prio->flow_table;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler || !spec) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ INIT_LIST_HEAD(&handler->list);
+
+ memcpy(spec->match_value, cmd_in, inlen);
+ memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
+ fs_matcher->mask_len);
+ spec->match_criteria_enable = fs_matcher->match_criteria_enable;
+
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ handler->rule = mlx5_add_flow_rules(ft, spec,
+ &flow_act, dst, 1);
+
+ if (IS_ERR(handler->rule)) {
+ err = PTR_ERR(handler->rule);
+ goto free;
+ }
+
+ ft_prio->refcount++;
+ handler->prio = ft_prio;
+ handler->dev = dev;
+ ft_prio->flow_table = ft;
+
+free:
+ if (err)
+ kfree(handler);
+ kvfree(spec);
+ return err ? ERR_PTR(err) : handler;
+}
+
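+/*
+ * A raw flow is steered to the multicast priority when the matcher covers
+ * the outer headers and both the mask and the value describe a multicast
+ * DMAC or a multicast destination IPv4 address.
+ */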
+static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
+ void *match_v)
+{
+ void *match_c;
+ void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
+ void *dmac, *dmac_mask;
+ void *ipv4, *ipv4_mask;
+
+ if (!(fs_matcher->match_criteria_enable &
+ (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
+ return false;
+
+ match_c = fs_matcher->matcher_mask.match_params;
+ match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+
+ dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
+ dmac_47_16);
+ dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
+ dmac_47_16);
+
+ if (is_multicast_ether_addr(dmac) &&
+ is_multicast_ether_addr(dmac_mask))
+ return true;
+
+ ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
+ ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
+ return true;
+
+ return false;
+}
+
+struct mlx5_ib_flow_handler *
+mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ void *cmd_in, int inlen, int dest_id,
+ int dest_type)
+{
+ struct mlx5_flow_destination *dst;
+ struct mlx5_ib_flow_prio *ft_prio;
+ int priority = fs_matcher->priority;
+ struct mlx5_ib_flow_handler *handler;
+ bool mcast;
+ int err;
+
+ if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
+ return ERR_PTR(-ENOMEM);
+
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst)
+ return ERR_PTR(-ENOMEM);
+
+ mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
+ mutex_lock(&dev->flow_db->lock);
+
+ ft_prio = _get_flow_table(dev, priority, mcast);
+ if (IS_ERR(ft_prio)) {
+ err = PTR_ERR(ft_prio);
+ goto unlock;
+ }
+
+ if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
+ dst->type = dest_type;
+ dst->tir_num = dest_id;
+ } else {
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
+ dst->ft_num = dest_id;
+ }
+
+ handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, cmd_in,
+ inlen);
+
+ if (IS_ERR(handler)) {
+ err = PTR_ERR(handler);
+ goto destroy_ft;
+ }
+
+ mutex_unlock(&dev->flow_db->lock);
+ atomic_inc(&fs_matcher->usecnt);
+ handler->flow_matcher = fs_matcher;
+
+ kfree(dst);
+
+ return handler;
+
+destroy_ft:
+ put_flow_table(dev, ft_prio, false);
+unlock:
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(dst);
+
+ return ERR_PTR(err);
+}
+
static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
{
u32 flags = 0;
@@ -3672,12 +3865,11 @@ mlx5_ib_create_flow_action_esp(struct ib_device *device,
u64 flags;
int err = 0;
- if (IS_UVERBS_COPY_ERR(uverbs_copy_from(&action_flags, attrs,
- MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS)))
- return ERR_PTR(-EFAULT);
-
- if (action_flags >= (MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1))
- return ERR_PTR(-EOPNOTSUPP);
+ err = uverbs_get_flags64(
+ &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
+ ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
+ if (err)
+ return ERR_PTR(err);
flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
@@ -4466,7 +4658,8 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
cancel_work_sync(&devr->ports[port].pkey_change_work);
}
-static u32 get_core_cap_flags(struct ib_device *ibdev)
+static u32 get_core_cap_flags(struct ib_device *ibdev,
+ struct mlx5_hca_vport_context *rep)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
@@ -4475,11 +4668,14 @@ static u32 get_core_cap_flags(struct ib_device *ibdev)
bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
u32 ret = 0;
+ if (rep->grh_required)
+ ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
+
if (ll == IB_LINK_LAYER_INFINIBAND)
- return RDMA_CORE_PORT_IBA_IB;
+ return ret | RDMA_CORE_PORT_IBA_IB;
if (raw_support)
- ret = RDMA_CORE_PORT_RAW_PACKET;
+ ret |= RDMA_CORE_PORT_RAW_PACKET;
if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
return ret;
@@ -4502,17 +4698,23 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr attr;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
+ struct mlx5_hca_vport_context rep = {0};
int err;
- immutable->core_cap_flags = get_core_cap_flags(ibdev);
-
err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
+ if (ll == IB_LINK_LAYER_INFINIBAND) {
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
+ &rep);
+ if (err)
+ return err;
+ }
+
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = get_core_cap_flags(ibdev);
+ immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
@@ -4610,7 +4812,7 @@ static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
}
}
-static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
int err;
@@ -4689,12 +4891,21 @@ static const struct mlx5_ib_counter extended_err_cnts[] = {
INIT_Q_COUNTER(req_cqe_flush_error),
};
+#define INIT_EXT_PPCNT_COUNTER(_name) \
+ { .name = #_name, .offset = \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
+
+static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
+ INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
+};
+
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
int i;
for (i = 0; i < dev->num_ports; i++) {
- if (dev->port[i].cnts.set_id)
+ if (dev->port[i].cnts.set_id_valid)
mlx5_core_dealloc_q_counter(dev->mdev,
dev->port[i].cnts.set_id);
kfree(dev->port[i].cnts.names);
@@ -4724,7 +4935,10 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
num_counters += ARRAY_SIZE(cong_cnts);
}
-
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
+ num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
+ }
cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
if (!cnts->names)
return -ENOMEM;
@@ -4781,6 +4995,13 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
offsets[j] = cong_cnts[i].offset;
}
}
+
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
+ names[j] = ext_ppcnt_cnts[i].name;
+ offsets[j] = ext_ppcnt_cnts[i].offset;
+ }
+ }
}
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
@@ -4826,7 +5047,8 @@ static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
return rdma_alloc_hw_stats_struct(port->cnts.names,
port->cnts.num_q_counters +
- port->cnts.num_cong_counters,
+ port->cnts.num_cong_counters +
+ port->cnts.num_ext_ppcnt_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -4859,6 +5081,34 @@ free:
return ret;
}
+static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_port *port,
+ struct rdma_hw_stats *stats)
+{
+ int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int ret, i;
+ void *out;
+
+ out = kvzalloc(sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
+ if (ret)
+ goto free;
+
+ for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
+ stats->value[i + offset] =
+ be64_to_cpup((__be64 *)(out +
+ port->cnts.offsets[i + offset]));
+ }
+
+free:
+ kvfree(out);
+ return ret;
+}
+
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u8 port_num, int index)
@@ -4872,13 +5122,21 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
if (!stats)
return -EINVAL;
- num_counters = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+ num_counters = port->cnts.num_q_counters +
+ port->cnts.num_cong_counters +
+ port->cnts.num_ext_ppcnt_counters;
/* q_counters are per IB device, query the master mdev */
ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
if (ret)
return ret;
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
+ if (ret)
+ return ret;
+ }
+
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
&mdev_port_num);
@@ -4905,11 +5163,6 @@ done:
return num_counters;
}
-static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
-{
- return mlx5_rdma_netdev_free(netdev);
-}
-
static struct net_device*
mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
u8 port_num,
@@ -4919,17 +5172,12 @@ mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
void (*setup)(struct net_device *))
{
struct net_device *netdev;
- struct rdma_netdev *rn;
if (type != RDMA_NETDEV_IPOIB)
return ERR_PTR(-EOPNOTSUPP);
netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
name, setup);
- if (likely(!IS_ERR_OR_NULL(netdev))) {
- rn = netdev_priv(netdev);
- rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;
- }
return netdev;
}
@@ -5127,8 +5375,8 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
spin_lock(&ibdev->port[port_num].mp.mpi_lock);
if (ibdev->port[port_num].mp.mpi) {
- mlx5_ib_warn(ibdev, "port %d already affiliated.\n",
- port_num + 1);
+ mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
+ port_num + 1);
spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
return false;
}
@@ -5263,45 +5511,47 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
mlx5_nic_vport_disable_roce(dev->mdev);
}
-ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_dm, UVERBS_OBJECT_DM,
- UVERBS_METHOD_DM_ALLOC,
- &UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
- &UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- UVERBS_ATTR_TYPE(u16),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_flow_action, UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- &UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
- UVERBS_ATTR_TYPE(u64),
- UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_dm,
+ UVERBS_OBJECT_DM,
+ UVERBS_METHOD_DM_ALLOC,
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_flow_action,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
+ enum mlx5_ib_uapi_flow_action_flags));
-#define NUM_TREES 2
static int populate_specs_root(struct mlx5_ib_dev *dev)
{
- const struct uverbs_object_tree_def *default_root[NUM_TREES + 1] = {
- uverbs_default_get_objects()};
- size_t num_trees = 1;
+ const struct uverbs_object_tree_def **trees = dev->driver_trees;
+ size_t num_trees = 0;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
- !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
- default_root[num_trees++] = &mlx5_ib_flow_action;
+ if (mlx5_accel_ipsec_device_caps(dev->mdev) &
+ MLX5_ACCEL_IPSEC_CAP_DEVICE)
+ trees[num_trees++] = &mlx5_ib_flow_action;
- if (MLX5_CAP_DEV_MEM(dev->mdev, memic) &&
- !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
- default_root[num_trees++] = &mlx5_ib_dm;
+ if (MLX5_CAP_DEV_MEM(dev->mdev, memic))
+ trees[num_trees++] = &mlx5_ib_dm;
- dev->ib_dev.specs_root =
- uverbs_alloc_spec_tree(num_trees, default_root);
+ if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_UCTX)
+ trees[num_trees++] = mlx5_ib_get_devx_tree();
- return PTR_ERR_OR_ZERO(dev->ib_dev.specs_root);
-}
+ num_trees += mlx5_ib_get_flow_trees(trees + num_trees);
-static void depopulate_specs_root(struct mlx5_ib_dev *dev)
-{
- uverbs_free_spec_tree(dev->ib_dev.specs_root);
+ WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees));
+ trees[num_trees] = NULL;
+ dev->ib_dev.driver_specs = trees;
+
+ return 0;
}
static int mlx5_ib_read_counters(struct ib_counters *counters,
@@ -5552,6 +5802,8 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
dev->ib_dev.query_qp = mlx5_ib_query_qp;
dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
+ dev->ib_dev.drain_sq = mlx5_ib_drain_sq;
+ dev->ib_dev.drain_rq = mlx5_ib_drain_rq;
dev->ib_dev.post_send = mlx5_ib_post_send;
dev->ib_dev.post_recv = mlx5_ib_post_recv;
dev->ib_dev.create_cq = mlx5_ib_create_cq;
@@ -5649,9 +5901,9 @@ int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
return 0;
}
-static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
- u8 port_num)
+static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
{
+ u8 port_num;
int i;
for (i = 0; i < dev->num_ports; i++) {
@@ -5674,6 +5926,8 @@ static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+
return mlx5_add_netdev_notifier(dev, port_num);
}
@@ -5690,14 +5944,12 @@ int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
enum rdma_link_layer ll;
int port_type_cap;
int err = 0;
- u8 port_num;
- port_num = mlx5_core_native_port_num(dev->mdev) - 1;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET)
- err = mlx5_ib_stage_common_roce_init(dev, port_num);
+ err = mlx5_ib_stage_common_roce_init(dev);
return err;
}
@@ -5712,19 +5964,17 @@ static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num;
int err;
- port_num = mlx5_core_native_port_num(dev->mdev) - 1;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
- err = mlx5_ib_stage_common_roce_init(dev, port_num);
+ err = mlx5_ib_stage_common_roce_init(dev);
if (err)
return err;
- err = mlx5_enable_eth(dev, port_num);
+ err = mlx5_enable_eth(dev);
if (err)
goto cleanup;
}
@@ -5741,9 +5991,7 @@ static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num;
- port_num = mlx5_core_native_port_num(dev->mdev) - 1;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@ -5842,11 +6090,6 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
return ib_register_device(&dev->ib_dev, NULL);
}
-static void mlx5_ib_stage_depopulate_specs(struct mlx5_ib_dev *dev)
-{
- depopulate_specs_root(dev);
-}
-
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
destroy_umrc_res(dev);
@@ -5915,8 +6158,6 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
ib_dealloc_device((struct ib_device *)dev);
}
-static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
-
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile)
{
@@ -5983,7 +6224,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_SPECS,
mlx5_ib_stage_populate_specs,
- mlx5_ib_stage_depopulate_specs),
+ NULL),
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
mlx5_ib_stage_ib_reg_init,
mlx5_ib_stage_ib_reg_cleanup),
@@ -6031,7 +6272,7 @@ static const struct mlx5_ib_profile nic_rep_profile = {
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_SPECS,
mlx5_ib_stage_populate_specs,
- mlx5_ib_stage_depopulate_specs),
+ NULL),
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
mlx5_ib_stage_ib_reg_init,
mlx5_ib_stage_ib_reg_cleanup),
@@ -6046,7 +6287,7 @@ static const struct mlx5_ib_profile nic_rep_profile = {
mlx5_ib_stage_rep_reg_cleanup),
};
-static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_multiport_info *mpi;
struct mlx5_ib_dev *dev;
@@ -6080,8 +6321,6 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
if (!bound) {
list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
- } else {
- mlx5_ib_dbg(dev, "bound port %u\n", port_num + 1);
}
mutex_unlock(&mlx5_ib_multiport_mutex);
@@ -6099,11 +6338,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
- if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) {
- u8 port_num = mlx5_core_native_port_num(mdev) - 1;
-
- return mlx5_ib_add_slave_port(mdev, port_num);
- }
+ if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
+ return mlx5_ib_add_slave_port(mdev);
dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
if (!dev)
@@ -6113,7 +6349,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
MLX5_CAP_GEN(mdev, num_vhca_ports));
- if (MLX5_VPORT_MANAGER(mdev) &&
+ if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d89c8fe626f6..320d4dfe8c2f 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -46,6 +46,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
#define mlx5_ib_dbg(dev, format, arg...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
@@ -78,12 +79,6 @@ enum {
MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};
-enum mlx5_ib_latency_class {
- MLX5_IB_LATENCY_CLASS_LOW,
- MLX5_IB_LATENCY_CLASS_MEDIUM,
- MLX5_IB_LATENCY_CLASS_HIGH,
-};
-
enum mlx5_ib_mad_ifc_flags {
MLX5_MAD_IFC_IGNORE_MKEY = 1,
MLX5_MAD_IFC_IGNORE_BKEY = 2,
@@ -143,6 +138,7 @@ struct mlx5_ib_ucontext {
u64 lib_caps;
DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
+ u16 devx_uid;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -176,6 +172,18 @@ struct mlx5_ib_flow_handler {
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_handle *rule;
struct ib_counters *ibcounters;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_flow_matcher *flow_matcher;
+};
+
+struct mlx5_ib_flow_matcher {
+ struct mlx5_ib_match_params matcher_mask;
+ int mask_len;
+ enum mlx5_ib_flow_type flow_type;
+ u16 priority;
+ struct mlx5_core_dev *mdev;
+ atomic_t usecnt;
+ u8 match_criteria_enable;
};
struct mlx5_ib_flow_db {
@@ -461,7 +469,7 @@ struct mlx5_umr_wr {
u32 mkey;
};
-static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
+static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
return container_of(wr, struct mlx5_umr_wr, wr);
}
@@ -665,6 +673,7 @@ struct mlx5_ib_counters {
size_t *offsets;
u32 num_q_counters;
u32 num_cong_counters;
+ u32 num_ext_ppcnt_counters;
u16 set_id;
bool set_id_valid;
};
@@ -851,6 +860,7 @@ to_mcounters(struct ib_counters *ibcntrs)
struct mlx5_ib_dev {
struct ib_device ib_dev;
+ const struct uverbs_object_tree_def *driver_trees[6];
struct mlx5_core_dev *mdev;
struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
@@ -1004,8 +1014,8 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -1014,10 +1024,12 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+void mlx5_ib_drain_sq(struct ib_qp *qp);
+void mlx5_ib_drain_rq(struct ib_qp *qp);
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
void *buffer, u32 length,
@@ -1183,10 +1195,8 @@ int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type);
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
- int index);
-int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
- int index, enum ib_gid_type *gid_type);
+__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr);
void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
@@ -1200,10 +1210,10 @@ int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
@@ -1217,6 +1227,36 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
u8 port_num);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context);
+const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
+struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
+ struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
+ void *cmd_in, int inlen, int dest_id, int dest_type);
+bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
+int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
+#else
+static inline int
+mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; }
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context) {}
+static inline const struct uverbs_object_tree_def *
+mlx5_ib_get_devx_tree(void) { return NULL; }
+static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
+ int *dest_type)
+{
+ return false;
+}
+static inline int
+mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
+{
+ return 0;
+}
+#endif
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -1318,4 +1358,7 @@ static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);
+int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, u32 bfregn,
+ bool dyn_bfreg);
#endif /* MLX5_IB_H */
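
The #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) block above follows the usual kernel pattern for optional features: real prototypes when the option is on, static inline stubs otherwise, so callers never need their own #ifdefs. A minimal sketch of the pattern with made-up names ("foo" and CONFIG_FOO are illustrative, not part of this patch):

#include <linux/errno.h>
#include <linux/kconfig.h>

struct foo;

#if IS_ENABLED(CONFIG_FOO)
int foo_setup(struct foo *f);
#else
/* Stub keeps callers compiling when CONFIG_FOO=n. */
static inline int foo_setup(struct foo *f) { return -EOPNOTSUPP; }
#endif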
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 90a9c461cedc..9fb1d9cb9401 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -271,16 +271,16 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
{
struct mlx5_cache_ent *ent = filp->private_data;
struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20];
+ char lbuf[20] = {0};
u32 var;
int err;
int c;
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
+ count = min(count, sizeof(lbuf) - 1);
+ if (copy_from_user(lbuf, buf, count))
return -EFAULT;
c = order2idx(dev, ent->order);
- lbuf[sizeof(lbuf) - 1] = 0;
if (sscanf(lbuf, "%u", &var) != 1)
return -EINVAL;
@@ -310,19 +310,11 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- if (*pos)
- return 0;
-
err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
if (err < 0)
return err;
- if (copy_to_user(buf, lbuf, err))
- return -EFAULT;
-
- *pos += err;
-
- return err;
+ return simple_read_from_buffer(buf, count, pos, lbuf, err);
}
static const struct file_operations size_fops = {
@@ -337,16 +329,16 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
{
struct mlx5_cache_ent *ent = filp->private_data;
struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20];
+ char lbuf[20] = {0};
u32 var;
int err;
int c;
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
+ count = min(count, sizeof(lbuf) - 1);
+ if (copy_from_user(lbuf, buf, count))
return -EFAULT;
c = order2idx(dev, ent->order);
- lbuf[sizeof(lbuf) - 1] = 0;
if (sscanf(lbuf, "%u", &var) != 1)
return -EINVAL;
@@ -372,19 +364,11 @@ static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- if (*pos)
- return 0;
-
err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
if (err < 0)
return err;
- if (copy_to_user(buf, lbuf, err))
- return -EFAULT;
-
- *pos += err;
-
- return err;
+ return simple_read_from_buffer(buf, count, pos, lbuf, err);
}
static const struct file_operations limit_fops = {
@@ -914,7 +898,7 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
struct mlx5_umr_wr *umrwr)
{
struct umr_common *umrc = &dev->umrc;
- struct ib_send_wr *bad;
+ const struct ib_send_wr *bad;
int err;
struct mlx5_ib_umr_context umr_context;
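
The size_read()/limit_read() hunks above drop the hand-rolled *pos bookkeeping in favor of simple_read_from_buffer(), and the write paths clamp count before copy_from_user() instead of always copying sizeof(lbuf). A minimal sketch of the resulting pattern for a hypothetical debugfs attribute (names are illustrative, not from this patch):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static u32 myval;

static ssize_t myval_read(struct file *filp, char __user *buf,
			  size_t count, loff_t *pos)
{
	char lbuf[20];
	int len;

	len = snprintf(lbuf, sizeof(lbuf), "%u\n", myval);
	/* One call handles *pos, short reads and EOF. */
	return simple_read_from_buffer(buf, count, pos, lbuf, len);
}

static ssize_t myval_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	char lbuf[20] = {0};
	u32 var;

	/* Never copy more than the buffer minus its NUL terminator. */
	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;
	if (kstrtou32(lbuf, 0, &var))
		return -EINVAL;
	myval = var;
	return count;
}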
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index f1a87a690a4c..d216e0d2921d 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -488,7 +488,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
down_read(&ctx->umem_rwsem);
rbt_ib_umem_for_each_in_range(&ctx->umem_tree, 0, ULLONG_MAX,
- mr_leaf_free, imr);
+ mr_leaf_free, true, imr);
up_read(&ctx->umem_rwsem);
wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a4f1f638509f..6cba2a02d11b 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -563,32 +563,21 @@ static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
}
static int alloc_bfreg(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi,
- enum mlx5_ib_latency_class lat)
+ struct mlx5_bfreg_info *bfregi)
{
- int bfregn = -EINVAL;
+ int bfregn = -ENOMEM;
mutex_lock(&bfregi->lock);
- switch (lat) {
- case MLX5_IB_LATENCY_CLASS_LOW:
+ if (bfregi->ver >= 2) {
+ bfregn = alloc_high_class_bfreg(dev, bfregi);
+ if (bfregn < 0)
+ bfregn = alloc_med_class_bfreg(dev, bfregi);
+ }
+
+ if (bfregn < 0) {
BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
bfregn = 0;
bfregi->count[bfregn]++;
- break;
-
- case MLX5_IB_LATENCY_CLASS_MEDIUM:
- if (bfregi->ver < 2)
- bfregn = -ENOMEM;
- else
- bfregn = alloc_med_class_bfreg(dev, bfregi);
- break;
-
- case MLX5_IB_LATENCY_CLASS_HIGH:
- if (bfregi->ver < 2)
- bfregn = -ENOMEM;
- else
- bfregn = alloc_high_class_bfreg(dev, bfregi);
- break;
}
mutex_unlock(&bfregi->lock);
@@ -641,13 +630,13 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
-static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi, int bfregn,
- bool dyn_bfreg)
+int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, u32 bfregn,
+ bool dyn_bfreg)
{
- int bfregs_per_sys_page;
- int index_of_sys_page;
- int offset;
+ unsigned int bfregs_per_sys_page;
+ u32 index_of_sys_page;
+ u32 offset;
bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
MLX5_NON_FP_BFREGS_PER_UAR;
@@ -655,6 +644,10 @@ static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
if (dyn_bfreg) {
index_of_sys_page += bfregi->num_static_sys_pages;
+
+ if (index_of_sys_page >= bfregi->num_sys_pages)
+ return -EINVAL;
+
if (bfregn > bfregi->num_dyn_bfregs ||
bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
@@ -819,21 +812,9 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
bfregn = MLX5_CROSS_CHANNEL_BFREG;
}
else {
- bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
- if (bfregn < 0) {
- mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n");
- mlx5_ib_dbg(dev, "reverting to medium latency\n");
- bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
- if (bfregn < 0) {
- mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n");
- mlx5_ib_dbg(dev, "reverting to high latency\n");
- bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW);
- if (bfregn < 0) {
- mlx5_ib_warn(dev, "bfreg allocation failed\n");
- return bfregn;
- }
- }
- }
+ bfregn = alloc_bfreg(dev, &context->bfregi);
+ if (bfregn < 0)
+ return bfregn;
}
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
@@ -1626,7 +1607,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_resources *devr = &dev->devr;
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_create_qp_resp resp;
+ struct mlx5_ib_create_qp_resp resp = {};
struct mlx5_ib_cq *send_cq;
struct mlx5_ib_cq *recv_cq;
unsigned long flags;
@@ -2555,18 +2536,16 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (!(ah_flags & IB_AH_GRH))
return -EINVAL;
- err = mlx5_get_roce_gid_type(dev, port, grh->sgid_index,
- &gid_type);
- if (err)
- return err;
+
memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
if (qp->ibqp.qp_type == IB_QPT_RC ||
qp->ibqp.qp_type == IB_QPT_UC ||
qp->ibqp.qp_type == IB_QPT_XRC_INI ||
qp->ibqp.qp_type == IB_QPT_XRC_TGT)
- path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
- grh->sgid_index);
+ path->udp_sport =
+ mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
path->dci_cfi_prio_sl = (sl & 0x7) << 4;
+ gid_type = ah->grh.sgid_attr->gid_type;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
} else {
@@ -3529,7 +3508,7 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
}
static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
- struct ib_send_wr *wr, void *qend,
+ const struct ib_send_wr *wr, void *qend,
struct mlx5_ib_qp *qp, int *size)
{
void *seg = eseg;
@@ -3582,7 +3561,7 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
}
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
@@ -3730,9 +3709,9 @@ static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
struct mlx5_wqe_umr_ctrl_seg *umr,
- struct ib_send_wr *wr, int atomic)
+ const struct ib_send_wr *wr, int atomic)
{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(umr, 0, sizeof(*umr));
@@ -3803,9 +3782,10 @@ static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
seg->status = MLX5_MKEY_STATUS_FREE;
}
-static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
+static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
+ const struct ib_send_wr *wr)
{
- struct mlx5_umr_wr *umrwr = umr_wr(wr);
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(seg, 0, sizeof(*seg));
if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
@@ -3854,7 +3834,7 @@ static void set_reg_umr_inline_seg(void *seg, struct mlx5_ib_qp *qp,
seg += mr_list_size;
}
-static __be32 send_ieth(struct ib_send_wr *wr)
+static __be32 send_ieth(const struct ib_send_wr *wr)
{
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
@@ -3886,7 +3866,7 @@ static u8 wq_sig(void *wqe)
return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
-static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
void *wqe, int *sz)
{
struct mlx5_wqe_inline_seg *seg;
@@ -4032,7 +4012,7 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
return 0;
}
-static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
+static int set_sig_data_segment(const struct ib_sig_handover_wr *wr,
struct mlx5_ib_qp *qp, void **seg, int *size)
{
struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
@@ -4134,7 +4114,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
}
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
- struct ib_sig_handover_wr *wr, u32 size,
+ const struct ib_sig_handover_wr *wr, u32 size,
u32 length, u32 pdn)
{
struct ib_mr *sig_mr = wr->sig_mr;
@@ -4165,10 +4145,10 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
}
-static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
- void **seg, int *size)
+static int set_sig_umr_wr(const struct ib_send_wr *send_wr,
+ struct mlx5_ib_qp *qp, void **seg, int *size)
{
- struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
+ const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
u32 pdn = get_pd(qp)->pdn;
u32 xlt_size;
@@ -4243,7 +4223,7 @@ static int set_psv_wr(struct ib_sig_domain *domain,
}
static int set_reg_wr(struct mlx5_ib_qp *qp,
- struct ib_reg_wr *wr,
+ const struct ib_reg_wr *wr,
void **seg, int *size)
{
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
@@ -4314,10 +4294,10 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
}
}
-static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct mlx5_wqe_ctrl_seg **ctrl,
- struct ib_send_wr *wr, unsigned *idx,
- int *size, int nreq)
+ const struct ib_send_wr *wr, unsigned *idx,
+ int *size, int nreq, bool send_signaled, bool solicited)
{
if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
return -ENOMEM;
@@ -4328,10 +4308,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
*(uint32_t *)(*seg + 8) = 0;
(*ctrl)->imm = send_ieth(wr);
(*ctrl)->fm_ce_se = qp->sq_signal_bits |
- (wr->send_flags & IB_SEND_SIGNALED ?
- MLX5_WQE_CTRL_CQ_UPDATE : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- MLX5_WQE_CTRL_SOLICITED : 0);
+ (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
*seg += sizeof(**ctrl);
*size = sizeof(**ctrl) / 16;
@@ -4339,6 +4317,16 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
return 0;
}
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned *idx,
+ int *size, int nreq)
+{
+ return __begin_wqe(qp, seg, ctrl, wr, idx, size, nreq,
+ wr->send_flags & IB_SEND_SIGNALED,
+ wr->send_flags & IB_SEND_SOLICITED);
+}
+
static void finish_wqe(struct mlx5_ib_qp *qp,
struct mlx5_wqe_ctrl_seg *ctrl,
u8 size, unsigned idx, u64 wr_id,
@@ -4360,9 +4348,8 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
qp->sq.w_list[idx].next = qp->sq.cur_post;
}
-
-int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
{
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -4393,7 +4380,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -4498,10 +4485,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* SET_PSV WQEs are not signaled and solicited
* on error
*/
- wr->send_flags &= ~IB_SEND_SIGNALED;
- wr->send_flags |= IB_SEND_SOLICITED;
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, nreq);
+ err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
+ &size, nreq, false, true);
if (err) {
mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
@@ -4520,8 +4505,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
fence, MLX5_OPCODE_SET_PSV);
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, nreq);
+ err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
+ &size, nreq, false, true);
if (err) {
mlx5_ib_warn(dev, "\n");
err = -ENOMEM;
@@ -4690,13 +4675,19 @@ out:
return err;
}
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
sig->signature = calc_sig(sig, size);
}
-int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
{
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_wqe_data_seg *scat;
@@ -4714,7 +4705,7 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->rq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
@@ -4776,6 +4767,12 @@ out:
return err;
}
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
switch (mlx5_state) {
@@ -5365,7 +5362,9 @@ static int set_user_rq_size(struct mlx5_ib_dev *dev,
rwq->wqe_count = ucmd->rq_wqe_count;
rwq->wqe_shift = ucmd->rq_wqe_shift;
- rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift);
+ if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
+ return -EINVAL;
+
rwq->log_rq_stride = rwq->wqe_shift;
rwq->log_rq_size = ilog2(rwq->wqe_count);
return 0;
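
check_shl_overflow() (from <linux/overflow.h>) evaluates to true when the shift would overflow the destination type and otherwise stores the shifted value, so the user-controlled wqe_count << wqe_shift can no longer wrap rwq->buf_size. A hedged, self-contained sketch of the same validation (wrapper name is illustrative):

#include <linux/overflow.h>

/* Illustrative only: validate count << shift into a u32 result. */
static int rq_buf_size(u32 wqe_count, u32 wqe_shift, u32 *buf_size)
{
	/* true means the product does not fit in *buf_size */
	if (check_shl_overflow(wqe_count, wqe_shift, buf_size))
		return -EINVAL;
	return 0;
}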
@@ -5697,3 +5696,132 @@ out:
kvfree(in);
return err;
}
+
+struct mlx5_ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
+ struct mlx5_ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/* This function returns only once the drain WR has completed */
+static void handle_drain_completion(struct ib_cq *cq,
+ struct mlx5_ib_drain_cqe *sdrain,
+ struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ if (cq->poll_ctx == IB_POLL_DIRECT) {
+ while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
+ ib_process_cq_direct(cq, -1);
+ return;
+ }
+
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+ bool triggered = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ /* Make sure that the CQ handler won't run if it hasn't run yet */
+ if (!mcq->mcq.reset_notify_added)
+ mcq->mcq.reset_notify_added = 1;
+ else
+ triggered = true;
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ if (triggered) {
+ /* Wait for any scheduled/running task to finish */
+ switch (cq->poll_ctx) {
+ case IB_POLL_SOFTIRQ:
+ irq_poll_disable(&cq->iop);
+ irq_poll_enable(&cq->iop);
+ break;
+ case IB_POLL_WORKQUEUE:
+ cancel_work_sync(&cq->work);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ /* Run the CQ handler - this makes sure that the drain WR will
+ * be processed if it wasn't processed yet.
+ */
+ mcq->mcq.comp(&mcq->mcq);
+ }
+
+ wait_for_completion(&sdrain->done);
+}
+
+void mlx5_ib_drain_sq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->send_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx5_ib_drain_cqe sdrain;
+ const struct ib_send_wr *bad_swr;
+ struct ib_rdma_wr swr = {
+ .wr = {
+ .next = NULL,
+ { .wr_cqe = &sdrain.cqe, },
+ .opcode = IB_WR_RDMA_WRITE,
+ },
+ };
+ int ret;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ sdrain.cqe.done = mlx5_ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &sdrain, dev);
+}
+
+void mlx5_ib_drain_rq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->recv_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx5_ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {};
+ const struct ib_recv_wr *bad_rwr;
+ int ret;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = mlx5_ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
+ ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &rdrain, dev);
+}
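
With drain_sq/drain_rq wired up (and the new !drain escape in the post verbs), draining keeps working even after a device internal error, when ordinary posts return -EIO. Consumers are unaffected and keep using the generic helpers; a minimal ULP teardown sketch, assuming CQs polled via IB_POLL_WORKQUEUE and with error handling elided:

#include <rdma/ib_verbs.h>

static void ulp_teardown_qp(struct ib_qp *qp)
{
	/*
	 * Moves the QP to error, posts marker WRs on both queues and
	 * waits for their completions; the provider drain_sq/drain_rq
	 * hooks added above are what make this reliable on mlx5 during
	 * reset flows.
	 */
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}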
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 0af7b7905550..d359fecf7a5b 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
- if (desc_size == 0 || srq->msrq.max_gs > desc_size)
- return ERR_PTR(-EINVAL);
+ if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+ err = -EINVAL;
+ goto err_srq;
+ }
desc_size = roundup_pow_of_two(desc_size);
desc_size = max_t(size_t, 32, desc_size);
- if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
- return ERR_PTR(-EINVAL);
+ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+ err = -EINVAL;
+ goto err_srq;
+ }
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
sizeof(struct mlx5_wqe_data_seg);
srq->msrq.wqe_shift = ilog2(desc_size);
buf_size = srq->msrq.max * desc_size;
- if (buf_size < desc_size)
- return ERR_PTR(-EINVAL);
+ if (buf_size < desc_size) {
+ err = -EINVAL;
+ goto err_srq;
+ }
in.type = init_attr->srq_type;
if (pd->uobject)
@@ -440,8 +446,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
spin_unlock(&srq->lock);
}
-int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
struct mlx5_wqe_srq_next_seg *next;
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index e7f6223e9c60..0823c0bc7e73 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -281,10 +281,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
header->grh.flow_label =
ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
header->grh.hop_limit = ah->av->hop_limit;
- ib_get_cached_gid(&dev->ib_dev,
- be32_to_cpu(ah->av->port_pd) >> 24,
- ah->av->gid_index % dev->limits.gid_table_len,
- &header->grh.source_gid, NULL);
+ header->grh.source_gid = ah->ibah.sgid_attr->gid;
memcpy(header->grh.destination_gid.raw,
ah->av->dgid, 16);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 5508afbf1c67..220a3e4717a3 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -519,10 +519,10 @@ int mthca_max_srq_sge(struct mthca_dev *dev);
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
enum ib_event_type event_type);
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
-int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mthca_tavor_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
enum ib_event_type event_type);
@@ -530,14 +530,14 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
struct ib_qp_init_attr *qp_init_attr);
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata);
-int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
int index, int *dbd, __be32 *new_wqe);
int mthca_alloc_qp(struct mthca_dev *dev,
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 541f237965c7..0d3473b4596e 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -96,8 +96,9 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->page_size_cap = mdev->limits.page_size_cap;
props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
props->max_qp_wr = mdev->limits.max_wqes;
- props->max_sge = mdev->limits.max_sg;
- props->max_sge_rd = props->max_sge;
+ props->max_send_sge = mdev->limits.max_sg;
+ props->max_recv_sge = mdev->limits.max_sg;
+ props->max_sge_rd = mdev->limits.max_sg;
props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
props->max_cqe = mdev->limits.max_cqes;
props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
@@ -448,7 +449,7 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
int err;
if (init_attr->srq_type != IB_SRQT_BASIC)
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
srq = kmalloc(sizeof *srq, GFP_KERNEL);
if (!srq)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index af1c49d70b89..3d37f2373d63 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1488,7 +1488,7 @@ void mthca_free_qp(struct mthca_dev *dev,
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
- int ind, struct ib_ud_wr *wr,
+ int ind, const struct ib_ud_wr *wr,
struct mthca_mlx_seg *mlx,
struct mthca_data_seg *data)
{
@@ -1581,7 +1581,7 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
}
static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
- struct ib_atomic_wr *wr)
+ const struct ib_atomic_wr *wr)
{
if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->swap);
@@ -1594,7 +1594,7 @@ static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
}
static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
- struct ib_ud_wr *wr)
+ const struct ib_ud_wr *wr)
{
useg->lkey = cpu_to_be32(to_mah(wr->ah)->key);
useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
@@ -1604,15 +1604,15 @@ static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
}
static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
- struct ib_ud_wr *wr)
+ const struct ib_ud_wr *wr)
{
memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
useg->dqpn = cpu_to_be32(wr->remote_qpn);
useg->qkey = cpu_to_be32(wr->remote_qkey);
}
-int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
@@ -1814,8 +1814,8 @@ out:
return err;
}
-int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
@@ -1925,8 +1925,8 @@ out:
return err;
}
-int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
@@ -2165,8 +2165,8 @@ out:
return err;
}
-int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index f79732bc73b4..9a3fc6fb0d7e 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -472,8 +472,8 @@ void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
spin_unlock(&srq->lock);
}
-int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
@@ -572,8 +572,8 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
return err;
}
-int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 00c27291dc26..bedaa02749fb 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -159,7 +159,7 @@ do { \
#define NES_EVENT_TIMEOUT 1200000
#else
-#define nes_debug(level, fmt, args...)
+#define nes_debug(level, fmt, args...) no_printk(fmt, ##args)
#define assert(expr) do {} while (0)
#define NES_EVENT_TIMEOUT 100000
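
Using no_printk() for the disabled nes_debug() keeps gcc's printf-format checking (and marks the arguments as used) while still compiling to nothing. A small illustrative sketch, not from this patch:

#include <linux/printk.h>

#define my_debug(fmt, args...) no_printk(fmt, ##args)

static inline void my_debug_example(int qpn)
{
	/* Generates no code, but "%d" is still type-checked against qpn. */
	my_debug("qpn=%d\n", qpn);
}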
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6cdfbf8c5674..2b67ace5b614 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -58,6 +58,7 @@
#include <net/neighbour.h>
#include <net/route.h>
#include <net/ip_fib.h>
+#include <net/secure_seq.h>
#include <net/tcp.h>
#include <linux/fcntl.h>
@@ -1445,7 +1446,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
struct nes_cm_listener *listener)
{
struct nes_cm_node *cm_node;
- struct timespec ts;
int oldarpindex = 0;
int arpindex = 0;
struct nes_device *nesdev;
@@ -1496,8 +1496,10 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >>
NES_CM_DEFAULT_RCV_WND_SCALE;
- ts = current_kernel_time();
- cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr),
+ htonl(cm_node->rem_addr),
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) -
sizeof(struct tcphdr) - ETH_HLEN - VLAN_HLEN;
cm_node->tcp_cntxt.rcv_nxt = 0;
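
secure_tcp_seq() is the keyed-hash ISN generator the TCP stack itself uses; it takes the 4-tuple in network byte order, which is why the nes fields (kept in host order) are converted at the call site. A hedged sketch of the call shape (wrapper name is illustrative):

#include <net/secure_seq.h>

/* Illustrative: derive an ISN from a 4-tuple held in host byte order. */
static u32 isn_for_tuple(u32 laddr, u32 raddr, u16 lport, u16 rport)
{
	return secure_tcp_seq(htonl(laddr), htonl(raddr),
			      htons(lport), htons(rport));
}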
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 18a7de1c3923..bd0675d8f298 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -70,8 +70,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
static void nes_terminate_start_timer(struct nes_qp *nesqp);
-#ifdef CONFIG_INFINIBAND_NES_DEBUG
-static unsigned char *nes_iwarp_state_str[] = {
+static const char *const nes_iwarp_state_str[] = {
"Non-Existent",
"Idle",
"RTS",
@@ -82,7 +81,7 @@ static unsigned char *nes_iwarp_state_str[] = {
"RSVD2",
};
-static unsigned char *nes_tcp_state_str[] = {
+static const char *const nes_tcp_state_str[] = {
"Non-Existent",
"Closed",
"Listen",
@@ -100,7 +99,6 @@ static unsigned char *nes_tcp_state_str[] = {
"RSVD3",
"RSVD4",
};
-#endif
static inline void print_ip(struct nes_cm_node *cm_node)
{
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 32f26556c808..6940c7215961 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -436,7 +436,8 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
props->max_mr_size = 0x80000000;
props->max_qp = nesibdev->max_qp;
props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2;
- props->max_sge = nesdev->nesadapter->max_sge;
+ props->max_send_sge = nesdev->nesadapter->max_sge;
+ props->max_recv_sge = nesdev->nesadapter->max_sge;
props->max_cq = nesibdev->max_cq;
props->max_cqe = nesdev->nesadapter->max_cqe;
props->max_mr = nesibdev->max_mr;
@@ -754,26 +755,6 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
/**
- * nes_create_ah
- */
-static struct ib_ah *nes_create_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- struct ib_udata *udata)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-
-/**
- * nes_destroy_ah
- */
-static int nes_destroy_ah(struct ib_ah *ah)
-{
- return -ENOSYS;
-}
-
-
-/**
* nes_get_encoded_size
*/
static inline u8 nes_get_encoded_size(int *size)
@@ -3004,42 +2985,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return err;
}
-
-/**
- * nes_muticast_attach
- */
-static int nes_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return -ENOSYS;
-}
-
-
-/**
- * nes_multicast_detach
- */
-static int nes_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return -ENOSYS;
-}
-
-
-/**
- * nes_process_mad
- */
-static int nes_process_mad(struct ib_device *ibdev, int mad_flags,
- u8 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- nes_debug(NES_DBG_INIT, "\n");
- return -ENOSYS;
-}
-
static inline void
-fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselkey)
+fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, const struct ib_send_wr *ib_wr,
+ u32 uselkey)
{
int sge_index;
int total_payload_length = 0;
@@ -3065,8 +3013,8 @@ fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselke
/**
* nes_post_send
*/
-static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
- struct ib_send_wr **bad_wr)
+static int nes_post_send(struct ib_qp *ibqp, const struct ib_send_wr *ib_wr,
+ const struct ib_send_wr **bad_wr)
{
u64 u64temp;
unsigned long flags = 0;
@@ -3327,8 +3275,8 @@ out:
/**
* nes_post_recv
*/
-static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
- struct ib_recv_wr **bad_wr)
+static int nes_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
{
u64 u64temp;
unsigned long flags = 0;
@@ -3735,8 +3683,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.mmap = nes_mmap;
nesibdev->ibdev.alloc_pd = nes_alloc_pd;
nesibdev->ibdev.dealloc_pd = nes_dealloc_pd;
- nesibdev->ibdev.create_ah = nes_create_ah;
- nesibdev->ibdev.destroy_ah = nes_destroy_ah;
nesibdev->ibdev.create_qp = nes_create_qp;
nesibdev->ibdev.modify_qp = nes_modify_qp;
nesibdev->ibdev.query_qp = nes_query_qp;
@@ -3753,10 +3699,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.alloc_mr = nes_alloc_mr;
nesibdev->ibdev.map_mr_sg = nes_map_mr_sg;
- nesibdev->ibdev.attach_mcast = nes_multicast_attach;
- nesibdev->ibdev.detach_mcast = nes_multicast_detach;
- nesibdev->ibdev.process_mad = nes_process_mad;
-
nesibdev->ibdev.req_notify_cq = nes_req_notify_cq;
nesibdev->ibdev.post_send = nes_post_send;
nesibdev->ibdev.post_recv = nes_post_recv;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 3897b64532e1..58188fe5aed2 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -71,7 +71,7 @@ static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
}
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
- struct rdma_ah_attr *attr, union ib_gid *sgid,
+ struct rdma_ah_attr *attr, const union ib_gid *sgid,
int pdid, bool *isvlan, u16 vlan_tag)
{
int status;
@@ -164,17 +164,14 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
struct ocrdma_ah *ah;
bool isvlan = false;
u16 vlan_tag = 0xffff;
- struct ib_gid_attr sgid_attr;
+ const struct ib_gid_attr *sgid_attr;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
- const struct ib_global_route *grh;
- union ib_gid sgid;
if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))
return ERR_PTR(-EINVAL);
- grh = rdma_ah_read_grh(attr);
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
@@ -186,20 +183,15 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
if (status)
goto av_err;
- status = ib_get_cached_gid(&dev->ibdev, 1, grh->sgid_index, &sgid,
- &sgid_attr);
- if (status) {
- pr_err("%s(): Failed to query sgid, status = %d\n",
- __func__, status);
- goto av_conf_err;
- }
- if (is_vlan_dev(sgid_attr.ndev))
- vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
- dev_put(sgid_attr.ndev);
+ sgid_attr = attr->grh.sgid_attr;
+ if (is_vlan_dev(sgid_attr->ndev))
+ vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);
+
/* Get network header type for this GID */
- ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ ah->hdr_type = rdma_gid_attr_network_type(sgid_attr);
- status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan, vlan_tag);
+ status = set_av_attr(dev, ah, attr, &sgid_attr->gid, pd->id,
+ &isvlan, vlan_tag);
if (status)
goto av_conf_err;
@@ -262,12 +254,6 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
}
-int ocrdma_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
-{
- /* modify_ah is unsupported */
- return -ENOSYS;
-}
-
int ocrdma_process_mad(struct ib_device *ibdev,
int process_mad_flags,
u8 port_num,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 1a65c47945aa..c0c32c9b80ae 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -55,7 +55,6 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
struct ib_udata *udata);
int ocrdma_destroy_ah(struct ib_ah *ah);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-int ocrdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int ocrdma_process_mad(struct ib_device *,
int process_mad_flags,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 6c136e5017fe..e578281471af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1365,8 +1365,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
OCRDMA_HBA_ATTRB_PTNUM_MASK)
>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
- strncpy(dev->model_number,
- hba_attribs->controller_model_number, 31);
+ strlcpy(dev->model_number,
+ hba_attribs->controller_model_number,
+ sizeof(dev->model_number));
}
dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
free_mqe:
@@ -2494,8 +2495,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
{
int status;
struct rdma_ah_attr *ah_attr = &attrs->ah_attr;
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
+ const struct ib_gid_attr *sgid_attr;
u32 vlan_id = 0xFFFF;
u8 mac_addr[6], hdr_type;
union {
@@ -2525,25 +2525,23 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
memcpy(&cmd->params.dgid[0], &grh->dgid.raw[0],
sizeof(cmd->params.dgid));
- status = ib_get_cached_gid(&dev->ibdev, 1, grh->sgid_index,
- &sgid, &sgid_attr);
- if (!status) {
- vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
- memcpy(mac_addr, sgid_attr.ndev->dev_addr, ETH_ALEN);
- dev_put(sgid_attr.ndev);
- }
+ sgid_attr = ah_attr->grh.sgid_attr;
+ vlan_id = rdma_vlan_dev_vlan_id(sgid_attr->ndev);
+ memcpy(mac_addr, sgid_attr->ndev->dev_addr, ETH_ALEN);
qp->sgid_idx = grh->sgid_index;
- memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
+ memcpy(&cmd->params.sgid[0], &sgid_attr->gid.raw[0],
+ sizeof(cmd->params.sgid));
status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
if (status)
return status;
+
cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
(mac_addr[2] << 16) | (mac_addr[3] << 24);
- hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ hdr_type = rdma_gid_attr_network_type(sgid_attr);
if (hdr_type == RDMA_NETWORK_IPV4) {
- rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
+ rdma_gid2ip(&sgid_addr._sockaddr, &sgid_attr->gid);
rdma_gid2ip(&dgid_addr._sockaddr, &grh->dgid);
memcpy(&cmd->params.dgid[0],
&dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 5962c0ed9847..7832ee3e0c84 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -176,7 +176,6 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.create_ah = ocrdma_create_ah;
dev->ibdev.destroy_ah = ocrdma_destroy_ah;
dev->ibdev.query_ah = ocrdma_query_ah;
- dev->ibdev.modify_ah = ocrdma_modify_ah;
dev->ibdev.poll_cq = ocrdma_poll_cq;
dev->ibdev.post_send = ocrdma_post_send;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 82e20fc32890..c158ca9fde6d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -89,7 +89,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_MGT_EXTENSIONS;
- attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
+ attr->max_send_sge = dev->attr.max_send_sge;
+ attr->max_recv_sge = dev->attr.max_recv_sge;
attr->max_sge_rd = dev->attr.max_rdma_sge;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
@@ -196,11 +197,10 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->sm_lid = 0;
props->sm_sl = 0;
props->state = port_state;
- props->port_cap_flags =
- IB_PORT_CM_SUP |
- IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
- IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_DEVICE_MGMT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP;
+ props->ip_gids = true;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
@@ -1774,13 +1774,13 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
* protect against processing in-flight CQEs for this QP.
*/
spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
- if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
+ if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
spin_lock(&qp->rq_cq->cq_lock);
-
- ocrdma_del_qpn_map(dev, qp);
-
- if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
+ ocrdma_del_qpn_map(dev, qp);
spin_unlock(&qp->rq_cq->cq_lock);
+ } else {
+ ocrdma_del_qpn_map(dev, qp);
+ }
spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
if (!pd->uctx) {
@@ -1953,7 +1953,7 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
/* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
struct ocrdma_ewqe_ud_hdr *ud_hdr =
(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
@@ -2000,7 +2000,7 @@ static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
struct ocrdma_sge *sge,
- struct ib_send_wr *wr, u32 wqe_size)
+ const struct ib_send_wr *wr, u32 wqe_size)
{
int i;
char *dpp_addr;
@@ -2038,7 +2038,7 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
}
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
int status;
struct ocrdma_sge *sge;
@@ -2057,7 +2057,7 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
}
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
int status;
struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
@@ -2075,7 +2075,7 @@ static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
}
static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
struct ocrdma_sge *sge = ext_rw + 1;
@@ -2105,7 +2105,7 @@ static int get_encoded_page_size(int pg_sz)
static int ocrdma_build_reg(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
- struct ib_reg_wr *wr)
+ const struct ib_reg_wr *wr)
{
u64 fbo;
struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
@@ -2166,8 +2166,8 @@ static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
iowrite32(val, qp->sq_db);
}
-int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int status = 0;
struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
@@ -2278,8 +2278,8 @@ static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
iowrite32(val, qp->rq_db);
}
-static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
- u16 tag)
+static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
+ const struct ib_recv_wr *wr, u16 tag)
{
u32 wqe_size = 0;
struct ocrdma_sge *sge;
@@ -2299,8 +2299,8 @@ static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
ocrdma_cpu_to_le32(rqe, wqe_size);
}
-int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int status = 0;
unsigned long flags;
@@ -2369,8 +2369,8 @@ static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}
-int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int status = 0;
unsigned long flags;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 9a9971708646..b69cfdce7970 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -43,10 +43,10 @@
#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__
-int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
- struct ib_send_wr **bad_wr);
-int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
- struct ib_recv_wr **bad_wr);
+int ocrdma_post_send(struct ib_qp *, const struct ib_send_wr *,
+ const struct ib_send_wr **bad_wr);
+int ocrdma_post_recv(struct ib_qp *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_wr);
int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
@@ -100,8 +100,8 @@ int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
enum ib_srq_attr_mask, struct ib_udata *);
int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
int ocrdma_destroy_srq(struct ib_srq *);
-int ocrdma_post_srq_recv(struct ib_srq *, struct ib_recv_wr *,
- struct ib_recv_wr **bad_recv_wr);
+int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_recv_wr);
int ocrdma_dereg_mr(struct ib_mr *);
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
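The const-ification of the WR paths above mirrors the concurrent ib_verbs core change: drivers may only read posted work requests, and *bad_wr now points at const WRs. A minimal caller sketch under that assumption (hypothetical helper; QP and SGE set up elsewhere):

#include <rdma/ib_verbs.h>

/* Hypothetical caller: post one signaled SEND, noting the const bad_wr. */
static int example_post_one_send(struct ib_qp *qp, struct ib_sge *sge)
{
	struct ib_send_wr wr = {
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
		.sg_list    = sge,
		.num_sge    = 1,
	};
	const struct ib_send_wr *bad_wr;	/* const-qualified after this series */

	return ib_post_send(qp, &wr, &bad_wr);
}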
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index ad22b32bbd9c..a0af6d424aed 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -191,6 +191,11 @@ static int qedr_register_device(struct qedr_dev *dev)
QEDR_UVERBS(MODIFY_QP) |
QEDR_UVERBS(QUERY_QP) |
QEDR_UVERBS(DESTROY_QP) |
+ QEDR_UVERBS(CREATE_SRQ) |
+ QEDR_UVERBS(DESTROY_SRQ) |
+ QEDR_UVERBS(QUERY_SRQ) |
+ QEDR_UVERBS(MODIFY_SRQ) |
+ QEDR_UVERBS(POST_SRQ_RECV) |
QEDR_UVERBS(REG_MR) |
QEDR_UVERBS(DEREG_MR) |
QEDR_UVERBS(POLL_CQ) |
@@ -229,6 +234,11 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.query_qp = qedr_query_qp;
dev->ibdev.destroy_qp = qedr_destroy_qp;
+ dev->ibdev.create_srq = qedr_create_srq;
+ dev->ibdev.destroy_srq = qedr_destroy_srq;
+ dev->ibdev.modify_srq = qedr_modify_srq;
+ dev->ibdev.query_srq = qedr_query_srq;
+ dev->ibdev.post_srq_recv = qedr_post_srq_recv;
dev->ibdev.query_pkey = qedr_query_pkey;
dev->ibdev.create_ah = qedr_create_ah;
@@ -325,8 +335,8 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
spin_lock_init(&dev->sgid_lock);
if (IS_IWARP(dev)) {
- spin_lock_init(&dev->idr_lock);
- idr_init(&dev->qpidr);
+ spin_lock_init(&dev->qpidr.idr_lock);
+ idr_init(&dev->qpidr.idr);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
}
@@ -653,42 +663,70 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
#define EVENT_TYPE_NOT_DEFINED 0
#define EVENT_TYPE_CQ 1
#define EVENT_TYPE_QP 2
+#define EVENT_TYPE_SRQ 3
struct qedr_dev *dev = (struct qedr_dev *)context;
struct regpair *async_handle = (struct regpair *)fw_handle;
u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
u8 event_type = EVENT_TYPE_NOT_DEFINED;
struct ib_event event;
+ struct ib_srq *ibsrq;
+ struct qedr_srq *srq;
+ unsigned long flags;
struct ib_cq *ibcq;
struct ib_qp *ibqp;
struct qedr_cq *cq;
struct qedr_qp *qp;
+ u16 srq_id;
- switch (e_code) {
- case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
- event.event = IB_EVENT_CQ_ERR;
- event_type = EVENT_TYPE_CQ;
- break;
- case ROCE_ASYNC_EVENT_SQ_DRAINED:
- event.event = IB_EVENT_SQ_DRAINED;
- event_type = EVENT_TYPE_QP;
- break;
- case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
- event.event = IB_EVENT_QP_FATAL;
- event_type = EVENT_TYPE_QP;
- break;
- case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
- event.event = IB_EVENT_QP_REQ_ERR;
- event_type = EVENT_TYPE_QP;
- break;
- case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
- event.event = IB_EVENT_QP_ACCESS_ERR;
- event_type = EVENT_TYPE_QP;
- break;
- default:
+ if (IS_ROCE(dev)) {
+ switch (e_code) {
+ case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
+ event.event = IB_EVENT_CQ_ERR;
+ event_type = EVENT_TYPE_CQ;
+ break;
+ case ROCE_ASYNC_EVENT_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
+ event.event = IB_EVENT_QP_FATAL;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_SRQ_LIMIT:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ case ROCE_ASYNC_EVENT_SRQ_EMPTY:
+ event.event = IB_EVENT_SRQ_ERR;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ default:
+ DP_ERR(dev, "unsupported event %d on handle=%llx\n",
+ e_code, roce_handle64);
+ }
+ } else {
+ switch (e_code) {
+ case QED_IWARP_EVENT_SRQ_LIMIT:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ case QED_IWARP_EVENT_SRQ_EMPTY:
+ event.event = IB_EVENT_SRQ_ERR;
+ event_type = EVENT_TYPE_SRQ;
+ break;
+ default:
DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
roce_handle64);
+ }
}
-
switch (event_type) {
case EVENT_TYPE_CQ:
cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
@@ -722,6 +760,25 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
}
DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
break;
+ case EVENT_TYPE_SRQ:
+ srq_id = (u16)roce_handle64;
+ spin_lock_irqsave(&dev->srqidr.idr_lock, flags);
+ srq = idr_find(&dev->srqidr.idr, srq_id);
+ if (srq) {
+ ibsrq = &srq->ibsrq;
+ if (ibsrq->event_handler) {
+ event.device = ibsrq->device;
+ event.element.srq = ibsrq;
+ ibsrq->event_handler(&event,
+ ibsrq->srq_context);
+ }
+ } else {
+ DP_NOTICE(dev,
+ "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
+ roce_handle64);
+ }
+ spin_unlock_irqrestore(&dev->srqidr.idr_lock, flags);
+ DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
default:
break;
}
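One detail worth keeping in mind for the dispatch above: CQ and QP events carry a kernel pointer in the 64-bit firmware handle, while SRQ events carry only the 16-bit srq_id, which is why the SRQ path is the only one that needs an idr lookup. A condensed sketch of the two decodings (illustrative only):

/* Illustrative decoding of the firmware event handle. */
static void decode_event_handle(u64 roce_handle64, struct qedr_cq **cq,
				u16 *srq_id)
{
	*cq = (struct qedr_cq *)(uintptr_t)roce_handle64; /* CQ/QP: pointer */
	*srq_id = (u16)roce_handle64;	/* SRQ: low 16 bits -> idr lookup */
}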
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 86d4511e0d75..a2d708dceb8d 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -58,6 +58,7 @@
#define QEDR_MSG_RQ " RQ"
#define QEDR_MSG_SQ " SQ"
#define QEDR_MSG_QP " QP"
+#define QEDR_MSG_SRQ " SRQ"
#define QEDR_MSG_GSI " GSI"
#define QEDR_MSG_IWARP " IW"
@@ -122,6 +123,11 @@ struct qedr_device_attr {
#define QEDR_ENET_STATE_BIT (0)
+struct qedr_idr {
+ spinlock_t idr_lock; /* Protect idr data-structure */
+ struct idr idr;
+};
+
struct qedr_dev {
struct ib_device ibdev;
struct qed_dev *cdev;
@@ -165,8 +171,8 @@ struct qedr_dev {
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
enum qed_rdma_type rdma_type;
- spinlock_t idr_lock; /* Protect qpidr data-structure */
- struct idr qpidr;
+ struct qedr_idr qpidr;
+ struct qedr_idr srqidr;
struct workqueue_struct *iwarp_wq;
u16 iwarp_max_mtu;
@@ -337,6 +343,34 @@ struct qedr_qp_hwq_info {
qed_chain_get_capacity(p_info->pbl) \
} while (0)
+struct qedr_srq_hwq_info {
+ u32 max_sges;
+ u32 max_wr;
+ struct qed_chain pbl;
+ u64 p_phys_addr_tbl;
+ u32 wqe_prod;
+ u32 sge_prod;
+ u32 wr_prod_cnt;
+ u32 wr_cons_cnt;
+ u32 num_elems;
+
+ u32 *virt_prod_pair_addr;
+ dma_addr_t phy_prod_pair_addr;
+};
+
+struct qedr_srq {
+ struct ib_srq ibsrq;
+ struct qedr_dev *dev;
+
+ struct qedr_userq usrq;
+ struct qedr_srq_hwq_info hw_srq;
+ struct ib_umem *prod_umem;
+ u16 srq_id;
+ u32 srq_limit;
+ /* lock to protect srq recv post */
+ spinlock_t lock;
+};
+
enum qedr_qp_err_bitmap {
QEDR_QP_ERR_SQ_FULL = 1,
QEDR_QP_ERR_RQ_FULL = 2,
@@ -538,4 +572,9 @@ static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct qedr_mr, ibmr);
}
+
+static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct qedr_srq, ibsrq);
+}
#endif
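get_qedr_srq() above is the usual embed-and-recover idiom: the IB core only ever sees the embedded struct ib_srq, and the driver recovers its own object with container_of(). A generic miniature of the pattern (illustrative types, not driver code):

#include <linux/kernel.h>

struct core_obj { int id; };

struct drv_obj {
	struct core_obj core;	/* the core layer is handed &drv->core */
	int private_state;
};

/* Recover the driver object from the embedded core object. */
static struct drv_obj *to_drv(struct core_obj *c)
{
	return container_of(c, struct drv_obj, core);
}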
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index 7e1f7021396a..228dd7d49622 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -161,12 +161,23 @@ struct rdma_rq_sge {
#define RDMA_RQ_SGE_L_KEY_HI_SHIFT 29
};
+struct rdma_srq_wqe_header {
+ struct regpair wr_id;
+ u8 num_sges /* number of SGEs in WQE */;
+ u8 reserved2[7];
+};
+
struct rdma_srq_sge {
struct regpair addr;
__le32 length;
__le32 l_key;
};
+union rdma_srq_elm {
+ struct rdma_srq_wqe_header header;
+ struct rdma_srq_sge sge;
+};
+
/* Rdma doorbell data for flags update */
struct rdma_pwm_flags_data {
__le16 icid; /* internal CID */
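Given the header above, each posted SRQ WQE occupies one 16-byte header element followed by num_sges 16-byte SGE elements, all addressed through union rdma_srq_elm; this is why verbs.c sizes the chain as max_wr * (RDMA_MAX_SGE_PER_SRQ + 1). The arithmetic, as a sketch:

#include <linux/types.h>

/* Ring capacity in 16-byte elements for a given SRQ geometry; the +1 is
 * the rdma_srq_wqe_header element preceding the SGEs of each WQE.
 */
static u32 srq_ring_elems(u32 max_wr, u32 max_sges_per_wqe)
{
	return max_wr * (max_sges_per_wqe + 1);
}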
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 26dc374787f7..505fa3648762 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -491,7 +491,7 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int rc = 0;
int i;
- qp = idr_find(&dev->qpidr, conn_param->qpn);
+ qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
@@ -679,7 +679,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
- qp = idr_find(&dev->qpidr, conn_param->qpn);
+ qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
if (!qp) {
DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
return -EINVAL;
@@ -737,9 +737,9 @@ void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
struct qedr_qp *qp = get_qedr_qp(ibqp);
if (atomic_dec_and_test(&qp->refcnt)) {
- spin_lock_irq(&qp->dev->idr_lock);
- idr_remove(&qp->dev->qpidr, qp->qp_id);
- spin_unlock_irq(&qp->dev->idr_lock);
+ spin_lock_irq(&qp->dev->qpidr.idr_lock);
+ idr_remove(&qp->dev->qpidr.idr, qp->qp_id);
+ spin_unlock_irq(&qp->dev->qpidr.idr_lock);
kfree(qp);
}
}
@@ -748,5 +748,5 @@ struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
- return idr_find(&dev->qpidr, qpn);
+ return idr_find(&dev->qpidr.idr, qpn);
}
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index 0f14e687bb91..85578887421b 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -380,18 +380,17 @@ int qedr_destroy_gsi_qp(struct qedr_dev *dev)
#define QEDR_GSI_QPN (1)
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
struct qedr_qp *qp,
- struct ib_send_wr *swr,
+ const struct ib_send_wr *swr,
struct ib_ud_header *udh,
int *roce_mode)
{
bool has_vlan = false, has_grh_ipv6 = true;
struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
- union ib_gid sgid;
+ const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
int send_size = 0;
u16 vlan_id = 0;
u16 ether_type;
- struct ib_gid_attr sgid_attr;
int rc;
int ip_ver = 0;
@@ -402,28 +401,16 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
for (i = 0; i < swr->num_sge; ++i)
send_size += swr->sg_list[i].length;
- rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
- grh->sgid_index, &sgid, &sgid_attr);
- if (rc) {
- DP_ERR(dev,
- "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
- rdma_ah_get_port_num(ah_attr),
- grh->sgid_index);
- return rc;
- }
-
- vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+ vlan_id = rdma_vlan_dev_vlan_id(sgid_attr->ndev);
if (vlan_id < VLAN_CFI_MASK)
has_vlan = true;
- dev_put(sgid_attr.ndev);
-
- has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+ has_udp = (sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
if (!has_udp) {
/* RoCE v1 */
ether_type = ETH_P_IBOE;
*roce_mode = ROCE_V1;
- } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+ } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
/* RoCE v2 IPv4 */
ip_ver = 4;
ether_type = ETH_P_IP;
@@ -471,7 +458,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
udh->grh.flow_label = grh->flow_label;
udh->grh.hop_limit = grh->hop_limit;
udh->grh.destination_gid = grh->dgid;
- memcpy(&udh->grh.source_gid.raw, &sgid.raw,
+ memcpy(&udh->grh.source_gid.raw, sgid_attr->gid.raw,
sizeof(udh->grh.source_gid.raw));
} else {
/* IPv4 header */
@@ -482,7 +469,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
udh->ip4.frag_off = htons(IP_DF);
udh->ip4.ttl = grh->hop_limit;
- ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
+ ipv4_addr = qedr_get_ipv4_from_gid(sgid_attr->gid.raw);
udh->ip4.saddr = ipv4_addr;
ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
udh->ip4.daddr = ipv4_addr;
@@ -501,7 +488,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
struct qedr_qp *qp,
- struct ib_send_wr *swr,
+ const struct ib_send_wr *swr,
struct qed_roce_ll2_packet **p_packet)
{
u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
@@ -550,8 +537,8 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
return 0;
}
-int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct qed_roce_ll2_packet *pkt = NULL;
struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -620,8 +607,8 @@ err:
return rc;
}
-int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
struct qedr_qp *qp = get_qedr_qp(ibqp);
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.h b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
index a55916323ea9..d46dcd3f6424 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.h
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
@@ -46,10 +46,10 @@ static inline u32 qedr_get_ipv4_from_gid(const u8 *gid)
/* RDMA CM */
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
+int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs,
struct qedr_qp *qp);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index f07b8df96f43..8cc3df24e04e 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -51,6 +51,10 @@
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"
+#define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
+#define RDMA_MAX_SGE_PER_SRQ (4)
+#define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1)
+
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
@@ -84,6 +88,19 @@ int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
return 0;
}
+int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+ struct qedr_device_attr *qattr = &dev->attr;
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+
+ srq_attr->srq_limit = srq->srq_limit;
+ srq_attr->max_wr = qattr->max_srq_wr;
+ srq_attr->max_sge = qattr->max_sge;
+
+ return 0;
+}
+
int qedr_query_device(struct ib_device *ibdev,
struct ib_device_attr *attr, struct ib_udata *udata)
{
@@ -112,7 +129,8 @@ int qedr_query_device(struct ib_device *ibdev,
IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
- attr->max_sge = qattr->max_sge;
+ attr->max_send_sge = qattr->max_sge;
+ attr->max_recv_sge = qattr->max_sge;
attr->max_sge_rd = qattr->max_sge;
attr->max_cq = qattr->max_cq;
attr->max_cqe = qattr->max_cqe;
@@ -224,7 +242,7 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
attr->lmc = 0;
attr->sm_lid = 0;
attr->sm_sl = 0;
- attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
+ attr->ip_gids = true;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
attr->gid_tbl_len = 1;
attr->pkey_tbl_len = 1;
@@ -1075,27 +1093,19 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
struct qed_rdma_modify_qp_in_params
*qp_params)
{
+ const struct ib_gid_attr *gid_attr;
enum rdma_network_type nw_type;
- struct ib_gid_attr gid_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
- union ib_gid gid;
u32 ipv4_addr;
- int rc = 0;
int i;
- rc = ib_get_cached_gid(ibqp->device,
- rdma_ah_get_port_num(&attr->ah_attr),
- grh->sgid_index, &gid, &gid_attr);
- if (rc)
- return rc;
-
- qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
+ gid_attr = grh->sgid_attr;
+ qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr->ndev);
- dev_put(gid_attr.ndev);
- nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
+ nw_type = rdma_gid_attr_network_type(gid_attr);
switch (nw_type) {
case RDMA_NETWORK_IPV6:
- memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
sizeof(qp_params->sgid));
memcpy(&qp_params->dgid.bytes[0],
&grh->dgid,
@@ -1105,7 +1115,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
break;
case RDMA_NETWORK_IB:
- memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
sizeof(qp_params->sgid));
memcpy(&qp_params->dgid.bytes[0],
&grh->dgid,
@@ -1115,7 +1125,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
case RDMA_NETWORK_IPV4:
memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
- ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
+ ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
qp_params->sgid.ipv4_addr = ipv4_addr;
ipv4_addr =
qedr_get_ipv4_from_gid(grh->dgid.raw);
@@ -1189,6 +1199,21 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
return 0;
}
+static int qedr_copy_srq_uresp(struct qedr_dev *dev,
+ struct qedr_srq *srq, struct ib_udata *udata)
+{
+ struct qedr_create_srq_uresp uresp = {};
+ int rc;
+
+ uresp.srq_id = srq->srq_id;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "create srq: problem copying data to user space\n");
+
+ return rc;
+}
+
static void qedr_copy_rq_uresp(struct qedr_dev *dev,
struct qedr_create_qp_uresp *uresp,
struct qedr_qp *qp)
@@ -1255,13 +1280,18 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
qp->state = QED_ROCE_QP_STATE_RESET;
qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
qp->sq_cq = get_qedr_cq(attrs->send_cq);
- qp->rq_cq = get_qedr_cq(attrs->recv_cq);
qp->dev = dev;
- qp->rq.max_sges = attrs->cap.max_recv_sge;
- DP_DEBUG(dev, QEDR_MSG_QP,
- "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
- qp->rq.max_sges, qp->rq_cq->icid);
+ if (attrs->srq) {
+ qp->srq = get_qedr_srq(attrs->srq);
+ } else {
+ qp->rq_cq = get_qedr_cq(attrs->recv_cq);
+ qp->rq.max_sges = attrs->cap.max_recv_sge;
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
+ qp->rq.max_sges, qp->rq_cq->icid);
+ }
+
DP_DEBUG(dev, QEDR_MSG_QP,
"QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
pd->pd_id, qp->qp_type, qp->max_inline_data,
@@ -1276,9 +1306,303 @@ static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
qp->sq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
qp->sq.db_data.data.icid = qp->icid + 1;
- qp->rq.db = dev->db_addr +
- DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
- qp->rq.db_data.data.icid = qp->icid;
+ if (!qp->srq) {
+ qp->rq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ qp->rq.db_data.data.icid = qp->icid;
+ }
+}
+
+static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
+ struct ib_srq_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ if (attrs->attr.max_wr > qattr->max_srq_wr) {
+ DP_ERR(dev,
+ "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
+ attrs->attr.max_wr, qattr->max_srq_wr);
+ return -EINVAL;
+ }
+
+ if (attrs->attr.max_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
+ attrs->attr.max_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qedr_free_srq_user_params(struct qedr_srq *srq)
+{
+ qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
+ ib_umem_release(srq->usrq.umem);
+ ib_umem_release(srq->prod_umem);
+}
+
+static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
+{
+ struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+ struct qedr_dev *dev = srq->dev;
+
+ dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
+
+ dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
+ hw_srq->virt_prod_pair_addr,
+ hw_srq->phy_prod_pair_addr);
+}
+
+static int qedr_init_srq_user_params(struct ib_ucontext *ib_ctx,
+ struct qedr_srq *srq,
+ struct qedr_create_srq_ureq *ureq,
+ int access, int dmasync)
+{
+ struct scatterlist *sg;
+ int rc;
+
+ rc = qedr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
+ ureq->srq_len, access, dmasync, 1);
+ if (rc)
+ return rc;
+
+ srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
+ sizeof(struct rdma_srq_producers),
+ access, dmasync);
+ if (IS_ERR(srq->prod_umem)) {
+ qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
+ ib_umem_release(srq->usrq.umem);
+ DP_ERR(srq->dev,
+ "create srq: failed ib_umem_get for producer, got %ld\n",
+ PTR_ERR(srq->prod_umem));
+ return PTR_ERR(srq->prod_umem);
+ }
+
+ sg = srq->prod_umem->sg_head.sgl;
+ srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
+
+ return 0;
+}
+
+static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
+ struct qedr_dev *dev,
+ struct ib_srq_init_attr *init_attr)
+{
+ struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+ dma_addr_t phy_prod_pair_addr;
+ u32 num_elems;
+ void *va;
+ int rc;
+
+ va = dma_alloc_coherent(&dev->pdev->dev,
+ sizeof(struct rdma_srq_producers),
+ &phy_prod_pair_addr, GFP_KERNEL);
+ if (!va) {
+ DP_ERR(dev,
+ "create srq: failed to allocate dma memory for producer\n");
+ return -ENOMEM;
+ }
+
+ hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
+ hw_srq->virt_prod_pair_addr = va;
+
+ num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ num_elems,
+ QEDR_SRQ_WQE_ELEM_SIZE,
+ &hw_srq->pbl, NULL);
+ if (rc)
+ goto err0;
+
+ hw_srq->num_elems = num_elems;
+
+ return 0;
+
+err0:
+ dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
+ va, phy_prod_pair_addr);
+ return rc;
+}
+
+static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
+ void *ptr, u32 id);
+static void qedr_idr_remove(struct qedr_dev *dev,
+ struct qedr_idr *qidr, u32 id);
+
+struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct qed_rdma_destroy_srq_in_params destroy_in_params;
+ struct qed_rdma_create_srq_in_params in_params = {};
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qed_rdma_create_srq_out_params out_params;
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_create_srq_ureq ureq = {};
+ u64 pbl_base_addr, phy_prod_pair_addr;
+ struct ib_ucontext *ib_ctx = NULL;
+ struct qedr_srq_hwq_info *hw_srq;
+ struct qedr_ucontext *ctx = NULL;
+ u32 page_cnt, page_size;
+ struct qedr_srq *srq;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create SRQ called from %s (pd %p)\n",
+ (udata) ? "User lib" : "kernel", pd);
+
+ rc = qedr_check_srq_params(ibpd, dev, init_attr, udata);
+ if (rc)
+ return ERR_PTR(-EINVAL);
+
+ srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
+
+ srq->dev = dev;
+ hw_srq = &srq->hw_srq;
+ spin_lock_init(&srq->lock);
+
+ hw_srq->max_wr = init_attr->attr.max_wr;
+ hw_srq->max_sges = init_attr->attr.max_sge;
+
+ if (udata && ibpd->uobject && ibpd->uobject->context) {
+ ib_ctx = ibpd->uobject->context;
+ ctx = get_qedr_ucontext(ib_ctx);
+
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ DP_ERR(dev,
+ "create srq: problem copying data from user space\n");
+ goto err0;
+ }
+
+ rc = qedr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
+ if (rc)
+ goto err0;
+
+ page_cnt = srq->usrq.pbl_info.num_pbes;
+ pbl_base_addr = srq->usrq.pbl_tbl->pa;
+ phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
+ page_size = BIT(srq->usrq.umem->page_shift);
+ } else {
+ struct qed_chain *pbl;
+
+ rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
+ if (rc)
+ goto err0;
+
+ pbl = &hw_srq->pbl;
+ page_cnt = qed_chain_get_page_cnt(pbl);
+ pbl_base_addr = qed_chain_get_pbl_phys(pbl);
+ phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
+ page_size = QED_CHAIN_PAGE_SIZE;
+ }
+
+ in_params.pd_id = pd->pd_id;
+ in_params.pbl_base_addr = pbl_base_addr;
+ in_params.prod_pair_addr = phy_prod_pair_addr;
+ in_params.num_pages = page_cnt;
+ in_params.page_size = page_size;
+
+ rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
+ if (rc)
+ goto err1;
+
+ srq->srq_id = out_params.srq_id;
+
+ if (udata) {
+ rc = qedr_copy_srq_uresp(dev, srq, udata);
+ if (rc)
+ goto err2;
+ }
+
+ rc = qedr_idr_add(dev, &dev->srqidr, srq, srq->srq_id);
+ if (rc)
+ goto err2;
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
+ return &srq->ibsrq;
+
+err2:
+ destroy_in_params.srq_id = srq->srq_id;
+
+ dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
+err1:
+ if (udata)
+ qedr_free_srq_user_params(srq);
+ else
+ qedr_free_srq_kernel_params(srq);
+err0:
+ kfree(srq);
+
+ return ERR_PTR(-EFAULT);
+}
+
+int qedr_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct qed_rdma_destroy_srq_in_params in_params = {};
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+
+ qedr_idr_remove(dev, &dev->srqidr, srq->srq_id);
+ in_params.srq_id = srq->srq_id;
+ dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
+
+ if (ibsrq->pd->uobject)
+ qedr_free_srq_user_params(srq);
+ else
+ qedr_free_srq_kernel_params(srq);
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "destroy srq: destroyed srq with srq_id=0x%0x\n",
+ srq->srq_id);
+ kfree(srq);
+
+ return 0;
+}
+
+int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+ struct qed_rdma_modify_srq_in_params in_params = {};
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+ int rc;
+
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ DP_ERR(dev,
+ "modify srq: invalid attribute mask=0x%x specified for %p\n",
+ attr_mask, srq);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit >= srq->hw_srq.max_wr) {
+ DP_ERR(dev,
+ "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
+ attr->srq_limit, srq->hw_srq.max_wr);
+ return -EINVAL;
+ }
+
+ in_params.srq_id = srq->srq_id;
+ in_params.wqe_limit = attr->srq_limit;
+ rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
+ if (rc)
+ return rc;
+ }
+
+ srq->srq_limit = attr->srq_limit;
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
+
+ return 0;
}
static inline void
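For orientation, a hedged sketch of how a kernel ULP would exercise the SRQ verbs wired up above (attribute values illustrative; error mapping simplified):

#include <rdma/ib_verbs.h>

static struct ib_srq *example_srq_setup(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.attr = { .max_wr = 128, .max_sge = 2 },
	};
	struct ib_srq_attr attr = { .srq_limit = 16 };
	struct ib_srq *srq;

	srq = ib_create_srq(pd, &init_attr);
	if (IS_ERR(srq))
		return srq;

	/* Arm the limit event: IB_EVENT_SRQ_LIMIT_REACHED fires once the
	 * posted-receive count drops below srq_limit.  qedr_modify_srq()
	 * rejects IB_SRQ_MAX_WR, so no resize is attempted here.
	 */
	if (ib_modify_srq(srq, &attr, IB_SRQ_LIMIT)) {
		ib_destroy_srq(srq);
		return ERR_PTR(-EINVAL);
	}

	return srq;
}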
@@ -1299,9 +1623,17 @@ qedr_init_common_qp_in_params(struct qedr_dev *dev,
params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
params->stats_queue = 0;
- params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
params->srq_id = 0;
params->use_srq = false;
+
+ if (!qp->srq) {
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+
+ } else {
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+ params->srq_id = qp->srq->srq_id;
+ params->use_srq = true;
+ }
}
static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
@@ -1318,32 +1650,27 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}
-static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
+static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
+ void *ptr, u32 id)
{
int rc;
- if (!rdma_protocol_iwarp(&dev->ibdev, 1))
- return 0;
-
idr_preload(GFP_KERNEL);
- spin_lock_irq(&dev->idr_lock);
+ spin_lock_irq(&qidr->idr_lock);
- rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
+ rc = idr_alloc(&qidr->idr, ptr, id, id + 1, GFP_ATOMIC);
- spin_unlock_irq(&dev->idr_lock);
+ spin_unlock_irq(&qidr->idr_lock);
idr_preload_end();
return rc < 0 ? rc : 0;
}
-static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
+static void qedr_idr_remove(struct qedr_dev *dev, struct qedr_idr *qidr, u32 id)
{
- if (!rdma_protocol_iwarp(&dev->ibdev, 1))
- return;
-
- spin_lock_irq(&dev->idr_lock);
- idr_remove(&dev->qpidr, id);
- spin_unlock_irq(&dev->idr_lock);
+ spin_lock_irq(&qidr->idr_lock);
+ idr_remove(&qidr->idr, id);
+ spin_unlock_irq(&qidr->idr_lock);
}
static inline void
@@ -1356,9 +1683,10 @@ qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
&qp->usq.pbl_info, FW_PAGE_SHIFT);
-
- qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
- qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+ if (!qp->srq) {
+ qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
+ qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+ }
qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
&qp->urq.pbl_info, FW_PAGE_SHIFT);
@@ -1404,11 +1732,13 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
if (rc)
return rc;
- /* RQ - read access only (0), dma sync not required (0) */
- rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
- ureq.rq_len, 0, 0, alloc_and_init);
- if (rc)
- return rc;
+ if (!qp->srq) {
+ /* RQ - read access only (0), dma sync not required (0) */
+ rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
+ ureq.rq_len, 0, 0, alloc_and_init);
+ if (rc)
+ return rc;
+ }
memset(&in_params, 0, sizeof(in_params));
qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
@@ -1416,8 +1746,10 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
in_params.qp_handle_hi = ureq.qp_handle_hi;
in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
- in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
- in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+ if (!qp->srq) {
+ in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
+ in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+ }
qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
&in_params, &out_params);
@@ -1679,16 +2011,13 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
if (rc)
return ERR_PTR(rc);
- if (attrs->srq)
- return ERR_PTR(-EINVAL);
-
DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
udata ? "user library" : "kernel", attrs->event_handler, pd,
get_qedr_cq(attrs->send_cq),
get_qedr_cq(attrs->send_cq)->icid,
get_qedr_cq(attrs->recv_cq),
- get_qedr_cq(attrs->recv_cq)->icid);
+ attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
@@ -1715,9 +2044,11 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
qp->ibqp.qp_num = qp->qp_id;
- rc = qedr_idr_add(dev, qp, qp->qp_id);
- if (rc)
- goto err;
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ rc = qedr_idr_add(dev, &dev->qpidr, qp, qp->qp_id);
+ if (rc)
+ goto err;
+ }
return &qp->ibqp;
@@ -2289,8 +2620,9 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
qedr_free_qp_resources(dev, qp);
- if (atomic_dec_and_test(&qp->refcnt)) {
- qedr_idr_remove(dev, qp->qp_id);
+ if (atomic_dec_and_test(&qp->refcnt) &&
+ rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qedr_idr_remove(dev, &dev->qpidr, qp->qp_id);
kfree(qp);
}
return rc;
@@ -2305,7 +2637,7 @@ struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
if (!ah)
return ERR_PTR(-ENOMEM);
- ah->attr = *attr;
+ rdma_copy_ah_attr(&ah->attr, attr);
return &ah->ibah;
}
@@ -2314,6 +2646,7 @@ int qedr_destroy_ah(struct ib_ah *ibah)
{
struct qedr_ah *ah = get_qedr_ah(ibah);
+ rdma_destroy_ah_attr(&ah->attr);
kfree(ah);
return 0;
}
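The two AH hunks are a matched pair: rdma_copy_ah_attr() takes a reference on the attribute's grh.sgid_attr and rdma_destroy_ah_attr() drops it, so neither call makes sense without the other. In miniature (assuming that core refcounting):

/* Balanced copy/destroy of an AH attribute (sketch). */
static void ah_attr_lifetime(struct rdma_ah_attr *dst,
			     const struct rdma_ah_attr *src)
{
	rdma_copy_ah_attr(dst, src);	/* holds grh.sgid_attr */
	rdma_destroy_ah_attr(dst);	/* releases it */
}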
@@ -2705,9 +3038,9 @@ static void swap_wqe_data64(u64 *p)
static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
struct qedr_qp *qp, u8 *wqe_size,
- struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr, u8 *bits,
- u8 bit)
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr,
+ u8 *bits, u8 bit)
{
u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
char *seg_prt, *wqe;
@@ -2790,7 +3123,7 @@ static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
} while (0)
static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
u32 data_size = 0;
int i;
@@ -2814,8 +3147,8 @@ static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
struct qedr_qp *qp,
struct rdma_sq_rdma_wqe_1st *rwqe,
struct rdma_sq_rdma_wqe_2nd *rwqe2,
- struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
@@ -2837,8 +3170,8 @@ static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
struct qedr_qp *qp,
struct rdma_sq_send_wqe_1st *swqe,
struct rdma_sq_send_wqe_2st *swqe2,
- struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
memset(swqe2, 0, sizeof(*swqe2));
if (wr->send_flags & IB_SEND_INLINE) {
@@ -2854,7 +3187,7 @@ static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
static int qedr_prepare_reg(struct qedr_qp *qp,
struct rdma_sq_fmr_wqe_1st *fwqe1,
- struct ib_reg_wr *wr)
+ const struct ib_reg_wr *wr)
{
struct qedr_mr *mr = get_qedr_mr(wr->mr);
struct rdma_sq_fmr_wqe_2nd *fwqe2;
@@ -2916,7 +3249,8 @@ static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
}
}
-static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp,
+ const struct ib_send_wr *wr)
{
int wq_is_full, err_wr, pbl_is_full;
struct qedr_dev *dev = qp->dev;
@@ -2953,8 +3287,8 @@ static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
return true;
}
-static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -3168,8 +3502,8 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return rc;
}
-int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -3234,8 +3568,104 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return rc;
}
-int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
+{
+ u32 used;
+
+ /* Calculate the number of elements in use from the producer and
+ * consumer counts, then subtract that from the maximum number of
+ * work requests supported to get the number of elements left.
+ */
+ used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
+
+ return hw_srq->max_wr - used;
+}
+
+int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
+ struct qedr_srq_hwq_info *hw_srq;
+ struct qedr_dev *dev = srq->dev;
+ struct qed_chain *pbl;
+ unsigned long flags;
+ int status = 0;
+ u32 num_sge;
+ u32 offset;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ hw_srq = &srq->hw_srq;
+ pbl = &srq->hw_srq.pbl;
+ while (wr) {
+ struct rdma_srq_wqe_header *hdr;
+ int i;
+
+ if (!qedr_srq_elem_left(hw_srq) ||
+ wr->num_sge > srq->hw_srq.max_sges) {
+ DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
+ hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
+ wr->num_sge, srq->hw_srq.max_sges);
+ status = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ hdr = qed_chain_produce(pbl);
+ num_sge = wr->num_sge;
+ /* Set number of sge and work request id in header */
+ SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
+
+ srq->hw_srq.wr_prod_cnt++;
+ hw_srq->wqe_prod++;
+ hw_srq->sge_prod++;
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
+ wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
+
+ /* Set SGE length, lkey and address */
+ SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
+ wr->sg_list[i].length, wr->sg_list[i].lkey);
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ,
+ "[%d]: len %d key %x addr %x:%x\n",
+ i, srq_sge->length, srq_sge->l_key,
+ srq_sge->addr.hi, srq_sge->addr.lo);
+ hw_srq->sge_prod++;
+ }
+
+ /* Flush WQE and SGE information before
+ * updating producer.
+ */
+ wmb();
+
+ /* SRQ producer is 8 bytes. Need to update SGE producer index
+ * in first 4 bytes and need to update WQE producer in
+ * next 4 bytes.
+ */
+ *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
+ offset = offsetof(struct rdma_srq_producers, wqe_prod);
+ *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
+ hw_srq->wqe_prod;
+
+ /* Flush producer after updating it. */
+ wmb();
+ wr = wr->next;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
+ qed_chain_get_elem_left(pbl));
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return status;
+}
+
+int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
struct qedr_dev *dev = qp->dev;
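The 8-byte producer pair updated at the end of the loop above lives in the dma-coherent buffer allocated at SRQ creation. Its layout is only implied by the stores (sge_prod at offset 0, wqe_prod via offsetof()); a sketch matching those stores, with the authoritative definition in the qed HSI headers:

#include <linux/types.h>

/* Inferred producer-pair layout; see the qed HSI headers for the real one. */
struct rdma_srq_producers_sketch {
	__le32 sge_prod;	/* SGE producer index, stored at offset 0 */
	__le32 wqe_prod;	/* WQE producer index, stored via offsetof() */
};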
@@ -3625,6 +4055,31 @@ static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
wc->wr_id = wr_id;
}
+static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp)
+{
+ struct qedr_srq *srq = qp->srq;
+ u64 wr_id;
+
+ wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
+ le32_to_cpu(resp->srq_wr_id.lo), u64);
+
+ if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = 0;
+ wc->wr_id = wr_id;
+ wc->byte_len = 0;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+ wc->wr_id = wr_id;
+ } else {
+ __process_resp_one(dev, qp, cq, wc, resp, wr_id);
+ }
+ srq->hw_srq.wr_cons_cnt++;
+
+ return 1;
+}
static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
struct qedr_cq *cq, struct ib_wc *wc,
struct rdma_cqe_responder *resp)
@@ -3674,6 +4129,19 @@ static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
}
}
+static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc,
+ struct rdma_cqe_responder *resp)
+{
+ int cnt;
+
+ cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
+ consume_cqe(cq);
+
+ return cnt;
+}
+
static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
struct qedr_cq *cq, int num_entries,
struct ib_wc *wc, struct rdma_cqe_responder *resp,
@@ -3751,6 +4219,11 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
&cqe->resp, &update);
break;
+ case RDMA_CQE_TYPE_RESPONDER_SRQ:
+ cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
+ wc, &cqe->resp);
+ update = 1;
+ break;
case RDMA_CQE_TYPE_INVALID:
default:
DP_ERR(dev, "Error: invalid CQE type = %d\n",
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 2c57e4c592a6..0b7d0124b16c 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -66,6 +66,15 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
int qedr_destroy_qp(struct ib_qp *ibqp);
+struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *attr,
+ struct ib_udata *udata);
+int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+int qedr_destroy_srq(struct ib_srq *ibsrq);
+int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_recv_wr);
struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
struct ib_udata *udata);
int qedr_destroy_ah(struct ib_ah *ibah);
@@ -82,10 +91,10 @@ int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
-int qedr_post_send(struct ib_qp *, struct ib_send_wr *,
- struct ib_send_wr **bad_wr);
-int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *,
- struct ib_recv_wr **bad_wr);
+int qedr_post_send(struct ib_qp *, const struct ib_send_wr *,
+ const struct ib_send_wr **bad_wr);
+int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_wr);
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
u8 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 14b4057a2b8f..41babbc0db58 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1489,7 +1489,8 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_mr_size = ~0ULL;
rdi->dparms.props.max_qp = ib_qib_max_qps;
rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
- rdi->dparms.props.max_sge = ib_qib_max_sges;
+ rdi->dparms.props.max_send_sge = ib_qib_max_sges;
+ rdi->dparms.props.max_recv_sge = ib_qib_max_sges;
rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
rdi->dparms.props.max_cq = ib_qib_max_cqs;
rdi->dparms.props.max_cqe = ib_qib_max_cqes;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index f9a46768a19a..666613eef88f 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -78,9 +78,6 @@ struct qib_verbs_txreq;
#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
-/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
-#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
-
#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
/* Values for set/get portinfo VLCap OperationalVLs */
@@ -314,7 +311,7 @@ void qib_rc_rnr_retry(unsigned long arg);
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
-int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr);
+int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
diff --git a/drivers/infiniband/hw/usnic/Kconfig b/drivers/infiniband/hw/usnic/Kconfig
index 29ab11c34f3f..d1dae2af4ca9 100644
--- a/drivers/infiniband/hw/usnic/Kconfig
+++ b/drivers/infiniband/hw/usnic/Kconfig
@@ -1,10 +1,10 @@
config INFINIBAND_USNIC
tristate "Verbs support for Cisco VIC"
depends on NETDEVICES && ETHERNET && INET && PCI && INTEL_IOMMU
+ depends on INFINIBAND_USER_ACCESS
select ENIC
select NET_VENDOR_CISCO
select PCI_IOV
- select INFINIBAND_USER_ACCESS
---help---
This is a low-level driver for Cisco's Virtual Interface
Cards (VICs), including the VIC 1240 and 1280 cards.
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
index 995a26b65156..7875883621f4 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.c
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -92,8 +92,8 @@ struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
ufdev->pdev = pdev;
ufdev->netdev = pci_get_drvdata(pdev);
spin_lock_init(&ufdev->lock);
- strncpy(ufdev->name, netdev_name(ufdev->netdev),
- sizeof(ufdev->name) - 1);
+ BUILD_BUG_ON(sizeof(ufdev->name) != sizeof(ufdev->netdev->name));
+ strcpy(ufdev->name, ufdev->netdev->name);
return ufdev;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
index 0b2cc4e79707..f0b71d593da5 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.h
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -57,7 +57,7 @@ struct usnic_fwd_dev {
char mac[ETH_ALEN];
unsigned int mtu;
__be32 inaddr;
- char name[IFNAMSIZ+1];
+ char name[IFNAMSIZ];
};
struct usnic_fwd_flow {
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index a688a5669168..9973ac893635 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -666,7 +666,7 @@ int usnic_ib_dereg_mr(struct ib_mr *ibmr)
usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
- usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
+ usnic_uiom_reg_release(mr->umem, ibmr->uobject->context);
kfree(mr);
return 0;
}
@@ -771,15 +771,15 @@ int usnic_ib_destroy_ah(struct ib_ah *ah)
return -EINVAL;
}
-int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int usnic_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
usnic_dbg("\n");
return -EINVAL;
}
-int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int usnic_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
usnic_dbg("\n");
return -EINVAL;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 1fda94425116..2a2c9beb715f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -80,10 +80,10 @@ struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
struct ib_udata *udata);
int usnic_ib_destroy_ah(struct ib_ah *ah);
-int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int usnic_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int usnic_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
struct ib_wc *wc);
int usnic_ib_req_notify_cq(struct ib_cq *cq,
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 4381c0a9a873..9dd39daa602b 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -41,6 +41,7 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>
+#include <rdma/ib_verbs.h>
#include "usnic_log.h"
#include "usnic_uiom.h"
@@ -88,7 +89,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
for_each_sg(chunk->page_list, sg, chunk->nents, i) {
page = sg_page(sg);
pa = sg_phys(sg);
- if (dirty)
+ if (!PageDirty(page) && dirty)
set_page_dirty_lock(page);
put_page(page);
usnic_dbg("pa: %pa\n", &pa);
@@ -114,6 +115,16 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
dma_addr_t pa;
unsigned int gup_flags;
+ /*
+ * If the combination of the addr and size requested for this memory
+ * region causes an integer overflow, return error.
+ */
+ if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
+ return -EINVAL;
+
+ if (!size)
+ return -EINVAL;
+
if (!can_do_mlock())
return -EPERM;
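The overflow guard added above needs both tests; a worked example on a 64-bit unsigned address space:

/*
 * 1) addr + size wraps outright:
 *      addr = ULONG_MAX - 2, size = 8
 *      addr + size == 5 < addr              -> caught by the first test.
 * 2) addr + size survives, but PAGE_ALIGN() wraps:
 *      addr + size == ULONG_MAX - 2
 *      PAGE_ALIGN(addr + size) == 0, which is < addr + size
 *                                           -> caught by the second test.
 */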
@@ -127,7 +138,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
down_write(&current->mm->mmap_sem);
- locked = npages + current->mm->locked_vm;
+ locked = npages + current->mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@ -143,7 +154,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = 0;
while (npages) {
- ret = get_user_pages(cur_base,
+ ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
gup_flags, page_list, NULL);
@@ -186,7 +197,7 @@ out:
if (ret < 0)
usnic_uiom_put_pages(chunk_list, 0);
else
- current->mm->locked_vm = locked;
+ current->mm->pinned_vm = locked;
up_write(&current->mm->mmap_sem);
free_page((unsigned long) page_list);
@@ -420,18 +431,22 @@ out_free_uiomr:
return ERR_PTR(err);
}
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
+ struct ib_ucontext *ucontext)
{
+ struct task_struct *task;
struct mm_struct *mm;
unsigned long diff;
__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
- mm = get_task_mm(current);
- if (!mm) {
- kfree(uiomr);
- return;
- }
+ task = get_pid_task(ucontext->tgid, PIDTYPE_PID);
+ if (!task)
+ goto out;
+ mm = get_task_mm(task);
+ put_task_struct(task);
+ if (!mm)
+ goto out;
diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
@@ -443,7 +458,7 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
* up here and not be able to take the mmap_sem. In that case
* we defer the vm_locked accounting to the system workqueue.
*/
- if (closing) {
+ if (ucontext->closing) {
if (!down_write_trylock(&mm->mmap_sem)) {
INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
uiomr->mm = mm;
@@ -455,9 +470,10 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
} else
down_write(&mm->mmap_sem);
- current->mm->locked_vm -= diff;
+ mm->pinned_vm -= diff;
up_write(&mm->mmap_sem);
mmput(mm);
+out:
kfree(uiomr);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index 431efe4143f4..8c096acff123 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -39,6 +39,8 @@
#include "usnic_uiom_interval_tree.h"
+struct ib_ucontext;
+
#define USNIC_UIOM_READ (1)
#define USNIC_UIOM_WRITE (2)
@@ -89,7 +91,8 @@ void usnic_uiom_free_dev_list(struct device **devs);
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
unsigned long addr, size_t size,
int access, int dmasync);
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing);
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
+ struct ib_ucontext *ucontext);
int usnic_uiom_init(char *drv_name);
void usnic_uiom_fini(void);
#endif /* USNIC_UIOM_H_ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 44cb1cfba417..42b8685c997e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -378,11 +378,6 @@ static inline enum ib_port_speed pvrdma_port_speed_to_ib(
return (enum ib_port_speed)speed;
}
-static inline int pvrdma_qp_attr_mask_to_ib(int attr_mask)
-{
- return attr_mask;
-}
-
static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
{
return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index f95b97646c25..0f004c737620 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -278,19 +278,6 @@ int pvrdma_destroy_cq(struct ib_cq *cq)
return ret;
}
-/**
- * pvrdma_modify_cq - modify the CQ moderation parameters
- * @ibcq: the CQ to modify
- * @cq_count: number of CQEs that will trigger an event
- * @cq_period: max period of time in usec before triggering an event
- *
- * @return: -EOPNOTSUPP as CQ resize is not supported.
- */
-int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
-{
- return -EOPNOTSUPP;
-}
-
static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
{
return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
@@ -428,16 +415,3 @@ int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
/* Ensure we do not return errors from poll_cq */
return npolled;
}
-
-/**
- * pvrdma_resize_cq - resize CQ
- * @ibcq: the completion queue
- * @entries: CQ entries
- * @udata: user data
- *
- * @return: -EOPNOTSUPP as CQ resize is not supported.
- */
-int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
-{
- return -EOPNOTSUPP;
-}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 0be33a81bbe6..a5719899f49a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -62,9 +62,7 @@ static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;
-static int pvrdma_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- void **context);
+static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
@@ -216,8 +214,6 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
dev->ib_dev.post_send = pvrdma_post_send;
dev->ib_dev.post_recv = pvrdma_post_recv;
dev->ib_dev.create_cq = pvrdma_create_cq;
- dev->ib_dev.modify_cq = pvrdma_modify_cq;
- dev->ib_dev.resize_cq = pvrdma_resize_cq;
dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
dev->ib_dev.poll_cq = pvrdma_poll_cq;
dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
@@ -261,7 +257,6 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
dev->ib_dev.modify_srq = pvrdma_modify_srq;
dev->ib_dev.query_srq = pvrdma_query_srq;
dev->ib_dev.destroy_srq = pvrdma_destroy_srq;
- dev->ib_dev.post_srq_recv = pvrdma_post_srq_recv;
dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
sizeof(struct pvrdma_srq *),
@@ -650,13 +645,11 @@ static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
return 0;
}
-static int pvrdma_add_gid(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
- void **context)
+static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct pvrdma_dev *dev = to_vdev(attr->device);
- return pvrdma_add_gid_at_index(dev, gid,
+ return pvrdma_add_gid_at_index(dev, &attr->gid,
ib_gid_type_to_pvrdma(attr->gid_type),
attr->index);
}
@@ -699,8 +692,12 @@ static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
}
static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
+ struct net_device *ndev,
unsigned long event)
{
+ struct pci_dev *pdev_net;
+ unsigned int slot;
+
switch (event) {
case NETDEV_REBOOT:
case NETDEV_DOWN:
@@ -718,6 +715,24 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
else
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
break;
+ case NETDEV_UNREGISTER:
+ dev_put(dev->netdev);
+ dev->netdev = NULL;
+ break;
+ case NETDEV_REGISTER:
+ /* vmxnet3 will have the same bus and slot, but function 0 */
+ slot = PCI_SLOT(dev->pdev->devfn);
+ pdev_net = pci_get_slot(dev->pdev->bus,
+ PCI_DEVFN(slot, 0));
+ if ((dev->netdev == NULL) &&
+ (pci_get_drvdata(pdev_net) == ndev)) {
+ /* this is our netdev */
+ dev->netdev = ndev;
+ dev_hold(ndev);
+ }
+ pci_dev_put(pdev_net);
+ break;
+
default:
dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
event, dev->ib_dev.name);
@@ -734,8 +749,11 @@ static void pvrdma_netdevice_event_work(struct work_struct *work)
mutex_lock(&pvrdma_device_list_lock);
list_for_each_entry(dev, &pvrdma_device_list, device_link) {
- if (dev->netdev == netdev_work->event_netdev) {
- pvrdma_netdevice_event_handle(dev, netdev_work->event);
+ if ((netdev_work->event == NETDEV_REGISTER) ||
+ (dev->netdev == netdev_work->event_netdev)) {
+ pvrdma_netdevice_event_handle(dev,
+ netdev_work->event_netdev,
+ netdev_work->event);
break;
}
}
@@ -968,6 +986,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
ret = -ENODEV;
goto err_free_cq_ring;
}
+ dev_hold(dev->netdev);
dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);
@@ -1040,6 +1059,10 @@ err_free_intrs:
pvrdma_free_irq(dev);
pci_free_irq_vectors(pdev);
err_free_cq_ring:
+ if (dev->netdev) {
+ dev_put(dev->netdev);
+ dev->netdev = NULL;
+ }
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
@@ -1079,6 +1102,11 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
flush_workqueue(event_wq);
+ if (dev->netdev) {
+ dev_put(dev->netdev);
+ dev->netdev = NULL;
+ }
+
/* Unregister ib device */
ib_unregister_device(&dev->ib_dev);
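
The probe, remove, and NETDEV_REGISTER/UNREGISTER hunks above all follow one refcounting rule: a net_device pointer stored in driver state must pin the device for as long as the pointer lives. Sketched with hypothetical helper names:

	/* Hypothetical helpers showing the pairing used above. */
	static void pvrdma_pair_netdev(struct pvrdma_dev *dev,
				       struct net_device *ndev)
	{
		dev->netdev = ndev;
		dev_hold(ndev);		/* reference backs the stored pointer */
	}

	static void pvrdma_unpair_netdev(struct pvrdma_dev *dev)
	{
		if (dev->netdev) {
			dev_put(dev->netdev);	/* drop that reference */
			dev->netdev = NULL;
		}
	}
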
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index eb5b1065ec08..60083c0363a5 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -599,7 +599,8 @@ static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
qp->rq.offset + n * qp->rq.wqe_size);
}
-static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
+static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr,
+ const struct ib_reg_wr *wr)
{
struct pvrdma_user_mr *mr = to_vmr(wr->mr);
@@ -623,8 +624,8 @@ static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
*
* @return: 0 on success, otherwise errno returned.
*/
-int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct pvrdma_qp *qp = to_vqp(ibqp);
struct pvrdma_dev *dev = to_vdev(ibqp->device);
@@ -827,8 +828,8 @@ out:
*
* @return: 0 on success, otherwise errno returned.
*/
-int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct pvrdma_dev *dev = to_vdev(ibqp->device);
unsigned long flags;
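
The signature change is pure const-correctness: the core promises not to modify a posted WR chain, and *bad_wr is written only on failure. How a provider walks the chain under the new contract, as a sketch (hypothetical driver, actual queuing elided):

	static int demo_post_send(struct ib_qp *ibqp,
				  const struct ib_send_wr *wr,
				  const struct ib_send_wr **bad_wr)
	{
		int ret;

		for (; wr; wr = wr->next) {
			ret = 0;	/* queue one read-only WR here */
			if (ret) {
				*bad_wr = wr;	/* first WR that failed */
				return ret;
			}
		}
		return 0;
	}
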
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index af235967a9c2..dc0ce877c7a3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -52,13 +52,6 @@
#include "pvrdma.h"
-int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- /* No support for kernel clients. */
- return -EOPNOTSUPP;
-}
-
/**
* pvrdma_query_srq - query shared receive queue
* @ibsrq: the shared receive queue to query
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index a51463cd2f37..b65d10b0a875 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -82,7 +82,8 @@ int pvrdma_query_device(struct ib_device *ibdev,
props->max_qp = dev->dsr->caps.max_qp;
props->max_qp_wr = dev->dsr->caps.max_qp_wr;
props->device_cap_flags = dev->dsr->caps.device_cap_flags;
- props->max_sge = dev->dsr->caps.max_sge;
+ props->max_send_sge = dev->dsr->caps.max_sge;
+ props->max_recv_sge = dev->dsr->caps.max_sge;
props->max_sge_rd = PVRDMA_GET_CAP(dev, dev->dsr->caps.max_sge,
dev->dsr->caps.max_sge_rd);
props->max_srq = dev->dsr->caps.max_srq;
@@ -154,7 +155,8 @@ int pvrdma_query_port(struct ib_device *ibdev, u8 port,
props->gid_tbl_len = resp->attrs.gid_tbl_len;
props->port_cap_flags =
pvrdma_port_cap_flags_to_ib(resp->attrs.port_cap_flags);
- props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+ props->ip_gids = true;
props->max_msg_sz = resp->attrs.max_msg_sz;
props->bad_pkey_cntr = resp->attrs.bad_pkey_cntr;
props->qkey_viol_cntr = resp->attrs.qkey_viol_cntr;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index b7b25728a7e5..b2e3ab50cb08 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -412,15 +412,10 @@ struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
-int pvrdma_resize_cq(struct ib_cq *ibcq, int entries,
- struct ib_udata *udata);
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
-int pvrdma_resize_cq(struct ib_cq *ibcq, int entries,
- struct ib_udata *udata);
int pvrdma_destroy_cq(struct ib_cq *cq);
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
@@ -435,8 +430,6 @@ int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int pvrdma_destroy_srq(struct ib_srq *srq);
-int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
@@ -446,9 +439,9 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int pvrdma_destroy_qp(struct ib_qp *qp);
-int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
#endif /* __PVRDMA_VERBS_H__ */
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index ba3639a0d77c..89ec0f64abfc 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -120,7 +120,8 @@ struct ib_ah *rvt_create_ah(struct ib_pd *pd,
dev->n_ahs_allocated++;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- ah->attr = *ah_attr;
+ rdma_copy_ah_attr(&ah->attr, ah_attr);
+
atomic_set(&ah->refcount, 0);
if (dev->driver_f.notify_new_ah)
@@ -148,6 +149,7 @@ int rvt_destroy_ah(struct ib_ah *ibah)
dev->n_ahs_allocated--;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+ rdma_destroy_ah_attr(&ah->attr);
kfree(ah);
return 0;
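
struct rdma_ah_attr now carries a reference-counted pointer to the source GID table entry, so a plain structure assignment would alias that reference without taking one. The pairing these two hunks introduce, in sketch form:

	rdma_copy_ah_attr(&ah->attr, ah_attr);	/* deep copy, takes a GID ref */
	/* ... AH is in use ... */
	rdma_destroy_ah_attr(&ah->attr);	/* releases the GID ref */

rdma_replace_ah_attr(), used in rvt_modify_qp below, combines the two for an in-place update: it releases the old attribute's reference before copying in the new one.
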
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 41183bd665ca..5ce403c6cddb 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -780,14 +780,15 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (!rdi)
return ERR_PTR(-EINVAL);
- if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
+ if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
init_attr->create_flags)
return ERR_PTR(-EINVAL);
/* Check receive queue parameters if no SRQ is specified. */
if (!init_attr->srq) {
- if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
+ if (init_attr->cap.max_recv_sge >
+ rdi->dparms.props.max_recv_sge ||
init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
return ERR_PTR(-EINVAL);
@@ -1336,13 +1337,13 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp->qp_access_flags = attr->qp_access_flags;
if (attr_mask & IB_QP_AV) {
- qp->remote_ah_attr = attr->ah_attr;
+ rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
}
if (attr_mask & IB_QP_ALT_PATH) {
- qp->alt_ah_attr = attr->alt_ah_attr;
+ rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
qp->s_alt_pkey_index = attr->alt_pkey_index;
}
@@ -1459,6 +1460,8 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
vfree(qp->s_wq);
rdi->driver_f.qp_priv_free(rdi, qp);
kfree(qp->s_ack_queue);
+ rdma_destroy_ah_attr(&qp->remote_ah_attr);
+ rdma_destroy_ah_attr(&qp->alt_ah_attr);
kfree(qp);
return 0;
}
@@ -1535,8 +1538,8 @@ int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
*
* Return: 0 on success otherwise errno
*/
-int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_rwq *wq = qp->r_rq.wq;
@@ -1617,7 +1620,7 @@ int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
static inline int rvt_qp_valid_operation(
struct rvt_qp *qp,
const struct rvt_operation_params *post_parms,
- struct ib_send_wr *wr)
+ const struct ib_send_wr *wr)
{
int len;
@@ -1714,7 +1717,7 @@ static inline int rvt_qp_is_avail(
* @wr: the work request to send
*/
static int rvt_post_one_wr(struct rvt_qp *qp,
- struct ib_send_wr *wr,
+ const struct ib_send_wr *wr,
int *call_send)
{
struct rvt_swqe *wqe;
@@ -1888,8 +1891,8 @@ bail_inval_free:
*
* Return: 0 on success else errno
*/
-int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
@@ -1945,8 +1948,8 @@ bail:
*
* Return: 0 on success else errno
*/
-int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
struct rvt_rwq *wq;
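
With max_sge split per direction, QP creation validates the send and receive caps independently, and the receive check applies only when no SRQ supplies the receive queue. Condensed sketch of the pattern the hunks above implement:

	/* Sketch: per-direction SGE validation (names from the hunks above). */
	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge)
		return ERR_PTR(-EINVAL);
	if (!init_attr->srq &&
	    init_attr->cap.max_recv_sge > rdi->dparms.props.max_recv_sge)
		return ERR_PTR(-EINVAL);
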
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
index 8409f80d5f25..264811fdc530 100644
--- a/drivers/infiniband/sw/rdmavt/qp.h
+++ b/drivers/infiniband/sw/rdmavt/qp.h
@@ -60,10 +60,10 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int rvt_destroy_qp(struct ib_qp *ibqp);
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr);
-int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr);
-int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
+int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
#endif /* DEF_RVTQP_H */
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 3707952b4364..78e06fc456c5 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -82,7 +82,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
struct ib_srq *ret;
if (srq_init_attr->srq_type != IB_SRQT_BASIC)
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
if (srq_init_attr->attr.max_sge == 0 ||
srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 7121e1b1eb89..10999fa69281 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -91,7 +91,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_qp = RXE_MAX_QP;
rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
- rxe->attr.max_sge = RXE_MAX_SGE;
+ rxe->attr.max_send_sge = RXE_MAX_SGE;
+ rxe->attr.max_recv_sge = RXE_MAX_SGE;
rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;
rxe->attr.max_cq = RXE_MAX_CQ;
rxe->attr.max_cqe = (1 << RXE_MAX_LOG_CQE) - 1;
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 7f1ae364088a..26fe8d7dbc55 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -55,29 +55,41 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
struct rdma_ah_attr *attr)
{
+ const struct ib_global_route *grh = rdma_ah_read_grh(attr);
+
memset(av, 0, sizeof(*av));
- memcpy(&av->grh, rdma_ah_read_grh(attr),
- sizeof(*rdma_ah_read_grh(attr)));
+ memcpy(av->grh.dgid.raw, grh->dgid.raw, sizeof(grh->dgid.raw));
+ av->grh.flow_label = grh->flow_label;
+ av->grh.sgid_index = grh->sgid_index;
+ av->grh.hop_limit = grh->hop_limit;
+ av->grh.traffic_class = grh->traffic_class;
av->port_num = port_num;
}
void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr)
{
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
+
attr->type = RDMA_AH_ATTR_TYPE_ROCE;
- memcpy(rdma_ah_retrieve_grh(attr), &av->grh, sizeof(av->grh));
+
+ memcpy(grh->dgid.raw, av->grh.dgid.raw, sizeof(av->grh.dgid.raw));
+ grh->flow_label = av->grh.flow_label;
+ grh->sgid_index = av->grh.sgid_index;
+ grh->hop_limit = av->grh.hop_limit;
+ grh->traffic_class = av->grh.traffic_class;
+
rdma_ah_set_ah_flags(attr, IB_AH_GRH);
rdma_ah_set_port_num(attr, av->port_num);
}
-void rxe_av_fill_ip_info(struct rxe_av *av,
- struct rdma_ah_attr *attr,
- struct ib_gid_attr *sgid_attr,
- union ib_gid *sgid)
+void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
{
- rdma_gid2ip((struct sockaddr *)&av->sgid_addr, sgid);
+ const struct ib_gid_attr *sgid_attr = attr->grh.sgid_attr;
+
+ rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
rdma_gid2ip((struct sockaddr *)&av->dgid_addr,
&rdma_ah_read_grh(attr)->dgid);
- av->network_type = ib_gid_to_network_type(sgid_attr->gid_type, sgid);
+ av->network_type = rdma_gid_attr_network_type(sgid_attr);
}
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
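
The wholesale memcpy of ib_global_route is gone because that structure now embeds the reference-counted sgid_attr pointer; copying it into the driver's rxe_av would smuggle the pointer past its reference lifetime. Safe extraction as a sketch (struct layout assumed from this series):

	static void copy_grh_fields(struct rxe_av *av,
				    const struct ib_global_route *grh)
	{
		memcpy(av->grh.dgid.raw, grh->dgid.raw, sizeof(grh->dgid.raw));
		av->grh.flow_label    = grh->flow_label;
		av->grh.sgid_index    = grh->sgid_index;
		av->grh.hop_limit     = grh->hop_limit;
		av->grh.traffic_class = grh->traffic_class;
		/* grh->sgid_attr is deliberately not stored: the core owns
		 * its refcount and may release it after this call returns. */
	}
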
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 98d470d1f3fc..83311dd07019 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -276,6 +276,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
if (wqe->wr.opcode != IB_WR_RDMA_READ &&
wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
+ wqe->status = IB_WC_FATAL_ERR;
return COMPST_ERROR;
}
reset_retry_counters(qp);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index a51ece596c43..87d14f7ef21b 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -43,10 +43,7 @@ void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);
-void rxe_av_fill_ip_info(struct rxe_av *av,
- struct rdma_ah_attr *attr,
- struct ib_gid_attr *sgid_attr,
- union ib_gid *sgid);
+void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 59ec6d918ed4..8094cbaa54a9 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -182,39 +182,19 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
#endif
-/*
- * Derive the net_device from the av.
- * For physical devices, this will just return rxe->ndev.
- * But for VLAN devices, it will return the vlan dev.
- * Caller should dev_put() the returned net_device.
- */
-static struct net_device *rxe_netdev_from_av(struct rxe_dev *rxe,
- int port_num,
- struct rxe_av *av)
-{
- union ib_gid gid;
- struct ib_gid_attr attr;
- struct net_device *ndev = rxe->ndev;
-
- if (ib_get_cached_gid(&rxe->ib_dev, port_num, av->grh.sgid_index,
- &gid, &attr) == 0 &&
- attr.ndev && attr.ndev != ndev)
- ndev = attr.ndev;
- else
- /* Only to ensure that caller may call dev_put() */
- dev_hold(ndev);
-
- return ndev;
-}
-
static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
struct rxe_qp *qp,
struct rxe_av *av)
{
+ const struct ib_gid_attr *attr;
struct dst_entry *dst = NULL;
struct net_device *ndev;
- ndev = rxe_netdev_from_av(rxe, qp->attr.port_num, av);
+ attr = rdma_get_gid_attr(&rxe->ib_dev, qp->attr.port_num,
+ av->grh.sgid_index);
+ if (IS_ERR(attr))
+ return NULL;
+ ndev = attr->ndev;
if (qp_type(qp) == IB_QPT_RC)
dst = sk_dst_get(qp->sk->sk);
@@ -243,9 +223,13 @@ static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
rt6_get_cookie((struct rt6_info *)dst);
#endif
}
- }
- dev_put(ndev);
+ if (dst && (qp_type(qp) == IB_QPT_RC)) {
+ dst_hold(dst);
+ sk_dst_set(qp->sk->sk, dst);
+ }
+ }
+ rdma_put_gid_attr(attr);
return dst;
}
@@ -418,11 +402,7 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
av->grh.traffic_class, av->grh.hop_limit, df, xnet);
- if (qp_type(qp) == IB_QPT_RC)
- sk_dst_set(qp->sk->sk, dst);
- else
- dst_release(dst);
-
+ dst_release(dst);
return 0;
}
@@ -450,11 +430,7 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
av->grh.traffic_class,
av->grh.hop_limit);
- if (qp_type(qp) == IB_QPT_RC)
- sk_dst_set(qp->sk->sk, dst);
- else
- dst_release(dst);
-
+ dst_release(dst);
return 0;
}
@@ -536,9 +512,13 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
unsigned int hdr_len;
struct sk_buff *skb;
struct net_device *ndev;
+ const struct ib_gid_attr *attr;
const int port_num = 1;
- ndev = rxe_netdev_from_av(rxe, port_num, av);
+ attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
+ if (IS_ERR(attr))
+ return NULL;
+ ndev = attr->ndev;
if (av->network_type == RDMA_NETWORK_IPV4)
hdr_len = ETH_HLEN + sizeof(struct udphdr) +
@@ -550,10 +530,8 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
GFP_ATOMIC);
- if (unlikely(!skb)) {
- dev_put(ndev);
- return NULL;
- }
+ if (unlikely(!skb))
+ goto out;
skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
@@ -568,7 +546,8 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
pkt->hdr = skb_put_zero(skb, paylen);
pkt->mask |= RXE_GRH_MASK;
- dev_put(ndev);
+out:
+ rdma_put_gid_attr(attr);
return skb;
}
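
rxe_netdev_from_av() and its dev_hold()/dev_put() dance are replaced by the reference-counted GID table API: a successful rdma_get_gid_attr() pins the entry, and with it attr->ndev, until the matching rdma_put_gid_attr(). The usage pattern in sketch form (use_ndev() is a placeholder):

	const struct ib_gid_attr *attr;

	attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, sgid_index);
	if (IS_ERR(attr))
		return PTR_ERR(attr);
	use_ndev(attr->ndev);	/* valid only while the reference is held */
	rdma_put_gid_attr(attr);
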
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 1b596fbbe251..4555510d86c4 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -83,7 +83,7 @@ enum rxe_device_param {
RXE_MAX_SGE_RD = 32,
RXE_MAX_CQ = 16384,
RXE_MAX_LOG_CQE = 15,
- RXE_MAX_MR = 2 * 1024,
+ RXE_MAX_MR = 256 * 1024,
RXE_MAX_PD = 0x7ffc,
RXE_MAX_QP_RD_ATOM = 128,
RXE_MAX_EE_RD_ATOM = 0,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index b9f7aa1114b2..c58452daffc7 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -49,9 +49,9 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
goto err1;
}
- if (cap->max_send_sge > rxe->attr.max_sge) {
+ if (cap->max_send_sge > rxe->attr.max_send_sge) {
pr_warn("invalid send sge = %d > %d\n",
- cap->max_send_sge, rxe->attr.max_sge);
+ cap->max_send_sge, rxe->attr.max_send_sge);
goto err1;
}
@@ -62,9 +62,9 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
goto err1;
}
- if (cap->max_recv_sge > rxe->attr.max_sge) {
+ if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
pr_warn("invalid recv sge = %d > %d\n",
- cap->max_recv_sge, rxe->attr.max_sge);
+ cap->max_recv_sge, rxe->attr.max_recv_sge);
goto err1;
}
}
@@ -580,9 +580,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
struct ib_udata *udata)
{
int err;
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
@@ -623,30 +620,14 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
qp->attr.qkey = attr->qkey;
if (mask & IB_QP_AV) {
- ib_get_cached_gid(&rxe->ib_dev, 1,
- rdma_ah_read_grh(&attr->ah_attr)->sgid_index,
- &sgid, &sgid_attr);
rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
- rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr,
- &sgid_attr, &sgid);
- if (sgid_attr.ndev)
- dev_put(sgid_attr.ndev);
+ rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr);
}
if (mask & IB_QP_ALT_PATH) {
- u8 sgid_index =
- rdma_ah_read_grh(&attr->alt_ah_attr)->sgid_index;
-
- ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index,
- &sgid, &sgid_attr);
-
rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
&attr->alt_ah_attr);
- rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr,
- &sgid_attr, &sgid);
- if (sgid_attr.ndev)
- dev_put(sgid_attr.ndev);
-
+ rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr);
qp->attr.alt_port_num = attr->alt_port_num;
qp->attr.alt_pkey_index = attr->alt_pkey_index;
qp->attr.alt_timeout = attr->alt_timeout;
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index dfba44a40f0b..d30dbac24583 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -225,9 +225,14 @@ static int hdr_check(struct rxe_pkt_info *pkt)
goto err1;
}
+ if (unlikely(qpn == 0)) {
+ pr_warn_once("QP 0 not supported\n");
+ goto err1;
+ }
+
if (qpn != IB_MULTICAST_QPN) {
- index = (qpn == 0) ? port->qp_smi_index :
- ((qpn == 1) ? port->qp_gsi_index : qpn);
+ index = (qpn == 1) ? port->qp_gsi_index : qpn;
+
qp = rxe_pool_get_index(&rxe->qp_pool, index);
if (unlikely(!qp)) {
pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
@@ -256,8 +261,7 @@ static int hdr_check(struct rxe_pkt_info *pkt)
return 0;
err2:
- if (qp)
- rxe_drop_ref(qp);
+ rxe_drop_ref(qp);
err1:
return -EINVAL;
}
@@ -328,6 +332,7 @@ err1:
static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
+ const struct ib_gid_attr *gid_attr;
union ib_gid dgid;
union ib_gid *pdgid;
@@ -339,9 +344,14 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
}
- return ib_find_cached_gid_by_port(&rxe->ib_dev, pdgid,
- IB_GID_TYPE_ROCE_UDP_ENCAP,
- 1, skb->dev, NULL);
+ gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
+ IB_GID_TYPE_ROCE_UDP_ENCAP,
+ 1, skb->dev);
+ if (IS_ERR(gid_attr))
+ return PTR_ERR(gid_attr);
+
+ rdma_put_gid_attr(gid_attr);
+ return 0;
}
/* rxe_rcv is called from the interface driver */
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5b57de30dee4..aa5833318372 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -884,6 +884,11 @@ static enum resp_states do_complete(struct rxe_qp *qp,
else
wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ if (is_vlan_dev(skb->dev)) {
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+ }
+
if (pkt->mask & RXE_IMMDT_MASK) {
wc->wc_flags |= IB_WC_WITH_IMM;
wc->ex.imm_data = immdt_imm(pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9deafc3aa6af..f5b1e0ad6142 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -222,25 +222,11 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd)
return 0;
}
-static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
- struct rxe_av *av)
+static void rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
+ struct rxe_av *av)
{
- int err;
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
-
- err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
- rdma_ah_read_grh(attr)->sgid_index, &sgid,
- &sgid_attr);
- if (err) {
- pr_err("Failed to query sgid. err = %d\n", err);
- return err;
- }
-
rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
- rxe_av_fill_ip_info(av, attr, &sgid_attr, &sgid);
- dev_put(sgid_attr.ndev);
- return 0;
+ rxe_av_fill_ip_info(av, attr);
}
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
@@ -255,28 +241,17 @@ static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
err = rxe_av_chk_attr(rxe, attr);
if (err)
- goto err1;
+ return ERR_PTR(err);
ah = rxe_alloc(&rxe->ah_pool);
- if (!ah) {
- err = -ENOMEM;
- goto err1;
- }
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
rxe_add_ref(pd);
ah->pd = pd;
- err = rxe_init_av(rxe, attr, &ah->av);
- if (err)
- goto err2;
-
+ rxe_init_av(rxe, attr, &ah->av);
return &ah->ibah;
-
-err2:
- rxe_drop_ref(pd);
- rxe_drop_ref(ah);
-err1:
- return ERR_PTR(err);
}
static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
@@ -289,10 +264,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
if (err)
return err;
- err = rxe_init_av(rxe, attr, &ah->av);
- if (err)
- return err;
-
+ rxe_init_av(rxe, attr, &ah->av);
return 0;
}
@@ -315,7 +287,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah)
return 0;
}
-static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
int err;
int i;
@@ -466,8 +438,8 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq)
return 0;
}
-static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int err = 0;
unsigned long flags;
@@ -582,7 +554,7 @@ static int rxe_destroy_qp(struct ib_qp *ibqp)
return 0;
}
-static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, unsigned int length)
{
int num_sge = ibwr->num_sge;
@@ -610,7 +582,7 @@ err1:
}
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
- struct ib_send_wr *ibwr)
+ const struct ib_send_wr *ibwr)
{
wr->wr_id = ibwr->wr_id;
wr->num_sge = ibwr->num_sge;
@@ -665,7 +637,7 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
}
}
-static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, unsigned int length,
struct rxe_send_wqe *wqe)
{
@@ -713,7 +685,7 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
return 0;
}
-static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, u32 length)
{
int err;
@@ -754,8 +726,8 @@ err1:
return err;
}
-static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
int err = 0;
unsigned int mask;
@@ -797,8 +769,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
return err;
}
-static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
{
struct rxe_qp *qp = to_rqp(ibqp);
@@ -820,8 +792,8 @@ static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return rxe_post_send_kernel(qp, wr, bad_wr);
}
-static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
{
int err = 0;
struct rxe_qp *qp = to_rqp(ibqp);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index a50b062ed13e..1abe3c62f106 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -91,11 +91,9 @@ enum {
IPOIB_STOP_REAPER = 7,
IPOIB_FLAG_ADMIN_CM = 9,
IPOIB_FLAG_UMCAST = 10,
- IPOIB_STOP_NEIGH_GC = 11,
IPOIB_NEIGH_TBL_FLUSH = 12,
IPOIB_FLAG_DEV_ADDR_SET = 13,
IPOIB_FLAG_DEV_ADDR_CTRL = 14,
- IPOIB_FLAG_GOING_DOWN = 15,
IPOIB_MAX_BACKOFF_SECONDS = 16,
@@ -252,11 +250,11 @@ struct ipoib_cm_tx {
struct ipoib_neigh *neigh;
struct ipoib_path *path;
struct ipoib_tx_buf *tx_ring;
- unsigned tx_head;
- unsigned tx_tail;
+ unsigned int tx_head;
+ unsigned int tx_tail;
unsigned long flags;
u32 mtu;
- unsigned max_send_sge;
+ unsigned int max_send_sge;
};
struct ipoib_cm_rx_buf {
@@ -325,15 +323,22 @@ struct ipoib_dev_priv {
spinlock_t lock;
struct net_device *dev;
+ void (*next_priv_destructor)(struct net_device *dev);
struct napi_struct send_napi;
struct napi_struct recv_napi;
unsigned long flags;
+ /*
+ * This protects access to the child_intfs list.
+ * To READ from child_intfs the RTNL or vlan_rwsem read side must be
+ * held. To WRITE, the RTNL and the vlan_rwsem write side must be held
+ * (in that order). This lock exists because we have a few contexts where
+ * we need the child_intfs, but do not want to grab the RTNL.
+ */
struct rw_semaphore vlan_rwsem;
struct mutex mcast_mutex;
- struct mutex sysfs_mutex;
struct rb_root path_tree;
struct list_head path_list;
@@ -373,8 +378,8 @@ struct ipoib_dev_priv {
struct ipoib_rx_buf *rx_ring;
struct ipoib_tx_buf *tx_ring;
- unsigned tx_head;
- unsigned tx_tail;
+ unsigned int tx_head;
+ unsigned int tx_tail;
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
struct ib_ud_wr tx_wr;
struct ib_wc send_wc[MAX_SEND_CQE];
@@ -404,7 +409,7 @@ struct ipoib_dev_priv {
#endif
u64 hca_caps;
struct ipoib_ethtool_st ethtool;
- unsigned max_send_sge;
+ unsigned int max_send_sge;
bool sm_fullmember_sendonly_support;
const struct net_device_ops *rn_ops;
};
@@ -414,7 +419,7 @@ struct ipoib_ah {
struct ib_ah *ah;
struct list_head list;
struct kref ref;
- unsigned last_send;
+ unsigned int last_send;
int valid;
};
@@ -483,6 +488,7 @@ static inline void ipoib_put_ah(struct ipoib_ah *ah)
kref_put(&ah->ref, ipoib_free_ah);
}
int ipoib_open(struct net_device *dev);
+void ipoib_intf_free(struct net_device *dev);
int ipoib_add_pkey_attr(struct net_device *dev);
int ipoib_add_umcast_attr(struct net_device *dev);
@@ -510,9 +516,6 @@ void ipoib_ib_dev_down(struct net_device *dev);
int ipoib_ib_dev_stop_default(struct net_device *dev);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
-int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_dev_cleanup(struct net_device *dev);
-
void ipoib_mcast_join_task(struct work_struct *work);
void ipoib_mcast_carrier_on_task(struct work_struct *work);
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
@@ -600,7 +603,6 @@ void ipoib_pkey_open(struct ipoib_dev_priv *priv);
void ipoib_drain_cq(struct net_device *dev);
void ipoib_set_ethtool_ops(struct net_device *dev);
-void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
#define IPOIB_FLAGS_RC 0x80
#define IPOIB_FLAGS_UC 0x40
@@ -729,7 +731,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
static inline
int ipoib_cm_dev_init(struct net_device *dev)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 6535d9beb24d..ea01b8dd2be6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -78,7 +78,7 @@ static struct ib_send_wr ipoib_cm_rx_drain_wr = {
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event);
+ const struct ib_cm_event *event);
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
u64 mapping[IPOIB_CM_RX_SG])
@@ -94,7 +94,6 @@ static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- struct ib_recv_wr *bad_wr;
int i, ret;
priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
@@ -102,7 +101,7 @@ static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
for (i = 0; i < priv->cm.num_frags; ++i)
priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
- ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
+ ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
@@ -120,7 +119,6 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
struct ib_sge *sge, int id)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- struct ib_recv_wr *bad_wr;
int i, ret;
wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
@@ -128,7 +126,7 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
for (i = 0; i < IPOIB_CM_RX_SG; ++i)
sge[i].addr = rx->rx_ring[id].mapping[i];
- ret = ib_post_recv(rx->qp, wr, &bad_wr);
+ ret = ib_post_recv(rx->qp, wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
@@ -212,7 +210,6 @@ static void ipoib_cm_free_rx_ring(struct net_device *dev,
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
- struct ib_send_wr *bad_wr;
struct ipoib_cm_rx *p;
/* We only reserved 1 extra slot in CQ for drain WRs, so
@@ -227,7 +224,7 @@ static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
*/
p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
- if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
+ if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, NULL))
ipoib_warn(priv, "failed to post drain wr\n");
list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
@@ -275,7 +272,7 @@ static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
struct ib_cm_id *cm_id, struct ib_qp *qp,
- unsigned psn)
+ unsigned int psn)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr qp_attr;
@@ -363,7 +360,7 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
if (!rx->rx_ring)
return -ENOMEM;
- t = kmalloc(sizeof *t, GFP_KERNEL);
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
if (!t) {
ret = -ENOMEM;
goto err_free_1;
@@ -421,8 +418,9 @@ err_free_1:
}
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
- struct ib_qp *qp, struct ib_cm_req_event_param *req,
- unsigned psn)
+ struct ib_qp *qp,
+ const struct ib_cm_req_event_param *req,
+ unsigned int psn)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_data data = {};
@@ -432,7 +430,7 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
rep.private_data = &data;
- rep.private_data_len = sizeof data;
+ rep.private_data_len = sizeof(data);
rep.flow_control = 0;
rep.rnr_retry_count = req->rnr_retry_count;
rep.srq = ipoib_cm_has_srq(dev);
@@ -441,16 +439,17 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
return ib_send_cm_rep(cm_id, &rep);
}
-static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
{
struct net_device *dev = cm_id->context;
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_rx *p;
- unsigned psn;
+ unsigned int psn;
int ret;
ipoib_dbg(priv, "REQ arrived\n");
- p = kzalloc(sizeof *p, GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev = dev;
@@ -503,7 +502,7 @@ err_qp:
}
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event)
+ const struct ib_cm_event *event)
{
struct ipoib_cm_rx *p;
struct ipoib_dev_priv *priv;
@@ -547,7 +546,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
0, PAGE_SIZE);
--skb_shinfo(skb)->nr_frags;
} else {
- size = min(length, (unsigned) PAGE_SIZE);
+ size = min_t(unsigned int, length, PAGE_SIZE);
skb_frag_size_set(frag, size);
skb->data_len += size;
@@ -641,8 +640,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
}
- frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
- (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
+ frags = PAGE_ALIGN(wc->byte_len -
+ min_t(u32, wc->byte_len, IPOIB_CM_HEAD_SIZE)) /
+ PAGE_SIZE;
newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
mapping, GFP_ATOMIC);
@@ -657,7 +657,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
- memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
+ memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping));
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
@@ -698,13 +698,11 @@ static inline int post_send(struct ipoib_dev_priv *priv,
unsigned int wr_id,
struct ipoib_tx_buf *tx_req)
{
- struct ib_send_wr *bad_wr;
-
ipoib_build_sge(priv, tx_req);
priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
- return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
+ return ib_post_send(tx->qp, &priv->tx_wr.wr, NULL);
}
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
@@ -712,7 +710,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_tx_buf *tx_req;
int rc;
- unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
+ unsigned int usable_sge = tx->max_send_sge - !!skb_headlen(skb);
if (unlikely(skb->len > tx->mtu)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -982,7 +980,8 @@ void ipoib_cm_dev_stop(struct net_device *dev)
cancel_delayed_work(&priv->cm.stale_task);
}
-static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
{
struct ipoib_cm_tx *p = cm_id->context;
struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
@@ -1068,8 +1067,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
- attr.cap.max_send_sge =
- min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
+ attr.cap.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
+ MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
tx->max_send_sge = attr.cap.max_send_sge;
@@ -1094,7 +1093,7 @@ static int ipoib_cm_send_req(struct net_device *dev,
req.qp_num = qp->qp_num;
req.qp_type = qp->qp_type;
req.private_data = &data;
- req.private_data_len = sizeof data;
+ req.private_data_len = sizeof(data);
req.flow_control = 0;
req.starting_psn = 0; /* FIXME */
@@ -1152,7 +1151,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
ret = -ENOMEM;
goto err_tx;
}
- memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
+ memset(p->tx_ring, 0, ipoib_sendq_size * sizeof(*p->tx_ring));
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
memalloc_noio_restore(noio_flag);
@@ -1248,7 +1247,7 @@ timeout:
}
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event)
+ const struct ib_cm_event *event)
{
struct ipoib_cm_tx *tx = cm_id->context;
struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
@@ -1305,7 +1304,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_tx *tx;
- tx = kzalloc(sizeof *tx, GFP_ATOMIC);
+ tx = kzalloc(sizeof(*tx), GFP_ATOMIC);
if (!tx)
return NULL;
@@ -1370,7 +1369,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
neigh->daddr + QPN_AND_OPTIONS_OFFSET);
goto free_neigh;
}
- memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
+ memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
@@ -1428,7 +1427,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
struct net_device *dev = priv->dev;
struct sk_buff *skb;
unsigned long flags;
- unsigned mtu = priv->mcast_mtu;
+ unsigned int mtu = priv->mcast_mtu;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
@@ -1518,19 +1517,16 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
{
struct net_device *dev = to_net_dev(d);
int ret;
- struct ipoib_dev_priv *priv = ipoib_priv(dev);
-
- if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
- return -EPERM;
-
- if (!mutex_trylock(&priv->sysfs_mutex))
- return restart_syscall();
if (!rtnl_trylock()) {
- mutex_unlock(&priv->sysfs_mutex);
return restart_syscall();
}
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
+
ret = ipoib_set_mode(dev, buf);
/* The assumption is that the function ipoib_set_mode returned
@@ -1539,7 +1535,6 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
*/
if (ret != -EBUSY)
rtnl_unlock();
- mutex_unlock(&priv->sysfs_mutex);
return (!ret || ret == -EBUSY) ? count : ret;
}
@@ -1564,7 +1559,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
- if (PTR_ERR(priv->cm.srq) != -ENOSYS)
+ if (PTR_ERR(priv->cm.srq) != -EOPNOTSUPP)
pr_warn("%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
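
Alongside the const-ification, ib_post_send()/ib_post_recv()/ib_post_srq_recv() now accept NULL for bad_wr, which is what lets every call site above drop its write-only local. Sketch of the simplified call:

	/* bad_wr may be NULL when only the return code matters: */
	ret = ib_post_recv(qp, &rx_wr, NULL);
	if (unlikely(ret))
		ipoib_warn(priv, "post recv failed (%d)\n", ret);
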
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 2706bf26cbac..83429925dfc6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -102,7 +102,7 @@ static int ipoib_set_coalesce(struct net_device *dev,
ret = rdma_set_cq_moderation(priv->recv_cq,
coal->rx_max_coalesced_frames,
coal->rx_coalesce_usecs);
- if (ret && ret != -ENOSYS) {
+ if (ret && ret != -EOPNOTSUPP) {
ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
return ret;
}
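
The -ENOSYS to -EOPNOTSUPP conversions across this series follow the convention that -ENOSYS is reserved for missing system calls; optional device features report -EOPNOTSUPP, and callers filter on exactly that value, as the hunk above shows:

	ret = rdma_set_cq_moderation(priv->recv_cq, frames, usecs);
	if (ret && ret != -EOPNOTSUPP)	/* "unsupported" is not an error */
		ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
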
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index ea302b054601..178488028734 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -262,15 +262,15 @@ static const struct file_operations ipoib_path_fops = {
void ipoib_create_debug_files(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- char name[IFNAMSIZ + sizeof "_path"];
+ char name[IFNAMSIZ + sizeof("_path")];
- snprintf(name, sizeof name, "%s_mcg", dev->name);
+ snprintf(name, sizeof(name), "%s_mcg", dev->name);
priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
ipoib_root, dev, &ipoib_mcg_fops);
if (!priv->mcg_dentry)
ipoib_warn(priv, "failed to create mcg debug file\n");
- snprintf(name, sizeof name, "%s_path", dev->name);
+ snprintf(name, sizeof(name), "%s_path", dev->name);
priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
ipoib_root, dev, &ipoib_path_fops);
if (!priv->path_dentry)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f47f9ace1f48..9006a13af1de 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -40,6 +40,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <rdma/ib_cache.h>
#include "ipoib.h"
@@ -57,7 +58,7 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ipoib_ah *ah;
struct ib_ah *vah;
- ah = kmalloc(sizeof *ah, GFP_KERNEL);
+ ah = kmalloc(sizeof(*ah), GFP_KERNEL);
if (!ah)
return ERR_PTR(-ENOMEM);
@@ -100,7 +101,6 @@ static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- struct ib_recv_wr *bad_wr;
int ret;
priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
@@ -108,7 +108,7 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
- ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
+ ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
@@ -202,7 +202,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
memcpy(mapping, priv->rx_ring[wr_id].mapping,
- IPOIB_UD_RX_SG * sizeof *mapping);
+ IPOIB_UD_RX_SG * sizeof(*mapping));
/*
* If we can't allocate a new RX buffer, dump
@@ -541,7 +541,6 @@ static inline int post_send(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req,
void *head, int hlen)
{
- struct ib_send_wr *bad_wr;
struct sk_buff *skb = tx_req->skb;
ipoib_build_sge(priv, tx_req);
@@ -558,7 +557,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
} else
priv->tx_wr.wr.opcode = IB_WR_SEND;
- return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
+ return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
}
int ipoib_send(struct net_device *dev, struct sk_buff *skb,
@@ -568,7 +567,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_tx_buf *tx_req;
int hlen, rc;
void *phead;
- unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);
+ unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
if (skb_is_gso(skb)) {
hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -1069,7 +1068,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
bool ret = false;
netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
- if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
+ if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
return false;
netif_addr_lock_bh(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 26cde95bc0f3..e3d28f9ad9c0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -215,11 +215,6 @@ static int ipoib_stop(struct net_device *dev)
return 0;
}
-static void ipoib_uninit(struct net_device *dev)
-{
- ipoib_dev_cleanup(dev);
-}
-
static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -634,7 +629,7 @@ struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
struct ipoib_path_iter *iter;
- iter = kmalloc(sizeof *iter, GFP_KERNEL);
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return NULL;
@@ -770,8 +765,10 @@ static void path_rec_completion(int status,
struct rdma_ah_attr av;
if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
- pathrec, &av))
+ pathrec, &av, NULL)) {
ah = ipoib_create_ah(dev, priv->pd, &av);
+ rdma_destroy_ah_attr(&av);
+ }
}
spin_lock_irqsave(&priv->lock, flags);
@@ -883,7 +880,7 @@ static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
if (!priv->broadcast)
return NULL;
- path = kzalloc(sizeof *path, GFP_ATOMIC);
+ path = kzalloc(sizeof(*path), GFP_ATOMIC);
if (!path)
return NULL;
@@ -1199,11 +1196,13 @@ static void ipoib_timeout(struct net_device *dev)
static int ipoib_hard_header(struct sk_buff *skb,
struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr,
+ const void *saddr,
+ unsigned int len)
{
struct ipoib_header *header;
- header = skb_push(skb, sizeof *header);
+ header = skb_push(skb, sizeof(*header));
header->proto = htons(type);
header->reserved = 0;
@@ -1306,9 +1305,6 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
int i;
LIST_HEAD(remove_list);
- if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- return;
-
spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl,
@@ -1320,9 +1316,6 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
/* neigh is obsolete if it was idle for two GC periods */
dt = 2 * arp_tbl.gc_interval;
neigh_obsolete = jiffies - dt;
- /* handle possible race condition */
- if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- goto out_unlock;
for (i = 0; i < htbl->size; i++) {
struct ipoib_neigh *neigh;
@@ -1360,9 +1353,8 @@ static void ipoib_reap_neigh(struct work_struct *work)
__ipoib_reap_neigh(priv);
- if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- queue_delayed_work(priv->wq, &priv->neigh_reap_task,
- arp_tbl.gc_interval);
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ arp_tbl.gc_interval);
}
@@ -1371,7 +1363,7 @@ static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
{
struct ipoib_neigh *neigh;
- neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
+ neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
if (!neigh)
return NULL;
@@ -1524,9 +1516,8 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
if (!htbl)
return -ENOMEM;
- set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
size = roundup_pow_of_two(arp_tbl.gc_thresh3);
- buckets = kcalloc(size, sizeof(*buckets), GFP_KERNEL);
+ buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
if (!buckets) {
kfree(htbl);
return -ENOMEM;
@@ -1539,7 +1530,6 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
atomic_set(&ntbl->entries, 0);
/* start garbage collection */
- clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
@@ -1554,7 +1544,7 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
struct ipoib_neigh __rcu **buckets = htbl->buckets;
struct ipoib_neigh_table *ntbl = htbl->ntbl;
- kfree(buckets);
+ kvfree(buckets);
kfree(htbl);
complete(&ntbl->deleted);
}
@@ -1649,15 +1639,11 @@ out_unlock:
static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- int stopped;
ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
init_completion(&priv->ntbl.deleted);
- /* Stop GC if called at init fail need to cancel work */
- stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- if (!stopped)
- cancel_delayed_work(&priv->neigh_reap_task);
+ cancel_delayed_work_sync(&priv->neigh_reap_task);
ipoib_flush_neighs(priv);
@@ -1755,13 +1741,11 @@ static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
}
-int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
+static int ipoib_dev_init(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret = -ENOMEM;
- priv->ca = ca;
- priv->port = port;
priv->qp = NULL;
/*
@@ -1777,7 +1761,7 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
/* create pd, which used both for control and datapath*/
priv->pd = ib_alloc_pd(priv->ca, 0);
if (IS_ERR(priv->pd)) {
- pr_warn("%s: failed to allocate PD\n", ca->name);
+ pr_warn("%s: failed to allocate PD\n", priv->ca->name);
goto clean_wq;
}
@@ -1787,7 +1771,8 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
goto out_free_pd;
}
- if (ipoib_neigh_hash_init(priv) < 0) {
+ ret = ipoib_neigh_hash_init(priv);
+ if (ret) {
pr_warn("%s failed to init neigh hash\n", dev->name);
goto out_dev_uninit;
}
@@ -1796,12 +1781,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
if (ipoib_ib_dev_open(dev)) {
pr_warn("%s failed to open device\n", dev->name);
ret = -ENODEV;
- goto out_dev_uninit;
+ goto out_hash_uninit;
}
}
return 0;
+out_hash_uninit:
+ ipoib_neigh_hash_uninit(dev);
+
out_dev_uninit:
ipoib_ib_dev_cleanup(dev);
@@ -1821,21 +1809,151 @@ out:
return ret;
}
-void ipoib_dev_cleanup(struct net_device *dev)
+/*
+ * This must be called before doing an unregister_netdev on a parent device to
+ * shutdown the IB event handler.
+ */
+static void ipoib_parent_unregister_pre(struct net_device *ndev)
{
- struct ipoib_dev_priv *priv = ipoib_priv(dev), *cpriv, *tcpriv;
- LIST_HEAD(head);
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
- ASSERT_RTNL();
+ /*
+ * ipoib_set_mac checks netif_running before pushing work; clearing
+ * the running flag ensures it will not add more work.
+ */
+ rtnl_lock();
+ dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
+ rtnl_unlock();
- /* Delete any child interfaces first */
- list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
- /* Stop GC on child */
- set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
- cancel_delayed_work(&cpriv->neigh_reap_task);
- unregister_netdevice_queue(cpriv->dev, &head);
+ /* ipoib_event() cannot be running once this returns */
+ ib_unregister_event_handler(&priv->event_handler);
+
+ /*
+ * Work on the queue grabs the rtnl lock, so this cannot be done while
+ * also holding it.
+ */
+ flush_workqueue(ipoib_workqueue);
+}
+
+static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
+{
+ priv->hca_caps = priv->ca->attrs.device_cap_flags;
+
+ if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+ priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+ if (priv->hca_caps & IB_DEVICE_UD_TSO)
+ priv->dev->hw_features |= NETIF_F_TSO;
+
+ priv->dev->features |= priv->dev->hw_features;
+ }
+}
+
+static int ipoib_parent_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ib_port_attr attr;
+ int result;
+
+ result = ib_query_port(priv->ca, priv->port, &attr);
+ if (result) {
+ pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
+ priv->port);
+ return result;
+ }
+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+
+ result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+ if (result) {
+ pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
+ priv->ca->name, priv->port, result);
+ return result;
}
- unregister_netdevice_many(&head);
+
+ result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
+ if (result) {
+ pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
+ priv->ca->name, priv->port, result);
+ return result;
+ }
+ memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+ sizeof(union ib_gid));
+
+ SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
+ priv->dev->dev_id = priv->port - 1;
+
+ return 0;
+}
+
+static void ipoib_child_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ dev_hold(priv->parent);
+
+ down_write(&ppriv->vlan_rwsem);
+ list_add_tail(&priv->list, &ppriv->child_intfs);
+ up_write(&ppriv->vlan_rwsem);
+
+ priv->max_ib_mtu = ppriv->max_ib_mtu;
+ set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
+ memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
+}
+
+static int ipoib_ndo_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ int rc;
+
+ if (priv->parent) {
+ ipoib_child_init(ndev);
+ } else {
+ rc = ipoib_parent_init(ndev);
+ if (rc)
+ return rc;
+ }
+
+ /* MTU will be reset when mcast join happens */
+ ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+ priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
+ ndev->max_mtu = IPOIB_CM_MTU;
+
+ ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
+ /*
+ * Set the full membership bit, so that we join the right
+ * broadcast group, etc.
+ */
+ priv->pkey |= 0x8000;
+
+ ndev->broadcast[8] = priv->pkey >> 8;
+ ndev->broadcast[9] = priv->pkey & 0xff;
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+ ipoib_set_dev_features(priv);
+
+ rc = ipoib_dev_init(ndev);
+ if (rc) {
+ pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
+ priv->ca->name, priv->dev->name, priv->port, rc);
+ }
+
+ return 0;
+}
+
+static void ipoib_ndo_uninit(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ASSERT_RTNL();
+
+ /*
+ * ipoib_remove_one guarantees the children are removed before the
+ * parent, and that is the only place where a parent can be removed.
+ */
+ WARN_ON(!list_empty(&priv->child_intfs));
ipoib_neigh_hash_uninit(dev);
@@ -1847,6 +1965,16 @@ void ipoib_dev_cleanup(struct net_device *dev)
destroy_workqueue(priv->wq);
priv->wq = NULL;
}
+
+ if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ down_write(&ppriv->vlan_rwsem);
+ list_del(&priv->list);
+ up_write(&ppriv->vlan_rwsem);
+
+ dev_put(priv->parent);
+ }
}
static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
@@ -1894,7 +2022,8 @@ static const struct header_ops ipoib_header_ops = {
};
static const struct net_device_ops ipoib_netdev_ops_pf = {
- .ndo_uninit = ipoib_uninit,
+ .ndo_init = ipoib_ndo_init,
+ .ndo_uninit = ipoib_ndo_uninit,
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
.ndo_change_mtu = ipoib_change_mtu,
@@ -1913,7 +2042,8 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
- .ndo_uninit = ipoib_uninit,
+ .ndo_init = ipoib_ndo_init,
+ .ndo_uninit = ipoib_ndo_uninit,
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
.ndo_change_mtu = ipoib_change_mtu,
@@ -1945,6 +2075,13 @@ void ipoib_setup_common(struct net_device *dev)
netif_keep_dst(dev);
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
+
+ /*
+ * unregister_netdev always frees the netdev; we use this mode
+ * consistently to unify all the various unregister paths, including
+ * those connected to rtnl_link_ops which require it.
+ */
+ dev->needs_free_netdev = true;
}
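
For context on the needs_free_netdev comment above: with this flag set, the core runs dev->priv_destructor (if any) once unregistration completes and then frees the netdev itself. A minimal sketch of that lifecycle with hypothetical names (the example_* identifiers are not part of this patch):

struct example_priv {
	void *extra;		/* hypothetical separately allocated state */
};

static void example_destructor(struct net_device *dev)
{
	struct example_priv *ep = netdev_priv(dev);

	/* Free only the extra state; the core frees dev itself. */
	kfree(ep->extra);
}

static int example_create(void)
{
	struct net_device *dev;
	int rc;

	dev = alloc_netdev(sizeof(struct example_priv), "exmpl%d",
			   NET_NAME_UNKNOWN, ether_setup);
	if (!dev)
		return -ENOMEM;

	dev->needs_free_netdev = true;	/* core frees dev after unregister */
	dev->priv_destructor = example_destructor;

	rc = register_netdev(dev);
	if (rc)
		free_netdev(dev);	/* registration never happened */
	return rc;
}
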
static void ipoib_build_priv(struct net_device *dev)
@@ -1955,7 +2092,6 @@ static void ipoib_build_priv(struct net_device *dev)
spin_lock_init(&priv->lock);
init_rwsem(&priv->vlan_rwsem);
mutex_init(&priv->mcast_mutex);
- mutex_init(&priv->sysfs_mutex);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
@@ -1999,9 +2135,7 @@ static struct net_device
rn->send = ipoib_send;
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
- rn->free_rdma_netdev = free_netdev;
rn->hca = hca;
-
dev->netdev_ops = &ipoib_netdev_default_pf;
return dev;
@@ -2039,6 +2173,9 @@ struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
if (!priv)
return NULL;
+ priv->ca = hca;
+ priv->port = port;
+
dev = ipoib_get_netdev(hca, port, name);
if (!dev)
goto free_priv;
@@ -2053,6 +2190,15 @@ struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
rn = netdev_priv(dev);
rn->clnt_priv = priv;
+
+ /*
+ * Only the child register_netdev flows can handle priv_destructor
+ * being set, so we force it to NULL here and handle manually until it
+ * is safe to turn on.
+ */
+ priv->next_priv_destructor = dev->priv_destructor;
+ dev->priv_destructor = NULL;
+
ipoib_build_priv(dev);
return priv;
@@ -2061,6 +2207,27 @@ free_priv:
return NULL;
}
+void ipoib_intf_free(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ dev->priv_destructor = priv->next_priv_destructor;
+ if (dev->priv_destructor)
+ dev->priv_destructor(dev);
+
+ /*
+ * There are some error flows around register_netdev failing that may
+ * attempt to call priv_destructor twice; prevent that from happening.
+ */
+ dev->priv_destructor = NULL;
+
+ /* unregister/destroy is very complicated. Make bugs more obvious. */
+ rn->clnt_priv = NULL;
+
+ kfree(priv);
+}
+
static ssize_t show_pkey(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2186,12 +2353,6 @@ static ssize_t create_child(struct device *dev,
if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
return -EINVAL;
- /*
- * Set the full membership bit, so that we join the right
- * broadcast group, etc.
- */
- pkey |= 0x8000;
-
ret = ipoib_vlan_add(to_net_dev(dev), pkey);
return ret ? ret : count;
@@ -2223,87 +2384,19 @@ int ipoib_add_pkey_attr(struct net_device *dev)
return device_create_file(&dev->dev, &dev_attr_pkey);
}
-void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
-{
- priv->hca_caps = hca->attrs.device_cap_flags;
-
- if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
- priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-
- if (priv->hca_caps & IB_DEVICE_UD_TSO)
- priv->dev->hw_features |= NETIF_F_TSO;
-
- priv->dev->features |= priv->dev->hw_features;
- }
-}
-
static struct net_device *ipoib_add_port(const char *format,
struct ib_device *hca, u8 port)
{
struct ipoib_dev_priv *priv;
- struct ib_port_attr attr;
- struct rdma_netdev *rn;
- int result = -ENOMEM;
+ struct net_device *ndev;
+ int result;
priv = ipoib_intf_alloc(hca, port, format);
if (!priv) {
pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
- goto alloc_mem_failed;
- }
-
- SET_NETDEV_DEV(priv->dev, hca->dev.parent);
- priv->dev->dev_id = port - 1;
-
- result = ib_query_port(hca, port, &attr);
- if (result) {
- pr_warn("%s: ib_query_port %d failed\n", hca->name, port);
- goto device_init_failed;
- }
-
- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
-
- /* MTU will be reset when mcast join happens */
- priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
- priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
- priv->dev->max_mtu = IPOIB_CM_MTU;
-
- priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
-
- result = ib_query_pkey(hca, port, 0, &priv->pkey);
- if (result) {
- pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
- hca->name, port, result);
- goto device_init_failed;
- }
-
- ipoib_set_dev_features(priv, hca);
-
- /*
- * Set the full membership bit, so that we join the right
- * broadcast group, etc.
- */
- priv->pkey |= 0x8000;
-
- priv->dev->broadcast[8] = priv->pkey >> 8;
- priv->dev->broadcast[9] = priv->pkey & 0xff;
-
- result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
- if (result) {
- pr_warn("%s: ib_query_gid port %d failed (ret = %d)\n",
- hca->name, port, result);
- goto device_init_failed;
- }
-
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
- sizeof(union ib_gid));
- set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
-
- result = ipoib_dev_init(priv->dev, hca, port);
- if (result) {
- pr_warn("%s: failed to initialize port %d (ret = %d)\n",
- hca->name, port, result);
- goto device_init_failed;
+ return ERR_PTR(-ENOMEM);
}
+ ndev = priv->dev;
INIT_IB_EVENT_HANDLER(&priv->event_handler,
priv->ca, ipoib_event);
@@ -2312,46 +2405,43 @@ static struct net_device *ipoib_add_port(const char *format,
/* call event handler to ensure pkey in sync */
queue_work(ipoib_workqueue, &priv->flush_heavy);
- result = register_netdev(priv->dev);
+ result = register_netdev(ndev);
if (result) {
pr_warn("%s: couldn't register ipoib port %d; error %d\n",
hca->name, port, result);
- goto register_failed;
+
+ ipoib_parent_unregister_pre(ndev);
+ ipoib_intf_free(ndev);
+ free_netdev(ndev);
+
+ return ERR_PTR(result);
}
- result = -ENOMEM;
- if (ipoib_cm_add_mode_attr(priv->dev))
+ /*
+ * We cannot set priv_destructor before register_netdev because we
+ * need priv to remain valid during the error flow to execute
+ * ipoib_parent_unregister_pre(). Instead handle it manually and only
+ * enter priv_destructor mode once we are completely registered.
+ */
+ ndev->priv_destructor = ipoib_intf_free;
+
+ if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_pkey_attr(priv->dev))
+ if (ipoib_add_pkey_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_umcast_attr(priv->dev))
+ if (ipoib_add_umcast_attr(ndev))
goto sysfs_failed;
- if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
+ if (device_create_file(&ndev->dev, &dev_attr_create_child))
goto sysfs_failed;
- if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
+ if (device_create_file(&ndev->dev, &dev_attr_delete_child))
goto sysfs_failed;
- return priv->dev;
+ return ndev;
sysfs_failed:
- unregister_netdev(priv->dev);
-
-register_failed:
- ib_unregister_event_handler(&priv->event_handler);
- flush_workqueue(ipoib_workqueue);
- /* Stop GC if started before flush */
- set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(priv->wq);
- ipoib_dev_cleanup(priv->dev);
-
-device_init_failed:
- rn = netdev_priv(priv->dev);
- rn->free_rdma_netdev(priv->dev);
- kfree(priv);
-
-alloc_mem_failed:
- return ERR_PTR(result);
+ ipoib_parent_unregister_pre(ndev);
+ unregister_netdev(ndev);
+ return ERR_PTR(-ENOMEM);
}
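
ipoib_add_port() now reports failure through the ERR_PTR() convention instead of a bare NULL, so a caller recovers the errno with PTR_ERR() after an IS_ERR() check. A minimal caller sketch (example_setup_port is hypothetical; the "ib%d" format matches what ipoib_add_one passes):

static int example_setup_port(struct ib_device *hca, u8 port)
{
	struct net_device *ndev;

	ndev = ipoib_add_port("ib%d", hca, port);
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);	/* errno is encoded in the pointer */

	pr_info("registered %s\n", ndev->name);
	return 0;
}
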
static void ipoib_add_one(struct ib_device *device)
@@ -2362,7 +2452,7 @@ static void ipoib_add_one(struct ib_device *device)
int p;
int count = 0;
- dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
+ dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
if (!dev_list)
return;
@@ -2396,39 +2486,18 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
return;
list_for_each_entry_safe(priv, tmp, dev_list, list) {
- struct rdma_netdev *parent_rn = netdev_priv(priv->dev);
-
- ib_unregister_event_handler(&priv->event_handler);
- flush_workqueue(ipoib_workqueue);
-
- /* mark interface in the middle of destruction */
- set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+ LIST_HEAD(head);
+ ipoib_parent_unregister_pre(priv->dev);
rtnl_lock();
- dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
- rtnl_unlock();
-
- /* Stop GC */
- set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(priv->wq);
-
- /* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
- mutex_lock(&priv->sysfs_mutex);
- unregister_netdev(priv->dev);
- mutex_unlock(&priv->sysfs_mutex);
-
- parent_rn->free_rdma_netdev(priv->dev);
- list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
- struct rdma_netdev *child_rn;
+ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
+ list)
+ unregister_netdevice_queue(cpriv->dev, &head);
+ unregister_netdevice_queue(priv->dev, &head);
+ unregister_netdevice_many(&head);
- child_rn = netdev_priv(cpriv->dev);
- child_rn->free_rdma_netdev(cpriv->dev);
- kfree(cpriv);
- }
-
- kfree(priv);
+ rtnl_unlock();
}
kfree(dev_list);
@@ -2476,8 +2545,7 @@ static int __init ipoib_init_module(void)
* its private workqueue, and we only queue up flush events
* on our global flush workqueue. This avoids the deadlocks.
*/
- ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
- WQ_MEM_RECLAIM);
+ ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 6709328d90f8..b9e9562f5034 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -140,7 +140,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
{
struct ipoib_mcast *mcast;
- mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ mcast = kzalloc(sizeof(*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!mcast)
return NULL;
@@ -822,6 +822,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
+ neigh->ah->valid = 1;
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
@@ -917,7 +918,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
continue;
- memcpy(mgid.raw, ha->addr + 4, sizeof mgid);
+ memcpy(mgid.raw, ha->addr + 4, sizeof(mgid));
mcast = __ipoib_mcast_find(dev, &mgid);
if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -997,7 +998,7 @@ struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
struct ipoib_mcast_iter *iter;
- iter = kmalloc(sizeof *iter, GFP_KERNEL);
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return NULL;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 3e44087935ae..d4d553a51fa9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -122,15 +122,6 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
} else
child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
- if (child_pkey == 0 || child_pkey == 0x8000)
- return -EINVAL;
-
- /*
- * Set the full membership bit, so that we join the right
- * broadcast group, etc.
- */
- child_pkey |= 0x8000;
-
err = __ipoib_vlan_add(ppriv, ipoib_priv(dev),
child_pkey, IPOIB_RTNL_CHILD);
@@ -139,19 +130,6 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
return err;
}
-static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
-{
- struct ipoib_dev_priv *priv, *ppriv;
-
- priv = ipoib_priv(dev);
- ppriv = ipoib_priv(priv->parent);
-
- down_write(&ppriv->vlan_rwsem);
- unregister_netdevice_queue(dev, head);
- list_del(&priv->list);
- up_write(&ppriv->vlan_rwsem);
-}
-
static size_t ipoib_get_size(const struct net_device *dev)
{
return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
@@ -167,7 +145,6 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.setup = ipoib_setup_common,
.newlink = ipoib_new_child_link,
.changelink = ipoib_changelink,
- .dellink = ipoib_unregister_child_dev,
.get_size = ipoib_get_size,
.fill_info = ipoib_fill_info,
};
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 984a88096f39..9f36ca786df8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -52,7 +52,7 @@ int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
if (set_qkey) {
ret = -ENOMEM;
- qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
+ qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
if (!qp_attr)
goto out;
@@ -147,7 +147,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
.cap = {
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
- .max_send_sge = min_t(u32, priv->ca->attrs.max_sge,
+ .max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
MAX_SKB_FRAGS + 1),
.max_recv_sge = IPOIB_UD_RX_SG
},
@@ -168,8 +168,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
else
size += ipoib_recvq_size * ipoib_max_conn_qp;
} else
- if (ret != -ENOSYS)
- return -ENODEV;
+ if (ret != -EOPNOTSUPP)
+ return ret;
req_vec = (priv->port - 1) * 2;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 55a9b71ed05a..341753fbda54 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -50,68 +50,112 @@ static ssize_t show_parent(struct device *d, struct device_attribute *attr,
}
static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
+static bool is_child_unique(struct ipoib_dev_priv *ppriv,
+ struct ipoib_dev_priv *priv)
+{
+ struct ipoib_dev_priv *tpriv;
+
+ ASSERT_RTNL();
+
+ /*
+ * Since the legacy sysfs interface uses pkey for deletion it cannot
+ * support more than one interface with the same pkey, it creates
+ * ambiguity. The RTNL interface deletes using the netdev so it does
+ * not have a problem to support duplicated pkeys.
+ */
+ if (priv->child_type != IPOIB_LEGACY_CHILD)
+ return true;
+
+ /*
+ * First ensure this isn't a duplicate. We check the parent device and
+ * then all of the legacy child interfaces to make sure the Pkey
+ * doesn't match.
+ */
+ if (ppriv->pkey == priv->pkey)
+ return false;
+
+ list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
+ if (tpriv->pkey == priv->pkey &&
+ tpriv->child_type == IPOIB_LEGACY_CHILD)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * NOTE: If this function fails then priv->dev remains valid; however,
+ * priv may have been freed and must not be touched by the caller in the
+ * error case.
+ *
+ * If (ndev->reg_state == NETREG_UNINITIALIZED) then it is up to the caller to
+ * free the net_device (just as rtnl_newlink does) otherwise the net_device
+ * will be freed when the rtnl is unlocked.
+ */
int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
u16 pkey, int type)
{
+ struct net_device *ndev = priv->dev;
int result;
- priv->max_ib_mtu = ppriv->max_ib_mtu;
- /* MTU will be reset when mcast join happens */
- priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
- priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
- priv->parent = ppriv->dev;
- set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ ASSERT_RTNL();
+
+ /*
+ * Racing with unregister of the parent must be prevented by the
+ * caller.
+ */
+ WARN_ON(ppriv->dev->reg_state != NETREG_REGISTERED);
- ipoib_set_dev_features(priv, ppriv->ca);
+ if (pkey == 0 || pkey == 0x8000) {
+ result = -EINVAL;
+ goto out_early;
+ }
+ priv->parent = ppriv->dev;
priv->pkey = pkey;
+ priv->child_type = type;
- memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
- memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
- set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
- priv->dev->broadcast[8] = pkey >> 8;
- priv->dev->broadcast[9] = pkey & 0xff;
-
- result = ipoib_dev_init(priv->dev, ppriv->ca, ppriv->port);
- if (result < 0) {
- ipoib_warn(ppriv, "failed to initialize subinterface: "
- "device %s, port %d",
- ppriv->ca->name, ppriv->port);
- goto err;
+ if (!is_child_unique(ppriv, priv)) {
+ result = -ENOTUNIQ;
+ goto out_early;
}
- result = register_netdevice(priv->dev);
+ /* We do not need to touch priv if register_netdevice fails */
+ ndev->priv_destructor = ipoib_intf_free;
+
+ result = register_netdevice(ndev);
if (result) {
ipoib_warn(priv, "failed to initialize; error %i", result);
- goto register_failed;
+
+ /*
+ * register_netdevice sometimes calls priv_destructor,
+ * sometimes not. Make sure it was done.
+ */
+ goto out_early;
}
/* RTNL children don't need proprietary sysfs entries */
if (type == IPOIB_LEGACY_CHILD) {
- if (ipoib_cm_add_mode_attr(priv->dev))
+ if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_pkey_attr(priv->dev))
+ if (ipoib_add_pkey_attr(ndev))
goto sysfs_failed;
- if (ipoib_add_umcast_attr(priv->dev))
+ if (ipoib_add_umcast_attr(ndev))
goto sysfs_failed;
- if (device_create_file(&priv->dev->dev, &dev_attr_parent))
+ if (device_create_file(&ndev->dev, &dev_attr_parent))
goto sysfs_failed;
}
- priv->child_type = type;
- list_add_tail(&priv->list, &ppriv->child_intfs);
-
return 0;
sysfs_failed:
- result = -ENOMEM;
unregister_netdevice(priv->dev);
+ return -ENOMEM;
-register_failed:
- ipoib_dev_cleanup(priv->dev);
-
-err:
+out_early:
+ if (ndev->priv_destructor)
+ ndev->priv_destructor(ndev);
return result;
}
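
Per the NOTE above, on failure the caller owns ndev but must not touch priv. A sketch of the resulting caller pattern, mirroring what ipoib_vlan_add() below and rtnl_newlink do (example_add_child is hypothetical):

/* Hypothetical caller, illustrating the ownership rule only. */
static int example_add_child(struct ipoib_dev_priv *ppriv,
			     struct ipoib_dev_priv *priv, u16 pkey)
{
	struct net_device *ndev = priv->dev;	/* stays valid on error */
	int rc;

	rc = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
	if (rc && ndev->reg_state == NETREG_UNINITIALIZED)
		free_netdev(ndev);	/* priv may be gone; don't touch it */
	return rc;
}
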
@@ -119,129 +163,124 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv;
char intf_name[IFNAMSIZ];
- struct ipoib_dev_priv *tpriv;
+ struct net_device *ndev;
int result;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- ppriv = ipoib_priv(pdev);
-
- if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
- return -EPERM;
-
- snprintf(intf_name, sizeof intf_name, "%s.%04x",
- ppriv->dev->name, pkey);
-
- if (!mutex_trylock(&ppriv->sysfs_mutex))
+ if (!rtnl_trylock())
return restart_syscall();
- if (!rtnl_trylock()) {
- mutex_unlock(&ppriv->sysfs_mutex);
- return restart_syscall();
- }
-
- if (!down_write_trylock(&ppriv->vlan_rwsem)) {
+ if (pdev->reg_state != NETREG_REGISTERED) {
rtnl_unlock();
- mutex_unlock(&ppriv->sysfs_mutex);
- return restart_syscall();
+ return -EPERM;
}
+ ppriv = ipoib_priv(pdev);
+
+ snprintf(intf_name, sizeof(intf_name), "%s.%04x",
+ ppriv->dev->name, pkey);
+
priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
if (!priv) {
result = -ENOMEM;
goto out;
}
-
- /*
- * First ensure this isn't a duplicate. We check the parent device and
- * then all of the legacy child interfaces to make sure the Pkey
- * doesn't match.
- */
- if (ppriv->pkey == pkey) {
- result = -ENOTUNIQ;
- goto out;
- }
-
- list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
- if (tpriv->pkey == pkey &&
- tpriv->child_type == IPOIB_LEGACY_CHILD) {
- result = -ENOTUNIQ;
- goto out;
- }
- }
+ ndev = priv->dev;
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
+ if (result && ndev->reg_state == NETREG_UNINITIALIZED)
+ free_netdev(ndev);
+
out:
- up_write(&ppriv->vlan_rwsem);
rtnl_unlock();
- mutex_unlock(&ppriv->sysfs_mutex);
- if (result && priv) {
- struct rdma_netdev *rn;
+ return result;
+}
+
+struct ipoib_vlan_delete_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+/*
+ * sysfs callbacks of a netdevice cannot obtain the rtnl lock as
+ * unregister_netdev ultimately deletes the sysfs files while holding the rtnl
+ * lock. This deadlocks the system.
+ *
+ * A callback can use rtnl_trylock to avoid the deadlock, but it cannot call
+ * unregister_netdev, as that internally takes and releases the rtnl_lock. So
+ * instead we find the netdev to unregister and then do the actual unregister
+ * from the global work queue where we can obtain the rtnl_lock safely.
+ */
+static void ipoib_vlan_delete_task(struct work_struct *work)
+{
+ struct ipoib_vlan_delete_work *pwork =
+ container_of(work, struct ipoib_vlan_delete_work, work);
+ struct net_device *dev = pwork->dev;
+
+ rtnl_lock();
+
+ /* Unregistering tasks can race with another task or parent removal */
+ if (dev->reg_state == NETREG_REGISTERED) {
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
- rn = netdev_priv(priv->dev);
- rn->free_rdma_netdev(priv->dev);
- kfree(priv);
+ ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
+ unregister_netdevice(dev);
}
- return result;
+ rtnl_unlock();
+
+ kfree(pwork);
}
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv, *tpriv;
- struct net_device *dev = NULL;
+ int rc;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- ppriv = ipoib_priv(pdev);
-
- if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
- return -EPERM;
-
- if (!mutex_trylock(&ppriv->sysfs_mutex))
+ if (!rtnl_trylock())
return restart_syscall();
- if (!rtnl_trylock()) {
- mutex_unlock(&ppriv->sysfs_mutex);
- return restart_syscall();
- }
-
- if (!down_write_trylock(&ppriv->vlan_rwsem)) {
+ if (pdev->reg_state != NETREG_REGISTERED) {
rtnl_unlock();
- mutex_unlock(&ppriv->sysfs_mutex);
- return restart_syscall();
+ return -EPERM;
}
+ ppriv = ipoib_priv(pdev);
+
+ rc = -ENODEV;
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
- list_del(&priv->list);
- dev = priv->dev;
+ struct ipoib_vlan_delete_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ down_write(&ppriv->vlan_rwsem);
+ list_del_init(&priv->list);
+ up_write(&ppriv->vlan_rwsem);
+ work->dev = priv->dev;
+ INIT_WORK(&work->work, ipoib_vlan_delete_task);
+ queue_work(ipoib_workqueue, &work->work);
+
+ rc = 0;
break;
}
}
- up_write(&ppriv->vlan_rwsem);
-
- if (dev) {
- ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
- unregister_netdevice(dev);
- }
+out:
rtnl_unlock();
- mutex_unlock(&ppriv->sysfs_mutex);
-
- if (dev) {
- struct rdma_netdev *rn;
-
- rn = netdev_priv(dev);
- rn->free_rdma_netdev(priv->dev);
- kfree(priv);
- return 0;
- }
- return -ENODEV;
+ return rc;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9a6434c31db2..3fecd87c9f2b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -610,12 +610,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
uint32_t initial_cmdsn)
{
struct iscsi_cls_session *cls_session;
- struct iscsi_session *session;
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
u32 max_fr_sectors;
- u16 max_cmds;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
@@ -633,8 +631,8 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
*/
if (ep) {
iser_conn = ep->dd_data;
- max_cmds = iser_conn->max_cmds;
shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
+ shost->can_queue = min_t(u16, cmds_max, iser_conn->max_cmds);
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_UP) {
@@ -660,7 +658,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
}
mutex_unlock(&iser_conn->state_mutex);
} else {
- max_cmds = ISER_DEF_XMIT_CMDS_MAX;
+ shost->can_queue = min_t(u16, cmds_max, ISER_DEF_XMIT_CMDS_MAX);
if (iscsi_host_add(shost, NULL))
goto free_host;
}
@@ -676,21 +674,13 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
iser_warn("max_sectors was reduced from %u to %u\n",
iser_max_sectors, shost->max_sectors);
- if (cmds_max > max_cmds) {
- iser_info("cmds_max changed from %u to %u\n",
- cmds_max, max_cmds);
- cmds_max = max_cmds;
- }
-
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
- cmds_max, 0,
+ shost->can_queue, 0,
sizeof(struct iscsi_iser_task),
initial_cmdsn, 0);
if (!cls_session)
goto remove_host;
- session = cls_session->dd_data;
- shost->can_queue = session->scsi_cmds_max;
return cls_session;
remove_host:
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index ca844a926e6a..009be8889d71 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -311,7 +311,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
- domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
+ domain->sig.dif.ref_tag = t10_pi_ref_tag(sc->request);
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
@@ -405,7 +405,8 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- wr = sig_handover_wr(iser_tx_next_wr(tx_desc));
+ wr = container_of(iser_tx_next_wr(tx_desc), struct ib_sig_handover_wr,
+ wr);
wr->wr.opcode = IB_WR_REG_SIG_MR;
wr->wr.wr_cqe = cqe;
wr->wr.sg_list = &data_reg->sge;
@@ -457,7 +458,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
return n < 0 ? n : -EINVAL;
}
- wr = reg_wr(iser_tx_next_wr(tx_desc));
+ wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr);
wr->wr.opcode = IB_WR_REG_MR;
wr->wr.wr_cqe = cqe;
wr->wr.send_flags = 0;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 616d978cbf2b..b686a4aaffe8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1022,7 +1022,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_login_desc *desc = &iser_conn->login_desc;
- struct ib_recv_wr wr, *wr_failed;
+ struct ib_recv_wr wr;
int ib_ret;
desc->sge.addr = desc->rsp_dma;
@@ -1036,7 +1036,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
wr.next = NULL;
ib_conn->post_recv_buf_count++;
- ib_ret = ib_post_recv(ib_conn->qp, &wr, &wr_failed);
+ ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL);
if (ib_ret) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
ib_conn->post_recv_buf_count--;
@@ -1050,7 +1050,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
unsigned int my_rx_head = iser_conn->rx_desc_head;
struct iser_rx_desc *rx_desc;
- struct ib_recv_wr *wr, *wr_failed;
+ struct ib_recv_wr *wr;
int i, ib_ret;
for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
@@ -1067,7 +1067,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
wr->next = NULL; /* mark end of work requests list */
ib_conn->post_recv_buf_count += count;
- ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &wr_failed);
+ ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
if (ib_ret) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
ib_conn->post_recv_buf_count -= count;
@@ -1086,7 +1086,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
bool signal)
{
- struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
+ struct ib_send_wr *wr = iser_tx_next_wr(tx_desc);
int ib_ret;
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
@@ -1100,10 +1100,10 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
wr->opcode = IB_WR_SEND;
wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
- ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr);
+ ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, NULL);
if (ib_ret)
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
- ib_ret, bad_wr->opcode);
+ ib_ret, wr->opcode);
return ib_ret;
}
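
The wr_failed/bad_wr removals in this file (and in isert, srp, and srpt below) all track the same RDMA core change: the work-request arguments of ib_post_send()/ib_post_recv()/ib_post_srq_recv() became const, and the bad_wr out-parameter became optional, so callers that do not care which WR failed can pass NULL. A minimal sketch of the new calling convention (the qp, cqe, and sge arguments are assumed to exist):

static int example_post_recv(struct ib_qp *qp, struct ib_cqe *cqe,
			     struct ib_sge *sge)
{
	struct ib_recv_wr wr = {
		.wr_cqe  = cqe,
		.sg_list = sge,
		.num_sge = 1,
	};

	/* Not interested in the failing WR, so bad_wr may be NULL now. */
	return ib_post_recv(qp, &wr, NULL);
}
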
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index cccbcf0eb035..f39670c5c25c 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -136,7 +136,7 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
- attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
+ attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
@@ -299,7 +299,8 @@ isert_create_device_ib_res(struct isert_device *device)
struct ib_device *ib_dev = device->ib_device;
int ret;
- isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
+ isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n",
+ ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
ret = isert_alloc_comps(device);
@@ -809,7 +810,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
- struct ib_recv_wr *rx_wr, *rx_wr_failed;
+ struct ib_recv_wr *rx_wr;
int i, ret;
struct iser_rx_desc *rx_desc;
@@ -825,8 +826,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
- ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
- &rx_wr_failed);
+ ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL);
if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
@@ -836,7 +836,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
- struct ib_recv_wr *rx_wr_failed, rx_wr;
+ struct ib_recv_wr rx_wr;
int ret;
if (!rx_desc->in_use) {
@@ -853,7 +853,7 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
rx_wr.num_sge = 1;
rx_wr.next = NULL;
- ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
@@ -864,7 +864,7 @@ static int
isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct ib_send_wr send_wr, *send_wr_failed;
+ struct ib_send_wr send_wr;
int ret;
ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
@@ -879,7 +879,7 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
+ ret = ib_post_send(isert_conn->qp, &send_wr, NULL);
if (ret)
isert_err("ib_post_send() failed, ret: %d\n", ret);
@@ -967,7 +967,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
static int
isert_login_post_recv(struct isert_conn *isert_conn)
{
- struct ib_recv_wr rx_wr, *rx_wr_fail;
+ struct ib_recv_wr rx_wr;
struct ib_sge sge;
int ret;
@@ -986,7 +986,7 @@ isert_login_post_recv(struct isert_conn *isert_conn)
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
- ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
if (ret)
isert_err("ib_post_recv() failed: %d\n", ret);
@@ -1829,7 +1829,6 @@ isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
- struct ib_send_wr *wr_failed;
int ret;
ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
@@ -1838,8 +1837,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
return ret;
}
- ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
- &wr_failed);
+ ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
if (ret) {
isert_err("ib_post_send failed with %d\n", ret);
return ret;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 0c8aec62a425..61558788b3fa 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -95,7 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
}
static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
@@ -107,7 +107,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
- accel_priv, fallback);
+ sb_dev, fallback);
skb_pull(skb, sizeof(*mdata));
return rc;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9786b24b956f..444d16520506 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -57,13 +57,10 @@
#define DRV_NAME "ib_srp"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "2.0"
-#define DRV_RELDATE "July 26, 2015"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_INFO(release_date, DRV_RELDATE);
#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
@@ -145,7 +142,8 @@ static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname);
-static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
@@ -1211,7 +1209,6 @@ static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
u32 rkey)
{
- struct ib_send_wr *bad_wr;
struct ib_send_wr wr = {
.opcode = IB_WR_LOCAL_INV,
.next = NULL,
@@ -1222,7 +1219,7 @@ static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
wr.wr_cqe = &req->reg_cqe;
req->reg_cqe.done = srp_inv_rkey_err_done;
- return ib_post_send(ch->qp, &wr, &bad_wr);
+ return ib_post_send(ch->qp, &wr, NULL);
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
@@ -1503,7 +1500,6 @@ static int srp_map_finish_fr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_send_wr *bad_wr;
struct ib_reg_wr wr;
struct srp_fr_desc *desc;
u32 rkey;
@@ -1567,7 +1563,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
srp_map_desc(state, desc->mr->iova,
desc->mr->length, desc->mr->rkey);
- err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
+ err = ib_post_send(ch->qp, &wr.wr, NULL);
if (unlikely(err)) {
WARN_ON_ONCE(err == -ENOMEM);
return err;
@@ -2018,7 +2014,7 @@ static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
struct srp_target_port *target = ch->target;
struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
+ struct ib_send_wr wr;
list.addr = iu->dma;
list.length = len;
@@ -2033,13 +2029,13 @@ static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
- return ib_post_send(ch->qp, &wr, &bad_wr);
+ return ib_post_send(ch->qp, &wr, NULL);
}
static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
struct srp_target_port *target = ch->target;
- struct ib_recv_wr wr, *bad_wr;
+ struct ib_recv_wr wr;
struct ib_sge list;
list.addr = iu->dma;
@@ -2053,7 +2049,7 @@ static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
wr.sg_list = &list;
wr.num_sge = 1;
- return ib_post_recv(ch->qp, &wr, &bad_wr);
+ return ib_post_recv(ch->qp, &wr, NULL);
}
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
@@ -2558,7 +2554,7 @@ error:
}
static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event,
+ const struct ib_cm_event *event,
struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
@@ -2643,7 +2639,8 @@ static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
}
}
-static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
{
struct srp_rdma_ch *ch = cm_id->context;
struct srp_target_port *target = ch->target;
@@ -3843,7 +3840,7 @@ static ssize_t srp_create_target(struct device *dev,
INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
- ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
+ ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
if (ret)
goto out;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 3081c629a7f7..f37cbad022a2 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -575,8 +575,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
sport->sm_lid = port_attr.sm_lid;
sport->lid = port_attr.lid;
- ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid,
- NULL);
+ ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
if (ret)
goto err_query_port;
@@ -720,7 +719,7 @@ static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
&& ioctx_size != sizeof(struct srpt_send_ioctx));
- ring = kmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
+ ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
if (!ring)
goto out;
for (i = 0; i < ring_size; ++i) {
@@ -734,7 +733,7 @@ static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
err:
while (--i >= 0)
srpt_free_ioctx(sdev, ring[i], dma_size, dir);
- kfree(ring);
+ kvfree(ring);
ring = NULL;
out:
return ring;
@@ -759,7 +758,7 @@ static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
for (i = 0; i < ring_size; ++i)
srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
- kfree(ioctx_ring);
+ kvfree(ioctx_ring);
}
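
The switch from kmalloc_array() to kvmalloc_array() above matters because an ioctx ring can be large enough that physically contiguous pages are unavailable; kvmalloc_array() falls back to vmalloc() under memory pressure, and kvfree() releases either kind of allocation. A small sketch (the example_* names are hypothetical):

static void **example_alloc_ring(size_t entries)
{
	/* Tries kmalloc first, falls back to vmalloc if fragmented. */
	return kvmalloc_array(entries, sizeof(void *),
			      GFP_KERNEL | __GFP_ZERO);
}

static void example_free_ring(void **ring)
{
	kvfree(ring);	/* correct for both kmalloc and vmalloc memory */
}
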
/**
@@ -817,7 +816,7 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *ioctx)
{
struct ib_sge list;
- struct ib_recv_wr wr, *bad_wr;
+ struct ib_recv_wr wr;
BUG_ON(!sdev);
list.addr = ioctx->ioctx.dma;
@@ -831,9 +830,9 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
wr.num_sge = 1;
if (sdev->use_srq)
- return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+ return ib_post_srq_recv(sdev->srq, &wr, NULL);
else
- return ib_post_recv(ch->qp, &wr, &bad_wr);
+ return ib_post_recv(ch->qp, &wr, NULL);
}
/**
@@ -847,7 +846,6 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
*/
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
- struct ib_send_wr *bad_wr;
struct ib_rdma_wr wr = {
.wr = {
.next = NULL,
@@ -860,7 +858,7 @@ static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
ch->qp->qp_num);
- return ib_post_send(ch->qp, &wr.wr, &bad_wr);
+ return ib_post_send(ch->qp, &wr.wr, NULL);
}
static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1754,13 +1752,15 @@ retry:
*/
qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
qp_init->cap.max_rdma_ctxs = sq_size / 2;
- qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
+ qp_init->cap.max_send_sge = min(attrs->max_send_sge,
+ SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
if (sdev->use_srq) {
qp_init->srq = sdev->srq;
} else {
qp_init->cap.max_recv_wr = ch->rq_size;
- qp_init->cap.max_recv_sge = qp_init->cap.max_send_sge;
+ qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
+ SRPT_MAX_SG_PER_WQE);
}
if (ch->using_rdma_cm) {
@@ -1833,8 +1833,7 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
int ret;
if (!srpt_set_ch_state(ch, CH_DRAINING)) {
- pr_debug("%s-%d: already closed\n", ch->sess_name,
- ch->qp->qp_num);
+ pr_debug("%s: already closed\n", ch->sess_name);
return false;
}
@@ -1940,8 +1939,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport)
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
if (srpt_disconnect_ch(ch) >= 0)
- pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
- ch->sess_name, ch->qp->qp_num,
+ pr_info("Closing channel %s because target %s_%d has been disabled\n",
+ ch->sess_name,
sport->sdev->device->name, sport->port);
srpt_close_ch(ch);
}
@@ -2029,8 +2028,7 @@ static void srpt_release_channel_work(struct work_struct *w)
target_sess_cmd_list_set_waiting(se_sess);
target_wait_for_sess_cmds(se_sess);
- transport_deregister_session_configfs(se_sess);
- transport_deregister_session(se_sess);
+ target_remove_session(se_sess);
ch->sess = NULL;
if (ch->using_rdma_cm)
@@ -2087,7 +2085,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
struct rdma_conn_param rdma_cm;
struct ib_cm_rep_param ib_cm;
} *rep_param = NULL;
- struct srpt_rdma_ch *ch;
+ struct srpt_rdma_ch *ch = NULL;
char i_port_id[36];
u32 it_iu_len;
int i, ret;
@@ -2221,26 +2219,28 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
pr_debug("registering session %s\n", ch->sess_name);
if (sport->port_guid_tpg.se_tpg_wwn)
- ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0,
TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL, i_port_id, ch,
NULL);
/* Retry without leading "0x" */
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
if (IS_ERR_OR_NULL(ch->sess)) {
+ WARN_ON_ONCE(ch->sess == NULL);
ret = PTR_ERR(ch->sess);
+ ch->sess = NULL;
pr_info("Rejected login for initiator %s: ret = %d.\n",
ch->sess_name, ret);
rej->reason = cpu_to_be32(ret == -ENOMEM ?
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
- goto reject;
+ goto destroy_ib;
}
mutex_lock(&sport->mutex);
@@ -2279,7 +2279,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
ret);
- goto destroy_ib;
+ goto reject;
}
pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
@@ -2358,8 +2358,11 @@ free_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
ch->max_rsp_size, DMA_TO_DEVICE);
+
free_ch:
- if (ib_cm_id)
+ if (rdma_cm_id)
+ rdma_cm_id->context = NULL;
+ else
ib_cm_id->context = NULL;
kfree(ch);
ch = NULL;
@@ -2379,6 +2382,15 @@ reject:
ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
rej, sizeof(*rej));
+ if (ch && ch->sess) {
+ srpt_close_ch(ch);
+ /*
+ * Tell the caller not to free cm_id since
+ * srpt_release_channel_work() will do that.
+ */
+ ret = 0;
+ }
+
out:
kfree(rep_param);
kfree(rsp);
@@ -2388,7 +2400,7 @@ out:
}
static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
- struct ib_cm_req_event_param *param,
+ const struct ib_cm_req_event_param *param,
void *private_data)
{
char sguid[40];
@@ -2500,7 +2512,8 @@ static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
* a non-zero value in any other case will trigger a race with the
* ib_destroy_cm_id() call in srpt_release_channel().
*/
-static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int srpt_cm_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
{
struct srpt_rdma_ch *ch = cm_id->context;
int ret;
@@ -2610,7 +2623,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx =
container_of(se_cmd, struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
- struct ib_send_wr *first_wr = NULL, *bad_wr;
+ struct ib_send_wr *first_wr = NULL;
struct ib_cqe *cqe = &ioctx->rdma_cqe;
enum srpt_command_state new_state;
int ret, i;
@@ -2634,7 +2647,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
cqe = NULL;
}
- ret = ib_post_send(ch->qp, first_wr, &bad_wr);
+ ret = ib_post_send(ch->qp, first_wr, NULL);
if (ret) {
pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
__func__, ret, ioctx->n_rdma,
@@ -2672,7 +2685,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
container_of(cmd, struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
struct srpt_device *sdev = ch->sport->sdev;
- struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
+ struct ib_send_wr send_wr, *first_wr = &send_wr;
struct ib_sge sge;
enum srpt_command_state state;
int resp_len, ret, i;
@@ -2745,7 +2758,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- ret = ib_post_send(ch->qp, first_wr, &bad_wr);
+ ret = ib_post_send(ch->qp, first_wr, NULL);
if (ret < 0) {
pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
__func__, ioctx->cmd.tag, ret);
@@ -2969,7 +2982,8 @@ static void srpt_add_one(struct ib_device *device)
pr_debug("device = %p\n", device);
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
+ GFP_KERNEL);
if (!sdev)
goto err;
@@ -3023,8 +3037,6 @@ static void srpt_add_one(struct ib_device *device)
srpt_event_handler);
ib_register_event_handler(&sdev->event_handler);
- WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
-
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
INIT_LIST_HEAD(&sport->nexus_list);
@@ -3597,11 +3609,9 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
/**
* srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
* @wwn: Corresponds to $driver/$port.
- * @group: Not used.
* @name: $tpg.
*/
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
const char *name)
{
struct srpt_port *sport = wwn->priv;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 2361483476a0..444dfd7281b5 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -396,9 +396,9 @@ struct srpt_port {
* @sdev_mutex: Serializes use_srq changes.
* @use_srq: Whether or not to use SRQ.
* @ioctx_ring: Per-HCA SRQ.
- * @port: Information about the ports owned by this HCA.
* @event_handler: Per-HCA asynchronous IB event handler.
* @list: Node in srpt_dev_list.
+ * @port: Information about the ports owned by this HCA.
*/
struct srpt_device {
struct ib_device *device;
@@ -410,9 +410,9 @@ struct srpt_device {
struct mutex sdev_mutex;
bool use_srq;
struct srpt_recv_ioctx **ioctx_ring;
- struct srpt_port port[2];
struct ib_event_handler event_handler;
struct list_head list;
+ struct srpt_port port[];
};
#endif /* IB_SRPT_H */
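
The sdev allocation change pairs a C99 flexible array member with struct_size(), which computes sizeof(*sdev) plus phys_port_cnt copies of the port element while saturating on overflow instead of wrapping. A sketch of the pattern with hypothetical types:

struct example_port {
	u32 lid;			/* placeholder member */
};

struct example_dev {
	int nports;
	struct example_port port[];	/* flexible array member, last */
};

static struct example_dev *example_alloc(int nports)
{
	struct example_dev *d;

	/* struct_size() == sizeof(*d) + nports * sizeof(d->port[0]),
	 * saturating to SIZE_MAX on overflow so kzalloc() fails safely. */
	d = kzalloc(struct_size(d, port, nports), GFP_KERNEL);
	if (d)
		d->nports = nports;
	return d;
}
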
diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c
index cd4e6679d61a..5419c1c1f621 100644
--- a/drivers/input/evbug.c
+++ b/drivers/input/evbug.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c81c79d01d93..370206f987f9 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -481,7 +481,7 @@ static int evdev_release(struct inode *inode, struct file *file)
evdev_detach_client(evdev, client);
for (i = 0; i < EV_CNT; ++i)
- kfree(client->evmasks[i]);
+ bitmap_free(client->evmasks[i]);
kvfree(client);
@@ -925,17 +925,15 @@ static int evdev_handle_get_val(struct evdev_client *client,
{
int ret;
unsigned long *mem;
- size_t len;
- len = BITS_TO_LONGS(maxbit) * sizeof(unsigned long);
- mem = kmalloc(len, GFP_KERNEL);
+ mem = bitmap_alloc(maxbit, GFP_KERNEL);
if (!mem)
return -ENOMEM;
spin_lock_irq(&dev->event_lock);
spin_lock(&client->buffer_lock);
- memcpy(mem, bits, len);
+ bitmap_copy(mem, bits, maxbit);
spin_unlock(&dev->event_lock);
@@ -947,7 +945,7 @@ static int evdev_handle_get_val(struct evdev_client *client,
if (ret < 0)
evdev_queue_syn_dropped(client);
- kfree(mem);
+ bitmap_free(mem);
return ret;
}
@@ -1003,13 +1001,13 @@ static int evdev_set_mask(struct evdev_client *client,
if (!cnt)
return 0;
- mask = kcalloc(sizeof(unsigned long), BITS_TO_LONGS(cnt), GFP_KERNEL);
+ mask = bitmap_zalloc(cnt, GFP_KERNEL);
if (!mask)
return -ENOMEM;
error = bits_from_user(mask, cnt - 1, codes_size, codes, compat);
if (error < 0) {
- kfree(mask);
+ bitmap_free(mask);
return error;
}
@@ -1018,7 +1016,7 @@ static int evdev_set_mask(struct evdev_client *client,
client->evmasks[type] = mask;
spin_unlock_irqrestore(&client->buffer_lock, flags);
- kfree(oldmask);
+ bitmap_free(oldmask);
return 0;
}
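
These evdev conversions replace open-coded BITS_TO_LONGS() * sizeof(unsigned long) arithmetic with the bitmap allocator helpers, which take a bit count directly: bitmap_alloc()/bitmap_zalloc() to allocate, bitmap_copy() to copy nbits, and bitmap_free() to release. A small sketch (example_snapshot_bits is hypothetical):

static int example_snapshot_bits(const unsigned long *src,
				 unsigned int nbits)
{
	unsigned long *mem;

	mem = bitmap_zalloc(nbits, GFP_KERNEL);	/* zeroed, nbits wide */
	if (!mem)
		return -ENOMEM;

	bitmap_copy(mem, src, nbits);

	/* ... hand the snapshot to userspace, etc. ... */

	bitmap_free(mem);
	return 0;
}
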
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index 2909e9561cf3..afdc20ca0e24 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <asm/io.h>
diff --git a/drivers/input/gameport/lightning.c b/drivers/input/gameport/lightning.c
index 85d6ee09f11f..c6e74c7945cb 100644
--- a/drivers/input/gameport/lightning.c
+++ b/drivers/input/gameport/lightning.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <asm/io.h>
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index 7c217848613e..6437645858f9 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -21,10 +21,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <asm/io.h>
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 6365c1958264..3304aaaffe87 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -480,11 +480,19 @@ EXPORT_SYMBOL(input_inject_event);
*/
void input_alloc_absinfo(struct input_dev *dev)
{
- if (!dev->absinfo)
- dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo),
- GFP_KERNEL);
+ if (dev->absinfo)
+ return;
- WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
+ dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
+ if (!dev->absinfo) {
+ dev_err(dev->dev.parent ?: &dev->dev,
+ "%s: unable to allocate memory\n", __func__);
+ /*
+ * We will handle this allocation failure in
+ * input_register_device() when we refuse to register an input
+ * device with ABS bits but without absinfo.
+ */
+ }
}
EXPORT_SYMBOL(input_alloc_absinfo);
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 55efdfc7eb62..98307039a534 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 15a71acb6997..f466c0d34247 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index c65b5fa69f1e..2b82a838c511 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/types.h>
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index c79dbcb4d146..2b445c8d3fcd 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index ae3ee24a2368..14cb956beac4 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index de0dd4756c84..804b1b80a8be 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
@@ -263,6 +259,7 @@ static unsigned char db9_saturn_read_packet(struct parport *port, unsigned char
db9_saturn_write_sub(port, type, 3, powered, 0);
return data[0] = 0xe3;
}
+ /* else: fall through */
default:
return data[0];
}
@@ -282,11 +279,14 @@ static int db9_saturn_report(unsigned char id, unsigned char data[60], struct in
switch (data[j]) {
case 0x16: /* multi controller (analog 4 axis) */
input_report_abs(dev, db9_abs[5], data[j + 6]);
+ /* fall through */
case 0x15: /* mission stick (analog 3 axis) */
input_report_abs(dev, db9_abs[3], data[j + 4]);
input_report_abs(dev, db9_abs[4], data[j + 5]);
+ /* fall through */
case 0x13: /* racing controller (analog 1 axis) */
input_report_abs(dev, db9_abs[2], data[j + 3]);
+ /* fall through */
case 0x34: /* saturn keyboard (udlr ZXC ASD QE Esc) */
case 0x02: /* digital pad (digital 2 axis + buttons) */
input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64));
@@ -380,6 +380,7 @@ static void db9_timer(struct timer_list *t)
input_report_abs(dev2, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
input_report_abs(dev2, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
input_report_key(dev2, BTN_TRIGGER, ~data & DB9_FIRE1);
+ /* fall through */
case DB9_MULTI_0802:
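The /* fall through */ comments added in the db9.c hunks above mark each switch cascade as intentional so GCC's -Wimplicit-fallthrough (at its comment-matching levels) stays quiet. A minimal stand-alone sketch of the idiom with invented case values follows; note that later kernels replace the comment with the fallthrough pseudo-keyword.

#include <stdio.h>

static void report_axes(int analog_axes)
{
	switch (analog_axes) {
	case 4:
		printf("axis 4\n");
		/* fall through */
	case 3:
		printf("axis 3\n");
		/* fall through */
	case 2:
		printf("axes 1-2\n");
		break;
	default:
		break;
	}
}

int main(void)
{
	report_axes(4);	/* prints all three lines, like a 4-axis pad */
	return 0;
}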
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 4e10ffdf8a36..d62e73dd9f7f 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -24,10 +24,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 0f519db64748..50a60065ab14 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index eac9c5b8d73e..e10395ba62bc 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index a9ac2f9cfce0..43ff817d80ac 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/iforce/iforce-ff.c b/drivers/input/joystick/iforce/iforce-ff.c
index 0de9a0943a9e..3536d5f5ad18 100644
--- a/drivers/input/joystick/iforce/iforce-ff.c
+++ b/drivers/input/joystick/iforce/iforce-ff.c
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include "iforce.h"
@@ -56,7 +52,7 @@ static int make_magnitude_modifier(struct iforce* iforce,
iforce_send_packet(iforce, FF_CMD_MAGNITUDE, data);
- iforce_dump_packet("magnitude: ", FF_CMD_MAGNITUDE, data);
+ iforce_dump_packet(iforce, "magnitude", FF_CMD_MAGNITUDE, data);
return 0;
}
@@ -178,7 +174,7 @@ static int make_condition_modifier(struct iforce* iforce,
data[9] = (100 * lsat) >> 16;
iforce_send_packet(iforce, FF_CMD_CONDITION, data);
- iforce_dump_packet("condition", FF_CMD_CONDITION, data);
+ iforce_dump_packet(iforce, "condition", FF_CMD_CONDITION, data);
return 0;
}
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index daeeb4c7e3b0..58d5cfe46526 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include "iforce.h"
@@ -33,21 +29,14 @@ MODULE_LICENSE("GPL");
static signed short btn_joystick[] =
{ BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE,
- BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_A, BTN_B, BTN_C, -1 };
-
-static signed short btn_avb_pegasus[] =
-{ BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE,
- BTN_BASE2, BTN_BASE3, BTN_BASE4, -1 };
+ BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_A,
+ BTN_B, BTN_C, BTN_DEAD, -1 };
-static signed short btn_wheel[] =
-{ BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE,
- BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_A, BTN_B, BTN_C, -1 };
-
-static signed short btn_avb_tw[] =
+static signed short btn_joystick_avb[] =
{ BTN_TRIGGER, BTN_THUMB, BTN_TOP, BTN_TOP2, BTN_BASE,
- BTN_BASE2, BTN_BASE3, BTN_BASE4, -1 };
+ BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_DEAD, -1 };
-static signed short btn_avb_wheel[] =
+static signed short btn_wheel[] =
{ BTN_GEAR_DOWN, BTN_GEAR_UP, BTN_BASE, BTN_BASE2, BTN_BASE3,
BTN_BASE4, BTN_BASE5, BTN_BASE6, -1 };
@@ -73,9 +62,9 @@ static struct iforce_device iforce_device[] = {
{ 0x044f, 0xa01c, "Thrustmaster Motor Sport GT", btn_wheel, abs_wheel, ff_iforce },
{ 0x046d, 0xc281, "Logitech WingMan Force", btn_joystick, abs_joystick, ff_iforce },
{ 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce },
- { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_avb_pegasus, abs_avb_pegasus, ff_iforce },
- { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_avb_wheel, abs_wheel, ff_iforce },
- { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_avb_tw, abs_wheel, ff_iforce }, //?
+ { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce },
+ { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce },
+ { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
{ 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
@@ -360,7 +349,7 @@ int iforce_init_device(struct iforce *iforce)
for (i = 0; c[i]; i++)
if (!iforce_get_id_packet(iforce, c + i))
- iforce_dump_packet("info", iforce->ecmd, iforce->edata);
+ iforce_dump_packet(iforce, "info", iforce->ecmd, iforce->edata);
/*
* Disable spring, enable force feedback.
@@ -388,7 +377,6 @@ int iforce_init_device(struct iforce *iforce)
for (i = 0; iforce->type->btn[i] >= 0; i++)
set_bit(iforce->type->btn[i], input_dev->keybit);
- set_bit(BTN_DEAD, input_dev->keybit);
for (i = 0; iforce->type->abs[i] >= 0; i++) {
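The btn_* tables consolidated above are sentinel-terminated lists of input event codes: each iforce_device entry points at a shared table, and registration walks it until the -1 terminator, which is why BTN_DEAD moves into the tables instead of being set separately. A stand-alone sketch of the idiom, with an invented table:

#include <stdio.h>

static const signed short demo_btns[] = {
	0x120, 0x121, 0x122,	/* BTN_TRIGGER, BTN_THUMB, BTN_THUMB2 */
	-1			/* sentinel */
};

int main(void)
{
	int i;

	for (i = 0; demo_btns[i] >= 0; i++)
		printf("declare button %#x\n", demo_btns[i]);
	return 0;
}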
diff --git a/drivers/input/joystick/iforce/iforce-packets.c b/drivers/input/joystick/iforce/iforce-packets.c
index 08f98f2eaf88..c10169f4554e 100644
--- a/drivers/input/joystick/iforce/iforce-packets.c
+++ b/drivers/input/joystick/iforce/iforce-packets.c
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include "iforce.h"
@@ -33,14 +29,10 @@ static struct {
} iforce_hat_to_axis[16] = {{ 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}};
-void iforce_dump_packet(char *msg, u16 cmd, unsigned char *data)
+void iforce_dump_packet(struct iforce *iforce, char *msg, u16 cmd, unsigned char *data)
{
- int i;
-
- printk(KERN_DEBUG __FILE__ ": %s cmd = %04x, data = ", msg, cmd);
- for (i = 0; i < LO(cmd); i++)
- printk("%02x ", data[i]);
- printk("\n");
+ dev_dbg(iforce->dev->dev.parent, "%s %s cmd = %04x, data = %*ph\n",
+ __func__, msg, cmd, LO(cmd), data);
}
/*
@@ -255,7 +247,7 @@ int iforce_get_id_packet(struct iforce *iforce, char *packet)
iforce->cr.bRequest = packet[0];
iforce->ctrl->dev = iforce->usbdev;
- status = usb_submit_urb(iforce->ctrl, GFP_ATOMIC);
+ status = usb_submit_urb(iforce->ctrl, GFP_KERNEL);
if (status) {
dev_err(&iforce->intf->dev,
"usb_submit_urb failed %d\n", status);
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index 154e827b559b..f4ba4a751fe0 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index e8724f1a4a25..78073259c9a1 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 96ae4f5bd0eb..0e9d01f8bcb6 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -19,10 +19,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
@@ -158,7 +154,7 @@ int iforce_init_device(struct iforce *iforce);
int iforce_control_playback(struct iforce*, u16 id, unsigned int);
void iforce_process_packet(struct iforce *iforce, u16 cmd, unsigned char *data);
int iforce_send_packet(struct iforce *iforce, u16 cmd, unsigned char* data);
-void iforce_dump_packet(char *msg, u16 cmd, unsigned char *data) ;
+void iforce_dump_packet(struct iforce *iforce, char *msg, u16 cmd, unsigned char *data);
int iforce_get_id_packet(struct iforce *iforce, char *packet);
/* iforce-ff.c */
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 17c2c800743c..598788b3da62 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/joydump.c b/drivers/input/joystick/joydump.c
index 7f4dff9a566f..344ab44ff581 100644
--- a/drivers/input/joystick/joydump.c
+++ b/drivers/input/joystick/joydump.c
@@ -21,10 +21,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index a9d0e3edca94..95a34ab34fc3 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/pxrc.c b/drivers/input/joystick/pxrc.c
index 07a0dbd3ced2..ea2bf5951d67 100644
--- a/drivers/input/joystick/pxrc.c
+++ b/drivers/input/joystick/pxrc.c
@@ -3,7 +3,6 @@
* Driver for Phoenix RC Flight Controller Adapter
*
* Copyright (C) 2018 Marcus Folkesson <marcus.folkesson@gmail.com>
- *
*/
#include <linux/kernel.h>
@@ -16,31 +15,22 @@
#include <linux/mutex.h>
#include <linux/input.h>
-#define PXRC_VENDOR_ID (0x1781)
-#define PXRC_PRODUCT_ID (0x0898)
-
-static const struct usb_device_id pxrc_table[] = {
- { USB_DEVICE(PXRC_VENDOR_ID, PXRC_PRODUCT_ID) },
- { }
-};
-MODULE_DEVICE_TABLE(usb, pxrc_table);
+#define PXRC_VENDOR_ID 0x1781
+#define PXRC_PRODUCT_ID 0x0898
struct pxrc {
struct input_dev *input;
- struct usb_device *udev;
struct usb_interface *intf;
struct urb *urb;
struct mutex pm_mutex;
bool is_open;
- __u8 epaddr;
char phys[64];
- unsigned char *data;
- size_t bsize;
};
static void pxrc_usb_irq(struct urb *urb)
{
struct pxrc *pxrc = urb->context;
+ u8 *data = urb->transfer_buffer;
int error;
switch (urb->status) {
@@ -68,15 +58,15 @@ static void pxrc_usb_irq(struct urb *urb)
}
if (urb->actual_length == 8) {
- input_report_abs(pxrc->input, ABS_X, pxrc->data[0]);
- input_report_abs(pxrc->input, ABS_Y, pxrc->data[2]);
- input_report_abs(pxrc->input, ABS_RX, pxrc->data[3]);
- input_report_abs(pxrc->input, ABS_RY, pxrc->data[4]);
- input_report_abs(pxrc->input, ABS_RUDDER, pxrc->data[5]);
- input_report_abs(pxrc->input, ABS_THROTTLE, pxrc->data[6]);
- input_report_abs(pxrc->input, ABS_MISC, pxrc->data[7]);
-
- input_report_key(pxrc->input, BTN_A, pxrc->data[1]);
+ input_report_abs(pxrc->input, ABS_X, data[0]);
+ input_report_abs(pxrc->input, ABS_Y, data[2]);
+ input_report_abs(pxrc->input, ABS_RX, data[3]);
+ input_report_abs(pxrc->input, ABS_RY, data[4]);
+ input_report_abs(pxrc->input, ABS_RUDDER, data[5]);
+ input_report_abs(pxrc->input, ABS_THROTTLE, data[6]);
+ input_report_abs(pxrc->input, ABS_MISC, data[7]);
+
+ input_report_key(pxrc->input, BTN_A, data[1]);
}
exit:
@@ -120,61 +110,73 @@ static void pxrc_close(struct input_dev *input)
mutex_unlock(&pxrc->pm_mutex);
}
-static int pxrc_usb_init(struct pxrc *pxrc)
+static void pxrc_free_urb(void *_pxrc)
{
+ struct pxrc *pxrc = _pxrc;
+
+ usb_free_urb(pxrc->urb);
+}
+
+static int pxrc_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct pxrc *pxrc;
struct usb_endpoint_descriptor *epirq;
- unsigned int pipe;
- int retval;
+ size_t xfer_size;
+ void *xfer_buf;
+ int error;
- /* Set up the endpoint information */
- /* This device only has an interrupt endpoint */
- retval = usb_find_common_endpoints(pxrc->intf->cur_altsetting,
- NULL, NULL, &epirq, NULL);
- if (retval) {
- dev_err(&pxrc->intf->dev,
- "Could not find endpoint\n");
- goto error;
+ /*
+ * Locate the endpoint information. This device only has an
+ * interrupt endpoint.
+ */
+ error = usb_find_common_endpoints(intf->cur_altsetting,
+ NULL, NULL, &epirq, NULL);
+ if (error) {
+ dev_err(&intf->dev, "Could not find endpoint\n");
+ return error;
}
- pxrc->bsize = usb_endpoint_maxp(epirq);
- pxrc->epaddr = epirq->bEndpointAddress;
- pxrc->data = devm_kmalloc(&pxrc->intf->dev, pxrc->bsize, GFP_KERNEL);
- if (!pxrc->data) {
- retval = -ENOMEM;
- goto error;
- }
+ pxrc = devm_kzalloc(&intf->dev, sizeof(*pxrc), GFP_KERNEL);
+ if (!pxrc)
+ return -ENOMEM;
- usb_set_intfdata(pxrc->intf, pxrc);
- usb_make_path(pxrc->udev, pxrc->phys, sizeof(pxrc->phys));
- strlcat(pxrc->phys, "/input0", sizeof(pxrc->phys));
+ mutex_init(&pxrc->pm_mutex);
+ pxrc->intf = intf;
- pxrc->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pxrc->urb) {
- retval = -ENOMEM;
- goto error;
- }
+ usb_set_intfdata(pxrc->intf, pxrc);
- pipe = usb_rcvintpipe(pxrc->udev, pxrc->epaddr),
- usb_fill_int_urb(pxrc->urb, pxrc->udev, pipe, pxrc->data, pxrc->bsize,
- pxrc_usb_irq, pxrc, 1);
+ xfer_size = usb_endpoint_maxp(epirq);
+ xfer_buf = devm_kmalloc(&intf->dev, xfer_size, GFP_KERNEL);
+ if (!xfer_buf)
+ return -ENOMEM;
-error:
- return retval;
+ pxrc->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pxrc->urb)
+ return -ENOMEM;
+ error = devm_add_action_or_reset(&intf->dev, pxrc_free_urb, pxrc);
+ if (error)
+ return error;
-}
+ usb_fill_int_urb(pxrc->urb, udev,
+ usb_rcvintpipe(udev, epirq->bEndpointAddress),
+ xfer_buf, xfer_size, pxrc_usb_irq, pxrc, 1);
-static int pxrc_input_init(struct pxrc *pxrc)
-{
- pxrc->input = devm_input_allocate_device(&pxrc->intf->dev);
- if (pxrc->input == NULL) {
- dev_err(&pxrc->intf->dev, "couldn't allocate input device\n");
+ pxrc->input = devm_input_allocate_device(&intf->dev);
+ if (!pxrc->input) {
+ dev_err(&intf->dev, "couldn't allocate input device\n");
return -ENOMEM;
}
pxrc->input->name = "PXRC Flight Controller Adapter";
+
+ usb_make_path(udev, pxrc->phys, sizeof(pxrc->phys));
+ strlcat(pxrc->phys, "/input0", sizeof(pxrc->phys));
pxrc->input->phys = pxrc->phys;
- usb_to_input_id(pxrc->udev, &pxrc->input->id);
+
+ usb_to_input_id(udev, &pxrc->input->id);
pxrc->input->open = pxrc_open;
pxrc->input->close = pxrc_close;
@@ -190,46 +192,16 @@ static int pxrc_input_init(struct pxrc *pxrc)
input_set_drvdata(pxrc->input, pxrc);
- return input_register_device(pxrc->input);
-}
-
-static int pxrc_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct pxrc *pxrc;
- int retval;
-
- pxrc = devm_kzalloc(&intf->dev, sizeof(*pxrc), GFP_KERNEL);
- if (!pxrc)
- return -ENOMEM;
-
- mutex_init(&pxrc->pm_mutex);
- pxrc->udev = usb_get_dev(interface_to_usbdev(intf));
- pxrc->intf = intf;
-
- retval = pxrc_usb_init(pxrc);
- if (retval)
- goto error;
-
- retval = pxrc_input_init(pxrc);
- if (retval)
- goto err_free_urb;
+ error = input_register_device(pxrc->input);
+ if (error)
+ return error;
return 0;
-
-err_free_urb:
- usb_free_urb(pxrc->urb);
-
-error:
- return retval;
}
static void pxrc_disconnect(struct usb_interface *intf)
{
- struct pxrc *pxrc = usb_get_intfdata(intf);
-
- usb_free_urb(pxrc->urb);
- usb_set_intfdata(intf, NULL);
+ /* All driver resources are devm-managed. */
}
static int pxrc_suspend(struct usb_interface *intf, pm_message_t message)
@@ -284,6 +256,12 @@ static int pxrc_reset_resume(struct usb_interface *intf)
return pxrc_resume(intf);
}
+static const struct usb_device_id pxrc_table[] = {
+ { USB_DEVICE(PXRC_VENDOR_ID, PXRC_PRODUCT_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, pxrc_table);
+
static struct usb_driver pxrc_driver = {
.name = "pxrc",
.probe = pxrc_probe,
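The pxrc rework above leaves the URB as the one resource devm cannot free on its own, so probe ties it to device lifetime with devm_add_action_or_reset(): the callback runs automatically on unbind, and immediately if the registration itself fails, which is what lets pxrc_disconnect() become empty. A sketch of the pattern with invented names (kernel API, not stand-alone):

#include <linux/device.h>
#include <linux/usb.h>

static void demo_free_urb(void *data)
{
	usb_free_urb(data);	/* runs on unbind, or at once on failure */
}

static int demo_bind_urb(struct device *dev, struct urb *urb)
{
	/* On error the action fires immediately, so no unwind label */
	return devm_add_action_or_reset(dev, demo_free_urb, urb);
}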
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 5e602a6852b7..f46bf4d41972 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index bb3faeff8cac..ffb9c1f495b6 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -24,10 +24,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index 05da0ed514e2..20540ee71d7f 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index cb10e7b097ae..ba8579435d6c 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -21,10 +21,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index 7e17cde464f0..6f4a01cfe79f 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index e2685753e460..bf2f9925e416 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index ef5391ba4470..b60cab168e2a 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 6bd97ffee761..4713957b0cbb 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -721,7 +721,7 @@ config KEYBOARD_CROS_EC
help
Say Y here to enable the matrix keyboard used by ChromeOS devices
and implemented on the ChromeOS EC. You must enable one bus option
- (MFD_CROS_EC_I2C or MFD_CROS_EC_SPI) to use this.
+ (CROS_EC_I2C or CROS_EC_SPI) to use this.
To compile this driver as a module, choose M here: the
module will be called cros_ec_keyb.
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 32d94c63dc33..2835fba71c33 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -885,6 +885,7 @@ static int adp5589_probe(struct i2c_client *client,
switch (id->driver_data) {
case ADP5585_02:
kpad->support_row5 = true;
+ /* fall through */
case ADP5585_01:
kpad->is_adp5585 = true;
kpad->var = &const_adp5585;
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index e04a3b4e55d6..420e33c49e58 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index f1235831283d..6f62da2909ec 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -34,10 +34,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 489ddd37bd4e..81be6f781f0b 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -1,25 +1,15 @@
-/*
- * ChromeOS EC keyboard driver
- *
- * Copyright (C) 2012 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
- */
+// SPDX-License-Identifier: GPL-2.0
+// ChromeOS EC keyboard driver
+//
+// Copyright (C) 2012 Google, Inc.
+//
+// This driver uses the ChromeOS EC byte-level message-based protocol for
+// communicating the keyboard state (which keys are pressed) from a keyboard EC
+// to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
+// but everything else (including deghosting) is done here. The main
+// motivation for this is to keep the EC firmware as simple as possible, since
+// it cannot be easily upgraded and EC flash/IRAM space is relatively
+// expensive.
#include <linux/module.h>
#include <linux/bitops.h>
@@ -170,9 +160,6 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
int col, row;
int new_state;
int old_state;
- int num_cols;
-
- num_cols = len;
if (ckdev->ghost_filter && cros_ec_keyb_has_ghosting(ckdev, kb_state)) {
/*
@@ -242,19 +229,17 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
u32 val;
unsigned int ev_type;
+ /*
+ * If not wake enabled, discard key state changes during
+ * suspend. Switches will be re-checked in
+ * cros_ec_keyb_resume() to be sure nothing is lost.
+ */
+ if (queued_during_suspend && !device_may_wakeup(ckdev->dev))
+ return NOTIFY_OK;
+
switch (ckdev->ec->event_data.event_type) {
case EC_MKBP_EVENT_KEY_MATRIX:
- if (device_may_wakeup(ckdev->dev)) {
- pm_wakeup_event(ckdev->dev, 0);
- } else {
- /*
- * If keyboard is not wake enabled, discard key state
- * changes during suspend. Switches will be re-checked
- * in cros_ec_keyb_resume() to be sure nothing is lost.
- */
- if (queued_during_suspend)
- return NOTIFY_OK;
- }
+ pm_wakeup_event(ckdev->dev, 0);
if (ckdev->ec->event_size != ckdev->cols) {
dev_err(ckdev->dev,
@@ -268,10 +253,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
break;
case EC_MKBP_EVENT_SYSRQ:
- if (device_may_wakeup(ckdev->dev))
- pm_wakeup_event(ckdev->dev, 0);
- else if (queued_during_suspend)
- return NOTIFY_OK;
+ pm_wakeup_event(ckdev->dev, 0);
val = get_unaligned_le32(&ckdev->ec->event_data.data.sysrq);
dev_dbg(ckdev->dev, "sysrq code from EC: %#x\n", val);
@@ -280,10 +262,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
case EC_MKBP_EVENT_BUTTON:
case EC_MKBP_EVENT_SWITCH:
- if (device_may_wakeup(ckdev->dev))
- pm_wakeup_event(ckdev->dev, 0);
- else if (queued_during_suspend)
- return NOTIFY_OK;
+ pm_wakeup_event(ckdev->dev, 0);
if (ckdev->ec->event_data.event_type == EC_MKBP_EVENT_BUTTON) {
val = get_unaligned_le32(
@@ -683,6 +662,6 @@ static struct platform_driver cros_ec_keyb_driver = {
module_platform_driver(cros_ec_keyb_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS EC keyboard driver");
MODULE_ALIAS("platform:cros-ec-keyb");
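The cros_ec_keyb hunk above hoists the wakeup bookkeeping out of the switch: events queued during suspend are discarded once, up front, unless the device is wake-enabled, and pm_wakeup_event() is then called unconditionally (it is a no-op for a device without an armed wakeup source). A sketch of the hoisted shape, with invented names and using in-kernel APIs only:

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_wakeup.h>

static int demo_event_notify(struct device *dev, bool queued_during_suspend)
{
	/* Drop stale suspend-time events unless we are a wakeup source */
	if (queued_during_suspend && !device_may_wakeup(dev))
		return NOTIFY_OK;

	pm_wakeup_event(dev, 0);
	/* ...decode and report the event... */
	return NOTIFY_OK;
}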
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 052e37675086..492a971b95b5 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -196,7 +196,7 @@ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata,
ssize_t ret;
int i;
- bits = kcalloc(BITS_TO_LONGS(n_events), sizeof(*bits), GFP_KERNEL);
+ bits = bitmap_zalloc(n_events, GFP_KERNEL);
if (!bits)
return -ENOMEM;
@@ -216,7 +216,7 @@ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata,
buf[ret++] = '\n';
buf[ret] = '\0';
- kfree(bits);
+ bitmap_free(bits);
return ret;
}
@@ -240,7 +240,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
ssize_t error;
int i;
- bits = kcalloc(BITS_TO_LONGS(n_events), sizeof(*bits), GFP_KERNEL);
+ bits = bitmap_zalloc(n_events, GFP_KERNEL);
if (!bits)
return -ENOMEM;
@@ -284,7 +284,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
mutex_unlock(&ddata->disable_lock);
out:
- kfree(bits);
+ bitmap_free(bits);
return error;
}
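The gpio_keys hunks above replace the open-coded kcalloc(BITS_TO_LONGS(n), sizeof(*bits), ...) with bitmap_zalloc(), which takes its size in bits and pairs with bitmap_free(). A sketch of the pairing with invented names (kernel API, not stand-alone):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int demo_bits(unsigned int n_events)
{
	unsigned long *bits;

	bits = bitmap_zalloc(n_events, GFP_KERNEL);	/* size in bits */
	if (!bits)
		return -ENOMEM;

	__set_bit(3, bits);
	/* ...use the bitmap... */
	bitmap_free(bits);
	return 0;
}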
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index a4e404aaf64b..5c7afdec192c 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -57,8 +57,8 @@ MODULE_LICENSE("GPL v2");
#define HIL_DATA 0x1
#define HIL_CMD 0x3
#define HIL_IRQ 2
- #define hil_readb(p) readb(p)
- #define hil_writeb(v,p) writeb((v),(p))
+ #define hil_readb(p) readb((const volatile void __iomem *)(p))
+ #define hil_writeb(v, p) writeb((v), (volatile void __iomem *)(p))
#else
#error "HIL is not supported on this platform"
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 25d61d8d4fc4..539cb670de41 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -1,11 +1,7 @@
-/*
- * Driver for the IMX keypad port.
- * Copyright (C) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for the IMX keypad port.
+// Copyright (C) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index fb9b8e23ab93..de26e2df0ad5 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <j.cormack@doc.ic.ac.uk>, or by paper mail:
- * Justin Cormack, 68 Dartmouth Park Road, London NW5 1SN, UK.
*/
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 53c768b95939..effb63205d3d 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -1,14 +1,7 @@
-/*
- * Driver for the IMX SNVS ON/OFF Power Key
- * Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Driver for the IMX SNVS ON/OFF Power Key
+// Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/device.h>
#include <linux/err.h>
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c
index 8b6de9a692dc..15a5e74dbe91 100644
--- a/drivers/input/keyboard/stowaway.c
+++ b/drivers/input/keyboard/stowaway.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <marek.vasut@gmail.com>, or by paper mail:
- * Marek Vasut, Liskovecka 559, Frydek-Mistek, 738 01 Czech Republic
*/
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index c95707ea2656..ad5d7f94f95a 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index 8f64b9ded8d0..f7598114b962 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/slab.h>
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 67482b248b2d..a8937ceac66a 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -466,7 +466,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
remote->in_endpoint = endpoint;
remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. */
- remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
+ remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_KERNEL, &remote->in_dma);
if (!remote->in_buffer) {
error = -ENOMEM;
goto fail1;
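This GFP_ATOMIC to GFP_KERNEL conversion, and the similar usb_alloc_coherent() and usb_submit_urb() changes elsewhere in this series, all rest on the same rule: probe, open and resume run in process context and may sleep, so allocations there can use GFP_KERNEL and tolerate reclaim, while GFP_ATOMIC is needed only where sleeping is forbidden, such as interrupt handlers. A sketch with invented names (kernel API, not stand-alone):

#include <linux/errno.h>
#include <linux/usb.h>

static int demo_alloc(struct usb_device *udev, size_t size,
		      u8 **buf, dma_addr_t *dma)
{
	/* Probe runs in process context: GFP_KERNEL may block and is
	 * far more likely to succeed than GFP_ATOMIC. */
	*buf = usb_alloc_coherent(udev, size, GFP_KERNEL, dma);
	return *buf ? 0 : -ENOMEM;
}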
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 18ad956454f1..48153e0ca19a 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -20,6 +20,7 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
@@ -28,6 +29,7 @@
#define PON_RT_STS 0x10
#define PON_KPDPWR_N_SET BIT(0)
+#define PON_RESIN_N_SET BIT(1)
#define PON_PS_HOLD_RST_CTL 0x5a
#define PON_PS_HOLD_RST_CTL2 0x5b
@@ -38,10 +40,15 @@
#define PON_PULL_CTL 0x70
#define PON_KPDPWR_PULL_UP BIT(1)
+#define PON_RESIN_PULL_UP BIT(0)
#define PON_DBC_CTL 0x71
#define PON_DBC_DELAY_MASK 0x7
+struct pm8941_data {
+ unsigned int pull_up_bit;
+ unsigned int status_bit;
+};
struct pm8941_pwrkey {
struct device *dev;
@@ -52,6 +59,9 @@ struct pm8941_pwrkey {
unsigned int revision;
struct notifier_block reboot_notifier;
+
+ u32 code;
+ const struct pm8941_data *data;
};
static int pm8941_reboot_notify(struct notifier_block *nb,
@@ -124,7 +134,8 @@ static irqreturn_t pm8941_pwrkey_irq(int irq, void *_data)
if (error)
return IRQ_HANDLED;
- input_report_key(pwrkey->input, KEY_POWER, !!(sts & PON_KPDPWR_N_SET));
+ input_report_key(pwrkey->input, pwrkey->code,
+ sts & pwrkey->data->status_bit);
input_sync(pwrkey->input);
return IRQ_HANDLED;
@@ -157,6 +168,7 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
{
struct pm8941_pwrkey *pwrkey;
bool pull_up;
+ struct device *parent;
u32 req_delay;
int error;
@@ -175,12 +187,30 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
return -ENOMEM;
pwrkey->dev = &pdev->dev;
+ pwrkey->data = of_device_get_match_data(&pdev->dev);
- pwrkey->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ parent = pdev->dev.parent;
+ pwrkey->regmap = dev_get_regmap(parent, NULL);
if (!pwrkey->regmap) {
- dev_err(&pdev->dev, "failed to locate regmap\n");
- return -ENODEV;
+ /*
+ * We failed to get regmap for parent. Let's see if we are
+ * a child of pon node and read regmap and reg from its
+ * parent.
+ */
+ pwrkey->regmap = dev_get_regmap(parent->parent, NULL);
+ if (!pwrkey->regmap) {
+ dev_err(&pdev->dev, "failed to locate regmap\n");
+ return -ENODEV;
+ }
+
+ error = of_property_read_u32(parent->of_node,
+ "reg", &pwrkey->baseaddr);
+ } else {
+ error = of_property_read_u32(pdev->dev.of_node, "reg",
+ &pwrkey->baseaddr);
}
+ if (error)
+ return error;
pwrkey->irq = platform_get_irq(pdev, 0);
if (pwrkey->irq < 0) {
@@ -188,11 +218,6 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
return pwrkey->irq;
}
- error = of_property_read_u32(pdev->dev.of_node, "reg",
- &pwrkey->baseaddr);
- if (error)
- return error;
-
error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_REV2,
&pwrkey->revision);
if (error) {
@@ -200,13 +225,21 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
return error;
}
+ error = of_property_read_u32(pdev->dev.of_node, "linux,code",
+ &pwrkey->code);
+ if (error) {
+ dev_dbg(&pdev->dev,
+ "no linux,code assuming power (%d)\n", error);
+ pwrkey->code = KEY_POWER;
+ }
+
pwrkey->input = devm_input_allocate_device(&pdev->dev);
if (!pwrkey->input) {
dev_dbg(&pdev->dev, "unable to allocate input device\n");
return -ENOMEM;
}
- input_set_capability(pwrkey->input, EV_KEY, KEY_POWER);
+ input_set_capability(pwrkey->input, EV_KEY, pwrkey->code);
pwrkey->input->name = "pm8941_pwrkey";
pwrkey->input->phys = "pm8941_pwrkey/input0";
@@ -225,8 +258,8 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
error = regmap_update_bits(pwrkey->regmap,
pwrkey->baseaddr + PON_PULL_CTL,
- PON_KPDPWR_PULL_UP,
- pull_up ? PON_KPDPWR_PULL_UP : 0);
+ pwrkey->data->pull_up_bit,
+ pull_up ? pwrkey->data->pull_up_bit : 0);
if (error) {
dev_err(&pdev->dev, "failed to set pull: %d\n", error);
return error;
@@ -271,8 +304,19 @@ static int pm8941_pwrkey_remove(struct platform_device *pdev)
return 0;
}
+static const struct pm8941_data pwrkey_data = {
+ .pull_up_bit = PON_KPDPWR_PULL_UP,
+ .status_bit = PON_KPDPWR_N_SET,
+};
+
+static const struct pm8941_data resin_data = {
+ .pull_up_bit = PON_RESIN_PULL_UP,
+ .status_bit = PON_RESIN_N_SET,
+};
+
static const struct of_device_id pm8941_pwr_key_id_table[] = {
- { .compatible = "qcom,pm8941-pwrkey" },
+ { .compatible = "qcom,pm8941-pwrkey", .data = &pwrkey_data },
+ { .compatible = "qcom,pm8941-resin", .data = &resin_data },
{ }
};
MODULE_DEVICE_TABLE(of, pm8941_pwr_key_id_table);
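The pm8941 change above keys two compatibles to small per-variant descriptors through the .data member of of_device_id, fetched in probe with of_device_get_match_data(). A sketch of the pattern with invented compatibles and fields (kernel API, not stand-alone):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_variant {
	unsigned int status_bit;	/* differs per compatible */
};

static const struct demo_variant demo_pwrkey = { .status_bit = BIT(0) };
static const struct demo_variant demo_resin  = { .status_bit = BIT(1) };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-pwrkey", .data = &demo_pwrkey },
	{ .compatible = "vendor,demo-resin",  .data = &demo_resin },
	{ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_variant *v = of_device_get_match_data(&pdev->dev);

	if (!v)
		return -ENODEV;
	/* ...program v->status_bit... */
	return 0;
}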
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 5c8c79623c87..e8de3aaf9f63 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -277,7 +277,7 @@ static int powermate_input_event(struct input_dev *dev, unsigned int type, unsig
static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
{
pm->data = usb_alloc_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
- GFP_ATOMIC, &pm->data_dma);
+ GFP_KERNEL, &pm->data_dma);
if (!pm->data)
return -1;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index d91f3b1c5375..594f72e39639 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -63,6 +63,9 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *);
static void xenkbd_handle_motion_event(struct xenkbd_info *info,
struct xenkbd_motion *motion)
{
+ if (unlikely(!info->ptr))
+ return;
+
input_report_rel(info->ptr, REL_X, motion->rel_x);
input_report_rel(info->ptr, REL_Y, motion->rel_y);
if (motion->rel_z)
@@ -73,6 +76,9 @@ static void xenkbd_handle_motion_event(struct xenkbd_info *info,
static void xenkbd_handle_position_event(struct xenkbd_info *info,
struct xenkbd_position *pos)
{
+ if (unlikely(!info->ptr))
+ return;
+
input_report_abs(info->ptr, ABS_X, pos->abs_x);
input_report_abs(info->ptr, ABS_Y, pos->abs_y);
if (pos->rel_z)
@@ -97,6 +103,9 @@ static void xenkbd_handle_key_event(struct xenkbd_info *info,
return;
}
+ if (unlikely(!dev))
+ return;
+
input_event(dev, EV_KEY, key->keycode, value);
input_sync(dev);
}
@@ -192,7 +201,7 @@ static int xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int ret, i;
- unsigned int abs, touch;
+ bool with_mtouch, with_kbd, with_ptr;
struct xenkbd_info *info;
struct input_dev *kbd, *ptr, *mtouch;
@@ -211,106 +220,127 @@ static int xenkbd_probe(struct xenbus_device *dev,
if (!info->page)
goto error_nomem;
- /* Set input abs params to match backend screen res */
- abs = xenbus_read_unsigned(dev->otherend,
- XENKBD_FIELD_FEAT_ABS_POINTER, 0);
- ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend,
- XENKBD_FIELD_WIDTH,
- ptr_size[KPARAM_X]);
- ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend,
- XENKBD_FIELD_HEIGHT,
- ptr_size[KPARAM_Y]);
- if (abs) {
- ret = xenbus_write(XBT_NIL, dev->nodename,
- XENKBD_FIELD_REQ_ABS_POINTER, "1");
- if (ret) {
- pr_warn("xenkbd: can't request abs-pointer\n");
- abs = 0;
- }
- }
+ /*
+ * The below are reverse logic, e.g. if the feature is set, then
+ * do not expose the corresponding virtual device.
+ */
+ with_kbd = !xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_FEAT_DSBL_KEYBRD, 0);
+
+ with_ptr = !xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_FEAT_DSBL_POINTER, 0);
- touch = xenbus_read_unsigned(dev->nodename,
- XENKBD_FIELD_FEAT_MTOUCH, 0);
- if (touch) {
+ /* Direct logic: if set, then create multi-touch device. */
+ with_mtouch = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_FEAT_MTOUCH, 0);
+ if (with_mtouch) {
ret = xenbus_write(XBT_NIL, dev->nodename,
XENKBD_FIELD_REQ_MTOUCH, "1");
if (ret) {
pr_warn("xenkbd: can't request multi-touch");
- touch = 0;
+ with_mtouch = 0;
}
}
/* keyboard */
- kbd = input_allocate_device();
- if (!kbd)
- goto error_nomem;
- kbd->name = "Xen Virtual Keyboard";
- kbd->phys = info->phys;
- kbd->id.bustype = BUS_PCI;
- kbd->id.vendor = 0x5853;
- kbd->id.product = 0xffff;
-
- __set_bit(EV_KEY, kbd->evbit);
- for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
- __set_bit(i, kbd->keybit);
- for (i = KEY_OK; i < KEY_MAX; i++)
- __set_bit(i, kbd->keybit);
-
- ret = input_register_device(kbd);
- if (ret) {
- input_free_device(kbd);
- xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
- goto error;
+ if (with_kbd) {
+ kbd = input_allocate_device();
+ if (!kbd)
+ goto error_nomem;
+ kbd->name = "Xen Virtual Keyboard";
+ kbd->phys = info->phys;
+ kbd->id.bustype = BUS_PCI;
+ kbd->id.vendor = 0x5853;
+ kbd->id.product = 0xffff;
+
+ __set_bit(EV_KEY, kbd->evbit);
+ for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
+ __set_bit(i, kbd->keybit);
+ for (i = KEY_OK; i < KEY_MAX; i++)
+ __set_bit(i, kbd->keybit);
+
+ ret = input_register_device(kbd);
+ if (ret) {
+ input_free_device(kbd);
+ xenbus_dev_fatal(dev, ret,
+ "input_register_device(kbd)");
+ goto error;
+ }
+ info->kbd = kbd;
}
- info->kbd = kbd;
/* pointing device */
- ptr = input_allocate_device();
- if (!ptr)
- goto error_nomem;
- ptr->name = "Xen Virtual Pointer";
- ptr->phys = info->phys;
- ptr->id.bustype = BUS_PCI;
- ptr->id.vendor = 0x5853;
- ptr->id.product = 0xfffe;
-
- if (abs) {
- __set_bit(EV_ABS, ptr->evbit);
- input_set_abs_params(ptr, ABS_X, 0, ptr_size[KPARAM_X], 0, 0);
- input_set_abs_params(ptr, ABS_Y, 0, ptr_size[KPARAM_Y], 0, 0);
- } else {
- input_set_capability(ptr, EV_REL, REL_X);
- input_set_capability(ptr, EV_REL, REL_Y);
- }
- input_set_capability(ptr, EV_REL, REL_WHEEL);
+ if (with_ptr) {
+ unsigned int abs;
+
+ /* Set input abs params to match backend screen res */
+ abs = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_FEAT_ABS_POINTER, 0);
+ ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_WIDTH,
+ ptr_size[KPARAM_X]);
+ ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_HEIGHT,
+ ptr_size[KPARAM_Y]);
+ if (abs) {
+ ret = xenbus_write(XBT_NIL, dev->nodename,
+ XENKBD_FIELD_REQ_ABS_POINTER, "1");
+ if (ret) {
+ pr_warn("xenkbd: can't request abs-pointer\n");
+ abs = 0;
+ }
+ }
- __set_bit(EV_KEY, ptr->evbit);
- for (i = BTN_LEFT; i <= BTN_TASK; i++)
- __set_bit(i, ptr->keybit);
+ ptr = input_allocate_device();
+ if (!ptr)
+ goto error_nomem;
+ ptr->name = "Xen Virtual Pointer";
+ ptr->phys = info->phys;
+ ptr->id.bustype = BUS_PCI;
+ ptr->id.vendor = 0x5853;
+ ptr->id.product = 0xfffe;
+
+ if (abs) {
+ __set_bit(EV_ABS, ptr->evbit);
+ input_set_abs_params(ptr, ABS_X, 0,
+ ptr_size[KPARAM_X], 0, 0);
+ input_set_abs_params(ptr, ABS_Y, 0,
+ ptr_size[KPARAM_Y], 0, 0);
+ } else {
+ input_set_capability(ptr, EV_REL, REL_X);
+ input_set_capability(ptr, EV_REL, REL_Y);
+ }
+ input_set_capability(ptr, EV_REL, REL_WHEEL);
- ret = input_register_device(ptr);
- if (ret) {
- input_free_device(ptr);
- xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
- goto error;
+ __set_bit(EV_KEY, ptr->evbit);
+ for (i = BTN_LEFT; i <= BTN_TASK; i++)
+ __set_bit(i, ptr->keybit);
+
+ ret = input_register_device(ptr);
+ if (ret) {
+ input_free_device(ptr);
+ xenbus_dev_fatal(dev, ret,
+ "input_register_device(ptr)");
+ goto error;
+ }
+ info->ptr = ptr;
}
- info->ptr = ptr;
/* multi-touch device */
- if (touch) {
+ if (with_mtouch) {
int num_cont, width, height;
mtouch = input_allocate_device();
if (!mtouch)
goto error_nomem;
- num_cont = xenbus_read_unsigned(info->xbdev->nodename,
+ num_cont = xenbus_read_unsigned(info->xbdev->otherend,
XENKBD_FIELD_MT_NUM_CONTACTS,
1);
- width = xenbus_read_unsigned(info->xbdev->nodename,
+ width = xenbus_read_unsigned(info->xbdev->otherend,
XENKBD_FIELD_MT_WIDTH,
XENFB_WIDTH);
- height = xenbus_read_unsigned(info->xbdev->nodename,
+ height = xenbus_read_unsigned(info->xbdev->otherend,
XENKBD_FIELD_MT_HEIGHT,
XENFB_HEIGHT);
@@ -346,6 +376,11 @@ static int xenkbd_probe(struct xenbus_device *dev,
info->mtouch = mtouch;
}
+ if (!(with_kbd || with_ptr || with_mtouch)) {
+ ret = -ENXIO;
+ goto error;
+ }
+
ret = xenkbd_connect_backend(dev, info);
if (ret < 0)
goto error;
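With the keyboard and pointer now optional in xen-kbdfront, each event handler above gains an early return for a sub-device that may never have been allocated, and probe fails with -ENXIO when the backend disables everything. A sketch of the guard with invented names (kernel API, not stand-alone):

#include <linux/input.h>

static void demo_handle_motion(struct input_dev *ptr, int rel_x, int rel_y)
{
	if (unlikely(!ptr))
		return;		/* pointer device disabled by backend */

	input_report_rel(ptr, REL_X, rel_x);
	input_report_rel(ptr, REL_Y, rel_y);
	input_sync(ptr);
}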
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index f0c9bf87b4e3..1365cd94ed9b 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -894,12 +894,12 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* allocate usb buffers */
yld->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
- GFP_ATOMIC, &yld->irq_dma);
+ GFP_KERNEL, &yld->irq_dma);
if (yld->irq_data == NULL)
return usb_cleanup(yld, -ENOMEM);
yld->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
- GFP_ATOMIC, &yld->ctl_dma);
+ GFP_KERNEL, &yld->ctl_dma);
if (!yld->ctl_data)
return usb_cleanup(yld, -ENOMEM);
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 032d27983b6c..f1e66e257cff 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -472,6 +472,7 @@ static int atp_status_check(struct urb *urb)
dev->info->datalen, dev->urb->actual_length);
dev->overflow_warned = true;
}
+ /* fall through */
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
@@ -810,7 +811,7 @@ static int atp_open(struct input_dev *input)
{
struct atp *dev = input_get_drvdata(input);
- if (usb_submit_urb(dev->urb, GFP_ATOMIC))
+ if (usb_submit_urb(dev->urb, GFP_KERNEL))
return -EIO;
dev->open = true;
@@ -976,7 +977,7 @@ static int atp_recover(struct atp *dev)
if (error)
return error;
- if (dev->open && usb_submit_urb(dev->urb, GFP_ATOMIC))
+ if (dev->open && usb_submit_urb(dev->urb, GFP_KERNEL))
return -EIO;
return 0;
@@ -994,7 +995,7 @@ static int atp_resume(struct usb_interface *iface)
{
struct atp *dev = usb_get_intfdata(iface);
- if (dev->open && usb_submit_urb(dev->urb, GFP_ATOMIC))
+ if (dev->open && usb_submit_urb(dev->urb, GFP_KERNEL))
return -EIO;
return 0;
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index 5775d40b3d53..14239fbd72cf 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -2554,6 +2554,7 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
}
cyapa->state = CYAPA_STATE_GEN5_APP;
+ /* fall through */
case CYAPA_STATE_GEN5_APP:
/*
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index 016397850b1b..c1b524ab4623 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -680,6 +680,7 @@ static int cyapa_gen6_operational_check(struct cyapa *cyapa)
}
cyapa->state = CYAPA_STATE_GEN6_APP;
+ /* fall through */
case CYAPA_STATE_GEN6_APP:
/*
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 1f9cd7d8b7ad..f5ae24865355 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1346,6 +1346,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
{ "ELAN0618", 0 },
+ { "ELAN061D", 0 },
+ { "ELAN0622", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index c060d270bc4d..88e315d2cfd3 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -387,7 +387,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
len = i2c_smbus_read_block_data(client,
ETP_SMBUS_IAP_PASSWORD_READ,
val);
- if (len < sizeof(u16)) {
+ if (len < (int)sizeof(u16)) {
error = len < 0 ? len : -EIO;
dev_err(dev, "failed to read iap password: %d\n",
error);
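The (int) cast added above matters because i2c_smbus_read_block_data() returns a length or a negative errno in an int, while sizeof(u16) is a size_t: without the cast, the usual arithmetic conversions make the comparison unsigned, so a negative error code compares as a huge positive value and slips past the length check. A stand-alone demonstration (only the second message prints):

#include <stdio.h>

int main(void)
{
	int len = -5;	/* stand-in for a negative errno return */

	if (len < sizeof(short))	/* len promoted to size_t: false */
		printf("unsigned compare caught the error\n");
	if (len < (int)sizeof(short))	/* stays signed: true */
		printf("signed compare caught the error\n");
	return 0;
}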
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index dd85b16dc6f8..44f57cf6675b 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -340,7 +340,7 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
*/
if (packet[3] & 0x80)
fingers = 4;
- /* pass through... */
+ /* fall through */
case 1:
/*
* byte 1: . . . . x11 x10 x9 x8
diff --git a/drivers/input/mouse/inport.c b/drivers/input/mouse/inport.c
index 9ce71dfa0de1..b9e68606c44a 100644
--- a/drivers/input/mouse/inport.c
+++ b/drivers/input/mouse/inport.c
@@ -26,10 +26,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/mouse/logibm.c b/drivers/input/mouse/logibm.c
index 6f165e053f4d..2fd6c84cd5b7 100644
--- a/drivers/input/mouse/logibm.c
+++ b/drivers/input/mouse/logibm.c
@@ -27,10 +27,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
index 7b02b652e267..b8965e6bc890 100644
--- a/drivers/input/mouse/pc110pad.c
+++ b/drivers/input/mouse/pc110pad.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index 8df526620ebf..3e8fb8136452 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
@@ -143,7 +139,8 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
switch (sermouse->type) {
case SERIO_MS:
- sermouse->type = SERIO_MP;
+ sermouse->type = SERIO_MP;
+ /* fall through */
case SERIO_MP:
if ((data >> 2) & 3) break; /* M++ Wireless Extension packet. */
@@ -154,6 +151,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
case SERIO_MZP:
case SERIO_MZPP:
input_report_key(dev, BTN_SIDE, (data >> 5) & 1);
+ /* fall through */
case SERIO_MZ:
input_report_key(dev, BTN_MIDDLE, (data >> 4) & 1);
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 3df501c3421b..f8663d7891f2 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -20,32 +20,33 @@
* However, when used with the E3 mailboard that produces non-standard
* scancodes, a custom key table must be prepared and loaded from userspace.
*/
-#include <linux/gpio.h>
#include <linux/irq.h>
+#include <linux/platform_data/ams-delta-fiq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/serio.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/mach-types.h>
-#include <mach/board-ams-delta.h>
-
-#include <mach/ams-delta-fiq.h>
+#define DRIVER_NAME "ams-delta-serio"
MODULE_AUTHOR("Matt Callow");
MODULE_DESCRIPTION("AMS Delta (E3) keyboard port driver");
MODULE_LICENSE("GPL");
-static struct serio *ams_delta_serio;
+struct ams_delta_serio {
+ struct serio *serio;
+ struct regulator *vcc;
+ unsigned int *fiq_buffer;
+};
-static int check_data(int data)
+static int check_data(struct serio *serio, int data)
{
int i, parity = 0;
/* check valid stop bit */
if (!(data & 0x400)) {
- dev_warn(&ams_delta_serio->dev,
- "invalid stop bit, data=0x%X\n",
- data);
+ dev_warn(&serio->dev, "invalid stop bit, data=0x%X\n", data);
return SERIO_FRAME;
}
/* calculate the parity */
@@ -55,9 +56,9 @@ static int check_data(int data)
}
/* it should be odd */
if (!(parity & 0x01)) {
- dev_warn(&ams_delta_serio->dev,
- "parity check failed, data=0x%X parity=0x%X\n",
- data, parity);
+ dev_warn(&serio->dev,
+ "parity check failed, data=0x%X parity=0x%X\n", data,
+ parity);
return SERIO_PARITY;
}
return 0;
@@ -65,127 +66,130 @@ static int check_data(int data)
static irqreturn_t ams_delta_serio_interrupt(int irq, void *dev_id)
{
- int *circ_buff = &fiq_buffer[FIQ_CIRC_BUFF];
+ struct ams_delta_serio *priv = dev_id;
+ int *circ_buff = &priv->fiq_buffer[FIQ_CIRC_BUFF];
int data, dfl;
u8 scancode;
- fiq_buffer[FIQ_IRQ_PEND] = 0;
+ priv->fiq_buffer[FIQ_IRQ_PEND] = 0;
/*
* Read data from the circular buffer, check it
* and then pass it on the serio
*/
- while (fiq_buffer[FIQ_KEYS_CNT] > 0) {
+ while (priv->fiq_buffer[FIQ_KEYS_CNT] > 0) {
- data = circ_buff[fiq_buffer[FIQ_HEAD_OFFSET]++];
- fiq_buffer[FIQ_KEYS_CNT]--;
- if (fiq_buffer[FIQ_HEAD_OFFSET] == fiq_buffer[FIQ_BUF_LEN])
- fiq_buffer[FIQ_HEAD_OFFSET] = 0;
+ data = circ_buff[priv->fiq_buffer[FIQ_HEAD_OFFSET]++];
+ priv->fiq_buffer[FIQ_KEYS_CNT]--;
+ if (priv->fiq_buffer[FIQ_HEAD_OFFSET] ==
+ priv->fiq_buffer[FIQ_BUF_LEN])
+ priv->fiq_buffer[FIQ_HEAD_OFFSET] = 0;
- dfl = check_data(data);
+ dfl = check_data(priv->serio, data);
scancode = (u8) (data >> 1) & 0xFF;
- serio_interrupt(ams_delta_serio, scancode, dfl);
+ serio_interrupt(priv->serio, scancode, dfl);
}
return IRQ_HANDLED;
}
static int ams_delta_serio_open(struct serio *serio)
{
- /* enable keyboard */
- gpio_set_value(AMS_DELTA_GPIO_PIN_KEYBRD_PWR, 1);
+ struct ams_delta_serio *priv = serio->port_data;
- return 0;
+ /* enable keyboard */
+ return regulator_enable(priv->vcc);
}
static void ams_delta_serio_close(struct serio *serio)
{
+ struct ams_delta_serio *priv = serio->port_data;
+
/* disable keyboard */
- gpio_set_value(AMS_DELTA_GPIO_PIN_KEYBRD_PWR, 0);
+ regulator_disable(priv->vcc);
}
-static const struct gpio ams_delta_gpios[] __initconst_or_module = {
- {
- .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_DATA,
- .flags = GPIOF_DIR_IN,
- .label = "serio-data",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK,
- .flags = GPIOF_DIR_IN,
- .label = "serio-clock",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_PWR,
- .flags = GPIOF_OUT_INIT_LOW,
- .label = "serio-power",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_DATAOUT,
- .flags = GPIOF_OUT_INIT_LOW,
- .label = "serio-dataout",
- },
-};
-
-static int __init ams_delta_serio_init(void)
+static int ams_delta_serio_init(struct platform_device *pdev)
{
- int err;
-
- if (!machine_is_ams_delta())
- return -ENODEV;
+ struct ams_delta_serio *priv;
+ struct serio *serio;
+ int irq, err;
- ams_delta_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
- if (!ams_delta_serio)
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- ams_delta_serio->id.type = SERIO_8042;
- ams_delta_serio->open = ams_delta_serio_open;
- ams_delta_serio->close = ams_delta_serio_close;
- strlcpy(ams_delta_serio->name, "AMS DELTA keyboard adapter",
- sizeof(ams_delta_serio->name));
- strlcpy(ams_delta_serio->phys, "GPIO/serio0",
- sizeof(ams_delta_serio->phys));
-
- err = gpio_request_array(ams_delta_gpios,
- ARRAY_SIZE(ams_delta_gpios));
- if (err) {
- pr_err("ams_delta_serio: Couldn't request gpio pins\n");
- goto serio;
+ priv->fiq_buffer = pdev->dev.platform_data;
+ if (!priv->fiq_buffer)
+ return -EINVAL;
+
+ priv->vcc = devm_regulator_get(&pdev->dev, "vcc");
+ if (IS_ERR(priv->vcc)) {
+ err = PTR_ERR(priv->vcc);
+ dev_err(&pdev->dev, "regulator request failed (%d)\n", err);
+ /*
+ * When running on a non-dt platform and the requested regulator
+ * is not available, devm_regulator_get() never returns
+ * -EPROBE_DEFER, as it cannot tell whether the regulator may
+ * still appear later. On the other hand, the board can still
+ * set the full constraints flag at late_initcall in order to
+ * instruct devm_regulator_get() to return a dummy regulator if
+ * that is sufficient. Hence, if we get -ENODEV here, convert
+ * it to -EPROBE_DEFER and wait for the board to decide, or let
+ * the deferred probe infrastructure handle this error.
+ */
+ if (err == -ENODEV)
+ err = -EPROBE_DEFER;
+ return err;
}
- err = request_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
- ams_delta_serio_interrupt, IRQ_TYPE_EDGE_RISING,
- "ams-delta-serio", 0);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ err = devm_request_irq(&pdev->dev, irq, ams_delta_serio_interrupt,
+ IRQ_TYPE_EDGE_RISING, DRIVER_NAME, priv);
if (err < 0) {
- pr_err("ams_delta_serio: couldn't request gpio interrupt %d\n",
- gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
- goto gpio;
+ dev_err(&pdev->dev, "IRQ request failed (%d)\n", err);
+ return err;
}
- /*
- * Since GPIO register handling for keyboard clock pin is performed
- * at FIQ level, switch back from edge to simple interrupt handler
- * to avoid bad interaction.
- */
- irq_set_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
- handle_simple_irq);
- serio_register_port(ams_delta_serio);
- dev_info(&ams_delta_serio->dev, "%s\n", ams_delta_serio->name);
+ serio = kzalloc(sizeof(*serio), GFP_KERNEL);
+ if (!serio)
+ return -ENOMEM;
+
+ priv->serio = serio;
+
+ serio->id.type = SERIO_8042;
+ serio->open = ams_delta_serio_open;
+ serio->close = ams_delta_serio_close;
+ strlcpy(serio->name, "AMS DELTA keyboard adapter", sizeof(serio->name));
+ strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys));
+ serio->dev.parent = &pdev->dev;
+ serio->port_data = priv;
+
+ serio_register_port(serio);
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(&serio->dev, "%s\n", serio->name);
return 0;
-gpio:
- gpio_free_array(ams_delta_gpios,
- ARRAY_SIZE(ams_delta_gpios));
-serio:
- kfree(ams_delta_serio);
- return err;
}
-module_init(ams_delta_serio_init);
-static void __exit ams_delta_serio_exit(void)
+static int ams_delta_serio_exit(struct platform_device *pdev)
{
- serio_unregister_port(ams_delta_serio);
- free_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
- gpio_free_array(ams_delta_gpios,
- ARRAY_SIZE(ams_delta_gpios));
+ struct ams_delta_serio *priv = platform_get_drvdata(pdev);
+
+ serio_unregister_port(priv->serio);
+
+ return 0;
}
-module_exit(ams_delta_serio_exit);
+
+static struct platform_driver ams_delta_serio_driver = {
+ .probe = ams_delta_serio_init,
+ .remove = ams_delta_serio_exit,
+ .driver = {
+ .name = DRIVER_NAME
+ },
+};
+module_platform_driver(ams_delta_serio_driver);
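
The long comment in the new probe path describes a pattern worth keeping in mind for other non-dt drivers: -ENODEV from devm_regulator_get() is turned into -EPROBE_DEFER so probing retries until board code has either provided the supply or set the full constraints flag (which makes the core hand back a dummy regulator). A minimal sketch of the pattern in isolation; example_get_vcc is a hypothetical name, and only the error conversion mirrors the change above:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical helper; demonstrates only the -ENODEV -> -EPROBE_DEFER step. */
static struct regulator *example_get_vcc(struct device *dev)
{
	struct regulator *vcc = devm_regulator_get(dev, "vcc");

	if (IS_ERR(vcc)) {
		int err = PTR_ERR(vcc);

		/*
		 * On a non-dt board -ENODEV may only mean "not decided
		 * yet"; defer so probe is retried after the board sets
		 * its full constraints flag.
		 */
		if (err == -ENODEV)
			err = -EPROBE_DEFER;
		return ERR_PTR(err);
	}

	return vcc;
}
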
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 9c54c43c9749..2d1e2993b5a8 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 25151d9214e0..47a0e81a2989 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -424,6 +424,9 @@ static struct hv_driver hv_kbd_drv = {
.id_table = id_table,
.probe = hv_kbd_probe,
.remove = hv_kbd_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int __init hv_kbd_init(void)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index b353d494ad40..136f6e7bf797 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
},
+ {
+ /* Lenovo LaVie Z */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+ },
+ },
{ }
};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 824f4c1c1f31..b8bc71569349 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -573,6 +573,9 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
port = &i8042_ports[port_no];
serio = port->exists ? port->serio : NULL;
+ if (irq && serio)
+ pm_wakeup_event(&serio->dev, 0);
+
filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n",
port_no, irq,
dfl & SERIO_PARITY ? ", bad parity" : "",
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index d0fccc8ec259..fbb6b33845fa 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -23,10 +23,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 8cf964736902..a308d7811427 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -21,10 +21,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 24a90c8db5b3..2e1fb0649260 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 545fa6e89035..c82cd5079d0e 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1712,7 +1712,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
}
aiptek->data = usb_alloc_coherent(usbdev, AIPTEK_PACKET_LENGTH,
- GFP_ATOMIC, &aiptek->data_dma);
+ GFP_KERNEL, &aiptek->data_dma);
if (!aiptek->data) {
dev_warn(&intf->dev, "cannot allocate usb buffer\n");
goto fail1;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 32267c1afebc..2a80675cfd94 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -92,6 +92,19 @@ config TOUCHSCREEN_AD7879_SPI
To compile this driver as a module, choose M here: the
module will be called ad7879-spi.
+config TOUCHSCREEN_ADC
+ tristate "Generic ADC based resistive touchscreen"
+ depends on IIO
+ select IIO_BUFFER_CB
+ help
+ Say Y here if you want to use the generic ADC
+ resistive touchscreen driver.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called resistive-adc-touch.
+
config TOUCHSCREEN_AR1021_I2C
tristate "Microchip AR1020/1021 i2c touchscreen"
depends on I2C && OF
@@ -151,6 +164,18 @@ config TOUCHSCREEN_BU21013
To compile this driver as a module, choose M here: the
module will be called bu21013_ts.
+config TOUCHSCREEN_BU21029
+ tristate "Rohm BU21029 based touch panel controllers"
+ depends on I2C
+ help
+ Say Y here if you have a Rohm BU21029 touchscreen controller
+ connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bu21029_ts.
+
config TOUCHSCREEN_CHIPONE_ICN8318
tristate "chipone icn8318 touchscreen controller"
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index fd4fd32fb73f..5911a4190cd2 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -13,11 +13,13 @@ obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o
obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_I2C) += ad7879-i2c.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
+obj-$(CONFIG_TOUCHSCREEN_ADC) += resistive-adc-touch.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_AR1021_I2C) += ar1021_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
+obj-$(CONFIG_TOUCHSCREEN_BU21029) += bu21029_ts.o
obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318) += chipone_icn8318.o
obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 54fe190fd4bc..3232af5dcf89 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -75,6 +75,7 @@
#define MXT_SPT_DIGITIZER_T43 43
#define MXT_SPT_MESSAGECOUNT_T44 44
#define MXT_SPT_CTECONFIG_T46 46
+#define MXT_SPT_DYNAMICCONFIGURATIONCONTAINER_T71 71
#define MXT_TOUCH_MULTITOUCHSCREEN_T100 100
/* MXT_GEN_MESSAGE_T5 object */
@@ -88,12 +89,12 @@
#define MXT_COMMAND_DIAGNOSTIC 5
/* Define for T6 status byte */
-#define MXT_T6_STATUS_RESET (1 << 7)
-#define MXT_T6_STATUS_OFL (1 << 6)
-#define MXT_T6_STATUS_SIGERR (1 << 5)
-#define MXT_T6_STATUS_CAL (1 << 4)
-#define MXT_T6_STATUS_CFGERR (1 << 3)
-#define MXT_T6_STATUS_COMSERR (1 << 2)
+#define MXT_T6_STATUS_RESET BIT(7)
+#define MXT_T6_STATUS_OFL BIT(6)
+#define MXT_T6_STATUS_SIGERR BIT(5)
+#define MXT_T6_STATUS_CAL BIT(4)
+#define MXT_T6_STATUS_CFGERR BIT(3)
+#define MXT_T6_STATUS_COMSERR BIT(2)
/* MXT_GEN_POWER_T7 field */
struct t7_config {
@@ -112,14 +113,14 @@ struct t7_config {
#define MXT_T9_RANGE 18
/* MXT_TOUCH_MULTI_T9 status */
-#define MXT_T9_UNGRIP (1 << 0)
-#define MXT_T9_SUPPRESS (1 << 1)
-#define MXT_T9_AMP (1 << 2)
-#define MXT_T9_VECTOR (1 << 3)
-#define MXT_T9_MOVE (1 << 4)
-#define MXT_T9_RELEASE (1 << 5)
-#define MXT_T9_PRESS (1 << 6)
-#define MXT_T9_DETECT (1 << 7)
+#define MXT_T9_UNGRIP BIT(0)
+#define MXT_T9_SUPPRESS BIT(1)
+#define MXT_T9_AMP BIT(2)
+#define MXT_T9_VECTOR BIT(3)
+#define MXT_T9_MOVE BIT(4)
+#define MXT_T9_RELEASE BIT(5)
+#define MXT_T9_PRESS BIT(6)
+#define MXT_T9_DETECT BIT(7)
struct t9_range {
__le16 x;
@@ -127,9 +128,9 @@ struct t9_range {
} __packed;
/* MXT_TOUCH_MULTI_T9 orient */
-#define MXT_T9_ORIENT_SWITCH (1 << 0)
-#define MXT_T9_ORIENT_INVERTX (1 << 1)
-#define MXT_T9_ORIENT_INVERTY (1 << 2)
+#define MXT_T9_ORIENT_SWITCH BIT(0)
+#define MXT_T9_ORIENT_INVERTX BIT(1)
+#define MXT_T9_ORIENT_INVERTY BIT(2)
/* MXT_SPT_COMMSCONFIG_T18 */
#define MXT_COMMS_CTRL 0
@@ -214,7 +215,7 @@ enum t100_type {
#define MXT_FRAME_CRC_PASS 0x04
#define MXT_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */
#define MXT_BOOT_STATUS_MASK 0x3f
-#define MXT_BOOT_EXTENDED_ID (1 << 5)
+#define MXT_BOOT_EXTENDED_ID BIT(5)
#define MXT_BOOT_ID_MASK 0x1f
/* Touchscreen absolute values */
@@ -276,6 +277,19 @@ enum mxt_suspend_mode {
MXT_SUSPEND_T9_CTRL = 1,
};
+/* Config update context */
+struct mxt_cfg {
+ u8 *raw;
+ size_t raw_size;
+ off_t raw_pos;
+
+ u8 *mem;
+ size_t mem_size;
+ int start_ofs;
+
+ struct mxt_info info;
+};
+
/* Each client has this additional data */
struct mxt_data {
struct i2c_client *client;
@@ -317,6 +331,7 @@ struct mxt_data {
u8 T6_reportid;
u16 T6_address;
u16 T7_address;
+ u16 T71_address;
u8 T9_reportid_min;
u8 T9_reportid_max;
u8 T19_reportid;
@@ -382,6 +397,7 @@ static bool mxt_object_readable(unsigned int type)
case MXT_SPT_USERDATA_T38:
case MXT_SPT_DIGITIZER_T43:
case MXT_SPT_CTECONFIG_T46:
+ case MXT_SPT_DYNAMICCONFIGURATIONCONTAINER_T71:
return true;
default:
return false;
@@ -712,13 +728,13 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
u8 status = msg[1];
u32 crc = msg[2] | (msg[3] << 8) | (msg[4] << 16);
- complete(&data->crc_completion);
-
if (crc != data->config_crc) {
data->config_crc = crc;
dev_dbg(dev, "T6 Config Checksum: 0x%06X\n", crc);
}
+ complete(&data->crc_completion);
+
/* Detect reset */
if (status & MXT_T6_STATUS_RESET)
complete(&data->reset_completion);
@@ -827,6 +843,10 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
mxt_input_sync(data);
}
+ /* if active, pressure must be non-zero */
+ if (!amplitude)
+ amplitude = MXT_PRESSURE_DEFAULT;
+
/* Touch active */
input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 1);
input_report_abs(input_dev, ABS_MT_POSITION_X, x);
@@ -1279,12 +1299,7 @@ static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off)
return crc;
}
-static int mxt_prepare_cfg_mem(struct mxt_data *data,
- const struct firmware *cfg,
- unsigned int data_pos,
- unsigned int cfg_start_ofs,
- u8 *config_mem,
- size_t config_mem_size)
+static int mxt_prepare_cfg_mem(struct mxt_data *data, struct mxt_cfg *cfg)
{
struct device *dev = &data->client->dev;
struct mxt_object *object;
@@ -1295,9 +1310,9 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
u16 reg;
u8 val;
- while (data_pos < cfg->size) {
+ while (cfg->raw_pos < cfg->raw_size) {
/* Read type, instance, length */
- ret = sscanf(cfg->data + data_pos, "%x %x %x%n",
+ ret = sscanf(cfg->raw + cfg->raw_pos, "%x %x %x%n",
&type, &instance, &size, &offset);
if (ret == 0) {
/* EOF */
@@ -1306,20 +1321,20 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
dev_err(dev, "Bad format: failed to parse object\n");
return -EINVAL;
}
- data_pos += offset;
+ cfg->raw_pos += offset;
object = mxt_get_object(data, type);
if (!object) {
/* Skip object */
for (i = 0; i < size; i++) {
- ret = sscanf(cfg->data + data_pos, "%hhx%n",
+ ret = sscanf(cfg->raw + cfg->raw_pos, "%hhx%n",
&val, &offset);
if (ret != 1) {
dev_err(dev, "Bad format in T%d at %d\n",
type, i);
return -EINVAL;
}
- data_pos += offset;
+ cfg->raw_pos += offset;
}
continue;
}
@@ -1354,7 +1369,7 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
reg = object->start_address + mxt_obj_size(object) * instance;
for (i = 0; i < size; i++) {
- ret = sscanf(cfg->data + data_pos, "%hhx%n",
+ ret = sscanf(cfg->raw + cfg->raw_pos, "%hhx%n",
&val,
&offset);
if (ret != 1) {
@@ -1362,15 +1377,15 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
type, i);
return -EINVAL;
}
- data_pos += offset;
+ cfg->raw_pos += offset;
if (i > mxt_obj_size(object))
continue;
- byte_offset = reg + i - cfg_start_ofs;
+ byte_offset = reg + i - cfg->start_ofs;
- if (byte_offset >= 0 && byte_offset < config_mem_size) {
- *(config_mem + byte_offset) = val;
+ if (byte_offset >= 0 && byte_offset < cfg->mem_size) {
+ *(cfg->mem + byte_offset) = val;
} else {
dev_err(dev, "Bad object: reg:%d, T%d, ofs=%d\n",
reg, object->type, byte_offset);
@@ -1382,22 +1397,21 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
return 0;
}
-static int mxt_upload_cfg_mem(struct mxt_data *data, unsigned int cfg_start,
- u8 *config_mem, size_t config_mem_size)
+static int mxt_upload_cfg_mem(struct mxt_data *data, struct mxt_cfg *cfg)
{
unsigned int byte_offset = 0;
int error;
/* Write configuration as blocks */
- while (byte_offset < config_mem_size) {
- unsigned int size = config_mem_size - byte_offset;
+ while (byte_offset < cfg->mem_size) {
+ unsigned int size = cfg->mem_size - byte_offset;
if (size > MXT_MAX_BLOCK_WRITE)
size = MXT_MAX_BLOCK_WRITE;
error = __mxt_write_reg(data->client,
- cfg_start + byte_offset,
- size, config_mem + byte_offset);
+ cfg->start_ofs + byte_offset,
+ size, cfg->mem + byte_offset);
if (error) {
dev_err(&data->client->dev,
"Config write error, ret=%d\n", error);
@@ -1431,65 +1445,75 @@ static int mxt_init_t7_power_cfg(struct mxt_data *data);
* <SIZE> - 2-byte object size as hex
* <CONTENTS> - array of <SIZE> 1-byte hex values
*/
-static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
+static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
{
struct device *dev = &data->client->dev;
- struct mxt_info cfg_info;
+ struct mxt_cfg cfg;
int ret;
int offset;
- int data_pos;
int i;
- int cfg_start_ofs;
u32 info_crc, config_crc, calculated_crc;
- u8 *config_mem;
- size_t config_mem_size;
+ u16 crc_start = 0;
+
+ /* Make zero terminated copy of the OBP_RAW file */
+ cfg.raw = kmemdup_nul(fw->data, fw->size, GFP_KERNEL);
+ if (!cfg.raw)
+ return -ENOMEM;
+
+ cfg.raw_size = fw->size;
mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1);
- if (strncmp(cfg->data, MXT_CFG_MAGIC, strlen(MXT_CFG_MAGIC))) {
+ if (strncmp(cfg.raw, MXT_CFG_MAGIC, strlen(MXT_CFG_MAGIC))) {
dev_err(dev, "Unrecognised config file\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
- data_pos = strlen(MXT_CFG_MAGIC);
+ cfg.raw_pos = strlen(MXT_CFG_MAGIC);
/* Load information block and check */
for (i = 0; i < sizeof(struct mxt_info); i++) {
- ret = sscanf(cfg->data + data_pos, "%hhx%n",
- (unsigned char *)&cfg_info + i,
+ ret = sscanf(cfg.raw + cfg.raw_pos, "%hhx%n",
+ (unsigned char *)&cfg.info + i,
&offset);
if (ret != 1) {
dev_err(dev, "Bad format\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
- data_pos += offset;
+ cfg.raw_pos += offset;
}
- if (cfg_info.family_id != data->info->family_id) {
+ if (cfg.info.family_id != data->info->family_id) {
dev_err(dev, "Family ID mismatch!\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
- if (cfg_info.variant_id != data->info->variant_id) {
+ if (cfg.info.variant_id != data->info->variant_id) {
dev_err(dev, "Variant ID mismatch!\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
/* Read CRCs */
- ret = sscanf(cfg->data + data_pos, "%x%n", &info_crc, &offset);
+ ret = sscanf(cfg.raw + cfg.raw_pos, "%x%n", &info_crc, &offset);
if (ret != 1) {
dev_err(dev, "Bad format: failed to parse Info CRC\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
- data_pos += offset;
+ cfg.raw_pos += offset;
- ret = sscanf(cfg->data + data_pos, "%x%n", &config_crc, &offset);
+ ret = sscanf(cfg.raw + cfg.raw_pos, "%x%n", &config_crc, &offset);
if (ret != 1) {
dev_err(dev, "Bad format: failed to parse Config CRC\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_raw;
}
- data_pos += offset;
+ cfg.raw_pos += offset;
/*
* The Info Block CRC is calculated over mxt_info and the object
@@ -1515,39 +1539,39 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
}
/* Malloc memory to store configuration */
- cfg_start_ofs = MXT_OBJECT_START +
+ cfg.start_ofs = MXT_OBJECT_START +
data->info->object_num * sizeof(struct mxt_object) +
MXT_INFO_CHECKSUM_SIZE;
- config_mem_size = data->mem_size - cfg_start_ofs;
- config_mem = kzalloc(config_mem_size, GFP_KERNEL);
- if (!config_mem) {
- dev_err(dev, "Failed to allocate memory\n");
- return -ENOMEM;
+ cfg.mem_size = data->mem_size - cfg.start_ofs;
+ cfg.mem = kzalloc(cfg.mem_size, GFP_KERNEL);
+ if (!cfg.mem) {
+ ret = -ENOMEM;
+ goto release_raw;
}
- ret = mxt_prepare_cfg_mem(data, cfg, data_pos, cfg_start_ofs,
- config_mem, config_mem_size);
+ ret = mxt_prepare_cfg_mem(data, &cfg);
if (ret)
goto release_mem;
/* Calculate crc of the received configs (not the raw config file) */
- if (data->T7_address < cfg_start_ofs) {
- dev_err(dev, "Bad T7 address, T7addr = %x, config offset %x\n",
- data->T7_address, cfg_start_ofs);
- ret = 0;
- goto release_mem;
- }
+ if (data->T71_address)
+ crc_start = data->T71_address;
+ else if (data->T7_address)
+ crc_start = data->T7_address;
+ else
+ dev_warn(dev, "Could not find CRC start\n");
- calculated_crc = mxt_calculate_crc(config_mem,
- data->T7_address - cfg_start_ofs,
- config_mem_size);
+ if (crc_start > cfg.start_ofs) {
+ calculated_crc = mxt_calculate_crc(cfg.mem,
+ crc_start - cfg.start_ofs,
+ cfg.mem_size);
- if (config_crc > 0 && config_crc != calculated_crc)
- dev_warn(dev, "Config CRC error, calculated=%06X, file=%06X\n",
- calculated_crc, config_crc);
+ if (config_crc > 0 && config_crc != calculated_crc)
+ dev_warn(dev, "Config CRC in file inconsistent, calculated=%06X, file=%06X\n",
+ calculated_crc, config_crc);
+ }
- ret = mxt_upload_cfg_mem(data, cfg_start_ofs,
- config_mem, config_mem_size);
+ ret = mxt_upload_cfg_mem(data, &cfg);
if (ret)
goto release_mem;
@@ -1562,8 +1586,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
/* T7 config may have changed */
mxt_init_t7_power_cfg(data);
+release_raw:
+ kfree(cfg.raw);
release_mem:
- kfree(config_mem);
+ kfree(cfg.mem);
return ret;
}
@@ -1591,6 +1617,7 @@ static void mxt_free_object_table(struct mxt_data *data)
data->T5_msg_size = 0;
data->T6_reportid = 0;
data->T7_address = 0;
+ data->T71_address = 0;
data->T9_reportid_min = 0;
data->T9_reportid_max = 0;
data->T19_reportid = 0;
@@ -1656,12 +1683,16 @@ static int mxt_parse_object_table(struct mxt_data *data,
case MXT_GEN_POWER_T7:
data->T7_address = object->start_address;
break;
+ case MXT_SPT_DYNAMICCONFIGURATIONCONTAINER_T71:
+ data->T71_address = object->start_address;
+ break;
case MXT_TOUCH_MULTI_T9:
data->multitouch = MXT_TOUCH_MULTI_T9;
+ /* Only handle messages from first T9 instance */
data->T9_reportid_min = min_id;
- data->T9_reportid_max = max_id;
- data->num_touchids = object->num_report_ids
- * mxt_obj_instances(object);
+ data->T9_reportid_max = min_id +
+ object->num_report_ids - 1;
+ data->num_touchids = object->num_report_ids;
break;
case MXT_SPT_MESSAGECOUNT_T44:
data->T44_address = object->start_address;
@@ -1981,10 +2012,8 @@ static int mxt_initialize_input_device(struct mxt_data *data)
/* Register input device */
input_dev = input_allocate_device();
- if (!input_dev) {
- dev_err(dev, "Failed to allocate memory\n");
+ if (!input_dev)
return -ENOMEM;
- }
input_dev->name = "Atmel maXTouch Touchscreen";
input_dev->phys = data->phys;
@@ -2055,12 +2084,6 @@ static int mxt_initialize_input_device(struct mxt_data *data)
}
if (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 &&
- data->t100_aux_ampl) {
- input_set_abs_params(input_dev, ABS_MT_PRESSURE,
- 0, 255, 0, 0);
- }
-
- if (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 &&
data->t100_aux_vect) {
input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
0, 255, 0, 0);
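
The rework of mxt_prepare_cfg_mem() and mxt_update_cfg() above keeps the same parsing technique: each sscanf() conversion ends in %n so the parser learns how many characters were consumed and can advance raw_pos through the OBP_RAW text without tokenizing the buffer first. A standalone userspace sketch of that cursor idiom over a made-up one-object record (the record contents are illustrative only):

#include <stdio.h>

int main(void)
{
	const char *raw = "0026 0000 0002 aa 55";	/* type, instance, size, then data bytes */
	unsigned int type, instance, size;
	unsigned char val;
	int pos = 0, consumed, i;

	/* %n reports the characters consumed so far; it is not counted in the return value */
	if (sscanf(raw + pos, "%x %x %x%n", &type, &instance, &size, &consumed) != 3)
		return 1;
	pos += consumed;

	for (i = 0; i < (int)size; i++) {
		if (sscanf(raw + pos, "%hhx%n", &val, &consumed) != 1)
			return 1;
		pos += consumed;
		printf("T%u[%d] = 0x%02x\n", type, i, val);
	}

	return 0;
}
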
diff --git a/drivers/input/touchscreen/bu21029_ts.c b/drivers/input/touchscreen/bu21029_ts.c
new file mode 100644
index 000000000000..49a8d4bbca3a
--- /dev/null
+++ b/drivers/input/touchscreen/bu21029_ts.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rohm BU21029 touchscreen controller driver
+ *
+ * Copyright (C) 2015-2018 Bosch Sicherheitssysteme GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/timer.h>
+
+/*
+ * HW_ID1 Register (PAGE=0, ADDR=0x0E, Reset value=0x02, Read only)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | HW_IDH |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * HW_ID2 Register (PAGE=0, ADDR=0x0F, Reset value=0x29, Read only)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | HW_IDL |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * HW_IDH: high 8bits of IC's ID
+ * HW_IDL: low 8bits of IC's ID
+ */
+#define BU21029_HWID_REG (0x0E << 3)
+#define SUPPORTED_HWID 0x0229
+
+/*
+ * CFR0 Register (PAGE=0, ADDR=0x00, Reset value=0x20)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | 0 | 0 | CALIB | INTRM | 0 | 0 | 0 | 0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * CALIB: 0 = not to use calibration result (*)
+ * 1 = use calibration result
+ * INTRM: 0 = INT output depends on "pen down" (*)
+ * 1 = INT output always "0"
+ */
+#define BU21029_CFR0_REG (0x00 << 3)
+#define CFR0_VALUE 0x00
+
+/*
+ * CFR1 Register (PAGE=0, ADDR=0x01, Reset value=0xA6)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | MAV | AVE[2:0] | 0 | SMPL[2:0] |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * MAV: 0 = median average filter off
+ * 1 = median average filter on (*)
+ * AVE: AVE+1 = number of average samples for MAV,
+ * if AVE>SMPL, then AVE=SMPL (=3)
+ * SMPL: SMPL+1 = number of conversion samples for MAV (=7)
+ */
+#define BU21029_CFR1_REG (0x01 << 3)
+#define CFR1_VALUE 0xA6
+
+/*
+ * CFR2 Register (PAGE=0, ADDR=0x02, Reset value=0x04)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | INTVL_TIME[3:0] | TIME_ST_ADC[3:0] |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * INTVL_TIME: waiting time between completion of conversion
+ * and start of next conversion, only usable in
+ * autoscan mode (=20.480ms)
+ * TIME_ST_ADC: waiting time between application of voltage
+ * to panel and start of A/D conversion (=100us)
+ */
+#define BU21029_CFR2_REG (0x02 << 3)
+#define CFR2_VALUE 0xC9
+
+/*
+ * CFR3 Register (PAGE=0, ADDR=0x0B, Reset value=0x72)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | RM8 | STRETCH| PU90K | DUAL | PIDAC_OFS[3:0] |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * RM8: 0 = coordinate resolution is 12bit (*)
+ * 1 = coordinate resolution is 8bit
+ * STRETCH: 0 = SCL_STRETCH function off
+ * 1 = SCL_STRETCH function on (*)
+ * PU90K: 0 = internal pull-up resistance for touch detection is ~50kohms (*)
+ * 1 = internal pull-up resistance for touch detection is ~90kohms
+ * DUAL: 0 = dual touch detection off (*)
+ * 1 = dual touch detection on
+ * PIDAC_OFS: dual touch detection circuit adjustment, it is not necessary
+ * to change this from initial value
+ */
+#define BU21029_CFR3_REG (0x0B << 3)
+#define CFR3_VALUE 0x42
+
+/*
+ * LDO Register (PAGE=0, ADDR=0x0C, Reset value=0x00)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | 0 | PVDD[2:0] | 0 | AVDD[2:0] |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * PVDD: output voltage of panel output regulator (=2.000V)
+ * AVDD: output voltage of analog circuit regulator (=2.000V)
+ */
+#define BU21029_LDO_REG (0x0C << 3)
+#define LDO_VALUE 0x77
+
+/*
+ * Serial Interface Command Byte 1 (CID=1)
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * | 1 | CF | CMSK | PDM | STP |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * CF: conversion function, see table 3 in datasheet p6 (=0000, automatic scan)
+ * CMSK: 0 = executes convert function (*)
+ * 1 = reads the convert result
+ * PDM: 0 = power down after convert function stops (*)
+ * 1 = keep power on after convert function stops
+ * STP: 1 = abort current conversion and power down, set to "0" automatically
+ */
+#define BU21029_AUTOSCAN 0x80
+
+/*
+ * The timeout value needs to be larger than INTVL_TIME + tConv4 (sample and
+ * conversion time), where tConv4 is calculated by formula:
+ * tPON + tDLY1 + (tTIME_ST_ADC + (tADC * tSMPL) * 2 + tDLY2) * 3
+ * see figure 8 in datasheet p15 for details of each field.
+ */
+#define PEN_UP_TIMEOUT_MS 50
+
+#define STOP_DELAY_MIN_US 50
+#define STOP_DELAY_MAX_US 1000
+#define START_DELAY_MS 2
+#define BUF_LEN 8
+#define SCALE_12BIT (1 << 12)
+#define MAX_12BIT ((1 << 12) - 1)
+#define DRIVER_NAME "bu21029"
+
+struct bu21029_ts_data {
+ struct i2c_client *client;
+ struct input_dev *in_dev;
+ struct timer_list timer;
+ struct regulator *vdd;
+ struct gpio_desc *reset_gpios;
+ u32 x_plate_ohms;
+ struct touchscreen_properties prop;
+};
+
+static void bu21029_touch_report(struct bu21029_ts_data *bu21029, const u8 *buf)
+{
+ u16 x, y, z1, z2;
+ u32 rz;
+ s32 max_pressure = input_abs_get_max(bu21029->in_dev, ABS_PRESSURE);
+
+ /*
+ * compose upper 8 and lower 4 bits into a 12bit value:
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * | ByteH | ByteL |
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * |b07|b06|b05|b04|b03|b02|b01|b00|b07|b06|b05|b04|b03|b02|b01|b00|
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * |v11|v10|v09|v08|v07|v06|v05|v04|v03|v02|v01|v00| 0 | 0 | 0 | 0 |
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ */
+ x = (buf[0] << 4) | (buf[1] >> 4);
+ y = (buf[2] << 4) | (buf[3] >> 4);
+ z1 = (buf[4] << 4) | (buf[5] >> 4);
+ z2 = (buf[6] << 4) | (buf[7] >> 4);
+
+ if (z1 && z2) {
+ /*
+ * calculate Rz (pressure resistance value) by equation:
+ * Rz = Rx * (x/Q) * ((z2/z1) - 1), where
+ * Rx is x-plate resistance,
+ * Q is the touch screen resolution (8bit = 256, 12bit = 4096)
+ * x, z1, z2 are the measured positions.
+ */
+ rz = z2 - z1;
+ rz *= x;
+ rz *= bu21029->x_plate_ohms;
+ rz /= z1;
+ rz = DIV_ROUND_CLOSEST(rz, SCALE_12BIT);
+ if (rz <= max_pressure) {
+ touchscreen_report_pos(bu21029->in_dev, &bu21029->prop,
+ x, y, false);
+ input_report_abs(bu21029->in_dev, ABS_PRESSURE,
+ max_pressure - rz);
+ input_report_key(bu21029->in_dev, BTN_TOUCH, 1);
+ input_sync(bu21029->in_dev);
+ }
+ }
+}
+
+static void bu21029_touch_release(struct timer_list *t)
+{
+ struct bu21029_ts_data *bu21029 = from_timer(bu21029, t, timer);
+
+ input_report_abs(bu21029->in_dev, ABS_PRESSURE, 0);
+ input_report_key(bu21029->in_dev, BTN_TOUCH, 0);
+ input_sync(bu21029->in_dev);
+}
+
+static irqreturn_t bu21029_touch_soft_irq(int irq, void *data)
+{
+ struct bu21029_ts_data *bu21029 = data;
+ u8 buf[BUF_LEN];
+ int error;
+
+ /*
+ * Read touch data and deassert interrupt (will assert again after
+ * INTVL_TIME + tConv4 for continuous touch)
+ */
+ error = i2c_smbus_read_i2c_block_data(bu21029->client, BU21029_AUTOSCAN,
+ sizeof(buf), buf);
+ if (error < 0)
+ goto out;
+
+ bu21029_touch_report(bu21029, buf);
+
+ /* reset timer for pen up detection */
+ mod_timer(&bu21029->timer,
+ jiffies + msecs_to_jiffies(PEN_UP_TIMEOUT_MS));
+
+out:
+ return IRQ_HANDLED;
+}
+
+static void bu21029_put_chip_in_reset(struct bu21029_ts_data *bu21029)
+{
+ if (bu21029->reset_gpios) {
+ gpiod_set_value_cansleep(bu21029->reset_gpios, 1);
+ usleep_range(STOP_DELAY_MIN_US, STOP_DELAY_MAX_US);
+ }
+}
+
+static int bu21029_start_chip(struct input_dev *dev)
+{
+ struct bu21029_ts_data *bu21029 = input_get_drvdata(dev);
+ struct i2c_client *i2c = bu21029->client;
+ struct {
+ u8 reg;
+ u8 value;
+ } init_table[] = {
+ {BU21029_CFR0_REG, CFR0_VALUE},
+ {BU21029_CFR1_REG, CFR1_VALUE},
+ {BU21029_CFR2_REG, CFR2_VALUE},
+ {BU21029_CFR3_REG, CFR3_VALUE},
+ {BU21029_LDO_REG, LDO_VALUE}
+ };
+ int error, i;
+ __be16 hwid;
+
+ error = regulator_enable(bu21029->vdd);
+ if (error) {
+ dev_err(&i2c->dev, "failed to power up chip: %d", error);
+ return error;
+ }
+
+ /* take chip out of reset */
+ if (bu21029->reset_gpios) {
+ gpiod_set_value_cansleep(bu21029->reset_gpios, 0);
+ msleep(START_DELAY_MS);
+ }
+
+ error = i2c_smbus_read_i2c_block_data(i2c, BU21029_HWID_REG,
+ sizeof(hwid), (u8 *)&hwid);
+ if (error < 0) {
+ dev_err(&i2c->dev, "failed to read HW ID\n");
+ goto err_out;
+ }
+
+ if (be16_to_cpu(hwid) != SUPPORTED_HWID) {
+ dev_err(&i2c->dev,
+ "unsupported HW ID 0x%x\n", be16_to_cpu(hwid));
+ error = -ENODEV;
+ goto err_out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(init_table); ++i) {
+ error = i2c_smbus_write_byte_data(i2c,
+ init_table[i].reg,
+ init_table[i].value);
+ if (error < 0) {
+ dev_err(&i2c->dev,
+ "failed to write %#02x to register %#02x: %d\n",
+ init_table[i].value, init_table[i].reg,
+ error);
+ goto err_out;
+ }
+ }
+
+ error = i2c_smbus_write_byte(i2c, BU21029_AUTOSCAN);
+ if (error < 0) {
+ dev_err(&i2c->dev, "failed to start autoscan\n");
+ goto err_out;
+ }
+
+ enable_irq(bu21029->client->irq);
+ return 0;
+
+err_out:
+ bu21029_put_chip_in_reset(bu21029);
+ regulator_disable(bu21029->vdd);
+ return error;
+}
+
+static void bu21029_stop_chip(struct input_dev *dev)
+{
+ struct bu21029_ts_data *bu21029 = input_get_drvdata(dev);
+
+ disable_irq(bu21029->client->irq);
+ del_timer_sync(&bu21029->timer);
+
+ bu21029_put_chip_in_reset(bu21029);
+ regulator_disable(bu21029->vdd);
+}
+
+static int bu21029_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bu21029_ts_data *bu21029;
+ struct input_dev *in_dev;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WRITE_BYTE |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ dev_err(&client->dev,
+ "i2c functionality support is not sufficient\n");
+ return -EIO;
+ }
+
+ bu21029 = devm_kzalloc(&client->dev, sizeof(*bu21029), GFP_KERNEL);
+ if (!bu21029)
+ return -ENOMEM;
+
+ error = device_property_read_u32(&client->dev, "rohm,x-plate-ohms",
+ &bu21029->x_plate_ohms);
+ if (error) {
+ dev_err(&client->dev,
+ "invalid 'x-plate-ohms' supplied: %d\n", error);
+ return error;
+ }
+
+ bu21029->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(bu21029->vdd)) {
+ error = PTR_ERR(bu21029->vdd);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "failed to acquire 'vdd' supply: %d\n", error);
+ return error;
+ }
+
+ bu21029->reset_gpios = devm_gpiod_get_optional(&client->dev,
+ "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(bu21029->reset_gpios)) {
+ error = PTR_ERR(bu21029->reset_gpios);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "failed to acquire 'reset' gpio: %d\n", error);
+ return error;
+ }
+
+ in_dev = devm_input_allocate_device(&client->dev);
+ if (!in_dev) {
+ dev_err(&client->dev, "unable to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ bu21029->client = client;
+ bu21029->in_dev = in_dev;
+ timer_setup(&bu21029->timer, bu21029_touch_release, 0);
+
+ in_dev->name = DRIVER_NAME;
+ in_dev->id.bustype = BUS_I2C;
+ in_dev->open = bu21029_start_chip;
+ in_dev->close = bu21029_stop_chip;
+
+ input_set_capability(in_dev, EV_KEY, BTN_TOUCH);
+ input_set_abs_params(in_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(in_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(in_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
+ touchscreen_parse_properties(in_dev, false, &bu21029->prop);
+
+ input_set_drvdata(in_dev, bu21029);
+
+ irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, bu21029_touch_soft_irq,
+ IRQF_ONESHOT, DRIVER_NAME, bu21029);
+ if (error) {
+ dev_err(&client->dev,
+ "unable to request touch irq: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(in_dev);
+ if (error) {
+ dev_err(&client->dev,
+ "unable to register input device: %d\n", error);
+ return error;
+ }
+
+ i2c_set_clientdata(client, bu21029);
+
+ return 0;
+}
+
+static int __maybe_unused bu21029_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct bu21029_ts_data *bu21029 = i2c_get_clientdata(i2c);
+
+ if (!device_may_wakeup(dev)) {
+ mutex_lock(&bu21029->in_dev->mutex);
+ if (bu21029->in_dev->users)
+ bu21029_stop_chip(bu21029->in_dev);
+ mutex_unlock(&bu21029->in_dev->mutex);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused bu21029_resume(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct bu21029_ts_data *bu21029 = i2c_get_clientdata(i2c);
+
+ if (!device_may_wakeup(dev)) {
+ mutex_lock(&bu21029->in_dev->mutex);
+ if (bu21029->in_dev->users)
+ bu21029_start_chip(bu21029->in_dev);
+ mutex_unlock(&bu21029->in_dev->mutex);
+ }
+
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(bu21029_pm_ops, bu21029_suspend, bu21029_resume);
+
+static const struct i2c_device_id bu21029_ids[] = {
+ { DRIVER_NAME, 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, bu21029_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id bu21029_of_ids[] = {
+ { .compatible = "rohm,bu21029" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bu21029_of_ids);
+#endif
+
+static struct i2c_driver bu21029_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(bu21029_of_ids),
+ .pm = &bu21029_pm_ops,
+ },
+ .id_table = bu21029_ids,
+ .probe = bu21029_probe,
+};
+module_i2c_driver(bu21029_driver);
+
+MODULE_AUTHOR("Zhu Yi <yi.zhu5@cn.bosch.com>");
+MODULE_DESCRIPTION("Rohm BU21029 touchscreen controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 2facad75eb6d..7fe41965c5d1 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -1,9 +1,9 @@
/*
* Touch Screen driver for EETI's I2C connected touch screen panels
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ * Copyright (c) 2009,2018 Daniel Mack <daniel@zonque.org>
*
* See EETI's software guide for the protocol specification:
- * http://home.eeti.com.tw/web20/eg/guide.htm
+ * http://home.eeti.com.tw/documentation.html
*
* Based on migor_ts.c
* Copyright (c) 2008 Magnus Damm
@@ -25,28 +25,22 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/input.h>
+#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/timer.h>
#include <linux/gpio/consumer.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
-static bool flip_x;
-module_param(flip_x, bool, 0644);
-MODULE_PARM_DESC(flip_x, "flip x coordinate");
-
-static bool flip_y;
-module_param(flip_y, bool, 0644);
-MODULE_PARM_DESC(flip_y, "flip y coordinate");
-
struct eeti_ts {
struct i2c_client *client;
struct input_dev *input;
struct gpio_desc *attn_gpio;
+ struct touchscreen_properties props;
bool running;
};
@@ -73,17 +67,10 @@ static void eeti_ts_report_event(struct eeti_ts *eeti, u8 *buf)
x >>= res - EETI_TS_BITDEPTH;
y >>= res - EETI_TS_BITDEPTH;
- if (flip_x)
- x = EETI_MAXVAL - x;
-
- if (flip_y)
- y = EETI_MAXVAL - y;
-
if (buf[0] & REPORT_BIT_HAS_PRESSURE)
input_report_abs(eeti->input, ABS_PRESSURE, buf[5]);
- input_report_abs(eeti->input, ABS_X, x);
- input_report_abs(eeti->input, ABS_Y, y);
+ touchscreen_report_pos(eeti->input, &eeti->props, x, y, false);
input_report_key(eeti->input, BTN_TOUCH, buf[0] & REPORT_BIT_PRESSED);
input_sync(eeti->input);
}
@@ -178,6 +165,8 @@ static int eeti_ts_probe(struct i2c_client *client,
input_set_abs_params(input, ABS_Y, 0, EETI_MAXVAL, 0, 0);
input_set_abs_params(input, ABS_PRESSURE, 0, 0xff, 0, 0);
+ touchscreen_parse_properties(input, false, &eeti->props);
+
input->name = client->name;
input->id.bustype = BUS_I2C;
input->open = eeti_ts_open;
@@ -262,10 +251,18 @@ static const struct i2c_device_id eeti_ts_id[] = {
};
MODULE_DEVICE_TABLE(i2c, eeti_ts_id);
+#ifdef CONFIG_OF
+static const struct of_device_id of_eeti_ts_match[] = {
+ { .compatible = "eeti,exc3000-i2c", },
+ { }
+};
+#endif
+
static struct i2c_driver eeti_ts_driver = {
.driver = {
.name = "eeti_ts",
.pm = &eeti_ts_pm,
+ .of_match_table = of_match_ptr(of_eeti_ts_match),
},
.probe = eeti_ts_probe,
.id_table = eeti_ts_id,
@@ -274,5 +271,5 @@ static struct i2c_driver eeti_ts_driver = {
module_i2c_driver(eeti_ts_driver);
MODULE_DESCRIPTION("EETI Touchscreen driver");
-MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 752ae9cf4514..80e69bb8283e 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for EETI eGalax Multiple Touch Controller
*
* Copyright (C) 2011 Freescale Semiconductor, Inc.
*
* based on max11801_ts.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* EETI eGalax serial touch screen controller is an I2C based multiple
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index 83433e8efff7..7f2942f3cec6 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -352,6 +352,7 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
case 1: /* 6-byte protocol */
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0);
+ /* fall through */
case 2: /* 4-byte protocol */
input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0);
diff --git a/drivers/input/touchscreen/fsl-imx25-tcq.c b/drivers/input/touchscreen/fsl-imx25-tcq.c
index 47fe1f184bbc..1d6c8f490b40 100644
--- a/drivers/input/touchscreen/fsl-imx25-tcq.c
+++ b/drivers/input/touchscreen/fsl-imx25-tcq.c
@@ -1,16 +1,11 @@
-/*
- * Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- *
- * Based on driver from 2011:
- * Juergen Beisert, Pengutronix <kernel@pengutronix.de>
- *
- * This is the driver for the imx25 TCQ (Touchscreen Conversion Queue)
- * connected to the imx25 ADC.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
+// Based on driver from 2011:
+// Juergen Beisert, Pengutronix <kernel@pengutronix.de>
+//
+// This is the driver for the imx25 TCQ (Touchscreen Conversion Queue)
+// connected to the imx25 ADC.
#include <linux/clk.h>
#include <linux/device.h>
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index 481586909d28..054c2537b392 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -20,10 +20,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/errno.h>
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index ee82a975bfd2..c10fc594f94d 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -1,12 +1,8 @@
-/*
- * Freescale i.MX6UL touchscreen controller driver
- *
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale i.MX6UL touchscreen controller driver
+//
+// Copyright (C) 2015 Freescale Semiconductor, Inc.
#include <linux/errno.h>
#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index d1c09e6a2cb6..c89853a36f9e 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -466,7 +466,7 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client)
}
}
- return 0;
+ return false;
}
static bool raydium_i2c_fw_trigger(struct i2c_client *client)
@@ -492,7 +492,7 @@ static bool raydium_i2c_fw_trigger(struct i2c_client *client)
}
}
- return 0;
+ return false;
}
static int raydium_i2c_check_path(struct i2c_client *client)
diff --git a/drivers/input/touchscreen/resistive-adc-touch.c b/drivers/input/touchscreen/resistive-adc-touch.c
new file mode 100644
index 000000000000..cfc8bb4553f7
--- /dev/null
+++ b/drivers/input/touchscreen/resistive-adc-touch.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADC generic resistive touchscreen (GRTS)
+ * This is a generic input driver that connects to an ADC
+ * given the channels in device tree, and reports events to the input
+ * subsystem.
+ *
+ * Copyright (C) 2017,2018 Microchip Technology,
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ */
+#include <linux/input.h>
+#include <linux/input/touchscreen.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "resistive-adc-touch"
+#define GRTS_DEFAULT_PRESSURE_MIN 50000
+#define GRTS_MAX_POS_MASK GENMASK(11, 0)
+
+/**
+ * struct grts_state - generic resistive touchscreen information
+ * @pressure_min: number representing the minimum for the pressure
+ * @pressure: are we getting pressure info or not
+ * @iio_chans: list of channels acquired
+ * @iio_cb: iio_callback buffer for the data
+ * @input: the input device structure that we register
+ * @prop: touchscreen properties struct
+ */
+struct grts_state {
+ u32 pressure_min;
+ bool pressure;
+ struct iio_channel *iio_chans;
+ struct iio_cb_buffer *iio_cb;
+ struct input_dev *input;
+ struct touchscreen_properties prop;
+};
+
+static int grts_cb(const void *data, void *private)
+{
+ const u16 *touch_info = data;
+ struct grts_state *st = private;
+ unsigned int x, y, press = 0x0;
+
+ /* channel data coming in buffer in the order below */
+ x = touch_info[0];
+ y = touch_info[1];
+ if (st->pressure)
+ press = touch_info[2];
+
+ if ((!x && !y) || (st->pressure && (press < st->pressure_min))) {
+ /* report end of touch */
+ input_report_key(st->input, BTN_TOUCH, 0);
+ input_sync(st->input);
+ return 0;
+ }
+
+	/* report a proper touch to the subsystem */
+ touchscreen_report_pos(st->input, &st->prop, x, y, false);
+ if (st->pressure)
+ input_report_abs(st->input, ABS_PRESSURE, press);
+ input_report_key(st->input, BTN_TOUCH, 1);
+ input_sync(st->input);
+
+ return 0;
+}
+
+static int grts_open(struct input_dev *dev)
+{
+ int error;
+ struct grts_state *st = input_get_drvdata(dev);
+
+ error = iio_channel_start_all_cb(st->iio_cb);
+ if (error) {
+ dev_err(dev->dev.parent, "failed to start callback buffer.\n");
+ return error;
+ }
+ return 0;
+}
+
+static void grts_close(struct input_dev *dev)
+{
+ struct grts_state *st = input_get_drvdata(dev);
+
+ iio_channel_stop_all_cb(st->iio_cb);
+}
+
+static void grts_disable(void *data)
+{
+ iio_channel_release_all_cb(data);
+}
+
+static int grts_probe(struct platform_device *pdev)
+{
+ struct grts_state *st;
+ struct input_dev *input;
+ struct device *dev = &pdev->dev;
+ struct iio_channel *chan;
+ int error;
+
+ st = devm_kzalloc(dev, sizeof(struct grts_state), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ /* get the channels from IIO device */
+ st->iio_chans = devm_iio_channel_get_all(dev);
+ if (IS_ERR(st->iio_chans)) {
+ error = PTR_ERR(st->iio_chans);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "can't get iio channels.\n");
+ return error;
+ }
+
+ chan = &st->iio_chans[0];
+ st->pressure = false;
+ while (chan && chan->indio_dev) {
+ if (!strcmp(chan->channel->datasheet_name, "pressure"))
+ st->pressure = true;
+ chan++;
+ }
+
+ if (st->pressure) {
+ error = device_property_read_u32(dev,
+ "touchscreen-min-pressure",
+ &st->pressure_min);
+ if (error) {
+ dev_dbg(dev, "can't get touchscreen-min-pressure property.\n");
+ st->pressure_min = GRTS_DEFAULT_PRESSURE_MIN;
+ }
+ }
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "failed to allocate input device.\n");
+ return -ENOMEM;
+ }
+
+ input->name = DRIVER_NAME;
+ input->id.bustype = BUS_HOST;
+ input->open = grts_open;
+ input->close = grts_close;
+
+ input_set_abs_params(input, ABS_X, 0, GRTS_MAX_POS_MASK - 1, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, GRTS_MAX_POS_MASK - 1, 0, 0);
+ if (st->pressure)
+ input_set_abs_params(input, ABS_PRESSURE, st->pressure_min,
+ 0xffff, 0, 0);
+
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
+
+ /* parse optional device tree properties */
+ touchscreen_parse_properties(input, false, &st->prop);
+
+ st->input = input;
+ input_set_drvdata(input, st);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "failed to register input device.");
+ return error;
+ }
+
+ st->iio_cb = iio_channel_get_all_cb(dev, grts_cb, st);
+ if (IS_ERR(st->iio_cb)) {
+ dev_err(dev, "failed to allocate callback buffer.\n");
+ return PTR_ERR(st->iio_cb);
+ }
+
+ error = devm_add_action_or_reset(dev, grts_disable, st->iio_cb);
+ if (error) {
+ dev_err(dev, "failed to add disable action.\n");
+ return error;
+ }
+
+ return 0;
+}
+
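+/*
+ * A device tree node bound by this driver is expected to look roughly
+ * like the hypothetical example below (channel indices and the
+ * pressure threshold are made up): the consumer channels supply x, y
+ * and, optionally, a channel whose datasheet name is "pressure":
+ *
+ * resistive-touch {
+ * compatible = "resistive-adc-touch";
+ * io-channels = <&adc 24>, <&adc 25>, <&adc 26>;
+ * touchscreen-min-pressure = <50000>;
+ * };
+ */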
+static const struct of_device_id grts_of_match[] = {
+ {
+ .compatible = "resistive-adc-touch",
+ }, {
+ /* sentinel */
+ },
+};
+
+MODULE_DEVICE_TABLE(of, grts_of_match);
+
+static struct platform_driver grts_driver = {
+ .probe = grts_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(grts_of_match),
+ },
+};
+
+module_platform_driver(grts_driver);
+
+MODULE_AUTHOR("Eugen Hristev <eugen.hristev@microchip.com>");
+MODULE_DESCRIPTION("Generic ADC Resistive Touch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/rohm_bu21023.c b/drivers/input/touchscreen/rohm_bu21023.c
index bda0500c9b57..714affdd742f 100644
--- a/drivers/input/touchscreen/rohm_bu21023.c
+++ b/drivers/input/touchscreen/rohm_bu21023.c
@@ -304,7 +304,7 @@ static int rohm_i2c_burst_read(struct i2c_client *client, u8 start, void *buf,
msg[1].len = len;
msg[1].buf = buf;
- i2c_lock_adapter(adap);
+ i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
for (i = 0; i < 2; i++) {
if (__i2c_transfer(adap, &msg[i], 1) < 0) {
@@ -313,7 +313,7 @@ static int rohm_i2c_burst_read(struct i2c_client *client, u8 start, void *buf,
}
}
- i2c_unlock_adapter(adap);
+ i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
return ret;
}
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 20f7f3902757..166edeb77776 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -1142,7 +1142,7 @@ static int __maybe_unused wdt87xx_resume(struct device *dev)
* The chip may have been reset while system is resuming,
* give it some time to settle.
*/
- mdelay(100);
+ msleep(100);
error = wdt87xx_send_command(client, VND_CMD_START, 0);
if (error)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e055d228bfb9..689ffe538370 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,7 +142,6 @@ config DMAR_TABLE
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
- select DMA_DIRECT_OPS
select IOMMU_API
select IOMMU_IOVA
select NEED_DMA_MAP_STATE
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 596b95c50051..60b2eab29cd8 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2620,7 +2620,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
return NULL;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), flag);
+ get_order(size), flag & __GFP_NOWARN);
if (!page)
return NULL;
}
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 1d0b53a04a08..58da65df03f5 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -508,7 +508,7 @@ static void do_fault(struct work_struct *work)
{
struct fault *fault = container_of(work, struct fault, work);
struct vm_area_struct *vma;
- int ret = VM_FAULT_ERROR;
+ vm_fault_t ret = VM_FAULT_ERROR;
unsigned int flags = 0;
struct mm_struct *mm;
u64 address;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 1d647104bccc..22bdabd3d8e0 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2915,8 +2915,6 @@ static struct platform_driver arm_smmu_driver = {
};
module_platform_driver(arm_smmu_driver);
-IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3");
-
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index f7a96bcf94a6..c73cfce1ccc0 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -2211,13 +2211,6 @@ static struct platform_driver arm_smmu_driver = {
};
module_platform_driver(arm_smmu_driver);
-IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1");
-IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2");
-IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400");
-IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401");
-IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500");
-IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2");
-
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ddcbbdb5d658..511ff9a1d6d9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -367,6 +367,9 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
iova_len = roundup_pow_of_two(iova_len);
+ if (dev->bus_dma_mask)
+ dma_limit &= dev->bus_dma_mask;
+
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 85879cfec52f..b128cb4372d3 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1390,5 +1390,3 @@ err_reg_driver:
return ret;
}
core_initcall(exynos_iommu_init);
-
-IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu");
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 14e4b3722428..6a237d18fabf 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,7 +31,6 @@
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
@@ -485,14 +484,37 @@ static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
+static int intel_iommu_pasid28;
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap))
-#define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
+/* Broadwell and Skylake have broken ECS support — normal so-called "second
+ * level" translation of DMA requests-without-PASID doesn't actually happen
+ * unless you also set the NESTE bit in an extended context-entry. Which of
+ * course means that SVM doesn't work because it's trying to do nested
+ * translation of the physical addresses it finds in the process page tables,
+ * through the IOVA->phys mapping found in the "second level" page tables.
+ *
+ * The VT-d specification was retroactively changed to change the definition
+ * of the capability bits and pretend that Broadwell/Skylake never happened...
+ * but unfortunately the wrong bit was changed. It's ECS which is broken, but
+ * for some reason it was the PASID capability bit which was redefined (from
+ * bit 28 on BDW/SKL to bit 40 in future).
+ *
+ * So our test for ECS needs to eschew those implementations which set the old
+ * PASID capability bit 28, since those are the ones on which ECS is broken.
+ * Unless we are working around the 'pasid28' limitations, that is, by putting
+ * the device into passthrough mode for normal DMA and thus masking the bug.
+ */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+ (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
+/* PASID support is thus enabled if ECS is enabled and *either* of the old
+ * or new capability bits are set. */
+#define pasid_enabled(iommu) (ecs_enabled(iommu) && \
+ (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -555,6 +577,11 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable extended context table support\n");
intel_iommu_ecs = 0;
+ } else if (!strncmp(str, "pasid28", 7)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: enable pre-production PASID support\n");
+ intel_iommu_pasid28 = 1;
+ iommu_identity_mapping |= IDENTMAP_GFX;
} else if (!strncmp(str, "tboot_noforce", 13)) {
printk(KERN_INFO
"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -3713,30 +3740,62 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
unsigned long attrs)
{
- void *vaddr;
+ struct page *page = NULL;
+ int order;
- vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
- if (iommu_no_mapping(dev) || !vaddr)
- return vaddr;
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
- *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
- PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
- dev->coherent_dma_mask);
- if (!*dma_handle)
- goto out_free_pages;
- return vaddr;
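+ /*
+ * When the device is translated by the IOMMU, no GFP zone
+ * restriction is needed: __intel_map_single() below allocates an
+ * IOVA honouring the coherent DMA mask. Untranslated devices
+ * instead get a zone narrow enough for their mask.
+ */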
+ if (!iommu_no_mapping(dev))
+ flags &= ~(GFP_DMA | GFP_DMA32);
+ else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+ flags |= GFP_DMA;
+ else
+ flags |= GFP_DMA32;
+ }
+
+ if (gfpflags_allow_blocking(flags)) {
+ unsigned int count = size >> PAGE_SHIFT;
+
+ page = dma_alloc_from_contiguous(dev, count, order,
+ flags & __GFP_NOWARN);
+ if (page && iommu_no_mapping(dev) &&
+ page_to_phys(page) + size > dev->coherent_dma_mask) {
+ dma_release_from_contiguous(dev, page, count);
+ page = NULL;
+ }
+ }
+
+ if (!page)
+ page = alloc_pages(flags, order);
+ if (!page)
+ return NULL;
+ memset(page_address(page), 0, size);
+
+ *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+ DMA_BIDIRECTIONAL,
+ dev->coherent_dma_mask);
+ if (*dma_handle)
+ return page_address(page);
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, order);
-out_free_pages:
- dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
return NULL;
}
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- if (!iommu_no_mapping(dev))
- intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
- dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+ int order;
+ struct page *page = virt_to_page(vaddr);
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ intel_unmap(dev, dma_handle, size);
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, order);
}
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 45f6e581cd56..7d65aab36a96 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -24,6 +24,7 @@
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
+#include <linux/mm_types.h>
#include <asm/page.h>
#define PASID_ENTRY_P BIT_ULL(0)
@@ -594,7 +595,8 @@ static irqreturn_t prq_event_thread(int irq, void *d)
struct vm_area_struct *vma;
struct page_req_dsc *req;
struct qi_desc resp;
- int ret, result;
+ int result;
+ vm_fault_t ret;
u64 address;
handled = 1;
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 40ae6e87cb88..f026aa16d5f1 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1108,9 +1108,6 @@ static void __exit ipmmu_exit(void)
subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);
-IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa");
-IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795");
-
MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 0d3350463a3f..27377742600d 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -877,7 +877,5 @@ static void __exit msm_iommu_driver_exit(void)
subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);
-IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu");
-
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 5c36a8b7656a..f7787e757244 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -27,9 +27,6 @@
#define NO_IOMMU 1
-static const struct of_device_id __iommu_of_table_sentinel
- __used __section(__iommu_of_table_end);
-
/**
* of_get_dma_window - Parse *dma-window property and returns 0 if found.
*
@@ -98,19 +95,6 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
-static bool of_iommu_driver_present(struct device_node *np)
-{
- /*
- * If the IOMMU still isn't ready by the time we reach init, assume
- * it never will be. We don't want to defer indefinitely, nor attempt
- * to dereference __iommu_of_table after it's been freed.
- */
- if (system_state >= SYSTEM_RUNNING)
- return false;
-
- return of_match_node(&__iommu_of_table, np);
-}
-
static int of_iommu_xlate(struct device *dev,
struct of_phandle_args *iommu_spec)
{
@@ -120,8 +104,7 @@ static int of_iommu_xlate(struct device *dev,
ops = iommu_ops_from_fwnode(fwnode);
if ((ops && !ops->of_xlate) ||
- !of_device_is_available(iommu_spec->np) ||
- (!ops && !of_iommu_driver_present(iommu_spec->np)))
+ !of_device_is_available(iommu_spec->np))
return NO_IOMMU;
err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
@@ -133,7 +116,7 @@ static int of_iommu_xlate(struct device *dev,
* a proper probe-ordering dependency mechanism in future.
*/
if (!ops)
- return -EPROBE_DEFER;
+ return driver_deferred_probe_check_state(dev);
return ops->of_xlate(dev, iommu_spec);
}
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index fe88a4880d3a..b48aee82d14b 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -945,7 +945,5 @@ static void __exit qcom_iommu_exit(void)
module_init(qcom_iommu_init);
module_exit(qcom_iommu_exit);
-IOMMU_OF_DECLARE(qcom_iommu_dev, "qcom,msm-iommu-v1");
-
MODULE_DESCRIPTION("IOMMU API for QCOM IOMMU v1 implementations");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 054cd2c8e9c8..de8d3bf91b23 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1284,8 +1284,6 @@ static int __init rk_iommu_init(void)
}
subsys_initcall(rk_iommu_init);
-IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu");
-
MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index a16b320739b4..8a9c169b6f99 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -304,6 +304,13 @@ static int tpci200_register(struct tpci200_board *tpci200)
ioremap_nocache(pci_resource_start(tpci200->info->pdev,
TPCI200_IP_INTERFACE_BAR),
TPCI200_IFACE_SIZE);
+ if (!tpci200->info->interface_regs) {
+ dev_err(&tpci200->info->pdev->dev,
+ "(bn 0x%X, sn 0x%X) failed to map driver user space!",
+ tpci200->info->pdev->bus->number,
+ tpci200->info->pdev->devfn);
+ goto out_release_mem8_space;
+ }
/* Initialize lock that protects interface_regs */
spin_lock_init(&tpci200->regs_lock);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index e9233db16e03..383e7b70221d 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -8,7 +8,7 @@ config ARM_GIC
bool
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config ARM_GIC_PM
@@ -34,7 +34,7 @@ config GIC_NON_BANKED
config ARM_GIC_V3
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select IRQ_DOMAIN_HIERARCHY
select PARTITION_PERCPU
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -66,7 +66,7 @@ config ARM_NVIC
config ARM_VIC
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
config ARM_VIC_NR
int
@@ -93,14 +93,14 @@ config ATMEL_AIC_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config ATMEL_AIC5_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config I8259
@@ -137,7 +137,7 @@ config DW_APB_ICTL
config FARADAY_FTINTC010
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config HISILICON_IRQ_MBIGEN
@@ -162,7 +162,7 @@ config CLPS711X_IRQCHIP
bool
depends on ARCH_CLPS711X
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
default y
@@ -181,7 +181,7 @@ config OMAP_IRQCHIP
config ORION_IRQCHIP
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
config PIC32_EVIC
bool
@@ -372,3 +372,15 @@ config QCOM_PDC
IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
endmenu
+
+config SIFIVE_PLIC
+ bool "SiFive Platform-Level Interrupt Controller"
+ depends on RISCV
+ help
+ This enables support for the PLIC chip found in SiFive (and
+ potentially other) RISC-V systems. The PLIC controls device
+ interrupts and connects them to each core's local interrupt
+ controller. Aside from timer and software interrupts, all other
+ interrupt sources are subordinate to the PLIC.
+
+ If you don't know what to do here, say Y.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 15f268f646bf..fbd1ec8070ef 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -87,3 +87,4 @@ obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
obj-$(CONFIG_NDS32) += irq-ativic32.o
obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
+obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 4eca5c763766..606efa64adff 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -45,6 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
*/
info->scratchpad[0].ul = mc_bus_dev->icid;
msi_info = msi_get_domain_info(msi_domain->parent);
+
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
}
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 25a98de5cfb2..8d6d009d1d58 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -66,7 +66,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
{
struct pci_dev *pdev, *alias_dev;
struct msi_domain_info *msi_info;
- int alias_count = 0;
+ int alias_count = 0, minnvec = 1;
if (!dev_is_pci(dev))
return -EINVAL;
@@ -86,8 +86,18 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
- return msi_info->ops->msi_prepare(domain->parent,
- dev, max(nvec, alias_count), info);
+ /*
+ * Always allocate a power of 2, and special case device 0 for
+ * broken systems where the DevID is not wired (and all devices
+ * appear as DevID 0). For that reason, we generously allocate a
+ * minimum of 32 MSIs for DevID 0. If you want more because all
+ * your devices are aliasing to DevID 0, consider fixing your HW.
+ */
+ nvec = max(nvec, alias_count);
+ if (!info->scratchpad[0].ul)
+ minnvec = 32;
+ nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+ return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}
static struct msi_domain_ops its_pci_msi_ops = {
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 8881a053c173..7b8e87b493fe 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -73,6 +73,8 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = dev_id;
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
return msi_info->ops->msi_prepare(domain->parent,
dev, nvec, info);
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d7842d312d3e..316a57530f6d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -23,6 +23,8 @@
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
@@ -160,7 +162,7 @@ static struct {
} vpe_proxy;
static LIST_HEAD(its_nodes);
-static DEFINE_SPINLOCK(its_lock);
+static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;
@@ -1421,112 +1423,176 @@ static struct irq_chip its_irq_chip = {
.irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
};
+
/*
* How we allocate LPIs:
*
- * The GIC has id_bits bits for interrupt identifiers. From there, we
- * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
- * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
- * bits to the right.
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
*
- * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
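+ *
+ * For example, allocating 32 LPIs from an initial free range starting
+ * at 8192 returns base 8192 and shrinks the range to start at 8224;
+ * freeing those LPIs later adds a 32-LPI block back at 8192, and the
+ * sort-and-merge pass coalesces it with any adjacent free range.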
*/
-#define IRQS_PER_CHUNK_SHIFT 5
-#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
-#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
-static unsigned long *lpi_bitmap;
-static u32 lpi_chunks;
-static DEFINE_SPINLOCK(lpi_lock);
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+ struct list_head entry;
+ u32 base_id;
+ u32 span;
+};
-static int its_lpi_to_chunk(int lpi)
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
- return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
+ struct lpi_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (range) {
+ INIT_LIST_HEAD(&range->entry);
+ range->base_id = base;
+ range->span = span;
+ }
+
+ return range;
}
-static int its_chunk_to_lpi(int chunk)
+static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
{
- return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
+ struct lpi_range *ra, *rb;
+
+ ra = container_of(a, struct lpi_range, entry);
+ rb = container_of(b, struct lpi_range, entry);
+
+ return rb->base_id - ra->base_id;
}
-static int __init its_lpi_init(u32 id_bits)
+static void merge_lpi_ranges(void)
{
- lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
+ struct lpi_range *range, *tmp;
- lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long),
- GFP_KERNEL);
- if (!lpi_bitmap) {
- lpi_chunks = 0;
- return -ENOMEM;
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (!list_is_last(&range->entry, &lpi_range_list) &&
+ (tmp->base_id == (range->base_id + range->span))) {
+ tmp->base_id = range->base_id;
+ tmp->span += range->span;
+ list_del(&range->entry);
+ kfree(range);
+ }
}
+}
- pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
- return 0;
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+ struct lpi_range *range, *tmp;
+ int err = -ENOSPC;
+
+ mutex_lock(&lpi_range_lock);
+
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (range->span >= nr_lpis) {
+ *base = range->base_id;
+ range->base_id += nr_lpis;
+ range->span -= nr_lpis;
+
+ if (range->span == 0) {
+ list_del(&range->entry);
+ kfree(range);
+ }
+
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&lpi_range_lock);
+
+ pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+ return err;
}
-static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
+static int free_lpi_range(u32 base, u32 nr_lpis)
{
- unsigned long *bitmap = NULL;
- int chunk_id;
- int nr_chunks;
- int i;
+ struct lpi_range *new;
+ int err = 0;
+
+ mutex_lock(&lpi_range_lock);
+
+ new = mk_lpi_range(base, nr_lpis);
+ if (!new) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&new->entry, &lpi_range_list);
+ list_sort(NULL, &lpi_range_list, lpi_range_cmp);
+ merge_lpi_ranges();
+out:
+ mutex_unlock(&lpi_range_lock);
+ return err;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+ u32 lpis = (1UL << id_bits) - 8192;
+ u32 numlpis;
+ int err;
+
+ numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+ if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+ lpis = numlpis;
+ pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+ lpis);
+ }
- nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
+ /*
+ * Initializing the allocator is just the same as freeing the
+ * full range of LPIs.
+ */
+ err = free_lpi_range(8192, lpis);
+ pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
+ return err;
+}
- spin_lock(&lpi_lock);
+static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
+{
+ unsigned long *bitmap = NULL;
+ int err = 0;
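+ /*
+ * If no free range can satisfy the request, halve it and retry
+ * until an allocation succeeds or the request shrinks to nothing.
+ */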
do {
- chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
- 0, nr_chunks, 0);
- if (chunk_id < lpi_chunks)
+ err = alloc_lpi_range(nr_irqs, base);
+ if (!err)
break;
- nr_chunks--;
- } while (nr_chunks > 0);
+ nr_irqs /= 2;
+ } while (nr_irqs > 0);
- if (!nr_chunks)
+ if (err)
goto out;
- bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK),
- sizeof(long),
- GFP_ATOMIC);
+ bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof(long), GFP_ATOMIC);
if (!bitmap)
goto out;
- for (i = 0; i < nr_chunks; i++)
- set_bit(chunk_id + i, lpi_bitmap);
-
- *base = its_chunk_to_lpi(chunk_id);
- *nr_ids = nr_chunks * IRQS_PER_CHUNK;
+ *nr_ids = nr_irqs;
out:
- spin_unlock(&lpi_lock);
-
if (!bitmap)
*base = *nr_ids = 0;
return bitmap;
}
-static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
{
- int lpi;
-
- spin_lock(&lpi_lock);
-
- for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
- int chunk = its_lpi_to_chunk(lpi);
-
- BUG_ON(chunk > lpi_chunks);
- if (test_bit(chunk, lpi_bitmap)) {
- clear_bit(chunk, lpi_bitmap);
- } else {
- pr_err("Bad LPI chunk %d\n", chunk);
- }
- }
-
- spin_unlock(&lpi_lock);
-
+ WARN_ON(free_lpi_range(base, nr_ids));
kfree(bitmap);
}
@@ -1559,7 +1625,7 @@ static int __init its_alloc_lpi_tables(void)
{
phys_addr_t paddr;
- lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
+ lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
if (!gic_rdists->prop_page) {
pr_err("Failed to allocate PROPBASE\n");
@@ -1997,12 +2063,12 @@ static void its_cpu_init_collections(void)
{
struct its_node *its;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry)
its_cpu_init_collection(its);
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
}
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
@@ -2134,17 +2200,20 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!its_alloc_device_table(its, dev_id))
return NULL;
+ if (WARN_ON(!is_power_of_2(nvecs)))
+ nvecs = roundup_pow_of_two(nvecs);
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
- * We allocate at least one chunk worth of LPIs bet device,
- * and thus that many ITEs. The device may require less though.
+ * Even if the device wants a single LPI, the ITT must be
+ * sized as a power of two (and you need at least one bit...).
*/
- nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
+ nr_ites = max(2, nvecs);
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);
if (alloc_lpis) {
- lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+ lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
col_map = kcalloc(nr_lpis, sizeof(*col_map),
GFP_KERNEL);
@@ -2379,9 +2448,9 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
/* If all interrupts have been freed, start mopping the floor */
if (bitmap_empty(its_dev->event_map.lpi_map,
its_dev->event_map.nr_lpis)) {
- its_lpi_free_chunks(its_dev->event_map.lpi_map,
- its_dev->event_map.lpi_base,
- its_dev->event_map.nr_lpis);
+ its_lpi_free(its_dev->event_map.lpi_map,
+ its_dev->event_map.lpi_base,
+ its_dev->event_map.nr_lpis);
kfree(its_dev->event_map.col_map);
/* Unmap device/itt */
@@ -2780,7 +2849,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
}
if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
- its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+ its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
its_free_prop_table(vm->vprop_page);
}
}
@@ -2795,18 +2864,18 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
BUG_ON(!vm);
- bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
+ bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
if (!bitmap)
return -ENOMEM;
if (nr_ids < nr_irqs) {
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
return -ENOMEM;
}
vprop_page = its_allocate_prop_table(GFP_KERNEL);
if (!vprop_page) {
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
return -ENOMEM;
}
@@ -2833,7 +2902,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
if (i > 0)
its_vpe_irq_domain_free(domain, virq, i - 1);
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
}
@@ -3070,7 +3139,7 @@ static int its_save_disable(void)
struct its_node *its;
int err = 0;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
@@ -3102,7 +3171,7 @@ err:
writel_relaxed(its->ctlr_save, base + GITS_CTLR);
}
}
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
return err;
}
@@ -3112,7 +3181,7 @@ static void its_restore_enable(void)
struct its_node *its;
int ret;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
int i;
@@ -3164,7 +3233,7 @@ static void its_restore_enable(void)
GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
its_cpu_init_collection(its);
}
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
}
static struct syscore_ops its_syscore_ops = {
@@ -3398,9 +3467,9 @@ static int __init its_probe_one(struct resource *res,
if (err)
goto out_free_tables;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_add(&its->entry, &its_nodes);
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
return 0;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 76ea56d779a1..e214181b77b7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -877,7 +877,7 @@ static struct irq_chip gic_eoimode1_chip = {
.flags = IRQCHIP_SET_TYPE_MASKED,
};
-#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
@@ -1091,7 +1091,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
*/
typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
- gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+ gic_data.rdists.gicd_typer = typer;
gic_irqs = GICD_TYPER_IRQS(typer);
if (gic_irqs > 1020)
gic_irqs = 1020;
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index fc5953dea509..2ff08986b536 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -165,6 +165,7 @@ static int __init intc_1chip_of_init(struct device_node *node,
return ingenic_intc_of_init(node, 1);
}
IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init);
+IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init);
static int __init intc_2chip_of_init(struct device_node *node,
struct device_node *parent)
diff --git a/drivers/irqchip/irq-renesas-h8s.c b/drivers/irqchip/irq-renesas-h8s.c
index aed31afb0216..85234d456638 100644
--- a/drivers/irqchip/irq-renesas-h8s.c
+++ b/drivers/irqchip/irq-renesas-h8s.c
@@ -12,7 +12,7 @@
#include <asm/io.h>
static void *intc_baseaddr;
-#define IPRA ((unsigned long)intc_baseaddr)
+#define IPRA (intc_baseaddr)
static const unsigned char ipr_table[] = {
0x03, 0x02, 0x01, 0x00, 0x13, 0x12, 0x11, 0x10, /* 16 - 23 */
@@ -34,7 +34,7 @@ static const unsigned char ipr_table[] = {
static void h8s_disable_irq(struct irq_data *data)
{
int pos;
- unsigned int addr;
+ void __iomem *addr;
unsigned short pri;
int irq = data->irq;
@@ -48,7 +48,7 @@ static void h8s_disable_irq(struct irq_data *data)
static void h8s_enable_irq(struct irq_data *data)
{
int pos;
- unsigned int addr;
+ void __iomem *addr;
unsigned short pri;
int irq = data->irq;
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
new file mode 100644
index 000000000000..532e9d68c704
--- /dev/null
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2018 Christoph Hellwig
+ */
+#define pr_fmt(fmt) "plic: " fmt
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/*
+ * This driver implements a version of the RISC-V PLIC with the actual layout
+ * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
+ *
+ * https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
+ *
+ * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
+ * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
+ * Spec.
+ */
+
+#define MAX_DEVICES 1024
+#define MAX_CONTEXTS 15872
+
+/*
+ * Each interrupt source has a priority register associated with it.
+ * We always hardwire it to one in Linux.
+ */
+#define PRIORITY_BASE 0
+#define PRIORITY_PER_ID 4
+
+/*
+ * Each hart context has a vector of interrupt enable bits associated with it.
+ * There's one bit for each interrupt source.
+ */
+#define ENABLE_BASE 0x2000
+#define ENABLE_PER_HART 0x80
+
+/*
+ * Each hart context has a set of control registers associated with it. Right
+ * now there are only two: a source priority threshold over which the hart will
+ * take an interrupt, and a register to claim interrupts.
+ */
+#define CONTEXT_BASE 0x200000
+#define CONTEXT_PER_HART 0x1000
+#define CONTEXT_THRESHOLD 0x00
+#define CONTEXT_CLAIM 0x04
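+
+/*
+ * For example, hart context 2 has its enable bits at
+ * plic_regs + 0x2000 + 2 * 0x80 and its claim/complete register at
+ * plic_regs + 0x200000 + 2 * 0x1000 + 0x04, as computed by
+ * plic_enable_base() and plic_hart_offset() below.
+ */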
+
+static void __iomem *plic_regs;
+
+struct plic_handler {
+ bool present;
+ int ctxid;
+};
+static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
+
+static inline void __iomem *plic_hart_offset(int ctxid)
+{
+ return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
+}
+
+static inline u32 __iomem *plic_enable_base(int ctxid)
+{
+ return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
+}
+
+/*
+ * Protect mask operations on the registers given that we can't assume that
+ * atomic memory operations work on them.
+ */
+static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
+
+static inline void plic_toggle(int ctxid, int hwirq, int enable)
+{
+ u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+ u32 hwirq_mask = 1 << (hwirq % 32);
+
+ raw_spin_lock(&plic_toggle_lock);
+ if (enable)
+ writel(readl(reg) | hwirq_mask, reg);
+ else
+ writel(readl(reg) & ~hwirq_mask, reg);
+ raw_spin_unlock(&plic_toggle_lock);
+}
+
+static inline void plic_irq_toggle(struct irq_data *d, int enable)
+{
+ int cpu;
+
+ writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+ for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+ struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+ if (handler->present)
+ plic_toggle(handler->ctxid, d->hwirq, enable);
+ }
+}
+
+static void plic_irq_enable(struct irq_data *d)
+{
+ plic_irq_toggle(d, 1);
+}
+
+static void plic_irq_disable(struct irq_data *d)
+{
+ plic_irq_toggle(d, 0);
+}
+
+static struct irq_chip plic_chip = {
+ .name = "SiFive PLIC",
+ /*
+ * There is no need to mask/unmask PLIC interrupts. They are "masked"
+ * by reading claim and "unmasked" when writing it back.
+ */
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
+};
+
+static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
+ irq_set_chip_data(irq, NULL);
+ irq_set_noprobe(irq);
+ return 0;
+}
+
+static const struct irq_domain_ops plic_irqdomain_ops = {
+ .map = plic_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static struct irq_domain *plic_irqdomain;
+
+/*
+ * Handling an interrupt is a two-step process: first you claim the interrupt
+ * by reading the claim register, then you complete the interrupt by writing
+ * that source ID back to the same claim register. This automatically enables
+ * and disables the interrupt, so there's nothing else to do.
+ */
+static void plic_handle_irq(struct pt_regs *regs)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+ irq_hw_number_t hwirq;
+
+ WARN_ON_ONCE(!handler->present);
+
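+ /*
+ * Mask supervisor external interrupts while draining the claim
+ * register; they are re-enabled once every pending source has been
+ * claimed and completed.
+ */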
+ csr_clear(sie, SIE_SEIE);
+ while ((hwirq = readl(claim))) {
+ int irq = irq_find_mapping(plic_irqdomain, hwirq);
+
+ if (unlikely(irq <= 0))
+ pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
+ hwirq);
+ else
+ generic_handle_irq(irq);
+ writel(hwirq, claim);
+ }
+ csr_set(sie, SIE_SEIE);
+}
+
+/*
+ * Walk up the DT tree until we find an active RISC-V core (HART) node and
+ * extract the cpuid from it.
+ */
+static int plic_find_hart_id(struct device_node *node)
+{
+ for (; node; node = node->parent) {
+ if (of_device_is_compatible(node, "riscv"))
+ return riscv_of_processor_hart(node);
+ }
+
+ return -1;
+}
+
+static int __init plic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int error = 0, nr_handlers, nr_mapped = 0, i;
+ u32 nr_irqs = 0;
+
+ if (plic_regs) {
+ pr_warn("PLIC already present.\n");
+ return -ENXIO;
+ }
+
+ plic_regs = of_iomap(node, 0);
+ if (WARN_ON(!plic_regs))
+ return -EIO;
+
+ error = -EINVAL;
+ of_property_read_u32(node, "riscv,ndev", &nr_irqs);
+ if (WARN_ON(!nr_irqs))
+ goto out_iounmap;
+
+ nr_handlers = of_irq_count(node);
+ if (WARN_ON(!nr_handlers))
+ goto out_iounmap;
+ if (WARN_ON(nr_handlers < num_possible_cpus()))
+ goto out_iounmap;
+
+ error = -ENOMEM;
+ plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+ &plic_irqdomain_ops, NULL);
+ if (WARN_ON(!plic_irqdomain))
+ goto out_iounmap;
+
+ for (i = 0; i < nr_handlers; i++) {
+ struct of_phandle_args parent;
+ struct plic_handler *handler;
+ irq_hw_number_t hwirq;
+ int cpu;
+
+ if (of_irq_parse_one(node, i, &parent)) {
+ pr_err("failed to parse parent for context %d.\n", i);
+ continue;
+ }
+
+ /* skip context holes */
+ if (parent.args[0] == -1)
+ continue;
+
+ cpu = plic_find_hart_id(parent.np);
+ if (cpu < 0) {
+ pr_warn("failed to parse hart ID for context %d.\n", i);
+ continue;
+ }
+
+ handler = per_cpu_ptr(&plic_handlers, cpu);
+ handler->present = true;
+ handler->ctxid = i;
+
+ /* priority must be > threshold to trigger an interrupt */
+ writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ plic_toggle(i, hwirq, 0);
+ nr_mapped++;
+ }
+
+ pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
+ nr_irqs, nr_mapped, nr_handlers);
+ set_handle_irq(plic_handle_irq);
+ return 0;
+
+out_iounmap:
+ iounmap(plic_regs);
+ return error;
+}
+
+IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
+IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 3a7e8905a97e..3df527fcf4e1 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -159,6 +159,7 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
};
static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
+ { .exti = 0, .irq_parent = 6 },
{ .exti = 1, .irq_parent = 7 },
{ .exti = 2, .irq_parent = 8 },
{ .exti = 3, .irq_parent = 9 },
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index a15a9510c904..e539500752d4 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -98,14 +98,12 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
static void xtensa_mx_irq_enable(struct irq_data *d)
{
- variant_irq_enable(d->hwirq);
xtensa_mx_irq_unmask(d);
}
static void xtensa_mx_irq_disable(struct irq_data *d)
{
xtensa_mx_irq_mask(d);
- variant_irq_disable(d->hwirq);
}
static void xtensa_mx_irq_ack(struct irq_data *d)
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index f728755fa292..000cb5462bcf 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -55,14 +55,12 @@ static void xtensa_irq_unmask(struct irq_data *d)
static void xtensa_irq_enable(struct irq_data *d)
{
- variant_irq_enable(d->hwirq);
xtensa_irq_unmask(d);
}
static void xtensa_irq_disable(struct irq_data *d)
{
xtensa_irq_mask(d);
- variant_irq_disable(d->hwirq);
}
static void xtensa_irq_ack(struct irq_data *d)
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6e0c2814d032..ef5560b848ab 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -1321,7 +1322,7 @@ static inline void capinc_tty_exit(void) { }
* /proc/capi/capi20:
* minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
-static int capi20_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capi20_proc_show(struct seq_file *m, void *v)
{
struct capidev *cdev;
struct list_head *l;
@@ -1344,7 +1345,7 @@ static int capi20_proc_show(struct seq_file *m, void *v)
* /proc/capi/capi20ncci:
* applid ncci
*/
-static int capi20ncci_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capi20ncci_proc_show(struct seq_file *m, void *v)
{
struct capidev *cdev;
struct capincci *np;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index ee510f901720..e8949f3dcae1 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -2451,7 +2452,7 @@ lower_callback(struct notifier_block *nb, unsigned long val, void *v)
* /proc/capi/capidrv:
* nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
-static int capidrv_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capidrv_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%lu %lu %lu %lu\n",
global.ap.nrecvctlpkt,
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 20d0a080a2b0..ecdeb89645d0 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -739,6 +739,7 @@ static void read_int_callback(struct urb *urb)
case HD_OPEN_B2CHANNEL_ACK:
++channel;
+ /* fall through */
case HD_OPEN_B1CHANNEL_ACK:
bcs = cs->bcs + channel;
update_basstate(ucs, BS_B1OPEN << channel, 0);
@@ -752,6 +753,7 @@ static void read_int_callback(struct urb *urb)
case HD_CLOSE_B2CHANNEL_ACK:
++channel;
+ /* fall through */
case HD_CLOSE_B1CHANNEL_ACK:
bcs = cs->bcs + channel;
update_basstate(ucs, 0, BS_B1OPEN << channel);
@@ -765,6 +767,7 @@ static void read_int_callback(struct urb *urb)
case HD_B2_FLOW_CONTROL:
++channel;
+ /* fall through */
case HD_B1_FLOW_CONTROL:
bcs = cs->bcs + channel;
atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
@@ -972,16 +975,14 @@ static int starturbs(struct bc_state *bcs)
rc = -EFAULT;
goto error;
}
+ usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
+ usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel),
+ ubc->isoinbuf + k * BAS_INBUFSIZE,
+ BAS_INBUFSIZE, read_iso_callback, bcs,
+ BAS_FRAMETIME);
- urb->dev = bcs->cs->hw.bas->udev;
- urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel);
urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE;
- urb->transfer_buffer_length = BAS_INBUFSIZE;
urb->number_of_packets = BAS_NUMFRAMES;
- urb->interval = BAS_FRAMETIME;
- urb->complete = read_iso_callback;
- urb->context = bcs;
for (j = 0; j < BAS_NUMFRAMES; j++) {
urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
urb->iso_frame_desc[j].length = BAS_MAXFRAME;
@@ -1005,15 +1006,15 @@ static int starturbs(struct bc_state *bcs)
rc = -EFAULT;
goto error;
}
- urb->dev = bcs->cs->hw.bas->udev;
- urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel);
+ usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
+ usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel),
+ ubc->isooutbuf->data,
+ sizeof(ubc->isooutbuf->data),
+ write_iso_callback, &ubc->isoouturbs[k],
+ BAS_FRAMETIME);
+
urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = ubc->isooutbuf->data;
- urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
urb->number_of_packets = BAS_NUMFRAMES;
- urb->interval = BAS_FRAMETIME;
- urb->complete = write_iso_callback;
- urb->context = &ubc->isoouturbs[k];
for (j = 0; j < BAS_NUMFRAMES; ++j) {
urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
urb->iso_frame_desc[j].length = BAS_NORMFRAME;
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index ae2b2669af1b..8eb28a83832e 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -361,6 +361,7 @@ modehdlc(struct bchannel *bch, int protocol)
switch (protocol) {
case -1: /* used for init */
bch->state = -1;
+ /* fall through */
case ISDN_P_NONE:
if (bch->state == ISDN_P_NONE)
break;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 34c93874af23..ebb3fa2e1d00 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1296,6 +1296,7 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
case (-1): /* used for init */
bch->state = -1;
bch->nr = bc;
+ /* fall through */
case (ISDN_P_NONE):
if (bch->state == ISDN_P_NONE)
return 0;
@@ -2219,7 +2220,7 @@ hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct hfc_pci *card;
struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
- card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC);
+ card = kzalloc(sizeof(struct hfc_pci), GFP_KERNEL);
if (!card) {
printk(KERN_ERR "No kmem for HFC card\n");
return err;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 17cc879ad2bb..6d05946b445e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -819,6 +819,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
int fifon = fifo->fifonum;
int i;
int hdlc = 0;
+ unsigned long flags;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) "
@@ -835,7 +836,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
return;
}
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->dch) {
rx_skb = fifo->dch->rx_skb;
maxlen = fifo->dch->maxlen;
@@ -844,7 +845,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
if (fifo->bch) {
if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) {
fifo->bch->dropcnt += len;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
maxlen = bchannel_get_rxbuf(fifo->bch, len);
@@ -854,7 +855,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
skb_trim(rx_skb, 0);
pr_warning("%s.B%d: No bufferspace for %d bytes\n",
hw->name, fifo->bch->nr, len);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
maxlen = fifo->bch->maxlen;
@@ -878,7 +879,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
} else {
printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
hw->name, __func__);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
}
@@ -888,7 +889,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
"for fifo(%d) HFCUSB_D_RX\n",
hw->name, __func__, fifon);
skb_trim(rx_skb, 0);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
}
@@ -942,7 +943,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
/* deliver transparent data to layer2 */
recv_Bchannel(fifo->bch, MISDN_ID_ANY, false);
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
static void
@@ -979,18 +980,19 @@ rx_iso_complete(struct urb *urb)
__u8 *buf;
static __u8 eof[8];
__u8 s0_state;
+ unsigned long flags;
fifon = fifo->fifonum;
status = urb->status;
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
/*
* ISO transfer only partially completed,
@@ -1096,15 +1098,16 @@ rx_int_complete(struct urb *urb)
struct usb_fifo *fifo = (struct usb_fifo *) urb->context;
struct hfcsusb *hw = fifo->hw;
static __u8 eof[8];
+ unsigned long flags;
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
fifon = fifo->fifonum;
if ((!fifo->active) || (urb->status)) {
@@ -1172,12 +1175,13 @@ tx_iso_complete(struct urb *urb)
int *tx_idx;
int frame_complete, fifon, status, fillempty = 0;
__u8 threshbit, *p;
+ unsigned long flags;
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
@@ -1195,7 +1199,7 @@ tx_iso_complete(struct urb *urb)
} else {
printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
hw->name, __func__);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
@@ -1375,7 +1379,7 @@ tx_iso_complete(struct urb *urb)
hw->name, __func__,
symbolic(urb_errlist, status), status, fifon);
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index 1fc290659e94..3e01012be4ab 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -887,6 +887,7 @@ release_card(struct inf_hw *card) {
release_card(card->sc[i]);
card->sc[i] = NULL;
}
+ /* fall through */
default:
pci_disable_device(card->pdev);
pci_set_drvdata(card->pdev, NULL);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index b791688d0228..386731ec2489 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -972,6 +972,7 @@ isar_pump_statev_fax(struct isar_ch *ch, u8 devt) {
break;
case PCTRL_CMD_FTM:
p1 = 2;
+ /* fall through */
case PCTRL_CMD_FTH:
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_SILON, 1, &p1);
@@ -1177,6 +1178,7 @@ setup_pump(struct isar_ch *ch) {
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG,
PMOD_DTMF, 1, param);
}
+ /* fall through */
case ISDN_P_B_MODEM_ASYNC:
ctrl = PMOD_DATAMODEM;
if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) {
@@ -1268,6 +1270,7 @@ setup_iom2(struct isar_ch *ch) {
case ISDN_P_B_MODEM_ASYNC:
case ISDN_P_B_T30_FAX:
cmsb |= IOM_CTRL_RCV;
+ /* fall through */
case ISDN_P_B_L2DTMF:
if (test_bit(FLG_DTMFSEND, &ch->bch.Flags))
cmsb |= IOM_CTRL_RCV;
@@ -1560,6 +1563,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
ich->is->name, hh->id);
ret = -EINVAL;
}
+ /* fall through */
default:
pr_info("%s: %s unknown prim(%x,%x)\n",
ich->is->name, __func__, hh->prim, hh->id);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 89d9ba8ed535..2b317cb63d06 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1084,7 +1084,7 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC);
+ card = kzalloc(sizeof(struct tiger_hw), GFP_KERNEL);
if (!card) {
pr_info("No kmem for Netjet\n");
return err;
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index a18b605fb4f2..b161456c942e 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -207,6 +207,7 @@ modehdlc(struct BCState *bcs, int mode, int bc)
bcs->mode = 1;
bcs->channel = bc;
bc = 0;
+ /* fall through */
case (L1_MODE_NULL):
if (bcs->mode == L1_MODE_NULL)
return;
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index ddec47a911a0..9ee06328784c 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -1012,7 +1012,7 @@ dummy_pstack(struct PStack *st, int pr, void *arg) {
static int
init_PStack(struct PStack **stp) {
- *stp = kmalloc(sizeof(struct PStack), GFP_ATOMIC);
+ *stp = kmalloc(sizeof(struct PStack), GFP_KERNEL);
if (!*stp)
return -ENOMEM;
(*stp)->next = NULL;
@@ -1369,6 +1369,7 @@ leased_l1l2(struct PStack *st, int pr, void *arg)
case (PH_ACTIVATE | INDICATION):
case (PH_ACTIVATE | CONFIRM):
event = EV_LEASED;
+ /* fall through */
case (PH_DEACTIVATE | INDICATION):
case (PH_DEACTIVATE | CONFIRM):
if (test_bit(FLG_TWO_DCHAN, &chanp->cs->HW_Flags))
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 7108bdb8742e..b12e6cae26c2 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1029,7 +1029,7 @@ static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
*cs_out = NULL;
- cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
+ cs = kzalloc(sizeof(struct IsdnCardState), GFP_KERNEL);
if (!cs) {
printk(KERN_WARNING
"HiSax: No memory for IsdnCardState(card %d)\n",
@@ -1059,12 +1059,12 @@ static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
"HiSax: Card Type %d out of range\n", card->typ);
goto outf_cs;
}
- if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
+ if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_KERNEL))) {
printk(KERN_WARNING
"HiSax: No memory for dlog(card %d)\n", cardnr + 1);
goto outf_cs;
}
- if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
+ if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_KERNEL))) {
printk(KERN_WARNING
"HiSax: No memory for status_buf(card %d)\n",
cardnr + 1);
@@ -1123,7 +1123,7 @@ static int hisax_cs_setup(int cardnr, struct IsdnCard *card,
{
int ret;
- if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) {
+ if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_KERNEL))) {
printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n");
ll_unload(cs);
goto outf_cs;
@@ -1843,6 +1843,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg)
case PH_DEACTIVATE | REQUEST:
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
skb_queue_purge(&bcs->squeue);
+ /* fall through */
default:
B_L2L1(b_if, pr, arg);
break;
diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c
index 35c6df6534ec..a6d8af02354a 100644
--- a/drivers/isdn/hisax/gazel.c
+++ b/drivers/isdn/hisax/gazel.c
@@ -108,6 +108,7 @@ ReadISAC(struct IsdnCardState *cs, u_char offset)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
return (readreg(cs->hw.gazel.isac, off2));
case R753:
@@ -125,6 +126,7 @@ WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
writereg(cs->hw.gazel.isac, off2, value);
break;
@@ -203,6 +205,7 @@ ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
return (readreg(cs->hw.gazel.hscx[hscx], off2));
case R753:
@@ -220,6 +223,7 @@ WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
writereg(cs->hw.gazel.hscx[hscx], off2, value);
break;
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 97ecb3073045..1d4cd01d4685 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -432,16 +432,12 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
{
int k;
- urb->dev = dev;
- urb->pipe = pipe;
- urb->complete = complete;
+ usb_fill_int_urb(urb, dev, pipe, buf, packet_size * num_packets,
+ complete, context, interval);
+
urb->number_of_packets = num_packets;
- urb->transfer_buffer_length = packet_size * num_packets;
- urb->context = context;
- urb->transfer_buffer = buf;
urb->transfer_flags = URB_ISO_ASAP;
urb->actual_length = 0;
- urb->interval = interval;
for (k = 0; k < num_packets; k++) {
urb->iso_frame_desc[k].offset = packet_size * k;
urb->iso_frame_desc[k].length = packet_size;
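
usb_fill_int_urb() packs the removed open-coded assignments into one helper from <linux/usb.h>. A simplified sketch of its effect; the real helper additionally encodes 'interval' exponentially for high- and SuperSpeed devices and sets urb->start_frame = -1:

/* Simplified sketch of usb_fill_int_urb(); see <linux/usb.h>. */
static inline void sketch_fill_int_urb(struct urb *urb, struct usb_device *dev,
					unsigned int pipe, void *buf, int len,
					usb_complete_t complete_fn,
					void *context, int interval)
{
	urb->dev = dev;
	urb->pipe = pipe;
	urb->transfer_buffer = buf;
	urb->transfer_buffer_length = len;
	urb->complete = complete_fn;
	urb->context = context;
	urb->interval = interval;	/* speed-dependent encoding omitted */
}
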
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index d01ff116797b..82c1879f5664 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -1089,6 +1089,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
break;
case PCTRL_CMD_FTM:
p1 = 2;
+ /* fall through */
case PCTRL_CMD_FTH:
sendmsg(cs, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_SILON, 1, &p1);
@@ -1097,6 +1098,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
case PCTRL_CMD_FRM:
if (frm_extra_delay)
mdelay(frm_extra_delay);
+ /* fall through */
case PCTRL_CMD_FRH:
p1 = bcs->hw.isar.mod = bcs->hw.isar.newmod;
bcs->hw.isar.newmod = 0;
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index da0a1c6aa329..98f60d1523f4 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -88,6 +88,7 @@ l3_1tr6_setup_req(struct l3_process *pc, u_char pr, void *arg)
break;
case 'C':
channel = 0x08;
+ /* fall through */
case 'P':
channel |= 0x80;
teln++;
diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c
index 18a3484b1f7e..368d152a8f1d 100644
--- a/drivers/isdn/hisax/l3dss1.c
+++ b/drivers/isdn/hisax/l3dss1.c
@@ -1282,6 +1282,7 @@ l3dss1_setup_req(struct l3_process *pc, u_char pr,
switch (0x5f & *teln) {
case 'C':
channel = 0x08;
+ /* fall through */
case 'P':
channel |= 0x80;
teln++;
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 1cb9930d5e24..f207fda691c7 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -408,15 +408,10 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev,
{
int k;
- urb->dev = dev;
- urb->pipe = pipe;
- urb->interval = 1;
- urb->transfer_buffer = buf;
+ usb_fill_int_urb(urb, dev, pipe, buf, num_packets * packet_size,
+ complete, context, 1);
+
urb->number_of_packets = num_packets;
- urb->transfer_buffer_length = num_packets * packet_size;
- urb->actual_length = 0;
- urb->complete = complete;
- urb->context = context;
urb->transfer_flags = URB_ISO_ASAP;
for (k = 0; k < num_packets; k++) {
urb->iso_frame_desc[k].offset = packet_size * k;
diff --git a/drivers/isdn/hysdn/hysdn_boot.c b/drivers/isdn/hysdn/hysdn_boot.c
index 4a0425378f37..ba177c3a621b 100644
--- a/drivers/isdn/hysdn/hysdn_boot.c
+++ b/drivers/isdn/hysdn/hysdn_boot.c
@@ -99,6 +99,7 @@ pof_handle_data(hysdn_card *card, int datlen)
case TAG_CBOOTDTA:
DecryptBuf(boot, datlen); /* we need to decrypt the buffer */
+ /* fall through */
case TAG_BOOTDTA:
if (card->debug_flags & LOG_POF_RECORD)
hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
@@ -137,6 +138,7 @@ pof_handle_data(hysdn_card *card, int datlen)
case TAG_CABSDATA:
DecryptBuf(boot, datlen); /* we need to decrypt the buffer */
+ /* fall through */
case TAG_ABSDATA:
if (card->debug_flags & LOG_POF_RECORD)
hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 7a501dbe7123..6a5b3f00f9ad 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1640,13 +1640,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
} else
return -EINVAL;
case IIOCDBGVAR:
- if (arg) {
- if (copy_to_user(argp, &dev, sizeof(ulong)))
- return -EFAULT;
- return 0;
- } else
- return -EINVAL;
- break;
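+ /* IIOCDBGVAR copied the value of the global 'dev' pointer (a kernel
+  * address) to user space; reject the ioctl instead of leaking it.
+  */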
+ return -EINVAL;
default:
if ((cmd & IIOCDRVCTL) == IIOCDRVCTL)
cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 960f26348bb5..b730037a0e2d 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -787,7 +787,7 @@ isdn_tty_suspend(char *id, modem_info *info, atemu *m)
cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */
cmd.parm.cmsg.para[4] = 0;
cmd.parm.cmsg.para[5] = l;
- strncpy(&cmd.parm.cmsg.para[6], id, l);
+ memcpy(&cmd.parm.cmsg.para[6], id, l);
cmd.command = CAPI_PUT_MESSAGE;
cmd.driver = info->isdn_driver;
cmd.arg = info->isdn_channel;
@@ -877,7 +877,7 @@ isdn_tty_resume(char *id, modem_info *info, atemu *m)
cmd.parm.cmsg.para[3] = 5; /* 16 bit 0x0005 Resume */
cmd.parm.cmsg.para[4] = 0;
cmd.parm.cmsg.para[5] = l;
- strncpy(&cmd.parm.cmsg.para[6], id, l);
+ memcpy(&cmd.parm.cmsg.para[6], id, l);
cmd.command = CAPI_PUT_MESSAGE;
info->dialing = 1;
// strcpy(dev->num[i], n);
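
The strncpy()-to-memcpy() changes above are correct because para is a length-prefixed byte array rather than a NUL-terminated string: para[5] already carries the length l, so no terminator is wanted and strncpy() would only obscure that. The pattern, assuming l = strlen(id) as in the surrounding code:

/* Length-prefixed copy: the receiver reads the length from para[5],
 * so exactly l payload bytes follow and no NUL terminator is needed.
 */
l = strlen(id);
cmd.parm.cmsg.para[5] = l;
memcpy(&cmd.parm.cmsg.para[6], id, l);
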
diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c
index 8b74ce412524..2a5f6668756c 100644
--- a/drivers/isdn/i4l/isdn_v110.c
+++ b/drivers/isdn/i4l/isdn_v110.c
@@ -354,6 +354,7 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n");
return line;
}
+ /* else: fall through */
case 128:
m[line] = 128; /* leftmost -> set byte to 10000000 */
mbit = 64; /* current bit in the matrix line */
@@ -386,20 +387,28 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
switch (++line % 10) {
case 1:
m[line++] = 0xfe;
+ /* fall through */
case 2:
m[line++] = 0xfe;
+ /* fall through */
case 3:
m[line++] = 0xfe;
+ /* fall through */
case 4:
m[line++] = 0xfe;
+ /* fall through */
case 5:
m[line++] = 0xbf;
+ /* fall through */
case 6:
m[line++] = 0xfe;
+ /* fall through */
case 7:
m[line++] = 0xfe;
+ /* fall through */
case 8:
m[line++] = 0xfe;
+ /* fall through */
case 9:
m[line++] = 0xfe;
}
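
The ladder above falls through on purpose: ++line % 10 finds the current position inside a 10-byte V.110 matrix line, and the remaining fixed bytes (0xbf at position 5, 0xfe elsewhere) are filled to the end of that line. A hypothetical equivalent loop, shown only to make the fall-through semantics explicit:

/* Hypothetical rewrite of the switch ladder above; same effect. */
static const unsigned char v110_fill[10] = {
	0x00, 0xfe, 0xfe, 0xfe, 0xfe, 0xbf, 0xfe, 0xfe, 0xfe, 0xfe,
};
int pos;

line++;
for (pos = line % 10; pos >= 1 && pos <= 9; pos++)
	m[line++] = v110_fill[pos];
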
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 422dced7c90a..d97c6dd52223 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -539,6 +539,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
rq.protocol = ISDN_P_NT_S0;
if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
rq.protocol = ISDN_P_NT_E1;
+ /* fall through */
case ISDN_P_LAPD_TE:
ch->recv = mISDN_queue_message;
ch->peer = &dev->D.st->own;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6e3a998f3370..44097a3e0fcc 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -57,12 +57,13 @@ config LEDS_AAT1290
depends on PINCTRL
help
This option enables support for the LEDs on the AAT1290.
+
config LEDS_APU
- tristate "Front panel LED support for PC Engines APU/APU2 boards"
+ tristate "Front panel LED support for PC Engines APU/APU2/APU3 boards"
depends on LEDS_CLASS
depends on X86 && DMI
help
- This driver makes the PC Engines APU/APU2 front panel LEDs
+ This driver makes the PC Engines APU/APU2/APU3 front panel LEDs
accessible from userspace programs through the LED subsystem.
To compile this driver as a module, choose M here: the
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 431123b048a2..17d73db1456e 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -103,15 +103,16 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
EXPORT_SYMBOL_GPL(led_trigger_show);
/* Caller must ensure led_cdev->trigger_lock held */
-void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
+int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
{
unsigned long flags;
char *event = NULL;
char *envp[2];
const char *name;
+ int ret;
if (!led_cdev->trigger && !trig)
- return;
+ return 0;
name = trig ? trig->name : "none";
event = kasprintf(GFP_KERNEL, "TRIGGER=%s", name);
@@ -126,7 +127,10 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
led_stop_software_blink(led_cdev);
if (led_cdev->trigger->deactivate)
led_cdev->trigger->deactivate(led_cdev);
+ device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
led_cdev->trigger = NULL;
+ led_cdev->trigger_data = NULL;
+ led_cdev->activated = false;
led_set_brightness(led_cdev, LED_OFF);
}
if (trig) {
@@ -134,8 +138,20 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
write_unlock_irqrestore(&trig->leddev_list_lock, flags);
led_cdev->trigger = trig;
+
if (trig->activate)
- trig->activate(led_cdev);
+ ret = trig->activate(led_cdev);
+ else
+ ret = 0;
+
+ if (ret)
+ goto err_activate;
+
+ ret = device_add_groups(led_cdev->dev, trig->groups);
+ if (ret) {
+ dev_err(led_cdev->dev, "Failed to add trigger attributes\n");
+ goto err_add_groups;
+ }
}
if (event) {
@@ -146,6 +162,23 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
"%s: Error sending uevent\n", __func__);
kfree(event);
}
+
+ return 0;
+
+err_add_groups:
+
+ if (trig->deactivate)
+ trig->deactivate(led_cdev);
+err_activate:
+
+ led_cdev->trigger = NULL;
+ led_cdev->trigger_data = NULL;
+ write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
+ list_del(&led_cdev->trig_list);
+ write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
+ led_set_brightness(led_cdev, LED_OFF);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(led_trigger_set);
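
With this rework a trigger's activate() reports errors and its sysfs attributes live in .groups, so led_trigger_set() creates and removes them centrally and can unwind a failed activation. A minimal sketch of a trigger on the new API (all mytrig_* names are hypothetical; the helpers are the ones the converted triggers below use):

static struct attribute *mytrig_attrs[] = {
	&dev_attr_invert.attr,		/* assumed attribute, for illustration */
	NULL
};
ATTRIBUTE_GROUPS(mytrig);

static int mytrig_activate(struct led_classdev *led_cdev)
{
	struct mytrig_data *data;	/* hypothetical per-LED state */

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;		/* the core unwinds; no sysfs cleanup here */
	led_set_trigger_data(led_cdev, data);
	return 0;
}

static void mytrig_deactivate(struct led_classdev *led_cdev)
{
	kfree(led_get_trigger_data(led_cdev));
}

static struct led_trigger mytrig = {
	.name	    = "mytrig",
	.activate   = mytrig_activate,
	.deactivate = mytrig_deactivate,
	.groups	    = mytrig_groups,	/* added/removed by led_trigger_set() */
};
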
diff --git a/drivers/leds/leds-apu.c b/drivers/leds/leds-apu.c
index 8c93d68964c7..8d42e46e2de3 100644
--- a/drivers/leds/leds-apu.c
+++ b/drivers/leds/leds-apu.c
@@ -102,6 +102,13 @@ static const struct apu_led_profile apu2_led_profile[] = {
{ "apu2:green:3", LED_OFF, APU2_FCH_GPIO_BASE + 70 * APU2_IOSIZE },
};
+/* Same as apu2_led_profile, but with "3" in the LED names. */
+static const struct apu_led_profile apu3_led_profile[] = {
+ { "apu3:green:1", LED_ON, APU2_FCH_GPIO_BASE + 68 * APU2_IOSIZE },
+ { "apu3:green:2", LED_OFF, APU2_FCH_GPIO_BASE + 69 * APU2_IOSIZE },
+ { "apu3:green:3", LED_OFF, APU2_FCH_GPIO_BASE + 70 * APU2_IOSIZE },
+};
+
static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
{
.ident = "apu",
@@ -134,6 +141,30 @@ static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu2")
}
},
+ /* PC Engines APU3 with "Legacy" bios < 4.0.8 */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "APU3")
+ }
+ },
+ /* PC Engines APU3 with "Legacy" bios >= 4.0.8 */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "apu3")
+ }
+ },
+ /* PC Engines APU3 with "Mainline" bios */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu3")
+ }
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, apu_led_dmi_table);
@@ -235,6 +266,14 @@ static int __init apu_led_probe(struct platform_device *pdev)
apu_led->platform = APU2_LED_PLATFORM;
apu_led->num_led_instances = ARRAY_SIZE(apu2_led_profile);
apu_led->iosize = APU2_IOSIZE;
+ } else if (dmi_match(DMI_BOARD_NAME, "APU3") ||
+ dmi_match(DMI_BOARD_NAME, "apu3") ||
+ dmi_match(DMI_BOARD_NAME, "PC Engines apu3")) {
+ apu_led->profile = apu3_led_profile;
+ /* Otherwise identical to APU2. */
+ apu_led->platform = APU2_LED_PLATFORM;
+ apu_led->num_led_instances = ARRAY_SIZE(apu3_led_profile);
+ apu_led->iosize = APU2_IOSIZE;
}
spin_lock_init(&apu_led->lock);
@@ -259,7 +298,10 @@ static int __init apu_led_init(void)
if (!(dmi_match(DMI_PRODUCT_NAME, "APU") ||
dmi_match(DMI_PRODUCT_NAME, "APU2") ||
dmi_match(DMI_PRODUCT_NAME, "apu2") ||
- dmi_match(DMI_PRODUCT_NAME, "PC Engines apu2"))) {
+ dmi_match(DMI_PRODUCT_NAME, "PC Engines apu2") ||
+ dmi_match(DMI_PRODUCT_NAME, "APU3") ||
+ dmi_match(DMI_PRODUCT_NAME, "apu3") ||
+ dmi_match(DMI_PRODUCT_NAME, "PC Engines apu3"))) {
pr_err("Unknown PC Engines board: %s\n",
dmi_get_system_info(DMI_PRODUCT_NAME));
return -ENODEV;
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 437173d1712c..4f413a7c5f05 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -1,17 +1,6 @@
-/*
- * TI lm3692x LED Driver
- *
- * Copyright (C) 2017 Texas Instruments
- *
- * Author: Dan Murphy <dmurphy@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * Data sheet is located
- * http://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
- */
+// SPDX-License-Identifier: GPL-2.0
+// TI LM3692x LED chip family driver
+// Copyright (C) 2017-18 Texas Instruments Incorporated - http://www.ti.com/
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -26,6 +15,9 @@
#include <linux/slab.h>
#include <uapi/linux/uleds.h>
+#define LM36922_MODEL 0
+#define LM36923_MODEL 1
+
#define LM3692X_REV 0x0
#define LM3692X_RESET 0x1
#define LM3692X_EN 0x10
@@ -44,6 +36,9 @@
#define LM3692X_DEVICE_EN BIT(0)
#define LM3692X_LED1_EN BIT(1)
#define LM3692X_LED2_EN BIT(2)
+#define LM36923_LED3_EN BIT(3)
+#define LM3692X_ENABLE_MASK (LM3692X_DEVICE_EN | LM3692X_LED1_EN | \
+ LM3692X_LED2_EN | LM36923_LED3_EN)
/* Brightness Control Bits */
#define LM3692X_BL_ADJ_POL BIT(0)
@@ -109,6 +104,8 @@
* @enable_gpio - VDDIO/EN gpio to enable communication interface
* @regulator - LED supply regulator pointer
* @label - LED label
+ * @led_enable - LED sync to be enabled
+ * @model_id - Current device model ID enumerated
*/
struct lm3692x_led {
struct mutex lock;
@@ -118,6 +115,8 @@ struct lm3692x_led {
struct gpio_desc *enable_gpio;
struct regulator *regulator;
char label[LED_MAX_NAME_SIZE];
+ int led_enable;
+ int model_id;
};
static const struct reg_default lm3692x_reg_defs[] = {
@@ -200,6 +199,7 @@ out:
static int lm3692x_init(struct lm3692x_led *led)
{
+ int enable_state;
int ret;
if (led->regulator) {
@@ -226,9 +226,25 @@ static int lm3692x_init(struct lm3692x_led *led)
/*
* For glitch free operation, the following data should
- * only be written while device enable bit is 0
+ * only be written while LEDx enable bits are 0 and the device enable
+ * bit is set to 1,
* per Section 7.5.14 of the data sheet
*/
+ ret = regmap_write(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN);
+ if (ret)
+ goto out;
+
+ /*
+  * Set the brightness to 0 so that when enabled the LEDs do not come
+  * on with full brightness.
+  */
+ ret = regmap_write(led->regmap, LM3692X_BRT_MSB, 0);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_LSB, 0);
+ if (ret)
+ goto out;
+
ret = regmap_write(led->regmap, LM3692X_PWM_CTRL,
LM3692X_PWM_FILTER_100 | LM3692X_PWM_SAMP_24MHZ);
if (ret)
@@ -258,6 +274,38 @@ static int lm3692x_init(struct lm3692x_led *led)
if (ret)
goto out;
+ switch (led->led_enable) {
+ case 0:
+ default:
+ if (led->model_id == LM36923_MODEL)
+ enable_state = LM3692X_LED1_EN | LM3692X_LED2_EN |
+ LM36923_LED3_EN;
+ else
+ enable_state = LM3692X_LED1_EN | LM3692X_LED2_EN;
+
+ break;
+ case 1:
+ enable_state = LM3692X_LED1_EN;
+ break;
+ case 2:
+ enable_state = LM3692X_LED2_EN;
+ break;
+
+ case 3:
+ if (led->model_id == LM36923_MODEL) {
+ enable_state = LM36923_LED3_EN;
+ break;
+ }
+
+ ret = -EINVAL;
+ dev_err(&led->client->dev,
+ "LED3 sync not available on this device\n");
+ goto out;
+ }
+
+ ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_ENABLE_MASK,
+ enable_state | LM3692X_DEVICE_EN);
+
return ret;
out:
dev_err(&led->client->dev, "Fail writing initialization values\n");
@@ -274,52 +322,75 @@ out:
return ret;
}
-
-static int lm3692x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3692x_probe_dt(struct lm3692x_led *led)
{
- int ret;
- struct lm3692x_led *led;
- struct device_node *np = client->dev.of_node;
- struct device_node *child_node;
+ struct fwnode_handle *child = NULL;
const char *name;
+ int ret;
- led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
- if (!led)
- return -ENOMEM;
-
- for_each_available_child_of_node(np, child_node) {
- led->led_dev.default_trigger = of_get_property(child_node,
- "linux,default-trigger",
- NULL);
-
- ret = of_property_read_string(child_node, "label", &name);
- if (!ret)
- snprintf(led->label, sizeof(led->label),
- "%s:%s", id->name, name);
- else
- snprintf(led->label, sizeof(led->label),
- "%s::backlight_cluster", id->name);
- };
-
- led->enable_gpio = devm_gpiod_get_optional(&client->dev,
+ led->enable_gpio = devm_gpiod_get_optional(&led->client->dev,
"enable", GPIOD_OUT_LOW);
if (IS_ERR(led->enable_gpio)) {
ret = PTR_ERR(led->enable_gpio);
- dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
+ dev_err(&led->client->dev, "Failed to get enable gpio: %d\n",
+ ret);
return ret;
}
- led->regulator = devm_regulator_get(&client->dev, "vled");
+ led->regulator = devm_regulator_get(&led->client->dev, "vled");
if (IS_ERR(led->regulator))
led->regulator = NULL;
- led->client = client;
+ child = device_get_next_child_node(&led->client->dev, child);
+ if (!child) {
+ dev_err(&led->client->dev, "No LED Child node\n");
+ return -ENODEV;
+ }
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led->led_dev.default_trigger);
+
+ ret = fwnode_property_read_string(child, "label", &name);
+ if (ret)
+ snprintf(led->label, sizeof(led->label),
+ "%s::", led->client->name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s:%s", led->client->name, name);
+
+ ret = fwnode_property_read_u32(child, "reg", &led->led_enable);
+ if (ret) {
+ dev_err(&led->client->dev, "reg DT property missing\n");
+ return ret;
+ }
+
led->led_dev.name = led->label;
- led->led_dev.brightness_set_blocking = lm3692x_brightness_set;
- mutex_init(&led->lock);
+ ret = devm_led_classdev_register(&led->client->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&led->client->dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ led->led_dev.dev->of_node = to_of_node(child);
+
+ return 0;
+}
+static int lm3692x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lm3692x_led *led;
+ int ret;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ mutex_init(&led->lock);
+ led->client = client;
+ led->led_dev.brightness_set_blocking = lm3692x_brightness_set;
+ led->model_id = id->driver_data;
i2c_set_clientdata(client, led);
led->regmap = devm_regmap_init_i2c(client, &lm3692x_regmap_config);
@@ -330,15 +401,13 @@ static int lm3692x_probe(struct i2c_client *client,
return ret;
}
- ret = lm3692x_init(led);
+ ret = lm3692x_probe_dt(led);
if (ret)
return ret;
- ret = devm_led_classdev_register(&client->dev, &led->led_dev);
- if (ret) {
- dev_err(&client->dev, "led register err: %d\n", ret);
+ ret = lm3692x_init(led);
+ if (ret)
return ret;
- }
return 0;
}
@@ -348,6 +417,12 @@ static int lm3692x_remove(struct i2c_client *client)
struct lm3692x_led *led = i2c_get_clientdata(client);
int ret;
+ ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN, 0);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed to disable regulator\n");
+ return ret;
+ }
+
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
@@ -364,8 +439,8 @@ static int lm3692x_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3692x_id[] = {
- { "lm36922", 0 },
- { "lm36923", 1 },
+ { "lm36922", LM36922_MODEL },
+ { "lm36923", LM36923_MODEL },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm3692x_id);
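
Naming the driver_data values makes the model checks in lm3692x_init() self-documenting: the I2C core hands the matched table entry to probe(), and id->driver_data becomes led->model_id. A hypothetical helper condensing the enable-bit switch above:

/* Hypothetical condensation of the led_enable switch in lm3692x_init(). */
static int lm3692x_default_enable_bits(int model_id)
{
	if (model_id == LM36923_MODEL)	/* LM36923 has a third sink */
		return LM3692X_LED1_EN | LM3692X_LED2_EN | LM36923_LED3_EN;

	return LM3692X_LED1_EN | LM3692X_LED2_EN;	/* LM36922 */
}
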
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 5ec730a31b65..de3623e0d094 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -1,32 +1,21 @@
-/*
- * LEDs driver for LT3593 controllers
- *
- * See the datasheet at http://cds.linear.com/docs/Datasheet/3593f.pdf
- *
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- *
- * Based on leds-gpio.c,
- *
- * Copyright (C) 2007 8D Technologies inc.
- * Raphael Assenat <raph@8d.com>
- * Copyright (C) 2008 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2009,2018 Daniel Mack <daniel@zonque.org>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <uapi/linux/uleds.h>
struct lt3593_led_data {
+ char name[LED_MAX_NAME_SIZE];
struct led_classdev cdev;
- unsigned gpio;
+ struct gpio_desc *gpiod;
};
static int lt3593_led_set(struct led_classdev *led_cdev,
@@ -46,137 +35,168 @@ static int lt3593_led_set(struct led_classdev *led_cdev,
*/
if (value == 0) {
- gpio_set_value_cansleep(led_dat->gpio, 0);
+ gpiod_set_value_cansleep(led_dat->gpiod, 0);
return 0;
}
pulses = 32 - (value * 32) / 255;
if (pulses == 0) {
- gpio_set_value_cansleep(led_dat->gpio, 0);
+ gpiod_set_value_cansleep(led_dat->gpiod, 0);
mdelay(1);
- gpio_set_value_cansleep(led_dat->gpio, 1);
+ gpiod_set_value_cansleep(led_dat->gpiod, 1);
return 0;
}
- gpio_set_value_cansleep(led_dat->gpio, 1);
+ gpiod_set_value_cansleep(led_dat->gpiod, 1);
while (pulses--) {
- gpio_set_value_cansleep(led_dat->gpio, 0);
+ gpiod_set_value_cansleep(led_dat->gpiod, 0);
udelay(1);
- gpio_set_value_cansleep(led_dat->gpio, 1);
+ gpiod_set_value_cansleep(led_dat->gpiod, 1);
udelay(1);
}
return 0;
}
-static int create_lt3593_led(const struct gpio_led *template,
- struct lt3593_led_data *led_dat, struct device *parent)
+static struct lt3593_led_data *lt3593_led_probe_pdata(struct device *dev)
{
+ struct gpio_led_platform_data *pdata = dev_get_platdata(dev);
+ const struct gpio_led *template = &pdata->leds[0];
+ struct lt3593_led_data *led_data;
int ret, state;
- /* skip leds on GPIOs that aren't available */
- if (!gpio_is_valid(template->gpio)) {
- dev_info(parent, "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
- KBUILD_MODNAME, template->gpio, template->name);
- return 0;
- }
+ if (pdata->num_leds != 1)
+ return ERR_PTR(-EINVAL);
- led_dat->cdev.name = template->name;
- led_dat->cdev.default_trigger = template->default_trigger;
- led_dat->gpio = template->gpio;
+ led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
+ if (!led_data)
+ return ERR_PTR(-ENOMEM);
- led_dat->cdev.brightness_set_blocking = lt3593_led_set;
+ led_data->cdev.name = template->name;
+ led_data->cdev.default_trigger = template->default_trigger;
+ led_data->cdev.brightness_set_blocking = lt3593_led_set;
state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
- led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
+ led_data->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
- led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led_data->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = devm_gpio_request_one(parent, template->gpio, state ?
+ ret = devm_gpio_request_one(dev, template->gpio, state ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
template->name);
if (ret < 0)
- return ret;
-
- ret = led_classdev_register(parent, &led_dat->cdev);
- if (ret < 0)
- return ret;
+ return ERR_PTR(ret);
- dev_info(parent, "%s: registered LT3593 LED '%s' at GPIO %d\n",
- KBUILD_MODNAME, template->name, template->gpio);
+ led_data->gpiod = gpio_to_desc(template->gpio);
+ if (!led_data->gpiod)
+ return ERR_PTR(-EPROBE_DEFER);
- return 0;
-}
+ ret = devm_led_classdev_register(dev, &led_data->cdev);
+ if (ret < 0)
+ return ERR_PTR(ret);
-static void delete_lt3593_led(struct lt3593_led_data *led)
-{
- if (!gpio_is_valid(led->gpio))
- return;
+ dev_info(dev, "registered LT3593 LED '%s' at GPIO %d\n",
+ template->name, template->gpio);
- led_classdev_unregister(&led->cdev);
+ return led_data;
}
static int lt3593_led_probe(struct platform_device *pdev)
{
- struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct lt3593_led_data *leds_data;
- int i, ret = 0;
+ struct device *dev = &pdev->dev;
+ struct lt3593_led_data *led_data;
+ struct fwnode_handle *child;
+ int ret, state = LEDS_GPIO_DEFSTATE_OFF;
+ enum gpiod_flags flags = GPIOD_OUT_LOW;
+ const char *tmp;
+
+ if (dev_get_platdata(dev)) {
+ led_data = lt3593_led_probe_pdata(dev);
+ if (IS_ERR(led_data))
+ return PTR_ERR(led_data);
+
+ goto out;
+ }
- if (!pdata)
- return -EBUSY;
+ if (!dev->of_node)
+ return -ENODEV;
- leds_data = devm_kcalloc(&pdev->dev,
- pdata->num_leds, sizeof(struct lt3593_led_data),
- GFP_KERNEL);
- if (!leds_data)
+ led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
+ if (!led_data)
return -ENOMEM;
- for (i = 0; i < pdata->num_leds; i++) {
- ret = create_lt3593_led(&pdata->leds[i], &leds_data[i],
- &pdev->dev);
- if (ret < 0)
- goto err;
+ if (device_get_child_node_count(dev) != 1) {
+ dev_err(dev, "Device must have exactly one LED sub-node.");
+ return -EINVAL;
}
- platform_set_drvdata(pdev, leds_data);
+ led_data->gpiod = devm_gpiod_get(dev, "lltc,ctrl", 0);
+ if (IS_ERR(led_data->gpiod))
+ return PTR_ERR(led_data->gpiod);
- return 0;
+ child = device_get_next_child_node(dev, NULL);
-err:
- for (i = i - 1; i >= 0; i--)
- delete_lt3593_led(&leds_data[i]);
+ ret = fwnode_property_read_string(child, "label", &tmp);
+ if (ret < 0)
+ snprintf(led_data->name, sizeof(led_data->name),
+ "lt3593::");
+ else
+ snprintf(led_data->name, sizeof(led_data->name),
+ "lt3593:%s", tmp);
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led_data->cdev.default_trigger);
+
+ if (!fwnode_property_read_string(child, "default-state", &tmp)) {
+ if (!strcmp(tmp, "keep")) {
+ state = LEDS_GPIO_DEFSTATE_KEEP;
+ flags = GPIOD_ASIS;
+ } else if (!strcmp(tmp, "on")) {
+ state = LEDS_GPIO_DEFSTATE_ON;
+ flags = GPIOD_OUT_HIGH;
+ }
+ }
- return ret;
-}
+ led_data->cdev.name = led_data->name;
+ led_data->cdev.brightness_set_blocking = lt3593_led_set;
+ led_data->cdev.brightness = state ? LED_FULL : LED_OFF;
-static int lt3593_led_remove(struct platform_device *pdev)
-{
- int i;
- struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct lt3593_led_data *leds_data;
+ ret = devm_led_classdev_register(dev, &led_data->cdev);
+ if (ret < 0) {
+ fwnode_handle_put(child);
+ return ret;
+ }
- leds_data = platform_get_drvdata(pdev);
+ led_data->cdev.dev->of_node = dev->of_node;
- for (i = 0; i < pdata->num_leds; i++)
- delete_lt3593_led(&leds_data[i]);
+out:
+ platform_set_drvdata(pdev, led_data);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id of_lt3593_leds_match[] = {
+ { .compatible = "lltc,lt3593", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lt3593_leds_match);
+#endif
+
static struct platform_driver lt3593_led_driver = {
.probe = lt3593_led_probe,
- .remove = lt3593_led_remove,
.driver = {
.name = "leds-lt3593",
+ .of_match_table = of_match_ptr(of_lt3593_leds_match),
},
};
module_platform_driver(lt3593_led_driver);
-MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>");
MODULE_DESCRIPTION("LED driver for LT3593 controllers");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:leds-lt3593");
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index 4edf74f1d6d4..8c019c28f9f5 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -268,7 +268,7 @@ static int max8997_led_probe(struct platform_device *pdev)
mode = pdata->led_pdata->mode[led->id];
brightness = pdata->led_pdata->brightness[led->id];
- max8997_led_set_mode(led, pdata->led_pdata->mode[led->id]);
+ max8997_led_set_mode(led, mode);
if (brightness > led->cdev.max_brightness)
brightness = led->cdev.max_brightness;
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 14fe5cd43232..a0a7dc2ef87c 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -42,8 +42,8 @@
struct ns2_led_data {
struct led_classdev cdev;
- unsigned cmd;
- unsigned slow;
+ unsigned int cmd;
+ unsigned int slow;
bool can_sleep;
unsigned char sata; /* True when SATA mode active. */
rwlock_t rw_lock; /* Lock GPIOs. */
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index a2559b4fdfff..4018af769969 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -10,7 +10,6 @@ if LEDS_TRIGGERS
config LEDS_TRIGGER_TIMER
tristate "LED Timer Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a programmable timer
via sysfs. Some LED hardware can be programmed to start
@@ -21,7 +20,6 @@ config LEDS_TRIGGER_TIMER
config LEDS_TRIGGER_ONESHOT
tristate "LED One-shot Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to blink in one-shot pulses with parameters
controlled via sysfs. It's useful to notify the user on
@@ -36,7 +34,6 @@ config LEDS_TRIGGER_ONESHOT
config LEDS_TRIGGER_DISK
bool "LED Disk Trigger"
depends on IDE_GD_ATA || ATA
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by disk activity.
If unsure, say Y.
@@ -44,14 +41,12 @@ config LEDS_TRIGGER_DISK
config LEDS_TRIGGER_MTD
bool "LED MTD (NAND/NOR) Trigger"
depends on MTD
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by MTD activity.
If unsure, say N.
config LEDS_TRIGGER_HEARTBEAT
tristate "LED Heartbeat Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a CPU load average.
The flash frequency is a hyperbolic function of the 1-minute
@@ -60,7 +55,6 @@ config LEDS_TRIGGER_HEARTBEAT
config LEDS_TRIGGER_BACKLIGHT
tristate "LED backlight Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled as a backlight device: they
turn off and on when the display is blanked and unblanked.
@@ -69,7 +63,6 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
@@ -79,7 +72,6 @@ config LEDS_TRIGGER_CPU
config LEDS_TRIGGER_ACTIVITY
tristate "LED activity Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by an immediate CPU usage.
The flash frequency and duty cycle varies from faint flashes to
@@ -88,7 +80,6 @@ config LEDS_TRIGGER_ACTIVITY
config LEDS_TRIGGER_GPIO
tristate "LED GPIO Trigger"
- depends on LEDS_TRIGGERS
depends on GPIOLIB || COMPILE_TEST
help
This allows LEDs to be controlled by gpio events. It's good
@@ -101,7 +92,6 @@ config LEDS_TRIGGER_GPIO
config LEDS_TRIGGER_DEFAULT_ON
tristate "LED Default ON Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be initialised in the ON state.
If unsure, say Y.
@@ -111,7 +101,6 @@ comment "iptables trigger is under Netfilter config (LED target)"
config LEDS_TRIGGER_TRANSIENT
tristate "LED Transient Trigger"
- depends on LEDS_TRIGGERS
help
This allows one time activation of a transient state on
GPIO/PWM based hardware.
@@ -119,7 +108,6 @@ config LEDS_TRIGGER_TRANSIENT
config LEDS_TRIGGER_CAMERA
tristate "LED Camera Flash/Torch Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled as a camera flash/torch device.
This enables direct flash/torch on/off by the driver, kernel space.
@@ -127,7 +115,6 @@ config LEDS_TRIGGER_CAMERA
config LEDS_TRIGGER_PANIC
bool "LED Panic Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be configured to blink on a kernel panic.
Enabling this option will allow to mark certain LEDs as panic indicators,
@@ -137,7 +124,7 @@ config LEDS_TRIGGER_PANIC
config LEDS_TRIGGER_NETDEV
tristate "LED Netdev Trigger"
- depends on NET && LEDS_TRIGGERS
+ depends on NET
help
This allows LEDs to be controlled by network device activity.
If unsure, say Y.
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index 5081894082bd..bcbf41c90c30 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -7,8 +7,8 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
@@ -37,7 +37,6 @@ static void led_activity_function(struct timer_list *t)
struct activity_data *activity_data = from_timer(activity_data, t,
timer);
struct led_classdev *led_cdev = activity_data->led_cdev;
- struct timespec boot_time;
unsigned int target;
unsigned int usage;
int delay;
@@ -57,8 +56,6 @@ static void led_activity_function(struct timer_list *t)
return;
}
- get_monotonic_boottime(&boot_time);
-
cpus = 0;
curr_used = 0;
@@ -76,7 +73,7 @@ static void led_activity_function(struct timer_list *t)
* down to 16us, ensuring we won't overflow 32-bit computations below
* even up to 3k CPUs, while keeping divides cheap on smaller systems.
*/
- curr_boot = timespec_to_ns(&boot_time) * cpus;
+ curr_boot = ktime_get_boot_ns() * cpus;
diff_boot = (curr_boot - activity_data->last_boot) >> 16;
diff_used = (curr_used - activity_data->last_used) >> 16;
activity_data->last_boot = curr_boot;
@@ -155,8 +152,7 @@ static void led_activity_function(struct timer_list *t)
static ssize_t led_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct activity_data *activity_data = led_cdev->trigger_data;
+ struct activity_data *activity_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", activity_data->invert);
}
@@ -165,8 +161,7 @@ static ssize_t led_invert_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct activity_data *activity_data = led_cdev->trigger_data;
+ struct activity_data *activity_data = led_trigger_get_drvdata(dev);
unsigned long state;
int ret;
@@ -181,21 +176,21 @@ static ssize_t led_invert_store(struct device *dev,
static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
-static void activity_activate(struct led_classdev *led_cdev)
+static struct attribute *activity_led_attrs[] = {
+ &dev_attr_invert.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(activity_led);
+
+static int activity_activate(struct led_classdev *led_cdev)
{
struct activity_data *activity_data;
- int rc;
activity_data = kzalloc(sizeof(*activity_data), GFP_KERNEL);
if (!activity_data)
- return;
+ return -ENOMEM;
- led_cdev->trigger_data = activity_data;
- rc = device_create_file(led_cdev->dev, &dev_attr_invert);
- if (rc) {
- kfree(led_cdev->trigger_data);
- return;
- }
+ led_set_trigger_data(led_cdev, activity_data);
activity_data->led_cdev = led_cdev;
timer_setup(&activity_data->timer, led_activity_function, 0);
@@ -203,26 +198,24 @@ static void activity_activate(struct led_classdev *led_cdev)
led_cdev->blink_brightness = led_cdev->max_brightness;
led_activity_function(&activity_data->timer);
set_bit(LED_BLINK_SW, &led_cdev->work_flags);
- led_cdev->activated = true;
+
+ return 0;
}
static void activity_deactivate(struct led_classdev *led_cdev)
{
- struct activity_data *activity_data = led_cdev->trigger_data;
-
- if (led_cdev->activated) {
- del_timer_sync(&activity_data->timer);
- device_remove_file(led_cdev->dev, &dev_attr_invert);
- kfree(activity_data);
- clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
- led_cdev->activated = false;
- }
+ struct activity_data *activity_data = led_get_trigger_data(led_cdev);
+
+ del_timer_sync(&activity_data->timer);
+ kfree(activity_data);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
}
static struct led_trigger activity_led_trigger = {
.name = "activity",
.activate = activity_activate,
.deactivate = activity_deactivate,
+ .groups = activity_led_groups,
};
static int activity_reboot_notifier(struct notifier_block *nb,
@@ -272,4 +265,4 @@ module_exit(activity_exit);
MODULE_AUTHOR("Willy Tarreau <w@1wt.eu>");
MODULE_DESCRIPTION("Activity LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c
index 1ca1f1608f76..c2b57beef718 100644
--- a/drivers/leds/trigger/ledtrig-backlight.c
+++ b/drivers/leds/trigger/ledtrig-backlight.c
@@ -64,8 +64,7 @@ static int fb_notifier_callback(struct notifier_block *p,
static ssize_t bl_trig_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct bl_trig_notifier *n = led->trigger_data;
+ struct bl_trig_notifier *n = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", n->invert);
}
@@ -73,8 +72,8 @@ static ssize_t bl_trig_invert_show(struct device *dev,
static ssize_t bl_trig_invert_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t num)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct bl_trig_notifier *n = led->trigger_data;
+ struct led_classdev *led = led_trigger_get_led(dev);
+ struct bl_trig_notifier *n = led_trigger_get_drvdata(dev);
unsigned long invert;
int ret;
@@ -97,22 +96,22 @@ static ssize_t bl_trig_invert_store(struct device *dev,
}
static DEVICE_ATTR(inverted, 0644, bl_trig_invert_show, bl_trig_invert_store);
-static void bl_trig_activate(struct led_classdev *led)
+static struct attribute *bl_trig_attrs[] = {
+ &dev_attr_inverted.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(bl_trig);
+
+static int bl_trig_activate(struct led_classdev *led)
{
int ret;
struct bl_trig_notifier *n;
n = kzalloc(sizeof(struct bl_trig_notifier), GFP_KERNEL);
- led->trigger_data = n;
- if (!n) {
- dev_err(led->dev, "unable to allocate backlight trigger\n");
- return;
- }
-
- ret = device_create_file(led->dev, &dev_attr_inverted);
- if (ret)
- goto err_invert;
+ if (!n)
+ return -ENOMEM;
+ led_set_trigger_data(led, n);
n->led = led;
n->brightness = led->brightness;
@@ -122,46 +121,25 @@ static void bl_trig_activate(struct led_classdev *led)
ret = fb_register_client(&n->notifier);
if (ret)
dev_err(led->dev, "unable to register backlight trigger\n");
- led->activated = true;
- return;
-
-err_invert:
- led->trigger_data = NULL;
- kfree(n);
+ return 0;
}
static void bl_trig_deactivate(struct led_classdev *led)
{
- struct bl_trig_notifier *n =
- (struct bl_trig_notifier *) led->trigger_data;
-
- if (led->activated) {
- device_remove_file(led->dev, &dev_attr_inverted);
- fb_unregister_client(&n->notifier);
- kfree(n);
- led->activated = false;
- }
+ struct bl_trig_notifier *n = led_get_trigger_data(led);
+
+ fb_unregister_client(&n->notifier);
+ kfree(n);
}
static struct led_trigger bl_led_trigger = {
.name = "backlight",
.activate = bl_trig_activate,
- .deactivate = bl_trig_deactivate
+ .deactivate = bl_trig_deactivate,
+ .groups = bl_trig_groups,
};
-
-static int __init bl_trig_init(void)
-{
- return led_trigger_register(&bl_led_trigger);
-}
-
-static void __exit bl_trig_exit(void)
-{
- led_trigger_unregister(&bl_led_trigger);
-}
-
-module_init(bl_trig_init);
-module_exit(bl_trig_exit);
+module_led_trigger(bl_led_trigger);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("Backlight emulation LED trigger");
diff --git a/drivers/leds/trigger/ledtrig-camera.c b/drivers/leds/trigger/ledtrig-camera.c
index 9bd73a8bad5c..091a09a20c58 100644
--- a/drivers/leds/trigger/ledtrig-camera.c
+++ b/drivers/leds/trigger/ledtrig-camera.c
@@ -10,7 +10,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -54,4 +53,4 @@ module_exit(ledtrig_camera_exit);
MODULE_DESCRIPTION("LED Trigger for Camera Flash/Torch Control");
MODULE_AUTHOR("Milo Kim");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-default-on.c b/drivers/leds/trigger/ledtrig-default-on.c
index ff455cb46680..7f6d9219711e 100644
--- a/drivers/leds/trigger/ledtrig-default-on.c
+++ b/drivers/leds/trigger/ledtrig-default-on.c
@@ -8,7 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -17,29 +16,18 @@
#include <linux/leds.h>
#include "../leds.h"
-static void defon_trig_activate(struct led_classdev *led_cdev)
+static int defon_trig_activate(struct led_classdev *led_cdev)
{
led_set_brightness_nosleep(led_cdev, led_cdev->max_brightness);
+ return 0;
}
static struct led_trigger defon_led_trigger = {
.name = "default-on",
.activate = defon_trig_activate,
};
-
-static int __init defon_trig_init(void)
-{
- return led_trigger_register(&defon_led_trigger);
-}
-
-static void __exit defon_trig_exit(void)
-{
- led_trigger_unregister(&defon_led_trigger);
-}
-
-module_init(defon_trig_init);
-module_exit(defon_trig_exit);
+module_led_trigger(defon_led_trigger);
MODULE_AUTHOR("Nick Forbes <nick.forbes@incepta.com>");
MODULE_DESCRIPTION("Default-ON LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index 8891e88d54dd..ed0db8ed825f 100644
--- a/drivers/leds/trigger/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -6,7 +6,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -29,7 +28,7 @@ struct gpio_trig_data {
static irqreturn_t gpio_trig_irq(int irq, void *_led)
{
struct led_classdev *led = _led;
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_get_trigger_data(led);
int tmp;
tmp = gpio_get_value_cansleep(gpio_data->gpio);
@@ -52,8 +51,7 @@ static irqreturn_t gpio_trig_irq(int irq, void *_led)
static ssize_t gpio_trig_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", gpio_data->desired_brightness);
}
@@ -61,8 +59,7 @@ static ssize_t gpio_trig_brightness_show(struct device *dev,
static ssize_t gpio_trig_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
unsigned desired_brightness;
int ret;
@@ -82,8 +79,7 @@ static DEVICE_ATTR(desired_brightness, 0644, gpio_trig_brightness_show,
static ssize_t gpio_trig_inverted_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", gpio_data->inverted);
}
@@ -91,8 +87,8 @@ static ssize_t gpio_trig_inverted_show(struct device *dev,
static ssize_t gpio_trig_inverted_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct led_classdev *led = led_trigger_get_led(dev);
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
unsigned long inverted;
int ret;
@@ -116,8 +112,7 @@ static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show,
static ssize_t gpio_trig_gpio_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", gpio_data->gpio);
}
@@ -125,8 +120,8 @@ static ssize_t gpio_trig_gpio_show(struct device *dev,
static ssize_t gpio_trig_gpio_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct led_classdev *led = led_trigger_get_led(dev);
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
unsigned gpio;
int ret;
@@ -163,76 +158,45 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
}
static DEVICE_ATTR(gpio, 0644, gpio_trig_gpio_show, gpio_trig_gpio_store);
-static void gpio_trig_activate(struct led_classdev *led)
+static struct attribute *gpio_trig_attrs[] = {
+ &dev_attr_desired_brightness.attr,
+ &dev_attr_inverted.attr,
+ &dev_attr_gpio.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(gpio_trig);
+
+static int gpio_trig_activate(struct led_classdev *led)
{
struct gpio_trig_data *gpio_data;
- int ret;
gpio_data = kzalloc(sizeof(*gpio_data), GFP_KERNEL);
if (!gpio_data)
- return;
-
- ret = device_create_file(led->dev, &dev_attr_gpio);
- if (ret)
- goto err_gpio;
-
- ret = device_create_file(led->dev, &dev_attr_inverted);
- if (ret)
- goto err_inverted;
-
- ret = device_create_file(led->dev, &dev_attr_desired_brightness);
- if (ret)
- goto err_brightness;
+ return -ENOMEM;
gpio_data->led = led;
- led->trigger_data = gpio_data;
- led->activated = true;
-
- return;
-
-err_brightness:
- device_remove_file(led->dev, &dev_attr_inverted);
+ led_set_trigger_data(led, gpio_data);
-err_inverted:
- device_remove_file(led->dev, &dev_attr_gpio);
-
-err_gpio:
- kfree(gpio_data);
+ return 0;
}
static void gpio_trig_deactivate(struct led_classdev *led)
{
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_get_trigger_data(led);
- if (led->activated) {
- device_remove_file(led->dev, &dev_attr_gpio);
- device_remove_file(led->dev, &dev_attr_inverted);
- device_remove_file(led->dev, &dev_attr_desired_brightness);
- if (gpio_data->gpio != 0)
- free_irq(gpio_to_irq(gpio_data->gpio), led);
- kfree(gpio_data);
- led->activated = false;
- }
+ if (gpio_data->gpio != 0)
+ free_irq(gpio_to_irq(gpio_data->gpio), led);
+ kfree(gpio_data);
}
static struct led_trigger gpio_led_trigger = {
.name = "gpio",
.activate = gpio_trig_activate,
.deactivate = gpio_trig_deactivate,
+ .groups = gpio_trig_groups,
};
-
-static int __init gpio_trig_init(void)
-{
- return led_trigger_register(&gpio_led_trigger);
-}
-module_init(gpio_trig_init);
-
-static void __exit gpio_trig_exit(void)
-{
- led_trigger_unregister(&gpio_led_trigger);
-}
-module_exit(gpio_trig_exit);
+module_led_trigger(gpio_led_trigger);
MODULE_AUTHOR("Felipe Balbi <me@felipebalbi.com>");
MODULE_DESCRIPTION("GPIO LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index f0896de410b8..7a2b12e19329 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -9,8 +9,8 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -96,8 +96,8 @@ static void led_heartbeat_function(struct timer_list *t)
static ssize_t led_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
+ struct heartbeat_trig_data *heartbeat_data =
+ led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", heartbeat_data->invert);
}
@@ -105,8 +105,8 @@ static ssize_t led_invert_show(struct device *dev,
static ssize_t led_invert_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
+ struct heartbeat_trig_data *heartbeat_data =
+ led_trigger_get_drvdata(dev);
unsigned long state;
int ret;
@@ -121,22 +121,22 @@ static ssize_t led_invert_store(struct device *dev,
static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
-static void heartbeat_trig_activate(struct led_classdev *led_cdev)
+static struct attribute *heartbeat_trig_attrs[] = {
+ &dev_attr_invert.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(heartbeat_trig);
+
+static int heartbeat_trig_activate(struct led_classdev *led_cdev)
{
struct heartbeat_trig_data *heartbeat_data;
- int rc;
heartbeat_data = kzalloc(sizeof(*heartbeat_data), GFP_KERNEL);
if (!heartbeat_data)
- return;